#define IN_GCC 1
#define ONE_COMPILATION_UNIT 1
#define HAVE_CONFIG_H 1

/* Extended regular expression matching and search library,
   version 0.12.
   (Implements POSIX draft P1003.2/D11.2, except for some of the
   internationalization features.)
   Copyright (C) 1993-1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* This file has been modified for usage in libiberty.  It includes
   "xregex.h" instead of <regex.h>.  The "xregex.h" header file renames
   all external routines with an "x" prefix so they do not collide with
   the native regex routines or with other components' regex routines.  */

/* AIX requires this to be the first thing in the file.  */
#if defined _AIX && !defined __GNUC__ && !defined REGEX_MALLOC
#pragma alloca
#endif

#undef _GNU_SOURCE
#define _GNU_SOURCE

#ifndef GCC_CONFIG_H
#define GCC_CONFIG_H
/* auto-host.h.  Generated by configure.  */
/* config.in.  Generated from configure.ac by autoheader.  */

/* 1234 = LIL_ENDIAN, 4321 = BIGENDIAN */
#define BYTEORDER 1234

/* Define as the number of bits in a byte, if `limits.h' doesn't.  */
/* #undef CHAR_BIT */

/* Define 0/1 to force the choice for exception handling model.  */
/* #undef CONFIG_SJLJ_EXCEPTIONS */

/* Define to enable the use of a default assembler.  */
/* #undef DEFAULT_ASSEMBLER */

/* Define to enable the use of a default linker.  */
/* #undef DEFAULT_LINKER */

/* Define if you want to use __cxa_atexit, rather than atexit, to
   register C++ destructors for local statics and global objects.  This
   is essential for fully standards-compliant handling of destructors,
   but requires __cxa_atexit in libc.  */
/* #undef DEFAULT_USE_CXA_ATEXIT */

/* Define if you want more run-time sanity checks.  This one gets a
   grab bag of miscellaneous but relatively cheap checks.  */
/* #undef ENABLE_CHECKING */

/* Define if you want fold checked that it never destructs its
   argument.  This is quite expensive.  */
/* #undef ENABLE_FOLD_CHECKING */

/* Define if you want the garbage collector to operate in maximally
   paranoid mode, validating the entire heap and collecting garbage at
   every opportunity.  This is extremely expensive.  */
/* #undef ENABLE_GC_ALWAYS_COLLECT */

/* Define if you want the garbage collector to do object poisoning and
   other memory allocation checks.  This is quite expensive.  */
/* #undef ENABLE_GC_CHECKING */

/* Define to 1 if translation of program messages to the user's native
   language is requested.  */
#define ENABLE_NLS 1

/* Define if you want all operations on RTL (the basic data structure
   of the optimizer and back end) to be checked for dynamic type safety
   at runtime.  This is quite expensive.  */
/* #undef ENABLE_RTL_CHECKING */

/* Define if you want RTL flag accesses to be checked against the RTL
   codes that are supported for each access macro.  This is relatively
   cheap.
*/ /* #undef ENABLE_RTL_FLAG_CHECKING */ /* Define if you want all operations on trees (the basic data structure of the front ends) to be checked for dynamic type safety at runtime. This is moderately expensive. The tree browser debugging routines will also be enabled by this option. */ /* #undef ENABLE_TREE_CHECKING */ /* Define if you want to run subprograms and generated programs through valgrind (a memory checker). This is extremely expensive. */ /* #undef ENABLE_VALGRIND_CHECKING */ /* Define to 1 if installation paths should be looked up in Windows32 Registry. Ignored on non windows32 hosts. */ /* #undef ENABLE_WIN32_REGISTRY */ /* Define to the name of a file containing a list of extra machine modes for this architecture. */ #define EXTRA_MODES_FILE "config/i386/i386-modes.def" /* Define to enable detailed memory allocation stats gathering. */ /* #undef GATHER_STATISTICS */ /* Define to the type of elements in the array set by `getgroups'. Usually this is either `int' or `gid_t'. */ #define GETGROUPS_T gid_t /* Define to 1 if you have the `alphasort' function. */ #define HAVE_ALPHASORT 1 /* Define if your assembler supports dwarf2 .file/.loc directives, and preserves file table indices exactly as given. */ #define HAVE_AS_DWARF2_DEBUG_LINE 1 /* Define if your assembler supports explicit relocations. */ /* #undef HAVE_AS_EXPLICIT_RELOCS */ /* Define if your assembler supports the --gdwarf2 option. */ #define HAVE_AS_GDWARF2_DEBUG_FLAG 1 /* Define true if the assembler supports '.long foo@GOTOFF'. */ #define HAVE_AS_GOTOFF_IN_DATA 1 /* Define if your assembler supports the --gstabs option. */ #define HAVE_AS_GSTABS_DEBUG_FLAG 1 /* Define if your assembler supports the Sun syntax for cmov. */ /* #undef HAVE_AS_IX86_CMOV_SUN_SYNTAX */ /* Define if your assembler supports .sleb128 and .uleb128. */ #define HAVE_AS_LEB128 1 /* Define if your assembler supports ltoffx and ldxmov relocations. */ /* #undef HAVE_AS_LTOFFX_LDXMOV_RELOCS */ /* Define if your assembler supports mfcr field. */ /* #undef HAVE_AS_MFCRF */ /* Define if your assembler supports the -no-mul-bug-abort option. */ /* #undef HAVE_AS_NO_MUL_BUG_ABORT_OPTION */ /* Define if your assembler supports offsetable %lo(). */ /* #undef HAVE_AS_OFFSETABLE_LO10 */ /* Define if your assembler supports .register. */ /* #undef HAVE_AS_REGISTER_PSEUDO_OP */ /* Define if your assembler supports -relax option. */ /* #undef HAVE_AS_RELAX_OPTION */ /* Define if your assembler and linker support unaligned PC relative relocs. */ /* #undef HAVE_AS_SPARC_UA_PCREL */ /* Define if your assembler and linker support unaligned PC relative relocs against hidden symbols. */ /* #undef HAVE_AS_SPARC_UA_PCREL_HIDDEN */ /* Define if your assembler supports thread-local storage. */ /* #undef HAVE_AS_TLS */ /* Define to 1 if you have the `atoll' function. */ #define HAVE_ATOLL 1 /* Define to 1 if you have the `atoq' function. */ /* #undef HAVE_ATOQ */ /* Define if BANSHEE is available */ /* #undef HAVE_BANSHEE */ /* Define to 1 if you have the `clock' function. */ #define HAVE_CLOCK 1 /* Define if defines clock_t. */ #define HAVE_CLOCK_T 1 /* Define to 1 if we found a declaration for 'abort', otherwise define to 0. */ #define HAVE_DECL_ABORT 1 /* Define to 1 if we found a declaration for 'atof', otherwise define to 0. */ #define HAVE_DECL_ATOF 1 /* Define to 1 if we found a declaration for 'atol', otherwise define to 0. */ #define HAVE_DECL_ATOL 1 /* Define to 1 if we found a declaration for 'basename', otherwise define to 0. 
*/ #define HAVE_DECL_BASENAME 1 /* Define to 1 if we found a declaration for 'calloc', otherwise define to 0. */ #define HAVE_DECL_CALLOC 1 /* Define to 1 if we found a declaration for 'clock', otherwise define to 0. */ #define HAVE_DECL_CLOCK 1 /* Define to 1 if we found a declaration for 'errno', otherwise define to 0. */ #define HAVE_DECL_ERRNO 1 /* Define to 1 if we found a declaration for 'fprintf_unlocked', otherwise define to 0. */ #define HAVE_DECL_FPRINTF_UNLOCKED 0 /* Define to 1 if we found a declaration for 'fputs_unlocked', otherwise define to 0. */ #define HAVE_DECL_FPUTS_UNLOCKED 0 /* Define to 1 if we found a declaration for 'free', otherwise define to 0. */ #define HAVE_DECL_FREE 1 /* Define to 1 if we found a declaration for 'fwrite_unlocked', otherwise define to 0. */ #define HAVE_DECL_FWRITE_UNLOCKED 1 /* Define to 1 if we found a declaration for 'getcwd', otherwise define to 0. */ #define HAVE_DECL_GETCWD 1 /* Define to 1 if we found a declaration for 'getenv', otherwise define to 0. */ #define HAVE_DECL_GETENV 1 /* Define to 1 if we found a declaration for 'getopt', otherwise define to 0. */ #define HAVE_DECL_GETOPT 0 /* Define to 1 if we found a declaration for 'getrlimit', otherwise define to 0. */ #define HAVE_DECL_GETRLIMIT 1 /* Define to 1 if we found a declaration for 'getrusage', otherwise define to 0. */ #define HAVE_DECL_GETRUSAGE 1 /* Define to 1 if we found a declaration for 'getwd', otherwise define to 0. */ #define HAVE_DECL_GETWD 1 /* Define to 1 if we found a declaration for 'ldgetname', otherwise define to 0. */ #define HAVE_DECL_LDGETNAME 0 /* Define to 1 if we found a declaration for 'malloc', otherwise define to 0. */ #define HAVE_DECL_MALLOC 1 /* Define to 1 if we found a declaration for 'putc_unlocked', otherwise define to 0. */ #define HAVE_DECL_PUTC_UNLOCKED 1 /* Define to 1 if we found a declaration for 'realloc', otherwise define to 0. */ #define HAVE_DECL_REALLOC 1 /* Define to 1 if we found a declaration for 'sbrk', otherwise define to 0. */ #define HAVE_DECL_SBRK 1 /* Define to 1 if we found a declaration for 'setrlimit', otherwise define to 0. */ #define HAVE_DECL_SETRLIMIT 1 /* Define to 1 if we found a declaration for 'snprintf', otherwise define to 0. */ #define HAVE_DECL_SNPRINTF 1 /* Define to 1 if we found a declaration for 'strsignal', otherwise define to 0. */ #define HAVE_DECL_STRSIGNAL 1 /* Define to 1 if we found a declaration for 'strstr', otherwise define to 0. */ #define HAVE_DECL_STRSTR 1 /* Define to 1 if we found a declaration for 'times', otherwise define to 0. */ #define HAVE_DECL_TIMES 1 /* Define to 1 if we found a declaration for 'vasprintf', otherwise define to 0. */ #define HAVE_DECL_VASPRINTF 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_DIRECT_H */ /* Define to 1 if you have the `dup2' function. */ #define HAVE_DUP2 1 /* Define to 1 if you have the header file. */ #define HAVE_FCNTL_H 1 /* Define to 1 if you have the `fork' function. */ #define HAVE_FORK 1 /* Define to 1 if you have the `fprintf_unlocked' function. */ /* #undef HAVE_FPRINTF_UNLOCKED */ /* Define to 1 if you have the `fputc_unlocked' function. */ #define HAVE_FPUTC_UNLOCKED 1 /* Define to 1 if you have the `fputs_unlocked' function. */ #define HAVE_FPUTS_UNLOCKED 1 /* Define to 1 if you have the `fwrite_unlocked' function. */ #define HAVE_FWRITE_UNLOCKED 1 /* Define if your assembler supports .balign and .p2align. */ #define HAVE_GAS_BALIGN_AND_P2ALIGN 1 /* Define if your assembler uses the new HImode fild and fist notation. 
*/ #define HAVE_GAS_FILDS_FISTS 1 /* Define if your assembler and linker support .hidden. */ /* #undef HAVE_GAS_HIDDEN */ /* Define if your assembler supports specifying the maximum number of bytes to skip when using the GAS .p2align command. */ #define HAVE_GAS_MAX_SKIP_P2ALIGN 1 /* Define if your assembler and linker support 32-bit section relative relocs via '.secrel32 label'. */ /* #undef HAVE_GAS_PE_SECREL32_RELOC */ /* Define 0/1 if your assembler supports marking sections with SHF_MERGE flag. */ #define HAVE_GAS_SHF_MERGE 1 /* Define if your assembler supports .subsection and .subsection -1 starts emitting at the beginning of your section. */ #define HAVE_GAS_SUBSECTION_ORDERING 1 /* Define if your assembler supports .weak. */ #define HAVE_GAS_WEAK 1 /* Define to 1 if you have the `getrlimit' function. */ #define HAVE_GETRLIMIT 1 /* Define to 1 if you have the `getrusage' function. */ #define HAVE_GETRUSAGE 1 /* Define to 1 if you have the `gettimeofday' function. */ #define HAVE_GETTIMEOFDAY 1 /* Define if you have the iconv() function. */ #define HAVE_ICONV 1 /* Define to 1 if you have the header file. */ #define HAVE_ICONV_H 1 /* Define .init_array/.fini_array sections are available and working. */ /* #undef HAVE_INITFINI_ARRAY */ /* Define if you have a working header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `kill' function. */ #define HAVE_KILL 1 /* Define to 1 if you have the header file. */ #define HAVE_LANGINFO_H 1 /* Define if your file defines LC_MESSAGES. */ #define HAVE_LC_MESSAGES 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_LDFCN_H */ /* Define if your linker supports --as-needed and --no-as-needed options. */ /* #undef HAVE_LD_AS_NEEDED */ /* Define if your linker supports --eh-frame-hdr option. */ #define HAVE_LD_EH_FRAME_HDR 1 /* Define if your linker supports -pie option. */ /* #undef HAVE_LD_PIE */ /* Define if your linker links a mix of read-only and read-write sections into a read-write section. */ #define HAVE_LD_RO_RW_SECTION_MIXING 1 /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_LOCALE_H 1 /* Define if your compiler supports the \`long long' type. */ #define HAVE_LONG_LONG 1 /* Define to 1 if you have the header file. */ #define HAVE_MALLOC_H 1 /* Define to 1 if you have the `mbstowcs' function. */ #define HAVE_MBSTOWCS 1 /* Define if valgrind's memcheck.h header is installed. */ /* #undef HAVE_MEMCHECK_H */ /* Define to 1 if you have the header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `mincore' function. */ #define HAVE_MINCORE 1 /* Define to 1 if you have the `mmap' function. */ #define HAVE_MMAP 1 /* Define if mmap with MAP_ANON(YMOUS) works. */ #define HAVE_MMAP_ANON 1 /* Define if mmap of /dev/zero works. */ #define HAVE_MMAP_DEV_ZERO 1 /* Define if read-only mmap of a plain file works. */ #define HAVE_MMAP_FILE 1 /* Define to 1 if you have the `nl_langinfo' function. */ #define HAVE_NL_LANGINFO 1 /* Define if printf supports "%p". */ #define HAVE_PRINTF_PTR 1 /* Define to 1 if you have the `putc_unlocked' function. */ #define HAVE_PUTC_UNLOCKED 1 /* Define to 1 if you have the `scandir' function. */ #define HAVE_SCANDIR 1 /* Define to 1 if you have the `setlocale' function. */ #define HAVE_SETLOCALE 1 /* Define to 1 if you have the `setrlimit' function. */ #define HAVE_SETRLIMIT 1 /* Define to 1 if you have the header file. */ #define HAVE_STDDEF_H 1 /* Define to 1 if you have the header file. 
*/ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the `strsignal' function. */ #define HAVE_STRSIGNAL 1 /* Define if defines struct tms. */ #define HAVE_STRUCT_TMS 1 /* Define to 1 if you have the `sysconf' function. */ #define HAVE_SYSCONF 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_FILE_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_MMAN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_PARAM_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_RESOURCE_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIMES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have that is POSIX.1 compatible. */ #define HAVE_SYS_WAIT_H 1 /* Define to 1 if you have the `times' function. */ #define HAVE_TIMES 1 /* Define to 1 if you have the header file. */ #define HAVE_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define if valgrind's valgrind/memcheck.h header is installed. */ /* #undef HAVE_VALGRIND_MEMCHECK_H */ /* Define to 1 if you have the `vfork' function. */ #define HAVE_VFORK 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_VFORK_H */ /* Define to 1 if you have the header file. */ #define HAVE_WCHAR_H 1 /* Define to 1 if you have the `wcswidth' function. */ #define HAVE_WCSWIDTH 1 /* Define to 1 if `fork' works. */ #define HAVE_WORKING_FORK 1 /* Define this macro if mbstowcs does not crash when its first argument is NULL. */ #define HAVE_WORKING_MBSTOWCS 1 /* Define to 1 if `vfork' works. */ #define HAVE_WORKING_VFORK 1 /* Define if your compiler supports the \`__int64' type. */ /* #undef HAVE___INT64 */ /* Define if the host machine stores words of multi-word integers in big-endian order. */ /* #undef HOST_WORDS_BIG_ENDIAN */ /* Define as const if the declaration of iconv() needs const. */ #define ICONV_CONST /* Define if host mkdir takes a single argument. */ /* #undef MKDIR_TAKES_ONE_ARG */ /* Define to 1 if HOST_WIDE_INT must be 64 bits wide (see hwint.h). */ /* #undef NEED_64BIT_HOST_WIDE_INT */ /* Define to 1 if your C compiler doesn't accept -c and -o together. */ /* #undef NO_MINUS_C_MINUS_O */ /* Define to PREFIX/include if cpp should also search that directory. */ #define PREFIX_INCLUDE_DIR "/scratch2/smcc-extras/build/gcc-cvs/install/include" /* The number of bytes in type int */ #define SIZEOF_INT 4 /* The number of bytes in type long */ #define SIZEOF_LONG 4 /* The number of bytes in type long long */ #define SIZEOF_LONG_LONG 8 /* The number of bytes in type short */ #define SIZEOF_SHORT 2 /* The number of bytes in type void * */ #define SIZEOF_VOID_P 4 /* The number of bytes in type __int64 */ /* #undef SIZEOF___INT64 */ /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Define if you can safely include both and . */ #define STRING_WITH_STRINGS 1 /* Define to 1 if you can safely include both and . */ #define TIME_WITH_SYS_TIME 1 /* Define if your assembler mis-optimizes .eh_frame data. */ /* #undef USE_AS_TRADITIONAL_FORMAT */ /* Define if gcc should use -lunwind. 
*/
/* #undef USE_LIBUNWIND_EXCEPTIONS */

/* Define if location_t is fileline integer cookie.  */
/* #undef USE_MAPPED_LOCATION */

/* Define to be the last portion of registry key on windows hosts.  */
/* #undef WIN32_REGISTRY_KEY */

/* whether byteorder is bigendian */
/* #undef WORDS_BIGENDIAN */

/* Define to `int' if <sys/types.h> doesn't define.  */
/* #undef gid_t */

/* Define to `__inline__' or `__inline' if that's what the C compiler
   calls it, or to nothing if 'inline' is not supported under any name.  */
#ifndef __cplusplus
/* #undef inline */
#endif

/* Define to `int' if <sys/types.h> does not define.  */
/* #undef pid_t */

/* Define to `long' if <sys/resource.h> doesn't define.  */
/* #undef rlim_t */

/* Define to `int' if <sys/types.h> does not define.  */
/* #undef ssize_t */

/* Define to `int' if <sys/types.h> doesn't define.  */
/* #undef uid_t */

/* Define as `fork' if `vfork' does not work.  */
/* #undef vfork */

#ifdef IN_GCC
/* ANSI and traditional C compatibility macros
   Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* ANSI and traditional C compatibility macros

   ANSI C is assumed if __STDC__ is #defined.

   Macro                ANSI C definition       Traditional C definition
   -----                ---- - ----------       ----------- - ----------
   ANSI_PROTOTYPES      1                       not defined
   PTR                  `void *'                `char *'
   PTRCONST             `void *const'           `char *'
   LONG_DOUBLE          `long double'           `double'
   const                not defined             `'
   volatile             not defined             `'
   signed               not defined             `'
   VA_START(ap, var)    va_start(ap, var)       va_start(ap)

   Note that it is safe to write "void foo();" indicating a function
   with no return value, in all K+R compilers we have been able to test.

   For declaring functions with prototypes, we also provide these:

   PARAMS ((prototype))
   -- for functions which take a fixed number of arguments.  Use this
   when declaring the function.  When defining the function, write a
   K+R style argument list.  For example:

	char *strcpy PARAMS ((char *dest, char *source));
	...
	char *
	strcpy (dest, source)
	     char *dest;
	     char *source;
	{ ... }

   VPARAMS ((prototype, ...))
   -- for functions which take a variable number of arguments.  Use
   PARAMS to declare the function, VPARAMS to define it.  For example:

	int printf PARAMS ((const char *format, ...));
	...
	int printf VPARAMS ((const char *format, ...))
	{ ... }

   For writing functions which take variable numbers of arguments, we
   also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros.  These
   hide the differences between K+R and C89 more thoroughly than the
   simple VA_START() macro mentioned above.

   VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end.
   Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls
   corresponding to the list of fixed arguments.  Then use va_arg
   normally to get the variable arguments, or pass your va_list object
   around.  You do not declare the va_list yourself; VA_OPEN does it
   for you.
Here is a complete example: int printf VPARAMS ((const char *format, ...)) { int result; VA_OPEN (ap, format); VA_FIXEDARG (ap, const char *, format); result = vfprintf (stdout, format, ap); VA_CLOSE (ap); return result; } You can declare variables either before or after the VA_OPEN, VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning and end of a block. They must appear at the same nesting level, and any variables declared after VA_OPEN go out of scope at VA_CLOSE. Unfortunately, with a K+R compiler, that includes the argument list. You can have multiple instances of VA_OPEN/VA_CLOSE pairs in a single function in case you need to traverse the argument list more than once. For ease of writing code which uses GCC extensions but needs to be portable to other compilers, we provide the GCC_VERSION macro that simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various wrappers around __attribute__. Also, __extension__ will be #defined to nothing if it doesn't work. See below. This header also defines a lot of obsolete macros: CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID, AND, DOTS, NOARGS. Don't use them. */ #ifndef _ANSIDECL_H #define _ANSIDECL_H 1 /* Every source file includes this file, so they will all get the switch for lint. */ /* LINTLIBRARY */ /* Using MACRO(x,y) in cpp #if conditionals does not work with some older preprocessors. Thus we can't define something like this: #define HAVE_GCC_VERSION(MAJOR, MINOR) \ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR))) and then test "#if HAVE_GCC_VERSION(2,7)". So instead we use the macro below and test it against specific values. */ /* This macro simplifies testing whether we are using gcc, and if it is of a particular minimum version. (Both major & minor numbers are significant.) This macro will evaluate to 0 if we are not using gcc at all. */ #ifndef GCC_VERSION #define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) #endif /* GCC_VERSION */ #if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus)) /* All known AIX compilers implement these things (but don't always define __STDC__). The RISC/OS MIPS compiler defines these things in SVR4 mode, but does not define __STDC__. */ /* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other C++ compilers, does not define __STDC__, though it acts as if this was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */ #define ANSI_PROTOTYPES 1 #define PTR void * #define PTRCONST void *const #define LONG_DOUBLE long double #define PARAMS(ARGS) ARGS #define VPARAMS(ARGS) ARGS #define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR) /* variadic function helper macros */ /* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's use without inhibiting further decls and without declaring an actual variable. */ #define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy #define VA_CLOSE(AP) } va_end(AP); } #define VA_FIXEDARG(AP, T, N) struct Qdmy #undef const #undef volatile #undef signed /* inline requires special treatment; it's in C99, and GCC >=2.7 supports it too, but it's not in C89. */ #undef inline #if __STDC_VERSION__ > 199901L /* it's a keyword */ #else # if GCC_VERSION >= 2007 # define inline __inline__ /* __inline__ prevents -pedantic warnings */ # else # define inline /* nothing */ # endif #endif /* These are obsolete. Do not use. 
*/ #ifndef IN_GCC #define CONST const #define VOLATILE volatile #define SIGNED signed #define PROTO(type, name, arglist) type name arglist #define EXFUN(name, proto) name proto #define DEFUN(name, arglist, args) name(args) #define DEFUN_VOID(name) name(void) #define AND , #define DOTS , ... #define NOARGS void #endif /* ! IN_GCC */ #else /* Not ANSI C. */ #undef ANSI_PROTOTYPES #define PTR char * #define PTRCONST PTR #define LONG_DOUBLE double #define PARAMS(args) () #define VPARAMS(args) (va_alist) va_dcl #define VA_START(va_list, var) va_start(va_list) #define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy #define VA_CLOSE(AP) } va_end(AP); } #define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE) /* some systems define these in header files for non-ansi mode */ #undef const #undef volatile #undef signed #undef inline #define const #define volatile #define signed #define inline #ifndef IN_GCC #define CONST #define VOLATILE #define SIGNED #define PROTO(type, name, arglist) type name () #define EXFUN(name, proto) name() #define DEFUN(name, arglist, args) name arglist args; #define DEFUN_VOID(name) name() #define AND ; #define DOTS #define NOARGS #endif /* ! IN_GCC */ #endif /* ANSI C. */ /* Define macros for some gcc attributes. This permits us to use the macros freely, and know that they will come into play for the version of gcc in which they are supported. */ #if (GCC_VERSION < 2007) # define __attribute__(x) #endif /* Attribute __malloc__ on functions was valid as of gcc 2.96. */ #ifndef ATTRIBUTE_MALLOC # if (GCC_VERSION >= 2096) # define ATTRIBUTE_MALLOC __attribute__ ((__malloc__)) # else # define ATTRIBUTE_MALLOC # endif /* GNUC >= 2.96 */ #endif /* ATTRIBUTE_MALLOC */ /* Attributes on labels were valid as of gcc 2.93. */ #ifndef ATTRIBUTE_UNUSED_LABEL # if (GCC_VERSION >= 2093) # define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED # else # define ATTRIBUTE_UNUSED_LABEL # endif /* GNUC >= 2.93 */ #endif /* ATTRIBUTE_UNUSED_LABEL */ #ifndef ATTRIBUTE_UNUSED #define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) #endif /* ATTRIBUTE_UNUSED */ #ifndef ATTRIBUTE_NORETURN #define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__)) #endif /* ATTRIBUTE_NORETURN */ /* Attribute `nonnull' was valid as of gcc 3.3. */ #ifndef ATTRIBUTE_NONNULL # if (GCC_VERSION >= 3003) # define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m))) # else # define ATTRIBUTE_NONNULL(m) # endif /* GNUC >= 3.3 */ #endif /* ATTRIBUTE_NONNULL */ /* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL. This was the case for the `printf' format attribute by itself before GCC 3.3, but as of 3.3 we need to add the `nonnull' attribute to retain this behavior. */ #ifndef ATTRIBUTE_PRINTF #define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m) #define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2) #define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3) #define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4) #define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5) #define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6) #endif /* ATTRIBUTE_PRINTF */ /* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A NULL format specifier was allowed as of gcc 3.3. 
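
   As an illustrative sketch (not part of the original header), declarations
   using the attribute macros defined above might look like the following;
   `xmalloc_sketch', `report_error' and `report_status' are hypothetical
   names used only for this example:

	extern void *xmalloc_sketch (size_t) ATTRIBUTE_MALLOC;
	extern void report_error (const char *format, ...)
	  ATTRIBUTE_PRINTF_1 ATTRIBUTE_NORETURN;
	extern void report_status (const char *format, ...)
	  ATTRIBUTE_NULL_PRINTF_1;

   With GCC 3.3 or newer (GCC_VERSION >= 3003) the plain printf attribute
   also carries `nonnull', so passing a NULL format string to report_error
   is diagnosed, while report_status, declared with the NULL-tolerant
   variant, is not.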
*/ #ifndef ATTRIBUTE_NULL_PRINTF # if (GCC_VERSION >= 3003) # define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) # else # define ATTRIBUTE_NULL_PRINTF(m, n) # endif /* GNUC >= 3.3 */ # define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2) # define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3) # define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4) # define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5) # define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6) #endif /* ATTRIBUTE_NULL_PRINTF */ /* We use __extension__ in some places to suppress -pedantic warnings about GCC extensions. This feature didn't work properly before gcc 2.8. */ #if GCC_VERSION < 2008 #define __extension__ #endif #endif /* ansidecl.h */ #endif /* config.h. Generated by configure. */ /* config.in. Generated from configure.ac by autoheader. */ /* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP systems. This function is required for `alloca.c' support on those systems. */ /* #undef CRAY_STACKSEG_END */ /* Define to 1 if using `alloca.c'. */ /* #undef C_ALLOCA */ /* Define if you want more run-time sanity checks. */ /* #undef ENABLE_CHECKING */ /* Define to 1 if translation of program messages to the user's native language is requested. */ #define ENABLE_NLS 1 /* Define to 1 if you have `alloca', as a function or macro. */ #define HAVE_ALLOCA 1 /* Define to 1 if you have and it should be used (not on Ultrix). */ #define HAVE_ALLOCA_H 1 /* Define to 1 if you have the declaration of `abort', and to 0 if you don't. */ #define HAVE_DECL_ABORT 1 /* Define to 1 if you have the declaration of `fprintf_unlocked', and to 0 if you don't. */ #define HAVE_DECL_FPRINTF_UNLOCKED 0 /* Define to 1 if you have the declaration of `fputc_unlocked', and to 0 if you don't. */ #define HAVE_DECL_FPUTC_UNLOCKED 1 /* Define to 1 if you have the declaration of `fwrite_unlocked', and to 0 if you don't. */ #define HAVE_DECL_FWRITE_UNLOCKED 1 /* Define to 1 if you have the declaration of `putc_unlocked', and to 0 if you don't. */ #define HAVE_DECL_PUTC_UNLOCKED 1 /* Define to 1 if you have the header file. */ #define HAVE_FCNTL_H 1 /* Define to 1 if you have the `fprintf_unlocked' function. */ /* #undef HAVE_FPRINTF_UNLOCKED */ /* Define to 1 if you have the `fputc_unlocked' function. */ #define HAVE_FPUTC_UNLOCKED 1 /* Define to 1 if you have the `fputs_unlocked' function. */ #define HAVE_FPUTS_UNLOCKED 1 /* Define to 1 if you have the `fwrite_unlocked' function. */ #define HAVE_FWRITE_UNLOCKED 1 /* Define if you have the iconv() function. */ #define HAVE_ICONV 1 /* Define to 1 if you have the header file. */ #define HAVE_ICONV_H 1 /* Define to 1 if you have the header file. */ #define HAVE_INTTYPES_H 1 /* Define if you have and nl_langinfo(CODESET). */ #define HAVE_LANGINFO_CODESET 1 /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_LOCALE_H 1 /* Define to 1 if you have the header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if libc includes obstacks. */ #define HAVE_OBSTACK 1 /* Define to 1 if you have the `putc_unlocked' function. */ #define HAVE_PUTC_UNLOCKED 1 /* Define to 1 if you have the header file. */ #define HAVE_STDDEF_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. 
*/ #define HAVE_STRING_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_FILE_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define if defines \`uchar'. */ /* #undef HAVE_UCHAR */ /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define to the widest efficient host integer type at least as wide as the target's size_t type. */ #define HOST_WIDE_INT long /* Define as const if the declaration of iconv() needs const. */ #define ICONV_CONST /* Name of package */ #define PACKAGE "cpplib" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "gcc-bugs@gcc.gnu.org" /* Define to the full name of this package. */ #define PACKAGE_NAME "cpplib" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "cpplib " /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "cpplib" /* Define to the version of this package. */ #define PACKAGE_VERSION " " /* The size of a `int', as computed by sizeof. */ #define SIZEOF_INT 4 /* The size of a `long', as computed by sizeof. */ #define SIZEOF_LONG 4 /* If using the C implementation of alloca, define if you know the direction of stack growth for your system; otherwise it will be automatically deduced at run-time. STACK_DIRECTION > 0 => grows toward higher addresses STACK_DIRECTION < 0 => grows toward lower addresses STACK_DIRECTION = 0 => direction of growth unknown */ /* #undef STACK_DIRECTION */ /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Define if you can safely include both and . */ #define STRING_WITH_STRINGS 1 /* Define to 1 if you can safely include both and . */ #define TIME_WITH_SYS_TIME 1 /* Define to 1 if your declares `struct tm'. */ /* #undef TM_IN_SYS_TIME */ /* Version number of package */ #define VERSION " " /* Define to empty if `const' does not conform to ANSI C. */ /* #undef const */ /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. */ #ifndef __cplusplus /* #undef inline */ #endif /* Define to `long' if does not define. */ /* #undef off_t */ /* Define to `unsigned' if does not define. */ /* #undef size_t */ /* config.h. Generated by configure. */ /* config.in. Generated from configure.ac by autoheader. */ /* 1234 = LIL_ENDIAN, 4321 = BIGENDIAN */ #define BYTEORDER 1234 /* Define to one of _getb67, GETB67, getb67 for Cray-2 and Cray-YMP systems. This function is required for alloca.c support on those systems. */ /* #undef CRAY_STACKSEG_END */ /* Define to 1 if you have the header file. */ #define HAVE_ALLOCA_H 1 /* Define to 1 if you have the `asprintf' function. */ #define HAVE_ASPRINTF 1 /* Define to 1 if you have the `atexit' function. */ #define HAVE_ATEXIT 1 /* Define to 1 if you have the `basename' function. */ #define HAVE_BASENAME 1 /* Define to 1 if you have the `bcmp' function. */ #define HAVE_BCMP 1 /* Define to 1 if you have the `bcopy' function. */ #define HAVE_BCOPY 1 /* Define to 1 if you have the `bsearch' function. */ #define HAVE_BSEARCH 1 /* Define to 1 if you have the `bzero' function. */ #define HAVE_BZERO 1 /* Define to 1 if you have the `calloc' function. */ #define HAVE_CALLOC 1 /* Define to 1 if you have the `canonicalize_file_name' function. */ #define HAVE_CANONICALIZE_FILE_NAME 1 /* Define to 1 if you have the `clock' function. 
*/ #define HAVE_CLOCK 1 /* Define to 1 if you have the header file. */ #define HAVE_FCNTL_H 1 /* Define to 1 if you have the `ffs' function. */ #define HAVE_FFS 1 /* Define to 1 if you have the `fork' function. */ #define HAVE_FORK 1 /* Define to 1 if you have the `getcwd' function. */ #define HAVE_GETCWD 1 /* Define to 1 if you have the `getpagesize' function. */ #define HAVE_GETPAGESIZE 1 /* Define to 1 if you have the `getrusage' function. */ #define HAVE_GETRUSAGE 1 /* Define to 1 if you have the `getsysinfo' function. */ /* #undef HAVE_GETSYSINFO */ /* Define to 1 if you have the `gettimeofday' function. */ #define HAVE_GETTIMEOFDAY 1 /* Define to 1 if you have the `index' function. */ #define HAVE_INDEX 1 /* Define to 1 if you have the `insque' function. */ #define HAVE_INSQUE 1 /* Define to 1 if you have the header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_MACHINE_HAL_SYSINFO_H */ /* Define to 1 if you have the header file. */ #define HAVE_MALLOC_H 1 /* Define to 1 if you have the `memchr' function. */ #define HAVE_MEMCHR 1 /* Define to 1 if you have the `memcmp' function. */ #define HAVE_MEMCMP 1 /* Define to 1 if you have the `memcpy' function. */ #define HAVE_MEMCPY 1 /* Define to 1 if you have the `memmove' function. */ #define HAVE_MEMMOVE 1 /* Define to 1 if you have the header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `mempcpy' function. */ #define HAVE_MEMPCPY 1 /* Define to 1 if you have the `memset' function. */ #define HAVE_MEMSET 1 /* Define to 1 if you have the `mkstemps' function. */ /* #undef HAVE_MKSTEMPS */ /* Define to 1 if you have a working `mmap' system call. */ #define HAVE_MMAP 1 /* Define to 1 if you have the `on_exit' function. */ #define HAVE_ON_EXIT 1 /* Define to 1 if you have the `psignal' function. */ #define HAVE_PSIGNAL 1 /* Define to 1 if you have the `pstat_getdynamic' function. */ /* #undef HAVE_PSTAT_GETDYNAMIC */ /* Define to 1 if you have the `pstat_getstatic' function. */ /* #undef HAVE_PSTAT_GETSTATIC */ /* Define to 1 if you have the `putenv' function. */ #define HAVE_PUTENV 1 /* Define to 1 if you have the `random' function. */ #define HAVE_RANDOM 1 /* Define to 1 if you have the `realpath' function. */ #define HAVE_REALPATH 1 /* Define to 1 if you have the `rename' function. */ #define HAVE_RENAME 1 /* Define to 1 if you have the `rindex' function. */ #define HAVE_RINDEX 1 /* Define to 1 if you have the `sbrk' function. */ #define HAVE_SBRK 1 /* Define to 1 if you have the `setenv' function. */ #define HAVE_SETENV 1 /* Define to 1 if you have the `sigsetmask' function. */ #define HAVE_SIGSETMASK 1 /* Define to 1 if you have the `snprintf' function. */ #define HAVE_SNPRINTF 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the `stpcpy' function. */ #define HAVE_STPCPY 1 /* Define to 1 if you have the `stpncpy' function. */ #define HAVE_STPNCPY 1 /* Define to 1 if you have the `strcasecmp' function. */ #define HAVE_STRCASECMP 1 /* Define to 1 if you have the `strchr' function. */ #define HAVE_STRCHR 1 /* Define to 1 if you have the `strdup' function. */ #define HAVE_STRDUP 1 /* Define to 1 if you have the `strerror' function. */ #define HAVE_STRERROR 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. 
*/ #define HAVE_STRING_H 1 /* Define to 1 if you have the `strncasecmp' function. */ #define HAVE_STRNCASECMP 1 /* Define to 1 if you have the `strrchr' function. */ #define HAVE_STRRCHR 1 /* Define to 1 if you have the `strsignal' function. */ #define HAVE_STRSIGNAL 1 /* Define to 1 if you have the `strstr' function. */ #define HAVE_STRSTR 1 /* Define to 1 if you have the `strtod' function. */ #define HAVE_STRTOD 1 /* Define to 1 if you have the `strtol' function. */ #define HAVE_STRTOL 1 /* Define to 1 if you have the `strtoul' function. */ #define HAVE_STRTOUL 1 /* Define to 1 if you have the `sysconf' function. */ #define HAVE_SYSCONF 1 /* Define to 1 if you have the `sysctl' function. */ #define HAVE_SYSCTL 1 /* Define to 1 if you have the `sysmp' function. */ /* #undef HAVE_SYSMP */ /* Define if you have the sys_errlist variable. */ #define HAVE_SYS_ERRLIST 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_FILE_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_MMAN_H 1 /* Define if you have the sys_nerr variable. */ #define HAVE_SYS_NERR 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_PARAM_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_PSTAT_H */ /* Define to 1 if you have the header file. */ #define HAVE_SYS_RESOURCE_H 1 /* Define if you have the sys_siglist variable. */ #define HAVE_SYS_SIGLIST 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_SYSCTL_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_SYSINFO_H 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_SYSMP_H */ /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_SYSTEMCFG_H */ /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_TABLE_H */ /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have that is POSIX.1 compatible. */ #define HAVE_SYS_WAIT_H 1 /* Define to 1 if you have the `table' function. */ /* #undef HAVE_TABLE */ /* Define to 1 if you have the `times' function. */ #define HAVE_TIMES 1 /* Define to 1 if you have the header file. */ #define HAVE_TIME_H 1 /* Define to 1 if you have the `tmpnam' function. */ #define HAVE_TMPNAM 1 /* Define if you have the \`uintptr_t' type. */ #define HAVE_UINTPTR_T 1 /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if you have the `vasprintf' function. */ #define HAVE_VASPRINTF 1 /* Define to 1 if you have the `vfork' function. */ #define HAVE_VFORK 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_VFORK_H */ /* Define to 1 if you have the `vfprintf' function. */ #define HAVE_VFPRINTF 1 /* Define to 1 if you have the `vprintf' function. */ #define HAVE_VPRINTF 1 /* Define to 1 if you have the `vsnprintf' function. */ #define HAVE_VSNPRINTF 1 /* Define to 1 if you have the `vsprintf' function. */ #define HAVE_VSPRINTF 1 /* Define to 1 if you have the `waitpid' function. */ #define HAVE_WAITPID 1 /* Define to 1 if `fork' works. */ #define HAVE_WORKING_FORK 1 /* Define to 1 if `vfork' works. */ #define HAVE_WORKING_VFORK 1 /* Define to 1 if you have the `_doprnt' function. */ /* #undef HAVE__DOPRNT */ /* Define if you have the _system_configuration variable. */ /* #undef HAVE__SYSTEM_CONFIGURATION */ /* Define if the host machine stores words of multi-word integers in big-endian order. 
*/
/* #undef HOST_WORDS_BIG_ENDIAN */

/* Define if canonicalize_file_name is not declared in system header files.  */
#define NEED_DECLARATION_CANONICALIZE_FILE_NAME 1

/* Define if errno must be declared even when <errno.h> is included.  */
/* #undef NEED_DECLARATION_ERRNO */

/* Define to 1 if your C compiler doesn't accept -c and -o together.  */
/* #undef NO_MINUS_C_MINUS_O */

/* Define if you know the direction of stack growth for your system;
   otherwise it will be automatically deduced at run-time.
	STACK_DIRECTION > 0 => grows toward higher addresses
	STACK_DIRECTION < 0 => grows toward lower addresses
	STACK_DIRECTION = 0 => direction of growth unknown */
#define STACK_DIRECTION -1

/* Define to 1 if you have the ANSI C header files.  */
#define STDC_HEADERS 1

/* Define to 1 if you can safely include both <sys/time.h> and <time.h>.  */
#define TIME_WITH_SYS_TIME 1

/* Define to an unsigned 64-bit type available in the compiler.  */
#define UNSIGNED_64BIT_TYPE uint64_t

/* whether byteorder is bigendian */
/* #undef WORDS_BIGENDIAN */

/* Define to empty if `const' does not conform to ANSI C.  */
/* #undef const */

/* Define to `__inline__' or `__inline' if that's what the C compiler
   calls it, or to nothing if 'inline' is not supported under any name.  */
#ifndef __cplusplus
/* #undef inline */
#endif

/* Define to `int' if <sys/types.h> does not define.  */
/* #undef pid_t */

/* Define to `unsigned long' if <sys/types.h> does not define.  */
/* #undef uintptr_t */

/* Define as `fork' if `vfork' does not work.  */
/* #undef vfork */

#define HAVE_LIBINTL_H 1

#endif /* GCC_CONFIG_H */

#ifndef PARAMS
# if defined __GNUC__ || (defined __STDC__ && __STDC__)
#  define PARAMS(args) args
# else
#  define PARAMS(args) ()
# endif  /* GCC.  */
#endif  /* Not PARAMS.  */

#ifndef INSIDE_RECURSION

# if defined STDC_HEADERS && !defined emacs
#  include <stddef.h>
# else
/* We need this for `regex.h', and perhaps for the Emacs include files.  */
#  include <sys/types.h>
# endif

# define WIDE_CHAR_SUPPORT (HAVE_WCTYPE_H && HAVE_WCHAR_H && HAVE_BTOWC)

/* For platforms which support the ISO C amendment 1 functionality we
   support user-defined character classes.  */
# if defined _LIBC || WIDE_CHAR_SUPPORT
/* Solaris 2.5 has a bug: <wchar.h> must be included before <wctype.h>.  */
#  include <wchar.h>
#  include <wctype.h>
# endif

/* This is for other GNU distributions with internationalized messages.  */
# if (HAVE_LIBINTL_H && ENABLE_NLS) || defined _LIBC
#  include <libintl.h>
#  ifdef _LIBC
#   undef gettext
#   define gettext(msgid) __dcgettext ("libc", msgid, LC_MESSAGES)
#  endif
# else
#  define gettext(msgid) (msgid)
# endif

# ifndef gettext_noop
/* This define is so xgettext can find the internationalizable strings.  */
#  define gettext_noop(String) String
# endif

/* The `emacs' switch turns on certain matching commands
   that make sense only in Emacs.  */
# ifdef emacs

# else  /* not emacs */

/* If we are not linking with Emacs proper,
   we can't use the relocating allocator
   even if config.h says that we can.  */
# undef REL_ALLOC

# if defined STDC_HEADERS || defined _LIBC
#  include <stdlib.h>
# else
char *malloc ();
char *realloc ();
# endif

/* When used in Emacs's lib-src, we need to get bzero and bcopy somehow.
   If nothing else has been done, use the method below.  */
# ifdef INHIBIT_STRING_HEADER
#  if !(defined HAVE_BZERO && defined HAVE_BCOPY)
#   if !defined bzero && !defined bcopy
#    undef INHIBIT_STRING_HEADER
#   endif
#  endif
# endif

/* This is the normal way of making sure we have a bcopy and a bzero.
   This is used in most programs--a few other programs avoid this
   by defining INHIBIT_STRING_HEADER.
*/
# ifndef INHIBIT_STRING_HEADER
#  if defined HAVE_STRING_H || defined STDC_HEADERS || defined _LIBC
#   include <string.h>
#   ifndef bzero
#    ifndef _LIBC
#     define bzero(s, n) (memset (s, '\0', n), (s))
#    else
#     define bzero(s, n) __bzero (s, n)
#    endif
#   endif
#  else
#   include <strings.h>
#   ifndef memcmp
#    define memcmp(s1, s2, n) bcmp (s1, s2, n)
#   endif
#   ifndef memcpy
#    define memcpy(d, s, n) (bcopy (s, d, n), (d))
#   endif
#  endif
# endif

/* Define the syntax stuff for \<, \>, etc.  */

/* This must be nonzero for the wordchar and notwordchar pattern
   commands in re_match_2.  */
# ifndef Sword
#  define Sword 1
# endif

# ifdef SWITCH_ENUM_BUG
#  define SWITCH_ENUM_CAST(x) ((int)(x))
# else
#  define SWITCH_ENUM_CAST(x) (x)
# endif

# endif /* not emacs */

# if defined _LIBC || HAVE_LIMITS_H
#  include <limits.h>
# endif

# ifndef MB_LEN_MAX
#  define MB_LEN_MAX 1
# endif

/* Get the interface, including the syntax bits.  */

/* This file redefines all regex external names before including
   a renamed copy of glibc's regex.h.  */

#ifndef _XREGEX_H
#define _XREGEX_H 1

# define regfree xregfree
# define regexec xregexec
# define regcomp xregcomp
# define regerror xregerror
# define re_set_registers xre_set_registers
# define re_match_2 xre_match_2
# define re_match xre_match
# define re_search xre_search
# define re_compile_pattern xre_compile_pattern
# define re_set_syntax xre_set_syntax
# define re_search_2 xre_search_2
# define re_compile_fastmap xre_compile_fastmap
# define re_syntax_options xre_syntax_options
# define re_max_failures xre_max_failures
# define _REGEX_RE_COMP
# define re_comp xre_comp
# define re_exec xre_exec

/* Definitions for data structures and routines for the regular
   expression library, version 0.12.
   Copyright (C) 1985,1989-1993,1995-1998, 2000 Free Software Foundation, Inc.
   This file is part of the GNU C Library.  Its master source is NOT part of
   the C library, however.  The master source lives in /gd/gnu/lib.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#ifndef _REGEX_H
#define _REGEX_H 1

/* Allow the use in C++ code.  */
#ifdef __cplusplus
extern "C" {
#endif

/* POSIX says that <sys/types.h> must be included (by the caller) before
   <regex.h>.  */

#if !defined _POSIX_C_SOURCE && !defined _POSIX_SOURCE && defined VMS
/* VMS doesn't have `size_t' in <sys/types.h>, even though POSIX says it
   should be there.  */
# include <stddef.h>
#endif

/* The following two types have to be signed and unsigned integer type
   wide enough to hold a value of a pointer.  For most ANSI compilers
   ptrdiff_t and size_t should be likely OK.  Still size of these two
   types is 2 for Microsoft C.  Ugh... */
typedef long int s_reg_t;
typedef unsigned long int active_reg_t;

/* The following bits are used to determine the regexp syntax we
   recognize.  The set/not-set meanings are chosen so that Emacs syntax
   remains the value 0.
The bits are given in alphabetical order, and the definitions shifted by one from the previous bit; thus, when we add or remove a bit, only one other definition need change. */ typedef unsigned long int reg_syntax_t; /* If this bit is not set, then \ inside a bracket expression is literal. If set, then such a \ quotes the following character. */ #define RE_BACKSLASH_ESCAPE_IN_LISTS ((unsigned long int) 1) /* If this bit is not set, then + and ? are operators, and \+ and \? are literals. If set, then \+ and \? are operators and + and ? are literals. */ #define RE_BK_PLUS_QM (RE_BACKSLASH_ESCAPE_IN_LISTS << 1) /* If this bit is set, then character classes are supported. They are: [:alpha:], [:upper:], [:lower:], [:digit:], [:alnum:], [:xdigit:], [:space:], [:print:], [:punct:], [:graph:], and [:cntrl:]. If not set, then character classes are not supported. */ #define RE_CHAR_CLASSES (RE_BK_PLUS_QM << 1) /* If this bit is set, then ^ and $ are always anchors (outside bracket expressions, of course). If this bit is not set, then it depends: ^ is an anchor if it is at the beginning of a regular expression or after an open-group or an alternation operator; $ is an anchor if it is at the end of a regular expression, or before a close-group or an alternation operator. This bit could be (re)combined with RE_CONTEXT_INDEP_OPS, because POSIX draft 11.2 says that * etc. in leading positions is undefined. We already implemented a previous draft which made those constructs invalid, though, so we haven't changed the code back. */ #define RE_CONTEXT_INDEP_ANCHORS (RE_CHAR_CLASSES << 1) /* If this bit is set, then special characters are always special regardless of where they are in the pattern. If this bit is not set, then special characters are special only in some contexts; otherwise they are ordinary. Specifically, * + ? and intervals are only special when not after the beginning, open-group, or alternation operator. */ #define RE_CONTEXT_INDEP_OPS (RE_CONTEXT_INDEP_ANCHORS << 1) /* If this bit is set, then *, +, ?, and { cannot be first in an re or immediately after an alternation or begin-group operator. */ #define RE_CONTEXT_INVALID_OPS (RE_CONTEXT_INDEP_OPS << 1) /* If this bit is set, then . matches newline. If not set, then it doesn't. */ #define RE_DOT_NEWLINE (RE_CONTEXT_INVALID_OPS << 1) /* If this bit is set, then . doesn't match NUL. If not set, then it does. */ #define RE_DOT_NOT_NULL (RE_DOT_NEWLINE << 1) /* If this bit is set, nonmatching lists [^...] do not match newline. If not set, they do. */ #define RE_HAT_LISTS_NOT_NEWLINE (RE_DOT_NOT_NULL << 1) /* If this bit is set, either \{...\} or {...} defines an interval, depending on RE_NO_BK_BRACES. If not set, \{, \}, {, and } are literals. */ #define RE_INTERVALS (RE_HAT_LISTS_NOT_NEWLINE << 1) /* If this bit is set, +, ? and | aren't recognized as operators. If not set, they are. */ #define RE_LIMITED_OPS (RE_INTERVALS << 1) /* If this bit is set, newline is an alternation operator. If not set, newline is literal. */ #define RE_NEWLINE_ALT (RE_LIMITED_OPS << 1) /* If this bit is set, then `{...}' defines an interval, and \{ and \} are literals. If not set, then `\{...\}' defines an interval. */ #define RE_NO_BK_BRACES (RE_NEWLINE_ALT << 1) /* If this bit is set, (...) defines a group, and \( and \) are literals. If not set, \(...\) defines a group, and ( and ) are literals. */ #define RE_NO_BK_PARENS (RE_NO_BK_BRACES << 1) /* If this bit is set, then \ matches . If not set, then \ is a back-reference. 
*/ #define RE_NO_BK_REFS (RE_NO_BK_PARENS << 1) /* If this bit is set, then | is an alternation operator, and \| is literal. If not set, then \| is an alternation operator, and | is literal. */ #define RE_NO_BK_VBAR (RE_NO_BK_REFS << 1) /* If this bit is set, then an ending range point collating higher than the starting range point, as in [z-a], is invalid. If not set, then when ending range point collates higher than the starting range point, the range is ignored. */ #define RE_NO_EMPTY_RANGES (RE_NO_BK_VBAR << 1) /* If this bit is set, then an unmatched ) is ordinary. If not set, then an unmatched ) is invalid. */ #define RE_UNMATCHED_RIGHT_PAREN_ORD (RE_NO_EMPTY_RANGES << 1) /* If this bit is set, succeed as soon as we match the whole pattern, without further backtracking. */ #define RE_NO_POSIX_BACKTRACKING (RE_UNMATCHED_RIGHT_PAREN_ORD << 1) /* If this bit is set, do not process the GNU regex operators. If not set, then the GNU regex operators are recognized. */ #define RE_NO_GNU_OPS (RE_NO_POSIX_BACKTRACKING << 1) /* If this bit is set, turn on internal regex debugging. If not set, and debugging was on, turn it off. This only works if regex.c is compiled -DDEBUG. We define this bit always, so that all that's needed to turn on debugging is to recompile regex.c; the calling code can always have this bit set, and it won't affect anything in the normal case. */ #define RE_DEBUG (RE_NO_GNU_OPS << 1) /* If this bit is set, a syntactically invalid interval is treated as a string of ordinary characters. For example, the ERE 'a{1' is treated as 'a\{1'. */ #define RE_INVALID_INTERVAL_ORD (RE_DEBUG << 1) /* This global variable defines the particular regexp syntax to use (for some interfaces). When a regexp is compiled, the syntax used is stored in the pattern buffer, so changing this does not affect already-compiled regexps. */ extern reg_syntax_t re_syntax_options; /* Define combinations of the above bits for the standard possibilities. (The [[[ comments delimit what gets put into the Texinfo file, so don't delete them!) */ /* [[[begin syntaxes]]] */ #define RE_SYNTAX_EMACS 0 #define RE_SYNTAX_AWK \ (RE_BACKSLASH_ESCAPE_IN_LISTS | RE_DOT_NOT_NULL \ | RE_NO_BK_PARENS | RE_NO_BK_REFS \ | RE_NO_BK_VBAR | RE_NO_EMPTY_RANGES \ | RE_DOT_NEWLINE | RE_CONTEXT_INDEP_ANCHORS \ | RE_UNMATCHED_RIGHT_PAREN_ORD | RE_NO_GNU_OPS) #define RE_SYNTAX_GNU_AWK \ ((RE_SYNTAX_POSIX_EXTENDED | RE_BACKSLASH_ESCAPE_IN_LISTS | RE_DEBUG) \ & ~(RE_DOT_NOT_NULL | RE_INTERVALS | RE_CONTEXT_INDEP_OPS)) #define RE_SYNTAX_POSIX_AWK \ (RE_SYNTAX_POSIX_EXTENDED | RE_BACKSLASH_ESCAPE_IN_LISTS \ | RE_INTERVALS | RE_NO_GNU_OPS) #define RE_SYNTAX_GREP \ (RE_BK_PLUS_QM | RE_CHAR_CLASSES \ | RE_HAT_LISTS_NOT_NEWLINE | RE_INTERVALS \ | RE_NEWLINE_ALT) #define RE_SYNTAX_EGREP \ (RE_CHAR_CLASSES | RE_CONTEXT_INDEP_ANCHORS \ | RE_CONTEXT_INDEP_OPS | RE_HAT_LISTS_NOT_NEWLINE \ | RE_NEWLINE_ALT | RE_NO_BK_PARENS \ | RE_NO_BK_VBAR) #define RE_SYNTAX_POSIX_EGREP \ (RE_SYNTAX_EGREP | RE_INTERVALS | RE_NO_BK_BRACES \ | RE_INVALID_INTERVAL_ORD) /* P1003.2/D11.2, section 4.20.7.1, lines 5078ff. */ #define RE_SYNTAX_ED RE_SYNTAX_POSIX_BASIC #define RE_SYNTAX_SED RE_SYNTAX_POSIX_BASIC /* Syntax bits common to both basic and extended POSIX regex syntax. */ #define _RE_SYNTAX_POSIX_COMMON \ (RE_CHAR_CLASSES | RE_DOT_NEWLINE | RE_DOT_NOT_NULL \ | RE_INTERVALS | RE_NO_EMPTY_RANGES) #define RE_SYNTAX_POSIX_BASIC \ (_RE_SYNTAX_POSIX_COMMON | RE_BK_PLUS_QM) /* Differs from ..._POSIX_BASIC only in that RE_BK_PLUS_QM becomes RE_LIMITED_OPS, i.e., \? 
\+ \| are not recognized. Actually, this isn't minimal, since other operators, such as \`, aren't disabled. */ #define RE_SYNTAX_POSIX_MINIMAL_BASIC \ (_RE_SYNTAX_POSIX_COMMON | RE_LIMITED_OPS) #define RE_SYNTAX_POSIX_EXTENDED \ (_RE_SYNTAX_POSIX_COMMON | RE_CONTEXT_INDEP_ANCHORS \ | RE_CONTEXT_INDEP_OPS | RE_NO_BK_BRACES \ | RE_NO_BK_PARENS | RE_NO_BK_VBAR \ | RE_CONTEXT_INVALID_OPS | RE_UNMATCHED_RIGHT_PAREN_ORD) /* Differs from ..._POSIX_EXTENDED in that RE_CONTEXT_INDEP_OPS is removed and RE_NO_BK_REFS is added. */ #define RE_SYNTAX_POSIX_MINIMAL_EXTENDED \ (_RE_SYNTAX_POSIX_COMMON | RE_CONTEXT_INDEP_ANCHORS \ | RE_CONTEXT_INVALID_OPS | RE_NO_BK_BRACES \ | RE_NO_BK_PARENS | RE_NO_BK_REFS \ | RE_NO_BK_VBAR | RE_UNMATCHED_RIGHT_PAREN_ORD) /* [[[end syntaxes]]] */ /* Maximum number of duplicates an interval can allow. Some systems (erroneously) define this in other header files, but we want our value, so remove any previous define. */ #ifdef RE_DUP_MAX # undef RE_DUP_MAX #endif /* If sizeof(int) == 2, then ((1 << 15) - 1) overflows. */ #define RE_DUP_MAX (0x7fff) /* POSIX `cflags' bits (i.e., information for `regcomp'). */ /* If this bit is set, then use extended regular expression syntax. If not set, then use basic regular expression syntax. */ #define REG_EXTENDED 1 /* If this bit is set, then ignore case when matching. If not set, then case is significant. */ #define REG_ICASE (REG_EXTENDED << 1) /* If this bit is set, then anchors do not match at newline characters in the string. If not set, then anchors do match at newlines. */ #define REG_NEWLINE (REG_ICASE << 1) /* If this bit is set, then report only success or fail in regexec. If not set, then returns differ between not matching and errors. */ #define REG_NOSUB (REG_NEWLINE << 1) /* POSIX `eflags' bits (i.e., information for regexec). */ /* If this bit is set, then the beginning-of-line operator doesn't match the beginning of the string (presumably because it's not the beginning of a line). If not set, then the beginning-of-line operator does match the beginning of the string. */ #define REG_NOTBOL 1 /* Like REG_NOTBOL, except for the end-of-line. */ #define REG_NOTEOL (1 << 1) /* If any error codes are removed, changed, or added, update the `re_error_msg' table in regex.c. */ typedef enum { #ifdef _XOPEN_SOURCE REG_ENOSYS = -1, /* This will never happen for this implementation. */ #endif REG_NOERROR = 0, /* Success. */ REG_NOMATCH, /* Didn't find a match (for regexec). */ /* POSIX regcomp return error codes. (In the order listed in the standard.) */ REG_BADPAT, /* Invalid pattern. */ REG_ECOLLATE, /* Not implemented. */ REG_ECTYPE, /* Invalid character class name. */ REG_EESCAPE, /* Trailing backslash. */ REG_ESUBREG, /* Invalid back reference. */ REG_EBRACK, /* Unmatched left bracket. */ REG_EPAREN, /* Parenthesis imbalance. */ REG_EBRACE, /* Unmatched \{. */ REG_BADBR, /* Invalid contents of \{\}. */ REG_ERANGE, /* Invalid range end. */ REG_ESPACE, /* Ran out of memory. */ REG_BADRPT, /* No preceding re for repetition op. */ /* Error codes we've added. */ REG_EEND, /* Premature end. */ REG_ESIZE, /* Compiled pattern bigger than 2^16 bytes. */ REG_ERPAREN /* Unmatched ) or \); not returned from regcomp. */ } reg_errcode_t; /* This data structure represents a compiled pattern. Before calling the pattern compiler, the fields `buffer', `allocated', `fastmap', `translate', and `no_sub' can be set. After the pattern has been compiled, the `re_nsub' field is available. All other fields are private to the regex routines. 
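
   As an illustrative sketch (not part of the original header), a typical
   caller of the GNU interface zeroes the caller-visible fields, picks a
   syntax, and lets the library allocate everything else; the pattern
   "foo[0-9]+" and the subject string are only example data:

	struct re_pattern_buffer buf;
	struct re_registers regs;
	const char *err;
	int pos;

	memset (&buf, 0, sizeof buf);
	re_syntax_options = RE_SYNTAX_POSIX_EXTENDED;
	err = re_compile_pattern ("foo[0-9]+", 9, &buf);
	if (err == NULL)
	  pos = re_search (&buf, "xxfoo123yy", 10, 0, 10, &regs);

   A non-NULL return from re_compile_pattern is an error string; on
   success `re_nsub' holds the number of parenthesized subexpressions,
   re_search returns the start position of the match (here 2) and fills
   `regs', and regfree releases the buffer when it is no longer needed.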
*/ #ifndef RE_TRANSLATE_TYPE # define RE_TRANSLATE_TYPE char * #endif struct re_pattern_buffer { /* [[[begin pattern_buffer]]] */ /* Space that holds the compiled pattern. It is declared as `unsigned char *' because its elements are sometimes used as array indexes. */ unsigned char *buffer; /* Number of bytes to which `buffer' points. */ unsigned long int allocated; /* Number of bytes actually used in `buffer'. */ unsigned long int used; /* Syntax setting with which the pattern was compiled. */ reg_syntax_t syntax; /* Pointer to a fastmap, if any, otherwise zero. re_search uses the fastmap, if there is one, to skip over impossible starting points for matches. */ char *fastmap; /* Either a translate table to apply to all characters before comparing them, or zero for no translation. The translation is applied to a pattern when it is compiled and to a string when it is matched. */ RE_TRANSLATE_TYPE translate; /* Number of subexpressions found by the compiler. */ size_t re_nsub; /* Zero if this pattern cannot match the empty string, one else. Well, in truth it's used only in `re_search_2', to see whether or not we should use the fastmap, so we don't set this absolutely perfectly; see `re_compile_fastmap' (the `duplicate' case). */ unsigned can_be_null : 1; /* If REGS_UNALLOCATED, allocate space in the `regs' structure for `max (RE_NREGS, re_nsub + 1)' groups. If REGS_REALLOCATE, reallocate space if necessary. If REGS_FIXED, use what's there. */ #define REGS_UNALLOCATED 0 #define REGS_REALLOCATE 1 #define REGS_FIXED 2 unsigned regs_allocated : 2; /* Set to zero when `regex_compile' compiles a pattern; set to one by `re_compile_fastmap' if it updates the fastmap. */ unsigned fastmap_accurate : 1; /* If set, `re_match_2' does not return information about subexpressions. */ unsigned no_sub : 1; /* If set, a beginning-of-line anchor doesn't match at the beginning of the string. */ unsigned not_bol : 1; /* Similarly for an end-of-line anchor. */ unsigned not_eol : 1; /* If true, an anchor at a newline matches. */ unsigned newline_anchor : 1; /* [[[end pattern_buffer]]] */ }; typedef struct re_pattern_buffer regex_t; /* Type for byte offsets within the string. POSIX mandates this. */ typedef int regoff_t; /* This is the structure we store register match data in. See regex.texinfo for a full description of what registers match. */ struct re_registers { unsigned num_regs; regoff_t *start; regoff_t *end; }; /* If `regs_allocated' is REGS_UNALLOCATED in the pattern buffer, `re_match_2' returns information about at least this many registers the first time a `regs' structure is passed. */ #ifndef RE_NREGS # define RE_NREGS 30 #endif /* POSIX specification for registers. Aside from the different names than `re_registers', POSIX uses an array of structures, instead of a structure of arrays. */ typedef struct { regoff_t rm_so; /* Byte offset from string's start to substring's start. */ regoff_t rm_eo; /* Byte offset from string's start to substring's end. */ } regmatch_t; /* Declarations for routines. */ /* To avoid duplicating every routine declaration -- once with a prototype (if we are ANSI), and once without (if we aren't) -- we use the following macro to declare argument types. This unfortunately clutters up the declarations a bit, but I think it's worth it. */ #if __STDC__ # define _RE_ARGS(args) args #else /* not __STDC__ */ # define _RE_ARGS(args) () #endif /* not __STDC__ */ /* Sets the current default syntax to SYNTAX, and return the old syntax. 
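   For example (an illustrative sketch only, not something this header
   provides), a caller that wants egrep-style parsing for one compilation
   and then wants to restore whatever syntax was previously in effect might
   write:

     reg_syntax_t old_syntax = re_set_syntax (RE_SYNTAX_EGREP);
     const char *err = re_compile_pattern ("a|b", 3, &pb);  // `pb' is a caller-prepared
                                                            // struct re_pattern_buffer
     re_set_syntax (old_syntax);
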
You can also simply assign to the `re_syntax_options' variable. */ extern reg_syntax_t re_set_syntax _RE_ARGS ((reg_syntax_t syntax)); /* Compile the regular expression PATTERN, with length LENGTH and syntax given by the global `re_syntax_options', into the buffer BUFFER. Return NULL if successful, and an error string if not. */ extern const char *re_compile_pattern _RE_ARGS ((const char *pattern, size_t length, struct re_pattern_buffer *buffer)); /* Compile a fastmap for the compiled pattern in BUFFER; used to accelerate searches. Return 0 if successful and -2 if was an internal error. */ extern int re_compile_fastmap _RE_ARGS ((struct re_pattern_buffer *buffer)); /* Search in the string STRING (with length LENGTH) for the pattern compiled into BUFFER. Start searching at position START, for RANGE characters. Return the starting position of the match, -1 for no match, or -2 for an internal error. Also return register information in REGS (if REGS and BUFFER->no_sub are nonzero). */ extern int re_search _RE_ARGS ((struct re_pattern_buffer *buffer, const char *string, int length, int start, int range, struct re_registers *regs)); /* Like `re_search', but search in the concatenation of STRING1 and STRING2. Also, stop searching at index START + STOP. */ extern int re_search_2 _RE_ARGS ((struct re_pattern_buffer *buffer, const char *string1, int length1, const char *string2, int length2, int start, int range, struct re_registers *regs, int stop)); /* Like `re_search', but return how many characters in STRING the regexp in BUFFER matched, starting at position START. */ extern int re_match _RE_ARGS ((struct re_pattern_buffer *buffer, const char *string, int length, int start, struct re_registers *regs)); /* Relates to `re_match' as `re_search_2' relates to `re_search'. */ extern int re_match_2 _RE_ARGS ((struct re_pattern_buffer *buffer, const char *string1, int length1, const char *string2, int length2, int start, struct re_registers *regs, int stop)); /* Set REGS to hold NUM_REGS registers, storing them in STARTS and ENDS. Subsequent matches using BUFFER and REGS will use this memory for recording register information. STARTS and ENDS must be allocated with malloc, and must each be at least `NUM_REGS * sizeof (regoff_t)' bytes long. If NUM_REGS == 0, then subsequent matches should allocate their own register data. Unless this function is called, the first search or match using PATTERN_BUFFER will allocate its own register data, without freeing the old data. */ extern void re_set_registers _RE_ARGS ((struct re_pattern_buffer *buffer, struct re_registers *regs, unsigned num_regs, regoff_t *starts, regoff_t *ends)); #if defined _REGEX_RE_COMP || defined _LIBC # ifndef _CRAY /* 4.2 bsd compatibility. */ extern char *re_comp _RE_ARGS ((const char *)); extern int re_exec _RE_ARGS ((const char *)); # endif #endif /* GCC 2.95 and later have "__restrict"; C99 compilers have "restrict", and "configure" may have defined "restrict". */ #ifndef __restrict # if ! (2 < __GNUC__ || (2 == __GNUC__ && 95 <= __GNUC_MINOR__)) # if defined restrict || 199901L <= __STDC_VERSION__ # define __restrict restrict # else # define __restrict # endif # endif #endif /* GCC 3.1 and later support declaring arrays as non-overlapping using the syntax array_name[restrict] */ #ifndef __restrict_arr # if ! (3 < __GNUC__ || (3 == __GNUC__ && 1 <= __GNUC_MINOR__)) || defined (__GNUG__) # define __restrict_arr # else # define __restrict_arr __restrict # endif #endif /* POSIX compatibility. 
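   The following is an illustrative sketch only (the pattern and subject
   strings are arbitrary examples); it shows the usual compile, execute,
   report, free sequence for the four entry points declared below:

     regex_t re;
     regmatch_t match[2];
     char errbuf[128];
     int rc;

     rc = regcomp (&re, "([0-9]+)", REG_EXTENDED);
     if (rc != 0)
       {
         regerror (rc, &re, errbuf, sizeof errbuf);   // fills errbuf with a message
       }
     else
       {
         if (regexec (&re, "abc 42 def", 2, match, 0) == 0)
           {
             // match[1].rm_so == 4 and match[1].rm_eo == 6, delimiting "42"
           }
         regfree (&re);
       }
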
*/ extern int regcomp _RE_ARGS ((regex_t *__restrict __preg, const char *__restrict __pattern, int __cflags)); extern int regexec _RE_ARGS ((const regex_t *__restrict __preg, const char *__restrict __string, size_t __nmatch, regmatch_t __pmatch[__restrict_arr], int __eflags)); extern size_t regerror _RE_ARGS ((int __errcode, const regex_t *__preg, char *__errbuf, size_t __errbuf_size)); extern void regfree _RE_ARGS ((regex_t *__preg)); #ifdef __cplusplus } #endif /* C++ */ #endif /* regex.h */ /* Local variables: make-backup-files: t version-control: t trim-versions-without-asking: nil End: */ #endif /* xregex.h */ /* isalpha etc. are used for the character classes. */ # include <ctype.h> /* Jim Meyering writes: "... Some ctype macros are valid only for character codes that isascii says are ASCII (SGI's IRIX-4.0.5 is one such system --when using /bin/cc or gcc but without giving an ansi option). So, all ctype uses should be through macros like ISPRINT... If STDC_HEADERS is defined, then autoconf has verified that the ctype macros don't need to be guarded with references to isascii. ... Defining isascii to 1 should let any compiler worth its salt eliminate the && through constant folding." Solaris defines some of these symbols so we must undefine them first. */ # undef ISASCII # if defined STDC_HEADERS || (!defined isascii && !defined HAVE_ISASCII) # define ISASCII(c) 1 # else # define ISASCII(c) isascii(c) # endif # ifdef isblank # define ISBLANK(c) (ISASCII (c) && isblank (c)) # else # define ISBLANK(c) ((c) == ' ' || (c) == '\t') # endif # ifdef isgraph # define ISGRAPH(c) (ISASCII (c) && isgraph (c)) # else # define ISGRAPH(c) (ISASCII (c) && isprint (c) && !isspace (c)) # endif # undef ISPRINT # define ISPRINT(c) (ISASCII (c) && isprint (c)) # define ISDIGIT(c) (ISASCII (c) && isdigit (c)) # define ISALNUM(c) (ISASCII (c) && isalnum (c)) # define ISALPHA(c) (ISASCII (c) && isalpha (c)) # define ISCNTRL(c) (ISASCII (c) && iscntrl (c)) # define ISLOWER(c) (ISASCII (c) && islower (c)) # define ISPUNCT(c) (ISASCII (c) && ispunct (c)) # define ISSPACE(c) (ISASCII (c) && isspace (c)) # define ISUPPER(c) (ISASCII (c) && isupper (c)) # define ISXDIGIT(c) (ISASCII (c) && isxdigit (c)) # ifdef _tolower # define TOLOWER(c) _tolower(c) # else # define TOLOWER(c) tolower(c) # endif # ifndef NULL # define NULL (void *)0 # endif /* We remove any previous definition of `SIGN_EXTEND_CHAR', since ours (we hope) works properly with all combinations of machines, compilers, `char' and `unsigned char' argument types. (Per Bothner suggested the basic approach.) */ # undef SIGN_EXTEND_CHAR # if __STDC__ # define SIGN_EXTEND_CHAR(c) ((signed char) (c)) # else /* not __STDC__ */ /* As in Harbison and Steele. */ # define SIGN_EXTEND_CHAR(c) ((((unsigned char) (c)) ^ 128) - 128) # endif # ifndef emacs /* How many characters in the character set. */ # define CHAR_SET_SIZE 256 # ifdef SYNTAX_TABLE extern char *re_syntax_table; # else /* not SYNTAX_TABLE */ static char re_syntax_table[CHAR_SET_SIZE]; static void init_syntax_once PARAMS ((void)); static void init_syntax_once () { register int c; static int done = 0; if (done) return; bzero (re_syntax_table, sizeof re_syntax_table); for (c = 0; c < CHAR_SET_SIZE; ++c) if (ISALNUM (c)) re_syntax_table[c] = Sword; re_syntax_table['_'] = Sword; done = 1; } # endif /* not SYNTAX_TABLE */ # define SYNTAX(c) re_syntax_table[(unsigned char) (c)] # endif /* emacs */ /* Integer type for pointers.
*/ # if !defined _LIBC && !defined HAVE_UINTPTR_T typedef unsigned long int uintptr_t; # endif /* Should we use malloc or alloca? If REGEX_MALLOC is not defined, we use `alloca' instead of `malloc'. This is because using malloc in re_search* or re_match* could cause memory leaks when C-g is used in Emacs; also, malloc is slower and causes storage fragmentation. On the other hand, malloc is more portable, and easier to debug. Because we sometimes use alloca, some routines have to be macros, not functions -- `alloca'-allocated space disappears at the end of the function it is called in. */ # ifdef REGEX_MALLOC # define REGEX_ALLOCATE malloc # define REGEX_REALLOCATE(source, osize, nsize) realloc (source, nsize) # define REGEX_FREE free # else /* not REGEX_MALLOC */ /* Emacs already defines alloca, sometimes. */ # ifndef alloca /* Make alloca work the best possible way. */ # ifdef __GNUC__ # define alloca __builtin_alloca # else /* not __GNUC__ */ # if HAVE_ALLOCA_H # include <alloca.h> # endif /* HAVE_ALLOCA_H */ # endif /* not __GNUC__ */ # endif /* not alloca */ # define REGEX_ALLOCATE alloca /* Assumes a `char *destination' variable. */ # define REGEX_REALLOCATE(source, osize, nsize) \ (destination = (char *) alloca (nsize), \ memcpy (destination, source, osize)) /* No need to do anything to free, after alloca. */ # define REGEX_FREE(arg) ((void)0) /* Do nothing! But inhibit gcc warning. */ # endif /* not REGEX_MALLOC */ /* Define how to allocate the failure stack. */ # if defined REL_ALLOC && defined REGEX_MALLOC # define REGEX_ALLOCATE_STACK(size) \ r_alloc (&failure_stack_ptr, (size)) # define REGEX_REALLOCATE_STACK(source, osize, nsize) \ r_re_alloc (&failure_stack_ptr, (nsize)) # define REGEX_FREE_STACK(ptr) \ r_alloc_free (&failure_stack_ptr) # else /* not using relocating allocator */ # ifdef REGEX_MALLOC # define REGEX_ALLOCATE_STACK malloc # define REGEX_REALLOCATE_STACK(source, osize, nsize) realloc (source, nsize) # define REGEX_FREE_STACK free # else /* not REGEX_MALLOC */ # define REGEX_ALLOCATE_STACK alloca # define REGEX_REALLOCATE_STACK(source, osize, nsize) \ REGEX_REALLOCATE (source, osize, nsize) /* No need to explicitly free anything. */ # define REGEX_FREE_STACK(arg) # endif /* not REGEX_MALLOC */ # endif /* not using relocating allocator */ /* True if `size1' is non-NULL and PTR is pointing anywhere inside `string1' or just past its end. This works if PTR is NULL, which is a good thing. */ # define FIRST_STRING_P(ptr) \ (size1 && string1 <= (ptr) && (ptr) <= string1 + size1) /* (Re)Allocate N items of type T using malloc, or fail. */ # define TALLOC(n, t) ((t *) malloc ((n) * sizeof (t))) # define RETALLOC(addr, n, t) ((addr) = (t *) realloc (addr, (n) * sizeof (t))) # define RETALLOC_IF(addr, n, t) \ if (addr) RETALLOC((addr), (n), t); else (addr) = TALLOC ((n), t) # define REGEX_TALLOC(n, t) ((t *) REGEX_ALLOCATE ((n) * sizeof (t))) # define BYTEWIDTH 8 /* In bits. */ # define STREQ(s1, s2) ((strcmp (s1, s2) == 0)) # undef MAX # undef MIN # define MAX(a, b) ((a) > (b) ? (a) : (b)) # define MIN(a, b) ((a) < (b) ?
(a) : (b)) typedef char boolean; # define false 0 # define true 1 static reg_errcode_t byte_regex_compile _RE_ARGS ((const char *pattern, size_t size, reg_syntax_t syntax, struct re_pattern_buffer *bufp)); static int byte_re_match_2_internal PARAMS ((struct re_pattern_buffer *bufp, const char *string1, int size1, const char *string2, int size2, int pos, struct re_registers *regs, int stop)); static int byte_re_search_2 PARAMS ((struct re_pattern_buffer *bufp, const char *string1, int size1, const char *string2, int size2, int startpos, int range, struct re_registers *regs, int stop)); static int byte_re_compile_fastmap PARAMS ((struct re_pattern_buffer *bufp)); #ifdef MBS_SUPPORT static reg_errcode_t wcs_regex_compile _RE_ARGS ((const char *pattern, size_t size, reg_syntax_t syntax, struct re_pattern_buffer *bufp)); static int wcs_re_match_2_internal PARAMS ((struct re_pattern_buffer *bufp, const char *cstring1, int csize1, const char *cstring2, int csize2, int pos, struct re_registers *regs, int stop, wchar_t *string1, int size1, wchar_t *string2, int size2, int *mbs_offset1, int *mbs_offset2)); static int wcs_re_search_2 PARAMS ((struct re_pattern_buffer *bufp, const char *string1, int size1, const char *string2, int size2, int startpos, int range, struct re_registers *regs, int stop)); static int wcs_re_compile_fastmap PARAMS ((struct re_pattern_buffer *bufp)); #endif /* These are the command codes that appear in compiled regular expressions. Some opcodes are followed by argument bytes. A command code can specify any interpretation whatsoever for its arguments. Zero bytes may appear in the compiled regular expression. */ typedef enum { no_op = 0, /* Succeed right away--no more backtracking. */ succeed, /* Followed by one byte giving n, then by n literal bytes. */ exactn, # ifdef MBS_SUPPORT /* Same as exactn, but contains binary data. */ exactn_bin, # endif /* Matches any (more or less) character. */ anychar, /* Matches any one char belonging to specified set. First following byte is number of bitmap bytes. Then come bytes for a bitmap saying which chars are in. Bits in each byte are ordered low-bit-first. A character is in the set if its bit is 1. A character too large to have a bit in the map is automatically not in the set. */ /* ifdef MBS_SUPPORT, following element is length of character classes, length of collating symbols, length of equivalence classes, length of character ranges, and length of characters. Next, character class element, collating symbols elements, equivalence class elements, range elements, and character elements follow. See regex_compile function. */ charset, /* Same parameters as charset, but match any character that is not one of those specified. */ charset_not, /* Start remembering the text that is matched, for storing in a register. Followed by one byte with the register number, in the range 0 to one less than the pattern buffer's re_nsub field. Then followed by one byte with the number of groups inner to this one. (This last has to be part of the start_memory only because we need it in the on_failure_jump of re_match_2.) */ start_memory, /* Stop remembering the text that is matched and store it in a memory register. Followed by one byte with the register number, in the range 0 to one less than `re_nsub' in the pattern buffer, and one byte with the number of inner groups, just like `start_memory'. (We need the number of inner groups here because we don't have any easy way of finding the corresponding start_memory when we're at a stop_memory.) 
*/ stop_memory, /* Match a duplicate of something remembered. Followed by one byte containing the register number. */ duplicate, /* Fail unless at beginning of line. */ begline, /* Fail unless at end of line. */ endline, /* Succeeds if at beginning of buffer (if emacs) or at beginning of string to be matched (if not). */ begbuf, /* Analogously, for end of buffer/string. */ endbuf, /* Followed by two byte relative address to which to jump. */ jump, /* Same as jump, but marks the end of an alternative. */ jump_past_alt, /* Followed by two-byte relative address of place to resume at in case of failure. */ /* ifdef MBS_SUPPORT, the size of address is 1. */ on_failure_jump, /* Like on_failure_jump, but pushes a placeholder instead of the current string position when executed. */ on_failure_keep_string_jump, /* Throw away latest failure point and then jump to following two-byte relative address. */ /* ifdef MBS_SUPPORT, the size of address is 1. */ pop_failure_jump, /* Change to pop_failure_jump if know won't have to backtrack to match; otherwise change to jump. This is used to jump back to the beginning of a repeat. If what follows this jump clearly won't match what the repeat does, such that we can be sure that there is no use backtracking out of repetitions already matched, then we change it to a pop_failure_jump. Followed by two-byte address. */ /* ifdef MBS_SUPPORT, the size of address is 1. */ maybe_pop_jump, /* Jump to following two-byte address, and push a dummy failure point. This failure point will be thrown away if an attempt is made to use it for a failure. A `+' construct makes this before the first repeat. Also used as an intermediary kind of jump when compiling an alternative. */ /* ifdef MBS_SUPPORT, the size of address is 1. */ dummy_failure_jump, /* Push a dummy failure point and continue. Used at the end of alternatives. */ push_dummy_failure, /* Followed by two-byte relative address and two-byte number n. After matching N times, jump to the address upon failure. */ /* ifdef MBS_SUPPORT, the size of address is 1. */ succeed_n, /* Followed by two-byte relative address, and two-byte number n. Jump to the address N times, then fail. */ /* ifdef MBS_SUPPORT, the size of address is 1. */ jump_n, /* Set the following two-byte relative address to the subsequent two-byte number. The address *includes* the two bytes of number. */ /* ifdef MBS_SUPPORT, the size of address is 1. */ set_number_at, wordchar, /* Matches any word-constituent character. */ notwordchar, /* Matches any char that is not a word-constituent. */ wordbeg, /* Succeeds if at word beginning. */ wordend, /* Succeeds if at word end. */ wordbound, /* Succeeds if at a word boundary. */ notwordbound /* Succeeds if not at a word boundary. */ # ifdef emacs ,before_dot, /* Succeeds if before point. */ at_dot, /* Succeeds if at point. */ after_dot, /* Succeeds if after point. */ /* Matches any character whose syntax is specified. Followed by a byte which contains a syntax code, e.g., Sword. */ syntaxspec, /* Matches any character whose syntax is not that specified. 
*/ notsyntaxspec # endif /* emacs */ } re_opcode_t; #endif /* not INSIDE_RECURSION */ #ifdef BYTE # define CHAR_T char # define UCHAR_T unsigned char # define COMPILED_BUFFER_VAR bufp->buffer # define OFFSET_ADDRESS_SIZE 2 # if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) # define PREFIX(name) byte_##name # else # define PREFIX(name) byte_/**/name # endif # define ARG_PREFIX(name) name # define PUT_CHAR(c) putchar (c) #else # ifdef WCHAR # define CHAR_T wchar_t # define UCHAR_T wchar_t # define COMPILED_BUFFER_VAR wc_buffer # define OFFSET_ADDRESS_SIZE 1 /* the size which STORE_NUMBER macro use */ # define CHAR_CLASS_SIZE ((__alignof__(wctype_t)+sizeof(wctype_t))/sizeof(CHAR_T)+1) # if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) # define PREFIX(name) wcs_##name # define ARG_PREFIX(name) c##name # else # define PREFIX(name) wcs_/**/name # define ARG_PREFIX(name) c/**/name # endif /* Should we use wide stream?? */ # define PUT_CHAR(c) printf ("%C", c); # define TRUE 1 # define FALSE 0 # else # ifdef MBS_SUPPORT # define WCHAR # define INSIDE_RECURSION /* Extended regular expression matching and search library, version 0.12. (Implements POSIX draft P1003.2/D11.2, except for some of the internationalization features.) Copyright (C) 1993-1999, 2000, 2001, 2002 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ /* This file has been modified for usage in libiberty. It includes "xregex.h" instead of . The "xregex.h" header file renames all external routines with an "x" prefix so they do not collide with the native regex routines or with other components regex routines. */ /* AIX requires this to be the first thing in the file. */ #if defined _AIX && !defined __GNUC__ && !defined REGEX_MALLOC #pragma alloca #endif #undef _GNU_SOURCE #define _GNU_SOURCE #ifdef HAVE_CONFIG_H #endif #ifndef PARAMS # if defined __GNUC__ || (defined __STDC__ && __STDC__) # define PARAMS(args) args # else # define PARAMS(args) () # endif /* GCC. */ #endif /* Not PARAMS. 
*/ #ifdef BYTE # define CHAR_T char # define UCHAR_T unsigned char # define COMPILED_BUFFER_VAR bufp->buffer # define OFFSET_ADDRESS_SIZE 2 # if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) # define PREFIX(name) byte_##name # else # define PREFIX(name) byte_/**/name # endif # define ARG_PREFIX(name) name # define PUT_CHAR(c) putchar (c) #else # ifdef WCHAR # define CHAR_T wchar_t # define UCHAR_T wchar_t # define COMPILED_BUFFER_VAR wc_buffer # define OFFSET_ADDRESS_SIZE 1 /* the size which STORE_NUMBER macro use */ # define CHAR_CLASS_SIZE ((__alignof__(wctype_t)+sizeof(wctype_t))/sizeof(CHAR_T)+1) # if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) # define PREFIX(name) wcs_##name # define ARG_PREFIX(name) c##name # else # define PREFIX(name) wcs_/**/name # define ARG_PREFIX(name) c/**/name # endif /* Should we use wide stream?? */ # define PUT_CHAR(c) printf ("%C", c); # define TRUE 1 # define FALSE 0 # else # ifdef MBS_SUPPORT # define WCHAR # define INSIDE_RECURSION /*# include "regex.c"*/ # undef INSIDE_RECURSION # endif # define BYTE # define INSIDE_RECURSION /*# include "regex.c"*/ # undef INSIDE_RECURSION # endif #endif #ifdef INSIDE_RECURSION /* Common operations on the compiled pattern. */ /* Store NUMBER in two contiguous bytes starting at DESTINATION. */ /* ifdef MBS_SUPPORT, we store NUMBER in 1 element. */ # ifdef WCHAR # define STORE_NUMBER(destination, number) \ do { \ *(destination) = (UCHAR_T)(number); \ } while (0) # else /* BYTE */ # define STORE_NUMBER(destination, number) \ do { \ (destination)[0] = (number) & 0377; \ (destination)[1] = (number) >> 8; \ } while (0) # endif /* WCHAR */ /* Same as STORE_NUMBER, except increment DESTINATION to the byte after where the number is stored. Therefore, DESTINATION must be an lvalue. */ /* ifdef MBS_SUPPORT, we store NUMBER in 1 element. */ # define STORE_NUMBER_AND_INCR(destination, number) \ do { \ STORE_NUMBER (destination, number); \ (destination) += OFFSET_ADDRESS_SIZE; \ } while (0) /* Put into DESTINATION a number stored in two contiguous bytes starting at SOURCE. */ /* ifdef MBS_SUPPORT, we store NUMBER in 1 element. */ # ifdef WCHAR # define EXTRACT_NUMBER(destination, source) \ do { \ (destination) = *(source); \ } while (0) # else /* BYTE */ # define EXTRACT_NUMBER(destination, source) \ do { \ (destination) = *(source) & 0377; \ (destination) += SIGN_EXTEND_CHAR (*((source) + 1)) << 8; \ } while (0) # endif # ifdef DEBUG static void PREFIX(extract_number) _RE_ARGS ((int *dest, UCHAR_T *source)); static void PREFIX(extract_number) (dest, source) int *dest; UCHAR_T *source; { # ifdef WCHAR *dest = *source; # else /* BYTE */ int temp = SIGN_EXTEND_CHAR (*(source + 1)); *dest = *source & 0377; *dest += temp << 8; # endif } # ifndef EXTRACT_MACROS /* To debug the macros. */ # undef EXTRACT_NUMBER # define EXTRACT_NUMBER(dest, src) PREFIX(extract_number) (&dest, src) # endif /* not EXTRACT_MACROS */ # endif /* DEBUG */ /* Same as EXTRACT_NUMBER, except increment SOURCE to after the number. SOURCE must be an lvalue. 
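   As a worked illustration (a sketch only, for the single-byte `BYTE'
   configuration where OFFSET_ADDRESS_SIZE is 2): the low byte is stored
   first and the high byte is sign-extended on extraction, so negative
   relative addresses round-trip correctly.

     unsigned char buf[2];
     int n;

     STORE_NUMBER (buf, 300);    // buf[0] == 0x2c, buf[1] == 0x01
     EXTRACT_NUMBER (n, buf);    // n == 300 again
     STORE_NUMBER (buf, -3);     // buf[0] == 0xfd, buf[1] == 0xff
     EXTRACT_NUMBER (n, buf);    // the high byte sign-extends, so n == -3
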
*/ # define EXTRACT_NUMBER_AND_INCR(destination, source) \ do { \ EXTRACT_NUMBER (destination, source); \ (source) += OFFSET_ADDRESS_SIZE; \ } while (0) # ifdef DEBUG static void PREFIX(extract_number_and_incr) _RE_ARGS ((int *destination, UCHAR_T **source)); static void PREFIX(extract_number_and_incr) (destination, source) int *destination; UCHAR_T **source; { PREFIX(extract_number) (destination, *source); *source += OFFSET_ADDRESS_SIZE; } # ifndef EXTRACT_MACROS # undef EXTRACT_NUMBER_AND_INCR # define EXTRACT_NUMBER_AND_INCR(dest, src) \ PREFIX(extract_number_and_incr) (&dest, &src) # endif /* not EXTRACT_MACROS */ # endif /* DEBUG */ /* If DEBUG is defined, Regex prints many voluminous messages about what it is doing (if the variable `debug' is nonzero). If linked with the main program in `iregex.c', you can enter patterns and strings interactively. And if linked with the main program in `main.c' and the other test files, you can run the already-written tests. */ # ifdef DEBUG # ifndef DEFINED_ONCE /* We use standard I/O for debugging. */ # include /* It is useful to test things that ``must'' be true when debugging. */ # include static int debug; # define DEBUG_STATEMENT(e) e # define DEBUG_PRINT1(x) if (debug) printf (x) # define DEBUG_PRINT2(x1, x2) if (debug) printf (x1, x2) # define DEBUG_PRINT3(x1, x2, x3) if (debug) printf (x1, x2, x3) # define DEBUG_PRINT4(x1, x2, x3, x4) if (debug) printf (x1, x2, x3, x4) # endif /* not DEFINED_ONCE */ # define DEBUG_PRINT_COMPILED_PATTERN(p, s, e) \ if (debug) PREFIX(print_partial_compiled_pattern) (s, e) # define DEBUG_PRINT_DOUBLE_STRING(w, s1, sz1, s2, sz2) \ if (debug) PREFIX(print_double_string) (w, s1, sz1, s2, sz2) /* Print the fastmap in human-readable form. */ # ifndef DEFINED_ONCE void print_fastmap (fastmap) char *fastmap; { unsigned was_a_range = 0; unsigned i = 0; while (i < (1 << BYTEWIDTH)) { if (fastmap[i++]) { was_a_range = 0; putchar (i - 1); while (i < (1 << BYTEWIDTH) && fastmap[i]) { was_a_range = 1; i++; } if (was_a_range) { printf ("-"); putchar (i - 1); } } } putchar ('\n'); } # endif /* not DEFINED_ONCE */ /* Print a compiled pattern string in human-readable form, starting at the START pointer into it and ending just before the pointer END. */ void PREFIX(print_partial_compiled_pattern) (start, end) UCHAR_T *start; UCHAR_T *end; { int mcnt, mcnt2; UCHAR_T *p1; UCHAR_T *p = start; UCHAR_T *pend = end; if (start == NULL) { printf ("(null)\n"); return; } /* Loop over pattern commands. */ while (p < pend) { # ifdef _LIBC printf ("%td:\t", p - start); # else printf ("%ld:\t", (long int) (p - start)); # endif switch ((re_opcode_t) *p++) { case no_op: printf ("/no_op"); break; case exactn: mcnt = *p++; printf ("/exactn/%d", mcnt); do { putchar ('/'); PUT_CHAR (*p++); } while (--mcnt); break; # ifdef MBS_SUPPORT case exactn_bin: mcnt = *p++; printf ("/exactn_bin/%d", mcnt); do { printf("/%lx", (long int) *p++); } while (--mcnt); break; # endif /* MBS_SUPPORT */ case start_memory: mcnt = *p++; printf ("/start_memory/%d/%ld", mcnt, (long int) *p++); break; case stop_memory: mcnt = *p++; printf ("/stop_memory/%d/%ld", mcnt, (long int) *p++); break; case duplicate: printf ("/duplicate/%ld", (long int) *p++); break; case anychar: printf ("/anychar"); break; case charset: case charset_not: { # ifdef WCHAR int i, length; wchar_t *workp = p; printf ("/charset [%s", (re_opcode_t) *(workp - 1) == charset_not ? 
"^" : ""); p += 5; length = *workp++; /* the length of char_classes */ for (i=0 ; ibuffer; PREFIX(print_partial_compiled_pattern) (buffer, buffer + bufp->used / sizeof(UCHAR_T)); printf ("%ld bytes used/%ld bytes allocated.\n", bufp->used, bufp->allocated); if (bufp->fastmap_accurate && bufp->fastmap) { printf ("fastmap: "); print_fastmap (bufp->fastmap); } # ifdef _LIBC printf ("re_nsub: %Zd\t", bufp->re_nsub); # else printf ("re_nsub: %ld\t", (long int) bufp->re_nsub); # endif printf ("regs_alloc: %d\t", bufp->regs_allocated); printf ("can_be_null: %d\t", bufp->can_be_null); printf ("newline_anchor: %d\n", bufp->newline_anchor); printf ("no_sub: %d\t", bufp->no_sub); printf ("not_bol: %d\t", bufp->not_bol); printf ("not_eol: %d\t", bufp->not_eol); printf ("syntax: %lx\n", bufp->syntax); /* Perhaps we should print the translate table? */ } void PREFIX(print_double_string) (where, string1, size1, string2, size2) const CHAR_T *where; const CHAR_T *string1; const CHAR_T *string2; int size1; int size2; { int this_char; if (where == NULL) printf ("(null)"); else { int cnt; if (FIRST_STRING_P (where)) { for (this_char = where - string1; this_char < size1; this_char++) PUT_CHAR (string1[this_char]); where = string2; } cnt = 0; for (this_char = where - string2; this_char < size2; this_char++) { PUT_CHAR (string2[this_char]); if (++cnt > 100) { fputs ("...", stdout); break; } } } } # ifndef DEFINED_ONCE void printchar (c) int c; { putc (c, stderr); } # endif # else /* not DEBUG */ # ifndef DEFINED_ONCE # undef assert # define assert(e) # define DEBUG_STATEMENT(e) # define DEBUG_PRINT1(x) # define DEBUG_PRINT2(x1, x2) # define DEBUG_PRINT3(x1, x2, x3) # define DEBUG_PRINT4(x1, x2, x3, x4) # endif /* not DEFINED_ONCE */ # define DEBUG_PRINT_COMPILED_PATTERN(p, s, e) # define DEBUG_PRINT_DOUBLE_STRING(w, s1, sz1, s2, sz2) # endif /* not DEBUG */ # ifdef WCHAR /* This convert a multibyte string to a wide character string. And write their correspondances to offset_buffer(see below) and write whether each wchar_t is binary data to is_binary. This assume invalid multibyte sequences as binary data. We assume offset_buffer and is_binary is already allocated enough space. */ static size_t convert_mbs_to_wcs (CHAR_T *dest, const unsigned char* src, size_t len, int *offset_buffer, char *is_binary); static size_t convert_mbs_to_wcs (dest, src, len, offset_buffer, is_binary) CHAR_T *dest; const unsigned char* src; size_t len; /* the length of multibyte string. */ /* It hold correspondances between src(char string) and dest(wchar_t string) for optimization. e.g. src = "xxxyzz" dest = {'X', 'Y', 'Z'} (each "xxx", "y" and "zz" represent one multibyte character corresponding to 'X', 'Y' and 'Z'.) offset_buffer = {0, 0+3("xxx"), 0+3+1("y"), 0+3+1+2("zz")} = {0, 3, 4, 6} */ int *offset_buffer; char *is_binary; { wchar_t *pdest = dest; const unsigned char *psrc = src; size_t wc_count = 0; mbstate_t mbs; int i, consumed; size_t mb_remain = len; size_t mb_count = 0; /* Initialize the conversion state. */ memset (&mbs, 0, sizeof (mbstate_t)); offset_buffer[0] = 0; for( ; mb_remain > 0 ; ++wc_count, ++pdest, mb_remain -= consumed, psrc += consumed) { #ifdef _LIBC consumed = __mbrtowc (pdest, psrc, mb_remain, &mbs); #else consumed = mbrtowc (pdest, psrc, mb_remain, &mbs); #endif if (consumed <= 0) /* failed to convert. maybe src contains binary data. So we consume 1 byte manualy. 
*/ { *pdest = *psrc; consumed = 1; is_binary[wc_count] = TRUE; } else is_binary[wc_count] = FALSE; /* In sjis encoding, we use yen sign as escape character in place of reverse solidus. So we convert 0x5c(yen sign in sjis) to not 0xa5(yen sign in UCS2) but 0x5c(reverse solidus in UCS2). */ if (consumed == 1 && (int) *psrc == 0x5c && (int) *pdest == 0xa5) *pdest = (wchar_t) *psrc; offset_buffer[wc_count + 1] = mb_count += consumed; } /* Fill remain of the buffer with sentinel. */ for (i = wc_count + 1 ; i <= len ; i++) offset_buffer[i] = mb_count + 1; return wc_count; } # endif /* WCHAR */ #else /* not INSIDE_RECURSION */ /* Set by `re_set_syntax' to the current regexp syntax to recognize. Can also be assigned to arbitrarily: each pattern buffer stores its own syntax, so it can be changed between regex compilations. */ /* This has no initializer because initialized variables in Emacs become read-only after dumping. */ reg_syntax_t re_syntax_options; /* Specify the precise syntax of regexps for compilation. This provides for compatibility for various utilities which historically have different, incompatible syntaxes. The argument SYNTAX is a bit mask comprised of the various bits defined in regex.h. We return the old syntax. */ reg_syntax_t re_set_syntax (syntax) reg_syntax_t syntax; { reg_syntax_t ret = re_syntax_options; re_syntax_options = syntax; # ifdef DEBUG if (syntax & RE_DEBUG) debug = 1; else if (debug) /* was on but now is not */ debug = 0; # endif /* DEBUG */ return ret; } # ifdef _LIBC weak_alias (__re_set_syntax, re_set_syntax) # endif /* This table gives an error message for each of the error codes listed in regex.h. Obviously the order here has to be same as there. POSIX doesn't require that we do anything for REG_NOERROR, but why not be nice? */ static const char *re_error_msgid[] = { gettext_noop ("Success"), /* REG_NOERROR */ gettext_noop ("No match"), /* REG_NOMATCH */ gettext_noop ("Invalid regular expression"), /* REG_BADPAT */ gettext_noop ("Invalid collation character"), /* REG_ECOLLATE */ gettext_noop ("Invalid character class name"), /* REG_ECTYPE */ gettext_noop ("Trailing backslash"), /* REG_EESCAPE */ gettext_noop ("Invalid back reference"), /* REG_ESUBREG */ gettext_noop ("Unmatched [ or [^"), /* REG_EBRACK */ gettext_noop ("Unmatched ( or \\("), /* REG_EPAREN */ gettext_noop ("Unmatched \\{"), /* REG_EBRACE */ gettext_noop ("Invalid content of \\{\\}"), /* REG_BADBR */ gettext_noop ("Invalid range end"), /* REG_ERANGE */ gettext_noop ("Memory exhausted"), /* REG_ESPACE */ gettext_noop ("Invalid preceding regular expression"), /* REG_BADRPT */ gettext_noop ("Premature end of regular expression"), /* REG_EEND */ gettext_noop ("Regular expression too big"), /* REG_ESIZE */ gettext_noop ("Unmatched ) or \\)") /* REG_ERPAREN */ }; #endif /* INSIDE_RECURSION */ #ifndef DEFINED_ONCE /* Avoiding alloca during matching, to placate r_alloc. */ /* Define MATCH_MAY_ALLOCATE unless we need to make sure that the searching and matching functions should not call alloca. On some systems, alloca is implemented in terms of malloc, and if we're using the relocating allocator routines, then malloc could cause a relocation, which might (if the strings being searched are in the ralloc heap) shift the data out from underneath the regexp routines. Here's another reason to avoid allocation: Emacs processes input from X in a signal handler; processing X input may call malloc; if input arrives while a matching routine is calling malloc, then we're scrod. 
But Emacs can't just block input while calling matching routines; then we don't notice interrupts when they come in. So, Emacs blocks input around all regexp calls except the matching calls, which it leaves unprotected, in the faith that they will not malloc. */ /* Normally, this is fine. */ # define MATCH_MAY_ALLOCATE /* When using GNU C, we are not REALLY using the C alloca, no matter what config.h may say. So don't take precautions for it. */ # ifdef __GNUC__ # undef C_ALLOCA # endif /* The match routines may not allocate if (1) they would do it with malloc and (2) it's not safe for them to use malloc. Note that if REL_ALLOC is defined, matching would not use malloc for the failure stack, but we would still use it for the register vectors; so REL_ALLOC should not affect this. */ # if (defined C_ALLOCA || defined REGEX_MALLOC) && defined emacs # undef MATCH_MAY_ALLOCATE # endif #endif /* not DEFINED_ONCE */ #ifdef INSIDE_RECURSION /* Failure stack declarations and macros; both re_compile_fastmap and re_match_2 use a failure stack. These have to be macros because of REGEX_ALLOCATE_STACK. */ /* Number of failure points for which to initially allocate space when matching. If this number is exceeded, we allocate more space, so it is not a hard limit. */ # ifndef INIT_FAILURE_ALLOC # define INIT_FAILURE_ALLOC 5 # endif /* Roughly the maximum number of failure points on the stack. Would be exactly that if always used MAX_FAILURE_ITEMS items each time we failed. This is a variable only so users of regex can assign to it; we never change it ourselves. */ # ifdef INT_IS_16BIT # ifndef DEFINED_ONCE # if defined MATCH_MAY_ALLOCATE /* 4400 was enough to cause a crash on Alpha OSF/1, whose default stack limit is 2mb. */ long int re_max_failures = 4000; # else long int re_max_failures = 2000; # endif # endif union PREFIX(fail_stack_elt) { UCHAR_T *pointer; long int integer; }; typedef union PREFIX(fail_stack_elt) PREFIX(fail_stack_elt_t); typedef struct { PREFIX(fail_stack_elt_t) *stack; unsigned long int size; unsigned long int avail; /* Offset of next open position. */ } PREFIX(fail_stack_type); # else /* not INT_IS_16BIT */ # ifndef DEFINED_ONCE # if defined MATCH_MAY_ALLOCATE /* 4400 was enough to cause a crash on Alpha OSF/1, whose default stack limit is 2mb. */ int re_max_failures = 4000; # else int re_max_failures = 2000; # endif # endif union PREFIX(fail_stack_elt) { UCHAR_T *pointer; int integer; }; typedef union PREFIX(fail_stack_elt) PREFIX(fail_stack_elt_t); typedef struct { PREFIX(fail_stack_elt_t) *stack; unsigned size; unsigned avail; /* Offset of next open position. */ } PREFIX(fail_stack_type); # endif /* INT_IS_16BIT */ # ifndef DEFINED_ONCE # define FAIL_STACK_EMPTY() (fail_stack.avail == 0) # define FAIL_STACK_PTR_EMPTY() (fail_stack_ptr->avail == 0) # define FAIL_STACK_FULL() (fail_stack.avail == fail_stack.size) # endif /* Define macros to initialize and free the failure stack. Do `return -2' if the alloc fails. 
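   As a rough model of the allocate-and-double discipline used below (a
   sketch only: `demo_stack', `demo_init' and `demo_push' are hypothetical
   names, and it uses malloc/realloc from <stdlib.h> directly rather than
   REGEX_ALLOCATE_STACK), the idea is a growable array with an `avail'
   cursor, grown by doubling when full:

     typedef struct { void **items; unsigned size; unsigned avail; } demo_stack;

     static int
     demo_init (demo_stack *s)
     {
       s->items = malloc (5 * sizeof (void *));   // 5 mirrors INIT_FAILURE_ALLOC
       if (s->items == NULL)
         return -2;                               // mirrors the `return -2' above
       s->size = 5;
       s->avail = 0;
       return 0;
     }

     static int
     demo_push (demo_stack *s, void *item)
     {
       if (s->avail == s->size)                   // full: double, as DOUBLE_FAIL_STACK does
         {
           void **bigger = realloc (s->items, 2 * s->size * sizeof (void *));
           if (bigger == NULL)
             return 0;
           s->items = bigger;
           s->size *= 2;
         }
       s->items[s->avail++] = item;               // push, like PUSH_FAILURE_POINTER
       return 1;
     }
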
*/ # ifdef MATCH_MAY_ALLOCATE # define INIT_FAIL_STACK() \ do { \ fail_stack.stack = (PREFIX(fail_stack_elt_t) *) \ REGEX_ALLOCATE_STACK (INIT_FAILURE_ALLOC * sizeof (PREFIX(fail_stack_elt_t))); \ \ if (fail_stack.stack == NULL) \ return -2; \ \ fail_stack.size = INIT_FAILURE_ALLOC; \ fail_stack.avail = 0; \ } while (0) # define RESET_FAIL_STACK() REGEX_FREE_STACK (fail_stack.stack) # else # define INIT_FAIL_STACK() \ do { \ fail_stack.avail = 0; \ } while (0) # define RESET_FAIL_STACK() # endif /* Double the size of FAIL_STACK, up to approximately `re_max_failures' items. Return 1 if succeeds, and 0 if either ran out of memory allocating space for it or it was already too large. REGEX_REALLOCATE_STACK requires `destination' be declared. */ # define DOUBLE_FAIL_STACK(fail_stack) \ ((fail_stack).size > (unsigned) (re_max_failures * MAX_FAILURE_ITEMS) \ ? 0 \ : ((fail_stack).stack = (PREFIX(fail_stack_elt_t) *) \ REGEX_REALLOCATE_STACK ((fail_stack).stack, \ (fail_stack).size * sizeof (PREFIX(fail_stack_elt_t)), \ ((fail_stack).size << 1) * sizeof (PREFIX(fail_stack_elt_t))),\ \ (fail_stack).stack == NULL \ ? 0 \ : ((fail_stack).size <<= 1, \ 1))) /* Push pointer POINTER on FAIL_STACK. Return 1 if was able to do so and 0 if ran out of memory allocating space to do so. */ # define PUSH_PATTERN_OP(POINTER, FAIL_STACK) \ ((FAIL_STACK_FULL () \ && !DOUBLE_FAIL_STACK (FAIL_STACK)) \ ? 0 \ : ((FAIL_STACK).stack[(FAIL_STACK).avail++].pointer = POINTER, \ 1)) /* Push a pointer value onto the failure stack. Assumes the variable `fail_stack'. Probably should only be called from within `PUSH_FAILURE_POINT'. */ # define PUSH_FAILURE_POINTER(item) \ fail_stack.stack[fail_stack.avail++].pointer = (UCHAR_T *) (item) /* This pushes an integer-valued item onto the failure stack. Assumes the variable `fail_stack'. Probably should only be called from within `PUSH_FAILURE_POINT'. */ # define PUSH_FAILURE_INT(item) \ fail_stack.stack[fail_stack.avail++].integer = (item) /* Push a fail_stack_elt_t value onto the failure stack. Assumes the variable `fail_stack'. Probably should only be called from within `PUSH_FAILURE_POINT'. */ # define PUSH_FAILURE_ELT(item) \ fail_stack.stack[fail_stack.avail++] = (item) /* These three POP... operations complement the three PUSH... operations. All assume that `fail_stack' is nonempty. */ # define POP_FAILURE_POINTER() fail_stack.stack[--fail_stack.avail].pointer # define POP_FAILURE_INT() fail_stack.stack[--fail_stack.avail].integer # define POP_FAILURE_ELT() fail_stack.stack[--fail_stack.avail] /* Used to omit pushing failure point id's when we're not debugging. */ # ifdef DEBUG # define DEBUG_PUSH PUSH_FAILURE_INT # define DEBUG_POP(item_addr) *(item_addr) = POP_FAILURE_INT () # else # define DEBUG_PUSH(item) # define DEBUG_POP(item_addr) # endif /* Push the information about the state we will need if we ever fail back to it. Requires variables fail_stack, regstart, regend, reg_info, and num_regs_pushed be declared. DOUBLE_FAIL_STACK requires `destination' be declared. Does `return FAILURE_CODE' if runs out of memory. */ # define PUSH_FAILURE_POINT(pattern_place, string_place, failure_code) \ do { \ char *destination; \ /* Must be int, so when we don't save any registers, the arithmetic \ of 0 + -1 isn't done as unsigned. 
*/ \ /* Can't be int, since there is not a shred of a guarantee that int \ is wide enough to hold a value of something to which pointer can \ be assigned */ \ active_reg_t this_reg; \ \ DEBUG_STATEMENT (failure_id++); \ DEBUG_STATEMENT (nfailure_points_pushed++); \ DEBUG_PRINT2 ("\nPUSH_FAILURE_POINT #%u:\n", failure_id); \ DEBUG_PRINT2 (" Before push, next avail: %d\n", (fail_stack).avail);\ DEBUG_PRINT2 (" size: %d\n", (fail_stack).size);\ \ DEBUG_PRINT2 (" slots needed: %ld\n", NUM_FAILURE_ITEMS); \ DEBUG_PRINT2 (" available: %d\n", REMAINING_AVAIL_SLOTS); \ \ /* Ensure we have enough space allocated for what we will push. */ \ while (REMAINING_AVAIL_SLOTS < NUM_FAILURE_ITEMS) \ { \ if (!DOUBLE_FAIL_STACK (fail_stack)) \ return failure_code; \ \ DEBUG_PRINT2 ("\n Doubled stack; size now: %d\n", \ (fail_stack).size); \ DEBUG_PRINT2 (" slots available: %d\n", REMAINING_AVAIL_SLOTS);\ } \ \ /* Push the info, starting with the registers. */ \ DEBUG_PRINT1 ("\n"); \ \ if (1) \ for (this_reg = lowest_active_reg; this_reg <= highest_active_reg; \ this_reg++) \ { \ DEBUG_PRINT2 (" Pushing reg: %lu\n", this_reg); \ DEBUG_STATEMENT (num_regs_pushed++); \ \ DEBUG_PRINT2 (" start: %p\n", regstart[this_reg]); \ PUSH_FAILURE_POINTER (regstart[this_reg]); \ \ DEBUG_PRINT2 (" end: %p\n", regend[this_reg]); \ PUSH_FAILURE_POINTER (regend[this_reg]); \ \ DEBUG_PRINT2 (" info: %p\n ", \ reg_info[this_reg].word.pointer); \ DEBUG_PRINT2 (" match_null=%d", \ REG_MATCH_NULL_STRING_P (reg_info[this_reg])); \ DEBUG_PRINT2 (" active=%d", IS_ACTIVE (reg_info[this_reg])); \ DEBUG_PRINT2 (" matched_something=%d", \ MATCHED_SOMETHING (reg_info[this_reg])); \ DEBUG_PRINT2 (" ever_matched=%d", \ EVER_MATCHED_SOMETHING (reg_info[this_reg])); \ DEBUG_PRINT1 ("\n"); \ PUSH_FAILURE_ELT (reg_info[this_reg].word); \ } \ \ DEBUG_PRINT2 (" Pushing low active reg: %ld\n", lowest_active_reg);\ PUSH_FAILURE_INT (lowest_active_reg); \ \ DEBUG_PRINT2 (" Pushing high active reg: %ld\n", highest_active_reg);\ PUSH_FAILURE_INT (highest_active_reg); \ \ DEBUG_PRINT2 (" Pushing pattern %p:\n", pattern_place); \ DEBUG_PRINT_COMPILED_PATTERN (bufp, pattern_place, pend); \ PUSH_FAILURE_POINTER (pattern_place); \ \ DEBUG_PRINT2 (" Pushing string %p: `", string_place); \ DEBUG_PRINT_DOUBLE_STRING (string_place, string1, size1, string2, \ size2); \ DEBUG_PRINT1 ("'\n"); \ PUSH_FAILURE_POINTER (string_place); \ \ DEBUG_PRINT2 (" Pushing failure id: %u\n", failure_id); \ DEBUG_PUSH (failure_id); \ } while (0) # ifndef DEFINED_ONCE /* This is the number of items that are pushed and popped on the stack for each register. */ # define NUM_REG_ITEMS 3 /* Individual items aside from the registers. */ # ifdef DEBUG # define NUM_NONREG_ITEMS 5 /* Includes failure point id. */ # else # define NUM_NONREG_ITEMS 4 # endif /* We push at most this many items on the stack. */ /* We used to use (num_regs - 1), which is the number of registers this regexp will save; but that was changed to 5 to avoid stack overflow for a regexp with lots of parens. */ # define MAX_FAILURE_ITEMS (5 * NUM_REG_ITEMS + NUM_NONREG_ITEMS) /* We actually push this many items. */ # define NUM_FAILURE_ITEMS \ (((0 \ ? 0 : highest_active_reg - lowest_active_reg + 1) \ * NUM_REG_ITEMS) \ + NUM_NONREG_ITEMS) /* How many items can still be added to the stack without overflowing it. */ # define REMAINING_AVAIL_SLOTS ((fail_stack).size - (fail_stack).avail) # endif /* not DEFINED_ONCE */ /* Pops what PUSH_FAIL_STACK pushes. 
We restore into the parameters, all of which should be lvalues: STR -- the saved data position. PAT -- the saved pattern position. LOW_REG, HIGH_REG -- the highest and lowest active registers. REGSTART, REGEND -- arrays of string positions. REG_INFO -- array of information about each subexpression. Also assumes the variables `fail_stack' and (if debugging), `bufp', `pend', `string1', `size1', `string2', and `size2'. */ # define POP_FAILURE_POINT(str, pat, low_reg, high_reg, regstart, regend, reg_info)\ { \ DEBUG_STATEMENT (unsigned failure_id;) \ active_reg_t this_reg; \ const UCHAR_T *string_temp; \ \ assert (!FAIL_STACK_EMPTY ()); \ \ /* Remove failure points and point to how many regs pushed. */ \ DEBUG_PRINT1 ("POP_FAILURE_POINT:\n"); \ DEBUG_PRINT2 (" Before pop, next avail: %d\n", fail_stack.avail); \ DEBUG_PRINT2 (" size: %d\n", fail_stack.size); \ \ assert (fail_stack.avail >= NUM_NONREG_ITEMS); \ \ DEBUG_POP (&failure_id); \ DEBUG_PRINT2 (" Popping failure id: %u\n", failure_id); \ \ /* If the saved string location is NULL, it came from an \ on_failure_keep_string_jump opcode, and we want to throw away the \ saved NULL, thus retaining our current position in the string. */ \ string_temp = POP_FAILURE_POINTER (); \ if (string_temp != NULL) \ str = (const CHAR_T *) string_temp; \ \ DEBUG_PRINT2 (" Popping string %p: `", str); \ DEBUG_PRINT_DOUBLE_STRING (str, string1, size1, string2, size2); \ DEBUG_PRINT1 ("'\n"); \ \ pat = (UCHAR_T *) POP_FAILURE_POINTER (); \ DEBUG_PRINT2 (" Popping pattern %p:\n", pat); \ DEBUG_PRINT_COMPILED_PATTERN (bufp, pat, pend); \ \ /* Restore register info. */ \ high_reg = (active_reg_t) POP_FAILURE_INT (); \ DEBUG_PRINT2 (" Popping high active reg: %ld\n", high_reg); \ \ low_reg = (active_reg_t) POP_FAILURE_INT (); \ DEBUG_PRINT2 (" Popping low active reg: %ld\n", low_reg); \ \ if (1) \ for (this_reg = high_reg; this_reg >= low_reg; this_reg--) \ { \ DEBUG_PRINT2 (" Popping reg: %ld\n", this_reg); \ \ reg_info[this_reg].word = POP_FAILURE_ELT (); \ DEBUG_PRINT2 (" info: %p\n", \ reg_info[this_reg].word.pointer); \ \ regend[this_reg] = (const CHAR_T *) POP_FAILURE_POINTER (); \ DEBUG_PRINT2 (" end: %p\n", regend[this_reg]); \ \ regstart[this_reg] = (const CHAR_T *) POP_FAILURE_POINTER (); \ DEBUG_PRINT2 (" start: %p\n", regstart[this_reg]); \ } \ else \ { \ for (this_reg = highest_active_reg; this_reg > high_reg; this_reg--) \ { \ reg_info[this_reg].word.integer = 0; \ regend[this_reg] = 0; \ regstart[this_reg] = 0; \ } \ highest_active_reg = high_reg; \ } \ \ set_regs_matched_done = 0; \ DEBUG_STATEMENT (nfailure_points_popped++); \ } /* POP_FAILURE_POINT */ /* Structure for per-register (a.k.a. per-group) information. Other register information, such as the starting and ending positions (which are addresses), and the list of inner groups (which is a bits list) are maintained in separate variables. We are making a (strictly speaking) nonportable assumption here: that the compiler will pack our bit fields into something that fits into the type of `word', i.e., is something that fits into one item on the failure stack. */ /* Declarations and macros for re_match_2. */ typedef union { PREFIX(fail_stack_elt_t) word; struct { /* This field is one if this group can match the empty string, zero if not. If not yet determined, `MATCH_NULL_UNSET_VALUE'. 
*/ # define MATCH_NULL_UNSET_VALUE 3 unsigned match_null_string_p : 2; unsigned is_active : 1; unsigned matched_something : 1; unsigned ever_matched_something : 1; } bits; } PREFIX(register_info_type); # ifndef DEFINED_ONCE # define REG_MATCH_NULL_STRING_P(R) ((R).bits.match_null_string_p) # define IS_ACTIVE(R) ((R).bits.is_active) # define MATCHED_SOMETHING(R) ((R).bits.matched_something) # define EVER_MATCHED_SOMETHING(R) ((R).bits.ever_matched_something) /* Call this when have matched a real character; it sets `matched' flags for the subexpressions which we are currently inside. Also records that those subexprs have matched. */ # define SET_REGS_MATCHED() \ do \ { \ if (!set_regs_matched_done) \ { \ active_reg_t r; \ set_regs_matched_done = 1; \ for (r = lowest_active_reg; r <= highest_active_reg; r++) \ { \ MATCHED_SOMETHING (reg_info[r]) \ = EVER_MATCHED_SOMETHING (reg_info[r]) \ = 1; \ } \ } \ } \ while (0) # endif /* not DEFINED_ONCE */ /* Registers are set to a sentinel when they haven't yet matched. */ static CHAR_T PREFIX(reg_unset_dummy); # define REG_UNSET_VALUE (&PREFIX(reg_unset_dummy)) # define REG_UNSET(e) ((e) == REG_UNSET_VALUE) /* Subroutine declarations and macros for regex_compile. */ static void PREFIX(store_op1) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg)); static void PREFIX(store_op2) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg1, int arg2)); static void PREFIX(insert_op1) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg, UCHAR_T *end)); static void PREFIX(insert_op2) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg1, int arg2, UCHAR_T *end)); static boolean PREFIX(at_begline_loc_p) _RE_ARGS ((const CHAR_T *pattern, const CHAR_T *p, reg_syntax_t syntax)); static boolean PREFIX(at_endline_loc_p) _RE_ARGS ((const CHAR_T *p, const CHAR_T *pend, reg_syntax_t syntax)); # ifdef WCHAR static reg_errcode_t wcs_compile_range _RE_ARGS ((CHAR_T range_start, const CHAR_T **p_ptr, const CHAR_T *pend, char *translate, reg_syntax_t syntax, UCHAR_T *b, CHAR_T *char_set)); static void insert_space _RE_ARGS ((int num, CHAR_T *loc, CHAR_T *end)); # else /* BYTE */ static reg_errcode_t byte_compile_range _RE_ARGS ((unsigned int range_start, const char **p_ptr, const char *pend, char *translate, reg_syntax_t syntax, unsigned char *b)); # endif /* WCHAR */ /* Fetch the next character in the uncompiled pattern---translating it if necessary. Also cast from a signed character in the constant string passed to us by the user to an unsigned char that we can use as an array index (in, e.g., `translate'). */ /* ifdef MBS_SUPPORT, we translate only if character <= 0xff, because it is impossible to allocate 4GB array for some encodings which have 4 byte character_set like UCS4. */ # ifndef PATFETCH # ifdef WCHAR # define PATFETCH(c) \ do {if (p == pend) return REG_EEND; \ c = (UCHAR_T) *p++; \ if (translate && (c <= 0xff)) c = (UCHAR_T) translate[c]; \ } while (0) # else /* BYTE */ # define PATFETCH(c) \ do {if (p == pend) return REG_EEND; \ c = (unsigned char) *p++; \ if (translate) c = (unsigned char) translate[c]; \ } while (0) # endif /* WCHAR */ # endif /* Fetch the next character in the uncompiled pattern, with no translation. */ # define PATFETCH_RAW(c) \ do {if (p == pend) return REG_EEND; \ c = (UCHAR_T) *p++; \ } while (0) /* Go backwards one character in the pattern. */ # define PATUNFETCH p-- /* If `translate' is non-null, return translate[D], else just D. 
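   (An illustrative sketch only: a caller who wants case-folded matching
   through the non-POSIX interface can install a 256-entry table before
   compiling, so both the pattern and the text are mapped through it, much
   as the POSIX wrapper does for REG_ICASE.  `fold' and `pb' are
   hypothetical names here; CHAR_SET_SIZE, ISUPPER and TOLOWER are the
   macros defined earlier in this file.

     static char fold[CHAR_SET_SIZE];
     int i;

     for (i = 0; i < CHAR_SET_SIZE; i++)
       fold[i] = ISUPPER (i) ? TOLOWER (i) : (char) i;
     pb.translate = fold;        // pb is a struct re_pattern_buffer, set before compiling

   End of sketch.)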
We cast the subscript to translate because some data is declared as `char *', to avoid warnings when a string constant is passed. But when we use a character as a subscript we must make it unsigned. */ /* ifdef MBS_SUPPORT, we translate only if character <= 0xff, because it is impossible to allocate 4GB array for some encodings which have 4 byte character_set like UCS4. */ # ifndef TRANSLATE # ifdef WCHAR # define TRANSLATE(d) \ ((translate && ((UCHAR_T) (d)) <= 0xff) \ ? (char) translate[(unsigned char) (d)] : (d)) # else /* BYTE */ # define TRANSLATE(d) \ (translate ? (char) translate[(unsigned char) (d)] : (d)) # endif /* WCHAR */ # endif /* Macros for outputting the compiled pattern into `buffer'. */ /* If the buffer isn't allocated when it comes in, use this. */ # define INIT_BUF_SIZE (32 * sizeof(UCHAR_T)) /* Make sure we have at least N more bytes of space in buffer. */ # ifdef WCHAR # define GET_BUFFER_SPACE(n) \ while (((unsigned long)b - (unsigned long)COMPILED_BUFFER_VAR \ + (n)*sizeof(CHAR_T)) > bufp->allocated) \ EXTEND_BUFFER () # else /* BYTE */ # define GET_BUFFER_SPACE(n) \ while ((unsigned long) (b - bufp->buffer + (n)) > bufp->allocated) \ EXTEND_BUFFER () # endif /* WCHAR */ /* Make sure we have one more byte of buffer space and then add C to it. */ # define BUF_PUSH(c) \ do { \ GET_BUFFER_SPACE (1); \ *b++ = (UCHAR_T) (c); \ } while (0) /* Ensure we have two more bytes of buffer space and then append C1 and C2. */ # define BUF_PUSH_2(c1, c2) \ do { \ GET_BUFFER_SPACE (2); \ *b++ = (UCHAR_T) (c1); \ *b++ = (UCHAR_T) (c2); \ } while (0) /* As with BUF_PUSH_2, except for three bytes. */ # define BUF_PUSH_3(c1, c2, c3) \ do { \ GET_BUFFER_SPACE (3); \ *b++ = (UCHAR_T) (c1); \ *b++ = (UCHAR_T) (c2); \ *b++ = (UCHAR_T) (c3); \ } while (0) /* Store a jump with opcode OP at LOC to location TO. We store a relative address offset by the three bytes the jump itself occupies. */ # define STORE_JUMP(op, loc, to) \ PREFIX(store_op1) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE))) /* Likewise, for a two-argument jump. */ # define STORE_JUMP2(op, loc, to, arg) \ PREFIX(store_op2) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE)), arg) /* Like `STORE_JUMP', but for inserting. Assume `b' is the buffer end. */ # define INSERT_JUMP(op, loc, to) \ PREFIX(insert_op1) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE)), b) /* Like `STORE_JUMP2', but for inserting. Assume `b' is the buffer end. */ # define INSERT_JUMP2(op, loc, to, arg) \ PREFIX(insert_op2) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE)),\ arg, b) /* This is not an arbitrary limit: the arguments which represent offsets into the pattern are two bytes long. So if 2^16 bytes turns out to be too small, many things would have to change. */ /* Any other compiler which, like MSC, has allocation limit below 2^16 bytes will have to use approach similar to what was done below for MSC and drop MAX_BUF_SIZE a bit. Otherwise you may end up reallocating to 0 bytes. Such thing is not going to work too well. You have been warned!! */ # ifndef DEFINED_ONCE # if defined _MSC_VER && !defined WIN32 /* Microsoft C 16-bit versions limit malloc to approx 65512 bytes. The REALLOC define eliminates a flurry of conversion warnings, but is not required. 
*/ # define MAX_BUF_SIZE 65500L # define REALLOC(p,s) realloc ((p), (size_t) (s)) # else # define MAX_BUF_SIZE (1L << 16) # define REALLOC(p,s) realloc ((p), (s)) # endif /* Extend the buffer by twice its current size via realloc and reset the pointers that pointed into the old block to point to the correct places in the new one. If extending the buffer results in it being larger than MAX_BUF_SIZE, then flag memory exhausted. */ # if __BOUNDED_POINTERS__ # define SET_HIGH_BOUND(P) (__ptrhigh (P) = __ptrlow (P) + bufp->allocated) # define MOVE_BUFFER_POINTER(P) \ (__ptrlow (P) += incr, SET_HIGH_BOUND (P), __ptrvalue (P) += incr) # define ELSE_EXTEND_BUFFER_HIGH_BOUND \ else \ { \ SET_HIGH_BOUND (b); \ SET_HIGH_BOUND (begalt); \ if (fixup_alt_jump) \ SET_HIGH_BOUND (fixup_alt_jump); \ if (laststart) \ SET_HIGH_BOUND (laststart); \ if (pending_exact) \ SET_HIGH_BOUND (pending_exact); \ } # else # define MOVE_BUFFER_POINTER(P) (P) += incr # define ELSE_EXTEND_BUFFER_HIGH_BOUND # endif # endif /* not DEFINED_ONCE */ # ifdef WCHAR # define EXTEND_BUFFER() \ do { \ UCHAR_T *old_buffer = COMPILED_BUFFER_VAR; \ int wchar_count; \ if (bufp->allocated + sizeof(UCHAR_T) > MAX_BUF_SIZE) \ return REG_ESIZE; \ bufp->allocated <<= 1; \ if (bufp->allocated > MAX_BUF_SIZE) \ bufp->allocated = MAX_BUF_SIZE; \ /* How many characters the new buffer can have? */ \ wchar_count = bufp->allocated / sizeof(UCHAR_T); \ if (wchar_count == 0) wchar_count = 1; \ /* Truncate the buffer to CHAR_T align. */ \ bufp->allocated = wchar_count * sizeof(UCHAR_T); \ RETALLOC (COMPILED_BUFFER_VAR, wchar_count, UCHAR_T); \ bufp->buffer = (char*)COMPILED_BUFFER_VAR; \ if (COMPILED_BUFFER_VAR == NULL) \ return REG_ESPACE; \ /* If the buffer moved, move all the pointers into it. */ \ if (old_buffer != COMPILED_BUFFER_VAR) \ { \ int incr = COMPILED_BUFFER_VAR - old_buffer; \ MOVE_BUFFER_POINTER (b); \ MOVE_BUFFER_POINTER (begalt); \ if (fixup_alt_jump) \ MOVE_BUFFER_POINTER (fixup_alt_jump); \ if (laststart) \ MOVE_BUFFER_POINTER (laststart); \ if (pending_exact) \ MOVE_BUFFER_POINTER (pending_exact); \ } \ ELSE_EXTEND_BUFFER_HIGH_BOUND \ } while (0) # else /* BYTE */ # define EXTEND_BUFFER() \ do { \ UCHAR_T *old_buffer = COMPILED_BUFFER_VAR; \ if (bufp->allocated == MAX_BUF_SIZE) \ return REG_ESIZE; \ bufp->allocated <<= 1; \ if (bufp->allocated > MAX_BUF_SIZE) \ bufp->allocated = MAX_BUF_SIZE; \ bufp->buffer = (UCHAR_T *) REALLOC (COMPILED_BUFFER_VAR, \ bufp->allocated); \ if (COMPILED_BUFFER_VAR == NULL) \ return REG_ESPACE; \ /* If the buffer moved, move all the pointers into it. */ \ if (old_buffer != COMPILED_BUFFER_VAR) \ { \ int incr = COMPILED_BUFFER_VAR - old_buffer; \ MOVE_BUFFER_POINTER (b); \ MOVE_BUFFER_POINTER (begalt); \ if (fixup_alt_jump) \ MOVE_BUFFER_POINTER (fixup_alt_jump); \ if (laststart) \ MOVE_BUFFER_POINTER (laststart); \ if (pending_exact) \ MOVE_BUFFER_POINTER (pending_exact); \ } \ ELSE_EXTEND_BUFFER_HIGH_BOUND \ } while (0) # endif /* WCHAR */ # ifndef DEFINED_ONCE /* Since we have one byte reserved for the register number argument to {start,stop}_memory, the maximum number of groups we can report things about is what fits in that byte. */ # define MAX_REGNUM 255 /* But patterns can have more than `MAX_REGNUM' registers. We just ignore the excess. */ typedef unsigned regnum_t; /* Macros for the compile stack. */ /* Since offsets can go either forwards or backwards, this type needs to be able to hold values from -(MAX_BUF_SIZE - 1) to MAX_BUF_SIZE - 1. */ /* int may be not enough when sizeof(int) == 2. 
*/ typedef long pattern_offset_t; typedef struct { pattern_offset_t begalt_offset; pattern_offset_t fixup_alt_jump; pattern_offset_t inner_group_offset; pattern_offset_t laststart_offset; regnum_t regnum; } compile_stack_elt_t; typedef struct { compile_stack_elt_t *stack; unsigned size; unsigned avail; /* Offset of next open position. */ } compile_stack_type; # define INIT_COMPILE_STACK_SIZE 32 # define COMPILE_STACK_EMPTY (compile_stack.avail == 0) # define COMPILE_STACK_FULL (compile_stack.avail == compile_stack.size) /* The next available element. */ # define COMPILE_STACK_TOP (compile_stack.stack[compile_stack.avail]) # endif /* not DEFINED_ONCE */ /* Set the bit for character C in a list. */ # ifndef DEFINED_ONCE # define SET_LIST_BIT(c) \ (b[((unsigned char) (c)) / BYTEWIDTH] \ |= 1 << (((unsigned char) c) % BYTEWIDTH)) # endif /* DEFINED_ONCE */ /* Get the next unsigned number in the uncompiled pattern. */ # define GET_UNSIGNED_NUMBER(num) \ { \ while (p != pend) \ { \ PATFETCH (c); \ if (c < '0' || c > '9') \ break; \ if (num <= RE_DUP_MAX) \ { \ if (num < 0) \ num = 0; \ num = num * 10 + c - '0'; \ } \ } \ } # ifndef DEFINED_ONCE # if defined _LIBC || WIDE_CHAR_SUPPORT /* The GNU C library provides support for user-defined character classes and the functions from ISO C amendement 1. */ # ifdef CHARCLASS_NAME_MAX # define CHAR_CLASS_MAX_LENGTH CHARCLASS_NAME_MAX # else /* This shouldn't happen but some implementation might still have this problem. Use a reasonable default value. */ # define CHAR_CLASS_MAX_LENGTH 256 # endif # ifdef _LIBC # define IS_CHAR_CLASS(string) __wctype (string) # else # define IS_CHAR_CLASS(string) wctype (string) # endif # else # define CHAR_CLASS_MAX_LENGTH 6 /* Namely, `xdigit'. */ # define IS_CHAR_CLASS(string) \ (STREQ (string, "alpha") || STREQ (string, "upper") \ || STREQ (string, "lower") || STREQ (string, "digit") \ || STREQ (string, "alnum") || STREQ (string, "xdigit") \ || STREQ (string, "space") || STREQ (string, "print") \ || STREQ (string, "punct") || STREQ (string, "graph") \ || STREQ (string, "cntrl") || STREQ (string, "blank")) # endif # endif /* DEFINED_ONCE */ # ifndef MATCH_MAY_ALLOCATE /* If we cannot allocate large objects within re_match_2_internal, we make the fail stack and register vectors global. The fail stack, we grow to the maximum size when a regexp is compiled. The register vectors, we adjust in size each time we compile a regexp, according to the number of registers it needs. */ static PREFIX(fail_stack_type) fail_stack; /* Size with which the following vectors are currently allocated. That is so we can make them bigger as needed, but never make them smaller. */ # ifdef DEFINED_ONCE static int regs_allocated_size; static const char ** regstart, ** regend; static const char ** old_regstart, ** old_regend; static const char **best_regstart, **best_regend; static const char **reg_dummy; # endif /* DEFINED_ONCE */ static PREFIX(register_info_type) *PREFIX(reg_info); static PREFIX(register_info_type) *PREFIX(reg_info_dummy); /* Make the register vectors big enough for NUM_REGS registers, but don't make them smaller. 
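   As a sketch of the intended behaviour (not extra code): after
   regex_grow_registers (10) followed by regex_grow_registers (4),
   regs_allocated_size stays 10 and the vectors keep their larger size;
   only a request above the current size triggers the RETALLOC_IF calls
   below.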
*/ static void PREFIX(regex_grow_registers) (num_regs) int num_regs; { if (num_regs > regs_allocated_size) { RETALLOC_IF (regstart, num_regs, const char *); RETALLOC_IF (regend, num_regs, const char *); RETALLOC_IF (old_regstart, num_regs, const char *); RETALLOC_IF (old_regend, num_regs, const char *); RETALLOC_IF (best_regstart, num_regs, const char *); RETALLOC_IF (best_regend, num_regs, const char *); RETALLOC_IF (PREFIX(reg_info), num_regs, PREFIX(register_info_type)); RETALLOC_IF (reg_dummy, num_regs, const char *); RETALLOC_IF (PREFIX(reg_info_dummy), num_regs, PREFIX(register_info_type)); regs_allocated_size = num_regs; } } # endif /* not MATCH_MAY_ALLOCATE */ # ifndef DEFINED_ONCE static boolean group_in_compile_stack _RE_ARGS ((compile_stack_type compile_stack, regnum_t regnum)); # endif /* not DEFINED_ONCE */ /* `regex_compile' compiles PATTERN (of length SIZE) according to SYNTAX. Returns one of error codes defined in `regex.h', or zero for success. Assumes the `allocated' (and perhaps `buffer') and `translate' fields are set in BUFP on entry. If it succeeds, results are put in BUFP (if it returns an error, the contents of BUFP are undefined): `buffer' is the compiled pattern; `syntax' is set to SYNTAX; `used' is set to the length of the compiled pattern; `fastmap_accurate' is zero; `re_nsub' is the number of subexpressions in PATTERN; `not_bol' and `not_eol' are zero; The `fastmap' and `newline_anchor' fields are neither examined nor set. */ /* Return, freeing storage we allocated. */ # ifdef WCHAR # define FREE_STACK_RETURN(value) \ return (free(pattern), free(mbs_offset), free(is_binary), free (compile_stack.stack), value) # else # define FREE_STACK_RETURN(value) \ return (free (compile_stack.stack), value) # endif /* WCHAR */ static reg_errcode_t PREFIX(regex_compile) (ARG_PREFIX(pattern), ARG_PREFIX(size), syntax, bufp) const char *ARG_PREFIX(pattern); size_t ARG_PREFIX(size); reg_syntax_t syntax; struct re_pattern_buffer *bufp; { /* We fetch characters from PATTERN here. Even though PATTERN is `char *' (i.e., signed), we declare these variables as unsigned, so they can be reliably used as array indices. */ register UCHAR_T c, c1; #ifdef WCHAR /* A temporary space to keep wchar_t pattern and compiled pattern. */ CHAR_T *pattern, *COMPILED_BUFFER_VAR; size_t size; /* offset buffer for optimization. See convert_mbs_to_wc. */ int *mbs_offset = NULL; /* It hold whether each wchar_t is binary data or not. */ char *is_binary = NULL; /* A flag whether exactn is handling binary data or not. */ char is_exactn_bin = FALSE; #endif /* WCHAR */ /* A random temporary spot in PATTERN. */ const CHAR_T *p1; /* Points to the end of the buffer, where we should append. */ register UCHAR_T *b; /* Keeps track of unclosed groups. */ compile_stack_type compile_stack; /* Points to the current (ending) position in the pattern. */ #ifdef WCHAR const CHAR_T *p; const CHAR_T *pend; #else /* BYTE */ const CHAR_T *p = pattern; const CHAR_T *pend = pattern + size; #endif /* WCHAR */ /* How to translate the characters in the pattern. */ RE_TRANSLATE_TYPE translate = bufp->translate; /* Address of the count-byte of the most recently inserted `exactn' command. This makes it possible to tell if a new exact-match character can be added to that command or if the character requires a new `exactn' command. */ UCHAR_T *pending_exact = 0; /* Address of start of the most recently finished expression. This tells, e.g., postfix * where to find the start of its operand. 
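   For instance, while compiling `ab*' the `exactn' for `b' begins at
   laststart, so the `*' case below knows where to insert its jumps;
   without it the `*' would have nothing to repeat.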
Reset at the beginning of groups and alternatives. */ UCHAR_T *laststart = 0; /* Address of beginning of regexp, or inside of last group. */ UCHAR_T *begalt; /* Address of the place where a forward jump should go to the end of the containing expression. Each alternative of an `or' -- except the last -- ends with a forward jump of this sort. */ UCHAR_T *fixup_alt_jump = 0; /* Counts open-groups as they are encountered. Remembered for the matching close-group on the compile stack, so the same register number is put in the stop_memory as the start_memory. */ regnum_t regnum = 0; #ifdef WCHAR /* Initialize the wchar_t PATTERN and offset_buffer. */ p = pend = pattern = TALLOC(csize + 1, CHAR_T); mbs_offset = TALLOC(csize + 1, int); is_binary = TALLOC(csize + 1, char); if (pattern == NULL || mbs_offset == NULL || is_binary == NULL) { free(pattern); free(mbs_offset); free(is_binary); return REG_ESPACE; } pattern[csize] = L'\0'; /* sentinel */ size = convert_mbs_to_wcs(pattern, cpattern, csize, mbs_offset, is_binary); pend = p + size; if (size < 0) { free(pattern); free(mbs_offset); free(is_binary); return REG_BADPAT; } #endif #ifdef DEBUG DEBUG_PRINT1 ("\nCompiling pattern: "); if (debug) { unsigned debug_count; for (debug_count = 0; debug_count < size; debug_count++) PUT_CHAR (pattern[debug_count]); putchar ('\n'); } #endif /* DEBUG */ /* Initialize the compile stack. */ compile_stack.stack = TALLOC (INIT_COMPILE_STACK_SIZE, compile_stack_elt_t); if (compile_stack.stack == NULL) { #ifdef WCHAR free(pattern); free(mbs_offset); free(is_binary); #endif return REG_ESPACE; } compile_stack.size = INIT_COMPILE_STACK_SIZE; compile_stack.avail = 0; /* Initialize the pattern buffer. */ bufp->syntax = syntax; bufp->fastmap_accurate = 0; bufp->not_bol = bufp->not_eol = 0; /* Set `used' to zero, so that if we return an error, the pattern printer (for debugging) will think there's no pattern. We reset it at the end. */ bufp->used = 0; /* Always count groups, whether or not bufp->no_sub is set. */ bufp->re_nsub = 0; #if !defined emacs && !defined SYNTAX_TABLE /* Initialize the syntax table. */ init_syntax_once (); #endif if (bufp->allocated == 0) { if (bufp->buffer) { /* If zero allocated, but buffer is non-null, try to realloc enough space. This loses if buffer's address is bogus, but that is the user's responsibility. */ #ifdef WCHAR /* Free bufp->buffer and allocate an array for wchar_t pattern buffer. */ free(bufp->buffer); COMPILED_BUFFER_VAR = TALLOC (INIT_BUF_SIZE/sizeof(UCHAR_T), UCHAR_T); #else RETALLOC (COMPILED_BUFFER_VAR, INIT_BUF_SIZE, UCHAR_T); #endif /* WCHAR */ } else { /* Caller did not allocate a buffer. Do it for them. */ COMPILED_BUFFER_VAR = TALLOC (INIT_BUF_SIZE / sizeof(UCHAR_T), UCHAR_T); } if (!COMPILED_BUFFER_VAR) FREE_STACK_RETURN (REG_ESPACE); #ifdef WCHAR bufp->buffer = (char*)COMPILED_BUFFER_VAR; #endif /* WCHAR */ bufp->allocated = INIT_BUF_SIZE; } #ifdef WCHAR else COMPILED_BUFFER_VAR = (UCHAR_T*) bufp->buffer; #endif begalt = b = COMPILED_BUFFER_VAR; /* Loop through the uncompiled pattern until we're at the end. */ while (p != pend) { PATFETCH (c); switch (c) { case '^': { if ( /* If at start of pattern, it's an operator. */ p == pattern + 1 /* If context independent, it's an operator. */ || syntax & RE_CONTEXT_INDEP_ANCHORS /* Otherwise, depends on what's come before. */ || PREFIX(at_begline_loc_p) (pattern, p, syntax)) BUF_PUSH (begline); else goto normal_char; } break; case '$': { if ( /* If at end of pattern, it's an operator. 
*/ p == pend /* If context independent, it's an operator. */ || syntax & RE_CONTEXT_INDEP_ANCHORS /* Otherwise, depends on what's next. */ || PREFIX(at_endline_loc_p) (p, pend, syntax)) BUF_PUSH (endline); else goto normal_char; } break; case '+': case '?': if ((syntax & RE_BK_PLUS_QM) || (syntax & RE_LIMITED_OPS)) goto normal_char; handle_plus: case '*': /* If there is no previous pattern... */ if (!laststart) { if (syntax & RE_CONTEXT_INVALID_OPS) FREE_STACK_RETURN (REG_BADRPT); else if (!(syntax & RE_CONTEXT_INDEP_OPS)) goto normal_char; } { /* Are we optimizing this jump? */ boolean keep_string_p = false; /* 1 means zero (many) matches is allowed. */ char zero_times_ok = 0, many_times_ok = 0; /* If there is a sequence of repetition chars, collapse it down to just one (the right one). We can't combine interval operators with these because of, e.g., `a{2}*', which should only match an even number of `a's. */ for (;;) { zero_times_ok |= c != '+'; many_times_ok |= c != '?'; if (p == pend) break; PATFETCH (c); if (c == '*' || (!(syntax & RE_BK_PLUS_QM) && (c == '+' || c == '?'))) ; else if (syntax & RE_BK_PLUS_QM && c == '\\') { if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); PATFETCH (c1); if (!(c1 == '+' || c1 == '?')) { PATUNFETCH; PATUNFETCH; break; } c = c1; } else { PATUNFETCH; break; } /* If we get here, we found another repeat character. */ } /* Star, etc. applied to an empty pattern is equivalent to an empty pattern. */ if (!laststart) break; /* Now we know whether or not zero matches is allowed and also whether or not two or more matches is allowed. */ if (many_times_ok) { /* More than one repetition is allowed, so put in at the end a backward relative jump from `b' to before the next jump we're going to put in below (which jumps from laststart to after this jump). But if we are at the `*' in the exact sequence `.*\n', insert an unconditional jump backwards to the ., instead of the beginning of the loop. This way we only push a failure point once, instead of every time through the loop. */ assert (p - 1 > pattern); /* Allocate the space for the jump. */ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); /* We know we are not at the first character of the pattern, because laststart was nonzero. And we've already incremented `p', by the way, to be the character after the `*'. Do we have to do something analogous here for null bytes, because of RE_DOT_NOT_NULL? */ if (TRANSLATE (*(p - 2)) == TRANSLATE ('.') && zero_times_ok && p < pend && TRANSLATE (*p) == TRANSLATE ('\n') && !(syntax & RE_DOT_NEWLINE)) { /* We have .*\n. */ STORE_JUMP (jump, b, laststart); keep_string_p = true; } else /* Anything else. */ STORE_JUMP (maybe_pop_jump, b, laststart - (1 + OFFSET_ADDRESS_SIZE)); /* We've added more stuff to the buffer. */ b += 1 + OFFSET_ADDRESS_SIZE; } /* On failure, jump from laststart to b + 3, which will be the end of the buffer after this jump is inserted. */ /* ifdef WCHAR, 'b + 1 + OFFSET_ADDRESS_SIZE' instead of 'b + 3'. */ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (keep_string_p ? on_failure_keep_string_jump : on_failure_jump, laststart, b + 1 + OFFSET_ADDRESS_SIZE); pending_exact = 0; b += 1 + OFFSET_ADDRESS_SIZE; if (!zero_times_ok) { /* At least one repetition is required, so insert a `dummy_failure_jump' before the initial `on_failure_jump' instruction of the loop. This effects a skip over that instruction the first time we hit that loop. 
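   As a rough sketch (single-byte form, OFFSET_ADDRESS_SIZE == 2), `a+'
   then comes out something like
       dummy_failure_jump  --> just past the on_failure_jump
       on_failure_jump     --> past the whole loop
       exactn 1 'a'
       maybe_pop_jump      --> back to the on_failure_jump
   whereas `a*' omits the dummy_failure_jump, since matching zero times
   is allowed there.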
*/ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (dummy_failure_jump, laststart, laststart + 2 + 2 * OFFSET_ADDRESS_SIZE); b += 1 + OFFSET_ADDRESS_SIZE; } } break; case '.': laststart = b; BUF_PUSH (anychar); break; case '[': { boolean had_char_class = false; #ifdef WCHAR CHAR_T range_start = 0xffffffff; #else unsigned int range_start = 0xffffffff; #endif if (p == pend) FREE_STACK_RETURN (REG_EBRACK); #ifdef WCHAR /* We assume a charset(_not) structure as a wchar_t array. charset[0] = (re_opcode_t) charset(_not) charset[1] = l (= length of char_classes) charset[2] = m (= length of collating_symbols) charset[3] = n (= length of equivalence_classes) charset[4] = o (= length of char_ranges) charset[5] = p (= length of chars) charset[6] = char_class (wctype_t) charset[6+CHAR_CLASS_SIZE] = char_class (wctype_t) ... charset[l+5] = char_class (wctype_t) charset[l+6] = collating_symbol (wchar_t) ... charset[l+m+5] = collating_symbol (wchar_t) ifdef _LIBC we use the index if _NL_COLLATE_SYMB_EXTRAMB instead of wchar_t string. charset[l+m+6] = equivalence_classes (wchar_t) ... charset[l+m+n+5] = equivalence_classes (wchar_t) ifdef _LIBC we use the index in _NL_COLLATE_WEIGHT instead of wchar_t string. charset[l+m+n+6] = range_start charset[l+m+n+7] = range_end ... charset[l+m+n+2o+4] = range_start charset[l+m+n+2o+5] = range_end ifdef _LIBC we use the value looked up in _NL_COLLATE_COLLSEQ instead of wchar_t character. charset[l+m+n+2o+6] = char ... charset[l+m+n+2o+p+5] = char */ /* We need at least 6 spaces: the opcode, the length of char_classes, the length of collating_symbols, the length of equivalence_classes, the length of char_ranges, the length of chars. */ GET_BUFFER_SPACE (6); /* Save b as laststart. And We use laststart as the pointer to the first element of the charset here. In other words, laststart[i] indicates charset[i]. */ laststart = b; /* We test `*p == '^' twice, instead of using an if statement, so we only need one BUF_PUSH. */ BUF_PUSH (*p == '^' ? charset_not : charset); if (*p == '^') p++; /* Push the length of char_classes, the length of collating_symbols, the length of equivalence_classes, the length of char_ranges and the length of chars. */ BUF_PUSH_3 (0, 0, 0); BUF_PUSH_2 (0, 0); /* Remember the first position in the bracket expression. */ p1 = p; /* charset_not matches newline according to a syntax bit. */ if ((re_opcode_t) b[-6] == charset_not && (syntax & RE_HAT_LISTS_NOT_NEWLINE)) { BUF_PUSH('\n'); laststart[5]++; /* Update the length of characters */ } /* Read in characters and ranges, setting map bits. */ for (;;) { if (p == pend) FREE_STACK_RETURN (REG_EBRACK); PATFETCH (c); /* \ might escape characters inside [...] and [^...]. */ if ((syntax & RE_BACKSLASH_ESCAPE_IN_LISTS) && c == '\\') { if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); PATFETCH (c1); BUF_PUSH(c1); laststart[5]++; /* Update the length of chars */ range_start = c1; continue; } /* Could be the end of the bracket expression. If it's not (i.e., when the bracket expression is `[]' so far), the ']' character bit gets set way below. */ if (c == ']' && p != p1 + 1) break; /* Look ahead to see if it's a range when the last thing was a character class. */ if (had_char_class && c == '-' && *p != ']') FREE_STACK_RETURN (REG_ERANGE); /* Look ahead to see if it's a range when the last thing was a character: if this is a hyphen not at the beginning or the end of a list, then it's the range operator. 
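   For example, `[a-z]' contains a range, while the hyphens in `[-a]',
   `[^-a]' and `[a-]' are literal characters, which is exactly what the
   test below checks for.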
*/ if (c == '-' && !(p - 2 >= pattern && p[-2] == '[') && !(p - 3 >= pattern && p[-3] == '[' && p[-2] == '^') && *p != ']') { reg_errcode_t ret; /* Allocate the space for range_start and range_end. */ GET_BUFFER_SPACE (2); /* Update the pointer to indicate end of buffer. */ b += 2; ret = wcs_compile_range (range_start, &p, pend, translate, syntax, b, laststart); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } else if (p[0] == '-' && p[1] != ']') { /* This handles ranges made up of characters only. */ reg_errcode_t ret; /* Move past the `-'. */ PATFETCH (c1); /* Allocate the space for range_start and range_end. */ GET_BUFFER_SPACE (2); /* Update the pointer to indicate end of buffer. */ b += 2; ret = wcs_compile_range (c, &p, pend, translate, syntax, b, laststart); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } /* See if we're at the beginning of a possible character class. */ else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == ':') { /* Leave room for the null. */ char str[CHAR_CLASS_MAX_LENGTH + 1]; PATFETCH (c); c1 = 0; /* If pattern is `[[:'. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == ':' && *p == ']') || p == pend) break; if (c1 < CHAR_CLASS_MAX_LENGTH) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; /* If isn't a word bracketed by `[:' and `:]': undo the ending character, the letters, and leave the leading `:' and `[' (but store them as character). */ if (c == ':' && *p == ']') { wctype_t wt; uintptr_t alignedp; /* Query the character class as wctype_t. */ wt = IS_CHAR_CLASS (str); if (wt == 0) FREE_STACK_RETURN (REG_ECTYPE); /* Throw away the ] at the end of the character class. */ PATFETCH (c); if (p == pend) FREE_STACK_RETURN (REG_EBRACK); /* Allocate the space for character class. */ GET_BUFFER_SPACE(CHAR_CLASS_SIZE); /* Update the pointer to indicate end of buffer. */ b += CHAR_CLASS_SIZE; /* Move data which follow character classes not to violate the data. */ insert_space(CHAR_CLASS_SIZE, laststart + 6 + laststart[1], b - 1); alignedp = ((uintptr_t)(laststart + 6 + laststart[1]) + __alignof__(wctype_t) - 1) & ~(uintptr_t)(__alignof__(wctype_t) - 1); /* Store the character class. */ *((wctype_t*)alignedp) = wt; /* Update length of char_classes */ laststart[1] += CHAR_CLASS_SIZE; had_char_class = true; } else { c1++; while (c1--) PATUNFETCH; BUF_PUSH ('['); BUF_PUSH (':'); laststart[5] += 2; /* Update the length of characters */ range_start = ':'; had_char_class = false; } } else if (syntax & RE_CHAR_CLASSES && c == '[' && (*p == '=' || *p == '.')) { CHAR_T str[128]; /* Should be large enough. */ CHAR_T delim = *p; /* '=' or '.' */ # ifdef _LIBC uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif PATFETCH (c); c1 = 0; /* If pattern is `[[=' or '[[.'. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == delim && *p == ']') || p == pend) break; if (c1 < sizeof (str) - 1) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; if (c == delim && *p == ']' && str[0] != '\0') { unsigned int i, offset; /* If we have no collation data we use the default collation in which each character is in a class by itself. It also means that ASCII is the character set and therefore we cannot have character with more than one byte in the multibyte representation. 
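   So with nrules == 0, something like `[[=a=]]' must name exactly one
   character (otherwise REG_ECOLLATE below) and effectively stands for
   just that character, since each character is then a class by itself.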
*/ /* If not defined _LIBC, we push the name and `\0' for the sake of matching performance. */ int datasize = c1 + 1; # ifdef _LIBC int32_t idx = 0; if (nrules == 0) # endif { if (c1 != 1) FREE_STACK_RETURN (REG_ECOLLATE); } # ifdef _LIBC else { const int32_t *table; const int32_t *weights; const int32_t *extra; const int32_t *indirect; wint_t *cp; /* This #include defines a local function! */ if(delim == '=') { /* We push the index for equivalence class. */ cp = (wint_t*)str; table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEWC); weights = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTWC); extra = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAWC); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTWC); idx = findidx ((const wint_t**)&cp); if (idx == 0 || cp < (wint_t*) str + c1) /* This is no valid character. */ FREE_STACK_RETURN (REG_ECOLLATE); str[0] = (wchar_t)idx; } else /* delim == '.' */ { /* We push collation sequence value for collating symbol. */ int32_t table_size; const int32_t *symb_table; const unsigned char *extra; int32_t idx; int32_t elem; int32_t second; int32_t hash; char char_str[c1]; /* We have to convert the name to a single-byte string. This is possible since the names consist of ASCII characters and the internal representation is UCS4. */ for (i = 0; i < c1; ++i) char_str[i] = str[i]; table_size = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_SYMB_HASH_SIZEMB); symb_table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_TABLEMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); /* Locate the character in the hashing table. */ hash = elem_hash (char_str, c1); idx = 0; elem = hash % table_size; second = hash % (table_size - 2); while (symb_table[2 * elem] != 0) { /* First compare the hashing value. */ if (symb_table[2 * elem] == hash && c1 == extra[symb_table[2 * elem + 1]] && memcmp (char_str, &extra[symb_table[2 * elem + 1] + 1], c1) == 0) { /* Yep, this is the entry. */ idx = symb_table[2 * elem + 1]; idx += 1 + extra[idx]; break; } /* Next entry. */ elem += second; } if (symb_table[2 * elem] != 0) { /* Compute the index of the byte sequence in the table. */ idx += 1 + extra[idx]; /* Adjust for the alignment. */ idx = (idx + 3) & ~3; str[0] = (wchar_t) idx + 4; } else if (symb_table[2 * elem] == 0 && c1 == 1) { /* No valid character. Match it as a single byte character. */ had_char_class = false; BUF_PUSH(str[0]); /* Update the length of characters */ laststart[5]++; range_start = str[0]; /* Throw away the ] at the end of the collating symbol. */ PATFETCH (c); /* exit from the switch block. */ continue; } else FREE_STACK_RETURN (REG_ECOLLATE); } datasize = 1; } # endif /* Throw away the ] at the end of the equivalence class (or collating symbol). */ PATFETCH (c); /* Allocate the space for the equivalence class (or collating symbol) (and '\0' if needed). */ GET_BUFFER_SPACE(datasize); /* Update the pointer to indicate end of buffer. */ b += datasize; if (delim == '=') { /* equivalence class */ /* Calculate the offset of char_ranges, which is next to equivalence_classes. */ offset = laststart[1] + laststart[2] + laststart[3] +6; /* Insert space. */ insert_space(datasize, laststart + offset, b - 1); /* Write the equivalence_class and \0. */ for (i = 0 ; i < datasize ; i++) laststart[offset + i] = str[i]; /* Update the length of equivalence_classes. */ laststart[3] += datasize; had_char_class = true; } else /* delim == '.' 
*/ { /* collating symbol */ /* Calculate the offset of the equivalence_classes, which is next to collating_symbols. */ offset = laststart[1] + laststart[2] + 6; /* Insert space and write the collationg_symbol and \0. */ insert_space(datasize, laststart + offset, b-1); for (i = 0 ; i < datasize ; i++) laststart[offset + i] = str[i]; /* In re_match_2_internal if range_start < -1, we assume -range_start is the offset of the collating symbol which is specified as the character of the range start. So we assign -(laststart[1] + laststart[2] + 6) to range_start. */ range_start = -(laststart[1] + laststart[2] + 6); /* Update the length of collating_symbol. */ laststart[2] += datasize; had_char_class = false; } } else { c1++; while (c1--) PATUNFETCH; BUF_PUSH ('['); BUF_PUSH (delim); laststart[5] += 2; /* Update the length of characters */ range_start = delim; had_char_class = false; } } else { had_char_class = false; BUF_PUSH(c); laststart[5]++; /* Update the length of characters */ range_start = c; } } #else /* BYTE */ /* Ensure that we have enough space to push a charset: the opcode, the length count, and the bitset; 34 bytes in all. */ GET_BUFFER_SPACE (34); laststart = b; /* We test `*p == '^' twice, instead of using an if statement, so we only need one BUF_PUSH. */ BUF_PUSH (*p == '^' ? charset_not : charset); if (*p == '^') p++; /* Remember the first position in the bracket expression. */ p1 = p; /* Push the number of bytes in the bitmap. */ BUF_PUSH ((1 << BYTEWIDTH) / BYTEWIDTH); /* Clear the whole map. */ bzero (b, (1 << BYTEWIDTH) / BYTEWIDTH); /* charset_not matches newline according to a syntax bit. */ if ((re_opcode_t) b[-2] == charset_not && (syntax & RE_HAT_LISTS_NOT_NEWLINE)) SET_LIST_BIT ('\n'); /* Read in characters and ranges, setting map bits. */ for (;;) { if (p == pend) FREE_STACK_RETURN (REG_EBRACK); PATFETCH (c); /* \ might escape characters inside [...] and [^...]. */ if ((syntax & RE_BACKSLASH_ESCAPE_IN_LISTS) && c == '\\') { if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); PATFETCH (c1); SET_LIST_BIT (c1); range_start = c1; continue; } /* Could be the end of the bracket expression. If it's not (i.e., when the bracket expression is `[]' so far), the ']' character bit gets set way below. */ if (c == ']' && p != p1 + 1) break; /* Look ahead to see if it's a range when the last thing was a character class. */ if (had_char_class && c == '-' && *p != ']') FREE_STACK_RETURN (REG_ERANGE); /* Look ahead to see if it's a range when the last thing was a character: if this is a hyphen not at the beginning or the end of a list, then it's the range operator. */ if (c == '-' && !(p - 2 >= pattern && p[-2] == '[') && !(p - 3 >= pattern && p[-3] == '[' && p[-2] == '^') && *p != ']') { reg_errcode_t ret = byte_compile_range (range_start, &p, pend, translate, syntax, b); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } else if (p[0] == '-' && p[1] != ']') { /* This handles ranges made up of characters only. */ reg_errcode_t ret; /* Move past the `-'. */ PATFETCH (c1); ret = byte_compile_range (c, &p, pend, translate, syntax, b); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } /* See if we're at the beginning of a possible character class. */ else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == ':') { /* Leave room for the null. */ char str[CHAR_CLASS_MAX_LENGTH + 1]; PATFETCH (c); c1 = 0; /* If pattern is `[[:'. 
*/ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == ':' && *p == ']') || p == pend) break; if (c1 < CHAR_CLASS_MAX_LENGTH) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; /* If isn't a word bracketed by `[:' and `:]': undo the ending character, the letters, and leave the leading `:' and `[' (but set bits for them). */ if (c == ':' && *p == ']') { # if defined _LIBC || WIDE_CHAR_SUPPORT boolean is_lower = STREQ (str, "lower"); boolean is_upper = STREQ (str, "upper"); wctype_t wt; int ch; wt = IS_CHAR_CLASS (str); if (wt == 0) FREE_STACK_RETURN (REG_ECTYPE); /* Throw away the ] at the end of the character class. */ PATFETCH (c); if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (ch = 0; ch < 1 << BYTEWIDTH; ++ch) { # ifdef _LIBC if (__iswctype (__btowc (ch), wt)) SET_LIST_BIT (ch); # else if (iswctype (btowc (ch), wt)) SET_LIST_BIT (ch); # endif if (translate && (is_upper || is_lower) && (ISUPPER (ch) || ISLOWER (ch))) SET_LIST_BIT (ch); } had_char_class = true; # else int ch; boolean is_alnum = STREQ (str, "alnum"); boolean is_alpha = STREQ (str, "alpha"); boolean is_blank = STREQ (str, "blank"); boolean is_cntrl = STREQ (str, "cntrl"); boolean is_digit = STREQ (str, "digit"); boolean is_graph = STREQ (str, "graph"); boolean is_lower = STREQ (str, "lower"); boolean is_print = STREQ (str, "print"); boolean is_punct = STREQ (str, "punct"); boolean is_space = STREQ (str, "space"); boolean is_upper = STREQ (str, "upper"); boolean is_xdigit = STREQ (str, "xdigit"); if (!IS_CHAR_CLASS (str)) FREE_STACK_RETURN (REG_ECTYPE); /* Throw away the ] at the end of the character class. */ PATFETCH (c); if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (ch = 0; ch < 1 << BYTEWIDTH; ch++) { /* This was split into 3 if's to avoid an arbitrary limit in some compiler. */ if ( (is_alnum && ISALNUM (ch)) || (is_alpha && ISALPHA (ch)) || (is_blank && ISBLANK (ch)) || (is_cntrl && ISCNTRL (ch))) SET_LIST_BIT (ch); if ( (is_digit && ISDIGIT (ch)) || (is_graph && ISGRAPH (ch)) || (is_lower && ISLOWER (ch)) || (is_print && ISPRINT (ch))) SET_LIST_BIT (ch); if ( (is_punct && ISPUNCT (ch)) || (is_space && ISSPACE (ch)) || (is_upper && ISUPPER (ch)) || (is_xdigit && ISXDIGIT (ch))) SET_LIST_BIT (ch); if ( translate && (is_upper || is_lower) && (ISUPPER (ch) || ISLOWER (ch))) SET_LIST_BIT (ch); } had_char_class = true; # endif /* libc || wctype.h */ } else { c1++; while (c1--) PATUNFETCH; SET_LIST_BIT ('['); SET_LIST_BIT (':'); range_start = ':'; had_char_class = false; } } else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == '=') { unsigned char str[MB_LEN_MAX + 1]; # ifdef _LIBC uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif PATFETCH (c); c1 = 0; /* If pattern is `[[='. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == '=' && *p == ']') || p == pend) break; if (c1 < MB_LEN_MAX) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; if (c == '=' && *p == ']' && str[0] != '\0') { /* If we have no collation data we use the default collation in which each character is in a class by itself. It also means that ASCII is the character set and therefore we cannot have character with more than one byte in the multibyte representation. */ # ifdef _LIBC if (nrules == 0) # endif { if (c1 != 1) FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Set the bit for the character. 
*/ SET_LIST_BIT (str[0]); } # ifdef _LIBC else { /* Try to match the byte sequence in `str' against those known to the collate implementation. First find out whether the bytes in `str' are actually from exactly one character. */ const int32_t *table; const unsigned char *weights; const unsigned char *extra; const int32_t *indirect; int32_t idx; const unsigned char *cp = str; int ch; /* This #include defines a local function! */ table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEMB); weights = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAMB); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTMB); idx = findidx (&cp); if (idx == 0 || cp < str + c1) /* This is no valid character. */ FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Now we have to go throught the whole table and find all characters which have the same first level weight. XXX Note that this is not entirely correct. we would have to match multibyte sequences but this is not possible with the current implementation. */ for (ch = 1; ch < 256; ++ch) /* XXX This test would have to be changed if we would allow matching multibyte sequences. */ if (table[ch] > 0) { int32_t idx2 = table[ch]; size_t len = weights[idx2]; /* Test whether the lenghts match. */ if (weights[idx] == len) { /* They do. New compare the bytes of the weight. */ size_t cnt = 0; while (cnt < len && (weights[idx + 1 + cnt] == weights[idx2 + 1 + cnt])) ++cnt; if (cnt == len) /* They match. Mark the character as acceptable. */ SET_LIST_BIT (ch); } } } # endif had_char_class = true; } else { c1++; while (c1--) PATUNFETCH; SET_LIST_BIT ('['); SET_LIST_BIT ('='); range_start = '='; had_char_class = false; } } else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == '.') { unsigned char str[128]; /* Should be large enough. */ # ifdef _LIBC uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif PATFETCH (c); c1 = 0; /* If pattern is `[[.'. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == '.' && *p == ']') || p == pend) break; if (c1 < sizeof (str)) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; if (c == '.' && *p == ']' && str[0] != '\0') { /* If we have no collation data we use the default collation in which each character is the name for its own class which contains only the one character. It also means that ASCII is the character set and therefore we cannot have character with more than one byte in the multibyte representation. */ # ifdef _LIBC if (nrules == 0) # endif { if (c1 != 1) FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Set the bit for the character. */ SET_LIST_BIT (str[0]); range_start = ((const unsigned char *) str)[0]; } # ifdef _LIBC else { /* Try to match the byte sequence in `str' against those known to the collate implementation. First find out whether the bytes in `str' are actually from exactly one character. 
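   As an illustration of what follows: the name between `[.' and `.]' is
   hashed and looked up in the locale's collating-symbol table; if it is
   found, every byte of the symbol's expansion is added to the bitmap,
   and an unknown name yields REG_ECOLLATE.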
*/ int32_t table_size; const int32_t *symb_table; const unsigned char *extra; int32_t idx; int32_t elem; int32_t second; int32_t hash; table_size = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_SYMB_HASH_SIZEMB); symb_table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_TABLEMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); /* Locate the character in the hashing table. */ hash = elem_hash (str, c1); idx = 0; elem = hash % table_size; second = hash % (table_size - 2); while (symb_table[2 * elem] != 0) { /* First compare the hashing value. */ if (symb_table[2 * elem] == hash && c1 == extra[symb_table[2 * elem + 1]] && memcmp (str, &extra[symb_table[2 * elem + 1] + 1], c1) == 0) { /* Yep, this is the entry. */ idx = symb_table[2 * elem + 1]; idx += 1 + extra[idx]; break; } /* Next entry. */ elem += second; } if (symb_table[2 * elem] == 0) /* This is no valid character. */ FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Now add the multibyte character(s) we found to the accept list. XXX Note that this is not entirely correct. we would have to match multibyte sequences but this is not possible with the current implementation. Also, we have to match collating symbols, which expand to more than one file, as a whole and not allow the individual bytes. */ c1 = extra[idx++]; if (c1 == 1) range_start = extra[idx]; while (c1-- > 0) { SET_LIST_BIT (extra[idx]); ++idx; } } # endif had_char_class = false; } else { c1++; while (c1--) PATUNFETCH; SET_LIST_BIT ('['); SET_LIST_BIT ('.'); range_start = '.'; had_char_class = false; } } else { had_char_class = false; SET_LIST_BIT (c); range_start = c; } } /* Discard any (non)matching list bytes that are all 0 at the end of the map. Decrease the map-length byte too. */ while ((int) b[-1] > 0 && b[b[-1] - 1] == 0) b[-1]--; b += b[-1]; #endif /* WCHAR */ } break; case '(': if (syntax & RE_NO_BK_PARENS) goto handle_open; else goto normal_char; case ')': if (syntax & RE_NO_BK_PARENS) goto handle_close; else goto normal_char; case '\n': if (syntax & RE_NEWLINE_ALT) goto handle_alt; else goto normal_char; case '|': if (syntax & RE_NO_BK_VBAR) goto handle_alt; else goto normal_char; case '{': if (syntax & RE_INTERVALS && syntax & RE_NO_BK_BRACES) goto handle_interval; else goto normal_char; case '\\': if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); /* Do not translate the character after the \, so that we can distinguish, e.g., \B from \b, even if we normally would translate, e.g., B to b. */ PATFETCH_RAW (c); switch (c) { case '(': if (syntax & RE_NO_BK_PARENS) goto normal_backslash; handle_open: bufp->re_nsub++; regnum++; if (COMPILE_STACK_FULL) { RETALLOC (compile_stack.stack, compile_stack.size << 1, compile_stack_elt_t); if (compile_stack.stack == NULL) return REG_ESPACE; compile_stack.size <<= 1; } /* These are the values to restore when we hit end of this group. They are all relative offsets, so that if the whole pattern moves because of realloc, they will still be valid. */ COMPILE_STACK_TOP.begalt_offset = begalt - COMPILED_BUFFER_VAR; COMPILE_STACK_TOP.fixup_alt_jump = fixup_alt_jump ? fixup_alt_jump - COMPILED_BUFFER_VAR + 1 : 0; COMPILE_STACK_TOP.laststart_offset = b - COMPILED_BUFFER_VAR; COMPILE_STACK_TOP.regnum = regnum; /* We will eventually replace the 0 with the number of groups inner to this one. But do not push a start_memory for groups beyond the last one we can represent in the compiled pattern. 
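   In other words, the first group below gets `start_memory 1 0', with the
   trailing 0 patched at the matching close paren to the count of groups
   nested inside it; groups past MAX_REGNUM still bump re_nsub but emit no
   start_memory or stop_memory and so are never reported back.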
*/ if (regnum <= MAX_REGNUM) { COMPILE_STACK_TOP.inner_group_offset = b - COMPILED_BUFFER_VAR + 2; BUF_PUSH_3 (start_memory, regnum, 0); } compile_stack.avail++; fixup_alt_jump = 0; laststart = 0; begalt = b; /* If we've reached MAX_REGNUM groups, then this open won't actually generate any code, so we'll have to clear pending_exact explicitly. */ pending_exact = 0; break; case ')': if (syntax & RE_NO_BK_PARENS) goto normal_backslash; if (COMPILE_STACK_EMPTY) { if (syntax & RE_UNMATCHED_RIGHT_PAREN_ORD) goto normal_backslash; else FREE_STACK_RETURN (REG_ERPAREN); } handle_close: if (fixup_alt_jump) { /* Push a dummy failure point at the end of the alternative for a possible future `pop_failure_jump' to pop. See comments at `push_dummy_failure' in `re_match_2'. */ BUF_PUSH (push_dummy_failure); /* We allocated space for this jump when we assigned to `fixup_alt_jump', in the `handle_alt' case below. */ STORE_JUMP (jump_past_alt, fixup_alt_jump, b - 1); } /* See similar code for backslashed left paren above. */ if (COMPILE_STACK_EMPTY) { if (syntax & RE_UNMATCHED_RIGHT_PAREN_ORD) goto normal_char; else FREE_STACK_RETURN (REG_ERPAREN); } /* Since we just checked for an empty stack above, this ``can't happen''. */ assert (compile_stack.avail != 0); { /* We don't just want to restore into `regnum', because later groups should continue to be numbered higher, as in `(ab)c(de)' -- the second group is #2. */ regnum_t this_group_regnum; compile_stack.avail--; begalt = COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.begalt_offset; fixup_alt_jump = COMPILE_STACK_TOP.fixup_alt_jump ? COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.fixup_alt_jump - 1 : 0; laststart = COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.laststart_offset; this_group_regnum = COMPILE_STACK_TOP.regnum; /* If we've reached MAX_REGNUM groups, then this open won't actually generate any code, so we'll have to clear pending_exact explicitly. */ pending_exact = 0; /* We're at the end of the group, so now we know how many groups were inside this one. */ if (this_group_regnum <= MAX_REGNUM) { UCHAR_T *inner_group_loc = COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.inner_group_offset; *inner_group_loc = regnum - this_group_regnum; BUF_PUSH_3 (stop_memory, this_group_regnum, regnum - this_group_regnum); } } break; case '|': /* `\|'. */ if (syntax & RE_LIMITED_OPS || syntax & RE_NO_BK_VBAR) goto normal_backslash; handle_alt: if (syntax & RE_LIMITED_OPS) goto normal_char; /* Insert before the previous alternative a jump which jumps to this alternative if the former fails. */ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (on_failure_jump, begalt, b + 2 + 2 * OFFSET_ADDRESS_SIZE); pending_exact = 0; b += 1 + OFFSET_ADDRESS_SIZE; /* The alternative before this one has a jump after it which gets executed if it gets matched. Adjust that jump so it will jump to this alternative's analogous jump (put in below, which in turn will jump to the next (if any) alternative's such jump, etc.). The last such jump jumps to the correct final destination. A picture: _____ _____ | | | | | v | v a | b | c If we are at `b', then fixup_alt_jump right now points to a three-byte space after `a'. We'll put in the jump, set fixup_alt_jump to right after `b', and leave behind three bytes which we'll fill in when we get to after `c'. */ if (fixup_alt_jump) STORE_JUMP (jump_past_alt, fixup_alt_jump, b); /* Mark and leave space for a jump after this alternative, to be filled in later either by next alternative or when know we're at the end of a series of alternatives. 
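   Putting it together, `a\|b' (or `a|b' with RE_NO_BK_VBAR) compiles
   roughly to
       on_failure_jump  --> start of the `b' branch
       exactn 1 'a'
       jump_past_alt    --> past the end of `b'
       exactn 1 'b'
   where the jump_past_alt offset is the one recorded here in
   fixup_alt_jump and patched once the end of the alternatives is seen.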
*/ fixup_alt_jump = b; GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); b += 1 + OFFSET_ADDRESS_SIZE; laststart = 0; begalt = b; break; case '{': /* If \{ is a literal. */ if (!(syntax & RE_INTERVALS) /* If we're at `\{' and it's not the open-interval operator. */ || (syntax & RE_NO_BK_BRACES)) goto normal_backslash; handle_interval: { /* If got here, then the syntax allows intervals. */ /* At least (most) this many matches must be made. */ int lower_bound = -1, upper_bound = -1; /* Place in the uncompiled pattern (i.e., just after the '{') to go back to if the interval is invalid. */ const CHAR_T *beg_interval = p; if (p == pend) goto invalid_interval; GET_UNSIGNED_NUMBER (lower_bound); if (c == ',') { GET_UNSIGNED_NUMBER (upper_bound); if (upper_bound < 0) upper_bound = RE_DUP_MAX; } else /* Interval such as `{1}' => match exactly once. */ upper_bound = lower_bound; if (! (0 <= lower_bound && lower_bound <= upper_bound)) goto invalid_interval; if (!(syntax & RE_NO_BK_BRACES)) { if (c != '\\' || p == pend) goto invalid_interval; PATFETCH (c); } if (c != '}') goto invalid_interval; /* If it's invalid to have no preceding re. */ if (!laststart) { if (syntax & RE_CONTEXT_INVALID_OPS && !(syntax & RE_INVALID_INTERVAL_ORD)) FREE_STACK_RETURN (REG_BADRPT); else if (syntax & RE_CONTEXT_INDEP_OPS) laststart = b; else goto unfetch_interval; } /* We just parsed a valid interval. */ if (RE_DUP_MAX < upper_bound) FREE_STACK_RETURN (REG_BADBR); /* If the upper bound is zero, don't want to succeed at all; jump from `laststart' to `b + 3', which will be the end of the buffer after we insert the jump. */ /* ifdef WCHAR, 'b + 1 + OFFSET_ADDRESS_SIZE' instead of 'b + 3'. */ if (upper_bound == 0) { GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (jump, laststart, b + 1 + OFFSET_ADDRESS_SIZE); b += 1 + OFFSET_ADDRESS_SIZE; } /* Otherwise, we have a nontrivial interval. When we're all done, the pattern will look like: set_number_at set_number_at succeed_n jump_n (The upper bound and `jump_n' are omitted if `upper_bound' is 1, though.) */ else { /* If the upper bound is > 1, we need to insert more at the end of the loop. */ unsigned nbytes = 2 + 4 * OFFSET_ADDRESS_SIZE + (upper_bound > 1) * (2 + 4 * OFFSET_ADDRESS_SIZE); GET_BUFFER_SPACE (nbytes); /* Initialize lower bound of the `succeed_n', even though it will be set during matching by its attendant `set_number_at' (inserted next), because `re_compile_fastmap' needs to know. Jump to the `jump_n' we might insert below. */ INSERT_JUMP2 (succeed_n, laststart, b + 1 + 2 * OFFSET_ADDRESS_SIZE + (upper_bound > 1) * (1 + 2 * OFFSET_ADDRESS_SIZE) , lower_bound); b += 1 + 2 * OFFSET_ADDRESS_SIZE; /* Code to initialize the lower bound. Insert before the `succeed_n'. The `5' is the last two bytes of this `set_number_at', plus 3 bytes of the following `succeed_n'. */ /* ifdef WCHAR, The '1+2*OFFSET_ADDRESS_SIZE' is the 'set_number_at', plus '1+OFFSET_ADDRESS_SIZE' of the following `succeed_n'. */ PREFIX(insert_op2) (set_number_at, laststart, 1 + 2 * OFFSET_ADDRESS_SIZE, lower_bound, b); b += 1 + 2 * OFFSET_ADDRESS_SIZE; if (upper_bound > 1) { /* More than one repetition is allowed, so append a backward jump to the `succeed_n' that starts this interval. When we've reached this during matching, we'll have matched the interval once, so jump back only `upper_bound - 1' times. 
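   For a sketch of the final shape (as the earlier comment describes),
   `a{2,4}' ends up roughly as
       set_number_at   (reinitialises the jump_n counter to 3)
       set_number_at   (reinitialises the succeed_n counter to 2)
       succeed_n       (lower bound 2)
       exactn 1 'a'
       jump_n          (back to the succeed_n, at most 3 more times)
   with the jump_n and its set_number_at omitted when the upper bound
   is 1.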
*/ STORE_JUMP2 (jump_n, b, laststart + 2 * OFFSET_ADDRESS_SIZE + 1, upper_bound - 1); b += 1 + 2 * OFFSET_ADDRESS_SIZE; /* The location we want to set is the second parameter of the `jump_n'; that is `b-2' as an absolute address. `laststart' will be the `set_number_at' we're about to insert; `laststart+3' the number to set, the source for the relative address. But we are inserting into the middle of the pattern -- so everything is getting moved up by 5. Conclusion: (b - 2) - (laststart + 3) + 5, i.e., b - laststart. We insert this at the beginning of the loop so that if we fail during matching, we'll reinitialize the bounds. */ PREFIX(insert_op2) (set_number_at, laststart, b - laststart, upper_bound - 1, b); b += 1 + 2 * OFFSET_ADDRESS_SIZE; } } pending_exact = 0; break; invalid_interval: if (!(syntax & RE_INVALID_INTERVAL_ORD)) FREE_STACK_RETURN (p == pend ? REG_EBRACE : REG_BADBR); unfetch_interval: /* Match the characters as literals. */ p = beg_interval; c = '{'; if (syntax & RE_NO_BK_BRACES) goto normal_char; else goto normal_backslash; } #ifdef emacs /* There is no way to specify the before_dot and after_dot operators. rms says this is ok. --karl */ case '=': BUF_PUSH (at_dot); break; case 's': laststart = b; PATFETCH (c); BUF_PUSH_2 (syntaxspec, syntax_spec_code[c]); break; case 'S': laststart = b; PATFETCH (c); BUF_PUSH_2 (notsyntaxspec, syntax_spec_code[c]); break; #endif /* emacs */ case 'w': if (syntax & RE_NO_GNU_OPS) goto normal_char; laststart = b; BUF_PUSH (wordchar); break; case 'W': if (syntax & RE_NO_GNU_OPS) goto normal_char; laststart = b; BUF_PUSH (notwordchar); break; case '<': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (wordbeg); break; case '>': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (wordend); break; case 'b': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (wordbound); break; case 'B': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (notwordbound); break; case '`': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (begbuf); break; case '\'': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (endbuf); break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (syntax & RE_NO_BK_REFS) goto normal_char; c1 = c - '0'; if (c1 > regnum) FREE_STACK_RETURN (REG_ESUBREG); /* Can't back reference to a subexpression if inside of it. */ if (group_in_compile_stack (compile_stack, (regnum_t) c1)) goto normal_char; laststart = b; BUF_PUSH_2 (duplicate, c1); break; case '+': case '?': if (syntax & RE_BK_PLUS_QM) goto handle_plus; else goto normal_backslash; default: normal_backslash: /* You might think it would be useful for \ to mean not to translate; but if we don't translate it it will never match anything. */ c = TRANSLATE (c); goto normal_char; } break; default: /* Expects the character in `c'. */ normal_char: /* If no exactn currently being built. */ if (!pending_exact #ifdef WCHAR /* If last exactn handle binary(or character) and new exactn handle character(or binary). */ || is_exactn_bin != is_binary[p - 1 - pattern] #endif /* WCHAR */ /* If last exactn not at current position. */ || pending_exact + *pending_exact + 1 != b /* We have only one byte following the exactn for the count. */ || *pending_exact == (1 << BYTEWIDTH) - 1 /* If followed by a repetition operator. */ || *p == '*' || *p == '^' || ((syntax & RE_BK_PLUS_QM) ? *p == '\\' && (p[1] == '+' || p[1] == '?') : (*p == '+' || *p == '?')) || ((syntax & RE_INTERVALS) && ((syntax & RE_NO_BK_BRACES) ? 
*p == '{' : (p[0] == '\\' && p[1] == '{')))) { /* Start building a new exactn. */ laststart = b; #ifdef WCHAR /* Is this exactn binary data or character? */ is_exactn_bin = is_binary[p - 1 - pattern]; if (is_exactn_bin) BUF_PUSH_2 (exactn_bin, 0); else BUF_PUSH_2 (exactn, 0); #else BUF_PUSH_2 (exactn, 0); #endif /* WCHAR */ pending_exact = b - 1; } BUF_PUSH (c); (*pending_exact)++; break; } /* switch (c) */ } /* while p != pend */ /* Through the pattern now. */ if (fixup_alt_jump) STORE_JUMP (jump_past_alt, fixup_alt_jump, b); if (!COMPILE_STACK_EMPTY) FREE_STACK_RETURN (REG_EPAREN); /* If we don't want backtracking, force success the first time we reach the end of the compiled pattern. */ if (syntax & RE_NO_POSIX_BACKTRACKING) BUF_PUSH (succeed); #ifdef WCHAR free (pattern); free (mbs_offset); free (is_binary); #endif free (compile_stack.stack); /* We have succeeded; set the length of the buffer. */ #ifdef WCHAR bufp->used = (uintptr_t) b - (uintptr_t) COMPILED_BUFFER_VAR; #else bufp->used = b - bufp->buffer; #endif #ifdef DEBUG if (debug) { DEBUG_PRINT1 ("\nCompiled pattern: \n"); PREFIX(print_compiled_pattern) (bufp); } #endif /* DEBUG */ #ifndef MATCH_MAY_ALLOCATE /* Initialize the failure stack to the largest possible stack. This isn't necessary unless we're trying to avoid calling alloca in the search and match routines. */ { int num_regs = bufp->re_nsub + 1; /* Since DOUBLE_FAIL_STACK refuses to double only if the current size is strictly greater than re_max_failures, the largest possible stack is 2 * re_max_failures failure points. */ if (fail_stack.size < (2 * re_max_failures * MAX_FAILURE_ITEMS)) { fail_stack.size = (2 * re_max_failures * MAX_FAILURE_ITEMS); # ifdef emacs if (! fail_stack.stack) fail_stack.stack = (PREFIX(fail_stack_elt_t) *) xmalloc (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t))); else fail_stack.stack = (PREFIX(fail_stack_elt_t) *) xrealloc (fail_stack.stack, (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t)))); # else /* not emacs */ if (! fail_stack.stack) fail_stack.stack = (PREFIX(fail_stack_elt_t) *) malloc (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t))); else fail_stack.stack = (PREFIX(fail_stack_elt_t) *) realloc (fail_stack.stack, (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t)))); # endif /* not emacs */ } PREFIX(regex_grow_registers) (num_regs); } #endif /* not MATCH_MAY_ALLOCATE */ return REG_NOERROR; } /* regex_compile */ /* Subroutines for `regex_compile'. */ /* Store OP at LOC followed by two-byte integer parameter ARG. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. */ static void PREFIX(store_op1) (op, loc, arg) re_opcode_t op; UCHAR_T *loc; int arg; { *loc = (UCHAR_T) op; STORE_NUMBER (loc + 1, arg); } /* Like `store_op1', but for two two-byte parameters ARG1 and ARG2. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. */ static void PREFIX(store_op2) (op, loc, arg1, arg2) re_opcode_t op; UCHAR_T *loc; int arg1, arg2; { *loc = (UCHAR_T) op; STORE_NUMBER (loc + 1, arg1); STORE_NUMBER (loc + 1 + OFFSET_ADDRESS_SIZE, arg2); } /* Copy the bytes from LOC to END to open up three bytes of space at LOC for OP followed by two-byte integer parameter ARG. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. 
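   A small illustration, single-byte form: if the buffer holds ... X Y Z
   with END just past Z and LOC at X, then insert_op1 (op, LOC, arg, END)
   shifts X Y Z up by three positions and leaves op, the two-byte ARG,
   then X Y Z.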
*/ static void PREFIX(insert_op1) (op, loc, arg, end) re_opcode_t op; UCHAR_T *loc; int arg; UCHAR_T *end; { register UCHAR_T *pfrom = end; register UCHAR_T *pto = end + 1 + OFFSET_ADDRESS_SIZE; while (pfrom != loc) *--pto = *--pfrom; PREFIX(store_op1) (op, loc, arg); } /* Like `insert_op1', but for two two-byte parameters ARG1 and ARG2. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. */ static void PREFIX(insert_op2) (op, loc, arg1, arg2, end) re_opcode_t op; UCHAR_T *loc; int arg1, arg2; UCHAR_T *end; { register UCHAR_T *pfrom = end; register UCHAR_T *pto = end + 1 + 2 * OFFSET_ADDRESS_SIZE; while (pfrom != loc) *--pto = *--pfrom; PREFIX(store_op2) (op, loc, arg1, arg2); } /* P points to just after a ^ in PATTERN. Return true if that ^ comes after an alternative or a begin-subexpression. We assume there is at least one character before the ^. */ static boolean PREFIX(at_begline_loc_p) (pattern, p, syntax) const CHAR_T *pattern, *p; reg_syntax_t syntax; { const CHAR_T *prev = p - 2; boolean prev_prev_backslash = prev > pattern && prev[-1] == '\\'; return /* After a subexpression? */ (*prev == '(' && (syntax & RE_NO_BK_PARENS || prev_prev_backslash)) /* After an alternative? */ || (*prev == '|' && (syntax & RE_NO_BK_VBAR || prev_prev_backslash)); } /* The dual of at_begline_loc_p. This one is for $. We assume there is at least one character after the $, i.e., `P < PEND'. */ static boolean PREFIX(at_endline_loc_p) (p, pend, syntax) const CHAR_T *p, *pend; reg_syntax_t syntax; { const CHAR_T *next = p; boolean next_backslash = *next == '\\'; const CHAR_T *next_next = p + 1 < pend ? p + 1 : 0; return /* Before a subexpression? */ (syntax & RE_NO_BK_PARENS ? *next == ')' : next_backslash && next_next && *next_next == ')') /* Before an alternative? */ || (syntax & RE_NO_BK_VBAR ? *next == '|' : next_backslash && next_next && *next_next == '|'); } #else /* not INSIDE_RECURSION */ /* Returns true if REGNUM is in one of COMPILE_STACK's elements and false if it's not. */ static boolean group_in_compile_stack (compile_stack, regnum) compile_stack_type compile_stack; regnum_t regnum; { int this_element; for (this_element = compile_stack.avail - 1; this_element >= 0; this_element--) if (compile_stack.stack[this_element].regnum == regnum) return true; return false; } #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION #ifdef WCHAR /* This insert space, which size is "num", into the pattern at "loc". "end" must point the end of the allocated buffer. */ static void insert_space (num, loc, end) int num; CHAR_T *loc; CHAR_T *end; { register CHAR_T *pto = end; register CHAR_T *pfrom = end - num; while (pfrom >= loc) *pto-- = *pfrom--; } #endif /* WCHAR */ #ifdef WCHAR static reg_errcode_t wcs_compile_range (range_start_char, p_ptr, pend, translate, syntax, b, char_set) CHAR_T range_start_char; const CHAR_T **p_ptr, *pend; CHAR_T *char_set, *b; RE_TRANSLATE_TYPE translate; reg_syntax_t syntax; { const CHAR_T *p = *p_ptr; CHAR_T range_start, range_end; reg_errcode_t ret; # ifdef _LIBC uint32_t nrules; uint32_t start_val, end_val; # endif if (p == pend) return REG_ERANGE; # ifdef _LIBC nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); if (nrules != 0) { const char *collseq = (const char *) _NL_CURRENT(LC_COLLATE, _NL_COLLATE_COLLSEQWC); const unsigned char *extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); if (range_start_char < -1) { /* range_start is a collating symbol. */ int32_t *wextra; /* Retreive the index and get collation sequence value. 
*/ wextra = (int32_t*)(extra + char_set[-range_start_char]); start_val = wextra[1 + *wextra]; } else start_val = collseq_table_lookup(collseq, TRANSLATE(range_start_char)); end_val = collseq_table_lookup (collseq, TRANSLATE (p[0])); /* Report an error if the range is empty and the syntax prohibits this. */ ret = ((syntax & RE_NO_EMPTY_RANGES) && (start_val > end_val))? REG_ERANGE : REG_NOERROR; /* Insert space to the end of the char_ranges. */ insert_space(2, b - char_set[5] - 2, b - 1); *(b - char_set[5] - 2) = (wchar_t)start_val; *(b - char_set[5] - 1) = (wchar_t)end_val; char_set[4]++; /* ranges_index */ } else # endif { range_start = (range_start_char >= 0)? TRANSLATE (range_start_char): range_start_char; range_end = TRANSLATE (p[0]); /* Report an error if the range is empty and the syntax prohibits this. */ ret = ((syntax & RE_NO_EMPTY_RANGES) && (range_start > range_end))? REG_ERANGE : REG_NOERROR; /* Insert space to the end of the char_ranges. */ insert_space(2, b - char_set[5] - 2, b - 1); *(b - char_set[5] - 2) = range_start; *(b - char_set[5] - 1) = range_end; char_set[4]++; /* ranges_index */ } /* Have to increment the pointer into the pattern string, so the caller isn't still at the ending character. */ (*p_ptr)++; return ret; } #else /* BYTE */ /* Read the ending character of a range (in a bracket expression) from the uncompiled pattern *P_PTR (which ends at PEND). We assume the starting character is in `P[-2]'. (`P[-1]' is the character `-'.) Then we set the translation of all bits between the starting and ending characters (inclusive) in the compiled pattern B. Return an error code. We use these short variable names so we can use the same macros as `regex_compile' itself. */ static reg_errcode_t byte_compile_range (range_start_char, p_ptr, pend, translate, syntax, b) unsigned int range_start_char; const char **p_ptr, *pend; RE_TRANSLATE_TYPE translate; reg_syntax_t syntax; unsigned char *b; { unsigned this_char; const char *p = *p_ptr; reg_errcode_t ret; # if _LIBC const unsigned char *collseq; unsigned int start_colseq; unsigned int end_colseq; # else unsigned end_char; # endif if (p == pend) return REG_ERANGE; /* Have to increment the pointer into the pattern string, so the caller isn't still at the ending character. */ (*p_ptr)++; /* Report an error if the range is empty and the syntax prohibits this. */ ret = syntax & RE_NO_EMPTY_RANGES ? REG_ERANGE : REG_NOERROR; # if _LIBC collseq = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_COLLSEQMB); start_colseq = collseq[(unsigned char) TRANSLATE (range_start_char)]; end_colseq = collseq[(unsigned char) TRANSLATE (p[0])]; for (this_char = 0; this_char <= (unsigned char) -1; ++this_char) { unsigned int this_colseq = collseq[(unsigned char) TRANSLATE (this_char)]; if (start_colseq <= this_colseq && this_colseq <= end_colseq) { SET_LIST_BIT (TRANSLATE (this_char)); ret = REG_NOERROR; } } # else /* Here we see why `this_char' has to be larger than an `unsigned char' -- we would otherwise go into an infinite loop, since all characters <= 0xff. */ range_start_char = TRANSLATE (range_start_char); /* TRANSLATE(p[0]) is casted to char (not unsigned char) in TRANSLATE, and some compilers cast it to int implicitly, so following for_loop may fall to (almost) infinite loop. e.g. If translate[p[0]] = 0xff, end_char may equals to 0xffffffff. To avoid this, we cast p[0] to unsigned int and truncate it. 
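   A concrete instance of the problem (assuming 32-bit int and a signed
   plain char): if translate[(unsigned char) p[0]] holds 0xff, the (char)
   result of TRANSLATE is -1, which converted to unsigned becomes
   0xffffffff; `this_char <= end_char' would then hold for every value and
   the loop below would run (almost) forever.  Masking with
   (1 << BYTEWIDTH) - 1 keeps end_char at 0xff.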
*/ end_char = ((unsigned)TRANSLATE(p[0]) & ((1 << BYTEWIDTH) - 1)); for (this_char = range_start_char; this_char <= end_char; ++this_char) { SET_LIST_BIT (TRANSLATE (this_char)); ret = REG_NOERROR; } # endif return ret; } #endif /* WCHAR */ /* re_compile_fastmap computes a ``fastmap'' for the compiled pattern in BUFP. A fastmap records which of the (1 << BYTEWIDTH) possible characters can start a string that matches the pattern. This fastmap is used by re_search to skip quickly over impossible starting points. The caller must supply the address of a (1 << BYTEWIDTH)-byte data area as BUFP->fastmap. We set the `fastmap', `fastmap_accurate', and `can_be_null' fields in the pattern buffer. Returns 0 if we succeed, -2 if an internal error. */ #ifdef WCHAR /* local function for re_compile_fastmap. truncate wchar_t character to char. */ static unsigned char truncate_wchar (CHAR_T c); static unsigned char truncate_wchar (c) CHAR_T c; { unsigned char buf[MB_CUR_MAX]; mbstate_t state; int retval; memset (&state, '\0', sizeof (state)); # ifdef _LIBC retval = __wcrtomb (buf, c, &state); # else retval = wcrtomb (buf, c, &state); # endif return retval > 0 ? buf[0] : (unsigned char) c; } #endif /* WCHAR */ static int PREFIX(re_compile_fastmap) (bufp) struct re_pattern_buffer *bufp; { int j, k; #ifdef MATCH_MAY_ALLOCATE PREFIX(fail_stack_type) fail_stack; #endif #ifndef REGEX_MALLOC char *destination; #endif register char *fastmap = bufp->fastmap; #ifdef WCHAR /* We need to cast pattern to (wchar_t*), because we casted this compiled pattern to (char*) in regex_compile. */ UCHAR_T *pattern = (UCHAR_T*)bufp->buffer; register UCHAR_T *pend = (UCHAR_T*) (bufp->buffer + bufp->used); #else /* BYTE */ UCHAR_T *pattern = bufp->buffer; register UCHAR_T *pend = pattern + bufp->used; #endif /* WCHAR */ UCHAR_T *p = pattern; #ifdef REL_ALLOC /* This holds the pointer to the failure stack, when it is allocated relocatably. */ fail_stack_elt_t *failure_stack_ptr; #endif /* Assume that each path through the pattern can be null until proven otherwise. We set this false at the bottom of switch statement, to which we get only if a particular path doesn't match the empty string. */ boolean path_can_be_null = true; /* We aren't doing a `succeed_n' to begin with. */ boolean succeed_n_p = false; assert (fastmap != NULL && p != NULL); INIT_FAIL_STACK (); bzero (fastmap, 1 << BYTEWIDTH); /* Assume nothing's valid. */ bufp->fastmap_accurate = 1; /* It will be when we're done. */ bufp->can_be_null = 0; while (1) { if (p == pend || *p == (UCHAR_T) succeed) { /* We have reached the (effective) end of pattern. */ if (!FAIL_STACK_EMPTY ()) { bufp->can_be_null |= path_can_be_null; /* Reset for next path. */ path_can_be_null = true; p = fail_stack.stack[--fail_stack.avail].pointer; continue; } else break; } /* We should never be about to go beyond the end of the pattern. */ assert (p < pend); switch (SWITCH_ENUM_CAST ((re_opcode_t) *p++)) { /* I guess the idea here is to simply not bother with a fastmap if a backreference is used, since it's too hard to figure out the fastmap for the corresponding group. Setting `can_be_null' stops `re_search_2' from using the fastmap, so that is all we do. */ case duplicate: bufp->can_be_null = 1; goto done; /* Following are the cases which match a character. These end with `break'. 
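   Before those cases, an aside on how a finished fastmap is typically
   consumed (a sketch only, with hypothetical names):

     // Skip ahead to the first position whose byte could start a match.
     static int skip_with_fastmap (const char *fastmap, const char *s, int len)
     {
       int i = 0;
       while (i < len && !fastmap[(unsigned char) s[i]])
         i++;
       return i;   // first candidate offset, or len if no byte can start a match
     }

   re_search_2 below does essentially this, with the added wrinkles of the
   two-string virtual concatenation and the optional translate table.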
*/ #ifdef WCHAR case exactn: fastmap[truncate_wchar(p[1])] = 1; break; #else /* BYTE */ case exactn: fastmap[p[1]] = 1; break; #endif /* WCHAR */ #ifdef MBS_SUPPORT case exactn_bin: fastmap[p[1]] = 1; break; #endif #ifdef WCHAR /* It is hard to distinguish fastmap from (multi byte) characters which depends on current locale. */ case charset: case charset_not: case wordchar: case notwordchar: bufp->can_be_null = 1; goto done; #else /* BYTE */ case charset: for (j = *p++ * BYTEWIDTH - 1; j >= 0; j--) if (p[j / BYTEWIDTH] & (1 << (j % BYTEWIDTH))) fastmap[j] = 1; break; case charset_not: /* Chars beyond end of map must be allowed. */ for (j = *p * BYTEWIDTH; j < (1 << BYTEWIDTH); j++) fastmap[j] = 1; for (j = *p++ * BYTEWIDTH - 1; j >= 0; j--) if (!(p[j / BYTEWIDTH] & (1 << (j % BYTEWIDTH)))) fastmap[j] = 1; break; case wordchar: for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) == Sword) fastmap[j] = 1; break; case notwordchar: for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) != Sword) fastmap[j] = 1; break; #endif /* WCHAR */ case anychar: { int fastmap_newline = fastmap['\n']; /* `.' matches anything ... */ for (j = 0; j < (1 << BYTEWIDTH); j++) fastmap[j] = 1; /* ... except perhaps newline. */ if (!(bufp->syntax & RE_DOT_NEWLINE)) fastmap['\n'] = fastmap_newline; /* Return if we have already set `can_be_null'; if we have, then the fastmap is irrelevant. Something's wrong here. */ else if (bufp->can_be_null) goto done; /* Otherwise, have to check alternative paths. */ break; } #ifdef emacs case syntaxspec: k = *p++; for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) == (enum syntaxcode) k) fastmap[j] = 1; break; case notsyntaxspec: k = *p++; for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) != (enum syntaxcode) k) fastmap[j] = 1; break; /* All cases after this match the empty string. These end with `continue'. */ case before_dot: case at_dot: case after_dot: continue; #endif /* emacs */ case no_op: case begline: case endline: case begbuf: case endbuf: case wordbound: case notwordbound: case wordbeg: case wordend: case push_dummy_failure: continue; case jump_n: case pop_failure_jump: case maybe_pop_jump: case jump: case jump_past_alt: case dummy_failure_jump: EXTRACT_NUMBER_AND_INCR (j, p); p += j; if (j > 0) continue; /* Jump backward implies we just went through the body of a loop and matched nothing. Opcode jumped to should be `on_failure_jump' or `succeed_n'. Just treat it like an ordinary jump. For a * loop, it has pushed its failure point already; if so, discard that as redundant. */ if ((re_opcode_t) *p != on_failure_jump && (re_opcode_t) *p != succeed_n) continue; p++; EXTRACT_NUMBER_AND_INCR (j, p); p += j; /* If what's on the stack is where we are now, pop it. */ if (!FAIL_STACK_EMPTY () && fail_stack.stack[fail_stack.avail - 1].pointer == p) fail_stack.avail--; continue; case on_failure_jump: case on_failure_keep_string_jump: handle_on_failure_jump: EXTRACT_NUMBER_AND_INCR (j, p); /* For some patterns, e.g., `(a?)?', `p+j' here points to the end of the pattern. We don't want to push such a point, since when we restore it above, entering the switch will increment `p' past the end of the pattern. We don't need to push such a point since we obviously won't find any more fastmap entries beyond `pend'. Such a pattern can match the null string, though. */ if (p + j < pend) { if (!PUSH_PATTERN_OP (p + j, fail_stack)) { RESET_FAIL_STACK (); return -2; } } else bufp->can_be_null = 1; if (succeed_n_p) { EXTRACT_NUMBER_AND_INCR (k, p); /* Skip the n. 
*/ succeed_n_p = false; } continue; case succeed_n: /* Get to the number of times to succeed. */ p += OFFSET_ADDRESS_SIZE; /* Increment p past the n for when k != 0. */ EXTRACT_NUMBER_AND_INCR (k, p); if (k == 0) { p -= 2 * OFFSET_ADDRESS_SIZE; succeed_n_p = true; /* Spaghetti code alert. */ goto handle_on_failure_jump; } continue; case set_number_at: p += 2 * OFFSET_ADDRESS_SIZE; continue; case start_memory: case stop_memory: p += 2; continue; default: abort (); /* We have listed all the cases. */ } /* switch *p++ */ /* Getting here means we have found the possible starting characters for one path of the pattern -- and that the empty string does not match. We need not follow this path further. Instead, look at the next alternative (remembered on the stack), or quit if no more. The test at the top of the loop does these things. */ path_can_be_null = false; p = pend; } /* while p */ /* Set `can_be_null' for the last path (also the first path, if the pattern is empty). */ bufp->can_be_null |= path_can_be_null; done: RESET_FAIL_STACK (); return 0; } #else /* not INSIDE_RECURSION */ int re_compile_fastmap (bufp) struct re_pattern_buffer *bufp; { # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) return wcs_re_compile_fastmap(bufp); else # endif return byte_re_compile_fastmap(bufp); } /* re_compile_fastmap */ #ifdef _LIBC weak_alias (__re_compile_fastmap, re_compile_fastmap) #endif /* Set REGS to hold NUM_REGS registers, storing them in STARTS and ENDS. Subsequent matches using PATTERN_BUFFER and REGS will use this memory for recording register information. STARTS and ENDS must be allocated using the malloc library routine, and must each be at least NUM_REGS * sizeof (regoff_t) bytes long. If NUM_REGS == 0, then subsequent matches should allocate their own register data. Unless this function is called, the first search or match using PATTERN_BUFFER will allocate its own register data, without freeing the old data. */ void re_set_registers (bufp, regs, num_regs, starts, ends) struct re_pattern_buffer *bufp; struct re_registers *regs; unsigned num_regs; regoff_t *starts, *ends; { if (num_regs) { bufp->regs_allocated = REGS_REALLOCATE; regs->num_regs = num_regs; regs->start = starts; regs->end = ends; } else { bufp->regs_allocated = REGS_UNALLOCATED; regs->num_regs = 0; regs->start = regs->end = (regoff_t *) 0; } } #ifdef _LIBC weak_alias (__re_set_registers, re_set_registers) #endif /* Searching routines. */ /* Like re_search_2, below, but only one string is specified, and doesn't let you say where to stop matching. */ int re_search (bufp, string, size, startpos, range, regs) struct re_pattern_buffer *bufp; const char *string; int size, startpos, range; struct re_registers *regs; { return re_search_2 (bufp, NULL, 0, string, size, startpos, range, regs, size); } #ifdef _LIBC weak_alias (__re_search, re_search) #endif /* Using the compiled pattern in BUFP->buffer, first tries to match the virtual concatenation of STRING1 and STRING2, starting first at index STARTPOS, then at STARTPOS + 1, and so on. STRING1 and STRING2 have length SIZE1 and SIZE2, respectively. RANGE is how far to scan while trying to match. RANGE = 0 means try only at STARTPOS; in general, the last start tried is STARTPOS + RANGE. In REGS, return the indices of the virtual concatenation of STRING1 and STRING2 that matched the entire BUFP->buffer and its contained subexpressions. Do not consider matching one past the index STOP in the virtual concatenation of STRING1 and STRING2. 
We return either the position in the strings at which the match was found, -1 if no match, or -2 if error (such as failure stack overflow). */ int re_search_2 (bufp, string1, size1, string2, size2, startpos, range, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int startpos; int range; struct re_registers *regs; int stop; { # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) return wcs_re_search_2 (bufp, string1, size1, string2, size2, startpos, range, regs, stop); else # endif return byte_re_search_2 (bufp, string1, size1, string2, size2, startpos, range, regs, stop); } /* re_search_2 */ #ifdef _LIBC weak_alias (__re_search_2, re_search_2) #endif #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION #ifdef MATCH_MAY_ALLOCATE # define FREE_VAR(var) if (var) REGEX_FREE (var); var = NULL #else # define FREE_VAR(var) if (var) free (var); var = NULL #endif #ifdef WCHAR # define MAX_ALLOCA_SIZE 2000 # define FREE_WCS_BUFFERS() \ do { \ if (size1 > MAX_ALLOCA_SIZE) \ { \ free (wcs_string1); \ free (mbs_offset1); \ } \ else \ { \ FREE_VAR (wcs_string1); \ FREE_VAR (mbs_offset1); \ } \ if (size2 > MAX_ALLOCA_SIZE) \ { \ free (wcs_string2); \ free (mbs_offset2); \ } \ else \ { \ FREE_VAR (wcs_string2); \ FREE_VAR (mbs_offset2); \ } \ } while (0) #endif static int PREFIX(re_search_2) (bufp, string1, size1, string2, size2, startpos, range, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int startpos; int range; struct re_registers *regs; int stop; { int val; register char *fastmap = bufp->fastmap; register RE_TRANSLATE_TYPE translate = bufp->translate; int total_size = size1 + size2; int endpos = startpos + range; #ifdef WCHAR /* We need wchar_t* buffers correspond to cstring1, cstring2. */ wchar_t *wcs_string1 = NULL, *wcs_string2 = NULL; /* We need the size of wchar_t buffers correspond to csize1, csize2. */ int wcs_size1 = 0, wcs_size2 = 0; /* offset buffer for optimizatoin. See convert_mbs_to_wc. */ int *mbs_offset1 = NULL, *mbs_offset2 = NULL; /* They hold whether each wchar_t is binary data or not. */ char *is_binary = NULL; #endif /* WCHAR */ /* Check for out-of-range STARTPOS. */ if (startpos < 0 || startpos > total_size) return -1; /* Fix up RANGE if it might eventually take us outside the virtual concatenation of STRING1 and STRING2. Make sure we won't move STARTPOS below 0 or above TOTAL_SIZE. */ if (endpos < 0) range = 0 - startpos; else if (endpos > total_size) range = total_size - startpos; /* If the search isn't to be a backwards one, don't waste time in a search for a pattern that must be anchored. */ if (bufp->used > 0 && range > 0 && ((re_opcode_t) bufp->buffer[0] == begbuf /* `begline' is like `begbuf' if it cannot match at newlines. */ || ((re_opcode_t) bufp->buffer[0] == begline && !bufp->newline_anchor))) { if (startpos > 0) return -1; else range = 1; } #ifdef emacs /* In a forward search for something that starts with \=. don't keep searching past point. */ if (bufp->used > 0 && (re_opcode_t) bufp->buffer[0] == at_dot && range > 0) { range = PT - startpos; if (range <= 0) return -1; } #endif /* emacs */ /* Update the fastmap now if not correct already. */ if (fastmap && !bufp->fastmap_accurate) if (re_compile_fastmap (bufp) == -2) return -2; #ifdef WCHAR /* Allocate wchar_t array for wcs_string1 and wcs_string2 and fill them with converted string. 
*/ if (size1 != 0) { if (size1 > MAX_ALLOCA_SIZE) { wcs_string1 = TALLOC (size1 + 1, CHAR_T); mbs_offset1 = TALLOC (size1 + 1, int); is_binary = TALLOC (size1 + 1, char); } else { wcs_string1 = REGEX_TALLOC (size1 + 1, CHAR_T); mbs_offset1 = REGEX_TALLOC (size1 + 1, int); is_binary = REGEX_TALLOC (size1 + 1, char); } if (!wcs_string1 || !mbs_offset1 || !is_binary) { if (size1 > MAX_ALLOCA_SIZE) { free (wcs_string1); free (mbs_offset1); free (is_binary); } else { FREE_VAR (wcs_string1); FREE_VAR (mbs_offset1); FREE_VAR (is_binary); } return -2; } wcs_size1 = convert_mbs_to_wcs(wcs_string1, string1, size1, mbs_offset1, is_binary); wcs_string1[wcs_size1] = L'\0'; /* for a sentinel */ if (size1 > MAX_ALLOCA_SIZE) free (is_binary); else FREE_VAR (is_binary); } if (size2 != 0) { if (size2 > MAX_ALLOCA_SIZE) { wcs_string2 = TALLOC (size2 + 1, CHAR_T); mbs_offset2 = TALLOC (size2 + 1, int); is_binary = TALLOC (size2 + 1, char); } else { wcs_string2 = REGEX_TALLOC (size2 + 1, CHAR_T); mbs_offset2 = REGEX_TALLOC (size2 + 1, int); is_binary = REGEX_TALLOC (size2 + 1, char); } if (!wcs_string2 || !mbs_offset2 || !is_binary) { FREE_WCS_BUFFERS (); if (size2 > MAX_ALLOCA_SIZE) free (is_binary); else FREE_VAR (is_binary); return -2; } wcs_size2 = convert_mbs_to_wcs(wcs_string2, string2, size2, mbs_offset2, is_binary); wcs_string2[wcs_size2] = L'\0'; /* for a sentinel */ if (size2 > MAX_ALLOCA_SIZE) free (is_binary); else FREE_VAR (is_binary); } #endif /* WCHAR */ /* Loop through the string, looking for a place to start matching. */ for (;;) { /* If a fastmap is supplied, skip quickly over characters that cannot be the start of a match. If the pattern can match the null string, however, we don't need to skip characters; we want the first null string. */ if (fastmap && startpos < total_size && !bufp->can_be_null) { if (range > 0) /* Searching forwards. */ { register const char *d; register int lim = 0; int irange = range; if (startpos < size1 && startpos + range >= size1) lim = range - (size1 - startpos); d = (startpos >= size1 ? string2 - size1 : string1) + startpos; /* Written out as an if-else to avoid testing `translate' inside the loop. */ if (translate) while (range > lim && !fastmap[(unsigned char) translate[(unsigned char) *d++]]) range--; else while (range > lim && !fastmap[(unsigned char) *d++]) range--; startpos += irange - range; } else /* Searching backwards. */ { register CHAR_T c = (size1 == 0 || startpos >= size1 ? string2[startpos - size1] : string1[startpos]); if (!fastmap[(unsigned char) TRANSLATE (c)]) goto advance; } } /* If can't match the null string, and that's all we have left, fail. 
*/ if (range >= 0 && startpos == total_size && fastmap && !bufp->can_be_null) { #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return -1; } #ifdef WCHAR val = wcs_re_match_2_internal (bufp, string1, size1, string2, size2, startpos, regs, stop, wcs_string1, wcs_size1, wcs_string2, wcs_size2, mbs_offset1, mbs_offset2); #else /* BYTE */ val = byte_re_match_2_internal (bufp, string1, size1, string2, size2, startpos, regs, stop); #endif /* BYTE */ #ifndef REGEX_MALLOC # ifdef C_ALLOCA alloca (0); # endif #endif if (val >= 0) { #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return startpos; } if (val == -2) { #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return -2; } advance: if (!range) break; else if (range > 0) { range--; startpos++; } else { range++; startpos--; } } #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return -1; } #ifdef WCHAR /* This converts PTR, a pointer into one of the search wchar_t strings `string1' and `string2' into an multibyte string offset from the beginning of that string. We use mbs_offset to optimize. See convert_mbs_to_wcs. */ # define POINTER_TO_OFFSET(ptr) \ (FIRST_STRING_P (ptr) \ ? ((regoff_t)(mbs_offset1 != NULL? mbs_offset1[(ptr)-string1] : 0)) \ : ((regoff_t)((mbs_offset2 != NULL? mbs_offset2[(ptr)-string2] : 0) \ + csize1))) #else /* BYTE */ /* This converts PTR, a pointer into one of the search strings `string1' and `string2' into an offset from the beginning of that string. */ # define POINTER_TO_OFFSET(ptr) \ (FIRST_STRING_P (ptr) \ ? ((regoff_t) ((ptr) - string1)) \ : ((regoff_t) ((ptr) - string2 + size1))) #endif /* WCHAR */ /* Macros for dealing with the split strings in re_match_2. */ #define MATCHING_IN_FIRST_STRING (dend == end_match_1) /* Call before fetching a character with *d. This switches over to string2 if necessary. */ #define PREFETCH() \ while (d == dend) \ { \ /* End of string2 => fail. */ \ if (dend == end_match_2) \ goto fail; \ /* End of string1 => advance to string2. */ \ d = string2; \ dend = end_match_2; \ } /* Test if at very beginning or at very end of the virtual concatenation of `string1' and `string2'. If only one string, it's `string2'. */ #define AT_STRINGS_BEG(d) ((d) == (size1 ? string1 : string2) || !size2) #define AT_STRINGS_END(d) ((d) == end2) /* Test if D points to a character which is word-constituent. We have two special cases to check for: if past the end of string1, look at the first character in string2; and if before the beginning of string2, look at the last character in string1. */ #ifdef WCHAR /* Use internationalized API instead of SYNTAX. */ # define WORDCHAR_P(d) \ (iswalnum ((wint_t)((d) == end1 ? *string2 \ : (d) == string2 - 1 ? *(end1 - 1) : *(d))) != 0 \ || ((d) == end1 ? *string2 \ : (d) == string2 - 1 ? *(end1 - 1) : *(d)) == L'_') #else /* BYTE */ # define WORDCHAR_P(d) \ (SYNTAX ((d) == end1 ? *string2 \ : (d) == string2 - 1 ? *(end1 - 1) : *(d)) \ == Sword) #endif /* WCHAR */ /* Disabled due to a compiler bug -- see comment at case wordbound */ #if 0 /* Test if the character before D and the one at D differ with respect to being word-constituent. */ #define AT_WORD_BOUNDARY(d) \ (AT_STRINGS_BEG (d) || AT_STRINGS_END (d) \ || WORDCHAR_P (d - 1) != WORDCHAR_P (d)) #endif /* Free everything we malloc. 
*/ #ifdef MATCH_MAY_ALLOCATE # ifdef WCHAR # define FREE_VARIABLES() \ do { \ REGEX_FREE_STACK (fail_stack.stack); \ FREE_VAR (regstart); \ FREE_VAR (regend); \ FREE_VAR (old_regstart); \ FREE_VAR (old_regend); \ FREE_VAR (best_regstart); \ FREE_VAR (best_regend); \ FREE_VAR (reg_info); \ FREE_VAR (reg_dummy); \ FREE_VAR (reg_info_dummy); \ if (!cant_free_wcs_buf) \ { \ FREE_VAR (string1); \ FREE_VAR (string2); \ FREE_VAR (mbs_offset1); \ FREE_VAR (mbs_offset2); \ } \ } while (0) # else /* BYTE */ # define FREE_VARIABLES() \ do { \ REGEX_FREE_STACK (fail_stack.stack); \ FREE_VAR (regstart); \ FREE_VAR (regend); \ FREE_VAR (old_regstart); \ FREE_VAR (old_regend); \ FREE_VAR (best_regstart); \ FREE_VAR (best_regend); \ FREE_VAR (reg_info); \ FREE_VAR (reg_dummy); \ FREE_VAR (reg_info_dummy); \ } while (0) # endif /* WCHAR */ #else # ifdef WCHAR # define FREE_VARIABLES() \ do { \ if (!cant_free_wcs_buf) \ { \ FREE_VAR (string1); \ FREE_VAR (string2); \ FREE_VAR (mbs_offset1); \ FREE_VAR (mbs_offset2); \ } \ } while (0) # else /* BYTE */ # define FREE_VARIABLES() ((void)0) /* Do nothing! But inhibit gcc warning. */ # endif /* WCHAR */ #endif /* not MATCH_MAY_ALLOCATE */ /* These values must meet several constraints. They must not be valid register values; since we have a limit of 255 registers (because we use only one byte in the pattern for the register number), we can use numbers larger than 255. They must differ by 1, because of NUM_FAILURE_ITEMS above. And the value for the lowest register must be larger than the value for the highest register, so we do not try to actually save any registers when none are active. */ #define NO_HIGHEST_ACTIVE_REG (1 << BYTEWIDTH) #define NO_LOWEST_ACTIVE_REG (NO_HIGHEST_ACTIVE_REG + 1) #else /* not INSIDE_RECURSION */ /* Matching routines. */ #ifndef emacs /* Emacs never uses this. */ /* re_match is like re_match_2 except it takes only a single string. */ int re_match (bufp, string, size, pos, regs) struct re_pattern_buffer *bufp; const char *string; int size, pos; struct re_registers *regs; { int result; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) result = wcs_re_match_2_internal (bufp, NULL, 0, string, size, pos, regs, size, NULL, 0, NULL, 0, NULL, NULL); else # endif result = byte_re_match_2_internal (bufp, NULL, 0, string, size, pos, regs, size); # ifndef REGEX_MALLOC # ifdef C_ALLOCA alloca (0); # endif # endif return result; } # ifdef _LIBC weak_alias (__re_match, re_match) # endif #endif /* not emacs */ #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION static boolean PREFIX(group_match_null_string_p) _RE_ARGS ((UCHAR_T **p, UCHAR_T *end, PREFIX(register_info_type) *reg_info)); static boolean PREFIX(alt_match_null_string_p) _RE_ARGS ((UCHAR_T *p, UCHAR_T *end, PREFIX(register_info_type) *reg_info)); static boolean PREFIX(common_op_match_null_string_p) _RE_ARGS ((UCHAR_T **p, UCHAR_T *end, PREFIX(register_info_type) *reg_info)); static int PREFIX(bcmp_translate) _RE_ARGS ((const CHAR_T *s1, const CHAR_T *s2, int len, char *translate)); #else /* not INSIDE_RECURSION */ /* re_match_2 matches the compiled pattern in BUFP against the the (virtual) concatenation of STRING1 and STRING2 (of length SIZE1 and SIZE2, respectively). We start matching at POS, and stop matching at STOP. If REGS is non-null and the `no_sub' field of BUFP is nonzero, we store offsets for the substring each group matched in REGS. See the documentation for exactly how many groups we fill. We return -1 if no match, -2 if an internal error (such as the failure stack overflowing). 
Otherwise, we return the length of the matched substring. */ int re_match_2 (bufp, string1, size1, string2, size2, pos, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int pos; struct re_registers *regs; int stop; { int result; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) result = wcs_re_match_2_internal (bufp, string1, size1, string2, size2, pos, regs, stop, NULL, 0, NULL, 0, NULL, NULL); else # endif result = byte_re_match_2_internal (bufp, string1, size1, string2, size2, pos, regs, stop); #ifndef REGEX_MALLOC # ifdef C_ALLOCA alloca (0); # endif #endif return result; } #ifdef _LIBC weak_alias (__re_match_2, re_match_2) #endif #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION #ifdef WCHAR static int count_mbs_length PARAMS ((int *, int)); /* This check the substring (from 0, to length) of the multibyte string, to which offset_buffer correspond. And count how many wchar_t_characters the substring occupy. We use offset_buffer to optimization. See convert_mbs_to_wcs. */ static int count_mbs_length(offset_buffer, length) int *offset_buffer; int length; { int upper, lower; /* Check whether the size is valid. */ if (length < 0) return -1; if (offset_buffer == NULL) return 0; /* If there are no multibyte character, offset_buffer[i] == i. Optmize for this case. */ if (offset_buffer[length] == length) return length; /* Set up upper with length. (because for all i, offset_buffer[i] >= i) */ upper = length; lower = 0; while (true) { int middle = (lower + upper) / 2; if (middle == lower || middle == upper) break; if (offset_buffer[middle] > length) upper = middle; else if (offset_buffer[middle] < length) lower = middle; else return middle; } return -1; } #endif /* WCHAR */ /* This is a separate function so that we can force an alloca cleanup afterwards. */ #ifdef WCHAR static int wcs_re_match_2_internal (bufp, cstring1, csize1, cstring2, csize2, pos, regs, stop, string1, size1, string2, size2, mbs_offset1, mbs_offset2) struct re_pattern_buffer *bufp; const char *cstring1, *cstring2; int csize1, csize2; int pos; struct re_registers *regs; int stop; /* string1 == string2 == NULL means string1/2, size1/2 and mbs_offset1/2 need seting up in this function. */ /* We need wchar_t* buffers correspond to cstring1, cstring2. */ wchar_t *string1, *string2; /* We need the size of wchar_t buffers correspond to csize1, csize2. */ int size1, size2; /* offset buffer for optimizatoin. See convert_mbs_to_wc. */ int *mbs_offset1, *mbs_offset2; #else /* BYTE */ static int byte_re_match_2_internal (bufp, string1, size1,string2, size2, pos, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int pos; struct re_registers *regs; int stop; #endif /* BYTE */ { /* General temporaries. */ int mcnt; UCHAR_T *p1; #ifdef WCHAR /* They hold whether each wchar_t is binary data or not. */ char *is_binary = NULL; /* If true, we can't free string1/2, mbs_offset1/2. */ int cant_free_wcs_buf = 1; #endif /* WCHAR */ /* Just past the end of the corresponding string. */ const CHAR_T *end1, *end2; /* Pointers into string1 and string2, just past the last characters in each to consider matching. */ const CHAR_T *end_match_1, *end_match_2; /* Where we are in the data, and the end of the current string. */ const CHAR_T *d, *dend; /* Where we are in the pattern, and the end of the pattern. 
*/ #ifdef WCHAR UCHAR_T *pattern, *p; register UCHAR_T *pend; #else /* BYTE */ UCHAR_T *p = bufp->buffer; register UCHAR_T *pend = p + bufp->used; #endif /* WCHAR */ /* Mark the opcode just after a start_memory, so we can test for an empty subpattern when we get to the stop_memory. */ UCHAR_T *just_past_start_mem = 0; /* We use this to map every character in the string. */ RE_TRANSLATE_TYPE translate = bufp->translate; /* Failure point stack. Each place that can handle a failure further down the line pushes a failure point on this stack. It consists of restart, regend, and reg_info for all registers corresponding to the subexpressions we're currently inside, plus the number of such registers, and, finally, two char *'s. The first char * is where to resume scanning the pattern; the second one is where to resume scanning the strings. If the latter is zero, the failure point is a ``dummy''; if a failure happens and the failure point is a dummy, it gets discarded and the next next one is tried. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, this is global. */ PREFIX(fail_stack_type) fail_stack; #endif #ifdef DEBUG static unsigned failure_id; unsigned nfailure_points_pushed = 0, nfailure_points_popped = 0; #endif #ifdef REL_ALLOC /* This holds the pointer to the failure stack, when it is allocated relocatably. */ fail_stack_elt_t *failure_stack_ptr; #endif /* We fill all the registers internally, independent of what we return, for use in backreferences. The number here includes an element for register zero. */ size_t num_regs = bufp->re_nsub + 1; /* The currently active registers. */ active_reg_t lowest_active_reg = NO_LOWEST_ACTIVE_REG; active_reg_t highest_active_reg = NO_HIGHEST_ACTIVE_REG; /* Information on the contents of registers. These are pointers into the input strings; they record just what was matched (on this attempt) by a subexpression part of the pattern, that is, the regnum-th regstart pointer points to where in the pattern we began matching and the regnum-th regend points to right after where we stopped matching the regnum-th subexpression. (The zeroth register keeps track of what the whole pattern matches.) */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. */ const CHAR_T **regstart, **regend; #endif /* If a group that's operated upon by a repetition operator fails to match anything, then the register for its start will need to be restored because it will have been set to wherever in the string we are when we last see its open-group operator. Similarly for a register's end. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. */ const CHAR_T **old_regstart, **old_regend; #endif /* The is_active field of reg_info helps us keep track of which (possibly nested) subexpressions we are currently in. The matched_something field of reg_info[reg_num] helps us tell whether or not we have matched any of the pattern so far this time through the reg_num-th subexpression. These two fields get reset each time through any loop their register is in. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, this is global. */ PREFIX(register_info_type) *reg_info; #endif /* The following record the register info as found in the above variables when we find a match better than any we've seen before. This happens as we backtrack through the failure points, which in turn happens only if we have not yet matched the entire string. */ unsigned best_regs_set = false; #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. 
*/ const CHAR_T **best_regstart, **best_regend; #endif /* Logically, this is `best_regend[0]'. But we don't want to have to allocate space for that if we're not allocating space for anything else (see below). Also, we never need info about register 0 for any of the other register vectors, and it seems rather a kludge to treat `best_regend' differently than the rest. So we keep track of the end of the best match so far in a separate variable. We initialize this to NULL so that when we backtrack the first time and need to test it, it's not garbage. */ const CHAR_T *match_end = NULL; /* This helps SET_REGS_MATCHED avoid doing redundant work. */ int set_regs_matched_done = 0; /* Used when we pop values we don't care about. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. */ const CHAR_T **reg_dummy; PREFIX(register_info_type) *reg_info_dummy; #endif #ifdef DEBUG /* Counts the total number of registers pushed. */ unsigned num_regs_pushed = 0; #endif DEBUG_PRINT1 ("\n\nEntering re_match_2.\n"); INIT_FAIL_STACK (); #ifdef MATCH_MAY_ALLOCATE /* Do not bother to initialize all the register variables if there are no groups in the pattern, as it takes a fair amount of time. If there are groups, we include space for register 0 (the whole pattern), even though we never use it, since it simplifies the array indexing. We should fix this. */ if (bufp->re_nsub) { regstart = REGEX_TALLOC (num_regs, const CHAR_T *); regend = REGEX_TALLOC (num_regs, const CHAR_T *); old_regstart = REGEX_TALLOC (num_regs, const CHAR_T *); old_regend = REGEX_TALLOC (num_regs, const CHAR_T *); best_regstart = REGEX_TALLOC (num_regs, const CHAR_T *); best_regend = REGEX_TALLOC (num_regs, const CHAR_T *); reg_info = REGEX_TALLOC (num_regs, PREFIX(register_info_type)); reg_dummy = REGEX_TALLOC (num_regs, const CHAR_T *); reg_info_dummy = REGEX_TALLOC (num_regs, PREFIX(register_info_type)); if (!(regstart && regend && old_regstart && old_regend && reg_info && best_regstart && best_regend && reg_dummy && reg_info_dummy)) { FREE_VARIABLES (); return -2; } } else { /* We must initialize all our variables to NULL, so that `FREE_VARIABLES' doesn't try to free them. */ regstart = regend = old_regstart = old_regend = best_regstart = best_regend = reg_dummy = NULL; reg_info = reg_info_dummy = (PREFIX(register_info_type) *) NULL; } #endif /* MATCH_MAY_ALLOCATE */ /* The starting position is bogus. */ #ifdef WCHAR if (pos < 0 || pos > csize1 + csize2) #else /* BYTE */ if (pos < 0 || pos > size1 + size2) #endif { FREE_VARIABLES (); return -1; } #ifdef WCHAR /* Allocate wchar_t array for string1 and string2 and fill them with converted string. */ if (string1 == NULL && string2 == NULL) { /* We need seting up buffers here. */ /* We must free wcs buffers in this function. 
*/ cant_free_wcs_buf = 0; if (csize1 != 0) { string1 = REGEX_TALLOC (csize1 + 1, CHAR_T); mbs_offset1 = REGEX_TALLOC (csize1 + 1, int); is_binary = REGEX_TALLOC (csize1 + 1, char); if (!string1 || !mbs_offset1 || !is_binary) { FREE_VAR (string1); FREE_VAR (mbs_offset1); FREE_VAR (is_binary); return -2; } } if (csize2 != 0) { string2 = REGEX_TALLOC (csize2 + 1, CHAR_T); mbs_offset2 = REGEX_TALLOC (csize2 + 1, int); is_binary = REGEX_TALLOC (csize2 + 1, char); if (!string2 || !mbs_offset2 || !is_binary) { FREE_VAR (string1); FREE_VAR (mbs_offset1); FREE_VAR (string2); FREE_VAR (mbs_offset2); FREE_VAR (is_binary); return -2; } size2 = convert_mbs_to_wcs(string2, cstring2, csize2, mbs_offset2, is_binary); string2[size2] = L'\0'; /* for a sentinel */ FREE_VAR (is_binary); } } /* We need to cast pattern to (wchar_t*), because we casted this compiled pattern to (char*) in regex_compile. */ p = pattern = (CHAR_T*)bufp->buffer; pend = (CHAR_T*)(bufp->buffer + bufp->used); #endif /* WCHAR */ /* Initialize subexpression text positions to -1 to mark ones that no start_memory/stop_memory has been seen for. Also initialize the register information struct. */ for (mcnt = 1; (unsigned) mcnt < num_regs; mcnt++) { regstart[mcnt] = regend[mcnt] = old_regstart[mcnt] = old_regend[mcnt] = REG_UNSET_VALUE; REG_MATCH_NULL_STRING_P (reg_info[mcnt]) = MATCH_NULL_UNSET_VALUE; IS_ACTIVE (reg_info[mcnt]) = 0; MATCHED_SOMETHING (reg_info[mcnt]) = 0; EVER_MATCHED_SOMETHING (reg_info[mcnt]) = 0; } /* We move `string1' into `string2' if the latter's empty -- but not if `string1' is null. */ if (size2 == 0 && string1 != NULL) { string2 = string1; size2 = size1; string1 = 0; size1 = 0; #ifdef WCHAR mbs_offset2 = mbs_offset1; csize2 = csize1; mbs_offset1 = NULL; csize1 = 0; #endif } end1 = string1 + size1; end2 = string2 + size2; /* Compute where to stop matching, within the two strings. */ #ifdef WCHAR if (stop <= csize1) { mcnt = count_mbs_length(mbs_offset1, stop); end_match_1 = string1 + mcnt; end_match_2 = string2; } else { if (stop > csize1 + csize2) stop = csize1 + csize2; end_match_1 = end1; mcnt = count_mbs_length(mbs_offset2, stop-csize1); end_match_2 = string2 + mcnt; } if (mcnt < 0) { /* count_mbs_length return error. */ FREE_VARIABLES (); return -1; } #else if (stop <= size1) { end_match_1 = string1 + stop; end_match_2 = string2; } else { end_match_1 = end1; end_match_2 = string2 + stop - size1; } #endif /* WCHAR */ /* `p' scans through the pattern as `d' scans through the data. `dend' is the end of the input string that `d' points within. `d' is advanced into the following input string whenever necessary, but this happens before fetching; therefore, at the beginning of the loop, `d' can be pointing at the end of a string, but it cannot equal `string2'. */ #ifdef WCHAR if (size1 > 0 && pos <= csize1) { mcnt = count_mbs_length(mbs_offset1, pos); d = string1 + mcnt; dend = end_match_1; } else { mcnt = count_mbs_length(mbs_offset2, pos-csize1); d = string2 + mcnt; dend = end_match_2; } if (mcnt < 0) { /* count_mbs_length return error. */ FREE_VARIABLES (); return -1; } #else if (size1 > 0 && pos <= size1) { d = string1 + pos; dend = end_match_1; } else { d = string2 + pos - size1; dend = end_match_2; } #endif /* WCHAR */ DEBUG_PRINT1 ("The compiled pattern is:\n"); DEBUG_PRINT_COMPILED_PATTERN (bufp, p, pend); DEBUG_PRINT1 ("The string to match is: `"); DEBUG_PRINT_DOUBLE_STRING (d, string1, size1, string2, size2); DEBUG_PRINT1 ("'\n"); /* This loops over pattern commands. 
It exits by returning from the function if the match is complete, or it drops through if the match fails at this starting point in the input data. */ for (;;) { #ifdef _LIBC DEBUG_PRINT2 ("\n%p: ", p); #else DEBUG_PRINT2 ("\n0x%x: ", p); #endif if (p == pend) { /* End of pattern means we might have succeeded. */ DEBUG_PRINT1 ("end of pattern ... "); /* If we haven't matched the entire string, and we want the longest match, try backtracking. */ if (d != end_match_2) { /* 1 if this match ends in the same string (string1 or string2) as the best previous match. */ boolean same_str_p = (FIRST_STRING_P (match_end) == MATCHING_IN_FIRST_STRING); /* 1 if this match is the best seen so far. */ boolean best_match_p; /* AIX compiler got confused when this was combined with the previous declaration. */ if (same_str_p) best_match_p = d > match_end; else best_match_p = !MATCHING_IN_FIRST_STRING; DEBUG_PRINT1 ("backtracking.\n"); if (!FAIL_STACK_EMPTY ()) { /* More failure points to try. */ /* If exceeds best match so far, save it. */ if (!best_regs_set || best_match_p) { best_regs_set = true; match_end = d; DEBUG_PRINT1 ("\nSAVING match as best so far.\n"); for (mcnt = 1; (unsigned) mcnt < num_regs; mcnt++) { best_regstart[mcnt] = regstart[mcnt]; best_regend[mcnt] = regend[mcnt]; } } goto fail; } /* If no failure points, don't restore garbage. And if last match is real best match, don't restore second best one. */ else if (best_regs_set && !best_match_p) { restore_best_regs: /* Restore best match. It may happen that `dend == end_match_1' while the restored d is in string2. For example, the pattern `x.*y.*z' against the strings `x-' and `y-z-', if the two strings are not consecutive in memory. */ DEBUG_PRINT1 ("Restoring best registers.\n"); d = match_end; dend = ((d >= string1 && d <= end1) ? end_match_1 : end_match_2); for (mcnt = 1; (unsigned) mcnt < num_regs; mcnt++) { regstart[mcnt] = best_regstart[mcnt]; regend[mcnt] = best_regend[mcnt]; } } } /* d != end_match_2 */ succeed_label: DEBUG_PRINT1 ("Accepting match.\n"); /* If caller wants register contents data back, do it. */ if (regs && !bufp->no_sub) { /* Have the register data arrays been allocated? */ if (bufp->regs_allocated == REGS_UNALLOCATED) { /* No. So allocate them with malloc. We need one extra element beyond `num_regs' for the `-1' marker GNU code uses. */ regs->num_regs = MAX (RE_NREGS, num_regs + 1); regs->start = TALLOC (regs->num_regs, regoff_t); regs->end = TALLOC (regs->num_regs, regoff_t); if (regs->start == NULL || regs->end == NULL) { FREE_VARIABLES (); return -2; } bufp->regs_allocated = REGS_REALLOCATE; } else if (bufp->regs_allocated == REGS_REALLOCATE) { /* Yes. If we need more elements than were already allocated, reallocate them. If we need fewer, just leave it alone. */ if (regs->num_regs < num_regs + 1) { regs->num_regs = num_regs + 1; RETALLOC (regs->start, regs->num_regs, regoff_t); RETALLOC (regs->end, regs->num_regs, regoff_t); if (regs->start == NULL || regs->end == NULL) { FREE_VARIABLES (); return -2; } } } else { /* These braces fend off a "empty body in an else-statement" warning under GCC when assert expands to nothing. */ assert (bufp->regs_allocated == REGS_FIXED); } /* Convert the pointer data in `regstart' and `regend' to indices. Register zero has to be set differently, since we haven't kept track of any info for it. */ if (regs->num_regs > 0) { regs->start[0] = pos; #ifdef WCHAR if (MATCHING_IN_FIRST_STRING) regs->end[0] = mbs_offset1 != NULL ? 
mbs_offset1[d-string1] : 0; else regs->end[0] = csize1 + (mbs_offset2 != NULL ? mbs_offset2[d-string2] : 0); #else regs->end[0] = (MATCHING_IN_FIRST_STRING ? ((regoff_t) (d - string1)) : ((regoff_t) (d - string2 + size1))); #endif /* WCHAR */ } /* Go through the first `min (num_regs, regs->num_regs)' registers, since that is all we initialized. */ for (mcnt = 1; (unsigned) mcnt < MIN (num_regs, regs->num_regs); mcnt++) { if (REG_UNSET (regstart[mcnt]) || REG_UNSET (regend[mcnt])) regs->start[mcnt] = regs->end[mcnt] = -1; else { regs->start[mcnt] = (regoff_t) POINTER_TO_OFFSET (regstart[mcnt]); regs->end[mcnt] = (regoff_t) POINTER_TO_OFFSET (regend[mcnt]); } } /* If the regs structure we return has more elements than were in the pattern, set the extra elements to -1. If we (re)allocated the registers, this is the case, because we always allocate enough to have at least one -1 at the end. */ for (mcnt = num_regs; (unsigned) mcnt < regs->num_regs; mcnt++) regs->start[mcnt] = regs->end[mcnt] = -1; } /* regs && !bufp->no_sub */ DEBUG_PRINT4 ("%u failure points pushed, %u popped (%u remain).\n", nfailure_points_pushed, nfailure_points_popped, nfailure_points_pushed - nfailure_points_popped); DEBUG_PRINT2 ("%u registers pushed.\n", num_regs_pushed); #ifdef WCHAR if (MATCHING_IN_FIRST_STRING) mcnt = mbs_offset1 != NULL ? mbs_offset1[d-string1] : 0; else mcnt = (mbs_offset2 != NULL ? mbs_offset2[d-string2] : 0) + csize1; mcnt -= pos; #else mcnt = d - pos - (MATCHING_IN_FIRST_STRING ? string1 : string2 - size1); #endif /* WCHAR */ DEBUG_PRINT2 ("Returning %d from re_match_2.\n", mcnt); FREE_VARIABLES (); return mcnt; } /* Otherwise match next pattern command. */ switch (SWITCH_ENUM_CAST ((re_opcode_t) *p++)) { /* Ignore these. Used to ignore the n of succeed_n's which currently have n == 0. */ case no_op: DEBUG_PRINT1 ("EXECUTING no_op.\n"); break; case succeed: DEBUG_PRINT1 ("EXECUTING succeed.\n"); goto succeed_label; /* Match the next n pattern characters exactly. The following byte in the pattern defines n, and the n bytes after that are the characters to match. */ case exactn: #ifdef MBS_SUPPORT case exactn_bin: #endif mcnt = *p++; DEBUG_PRINT2 ("EXECUTING exactn %d.\n", mcnt); /* This is written out as an if-else so we don't waste time testing `translate' inside the loop. */ if (translate) { do { PREFETCH (); #ifdef WCHAR if (*d <= 0xff) { if ((UCHAR_T) translate[(unsigned char) *d++] != (UCHAR_T) *p++) goto fail; } else { if (*d++ != (CHAR_T) *p++) goto fail; } #else if ((UCHAR_T) translate[(unsigned char) *d++] != (UCHAR_T) *p++) goto fail; #endif /* WCHAR */ } while (--mcnt); } else { do { PREFETCH (); if (*d++ != (CHAR_T) *p++) goto fail; } while (--mcnt); } SET_REGS_MATCHED (); break; /* Match any character except possibly a newline or a null. */ case anychar: DEBUG_PRINT1 ("EXECUTING anychar.\n"); PREFETCH (); if ((!(bufp->syntax & RE_DOT_NEWLINE) && TRANSLATE (*d) == '\n') || (bufp->syntax & RE_DOT_NOT_NULL && TRANSLATE (*d) == '\000')) goto fail; SET_REGS_MATCHED (); DEBUG_PRINT2 (" Matched `%ld'.\n", (long int) *d); d++; break; case charset: case charset_not: { register UCHAR_T c; #ifdef WCHAR unsigned int i, char_class_length, coll_symbol_length, equiv_class_length, ranges_length, chars_length, length; CHAR_T *workp, *workp2, *charset_top; #define WORK_BUFFER_SIZE 128 CHAR_T str_buf[WORK_BUFFER_SIZE]; # ifdef _LIBC uint32_t nrules; # endif /* _LIBC */ #endif /* WCHAR */ boolean not = (re_opcode_t) *(p - 1) == charset_not; DEBUG_PRINT2 ("EXECUTING charset%s.\n", not ? 
"_not" : ""); PREFETCH (); c = TRANSLATE (*d); /* The character to match. */ #ifdef WCHAR # ifdef _LIBC nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif /* _LIBC */ charset_top = p - 1; char_class_length = *p++; coll_symbol_length = *p++; equiv_class_length = *p++; ranges_length = *p++; chars_length = *p++; /* p points charset[6], so the address of the next instruction (charset[l+m+n+2o+k+p']) equals p[l+m+n+2*o+p'], where l=length of char_classes, m=length of collating_symbol, n=equivalence_class, o=length of char_range, p'=length of character. */ workp = p; /* Update p to indicate the next instruction. */ p += char_class_length + coll_symbol_length+ equiv_class_length + 2*ranges_length + chars_length; /* match with char_class? */ for (i = 0; i < char_class_length ; i += CHAR_CLASS_SIZE) { wctype_t wctype; uintptr_t alignedp = ((uintptr_t)workp + __alignof__(wctype_t) - 1) & ~(uintptr_t)(__alignof__(wctype_t) - 1); wctype = *((wctype_t*)alignedp); workp += CHAR_CLASS_SIZE; # ifdef _LIBC if (__iswctype((wint_t)c, wctype)) goto char_set_matched; # else if (iswctype((wint_t)c, wctype)) goto char_set_matched; # endif } /* match with collating_symbol? */ # ifdef _LIBC if (nrules != 0) { const unsigned char *extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); for (workp2 = workp + coll_symbol_length ; workp < workp2 ; workp++) { int32_t *wextra; wextra = (int32_t*)(extra + *workp++); for (i = 0; i < *wextra; ++i) if (TRANSLATE(d[i]) != wextra[1 + i]) break; if (i == *wextra) { /* Update d, however d will be incremented at char_set_matched:, we decrement d here. */ d += i - 1; goto char_set_matched; } } } else /* (nrules == 0) */ # endif /* If we can't look up collation data, we use wcscoll instead. */ { for (workp2 = workp + coll_symbol_length ; workp < workp2 ;) { const CHAR_T *backup_d = d, *backup_dend = dend; # ifdef _LIBC length = __wcslen (workp); # else length = wcslen (workp); # endif /* If wcscoll(the collating symbol, whole string) > 0, any substring of the string never match with the collating symbol. */ # ifdef _LIBC if (__wcscoll (workp, d) > 0) # else if (wcscoll (workp, d) > 0) # endif { workp += length + 1; continue; } /* First, we compare the collating symbol with the first character of the string. If it don't match, we add the next character to the compare buffer in turn. */ for (i = 0 ; i < WORK_BUFFER_SIZE-1 ; i++, d++) { int match; if (d == dend) { if (dend == end_match_2) break; d = string2; dend = end_match_2; } /* add next character to the compare buffer. */ str_buf[i] = TRANSLATE(*d); str_buf[i+1] = '\0'; # ifdef _LIBC match = __wcscoll (workp, str_buf); # else match = wcscoll (workp, str_buf); # endif if (match == 0) goto char_set_matched; if (match < 0) /* (str_buf > workp) indicate (str_buf + X > workp), because for all X (str_buf + X > str_buf). So we don't need continue this loop. */ break; /* Otherwise(str_buf < workp), (str_buf+next_character) may equals (workp). So we continue this loop. */ } /* not matched */ d = backup_d; dend = backup_dend; workp += length + 1; } } /* match with equivalence_class? */ # ifdef _LIBC if (nrules != 0) { const CHAR_T *backup_d = d, *backup_dend = dend; /* Try to match the equivalence class against those known to the collate implementation. */ const int32_t *table; const int32_t *weights; const int32_t *extra; const int32_t *indirect; int32_t idx, idx2; wint_t *cp; size_t len; /* This #include defines a local function! 
*/ table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEWC); weights = (const wint_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTWC); extra = (const wint_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAWC); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTWC); /* Write 1 collating element to str_buf, and get its index. */ idx2 = 0; for (i = 0 ; idx2 == 0 && i < WORK_BUFFER_SIZE - 1; i++) { cp = (wint_t*)str_buf; if (d == dend) { if (dend == end_match_2) break; d = string2; dend = end_match_2; } str_buf[i] = TRANSLATE(*(d+i)); str_buf[i+1] = '\0'; /* sentinel */ idx2 = findidx ((const wint_t**)&cp); } /* Update d, however d will be incremented at char_set_matched:, we decrement d here. */ d = backup_d + ((wchar_t*)cp - (wchar_t*)str_buf - 1); if (d >= dend) { if (dend == end_match_2) d = dend; else { d = string2; dend = end_match_2; } } len = weights[idx2]; for (workp2 = workp + equiv_class_length ; workp < workp2 ; workp++) { idx = (int32_t)*workp; /* We already checked idx != 0 in regex_compile. */ if (idx2 != 0 && len == weights[idx]) { int cnt = 0; while (cnt < len && (weights[idx + 1 + cnt] == weights[idx2 + 1 + cnt])) ++cnt; if (cnt == len) goto char_set_matched; } } /* not matched */ d = backup_d; dend = backup_dend; } else /* (nrules == 0) */ # endif /* If we can't look up collation data, we use wcscoll instead. */ { for (workp2 = workp + equiv_class_length ; workp < workp2 ;) { const CHAR_T *backup_d = d, *backup_dend = dend; # ifdef _LIBC length = __wcslen (workp); # else length = wcslen (workp); # endif /* If wcscoll(the collating symbol, whole string) > 0, any substring of the string never match with the collating symbol. */ # ifdef _LIBC if (__wcscoll (workp, d) > 0) # else if (wcscoll (workp, d) > 0) # endif { workp += length + 1; break; } /* First, we compare the equivalence class with the first character of the string. If it don't match, we add the next character to the compare buffer in turn. */ for (i = 0 ; i < WORK_BUFFER_SIZE - 1 ; i++, d++) { int match; if (d == dend) { if (dend == end_match_2) break; d = string2; dend = end_match_2; } /* add next character to the compare buffer. */ str_buf[i] = TRANSLATE(*d); str_buf[i+1] = '\0'; # ifdef _LIBC match = __wcscoll (workp, str_buf); # else match = wcscoll (workp, str_buf); # endif if (match == 0) goto char_set_matched; if (match < 0) /* (str_buf > workp) indicate (str_buf + X > workp), because for all X (str_buf + X > str_buf). So we don't need continue this loop. */ break; /* Otherwise(str_buf < workp), (str_buf+next_character) may equals (workp). So we continue this loop. */ } /* not matched */ d = backup_d; dend = backup_dend; workp += length + 1; } } /* match with char_range? */ # ifdef _LIBC if (nrules != 0) { uint32_t collseqval; const char *collseq = (const char *) _NL_CURRENT(LC_COLLATE, _NL_COLLATE_COLLSEQWC); collseqval = collseq_table_lookup (collseq, c); for (; workp < p - chars_length ;) { uint32_t start_val, end_val; /* We already compute the collation sequence value of the characters (or collating symbols). */ start_val = (uint32_t) *workp++; /* range_start */ end_val = (uint32_t) *workp++; /* range_end */ if (start_val <= collseqval && collseqval <= end_val) goto char_set_matched; } } else # endif { /* We set range_start_char at str_buf[0], range_end_char at str_buf[4], and compared char at str_buf[2]. 
*/ str_buf[1] = 0; str_buf[2] = c; str_buf[3] = 0; str_buf[5] = 0; for (; workp < p - chars_length ;) { wchar_t *range_start_char, *range_end_char; /* match if (range_start_char <= c <= range_end_char). */ /* If range_start(or end) < 0, we assume -range_start(end) is the offset of the collating symbol which is specified as the character of the range start(end). */ /* range_start */ if (*workp < 0) range_start_char = charset_top - (*workp++); else { str_buf[0] = *workp++; range_start_char = str_buf; } /* range_end */ if (*workp < 0) range_end_char = charset_top - (*workp++); else { str_buf[4] = *workp++; range_end_char = str_buf + 4; } # ifdef _LIBC if (__wcscoll (range_start_char, str_buf+2) <= 0 && __wcscoll (str_buf+2, range_end_char) <= 0) # else if (wcscoll (range_start_char, str_buf+2) <= 0 && wcscoll (str_buf+2, range_end_char) <= 0) # endif goto char_set_matched; } } /* match with char? */ for (; workp < p ; workp++) if (c == *workp) goto char_set_matched; not = !not; char_set_matched: if (not) goto fail; #else /* Cast to `unsigned' instead of `unsigned char' in case the bit list is a full 32 bytes long. */ if (c < (unsigned) (*p * BYTEWIDTH) && p[1 + c / BYTEWIDTH] & (1 << (c % BYTEWIDTH))) not = !not; p += 1 + *p; if (!not) goto fail; #undef WORK_BUFFER_SIZE #endif /* WCHAR */ SET_REGS_MATCHED (); d++; break; } /* The beginning of a group is represented by start_memory. The arguments are the register number in the next byte, and the number of groups inner to this one in the next. The text matched within the group is recorded (in the internal registers data structure) under the register number. */ case start_memory: DEBUG_PRINT3 ("EXECUTING start_memory %ld (%ld):\n", (long int) *p, (long int) p[1]); /* Find out if this group can match the empty string. */ p1 = p; /* To send to group_match_null_string_p. */ if (REG_MATCH_NULL_STRING_P (reg_info[*p]) == MATCH_NULL_UNSET_VALUE) REG_MATCH_NULL_STRING_P (reg_info[*p]) = PREFIX(group_match_null_string_p) (&p1, pend, reg_info); /* Save the position in the string where we were the last time we were at this open-group operator in case the group is operated upon by a repetition operator, e.g., with `(a*)*b' against `ab'; then we want to ignore where we are now in the string in case this attempt to match fails. */ old_regstart[*p] = REG_MATCH_NULL_STRING_P (reg_info[*p]) ? REG_UNSET (regstart[*p]) ? d : regstart[*p] : regstart[*p]; DEBUG_PRINT2 (" old_regstart: %d\n", POINTER_TO_OFFSET (old_regstart[*p])); regstart[*p] = d; DEBUG_PRINT2 (" regstart: %d\n", POINTER_TO_OFFSET (regstart[*p])); IS_ACTIVE (reg_info[*p]) = 1; MATCHED_SOMETHING (reg_info[*p]) = 0; /* Clear this whenever we change the register activity status. */ set_regs_matched_done = 0; /* This is the new highest active register. */ highest_active_reg = *p; /* If nothing was active before, this is the new lowest active register. */ if (lowest_active_reg == NO_LOWEST_ACTIVE_REG) lowest_active_reg = *p; /* Move past the register number and inner group count. */ p += 2; just_past_start_mem = p; break; /* The stop_memory opcode represents the end of a group. Its arguments are the same as start_memory's: the register number, and the number of inner groups. 
*/ case stop_memory: DEBUG_PRINT3 ("EXECUTING stop_memory %ld (%ld):\n", (long int) *p, (long int) p[1]); /* We need to save the string position the last time we were at this close-group operator in case the group is operated upon by a repetition operator, e.g., with `((a*)*(b*)*)*' against `aba'; then we want to ignore where we are now in the string in case this attempt to match fails. */ old_regend[*p] = REG_MATCH_NULL_STRING_P (reg_info[*p]) ? REG_UNSET (regend[*p]) ? d : regend[*p] : regend[*p]; DEBUG_PRINT2 (" old_regend: %d\n", POINTER_TO_OFFSET (old_regend[*p])); regend[*p] = d; DEBUG_PRINT2 (" regend: %d\n", POINTER_TO_OFFSET (regend[*p])); /* This register isn't active anymore. */ IS_ACTIVE (reg_info[*p]) = 0; /* Clear this whenever we change the register activity status. */ set_regs_matched_done = 0; /* If this was the only register active, nothing is active anymore. */ if (lowest_active_reg == highest_active_reg) { lowest_active_reg = NO_LOWEST_ACTIVE_REG; highest_active_reg = NO_HIGHEST_ACTIVE_REG; } else { /* We must scan for the new highest active register, since it isn't necessarily one less than now: consider (a(b)c(d(e)f)g). When group 3 ends, after the f), the new highest active register is 1. */ UCHAR_T r = *p - 1; while (r > 0 && !IS_ACTIVE (reg_info[r])) r--; /* If we end up at register zero, that means that we saved the registers as the result of an `on_failure_jump', not a `start_memory', and we jumped to past the innermost `stop_memory'. For example, in ((.)*) we save registers 1 and 2 as a result of the *, but when we pop back to the second ), we are at the stop_memory 1. Thus, nothing is active. */ if (r == 0) { lowest_active_reg = NO_LOWEST_ACTIVE_REG; highest_active_reg = NO_HIGHEST_ACTIVE_REG; } else highest_active_reg = r; } /* If just failed to match something this time around with a group that's operated on by a repetition operator, try to force exit from the ``loop'', and restore the register information for this group that we had before trying this last match. */ if ((!MATCHED_SOMETHING (reg_info[*p]) || just_past_start_mem == p - 1) && (p + 2) < pend) { boolean is_a_jump_n = false; p1 = p + 2; mcnt = 0; switch ((re_opcode_t) *p1++) { case jump_n: is_a_jump_n = true; case pop_failure_jump: case maybe_pop_jump: case jump: case dummy_failure_jump: EXTRACT_NUMBER_AND_INCR (mcnt, p1); if (is_a_jump_n) p1 += OFFSET_ADDRESS_SIZE; break; default: /* do nothing */ ; } p1 += mcnt; /* If the next operation is a jump backwards in the pattern to an on_failure_jump right before the start_memory corresponding to this stop_memory, exit from the loop by forcing a failure after pushing on the stack the on_failure_jump's jump in the pattern, and d. */ if (mcnt < 0 && (re_opcode_t) *p1 == on_failure_jump && (re_opcode_t) p1[1+OFFSET_ADDRESS_SIZE] == start_memory && p1[2+OFFSET_ADDRESS_SIZE] == *p) { /* If this group ever matched anything, then restore what its registers were before trying this last failed match, e.g., with `(a*)*b' against `ab' for regstart[1], and, e.g., with `((a*)*(b*)*)*' against `aba' for regend[3]. Also restore the registers for inner groups for, e.g., `((a*)(b*))*' against `aba' (register 3 would otherwise get trashed). */ if (EVER_MATCHED_SOMETHING (reg_info[*p])) { unsigned r; EVER_MATCHED_SOMETHING (reg_info[*p]) = 0; /* Restore this and inner groups' (if any) registers. */ for (r = *p; r < (unsigned) *p + (unsigned) *(p + 1); r++) { regstart[r] = old_regstart[r]; /* xx why this test? 
*/ if (old_regend[r] >= regstart[r]) regend[r] = old_regend[r]; } } p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); PUSH_FAILURE_POINT (p1 + mcnt, d, -2); goto fail; } } /* Move past the register number and the inner group count. */ p += 2; break; /* \<digit> has been turned into a `duplicate' command which is followed by the numeric value of <digit> as the register number. */ case duplicate: { register const CHAR_T *d2, *dend2; int regno = *p++; /* Get which register to match against. */ DEBUG_PRINT2 ("EXECUTING duplicate %d.\n", regno); /* Can't back reference a group which we've never matched. */ if (REG_UNSET (regstart[regno]) || REG_UNSET (regend[regno])) goto fail; /* Where in input to try to start matching. */ d2 = regstart[regno]; /* Where to stop matching; if both the place to start and the place to stop matching are in the same string, then set to the place to stop, otherwise, for now have to use the end of the first string. */ dend2 = ((FIRST_STRING_P (regstart[regno]) == FIRST_STRING_P (regend[regno])) ? regend[regno] : end_match_1); for (;;) { /* If necessary, advance to next segment in register contents. */ while (d2 == dend2) { if (dend2 == end_match_2) break; if (dend2 == regend[regno]) break; /* End of string1 => advance to string2. */ d2 = string2; dend2 = regend[regno]; } /* At end of register contents => success */ if (d2 == dend2) break; /* If necessary, advance to next segment in data. */ PREFETCH (); /* How many characters left in this segment to match. */ mcnt = dend - d; /* Want how many consecutive characters we can match in one shot, so, if necessary, adjust the count. */ if (mcnt > dend2 - d2) mcnt = dend2 - d2; /* Compare that many; failure if mismatch, else move past them. */ if (translate ? PREFIX(bcmp_translate) (d, d2, mcnt, translate) : memcmp (d, d2, mcnt*sizeof(UCHAR_T))) goto fail; d += mcnt, d2 += mcnt; /* Do this because we've matched some characters. */ SET_REGS_MATCHED (); } } break; /* begline matches the empty string at the beginning of the string (unless `not_bol' is set in `bufp'), and, if `newline_anchor' is set, after newlines. */ case begline: DEBUG_PRINT1 ("EXECUTING begline.\n"); if (AT_STRINGS_BEG (d)) { if (!bufp->not_bol) break; } else if (d[-1] == '\n' && bufp->newline_anchor) { break; } /* In all other cases, we fail. */ goto fail; /* endline is the dual of begline. */ case endline: DEBUG_PRINT1 ("EXECUTING endline.\n"); if (AT_STRINGS_END (d)) { if (!bufp->not_eol) break; } /* We have to ``prefetch'' the next character. */ else if ((d == end1 ? *string2 : *d) == '\n' && bufp->newline_anchor) { break; } goto fail; /* Match at the very beginning of the data. */ case begbuf: DEBUG_PRINT1 ("EXECUTING begbuf.\n"); if (AT_STRINGS_BEG (d)) break; goto fail; /* Match at the very end of the data. */ case endbuf: DEBUG_PRINT1 ("EXECUTING endbuf.\n"); if (AT_STRINGS_END (d)) break; goto fail; /* on_failure_keep_string_jump is used to optimize `.*\n'. It pushes NULL as the value for the string on the stack. Then `pop_failure_point' will keep the current value for the string, instead of restoring it. To see why, consider matching `foo\nbar' against `.*\n'. The .* matches the foo; then the . fails against the \n. But the next thing we want to do is match the \n against the \n; if we restored the string value, we would be back at the foo. Because this is used only in specific cases, we don't need to check all the things that `on_failure_jump' does, to make sure the right things get saved on the stack. Hence we don't share its code. 
The only reason to push anything on the stack at all is that otherwise we would have to change `anychar's code to do something besides goto fail in this case; that seems worse than this. */ case on_failure_keep_string_jump: DEBUG_PRINT1 ("EXECUTING on_failure_keep_string_jump"); EXTRACT_NUMBER_AND_INCR (mcnt, p); #ifdef _LIBC DEBUG_PRINT3 (" %d (to %p):\n", mcnt, p + mcnt); #else DEBUG_PRINT3 (" %d (to 0x%x):\n", mcnt, p + mcnt); #endif PUSH_FAILURE_POINT (p + mcnt, NULL, -2); break; /* Uses of on_failure_jump: Each alternative starts with an on_failure_jump that points to the beginning of the next alternative. Each alternative except the last ends with a jump that in effect jumps past the rest of the alternatives. (They really jump to the ending jump of the following alternative, because tensioning these jumps is a hassle.) Repeats start with an on_failure_jump that points past both the repetition text and either the following jump or pop_failure_jump back to this on_failure_jump. */ case on_failure_jump: on_failure: DEBUG_PRINT1 ("EXECUTING on_failure_jump"); EXTRACT_NUMBER_AND_INCR (mcnt, p); #ifdef _LIBC DEBUG_PRINT3 (" %d (to %p)", mcnt, p + mcnt); #else DEBUG_PRINT3 (" %d (to 0x%x)", mcnt, p + mcnt); #endif /* If this on_failure_jump comes right before a group (i.e., the original * applied to a group), save the information for that group and all inner ones, so that if we fail back to this point, the group's information will be correct. For example, in \(a*\)*\1, we need the preceding group, and in \(zz\(a*\)b*\)\2, we need the inner group. */ /* We can't use `p' to check ahead because we push a failure point to `p + mcnt' after we do this. */ p1 = p; /* We need to skip no_op's before we look for the start_memory in case this on_failure_jump is happening as the result of a completed succeed_n, as in \(a\)\{1,3\}b\1 against aba. */ while (p1 < pend && (re_opcode_t) *p1 == no_op) p1++; if (p1 < pend && (re_opcode_t) *p1 == start_memory) { /* We have a new highest active register now. This will get reset at the start_memory we are about to get to, but we will have saved all the registers relevant to this repetition op, as described above. */ highest_active_reg = *(p1 + 1) + *(p1 + 2); if (lowest_active_reg == NO_LOWEST_ACTIVE_REG) lowest_active_reg = *(p1 + 1); } DEBUG_PRINT1 (":\n"); PUSH_FAILURE_POINT (p + mcnt, d, -2); break; /* A smart repeat ends with `maybe_pop_jump'. We change it to either `pop_failure_jump' or `jump'. */ case maybe_pop_jump: EXTRACT_NUMBER_AND_INCR (mcnt, p); DEBUG_PRINT2 ("EXECUTING maybe_pop_jump %d.\n", mcnt); { register UCHAR_T *p2 = p; /* Compare the beginning of the repeat with what in the pattern follows its end. If we can establish that there is nothing that they would both match, i.e., that we would have to backtrack because of (as in, e.g., `a*a') then we can change to pop_failure_jump, because we'll never have to backtrack. This is not true in the case of alternatives: in `(a|ab)*' we do need to backtrack to the `ab' alternative (e.g., if the string was `ab'). But instead of trying to detect that here, the alternative has put on a dummy failure point which is what we will end up popping. */ /* Skip over open/close-group commands. If what follows this loop is a ...+ construct, look at what begins its body, since we will have to match at least one of that. 
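As a concrete instance of the test below: in `x*y' the opcode following the loop is an exactn for `y', which the loop body's exactn for `x' can never match, so the maybe_pop_jump gets rewritten into pop_failure_jump and each iteration discards its failure point as it goes. In `a*a' no such guarantee exists, so the opcode is instead turned into a plain jump and the failure points remain available for backtracking. (Alternatives such as `(a|ab)*' are not analyzed here at all; as noted above, they protect themselves with a dummy failure point.)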
*/ while (1) { if (p2 + 2 < pend && ((re_opcode_t) *p2 == stop_memory || (re_opcode_t) *p2 == start_memory)) p2 += 3; else if (p2 + 2 + 2 * OFFSET_ADDRESS_SIZE < pend && (re_opcode_t) *p2 == dummy_failure_jump) p2 += 2 + 2 * OFFSET_ADDRESS_SIZE; else break; } p1 = p + mcnt; /* p1[0] ... p1[2] are the `on_failure_jump' corresponding to the `maybe_finalize_jump' of this case. Examine what follows. */ /* If we're at the end of the pattern, we can change. */ if (p2 == pend) { /* Consider what happens when matching ":\(.*\)" against ":/". I don't really understand this code yet. */ p[-(1+OFFSET_ADDRESS_SIZE)] = (UCHAR_T) pop_failure_jump; DEBUG_PRINT1 (" End of pattern: change to `pop_failure_jump'.\n"); } else if ((re_opcode_t) *p2 == exactn #ifdef MBS_SUPPORT || (re_opcode_t) *p2 == exactn_bin #endif || (bufp->newline_anchor && (re_opcode_t) *p2 == endline)) { register UCHAR_T c = *p2 == (UCHAR_T) endline ? '\n' : p2[2]; if (((re_opcode_t) p1[1+OFFSET_ADDRESS_SIZE] == exactn #ifdef MBS_SUPPORT || (re_opcode_t) p1[1+OFFSET_ADDRESS_SIZE] == exactn_bin #endif ) && p1[3+OFFSET_ADDRESS_SIZE] != c) { p[-(1+OFFSET_ADDRESS_SIZE)] = (UCHAR_T) pop_failure_jump; #ifdef WCHAR DEBUG_PRINT3 (" %C != %C => pop_failure_jump.\n", (wint_t) c, (wint_t) p1[3+OFFSET_ADDRESS_SIZE]); #else DEBUG_PRINT3 (" %c != %c => pop_failure_jump.\n", (char) c, (char) p1[3+OFFSET_ADDRESS_SIZE]); #endif } #ifndef WCHAR else if ((re_opcode_t) p1[3] == charset || (re_opcode_t) p1[3] == charset_not) { int not = (re_opcode_t) p1[3] == charset_not; if (c < (unsigned) (p1[4] * BYTEWIDTH) && p1[5 + c / BYTEWIDTH] & (1 << (c % BYTEWIDTH))) not = !not; /* `not' is equal to 1 if c would match, which means that we can't change to pop_failure_jump. */ if (!not) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } } #endif /* not WCHAR */ } #ifndef WCHAR else if ((re_opcode_t) *p2 == charset) { /* We win if the first character of the loop is not part of the charset. */ if ((re_opcode_t) p1[3] == exactn && ! ((int) p2[1] * BYTEWIDTH > (int) p1[5] && (p2[2 + p1[5] / BYTEWIDTH] & (1 << (p1[5] % BYTEWIDTH))))) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } else if ((re_opcode_t) p1[3] == charset_not) { int idx; /* We win if the charset_not inside the loop lists every character listed in the charset after. */ for (idx = 0; idx < (int) p2[1]; idx++) if (! (p2[2 + idx] == 0 || (idx < (int) p1[4] && ((p2[2 + idx] & ~ p1[5 + idx]) == 0)))) break; if (idx == p2[1]) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } } else if ((re_opcode_t) p1[3] == charset) { int idx; /* We win if the charset inside the loop has no overlap with the one after the loop. */ for (idx = 0; idx < (int) p2[1] && idx < (int) p1[4]; idx++) if ((p2[2 + idx] & p1[5 + idx]) != 0) break; if (idx == p2[1] || idx == p1[4]) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } } } #endif /* not WCHAR */ } p -= OFFSET_ADDRESS_SIZE; /* Point at relative address again. */ if ((re_opcode_t) p[-1] != pop_failure_jump) { p[-1] = (UCHAR_T) jump; DEBUG_PRINT1 (" Match => jump.\n"); goto unconditional_jump; } /* Note fall through. */ /* The end of a simple repeat has a pop_failure_jump back to its matching on_failure_jump, where the latter will push a failure point. 
The pop_failure_jump takes off failure points put on by this pop_failure_jump's matching on_failure_jump; we got through the pattern to here from the matching on_failure_jump, so didn't fail. */ case pop_failure_jump: { /* We need to pass separate storage for the lowest and highest registers, even though we don't care about the actual values. Otherwise, we will restore only one register from the stack, since lowest will == highest in `pop_failure_point'. */ active_reg_t dummy_low_reg, dummy_high_reg; UCHAR_T *pdummy = NULL; const CHAR_T *sdummy = NULL; DEBUG_PRINT1 ("EXECUTING pop_failure_jump.\n"); POP_FAILURE_POINT (sdummy, pdummy, dummy_low_reg, dummy_high_reg, reg_dummy, reg_dummy, reg_info_dummy); } /* Note fall through. */ unconditional_jump: #ifdef _LIBC DEBUG_PRINT2 ("\n%p: ", p); #else DEBUG_PRINT2 ("\n0x%x: ", p); #endif /* Note fall through. */ /* Unconditionally jump (without popping any failure points). */ case jump: EXTRACT_NUMBER_AND_INCR (mcnt, p); /* Get the amount to jump. */ DEBUG_PRINT2 ("EXECUTING jump %d ", mcnt); p += mcnt; /* Do the jump. */ #ifdef _LIBC DEBUG_PRINT2 ("(to %p).\n", p); #else DEBUG_PRINT2 ("(to 0x%x).\n", p); #endif break; /* We need this opcode so we can detect where alternatives end in `group_match_null_string_p' et al. */ case jump_past_alt: DEBUG_PRINT1 ("EXECUTING jump_past_alt.\n"); goto unconditional_jump; /* Normally, the on_failure_jump pushes a failure point, which then gets popped at pop_failure_jump. We will end up at pop_failure_jump, also, and with a pattern of, say, `a+', we are skipping over the on_failure_jump, so we have to push something meaningless for pop_failure_jump to pop. */ case dummy_failure_jump: DEBUG_PRINT1 ("EXECUTING dummy_failure_jump.\n"); /* It doesn't matter what we push for the string here. What the code at `fail' tests is the value for the pattern. */ PUSH_FAILURE_POINT (NULL, NULL, -2); goto unconditional_jump; /* At the end of an alternative, we need to push a dummy failure point in case we are followed by a `pop_failure_jump', because we don't want the failure point for the alternative to be popped. For example, matching `(a|ab)*' against `aab' requires that we match the `ab' alternative. */ case push_dummy_failure: DEBUG_PRINT1 ("EXECUTING push_dummy_failure.\n"); /* See comments just above at `dummy_failure_jump' about the two zeroes. */ PUSH_FAILURE_POINT (NULL, NULL, -2); break; /* Have to succeed matching what follows at least n times. After that, handle like `on_failure_jump'. */ case succeed_n: EXTRACT_NUMBER (mcnt, p + OFFSET_ADDRESS_SIZE); DEBUG_PRINT2 ("EXECUTING succeed_n %d.\n", mcnt); assert (mcnt >= 0); /* Originally, this is how many times we HAVE to succeed. */ if (mcnt > 0) { mcnt--; p += OFFSET_ADDRESS_SIZE; STORE_NUMBER_AND_INCR (p, mcnt); #ifdef _LIBC DEBUG_PRINT3 (" Setting %p to %d.\n", p - OFFSET_ADDRESS_SIZE , mcnt); #else DEBUG_PRINT3 (" Setting 0x%x to %d.\n", p - OFFSET_ADDRESS_SIZE , mcnt); #endif } else if (mcnt == 0) { #ifdef _LIBC DEBUG_PRINT2 (" Setting two bytes from %p to no_op.\n", p + OFFSET_ADDRESS_SIZE); #else DEBUG_PRINT2 (" Setting two bytes from 0x%x to no_op.\n", p + OFFSET_ADDRESS_SIZE); #endif /* _LIBC */ #ifdef WCHAR p[1] = (UCHAR_T) no_op; #else p[2] = (UCHAR_T) no_op; p[3] = (UCHAR_T) no_op; #endif /* WCHAR */ goto on_failure; } break; case jump_n: EXTRACT_NUMBER (mcnt, p + OFFSET_ADDRESS_SIZE); DEBUG_PRINT2 ("EXECUTING jump_n %d.\n", mcnt); /* Originally, this is how many times we CAN jump. 
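(To make the counters concrete: for an interval such as `a\{2,4\}' the compiler arranges, roughly, for the succeed_n above to demand the two required repetitions and for this jump_n to permit at most the two optional ones; set_number_at, handled next, is what re-arms both counters when an enclosing loop comes back around. This is a sketch of the intent, not a literal dump of the compiled pattern.)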
*/ if (mcnt) { mcnt--; STORE_NUMBER (p + OFFSET_ADDRESS_SIZE, mcnt); #ifdef _LIBC DEBUG_PRINT3 (" Setting %p to %d.\n", p + OFFSET_ADDRESS_SIZE, mcnt); #else DEBUG_PRINT3 (" Setting 0x%x to %d.\n", p + OFFSET_ADDRESS_SIZE, mcnt); #endif /* _LIBC */ goto unconditional_jump; } /* If don't have to jump any more, skip over the rest of command. */ else p += 2 * OFFSET_ADDRESS_SIZE; break; case set_number_at: { DEBUG_PRINT1 ("EXECUTING set_number_at.\n"); EXTRACT_NUMBER_AND_INCR (mcnt, p); p1 = p + mcnt; EXTRACT_NUMBER_AND_INCR (mcnt, p); #ifdef _LIBC DEBUG_PRINT3 (" Setting %p to %d.\n", p1, mcnt); #else DEBUG_PRINT3 (" Setting 0x%x to %d.\n", p1, mcnt); #endif STORE_NUMBER (p1, mcnt); break; } #if 0 /* The DEC Alpha C compiler 3.x generates incorrect code for the test WORDCHAR_P (d - 1) != WORDCHAR_P (d) in the expansion of AT_WORD_BOUNDARY, so this code is disabled. Expanding the macro and introducing temporary variables works around the bug. */ case wordbound: DEBUG_PRINT1 ("EXECUTING wordbound.\n"); if (AT_WORD_BOUNDARY (d)) break; goto fail; case notwordbound: DEBUG_PRINT1 ("EXECUTING notwordbound.\n"); if (AT_WORD_BOUNDARY (d)) goto fail; break; #else case wordbound: { boolean prevchar, thischar; DEBUG_PRINT1 ("EXECUTING wordbound.\n"); if (AT_STRINGS_BEG (d) || AT_STRINGS_END (d)) break; prevchar = WORDCHAR_P (d - 1); thischar = WORDCHAR_P (d); if (prevchar != thischar) break; goto fail; } case notwordbound: { boolean prevchar, thischar; DEBUG_PRINT1 ("EXECUTING notwordbound.\n"); if (AT_STRINGS_BEG (d) || AT_STRINGS_END (d)) goto fail; prevchar = WORDCHAR_P (d - 1); thischar = WORDCHAR_P (d); if (prevchar != thischar) goto fail; break; } #endif case wordbeg: DEBUG_PRINT1 ("EXECUTING wordbeg.\n"); if (!AT_STRINGS_END (d) && WORDCHAR_P (d) && (AT_STRINGS_BEG (d) || !WORDCHAR_P (d - 1))) break; goto fail; case wordend: DEBUG_PRINT1 ("EXECUTING wordend.\n"); if (!AT_STRINGS_BEG (d) && WORDCHAR_P (d - 1) && (AT_STRINGS_END (d) || !WORDCHAR_P (d))) break; goto fail; #ifdef emacs case before_dot: DEBUG_PRINT1 ("EXECUTING before_dot.\n"); if (PTR_CHAR_POS ((unsigned char *) d) >= point) goto fail; break; case at_dot: DEBUG_PRINT1 ("EXECUTING at_dot.\n"); if (PTR_CHAR_POS ((unsigned char *) d) != point) goto fail; break; case after_dot: DEBUG_PRINT1 ("EXECUTING after_dot.\n"); if (PTR_CHAR_POS ((unsigned char *) d) <= point) goto fail; break; case syntaxspec: DEBUG_PRINT2 ("EXECUTING syntaxspec %d.\n", mcnt); mcnt = *p++; goto matchsyntax; case wordchar: DEBUG_PRINT1 ("EXECUTING Emacs wordchar.\n"); mcnt = (int) Sword; matchsyntax: PREFETCH (); /* Can't use *d++ here; SYNTAX may be an unsafe macro. */ d++; if (SYNTAX (d[-1]) != (enum syntaxcode) mcnt) goto fail; SET_REGS_MATCHED (); break; case notsyntaxspec: DEBUG_PRINT2 ("EXECUTING notsyntaxspec %d.\n", mcnt); mcnt = *p++; goto matchnotsyntax; case notwordchar: DEBUG_PRINT1 ("EXECUTING Emacs notwordchar.\n"); mcnt = (int) Sword; matchnotsyntax: PREFETCH (); /* Can't use *d++ here; SYNTAX may be an unsafe macro. */ d++; if (SYNTAX (d[-1]) == (enum syntaxcode) mcnt) goto fail; SET_REGS_MATCHED (); break; #else /* not emacs */ case wordchar: DEBUG_PRINT1 ("EXECUTING non-Emacs wordchar.\n"); PREFETCH (); if (!WORDCHAR_P (d)) goto fail; SET_REGS_MATCHED (); d++; break; case notwordchar: DEBUG_PRINT1 ("EXECUTING non-Emacs notwordchar.\n"); PREFETCH (); if (WORDCHAR_P (d)) goto fail; SET_REGS_MATCHED (); d++; break; #endif /* not emacs */ default: abort (); } continue; /* Successfully executed one pattern command; keep going. 
*/ /* We goto here if a matching operation fails. */ fail: if (!FAIL_STACK_EMPTY ()) { /* A restart point is known. Restore to that state. */ DEBUG_PRINT1 ("\nFAIL:\n"); POP_FAILURE_POINT (d, p, lowest_active_reg, highest_active_reg, regstart, regend, reg_info); /* If this failure point is a dummy, try the next one. */ if (!p) goto fail; /* If we failed to the end of the pattern, don't examine *p. */ assert (p <= pend); if (p < pend) { boolean is_a_jump_n = false; /* If failed to a backwards jump that's part of a repetition loop, need to pop this failure point and use the next one. */ switch ((re_opcode_t) *p) { case jump_n: is_a_jump_n = true; case maybe_pop_jump: case pop_failure_jump: case jump: p1 = p + 1; EXTRACT_NUMBER_AND_INCR (mcnt, p1); p1 += mcnt; if ((is_a_jump_n && (re_opcode_t) *p1 == succeed_n) || (!is_a_jump_n && (re_opcode_t) *p1 == on_failure_jump)) goto fail; break; default: /* do nothing */ ; } } if (d >= string1 && d <= end1) dend = end_match_1; } else break; /* Matching at this starting point really fails. */ } /* for (;;) */ if (best_regs_set) goto restore_best_regs; FREE_VARIABLES (); return -1; /* Failure to match. */ } /* re_match_2 */ /* Subroutine definitions for re_match_2. */ /* We are passed P pointing to a register number after a start_memory. Return true if the pattern up to the corresponding stop_memory can match the empty string, and false otherwise. If we find the matching stop_memory, sets P to point to one past its number. Otherwise, sets P to an undefined byte less than or equal to END. We don't handle duplicates properly (yet). */ static boolean PREFIX(group_match_null_string_p) (p, end, reg_info) UCHAR_T **p, *end; PREFIX(register_info_type) *reg_info; { int mcnt; /* Point to after the args to the start_memory. */ UCHAR_T *p1 = *p + 2; while (p1 < end) { /* Skip over opcodes that can match nothing, and return true or false, as appropriate, when we get to one that can't, or to the matching stop_memory. */ switch ((re_opcode_t) *p1) { /* Could be either a loop or a series of alternatives. */ case on_failure_jump: p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); /* If the next operation is not a jump backwards in the pattern. */ if (mcnt >= 0) { /* Go through the on_failure_jumps of the alternatives, seeing if any of the alternatives cannot match nothing. The last alternative starts with only a jump, whereas the rest start with on_failure_jump and end with a jump, e.g., here is the pattern for `a|b|c': /on_failure_jump/0/6/exactn/1/a/jump_past_alt/0/6 /on_failure_jump/0/6/exactn/1/b/jump_past_alt/0/3 /exactn/1/c So, we have to first go through the first (n-1) alternatives and then deal with the last one separately. */ /* Deal with the first (n-1) alternatives, which start with an on_failure_jump (see above) that jumps to right past a jump_past_alt. */ while ((re_opcode_t) p1[mcnt-(1+OFFSET_ADDRESS_SIZE)] == jump_past_alt) { /* `mcnt' holds how many bytes long the alternative is, including the ending `jump_past_alt' and its number. */ if (!PREFIX(alt_match_null_string_p) (p1, p1 + mcnt - (1 + OFFSET_ADDRESS_SIZE), reg_info)) return false; /* Move to right after this alternative, including the jump_past_alt. */ p1 += mcnt; /* Break if it's the beginning of an n-th alternative that doesn't begin with an on_failure_jump. */ if ((re_opcode_t) *p1 != on_failure_jump) break; /* Still have to check that it's not an n-th alternative that starts with an on_failure_jump. 
*/ p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); if ((re_opcode_t) p1[mcnt-(1+OFFSET_ADDRESS_SIZE)] != jump_past_alt) { /* Get to the beginning of the n-th alternative. */ p1 -= 1 + OFFSET_ADDRESS_SIZE; break; } } /* Deal with the last alternative: go back and get number of the `jump_past_alt' just before it. `mcnt' contains the length of the alternative. */ EXTRACT_NUMBER (mcnt, p1 - OFFSET_ADDRESS_SIZE); if (!PREFIX(alt_match_null_string_p) (p1, p1 + mcnt, reg_info)) return false; p1 += mcnt; /* Get past the n-th alternative. */ } /* if mcnt > 0 */ break; case stop_memory: assert (p1[1] == **p); *p = p1 + 2; return true; default: if (!PREFIX(common_op_match_null_string_p) (&p1, end, reg_info)) return false; } } /* while p1 < end */ return false; } /* group_match_null_string_p */ /* Similar to group_match_null_string_p, but doesn't deal with alternatives: It expects P to be the first byte of a single alternative and END one byte past the last. The alternative can contain groups. */ static boolean PREFIX(alt_match_null_string_p) (p, end, reg_info) UCHAR_T *p, *end; PREFIX(register_info_type) *reg_info; { int mcnt; UCHAR_T *p1 = p; while (p1 < end) { /* Skip over opcodes that can match nothing, and break when we get to one that can't. */ switch ((re_opcode_t) *p1) { /* It's a loop. */ case on_failure_jump: p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); p1 += mcnt; break; default: if (!PREFIX(common_op_match_null_string_p) (&p1, end, reg_info)) return false; } } /* while p1 < end */ return true; } /* alt_match_null_string_p */ /* Deals with the ops common to group_match_null_string_p and alt_match_null_string_p. Sets P to one after the op and its arguments, if any. */ static boolean PREFIX(common_op_match_null_string_p) (p, end, reg_info) UCHAR_T **p, *end; PREFIX(register_info_type) *reg_info; { int mcnt; boolean ret; int reg_no; UCHAR_T *p1 = *p; switch ((re_opcode_t) *p1++) { case no_op: case begline: case endline: case begbuf: case endbuf: case wordbeg: case wordend: case wordbound: case notwordbound: #ifdef emacs case before_dot: case at_dot: case after_dot: #endif break; case start_memory: reg_no = *p1; assert (reg_no > 0 && reg_no <= MAX_REGNUM); ret = PREFIX(group_match_null_string_p) (&p1, end, reg_info); /* Have to set this here in case we're checking a group which contains a group and a back reference to it. */ if (REG_MATCH_NULL_STRING_P (reg_info[reg_no]) == MATCH_NULL_UNSET_VALUE) REG_MATCH_NULL_STRING_P (reg_info[reg_no]) = ret; if (!ret) return false; break; /* If this is an optimized succeed_n for zero times, make the jump. */ case jump: EXTRACT_NUMBER_AND_INCR (mcnt, p1); if (mcnt >= 0) p1 += mcnt; else return false; break; case succeed_n: /* Get to the number of times to succeed. */ p1 += OFFSET_ADDRESS_SIZE; EXTRACT_NUMBER_AND_INCR (mcnt, p1); if (mcnt == 0) { p1 -= 2 * OFFSET_ADDRESS_SIZE; EXTRACT_NUMBER_AND_INCR (mcnt, p1); p1 += mcnt; } else return false; break; case duplicate: if (!REG_MATCH_NULL_STRING_P (reg_info[*p1])) return false; break; case set_number_at: p1 += 2 * OFFSET_ADDRESS_SIZE; default: /* All other opcodes mean we cannot match the empty string. */ return false; } *p = p1; return true; } /* common_op_match_null_string_p */ /* Return zero if TRANSLATE[S1] and TRANSLATE[S2] are identical for LEN bytes; nonzero otherwise. 
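As an illustration, consider the kind of case-folding table that regcomp builds below for REG_ICASE (a sketch only; the variable names here are invented and the loop mirrors the one in regcomp):

       char table[CHAR_SET_SIZE];
       unsigned i;
       for (i = 0; i < CHAR_SET_SIZE; i++)      // fold upper case to lower case
         table[i] = ISUPPER (i) ? TOLOWER (i) : (int) i;
       // After translation "Foo" and "fOO" are byte-identical, so
       // bcmp_translate ("Foo", "fOO", 3, table) returns 0, while
       // bcmp_translate ("Foo", "bar", 3, table) returns 1.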
*/ static int PREFIX(bcmp_translate) (s1, s2, len, translate) const CHAR_T *s1, *s2; register int len; RE_TRANSLATE_TYPE translate; { register const UCHAR_T *p1 = (const UCHAR_T *) s1; register const UCHAR_T *p2 = (const UCHAR_T *) s2; while (len) { #ifdef WCHAR if (((*p1<=0xff)?translate[*p1++]:*p1++) != ((*p2<=0xff)?translate[*p2++]:*p2++)) return 1; #else /* BYTE */ if (translate[*p1++] != translate[*p2++]) return 1; #endif /* WCHAR */ len--; } return 0; } #else /* not INSIDE_RECURSION */ /* Entry points for GNU code. */ /* re_compile_pattern is the GNU regular expression compiler: it compiles PATTERN (of length SIZE) and puts the result in BUFP. Returns 0 if the pattern was valid, otherwise an error string. Assumes the `allocated' (and perhaps `buffer') and `translate' fields are set in BUFP on entry. We call regex_compile to do the actual compilation. */ const char * re_compile_pattern (pattern, length, bufp) const char *pattern; size_t length; struct re_pattern_buffer *bufp; { reg_errcode_t ret; /* GNU code is written to assume at least RE_NREGS registers will be set (and at least one extra will be -1). */ bufp->regs_allocated = REGS_UNALLOCATED; /* And GNU code determines whether or not to get register information by passing null for the REGS argument to re_match, etc., not by setting no_sub. */ bufp->no_sub = 0; /* Match anchors at newline. */ bufp->newline_anchor = 1; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) ret = wcs_regex_compile (pattern, length, re_syntax_options, bufp); else # endif ret = byte_regex_compile (pattern, length, re_syntax_options, bufp); if (!ret) return NULL; return gettext (re_error_msgid[(int) ret]); } #ifdef _LIBC weak_alias (__re_compile_pattern, re_compile_pattern) #endif /* Entry points compatible with 4.2 BSD regex library. We don't define them unless specifically requested. */ #if defined _REGEX_RE_COMP || defined _LIBC /* BSD has one and only one pattern buffer. */ static struct re_pattern_buffer re_comp_buf; char * #ifdef _LIBC /* Make these definitions weak in libc, so POSIX programs can redefine these names if they don't use our functions, and still use regcomp/regexec below without link errors. */ weak_function #endif re_comp (s) const char *s; { reg_errcode_t ret; if (!s) { if (!re_comp_buf.buffer) return gettext ("No previous regular expression"); return 0; } if (!re_comp_buf.buffer) { re_comp_buf.buffer = (unsigned char *) malloc (200); if (re_comp_buf.buffer == NULL) return (char *) gettext (re_error_msgid[(int) REG_ESPACE]); re_comp_buf.allocated = 200; re_comp_buf.fastmap = (char *) malloc (1 << BYTEWIDTH); if (re_comp_buf.fastmap == NULL) return (char *) gettext (re_error_msgid[(int) REG_ESPACE]); } /* Since `re_exec' always passes NULL for the `regs' argument, we don't need to initialize the pattern buffer fields which affect it. */ /* Match anchors at newlines. */ re_comp_buf.newline_anchor = 1; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) ret = wcs_regex_compile (s, strlen (s), re_syntax_options, &re_comp_buf); else # endif ret = byte_regex_compile (s, strlen (s), re_syntax_options, &re_comp_buf); if (!ret) return NULL; /* Yes, we're discarding `const' here if !HAVE_LIBINTL. */ return (char *) gettext (re_error_msgid[(int) ret]); } int #ifdef _LIBC weak_function #endif re_exec (s) const char *s; { const int len = strlen (s); return 0 <= re_search (&re_comp_buf, s, len, 0, len, (struct re_registers *) 0); } #endif /* _REGEX_RE_COMP */ /* POSIX.2 functions. Don't define these for Emacs. 
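Before those, here is roughly how the 4.2 BSD interface defined above is meant to be driven (an illustrative sketch only; it is compiled only when _REGEX_RE_COMP or _LIBC is defined, the pattern and string are invented, and <stdio.h> is assumed for the diagnostic):

       char *err = re_comp ("ab*c");        // compile into the one static buffer
       if (err != 0)
         fprintf (stderr, "re_comp: %s\n", err);
       else if (re_exec ("xxabbbcyy"))      // nonzero means a match was found somewhere
         puts ("matched");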
*/ #ifndef emacs /* regcomp takes a regular expression as a string and compiles it. PREG is a regex_t *. We do not expect any fields to be initialized, since POSIX says we shouldn't. Thus, we set `buffer' to the compiled pattern; `used' to the length of the compiled pattern; `syntax' to RE_SYNTAX_POSIX_EXTENDED if the REG_EXTENDED bit in CFLAGS is set; otherwise, to RE_SYNTAX_POSIX_BASIC; `newline_anchor' to REG_NEWLINE being set in CFLAGS; `fastmap' to an allocated space for the fastmap; `fastmap_accurate' to zero; `re_nsub' to the number of subexpressions in PATTERN. PATTERN is the address of the pattern string. CFLAGS is a series of bits which affect compilation. If REG_EXTENDED is set, we use POSIX extended syntax; otherwise, we use POSIX basic syntax. If REG_NEWLINE is set, then . and [^...] don't match newline. Also, regexec will try a match beginning after every newline. If REG_ICASE is set, then we considers upper- and lowercase versions of letters to be equivalent when matching. If REG_NOSUB is set, then when PREG is passed to regexec, that routine will report only success or failure, and nothing about the registers. It returns 0 if it succeeds, nonzero if it doesn't. (See regex.h for the return codes and their meanings.) */ int regcomp (preg, pattern, cflags) regex_t *preg; const char *pattern; int cflags; { reg_errcode_t ret; reg_syntax_t syntax = (cflags & REG_EXTENDED) ? RE_SYNTAX_POSIX_EXTENDED : RE_SYNTAX_POSIX_BASIC; /* regex_compile will allocate the space for the compiled pattern. */ preg->buffer = 0; preg->allocated = 0; preg->used = 0; /* Try to allocate space for the fastmap. */ preg->fastmap = (char *) malloc (1 << BYTEWIDTH); if (cflags & REG_ICASE) { unsigned i; preg->translate = (RE_TRANSLATE_TYPE) malloc (CHAR_SET_SIZE * sizeof (*(RE_TRANSLATE_TYPE)0)); if (preg->translate == NULL) return (int) REG_ESPACE; /* Map uppercase characters to corresponding lowercase ones. */ for (i = 0; i < CHAR_SET_SIZE; i++) preg->translate[i] = ISUPPER (i) ? TOLOWER (i) : (int) i; } else preg->translate = NULL; /* If REG_NEWLINE is set, newlines are treated differently. */ if (cflags & REG_NEWLINE) { /* REG_NEWLINE implies neither . nor [^...] match newline. */ syntax &= ~RE_DOT_NEWLINE; syntax |= RE_HAT_LISTS_NOT_NEWLINE; /* It also changes the matching behavior. */ preg->newline_anchor = 1; } else preg->newline_anchor = 0; preg->no_sub = !!(cflags & REG_NOSUB); /* POSIX says a null character in the pattern terminates it, so we can use strlen here in compiling the pattern. */ # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) ret = wcs_regex_compile (pattern, strlen (pattern), syntax, preg); else # endif ret = byte_regex_compile (pattern, strlen (pattern), syntax, preg); /* POSIX doesn't distinguish between an unmatched open-group and an unmatched close-group: both are REG_EPAREN. */ if (ret == REG_ERPAREN) ret = REG_EPAREN; if (ret == REG_NOERROR && preg->fastmap) { /* Compute the fastmap now, since regexec cannot modify the pattern buffer. */ if (re_compile_fastmap (preg) == -2) { /* Some error occurred while computing the fastmap, just forget about it. */ free (preg->fastmap); preg->fastmap = NULL; } } return (int) ret; } #ifdef _LIBC weak_alias (__regcomp, regcomp) #endif /* regexec searches for a given pattern, specified by PREG, in the string STRING. If NMATCH is zero or REG_NOSUB was set in the cflags argument to `regcomp', we ignore PMATCH. Otherwise, we assume PMATCH has at least NMATCH elements, and we set them to the offsets of the corresponding matched substrings. 
EFLAGS specifies `execution flags' which affect matching: if REG_NOTBOL is set, then ^ does not match at the beginning of the string; if REG_NOTEOL is set, then $ does not match at the end. We return 0 if we find a match and REG_NOMATCH if not. */ int regexec (preg, string, nmatch, pmatch, eflags) const regex_t *preg; const char *string; size_t nmatch; regmatch_t pmatch[]; int eflags; { int ret; struct re_registers regs; regex_t private_preg; int len = strlen (string); boolean want_reg_info = !preg->no_sub && nmatch > 0; private_preg = *preg; private_preg.not_bol = !!(eflags & REG_NOTBOL); private_preg.not_eol = !!(eflags & REG_NOTEOL); /* The user has told us exactly how many registers to return information about, via `nmatch'. We have to pass that on to the matching routines. */ private_preg.regs_allocated = REGS_FIXED; if (want_reg_info) { regs.num_regs = nmatch; regs.start = TALLOC (nmatch * 2, regoff_t); if (regs.start == NULL) return (int) REG_NOMATCH; regs.end = regs.start + nmatch; } /* Perform the searching operation. */ ret = re_search (&private_preg, string, len, /* start: */ 0, /* range: */ len, want_reg_info ? &regs : (struct re_registers *) 0); /* Copy the register information to the POSIX structure. */ if (want_reg_info) { if (ret >= 0) { unsigned r; for (r = 0; r < nmatch; r++) { pmatch[r].rm_so = regs.start[r]; pmatch[r].rm_eo = regs.end[r]; } } /* If we needed the temporary register info, free the space now. */ free (regs.start); } /* We want zero return to mean success, unlike `re_search'. */ return ret >= 0 ? (int) REG_NOERROR : (int) REG_NOMATCH; } #ifdef _LIBC weak_alias (__regexec, regexec) #endif /* Returns a message corresponding to an error code, ERRCODE, returned from either regcomp or regexec. We don't use PREG here. */ size_t regerror (errcode, preg, errbuf, errbuf_size) int errcode; const regex_t *preg ATTRIBUTE_UNUSED; char *errbuf; size_t errbuf_size; { const char *msg; size_t msg_size; if (errcode < 0 || errcode >= (int) (sizeof (re_error_msgid) / sizeof (re_error_msgid[0]))) /* Only error codes returned by the rest of the code should be passed to this routine. If we are given anything else, or if other regex code generates an invalid error code, then the program has a bug. Dump core so we can fix it. */ abort (); msg = gettext (re_error_msgid[errcode]); msg_size = strlen (msg) + 1; /* Includes the null. */ if (errbuf_size != 0) { if (msg_size > errbuf_size) { #if defined HAVE_MEMPCPY || defined _LIBC *((char *) mempcpy (errbuf, msg, errbuf_size - 1)) = '\0'; #else memcpy (errbuf, msg, errbuf_size - 1); errbuf[errbuf_size - 1] = 0; #endif } else memcpy (errbuf, msg, msg_size); } return msg_size; } #ifdef _LIBC weak_alias (__regerror, regerror) #endif /* Free dynamically allocated space used by PREG. 
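Taken together with regcomp, regexec and regerror above, the four POSIX entry points are used roughly like this (an illustrative sketch with an invented pattern and subject string; assumes <stdio.h> for the diagnostics):

       regex_t re;
       regmatch_t m[2];
       int err = regcomp (&re, "a(b+)c", REG_EXTENDED);
       if (err != 0)
         {
           char msg[128];
           regerror (err, &re, msg, sizeof msg);
           fprintf (stderr, "regcomp: %s\n", msg);
         }
       else
         {
           // On success m[0] spans "abbbc" and m[1] spans "bbb";
           // rm_so and rm_eo are byte offsets into the subject string.
           if (regexec (&re, "xxabbbcyy", 2, m, 0) == 0)
             printf ("group 1 = [%ld,%ld)\n", (long) m[1].rm_so, (long) m[1].rm_eo);
           regfree (&re);
         }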
*/ void regfree (preg) regex_t *preg; { if (preg->buffer != NULL) free (preg->buffer); preg->buffer = NULL; preg->allocated = 0; preg->used = 0; if (preg->fastmap != NULL) free (preg->fastmap); preg->fastmap = NULL; preg->fastmap_accurate = 0; if (preg->translate != NULL) free (preg->translate); preg->translate = NULL; } #ifdef _LIBC weak_alias (__regfree, regfree) #endif #endif /* not emacs */ #endif /* not INSIDE_RECURSION */ #undef STORE_NUMBER #undef STORE_NUMBER_AND_INCR #undef EXTRACT_NUMBER #undef EXTRACT_NUMBER_AND_INCR #undef DEBUG_PRINT_COMPILED_PATTERN #undef DEBUG_PRINT_DOUBLE_STRING #undef INIT_FAIL_STACK #undef RESET_FAIL_STACK #undef DOUBLE_FAIL_STACK #undef PUSH_PATTERN_OP #undef PUSH_FAILURE_POINTER #undef PUSH_FAILURE_INT #undef PUSH_FAILURE_ELT #undef POP_FAILURE_POINTER #undef POP_FAILURE_INT #undef POP_FAILURE_ELT #undef DEBUG_PUSH #undef DEBUG_POP #undef PUSH_FAILURE_POINT #undef POP_FAILURE_POINT #undef REG_UNSET_VALUE #undef REG_UNSET #undef PATFETCH #undef PATFETCH_RAW #undef PATUNFETCH #undef TRANSLATE #undef INIT_BUF_SIZE #undef GET_BUFFER_SPACE #undef BUF_PUSH #undef BUF_PUSH_2 #undef BUF_PUSH_3 #undef STORE_JUMP #undef STORE_JUMP2 #undef INSERT_JUMP #undef INSERT_JUMP2 #undef EXTEND_BUFFER #undef GET_UNSIGNED_NUMBER #undef FREE_STACK_RETURN # undef POINTER_TO_OFFSET # undef MATCHING_IN_FRST_STRING # undef PREFETCH # undef AT_STRINGS_BEG # undef AT_STRINGS_END # undef WORDCHAR_P # undef FREE_VAR # undef FREE_VARIABLES # undef NO_HIGHEST_ACTIVE_REG # undef NO_LOWEST_ACTIVE_REG # undef CHAR_T # undef UCHAR_T # undef COMPILED_BUFFER_VAR # undef OFFSET_ADDRESS_SIZE # undef CHAR_CLASS_SIZE # undef PREFIX # undef ARG_PREFIX # undef PUT_CHAR # undef BYTE # undef WCHAR #undef ISALPHA #undef ISALNUM #undef ISBLANK #undef ISCNTRL #undef ISDIGIT #undef ISGRAPH #undef ISLOWER #undef ISPRINT #undef ISPUNCT #undef ISSPACE #undef ISXDIGIT # define DEFINED_ONCE # undef INSIDE_RECURSION # endif # define BYTE # define INSIDE_RECURSION /* Extended regular expression matching and search library, version 0.12. (Implements POSIX draft P1003.2/D11.2, except for some of the internationalization features.) Copyright (C) 1993-1999, 2000, 2001, 2002 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ /* This file has been modified for usage in libiberty. It includes "xregex.h" instead of . The "xregex.h" header file renames all external routines with an "x" prefix so they do not collide with the native regex routines or with other components regex routines. */ /* AIX requires this to be the first thing in the file. 
*/ #if defined _AIX && !defined __GNUC__ && !defined REGEX_MALLOC #pragma alloca #endif #undef _GNU_SOURCE #define _GNU_SOURCE #ifdef HAVE_CONFIG_H #endif #ifndef PARAMS # if defined __GNUC__ || (defined __STDC__ && __STDC__) # define PARAMS(args) args # else # define PARAMS(args) () # endif /* GCC. */ #endif /* Not PARAMS. */ #ifdef BYTE # define CHAR_T char # define UCHAR_T unsigned char # define COMPILED_BUFFER_VAR bufp->buffer # define OFFSET_ADDRESS_SIZE 2 # if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) # define PREFIX(name) byte_##name # else # define PREFIX(name) byte_/**/name # endif # define ARG_PREFIX(name) name # define PUT_CHAR(c) putchar (c) #else # ifdef WCHAR # define CHAR_T wchar_t # define UCHAR_T wchar_t # define COMPILED_BUFFER_VAR wc_buffer # define OFFSET_ADDRESS_SIZE 1 /* the size which STORE_NUMBER macro use */ # define CHAR_CLASS_SIZE ((__alignof__(wctype_t)+sizeof(wctype_t))/sizeof(CHAR_T)+1) # if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE) # define PREFIX(name) wcs_##name # define ARG_PREFIX(name) c##name # else # define PREFIX(name) wcs_/**/name # define ARG_PREFIX(name) c/**/name # endif /* Should we use wide stream?? */ # define PUT_CHAR(c) printf ("%C", c); # define TRUE 1 # define FALSE 0 # else # ifdef MBS_SUPPORT # define WCHAR # define INSIDE_RECURSION /*# include "regex.c"*/ # undef INSIDE_RECURSION # endif # define BYTE # define INSIDE_RECURSION /*# include "regex.c"*/ # undef INSIDE_RECURSION # endif #endif #ifdef INSIDE_RECURSION /* Common operations on the compiled pattern. */ /* Store NUMBER in two contiguous bytes starting at DESTINATION. */ /* ifdef MBS_SUPPORT, we store NUMBER in 1 element. */ # ifdef WCHAR # define STORE_NUMBER(destination, number) \ do { \ *(destination) = (UCHAR_T)(number); \ } while (0) # else /* BYTE */ # define STORE_NUMBER(destination, number) \ do { \ (destination)[0] = (number) & 0377; \ (destination)[1] = (number) >> 8; \ } while (0) # endif /* WCHAR */ /* Same as STORE_NUMBER, except increment DESTINATION to the byte after where the number is stored. Therefore, DESTINATION must be an lvalue. */ /* ifdef MBS_SUPPORT, we store NUMBER in 1 element. */ # define STORE_NUMBER_AND_INCR(destination, number) \ do { \ STORE_NUMBER (destination, number); \ (destination) += OFFSET_ADDRESS_SIZE; \ } while (0) /* Put into DESTINATION a number stored in two contiguous bytes starting at SOURCE. */ /* ifdef MBS_SUPPORT, we store NUMBER in 1 element. */ # ifdef WCHAR # define EXTRACT_NUMBER(destination, source) \ do { \ (destination) = *(source); \ } while (0) # else /* BYTE */ # define EXTRACT_NUMBER(destination, source) \ do { \ (destination) = *(source) & 0377; \ (destination) += SIGN_EXTEND_CHAR (*((source) + 1)) << 8; \ } while (0) # endif # ifdef DEBUG static void PREFIX(extract_number) _RE_ARGS ((int *dest, UCHAR_T *source)); static void PREFIX(extract_number) (dest, source) int *dest; UCHAR_T *source; { # ifdef WCHAR *dest = *source; # else /* BYTE */ int temp = SIGN_EXTEND_CHAR (*(source + 1)); *dest = *source & 0377; *dest += temp << 8; # endif } # ifndef EXTRACT_MACROS /* To debug the macros. */ # undef EXTRACT_NUMBER # define EXTRACT_NUMBER(dest, src) PREFIX(extract_number) (&dest, src) # endif /* not EXTRACT_MACROS */ # endif /* DEBUG */ /* Same as EXTRACT_NUMBER, except increment SOURCE to after the number. SOURCE must be an lvalue. 
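For the byte flavor a short worked example may help: STORE_NUMBER (p, 300) stores 300 & 0377 = 44 into p[0] and 300 >> 8 = 1 into p[1], and EXTRACT_NUMBER recomputes 44 + (1 << 8) = 300. Negative jump offsets round-trip too, because the high byte is read back through SIGN_EXTEND_CHAR: storing -2 leaves the bytes 254 and 255, and 254 + (-1 << 8) = -2 comes back out (assuming the usual arithmetic right shift when storing). In the WCHAR flavor the number simply occupies a single wchar_t element, which is why OFFSET_ADDRESS_SIZE is 1 there and 2 here.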
*/ # define EXTRACT_NUMBER_AND_INCR(destination, source) \ do { \ EXTRACT_NUMBER (destination, source); \ (source) += OFFSET_ADDRESS_SIZE; \ } while (0) # ifdef DEBUG static void PREFIX(extract_number_and_incr) _RE_ARGS ((int *destination, UCHAR_T **source)); static void PREFIX(extract_number_and_incr) (destination, source) int *destination; UCHAR_T **source; { PREFIX(extract_number) (destination, *source); *source += OFFSET_ADDRESS_SIZE; } # ifndef EXTRACT_MACROS # undef EXTRACT_NUMBER_AND_INCR # define EXTRACT_NUMBER_AND_INCR(dest, src) \ PREFIX(extract_number_and_incr) (&dest, &src) # endif /* not EXTRACT_MACROS */ # endif /* DEBUG */ /* If DEBUG is defined, Regex prints many voluminous messages about what it is doing (if the variable `debug' is nonzero). If linked with the main program in `iregex.c', you can enter patterns and strings interactively. And if linked with the main program in `main.c' and the other test files, you can run the already-written tests. */ # ifdef DEBUG # ifndef DEFINED_ONCE /* We use standard I/O for debugging. */ # include /* It is useful to test things that ``must'' be true when debugging. */ # include static int debug; # define DEBUG_STATEMENT(e) e # define DEBUG_PRINT1(x) if (debug) printf (x) # define DEBUG_PRINT2(x1, x2) if (debug) printf (x1, x2) # define DEBUG_PRINT3(x1, x2, x3) if (debug) printf (x1, x2, x3) # define DEBUG_PRINT4(x1, x2, x3, x4) if (debug) printf (x1, x2, x3, x4) # endif /* not DEFINED_ONCE */ # define DEBUG_PRINT_COMPILED_PATTERN(p, s, e) \ if (debug) PREFIX(print_partial_compiled_pattern) (s, e) # define DEBUG_PRINT_DOUBLE_STRING(w, s1, sz1, s2, sz2) \ if (debug) PREFIX(print_double_string) (w, s1, sz1, s2, sz2) /* Print the fastmap in human-readable form. */ # ifndef DEFINED_ONCE void print_fastmap (fastmap) char *fastmap; { unsigned was_a_range = 0; unsigned i = 0; while (i < (1 << BYTEWIDTH)) { if (fastmap[i++]) { was_a_range = 0; putchar (i - 1); while (i < (1 << BYTEWIDTH) && fastmap[i]) { was_a_range = 1; i++; } if (was_a_range) { printf ("-"); putchar (i - 1); } } } putchar ('\n'); } # endif /* not DEFINED_ONCE */ /* Print a compiled pattern string in human-readable form, starting at the START pointer into it and ending just before the pointer END. */ void PREFIX(print_partial_compiled_pattern) (start, end) UCHAR_T *start; UCHAR_T *end; { int mcnt, mcnt2; UCHAR_T *p1; UCHAR_T *p = start; UCHAR_T *pend = end; if (start == NULL) { printf ("(null)\n"); return; } /* Loop over pattern commands. */ while (p < pend) { # ifdef _LIBC printf ("%td:\t", p - start); # else printf ("%ld:\t", (long int) (p - start)); # endif switch ((re_opcode_t) *p++) { case no_op: printf ("/no_op"); break; case exactn: mcnt = *p++; printf ("/exactn/%d", mcnt); do { putchar ('/'); PUT_CHAR (*p++); } while (--mcnt); break; # ifdef MBS_SUPPORT case exactn_bin: mcnt = *p++; printf ("/exactn_bin/%d", mcnt); do { printf("/%lx", (long int) *p++); } while (--mcnt); break; # endif /* MBS_SUPPORT */ case start_memory: mcnt = *p++; printf ("/start_memory/%d/%ld", mcnt, (long int) *p++); break; case stop_memory: mcnt = *p++; printf ("/stop_memory/%d/%ld", mcnt, (long int) *p++); break; case duplicate: printf ("/duplicate/%ld", (long int) *p++); break; case anychar: printf ("/anychar"); break; case charset: case charset_not: { # ifdef WCHAR int i, length; wchar_t *workp = p; printf ("/charset [%s", (re_opcode_t) *(workp - 1) == charset_not ? 
"^" : ""); p += 5; length = *workp++; /* the length of char_classes */ for (i=0 ; ibuffer; PREFIX(print_partial_compiled_pattern) (buffer, buffer + bufp->used / sizeof(UCHAR_T)); printf ("%ld bytes used/%ld bytes allocated.\n", bufp->used, bufp->allocated); if (bufp->fastmap_accurate && bufp->fastmap) { printf ("fastmap: "); print_fastmap (bufp->fastmap); } # ifdef _LIBC printf ("re_nsub: %Zd\t", bufp->re_nsub); # else printf ("re_nsub: %ld\t", (long int) bufp->re_nsub); # endif printf ("regs_alloc: %d\t", bufp->regs_allocated); printf ("can_be_null: %d\t", bufp->can_be_null); printf ("newline_anchor: %d\n", bufp->newline_anchor); printf ("no_sub: %d\t", bufp->no_sub); printf ("not_bol: %d\t", bufp->not_bol); printf ("not_eol: %d\t", bufp->not_eol); printf ("syntax: %lx\n", bufp->syntax); /* Perhaps we should print the translate table? */ } void PREFIX(print_double_string) (where, string1, size1, string2, size2) const CHAR_T *where; const CHAR_T *string1; const CHAR_T *string2; int size1; int size2; { int this_char; if (where == NULL) printf ("(null)"); else { int cnt; if (FIRST_STRING_P (where)) { for (this_char = where - string1; this_char < size1; this_char++) PUT_CHAR (string1[this_char]); where = string2; } cnt = 0; for (this_char = where - string2; this_char < size2; this_char++) { PUT_CHAR (string2[this_char]); if (++cnt > 100) { fputs ("...", stdout); break; } } } } # ifndef DEFINED_ONCE void printchar (c) int c; { putc (c, stderr); } # endif # else /* not DEBUG */ # ifndef DEFINED_ONCE # undef assert # define assert(e) # define DEBUG_STATEMENT(e) # define DEBUG_PRINT1(x) # define DEBUG_PRINT2(x1, x2) # define DEBUG_PRINT3(x1, x2, x3) # define DEBUG_PRINT4(x1, x2, x3, x4) # endif /* not DEFINED_ONCE */ # define DEBUG_PRINT_COMPILED_PATTERN(p, s, e) # define DEBUG_PRINT_DOUBLE_STRING(w, s1, sz1, s2, sz2) # endif /* not DEBUG */ # ifdef WCHAR /* This convert a multibyte string to a wide character string. And write their correspondances to offset_buffer(see below) and write whether each wchar_t is binary data to is_binary. This assume invalid multibyte sequences as binary data. We assume offset_buffer and is_binary is already allocated enough space. */ static size_t convert_mbs_to_wcs (CHAR_T *dest, const unsigned char* src, size_t len, int *offset_buffer, char *is_binary); static size_t convert_mbs_to_wcs (dest, src, len, offset_buffer, is_binary) CHAR_T *dest; const unsigned char* src; size_t len; /* the length of multibyte string. */ /* It hold correspondances between src(char string) and dest(wchar_t string) for optimization. e.g. src = "xxxyzz" dest = {'X', 'Y', 'Z'} (each "xxx", "y" and "zz" represent one multibyte character corresponding to 'X', 'Y' and 'Z'.) offset_buffer = {0, 0+3("xxx"), 0+3+1("y"), 0+3+1+2("zz")} = {0, 3, 4, 6} */ int *offset_buffer; char *is_binary; { wchar_t *pdest = dest; const unsigned char *psrc = src; size_t wc_count = 0; mbstate_t mbs; int i, consumed; size_t mb_remain = len; size_t mb_count = 0; /* Initialize the conversion state. */ memset (&mbs, 0, sizeof (mbstate_t)); offset_buffer[0] = 0; for( ; mb_remain > 0 ; ++wc_count, ++pdest, mb_remain -= consumed, psrc += consumed) { #ifdef _LIBC consumed = __mbrtowc (pdest, psrc, mb_remain, &mbs); #else consumed = mbrtowc (pdest, psrc, mb_remain, &mbs); #endif if (consumed <= 0) /* failed to convert. maybe src contains binary data. So we consume 1 byte manualy. 
*/ { *pdest = *psrc; consumed = 1; is_binary[wc_count] = TRUE; } else is_binary[wc_count] = FALSE; /* In sjis encoding, we use yen sign as escape character in place of reverse solidus. So we convert 0x5c(yen sign in sjis) to not 0xa5(yen sign in UCS2) but 0x5c(reverse solidus in UCS2). */ if (consumed == 1 && (int) *psrc == 0x5c && (int) *pdest == 0xa5) *pdest = (wchar_t) *psrc; offset_buffer[wc_count + 1] = mb_count += consumed; } /* Fill remain of the buffer with sentinel. */ for (i = wc_count + 1 ; i <= len ; i++) offset_buffer[i] = mb_count + 1; return wc_count; } # endif /* WCHAR */ #else /* not INSIDE_RECURSION */ /* Set by `re_set_syntax' to the current regexp syntax to recognize. Can also be assigned to arbitrarily: each pattern buffer stores its own syntax, so it can be changed between regex compilations. */ /* This has no initializer because initialized variables in Emacs become read-only after dumping. */ reg_syntax_t re_syntax_options; /* Specify the precise syntax of regexps for compilation. This provides for compatibility for various utilities which historically have different, incompatible syntaxes. The argument SYNTAX is a bit mask comprised of the various bits defined in regex.h. We return the old syntax. */ reg_syntax_t re_set_syntax (syntax) reg_syntax_t syntax; { reg_syntax_t ret = re_syntax_options; re_syntax_options = syntax; # ifdef DEBUG if (syntax & RE_DEBUG) debug = 1; else if (debug) /* was on but now is not */ debug = 0; # endif /* DEBUG */ return ret; } # ifdef _LIBC weak_alias (__re_set_syntax, re_set_syntax) # endif /* This table gives an error message for each of the error codes listed in regex.h. Obviously the order here has to be same as there. POSIX doesn't require that we do anything for REG_NOERROR, but why not be nice? */ static const char *re_error_msgid[] = { gettext_noop ("Success"), /* REG_NOERROR */ gettext_noop ("No match"), /* REG_NOMATCH */ gettext_noop ("Invalid regular expression"), /* REG_BADPAT */ gettext_noop ("Invalid collation character"), /* REG_ECOLLATE */ gettext_noop ("Invalid character class name"), /* REG_ECTYPE */ gettext_noop ("Trailing backslash"), /* REG_EESCAPE */ gettext_noop ("Invalid back reference"), /* REG_ESUBREG */ gettext_noop ("Unmatched [ or [^"), /* REG_EBRACK */ gettext_noop ("Unmatched ( or \\("), /* REG_EPAREN */ gettext_noop ("Unmatched \\{"), /* REG_EBRACE */ gettext_noop ("Invalid content of \\{\\}"), /* REG_BADBR */ gettext_noop ("Invalid range end"), /* REG_ERANGE */ gettext_noop ("Memory exhausted"), /* REG_ESPACE */ gettext_noop ("Invalid preceding regular expression"), /* REG_BADRPT */ gettext_noop ("Premature end of regular expression"), /* REG_EEND */ gettext_noop ("Regular expression too big"), /* REG_ESIZE */ gettext_noop ("Unmatched ) or \\)") /* REG_ERPAREN */ }; #endif /* INSIDE_RECURSION */ #ifndef DEFINED_ONCE /* Avoiding alloca during matching, to placate r_alloc. */ /* Define MATCH_MAY_ALLOCATE unless we need to make sure that the searching and matching functions should not call alloca. On some systems, alloca is implemented in terms of malloc, and if we're using the relocating allocator routines, then malloc could cause a relocation, which might (if the strings being searched are in the ralloc heap) shift the data out from underneath the regexp routines. Here's another reason to avoid allocation: Emacs processes input from X in a signal handler; processing X input may call malloc; if input arrives while a matching routine is calling malloc, then we're scrod. 
But Emacs can't just block input while calling matching routines; then we don't notice interrupts when they come in. So, Emacs blocks input around all regexp calls except the matching calls, which it leaves unprotected, in the faith that they will not malloc. */ /* Normally, this is fine. */ # define MATCH_MAY_ALLOCATE /* When using GNU C, we are not REALLY using the C alloca, no matter what config.h may say. So don't take precautions for it. */ # ifdef __GNUC__ # undef C_ALLOCA # endif /* The match routines may not allocate if (1) they would do it with malloc and (2) it's not safe for them to use malloc. Note that if REL_ALLOC is defined, matching would not use malloc for the failure stack, but we would still use it for the register vectors; so REL_ALLOC should not affect this. */ # if (defined C_ALLOCA || defined REGEX_MALLOC) && defined emacs # undef MATCH_MAY_ALLOCATE # endif #endif /* not DEFINED_ONCE */ #ifdef INSIDE_RECURSION /* Failure stack declarations and macros; both re_compile_fastmap and re_match_2 use a failure stack. These have to be macros because of REGEX_ALLOCATE_STACK. */ /* Number of failure points for which to initially allocate space when matching. If this number is exceeded, we allocate more space, so it is not a hard limit. */ # ifndef INIT_FAILURE_ALLOC # define INIT_FAILURE_ALLOC 5 # endif /* Roughly the maximum number of failure points on the stack. Would be exactly that if always used MAX_FAILURE_ITEMS items each time we failed. This is a variable only so users of regex can assign to it; we never change it ourselves. */ # ifdef INT_IS_16BIT # ifndef DEFINED_ONCE # if defined MATCH_MAY_ALLOCATE /* 4400 was enough to cause a crash on Alpha OSF/1, whose default stack limit is 2mb. */ long int re_max_failures = 4000; # else long int re_max_failures = 2000; # endif # endif union PREFIX(fail_stack_elt) { UCHAR_T *pointer; long int integer; }; typedef union PREFIX(fail_stack_elt) PREFIX(fail_stack_elt_t); typedef struct { PREFIX(fail_stack_elt_t) *stack; unsigned long int size; unsigned long int avail; /* Offset of next open position. */ } PREFIX(fail_stack_type); # else /* not INT_IS_16BIT */ # ifndef DEFINED_ONCE # if defined MATCH_MAY_ALLOCATE /* 4400 was enough to cause a crash on Alpha OSF/1, whose default stack limit is 2mb. */ int re_max_failures = 4000; # else int re_max_failures = 2000; # endif # endif union PREFIX(fail_stack_elt) { UCHAR_T *pointer; int integer; }; typedef union PREFIX(fail_stack_elt) PREFIX(fail_stack_elt_t); typedef struct { PREFIX(fail_stack_elt_t) *stack; unsigned size; unsigned avail; /* Offset of next open position. */ } PREFIX(fail_stack_type); # endif /* INT_IS_16BIT */ # ifndef DEFINED_ONCE # define FAIL_STACK_EMPTY() (fail_stack.avail == 0) # define FAIL_STACK_PTR_EMPTY() (fail_stack_ptr->avail == 0) # define FAIL_STACK_FULL() (fail_stack.avail == fail_stack.size) # endif /* Define macros to initialize and free the failure stack. Do `return -2' if the alloc fails. 
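For a sense of scale: the stack starts with room for INIT_FAILURE_ALLOC (5) elements, and DOUBLE_FAIL_STACK below keeps doubling it until the size would exceed re_max_failures * MAX_FAILURE_ITEMS. With the defaults above (re_max_failures = 4000 when the matcher may allocate) and MAX_FAILURE_ITEMS as defined further below (5 * 3 register items + 4 other items = 19 without DEBUG), that cap works out to 76000 elements; this is only an estimate from the defaults, not a promise about any particular build.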
*/ # ifdef MATCH_MAY_ALLOCATE # define INIT_FAIL_STACK() \ do { \ fail_stack.stack = (PREFIX(fail_stack_elt_t) *) \ REGEX_ALLOCATE_STACK (INIT_FAILURE_ALLOC * sizeof (PREFIX(fail_stack_elt_t))); \ \ if (fail_stack.stack == NULL) \ return -2; \ \ fail_stack.size = INIT_FAILURE_ALLOC; \ fail_stack.avail = 0; \ } while (0) # define RESET_FAIL_STACK() REGEX_FREE_STACK (fail_stack.stack) # else # define INIT_FAIL_STACK() \ do { \ fail_stack.avail = 0; \ } while (0) # define RESET_FAIL_STACK() # endif /* Double the size of FAIL_STACK, up to approximately `re_max_failures' items. Return 1 if succeeds, and 0 if either ran out of memory allocating space for it or it was already too large. REGEX_REALLOCATE_STACK requires `destination' be declared. */ # define DOUBLE_FAIL_STACK(fail_stack) \ ((fail_stack).size > (unsigned) (re_max_failures * MAX_FAILURE_ITEMS) \ ? 0 \ : ((fail_stack).stack = (PREFIX(fail_stack_elt_t) *) \ REGEX_REALLOCATE_STACK ((fail_stack).stack, \ (fail_stack).size * sizeof (PREFIX(fail_stack_elt_t)), \ ((fail_stack).size << 1) * sizeof (PREFIX(fail_stack_elt_t))),\ \ (fail_stack).stack == NULL \ ? 0 \ : ((fail_stack).size <<= 1, \ 1))) /* Push pointer POINTER on FAIL_STACK. Return 1 if was able to do so and 0 if ran out of memory allocating space to do so. */ # define PUSH_PATTERN_OP(POINTER, FAIL_STACK) \ ((FAIL_STACK_FULL () \ && !DOUBLE_FAIL_STACK (FAIL_STACK)) \ ? 0 \ : ((FAIL_STACK).stack[(FAIL_STACK).avail++].pointer = POINTER, \ 1)) /* Push a pointer value onto the failure stack. Assumes the variable `fail_stack'. Probably should only be called from within `PUSH_FAILURE_POINT'. */ # define PUSH_FAILURE_POINTER(item) \ fail_stack.stack[fail_stack.avail++].pointer = (UCHAR_T *) (item) /* This pushes an integer-valued item onto the failure stack. Assumes the variable `fail_stack'. Probably should only be called from within `PUSH_FAILURE_POINT'. */ # define PUSH_FAILURE_INT(item) \ fail_stack.stack[fail_stack.avail++].integer = (item) /* Push a fail_stack_elt_t value onto the failure stack. Assumes the variable `fail_stack'. Probably should only be called from within `PUSH_FAILURE_POINT'. */ # define PUSH_FAILURE_ELT(item) \ fail_stack.stack[fail_stack.avail++] = (item) /* These three POP... operations complement the three PUSH... operations. All assume that `fail_stack' is nonempty. */ # define POP_FAILURE_POINTER() fail_stack.stack[--fail_stack.avail].pointer # define POP_FAILURE_INT() fail_stack.stack[--fail_stack.avail].integer # define POP_FAILURE_ELT() fail_stack.stack[--fail_stack.avail] /* Used to omit pushing failure point id's when we're not debugging. */ # ifdef DEBUG # define DEBUG_PUSH PUSH_FAILURE_INT # define DEBUG_POP(item_addr) *(item_addr) = POP_FAILURE_INT () # else # define DEBUG_PUSH(item) # define DEBUG_POP(item_addr) # endif /* Push the information about the state we will need if we ever fail back to it. Requires variables fail_stack, regstart, regend, reg_info, and num_regs_pushed be declared. DOUBLE_FAIL_STACK requires `destination' be declared. Does `return FAILURE_CODE' if runs out of memory. */ # define PUSH_FAILURE_POINT(pattern_place, string_place, failure_code) \ do { \ char *destination; \ /* Must be int, so when we don't save any registers, the arithmetic \ of 0 + -1 isn't done as unsigned. 
*/ \ /* Can't be int, since there is not a shred of a guarantee that int \ is wide enough to hold a value of something to which pointer can \ be assigned */ \ active_reg_t this_reg; \ \ DEBUG_STATEMENT (failure_id++); \ DEBUG_STATEMENT (nfailure_points_pushed++); \ DEBUG_PRINT2 ("\nPUSH_FAILURE_POINT #%u:\n", failure_id); \ DEBUG_PRINT2 (" Before push, next avail: %d\n", (fail_stack).avail);\ DEBUG_PRINT2 (" size: %d\n", (fail_stack).size);\ \ DEBUG_PRINT2 (" slots needed: %ld\n", NUM_FAILURE_ITEMS); \ DEBUG_PRINT2 (" available: %d\n", REMAINING_AVAIL_SLOTS); \ \ /* Ensure we have enough space allocated for what we will push. */ \ while (REMAINING_AVAIL_SLOTS < NUM_FAILURE_ITEMS) \ { \ if (!DOUBLE_FAIL_STACK (fail_stack)) \ return failure_code; \ \ DEBUG_PRINT2 ("\n Doubled stack; size now: %d\n", \ (fail_stack).size); \ DEBUG_PRINT2 (" slots available: %d\n", REMAINING_AVAIL_SLOTS);\ } \ \ /* Push the info, starting with the registers. */ \ DEBUG_PRINT1 ("\n"); \ \ if (1) \ for (this_reg = lowest_active_reg; this_reg <= highest_active_reg; \ this_reg++) \ { \ DEBUG_PRINT2 (" Pushing reg: %lu\n", this_reg); \ DEBUG_STATEMENT (num_regs_pushed++); \ \ DEBUG_PRINT2 (" start: %p\n", regstart[this_reg]); \ PUSH_FAILURE_POINTER (regstart[this_reg]); \ \ DEBUG_PRINT2 (" end: %p\n", regend[this_reg]); \ PUSH_FAILURE_POINTER (regend[this_reg]); \ \ DEBUG_PRINT2 (" info: %p\n ", \ reg_info[this_reg].word.pointer); \ DEBUG_PRINT2 (" match_null=%d", \ REG_MATCH_NULL_STRING_P (reg_info[this_reg])); \ DEBUG_PRINT2 (" active=%d", IS_ACTIVE (reg_info[this_reg])); \ DEBUG_PRINT2 (" matched_something=%d", \ MATCHED_SOMETHING (reg_info[this_reg])); \ DEBUG_PRINT2 (" ever_matched=%d", \ EVER_MATCHED_SOMETHING (reg_info[this_reg])); \ DEBUG_PRINT1 ("\n"); \ PUSH_FAILURE_ELT (reg_info[this_reg].word); \ } \ \ DEBUG_PRINT2 (" Pushing low active reg: %ld\n", lowest_active_reg);\ PUSH_FAILURE_INT (lowest_active_reg); \ \ DEBUG_PRINT2 (" Pushing high active reg: %ld\n", highest_active_reg);\ PUSH_FAILURE_INT (highest_active_reg); \ \ DEBUG_PRINT2 (" Pushing pattern %p:\n", pattern_place); \ DEBUG_PRINT_COMPILED_PATTERN (bufp, pattern_place, pend); \ PUSH_FAILURE_POINTER (pattern_place); \ \ DEBUG_PRINT2 (" Pushing string %p: `", string_place); \ DEBUG_PRINT_DOUBLE_STRING (string_place, string1, size1, string2, \ size2); \ DEBUG_PRINT1 ("'\n"); \ PUSH_FAILURE_POINTER (string_place); \ \ DEBUG_PRINT2 (" Pushing failure id: %u\n", failure_id); \ DEBUG_PUSH (failure_id); \ } while (0) # ifndef DEFINED_ONCE /* This is the number of items that are pushed and popped on the stack for each register. */ # define NUM_REG_ITEMS 3 /* Individual items aside from the registers. */ # ifdef DEBUG # define NUM_NONREG_ITEMS 5 /* Includes failure point id. */ # else # define NUM_NONREG_ITEMS 4 # endif /* We push at most this many items on the stack. */ /* We used to use (num_regs - 1), which is the number of registers this regexp will save; but that was changed to 5 to avoid stack overflow for a regexp with lots of parens. */ # define MAX_FAILURE_ITEMS (5 * NUM_REG_ITEMS + NUM_NONREG_ITEMS) /* We actually push this many items. */ # define NUM_FAILURE_ITEMS \ (((0 \ ? 0 : highest_active_reg - lowest_active_reg + 1) \ * NUM_REG_ITEMS) \ + NUM_NONREG_ITEMS) /* How many items can still be added to the stack without overflowing it. */ # define REMAINING_AVAIL_SLOTS ((fail_stack).size - (fail_stack).avail) # endif /* not DEFINED_ONCE */ /* Pops what PUSH_FAIL_STACK pushes. 
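   A failure point, as laid down by PUSH_FAILURE_POINT above, occupies
   NUM_FAILURE_ITEMS consecutive stack elements, pushed in this order
   (oldest first):

       regstart[r], regend[r], reg_info[r].word   for each active register r,
                                                   lowest to highest
       lowest_active_reg
       highest_active_reg
       pattern position
       string position
       failure id                                  only when DEBUG is defined

   and this macro pops them back off in the reverse order.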
We restore into the parameters, all of which should be lvalues: STR -- the saved data position. PAT -- the saved pattern position. LOW_REG, HIGH_REG -- the highest and lowest active registers. REGSTART, REGEND -- arrays of string positions. REG_INFO -- array of information about each subexpression. Also assumes the variables `fail_stack' and (if debugging), `bufp', `pend', `string1', `size1', `string2', and `size2'. */ # define POP_FAILURE_POINT(str, pat, low_reg, high_reg, regstart, regend, reg_info)\ { \ DEBUG_STATEMENT (unsigned failure_id;) \ active_reg_t this_reg; \ const UCHAR_T *string_temp; \ \ assert (!FAIL_STACK_EMPTY ()); \ \ /* Remove failure points and point to how many regs pushed. */ \ DEBUG_PRINT1 ("POP_FAILURE_POINT:\n"); \ DEBUG_PRINT2 (" Before pop, next avail: %d\n", fail_stack.avail); \ DEBUG_PRINT2 (" size: %d\n", fail_stack.size); \ \ assert (fail_stack.avail >= NUM_NONREG_ITEMS); \ \ DEBUG_POP (&failure_id); \ DEBUG_PRINT2 (" Popping failure id: %u\n", failure_id); \ \ /* If the saved string location is NULL, it came from an \ on_failure_keep_string_jump opcode, and we want to throw away the \ saved NULL, thus retaining our current position in the string. */ \ string_temp = POP_FAILURE_POINTER (); \ if (string_temp != NULL) \ str = (const CHAR_T *) string_temp; \ \ DEBUG_PRINT2 (" Popping string %p: `", str); \ DEBUG_PRINT_DOUBLE_STRING (str, string1, size1, string2, size2); \ DEBUG_PRINT1 ("'\n"); \ \ pat = (UCHAR_T *) POP_FAILURE_POINTER (); \ DEBUG_PRINT2 (" Popping pattern %p:\n", pat); \ DEBUG_PRINT_COMPILED_PATTERN (bufp, pat, pend); \ \ /* Restore register info. */ \ high_reg = (active_reg_t) POP_FAILURE_INT (); \ DEBUG_PRINT2 (" Popping high active reg: %ld\n", high_reg); \ \ low_reg = (active_reg_t) POP_FAILURE_INT (); \ DEBUG_PRINT2 (" Popping low active reg: %ld\n", low_reg); \ \ if (1) \ for (this_reg = high_reg; this_reg >= low_reg; this_reg--) \ { \ DEBUG_PRINT2 (" Popping reg: %ld\n", this_reg); \ \ reg_info[this_reg].word = POP_FAILURE_ELT (); \ DEBUG_PRINT2 (" info: %p\n", \ reg_info[this_reg].word.pointer); \ \ regend[this_reg] = (const CHAR_T *) POP_FAILURE_POINTER (); \ DEBUG_PRINT2 (" end: %p\n", regend[this_reg]); \ \ regstart[this_reg] = (const CHAR_T *) POP_FAILURE_POINTER (); \ DEBUG_PRINT2 (" start: %p\n", regstart[this_reg]); \ } \ else \ { \ for (this_reg = highest_active_reg; this_reg > high_reg; this_reg--) \ { \ reg_info[this_reg].word.integer = 0; \ regend[this_reg] = 0; \ regstart[this_reg] = 0; \ } \ highest_active_reg = high_reg; \ } \ \ set_regs_matched_done = 0; \ DEBUG_STATEMENT (nfailure_points_popped++); \ } /* POP_FAILURE_POINT */ /* Structure for per-register (a.k.a. per-group) information. Other register information, such as the starting and ending positions (which are addresses), and the list of inner groups (which is a bits list) are maintained in separate variables. We are making a (strictly speaking) nonportable assumption here: that the compiler will pack our bit fields into something that fits into the type of `word', i.e., is something that fits into one item on the failure stack. */ /* Declarations and macros for re_match_2. */ typedef union { PREFIX(fail_stack_elt_t) word; struct { /* This field is one if this group can match the empty string, zero if not. If not yet determined, `MATCH_NULL_UNSET_VALUE'. 
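   (The field is two bits wide so it can hold 0, 1, and the unset
   sentinel value 3.)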
*/ # define MATCH_NULL_UNSET_VALUE 3 unsigned match_null_string_p : 2; unsigned is_active : 1; unsigned matched_something : 1; unsigned ever_matched_something : 1; } bits; } PREFIX(register_info_type); # ifndef DEFINED_ONCE # define REG_MATCH_NULL_STRING_P(R) ((R).bits.match_null_string_p) # define IS_ACTIVE(R) ((R).bits.is_active) # define MATCHED_SOMETHING(R) ((R).bits.matched_something) # define EVER_MATCHED_SOMETHING(R) ((R).bits.ever_matched_something) /* Call this when have matched a real character; it sets `matched' flags for the subexpressions which we are currently inside. Also records that those subexprs have matched. */ # define SET_REGS_MATCHED() \ do \ { \ if (!set_regs_matched_done) \ { \ active_reg_t r; \ set_regs_matched_done = 1; \ for (r = lowest_active_reg; r <= highest_active_reg; r++) \ { \ MATCHED_SOMETHING (reg_info[r]) \ = EVER_MATCHED_SOMETHING (reg_info[r]) \ = 1; \ } \ } \ } \ while (0) # endif /* not DEFINED_ONCE */ /* Registers are set to a sentinel when they haven't yet matched. */ static CHAR_T PREFIX(reg_unset_dummy); # define REG_UNSET_VALUE (&PREFIX(reg_unset_dummy)) # define REG_UNSET(e) ((e) == REG_UNSET_VALUE) /* Subroutine declarations and macros for regex_compile. */ static void PREFIX(store_op1) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg)); static void PREFIX(store_op2) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg1, int arg2)); static void PREFIX(insert_op1) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg, UCHAR_T *end)); static void PREFIX(insert_op2) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg1, int arg2, UCHAR_T *end)); static boolean PREFIX(at_begline_loc_p) _RE_ARGS ((const CHAR_T *pattern, const CHAR_T *p, reg_syntax_t syntax)); static boolean PREFIX(at_endline_loc_p) _RE_ARGS ((const CHAR_T *p, const CHAR_T *pend, reg_syntax_t syntax)); # ifdef WCHAR static reg_errcode_t wcs_compile_range _RE_ARGS ((CHAR_T range_start, const CHAR_T **p_ptr, const CHAR_T *pend, char *translate, reg_syntax_t syntax, UCHAR_T *b, CHAR_T *char_set)); static void insert_space _RE_ARGS ((int num, CHAR_T *loc, CHAR_T *end)); # else /* BYTE */ static reg_errcode_t byte_compile_range _RE_ARGS ((unsigned int range_start, const char **p_ptr, const char *pend, char *translate, reg_syntax_t syntax, unsigned char *b)); # endif /* WCHAR */ /* Fetch the next character in the uncompiled pattern---translating it if necessary. Also cast from a signed character in the constant string passed to us by the user to an unsigned char that we can use as an array index (in, e.g., `translate'). */ /* ifdef MBS_SUPPORT, we translate only if character <= 0xff, because it is impossible to allocate 4GB array for some encodings which have 4 byte character_set like UCS4. */ # ifndef PATFETCH # ifdef WCHAR # define PATFETCH(c) \ do {if (p == pend) return REG_EEND; \ c = (UCHAR_T) *p++; \ if (translate && (c <= 0xff)) c = (UCHAR_T) translate[c]; \ } while (0) # else /* BYTE */ # define PATFETCH(c) \ do {if (p == pend) return REG_EEND; \ c = (unsigned char) *p++; \ if (translate) c = (unsigned char) translate[c]; \ } while (0) # endif /* WCHAR */ # endif /* Fetch the next character in the uncompiled pattern, with no translation. */ # define PATFETCH_RAW(c) \ do {if (p == pend) return REG_EEND; \ c = (UCHAR_T) *p++; \ } while (0) /* Go backwards one character in the pattern. */ # define PATUNFETCH p-- /* If `translate' is non-null, return translate[D], else just D. 
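   A typical `translate' table folds case, so that TRANSLATE ('A') and
   TRANSLATE ('a') compare equal.  A minimal sketch of such a table --
   `fold' here is a hypothetical caller-owned array, not part of this
   file:

       char fold[1 << BYTEWIDTH];
       int i;
       for (i = 0; i < (1 << BYTEWIDTH); i++)
         fold[i] = ISUPPER (i) ? tolower (i) : i;
       bufp->translate = fold;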
We cast the subscript to translate because some data is declared as `char *', to avoid warnings when a string constant is passed. But when we use a character as a subscript we must make it unsigned. */ /* ifdef MBS_SUPPORT, we translate only if character <= 0xff, because it is impossible to allocate 4GB array for some encodings which have 4 byte character_set like UCS4. */ # ifndef TRANSLATE # ifdef WCHAR # define TRANSLATE(d) \ ((translate && ((UCHAR_T) (d)) <= 0xff) \ ? (char) translate[(unsigned char) (d)] : (d)) # else /* BYTE */ # define TRANSLATE(d) \ (translate ? (char) translate[(unsigned char) (d)] : (d)) # endif /* WCHAR */ # endif /* Macros for outputting the compiled pattern into `buffer'. */ /* If the buffer isn't allocated when it comes in, use this. */ # define INIT_BUF_SIZE (32 * sizeof(UCHAR_T)) /* Make sure we have at least N more bytes of space in buffer. */ # ifdef WCHAR # define GET_BUFFER_SPACE(n) \ while (((unsigned long)b - (unsigned long)COMPILED_BUFFER_VAR \ + (n)*sizeof(CHAR_T)) > bufp->allocated) \ EXTEND_BUFFER () # else /* BYTE */ # define GET_BUFFER_SPACE(n) \ while ((unsigned long) (b - bufp->buffer + (n)) > bufp->allocated) \ EXTEND_BUFFER () # endif /* WCHAR */ /* Make sure we have one more byte of buffer space and then add C to it. */ # define BUF_PUSH(c) \ do { \ GET_BUFFER_SPACE (1); \ *b++ = (UCHAR_T) (c); \ } while (0) /* Ensure we have two more bytes of buffer space and then append C1 and C2. */ # define BUF_PUSH_2(c1, c2) \ do { \ GET_BUFFER_SPACE (2); \ *b++ = (UCHAR_T) (c1); \ *b++ = (UCHAR_T) (c2); \ } while (0) /* As with BUF_PUSH_2, except for three bytes. */ # define BUF_PUSH_3(c1, c2, c3) \ do { \ GET_BUFFER_SPACE (3); \ *b++ = (UCHAR_T) (c1); \ *b++ = (UCHAR_T) (c2); \ *b++ = (UCHAR_T) (c3); \ } while (0) /* Store a jump with opcode OP at LOC to location TO. We store a relative address offset by the three bytes the jump itself occupies. */ # define STORE_JUMP(op, loc, to) \ PREFIX(store_op1) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE))) /* Likewise, for a two-argument jump. */ # define STORE_JUMP2(op, loc, to, arg) \ PREFIX(store_op2) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE)), arg) /* Like `STORE_JUMP', but for inserting. Assume `b' is the buffer end. */ # define INSERT_JUMP(op, loc, to) \ PREFIX(insert_op1) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE)), b) /* Like `STORE_JUMP2', but for inserting. Assume `b' is the buffer end. */ # define INSERT_JUMP2(op, loc, to, arg) \ PREFIX(insert_op2) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE)),\ arg, b) /* This is not an arbitrary limit: the arguments which represent offsets into the pattern are two bytes long. So if 2^16 bytes turns out to be too small, many things would have to change. */ /* Any other compiler which, like MSC, has allocation limit below 2^16 bytes will have to use approach similar to what was done below for MSC and drop MAX_BUF_SIZE a bit. Otherwise you may end up reallocating to 0 bytes. Such thing is not going to work too well. You have been warned!! */ # ifndef DEFINED_ONCE # if defined _MSC_VER && !defined WIN32 /* Microsoft C 16-bit versions limit malloc to approx 65512 bytes. The REALLOC define eliminates a flurry of conversion warnings, but is not required. 
*/ # define MAX_BUF_SIZE 65500L # define REALLOC(p,s) realloc ((p), (size_t) (s)) # else # define MAX_BUF_SIZE (1L << 16) # define REALLOC(p,s) realloc ((p), (s)) # endif /* Extend the buffer by twice its current size via realloc and reset the pointers that pointed into the old block to point to the correct places in the new one. If extending the buffer results in it being larger than MAX_BUF_SIZE, then flag memory exhausted. */ # if __BOUNDED_POINTERS__ # define SET_HIGH_BOUND(P) (__ptrhigh (P) = __ptrlow (P) + bufp->allocated) # define MOVE_BUFFER_POINTER(P) \ (__ptrlow (P) += incr, SET_HIGH_BOUND (P), __ptrvalue (P) += incr) # define ELSE_EXTEND_BUFFER_HIGH_BOUND \ else \ { \ SET_HIGH_BOUND (b); \ SET_HIGH_BOUND (begalt); \ if (fixup_alt_jump) \ SET_HIGH_BOUND (fixup_alt_jump); \ if (laststart) \ SET_HIGH_BOUND (laststart); \ if (pending_exact) \ SET_HIGH_BOUND (pending_exact); \ } # else # define MOVE_BUFFER_POINTER(P) (P) += incr # define ELSE_EXTEND_BUFFER_HIGH_BOUND # endif # endif /* not DEFINED_ONCE */ # ifdef WCHAR # define EXTEND_BUFFER() \ do { \ UCHAR_T *old_buffer = COMPILED_BUFFER_VAR; \ int wchar_count; \ if (bufp->allocated + sizeof(UCHAR_T) > MAX_BUF_SIZE) \ return REG_ESIZE; \ bufp->allocated <<= 1; \ if (bufp->allocated > MAX_BUF_SIZE) \ bufp->allocated = MAX_BUF_SIZE; \ /* How many characters the new buffer can have? */ \ wchar_count = bufp->allocated / sizeof(UCHAR_T); \ if (wchar_count == 0) wchar_count = 1; \ /* Truncate the buffer to CHAR_T align. */ \ bufp->allocated = wchar_count * sizeof(UCHAR_T); \ RETALLOC (COMPILED_BUFFER_VAR, wchar_count, UCHAR_T); \ bufp->buffer = (char*)COMPILED_BUFFER_VAR; \ if (COMPILED_BUFFER_VAR == NULL) \ return REG_ESPACE; \ /* If the buffer moved, move all the pointers into it. */ \ if (old_buffer != COMPILED_BUFFER_VAR) \ { \ int incr = COMPILED_BUFFER_VAR - old_buffer; \ MOVE_BUFFER_POINTER (b); \ MOVE_BUFFER_POINTER (begalt); \ if (fixup_alt_jump) \ MOVE_BUFFER_POINTER (fixup_alt_jump); \ if (laststart) \ MOVE_BUFFER_POINTER (laststart); \ if (pending_exact) \ MOVE_BUFFER_POINTER (pending_exact); \ } \ ELSE_EXTEND_BUFFER_HIGH_BOUND \ } while (0) # else /* BYTE */ # define EXTEND_BUFFER() \ do { \ UCHAR_T *old_buffer = COMPILED_BUFFER_VAR; \ if (bufp->allocated == MAX_BUF_SIZE) \ return REG_ESIZE; \ bufp->allocated <<= 1; \ if (bufp->allocated > MAX_BUF_SIZE) \ bufp->allocated = MAX_BUF_SIZE; \ bufp->buffer = (UCHAR_T *) REALLOC (COMPILED_BUFFER_VAR, \ bufp->allocated); \ if (COMPILED_BUFFER_VAR == NULL) \ return REG_ESPACE; \ /* If the buffer moved, move all the pointers into it. */ \ if (old_buffer != COMPILED_BUFFER_VAR) \ { \ int incr = COMPILED_BUFFER_VAR - old_buffer; \ MOVE_BUFFER_POINTER (b); \ MOVE_BUFFER_POINTER (begalt); \ if (fixup_alt_jump) \ MOVE_BUFFER_POINTER (fixup_alt_jump); \ if (laststart) \ MOVE_BUFFER_POINTER (laststart); \ if (pending_exact) \ MOVE_BUFFER_POINTER (pending_exact); \ } \ ELSE_EXTEND_BUFFER_HIGH_BOUND \ } while (0) # endif /* WCHAR */ # ifndef DEFINED_ONCE /* Since we have one byte reserved for the register number argument to {start,stop}_memory, the maximum number of groups we can report things about is what fits in that byte. */ # define MAX_REGNUM 255 /* But patterns can have more than `MAX_REGNUM' registers. We just ignore the excess. */ typedef unsigned regnum_t; /* Macros for the compile stack. */ /* Since offsets can go either forwards or backwards, this type needs to be able to hold values from -(MAX_BUF_SIZE - 1) to MAX_BUF_SIZE - 1. */ /* int may be not enough when sizeof(int) == 2. 
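   (With MAX_BUF_SIZE of 1L << 16 the offsets run from -65535 to 65535,
   while a 16-bit int covers only -32768 .. 32767, hence `long'.)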
*/ typedef long pattern_offset_t; typedef struct { pattern_offset_t begalt_offset; pattern_offset_t fixup_alt_jump; pattern_offset_t inner_group_offset; pattern_offset_t laststart_offset; regnum_t regnum; } compile_stack_elt_t; typedef struct { compile_stack_elt_t *stack; unsigned size; unsigned avail; /* Offset of next open position. */ } compile_stack_type; # define INIT_COMPILE_STACK_SIZE 32 # define COMPILE_STACK_EMPTY (compile_stack.avail == 0) # define COMPILE_STACK_FULL (compile_stack.avail == compile_stack.size) /* The next available element. */ # define COMPILE_STACK_TOP (compile_stack.stack[compile_stack.avail]) # endif /* not DEFINED_ONCE */ /* Set the bit for character C in a list. */ # ifndef DEFINED_ONCE # define SET_LIST_BIT(c) \ (b[((unsigned char) (c)) / BYTEWIDTH] \ |= 1 << (((unsigned char) c) % BYTEWIDTH)) # endif /* DEFINED_ONCE */ /* Get the next unsigned number in the uncompiled pattern. */ # define GET_UNSIGNED_NUMBER(num) \ { \ while (p != pend) \ { \ PATFETCH (c); \ if (c < '0' || c > '9') \ break; \ if (num <= RE_DUP_MAX) \ { \ if (num < 0) \ num = 0; \ num = num * 10 + c - '0'; \ } \ } \ } # ifndef DEFINED_ONCE # if defined _LIBC || WIDE_CHAR_SUPPORT /* The GNU C library provides support for user-defined character classes and the functions from ISO C amendement 1. */ # ifdef CHARCLASS_NAME_MAX # define CHAR_CLASS_MAX_LENGTH CHARCLASS_NAME_MAX # else /* This shouldn't happen but some implementation might still have this problem. Use a reasonable default value. */ # define CHAR_CLASS_MAX_LENGTH 256 # endif # ifdef _LIBC # define IS_CHAR_CLASS(string) __wctype (string) # else # define IS_CHAR_CLASS(string) wctype (string) # endif # else # define CHAR_CLASS_MAX_LENGTH 6 /* Namely, `xdigit'. */ # define IS_CHAR_CLASS(string) \ (STREQ (string, "alpha") || STREQ (string, "upper") \ || STREQ (string, "lower") || STREQ (string, "digit") \ || STREQ (string, "alnum") || STREQ (string, "xdigit") \ || STREQ (string, "space") || STREQ (string, "print") \ || STREQ (string, "punct") || STREQ (string, "graph") \ || STREQ (string, "cntrl") || STREQ (string, "blank")) # endif # endif /* DEFINED_ONCE */ # ifndef MATCH_MAY_ALLOCATE /* If we cannot allocate large objects within re_match_2_internal, we make the fail stack and register vectors global. The fail stack, we grow to the maximum size when a regexp is compiled. The register vectors, we adjust in size each time we compile a regexp, according to the number of registers it needs. */ static PREFIX(fail_stack_type) fail_stack; /* Size with which the following vectors are currently allocated. That is so we can make them bigger as needed, but never make them smaller. */ # ifdef DEFINED_ONCE static int regs_allocated_size; static const char ** regstart, ** regend; static const char ** old_regstart, ** old_regend; static const char **best_regstart, **best_regend; static const char **reg_dummy; # endif /* DEFINED_ONCE */ static PREFIX(register_info_type) *PREFIX(reg_info); static PREFIX(register_info_type) *PREFIX(reg_info_dummy); /* Make the register vectors big enough for NUM_REGS registers, but don't make them smaller. 
*/ static void PREFIX(regex_grow_registers) (num_regs) int num_regs; { if (num_regs > regs_allocated_size) { RETALLOC_IF (regstart, num_regs, const char *); RETALLOC_IF (regend, num_regs, const char *); RETALLOC_IF (old_regstart, num_regs, const char *); RETALLOC_IF (old_regend, num_regs, const char *); RETALLOC_IF (best_regstart, num_regs, const char *); RETALLOC_IF (best_regend, num_regs, const char *); RETALLOC_IF (PREFIX(reg_info), num_regs, PREFIX(register_info_type)); RETALLOC_IF (reg_dummy, num_regs, const char *); RETALLOC_IF (PREFIX(reg_info_dummy), num_regs, PREFIX(register_info_type)); regs_allocated_size = num_regs; } } # endif /* not MATCH_MAY_ALLOCATE */ # ifndef DEFINED_ONCE static boolean group_in_compile_stack _RE_ARGS ((compile_stack_type compile_stack, regnum_t regnum)); # endif /* not DEFINED_ONCE */ /* `regex_compile' compiles PATTERN (of length SIZE) according to SYNTAX. Returns one of error codes defined in `regex.h', or zero for success. Assumes the `allocated' (and perhaps `buffer') and `translate' fields are set in BUFP on entry. If it succeeds, results are put in BUFP (if it returns an error, the contents of BUFP are undefined): `buffer' is the compiled pattern; `syntax' is set to SYNTAX; `used' is set to the length of the compiled pattern; `fastmap_accurate' is zero; `re_nsub' is the number of subexpressions in PATTERN; `not_bol' and `not_eol' are zero; The `fastmap' and `newline_anchor' fields are neither examined nor set. */ /* Return, freeing storage we allocated. */ # ifdef WCHAR # define FREE_STACK_RETURN(value) \ return (free(pattern), free(mbs_offset), free(is_binary), free (compile_stack.stack), value) # else # define FREE_STACK_RETURN(value) \ return (free (compile_stack.stack), value) # endif /* WCHAR */ static reg_errcode_t PREFIX(regex_compile) (ARG_PREFIX(pattern), ARG_PREFIX(size), syntax, bufp) const char *ARG_PREFIX(pattern); size_t ARG_PREFIX(size); reg_syntax_t syntax; struct re_pattern_buffer *bufp; { /* We fetch characters from PATTERN here. Even though PATTERN is `char *' (i.e., signed), we declare these variables as unsigned, so they can be reliably used as array indices. */ register UCHAR_T c, c1; #ifdef WCHAR /* A temporary space to keep wchar_t pattern and compiled pattern. */ CHAR_T *pattern, *COMPILED_BUFFER_VAR; size_t size; /* offset buffer for optimization. See convert_mbs_to_wc. */ int *mbs_offset = NULL; /* It hold whether each wchar_t is binary data or not. */ char *is_binary = NULL; /* A flag whether exactn is handling binary data or not. */ char is_exactn_bin = FALSE; #endif /* WCHAR */ /* A random temporary spot in PATTERN. */ const CHAR_T *p1; /* Points to the end of the buffer, where we should append. */ register UCHAR_T *b; /* Keeps track of unclosed groups. */ compile_stack_type compile_stack; /* Points to the current (ending) position in the pattern. */ #ifdef WCHAR const CHAR_T *p; const CHAR_T *pend; #else /* BYTE */ const CHAR_T *p = pattern; const CHAR_T *pend = pattern + size; #endif /* WCHAR */ /* How to translate the characters in the pattern. */ RE_TRANSLATE_TYPE translate = bufp->translate; /* Address of the count-byte of the most recently inserted `exactn' command. This makes it possible to tell if a new exact-match character can be added to that command or if the character requires a new `exactn' command. */ UCHAR_T *pending_exact = 0; /* Address of start of the most recently finished expression. This tells, e.g., postfix * where to find the start of its operand. 
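   (For example, while compiling `ab*' the `b' is given an `exactn' of its
   own -- see the `*p == '*'' test under `normal_char' below -- and
   `laststart' is left pointing at it, so the `*' loops over just the `b'
   rather than over `ab'.)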
Reset at the beginning of groups and alternatives. */ UCHAR_T *laststart = 0; /* Address of beginning of regexp, or inside of last group. */ UCHAR_T *begalt; /* Address of the place where a forward jump should go to the end of the containing expression. Each alternative of an `or' -- except the last -- ends with a forward jump of this sort. */ UCHAR_T *fixup_alt_jump = 0; /* Counts open-groups as they are encountered. Remembered for the matching close-group on the compile stack, so the same register number is put in the stop_memory as the start_memory. */ regnum_t regnum = 0; #ifdef WCHAR /* Initialize the wchar_t PATTERN and offset_buffer. */ p = pend = pattern = TALLOC(csize + 1, CHAR_T); mbs_offset = TALLOC(csize + 1, int); is_binary = TALLOC(csize + 1, char); if (pattern == NULL || mbs_offset == NULL || is_binary == NULL) { free(pattern); free(mbs_offset); free(is_binary); return REG_ESPACE; } pattern[csize] = L'\0'; /* sentinel */ size = convert_mbs_to_wcs(pattern, cpattern, csize, mbs_offset, is_binary); pend = p + size; if (size < 0) { free(pattern); free(mbs_offset); free(is_binary); return REG_BADPAT; } #endif #ifdef DEBUG DEBUG_PRINT1 ("\nCompiling pattern: "); if (debug) { unsigned debug_count; for (debug_count = 0; debug_count < size; debug_count++) PUT_CHAR (pattern[debug_count]); putchar ('\n'); } #endif /* DEBUG */ /* Initialize the compile stack. */ compile_stack.stack = TALLOC (INIT_COMPILE_STACK_SIZE, compile_stack_elt_t); if (compile_stack.stack == NULL) { #ifdef WCHAR free(pattern); free(mbs_offset); free(is_binary); #endif return REG_ESPACE; } compile_stack.size = INIT_COMPILE_STACK_SIZE; compile_stack.avail = 0; /* Initialize the pattern buffer. */ bufp->syntax = syntax; bufp->fastmap_accurate = 0; bufp->not_bol = bufp->not_eol = 0; /* Set `used' to zero, so that if we return an error, the pattern printer (for debugging) will think there's no pattern. We reset it at the end. */ bufp->used = 0; /* Always count groups, whether or not bufp->no_sub is set. */ bufp->re_nsub = 0; #if !defined emacs && !defined SYNTAX_TABLE /* Initialize the syntax table. */ init_syntax_once (); #endif if (bufp->allocated == 0) { if (bufp->buffer) { /* If zero allocated, but buffer is non-null, try to realloc enough space. This loses if buffer's address is bogus, but that is the user's responsibility. */ #ifdef WCHAR /* Free bufp->buffer and allocate an array for wchar_t pattern buffer. */ free(bufp->buffer); COMPILED_BUFFER_VAR = TALLOC (INIT_BUF_SIZE/sizeof(UCHAR_T), UCHAR_T); #else RETALLOC (COMPILED_BUFFER_VAR, INIT_BUF_SIZE, UCHAR_T); #endif /* WCHAR */ } else { /* Caller did not allocate a buffer. Do it for them. */ COMPILED_BUFFER_VAR = TALLOC (INIT_BUF_SIZE / sizeof(UCHAR_T), UCHAR_T); } if (!COMPILED_BUFFER_VAR) FREE_STACK_RETURN (REG_ESPACE); #ifdef WCHAR bufp->buffer = (char*)COMPILED_BUFFER_VAR; #endif /* WCHAR */ bufp->allocated = INIT_BUF_SIZE; } #ifdef WCHAR else COMPILED_BUFFER_VAR = (UCHAR_T*) bufp->buffer; #endif begalt = b = COMPILED_BUFFER_VAR; /* Loop through the uncompiled pattern until we're at the end. */ while (p != pend) { PATFETCH (c); switch (c) { case '^': { if ( /* If at start of pattern, it's an operator. */ p == pattern + 1 /* If context independent, it's an operator. */ || syntax & RE_CONTEXT_INDEP_ANCHORS /* Otherwise, depends on what's come before. */ || PREFIX(at_begline_loc_p) (pattern, p, syntax)) BUF_PUSH (begline); else goto normal_char; } break; case '$': { if ( /* If at end of pattern, it's an operator. 
*/ p == pend /* If context independent, it's an operator. */ || syntax & RE_CONTEXT_INDEP_ANCHORS /* Otherwise, depends on what's next. */ || PREFIX(at_endline_loc_p) (p, pend, syntax)) BUF_PUSH (endline); else goto normal_char; } break; case '+': case '?': if ((syntax & RE_BK_PLUS_QM) || (syntax & RE_LIMITED_OPS)) goto normal_char; handle_plus: case '*': /* If there is no previous pattern... */ if (!laststart) { if (syntax & RE_CONTEXT_INVALID_OPS) FREE_STACK_RETURN (REG_BADRPT); else if (!(syntax & RE_CONTEXT_INDEP_OPS)) goto normal_char; } { /* Are we optimizing this jump? */ boolean keep_string_p = false; /* 1 means zero (many) matches is allowed. */ char zero_times_ok = 0, many_times_ok = 0; /* If there is a sequence of repetition chars, collapse it down to just one (the right one). We can't combine interval operators with these because of, e.g., `a{2}*', which should only match an even number of `a's. */ for (;;) { zero_times_ok |= c != '+'; many_times_ok |= c != '?'; if (p == pend) break; PATFETCH (c); if (c == '*' || (!(syntax & RE_BK_PLUS_QM) && (c == '+' || c == '?'))) ; else if (syntax & RE_BK_PLUS_QM && c == '\\') { if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); PATFETCH (c1); if (!(c1 == '+' || c1 == '?')) { PATUNFETCH; PATUNFETCH; break; } c = c1; } else { PATUNFETCH; break; } /* If we get here, we found another repeat character. */ } /* Star, etc. applied to an empty pattern is equivalent to an empty pattern. */ if (!laststart) break; /* Now we know whether or not zero matches is allowed and also whether or not two or more matches is allowed. */ if (many_times_ok) { /* More than one repetition is allowed, so put in at the end a backward relative jump from `b' to before the next jump we're going to put in below (which jumps from laststart to after this jump). But if we are at the `*' in the exact sequence `.*\n', insert an unconditional jump backwards to the ., instead of the beginning of the loop. This way we only push a failure point once, instead of every time through the loop. */ assert (p - 1 > pattern); /* Allocate the space for the jump. */ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); /* We know we are not at the first character of the pattern, because laststart was nonzero. And we've already incremented `p', by the way, to be the character after the `*'. Do we have to do something analogous here for null bytes, because of RE_DOT_NOT_NULL? */ if (TRANSLATE (*(p - 2)) == TRANSLATE ('.') && zero_times_ok && p < pend && TRANSLATE (*p) == TRANSLATE ('\n') && !(syntax & RE_DOT_NEWLINE)) { /* We have .*\n. */ STORE_JUMP (jump, b, laststart); keep_string_p = true; } else /* Anything else. */ STORE_JUMP (maybe_pop_jump, b, laststart - (1 + OFFSET_ADDRESS_SIZE)); /* We've added more stuff to the buffer. */ b += 1 + OFFSET_ADDRESS_SIZE; } /* On failure, jump from laststart to b + 3, which will be the end of the buffer after this jump is inserted. */ /* ifdef WCHAR, 'b + 1 + OFFSET_ADDRESS_SIZE' instead of 'b + 3'. */ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (keep_string_p ? on_failure_keep_string_jump : on_failure_jump, laststart, b + 1 + OFFSET_ADDRESS_SIZE); pending_exact = 0; b += 1 + OFFSET_ADDRESS_SIZE; if (!zero_times_ok) { /* At least one repetition is required, so insert a `dummy_failure_jump' before the initial `on_failure_jump' instruction of the loop. This effects a skip over that instruction the first time we hit that loop. 
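   For instance, `a+' (byte case, where OFFSET_ADDRESS_SIZE is 2) ends up
   compiled roughly as:

       dummy_failure_jump   over the on_failure_jump    taken once, on entry
       on_failure_jump      to past the loop            where a failed match
                                                        resumes
       exactn 1 'a'
       maybe_pop_jump       back to the on_failure_jump

   whereas `a*' is the same sequence without the leading
   dummy_failure_jump.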
*/ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (dummy_failure_jump, laststart, laststart + 2 + 2 * OFFSET_ADDRESS_SIZE); b += 1 + OFFSET_ADDRESS_SIZE; } } break; case '.': laststart = b; BUF_PUSH (anychar); break; case '[': { boolean had_char_class = false; #ifdef WCHAR CHAR_T range_start = 0xffffffff; #else unsigned int range_start = 0xffffffff; #endif if (p == pend) FREE_STACK_RETURN (REG_EBRACK); #ifdef WCHAR /* We assume a charset(_not) structure as a wchar_t array. charset[0] = (re_opcode_t) charset(_not) charset[1] = l (= length of char_classes) charset[2] = m (= length of collating_symbols) charset[3] = n (= length of equivalence_classes) charset[4] = o (= length of char_ranges) charset[5] = p (= length of chars) charset[6] = char_class (wctype_t) charset[6+CHAR_CLASS_SIZE] = char_class (wctype_t) ... charset[l+5] = char_class (wctype_t) charset[l+6] = collating_symbol (wchar_t) ... charset[l+m+5] = collating_symbol (wchar_t) ifdef _LIBC we use the index if _NL_COLLATE_SYMB_EXTRAMB instead of wchar_t string. charset[l+m+6] = equivalence_classes (wchar_t) ... charset[l+m+n+5] = equivalence_classes (wchar_t) ifdef _LIBC we use the index in _NL_COLLATE_WEIGHT instead of wchar_t string. charset[l+m+n+6] = range_start charset[l+m+n+7] = range_end ... charset[l+m+n+2o+4] = range_start charset[l+m+n+2o+5] = range_end ifdef _LIBC we use the value looked up in _NL_COLLATE_COLLSEQ instead of wchar_t character. charset[l+m+n+2o+6] = char ... charset[l+m+n+2o+p+5] = char */ /* We need at least 6 spaces: the opcode, the length of char_classes, the length of collating_symbols, the length of equivalence_classes, the length of char_ranges, the length of chars. */ GET_BUFFER_SPACE (6); /* Save b as laststart. And We use laststart as the pointer to the first element of the charset here. In other words, laststart[i] indicates charset[i]. */ laststart = b; /* We test `*p == '^' twice, instead of using an if statement, so we only need one BUF_PUSH. */ BUF_PUSH (*p == '^' ? charset_not : charset); if (*p == '^') p++; /* Push the length of char_classes, the length of collating_symbols, the length of equivalence_classes, the length of char_ranges and the length of chars. */ BUF_PUSH_3 (0, 0, 0); BUF_PUSH_2 (0, 0); /* Remember the first position in the bracket expression. */ p1 = p; /* charset_not matches newline according to a syntax bit. */ if ((re_opcode_t) b[-6] == charset_not && (syntax & RE_HAT_LISTS_NOT_NEWLINE)) { BUF_PUSH('\n'); laststart[5]++; /* Update the length of characters */ } /* Read in characters and ranges, setting map bits. */ for (;;) { if (p == pend) FREE_STACK_RETURN (REG_EBRACK); PATFETCH (c); /* \ might escape characters inside [...] and [^...]. */ if ((syntax & RE_BACKSLASH_ESCAPE_IN_LISTS) && c == '\\') { if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); PATFETCH (c1); BUF_PUSH(c1); laststart[5]++; /* Update the length of chars */ range_start = c1; continue; } /* Could be the end of the bracket expression. If it's not (i.e., when the bracket expression is `[]' so far), the ']' character bit gets set way below. */ if (c == ']' && p != p1 + 1) break; /* Look ahead to see if it's a range when the last thing was a character class. */ if (had_char_class && c == '-' && *p != ']') FREE_STACK_RETURN (REG_ERANGE); /* Look ahead to see if it's a range when the last thing was a character: if this is a hyphen not at the beginning or the end of a list, then it's the range operator. 
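   (Thus in `[a-z]' the `-' is the range operator, while in `[-a]',
   `[^-a]' and `[a-]' it is an ordinary character.)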
*/ if (c == '-' && !(p - 2 >= pattern && p[-2] == '[') && !(p - 3 >= pattern && p[-3] == '[' && p[-2] == '^') && *p != ']') { reg_errcode_t ret; /* Allocate the space for range_start and range_end. */ GET_BUFFER_SPACE (2); /* Update the pointer to indicate end of buffer. */ b += 2; ret = wcs_compile_range (range_start, &p, pend, translate, syntax, b, laststart); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } else if (p[0] == '-' && p[1] != ']') { /* This handles ranges made up of characters only. */ reg_errcode_t ret; /* Move past the `-'. */ PATFETCH (c1); /* Allocate the space for range_start and range_end. */ GET_BUFFER_SPACE (2); /* Update the pointer to indicate end of buffer. */ b += 2; ret = wcs_compile_range (c, &p, pend, translate, syntax, b, laststart); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } /* See if we're at the beginning of a possible character class. */ else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == ':') { /* Leave room for the null. */ char str[CHAR_CLASS_MAX_LENGTH + 1]; PATFETCH (c); c1 = 0; /* If pattern is `[[:'. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == ':' && *p == ']') || p == pend) break; if (c1 < CHAR_CLASS_MAX_LENGTH) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; /* If isn't a word bracketed by `[:' and `:]': undo the ending character, the letters, and leave the leading `:' and `[' (but store them as character). */ if (c == ':' && *p == ']') { wctype_t wt; uintptr_t alignedp; /* Query the character class as wctype_t. */ wt = IS_CHAR_CLASS (str); if (wt == 0) FREE_STACK_RETURN (REG_ECTYPE); /* Throw away the ] at the end of the character class. */ PATFETCH (c); if (p == pend) FREE_STACK_RETURN (REG_EBRACK); /* Allocate the space for character class. */ GET_BUFFER_SPACE(CHAR_CLASS_SIZE); /* Update the pointer to indicate end of buffer. */ b += CHAR_CLASS_SIZE; /* Move data which follow character classes not to violate the data. */ insert_space(CHAR_CLASS_SIZE, laststart + 6 + laststart[1], b - 1); alignedp = ((uintptr_t)(laststart + 6 + laststart[1]) + __alignof__(wctype_t) - 1) & ~(uintptr_t)(__alignof__(wctype_t) - 1); /* Store the character class. */ *((wctype_t*)alignedp) = wt; /* Update length of char_classes */ laststart[1] += CHAR_CLASS_SIZE; had_char_class = true; } else { c1++; while (c1--) PATUNFETCH; BUF_PUSH ('['); BUF_PUSH (':'); laststart[5] += 2; /* Update the length of characters */ range_start = ':'; had_char_class = false; } } else if (syntax & RE_CHAR_CLASSES && c == '[' && (*p == '=' || *p == '.')) { CHAR_T str[128]; /* Should be large enough. */ CHAR_T delim = *p; /* '=' or '.' */ # ifdef _LIBC uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif PATFETCH (c); c1 = 0; /* If pattern is `[[=' or '[[.'. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == delim && *p == ']') || p == pend) break; if (c1 < sizeof (str) - 1) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; if (c == delim && *p == ']' && str[0] != '\0') { unsigned int i, offset; /* If we have no collation data we use the default collation in which each character is in a class by itself. It also means that ASCII is the character set and therefore we cannot have character with more than one byte in the multibyte representation. 
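   (For example, when collation data is available `[[=e=]]' stands for
   `e' together with the characters that collate as its equivalents, such
   as accented forms of `e'; with no collation data each character is
   equivalent only to itself, so it stands for the single character `e'.)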
*/ /* If not defined _LIBC, we push the name and `\0' for the sake of matching performance. */ int datasize = c1 + 1; # ifdef _LIBC int32_t idx = 0; if (nrules == 0) # endif { if (c1 != 1) FREE_STACK_RETURN (REG_ECOLLATE); } # ifdef _LIBC else { const int32_t *table; const int32_t *weights; const int32_t *extra; const int32_t *indirect; wint_t *cp; /* This #include defines a local function! */ if(delim == '=') { /* We push the index for equivalence class. */ cp = (wint_t*)str; table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEWC); weights = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTWC); extra = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAWC); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTWC); idx = findidx ((const wint_t**)&cp); if (idx == 0 || cp < (wint_t*) str + c1) /* This is no valid character. */ FREE_STACK_RETURN (REG_ECOLLATE); str[0] = (wchar_t)idx; } else /* delim == '.' */ { /* We push collation sequence value for collating symbol. */ int32_t table_size; const int32_t *symb_table; const unsigned char *extra; int32_t idx; int32_t elem; int32_t second; int32_t hash; char char_str[c1]; /* We have to convert the name to a single-byte string. This is possible since the names consist of ASCII characters and the internal representation is UCS4. */ for (i = 0; i < c1; ++i) char_str[i] = str[i]; table_size = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_SYMB_HASH_SIZEMB); symb_table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_TABLEMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); /* Locate the character in the hashing table. */ hash = elem_hash (char_str, c1); idx = 0; elem = hash % table_size; second = hash % (table_size - 2); while (symb_table[2 * elem] != 0) { /* First compare the hashing value. */ if (symb_table[2 * elem] == hash && c1 == extra[symb_table[2 * elem + 1]] && memcmp (char_str, &extra[symb_table[2 * elem + 1] + 1], c1) == 0) { /* Yep, this is the entry. */ idx = symb_table[2 * elem + 1]; idx += 1 + extra[idx]; break; } /* Next entry. */ elem += second; } if (symb_table[2 * elem] != 0) { /* Compute the index of the byte sequence in the table. */ idx += 1 + extra[idx]; /* Adjust for the alignment. */ idx = (idx + 3) & ~3; str[0] = (wchar_t) idx + 4; } else if (symb_table[2 * elem] == 0 && c1 == 1) { /* No valid character. Match it as a single byte character. */ had_char_class = false; BUF_PUSH(str[0]); /* Update the length of characters */ laststart[5]++; range_start = str[0]; /* Throw away the ] at the end of the collating symbol. */ PATFETCH (c); /* exit from the switch block. */ continue; } else FREE_STACK_RETURN (REG_ECOLLATE); } datasize = 1; } # endif /* Throw away the ] at the end of the equivalence class (or collating symbol). */ PATFETCH (c); /* Allocate the space for the equivalence class (or collating symbol) (and '\0' if needed). */ GET_BUFFER_SPACE(datasize); /* Update the pointer to indicate end of buffer. */ b += datasize; if (delim == '=') { /* equivalence class */ /* Calculate the offset of char_ranges, which is next to equivalence_classes. */ offset = laststart[1] + laststart[2] + laststart[3] +6; /* Insert space. */ insert_space(datasize, laststart + offset, b - 1); /* Write the equivalence_class and \0. */ for (i = 0 ; i < datasize ; i++) laststart[offset + i] = str[i]; /* Update the length of equivalence_classes. */ laststart[3] += datasize; had_char_class = true; } else /* delim == '.' 
*/ { /* collating symbol */ /* Calculate the offset of the equivalence_classes, which is next to collating_symbols. */ offset = laststart[1] + laststart[2] + 6; /* Insert space and write the collationg_symbol and \0. */ insert_space(datasize, laststart + offset, b-1); for (i = 0 ; i < datasize ; i++) laststart[offset + i] = str[i]; /* In re_match_2_internal if range_start < -1, we assume -range_start is the offset of the collating symbol which is specified as the character of the range start. So we assign -(laststart[1] + laststart[2] + 6) to range_start. */ range_start = -(laststart[1] + laststart[2] + 6); /* Update the length of collating_symbol. */ laststart[2] += datasize; had_char_class = false; } } else { c1++; while (c1--) PATUNFETCH; BUF_PUSH ('['); BUF_PUSH (delim); laststart[5] += 2; /* Update the length of characters */ range_start = delim; had_char_class = false; } } else { had_char_class = false; BUF_PUSH(c); laststart[5]++; /* Update the length of characters */ range_start = c; } } #else /* BYTE */ /* Ensure that we have enough space to push a charset: the opcode, the length count, and the bitset; 34 bytes in all. */ GET_BUFFER_SPACE (34); laststart = b; /* We test `*p == '^' twice, instead of using an if statement, so we only need one BUF_PUSH. */ BUF_PUSH (*p == '^' ? charset_not : charset); if (*p == '^') p++; /* Remember the first position in the bracket expression. */ p1 = p; /* Push the number of bytes in the bitmap. */ BUF_PUSH ((1 << BYTEWIDTH) / BYTEWIDTH); /* Clear the whole map. */ bzero (b, (1 << BYTEWIDTH) / BYTEWIDTH); /* charset_not matches newline according to a syntax bit. */ if ((re_opcode_t) b[-2] == charset_not && (syntax & RE_HAT_LISTS_NOT_NEWLINE)) SET_LIST_BIT ('\n'); /* Read in characters and ranges, setting map bits. */ for (;;) { if (p == pend) FREE_STACK_RETURN (REG_EBRACK); PATFETCH (c); /* \ might escape characters inside [...] and [^...]. */ if ((syntax & RE_BACKSLASH_ESCAPE_IN_LISTS) && c == '\\') { if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); PATFETCH (c1); SET_LIST_BIT (c1); range_start = c1; continue; } /* Could be the end of the bracket expression. If it's not (i.e., when the bracket expression is `[]' so far), the ']' character bit gets set way below. */ if (c == ']' && p != p1 + 1) break; /* Look ahead to see if it's a range when the last thing was a character class. */ if (had_char_class && c == '-' && *p != ']') FREE_STACK_RETURN (REG_ERANGE); /* Look ahead to see if it's a range when the last thing was a character: if this is a hyphen not at the beginning or the end of a list, then it's the range operator. */ if (c == '-' && !(p - 2 >= pattern && p[-2] == '[') && !(p - 3 >= pattern && p[-3] == '[' && p[-2] == '^') && *p != ']') { reg_errcode_t ret = byte_compile_range (range_start, &p, pend, translate, syntax, b); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } else if (p[0] == '-' && p[1] != ']') { /* This handles ranges made up of characters only. */ reg_errcode_t ret; /* Move past the `-'. */ PATFETCH (c1); ret = byte_compile_range (c, &p, pend, translate, syntax, b); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } /* See if we're at the beginning of a possible character class. */ else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == ':') { /* Leave room for the null. */ char str[CHAR_CLASS_MAX_LENGTH + 1]; PATFETCH (c); c1 = 0; /* If pattern is `[[:'. 
*/ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == ':' && *p == ']') || p == pend) break; if (c1 < CHAR_CLASS_MAX_LENGTH) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; /* If isn't a word bracketed by `[:' and `:]': undo the ending character, the letters, and leave the leading `:' and `[' (but set bits for them). */ if (c == ':' && *p == ']') { # if defined _LIBC || WIDE_CHAR_SUPPORT boolean is_lower = STREQ (str, "lower"); boolean is_upper = STREQ (str, "upper"); wctype_t wt; int ch; wt = IS_CHAR_CLASS (str); if (wt == 0) FREE_STACK_RETURN (REG_ECTYPE); /* Throw away the ] at the end of the character class. */ PATFETCH (c); if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (ch = 0; ch < 1 << BYTEWIDTH; ++ch) { # ifdef _LIBC if (__iswctype (__btowc (ch), wt)) SET_LIST_BIT (ch); # else if (iswctype (btowc (ch), wt)) SET_LIST_BIT (ch); # endif if (translate && (is_upper || is_lower) && (ISUPPER (ch) || ISLOWER (ch))) SET_LIST_BIT (ch); } had_char_class = true; # else int ch; boolean is_alnum = STREQ (str, "alnum"); boolean is_alpha = STREQ (str, "alpha"); boolean is_blank = STREQ (str, "blank"); boolean is_cntrl = STREQ (str, "cntrl"); boolean is_digit = STREQ (str, "digit"); boolean is_graph = STREQ (str, "graph"); boolean is_lower = STREQ (str, "lower"); boolean is_print = STREQ (str, "print"); boolean is_punct = STREQ (str, "punct"); boolean is_space = STREQ (str, "space"); boolean is_upper = STREQ (str, "upper"); boolean is_xdigit = STREQ (str, "xdigit"); if (!IS_CHAR_CLASS (str)) FREE_STACK_RETURN (REG_ECTYPE); /* Throw away the ] at the end of the character class. */ PATFETCH (c); if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (ch = 0; ch < 1 << BYTEWIDTH; ch++) { /* This was split into 3 if's to avoid an arbitrary limit in some compiler. */ if ( (is_alnum && ISALNUM (ch)) || (is_alpha && ISALPHA (ch)) || (is_blank && ISBLANK (ch)) || (is_cntrl && ISCNTRL (ch))) SET_LIST_BIT (ch); if ( (is_digit && ISDIGIT (ch)) || (is_graph && ISGRAPH (ch)) || (is_lower && ISLOWER (ch)) || (is_print && ISPRINT (ch))) SET_LIST_BIT (ch); if ( (is_punct && ISPUNCT (ch)) || (is_space && ISSPACE (ch)) || (is_upper && ISUPPER (ch)) || (is_xdigit && ISXDIGIT (ch))) SET_LIST_BIT (ch); if ( translate && (is_upper || is_lower) && (ISUPPER (ch) || ISLOWER (ch))) SET_LIST_BIT (ch); } had_char_class = true; # endif /* libc || wctype.h */ } else { c1++; while (c1--) PATUNFETCH; SET_LIST_BIT ('['); SET_LIST_BIT (':'); range_start = ':'; had_char_class = false; } } else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == '=') { unsigned char str[MB_LEN_MAX + 1]; # ifdef _LIBC uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif PATFETCH (c); c1 = 0; /* If pattern is `[[='. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == '=' && *p == ']') || p == pend) break; if (c1 < MB_LEN_MAX) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; if (c == '=' && *p == ']' && str[0] != '\0') { /* If we have no collation data we use the default collation in which each character is in a class by itself. It also means that ASCII is the character set and therefore we cannot have character with more than one byte in the multibyte representation. */ # ifdef _LIBC if (nrules == 0) # endif { if (c1 != 1) FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Set the bit for the character. 
*/ SET_LIST_BIT (str[0]); } # ifdef _LIBC else { /* Try to match the byte sequence in `str' against those known to the collate implementation. First find out whether the bytes in `str' are actually from exactly one character. */ const int32_t *table; const unsigned char *weights; const unsigned char *extra; const int32_t *indirect; int32_t idx; const unsigned char *cp = str; int ch; /* This #include defines a local function! */ table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEMB); weights = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAMB); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTMB); idx = findidx (&cp); if (idx == 0 || cp < str + c1) /* This is no valid character. */ FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Now we have to go throught the whole table and find all characters which have the same first level weight. XXX Note that this is not entirely correct. we would have to match multibyte sequences but this is not possible with the current implementation. */ for (ch = 1; ch < 256; ++ch) /* XXX This test would have to be changed if we would allow matching multibyte sequences. */ if (table[ch] > 0) { int32_t idx2 = table[ch]; size_t len = weights[idx2]; /* Test whether the lenghts match. */ if (weights[idx] == len) { /* They do. New compare the bytes of the weight. */ size_t cnt = 0; while (cnt < len && (weights[idx + 1 + cnt] == weights[idx2 + 1 + cnt])) ++cnt; if (cnt == len) /* They match. Mark the character as acceptable. */ SET_LIST_BIT (ch); } } } # endif had_char_class = true; } else { c1++; while (c1--) PATUNFETCH; SET_LIST_BIT ('['); SET_LIST_BIT ('='); range_start = '='; had_char_class = false; } } else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == '.') { unsigned char str[128]; /* Should be large enough. */ # ifdef _LIBC uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif PATFETCH (c); c1 = 0; /* If pattern is `[[.'. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == '.' && *p == ']') || p == pend) break; if (c1 < sizeof (str)) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; if (c == '.' && *p == ']' && str[0] != '\0') { /* If we have no collation data we use the default collation in which each character is the name for its own class which contains only the one character. It also means that ASCII is the character set and therefore we cannot have character with more than one byte in the multibyte representation. */ # ifdef _LIBC if (nrules == 0) # endif { if (c1 != 1) FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Set the bit for the character. */ SET_LIST_BIT (str[0]); range_start = ((const unsigned char *) str)[0]; } # ifdef _LIBC else { /* Try to match the byte sequence in `str' against those known to the collate implementation. First find out whether the bytes in `str' are actually from exactly one character. 
*/ int32_t table_size; const int32_t *symb_table; const unsigned char *extra; int32_t idx; int32_t elem; int32_t second; int32_t hash; table_size = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_SYMB_HASH_SIZEMB); symb_table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_TABLEMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); /* Locate the character in the hashing table. */ hash = elem_hash (str, c1); idx = 0; elem = hash % table_size; second = hash % (table_size - 2); while (symb_table[2 * elem] != 0) { /* First compare the hashing value. */ if (symb_table[2 * elem] == hash && c1 == extra[symb_table[2 * elem + 1]] && memcmp (str, &extra[symb_table[2 * elem + 1] + 1], c1) == 0) { /* Yep, this is the entry. */ idx = symb_table[2 * elem + 1]; idx += 1 + extra[idx]; break; } /* Next entry. */ elem += second; } if (symb_table[2 * elem] == 0) /* This is no valid character. */ FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Now add the multibyte character(s) we found to the accept list. XXX Note that this is not entirely correct. we would have to match multibyte sequences but this is not possible with the current implementation. Also, we have to match collating symbols, which expand to more than one file, as a whole and not allow the individual bytes. */ c1 = extra[idx++]; if (c1 == 1) range_start = extra[idx]; while (c1-- > 0) { SET_LIST_BIT (extra[idx]); ++idx; } } # endif had_char_class = false; } else { c1++; while (c1--) PATUNFETCH; SET_LIST_BIT ('['); SET_LIST_BIT ('.'); range_start = '.'; had_char_class = false; } } else { had_char_class = false; SET_LIST_BIT (c); range_start = c; } } /* Discard any (non)matching list bytes that are all 0 at the end of the map. Decrease the map-length byte too. */ while ((int) b[-1] > 0 && b[b[-1] - 1] == 0) b[-1]--; b += b[-1]; #endif /* WCHAR */ } break; case '(': if (syntax & RE_NO_BK_PARENS) goto handle_open; else goto normal_char; case ')': if (syntax & RE_NO_BK_PARENS) goto handle_close; else goto normal_char; case '\n': if (syntax & RE_NEWLINE_ALT) goto handle_alt; else goto normal_char; case '|': if (syntax & RE_NO_BK_VBAR) goto handle_alt; else goto normal_char; case '{': if (syntax & RE_INTERVALS && syntax & RE_NO_BK_BRACES) goto handle_interval; else goto normal_char; case '\\': if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); /* Do not translate the character after the \, so that we can distinguish, e.g., \B from \b, even if we normally would translate, e.g., B to b. */ PATFETCH_RAW (c); switch (c) { case '(': if (syntax & RE_NO_BK_PARENS) goto normal_backslash; handle_open: bufp->re_nsub++; regnum++; if (COMPILE_STACK_FULL) { RETALLOC (compile_stack.stack, compile_stack.size << 1, compile_stack_elt_t); if (compile_stack.stack == NULL) return REG_ESPACE; compile_stack.size <<= 1; } /* These are the values to restore when we hit end of this group. They are all relative offsets, so that if the whole pattern moves because of realloc, they will still be valid. */ COMPILE_STACK_TOP.begalt_offset = begalt - COMPILED_BUFFER_VAR; COMPILE_STACK_TOP.fixup_alt_jump = fixup_alt_jump ? fixup_alt_jump - COMPILED_BUFFER_VAR + 1 : 0; COMPILE_STACK_TOP.laststart_offset = b - COMPILED_BUFFER_VAR; COMPILE_STACK_TOP.regnum = regnum; /* We will eventually replace the 0 with the number of groups inner to this one. But do not push a start_memory for groups beyond the last one we can represent in the compiled pattern. 
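   (Such a group -- one whose number exceeds MAX_REGNUM -- is still
   counted in `regnum' and in `bufp->re_nsub', but no start_memory or
   stop_memory is emitted for it, so its contents are never reported in
   the registers.)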
*/ if (regnum <= MAX_REGNUM) { COMPILE_STACK_TOP.inner_group_offset = b - COMPILED_BUFFER_VAR + 2; BUF_PUSH_3 (start_memory, regnum, 0); } compile_stack.avail++; fixup_alt_jump = 0; laststart = 0; begalt = b; /* If we've reached MAX_REGNUM groups, then this open won't actually generate any code, so we'll have to clear pending_exact explicitly. */ pending_exact = 0; break; case ')': if (syntax & RE_NO_BK_PARENS) goto normal_backslash; if (COMPILE_STACK_EMPTY) { if (syntax & RE_UNMATCHED_RIGHT_PAREN_ORD) goto normal_backslash; else FREE_STACK_RETURN (REG_ERPAREN); } handle_close: if (fixup_alt_jump) { /* Push a dummy failure point at the end of the alternative for a possible future `pop_failure_jump' to pop. See comments at `push_dummy_failure' in `re_match_2'. */ BUF_PUSH (push_dummy_failure); /* We allocated space for this jump when we assigned to `fixup_alt_jump', in the `handle_alt' case below. */ STORE_JUMP (jump_past_alt, fixup_alt_jump, b - 1); } /* See similar code for backslashed left paren above. */ if (COMPILE_STACK_EMPTY) { if (syntax & RE_UNMATCHED_RIGHT_PAREN_ORD) goto normal_char; else FREE_STACK_RETURN (REG_ERPAREN); } /* Since we just checked for an empty stack above, this ``can't happen''. */ assert (compile_stack.avail != 0); { /* We don't just want to restore into `regnum', because later groups should continue to be numbered higher, as in `(ab)c(de)' -- the second group is #2. */ regnum_t this_group_regnum; compile_stack.avail--; begalt = COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.begalt_offset; fixup_alt_jump = COMPILE_STACK_TOP.fixup_alt_jump ? COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.fixup_alt_jump - 1 : 0; laststart = COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.laststart_offset; this_group_regnum = COMPILE_STACK_TOP.regnum; /* If we've reached MAX_REGNUM groups, then this open won't actually generate any code, so we'll have to clear pending_exact explicitly. */ pending_exact = 0; /* We're at the end of the group, so now we know how many groups were inside this one. */ if (this_group_regnum <= MAX_REGNUM) { UCHAR_T *inner_group_loc = COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.inner_group_offset; *inner_group_loc = regnum - this_group_regnum; BUF_PUSH_3 (stop_memory, this_group_regnum, regnum - this_group_regnum); } } break; case '|': /* `\|'. */ if (syntax & RE_LIMITED_OPS || syntax & RE_NO_BK_VBAR) goto normal_backslash; handle_alt: if (syntax & RE_LIMITED_OPS) goto normal_char; /* Insert before the previous alternative a jump which jumps to this alternative if the former fails. */ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (on_failure_jump, begalt, b + 2 + 2 * OFFSET_ADDRESS_SIZE); pending_exact = 0; b += 1 + OFFSET_ADDRESS_SIZE; /* The alternative before this one has a jump after it which gets executed if it gets matched. Adjust that jump so it will jump to this alternative's analogous jump (put in below, which in turn will jump to the next (if any) alternative's such jump, etc.). The last such jump jumps to the correct final destination. A picture: _____ _____ | | | | | v | v a | b | c If we are at `b', then fixup_alt_jump right now points to a three-byte space after `a'. We'll put in the jump, set fixup_alt_jump to right after `b', and leave behind three bytes which we'll fill in when we get to after `c'. */ if (fixup_alt_jump) STORE_JUMP (jump_past_alt, fixup_alt_jump, b); /* Mark and leave space for a jump after this alternative, to be filled in later either by next alternative or when know we're at the end of a series of alternatives. 
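   The net effect for a two-way alternative such as `a\|b' is, roughly
   (byte case):

       on_failure_jump   to the second alternative    try `b' if `a' fails
       exactn 1 'a'
       jump_past_alt     filled in later              skips the remaining
                                                      alternatives on success
       exactn 1 'b'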
*/ fixup_alt_jump = b; GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); b += 1 + OFFSET_ADDRESS_SIZE; laststart = 0; begalt = b; break; case '{': /* If \{ is a literal. */ if (!(syntax & RE_INTERVALS) /* If we're at `\{' and it's not the open-interval operator. */ || (syntax & RE_NO_BK_BRACES)) goto normal_backslash; handle_interval: { /* If got here, then the syntax allows intervals. */ /* At least (most) this many matches must be made. */ int lower_bound = -1, upper_bound = -1; /* Place in the uncompiled pattern (i.e., just after the '{') to go back to if the interval is invalid. */ const CHAR_T *beg_interval = p; if (p == pend) goto invalid_interval; GET_UNSIGNED_NUMBER (lower_bound); if (c == ',') { GET_UNSIGNED_NUMBER (upper_bound); if (upper_bound < 0) upper_bound = RE_DUP_MAX; } else /* Interval such as `{1}' => match exactly once. */ upper_bound = lower_bound; if (! (0 <= lower_bound && lower_bound <= upper_bound)) goto invalid_interval; if (!(syntax & RE_NO_BK_BRACES)) { if (c != '\\' || p == pend) goto invalid_interval; PATFETCH (c); } if (c != '}') goto invalid_interval; /* If it's invalid to have no preceding re. */ if (!laststart) { if (syntax & RE_CONTEXT_INVALID_OPS && !(syntax & RE_INVALID_INTERVAL_ORD)) FREE_STACK_RETURN (REG_BADRPT); else if (syntax & RE_CONTEXT_INDEP_OPS) laststart = b; else goto unfetch_interval; } /* We just parsed a valid interval. */ if (RE_DUP_MAX < upper_bound) FREE_STACK_RETURN (REG_BADBR); /* If the upper bound is zero, don't want to succeed at all; jump from `laststart' to `b + 3', which will be the end of the buffer after we insert the jump. */ /* ifdef WCHAR, 'b + 1 + OFFSET_ADDRESS_SIZE' instead of 'b + 3'. */ if (upper_bound == 0) { GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (jump, laststart, b + 1 + OFFSET_ADDRESS_SIZE); b += 1 + OFFSET_ADDRESS_SIZE; } /* Otherwise, we have a nontrivial interval. When we're all done, the pattern will look like: set_number_at set_number_at succeed_n jump_n (The upper bound and `jump_n' are omitted if `upper_bound' is 1, though.) */ else { /* If the upper bound is > 1, we need to insert more at the end of the loop. */ unsigned nbytes = 2 + 4 * OFFSET_ADDRESS_SIZE + (upper_bound > 1) * (2 + 4 * OFFSET_ADDRESS_SIZE); GET_BUFFER_SPACE (nbytes); /* Initialize lower bound of the `succeed_n', even though it will be set during matching by its attendant `set_number_at' (inserted next), because `re_compile_fastmap' needs to know. Jump to the `jump_n' we might insert below. */ INSERT_JUMP2 (succeed_n, laststart, b + 1 + 2 * OFFSET_ADDRESS_SIZE + (upper_bound > 1) * (1 + 2 * OFFSET_ADDRESS_SIZE) , lower_bound); b += 1 + 2 * OFFSET_ADDRESS_SIZE; /* Code to initialize the lower bound. Insert before the `succeed_n'. The `5' is the last two bytes of this `set_number_at', plus 3 bytes of the following `succeed_n'. */ /* ifdef WCHAR, The '1+2*OFFSET_ADDRESS_SIZE' is the 'set_number_at', plus '1+OFFSET_ADDRESS_SIZE' of the following `succeed_n'. */ PREFIX(insert_op2) (set_number_at, laststart, 1 + 2 * OFFSET_ADDRESS_SIZE, lower_bound, b); b += 1 + 2 * OFFSET_ADDRESS_SIZE; if (upper_bound > 1) { /* More than one repetition is allowed, so append a backward jump to the `succeed_n' that starts this interval. When we've reached this during matching, we'll have matched the interval once, so jump back only `upper_bound - 1' times. 
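As a concrete example, `x\{2,4\}' leaves the code for `x' bracketed by a succeed_n whose counter starts at the lower bound 2 and a trailing jump_n whose counter starts at upper_bound - 1 = 3 (one iteration has already been matched when the jump_n is first reached), each with its own set_number_at in front so the counters are re-initialized whenever the interval is entered again after a failure.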
*/ STORE_JUMP2 (jump_n, b, laststart + 2 * OFFSET_ADDRESS_SIZE + 1, upper_bound - 1); b += 1 + 2 * OFFSET_ADDRESS_SIZE; /* The location we want to set is the second parameter of the `jump_n'; that is `b-2' as an absolute address. `laststart' will be the `set_number_at' we're about to insert; `laststart+3' the number to set, the source for the relative address. But we are inserting into the middle of the pattern -- so everything is getting moved up by 5. Conclusion: (b - 2) - (laststart + 3) + 5, i.e., b - laststart. We insert this at the beginning of the loop so that if we fail during matching, we'll reinitialize the bounds. */ PREFIX(insert_op2) (set_number_at, laststart, b - laststart, upper_bound - 1, b); b += 1 + 2 * OFFSET_ADDRESS_SIZE; } } pending_exact = 0; break; invalid_interval: if (!(syntax & RE_INVALID_INTERVAL_ORD)) FREE_STACK_RETURN (p == pend ? REG_EBRACE : REG_BADBR); unfetch_interval: /* Match the characters as literals. */ p = beg_interval; c = '{'; if (syntax & RE_NO_BK_BRACES) goto normal_char; else goto normal_backslash; } #ifdef emacs /* There is no way to specify the before_dot and after_dot operators. rms says this is ok. --karl */ case '=': BUF_PUSH (at_dot); break; case 's': laststart = b; PATFETCH (c); BUF_PUSH_2 (syntaxspec, syntax_spec_code[c]); break; case 'S': laststart = b; PATFETCH (c); BUF_PUSH_2 (notsyntaxspec, syntax_spec_code[c]); break; #endif /* emacs */ case 'w': if (syntax & RE_NO_GNU_OPS) goto normal_char; laststart = b; BUF_PUSH (wordchar); break; case 'W': if (syntax & RE_NO_GNU_OPS) goto normal_char; laststart = b; BUF_PUSH (notwordchar); break; case '<': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (wordbeg); break; case '>': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (wordend); break; case 'b': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (wordbound); break; case 'B': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (notwordbound); break; case '`': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (begbuf); break; case '\'': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (endbuf); break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (syntax & RE_NO_BK_REFS) goto normal_char; c1 = c - '0'; if (c1 > regnum) FREE_STACK_RETURN (REG_ESUBREG); /* Can't back reference to a subexpression if inside of it. */ if (group_in_compile_stack (compile_stack, (regnum_t) c1)) goto normal_char; laststart = b; BUF_PUSH_2 (duplicate, c1); break; case '+': case '?': if (syntax & RE_BK_PLUS_QM) goto handle_plus; else goto normal_backslash; default: normal_backslash: /* You might think it would be useful for \ to mean not to translate; but if we don't translate it it will never match anything. */ c = TRANSLATE (c); goto normal_char; } break; default: /* Expects the character in `c'. */ normal_char: /* If no exactn currently being built. */ if (!pending_exact #ifdef WCHAR /* If last exactn handle binary(or character) and new exactn handle character(or binary). */ || is_exactn_bin != is_binary[p - 1 - pattern] #endif /* WCHAR */ /* If last exactn not at current position. */ || pending_exact + *pending_exact + 1 != b /* We have only one byte following the exactn for the count. */ || *pending_exact == (1 << BYTEWIDTH) - 1 /* If followed by a repetition operator. */ || *p == '*' || *p == '^' || ((syntax & RE_BK_PLUS_QM) ? *p == '\\' && (p[1] == '+' || p[1] == '?') : (*p == '+' || *p == '?')) || ((syntax & RE_INTERVALS) && ((syntax & RE_NO_BK_BRACES) ? 
*p == '{' : (p[0] == '\\' && p[1] == '{')))) { /* Start building a new exactn. */ laststart = b; #ifdef WCHAR /* Is this exactn binary data or character? */ is_exactn_bin = is_binary[p - 1 - pattern]; if (is_exactn_bin) BUF_PUSH_2 (exactn_bin, 0); else BUF_PUSH_2 (exactn, 0); #else BUF_PUSH_2 (exactn, 0); #endif /* WCHAR */ pending_exact = b - 1; } BUF_PUSH (c); (*pending_exact)++; break; } /* switch (c) */ } /* while p != pend */ /* Through the pattern now. */ if (fixup_alt_jump) STORE_JUMP (jump_past_alt, fixup_alt_jump, b); if (!COMPILE_STACK_EMPTY) FREE_STACK_RETURN (REG_EPAREN); /* If we don't want backtracking, force success the first time we reach the end of the compiled pattern. */ if (syntax & RE_NO_POSIX_BACKTRACKING) BUF_PUSH (succeed); #ifdef WCHAR free (pattern); free (mbs_offset); free (is_binary); #endif free (compile_stack.stack); /* We have succeeded; set the length of the buffer. */ #ifdef WCHAR bufp->used = (uintptr_t) b - (uintptr_t) COMPILED_BUFFER_VAR; #else bufp->used = b - bufp->buffer; #endif #ifdef DEBUG if (debug) { DEBUG_PRINT1 ("\nCompiled pattern: \n"); PREFIX(print_compiled_pattern) (bufp); } #endif /* DEBUG */ #ifndef MATCH_MAY_ALLOCATE /* Initialize the failure stack to the largest possible stack. This isn't necessary unless we're trying to avoid calling alloca in the search and match routines. */ { int num_regs = bufp->re_nsub + 1; /* Since DOUBLE_FAIL_STACK refuses to double only if the current size is strictly greater than re_max_failures, the largest possible stack is 2 * re_max_failures failure points. */ if (fail_stack.size < (2 * re_max_failures * MAX_FAILURE_ITEMS)) { fail_stack.size = (2 * re_max_failures * MAX_FAILURE_ITEMS); # ifdef emacs if (! fail_stack.stack) fail_stack.stack = (PREFIX(fail_stack_elt_t) *) xmalloc (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t))); else fail_stack.stack = (PREFIX(fail_stack_elt_t) *) xrealloc (fail_stack.stack, (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t)))); # else /* not emacs */ if (! fail_stack.stack) fail_stack.stack = (PREFIX(fail_stack_elt_t) *) malloc (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t))); else fail_stack.stack = (PREFIX(fail_stack_elt_t) *) realloc (fail_stack.stack, (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t)))); # endif /* not emacs */ } PREFIX(regex_grow_registers) (num_regs); } #endif /* not MATCH_MAY_ALLOCATE */ return REG_NOERROR; } /* regex_compile */ /* Subroutines for `regex_compile'. */ /* Store OP at LOC followed by two-byte integer parameter ARG. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. */ static void PREFIX(store_op1) (op, loc, arg) re_opcode_t op; UCHAR_T *loc; int arg; { *loc = (UCHAR_T) op; STORE_NUMBER (loc + 1, arg); } /* Like `store_op1', but for two two-byte parameters ARG1 and ARG2. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. */ static void PREFIX(store_op2) (op, loc, arg1, arg2) re_opcode_t op; UCHAR_T *loc; int arg1, arg2; { *loc = (UCHAR_T) op; STORE_NUMBER (loc + 1, arg1); STORE_NUMBER (loc + 1 + OFFSET_ADDRESS_SIZE, arg2); } /* Copy the bytes from LOC to END to open up three bytes of space at LOC for OP followed by two-byte integer parameter ARG. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. 
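In either case the mechanics are the same: e.g. store_op1 (jump, loc, 5) writes the `jump' opcode at loc[0] and the value 5 in the OFFSET_ADDRESS_SIZE buffer elements that follow, while insert_op1 first shifts everything in [loc, end) up by 1 + OFFSET_ADDRESS_SIZE elements to make room before storing.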
*/ static void PREFIX(insert_op1) (op, loc, arg, end) re_opcode_t op; UCHAR_T *loc; int arg; UCHAR_T *end; { register UCHAR_T *pfrom = end; register UCHAR_T *pto = end + 1 + OFFSET_ADDRESS_SIZE; while (pfrom != loc) *--pto = *--pfrom; PREFIX(store_op1) (op, loc, arg); } /* Like `insert_op1', but for two two-byte parameters ARG1 and ARG2. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. */ static void PREFIX(insert_op2) (op, loc, arg1, arg2, end) re_opcode_t op; UCHAR_T *loc; int arg1, arg2; UCHAR_T *end; { register UCHAR_T *pfrom = end; register UCHAR_T *pto = end + 1 + 2 * OFFSET_ADDRESS_SIZE; while (pfrom != loc) *--pto = *--pfrom; PREFIX(store_op2) (op, loc, arg1, arg2); } /* P points to just after a ^ in PATTERN. Return true if that ^ comes after an alternative or a begin-subexpression. We assume there is at least one character before the ^. */ static boolean PREFIX(at_begline_loc_p) (pattern, p, syntax) const CHAR_T *pattern, *p; reg_syntax_t syntax; { const CHAR_T *prev = p - 2; boolean prev_prev_backslash = prev > pattern && prev[-1] == '\\'; return /* After a subexpression? */ (*prev == '(' && (syntax & RE_NO_BK_PARENS || prev_prev_backslash)) /* After an alternative? */ || (*prev == '|' && (syntax & RE_NO_BK_VBAR || prev_prev_backslash)); } /* The dual of at_begline_loc_p. This one is for $. We assume there is at least one character after the $, i.e., `P < PEND'. */ static boolean PREFIX(at_endline_loc_p) (p, pend, syntax) const CHAR_T *p, *pend; reg_syntax_t syntax; { const CHAR_T *next = p; boolean next_backslash = *next == '\\'; const CHAR_T *next_next = p + 1 < pend ? p + 1 : 0; return /* Before a subexpression? */ (syntax & RE_NO_BK_PARENS ? *next == ')' : next_backslash && next_next && *next_next == ')') /* Before an alternative? */ || (syntax & RE_NO_BK_VBAR ? *next == '|' : next_backslash && next_next && *next_next == '|'); } #else /* not INSIDE_RECURSION */ /* Returns true if REGNUM is in one of COMPILE_STACK's elements and false if it's not. */ static boolean group_in_compile_stack (compile_stack, regnum) compile_stack_type compile_stack; regnum_t regnum; { int this_element; for (this_element = compile_stack.avail - 1; this_element >= 0; this_element--) if (compile_stack.stack[this_element].regnum == regnum) return true; return false; } #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION #ifdef WCHAR /* This inserts a space of size "num" into the pattern at "loc". "end" must point to the end of the allocated buffer. */ static void insert_space (num, loc, end) int num; CHAR_T *loc; CHAR_T *end; { register CHAR_T *pto = end; register CHAR_T *pfrom = end - num; while (pfrom >= loc) *pto-- = *pfrom--; } #endif /* WCHAR */ #ifdef WCHAR static reg_errcode_t wcs_compile_range (range_start_char, p_ptr, pend, translate, syntax, b, char_set) CHAR_T range_start_char; const CHAR_T **p_ptr, *pend; CHAR_T *char_set, *b; RE_TRANSLATE_TYPE translate; reg_syntax_t syntax; { const CHAR_T *p = *p_ptr; CHAR_T range_start, range_end; reg_errcode_t ret; # ifdef _LIBC uint32_t nrules; uint32_t start_val, end_val; # endif if (p == pend) return REG_ERANGE; # ifdef _LIBC nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); if (nrules != 0) { const char *collseq = (const char *) _NL_CURRENT(LC_COLLATE, _NL_COLLATE_COLLSEQWC); const unsigned char *extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); if (range_start_char < -1) { /* range_start is a collating symbol. */ int32_t *wextra; /* Retrieve the index and get the collation sequence value.
*/ wextra = (int32_t*)(extra + char_set[-range_start_char]); start_val = wextra[1 + *wextra]; } else start_val = collseq_table_lookup(collseq, TRANSLATE(range_start_char)); end_val = collseq_table_lookup (collseq, TRANSLATE (p[0])); /* Report an error if the range is empty and the syntax prohibits this. */ ret = ((syntax & RE_NO_EMPTY_RANGES) && (start_val > end_val))? REG_ERANGE : REG_NOERROR; /* Insert space to the end of the char_ranges. */ insert_space(2, b - char_set[5] - 2, b - 1); *(b - char_set[5] - 2) = (wchar_t)start_val; *(b - char_set[5] - 1) = (wchar_t)end_val; char_set[4]++; /* ranges_index */ } else # endif { range_start = (range_start_char >= 0)? TRANSLATE (range_start_char): range_start_char; range_end = TRANSLATE (p[0]); /* Report an error if the range is empty and the syntax prohibits this. */ ret = ((syntax & RE_NO_EMPTY_RANGES) && (range_start > range_end))? REG_ERANGE : REG_NOERROR; /* Insert space to the end of the char_ranges. */ insert_space(2, b - char_set[5] - 2, b - 1); *(b - char_set[5] - 2) = range_start; *(b - char_set[5] - 1) = range_end; char_set[4]++; /* ranges_index */ } /* Have to increment the pointer into the pattern string, so the caller isn't still at the ending character. */ (*p_ptr)++; return ret; } #else /* BYTE */ /* Read the ending character of a range (in a bracket expression) from the uncompiled pattern *P_PTR (which ends at PEND). We assume the starting character is in `P[-2]'. (`P[-1]' is the character `-'.) Then we set the translation of all bits between the starting and ending characters (inclusive) in the compiled pattern B. Return an error code. We use these short variable names so we can use the same macros as `regex_compile' itself. */ static reg_errcode_t byte_compile_range (range_start_char, p_ptr, pend, translate, syntax, b) unsigned int range_start_char; const char **p_ptr, *pend; RE_TRANSLATE_TYPE translate; reg_syntax_t syntax; unsigned char *b; { unsigned this_char; const char *p = *p_ptr; reg_errcode_t ret; # if _LIBC const unsigned char *collseq; unsigned int start_colseq; unsigned int end_colseq; # else unsigned end_char; # endif if (p == pend) return REG_ERANGE; /* Have to increment the pointer into the pattern string, so the caller isn't still at the ending character. */ (*p_ptr)++; /* Report an error if the range is empty and the syntax prohibits this. */ ret = syntax & RE_NO_EMPTY_RANGES ? REG_ERANGE : REG_NOERROR; # if _LIBC collseq = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_COLLSEQMB); start_colseq = collseq[(unsigned char) TRANSLATE (range_start_char)]; end_colseq = collseq[(unsigned char) TRANSLATE (p[0])]; for (this_char = 0; this_char <= (unsigned char) -1; ++this_char) { unsigned int this_colseq = collseq[(unsigned char) TRANSLATE (this_char)]; if (start_colseq <= this_colseq && this_colseq <= end_colseq) { SET_LIST_BIT (TRANSLATE (this_char)); ret = REG_NOERROR; } } # else /* Here we see why `this_char' has to be larger than an `unsigned char' -- we would otherwise go into an infinite loop, since all characters <= 0xff. */ range_start_char = TRANSLATE (range_start_char); /* TRANSLATE(p[0]) is casted to char (not unsigned char) in TRANSLATE, and some compilers cast it to int implicitly, so following for_loop may fall to (almost) infinite loop. e.g. If translate[p[0]] = 0xff, end_char may equals to 0xffffffff. To avoid this, we cast p[0] to unsigned int and truncate it. 
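Concretely, if the translated value were sign-extended to 0xffffffff, the loop condition `this_char <= end_char' below would hold for essentially every value of `this_char', so the range loop would (almost) never terminate; masking with ((1 << BYTEWIDTH) - 1) brings the bound back into the 0..0xff byte range.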
*/ end_char = ((unsigned)TRANSLATE(p[0]) & ((1 << BYTEWIDTH) - 1)); for (this_char = range_start_char; this_char <= end_char; ++this_char) { SET_LIST_BIT (TRANSLATE (this_char)); ret = REG_NOERROR; } # endif return ret; } #endif /* WCHAR */ /* re_compile_fastmap computes a ``fastmap'' for the compiled pattern in BUFP. A fastmap records which of the (1 << BYTEWIDTH) possible characters can start a string that matches the pattern. This fastmap is used by re_search to skip quickly over impossible starting points. The caller must supply the address of a (1 << BYTEWIDTH)-byte data area as BUFP->fastmap. We set the `fastmap', `fastmap_accurate', and `can_be_null' fields in the pattern buffer. Returns 0 if we succeed, -2 if an internal error. */ #ifdef WCHAR /* local function for re_compile_fastmap. truncate wchar_t character to char. */ static unsigned char truncate_wchar (CHAR_T c); static unsigned char truncate_wchar (c) CHAR_T c; { unsigned char buf[MB_CUR_MAX]; mbstate_t state; int retval; memset (&state, '\0', sizeof (state)); # ifdef _LIBC retval = __wcrtomb (buf, c, &state); # else retval = wcrtomb (buf, c, &state); # endif return retval > 0 ? buf[0] : (unsigned char) c; } #endif /* WCHAR */ static int PREFIX(re_compile_fastmap) (bufp) struct re_pattern_buffer *bufp; { int j, k; #ifdef MATCH_MAY_ALLOCATE PREFIX(fail_stack_type) fail_stack; #endif #ifndef REGEX_MALLOC char *destination; #endif register char *fastmap = bufp->fastmap; #ifdef WCHAR /* We need to cast pattern to (wchar_t*), because we casted this compiled pattern to (char*) in regex_compile. */ UCHAR_T *pattern = (UCHAR_T*)bufp->buffer; register UCHAR_T *pend = (UCHAR_T*) (bufp->buffer + bufp->used); #else /* BYTE */ UCHAR_T *pattern = bufp->buffer; register UCHAR_T *pend = pattern + bufp->used; #endif /* WCHAR */ UCHAR_T *p = pattern; #ifdef REL_ALLOC /* This holds the pointer to the failure stack, when it is allocated relocatably. */ fail_stack_elt_t *failure_stack_ptr; #endif /* Assume that each path through the pattern can be null until proven otherwise. We set this false at the bottom of switch statement, to which we get only if a particular path doesn't match the empty string. */ boolean path_can_be_null = true; /* We aren't doing a `succeed_n' to begin with. */ boolean succeed_n_p = false; assert (fastmap != NULL && p != NULL); INIT_FAIL_STACK (); bzero (fastmap, 1 << BYTEWIDTH); /* Assume nothing's valid. */ bufp->fastmap_accurate = 1; /* It will be when we're done. */ bufp->can_be_null = 0; while (1) { if (p == pend || *p == (UCHAR_T) succeed) { /* We have reached the (effective) end of pattern. */ if (!FAIL_STACK_EMPTY ()) { bufp->can_be_null |= path_can_be_null; /* Reset for next path. */ path_can_be_null = true; p = fail_stack.stack[--fail_stack.avail].pointer; continue; } else break; } /* We should never be about to go beyond the end of the pattern. */ assert (p < pend); switch (SWITCH_ENUM_CAST ((re_opcode_t) *p++)) { /* I guess the idea here is to simply not bother with a fastmap if a backreference is used, since it's too hard to figure out the fastmap for the corresponding group. Setting `can_be_null' stops `re_search_2' from using the fastmap, so that is all we do. */ case duplicate: bufp->can_be_null = 1; goto done; /* Following are the cases which match a character. These end with `break'. 
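For instance, with a pattern such as `foo\|bar' only fastmap['f'] and fastmap['b'] end up set by this walk, so re_search can then skip any starting byte other than `f' or `b'.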
*/ #ifdef WCHAR case exactn: fastmap[truncate_wchar(p[1])] = 1; break; #else /* BYTE */ case exactn: fastmap[p[1]] = 1; break; #endif /* WCHAR */ #ifdef MBS_SUPPORT case exactn_bin: fastmap[p[1]] = 1; break; #endif #ifdef WCHAR /* It is hard to distinguish fastmap from (multi byte) characters which depends on current locale. */ case charset: case charset_not: case wordchar: case notwordchar: bufp->can_be_null = 1; goto done; #else /* BYTE */ case charset: for (j = *p++ * BYTEWIDTH - 1; j >= 0; j--) if (p[j / BYTEWIDTH] & (1 << (j % BYTEWIDTH))) fastmap[j] = 1; break; case charset_not: /* Chars beyond end of map must be allowed. */ for (j = *p * BYTEWIDTH; j < (1 << BYTEWIDTH); j++) fastmap[j] = 1; for (j = *p++ * BYTEWIDTH - 1; j >= 0; j--) if (!(p[j / BYTEWIDTH] & (1 << (j % BYTEWIDTH)))) fastmap[j] = 1; break; case wordchar: for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) == Sword) fastmap[j] = 1; break; case notwordchar: for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) != Sword) fastmap[j] = 1; break; #endif /* WCHAR */ case anychar: { int fastmap_newline = fastmap['\n']; /* `.' matches anything ... */ for (j = 0; j < (1 << BYTEWIDTH); j++) fastmap[j] = 1; /* ... except perhaps newline. */ if (!(bufp->syntax & RE_DOT_NEWLINE)) fastmap['\n'] = fastmap_newline; /* Return if we have already set `can_be_null'; if we have, then the fastmap is irrelevant. Something's wrong here. */ else if (bufp->can_be_null) goto done; /* Otherwise, have to check alternative paths. */ break; } #ifdef emacs case syntaxspec: k = *p++; for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) == (enum syntaxcode) k) fastmap[j] = 1; break; case notsyntaxspec: k = *p++; for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) != (enum syntaxcode) k) fastmap[j] = 1; break; /* All cases after this match the empty string. These end with `continue'. */ case before_dot: case at_dot: case after_dot: continue; #endif /* emacs */ case no_op: case begline: case endline: case begbuf: case endbuf: case wordbound: case notwordbound: case wordbeg: case wordend: case push_dummy_failure: continue; case jump_n: case pop_failure_jump: case maybe_pop_jump: case jump: case jump_past_alt: case dummy_failure_jump: EXTRACT_NUMBER_AND_INCR (j, p); p += j; if (j > 0) continue; /* Jump backward implies we just went through the body of a loop and matched nothing. Opcode jumped to should be `on_failure_jump' or `succeed_n'. Just treat it like an ordinary jump. For a * loop, it has pushed its failure point already; if so, discard that as redundant. */ if ((re_opcode_t) *p != on_failure_jump && (re_opcode_t) *p != succeed_n) continue; p++; EXTRACT_NUMBER_AND_INCR (j, p); p += j; /* If what's on the stack is where we are now, pop it. */ if (!FAIL_STACK_EMPTY () && fail_stack.stack[fail_stack.avail - 1].pointer == p) fail_stack.avail--; continue; case on_failure_jump: case on_failure_keep_string_jump: handle_on_failure_jump: EXTRACT_NUMBER_AND_INCR (j, p); /* For some patterns, e.g., `(a?)?', `p+j' here points to the end of the pattern. We don't want to push such a point, since when we restore it above, entering the switch will increment `p' past the end of the pattern. We don't need to push such a point since we obviously won't find any more fastmap entries beyond `pend'. Such a pattern can match the null string, though. */ if (p + j < pend) { if (!PUSH_PATTERN_OP (p + j, fail_stack)) { RESET_FAIL_STACK (); return -2; } } else bufp->can_be_null = 1; if (succeed_n_p) { EXTRACT_NUMBER_AND_INCR (k, p); /* Skip the n. 
*/ succeed_n_p = false; } continue; case succeed_n: /* Get to the number of times to succeed. */ p += OFFSET_ADDRESS_SIZE; /* Increment p past the n for when k != 0. */ EXTRACT_NUMBER_AND_INCR (k, p); if (k == 0) { p -= 2 * OFFSET_ADDRESS_SIZE; succeed_n_p = true; /* Spaghetti code alert. */ goto handle_on_failure_jump; } continue; case set_number_at: p += 2 * OFFSET_ADDRESS_SIZE; continue; case start_memory: case stop_memory: p += 2; continue; default: abort (); /* We have listed all the cases. */ } /* switch *p++ */ /* Getting here means we have found the possible starting characters for one path of the pattern -- and that the empty string does not match. We need not follow this path further. Instead, look at the next alternative (remembered on the stack), or quit if no more. The test at the top of the loop does these things. */ path_can_be_null = false; p = pend; } /* while p */ /* Set `can_be_null' for the last path (also the first path, if the pattern is empty). */ bufp->can_be_null |= path_can_be_null; done: RESET_FAIL_STACK (); return 0; } #else /* not INSIDE_RECURSION */ int re_compile_fastmap (bufp) struct re_pattern_buffer *bufp; { # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) return wcs_re_compile_fastmap(bufp); else # endif return byte_re_compile_fastmap(bufp); } /* re_compile_fastmap */ #ifdef _LIBC weak_alias (__re_compile_fastmap, re_compile_fastmap) #endif /* Set REGS to hold NUM_REGS registers, storing them in STARTS and ENDS. Subsequent matches using PATTERN_BUFFER and REGS will use this memory for recording register information. STARTS and ENDS must be allocated using the malloc library routine, and must each be at least NUM_REGS * sizeof (regoff_t) bytes long. If NUM_REGS == 0, then subsequent matches should allocate their own register data. Unless this function is called, the first search or match using PATTERN_BUFFER will allocate its own register data, without freeing the old data. */ void re_set_registers (bufp, regs, num_regs, starts, ends) struct re_pattern_buffer *bufp; struct re_registers *regs; unsigned num_regs; regoff_t *starts, *ends; { if (num_regs) { bufp->regs_allocated = REGS_REALLOCATE; regs->num_regs = num_regs; regs->start = starts; regs->end = ends; } else { bufp->regs_allocated = REGS_UNALLOCATED; regs->num_regs = 0; regs->start = regs->end = (regoff_t *) 0; } } #ifdef _LIBC weak_alias (__re_set_registers, re_set_registers) #endif /* Searching routines. */ /* Like re_search_2, below, but only one string is specified, and doesn't let you say where to stop matching. */ int re_search (bufp, string, size, startpos, range, regs) struct re_pattern_buffer *bufp; const char *string; int size, startpos, range; struct re_registers *regs; { return re_search_2 (bufp, NULL, 0, string, size, startpos, range, regs, size); } #ifdef _LIBC weak_alias (__re_search, re_search) #endif /* Using the compiled pattern in BUFP->buffer, first tries to match the virtual concatenation of STRING1 and STRING2, starting first at index STARTPOS, then at STARTPOS + 1, and so on. STRING1 and STRING2 have length SIZE1 and SIZE2, respectively. RANGE is how far to scan while trying to match. RANGE = 0 means try only at STARTPOS; in general, the last start tried is STARTPOS + RANGE. In REGS, return the indices of the virtual concatenation of STRING1 and STRING2 that matched the entire BUFP->buffer and its contained subexpressions. Do not consider matching one past the index STOP in the virtual concatenation of STRING1 and STRING2. 
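For example (with illustrative variable names), a forward search over the whole concatenation would be: re_search_2 (&buf, str1, len1, str2, len2, 0, len1 + len2, &regs, len1 + len2);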
We return either the position in the strings at which the match was found, -1 if no match, or -2 if error (such as failure stack overflow). */ int re_search_2 (bufp, string1, size1, string2, size2, startpos, range, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int startpos; int range; struct re_registers *regs; int stop; { # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) return wcs_re_search_2 (bufp, string1, size1, string2, size2, startpos, range, regs, stop); else # endif return byte_re_search_2 (bufp, string1, size1, string2, size2, startpos, range, regs, stop); } /* re_search_2 */ #ifdef _LIBC weak_alias (__re_search_2, re_search_2) #endif #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION #ifdef MATCH_MAY_ALLOCATE # define FREE_VAR(var) if (var) REGEX_FREE (var); var = NULL #else # define FREE_VAR(var) if (var) free (var); var = NULL #endif #ifdef WCHAR # define MAX_ALLOCA_SIZE 2000 # define FREE_WCS_BUFFERS() \ do { \ if (size1 > MAX_ALLOCA_SIZE) \ { \ free (wcs_string1); \ free (mbs_offset1); \ } \ else \ { \ FREE_VAR (wcs_string1); \ FREE_VAR (mbs_offset1); \ } \ if (size2 > MAX_ALLOCA_SIZE) \ { \ free (wcs_string2); \ free (mbs_offset2); \ } \ else \ { \ FREE_VAR (wcs_string2); \ FREE_VAR (mbs_offset2); \ } \ } while (0) #endif static int PREFIX(re_search_2) (bufp, string1, size1, string2, size2, startpos, range, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int startpos; int range; struct re_registers *regs; int stop; { int val; register char *fastmap = bufp->fastmap; register RE_TRANSLATE_TYPE translate = bufp->translate; int total_size = size1 + size2; int endpos = startpos + range; #ifdef WCHAR /* We need wchar_t* buffers correspond to cstring1, cstring2. */ wchar_t *wcs_string1 = NULL, *wcs_string2 = NULL; /* We need the size of wchar_t buffers correspond to csize1, csize2. */ int wcs_size1 = 0, wcs_size2 = 0; /* offset buffer for optimizatoin. See convert_mbs_to_wc. */ int *mbs_offset1 = NULL, *mbs_offset2 = NULL; /* They hold whether each wchar_t is binary data or not. */ char *is_binary = NULL; #endif /* WCHAR */ /* Check for out-of-range STARTPOS. */ if (startpos < 0 || startpos > total_size) return -1; /* Fix up RANGE if it might eventually take us outside the virtual concatenation of STRING1 and STRING2. Make sure we won't move STARTPOS below 0 or above TOTAL_SIZE. */ if (endpos < 0) range = 0 - startpos; else if (endpos > total_size) range = total_size - startpos; /* If the search isn't to be a backwards one, don't waste time in a search for a pattern that must be anchored. */ if (bufp->used > 0 && range > 0 && ((re_opcode_t) bufp->buffer[0] == begbuf /* `begline' is like `begbuf' if it cannot match at newlines. */ || ((re_opcode_t) bufp->buffer[0] == begline && !bufp->newline_anchor))) { if (startpos > 0) return -1; else range = 1; } #ifdef emacs /* In a forward search for something that starts with \=. don't keep searching past point. */ if (bufp->used > 0 && (re_opcode_t) bufp->buffer[0] == at_dot && range > 0) { range = PT - startpos; if (range <= 0) return -1; } #endif /* emacs */ /* Update the fastmap now if not correct already. */ if (fastmap && !bufp->fastmap_accurate) if (re_compile_fastmap (bufp) == -2) return -2; #ifdef WCHAR /* Allocate wchar_t array for wcs_string1 and wcs_string2 and fill them with converted string. 
*/ if (size1 != 0) { if (size1 > MAX_ALLOCA_SIZE) { wcs_string1 = TALLOC (size1 + 1, CHAR_T); mbs_offset1 = TALLOC (size1 + 1, int); is_binary = TALLOC (size1 + 1, char); } else { wcs_string1 = REGEX_TALLOC (size1 + 1, CHAR_T); mbs_offset1 = REGEX_TALLOC (size1 + 1, int); is_binary = REGEX_TALLOC (size1 + 1, char); } if (!wcs_string1 || !mbs_offset1 || !is_binary) { if (size1 > MAX_ALLOCA_SIZE) { free (wcs_string1); free (mbs_offset1); free (is_binary); } else { FREE_VAR (wcs_string1); FREE_VAR (mbs_offset1); FREE_VAR (is_binary); } return -2; } wcs_size1 = convert_mbs_to_wcs(wcs_string1, string1, size1, mbs_offset1, is_binary); wcs_string1[wcs_size1] = L'\0'; /* for a sentinel */ if (size1 > MAX_ALLOCA_SIZE) free (is_binary); else FREE_VAR (is_binary); } if (size2 != 0) { if (size2 > MAX_ALLOCA_SIZE) { wcs_string2 = TALLOC (size2 + 1, CHAR_T); mbs_offset2 = TALLOC (size2 + 1, int); is_binary = TALLOC (size2 + 1, char); } else { wcs_string2 = REGEX_TALLOC (size2 + 1, CHAR_T); mbs_offset2 = REGEX_TALLOC (size2 + 1, int); is_binary = REGEX_TALLOC (size2 + 1, char); } if (!wcs_string2 || !mbs_offset2 || !is_binary) { FREE_WCS_BUFFERS (); if (size2 > MAX_ALLOCA_SIZE) free (is_binary); else FREE_VAR (is_binary); return -2; } wcs_size2 = convert_mbs_to_wcs(wcs_string2, string2, size2, mbs_offset2, is_binary); wcs_string2[wcs_size2] = L'\0'; /* for a sentinel */ if (size2 > MAX_ALLOCA_SIZE) free (is_binary); else FREE_VAR (is_binary); } #endif /* WCHAR */ /* Loop through the string, looking for a place to start matching. */ for (;;) { /* If a fastmap is supplied, skip quickly over characters that cannot be the start of a match. If the pattern can match the null string, however, we don't need to skip characters; we want the first null string. */ if (fastmap && startpos < total_size && !bufp->can_be_null) { if (range > 0) /* Searching forwards. */ { register const char *d; register int lim = 0; int irange = range; if (startpos < size1 && startpos + range >= size1) lim = range - (size1 - startpos); d = (startpos >= size1 ? string2 - size1 : string1) + startpos; /* Written out as an if-else to avoid testing `translate' inside the loop. */ if (translate) while (range > lim && !fastmap[(unsigned char) translate[(unsigned char) *d++]]) range--; else while (range > lim && !fastmap[(unsigned char) *d++]) range--; startpos += irange - range; } else /* Searching backwards. */ { register CHAR_T c = (size1 == 0 || startpos >= size1 ? string2[startpos - size1] : string1[startpos]); if (!fastmap[(unsigned char) TRANSLATE (c)]) goto advance; } } /* If can't match the null string, and that's all we have left, fail. 
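That is, we are searching forward, STARTPOS has already reached TOTAL_SIZE, a fastmap is in use and the pattern cannot match the empty string, so no remaining starting position can succeed and we return -1 without calling the matcher again.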
*/ if (range >= 0 && startpos == total_size && fastmap && !bufp->can_be_null) { #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return -1; } #ifdef WCHAR val = wcs_re_match_2_internal (bufp, string1, size1, string2, size2, startpos, regs, stop, wcs_string1, wcs_size1, wcs_string2, wcs_size2, mbs_offset1, mbs_offset2); #else /* BYTE */ val = byte_re_match_2_internal (bufp, string1, size1, string2, size2, startpos, regs, stop); #endif /* BYTE */ #ifndef REGEX_MALLOC # ifdef C_ALLOCA alloca (0); # endif #endif if (val >= 0) { #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return startpos; } if (val == -2) { #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return -2; } advance: if (!range) break; else if (range > 0) { range--; startpos++; } else { range++; startpos--; } } #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return -1; } #ifdef WCHAR /* This converts PTR, a pointer into one of the search wchar_t strings `string1' and `string2' into an multibyte string offset from the beginning of that string. We use mbs_offset to optimize. See convert_mbs_to_wcs. */ # define POINTER_TO_OFFSET(ptr) \ (FIRST_STRING_P (ptr) \ ? ((regoff_t)(mbs_offset1 != NULL? mbs_offset1[(ptr)-string1] : 0)) \ : ((regoff_t)((mbs_offset2 != NULL? mbs_offset2[(ptr)-string2] : 0) \ + csize1))) #else /* BYTE */ /* This converts PTR, a pointer into one of the search strings `string1' and `string2' into an offset from the beginning of that string. */ # define POINTER_TO_OFFSET(ptr) \ (FIRST_STRING_P (ptr) \ ? ((regoff_t) ((ptr) - string1)) \ : ((regoff_t) ((ptr) - string2 + size1))) #endif /* WCHAR */ /* Macros for dealing with the split strings in re_match_2. */ #define MATCHING_IN_FIRST_STRING (dend == end_match_1) /* Call before fetching a character with *d. This switches over to string2 if necessary. */ #define PREFETCH() \ while (d == dend) \ { \ /* End of string2 => fail. */ \ if (dend == end_match_2) \ goto fail; \ /* End of string1 => advance to string2. */ \ d = string2; \ dend = end_match_2; \ } /* Test if at very beginning or at very end of the virtual concatenation of `string1' and `string2'. If only one string, it's `string2'. */ #define AT_STRINGS_BEG(d) ((d) == (size1 ? string1 : string2) || !size2) #define AT_STRINGS_END(d) ((d) == end2) /* Test if D points to a character which is word-constituent. We have two special cases to check for: if past the end of string1, look at the first character in string2; and if before the beginning of string2, look at the last character in string1. */ #ifdef WCHAR /* Use internationalized API instead of SYNTAX. */ # define WORDCHAR_P(d) \ (iswalnum ((wint_t)((d) == end1 ? *string2 \ : (d) == string2 - 1 ? *(end1 - 1) : *(d))) != 0 \ || ((d) == end1 ? *string2 \ : (d) == string2 - 1 ? *(end1 - 1) : *(d)) == L'_') #else /* BYTE */ # define WORDCHAR_P(d) \ (SYNTAX ((d) == end1 ? *string2 \ : (d) == string2 - 1 ? *(end1 - 1) : *(d)) \ == Sword) #endif /* WCHAR */ /* Disabled due to a compiler bug -- see comment at case wordbound */ #if 0 /* Test if the character before D and the one at D differ with respect to being word-constituent. */ #define AT_WORD_BOUNDARY(d) \ (AT_STRINGS_BEG (d) || AT_STRINGS_END (d) \ || WORDCHAR_P (d - 1) != WORDCHAR_P (d)) #endif /* Free everything we malloc. 
*/ #ifdef MATCH_MAY_ALLOCATE # ifdef WCHAR # define FREE_VARIABLES() \ do { \ REGEX_FREE_STACK (fail_stack.stack); \ FREE_VAR (regstart); \ FREE_VAR (regend); \ FREE_VAR (old_regstart); \ FREE_VAR (old_regend); \ FREE_VAR (best_regstart); \ FREE_VAR (best_regend); \ FREE_VAR (reg_info); \ FREE_VAR (reg_dummy); \ FREE_VAR (reg_info_dummy); \ if (!cant_free_wcs_buf) \ { \ FREE_VAR (string1); \ FREE_VAR (string2); \ FREE_VAR (mbs_offset1); \ FREE_VAR (mbs_offset2); \ } \ } while (0) # else /* BYTE */ # define FREE_VARIABLES() \ do { \ REGEX_FREE_STACK (fail_stack.stack); \ FREE_VAR (regstart); \ FREE_VAR (regend); \ FREE_VAR (old_regstart); \ FREE_VAR (old_regend); \ FREE_VAR (best_regstart); \ FREE_VAR (best_regend); \ FREE_VAR (reg_info); \ FREE_VAR (reg_dummy); \ FREE_VAR (reg_info_dummy); \ } while (0) # endif /* WCHAR */ #else # ifdef WCHAR # define FREE_VARIABLES() \ do { \ if (!cant_free_wcs_buf) \ { \ FREE_VAR (string1); \ FREE_VAR (string2); \ FREE_VAR (mbs_offset1); \ FREE_VAR (mbs_offset2); \ } \ } while (0) # else /* BYTE */ # define FREE_VARIABLES() ((void)0) /* Do nothing! But inhibit gcc warning. */ # endif /* WCHAR */ #endif /* not MATCH_MAY_ALLOCATE */ /* These values must meet several constraints. They must not be valid register values; since we have a limit of 255 registers (because we use only one byte in the pattern for the register number), we can use numbers larger than 255. They must differ by 1, because of NUM_FAILURE_ITEMS above. And the value for the lowest register must be larger than the value for the highest register, so we do not try to actually save any registers when none are active. */ #define NO_HIGHEST_ACTIVE_REG (1 << BYTEWIDTH) #define NO_LOWEST_ACTIVE_REG (NO_HIGHEST_ACTIVE_REG + 1) #else /* not INSIDE_RECURSION */ /* Matching routines. */ #ifndef emacs /* Emacs never uses this. */ /* re_match is like re_match_2 except it takes only a single string. */ int re_match (bufp, string, size, pos, regs) struct re_pattern_buffer *bufp; const char *string; int size, pos; struct re_registers *regs; { int result; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) result = wcs_re_match_2_internal (bufp, NULL, 0, string, size, pos, regs, size, NULL, 0, NULL, 0, NULL, NULL); else # endif result = byte_re_match_2_internal (bufp, NULL, 0, string, size, pos, regs, size); # ifndef REGEX_MALLOC # ifdef C_ALLOCA alloca (0); # endif # endif return result; } # ifdef _LIBC weak_alias (__re_match, re_match) # endif #endif /* not emacs */ #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION static boolean PREFIX(group_match_null_string_p) _RE_ARGS ((UCHAR_T **p, UCHAR_T *end, PREFIX(register_info_type) *reg_info)); static boolean PREFIX(alt_match_null_string_p) _RE_ARGS ((UCHAR_T *p, UCHAR_T *end, PREFIX(register_info_type) *reg_info)); static boolean PREFIX(common_op_match_null_string_p) _RE_ARGS ((UCHAR_T **p, UCHAR_T *end, PREFIX(register_info_type) *reg_info)); static int PREFIX(bcmp_translate) _RE_ARGS ((const CHAR_T *s1, const CHAR_T *s2, int len, char *translate)); #else /* not INSIDE_RECURSION */ /* re_match_2 matches the compiled pattern in BUFP against the the (virtual) concatenation of STRING1 and STRING2 (of length SIZE1 and SIZE2, respectively). We start matching at POS, and stop matching at STOP. If REGS is non-null and the `no_sub' field of BUFP is nonzero, we store offsets for the substring each group matched in REGS. See the documentation for exactly how many groups we fill. We return -1 if no match, -2 if an internal error (such as the failure stack overflowing). 
Otherwise, we return the length of the matched substring. */ int re_match_2 (bufp, string1, size1, string2, size2, pos, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int pos; struct re_registers *regs; int stop; { int result; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) result = wcs_re_match_2_internal (bufp, string1, size1, string2, size2, pos, regs, stop, NULL, 0, NULL, 0, NULL, NULL); else # endif result = byte_re_match_2_internal (bufp, string1, size1, string2, size2, pos, regs, stop); #ifndef REGEX_MALLOC # ifdef C_ALLOCA alloca (0); # endif #endif return result; } #ifdef _LIBC weak_alias (__re_match_2, re_match_2) #endif #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION #ifdef WCHAR static int count_mbs_length PARAMS ((int *, int)); /* This checks the substring (from 0 to LENGTH) of the multibyte string to which offset_buffer corresponds, and counts how many wchar_t characters the substring occupies. We use offset_buffer as an optimization. See convert_mbs_to_wcs. */ static int count_mbs_length(offset_buffer, length) int *offset_buffer; int length; { int upper, lower; /* Check whether the size is valid. */ if (length < 0) return -1; if (offset_buffer == NULL) return 0; /* If there are no multibyte characters, offset_buffer[i] == i. Optimize for this case. */ if (offset_buffer[length] == length) return length; /* Set up upper with length. (because for all i, offset_buffer[i] >= i) */ upper = length; lower = 0; while (true) { int middle = (lower + upper) / 2; if (middle == lower || middle == upper) break; if (offset_buffer[middle] > length) upper = middle; else if (offset_buffer[middle] < length) lower = middle; else return middle; } return -1; } #endif /* WCHAR */ /* This is a separate function so that we can force an alloca cleanup afterwards. */ #ifdef WCHAR static int wcs_re_match_2_internal (bufp, cstring1, csize1, cstring2, csize2, pos, regs, stop, string1, size1, string2, size2, mbs_offset1, mbs_offset2) struct re_pattern_buffer *bufp; const char *cstring1, *cstring2; int csize1, csize2; int pos; struct re_registers *regs; int stop; /* string1 == string2 == NULL means string1/2, size1/2 and mbs_offset1/2 need setting up in this function. */ /* We need wchar_t* buffers corresponding to cstring1, cstring2. */ wchar_t *string1, *string2; /* We need the sizes of the wchar_t buffers corresponding to csize1, csize2. */ int size1, size2; /* Offset buffers for optimization. See convert_mbs_to_wcs. */ int *mbs_offset1, *mbs_offset2; #else /* BYTE */ static int byte_re_match_2_internal (bufp, string1, size1, string2, size2, pos, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int pos; struct re_registers *regs; int stop; #endif /* BYTE */ { /* General temporaries. */ int mcnt; UCHAR_T *p1; #ifdef WCHAR /* They hold whether each wchar_t is binary data or not. */ char *is_binary = NULL; /* If true, we can't free string1/2, mbs_offset1/2. */ int cant_free_wcs_buf = 1; #endif /* WCHAR */ /* Just past the end of the corresponding string. */ const CHAR_T *end1, *end2; /* Pointers into string1 and string2, just past the last characters in each to consider matching. */ const CHAR_T *end_match_1, *end_match_2; /* Where we are in the data, and the end of the current string. */ const CHAR_T *d, *dend; /* Where we are in the pattern, and the end of the pattern.
*/ #ifdef WCHAR UCHAR_T *pattern, *p; register UCHAR_T *pend; #else /* BYTE */ UCHAR_T *p = bufp->buffer; register UCHAR_T *pend = p + bufp->used; #endif /* WCHAR */ /* Mark the opcode just after a start_memory, so we can test for an empty subpattern when we get to the stop_memory. */ UCHAR_T *just_past_start_mem = 0; /* We use this to map every character in the string. */ RE_TRANSLATE_TYPE translate = bufp->translate; /* Failure point stack. Each place that can handle a failure further down the line pushes a failure point on this stack. It consists of restart, regend, and reg_info for all registers corresponding to the subexpressions we're currently inside, plus the number of such registers, and, finally, two char *'s. The first char * is where to resume scanning the pattern; the second one is where to resume scanning the strings. If the latter is zero, the failure point is a ``dummy''; if a failure happens and the failure point is a dummy, it gets discarded and the next next one is tried. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, this is global. */ PREFIX(fail_stack_type) fail_stack; #endif #ifdef DEBUG static unsigned failure_id; unsigned nfailure_points_pushed = 0, nfailure_points_popped = 0; #endif #ifdef REL_ALLOC /* This holds the pointer to the failure stack, when it is allocated relocatably. */ fail_stack_elt_t *failure_stack_ptr; #endif /* We fill all the registers internally, independent of what we return, for use in backreferences. The number here includes an element for register zero. */ size_t num_regs = bufp->re_nsub + 1; /* The currently active registers. */ active_reg_t lowest_active_reg = NO_LOWEST_ACTIVE_REG; active_reg_t highest_active_reg = NO_HIGHEST_ACTIVE_REG; /* Information on the contents of registers. These are pointers into the input strings; they record just what was matched (on this attempt) by a subexpression part of the pattern, that is, the regnum-th regstart pointer points to where in the pattern we began matching and the regnum-th regend points to right after where we stopped matching the regnum-th subexpression. (The zeroth register keeps track of what the whole pattern matches.) */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. */ const CHAR_T **regstart, **regend; #endif /* If a group that's operated upon by a repetition operator fails to match anything, then the register for its start will need to be restored because it will have been set to wherever in the string we are when we last see its open-group operator. Similarly for a register's end. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. */ const CHAR_T **old_regstart, **old_regend; #endif /* The is_active field of reg_info helps us keep track of which (possibly nested) subexpressions we are currently in. The matched_something field of reg_info[reg_num] helps us tell whether or not we have matched any of the pattern so far this time through the reg_num-th subexpression. These two fields get reset each time through any loop their register is in. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, this is global. */ PREFIX(register_info_type) *reg_info; #endif /* The following record the register info as found in the above variables when we find a match better than any we've seen before. This happens as we backtrack through the failure points, which in turn happens only if we have not yet matched the entire string. */ unsigned best_regs_set = false; #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. 
*/ const CHAR_T **best_regstart, **best_regend; #endif /* Logically, this is `best_regend[0]'. But we don't want to have to allocate space for that if we're not allocating space for anything else (see below). Also, we never need info about register 0 for any of the other register vectors, and it seems rather a kludge to treat `best_regend' differently than the rest. So we keep track of the end of the best match so far in a separate variable. We initialize this to NULL so that when we backtrack the first time and need to test it, it's not garbage. */ const CHAR_T *match_end = NULL; /* This helps SET_REGS_MATCHED avoid doing redundant work. */ int set_regs_matched_done = 0; /* Used when we pop values we don't care about. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. */ const CHAR_T **reg_dummy; PREFIX(register_info_type) *reg_info_dummy; #endif #ifdef DEBUG /* Counts the total number of registers pushed. */ unsigned num_regs_pushed = 0; #endif DEBUG_PRINT1 ("\n\nEntering re_match_2.\n"); INIT_FAIL_STACK (); #ifdef MATCH_MAY_ALLOCATE /* Do not bother to initialize all the register variables if there are no groups in the pattern, as it takes a fair amount of time. If there are groups, we include space for register 0 (the whole pattern), even though we never use it, since it simplifies the array indexing. We should fix this. */ if (bufp->re_nsub) { regstart = REGEX_TALLOC (num_regs, const CHAR_T *); regend = REGEX_TALLOC (num_regs, const CHAR_T *); old_regstart = REGEX_TALLOC (num_regs, const CHAR_T *); old_regend = REGEX_TALLOC (num_regs, const CHAR_T *); best_regstart = REGEX_TALLOC (num_regs, const CHAR_T *); best_regend = REGEX_TALLOC (num_regs, const CHAR_T *); reg_info = REGEX_TALLOC (num_regs, PREFIX(register_info_type)); reg_dummy = REGEX_TALLOC (num_regs, const CHAR_T *); reg_info_dummy = REGEX_TALLOC (num_regs, PREFIX(register_info_type)); if (!(regstart && regend && old_regstart && old_regend && reg_info && best_regstart && best_regend && reg_dummy && reg_info_dummy)) { FREE_VARIABLES (); return -2; } } else { /* We must initialize all our variables to NULL, so that `FREE_VARIABLES' doesn't try to free them. */ regstart = regend = old_regstart = old_regend = best_regstart = best_regend = reg_dummy = NULL; reg_info = reg_info_dummy = (PREFIX(register_info_type) *) NULL; } #endif /* MATCH_MAY_ALLOCATE */ /* The starting position is bogus. */ #ifdef WCHAR if (pos < 0 || pos > csize1 + csize2) #else /* BYTE */ if (pos < 0 || pos > size1 + size2) #endif { FREE_VARIABLES (); return -1; } #ifdef WCHAR /* Allocate wchar_t array for string1 and string2 and fill them with converted string. */ if (string1 == NULL && string2 == NULL) { /* We need seting up buffers here. */ /* We must free wcs buffers in this function. 
*/ cant_free_wcs_buf = 0; if (csize1 != 0) { string1 = REGEX_TALLOC (csize1 + 1, CHAR_T); mbs_offset1 = REGEX_TALLOC (csize1 + 1, int); is_binary = REGEX_TALLOC (csize1 + 1, char); if (!string1 || !mbs_offset1 || !is_binary) { FREE_VAR (string1); FREE_VAR (mbs_offset1); FREE_VAR (is_binary); return -2; } } if (csize2 != 0) { string2 = REGEX_TALLOC (csize2 + 1, CHAR_T); mbs_offset2 = REGEX_TALLOC (csize2 + 1, int); is_binary = REGEX_TALLOC (csize2 + 1, char); if (!string2 || !mbs_offset2 || !is_binary) { FREE_VAR (string1); FREE_VAR (mbs_offset1); FREE_VAR (string2); FREE_VAR (mbs_offset2); FREE_VAR (is_binary); return -2; } size2 = convert_mbs_to_wcs(string2, cstring2, csize2, mbs_offset2, is_binary); string2[size2] = L'\0'; /* for a sentinel */ FREE_VAR (is_binary); } } /* We need to cast pattern to (wchar_t*), because we casted this compiled pattern to (char*) in regex_compile. */ p = pattern = (CHAR_T*)bufp->buffer; pend = (CHAR_T*)(bufp->buffer + bufp->used); #endif /* WCHAR */ /* Initialize subexpression text positions to -1 to mark ones that no start_memory/stop_memory has been seen for. Also initialize the register information struct. */ for (mcnt = 1; (unsigned) mcnt < num_regs; mcnt++) { regstart[mcnt] = regend[mcnt] = old_regstart[mcnt] = old_regend[mcnt] = REG_UNSET_VALUE; REG_MATCH_NULL_STRING_P (reg_info[mcnt]) = MATCH_NULL_UNSET_VALUE; IS_ACTIVE (reg_info[mcnt]) = 0; MATCHED_SOMETHING (reg_info[mcnt]) = 0; EVER_MATCHED_SOMETHING (reg_info[mcnt]) = 0; } /* We move `string1' into `string2' if the latter's empty -- but not if `string1' is null. */ if (size2 == 0 && string1 != NULL) { string2 = string1; size2 = size1; string1 = 0; size1 = 0; #ifdef WCHAR mbs_offset2 = mbs_offset1; csize2 = csize1; mbs_offset1 = NULL; csize1 = 0; #endif } end1 = string1 + size1; end2 = string2 + size2; /* Compute where to stop matching, within the two strings. */ #ifdef WCHAR if (stop <= csize1) { mcnt = count_mbs_length(mbs_offset1, stop); end_match_1 = string1 + mcnt; end_match_2 = string2; } else { if (stop > csize1 + csize2) stop = csize1 + csize2; end_match_1 = end1; mcnt = count_mbs_length(mbs_offset2, stop-csize1); end_match_2 = string2 + mcnt; } if (mcnt < 0) { /* count_mbs_length return error. */ FREE_VARIABLES (); return -1; } #else if (stop <= size1) { end_match_1 = string1 + stop; end_match_2 = string2; } else { end_match_1 = end1; end_match_2 = string2 + stop - size1; } #endif /* WCHAR */ /* `p' scans through the pattern as `d' scans through the data. `dend' is the end of the input string that `d' points within. `d' is advanced into the following input string whenever necessary, but this happens before fetching; therefore, at the beginning of the loop, `d' can be pointing at the end of a string, but it cannot equal `string2'. */ #ifdef WCHAR if (size1 > 0 && pos <= csize1) { mcnt = count_mbs_length(mbs_offset1, pos); d = string1 + mcnt; dend = end_match_1; } else { mcnt = count_mbs_length(mbs_offset2, pos-csize1); d = string2 + mcnt; dend = end_match_2; } if (mcnt < 0) { /* count_mbs_length return error. */ FREE_VARIABLES (); return -1; } #else if (size1 > 0 && pos <= size1) { d = string1 + pos; dend = end_match_1; } else { d = string2 + pos - size1; dend = end_match_2; } #endif /* WCHAR */ DEBUG_PRINT1 ("The compiled pattern is:\n"); DEBUG_PRINT_COMPILED_PATTERN (bufp, p, pend); DEBUG_PRINT1 ("The string to match is: `"); DEBUG_PRINT_DOUBLE_STRING (d, string1, size1, string2, size2); DEBUG_PRINT1 ("'\n"); /* This loops over pattern commands. 
It exits by returning from the function if the match is complete, or it drops through if the match fails at this starting point in the input data. */ for (;;) { #ifdef _LIBC DEBUG_PRINT2 ("\n%p: ", p); #else DEBUG_PRINT2 ("\n0x%x: ", p); #endif if (p == pend) { /* End of pattern means we might have succeeded. */ DEBUG_PRINT1 ("end of pattern ... "); /* If we haven't matched the entire string, and we want the longest match, try backtracking. */ if (d != end_match_2) { /* 1 if this match ends in the same string (string1 or string2) as the best previous match. */ boolean same_str_p = (FIRST_STRING_P (match_end) == MATCHING_IN_FIRST_STRING); /* 1 if this match is the best seen so far. */ boolean best_match_p; /* AIX compiler got confused when this was combined with the previous declaration. */ if (same_str_p) best_match_p = d > match_end; else best_match_p = !MATCHING_IN_FIRST_STRING; DEBUG_PRINT1 ("backtracking.\n"); if (!FAIL_STACK_EMPTY ()) { /* More failure points to try. */ /* If exceeds best match so far, save it. */ if (!best_regs_set || best_match_p) { best_regs_set = true; match_end = d; DEBUG_PRINT1 ("\nSAVING match as best so far.\n"); for (mcnt = 1; (unsigned) mcnt < num_regs; mcnt++) { best_regstart[mcnt] = regstart[mcnt]; best_regend[mcnt] = regend[mcnt]; } } goto fail; } /* If no failure points, don't restore garbage. And if last match is real best match, don't restore second best one. */ else if (best_regs_set && !best_match_p) { restore_best_regs: /* Restore best match. It may happen that `dend == end_match_1' while the restored d is in string2. For example, the pattern `x.*y.*z' against the strings `x-' and `y-z-', if the two strings are not consecutive in memory. */ DEBUG_PRINT1 ("Restoring best registers.\n"); d = match_end; dend = ((d >= string1 && d <= end1) ? end_match_1 : end_match_2); for (mcnt = 1; (unsigned) mcnt < num_regs; mcnt++) { regstart[mcnt] = best_regstart[mcnt]; regend[mcnt] = best_regend[mcnt]; } } } /* d != end_match_2 */ succeed_label: DEBUG_PRINT1 ("Accepting match.\n"); /* If caller wants register contents data back, do it. */ if (regs && !bufp->no_sub) { /* Have the register data arrays been allocated? */ if (bufp->regs_allocated == REGS_UNALLOCATED) { /* No. So allocate them with malloc. We need one extra element beyond `num_regs' for the `-1' marker GNU code uses. */ regs->num_regs = MAX (RE_NREGS, num_regs + 1); regs->start = TALLOC (regs->num_regs, regoff_t); regs->end = TALLOC (regs->num_regs, regoff_t); if (regs->start == NULL || regs->end == NULL) { FREE_VARIABLES (); return -2; } bufp->regs_allocated = REGS_REALLOCATE; } else if (bufp->regs_allocated == REGS_REALLOCATE) { /* Yes. If we need more elements than were already allocated, reallocate them. If we need fewer, just leave it alone. */ if (regs->num_regs < num_regs + 1) { regs->num_regs = num_regs + 1; RETALLOC (regs->start, regs->num_regs, regoff_t); RETALLOC (regs->end, regs->num_regs, regoff_t); if (regs->start == NULL || regs->end == NULL) { FREE_VARIABLES (); return -2; } } } else { /* These braces fend off a "empty body in an else-statement" warning under GCC when assert expands to nothing. */ assert (bufp->regs_allocated == REGS_FIXED); } /* Convert the pointer data in `regstart' and `regend' to indices. Register zero has to be set differently, since we haven't kept track of any info for it. */ if (regs->num_regs > 0) { regs->start[0] = pos; #ifdef WCHAR if (MATCHING_IN_FIRST_STRING) regs->end[0] = mbs_offset1 != NULL ? 
mbs_offset1[d-string1] : 0; else regs->end[0] = csize1 + (mbs_offset2 != NULL ? mbs_offset2[d-string2] : 0); #else regs->end[0] = (MATCHING_IN_FIRST_STRING ? ((regoff_t) (d - string1)) : ((regoff_t) (d - string2 + size1))); #endif /* WCHAR */ } /* Go through the first `min (num_regs, regs->num_regs)' registers, since that is all we initialized. */ for (mcnt = 1; (unsigned) mcnt < MIN (num_regs, regs->num_regs); mcnt++) { if (REG_UNSET (regstart[mcnt]) || REG_UNSET (regend[mcnt])) regs->start[mcnt] = regs->end[mcnt] = -1; else { regs->start[mcnt] = (regoff_t) POINTER_TO_OFFSET (regstart[mcnt]); regs->end[mcnt] = (regoff_t) POINTER_TO_OFFSET (regend[mcnt]); } } /* If the regs structure we return has more elements than were in the pattern, set the extra elements to -1. If we (re)allocated the registers, this is the case, because we always allocate enough to have at least one -1 at the end. */ for (mcnt = num_regs; (unsigned) mcnt < regs->num_regs; mcnt++) regs->start[mcnt] = regs->end[mcnt] = -1; } /* regs && !bufp->no_sub */ DEBUG_PRINT4 ("%u failure points pushed, %u popped (%u remain).\n", nfailure_points_pushed, nfailure_points_popped, nfailure_points_pushed - nfailure_points_popped); DEBUG_PRINT2 ("%u registers pushed.\n", num_regs_pushed); #ifdef WCHAR if (MATCHING_IN_FIRST_STRING) mcnt = mbs_offset1 != NULL ? mbs_offset1[d-string1] : 0; else mcnt = (mbs_offset2 != NULL ? mbs_offset2[d-string2] : 0) + csize1; mcnt -= pos; #else mcnt = d - pos - (MATCHING_IN_FIRST_STRING ? string1 : string2 - size1); #endif /* WCHAR */ DEBUG_PRINT2 ("Returning %d from re_match_2.\n", mcnt); FREE_VARIABLES (); return mcnt; } /* Otherwise match next pattern command. */ switch (SWITCH_ENUM_CAST ((re_opcode_t) *p++)) { /* Ignore these. Used to ignore the n of succeed_n's which currently have n == 0. */ case no_op: DEBUG_PRINT1 ("EXECUTING no_op.\n"); break; case succeed: DEBUG_PRINT1 ("EXECUTING succeed.\n"); goto succeed_label; /* Match the next n pattern characters exactly. The following byte in the pattern defines n, and the n bytes after that are the characters to match. */ case exactn: #ifdef MBS_SUPPORT case exactn_bin: #endif mcnt = *p++; DEBUG_PRINT2 ("EXECUTING exactn %d.\n", mcnt); /* This is written out as an if-else so we don't waste time testing `translate' inside the loop. */ if (translate) { do { PREFETCH (); #ifdef WCHAR if (*d <= 0xff) { if ((UCHAR_T) translate[(unsigned char) *d++] != (UCHAR_T) *p++) goto fail; } else { if (*d++ != (CHAR_T) *p++) goto fail; } #else if ((UCHAR_T) translate[(unsigned char) *d++] != (UCHAR_T) *p++) goto fail; #endif /* WCHAR */ } while (--mcnt); } else { do { PREFETCH (); if (*d++ != (CHAR_T) *p++) goto fail; } while (--mcnt); } SET_REGS_MATCHED (); break; /* Match any character except possibly a newline or a null. */ case anychar: DEBUG_PRINT1 ("EXECUTING anychar.\n"); PREFETCH (); if ((!(bufp->syntax & RE_DOT_NEWLINE) && TRANSLATE (*d) == '\n') || (bufp->syntax & RE_DOT_NOT_NULL && TRANSLATE (*d) == '\000')) goto fail; SET_REGS_MATCHED (); DEBUG_PRINT2 (" Matched `%ld'.\n", (long int) *d); d++; break; case charset: case charset_not: { register UCHAR_T c; #ifdef WCHAR unsigned int i, char_class_length, coll_symbol_length, equiv_class_length, ranges_length, chars_length, length; CHAR_T *workp, *workp2, *charset_top; #define WORK_BUFFER_SIZE 128 CHAR_T str_buf[WORK_BUFFER_SIZE]; # ifdef _LIBC uint32_t nrules; # endif /* _LIBC */ #endif /* WCHAR */ boolean not = (re_opcode_t) *(p - 1) == charset_not; DEBUG_PRINT2 ("EXECUTING charset%s.\n", not ? 
"_not" : ""); PREFETCH (); c = TRANSLATE (*d); /* The character to match. */ #ifdef WCHAR # ifdef _LIBC nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif /* _LIBC */ charset_top = p - 1; char_class_length = *p++; coll_symbol_length = *p++; equiv_class_length = *p++; ranges_length = *p++; chars_length = *p++; /* p points charset[6], so the address of the next instruction (charset[l+m+n+2o+k+p']) equals p[l+m+n+2*o+p'], where l=length of char_classes, m=length of collating_symbol, n=equivalence_class, o=length of char_range, p'=length of character. */ workp = p; /* Update p to indicate the next instruction. */ p += char_class_length + coll_symbol_length+ equiv_class_length + 2*ranges_length + chars_length; /* match with char_class? */ for (i = 0; i < char_class_length ; i += CHAR_CLASS_SIZE) { wctype_t wctype; uintptr_t alignedp = ((uintptr_t)workp + __alignof__(wctype_t) - 1) & ~(uintptr_t)(__alignof__(wctype_t) - 1); wctype = *((wctype_t*)alignedp); workp += CHAR_CLASS_SIZE; # ifdef _LIBC if (__iswctype((wint_t)c, wctype)) goto char_set_matched; # else if (iswctype((wint_t)c, wctype)) goto char_set_matched; # endif } /* match with collating_symbol? */ # ifdef _LIBC if (nrules != 0) { const unsigned char *extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); for (workp2 = workp + coll_symbol_length ; workp < workp2 ; workp++) { int32_t *wextra; wextra = (int32_t*)(extra + *workp++); for (i = 0; i < *wextra; ++i) if (TRANSLATE(d[i]) != wextra[1 + i]) break; if (i == *wextra) { /* Update d, however d will be incremented at char_set_matched:, we decrement d here. */ d += i - 1; goto char_set_matched; } } } else /* (nrules == 0) */ # endif /* If we can't look up collation data, we use wcscoll instead. */ { for (workp2 = workp + coll_symbol_length ; workp < workp2 ;) { const CHAR_T *backup_d = d, *backup_dend = dend; # ifdef _LIBC length = __wcslen (workp); # else length = wcslen (workp); # endif /* If wcscoll(the collating symbol, whole string) > 0, any substring of the string never match with the collating symbol. */ # ifdef _LIBC if (__wcscoll (workp, d) > 0) # else if (wcscoll (workp, d) > 0) # endif { workp += length + 1; continue; } /* First, we compare the collating symbol with the first character of the string. If it don't match, we add the next character to the compare buffer in turn. */ for (i = 0 ; i < WORK_BUFFER_SIZE-1 ; i++, d++) { int match; if (d == dend) { if (dend == end_match_2) break; d = string2; dend = end_match_2; } /* add next character to the compare buffer. */ str_buf[i] = TRANSLATE(*d); str_buf[i+1] = '\0'; # ifdef _LIBC match = __wcscoll (workp, str_buf); # else match = wcscoll (workp, str_buf); # endif if (match == 0) goto char_set_matched; if (match < 0) /* (str_buf > workp) indicate (str_buf + X > workp), because for all X (str_buf + X > str_buf). So we don't need continue this loop. */ break; /* Otherwise(str_buf < workp), (str_buf+next_character) may equals (workp). So we continue this loop. */ } /* not matched */ d = backup_d; dend = backup_dend; workp += length + 1; } } /* match with equivalence_class? */ # ifdef _LIBC if (nrules != 0) { const CHAR_T *backup_d = d, *backup_dend = dend; /* Try to match the equivalence class against those known to the collate implementation. */ const int32_t *table; const int32_t *weights; const int32_t *extra; const int32_t *indirect; int32_t idx, idx2; wint_t *cp; size_t len; /* This #include defines a local function! 
*/ table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEWC); weights = (const wint_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTWC); extra = (const wint_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAWC); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTWC); /* Write 1 collating element to str_buf, and get its index. */ idx2 = 0; for (i = 0 ; idx2 == 0 && i < WORK_BUFFER_SIZE - 1; i++) { cp = (wint_t*)str_buf; if (d == dend) { if (dend == end_match_2) break; d = string2; dend = end_match_2; } str_buf[i] = TRANSLATE(*(d+i)); str_buf[i+1] = '\0'; /* sentinel */ idx2 = findidx ((const wint_t**)&cp); } /* Update d, however d will be incremented at char_set_matched:, we decrement d here. */ d = backup_d + ((wchar_t*)cp - (wchar_t*)str_buf - 1); if (d >= dend) { if (dend == end_match_2) d = dend; else { d = string2; dend = end_match_2; } } len = weights[idx2]; for (workp2 = workp + equiv_class_length ; workp < workp2 ; workp++) { idx = (int32_t)*workp; /* We already checked idx != 0 in regex_compile. */ if (idx2 != 0 && len == weights[idx]) { int cnt = 0; while (cnt < len && (weights[idx + 1 + cnt] == weights[idx2 + 1 + cnt])) ++cnt; if (cnt == len) goto char_set_matched; } } /* not matched */ d = backup_d; dend = backup_dend; } else /* (nrules == 0) */ # endif /* If we can't look up collation data, we use wcscoll instead. */ { for (workp2 = workp + equiv_class_length ; workp < workp2 ;) { const CHAR_T *backup_d = d, *backup_dend = dend; # ifdef _LIBC length = __wcslen (workp); # else length = wcslen (workp); # endif /* If wcscoll(the collating symbol, whole string) > 0, any substring of the string never match with the collating symbol. */ # ifdef _LIBC if (__wcscoll (workp, d) > 0) # else if (wcscoll (workp, d) > 0) # endif { workp += length + 1; break; } /* First, we compare the equivalence class with the first character of the string. If it don't match, we add the next character to the compare buffer in turn. */ for (i = 0 ; i < WORK_BUFFER_SIZE - 1 ; i++, d++) { int match; if (d == dend) { if (dend == end_match_2) break; d = string2; dend = end_match_2; } /* add next character to the compare buffer. */ str_buf[i] = TRANSLATE(*d); str_buf[i+1] = '\0'; # ifdef _LIBC match = __wcscoll (workp, str_buf); # else match = wcscoll (workp, str_buf); # endif if (match == 0) goto char_set_matched; if (match < 0) /* (str_buf > workp) indicate (str_buf + X > workp), because for all X (str_buf + X > str_buf). So we don't need continue this loop. */ break; /* Otherwise(str_buf < workp), (str_buf+next_character) may equals (workp). So we continue this loop. */ } /* not matched */ d = backup_d; dend = backup_dend; workp += length + 1; } } /* match with char_range? */ # ifdef _LIBC if (nrules != 0) { uint32_t collseqval; const char *collseq = (const char *) _NL_CURRENT(LC_COLLATE, _NL_COLLATE_COLLSEQWC); collseqval = collseq_table_lookup (collseq, c); for (; workp < p - chars_length ;) { uint32_t start_val, end_val; /* We already compute the collation sequence value of the characters (or collating symbols). */ start_val = (uint32_t) *workp++; /* range_start */ end_val = (uint32_t) *workp++; /* range_end */ if (start_val <= collseqval && collseqval <= end_val) goto char_set_matched; } } else # endif { /* We set range_start_char at str_buf[0], range_end_char at str_buf[4], and compared char at str_buf[2]. 
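   The NULs stored just below at str_buf[1], str_buf[3] and str_buf[5]
   make these slots three adjacent one-character wide strings, so the
   range test can be done with two wcscoll comparisons against
   str_buf+2 (range_start <= c and c <= range_end).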
*/ str_buf[1] = 0; str_buf[2] = c; str_buf[3] = 0; str_buf[5] = 0; for (; workp < p - chars_length ;) { wchar_t *range_start_char, *range_end_char; /* match if (range_start_char <= c <= range_end_char). */ /* If range_start(or end) < 0, we assume -range_start(end) is the offset of the collating symbol which is specified as the character of the range start(end). */ /* range_start */ if (*workp < 0) range_start_char = charset_top - (*workp++); else { str_buf[0] = *workp++; range_start_char = str_buf; } /* range_end */ if (*workp < 0) range_end_char = charset_top - (*workp++); else { str_buf[4] = *workp++; range_end_char = str_buf + 4; } # ifdef _LIBC if (__wcscoll (range_start_char, str_buf+2) <= 0 && __wcscoll (str_buf+2, range_end_char) <= 0) # else if (wcscoll (range_start_char, str_buf+2) <= 0 && wcscoll (str_buf+2, range_end_char) <= 0) # endif goto char_set_matched; } } /* match with char? */ for (; workp < p ; workp++) if (c == *workp) goto char_set_matched; not = !not; char_set_matched: if (not) goto fail; #else /* Cast to `unsigned' instead of `unsigned char' in case the bit list is a full 32 bytes long. */ if (c < (unsigned) (*p * BYTEWIDTH) && p[1 + c / BYTEWIDTH] & (1 << (c % BYTEWIDTH))) not = !not; p += 1 + *p; if (!not) goto fail; #undef WORK_BUFFER_SIZE #endif /* WCHAR */ SET_REGS_MATCHED (); d++; break; } /* The beginning of a group is represented by start_memory. The arguments are the register number in the next byte, and the number of groups inner to this one in the next. The text matched within the group is recorded (in the internal registers data structure) under the register number. */ case start_memory: DEBUG_PRINT3 ("EXECUTING start_memory %ld (%ld):\n", (long int) *p, (long int) p[1]); /* Find out if this group can match the empty string. */ p1 = p; /* To send to group_match_null_string_p. */ if (REG_MATCH_NULL_STRING_P (reg_info[*p]) == MATCH_NULL_UNSET_VALUE) REG_MATCH_NULL_STRING_P (reg_info[*p]) = PREFIX(group_match_null_string_p) (&p1, pend, reg_info); /* Save the position in the string where we were the last time we were at this open-group operator in case the group is operated upon by a repetition operator, e.g., with `(a*)*b' against `ab'; then we want to ignore where we are now in the string in case this attempt to match fails. */ old_regstart[*p] = REG_MATCH_NULL_STRING_P (reg_info[*p]) ? REG_UNSET (regstart[*p]) ? d : regstart[*p] : regstart[*p]; DEBUG_PRINT2 (" old_regstart: %d\n", POINTER_TO_OFFSET (old_regstart[*p])); regstart[*p] = d; DEBUG_PRINT2 (" regstart: %d\n", POINTER_TO_OFFSET (regstart[*p])); IS_ACTIVE (reg_info[*p]) = 1; MATCHED_SOMETHING (reg_info[*p]) = 0; /* Clear this whenever we change the register activity status. */ set_regs_matched_done = 0; /* This is the new highest active register. */ highest_active_reg = *p; /* If nothing was active before, this is the new lowest active register. */ if (lowest_active_reg == NO_LOWEST_ACTIVE_REG) lowest_active_reg = *p; /* Move past the register number and inner group count. */ p += 2; just_past_start_mem = p; break; /* The stop_memory opcode represents the end of a group. Its arguments are the same as start_memory's: the register number, and the number of inner groups. 
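   As an illustrative sketch, using the same `/'-separated notation as
   the debug printer later in this file, a simple group such as `\(ab\)'
   compiles to roughly

       /start_memory/1/0 /exactn/2/a/b /stop_memory/1/0

   and the text the matcher consumes between the two memory opcodes is
   what gets recorded for register 1.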
*/ case stop_memory: DEBUG_PRINT3 ("EXECUTING stop_memory %ld (%ld):\n", (long int) *p, (long int) p[1]); /* We need to save the string position the last time we were at this close-group operator in case the group is operated upon by a repetition operator, e.g., with `((a*)*(b*)*)*' against `aba'; then we want to ignore where we are now in the string in case this attempt to match fails. */ old_regend[*p] = REG_MATCH_NULL_STRING_P (reg_info[*p]) ? REG_UNSET (regend[*p]) ? d : regend[*p] : regend[*p]; DEBUG_PRINT2 (" old_regend: %d\n", POINTER_TO_OFFSET (old_regend[*p])); regend[*p] = d; DEBUG_PRINT2 (" regend: %d\n", POINTER_TO_OFFSET (regend[*p])); /* This register isn't active anymore. */ IS_ACTIVE (reg_info[*p]) = 0; /* Clear this whenever we change the register activity status. */ set_regs_matched_done = 0; /* If this was the only register active, nothing is active anymore. */ if (lowest_active_reg == highest_active_reg) { lowest_active_reg = NO_LOWEST_ACTIVE_REG; highest_active_reg = NO_HIGHEST_ACTIVE_REG; } else { /* We must scan for the new highest active register, since it isn't necessarily one less than now: consider (a(b)c(d(e)f)g). When group 3 ends, after the f), the new highest active register is 1. */ UCHAR_T r = *p - 1; while (r > 0 && !IS_ACTIVE (reg_info[r])) r--; /* If we end up at register zero, that means that we saved the registers as the result of an `on_failure_jump', not a `start_memory', and we jumped to past the innermost `stop_memory'. For example, in ((.)*) we save registers 1 and 2 as a result of the *, but when we pop back to the second ), we are at the stop_memory 1. Thus, nothing is active. */ if (r == 0) { lowest_active_reg = NO_LOWEST_ACTIVE_REG; highest_active_reg = NO_HIGHEST_ACTIVE_REG; } else highest_active_reg = r; } /* If just failed to match something this time around with a group that's operated on by a repetition operator, try to force exit from the ``loop'', and restore the register information for this group that we had before trying this last match. */ if ((!MATCHED_SOMETHING (reg_info[*p]) || just_past_start_mem == p - 1) && (p + 2) < pend) { boolean is_a_jump_n = false; p1 = p + 2; mcnt = 0; switch ((re_opcode_t) *p1++) { case jump_n: is_a_jump_n = true; case pop_failure_jump: case maybe_pop_jump: case jump: case dummy_failure_jump: EXTRACT_NUMBER_AND_INCR (mcnt, p1); if (is_a_jump_n) p1 += OFFSET_ADDRESS_SIZE; break; default: /* do nothing */ ; } p1 += mcnt; /* If the next operation is a jump backwards in the pattern to an on_failure_jump right before the start_memory corresponding to this stop_memory, exit from the loop by forcing a failure after pushing on the stack the on_failure_jump's jump in the pattern, and d. */ if (mcnt < 0 && (re_opcode_t) *p1 == on_failure_jump && (re_opcode_t) p1[1+OFFSET_ADDRESS_SIZE] == start_memory && p1[2+OFFSET_ADDRESS_SIZE] == *p) { /* If this group ever matched anything, then restore what its registers were before trying this last failed match, e.g., with `(a*)*b' against `ab' for regstart[1], and, e.g., with `((a*)*(b*)*)*' against `aba' for regend[3]. Also restore the registers for inner groups for, e.g., `((a*)(b*))*' against `aba' (register 3 would otherwise get trashed). */ if (EVER_MATCHED_SOMETHING (reg_info[*p])) { unsigned r; EVER_MATCHED_SOMETHING (reg_info[*p]) = 0; /* Restore this and inner groups' (if any) registers. */ for (r = *p; r < (unsigned) *p + (unsigned) *(p + 1); r++) { regstart[r] = old_regstart[r]; /* xx why this test? 
*/ if (old_regend[r] >= regstart[r]) regend[r] = old_regend[r]; } } p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); PUSH_FAILURE_POINT (p1 + mcnt, d, -2); goto fail; } } /* Move past the register number and the inner group count. */ p += 2; break; /* \ has been turned into a `duplicate' command which is followed by the numeric value of as the register number. */ case duplicate: { register const CHAR_T *d2, *dend2; int regno = *p++; /* Get which register to match against. */ DEBUG_PRINT2 ("EXECUTING duplicate %d.\n", regno); /* Can't back reference a group which we've never matched. */ if (REG_UNSET (regstart[regno]) || REG_UNSET (regend[regno])) goto fail; /* Where in input to try to start matching. */ d2 = regstart[regno]; /* Where to stop matching; if both the place to start and the place to stop matching are in the same string, then set to the place to stop, otherwise, for now have to use the end of the first string. */ dend2 = ((FIRST_STRING_P (regstart[regno]) == FIRST_STRING_P (regend[regno])) ? regend[regno] : end_match_1); for (;;) { /* If necessary, advance to next segment in register contents. */ while (d2 == dend2) { if (dend2 == end_match_2) break; if (dend2 == regend[regno]) break; /* End of string1 => advance to string2. */ d2 = string2; dend2 = regend[regno]; } /* At end of register contents => success */ if (d2 == dend2) break; /* If necessary, advance to next segment in data. */ PREFETCH (); /* How many characters left in this segment to match. */ mcnt = dend - d; /* Want how many consecutive characters we can match in one shot, so, if necessary, adjust the count. */ if (mcnt > dend2 - d2) mcnt = dend2 - d2; /* Compare that many; failure if mismatch, else move past them. */ if (translate ? PREFIX(bcmp_translate) (d, d2, mcnt, translate) : memcmp (d, d2, mcnt*sizeof(UCHAR_T))) goto fail; d += mcnt, d2 += mcnt; /* Do this because we've match some characters. */ SET_REGS_MATCHED (); } } break; /* begline matches the empty string at the beginning of the string (unless `not_bol' is set in `bufp'), and, if `newline_anchor' is set, after newlines. */ case begline: DEBUG_PRINT1 ("EXECUTING begline.\n"); if (AT_STRINGS_BEG (d)) { if (!bufp->not_bol) break; } else if (d[-1] == '\n' && bufp->newline_anchor) { break; } /* In all other cases, we fail. */ goto fail; /* endline is the dual of begline. */ case endline: DEBUG_PRINT1 ("EXECUTING endline.\n"); if (AT_STRINGS_END (d)) { if (!bufp->not_eol) break; } /* We have to ``prefetch'' the next character. */ else if ((d == end1 ? *string2 : *d) == '\n' && bufp->newline_anchor) { break; } goto fail; /* Match at the very beginning of the data. */ case begbuf: DEBUG_PRINT1 ("EXECUTING begbuf.\n"); if (AT_STRINGS_BEG (d)) break; goto fail; /* Match at the very end of the data. */ case endbuf: DEBUG_PRINT1 ("EXECUTING endbuf.\n"); if (AT_STRINGS_END (d)) break; goto fail; /* on_failure_keep_string_jump is used to optimize `.*\n'. It pushes NULL as the value for the string on the stack. Then `pop_failure_point' will keep the current value for the string, instead of restoring it. To see why, consider matching `foo\nbar' against `.*\n'. The .* matches the foo; then the . fails against the \n. But the next thing we want to do is match the \n against the \n; if we restored the string value, we would be back at the foo. Because this is used only in specific cases, we don't need to check all the things that `on_failure_jump' does, to make sure the right things get saved on the stack. Hence we don't share its code. 
The only reason to push anything on the stack at all is that otherwise we would have to change `anychar's code to do something besides goto fail in this case; that seems worse than this. */ case on_failure_keep_string_jump: DEBUG_PRINT1 ("EXECUTING on_failure_keep_string_jump"); EXTRACT_NUMBER_AND_INCR (mcnt, p); #ifdef _LIBC DEBUG_PRINT3 (" %d (to %p):\n", mcnt, p + mcnt); #else DEBUG_PRINT3 (" %d (to 0x%x):\n", mcnt, p + mcnt); #endif PUSH_FAILURE_POINT (p + mcnt, NULL, -2); break; /* Uses of on_failure_jump: Each alternative starts with an on_failure_jump that points to the beginning of the next alternative. Each alternative except the last ends with a jump that in effect jumps past the rest of the alternatives. (They really jump to the ending jump of the following alternative, because tensioning these jumps is a hassle.) Repeats start with an on_failure_jump that points past both the repetition text and either the following jump or pop_failure_jump back to this on_failure_jump. */ case on_failure_jump: on_failure: DEBUG_PRINT1 ("EXECUTING on_failure_jump"); EXTRACT_NUMBER_AND_INCR (mcnt, p); #ifdef _LIBC DEBUG_PRINT3 (" %d (to %p)", mcnt, p + mcnt); #else DEBUG_PRINT3 (" %d (to 0x%x)", mcnt, p + mcnt); #endif /* If this on_failure_jump comes right before a group (i.e., the original * applied to a group), save the information for that group and all inner ones, so that if we fail back to this point, the group's information will be correct. For example, in \(a*\)*\1, we need the preceding group, and in \(zz\(a*\)b*\)\2, we need the inner group. */ /* We can't use `p' to check ahead because we push a failure point to `p + mcnt' after we do this. */ p1 = p; /* We need to skip no_op's before we look for the start_memory in case this on_failure_jump is happening as the result of a completed succeed_n, as in \(a\)\{1,3\}b\1 against aba. */ while (p1 < pend && (re_opcode_t) *p1 == no_op) p1++; if (p1 < pend && (re_opcode_t) *p1 == start_memory) { /* We have a new highest active register now. This will get reset at the start_memory we are about to get to, but we will have saved all the registers relevant to this repetition op, as described above. */ highest_active_reg = *(p1 + 1) + *(p1 + 2); if (lowest_active_reg == NO_LOWEST_ACTIVE_REG) lowest_active_reg = *(p1 + 1); } DEBUG_PRINT1 (":\n"); PUSH_FAILURE_POINT (p + mcnt, d, -2); break; /* A smart repeat ends with `maybe_pop_jump'. We change it to either `pop_failure_jump' or `jump'. */ case maybe_pop_jump: EXTRACT_NUMBER_AND_INCR (mcnt, p); DEBUG_PRINT2 ("EXECUTING maybe_pop_jump %d.\n", mcnt); { register UCHAR_T *p2 = p; /* Compare the beginning of the repeat with what in the pattern follows its end. If we can establish that there is nothing that they would both match, i.e., that we would have to backtrack because of (as in, e.g., `a*a') then we can change to pop_failure_jump, because we'll never have to backtrack. This is not true in the case of alternatives: in `(a|ab)*' we do need to backtrack to the `ab' alternative (e.g., if the string was `ab'). But instead of trying to detect that here, the alternative has put on a dummy failure point which is what we will end up popping. */ /* Skip over open/close-group commands. If what follows this loop is a ...+ construct, look at what begins its body, since we will have to match at least one of that. 
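   For instance, with `x*y' the analysis below finds that the loop body
   begins with `x' while the pattern after the loop begins with `y'; no
   character can match both, so backtracking into the `x*' can never
   help and the jump is safely rewritten to pop_failure_jump.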
*/ while (1) { if (p2 + 2 < pend && ((re_opcode_t) *p2 == stop_memory || (re_opcode_t) *p2 == start_memory)) p2 += 3; else if (p2 + 2 + 2 * OFFSET_ADDRESS_SIZE < pend && (re_opcode_t) *p2 == dummy_failure_jump) p2 += 2 + 2 * OFFSET_ADDRESS_SIZE; else break; } p1 = p + mcnt; /* p1[0] ... p1[2] are the `on_failure_jump' corresponding to the `maybe_finalize_jump' of this case. Examine what follows. */ /* If we're at the end of the pattern, we can change. */ if (p2 == pend) { /* Consider what happens when matching ":\(.*\)" against ":/". I don't really understand this code yet. */ p[-(1+OFFSET_ADDRESS_SIZE)] = (UCHAR_T) pop_failure_jump; DEBUG_PRINT1 (" End of pattern: change to `pop_failure_jump'.\n"); } else if ((re_opcode_t) *p2 == exactn #ifdef MBS_SUPPORT || (re_opcode_t) *p2 == exactn_bin #endif || (bufp->newline_anchor && (re_opcode_t) *p2 == endline)) { register UCHAR_T c = *p2 == (UCHAR_T) endline ? '\n' : p2[2]; if (((re_opcode_t) p1[1+OFFSET_ADDRESS_SIZE] == exactn #ifdef MBS_SUPPORT || (re_opcode_t) p1[1+OFFSET_ADDRESS_SIZE] == exactn_bin #endif ) && p1[3+OFFSET_ADDRESS_SIZE] != c) { p[-(1+OFFSET_ADDRESS_SIZE)] = (UCHAR_T) pop_failure_jump; #ifdef WCHAR DEBUG_PRINT3 (" %C != %C => pop_failure_jump.\n", (wint_t) c, (wint_t) p1[3+OFFSET_ADDRESS_SIZE]); #else DEBUG_PRINT3 (" %c != %c => pop_failure_jump.\n", (char) c, (char) p1[3+OFFSET_ADDRESS_SIZE]); #endif } #ifndef WCHAR else if ((re_opcode_t) p1[3] == charset || (re_opcode_t) p1[3] == charset_not) { int not = (re_opcode_t) p1[3] == charset_not; if (c < (unsigned) (p1[4] * BYTEWIDTH) && p1[5 + c / BYTEWIDTH] & (1 << (c % BYTEWIDTH))) not = !not; /* `not' is equal to 1 if c would match, which means that we can't change to pop_failure_jump. */ if (!not) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } } #endif /* not WCHAR */ } #ifndef WCHAR else if ((re_opcode_t) *p2 == charset) { /* We win if the first character of the loop is not part of the charset. */ if ((re_opcode_t) p1[3] == exactn && ! ((int) p2[1] * BYTEWIDTH > (int) p1[5] && (p2[2 + p1[5] / BYTEWIDTH] & (1 << (p1[5] % BYTEWIDTH))))) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } else if ((re_opcode_t) p1[3] == charset_not) { int idx; /* We win if the charset_not inside the loop lists every character listed in the charset after. */ for (idx = 0; idx < (int) p2[1]; idx++) if (! (p2[2 + idx] == 0 || (idx < (int) p1[4] && ((p2[2 + idx] & ~ p1[5 + idx]) == 0)))) break; if (idx == p2[1]) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } } else if ((re_opcode_t) p1[3] == charset) { int idx; /* We win if the charset inside the loop has no overlap with the one after the loop. */ for (idx = 0; idx < (int) p2[1] && idx < (int) p1[4]; idx++) if ((p2[2 + idx] & p1[5 + idx]) != 0) break; if (idx == p2[1] || idx == p1[4]) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } } } #endif /* not WCHAR */ } p -= OFFSET_ADDRESS_SIZE; /* Point at relative address again. */ if ((re_opcode_t) p[-1] != pop_failure_jump) { p[-1] = (UCHAR_T) jump; DEBUG_PRINT1 (" Match => jump.\n"); goto unconditional_jump; } /* Note fall through. */ /* The end of a simple repeat has a pop_failure_jump back to its matching on_failure_jump, where the latter will push a failure point. 
The pop_failure_jump takes off failure points put on by this pop_failure_jump's matching on_failure_jump; we got through the pattern to here from the matching on_failure_jump, so didn't fail. */ case pop_failure_jump: { /* We need to pass separate storage for the lowest and highest registers, even though we don't care about the actual values. Otherwise, we will restore only one register from the stack, since lowest will == highest in `pop_failure_point'. */ active_reg_t dummy_low_reg, dummy_high_reg; UCHAR_T *pdummy = NULL; const CHAR_T *sdummy = NULL; DEBUG_PRINT1 ("EXECUTING pop_failure_jump.\n"); POP_FAILURE_POINT (sdummy, pdummy, dummy_low_reg, dummy_high_reg, reg_dummy, reg_dummy, reg_info_dummy); } /* Note fall through. */ unconditional_jump: #ifdef _LIBC DEBUG_PRINT2 ("\n%p: ", p); #else DEBUG_PRINT2 ("\n0x%x: ", p); #endif /* Note fall through. */ /* Unconditionally jump (without popping any failure points). */ case jump: EXTRACT_NUMBER_AND_INCR (mcnt, p); /* Get the amount to jump. */ DEBUG_PRINT2 ("EXECUTING jump %d ", mcnt); p += mcnt; /* Do the jump. */ #ifdef _LIBC DEBUG_PRINT2 ("(to %p).\n", p); #else DEBUG_PRINT2 ("(to 0x%x).\n", p); #endif break; /* We need this opcode so we can detect where alternatives end in `group_match_null_string_p' et al. */ case jump_past_alt: DEBUG_PRINT1 ("EXECUTING jump_past_alt.\n"); goto unconditional_jump; /* Normally, the on_failure_jump pushes a failure point, which then gets popped at pop_failure_jump. We will end up at pop_failure_jump, also, and with a pattern of, say, `a+', we are skipping over the on_failure_jump, so we have to push something meaningless for pop_failure_jump to pop. */ case dummy_failure_jump: DEBUG_PRINT1 ("EXECUTING dummy_failure_jump.\n"); /* It doesn't matter what we push for the string here. What the code at `fail' tests is the value for the pattern. */ PUSH_FAILURE_POINT (NULL, NULL, -2); goto unconditional_jump; /* At the end of an alternative, we need to push a dummy failure point in case we are followed by a `pop_failure_jump', because we don't want the failure point for the alternative to be popped. For example, matching `(a|ab)*' against `aab' requires that we match the `ab' alternative. */ case push_dummy_failure: DEBUG_PRINT1 ("EXECUTING push_dummy_failure.\n"); /* See comments just above at `dummy_failure_jump' about the two zeroes. */ PUSH_FAILURE_POINT (NULL, NULL, -2); break; /* Have to succeed matching what follows at least n times. After that, handle like `on_failure_jump'. */ case succeed_n: EXTRACT_NUMBER (mcnt, p + OFFSET_ADDRESS_SIZE); DEBUG_PRINT2 ("EXECUTING succeed_n %d.\n", mcnt); assert (mcnt >= 0); /* Originally, this is how many times we HAVE to succeed. */ if (mcnt > 0) { mcnt--; p += OFFSET_ADDRESS_SIZE; STORE_NUMBER_AND_INCR (p, mcnt); #ifdef _LIBC DEBUG_PRINT3 (" Setting %p to %d.\n", p - OFFSET_ADDRESS_SIZE , mcnt); #else DEBUG_PRINT3 (" Setting 0x%x to %d.\n", p - OFFSET_ADDRESS_SIZE , mcnt); #endif } else if (mcnt == 0) { #ifdef _LIBC DEBUG_PRINT2 (" Setting two bytes from %p to no_op.\n", p + OFFSET_ADDRESS_SIZE); #else DEBUG_PRINT2 (" Setting two bytes from 0x%x to no_op.\n", p + OFFSET_ADDRESS_SIZE); #endif /* _LIBC */ #ifdef WCHAR p[1] = (UCHAR_T) no_op; #else p[2] = (UCHAR_T) no_op; p[3] = (UCHAR_T) no_op; #endif /* WCHAR */ goto on_failure; } break; case jump_n: EXTRACT_NUMBER (mcnt, p + OFFSET_ADDRESS_SIZE); DEBUG_PRINT2 ("EXECUTING jump_n %d.\n", mcnt); /* Originally, this is how many times we CAN jump. 
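   Roughly speaking, together with succeed_n (how many times we still
   HAVE to match what follows) and set_number_at (which stores a fresh
   value into one of these in-pattern counters), this is how counted
   intervals such as `x\{2,4\}' are executed: the succeed_n counter
   enforces the required minimum and the jump_n counter caps the
   optional extra repetitions.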
*/ if (mcnt) { mcnt--; STORE_NUMBER (p + OFFSET_ADDRESS_SIZE, mcnt); #ifdef _LIBC DEBUG_PRINT3 (" Setting %p to %d.\n", p + OFFSET_ADDRESS_SIZE, mcnt); #else DEBUG_PRINT3 (" Setting 0x%x to %d.\n", p + OFFSET_ADDRESS_SIZE, mcnt); #endif /* _LIBC */ goto unconditional_jump; } /* If don't have to jump any more, skip over the rest of command. */ else p += 2 * OFFSET_ADDRESS_SIZE; break; case set_number_at: { DEBUG_PRINT1 ("EXECUTING set_number_at.\n"); EXTRACT_NUMBER_AND_INCR (mcnt, p); p1 = p + mcnt; EXTRACT_NUMBER_AND_INCR (mcnt, p); #ifdef _LIBC DEBUG_PRINT3 (" Setting %p to %d.\n", p1, mcnt); #else DEBUG_PRINT3 (" Setting 0x%x to %d.\n", p1, mcnt); #endif STORE_NUMBER (p1, mcnt); break; } #if 0 /* The DEC Alpha C compiler 3.x generates incorrect code for the test WORDCHAR_P (d - 1) != WORDCHAR_P (d) in the expansion of AT_WORD_BOUNDARY, so this code is disabled. Expanding the macro and introducing temporary variables works around the bug. */ case wordbound: DEBUG_PRINT1 ("EXECUTING wordbound.\n"); if (AT_WORD_BOUNDARY (d)) break; goto fail; case notwordbound: DEBUG_PRINT1 ("EXECUTING notwordbound.\n"); if (AT_WORD_BOUNDARY (d)) goto fail; break; #else case wordbound: { boolean prevchar, thischar; DEBUG_PRINT1 ("EXECUTING wordbound.\n"); if (AT_STRINGS_BEG (d) || AT_STRINGS_END (d)) break; prevchar = WORDCHAR_P (d - 1); thischar = WORDCHAR_P (d); if (prevchar != thischar) break; goto fail; } case notwordbound: { boolean prevchar, thischar; DEBUG_PRINT1 ("EXECUTING notwordbound.\n"); if (AT_STRINGS_BEG (d) || AT_STRINGS_END (d)) goto fail; prevchar = WORDCHAR_P (d - 1); thischar = WORDCHAR_P (d); if (prevchar != thischar) goto fail; break; } #endif case wordbeg: DEBUG_PRINT1 ("EXECUTING wordbeg.\n"); if (!AT_STRINGS_END (d) && WORDCHAR_P (d) && (AT_STRINGS_BEG (d) || !WORDCHAR_P (d - 1))) break; goto fail; case wordend: DEBUG_PRINT1 ("EXECUTING wordend.\n"); if (!AT_STRINGS_BEG (d) && WORDCHAR_P (d - 1) && (AT_STRINGS_END (d) || !WORDCHAR_P (d))) break; goto fail; #ifdef emacs case before_dot: DEBUG_PRINT1 ("EXECUTING before_dot.\n"); if (PTR_CHAR_POS ((unsigned char *) d) >= point) goto fail; break; case at_dot: DEBUG_PRINT1 ("EXECUTING at_dot.\n"); if (PTR_CHAR_POS ((unsigned char *) d) != point) goto fail; break; case after_dot: DEBUG_PRINT1 ("EXECUTING after_dot.\n"); if (PTR_CHAR_POS ((unsigned char *) d) <= point) goto fail; break; case syntaxspec: DEBUG_PRINT2 ("EXECUTING syntaxspec %d.\n", mcnt); mcnt = *p++; goto matchsyntax; case wordchar: DEBUG_PRINT1 ("EXECUTING Emacs wordchar.\n"); mcnt = (int) Sword; matchsyntax: PREFETCH (); /* Can't use *d++ here; SYNTAX may be an unsafe macro. */ d++; if (SYNTAX (d[-1]) != (enum syntaxcode) mcnt) goto fail; SET_REGS_MATCHED (); break; case notsyntaxspec: DEBUG_PRINT2 ("EXECUTING notsyntaxspec %d.\n", mcnt); mcnt = *p++; goto matchnotsyntax; case notwordchar: DEBUG_PRINT1 ("EXECUTING Emacs notwordchar.\n"); mcnt = (int) Sword; matchnotsyntax: PREFETCH (); /* Can't use *d++ here; SYNTAX may be an unsafe macro. */ d++; if (SYNTAX (d[-1]) == (enum syntaxcode) mcnt) goto fail; SET_REGS_MATCHED (); break; #else /* not emacs */ case wordchar: DEBUG_PRINT1 ("EXECUTING non-Emacs wordchar.\n"); PREFETCH (); if (!WORDCHAR_P (d)) goto fail; SET_REGS_MATCHED (); d++; break; case notwordchar: DEBUG_PRINT1 ("EXECUTING non-Emacs notwordchar.\n"); PREFETCH (); if (WORDCHAR_P (d)) goto fail; SET_REGS_MATCHED (); d++; break; #endif /* not emacs */ default: abort (); } continue; /* Successfully executed one pattern command; keep going. 
*/ /* We goto here if a matching operation fails. */ fail: if (!FAIL_STACK_EMPTY ()) { /* A restart point is known. Restore to that state. */ DEBUG_PRINT1 ("\nFAIL:\n"); POP_FAILURE_POINT (d, p, lowest_active_reg, highest_active_reg, regstart, regend, reg_info); /* If this failure point is a dummy, try the next one. */ if (!p) goto fail; /* If we failed to the end of the pattern, don't examine *p. */ assert (p <= pend); if (p < pend) { boolean is_a_jump_n = false; /* If failed to a backwards jump that's part of a repetition loop, need to pop this failure point and use the next one. */ switch ((re_opcode_t) *p) { case jump_n: is_a_jump_n = true; case maybe_pop_jump: case pop_failure_jump: case jump: p1 = p + 1; EXTRACT_NUMBER_AND_INCR (mcnt, p1); p1 += mcnt; if ((is_a_jump_n && (re_opcode_t) *p1 == succeed_n) || (!is_a_jump_n && (re_opcode_t) *p1 == on_failure_jump)) goto fail; break; default: /* do nothing */ ; } } if (d >= string1 && d <= end1) dend = end_match_1; } else break; /* Matching at this starting point really fails. */ } /* for (;;) */ if (best_regs_set) goto restore_best_regs; FREE_VARIABLES (); return -1; /* Failure to match. */ } /* re_match_2 */ /* Subroutine definitions for re_match_2. */ /* We are passed P pointing to a register number after a start_memory. Return true if the pattern up to the corresponding stop_memory can match the empty string, and false otherwise. If we find the matching stop_memory, sets P to point to one past its number. Otherwise, sets P to an undefined byte less than or equal to END. We don't handle duplicates properly (yet). */ static boolean PREFIX(group_match_null_string_p) (p, end, reg_info) UCHAR_T **p, *end; PREFIX(register_info_type) *reg_info; { int mcnt; /* Point to after the args to the start_memory. */ UCHAR_T *p1 = *p + 2; while (p1 < end) { /* Skip over opcodes that can match nothing, and return true or false, as appropriate, when we get to one that can't, or to the matching stop_memory. */ switch ((re_opcode_t) *p1) { /* Could be either a loop or a series of alternatives. */ case on_failure_jump: p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); /* If the next operation is not a jump backwards in the pattern. */ if (mcnt >= 0) { /* Go through the on_failure_jumps of the alternatives, seeing if any of the alternatives cannot match nothing. The last alternative starts with only a jump, whereas the rest start with on_failure_jump and end with a jump, e.g., here is the pattern for `a|b|c': /on_failure_jump/0/6/exactn/1/a/jump_past_alt/0/6 /on_failure_jump/0/6/exactn/1/b/jump_past_alt/0/3 /exactn/1/c So, we have to first go through the first (n-1) alternatives and then deal with the last one separately. */ /* Deal with the first (n-1) alternatives, which start with an on_failure_jump (see above) that jumps to right past a jump_past_alt. */ while ((re_opcode_t) p1[mcnt-(1+OFFSET_ADDRESS_SIZE)] == jump_past_alt) { /* `mcnt' holds how many bytes long the alternative is, including the ending `jump_past_alt' and its number. */ if (!PREFIX(alt_match_null_string_p) (p1, p1 + mcnt - (1 + OFFSET_ADDRESS_SIZE), reg_info)) return false; /* Move to right after this alternative, including the jump_past_alt. */ p1 += mcnt; /* Break if it's the beginning of an n-th alternative that doesn't begin with an on_failure_jump. */ if ((re_opcode_t) *p1 != on_failure_jump) break; /* Still have to check that it's not an n-th alternative that starts with an on_failure_jump. 
*/ p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); if ((re_opcode_t) p1[mcnt-(1+OFFSET_ADDRESS_SIZE)] != jump_past_alt) { /* Get to the beginning of the n-th alternative. */ p1 -= 1 + OFFSET_ADDRESS_SIZE; break; } } /* Deal with the last alternative: go back and get number of the `jump_past_alt' just before it. `mcnt' contains the length of the alternative. */ EXTRACT_NUMBER (mcnt, p1 - OFFSET_ADDRESS_SIZE); if (!PREFIX(alt_match_null_string_p) (p1, p1 + mcnt, reg_info)) return false; p1 += mcnt; /* Get past the n-th alternative. */ } /* if mcnt > 0 */ break; case stop_memory: assert (p1[1] == **p); *p = p1 + 2; return true; default: if (!PREFIX(common_op_match_null_string_p) (&p1, end, reg_info)) return false; } } /* while p1 < end */ return false; } /* group_match_null_string_p */ /* Similar to group_match_null_string_p, but doesn't deal with alternatives: It expects P to be the first byte of a single alternative and END one byte past the last. The alternative can contain groups. */ static boolean PREFIX(alt_match_null_string_p) (p, end, reg_info) UCHAR_T *p, *end; PREFIX(register_info_type) *reg_info; { int mcnt; UCHAR_T *p1 = p; while (p1 < end) { /* Skip over opcodes that can match nothing, and break when we get to one that can't. */ switch ((re_opcode_t) *p1) { /* It's a loop. */ case on_failure_jump: p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); p1 += mcnt; break; default: if (!PREFIX(common_op_match_null_string_p) (&p1, end, reg_info)) return false; } } /* while p1 < end */ return true; } /* alt_match_null_string_p */ /* Deals with the ops common to group_match_null_string_p and alt_match_null_string_p. Sets P to one after the op and its arguments, if any. */ static boolean PREFIX(common_op_match_null_string_p) (p, end, reg_info) UCHAR_T **p, *end; PREFIX(register_info_type) *reg_info; { int mcnt; boolean ret; int reg_no; UCHAR_T *p1 = *p; switch ((re_opcode_t) *p1++) { case no_op: case begline: case endline: case begbuf: case endbuf: case wordbeg: case wordend: case wordbound: case notwordbound: #ifdef emacs case before_dot: case at_dot: case after_dot: #endif break; case start_memory: reg_no = *p1; assert (reg_no > 0 && reg_no <= MAX_REGNUM); ret = PREFIX(group_match_null_string_p) (&p1, end, reg_info); /* Have to set this here in case we're checking a group which contains a group and a back reference to it. */ if (REG_MATCH_NULL_STRING_P (reg_info[reg_no]) == MATCH_NULL_UNSET_VALUE) REG_MATCH_NULL_STRING_P (reg_info[reg_no]) = ret; if (!ret) return false; break; /* If this is an optimized succeed_n for zero times, make the jump. */ case jump: EXTRACT_NUMBER_AND_INCR (mcnt, p1); if (mcnt >= 0) p1 += mcnt; else return false; break; case succeed_n: /* Get to the number of times to succeed. */ p1 += OFFSET_ADDRESS_SIZE; EXTRACT_NUMBER_AND_INCR (mcnt, p1); if (mcnt == 0) { p1 -= 2 * OFFSET_ADDRESS_SIZE; EXTRACT_NUMBER_AND_INCR (mcnt, p1); p1 += mcnt; } else return false; break; case duplicate: if (!REG_MATCH_NULL_STRING_P (reg_info[*p1])) return false; break; case set_number_at: p1 += 2 * OFFSET_ADDRESS_SIZE; default: /* All other opcodes mean we cannot match the empty string. */ return false; } *p = p1; return true; } /* common_op_match_null_string_p */ /* Return zero if TRANSLATE[S1] and TRANSLATE[S2] are identical for LEN bytes; nonzero otherwise. 
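   With the case-folding table that regcomp installs for REG_ICASE, for
   example, "Foo" and "fOO" compare as identical here.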
*/ static int PREFIX(bcmp_translate) (s1, s2, len, translate) const CHAR_T *s1, *s2; register int len; RE_TRANSLATE_TYPE translate; { register const UCHAR_T *p1 = (const UCHAR_T *) s1; register const UCHAR_T *p2 = (const UCHAR_T *) s2; while (len) { #ifdef WCHAR if (((*p1<=0xff)?translate[*p1++]:*p1++) != ((*p2<=0xff)?translate[*p2++]:*p2++)) return 1; #else /* BYTE */ if (translate[*p1++] != translate[*p2++]) return 1; #endif /* WCHAR */ len--; } return 0; } #else /* not INSIDE_RECURSION */ /* Entry points for GNU code. */ /* re_compile_pattern is the GNU regular expression compiler: it compiles PATTERN (of length SIZE) and puts the result in BUFP. Returns 0 if the pattern was valid, otherwise an error string. Assumes the `allocated' (and perhaps `buffer') and `translate' fields are set in BUFP on entry. We call regex_compile to do the actual compilation. */ const char * re_compile_pattern (pattern, length, bufp) const char *pattern; size_t length; struct re_pattern_buffer *bufp; { reg_errcode_t ret; /* GNU code is written to assume at least RE_NREGS registers will be set (and at least one extra will be -1). */ bufp->regs_allocated = REGS_UNALLOCATED; /* And GNU code determines whether or not to get register information by passing null for the REGS argument to re_match, etc., not by setting no_sub. */ bufp->no_sub = 0; /* Match anchors at newline. */ bufp->newline_anchor = 1; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) ret = wcs_regex_compile (pattern, length, re_syntax_options, bufp); else # endif ret = byte_regex_compile (pattern, length, re_syntax_options, bufp); if (!ret) return NULL; return gettext (re_error_msgid[(int) ret]); } #ifdef _LIBC weak_alias (__re_compile_pattern, re_compile_pattern) #endif /* Entry points compatible with 4.2 BSD regex library. We don't define them unless specifically requested. */ #if defined _REGEX_RE_COMP || defined _LIBC /* BSD has one and only one pattern buffer. */ static struct re_pattern_buffer re_comp_buf; char * #ifdef _LIBC /* Make these definitions weak in libc, so POSIX programs can redefine these names if they don't use our functions, and still use regcomp/regexec below without link errors. */ weak_function #endif re_comp (s) const char *s; { reg_errcode_t ret; if (!s) { if (!re_comp_buf.buffer) return gettext ("No previous regular expression"); return 0; } if (!re_comp_buf.buffer) { re_comp_buf.buffer = (unsigned char *) malloc (200); if (re_comp_buf.buffer == NULL) return (char *) gettext (re_error_msgid[(int) REG_ESPACE]); re_comp_buf.allocated = 200; re_comp_buf.fastmap = (char *) malloc (1 << BYTEWIDTH); if (re_comp_buf.fastmap == NULL) return (char *) gettext (re_error_msgid[(int) REG_ESPACE]); } /* Since `re_exec' always passes NULL for the `regs' argument, we don't need to initialize the pattern buffer fields which affect it. */ /* Match anchors at newlines. */ re_comp_buf.newline_anchor = 1; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) ret = wcs_regex_compile (s, strlen (s), re_syntax_options, &re_comp_buf); else # endif ret = byte_regex_compile (s, strlen (s), re_syntax_options, &re_comp_buf); if (!ret) return NULL; /* Yes, we're discarding `const' here if !HAVE_LIBINTL. */ return (char *) gettext (re_error_msgid[(int) ret]); } int #ifdef _LIBC weak_function #endif re_exec (s) const char *s; { const int len = strlen (s); return 0 <= re_search (&re_comp_buf, s, len, 0, len, (struct re_registers *) 0); } #endif /* _REGEX_RE_COMP */ /* POSIX.2 functions. Don't define these for Emacs. 
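   A minimal usage sketch of these entry points (illustrative only;
   error handling and includes are omitted, and the pattern and subject
   strings are made-up examples):

       regex_t re;
       regmatch_t m[2];
       if (regcomp (&re, "ab\\(c*\\)d", 0) == 0
           && regexec (&re, "xxabcccd", 2, m, 0) == 0)
         printf ("group 1 spans [%d, %d)\n",
                 (int) m[1].rm_so, (int) m[1].rm_eo);
       regfree (&re);

   With cflags 0 the pattern is compiled as a POSIX basic regular
   expression, so the group is written `\(...\)' (hence the doubled
   backslashes in the C string literal).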
*/ #ifndef emacs /* regcomp takes a regular expression as a string and compiles it. PREG is a regex_t *. We do not expect any fields to be initialized, since POSIX says we shouldn't. Thus, we set `buffer' to the compiled pattern; `used' to the length of the compiled pattern; `syntax' to RE_SYNTAX_POSIX_EXTENDED if the REG_EXTENDED bit in CFLAGS is set; otherwise, to RE_SYNTAX_POSIX_BASIC; `newline_anchor' to REG_NEWLINE being set in CFLAGS; `fastmap' to an allocated space for the fastmap; `fastmap_accurate' to zero; `re_nsub' to the number of subexpressions in PATTERN. PATTERN is the address of the pattern string. CFLAGS is a series of bits which affect compilation. If REG_EXTENDED is set, we use POSIX extended syntax; otherwise, we use POSIX basic syntax. If REG_NEWLINE is set, then . and [^...] don't match newline. Also, regexec will try a match beginning after every newline. If REG_ICASE is set, then we considers upper- and lowercase versions of letters to be equivalent when matching. If REG_NOSUB is set, then when PREG is passed to regexec, that routine will report only success or failure, and nothing about the registers. It returns 0 if it succeeds, nonzero if it doesn't. (See regex.h for the return codes and their meanings.) */ int regcomp (preg, pattern, cflags) regex_t *preg; const char *pattern; int cflags; { reg_errcode_t ret; reg_syntax_t syntax = (cflags & REG_EXTENDED) ? RE_SYNTAX_POSIX_EXTENDED : RE_SYNTAX_POSIX_BASIC; /* regex_compile will allocate the space for the compiled pattern. */ preg->buffer = 0; preg->allocated = 0; preg->used = 0; /* Try to allocate space for the fastmap. */ preg->fastmap = (char *) malloc (1 << BYTEWIDTH); if (cflags & REG_ICASE) { unsigned i; preg->translate = (RE_TRANSLATE_TYPE) malloc (CHAR_SET_SIZE * sizeof (*(RE_TRANSLATE_TYPE)0)); if (preg->translate == NULL) return (int) REG_ESPACE; /* Map uppercase characters to corresponding lowercase ones. */ for (i = 0; i < CHAR_SET_SIZE; i++) preg->translate[i] = ISUPPER (i) ? TOLOWER (i) : (int) i; } else preg->translate = NULL; /* If REG_NEWLINE is set, newlines are treated differently. */ if (cflags & REG_NEWLINE) { /* REG_NEWLINE implies neither . nor [^...] match newline. */ syntax &= ~RE_DOT_NEWLINE; syntax |= RE_HAT_LISTS_NOT_NEWLINE; /* It also changes the matching behavior. */ preg->newline_anchor = 1; } else preg->newline_anchor = 0; preg->no_sub = !!(cflags & REG_NOSUB); /* POSIX says a null character in the pattern terminates it, so we can use strlen here in compiling the pattern. */ # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) ret = wcs_regex_compile (pattern, strlen (pattern), syntax, preg); else # endif ret = byte_regex_compile (pattern, strlen (pattern), syntax, preg); /* POSIX doesn't distinguish between an unmatched open-group and an unmatched close-group: both are REG_EPAREN. */ if (ret == REG_ERPAREN) ret = REG_EPAREN; if (ret == REG_NOERROR && preg->fastmap) { /* Compute the fastmap now, since regexec cannot modify the pattern buffer. */ if (re_compile_fastmap (preg) == -2) { /* Some error occurred while computing the fastmap, just forget about it. */ free (preg->fastmap); preg->fastmap = NULL; } } return (int) ret; } #ifdef _LIBC weak_alias (__regcomp, regcomp) #endif /* regexec searches for a given pattern, specified by PREG, in the string STRING. If NMATCH is zero or REG_NOSUB was set in the cflags argument to `regcomp', we ignore PMATCH. Otherwise, we assume PMATCH has at least NMATCH elements, and we set them to the offsets of the corresponding matched substrings. 
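   PMATCH[0] describes the whole match; PMATCH[i], for i >= 1, describes
   the i-th parenthesized subexpression. Offsets go into the rm_so and
   rm_eo fields, and a group that took no part in the match gets -1 in
   both.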
EFLAGS specifies `execution flags' which affect matching: if REG_NOTBOL is set, then ^ does not match at the beginning of the string; if REG_NOTEOL is set, then $ does not match at the end. We return 0 if we find a match and REG_NOMATCH if not. */ int regexec (preg, string, nmatch, pmatch, eflags) const regex_t *preg; const char *string; size_t nmatch; regmatch_t pmatch[]; int eflags; { int ret; struct re_registers regs; regex_t private_preg; int len = strlen (string); boolean want_reg_info = !preg->no_sub && nmatch > 0; private_preg = *preg; private_preg.not_bol = !!(eflags & REG_NOTBOL); private_preg.not_eol = !!(eflags & REG_NOTEOL); /* The user has told us exactly how many registers to return information about, via `nmatch'. We have to pass that on to the matching routines. */ private_preg.regs_allocated = REGS_FIXED; if (want_reg_info) { regs.num_regs = nmatch; regs.start = TALLOC (nmatch * 2, regoff_t); if (regs.start == NULL) return (int) REG_NOMATCH; regs.end = regs.start + nmatch; } /* Perform the searching operation. */ ret = re_search (&private_preg, string, len, /* start: */ 0, /* range: */ len, want_reg_info ? ®s : (struct re_registers *) 0); /* Copy the register information to the POSIX structure. */ if (want_reg_info) { if (ret >= 0) { unsigned r; for (r = 0; r < nmatch; r++) { pmatch[r].rm_so = regs.start[r]; pmatch[r].rm_eo = regs.end[r]; } } /* If we needed the temporary register info, free the space now. */ free (regs.start); } /* We want zero return to mean success, unlike `re_search'. */ return ret >= 0 ? (int) REG_NOERROR : (int) REG_NOMATCH; } #ifdef _LIBC weak_alias (__regexec, regexec) #endif /* Returns a message corresponding to an error code, ERRCODE, returned from either regcomp or regexec. We don't use PREG here. */ size_t regerror (errcode, preg, errbuf, errbuf_size) int errcode; const regex_t *preg ATTRIBUTE_UNUSED; char *errbuf; size_t errbuf_size; { const char *msg; size_t msg_size; if (errcode < 0 || errcode >= (int) (sizeof (re_error_msgid) / sizeof (re_error_msgid[0]))) /* Only error codes returned by the rest of the code should be passed to this routine. If we are given anything else, or if other regex code generates an invalid error code, then the program has a bug. Dump core so we can fix it. */ abort (); msg = gettext (re_error_msgid[errcode]); msg_size = strlen (msg) + 1; /* Includes the null. */ if (errbuf_size != 0) { if (msg_size > errbuf_size) { #if defined HAVE_MEMPCPY || defined _LIBC *((char *) mempcpy (errbuf, msg, errbuf_size - 1)) = '\0'; #else memcpy (errbuf, msg, errbuf_size - 1); errbuf[errbuf_size - 1] = 0; #endif } else memcpy (errbuf, msg, msg_size); } return msg_size; } #ifdef _LIBC weak_alias (__regerror, regerror) #endif /* Free dynamically allocated space used by PREG. 
*/ void regfree (preg) regex_t *preg; { if (preg->buffer != NULL) free (preg->buffer); preg->buffer = NULL; preg->allocated = 0; preg->used = 0; if (preg->fastmap != NULL) free (preg->fastmap); preg->fastmap = NULL; preg->fastmap_accurate = 0; if (preg->translate != NULL) free (preg->translate); preg->translate = NULL; } #ifdef _LIBC weak_alias (__regfree, regfree) #endif #endif /* not emacs */ #endif /* not INSIDE_RECURSION */ #undef STORE_NUMBER #undef STORE_NUMBER_AND_INCR #undef EXTRACT_NUMBER #undef EXTRACT_NUMBER_AND_INCR #undef DEBUG_PRINT_COMPILED_PATTERN #undef DEBUG_PRINT_DOUBLE_STRING #undef INIT_FAIL_STACK #undef RESET_FAIL_STACK #undef DOUBLE_FAIL_STACK #undef PUSH_PATTERN_OP #undef PUSH_FAILURE_POINTER #undef PUSH_FAILURE_INT #undef PUSH_FAILURE_ELT #undef POP_FAILURE_POINTER #undef POP_FAILURE_INT #undef POP_FAILURE_ELT #undef DEBUG_PUSH #undef DEBUG_POP #undef PUSH_FAILURE_POINT #undef POP_FAILURE_POINT #undef REG_UNSET_VALUE #undef REG_UNSET #undef PATFETCH #undef PATFETCH_RAW #undef PATUNFETCH #undef TRANSLATE #undef INIT_BUF_SIZE #undef GET_BUFFER_SPACE #undef BUF_PUSH #undef BUF_PUSH_2 #undef BUF_PUSH_3 #undef STORE_JUMP #undef STORE_JUMP2 #undef INSERT_JUMP #undef INSERT_JUMP2 #undef EXTEND_BUFFER #undef GET_UNSIGNED_NUMBER #undef FREE_STACK_RETURN # undef POINTER_TO_OFFSET # undef MATCHING_IN_FRST_STRING # undef PREFETCH # undef AT_STRINGS_BEG # undef AT_STRINGS_END # undef WORDCHAR_P # undef FREE_VAR # undef FREE_VARIABLES # undef NO_HIGHEST_ACTIVE_REG # undef NO_LOWEST_ACTIVE_REG # undef CHAR_T # undef UCHAR_T # undef COMPILED_BUFFER_VAR # undef OFFSET_ADDRESS_SIZE # undef CHAR_CLASS_SIZE # undef PREFIX # undef ARG_PREFIX # undef PUT_CHAR # undef BYTE # undef WCHAR #undef ISALPHA #undef ISALNUM #undef ISBLANK #undef ISCNTRL #undef ISDIGIT #undef ISGRAPH #undef ISLOWER #undef ISPRINT #undef ISPUNCT #undef ISSPACE #undef ISXDIGIT # define DEFINED_ONCE # undef INSIDE_RECURSION # endif #endif #ifdef INSIDE_RECURSION /* Common operations on the compiled pattern. */ /* Store NUMBER in two contiguous bytes starting at DESTINATION. */ /* ifdef MBS_SUPPORT, we store NUMBER in 1 element. */ # ifdef WCHAR # define STORE_NUMBER(destination, number) \ do { \ *(destination) = (UCHAR_T)(number); \ } while (0) # else /* BYTE */ # define STORE_NUMBER(destination, number) \ do { \ (destination)[0] = (number) & 0377; \ (destination)[1] = (number) >> 8; \ } while (0) # endif /* WCHAR */ /* Same as STORE_NUMBER, except increment DESTINATION to the byte after where the number is stored. Therefore, DESTINATION must be an lvalue. */ /* ifdef MBS_SUPPORT, we store NUMBER in 1 element. */ # define STORE_NUMBER_AND_INCR(destination, number) \ do { \ STORE_NUMBER (destination, number); \ (destination) += OFFSET_ADDRESS_SIZE; \ } while (0) /* Put into DESTINATION a number stored in two contiguous bytes starting at SOURCE. */ /* ifdef MBS_SUPPORT, we store NUMBER in 1 element. 
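   In the byte (non-WCHAR) case, for example, STORE_NUMBER writes 260 as
   destination[0] = 260 & 0377 = 4 and destination[1] = 260 >> 8 = 1, and
   EXTRACT_NUMBER below reassembles 4 + (1 << 8) = 260; the high byte is
   sign-extended, so negative jump offsets survive the round trip.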
*/ # ifdef WCHAR # define EXTRACT_NUMBER(destination, source) \ do { \ (destination) = *(source); \ } while (0) # else /* BYTE */ # define EXTRACT_NUMBER(destination, source) \ do { \ (destination) = *(source) & 0377; \ (destination) += SIGN_EXTEND_CHAR (*((source) + 1)) << 8; \ } while (0) # endif # ifdef DEBUG static void PREFIX(extract_number) _RE_ARGS ((int *dest, UCHAR_T *source)); static void PREFIX(extract_number) (dest, source) int *dest; UCHAR_T *source; { # ifdef WCHAR *dest = *source; # else /* BYTE */ int temp = SIGN_EXTEND_CHAR (*(source + 1)); *dest = *source & 0377; *dest += temp << 8; # endif } # ifndef EXTRACT_MACROS /* To debug the macros. */ # undef EXTRACT_NUMBER # define EXTRACT_NUMBER(dest, src) PREFIX(extract_number) (&dest, src) # endif /* not EXTRACT_MACROS */ # endif /* DEBUG */ /* Same as EXTRACT_NUMBER, except increment SOURCE to after the number. SOURCE must be an lvalue. */ # define EXTRACT_NUMBER_AND_INCR(destination, source) \ do { \ EXTRACT_NUMBER (destination, source); \ (source) += OFFSET_ADDRESS_SIZE; \ } while (0) # ifdef DEBUG static void PREFIX(extract_number_and_incr) _RE_ARGS ((int *destination, UCHAR_T **source)); static void PREFIX(extract_number_and_incr) (destination, source) int *destination; UCHAR_T **source; { PREFIX(extract_number) (destination, *source); *source += OFFSET_ADDRESS_SIZE; } # ifndef EXTRACT_MACROS # undef EXTRACT_NUMBER_AND_INCR # define EXTRACT_NUMBER_AND_INCR(dest, src) \ PREFIX(extract_number_and_incr) (&dest, &src) # endif /* not EXTRACT_MACROS */ # endif /* DEBUG */ /* If DEBUG is defined, Regex prints many voluminous messages about what it is doing (if the variable `debug' is nonzero). If linked with the main program in `iregex.c', you can enter patterns and strings interactively. And if linked with the main program in `main.c' and the other test files, you can run the already-written tests. */ # ifdef DEBUG # ifndef DEFINED_ONCE /* We use standard I/O for debugging. */ # include /* It is useful to test things that ``must'' be true when debugging. */ # include static int debug; # define DEBUG_STATEMENT(e) e # define DEBUG_PRINT1(x) if (debug) printf (x) # define DEBUG_PRINT2(x1, x2) if (debug) printf (x1, x2) # define DEBUG_PRINT3(x1, x2, x3) if (debug) printf (x1, x2, x3) # define DEBUG_PRINT4(x1, x2, x3, x4) if (debug) printf (x1, x2, x3, x4) # endif /* not DEFINED_ONCE */ # define DEBUG_PRINT_COMPILED_PATTERN(p, s, e) \ if (debug) PREFIX(print_partial_compiled_pattern) (s, e) # define DEBUG_PRINT_DOUBLE_STRING(w, s1, sz1, s2, sz2) \ if (debug) PREFIX(print_double_string) (w, s1, sz1, s2, sz2) /* Print the fastmap in human-readable form. */ # ifndef DEFINED_ONCE void print_fastmap (fastmap) char *fastmap; { unsigned was_a_range = 0; unsigned i = 0; while (i < (1 << BYTEWIDTH)) { if (fastmap[i++]) { was_a_range = 0; putchar (i - 1); while (i < (1 << BYTEWIDTH) && fastmap[i]) { was_a_range = 1; i++; } if (was_a_range) { printf ("-"); putchar (i - 1); } } } putchar ('\n'); } # endif /* not DEFINED_ONCE */ /* Print a compiled pattern string in human-readable form, starting at the START pointer into it and ending just before the pointer END. */ void PREFIX(print_partial_compiled_pattern) (start, end) UCHAR_T *start; UCHAR_T *end; { int mcnt, mcnt2; UCHAR_T *p1; UCHAR_T *p = start; UCHAR_T *pend = end; if (start == NULL) { printf ("(null)\n"); return; } /* Loop over pattern commands. 
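   Each opcode is printed as its offset from the start of the buffer,
   a tab, and a `/'-separated opcode name and arguments, e.g.
   `/exactn/1/a' for a one-character exact match.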
*/ while (p < pend) { # ifdef _LIBC printf ("%td:\t", p - start); # else printf ("%ld:\t", (long int) (p - start)); # endif switch ((re_opcode_t) *p++) { case no_op: printf ("/no_op"); break; case exactn: mcnt = *p++; printf ("/exactn/%d", mcnt); do { putchar ('/'); PUT_CHAR (*p++); } while (--mcnt); break; # ifdef MBS_SUPPORT case exactn_bin: mcnt = *p++; printf ("/exactn_bin/%d", mcnt); do { printf("/%lx", (long int) *p++); } while (--mcnt); break; # endif /* MBS_SUPPORT */ case start_memory: mcnt = *p++; printf ("/start_memory/%d/%ld", mcnt, (long int) *p++); break; case stop_memory: mcnt = *p++; printf ("/stop_memory/%d/%ld", mcnt, (long int) *p++); break; case duplicate: printf ("/duplicate/%ld", (long int) *p++); break; case anychar: printf ("/anychar"); break; case charset: case charset_not: { # ifdef WCHAR int i, length; wchar_t *workp = p; printf ("/charset [%s", (re_opcode_t) *(workp - 1) == charset_not ? "^" : ""); p += 5; length = *workp++; /* the length of char_classes */ for (i=0 ; ibuffer; PREFIX(print_partial_compiled_pattern) (buffer, buffer + bufp->used / sizeof(UCHAR_T)); printf ("%ld bytes used/%ld bytes allocated.\n", bufp->used, bufp->allocated); if (bufp->fastmap_accurate && bufp->fastmap) { printf ("fastmap: "); print_fastmap (bufp->fastmap); } # ifdef _LIBC printf ("re_nsub: %Zd\t", bufp->re_nsub); # else printf ("re_nsub: %ld\t", (long int) bufp->re_nsub); # endif printf ("regs_alloc: %d\t", bufp->regs_allocated); printf ("can_be_null: %d\t", bufp->can_be_null); printf ("newline_anchor: %d\n", bufp->newline_anchor); printf ("no_sub: %d\t", bufp->no_sub); printf ("not_bol: %d\t", bufp->not_bol); printf ("not_eol: %d\t", bufp->not_eol); printf ("syntax: %lx\n", bufp->syntax); /* Perhaps we should print the translate table? */ } void PREFIX(print_double_string) (where, string1, size1, string2, size2) const CHAR_T *where; const CHAR_T *string1; const CHAR_T *string2; int size1; int size2; { int this_char; if (where == NULL) printf ("(null)"); else { int cnt; if (FIRST_STRING_P (where)) { for (this_char = where - string1; this_char < size1; this_char++) PUT_CHAR (string1[this_char]); where = string2; } cnt = 0; for (this_char = where - string2; this_char < size2; this_char++) { PUT_CHAR (string2[this_char]); if (++cnt > 100) { fputs ("...", stdout); break; } } } } # ifndef DEFINED_ONCE void printchar (c) int c; { putc (c, stderr); } # endif # else /* not DEBUG */ # ifndef DEFINED_ONCE # undef assert # define assert(e) # define DEBUG_STATEMENT(e) # define DEBUG_PRINT1(x) # define DEBUG_PRINT2(x1, x2) # define DEBUG_PRINT3(x1, x2, x3) # define DEBUG_PRINT4(x1, x2, x3, x4) # endif /* not DEFINED_ONCE */ # define DEBUG_PRINT_COMPILED_PATTERN(p, s, e) # define DEBUG_PRINT_DOUBLE_STRING(w, s1, sz1, s2, sz2) # endif /* not DEBUG */ # ifdef WCHAR /* This convert a multibyte string to a wide character string. And write their correspondances to offset_buffer(see below) and write whether each wchar_t is binary data to is_binary. This assume invalid multibyte sequences as binary data. We assume offset_buffer and is_binary is already allocated enough space. */ static size_t convert_mbs_to_wcs (CHAR_T *dest, const unsigned char* src, size_t len, int *offset_buffer, char *is_binary); static size_t convert_mbs_to_wcs (dest, src, len, offset_buffer, is_binary) CHAR_T *dest; const unsigned char* src; size_t len; /* the length of multibyte string. */ /* It hold correspondances between src(char string) and dest(wchar_t string) for optimization. e.g. 
src = "xxxyzz" dest = {'X', 'Y', 'Z'} (each "xxx", "y" and "zz" represent one multibyte character corresponding to 'X', 'Y' and 'Z'.) offset_buffer = {0, 0+3("xxx"), 0+3+1("y"), 0+3+1+2("zz")} = {0, 3, 4, 6} */ int *offset_buffer; char *is_binary; { wchar_t *pdest = dest; const unsigned char *psrc = src; size_t wc_count = 0; mbstate_t mbs; int i, consumed; size_t mb_remain = len; size_t mb_count = 0; /* Initialize the conversion state. */ memset (&mbs, 0, sizeof (mbstate_t)); offset_buffer[0] = 0; for( ; mb_remain > 0 ; ++wc_count, ++pdest, mb_remain -= consumed, psrc += consumed) { #ifdef _LIBC consumed = __mbrtowc (pdest, psrc, mb_remain, &mbs); #else consumed = mbrtowc (pdest, psrc, mb_remain, &mbs); #endif if (consumed <= 0) /* failed to convert. maybe src contains binary data. So we consume 1 byte manualy. */ { *pdest = *psrc; consumed = 1; is_binary[wc_count] = TRUE; } else is_binary[wc_count] = FALSE; /* In sjis encoding, we use yen sign as escape character in place of reverse solidus. So we convert 0x5c(yen sign in sjis) to not 0xa5(yen sign in UCS2) but 0x5c(reverse solidus in UCS2). */ if (consumed == 1 && (int) *psrc == 0x5c && (int) *pdest == 0xa5) *pdest = (wchar_t) *psrc; offset_buffer[wc_count + 1] = mb_count += consumed; } /* Fill remain of the buffer with sentinel. */ for (i = wc_count + 1 ; i <= len ; i++) offset_buffer[i] = mb_count + 1; return wc_count; } # endif /* WCHAR */ #else /* not INSIDE_RECURSION */ /* Set by `re_set_syntax' to the current regexp syntax to recognize. Can also be assigned to arbitrarily: each pattern buffer stores its own syntax, so it can be changed between regex compilations. */ /* This has no initializer because initialized variables in Emacs become read-only after dumping. */ reg_syntax_t re_syntax_options; /* Specify the precise syntax of regexps for compilation. This provides for compatibility for various utilities which historically have different, incompatible syntaxes. The argument SYNTAX is a bit mask comprised of the various bits defined in regex.h. We return the old syntax. */ reg_syntax_t re_set_syntax (syntax) reg_syntax_t syntax; { reg_syntax_t ret = re_syntax_options; re_syntax_options = syntax; # ifdef DEBUG if (syntax & RE_DEBUG) debug = 1; else if (debug) /* was on but now is not */ debug = 0; # endif /* DEBUG */ return ret; } # ifdef _LIBC weak_alias (__re_set_syntax, re_set_syntax) # endif /* This table gives an error message for each of the error codes listed in regex.h. Obviously the order here has to be same as there. POSIX doesn't require that we do anything for REG_NOERROR, but why not be nice? 
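   (An illustrative note, not text from the original: compiling the broken
   pattern "[abc" fails with REG_EBRACK, and regerror then formats the
   "Unmatched [ or [^" entry below; gettext_noop only marks the strings for
   translation, the lookup itself is an ordinary index into this array by
   error code.)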
*/ static const char *re_error_msgid[] = { gettext_noop ("Success"), /* REG_NOERROR */ gettext_noop ("No match"), /* REG_NOMATCH */ gettext_noop ("Invalid regular expression"), /* REG_BADPAT */ gettext_noop ("Invalid collation character"), /* REG_ECOLLATE */ gettext_noop ("Invalid character class name"), /* REG_ECTYPE */ gettext_noop ("Trailing backslash"), /* REG_EESCAPE */ gettext_noop ("Invalid back reference"), /* REG_ESUBREG */ gettext_noop ("Unmatched [ or [^"), /* REG_EBRACK */ gettext_noop ("Unmatched ( or \\("), /* REG_EPAREN */ gettext_noop ("Unmatched \\{"), /* REG_EBRACE */ gettext_noop ("Invalid content of \\{\\}"), /* REG_BADBR */ gettext_noop ("Invalid range end"), /* REG_ERANGE */ gettext_noop ("Memory exhausted"), /* REG_ESPACE */ gettext_noop ("Invalid preceding regular expression"), /* REG_BADRPT */ gettext_noop ("Premature end of regular expression"), /* REG_EEND */ gettext_noop ("Regular expression too big"), /* REG_ESIZE */ gettext_noop ("Unmatched ) or \\)") /* REG_ERPAREN */ }; #endif /* INSIDE_RECURSION */ #ifndef DEFINED_ONCE /* Avoiding alloca during matching, to placate r_alloc. */ /* Define MATCH_MAY_ALLOCATE unless we need to make sure that the searching and matching functions should not call alloca. On some systems, alloca is implemented in terms of malloc, and if we're using the relocating allocator routines, then malloc could cause a relocation, which might (if the strings being searched are in the ralloc heap) shift the data out from underneath the regexp routines. Here's another reason to avoid allocation: Emacs processes input from X in a signal handler; processing X input may call malloc; if input arrives while a matching routine is calling malloc, then we're scrod. But Emacs can't just block input while calling matching routines; then we don't notice interrupts when they come in. So, Emacs blocks input around all regexp calls except the matching calls, which it leaves unprotected, in the faith that they will not malloc. */ /* Normally, this is fine. */ # define MATCH_MAY_ALLOCATE /* When using GNU C, we are not REALLY using the C alloca, no matter what config.h may say. So don't take precautions for it. */ # ifdef __GNUC__ # undef C_ALLOCA # endif /* The match routines may not allocate if (1) they would do it with malloc and (2) it's not safe for them to use malloc. Note that if REL_ALLOC is defined, matching would not use malloc for the failure stack, but we would still use it for the register vectors; so REL_ALLOC should not affect this. */ # if (defined C_ALLOCA || defined REGEX_MALLOC) && defined emacs # undef MATCH_MAY_ALLOCATE # endif #endif /* not DEFINED_ONCE */ #ifdef INSIDE_RECURSION /* Failure stack declarations and macros; both re_compile_fastmap and re_match_2 use a failure stack. These have to be macros because of REGEX_ALLOCATE_STACK. */ /* Number of failure points for which to initially allocate space when matching. If this number is exceeded, we allocate more space, so it is not a hard limit. */ # ifndef INIT_FAILURE_ALLOC # define INIT_FAILURE_ALLOC 5 # endif /* Roughly the maximum number of failure points on the stack. Would be exactly that if always used MAX_FAILURE_ITEMS items each time we failed. This is a variable only so users of regex can assign to it; we never change it ourselves. */ # ifdef INT_IS_16BIT # ifndef DEFINED_ONCE # if defined MATCH_MAY_ALLOCATE /* 4400 was enough to cause a crash on Alpha OSF/1, whose default stack limit is 2mb. 
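   A rough, hedged sizing example (not from the original comments): with the
   item counts defined further down, MAX_FAILURE_ITEMS is 5 * 3 + 4 = 19, so
   DOUBLE_FAIL_STACK stops growing the stack near 4000 * 19 = 76000 elements;
   at 4 bytes per fail_stack_elt_t that is roughly 300 kB, comfortably below
   a 2 MB stack limit even when REGEX_ALLOCATE_STACK maps to alloca.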
*/ long int re_max_failures = 4000; # else long int re_max_failures = 2000; # endif # endif union PREFIX(fail_stack_elt) { UCHAR_T *pointer; long int integer; }; typedef union PREFIX(fail_stack_elt) PREFIX(fail_stack_elt_t); typedef struct { PREFIX(fail_stack_elt_t) *stack; unsigned long int size; unsigned long int avail; /* Offset of next open position. */ } PREFIX(fail_stack_type); # else /* not INT_IS_16BIT */ # ifndef DEFINED_ONCE # if defined MATCH_MAY_ALLOCATE /* 4400 was enough to cause a crash on Alpha OSF/1, whose default stack limit is 2mb. */ int re_max_failures = 4000; # else int re_max_failures = 2000; # endif # endif union PREFIX(fail_stack_elt) { UCHAR_T *pointer; int integer; }; typedef union PREFIX(fail_stack_elt) PREFIX(fail_stack_elt_t); typedef struct { PREFIX(fail_stack_elt_t) *stack; unsigned size; unsigned avail; /* Offset of next open position. */ } PREFIX(fail_stack_type); # endif /* INT_IS_16BIT */ # ifndef DEFINED_ONCE # define FAIL_STACK_EMPTY() (fail_stack.avail == 0) # define FAIL_STACK_PTR_EMPTY() (fail_stack_ptr->avail == 0) # define FAIL_STACK_FULL() (fail_stack.avail == fail_stack.size) # endif /* Define macros to initialize and free the failure stack. Do `return -2' if the alloc fails. */ # ifdef MATCH_MAY_ALLOCATE # define INIT_FAIL_STACK() \ do { \ fail_stack.stack = (PREFIX(fail_stack_elt_t) *) \ REGEX_ALLOCATE_STACK (INIT_FAILURE_ALLOC * sizeof (PREFIX(fail_stack_elt_t))); \ \ if (fail_stack.stack == NULL) \ return -2; \ \ fail_stack.size = INIT_FAILURE_ALLOC; \ fail_stack.avail = 0; \ } while (0) # define RESET_FAIL_STACK() REGEX_FREE_STACK (fail_stack.stack) # else # define INIT_FAIL_STACK() \ do { \ fail_stack.avail = 0; \ } while (0) # define RESET_FAIL_STACK() # endif /* Double the size of FAIL_STACK, up to approximately `re_max_failures' items. Return 1 if succeeds, and 0 if either ran out of memory allocating space for it or it was already too large. REGEX_REALLOCATE_STACK requires `destination' be declared. */ # define DOUBLE_FAIL_STACK(fail_stack) \ ((fail_stack).size > (unsigned) (re_max_failures * MAX_FAILURE_ITEMS) \ ? 0 \ : ((fail_stack).stack = (PREFIX(fail_stack_elt_t) *) \ REGEX_REALLOCATE_STACK ((fail_stack).stack, \ (fail_stack).size * sizeof (PREFIX(fail_stack_elt_t)), \ ((fail_stack).size << 1) * sizeof (PREFIX(fail_stack_elt_t))),\ \ (fail_stack).stack == NULL \ ? 0 \ : ((fail_stack).size <<= 1, \ 1))) /* Push pointer POINTER on FAIL_STACK. Return 1 if was able to do so and 0 if ran out of memory allocating space to do so. */ # define PUSH_PATTERN_OP(POINTER, FAIL_STACK) \ ((FAIL_STACK_FULL () \ && !DOUBLE_FAIL_STACK (FAIL_STACK)) \ ? 0 \ : ((FAIL_STACK).stack[(FAIL_STACK).avail++].pointer = POINTER, \ 1)) /* Push a pointer value onto the failure stack. Assumes the variable `fail_stack'. Probably should only be called from within `PUSH_FAILURE_POINT'. */ # define PUSH_FAILURE_POINTER(item) \ fail_stack.stack[fail_stack.avail++].pointer = (UCHAR_T *) (item) /* This pushes an integer-valued item onto the failure stack. Assumes the variable `fail_stack'. Probably should only be called from within `PUSH_FAILURE_POINT'. */ # define PUSH_FAILURE_INT(item) \ fail_stack.stack[fail_stack.avail++].integer = (item) /* Push a fail_stack_elt_t value onto the failure stack. Assumes the variable `fail_stack'. Probably should only be called from within `PUSH_FAILURE_POINT'. */ # define PUSH_FAILURE_ELT(item) \ fail_stack.stack[fail_stack.avail++] = (item) /* These three POP... operations complement the three PUSH... operations. 
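   A hedged usage sketch (the variable names here are placeholders, not code
   from the original): pops undo pushes in strictly reverse order, e.g.

       PUSH_FAILURE_INT (low);
       PUSH_FAILURE_POINTER (p);
       ...
       p   = POP_FAILURE_POINTER ();
       low = POP_FAILURE_INT ();

   which is the discipline PUSH_FAILURE_POINT and POP_FAILURE_POINT follow
   below.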
All assume that `fail_stack' is nonempty. */ # define POP_FAILURE_POINTER() fail_stack.stack[--fail_stack.avail].pointer # define POP_FAILURE_INT() fail_stack.stack[--fail_stack.avail].integer # define POP_FAILURE_ELT() fail_stack.stack[--fail_stack.avail] /* Used to omit pushing failure point id's when we're not debugging. */ # ifdef DEBUG # define DEBUG_PUSH PUSH_FAILURE_INT # define DEBUG_POP(item_addr) *(item_addr) = POP_FAILURE_INT () # else # define DEBUG_PUSH(item) # define DEBUG_POP(item_addr) # endif /* Push the information about the state we will need if we ever fail back to it. Requires variables fail_stack, regstart, regend, reg_info, and num_regs_pushed be declared. DOUBLE_FAIL_STACK requires `destination' be declared. Does `return FAILURE_CODE' if runs out of memory. */ # define PUSH_FAILURE_POINT(pattern_place, string_place, failure_code) \ do { \ char *destination; \ /* Must be int, so when we don't save any registers, the arithmetic \ of 0 + -1 isn't done as unsigned. */ \ /* Can't be int, since there is not a shred of a guarantee that int \ is wide enough to hold a value of something to which pointer can \ be assigned */ \ active_reg_t this_reg; \ \ DEBUG_STATEMENT (failure_id++); \ DEBUG_STATEMENT (nfailure_points_pushed++); \ DEBUG_PRINT2 ("\nPUSH_FAILURE_POINT #%u:\n", failure_id); \ DEBUG_PRINT2 (" Before push, next avail: %d\n", (fail_stack).avail);\ DEBUG_PRINT2 (" size: %d\n", (fail_stack).size);\ \ DEBUG_PRINT2 (" slots needed: %ld\n", NUM_FAILURE_ITEMS); \ DEBUG_PRINT2 (" available: %d\n", REMAINING_AVAIL_SLOTS); \ \ /* Ensure we have enough space allocated for what we will push. */ \ while (REMAINING_AVAIL_SLOTS < NUM_FAILURE_ITEMS) \ { \ if (!DOUBLE_FAIL_STACK (fail_stack)) \ return failure_code; \ \ DEBUG_PRINT2 ("\n Doubled stack; size now: %d\n", \ (fail_stack).size); \ DEBUG_PRINT2 (" slots available: %d\n", REMAINING_AVAIL_SLOTS);\ } \ \ /* Push the info, starting with the registers. 
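      An illustrative summary of the frame this builds, not a separate
      mechanism: for every active register, regstart, regend and
      reg_info[..].word are pushed (NUM_REG_ITEMS of them), followed by
      lowest_active_reg, highest_active_reg, the pattern position, the
      string position and, under DEBUG, the failure id; POP_FAILURE_POINT
      unwinds the same fields in the opposite order.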
*/ \ DEBUG_PRINT1 ("\n"); \ \ if (1) \ for (this_reg = lowest_active_reg; this_reg <= highest_active_reg; \ this_reg++) \ { \ DEBUG_PRINT2 (" Pushing reg: %lu\n", this_reg); \ DEBUG_STATEMENT (num_regs_pushed++); \ \ DEBUG_PRINT2 (" start: %p\n", regstart[this_reg]); \ PUSH_FAILURE_POINTER (regstart[this_reg]); \ \ DEBUG_PRINT2 (" end: %p\n", regend[this_reg]); \ PUSH_FAILURE_POINTER (regend[this_reg]); \ \ DEBUG_PRINT2 (" info: %p\n ", \ reg_info[this_reg].word.pointer); \ DEBUG_PRINT2 (" match_null=%d", \ REG_MATCH_NULL_STRING_P (reg_info[this_reg])); \ DEBUG_PRINT2 (" active=%d", IS_ACTIVE (reg_info[this_reg])); \ DEBUG_PRINT2 (" matched_something=%d", \ MATCHED_SOMETHING (reg_info[this_reg])); \ DEBUG_PRINT2 (" ever_matched=%d", \ EVER_MATCHED_SOMETHING (reg_info[this_reg])); \ DEBUG_PRINT1 ("\n"); \ PUSH_FAILURE_ELT (reg_info[this_reg].word); \ } \ \ DEBUG_PRINT2 (" Pushing low active reg: %ld\n", lowest_active_reg);\ PUSH_FAILURE_INT (lowest_active_reg); \ \ DEBUG_PRINT2 (" Pushing high active reg: %ld\n", highest_active_reg);\ PUSH_FAILURE_INT (highest_active_reg); \ \ DEBUG_PRINT2 (" Pushing pattern %p:\n", pattern_place); \ DEBUG_PRINT_COMPILED_PATTERN (bufp, pattern_place, pend); \ PUSH_FAILURE_POINTER (pattern_place); \ \ DEBUG_PRINT2 (" Pushing string %p: `", string_place); \ DEBUG_PRINT_DOUBLE_STRING (string_place, string1, size1, string2, \ size2); \ DEBUG_PRINT1 ("'\n"); \ PUSH_FAILURE_POINTER (string_place); \ \ DEBUG_PRINT2 (" Pushing failure id: %u\n", failure_id); \ DEBUG_PUSH (failure_id); \ } while (0) # ifndef DEFINED_ONCE /* This is the number of items that are pushed and popped on the stack for each register. */ # define NUM_REG_ITEMS 3 /* Individual items aside from the registers. */ # ifdef DEBUG # define NUM_NONREG_ITEMS 5 /* Includes failure point id. */ # else # define NUM_NONREG_ITEMS 4 # endif /* We push at most this many items on the stack. */ /* We used to use (num_regs - 1), which is the number of registers this regexp will save; but that was changed to 5 to avoid stack overflow for a regexp with lots of parens. */ # define MAX_FAILURE_ITEMS (5 * NUM_REG_ITEMS + NUM_NONREG_ITEMS) /* We actually push this many items. */ # define NUM_FAILURE_ITEMS \ (((0 \ ? 0 : highest_active_reg - lowest_active_reg + 1) \ * NUM_REG_ITEMS) \ + NUM_NONREG_ITEMS) /* How many items can still be added to the stack without overflowing it. */ # define REMAINING_AVAIL_SLOTS ((fail_stack).size - (fail_stack).avail) # endif /* not DEFINED_ONCE */ /* Pops what PUSH_FAIL_STACK pushes. We restore into the parameters, all of which should be lvalues: STR -- the saved data position. PAT -- the saved pattern position. LOW_REG, HIGH_REG -- the highest and lowest active registers. REGSTART, REGEND -- arrays of string positions. REG_INFO -- array of information about each subexpression. Also assumes the variables `fail_stack' and (if debugging), `bufp', `pend', `string1', `size1', `string2', and `size2'. */ # define POP_FAILURE_POINT(str, pat, low_reg, high_reg, regstart, regend, reg_info)\ { \ DEBUG_STATEMENT (unsigned failure_id;) \ active_reg_t this_reg; \ const UCHAR_T *string_temp; \ \ assert (!FAIL_STACK_EMPTY ()); \ \ /* Remove failure points and point to how many regs pushed. 
*/ \ DEBUG_PRINT1 ("POP_FAILURE_POINT:\n"); \ DEBUG_PRINT2 (" Before pop, next avail: %d\n", fail_stack.avail); \ DEBUG_PRINT2 (" size: %d\n", fail_stack.size); \ \ assert (fail_stack.avail >= NUM_NONREG_ITEMS); \ \ DEBUG_POP (&failure_id); \ DEBUG_PRINT2 (" Popping failure id: %u\n", failure_id); \ \ /* If the saved string location is NULL, it came from an \ on_failure_keep_string_jump opcode, and we want to throw away the \ saved NULL, thus retaining our current position in the string. */ \ string_temp = POP_FAILURE_POINTER (); \ if (string_temp != NULL) \ str = (const CHAR_T *) string_temp; \ \ DEBUG_PRINT2 (" Popping string %p: `", str); \ DEBUG_PRINT_DOUBLE_STRING (str, string1, size1, string2, size2); \ DEBUG_PRINT1 ("'\n"); \ \ pat = (UCHAR_T *) POP_FAILURE_POINTER (); \ DEBUG_PRINT2 (" Popping pattern %p:\n", pat); \ DEBUG_PRINT_COMPILED_PATTERN (bufp, pat, pend); \ \ /* Restore register info. */ \ high_reg = (active_reg_t) POP_FAILURE_INT (); \ DEBUG_PRINT2 (" Popping high active reg: %ld\n", high_reg); \ \ low_reg = (active_reg_t) POP_FAILURE_INT (); \ DEBUG_PRINT2 (" Popping low active reg: %ld\n", low_reg); \ \ if (1) \ for (this_reg = high_reg; this_reg >= low_reg; this_reg--) \ { \ DEBUG_PRINT2 (" Popping reg: %ld\n", this_reg); \ \ reg_info[this_reg].word = POP_FAILURE_ELT (); \ DEBUG_PRINT2 (" info: %p\n", \ reg_info[this_reg].word.pointer); \ \ regend[this_reg] = (const CHAR_T *) POP_FAILURE_POINTER (); \ DEBUG_PRINT2 (" end: %p\n", regend[this_reg]); \ \ regstart[this_reg] = (const CHAR_T *) POP_FAILURE_POINTER (); \ DEBUG_PRINT2 (" start: %p\n", regstart[this_reg]); \ } \ else \ { \ for (this_reg = highest_active_reg; this_reg > high_reg; this_reg--) \ { \ reg_info[this_reg].word.integer = 0; \ regend[this_reg] = 0; \ regstart[this_reg] = 0; \ } \ highest_active_reg = high_reg; \ } \ \ set_regs_matched_done = 0; \ DEBUG_STATEMENT (nfailure_points_popped++); \ } /* POP_FAILURE_POINT */ /* Structure for per-register (a.k.a. per-group) information. Other register information, such as the starting and ending positions (which are addresses), and the list of inner groups (which is a bits list) are maintained in separate variables. We are making a (strictly speaking) nonportable assumption here: that the compiler will pack our bit fields into something that fits into the type of `word', i.e., is something that fits into one item on the failure stack. */ /* Declarations and macros for re_match_2. */ typedef union { PREFIX(fail_stack_elt_t) word; struct { /* This field is one if this group can match the empty string, zero if not. If not yet determined, `MATCH_NULL_UNSET_VALUE'. */ # define MATCH_NULL_UNSET_VALUE 3 unsigned match_null_string_p : 2; unsigned is_active : 1; unsigned matched_something : 1; unsigned ever_matched_something : 1; } bits; } PREFIX(register_info_type); # ifndef DEFINED_ONCE # define REG_MATCH_NULL_STRING_P(R) ((R).bits.match_null_string_p) # define IS_ACTIVE(R) ((R).bits.is_active) # define MATCHED_SOMETHING(R) ((R).bits.matched_something) # define EVER_MATCHED_SOMETHING(R) ((R).bits.ever_matched_something) /* Call this when have matched a real character; it sets `matched' flags for the subexpressions which we are currently inside. Also records that those subexprs have matched. 
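   (A hedged example, not from the original text: while matching the "b" in
   the pattern "(a(b))c", groups 1 and 2 are both active, so a single call
   marks matched_something and ever_matched_something for both of their
   reg_info entries; once set_regs_matched_done is set, further calls do
   nothing until the flag is cleared again.)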
*/ # define SET_REGS_MATCHED() \ do \ { \ if (!set_regs_matched_done) \ { \ active_reg_t r; \ set_regs_matched_done = 1; \ for (r = lowest_active_reg; r <= highest_active_reg; r++) \ { \ MATCHED_SOMETHING (reg_info[r]) \ = EVER_MATCHED_SOMETHING (reg_info[r]) \ = 1; \ } \ } \ } \ while (0) # endif /* not DEFINED_ONCE */ /* Registers are set to a sentinel when they haven't yet matched. */ static CHAR_T PREFIX(reg_unset_dummy); # define REG_UNSET_VALUE (&PREFIX(reg_unset_dummy)) # define REG_UNSET(e) ((e) == REG_UNSET_VALUE) /* Subroutine declarations and macros for regex_compile. */ static void PREFIX(store_op1) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg)); static void PREFIX(store_op2) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg1, int arg2)); static void PREFIX(insert_op1) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg, UCHAR_T *end)); static void PREFIX(insert_op2) _RE_ARGS ((re_opcode_t op, UCHAR_T *loc, int arg1, int arg2, UCHAR_T *end)); static boolean PREFIX(at_begline_loc_p) _RE_ARGS ((const CHAR_T *pattern, const CHAR_T *p, reg_syntax_t syntax)); static boolean PREFIX(at_endline_loc_p) _RE_ARGS ((const CHAR_T *p, const CHAR_T *pend, reg_syntax_t syntax)); # ifdef WCHAR static reg_errcode_t wcs_compile_range _RE_ARGS ((CHAR_T range_start, const CHAR_T **p_ptr, const CHAR_T *pend, char *translate, reg_syntax_t syntax, UCHAR_T *b, CHAR_T *char_set)); static void insert_space _RE_ARGS ((int num, CHAR_T *loc, CHAR_T *end)); # else /* BYTE */ static reg_errcode_t byte_compile_range _RE_ARGS ((unsigned int range_start, const char **p_ptr, const char *pend, char *translate, reg_syntax_t syntax, unsigned char *b)); # endif /* WCHAR */ /* Fetch the next character in the uncompiled pattern---translating it if necessary. Also cast from a signed character in the constant string passed to us by the user to an unsigned char that we can use as an array index (in, e.g., `translate'). */ /* ifdef MBS_SUPPORT, we translate only if character <= 0xff, because it is impossible to allocate 4GB array for some encodings which have 4 byte character_set like UCS4. */ # ifndef PATFETCH # ifdef WCHAR # define PATFETCH(c) \ do {if (p == pend) return REG_EEND; \ c = (UCHAR_T) *p++; \ if (translate && (c <= 0xff)) c = (UCHAR_T) translate[c]; \ } while (0) # else /* BYTE */ # define PATFETCH(c) \ do {if (p == pend) return REG_EEND; \ c = (unsigned char) *p++; \ if (translate) c = (unsigned char) translate[c]; \ } while (0) # endif /* WCHAR */ # endif /* Fetch the next character in the uncompiled pattern, with no translation. */ # define PATFETCH_RAW(c) \ do {if (p == pend) return REG_EEND; \ c = (UCHAR_T) *p++; \ } while (0) /* Go backwards one character in the pattern. */ # define PATUNFETCH p-- /* If `translate' is non-null, return translate[D], else just D. We cast the subscript to translate because some data is declared as `char *', to avoid warnings when a string constant is passed. But when we use a character as a subscript we must make it unsigned. */ /* ifdef MBS_SUPPORT, we translate only if character <= 0xff, because it is impossible to allocate 4GB array for some encodings which have 4 byte character_set like UCS4. */ # ifndef TRANSLATE # ifdef WCHAR # define TRANSLATE(d) \ ((translate && ((UCHAR_T) (d)) <= 0xff) \ ? (char) translate[(unsigned char) (d)] : (d)) # else /* BYTE */ # define TRANSLATE(d) \ (translate ? (char) translate[(unsigned char) (d)] : (d)) # endif /* WCHAR */ # endif /* Macros for outputting the compiled pattern into `buffer'. 
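   (A hypothetical usage sketch, not a call taken from this file:

       BUF_PUSH_3 (exactn, 1, 'a');

   would first run GET_BUFFER_SPACE (3) to guarantee room, then append the
   exactn opcode, its count byte and the character at b, advancing b by
   three UCHAR_T elements.)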
*/ /* If the buffer isn't allocated when it comes in, use this. */ # define INIT_BUF_SIZE (32 * sizeof(UCHAR_T)) /* Make sure we have at least N more bytes of space in buffer. */ # ifdef WCHAR # define GET_BUFFER_SPACE(n) \ while (((unsigned long)b - (unsigned long)COMPILED_BUFFER_VAR \ + (n)*sizeof(CHAR_T)) > bufp->allocated) \ EXTEND_BUFFER () # else /* BYTE */ # define GET_BUFFER_SPACE(n) \ while ((unsigned long) (b - bufp->buffer + (n)) > bufp->allocated) \ EXTEND_BUFFER () # endif /* WCHAR */ /* Make sure we have one more byte of buffer space and then add C to it. */ # define BUF_PUSH(c) \ do { \ GET_BUFFER_SPACE (1); \ *b++ = (UCHAR_T) (c); \ } while (0) /* Ensure we have two more bytes of buffer space and then append C1 and C2. */ # define BUF_PUSH_2(c1, c2) \ do { \ GET_BUFFER_SPACE (2); \ *b++ = (UCHAR_T) (c1); \ *b++ = (UCHAR_T) (c2); \ } while (0) /* As with BUF_PUSH_2, except for three bytes. */ # define BUF_PUSH_3(c1, c2, c3) \ do { \ GET_BUFFER_SPACE (3); \ *b++ = (UCHAR_T) (c1); \ *b++ = (UCHAR_T) (c2); \ *b++ = (UCHAR_T) (c3); \ } while (0) /* Store a jump with opcode OP at LOC to location TO. We store a relative address offset by the three bytes the jump itself occupies. */ # define STORE_JUMP(op, loc, to) \ PREFIX(store_op1) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE))) /* Likewise, for a two-argument jump. */ # define STORE_JUMP2(op, loc, to, arg) \ PREFIX(store_op2) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE)), arg) /* Like `STORE_JUMP', but for inserting. Assume `b' is the buffer end. */ # define INSERT_JUMP(op, loc, to) \ PREFIX(insert_op1) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE)), b) /* Like `STORE_JUMP2', but for inserting. Assume `b' is the buffer end. */ # define INSERT_JUMP2(op, loc, to, arg) \ PREFIX(insert_op2) (op, loc, (int) ((to) - (loc) - (1 + OFFSET_ADDRESS_SIZE)),\ arg, b) /* This is not an arbitrary limit: the arguments which represent offsets into the pattern are two bytes long. So if 2^16 bytes turns out to be too small, many things would have to change. */ /* Any other compiler which, like MSC, has allocation limit below 2^16 bytes will have to use approach similar to what was done below for MSC and drop MAX_BUF_SIZE a bit. Otherwise you may end up reallocating to 0 bytes. Such thing is not going to work too well. You have been warned!! */ # ifndef DEFINED_ONCE # if defined _MSC_VER && !defined WIN32 /* Microsoft C 16-bit versions limit malloc to approx 65512 bytes. The REALLOC define eliminates a flurry of conversion warnings, but is not required. */ # define MAX_BUF_SIZE 65500L # define REALLOC(p,s) realloc ((p), (size_t) (s)) # else # define MAX_BUF_SIZE (1L << 16) # define REALLOC(p,s) realloc ((p), (s)) # endif /* Extend the buffer by twice its current size via realloc and reset the pointers that pointed into the old block to point to the correct places in the new one. If extending the buffer results in it being larger than MAX_BUF_SIZE, then flag memory exhausted. 
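   An illustrative growth sequence (not from the original comments): starting
   from INIT_BUF_SIZE the allocation doubles, 32, 64, 128, ... bytes, until it
   would pass MAX_BUF_SIZE, at which point REG_ESIZE is returned; whenever
   realloc moves the block, incr = new_buffer - old_buffer and b, begalt,
   fixup_alt_jump, laststart and pending_exact are all rebased by that amount.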
*/ # if __BOUNDED_POINTERS__ # define SET_HIGH_BOUND(P) (__ptrhigh (P) = __ptrlow (P) + bufp->allocated) # define MOVE_BUFFER_POINTER(P) \ (__ptrlow (P) += incr, SET_HIGH_BOUND (P), __ptrvalue (P) += incr) # define ELSE_EXTEND_BUFFER_HIGH_BOUND \ else \ { \ SET_HIGH_BOUND (b); \ SET_HIGH_BOUND (begalt); \ if (fixup_alt_jump) \ SET_HIGH_BOUND (fixup_alt_jump); \ if (laststart) \ SET_HIGH_BOUND (laststart); \ if (pending_exact) \ SET_HIGH_BOUND (pending_exact); \ } # else # define MOVE_BUFFER_POINTER(P) (P) += incr # define ELSE_EXTEND_BUFFER_HIGH_BOUND # endif # endif /* not DEFINED_ONCE */ # ifdef WCHAR # define EXTEND_BUFFER() \ do { \ UCHAR_T *old_buffer = COMPILED_BUFFER_VAR; \ int wchar_count; \ if (bufp->allocated + sizeof(UCHAR_T) > MAX_BUF_SIZE) \ return REG_ESIZE; \ bufp->allocated <<= 1; \ if (bufp->allocated > MAX_BUF_SIZE) \ bufp->allocated = MAX_BUF_SIZE; \ /* How many characters the new buffer can have? */ \ wchar_count = bufp->allocated / sizeof(UCHAR_T); \ if (wchar_count == 0) wchar_count = 1; \ /* Truncate the buffer to CHAR_T align. */ \ bufp->allocated = wchar_count * sizeof(UCHAR_T); \ RETALLOC (COMPILED_BUFFER_VAR, wchar_count, UCHAR_T); \ bufp->buffer = (char*)COMPILED_BUFFER_VAR; \ if (COMPILED_BUFFER_VAR == NULL) \ return REG_ESPACE; \ /* If the buffer moved, move all the pointers into it. */ \ if (old_buffer != COMPILED_BUFFER_VAR) \ { \ int incr = COMPILED_BUFFER_VAR - old_buffer; \ MOVE_BUFFER_POINTER (b); \ MOVE_BUFFER_POINTER (begalt); \ if (fixup_alt_jump) \ MOVE_BUFFER_POINTER (fixup_alt_jump); \ if (laststart) \ MOVE_BUFFER_POINTER (laststart); \ if (pending_exact) \ MOVE_BUFFER_POINTER (pending_exact); \ } \ ELSE_EXTEND_BUFFER_HIGH_BOUND \ } while (0) # else /* BYTE */ # define EXTEND_BUFFER() \ do { \ UCHAR_T *old_buffer = COMPILED_BUFFER_VAR; \ if (bufp->allocated == MAX_BUF_SIZE) \ return REG_ESIZE; \ bufp->allocated <<= 1; \ if (bufp->allocated > MAX_BUF_SIZE) \ bufp->allocated = MAX_BUF_SIZE; \ bufp->buffer = (UCHAR_T *) REALLOC (COMPILED_BUFFER_VAR, \ bufp->allocated); \ if (COMPILED_BUFFER_VAR == NULL) \ return REG_ESPACE; \ /* If the buffer moved, move all the pointers into it. */ \ if (old_buffer != COMPILED_BUFFER_VAR) \ { \ int incr = COMPILED_BUFFER_VAR - old_buffer; \ MOVE_BUFFER_POINTER (b); \ MOVE_BUFFER_POINTER (begalt); \ if (fixup_alt_jump) \ MOVE_BUFFER_POINTER (fixup_alt_jump); \ if (laststart) \ MOVE_BUFFER_POINTER (laststart); \ if (pending_exact) \ MOVE_BUFFER_POINTER (pending_exact); \ } \ ELSE_EXTEND_BUFFER_HIGH_BOUND \ } while (0) # endif /* WCHAR */ # ifndef DEFINED_ONCE /* Since we have one byte reserved for the register number argument to {start,stop}_memory, the maximum number of groups we can report things about is what fits in that byte. */ # define MAX_REGNUM 255 /* But patterns can have more than `MAX_REGNUM' registers. We just ignore the excess. */ typedef unsigned regnum_t; /* Macros for the compile stack. */ /* Since offsets can go either forwards or backwards, this type needs to be able to hold values from -(MAX_BUF_SIZE - 1) to MAX_BUF_SIZE - 1. */ /* int may be not enough when sizeof(int) == 2. */ typedef long pattern_offset_t; typedef struct { pattern_offset_t begalt_offset; pattern_offset_t fixup_alt_jump; pattern_offset_t inner_group_offset; pattern_offset_t laststart_offset; regnum_t regnum; } compile_stack_elt_t; typedef struct { compile_stack_elt_t *stack; unsigned size; unsigned avail; /* Offset of next open position. 
*/ } compile_stack_type; # define INIT_COMPILE_STACK_SIZE 32 # define COMPILE_STACK_EMPTY (compile_stack.avail == 0) # define COMPILE_STACK_FULL (compile_stack.avail == compile_stack.size) /* The next available element. */ # define COMPILE_STACK_TOP (compile_stack.stack[compile_stack.avail]) # endif /* not DEFINED_ONCE */ /* Set the bit for character C in a list. */ # ifndef DEFINED_ONCE # define SET_LIST_BIT(c) \ (b[((unsigned char) (c)) / BYTEWIDTH] \ |= 1 << (((unsigned char) c) % BYTEWIDTH)) # endif /* DEFINED_ONCE */ /* Get the next unsigned number in the uncompiled pattern. */ # define GET_UNSIGNED_NUMBER(num) \ { \ while (p != pend) \ { \ PATFETCH (c); \ if (c < '0' || c > '9') \ break; \ if (num <= RE_DUP_MAX) \ { \ if (num < 0) \ num = 0; \ num = num * 10 + c - '0'; \ } \ } \ } # ifndef DEFINED_ONCE # if defined _LIBC || WIDE_CHAR_SUPPORT /* The GNU C library provides support for user-defined character classes and the functions from ISO C amendement 1. */ # ifdef CHARCLASS_NAME_MAX # define CHAR_CLASS_MAX_LENGTH CHARCLASS_NAME_MAX # else /* This shouldn't happen but some implementation might still have this problem. Use a reasonable default value. */ # define CHAR_CLASS_MAX_LENGTH 256 # endif # ifdef _LIBC # define IS_CHAR_CLASS(string) __wctype (string) # else # define IS_CHAR_CLASS(string) wctype (string) # endif # else # define CHAR_CLASS_MAX_LENGTH 6 /* Namely, `xdigit'. */ # define IS_CHAR_CLASS(string) \ (STREQ (string, "alpha") || STREQ (string, "upper") \ || STREQ (string, "lower") || STREQ (string, "digit") \ || STREQ (string, "alnum") || STREQ (string, "xdigit") \ || STREQ (string, "space") || STREQ (string, "print") \ || STREQ (string, "punct") || STREQ (string, "graph") \ || STREQ (string, "cntrl") || STREQ (string, "blank")) # endif # endif /* DEFINED_ONCE */ # ifndef MATCH_MAY_ALLOCATE /* If we cannot allocate large objects within re_match_2_internal, we make the fail stack and register vectors global. The fail stack, we grow to the maximum size when a regexp is compiled. The register vectors, we adjust in size each time we compile a regexp, according to the number of registers it needs. */ static PREFIX(fail_stack_type) fail_stack; /* Size with which the following vectors are currently allocated. That is so we can make them bigger as needed, but never make them smaller. */ # ifdef DEFINED_ONCE static int regs_allocated_size; static const char ** regstart, ** regend; static const char ** old_regstart, ** old_regend; static const char **best_regstart, **best_regend; static const char **reg_dummy; # endif /* DEFINED_ONCE */ static PREFIX(register_info_type) *PREFIX(reg_info); static PREFIX(register_info_type) *PREFIX(reg_info_dummy); /* Make the register vectors big enough for NUM_REGS registers, but don't make them smaller. 
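   (A hedged example, not from the original text: a pattern containing nine
   parenthesised groups needs ten register slots, group 0 being the whole
   match, so a call with num_regs == 10 enlarges every vector via RETALLOC_IF;
   a later, smaller pattern leaves them at that size because of the
   regs_allocated_size guard.)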
*/ static void PREFIX(regex_grow_registers) (num_regs) int num_regs; { if (num_regs > regs_allocated_size) { RETALLOC_IF (regstart, num_regs, const char *); RETALLOC_IF (regend, num_regs, const char *); RETALLOC_IF (old_regstart, num_regs, const char *); RETALLOC_IF (old_regend, num_regs, const char *); RETALLOC_IF (best_regstart, num_regs, const char *); RETALLOC_IF (best_regend, num_regs, const char *); RETALLOC_IF (PREFIX(reg_info), num_regs, PREFIX(register_info_type)); RETALLOC_IF (reg_dummy, num_regs, const char *); RETALLOC_IF (PREFIX(reg_info_dummy), num_regs, PREFIX(register_info_type)); regs_allocated_size = num_regs; } } # endif /* not MATCH_MAY_ALLOCATE */ # ifndef DEFINED_ONCE static boolean group_in_compile_stack _RE_ARGS ((compile_stack_type compile_stack, regnum_t regnum)); # endif /* not DEFINED_ONCE */ /* `regex_compile' compiles PATTERN (of length SIZE) according to SYNTAX. Returns one of error codes defined in `regex.h', or zero for success. Assumes the `allocated' (and perhaps `buffer') and `translate' fields are set in BUFP on entry. If it succeeds, results are put in BUFP (if it returns an error, the contents of BUFP are undefined): `buffer' is the compiled pattern; `syntax' is set to SYNTAX; `used' is set to the length of the compiled pattern; `fastmap_accurate' is zero; `re_nsub' is the number of subexpressions in PATTERN; `not_bol' and `not_eol' are zero; The `fastmap' and `newline_anchor' fields are neither examined nor set. */ /* Return, freeing storage we allocated. */ # ifdef WCHAR # define FREE_STACK_RETURN(value) \ return (free(pattern), free(mbs_offset), free(is_binary), free (compile_stack.stack), value) # else # define FREE_STACK_RETURN(value) \ return (free (compile_stack.stack), value) # endif /* WCHAR */ static reg_errcode_t PREFIX(regex_compile) (ARG_PREFIX(pattern), ARG_PREFIX(size), syntax, bufp) const char *ARG_PREFIX(pattern); size_t ARG_PREFIX(size); reg_syntax_t syntax; struct re_pattern_buffer *bufp; { /* We fetch characters from PATTERN here. Even though PATTERN is `char *' (i.e., signed), we declare these variables as unsigned, so they can be reliably used as array indices. */ register UCHAR_T c, c1; #ifdef WCHAR /* A temporary space to keep wchar_t pattern and compiled pattern. */ CHAR_T *pattern, *COMPILED_BUFFER_VAR; size_t size; /* offset buffer for optimization. See convert_mbs_to_wc. */ int *mbs_offset = NULL; /* It hold whether each wchar_t is binary data or not. */ char *is_binary = NULL; /* A flag whether exactn is handling binary data or not. */ char is_exactn_bin = FALSE; #endif /* WCHAR */ /* A random temporary spot in PATTERN. */ const CHAR_T *p1; /* Points to the end of the buffer, where we should append. */ register UCHAR_T *b; /* Keeps track of unclosed groups. */ compile_stack_type compile_stack; /* Points to the current (ending) position in the pattern. */ #ifdef WCHAR const CHAR_T *p; const CHAR_T *pend; #else /* BYTE */ const CHAR_T *p = pattern; const CHAR_T *pend = pattern + size; #endif /* WCHAR */ /* How to translate the characters in the pattern. */ RE_TRANSLATE_TYPE translate = bufp->translate; /* Address of the count-byte of the most recently inserted `exactn' command. This makes it possible to tell if a new exact-match character can be added to that command or if the character requires a new `exactn' command. */ UCHAR_T *pending_exact = 0; /* Address of start of the most recently finished expression. This tells, e.g., postfix * where to find the start of its operand. 
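   (For instance, and only as an illustration: in "ab*" the compiler gives
   "b" its own exactn so that laststart points at it, and the "*" loop then
   wraps just that opcode rather than the "a" as well.)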
Reset at the beginning of groups and alternatives. */ UCHAR_T *laststart = 0; /* Address of beginning of regexp, or inside of last group. */ UCHAR_T *begalt; /* Address of the place where a forward jump should go to the end of the containing expression. Each alternative of an `or' -- except the last -- ends with a forward jump of this sort. */ UCHAR_T *fixup_alt_jump = 0; /* Counts open-groups as they are encountered. Remembered for the matching close-group on the compile stack, so the same register number is put in the stop_memory as the start_memory. */ regnum_t regnum = 0; #ifdef WCHAR /* Initialize the wchar_t PATTERN and offset_buffer. */ p = pend = pattern = TALLOC(csize + 1, CHAR_T); mbs_offset = TALLOC(csize + 1, int); is_binary = TALLOC(csize + 1, char); if (pattern == NULL || mbs_offset == NULL || is_binary == NULL) { free(pattern); free(mbs_offset); free(is_binary); return REG_ESPACE; } pattern[csize] = L'\0'; /* sentinel */ size = convert_mbs_to_wcs(pattern, cpattern, csize, mbs_offset, is_binary); pend = p + size; if (size < 0) { free(pattern); free(mbs_offset); free(is_binary); return REG_BADPAT; } #endif #ifdef DEBUG DEBUG_PRINT1 ("\nCompiling pattern: "); if (debug) { unsigned debug_count; for (debug_count = 0; debug_count < size; debug_count++) PUT_CHAR (pattern[debug_count]); putchar ('\n'); } #endif /* DEBUG */ /* Initialize the compile stack. */ compile_stack.stack = TALLOC (INIT_COMPILE_STACK_SIZE, compile_stack_elt_t); if (compile_stack.stack == NULL) { #ifdef WCHAR free(pattern); free(mbs_offset); free(is_binary); #endif return REG_ESPACE; } compile_stack.size = INIT_COMPILE_STACK_SIZE; compile_stack.avail = 0; /* Initialize the pattern buffer. */ bufp->syntax = syntax; bufp->fastmap_accurate = 0; bufp->not_bol = bufp->not_eol = 0; /* Set `used' to zero, so that if we return an error, the pattern printer (for debugging) will think there's no pattern. We reset it at the end. */ bufp->used = 0; /* Always count groups, whether or not bufp->no_sub is set. */ bufp->re_nsub = 0; #if !defined emacs && !defined SYNTAX_TABLE /* Initialize the syntax table. */ init_syntax_once (); #endif if (bufp->allocated == 0) { if (bufp->buffer) { /* If zero allocated, but buffer is non-null, try to realloc enough space. This loses if buffer's address is bogus, but that is the user's responsibility. */ #ifdef WCHAR /* Free bufp->buffer and allocate an array for wchar_t pattern buffer. */ free(bufp->buffer); COMPILED_BUFFER_VAR = TALLOC (INIT_BUF_SIZE/sizeof(UCHAR_T), UCHAR_T); #else RETALLOC (COMPILED_BUFFER_VAR, INIT_BUF_SIZE, UCHAR_T); #endif /* WCHAR */ } else { /* Caller did not allocate a buffer. Do it for them. */ COMPILED_BUFFER_VAR = TALLOC (INIT_BUF_SIZE / sizeof(UCHAR_T), UCHAR_T); } if (!COMPILED_BUFFER_VAR) FREE_STACK_RETURN (REG_ESPACE); #ifdef WCHAR bufp->buffer = (char*)COMPILED_BUFFER_VAR; #endif /* WCHAR */ bufp->allocated = INIT_BUF_SIZE; } #ifdef WCHAR else COMPILED_BUFFER_VAR = (UCHAR_T*) bufp->buffer; #endif begalt = b = COMPILED_BUFFER_VAR; /* Loop through the uncompiled pattern until we're at the end. */ while (p != pend) { PATFETCH (c); switch (c) { case '^': { if ( /* If at start of pattern, it's an operator. */ p == pattern + 1 /* If context independent, it's an operator. */ || syntax & RE_CONTEXT_INDEP_ANCHORS /* Otherwise, depends on what's come before. */ || PREFIX(at_begline_loc_p) (pattern, p, syntax)) BUF_PUSH (begline); else goto normal_char; } break; case '$': { if ( /* If at end of pattern, it's an operator. 
*/ p == pend /* If context independent, it's an operator. */ || syntax & RE_CONTEXT_INDEP_ANCHORS /* Otherwise, depends on what's next. */ || PREFIX(at_endline_loc_p) (p, pend, syntax)) BUF_PUSH (endline); else goto normal_char; } break; case '+': case '?': if ((syntax & RE_BK_PLUS_QM) || (syntax & RE_LIMITED_OPS)) goto normal_char; handle_plus: case '*': /* If there is no previous pattern... */ if (!laststart) { if (syntax & RE_CONTEXT_INVALID_OPS) FREE_STACK_RETURN (REG_BADRPT); else if (!(syntax & RE_CONTEXT_INDEP_OPS)) goto normal_char; } { /* Are we optimizing this jump? */ boolean keep_string_p = false; /* 1 means zero (many) matches is allowed. */ char zero_times_ok = 0, many_times_ok = 0; /* If there is a sequence of repetition chars, collapse it down to just one (the right one). We can't combine interval operators with these because of, e.g., `a{2}*', which should only match an even number of `a's. */ for (;;) { zero_times_ok |= c != '+'; many_times_ok |= c != '?'; if (p == pend) break; PATFETCH (c); if (c == '*' || (!(syntax & RE_BK_PLUS_QM) && (c == '+' || c == '?'))) ; else if (syntax & RE_BK_PLUS_QM && c == '\\') { if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); PATFETCH (c1); if (!(c1 == '+' || c1 == '?')) { PATUNFETCH; PATUNFETCH; break; } c = c1; } else { PATUNFETCH; break; } /* If we get here, we found another repeat character. */ } /* Star, etc. applied to an empty pattern is equivalent to an empty pattern. */ if (!laststart) break; /* Now we know whether or not zero matches is allowed and also whether or not two or more matches is allowed. */ if (many_times_ok) { /* More than one repetition is allowed, so put in at the end a backward relative jump from `b' to before the next jump we're going to put in below (which jumps from laststart to after this jump). But if we are at the `*' in the exact sequence `.*\n', insert an unconditional jump backwards to the ., instead of the beginning of the loop. This way we only push a failure point once, instead of every time through the loop. */ assert (p - 1 > pattern); /* Allocate the space for the jump. */ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); /* We know we are not at the first character of the pattern, because laststart was nonzero. And we've already incremented `p', by the way, to be the character after the `*'. Do we have to do something analogous here for null bytes, because of RE_DOT_NOT_NULL? */ if (TRANSLATE (*(p - 2)) == TRANSLATE ('.') && zero_times_ok && p < pend && TRANSLATE (*p) == TRANSLATE ('\n') && !(syntax & RE_DOT_NEWLINE)) { /* We have .*\n. */ STORE_JUMP (jump, b, laststart); keep_string_p = true; } else /* Anything else. */ STORE_JUMP (maybe_pop_jump, b, laststart - (1 + OFFSET_ADDRESS_SIZE)); /* We've added more stuff to the buffer. */ b += 1 + OFFSET_ADDRESS_SIZE; } /* On failure, jump from laststart to b + 3, which will be the end of the buffer after this jump is inserted. */ /* ifdef WCHAR, 'b + 1 + OFFSET_ADDRESS_SIZE' instead of 'b + 3'. */ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (keep_string_p ? on_failure_keep_string_jump : on_failure_jump, laststart, b + 1 + OFFSET_ADDRESS_SIZE); pending_exact = 0; b += 1 + OFFSET_ADDRESS_SIZE; if (!zero_times_ok) { /* At least one repetition is required, so insert a `dummy_failure_jump' before the initial `on_failure_jump' instruction of the loop. This effects a skip over that instruction the first time we hit that loop. 
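   A hedged sketch of the resulting layout for "a+" (offsets omitted, not an
   actual dump):

       dummy_failure_jump        entry: jump over the on_failure_jump below
       on_failure_jump           exit path once the body stops matching
       exactn/1/a                the loop body
       maybe_pop_jump            back to the on_failure_jump

   so the body must match at least once before the exit path is armed, which
   is the difference between "a+" and "a*".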
*/ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (dummy_failure_jump, laststart, laststart + 2 + 2 * OFFSET_ADDRESS_SIZE); b += 1 + OFFSET_ADDRESS_SIZE; } } break; case '.': laststart = b; BUF_PUSH (anychar); break; case '[': { boolean had_char_class = false; #ifdef WCHAR CHAR_T range_start = 0xffffffff; #else unsigned int range_start = 0xffffffff; #endif if (p == pend) FREE_STACK_RETURN (REG_EBRACK); #ifdef WCHAR /* We assume a charset(_not) structure as a wchar_t array. charset[0] = (re_opcode_t) charset(_not) charset[1] = l (= length of char_classes) charset[2] = m (= length of collating_symbols) charset[3] = n (= length of equivalence_classes) charset[4] = o (= length of char_ranges) charset[5] = p (= length of chars) charset[6] = char_class (wctype_t) charset[6+CHAR_CLASS_SIZE] = char_class (wctype_t) ... charset[l+5] = char_class (wctype_t) charset[l+6] = collating_symbol (wchar_t) ... charset[l+m+5] = collating_symbol (wchar_t) ifdef _LIBC we use the index if _NL_COLLATE_SYMB_EXTRAMB instead of wchar_t string. charset[l+m+6] = equivalence_classes (wchar_t) ... charset[l+m+n+5] = equivalence_classes (wchar_t) ifdef _LIBC we use the index in _NL_COLLATE_WEIGHT instead of wchar_t string. charset[l+m+n+6] = range_start charset[l+m+n+7] = range_end ... charset[l+m+n+2o+4] = range_start charset[l+m+n+2o+5] = range_end ifdef _LIBC we use the value looked up in _NL_COLLATE_COLLSEQ instead of wchar_t character. charset[l+m+n+2o+6] = char ... charset[l+m+n+2o+p+5] = char */ /* We need at least 6 spaces: the opcode, the length of char_classes, the length of collating_symbols, the length of equivalence_classes, the length of char_ranges, the length of chars. */ GET_BUFFER_SPACE (6); /* Save b as laststart. And We use laststart as the pointer to the first element of the charset here. In other words, laststart[i] indicates charset[i]. */ laststart = b; /* We test `*p == '^' twice, instead of using an if statement, so we only need one BUF_PUSH. */ BUF_PUSH (*p == '^' ? charset_not : charset); if (*p == '^') p++; /* Push the length of char_classes, the length of collating_symbols, the length of equivalence_classes, the length of char_ranges and the length of chars. */ BUF_PUSH_3 (0, 0, 0); BUF_PUSH_2 (0, 0); /* Remember the first position in the bracket expression. */ p1 = p; /* charset_not matches newline according to a syntax bit. */ if ((re_opcode_t) b[-6] == charset_not && (syntax & RE_HAT_LISTS_NOT_NEWLINE)) { BUF_PUSH('\n'); laststart[5]++; /* Update the length of characters */ } /* Read in characters and ranges, setting map bits. */ for (;;) { if (p == pend) FREE_STACK_RETURN (REG_EBRACK); PATFETCH (c); /* \ might escape characters inside [...] and [^...]. */ if ((syntax & RE_BACKSLASH_ESCAPE_IN_LISTS) && c == '\\') { if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); PATFETCH (c1); BUF_PUSH(c1); laststart[5]++; /* Update the length of chars */ range_start = c1; continue; } /* Could be the end of the bracket expression. If it's not (i.e., when the bracket expression is `[]' so far), the ']' character bit gets set way below. */ if (c == ']' && p != p1 + 1) break; /* Look ahead to see if it's a range when the last thing was a character class. */ if (had_char_class && c == '-' && *p != ']') FREE_STACK_RETURN (REG_ERANGE); /* Look ahead to see if it's a range when the last thing was a character: if this is a hyphen not at the beginning or the end of a list, then it's the range operator. 
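   (Illustrative cases, not from the original text: in "[a-z]" the hyphen
   between "a" and "z" is the range operator, while in "[-az]", "[az-]" and
   "[^-az]" it sits at the beginning or end of the list and is taken as a
   literal "-", which is exactly what the p[-2] == '[' and p[-3] == '[' tests
   together with the *p != ']' test below decide.)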
*/ if (c == '-' && !(p - 2 >= pattern && p[-2] == '[') && !(p - 3 >= pattern && p[-3] == '[' && p[-2] == '^') && *p != ']') { reg_errcode_t ret; /* Allocate the space for range_start and range_end. */ GET_BUFFER_SPACE (2); /* Update the pointer to indicate end of buffer. */ b += 2; ret = wcs_compile_range (range_start, &p, pend, translate, syntax, b, laststart); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } else if (p[0] == '-' && p[1] != ']') { /* This handles ranges made up of characters only. */ reg_errcode_t ret; /* Move past the `-'. */ PATFETCH (c1); /* Allocate the space for range_start and range_end. */ GET_BUFFER_SPACE (2); /* Update the pointer to indicate end of buffer. */ b += 2; ret = wcs_compile_range (c, &p, pend, translate, syntax, b, laststart); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } /* See if we're at the beginning of a possible character class. */ else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == ':') { /* Leave room for the null. */ char str[CHAR_CLASS_MAX_LENGTH + 1]; PATFETCH (c); c1 = 0; /* If pattern is `[[:'. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == ':' && *p == ']') || p == pend) break; if (c1 < CHAR_CLASS_MAX_LENGTH) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; /* If isn't a word bracketed by `[:' and `:]': undo the ending character, the letters, and leave the leading `:' and `[' (but store them as character). */ if (c == ':' && *p == ']') { wctype_t wt; uintptr_t alignedp; /* Query the character class as wctype_t. */ wt = IS_CHAR_CLASS (str); if (wt == 0) FREE_STACK_RETURN (REG_ECTYPE); /* Throw away the ] at the end of the character class. */ PATFETCH (c); if (p == pend) FREE_STACK_RETURN (REG_EBRACK); /* Allocate the space for character class. */ GET_BUFFER_SPACE(CHAR_CLASS_SIZE); /* Update the pointer to indicate end of buffer. */ b += CHAR_CLASS_SIZE; /* Move data which follow character classes not to violate the data. */ insert_space(CHAR_CLASS_SIZE, laststart + 6 + laststart[1], b - 1); alignedp = ((uintptr_t)(laststart + 6 + laststart[1]) + __alignof__(wctype_t) - 1) & ~(uintptr_t)(__alignof__(wctype_t) - 1); /* Store the character class. */ *((wctype_t*)alignedp) = wt; /* Update length of char_classes */ laststart[1] += CHAR_CLASS_SIZE; had_char_class = true; } else { c1++; while (c1--) PATUNFETCH; BUF_PUSH ('['); BUF_PUSH (':'); laststart[5] += 2; /* Update the length of characters */ range_start = ':'; had_char_class = false; } } else if (syntax & RE_CHAR_CLASSES && c == '[' && (*p == '=' || *p == '.')) { CHAR_T str[128]; /* Should be large enough. */ CHAR_T delim = *p; /* '=' or '.' */ # ifdef _LIBC uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif PATFETCH (c); c1 = 0; /* If pattern is `[[=' or '[[.'. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == delim && *p == ']') || p == pend) break; if (c1 < sizeof (str) - 1) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; if (c == delim && *p == ']' && str[0] != '\0') { unsigned int i, offset; /* If we have no collation data we use the default collation in which each character is in a class by itself. It also means that ASCII is the character set and therefore we cannot have character with more than one byte in the multibyte representation. 
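   (A hedged example, not original text: with nrules == 0 the bracket item
   "[[=a=]]" is accepted because its name is the single character "a",
   whereas a multi-character collating element such as "[[=ch=]]" cannot be
   expressed and is rejected with REG_ECOLLATE.)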
*/ /* If not defined _LIBC, we push the name and `\0' for the sake of matching performance. */ int datasize = c1 + 1; # ifdef _LIBC int32_t idx = 0; if (nrules == 0) # endif { if (c1 != 1) FREE_STACK_RETURN (REG_ECOLLATE); } # ifdef _LIBC else { const int32_t *table; const int32_t *weights; const int32_t *extra; const int32_t *indirect; wint_t *cp; /* This #include defines a local function! */ if(delim == '=') { /* We push the index for equivalence class. */ cp = (wint_t*)str; table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEWC); weights = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTWC); extra = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAWC); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTWC); idx = findidx ((const wint_t**)&cp); if (idx == 0 || cp < (wint_t*) str + c1) /* This is no valid character. */ FREE_STACK_RETURN (REG_ECOLLATE); str[0] = (wchar_t)idx; } else /* delim == '.' */ { /* We push collation sequence value for collating symbol. */ int32_t table_size; const int32_t *symb_table; const unsigned char *extra; int32_t idx; int32_t elem; int32_t second; int32_t hash; char char_str[c1]; /* We have to convert the name to a single-byte string. This is possible since the names consist of ASCII characters and the internal representation is UCS4. */ for (i = 0; i < c1; ++i) char_str[i] = str[i]; table_size = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_SYMB_HASH_SIZEMB); symb_table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_TABLEMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); /* Locate the character in the hashing table. */ hash = elem_hash (char_str, c1); idx = 0; elem = hash % table_size; second = hash % (table_size - 2); while (symb_table[2 * elem] != 0) { /* First compare the hashing value. */ if (symb_table[2 * elem] == hash && c1 == extra[symb_table[2 * elem + 1]] && memcmp (char_str, &extra[symb_table[2 * elem + 1] + 1], c1) == 0) { /* Yep, this is the entry. */ idx = symb_table[2 * elem + 1]; idx += 1 + extra[idx]; break; } /* Next entry. */ elem += second; } if (symb_table[2 * elem] != 0) { /* Compute the index of the byte sequence in the table. */ idx += 1 + extra[idx]; /* Adjust for the alignment. */ idx = (idx + 3) & ~3; str[0] = (wchar_t) idx + 4; } else if (symb_table[2 * elem] == 0 && c1 == 1) { /* No valid character. Match it as a single byte character. */ had_char_class = false; BUF_PUSH(str[0]); /* Update the length of characters */ laststart[5]++; range_start = str[0]; /* Throw away the ] at the end of the collating symbol. */ PATFETCH (c); /* exit from the switch block. */ continue; } else FREE_STACK_RETURN (REG_ECOLLATE); } datasize = 1; } # endif /* Throw away the ] at the end of the equivalence class (or collating symbol). */ PATFETCH (c); /* Allocate the space for the equivalence class (or collating symbol) (and '\0' if needed). */ GET_BUFFER_SPACE(datasize); /* Update the pointer to indicate end of buffer. */ b += datasize; if (delim == '=') { /* equivalence class */ /* Calculate the offset of char_ranges, which is next to equivalence_classes. */ offset = laststart[1] + laststart[2] + laststart[3] +6; /* Insert space. */ insert_space(datasize, laststart + offset, b - 1); /* Write the equivalence_class and \0. */ for (i = 0 ; i < datasize ; i++) laststart[offset + i] = str[i]; /* Update the length of equivalence_classes. */ laststart[3] += datasize; had_char_class = true; } else /* delim == '.' 
*/ { /* collating symbol */ /* Calculate the offset of the equivalence_classes, which is next to collating_symbols. */ offset = laststart[1] + laststart[2] + 6; /* Insert space and write the collationg_symbol and \0. */ insert_space(datasize, laststart + offset, b-1); for (i = 0 ; i < datasize ; i++) laststart[offset + i] = str[i]; /* In re_match_2_internal if range_start < -1, we assume -range_start is the offset of the collating symbol which is specified as the character of the range start. So we assign -(laststart[1] + laststart[2] + 6) to range_start. */ range_start = -(laststart[1] + laststart[2] + 6); /* Update the length of collating_symbol. */ laststart[2] += datasize; had_char_class = false; } } else { c1++; while (c1--) PATUNFETCH; BUF_PUSH ('['); BUF_PUSH (delim); laststart[5] += 2; /* Update the length of characters */ range_start = delim; had_char_class = false; } } else { had_char_class = false; BUF_PUSH(c); laststart[5]++; /* Update the length of characters */ range_start = c; } } #else /* BYTE */ /* Ensure that we have enough space to push a charset: the opcode, the length count, and the bitset; 34 bytes in all. */ GET_BUFFER_SPACE (34); laststart = b; /* We test `*p == '^' twice, instead of using an if statement, so we only need one BUF_PUSH. */ BUF_PUSH (*p == '^' ? charset_not : charset); if (*p == '^') p++; /* Remember the first position in the bracket expression. */ p1 = p; /* Push the number of bytes in the bitmap. */ BUF_PUSH ((1 << BYTEWIDTH) / BYTEWIDTH); /* Clear the whole map. */ bzero (b, (1 << BYTEWIDTH) / BYTEWIDTH); /* charset_not matches newline according to a syntax bit. */ if ((re_opcode_t) b[-2] == charset_not && (syntax & RE_HAT_LISTS_NOT_NEWLINE)) SET_LIST_BIT ('\n'); /* Read in characters and ranges, setting map bits. */ for (;;) { if (p == pend) FREE_STACK_RETURN (REG_EBRACK); PATFETCH (c); /* \ might escape characters inside [...] and [^...]. */ if ((syntax & RE_BACKSLASH_ESCAPE_IN_LISTS) && c == '\\') { if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); PATFETCH (c1); SET_LIST_BIT (c1); range_start = c1; continue; } /* Could be the end of the bracket expression. If it's not (i.e., when the bracket expression is `[]' so far), the ']' character bit gets set way below. */ if (c == ']' && p != p1 + 1) break; /* Look ahead to see if it's a range when the last thing was a character class. */ if (had_char_class && c == '-' && *p != ']') FREE_STACK_RETURN (REG_ERANGE); /* Look ahead to see if it's a range when the last thing was a character: if this is a hyphen not at the beginning or the end of a list, then it's the range operator. */ if (c == '-' && !(p - 2 >= pattern && p[-2] == '[') && !(p - 3 >= pattern && p[-3] == '[' && p[-2] == '^') && *p != ']') { reg_errcode_t ret = byte_compile_range (range_start, &p, pend, translate, syntax, b); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } else if (p[0] == '-' && p[1] != ']') { /* This handles ranges made up of characters only. */ reg_errcode_t ret; /* Move past the `-'. */ PATFETCH (c1); ret = byte_compile_range (c, &p, pend, translate, syntax, b); if (ret != REG_NOERROR) FREE_STACK_RETURN (ret); range_start = 0xffffffff; } /* See if we're at the beginning of a possible character class. */ else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == ':') { /* Leave room for the null. */ char str[CHAR_CLASS_MAX_LENGTH + 1]; PATFETCH (c); c1 = 0; /* If pattern is `[[:'. 
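   (e.g. the truncated pattern "[[:" ends right here and the check below
   returns REG_EBRACK; an illustrative case, not original text.)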
*/ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == ':' && *p == ']') || p == pend) break; if (c1 < CHAR_CLASS_MAX_LENGTH) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; /* If isn't a word bracketed by `[:' and `:]': undo the ending character, the letters, and leave the leading `:' and `[' (but set bits for them). */ if (c == ':' && *p == ']') { # if defined _LIBC || WIDE_CHAR_SUPPORT boolean is_lower = STREQ (str, "lower"); boolean is_upper = STREQ (str, "upper"); wctype_t wt; int ch; wt = IS_CHAR_CLASS (str); if (wt == 0) FREE_STACK_RETURN (REG_ECTYPE); /* Throw away the ] at the end of the character class. */ PATFETCH (c); if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (ch = 0; ch < 1 << BYTEWIDTH; ++ch) { # ifdef _LIBC if (__iswctype (__btowc (ch), wt)) SET_LIST_BIT (ch); # else if (iswctype (btowc (ch), wt)) SET_LIST_BIT (ch); # endif if (translate && (is_upper || is_lower) && (ISUPPER (ch) || ISLOWER (ch))) SET_LIST_BIT (ch); } had_char_class = true; # else int ch; boolean is_alnum = STREQ (str, "alnum"); boolean is_alpha = STREQ (str, "alpha"); boolean is_blank = STREQ (str, "blank"); boolean is_cntrl = STREQ (str, "cntrl"); boolean is_digit = STREQ (str, "digit"); boolean is_graph = STREQ (str, "graph"); boolean is_lower = STREQ (str, "lower"); boolean is_print = STREQ (str, "print"); boolean is_punct = STREQ (str, "punct"); boolean is_space = STREQ (str, "space"); boolean is_upper = STREQ (str, "upper"); boolean is_xdigit = STREQ (str, "xdigit"); if (!IS_CHAR_CLASS (str)) FREE_STACK_RETURN (REG_ECTYPE); /* Throw away the ] at the end of the character class. */ PATFETCH (c); if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (ch = 0; ch < 1 << BYTEWIDTH; ch++) { /* This was split into 3 if's to avoid an arbitrary limit in some compiler. */ if ( (is_alnum && ISALNUM (ch)) || (is_alpha && ISALPHA (ch)) || (is_blank && ISBLANK (ch)) || (is_cntrl && ISCNTRL (ch))) SET_LIST_BIT (ch); if ( (is_digit && ISDIGIT (ch)) || (is_graph && ISGRAPH (ch)) || (is_lower && ISLOWER (ch)) || (is_print && ISPRINT (ch))) SET_LIST_BIT (ch); if ( (is_punct && ISPUNCT (ch)) || (is_space && ISSPACE (ch)) || (is_upper && ISUPPER (ch)) || (is_xdigit && ISXDIGIT (ch))) SET_LIST_BIT (ch); if ( translate && (is_upper || is_lower) && (ISUPPER (ch) || ISLOWER (ch))) SET_LIST_BIT (ch); } had_char_class = true; # endif /* libc || wctype.h */ } else { c1++; while (c1--) PATUNFETCH; SET_LIST_BIT ('['); SET_LIST_BIT (':'); range_start = ':'; had_char_class = false; } } else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == '=') { unsigned char str[MB_LEN_MAX + 1]; # ifdef _LIBC uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif PATFETCH (c); c1 = 0; /* If pattern is `[[='. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == '=' && *p == ']') || p == pend) break; if (c1 < MB_LEN_MAX) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; if (c == '=' && *p == ']' && str[0] != '\0') { /* If we have no collation data we use the default collation in which each character is in a class by itself. It also means that ASCII is the character set and therefore we cannot have character with more than one byte in the multibyte representation. */ # ifdef _LIBC if (nrules == 0) # endif { if (c1 != 1) FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Set the bit for the character. 
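   (Illustrative example, not part of the original comments: with no
   collation data every character forms an equivalence class of its own,
   so for the pattern `[[=a=]]' the loop above collects the single byte
   `a', c1 is 1, and only the bit for `a' is set here.)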
*/ SET_LIST_BIT (str[0]); } # ifdef _LIBC else { /* Try to match the byte sequence in `str' against those known to the collate implementation. First find out whether the bytes in `str' are actually from exactly one character. */ const int32_t *table; const unsigned char *weights; const unsigned char *extra; const int32_t *indirect; int32_t idx; const unsigned char *cp = str; int ch; /* This #include defines a local function! */ table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEMB); weights = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAMB); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTMB); idx = findidx (&cp); if (idx == 0 || cp < str + c1) /* This is no valid character. */ FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Now we have to go throught the whole table and find all characters which have the same first level weight. XXX Note that this is not entirely correct. we would have to match multibyte sequences but this is not possible with the current implementation. */ for (ch = 1; ch < 256; ++ch) /* XXX This test would have to be changed if we would allow matching multibyte sequences. */ if (table[ch] > 0) { int32_t idx2 = table[ch]; size_t len = weights[idx2]; /* Test whether the lenghts match. */ if (weights[idx] == len) { /* They do. New compare the bytes of the weight. */ size_t cnt = 0; while (cnt < len && (weights[idx + 1 + cnt] == weights[idx2 + 1 + cnt])) ++cnt; if (cnt == len) /* They match. Mark the character as acceptable. */ SET_LIST_BIT (ch); } } } # endif had_char_class = true; } else { c1++; while (c1--) PATUNFETCH; SET_LIST_BIT ('['); SET_LIST_BIT ('='); range_start = '='; had_char_class = false; } } else if (syntax & RE_CHAR_CLASSES && c == '[' && *p == '.') { unsigned char str[128]; /* Should be large enough. */ # ifdef _LIBC uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif PATFETCH (c); c1 = 0; /* If pattern is `[[.'. */ if (p == pend) FREE_STACK_RETURN (REG_EBRACK); for (;;) { PATFETCH (c); if ((c == '.' && *p == ']') || p == pend) break; if (c1 < sizeof (str)) str[c1++] = c; else /* This is in any case an invalid class name. */ str[0] = '\0'; } str[c1] = '\0'; if (c == '.' && *p == ']' && str[0] != '\0') { /* If we have no collation data we use the default collation in which each character is the name for its own class which contains only the one character. It also means that ASCII is the character set and therefore we cannot have character with more than one byte in the multibyte representation. */ # ifdef _LIBC if (nrules == 0) # endif { if (c1 != 1) FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Set the bit for the character. */ SET_LIST_BIT (str[0]); range_start = ((const unsigned char *) str)[0]; } # ifdef _LIBC else { /* Try to match the byte sequence in `str' against those known to the collate implementation. First find out whether the bytes in `str' are actually from exactly one character. 
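   (Sketch of the data layout used by the probe below, added only as an
   aid to the reader: the table is searched with open addressing, where
   symb_table[2 * elem] holds an entry's hash value and
   symb_table[2 * elem + 1] holds the offset of its name in `extra',
   stored as a length byte followed by the name bytes; unsuccessful
   probes step forward by hash % (table_size - 2).)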
*/ int32_t table_size; const int32_t *symb_table; const unsigned char *extra; int32_t idx; int32_t elem; int32_t second; int32_t hash; table_size = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_SYMB_HASH_SIZEMB); symb_table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_TABLEMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); /* Locate the character in the hashing table. */ hash = elem_hash (str, c1); idx = 0; elem = hash % table_size; second = hash % (table_size - 2); while (symb_table[2 * elem] != 0) { /* First compare the hashing value. */ if (symb_table[2 * elem] == hash && c1 == extra[symb_table[2 * elem + 1]] && memcmp (str, &extra[symb_table[2 * elem + 1] + 1], c1) == 0) { /* Yep, this is the entry. */ idx = symb_table[2 * elem + 1]; idx += 1 + extra[idx]; break; } /* Next entry. */ elem += second; } if (symb_table[2 * elem] == 0) /* This is no valid character. */ FREE_STACK_RETURN (REG_ECOLLATE); /* Throw away the ] at the end of the equivalence class. */ PATFETCH (c); /* Now add the multibyte character(s) we found to the accept list. XXX Note that this is not entirely correct. we would have to match multibyte sequences but this is not possible with the current implementation. Also, we have to match collating symbols, which expand to more than one file, as a whole and not allow the individual bytes. */ c1 = extra[idx++]; if (c1 == 1) range_start = extra[idx]; while (c1-- > 0) { SET_LIST_BIT (extra[idx]); ++idx; } } # endif had_char_class = false; } else { c1++; while (c1--) PATUNFETCH; SET_LIST_BIT ('['); SET_LIST_BIT ('.'); range_start = '.'; had_char_class = false; } } else { had_char_class = false; SET_LIST_BIT (c); range_start = c; } } /* Discard any (non)matching list bytes that are all 0 at the end of the map. Decrease the map-length byte too. */ while ((int) b[-1] > 0 && b[b[-1] - 1] == 0) b[-1]--; b += b[-1]; #endif /* WCHAR */ } break; case '(': if (syntax & RE_NO_BK_PARENS) goto handle_open; else goto normal_char; case ')': if (syntax & RE_NO_BK_PARENS) goto handle_close; else goto normal_char; case '\n': if (syntax & RE_NEWLINE_ALT) goto handle_alt; else goto normal_char; case '|': if (syntax & RE_NO_BK_VBAR) goto handle_alt; else goto normal_char; case '{': if (syntax & RE_INTERVALS && syntax & RE_NO_BK_BRACES) goto handle_interval; else goto normal_char; case '\\': if (p == pend) FREE_STACK_RETURN (REG_EESCAPE); /* Do not translate the character after the \, so that we can distinguish, e.g., \B from \b, even if we normally would translate, e.g., B to b. */ PATFETCH_RAW (c); switch (c) { case '(': if (syntax & RE_NO_BK_PARENS) goto normal_backslash; handle_open: bufp->re_nsub++; regnum++; if (COMPILE_STACK_FULL) { RETALLOC (compile_stack.stack, compile_stack.size << 1, compile_stack_elt_t); if (compile_stack.stack == NULL) return REG_ESPACE; compile_stack.size <<= 1; } /* These are the values to restore when we hit end of this group. They are all relative offsets, so that if the whole pattern moves because of realloc, they will still be valid. */ COMPILE_STACK_TOP.begalt_offset = begalt - COMPILED_BUFFER_VAR; COMPILE_STACK_TOP.fixup_alt_jump = fixup_alt_jump ? fixup_alt_jump - COMPILED_BUFFER_VAR + 1 : 0; COMPILE_STACK_TOP.laststart_offset = b - COMPILED_BUFFER_VAR; COMPILE_STACK_TOP.regnum = regnum; /* We will eventually replace the 0 with the number of groups inner to this one. But do not push a start_memory for groups beyond the last one we can represent in the compiled pattern. 
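   (Worked example, added for illustration: when compiling `((a)b)' under
   a syntax where `(' groups, the outer group gets regnum 1 and a 0 is
   pushed here as a placeholder; by the time its closing `)' is handled,
   regnum has advanced to 2, so the placeholder is patched to
   2 - 1 = 1, the number of groups nested inside group 1.)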
*/ if (regnum <= MAX_REGNUM) { COMPILE_STACK_TOP.inner_group_offset = b - COMPILED_BUFFER_VAR + 2; BUF_PUSH_3 (start_memory, regnum, 0); } compile_stack.avail++; fixup_alt_jump = 0; laststart = 0; begalt = b; /* If we've reached MAX_REGNUM groups, then this open won't actually generate any code, so we'll have to clear pending_exact explicitly. */ pending_exact = 0; break; case ')': if (syntax & RE_NO_BK_PARENS) goto normal_backslash; if (COMPILE_STACK_EMPTY) { if (syntax & RE_UNMATCHED_RIGHT_PAREN_ORD) goto normal_backslash; else FREE_STACK_RETURN (REG_ERPAREN); } handle_close: if (fixup_alt_jump) { /* Push a dummy failure point at the end of the alternative for a possible future `pop_failure_jump' to pop. See comments at `push_dummy_failure' in `re_match_2'. */ BUF_PUSH (push_dummy_failure); /* We allocated space for this jump when we assigned to `fixup_alt_jump', in the `handle_alt' case below. */ STORE_JUMP (jump_past_alt, fixup_alt_jump, b - 1); } /* See similar code for backslashed left paren above. */ if (COMPILE_STACK_EMPTY) { if (syntax & RE_UNMATCHED_RIGHT_PAREN_ORD) goto normal_char; else FREE_STACK_RETURN (REG_ERPAREN); } /* Since we just checked for an empty stack above, this ``can't happen''. */ assert (compile_stack.avail != 0); { /* We don't just want to restore into `regnum', because later groups should continue to be numbered higher, as in `(ab)c(de)' -- the second group is #2. */ regnum_t this_group_regnum; compile_stack.avail--; begalt = COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.begalt_offset; fixup_alt_jump = COMPILE_STACK_TOP.fixup_alt_jump ? COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.fixup_alt_jump - 1 : 0; laststart = COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.laststart_offset; this_group_regnum = COMPILE_STACK_TOP.regnum; /* If we've reached MAX_REGNUM groups, then this open won't actually generate any code, so we'll have to clear pending_exact explicitly. */ pending_exact = 0; /* We're at the end of the group, so now we know how many groups were inside this one. */ if (this_group_regnum <= MAX_REGNUM) { UCHAR_T *inner_group_loc = COMPILED_BUFFER_VAR + COMPILE_STACK_TOP.inner_group_offset; *inner_group_loc = regnum - this_group_regnum; BUF_PUSH_3 (stop_memory, this_group_regnum, regnum - this_group_regnum); } } break; case '|': /* `\|'. */ if (syntax & RE_LIMITED_OPS || syntax & RE_NO_BK_VBAR) goto normal_backslash; handle_alt: if (syntax & RE_LIMITED_OPS) goto normal_char; /* Insert before the previous alternative a jump which jumps to this alternative if the former fails. */ GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (on_failure_jump, begalt, b + 2 + 2 * OFFSET_ADDRESS_SIZE); pending_exact = 0; b += 1 + OFFSET_ADDRESS_SIZE; /* The alternative before this one has a jump after it which gets executed if it gets matched. Adjust that jump so it will jump to this alternative's analogous jump (put in below, which in turn will jump to the next (if any) alternative's such jump, etc.). The last such jump jumps to the correct final destination. A picture: _____ _____ | | | | | v | v a | b | c If we are at `b', then fixup_alt_jump right now points to a three-byte space after `a'. We'll put in the jump, set fixup_alt_jump to right after `b', and leave behind three bytes which we'll fill in when we get to after `c'. */ if (fixup_alt_jump) STORE_JUMP (jump_past_alt, fixup_alt_jump, b); /* Mark and leave space for a jump after this alternative, to be filled in later either by next alternative or when know we're at the end of a series of alternatives. 
*/ fixup_alt_jump = b; GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); b += 1 + OFFSET_ADDRESS_SIZE; laststart = 0; begalt = b; break; case '{': /* If \{ is a literal. */ if (!(syntax & RE_INTERVALS) /* If we're at `\{' and it's not the open-interval operator. */ || (syntax & RE_NO_BK_BRACES)) goto normal_backslash; handle_interval: { /* If got here, then the syntax allows intervals. */ /* At least (most) this many matches must be made. */ int lower_bound = -1, upper_bound = -1; /* Place in the uncompiled pattern (i.e., just after the '{') to go back to if the interval is invalid. */ const CHAR_T *beg_interval = p; if (p == pend) goto invalid_interval; GET_UNSIGNED_NUMBER (lower_bound); if (c == ',') { GET_UNSIGNED_NUMBER (upper_bound); if (upper_bound < 0) upper_bound = RE_DUP_MAX; } else /* Interval such as `{1}' => match exactly once. */ upper_bound = lower_bound; if (! (0 <= lower_bound && lower_bound <= upper_bound)) goto invalid_interval; if (!(syntax & RE_NO_BK_BRACES)) { if (c != '\\' || p == pend) goto invalid_interval; PATFETCH (c); } if (c != '}') goto invalid_interval; /* If it's invalid to have no preceding re. */ if (!laststart) { if (syntax & RE_CONTEXT_INVALID_OPS && !(syntax & RE_INVALID_INTERVAL_ORD)) FREE_STACK_RETURN (REG_BADRPT); else if (syntax & RE_CONTEXT_INDEP_OPS) laststart = b; else goto unfetch_interval; } /* We just parsed a valid interval. */ if (RE_DUP_MAX < upper_bound) FREE_STACK_RETURN (REG_BADBR); /* If the upper bound is zero, don't want to succeed at all; jump from `laststart' to `b + 3', which will be the end of the buffer after we insert the jump. */ /* ifdef WCHAR, 'b + 1 + OFFSET_ADDRESS_SIZE' instead of 'b + 3'. */ if (upper_bound == 0) { GET_BUFFER_SPACE (1 + OFFSET_ADDRESS_SIZE); INSERT_JUMP (jump, laststart, b + 1 + OFFSET_ADDRESS_SIZE); b += 1 + OFFSET_ADDRESS_SIZE; } /* Otherwise, we have a nontrivial interval. When we're all done, the pattern will look like: set_number_at set_number_at succeed_n jump_n (The upper bound and `jump_n' are omitted if `upper_bound' is 1, though.) */ else { /* If the upper bound is > 1, we need to insert more at the end of the loop. */ unsigned nbytes = 2 + 4 * OFFSET_ADDRESS_SIZE + (upper_bound > 1) * (2 + 4 * OFFSET_ADDRESS_SIZE); GET_BUFFER_SPACE (nbytes); /* Initialize lower bound of the `succeed_n', even though it will be set during matching by its attendant `set_number_at' (inserted next), because `re_compile_fastmap' needs to know. Jump to the `jump_n' we might insert below. */ INSERT_JUMP2 (succeed_n, laststart, b + 1 + 2 * OFFSET_ADDRESS_SIZE + (upper_bound > 1) * (1 + 2 * OFFSET_ADDRESS_SIZE) , lower_bound); b += 1 + 2 * OFFSET_ADDRESS_SIZE; /* Code to initialize the lower bound. Insert before the `succeed_n'. The `5' is the last two bytes of this `set_number_at', plus 3 bytes of the following `succeed_n'. */ /* ifdef WCHAR, The '1+2*OFFSET_ADDRESS_SIZE' is the 'set_number_at', plus '1+OFFSET_ADDRESS_SIZE' of the following `succeed_n'. */ PREFIX(insert_op2) (set_number_at, laststart, 1 + 2 * OFFSET_ADDRESS_SIZE, lower_bound, b); b += 1 + 2 * OFFSET_ADDRESS_SIZE; if (upper_bound > 1) { /* More than one repetition is allowed, so append a backward jump to the `succeed_n' that starts this interval. When we've reached this during matching, we'll have matched the interval once, so jump back only `upper_bound - 1' times. 
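   (Concrete illustration, not from the original comments: for an
   interval such as `{2,4}', lower_bound is 2 and upper_bound is 4, so
   the jump_n stored below permits at most 4 - 1 = 3 further trips
   around the loop after the interval has matched once.)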
*/ STORE_JUMP2 (jump_n, b, laststart + 2 * OFFSET_ADDRESS_SIZE + 1, upper_bound - 1); b += 1 + 2 * OFFSET_ADDRESS_SIZE; /* The location we want to set is the second parameter of the `jump_n'; that is `b-2' as an absolute address. `laststart' will be the `set_number_at' we're about to insert; `laststart+3' the number to set, the source for the relative address. But we are inserting into the middle of the pattern -- so everything is getting moved up by 5. Conclusion: (b - 2) - (laststart + 3) + 5, i.e., b - laststart. We insert this at the beginning of the loop so that if we fail during matching, we'll reinitialize the bounds. */ PREFIX(insert_op2) (set_number_at, laststart, b - laststart, upper_bound - 1, b); b += 1 + 2 * OFFSET_ADDRESS_SIZE; } } pending_exact = 0; break; invalid_interval: if (!(syntax & RE_INVALID_INTERVAL_ORD)) FREE_STACK_RETURN (p == pend ? REG_EBRACE : REG_BADBR); unfetch_interval: /* Match the characters as literals. */ p = beg_interval; c = '{'; if (syntax & RE_NO_BK_BRACES) goto normal_char; else goto normal_backslash; } #ifdef emacs /* There is no way to specify the before_dot and after_dot operators. rms says this is ok. --karl */ case '=': BUF_PUSH (at_dot); break; case 's': laststart = b; PATFETCH (c); BUF_PUSH_2 (syntaxspec, syntax_spec_code[c]); break; case 'S': laststart = b; PATFETCH (c); BUF_PUSH_2 (notsyntaxspec, syntax_spec_code[c]); break; #endif /* emacs */ case 'w': if (syntax & RE_NO_GNU_OPS) goto normal_char; laststart = b; BUF_PUSH (wordchar); break; case 'W': if (syntax & RE_NO_GNU_OPS) goto normal_char; laststart = b; BUF_PUSH (notwordchar); break; case '<': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (wordbeg); break; case '>': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (wordend); break; case 'b': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (wordbound); break; case 'B': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (notwordbound); break; case '`': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (begbuf); break; case '\'': if (syntax & RE_NO_GNU_OPS) goto normal_char; BUF_PUSH (endbuf); break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (syntax & RE_NO_BK_REFS) goto normal_char; c1 = c - '0'; if (c1 > regnum) FREE_STACK_RETURN (REG_ESUBREG); /* Can't back reference to a subexpression if inside of it. */ if (group_in_compile_stack (compile_stack, (regnum_t) c1)) goto normal_char; laststart = b; BUF_PUSH_2 (duplicate, c1); break; case '+': case '?': if (syntax & RE_BK_PLUS_QM) goto handle_plus; else goto normal_backslash; default: normal_backslash: /* You might think it would be useful for \ to mean not to translate; but if we don't translate it it will never match anything. */ c = TRANSLATE (c); goto normal_char; } break; default: /* Expects the character in `c'. */ normal_char: /* If no exactn currently being built. */ if (!pending_exact #ifdef WCHAR /* If last exactn handle binary(or character) and new exactn handle character(or binary). */ || is_exactn_bin != is_binary[p - 1 - pattern] #endif /* WCHAR */ /* If last exactn not at current position. */ || pending_exact + *pending_exact + 1 != b /* We have only one byte following the exactn for the count. */ || *pending_exact == (1 << BYTEWIDTH) - 1 /* If followed by a repetition operator. */ || *p == '*' || *p == '^' || ((syntax & RE_BK_PLUS_QM) ? *p == '\\' && (p[1] == '+' || p[1] == '?') : (*p == '+' || *p == '?')) || ((syntax & RE_INTERVALS) && ((syntax & RE_NO_BK_BRACES) ? 
*p == '{' : (p[0] == '\\' && p[1] == '{')))) { /* Start building a new exactn. */ laststart = b; #ifdef WCHAR /* Is this exactn binary data or character? */ is_exactn_bin = is_binary[p - 1 - pattern]; if (is_exactn_bin) BUF_PUSH_2 (exactn_bin, 0); else BUF_PUSH_2 (exactn, 0); #else BUF_PUSH_2 (exactn, 0); #endif /* WCHAR */ pending_exact = b - 1; } BUF_PUSH (c); (*pending_exact)++; break; } /* switch (c) */ } /* while p != pend */ /* Through the pattern now. */ if (fixup_alt_jump) STORE_JUMP (jump_past_alt, fixup_alt_jump, b); if (!COMPILE_STACK_EMPTY) FREE_STACK_RETURN (REG_EPAREN); /* If we don't want backtracking, force success the first time we reach the end of the compiled pattern. */ if (syntax & RE_NO_POSIX_BACKTRACKING) BUF_PUSH (succeed); #ifdef WCHAR free (pattern); free (mbs_offset); free (is_binary); #endif free (compile_stack.stack); /* We have succeeded; set the length of the buffer. */ #ifdef WCHAR bufp->used = (uintptr_t) b - (uintptr_t) COMPILED_BUFFER_VAR; #else bufp->used = b - bufp->buffer; #endif #ifdef DEBUG if (debug) { DEBUG_PRINT1 ("\nCompiled pattern: \n"); PREFIX(print_compiled_pattern) (bufp); } #endif /* DEBUG */ #ifndef MATCH_MAY_ALLOCATE /* Initialize the failure stack to the largest possible stack. This isn't necessary unless we're trying to avoid calling alloca in the search and match routines. */ { int num_regs = bufp->re_nsub + 1; /* Since DOUBLE_FAIL_STACK refuses to double only if the current size is strictly greater than re_max_failures, the largest possible stack is 2 * re_max_failures failure points. */ if (fail_stack.size < (2 * re_max_failures * MAX_FAILURE_ITEMS)) { fail_stack.size = (2 * re_max_failures * MAX_FAILURE_ITEMS); # ifdef emacs if (! fail_stack.stack) fail_stack.stack = (PREFIX(fail_stack_elt_t) *) xmalloc (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t))); else fail_stack.stack = (PREFIX(fail_stack_elt_t) *) xrealloc (fail_stack.stack, (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t)))); # else /* not emacs */ if (! fail_stack.stack) fail_stack.stack = (PREFIX(fail_stack_elt_t) *) malloc (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t))); else fail_stack.stack = (PREFIX(fail_stack_elt_t) *) realloc (fail_stack.stack, (fail_stack.size * sizeof (PREFIX(fail_stack_elt_t)))); # endif /* not emacs */ } PREFIX(regex_grow_registers) (num_regs); } #endif /* not MATCH_MAY_ALLOCATE */ return REG_NOERROR; } /* regex_compile */ /* Subroutines for `regex_compile'. */ /* Store OP at LOC followed by two-byte integer parameter ARG. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. */ static void PREFIX(store_op1) (op, loc, arg) re_opcode_t op; UCHAR_T *loc; int arg; { *loc = (UCHAR_T) op; STORE_NUMBER (loc + 1, arg); } /* Like `store_op1', but for two two-byte parameters ARG1 and ARG2. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. */ static void PREFIX(store_op2) (op, loc, arg1, arg2) re_opcode_t op; UCHAR_T *loc; int arg1, arg2; { *loc = (UCHAR_T) op; STORE_NUMBER (loc + 1, arg1); STORE_NUMBER (loc + 1 + OFFSET_ADDRESS_SIZE, arg2); } /* Copy the bytes from LOC to END to open up three bytes of space at LOC for OP followed by two-byte integer parameter ARG. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. 
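   An illustrative call, added only as an aid (the argument values are
   made up):

     PREFIX(insert_op1) (jump, laststart, 3, b);

   shifts everything in [laststart, b) up by 1 + OFFSET_ADDRESS_SIZE
   elements and then writes a `jump' opcode with argument 3 at
   laststart.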
*/ static void PREFIX(insert_op1) (op, loc, arg, end) re_opcode_t op; UCHAR_T *loc; int arg; UCHAR_T *end; { register UCHAR_T *pfrom = end; register UCHAR_T *pto = end + 1 + OFFSET_ADDRESS_SIZE; while (pfrom != loc) *--pto = *--pfrom; PREFIX(store_op1) (op, loc, arg); } /* Like `insert_op1', but for two two-byte parameters ARG1 and ARG2. */ /* ifdef WCHAR, integer parameter is 1 wchar_t. */ static void PREFIX(insert_op2) (op, loc, arg1, arg2, end) re_opcode_t op; UCHAR_T *loc; int arg1, arg2; UCHAR_T *end; { register UCHAR_T *pfrom = end; register UCHAR_T *pto = end + 1 + 2 * OFFSET_ADDRESS_SIZE; while (pfrom != loc) *--pto = *--pfrom; PREFIX(store_op2) (op, loc, arg1, arg2); } /* P points to just after a ^ in PATTERN. Return true if that ^ comes after an alternative or a begin-subexpression. We assume there is at least one character before the ^. */ static boolean PREFIX(at_begline_loc_p) (pattern, p, syntax) const CHAR_T *pattern, *p; reg_syntax_t syntax; { const CHAR_T *prev = p - 2; boolean prev_prev_backslash = prev > pattern && prev[-1] == '\\'; return /* After a subexpression? */ (*prev == '(' && (syntax & RE_NO_BK_PARENS || prev_prev_backslash)) /* After an alternative? */ || (*prev == '|' && (syntax & RE_NO_BK_VBAR || prev_prev_backslash)); } /* The dual of at_begline_loc_p. This one is for $. We assume there is at least one character after the $, i.e., `P < PEND'. */ static boolean PREFIX(at_endline_loc_p) (p, pend, syntax) const CHAR_T *p, *pend; reg_syntax_t syntax; { const CHAR_T *next = p; boolean next_backslash = *next == '\\'; const CHAR_T *next_next = p + 1 < pend ? p + 1 : 0; return /* Before a subexpression? */ (syntax & RE_NO_BK_PARENS ? *next == ')' : next_backslash && next_next && *next_next == ')') /* Before an alternative? */ || (syntax & RE_NO_BK_VBAR ? *next == '|' : next_backslash && next_next && *next_next == '|'); } #else /* not INSIDE_RECURSION */ /* Returns true if REGNUM is in one of COMPILE_STACK's elements and false if it's not. */ static boolean group_in_compile_stack (compile_stack, regnum) compile_stack_type compile_stack; regnum_t regnum; { int this_element; for (this_element = compile_stack.avail - 1; this_element >= 0; this_element--) if (compile_stack.stack[this_element].regnum == regnum) return true; return false; } #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION #ifdef WCHAR /* This insert space, which size is "num", into the pattern at "loc". "end" must point the end of the allocated buffer. */ static void insert_space (num, loc, end) int num; CHAR_T *loc; CHAR_T *end; { register CHAR_T *pto = end; register CHAR_T *pfrom = end - num; while (pfrom >= loc) *pto-- = *pfrom--; } #endif /* WCHAR */ #ifdef WCHAR static reg_errcode_t wcs_compile_range (range_start_char, p_ptr, pend, translate, syntax, b, char_set) CHAR_T range_start_char; const CHAR_T **p_ptr, *pend; CHAR_T *char_set, *b; RE_TRANSLATE_TYPE translate; reg_syntax_t syntax; { const CHAR_T *p = *p_ptr; CHAR_T range_start, range_end; reg_errcode_t ret; # ifdef _LIBC uint32_t nrules; uint32_t start_val, end_val; # endif if (p == pend) return REG_ERANGE; # ifdef _LIBC nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); if (nrules != 0) { const char *collseq = (const char *) _NL_CURRENT(LC_COLLATE, _NL_COLLATE_COLLSEQWC); const unsigned char *extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); if (range_start_char < -1) { /* range_start is a collating symbol. */ int32_t *wextra; /* Retreive the index and get collation sequence value. 
*/ wextra = (int32_t*)(extra + char_set[-range_start_char]); start_val = wextra[1 + *wextra]; } else start_val = collseq_table_lookup(collseq, TRANSLATE(range_start_char)); end_val = collseq_table_lookup (collseq, TRANSLATE (p[0])); /* Report an error if the range is empty and the syntax prohibits this. */ ret = ((syntax & RE_NO_EMPTY_RANGES) && (start_val > end_val))? REG_ERANGE : REG_NOERROR; /* Insert space to the end of the char_ranges. */ insert_space(2, b - char_set[5] - 2, b - 1); *(b - char_set[5] - 2) = (wchar_t)start_val; *(b - char_set[5] - 1) = (wchar_t)end_val; char_set[4]++; /* ranges_index */ } else # endif { range_start = (range_start_char >= 0)? TRANSLATE (range_start_char): range_start_char; range_end = TRANSLATE (p[0]); /* Report an error if the range is empty and the syntax prohibits this. */ ret = ((syntax & RE_NO_EMPTY_RANGES) && (range_start > range_end))? REG_ERANGE : REG_NOERROR; /* Insert space to the end of the char_ranges. */ insert_space(2, b - char_set[5] - 2, b - 1); *(b - char_set[5] - 2) = range_start; *(b - char_set[5] - 1) = range_end; char_set[4]++; /* ranges_index */ } /* Have to increment the pointer into the pattern string, so the caller isn't still at the ending character. */ (*p_ptr)++; return ret; } #else /* BYTE */ /* Read the ending character of a range (in a bracket expression) from the uncompiled pattern *P_PTR (which ends at PEND). We assume the starting character is in `P[-2]'. (`P[-1]' is the character `-'.) Then we set the translation of all bits between the starting and ending characters (inclusive) in the compiled pattern B. Return an error code. We use these short variable names so we can use the same macros as `regex_compile' itself. */ static reg_errcode_t byte_compile_range (range_start_char, p_ptr, pend, translate, syntax, b) unsigned int range_start_char; const char **p_ptr, *pend; RE_TRANSLATE_TYPE translate; reg_syntax_t syntax; unsigned char *b; { unsigned this_char; const char *p = *p_ptr; reg_errcode_t ret; # if _LIBC const unsigned char *collseq; unsigned int start_colseq; unsigned int end_colseq; # else unsigned end_char; # endif if (p == pend) return REG_ERANGE; /* Have to increment the pointer into the pattern string, so the caller isn't still at the ending character. */ (*p_ptr)++; /* Report an error if the range is empty and the syntax prohibits this. */ ret = syntax & RE_NO_EMPTY_RANGES ? REG_ERANGE : REG_NOERROR; # if _LIBC collseq = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_COLLSEQMB); start_colseq = collseq[(unsigned char) TRANSLATE (range_start_char)]; end_colseq = collseq[(unsigned char) TRANSLATE (p[0])]; for (this_char = 0; this_char <= (unsigned char) -1; ++this_char) { unsigned int this_colseq = collseq[(unsigned char) TRANSLATE (this_char)]; if (start_colseq <= this_colseq && this_colseq <= end_colseq) { SET_LIST_BIT (TRANSLATE (this_char)); ret = REG_NOERROR; } } # else /* Here we see why `this_char' has to be larger than an `unsigned char' -- we would otherwise go into an infinite loop, since all characters <= 0xff. */ range_start_char = TRANSLATE (range_start_char); /* TRANSLATE(p[0]) is casted to char (not unsigned char) in TRANSLATE, and some compilers cast it to int implicitly, so following for_loop may fall to (almost) infinite loop. e.g. If translate[p[0]] = 0xff, end_char may equals to 0xffffffff. To avoid this, we cast p[0] to unsigned int and truncate it. 
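   (Numeric example, added for clarity: if TRANSLATE(p[0]) came out as
   0xffffffff through sign extension of 0xff, masking with
   (1 << BYTEWIDTH) - 1, i.e. 0xff, brings end_char back to 0xff, so the
   loop below terminates.)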
*/ end_char = ((unsigned)TRANSLATE(p[0]) & ((1 << BYTEWIDTH) - 1)); for (this_char = range_start_char; this_char <= end_char; ++this_char) { SET_LIST_BIT (TRANSLATE (this_char)); ret = REG_NOERROR; } # endif return ret; } #endif /* WCHAR */ /* re_compile_fastmap computes a ``fastmap'' for the compiled pattern in BUFP. A fastmap records which of the (1 << BYTEWIDTH) possible characters can start a string that matches the pattern. This fastmap is used by re_search to skip quickly over impossible starting points. The caller must supply the address of a (1 << BYTEWIDTH)-byte data area as BUFP->fastmap. We set the `fastmap', `fastmap_accurate', and `can_be_null' fields in the pattern buffer. Returns 0 if we succeed, -2 if an internal error. */ #ifdef WCHAR /* local function for re_compile_fastmap. truncate wchar_t character to char. */ static unsigned char truncate_wchar (CHAR_T c); static unsigned char truncate_wchar (c) CHAR_T c; { unsigned char buf[MB_CUR_MAX]; mbstate_t state; int retval; memset (&state, '\0', sizeof (state)); # ifdef _LIBC retval = __wcrtomb (buf, c, &state); # else retval = wcrtomb (buf, c, &state); # endif return retval > 0 ? buf[0] : (unsigned char) c; } #endif /* WCHAR */ static int PREFIX(re_compile_fastmap) (bufp) struct re_pattern_buffer *bufp; { int j, k; #ifdef MATCH_MAY_ALLOCATE PREFIX(fail_stack_type) fail_stack; #endif #ifndef REGEX_MALLOC char *destination; #endif register char *fastmap = bufp->fastmap; #ifdef WCHAR /* We need to cast pattern to (wchar_t*), because we casted this compiled pattern to (char*) in regex_compile. */ UCHAR_T *pattern = (UCHAR_T*)bufp->buffer; register UCHAR_T *pend = (UCHAR_T*) (bufp->buffer + bufp->used); #else /* BYTE */ UCHAR_T *pattern = bufp->buffer; register UCHAR_T *pend = pattern + bufp->used; #endif /* WCHAR */ UCHAR_T *p = pattern; #ifdef REL_ALLOC /* This holds the pointer to the failure stack, when it is allocated relocatably. */ fail_stack_elt_t *failure_stack_ptr; #endif /* Assume that each path through the pattern can be null until proven otherwise. We set this false at the bottom of switch statement, to which we get only if a particular path doesn't match the empty string. */ boolean path_can_be_null = true; /* We aren't doing a `succeed_n' to begin with. */ boolean succeed_n_p = false; assert (fastmap != NULL && p != NULL); INIT_FAIL_STACK (); bzero (fastmap, 1 << BYTEWIDTH); /* Assume nothing's valid. */ bufp->fastmap_accurate = 1; /* It will be when we're done. */ bufp->can_be_null = 0; while (1) { if (p == pend || *p == (UCHAR_T) succeed) { /* We have reached the (effective) end of pattern. */ if (!FAIL_STACK_EMPTY ()) { bufp->can_be_null |= path_can_be_null; /* Reset for next path. */ path_can_be_null = true; p = fail_stack.stack[--fail_stack.avail].pointer; continue; } else break; } /* We should never be about to go beyond the end of the pattern. */ assert (p < pend); switch (SWITCH_ENUM_CAST ((re_opcode_t) *p++)) { /* I guess the idea here is to simply not bother with a fastmap if a backreference is used, since it's too hard to figure out the fastmap for the corresponding group. Setting `can_be_null' stops `re_search_2' from using the fastmap, so that is all we do. */ case duplicate: bufp->can_be_null = 1; goto done; /* Following are the cases which match a character. These end with `break'. 
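   (Reader's note, not in the original text: an `exactn' opcode is
   followed by a length byte and then the literal characters, so p[1] is
   the first character of the literal and the only character that can
   start a match; that single fastmap entry is what the case below
   records.)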
*/ #ifdef WCHAR case exactn: fastmap[truncate_wchar(p[1])] = 1; break; #else /* BYTE */ case exactn: fastmap[p[1]] = 1; break; #endif /* WCHAR */ #ifdef MBS_SUPPORT case exactn_bin: fastmap[p[1]] = 1; break; #endif #ifdef WCHAR /* It is hard to distinguish fastmap from (multi byte) characters which depends on current locale. */ case charset: case charset_not: case wordchar: case notwordchar: bufp->can_be_null = 1; goto done; #else /* BYTE */ case charset: for (j = *p++ * BYTEWIDTH - 1; j >= 0; j--) if (p[j / BYTEWIDTH] & (1 << (j % BYTEWIDTH))) fastmap[j] = 1; break; case charset_not: /* Chars beyond end of map must be allowed. */ for (j = *p * BYTEWIDTH; j < (1 << BYTEWIDTH); j++) fastmap[j] = 1; for (j = *p++ * BYTEWIDTH - 1; j >= 0; j--) if (!(p[j / BYTEWIDTH] & (1 << (j % BYTEWIDTH)))) fastmap[j] = 1; break; case wordchar: for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) == Sword) fastmap[j] = 1; break; case notwordchar: for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) != Sword) fastmap[j] = 1; break; #endif /* WCHAR */ case anychar: { int fastmap_newline = fastmap['\n']; /* `.' matches anything ... */ for (j = 0; j < (1 << BYTEWIDTH); j++) fastmap[j] = 1; /* ... except perhaps newline. */ if (!(bufp->syntax & RE_DOT_NEWLINE)) fastmap['\n'] = fastmap_newline; /* Return if we have already set `can_be_null'; if we have, then the fastmap is irrelevant. Something's wrong here. */ else if (bufp->can_be_null) goto done; /* Otherwise, have to check alternative paths. */ break; } #ifdef emacs case syntaxspec: k = *p++; for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) == (enum syntaxcode) k) fastmap[j] = 1; break; case notsyntaxspec: k = *p++; for (j = 0; j < (1 << BYTEWIDTH); j++) if (SYNTAX (j) != (enum syntaxcode) k) fastmap[j] = 1; break; /* All cases after this match the empty string. These end with `continue'. */ case before_dot: case at_dot: case after_dot: continue; #endif /* emacs */ case no_op: case begline: case endline: case begbuf: case endbuf: case wordbound: case notwordbound: case wordbeg: case wordend: case push_dummy_failure: continue; case jump_n: case pop_failure_jump: case maybe_pop_jump: case jump: case jump_past_alt: case dummy_failure_jump: EXTRACT_NUMBER_AND_INCR (j, p); p += j; if (j > 0) continue; /* Jump backward implies we just went through the body of a loop and matched nothing. Opcode jumped to should be `on_failure_jump' or `succeed_n'. Just treat it like an ordinary jump. For a * loop, it has pushed its failure point already; if so, discard that as redundant. */ if ((re_opcode_t) *p != on_failure_jump && (re_opcode_t) *p != succeed_n) continue; p++; EXTRACT_NUMBER_AND_INCR (j, p); p += j; /* If what's on the stack is where we are now, pop it. */ if (!FAIL_STACK_EMPTY () && fail_stack.stack[fail_stack.avail - 1].pointer == p) fail_stack.avail--; continue; case on_failure_jump: case on_failure_keep_string_jump: handle_on_failure_jump: EXTRACT_NUMBER_AND_INCR (j, p); /* For some patterns, e.g., `(a?)?', `p+j' here points to the end of the pattern. We don't want to push such a point, since when we restore it above, entering the switch will increment `p' past the end of the pattern. We don't need to push such a point since we obviously won't find any more fastmap entries beyond `pend'. Such a pattern can match the null string, though. */ if (p + j < pend) { if (!PUSH_PATTERN_OP (p + j, fail_stack)) { RESET_FAIL_STACK (); return -2; } } else bufp->can_be_null = 1; if (succeed_n_p) { EXTRACT_NUMBER_AND_INCR (k, p); /* Skip the n. 
*/ succeed_n_p = false; } continue; case succeed_n: /* Get to the number of times to succeed. */ p += OFFSET_ADDRESS_SIZE; /* Increment p past the n for when k != 0. */ EXTRACT_NUMBER_AND_INCR (k, p); if (k == 0) { p -= 2 * OFFSET_ADDRESS_SIZE; succeed_n_p = true; /* Spaghetti code alert. */ goto handle_on_failure_jump; } continue; case set_number_at: p += 2 * OFFSET_ADDRESS_SIZE; continue; case start_memory: case stop_memory: p += 2; continue; default: abort (); /* We have listed all the cases. */ } /* switch *p++ */ /* Getting here means we have found the possible starting characters for one path of the pattern -- and that the empty string does not match. We need not follow this path further. Instead, look at the next alternative (remembered on the stack), or quit if no more. The test at the top of the loop does these things. */ path_can_be_null = false; p = pend; } /* while p */ /* Set `can_be_null' for the last path (also the first path, if the pattern is empty). */ bufp->can_be_null |= path_can_be_null; done: RESET_FAIL_STACK (); return 0; } #else /* not INSIDE_RECURSION */ int re_compile_fastmap (bufp) struct re_pattern_buffer *bufp; { # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) return wcs_re_compile_fastmap(bufp); else # endif return byte_re_compile_fastmap(bufp); } /* re_compile_fastmap */ #ifdef _LIBC weak_alias (__re_compile_fastmap, re_compile_fastmap) #endif /* Set REGS to hold NUM_REGS registers, storing them in STARTS and ENDS. Subsequent matches using PATTERN_BUFFER and REGS will use this memory for recording register information. STARTS and ENDS must be allocated using the malloc library routine, and must each be at least NUM_REGS * sizeof (regoff_t) bytes long. If NUM_REGS == 0, then subsequent matches should allocate their own register data. Unless this function is called, the first search or match using PATTERN_BUFFER will allocate its own register data, without freeing the old data. */ void re_set_registers (bufp, regs, num_regs, starts, ends) struct re_pattern_buffer *bufp; struct re_registers *regs; unsigned num_regs; regoff_t *starts, *ends; { if (num_regs) { bufp->regs_allocated = REGS_REALLOCATE; regs->num_regs = num_regs; regs->start = starts; regs->end = ends; } else { bufp->regs_allocated = REGS_UNALLOCATED; regs->num_regs = 0; regs->start = regs->end = (regoff_t *) 0; } } #ifdef _LIBC weak_alias (__re_set_registers, re_set_registers) #endif /* Searching routines. */ /* Like re_search_2, below, but only one string is specified, and doesn't let you say where to stop matching. */ int re_search (bufp, string, size, startpos, range, regs) struct re_pattern_buffer *bufp; const char *string; int size, startpos, range; struct re_registers *regs; { return re_search_2 (bufp, NULL, 0, string, size, startpos, range, regs, size); } #ifdef _LIBC weak_alias (__re_search, re_search) #endif /* Using the compiled pattern in BUFP->buffer, first tries to match the virtual concatenation of STRING1 and STRING2, starting first at index STARTPOS, then at STARTPOS + 1, and so on. STRING1 and STRING2 have length SIZE1 and SIZE2, respectively. RANGE is how far to scan while trying to match. RANGE = 0 means try only at STARTPOS; in general, the last start tried is STARTPOS + RANGE. In REGS, return the indices of the virtual concatenation of STRING1 and STRING2 that matched the entire BUFP->buffer and its contained subexpressions. Do not consider matching one past the index STOP in the virtual concatenation of STRING1 and STRING2. 
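   As an illustrative sketch only (`buf' is assumed to be a pattern
   buffer compiled elsewhere, e.g. with re_compile_pattern):

     struct re_registers regs;
     int pos = re_search_2 (&buf, "ab", 2, "cd", 2, 0, 4, &regs, 4);

   scans the 4-character virtual string `abcd', trying start positions
   0 through 4 and never matching past index 4.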
We return either the position in the strings at which the match was found, -1 if no match, or -2 if error (such as failure stack overflow). */ int re_search_2 (bufp, string1, size1, string2, size2, startpos, range, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int startpos; int range; struct re_registers *regs; int stop; { # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) return wcs_re_search_2 (bufp, string1, size1, string2, size2, startpos, range, regs, stop); else # endif return byte_re_search_2 (bufp, string1, size1, string2, size2, startpos, range, regs, stop); } /* re_search_2 */ #ifdef _LIBC weak_alias (__re_search_2, re_search_2) #endif #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION #ifdef MATCH_MAY_ALLOCATE # define FREE_VAR(var) if (var) REGEX_FREE (var); var = NULL #else # define FREE_VAR(var) if (var) free (var); var = NULL #endif #ifdef WCHAR # define MAX_ALLOCA_SIZE 2000 # define FREE_WCS_BUFFERS() \ do { \ if (size1 > MAX_ALLOCA_SIZE) \ { \ free (wcs_string1); \ free (mbs_offset1); \ } \ else \ { \ FREE_VAR (wcs_string1); \ FREE_VAR (mbs_offset1); \ } \ if (size2 > MAX_ALLOCA_SIZE) \ { \ free (wcs_string2); \ free (mbs_offset2); \ } \ else \ { \ FREE_VAR (wcs_string2); \ FREE_VAR (mbs_offset2); \ } \ } while (0) #endif static int PREFIX(re_search_2) (bufp, string1, size1, string2, size2, startpos, range, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int startpos; int range; struct re_registers *regs; int stop; { int val; register char *fastmap = bufp->fastmap; register RE_TRANSLATE_TYPE translate = bufp->translate; int total_size = size1 + size2; int endpos = startpos + range; #ifdef WCHAR /* We need wchar_t* buffers correspond to cstring1, cstring2. */ wchar_t *wcs_string1 = NULL, *wcs_string2 = NULL; /* We need the size of wchar_t buffers correspond to csize1, csize2. */ int wcs_size1 = 0, wcs_size2 = 0; /* offset buffer for optimizatoin. See convert_mbs_to_wc. */ int *mbs_offset1 = NULL, *mbs_offset2 = NULL; /* They hold whether each wchar_t is binary data or not. */ char *is_binary = NULL; #endif /* WCHAR */ /* Check for out-of-range STARTPOS. */ if (startpos < 0 || startpos > total_size) return -1; /* Fix up RANGE if it might eventually take us outside the virtual concatenation of STRING1 and STRING2. Make sure we won't move STARTPOS below 0 or above TOTAL_SIZE. */ if (endpos < 0) range = 0 - startpos; else if (endpos > total_size) range = total_size - startpos; /* If the search isn't to be a backwards one, don't waste time in a search for a pattern that must be anchored. */ if (bufp->used > 0 && range > 0 && ((re_opcode_t) bufp->buffer[0] == begbuf /* `begline' is like `begbuf' if it cannot match at newlines. */ || ((re_opcode_t) bufp->buffer[0] == begline && !bufp->newline_anchor))) { if (startpos > 0) return -1; else range = 1; } #ifdef emacs /* In a forward search for something that starts with \=. don't keep searching past point. */ if (bufp->used > 0 && (re_opcode_t) bufp->buffer[0] == at_dot && range > 0) { range = PT - startpos; if (range <= 0) return -1; } #endif /* emacs */ /* Update the fastmap now if not correct already. */ if (fastmap && !bufp->fastmap_accurate) if (re_compile_fastmap (bufp) == -2) return -2; #ifdef WCHAR /* Allocate wchar_t array for wcs_string1 and wcs_string2 and fill them with converted string. 
*/ if (size1 != 0) { if (size1 > MAX_ALLOCA_SIZE) { wcs_string1 = TALLOC (size1 + 1, CHAR_T); mbs_offset1 = TALLOC (size1 + 1, int); is_binary = TALLOC (size1 + 1, char); } else { wcs_string1 = REGEX_TALLOC (size1 + 1, CHAR_T); mbs_offset1 = REGEX_TALLOC (size1 + 1, int); is_binary = REGEX_TALLOC (size1 + 1, char); } if (!wcs_string1 || !mbs_offset1 || !is_binary) { if (size1 > MAX_ALLOCA_SIZE) { free (wcs_string1); free (mbs_offset1); free (is_binary); } else { FREE_VAR (wcs_string1); FREE_VAR (mbs_offset1); FREE_VAR (is_binary); } return -2; } wcs_size1 = convert_mbs_to_wcs(wcs_string1, string1, size1, mbs_offset1, is_binary); wcs_string1[wcs_size1] = L'\0'; /* for a sentinel */ if (size1 > MAX_ALLOCA_SIZE) free (is_binary); else FREE_VAR (is_binary); } if (size2 != 0) { if (size2 > MAX_ALLOCA_SIZE) { wcs_string2 = TALLOC (size2 + 1, CHAR_T); mbs_offset2 = TALLOC (size2 + 1, int); is_binary = TALLOC (size2 + 1, char); } else { wcs_string2 = REGEX_TALLOC (size2 + 1, CHAR_T); mbs_offset2 = REGEX_TALLOC (size2 + 1, int); is_binary = REGEX_TALLOC (size2 + 1, char); } if (!wcs_string2 || !mbs_offset2 || !is_binary) { FREE_WCS_BUFFERS (); if (size2 > MAX_ALLOCA_SIZE) free (is_binary); else FREE_VAR (is_binary); return -2; } wcs_size2 = convert_mbs_to_wcs(wcs_string2, string2, size2, mbs_offset2, is_binary); wcs_string2[wcs_size2] = L'\0'; /* for a sentinel */ if (size2 > MAX_ALLOCA_SIZE) free (is_binary); else FREE_VAR (is_binary); } #endif /* WCHAR */ /* Loop through the string, looking for a place to start matching. */ for (;;) { /* If a fastmap is supplied, skip quickly over characters that cannot be the start of a match. If the pattern can match the null string, however, we don't need to skip characters; we want the first null string. */ if (fastmap && startpos < total_size && !bufp->can_be_null) { if (range > 0) /* Searching forwards. */ { register const char *d; register int lim = 0; int irange = range; if (startpos < size1 && startpos + range >= size1) lim = range - (size1 - startpos); d = (startpos >= size1 ? string2 - size1 : string1) + startpos; /* Written out as an if-else to avoid testing `translate' inside the loop. */ if (translate) while (range > lim && !fastmap[(unsigned char) translate[(unsigned char) *d++]]) range--; else while (range > lim && !fastmap[(unsigned char) *d++]) range--; startpos += irange - range; } else /* Searching backwards. */ { register CHAR_T c = (size1 == 0 || startpos >= size1 ? string2[startpos - size1] : string1[startpos]); if (!fastmap[(unsigned char) TRANSLATE (c)]) goto advance; } } /* If can't match the null string, and that's all we have left, fail. 
*/ if (range >= 0 && startpos == total_size && fastmap && !bufp->can_be_null) { #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return -1; } #ifdef WCHAR val = wcs_re_match_2_internal (bufp, string1, size1, string2, size2, startpos, regs, stop, wcs_string1, wcs_size1, wcs_string2, wcs_size2, mbs_offset1, mbs_offset2); #else /* BYTE */ val = byte_re_match_2_internal (bufp, string1, size1, string2, size2, startpos, regs, stop); #endif /* BYTE */ #ifndef REGEX_MALLOC # ifdef C_ALLOCA alloca (0); # endif #endif if (val >= 0) { #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return startpos; } if (val == -2) { #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return -2; } advance: if (!range) break; else if (range > 0) { range--; startpos++; } else { range++; startpos--; } } #ifdef WCHAR FREE_WCS_BUFFERS (); #endif return -1; } #ifdef WCHAR /* This converts PTR, a pointer into one of the search wchar_t strings `string1' and `string2' into an multibyte string offset from the beginning of that string. We use mbs_offset to optimize. See convert_mbs_to_wcs. */ # define POINTER_TO_OFFSET(ptr) \ (FIRST_STRING_P (ptr) \ ? ((regoff_t)(mbs_offset1 != NULL? mbs_offset1[(ptr)-string1] : 0)) \ : ((regoff_t)((mbs_offset2 != NULL? mbs_offset2[(ptr)-string2] : 0) \ + csize1))) #else /* BYTE */ /* This converts PTR, a pointer into one of the search strings `string1' and `string2' into an offset from the beginning of that string. */ # define POINTER_TO_OFFSET(ptr) \ (FIRST_STRING_P (ptr) \ ? ((regoff_t) ((ptr) - string1)) \ : ((regoff_t) ((ptr) - string2 + size1))) #endif /* WCHAR */ /* Macros for dealing with the split strings in re_match_2. */ #define MATCHING_IN_FIRST_STRING (dend == end_match_1) /* Call before fetching a character with *d. This switches over to string2 if necessary. */ #define PREFETCH() \ while (d == dend) \ { \ /* End of string2 => fail. */ \ if (dend == end_match_2) \ goto fail; \ /* End of string1 => advance to string2. */ \ d = string2; \ dend = end_match_2; \ } /* Test if at very beginning or at very end of the virtual concatenation of `string1' and `string2'. If only one string, it's `string2'. */ #define AT_STRINGS_BEG(d) ((d) == (size1 ? string1 : string2) || !size2) #define AT_STRINGS_END(d) ((d) == end2) /* Test if D points to a character which is word-constituent. We have two special cases to check for: if past the end of string1, look at the first character in string2; and if before the beginning of string2, look at the last character in string1. */ #ifdef WCHAR /* Use internationalized API instead of SYNTAX. */ # define WORDCHAR_P(d) \ (iswalnum ((wint_t)((d) == end1 ? *string2 \ : (d) == string2 - 1 ? *(end1 - 1) : *(d))) != 0 \ || ((d) == end1 ? *string2 \ : (d) == string2 - 1 ? *(end1 - 1) : *(d)) == L'_') #else /* BYTE */ # define WORDCHAR_P(d) \ (SYNTAX ((d) == end1 ? *string2 \ : (d) == string2 - 1 ? *(end1 - 1) : *(d)) \ == Sword) #endif /* WCHAR */ /* Disabled due to a compiler bug -- see comment at case wordbound */ #if 0 /* Test if the character before D and the one at D differ with respect to being word-constituent. */ #define AT_WORD_BOUNDARY(d) \ (AT_STRINGS_BEG (d) || AT_STRINGS_END (d) \ || WORDCHAR_P (d - 1) != WORDCHAR_P (d)) #endif /* Free everything we malloc. 
*/ #ifdef MATCH_MAY_ALLOCATE # ifdef WCHAR # define FREE_VARIABLES() \ do { \ REGEX_FREE_STACK (fail_stack.stack); \ FREE_VAR (regstart); \ FREE_VAR (regend); \ FREE_VAR (old_regstart); \ FREE_VAR (old_regend); \ FREE_VAR (best_regstart); \ FREE_VAR (best_regend); \ FREE_VAR (reg_info); \ FREE_VAR (reg_dummy); \ FREE_VAR (reg_info_dummy); \ if (!cant_free_wcs_buf) \ { \ FREE_VAR (string1); \ FREE_VAR (string2); \ FREE_VAR (mbs_offset1); \ FREE_VAR (mbs_offset2); \ } \ } while (0) # else /* BYTE */ # define FREE_VARIABLES() \ do { \ REGEX_FREE_STACK (fail_stack.stack); \ FREE_VAR (regstart); \ FREE_VAR (regend); \ FREE_VAR (old_regstart); \ FREE_VAR (old_regend); \ FREE_VAR (best_regstart); \ FREE_VAR (best_regend); \ FREE_VAR (reg_info); \ FREE_VAR (reg_dummy); \ FREE_VAR (reg_info_dummy); \ } while (0) # endif /* WCHAR */ #else # ifdef WCHAR # define FREE_VARIABLES() \ do { \ if (!cant_free_wcs_buf) \ { \ FREE_VAR (string1); \ FREE_VAR (string2); \ FREE_VAR (mbs_offset1); \ FREE_VAR (mbs_offset2); \ } \ } while (0) # else /* BYTE */ # define FREE_VARIABLES() ((void)0) /* Do nothing! But inhibit gcc warning. */ # endif /* WCHAR */ #endif /* not MATCH_MAY_ALLOCATE */ /* These values must meet several constraints. They must not be valid register values; since we have a limit of 255 registers (because we use only one byte in the pattern for the register number), we can use numbers larger than 255. They must differ by 1, because of NUM_FAILURE_ITEMS above. And the value for the lowest register must be larger than the value for the highest register, so we do not try to actually save any registers when none are active. */ #define NO_HIGHEST_ACTIVE_REG (1 << BYTEWIDTH) #define NO_LOWEST_ACTIVE_REG (NO_HIGHEST_ACTIVE_REG + 1) #else /* not INSIDE_RECURSION */ /* Matching routines. */ #ifndef emacs /* Emacs never uses this. */ /* re_match is like re_match_2 except it takes only a single string. */ int re_match (bufp, string, size, pos, regs) struct re_pattern_buffer *bufp; const char *string; int size, pos; struct re_registers *regs; { int result; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) result = wcs_re_match_2_internal (bufp, NULL, 0, string, size, pos, regs, size, NULL, 0, NULL, 0, NULL, NULL); else # endif result = byte_re_match_2_internal (bufp, NULL, 0, string, size, pos, regs, size); # ifndef REGEX_MALLOC # ifdef C_ALLOCA alloca (0); # endif # endif return result; } # ifdef _LIBC weak_alias (__re_match, re_match) # endif #endif /* not emacs */ #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION static boolean PREFIX(group_match_null_string_p) _RE_ARGS ((UCHAR_T **p, UCHAR_T *end, PREFIX(register_info_type) *reg_info)); static boolean PREFIX(alt_match_null_string_p) _RE_ARGS ((UCHAR_T *p, UCHAR_T *end, PREFIX(register_info_type) *reg_info)); static boolean PREFIX(common_op_match_null_string_p) _RE_ARGS ((UCHAR_T **p, UCHAR_T *end, PREFIX(register_info_type) *reg_info)); static int PREFIX(bcmp_translate) _RE_ARGS ((const CHAR_T *s1, const CHAR_T *s2, int len, char *translate)); #else /* not INSIDE_RECURSION */ /* re_match_2 matches the compiled pattern in BUFP against the the (virtual) concatenation of STRING1 and STRING2 (of length SIZE1 and SIZE2, respectively). We start matching at POS, and stop matching at STOP. If REGS is non-null and the `no_sub' field of BUFP is nonzero, we store offsets for the substring each group matched in REGS. See the documentation for exactly how many groups we fill. We return -1 if no match, -2 if an internal error (such as the failure stack overflowing). 
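   (Illustrative sketch only, with `buf' assumed to be an
   already-compiled pattern buffer:

     struct re_registers regs;
     int len = re_match_2 (&buf, "ab", 2, "cd", 2, 0, &regs, 4);

   asks whether the pattern matches the virtual string `abcd' starting
   exactly at position 0.)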
Otherwise, we return the length of the matched substring. */ int re_match_2 (bufp, string1, size1, string2, size2, pos, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int pos; struct re_registers *regs; int stop; { int result; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) result = wcs_re_match_2_internal (bufp, string1, size1, string2, size2, pos, regs, stop, NULL, 0, NULL, 0, NULL, NULL); else # endif result = byte_re_match_2_internal (bufp, string1, size1, string2, size2, pos, regs, stop); #ifndef REGEX_MALLOC # ifdef C_ALLOCA alloca (0); # endif #endif return result; } #ifdef _LIBC weak_alias (__re_match_2, re_match_2) #endif #endif /* not INSIDE_RECURSION */ #ifdef INSIDE_RECURSION #ifdef WCHAR static int count_mbs_length PARAMS ((int *, int)); /* This check the substring (from 0, to length) of the multibyte string, to which offset_buffer correspond. And count how many wchar_t_characters the substring occupy. We use offset_buffer to optimization. See convert_mbs_to_wcs. */ static int count_mbs_length(offset_buffer, length) int *offset_buffer; int length; { int upper, lower; /* Check whether the size is valid. */ if (length < 0) return -1; if (offset_buffer == NULL) return 0; /* If there are no multibyte character, offset_buffer[i] == i. Optmize for this case. */ if (offset_buffer[length] == length) return length; /* Set up upper with length. (because for all i, offset_buffer[i] >= i) */ upper = length; lower = 0; while (true) { int middle = (lower + upper) / 2; if (middle == lower || middle == upper) break; if (offset_buffer[middle] > length) upper = middle; else if (offset_buffer[middle] < length) lower = middle; else return middle; } return -1; } #endif /* WCHAR */ /* This is a separate function so that we can force an alloca cleanup afterwards. */ #ifdef WCHAR static int wcs_re_match_2_internal (bufp, cstring1, csize1, cstring2, csize2, pos, regs, stop, string1, size1, string2, size2, mbs_offset1, mbs_offset2) struct re_pattern_buffer *bufp; const char *cstring1, *cstring2; int csize1, csize2; int pos; struct re_registers *regs; int stop; /* string1 == string2 == NULL means string1/2, size1/2 and mbs_offset1/2 need seting up in this function. */ /* We need wchar_t* buffers correspond to cstring1, cstring2. */ wchar_t *string1, *string2; /* We need the size of wchar_t buffers correspond to csize1, csize2. */ int size1, size2; /* offset buffer for optimizatoin. See convert_mbs_to_wc. */ int *mbs_offset1, *mbs_offset2; #else /* BYTE */ static int byte_re_match_2_internal (bufp, string1, size1,string2, size2, pos, regs, stop) struct re_pattern_buffer *bufp; const char *string1, *string2; int size1, size2; int pos; struct re_registers *regs; int stop; #endif /* BYTE */ { /* General temporaries. */ int mcnt; UCHAR_T *p1; #ifdef WCHAR /* They hold whether each wchar_t is binary data or not. */ char *is_binary = NULL; /* If true, we can't free string1/2, mbs_offset1/2. */ int cant_free_wcs_buf = 1; #endif /* WCHAR */ /* Just past the end of the corresponding string. */ const CHAR_T *end1, *end2; /* Pointers into string1 and string2, just past the last characters in each to consider matching. */ const CHAR_T *end_match_1, *end_match_2; /* Where we are in the data, and the end of the current string. */ const CHAR_T *d, *dend; /* Where we are in the pattern, and the end of the pattern. 
*/ #ifdef WCHAR UCHAR_T *pattern, *p; register UCHAR_T *pend; #else /* BYTE */ UCHAR_T *p = bufp->buffer; register UCHAR_T *pend = p + bufp->used; #endif /* WCHAR */ /* Mark the opcode just after a start_memory, so we can test for an empty subpattern when we get to the stop_memory. */ UCHAR_T *just_past_start_mem = 0; /* We use this to map every character in the string. */ RE_TRANSLATE_TYPE translate = bufp->translate; /* Failure point stack. Each place that can handle a failure further down the line pushes a failure point on this stack. It consists of restart, regend, and reg_info for all registers corresponding to the subexpressions we're currently inside, plus the number of such registers, and, finally, two char *'s. The first char * is where to resume scanning the pattern; the second one is where to resume scanning the strings. If the latter is zero, the failure point is a ``dummy''; if a failure happens and the failure point is a dummy, it gets discarded and the next next one is tried. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, this is global. */ PREFIX(fail_stack_type) fail_stack; #endif #ifdef DEBUG static unsigned failure_id; unsigned nfailure_points_pushed = 0, nfailure_points_popped = 0; #endif #ifdef REL_ALLOC /* This holds the pointer to the failure stack, when it is allocated relocatably. */ fail_stack_elt_t *failure_stack_ptr; #endif /* We fill all the registers internally, independent of what we return, for use in backreferences. The number here includes an element for register zero. */ size_t num_regs = bufp->re_nsub + 1; /* The currently active registers. */ active_reg_t lowest_active_reg = NO_LOWEST_ACTIVE_REG; active_reg_t highest_active_reg = NO_HIGHEST_ACTIVE_REG; /* Information on the contents of registers. These are pointers into the input strings; they record just what was matched (on this attempt) by a subexpression part of the pattern, that is, the regnum-th regstart pointer points to where in the pattern we began matching and the regnum-th regend points to right after where we stopped matching the regnum-th subexpression. (The zeroth register keeps track of what the whole pattern matches.) */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. */ const CHAR_T **regstart, **regend; #endif /* If a group that's operated upon by a repetition operator fails to match anything, then the register for its start will need to be restored because it will have been set to wherever in the string we are when we last see its open-group operator. Similarly for a register's end. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. */ const CHAR_T **old_regstart, **old_regend; #endif /* The is_active field of reg_info helps us keep track of which (possibly nested) subexpressions we are currently in. The matched_something field of reg_info[reg_num] helps us tell whether or not we have matched any of the pattern so far this time through the reg_num-th subexpression. These two fields get reset each time through any loop their register is in. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, this is global. */ PREFIX(register_info_type) *reg_info; #endif /* The following record the register info as found in the above variables when we find a match better than any we've seen before. This happens as we backtrack through the failure points, which in turn happens only if we have not yet matched the entire string. */ unsigned best_regs_set = false; #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. 
*/ const CHAR_T **best_regstart, **best_regend; #endif /* Logically, this is `best_regend[0]'. But we don't want to have to allocate space for that if we're not allocating space for anything else (see below). Also, we never need info about register 0 for any of the other register vectors, and it seems rather a kludge to treat `best_regend' differently than the rest. So we keep track of the end of the best match so far in a separate variable. We initialize this to NULL so that when we backtrack the first time and need to test it, it's not garbage. */ const CHAR_T *match_end = NULL; /* This helps SET_REGS_MATCHED avoid doing redundant work. */ int set_regs_matched_done = 0; /* Used when we pop values we don't care about. */ #ifdef MATCH_MAY_ALLOCATE /* otherwise, these are global. */ const CHAR_T **reg_dummy; PREFIX(register_info_type) *reg_info_dummy; #endif #ifdef DEBUG /* Counts the total number of registers pushed. */ unsigned num_regs_pushed = 0; #endif DEBUG_PRINT1 ("\n\nEntering re_match_2.\n"); INIT_FAIL_STACK (); #ifdef MATCH_MAY_ALLOCATE /* Do not bother to initialize all the register variables if there are no groups in the pattern, as it takes a fair amount of time. If there are groups, we include space for register 0 (the whole pattern), even though we never use it, since it simplifies the array indexing. We should fix this. */ if (bufp->re_nsub) { regstart = REGEX_TALLOC (num_regs, const CHAR_T *); regend = REGEX_TALLOC (num_regs, const CHAR_T *); old_regstart = REGEX_TALLOC (num_regs, const CHAR_T *); old_regend = REGEX_TALLOC (num_regs, const CHAR_T *); best_regstart = REGEX_TALLOC (num_regs, const CHAR_T *); best_regend = REGEX_TALLOC (num_regs, const CHAR_T *); reg_info = REGEX_TALLOC (num_regs, PREFIX(register_info_type)); reg_dummy = REGEX_TALLOC (num_regs, const CHAR_T *); reg_info_dummy = REGEX_TALLOC (num_regs, PREFIX(register_info_type)); if (!(regstart && regend && old_regstart && old_regend && reg_info && best_regstart && best_regend && reg_dummy && reg_info_dummy)) { FREE_VARIABLES (); return -2; } } else { /* We must initialize all our variables to NULL, so that `FREE_VARIABLES' doesn't try to free them. */ regstart = regend = old_regstart = old_regend = best_regstart = best_regend = reg_dummy = NULL; reg_info = reg_info_dummy = (PREFIX(register_info_type) *) NULL; } #endif /* MATCH_MAY_ALLOCATE */ /* The starting position is bogus. */ #ifdef WCHAR if (pos < 0 || pos > csize1 + csize2) #else /* BYTE */ if (pos < 0 || pos > size1 + size2) #endif { FREE_VARIABLES (); return -1; } #ifdef WCHAR /* Allocate wchar_t array for string1 and string2 and fill them with converted string. */ if (string1 == NULL && string2 == NULL) { /* We need seting up buffers here. */ /* We must free wcs buffers in this function. 
*/
      cant_free_wcs_buf = 0;
      if (csize1 != 0)
        {
          string1 = REGEX_TALLOC (csize1 + 1, CHAR_T);
          mbs_offset1 = REGEX_TALLOC (csize1 + 1, int);
          is_binary = REGEX_TALLOC (csize1 + 1, char);
          if (!string1 || !mbs_offset1 || !is_binary)
            {
              FREE_VAR (string1);
              FREE_VAR (mbs_offset1);
              FREE_VAR (is_binary);
              return -2;
            }
          size1 = convert_mbs_to_wcs(string1, cstring1, csize1,
                                     mbs_offset1, is_binary);
          string1[size1] = L'\0'; /* for a sentinel */
          FREE_VAR (is_binary);
        }
      if (csize2 != 0)
        {
          string2 = REGEX_TALLOC (csize2 + 1, CHAR_T);
          mbs_offset2 = REGEX_TALLOC (csize2 + 1, int);
          is_binary = REGEX_TALLOC (csize2 + 1, char);
          if (!string2 || !mbs_offset2 || !is_binary)
            {
              FREE_VAR (string1);
              FREE_VAR (mbs_offset1);
              FREE_VAR (string2);
              FREE_VAR (mbs_offset2);
              FREE_VAR (is_binary);
              return -2;
            }
          size2 = convert_mbs_to_wcs(string2, cstring2, csize2,
                                     mbs_offset2, is_binary);
          string2[size2] = L'\0'; /* for a sentinel */
          FREE_VAR (is_binary);
        }
    }

  /* We need to cast pattern to (wchar_t*), because we cast this compiled
     pattern to (char*) in regex_compile.  */
  p = pattern = (CHAR_T*)bufp->buffer;
  pend = (CHAR_T*)(bufp->buffer + bufp->used);
#endif /* WCHAR */

  /* Initialize subexpression text positions to -1 to mark ones that no
     start_memory/stop_memory has been seen for.  Also initialize the
     register information struct.  */
  for (mcnt = 1; (unsigned) mcnt < num_regs; mcnt++)
    {
      regstart[mcnt] = regend[mcnt]
        = old_regstart[mcnt] = old_regend[mcnt] = REG_UNSET_VALUE;
      REG_MATCH_NULL_STRING_P (reg_info[mcnt]) = MATCH_NULL_UNSET_VALUE;
      IS_ACTIVE (reg_info[mcnt]) = 0;
      MATCHED_SOMETHING (reg_info[mcnt]) = 0;
      EVER_MATCHED_SOMETHING (reg_info[mcnt]) = 0;
    }

  /* We move `string1' into `string2' if the latter's empty -- but not if
     `string1' is null.  */
  if (size2 == 0 && string1 != NULL)
    {
      string2 = string1;
      size2 = size1;
      string1 = 0;
      size1 = 0;
#ifdef WCHAR
      mbs_offset2 = mbs_offset1;
      csize2 = csize1;
      mbs_offset1 = NULL;
      csize1 = 0;
#endif
    }
  end1 = string1 + size1;
  end2 = string2 + size2;

  /* Compute where to stop matching, within the two strings.  */
#ifdef WCHAR
  if (stop <= csize1)
    {
      mcnt = count_mbs_length(mbs_offset1, stop);
      end_match_1 = string1 + mcnt;
      end_match_2 = string2;
    }
  else
    {
      if (stop > csize1 + csize2)
        stop = csize1 + csize2;
      end_match_1 = end1;
      mcnt = count_mbs_length(mbs_offset2, stop-csize1);
      end_match_2 = string2 + mcnt;
    }
  if (mcnt < 0)
    { /* count_mbs_length returned an error.  */
      FREE_VARIABLES ();
      return -1;
    }
#else
  if (stop <= size1)
    {
      end_match_1 = string1 + stop;
      end_match_2 = string2;
    }
  else
    {
      end_match_1 = end1;
      end_match_2 = string2 + stop - size1;
    }
#endif /* WCHAR */

  /* `p' scans through the pattern as `d' scans through the data.
     `dend' is the end of the input string that `d' points within.  `d'
     is advanced into the following input string whenever necessary, but
     this happens before fetching; therefore, at the beginning of the
     loop, `d' can be pointing at the end of a string, but it cannot
     equal `string2'.  */
#ifdef WCHAR
  if (size1 > 0 && pos <= csize1)
    {
      mcnt = count_mbs_length(mbs_offset1, pos);
      d = string1 + mcnt;
      dend = end_match_1;
    }
  else
    {
      mcnt = count_mbs_length(mbs_offset2, pos-csize1);
      d = string2 + mcnt;
      dend = end_match_2;
    }
  if (mcnt < 0)
    { /* count_mbs_length returned an error.  */
      FREE_VARIABLES ();
      return -1;
    }
#else
  if (size1 > 0 && pos <= size1)
    {
      d = string1 + pos;
      dend = end_match_1;
    }
  else
    {
      d = string2 + pos - size1;
      dend = end_match_2;
    }
#endif /* WCHAR */

  DEBUG_PRINT1 ("The compiled pattern is:\n");
  DEBUG_PRINT_COMPILED_PATTERN (bufp, p, pend);
  DEBUG_PRINT1 ("The string to match is: `");
  DEBUG_PRINT_DOUBLE_STRING (d, string1, size1, string2, size2);
  DEBUG_PRINT1 ("'\n");

  /* This loops over pattern commands.
It exits by returning from the function if the match is complete, or it drops through if the match fails at this starting point in the input data. */ for (;;) { #ifdef _LIBC DEBUG_PRINT2 ("\n%p: ", p); #else DEBUG_PRINT2 ("\n0x%x: ", p); #endif if (p == pend) { /* End of pattern means we might have succeeded. */ DEBUG_PRINT1 ("end of pattern ... "); /* If we haven't matched the entire string, and we want the longest match, try backtracking. */ if (d != end_match_2) { /* 1 if this match ends in the same string (string1 or string2) as the best previous match. */ boolean same_str_p = (FIRST_STRING_P (match_end) == MATCHING_IN_FIRST_STRING); /* 1 if this match is the best seen so far. */ boolean best_match_p; /* AIX compiler got confused when this was combined with the previous declaration. */ if (same_str_p) best_match_p = d > match_end; else best_match_p = !MATCHING_IN_FIRST_STRING; DEBUG_PRINT1 ("backtracking.\n"); if (!FAIL_STACK_EMPTY ()) { /* More failure points to try. */ /* If exceeds best match so far, save it. */ if (!best_regs_set || best_match_p) { best_regs_set = true; match_end = d; DEBUG_PRINT1 ("\nSAVING match as best so far.\n"); for (mcnt = 1; (unsigned) mcnt < num_regs; mcnt++) { best_regstart[mcnt] = regstart[mcnt]; best_regend[mcnt] = regend[mcnt]; } } goto fail; } /* If no failure points, don't restore garbage. And if last match is real best match, don't restore second best one. */ else if (best_regs_set && !best_match_p) { restore_best_regs: /* Restore best match. It may happen that `dend == end_match_1' while the restored d is in string2. For example, the pattern `x.*y.*z' against the strings `x-' and `y-z-', if the two strings are not consecutive in memory. */ DEBUG_PRINT1 ("Restoring best registers.\n"); d = match_end; dend = ((d >= string1 && d <= end1) ? end_match_1 : end_match_2); for (mcnt = 1; (unsigned) mcnt < num_regs; mcnt++) { regstart[mcnt] = best_regstart[mcnt]; regend[mcnt] = best_regend[mcnt]; } } } /* d != end_match_2 */ succeed_label: DEBUG_PRINT1 ("Accepting match.\n"); /* If caller wants register contents data back, do it. */ if (regs && !bufp->no_sub) { /* Have the register data arrays been allocated? */ if (bufp->regs_allocated == REGS_UNALLOCATED) { /* No. So allocate them with malloc. We need one extra element beyond `num_regs' for the `-1' marker GNU code uses. */ regs->num_regs = MAX (RE_NREGS, num_regs + 1); regs->start = TALLOC (regs->num_regs, regoff_t); regs->end = TALLOC (regs->num_regs, regoff_t); if (regs->start == NULL || regs->end == NULL) { FREE_VARIABLES (); return -2; } bufp->regs_allocated = REGS_REALLOCATE; } else if (bufp->regs_allocated == REGS_REALLOCATE) { /* Yes. If we need more elements than were already allocated, reallocate them. If we need fewer, just leave it alone. */ if (regs->num_regs < num_regs + 1) { regs->num_regs = num_regs + 1; RETALLOC (regs->start, regs->num_regs, regoff_t); RETALLOC (regs->end, regs->num_regs, regoff_t); if (regs->start == NULL || regs->end == NULL) { FREE_VARIABLES (); return -2; } } } else { /* These braces fend off a "empty body in an else-statement" warning under GCC when assert expands to nothing. */ assert (bufp->regs_allocated == REGS_FIXED); } /* Convert the pointer data in `regstart' and `regend' to indices. Register zero has to be set differently, since we haven't kept track of any info for it. */ if (regs->num_regs > 0) { regs->start[0] = pos; #ifdef WCHAR if (MATCHING_IN_FIRST_STRING) regs->end[0] = mbs_offset1 != NULL ? 
mbs_offset1[d-string1] : 0; else regs->end[0] = csize1 + (mbs_offset2 != NULL ? mbs_offset2[d-string2] : 0); #else regs->end[0] = (MATCHING_IN_FIRST_STRING ? ((regoff_t) (d - string1)) : ((regoff_t) (d - string2 + size1))); #endif /* WCHAR */ } /* Go through the first `min (num_regs, regs->num_regs)' registers, since that is all we initialized. */ for (mcnt = 1; (unsigned) mcnt < MIN (num_regs, regs->num_regs); mcnt++) { if (REG_UNSET (regstart[mcnt]) || REG_UNSET (regend[mcnt])) regs->start[mcnt] = regs->end[mcnt] = -1; else { regs->start[mcnt] = (regoff_t) POINTER_TO_OFFSET (regstart[mcnt]); regs->end[mcnt] = (regoff_t) POINTER_TO_OFFSET (regend[mcnt]); } } /* If the regs structure we return has more elements than were in the pattern, set the extra elements to -1. If we (re)allocated the registers, this is the case, because we always allocate enough to have at least one -1 at the end. */ for (mcnt = num_regs; (unsigned) mcnt < regs->num_regs; mcnt++) regs->start[mcnt] = regs->end[mcnt] = -1; } /* regs && !bufp->no_sub */ DEBUG_PRINT4 ("%u failure points pushed, %u popped (%u remain).\n", nfailure_points_pushed, nfailure_points_popped, nfailure_points_pushed - nfailure_points_popped); DEBUG_PRINT2 ("%u registers pushed.\n", num_regs_pushed); #ifdef WCHAR if (MATCHING_IN_FIRST_STRING) mcnt = mbs_offset1 != NULL ? mbs_offset1[d-string1] : 0; else mcnt = (mbs_offset2 != NULL ? mbs_offset2[d-string2] : 0) + csize1; mcnt -= pos; #else mcnt = d - pos - (MATCHING_IN_FIRST_STRING ? string1 : string2 - size1); #endif /* WCHAR */ DEBUG_PRINT2 ("Returning %d from re_match_2.\n", mcnt); FREE_VARIABLES (); return mcnt; } /* Otherwise match next pattern command. */ switch (SWITCH_ENUM_CAST ((re_opcode_t) *p++)) { /* Ignore these. Used to ignore the n of succeed_n's which currently have n == 0. */ case no_op: DEBUG_PRINT1 ("EXECUTING no_op.\n"); break; case succeed: DEBUG_PRINT1 ("EXECUTING succeed.\n"); goto succeed_label; /* Match the next n pattern characters exactly. The following byte in the pattern defines n, and the n bytes after that are the characters to match. */ case exactn: #ifdef MBS_SUPPORT case exactn_bin: #endif mcnt = *p++; DEBUG_PRINT2 ("EXECUTING exactn %d.\n", mcnt); /* This is written out as an if-else so we don't waste time testing `translate' inside the loop. */ if (translate) { do { PREFETCH (); #ifdef WCHAR if (*d <= 0xff) { if ((UCHAR_T) translate[(unsigned char) *d++] != (UCHAR_T) *p++) goto fail; } else { if (*d++ != (CHAR_T) *p++) goto fail; } #else if ((UCHAR_T) translate[(unsigned char) *d++] != (UCHAR_T) *p++) goto fail; #endif /* WCHAR */ } while (--mcnt); } else { do { PREFETCH (); if (*d++ != (CHAR_T) *p++) goto fail; } while (--mcnt); } SET_REGS_MATCHED (); break; /* Match any character except possibly a newline or a null. */ case anychar: DEBUG_PRINT1 ("EXECUTING anychar.\n"); PREFETCH (); if ((!(bufp->syntax & RE_DOT_NEWLINE) && TRANSLATE (*d) == '\n') || (bufp->syntax & RE_DOT_NOT_NULL && TRANSLATE (*d) == '\000')) goto fail; SET_REGS_MATCHED (); DEBUG_PRINT2 (" Matched `%ld'.\n", (long int) *d); d++; break; case charset: case charset_not: { register UCHAR_T c; #ifdef WCHAR unsigned int i, char_class_length, coll_symbol_length, equiv_class_length, ranges_length, chars_length, length; CHAR_T *workp, *workp2, *charset_top; #define WORK_BUFFER_SIZE 128 CHAR_T str_buf[WORK_BUFFER_SIZE]; # ifdef _LIBC uint32_t nrules; # endif /* _LIBC */ #endif /* WCHAR */ boolean not = (re_opcode_t) *(p - 1) == charset_not; DEBUG_PRINT2 ("EXECUTING charset%s.\n", not ? 
"_not" : ""); PREFETCH (); c = TRANSLATE (*d); /* The character to match. */ #ifdef WCHAR # ifdef _LIBC nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); # endif /* _LIBC */ charset_top = p - 1; char_class_length = *p++; coll_symbol_length = *p++; equiv_class_length = *p++; ranges_length = *p++; chars_length = *p++; /* p points charset[6], so the address of the next instruction (charset[l+m+n+2o+k+p']) equals p[l+m+n+2*o+p'], where l=length of char_classes, m=length of collating_symbol, n=equivalence_class, o=length of char_range, p'=length of character. */ workp = p; /* Update p to indicate the next instruction. */ p += char_class_length + coll_symbol_length+ equiv_class_length + 2*ranges_length + chars_length; /* match with char_class? */ for (i = 0; i < char_class_length ; i += CHAR_CLASS_SIZE) { wctype_t wctype; uintptr_t alignedp = ((uintptr_t)workp + __alignof__(wctype_t) - 1) & ~(uintptr_t)(__alignof__(wctype_t) - 1); wctype = *((wctype_t*)alignedp); workp += CHAR_CLASS_SIZE; # ifdef _LIBC if (__iswctype((wint_t)c, wctype)) goto char_set_matched; # else if (iswctype((wint_t)c, wctype)) goto char_set_matched; # endif } /* match with collating_symbol? */ # ifdef _LIBC if (nrules != 0) { const unsigned char *extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); for (workp2 = workp + coll_symbol_length ; workp < workp2 ; workp++) { int32_t *wextra; wextra = (int32_t*)(extra + *workp++); for (i = 0; i < *wextra; ++i) if (TRANSLATE(d[i]) != wextra[1 + i]) break; if (i == *wextra) { /* Update d, however d will be incremented at char_set_matched:, we decrement d here. */ d += i - 1; goto char_set_matched; } } } else /* (nrules == 0) */ # endif /* If we can't look up collation data, we use wcscoll instead. */ { for (workp2 = workp + coll_symbol_length ; workp < workp2 ;) { const CHAR_T *backup_d = d, *backup_dend = dend; # ifdef _LIBC length = __wcslen (workp); # else length = wcslen (workp); # endif /* If wcscoll(the collating symbol, whole string) > 0, any substring of the string never match with the collating symbol. */ # ifdef _LIBC if (__wcscoll (workp, d) > 0) # else if (wcscoll (workp, d) > 0) # endif { workp += length + 1; continue; } /* First, we compare the collating symbol with the first character of the string. If it don't match, we add the next character to the compare buffer in turn. */ for (i = 0 ; i < WORK_BUFFER_SIZE-1 ; i++, d++) { int match; if (d == dend) { if (dend == end_match_2) break; d = string2; dend = end_match_2; } /* add next character to the compare buffer. */ str_buf[i] = TRANSLATE(*d); str_buf[i+1] = '\0'; # ifdef _LIBC match = __wcscoll (workp, str_buf); # else match = wcscoll (workp, str_buf); # endif if (match == 0) goto char_set_matched; if (match < 0) /* (str_buf > workp) indicate (str_buf + X > workp), because for all X (str_buf + X > str_buf). So we don't need continue this loop. */ break; /* Otherwise(str_buf < workp), (str_buf+next_character) may equals (workp). So we continue this loop. */ } /* not matched */ d = backup_d; dend = backup_dend; workp += length + 1; } } /* match with equivalence_class? */ # ifdef _LIBC if (nrules != 0) { const CHAR_T *backup_d = d, *backup_dend = dend; /* Try to match the equivalence class against those known to the collate implementation. */ const int32_t *table; const int32_t *weights; const int32_t *extra; const int32_t *indirect; int32_t idx, idx2; wint_t *cp; size_t len; /* This #include defines a local function! 
*/ table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEWC); weights = (const wint_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTWC); extra = (const wint_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAWC); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTWC); /* Write 1 collating element to str_buf, and get its index. */ idx2 = 0; for (i = 0 ; idx2 == 0 && i < WORK_BUFFER_SIZE - 1; i++) { cp = (wint_t*)str_buf; if (d == dend) { if (dend == end_match_2) break; d = string2; dend = end_match_2; } str_buf[i] = TRANSLATE(*(d+i)); str_buf[i+1] = '\0'; /* sentinel */ idx2 = findidx ((const wint_t**)&cp); } /* Update d, however d will be incremented at char_set_matched:, we decrement d here. */ d = backup_d + ((wchar_t*)cp - (wchar_t*)str_buf - 1); if (d >= dend) { if (dend == end_match_2) d = dend; else { d = string2; dend = end_match_2; } } len = weights[idx2]; for (workp2 = workp + equiv_class_length ; workp < workp2 ; workp++) { idx = (int32_t)*workp; /* We already checked idx != 0 in regex_compile. */ if (idx2 != 0 && len == weights[idx]) { int cnt = 0; while (cnt < len && (weights[idx + 1 + cnt] == weights[idx2 + 1 + cnt])) ++cnt; if (cnt == len) goto char_set_matched; } } /* not matched */ d = backup_d; dend = backup_dend; } else /* (nrules == 0) */ # endif /* If we can't look up collation data, we use wcscoll instead. */ { for (workp2 = workp + equiv_class_length ; workp < workp2 ;) { const CHAR_T *backup_d = d, *backup_dend = dend; # ifdef _LIBC length = __wcslen (workp); # else length = wcslen (workp); # endif /* If wcscoll(the collating symbol, whole string) > 0, any substring of the string never match with the collating symbol. */ # ifdef _LIBC if (__wcscoll (workp, d) > 0) # else if (wcscoll (workp, d) > 0) # endif { workp += length + 1; break; } /* First, we compare the equivalence class with the first character of the string. If it don't match, we add the next character to the compare buffer in turn. */ for (i = 0 ; i < WORK_BUFFER_SIZE - 1 ; i++, d++) { int match; if (d == dend) { if (dend == end_match_2) break; d = string2; dend = end_match_2; } /* add next character to the compare buffer. */ str_buf[i] = TRANSLATE(*d); str_buf[i+1] = '\0'; # ifdef _LIBC match = __wcscoll (workp, str_buf); # else match = wcscoll (workp, str_buf); # endif if (match == 0) goto char_set_matched; if (match < 0) /* (str_buf > workp) indicate (str_buf + X > workp), because for all X (str_buf + X > str_buf). So we don't need continue this loop. */ break; /* Otherwise(str_buf < workp), (str_buf+next_character) may equals (workp). So we continue this loop. */ } /* not matched */ d = backup_d; dend = backup_dend; workp += length + 1; } } /* match with char_range? */ # ifdef _LIBC if (nrules != 0) { uint32_t collseqval; const char *collseq = (const char *) _NL_CURRENT(LC_COLLATE, _NL_COLLATE_COLLSEQWC); collseqval = collseq_table_lookup (collseq, c); for (; workp < p - chars_length ;) { uint32_t start_val, end_val; /* We already compute the collation sequence value of the characters (or collating symbols). */ start_val = (uint32_t) *workp++; /* range_start */ end_val = (uint32_t) *workp++; /* range_end */ if (start_val <= collseqval && collseqval <= end_val) goto char_set_matched; } } else # endif { /* We set range_start_char at str_buf[0], range_end_char at str_buf[4], and compared char at str_buf[2]. 
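Each of those slots is followed by a terminating 0, so the range start, the range end, and the character under test can be handed to wcscoll as three independent one-character strings.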
*/ str_buf[1] = 0; str_buf[2] = c; str_buf[3] = 0; str_buf[5] = 0; for (; workp < p - chars_length ;) { wchar_t *range_start_char, *range_end_char; /* match if (range_start_char <= c <= range_end_char). */ /* If range_start(or end) < 0, we assume -range_start(end) is the offset of the collating symbol which is specified as the character of the range start(end). */ /* range_start */ if (*workp < 0) range_start_char = charset_top - (*workp++); else { str_buf[0] = *workp++; range_start_char = str_buf; } /* range_end */ if (*workp < 0) range_end_char = charset_top - (*workp++); else { str_buf[4] = *workp++; range_end_char = str_buf + 4; } # ifdef _LIBC if (__wcscoll (range_start_char, str_buf+2) <= 0 && __wcscoll (str_buf+2, range_end_char) <= 0) # else if (wcscoll (range_start_char, str_buf+2) <= 0 && wcscoll (str_buf+2, range_end_char) <= 0) # endif goto char_set_matched; } } /* match with char? */ for (; workp < p ; workp++) if (c == *workp) goto char_set_matched; not = !not; char_set_matched: if (not) goto fail; #else /* Cast to `unsigned' instead of `unsigned char' in case the bit list is a full 32 bytes long. */ if (c < (unsigned) (*p * BYTEWIDTH) && p[1 + c / BYTEWIDTH] & (1 << (c % BYTEWIDTH))) not = !not; p += 1 + *p; if (!not) goto fail; #undef WORK_BUFFER_SIZE #endif /* WCHAR */ SET_REGS_MATCHED (); d++; break; } /* The beginning of a group is represented by start_memory. The arguments are the register number in the next byte, and the number of groups inner to this one in the next. The text matched within the group is recorded (in the internal registers data structure) under the register number. */ case start_memory: DEBUG_PRINT3 ("EXECUTING start_memory %ld (%ld):\n", (long int) *p, (long int) p[1]); /* Find out if this group can match the empty string. */ p1 = p; /* To send to group_match_null_string_p. */ if (REG_MATCH_NULL_STRING_P (reg_info[*p]) == MATCH_NULL_UNSET_VALUE) REG_MATCH_NULL_STRING_P (reg_info[*p]) = PREFIX(group_match_null_string_p) (&p1, pend, reg_info); /* Save the position in the string where we were the last time we were at this open-group operator in case the group is operated upon by a repetition operator, e.g., with `(a*)*b' against `ab'; then we want to ignore where we are now in the string in case this attempt to match fails. */ old_regstart[*p] = REG_MATCH_NULL_STRING_P (reg_info[*p]) ? REG_UNSET (regstart[*p]) ? d : regstart[*p] : regstart[*p]; DEBUG_PRINT2 (" old_regstart: %d\n", POINTER_TO_OFFSET (old_regstart[*p])); regstart[*p] = d; DEBUG_PRINT2 (" regstart: %d\n", POINTER_TO_OFFSET (regstart[*p])); IS_ACTIVE (reg_info[*p]) = 1; MATCHED_SOMETHING (reg_info[*p]) = 0; /* Clear this whenever we change the register activity status. */ set_regs_matched_done = 0; /* This is the new highest active register. */ highest_active_reg = *p; /* If nothing was active before, this is the new lowest active register. */ if (lowest_active_reg == NO_LOWEST_ACTIVE_REG) lowest_active_reg = *p; /* Move past the register number and inner group count. */ p += 2; just_past_start_mem = p; break; /* The stop_memory opcode represents the end of a group. Its arguments are the same as start_memory's: the register number, and the number of inner groups. 
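(For a pattern containing a single group with nothing nested inside it, both opcodes would carry register number 1 and an inner-group count of 0.)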
*/ case stop_memory: DEBUG_PRINT3 ("EXECUTING stop_memory %ld (%ld):\n", (long int) *p, (long int) p[1]); /* We need to save the string position the last time we were at this close-group operator in case the group is operated upon by a repetition operator, e.g., with `((a*)*(b*)*)*' against `aba'; then we want to ignore where we are now in the string in case this attempt to match fails. */ old_regend[*p] = REG_MATCH_NULL_STRING_P (reg_info[*p]) ? REG_UNSET (regend[*p]) ? d : regend[*p] : regend[*p]; DEBUG_PRINT2 (" old_regend: %d\n", POINTER_TO_OFFSET (old_regend[*p])); regend[*p] = d; DEBUG_PRINT2 (" regend: %d\n", POINTER_TO_OFFSET (regend[*p])); /* This register isn't active anymore. */ IS_ACTIVE (reg_info[*p]) = 0; /* Clear this whenever we change the register activity status. */ set_regs_matched_done = 0; /* If this was the only register active, nothing is active anymore. */ if (lowest_active_reg == highest_active_reg) { lowest_active_reg = NO_LOWEST_ACTIVE_REG; highest_active_reg = NO_HIGHEST_ACTIVE_REG; } else { /* We must scan for the new highest active register, since it isn't necessarily one less than now: consider (a(b)c(d(e)f)g). When group 3 ends, after the f), the new highest active register is 1. */ UCHAR_T r = *p - 1; while (r > 0 && !IS_ACTIVE (reg_info[r])) r--; /* If we end up at register zero, that means that we saved the registers as the result of an `on_failure_jump', not a `start_memory', and we jumped to past the innermost `stop_memory'. For example, in ((.)*) we save registers 1 and 2 as a result of the *, but when we pop back to the second ), we are at the stop_memory 1. Thus, nothing is active. */ if (r == 0) { lowest_active_reg = NO_LOWEST_ACTIVE_REG; highest_active_reg = NO_HIGHEST_ACTIVE_REG; } else highest_active_reg = r; } /* If just failed to match something this time around with a group that's operated on by a repetition operator, try to force exit from the ``loop'', and restore the register information for this group that we had before trying this last match. */ if ((!MATCHED_SOMETHING (reg_info[*p]) || just_past_start_mem == p - 1) && (p + 2) < pend) { boolean is_a_jump_n = false; p1 = p + 2; mcnt = 0; switch ((re_opcode_t) *p1++) { case jump_n: is_a_jump_n = true; case pop_failure_jump: case maybe_pop_jump: case jump: case dummy_failure_jump: EXTRACT_NUMBER_AND_INCR (mcnt, p1); if (is_a_jump_n) p1 += OFFSET_ADDRESS_SIZE; break; default: /* do nothing */ ; } p1 += mcnt; /* If the next operation is a jump backwards in the pattern to an on_failure_jump right before the start_memory corresponding to this stop_memory, exit from the loop by forcing a failure after pushing on the stack the on_failure_jump's jump in the pattern, and d. */ if (mcnt < 0 && (re_opcode_t) *p1 == on_failure_jump && (re_opcode_t) p1[1+OFFSET_ADDRESS_SIZE] == start_memory && p1[2+OFFSET_ADDRESS_SIZE] == *p) { /* If this group ever matched anything, then restore what its registers were before trying this last failed match, e.g., with `(a*)*b' against `ab' for regstart[1], and, e.g., with `((a*)*(b*)*)*' against `aba' for regend[3]. Also restore the registers for inner groups for, e.g., `((a*)(b*))*' against `aba' (register 3 would otherwise get trashed). */ if (EVER_MATCHED_SOMETHING (reg_info[*p])) { unsigned r; EVER_MATCHED_SOMETHING (reg_info[*p]) = 0; /* Restore this and inner groups' (if any) registers. */ for (r = *p; r < (unsigned) *p + (unsigned) *(p + 1); r++) { regstart[r] = old_regstart[r]; /* xx why this test? 
*/ if (old_regend[r] >= regstart[r]) regend[r] = old_regend[r]; } } p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); PUSH_FAILURE_POINT (p1 + mcnt, d, -2); goto fail; } } /* Move past the register number and the inner group count. */ p += 2; break; /* \ has been turned into a `duplicate' command which is followed by the numeric value of as the register number. */ case duplicate: { register const CHAR_T *d2, *dend2; int regno = *p++; /* Get which register to match against. */ DEBUG_PRINT2 ("EXECUTING duplicate %d.\n", regno); /* Can't back reference a group which we've never matched. */ if (REG_UNSET (regstart[regno]) || REG_UNSET (regend[regno])) goto fail; /* Where in input to try to start matching. */ d2 = regstart[regno]; /* Where to stop matching; if both the place to start and the place to stop matching are in the same string, then set to the place to stop, otherwise, for now have to use the end of the first string. */ dend2 = ((FIRST_STRING_P (regstart[regno]) == FIRST_STRING_P (regend[regno])) ? regend[regno] : end_match_1); for (;;) { /* If necessary, advance to next segment in register contents. */ while (d2 == dend2) { if (dend2 == end_match_2) break; if (dend2 == regend[regno]) break; /* End of string1 => advance to string2. */ d2 = string2; dend2 = regend[regno]; } /* At end of register contents => success */ if (d2 == dend2) break; /* If necessary, advance to next segment in data. */ PREFETCH (); /* How many characters left in this segment to match. */ mcnt = dend - d; /* Want how many consecutive characters we can match in one shot, so, if necessary, adjust the count. */ if (mcnt > dend2 - d2) mcnt = dend2 - d2; /* Compare that many; failure if mismatch, else move past them. */ if (translate ? PREFIX(bcmp_translate) (d, d2, mcnt, translate) : memcmp (d, d2, mcnt*sizeof(UCHAR_T))) goto fail; d += mcnt, d2 += mcnt; /* Do this because we've match some characters. */ SET_REGS_MATCHED (); } } break; /* begline matches the empty string at the beginning of the string (unless `not_bol' is set in `bufp'), and, if `newline_anchor' is set, after newlines. */ case begline: DEBUG_PRINT1 ("EXECUTING begline.\n"); if (AT_STRINGS_BEG (d)) { if (!bufp->not_bol) break; } else if (d[-1] == '\n' && bufp->newline_anchor) { break; } /* In all other cases, we fail. */ goto fail; /* endline is the dual of begline. */ case endline: DEBUG_PRINT1 ("EXECUTING endline.\n"); if (AT_STRINGS_END (d)) { if (!bufp->not_eol) break; } /* We have to ``prefetch'' the next character. */ else if ((d == end1 ? *string2 : *d) == '\n' && bufp->newline_anchor) { break; } goto fail; /* Match at the very beginning of the data. */ case begbuf: DEBUG_PRINT1 ("EXECUTING begbuf.\n"); if (AT_STRINGS_BEG (d)) break; goto fail; /* Match at the very end of the data. */ case endbuf: DEBUG_PRINT1 ("EXECUTING endbuf.\n"); if (AT_STRINGS_END (d)) break; goto fail; /* on_failure_keep_string_jump is used to optimize `.*\n'. It pushes NULL as the value for the string on the stack. Then `pop_failure_point' will keep the current value for the string, instead of restoring it. To see why, consider matching `foo\nbar' against `.*\n'. The .* matches the foo; then the . fails against the \n. But the next thing we want to do is match the \n against the \n; if we restored the string value, we would be back at the foo. Because this is used only in specific cases, we don't need to check all the things that `on_failure_jump' does, to make sure the right things get saved on the stack. Hence we don't share its code. 
The only reason to push anything on the stack at all is that otherwise we would have to change `anychar's code to do something besides goto fail in this case; that seems worse than this. */ case on_failure_keep_string_jump: DEBUG_PRINT1 ("EXECUTING on_failure_keep_string_jump"); EXTRACT_NUMBER_AND_INCR (mcnt, p); #ifdef _LIBC DEBUG_PRINT3 (" %d (to %p):\n", mcnt, p + mcnt); #else DEBUG_PRINT3 (" %d (to 0x%x):\n", mcnt, p + mcnt); #endif PUSH_FAILURE_POINT (p + mcnt, NULL, -2); break; /* Uses of on_failure_jump: Each alternative starts with an on_failure_jump that points to the beginning of the next alternative. Each alternative except the last ends with a jump that in effect jumps past the rest of the alternatives. (They really jump to the ending jump of the following alternative, because tensioning these jumps is a hassle.) Repeats start with an on_failure_jump that points past both the repetition text and either the following jump or pop_failure_jump back to this on_failure_jump. */ case on_failure_jump: on_failure: DEBUG_PRINT1 ("EXECUTING on_failure_jump"); EXTRACT_NUMBER_AND_INCR (mcnt, p); #ifdef _LIBC DEBUG_PRINT3 (" %d (to %p)", mcnt, p + mcnt); #else DEBUG_PRINT3 (" %d (to 0x%x)", mcnt, p + mcnt); #endif /* If this on_failure_jump comes right before a group (i.e., the original * applied to a group), save the information for that group and all inner ones, so that if we fail back to this point, the group's information will be correct. For example, in \(a*\)*\1, we need the preceding group, and in \(zz\(a*\)b*\)\2, we need the inner group. */ /* We can't use `p' to check ahead because we push a failure point to `p + mcnt' after we do this. */ p1 = p; /* We need to skip no_op's before we look for the start_memory in case this on_failure_jump is happening as the result of a completed succeed_n, as in \(a\)\{1,3\}b\1 against aba. */ while (p1 < pend && (re_opcode_t) *p1 == no_op) p1++; if (p1 < pend && (re_opcode_t) *p1 == start_memory) { /* We have a new highest active register now. This will get reset at the start_memory we are about to get to, but we will have saved all the registers relevant to this repetition op, as described above. */ highest_active_reg = *(p1 + 1) + *(p1 + 2); if (lowest_active_reg == NO_LOWEST_ACTIVE_REG) lowest_active_reg = *(p1 + 1); } DEBUG_PRINT1 (":\n"); PUSH_FAILURE_POINT (p + mcnt, d, -2); break; /* A smart repeat ends with `maybe_pop_jump'. We change it to either `pop_failure_jump' or `jump'. */ case maybe_pop_jump: EXTRACT_NUMBER_AND_INCR (mcnt, p); DEBUG_PRINT2 ("EXECUTING maybe_pop_jump %d.\n", mcnt); { register UCHAR_T *p2 = p; /* Compare the beginning of the repeat with what in the pattern follows its end. If we can establish that there is nothing that they would both match, i.e., that we would have to backtrack because of (as in, e.g., `a*a') then we can change to pop_failure_jump, because we'll never have to backtrack. This is not true in the case of alternatives: in `(a|ab)*' we do need to backtrack to the `ab' alternative (e.g., if the string was `ab'). But instead of trying to detect that here, the alternative has put on a dummy failure point which is what we will end up popping. */ /* Skip over open/close-group commands. If what follows this loop is a ...+ construct, look at what begins its body, since we will have to match at least one of that. 
*/ while (1) { if (p2 + 2 < pend && ((re_opcode_t) *p2 == stop_memory || (re_opcode_t) *p2 == start_memory)) p2 += 3; else if (p2 + 2 + 2 * OFFSET_ADDRESS_SIZE < pend && (re_opcode_t) *p2 == dummy_failure_jump) p2 += 2 + 2 * OFFSET_ADDRESS_SIZE; else break; } p1 = p + mcnt; /* p1[0] ... p1[2] are the `on_failure_jump' corresponding to the `maybe_finalize_jump' of this case. Examine what follows. */ /* If we're at the end of the pattern, we can change. */ if (p2 == pend) { /* Consider what happens when matching ":\(.*\)" against ":/". I don't really understand this code yet. */ p[-(1+OFFSET_ADDRESS_SIZE)] = (UCHAR_T) pop_failure_jump; DEBUG_PRINT1 (" End of pattern: change to `pop_failure_jump'.\n"); } else if ((re_opcode_t) *p2 == exactn #ifdef MBS_SUPPORT || (re_opcode_t) *p2 == exactn_bin #endif || (bufp->newline_anchor && (re_opcode_t) *p2 == endline)) { register UCHAR_T c = *p2 == (UCHAR_T) endline ? '\n' : p2[2]; if (((re_opcode_t) p1[1+OFFSET_ADDRESS_SIZE] == exactn #ifdef MBS_SUPPORT || (re_opcode_t) p1[1+OFFSET_ADDRESS_SIZE] == exactn_bin #endif ) && p1[3+OFFSET_ADDRESS_SIZE] != c) { p[-(1+OFFSET_ADDRESS_SIZE)] = (UCHAR_T) pop_failure_jump; #ifdef WCHAR DEBUG_PRINT3 (" %C != %C => pop_failure_jump.\n", (wint_t) c, (wint_t) p1[3+OFFSET_ADDRESS_SIZE]); #else DEBUG_PRINT3 (" %c != %c => pop_failure_jump.\n", (char) c, (char) p1[3+OFFSET_ADDRESS_SIZE]); #endif } #ifndef WCHAR else if ((re_opcode_t) p1[3] == charset || (re_opcode_t) p1[3] == charset_not) { int not = (re_opcode_t) p1[3] == charset_not; if (c < (unsigned) (p1[4] * BYTEWIDTH) && p1[5 + c / BYTEWIDTH] & (1 << (c % BYTEWIDTH))) not = !not; /* `not' is equal to 1 if c would match, which means that we can't change to pop_failure_jump. */ if (!not) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } } #endif /* not WCHAR */ } #ifndef WCHAR else if ((re_opcode_t) *p2 == charset) { /* We win if the first character of the loop is not part of the charset. */ if ((re_opcode_t) p1[3] == exactn && ! ((int) p2[1] * BYTEWIDTH > (int) p1[5] && (p2[2 + p1[5] / BYTEWIDTH] & (1 << (p1[5] % BYTEWIDTH))))) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } else if ((re_opcode_t) p1[3] == charset_not) { int idx; /* We win if the charset_not inside the loop lists every character listed in the charset after. */ for (idx = 0; idx < (int) p2[1]; idx++) if (! (p2[2 + idx] == 0 || (idx < (int) p1[4] && ((p2[2 + idx] & ~ p1[5 + idx]) == 0)))) break; if (idx == p2[1]) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } } else if ((re_opcode_t) p1[3] == charset) { int idx; /* We win if the charset inside the loop has no overlap with the one after the loop. */ for (idx = 0; idx < (int) p2[1] && idx < (int) p1[4]; idx++) if ((p2[2 + idx] & p1[5 + idx]) != 0) break; if (idx == p2[1] || idx == p1[4]) { p[-3] = (unsigned char) pop_failure_jump; DEBUG_PRINT1 (" No match => pop_failure_jump.\n"); } } } #endif /* not WCHAR */ } p -= OFFSET_ADDRESS_SIZE; /* Point at relative address again. */ if ((re_opcode_t) p[-1] != pop_failure_jump) { p[-1] = (UCHAR_T) jump; DEBUG_PRINT1 (" Match => jump.\n"); goto unconditional_jump; } /* Note fall through. */ /* The end of a simple repeat has a pop_failure_jump back to its matching on_failure_jump, where the latter will push a failure point. 
The pop_failure_jump takes off failure points put on by this pop_failure_jump's matching on_failure_jump; we got through the pattern to here from the matching on_failure_jump, so didn't fail. */ case pop_failure_jump: { /* We need to pass separate storage for the lowest and highest registers, even though we don't care about the actual values. Otherwise, we will restore only one register from the stack, since lowest will == highest in `pop_failure_point'. */ active_reg_t dummy_low_reg, dummy_high_reg; UCHAR_T *pdummy = NULL; const CHAR_T *sdummy = NULL; DEBUG_PRINT1 ("EXECUTING pop_failure_jump.\n"); POP_FAILURE_POINT (sdummy, pdummy, dummy_low_reg, dummy_high_reg, reg_dummy, reg_dummy, reg_info_dummy); } /* Note fall through. */ unconditional_jump: #ifdef _LIBC DEBUG_PRINT2 ("\n%p: ", p); #else DEBUG_PRINT2 ("\n0x%x: ", p); #endif /* Note fall through. */ /* Unconditionally jump (without popping any failure points). */ case jump: EXTRACT_NUMBER_AND_INCR (mcnt, p); /* Get the amount to jump. */ DEBUG_PRINT2 ("EXECUTING jump %d ", mcnt); p += mcnt; /* Do the jump. */ #ifdef _LIBC DEBUG_PRINT2 ("(to %p).\n", p); #else DEBUG_PRINT2 ("(to 0x%x).\n", p); #endif break; /* We need this opcode so we can detect where alternatives end in `group_match_null_string_p' et al. */ case jump_past_alt: DEBUG_PRINT1 ("EXECUTING jump_past_alt.\n"); goto unconditional_jump; /* Normally, the on_failure_jump pushes a failure point, which then gets popped at pop_failure_jump. We will end up at pop_failure_jump, also, and with a pattern of, say, `a+', we are skipping over the on_failure_jump, so we have to push something meaningless for pop_failure_jump to pop. */ case dummy_failure_jump: DEBUG_PRINT1 ("EXECUTING dummy_failure_jump.\n"); /* It doesn't matter what we push for the string here. What the code at `fail' tests is the value for the pattern. */ PUSH_FAILURE_POINT (NULL, NULL, -2); goto unconditional_jump; /* At the end of an alternative, we need to push a dummy failure point in case we are followed by a `pop_failure_jump', because we don't want the failure point for the alternative to be popped. For example, matching `(a|ab)*' against `aab' requires that we match the `ab' alternative. */ case push_dummy_failure: DEBUG_PRINT1 ("EXECUTING push_dummy_failure.\n"); /* See comments just above at `dummy_failure_jump' about the two zeroes. */ PUSH_FAILURE_POINT (NULL, NULL, -2); break; /* Have to succeed matching what follows at least n times. After that, handle like `on_failure_jump'. */ case succeed_n: EXTRACT_NUMBER (mcnt, p + OFFSET_ADDRESS_SIZE); DEBUG_PRINT2 ("EXECUTING succeed_n %d.\n", mcnt); assert (mcnt >= 0); /* Originally, this is how many times we HAVE to succeed. */ if (mcnt > 0) { mcnt--; p += OFFSET_ADDRESS_SIZE; STORE_NUMBER_AND_INCR (p, mcnt); #ifdef _LIBC DEBUG_PRINT3 (" Setting %p to %d.\n", p - OFFSET_ADDRESS_SIZE , mcnt); #else DEBUG_PRINT3 (" Setting 0x%x to %d.\n", p - OFFSET_ADDRESS_SIZE , mcnt); #endif } else if (mcnt == 0) { #ifdef _LIBC DEBUG_PRINT2 (" Setting two bytes from %p to no_op.\n", p + OFFSET_ADDRESS_SIZE); #else DEBUG_PRINT2 (" Setting two bytes from 0x%x to no_op.\n", p + OFFSET_ADDRESS_SIZE); #endif /* _LIBC */ #ifdef WCHAR p[1] = (UCHAR_T) no_op; #else p[2] = (UCHAR_T) no_op; p[3] = (UCHAR_T) no_op; #endif /* WCHAR */ goto on_failure; } break; case jump_n: EXTRACT_NUMBER (mcnt, p + OFFSET_ADDRESS_SIZE); DEBUG_PRINT2 ("EXECUTING jump_n %d.\n", mcnt); /* Originally, this is how many times we CAN jump. 
*/ if (mcnt) { mcnt--; STORE_NUMBER (p + OFFSET_ADDRESS_SIZE, mcnt); #ifdef _LIBC DEBUG_PRINT3 (" Setting %p to %d.\n", p + OFFSET_ADDRESS_SIZE, mcnt); #else DEBUG_PRINT3 (" Setting 0x%x to %d.\n", p + OFFSET_ADDRESS_SIZE, mcnt); #endif /* _LIBC */ goto unconditional_jump; } /* If don't have to jump any more, skip over the rest of command. */ else p += 2 * OFFSET_ADDRESS_SIZE; break; case set_number_at: { DEBUG_PRINT1 ("EXECUTING set_number_at.\n"); EXTRACT_NUMBER_AND_INCR (mcnt, p); p1 = p + mcnt; EXTRACT_NUMBER_AND_INCR (mcnt, p); #ifdef _LIBC DEBUG_PRINT3 (" Setting %p to %d.\n", p1, mcnt); #else DEBUG_PRINT3 (" Setting 0x%x to %d.\n", p1, mcnt); #endif STORE_NUMBER (p1, mcnt); break; } #if 0 /* The DEC Alpha C compiler 3.x generates incorrect code for the test WORDCHAR_P (d - 1) != WORDCHAR_P (d) in the expansion of AT_WORD_BOUNDARY, so this code is disabled. Expanding the macro and introducing temporary variables works around the bug. */ case wordbound: DEBUG_PRINT1 ("EXECUTING wordbound.\n"); if (AT_WORD_BOUNDARY (d)) break; goto fail; case notwordbound: DEBUG_PRINT1 ("EXECUTING notwordbound.\n"); if (AT_WORD_BOUNDARY (d)) goto fail; break; #else case wordbound: { boolean prevchar, thischar; DEBUG_PRINT1 ("EXECUTING wordbound.\n"); if (AT_STRINGS_BEG (d) || AT_STRINGS_END (d)) break; prevchar = WORDCHAR_P (d - 1); thischar = WORDCHAR_P (d); if (prevchar != thischar) break; goto fail; } case notwordbound: { boolean prevchar, thischar; DEBUG_PRINT1 ("EXECUTING notwordbound.\n"); if (AT_STRINGS_BEG (d) || AT_STRINGS_END (d)) goto fail; prevchar = WORDCHAR_P (d - 1); thischar = WORDCHAR_P (d); if (prevchar != thischar) goto fail; break; } #endif case wordbeg: DEBUG_PRINT1 ("EXECUTING wordbeg.\n"); if (!AT_STRINGS_END (d) && WORDCHAR_P (d) && (AT_STRINGS_BEG (d) || !WORDCHAR_P (d - 1))) break; goto fail; case wordend: DEBUG_PRINT1 ("EXECUTING wordend.\n"); if (!AT_STRINGS_BEG (d) && WORDCHAR_P (d - 1) && (AT_STRINGS_END (d) || !WORDCHAR_P (d))) break; goto fail; #ifdef emacs case before_dot: DEBUG_PRINT1 ("EXECUTING before_dot.\n"); if (PTR_CHAR_POS ((unsigned char *) d) >= point) goto fail; break; case at_dot: DEBUG_PRINT1 ("EXECUTING at_dot.\n"); if (PTR_CHAR_POS ((unsigned char *) d) != point) goto fail; break; case after_dot: DEBUG_PRINT1 ("EXECUTING after_dot.\n"); if (PTR_CHAR_POS ((unsigned char *) d) <= point) goto fail; break; case syntaxspec: DEBUG_PRINT2 ("EXECUTING syntaxspec %d.\n", mcnt); mcnt = *p++; goto matchsyntax; case wordchar: DEBUG_PRINT1 ("EXECUTING Emacs wordchar.\n"); mcnt = (int) Sword; matchsyntax: PREFETCH (); /* Can't use *d++ here; SYNTAX may be an unsafe macro. */ d++; if (SYNTAX (d[-1]) != (enum syntaxcode) mcnt) goto fail; SET_REGS_MATCHED (); break; case notsyntaxspec: DEBUG_PRINT2 ("EXECUTING notsyntaxspec %d.\n", mcnt); mcnt = *p++; goto matchnotsyntax; case notwordchar: DEBUG_PRINT1 ("EXECUTING Emacs notwordchar.\n"); mcnt = (int) Sword; matchnotsyntax: PREFETCH (); /* Can't use *d++ here; SYNTAX may be an unsafe macro. */ d++; if (SYNTAX (d[-1]) == (enum syntaxcode) mcnt) goto fail; SET_REGS_MATCHED (); break; #else /* not emacs */ case wordchar: DEBUG_PRINT1 ("EXECUTING non-Emacs wordchar.\n"); PREFETCH (); if (!WORDCHAR_P (d)) goto fail; SET_REGS_MATCHED (); d++; break; case notwordchar: DEBUG_PRINT1 ("EXECUTING non-Emacs notwordchar.\n"); PREFETCH (); if (WORDCHAR_P (d)) goto fail; SET_REGS_MATCHED (); d++; break; #endif /* not emacs */ default: abort (); } continue; /* Successfully executed one pattern command; keep going. 
*/ /* We goto here if a matching operation fails. */ fail: if (!FAIL_STACK_EMPTY ()) { /* A restart point is known. Restore to that state. */ DEBUG_PRINT1 ("\nFAIL:\n"); POP_FAILURE_POINT (d, p, lowest_active_reg, highest_active_reg, regstart, regend, reg_info); /* If this failure point is a dummy, try the next one. */ if (!p) goto fail; /* If we failed to the end of the pattern, don't examine *p. */ assert (p <= pend); if (p < pend) { boolean is_a_jump_n = false; /* If failed to a backwards jump that's part of a repetition loop, need to pop this failure point and use the next one. */ switch ((re_opcode_t) *p) { case jump_n: is_a_jump_n = true; case maybe_pop_jump: case pop_failure_jump: case jump: p1 = p + 1; EXTRACT_NUMBER_AND_INCR (mcnt, p1); p1 += mcnt; if ((is_a_jump_n && (re_opcode_t) *p1 == succeed_n) || (!is_a_jump_n && (re_opcode_t) *p1 == on_failure_jump)) goto fail; break; default: /* do nothing */ ; } } if (d >= string1 && d <= end1) dend = end_match_1; } else break; /* Matching at this starting point really fails. */ } /* for (;;) */ if (best_regs_set) goto restore_best_regs; FREE_VARIABLES (); return -1; /* Failure to match. */ } /* re_match_2 */ /* Subroutine definitions for re_match_2. */ /* We are passed P pointing to a register number after a start_memory. Return true if the pattern up to the corresponding stop_memory can match the empty string, and false otherwise. If we find the matching stop_memory, sets P to point to one past its number. Otherwise, sets P to an undefined byte less than or equal to END. We don't handle duplicates properly (yet). */ static boolean PREFIX(group_match_null_string_p) (p, end, reg_info) UCHAR_T **p, *end; PREFIX(register_info_type) *reg_info; { int mcnt; /* Point to after the args to the start_memory. */ UCHAR_T *p1 = *p + 2; while (p1 < end) { /* Skip over opcodes that can match nothing, and return true or false, as appropriate, when we get to one that can't, or to the matching stop_memory. */ switch ((re_opcode_t) *p1) { /* Could be either a loop or a series of alternatives. */ case on_failure_jump: p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); /* If the next operation is not a jump backwards in the pattern. */ if (mcnt >= 0) { /* Go through the on_failure_jumps of the alternatives, seeing if any of the alternatives cannot match nothing. The last alternative starts with only a jump, whereas the rest start with on_failure_jump and end with a jump, e.g., here is the pattern for `a|b|c': /on_failure_jump/0/6/exactn/1/a/jump_past_alt/0/6 /on_failure_jump/0/6/exactn/1/b/jump_past_alt/0/3 /exactn/1/c So, we have to first go through the first (n-1) alternatives and then deal with the last one separately. */ /* Deal with the first (n-1) alternatives, which start with an on_failure_jump (see above) that jumps to right past a jump_past_alt. */ while ((re_opcode_t) p1[mcnt-(1+OFFSET_ADDRESS_SIZE)] == jump_past_alt) { /* `mcnt' holds how many bytes long the alternative is, including the ending `jump_past_alt' and its number. */ if (!PREFIX(alt_match_null_string_p) (p1, p1 + mcnt - (1 + OFFSET_ADDRESS_SIZE), reg_info)) return false; /* Move to right after this alternative, including the jump_past_alt. */ p1 += mcnt; /* Break if it's the beginning of an n-th alternative that doesn't begin with an on_failure_jump. */ if ((re_opcode_t) *p1 != on_failure_jump) break; /* Still have to check that it's not an n-th alternative that starts with an on_failure_jump. 
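If this on_failure_jump is not an alternative separator (its jump target is not preceded by a jump_past_alt), it must be the start of the last alternative itself, so we back p1 up to point at it and leave the loop.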
*/ p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); if ((re_opcode_t) p1[mcnt-(1+OFFSET_ADDRESS_SIZE)] != jump_past_alt) { /* Get to the beginning of the n-th alternative. */ p1 -= 1 + OFFSET_ADDRESS_SIZE; break; } } /* Deal with the last alternative: go back and get number of the `jump_past_alt' just before it. `mcnt' contains the length of the alternative. */ EXTRACT_NUMBER (mcnt, p1 - OFFSET_ADDRESS_SIZE); if (!PREFIX(alt_match_null_string_p) (p1, p1 + mcnt, reg_info)) return false; p1 += mcnt; /* Get past the n-th alternative. */ } /* if mcnt > 0 */ break; case stop_memory: assert (p1[1] == **p); *p = p1 + 2; return true; default: if (!PREFIX(common_op_match_null_string_p) (&p1, end, reg_info)) return false; } } /* while p1 < end */ return false; } /* group_match_null_string_p */ /* Similar to group_match_null_string_p, but doesn't deal with alternatives: It expects P to be the first byte of a single alternative and END one byte past the last. The alternative can contain groups. */ static boolean PREFIX(alt_match_null_string_p) (p, end, reg_info) UCHAR_T *p, *end; PREFIX(register_info_type) *reg_info; { int mcnt; UCHAR_T *p1 = p; while (p1 < end) { /* Skip over opcodes that can match nothing, and break when we get to one that can't. */ switch ((re_opcode_t) *p1) { /* It's a loop. */ case on_failure_jump: p1++; EXTRACT_NUMBER_AND_INCR (mcnt, p1); p1 += mcnt; break; default: if (!PREFIX(common_op_match_null_string_p) (&p1, end, reg_info)) return false; } } /* while p1 < end */ return true; } /* alt_match_null_string_p */ /* Deals with the ops common to group_match_null_string_p and alt_match_null_string_p. Sets P to one after the op and its arguments, if any. */ static boolean PREFIX(common_op_match_null_string_p) (p, end, reg_info) UCHAR_T **p, *end; PREFIX(register_info_type) *reg_info; { int mcnt; boolean ret; int reg_no; UCHAR_T *p1 = *p; switch ((re_opcode_t) *p1++) { case no_op: case begline: case endline: case begbuf: case endbuf: case wordbeg: case wordend: case wordbound: case notwordbound: #ifdef emacs case before_dot: case at_dot: case after_dot: #endif break; case start_memory: reg_no = *p1; assert (reg_no > 0 && reg_no <= MAX_REGNUM); ret = PREFIX(group_match_null_string_p) (&p1, end, reg_info); /* Have to set this here in case we're checking a group which contains a group and a back reference to it. */ if (REG_MATCH_NULL_STRING_P (reg_info[reg_no]) == MATCH_NULL_UNSET_VALUE) REG_MATCH_NULL_STRING_P (reg_info[reg_no]) = ret; if (!ret) return false; break; /* If this is an optimized succeed_n for zero times, make the jump. */ case jump: EXTRACT_NUMBER_AND_INCR (mcnt, p1); if (mcnt >= 0) p1 += mcnt; else return false; break; case succeed_n: /* Get to the number of times to succeed. */ p1 += OFFSET_ADDRESS_SIZE; EXTRACT_NUMBER_AND_INCR (mcnt, p1); if (mcnt == 0) { p1 -= 2 * OFFSET_ADDRESS_SIZE; EXTRACT_NUMBER_AND_INCR (mcnt, p1); p1 += mcnt; } else return false; break; case duplicate: if (!REG_MATCH_NULL_STRING_P (reg_info[*p1])) return false; break; case set_number_at: p1 += 2 * OFFSET_ADDRESS_SIZE; default: /* All other opcodes mean we cannot match the empty string. */ return false; } *p = p1; return true; } /* common_op_match_null_string_p */ /* Return zero if TRANSLATE[S1] and TRANSLATE[S2] are identical for LEN bytes; nonzero otherwise. 
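In the WCHAR build only character values that fit in a byte are mapped through TRANSLATE; wider characters are compared directly.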
*/ static int PREFIX(bcmp_translate) (s1, s2, len, translate) const CHAR_T *s1, *s2; register int len; RE_TRANSLATE_TYPE translate; { register const UCHAR_T *p1 = (const UCHAR_T *) s1; register const UCHAR_T *p2 = (const UCHAR_T *) s2; while (len) { #ifdef WCHAR if (((*p1<=0xff)?translate[*p1++]:*p1++) != ((*p2<=0xff)?translate[*p2++]:*p2++)) return 1; #else /* BYTE */ if (translate[*p1++] != translate[*p2++]) return 1; #endif /* WCHAR */ len--; } return 0; } #else /* not INSIDE_RECURSION */ /* Entry points for GNU code. */ /* re_compile_pattern is the GNU regular expression compiler: it compiles PATTERN (of length SIZE) and puts the result in BUFP. Returns 0 if the pattern was valid, otherwise an error string. Assumes the `allocated' (and perhaps `buffer') and `translate' fields are set in BUFP on entry. We call regex_compile to do the actual compilation. */ const char * re_compile_pattern (pattern, length, bufp) const char *pattern; size_t length; struct re_pattern_buffer *bufp; { reg_errcode_t ret; /* GNU code is written to assume at least RE_NREGS registers will be set (and at least one extra will be -1). */ bufp->regs_allocated = REGS_UNALLOCATED; /* And GNU code determines whether or not to get register information by passing null for the REGS argument to re_match, etc., not by setting no_sub. */ bufp->no_sub = 0; /* Match anchors at newline. */ bufp->newline_anchor = 1; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) ret = wcs_regex_compile (pattern, length, re_syntax_options, bufp); else # endif ret = byte_regex_compile (pattern, length, re_syntax_options, bufp); if (!ret) return NULL; return gettext (re_error_msgid[(int) ret]); } #ifdef _LIBC weak_alias (__re_compile_pattern, re_compile_pattern) #endif /* Entry points compatible with 4.2 BSD regex library. We don't define them unless specifically requested. */ #if defined _REGEX_RE_COMP || defined _LIBC /* BSD has one and only one pattern buffer. */ static struct re_pattern_buffer re_comp_buf; char * #ifdef _LIBC /* Make these definitions weak in libc, so POSIX programs can redefine these names if they don't use our functions, and still use regcomp/regexec below without link errors. */ weak_function #endif re_comp (s) const char *s; { reg_errcode_t ret; if (!s) { if (!re_comp_buf.buffer) return gettext ("No previous regular expression"); return 0; } if (!re_comp_buf.buffer) { re_comp_buf.buffer = (unsigned char *) malloc (200); if (re_comp_buf.buffer == NULL) return (char *) gettext (re_error_msgid[(int) REG_ESPACE]); re_comp_buf.allocated = 200; re_comp_buf.fastmap = (char *) malloc (1 << BYTEWIDTH); if (re_comp_buf.fastmap == NULL) return (char *) gettext (re_error_msgid[(int) REG_ESPACE]); } /* Since `re_exec' always passes NULL for the `regs' argument, we don't need to initialize the pattern buffer fields which affect it. */ /* Match anchors at newlines. */ re_comp_buf.newline_anchor = 1; # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) ret = wcs_regex_compile (s, strlen (s), re_syntax_options, &re_comp_buf); else # endif ret = byte_regex_compile (s, strlen (s), re_syntax_options, &re_comp_buf); if (!ret) return NULL; /* Yes, we're discarding `const' here if !HAVE_LIBINTL. */ return (char *) gettext (re_error_msgid[(int) ret]); } int #ifdef _LIBC weak_function #endif re_exec (s) const char *s; { const int len = strlen (s); return 0 <= re_search (&re_comp_buf, s, len, 0, len, (struct re_registers *) 0); } #endif /* _REGEX_RE_COMP */ /* POSIX.2 functions. Don't define these for Emacs. 
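A minimal caller of the entry points defined below (plus regfree, defined just after them) might look like the following sketch; the pattern, the subject string, the variable names, and the printf call are illustrative only, and inclusion of stdio.h is assumed:

       regex_t re;
       regmatch_t m[1];
       int err = regcomp (&re, "ab*c", REG_EXTENDED);
       if (err == 0 && regexec (&re, "xabbbcy", 1, m, 0) == 0)
         printf ("match at [%d, %d)\n", (int) m[0].rm_so, (int) m[0].rm_eo);
       regfree (&re);

   With those inputs m[0].rm_so is 1 and m[0].rm_eo is 6.  On a nonzero
   return from regcomp, a message can be formatted with
   regerror (err, &re, buf, sizeof buf).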
*/ #ifndef emacs /* regcomp takes a regular expression as a string and compiles it. PREG is a regex_t *. We do not expect any fields to be initialized, since POSIX says we shouldn't. Thus, we set `buffer' to the compiled pattern; `used' to the length of the compiled pattern; `syntax' to RE_SYNTAX_POSIX_EXTENDED if the REG_EXTENDED bit in CFLAGS is set; otherwise, to RE_SYNTAX_POSIX_BASIC; `newline_anchor' to REG_NEWLINE being set in CFLAGS; `fastmap' to an allocated space for the fastmap; `fastmap_accurate' to zero; `re_nsub' to the number of subexpressions in PATTERN. PATTERN is the address of the pattern string. CFLAGS is a series of bits which affect compilation. If REG_EXTENDED is set, we use POSIX extended syntax; otherwise, we use POSIX basic syntax. If REG_NEWLINE is set, then . and [^...] don't match newline. Also, regexec will try a match beginning after every newline. If REG_ICASE is set, then we considers upper- and lowercase versions of letters to be equivalent when matching. If REG_NOSUB is set, then when PREG is passed to regexec, that routine will report only success or failure, and nothing about the registers. It returns 0 if it succeeds, nonzero if it doesn't. (See regex.h for the return codes and their meanings.) */ int regcomp (preg, pattern, cflags) regex_t *preg; const char *pattern; int cflags; { reg_errcode_t ret; reg_syntax_t syntax = (cflags & REG_EXTENDED) ? RE_SYNTAX_POSIX_EXTENDED : RE_SYNTAX_POSIX_BASIC; /* regex_compile will allocate the space for the compiled pattern. */ preg->buffer = 0; preg->allocated = 0; preg->used = 0; /* Try to allocate space for the fastmap. */ preg->fastmap = (char *) malloc (1 << BYTEWIDTH); if (cflags & REG_ICASE) { unsigned i; preg->translate = (RE_TRANSLATE_TYPE) malloc (CHAR_SET_SIZE * sizeof (*(RE_TRANSLATE_TYPE)0)); if (preg->translate == NULL) return (int) REG_ESPACE; /* Map uppercase characters to corresponding lowercase ones. */ for (i = 0; i < CHAR_SET_SIZE; i++) preg->translate[i] = ISUPPER (i) ? TOLOWER (i) : (int) i; } else preg->translate = NULL; /* If REG_NEWLINE is set, newlines are treated differently. */ if (cflags & REG_NEWLINE) { /* REG_NEWLINE implies neither . nor [^...] match newline. */ syntax &= ~RE_DOT_NEWLINE; syntax |= RE_HAT_LISTS_NOT_NEWLINE; /* It also changes the matching behavior. */ preg->newline_anchor = 1; } else preg->newline_anchor = 0; preg->no_sub = !!(cflags & REG_NOSUB); /* POSIX says a null character in the pattern terminates it, so we can use strlen here in compiling the pattern. */ # ifdef MBS_SUPPORT if (MB_CUR_MAX != 1) ret = wcs_regex_compile (pattern, strlen (pattern), syntax, preg); else # endif ret = byte_regex_compile (pattern, strlen (pattern), syntax, preg); /* POSIX doesn't distinguish between an unmatched open-group and an unmatched close-group: both are REG_EPAREN. */ if (ret == REG_ERPAREN) ret = REG_EPAREN; if (ret == REG_NOERROR && preg->fastmap) { /* Compute the fastmap now, since regexec cannot modify the pattern buffer. */ if (re_compile_fastmap (preg) == -2) { /* Some error occurred while computing the fastmap, just forget about it. */ free (preg->fastmap); preg->fastmap = NULL; } } return (int) ret; } #ifdef _LIBC weak_alias (__regcomp, regcomp) #endif /* regexec searches for a given pattern, specified by PREG, in the string STRING. If NMATCH is zero or REG_NOSUB was set in the cflags argument to `regcomp', we ignore PMATCH. Otherwise, we assume PMATCH has at least NMATCH elements, and we set them to the offsets of the corresponding matched substrings. 
EFLAGS specifies `execution flags' which affect matching: if REG_NOTBOL is set, then ^ does not match at the beginning of the string; if REG_NOTEOL is set, then $ does not match at the end. We return 0 if we find a match and REG_NOMATCH if not. */ int regexec (preg, string, nmatch, pmatch, eflags) const regex_t *preg; const char *string; size_t nmatch; regmatch_t pmatch[]; int eflags; { int ret; struct re_registers regs; regex_t private_preg; int len = strlen (string); boolean want_reg_info = !preg->no_sub && nmatch > 0; private_preg = *preg; private_preg.not_bol = !!(eflags & REG_NOTBOL); private_preg.not_eol = !!(eflags & REG_NOTEOL); /* The user has told us exactly how many registers to return information about, via `nmatch'. We have to pass that on to the matching routines. */ private_preg.regs_allocated = REGS_FIXED; if (want_reg_info) { regs.num_regs = nmatch; regs.start = TALLOC (nmatch * 2, regoff_t); if (regs.start == NULL) return (int) REG_NOMATCH; regs.end = regs.start + nmatch; } /* Perform the searching operation. */ ret = re_search (&private_preg, string, len, /* start: */ 0, /* range: */ len, want_reg_info ? ®s : (struct re_registers *) 0); /* Copy the register information to the POSIX structure. */ if (want_reg_info) { if (ret >= 0) { unsigned r; for (r = 0; r < nmatch; r++) { pmatch[r].rm_so = regs.start[r]; pmatch[r].rm_eo = regs.end[r]; } } /* If we needed the temporary register info, free the space now. */ free (regs.start); } /* We want zero return to mean success, unlike `re_search'. */ return ret >= 0 ? (int) REG_NOERROR : (int) REG_NOMATCH; } #ifdef _LIBC weak_alias (__regexec, regexec) #endif /* Returns a message corresponding to an error code, ERRCODE, returned from either regcomp or regexec. We don't use PREG here. */ size_t regerror (errcode, preg, errbuf, errbuf_size) int errcode; const regex_t *preg ATTRIBUTE_UNUSED; char *errbuf; size_t errbuf_size; { const char *msg; size_t msg_size; if (errcode < 0 || errcode >= (int) (sizeof (re_error_msgid) / sizeof (re_error_msgid[0]))) /* Only error codes returned by the rest of the code should be passed to this routine. If we are given anything else, or if other regex code generates an invalid error code, then the program has a bug. Dump core so we can fix it. */ abort (); msg = gettext (re_error_msgid[errcode]); msg_size = strlen (msg) + 1; /* Includes the null. */ if (errbuf_size != 0) { if (msg_size > errbuf_size) { #if defined HAVE_MEMPCPY || defined _LIBC *((char *) mempcpy (errbuf, msg, errbuf_size - 1)) = '\0'; #else memcpy (errbuf, msg, errbuf_size - 1); errbuf[errbuf_size - 1] = 0; #endif } else memcpy (errbuf, msg, msg_size); } return msg_size; } #ifdef _LIBC weak_alias (__regerror, regerror) #endif /* Free dynamically allocated space used by PREG. 
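   Taken together with regcomp, regexec and regerror above, a typical life
   cycle looks roughly like the sketch below.  It is illustrative only: the
   pattern and subject strings are hypothetical and the usual declarations
   from regex.h are assumed.

      regex_t re;
      regmatch_t match[1];
      char errbuf[128];
      int err;

      err = regcomp (&re, "^[0-9]+", REG_EXTENDED);
      if (err != 0)
        {
          regerror (err, &re, errbuf, sizeof errbuf);
          ...errbuf now holds an error message, truncated if necessary...
        }
      else
        {
          if (regexec (&re, "42 apples", 1, match, 0) == 0)
            ...match[0].rm_so .. match[0].rm_eo spans the match, 0 .. 2 here...
          regfree (&re);
        }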
*/ void regfree (preg) regex_t *preg; { if (preg->buffer != NULL) free (preg->buffer); preg->buffer = NULL; preg->allocated = 0; preg->used = 0; if (preg->fastmap != NULL) free (preg->fastmap); preg->fastmap = NULL; preg->fastmap_accurate = 0; if (preg->translate != NULL) free (preg->translate); preg->translate = NULL; } #ifdef _LIBC weak_alias (__regfree, regfree) #endif #endif /* not emacs */ #endif /* not INSIDE_RECURSION */ #undef STORE_NUMBER #undef STORE_NUMBER_AND_INCR #undef EXTRACT_NUMBER #undef EXTRACT_NUMBER_AND_INCR #undef DEBUG_PRINT_COMPILED_PATTERN #undef DEBUG_PRINT_DOUBLE_STRING #undef INIT_FAIL_STACK #undef RESET_FAIL_STACK #undef DOUBLE_FAIL_STACK #undef PUSH_PATTERN_OP #undef PUSH_FAILURE_POINTER #undef PUSH_FAILURE_INT #undef PUSH_FAILURE_ELT #undef POP_FAILURE_POINTER #undef POP_FAILURE_INT #undef POP_FAILURE_ELT #undef DEBUG_PUSH #undef DEBUG_POP #undef PUSH_FAILURE_POINT #undef POP_FAILURE_POINT #undef REG_UNSET_VALUE #undef REG_UNSET #undef PATFETCH #undef PATFETCH_RAW #undef PATUNFETCH #undef TRANSLATE #undef INIT_BUF_SIZE #undef GET_BUFFER_SPACE #undef BUF_PUSH #undef BUF_PUSH_2 #undef BUF_PUSH_3 #undef STORE_JUMP #undef STORE_JUMP2 #undef INSERT_JUMP #undef INSERT_JUMP2 #undef EXTEND_BUFFER #undef GET_UNSIGNED_NUMBER #undef FREE_STACK_RETURN # undef POINTER_TO_OFFSET # undef MATCHING_IN_FRST_STRING # undef PREFETCH # undef AT_STRINGS_BEG # undef AT_STRINGS_END # undef WORDCHAR_P # undef FREE_VAR # undef FREE_VARIABLES # undef NO_HIGHEST_ACTIVE_REG # undef NO_LOWEST_ACTIVE_REG # undef CHAR_T # undef UCHAR_T # undef COMPILED_BUFFER_VAR # undef OFFSET_ADDRESS_SIZE # undef CHAR_CLASS_SIZE # undef PREFIX # undef ARG_PREFIX # undef PUT_CHAR # undef BYTE # undef WCHAR #undef ISALPHA #undef ISALNUM #undef ISBLANK #undef ISCNTRL #undef ISDIGIT #undef ISGRAPH #undef ISLOWER #undef ISPRINT #undef ISPUNCT #undef ISSPACE #undef ISXDIGIT # define DEFINED_ONCE /* Demangler for GNU C++ Copyright 1989, 1991, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Written by James Clark (jjc@jclark.uucp) Rewritten by Fred Fish (fnf@cygnus.com) for ARM and Lucid demangling Modified by Satish Pai (pai@apollo.hp.com) for HP demangling This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. In addition to the permissions in the GNU Library General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The Library Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combined executable.) Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file exports two functions; cplus_mangle_opname and cplus_demangle. 
This file imports xmalloc and xrealloc, which are like malloc and realloc except that they generate a fatal error if there is no available memory. */ /* This file lives in both GCC and libiberty. When making changes, please try not to break either. */ #ifdef HAVE_CONFIG_H #endif /* replacement macros. Copyright (C) 2000, 2001 Free Software Foundation, Inc. Contributed by Zack Weinberg . This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This is a compatible replacement of the standard C library's with the following properties: - Implements all isxxx() macros required by C99. - Also implements some character classes useful when parsing C-like languages. - Does not change behavior depending on the current locale. - Behaves properly for all values in the range of a signed or unsigned char. To avoid conflicts, this header defines the isxxx functions in upper case, e.g. ISALPHA not isalpha. */ #ifndef SAFE_CTYPE_H #define SAFE_CTYPE_H #ifdef isalpha /*#error "safe-ctype.h and ctype.h may not be used simultaneously" */ /* Oh yeah? Watch me! */ #endif #undef ISUPPER #undef TOLOWER /* Determine host character set. */ #define HOST_CHARSET_UNKNOWN 0 #define HOST_CHARSET_ASCII 1 #define HOST_CHARSET_EBCDIC 2 #if '\n' == 0x0A && ' ' == 0x20 && '0' == 0x30 \ && 'A' == 0x41 && 'a' == 0x61 && '!' == 0x21 # define HOST_CHARSET HOST_CHARSET_ASCII #else # if '\n' == 0x15 && ' ' == 0x40 && '0' == 0xF0 \ && 'A' == 0xC1 && 'a' == 0x81 && '!' == 0x5A # define HOST_CHARSET HOST_CHARSET_EBCDIC # else # define HOST_CHARSET HOST_CHARSET_UNKNOWN # endif #endif /* Categories. */ enum { /* In C99 */ _sch_isblank = 0x0001, /* space \t */ _sch_iscntrl = 0x0002, /* nonprinting characters */ _sch_isdigit = 0x0004, /* 0-9 */ _sch_islower = 0x0008, /* a-z */ _sch_isprint = 0x0010, /* any printing character including ' ' */ _sch_ispunct = 0x0020, /* all punctuation */ _sch_isspace = 0x0040, /* space \t \n \r \f \v */ _sch_isupper = 0x0080, /* A-Z */ _sch_isxdigit = 0x0100, /* 0-9A-Fa-f */ /* Extra categories useful to cpplib. */ _sch_isidst = 0x0200, /* A-Za-z_ */ _sch_isvsp = 0x0400, /* \n \r */ _sch_isnvsp = 0x0800, /* space \t \f \v \0 */ /* Combinations of the above. */ _sch_isalpha = _sch_isupper|_sch_islower, /* A-Za-z */ _sch_isalnum = _sch_isalpha|_sch_isdigit, /* A-Za-z0-9 */ _sch_isidnum = _sch_isidst|_sch_isdigit, /* A-Za-z0-9_ */ _sch_isgraph = _sch_isalnum|_sch_ispunct, /* isprint and not space */ _sch_iscppsp = _sch_isvsp|_sch_isnvsp, /* isspace + \0 */ _sch_isbasic = _sch_isprint|_sch_iscppsp /* basic charset of ISO C (plus ` and @) */ }; /* Character classification. 
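   A minimal sketch of how the macros defined just below are intended to be
   used (variable names here are illustrative).  Because the lookup table is
   indexed with `& 0xff', any char or unsigned char value is acceptable, and
   the answers do not depend on the current locale:

      unsigned char c = 'a';

      if (ISALPHA (c))
        c = TOUPPER (c);              ...c is now 'A'...
      if (ISDIGIT ('7') && ISXDIGIT ('f'))
        ...both tests succeed...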
*/ extern const unsigned short _sch_istable[256]; #define _sch_test(c, bit) (_sch_istable[(c) & 0xff] & (unsigned short)(bit)) #define ISALPHA(c) _sch_test(c, _sch_isalpha) #define ISALNUM(c) _sch_test(c, _sch_isalnum) #define ISBLANK(c) _sch_test(c, _sch_isblank) #define ISCNTRL(c) _sch_test(c, _sch_iscntrl) #define ISDIGIT(c) _sch_test(c, _sch_isdigit) #define ISGRAPH(c) _sch_test(c, _sch_isgraph) #define ISLOWER(c) _sch_test(c, _sch_islower) #define ISPRINT(c) _sch_test(c, _sch_isprint) #define ISPUNCT(c) _sch_test(c, _sch_ispunct) #define ISSPACE(c) _sch_test(c, _sch_isspace) #define ISUPPER(c) _sch_test(c, _sch_isupper) #define ISXDIGIT(c) _sch_test(c, _sch_isxdigit) #define ISIDNUM(c) _sch_test(c, _sch_isidnum) #define ISIDST(c) _sch_test(c, _sch_isidst) #define IS_ISOBASIC(c) _sch_test(c, _sch_isbasic) #define IS_VSPACE(c) _sch_test(c, _sch_isvsp) #define IS_NVSPACE(c) _sch_test(c, _sch_isnvsp) #define IS_SPACE_OR_NUL(c) _sch_test(c, _sch_iscppsp) /* Character transformation. */ extern const unsigned char _sch_toupper[256]; extern const unsigned char _sch_tolower[256]; #define TOUPPER(c) _sch_toupper[(c) & 0xff] #define TOLOWER(c) _sch_tolower[(c) & 0xff] #endif /* SAFE_CTYPE_H */ #include #include #include #ifdef HAVE_STDLIB_H #include #else char * malloc (); char * realloc (); #endif /* Defs for interface to demanglers. Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #if !defined (DEMANGLE_H) #define DEMANGLE_H /* Function declarations for libiberty. Copyright 2001, 2002 Free Software Foundation, Inc. Note - certain prototypes declared in this header file are for functions whoes implementation copyright does not belong to the FSF. Those prototypes are present in this file for reference purposes only and their presence in this file should not construed as an indication of ownership by the FSF of the implementation of those functions in any way or form whatsoever. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. Written by Cygnus Support, 1994. The libiberty library provides a number of functions which are missing on some operating systems. We do not declare those here, to avoid conflicts with the system header files on operating systems that do support those functions. 
In this file we only declare those functions which are specific to libiberty. */ #ifndef LIBIBERTY_H #define LIBIBERTY_H #ifdef __cplusplus extern "C" { #endif #ifdef ANSI_PROTOTYPES /* Get a definition for size_t. */ #include /* Get a definition for va_list. */ #include #endif /* Build an argument vector from a string. Allocates memory using malloc. Use freeargv to free the vector. */ extern char **buildargv PARAMS ((const char *)) ATTRIBUTE_MALLOC; /* Free a vector returned by buildargv. */ extern void freeargv PARAMS ((char **)); /* Duplicate an argument vector. Allocates memory using malloc. Use freeargv to free the vector. */ extern char **dupargv PARAMS ((char **)) ATTRIBUTE_MALLOC; /* Return the last component of a path name. Note that we can't use a prototype here because the parameter is declared inconsistently across different systems, sometimes as "char *" and sometimes as "const char *" */ /* HAVE_DECL_* is a three-state macro: undefined, 0 or 1. If it is undefined, we haven't run the autoconf check so provide the declaration without arguments. If it is 0, we checked and failed to find the declaration so provide a fully prototyped one. If it is 1, we found it so don't provide any declaration at all. */ #if !HAVE_DECL_BASENAME #if defined (__GNU_LIBRARY__ ) || defined (__linux__) || defined (__FreeBSD__) || defined (__OpenBSD__) || defined(__NetBSD__) || defined (__CYGWIN__) || defined (__CYGWIN32__) || defined (HAVE_DECL_BASENAME) extern char *basename PARAMS ((const char *)); #else extern char *basename (); #endif #endif /* A well-defined basename () that is always compiled in. */ extern const char *lbasename PARAMS ((const char *)); /* A well-defined realpath () that is always compiled in. */ extern char *lrealpath PARAMS ((const char *)); /* Concatenate an arbitrary number of strings. You must pass NULL as the last argument of this function, to terminate the list of strings. Allocates memory using xmalloc. */ extern char *concat PARAMS ((const char *, ...)) ATTRIBUTE_MALLOC; /* Concatenate an arbitrary number of strings. You must pass NULL as the last argument of this function, to terminate the list of strings. Allocates memory using xmalloc. The first argument is not one of the strings to be concatenated, but if not NULL is a pointer to be freed after the new string is created, similar to the way xrealloc works. */ extern char *reconcat PARAMS ((char *, const char *, ...)) ATTRIBUTE_MALLOC; /* Determine the length of concatenating an arbitrary number of strings. You must pass NULL as the last argument of this function, to terminate the list of strings. */ extern unsigned long concat_length PARAMS ((const char *, ...)); /* Concatenate an arbitrary number of strings into a SUPPLIED area of memory. You must pass NULL as the last argument of this function, to terminate the list of strings. The supplied memory is assumed to be large enough. */ extern char *concat_copy PARAMS ((char *, const char *, ...)); /* Concatenate an arbitrary number of strings into a GLOBAL area of memory. You must pass NULL as the last argument of this function, to terminate the list of strings. The supplied memory is assumed to be large enough. */ extern char *concat_copy2 PARAMS ((const char *, ...)); /* This is the global area used by concat_copy2. */ extern char *libiberty_concat_ptr; /* Concatenate an arbitrary number of strings. You must pass NULL as the last argument of this function, to terminate the list of strings. Allocates memory using alloca. The arguments are evaluated twice! 
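   A short sketch contrasting concat (declared above, xmalloc allocation)
   with the ACONCAT macro defined just below (alloca allocation, arguments
   evaluated twice).  The variables `dir' and `file' are hypothetical:

      const char *dir = "/usr/lib", *file = "libfoo.a";

      char *heap_path  = concat (dir, "/", file, (char *) NULL);
      char *stack_path = ACONCAT ((dir, "/", file, (char *) NULL));

      ...use the two paths; heap_path must eventually be passed to free,
         while stack_path disappears when the enclosing function returns...

      free (heap_path);

   Note the doubled parentheses around the ACONCAT argument list: the macro
   forwards the whole parenthesized list to concat_length and concat_copy2.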
*/ #define ACONCAT(ACONCAT_PARAMS) \ (libiberty_concat_ptr = alloca (concat_length ACONCAT_PARAMS + 1), \ concat_copy2 ACONCAT_PARAMS) /* Check whether two file descriptors refer to the same file. */ extern int fdmatch PARAMS ((int fd1, int fd2)); /* Get the working directory. The result is cached, so don't call chdir() between calls to getpwd(). */ extern char * getpwd PARAMS ((void)); /* Get the amount of time the process has run, in microseconds. */ extern long get_run_time PARAMS ((void)); /* Generate a relocated path to some installation directory. Allocates return value using malloc. */ extern char *make_relative_prefix PARAMS ((const char *, const char *, const char *)); /* Choose a temporary directory to use for scratch files. */ extern char *choose_temp_base PARAMS ((void)) ATTRIBUTE_MALLOC; /* Return a temporary file name or NULL if unable to create one. */ extern char *make_temp_file PARAMS ((const char *)) ATTRIBUTE_MALLOC; /* Allocate memory filled with spaces. Allocates using malloc. */ extern const char *spaces PARAMS ((int count)); /* Return the maximum error number for which strerror will return a string. */ extern int errno_max PARAMS ((void)); /* Return the name of an errno value (e.g., strerrno (EINVAL) returns "EINVAL"). */ extern const char *strerrno PARAMS ((int)); /* Given the name of an errno value, return the value. */ extern int strtoerrno PARAMS ((const char *)); /* ANSI's strerror(), but more robust. */ extern char *xstrerror PARAMS ((int)); /* Return the maximum signal number for which strsignal will return a string. */ extern int signo_max PARAMS ((void)); /* Return a signal message string for a signal number (e.g., strsignal (SIGHUP) returns something like "Hangup"). */ /* This is commented out as it can conflict with one in system headers. We still document its existence though. */ /*extern const char *strsignal PARAMS ((int));*/ /* Return the name of a signal number (e.g., strsigno (SIGHUP) returns "SIGHUP"). */ extern const char *strsigno PARAMS ((int)); /* Given the name of a signal, return its number. */ extern int strtosigno PARAMS ((const char *)); /* Register a function to be run by xexit. Returns 0 on success. */ extern int xatexit PARAMS ((void (*fn) (void))); /* Exit, calling all the functions registered with xatexit. */ extern void xexit PARAMS ((int status)) ATTRIBUTE_NORETURN; /* Set the program name used by xmalloc. */ extern void xmalloc_set_program_name PARAMS ((const char *)); /* Report an allocation failure. */ extern void xmalloc_failed PARAMS ((size_t)) ATTRIBUTE_NORETURN; /* Allocate memory without fail. If malloc fails, this will print a message to stderr (using the name set by xmalloc_set_program_name, if any) and then call xexit. */ extern PTR xmalloc PARAMS ((size_t)) ATTRIBUTE_MALLOC; /* Reallocate memory without fail. This works like xmalloc. Note, realloc type functions are not suitable for attribute malloc since they may return the same address across multiple calls. */ extern PTR xrealloc PARAMS ((PTR, size_t)); /* Allocate memory without fail and set it to zero. This works like xmalloc. */ extern PTR xcalloc PARAMS ((size_t, size_t)) ATTRIBUTE_MALLOC; /* Copy a string into a memory buffer without fail. */ extern char *xstrdup PARAMS ((const char *)) ATTRIBUTE_MALLOC; /* Copy an existing memory buffer to a new memory buffer without fail. */ extern PTR xmemdup PARAMS ((const PTR, size_t, size_t)) ATTRIBUTE_MALLOC; /* Physical memory routines. Return values are in BYTES. 
*/ extern double physmem_total PARAMS ((void)); extern double physmem_available PARAMS ((void)); /* hex character manipulation routines */ #define _hex_array_size 256 #define _hex_bad 99 extern const unsigned char _hex_value[_hex_array_size]; extern void hex_init PARAMS ((void)); #define hex_p(c) (hex_value (c) != _hex_bad) /* If you change this, note well: Some code relies on side effects in the argument being performed exactly once. */ #define hex_value(c) ((unsigned int) _hex_value[(unsigned char) (c)]) /* Definitions used by the pexecute routine. */ #define PEXECUTE_FIRST 1 #define PEXECUTE_LAST 2 #define PEXECUTE_ONE (PEXECUTE_FIRST + PEXECUTE_LAST) #define PEXECUTE_SEARCH 4 #define PEXECUTE_VERBOSE 8 /* Execute a program. */ extern int pexecute PARAMS ((const char *, char * const *, const char *, const char *, char **, char **, int)); /* Wait for pexecute to finish. */ extern int pwait PARAMS ((int, int *, int)); #if !HAVE_DECL_ASPRINTF /* Like sprintf but provides a pointer to malloc'd storage, which must be freed by the caller. */ extern int asprintf PARAMS ((char **, const char *, ...)) ATTRIBUTE_PRINTF_2; #endif #if !HAVE_DECL_VASPRINTF /* Like vsprintf but provides a pointer to malloc'd storage, which must be freed by the caller. */ extern int vasprintf PARAMS ((char **, const char *, va_list)) ATTRIBUTE_PRINTF(2,0); #endif #define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0])) /* Drastically simplified alloca configurator. If we're using GCC, we use __builtin_alloca; otherwise we use the C alloca. The C alloca is always available. You can override GCC by defining USE_C_ALLOCA yourself. The canonical autoconf macro C_ALLOCA is also set/unset as it is often used to indicate whether code needs to call alloca(0). */ extern PTR C_alloca PARAMS ((size_t)) ATTRIBUTE_MALLOC; #undef alloca #if GCC_VERSION >= 2000 && !defined USE_C_ALLOCA # define alloca(x) __builtin_alloca(x) # undef C_ALLOCA # define ASTRDUP(X) \ (__extension__ ({ const char *const libiberty_optr = (X); \ const unsigned long libiberty_len = strlen (libiberty_optr) + 1; \ char *const libiberty_nptr = alloca (libiberty_len); \ (char *) memcpy (libiberty_nptr, libiberty_optr, libiberty_len); })) #else # define alloca(x) C_alloca(x) # undef USE_C_ALLOCA # define USE_C_ALLOCA 1 # undef C_ALLOCA # define C_ALLOCA 1 extern const char *libiberty_optr; extern char *libiberty_nptr; extern unsigned long libiberty_len; # define ASTRDUP(X) \ (libiberty_optr = (X), \ libiberty_len = strlen (libiberty_optr) + 1, \ libiberty_nptr = alloca (libiberty_len), \ (char *) memcpy (libiberty_nptr, libiberty_optr, libiberty_len)) #endif #ifdef __cplusplus } #endif #endif /* ! defined (LIBIBERTY_H) */ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /* Options passed to cplus_demangle (in 2nd parameter). */ #define DMGL_NO_OPTS 0 /* For readability... */ #define DMGL_PARAMS (1 << 0) /* Include function args */ #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ #define DMGL_JAVA (1 << 2) /* Demangle as Java rather than C++. */ #define DMGL_VERBOSE (1 << 3) /* Include implementation details. */ #define DMGL_TYPES (1 << 4) /* Also try to demangle type encodings. */ #define DMGL_AUTO (1 << 8) #define DMGL_GNU (1 << 9) #define DMGL_LUCID (1 << 10) #define DMGL_ARM (1 << 11) #define DMGL_HP (1 << 12) /* For the HP aCC compiler; same as ARM except for template arguments, etc. 
*/ #define DMGL_EDG (1 << 13) #define DMGL_GNU_V3 (1 << 14) #define DMGL_GNAT (1 << 15) /* If none of these are set, use 'current_demangling_style' as the default. */ #define DMGL_STYLE_MASK (DMGL_AUTO|DMGL_GNU|DMGL_LUCID|DMGL_ARM|DMGL_HP|DMGL_EDG|DMGL_GNU_V3|DMGL_JAVA|DMGL_GNAT) /* Enumeration of possible demangling styles. Lucid and ARM styles are still kept logically distinct, even though they now both behave identically. The resulting style is actual the union of both. I.E. either style recognizes both "__pt__" and "__rf__" for operator "->", even though the first is lucid style and the second is ARM style. (FIXME?) */ extern enum demangling_styles { no_demangling = -1, unknown_demangling = 0, auto_demangling = DMGL_AUTO, gnu_demangling = DMGL_GNU, lucid_demangling = DMGL_LUCID, arm_demangling = DMGL_ARM, hp_demangling = DMGL_HP, edg_demangling = DMGL_EDG, gnu_v3_demangling = DMGL_GNU_V3, java_demangling = DMGL_JAVA, gnat_demangling = DMGL_GNAT } current_demangling_style; /* Define string names for the various demangling styles. */ #define NO_DEMANGLING_STYLE_STRING "none" #define AUTO_DEMANGLING_STYLE_STRING "auto" #define GNU_DEMANGLING_STYLE_STRING "gnu" #define LUCID_DEMANGLING_STYLE_STRING "lucid" #define ARM_DEMANGLING_STYLE_STRING "arm" #define HP_DEMANGLING_STYLE_STRING "hp" #define EDG_DEMANGLING_STYLE_STRING "edg" #define GNU_V3_DEMANGLING_STYLE_STRING "gnu-v3" #define JAVA_DEMANGLING_STYLE_STRING "java" #define GNAT_DEMANGLING_STYLE_STRING "gnat" /* Some macros to test what demangling style is active. */ #define CURRENT_DEMANGLING_STYLE current_demangling_style #define AUTO_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_AUTO) #define GNU_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_GNU) #define LUCID_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_LUCID) #define ARM_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_ARM) #define HP_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_HP) #define EDG_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_EDG) #define GNU_V3_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_GNU_V3) #define JAVA_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_JAVA) #define GNAT_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_GNAT) /* Provide information about the available demangle styles. This code is pulled from gdb into libiberty because it is useful to binutils also. */ extern const struct demangler_engine { const char *const demangling_style_name; const enum demangling_styles demangling_style; const char *const demangling_style_doc; } libiberty_demanglers[]; extern char * cplus_demangle PARAMS ((const char *mangled, int options)); extern int cplus_demangle_opname PARAMS ((const char *opname, char *result, int options)); extern const char * cplus_mangle_opname PARAMS ((const char *opname, int options)); /* Note: This sets global state. FIXME if you care about multi-threading. */ extern void set_cplus_marker_for_demangling PARAMS ((int ch)); extern enum demangling_styles cplus_demangle_set_style PARAMS ((enum demangling_styles style)); extern enum demangling_styles cplus_demangle_name_to_style PARAMS ((const char *name)); /* V3 ABI demangling entry points, defined in cp-demangle.c. 
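   A sketch of calling the first of these.  The mangled name below is a
   hypothetical example (the V3 encoding of foo(int)); as with cplus_demangle
   further above, the result is expected to be a malloc'd string owned by the
   caller:

      char *dem = cplus_demangle_v3 ("_Z3fooi", DMGL_PARAMS | DMGL_ANSI);

      if (dem != NULL)
        {
          ...dem is expected to read "foo(int)"...
          free (dem);
        }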
*/ extern char* cplus_demangle_v3 PARAMS ((const char* mangled, int options)); extern char* java_demangle_v3 PARAMS ((const char* mangled)); enum gnu_v3_ctor_kinds { gnu_v3_complete_object_ctor = 1, gnu_v3_base_object_ctor, gnu_v3_complete_object_allocating_ctor }; /* Return non-zero iff NAME is the mangled form of a constructor name in the G++ V3 ABI demangling style. Specifically, return an `enum gnu_v3_ctor_kinds' value indicating what kind of constructor it is. */ extern enum gnu_v3_ctor_kinds is_gnu_v3_mangled_ctor PARAMS ((const char *name)); enum gnu_v3_dtor_kinds { gnu_v3_deleting_dtor = 1, gnu_v3_complete_object_dtor, gnu_v3_base_object_dtor }; /* Return non-zero iff NAME is the mangled form of a destructor name in the G++ V3 ABI demangling style. Specifically, return an `enum gnu_v3_dtor_kinds' value, indicating what kind of destructor it is. */ extern enum gnu_v3_dtor_kinds is_gnu_v3_mangled_dtor PARAMS ((const char *name)); /* The V3 demangler works in two passes. The first pass builds a tree representation of the mangled name, and the second pass turns the tree representation into a demangled string. Here we define an interface to permit a caller to build their own tree representation, which they can pass to the demangler to get a demangled string. This can be used to canonicalize user input into something which the demangler might output. It could also be used by other demanglers in the future. */ /* These are the component types which may be found in the tree. Many component types have one or two subtrees, referred to as left and right (a component type with only one subtree puts it in the left subtree). */ enum demangle_component_type { /* A name, with a length and a pointer to a string. */ DEMANGLE_COMPONENT_NAME, /* A qualified name. The left subtree is a class or namespace or some such thing, and the right subtree is a name qualified by that class. */ DEMANGLE_COMPONENT_QUAL_NAME, /* A local name. The left subtree describes a function, and the right subtree is a name which is local to that function. */ DEMANGLE_COMPONENT_LOCAL_NAME, /* A typed name. The left subtree is a name, and the right subtree describes that name as a function. */ DEMANGLE_COMPONENT_TYPED_NAME, /* A template. The left subtree is a template name, and the right subtree is a template argument list. */ DEMANGLE_COMPONENT_TEMPLATE, /* A template parameter. This holds a number, which is the template parameter index. */ DEMANGLE_COMPONENT_TEMPLATE_PARAM, /* A constructor. This holds a name and the kind of constructor. */ DEMANGLE_COMPONENT_CTOR, /* A destructor. This holds a name and the kind of destructor. */ DEMANGLE_COMPONENT_DTOR, /* A vtable. This has one subtree, the type for which this is a vtable. */ DEMANGLE_COMPONENT_VTABLE, /* A VTT structure. This has one subtree, the type for which this is a VTT. */ DEMANGLE_COMPONENT_VTT, /* A construction vtable. The left subtree is the type for which this is a vtable, and the right subtree is the derived type for which this vtable is built. */ DEMANGLE_COMPONENT_CONSTRUCTION_VTABLE, /* A typeinfo structure. This has one subtree, the type for which this is the tpeinfo structure. */ DEMANGLE_COMPONENT_TYPEINFO, /* A typeinfo name. This has one subtree, the type for which this is the typeinfo name. */ DEMANGLE_COMPONENT_TYPEINFO_NAME, /* A typeinfo function. This has one subtree, the type for which this is the tpyeinfo function. */ DEMANGLE_COMPONENT_TYPEINFO_FN, /* A thunk. This has one subtree, the name for which this is a thunk. 
*/ DEMANGLE_COMPONENT_THUNK, /* A virtual thunk. This has one subtree, the name for which this is a virtual thunk. */ DEMANGLE_COMPONENT_VIRTUAL_THUNK, /* A covariant thunk. This has one subtree, the name for which this is a covariant thunk. */ DEMANGLE_COMPONENT_COVARIANT_THUNK, /* A Java class. This has one subtree, the type. */ DEMANGLE_COMPONENT_JAVA_CLASS, /* A guard variable. This has one subtree, the name for which this is a guard variable. */ DEMANGLE_COMPONENT_GUARD, /* A reference temporary. This has one subtree, the name for which this is a temporary. */ DEMANGLE_COMPONENT_REFTEMP, /* A standard substitution. This holds the name of the substitution. */ DEMANGLE_COMPONENT_SUB_STD, /* The restrict qualifier. The one subtree is the type which is being qualified. */ DEMANGLE_COMPONENT_RESTRICT, /* The volatile qualifier. The one subtree is the type which is being qualified. */ DEMANGLE_COMPONENT_VOLATILE, /* The const qualifier. The one subtree is the type which is being qualified. */ DEMANGLE_COMPONENT_CONST, /* The restrict qualifier modifying a member function. The one subtree is the type which is being qualified. */ DEMANGLE_COMPONENT_RESTRICT_THIS, /* The volatile qualifier modifying a member function. The one subtree is the type which is being qualified. */ DEMANGLE_COMPONENT_VOLATILE_THIS, /* The const qualifier modifying a member function. The one subtree is the type which is being qualified. */ DEMANGLE_COMPONENT_CONST_THIS, /* A vendor qualifier. The left subtree is the type which is being qualified, and the right subtree is the name of the qualifier. */ DEMANGLE_COMPONENT_VENDOR_TYPE_QUAL, /* A pointer. The one subtree is the type which is being pointed to. */ DEMANGLE_COMPONENT_POINTER, /* A reference. The one subtree is the type which is being referenced. */ DEMANGLE_COMPONENT_REFERENCE, /* A complex type. The one subtree is the base type. */ DEMANGLE_COMPONENT_COMPLEX, /* An imaginary type. The one subtree is the base type. */ DEMANGLE_COMPONENT_IMAGINARY, /* A builtin type. This holds the builtin type information. */ DEMANGLE_COMPONENT_BUILTIN_TYPE, /* A vendor's builtin type. This holds the name of the type. */ DEMANGLE_COMPONENT_VENDOR_TYPE, /* A function type. The left subtree is the return type. The right subtree is a list of ARGLIST nodes. Either or both may be NULL. */ DEMANGLE_COMPONENT_FUNCTION_TYPE, /* An array type. The left subtree is the dimension, which may be NULL, or a string (represented as DEMANGLE_COMPONENT_NAME), or an expression. The right subtree is the element type. */ DEMANGLE_COMPONENT_ARRAY_TYPE, /* A pointer to member type. The left subtree is the class type, and the right subtree is the member type. CV-qualifiers appear on the latter. */ DEMANGLE_COMPONENT_PTRMEM_TYPE, /* An argument list. The left subtree is the current argument, and the right subtree is either NULL or another ARGLIST node. */ DEMANGLE_COMPONENT_ARGLIST, /* A template argument list. The left subtree is the current template argument, and the right subtree is either NULL or another TEMPLATE_ARGLIST node. */ DEMANGLE_COMPONENT_TEMPLATE_ARGLIST, /* An operator. This holds information about a standard operator. */ DEMANGLE_COMPONENT_OPERATOR, /* An extended operator. This holds the number of arguments, and the name of the extended operator. */ DEMANGLE_COMPONENT_EXTENDED_OPERATOR, /* A typecast, represented as a unary operator. The one subtree is the type to which the argument should be cast. */ DEMANGLE_COMPONENT_CAST, /* A unary expression. 
The left subtree is the operator, and the right subtree is the single argument. */ DEMANGLE_COMPONENT_UNARY, /* A binary expression. The left subtree is the operator, and the right subtree is a BINARY_ARGS. */ DEMANGLE_COMPONENT_BINARY, /* Arguments to a binary expression. The left subtree is the first argument, and the right subtree is the second argument. */ DEMANGLE_COMPONENT_BINARY_ARGS, /* A trinary expression. The left subtree is the operator, and the right subtree is a TRINARY_ARG1. */ DEMANGLE_COMPONENT_TRINARY, /* Arguments to a trinary expression. The left subtree is the first argument, and the right subtree is a TRINARY_ARG2. */ DEMANGLE_COMPONENT_TRINARY_ARG1, /* More arguments to a trinary expression. The left subtree is the second argument, and the right subtree is the third argument. */ DEMANGLE_COMPONENT_TRINARY_ARG2, /* A literal. The left subtree is the type, and the right subtree is the value, represented as a DEMANGLE_COMPONENT_NAME. */ DEMANGLE_COMPONENT_LITERAL, /* A negative literal. Like LITERAL, but the value is negated. This is a minor hack: the NAME used for LITERAL points directly to the mangled string, but since negative numbers are mangled using 'n' instead of '-', we want a way to indicate a negative number which involves neither modifying the mangled string nor allocating a new copy of the literal in memory. */ DEMANGLE_COMPONENT_LITERAL_NEG }; /* Types which are only used internally. */ struct demangle_operator_info; struct demangle_builtin_type_info; /* A node in the tree representation is an instance of a struct demangle_component. Note that the field names of the struct are not well protected against macros defined by the file including this one. We can fix this if it ever becomes a problem. */ struct demangle_component { /* The type of this component. */ enum demangle_component_type type; union { /* For DEMANGLE_COMPONENT_NAME. */ struct { /* A pointer to the name (which need not NULL terminated) and its length. */ const char *s; int len; } s_name; /* For DEMANGLE_COMPONENT_OPERATOR. */ struct { /* Operator. */ const struct demangle_operator_info *op; } s_operator; /* For DEMANGLE_COMPONENT_EXTENDED_OPERATOR. */ struct { /* Number of arguments. */ int args; /* Name. */ struct demangle_component *name; } s_extended_operator; /* For DEMANGLE_COMPONENT_CTOR. */ struct { /* Kind of constructor. */ enum gnu_v3_ctor_kinds kind; /* Name. */ struct demangle_component *name; } s_ctor; /* For DEMANGLE_COMPONENT_DTOR. */ struct { /* Kind of destructor. */ enum gnu_v3_dtor_kinds kind; /* Name. */ struct demangle_component *name; } s_dtor; /* For DEMANGLE_COMPONENT_BUILTIN_TYPE. */ struct { /* Builtin type. */ const struct demangle_builtin_type_info *type; } s_builtin; /* For DEMANGLE_COMPONENT_SUB_STD. */ struct { /* Standard substitution string. */ const char* string; /* Length of string. */ int len; } s_string; /* For DEMANGLE_COMPONENT_TEMPLATE_PARAM. */ struct { /* Template parameter index. */ long number; } s_number; /* For other types. */ struct { /* Left (or only) subtree. */ struct demangle_component *left; /* Right subtree. */ struct demangle_component *right; } s_binary; } u; }; /* People building mangled trees are expected to allocate instances of struct demangle_component themselves. They can then call one of the following functions to fill them in. */ /* Fill in most component types with a left subtree and a right subtree. Returns non-zero on success, zero on failure, such as an unrecognized or inappropriate component type. 
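   As an illustration of the interface declared below, one way to obtain and
   print a tree without building it by hand is to let the demangler parse a
   mangled name and then print the result.  This is a sketch only: the
   mangled name is a hypothetical example and error handling is minimal.

      void *mem;
      size_t alloc;
      struct demangle_component *tree;

      tree = cplus_demangle_v3_components ("_Z3fooi", DMGL_PARAMS, &mem);
      if (tree != NULL)
        {
          char *out = cplus_demangle_print (DMGL_PARAMS, tree, 64, &alloc);
          if (out != NULL)
            free (out);         ...the printed string is malloc'd...
          free (mem);           ...releases the storage behind the tree...
        }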
*/ extern int cplus_demangle_fill_component PARAMS ((struct demangle_component *fill, enum demangle_component_type, struct demangle_component *left, struct demangle_component *right)); /* Fill in a DEMANGLE_COMPONENT_NAME. Returns non-zero on success, zero for bad arguments. */ extern int cplus_demangle_fill_name PARAMS ((struct demangle_component *fill, const char *, int)); /* Fill in a DEMANGLE_COMPONENT_BUILTIN_TYPE, using the name of the builtin type (e.g., "int", etc.). Returns non-zero on success, zero if the type is not recognized. */ extern int cplus_demangle_fill_builtin_type PARAMS ((struct demangle_component *fill, const char *type_name)); /* Fill in a DEMANGLE_COMPONENT_OPERATOR, using the name of the operator and the number of arguments which it takes (the latter is used to disambiguate operators which can be both binary and unary, such as '-'). Returns non-zero on success, zero if the operator is not recognized. */ extern int cplus_demangle_fill_operator PARAMS ((struct demangle_component *fill, const char *opname, int args)); /* Fill in a DEMANGLE_COMPONENT_EXTENDED_OPERATOR, providing the number of arguments and the name. Returns non-zero on success, zero for bad arguments. */ extern int cplus_demangle_fill_extended_operator PARAMS ((struct demangle_component *fill, int numargs, struct demangle_component *nm)); /* Fill in a DEMANGLE_COMPONENT_CTOR. Returns non-zero on success, zero for bad arguments. */ extern int cplus_demangle_fill_ctor PARAMS ((struct demangle_component *fill, enum gnu_v3_ctor_kinds kind, struct demangle_component *name)); /* Fill in a DEMANGLE_COMPONENT_DTOR. Returns non-zero on success, zero for bad arguments. */ extern int cplus_demangle_fill_dtor PARAMS ((struct demangle_component *fill, enum gnu_v3_dtor_kinds kind, struct demangle_component *name)); /* This function translates a mangled name into a struct demangle_component tree. The first argument is the mangled name. The second argument is DMGL_* options. This returns a pointer to a tree on success, or NULL on failure. On success, the third argument is set to a block of memory allocated by malloc. This block should be passed to free when the tree is no longer needed. */ extern struct demangle_component * cplus_demangle_v3_components PARAMS ((const char *mangled, int options, void **mem)); /* This function takes a struct demangle_component tree and returns the corresponding demangled string. The first argument is DMGL_* options. The second is the tree to demangle. The third is a guess at the length of the demangled string, used to initially allocate the return buffer. The fourth is a pointer to a size_t. On success, this function returns a buffer allocated by malloc(), and sets the size_t pointed to by the fourth argument to the size of the allocated buffer (not the length of the returned string). On failure, this function returns NULL, and sets the size_t pointed to by the fourth argument to 0 for an invalid tree, or to 1 for a memory allocation error. */ extern char * cplus_demangle_print PARAMS ((int options, const struct demangle_component *tree, int estimated_length, size_t *p_allocated_size)); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* DEMANGLE_H */ #undef CURRENT_DEMANGLING_STYLE #define CURRENT_DEMANGLING_STYLE work->options static char *ada_demangle PARAMS ((const char *, int)); #define min(X,Y) (((X) < (Y)) ? (X) : (Y)) /* A value at least one greater than the maximum number of characters that will be output when using the `%d' format with `printf'. 
*/ #define INTBUF_SIZE 32 /* In order to allow a single demangler executable to demangle strings using various common values of CPLUS_MARKER, as well as any specific one set at compile time, we maintain a string containing all the commonly used ones, and check to see if the marker we are looking for is in that string. CPLUS_MARKER is usually '$' on systems where the assembler can deal with that. Where the assembler can't, it's usually '.' (but on many systems '.' is used for other things). We put the current defined CPLUS_MARKER first (which defaults to '$'), followed by the next most common value, followed by an explicit '$' in case the value of CPLUS_MARKER is not '$'. We could avoid this if we could just get g++ to tell us what the actual cplus marker character is as part of the debug information, perhaps by ensuring that it is the character that terminates the gcc_compiled marker symbol (FIXME). */ #if !defined (CPLUS_MARKER) #define CPLUS_MARKER '$' #endif enum demangling_styles current_demangling_style = auto_demangling; static char cplus_markers[] = { CPLUS_MARKER, '.', '$', '\0' }; static char char_str[2] = { '\000', '\000' }; void set_cplus_marker_for_demangling (ch) int ch; { cplus_markers[0] = ch; } typedef struct string /* Beware: these aren't required to be */ { /* '\0' terminated. */ char *b; /* pointer to start of string */ char *p; /* pointer after last character */ char *e; /* pointer after end of allocated space */ } string; /* Stuff that is shared between sub-routines. Using a shared structure allows cplus_demangle to be reentrant. */ struct work_stuff { int options; char **typevec; char **ktypevec; char **btypevec; int numk; int numb; int ksize; int bsize; int ntypes; int typevec_size; int constructor; int destructor; int static_type; /* A static member function */ int temp_start; /* index in demangled to start of template args */ int type_quals; /* The type qualifiers. */ int dllimported; /* Symbol imported from a PE DLL */ char **tmpl_argvec; /* Template function arguments. */ int ntmpl_args; /* The number of template function arguments. */ int forgetting_types; /* Nonzero if we are not remembering the types we see. */ string* previous_argument; /* The last function argument demangled. */ int nrepeats; /* The number of times to repeat the previous argument. 
*/ }; #define PRINT_ANSI_QUALIFIERS (work -> options & DMGL_ANSI) #define PRINT_ARG_TYPES (work -> options & DMGL_PARAMS) static const struct optable { const char *const in; const char *const out; const int flags; } optable[] = { {"nw", " new", DMGL_ANSI}, /* new (1.92, ansi) */ {"dl", " delete", DMGL_ANSI}, /* new (1.92, ansi) */ {"new", " new", 0}, /* old (1.91, and 1.x) */ {"delete", " delete", 0}, /* old (1.91, and 1.x) */ {"vn", " new []", DMGL_ANSI}, /* GNU, pending ansi */ {"vd", " delete []", DMGL_ANSI}, /* GNU, pending ansi */ {"as", "=", DMGL_ANSI}, /* ansi */ {"ne", "!=", DMGL_ANSI}, /* old, ansi */ {"eq", "==", DMGL_ANSI}, /* old, ansi */ {"ge", ">=", DMGL_ANSI}, /* old, ansi */ {"gt", ">", DMGL_ANSI}, /* old, ansi */ {"le", "<=", DMGL_ANSI}, /* old, ansi */ {"lt", "<", DMGL_ANSI}, /* old, ansi */ {"plus", "+", 0}, /* old */ {"pl", "+", DMGL_ANSI}, /* ansi */ {"apl", "+=", DMGL_ANSI}, /* ansi */ {"minus", "-", 0}, /* old */ {"mi", "-", DMGL_ANSI}, /* ansi */ {"ami", "-=", DMGL_ANSI}, /* ansi */ {"mult", "*", 0}, /* old */ {"ml", "*", DMGL_ANSI}, /* ansi */ {"amu", "*=", DMGL_ANSI}, /* ansi (ARM/Lucid) */ {"aml", "*=", DMGL_ANSI}, /* ansi (GNU/g++) */ {"convert", "+", 0}, /* old (unary +) */ {"negate", "-", 0}, /* old (unary -) */ {"trunc_mod", "%", 0}, /* old */ {"md", "%", DMGL_ANSI}, /* ansi */ {"amd", "%=", DMGL_ANSI}, /* ansi */ {"trunc_div", "/", 0}, /* old */ {"dv", "/", DMGL_ANSI}, /* ansi */ {"adv", "/=", DMGL_ANSI}, /* ansi */ {"truth_andif", "&&", 0}, /* old */ {"aa", "&&", DMGL_ANSI}, /* ansi */ {"truth_orif", "||", 0}, /* old */ {"oo", "||", DMGL_ANSI}, /* ansi */ {"truth_not", "!", 0}, /* old */ {"nt", "!", DMGL_ANSI}, /* ansi */ {"postincrement","++", 0}, /* old */ {"pp", "++", DMGL_ANSI}, /* ansi */ {"postdecrement","--", 0}, /* old */ {"mm", "--", DMGL_ANSI}, /* ansi */ {"bit_ior", "|", 0}, /* old */ {"or", "|", DMGL_ANSI}, /* ansi */ {"aor", "|=", DMGL_ANSI}, /* ansi */ {"bit_xor", "^", 0}, /* old */ {"er", "^", DMGL_ANSI}, /* ansi */ {"aer", "^=", DMGL_ANSI}, /* ansi */ {"bit_and", "&", 0}, /* old */ {"ad", "&", DMGL_ANSI}, /* ansi */ {"aad", "&=", DMGL_ANSI}, /* ansi */ {"bit_not", "~", 0}, /* old */ {"co", "~", DMGL_ANSI}, /* ansi */ {"call", "()", 0}, /* old */ {"cl", "()", DMGL_ANSI}, /* ansi */ {"alshift", "<<", 0}, /* old */ {"ls", "<<", DMGL_ANSI}, /* ansi */ {"als", "<<=", DMGL_ANSI}, /* ansi */ {"arshift", ">>", 0}, /* old */ {"rs", ">>", DMGL_ANSI}, /* ansi */ {"ars", ">>=", DMGL_ANSI}, /* ansi */ {"component", "->", 0}, /* old */ {"pt", "->", DMGL_ANSI}, /* ansi; Lucid C++ form */ {"rf", "->", DMGL_ANSI}, /* ansi; ARM/GNU form */ {"indirect", "*", 0}, /* old */ {"method_call", "->()", 0}, /* old */ {"addr", "&", 0}, /* old (unary &) */ {"array", "[]", 0}, /* old */ {"vc", "[]", DMGL_ANSI}, /* ansi */ {"compound", ", ", 0}, /* old */ {"cm", ", ", DMGL_ANSI}, /* ansi */ {"cond", "?:", 0}, /* old */ {"cn", "?:", DMGL_ANSI}, /* pseudo-ansi */ {"max", ">?", 0}, /* old */ {"mx", ">?", DMGL_ANSI}, /* pseudo-ansi */ {"min", "*", DMGL_ANSI}, /* ansi */ {"sz", "sizeof ", DMGL_ANSI} /* pseudo-ansi */ }; /* These values are used to indicate the various type varieties. They are all non-zero so that they can be used as `success' values. 
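   (As an aside on the operator table above: the opname helpers declared in
   demangle.h give a round trip through that table.  A sketch, with `buf' as
   a hypothetical scratch buffer large enough for any operator name:

      char buf[64];
      const char *code;

      if (cplus_demangle_opname ("__eq", buf, DMGL_ANSI))
        ...buf is expected to hold "operator=="...

      code = cplus_mangle_opname ("==", DMGL_ANSI);
      ...code is expected to point at the table entry "eq", or be NULL if
         the operator is not recognized...)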
*/ typedef enum type_kind_t { tk_none, tk_pointer, tk_reference, tk_integral, tk_bool, tk_char, tk_real } type_kind_t; const struct demangler_engine libiberty_demanglers[] = { { NO_DEMANGLING_STYLE_STRING, no_demangling, "Demangling disabled" } , { AUTO_DEMANGLING_STYLE_STRING, auto_demangling, "Automatic selection based on executable" } , { GNU_DEMANGLING_STYLE_STRING, gnu_demangling, "GNU (g++) style demangling" } , { LUCID_DEMANGLING_STYLE_STRING, lucid_demangling, "Lucid (lcc) style demangling" } , { ARM_DEMANGLING_STYLE_STRING, arm_demangling, "ARM style demangling" } , { HP_DEMANGLING_STYLE_STRING, hp_demangling, "HP (aCC) style demangling" } , { EDG_DEMANGLING_STYLE_STRING, edg_demangling, "EDG style demangling" } , { GNU_V3_DEMANGLING_STYLE_STRING, gnu_v3_demangling, "GNU (g++) V3 ABI-style demangling" } , { JAVA_DEMANGLING_STYLE_STRING, java_demangling, "Java style demangling" } , { GNAT_DEMANGLING_STYLE_STRING, gnat_demangling, "GNAT style demangling" } , { NULL, unknown_demangling, NULL } }; #define STRING_EMPTY(str) ((str) -> b == (str) -> p) #define APPEND_BLANK(str) {if (!STRING_EMPTY(str)) \ string_append(str, " ");} #define LEN_STRING(str) ( (STRING_EMPTY(str))?0:((str)->p - (str)->b)) /* The scope separator appropriate for the language being demangled. */ #define SCOPE_STRING(work) ((work->options & DMGL_JAVA) ? "." : "::") #define ARM_VTABLE_STRING "__vtbl__" /* Lucid/ARM virtual table prefix */ #define ARM_VTABLE_STRLEN 8 /* strlen (ARM_VTABLE_STRING) */ /* Prototypes for local functions */ static void delete_work_stuff PARAMS ((struct work_stuff *)); static void delete_non_B_K_work_stuff PARAMS ((struct work_stuff *)); static char * mop_up PARAMS ((struct work_stuff *, string *, int)); static void squangle_mop_up PARAMS ((struct work_stuff *)); static void work_stuff_copy_to_from PARAMS ((struct work_stuff *, struct work_stuff *)); #if 0 static int demangle_method_args PARAMS ((struct work_stuff *, const char **, string *)); #endif static char * internal_cplus_demangle PARAMS ((struct work_stuff *, const char *)); static int demangle_template_template_parm PARAMS ((struct work_stuff *work, const char **, string *)); static int demangle_template PARAMS ((struct work_stuff *work, const char **, string *, string *, int, int)); static int arm_pt PARAMS ((struct work_stuff *, const char *, int, const char **, const char **)); static int demangle_class_name PARAMS ((struct work_stuff *, const char **, string *)); static int demangle_qualified PARAMS ((struct work_stuff *, const char **, string *, int, int)); static int demangle_class PARAMS ((struct work_stuff *, const char **, string *)); static int demangle_fund_type PARAMS ((struct work_stuff *, const char **, string *)); static int demangle_signature PARAMS ((struct work_stuff *, const char **, string *)); static int demangle_prefix PARAMS ((struct work_stuff *, const char **, string *)); static int gnu_special PARAMS ((struct work_stuff *, const char **, string *)); static int arm_special PARAMS ((const char **, string *)); static void string_need PARAMS ((string *, int)); static void string_delete PARAMS ((string *)); static void string_init PARAMS ((string *)); static void string_clear PARAMS ((string *)); #if 0 static int string_empty PARAMS ((string *)); #endif static void string_append PARAMS ((string *, const char *)); static void string_appends PARAMS ((string *, string *)); static void string_appendn PARAMS ((string *, const char *, int)); static void string_prepend PARAMS ((string *, const char *)); static void 
string_prependn PARAMS ((string *, const char *, int)); static void string_append_template_idx PARAMS ((string *, int)); static int get_count PARAMS ((const char **, int *)); static int consume_count PARAMS ((const char **)); static int consume_count_with_underscores PARAMS ((const char**)); static int demangle_args PARAMS ((struct work_stuff *, const char **, string *)); static int demangle_nested_args PARAMS ((struct work_stuff*, const char**, string*)); static int do_type PARAMS ((struct work_stuff *, const char **, string *)); static int do_arg PARAMS ((struct work_stuff *, const char **, string *)); static void demangle_function_name PARAMS ((struct work_stuff *, const char **, string *, const char *)); static int iterate_demangle_function PARAMS ((struct work_stuff *, const char **, string *, const char *)); static void remember_type PARAMS ((struct work_stuff *, const char *, int)); static void remember_Btype PARAMS ((struct work_stuff *, const char *, int, int)); static int register_Btype PARAMS ((struct work_stuff *)); static void remember_Ktype PARAMS ((struct work_stuff *, const char *, int)); static void forget_types PARAMS ((struct work_stuff *)); static void forget_B_and_K_types PARAMS ((struct work_stuff *)); static void string_prepends PARAMS ((string *, string *)); static int demangle_template_value_parm PARAMS ((struct work_stuff*, const char**, string*, type_kind_t)); static int do_hpacc_template_const_value PARAMS ((struct work_stuff *, const char **, string *)); static int do_hpacc_template_literal PARAMS ((struct work_stuff *, const char **, string *)); static int snarf_numeric_literal PARAMS ((const char **, string *)); /* There is a TYPE_QUAL value for each type qualifier. They can be combined by bitwise-or to form the complete set of qualifiers for a type. */ #define TYPE_UNQUALIFIED 0x0 #define TYPE_QUAL_CONST 0x1 #define TYPE_QUAL_VOLATILE 0x2 #define TYPE_QUAL_RESTRICT 0x4 static int code_for_qualifier PARAMS ((int)); static const char* qualifier_string PARAMS ((int)); static const char* demangle_qualifier PARAMS ((int)); static int demangle_expression PARAMS ((struct work_stuff *, const char **, string *, type_kind_t)); static int demangle_integral_value PARAMS ((struct work_stuff *, const char **, string *)); static int demangle_real_value PARAMS ((struct work_stuff *, const char **, string *)); static void demangle_arm_hp_template PARAMS ((struct work_stuff *, const char **, int, string *)); static void recursively_demangle PARAMS ((struct work_stuff *, const char **, string *, int)); static void grow_vect PARAMS ((char **, size_t *, size_t, int)); /* Translate count to integer, consuming tokens in the process. Conversion terminates on the first non-digit character. Trying to consume something that isn't a count results in no consumption of input and a return of -1. Overflow consumes the rest of the digits, and returns -1. */ static int consume_count (type) const char **type; { int count = 0; if (! ISDIGIT ((unsigned char)**type)) return -1; while (ISDIGIT ((unsigned char)**type)) { count *= 10; /* Check for overflow. We assume that count is represented using two's-complement; no power of two is divisible by ten, so if an overflow occurs when multiplying by ten, the result will not be a multiple of ten. 
*/ if ((count % 10) != 0) { while (ISDIGIT ((unsigned char) **type)) (*type)++; return -1; } count += **type - '0'; (*type)++; } if (count < 0) count = -1; return (count); } /* Like consume_count, but for counts that are preceded and followed by '_' if they are greater than 10. Also, -1 is returned for failure, since 0 can be a valid value. */ static int consume_count_with_underscores (mangled) const char **mangled; { int idx; if (**mangled == '_') { (*mangled)++; if (!ISDIGIT ((unsigned char)**mangled)) return -1; idx = consume_count (mangled); if (**mangled != '_') /* The trailing underscore was missing. */ return -1; (*mangled)++; } else { if (**mangled < '0' || **mangled > '9') return -1; idx = **mangled - '0'; (*mangled)++; } return idx; } /* C is the code for a type-qualifier. Return the TYPE_QUAL corresponding to this qualifier. */ static int code_for_qualifier (c) int c; { switch (c) { case 'C': return TYPE_QUAL_CONST; case 'V': return TYPE_QUAL_VOLATILE; case 'u': return TYPE_QUAL_RESTRICT; default: break; } /* C was an invalid qualifier. */ abort (); } /* Return the string corresponding to the qualifiers given by TYPE_QUALS. */ static const char* qualifier_string (type_quals) int type_quals; { switch (type_quals) { case TYPE_UNQUALIFIED: return ""; case TYPE_QUAL_CONST: return "const"; case TYPE_QUAL_VOLATILE: return "volatile"; case TYPE_QUAL_RESTRICT: return "__restrict"; case TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE: return "const volatile"; case TYPE_QUAL_CONST | TYPE_QUAL_RESTRICT: return "const __restrict"; case TYPE_QUAL_VOLATILE | TYPE_QUAL_RESTRICT: return "volatile __restrict"; case TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE | TYPE_QUAL_RESTRICT: return "const volatile __restrict"; default: break; } /* TYPE_QUALS was an invalid qualifier set. */ abort (); } /* C is the code for a type-qualifier. Return the string corresponding to this qualifier. This function should only be called with a valid qualifier code. */ static const char* demangle_qualifier (c) int c; { return qualifier_string (code_for_qualifier (c)); } int cplus_demangle_opname (opname, result, options) const char *opname; char *result; int options; { int len, len1, ret; string type; struct work_stuff work[1]; const char *tem; len = strlen(opname); result[0] = '\0'; ret = 0; memset ((char *) work, 0, sizeof (work)); work->options = options; if (opname[0] == '_' && opname[1] == '_' && opname[2] == 'o' && opname[3] == 'p') { /* ANSI. */ /* type conversion operator. */ tem = opname + 4; if (do_type (work, &tem, &type)) { strcat (result, "operator "); strncat (result, type.b, type.p - type.b); string_delete (&type); ret = 1; } } else if (opname[0] == '_' && opname[1] == '_' && ISLOWER((unsigned char)opname[2]) && ISLOWER((unsigned char)opname[3])) { if (opname[4] == '\0') { /* Operator. */ size_t i; for (i = 0; i < ARRAY_SIZE (optable); i++) { if (strlen (optable[i].in) == 2 && memcmp (optable[i].in, opname + 2, 2) == 0) { strcat (result, "operator"); strcat (result, optable[i].out); ret = 1; break; } } } else { if (opname[2] == 'a' && opname[5] == '\0') { /* Assignment. 
*/ size_t i; for (i = 0; i < ARRAY_SIZE (optable); i++) { if (strlen (optable[i].in) == 3 && memcmp (optable[i].in, opname + 2, 3) == 0) { strcat (result, "operator"); strcat (result, optable[i].out); ret = 1; break; } } } } } else if (len >= 3 && opname[0] == 'o' && opname[1] == 'p' && strchr (cplus_markers, opname[2]) != NULL) { /* see if it's an assignment expression */ if (len >= 10 /* op$assign_ */ && memcmp (opname + 3, "assign_", 7) == 0) { size_t i; for (i = 0; i < ARRAY_SIZE (optable); i++) { len1 = len - 10; if ((int) strlen (optable[i].in) == len1 && memcmp (optable[i].in, opname + 10, len1) == 0) { strcat (result, "operator"); strcat (result, optable[i].out); strcat (result, "="); ret = 1; break; } } } else { size_t i; for (i = 0; i < ARRAY_SIZE (optable); i++) { len1 = len - 3; if ((int) strlen (optable[i].in) == len1 && memcmp (optable[i].in, opname + 3, len1) == 0) { strcat (result, "operator"); strcat (result, optable[i].out); ret = 1; break; } } } } else if (len >= 5 && memcmp (opname, "type", 4) == 0 && strchr (cplus_markers, opname[4]) != NULL) { /* type conversion operator */ tem = opname + 5; if (do_type (work, &tem, &type)) { strcat (result, "operator "); strncat (result, type.b, type.p - type.b); string_delete (&type); ret = 1; } } squangle_mop_up (work); return ret; } /* Takes operator name as e.g. "++" and returns mangled operator name (e.g. "postincrement_expr"), or NULL if not found. If OPTIONS & DMGL_ANSI == 1, return the ANSI name; if OPTIONS & DMGL_ANSI == 0, return the old GNU name. */ const char * cplus_mangle_opname (opname, options) const char *opname; int options; { size_t i; int len; len = strlen (opname); for (i = 0; i < ARRAY_SIZE (optable); i++) { if ((int) strlen (optable[i].out) == len && (options & DMGL_ANSI) == (optable[i].flags & DMGL_ANSI) && memcmp (optable[i].out, opname, len) == 0) return optable[i].in; } return (0); } /* Add a routine to set the demangling style to be sure it is valid and allow for any demangler initialization that maybe necessary. */ enum demangling_styles cplus_demangle_set_style (style) enum demangling_styles style; { const struct demangler_engine *demangler = libiberty_demanglers; for (; demangler->demangling_style != unknown_demangling; ++demangler) if (style == demangler->demangling_style) { current_demangling_style = style; return current_demangling_style; } return unknown_demangling; } /* Do string name to style translation */ enum demangling_styles cplus_demangle_name_to_style (name) const char *name; { const struct demangler_engine *demangler = libiberty_demanglers; for (; demangler->demangling_style != unknown_demangling; ++demangler) if (strcmp (name, demangler->demangling_style_name) == 0) return demangler->demangling_style; return unknown_demangling; } /* char *cplus_demangle (const char *mangled, int options) If MANGLED is a mangled function name produced by GNU C++, then a pointer to a @code{malloc}ed string giving a C++ representation of the name will be returned; otherwise NULL will be returned. It is the caller's responsibility to free the string which is returned. The OPTIONS arg may contain one or more of the following bits: DMGL_ANSI ANSI qualifiers such as `const' and `void' are included. DMGL_PARAMS Function parameters are included. 
For example, cplus_demangle ("foo__1Ai", DMGL_PARAMS) => "A::foo(int)" cplus_demangle ("foo__1Ai", DMGL_PARAMS | DMGL_ANSI) => "A::foo(int)" cplus_demangle ("foo__1Ai", 0) => "A::foo" cplus_demangle ("foo__1Afe", DMGL_PARAMS) => "A::foo(float,...)" cplus_demangle ("foo__1Afe", DMGL_PARAMS | DMGL_ANSI)=> "A::foo(float,...)" cplus_demangle ("foo__1Afe", 0) => "A::foo" Note that any leading underscores, or other such characters prepended by the compilation system, are presumed to have already been stripped from MANGLED. */ char * cplus_demangle (mangled, options) const char *mangled; int options; { char *ret; struct work_stuff work[1]; if (current_demangling_style == no_demangling) return xstrdup (mangled); memset ((char *) work, 0, sizeof (work)); work->options = options; if ((work->options & DMGL_STYLE_MASK) == 0) work->options |= (int) current_demangling_style & DMGL_STYLE_MASK; /* The V3 ABI demangling is implemented elsewhere. */ if (GNU_V3_DEMANGLING || AUTO_DEMANGLING) { ret = cplus_demangle_v3 (mangled, work->options); if (ret || GNU_V3_DEMANGLING) return ret; } if (JAVA_DEMANGLING) { ret = java_demangle_v3 (mangled); if (ret) return ret; } if (GNAT_DEMANGLING) return ada_demangle(mangled,options); ret = internal_cplus_demangle (work, mangled); squangle_mop_up (work); return (ret); } /* Assuming *OLD_VECT points to an array of *SIZE objects of size ELEMENT_SIZE, grow it to contain at least MIN_SIZE objects, updating *OLD_VECT and *SIZE as necessary. */ static void grow_vect (old_vect, size, min_size, element_size) char **old_vect; size_t *size; size_t min_size; int element_size; { if (*size < min_size) { *size *= 2; if (*size < min_size) *size = min_size; *old_vect = (void *) xrealloc (*old_vect, *size * element_size); } } /* Demangle ada names: 1. Discard final __{DIGIT}+ or ${DIGIT}+ 2. Convert other instances of embedded "__" to `.'. 3. Discard leading _ada_. 4. Remove everything after first ___ if it is followed by 'X'. 5. Put symbols that should be suppressed in <...> brackets. The resulting string is valid until the next call of ada_demangle. */ static char * ada_demangle (mangled, option) const char *mangled; int option ATTRIBUTE_UNUSED; { int i, j; int len0; const char* p; char *demangled = NULL; int at_start_name; int changed; size_t demangled_size = 0; changed = 0; if (strncmp (mangled, "_ada_", 5) == 0) { mangled += 5; changed = 1; } if (mangled[0] == '_' || mangled[0] == '<') goto Suppress; p = strstr (mangled, "___"); if (p == NULL) len0 = strlen (mangled); else { if (p[3] == 'X') { len0 = p - mangled; changed = 1; } else goto Suppress; } /* Make demangled big enough for possible expansion by operator name. */ grow_vect (&demangled, &demangled_size, 2 * len0 + 1, sizeof (char)); if (ISDIGIT ((unsigned char) mangled[len0 - 1])) { for (i = len0 - 2; i >= 0 && ISDIGIT ((unsigned char) mangled[i]); i -= 1) ; if (i > 1 && mangled[i] == '_' && mangled[i - 1] == '_') { len0 = i - 1; changed = 1; } else if (mangled[i] == '$') { len0 = i; changed = 1; } } for (i = 0, j = 0; i < len0 && ! ISALPHA ((unsigned char)mangled[i]); i += 1, j += 1) demangled[j] = mangled[i]; at_start_name = 1; while (i < len0) { at_start_name = 0; if (i < len0 - 2 && mangled[i] == '_' && mangled[i + 1] == '_') { demangled[j] = '.'; changed = at_start_name = 1; i += 2; j += 1; } else { demangled[j] = mangled[i]; i += 1; j += 1; } } demangled[j] = '\000'; for (i = 0; demangled[i] != '\0'; i += 1) if (ISUPPER ((unsigned char)demangled[i]) || demangled[i] == ' ') goto Suppress; if (! 
changed) return NULL; else return demangled; Suppress: grow_vect (&demangled, &demangled_size, strlen (mangled) + 3, sizeof (char)); if (mangled[0] == '<') strcpy (demangled, mangled); else sprintf (demangled, "<%s>", mangled); return demangled; } /* This function performs most of what cplus_demangle use to do, but to be able to demangle a name with a B, K or n code, we need to have a longer term memory of what types have been seen. The original now initializes and cleans up the squangle code info, while internal calls go directly to this routine to avoid resetting that info. */ static char * internal_cplus_demangle (work, mangled) struct work_stuff *work; const char *mangled; { string decl; int success = 0; char *demangled = NULL; int s1, s2, s3, s4; s1 = work->constructor; s2 = work->destructor; s3 = work->static_type; s4 = work->type_quals; work->constructor = work->destructor = 0; work->type_quals = TYPE_UNQUALIFIED; work->dllimported = 0; if ((mangled != NULL) && (*mangled != '\0')) { string_init (&decl); /* First check to see if gnu style demangling is active and if the string to be demangled contains a CPLUS_MARKER. If so, attempt to recognize one of the gnu special forms rather than looking for a standard prefix. In particular, don't worry about whether there is a "__" string in the mangled string. Consider "_$_5__foo" for example. */ if ((AUTO_DEMANGLING || GNU_DEMANGLING)) { success = gnu_special (work, &mangled, &decl); } if (!success) { success = demangle_prefix (work, &mangled, &decl); } if (success && (*mangled != '\0')) { success = demangle_signature (work, &mangled, &decl); } if (work->constructor == 2) { string_prepend (&decl, "global constructors keyed to "); work->constructor = 0; } else if (work->destructor == 2) { string_prepend (&decl, "global destructors keyed to "); work->destructor = 0; } else if (work->dllimported == 1) { string_prepend (&decl, "import stub for "); work->dllimported = 0; } demangled = mop_up (work, &decl, success); } work->constructor = s1; work->destructor = s2; work->static_type = s3; work->type_quals = s4; return demangled; } /* Clear out and squangling related storage */ static void squangle_mop_up (work) struct work_stuff *work; { /* clean up the B and K type mangling types. */ forget_B_and_K_types (work); if (work -> btypevec != NULL) { free ((char *) work -> btypevec); } if (work -> ktypevec != NULL) { free ((char *) work -> ktypevec); } } /* Copy the work state and storage. */ static void work_stuff_copy_to_from (to, from) struct work_stuff *to; struct work_stuff *from; { int i; delete_work_stuff (to); /* Shallow-copy scalars. */ memcpy (to, from, sizeof (*to)); /* Deep-copy dynamic storage. 
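Each string saved in the typevec, ktypevec, btypevec and tmpl_argvec arrays (and the previous_argument string) is duplicated below, so TO ends up owning independent copies that can be freed without disturbing FROM.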
*/ if (from->typevec_size) to->typevec = (char **) xmalloc (from->typevec_size * sizeof (to->typevec[0])); for (i = 0; i < from->ntypes; i++) { int len = strlen (from->typevec[i]) + 1; to->typevec[i] = xmalloc (len); memcpy (to->typevec[i], from->typevec[i], len); } if (from->ksize) to->ktypevec = (char **) xmalloc (from->ksize * sizeof (to->ktypevec[0])); for (i = 0; i < from->numk; i++) { int len = strlen (from->ktypevec[i]) + 1; to->ktypevec[i] = xmalloc (len); memcpy (to->ktypevec[i], from->ktypevec[i], len); } if (from->bsize) to->btypevec = (char **) xmalloc (from->bsize * sizeof (to->btypevec[0])); for (i = 0; i < from->numb; i++) { int len = strlen (from->btypevec[i]) + 1; to->btypevec[i] = xmalloc (len); memcpy (to->btypevec[i], from->btypevec[i], len); } if (from->ntmpl_args) to->tmpl_argvec = (char **) xmalloc (from->ntmpl_args * sizeof (to->tmpl_argvec[0])); for (i = 0; i < from->ntmpl_args; i++) { int len = strlen (from->tmpl_argvec[i]) + 1; to->tmpl_argvec[i] = xmalloc (len); memcpy (to->tmpl_argvec[i], from->tmpl_argvec[i], len); } if (from->previous_argument) { to->previous_argument = (string*) xmalloc (sizeof (string)); string_init (to->previous_argument); string_appends (to->previous_argument, from->previous_argument); } } /* Delete dynamic stuff in work_stuff that is not to be re-used. */ static void delete_non_B_K_work_stuff (work) struct work_stuff *work; { /* Discard the remembered types, if any. */ forget_types (work); if (work -> typevec != NULL) { free ((char *) work -> typevec); work -> typevec = NULL; work -> typevec_size = 0; } if (work->tmpl_argvec) { int i; for (i = 0; i < work->ntmpl_args; i++) if (work->tmpl_argvec[i]) free ((char*) work->tmpl_argvec[i]); free ((char*) work->tmpl_argvec); work->tmpl_argvec = NULL; } if (work->previous_argument) { string_delete (work->previous_argument); free ((char*) work->previous_argument); work->previous_argument = NULL; } } /* Delete all dynamic storage in work_stuff. */ static void delete_work_stuff (work) struct work_stuff *work; { delete_non_B_K_work_stuff (work); squangle_mop_up (work); } /* Clear out any mangled storage */ static char * mop_up (work, declp, success) struct work_stuff *work; string *declp; int success; { char *demangled = NULL; delete_non_B_K_work_stuff (work); /* If demangling was successful, ensure that the demangled string is null terminated and return it. Otherwise, free the demangling decl. */ if (!success) { string_delete (declp); } else { string_appendn (declp, "", 1); demangled = declp->b; } return (demangled); } /* LOCAL FUNCTION demangle_signature -- demangle the signature part of a mangled name SYNOPSIS static int demangle_signature (struct work_stuff *work, const char **mangled, string *declp); DESCRIPTION Consume and demangle the signature portion of the mangled name. DECLP is the string where demangled output is being built. At entry it contains the demangled root name from the mangled name prefix. I.E. either a demangled operator name or the root function name. In some special cases, it may contain nothing. *MANGLED points to the current unconsumed location in the mangled name. As tokens are consumed and demangling is performed, the pointer is updated to continuously point at the next token to be consumed. Demangling GNU style mangled names is nasty because there is no explicit token that marks the start of the outermost function argument list. 
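For example, for "foo__1Ai" we are entered with DECLP holding "foo" and *MANGLED pointing at "1Ai"; the leading-digit case demangles the class, giving "A::foo", and the trailing "i" is then taken as the start of the argument list, yielding "A::foo(int)" when DMGL_PARAMS is in effect.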
*/ static int demangle_signature (work, mangled, declp) struct work_stuff *work; const char **mangled; string *declp; { int success = 1; int func_done = 0; int expect_func = 0; int expect_return_type = 0; const char *oldmangled = NULL; string trawname; string tname; while (success && (**mangled != '\0')) { switch (**mangled) { case 'Q': oldmangled = *mangled; success = demangle_qualified (work, mangled, declp, 1, 0); if (success) remember_type (work, oldmangled, *mangled - oldmangled); if (AUTO_DEMANGLING || GNU_DEMANGLING) expect_func = 1; oldmangled = NULL; break; case 'K': oldmangled = *mangled; success = demangle_qualified (work, mangled, declp, 1, 0); if (AUTO_DEMANGLING || GNU_DEMANGLING) { expect_func = 1; } oldmangled = NULL; break; case 'S': /* Static member function */ if (oldmangled == NULL) { oldmangled = *mangled; } (*mangled)++; work -> static_type = 1; break; case 'C': case 'V': case 'u': work->type_quals |= code_for_qualifier (**mangled); /* a qualified member function */ if (oldmangled == NULL) oldmangled = *mangled; (*mangled)++; break; case 'L': /* Local class name follows after "Lnnn_" */ if (HP_DEMANGLING) { while (**mangled && (**mangled != '_')) (*mangled)++; if (!**mangled) success = 0; else (*mangled)++; } else success = 0; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (oldmangled == NULL) { oldmangled = *mangled; } work->temp_start = -1; /* uppermost call to demangle_class */ success = demangle_class (work, mangled, declp); if (success) { remember_type (work, oldmangled, *mangled - oldmangled); } if (AUTO_DEMANGLING || GNU_DEMANGLING || EDG_DEMANGLING) { /* EDG and others will have the "F", so we let the loop cycle if we are looking at one. */ if (**mangled != 'F') expect_func = 1; } oldmangled = NULL; break; case 'B': { string s; success = do_type (work, mangled, &s); if (success) { string_append (&s, SCOPE_STRING (work)); string_prepends (declp, &s); string_delete (&s); } oldmangled = NULL; expect_func = 1; } break; case 'F': /* Function */ /* ARM/HP style demangling includes a specific 'F' character after the class name. For GNU style, it is just implied. So we can safely just consume any 'F' at this point and be compatible with either style. */ oldmangled = NULL; func_done = 1; (*mangled)++; /* For lucid/ARM/HP style we have to forget any types we might have remembered up to this point, since they were not argument types. GNU style considers all types seen as available for back references. See comment in demangle_args() */ if (LUCID_DEMANGLING || ARM_DEMANGLING || HP_DEMANGLING || EDG_DEMANGLING) { forget_types (work); } success = demangle_args (work, mangled, declp); /* After picking off the function args, we expect to either find the function return type (preceded by an '_') or the end of the string. */ if (success && (AUTO_DEMANGLING || EDG_DEMANGLING) && **mangled == '_') { ++(*mangled); /* At this level, we do not care about the return type. 
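We still call do_type so that the characters of the return type are consumed, but the text it produces is discarded immediately.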
*/ success = do_type (work, mangled, &tname); string_delete (&tname); } break; case 't': /* G++ Template */ string_init(&trawname); string_init(&tname); if (oldmangled == NULL) { oldmangled = *mangled; } success = demangle_template (work, mangled, &tname, &trawname, 1, 1); if (success) { remember_type (work, oldmangled, *mangled - oldmangled); } string_append (&tname, SCOPE_STRING (work)); string_prepends(declp, &tname); if (work -> destructor & 1) { string_prepend (&trawname, "~"); string_appends (declp, &trawname); work->destructor -= 1; } if ((work->constructor & 1) || (work->destructor & 1)) { string_appends (declp, &trawname); work->constructor -= 1; } string_delete(&trawname); string_delete(&tname); oldmangled = NULL; expect_func = 1; break; case '_': if ((AUTO_DEMANGLING || GNU_DEMANGLING) && expect_return_type) { /* Read the return type. */ string return_type; (*mangled)++; success = do_type (work, mangled, &return_type); APPEND_BLANK (&return_type); string_prepends (declp, &return_type); string_delete (&return_type); break; } else /* At the outermost level, we cannot have a return type specified, so if we run into another '_' at this point we are dealing with a mangled name that is either bogus, or has been mangled by some algorithm we don't know how to deal with. So just reject the entire demangling. */ /* However, "_nnn" is an expected suffix for alternate entry point numbered nnn for a function, with HP aCC, so skip over that without reporting failure. pai/1997-09-04 */ if (HP_DEMANGLING) { (*mangled)++; while (**mangled && ISDIGIT ((unsigned char)**mangled)) (*mangled)++; } else success = 0; break; case 'H': if (AUTO_DEMANGLING || GNU_DEMANGLING) { /* A G++ template function. Read the template arguments. */ success = demangle_template (work, mangled, declp, 0, 0, 0); if (!(work->constructor & 1)) expect_return_type = 1; (*mangled)++; break; } else /* fall through */ {;} default: if (AUTO_DEMANGLING || GNU_DEMANGLING) { /* Assume we have stumbled onto the first outermost function argument token, and start processing args. */ func_done = 1; success = demangle_args (work, mangled, declp); } else { /* Non-GNU demanglers use a specific token to mark the start of the outermost function argument tokens. Typically 'F', for ARM/HP-demangling, for example. So if we find something we are not prepared for, it must be an error. */ success = 0; } break; } /* if (AUTO_DEMANGLING || GNU_DEMANGLING) */ { if (success && expect_func) { func_done = 1; if (LUCID_DEMANGLING || ARM_DEMANGLING || EDG_DEMANGLING) { forget_types (work); } success = demangle_args (work, mangled, declp); /* Since template include the mangling of their return types, we must set expect_func to 0 so that we don't try do demangle more arguments the next time we get here. */ expect_func = 0; } } } if (success && !func_done) { if (AUTO_DEMANGLING || GNU_DEMANGLING) { /* With GNU style demangling, bar__3foo is 'foo::bar(void)', and bar__3fooi is 'foo::bar(int)'. We get here when we find the first case, and need to ensure that the '(void)' gets added to the current declp. Note that with ARM/HP, the first case represents the name of a static data member 'foo::bar', which is in the current declp, so we leave it alone. 
*/ success = demangle_args (work, mangled, declp); } } if (success && PRINT_ARG_TYPES) { if (work->static_type) string_append (declp, " static"); if (work->type_quals != TYPE_UNQUALIFIED) { APPEND_BLANK (declp); string_append (declp, qualifier_string (work->type_quals)); } } return (success); } #if 0 static int demangle_method_args (work, mangled, declp) struct work_stuff *work; const char **mangled; string *declp; { int success = 0; if (work -> static_type) { string_append (declp, *mangled + 1); *mangled += strlen (*mangled); success = 1; } else { success = demangle_args (work, mangled, declp); } return (success); } #endif static int demangle_template_template_parm (work, mangled, tname) struct work_stuff *work; const char **mangled; string *tname; { int i; int r; int need_comma = 0; int success = 1; string temp; string_append (tname, "template <"); /* get size of template parameter list */ if (get_count (mangled, &r)) { for (i = 0; i < r; i++) { if (need_comma) { string_append (tname, ", "); } /* Z for type parameters */ if (**mangled == 'Z') { (*mangled)++; string_append (tname, "class"); } /* z for template parameters */ else if (**mangled == 'z') { (*mangled)++; success = demangle_template_template_parm (work, mangled, tname); if (!success) { break; } } else { /* temp is initialized in do_type */ success = do_type (work, mangled, &temp); if (success) { string_appends (tname, &temp); } string_delete(&temp); if (!success) { break; } } need_comma = 1; } } if (tname->p[-1] == '>') string_append (tname, " "); string_append (tname, "> class"); return (success); } static int demangle_expression (work, mangled, s, tk) struct work_stuff *work; const char** mangled; string* s; type_kind_t tk; { int need_operator = 0; int success; success = 1; string_appendn (s, "(", 1); (*mangled)++; while (success && **mangled != 'W' && **mangled != '\0') { if (need_operator) { size_t i; size_t len; success = 0; len = strlen (*mangled); for (i = 0; i < ARRAY_SIZE (optable); ++i) { size_t l = strlen (optable[i].in); if (l <= len && memcmp (optable[i].in, *mangled, l) == 0) { string_appendn (s, " ", 1); string_append (s, optable[i].out); string_appendn (s, " ", 1); success = 1; (*mangled) += l; break; } } if (!success) break; } else need_operator = 1; success = demangle_template_value_parm (work, mangled, s, tk); } if (**mangled != 'W') success = 0; else { string_appendn (s, ")", 1); (*mangled)++; } return success; } static int demangle_integral_value (work, mangled, s) struct work_stuff *work; const char** mangled; string* s; { int success; if (**mangled == 'E') success = demangle_expression (work, mangled, s, tk_integral); else if (**mangled == 'Q' || **mangled == 'K') success = demangle_qualified (work, mangled, s, 0, 1); else { int value; /* By default, we let the number decide whether we shall consume an underscore. */ int multidigit_without_leading_underscore = 0; int leave_following_underscore = 0; success = 0; if (**mangled == '_') { if (mangled[0][1] == 'm') { /* Since consume_count_with_underscores does not handle the `m'-prefix we must do it here, using consume_count and adjusting underscores: we have to consume the underscore matching the prepended one. */ multidigit_without_leading_underscore = 1; string_appendn (s, "-", 1); (*mangled) += 2; } else { /* Do not consume a following underscore; consume_count_with_underscores will consume what should be consumed. */ leave_following_underscore = 1; } } else { /* Negative numbers are indicated with a leading `m'. 
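For example, "m1" here denotes -1: the 'm' emits the minus sign and the digits are picked up by consume_count below.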
*/ if (**mangled == 'm') { string_appendn (s, "-", 1); (*mangled)++; } /* Since consume_count_with_underscores does not handle multi-digit numbers that do not start with an underscore, and this number can be an integer template parameter, we have to call consume_count. */ multidigit_without_leading_underscore = 1; /* These multi-digit numbers never end on an underscore, so if there is one then don't eat it. */ leave_following_underscore = 1; } /* We must call consume_count if we expect to remove a trailing underscore, since consume_count_with_underscores expects the leading underscore (that we consumed) if it is to handle multi-digit numbers. */ if (multidigit_without_leading_underscore) value = consume_count (mangled); else value = consume_count_with_underscores (mangled); if (value != -1) { char buf[INTBUF_SIZE]; sprintf (buf, "%d", value); string_append (s, buf); /* Numbers not otherwise delimited, might have an underscore appended as a delimeter, which we should skip. ??? This used to always remove a following underscore, which is wrong. If other (arbitrary) cases are followed by an underscore, we need to do something more radical. */ if ((value > 9 || multidigit_without_leading_underscore) && ! leave_following_underscore && **mangled == '_') (*mangled)++; /* All is well. */ success = 1; } } return success; } /* Demangle the real value in MANGLED. */ static int demangle_real_value (work, mangled, s) struct work_stuff *work; const char **mangled; string* s; { if (**mangled == 'E') return demangle_expression (work, mangled, s, tk_real); if (**mangled == 'm') { string_appendn (s, "-", 1); (*mangled)++; } while (ISDIGIT ((unsigned char)**mangled)) { string_appendn (s, *mangled, 1); (*mangled)++; } if (**mangled == '.') /* fraction */ { string_appendn (s, ".", 1); (*mangled)++; while (ISDIGIT ((unsigned char)**mangled)) { string_appendn (s, *mangled, 1); (*mangled)++; } } if (**mangled == 'e') /* exponent */ { string_appendn (s, "e", 1); (*mangled)++; while (ISDIGIT ((unsigned char)**mangled)) { string_appendn (s, *mangled, 1); (*mangled)++; } } return 1; } static int demangle_template_value_parm (work, mangled, s, tk) struct work_stuff *work; const char **mangled; string* s; type_kind_t tk; { int success = 1; if (**mangled == 'Y') { /* The next argument is a template parameter. 
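That is, the value is one of the enclosing template's own parameters: we read its index and either substitute the argument text saved in TMPL_ARGVEC or, when no arguments were recorded, emit a placeholder name built from the index.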
*/ int idx; (*mangled)++; idx = consume_count_with_underscores (mangled); if (idx == -1 || (work->tmpl_argvec && idx >= work->ntmpl_args) || consume_count_with_underscores (mangled) == -1) return -1; if (work->tmpl_argvec) string_append (s, work->tmpl_argvec[idx]); else string_append_template_idx (s, idx); } else if (tk == tk_integral) success = demangle_integral_value (work, mangled, s); else if (tk == tk_char) { char tmp[2]; int val; if (**mangled == 'm') { string_appendn (s, "-", 1); (*mangled)++; } string_appendn (s, "'", 1); val = consume_count(mangled); if (val <= 0) success = 0; else { tmp[0] = (char)val; tmp[1] = '\0'; string_appendn (s, &tmp[0], 1); string_appendn (s, "'", 1); } } else if (tk == tk_bool) { int val = consume_count (mangled); if (val == 0) string_appendn (s, "false", 5); else if (val == 1) string_appendn (s, "true", 4); else success = 0; } else if (tk == tk_real) success = demangle_real_value (work, mangled, s); else if (tk == tk_pointer || tk == tk_reference) { if (**mangled == 'Q') success = demangle_qualified (work, mangled, s, /*isfuncname=*/0, /*append=*/1); else { int symbol_len = consume_count (mangled); if (symbol_len == -1) return -1; if (symbol_len == 0) string_appendn (s, "0", 1); else { char *p = xmalloc (symbol_len + 1), *q; strncpy (p, *mangled, symbol_len); p [symbol_len] = '\0'; /* We use cplus_demangle here, rather than internal_cplus_demangle, because the name of the entity mangled here does not make use of any of the squangling or type-code information we have built up thus far; it is mangled independently. */ q = cplus_demangle (p, work->options); if (tk == tk_pointer) string_appendn (s, "&", 1); /* FIXME: Pointer-to-member constants should get a qualifying class name here. */ if (q) { string_append (s, q); free (q); } else string_append (s, p); free (p); } *mangled += symbol_len; } } return success; } /* Demangle the template name in MANGLED. The full name of the template (e.g., S) is placed in TNAME. The name without the template parameters (e.g. S) is placed in TRAWNAME if TRAWNAME is non-NULL. If IS_TYPE is nonzero, this template is a type template, not a function template. If both IS_TYPE and REMEMBER are nonzero, the template is remembered in the list of back-referenceable types. */ static int demangle_template (work, mangled, tname, trawname, is_type, remember) struct work_stuff *work; const char **mangled; string *tname; string *trawname; int is_type; int remember; { int i; int r; int need_comma = 0; int success = 0; const char *start; int is_java_array = 0; string temp; (*mangled)++; if (is_type) { start = *mangled; /* get template name */ if (**mangled == 'z') { int idx; (*mangled)++; (*mangled)++; idx = consume_count_with_underscores (mangled); if (idx == -1 || (work->tmpl_argvec && idx >= work->ntmpl_args) || consume_count_with_underscores (mangled) == -1) return (0); if (work->tmpl_argvec) { string_append (tname, work->tmpl_argvec[idx]); if (trawname) string_append (trawname, work->tmpl_argvec[idx]); } else { string_append_template_idx (tname, idx); if (trawname) string_append_template_idx (trawname, idx); } } else { if ((r = consume_count (mangled)) <= 0 || (int) strlen (*mangled) < r) { return (0); } is_java_array = (work -> options & DMGL_JAVA) && strncmp (*mangled, "JArray1Z", 8) == 0; if (! 
is_java_array) { string_appendn (tname, *mangled, r); } if (trawname) string_appendn (trawname, *mangled, r); *mangled += r; } } if (!is_java_array) string_append (tname, "<"); /* get size of template parameter list */ if (!get_count (mangled, &r)) { return (0); } if (!is_type) { /* Create an array for saving the template argument values. */ work->tmpl_argvec = (char**) xmalloc (r * sizeof (char *)); work->ntmpl_args = r; for (i = 0; i < r; i++) work->tmpl_argvec[i] = 0; } for (i = 0; i < r; i++) { if (need_comma) { string_append (tname, ", "); } /* Z for type parameters */ if (**mangled == 'Z') { (*mangled)++; /* temp is initialized in do_type */ success = do_type (work, mangled, &temp); if (success) { string_appends (tname, &temp); if (!is_type) { /* Save the template argument. */ int len = temp.p - temp.b; work->tmpl_argvec[i] = xmalloc (len + 1); memcpy (work->tmpl_argvec[i], temp.b, len); work->tmpl_argvec[i][len] = '\0'; } } string_delete(&temp); if (!success) { break; } } /* z for template parameters */ else if (**mangled == 'z') { int r2; (*mangled)++; success = demangle_template_template_parm (work, mangled, tname); if (success && (r2 = consume_count (mangled)) > 0 && (int) strlen (*mangled) >= r2) { string_append (tname, " "); string_appendn (tname, *mangled, r2); if (!is_type) { /* Save the template argument. */ int len = r2; work->tmpl_argvec[i] = xmalloc (len + 1); memcpy (work->tmpl_argvec[i], *mangled, len); work->tmpl_argvec[i][len] = '\0'; } *mangled += r2; } if (!success) { break; } } else { string param; string* s; /* otherwise, value parameter */ /* temp is initialized in do_type */ success = do_type (work, mangled, &temp); string_delete(&temp); if (!success) break; if (!is_type) { s = &param; string_init (s); } else s = tname; success = demangle_template_value_parm (work, mangled, s, (type_kind_t) success); if (!success) { if (!is_type) string_delete (s); success = 0; break; } if (!is_type) { int len = s->p - s->b; work->tmpl_argvec[i] = xmalloc (len + 1); memcpy (work->tmpl_argvec[i], s->b, len); work->tmpl_argvec[i][len] = '\0'; string_appends (tname, s); string_delete (s); } } need_comma = 1; } if (is_java_array) { string_append (tname, "[]"); } else { if (tname->p[-1] == '>') string_append (tname, " "); string_append (tname, ">"); } if (is_type && remember) { const int bindex = register_Btype (work); remember_Btype (work, tname->b, LEN_STRING (tname), bindex); } /* if (work -> static_type) { string_append (declp, *mangled + 1); *mangled += strlen (*mangled); success = 1; } else { success = demangle_args (work, mangled, declp); } } */ return (success); } static int arm_pt (work, mangled, n, anchor, args) struct work_stuff *work; const char *mangled; int n; const char **anchor, **args; { /* Check if ARM template with "__pt__" in it ("parameterized type") */ /* Allow HP also here, because HP's cfront compiler follows ARM to some extent */ if ((ARM_DEMANGLING || HP_DEMANGLING) && (*anchor = strstr (mangled, "__pt__"))) { int len; *args = *anchor + 6; len = consume_count (args); if (len == -1) return 0; if (*args + len == mangled + n && **args == '_') { ++*args; return 1; } } if (AUTO_DEMANGLING || EDG_DEMANGLING) { if ((*anchor = strstr (mangled, "__tm__")) || (*anchor = strstr (mangled, "__ps__")) || (*anchor = strstr (mangled, "__pt__"))) { int len; *args = *anchor + 6; len = consume_count (args); if (len == -1) return 0; if (*args + len == mangled + n && **args == '_') { ++*args; return 1; } } else if ((*anchor = strstr (mangled, "__S"))) { int len; *args = *anchor + 3;
len = consume_count (args); if (len == -1) return 0; if (*args + len == mangled + n && **args == '_') { ++*args; return 1; } } } return 0; } static void demangle_arm_hp_template (work, mangled, n, declp) struct work_stuff *work; const char **mangled; int n; string *declp; { const char *p; const char *args; const char *e = *mangled + n; string arg; /* Check for HP aCC template spec: classXt1t2 where t1, t2 are template args */ if (HP_DEMANGLING && ((*mangled)[n] == 'X')) { char *start_spec_args = NULL; int hold_options; /* First check for and omit template specialization pseudo-arguments, such as in "Spec<#1,#1.*>" */ start_spec_args = strchr (*mangled, '<'); if (start_spec_args && (start_spec_args - *mangled < n)) string_appendn (declp, *mangled, start_spec_args - *mangled); else string_appendn (declp, *mangled, n); (*mangled) += n + 1; string_init (&arg); if (work->temp_start == -1) /* non-recursive call */ work->temp_start = declp->p - declp->b; /* We want to unconditionally demangle parameter types in template parameters. */ hold_options = work->options; work->options |= DMGL_PARAMS; string_append (declp, "<"); while (1) { string_delete (&arg); switch (**mangled) { case 'T': /* 'T' signals a type parameter */ (*mangled)++; if (!do_type (work, mangled, &arg)) goto hpacc_template_args_done; break; case 'U': case 'S': /* 'U' or 'S' signals an integral value */ if (!do_hpacc_template_const_value (work, mangled, &arg)) goto hpacc_template_args_done; break; case 'A': /* 'A' signals a named constant expression (literal) */ if (!do_hpacc_template_literal (work, mangled, &arg)) goto hpacc_template_args_done; break; default: /* Today, 1997-09-03, we have only the above types of template parameters */ /* FIXME: maybe this should fail and return null */ goto hpacc_template_args_done; } string_appends (declp, &arg); /* Check if we're at the end of template args. 0 if at end of static member of template class, _ if done with template args for a function */ if ((**mangled == '\000') || (**mangled == '_')) break; else string_append (declp, ","); } hpacc_template_args_done: string_append (declp, ">"); string_delete (&arg); if (**mangled == '_') (*mangled)++; work->options = hold_options; return; } /* ARM template? (Also handles HP cfront extensions) */ else if (arm_pt (work, *mangled, n, &p, &args)) { int hold_options; string type_str; string_init (&arg); string_appendn (declp, *mangled, p - *mangled); if (work->temp_start == -1) /* non-recursive call */ work->temp_start = declp->p - declp->b; /* We want to unconditionally demangle parameter types in template parameters. 
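DMGL_PARAMS is forced on for the duration of the argument list; the caller's original flags are restored from HOLD_OPTIONS once the template arguments have been handled.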
*/ hold_options = work->options; work->options |= DMGL_PARAMS; string_append (declp, "<"); /* should do error checking here */ while (args < e) { string_delete (&arg); /* Check for type or literal here */ switch (*args) { /* HP cfront extensions to ARM for template args */ /* spec: Xt1Lv1 where t1 is a type, v1 is a literal value */ /* FIXME: We handle only numeric literals for HP cfront */ case 'X': /* A typed constant value follows */ args++; if (!do_type (work, &args, &type_str)) goto cfront_template_args_done; string_append (&arg, "("); string_appends (&arg, &type_str); string_delete (&type_str); string_append (&arg, ")"); if (*args != 'L') goto cfront_template_args_done; args++; /* Now snarf a literal value following 'L' */ if (!snarf_numeric_literal (&args, &arg)) goto cfront_template_args_done; break; case 'L': /* Snarf a literal following 'L' */ args++; if (!snarf_numeric_literal (&args, &arg)) goto cfront_template_args_done; break; default: /* Not handling other HP cfront stuff */ { const char* old_args = args; if (!do_type (work, &args, &arg)) goto cfront_template_args_done; /* Fail if we didn't make any progress: prevent infinite loop. */ if (args == old_args) { work->options = hold_options; return; } } } string_appends (declp, &arg); string_append (declp, ","); } cfront_template_args_done: string_delete (&arg); if (args >= e) --declp->p; /* remove extra comma */ string_append (declp, ">"); work->options = hold_options; } else if (n>10 && strncmp (*mangled, "_GLOBAL_", 8) == 0 && (*mangled)[9] == 'N' && (*mangled)[8] == (*mangled)[10] && strchr (cplus_markers, (*mangled)[8])) { /* A member of the anonymous namespace. */ string_append (declp, "{anonymous}"); } else { if (work->temp_start == -1) /* non-recursive call only */ work->temp_start = 0; /* disable in recursive calls */ string_appendn (declp, *mangled, n); } *mangled += n; } /* Extract a class name, possibly a template with arguments, from the mangled string; qualifiers, local class indicators, etc. have already been dealt with */ static int demangle_class_name (work, mangled, declp) struct work_stuff *work; const char **mangled; string *declp; { int n; int success = 0; n = consume_count (mangled); if (n == -1) return 0; if ((int) strlen (*mangled) >= n) { demangle_arm_hp_template (work, mangled, n, declp); success = 1; } return (success); } /* LOCAL FUNCTION demangle_class -- demangle a mangled class sequence SYNOPSIS static int demangle_class (struct work_stuff *work, const char **mangled, strint *declp) DESCRIPTION DECLP points to the buffer into which demangling is being done. *MANGLED points to the current token to be demangled. On input, it points to a mangled class (I.E. "3foo", "13verylongclass", etc.) On exit, it points to the next token after the mangled class on success, or the first unconsumed token on failure. If the CONSTRUCTOR or DESTRUCTOR flags are set in WORK, then we are demangling a constructor or destructor. In this case we prepend "class::class" or "class::~class" to DECLP. Otherwise, we prepend "class::" to the current DECLP. Reset the constructor/destructor flags once they have been "consumed". This allows demangle_class to be called later during the same demangling, to do normal class demangling. Returns 1 if demangling is successful, 0 otherwise. 
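For example, with *MANGLED at "3foo" and the constructor flag set, an empty DECLP becomes "foo::foo"; with the destructor flag set it becomes "foo::~foo".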
*/ static int demangle_class (work, mangled, declp) struct work_stuff *work; const char **mangled; string *declp; { int success = 0; int btype; string class_name; char *save_class_name_end = 0; string_init (&class_name); btype = register_Btype (work); if (demangle_class_name (work, mangled, &class_name)) { save_class_name_end = class_name.p; if ((work->constructor & 1) || (work->destructor & 1)) { /* adjust so we don't include template args */ if (work->temp_start && (work->temp_start != -1)) { class_name.p = class_name.b + work->temp_start; } string_prepends (declp, &class_name); if (work -> destructor & 1) { string_prepend (declp, "~"); work -> destructor -= 1; } else { work -> constructor -= 1; } } class_name.p = save_class_name_end; remember_Ktype (work, class_name.b, LEN_STRING(&class_name)); remember_Btype (work, class_name.b, LEN_STRING(&class_name), btype); string_prepend (declp, SCOPE_STRING (work)); string_prepends (declp, &class_name); success = 1; } string_delete (&class_name); return (success); } /* Called when there's a "__" in the mangled name, with `scan' pointing to the rightmost guess. Find the correct "__"-sequence where the function name ends and the signature starts, which is ambiguous with GNU mangling. Call demangle_signature here, so we can make sure we found the right one; *mangled will be consumed so caller will not make further calls to demangle_signature. */ static int iterate_demangle_function (work, mangled, declp, scan) struct work_stuff *work; const char **mangled; string *declp; const char *scan; { const char *mangle_init = *mangled; int success = 0; string decl_init; struct work_stuff work_init; if (*(scan + 2) == '\0') return 0; /* Do not iterate for some demangling modes, or if there's only one "__"-sequence. This is the normal case. */ if (ARM_DEMANGLING || LUCID_DEMANGLING || HP_DEMANGLING || EDG_DEMANGLING || strstr (scan + 2, "__") == NULL) { demangle_function_name (work, mangled, declp, scan); return 1; } /* Save state so we can restart if the guess at the correct "__" was wrong. */ string_init (&decl_init); string_appends (&decl_init, declp); memset (&work_init, 0, sizeof work_init); work_stuff_copy_to_from (&work_init, work); /* Iterate over occurrences of __, allowing names and types to have a "__" sequence in them. We must start with the first (not the last) occurrence, since "__" most often occur between independent mangled parts, hence starting at the last occurence inside a signature might get us a "successful" demangling of the signature. */ while (scan[2]) { demangle_function_name (work, mangled, declp, scan); success = demangle_signature (work, mangled, declp); if (success) break; /* Reset demangle state for the next round. */ *mangled = mangle_init; string_clear (declp); string_appends (declp, &decl_init); work_stuff_copy_to_from (work, &work_init); /* Leave this underscore-sequence. */ scan += 2; /* Scan for the next "__" sequence. */ while (*scan && (scan[0] != '_' || scan[1] != '_')) scan++; /* Move to last "__" in this sequence. */ while (*scan && *scan == '_') scan++; scan -= 2; } /* Delete saved state. */ delete_work_stuff (&work_init); string_delete (&decl_init); return success; } /* LOCAL FUNCTION demangle_prefix -- consume the mangled name prefix and find signature SYNOPSIS static int demangle_prefix (struct work_stuff *work, const char **mangled, string *declp); DESCRIPTION Consume and demangle the prefix of the mangled name. While processing the function name root, arrange to call demangle_signature if the root is ambiguous. 
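(A name such as "foo__bar__3baz" is ambiguous in this way, since the root could be either "foo" or "foo__bar"; iterate_demangle_function tries each "__" split in turn, starting with the leftmost, and keeps the first one whose signature demangles.)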
DECLP points to the string buffer into which demangled output is placed. On entry, the buffer is empty. On exit it contains the root function name, the demangled operator name, or in some special cases either nothing or the completely demangled result. MANGLED points to the current pointer into the mangled name. As each token of the mangled name is consumed, it is updated. Upon entry the current mangled name pointer points to the first character of the mangled name. Upon exit, it should point to the first character of the signature if demangling was successful, or to the first unconsumed character if demangling of the prefix was unsuccessful. Returns 1 on success, 0 otherwise. */ static int demangle_prefix (work, mangled, declp) struct work_stuff *work; const char **mangled; string *declp; { int success = 1; const char *scan; int i; if (strlen(*mangled) > 6 && (strncmp(*mangled, "_imp__", 6) == 0 || strncmp(*mangled, "__imp_", 6) == 0)) { /* it's a symbol imported from a PE dynamic library. Check for both new style prefix _imp__ and legacy __imp_ used by older versions of dlltool. */ (*mangled) += 6; work->dllimported = 1; } else if (strlen(*mangled) >= 11 && strncmp(*mangled, "_GLOBAL_", 8) == 0) { char *marker = strchr (cplus_markers, (*mangled)[8]); if (marker != NULL && *marker == (*mangled)[10]) { if ((*mangled)[9] == 'D') { /* it's a GNU global destructor to be executed at program exit */ (*mangled) += 11; work->destructor = 2; if (gnu_special (work, mangled, declp)) return success; } else if ((*mangled)[9] == 'I') { /* it's a GNU global constructor to be executed at program init */ (*mangled) += 11; work->constructor = 2; if (gnu_special (work, mangled, declp)) return success; } } } else if ((ARM_DEMANGLING || HP_DEMANGLING || EDG_DEMANGLING) && strncmp(*mangled, "__std__", 7) == 0) { /* it's a ARM global destructor to be executed at program exit */ (*mangled) += 7; work->destructor = 2; } else if ((ARM_DEMANGLING || HP_DEMANGLING || EDG_DEMANGLING) && strncmp(*mangled, "__sti__", 7) == 0) { /* it's a ARM global constructor to be executed at program initial */ (*mangled) += 7; work->constructor = 2; } /* This block of code is a reduction in strength time optimization of: scan = strstr (*mangled, "__"); */ { scan = *mangled; do { scan = strchr (scan, '_'); } while (scan != NULL && *++scan != '_'); if (scan != NULL) --scan; } if (scan != NULL) { /* We found a sequence of two or more '_', ensure that we start at the last pair in the sequence. */ i = strspn (scan, "_"); if (i > 2) { scan += (i - 2); } } if (scan == NULL) { success = 0; } else if (work -> static_type) { if (!ISDIGIT ((unsigned char)scan[0]) && (scan[0] != 't')) { success = 0; } } else if ((scan == *mangled) && (ISDIGIT ((unsigned char)scan[2]) || (scan[2] == 'Q') || (scan[2] == 't') || (scan[2] == 'K') || (scan[2] == 'H'))) { /* The ARM says nothing about the mangling of local variables. But cfront mangles local variables by prepending __ to them. As an extension to ARM demangling we handle this case. */ if ((LUCID_DEMANGLING || ARM_DEMANGLING || HP_DEMANGLING) && ISDIGIT ((unsigned char)scan[2])) { *mangled = scan + 2; consume_count (mangled); string_append (declp, *mangled); *mangled += strlen (*mangled); success = 1; } else { /* A GNU style constructor starts with __[0-9Qt]. But cfront uses names like __Q2_3foo3bar for nested type names. So don't accept this style of constructor for cfront demangling. A GNU style member-template constructor starts with 'H'. 
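For instance, under GNU-style demangling "__3fooi" is taken here as a constructor and comes out as "foo::foo(int)".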
*/ if (!(LUCID_DEMANGLING || ARM_DEMANGLING || HP_DEMANGLING || EDG_DEMANGLING)) work -> constructor += 1; *mangled = scan + 2; } } else if (ARM_DEMANGLING && scan[2] == 'p' && scan[3] == 't') { /* Cfront-style parameterized type. Handled later as a signature. */ success = 1; /* ARM template? */ demangle_arm_hp_template (work, mangled, strlen (*mangled), declp); } else if (EDG_DEMANGLING && ((scan[2] == 't' && scan[3] == 'm') || (scan[2] == 'p' && scan[3] == 's') || (scan[2] == 'p' && scan[3] == 't'))) { /* EDG-style parameterized type. Handled later as a signature. */ success = 1; /* EDG template? */ demangle_arm_hp_template (work, mangled, strlen (*mangled), declp); } else if ((scan == *mangled) && !ISDIGIT ((unsigned char)scan[2]) && (scan[2] != 't')) { /* Mangled name starts with "__". Skip over any leading '_' characters, then find the next "__" that separates the prefix from the signature. */ if (!(ARM_DEMANGLING || LUCID_DEMANGLING || HP_DEMANGLING || EDG_DEMANGLING) || (arm_special (mangled, declp) == 0)) { while (*scan == '_') { scan++; } if ((scan = strstr (scan, "__")) == NULL || (*(scan + 2) == '\0')) { /* No separator (I.E. "__not_mangled"), or empty signature (I.E. "__not_mangled_either__") */ success = 0; } else return iterate_demangle_function (work, mangled, declp, scan); } } else if (*(scan + 2) != '\0') { /* Mangled name does not start with "__" but does have one somewhere in there with non empty stuff after it. Looks like a global function name. Iterate over all "__":s until the right one is found. */ return iterate_demangle_function (work, mangled, declp, scan); } else { /* Doesn't look like a mangled name */ success = 0; } if (!success && (work->constructor == 2 || work->destructor == 2)) { string_append (declp, *mangled); *mangled += strlen (*mangled); success = 1; } return (success); } /* LOCAL FUNCTION gnu_special -- special handling of gnu mangled strings SYNOPSIS static int gnu_special (struct work_stuff *work, const char **mangled, string *declp); DESCRIPTION Process some special GNU style mangling forms that don't fit the normal pattern. For example: _$_3foo (destructor for class foo) _vt$foo (foo virtual table) _vt$foo$bar (foo::bar virtual table) __vt_foo (foo virtual table, new style with thunks) _3foo$varname (static data member) _Q22rs2tu$vw (static data member) __t6vector1Zii (constructor with template) __thunk_4__$_7ostream (virtual function thunk) */ static int gnu_special (work, mangled, declp) struct work_stuff *work; const char **mangled; string *declp; { int n; int success = 1; const char *p; if ((*mangled)[0] == '_' && strchr (cplus_markers, (*mangled)[1]) != NULL && (*mangled)[2] == '_') { /* Found a GNU style destructor, get past "__" */ (*mangled) += 3; work -> destructor += 1; } else if ((*mangled)[0] == '_' && (((*mangled)[1] == '_' && (*mangled)[2] == 'v' && (*mangled)[3] == 't' && (*mangled)[4] == '_') || ((*mangled)[1] == 'v' && (*mangled)[2] == 't' && strchr (cplus_markers, (*mangled)[3]) != NULL))) { /* Found a GNU style virtual table, get past "_vt" and create the decl. Note that we consume the entire mangled input string, which means that demangle_signature has no work to do. 
*/ if ((*mangled)[2] == 'v') (*mangled) += 5; /* New style, with thunks: "__vt_" */ else (*mangled) += 4; /* Old style, no thunks: "_vt" */ while (**mangled != '\0') { switch (**mangled) { case 'Q': case 'K': success = demangle_qualified (work, mangled, declp, 0, 1); break; case 't': success = demangle_template (work, mangled, declp, 0, 1, 1); break; default: if (ISDIGIT((unsigned char)*mangled[0])) { n = consume_count(mangled); /* We may be seeing a too-large size, or else a "." indicating a static local symbol. In any case, declare victory and move on; *don't* try to use n to allocate. */ if (n > (int) strlen (*mangled)) { success = 1; break; } } else { n = strcspn (*mangled, cplus_markers); } string_appendn (declp, *mangled, n); (*mangled) += n; } p = strpbrk (*mangled, cplus_markers); if (success && ((p == NULL) || (p == *mangled))) { if (p != NULL) { string_append (declp, SCOPE_STRING (work)); (*mangled)++; } } else { success = 0; break; } } if (success) string_append (declp, " virtual table"); } else if ((*mangled)[0] == '_' && (strchr("0123456789Qt", (*mangled)[1]) != NULL) && (p = strpbrk (*mangled, cplus_markers)) != NULL) { /* static data member, "_3foo$varname" for example */ (*mangled)++; switch (**mangled) { case 'Q': case 'K': success = demangle_qualified (work, mangled, declp, 0, 1); break; case 't': success = demangle_template (work, mangled, declp, 0, 1, 1); break; default: n = consume_count (mangled); if (n < 0 || n > (long) strlen (*mangled)) { success = 0; break; } if (n > 10 && strncmp (*mangled, "_GLOBAL_", 8) == 0 && (*mangled)[9] == 'N' && (*mangled)[8] == (*mangled)[10] && strchr (cplus_markers, (*mangled)[8])) { /* A member of the anonymous namespace. There's information about what identifier or filename it was keyed to, but it's just there to make the mangled name unique; we just step over it. */ string_append (declp, "{anonymous}"); (*mangled) += n; /* Now p points to the marker before the N, so we need to update it to the first marker after what we consumed. */ p = strpbrk (*mangled, cplus_markers); break; } string_appendn (declp, *mangled, n); (*mangled) += n; } if (success && (p == *mangled)) { /* Consumed everything up to the cplus_marker, append the variable name. */ (*mangled)++; string_append (declp, SCOPE_STRING (work)); n = strlen (*mangled); string_appendn (declp, *mangled, n); (*mangled) += n; } else { success = 0; } } else if (strncmp (*mangled, "__thunk_", 8) == 0) { int delta; (*mangled) += 8; delta = consume_count (mangled); if (delta == -1) success = 0; else { char *method = internal_cplus_demangle (work, ++*mangled); if (method) { char buf[50]; sprintf (buf, "virtual function thunk (delta:%d) for ", -delta); string_append (declp, buf); string_append (declp, method); free (method); n = strlen (*mangled); (*mangled) += n; } else { success = 0; } } } else if (strncmp (*mangled, "__t", 3) == 0 && ((*mangled)[3] == 'i' || (*mangled)[3] == 'f')) { p = (*mangled)[3] == 'i' ? 
" type_info node" : " type_info function"; (*mangled) += 4; switch (**mangled) { case 'Q': case 'K': success = demangle_qualified (work, mangled, declp, 0, 1); break; case 't': success = demangle_template (work, mangled, declp, 0, 1, 1); break; default: success = do_type (work, mangled, declp); break; } if (success && **mangled != '\0') success = 0; if (success) string_append (declp, p); } else { success = 0; } return (success); } static void recursively_demangle(work, mangled, result, namelength) struct work_stuff *work; const char **mangled; string *result; int namelength; { char * recurse = (char *)NULL; char * recurse_dem = (char *)NULL; recurse = (char *) xmalloc (namelength + 1); memcpy (recurse, *mangled, namelength); recurse[namelength] = '\000'; recurse_dem = cplus_demangle (recurse, work->options); if (recurse_dem) { string_append (result, recurse_dem); free (recurse_dem); } else { string_appendn (result, *mangled, namelength); } free (recurse); *mangled += namelength; } /* LOCAL FUNCTION arm_special -- special handling of ARM/lucid mangled strings SYNOPSIS static int arm_special (const char **mangled, string *declp); DESCRIPTION Process some special ARM style mangling forms that don't fit the normal pattern. For example: __vtbl__3foo (foo virtual table) __vtbl__3foo__3bar (bar::foo virtual table) */ static int arm_special (mangled, declp) const char **mangled; string *declp; { int n; int success = 1; const char *scan; if (strncmp (*mangled, ARM_VTABLE_STRING, ARM_VTABLE_STRLEN) == 0) { /* Found a ARM style virtual table, get past ARM_VTABLE_STRING and create the decl. Note that we consume the entire mangled input string, which means that demangle_signature has no work to do. */ scan = *mangled + ARM_VTABLE_STRLEN; while (*scan != '\0') /* first check it can be demangled */ { n = consume_count (&scan); if (n == -1) { return (0); /* no good */ } scan += n; if (scan[0] == '_' && scan[1] == '_') { scan += 2; } } (*mangled) += ARM_VTABLE_STRLEN; while (**mangled != '\0') { n = consume_count (mangled); if (n == -1 || n > (long) strlen (*mangled)) return 0; string_prependn (declp, *mangled, n); (*mangled) += n; if ((*mangled)[0] == '_' && (*mangled)[1] == '_') { string_prepend (declp, "::"); (*mangled) += 2; } } string_append (declp, " virtual table"); } else { success = 0; } return (success); } /* LOCAL FUNCTION demangle_qualified -- demangle 'Q' qualified name strings SYNOPSIS static int demangle_qualified (struct work_stuff *, const char *mangled, string *result, int isfuncname, int append); DESCRIPTION Demangle a qualified name, such as "Q25Outer5Inner" which is the mangled form of "Outer::Inner". The demangled output is prepended or appended to the result string according to the state of the append flag. If isfuncname is nonzero, then the qualified name we are building is going to be used as a member function name, so if it is a constructor or destructor function, append an appropriate constructor or destructor name. I.E. for the above example, the result for use as a constructor is "Outer::Inner::Inner" and the result for use as a destructor is "Outer::Inner::~Inner". BUGS Numeric conversion is ASCII dependent (FIXME). */ static int demangle_qualified (work, mangled, result, isfuncname, append) struct work_stuff *work; const char **mangled; string *result; int isfuncname; int append; { int qualifiers = 0; int success = 1; char num[2]; string temp; string last_name; int bindex = register_Btype (work); /* We only make use of ISFUNCNAME if the entity is a constructor or destructor. 
*/ isfuncname = (isfuncname && ((work->constructor & 1) || (work->destructor & 1))); string_init (&temp); string_init (&last_name); if ((*mangled)[0] == 'K') { /* Squangling qualified name reuse */ int idx; (*mangled)++; idx = consume_count_with_underscores (mangled); if (idx == -1 || idx >= work -> numk) success = 0; else string_append (&temp, work -> ktypevec[idx]); } else switch ((*mangled)[1]) { case '_': /* GNU mangled name with more than 9 classes. The count is preceded by an underscore (to distinguish it from the <= 9 case) and followed by an underscore. */ (*mangled)++; qualifiers = consume_count_with_underscores (mangled); if (qualifiers == -1) success = 0; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': /* The count is in a single digit. */ num[0] = (*mangled)[1]; num[1] = '\0'; qualifiers = atoi (num); /* If there is an underscore after the digit, skip it. This is said to be for ARM-qualified names, but the ARM makes no mention of such an underscore. Perhaps cfront uses one. */ if ((*mangled)[2] == '_') { (*mangled)++; } (*mangled) += 2; break; case '0': default: success = 0; } if (!success) return success; /* Pick off the names and collect them in the temp buffer in the order in which they are found, separated by '::'. */ while (qualifiers-- > 0) { int remember_K = 1; string_clear (&last_name); if (*mangled[0] == '_') (*mangled)++; if (*mangled[0] == 't') { /* Here we always append to TEMP since we will want to use the template name without the template parameters as a constructor or destructor name. The appropriate (parameter-less) value is returned by demangle_template in LAST_NAME. We do not remember the template type here, in order to match the G++ mangling algorithm. */ success = demangle_template(work, mangled, &temp, &last_name, 1, 0); if (!success) break; } else if (*mangled[0] == 'K') { int idx; (*mangled)++; idx = consume_count_with_underscores (mangled); if (idx == -1 || idx >= work->numk) success = 0; else string_append (&temp, work->ktypevec[idx]); remember_K = 0; if (!success) break; } else { if (EDG_DEMANGLING) { int namelength; /* Now recursively demangle the qualifier * This is necessary to deal with templates in * mangling styles like EDG */ namelength = consume_count (mangled); if (namelength == -1) { success = 0; break; } recursively_demangle(work, mangled, &temp, namelength); } else { string_delete (&last_name); success = do_type (work, mangled, &last_name); if (!success) break; string_appends (&temp, &last_name); } } if (remember_K) remember_Ktype (work, temp.b, LEN_STRING (&temp)); if (qualifiers > 0) string_append (&temp, SCOPE_STRING (work)); } remember_Btype (work, temp.b, LEN_STRING (&temp), bindex); /* If we are using the result as a function name, we need to append the appropriate '::' separated constructor or destructor name. We do this here because this is the most convenient place, where we already have a pointer to the name and the length of the name. */ if (isfuncname) { string_append (&temp, SCOPE_STRING (work)); if (work -> destructor & 1) string_append (&temp, "~"); string_appends (&temp, &last_name); } /* Now either prepend the temp buffer to the result, or append it, depending upon the state of the append flag. 
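For instance, do_type's 'Q' case and gnu_special pass APPEND as 1, while demangle_signature passes 0 so that the qualifiers land in front of the function name already present in RESULT.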
*/ if (append) string_appends (result, &temp); else { if (!STRING_EMPTY (result)) string_append (&temp, SCOPE_STRING (work)); string_prepends (result, &temp); } string_delete (&last_name); string_delete (&temp); return (success); } /* LOCAL FUNCTION get_count -- convert an ascii count to integer, consuming tokens SYNOPSIS static int get_count (const char **type, int *count) DESCRIPTION Assume that *type points at a count in a mangled name; set *count to its value, and set *type to the next character after the count. There are some weird rules in effect here. If *type does not point at a string of digits, return zero. If *type points at a string of digits followed by an underscore, set *count to their value as an integer, advance *type to point *after the underscore, and return 1. If *type points at a string of digits not followed by an underscore, consume only the first digit. Set *count to its value as an integer, leave *type pointing after that digit, and return 1. The excuse for this odd behavior: in the ARM and HP demangling styles, a type can be followed by a repeat count of the form `Nxy', where: `x' is a single digit specifying how many additional copies of the type to append to the argument list, and `y' is one or more digits, specifying the zero-based index of the first repeated argument in the list. Yes, as you're unmangling the name you can figure this out yourself, but it's there anyway. So, for example, in `bar__3fooFPiN51', the first argument is a pointer to an integer (`Pi'), and then the next five arguments are the same (`N5'), and the first repeat is the function's second argument (`1'). */ static int get_count (type, count) const char **type; int *count; { const char *p; int n; if (!ISDIGIT ((unsigned char)**type)) return (0); else { *count = **type - '0'; (*type)++; if (ISDIGIT ((unsigned char)**type)) { p = *type; n = *count; do { n *= 10; n += *p - '0'; p++; } while (ISDIGIT ((unsigned char)*p)); if (*p == '_') { *type = p + 1; *count = n; } } } return (1); } /* RESULT will be initialised here; it will be freed on failure. The value returned is really a type_kind_t. */ static int do_type (work, mangled, result) struct work_stuff *work; const char **mangled; string *result; { int n; int done; int success; string decl; const char *remembered_type; int type_quals; type_kind_t tk = tk_none; string_init (&decl); string_init (result); done = 0; success = 1; while (success && !done) { int member; switch (**mangled) { /* A pointer type */ case 'P': case 'p': (*mangled)++; if (! 
(work -> options & DMGL_JAVA)) string_prepend (&decl, "*"); if (tk == tk_none) tk = tk_pointer; break; /* A reference type */ case 'R': (*mangled)++; string_prepend (&decl, "&"); if (tk == tk_none) tk = tk_reference; break; /* An array */ case 'A': { ++(*mangled); if (!STRING_EMPTY (&decl) && (decl.b[0] == '*' || decl.b[0] == '&')) { string_prepend (&decl, "("); string_append (&decl, ")"); } string_append (&decl, "["); if (**mangled != '_') success = demangle_template_value_parm (work, mangled, &decl, tk_integral); if (**mangled == '_') ++(*mangled); string_append (&decl, "]"); break; } /* A back reference to a previously seen type */ case 'T': (*mangled)++; if (!get_count (mangled, &n) || n >= work -> ntypes) { success = 0; } else { remembered_type = work -> typevec[n]; mangled = &remembered_type; } break; /* A function */ case 'F': (*mangled)++; if (!STRING_EMPTY (&decl) && (decl.b[0] == '*' || decl.b[0] == '&')) { string_prepend (&decl, "("); string_append (&decl, ")"); } /* After picking off the function args, we expect to either find the function return type (preceded by an '_') or the end of the string. */ if (!demangle_nested_args (work, mangled, &decl) || (**mangled != '_' && **mangled != '\0')) { success = 0; break; } if (success && (**mangled == '_')) (*mangled)++; break; case 'M': case 'O': { type_quals = TYPE_UNQUALIFIED; member = **mangled == 'M'; (*mangled)++; string_append (&decl, ")"); /* We don't need to prepend `::' for a qualified name; demangle_qualified will do that for us. */ if (**mangled != 'Q') string_prepend (&decl, SCOPE_STRING (work)); if (ISDIGIT ((unsigned char)**mangled)) { n = consume_count (mangled); if (n == -1 || (int) strlen (*mangled) < n) { success = 0; break; } string_prependn (&decl, *mangled, n); *mangled += n; } else if (**mangled == 'X' || **mangled == 'Y') { string temp; do_type (work, mangled, &temp); string_prepends (&decl, &temp); string_delete (&temp); } else if (**mangled == 't') { string temp; string_init (&temp); success = demangle_template (work, mangled, &temp, NULL, 1, 1); if (success) { string_prependn (&decl, temp.b, temp.p - temp.b); string_delete (&temp); } else break; } else if (**mangled == 'Q') { success = demangle_qualified (work, mangled, &decl, /*isfuncnam=*/0, /*append=*/0); if (!success) break; } else { success = 0; break; } string_prepend (&decl, "("); if (member) { switch (**mangled) { case 'C': case 'V': case 'u': type_quals |= code_for_qualifier (**mangled); (*mangled)++; break; default: break; } if (*(*mangled)++ != 'F') { success = 0; break; } } if ((member && !demangle_nested_args (work, mangled, &decl)) || **mangled != '_') { success = 0; break; } (*mangled)++; if (! PRINT_ANSI_QUALIFIERS) { break; } if (type_quals != TYPE_UNQUALIFIED) { APPEND_BLANK (&decl); string_append (&decl, qualifier_string (type_quals)); } break; } case 'G': (*mangled)++; break; case 'C': case 'V': case 'u': if (PRINT_ANSI_QUALIFIERS) { if (!STRING_EMPTY (&decl)) string_prepend (&decl, " "); string_prepend (&decl, demangle_qualifier (**mangled)); } (*mangled)++; break; /* } */ /* fall through */ default: done = 1; break; } } if (success) switch (**mangled) { /* A qualified name, such as "Outer::Inner". */ case 'Q': case 'K': { success = demangle_qualified (work, mangled, result, 0, 1); break; } /* A back reference to a previously seen squangled type */ case 'B': (*mangled)++; if (!get_count (mangled, &n) || n >= work -> numb) success = 0; else string_append (result, work->btypevec[n]); break; case 'X': case 'Y': /* A template parm. 
We substitute the corresponding argument. */ { int idx; (*mangled)++; idx = consume_count_with_underscores (mangled); if (idx == -1 || (work->tmpl_argvec && idx >= work->ntmpl_args) || consume_count_with_underscores (mangled) == -1) { success = 0; break; } if (work->tmpl_argvec) string_append (result, work->tmpl_argvec[idx]); else string_append_template_idx (result, idx); success = 1; } break; default: success = demangle_fund_type (work, mangled, result); if (tk == tk_none) tk = (type_kind_t) success; break; } if (success) { if (!STRING_EMPTY (&decl)) { string_append (result, " "); string_appends (result, &decl); } } else string_delete (result); string_delete (&decl); if (success) /* Assume an integral type, if we're not sure. */ return (int) ((tk == tk_none) ? tk_integral : tk); else return 0; } /* Given a pointer to a type string that represents a fundamental type argument (int, long, unsigned int, etc) in TYPE, a pointer to the string in which the demangled output is being built in RESULT, and the WORK structure, decode the types and add them to the result. For example: "Ci" => "const int" "Sl" => "signed long" "CUs" => "const unsigned short" The value returned is really a type_kind_t. */ static int demangle_fund_type (work, mangled, result) struct work_stuff *work; const char **mangled; string *result; { int done = 0; int success = 1; char buf[10]; unsigned int dec = 0; type_kind_t tk = tk_integral; /* First pick off any type qualifiers. There can be more than one. */ while (!done) { switch (**mangled) { case 'C': case 'V': case 'u': if (PRINT_ANSI_QUALIFIERS) { if (!STRING_EMPTY (result)) string_prepend (result, " "); string_prepend (result, demangle_qualifier (**mangled)); } (*mangled)++; break; case 'U': (*mangled)++; APPEND_BLANK (result); string_append (result, "unsigned"); break; case 'S': /* signed char only */ (*mangled)++; APPEND_BLANK (result); string_append (result, "signed"); break; case 'J': (*mangled)++; APPEND_BLANK (result); string_append (result, "__complex"); break; default: done = 1; break; } } /* Now pick off the fundamental type. There can be only one. 
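   As a worked example against the cases below: for "CUx", with ANSI
   qualifiers being printed, the qualifier loop above has already produced
   "const unsigned", and the 'x' case then appends "long long", giving
   "const unsigned long long".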
*/ switch (**mangled) { case '\0': case '_': break; case 'v': (*mangled)++; APPEND_BLANK (result); string_append (result, "void"); break; case 'x': (*mangled)++; APPEND_BLANK (result); string_append (result, "long long"); break; case 'l': (*mangled)++; APPEND_BLANK (result); string_append (result, "long"); break; case 'i': (*mangled)++; APPEND_BLANK (result); string_append (result, "int"); break; case 's': (*mangled)++; APPEND_BLANK (result); string_append (result, "short"); break; case 'b': (*mangled)++; APPEND_BLANK (result); string_append (result, "bool"); tk = tk_bool; break; case 'c': (*mangled)++; APPEND_BLANK (result); string_append (result, "char"); tk = tk_char; break; case 'w': (*mangled)++; APPEND_BLANK (result); string_append (result, "wchar_t"); tk = tk_char; break; case 'r': (*mangled)++; APPEND_BLANK (result); string_append (result, "long double"); tk = tk_real; break; case 'd': (*mangled)++; APPEND_BLANK (result); string_append (result, "double"); tk = tk_real; break; case 'f': (*mangled)++; APPEND_BLANK (result); string_append (result, "float"); tk = tk_real; break; case 'G': (*mangled)++; if (!ISDIGIT ((unsigned char)**mangled)) { success = 0; break; } case 'I': (*mangled)++; if (**mangled == '_') { int i; (*mangled)++; for (i = 0; i < (long) sizeof (buf) - 1 && **mangled && **mangled != '_'; (*mangled)++, i++) buf[i] = **mangled; if (**mangled != '_') { success = 0; break; } buf[i] = '\0'; (*mangled)++; } else { strncpy (buf, *mangled, 2); buf[2] = '\0'; *mangled += min (strlen (*mangled), 2); } sscanf (buf, "%x", &dec); sprintf (buf, "int%u_t", dec); APPEND_BLANK (result); string_append (result, buf); break; /* fall through */ /* An explicit type, such as "6mytype" or "7integer" */ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { int bindex = register_Btype (work); string btype; string_init (&btype); if (demangle_class_name (work, mangled, &btype)) { remember_Btype (work, btype.b, LEN_STRING (&btype), bindex); APPEND_BLANK (result); string_appends (result, &btype); } else success = 0; string_delete (&btype); break; } case 't': { string btype; string_init (&btype); success = demangle_template (work, mangled, &btype, 0, 1, 1); string_appends (result, &btype); string_delete (&btype); break; } default: success = 0; break; } return success ? ((int) tk) : 0; } /* Handle a template's value parameter for HP aCC (extension from ARM) **mangled points to 'S' or 'U' */ static int do_hpacc_template_const_value (work, mangled, result) struct work_stuff *work ATTRIBUTE_UNUSED; const char **mangled; string *result; { int unsigned_const; if (**mangled != 'U' && **mangled != 'S') return 0; unsigned_const = (**mangled == 'U'); (*mangled)++; switch (**mangled) { case 'N': string_append (result, "-"); /* fall through */ case 'P': (*mangled)++; break; case 'M': /* special case for -2^31 */ string_append (result, "-2147483648"); (*mangled)++; return 1; default: return 0; } /* We have to be looking at an integer now */ if (!(ISDIGIT ((unsigned char)**mangled))) return 0; /* We only deal with integral values for template parameters -- so it's OK to look only for digits */ while (ISDIGIT ((unsigned char)**mangled)) { char_str[0] = **mangled; string_append (result, char_str); (*mangled)++; } if (unsigned_const) string_append (result, "U"); /* FIXME? Some day we may have 64-bit (or larger :-) ) constants with L or LL suffixes. 
pai/1997-09-03 */ return 1; /* success */ } /* Handle a template's literal parameter for HP aCC (extension from ARM) **mangled is pointing to the 'A' */ static int do_hpacc_template_literal (work, mangled, result) struct work_stuff *work; const char **mangled; string *result; { int literal_len = 0; char * recurse; char * recurse_dem; if (**mangled != 'A') return 0; (*mangled)++; literal_len = consume_count (mangled); if (literal_len <= 0) return 0; /* Literal parameters are names of arrays, functions, etc. and the canonical representation uses the address operator */ string_append (result, "&"); /* Now recursively demangle the literal name */ recurse = (char *) xmalloc (literal_len + 1); memcpy (recurse, *mangled, literal_len); recurse[literal_len] = '\000'; recurse_dem = cplus_demangle (recurse, work->options); if (recurse_dem) { string_append (result, recurse_dem); free (recurse_dem); } else { string_appendn (result, *mangled, literal_len); } (*mangled) += literal_len; free (recurse); return 1; } static int snarf_numeric_literal (args, arg) const char ** args; string * arg; { if (**args == '-') { char_str[0] = '-'; string_append (arg, char_str); (*args)++; } else if (**args == '+') (*args)++; if (!ISDIGIT ((unsigned char)**args)) return 0; while (ISDIGIT ((unsigned char)**args)) { char_str[0] = **args; string_append (arg, char_str); (*args)++; } return 1; } /* Demangle the next argument, given by MANGLED into RESULT, which *should be an uninitialized* string. It will be initialized here, and free'd should anything go wrong. */ static int do_arg (work, mangled, result) struct work_stuff *work; const char **mangled; string *result; { /* Remember where we started so that we can record the type, for non-squangling type remembering. */ const char *start = *mangled; string_init (result); if (work->nrepeats > 0) { --work->nrepeats; if (work->previous_argument == 0) return 0; /* We want to reissue the previous type in this argument list. */ string_appends (result, work->previous_argument); return 1; } if (**mangled == 'n') { /* A squangling-style repeat. */ (*mangled)++; work->nrepeats = consume_count(mangled); if (work->nrepeats <= 0) /* This was not a repeat count after all. */ return 0; if (work->nrepeats > 9) { if (**mangled != '_') /* The repeat count should be followed by an '_' in this case. */ return 0; else (*mangled)++; } /* Now, the repeat is all set up. */ return do_arg (work, mangled, result); } /* Save the result in WORK->previous_argument so that we can find it if it's repeated. Note that saving START is not good enough: we do not want to add additional types to the back-referenceable type vector when processing a repeated type. 
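   For example, in a hypothetical squangled argument-list fragment "Pcn2",
   "Pc" is demangled once to "char *" and saved here; the "n2" repeat then
   reissues that saved string twice via the repeat branch at the top of this
   function, without calling remember_type again.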
*/ if (work->previous_argument) string_delete (work->previous_argument); else work->previous_argument = (string*) xmalloc (sizeof (string)); if (!do_type (work, mangled, work->previous_argument)) return 0; string_appends (result, work->previous_argument); remember_type (work, start, *mangled - start); return 1; } static void remember_type (work, start, len) struct work_stuff *work; const char *start; int len; { char *tem; if (work->forgetting_types) return; if (work -> ntypes >= work -> typevec_size) { if (work -> typevec_size == 0) { work -> typevec_size = 3; work -> typevec = (char **) xmalloc (sizeof (char *) * work -> typevec_size); } else { work -> typevec_size *= 2; work -> typevec = (char **) xrealloc ((char *)work -> typevec, sizeof (char *) * work -> typevec_size); } } tem = xmalloc (len + 1); memcpy (tem, start, len); tem[len] = '\0'; work -> typevec[work -> ntypes++] = tem; } /* Remember a K type class qualifier. */ static void remember_Ktype (work, start, len) struct work_stuff *work; const char *start; int len; { char *tem; if (work -> numk >= work -> ksize) { if (work -> ksize == 0) { work -> ksize = 5; work -> ktypevec = (char **) xmalloc (sizeof (char *) * work -> ksize); } else { work -> ksize *= 2; work -> ktypevec = (char **) xrealloc ((char *)work -> ktypevec, sizeof (char *) * work -> ksize); } } tem = xmalloc (len + 1); memcpy (tem, start, len); tem[len] = '\0'; work -> ktypevec[work -> numk++] = tem; } /* Register a B code, and get an index for it. B codes are registered as they are seen, rather than as they are completed, so map > registers map > as B0, and temp as B1 */ static int register_Btype (work) struct work_stuff *work; { int ret; if (work -> numb >= work -> bsize) { if (work -> bsize == 0) { work -> bsize = 5; work -> btypevec = (char **) xmalloc (sizeof (char *) * work -> bsize); } else { work -> bsize *= 2; work -> btypevec = (char **) xrealloc ((char *)work -> btypevec, sizeof (char *) * work -> bsize); } } ret = work -> numb++; work -> btypevec[ret] = NULL; return(ret); } /* Store a value into a previously registered B code type. */ static void remember_Btype (work, start, len, index) struct work_stuff *work; const char *start; int len, index; { char *tem; tem = xmalloc (len + 1); memcpy (tem, start, len); tem[len] = '\0'; work -> btypevec[index] = tem; } /* Lose all the info related to B and K type codes. */ static void forget_B_and_K_types (work) struct work_stuff *work; { int i; while (work -> numk > 0) { i = --(work -> numk); if (work -> ktypevec[i] != NULL) { free (work -> ktypevec[i]); work -> ktypevec[i] = NULL; } } while (work -> numb > 0) { i = --(work -> numb); if (work -> btypevec[i] != NULL) { free (work -> btypevec[i]); work -> btypevec[i] = NULL; } } } /* Forget the remembered types, but not the type vector itself. */ static void forget_types (work) struct work_stuff *work; { int i; while (work -> ntypes > 0) { i = --(work -> ntypes); if (work -> typevec[i] != NULL) { free (work -> typevec[i]); work -> typevec[i] = NULL; } } } /* Process the argument list part of the signature, after any class spec has been consumed, as well as the first 'F' character (if any). For example: "__als__3fooRT0" => process "RT0" "complexfunc5__FPFPc_PFl_i" => process "PFPc_PFl_i" DECLP must be already initialised, usually non-empty. It won't be freed on failure. Note that g++ differs significantly from ARM and lucid style mangling with regards to references to previously seen types. 
For example, given the source fragment: class foo { public: foo::foo (int, foo &ia, int, foo &ib, int, foo &ic); }; foo::foo (int, foo &ia, int, foo &ib, int, foo &ic) { ia = ib = ic; } void foo (int, foo &ia, int, foo &ib, int, foo &ic) { ia = ib = ic; } g++ produces the names: __3fooiRT0iT2iT2 foo__FiR3fooiT1iT1 while lcc (and presumably other ARM style compilers as well) produces: foo__FiR3fooT1T2T1T2 __ct__3fooFiR3fooT1T2T1T2 Note that g++ bases its type numbers starting at zero and counts all previously seen types, while lucid/ARM bases its type numbers starting at one and only considers types after it has seen the 'F' character indicating the start of the function args. For lucid/ARM style, we account for this difference by discarding any previously seen types when we see the 'F' character, and subtracting one from the type number reference. */ static int demangle_args (work, mangled, declp) struct work_stuff *work; const char **mangled; string *declp; { string arg; int need_comma = 0; int r; int t; const char *tem; char temptype; if (PRINT_ARG_TYPES) { string_append (declp, "("); if (**mangled == '\0') { string_append (declp, "void"); } } while ((**mangled != '_' && **mangled != '\0' && **mangled != 'e') || work->nrepeats > 0) { if ((**mangled == 'N') || (**mangled == 'T')) { temptype = *(*mangled)++; if (temptype == 'N') { if (!get_count (mangled, &r)) { return (0); } } else { r = 1; } if ((HP_DEMANGLING || ARM_DEMANGLING || EDG_DEMANGLING) && work -> ntypes >= 10) { /* If we have 10 or more types we might have more than a 1 digit index so we'll have to consume the whole count here. This will lose if the next thing is a type name preceded by a count but it's impossible to demangle that case properly anyway. Eg if we already have 12 types is T12Pc "(..., type1, Pc, ...)" or "(..., type12, char *, ...)" */ if ((t = consume_count(mangled)) <= 0) { return (0); } } else { if (!get_count (mangled, &t)) { return (0); } } if (LUCID_DEMANGLING || ARM_DEMANGLING || HP_DEMANGLING || EDG_DEMANGLING) { t--; } /* Validate the type index. Protect against illegal indices from malformed type strings. */ if ((t < 0) || (t >= work -> ntypes)) { return (0); } while (work->nrepeats > 0 || --r >= 0) { tem = work -> typevec[t]; if (need_comma && PRINT_ARG_TYPES) { string_append (declp, ", "); } if (!do_arg (work, &tem, &arg)) { return (0); } if (PRINT_ARG_TYPES) { string_appends (declp, &arg); } string_delete (&arg); need_comma = 1; } } else { if (need_comma && PRINT_ARG_TYPES) string_append (declp, ", "); if (!do_arg (work, mangled, &arg)) return (0); if (PRINT_ARG_TYPES) string_appends (declp, &arg); string_delete (&arg); need_comma = 1; } } if (**mangled == 'e') { (*mangled)++; if (PRINT_ARG_TYPES) { if (need_comma) { string_append (declp, ","); } string_append (declp, "..."); } } if (PRINT_ARG_TYPES) { string_append (declp, ")"); } return (1); } /* Like demangle_args, but for demangling the argument lists of function and method pointers or references, not top-level declarations. */ static int demangle_nested_args (work, mangled, declp) struct work_stuff *work; const char **mangled; string *declp; { string* saved_previous_argument; int result; int saved_nrepeats; /* The G++ name-mangling algorithm does not remember types on nested argument lists, unless -fsquangling is used, and in that case the type vector updated by remember_type is not used. So, we turn off remembering of types here. 
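   FORGETTING_TYPES is kept as a nesting count rather than a boolean, since
   a nested argument list may itself contain function or method types with
   their own nested lists.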
*/ ++work->forgetting_types; /* For the repeat codes used with -fsquangling, we must keep track of the last argument. */ saved_previous_argument = work->previous_argument; saved_nrepeats = work->nrepeats; work->previous_argument = 0; work->nrepeats = 0; /* Actually demangle the arguments. */ result = demangle_args (work, mangled, declp); /* Restore the previous_argument field. */ if (work->previous_argument) { string_delete (work->previous_argument); free ((char *) work->previous_argument); } work->previous_argument = saved_previous_argument; --work->forgetting_types; work->nrepeats = saved_nrepeats; return result; } static void demangle_function_name (work, mangled, declp, scan) struct work_stuff *work; const char **mangled; string *declp; const char *scan; { size_t i; string type; const char *tem; string_appendn (declp, (*mangled), scan - (*mangled)); string_need (declp, 1); *(declp -> p) = '\0'; /* Consume the function name, including the "__" separating the name from the signature. We are guaranteed that SCAN points to the separator. */ (*mangled) = scan + 2; /* We may be looking at an instantiation of a template function: foo__Xt1t2_Ft3t4, where t1, t2, ... are template arguments and a following _F marks the start of the function arguments. Handle the template arguments first. */ if (HP_DEMANGLING && (**mangled == 'X')) { demangle_arm_hp_template (work, mangled, 0, declp); /* This leaves MANGLED pointing to the 'F' marking func args */ } if (LUCID_DEMANGLING || ARM_DEMANGLING || HP_DEMANGLING || EDG_DEMANGLING) { /* See if we have an ARM style constructor or destructor operator. If so, then just record it, clear the decl, and return. We can't build the actual constructor/destructor decl until later, when we recover the class name from the signature. */ if (strcmp (declp -> b, "__ct") == 0) { work -> constructor += 1; string_clear (declp); return; } else if (strcmp (declp -> b, "__dt") == 0) { work -> destructor += 1; string_clear (declp); return; } } if (declp->p - declp->b >= 3 && declp->b[0] == 'o' && declp->b[1] == 'p' && strchr (cplus_markers, declp->b[2]) != NULL) { /* see if it's an assignment expression */ if (declp->p - declp->b >= 10 /* op$assign_ */ && memcmp (declp->b + 3, "assign_", 7) == 0) { for (i = 0; i < ARRAY_SIZE (optable); i++) { int len = declp->p - declp->b - 10; if ((int) strlen (optable[i].in) == len && memcmp (optable[i].in, declp->b + 10, len) == 0) { string_clear (declp); string_append (declp, "operator"); string_append (declp, optable[i].out); string_append (declp, "="); break; } } } else { for (i = 0; i < ARRAY_SIZE (optable); i++) { int len = declp->p - declp->b - 3; if ((int) strlen (optable[i].in) == len && memcmp (optable[i].in, declp->b + 3, len) == 0) { string_clear (declp); string_append (declp, "operator"); string_append (declp, optable[i].out); break; } } } } else if (declp->p - declp->b >= 5 && memcmp (declp->b, "type", 4) == 0 && strchr (cplus_markers, declp->b[4]) != NULL) { /* type conversion operator */ tem = declp->b + 5; if (do_type (work, &tem, &type)) { string_clear (declp); string_append (declp, "operator "); string_appends (declp, &type); string_delete (&type); } } else if (declp->b[0] == '_' && declp->b[1] == '_' && declp->b[2] == 'o' && declp->b[3] == 'p') { /* ANSI. */ /* type conversion operator. 
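   For example, a name part encoded as "__opi" comes out as "operator int":
   the type string starts at offset 4 and is run through do_type just below.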
*/ tem = declp->b + 4; if (do_type (work, &tem, &type)) { string_clear (declp); string_append (declp, "operator "); string_appends (declp, &type); string_delete (&type); } } else if (declp->b[0] == '_' && declp->b[1] == '_' && ISLOWER((unsigned char)declp->b[2]) && ISLOWER((unsigned char)declp->b[3])) { if (declp->b[4] == '\0') { /* Operator. */ for (i = 0; i < ARRAY_SIZE (optable); i++) { if (strlen (optable[i].in) == 2 && memcmp (optable[i].in, declp->b + 2, 2) == 0) { string_clear (declp); string_append (declp, "operator"); string_append (declp, optable[i].out); break; } } } else { if (declp->b[2] == 'a' && declp->b[5] == '\0') { /* Assignment. */ for (i = 0; i < ARRAY_SIZE (optable); i++) { if (strlen (optable[i].in) == 3 && memcmp (optable[i].in, declp->b + 2, 3) == 0) { string_clear (declp); string_append (declp, "operator"); string_append (declp, optable[i].out); break; } } } } } } /* a mini string-handling package */ static void string_need (s, n) string *s; int n; { int tem; if (s->b == NULL) { if (n < 32) { n = 32; } s->p = s->b = xmalloc (n); s->e = s->b + n; } else if (s->e - s->p < n) { tem = s->p - s->b; n += tem; n *= 2; s->b = xrealloc (s->b, n); s->p = s->b + tem; s->e = s->b + n; } } static void string_delete (s) string *s; { if (s->b != NULL) { free (s->b); s->b = s->e = s->p = NULL; } } static void string_init (s) string *s; { s->b = s->p = s->e = NULL; } static void string_clear (s) string *s; { s->p = s->b; } #if 0 static int string_empty (s) string *s; { return (s->b == s->p); } #endif static void string_append (p, s) string *p; const char *s; { int n; if (s == NULL || *s == '\0') return; n = strlen (s); string_need (p, n); memcpy (p->p, s, n); p->p += n; } static void string_appends (p, s) string *p, *s; { int n; if (s->b != s->p) { n = s->p - s->b; string_need (p, n); memcpy (p->p, s->b, n); p->p += n; } } static void string_appendn (p, s, n) string *p; const char *s; int n; { if (n != 0) { string_need (p, n); memcpy (p->p, s, n); p->p += n; } } static void string_prepend (p, s) string *p; const char *s; { if (s != NULL && *s != '\0') { string_prependn (p, s, strlen (s)); } } static void string_prepends (p, s) string *p, *s; { if (s->b != s->p) { string_prependn (p, s->b, s->p - s->b); } } static void string_prependn (p, s, n) string *p; const char *s; int n; { char *q; if (n != 0) { string_need (p, n); for (q = p->p - 1; q >= p->b; q--) { q[n] = q[0]; } memcpy (p->b, s, n); p->p += n; } } static void string_append_template_idx (s, idx) string *s; int idx; { char buf[INTBUF_SIZE + 1 /* 'T' */]; sprintf(buf, "T%d", idx); string_append (s, buf); } /* Demangler for g++ V3 ABI. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Written by Ian Lance Taylor . This file is part of the libiberty library, which is part of GCC. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combined executable.) 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This code implements a demangler for the g++ V3 ABI. The ABI is described on this web page: http://www.codesourcery.com/cxx-abi/abi.html#mangling This code was written while looking at the demangler written by Alex Samuel . This code first pulls the mangled name apart into a list of components, and then walks the list generating the demangled name. This file will normally define the following functions, q.v.: char *cplus_demangle_v3(const char *mangled, int options) char *java_demangle_v3(const char *mangled) enum gnu_v3_ctor_kinds is_gnu_v3_mangled_ctor (const char *name) enum gnu_v3_dtor_kinds is_gnu_v3_mangled_dtor (const char *name) Also, the interface to the component list is public, and defined in demangle.h. The interface consists of these types, which are defined in demangle.h: enum demangle_component_type struct demangle_component and these functions defined in this file: cplus_demangle_fill_name cplus_demangle_fill_extended_operator cplus_demangle_fill_ctor cplus_demangle_fill_dtor cplus_demangle_print and other functions defined in the file cp-demint.c. This file also defines some other functions and variables which are only to be used by the file cp-demint.c. Preprocessor macros you can define while compiling this file: IN_LIBGCC2 If defined, this file defines the following function, q.v.: char *__cxa_demangle (const char *mangled, char *buf, size_t *len, int *status) instead of cplus_demangle_v3() and java_demangle_v3(). IN_GLIBCPP_V3 If defined, this file defines only __cxa_demangle(), and no other publically visible functions or variables. STANDALONE_DEMANGLER If defined, this file defines a main() function which demangles any arguments, or, if none, demangles stdin. CP_DEMANGLE_DEBUG If defined, turns on debugging mode, which prints information on stdout about the mangled string. This is not generally useful. */ #ifdef HAVE_CONFIG_H #endif #include #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_STRING_H #include #endif /* Internal demangler interface for g++ V3 ABI. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Written by Ian Lance Taylor . This file is part of the libiberty library, which is part of GCC. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combined executable.) This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef LIBIBERTY_CP_DEMANGLE_H #define LIBIBERTY_CP_DEMANGLE_H /* This file provides some definitions shared by cp-demangle.c and cp-demint.c. It should not be included by any other files. */ /* Information we keep for operators. */ struct demangle_operator_info { /* Mangled name. */ const char *code; /* Real name. */ const char *name; /* Length of real name. */ int len; /* Number of arguments. */ int args; }; /* How to print the value of a builtin type. */ enum d_builtin_type_print { /* Print as (type)val. */ D_PRINT_DEFAULT, /* Print as integer. */ D_PRINT_INT, /* Print as unsigned integer, with trailing "u". */ D_PRINT_UNSIGNED, /* Print as long, with trailing "l". */ D_PRINT_LONG, /* Print as unsigned long, with trailing "ul". */ D_PRINT_UNSIGNED_LONG, /* Print as long long, with trailing "ll". */ D_PRINT_LONG_LONG, /* Print as unsigned long long, with trailing "ull". */ D_PRINT_UNSIGNED_LONG_LONG, /* Print as bool. */ D_PRINT_BOOL, /* Print as float--put value in square brackets. */ D_PRINT_FLOAT, /* Print in usual way, but here to detect void. */ D_PRINT_VOID }; /* Information we keep for a builtin type. */ struct demangle_builtin_type_info { /* Type name. */ const char *name; /* Length of type name. */ int len; /* Type name when using Java. */ const char *java_name; /* Length of java name. */ int java_len; /* How to print a value of this type. */ enum d_builtin_type_print print; }; /* The information structure we pass around. */ struct d_info { /* The string we are demangling. */ const char *s; /* The end of the string we are demangling. */ const char *send; /* The options passed to the demangler. */ int options; /* The next character in the string to consider. */ const char *n; /* The array of components. */ struct demangle_component *comps; /* The index of the next available component. */ int next_comp; /* The number of available component structures. */ int num_comps; /* The array of substitutions. */ struct demangle_component **subs; /* The index of the next substitution. */ int next_sub; /* The number of available entries in the subs array. */ int num_subs; /* The number of substitutions which we actually made from the subs array, plus the number of template parameter references we saw. */ int did_subs; /* The last name we saw, for constructors and destructors. */ struct demangle_component *last_name; /* A running total of the length of large expansions from the mangled name to the demangled name, such as standard substitutions and builtin types. */ int expansion; }; #define d_peek_char(di) (*((di)->n)) #define d_peek_next_char(di) ((di)->n[1]) #define d_advance(di, i) ((di)->n += (i)) #define d_next_char(di) (*((di)->n++)) #define d_str(di) ((di)->n) /* Functions and arrays in cp-demangle.c which are referenced by functions in cp-demint.c. 
*/ #ifdef IN_GLIBCPP_V3 #define CP_STATIC_IF_GLIBCPP_V3 static #else #define CP_STATIC_IF_GLIBCPP_V3 extern #endif CP_STATIC_IF_GLIBCPP_V3 const struct demangle_operator_info cplus_demangle_operators[]; #define D_BUILTIN_TYPE_COUNT (26) CP_STATIC_IF_GLIBCPP_V3 const struct demangle_builtin_type_info cplus_demangle_builtin_types[D_BUILTIN_TYPE_COUNT]; CP_STATIC_IF_GLIBCPP_V3 struct demangle_component * cplus_demangle_mangled_name PARAMS ((struct d_info *, int)); CP_STATIC_IF_GLIBCPP_V3 struct demangle_component * cplus_demangle_type PARAMS ((struct d_info *)); extern void cplus_demangle_init_info PARAMS ((const char *, int, size_t, struct d_info *)); /* cp-demangle.c needs to define this a little differently */ #undef CP_STATIC_IF_GLIBCPP_V3 #endif /* LIBIBERTY_CP_DEMANGLE_H */ /* If IN_GLIBCPP_V3 is defined, some functions are made static. We also rename them via #define to avoid compiler errors when the static definition conflicts with the extern declaration in a header file. */ #ifdef IN_GLIBCPP_V3 #define CP_STATIC_IF_GLIBCPP_V3 static #define cplus_demangle_fill_name d_fill_name static int d_fill_name PARAMS ((struct demangle_component *, const char *, int)); #define cplus_demangle_fill_extended_operator d_fill_extended_operator static int d_fill_extended_operator PARAMS ((struct demangle_component *, int, struct demangle_component *)); #define cplus_demangle_fill_ctor d_fill_ctor static int d_fill_ctor PARAMS ((struct demangle_component *, enum gnu_v3_ctor_kinds, struct demangle_component *)); #define cplus_demangle_fill_dtor d_fill_dtor static int d_fill_dtor PARAMS ((struct demangle_component *, enum gnu_v3_dtor_kinds, struct demangle_component *)); #define cplus_demangle_mangled_name d_mangled_name static struct demangle_component * d_mangled_name PARAMS ((struct d_info *, int)); #define cplus_demangle_type d_type static struct demangle_component * d_type PARAMS ((struct d_info *)); #define cplus_demangle_print d_print static char * d_print PARAMS ((int, const struct demangle_component *, int, size_t *)); #define cplus_demangle_init_info d_init_info static void d_init_info PARAMS ((const char *, int, size_t, struct d_info *)); #else /* ! defined(IN_GLIBCPP_V3) */ #define CP_STATIC_IF_GLIBCPP_V3 #endif /* ! defined(IN_GLIBCPP_V3) */ /* See if the compiler supports dynamic arrays. */ #ifdef __GNUC__ #define CP_DYNAMIC_ARRAYS #else #ifdef __STDC__ #ifdef __STDC_VERSION__ #if __STDC_VERSION__ >= 199901L #define CP_DYNAMIC_ARRAYS #endif /* __STDC__VERSION >= 199901L */ #endif /* defined (__STDC_VERSION__) */ #endif /* defined (__STDC__) */ #endif /* ! defined (__GNUC__) */ /* We avoid pulling in the ctype tables, to prevent pulling in additional unresolved symbols when this code is used in a library. FIXME: Is this really a valid reason? This comes from the original V3 demangler code. As of this writing this file has the following undefined references when compiled with -DIN_GLIBCPP_V3: malloc, realloc, free, memcpy, strcpy, strcat, strlen. */ #define IS_DIGIT(c) ((c) >= '0' && (c) <= '9') #define IS_UPPER(c) ((c) >= 'A' && (c) <= 'Z') #define IS_LOWER(c) ((c) >= 'a' && (c) <= 'z') /* The prefix prepended by GCC to an identifier represnting the anonymous namespace. */ #define ANONYMOUS_NAMESPACE_PREFIX "_GLOBAL_" #define ANONYMOUS_NAMESPACE_PREFIX_LEN \ (sizeof (ANONYMOUS_NAMESPACE_PREFIX) - 1) /* Information we keep for the standard substitutions. */ struct d_standard_sub_info { /* The code for this substitution. */ char code; /* The simple string it expands to. 
*/ const char *simple_expansion; /* The length of the simple expansion. */ int simple_len; /* The results of a full, verbose, expansion. This is used when qualifying a constructor/destructor, or when in verbose mode. */ const char *full_expansion; /* The length of the full expansion. */ int full_len; /* What to set the last_name field of d_info to; NULL if we should not set it. This is only relevant when qualifying a constructor/destructor. */ const char *set_last_name; /* The length of set_last_name. */ int set_last_name_len; }; /* Accessors for subtrees of struct demangle_component. */ #define d_left(dc) ((dc)->u.s_binary.left) #define d_right(dc) ((dc)->u.s_binary.right) /* A list of templates. This is used while printing. */ struct d_print_template { /* Next template on the list. */ struct d_print_template *next; /* This template. */ const struct demangle_component *template; }; /* A list of type modifiers. This is used while printing. */ struct d_print_mod { /* Next modifier on the list. These are in the reverse of the order in which they appeared in the mangled string. */ struct d_print_mod *next; /* The modifier. */ const struct demangle_component *mod; /* Whether this modifier was printed. */ int printed; /* The list of templates which applies to this modifier. */ struct d_print_template *templates; }; /* We use this structure to hold information during printing. */ struct d_print_info { /* The options passed to the demangler. */ int options; /* Buffer holding the result. */ char *buf; /* Current length of data in buffer. */ size_t len; /* Allocated size of buffer. */ size_t alc; /* The current list of templates, if any. */ struct d_print_template *templates; /* The current list of modifiers (e.g., pointer, reference, etc.), if any. */ struct d_print_mod *modifiers; /* Set to 1 if we had a memory allocation failure. */ int allocation_failure; }; #define d_print_saw_error(dpi) ((dpi)->buf == NULL) #define d_append_char(dpi, c) \ do \ { \ if ((dpi)->buf != NULL && (dpi)->len < (dpi)->alc) \ (dpi)->buf[(dpi)->len++] = (c); \ else \ d_print_append_char ((dpi), (c)); \ } \ while (0) #define d_append_buffer(dpi, s, l) \ do \ { \ if ((dpi)->buf != NULL && (dpi)->len + (l) <= (dpi)->alc) \ { \ memcpy ((dpi)->buf + (dpi)->len, (s), (l)); \ (dpi)->len += l; \ } \ else \ d_print_append_buffer ((dpi), (s), (l)); \ } \ while (0) #define d_append_string_constant(dpi, s) \ d_append_buffer (dpi, (s), sizeof (s) - 1) #define d_last_char(dpi) \ ((dpi)->buf == NULL || (dpi)->len == 0 ? 
'\0' : (dpi)->buf[(dpi)->len - 1]) #ifdef CP_DEMANGLE_DEBUG static void d_dump PARAMS ((struct demangle_component *, int)); #endif static struct demangle_component * d_make_empty PARAMS ((struct d_info *)); static struct demangle_component * d_make_comp PARAMS ((struct d_info *, enum demangle_component_type, struct demangle_component *, struct demangle_component *)); static struct demangle_component * d_make_name PARAMS ((struct d_info *, const char *, int)); static struct demangle_component * d_make_builtin_type PARAMS ((struct d_info *, const struct demangle_builtin_type_info *)); static struct demangle_component * d_make_operator PARAMS ((struct d_info *, const struct demangle_operator_info *)); static struct demangle_component * d_make_extended_operator PARAMS ((struct d_info *, int, struct demangle_component *)); static struct demangle_component * d_make_ctor PARAMS ((struct d_info *, enum gnu_v3_ctor_kinds, struct demangle_component *)); static struct demangle_component * d_make_dtor PARAMS ((struct d_info *, enum gnu_v3_dtor_kinds, struct demangle_component *)); static struct demangle_component * d_make_template_param PARAMS ((struct d_info *, long)); static struct demangle_component * d_make_sub PARAMS ((struct d_info *, const char *, int)); static int has_return_type PARAMS ((struct demangle_component *)); static int is_ctor_dtor_or_conversion PARAMS ((struct demangle_component *)); static struct demangle_component * d_encoding PARAMS ((struct d_info *, int)); static struct demangle_component * d_name PARAMS ((struct d_info *)); static struct demangle_component * d_nested_name PARAMS ((struct d_info *)); static struct demangle_component * d_prefix PARAMS ((struct d_info *)); static struct demangle_component * d_unqualified_name PARAMS ((struct d_info *)); static struct demangle_component * d_source_name PARAMS ((struct d_info *)); static long d_number PARAMS ((struct d_info *)); static struct demangle_component * d_identifier PARAMS ((struct d_info *, int)); static struct demangle_component * d_operator_name PARAMS ((struct d_info *)); static struct demangle_component * d_special_name PARAMS ((struct d_info *)); static int d_call_offset PARAMS ((struct d_info *, int)); static struct demangle_component * d_ctor_dtor_name PARAMS ((struct d_info *)); static struct demangle_component ** d_cv_qualifiers PARAMS ((struct d_info *, struct demangle_component **, int)); static struct demangle_component * d_function_type PARAMS ((struct d_info *)); static struct demangle_component * d_bare_function_type PARAMS ((struct d_info *, int)); static struct demangle_component * d_class_enum_type PARAMS ((struct d_info *)); static struct demangle_component * d_array_type PARAMS ((struct d_info *)); static struct demangle_component * d_pointer_to_member_type PARAMS ((struct d_info *)); static struct demangle_component * d_template_param PARAMS ((struct d_info *)); static struct demangle_component * d_template_args PARAMS ((struct d_info *)); static struct demangle_component * d_template_arg PARAMS ((struct d_info *)); static struct demangle_component * d_expression PARAMS ((struct d_info *)); static struct demangle_component * d_expr_primary PARAMS ((struct d_info *)); static struct demangle_component * d_local_name PARAMS ((struct d_info *)); static int d_discriminator PARAMS ((struct d_info *)); static int d_add_substitution PARAMS ((struct d_info *, struct demangle_component *)); static struct demangle_component * d_substitution PARAMS ((struct d_info *, int)); static void d_print_resize PARAMS 
((struct d_print_info *, size_t)); static void d_print_append_char PARAMS ((struct d_print_info *, int)); static void d_print_append_buffer PARAMS ((struct d_print_info *, const char *, size_t)); static void d_print_error PARAMS ((struct d_print_info *)); static void d_print_comp PARAMS ((struct d_print_info *, const struct demangle_component *)); static void d_print_java_identifier PARAMS ((struct d_print_info *, const char *, int)); static void d_print_mod_list PARAMS ((struct d_print_info *, struct d_print_mod *, int)); static void d_print_mod PARAMS ((struct d_print_info *, const struct demangle_component *)); static void d_print_function_type PARAMS ((struct d_print_info *, const struct demangle_component *, struct d_print_mod *)); static void d_print_array_type PARAMS ((struct d_print_info *, const struct demangle_component *, struct d_print_mod *)); static void d_print_expr_op PARAMS ((struct d_print_info *, const struct demangle_component *)); static void d_print_cast PARAMS ((struct d_print_info *, const struct demangle_component *)); static char * d_demangle PARAMS ((const char *, int, size_t *)); #ifdef CP_DEMANGLE_DEBUG static void d_dump (dc, indent) struct demangle_component *dc; int indent; { int i; if (dc == NULL) return; for (i = 0; i < indent; ++i) putchar (' '); switch (dc->type) { case DEMANGLE_COMPONENT_NAME: printf ("name '%.*s'\n", dc->u.s_name.len, dc->u.s_name.s); return; case DEMANGLE_COMPONENT_TEMPLATE_PARAM: printf ("template parameter %ld\n", dc->u.s_number.number); return; case DEMANGLE_COMPONENT_CTOR: printf ("constructor %d\n", (int) dc->u.s_ctor.kind); d_dump (dc->u.s_ctor.name, indent + 2); return; case DEMANGLE_COMPONENT_DTOR: printf ("destructor %d\n", (int) dc->u.s_dtor.kind); d_dump (dc->u.s_dtor.name, indent + 2); return; case DEMANGLE_COMPONENT_SUB_STD: printf ("standard substitution %s\n", dc->u.s_string.string); return; case DEMANGLE_COMPONENT_BUILTIN_TYPE: printf ("builtin type %s\n", dc->u.s_builtin.type->name); return; case DEMANGLE_COMPONENT_OPERATOR: printf ("operator %s\n", dc->u.s_operator.op->name); return; case DEMANGLE_COMPONENT_EXTENDED_OPERATOR: printf ("extended operator with %d args\n", dc->u.s_extended_operator.args); d_dump (dc->u.s_extended_operator.name, indent + 2); return; case DEMANGLE_COMPONENT_QUAL_NAME: printf ("qualified name\n"); break; case DEMANGLE_COMPONENT_LOCAL_NAME: printf ("local name\n"); break; case DEMANGLE_COMPONENT_TYPED_NAME: printf ("typed name\n"); break; case DEMANGLE_COMPONENT_TEMPLATE: printf ("template\n"); break; case DEMANGLE_COMPONENT_VTABLE: printf ("vtable\n"); break; case DEMANGLE_COMPONENT_VTT: printf ("VTT\n"); break; case DEMANGLE_COMPONENT_CONSTRUCTION_VTABLE: printf ("construction vtable\n"); break; case DEMANGLE_COMPONENT_TYPEINFO: printf ("typeinfo\n"); break; case DEMANGLE_COMPONENT_TYPEINFO_NAME: printf ("typeinfo name\n"); break; case DEMANGLE_COMPONENT_TYPEINFO_FN: printf ("typeinfo function\n"); break; case DEMANGLE_COMPONENT_THUNK: printf ("thunk\n"); break; case DEMANGLE_COMPONENT_VIRTUAL_THUNK: printf ("virtual thunk\n"); break; case DEMANGLE_COMPONENT_COVARIANT_THUNK: printf ("covariant thunk\n"); break; case DEMANGLE_COMPONENT_JAVA_CLASS: printf ("java class\n"); break; case DEMANGLE_COMPONENT_GUARD: printf ("guard\n"); break; case DEMANGLE_COMPONENT_REFTEMP: printf ("reference temporary\n"); break; case DEMANGLE_COMPONENT_RESTRICT: printf ("restrict\n"); break; case DEMANGLE_COMPONENT_VOLATILE: printf ("volatile\n"); break; case DEMANGLE_COMPONENT_CONST: printf ("const\n"); break; 
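    /* The *_THIS variants below are cv-qualifiers applied to the implicit
       `this' parameter of a member function, in contrast to the ordinary
       type qualifiers handled just above.  */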
case DEMANGLE_COMPONENT_RESTRICT_THIS: printf ("restrict this\n"); break; case DEMANGLE_COMPONENT_VOLATILE_THIS: printf ("volatile this\n"); break; case DEMANGLE_COMPONENT_CONST_THIS: printf ("const this\n"); break; case DEMANGLE_COMPONENT_VENDOR_TYPE_QUAL: printf ("vendor type qualifier\n"); break; case DEMANGLE_COMPONENT_POINTER: printf ("pointer\n"); break; case DEMANGLE_COMPONENT_REFERENCE: printf ("reference\n"); break; case DEMANGLE_COMPONENT_COMPLEX: printf ("complex\n"); break; case DEMANGLE_COMPONENT_IMAGINARY: printf ("imaginary\n"); break; case DEMANGLE_COMPONENT_VENDOR_TYPE: printf ("vendor type\n"); break; case DEMANGLE_COMPONENT_FUNCTION_TYPE: printf ("function type\n"); break; case DEMANGLE_COMPONENT_ARRAY_TYPE: printf ("array type\n"); break; case DEMANGLE_COMPONENT_PTRMEM_TYPE: printf ("pointer to member type\n"); break; case DEMANGLE_COMPONENT_ARGLIST: printf ("argument list\n"); break; case DEMANGLE_COMPONENT_TEMPLATE_ARGLIST: printf ("template argument list\n"); break; case DEMANGLE_COMPONENT_CAST: printf ("cast\n"); break; case DEMANGLE_COMPONENT_UNARY: printf ("unary operator\n"); break; case DEMANGLE_COMPONENT_BINARY: printf ("binary operator\n"); break; case DEMANGLE_COMPONENT_BINARY_ARGS: printf ("binary operator arguments\n"); break; case DEMANGLE_COMPONENT_TRINARY: printf ("trinary operator\n"); break; case DEMANGLE_COMPONENT_TRINARY_ARG1: printf ("trinary operator arguments 1\n"); break; case DEMANGLE_COMPONENT_TRINARY_ARG2: printf ("trinary operator arguments 1\n"); break; case DEMANGLE_COMPONENT_LITERAL: printf ("literal\n"); break; case DEMANGLE_COMPONENT_LITERAL_NEG: printf ("negative literal\n"); break; } d_dump (d_left (dc), indent + 2); d_dump (d_right (dc), indent + 2); } #endif /* CP_DEMANGLE_DEBUG */ /* Fill in a DEMANGLE_COMPONENT_NAME. */ CP_STATIC_IF_GLIBCPP_V3 int cplus_demangle_fill_name (p, s, len) struct demangle_component *p; const char *s; int len; { if (p == NULL || s == NULL || len == 0) return 0; p->type = DEMANGLE_COMPONENT_NAME; p->u.s_name.s = s; p->u.s_name.len = len; return 1; } /* Fill in a DEMANGLE_COMPONENT_EXTENDED_OPERATOR. */ CP_STATIC_IF_GLIBCPP_V3 int cplus_demangle_fill_extended_operator (p, args, name) struct demangle_component *p; int args; struct demangle_component *name; { if (p == NULL || args < 0 || name == NULL) return 0; p->type = DEMANGLE_COMPONENT_EXTENDED_OPERATOR; p->u.s_extended_operator.args = args; p->u.s_extended_operator.name = name; return 1; } /* Fill in a DEMANGLE_COMPONENT_CTOR. */ CP_STATIC_IF_GLIBCPP_V3 int cplus_demangle_fill_ctor (p, kind, name) struct demangle_component *p; enum gnu_v3_ctor_kinds kind; struct demangle_component *name; { if (p == NULL || name == NULL || (kind < gnu_v3_complete_object_ctor && kind > gnu_v3_complete_object_allocating_ctor)) return 0; p->type = DEMANGLE_COMPONENT_CTOR; p->u.s_ctor.kind = kind; p->u.s_ctor.name = name; return 1; } /* Fill in a DEMANGLE_COMPONENT_DTOR. */ CP_STATIC_IF_GLIBCPP_V3 int cplus_demangle_fill_dtor (p, kind, name) struct demangle_component *p; enum gnu_v3_dtor_kinds kind; struct demangle_component *name; { if (p == NULL || name == NULL || (kind < gnu_v3_deleting_dtor && kind > gnu_v3_base_object_dtor)) return 0; p->type = DEMANGLE_COMPONENT_DTOR; p->u.s_dtor.kind = kind; p->u.s_dtor.name = name; return 1; } /* Add a new component. 
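   The COMPS array is allocated by the caller and described by NUM_COMPS, so
   this routine just hands out the next free slot and returns NULL once the
   array is exhausted.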
*/ static struct demangle_component * d_make_empty (di) struct d_info *di; { struct demangle_component *p; if (di->next_comp >= di->num_comps) return NULL; p = &di->comps[di->next_comp]; ++di->next_comp; return p; } /* Add a new generic component. */ static struct demangle_component * d_make_comp (di, type, left, right) struct d_info *di; enum demangle_component_type type; struct demangle_component *left; struct demangle_component *right; { struct demangle_component *p; /* We check for errors here. A typical error would be a NULL return from a subroutine. We catch those here, and return NULL upward. */ switch (type) { /* These types require two parameters. */ case DEMANGLE_COMPONENT_QUAL_NAME: case DEMANGLE_COMPONENT_LOCAL_NAME: case DEMANGLE_COMPONENT_TYPED_NAME: case DEMANGLE_COMPONENT_TEMPLATE: case DEMANGLE_COMPONENT_CONSTRUCTION_VTABLE: case DEMANGLE_COMPONENT_VENDOR_TYPE_QUAL: case DEMANGLE_COMPONENT_PTRMEM_TYPE: case DEMANGLE_COMPONENT_UNARY: case DEMANGLE_COMPONENT_BINARY: case DEMANGLE_COMPONENT_BINARY_ARGS: case DEMANGLE_COMPONENT_TRINARY: case DEMANGLE_COMPONENT_TRINARY_ARG1: case DEMANGLE_COMPONENT_TRINARY_ARG2: case DEMANGLE_COMPONENT_LITERAL: case DEMANGLE_COMPONENT_LITERAL_NEG: if (left == NULL || right == NULL) return NULL; break; /* These types only require one parameter. */ case DEMANGLE_COMPONENT_VTABLE: case DEMANGLE_COMPONENT_VTT: case DEMANGLE_COMPONENT_TYPEINFO: case DEMANGLE_COMPONENT_TYPEINFO_NAME: case DEMANGLE_COMPONENT_TYPEINFO_FN: case DEMANGLE_COMPONENT_THUNK: case DEMANGLE_COMPONENT_VIRTUAL_THUNK: case DEMANGLE_COMPONENT_COVARIANT_THUNK: case DEMANGLE_COMPONENT_JAVA_CLASS: case DEMANGLE_COMPONENT_GUARD: case DEMANGLE_COMPONENT_REFTEMP: case DEMANGLE_COMPONENT_POINTER: case DEMANGLE_COMPONENT_REFERENCE: case DEMANGLE_COMPONENT_COMPLEX: case DEMANGLE_COMPONENT_IMAGINARY: case DEMANGLE_COMPONENT_VENDOR_TYPE: case DEMANGLE_COMPONENT_ARGLIST: case DEMANGLE_COMPONENT_TEMPLATE_ARGLIST: case DEMANGLE_COMPONENT_CAST: if (left == NULL) return NULL; break; /* This needs a right parameter, but the left parameter can be empty. */ case DEMANGLE_COMPONENT_ARRAY_TYPE: if (right == NULL) return NULL; break; /* These are allowed to have no parameters--in some cases they will be filled in later. */ case DEMANGLE_COMPONENT_FUNCTION_TYPE: case DEMANGLE_COMPONENT_RESTRICT: case DEMANGLE_COMPONENT_VOLATILE: case DEMANGLE_COMPONENT_CONST: case DEMANGLE_COMPONENT_RESTRICT_THIS: case DEMANGLE_COMPONENT_VOLATILE_THIS: case DEMANGLE_COMPONENT_CONST_THIS: break; /* Other types should not be seen here. */ default: return NULL; } p = d_make_empty (di); if (p != NULL) { p->type = type; p->u.s_binary.left = left; p->u.s_binary.right = right; } return p; } /* Add a new name component. */ static struct demangle_component * d_make_name (di, s, len) struct d_info *di; const char *s; int len; { struct demangle_component *p; p = d_make_empty (di); if (! cplus_demangle_fill_name (p, s, len)) return NULL; return p; } /* Add a new builtin type component. */ static struct demangle_component * d_make_builtin_type (di, type) struct d_info *di; const struct demangle_builtin_type_info *type; { struct demangle_component *p; if (type == NULL) return NULL; p = d_make_empty (di); if (p != NULL) { p->type = DEMANGLE_COMPONENT_BUILTIN_TYPE; p->u.s_builtin.type = type; } return p; } /* Add a new operator component. 
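   OP points into the cplus_demangle_operators table declared earlier; for
   instance, the standard table maps the two-character V3 code "pl" to
   operator "+".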
*/ static struct demangle_component * d_make_operator (di, op) struct d_info *di; const struct demangle_operator_info *op; { struct demangle_component *p; p = d_make_empty (di); if (p != NULL) { p->type = DEMANGLE_COMPONENT_OPERATOR; p->u.s_operator.op = op; } return p; } /* Add a new extended operator component. */ static struct demangle_component * d_make_extended_operator (di, args, name) struct d_info *di; int args; struct demangle_component *name; { struct demangle_component *p; p = d_make_empty (di); if (! cplus_demangle_fill_extended_operator (p, args, name)) return NULL; return p; } /* Add a new constructor component. */ static struct demangle_component * d_make_ctor (di, kind, name) struct d_info *di; enum gnu_v3_ctor_kinds kind; struct demangle_component *name; { struct demangle_component *p; p = d_make_empty (di); if (! cplus_demangle_fill_ctor (p, kind, name)) return NULL; return p; } /* Add a new destructor component. */ static struct demangle_component * d_make_dtor (di, kind, name) struct d_info *di; enum gnu_v3_dtor_kinds kind; struct demangle_component *name; { struct demangle_component *p; p = d_make_empty (di); if (! cplus_demangle_fill_dtor (p, kind, name)) return NULL; return p; } /* Add a new template parameter. */ static struct demangle_component * d_make_template_param (di, i) struct d_info *di; long i; { struct demangle_component *p; p = d_make_empty (di); if (p != NULL) { p->type = DEMANGLE_COMPONENT_TEMPLATE_PARAM; p->u.s_number.number = i; } return p; } /* Add a new standard substitution component. */ static struct demangle_component * d_make_sub (di, name, len) struct d_info *di; const char *name; int len; { struct demangle_component *p; p = d_make_empty (di); if (p != NULL) { p->type = DEMANGLE_COMPONENT_SUB_STD; p->u.s_string.string = name; p->u.s_string.len = len; } return p; } /* ::= _Z TOP_LEVEL is non-zero when called at the top level. */ CP_STATIC_IF_GLIBCPP_V3 struct demangle_component * cplus_demangle_mangled_name (di, top_level) struct d_info *di; int top_level; { if (d_next_char (di) != '_') return NULL; if (d_next_char (di) != 'Z') return NULL; return d_encoding (di, top_level); } /* Return whether a function should have a return type. The argument is the function name, which may be qualified in various ways. The rules are that template functions have return types with some exceptions, function types which are not part of a function name mangling have return types with some exceptions, and non-template function names do not have return types. The exceptions are that constructors, destructors, and conversion operators do not have return types. */ static int has_return_type (dc) struct demangle_component *dc; { if (dc == NULL) return 0; switch (dc->type) { default: return 0; case DEMANGLE_COMPONENT_TEMPLATE: return ! is_ctor_dtor_or_conversion (d_left (dc)); case DEMANGLE_COMPONENT_RESTRICT_THIS: case DEMANGLE_COMPONENT_VOLATILE_THIS: case DEMANGLE_COMPONENT_CONST_THIS: return has_return_type (d_left (dc)); } } /* Return whether a name is a constructor, a destructor, or a conversion operator. 
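   has_return_type, above, relies on this so that template functions whose
   name is a constructor, destructor or conversion operator are treated as
   having no encoded return type.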
*/ static int is_ctor_dtor_or_conversion (dc) struct demangle_component *dc; { if (dc == NULL) return 0; switch (dc->type) { default: return 0; case DEMANGLE_COMPONENT_QUAL_NAME: case DEMANGLE_COMPONENT_LOCAL_NAME: return is_ctor_dtor_or_conversion (d_right (dc)); case DEMANGLE_COMPONENT_CTOR: case DEMANGLE_COMPONENT_DTOR: case DEMANGLE_COMPONENT_CAST: return 1; } } /* ::= <(function) name> ::= <(data) name> ::= TOP_LEVEL is non-zero when called at the top level, in which case if DMGL_PARAMS is not set we do not demangle the function parameters. We only set this at the top level, because otherwise we would not correctly demangle names in local scopes. */ static struct demangle_component * d_encoding (di, top_level) struct d_info *di; int top_level; { char peek = d_peek_char (di); if (peek == 'G' || peek == 'T') return d_special_name (di); else { struct demangle_component *dc; dc = d_name (di); if (dc != NULL && top_level && (di->options & DMGL_PARAMS) == 0) { /* Strip off any initial CV-qualifiers, as they really apply to the `this' parameter, and they were not output by the v2 demangler without DMGL_PARAMS. */ while (dc->type == DEMANGLE_COMPONENT_RESTRICT_THIS || dc->type == DEMANGLE_COMPONENT_VOLATILE_THIS || dc->type == DEMANGLE_COMPONENT_CONST_THIS) dc = d_left (dc); /* If the top level is a DEMANGLE_COMPONENT_LOCAL_NAME, then there may be CV-qualifiers on its right argument which really apply here; this happens when parsing a class which is local to a function. */ if (dc->type == DEMANGLE_COMPONENT_LOCAL_NAME) { struct demangle_component *dcr; dcr = d_right (dc); while (dcr->type == DEMANGLE_COMPONENT_RESTRICT_THIS || dcr->type == DEMANGLE_COMPONENT_VOLATILE_THIS || dcr->type == DEMANGLE_COMPONENT_CONST_THIS) dcr = d_left (dcr); dc->u.s_binary.right = dcr; } return dc; } peek = d_peek_char (di); if (peek == '\0' || peek == 'E') return dc; return d_make_comp (di, DEMANGLE_COMPONENT_TYPED_NAME, dc, d_bare_function_type (di, has_return_type (dc))); } } /* ::= ::= ::= ::= ::= ::= St ::= ::= */ static struct demangle_component * d_name (di) struct d_info *di; { char peek = d_peek_char (di); struct demangle_component *dc; switch (peek) { case 'N': return d_nested_name (di); case 'Z': return d_local_name (di); case 'S': { int subst; if (d_peek_next_char (di) != 't') { dc = d_substitution (di, 0); subst = 1; } else { d_advance (di, 2); dc = d_make_comp (di, DEMANGLE_COMPONENT_QUAL_NAME, d_make_name (di, "std", 3), d_unqualified_name (di)); di->expansion += 3; subst = 0; } if (d_peek_char (di) != 'I') { /* The grammar does not permit this case to occur if we called d_substitution() above (i.e., subst == 1). We don't bother to check. */ } else { /* This is , which means that we just saw , which is a substitution candidate if we didn't just get it from a substitution. */ if (! subst) { if (! d_add_substitution (di, dc)) return NULL; } dc = d_make_comp (di, DEMANGLE_COMPONENT_TEMPLATE, dc, d_template_args (di)); } return dc; } default: dc = d_unqualified_name (di); if (d_peek_char (di) == 'I') { /* This is , which means that we just saw , which is a substitution candidate. */ if (! 
d_add_substitution (di, dc)) return NULL; dc = d_make_comp (di, DEMANGLE_COMPONENT_TEMPLATE, dc, d_template_args (di)); } return dc; } } /* ::= N [] E ::= N [] E */ static struct demangle_component * d_nested_name (di) struct d_info *di; { struct demangle_component *ret; struct demangle_component **pret; if (d_next_char (di) != 'N') return NULL; pret = d_cv_qualifiers (di, &ret, 1); if (pret == NULL) return NULL; *pret = d_prefix (di); if (*pret == NULL) return NULL; if (d_next_char (di) != 'E') return NULL; return ret; } /* ::= ::= ::= ::= ::= ::= <(template) unqualified-name> ::= ::= */ static struct demangle_component * d_prefix (di) struct d_info *di; { struct demangle_component *ret = NULL; while (1) { char peek; enum demangle_component_type comb_type; struct demangle_component *dc; peek = d_peek_char (di); if (peek == '\0') return NULL; /* The older code accepts a here, but I don't see that in the grammar. The older code does not accept a here. */ comb_type = DEMANGLE_COMPONENT_QUAL_NAME; if (IS_DIGIT (peek) || IS_LOWER (peek) || peek == 'C' || peek == 'D') dc = d_unqualified_name (di); else if (peek == 'S') dc = d_substitution (di, 1); else if (peek == 'I') { if (ret == NULL) return NULL; comb_type = DEMANGLE_COMPONENT_TEMPLATE; dc = d_template_args (di); } else if (peek == 'T') dc = d_template_param (di); else if (peek == 'E') return ret; else return NULL; if (ret == NULL) ret = dc; else ret = d_make_comp (di, comb_type, ret, dc); if (peek != 'S' && d_peek_char (di) != 'E') { if (! d_add_substitution (di, ret)) return NULL; } } } /* ::= ::= ::= */ static struct demangle_component * d_unqualified_name (di) struct d_info *di; { char peek; peek = d_peek_char (di); if (IS_DIGIT (peek)) return d_source_name (di); else if (IS_LOWER (peek)) { struct demangle_component *ret; ret = d_operator_name (di); if (ret != NULL && ret->type == DEMANGLE_COMPONENT_OPERATOR) di->expansion += sizeof "operator" + ret->u.s_operator.op->len - 2; return ret; } else if (peek == 'C' || peek == 'D') return d_ctor_dtor_name (di); else return NULL; } /* ::= <(positive length) number> */ static struct demangle_component * d_source_name (di) struct d_info *di; { long len; struct demangle_component *ret; len = d_number (di); if (len <= 0) return NULL; ret = d_identifier (di, len); di->last_name = ret; return ret; } /* number ::= [n] <(non-negative decimal integer)> */ static long d_number (di) struct d_info *di; { int negative; char peek; long ret; negative = 0; peek = d_peek_char (di); if (peek == 'n') { negative = 1; d_advance (di, 1); peek = d_peek_char (di); } ret = 0; while (1) { if (! IS_DIGIT (peek)) { if (negative) ret = - ret; return ret; } ret = ret * 10 + peek - '0'; d_advance (di, 1); peek = d_peek_char (di); } } /* identifier ::= <(unqualified source code identifier)> */ static struct demangle_component * d_identifier (di, len) struct d_info *di; int len; { const char *name; name = d_str (di); if (di->send - name < len) return NULL; d_advance (di, len); /* A Java mangled name may have a trailing '$' if it is a C++ keyword. This '$' is not included in the length count. We just ignore the '$'. */ if ((di->options & DMGL_JAVA) != 0 && d_peek_char (di) == '$') d_advance (di, 1); /* Look for something which looks like a gcc encoding of an anonymous namespace, and replace it with a more user friendly name. 
*/ if (len >= (int) ANONYMOUS_NAMESPACE_PREFIX_LEN + 2 && memcmp (name, ANONYMOUS_NAMESPACE_PREFIX, ANONYMOUS_NAMESPACE_PREFIX_LEN) == 0) { const char *s; s = name + ANONYMOUS_NAMESPACE_PREFIX_LEN; if ((*s == '.' || *s == '_' || *s == '$') && s[1] == 'N') { di->expansion -= len - sizeof "(anonymous namespace)"; return d_make_name (di, "(anonymous namespace)", sizeof "(anonymous namespace)" - 1); } } return d_make_name (di, name, len); } /* operator_name ::= many different two character encodings. ::= cv ::= v */ #define NL(s) s, (sizeof s) - 1 CP_STATIC_IF_GLIBCPP_V3 const struct demangle_operator_info cplus_demangle_operators[] = { { "aN", NL ("&="), 2 }, { "aS", NL ("="), 2 }, { "aa", NL ("&&"), 2 }, { "ad", NL ("&"), 1 }, { "an", NL ("&"), 2 }, { "cl", NL ("()"), 0 }, { "cm", NL (","), 2 }, { "co", NL ("~"), 1 }, { "dV", NL ("/="), 2 }, { "da", NL ("delete[]"), 1 }, { "de", NL ("*"), 1 }, { "dl", NL ("delete"), 1 }, { "dv", NL ("/"), 2 }, { "eO", NL ("^="), 2 }, { "eo", NL ("^"), 2 }, { "eq", NL ("=="), 2 }, { "ge", NL (">="), 2 }, { "gt", NL (">"), 2 }, { "ix", NL ("[]"), 2 }, { "lS", NL ("<<="), 2 }, { "le", NL ("<="), 2 }, { "ls", NL ("<<"), 2 }, { "lt", NL ("<"), 2 }, { "mI", NL ("-="), 2 }, { "mL", NL ("*="), 2 }, { "mi", NL ("-"), 2 }, { "ml", NL ("*"), 2 }, { "mm", NL ("--"), 1 }, { "na", NL ("new[]"), 1 }, { "ne", NL ("!="), 2 }, { "ng", NL ("-"), 1 }, { "nt", NL ("!"), 1 }, { "nw", NL ("new"), 1 }, { "oR", NL ("|="), 2 }, { "oo", NL ("||"), 2 }, { "or", NL ("|"), 2 }, { "pL", NL ("+="), 2 }, { "pl", NL ("+"), 2 }, { "pm", NL ("->*"), 2 }, { "pp", NL ("++"), 1 }, { "ps", NL ("+"), 1 }, { "pt", NL ("->"), 2 }, { "qu", NL ("?"), 3 }, { "rM", NL ("%="), 2 }, { "rS", NL (">>="), 2 }, { "rm", NL ("%"), 2 }, { "rs", NL (">>"), 2 }, { "st", NL ("sizeof "), 1 }, { "sz", NL ("sizeof "), 1 }, { NULL, NULL, 0, 0 } }; static struct demangle_component * d_operator_name (di) struct d_info *di; { char c1; char c2; c1 = d_next_char (di); c2 = d_next_char (di); if (c1 == 'v' && IS_DIGIT (c2)) return d_make_extended_operator (di, c2 - '0', d_source_name (di)); else if (c1 == 'c' && c2 == 'v') return d_make_comp (di, DEMANGLE_COMPONENT_CAST, cplus_demangle_type (di), NULL); else { /* LOW is the inclusive lower bound. */ int low = 0; /* HIGH is the exclusive upper bound. We subtract one to ignore the sentinel at the end of the array. 
*/ int high = ((sizeof (cplus_demangle_operators) / sizeof (cplus_demangle_operators[0])) - 1); while (1) { int i; const struct demangle_operator_info *p; i = low + (high - low) / 2; p = cplus_demangle_operators + i; if (c1 == p->code[0] && c2 == p->code[1]) return d_make_operator (di, p); if (c1 < p->code[0] || (c1 == p->code[0] && c2 < p->code[1])) high = i; else low = i + 1; if (low == high) return NULL; } } } /* ::= TV ::= TT ::= TI ::= TS ::= GV <(object) name> ::= T <(base) encoding> ::= Tc <(base) encoding> Also g++ extensions: ::= TC <(offset) number> _ <(base) type> ::= TF ::= TJ ::= GR */ static struct demangle_component * d_special_name (di) struct d_info *di; { char c; di->expansion += 20; c = d_next_char (di); if (c == 'T') { switch (d_next_char (di)) { case 'V': di->expansion -= 5; return d_make_comp (di, DEMANGLE_COMPONENT_VTABLE, cplus_demangle_type (di), NULL); case 'T': di->expansion -= 10; return d_make_comp (di, DEMANGLE_COMPONENT_VTT, cplus_demangle_type (di), NULL); case 'I': return d_make_comp (di, DEMANGLE_COMPONENT_TYPEINFO, cplus_demangle_type (di), NULL); case 'S': return d_make_comp (di, DEMANGLE_COMPONENT_TYPEINFO_NAME, cplus_demangle_type (di), NULL); case 'h': if (! d_call_offset (di, 'h')) return NULL; return d_make_comp (di, DEMANGLE_COMPONENT_THUNK, d_encoding (di, 0), NULL); case 'v': if (! d_call_offset (di, 'v')) return NULL; return d_make_comp (di, DEMANGLE_COMPONENT_VIRTUAL_THUNK, d_encoding (di, 0), NULL); case 'c': if (! d_call_offset (di, '\0')) return NULL; if (! d_call_offset (di, '\0')) return NULL; return d_make_comp (di, DEMANGLE_COMPONENT_COVARIANT_THUNK, d_encoding (di, 0), NULL); case 'C': { struct demangle_component *derived_type; long offset; struct demangle_component *base_type; derived_type = cplus_demangle_type (di); offset = d_number (di); if (offset < 0) return NULL; if (d_next_char (di) != '_') return NULL; base_type = cplus_demangle_type (di); /* We don't display the offset. FIXME: We should display it in verbose mode. */ di->expansion += 5; return d_make_comp (di, DEMANGLE_COMPONENT_CONSTRUCTION_VTABLE, base_type, derived_type); } case 'F': return d_make_comp (di, DEMANGLE_COMPONENT_TYPEINFO_FN, cplus_demangle_type (di), NULL); case 'J': return d_make_comp (di, DEMANGLE_COMPONENT_JAVA_CLASS, cplus_demangle_type (di), NULL); default: return NULL; } } else if (c == 'G') { switch (d_next_char (di)) { case 'V': return d_make_comp (di, DEMANGLE_COMPONENT_GUARD, d_name (di), NULL); case 'R': return d_make_comp (di, DEMANGLE_COMPONENT_REFTEMP, d_name (di), NULL); default: return NULL; } } else return NULL; } /* ::= h _ ::= v _ ::= <(offset) number> ::= <(offset) number> _ <(virtual offset) number> The C parameter, if not '\0', is a character we just read which is the start of the . We don't display the offset information anywhere. FIXME: We should display it in verbose mode. 
*/ static int d_call_offset (di, c) struct d_info *di; int c; { long offset; long virtual_offset; if (c == '\0') c = d_next_char (di); if (c == 'h') offset = d_number (di); else if (c == 'v') { offset = d_number (di); if (d_next_char (di) != '_') return 0; virtual_offset = d_number (di); } else return 0; if (d_next_char (di) != '_') return 0; return 1; } /* ::= C1 ::= C2 ::= C3 ::= D0 ::= D1 ::= D2 */ static struct demangle_component * d_ctor_dtor_name (di) struct d_info *di; { if (di->last_name != NULL) { if (di->last_name->type == DEMANGLE_COMPONENT_NAME) di->expansion += di->last_name->u.s_name.len; else if (di->last_name->type == DEMANGLE_COMPONENT_SUB_STD) di->expansion += di->last_name->u.s_string.len; } switch (d_next_char (di)) { case 'C': { enum gnu_v3_ctor_kinds kind; switch (d_next_char (di)) { case '1': kind = gnu_v3_complete_object_ctor; break; case '2': kind = gnu_v3_base_object_ctor; break; case '3': kind = gnu_v3_complete_object_allocating_ctor; break; default: return NULL; } return d_make_ctor (di, kind, di->last_name); } case 'D': { enum gnu_v3_dtor_kinds kind; switch (d_next_char (di)) { case '0': kind = gnu_v3_deleting_dtor; break; case '1': kind = gnu_v3_complete_object_dtor; break; case '2': kind = gnu_v3_base_object_dtor; break; default: return NULL; } return d_make_dtor (di, kind, di->last_name); } default: return NULL; } } /* ::= ::= ::= ::= ::= ::= ::= ::= ::= ::= P ::= R ::= C ::= G ::= U ::= various one letter codes ::= u */ CP_STATIC_IF_GLIBCPP_V3 const struct demangle_builtin_type_info cplus_demangle_builtin_types[D_BUILTIN_TYPE_COUNT] = { /* a */ { NL ("signed char"), NL ("signed char"), D_PRINT_DEFAULT }, /* b */ { NL ("bool"), NL ("boolean"), D_PRINT_BOOL }, /* c */ { NL ("char"), NL ("byte"), D_PRINT_DEFAULT }, /* d */ { NL ("double"), NL ("double"), D_PRINT_FLOAT }, /* e */ { NL ("long double"), NL ("long double"), D_PRINT_FLOAT }, /* f */ { NL ("float"), NL ("float"), D_PRINT_FLOAT }, /* g */ { NL ("__float128"), NL ("__float128"), D_PRINT_FLOAT }, /* h */ { NL ("unsigned char"), NL ("unsigned char"), D_PRINT_DEFAULT }, /* i */ { NL ("int"), NL ("int"), D_PRINT_INT }, /* j */ { NL ("unsigned int"), NL ("unsigned"), D_PRINT_UNSIGNED }, /* k */ { NULL, 0, NULL, 0, D_PRINT_DEFAULT }, /* l */ { NL ("long"), NL ("long"), D_PRINT_LONG }, /* m */ { NL ("unsigned long"), NL ("unsigned long"), D_PRINT_UNSIGNED_LONG }, /* n */ { NL ("__int128"), NL ("__int128"), D_PRINT_DEFAULT }, /* o */ { NL ("unsigned __int128"), NL ("unsigned __int128"), D_PRINT_DEFAULT }, /* p */ { NULL, 0, NULL, 0, D_PRINT_DEFAULT }, /* q */ { NULL, 0, NULL, 0, D_PRINT_DEFAULT }, /* r */ { NULL, 0, NULL, 0, D_PRINT_DEFAULT }, /* s */ { NL ("short"), NL ("short"), D_PRINT_DEFAULT }, /* t */ { NL ("unsigned short"), NL ("unsigned short"), D_PRINT_DEFAULT }, /* u */ { NULL, 0, NULL, 0, D_PRINT_DEFAULT }, /* v */ { NL ("void"), NL ("void"), D_PRINT_VOID }, /* w */ { NL ("wchar_t"), NL ("char"), D_PRINT_DEFAULT }, /* x */ { NL ("long long"), NL ("long"), D_PRINT_LONG_LONG }, /* y */ { NL ("unsigned long long"), NL ("unsigned long long"), D_PRINT_UNSIGNED_LONG_LONG }, /* z */ { NL ("..."), NL ("..."), D_PRINT_DEFAULT }, }; CP_STATIC_IF_GLIBCPP_V3 struct demangle_component * cplus_demangle_type (di) struct d_info *di; { char peek; struct demangle_component *ret; int can_subst; /* The ABI specifies that when CV-qualifiers are used, the base type is substitutable, and the fully qualified type is substitutable, but the base type with a strict subset of the CV-qualifiers is not substitutable. 
The natural recursive implementation of the CV-qualifiers would cause subsets to be substitutable, so instead we pull them all off now. FIXME: The ABI says that order-insensitive vendor qualifiers should be handled in the same way, but we have no way to tell which vendor qualifiers are order-insensitive and which are order-sensitive. So we just assume that they are all order-sensitive. g++ 3.4 supports only one vendor qualifier, __vector, and it treats it as order-sensitive when mangling names. */ peek = d_peek_char (di); if (peek == 'r' || peek == 'V' || peek == 'K') { struct demangle_component **pret; pret = d_cv_qualifiers (di, &ret, 0); if (pret == NULL) return NULL; *pret = cplus_demangle_type (di); if (! d_add_substitution (di, ret)) return NULL; return ret; } can_subst = 1; switch (peek) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'l': case 'm': case 'n': case 'o': case 's': case 't': case 'v': case 'w': case 'x': case 'y': case 'z': ret = d_make_builtin_type (di, &cplus_demangle_builtin_types[peek - 'a']); di->expansion += ret->u.s_builtin.type->len; can_subst = 0; d_advance (di, 1); break; case 'u': d_advance (di, 1); ret = d_make_comp (di, DEMANGLE_COMPONENT_VENDOR_TYPE, d_source_name (di), NULL); break; case 'F': ret = d_function_type (di); break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'N': case 'Z': ret = d_class_enum_type (di); break; case 'A': ret = d_array_type (di); break; case 'M': ret = d_pointer_to_member_type (di); break; case 'T': ret = d_template_param (di); if (d_peek_char (di) == 'I') { /* This is . The part is a substitution candidate. */ if (! d_add_substitution (di, ret)) return NULL; ret = d_make_comp (di, DEMANGLE_COMPONENT_TEMPLATE, ret, d_template_args (di)); } break; case 'S': /* If this is a special substitution, then it is the start of . */ { char peek_next; peek_next = d_peek_next_char (di); if (IS_DIGIT (peek_next) || peek_next == '_' || IS_UPPER (peek_next)) { ret = d_substitution (di, 0); /* The substituted name may have been a template name and may be followed by tepmlate args. */ if (d_peek_char (di) == 'I') ret = d_make_comp (di, DEMANGLE_COMPONENT_TEMPLATE, ret, d_template_args (di)); else can_subst = 0; } else { ret = d_class_enum_type (di); /* If the substitution was a complete type, then it is not a new substitution candidate. However, if the substitution was followed by template arguments, then the whole thing is a substitution candidate. */ if (ret != NULL && ret->type == DEMANGLE_COMPONENT_SUB_STD) can_subst = 0; } } break; case 'P': d_advance (di, 1); ret = d_make_comp (di, DEMANGLE_COMPONENT_POINTER, cplus_demangle_type (di), NULL); break; case 'R': d_advance (di, 1); ret = d_make_comp (di, DEMANGLE_COMPONENT_REFERENCE, cplus_demangle_type (di), NULL); break; case 'C': d_advance (di, 1); ret = d_make_comp (di, DEMANGLE_COMPONENT_COMPLEX, cplus_demangle_type (di), NULL); break; case 'G': d_advance (di, 1); ret = d_make_comp (di, DEMANGLE_COMPONENT_IMAGINARY, cplus_demangle_type (di), NULL); break; case 'U': d_advance (di, 1); ret = d_source_name (di); ret = d_make_comp (di, DEMANGLE_COMPONENT_VENDOR_TYPE_QUAL, cplus_demangle_type (di), ret); break; default: return NULL; } if (can_subst) { if (! 
d_add_substitution (di, ret)) return NULL; } return ret; } /* ::= [r] [V] [K] */ static struct demangle_component ** d_cv_qualifiers (di, pret, member_fn) struct d_info *di; struct demangle_component **pret; int member_fn; { char peek; peek = d_peek_char (di); while (peek == 'r' || peek == 'V' || peek == 'K') { enum demangle_component_type t; d_advance (di, 1); if (peek == 'r') { t = (member_fn ? DEMANGLE_COMPONENT_RESTRICT_THIS : DEMANGLE_COMPONENT_RESTRICT); di->expansion += sizeof "restrict"; } else if (peek == 'V') { t = (member_fn ? DEMANGLE_COMPONENT_VOLATILE_THIS : DEMANGLE_COMPONENT_VOLATILE); di->expansion += sizeof "volatile"; } else { t = (member_fn ? DEMANGLE_COMPONENT_CONST_THIS : DEMANGLE_COMPONENT_CONST); di->expansion += sizeof "const"; } *pret = d_make_comp (di, t, NULL, NULL); if (*pret == NULL) return NULL; pret = &d_left (*pret); peek = d_peek_char (di); } return pret; } /* ::= F [Y] E */ static struct demangle_component * d_function_type (di) struct d_info *di; { struct demangle_component *ret; if (d_next_char (di) != 'F') return NULL; if (d_peek_char (di) == 'Y') { /* Function has C linkage. We don't print this information. FIXME: We should print it in verbose mode. */ d_advance (di, 1); } ret = d_bare_function_type (di, 1); if (d_next_char (di) != 'E') return NULL; return ret; } /* ::= + */ static struct demangle_component * d_bare_function_type (di, has_return_type) struct d_info *di; int has_return_type; { struct demangle_component *return_type; struct demangle_component *tl; struct demangle_component **ptl; return_type = NULL; tl = NULL; ptl = &tl; while (1) { char peek; struct demangle_component *type; peek = d_peek_char (di); if (peek == '\0' || peek == 'E') break; type = cplus_demangle_type (di); if (type == NULL) return NULL; if (has_return_type) { return_type = type; has_return_type = 0; } else { *ptl = d_make_comp (di, DEMANGLE_COMPONENT_ARGLIST, type, NULL); if (*ptl == NULL) return NULL; ptl = &d_right (*ptl); } } /* There should be at least one parameter type besides the optional return type. A function which takes no arguments will have a single parameter type void. */ if (tl == NULL) return NULL; /* If we have a single parameter type void, omit it. 
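*/

/* Editor's illustration -- not part of the original libiberty source.  The
   test just below drops a lone "void" parameter, so a function that takes
   no arguments prints as "f()" rather than "f(void)".  The same rule at
   the string level; format_params is an invented name and the block is
   kept under "#if 0" so it is never compiled with this file.  */
#if 0
#include <stdio.h>
#include <string.h>

/* Print a parameter list; a single "void" means the function takes
   nothing, so nothing appears between the parentheses.  */
static void
format_params (const char *const *types, int n)
{
  int i;

  putchar ('(');
  if (! (n == 1 && strcmp (types[0], "void") == 0))
    for (i = 0; i < n; ++i)
      printf (i == 0 ? "%s" : ", %s", types[i]);
  printf (")\n");
}

int
main (void)
{
  static const char *const none[] = { "void" };
  static const char *const two[] = { "int", "char*" };

  format_params (none, 1);  /* prints "()"           */
  format_params (two, 2);   /* prints "(int, char*)" */
  return 0;
}
#endif

/* (end of editor's illustration)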
*/ if (d_right (tl) == NULL && d_left (tl)->type == DEMANGLE_COMPONENT_BUILTIN_TYPE && d_left (tl)->u.s_builtin.type->print == D_PRINT_VOID) { di->expansion -= d_left (tl)->u.s_builtin.type->len; tl = NULL; } return d_make_comp (di, DEMANGLE_COMPONENT_FUNCTION_TYPE, return_type, tl); } /* ::= */ static struct demangle_component * d_class_enum_type (di) struct d_info *di; { return d_name (di); } /* ::= A <(positive dimension) number> _ <(element) type> ::= A [<(dimension) expression>] _ <(element) type> */ static struct demangle_component * d_array_type (di) struct d_info *di; { char peek; struct demangle_component *dim; if (d_next_char (di) != 'A') return NULL; peek = d_peek_char (di); if (peek == '_') dim = NULL; else if (IS_DIGIT (peek)) { const char *s; s = d_str (di); do { d_advance (di, 1); peek = d_peek_char (di); } while (IS_DIGIT (peek)); dim = d_make_name (di, s, d_str (di) - s); if (dim == NULL) return NULL; } else { dim = d_expression (di); if (dim == NULL) return NULL; } if (d_next_char (di) != '_') return NULL; return d_make_comp (di, DEMANGLE_COMPONENT_ARRAY_TYPE, dim, cplus_demangle_type (di)); } /* ::= M <(class) type> <(member) type> */ static struct demangle_component * d_pointer_to_member_type (di) struct d_info *di; { struct demangle_component *cl; struct demangle_component *mem; struct demangle_component **pmem; if (d_next_char (di) != 'M') return NULL; cl = cplus_demangle_type (di); /* The ABI specifies that any type can be a substitution source, and that M is followed by two types, and that when a CV-qualified type is seen both the base type and the CV-qualified types are substitution sources. The ABI also specifies that for a pointer to a CV-qualified member function, the qualifiers are attached to the second type. Given the grammar, a plain reading of the ABI suggests that both the CV-qualified member function and the non-qualified member function are substitution sources. However, g++ does not work that way. g++ treats only the CV-qualified member function as a substitution source. FIXME. So to work with g++, we need to pull off the CV-qualifiers here, in order to avoid calling add_substitution() in cplus_demangle_type(). */ pmem = d_cv_qualifiers (di, &mem, 1); if (pmem == NULL) return NULL; *pmem = cplus_demangle_type (di); return d_make_comp (di, DEMANGLE_COMPONENT_PTRMEM_TYPE, cl, mem); } /* ::= T_ ::= T <(parameter-2 non-negative) number> _ */ static struct demangle_component * d_template_param (di) struct d_info *di; { long param; if (d_next_char (di) != 'T') return NULL; if (d_peek_char (di) == '_') param = 0; else { param = d_number (di); if (param < 0) return NULL; param += 1; } if (d_next_char (di) != '_') return NULL; ++di->did_subs; return d_make_template_param (di, param); } /* ::= I + E */ static struct demangle_component * d_template_args (di) struct d_info *di; { struct demangle_component *hold_last_name; struct demangle_component *al; struct demangle_component **pal; /* Preserve the last name we saw--don't let the template arguments clobber it, as that would give us the wrong name for a subsequent constructor or destructor. 
*/ hold_last_name = di->last_name; if (d_next_char (di) != 'I') return NULL; al = NULL; pal = &al; while (1) { struct demangle_component *a; a = d_template_arg (di); if (a == NULL) return NULL; *pal = d_make_comp (di, DEMANGLE_COMPONENT_TEMPLATE_ARGLIST, a, NULL); if (*pal == NULL) return NULL; pal = &d_right (*pal); if (d_peek_char (di) == 'E') { d_advance (di, 1); break; } } di->last_name = hold_last_name; return al; } /* ::= ::= X E ::= */ static struct demangle_component * d_template_arg (di) struct d_info *di; { struct demangle_component *ret; switch (d_peek_char (di)) { case 'X': d_advance (di, 1); ret = d_expression (di); if (d_next_char (di) != 'E') return NULL; return ret; case 'L': return d_expr_primary (di); default: return cplus_demangle_type (di); } } /* ::= <(unary) operator-name> ::= <(binary) operator-name> ::= <(trinary) operator-name> ::= st ::= ::= sr ::= sr ::= */ static struct demangle_component * d_expression (di) struct d_info *di; { char peek; peek = d_peek_char (di); if (peek == 'L') return d_expr_primary (di); else if (peek == 'T') return d_template_param (di); else if (peek == 's' && d_peek_next_char (di) == 'r') { struct demangle_component *type; struct demangle_component *name; d_advance (di, 2); type = cplus_demangle_type (di); name = d_unqualified_name (di); if (d_peek_char (di) != 'I') return d_make_comp (di, DEMANGLE_COMPONENT_QUAL_NAME, type, name); else return d_make_comp (di, DEMANGLE_COMPONENT_QUAL_NAME, type, d_make_comp (di, DEMANGLE_COMPONENT_TEMPLATE, name, d_template_args (di))); } else { struct demangle_component *op; int args; op = d_operator_name (di); if (op == NULL) return NULL; if (op->type == DEMANGLE_COMPONENT_OPERATOR) di->expansion += op->u.s_operator.op->len - 2; if (op->type == DEMANGLE_COMPONENT_OPERATOR && strcmp (op->u.s_operator.op->code, "st") == 0) return d_make_comp (di, DEMANGLE_COMPONENT_UNARY, op, cplus_demangle_type (di)); switch (op->type) { default: return NULL; case DEMANGLE_COMPONENT_OPERATOR: args = op->u.s_operator.op->args; break; case DEMANGLE_COMPONENT_EXTENDED_OPERATOR: args = op->u.s_extended_operator.args; break; case DEMANGLE_COMPONENT_CAST: args = 1; break; } switch (args) { case 1: return d_make_comp (di, DEMANGLE_COMPONENT_UNARY, op, d_expression (di)); case 2: { struct demangle_component *left; left = d_expression (di); return d_make_comp (di, DEMANGLE_COMPONENT_BINARY, op, d_make_comp (di, DEMANGLE_COMPONENT_BINARY_ARGS, left, d_expression (di))); } case 3: { struct demangle_component *first; struct demangle_component *second; first = d_expression (di); second = d_expression (di); return d_make_comp (di, DEMANGLE_COMPONENT_TRINARY, op, d_make_comp (di, DEMANGLE_COMPONENT_TRINARY_ARG1, first, d_make_comp (di, DEMANGLE_COMPONENT_TRINARY_ARG2, second, d_expression (di)))); } default: return NULL; } } } /* ::= L <(value) number> E ::= L <(value) float> E ::= L E */ static struct demangle_component * d_expr_primary (di) struct d_info *di; { struct demangle_component *ret; if (d_next_char (di) != 'L') return NULL; if (d_peek_char (di) == '_') ret = cplus_demangle_mangled_name (di, 0); else { struct demangle_component *type; enum demangle_component_type t; const char *s; type = cplus_demangle_type (di); if (type == NULL) return NULL; /* If we have a type we know how to print, we aren't going to print the type name itself. 
*/ if (type->type == DEMANGLE_COMPONENT_BUILTIN_TYPE && type->u.s_builtin.type->print != D_PRINT_DEFAULT) di->expansion -= type->u.s_builtin.type->len; /* Rather than try to interpret the literal value, we just collect it as a string. Note that it's possible to have a floating point literal here. The ABI specifies that the format of such literals is machine independent. That's fine, but what's not fine is that versions of g++ up to 3.2 with -fabi-version=1 used upper case letters in the hex constant, and dumped out gcc's internal representation. That makes it hard to tell where the constant ends, and hard to dump the constant in any readable form anyhow. We don't attempt to handle these cases. */ t = DEMANGLE_COMPONENT_LITERAL; if (d_peek_char (di) == 'n') { t = DEMANGLE_COMPONENT_LITERAL_NEG; d_advance (di, 1); } s = d_str (di); while (d_peek_char (di) != 'E') d_advance (di, 1); ret = d_make_comp (di, t, type, d_make_name (di, s, d_str (di) - s)); } if (d_next_char (di) != 'E') return NULL; return ret; } /* ::= Z <(function) encoding> E <(entity) name> [] ::= Z <(function) encoding> E s [] */ static struct demangle_component * d_local_name (di) struct d_info *di; { struct demangle_component *function; if (d_next_char (di) != 'Z') return NULL; function = d_encoding (di, 0); if (d_next_char (di) != 'E') return NULL; if (d_peek_char (di) == 's') { d_advance (di, 1); if (! d_discriminator (di)) return NULL; return d_make_comp (di, DEMANGLE_COMPONENT_LOCAL_NAME, function, d_make_name (di, "string literal", sizeof "string literal" - 1)); } else { struct demangle_component *name; name = d_name (di); if (! d_discriminator (di)) return NULL; return d_make_comp (di, DEMANGLE_COMPONENT_LOCAL_NAME, function, name); } } /* ::= _ <(non-negative) number> We demangle the discriminator, but we don't print it out. FIXME: We should print it out in verbose mode. */ static int d_discriminator (di) struct d_info *di; { long discrim; if (d_peek_char (di) != '_') return 1; d_advance (di, 1); discrim = d_number (di); if (discrim < 0) return 0; return 1; } /* Add a new substitution. */ static int d_add_substitution (di, dc) struct d_info *di; struct demangle_component *dc; { if (dc == NULL) return 0; if (di->next_sub >= di->num_subs) return 0; di->subs[di->next_sub] = dc; ++di->next_sub; return 1; } /* ::= S _ ::= S_ ::= St ::= Sa ::= Sb ::= Ss ::= Si ::= So ::= Sd If PREFIX is non-zero, then this type is being used as a prefix in a qualified name. In this case, for the standard substitutions, we need to check whether we are being used as a prefix for a constructor or destructor, and return a full template name. Otherwise we will get something like std::iostream::~iostream() which does not correspond particularly well to any function which actually appears in the source. 
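*/

/* Editor's illustration -- not part of the original libiberty source.
   Apart from the named std:: shorthands handled by the table below, a
   substitution is written "S_" for the first recorded candidate and
   "S<seq-id>_" for the rest, where the sequence id counts from 0 in base
   36 using the decimal digits and the upper-case letters.  The decoder in
   d_substitution, defined below, turns that back into an index; this
   stand-alone copy of the arithmetic shows the mapping.  seq_id_to_index
   is an invented name; the block is under "#if 0" and never compiled
   here.  */
#if 0
#include <stdio.h>

/* Return the substitution index encoded between 'S' and '_', or -1 if
   the text is not a valid sequence id.  "_" is index 0, "0_" is 1,
   "Z_" is 36, "10_" is 37, and so on.  */
static int
seq_id_to_index (const char *s)
{
  int id = 0;

  if (*s == '_')
    return 0;
  while (*s != '_')
    {
      if (*s >= '0' && *s <= '9')
        id = id * 36 + (*s - '0');
      else if (*s >= 'A' && *s <= 'Z')
        id = id * 36 + (*s - 'A' + 10);
      else
        return -1;
      ++s;
    }
  return id + 1;
}

int
main (void)
{
  printf ("%d %d %d %d\n",
          seq_id_to_index ("_"),    /* 0  */
          seq_id_to_index ("0_"),   /* 1  */
          seq_id_to_index ("Z_"),   /* 36 */
          seq_id_to_index ("10_")); /* 37 */
  return 0;
}
#endif

/* (end of editor's illustration)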
*/ static const struct d_standard_sub_info standard_subs[] = { { 't', NL ("std"), NL ("std"), NULL, 0 }, { 'a', NL ("std::allocator"), NL ("std::allocator"), NL ("allocator") }, { 'b', NL ("std::basic_string"), NL ("std::basic_string"), NL ("basic_string") }, { 's', NL ("std::string"), NL ("std::basic_string, std::allocator >"), NL ("basic_string") }, { 'i', NL ("std::istream"), NL ("std::basic_istream >"), NL ("basic_istream") }, { 'o', NL ("std::ostream"), NL ("std::basic_ostream >"), NL ("basic_ostream") }, { 'd', NL ("std::iostream"), NL ("std::basic_iostream >"), NL ("basic_iostream") } }; static struct demangle_component * d_substitution (di, prefix) struct d_info *di; int prefix; { char c; if (d_next_char (di) != 'S') return NULL; c = d_next_char (di); if (c == '_' || IS_DIGIT (c) || IS_UPPER (c)) { int id; id = 0; if (c != '_') { do { if (IS_DIGIT (c)) id = id * 36 + c - '0'; else if (IS_UPPER (c)) id = id * 36 + c - 'A' + 10; else return NULL; c = d_next_char (di); } while (c != '_'); ++id; } if (id >= di->next_sub) return NULL; ++di->did_subs; return di->subs[id]; } else { int verbose; const struct d_standard_sub_info *p; const struct d_standard_sub_info *pend; verbose = (di->options & DMGL_VERBOSE) != 0; if (! verbose && prefix) { char peek; peek = d_peek_char (di); if (peek == 'C' || peek == 'D') verbose = 1; } pend = (&standard_subs[0] + sizeof standard_subs / sizeof standard_subs[0]); for (p = &standard_subs[0]; p < pend; ++p) { if (c == p->code) { const char *s; int len; if (p->set_last_name != NULL) di->last_name = d_make_sub (di, p->set_last_name, p->set_last_name_len); if (verbose) { s = p->full_expansion; len = p->full_len; } else { s = p->simple_expansion; len = p->simple_len; } di->expansion += len; return d_make_sub (di, s, len); } } return NULL; } } /* Resize the print buffer. */ static void d_print_resize (dpi, add) struct d_print_info *dpi; size_t add; { size_t need; if (dpi->buf == NULL) return; need = dpi->len + add; while (need > dpi->alc) { size_t newalc; char *newbuf; newalc = dpi->alc * 2; newbuf = realloc (dpi->buf, newalc); if (newbuf == NULL) { free (dpi->buf); dpi->buf = NULL; dpi->allocation_failure = 1; return; } dpi->buf = newbuf; dpi->alc = newalc; } } /* Append a character to the print buffer. */ static void d_print_append_char (dpi, c) struct d_print_info *dpi; int c; { if (dpi->buf != NULL) { if (dpi->len >= dpi->alc) { d_print_resize (dpi, 1); if (dpi->buf == NULL) return; } dpi->buf[dpi->len] = c; ++dpi->len; } } /* Append a buffer to the print buffer. */ static void d_print_append_buffer (dpi, s, l) struct d_print_info *dpi; const char *s; size_t l; { if (dpi->buf != NULL) { if (dpi->len + l > dpi->alc) { d_print_resize (dpi, l); if (dpi->buf == NULL) return; } memcpy (dpi->buf + dpi->len, s, l); dpi->len += l; } } /* Indicate that an error occurred during printing. */ static void d_print_error (dpi) struct d_print_info *dpi; { free (dpi->buf); dpi->buf = NULL; } /* Turn components into a human readable string. OPTIONS is the options bits passed to the demangler. DC is the tree to print. ESTIMATE is a guess at the length of the result. This returns a string allocated by malloc, or NULL on error. On success, this sets *PALC to the size of the allocated buffer. On failure, this sets *PALC to 0 for a bad parse, or to 1 for a memory allocation failure. 
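*/

/* Editor's illustration -- not part of the original libiberty source.  The
   output routines above keep a single malloc'd buffer and double it
   whenever an append would not fit, so the demangled text is assembled
   with only a logarithmic number of reallocations.  A stand-alone buffer
   with the same growth policy; the names sbuf, sbuf_init and sbuf_append
   are invented for the sketch, which is under "#if 0" and never compiled
   here.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sbuf
{
  char *buf;
  size_t len;
  size_t alc;
};

static int
sbuf_init (struct sbuf *b, size_t hint)
{
  b->len = 0;
  b->alc = hint ? hint : 1;
  b->buf = malloc (b->alc);
  return b->buf != NULL;
}

/* Append L bytes of S, doubling the allocation until it fits.  */
static int
sbuf_append (struct sbuf *b, const char *s, size_t l)
{
  while (b->len + l > b->alc)
    {
      char *nb = realloc (b->buf, b->alc * 2);

      if (nb == NULL)
        return 0;
      b->buf = nb;
      b->alc *= 2;
    }
  memcpy (b->buf + b->len, s, l);
  b->len += l;
  return 1;
}

int
main (void)
{
  struct sbuf b;

  if (! sbuf_init (&b, 4))
    return 1;
  sbuf_append (&b, "operator", 8);
  sbuf_append (&b, "<<", 2);
  sbuf_append (&b, "", 1);   /* Trailing NUL.  */
  /* Prints "operator<< (capacity 16)".  */
  printf ("%s (capacity %lu)\n", b.buf, (unsigned long) b.alc);
  free (b.buf);
  return 0;
}
#endif

/* (end of editor's illustration)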
*/ CP_STATIC_IF_GLIBCPP_V3 char * cplus_demangle_print (options, dc, estimate, palc) int options; const struct demangle_component *dc; int estimate; size_t *palc; { struct d_print_info dpi; dpi.options = options; dpi.alc = estimate + 1; dpi.buf = malloc (dpi.alc); if (dpi.buf == NULL) { *palc = 1; return NULL; } dpi.len = 0; dpi.templates = NULL; dpi.modifiers = NULL; dpi.allocation_failure = 0; d_print_comp (&dpi, dc); d_append_char (&dpi, '\0'); if (dpi.buf != NULL) *palc = dpi.alc; else *palc = dpi.allocation_failure; return dpi.buf; } /* Subroutine to handle components. */ static void d_print_comp (dpi, dc) struct d_print_info *dpi; const struct demangle_component *dc; { if (dc == NULL) { d_print_error (dpi); return; } if (d_print_saw_error (dpi)) return; switch (dc->type) { case DEMANGLE_COMPONENT_NAME: if ((dpi->options & DMGL_JAVA) == 0) d_append_buffer (dpi, dc->u.s_name.s, dc->u.s_name.len); else d_print_java_identifier (dpi, dc->u.s_name.s, dc->u.s_name.len); return; case DEMANGLE_COMPONENT_QUAL_NAME: case DEMANGLE_COMPONENT_LOCAL_NAME: d_print_comp (dpi, d_left (dc)); if ((dpi->options & DMGL_JAVA) == 0) d_append_string_constant (dpi, "::"); else d_append_char (dpi, '.'); d_print_comp (dpi, d_right (dc)); return; case DEMANGLE_COMPONENT_TYPED_NAME: { struct d_print_mod *hold_modifiers; struct demangle_component *typed_name; struct d_print_mod adpm[4]; unsigned int i; struct d_print_template dpt; /* Pass the name down to the type so that it can be printed in the right place for the type. We also have to pass down any CV-qualifiers, which apply to the this parameter. */ hold_modifiers = dpi->modifiers; i = 0; typed_name = d_left (dc); while (typed_name != NULL) { if (i >= sizeof adpm / sizeof adpm[0]) { d_print_error (dpi); return; } adpm[i].next = dpi->modifiers; dpi->modifiers = &adpm[i]; adpm[i].mod = typed_name; adpm[i].printed = 0; adpm[i].templates = dpi->templates; ++i; if (typed_name->type != DEMANGLE_COMPONENT_RESTRICT_THIS && typed_name->type != DEMANGLE_COMPONENT_VOLATILE_THIS && typed_name->type != DEMANGLE_COMPONENT_CONST_THIS) break; typed_name = d_left (typed_name); } /* If typed_name is a template, then it applies to the function type as well. */ if (typed_name->type == DEMANGLE_COMPONENT_TEMPLATE) { dpt.next = dpi->templates; dpi->templates = &dpt; dpt.template = typed_name; } /* If typed_name is a DEMANGLE_COMPONENT_LOCAL_NAME, then there may be CV-qualifiers on its right argument which really apply here; this happens when parsing a class which is local to a function. */ if (typed_name->type == DEMANGLE_COMPONENT_LOCAL_NAME) { struct demangle_component *local_name; local_name = d_right (typed_name); while (local_name->type == DEMANGLE_COMPONENT_RESTRICT_THIS || local_name->type == DEMANGLE_COMPONENT_VOLATILE_THIS || local_name->type == DEMANGLE_COMPONENT_CONST_THIS) { if (i >= sizeof adpm / sizeof adpm[0]) { d_print_error (dpi); return; } adpm[i] = adpm[i - 1]; adpm[i].next = &adpm[i - 1]; dpi->modifiers = &adpm[i]; adpm[i - 1].mod = local_name; adpm[i - 1].printed = 0; adpm[i - 1].templates = dpi->templates; ++i; local_name = d_left (local_name); } } d_print_comp (dpi, d_right (dc)); if (typed_name->type == DEMANGLE_COMPONENT_TEMPLATE) dpi->templates = dpt.next; /* If the modifiers didn't get printed by the type, print them now. */ while (i > 0) { --i; if (! 
adpm[i].printed) { d_append_char (dpi, ' '); d_print_mod (dpi, adpm[i].mod); } } dpi->modifiers = hold_modifiers; return; } case DEMANGLE_COMPONENT_TEMPLATE: { struct d_print_mod *hold_dpm; /* Don't push modifiers into a template definition. Doing so could give the wrong definition for a template argument. Instead, treat the template essentially as a name. */ hold_dpm = dpi->modifiers; dpi->modifiers = NULL; d_print_comp (dpi, d_left (dc)); if (d_last_char (dpi) == '<') d_append_char (dpi, ' '); d_append_char (dpi, '<'); d_print_comp (dpi, d_right (dc)); /* Avoid generating two consecutive '>' characters, to avoid the C++ syntactic ambiguity. */ if (d_last_char (dpi) == '>') d_append_char (dpi, ' '); d_append_char (dpi, '>'); dpi->modifiers = hold_dpm; return; } case DEMANGLE_COMPONENT_TEMPLATE_PARAM: { long i; struct demangle_component *a; struct d_print_template *hold_dpt; if (dpi->templates == NULL) { d_print_error (dpi); return; } i = dc->u.s_number.number; for (a = d_right (dpi->templates->template); a != NULL; a = d_right (a)) { if (a->type != DEMANGLE_COMPONENT_TEMPLATE_ARGLIST) { d_print_error (dpi); return; } if (i <= 0) break; --i; } if (i != 0 || a == NULL) { d_print_error (dpi); return; } /* While processing this parameter, we need to pop the list of templates. This is because the template parameter may itself be a reference to a parameter of an outer template. */ hold_dpt = dpi->templates; dpi->templates = hold_dpt->next; d_print_comp (dpi, d_left (a)); dpi->templates = hold_dpt; return; } case DEMANGLE_COMPONENT_CTOR: d_print_comp (dpi, dc->u.s_ctor.name); return; case DEMANGLE_COMPONENT_DTOR: d_append_char (dpi, '~'); d_print_comp (dpi, dc->u.s_dtor.name); return; case DEMANGLE_COMPONENT_VTABLE: d_append_string_constant (dpi, "vtable for "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_VTT: d_append_string_constant (dpi, "VTT for "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_CONSTRUCTION_VTABLE: d_append_string_constant (dpi, "construction vtable for "); d_print_comp (dpi, d_left (dc)); d_append_string_constant (dpi, "-in-"); d_print_comp (dpi, d_right (dc)); return; case DEMANGLE_COMPONENT_TYPEINFO: d_append_string_constant (dpi, "typeinfo for "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_TYPEINFO_NAME: d_append_string_constant (dpi, "typeinfo name for "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_TYPEINFO_FN: d_append_string_constant (dpi, "typeinfo fn for "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_THUNK: d_append_string_constant (dpi, "non-virtual thunk to "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_VIRTUAL_THUNK: d_append_string_constant (dpi, "virtual thunk to "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_COVARIANT_THUNK: d_append_string_constant (dpi, "covariant return thunk to "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_JAVA_CLASS: d_append_string_constant (dpi, "java Class for "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_GUARD: d_append_string_constant (dpi, "guard variable for "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_REFTEMP: d_append_string_constant (dpi, "reference temporary for "); d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_SUB_STD: d_append_buffer (dpi, dc->u.s_string.string, dc->u.s_string.len); return; case DEMANGLE_COMPONENT_RESTRICT: case DEMANGLE_COMPONENT_VOLATILE: case DEMANGLE_COMPONENT_CONST: { 
struct d_print_mod *pdpm; /* When printing arrays, it's possible to have cases where the same CV-qualifier gets pushed on the stack multiple times. We only need to print it once. */ for (pdpm = dpi->modifiers; pdpm != NULL; pdpm = pdpm->next) { if (! pdpm->printed) { if (pdpm->mod->type != DEMANGLE_COMPONENT_RESTRICT && pdpm->mod->type != DEMANGLE_COMPONENT_VOLATILE && pdpm->mod->type != DEMANGLE_COMPONENT_CONST) break; if (pdpm->mod->type == dc->type) { d_print_comp (dpi, d_left (dc)); return; } } } } /* Fall through. */ case DEMANGLE_COMPONENT_RESTRICT_THIS: case DEMANGLE_COMPONENT_VOLATILE_THIS: case DEMANGLE_COMPONENT_CONST_THIS: case DEMANGLE_COMPONENT_VENDOR_TYPE_QUAL: case DEMANGLE_COMPONENT_POINTER: case DEMANGLE_COMPONENT_REFERENCE: case DEMANGLE_COMPONENT_COMPLEX: case DEMANGLE_COMPONENT_IMAGINARY: { /* We keep a list of modifiers on the stack. */ struct d_print_mod dpm; dpm.next = dpi->modifiers; dpi->modifiers = &dpm; dpm.mod = dc; dpm.printed = 0; dpm.templates = dpi->templates; d_print_comp (dpi, d_left (dc)); /* If the modifier didn't get printed by the type, print it now. */ if (! dpm.printed) d_print_mod (dpi, dc); dpi->modifiers = dpm.next; return; } case DEMANGLE_COMPONENT_BUILTIN_TYPE: if ((dpi->options & DMGL_JAVA) == 0) d_append_buffer (dpi, dc->u.s_builtin.type->name, dc->u.s_builtin.type->len); else d_append_buffer (dpi, dc->u.s_builtin.type->java_name, dc->u.s_builtin.type->java_len); return; case DEMANGLE_COMPONENT_VENDOR_TYPE: d_print_comp (dpi, d_left (dc)); return; case DEMANGLE_COMPONENT_FUNCTION_TYPE: { if (d_left (dc) != NULL) { struct d_print_mod dpm; /* We must pass this type down as a modifier in order to print it in the right location. */ dpm.next = dpi->modifiers; dpi->modifiers = &dpm; dpm.mod = dc; dpm.printed = 0; dpm.templates = dpi->templates; d_print_comp (dpi, d_left (dc)); dpi->modifiers = dpm.next; if (dpm.printed) return; d_append_char (dpi, ' '); } d_print_function_type (dpi, dc, dpi->modifiers); return; } case DEMANGLE_COMPONENT_ARRAY_TYPE: { struct d_print_mod *hold_modifiers; struct d_print_mod adpm[4]; unsigned int i; struct d_print_mod *pdpm; /* We must pass this type down as a modifier in order to print multi-dimensional arrays correctly. If the array itself is CV-qualified, we act as though the element type were CV-qualified. We do this by copying the modifiers down rather than fiddling pointers, so that we don't wind up with a d_print_mod higher on the stack pointing into our stack frame after we return. */ hold_modifiers = dpi->modifiers; adpm[0].next = hold_modifiers; dpi->modifiers = &adpm[0]; adpm[0].mod = dc; adpm[0].printed = 0; adpm[0].templates = dpi->templates; i = 1; pdpm = hold_modifiers; while (pdpm != NULL && (pdpm->mod->type == DEMANGLE_COMPONENT_RESTRICT || pdpm->mod->type == DEMANGLE_COMPONENT_VOLATILE || pdpm->mod->type == DEMANGLE_COMPONENT_CONST)) { if (! 
pdpm->printed) { if (i >= sizeof adpm / sizeof adpm[0]) { d_print_error (dpi); return; } adpm[i] = *pdpm; adpm[i].next = dpi->modifiers; dpi->modifiers = &adpm[i]; pdpm->printed = 1; ++i; } pdpm = pdpm->next; } d_print_comp (dpi, d_right (dc)); dpi->modifiers = hold_modifiers; if (adpm[0].printed) return; while (i > 1) { --i; d_print_mod (dpi, adpm[i].mod); } d_print_array_type (dpi, dc, dpi->modifiers); return; } case DEMANGLE_COMPONENT_PTRMEM_TYPE: { struct d_print_mod dpm; dpm.next = dpi->modifiers; dpi->modifiers = &dpm; dpm.mod = dc; dpm.printed = 0; dpm.templates = dpi->templates; d_print_comp (dpi, d_right (dc)); /* If the modifier didn't get printed by the type, print it now. */ if (! dpm.printed) { d_append_char (dpi, ' '); d_print_comp (dpi, d_left (dc)); d_append_string_constant (dpi, "::*"); } dpi->modifiers = dpm.next; return; } case DEMANGLE_COMPONENT_ARGLIST: case DEMANGLE_COMPONENT_TEMPLATE_ARGLIST: d_print_comp (dpi, d_left (dc)); if (d_right (dc) != NULL) { d_append_string_constant (dpi, ", "); d_print_comp (dpi, d_right (dc)); } return; case DEMANGLE_COMPONENT_OPERATOR: { char c; d_append_string_constant (dpi, "operator"); c = dc->u.s_operator.op->name[0]; if (IS_LOWER (c)) d_append_char (dpi, ' '); d_append_buffer (dpi, dc->u.s_operator.op->name, dc->u.s_operator.op->len); return; } case DEMANGLE_COMPONENT_EXTENDED_OPERATOR: d_append_string_constant (dpi, "operator "); d_print_comp (dpi, dc->u.s_extended_operator.name); return; case DEMANGLE_COMPONENT_CAST: d_append_string_constant (dpi, "operator "); d_print_cast (dpi, dc); return; case DEMANGLE_COMPONENT_UNARY: if (d_left (dc)->type != DEMANGLE_COMPONENT_CAST) d_print_expr_op (dpi, d_left (dc)); else { d_append_char (dpi, '('); d_print_cast (dpi, d_left (dc)); d_append_char (dpi, ')'); } d_append_char (dpi, '('); d_print_comp (dpi, d_right (dc)); d_append_char (dpi, ')'); return; case DEMANGLE_COMPONENT_BINARY: if (d_right (dc)->type != DEMANGLE_COMPONENT_BINARY_ARGS) { d_print_error (dpi); return; } /* We wrap an expression which uses the greater-than operator in an extra layer of parens so that it does not get confused with the '>' which ends the template parameters. */ if (d_left (dc)->type == DEMANGLE_COMPONENT_OPERATOR && d_left (dc)->u.s_operator.op->len == 1 && d_left (dc)->u.s_operator.op->name[0] == '>') d_append_char (dpi, '('); d_append_char (dpi, '('); d_print_comp (dpi, d_left (d_right (dc))); d_append_string_constant (dpi, ") "); d_print_expr_op (dpi, d_left (dc)); d_append_string_constant (dpi, " ("); d_print_comp (dpi, d_right (d_right (dc))); d_append_char (dpi, ')'); if (d_left (dc)->type == DEMANGLE_COMPONENT_OPERATOR && d_left (dc)->u.s_operator.op->len == 1 && d_left (dc)->u.s_operator.op->name[0] == '>') d_append_char (dpi, ')'); return; case DEMANGLE_COMPONENT_BINARY_ARGS: /* We should only see this as part of DEMANGLE_COMPONENT_BINARY. 
*/ d_print_error (dpi); return; case DEMANGLE_COMPONENT_TRINARY: if (d_right (dc)->type != DEMANGLE_COMPONENT_TRINARY_ARG1 || d_right (d_right (dc))->type != DEMANGLE_COMPONENT_TRINARY_ARG2) { d_print_error (dpi); return; } d_append_char (dpi, '('); d_print_comp (dpi, d_left (d_right (dc))); d_append_string_constant (dpi, ") "); d_print_expr_op (dpi, d_left (dc)); d_append_string_constant (dpi, " ("); d_print_comp (dpi, d_left (d_right (d_right (dc)))); d_append_string_constant (dpi, ") : ("); d_print_comp (dpi, d_right (d_right (d_right (dc)))); d_append_char (dpi, ')'); return; case DEMANGLE_COMPONENT_TRINARY_ARG1: case DEMANGLE_COMPONENT_TRINARY_ARG2: /* We should only see these are part of DEMANGLE_COMPONENT_TRINARY. */ d_print_error (dpi); return; case DEMANGLE_COMPONENT_LITERAL: case DEMANGLE_COMPONENT_LITERAL_NEG: { enum d_builtin_type_print tp; /* For some builtin types, produce simpler output. */ tp = D_PRINT_DEFAULT; if (d_left (dc)->type == DEMANGLE_COMPONENT_BUILTIN_TYPE) { tp = d_left (dc)->u.s_builtin.type->print; switch (tp) { case D_PRINT_INT: case D_PRINT_UNSIGNED: case D_PRINT_LONG: case D_PRINT_UNSIGNED_LONG: case D_PRINT_LONG_LONG: case D_PRINT_UNSIGNED_LONG_LONG: if (d_right (dc)->type == DEMANGLE_COMPONENT_NAME) { if (dc->type == DEMANGLE_COMPONENT_LITERAL_NEG) d_append_char (dpi, '-'); d_print_comp (dpi, d_right (dc)); switch (tp) { default: break; case D_PRINT_UNSIGNED: d_append_char (dpi, 'u'); break; case D_PRINT_LONG: d_append_char (dpi, 'l'); break; case D_PRINT_UNSIGNED_LONG: d_append_string_constant (dpi, "ul"); break; case D_PRINT_LONG_LONG: d_append_string_constant (dpi, "ll"); break; case D_PRINT_UNSIGNED_LONG_LONG: d_append_string_constant (dpi, "ull"); break; } return; } break; case D_PRINT_BOOL: if (d_right (dc)->type == DEMANGLE_COMPONENT_NAME && d_right (dc)->u.s_name.len == 1 && dc->type == DEMANGLE_COMPONENT_LITERAL) { switch (d_right (dc)->u.s_name.s[0]) { case '0': d_append_string_constant (dpi, "false"); return; case '1': d_append_string_constant (dpi, "true"); return; default: break; } } break; default: break; } } d_append_char (dpi, '('); d_print_comp (dpi, d_left (dc)); d_append_char (dpi, ')'); if (dc->type == DEMANGLE_COMPONENT_LITERAL_NEG) d_append_char (dpi, '-'); if (tp == D_PRINT_FLOAT) d_append_char (dpi, '['); d_print_comp (dpi, d_right (dc)); if (tp == D_PRINT_FLOAT) d_append_char (dpi, ']'); } return; default: d_print_error (dpi); return; } } /* Print a Java dentifier. For Java we try to handle encoded extended Unicode characters. The C++ ABI doesn't mention Unicode encoding, so we don't it for C++. Characters are encoded as __U+_. */ static void d_print_java_identifier (dpi, name, len) struct d_print_info *dpi; const char *name; int len; { const char *p; const char *end; end = name + len; for (p = name; p < end; ++p) { if (end - p > 3 && p[0] == '_' && p[1] == '_' && p[2] == 'U') { unsigned long c; const char *q; c = 0; for (q = p + 3; q < end; ++q) { int dig; if (IS_DIGIT (*q)) dig = *q - '0'; else if (*q >= 'A' && *q <= 'F') dig = *q - 'A' + 10; else if (*q >= 'a' && *q <= 'f') dig = *q - 'a' + 10; else break; c = c * 16 + dig; } /* If the Unicode character is larger than 256, we don't try to deal with it here. FIXME. */ if (q < end && *q == '_' && c < 256) { d_append_char (dpi, c); p = q; continue; } } d_append_char (dpi, *p); } } /* Print a list of modifiers. SUFFIX is 1 if we are printing qualifiers on this after printing a function. 
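*/

/* Editor's illustration -- not part of the original libiberty source.  The
   Java identifier printer defined above rewrites escapes of the form
   "__U<hex>_" back into the character they encode, but only for values
   below 256.  The same transformation on a NUL-terminated string;
   decode_java_escapes is an invented name and the sketch simply prints
   its result.  Kept under "#if 0" so it is never compiled here.  */
#if 0
#include <stdio.h>

static void
decode_java_escapes (const char *p)
{
  while (*p != '\0')
    {
      if (p[0] == '_' && p[1] == '_' && p[2] == 'U')
        {
          unsigned long c = 0;
          const char *q = p + 3;

          while ((*q >= '0' && *q <= '9')
                 || (*q >= 'a' && *q <= 'f')
                 || (*q >= 'A' && *q <= 'F'))
            {
              int dig;

              if (*q >= '0' && *q <= '9')
                dig = *q - '0';
              else if (*q >= 'a' && *q <= 'f')
                dig = *q - 'a' + 10;
              else
                dig = *q - 'A' + 10;
              c = c * 16 + dig;
              ++q;
            }
          /* Only accept the escape if it is terminated by '_' and fits
             in a single byte, as the real printer does.  */
          if (*q == '_' && c < 256)
            {
              putchar ((int) c);
              p = q + 1;
              continue;
            }
        }
      putchar (*p);
      ++p;
    }
  putchar ('\n');
}

int
main (void)
{
  /* Prints "caf" followed by the byte 0xE9 (e-acute in Latin-1).  */
  decode_java_escapes ("caf__Ue9_");
  return 0;
}
#endif

/* (end of editor's illustration)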
*/ static void d_print_mod_list (dpi, mods, suffix) struct d_print_info *dpi; struct d_print_mod *mods; int suffix; { struct d_print_template *hold_dpt; if (mods == NULL || d_print_saw_error (dpi)) return; if (mods->printed || (! suffix && (mods->mod->type == DEMANGLE_COMPONENT_RESTRICT_THIS || mods->mod->type == DEMANGLE_COMPONENT_VOLATILE_THIS || mods->mod->type == DEMANGLE_COMPONENT_CONST_THIS))) { d_print_mod_list (dpi, mods->next, suffix); return; } mods->printed = 1; hold_dpt = dpi->templates; dpi->templates = mods->templates; if (mods->mod->type == DEMANGLE_COMPONENT_FUNCTION_TYPE) { d_print_function_type (dpi, mods->mod, mods->next); dpi->templates = hold_dpt; return; } else if (mods->mod->type == DEMANGLE_COMPONENT_ARRAY_TYPE) { d_print_array_type (dpi, mods->mod, mods->next); dpi->templates = hold_dpt; return; } else if (mods->mod->type == DEMANGLE_COMPONENT_LOCAL_NAME) { struct d_print_mod *hold_modifiers; struct demangle_component *dc; /* When this is on the modifier stack, we have pulled any qualifiers off the right argument already. Otherwise, we print it as usual, but don't let the left argument see any modifiers. */ hold_modifiers = dpi->modifiers; dpi->modifiers = NULL; d_print_comp (dpi, d_left (mods->mod)); dpi->modifiers = hold_modifiers; if ((dpi->options & DMGL_JAVA) == 0) d_append_string_constant (dpi, "::"); else d_append_char (dpi, '.'); dc = d_right (mods->mod); while (dc->type == DEMANGLE_COMPONENT_RESTRICT_THIS || dc->type == DEMANGLE_COMPONENT_VOLATILE_THIS || dc->type == DEMANGLE_COMPONENT_CONST_THIS) dc = d_left (dc); d_print_comp (dpi, dc); dpi->templates = hold_dpt; return; } d_print_mod (dpi, mods->mod); dpi->templates = hold_dpt; d_print_mod_list (dpi, mods->next, suffix); } /* Print a modifier. */ static void d_print_mod (dpi, mod) struct d_print_info *dpi; const struct demangle_component *mod; { switch (mod->type) { case DEMANGLE_COMPONENT_RESTRICT: case DEMANGLE_COMPONENT_RESTRICT_THIS: d_append_string_constant (dpi, " restrict"); return; case DEMANGLE_COMPONENT_VOLATILE: case DEMANGLE_COMPONENT_VOLATILE_THIS: d_append_string_constant (dpi, " volatile"); return; case DEMANGLE_COMPONENT_CONST: case DEMANGLE_COMPONENT_CONST_THIS: d_append_string_constant (dpi, " const"); return; case DEMANGLE_COMPONENT_VENDOR_TYPE_QUAL: d_append_char (dpi, ' '); d_print_comp (dpi, d_right (mod)); return; case DEMANGLE_COMPONENT_POINTER: /* There is no pointer symbol in Java. */ if ((dpi->options & DMGL_JAVA) == 0) d_append_char (dpi, '*'); return; case DEMANGLE_COMPONENT_REFERENCE: d_append_char (dpi, '&'); return; case DEMANGLE_COMPONENT_COMPLEX: d_append_string_constant (dpi, "complex "); return; case DEMANGLE_COMPONENT_IMAGINARY: d_append_string_constant (dpi, "imaginary "); return; case DEMANGLE_COMPONENT_PTRMEM_TYPE: if (d_last_char (dpi) != '(') d_append_char (dpi, ' '); d_print_comp (dpi, d_left (mod)); d_append_string_constant (dpi, "::*"); return; case DEMANGLE_COMPONENT_TYPED_NAME: d_print_comp (dpi, d_left (mod)); return; default: /* Otherwise, we have something that won't go back on the modifier stack, so we can just print it. */ d_print_comp (dpi, mod); return; } } /* Print a function type, except for the return type. 
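*/

/* Editor's illustration -- not part of the original libiberty source.  It
   shows why the modifier stack feeds the function-type printer defined
   below: a pointer applied to a function type has to appear between the
   return type and the argument list, inside its own parentheses, rather
   than being tacked onto the end.  The snprintf call just assembles that
   shape by hand for one case; a mangled name such as "_Z1fPFicE" should
   come out as "f(int (*)(char))".  Under "#if 0", never compiled here.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const char *return_type = "int";
  const char *modifiers = "*";     /* What the pointer contributes.  */
  const char *arguments = "char";
  char out[64];

  snprintf (out, sizeof out, "%s (%s)(%s)",
            return_type, modifiers, arguments);
  printf ("%s\n", out);            /* int (*)(char) */
  return 0;
}
#endif

/* (end of editor's illustration)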
*/ static void d_print_function_type (dpi, dc, mods) struct d_print_info *dpi; const struct demangle_component *dc; struct d_print_mod *mods; { int need_paren; int saw_mod; int need_space; struct d_print_mod *p; struct d_print_mod *hold_modifiers; need_paren = 0; saw_mod = 0; need_space = 0; for (p = mods; p != NULL; p = p->next) { if (p->printed) break; saw_mod = 1; switch (p->mod->type) { case DEMANGLE_COMPONENT_POINTER: case DEMANGLE_COMPONENT_REFERENCE: need_paren = 1; break; case DEMANGLE_COMPONENT_RESTRICT: case DEMANGLE_COMPONENT_VOLATILE: case DEMANGLE_COMPONENT_CONST: case DEMANGLE_COMPONENT_VENDOR_TYPE_QUAL: case DEMANGLE_COMPONENT_COMPLEX: case DEMANGLE_COMPONENT_IMAGINARY: case DEMANGLE_COMPONENT_PTRMEM_TYPE: need_space = 1; need_paren = 1; break; case DEMANGLE_COMPONENT_RESTRICT_THIS: case DEMANGLE_COMPONENT_VOLATILE_THIS: case DEMANGLE_COMPONENT_CONST_THIS: break; default: break; } if (need_paren) break; } if (d_left (dc) != NULL && ! saw_mod) need_paren = 1; if (need_paren) { if (! need_space) { if (d_last_char (dpi) != '(' && d_last_char (dpi) != '*') need_space = 1; } if (need_space && d_last_char (dpi) != ' ') d_append_char (dpi, ' '); d_append_char (dpi, '('); } hold_modifiers = dpi->modifiers; dpi->modifiers = NULL; d_print_mod_list (dpi, mods, 0); if (need_paren) d_append_char (dpi, ')'); d_append_char (dpi, '('); if (d_right (dc) != NULL) d_print_comp (dpi, d_right (dc)); d_append_char (dpi, ')'); d_print_mod_list (dpi, mods, 1); dpi->modifiers = hold_modifiers; } /* Print an array type, except for the element type. */ static void d_print_array_type (dpi, dc, mods) struct d_print_info *dpi; const struct demangle_component *dc; struct d_print_mod *mods; { int need_space; need_space = 1; if (mods != NULL) { int need_paren; struct d_print_mod *p; need_paren = 0; for (p = mods; p != NULL; p = p->next) { if (! p->printed) { if (p->mod->type == DEMANGLE_COMPONENT_ARRAY_TYPE) { need_space = 0; break; } else { need_paren = 1; need_space = 1; break; } } } if (need_paren) d_append_string_constant (dpi, " ("); d_print_mod_list (dpi, mods, 0); if (need_paren) d_append_char (dpi, ')'); } if (need_space) d_append_char (dpi, ' '); d_append_char (dpi, '['); if (d_left (dc) != NULL) d_print_comp (dpi, d_left (dc)); d_append_char (dpi, ']'); } /* Print an operator in an expression. */ static void d_print_expr_op (dpi, dc) struct d_print_info *dpi; const struct demangle_component *dc; { if (dc->type == DEMANGLE_COMPONENT_OPERATOR) d_append_buffer (dpi, dc->u.s_operator.op->name, dc->u.s_operator.op->len); else d_print_comp (dpi, dc); } /* Print a cast. */ static void d_print_cast (dpi, dc) struct d_print_info *dpi; const struct demangle_component *dc; { if (d_left (dc)->type != DEMANGLE_COMPONENT_TEMPLATE) d_print_comp (dpi, d_left (dc)); else { struct d_print_mod *hold_dpm; struct d_print_template dpt; /* It appears that for a templated cast operator, we need to put the template parameters in scope for the operator name, but not for the parameters. The effect is that we need to handle the template printing here. */ hold_dpm = dpi->modifiers; dpi->modifiers = NULL; dpt.next = dpi->templates; dpi->templates = &dpt; dpt.template = d_left (dc); d_print_comp (dpi, d_left (d_left (dc))); dpi->templates = dpt.next; if (d_last_char (dpi) == '<') d_append_char (dpi, ' '); d_append_char (dpi, '<'); d_print_comp (dpi, d_right (d_left (dc))); /* Avoid generating two consecutive '>' characters, to avoid the C++ syntactic ambiguity. 
*/ if (d_last_char (dpi) == '>') d_append_char (dpi, ' '); d_append_char (dpi, '>'); dpi->modifiers = hold_dpm; } } /* Initialize the information structure we use to pass around information. */ CP_STATIC_IF_GLIBCPP_V3 void cplus_demangle_init_info (mangled, options, len, di) const char *mangled; int options; size_t len; struct d_info *di; { di->s = mangled; di->send = mangled + len; di->options = options; di->n = mangled; /* We can not need more components than twice the number of chars in the mangled string. Most components correspond directly to chars, but the ARGLIST types are exceptions. */ di->num_comps = 2 * len; di->next_comp = 0; /* Similarly, we can not need more substitutions than there are chars in the mangled string. */ di->num_subs = len; di->next_sub = 0; di->did_subs = 0; di->last_name = NULL; di->expansion = 0; } /* Entry point for the demangler. If MANGLED is a g++ v3 ABI mangled name, return a buffer allocated with malloc holding the demangled name. OPTIONS is the usual libiberty demangler options. On success, this sets *PALC to the allocated size of the returned buffer. On failure, this sets *PALC to 0 for a bad name, or 1 for a memory allocation failure. On failure, this returns NULL. */ static char * d_demangle (mangled, options, palc) const char* mangled; int options; size_t *palc; { size_t len; int type; struct d_info di; struct demangle_component *dc; int estimate; char *ret; *palc = 0; len = strlen (mangled); if (mangled[0] == '_' && mangled[1] == 'Z') type = 0; else if (strncmp (mangled, "_GLOBAL_", 8) == 0 && (mangled[8] == '.' || mangled[8] == '_' || mangled[8] == '$') && (mangled[9] == 'D' || mangled[9] == 'I') && mangled[10] == '_') { char *r; r = malloc (40 + len - 11); if (r == NULL) *palc = 1; else { if (mangled[9] == 'I') strcpy (r, "global constructors keyed to "); else strcpy (r, "global destructors keyed to "); strcat (r, mangled + 11); } return r; } else { if ((options & DMGL_TYPES) == 0) return NULL; type = 1; } cplus_demangle_init_info (mangled, options, len, &di); { #ifdef CP_DYNAMIC_ARRAYS __extension__ struct demangle_component comps[di.num_comps]; __extension__ struct demangle_component *subs[di.num_subs]; di.comps = &comps[0]; di.subs = &subs[0]; #else di.comps = ((struct demangle_component *) malloc (di.num_comps * sizeof (struct demangle_component))); di.subs = ((struct demangle_component **) malloc (di.num_subs * sizeof (struct demangle_component *))); if (di.comps == NULL || di.subs == NULL) { if (di.comps != NULL) free (di.comps); if (di.subs != NULL) free (di.subs); *palc = 1; return NULL; } #endif if (! type) dc = cplus_demangle_mangled_name (&di, 1); else dc = cplus_demangle_type (&di); /* If DMGL_PARAMS is set, then if we didn't consume the entire mangled string, then we didn't successfully demangle it. If DMGL_PARAMS is not set, we didn't look at the trailing parameters. */ if (((options & DMGL_PARAMS) != 0) && d_peek_char (&di) != '\0') dc = NULL; #ifdef CP_DEMANGLE_DEBUG if (dc == NULL) printf ("failed demangling\n"); else d_dump (dc, 0); #endif /* We try to guess the length of the demangled string, to minimize calls to realloc during demangling. 
*/ estimate = len + di.expansion + 10 * di.did_subs; estimate += estimate / 8; ret = NULL; if (dc != NULL) ret = cplus_demangle_print (options, dc, estimate, palc); #ifndef CP_DYNAMIC_ARRAYS free (di.comps); free (di.subs); #endif #ifdef CP_DEMANGLE_DEBUG if (ret != NULL) { int rlen; rlen = strlen (ret); if (rlen > 2 * estimate) printf ("*** Length %d much greater than estimate %d\n", rlen, estimate); else if (rlen > estimate) printf ("*** Length %d greater than estimate %d\n", rlen, estimate); else if (rlen < estimate / 2) printf ("*** Length %d much less than estimate %d\n", rlen, estimate); } #endif } return ret; } #if defined(IN_LIBGCC2) || defined(IN_GLIBCPP_V3) extern char *__cxa_demangle PARAMS ((const char *, char *, size_t *, int *)); /* ia64 ABI-mandated entry point in the C++ runtime library for performing demangling. MANGLED_NAME is a NUL-terminated character string containing the name to be demangled. OUTPUT_BUFFER is a region of memory, allocated with malloc, of *LENGTH bytes, into which the demangled name is stored. If OUTPUT_BUFFER is not long enough, it is expanded using realloc. OUTPUT_BUFFER may instead be NULL; in that case, the demangled name is placed in a region of memory allocated with malloc. If LENGTH is non-NULL, the length of the buffer conaining the demangled name, is placed in *LENGTH. The return value is a pointer to the start of the NUL-terminated demangled name, or NULL if the demangling fails. The caller is responsible for deallocating this memory using free. *STATUS is set to one of the following values: 0: The demangling operation succeeded. -1: A memory allocation failure occurred. -2: MANGLED_NAME is not a valid name under the C++ ABI mangling rules. -3: One of the arguments is invalid. The demangling is performed using the C++ ABI mangling rules, with GNU extensions. */ char * __cxa_demangle (mangled_name, output_buffer, length, status) const char *mangled_name; char *output_buffer; size_t *length; int *status; { char *demangled; size_t alc; if (mangled_name == NULL) { if (status != NULL) *status = -3; return NULL; } if (output_buffer != NULL && length == NULL) { if (status != NULL) *status = -3; return NULL; } /* The specification for __cxa_demangle() is that if the mangled name could be either an extern "C" identifier, or an internal built-in type name, then we resolve it as the identifier. All internal built-in type names are a single lower case character. Frankly, this simplistic disambiguation doesn't make sense to me, but it is documented, so we implement it here. */ if (IS_LOWER (mangled_name[0]) && mangled_name[1] == '\0' && cplus_demangle_builtin_types[mangled_name[0] - 'a'].name != NULL) { if (status != NULL) *status = -2; return NULL; } demangled = d_demangle (mangled_name, DMGL_PARAMS | DMGL_TYPES, &alc); if (demangled == NULL) { if (status != NULL) { if (alc == 1) *status = -1; else *status = -2; } return NULL; } if (output_buffer == NULL) { if (length != NULL) *length = alc; } else { if (strlen (demangled) < *length) { strcpy (output_buffer, demangled); free (demangled); demangled = output_buffer; } else { free (output_buffer); *length = alc; } } if (status != NULL) *status = 0; return demangled; } #else /* ! (IN_LIBGCC2 || IN_GLIBCPP_V3) */ /* Entry point for libiberty demangler. If MANGLED is a g++ v3 ABI mangled name, return a buffer allocated with malloc holding the demangled name. Otherwise, return NULL. 
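   A minimal caller sketch (illustrative only; the mangled name shown is
   just an example, and demangles to "foo(int)"):

       char *s = cplus_demangle_v3 ("_Z3fooi", DMGL_PARAMS | DMGL_ANSI);
       if (s != NULL)
         {
           puts (s);
           free (s);
         }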
*/ char * cplus_demangle_v3 (mangled, options) const char* mangled; int options; { size_t alc; return d_demangle (mangled, options, &alc); } /* Demangle a Java symbol. Java uses a subset of the V3 ABI C++ mangling conventions, but the output formatting is a little different. This instructs the C++ demangler not to emit pointer characters ("*"), and to use Java's namespace separator symbol ("." instead of "::"). It then does an additional pass over the demangled output to replace instances of JArray with TYPE[]. */ char * java_demangle_v3 (mangled) const char* mangled; { size_t alc; char *demangled; int nesting; char *from; char *to; demangled = d_demangle (mangled, DMGL_JAVA | DMGL_PARAMS, &alc); if (demangled == NULL) return NULL; nesting = 0; from = demangled; to = from; while (*from != '\0') { if (strncmp (from, "JArray<", 7) == 0) { from += 7; ++nesting; } else if (nesting > 0 && *from == '>') { while (to > demangled && to[-1] == ' ') --to; *to++ = '['; *to++ = ']'; --nesting; ++from; } else *to++ = *from++; } *to = '\0'; return demangled; } #endif /* IN_LIBGCC2 || IN_GLIBCPP_V3 */ #ifndef IN_GLIBCPP_V3 /* Demangle a string in order to find out whether it is a constructor or destructor. Return non-zero on success. Set *CTOR_KIND and *DTOR_KIND appropriately. */ static int is_ctor_or_dtor (mangled, ctor_kind, dtor_kind) const char *mangled; enum gnu_v3_ctor_kinds *ctor_kind; enum gnu_v3_dtor_kinds *dtor_kind; { struct d_info di; struct demangle_component *dc; int ret; *ctor_kind = (enum gnu_v3_ctor_kinds) 0; *dtor_kind = (enum gnu_v3_dtor_kinds) 0; cplus_demangle_init_info (mangled, DMGL_GNU_V3, strlen (mangled), &di); { #ifdef CP_DYNAMIC_ARRAYS __extension__ struct demangle_component comps[di.num_comps]; __extension__ struct demangle_component *subs[di.num_subs]; di.comps = &comps[0]; di.subs = &subs[0]; #else di.comps = ((struct demangle_component *) malloc (di.num_comps * sizeof (struct demangle_component))); di.subs = ((struct demangle_component **) malloc (di.num_subs * sizeof (struct demangle_component *))); if (di.comps == NULL || di.subs == NULL) { if (di.comps != NULL) free (di.comps); if (di.subs != NULL) free (di.subs); return 0; } #endif dc = cplus_demangle_mangled_name (&di, 1); /* Note that because we did not pass DMGL_PARAMS, we don't expect to demangle the entire string. */ ret = 0; while (dc != NULL) { switch (dc->type) { default: dc = NULL; break; case DEMANGLE_COMPONENT_TYPED_NAME: case DEMANGLE_COMPONENT_TEMPLATE: case DEMANGLE_COMPONENT_RESTRICT_THIS: case DEMANGLE_COMPONENT_VOLATILE_THIS: case DEMANGLE_COMPONENT_CONST_THIS: dc = d_left (dc); break; case DEMANGLE_COMPONENT_QUAL_NAME: case DEMANGLE_COMPONENT_LOCAL_NAME: dc = d_right (dc); break; case DEMANGLE_COMPONENT_CTOR: *ctor_kind = dc->u.s_ctor.kind; ret = 1; dc = NULL; break; case DEMANGLE_COMPONENT_DTOR: *dtor_kind = dc->u.s_dtor.kind; ret = 1; dc = NULL; break; } } #ifndef CP_DYNAMIC_ARRAYS free (di.subs); free (di.comps); #endif } return ret; } /* Return whether NAME is the mangled form of a g++ V3 ABI constructor name. A non-zero return indicates the type of constructor. */ enum gnu_v3_ctor_kinds is_gnu_v3_mangled_ctor (name) const char *name; { enum gnu_v3_ctor_kinds ctor_kind; enum gnu_v3_dtor_kinds dtor_kind; if (! is_ctor_or_dtor (name, &ctor_kind, &dtor_kind)) return (enum gnu_v3_ctor_kinds) 0; return ctor_kind; } /* Return whether NAME is the mangled form of a g++ V3 ABI destructor name. A non-zero return indicates the type of destructor. 
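   For instance (an illustrative name, not taken from real code),
   is_gnu_v3_mangled_dtor ("_ZN1AD1Ev"), the complete-object destructor of
   a class A, should yield gnu_v3_complete_object_dtor, while any
   non-destructor name yields 0.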
*/ enum gnu_v3_dtor_kinds is_gnu_v3_mangled_dtor (name) const char *name; { enum gnu_v3_ctor_kinds ctor_kind; enum gnu_v3_dtor_kinds dtor_kind; if (! is_ctor_or_dtor (name, &ctor_kind, &dtor_kind)) return (enum gnu_v3_dtor_kinds) 0; return dtor_kind; } #endif /* IN_GLIBCPP_V3 */ /* Declarations for getopt. Copyright 1989, 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, 2000, 2002 Free Software Foundation, Inc. NOTE: The canonical source of this file is maintained with the GNU C Library. Bugs can be reported to bug-glibc@gnu.org. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef _GETOPT_H #define _GETOPT_H 1 #ifdef __cplusplus extern "C" { #endif /* For communication from `getopt' to the caller. When `getopt' finds an option that takes an argument, the argument value is returned here. Also, when `ordering' is RETURN_IN_ORDER, each non-option ARGV-element is returned here. */ extern char *optarg; /* Index in ARGV of the next element to be scanned. This is used for communication to and from the caller and for communication between successive calls to `getopt'. On entry to `getopt', zero means this is the first call; initialize. When `getopt' returns -1, this is the index of the first of the non-option elements that the caller should itself scan. Otherwise, `optind' communicates from one call to the next how much of ARGV has been scanned so far. */ extern int optind; /* Callers store zero here to inhibit the error message `getopt' prints for unrecognized options. */ extern int opterr; /* Set to an option character which was unrecognized. */ extern int optopt; /* Describe the long-named options requested by the application. The LONG_OPTIONS argument to getopt_long or getopt_long_only is a vector of `struct option' terminated by an element containing a name which is zero. The field `has_arg' is: no_argument (or 0) if the option does not take an argument, required_argument (or 1) if the option requires an argument, optional_argument (or 2) if the option takes an optional argument. If the field `flag' is not NULL, it points to a variable that is set to the value given in the field `val' when the option is found, but left unchanged if the option is not found. To have a long-named option do something other than set an `int' to a compiled-in constant, such as set a value from `optarg', set the option's `flag' field to zero and its `val' field to a nonzero value (the equivalent single-letter option character, if there is one). For long options that have a zero `flag' field, `getopt' returns the contents of the `val' field. */ struct option { #if defined (__STDC__) && __STDC__ const char *name; #else char *name; #endif /* has_arg can't be an enum because some compilers complain about type mismatches in all the code that assumes it is an int. */ int has_arg; int *flag; int val; }; /* Names for the values of the `has_arg' field of `struct option'. 
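   For example, an application accepting --verbose and --output FILE might
   build its table like this (an illustrative sketch; the option names are
   hypothetical):

       static const struct option opts[] =
       {
         { "verbose", no_argument,       NULL, 'v' },
         { "output",  required_argument, NULL, 'o' },
         { NULL,      0,                 NULL, 0 }
       };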
*/ #define no_argument 0 #define required_argument 1 #define optional_argument 2 #if defined (__STDC__) && __STDC__ /* HAVE_DECL_* is a three-state macro: undefined, 0 or 1. If it is undefined, we haven't run the autoconf check so provide the declaration without arguments. If it is 0, we checked and failed to find the declaration so provide a fully prototyped one. If it is 1, we found it so don't provide any declaration at all. */ #if !HAVE_DECL_GETOPT #if defined (__GNU_LIBRARY__) || defined (HAVE_DECL_GETOPT) /* Many other libraries have conflicting prototypes for getopt, with differences in the consts, in unistd.h. To avoid compilation errors, only prototype getopt for the GNU C library. */ extern int getopt (int argc, char *const *argv, const char *shortopts); #else #ifndef __cplusplus extern int getopt (); #endif /* __cplusplus */ #endif #endif /* !HAVE_DECL_GETOPT */ extern int getopt_long (int argc, char *const *argv, const char *shortopts, const struct option *longopts, int *longind); extern int getopt_long_only (int argc, char *const *argv, const char *shortopts, const struct option *longopts, int *longind); /* Internal only. Users should not call this directly. */ extern int _getopt_internal (int argc, char *const *argv, const char *shortopts, const struct option *longopts, int *longind, int long_only); #else /* not __STDC__ */ extern int getopt (); extern int getopt_long (); extern int getopt_long_only (); extern int _getopt_internal (); #endif /* __STDC__ */ #ifdef __cplusplus } #endif #endif /* getopt.h */ /* An abstract string datatype. Copyright (C) 1998, 1999, 2000, 2002, 2004 Free Software Foundation, Inc. Contributed by Mark Mitchell (mark@markmitchell.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ typedef struct dyn_string { int allocated; /* The amount of space allocated for the string. */ int length; /* The actual length of the string. */ char *s; /* The string itself, NUL-terminated. */ }* dyn_string_t; /* The length STR, in bytes, not including the terminating NUL. */ #define dyn_string_length(STR) \ ((STR)->length) /* The NTBS in which the contents of STR are stored. */ #define dyn_string_buf(STR) \ ((STR)->s) /* Compare DS1 to DS2 with strcmp. 
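   A short usage sketch (illustrative; dyn_string_new, dyn_string_copy_cstr
   and dyn_string_delete are declared just below):

       dyn_string_t a = dyn_string_new (8);
       dyn_string_t b = dyn_string_new (8);
       dyn_string_copy_cstr (a, "foo");
       dyn_string_copy_cstr (b, "foo");
       if (dyn_string_compare (a, b) == 0)
         printf ("equal\n");
       dyn_string_delete (a);
       dyn_string_delete (b);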
*/ #define dyn_string_compare(DS1, DS2) \ (strcmp ((DS1)->s, (DS2)->s)) extern int dyn_string_init PARAMS ((struct dyn_string *, int)); extern dyn_string_t dyn_string_new PARAMS ((int)); extern void dyn_string_delete PARAMS ((dyn_string_t)); extern char *dyn_string_release PARAMS ((dyn_string_t)); extern dyn_string_t dyn_string_resize PARAMS ((dyn_string_t, int)); extern void dyn_string_clear PARAMS ((dyn_string_t)); extern int dyn_string_copy PARAMS ((dyn_string_t, dyn_string_t)); extern int dyn_string_copy_cstr PARAMS ((dyn_string_t, const char *)); extern int dyn_string_prepend PARAMS ((dyn_string_t, dyn_string_t)); extern int dyn_string_prepend_cstr PARAMS ((dyn_string_t, const char *)); extern int dyn_string_insert PARAMS ((dyn_string_t, int, dyn_string_t)); extern int dyn_string_insert_cstr PARAMS ((dyn_string_t, int, const char *)); extern int dyn_string_insert_char PARAMS ((dyn_string_t, int, int)); extern int dyn_string_append PARAMS ((dyn_string_t, dyn_string_t)); extern int dyn_string_append_cstr PARAMS ((dyn_string_t, const char *)); extern int dyn_string_append_char PARAMS ((dyn_string_t, int)); extern int dyn_string_substring PARAMS ((dyn_string_t, dyn_string_t, int, int)); extern int dyn_string_eq PARAMS ((dyn_string_t, dyn_string_t)); #ifdef STANDALONE_DEMANGLER static void print_usage PARAMS ((FILE* fp, int exit_value)); #define IS_ALPHA(CHAR) \ (((CHAR) >= 'a' && (CHAR) <= 'z') \ || ((CHAR) >= 'A' && (CHAR) <= 'Z')) /* Non-zero if CHAR is a character than can occur in a mangled name. */ #define is_mangled_char(CHAR) \ (IS_ALPHA (CHAR) || IS_DIGIT (CHAR) \ || (CHAR) == '_' || (CHAR) == '.' || (CHAR) == '$') /* The name of this program, as invoked. */ const char* program_name; /* Prints usage summary to FP and then exits with EXIT_VALUE. */ static void print_usage (fp, exit_value) FILE* fp; int exit_value; { fprintf (fp, "Usage: %s [options] [names ...]\n", program_name); fprintf (fp, "Options:\n"); fprintf (fp, " -h,--help Display this message.\n"); fprintf (fp, " -p,--no-params Don't display function parameters\n"); fprintf (fp, " -v,--verbose Produce verbose demanglings.\n"); fprintf (fp, "If names are provided, they are demangled. Otherwise filters standard input.\n"); exit (exit_value); } /* Option specification for getopt_long. */ static const struct option long_options[] = { { "help", no_argument, NULL, 'h' }, { "no-params", no_argument, NULL, 'p' }, { "verbose", no_argument, NULL, 'v' }, { NULL, no_argument, NULL, 0 }, }; /* Main entry for a demangling filter executable. It will demangle its command line arguments, if any. If none are provided, it will filter stdin to stdout, replacing any recognized mangled C++ names with their demangled equivalents. */ int main (argc, argv) int argc; char *argv[]; { int i; int opt_char; int options = DMGL_PARAMS | DMGL_ANSI | DMGL_TYPES; /* Use the program name of this program, as invoked. */ program_name = argv[0]; /* Parse options. */ do { opt_char = getopt_long (argc, argv, "hpv", long_options, NULL); switch (opt_char) { case '?': /* Unrecognized option. */ print_usage (stderr, 1); break; case 'h': print_usage (stdout, 0); break; case 'p': options &= ~ DMGL_PARAMS; break; case 'v': options |= DMGL_VERBOSE; break; } } while (opt_char != -1); if (optind == argc) /* No command line arguments were provided. Filter stdin. */ { dyn_string_t mangled = dyn_string_new (3); char *s; /* Read all of input. */ while (!feof (stdin)) { char c; /* Pile characters into mangled until we hit one that can't occur in a mangled name. 
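   That is, characters satisfying is_mangled_char are accumulated in
   MANGLED; the first character that fails the test is dealt with after the
   demangling attempt below (it is echoed to stdout unchanged).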
*/ c = getchar (); while (!feof (stdin) && is_mangled_char (c)) { dyn_string_append_char (mangled, c); if (feof (stdin)) break; c = getchar (); } if (dyn_string_length (mangled) > 0) { #ifdef IN_GLIBCPP_V3 s = __cxa_demangle (dyn_string_buf (mangled), NULL, NULL, NULL); #else s = cplus_demangle_v3 (dyn_string_buf (mangled), options); #endif if (s != NULL) { fputs (s, stdout); free (s); } else { /* It might not have been a mangled name. Print the original text. */ fputs (dyn_string_buf (mangled), stdout); } dyn_string_clear (mangled); } /* If we haven't hit EOF yet, we've read one character that can't occur in a mangled name, so print it out. */ if (!feof (stdin)) putchar (c); } dyn_string_delete (mangled); } else /* Demangle command line arguments. */ { /* Loop over command line arguments. */ for (i = optind; i < argc; ++i) { char *s; #ifdef IN_GLIBCPP_V3 int status; #endif /* Attempt to demangle. */ #ifdef IN_GLIBCPP_V3 s = __cxa_demangle (argv[i], NULL, NULL, &status); #else s = cplus_demangle_v3 (argv[i], options); #endif /* If it worked, print the demangled name. */ if (s != NULL) { printf ("%s\n", s); free (s); } else { #ifdef IN_GLIBCPP_V3 fprintf (stderr, "Failed: %s (status %d)\n", argv[i], status); #else fprintf (stderr, "Failed: %s\n", argv[i]); #endif } } } return 0; } #endif /* STANDALONE_DEMANGLER */ /* md5.c - Functions to compute MD5 message digest of files or memory blocks according to the definition of MD5 in RFC 1321 from April 1992. Copyright (C) 1995, 1996 Free Software Foundation, Inc. NOTE: This source is derived from an old version taken from the GNU C Library (glibc). This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Written by Ulrich Drepper , 1995. */ #ifdef HAVE_CONFIG_H #endif #include #if STDC_HEADERS || defined _LIBC # include # include #else # ifndef HAVE_MEMCPY # define memcpy(d, s, n) bcopy ((s), (d), (n)) # endif #endif /* md5.h - Declaration of functions and data types used for MD5 sum computing library functions. Copyright 1995, 1996, 2000 Free Software Foundation, Inc. NOTE: The canonical source of this file is maintained with the GNU C Library. Bugs can be reported to bug-glibc@prep.ai.mit.edu. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/

#ifndef _MD5_H
#define _MD5_H 1

#include <stdio.h>

#if defined HAVE_LIMITS_H || _LIBC
# include <limits.h>
#endif

/* The following contortions are an attempt to use the C preprocessor to
   determine an unsigned integral type that is 32 bits wide.  An
   alternative approach is to use autoconf's AC_CHECK_SIZEOF macro, but
   doing that would require that the configure script compile and *run*
   the resulting executable.  Locally running cross-compiled executables
   is usually not possible.  */

#ifdef _LIBC
# include <sys/types.h>
typedef u_int32_t md5_uint32;
#else
# define INT_MAX_32_BITS 2147483647

/* If UINT_MAX isn't defined, assume it's a 32-bit type.  This should be
   valid for all systems GNU cares about because that doesn't include
   16-bit systems, and only modern systems (that certainly have
   <limits.h>) have 64+-bit integral types.  */

# ifndef INT_MAX
# define INT_MAX INT_MAX_32_BITS
# endif

# if INT_MAX == INT_MAX_32_BITS
typedef unsigned int md5_uint32;
# else
#  if SHRT_MAX == INT_MAX_32_BITS
typedef unsigned short md5_uint32;
#  else
#   if LONG_MAX == INT_MAX_32_BITS
typedef unsigned long md5_uint32;
#   else
/* The following line is intended to evoke an error.  Using #error is
   not portable enough.  */
"Cannot determine unsigned 32-bit data type."
#   endif
#  endif
# endif
#endif

#undef __P
#if defined (__STDC__) && __STDC__
#define __P(x) x
#else
#define __P(x) ()
#endif

/* Structure to save state of computation between the single steps.  */
struct md5_ctx
{
  md5_uint32 A;
  md5_uint32 B;
  md5_uint32 C;
  md5_uint32 D;

  md5_uint32 total[2];
  md5_uint32 buflen;
  char buffer[128];
};

/*
 * The following three functions make up the low-level interface used by
 * the functions `md5_stream' and `md5_buffer'.
 */

/* Initialize structure containing state of computation.
   (RFC 1321, 3.3: Step 3)  */
extern void md5_init_ctx __P ((struct md5_ctx *ctx));

/* Starting with the result of former calls of this function (or the
   initialization function) update the context for the next LEN bytes
   starting at BUFFER.  It is necessary that LEN is a multiple of 64!!!  */
extern void md5_process_block __P ((const void *buffer, size_t len,
                                    struct md5_ctx *ctx));

/* Starting with the result of former calls of this function (or the
   initialization function) update the context for the next LEN bytes
   starting at BUFFER.  It is NOT required that LEN is a multiple of 64.  */
extern void md5_process_bytes __P ((const void *buffer, size_t len,
                                    struct md5_ctx *ctx));

/* Process the remaining bytes in the buffer and put result from CTX in
   first 16 bytes following RESBUF.  The result is always in little endian
   byte order, so that a byte-wise output yields the wanted ASCII
   representation of the message digest.

   IMPORTANT: On some systems it is required that RESBUF is correctly
   aligned for a 32-bit value.  */
extern void *md5_finish_ctx __P ((struct md5_ctx *ctx, void *resbuf));

/* Put result from CTX in first 16 bytes following RESBUF.  The result is
   always in little endian byte order, so that a byte-wise output yields
   the wanted ASCII representation of the message digest.

   IMPORTANT: On some systems it is required that RESBUF is correctly
   aligned for a 32-bit value.  */
extern void *md5_read_ctx __P ((const struct md5_ctx *ctx, void *resbuf));

/* Compute MD5 message digest for bytes read from STREAM.  The resulting
   message digest number will be written into the 16 bytes beginning at
   RESBLOCK.  */
extern int md5_stream __P ((FILE *stream, void *resblock));

/* Compute MD5 message digest for LEN bytes beginning at BUFFER.
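   (A typical one-shot use, sketched for illustration with a caller-supplied
   DATA pointer and LEN byte count:

       unsigned char digest[16];
       md5_buffer (data, len, digest);

   or, for incremental hashing:

       struct md5_ctx ctx;
       md5_init_ctx (&ctx);
       md5_process_bytes (data, len, &ctx);
       md5_finish_ctx (&ctx, digest);)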
The result is always in little endian byte order, so that a byte-wise output yields to the wanted ASCII representation of the message digest. */ extern void *md5_buffer __P ((const char *buffer, size_t len, void *resblock)); #endif #ifdef _LIBC # if __BYTE_ORDER == __BIG_ENDIAN # define WORDS_BIGENDIAN 1 # endif #endif #ifdef WORDS_BIGENDIAN # define SWAP(n) \ (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24)) #else # define SWAP(n) (n) #endif /* This array contains the bytes used to pad the buffer to the next 64-byte boundary. (RFC 1321, 3.1: Step 1) */ static const unsigned char fillbuf[64] = { 0x80, 0 /* , 0, 0, ... */ }; /* Initialize structure containing state of computation. (RFC 1321, 3.3: Step 3) */ void md5_init_ctx (ctx) struct md5_ctx *ctx; { ctx->A = (md5_uint32) 0x67452301; ctx->B = (md5_uint32) 0xefcdab89; ctx->C = (md5_uint32) 0x98badcfe; ctx->D = (md5_uint32) 0x10325476; ctx->total[0] = ctx->total[1] = 0; ctx->buflen = 0; } /* Put result from CTX in first 16 bytes following RESBUF. The result must be in little endian byte order. IMPORTANT: On some systems it is required that RESBUF is correctly aligned for a 32 bits value. */ void * md5_read_ctx (ctx, resbuf) const struct md5_ctx *ctx; void *resbuf; { ((md5_uint32 *) resbuf)[0] = SWAP (ctx->A); ((md5_uint32 *) resbuf)[1] = SWAP (ctx->B); ((md5_uint32 *) resbuf)[2] = SWAP (ctx->C); ((md5_uint32 *) resbuf)[3] = SWAP (ctx->D); return resbuf; } /* Process the remaining bytes in the internal buffer and the usual prolog according to the standard and write the result to RESBUF. IMPORTANT: On some systems it is required that RESBUF is correctly aligned for a 32 bits value. */ void * md5_finish_ctx (ctx, resbuf) struct md5_ctx *ctx; void *resbuf; { /* Take yet unprocessed bytes into account. */ md5_uint32 bytes = ctx->buflen; size_t pad; /* Now count remaining bytes. */ ctx->total[0] += bytes; if (ctx->total[0] < bytes) ++ctx->total[1]; pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes; memcpy (&ctx->buffer[bytes], fillbuf, pad); /* Put the 64-bit file length in *bits* at the end of the buffer. */ *(md5_uint32 *) &ctx->buffer[bytes + pad] = SWAP (ctx->total[0] << 3); *(md5_uint32 *) &ctx->buffer[bytes + pad + 4] = SWAP ((ctx->total[1] << 3) | (ctx->total[0] >> 29)); /* Process last bytes. */ md5_process_block (ctx->buffer, bytes + pad + 8, ctx); return md5_read_ctx (ctx, resbuf); } /* Compute MD5 message digest for bytes read from STREAM. The resulting message digest number will be written into the 16 bytes beginning at RESBLOCK. */ int md5_stream (stream, resblock) FILE *stream; void *resblock; { /* Important: BLOCKSIZE must be a multiple of 64. */ #define BLOCKSIZE 4096 struct md5_ctx ctx; char buffer[BLOCKSIZE + 72]; size_t sum; /* Initialize the computation context. */ md5_init_ctx (&ctx); /* Iterate over full file contents. */ while (1) { /* We read the file in blocks of BLOCKSIZE bytes. One call of the computation function processes the whole buffer so that with the next round of the loop another block can be read. */ size_t n; sum = 0; /* Read block. Take care for partial reads. */ do { n = fread (buffer + sum, 1, BLOCKSIZE - sum, stream); sum += n; } while (sum < BLOCKSIZE && n != 0); if (n == 0 && ferror (stream)) return 1; /* If end of file is reached, end the loop. */ if (n == 0) break; /* Process buffer with BLOCKSIZE bytes. Note that BLOCKSIZE % 64 == 0 */ md5_process_block (buffer, BLOCKSIZE, &ctx); } /* Add the last bytes if necessary. 
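   SUM at this point is the size of the final, partial block read before end
   of file; since it is smaller than BLOCKSIZE it goes through
   md5_process_bytes rather than md5_process_block.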
*/ if (sum > 0) md5_process_bytes (buffer, sum, &ctx); /* Construct result in desired memory. */ md5_finish_ctx (&ctx, resblock); return 0; } /* Compute MD5 message digest for LEN bytes beginning at BUFFER. The result is always in little endian byte order, so that a byte-wise output yields to the wanted ASCII representation of the message digest. */ void * md5_buffer (buffer, len, resblock) const char *buffer; size_t len; void *resblock; { struct md5_ctx ctx; /* Initialize the computation context. */ md5_init_ctx (&ctx); /* Process whole buffer but last len % 64 bytes. */ md5_process_bytes (buffer, len, &ctx); /* Put result in desired memory area. */ return md5_finish_ctx (&ctx, resblock); } void md5_process_bytes (buffer, len, ctx) const void *buffer; size_t len; struct md5_ctx *ctx; { /* When we already have some bits in our internal buffer concatenate both inputs first. */ if (ctx->buflen != 0) { size_t left_over = ctx->buflen; size_t add = 128 - left_over > len ? len : 128 - left_over; memcpy (&ctx->buffer[left_over], buffer, add); ctx->buflen += add; if (left_over + add > 64) { md5_process_block (ctx->buffer, (left_over + add) & ~63, ctx); /* The regions in the following copy operation cannot overlap. */ memcpy (ctx->buffer, &ctx->buffer[(left_over + add) & ~63], (left_over + add) & 63); ctx->buflen = (left_over + add) & 63; } buffer = (const void *) ((const char *) buffer + add); len -= add; } /* Process available complete blocks. */ if (len > 64) { md5_process_block (buffer, len & ~63, ctx); buffer = (const void *) ((const char *) buffer + (len & ~63)); len &= 63; } /* Move remaining bytes in internal buffer. */ if (len > 0) { memcpy (ctx->buffer, buffer, len); ctx->buflen = len; } } /* These are the four functions used in the four steps of the MD5 algorithm and defined in the RFC 1321. The first function is a little bit optimized (as found in Colin Plumbs public domain implementation). */ /* #define FF(b, c, d) ((b & c) | (~b & d)) */ #define FF(b, c, d) (d ^ (b & (c ^ d))) #define FG(b, c, d) FF (d, b, c) #define FH(b, c, d) (b ^ c ^ d) #define FI(b, c, d) (c ^ (b | ~d)) /* Process LEN bytes of BUFFER, accumulating context into CTX. It is assumed that LEN % 64 == 0. */ void md5_process_block (buffer, len, ctx) const void *buffer; size_t len; struct md5_ctx *ctx; { md5_uint32 correct_words[16]; const md5_uint32 *words = (const md5_uint32 *) buffer; size_t nwords = len / sizeof (md5_uint32); const md5_uint32 *endp = words + nwords; md5_uint32 A = ctx->A; md5_uint32 B = ctx->B; md5_uint32 C = ctx->C; md5_uint32 D = ctx->D; /* First increment the byte count. RFC 1321 specifies the possible length of the file up to 2^64 bits. Here we only compute the number of bytes. Do a double word increment. */ ctx->total[0] += len; if (ctx->total[0] < len) ++ctx->total[1]; /* Process all bytes in the buffer with 64 bytes in each round of the loop. */ while (words < endp) { md5_uint32 *cwp = correct_words; md5_uint32 A_save = A; md5_uint32 B_save = B; md5_uint32 C_save = C; md5_uint32 D_save = D; /* First round: using the given function, the context and a constant the next context is computed. Because the algorithms processing unit is a 32-bit word and it is determined to work on words in little endian byte order we perhaps have to change the byte order before the computation. To reduce the work for the next steps we store the swapped words in the array CORRECT_WORDS. 
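   Each OP invocation below performs one elementary MD5 step,
       a = b + rotate_left (a + F (b, c, d) + X[k] + T[i], s),
   where F is the round function (FF, FG, FH or FI), X[k] is the k-th word
   of the current input block and T[i] is the sine-derived constant from
   RFC 1321; the CYCLIC macro supplies the left rotation.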
*/ #define OP(a, b, c, d, s, T) \ do \ { \ a += FF (b, c, d) + (*cwp++ = SWAP (*words)) + T; \ ++words; \ CYCLIC (a, s); \ a += b; \ } \ while (0) /* It is unfortunate that C does not provide an operator for cyclic rotation. Hope the C compiler is smart enough. */ #define CYCLIC(w, s) (w = (w << s) | (w >> (32 - s))) /* Before we start, one word to the strange constants. They are defined in RFC 1321 as T[i] = (int) (4294967296.0 * fabs (sin (i))), i=1..64 */ /* Round 1. */ OP (A, B, C, D, 7, (md5_uint32) 0xd76aa478); OP (D, A, B, C, 12, (md5_uint32) 0xe8c7b756); OP (C, D, A, B, 17, (md5_uint32) 0x242070db); OP (B, C, D, A, 22, (md5_uint32) 0xc1bdceee); OP (A, B, C, D, 7, (md5_uint32) 0xf57c0faf); OP (D, A, B, C, 12, (md5_uint32) 0x4787c62a); OP (C, D, A, B, 17, (md5_uint32) 0xa8304613); OP (B, C, D, A, 22, (md5_uint32) 0xfd469501); OP (A, B, C, D, 7, (md5_uint32) 0x698098d8); OP (D, A, B, C, 12, (md5_uint32) 0x8b44f7af); OP (C, D, A, B, 17, (md5_uint32) 0xffff5bb1); OP (B, C, D, A, 22, (md5_uint32) 0x895cd7be); OP (A, B, C, D, 7, (md5_uint32) 0x6b901122); OP (D, A, B, C, 12, (md5_uint32) 0xfd987193); OP (C, D, A, B, 17, (md5_uint32) 0xa679438e); OP (B, C, D, A, 22, (md5_uint32) 0x49b40821); /* For the second to fourth round we have the possibly swapped words in CORRECT_WORDS. Redefine the macro to take an additional first argument specifying the function to use. */ #undef OP #define OP(a, b, c, d, k, s, T) \ do \ { \ a += FX (b, c, d) + correct_words[k] + T; \ CYCLIC (a, s); \ a += b; \ } \ while (0) #define FX(b, c, d) FG (b, c, d) /* Round 2. */ OP (A, B, C, D, 1, 5, (md5_uint32) 0xf61e2562); OP (D, A, B, C, 6, 9, (md5_uint32) 0xc040b340); OP (C, D, A, B, 11, 14, (md5_uint32) 0x265e5a51); OP (B, C, D, A, 0, 20, (md5_uint32) 0xe9b6c7aa); OP (A, B, C, D, 5, 5, (md5_uint32) 0xd62f105d); OP (D, A, B, C, 10, 9, (md5_uint32) 0x02441453); OP (C, D, A, B, 15, 14, (md5_uint32) 0xd8a1e681); OP (B, C, D, A, 4, 20, (md5_uint32) 0xe7d3fbc8); OP (A, B, C, D, 9, 5, (md5_uint32) 0x21e1cde6); OP (D, A, B, C, 14, 9, (md5_uint32) 0xc33707d6); OP (C, D, A, B, 3, 14, (md5_uint32) 0xf4d50d87); OP (B, C, D, A, 8, 20, (md5_uint32) 0x455a14ed); OP (A, B, C, D, 13, 5, (md5_uint32) 0xa9e3e905); OP (D, A, B, C, 2, 9, (md5_uint32) 0xfcefa3f8); OP (C, D, A, B, 7, 14, (md5_uint32) 0x676f02d9); OP (B, C, D, A, 12, 20, (md5_uint32) 0x8d2a4c8a); #undef FX #define FX(b, c, d) FH (b, c, d) /* Round 3. */ OP (A, B, C, D, 5, 4, (md5_uint32) 0xfffa3942); OP (D, A, B, C, 8, 11, (md5_uint32) 0x8771f681); OP (C, D, A, B, 11, 16, (md5_uint32) 0x6d9d6122); OP (B, C, D, A, 14, 23, (md5_uint32) 0xfde5380c); OP (A, B, C, D, 1, 4, (md5_uint32) 0xa4beea44); OP (D, A, B, C, 4, 11, (md5_uint32) 0x4bdecfa9); OP (C, D, A, B, 7, 16, (md5_uint32) 0xf6bb4b60); OP (B, C, D, A, 10, 23, (md5_uint32) 0xbebfbc70); OP (A, B, C, D, 13, 4, (md5_uint32) 0x289b7ec6); OP (D, A, B, C, 0, 11, (md5_uint32) 0xeaa127fa); OP (C, D, A, B, 3, 16, (md5_uint32) 0xd4ef3085); OP (B, C, D, A, 6, 23, (md5_uint32) 0x04881d05); OP (A, B, C, D, 9, 4, (md5_uint32) 0xd9d4d039); OP (D, A, B, C, 12, 11, (md5_uint32) 0xe6db99e5); OP (C, D, A, B, 15, 16, (md5_uint32) 0x1fa27cf8); OP (B, C, D, A, 2, 23, (md5_uint32) 0xc4ac5665); #undef FX #define FX(b, c, d) FI (b, c, d) /* Round 4. 
*/ OP (A, B, C, D, 0, 6, (md5_uint32) 0xf4292244); OP (D, A, B, C, 7, 10, (md5_uint32) 0x432aff97); OP (C, D, A, B, 14, 15, (md5_uint32) 0xab9423a7); OP (B, C, D, A, 5, 21, (md5_uint32) 0xfc93a039); OP (A, B, C, D, 12, 6, (md5_uint32) 0x655b59c3); OP (D, A, B, C, 3, 10, (md5_uint32) 0x8f0ccc92); OP (C, D, A, B, 10, 15, (md5_uint32) 0xffeff47d); OP (B, C, D, A, 1, 21, (md5_uint32) 0x85845dd1); OP (A, B, C, D, 8, 6, (md5_uint32) 0x6fa87e4f); OP (D, A, B, C, 15, 10, (md5_uint32) 0xfe2ce6e0); OP (C, D, A, B, 6, 15, (md5_uint32) 0xa3014314); OP (B, C, D, A, 13, 21, (md5_uint32) 0x4e0811a1); OP (A, B, C, D, 4, 6, (md5_uint32) 0xf7537e82); OP (D, A, B, C, 11, 10, (md5_uint32) 0xbd3af235); OP (C, D, A, B, 2, 15, (md5_uint32) 0x2ad7d2bb); OP (B, C, D, A, 9, 21, (md5_uint32) 0xeb86d391); /* Add the starting values of the context. */ A += A_save; B += B_save; C += C_save; D += D_save; } /* Put checksum in context given as argument. */ ctx->A = A; ctx->B = B; ctx->C = C; ctx->D = D; } #undef FF #undef FG #undef FH #undef FI #undef FX #undef OP /* alloca.c -- allocate automatically reclaimed memory (Mostly) portable public-domain implementation -- D A Gwyn This implementation of the PWB library alloca function, which is used to allocate space off the run-time stack so that it is automatically reclaimed upon procedure exit, was inspired by discussions with J. Q. Johnson of Cornell. J.Otto Tennant contributed the Cray support. There are some preprocessor constants that can be defined when compiling for your specific system, for improved efficiency; however, the defaults should be okay. The general concept of this implementation is to keep track of all alloca-allocated blocks, and reclaim any that are found to be deeper in the stack than the current invocation. This heuristic does not reclaim storage as soon as it becomes invalid, but it will do so eventually. As a special case, alloca(0) reclaims storage without allocating any. It is a good idea to use alloca(0) in your main control loop, etc. to force garbage collection. */ /* @deftypefn Replacement void* alloca (size_t @var{size}) This function allocates memory which will be automatically reclaimed after the procedure exits. The @libib{} implementation does not free the memory immediately but will do so eventually during subsequent calls to this function. Memory is allocated using @code{xmalloc} under normal circumstances. The header file @file{alloca-conf.h} can be used in conjunction with the GNU Autoconf test @code{AC_FUNC_ALLOCA} to test for and properly make available this function. The @code{AC_FUNC_ALLOCA} test requires that client code use a block of preprocessor code to be safe (see the Autoconf manual for more); this header incorporates that logic and more, including the possibility of a GCC built-in function. @end deftypefn */ #ifdef HAVE_CONFIG_H #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_STDLIB_H #include #endif /* These variables are used by the ASTRDUP implementation that relies on C_alloca. */ const char *libiberty_optr; char *libiberty_nptr; unsigned long libiberty_len; /* If your stack is a linked list of frames, you have to provide an "address metric" ADDRESS_FUNCTION macro. 
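   On machines with an ordinary linear stack the default definition below,
   which simply takes the address of a local variable, is an adequate
   metric; the Cray variant instead computes a logical depth with i00afunc,
   because Cray stacks are segmented.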
*/ #if defined (CRAY) && defined (CRAY_STACKSEG_END) static long i00afunc (); #define ADDRESS_FUNCTION(arg) (char *) i00afunc (&(arg)) #else #define ADDRESS_FUNCTION(arg) &(arg) #endif #ifndef NULL #define NULL 0 #endif /* Define STACK_DIRECTION if you know the direction of stack growth for your system; otherwise it will be automatically deduced at run-time. STACK_DIRECTION > 0 => grows toward higher addresses STACK_DIRECTION < 0 => grows toward lower addresses STACK_DIRECTION = 0 => direction of growth unknown */ #ifndef STACK_DIRECTION #define STACK_DIRECTION 0 /* Direction unknown. */ #endif #if STACK_DIRECTION != 0 #define STACK_DIR STACK_DIRECTION /* Known at compile-time. */ #else /* STACK_DIRECTION == 0; need run-time code. */ static int stack_dir; /* 1 or -1 once known. */ #define STACK_DIR stack_dir static void find_stack_direction () { static char *addr = NULL; /* Address of first `dummy', once known. */ auto char dummy; /* To get stack address. */ if (addr == NULL) { /* Initial entry. */ addr = ADDRESS_FUNCTION (dummy); find_stack_direction (); /* Recurse once. */ } else { /* Second entry. */ if (ADDRESS_FUNCTION (dummy) > addr) stack_dir = 1; /* Stack grew upward. */ else stack_dir = -1; /* Stack grew downward. */ } } #endif /* STACK_DIRECTION == 0 */ /* An "alloca header" is used to: (a) chain together all alloca'ed blocks; (b) keep track of stack depth. It is very important that sizeof(header) agree with malloc alignment chunk size. The following default should work okay. */ #ifndef ALIGN_SIZE #define ALIGN_SIZE sizeof(double) #endif typedef union hdr { char align[ALIGN_SIZE]; /* To force sizeof(header). */ struct { union hdr *next; /* For chaining headers. */ char *deep; /* For stack depth measure. */ } h; } header; static header *last_alloca_header = NULL; /* -> last alloca header. */ /* Return a pointer to at least SIZE bytes of storage, which will be automatically reclaimed upon exit from the procedure that called alloca. Originally, this space was supposed to be taken from the current stack frame of the caller, but that method cannot be made to work for some implementations of C, for example under Gould's UTX/32. */ /* @undocumented C_alloca */ PTR C_alloca (size) size_t size; { auto char probe; /* Probes stack depth: */ register char *depth = ADDRESS_FUNCTION (probe); #if STACK_DIRECTION == 0 if (STACK_DIR == 0) /* Unknown growth direction. */ find_stack_direction (); #endif /* Reclaim garbage, defined as all alloca'd storage that was allocated from deeper in the stack than currently. */ { register header *hp; /* Traverses linked list. */ for (hp = last_alloca_header; hp != NULL;) if ((STACK_DIR > 0 && hp->h.deep > depth) || (STACK_DIR < 0 && hp->h.deep < depth)) { register header *np = hp->h.next; free ((PTR) hp); /* Collect garbage. */ hp = np; /* -> next header. */ } else break; /* Rest are not deeper. */ last_alloca_header = hp; /* -> last valid storage. */ } if (size == 0) return NULL; /* No allocation required. */ /* Allocate combined header + user data storage. */ { register PTR new = xmalloc (sizeof (header) + size); /* Address of header. */ if (new == 0) abort(); ((header *) new)->h.next = last_alloca_header; ((header *) new)->h.deep = depth; last_alloca_header = (header *) new; /* User storage begins just after header. 
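   That is, the caller receives the address just past the header; the header
   itself records the chain pointer and the stack depth used by the
   reclamation pass above.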
*/ return (PTR) ((char *) new + sizeof (header)); } } #if defined (CRAY) && defined (CRAY_STACKSEG_END) #ifdef DEBUG_I00AFUNC #include #endif #ifndef CRAY_STACK #define CRAY_STACK #ifndef CRAY2 /* Stack structures for CRAY-1, CRAY X-MP, and CRAY Y-MP */ struct stack_control_header { long shgrow:32; /* Number of times stack has grown. */ long shaseg:32; /* Size of increments to stack. */ long shhwm:32; /* High water mark of stack. */ long shsize:32; /* Current size of stack (all segments). */ }; /* The stack segment linkage control information occurs at the high-address end of a stack segment. (The stack grows from low addresses to high addresses.) The initial part of the stack segment linkage control information is 0200 (octal) words. This provides for register storage for the routine which overflows the stack. */ struct stack_segment_linkage { long ss[0200]; /* 0200 overflow words. */ long sssize:32; /* Number of words in this segment. */ long ssbase:32; /* Offset to stack base. */ long:32; long sspseg:32; /* Offset to linkage control of previous segment of stack. */ long:32; long sstcpt:32; /* Pointer to task common address block. */ long sscsnm; /* Private control structure number for microtasking. */ long ssusr1; /* Reserved for user. */ long ssusr2; /* Reserved for user. */ long sstpid; /* Process ID for pid based multi-tasking. */ long ssgvup; /* Pointer to multitasking thread giveup. */ long sscray[7]; /* Reserved for Cray Research. */ long ssa0; long ssa1; long ssa2; long ssa3; long ssa4; long ssa5; long ssa6; long ssa7; long sss0; long sss1; long sss2; long sss3; long sss4; long sss5; long sss6; long sss7; }; #else /* CRAY2 */ /* The following structure defines the vector of words returned by the STKSTAT library routine. */ struct stk_stat { long now; /* Current total stack size. */ long maxc; /* Amount of contiguous space which would be required to satisfy the maximum stack demand to date. */ long high_water; /* Stack high-water mark. */ long overflows; /* Number of stack overflow ($STKOFEN) calls. */ long hits; /* Number of internal buffer hits. */ long extends; /* Number of block extensions. */ long stko_mallocs; /* Block allocations by $STKOFEN. */ long underflows; /* Number of stack underflow calls ($STKRETN). */ long stko_free; /* Number of deallocations by $STKRETN. */ long stkm_free; /* Number of deallocations by $STKMRET. */ long segments; /* Current number of stack segments. */ long maxs; /* Maximum number of stack segments so far. */ long pad_size; /* Stack pad size. */ long current_address; /* Current stack segment address. */ long current_size; /* Current stack segment size. This number is actually corrupted by STKSTAT to include the fifteen word trailer area. */ long initial_address; /* Address of initial segment. */ long initial_size; /* Size of initial segment. */ }; /* The following structure describes the data structure which trails any stack segment. I think that the description in 'asdef' is out of date. I only describe the parts that I am sure about. */ struct stk_trailer { long this_address; /* Address of this block. */ long this_size; /* Size of this block (does not include this trailer). */ long unknown2; long unknown3; long link; /* Address of trailer block of previous segment. */ long unknown5; long unknown6; long unknown7; long unknown8; long unknown9; long unknown10; long unknown11; long unknown12; long unknown13; long unknown14; }; #endif /* CRAY2 */ #endif /* not CRAY_STACK */ #ifdef CRAY2 /* Determine a "stack measure" for an arbitrary ADDRESS. 
I doubt that "lint" will like this much. */ static long i00afunc (long *address) { struct stk_stat status; struct stk_trailer *trailer; long *block, size; long result = 0; /* We want to iterate through all of the segments. The first step is to get the stack status structure. We could do this more quickly and more directly, perhaps, by referencing the $LM00 common block, but I know that this works. */ STKSTAT (&status); /* Set up the iteration. */ trailer = (struct stk_trailer *) (status.current_address + status.current_size - 15); /* There must be at least one stack segment. Therefore it is a fatal error if "trailer" is null. */ if (trailer == 0) abort (); /* Discard segments that do not contain our argument address. */ while (trailer != 0) { block = (long *) trailer->this_address; size = trailer->this_size; if (block == 0 || size == 0) abort (); trailer = (struct stk_trailer *) trailer->link; if ((block <= address) && (address < (block + size))) break; } /* Set the result to the offset in this segment and add the sizes of all predecessor segments. */ result = address - block; if (trailer == 0) { return result; } do { if (trailer->this_size <= 0) abort (); result += trailer->this_size; trailer = (struct stk_trailer *) trailer->link; } while (trailer != 0); /* We are done. Note that if you present a bogus address (one not in any segment), you will get a different number back, formed from subtracting the address of the first block. This is probably not what you want. */ return (result); } #else /* not CRAY2 */ /* Stack address function for a CRAY-1, CRAY X-MP, or CRAY Y-MP. Determine the number of the cell within the stack, given the address of the cell. The purpose of this routine is to linearize, in some sense, stack addresses for alloca. */ static long i00afunc (long address) { long stkl = 0; long size, pseg, this_segment, stack; long result = 0; struct stack_segment_linkage *ssptr; /* Register B67 contains the address of the end of the current stack segment. If you (as a subprogram) store your registers on the stack and find that you are past the contents of B67, you have overflowed the segment. B67 also points to the stack segment linkage control area, which is what we are really interested in. */ stkl = CRAY_STACKSEG_END (); ssptr = (struct stack_segment_linkage *) stkl; /* If one subtracts 'size' from the end of the segment, one has the address of the first word of the segment. If this is not the first segment, 'pseg' will be nonzero. */ pseg = ssptr->sspseg; size = ssptr->sssize; this_segment = stkl - size; /* It is possible that calling this routine itself caused a stack overflow. Discard stack segments which do not contain the target address. */ while (!(this_segment <= address && address <= stkl)) { #ifdef DEBUG_I00AFUNC fprintf (stderr, "%011o %011o %011o\n", this_segment, address, stkl); #endif if (pseg == 0) break; stkl = stkl - pseg; ssptr = (struct stack_segment_linkage *) stkl; size = ssptr->sssize; pseg = ssptr->sspseg; this_segment = stkl - size; } result = address - this_segment; /* If you subtract pseg from the current end of the stack, you get the address of the previous stack segment's end. This seems a little convoluted to me, but I'll bet you save a cycle somewhere. 
*/ while (pseg != 0) { #ifdef DEBUG_I00AFUNC fprintf (stderr, "%011o %011o\n", pseg, size); #endif stkl = stkl - pseg; ssptr = (struct stack_segment_linkage *) stkl; size = ssptr->sssize; pseg = ssptr->sspseg; result += size; } return (result); } #endif /* not CRAY2 */ #endif /* CRAY */ /* Create and destroy argument vectors (argv's) Copyright (C) 1992, 2001 Free Software Foundation, Inc. Written by Fred Fish @ Cygnus Support This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Create and destroy argument vectors. An argument vector is simply an array of string pointers, terminated by a NULL pointer. */ #define MY_ISBLANK(ch) ((ch) == ' ' || (ch) == '\t') /* Routines imported from standard C runtime libraries. */ #ifdef ANSI_PROTOTYPES #include #include #include #else /* !ANSI_PROTOTYPES */ #if !defined _WIN32 || defined __GNUC__ extern char *memcpy (); /* Copy memory region */ extern int strlen (); /* Count length of string */ extern char *malloc (); /* Standard memory allocater */ extern char *realloc (); /* Standard memory reallocator */ extern void free (); /* Free malloc'd memory */ extern char *strdup (); /* Duplicate a string */ #endif #endif /* ANSI_PROTOTYPES */ #ifndef NULL #define NULL 0 #endif #ifndef EOS #define EOS '\0' #endif #define INITIAL_MAXARGC 8 /* Number of args + NULL in initial argv */ /* @deftypefn Extension char** dupargv (char **@var{vector}) Duplicate an argument vector. Simply scans through @var{vector}, duplicating each argument until the terminating @code{NULL} is found. Returns a pointer to the argument vector if successful. Returns @code{NULL} if there is insufficient memory to complete building the argument vector. @end deftypefn */ char ** dupargv (argv) char **argv; { int argc; char **copy; if (argv == NULL) return NULL; /* the vector */ for (argc = 0; argv[argc] != NULL; argc++); copy = (char **) malloc ((argc + 1) * sizeof (char *)); if (copy == NULL) return NULL; /* the strings */ for (argc = 0; argv[argc] != NULL; argc++) { int len = strlen (argv[argc]); copy[argc] = malloc (sizeof (char *) * (len + 1)); if (copy[argc] == NULL) { freeargv (copy); return NULL; } strcpy (copy[argc], argv[argc]); } copy[argc] = NULL; return copy; } /* @deftypefn Extension void freeargv (char **@var{vector}) Free an argument vector that was built using @code{buildargv}. Simply scans through @var{vector}, freeing the memory for each argument until the terminating @code{NULL} is found, and then frees @var{vector} itself. 
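For example, a vector obtained from @code{dupargv} is released like this
(illustrative):

@example
char **copy = dupargv (argv);
...
freeargv (copy);
@end example
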
@end deftypefn */ void freeargv (vector) char **vector; { register char **scan; if (vector != NULL) { for (scan = vector; *scan != NULL; scan++) { free (*scan); } free (vector); } } /* @deftypefn Extension char** buildargv (char *@var{sp}) Given a pointer to a string, parse the string extracting fields separated by whitespace and optionally enclosed within either single or double quotes (which are stripped off), and build a vector of pointers to copies of the string for each field. The input string remains unchanged. The last element of the vector is followed by a @code{NULL} element. All of the memory for the pointer array and copies of the string is obtained from @code{malloc}. All of the memory can be returned to the system with the single function call @code{freeargv}, which takes the returned result of @code{buildargv}, as it's argument. Returns a pointer to the argument vector if successful. Returns @code{NULL} if @var{sp} is @code{NULL} or if there is insufficient memory to complete building the argument vector. If the input is a null string (as opposed to a @code{NULL} pointer), then buildarg returns an argument vector that has one arg, a null string. @end deftypefn The memory for the argv array is dynamically expanded as necessary. In order to provide a working buffer for extracting arguments into, with appropriate stripping of quotes and translation of backslash sequences, we allocate a working buffer at least as long as the input string. This ensures that we always have enough space in which to work, since the extracted arg is never larger than the input string. The argument vector is always kept terminated with a @code{NULL} arg pointer, so it can be passed to @code{freeargv} at any time, or returned, as appropriate. */ char **buildargv (input) const char *input; { char *arg; char *copybuf; int squote = 0; int dquote = 0; int bsquote = 0; int argc = 0; int maxargc = 0; char **argv = NULL; char **nargv; if (input != NULL) { copybuf = (char *) alloca (strlen (input) + 1); /* Is a do{}while to always execute the loop once. Always return an argv, even for null strings. See NOTES above, test case below. */ do { /* Pick off argv[argc] */ while (MY_ISBLANK (*input)) { input++; } if ((maxargc == 0) || (argc >= (maxargc - 1))) { /* argv needs initialization, or expansion */ if (argv == NULL) { maxargc = INITIAL_MAXARGC; nargv = (char **) malloc (maxargc * sizeof (char *)); } else { maxargc *= 2; nargv = (char **) realloc (argv, maxargc * sizeof (char *)); } if (nargv == NULL) { if (argv != NULL) { freeargv (argv); argv = NULL; } break; } argv = nargv; argv[argc] = NULL; } /* Begin scanning arg */ arg = copybuf; while (*input != EOS) { if (MY_ISBLANK (*input) && !squote && !dquote && !bsquote) { break; } else { if (bsquote) { bsquote = 0; *arg++ = *input; } else if (*input == '\\') { bsquote = 1; } else if (squote) { if (*input == '\'') { squote = 0; } else { *arg++ = *input; } } else if (dquote) { if (*input == '"') { dquote = 0; } else { *arg++ = *input; } } else { if (*input == '\'') { squote = 1; } else if (*input == '"') { dquote = 1; } else { *arg++ = *input; } } input++; } } *arg = EOS; argv[argc] = strdup (copybuf); if (argv[argc] == NULL) { freeargv (argv); argv = NULL; break; } argc++; argv[argc] = NULL; while (MY_ISBLANK (*input)) { input++; } } while (*input != EOS); } return (argv); } #ifdef MAIN /* Simple little test driver. 
*/ static const char *const tests[] = { "a simple command line", "arg 'foo' is single quoted", "arg \"bar\" is double quoted", "arg \"foo bar\" has embedded whitespace", "arg 'Jack said \\'hi\\'' has single quotes", "arg 'Jack said \\\"hi\\\"' has double quotes", "a b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4 5 6 7 8 9", /* This should be expanded into only one argument. */ "trailing-whitespace ", "", NULL }; int main () { char **argv; const char *const *test; char **targs; for (test = tests; *test != NULL; test++) { printf ("buildargv(\"%s\")\n", *test); if ((argv = buildargv (*test)) == NULL) { printf ("failed!\n\n"); } else { for (targs = argv; *targs != NULL; targs++) { printf ("\t\"%s\"\n", *targs); } printf ("\n"); } freeargv (argv); } return 0; } #endif /* MAIN */ /* Utility to pick a temporary filename prefix. Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #endif #include /* May get P_tmpdir. */ #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_STRING_H #include #endif extern char *choose_tmpdir PARAMS ((void)); /* Name of temporary file. mktemp requires 6 trailing X's. */ #define TEMP_FILE "ccXXXXXX" #define TEMP_FILE_LEN (sizeof(TEMP_FILE) - 1) /* @deftypefn Extension char* choose_temp_base (void) Return a prefix for temporary file names or @code{NULL} if unable to find one. The current directory is chosen if all else fails so the program is exited if a temporary directory can't be found (@code{mktemp} fails). The buffer for the result is obtained with @code{xmalloc}. This function is provided for backwards compatability only. Its use is not recommended. @end deftypefn */ char * choose_temp_base () { const char *base = choose_tmpdir (); char *temp_filename; int len; len = strlen (base); temp_filename = xmalloc (len + TEMP_FILE_LEN + 1); strcpy (temp_filename, base); strcpy (temp_filename + len, TEMP_FILE); mktemp (temp_filename); if (strlen (temp_filename) == 0) abort (); return temp_filename; } /* Concatenate variable number of strings. Copyright (C) 1991, 1994, 2001 Free Software Foundation, Inc. Written by Fred Fish @ Cygnus Support This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. 
If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* @deftypefn Extension char* concat (const char *@var{s1}, const char *@var{s2}, @dots{}, @code{NULL}) Concatenate zero or more of strings and return the result in freshly @code{xmalloc}ed memory. Returns @code{NULL} if insufficient memory is available. The argument list is terminated by the first @code{NULL} pointer encountered. Pointers to empty strings are ignored. @end deftypefn NOTES This function uses xmalloc() which is expected to be a front end function to malloc() that deals with low memory situations. In typical use, if malloc() returns NULL then xmalloc() diverts to an error handler routine which never returns, and thus xmalloc will never return a NULL pointer. If the client application wishes to deal with low memory situations itself, it should supply an xmalloc that just directly invokes malloc and blindly returns whatever malloc returns. */ #ifdef HAVE_CONFIG_H #endif #include /* size_t */ #ifdef ANSI_PROTOTYPES #include #else #include #endif # if HAVE_STRING_H # include # else # if HAVE_STRINGS_H # include # endif # endif #if HAVE_STDLIB_H #include #endif static inline unsigned long vconcat_length PARAMS ((const char *, va_list)); static inline unsigned long vconcat_length (first, args) const char *first; va_list args; { unsigned long length = 0; const char *arg; for (arg = first; arg ; arg = va_arg (args, const char *)) length += strlen (arg); return length; } static inline char *vconcat_copy PARAMS ((char *, const char *, va_list)); static inline char * vconcat_copy (dst, first, args) char *dst; const char *first; va_list args; { char *end = dst; const char *arg; for (arg = first; arg ; arg = va_arg (args, const char *)) { unsigned long length = strlen (arg); memcpy (end, arg, length); end += length; } *end = '\000'; return dst; } /* @undocumented concat_length */ unsigned long concat_length VPARAMS ((const char *first, ...)) { unsigned long length; VA_OPEN (args, first); VA_FIXEDARG (args, const char *, first); length = vconcat_length (first, args); VA_CLOSE (args); return length; } /* @undocumented concat_copy */ char * concat_copy VPARAMS ((char *dst, const char *first, ...)) { char *save_dst; VA_OPEN (args, first); VA_FIXEDARG (args, char *, dst); VA_FIXEDARG (args, const char *, first); vconcat_copy (dst, first, args); save_dst = dst; /* With K&R C, dst goes out of scope here. */ VA_CLOSE (args); return save_dst; } char *libiberty_concat_ptr; /* @undocumented concat_copy2 */ char * concat_copy2 VPARAMS ((const char *first, ...)) { VA_OPEN (args, first); VA_FIXEDARG (args, const char *, first); vconcat_copy (libiberty_concat_ptr, first, args); VA_CLOSE (args); return libiberty_concat_ptr; } char * concat VPARAMS ((const char *first, ...)) { char *newstr; /* First compute the size of the result and get sufficient memory. */ VA_OPEN (args, first); VA_FIXEDARG (args, const char *, first); newstr = (char *) xmalloc (vconcat_length (first, args) + 1); VA_CLOSE (args); /* Now copy the individual pieces to the result string. */ VA_OPEN (args, first); VA_FIXEDARG (args, const char *, first); vconcat_copy (newstr, first, args); VA_CLOSE (args); return newstr; } /* @deftypefn Extension char* reconcat (char *@var{optr}, const char *@var{s1}, @dots{}, @code{NULL}) Same as @code{concat}, except that if @var{optr} is not @code{NULL} it is freed after the string is created. 
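As an aside (editor's illustration, not part of the original documentation), the trailing @code{NULL} sentinel is mandatory for both functions, since the argument list is scanned only up to the first @code{NULL} pointer.  A typical call to @code{concat} looks like:

@example
char *msg = concat ("error: ", filename, ": ", reason, NULL);
  ... use msg, then free (msg) ...
@end example

@var{filename} and @var{reason} here are hypothetical strings.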
This is intended to be useful when you're extending an existing string or building up a string in a loop: @example str = reconcat (str, "pre-", str, NULL); @end example @end deftypefn */ char * reconcat VPARAMS ((char *optr, const char *first, ...)) { char *newstr; /* First compute the size of the result and get sufficient memory. */ VA_OPEN (args, first); VA_FIXEDARG (args, char *, optr); VA_FIXEDARG (args, const char *, first); newstr = (char *) xmalloc (vconcat_length (first, args) + 1); VA_CLOSE (args); /* Now copy the individual pieces to the result string. */ VA_OPEN (args, first); VA_FIXEDARG (args, char *, optr); VA_FIXEDARG (args, const char *, first); vconcat_copy (newstr, first, args); if (optr) /* Done before VA_CLOSE so optr stays in scope for K&R C. */ free (optr); VA_CLOSE (args); return newstr; } #ifdef MAIN #define NULLP (char *)0 /* Simple little test driver. */ #include int main () { printf ("\"\" = \"%s\"\n", concat (NULLP)); printf ("\"a\" = \"%s\"\n", concat ("a", NULLP)); printf ("\"ab\" = \"%s\"\n", concat ("a", "b", NULLP)); printf ("\"abc\" = \"%s\"\n", concat ("a", "b", "c", NULLP)); printf ("\"abcd\" = \"%s\"\n", concat ("ab", "cd", NULLP)); printf ("\"abcde\" = \"%s\"\n", concat ("ab", "c", "de", NULLP)); printf ("\"abcdef\" = \"%s\"\n", concat ("", "a", "", "bcd", "ef", NULLP)); return 0; } #endif /* Demangler component interface functions. Copyright (C) 2004 Free Software Foundation, Inc. Written by Ian Lance Taylor . This file is part of the libiberty library, which is part of GCC. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combined executable.) This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file implements a few interface functions which are provided for use with struct demangle_component trees. These functions are declared in demangle.h. These functions are closely tied to the demangler code in cp-demangle.c, and other interface functions can be found in that file. We put these functions in a separate file because they are not needed by the demangler, and so we avoid having them pulled in by programs which only need the demangler. */ #ifdef HAVE_CONFIG_H #endif #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_STRING_H #include #endif /* Fill in most component types. 
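   For instance (editor's sketch, not part of the original sources), a
   caller can assemble a small component tree describing the type
   "int *" entirely in storage it owns:

     struct demangle_component int_type, ptr_type;

     if (cplus_demangle_fill_builtin_type (&int_type, "int")
         && cplus_demangle_fill_component (&ptr_type,
                                           DEMANGLE_COMPONENT_POINTER,
                                           &int_type, NULL))
       {
         ... ptr_type now has int_type as its single subtree ...
       }

   Each fill function returns 0 and leaves the component unmodified
   when its arguments are not valid for the requested component type.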
*/ int cplus_demangle_fill_component (p, type, left, right) struct demangle_component *p; enum demangle_component_type type; struct demangle_component *left; struct demangle_component *right; { if (p == NULL) return 0; switch (type) { case DEMANGLE_COMPONENT_QUAL_NAME: case DEMANGLE_COMPONENT_LOCAL_NAME: case DEMANGLE_COMPONENT_TYPED_NAME: case DEMANGLE_COMPONENT_TEMPLATE: case DEMANGLE_COMPONENT_CONSTRUCTION_VTABLE: case DEMANGLE_COMPONENT_VENDOR_TYPE_QUAL: case DEMANGLE_COMPONENT_FUNCTION_TYPE: case DEMANGLE_COMPONENT_ARRAY_TYPE: case DEMANGLE_COMPONENT_PTRMEM_TYPE: case DEMANGLE_COMPONENT_ARGLIST: case DEMANGLE_COMPONENT_TEMPLATE_ARGLIST: case DEMANGLE_COMPONENT_UNARY: case DEMANGLE_COMPONENT_BINARY: case DEMANGLE_COMPONENT_BINARY_ARGS: case DEMANGLE_COMPONENT_TRINARY: case DEMANGLE_COMPONENT_TRINARY_ARG1: case DEMANGLE_COMPONENT_TRINARY_ARG2: case DEMANGLE_COMPONENT_LITERAL: case DEMANGLE_COMPONENT_LITERAL_NEG: break; /* These component types only have one subtree. */ case DEMANGLE_COMPONENT_VTABLE: case DEMANGLE_COMPONENT_VTT: case DEMANGLE_COMPONENT_TYPEINFO: case DEMANGLE_COMPONENT_TYPEINFO_NAME: case DEMANGLE_COMPONENT_TYPEINFO_FN: case DEMANGLE_COMPONENT_THUNK: case DEMANGLE_COMPONENT_VIRTUAL_THUNK: case DEMANGLE_COMPONENT_COVARIANT_THUNK: case DEMANGLE_COMPONENT_JAVA_CLASS: case DEMANGLE_COMPONENT_GUARD: case DEMANGLE_COMPONENT_REFTEMP: case DEMANGLE_COMPONENT_RESTRICT: case DEMANGLE_COMPONENT_VOLATILE: case DEMANGLE_COMPONENT_CONST: case DEMANGLE_COMPONENT_RESTRICT_THIS: case DEMANGLE_COMPONENT_VOLATILE_THIS: case DEMANGLE_COMPONENT_CONST_THIS: case DEMANGLE_COMPONENT_POINTER: case DEMANGLE_COMPONENT_REFERENCE: case DEMANGLE_COMPONENT_COMPLEX: case DEMANGLE_COMPONENT_IMAGINARY: case DEMANGLE_COMPONENT_VENDOR_TYPE: case DEMANGLE_COMPONENT_CAST: if (right != NULL) return 0; break; default: /* Other types do not use subtrees. */ return 0; } p->type = type; p->u.s_binary.left = left; p->u.s_binary.right = right; return 1; } /* Fill in a DEMANGLE_COMPONENT_BUILTIN_TYPE. */ int cplus_demangle_fill_builtin_type (p, typename) struct demangle_component *p; const char *typename; { int len; unsigned int i; if (p == NULL || typename == NULL) return 0; len = strlen (typename); for (i = 0; i < D_BUILTIN_TYPE_COUNT; ++i) { if (len == cplus_demangle_builtin_types[i].len && strcmp (typename, cplus_demangle_builtin_types[i].name) == 0) { p->type = DEMANGLE_COMPONENT_BUILTIN_TYPE; p->u.s_builtin.type = &cplus_demangle_builtin_types[i]; return 1; } } return 0; } /* Fill in a DEMANGLE_COMPONENT_OPERATOR. */ int cplus_demangle_fill_operator (p, opname, args) struct demangle_component *p; const char *opname; int args; { int len; unsigned int i; if (p == NULL || opname == NULL) return 0; len = strlen (opname); for (i = 0; cplus_demangle_operators[i].name != NULL; ++i) { if (len == cplus_demangle_operators[i].len && args == cplus_demangle_operators[i].args && strcmp (opname, cplus_demangle_operators[i].name) == 0) { p->type = DEMANGLE_COMPONENT_OPERATOR; p->u.s_operator.op = &cplus_demangle_operators[i]; return 1; } } return 0; } /* Translate a mangled name into components. 
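   For example (editor's sketch, not part of the original sources), a
   caller that wants the component tree for a mangled symbol might
   write:

     void *storage;
     struct demangle_component *tree;

     tree = cplus_demangle_v3_components ("_Z3fooi", DMGL_PARAMS, &storage);
     if (tree != NULL)
       {
         ... walk the tree ...
         free (storage);
       }

   On success the memory backing every node of the tree is returned
   through the third argument and is released with a single call to
   free; individual nodes are not freed separately.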
*/ struct demangle_component * cplus_demangle_v3_components (mangled, options, mem) const char *mangled; int options; void **mem; { size_t len; int type; struct d_info di; struct demangle_component *dc; len = strlen (mangled); if (mangled[0] == '_' && mangled[1] == 'Z') type = 0; else { if ((options & DMGL_TYPES) == 0) return NULL; type = 1; } cplus_demangle_init_info (mangled, options, len, &di); di.comps = ((struct demangle_component *) malloc (di.num_comps * sizeof (struct demangle_component))); di.subs = ((struct demangle_component **) malloc (di.num_subs * sizeof (struct demangle_component *))); if (di.comps == NULL || di.subs == NULL) { if (di.comps != NULL) free (di.comps); if (di.subs != NULL) free (di.subs); return NULL; } if (! type) dc = cplus_demangle_mangled_name (&di, 1); else dc = cplus_demangle_type (&di); /* If DMGL_PARAMS is set, then if we didn't consume the entire mangled string, then we didn't successfully demangle it. */ if ((options & DMGL_PARAMS) != 0 && d_peek_char (&di) != '\0') dc = NULL; free (di.subs); if (dc != NULL) *mem = di.comps; else free (di.comps); return dc; } /* An abstract string datatype. Copyright (C) 1998, 1999, 2000, 2002, 2004 Free Software Foundation, Inc. Contributed by Mark Mitchell (mark@markmitchell.com). This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. In addition to the permissions in the GNU General Public License, the Free Software Foundation gives you unlimited permission to link the compiled version of this file into combinations with other programs, and to distribute those combinations without any restriction coming from the use of this file. (The General Public License restrictions do apply in other respects; for example, they cover modification of the file, and distribution when not linked into a combined executable.) GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #endif #include #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_STDLIB_H #include #endif /* Performs in-place initialization of a dyn_string struct. This function can be used with a dyn_string struct on the stack or embedded in another object. The contents of of the string itself are still dynamically allocated. The string initially is capable of holding at least SPACE characeters, including the terminating NUL. If SPACE is 0, it will silently be increated to 1. If RETURN_ON_ALLOCATION_FAILURE is defined and memory allocation fails, returns 0. Otherwise returns 1. */ int dyn_string_init (ds_struct_ptr, space) struct dyn_string *ds_struct_ptr; int space; { /* We need at least one byte in which to store the terminating NUL. 
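   (Editor's aside, not part of the original sources: seen from client
   code, the usual pattern is to create a string, grow it, and then
   delete it or take ownership of its buffer, for example

     dyn_string_t buf = dyn_string_new (16);

     dyn_string_append_cstr (buf, "hello, ");
     dyn_string_append_cstr (buf, "world");
     ... buf->s now holds "hello, world", buf->length is 12 ...
     dyn_string_delete (buf);

   where the capacity grows automatically as the string grows.)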
*/ if (space == 0) space = 1; #ifdef RETURN_ON_ALLOCATION_FAILURE ds_struct_ptr->s = (char *) malloc (space); if (ds_struct_ptr->s == NULL) return 0; #else ds_struct_ptr->s = (char *) xmalloc (space); #endif ds_struct_ptr->allocated = space; ds_struct_ptr->length = 0; ds_struct_ptr->s[0] = '\0'; return 1; } /* Create a new dynamic string capable of holding at least SPACE characters, including the terminating NUL. If SPACE is 0, it will be silently increased to 1. If RETURN_ON_ALLOCATION_FAILURE is defined and memory allocation fails, returns NULL. Otherwise returns the newly allocated string. */ dyn_string_t dyn_string_new (space) int space; { dyn_string_t result; #ifdef RETURN_ON_ALLOCATION_FAILURE result = (dyn_string_t) malloc (sizeof (struct dyn_string)); if (result == NULL) return NULL; if (!dyn_string_init (result, space)) { free (result); return NULL; } #else result = (dyn_string_t) xmalloc (sizeof (struct dyn_string)); dyn_string_init (result, space); #endif return result; } /* Free the memory used by DS. */ void dyn_string_delete (ds) dyn_string_t ds; { free (ds->s); free (ds); } /* Returns the contents of DS in a buffer allocated with malloc. It is the caller's responsibility to deallocate the buffer using free. DS is then set to the empty string. Deletes DS itself. */ char* dyn_string_release (ds) dyn_string_t ds; { /* Store the old buffer. */ char* result = ds->s; /* The buffer is no longer owned by DS. */ ds->s = NULL; /* Delete DS. */ free (ds); /* Return the old buffer. */ return result; } /* Increase the capacity of DS so it can hold at least SPACE characters, plus the terminating NUL. This function will not (at present) reduce the capacity of DS. Returns DS on success. If RETURN_ON_ALLOCATION_FAILURE is defined and a memory allocation operation fails, deletes DS and returns NULL. */ dyn_string_t dyn_string_resize (ds, space) dyn_string_t ds; int space; { int new_allocated = ds->allocated; /* Increase SPACE to hold the NUL termination. */ ++space; /* Increase allocation by factors of two. */ while (space > new_allocated) new_allocated *= 2; if (new_allocated != ds->allocated) { ds->allocated = new_allocated; /* We actually need more space. */ #ifdef RETURN_ON_ALLOCATION_FAILURE ds->s = (char *) realloc (ds->s, ds->allocated); if (ds->s == NULL) { free (ds); return NULL; } #else ds->s = (char *) xrealloc (ds->s, ds->allocated); #endif } return ds; } /* Sets the contents of DS to the empty string. */ void dyn_string_clear (ds) dyn_string_t ds; { /* A dyn_string always has room for at least the NUL terminator. */ ds->s[0] = '\0'; ds->length = 0; } /* Makes the contents of DEST the same as the contents of SRC. DEST and SRC must be distinct. Returns 1 on success. On failure, if RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_copy (dest, src) dyn_string_t dest; dyn_string_t src; { if (dest == src) abort (); /* Make room in DEST. */ if (dyn_string_resize (dest, src->length) == NULL) return 0; /* Copy DEST into SRC. */ strcpy (dest->s, src->s); /* Update the size of DEST. */ dest->length = src->length; return 1; } /* Copies SRC, a NUL-terminated string, into DEST. Returns 1 on success. On failure, if RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_copy_cstr (dest, src) dyn_string_t dest; const char *src; { int length = strlen (src); /* Make room in DEST. */ if (dyn_string_resize (dest, length) == NULL) return 0; /* Copy DEST into SRC. */ strcpy (dest->s, src); /* Update the size of DEST. 
*/ dest->length = length; return 1; } /* Inserts SRC at the beginning of DEST. DEST is expanded as necessary. SRC and DEST must be distinct. Returns 1 on success. On failure, if RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_prepend (dest, src) dyn_string_t dest; dyn_string_t src; { return dyn_string_insert (dest, 0, src); } /* Inserts SRC, a NUL-terminated string, at the beginning of DEST. DEST is expanded as necessary. Returns 1 on success. On failure, if RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_prepend_cstr (dest, src) dyn_string_t dest; const char *src; { return dyn_string_insert_cstr (dest, 0, src); } /* Inserts SRC into DEST starting at position POS. DEST is expanded as necessary. SRC and DEST must be distinct. Returns 1 on success. On failure, if RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_insert (dest, pos, src) dyn_string_t dest; int pos; dyn_string_t src; { int i; if (src == dest) abort (); if (dyn_string_resize (dest, dest->length + src->length) == NULL) return 0; /* Make room for the insertion. Be sure to copy the NUL. */ for (i = dest->length; i >= pos; --i) dest->s[i + src->length] = dest->s[i]; /* Splice in the new stuff. */ strncpy (dest->s + pos, src->s, src->length); /* Compute the new length. */ dest->length += src->length; return 1; } /* Inserts SRC, a NUL-terminated string, into DEST starting at position POS. DEST is expanded as necessary. Returns 1 on success. On failure, RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_insert_cstr (dest, pos, src) dyn_string_t dest; int pos; const char *src; { int i; int length = strlen (src); if (dyn_string_resize (dest, dest->length + length) == NULL) return 0; /* Make room for the insertion. Be sure to copy the NUL. */ for (i = dest->length; i >= pos; --i) dest->s[i + length] = dest->s[i]; /* Splice in the new stuff. */ strncpy (dest->s + pos, src, length); /* Compute the new length. */ dest->length += length; return 1; } /* Inserts character C into DEST starting at position POS. DEST is expanded as necessary. Returns 1 on success. On failure, RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_insert_char (dest, pos, c) dyn_string_t dest; int pos; int c; { int i; if (dyn_string_resize (dest, dest->length + 1) == NULL) return 0; /* Make room for the insertion. Be sure to copy the NUL. */ for (i = dest->length; i >= pos; --i) dest->s[i + 1] = dest->s[i]; /* Add the new character. */ dest->s[pos] = c; /* Compute the new length. */ ++dest->length; return 1; } /* Append S to DS, resizing DS if necessary. Returns 1 on success. On failure, if RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_append (dest, s) dyn_string_t dest; dyn_string_t s; { if (dyn_string_resize (dest, dest->length + s->length) == 0) return 0; strcpy (dest->s + dest->length, s->s); dest->length += s->length; return 1; } /* Append the NUL-terminated string S to DS, resizing DS if necessary. Returns 1 on success. On failure, if RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_append_cstr (dest, s) dyn_string_t dest; const char *s; { int len = strlen (s); /* The new length is the old length plus the size of our string, plus one for the null at the end. */ if (dyn_string_resize (dest, dest->length + len) == NULL) return 0; strcpy (dest->s + dest->length, s); dest->length += len; return 1; } /* Appends C to the end of DEST. Returns 1 on success. 
On failiure, if RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_append_char (dest, c) dyn_string_t dest; int c; { /* Make room for the extra character. */ if (dyn_string_resize (dest, dest->length + 1) == NULL) return 0; /* Append the character; it will overwrite the old NUL. */ dest->s[dest->length] = c; /* Add a new NUL at the end. */ dest->s[dest->length + 1] = '\0'; /* Update the length. */ ++(dest->length); return 1; } /* Sets the contents of DEST to the substring of SRC starting at START and ending before END. START must be less than or equal to END, and both must be between zero and the length of SRC, inclusive. Returns 1 on success. On failure, if RETURN_ON_ALLOCATION_FAILURE, deletes DEST and returns 0. */ int dyn_string_substring (dest, src, start, end) dyn_string_t dest; dyn_string_t src; int start; int end; { int i; int length = end - start; if (start > end || start > src->length || end > src->length) abort (); /* Make room for the substring. */ if (dyn_string_resize (dest, length) == NULL) return 0; /* Copy the characters in the substring, */ for (i = length; --i >= 0; ) dest->s[i] = src->s[start + i]; /* NUL-terimate the result. */ dest->s[length] = '\0'; /* Record the length of the substring. */ dest->length = length; return 1; } /* Returns non-zero if DS1 and DS2 have the same contents. */ int dyn_string_eq (ds1, ds2) dyn_string_t ds1; dyn_string_t ds2; { /* If DS1 and DS2 have different lengths, they must not be the same. */ if (ds1->length != ds2->length) return 0; else return !strcmp (ds1->s, ds2->s); } /* Compare two open file descriptors to see if they refer to the same file. Copyright (C) 1991 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* @deftypefn Extension int fdmatch (int @var{fd1}, int @var{fd2}) Check to see if two open file descriptors refer to the same file. This is useful, for example, when we have an open file descriptor for an unnamed file, and the name of a file that we believe to correspond to that fd. This can happen when we are exec'd with an already open file (@code{stdout} for example) or from the SVR4 @file{/proc} calls that return open file descriptors for mapped address spaces. All we have to do is open the file by name and check the two file descriptors for a match, which is done by comparing major and minor device numbers and inode numbers. @end deftypefn BUGS (FIXME: does this work for networks?) It works for NFS, which assigns a device number to each mount. */ #include #include int fdmatch (fd1, fd2) int fd1; int fd2; { struct stat sbuf1; struct stat sbuf2; if ((fstat (fd1, &sbuf1) == 0) && (fstat (fd2, &sbuf2) == 0) && (sbuf1.st_dev == sbuf2.st_dev) && (sbuf1.st_ino == sbuf2.st_ino)) { return (1); } else { return (0); } } /* A Fibonacci heap datatype. 
Copyright 1998, 1999, 2000, 2001 Free Software Foundation, Inc. Contributed by Daniel Berlin (dan@cgsoftware.com). This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#ifdef HAVE_CONFIG_H #endif
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
/* A Fibonacci heap datatype. Copyright 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc. Contributed by Daniel Berlin (dan@cgsoftware.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
/* Fibonacci heaps are somewhat complex, but there's an article in DDJ that explains them pretty well: http://www.ddj.com/articles/1997/9701/9701o/9701o.htm?topic=algoritms Introduction to Algorithms by Cormen, Leiserson, and Rivest also goes over them. The original paper that introduced them is "Fibonacci heaps and their uses in improved network optimization algorithms" by Fredman and Tarjan (JACM 34(3), July 1987).
   Amortized and real worst case time for operations:
   ExtractMin: O(lg n) amortized. O(n) worst case.
   DecreaseKey: O(1) amortized. O(lg n) worst case.
   Insert: O(1) amortized. O(1) actual.
   Union: O(1) amortized. O(1) actual.
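   As a usage sketch (editor's illustration, not part of the original
   sources; item_a, item_b, item_c and consume are hypothetical), a
   client builds a heap, inserts keyed items, and repeatedly extracts
   the minimum:

     fibheap_t heap = fibheap_new ();

     fibheap_insert (heap, 30, item_a);
     fibheap_insert (heap, 10, item_b);
     fibheap_insert (heap, 20, item_c);
     while (!fibheap_empty (heap))
       consume (fibheap_extract_min (heap));
     fibheap_delete (heap);

   The extractions above yield item_b, then item_c, then item_a, in
   increasing key order.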
*/ #ifndef _FIBHEAP_H_ #define _FIBHEAP_H_ typedef long fibheapkey_t; typedef struct fibheap { size_t nodes; struct fibnode *min; struct fibnode *root; } *fibheap_t; typedef struct fibnode { struct fibnode *parent; struct fibnode *child; struct fibnode *left; struct fibnode *right; fibheapkey_t key; void *data; #ifdef __GNUC__ __extension__ unsigned long int degree : 31; __extension__ unsigned long int mark : 1; #else unsigned int degree : 31; unsigned int mark : 1; #endif } *fibnode_t; extern fibheap_t fibheap_new PARAMS ((void)); extern fibnode_t fibheap_insert PARAMS ((fibheap_t, fibheapkey_t, void *)); extern int fibheap_empty PARAMS ((fibheap_t)); extern fibheapkey_t fibheap_min_key PARAMS ((fibheap_t)); extern fibheapkey_t fibheap_replace_key PARAMS ((fibheap_t, fibnode_t, fibheapkey_t)); extern void *fibheap_replace_key_data PARAMS ((fibheap_t, fibnode_t, fibheapkey_t, void *)); extern void *fibheap_extract_min PARAMS ((fibheap_t)); extern void *fibheap_min PARAMS ((fibheap_t)); extern void *fibheap_replace_data PARAMS ((fibheap_t, fibnode_t, void *)); extern void *fibheap_delete_node PARAMS ((fibheap_t, fibnode_t)); extern void fibheap_delete PARAMS ((fibheap_t)); extern fibheap_t fibheap_union PARAMS ((fibheap_t, fibheap_t)); #endif /* _FIBHEAP_H_ */ #define FIBHEAPKEY_MIN LONG_MIN static void fibheap_ins_root PARAMS ((fibheap_t, fibnode_t)); static void fibheap_rem_root PARAMS ((fibheap_t, fibnode_t)); static void fibheap_consolidate PARAMS ((fibheap_t)); static void fibheap_link PARAMS ((fibheap_t, fibnode_t, fibnode_t)); static void fibheap_cut PARAMS ((fibheap_t, fibnode_t, fibnode_t)); static void fibheap_cascading_cut PARAMS ((fibheap_t, fibnode_t)); static fibnode_t fibheap_extr_min_node PARAMS ((fibheap_t)); static int fibheap_compare PARAMS ((fibheap_t, fibnode_t, fibnode_t)); static int fibheap_comp_data PARAMS ((fibheap_t, fibheapkey_t, void *, fibnode_t)); static fibnode_t fibnode_new PARAMS ((void)); static void fibnode_insert_after PARAMS ((fibnode_t, fibnode_t)); #define fibnode_insert_before(a, b) fibnode_insert_after (a->left, b) static fibnode_t fibnode_remove PARAMS ((fibnode_t)); /* Create a new fibonacci heap. */ fibheap_t fibheap_new () { return (fibheap_t) xcalloc (1, sizeof (struct fibheap)); } /* Create a new fibonacci heap node. */ static fibnode_t fibnode_new () { fibnode_t node; node = (fibnode_t) xcalloc (1, sizeof *node); node->left = node; node->right = node; return node; } static inline int fibheap_compare (heap, a, b) fibheap_t heap ATTRIBUTE_UNUSED; fibnode_t a; fibnode_t b; { if (a->key < b->key) return -1; if (a->key > b->key) return 1; return 0; } static inline int fibheap_comp_data (heap, key, data, b) fibheap_t heap; fibheapkey_t key; void *data; fibnode_t b; { struct fibnode a; a.key = key; a.data = data; return fibheap_compare (heap, &a, b); } /* Insert DATA, with priority KEY, into HEAP. */ fibnode_t fibheap_insert (heap, key, data) fibheap_t heap; fibheapkey_t key; void *data; { fibnode_t node; /* Create the new node. */ node = fibnode_new (); /* Set the node's data. */ node->data = data; node->key = key; /* Insert it into the root list. */ fibheap_ins_root (heap, node); /* If their was no minimum, or this key is less than the min, it's the new min. */ if (heap->min == NULL || node->key < heap->min->key) heap->min = node; heap->nodes++; return node; } /* Return the data of the minimum node (if we know it). */ void * fibheap_min (heap) fibheap_t heap; { /* If there is no min, we can't easily return it. 
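   A NULL return is therefore ambiguous when NULL itself was stored as
   a node's data; callers that may store NULL can test fibheap_empty
   first (editor's note), e.g.

     if (!fibheap_empty (heap))
       data = fibheap_min (heap);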
*/ if (heap->min == NULL) return NULL; return heap->min->data; } /* Return the key of the minimum node (if we know it). */ fibheapkey_t fibheap_min_key (heap) fibheap_t heap; { /* If there is no min, we can't easily return it. */ if (heap->min == NULL) return 0; return heap->min->key; } /* Union HEAPA and HEAPB into a new heap. */ fibheap_t fibheap_union (heapa, heapb) fibheap_t heapa; fibheap_t heapb; { fibnode_t a_root, b_root, temp; /* If one of the heaps is empty, the union is just the other heap. */ if ((a_root = heapa->root) == NULL) { free (heapa); return heapb; } if ((b_root = heapb->root) == NULL) { free (heapb); return heapa; } /* Merge them to the next nodes on the opposite chain. */ a_root->left->right = b_root; b_root->left->right = a_root; temp = a_root->left; a_root->left = b_root->left; b_root->left = temp; heapa->nodes += heapb->nodes; /* And set the new minimum, if it's changed. */ if (fibheap_compare (heapa, heapb->min, heapa->min) < 0) heapa->min = heapb->min; free (heapb); return heapa; } /* Extract the data of the minimum node from HEAP. */ void * fibheap_extract_min (heap) fibheap_t heap; { fibnode_t z; void *ret = NULL; /* If we don't have a min set, it means we have no nodes. */ if (heap->min != NULL) { /* Otherwise, extract the min node, free the node, and return the node's data. */ z = fibheap_extr_min_node (heap); ret = z->data; free (z); } return ret; } /* Replace both the KEY and the DATA associated with NODE. */ void * fibheap_replace_key_data (heap, node, key, data) fibheap_t heap; fibnode_t node; fibheapkey_t key; void *data; { void *odata; fibheapkey_t okey; fibnode_t y; /* If we wanted to, we could actually do a real increase by redeleting and inserting. However, this would require O (log n) time. So just bail out for now. */ if (fibheap_comp_data (heap, key, data, node) > 0) return NULL; odata = node->data; okey = node->key; node->data = data; node->key = key; y = node->parent; if (okey == key) return odata; /* These two compares are specifically <= 0 to make sure that in the case of equality, a node we replaced the data on, becomes the new min. This is needed so that delete's call to extractmin gets the right node. */ if (y != NULL && fibheap_compare (heap, node, y) <= 0) { fibheap_cut (heap, node, y); fibheap_cascading_cut (heap, y); } if (fibheap_compare (heap, node, heap->min) <= 0) heap->min = node; return odata; } /* Replace the DATA associated with NODE. */ void * fibheap_replace_data (heap, node, data) fibheap_t heap; fibnode_t node; void *data; { return fibheap_replace_key_data (heap, node, node->key, data); } /* Replace the KEY associated with NODE. */ fibheapkey_t fibheap_replace_key (heap, node, key) fibheap_t heap; fibnode_t node; fibheapkey_t key; { int okey = node->key; fibheap_replace_key_data (heap, node, key, node->data); return okey; } /* Delete NODE from HEAP. */ void * fibheap_delete_node (heap, node) fibheap_t heap; fibnode_t node; { void *ret = node->data; /* To perform delete, we just make it the min key, and extract. */ fibheap_replace_key (heap, node, FIBHEAPKEY_MIN); fibheap_extract_min (heap); return ret; } /* Delete HEAP. */ void fibheap_delete (heap) fibheap_t heap; { while (heap->min != NULL) free (fibheap_extr_min_node (heap)); free (heap); } /* Determine if HEAP is empty. */ int fibheap_empty (heap) fibheap_t heap; { return heap->nodes == 0; } /* Extract the minimum node of the heap. 
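   (Editor's note: the public entry points reach this routine through
   fibheap_extract_min and fibheap_delete_node; the latter works by
   first lowering the node's key to FIBHEAPKEY_MIN.  From client code a
   plain decrease-key is just

     fibheap_replace_key (heap, node, new_key);

   where node was returned earlier by fibheap_insert and new_key is no
   larger than the current key; an attempted increase is refused, and
   fibheap_replace_key_data returns NULL in that case.)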
*/ static fibnode_t fibheap_extr_min_node (heap) fibheap_t heap; { fibnode_t ret = heap->min; fibnode_t x, y, orig; /* Attach the child list of the minimum node to the root list of the heap. If there is no child list, we don't do squat. */ for (x = ret->child, orig = NULL; x != orig && x != NULL; x = y) { if (orig == NULL) orig = x; y = x->right; x->parent = NULL; fibheap_ins_root (heap, x); } /* Remove the old root. */ fibheap_rem_root (heap, ret); heap->nodes--; /* If we are left with no nodes, then the min is NULL. */ if (heap->nodes == 0) heap->min = NULL; else { /* Otherwise, consolidate to find new minimum, as well as do the reorg work that needs to be done. */ heap->min = ret->right; fibheap_consolidate (heap); } return ret; } /* Insert NODE into the root list of HEAP. */ static void fibheap_ins_root (heap, node) fibheap_t heap; fibnode_t node; { /* If the heap is currently empty, the new node becomes the singleton circular root list. */ if (heap->root == NULL) { heap->root = node; node->left = node; node->right = node; return; } /* Otherwise, insert it in the circular root list between the root and it's right node. */ fibnode_insert_after (heap->root, node); } /* Remove NODE from the rootlist of HEAP. */ static void fibheap_rem_root (heap, node) fibheap_t heap; fibnode_t node; { if (node->left == node) heap->root = NULL; else heap->root = fibnode_remove (node); } /* Consolidate the heap. */ static void fibheap_consolidate (heap) fibheap_t heap; { fibnode_t a[1 + 8 * sizeof (long)]; fibnode_t w; fibnode_t y; fibnode_t x; int i; int d; int D; D = 1 + 8 * sizeof (long); memset (a, 0, sizeof (fibnode_t) * D); while ((w = heap->root) != NULL) { x = w; fibheap_rem_root (heap, w); d = x->degree; while (a[d] != NULL) { y = a[d]; if (fibheap_compare (heap, x, y) > 0) { fibnode_t temp; temp = x; x = y; y = temp; } fibheap_link (heap, y, x); a[d] = NULL; d++; } a[d] = x; } heap->min = NULL; for (i = 0; i < D; i++) if (a[i] != NULL) { fibheap_ins_root (heap, a[i]); if (heap->min == NULL || fibheap_compare (heap, a[i], heap->min) < 0) heap->min = a[i]; } } /* Make NODE a child of PARENT. */ static void fibheap_link (heap, node, parent) fibheap_t heap ATTRIBUTE_UNUSED; fibnode_t node; fibnode_t parent; { if (parent->child == NULL) parent->child = node; else fibnode_insert_before (parent->child, node); node->parent = parent; parent->degree++; node->mark = 0; } /* Remove NODE from PARENT's child list. */ static void fibheap_cut (heap, node, parent) fibheap_t heap; fibnode_t node; fibnode_t parent; { fibnode_remove (node); parent->degree--; fibheap_ins_root (heap, node); node->parent = NULL; node->mark = 0; } static void fibheap_cascading_cut (heap, y) fibheap_t heap; fibnode_t y; { fibnode_t z; while ((z = y->parent) != NULL) { if (y->mark == 0) { y->mark = 1; return; } else { fibheap_cut (heap, y, z); y = z; } } } static void fibnode_insert_after (a, b) fibnode_t a; fibnode_t b; { if (a == a->right) { a->right = b; a->left = b; b->right = a; b->left = a; } else { b->right = a->right; a->right->left = b; a->right = b; b->left = a; } } static fibnode_t fibnode_remove (node) fibnode_t node; { fibnode_t ret; if (node == node->left) ret = NULL; else ret = node->left; if (node->parent != NULL && node->parent->child == node) node->parent->child = ret; node->right->left = node->left; node->left->right = node->right; node->parent = NULL; node->left = node; node->right = node; return ret; } /* IEEE floating point support routines, for GDB, the GNU Debugger. 
Copyright (C) 1991, 1994, 1999, 2000, 2003 Free Software Foundation, Inc. This file is part of GDB. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This is needed to pick up the NAN macro on some systems. */ #define _GNU_SOURCE #ifdef HAVE_CONFIG_H #endif #include #ifdef HAVE_STRING_H #include #endif /* IEEE floating point support declarations, for GDB, the GNU Debugger. Copyright 1991, 1994, 1995, 1997, 2000, 2003 Free Software Foundation, Inc. This file is part of GDB. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #if !defined (FLOATFORMAT_H) #define FLOATFORMAT_H 1 /* A floatformat consists of a sign bit, an exponent and a mantissa. Once the bytes are concatenated according to the byteorder flag, then each of those fields is contiguous. We number the bits with 0 being the most significant (i.e. BITS_BIG_ENDIAN type numbering), and specify which bits each field contains with the *_start and *_len fields. */ /* What is the order of the bytes. */ enum floatformat_byteorders { /* Standard little endian byte order. EX: 1.2345678e10 => 00 00 80 c5 e0 fe 06 42 */ floatformat_little, /* Standard big endian byte order. EX: 1.2345678e10 => 42 06 fe e0 c5 80 00 00 */ floatformat_big, /* Little endian byte order but big endian word order. EX: 1.2345678e10 => e0 fe 06 42 00 00 80 c5 */ floatformat_littlebyte_bigword }; enum floatformat_intbit { floatformat_intbit_yes, floatformat_intbit_no }; struct floatformat { enum floatformat_byteorders byteorder; unsigned int totalsize; /* Total size of number in bits */ /* Sign bit is always one bit long. 1 means negative, 0 means positive. */ unsigned int sign_start; unsigned int exp_start; unsigned int exp_len; /* Bias added to a "true" exponent to form the biased exponent. It is intentionally signed as, otherwize, -exp_bias can turn into a very large number (e.g., given the exp_bias of 0x3fff and a 64 bit long, the equation (long)(1 - exp_bias) evaluates to 4294950914) instead of -16382). */ int exp_bias; /* Exponent value which indicates NaN. This is the actual value stored in the float, not adjusted by the exp_bias. This usually consists of all one bits. */ unsigned int exp_nan; unsigned int man_start; unsigned int man_len; /* Is the integer bit explicit or implicit? 
*/ enum floatformat_intbit intbit; /* Internal name for debugging. */ const char *name; /* Validator method. */ int (*is_valid) PARAMS ((const struct floatformat *fmt, const char *from)); }; /* floatformats for IEEE single and double, big and little endian. */ extern const struct floatformat floatformat_ieee_single_big; extern const struct floatformat floatformat_ieee_single_little; extern const struct floatformat floatformat_ieee_double_big; extern const struct floatformat floatformat_ieee_double_little; /* floatformat for ARM IEEE double, little endian bytes and big endian words */ extern const struct floatformat floatformat_ieee_double_littlebyte_bigword; /* floatformats for various extendeds. */ extern const struct floatformat floatformat_i387_ext; extern const struct floatformat floatformat_m68881_ext; extern const struct floatformat floatformat_i960_ext; extern const struct floatformat floatformat_m88110_ext; extern const struct floatformat floatformat_m88110_harris_ext; extern const struct floatformat floatformat_arm_ext_big; extern const struct floatformat floatformat_arm_ext_littlebyte_bigword; /* IA-64 Floating Point register spilt into memory. */ extern const struct floatformat floatformat_ia64_spill_big; extern const struct floatformat floatformat_ia64_spill_little; extern const struct floatformat floatformat_ia64_quad_big; extern const struct floatformat floatformat_ia64_quad_little; /* Convert from FMT to a double. FROM is the address of the extended float. Store the double in *TO. */ extern void floatformat_to_double PARAMS ((const struct floatformat *, const char *, double *)); /* The converse: convert the double *FROM to FMT and store where TO points. */ extern void floatformat_from_double PARAMS ((const struct floatformat *, const double *, char *)); /* Return non-zero iff the data at FROM is a valid number in format FMT. */ extern int floatformat_is_valid PARAMS ((const struct floatformat *fmt, const char *from)); #endif /* defined (FLOATFORMAT_H) */ #ifndef INFINITY #ifdef HUGE_VAL #define INFINITY HUGE_VAL #else #define INFINITY (1.0 / 0.0) #endif #endif #ifndef NAN #define NAN (0.0 / 0.0) #endif static unsigned long get_field PARAMS ((const unsigned char *, enum floatformat_byteorders, unsigned int, unsigned int, unsigned int)); static int floatformat_always_valid PARAMS ((const struct floatformat *fmt, const char *from)); static int floatformat_always_valid (fmt, from) const struct floatformat *fmt ATTRIBUTE_UNUSED; const char *from ATTRIBUTE_UNUSED; { return 1; } /* The odds that CHAR_BIT will be anything but 8 are low enough that I'm not going to bother with trying to muck around with whether it is defined in a system header, what we do if not, etc. */ #define FLOATFORMAT_CHAR_BIT 8 /* floatformats for IEEE single and double, big and little endian. 
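   (Editor's illustration, not part of the original sources: given
   eight raw bytes known to hold a little-endian IEEE double in a
   hypothetical buffer raw_bytes, the conversion routines declared
   above are used as follows:

     double value;

     if (floatformat_is_valid (&floatformat_ieee_double_little, raw_bytes))
       floatformat_to_double (&floatformat_ieee_double_little,
                              raw_bytes, &value);

   floatformat_from_double performs the reverse conversion into a
   caller-supplied buffer of totalsize / 8 bytes.)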
*/ const struct floatformat floatformat_ieee_single_big = { floatformat_big, 32, 0, 1, 8, 127, 255, 9, 23, floatformat_intbit_no, "floatformat_ieee_single_big", floatformat_always_valid }; const struct floatformat floatformat_ieee_single_little = { floatformat_little, 32, 0, 1, 8, 127, 255, 9, 23, floatformat_intbit_no, "floatformat_ieee_single_little", floatformat_always_valid }; const struct floatformat floatformat_ieee_double_big = { floatformat_big, 64, 0, 1, 11, 1023, 2047, 12, 52, floatformat_intbit_no, "floatformat_ieee_double_big", floatformat_always_valid }; const struct floatformat floatformat_ieee_double_little = { floatformat_little, 64, 0, 1, 11, 1023, 2047, 12, 52, floatformat_intbit_no, "floatformat_ieee_double_little", floatformat_always_valid }; /* floatformat for IEEE double, little endian byte order, with big endian word ordering, as on the ARM. */ const struct floatformat floatformat_ieee_double_littlebyte_bigword = { floatformat_littlebyte_bigword, 64, 0, 1, 11, 1023, 2047, 12, 52, floatformat_intbit_no, "floatformat_ieee_double_littlebyte_bigword", floatformat_always_valid }; static int floatformat_i387_ext_is_valid PARAMS ((const struct floatformat *fmt, const char *from)); static int floatformat_i387_ext_is_valid (fmt, from) const struct floatformat *fmt; const char *from; { /* In the i387 double-extended format, if the exponent is all ones, then the integer bit must be set. If the exponent is neither 0 nor ~0, the intbit must also be set. Only if the exponent is zero can it be zero, and then it must be zero. */ unsigned long exponent, int_bit; const unsigned char *ufrom = (const unsigned char *) from; exponent = get_field (ufrom, fmt->byteorder, fmt->totalsize, fmt->exp_start, fmt->exp_len); int_bit = get_field (ufrom, fmt->byteorder, fmt->totalsize, fmt->man_start, 1); if ((exponent == 0) != (int_bit == 0)) return 0; else return 1; } const struct floatformat floatformat_i387_ext = { floatformat_little, 80, 0, 1, 15, 0x3fff, 0x7fff, 16, 64, floatformat_intbit_yes, "floatformat_i387_ext", floatformat_i387_ext_is_valid }; const struct floatformat floatformat_m68881_ext = { /* Note that the bits from 16 to 31 are unused. */ floatformat_big, 96, 0, 1, 15, 0x3fff, 0x7fff, 32, 64, floatformat_intbit_yes, "floatformat_m68881_ext", floatformat_always_valid }; const struct floatformat floatformat_i960_ext = { /* Note that the bits from 0 to 15 are unused. */ floatformat_little, 96, 16, 17, 15, 0x3fff, 0x7fff, 32, 64, floatformat_intbit_yes, "floatformat_i960_ext", floatformat_always_valid }; const struct floatformat floatformat_m88110_ext = { floatformat_big, 80, 0, 1, 15, 0x3fff, 0x7fff, 16, 64, floatformat_intbit_yes, "floatformat_m88110_ext", floatformat_always_valid }; const struct floatformat floatformat_m88110_harris_ext = { /* Harris uses raw format 128 bytes long, but the number is just an ieee double, and the last 64 bits are wasted. */ floatformat_big,128, 0, 1, 11, 0x3ff, 0x7ff, 12, 52, floatformat_intbit_no, "floatformat_m88110_ext_harris", floatformat_always_valid }; const struct floatformat floatformat_arm_ext_big = { /* Bits 1 to 16 are unused. */ floatformat_big, 96, 0, 17, 15, 0x3fff, 0x7fff, 32, 64, floatformat_intbit_yes, "floatformat_arm_ext_big", floatformat_always_valid }; const struct floatformat floatformat_arm_ext_littlebyte_bigword = { /* Bits 1 to 16 are unused. 
*/ floatformat_littlebyte_bigword, 96, 0, 17, 15, 0x3fff, 0x7fff, 32, 64, floatformat_intbit_yes, "floatformat_arm_ext_littlebyte_bigword", floatformat_always_valid }; const struct floatformat floatformat_ia64_spill_big = { floatformat_big, 128, 0, 1, 17, 65535, 0x1ffff, 18, 64, floatformat_intbit_yes, "floatformat_ia64_spill_big", floatformat_always_valid }; const struct floatformat floatformat_ia64_spill_little = { floatformat_little, 128, 0, 1, 17, 65535, 0x1ffff, 18, 64, floatformat_intbit_yes, "floatformat_ia64_spill_little", floatformat_always_valid }; const struct floatformat floatformat_ia64_quad_big = { floatformat_big, 128, 0, 1, 15, 16383, 0x7fff, 16, 112, floatformat_intbit_no, "floatformat_ia64_quad_big", floatformat_always_valid }; const struct floatformat floatformat_ia64_quad_little = { floatformat_little, 128, 0, 1, 15, 16383, 0x7fff, 16, 112, floatformat_intbit_no, "floatformat_ia64_quad_little", floatformat_always_valid }; /* Extract a field which starts at START and is LEN bits long. DATA and TOTAL_LEN are the thing we are extracting it from, in byteorder ORDER. */ static unsigned long get_field (data, order, total_len, start, len) const unsigned char *data; enum floatformat_byteorders order; unsigned int total_len; unsigned int start; unsigned int len; { unsigned long result; unsigned int cur_byte; int cur_bitshift; /* Start at the least significant part of the field. */ cur_byte = (start + len) / FLOATFORMAT_CHAR_BIT; if (order == floatformat_little) cur_byte = (total_len / FLOATFORMAT_CHAR_BIT) - cur_byte - 1; cur_bitshift = ((start + len) % FLOATFORMAT_CHAR_BIT) - FLOATFORMAT_CHAR_BIT; result = *(data + cur_byte) >> (-cur_bitshift); cur_bitshift += FLOATFORMAT_CHAR_BIT; if (order == floatformat_little) ++cur_byte; else --cur_byte; /* Move towards the most significant part of the field. */ while ((unsigned int) cur_bitshift < len) { if (len - cur_bitshift < FLOATFORMAT_CHAR_BIT) /* This is the last byte; zero out the bits which are not part of this field. */ result |= (*(data + cur_byte) & ((1 << (len - cur_bitshift)) - 1)) << cur_bitshift; else result |= *(data + cur_byte) << cur_bitshift; cur_bitshift += FLOATFORMAT_CHAR_BIT; if (order == floatformat_little) ++cur_byte; else --cur_byte; } return result; } #ifndef min #define min(a, b) ((a) < (b) ? (a) : (b)) #endif /* Convert from FMT to a double. FROM is the address of the extended float. Store the double in *TO. */ void floatformat_to_double (fmt, from, to) const struct floatformat *fmt; const char *from; double *to; { const unsigned char *ufrom = (const unsigned char *)from; double dto; long exponent; unsigned long mant; unsigned int mant_bits, mant_off; int mant_bits_left; int special_exponent; /* It's a NaN, denorm or zero */ exponent = get_field (ufrom, fmt->byteorder, fmt->totalsize, fmt->exp_start, fmt->exp_len); /* If the exponent indicates a NaN, we don't have information to decide what to do. So we handle it like IEEE, except that we don't try to preserve the type of NaN. FIXME. */ if ((unsigned long) exponent == fmt->exp_nan) { int nan; mant_off = fmt->man_start; mant_bits_left = fmt->man_len; nan = 0; while (mant_bits_left > 0) { mant_bits = min (mant_bits_left, 32); if (get_field (ufrom, fmt->byteorder, fmt->totalsize, mant_off, mant_bits) != 0) { /* This is a NaN. 
*/ nan = 1; break; } mant_off += mant_bits; mant_bits_left -= mant_bits; } if (nan) dto = NAN; else dto = INFINITY; if (get_field (ufrom, fmt->byteorder, fmt->totalsize, fmt->sign_start, 1)) dto = -dto; *to = dto; return; } mant_bits_left = fmt->man_len; mant_off = fmt->man_start; dto = 0.0; special_exponent = exponent == 0 || (unsigned long) exponent == fmt->exp_nan; /* Don't bias zero's, denorms or NaNs. */ if (!special_exponent) exponent -= fmt->exp_bias; /* Build the result algebraically. Might go infinite, underflow, etc; who cares. */ /* If this format uses a hidden bit, explicitly add it in now. Otherwise, increment the exponent by one to account for the integer bit. */ if (!special_exponent) { if (fmt->intbit == floatformat_intbit_no) dto = ldexp (1.0, exponent); else exponent++; } while (mant_bits_left > 0) { mant_bits = min (mant_bits_left, 32); mant = get_field (ufrom, fmt->byteorder, fmt->totalsize, mant_off, mant_bits); /* Handle denormalized numbers. FIXME: What should we do for non-IEEE formats? */ if (exponent == 0 && mant != 0) dto += ldexp ((double)mant, (- fmt->exp_bias - mant_bits - (mant_off - fmt->man_start) + 1)); else dto += ldexp ((double)mant, exponent - mant_bits); if (exponent != 0) exponent -= mant_bits; mant_off += mant_bits; mant_bits_left -= mant_bits; } /* Negate it if negative. */ if (get_field (ufrom, fmt->byteorder, fmt->totalsize, fmt->sign_start, 1)) dto = -dto; *to = dto; } static void put_field PARAMS ((unsigned char *, enum floatformat_byteorders, unsigned int, unsigned int, unsigned int, unsigned long)); /* Set a field which starts at START and is LEN bits long. DATA and TOTAL_LEN are the thing we are extracting it from, in byteorder ORDER. */ static void put_field (data, order, total_len, start, len, stuff_to_put) unsigned char *data; enum floatformat_byteorders order; unsigned int total_len; unsigned int start; unsigned int len; unsigned long stuff_to_put; { unsigned int cur_byte; int cur_bitshift; /* Start at the least significant part of the field. */ cur_byte = (start + len) / FLOATFORMAT_CHAR_BIT; if (order == floatformat_little) cur_byte = (total_len / FLOATFORMAT_CHAR_BIT) - cur_byte - 1; cur_bitshift = ((start + len) % FLOATFORMAT_CHAR_BIT) - FLOATFORMAT_CHAR_BIT; *(data + cur_byte) &= ~(((1 << ((start + len) % FLOATFORMAT_CHAR_BIT)) - 1) << (-cur_bitshift)); *(data + cur_byte) |= (stuff_to_put & ((1 << FLOATFORMAT_CHAR_BIT) - 1)) << (-cur_bitshift); cur_bitshift += FLOATFORMAT_CHAR_BIT; if (order == floatformat_little) ++cur_byte; else --cur_byte; /* Move towards the most significant part of the field. */ while ((unsigned int) cur_bitshift < len) { if (len - cur_bitshift < FLOATFORMAT_CHAR_BIT) { /* This is the last byte. */ *(data + cur_byte) &= ~((1 << (len - cur_bitshift)) - 1); *(data + cur_byte) |= (stuff_to_put >> cur_bitshift); } else *(data + cur_byte) = ((stuff_to_put >> cur_bitshift) & ((1 << FLOATFORMAT_CHAR_BIT) - 1)); cur_bitshift += FLOATFORMAT_CHAR_BIT; if (order == floatformat_little) ++cur_byte; else --cur_byte; } } /* The converse: convert the double *FROM to an extended float and store where TO points. Neither FROM nor TO have any alignment restrictions. */ void floatformat_from_double (fmt, from, to) const struct floatformat *fmt; const double *from; char *to; { double dfrom; int exponent; double mant; unsigned int mant_bits, mant_off; int mant_bits_left; unsigned char *uto = (unsigned char *)to; dfrom = *from; memset (uto, 0, fmt->totalsize / FLOATFORMAT_CHAR_BIT); /* If negative, set the sign bit. 
*/ if (dfrom < 0) { put_field (uto, fmt->byteorder, fmt->totalsize, fmt->sign_start, 1, 1); dfrom = -dfrom; } if (dfrom == 0) { /* 0.0. */ return; } if (dfrom != dfrom) { /* NaN. */ put_field (uto, fmt->byteorder, fmt->totalsize, fmt->exp_start, fmt->exp_len, fmt->exp_nan); /* Be sure it's not infinity, but NaN value is irrelevant. */ put_field (uto, fmt->byteorder, fmt->totalsize, fmt->man_start, 32, 1); return; } if (dfrom + dfrom == dfrom) { /* This can only happen for an infinite value (or zero, which we already handled above). */ put_field (uto, fmt->byteorder, fmt->totalsize, fmt->exp_start, fmt->exp_len, fmt->exp_nan); return; } mant = frexp (dfrom, &exponent); if (exponent + fmt->exp_bias - 1 > 0) put_field (uto, fmt->byteorder, fmt->totalsize, fmt->exp_start, fmt->exp_len, exponent + fmt->exp_bias - 1); else { /* Handle a denormalized number. FIXME: What should we do for non-IEEE formats? */ put_field (uto, fmt->byteorder, fmt->totalsize, fmt->exp_start, fmt->exp_len, 0); mant = ldexp (mant, exponent + fmt->exp_bias - 1); } mant_bits_left = fmt->man_len; mant_off = fmt->man_start; while (mant_bits_left > 0) { unsigned long mant_long; mant_bits = mant_bits_left < 32 ? mant_bits_left : 32; mant *= 4294967296.0; mant_long = (unsigned long)mant; mant -= mant_long; /* If the integer bit is implicit, and we are not creating a denormalized number, then we need to discard it. */ if ((unsigned int) mant_bits_left == fmt->man_len && fmt->intbit == floatformat_intbit_no && exponent + fmt->exp_bias - 1 > 0) { mant_long &= 0x7fffffff; mant_bits -= 1; } else if (mant_bits < 32) { /* The bits we want are in the most significant MANT_BITS bits of mant_long. Move them to the least significant. */ mant_long >>= 32 - mant_bits; } put_field (uto, fmt->byteorder, fmt->totalsize, mant_off, mant_bits, mant_long); mant_off += mant_bits; mant_bits_left -= mant_bits; } } /* Return non-zero iff the data at FROM is a valid number in format FMT. */ int floatformat_is_valid (fmt, from) const struct floatformat *fmt; const char *from; { return fmt->is_valid (fmt, from); } #ifdef IEEE_DEBUG #include /* This is to be run on a host which uses IEEE floating point. */ void ieee_test (n) double n; { double result; floatformat_to_double (&floatformat_ieee_double_little, (char *) &n, &result); if ((n != result && (! isnan (n) || ! isnan (result))) || (n < 0 && result >= 0) || (n >= 0 && result < 0)) printf ("Differ(to): %.20g -> %.20g\n", n, result); floatformat_from_double (&floatformat_ieee_double_little, &n, (char *) &result); if ((n != result && (! isnan (n) || ! isnan (result))) || (n < 0 && result >= 0) || (n >= 0 && result < 0)) printf ("Differ(from): %.20g -> %.20g\n", n, result); #if 0 { char exten[16]; floatformat_from_double (&floatformat_m68881_ext, &n, exten); floatformat_to_double (&floatformat_m68881_ext, exten, &result); if (n != result) printf ("Differ(to+from): %.20g -> %.20g\n", n, result); } #endif #if IEEE_DEBUG > 1 /* This is to be run on a host which uses 68881 format. */ { long double ex = *(long double *)exten; if (ex != n) printf ("Differ(from vs. 
extended): %.20g\n", n); } #endif } int main () { ieee_test (0.0); ieee_test (0.5); ieee_test (256.0); ieee_test (0.12345); ieee_test (234235.78907234); ieee_test (-512.0); ieee_test (-0.004321); ieee_test (1.2E-70); ieee_test (1.2E-316); ieee_test (4.9406564584124654E-324); ieee_test (- 4.9406564584124654E-324); ieee_test (- 0.0); ieee_test (- INFINITY); ieee_test (- NAN); ieee_test (INFINITY); ieee_test (NAN); return 0; } #endif /* Copyright (C) 1991, 1992, 1993 Free Software Foundation, Inc. NOTE: This source is derived from an old version taken from the GNU C Library (glibc). This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #if defined (CONFIG_BROKETS) /* We use instead of "config.h" so that a compilation using -I. -I$srcdir will use ./config.h rather than $srcdir/config.h (which it would do because it found this file in $srcdir). */ #else #endif #endif #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif /* This code to undef const added in libiberty. */ #ifndef __STDC__ /* This is a separate conditional since some stdc systems reject `defined (const)'. */ #ifndef const #define const #endif #endif #include #include /* Comment out all this code if we are using the GNU C Library, and are not actually compiling the library itself. This code is part of the GNU C Library, but also included in many other GNU distributions. Compiling and linking in this code is a waste when using the GNU C library (especially if it is a shared library). Rather than having every GNU program understand `configure --with-gnu-libc' and omit the object files, it is simpler to just do this in the source for each such file. */ #if defined (_LIBC) || !defined (__GNU_LIBRARY__) #if !defined(__GNU_LIBRARY__) && !defined(STDC_HEADERS) extern int errno; #endif /* Match STRING against the filename pattern PATTERN, returning zero if it matches, nonzero if not. */ int fnmatch (pattern, string, flags) const char *pattern; const char *string; int flags; { register const char *p = pattern, *n = string; register unsigned char c; #define FOLD(c) ((flags & FNM_CASEFOLD) ? TOLOWER (c) : (c)) while ((c = *p++) != '\0') { c = FOLD (c); switch (c) { case '?': if (*n == '\0') return FNM_NOMATCH; else if ((flags & FNM_FILE_NAME) && *n == '/') return FNM_NOMATCH; else if ((flags & FNM_PERIOD) && *n == '.' && (n == string || ((flags & FNM_FILE_NAME) && n[-1] == '/'))) return FNM_NOMATCH; break; case '\\': if (!(flags & FNM_NOESCAPE)) { c = *p++; c = FOLD (c); } if (FOLD ((unsigned char)*n) != c) return FNM_NOMATCH; break; case '*': if ((flags & FNM_PERIOD) && *n == '.' && (n == string || ((flags & FNM_FILE_NAME) && n[-1] == '/'))) return FNM_NOMATCH; for (c = *p++; c == '?' || c == '*'; c = *p++, ++n) if (((flags & FNM_FILE_NAME) && *n == '/') || (c == '?' && *n == '\0')) return FNM_NOMATCH; if (c == '\0') return 0; { unsigned char c1 = (!(flags & FNM_NOESCAPE) && c == '\\') ? 
*p : c; c1 = FOLD (c1); for (--p; *n != '\0'; ++n) if ((c == '[' || FOLD ((unsigned char)*n) == c1) && fnmatch (p, n, flags & ~FNM_PERIOD) == 0) return 0; return FNM_NOMATCH; } case '[': { /* Nonzero if the sense of the character class is inverted. */ register int not; if (*n == '\0') return FNM_NOMATCH; if ((flags & FNM_PERIOD) && *n == '.' && (n == string || ((flags & FNM_FILE_NAME) && n[-1] == '/'))) return FNM_NOMATCH; not = (*p == '!' || *p == '^'); if (not) ++p; c = *p++; for (;;) { register unsigned char cstart = c, cend = c; if (!(flags & FNM_NOESCAPE) && c == '\\') cstart = cend = *p++; cstart = cend = FOLD (cstart); if (c == '\0') /* [ (unterminated) loses. */ return FNM_NOMATCH; c = *p++; c = FOLD (c); if ((flags & FNM_FILE_NAME) && c == '/') /* [/] can never match. */ return FNM_NOMATCH; if (c == '-' && *p != ']') { cend = *p++; if (!(flags & FNM_NOESCAPE) && cend == '\\') cend = *p++; if (cend == '\0') return FNM_NOMATCH; cend = FOLD (cend); c = *p++; } if (FOLD ((unsigned char)*n) >= cstart && FOLD ((unsigned char)*n) <= cend) goto matched; if (c == ']') break; } if (!not) return FNM_NOMATCH; break; matched:; /* Skip the rest of the [...] that already matched. */ while (c != ']') { if (c == '\0') /* [... (unterminated) loses. */ return FNM_NOMATCH; c = *p++; if (!(flags & FNM_NOESCAPE) && c == '\\') /* XXX 1003.2d11 is unclear if this is right. */ ++p; } if (not) return FNM_NOMATCH; } break; default: if (c != FOLD ((unsigned char)*n)) return FNM_NOMATCH; } ++n; } if (*n == '\0') return 0; if ((flags & FNM_LEADING_DIR) && *n == '/') /* The FNM_LEADING_DIR flag says that "foo*" matches "foobar/frobozz". */ return 0; return FNM_NOMATCH; } #endif /* _LIBC or not __GNU_LIBRARY__. */ /* Getopt for GNU. NOTE: getopt is now part of the C library, so if you don't know what "Keep this file name-space clean" means, talk to drepper@gnu.org before changing it! Copyright (C) 1987, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98 Free Software Foundation, Inc. NOTE: This source is derived from an old version taken from the GNU C Library (glibc). This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This tells Alpha OSF/1 not to define a getopt prototype in . Ditto for AIX 3.2 and . */ #ifndef _NO_PROTO # define _NO_PROTO #endif #ifdef HAVE_CONFIG_H #endif #if !defined __STDC__ || !__STDC__ /* This is a separate conditional since some stdc systems reject `defined (const)'. */ # ifndef const # define const # endif #endif #include /* Comment out all this code if we are using the GNU C Library, and are not actually compiling the library itself. This code is part of the GNU C Library, but also included in many other GNU distributions. Compiling and linking in this code is a waste when using the GNU C library (especially if it is a shared library). 
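/* A minimal usage sketch for the fnmatch implementation above, assuming
   the usual <fnmatch.h> flag macros (FNM_FILE_NAME, FNM_PERIOD) and a
   hosted environment; the file names and the demo function are made up
   for illustration only.  */
#include <fnmatch.h>
#include <stdio.h>

static void
fnmatch_demo (void)
{
  /* '*' and '?' do not cross a '/' when FNM_FILE_NAME is set.  */
  printf ("%d\n", fnmatch ("src/*.c", "src/foo.c", FNM_FILE_NAME) == 0); /* 1 */
  printf ("%d\n", fnmatch ("*.c", "src/foo.c", FNM_FILE_NAME) == 0);     /* 0 */
  /* With FNM_PERIOD a leading dot must be matched explicitly.  */
  printf ("%d\n", fnmatch ("*", ".profile", FNM_PERIOD) == 0);           /* 0 */
  printf ("%d\n", fnmatch (".p*", ".profile", FNM_PERIOD) == 0);         /* 1 */
}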
Rather than having every GNU program understand `configure --with-gnu-libc' and omit the object files, it is simpler to just do this in the source for each such file. */ #define GETOPT_INTERFACE_VERSION 2 #if !defined _LIBC && defined __GLIBC__ && __GLIBC__ >= 2 # include # if _GNU_GETOPT_INTERFACE_VERSION == GETOPT_INTERFACE_VERSION # define ELIDE_CODE # endif #endif #ifndef ELIDE_CODE /* This needs to come after some library #include to get __GNU_LIBRARY__ defined. */ #ifdef __GNU_LIBRARY__ /* Don't include stdlib.h for non-GNU C libraries because some of them contain conflicting prototypes for getopt. */ # include # include #endif /* GNU C library. */ #ifdef VMS # include # if HAVE_STRING_H - 0 # include # endif #endif #ifndef _ /* This is for other GNU distributions with internationalized messages. When compiling libc, the _ macro is predefined. */ # if (HAVE_LIBINTL_H && ENABLE_NLS) || defined _LIBC # include # define _(msgid) gettext (msgid) # else # define _(msgid) (msgid) # endif #endif /* This version of `getopt' appears to the caller like standard Unix `getopt' but it behaves differently for the user, since it allows the user to intersperse the options with the other arguments. As `getopt' works, it permutes the elements of ARGV so that, when it is done, all the options precede everything else. Thus all application programs are extended to handle flexible argument order. Setting the environment variable POSIXLY_CORRECT disables permutation. Then the behavior is completely standard. GNU application programs can use a third alternative mode in which they can distinguish the relative order of options and other arguments. */ /* For communication from `getopt' to the caller. When `getopt' finds an option that takes an argument, the argument value is returned here. Also, when `ordering' is RETURN_IN_ORDER, each non-option ARGV-element is returned here. */ char *optarg = NULL; /* Index in ARGV of the next element to be scanned. This is used for communication to and from the caller and for communication between successive calls to `getopt'. On entry to `getopt', zero means this is the first call; initialize. When `getopt' returns -1, this is the index of the first of the non-option elements that the caller should itself scan. Otherwise, `optind' communicates from one call to the next how much of ARGV has been scanned so far. */ /* 1003.2 says this must be 1 before any call. */ int optind = 1; /* Formerly, initialization of getopt depended on optind==0, which causes problems with re-calling getopt as programs generally don't know that. */ int __getopt_initialized = 0; /* The next char to be scanned in the option-element in which the last option character we returned was found. This allows us to pick up the scan where we left off. If this is zero, or a null string, it means resume the scan by advancing to the next ARGV-element. */ static char *nextchar; /* Callers store zero here to inhibit the error message for unrecognized options. */ int opterr = 1; /* Set to an option character which was unrecognized. This must be initialized on some systems to avoid linking in the system's own getopt implementation. */ int optopt = '?'; /* Describe how to deal with options that follow non-option ARGV-elements. If the caller did not specify anything, the default is REQUIRE_ORDER if the environment variable POSIXLY_CORRECT is defined, PERMUTE otherwise. REQUIRE_ORDER means don't recognize them as options; stop option processing when the first non-option is seen. This is what Unix does. 
This mode of operation is selected by either setting the environment variable POSIXLY_CORRECT, or using `+' as the first character of the list of option characters. PERMUTE is the default. We permute the contents of ARGV as we scan, so that eventually all the non-options are at the end. This allows options to be given in any order, even with programs that were not written to expect this. RETURN_IN_ORDER is an option available to programs that were written to expect options and other ARGV-elements in any order and that care about the ordering of the two. We describe each non-option ARGV-element as if it were the argument of an option with character code 1. Using `-' as the first character of the list of option characters selects this mode of operation. The special argument `--' forces an end of option-scanning regardless of the value of `ordering'. In the case of RETURN_IN_ORDER, only `--' can cause `getopt' to return -1 with `optind' != ARGC. */ static enum { REQUIRE_ORDER, PERMUTE, RETURN_IN_ORDER } ordering; /* Value of POSIXLY_CORRECT environment variable. */ static char *posixly_correct; #ifdef __GNU_LIBRARY__ /* We want to avoid inclusion of string.h with non-GNU libraries because there are many ways it can cause trouble. On some systems, it contains special magic macros that don't work in GCC. */ # include # define my_index strchr #else # if HAVE_STRING_H # include # else # if HAVE_STRINGS_H # include # endif # endif /* Avoid depending on library functions or files whose names are inconsistent. */ #ifndef getenv extern char *getenv (); #endif static char * my_index (str, chr) const char *str; int chr; { while (*str) { if (*str == chr) return (char *) str; str++; } return 0; } /* If using GCC, we can safely declare strlen this way. If not using GCC, it is ok not to declare it. */ #ifdef __GNUC__ /* Note that Motorola Delta 68k R3V7 comes with GCC but not stddef.h. That was relevant to code that was here before. */ # if (!defined __STDC__ || !__STDC__) && !defined strlen /* gcc with -traditional declares the built-in strlen to return int, and has done so at least since version 2.4.5. -- rms. */ extern int strlen (const char *); # endif /* not __STDC__ */ #endif /* __GNUC__ */ #endif /* not __GNU_LIBRARY__ */ /* Handle permutation of arguments. */ /* Describe the part of ARGV that contains non-options that have been skipped. `first_nonopt' is the index in ARGV of the first of them; `last_nonopt' is the index after the last of them. */ static int first_nonopt; static int last_nonopt; #ifdef _LIBC /* Bash 2.0 gives us an environment variable containing flags indicating ARGV elements that should not be considered arguments. */ /* Defined in getopt_init.c */ extern char *__getopt_nonoption_flags; static int nonoption_flags_max_len; static int nonoption_flags_len; static int original_argc; static char *const *original_argv; /* Make sure the environment variable bash 2.0 puts in the environment is valid for the getopt call we must make sure that the ARGV passed to getopt is that one passed to the process. */ static void __attribute__ ((unused)) store_args_and_env (int argc, char *const *argv) { /* XXX This is no good solution. We should rather copy the args so that we can compare them later. But we must not use malloc(3). 
*/ original_argc = argc; original_argv = argv; } # ifdef text_set_element text_set_element (__libc_subinit, store_args_and_env); # endif /* text_set_element */ # define SWAP_FLAGS(ch1, ch2) \ if (nonoption_flags_len > 0) \ { \ char __tmp = __getopt_nonoption_flags[ch1]; \ __getopt_nonoption_flags[ch1] = __getopt_nonoption_flags[ch2]; \ __getopt_nonoption_flags[ch2] = __tmp; \ } #else /* !_LIBC */ # define SWAP_FLAGS(ch1, ch2) #endif /* _LIBC */ /* Exchange two adjacent subsequences of ARGV. One subsequence is elements [first_nonopt,last_nonopt) which contains all the non-options that have been skipped so far. The other is elements [last_nonopt,optind), which contains all the options processed since those non-options were skipped. `first_nonopt' and `last_nonopt' are relocated so that they describe the new indices of the non-options in ARGV after they are moved. */ #if defined __STDC__ && __STDC__ static void exchange (char **); #endif static void exchange (argv) char **argv; { int bottom = first_nonopt; int middle = last_nonopt; int top = optind; char *tem; /* Exchange the shorter segment with the far end of the longer segment. That puts the shorter segment into the right place. It leaves the longer segment in the right place overall, but it consists of two parts that need to be swapped next. */ #ifdef _LIBC /* First make sure the handling of the `__getopt_nonoption_flags' string can work normally. Our top argument must be in the range of the string. */ if (nonoption_flags_len > 0 && top >= nonoption_flags_max_len) { /* We must extend the array. The user plays games with us and presents new arguments. */ char *new_str = malloc (top + 1); if (new_str == NULL) nonoption_flags_len = nonoption_flags_max_len = 0; else { memset (mempcpy (new_str, __getopt_nonoption_flags, nonoption_flags_max_len), '\0', top + 1 - nonoption_flags_max_len); nonoption_flags_max_len = top + 1; __getopt_nonoption_flags = new_str; } } #endif while (top > middle && middle > bottom) { if (top - middle > middle - bottom) { /* Bottom segment is the short one. */ int len = middle - bottom; register int i; /* Swap it with the top part of the top segment. */ for (i = 0; i < len; i++) { tem = argv[bottom + i]; argv[bottom + i] = argv[top - (middle - bottom) + i]; argv[top - (middle - bottom) + i] = tem; SWAP_FLAGS (bottom + i, top - (middle - bottom) + i); } /* Exclude the moved bottom segment from further swapping. */ top -= len; } else { /* Top segment is the short one. */ int len = top - middle; register int i; /* Swap it with the bottom part of the bottom segment. */ for (i = 0; i < len; i++) { tem = argv[bottom + i]; argv[bottom + i] = argv[middle + i]; argv[middle + i] = tem; SWAP_FLAGS (bottom + i, middle + i); } /* Exclude the moved top segment from further swapping. */ bottom += len; } } /* Update records for the slots the non-options now occupy. */ first_nonopt += (optind - last_nonopt); last_nonopt = optind; } /* Initialize the internal data when the first call is made. */ #if defined __STDC__ && __STDC__ static const char *_getopt_initialize (int, char *const *, const char *); #endif static const char * _getopt_initialize (argc, argv, optstring) int argc; char *const *argv; const char *optstring; { /* Start processing options with ARGV-element 1 (since ARGV-element 0 is the program name); the sequence of previously skipped non-option ARGV-elements is empty. 
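   As an illustration of the permutation performed by exchange() above
   (a hedged example, not taken from the sources): with optstring "ab"
   and PERMUTE ordering, a command line of
       prog data1 -a data2 -b
   is reordered during scanning so that, by the time getopt returns -1,
   ARGV reads
       prog -a -b data1 data2
   and `optind' is left pointing at "data1".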
*/ first_nonopt = last_nonopt = optind; nextchar = NULL; posixly_correct = getenv ("POSIXLY_CORRECT"); /* Determine how to handle the ordering of options and nonoptions. */ if (optstring[0] == '-') { ordering = RETURN_IN_ORDER; ++optstring; } else if (optstring[0] == '+') { ordering = REQUIRE_ORDER; ++optstring; } else if (posixly_correct != NULL) ordering = REQUIRE_ORDER; else ordering = PERMUTE; #ifdef _LIBC if (posixly_correct == NULL && argc == original_argc && argv == original_argv) { if (nonoption_flags_max_len == 0) { if (__getopt_nonoption_flags == NULL || __getopt_nonoption_flags[0] == '\0') nonoption_flags_max_len = -1; else { const char *orig_str = __getopt_nonoption_flags; int len = nonoption_flags_max_len = strlen (orig_str); if (nonoption_flags_max_len < argc) nonoption_flags_max_len = argc; __getopt_nonoption_flags = (char *) malloc (nonoption_flags_max_len); if (__getopt_nonoption_flags == NULL) nonoption_flags_max_len = -1; else memset (mempcpy (__getopt_nonoption_flags, orig_str, len), '\0', nonoption_flags_max_len - len); } } nonoption_flags_len = nonoption_flags_max_len; } else nonoption_flags_len = 0; #endif return optstring; } /* Scan elements of ARGV (whose length is ARGC) for option characters given in OPTSTRING. If an element of ARGV starts with '-', and is not exactly "-" or "--", then it is an option element. The characters of this element (aside from the initial '-') are option characters. If `getopt' is called repeatedly, it returns successively each of the option characters from each of the option elements. If `getopt' finds another option character, it returns that character, updating `optind' and `nextchar' so that the next call to `getopt' can resume the scan with the following option character or ARGV-element. If there are no more option characters, `getopt' returns -1. Then `optind' is the index in ARGV of the first ARGV-element that is not an option. (The ARGV-elements have been permuted so that those that are not options now come last.) OPTSTRING is a string containing the legitimate option characters. If an option character is seen that is not listed in OPTSTRING, return '?' after printing an error message. If you set `opterr' to zero, the error message is suppressed but we still return '?'. If a char in OPTSTRING is followed by a colon, that means it wants an arg, so the following text in the same ARGV-element, or the text of the following ARGV-element, is returned in `optarg'. Two colons mean an option that wants an optional arg; if there is text in the current ARGV-element, it is returned in `optarg', otherwise `optarg' is set to zero. If OPTSTRING starts with `-' or `+', it requests different methods of handling the non-option ARGV-elements. See the comments about RETURN_IN_ORDER and REQUIRE_ORDER, above. Long-named options begin with `--' instead of `-'. Their names may be abbreviated as long as the abbreviation is unique or is an exact match for some defined option. If they have an argument, it follows the option name in the same ARGV-element, separated from the option name by a `=', or else the in next ARGV-element. When `getopt' finds a long-named option, it returns 0 if that option's `flag' field is nonzero, the value of the option's `val' field if the `flag' field is zero. The elements of ARGV aren't really const, because we permute them. But we pretend they're const in the prototype to be compatible with other systems. LONGOPTS is a vector of `struct option' terminated by an element containing a name which is zero. 
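/* A minimal usage sketch for the optstring conventions documented above,
   assuming the getopt, optarg and optind declarations in this file:
   "ab:c::" means plain -a, -b with a required argument, and -c with an
   optional one.  The option letters and the demo function are chosen
   only for illustration.  */
#include <stdio.h>

static int
options_demo (int argc, char **argv)
{
  int c;
  while ((c = getopt (argc, argv, "ab:c::")) != -1)
    switch (c)
      {
      case 'a': printf ("flag -a\n"); break;
      case 'b': printf ("-b with required argument %s\n", optarg); break;
      case 'c': printf ("-c with optional argument %s\n",
                        optarg ? optarg : "(none)"); break;
      default:  return 1;       /* getopt already printed a diagnostic */
      }
  /* After -1 is returned, ARGV has been permuted and `optind' indexes
     the first non-option element.  */
  for (; optind < argc; optind++)
    printf ("operand %s\n", argv[optind]);
  return 0;
}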
LONGIND returns the index in LONGOPT of the long-named option found. It is only valid when a long-named option has been found by the most recent call. If LONG_ONLY is nonzero, '-' as well as '--' can introduce long-named options. */ int _getopt_internal (argc, argv, optstring, longopts, longind, long_only) int argc; char *const *argv; const char *optstring; const struct option *longopts; int *longind; int long_only; { optarg = NULL; if (optind == 0 || !__getopt_initialized) { if (optind == 0) optind = 1; /* Don't scan ARGV[0], the program name. */ optstring = _getopt_initialize (argc, argv, optstring); __getopt_initialized = 1; } /* Test whether ARGV[optind] points to a non-option argument. Either it does not have option syntax, or there is an environment flag from the shell indicating it is not an option. The later information is only used when the used in the GNU libc. */ #ifdef _LIBC # define NONOPTION_P (argv[optind][0] != '-' || argv[optind][1] == '\0' \ || (optind < nonoption_flags_len \ && __getopt_nonoption_flags[optind] == '1')) #else # define NONOPTION_P (argv[optind][0] != '-' || argv[optind][1] == '\0') #endif if (nextchar == NULL || *nextchar == '\0') { /* Advance to the next ARGV-element. */ /* Give FIRST_NONOPT & LAST_NONOPT rational values if OPTIND has been moved back by the user (who may also have changed the arguments). */ if (last_nonopt > optind) last_nonopt = optind; if (first_nonopt > optind) first_nonopt = optind; if (ordering == PERMUTE) { /* If we have just processed some options following some non-options, exchange them so that the options come first. */ if (first_nonopt != last_nonopt && last_nonopt != optind) exchange ((char **) argv); else if (last_nonopt != optind) first_nonopt = optind; /* Skip any additional non-options and extend the range of non-options previously skipped. */ while (optind < argc && NONOPTION_P) optind++; last_nonopt = optind; } /* The special ARGV-element `--' means premature end of options. Skip it like a null option, then exchange with previous non-options as if it were an option, then skip everything else like a non-option. */ if (optind != argc && !strcmp (argv[optind], "--")) { optind++; if (first_nonopt != last_nonopt && last_nonopt != optind) exchange ((char **) argv); else if (first_nonopt == last_nonopt) first_nonopt = optind; last_nonopt = argc; optind = argc; } /* If we have done all the ARGV-elements, stop the scan and back over any non-options that we skipped and permuted. */ if (optind == argc) { /* Set the next-arg-index to point at the non-options that we previously skipped, so the caller will digest them. */ if (first_nonopt != last_nonopt) optind = first_nonopt; return -1; } /* If we have come to a non-option and did not permute it, either stop the scan or describe it to the caller and pass it by. */ if (NONOPTION_P) { if (ordering == REQUIRE_ORDER) return -1; optarg = argv[optind++]; return 1; } /* We have found another option-ARGV-element. Skip the initial punctuation. */ nextchar = (argv[optind] + 1 + (longopts != NULL && argv[optind][1] == '-')); } /* Decode the current option-ARGV-element. */ /* Check whether the ARGV-element is a long option. If long_only and the ARGV-element has the form "-f", where f is a valid short option, don't consider it an abbreviated form of a long option that starts with f. Otherwise there would be no way to give the -f short option. 
On the other hand, if there's a long option "fubar" and the ARGV-element is "-fu", do consider that an abbreviation of the long option, just like "--fu", and not "-f" with arg "u". This distinction seems to be the most useful approach. */ if (longopts != NULL && (argv[optind][1] == '-' || (long_only && (argv[optind][2] || !my_index (optstring, argv[optind][1]))))) { char *nameend; const struct option *p; const struct option *pfound = NULL; int exact = 0; int ambig = 0; int indfound = -1; int option_index; for (nameend = nextchar; *nameend && *nameend != '='; nameend++) /* Do nothing. */ ; /* Test all long options for either exact match or abbreviated matches. */ for (p = longopts, option_index = 0; p->name; p++, option_index++) if (!strncmp (p->name, nextchar, nameend - nextchar)) { if ((unsigned int) (nameend - nextchar) == (unsigned int) strlen (p->name)) { /* Exact match found. */ pfound = p; indfound = option_index; exact = 1; break; } else if (pfound == NULL) { /* First nonexact match found. */ pfound = p; indfound = option_index; } else /* Second or later nonexact match found. */ ambig = 1; } if (ambig && !exact) { if (opterr) fprintf (stderr, _("%s: option `%s' is ambiguous\n"), argv[0], argv[optind]); nextchar += strlen (nextchar); optind++; optopt = 0; return '?'; } if (pfound != NULL) { option_index = indfound; optind++; if (*nameend) { /* Don't test has_arg with >, because some C compilers don't allow it to be used on enums. */ if (pfound->has_arg) optarg = nameend + 1; else { if (opterr) { if (argv[optind - 1][1] == '-') /* --option */ fprintf (stderr, _("%s: option `--%s' doesn't allow an argument\n"), argv[0], pfound->name); else /* +option or -option */ fprintf (stderr, _("%s: option `%c%s' doesn't allow an argument\n"), argv[0], argv[optind - 1][0], pfound->name); nextchar += strlen (nextchar); optopt = pfound->val; return '?'; } } } else if (pfound->has_arg == 1) { if (optind < argc) optarg = argv[optind++]; else { if (opterr) fprintf (stderr, _("%s: option `%s' requires an argument\n"), argv[0], argv[optind - 1]); nextchar += strlen (nextchar); optopt = pfound->val; return optstring[0] == ':' ? ':' : '?'; } } nextchar += strlen (nextchar); if (longind != NULL) *longind = option_index; if (pfound->flag) { *(pfound->flag) = pfound->val; return 0; } return pfound->val; } /* Can't find it as a long option. If this is not getopt_long_only, or the option starts with '--' or is not a valid short option, then it's an error. Otherwise interpret it as a short option. */ if (!long_only || argv[optind][1] == '-' || my_index (optstring, *nextchar) == NULL) { if (opterr) { if (argv[optind][1] == '-') /* --option */ fprintf (stderr, _("%s: unrecognized option `--%s'\n"), argv[0], nextchar); else /* +option or -option */ fprintf (stderr, _("%s: unrecognized option `%c%s'\n"), argv[0], argv[optind][0], nextchar); } nextchar = (char *) ""; optind++; optopt = 0; return '?'; } } /* Look at and handle the next short option-character. */ { char c = *nextchar++; char *temp = my_index (optstring, c); /* Increment `optind' when we start to process its last character. */ if (*nextchar == '\0') ++optind; if (temp == NULL || c == ':') { if (opterr) { if (posixly_correct) /* 1003.2 specifies the format of this message. */ fprintf (stderr, _("%s: illegal option -- %c\n"), argv[0], c); else fprintf (stderr, _("%s: invalid option -- %c\n"), argv[0], c); } optopt = c; return '?'; } /* Convenience. 
Treat POSIX -W foo same as long option --foo */ if (temp[0] == 'W' && temp[1] == ';') { char *nameend; const struct option *p; const struct option *pfound = NULL; int exact = 0; int ambig = 0; int indfound = 0; int option_index; /* This is an option that requires an argument. */ if (*nextchar != '\0') { optarg = nextchar; /* If we end this ARGV-element by taking the rest as an arg, we must advance to the next element now. */ optind++; } else if (optind == argc) { if (opterr) { /* 1003.2 specifies the format of this message. */ fprintf (stderr, _("%s: option requires an argument -- %c\n"), argv[0], c); } optopt = c; if (optstring[0] == ':') c = ':'; else c = '?'; return c; } else /* We already incremented `optind' once; increment it again when taking next ARGV-elt as argument. */ optarg = argv[optind++]; /* optarg is now the argument, see if it's in the table of longopts. */ for (nextchar = nameend = optarg; *nameend && *nameend != '='; nameend++) /* Do nothing. */ ; /* Test all long options for either exact match or abbreviated matches. */ for (p = longopts, option_index = 0; p->name; p++, option_index++) if (!strncmp (p->name, nextchar, nameend - nextchar)) { if ((unsigned int) (nameend - nextchar) == strlen (p->name)) { /* Exact match found. */ pfound = p; indfound = option_index; exact = 1; break; } else if (pfound == NULL) { /* First nonexact match found. */ pfound = p; indfound = option_index; } else /* Second or later nonexact match found. */ ambig = 1; } if (ambig && !exact) { if (opterr) fprintf (stderr, _("%s: option `-W %s' is ambiguous\n"), argv[0], argv[optind]); nextchar += strlen (nextchar); optind++; return '?'; } if (pfound != NULL) { option_index = indfound; if (*nameend) { /* Don't test has_arg with >, because some C compilers don't allow it to be used on enums. */ if (pfound->has_arg) optarg = nameend + 1; else { if (opterr) fprintf (stderr, _("\ %s: option `-W %s' doesn't allow an argument\n"), argv[0], pfound->name); nextchar += strlen (nextchar); return '?'; } } else if (pfound->has_arg == 1) { if (optind < argc) optarg = argv[optind++]; else { if (opterr) fprintf (stderr, _("%s: option `%s' requires an argument\n"), argv[0], argv[optind - 1]); nextchar += strlen (nextchar); return optstring[0] == ':' ? ':' : '?'; } } nextchar += strlen (nextchar); if (longind != NULL) *longind = option_index; if (pfound->flag) { *(pfound->flag) = pfound->val; return 0; } return pfound->val; } nextchar = NULL; return 'W'; /* Let the application handle it. */ } if (temp[1] == ':') { if (temp[2] == ':') { /* This is an option that accepts an argument optionally. */ if (*nextchar != '\0') { optarg = nextchar; optind++; } else optarg = NULL; nextchar = NULL; } else { /* This is an option that requires an argument. */ if (*nextchar != '\0') { optarg = nextchar; /* If we end this ARGV-element by taking the rest as an arg, we must advance to the next element now. */ optind++; } else if (optind == argc) { if (opterr) { /* 1003.2 specifies the format of this message. */ fprintf (stderr, _("%s: option requires an argument -- %c\n"), argv[0], c); } optopt = c; if (optstring[0] == ':') c = ':'; else c = '?'; } else /* We already incremented `optind' once; increment it again when taking next ARGV-elt as argument. */ optarg = argv[optind++]; nextchar = NULL; } } return c; } } int getopt (argc, argv, optstring) int argc; char *const *argv; const char *optstring; { return _getopt_internal (argc, argv, optstring, (const struct option *) 0, (int *) 0, 0); } #endif /* Not ELIDE_CODE. 
*/ #ifdef TEST /* Compile with -DTEST to make an executable for use in testing the above definition of `getopt'. */ int main (argc, argv) int argc; char **argv; { int c; int digit_optind = 0; while (1) { int this_option_optind = optind ? optind : 1; c = getopt (argc, argv, "abc:d:0123456789"); if (c == -1) break; switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (digit_optind != 0 && digit_optind != this_option_optind) printf ("digits occur in two different argv-elements.\n"); digit_optind = this_option_optind; printf ("option %c\n", c); break; case 'a': printf ("option a\n"); break; case 'b': printf ("option b\n"); break; case 'c': printf ("option c with value `%s'\n", optarg); break; case '?': break; default: printf ("?? getopt returned character code 0%o ??\n", c); } } if (optind < argc) { printf ("non-option ARGV-elements: "); while (optind < argc) printf ("%s ", argv[optind++]); printf ("\n"); } exit (0); } #endif /* TEST */ /* getopt_long and getopt_long_only entry points for GNU getopt. Copyright (C) 1987,88,89,90,91,92,93,94,96,97,98 Free Software Foundation, Inc. NOTE: This source is derived from an old version taken from the GNU C Library (glibc). This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #endif #if !defined __STDC__ || !__STDC__ /* This is a separate conditional since some stdc systems reject `defined (const)'. */ #ifndef const #define const #endif #endif #include /* Comment out all this code if we are using the GNU C Library, and are not actually compiling the library itself. This code is part of the GNU C Library, but also included in many other GNU distributions. Compiling and linking in this code is a waste when using the GNU C library (especially if it is a shared library). Rather than having every GNU program understand `configure --with-gnu-libc' and omit the object files, it is simpler to just do this in the source for each such file. */ #define GETOPT_INTERFACE_VERSION 2 #if !defined _LIBC && defined __GLIBC__ && __GLIBC__ >= 2 #include #if _GNU_GETOPT_INTERFACE_VERSION == GETOPT_INTERFACE_VERSION #define ELIDE_CODE #endif #endif #ifndef ELIDE_CODE /* This needs to come after some library #include to get __GNU_LIBRARY__ defined. */ #ifdef __GNU_LIBRARY__ #include #endif #ifndef NULL #define NULL 0 #endif int getopt_long (argc, argv, options, long_options, opt_index) int argc; char *const *argv; const char *options; const struct option *long_options; int *opt_index; { return _getopt_internal (argc, argv, options, long_options, opt_index, 0); } /* Like getopt_long, but '-' as well as '--' can indicate a long option. If an option that starts with '-' (not '--') doesn't match a long option, but does match a short option, it is parsed as a short option instead. 
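/* A short sketch of the distinction just described, assuming the
   getopt_long_only entry point defined below and an illustrative option
   table; the names are made up and not taken from the sources.  */
#include <stdio.h>

static void
long_only_demo (int argc, char **argv)
{
  static const struct option longopts[] = {
    {"verbose", 0, 0, 'V'},
    {0, 0, 0, 0}
  };
  int c;
  /* With optstring "v", "-v" is parsed as the short option, while
     "-verbose" and any unique abbreviation such as "-verb" select the
     long option, exactly as "--verbose" would.  */
  while ((c = getopt_long_only (argc, argv, "v", longopts, NULL)) != -1)
    printf (c == 'V' ? "long --verbose\n" :
            c == 'v' ? "short -v\n" : "unrecognized\n");
}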
*/ int getopt_long_only (argc, argv, options, long_options, opt_index) int argc; char *const *argv; const char *options; const struct option *long_options; int *opt_index; { return _getopt_internal (argc, argv, options, long_options, opt_index, 1); } #endif /* Not ELIDE_CODE. */ #ifdef TEST #include int main (argc, argv) int argc; char **argv; { int c; int digit_optind = 0; while (1) { int this_option_optind = optind ? optind : 1; int option_index = 0; static struct option long_options[] = { {"add", 1, 0, 0}, {"append", 0, 0, 0}, {"delete", 1, 0, 0}, {"verbose", 0, 0, 0}, {"create", 0, 0, 0}, {"file", 1, 0, 0}, {0, 0, 0, 0} }; c = getopt_long (argc, argv, "abc:d:0123456789", long_options, &option_index); if (c == -1) break; switch (c) { case 0: printf ("option %s", long_options[option_index].name); if (optarg) printf (" with arg %s", optarg); printf ("\n"); break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': if (digit_optind != 0 && digit_optind != this_option_optind) printf ("digits occur in two different argv-elements.\n"); digit_optind = this_option_optind; printf ("option %c\n", c); break; case 'a': printf ("option a\n"); break; case 'b': printf ("option b\n"); break; case 'c': printf ("option c with value `%s'\n", optarg); break; case 'd': printf ("option d with value `%s'\n", optarg); break; case '?': break; default: printf ("?? getopt returned character code 0%o ??\n", c); } } if (optind < argc) { printf ("non-option ARGV-elements: "); while (optind < argc) printf ("%s ", argv[optind++]); printf ("\n"); } exit (0); } #endif /* TEST */ /* getpwd.c - get the working directory */ /* @deftypefn Supplemental char* getpwd (void) Returns the current working directory. This implementation caches the result on the assumption that the process will not call @code{chdir} between calls to @code{getpwd}. @end deftypefn */ #ifdef HAVE_CONFIG_H #endif #include #include #ifndef errno extern int errno; #endif #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_SYS_PARAM_H #include #endif #if HAVE_SYS_STAT_H #include #endif #if HAVE_LIMITS_H #include #endif /* Prototype these in case the system headers don't provide them. */ extern char *getpwd (); extern char *getwd (); /* Virtually every UN*X system now in common use (except for pre-4.3-tahoe BSD systems) now provides getcwd as called for by POSIX. Allow for the few exceptions to the general rule here. */ #if !defined(HAVE_GETCWD) && defined(HAVE_GETWD) #define getcwd(buf,len) getwd(buf) #endif #ifdef MAXPATHLEN #define GUESSPATHLEN (MAXPATHLEN + 1) #else #define GUESSPATHLEN 100 #endif #if !(defined (VMS) || (defined(_WIN32) && !defined(__CYGWIN__))) /* Get the working directory. Use the PWD environment variable if it's set correctly, since this is faster and gives more uniform answers to the user. Yield the working directory if successful; otherwise, yield 0 and set errno. */ char * getpwd () { static char *pwd; static int failure_errno; char *p = pwd; size_t s; struct stat dotstat, pwdstat; if (!p && !(errno = failure_errno)) { if (! ((p = getenv ("PWD")) != 0 && *p == '/' && stat (p, &pwdstat) == 0 && stat (".", &dotstat) == 0 && dotstat.st_ino == pwdstat.st_ino && dotstat.st_dev == pwdstat.st_dev)) /* The shortcut didn't work. Try the slow, ``sure'' way. */ for (s = GUESSPATHLEN; ! getcwd (p = xmalloc (s), s); s *= 2) { int e = errno; free (p); #ifdef ERANGE if (e != ERANGE) #endif { errno = failure_errno = e; p = 0; break; } } /* Cache the result. 
This assumes that the program does not invoke chdir between calls to getpwd. */ pwd = p; } return p; } #else /* VMS || _WIN32 && !__CYGWIN__ */ #ifndef MAXPATHLEN #define MAXPATHLEN 255 #endif char * getpwd () { static char *pwd = 0; if (!pwd) pwd = getcwd (xmalloc (MAXPATHLEN + 1), MAXPATHLEN + 1 #ifdef VMS , 0 #endif ); return pwd; } #endif /* VMS || _WIN32 && !__CYGWIN__ */ /* Return time used so far, in microseconds. Copyright (C) 1994, 1999, 2002 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* On some systems (such as WindISS), you must include to get the definition of "time_t" before you include . */ #include /* There are several ways to get elapsed execution time; unfortunately no single way is available for all host systems, nor are there reliable ways to find out which way is correct for a given host. */ #ifdef TIME_WITH_SYS_TIME # include # include #else # if HAVE_SYS_TIME_H # include # else # ifdef HAVE_TIME_H # include # endif # endif #endif #if defined (HAVE_GETRUSAGE) && defined (HAVE_SYS_RESOURCE_H) #include #endif #ifdef HAVE_TIMES #ifdef HAVE_SYS_PARAM_H #include #endif #include #endif #ifdef HAVE_UNISTD_H #include #endif /* This is a fallback; if wrong, it will likely make obviously wrong results. */ #ifndef CLOCKS_PER_SEC #define CLOCKS_PER_SEC 1 #endif #ifdef _SC_CLK_TCK #define GNU_HZ sysconf(_SC_CLK_TCK) #else #ifdef HZ #define GNU_HZ HZ #else #ifdef CLOCKS_PER_SEC #define GNU_HZ CLOCKS_PER_SEC #endif #endif #endif /* @deftypefn Replacement long get_run_time (void) Returns the time used so far, in microseconds. If possible, this is the time used by this process, else it is the elapsed time since the process started. @end deftypefn */ long get_run_time () { #if defined (HAVE_GETRUSAGE) && defined (HAVE_SYS_RESOURCE_H) struct rusage rusage; getrusage (0, &rusage); return (rusage.ru_utime.tv_sec * 1000000 + rusage.ru_utime.tv_usec + rusage.ru_stime.tv_sec * 1000000 + rusage.ru_stime.tv_usec); #else /* ! HAVE_GETRUSAGE */ #ifdef HAVE_TIMES struct tms tms; times (&tms); return (tms.tms_utime + tms.tms_stime) * (1000000 / GNU_HZ); #else /* ! HAVE_TIMES */ /* Fall back on clock and hope it's correctly implemented. */ const long clocks_per_sec = CLOCKS_PER_SEC; if (clocks_per_sec <= 1000000) return clock () * (1000000 / clocks_per_sec); else return clock () / clocks_per_sec; #endif /* HAVE_TIMES */ #endif /* HAVE_GETRUSAGE */ } /* An expandable hash tables datatype. Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Vladimir Makarov (vmakarov@cygnus.com). This file is part of the libiberty library. 
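/* A minimal usage sketch for get_run_time() above: time a piece of work
   in microseconds.  The loop body is only a stand-in workload and the
   demo function is hypothetical.  */
#include <stdio.h>

static void
timing_demo (void)
{
  long start = get_run_time ();
  volatile long sink = 0;
  long i;

  for (i = 0; i < 1000000; i++)
    sink += i;
  printf ("workload used %ld microseconds of CPU time\n",
          get_run_time () - start);
}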
Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This package implements basic hash table functionality. It is possible to search for an entry, create an entry and destroy an entry. Elements in the table are generic pointers. The size of the table is not fixed; if the occupancy of the table grows too high the hash table will be expanded. The abstract data implementation is based on generalized Algorithm D from Knuth's book "The art of computer programming". Hash table is expanded by creation of new hash table and transferring elements from the old table to the new table. */ #ifdef HAVE_CONFIG_H #endif #include #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_MALLOC_H #include #endif #ifdef HAVE_LIMITS_H #include #endif #ifdef HAVE_STDINT_H #include #endif #include /* An expandable hash tables datatype. Copyright (C) 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Vladimir Makarov (vmakarov@cygnus.com). This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This package implements basic hash table functionality. It is possible to search for an entry, create an entry and destroy an entry. Elements in the table are generic pointers. The size of the table is not fixed; if the occupancy of the table grows too high the hash table will be expanded. The abstract data implementation is based on generalized Algorithm D from Knuth's book "The art of computer programming". Hash table is expanded by creation of new hash table and transferring elements from the old table to the new table. */ #ifndef __HASHTAB_H__ #define __HASHTAB_H__ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #ifndef GTY #define GTY(X) #endif /* The type for a hash code. */ typedef unsigned int hashval_t; /* Callback function pointer types. */ /* Calculate hash of a table entry. */ typedef hashval_t (*htab_hash) PARAMS ((const void *)); /* Compare a table entry with a possible entry. The entry already in the table always comes first, so the second element can be of a different type (but in this case htab_find and htab_find_slot cannot be used; instead the variants that accept a hash value must be used). 
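/* A sketch of a matching hash/eq callback pair for string elements,
   illustrating the contract described above: the element already in the
   table is always passed as the first argument of the equality
   function.  These helpers are hypothetical and not part of libiberty;
   the hash recurrence is just a simple multiplicative string hash.  */
#include <string.h>

static hashval_t
string_elem_hash (const void *p)
{
  const unsigned char *s = (const unsigned char *) p;
  hashval_t h = 0;
  while (*s)
    h = h * 67 + *s++;
  return h;
}

static int
string_elem_eq (const void *entry, const void *candidate)
{
  return strcmp ((const char *) entry, (const char *) candidate) == 0;
}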
*/ typedef int (*htab_eq) PARAMS ((const void *, const void *)); /* Cleanup function called whenever a live element is removed from the hash table. */ typedef void (*htab_del) PARAMS ((void *)); /* Function called by htab_traverse for each live element. The first arg is the slot of the element (which can be passed to htab_clear_slot if desired), the second arg is the auxiliary pointer handed to htab_traverse. Return 1 to continue scan, 0 to stop. */ typedef int (*htab_trav) PARAMS ((void **, void *)); /* Memory-allocation function, with the same functionality as calloc(). Iff it returns NULL, the hash table implementation will pass an error code back to the user, so if your code doesn't handle errors, best if you use xcalloc instead. */ typedef PTR (*htab_alloc) PARAMS ((size_t, size_t)); /* We also need a free() routine. */ typedef void (*htab_free) PARAMS ((PTR)); /* Memory allocation and deallocation; variants which take an extra argument. */ typedef PTR (*htab_alloc_with_arg) PARAMS ((void *, size_t, size_t)); typedef void (*htab_free_with_arg) PARAMS ((void *, void *)); /* Hash tables are of the following type. The structure (implementation) of this type is not needed for using the hash tables. All work with hash table should be executed only through functions mentioned below. The size of this structure is subject to change. */ struct htab GTY(()) { /* Pointer to hash function. */ htab_hash hash_f; /* Pointer to comparison function. */ htab_eq eq_f; /* Pointer to cleanup function. */ htab_del del_f; /* Table itself. */ PTR * GTY ((use_param, length ("%h.size"))) entries; /* Current size (in entries) of the hash table. */ size_t size; /* Current number of elements including also deleted elements. */ size_t n_elements; /* Current number of deleted elements in the table. */ size_t n_deleted; /* The following member is used for debugging. Its value is number of all calls of `htab_find_slot' for the hash table. */ unsigned int searches; /* The following member is used for debugging. Its value is number of collisions fixed for time of work with the hash table. */ unsigned int collisions; /* Pointers to allocate/free functions. */ htab_alloc alloc_f; htab_free free_f; /* Alternate allocate/free functions, which take an extra argument. */ PTR GTY((skip)) alloc_arg; htab_alloc_with_arg alloc_with_arg_f; htab_free_with_arg free_with_arg_f; /* Current size (in entries) of the hash table, as an index into the table of primes. */ unsigned int size_prime_index; }; typedef struct htab *htab_t; /* An enum saying whether we insert into the hash table or not. */ enum insert_option {NO_INSERT, INSERT}; /* The prototypes of the package functions. */ extern htab_t htab_create_alloc PARAMS ((size_t, htab_hash, htab_eq, htab_del, htab_alloc, htab_free)); extern htab_t htab_create_alloc_ex PARAMS ((size_t, htab_hash, htab_eq, htab_del, PTR, htab_alloc_with_arg, htab_free_with_arg)); /* Backward-compatibility functions. 
*/ extern htab_t htab_create PARAMS ((size_t, htab_hash, htab_eq, htab_del)); extern htab_t htab_try_create PARAMS ((size_t, htab_hash, htab_eq, htab_del)); extern void htab_set_functions_ex PARAMS ((htab_t, htab_hash, htab_eq, htab_del, PTR, htab_alloc_with_arg, htab_free_with_arg)); extern void htab_delete PARAMS ((htab_t)); extern void htab_empty PARAMS ((htab_t)); extern PTR htab_find PARAMS ((htab_t, const void *)); extern PTR *htab_find_slot PARAMS ((htab_t, const void *, enum insert_option)); extern PTR htab_find_with_hash PARAMS ((htab_t, const void *, hashval_t)); extern PTR *htab_find_slot_with_hash PARAMS ((htab_t, const void *, hashval_t, enum insert_option)); extern void htab_clear_slot PARAMS ((htab_t, void **)); extern void htab_remove_elt PARAMS ((htab_t, void *)); extern void htab_remove_elt_with_hash PARAMS ((htab_t, void *, hashval_t)); extern void htab_traverse PARAMS ((htab_t, htab_trav, void *)); extern void htab_traverse_noresize PARAMS ((htab_t, htab_trav, void *)); extern size_t htab_size PARAMS ((htab_t)); extern size_t htab_elements PARAMS ((htab_t)); extern double htab_collisions PARAMS ((htab_t)); /* A hash function for pointers. */ extern htab_hash htab_hash_pointer; /* An equality function for pointers. */ extern htab_eq htab_eq_pointer; /* A hash function for null-terminated strings. */ extern hashval_t htab_hash_string PARAMS ((const PTR)); /* An iterative hash function for arbitrary data. */ extern hashval_t iterative_hash PARAMS ((const PTR, size_t, hashval_t)); /* Shorthand for hashing something with an intrinsic size. */ #define iterative_hash_object(OB,INIT) iterative_hash (&OB, sizeof (OB), INIT) #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* __HASHTAB_H */ #ifndef CHAR_BIT #define CHAR_BIT 8 #endif /* This macro defines reserved value for empty table entry. */ #define EMPTY_ENTRY ((PTR) 0) /* This macro defines reserved value for table entry which contained a deleted element. */ #define DELETED_ENTRY ((PTR) 1) static unsigned int higher_prime_index PARAMS ((unsigned long)); static hashval_t htab_mod_1 PARAMS ((hashval_t, hashval_t, hashval_t, int)); static hashval_t htab_mod PARAMS ((hashval_t, htab_t)); static hashval_t htab_mod_m2 PARAMS ((hashval_t, htab_t)); static hashval_t hash_pointer PARAMS ((const void *)); static int eq_pointer PARAMS ((const void *, const void *)); static int htab_expand PARAMS ((htab_t)); static PTR *find_empty_slot_for_expand PARAMS ((htab_t, hashval_t)); /* At some point, we could make these be NULL, and modify the hash-table routines to handle NULL specially; that would avoid function-call overhead for the common case of hashing pointers. */ htab_hash htab_hash_pointer = hash_pointer; htab_eq htab_eq_pointer = eq_pointer; /* Table of primes and multiplicative inverses. Note that these are not minimally reduced inverses. Unlike when generating code to divide by a constant, we want to be able to use the same algorithm all the time. All of these inverses (are implied to) have bit 32 set. For the record, here's the function that computed the table; it's a vastly simplified version of the function of the same name from gcc. 
*/ #if 0 unsigned int ceil_log2 (unsigned int x) { int i; for (i = 31; i >= 0 ; --i) if (x > (1u << i)) return i+1; abort (); } unsigned int choose_multiplier (unsigned int d, unsigned int *mlp, unsigned char *shiftp) { unsigned long long mhigh; double nx; int lgup, post_shift; int pow, pow2; int n = 32, precision = 32; lgup = ceil_log2 (d); pow = n + lgup; pow2 = n + lgup - precision; nx = ldexp (1.0, pow) + ldexp (1.0, pow2); mhigh = nx / d; *shiftp = lgup - 1; *mlp = mhigh; return mhigh >> 32; } #endif struct prime_ent { hashval_t prime; hashval_t inv; hashval_t inv_m2; /* inverse of prime-2 */ hashval_t shift; }; static struct prime_ent const prime_tab[] = { { 7, 0x24924925, 0x9999999b, 2 }, { 13, 0x3b13b13c, 0x745d1747, 3 }, { 31, 0x08421085, 0x1a7b9612, 4 }, { 61, 0x0c9714fc, 0x15b1e5f8, 5 }, { 127, 0x02040811, 0x0624dd30, 6 }, { 251, 0x05197f7e, 0x073260a5, 7 }, { 509, 0x01824366, 0x02864fc8, 8 }, { 1021, 0x00c0906d, 0x014191f7, 9 }, { 2039, 0x0121456f, 0x0161e69e, 10 }, { 4093, 0x00300902, 0x00501908, 11 }, { 8191, 0x00080041, 0x00180241, 12 }, { 16381, 0x000c0091, 0x00140191, 13 }, { 32749, 0x002605a5, 0x002a06e6, 14 }, { 65521, 0x000f00e2, 0x00110122, 15 }, { 131071, 0x00008001, 0x00018003, 16 }, { 262139, 0x00014002, 0x0001c004, 17 }, { 524287, 0x00002001, 0x00006001, 18 }, { 1048573, 0x00003001, 0x00005001, 19 }, { 2097143, 0x00004801, 0x00005801, 20 }, { 4194301, 0x00000c01, 0x00001401, 21 }, { 8388593, 0x00001e01, 0x00002201, 22 }, { 16777213, 0x00000301, 0x00000501, 23 }, { 33554393, 0x00001381, 0x00001481, 24 }, { 67108859, 0x00000141, 0x000001c1, 25 }, { 134217689, 0x000004e1, 0x00000521, 26 }, { 268435399, 0x00000391, 0x000003b1, 27 }, { 536870909, 0x00000019, 0x00000029, 28 }, { 1073741789, 0x0000008d, 0x00000095, 29 }, { 2147483647, 0x00000003, 0x00000007, 30 }, /* Avoid "decimal constant so large it is unsigned" for 4294967291. */ { 0xfffffffb, 0x00000006, 0x00000008, 31 } }; /* The following function returns an index into the above table of the nearest prime number which is greater than N, and near a power of two. */ static unsigned int higher_prime_index (n) unsigned long n; { unsigned int low = 0; unsigned int high = sizeof(prime_tab) / sizeof(prime_tab[0]); while (low != high) { unsigned int mid = low + (high - low) / 2; if (n > prime_tab[mid].prime) low = mid + 1; else high = mid; } /* If we've run out of primes, abort. */ if (n > prime_tab[low].prime) { fprintf (stderr, "Cannot find prime bigger than %lu\n", n); abort (); } return low; } /* Returns a hash code for P. */ static hashval_t hash_pointer (p) const PTR p; { return (hashval_t) ((long)p >> 3); } /* Returns non-zero if P1 and P2 are equal. */ static int eq_pointer (p1, p2) const PTR p1; const PTR p2; { return p1 == p2; } /* Return the current size of given hash table. */ inline size_t htab_size (htab) htab_t htab; { return htab->size; } /* Return the current number of elements in given hash table. */ inline size_t htab_elements (htab) htab_t htab; { return htab->n_elements - htab->n_deleted; } /* Return X % Y. */ static inline hashval_t htab_mod_1 (x, y, inv, shift) hashval_t x, y, inv; int shift; { /* The multiplicative inverses computed above are for 32-bit types, and requires that we be able to compute a highpart multiply. 
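   As a worked instance of the computation done below (illustrative only,
   not from the sources): for the prime_tab entry { 7, 0x24924925, ..., 2 }
   and x = 10, the steps give
       t1 = ((unsigned long long) 10 * 0x24924925) >> 32 = 1
       t2 = 10 - 1 = 9,   t3 = 9 >> 1 = 4,   t4 = 1 + 4 = 5,
       q  = 5 >> 2 = 1,   r  = 10 - 1 * 7 = 3,
   which is indeed 10 % 7.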
*/ #ifdef UNSIGNED_64BIT_TYPE __extension__ typedef UNSIGNED_64BIT_TYPE ull; if (sizeof (hashval_t) * CHAR_BIT <= 32) { hashval_t t1, t2, t3, t4, q, r; t1 = ((ull)x * inv) >> 32; t2 = x - t1; t3 = t2 >> 1; t4 = t1 + t3; q = t4 >> shift; r = x - (q * y); return r; } #endif /* Otherwise just use the native division routines. */ return x % y; } /* Compute the primary hash for HASH given HTAB's current size. */ static inline hashval_t htab_mod (hash, htab) hashval_t hash; htab_t htab; { const struct prime_ent *p = &prime_tab[htab->size_prime_index]; return htab_mod_1 (hash, p->prime, p->inv, p->shift); } /* Compute the secondary hash for HASH given HTAB's current size. */ static inline hashval_t htab_mod_m2 (hash, htab) hashval_t hash; htab_t htab; { const struct prime_ent *p = &prime_tab[htab->size_prime_index]; return 1 + htab_mod_1 (hash, p->prime - 2, p->inv_m2, p->shift); } /* This function creates table with length slightly longer than given source length. Created hash table is initiated as empty (all the hash table entries are EMPTY_ENTRY). The function returns the created hash table, or NULL if memory allocation fails. */ htab_t htab_create_alloc (size, hash_f, eq_f, del_f, alloc_f, free_f) size_t size; htab_hash hash_f; htab_eq eq_f; htab_del del_f; htab_alloc alloc_f; htab_free free_f; { htab_t result; unsigned int size_prime_index; size_prime_index = higher_prime_index (size); size = prime_tab[size_prime_index].prime; result = (htab_t) (*alloc_f) (1, sizeof (struct htab)); if (result == NULL) return NULL; result->entries = (PTR *) (*alloc_f) (size, sizeof (PTR)); if (result->entries == NULL) { if (free_f != NULL) (*free_f) (result); return NULL; } result->size = size; result->size_prime_index = size_prime_index; result->hash_f = hash_f; result->eq_f = eq_f; result->del_f = del_f; result->alloc_f = alloc_f; result->free_f = free_f; return result; } /* As above, but use the variants of alloc_f and free_f which accept an extra argument. */ htab_t htab_create_alloc_ex (size, hash_f, eq_f, del_f, alloc_arg, alloc_f, free_f) size_t size; htab_hash hash_f; htab_eq eq_f; htab_del del_f; PTR alloc_arg; htab_alloc_with_arg alloc_f; htab_free_with_arg free_f; { htab_t result; unsigned int size_prime_index; size_prime_index = higher_prime_index (size); size = prime_tab[size_prime_index].prime; result = (htab_t) (*alloc_f) (alloc_arg, 1, sizeof (struct htab)); if (result == NULL) return NULL; result->entries = (PTR *) (*alloc_f) (alloc_arg, size, sizeof (PTR)); if (result->entries == NULL) { if (free_f != NULL) (*free_f) (alloc_arg, result); return NULL; } result->size = size; result->size_prime_index = size_prime_index; result->hash_f = hash_f; result->eq_f = eq_f; result->del_f = del_f; result->alloc_arg = alloc_arg; result->alloc_with_arg_f = alloc_f; result->free_with_arg_f = free_f; return result; } /* Update the function pointers and allocation parameter in the htab_t. */ void htab_set_functions_ex (htab, hash_f, eq_f, del_f, alloc_arg, alloc_f, free_f) htab_t htab; htab_hash hash_f; htab_eq eq_f; htab_del del_f; PTR alloc_arg; htab_alloc_with_arg alloc_f; htab_free_with_arg free_f; { htab->hash_f = hash_f; htab->eq_f = eq_f; htab->del_f = del_f; htab->alloc_arg = alloc_arg; htab->alloc_with_arg_f = alloc_f; htab->free_with_arg_f = free_f; } /* These functions exist solely for backward compatibility. 
*/ #undef htab_create htab_t htab_create (size, hash_f, eq_f, del_f) size_t size; htab_hash hash_f; htab_eq eq_f; htab_del del_f; { return htab_create_alloc (size, hash_f, eq_f, del_f, xcalloc, free); } htab_t htab_try_create (size, hash_f, eq_f, del_f) size_t size; htab_hash hash_f; htab_eq eq_f; htab_del del_f; { return htab_create_alloc (size, hash_f, eq_f, del_f, calloc, free); } /* This function frees all memory allocated for given hash table. Naturally the hash table must already exist. */ void htab_delete (htab) htab_t htab; { size_t size = htab_size (htab); PTR *entries = htab->entries; int i; if (htab->del_f) for (i = size - 1; i >= 0; i--) if (entries[i] != EMPTY_ENTRY && entries[i] != DELETED_ENTRY) (*htab->del_f) (entries[i]); if (htab->free_f != NULL) { (*htab->free_f) (entries); (*htab->free_f) (htab); } else if (htab->free_with_arg_f != NULL) { (*htab->free_with_arg_f) (htab->alloc_arg, entries); (*htab->free_with_arg_f) (htab->alloc_arg, htab); } } /* This function clears all entries in the given hash table. */ void htab_empty (htab) htab_t htab; { size_t size = htab_size (htab); PTR *entries = htab->entries; int i; if (htab->del_f) for (i = size - 1; i >= 0; i--) if (entries[i] != EMPTY_ENTRY && entries[i] != DELETED_ENTRY) (*htab->del_f) (entries[i]); memset (entries, 0, size * sizeof (PTR)); } /* Similar to htab_find_slot, but without several unwanted side effects: - Does not call htab->eq_f when it finds an existing entry. - Does not change the count of elements/searches/collisions in the hash table. This function also assumes there are no deleted entries in the table. HASH is the hash value for the element to be inserted. */ static PTR * find_empty_slot_for_expand (htab, hash) htab_t htab; hashval_t hash; { hashval_t index = htab_mod (hash, htab); size_t size = htab_size (htab); PTR *slot = htab->entries + index; hashval_t hash2; if (*slot == EMPTY_ENTRY) return slot; else if (*slot == DELETED_ENTRY) abort (); hash2 = htab_mod_m2 (hash, htab); for (;;) { index += hash2; if (index >= size) index -= size; slot = htab->entries + index; if (*slot == EMPTY_ENTRY) return slot; else if (*slot == DELETED_ENTRY) abort (); } } /* The following function changes size of memory allocated for the entries and repeatedly inserts the table elements. The occupancy of the table after the call will be about 50%. Naturally the hash table must already exist. Remember also that the place of the table entries is changed. If memory allocation failures are allowed, this function will return zero, indicating that the table could not be expanded. If all goes well, it will return a non-zero value. */ static int htab_expand (htab) htab_t htab; { PTR *oentries; PTR *olimit; PTR *p; PTR *nentries; size_t nsize, osize, elts; unsigned int oindex, nindex; oentries = htab->entries; oindex = htab->size_prime_index; osize = htab->size; olimit = oentries + osize; elts = htab_elements (htab); /* Resize only when table after removal of unused elements is either too full or too empty. 
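   Concretely (an illustrative reading of the test below, not from the
   sources): the table is grown once the live elements exceed half of the
   current size, and shrunk once they fall below one eighth of it,
   provided the table currently has more than 32 entries; e.g. with
   osize = 127 a grow is triggered at elts = 64 and a shrink once elts
   drops below 16.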
*/ if (elts * 2 > osize || (elts * 8 < osize && osize > 32)) { nindex = higher_prime_index (elts * 2); nsize = prime_tab[nindex].prime; } else { nindex = oindex; nsize = osize; } if (htab->alloc_with_arg_f != NULL) nentries = (PTR *) (*htab->alloc_with_arg_f) (htab->alloc_arg, nsize, sizeof (PTR *)); else nentries = (PTR *) (*htab->alloc_f) (nsize, sizeof (PTR *)); if (nentries == NULL) return 0; htab->entries = nentries; htab->size = nsize; htab->size_prime_index = nindex; htab->n_elements -= htab->n_deleted; htab->n_deleted = 0; p = oentries; do { PTR x = *p; if (x != EMPTY_ENTRY && x != DELETED_ENTRY) { PTR *q = find_empty_slot_for_expand (htab, (*htab->hash_f) (x)); *q = x; } p++; } while (p < olimit); if (htab->free_f != NULL) (*htab->free_f) (oentries); else if (htab->free_with_arg_f != NULL) (*htab->free_with_arg_f) (htab->alloc_arg, oentries); return 1; } /* This function searches for a hash table entry equal to the given element. It cannot be used to insert or delete an element. */ PTR htab_find_with_hash (htab, element, hash) htab_t htab; const PTR element; hashval_t hash; { hashval_t index, hash2; size_t size; PTR entry; htab->searches++; size = htab_size (htab); index = htab_mod (hash, htab); entry = htab->entries[index]; if (entry == EMPTY_ENTRY || (entry != DELETED_ENTRY && (*htab->eq_f) (entry, element))) return entry; hash2 = htab_mod_m2 (hash, htab); for (;;) { htab->collisions++; index += hash2; if (index >= size) index -= size; entry = htab->entries[index]; if (entry == EMPTY_ENTRY || (entry != DELETED_ENTRY && (*htab->eq_f) (entry, element))) return entry; } } /* Like htab_find_slot_with_hash, but compute the hash value from the element. */ PTR htab_find (htab, element) htab_t htab; const PTR element; { return htab_find_with_hash (htab, element, (*htab->hash_f) (element)); } /* This function searches for a hash table slot containing an entry equal to the given element. To delete an entry, call this with INSERT = 0, then call htab_clear_slot on the slot returned (possibly after doing some checks). To insert an entry, call this with INSERT = 1, then write the value you want into the returned slot. When inserting an entry, NULL may be returned if memory allocation fails. */ PTR * htab_find_slot_with_hash (htab, element, hash, insert) htab_t htab; const PTR element; hashval_t hash; enum insert_option insert; { PTR *first_deleted_slot; hashval_t index, hash2; size_t size; PTR entry; size = htab_size (htab); if (insert == INSERT && size * 3 <= htab->n_elements * 4) { if (htab_expand (htab) == 0) return NULL; size = htab_size (htab); } index = htab_mod (hash, htab); htab->searches++; first_deleted_slot = NULL; entry = htab->entries[index]; if (entry == EMPTY_ENTRY) goto empty_entry; else if (entry == DELETED_ENTRY) first_deleted_slot = &htab->entries[index]; else if ((*htab->eq_f) (entry, element)) return &htab->entries[index]; hash2 = htab_mod_m2 (hash, htab); for (;;) { htab->collisions++; index += hash2; if (index >= size) index -= size; entry = htab->entries[index]; if (entry == EMPTY_ENTRY) goto empty_entry; else if (entry == DELETED_ENTRY) { if (!first_deleted_slot) first_deleted_slot = &htab->entries[index]; } else if ((*htab->eq_f) (entry, element)) return &htab->entries[index]; } empty_entry: if (insert == NO_INSERT) return NULL; if (first_deleted_slot) { htab->n_deleted--; *first_deleted_slot = EMPTY_ENTRY; return first_deleted_slot; } htab->n_elements++; return &htab->entries[index]; } /* Like htab_find_slot_with_hash, but compute the hash value from the element. 
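   By way of illustration (a sketch, not part of the original source; TAB and
   NAME are hypothetical), the usual insert-or-find pattern built on the
   routine defined just below looks like:

     PTR *slot = htab_find_slot (tab, name, INSERT);
     if (slot == NULL)
       return NULL;
     if (*slot == NULL)
       *slot = xstrdup (name);
     return *slot;

   A NULL return means the table could not be expanded; an empty slot reads
   back as 0 (EMPTY_ENTRY), so writing the new entry into it completes the
   insertion.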
*/ PTR * htab_find_slot (htab, element, insert) htab_t htab; const PTR element; enum insert_option insert; { return htab_find_slot_with_hash (htab, element, (*htab->hash_f) (element), insert); } /* This function deletes an element with the given value from hash table (the hash is computed from the element). If there is no matching element in the hash table, this function does nothing. */ void htab_remove_elt (htab, element) htab_t htab; PTR element; { htab_remove_elt_with_hash (htab, element, (*htab->hash_f) (element)); } /* This function deletes an element with the given value from hash table. If there is no matching element in the hash table, this function does nothing. */ void htab_remove_elt_with_hash (htab, element, hash) htab_t htab; PTR element; hashval_t hash; { PTR *slot; slot = htab_find_slot_with_hash (htab, element, hash, NO_INSERT); if (*slot == EMPTY_ENTRY) return; if (htab->del_f) (*htab->del_f) (*slot); *slot = DELETED_ENTRY; htab->n_deleted++; } /* This function clears a specified slot in a hash table. It is useful when you've already done the lookup and don't want to do it again. */ void htab_clear_slot (htab, slot) htab_t htab; PTR *slot; { if (slot < htab->entries || slot >= htab->entries + htab_size (htab) || *slot == EMPTY_ENTRY || *slot == DELETED_ENTRY) abort (); if (htab->del_f) (*htab->del_f) (*slot); *slot = DELETED_ENTRY; htab->n_deleted++; } /* This function scans over the entire hash table calling CALLBACK for each live entry. If CALLBACK returns false, the iteration stops. INFO is passed as CALLBACK's second argument. */ void htab_traverse_noresize (htab, callback, info) htab_t htab; htab_trav callback; PTR info; { PTR *slot; PTR *limit; slot = htab->entries; limit = slot + htab_size (htab); do { PTR x = *slot; if (x != EMPTY_ENTRY && x != DELETED_ENTRY) if (!(*callback) (slot, info)) break; } while (++slot < limit); } /* Like htab_traverse_noresize, but does resize the table when it is too empty to improve effectivity of subsequent calls. */ void htab_traverse (htab, callback, info) htab_t htab; htab_trav callback; PTR info; { if (htab_elements (htab) * 8 < htab_size (htab)) htab_expand (htab); htab_traverse_noresize (htab, callback, info); } /* Return the fraction of fixed collisions during all work with given hash table. */ double htab_collisions (htab) htab_t htab; { if (htab->searches == 0) return 0.0; return (double) htab->collisions / (double) htab->searches; } /* Hash P as a null-terminated string. Copied from gcc/hashtable.c. Zack had the following to say with respect to applicability, though note that unlike hashtable.c, this hash table implementation re-hashes rather than chain buckets. http://gcc.gnu.org/ml/gcc-patches/2001-08/msg01021.html From: Zack Weinberg Date: Fri, 17 Aug 2001 02:15:56 -0400 I got it by extracting all the identifiers from all the source code I had lying around in mid-1999, and testing many recurrences of the form "H_n = H_{n-1} * K + c_n * L + M" where K, L, M were either prime numbers or the appropriate identity. This was the best one. I don't remember exactly what constituted "best", except I was looking at bucket-length distributions mostly. So it should be very good at hashing identifiers, but might not be as good at arbitrary strings. I'll add that it thoroughly trounces the hash functions recommended for this use at http://burtleburtle.net/bob/hash/index.html, both on speed and bucket distribution. I haven't tried it against the function they just started using for Perl's hashes. 
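   (In the copy below the recurrence is instantiated with K = 67, L = 1 and
   M = -113, i.e. the hash is updated as r = r * 67 + c - 113 for each
   character c of the string.)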
*/ hashval_t htab_hash_string (p) const PTR p; { const unsigned char *str = (const unsigned char *) p; hashval_t r = 0; unsigned char c; while ((c = *str++) != 0) r = r * 67 + c - 113; return r; } /* DERIVED FROM: -------------------------------------------------------------------- lookup2.c, by Bob Jenkins, December 1996, Public Domain. hash(), hash2(), hash3, and mix() are externally useful functions. Routines to test the hash are included if SELF_TEST is defined. You can use this free for any purpose. It has no warranty. -------------------------------------------------------------------- */ /* -------------------------------------------------------------------- mix -- mix 3 32-bit values reversibly. For every delta with one or two bit set, and the deltas of all three high bits or all three low bits, whether the original value of a,b,c is almost all zero or is uniformly distributed, * If mix() is run forward or backward, at least 32 bits in a,b,c have at least 1/4 probability of changing. * If mix() is run forward, every bit of c will change between 1/3 and 2/3 of the time. (Well, 22/100 and 78/100 for some 2-bit deltas.) mix() was built out of 36 single-cycle latency instructions in a structure that could supported 2x parallelism, like so: a -= b; a -= c; x = (c>>13); b -= c; a ^= x; b -= a; x = (a<<8); c -= a; b ^= x; c -= b; x = (b>>13); ... Unfortunately, superscalar Pentiums and Sparcs can't take advantage of that parallelism. They've also turned some of those single-cycle latency instructions into multi-cycle latency instructions. Still, this is the fastest good hash I could find. There were about 2^^68 to choose from. I only looked at a billion or so. -------------------------------------------------------------------- */ /* same, but slower, works on systems that might have 8 byte hashval_t's */ #define mix(a,b,c) \ { \ a -= b; a -= c; a ^= (c>>13); \ b -= c; b -= a; b ^= (a<< 8); \ c -= a; c -= b; c ^= ((b&0xffffffff)>>13); \ a -= b; a -= c; a ^= ((c&0xffffffff)>>12); \ b -= c; b -= a; b = (b ^ (a<<16)) & 0xffffffff; \ c -= a; c -= b; c = (c ^ (b>> 5)) & 0xffffffff; \ a -= b; a -= c; a = (a ^ (c>> 3)) & 0xffffffff; \ b -= c; b -= a; b = (b ^ (a<<10)) & 0xffffffff; \ c -= a; c -= b; c = (c ^ (b>>15)) & 0xffffffff; \ } /* -------------------------------------------------------------------- hash() -- hash a variable-length key into a 32-bit value k : the key (the unaligned variable-length array of bytes) len : the length of the key, counting by bytes level : can be any 4-byte value Returns a 32-bit value. Every bit of the key affects every bit of the return value. Every 1-bit and 2-bit delta achieves avalanche. About 36+6len instructions. The best hash table sizes are powers of 2. There is no need to do mod a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask. For example, if you need only 10 bits, do h = (h & hashmask(10)); In which case, the hash table should have hashsize(10) elements. 
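   To illustrate how the level/initval argument is meant to be used (a sketch
   only, not part of the original source; OBJ and its fields are
   hypothetical), several pieces of data can be folded into one hash by
   chaining calls and passing each result as the initial value of the next:

     hashval_t h = 0;
     h = iterative_hash (&obj->start, sizeof (obj->start), h);
     h = iterative_hash (&obj->length, sizeof (obj->length), h);
     h = iterative_hash (obj->name, strlen (obj->name), h);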
If you are hashing n strings (ub1 **)k, do it like this: for (i=0, h=0; i<n; ++i) h = hash (k[i], len[i], h); */ hashval_t iterative_hash (k_in, length, initval) const PTR k_in; size_t length; hashval_t initval; { const unsigned char *k = (const unsigned char *) k_in; hashval_t a, b, c, len; /* Set up the internal state. */ len = length; a = b = 0x9e3779b9; /* the golden ratio; an arbitrary value */ c = initval; /* the previous hash value */ /* Handle most of the key. */ #ifndef WORDS_BIGENDIAN if (sizeof (hashval_t) == 4 && (((size_t) k) & 3) == 0) while (len >= 12) /* aligned */ { a += *(hashval_t *)(k+0); b += *(hashval_t *)(k+4); c += *(hashval_t *)(k+8); mix(a,b,c); k += 12; len -= 12; } else /* unaligned */ #endif while (len >= 12) { a += (k[0] +((hashval_t)k[1]<<8) +((hashval_t)k[2]<<16) +((hashval_t)k[3]<<24)); b += (k[4] +((hashval_t)k[5]<<8) +((hashval_t)k[6]<<16) +((hashval_t)k[7]<<24)); c += (k[8] +((hashval_t)k[9]<<8) +((hashval_t)k[10]<<16)+((hashval_t)k[11]<<24)); mix(a,b,c); k += 12; len -= 12; } /*------------------------------------- handle the last 11 bytes */ c += length; switch(len) /* all the case statements fall through */ { case 11: c+=((hashval_t)k[10]<<24); case 10: c+=((hashval_t)k[9]<<16); case 9 : c+=((hashval_t)k[8]<<8); /* the first byte of c is reserved for the length */ case 8 : b+=((hashval_t)k[7]<<24); case 7 : b+=((hashval_t)k[6]<<16); case 6 : b+=((hashval_t)k[5]<<8); case 5 : b+=k[4]; case 4 : a+=((hashval_t)k[3]<<24); case 3 : a+=((hashval_t)k[2]<<16); case 2 : a+=((hashval_t)k[1]<<8); case 1 : a+=k[0]; /* case 0: nothing left to add */ } mix(a,b,c); /*-------------------------------------------- report the result */ return c; }
/* Hex character manipulation support. Copyright (C) 1995, 2001 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#include <stdio.h> /* for EOF */ #if EOF != -1 #error "hex.c requires EOF == -1" #endif /* @deftypefn Extension void hex_init (void) Initializes the array mapping the current character set to corresponding hex values. This function must be called before any call to @code{hex_p} or @code{hex_value}. If you fail to call it, a default ASCII-based table will normally be used on ASCII systems. @end deftypefn @deftypefn Extension int hex_p (int @var{c}) Evaluates to non-zero if the given character is a valid hex character, or zero if it is not. Note that the value you pass will be cast to @code{unsigned char} within the macro. @end deftypefn @deftypefn Extension unsigned int hex_value (int @var{c}) Returns the numeric equivalent of the given character when interpreted as a hexadecimal digit. The result is undefined if you pass an invalid hex digit. Note that the value you pass will be cast to @code{unsigned char} within the macro. The @code{hex_value} macro returns @code{unsigned int}, rather than signed @code{int}, to make it easier to use in parsing addresses from hex dump files: a signed @code{int} would be sign-extended when converted to a wider unsigned type --- like @code{bfd_vma}, on some systems. @end deftypefn @undocumented _hex_array_size @undocumented _hex_bad @undocumented _hex_value */ /* Are we ASCII?
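   (Before the charset-specific table, an illustrative sketch of the
   interface documented above; not part of the original source, and S is a
   hypothetical pointer to two hex digits:

     unsigned int byte;
     hex_init ();
     if (hex_p (s[0]) && hex_p (s[1]))
       byte = (hex_value (s[0]) << 4) | hex_value (s[1]);

   hex_p rejects anything that is not a hex digit, and the unsigned result of
   hex_value avoids sign extension when it is widened.)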
*/ #if HOST_CHARSET == HOST_CHARSET_ASCII const unsigned char _hex_value[_hex_array_size] = { _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* NUL SOH STX ETX */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* EOT ENQ ACK BEL */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* BS HT LF VT */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* FF CR SO SI */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* DLE DC1 DC2 DC3 */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* DC4 NAK SYN ETB */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* CAN EM SUB ESC */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* FS GS RS US */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* SP ! " # */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* $ % & ' */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* ( ) * + */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* , - . / */ 0, 1, 2, 3, /* 0 1 2 3 */ 4, 5, 6, 7, /* 4 5 6 7 */ 8, 9, _hex_bad, _hex_bad, /* 8 9 : ; */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* < = > ? */ _hex_bad, 10, 11, 12, /* @ A B C */ 13, 14, 15, _hex_bad, /* D E F G */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* H I J K */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* L M N O */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* P Q R S */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* T U V W */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* X Y Z [ */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* \ ] ^ _ */ _hex_bad, 10, 11, 12, /* ` a b c */ 13, 14, 15, _hex_bad, /* d e f g */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* h i j k */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* l m n o */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* p q r s */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* t u v w */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* x y z { */ _hex_bad, _hex_bad, _hex_bad, _hex_bad, /* | } ~ DEL */ /* The high half of unsigned char, all values are _hex_bad. 
*/ _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, _hex_bad, }; #define HEX_TABLE_INITIALIZED #else unsigned char _hex_value[_hex_array_size]; #endif /* not ASCII */ void hex_init () { #ifndef HEX_TABLE_INITIALIZED int i; for (i=0; i<_hex_array_size; i++) { switch (i) { case '0': _hex_value[i] = 0; break; case '1': _hex_value[i] = 1; break; case '2': _hex_value[i] = 2; break; case '3': _hex_value[i] = 3; break; case '4': _hex_value[i] = 4; break; case '5': _hex_value[i] = 5; break; case '6': _hex_value[i] = 6; break; case '7': _hex_value[i] = 7; break; case '8': _hex_value[i] = 8; break; case '9': _hex_value[i] = 9; break; case 'a': case 'A': _hex_value[i] = 10; break; case 'b': case 'B': _hex_value[i] = 11; break; case 'c': case 'C': _hex_value[i] = 12; break; case 'd': case 'D': _hex_value[i] = 13; break; case 'e': case 'E': _hex_value[i] = 14; break; case 'f': case 'F': _hex_value[i] = 15; break; default: _hex_value[i] = _hex_bad; break; } } #endif } /* Libiberty basename. Like basename, but is not overridden by the system C library. Copyright (C) 2001, 2002 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* @deftypefn Replacement {const char*} lbasename (const char *@var{name}) Given a pointer to a string containing a typical pathname (@samp{/usr/src/cmd/ls/ls.c} for example), returns a pointer to the last component of the pathname (@samp{ls.c} in this case). The returned pointer is guaranteed to lie within the original string. 
This latter fact is not true of many vendor C libraries, which return special strings or modify the passed strings for particular input. In particular, the empty string returns the same empty string, and a path ending in @code{/} returns the empty string after it. @end deftypefn */ /* Macros for taking apart, interpreting and processing file names. These are here because some non-Posix (a.k.a. DOSish) systems have drive letter brain-damage at the beginning of an absolute file name, use forward- and back-slash in path names interchangeably, and some of them have case-insensitive file names. Copyright 2000, 2001 Free Software Foundation, Inc. This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef FILENAMES_H #define FILENAMES_H #if defined(__MSDOS__) || defined(_WIN32) || defined(__OS2__) || defined (__CYGWIN__) #ifndef HAVE_DOS_BASED_FILE_SYSTEM #define HAVE_DOS_BASED_FILE_SYSTEM 1 #endif #define IS_DIR_SEPARATOR(c) ((c) == '/' || (c) == '\\') /* Note that IS_ABSOLUTE_PATH accepts d:foo as well, although it is only semi-absolute. This is because the users of IS_ABSOLUTE_PATH want to know whether to prepend the current working directory to a file name, which should not be done with a name like d:foo. */ #define IS_ABSOLUTE_PATH(f) (IS_DIR_SEPARATOR((f)[0]) || (((f)[0]) && ((f)[1] == ':'))) #define FILENAME_CMP(s1, s2) strcasecmp(s1, s2) #else /* not DOSish */ #define IS_DIR_SEPARATOR(c) ((c) == '/') #define IS_ABSOLUTE_PATH(f) (IS_DIR_SEPARATOR((f)[0])) #define FILENAME_CMP(s1, s2) strcmp(s1, s2) #endif /* not DOSish */ #endif /* FILENAMES_H */ const char * lbasename (name) const char *name; { const char *base; #if defined (HAVE_DOS_BASED_FILE_SYSTEM) /* Skip over a possible disk name. */ if (ISALPHA (name[0]) && name[1] == ':') name += 2; #endif for (base = name; *name; name++) if (IS_DIR_SEPARATOR (*name)) base = name + 1; return base; } /* Libiberty realpath. Like realpath, but more consistent behavior. Based on gdb_realpath from GDB. Copyright 2003 Free Software Foundation, Inc. This file is part of the libiberty library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
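   Looking back at lbasename above, its behaviour on a few inputs (examples
   only, not part of the original source):

     lbasename ("/usr/src/cmd/ls/ls.c")   returns a pointer to "ls.c"
     lbasename ("ls.c")                   returns its argument unchanged
     lbasename ("/usr/src/cmd/ls/")       returns the empty string after the
                                          final separator
     lbasename ("d:foo\\bar.c")           returns "bar.c" on DOS-based
                                          filesystems, the whole string
                                          elsewhere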
*/ /* @deftypefn Replacement {const char*} lrealpath (const char *@var{name}) Given a pointer to a string containing a pathname, returns a canonical version of the filename. Symlinks will be resolved, and ``.'' and ``..'' components will be simplified. The returned value will be allocated using @code{malloc}, or @code{NULL} will be returned on a memory allocation error. @end deftypefn */ #ifdef HAVE_LIMITS_H #include #endif #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_STRING_H #include #endif /* On GNU libc systems the declaration is only visible with _GNU_SOURCE. */ #if defined(HAVE_CANONICALIZE_FILE_NAME) \ && defined(NEED_DECLARATION_CANONICALIZE_FILE_NAME) extern char *canonicalize_file_name (const char *); #endif #if defined(HAVE_REALPATH) # if defined (PATH_MAX) # define REALPATH_LIMIT PATH_MAX # else # if defined (MAXPATHLEN) # define REALPATH_LIMIT MAXPATHLEN # endif # endif #else /* cygwin has realpath, so it won't get here. */ #endif char * lrealpath (filename) const char *filename; { /* Method 1: The system has a compile time upper bound on a filename path. Use that and realpath() to canonicalize the name. This is the most common case. Note that, if there isn't a compile time upper bound, you want to avoid realpath() at all costs. */ #if defined(REALPATH_LIMIT) { char buf[REALPATH_LIMIT]; const char *rp = realpath (filename, buf); if (rp == NULL) rp = filename; return strdup (rp); } #endif /* REALPATH_LIMIT */ /* Method 2: The host system (i.e., GNU) has the function canonicalize_file_name() which malloc's a chunk of memory and returns that, use that. */ #if defined(HAVE_CANONICALIZE_FILE_NAME) { char *rp = canonicalize_file_name (filename); if (rp == NULL) return strdup (filename); else return rp; } #endif /* Method 3: Now we're getting desperate! The system doesn't have a compile time buffer size and no alternative function. Query the OS, using pathconf(), for the buffer limit. Care is needed though, some systems do not limit PATH_MAX (return -1 for pathconf()) making it impossible to pass a correctly sized buffer to realpath() (it could always overflow). On those systems, we skip this. */ #if defined (HAVE_REALPATH) && defined (HAVE_UNISTD_H) { /* Find out the max path size. */ long path_max = pathconf ("/", _PC_PATH_MAX); if (path_max > 0) { /* PATH_MAX is bounded. */ char *buf, *rp, *ret; buf = malloc (path_max); if (buf == NULL) return NULL; rp = realpath (filename, buf); ret = strdup (rp ? rp : filename); free (buf); return ret; } } #endif /* The MS Windows method. If we don't have realpath, we assume we don't have symlinks and just canonicalize to a Windows absolute path. GetFullPath converts ../ and ./ in relative paths to absolute paths, filling in current drive if one is not given or using the current directory of a specified drive (eg, "E:foo"). It also converts all forward slashes to back slashes. */ #if defined (_WIN32) { char buf[MAX_PATH]; char* basename; DWORD len = GetFullPathName (filename, MAX_PATH, buf, &basename); if (len == 0 || len > MAX_PATH - 1) return strdup (filename); else { /* The file system is case-preserving but case-insensitive, Canonicalize to lowercase, using the codepage associated with the process locale. */ CharLowerBuff (buf, len); return strdup (buf); } } #endif /* This system is a lost cause, just duplicate the filename. */ return strdup (filename); } /* Relative (relocatable) prefix support. 
Copyright (C) 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc. This file is part of libiberty. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* @deftypefn Extension {const char*} make_relative_prefix (const char *@var{progname}, const char *@var{bin_prefix}, const char *@var{prefix}) Given three paths @var{progname}, @var{bin_prefix}, @var{prefix}, return the path that is in the same position relative to @var{progname}'s directory as @var{prefix} is relative to @var{bin_prefix}. That is, a string starting with the directory portion of @var{progname}, followed by a relative pathname of the difference between @var{bin_prefix} and @var{prefix}. If @var{progname} does not contain any directory separators, @code{make_relative_prefix} will search @env{PATH} to find a program named @var{progname}. Also, if @var{progname} is a symbolic link, the symbolic link will be resolved. For example, if @var{bin_prefix} is @code{/alpha/beta/gamma/gcc/delta}, @var{prefix} is @code{/alpha/beta/gamma/omega/}, and @var{progname} is @code{/red/green/blue/gcc}, then this function will return @code{/red/green/blue/../../omega/}. The return value is normally allocated via @code{malloc}. If no relative prefix can be found, return @code{NULL}. @end deftypefn */ #ifdef HAVE_CONFIG_H #endif #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #include #ifndef R_OK #define R_OK 4 #define W_OK 2 #define X_OK 1 #endif #ifndef DIR_SEPARATOR # define DIR_SEPARATOR '/' #endif #if defined (_WIN32) || defined (__MSDOS__) \ || defined (__DJGPP__) || defined (__OS2__) # define HAVE_DOS_BASED_FILE_SYSTEM # define HAVE_HOST_EXECUTABLE_SUFFIX # define HOST_EXECUTABLE_SUFFIX ".exe" # ifndef DIR_SEPARATOR_2 # define DIR_SEPARATOR_2 '\\' # endif # define PATH_SEPARATOR ';' #else # define PATH_SEPARATOR ':' #endif #ifndef IS_DIR_SEPARATOR # ifndef DIR_SEPARATOR_2 # define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) # else # define IS_DIR_SEPARATOR(ch) \ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) # endif #endif #define DIR_UP ".." static char *save_string PARAMS ((const char *, int)); static char **split_directories PARAMS ((const char *, int *)); static void free_split_directories PARAMS ((char **)); static char * save_string (s, len) const char *s; int len; { char *result = malloc (len + 1); memcpy (result, s, len); result[len] = 0; return result; } /* Split a filename into component directories. */ static char ** split_directories (name, ptr_num_dirs) const char *name; int *ptr_num_dirs; { int num_dirs = 0; char **dirs; const char *p, *q; int ch; /* Count the number of directories. Special case MSDOS disk names as part of the initial directory. 
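   For instance (an illustration, not from the original source),
   split_directories ("/red/green/blue/gcc", &n) produces n == 5 and the
   vector { "/", "red/", "green/", "blue/", "gcc", NULL }: every intermediate
   component keeps its trailing separator, and the vector is NULL-terminated
   so that free_split_directories can walk it.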
*/ p = name; #ifdef HAVE_DOS_BASED_FILE_SYSTEM if (name[1] == ':' && IS_DIR_SEPARATOR (name[2])) { p += 3; num_dirs++; } #endif /* HAVE_DOS_BASED_FILE_SYSTEM */ while ((ch = *p++) != '\0') { if (IS_DIR_SEPARATOR (ch)) { num_dirs++; while (IS_DIR_SEPARATOR (*p)) p++; } } dirs = (char **) malloc (sizeof (char *) * (num_dirs + 2)); if (dirs == NULL) return NULL; /* Now copy the directory parts. */ num_dirs = 0; p = name; #ifdef HAVE_DOS_BASED_FILE_SYSTEM if (name[1] == ':' && IS_DIR_SEPARATOR (name[2])) { dirs[num_dirs++] = save_string (p, 3); if (dirs[num_dirs - 1] == NULL) { free (dirs); return NULL; } p += 3; } #endif /* HAVE_DOS_BASED_FILE_SYSTEM */ q = p; while ((ch = *p++) != '\0') { if (IS_DIR_SEPARATOR (ch)) { while (IS_DIR_SEPARATOR (*p)) p++; dirs[num_dirs++] = save_string (q, p - q); if (dirs[num_dirs - 1] == NULL) { dirs[num_dirs] = NULL; free_split_directories (dirs); return NULL; } q = p; } } if (p - 1 - q > 0) dirs[num_dirs++] = save_string (q, p - 1 - q); dirs[num_dirs] = NULL; if (dirs[num_dirs - 1] == NULL) { free_split_directories (dirs); return NULL; } if (ptr_num_dirs) *ptr_num_dirs = num_dirs; return dirs; } /* Release storage held by split directories. */ static void free_split_directories (dirs) char **dirs; { int i = 0; while (dirs[i] != NULL) free (dirs[i++]); free ((char *) dirs); } /* Given three strings PROGNAME, BIN_PREFIX, PREFIX, return a string that gets to PREFIX starting with the directory portion of PROGNAME and a relative pathname of the difference between BIN_PREFIX and PREFIX. For example, if BIN_PREFIX is /alpha/beta/gamma/gcc/delta, PREFIX is /alpha/beta/gamma/omega/, and PROGNAME is /red/green/blue/gcc, then this function will return /red/green/blue/../../omega/. If no relative prefix can be found, return NULL. */ char * make_relative_prefix (progname, bin_prefix, prefix) const char *progname; const char *bin_prefix; const char *prefix; { char **prog_dirs, **bin_dirs, **prefix_dirs; int prog_num, bin_num, prefix_num; int i, n, common; int needed_len; char *ret, *ptr, *full_progname = NULL; if (progname == NULL || bin_prefix == NULL || prefix == NULL) return NULL; /* If there is no full pathname, try to find the program by checking in each of the directories specified in the PATH environment variable. */ if (lbasename (progname) == progname) { char *temp; temp = getenv ("PATH"); if (temp) { char *startp, *endp, *nstore; size_t prefixlen = strlen (temp) + 1; if (prefixlen < 2) prefixlen = 2; nstore = (char *) alloca (prefixlen + strlen (progname) + 1); startp = endp = temp; while (1) { if (*endp == PATH_SEPARATOR || *endp == 0) { if (endp == startp) { nstore[0] = '.'; nstore[1] = DIR_SEPARATOR; nstore[2] = '\0'; } else { strncpy (nstore, startp, endp - startp); if (! IS_DIR_SEPARATOR (endp[-1])) { nstore[endp - startp] = DIR_SEPARATOR; nstore[endp - startp + 1] = 0; } else nstore[endp - startp] = 0; } strcat (nstore, progname); if (! access (nstore, X_OK) #ifdef HAVE_HOST_EXECUTABLE_SUFFIX || ! access (strcat (nstore, HOST_EXECUTABLE_SUFFIX), X_OK) #endif ) { progname = nstore; break; } if (*endp == 0) break; endp = startp = endp + 1; } else endp++; } } } full_progname = lrealpath (progname); if (full_progname == NULL) return NULL; prog_dirs = split_directories (full_progname, &prog_num); bin_dirs = split_directories (bin_prefix, &bin_num); free (full_progname); if (bin_dirs == NULL || prog_dirs == NULL) return NULL; /* Remove the program name from comparison of directory names. 
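   Only the directory portion of the program name takes part in the
   comparison.  To make the intended use concrete (a sketch, not part of the
   original source; the prefix strings and the xstrdup fallback are
   hypothetical), a relocatable tool typically calls:

     char *libdir = make_relative_prefix (argv[0],
                                          "/usr/local/bin/",
                                          "/usr/local/lib/mytool/");
     if (libdir == NULL)
       libdir = xstrdup ("/usr/local/lib/mytool/");

   If the binary still lives in /usr/local/bin the directories compare equal
   and NULL comes back, so the caller keeps the configured default; if it was
   copied to /opt/mytool/bin, the result is
   "/opt/mytool/bin/../lib/mytool/".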
*/ prog_num--; /* If we are still installed in the standard location, we don't need to specify relative directories. Also, if argv[0] still doesn't contain any directory specifiers after the search above, then there is not much we can do. */ if (prog_num == bin_num) { for (i = 0; i < bin_num; i++) { if (strcmp (prog_dirs[i], bin_dirs[i]) != 0) break; } if (prog_num <= 0 || i == bin_num) { free_split_directories (prog_dirs); free_split_directories (bin_dirs); prog_dirs = bin_dirs = (char **) 0; return NULL; } } prefix_dirs = split_directories (prefix, &prefix_num); if (prefix_dirs == NULL) { free_split_directories (prog_dirs); free_split_directories (bin_dirs); return NULL; } /* Find how many directories are in common between bin_prefix & prefix. */ n = (prefix_num < bin_num) ? prefix_num : bin_num; for (common = 0; common < n; common++) { if (strcmp (bin_dirs[common], prefix_dirs[common]) != 0) break; } /* If there are no common directories, there can be no relative prefix. */ if (common == 0) { free_split_directories (prog_dirs); free_split_directories (bin_dirs); free_split_directories (prefix_dirs); return NULL; } /* Two passes: first figure out the size of the result string, and then construct it. */ needed_len = 0; for (i = 0; i < prog_num; i++) needed_len += strlen (prog_dirs[i]); needed_len += sizeof (DIR_UP) * (bin_num - common); for (i = common; i < prefix_num; i++) needed_len += strlen (prefix_dirs[i]); needed_len += 1; /* Trailing NUL. */ ret = (char *) malloc (needed_len); if (ret == NULL) return NULL; /* Build up the pathnames in argv[0]. */ *ret = '\0'; for (i = 0; i < prog_num; i++) strcat (ret, prog_dirs[i]); /* Now build up the ..'s. */ ptr = ret + strlen(ret); for (i = common; i < bin_num; i++) { strcpy (ptr, DIR_UP); ptr += sizeof (DIR_UP) - 1; *(ptr++) = DIR_SEPARATOR; } *ptr = '\0'; /* Put in directories to move over to prefix. */ for (i = common; i < prefix_num; i++) strcat (ret, prefix_dirs[i]); free_split_directories (prog_dirs); free_split_directories (bin_dirs); free_split_directories (prefix_dirs); return ret; } /* Utility to pick a temporary filename prefix. Copyright (C) 1996, 1997, 1998, 2001 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #endif #include /* May get P_tmpdir. */ #include #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_SYS_FILE_H #include /* May get R_OK, etc. on some systems. */ #endif #ifndef R_OK #define R_OK 4 #define W_OK 2 #define X_OK 1 #endif extern int mkstemps PARAMS ((char *, int)); /* '/' works just fine on MS-DOS based systems. */ #ifndef DIR_SEPARATOR #define DIR_SEPARATOR '/' #endif /* Name of temporary file. mktemp requires 6 trailing X's. 
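   Looking ahead to make_temp_file defined below (a sketch, not part of the
   original source; the suffix and file contents are arbitrary and the usual
   <stdio.h>/<unistd.h> declarations are assumed):

     char *asmname = make_temp_file (".s");
     FILE *f = fopen (asmname, "w");
     if (f != NULL)
       {
         fputs ("nop\n", f);
         fclose (f);
       }
     unlink (asmname);
     free (asmname);

   The file already exists by the time the name is returned, which is what
   makes the name safe to open immediately.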
*/ #define TEMP_FILE "ccXXXXXX" #define TEMP_FILE_LEN (sizeof(TEMP_FILE) - 1) /* Subroutine of choose_tmpdir. If BASE is non-NULL, return it. Otherwise it checks if DIR is a usable directory. If success, DIR is returned. Otherwise NULL is returned. */ static inline const char *try PARAMS ((const char *, const char *)); static inline const char * try (dir, base) const char *dir, *base; { if (base != 0) return base; if (dir != 0 && access (dir, R_OK | W_OK | X_OK) == 0) return dir; return 0; } static const char tmp[] = { DIR_SEPARATOR, 't', 'm', 'p', 0 }; static const char usrtmp[] = { DIR_SEPARATOR, 'u', 's', 'r', DIR_SEPARATOR, 't', 'm', 'p', 0 }; static const char vartmp[] = { DIR_SEPARATOR, 'v', 'a', 'r', DIR_SEPARATOR, 't', 'm', 'p', 0 }; static char *memoized_tmpdir; /* @deftypefn Replacement char* choose_tmpdir () Returns a pointer to a directory path suitable for creating temporary files in. @end deftypefn */ char * choose_tmpdir () { const char *base = 0; char *tmpdir; unsigned int len; if (memoized_tmpdir) return memoized_tmpdir; base = try (getenv ("TMPDIR"), base); base = try (getenv ("TMP"), base); base = try (getenv ("TEMP"), base); #ifdef P_tmpdir base = try (P_tmpdir, base); #endif /* Try /var/tmp, /usr/tmp, then /tmp. */ base = try (vartmp, base); base = try (usrtmp, base); base = try (tmp, base); /* If all else fails, use the current directory! */ if (base == 0) base = "."; /* Append DIR_SEPARATOR to the directory we've chosen and return it. */ len = strlen (base); tmpdir = xmalloc (len + 2); strcpy (tmpdir, base); tmpdir[len] = DIR_SEPARATOR; tmpdir[len+1] = '\0'; memoized_tmpdir = tmpdir; return tmpdir; } /* @deftypefn Replacement char* make_temp_file (const char *@var{suffix}) Return a temporary file name (as a string) or @code{NULL} if unable to create one. @var{suffix} is a suffix to append to the file name. The string is @code{malloc}ed, and the temporary file has been created. @end deftypefn */ char * make_temp_file (suffix) const char *suffix; { const char *base = choose_tmpdir (); char *temp_filename; int base_len, suffix_len; int fd; if (suffix == 0) suffix = ""; base_len = strlen (base); suffix_len = strlen (suffix); temp_filename = xmalloc (base_len + TEMP_FILE_LEN + suffix_len + 1); strcpy (temp_filename, base); strcpy (temp_filename + base_len, TEMP_FILE); strcpy (temp_filename + base_len + TEMP_FILE_LEN, suffix); fd = mkstemps (temp_filename, suffix_len); /* If mkstemps failed, then something bad is happening. Maybe we should issue a message about a possible security attack in progress? */ if (fd == -1) abort (); /* Similarly if we can not close the file. */ if (close (fd)) abort (); return temp_filename; } /* objalloc.c -- routines to allocate memory for objects Copyright 1997 Free Software Foundation, Inc. Written by Ian Lance Taylor, Cygnus Solutions. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ /* objalloc.h -- routines to allocate memory for objects Copyright 1997, 2001 Free Software Foundation, Inc. Written by Ian Lance Taylor, Cygnus Solutions. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef OBJALLOC_H #define OBJALLOC_H /* These routines allocate space for an object. The assumption is that the object will want to allocate space as it goes along, but will never want to free any particular block. There is a function to free a block, which also frees all more recently allocated blocks. There is also a function to free all the allocated space. This is essentially a specialization of obstacks. The main difference is that a block may not be allocated a bit at a time. Another difference is that these routines are always built on top of malloc, and always pass an malloc failure back to the caller, unlike more recent versions of obstacks. */ /* This is what an objalloc structure looks like. Callers should not refer to these fields, nor should they allocate these structure themselves. Instead, they should only create them via objalloc_init, and only access them via the functions and macros listed below. The structure is only defined here so that we can access it via macros. */ struct objalloc { char *current_ptr; unsigned int current_space; PTR chunks; }; /* Work out the required alignment. */ struct objalloc_align { char x; double d; }; #if defined (__STDC__) && __STDC__ #ifndef offsetof #include #endif #endif #ifndef offsetof #define offsetof(TYPE, MEMBER) ((unsigned long) &((TYPE *)0)->MEMBER) #endif #define OBJALLOC_ALIGN offsetof (struct objalloc_align, d) /* Create an objalloc structure. Returns NULL if malloc fails. */ extern struct objalloc *objalloc_create PARAMS ((void)); /* Allocate space from an objalloc structure. Returns NULL if malloc fails. */ extern PTR _objalloc_alloc PARAMS ((struct objalloc *, unsigned long)); /* The macro version of objalloc_alloc. We only define this if using gcc, because otherwise we would have to evaluate the arguments multiple times, or use a temporary field as obstack.h does. */ #if defined (__GNUC__) && defined (__STDC__) && __STDC__ /* NextStep 2.0 cc is really gcc 1.93 but it defines __GNUC__ = 2 and does not implement __extension__. But that compiler doesn't define __GNUC_MINOR__. */ #if __GNUC__ < 2 || (__NeXT__ && !__GNUC_MINOR__) #define __extension__ #endif #define objalloc_alloc(o, l) \ __extension__ \ ({ struct objalloc *__o = (o); \ unsigned long __len = (l); \ if (__len == 0) \ __len = 1; \ __len = (__len + OBJALLOC_ALIGN - 1) &~ (OBJALLOC_ALIGN - 1); \ (__len <= __o->current_space \ ? (__o->current_ptr += __len, \ __o->current_space -= __len, \ (PTR) (__o->current_ptr - __len)) \ : _objalloc_alloc (__o, __len)); }) #else /* ! __GNUC__ */ #define objalloc_alloc(o, l) _objalloc_alloc ((o), (l)) #endif /* ! __GNUC__ */ /* Free an entire objalloc structure. 
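   A typical lifetime, for orientation (a sketch, not part of the original
   source; LEN and STR are hypothetical):

     struct objalloc *oa = objalloc_create ();
     char *copy;

     if (oa == NULL)
       abort ();
     copy = (char *) objalloc_alloc (oa, len + 1);
     if (copy == NULL)
       abort ();
     memcpy (copy, str, len + 1);
     objalloc_free (oa);

   Allocations are never released one by one: either the whole structure goes
   away with objalloc_free, or objalloc_free_block releases one block
   together with everything allocated after it.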
*/ extern void objalloc_free PARAMS ((struct objalloc *)); /* Free a block allocated by objalloc_alloc. This also frees all more recently allocated blocks. */ extern void objalloc_free_block PARAMS ((struct objalloc *, PTR)); #endif /* OBJALLOC_H */ /* Get a definition for NULL. */ #include #if VMS #include #include #else #ifdef ANSI_PROTOTYPES /* Get a definition for size_t. */ #include #endif #ifdef HAVE_STDLIB_H #include #else /* For systems with larger pointers than ints, this must be declared. */ extern PTR malloc PARAMS ((size_t)); extern void free PARAMS ((PTR)); #endif #endif /* These routines allocate space for an object. Freeing allocated space may or may not free all more recently allocated space. We handle large and small allocation requests differently. If we don't have enough space in the current block, and the allocation request is for more than 512 bytes, we simply pass it through to malloc. */ /* The objalloc structure is defined in objalloc.h. */ /* This structure appears at the start of each chunk. */ struct objalloc_chunk { /* Next chunk. */ struct objalloc_chunk *next; /* If this chunk contains large objects, this is the value of current_ptr when this chunk was allocated. If this chunk contains small objects, this is NULL. */ char *current_ptr; }; /* The aligned size of objalloc_chunk. */ #define CHUNK_HEADER_SIZE \ ((sizeof (struct objalloc_chunk) + OBJALLOC_ALIGN - 1) \ &~ (OBJALLOC_ALIGN - 1)) /* We ask for this much memory each time we create a chunk which is to hold small objects. */ #define CHUNK_SIZE (4096 - 32) /* A request for this amount or more is just passed through to malloc. */ #define BIG_REQUEST (512) /* Create an objalloc structure. */ struct objalloc * objalloc_create () { struct objalloc *ret; struct objalloc_chunk *chunk; ret = (struct objalloc *) malloc (sizeof *ret); if (ret == NULL) return NULL; ret->chunks = (PTR) malloc (CHUNK_SIZE); if (ret->chunks == NULL) { free (ret); return NULL; } chunk = (struct objalloc_chunk *) ret->chunks; chunk->next = NULL; chunk->current_ptr = NULL; ret->current_ptr = (char *) chunk + CHUNK_HEADER_SIZE; ret->current_space = CHUNK_SIZE - CHUNK_HEADER_SIZE; return ret; } /* Allocate space from an objalloc structure. */ PTR _objalloc_alloc (o, len) struct objalloc *o; unsigned long len; { /* We avoid confusion from zero sized objects by always allocating at least 1 byte. */ if (len == 0) len = 1; len = (len + OBJALLOC_ALIGN - 1) &~ (OBJALLOC_ALIGN - 1); if (len <= o->current_space) { o->current_ptr += len; o->current_space -= len; return (PTR) (o->current_ptr - len); } if (len >= BIG_REQUEST) { char *ret; struct objalloc_chunk *chunk; ret = (char *) malloc (CHUNK_HEADER_SIZE + len); if (ret == NULL) return NULL; chunk = (struct objalloc_chunk *) ret; chunk->next = (struct objalloc_chunk *) o->chunks; chunk->current_ptr = o->current_ptr; o->chunks = (PTR) chunk; return (PTR) (ret + CHUNK_HEADER_SIZE); } else { struct objalloc_chunk *chunk; chunk = (struct objalloc_chunk *) malloc (CHUNK_SIZE); if (chunk == NULL) return NULL; chunk->next = (struct objalloc_chunk *) o->chunks; chunk->current_ptr = NULL; o->current_ptr = (char *) chunk + CHUNK_HEADER_SIZE; o->current_space = CHUNK_SIZE - CHUNK_HEADER_SIZE; o->chunks = (PTR) chunk; return objalloc_alloc (o, len); } } /* Free an entire objalloc structure. 
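   Concretely (numbers taken from the macros above): a 40-byte request,
   rounded up to the alignment, is served from the current small-object chunk
   or from a fresh CHUNK_SIZE chunk if too little space remains; a 600-byte
   request is at or above BIG_REQUEST and therefore always gets its own
   malloc'ed chunk, whose header saves current_ptr so that
   objalloc_free_block can unwind past it later.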
*/ void objalloc_free (o) struct objalloc *o; { struct objalloc_chunk *l; l = (struct objalloc_chunk *) o->chunks; while (l != NULL) { struct objalloc_chunk *next; next = l->next; free (l); l = next; } free (o); } /* Free a block from an objalloc structure. This also frees all more recently allocated blocks. */ void objalloc_free_block (o, block) struct objalloc *o; PTR block; { struct objalloc_chunk *p, *small; char *b = (char *) block; /* First set P to the chunk which contains the block we are freeing, and set Q to the last small object chunk we see before P. */ small = NULL; for (p = (struct objalloc_chunk *) o->chunks; p != NULL; p = p->next) { if (p->current_ptr == NULL) { if (b > (char *) p && b < (char *) p + CHUNK_SIZE) break; small = p; } else { if (b == (char *) p + CHUNK_HEADER_SIZE) break; } } /* If we can't find the chunk, the caller has made a mistake. */ if (p == NULL) abort (); if (p->current_ptr == NULL) { struct objalloc_chunk *q; struct objalloc_chunk *first; /* The block is in a chunk containing small objects. We can free every chunk through SMALL, because they have certainly been allocated more recently. After SMALL, we will not see any chunks containing small objects; we can free any big chunk if the current_ptr is greater than or equal to B. We can then reset the new current_ptr to B. */ first = NULL; q = (struct objalloc_chunk *) o->chunks; while (q != p) { struct objalloc_chunk *next; next = q->next; if (small != NULL) { if (small == q) small = NULL; free (q); } else if (q->current_ptr > b) free (q); else if (first == NULL) first = q; q = next; } if (first == NULL) first = p; o->chunks = (PTR) first; /* Now start allocating from this small block again. */ o->current_ptr = b; o->current_space = ((char *) p + CHUNK_SIZE) - b; } else { struct objalloc_chunk *q; char *current_ptr; /* This block is in a large chunk by itself. We can free everything on the list up to and including this block. We then start allocating from the next chunk containing small objects, setting current_ptr from the value stored with the large chunk we are freeing. */ current_ptr = p->current_ptr; p = p->next; q = (struct objalloc_chunk *) o->chunks; while (q != p) { struct objalloc_chunk *next; next = q->next; free (q); q = next; } o->chunks = (PTR) p; while (p->current_ptr != NULL) p = p->next; o->current_ptr = current_ptr; o->current_space = ((char *) p + CHUNK_SIZE) - current_ptr; } } /* obstack.c - subroutines used implicitly by object stack macros Copyright (C) 1988,89,90,91,92,93,94,96,97 Free Software Foundation, Inc. NOTE: This source is derived from an old version taken from the GNU C Library (glibc). This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #endif /* obstack.h - object stack macros Copyright 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc. 
NOTE: The canonical source of this file is maintained with the GNU C Library. Bugs can be reported to bug-glibc@gnu.org. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Summary: All the apparent functions defined here are macros. The idea is that you would use these pre-tested macros to solve a very specific set of problems, and they would run fast. Caution: no side-effects in arguments please!! They may be evaluated MANY times!! These macros operate a stack of objects. Each object starts life small, and may grow to maturity. (Consider building a word syllable by syllable.) An object can move while it is growing. Once it has been "finished" it never changes address again. So the "top of the stack" is typically an immature growing object, while the rest of the stack is of mature, fixed size and fixed address objects. These routines grab large chunks of memory, using a function you supply, called `obstack_chunk_alloc'. On occasion, they free chunks, by calling `obstack_chunk_free'. You must define them and declare them before using any obstack macros. Each independent stack is represented by a `struct obstack'. Each of the obstack macros expects a pointer to such a structure as the first argument. One motivation for this package is the problem of growing char strings in symbol tables. Unless you are "fascist pig with a read-only mind" --Gosper's immortal quote from HAKMEM item 154, out of context--you would not like to put any arbitrary upper limit on the length of your symbols. In practice this often means you will build many short symbols and a few long symbols. At the time you are reading a symbol you don't know how long it is. One traditional method is to read a symbol into a buffer, realloc()ating the buffer every time you try to read a symbol that is longer than the buffer. This is beaut, but you still will want to copy the symbol from the buffer to a more permanent symbol-table entry say about half the time. With obstacks, you can work differently. Use one obstack for all symbol names. As you read a symbol, grow the name in the obstack gradually. When the name is complete, finalize it. Then, if the symbol exists already, free the newly read name. The way we do this is to take a large chunk, allocating memory from low addresses. When you want to build a symbol in the chunk you just add chars above the current "high water mark" in the chunk. When you have finished adding chars, because you got to the end of the symbol, you know how long the chars are, and you can create a new object. Mostly the chars will not burst over the highest address of the chunk, because you would typically expect a chunk to be (say) 100 times as long as an average object. In case that isn't clear, when we have enough chars to make up the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed) so we just point to it where it lies. 
No moving of chars is needed and this is the second win: potentially long strings need never be explicitly shuffled. Once an object is formed, it does not change its address during its lifetime. When the chars burst over a chunk boundary, we allocate a larger chunk, and then copy the partly formed object from the end of the old chunk to the beginning of the new larger chunk. We then carry on accreting characters to the end of the object as we normally would. A special macro is provided to add a single char at a time to a growing object. This allows the use of register variables, which break the ordinary 'growth' macro. Summary: We allocate large chunks. We carve out one object at a time from the current chunk. Once carved, an object never moves. We are free to append data of any size to the currently growing object. Exactly one object is growing in an obstack at any one time. You can run one obstack per control block. You may have as many control blocks as you dare. Because of the way we do it, you can `unwind' an obstack back to a previous state. (You may remove objects much as you would with a stack.) */ /* Don't do the contents of this file more than once. */ #ifndef _OBSTACK_H #define _OBSTACK_H 1 #ifdef __cplusplus extern "C" { #endif /* We use subtraction of (char *) 0 instead of casting to int because on word-addressable machines a simple cast to int may ignore the byte-within-word field of the pointer. */ #ifndef __PTR_TO_INT # define __PTR_TO_INT(P) ((P) - (char *) 0) #endif #ifndef __INT_TO_PTR # define __INT_TO_PTR(P) ((P) + (char *) 0) #endif /* We need the type of the resulting object. If __PTRDIFF_TYPE__ is defined, as with GNU C, use that; that way we don't pollute the namespace with 's symbols. Otherwise, if is available, include it and use ptrdiff_t. In traditional C, long is the best that we can do. */ #ifdef __PTRDIFF_TYPE__ # define PTR_INT_TYPE __PTRDIFF_TYPE__ #else # ifdef HAVE_STDDEF_H # include # define PTR_INT_TYPE ptrdiff_t # else # define PTR_INT_TYPE long # endif #endif #if defined _LIBC || defined HAVE_STRING_H # include # if defined __STDC__ && __STDC__ # define _obstack_memcpy(To, From, N) memcpy ((To), (From), (N)) # else # define _obstack_memcpy(To, From, N) memcpy ((To), (char *)(From), (N)) # endif #else # ifdef memcpy # define _obstack_memcpy(To, From, N) memcpy ((To), (char *)(From), (N)) # else # define _obstack_memcpy(To, From, N) bcopy ((char *)(From), (To), (N)) # endif #endif struct _obstack_chunk /* Lives at front of each chunk. */ { char *limit; /* 1 past end of this chunk */ struct _obstack_chunk *prev; /* address of prior chunk or NULL */ char contents[4]; /* objects begin here */ }; struct obstack /* control current object in current chunk */ { long chunk_size; /* preferred size to allocate chunks in */ struct _obstack_chunk *chunk; /* address of current struct obstack_chunk */ char *object_base; /* address of object we are building */ char *next_free; /* where to add next char to current object */ char *chunk_limit; /* address of char after current chunk */ PTR_INT_TYPE temp; /* Temporary for some macros. */ int alignment_mask; /* Mask of alignment for each object. */ #if defined __STDC__ && __STDC__ /* These prototypes vary based on `use_extra_arg', and we use casts to the prototypeless function type in all assignments, but having prototypes here quiets -Wstrict-prototypes. 
*/ struct _obstack_chunk *(*chunkfun) (void *, long); void (*freefun) (void *, struct _obstack_chunk *); void *extra_arg; /* first arg for chunk alloc/dealloc funcs */ #else struct _obstack_chunk *(*chunkfun) (); /* User's fcn to allocate a chunk. */ void (*freefun) (); /* User's function to free a chunk. */ char *extra_arg; /* first arg for chunk alloc/dealloc funcs */ #endif unsigned use_extra_arg:1; /* chunk alloc/dealloc funcs take extra arg */ unsigned maybe_empty_object:1;/* There is a possibility that the current chunk contains a zero-length object. This prevents freeing the chunk if we allocate a bigger chunk to replace it. */ unsigned alloc_failed:1; /* No longer used, as we now call the failed handler on error, but retained for binary compatibility. */ }; /* Declare the external functions we use; they are in obstack.c. */ #if defined __STDC__ && __STDC__ extern void _obstack_newchunk (struct obstack *, int); extern void _obstack_free (struct obstack *, void *); extern int _obstack_begin (struct obstack *, int, int, void *(*) (long), void (*) (void *)); extern int _obstack_begin_1 (struct obstack *, int, int, void *(*) (void *, long), void (*) (void *, void *), void *); extern int _obstack_memory_used (struct obstack *); #else extern void _obstack_newchunk (); extern void _obstack_free (); extern int _obstack_begin (); extern int _obstack_begin_1 (); extern int _obstack_memory_used (); #endif #if defined __STDC__ && __STDC__ /* Do the function-declarations after the structs but before defining the macros. */ void obstack_init (struct obstack *obstack); void * obstack_alloc (struct obstack *obstack, int size); void * obstack_copy (struct obstack *obstack, void *address, int size); void * obstack_copy0 (struct obstack *obstack, void *address, int size); void obstack_free (struct obstack *obstack, void *block); void obstack_blank (struct obstack *obstack, int size); void obstack_grow (struct obstack *obstack, void *data, int size); void obstack_grow0 (struct obstack *obstack, void *data, int size); void obstack_1grow (struct obstack *obstack, int data_char); void obstack_ptr_grow (struct obstack *obstack, void *data); void obstack_int_grow (struct obstack *obstack, int data); void * obstack_finish (struct obstack *obstack); int obstack_object_size (struct obstack *obstack); int obstack_room (struct obstack *obstack); void obstack_make_room (struct obstack *obstack, int size); void obstack_1grow_fast (struct obstack *obstack, int data_char); void obstack_ptr_grow_fast (struct obstack *obstack, void *data); void obstack_int_grow_fast (struct obstack *obstack, int data); void obstack_blank_fast (struct obstack *obstack, int size); void * obstack_base (struct obstack *obstack); void * obstack_next_free (struct obstack *obstack); int obstack_alignment_mask (struct obstack *obstack); int obstack_chunk_size (struct obstack *obstack); int obstack_memory_used (struct obstack *obstack); #endif /* __STDC__ */ /* Non-ANSI C cannot really support alternative functions for these macros, so we do not declare them. */ /* Error handler called when `obstack_chunk_alloc' failed to allocate more memory. This can be set to a user defined function. The default action is to print a message and abort. */ #if defined __STDC__ && __STDC__ extern void (*obstack_alloc_failed_handler) (void); #else extern void (*obstack_alloc_failed_handler) (); #endif /* Exit value used when `print_and_abort' is used. */ extern int obstack_exit_failure; /* Pointer to beginning of object being allocated or to be allocated next. 
Note that this might not be the final address of the object because a new chunk might be needed to hold the final size. */ #define obstack_base(h) ((h)->object_base) /* Size for allocating ordinary chunks. */ #define obstack_chunk_size(h) ((h)->chunk_size) /* Pointer to next byte not yet allocated in current chunk. */ #define obstack_next_free(h) ((h)->next_free) /* Mask specifying low bits that should be clear in address of an object. */ #define obstack_alignment_mask(h) ((h)->alignment_mask) /* To prevent prototype warnings provide complete argument list in standard C version. */ #if defined __STDC__ && __STDC__ # define obstack_init(h) \ _obstack_begin ((h), 0, 0, \ (void *(*) (long)) obstack_chunk_alloc, (void (*) (void *)) obstack_chunk_free) # define obstack_begin(h, size) \ _obstack_begin ((h), (size), 0, \ (void *(*) (long)) obstack_chunk_alloc, (void (*) (void *)) obstack_chunk_free) # define obstack_specify_allocation(h, size, alignment, chunkfun, freefun) \ _obstack_begin ((h), (size), (alignment), \ (void *(*) (long)) (chunkfun), (void (*) (void *)) (freefun)) # define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \ _obstack_begin_1 ((h), (size), (alignment), \ (void *(*) (void *, long)) (chunkfun), \ (void (*) (void *, void *)) (freefun), (arg)) # define obstack_chunkfun(h, newchunkfun) \ ((h) -> chunkfun = (struct _obstack_chunk *(*)(void *, long)) (newchunkfun)) # define obstack_freefun(h, newfreefun) \ ((h) -> freefun = (void (*)(void *, struct _obstack_chunk *)) (newfreefun)) #else # define obstack_init(h) \ _obstack_begin ((h), 0, 0, \ (void *(*) ()) obstack_chunk_alloc, (void (*) ()) obstack_chunk_free) # define obstack_begin(h, size) \ _obstack_begin ((h), (size), 0, \ (void *(*) ()) obstack_chunk_alloc, (void (*) ()) obstack_chunk_free) # define obstack_specify_allocation(h, size, alignment, chunkfun, freefun) \ _obstack_begin ((h), (size), (alignment), \ (void *(*) ()) (chunkfun), (void (*) ()) (freefun)) # define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \ _obstack_begin_1 ((h), (size), (alignment), \ (void *(*) ()) (chunkfun), (void (*) ()) (freefun), (arg)) # define obstack_chunkfun(h, newchunkfun) \ ((h) -> chunkfun = (struct _obstack_chunk *(*)()) (newchunkfun)) # define obstack_freefun(h, newfreefun) \ ((h) -> freefun = (void (*)()) (newfreefun)) #endif #define obstack_1grow_fast(h,achar) (*((h)->next_free)++ = (achar)) #define obstack_blank_fast(h,n) ((h)->next_free += (n)) #define obstack_memory_used(h) _obstack_memory_used (h) #if defined __GNUC__ && defined __STDC__ && __STDC__ /* NextStep 2.0 cc is really gcc 1.93 but it defines __GNUC__ = 2 and does not implement __extension__. But that compiler doesn't define __GNUC_MINOR__. */ # if __GNUC__ < 2 || (__NeXT__ && !__GNUC_MINOR__) # define __extension__ # endif /* For GNU C, if not -traditional, we can define these macros to compute all args only once without using a global variable. Also, we can avoid using the `temp' slot, to make faster code. 
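   Before the definitions themselves, a usage illustration may help.  The
   following is a minimal sketch, not part of the original sources: the
   quoted header name and the choice of plain malloc/free as the chunk
   allocator are assumptions a real client would adjust.

     #include <stdio.h>
     #include <stdlib.h>

     #define obstack_chunk_alloc malloc   // used by obstack_init/_begin
     #define obstack_chunk_free  free
     #include "obstack.h"

     int
     main (void)
     {
       struct obstack ob;
       obstack_init (&ob);                 // grab the first chunk

       // Allocate a NUL-terminated copy of a string in one call.
       char *s = (char *) obstack_copy0 (&ob, "hello", 5);
       printf ("%s, %d bytes used\n", s, obstack_memory_used (&ob));

       obstack_free (&ob, NULL);           // release every object and chunk
       return 0;
     }

   All objects live inside a few large chunks obtained through
   obstack_chunk_alloc, which is why freeing with a null object argument
   can release everything at once.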
*/ # define obstack_object_size(OBSTACK) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ (unsigned) (__o->next_free - __o->object_base); }) # define obstack_room(OBSTACK) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ (unsigned) (__o->chunk_limit - __o->next_free); }) # define obstack_make_room(OBSTACK,length) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ int __len = (length); \ if (__o->chunk_limit - __o->next_free < __len) \ _obstack_newchunk (__o, __len); \ (void) 0; }) # define obstack_empty_p(OBSTACK) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ (__o->chunk->prev == 0 && __o->next_free - __o->chunk->contents == 0); }) # define obstack_grow(OBSTACK,where,length) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ int __len = (length); \ if (__o->next_free + __len > __o->chunk_limit) \ _obstack_newchunk (__o, __len); \ _obstack_memcpy (__o->next_free, (where), __len); \ __o->next_free += __len; \ (void) 0; }) # define obstack_grow0(OBSTACK,where,length) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ int __len = (length); \ if (__o->next_free + __len + 1 > __o->chunk_limit) \ _obstack_newchunk (__o, __len + 1); \ _obstack_memcpy (__o->next_free, (where), __len); \ __o->next_free += __len; \ *(__o->next_free)++ = 0; \ (void) 0; }) # define obstack_1grow(OBSTACK,datum) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ if (__o->next_free + 1 > __o->chunk_limit) \ _obstack_newchunk (__o, 1); \ obstack_1grow_fast (__o, datum); \ (void) 0; }) /* These assume that the obstack alignment is good enough for pointers or ints, and that the data added so far to the current object shares that much alignment. */ # define obstack_ptr_grow(OBSTACK,datum) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ if (__o->next_free + sizeof (void *) > __o->chunk_limit) \ _obstack_newchunk (__o, sizeof (void *)); \ obstack_ptr_grow_fast (__o, datum); }) # define obstack_int_grow(OBSTACK,datum) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ if (__o->next_free + sizeof (int) > __o->chunk_limit) \ _obstack_newchunk (__o, sizeof (int)); \ obstack_int_grow_fast (__o, datum); }) # define obstack_ptr_grow_fast(OBSTACK,aptr) \ __extension__ \ ({ struct obstack *__o1 = (OBSTACK); \ *(const void **) __o1->next_free = (aptr); \ __o1->next_free += sizeof (const void *); \ (void) 0; }) # define obstack_int_grow_fast(OBSTACK,aint) \ __extension__ \ ({ struct obstack *__o1 = (OBSTACK); \ *(int *) __o1->next_free = (aint); \ __o1->next_free += sizeof (int); \ (void) 0; }) # define obstack_blank(OBSTACK,length) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ int __len = (length); \ if (__o->chunk_limit - __o->next_free < __len) \ _obstack_newchunk (__o, __len); \ obstack_blank_fast (__o, __len); \ (void) 0; }) # define obstack_alloc(OBSTACK,length) \ __extension__ \ ({ struct obstack *__h = (OBSTACK); \ obstack_blank (__h, (length)); \ obstack_finish (__h); }) # define obstack_copy(OBSTACK,where,length) \ __extension__ \ ({ struct obstack *__h = (OBSTACK); \ obstack_grow (__h, (where), (length)); \ obstack_finish (__h); }) # define obstack_copy0(OBSTACK,where,length) \ __extension__ \ ({ struct obstack *__h = (OBSTACK); \ obstack_grow0 (__h, (where), (length)); \ obstack_finish (__h); }) /* The local variable is named __o1 to avoid a name conflict when obstack_blank is called. 
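   A sketch of the growing-object idiom these macros implement (it assumes
   an obstack ob already initialized as in the earlier sketch; illustrative
   only):

     obstack_grow  (&ob, "foo", 3);   // append three bytes to the open object
     obstack_1grow (&ob, '-');        // append a single character
     obstack_grow0 (&ob, "bar", 3);   // append three bytes plus a trailing NUL
     char *word = (char *) obstack_finish (&ob);   // word is now "foo-bar"

   Until obstack_finish is called the object may still move: if a grow
   operation needs a new chunk, _obstack_newchunk copies the partial object
   there, so only the finished address is stable.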
*/ # define obstack_finish(OBSTACK) \ __extension__ \ ({ struct obstack *__o1 = (OBSTACK); \ void *value; \ value = (void *) __o1->object_base; \ if (__o1->next_free == value) \ __o1->maybe_empty_object = 1; \ __o1->next_free \ = __INT_TO_PTR ((__PTR_TO_INT (__o1->next_free)+__o1->alignment_mask)\ & ~ (__o1->alignment_mask)); \ if (__o1->next_free - (char *)__o1->chunk \ > __o1->chunk_limit - (char *)__o1->chunk) \ __o1->next_free = __o1->chunk_limit; \ __o1->object_base = __o1->next_free; \ value; }) # define obstack_free(OBSTACK, OBJ) \ __extension__ \ ({ struct obstack *__o = (OBSTACK); \ void *__obj = (OBJ); \ if (__obj > (void *)__o->chunk && __obj < (void *)__o->chunk_limit) \ __o->next_free = __o->object_base = __obj; \ else (obstack_free) (__o, __obj); }) #else /* not __GNUC__ or not __STDC__ */ # define obstack_object_size(h) \ (unsigned) ((h)->next_free - (h)->object_base) # define obstack_room(h) \ (unsigned) ((h)->chunk_limit - (h)->next_free) # define obstack_empty_p(h) \ ((h)->chunk->prev == 0 && (h)->next_free - (h)->chunk->contents == 0) /* Note that the call to _obstack_newchunk is enclosed in (..., 0) so that we can avoid having void expressions in the arms of the conditional expression. Casting the third operand to void was tried before, but some compilers won't accept it. */ # define obstack_make_room(h,length) \ ( (h)->temp = (length), \ (((h)->next_free + (h)->temp > (h)->chunk_limit) \ ? (_obstack_newchunk ((h), (h)->temp), 0) : 0)) # define obstack_grow(h,where,length) \ ( (h)->temp = (length), \ (((h)->next_free + (h)->temp > (h)->chunk_limit) \ ? (_obstack_newchunk ((h), (h)->temp), 0) : 0), \ _obstack_memcpy ((h)->next_free, (where), (h)->temp), \ (h)->next_free += (h)->temp) # define obstack_grow0(h,where,length) \ ( (h)->temp = (length), \ (((h)->next_free + (h)->temp + 1 > (h)->chunk_limit) \ ? (_obstack_newchunk ((h), (h)->temp + 1), 0) : 0), \ _obstack_memcpy ((h)->next_free, (where), (h)->temp), \ (h)->next_free += (h)->temp, \ *((h)->next_free)++ = 0) # define obstack_1grow(h,datum) \ ( (((h)->next_free + 1 > (h)->chunk_limit) \ ? (_obstack_newchunk ((h), 1), 0) : 0), \ obstack_1grow_fast (h, datum)) # define obstack_ptr_grow(h,datum) \ ( (((h)->next_free + sizeof (char *) > (h)->chunk_limit) \ ? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0), \ obstack_ptr_grow_fast (h, datum)) # define obstack_int_grow(h,datum) \ ( (((h)->next_free + sizeof (int) > (h)->chunk_limit) \ ? (_obstack_newchunk ((h), sizeof (int)), 0) : 0), \ obstack_int_grow_fast (h, datum)) # define obstack_ptr_grow_fast(h,aptr) \ (((const void **) ((h)->next_free += sizeof (void *)))[-1] = (aptr)) # define obstack_int_grow_fast(h,aint) \ (((int *) ((h)->next_free += sizeof (int)))[-1] = (aptr)) # define obstack_blank(h,length) \ ( (h)->temp = (length), \ (((h)->chunk_limit - (h)->next_free < (h)->temp) \ ? (_obstack_newchunk ((h), (h)->temp), 0) : 0), \ obstack_blank_fast (h, (h)->temp)) # define obstack_alloc(h,length) \ (obstack_blank ((h), (length)), obstack_finish ((h))) # define obstack_copy(h,where,length) \ (obstack_grow ((h), (where), (length)), obstack_finish ((h))) # define obstack_copy0(h,where,length) \ (obstack_grow0 ((h), (where), (length)), obstack_finish ((h))) # define obstack_finish(h) \ ( ((h)->next_free == (h)->object_base \ ? 
(((h)->maybe_empty_object = 1), 0) \ : 0), \ (h)->temp = __PTR_TO_INT ((h)->object_base), \ (h)->next_free \ = __INT_TO_PTR ((__PTR_TO_INT ((h)->next_free)+(h)->alignment_mask) \ & ~ ((h)->alignment_mask)), \ (((h)->next_free - (char *) (h)->chunk \ > (h)->chunk_limit - (char *) (h)->chunk) \ ? ((h)->next_free = (h)->chunk_limit) : 0), \ (h)->object_base = (h)->next_free, \ __INT_TO_PTR ((h)->temp)) # if defined __STDC__ && __STDC__ # define obstack_free(h,obj) \ ( (h)->temp = (char *) (obj) - (char *) (h)->chunk, \ (((h)->temp > 0 && (h)->temp < (h)->chunk_limit - (char *) (h)->chunk)\ ? (int) ((h)->next_free = (h)->object_base \ = (h)->temp + (char *) (h)->chunk) \ : (((obstack_free) ((h), (h)->temp + (char *) (h)->chunk), 0), 0))) # else # define obstack_free(h,obj) \ ( (h)->temp = (char *) (obj) - (char *) (h)->chunk, \ (((h)->temp > 0 && (h)->temp < (h)->chunk_limit - (char *) (h)->chunk)\ ? (int) ((h)->next_free = (h)->object_base \ = (h)->temp + (char *) (h)->chunk) \ : (_obstack_free ((h), (h)->temp + (char *) (h)->chunk), 0))) # endif #endif /* not __GNUC__ or not __STDC__ */ #ifdef __cplusplus } /* C++ */ #endif #endif /* obstack.h */ /* NOTE BEFORE MODIFYING THIS FILE: This version number must be incremented whenever callers compiled using an old obstack.h can no longer properly call the functions in this obstack.c. */ #define OBSTACK_INTERFACE_VERSION 1 /* Comment out all this code if we are using the GNU C Library, and are not actually compiling the library itself, and the installed library supports the same library interface we do. This code is part of the GNU C Library, but also included in many other GNU distributions. Compiling and linking in this code is a waste when using the GNU C library (especially if it is a shared library). Rather than having every GNU program understand `configure --with-gnu-libc' and omit the object files, it is simpler to just do this in the source for each such file. */ #include /* Random thing to get __GNU_LIBRARY__. */ #if !defined (_LIBC) && defined (__GNU_LIBRARY__) && __GNU_LIBRARY__ > 1 #include #if _GNU_OBSTACK_INTERFACE_VERSION == OBSTACK_INTERFACE_VERSION #define ELIDE_CODE #endif #endif #ifndef ELIDE_CODE #if defined (__STDC__) && __STDC__ #define POINTER void * #else #define POINTER char * #endif /* Determine default alignment. */ struct fooalign {char x; double d;}; #define DEFAULT_ALIGNMENT \ ((PTR_INT_TYPE) ((char *) &((struct fooalign *) 0)->d - (char *) 0)) /* If malloc were really smart, it would round addresses to DEFAULT_ALIGNMENT. But in fact it might be less smart and round addresses to as much as DEFAULT_ROUNDING. So we prepare for it to do that. */ union fooround {long x; double d;}; #define DEFAULT_ROUNDING (sizeof (union fooround)) /* When we copy a long block of data, this is the unit to do it with. On some machines, copying successive ints does not work; in such a case, redefine COPYING_UNIT to `long' (if that works) or `char' as a last resort. */ #ifndef COPYING_UNIT #define COPYING_UNIT int #endif /* The functions allocating more room by calling `obstack_chunk_alloc' jump to the handler pointed to by `obstack_alloc_failed_handler'. This variable by default points to the internal function `print_and_abort'. */ #if defined (__STDC__) && __STDC__ static void print_and_abort (void); void (*obstack_alloc_failed_handler) (void) = print_and_abort; #else static void print_and_abort (); void (*obstack_alloc_failed_handler) () = print_and_abort; #endif /* Exit value used when `print_and_abort' is used. 
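   That exit value, obstack_exit_failure, is defined just below.  A client
   that wants a different out-of-memory policy can install its own handler
   before using any obstack; a minimal sketch (illustrative only; the
   function name is made up):

     #include <stdio.h>
     #include <stdlib.h>

     static void
     my_obstack_oom (void)
     {
       // Reached when obstack_chunk_alloc returns a null pointer.
       fprintf (stderr, "out of memory while growing an obstack\n");
       exit (2);
     }

     // Somewhere during start-up:
     //   obstack_alloc_failed_handler = my_obstack_oom;

   The handler must not return normally: the code below uses the chunk
   pointer immediately after calling it, so a handler that returned would
   let a null pointer be dereferenced.  Exiting or longjmp-ing out is the
   only safe behavior.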
*/ #if defined __GNU_LIBRARY__ || defined HAVE_STDLIB_H #include #endif #ifndef EXIT_FAILURE #define EXIT_FAILURE 1 #endif int obstack_exit_failure = EXIT_FAILURE; /* The non-GNU-C macros copy the obstack into this global variable to avoid multiple evaluation. */ struct obstack *_obstack; /* Define a macro that either calls functions with the traditional malloc/free calling interface, or calls functions with the mmalloc/mfree interface (that adds an extra first argument), based on the state of use_extra_arg. For free, do not use ?:, since some compilers, like the MIPS compilers, do not allow (expr) ? void : void. */ #if defined (__STDC__) && __STDC__ #define CALL_CHUNKFUN(h, size) \ (((h) -> use_extra_arg) \ ? (*(h)->chunkfun) ((h)->extra_arg, (size)) \ : (*(struct _obstack_chunk *(*) (long)) (h)->chunkfun) ((size))) #define CALL_FREEFUN(h, old_chunk) \ do { \ if ((h) -> use_extra_arg) \ (*(h)->freefun) ((h)->extra_arg, (old_chunk)); \ else \ (*(void (*) (void *)) (h)->freefun) ((old_chunk)); \ } while (0) #else #define CALL_CHUNKFUN(h, size) \ (((h) -> use_extra_arg) \ ? (*(h)->chunkfun) ((h)->extra_arg, (size)) \ : (*(struct _obstack_chunk *(*) ()) (h)->chunkfun) ((size))) #define CALL_FREEFUN(h, old_chunk) \ do { \ if ((h) -> use_extra_arg) \ (*(h)->freefun) ((h)->extra_arg, (old_chunk)); \ else \ (*(void (*) ()) (h)->freefun) ((old_chunk)); \ } while (0) #endif /* Initialize an obstack H for use. Specify chunk size SIZE (0 means default). Objects start on multiples of ALIGNMENT (0 means use default). CHUNKFUN is the function to use to allocate chunks, and FREEFUN the function to free them. Return nonzero if successful, zero if out of memory. To recover from an out of memory error, free up some memory, then call this again. */ int _obstack_begin (h, size, alignment, chunkfun, freefun) struct obstack *h; int size; int alignment; #if defined (__STDC__) && __STDC__ POINTER (*chunkfun) (long); void (*freefun) (void *); #else POINTER (*chunkfun) (); void (*freefun) (); #endif { register struct _obstack_chunk *chunk; /* points to new chunk */ if (alignment == 0) alignment = (int) DEFAULT_ALIGNMENT; if (size == 0) /* Default size is what GNU malloc can fit in a 4096-byte block. */ { /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc. Use the values for range checking, because if range checking is off, the extra bytes won't be missed terribly, but if range checking is on and we used a larger request, a whole extra 4096 bytes would be allocated. These number are irrelevant to the new GNU malloc. I suspect it is less sensitive to the size of the request. */ int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1)) + 4 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1)); size = 4096 - extra; } #if defined (__STDC__) && __STDC__ h->chunkfun = (struct _obstack_chunk * (*)(void *, long)) chunkfun; h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun; #else h->chunkfun = (struct _obstack_chunk * (*)()) chunkfun; h->freefun = freefun; #endif h->chunk_size = size; h->alignment_mask = alignment - 1; h->use_extra_arg = 0; chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size); if (!chunk) (*obstack_alloc_failed_handler) (); h->next_free = h->object_base = chunk->contents; h->chunk_limit = chunk->limit = (char *) chunk + h->chunk_size; chunk->prev = 0; /* The initial chunk now contains no empty object. 
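   (Aside on the default-size computation above, worked through for the
   common case DEFAULT_ROUNDING == 8; the macro is sizeof (union fooround),
   so the value is platform dependent.  ((12 + 7) & ~7) is 16,
   (16 + 4 + 7) & ~7 is 24, and size therefore becomes 4096 - 24 = 4072
   bytes.  With DEFAULT_ROUNDING == 16 the result would be
   4096 - 32 = 4064.)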
*/ h->maybe_empty_object = 0; h->alloc_failed = 0; return 1; } int _obstack_begin_1 (h, size, alignment, chunkfun, freefun, arg) struct obstack *h; int size; int alignment; #if defined (__STDC__) && __STDC__ POINTER (*chunkfun) (POINTER, long); void (*freefun) (POINTER, POINTER); #else POINTER (*chunkfun) (); void (*freefun) (); #endif POINTER arg; { register struct _obstack_chunk *chunk; /* points to new chunk */ if (alignment == 0) alignment = (int) DEFAULT_ALIGNMENT; if (size == 0) /* Default size is what GNU malloc can fit in a 4096-byte block. */ { /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc. Use the values for range checking, because if range checking is off, the extra bytes won't be missed terribly, but if range checking is on and we used a larger request, a whole extra 4096 bytes would be allocated. These number are irrelevant to the new GNU malloc. I suspect it is less sensitive to the size of the request. */ int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1)) + 4 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1)); size = 4096 - extra; } #if defined(__STDC__) && __STDC__ h->chunkfun = (struct _obstack_chunk * (*)(void *,long)) chunkfun; h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun; #else h->chunkfun = (struct _obstack_chunk * (*)()) chunkfun; h->freefun = freefun; #endif h->chunk_size = size; h->alignment_mask = alignment - 1; h->extra_arg = arg; h->use_extra_arg = 1; chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size); if (!chunk) (*obstack_alloc_failed_handler) (); h->next_free = h->object_base = chunk->contents; h->chunk_limit = chunk->limit = (char *) chunk + h->chunk_size; chunk->prev = 0; /* The initial chunk now contains no empty object. */ h->maybe_empty_object = 0; h->alloc_failed = 0; return 1; } /* Allocate a new current chunk for the obstack *H on the assumption that LENGTH bytes need to be added to the current object, or a new object of length LENGTH allocated. Copies any partial object from the end of the old chunk to the beginning of the new one. */ void _obstack_newchunk (h, length) struct obstack *h; int length; { register struct _obstack_chunk *old_chunk = h->chunk; register struct _obstack_chunk *new_chunk; register long new_size; register long obj_size = h->next_free - h->object_base; register long i; long already; /* Compute size for new chunk. */ new_size = (obj_size + length) + (obj_size >> 3) + 100; if (new_size < h->chunk_size) new_size = h->chunk_size; /* Allocate and initialize the new chunk. */ new_chunk = CALL_CHUNKFUN (h, new_size); if (!new_chunk) (*obstack_alloc_failed_handler) (); h->chunk = new_chunk; new_chunk->prev = old_chunk; new_chunk->limit = h->chunk_limit = (char *) new_chunk + new_size; /* Move the existing object to the new chunk. Word at a time is fast and is safe if the object is sufficiently aligned. */ if (h->alignment_mask + 1 >= DEFAULT_ALIGNMENT) { for (i = obj_size / sizeof (COPYING_UNIT) - 1; i >= 0; i--) ((COPYING_UNIT *)new_chunk->contents)[i] = ((COPYING_UNIT *)h->object_base)[i]; /* We used to copy the odd few remaining bytes as one extra COPYING_UNIT, but that can cross a page boundary on a machine which does not do strict alignment for COPYING_UNITS. */ already = obj_size / sizeof (COPYING_UNIT) * sizeof (COPYING_UNIT); } else already = 0; /* Copy remaining bytes one by one. */ for (i = already; i < obj_size; i++) new_chunk->contents[i] = h->object_base[i]; /* If the object just copied was the only data in OLD_CHUNK, free that chunk and remove it from the chain. 
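   (Aside on the size computation above: with obj_size == 4000 bytes of
   partial object and a request of length == 500, new_size is
   (4000 + 500) + (4000 >> 3) + 100 = 5100 bytes, i.e. the data plus the
   request plus one eighth headroom plus a fixed 100-byte pad; had the
   result been smaller than chunk_size it would have been rounded up to
   chunk_size.)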
But not if that chunk might contain an empty object. */ if (h->object_base == old_chunk->contents && ! h->maybe_empty_object) { new_chunk->prev = old_chunk->prev; CALL_FREEFUN (h, old_chunk); } h->object_base = new_chunk->contents; h->next_free = h->object_base + obj_size; /* The new chunk certainly contains no empty object yet. */ h->maybe_empty_object = 0; } /* Return nonzero if object OBJ has been allocated from obstack H. This is here for debugging. If you use it in a program, you are probably losing. */ #if defined (__STDC__) && __STDC__ /* Suppress -Wmissing-prototypes warning. We don't want to declare this in obstack.h because it is just for debugging. */ int _obstack_allocated_p (struct obstack *h, POINTER obj); #endif int _obstack_allocated_p (h, obj) struct obstack *h; POINTER obj; { register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */ register struct _obstack_chunk *plp; /* point to previous chunk if any */ lp = (h)->chunk; /* We use >= rather than > since the object cannot be exactly at the beginning of the chunk but might be an empty object exactly at the end of an adjacent chunk. */ while (lp != 0 && ((POINTER) lp >= obj || (POINTER) (lp)->limit < obj)) { plp = lp->prev; lp = plp; } return lp != 0; } /* Free objects in obstack H, including OBJ and everything allocate more recently than OBJ. If OBJ is zero, free everything in H. */ #undef obstack_free /* This function has two names with identical definitions. This is the first one, called from non-ANSI code. */ void _obstack_free (h, obj) struct obstack *h; POINTER obj; { register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */ register struct _obstack_chunk *plp; /* point to previous chunk if any */ lp = h->chunk; /* We use >= because there cannot be an object at the beginning of a chunk. But there can be an empty object at that address at the end of another chunk. */ while (lp != 0 && ((POINTER) lp >= obj || (POINTER) (lp)->limit < obj)) { plp = lp->prev; CALL_FREEFUN (h, lp); lp = plp; /* If we switch chunks, we can't tell whether the new current chunk contains an empty object, so assume that it may. */ h->maybe_empty_object = 1; } if (lp) { h->object_base = h->next_free = (char *) (obj); h->chunk_limit = lp->limit; h->chunk = lp; } else if (obj != 0) /* obj is not in any of the chunks! */ abort (); } /* This function is used from ANSI code. */ void obstack_free (h, obj) struct obstack *h; POINTER obj; { register struct _obstack_chunk *lp; /* below addr of any objects in this chunk */ register struct _obstack_chunk *plp; /* point to previous chunk if any */ lp = h->chunk; /* We use >= because there cannot be an object at the beginning of a chunk. But there can be an empty object at that address at the end of another chunk. */ while (lp != 0 && ((POINTER) lp >= obj || (POINTER) (lp)->limit < obj)) { plp = lp->prev; CALL_FREEFUN (h, lp); lp = plp; /* If we switch chunks, we can't tell whether the new current chunk contains an empty object, so assume that it may. */ h->maybe_empty_object = 1; } if (lp) { h->object_base = h->next_free = (char *) (obj); h->chunk_limit = lp->limit; h->chunk = lp; } else if (obj != 0) /* obj is not in any of the chunks! */ abort (); } int _obstack_memory_used (h) struct obstack *h; { register struct _obstack_chunk* lp; register int nbytes = 0; for (lp = h->chunk; lp != 0; lp = lp->prev) { nbytes += lp->limit - (char *) lp; } return nbytes; } /* Define the error handler. 
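   Before the handler itself, one more usage note on the freeing entry
   points defined above (an illustrative sketch; it assumes an obstack ob
   set up as in the earlier examples):

     void *a = obstack_alloc (&ob, 16);
     void *b = obstack_alloc (&ob, 16);
     void *c = obstack_alloc (&ob, 16);

     obstack_free (&ob, b);   // releases b and c together; a survives
     obstack_free (&ob, 0);   // releases every remaining object and chunk

   Objects therefore come back in LIFO order only: freeing one object frees
   everything allocated after it, which is exactly what lets the code above
   walk the chunk chain instead of keeping per-object bookkeeping.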
*/ #ifndef _ # if (HAVE_LIBINTL_H && ENABLE_NLS) || defined _LIBC # include # ifndef _ # define _(Str) gettext (Str) # endif # else # define _(Str) (Str) # endif #endif static void print_and_abort () { fputs (_("memory exhausted\n"), stderr); exit (obstack_exit_failure); } #if 0 /* These are now turned off because the applications do not use it and it uses bcopy via obstack_grow, which causes trouble on sysV. */ /* Now define the functional versions of the obstack macros. Define them to simply use the corresponding macros to do the job. */ #if defined (__STDC__) && __STDC__ /* These function definitions do not work with non-ANSI preprocessors; they won't pass through the macro names in parentheses. */ /* The function names appear in parentheses in order to prevent the macro-definitions of the names from being expanded there. */ POINTER (obstack_base) (obstack) struct obstack *obstack; { return obstack_base (obstack); } POINTER (obstack_next_free) (obstack) struct obstack *obstack; { return obstack_next_free (obstack); } int (obstack_object_size) (obstack) struct obstack *obstack; { return obstack_object_size (obstack); } int (obstack_room) (obstack) struct obstack *obstack; { return obstack_room (obstack); } int (obstack_make_room) (obstack, length) struct obstack *obstack; int length; { return obstack_make_room (obstack, length); } void (obstack_grow) (obstack, pointer, length) struct obstack *obstack; POINTER pointer; int length; { obstack_grow (obstack, pointer, length); } void (obstack_grow0) (obstack, pointer, length) struct obstack *obstack; POINTER pointer; int length; { obstack_grow0 (obstack, pointer, length); } void (obstack_1grow) (obstack, character) struct obstack *obstack; int character; { obstack_1grow (obstack, character); } void (obstack_blank) (obstack, length) struct obstack *obstack; int length; { obstack_blank (obstack, length); } void (obstack_1grow_fast) (obstack, character) struct obstack *obstack; int character; { obstack_1grow_fast (obstack, character); } void (obstack_blank_fast) (obstack, length) struct obstack *obstack; int length; { obstack_blank_fast (obstack, length); } POINTER (obstack_finish) (obstack) struct obstack *obstack; { return obstack_finish (obstack); } POINTER (obstack_alloc) (obstack, length) struct obstack *obstack; int length; { return obstack_alloc (obstack, length); } POINTER (obstack_copy) (obstack, pointer, length) struct obstack *obstack; POINTER pointer; int length; { return obstack_copy (obstack, pointer, length); } POINTER (obstack_copy0) (obstack, pointer, length) struct obstack *obstack; POINTER pointer; int length; { return obstack_copy0 (obstack, pointer, length); } #endif /* __STDC__ */ #endif /* 0 */ #endif /* !ELIDE_CODE */ /* List implementation of a partition of consecutive integers. Copyright (C) 2000, 2001 Free Software Foundation, Inc. Contributed by CodeSourcery, LLC. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #endif #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_STRING_H #include #endif /* List implementation of a partition of consecutive integers. Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc. Contributed by CodeSourcery, LLC. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This package implements a partition of consecutive integers. The elements are partitioned into classes. Each class is represented by one of its elements, the canonical element, which is chosen arbitrarily from elements in the class. The principal operations on a partition are FIND, which takes an element, determines its class, and returns the canonical element for that class, and UNION, which unites the two classes that contain two given elements into a single class. The list implementation used here provides constant-time finds. By storing the size of each class with the class's canonical element, it is able to perform unions over all the classes in the partition in O (N log N) time. */ #ifndef _PARTITION_H #define _PARTITION_H #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #include struct partition_elem { /* The canonical element that represents the class containing this element. */ int class_element; /* The next element in this class. Elements in each class form a circular list. */ struct partition_elem* next; /* The number of elements in this class. Valid only if this is the canonical element for its class. */ unsigned class_count; }; typedef struct partition_def { /* The number of elements in this partition. */ int num_elements; /* The elements in the partition. */ struct partition_elem elements[1]; } *partition; extern partition partition_new PARAMS((int)); extern void partition_delete PARAMS((partition)); extern int partition_union PARAMS((partition, int, int)); extern void partition_print PARAMS((partition, FILE*)); /* Returns the canonical element corresponding to the class containing ELEMENT__ in PARTITION__. */ #define partition_find(partition__, element__) \ ((partition__)->elements[(element__)].class_element) #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _PARTITION_H */ static int elem_compare PARAMS ((const void *, const void *)); /* Creates a partition of NUM_ELEMENTS elements. Initially each element is in a class by itself. */ partition partition_new (num_elements) int num_elements; { int e; partition part = (partition) xmalloc (sizeof (struct partition_def) + (num_elements - 1) * sizeof (struct partition_elem)); part->num_elements = num_elements; for (e = 0; e < num_elements; ++e) { part->elements[e].class_element = e; part->elements[e].next = &(part->elements[e]); part->elements[e].class_count = 1; } return part; } /* Freeds a partition. 
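   A usage sketch for the whole interface may help before the remaining
   operations are defined (illustrative only; partition_new calls
   libiberty's xmalloc, so that must be linked in):

     #include <stdio.h>

     partition p = partition_new (4);   // classes: {0} {1} {2} {3}
     partition_union (p, 0, 2);         // classes: {0,2} {1} {3}
     partition_union (p, 2, 3);         // classes: {0,2,3} {1}

     if (partition_find (p, 3) == partition_find (p, 0))
       printf ("0 and 3 ended up in the same class\n");

     partition_print (p, stdout);       // prints [(0 2 3)(1)]
     partition_delete (p);

   partition_find is constant time because every element stores its
   canonical element directly; partition_union only rewrites the class
   fields of the shorter of the two class lists, which is what gives the
   O (N log N) bound on a whole sequence of unions mentioned above.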
*/ void partition_delete (part) partition part; { free (part); } /* Unites the classes containing ELEM1 and ELEM2 into a single class of partition PART. If ELEM1 and ELEM2 are already in the same class, does nothing. Returns the canonical element of the resulting union class. */ int partition_union (part, elem1, elem2) partition part; int elem1; int elem2; { struct partition_elem *elements = part->elements; struct partition_elem *e1; struct partition_elem *e2; struct partition_elem *p; struct partition_elem *old_next; /* The canonical element of the resulting union class. */ int class_element = elements[elem1].class_element; /* If they're already in the same class, do nothing. */ if (class_element == elements[elem2].class_element) return class_element; /* Make sure ELEM1 is in the larger class of the two. If not, swap them. This way we always scan the shorter list. */ if (elements[elem1].class_count < elements[elem2].class_count) { int temp = elem1; elem1 = elem2; elem2 = temp; class_element = elements[elem1].class_element; } e1 = &(elements[elem1]); e2 = &(elements[elem2]); /* Keep a count of the number of elements in the list. */ elements[class_element].class_count += elements[e2->class_element].class_count; /* Update the class fields in elem2's class list. */ e2->class_element = class_element; for (p = e2->next; p != e2; p = p->next) p->class_element = class_element; /* Splice ELEM2's class list into ELEM1's. These are circular lists. */ old_next = e1->next; e1->next = e2->next; e2->next = old_next; return class_element; } /* Compare elements ELEM1 and ELEM2 from array of integers, given a pointer to each. Used to qsort such an array. */ static int elem_compare (elem1, elem2) const void *elem1; const void *elem2; { int e1 = * (const int *) elem1; int e2 = * (const int *) elem2; if (e1 < e2) return -1; else if (e1 > e2) return 1; else return 0; } /* Prints PART to the file pointer FP. The elements of each class are sorted. */ void partition_print (part, fp) partition part; FILE *fp; { char *done; int num_elements = part->num_elements; struct partition_elem *elements = part->elements; int *class_elements; int e; /* Flag the elements we've already printed. */ done = (char *) xmalloc (num_elements); memset (done, 0, num_elements); /* A buffer used to sort elements in a class. */ class_elements = (int *) xmalloc (num_elements * sizeof (int)); fputc ('[', fp); for (e = 0; e < num_elements; ++e) /* If we haven't printed this element, print its entire class. */ if (! done[e]) { int c = e; int count = elements[elements[e].class_element].class_count; int i; /* Collect the elements in this class. */ for (i = 0; i < count; ++i) { class_elements[i] = c; done[c] = 1; c = elements[c].next - elements; } /* Sort them. */ qsort ((void *) class_elements, count, sizeof (int), elem_compare); /* Print them. */ fputc ('(', fp); for (i = 0; i < count; ++i) fprintf (fp, i == 0 ? "%d" : " %d", class_elements[i]); fputc (')', fp); } fputc (']', fp); free (done); } /* Calculate the size of physical memory. Copyright 2000, 2001, 2003 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Written by Paul Eggert. */ #if HAVE_CONFIG_H #endif #if HAVE_UNISTD_H # include #endif #if HAVE_SYS_PSTAT_H # include #endif #if HAVE_SYS_SYSMP_H # include #endif #if HAVE_SYS_TABLE_H # include #endif #include #if HAVE_SYS_PARAM_H # include #endif #if HAVE_SYS_SYSCTL_H # include #endif #if HAVE_SYS_SYSTEMCFG_H # include #endif /* Return the total amount of physical memory. */ double physmem_total () { #if defined _SC_PHYS_PAGES && defined _SC_PAGESIZE { /* This works on linux-gnu, solaris2 and cygwin. */ double pages = sysconf (_SC_PHYS_PAGES); double pagesize = sysconf (_SC_PAGESIZE); if (0 <= pages && 0 <= pagesize) return pages * pagesize; } #endif #if HAVE_PSTAT_GETSTATIC { /* This works on hpux11. */ struct pst_static pss; if (0 <= pstat_getstatic (&pss, sizeof pss, 1, 0)) { double pages = pss.physical_memory; double pagesize = pss.page_size; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_SYSMP && defined MP_SAGET && defined MPSA_RMINFO && defined _SC_PAGESIZE { /* This works on irix6. */ struct rminfo realmem; if (sysmp (MP_SAGET, MPSA_RMINFO, &realmem, sizeof realmem) == 0) { double pagesize = sysconf (_SC_PAGESIZE); double pages = realmem.physmem; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_GETSYSINFO && defined GSI_PHYSMEM { /* This works on Tru64 UNIX V4/5. */ int physmem; if (getsysinfo (GSI_PHYSMEM, (caddr_t) &physmem, sizeof (physmem), NULL, NULL, NULL) == 1) { double kbytes = physmem; if (0 <= kbytes) return kbytes * 1024.0; } } #endif #if HAVE_SYSCTL && defined HW_PHYSMEM { /* This works on *bsd and darwin. */ unsigned int physmem; size_t len = sizeof physmem; static int mib[2] = { CTL_HW, HW_PHYSMEM }; if (sysctl (mib, ARRAY_SIZE (mib), &physmem, &len, NULL, 0) == 0 && len == sizeof (physmem)) return (double) physmem; } #endif #if HAVE__SYSTEM_CONFIGURATION /* This works on AIX 4.3.3+. */ return _system_configuration.physmem; #endif #if defined _WIN32 { /* this works on windows */ PFN_MS_EX pfnex; HMODULE h = GetModuleHandle ("kernel32.dll"); if (!h) return 0.0; /* Use GlobalMemoryStatusEx if available. */ if ((pfnex = (PFN_MS_EX) GetProcAddress (h, "GlobalMemoryStatusEx"))) { lMEMORYSTATUSEX lms_ex; lms_ex.dwLength = sizeof lms_ex; if (!pfnex (&lms_ex)) return 0.0; return (double) lms_ex.ullTotalPhys; } /* Fall back to GlobalMemoryStatus which is always available. but returns wrong results for physical memory > 4GB. */ else { MEMORYSTATUS ms; GlobalMemoryStatus (&ms); return (double) ms.dwTotalPhys; } } #endif /* Return 0 if we can't determine the value. */ return 0; } /* Return the amount of physical memory available. */ double physmem_available () { #if defined _SC_AVPHYS_PAGES && defined _SC_PAGESIZE { /* This works on linux-gnu, solaris2 and cygwin. */ double pages = sysconf (_SC_AVPHYS_PAGES); double pagesize = sysconf (_SC_PAGESIZE); if (0 <= pages && 0 <= pagesize) return pages * pagesize; } #endif #if HAVE_PSTAT_GETSTATIC && HAVE_PSTAT_GETDYNAMIC { /* This works on hpux11. 
*/ struct pst_static pss; struct pst_dynamic psd; if (0 <= pstat_getstatic (&pss, sizeof pss, 1, 0) && 0 <= pstat_getdynamic (&psd, sizeof psd, 1, 0)) { double pages = psd.psd_free; double pagesize = pss.page_size; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_SYSMP && defined MP_SAGET && defined MPSA_RMINFO && defined _SC_PAGESIZE { /* This works on irix6. */ struct rminfo realmem; if (sysmp (MP_SAGET, MPSA_RMINFO, &realmem, sizeof realmem) == 0) { double pagesize = sysconf (_SC_PAGESIZE); double pages = realmem.availrmem; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_TABLE && defined TBL_VMSTATS { /* This works on Tru64 UNIX V4/5. */ struct tbl_vmstats vmstats; if (table (TBL_VMSTATS, 0, &vmstats, 1, sizeof (vmstats)) == 1) { double pages = vmstats.free_count; double pagesize = vmstats.pagesize; if (0 <= pages && 0 <= pagesize) return pages * pagesize; } } #endif #if HAVE_SYSCTL && defined HW_USERMEM { /* This works on *bsd and darwin. */ unsigned int usermem; size_t len = sizeof usermem; static int mib[2] = { CTL_HW, HW_USERMEM }; if (sysctl (mib, ARRAY_SIZE (mib), &usermem, &len, NULL, 0) == 0 && len == sizeof (usermem)) return (double) usermem; } #endif #if defined _WIN32 { /* this works on windows */ PFN_MS_EX pfnex; HMODULE h = GetModuleHandle ("kernel32.dll"); if (!h) return 0.0; /* Use GlobalMemoryStatusEx if available. */ if ((pfnex = (PFN_MS_EX) GetProcAddress (h, "GlobalMemoryStatusEx"))) { lMEMORYSTATUSEX lms_ex; lms_ex.dwLength = sizeof lms_ex; if (!pfnex (&lms_ex)) return 0.0; return (double) lms_ex.ullAvailPhys; } /* Fall back to GlobalMemoryStatus which is always available. but returns wrong results for physical memory > 4GB */ else { MEMORYSTATUS ms; GlobalMemoryStatus (&ms); return (double) ms.dwAvailPhys; } } #endif /* Guess 25% of physical memory. */ return physmem_total () / 4; } #if DEBUG # include # include int main () { printf ("%12.f %12.f\n", physmem_total (), physmem_available ()); exit (0); } #endif /* DEBUG */ /* Local Variables: compile-command: "gcc -DDEBUG -DHAVE_CONFIG_H -I.. -g -O -Wall -W physmem.c" End: */ /* Utilities to execute a program in a subprocess (possibly linked by pipes with other subprocesses), and wait for it. Generic Unix version (also used for UWIN and VMS). Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Utilities to execute a program in a subprocess (possibly linked by pipes with other subprocesses), and wait for it. Shared logic. Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of the libiberty library. 
Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef PEX_COMMON_H #define PEX_COMMON_H #define install_error_msg "installation problem, cannot exec `%s'" /* stdin file number. */ #define STDIN_FILE_NO 0 /* stdout file number. */ #define STDOUT_FILE_NO 1 /* stderr file number. */ #define STDERR_FILE_NO 2 /* value of `pipe': port index for reading. */ #define READ_PORT 0 /* value of `pipe': port index for writing. */ #define WRITE_PORT 1 #endif #include #include #ifdef NEED_DECLARATION_ERRNO extern int errno; #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_SYS_WAIT_H #define __thread __something_other_than__thread #include #undef __thread #endif #ifndef HAVE_WAITPID #define waitpid(pid, status, flags) wait(status) #endif #ifdef vfork /* Autoconf may define this to fork for us. */ # define VFORK_STRING "fork" #else # define VFORK_STRING "vfork" #endif #ifdef HAVE_VFORK_H #include #endif #ifdef VMS #define vfork() (decc$$alloc_vfork_blocks() >= 0 ? \ lib$get_current_invo_context(decc$$get_vfork_jmpbuf()) : -1) #endif /* VMS */ /* Execute a program, possibly setting up pipes to programs executed via other calls to this function. This version of the function uses vfork. In general vfork is similar to setjmp/longmp, in that any variable which is modified by the child process has an indeterminate value in the parent process. We follow a safe approach here by not modifying any variables at all in the child process (with the possible exception of variables modified by xstrerror if exec fails, but this is unlikely to be detectable). We work a little bit harder to avoid gcc warnings. gcc will warn about any automatic variable which is live at the time of the vfork, which is non-volatile, and which is either set more than once or is an argument to the function. This warning isn't quite right, since what we really care about is whether the variable is live at the time of the vfork and set afterward by the child process, but gcc only checks whether the variable is set more than once. To avoid this warning, we ensure that any variable which is live at the time of the vfork (i.e., used after the vfork) is set exactly once and is not an argument, or is marked volatile. */ int pexecute (program, argv, this_pname, temp_base, errmsg_fmt, errmsg_arg, flagsarg) const char *program; char * const *argv; const char *this_pname; const char *temp_base ATTRIBUTE_UNUSED; char **errmsg_fmt, **errmsg_arg; int flagsarg; { int pid; int pdes[2]; int out; int input_desc, output_desc; int flags; /* We declare these to be volatile to avoid warnings from gcc about them being clobbered by vfork. */ volatile int retries, sleep_interval; /* Pipe waiting from last process, to be used as input for the next one. Value is STDIN_FILE_NO if no pipe is waiting (i.e. 
the next command is the first of a group). */ static int last_pipe_input; flags = flagsarg; /* If this is the first process, initialize. */ if (flags & PEXECUTE_FIRST) last_pipe_input = STDIN_FILE_NO; input_desc = last_pipe_input; /* If this isn't the last process, make a pipe for its output, and record it as waiting to be the input to the next process. */ if (! (flags & PEXECUTE_LAST)) { if (pipe (pdes) < 0) { *errmsg_fmt = "pipe"; *errmsg_arg = NULL; return -1; } out = pdes[WRITE_PORT]; last_pipe_input = pdes[READ_PORT]; } else { /* Last process. */ out = STDOUT_FILE_NO; last_pipe_input = STDIN_FILE_NO; } output_desc = out; /* Fork a subprocess; wait and retry if it fails. */ sleep_interval = 1; pid = -1; for (retries = 0; retries < 4; retries++) { pid = vfork (); if (pid >= 0) break; sleep (sleep_interval); sleep_interval *= 2; } switch (pid) { case -1: *errmsg_fmt = "fork"; *errmsg_arg = NULL; return -1; case 0: /* child */ /* Move the input and output pipes into place, if necessary. */ if (input_desc != STDIN_FILE_NO) { close (STDIN_FILE_NO); dup (input_desc); close (input_desc); } if (output_desc != STDOUT_FILE_NO) { close (STDOUT_FILE_NO); dup (output_desc); close (output_desc); } /* Close the parent's descs that aren't wanted here. */ if (last_pipe_input != STDIN_FILE_NO) close (last_pipe_input); /* Exec the program. */ if (flags & PEXECUTE_SEARCH) execvp (program, argv); else execv (program, argv); /* We don't want to call fprintf after vfork. */ #define writeerr(s) write (STDERR_FILE_NO, s, strlen (s)) writeerr (this_pname); writeerr (": "); writeerr ("installation problem, cannot exec '"); writeerr (program); writeerr ("': "); writeerr (xstrerror (errno)); writeerr ("\n"); _exit (-1); /* NOTREACHED */ return 0; default: /* In the parent, after forking. Close the descriptors that we made for this child. */ if (input_desc != STDIN_FILE_NO) close (input_desc); if (output_desc != STDOUT_FILE_NO) close (output_desc); /* Return child's process number. */ return pid; } } int pwait (pid, status, flags) int pid; int *status; int flags ATTRIBUTE_UNUSED; { /* ??? Here's an opportunity to canonicalize the values in STATUS. Needed? */ pid = waitpid (pid, status, 0); return pid; } /* replacement macros. Copyright (C) 2000 Free Software Foundation, Inc. Contributed by Zack Weinberg . This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* @defvr Extension HOST_CHARSET This macro indicates the basic character set and encoding used by the host: more precisely, the encoding used for character constants in preprocessor @samp{#if} statements (the C "execution character set"). It is defined by @file{safe-ctype.h}, and will be an integer constant with one of the following values: @ftable @code @item HOST_CHARSET_UNKNOWN The host character set is unknown - that is, not one of the next two possibilities. 
@item HOST_CHARSET_ASCII The host character set is ASCII. @item HOST_CHARSET_EBCDIC The host character set is some variant of EBCDIC. (Only one of the nineteen EBCDIC varying characters is tested; exercise caution.) @end ftable @end defvr @deffn Extension ISALPHA (@var{c}) @deffnx Extension ISALNUM (@var{c}) @deffnx Extension ISBLANK (@var{c}) @deffnx Extension ISCNTRL (@var{c}) @deffnx Extension ISDIGIT (@var{c}) @deffnx Extension ISGRAPH (@var{c}) @deffnx Extension ISLOWER (@var{c}) @deffnx Extension ISPRINT (@var{c}) @deffnx Extension ISPUNCT (@var{c}) @deffnx Extension ISSPACE (@var{c}) @deffnx Extension ISUPPER (@var{c}) @deffnx Extension ISXDIGIT (@var{c}) These twelve macros are defined by @file{safe-ctype.h}. Each has the same meaning as the corresponding macro (with name in lowercase) defined by the standard header @file{ctype.h}. For example, @code{ISALPHA} returns true for alphabetic characters and false for others. However, there are two differences between these macros and those provided by @file{ctype.h}: @itemize @bullet @item These macros are guaranteed to have well-defined behavior for all values representable by @code{signed char} and @code{unsigned char}, and for @code{EOF}. @item These macros ignore the current locale; they are true for these fixed sets of characters: @multitable {@code{XDIGIT}} {yada yada yada yada yada yada yada yada} @item @code{ALPHA} @tab @kbd{A-Za-z} @item @code{ALNUM} @tab @kbd{A-Za-z0-9} @item @code{BLANK} @tab @kbd{space tab} @item @code{CNTRL} @tab @code{!PRINT} @item @code{DIGIT} @tab @kbd{0-9} @item @code{GRAPH} @tab @code{ALNUM || PUNCT} @item @code{LOWER} @tab @kbd{a-z} @item @code{PRINT} @tab @code{GRAPH ||} @kbd{space} @item @code{PUNCT} @tab @kbd{`~!@@#$%^&*()_-=+[@{]@}\|;:'",<.>/?} @item @code{SPACE} @tab @kbd{space tab \n \r \f \v} @item @code{UPPER} @tab @kbd{A-Z} @item @code{XDIGIT} @tab @kbd{0-9A-Fa-f} @end multitable Note that, if the host character set is ASCII or a superset thereof, all these macros will return false for all values of @code{char} outside the range of 7-bit ASCII. In particular, both ISPRINT and ISCNTRL return false for characters with numeric values from 128 to 255. @end itemize @end deffn @deffn Extension ISIDNUM (@var{c}) @deffnx Extension ISIDST (@var{c}) @deffnx Extension IS_VSPACE (@var{c}) @deffnx Extension IS_NVSPACE (@var{c}) @deffnx Extension IS_SPACE_OR_NUL (@var{c}) @deffnx Extension IS_ISOBASIC (@var{c}) These six macros are defined by @file{safe-ctype.h} and provide additional character classes which are useful when doing lexical analysis of C or similar languages. They are true for the following sets of characters: @multitable {@code{SPACE_OR_NUL}} {yada yada yada yada yada yada yada yada} @item @code{IDNUM} @tab @kbd{A-Za-z0-9_} @item @code{IDST} @tab @kbd{A-Za-z_} @item @code{VSPACE} @tab @kbd{\r \n} @item @code{NVSPACE} @tab @kbd{space tab \f \v \0} @item @code{SPACE_OR_NUL} @tab @code{VSPACE || NVSPACE} @item @code{ISOBASIC} @tab @code{VSPACE || NVSPACE || PRINT} @end multitable @end deffn */ #include /* for EOF */ #if EOF != -1 #error " requires EOF == -1" #endif /* Shorthand */ #define bl _sch_isblank #define cn _sch_iscntrl #define di _sch_isdigit #define is _sch_isidst #define lo _sch_islower #define nv _sch_isnvsp #define pn _sch_ispunct #define pr _sch_isprint #define sp _sch_isspace #define up _sch_isupper #define vs _sch_isvsp #define xd _sch_isxdigit /* Masks. 
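   The mask values defined next simply OR these bits together.  To make the
   encoding concrete: the table entry for 'A' further down is XU, that is
   up|is|xd|pr, so ISUPPER ('A'), ISIDST ('A'), ISXDIGIT ('A') and
   ISPRINT ('A') are all true while ISDIGIT ('A') is false.  A sketch of
   typical use (illustrative only):

     #include "safe-ctype.h"

     // Count identifier-start characters in a NUL-terminated buffer.
     // Unlike the <ctype.h> functions, this is well defined even when
     // *p is negative on a signed-char host, and it ignores the locale.
     static int
     count_idst (const char *p)
     {
       int n = 0;
       for (; *p != '\0'; p++)
         if (ISIDST (*p))
           n++;
       return n;
     }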
*/ #define L (const unsigned short) (lo|is |pr) /* lower case letter */ #define XL (const unsigned short) (lo|is|xd|pr) /* lowercase hex digit */ #define U (const unsigned short) (up|is |pr) /* upper case letter */ #define XU (const unsigned short) (up|is|xd|pr) /* uppercase hex digit */ #define D (const unsigned short) (di |xd|pr) /* decimal digit */ #define P (const unsigned short) (pn |pr) /* punctuation */ #define _ (const unsigned short) (pn|is |pr) /* underscore */ #define C (const unsigned short) ( cn) /* control character */ #define Z (const unsigned short) (nv |cn) /* NUL */ #define M (const unsigned short) (nv|sp |cn) /* cursor movement: \f \v */ #define V (const unsigned short) (vs|sp |cn) /* vertical space: \r \n */ #define T (const unsigned short) (nv|sp|bl|cn) /* tab */ #define S (const unsigned short) (nv|sp|bl|pr) /* space */ /* Are we ASCII? */ #if HOST_CHARSET == HOST_CHARSET_ASCII const unsigned short _sch_istable[256] = { Z, C, C, C, C, C, C, C, /* NUL SOH STX ETX EOT ENQ ACK BEL */ C, T, V, M, M, V, C, C, /* BS HT LF VT FF CR SO SI */ C, C, C, C, C, C, C, C, /* DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ C, C, C, C, C, C, C, C, /* CAN EM SUB ESC FS GS RS US */ S, P, P, P, P, P, P, P, /* SP ! " # $ % & ' */ P, P, P, P, P, P, P, P, /* ( ) * + , - . / */ D, D, D, D, D, D, D, D, /* 0 1 2 3 4 5 6 7 */ D, D, P, P, P, P, P, P, /* 8 9 : ; < = > ? */ P, XU, XU, XU, XU, XU, XU, U, /* @ A B C D E F G */ U, U, U, U, U, U, U, U, /* H I J K L M N O */ U, U, U, U, U, U, U, U, /* P Q R S T U V W */ U, U, U, P, P, P, P, _, /* X Y Z [ \ ] ^ _ */ P, XL, XL, XL, XL, XL, XL, L, /* ` a b c d e f g */ L, L, L, L, L, L, L, L, /* h i j k l m n o */ L, L, L, L, L, L, L, L, /* p q r s t u v w */ L, L, L, P, P, P, P, C, /* x y z { | } ~ DEL */ /* high half of unsigned char is locale-specific, so all tests are false in "C" locale */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; const unsigned char _sch_tolower[256] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 91, 92, 93, 94, 95, 96, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 123,124,125,126,127, 128,129,130,131, 132,133,134,135, 136,137,138,139, 140,141,142,143, 144,145,146,147, 148,149,150,151, 152,153,154,155, 156,157,158,159, 160,161,162,163, 164,165,166,167, 168,169,170,171, 172,173,174,175, 176,177,178,179, 180,181,182,183, 184,185,186,187, 188,189,190,191, 192,193,194,195, 196,197,198,199, 200,201,202,203, 204,205,206,207, 208,209,210,211, 212,213,214,215, 216,217,218,219, 220,221,222,223, 224,225,226,227, 228,229,230,231, 232,233,234,235, 236,237,238,239, 240,241,242,243, 244,245,246,247, 248,249,250,251, 252,253,254,255, }; const unsigned char _sch_toupper[256] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 
36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 91, 92, 93, 94, 95, 96, 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 123,124,125,126,127, 128,129,130,131, 132,133,134,135, 136,137,138,139, 140,141,142,143, 144,145,146,147, 148,149,150,151, 152,153,154,155, 156,157,158,159, 160,161,162,163, 164,165,166,167, 168,169,170,171, 172,173,174,175, 176,177,178,179, 180,181,182,183, 184,185,186,187, 188,189,190,191, 192,193,194,195, 196,197,198,199, 200,201,202,203, 204,205,206,207, 208,209,210,211, 212,213,214,215, 216,217,218,219, 220,221,222,223, 224,225,226,227, 228,229,230,231, 232,233,234,235, 236,237,238,239, 240,241,242,243, 244,245,246,247, 248,249,250,251, 252,253,254,255, }; /* Clean up after ourselves */ #undef bl #undef cn #undef di #undef is #undef lo #undef nv #undef pn #undef pr #undef sp #undef up #undef vs #undef xd #undef L #undef XL #undef U #undef XU #undef D #undef P #undef _ #undef C #undef Z #undef M #undef V #undef T #undef S #else # if HOST_CHARSET == HOST_CHARSET_EBCDIC #error "FIXME: write tables for EBCDIC" # else #error "Unrecognized host character set" # endif #endif /* Sorting algorithms. Copyright (C) 2000 Free Software Foundation, Inc. Contributed by Mark Mitchell . This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #endif /* Sorting algorithms. Copyright (C) 2000, 2002 Free Software Foundation, Inc. Contributed by Mark Mitchell . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef SORT_H #define SORT_H #include /* For size_t */ #ifdef __STDC__ #include #endif /* __STDC__ */ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /* Sort an array of pointers. 
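   The caller supplies a scratch array of the same length as the array
   being sorted; a minimal sketch of a call (illustrative only):

     void *ptrs[3];
     void *work[3];                       // scratch space, same length
     ptrs[0] = &ptrs[2];                  // three arbitrary addresses
     ptrs[1] = &ptrs[0];
     ptrs[2] = &ptrs[1];
     sort_pointers (3, ptrs, work);       // ptrs now ascends by address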
*/ extern void sort_pointers PARAMS ((size_t, void **, void **)); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* SORT_H */ #ifdef HAVE_LIMITS_H #include #endif #ifdef HAVE_SYS_PARAM_H #include #endif #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_STRING_H #include #endif #ifndef UCHAR_MAX #define UCHAR_MAX ((unsigned char)(-1)) #endif /* POINTERS and WORK are both arrays of N pointers. When this function returns POINTERS will be sorted in ascending order. */ void sort_pointers (n, pointers, work) size_t n; void **pointers; void **work; { /* The type of a single digit. This can be any unsigned integral type. When changing this, DIGIT_MAX should be changed as well. */ typedef unsigned char digit_t; /* The maximum value a single digit can have. */ #define DIGIT_MAX (UCHAR_MAX + 1) /* The Ith entry is the number of elements in *POINTERSP that have I in the digit on which we are currently sorting. */ unsigned int count[DIGIT_MAX]; /* Nonzero if we are running on a big-endian machine. */ int big_endian_p; size_t i; size_t j; /* The algorithm used here is radix sort which takes time linear in the number of elements in the array. */ /* The algorithm here depends on being able to swap the two arrays an even number of times. */ if ((sizeof (void *) / sizeof (digit_t)) % 2 != 0) abort (); /* Figure out the endianness of the machine. */ for (i = 0, j = 0; i < sizeof (size_t); ++i) { j *= (UCHAR_MAX + 1); j += i; } big_endian_p = (((char *)&j)[0] == 0); /* Move through the pointer values from least significant to most significant digits. */ for (i = 0; i < sizeof (void *) / sizeof (digit_t); ++i) { digit_t *digit; digit_t *bias; digit_t *top; unsigned int *countp; void **pointerp; /* The offset from the start of the pointer will depend on the endianness of the machine. */ if (big_endian_p) j = sizeof (void *) / sizeof (digit_t) - i; else j = i; /* Now, perform a stable sort on this digit. We use counting sort. */ memset (count, 0, DIGIT_MAX * sizeof (unsigned int)); /* Compute the address of the appropriate digit in the first and one-past-the-end elements of the array. On a little-endian machine, the least-significant digit is closest to the front. */ bias = ((digit_t *) pointers) + j; top = ((digit_t *) (pointers + n)) + j; /* Count how many there are of each value. At the end of this loop, COUNT[K] will contain the number of pointers whose Ith digit is K. */ for (digit = bias; digit < top; digit += sizeof (void *) / sizeof (digit_t)) ++count[*digit]; /* Now, make COUNT[K] contain the number of pointers whose Ith digit is less than or equal to K. */ for (countp = count + 1; countp < count + DIGIT_MAX; ++countp) *countp += countp[-1]; /* Now, drop the pointers into their correct locations. */ for (pointerp = pointers + n - 1; pointerp >= pointers; --pointerp) work[--count[((digit_t *) pointerp)[j]]] = *pointerp; /* Swap WORK and POINTERS so that POINTERS contains the sorted array. */ pointerp = pointers; pointers = work; work = pointerp; } } /* Everything below here is a unit test for the routines in this file. 
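A side note on the endianness probe used in sort_pointers above: with a four-byte size_t the loop builds the value 0x00010203, placing digit I in arithmetic position I counted from the most significant end, so looking at the first byte of that value in memory settles the byte order. That byte is 0 only when the most significant byte is stored first, which is the big-endian case; on a little-endian host the first byte is 3.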
*/ #ifdef UNIT_TEST #include void *xmalloc (n) size_t n; { return malloc (n); } int main (int argc, char **argv) { int k; int result; size_t i; void **pointers; void **work; if (argc > 1) k = atoi (argv[1]); else k = 10; pointers = xmalloc (k * sizeof (void *)); work = xmalloc (k * sizeof (void *)); for (i = 0; i < k; ++i) { pointers[i] = (void *) random (); printf ("%x\n", pointers[i]); } sort_pointers (k, pointers, work); printf ("\nSorted\n\n"); result = 0; for (i = 0; i < k; ++i) { printf ("%x\n", pointers[i]); if (i > 0 && (char*) pointers[i] < (char*) pointers[i - 1]) result = 1; } free (pointers); free (work); return result; } #endif /* Allocate memory region filled with spaces. Copyright (C) 1991 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* @deftypefn Extension char* spaces (int @var{count}) Returns a pointer to a memory region filled with the specified number of spaces and null terminated. The returned pointer is valid until at least the next call. @end deftypefn */ #if VMS #include #include #else /* For systems with larger pointers than ints, these must be declared. */ extern PTR malloc PARAMS ((size_t)); extern void free PARAMS ((PTR)); #endif const char * spaces (count) int count; { register char *t; static char *buf; static int maxsize; if (count > maxsize) { if (buf) { free (buf); } buf = malloc (count + 1); if (buf == (char *) 0) return 0; for (t = buf + count ; t != buf ; ) { *--t = ' '; } maxsize = count; buf[count] = '\0'; } return (const char *) (buf + maxsize - count); } /* A splay-tree datatype. Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc. Contributed by Mark Mitchell (mark@markmitchell.com). This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* For an easily readable description of splay-trees, see: Lewis, Harry R. and Denenberg, Larry. Data Structures and Their Algorithms. Harper-Collins, Inc. 1991. */ #ifdef HAVE_CONFIG_H #endif #ifdef HAVE_STDLIB_H #include #endif #include /* A splay-tree datatype. Copyright 1998, 1999, 2000, 2002 Free Software Foundation, Inc. Contributed by Mark Mitchell (mark@markmitchell.com). This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* For an easily readable description of splay-trees, see: Lewis, Harry R. and Denenberg, Larry. Data Structures and Their Algorithms. Harper-Collins, Inc. 1991. The major feature of splay trees is that all basic tree operations are amortized O(log n) time for a tree with n nodes. */ #ifndef _SPLAY_TREE_H #define _SPLAY_TREE_H #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #ifndef GTY #define GTY(X) #endif /* Use typedefs for the key and data types to facilitate changing these types, if necessary. These types should be sufficiently wide that any pointer or scalar can be cast to these types, and then cast back, without loss of precision. */ typedef unsigned long int splay_tree_key; typedef unsigned long int splay_tree_value; /* Forward declaration for a node in the tree. */ typedef struct splay_tree_node_s *splay_tree_node; /* The type of a function which compares two splay-tree keys. The function should return values as for qsort. */ typedef int (*splay_tree_compare_fn) PARAMS((splay_tree_key, splay_tree_key)); /* The type of a function used to deallocate any resources associated with the key. */ typedef void (*splay_tree_delete_key_fn) PARAMS((splay_tree_key)); /* The type of a function used to deallocate any resources associated with the value. */ typedef void (*splay_tree_delete_value_fn) PARAMS((splay_tree_value)); /* The type of a function used to iterate over the tree. */ typedef int (*splay_tree_foreach_fn) PARAMS((splay_tree_node, void*)); /* The type of a function used to allocate memory for tree root and node structures. The first argument is the number of bytes needed; the second is a data pointer the splay tree functions pass through to the allocator. This function must never return zero. */ typedef PTR (*splay_tree_allocate_fn) PARAMS((int, void *)); /* The type of a function used to free memory allocated using the corresponding splay_tree_allocate_fn. The first argument is the memory to be freed; the latter is a data pointer the splay tree functions pass through to the freer. */ typedef void (*splay_tree_deallocate_fn) PARAMS((void *, void *)); /* The nodes in the splay tree. */ struct splay_tree_node_s GTY(()) { /* The key. */ splay_tree_key GTY ((use_param1)) key; /* The value. */ splay_tree_value GTY ((use_param2)) value; /* The left and right children, respectively. */ splay_tree_node GTY ((use_params)) left; splay_tree_node GTY ((use_params)) right; }; /* The splay tree itself. */ struct splay_tree_s GTY(()) { /* The root of the tree. */ splay_tree_node GTY ((use_params)) root; /* The comparision function. */ splay_tree_compare_fn comp; /* The deallocate-key function. NULL if no cleanup is necessary. */ splay_tree_delete_key_fn delete_key; /* The deallocate-value function. NULL if no cleanup is necessary. */ splay_tree_delete_value_fn delete_value; /* Allocate/free functions, and a data pointer to pass to them. 
*/ splay_tree_allocate_fn allocate; splay_tree_deallocate_fn deallocate; PTR GTY((skip)) allocate_data; }; typedef struct splay_tree_s *splay_tree; extern splay_tree splay_tree_new PARAMS((splay_tree_compare_fn, splay_tree_delete_key_fn, splay_tree_delete_value_fn)); extern splay_tree splay_tree_new_with_allocator PARAMS((splay_tree_compare_fn, splay_tree_delete_key_fn, splay_tree_delete_value_fn, splay_tree_allocate_fn, splay_tree_deallocate_fn, void *)); extern void splay_tree_delete PARAMS((splay_tree)); extern splay_tree_node splay_tree_insert PARAMS((splay_tree, splay_tree_key, splay_tree_value)); extern void splay_tree_remove PARAMS((splay_tree, splay_tree_key)); extern splay_tree_node splay_tree_lookup PARAMS((splay_tree, splay_tree_key)); extern splay_tree_node splay_tree_predecessor PARAMS((splay_tree, splay_tree_key)); extern splay_tree_node splay_tree_successor PARAMS((splay_tree, splay_tree_key)); extern splay_tree_node splay_tree_max PARAMS((splay_tree)); extern splay_tree_node splay_tree_min PARAMS((splay_tree)); extern int splay_tree_foreach PARAMS((splay_tree, splay_tree_foreach_fn, void*)); extern int splay_tree_compare_ints PARAMS((splay_tree_key, splay_tree_key)); extern int splay_tree_compare_pointers PARAMS((splay_tree_key, splay_tree_key)); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _SPLAY_TREE_H */ static void splay_tree_delete_helper PARAMS((splay_tree, splay_tree_node)); static void splay_tree_splay PARAMS((splay_tree, splay_tree_key)); static splay_tree_node splay_tree_splay_helper PARAMS((splay_tree, splay_tree_key, splay_tree_node*, splay_tree_node*, splay_tree_node*)); static int splay_tree_foreach_helper PARAMS((splay_tree, splay_tree_node, splay_tree_foreach_fn, void*)); /* Deallocate NODE (a member of SP), and all its sub-trees. */ static void splay_tree_delete_helper (sp, node) splay_tree sp; splay_tree_node node; { if (!node) return; splay_tree_delete_helper (sp, node->left); splay_tree_delete_helper (sp, node->right); if (sp->delete_key) (*sp->delete_key)(node->key); if (sp->delete_value) (*sp->delete_value)(node->value); (*sp->deallocate) ((char*) node, sp->allocate_data); } /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent and grandparent, respectively, of NODE. */ static splay_tree_node splay_tree_splay_helper (sp, key, node, parent, grandparent) splay_tree sp; splay_tree_key key; splay_tree_node *node; splay_tree_node *parent; splay_tree_node *grandparent; { splay_tree_node *next; splay_tree_node n; int comparison; n = *node; if (!n) return *parent; comparison = (*sp->comp) (key, n->key); if (comparison == 0) /* We've found the target. */ next = 0; else if (comparison < 0) /* The target is to the left. */ next = &n->left; else /* The target is to the right. */ next = &n->right; if (next) { /* Continue down the tree. */ n = splay_tree_splay_helper (sp, key, next, node, parent); /* The recursive call will change the place to which NODE points. */ if (*node != n) return n; } if (!parent) /* NODE is the root. We are done. */ return n; /* First, handle the case where there is no grandparent (i.e., *PARENT is the root of the tree.) */ if (!grandparent) { if (n == (*parent)->left) { *node = n->right; n->right = *parent; } else { *node = n->left; n->left = *parent; } *parent = n; return n; } /* Next handle the cases where both N and *PARENT are left children, or where both are right children. 
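Writing the left-left case out with subtrees A through D makes the rewiring easier to follow: a tree shaped g (p (n (A, B), C), D) becomes n (A, p (B, g (C, D))). N is hoisted two levels, N's old right subtree B becomes P's left child, and P's old right subtree C becomes G's left child. The right-right case is the mirror image.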
*/ if (n == (*parent)->left && *parent == (*grandparent)->left) { splay_tree_node p = *parent; (*grandparent)->left = p->right; p->right = *grandparent; p->left = n->right; n->right = p; *grandparent = n; return n; } else if (n == (*parent)->right && *parent == (*grandparent)->right) { splay_tree_node p = *parent; (*grandparent)->right = p->left; p->left = *grandparent; p->right = n->left; n->left = p; *grandparent = n; return n; } /* Finally, deal with the case where N is a left child, but *PARENT is a right child, or vice versa. */ if (n == (*parent)->left) { (*parent)->left = n->right; n->right = *parent; (*grandparent)->right = n->left; n->left = *grandparent; *grandparent = n; return n; } else { (*parent)->right = n->left; n->left = *parent; (*grandparent)->left = n->right; n->right = *grandparent; *grandparent = n; return n; } } /* Splay SP around KEY. */ static void splay_tree_splay (sp, key) splay_tree sp; splay_tree_key key; { if (sp->root == 0) return; splay_tree_splay_helper (sp, key, &sp->root, /*grandparent=*/0, /*parent=*/0); } /* Call FN, passing it the DATA, for every node below NODE, all of which are from SP, following an in-order traversal. If FN every returns a non-zero value, the iteration ceases immediately, and the value is returned. Otherwise, this function returns 0. */ static int splay_tree_foreach_helper (sp, node, fn, data) splay_tree sp; splay_tree_node node; splay_tree_foreach_fn fn; void* data; { int val; if (!node) return 0; val = splay_tree_foreach_helper (sp, node->left, fn, data); if (val) return val; val = (*fn)(node, data); if (val) return val; return splay_tree_foreach_helper (sp, node->right, fn, data); } /* An allocator and deallocator based on xmalloc. */ static void * splay_tree_xmalloc_allocate (size, data) int size; void *data ATTRIBUTE_UNUSED; { return (void *) xmalloc (size); } static void splay_tree_xmalloc_deallocate (object, data) void *object; void *data ATTRIBUTE_UNUSED; { free (object); } /* Allocate a new splay tree, using COMPARE_FN to compare nodes, DELETE_KEY_FN to deallocate keys, and DELETE_VALUE_FN to deallocate values. Use xmalloc to allocate the splay tree structure, and any nodes added. */ splay_tree splay_tree_new (compare_fn, delete_key_fn, delete_value_fn) splay_tree_compare_fn compare_fn; splay_tree_delete_key_fn delete_key_fn; splay_tree_delete_value_fn delete_value_fn; { return (splay_tree_new_with_allocator (compare_fn, delete_key_fn, delete_value_fn, splay_tree_xmalloc_allocate, splay_tree_xmalloc_deallocate, 0)); } /* Allocate a new splay tree, using COMPARE_FN to compare nodes, DELETE_KEY_FN to deallocate keys, and DELETE_VALUE_FN to deallocate values. */ splay_tree splay_tree_new_with_allocator (compare_fn, delete_key_fn, delete_value_fn, allocate_fn, deallocate_fn, allocate_data) splay_tree_compare_fn compare_fn; splay_tree_delete_key_fn delete_key_fn; splay_tree_delete_value_fn delete_value_fn; splay_tree_allocate_fn allocate_fn; splay_tree_deallocate_fn deallocate_fn; void *allocate_data; { splay_tree sp = (splay_tree) (*allocate_fn) (sizeof (struct splay_tree_s), allocate_data); sp->root = 0; sp->comp = compare_fn; sp->delete_key = delete_key_fn; sp->delete_value = delete_value_fn; sp->allocate = allocate_fn; sp->deallocate = deallocate_fn; sp->allocate_data = allocate_data; return sp; } /* Deallocate SP. */ void splay_tree_delete (sp) splay_tree sp; { splay_tree_delete_helper (sp, sp->root); (*sp->deallocate) ((char*) sp, sp->allocate_data); } /* Insert a new node (associating KEY with DATA) into SP. 
If a previous node with the indicated KEY exists, its data is replaced with the new value. Returns the new node. */ splay_tree_node splay_tree_insert (sp, key, value) splay_tree sp; splay_tree_key key; splay_tree_value value; { int comparison = 0; splay_tree_splay (sp, key); if (sp->root) comparison = (*sp->comp)(sp->root->key, key); if (sp->root && comparison == 0) { /* If the root of the tree already has the indicated KEY, just replace the value with VALUE. */ if (sp->delete_value) (*sp->delete_value)(sp->root->value); sp->root->value = value; } else { /* Create a new node, and insert it at the root. */ splay_tree_node node; node = ((splay_tree_node) (*sp->allocate) (sizeof (struct splay_tree_node_s), sp->allocate_data)); node->key = key; node->value = value; if (!sp->root) node->left = node->right = 0; else if (comparison < 0) { node->left = sp->root; node->right = node->left->right; node->left->right = 0; } else { node->right = sp->root; node->left = node->right->left; node->right->left = 0; } sp->root = node; } return sp->root; } /* Remove KEY from SP. It is not an error if it did not exist. */ void splay_tree_remove (sp, key) splay_tree sp; splay_tree_key key; { splay_tree_splay (sp, key); if (sp->root && (*sp->comp) (sp->root->key, key) == 0) { splay_tree_node left, right; left = sp->root->left; right = sp->root->right; /* Delete the root node itself. */ if (sp->delete_value) (*sp->delete_value) (sp->root->value); (*sp->deallocate) (sp->root, sp->allocate_data); /* One of the children is now the root. Doesn't matter much which, so long as we preserve the properties of the tree. */ if (left) { sp->root = left; /* If there was a right child as well, hang it off the right-most leaf of the left child. */ if (right) { while (left->right) left = left->right; left->right = right; } } else sp->root = right; } } /* Lookup KEY in SP, returning VALUE if present, and NULL otherwise. */ splay_tree_node splay_tree_lookup (sp, key) splay_tree sp; splay_tree_key key; { splay_tree_splay (sp, key); if (sp->root && (*sp->comp)(sp->root->key, key) == 0) return sp->root; else return 0; } /* Return the node in SP with the greatest key. */ splay_tree_node splay_tree_max (sp) splay_tree sp; { splay_tree_node n = sp->root; if (!n) return NULL; while (n->right) n = n->right; return n; } /* Return the node in SP with the smallest key. */ splay_tree_node splay_tree_min (sp) splay_tree sp; { splay_tree_node n = sp->root; if (!n) return NULL; while (n->left) n = n->left; return n; } /* Return the immediate predecessor KEY, or NULL if there is no predecessor. KEY need not be present in the tree. */ splay_tree_node splay_tree_predecessor (sp, key) splay_tree sp; splay_tree_key key; { int comparison; splay_tree_node node; /* If the tree is empty, there is certainly no predecessor. */ if (!sp->root) return NULL; /* Splay the tree around KEY. That will leave either the KEY itself, its predecessor, or its successor at the root. */ splay_tree_splay (sp, key); comparison = (*sp->comp)(sp->root->key, key); /* If the predecessor is at the root, just return it. */ if (comparison < 0) return sp->root; /* Otherwise, find the rightmost element of the left subtree. */ node = sp->root->left; if (node) while (node->right) node = node->right; return node; } /* Return the immediate successor KEY, or NULL if there is no successor. KEY need not be present in the tree. 
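A small concrete example, assuming a tree built with splay_tree_compare_ints so that keys are compared as plain ints (the key values are illustrative):

      splay_tree sp = splay_tree_new (splay_tree_compare_ints, NULL, NULL);
      splay_tree_node node;
      splay_tree_insert (sp, (splay_tree_key) 10, (splay_tree_value) 0);
      splay_tree_insert (sp, (splay_tree_key) 30, (splay_tree_value) 0);
      node = splay_tree_successor (sp, (splay_tree_key) 20);

   Here NODE ends up pointing at the entry whose key is 30 even though 20 was never inserted, and splay_tree_predecessor with the same arguments would yield the entry with key 10.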
*/ splay_tree_node splay_tree_successor (sp, key) splay_tree sp; splay_tree_key key; { int comparison; splay_tree_node node; /* If the tree is empty, there is certainly no successor. */ if (!sp->root) return NULL; /* Splay the tree around KEY. That will leave either the KEY itself, its predecessor, or its successor at the root. */ splay_tree_splay (sp, key); comparison = (*sp->comp)(sp->root->key, key); /* If the successor is at the root, just return it. */ if (comparison > 0) return sp->root; /* Otherwise, find the leftmost element of the right subtree. */ node = sp->root->right; if (node) while (node->left) node = node->left; return node; } /* Call FN, passing it the DATA, for every node in SP, following an in-order traversal. If FN every returns a non-zero value, the iteration ceases immediately, and the value is returned. Otherwise, this function returns 0. */ int splay_tree_foreach (sp, fn, data) splay_tree sp; splay_tree_foreach_fn fn; void *data; { return splay_tree_foreach_helper (sp, sp->root, fn, data); } /* Splay-tree comparison function, treating the keys as ints. */ int splay_tree_compare_ints (k1, k2) splay_tree_key k1; splay_tree_key k2; { if ((int) k1 < (int) k2) return -1; else if ((int) k1 > (int) k2) return 1; else return 0; } /* Splay-tree comparison function, treating the keys as pointers. */ int splay_tree_compare_pointers (k1, k2) splay_tree_key k1; splay_tree_key k2; { if ((char*) k1 < (char*) k2) return -1; else if ((char*) k1 > (char*) k2) return 1; else return 0; } /* Extended support for using errno values. Written by Fred Fish. fnf@cygnus.com This file is in the public domain. --Per Bothner. */ #ifdef HAVE_SYS_ERRLIST /* Note that errno.h (not sure what OS) or stdio.h (BSD 4.4, at least) might declare sys_errlist in a way that the compiler might consider incompatible with our later declaration, perhaps by using const attributes. So we hide the declaration in errno.h (if any) using a macro. */ #define sys_nerr sys_nerr__ #define sys_errlist sys_errlist__ #endif #include #include #ifdef HAVE_SYS_ERRLIST #undef sys_nerr #undef sys_errlist #endif /* Routines imported from standard C runtime libraries. */ #ifdef HAVE_STDLIB_H #include #else extern PTR malloc (); #endif #ifdef HAVE_STRING_H #include #else extern PTR memset (); #endif #ifndef MAX # define MAX(a,b) ((a) > (b) ? (a) : (b)) #endif static void init_error_tables PARAMS ((void)); /* Translation table for errno values. See intro(2) in most UNIX systems Programmers Reference Manuals. Note that this table is generally only accessed when it is used at runtime to initialize errno name and message tables that are indexed by errno value. Not all of these errnos will exist on all systems. This table is the only thing that should have to be updated as new error numbers are introduced. It's sort of ugly, but at least its portable. 
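To make the ENTRY macro below concrete: on a host without its own sys_errlist, ENTRY (EPERM, "EPERM", "Not owner") expands to the initializer {EPERM, "EPERM", "Not owner"}, while on hosts that do provide sys_errlist the message argument is dropped entirely and the system's own message text is used at runtime.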
*/ struct error_info { const int value; /* The numeric value from */ const char *const name; /* The equivalent symbolic value */ #ifndef HAVE_SYS_ERRLIST const char *const msg; /* Short message about this value */ #endif }; #ifndef HAVE_SYS_ERRLIST # define ENTRY(value, name, msg) {value, name, msg} #else # define ENTRY(value, name, msg) {value, name} #endif static const struct error_info error_table[] = { #if defined (EPERM) ENTRY(EPERM, "EPERM", "Not owner"), #endif #if defined (ENOENT) ENTRY(ENOENT, "ENOENT", "No such file or directory"), #endif #if defined (ESRCH) ENTRY(ESRCH, "ESRCH", "No such process"), #endif #if defined (EINTR) ENTRY(EINTR, "EINTR", "Interrupted system call"), #endif #if defined (EIO) ENTRY(EIO, "EIO", "I/O error"), #endif #if defined (ENXIO) ENTRY(ENXIO, "ENXIO", "No such device or address"), #endif #if defined (E2BIG) ENTRY(E2BIG, "E2BIG", "Arg list too long"), #endif #if defined (ENOEXEC) ENTRY(ENOEXEC, "ENOEXEC", "Exec format error"), #endif #if defined (EBADF) ENTRY(EBADF, "EBADF", "Bad file number"), #endif #if defined (ECHILD) ENTRY(ECHILD, "ECHILD", "No child processes"), #endif #if defined (EWOULDBLOCK) /* Put before EAGAIN, sometimes aliased */ ENTRY(EWOULDBLOCK, "EWOULDBLOCK", "Operation would block"), #endif #if defined (EAGAIN) ENTRY(EAGAIN, "EAGAIN", "No more processes"), #endif #if defined (ENOMEM) ENTRY(ENOMEM, "ENOMEM", "Not enough space"), #endif #if defined (EACCES) ENTRY(EACCES, "EACCES", "Permission denied"), #endif #if defined (EFAULT) ENTRY(EFAULT, "EFAULT", "Bad address"), #endif #if defined (ENOTBLK) ENTRY(ENOTBLK, "ENOTBLK", "Block device required"), #endif #if defined (EBUSY) ENTRY(EBUSY, "EBUSY", "Device busy"), #endif #if defined (EEXIST) ENTRY(EEXIST, "EEXIST", "File exists"), #endif #if defined (EXDEV) ENTRY(EXDEV, "EXDEV", "Cross-device link"), #endif #if defined (ENODEV) ENTRY(ENODEV, "ENODEV", "No such device"), #endif #if defined (ENOTDIR) ENTRY(ENOTDIR, "ENOTDIR", "Not a directory"), #endif #if defined (EISDIR) ENTRY(EISDIR, "EISDIR", "Is a directory"), #endif #if defined (EINVAL) ENTRY(EINVAL, "EINVAL", "Invalid argument"), #endif #if defined (ENFILE) ENTRY(ENFILE, "ENFILE", "File table overflow"), #endif #if defined (EMFILE) ENTRY(EMFILE, "EMFILE", "Too many open files"), #endif #if defined (ENOTTY) ENTRY(ENOTTY, "ENOTTY", "Not a typewriter"), #endif #if defined (ETXTBSY) ENTRY(ETXTBSY, "ETXTBSY", "Text file busy"), #endif #if defined (EFBIG) ENTRY(EFBIG, "EFBIG", "File too large"), #endif #if defined (ENOSPC) ENTRY(ENOSPC, "ENOSPC", "No space left on device"), #endif #if defined (ESPIPE) ENTRY(ESPIPE, "ESPIPE", "Illegal seek"), #endif #if defined (EROFS) ENTRY(EROFS, "EROFS", "Read-only file system"), #endif #if defined (EMLINK) ENTRY(EMLINK, "EMLINK", "Too many links"), #endif #if defined (EPIPE) ENTRY(EPIPE, "EPIPE", "Broken pipe"), #endif #if defined (EDOM) ENTRY(EDOM, "EDOM", "Math argument out of domain of func"), #endif #if defined (ERANGE) ENTRY(ERANGE, "ERANGE", "Math result not representable"), #endif #if defined (ENOMSG) ENTRY(ENOMSG, "ENOMSG", "No message of desired type"), #endif #if defined (EIDRM) ENTRY(EIDRM, "EIDRM", "Identifier removed"), #endif #if defined (ECHRNG) ENTRY(ECHRNG, "ECHRNG", "Channel number out of range"), #endif #if defined (EL2NSYNC) ENTRY(EL2NSYNC, "EL2NSYNC", "Level 2 not synchronized"), #endif #if defined (EL3HLT) ENTRY(EL3HLT, "EL3HLT", "Level 3 halted"), #endif #if defined (EL3RST) ENTRY(EL3RST, "EL3RST", "Level 3 reset"), #endif #if defined (ELNRNG) ENTRY(ELNRNG, "ELNRNG", "Link number 
out of range"), #endif #if defined (EUNATCH) ENTRY(EUNATCH, "EUNATCH", "Protocol driver not attached"), #endif #if defined (ENOCSI) ENTRY(ENOCSI, "ENOCSI", "No CSI structure available"), #endif #if defined (EL2HLT) ENTRY(EL2HLT, "EL2HLT", "Level 2 halted"), #endif #if defined (EDEADLK) ENTRY(EDEADLK, "EDEADLK", "Deadlock condition"), #endif #if defined (ENOLCK) ENTRY(ENOLCK, "ENOLCK", "No record locks available"), #endif #if defined (EBADE) ENTRY(EBADE, "EBADE", "Invalid exchange"), #endif #if defined (EBADR) ENTRY(EBADR, "EBADR", "Invalid request descriptor"), #endif #if defined (EXFULL) ENTRY(EXFULL, "EXFULL", "Exchange full"), #endif #if defined (ENOANO) ENTRY(ENOANO, "ENOANO", "No anode"), #endif #if defined (EBADRQC) ENTRY(EBADRQC, "EBADRQC", "Invalid request code"), #endif #if defined (EBADSLT) ENTRY(EBADSLT, "EBADSLT", "Invalid slot"), #endif #if defined (EDEADLOCK) ENTRY(EDEADLOCK, "EDEADLOCK", "File locking deadlock error"), #endif #if defined (EBFONT) ENTRY(EBFONT, "EBFONT", "Bad font file format"), #endif #if defined (ENOSTR) ENTRY(ENOSTR, "ENOSTR", "Device not a stream"), #endif #if defined (ENODATA) ENTRY(ENODATA, "ENODATA", "No data available"), #endif #if defined (ETIME) ENTRY(ETIME, "ETIME", "Timer expired"), #endif #if defined (ENOSR) ENTRY(ENOSR, "ENOSR", "Out of streams resources"), #endif #if defined (ENONET) ENTRY(ENONET, "ENONET", "Machine is not on the network"), #endif #if defined (ENOPKG) ENTRY(ENOPKG, "ENOPKG", "Package not installed"), #endif #if defined (EREMOTE) ENTRY(EREMOTE, "EREMOTE", "Object is remote"), #endif #if defined (ENOLINK) ENTRY(ENOLINK, "ENOLINK", "Link has been severed"), #endif #if defined (EADV) ENTRY(EADV, "EADV", "Advertise error"), #endif #if defined (ESRMNT) ENTRY(ESRMNT, "ESRMNT", "Srmount error"), #endif #if defined (ECOMM) ENTRY(ECOMM, "ECOMM", "Communication error on send"), #endif #if defined (EPROTO) ENTRY(EPROTO, "EPROTO", "Protocol error"), #endif #if defined (EMULTIHOP) ENTRY(EMULTIHOP, "EMULTIHOP", "Multihop attempted"), #endif #if defined (EDOTDOT) ENTRY(EDOTDOT, "EDOTDOT", "RFS specific error"), #endif #if defined (EBADMSG) ENTRY(EBADMSG, "EBADMSG", "Not a data message"), #endif #if defined (ENAMETOOLONG) ENTRY(ENAMETOOLONG, "ENAMETOOLONG", "File name too long"), #endif #if defined (EOVERFLOW) ENTRY(EOVERFLOW, "EOVERFLOW", "Value too large for defined data type"), #endif #if defined (ENOTUNIQ) ENTRY(ENOTUNIQ, "ENOTUNIQ", "Name not unique on network"), #endif #if defined (EBADFD) ENTRY(EBADFD, "EBADFD", "File descriptor in bad state"), #endif #if defined (EREMCHG) ENTRY(EREMCHG, "EREMCHG", "Remote address changed"), #endif #if defined (ELIBACC) ENTRY(ELIBACC, "ELIBACC", "Can not access a needed shared library"), #endif #if defined (ELIBBAD) ENTRY(ELIBBAD, "ELIBBAD", "Accessing a corrupted shared library"), #endif #if defined (ELIBSCN) ENTRY(ELIBSCN, "ELIBSCN", ".lib section in a.out corrupted"), #endif #if defined (ELIBMAX) ENTRY(ELIBMAX, "ELIBMAX", "Attempting to link in too many shared libraries"), #endif #if defined (ELIBEXEC) ENTRY(ELIBEXEC, "ELIBEXEC", "Cannot exec a shared library directly"), #endif #if defined (EILSEQ) ENTRY(EILSEQ, "EILSEQ", "Illegal byte sequence"), #endif #if defined (ENOSYS) ENTRY(ENOSYS, "ENOSYS", "Operation not applicable"), #endif #if defined (ELOOP) ENTRY(ELOOP, "ELOOP", "Too many symbolic links encountered"), #endif #if defined (ERESTART) ENTRY(ERESTART, "ERESTART", "Interrupted system call should be restarted"), #endif #if defined (ESTRPIPE) ENTRY(ESTRPIPE, "ESTRPIPE", "Streams pipe error"), 
#endif #if defined (ENOTEMPTY) ENTRY(ENOTEMPTY, "ENOTEMPTY", "Directory not empty"), #endif #if defined (EUSERS) ENTRY(EUSERS, "EUSERS", "Too many users"), #endif #if defined (ENOTSOCK) ENTRY(ENOTSOCK, "ENOTSOCK", "Socket operation on non-socket"), #endif #if defined (EDESTADDRREQ) ENTRY(EDESTADDRREQ, "EDESTADDRREQ", "Destination address required"), #endif #if defined (EMSGSIZE) ENTRY(EMSGSIZE, "EMSGSIZE", "Message too long"), #endif #if defined (EPROTOTYPE) ENTRY(EPROTOTYPE, "EPROTOTYPE", "Protocol wrong type for socket"), #endif #if defined (ENOPROTOOPT) ENTRY(ENOPROTOOPT, "ENOPROTOOPT", "Protocol not available"), #endif #if defined (EPROTONOSUPPORT) ENTRY(EPROTONOSUPPORT, "EPROTONOSUPPORT", "Protocol not supported"), #endif #if defined (ESOCKTNOSUPPORT) ENTRY(ESOCKTNOSUPPORT, "ESOCKTNOSUPPORT", "Socket type not supported"), #endif #if defined (EOPNOTSUPP) ENTRY(EOPNOTSUPP, "EOPNOTSUPP", "Operation not supported on transport endpoint"), #endif #if defined (EPFNOSUPPORT) ENTRY(EPFNOSUPPORT, "EPFNOSUPPORT", "Protocol family not supported"), #endif #if defined (EAFNOSUPPORT) ENTRY(EAFNOSUPPORT, "EAFNOSUPPORT", "Address family not supported by protocol"), #endif #if defined (EADDRINUSE) ENTRY(EADDRINUSE, "EADDRINUSE", "Address already in use"), #endif #if defined (EADDRNOTAVAIL) ENTRY(EADDRNOTAVAIL, "EADDRNOTAVAIL","Cannot assign requested address"), #endif #if defined (ENETDOWN) ENTRY(ENETDOWN, "ENETDOWN", "Network is down"), #endif #if defined (ENETUNREACH) ENTRY(ENETUNREACH, "ENETUNREACH", "Network is unreachable"), #endif #if defined (ENETRESET) ENTRY(ENETRESET, "ENETRESET", "Network dropped connection because of reset"), #endif #if defined (ECONNABORTED) ENTRY(ECONNABORTED, "ECONNABORTED", "Software caused connection abort"), #endif #if defined (ECONNRESET) ENTRY(ECONNRESET, "ECONNRESET", "Connection reset by peer"), #endif #if defined (ENOBUFS) ENTRY(ENOBUFS, "ENOBUFS", "No buffer space available"), #endif #if defined (EISCONN) ENTRY(EISCONN, "EISCONN", "Transport endpoint is already connected"), #endif #if defined (ENOTCONN) ENTRY(ENOTCONN, "ENOTCONN", "Transport endpoint is not connected"), #endif #if defined (ESHUTDOWN) ENTRY(ESHUTDOWN, "ESHUTDOWN", "Cannot send after transport endpoint shutdown"), #endif #if defined (ETOOMANYREFS) ENTRY(ETOOMANYREFS, "ETOOMANYREFS", "Too many references: cannot splice"), #endif #if defined (ETIMEDOUT) ENTRY(ETIMEDOUT, "ETIMEDOUT", "Connection timed out"), #endif #if defined (ECONNREFUSED) ENTRY(ECONNREFUSED, "ECONNREFUSED", "Connection refused"), #endif #if defined (EHOSTDOWN) ENTRY(EHOSTDOWN, "EHOSTDOWN", "Host is down"), #endif #if defined (EHOSTUNREACH) ENTRY(EHOSTUNREACH, "EHOSTUNREACH", "No route to host"), #endif #if defined (EALREADY) ENTRY(EALREADY, "EALREADY", "Operation already in progress"), #endif #if defined (EINPROGRESS) ENTRY(EINPROGRESS, "EINPROGRESS", "Operation now in progress"), #endif #if defined (ESTALE) ENTRY(ESTALE, "ESTALE", "Stale NFS file handle"), #endif #if defined (EUCLEAN) ENTRY(EUCLEAN, "EUCLEAN", "Structure needs cleaning"), #endif #if defined (ENOTNAM) ENTRY(ENOTNAM, "ENOTNAM", "Not a XENIX named type file"), #endif #if defined (ENAVAIL) ENTRY(ENAVAIL, "ENAVAIL", "No XENIX semaphores available"), #endif #if defined (EISNAM) ENTRY(EISNAM, "EISNAM", "Is a named type file"), #endif #if defined (EREMOTEIO) ENTRY(EREMOTEIO, "EREMOTEIO", "Remote I/O error"), #endif ENTRY(0, NULL, NULL) }; #ifdef EVMSERR /* This is not in the table, because the numeric value of EVMSERR (32767) lies outside the range of sys_errlist[]. 
*/ static struct { int value; const char *name, *msg; } evmserr = { EVMSERR, "EVMSERR", "VMS-specific error" }; #endif /* Translation table allocated and initialized at runtime. Indexed by the errno value to find the equivalent symbolic value. */ static const char **error_names; static int num_error_names = 0; /* Translation table allocated and initialized at runtime, if it does not already exist in the host environment. Indexed by the errno value to find the descriptive string. We don't export it for use in other modules because even though it has the same name, it differs from other implementations in that it is dynamically initialized rather than statically initialized. */ #ifndef HAVE_SYS_ERRLIST #define sys_nerr sys_nerr__ #define sys_errlist sys_errlist__ static int sys_nerr; static const char **sys_errlist; #else extern int sys_nerr; extern const char * const sys_errlist[]; #endif /* NAME init_error_tables -- initialize the name and message tables SYNOPSIS static void init_error_tables (); DESCRIPTION Using the error_table, which is initialized at compile time, generate the error_names and the sys_errlist (if needed) tables, which are indexed at runtime by a specific errno value. BUGS The initialization of the tables may fail under low memory conditions, in which case we don't do anything particularly useful, but we don't bomb either. Who knows, it might succeed at a later point if we free some memory in the meantime. In any case, the other routines know how to deal with lack of a table after trying to initialize it. This may or may not be considered to be a bug, that we don't specifically warn about this particular failure mode. */ static void init_error_tables () { const struct error_info *eip; int nbytes; /* If we haven't already scanned the error_table once to find the maximum errno value, then go find it now. */ if (num_error_names == 0) { for (eip = error_table; eip -> name != NULL; eip++) { if (eip -> value >= num_error_names) { num_error_names = eip -> value + 1; } } } /* Now attempt to allocate the error_names table, zero it out, and then initialize it from the statically initialized error_table. */ if (error_names == NULL) { nbytes = num_error_names * sizeof (char *); if ((error_names = (const char **) malloc (nbytes)) != NULL) { memset (error_names, 0, nbytes); for (eip = error_table; eip -> name != NULL; eip++) { error_names[eip -> value] = eip -> name; } } } #ifndef HAVE_SYS_ERRLIST /* Now attempt to allocate the sys_errlist table, zero it out, and then initialize it from the statically initialized error_table. */ if (sys_errlist == NULL) { nbytes = num_error_names * sizeof (char *); if ((sys_errlist = (const char **) malloc (nbytes)) != NULL) { memset (sys_errlist, 0, nbytes); sys_nerr = num_error_names; for (eip = error_table; eip -> name != NULL; eip++) { sys_errlist[eip -> value] = eip -> msg; } } } #endif } /* @deftypefn Extension int errno_max (void) Returns the maximum @code{errno} value for which a corresponding symbolic name or message is available. Note that in the case where we use the @code{sys_errlist} supplied by the system, it is possible for there to be more symbolic names than messages, or vice versa. In fact, the manual page for @code{perror(3C)} explicitly warns that one should check the size of the table (@code{sys_nerr}) before indexing it, since new error codes may be added to the system before they are added to the table. Thus @code{sys_nerr} might be smaller than value implied by the largest @code{errno} value defined in @code{}. 
We return the maximum value that can be used to obtain a meaningful symbolic name or message. @end deftypefn */ int errno_max () { int maxsize; if (error_names == NULL) { init_error_tables (); } maxsize = MAX (sys_nerr, num_error_names); return (maxsize - 1); } #ifndef HAVE_STRERROR /* @deftypefn Supplemental char* strerror (int @var{errnoval}) Maps an @code{errno} number to an error message string, the contents of which are implementation defined. On systems which have the external variables @code{sys_nerr} and @code{sys_errlist}, these strings will be the same as the ones used by @code{perror}. If the supplied error number is within the valid range of indices for the @code{sys_errlist}, but no message is available for the particular error number, then returns the string @samp{Error @var{num}}, where @var{num} is the error number. If the supplied error number is not a valid index into @code{sys_errlist}, returns @code{NULL}. The returned string is only guaranteed to be valid only until the next call to @code{strerror}. @end deftypefn */ char * strerror (errnoval) int errnoval; { const char *msg; static char buf[32]; #ifndef HAVE_SYS_ERRLIST if (error_names == NULL) { init_error_tables (); } #endif if ((errnoval < 0) || (errnoval >= sys_nerr)) { #ifdef EVMSERR if (errnoval == evmserr.value) msg = evmserr.msg; else #endif /* Out of range, just return NULL */ msg = NULL; } else if ((sys_errlist == NULL) || (sys_errlist[errnoval] == NULL)) { /* In range, but no sys_errlist or no entry at this index. */ sprintf (buf, "Error %d", errnoval); msg = buf; } else { /* In range, and a valid message. Just return the message. */ msg = (char *) sys_errlist[errnoval]; } return (msg); } #endif /* ! HAVE_STRERROR */ /* @deftypefn Replacement {const char*} strerrno (int @var{errnum}) Given an error number returned from a system call (typically returned in @code{errno}), returns a pointer to a string containing the symbolic name of that error number, as found in @code{}. If the supplied error number is within the valid range of indices for symbolic names, but no name is available for the particular error number, then returns the string @samp{Error @var{num}}, where @var{num} is the error number. If the supplied error number is not within the range of valid indices, then returns @code{NULL}. The contents of the location pointed to are only guaranteed to be valid until the next call to @code{strerrno}. @end deftypefn */ const char * strerrno (errnoval) int errnoval; { const char *name; static char buf[32]; if (error_names == NULL) { init_error_tables (); } if ((errnoval < 0) || (errnoval >= num_error_names)) { #ifdef EVMSERR if (errnoval == evmserr.value) name = evmserr.name; else #endif /* Out of range, just return NULL */ name = NULL; } else if ((error_names == NULL) || (error_names[errnoval] == NULL)) { /* In range, but no error_names or no entry at this index. */ sprintf (buf, "Error %d", errnoval); name = (const char *) buf; } else { /* In range, and a valid name. Just return the name. */ name = error_names[errnoval]; } return (name); } /* @deftypefn Extension int strtoerrno (const char *@var{name}) Given the symbolic name of a error number (e.g., @code{EACCES}), map it to an errno value. If no translation is found, returns 0. 
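For example, @code{strtoerrno ("EACCES")} yields the value of @code{EACCES} on hosts that define it, so the function is effectively the inverse of @code{strerrno}; @code{strtoerrno ("ENOSUCHERROR")} yields 0.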
@end deftypefn */ int strtoerrno (name) const char *name; { int errnoval = 0; if (name != NULL) { if (error_names == NULL) { init_error_tables (); } for (errnoval = 0; errnoval < num_error_names; errnoval++) { if ((error_names[errnoval] != NULL) && (strcmp (name, error_names[errnoval]) == 0)) { break; } } if (errnoval == num_error_names) { #ifdef EVMSERR if (strcmp (name, evmserr.name) == 0) errnoval = evmserr.value; else #endif errnoval = 0; } } return (errnoval); } /* A simple little main that does nothing but print all the errno translations if MAIN is defined and this file is compiled and linked. */ #ifdef MAIN #include int main () { int errn; int errnmax; const char *name; const char *msg; char *strerror (); errnmax = errno_max (); printf ("%d entries in names table.\n", num_error_names); printf ("%d entries in messages table.\n", sys_nerr); printf ("%d is max useful index.\n", errnmax); /* Keep printing values until we get to the end of *both* tables, not *either* table. Note that knowing the maximum useful index does *not* relieve us of the responsibility of testing the return pointer for NULL. */ for (errn = 0; errn <= errnmax; errn++) { name = strerrno (errn); name = (name == NULL) ? "" : name; msg = strerror (errn); msg = (msg == NULL) ? "" : msg; printf ("%-4d%-18s%s\n", errn, name, msg); } return 0; } #endif /* Extended support for using signal values. Written by Fred Fish. fnf@cygnus.com This file is in the public domain. */ /* We need to declare sys_siglist, because even if the system provides it we can't assume that it is declared in (for example, SunOS provides sys_siglist, but it does not declare it in any header file). fHowever, we can't declare sys_siglist portably, because on some systems it is declared with const and on some systems it is declared without const. If we were using autoconf, we could work out the right declaration. Until, then we just ignore any declaration in the system header files, and always declare it ourselves. With luck, this will always work. */ #define sys_siglist no_such_symbol #define sys_nsig sys_nsig__no_such_symbol #include #include /* Routines imported from standard C runtime libraries. */ #ifdef HAVE_STDLIB_H #include #else extern PTR malloc (); #endif #ifdef HAVE_STRING_H #include #else extern PTR memset (); #endif /* Undefine the macro we used to hide the definition of sys_siglist found in the system header files. */ #undef sys_siglist #undef sys_nsig #ifndef NULL # ifdef ANSI_PROTOTYPES # define NULL (void *) 0 # else # define NULL 0 # endif #endif #ifndef MAX # define MAX(a,b) ((a) > (b) ? (a) : (b)) #endif static void init_signal_tables PARAMS ((void)); /* Translation table for signal values. Note that this table is generally only accessed when it is used at runtime to initialize signal name and message tables that are indexed by signal value. Not all of these signals will exist on all systems. This table is the only thing that should have to be updated as new signal numbers are introduced. It's sort of ugly, but at least its portable. 
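As a concrete picture of what gets built from this table at runtime: on a host where SIGINT has the value 2, init_signal_tables below leaves signal_names[2] pointing at the string "SIGINT", and, when the C library supplies no sys_siglist of its own, sys_siglist[2] pointing at "Interrupt".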
*/ struct signal_info { const int value; /* The numeric value from */ const char *const name; /* The equivalent symbolic value */ #ifndef HAVE_SYS_SIGLIST const char *const msg; /* Short message about this value */ #endif }; #ifndef HAVE_SYS_SIGLIST # define ENTRY(value, name, msg) {value, name, msg} #else # define ENTRY(value, name, msg) {value, name} #endif static const struct signal_info signal_table[] = { #if defined (SIGHUP) ENTRY(SIGHUP, "SIGHUP", "Hangup"), #endif #if defined (SIGINT) ENTRY(SIGINT, "SIGINT", "Interrupt"), #endif #if defined (SIGQUIT) ENTRY(SIGQUIT, "SIGQUIT", "Quit"), #endif #if defined (SIGILL) ENTRY(SIGILL, "SIGILL", "Illegal instruction"), #endif #if defined (SIGTRAP) ENTRY(SIGTRAP, "SIGTRAP", "Trace/breakpoint trap"), #endif /* Put SIGIOT before SIGABRT, so that if SIGIOT==SIGABRT then SIGABRT overrides SIGIOT. SIGABRT is in ANSI and POSIX.1, and SIGIOT isn't. */ #if defined (SIGIOT) ENTRY(SIGIOT, "SIGIOT", "IOT trap"), #endif #if defined (SIGABRT) ENTRY(SIGABRT, "SIGABRT", "Aborted"), #endif #if defined (SIGEMT) ENTRY(SIGEMT, "SIGEMT", "Emulation trap"), #endif #if defined (SIGFPE) ENTRY(SIGFPE, "SIGFPE", "Arithmetic exception"), #endif #if defined (SIGKILL) ENTRY(SIGKILL, "SIGKILL", "Killed"), #endif #if defined (SIGBUS) ENTRY(SIGBUS, "SIGBUS", "Bus error"), #endif #if defined (SIGSEGV) ENTRY(SIGSEGV, "SIGSEGV", "Segmentation fault"), #endif #if defined (SIGSYS) ENTRY(SIGSYS, "SIGSYS", "Bad system call"), #endif #if defined (SIGPIPE) ENTRY(SIGPIPE, "SIGPIPE", "Broken pipe"), #endif #if defined (SIGALRM) ENTRY(SIGALRM, "SIGALRM", "Alarm clock"), #endif #if defined (SIGTERM) ENTRY(SIGTERM, "SIGTERM", "Terminated"), #endif #if defined (SIGUSR1) ENTRY(SIGUSR1, "SIGUSR1", "User defined signal 1"), #endif #if defined (SIGUSR2) ENTRY(SIGUSR2, "SIGUSR2", "User defined signal 2"), #endif /* Put SIGCLD before SIGCHLD, so that if SIGCLD==SIGCHLD then SIGCHLD overrides SIGCLD. SIGCHLD is in POXIX.1 */ #if defined (SIGCLD) ENTRY(SIGCLD, "SIGCLD", "Child status changed"), #endif #if defined (SIGCHLD) ENTRY(SIGCHLD, "SIGCHLD", "Child status changed"), #endif #if defined (SIGPWR) ENTRY(SIGPWR, "SIGPWR", "Power fail/restart"), #endif #if defined (SIGWINCH) ENTRY(SIGWINCH, "SIGWINCH", "Window size changed"), #endif #if defined (SIGURG) ENTRY(SIGURG, "SIGURG", "Urgent I/O condition"), #endif #if defined (SIGIO) /* "I/O pending" has also been suggested, but is misleading since the signal only happens when the process has asked for it, not everytime I/O is pending. 
*/ ENTRY(SIGIO, "SIGIO", "I/O possible"), #endif #if defined (SIGPOLL) ENTRY(SIGPOLL, "SIGPOLL", "Pollable event occurred"), #endif #if defined (SIGSTOP) ENTRY(SIGSTOP, "SIGSTOP", "Stopped (signal)"), #endif #if defined (SIGTSTP) ENTRY(SIGTSTP, "SIGTSTP", "Stopped (user)"), #endif #if defined (SIGCONT) ENTRY(SIGCONT, "SIGCONT", "Continued"), #endif #if defined (SIGTTIN) ENTRY(SIGTTIN, "SIGTTIN", "Stopped (tty input)"), #endif #if defined (SIGTTOU) ENTRY(SIGTTOU, "SIGTTOU", "Stopped (tty output)"), #endif #if defined (SIGVTALRM) ENTRY(SIGVTALRM, "SIGVTALRM", "Virtual timer expired"), #endif #if defined (SIGPROF) ENTRY(SIGPROF, "SIGPROF", "Profiling timer expired"), #endif #if defined (SIGXCPU) ENTRY(SIGXCPU, "SIGXCPU", "CPU time limit exceeded"), #endif #if defined (SIGXFSZ) ENTRY(SIGXFSZ, "SIGXFSZ", "File size limit exceeded"), #endif #if defined (SIGWIND) ENTRY(SIGWIND, "SIGWIND", "SIGWIND"), #endif #if defined (SIGPHONE) ENTRY(SIGPHONE, "SIGPHONE", "SIGPHONE"), #endif #if defined (SIGLOST) ENTRY(SIGLOST, "SIGLOST", "Resource lost"), #endif #if defined (SIGWAITING) ENTRY(SIGWAITING, "SIGWAITING", "Process's LWPs are blocked"), #endif #if defined (SIGLWP) ENTRY(SIGLWP, "SIGLWP", "Signal LWP"), #endif #if defined (SIGDANGER) ENTRY(SIGDANGER, "SIGDANGER", "Swap space dangerously low"), #endif #if defined (SIGGRANT) ENTRY(SIGGRANT, "SIGGRANT", "Monitor mode granted"), #endif #if defined (SIGRETRACT) ENTRY(SIGRETRACT, "SIGRETRACT", "Need to relinguish monitor mode"), #endif #if defined (SIGMSG) ENTRY(SIGMSG, "SIGMSG", "Monitor mode data available"), #endif #if defined (SIGSOUND) ENTRY(SIGSOUND, "SIGSOUND", "Sound completed"), #endif #if defined (SIGSAK) ENTRY(SIGSAK, "SIGSAK", "Secure attention"), #endif ENTRY(0, NULL, NULL) }; /* Translation table allocated and initialized at runtime. Indexed by the signal value to find the equivalent symbolic value. */ static const char **signal_names; static int num_signal_names = 0; /* Translation table allocated and initialized at runtime, if it does not already exist in the host environment. Indexed by the signal value to find the descriptive string. We don't export it for use in other modules because even though it has the same name, it differs from other implementations in that it is dynamically initialized rather than statically initialized. */ #ifndef HAVE_SYS_SIGLIST static int sys_nsig; static const char **sys_siglist; #else #ifdef NSIG static int sys_nsig = NSIG; #else #ifdef _NSIG static int sys_nsig = _NSIG; #endif #endif extern const char * const sys_siglist[]; #endif /* NAME init_signal_tables -- initialize the name and message tables SYNOPSIS static void init_signal_tables (); DESCRIPTION Using the signal_table, which is initialized at compile time, generate the signal_names and the sys_siglist (if needed) tables, which are indexed at runtime by a specific signal value. BUGS The initialization of the tables may fail under low memory conditions, in which case we don't do anything particularly useful, but we don't bomb either. Who knows, it might succeed at a later point if we free some memory in the meantime. In any case, the other routines know how to deal with lack of a table after trying to initialize it. This may or may not be considered to be a bug, that we don't specifically warn about this particular failure mode. */ static void init_signal_tables () { const struct signal_info *eip; int nbytes; /* If we haven't already scanned the signal_table once to find the maximum signal value, then go find it now. 
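For instance, if the largest signal value present in signal_table on this host were 31, the scan would leave num_signal_names at 32, so every value in the table can be used directly as an index into the arrays allocated below.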
*/ if (num_signal_names == 0) { for (eip = signal_table; eip -> name != NULL; eip++) { if (eip -> value >= num_signal_names) { num_signal_names = eip -> value + 1; } } } /* Now attempt to allocate the signal_names table, zero it out, and then initialize it from the statically initialized signal_table. */ if (signal_names == NULL) { nbytes = num_signal_names * sizeof (char *); if ((signal_names = (const char **) malloc (nbytes)) != NULL) { memset (signal_names, 0, nbytes); for (eip = signal_table; eip -> name != NULL; eip++) { signal_names[eip -> value] = eip -> name; } } } #ifndef HAVE_SYS_SIGLIST /* Now attempt to allocate the sys_siglist table, zero it out, and then initialize it from the statically initialized signal_table. */ if (sys_siglist == NULL) { nbytes = num_signal_names * sizeof (char *); if ((sys_siglist = (const char **) malloc (nbytes)) != NULL) { memset (sys_siglist, 0, nbytes); sys_nsig = num_signal_names; for (eip = signal_table; eip -> name != NULL; eip++) { sys_siglist[eip -> value] = eip -> msg; } } } #endif } /* @deftypefn Extension int signo_max (void) Returns the maximum signal value for which a corresponding symbolic name or message is available. Note that in the case where we use the @code{sys_siglist} supplied by the system, it is possible for there to be more symbolic names than messages, or vice versa. In fact, the manual page for @code{psignal(3b)} explicitly warns that one should check the size of the table (@code{NSIG}) before indexing it, since new signal codes may be added to the system before they are added to the table. Thus @code{NSIG} might be smaller than value implied by the largest signo value defined in @code{}. We return the maximum value that can be used to obtain a meaningful symbolic name or message. @end deftypefn */ int signo_max () { int maxsize; if (signal_names == NULL) { init_signal_tables (); } maxsize = MAX (sys_nsig, num_signal_names); return (maxsize - 1); } /* @deftypefn Supplemental {const char *} strsignal (int @var{signo}) Maps an signal number to an signal message string, the contents of which are implementation defined. On systems which have the external variable @code{sys_siglist}, these strings will be the same as the ones used by @code{psignal()}. If the supplied signal number is within the valid range of indices for the @code{sys_siglist}, but no message is available for the particular signal number, then returns the string @samp{Signal @var{num}}, where @var{num} is the signal number. If the supplied signal number is not a valid index into @code{sys_siglist}, returns @code{NULL}. The returned string is only guaranteed to be valid only until the next call to @code{strsignal}. @end deftypefn */ #ifndef HAVE_STRSIGNAL const char * strsignal (signo) int signo; { const char *msg; static char buf[32]; #ifndef HAVE_SYS_SIGLIST if (signal_names == NULL) { init_signal_tables (); } #endif if ((signo < 0) || (signo >= sys_nsig)) { /* Out of range, just return NULL */ msg = NULL; } else if ((sys_siglist == NULL) || (sys_siglist[signo] == NULL)) { /* In range, but no sys_siglist or no entry at this index. */ sprintf (buf, "Signal %d", signo); msg = (const char *) buf; } else { /* In range, and a valid message. Just return the message. */ msg = (const char *) sys_siglist[signo]; } return (msg); } #endif /* ! HAVE_STRSIGNAL */ /* @deftypefn Extension {const char*} strsigno (int @var{signo}) Given an signal number, returns a pointer to a string containing the symbolic name of that signal number, as found in @code{}. 
If the supplied signal number is within the valid range of indices for symbolic names, but no name is available for the particular signal number, then returns the string @samp{Signal @var{num}}, where @var{num} is the signal number. If the supplied signal number is not within the range of valid indices, then returns @code{NULL}. The contents of the location pointed to are only guaranteed to be valid until the next call to @code{strsigno}. @end deftypefn */ const char * strsigno (signo) int signo; { const char *name; static char buf[32]; if (signal_names == NULL) { init_signal_tables (); } if ((signo < 0) || (signo >= num_signal_names)) { /* Out of range, just return NULL */ name = NULL; } else if ((signal_names == NULL) || (signal_names[signo] == NULL)) { /* In range, but no signal_names or no entry at this index. */ sprintf (buf, "Signal %d", signo); name = (const char *) buf; } else { /* In range, and a valid name. Just return the name. */ name = signal_names[signo]; } return (name); } /* @deftypefn Extension int strtosigno (const char *@var{name}) Given the symbolic name of a signal, map it to a signal number. If no translation is found, returns 0. @end deftypefn */ int strtosigno (name) const char *name; { int signo = 0; if (name != NULL) { if (signal_names == NULL) { init_signal_tables (); } for (signo = 0; signo < num_signal_names; signo++) { if ((signal_names[signo] != NULL) && (strcmp (name, signal_names[signo]) == 0)) { break; } } if (signo == num_signal_names) { signo = 0; } } return (signo); } /* @deftypefn Supplemental void psignal (unsigned @var{signo}, char *@var{message}) Print @var{message} to the standard error, followed by a colon, followed by the description of the signal specified by @var{signo}, followed by a newline. @end deftypefn */ #ifndef HAVE_PSIGNAL void psignal (signo, message) unsigned signo; char *message; { if (signal_names == NULL) { init_signal_tables (); } if ((signo <= 0) || (signo >= sys_nsig)) { fprintf (stderr, "%s: unknown signal\n", message); } else { fprintf (stderr, "%s: %s\n", message, sys_siglist[signo]); } } #endif /* ! HAVE_PSIGNAL */ /* A simple little main that does nothing but print all the signal translations if MAIN is defined and this file is compiled and linked. */ #ifdef MAIN #include int main () { int signo; int maxsigno; const char *name; const char *msg; maxsigno = signo_max (); printf ("%d entries in names table.\n", num_signal_names); printf ("%d entries in messages table.\n", sys_nsig); printf ("%d is max useful index.\n", maxsigno); /* Keep printing values until we get to the end of *both* tables, not *either* table. Note that knowing the maximum useful index does *not* relieve us of the responsibility of testing the return pointer for NULL. */ for (signo = 0; signo <= maxsigno; signo++) { name = strsigno (signo); name = (name == NULL) ? "" : name; msg = strsignal (signo); msg = (msg == NULL) ? "" : msg; printf ("%-4d%-18s%s\n", signo, name, msg); } return 0; } #endif /* ternary.c - Ternary Search Trees Copyright (C) 2001 Free Software Foundation, Inc. Contributed by Daniel Berlin (dan@cgsoftware.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #endif #ifdef HAVE_STDLIB_H #include #endif #include /* ternary.h - Ternary Search Trees Copyright 2001 Free Software Foundation, Inc. Contributed by Daniel Berlin (dan@cgsoftware.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef TERNARY_H_ #define TERNARY_H_ /* Ternary search trees */ typedef struct ternary_node_def *ternary_tree; typedef struct ternary_node_def { char splitchar; ternary_tree lokid; ternary_tree eqkid; ternary_tree hikid; } ternary_node; /* Insert string S into tree P, associating it with DATA. Return the data in the tree associated with the string if it's already there, and replace is 0. Otherwise, replaces if it it exists, inserts if it doesn't, and returns the data you passed in. */ PTR ternary_insert PARAMS ((ternary_tree *p, const char *s, PTR data, int replace)); /* Delete the ternary search tree rooted at P. Does NOT delete the data you associated with the strings. */ void ternary_cleanup PARAMS ((ternary_tree p)); /* Search the ternary tree for string S, returning the data associated with it if found. */ PTR ternary_search PARAMS ((const ternary_node *p, const char *s)); #endif /* Non-recursive so we don't waste stack space/time on large insertions. */ PTR ternary_insert (root, s, data, replace) ternary_tree *root; const char *s; PTR data; int replace; { int diff; ternary_tree curr, *pcurr; /* Start at the root. */ pcurr = root; /* Loop until we find the right position */ while ((curr = *pcurr)) { /* Calculate the difference */ diff = *s - curr->splitchar; /* Handle current char equal to node splitchar */ if (diff == 0) { /* Handle the case of a string we already have */ if (*s++ == 0) { if (replace) curr->eqkid = (ternary_tree) data; return (PTR) curr->eqkid; } pcurr = &(curr->eqkid); } /* Handle current char less than node splitchar */ else if (diff < 0) { pcurr = &(curr->lokid); } /* Handle current char greater than node splitchar */ else { pcurr = &(curr->hikid); } } /* It's not a duplicate string, and we should insert what's left of the string, into the tree rooted at curr */ for (;;) { /* Allocate the memory for the node, and fill it in */ *pcurr = (ternary_tree) xmalloc (sizeof (ternary_node)); curr = *pcurr; curr->splitchar = *s; curr->lokid = curr->hikid = curr->eqkid = 0; /* Place nodes until we hit the end of the string. When we hit it, place the data in the right place, and return. */ if (*s++ == 0) { curr->eqkid = (ternary_tree) data; return data; } pcurr = &(curr->eqkid); } } /* Free the ternary search tree rooted at p. 
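Only the nodes themselves are released; whatever the stored PTR values point to is left for the caller.  A minimal end-to-end sketch of the interface (illustrative only; some_data and other_data stand for the caller's own pointers):

     ternary_tree tree = 0;
     PTR found;

     ternary_insert (&tree, "gcc", some_data, 0);
     ternary_insert (&tree, "gdb", other_data, 0);
     found = ternary_search (tree, "gcc");
     ternary_cleanup (tree);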
*/ void ternary_cleanup (p) ternary_tree p; { if (p) { ternary_cleanup (p->lokid); if (p->splitchar) ternary_cleanup (p->eqkid); ternary_cleanup (p->hikid); free (p); } } /* Non-recursive find of a string in the ternary tree */ PTR ternary_search (p, s) const ternary_node *p; const char *s; { const ternary_node *curr; int diff, spchar; spchar = *s; curr = p; /* Loop while we haven't hit a NULL node or returned */ while (curr) { /* Calculate the difference */ diff = spchar - curr->splitchar; /* Handle the equal case */ if (diff == 0) { if (spchar == 0) return (PTR) curr->eqkid; spchar = *++s; curr = curr->eqkid; } /* Handle the less than case */ else if (diff < 0) curr = curr->lokid; /* All that's left is greater than */ else curr = curr->hikid; } return NULL; } /* For those who care, the recursive version of the search. Useful if you want a starting point for pmsearch or nearsearch. */ static PTR ternary_recursivesearch (p, s) const ternary_node *p; const char *s; { if (!p) return 0; if (*s < p->splitchar) return ternary_recursivesearch (p->lokid, s); else if (*s > p->splitchar) return ternary_recursivesearch (p->hikid, s); else { if (*s == 0) return (PTR) p->eqkid; return ternary_recursivesearch (p->eqkid, ++s); } } /* * Copyright (c) 1990 Regents of the University of California. * All rights reserved. * * %sccs.include.redist.c% */ /* @deftypefun int xatexit (void (*@var{fn}) (void)) Behaves as the standard @code{atexit} function, but with no limit on the number of registered functions. Returns 0 on success, or @minus{}1 on failure. If you use @code{xatexit} to register functions, you must use @code{xexit} to terminate your program. @end deftypefun */ /* Adapted from newlib/libc/stdlib/{,at}exit.[ch]. If you use xatexit, you must call xexit instead of exit. */ #include #ifdef ANSI_PROTOTYPES #include #else #define size_t unsigned long #endif #if VMS #include #include #else /* For systems with larger pointers than ints, this must be declared. */ PTR malloc PARAMS ((size_t)); #endif static void xatexit_cleanup PARAMS ((void)); /* Pointer to function run by xexit. */ extern void (*_xexit_cleanup) PARAMS ((void)); #define XATEXIT_SIZE 32 struct xatexit { struct xatexit *next; /* next in list */ int ind; /* next index in this table */ void (*fns[XATEXIT_SIZE]) PARAMS ((void)); /* the table itself */ }; /* Allocate one struct statically to guarantee that we can register at least a few handlers. */ static struct xatexit xatexit_first; /* Points to head of LIFO stack. */ static struct xatexit *xatexit_head = &xatexit_first; /* Register function FN to be run by xexit. Return 0 if successful, -1 if not. */ int xatexit (fn) void (*fn) PARAMS ((void)); { register struct xatexit *p; /* Tell xexit to call xatexit_cleanup. */ if (!_xexit_cleanup) _xexit_cleanup = xatexit_cleanup; p = xatexit_head; if (p->ind >= XATEXIT_SIZE) { if ((p = (struct xatexit *) malloc (sizeof *p)) == NULL) return -1; p->ind = 0; p->next = xatexit_head; xatexit_head = p; } p->fns[p->ind++] = fn; return 0; } /* Call any cleanup functions. */ static void xatexit_cleanup () { register struct xatexit *p; register int n; for (p = xatexit_head; p; p = p->next) for (n = p->ind; --n >= 0;) (*p->fns[n]) (); } /* xexit.c -- Run any exit handlers, then exit. Copyright (C) 1994, 95, 1997 Free Software Foundation, Inc. This file is part of the libiberty library. 
Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* @deftypefn Replacement void xexit (int @var{code}) Terminates the program. If any functions have been registered with the @code{xatexit} replacement function, they will be called first. Termination is handled via the system's normal @code{exit} call. @end deftypefn */ #ifdef HAVE_CONFIG_H #endif #include #ifdef HAVE_STDLIB_H #include #endif /* This variable is set by xatexit if it is called. This way, xmalloc doesn't drag xatexit into the link. */ void (*_xexit_cleanup) PARAMS ((void)); void xexit (code) int code; { if (_xexit_cleanup != NULL) (*_xexit_cleanup) (); exit (code); } /* memory allocation routines with error checking. Copyright 1989, 90, 91, 92, 93, 94 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* @deftypefn Replacement void* xmalloc (size_t) Allocate memory without fail. If @code{malloc} fails, this will print a message to @code{stderr} (using the name set by @code{xmalloc_set_program_name}, if any) and then call @code{xexit}. Note that it is therefore safe for a program to contain @code{#define malloc xmalloc} in its source. @end deftypefn @deftypefn Replacement void* xrealloc (void *@var{ptr}, size_t @var{size}) Reallocate memory without fail. This routine functions like @code{realloc}, but will behave the same as @code{xmalloc} if memory cannot be found. @end deftypefn @deftypefn Replacement void* xcalloc (size_t @var{nelem}, size_t @var{elsize}) Allocate memory without fail, and set it to zero. This routine functions like @code{calloc}, but will behave the same as @code{xmalloc} if memory cannot be found. @end deftypefn @deftypefn Replacement void xmalloc_set_program_name (const char *@var{name}) You can use this to set the name of the program used by @code{xmalloc_failed} when printing a failure message. @end deftypefn @deftypefn Replacement void xmalloc_failed (size_t) This function is not meant to be called by client code, and is listed here for completeness only. If any of the allocation routines fail, this function will be called to print an error message and terminate execution. 
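A typical pattern for the family as a whole looks like the following sketch (the program name and variables are placeholders, not requirements of the interface):

@example
  xmalloc_set_program_name ("mytool");
  buf = (char *) xmalloc (len);
  buf = (char *) xrealloc (buf, 2 * len);
@end example

Should either allocation fail, a message prefixed with @samp{mytool:} is printed and the program terminates through @code{xexit}.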
@end deftypefn */ #ifdef HAVE_CONFIG_H #endif #include #ifdef ANSI_PROTOTYPES #include #else #define size_t unsigned long #define ptrdiff_t long #endif #if VMS #include #include #else /* For systems with larger pointers than ints, these must be declared. */ PTR malloc PARAMS ((size_t)); PTR realloc PARAMS ((PTR, size_t)); PTR calloc PARAMS ((size_t, size_t)); PTR sbrk PARAMS ((ptrdiff_t)); #endif /* The program name if set. */ static const char *name = ""; #ifdef HAVE_SBRK /* The initial sbrk, set when the program name is set. Not used for win32 ports other than cygwin32. */ static char *first_break = NULL; #endif /* HAVE_SBRK */ void xmalloc_set_program_name (s) const char *s; { name = s; #ifdef HAVE_SBRK /* Win32 ports other than cygwin32 don't have brk() */ if (first_break == NULL) first_break = (char *) sbrk (0); #endif /* HAVE_SBRK */ } void xmalloc_failed (size) size_t size; { #ifdef HAVE_SBRK extern char **environ; size_t allocated; if (first_break != NULL) allocated = (char *) sbrk (0) - first_break; else allocated = (char *) sbrk (0) - (char *) &environ; fprintf (stderr, "\n%s%sout of memory allocating %lu bytes after a total of %lu bytes\n", name, *name ? ": " : "", (unsigned long) size, (unsigned long) allocated); #else /* HAVE_SBRK */ fprintf (stderr, "\n%s%sout of memory allocating %lu bytes\n", name, *name ? ": " : "", (unsigned long) size); #endif /* HAVE_SBRK */ xexit (1); } PTR xmalloc (size) size_t size; { PTR newmem; if (size == 0) size = 1; newmem = malloc (size); if (!newmem) xmalloc_failed (size); return (newmem); } PTR xcalloc (nelem, elsize) size_t nelem, elsize; { PTR newmem; if (nelem == 0 || elsize == 0) nelem = elsize = 1; newmem = calloc (nelem, elsize); if (!newmem) xmalloc_failed (nelem * elsize); return (newmem); } PTR xrealloc (oldmem, size) PTR oldmem; size_t size; { PTR newmem; if (size == 0) size = 1; if (!oldmem) newmem = malloc (size); else newmem = realloc (oldmem, size); if (!newmem) xmalloc_failed (size); return (newmem); } /* xmemdup.c -- Duplicate a memory buffer, using xcalloc. This trivial function is in the public domain. Jeff Garzik, September 1999. */ /* @deftypefn Replacement void* xmemdup (void *@var{input}, size_t @var{copy_size}, size_t @var{alloc_size}) Duplicates a region of memory without fail. First, @var{alloc_size} bytes are allocated, then @var{copy_size} bytes from @var{input} are copied into it, and the new memory is returned. If fewer bytes are copied than were allocated, the remaining memory is zeroed. @end deftypefn */ #ifdef HAVE_CONFIG_H #endif #include /* For size_t. */ #ifdef HAVE_STRING_H #include #endif PTR xmemdup (input, copy_size, alloc_size) const PTR input; size_t copy_size; size_t alloc_size; { PTR output = xcalloc (1, alloc_size); memcpy (output, input, copy_size); return output; } /* xstrdup.c -- Duplicate a string in memory, using xmalloc. This trivial function is in the public domain. Ian Lance Taylor, Cygnus Support, December 1995. */ /* @deftypefn Replacement char* xstrdup (const char *@var{s}) Duplicates a character string without fail, using @code{xmalloc} to obtain memory. @end deftypefn */ #include #ifdef HAVE_CONFIG_H #endif #ifdef HAVE_STRING_H #include #endif char * xstrdup (s) const char *s; { register size_t len = strlen (s) + 1; register char *ret = xmalloc (len); memcpy (ret, s, len); return ret; } /* xstrerror.c -- jacket routine for more robust strerror() usage. Fri Jun 16 18:30:00 1995 Pat Rankin This code is in the public domain. 
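In practice it is simply substituted for strerror when formatting diagnostics, e.g. (progname and fname here being the caller's own variables, shown only as a sketch):

     fprintf (stderr, "%s: cannot open %s: %s\n",
              progname, fname, xstrerror (errno));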
*/ /* @deftypefn Replacement char* xstrerror (int @var{errnum}) Behaves exactly like the standard @code{strerror} function, but will never return a @code{NULL} pointer. @end deftypefn */ #include #ifdef VMS #include #if !defined (__STRICT_ANSI__) && !defined (__HIDE_FORBIDDEN_NAMES) extern char *strerror PARAMS ((int,...)); #define DONT_DECLARE_STRERROR #endif #endif /* VMS */ #ifndef DONT_DECLARE_STRERROR extern char *strerror PARAMS ((int)); #endif /* If strerror returns NULL, we'll format the number into a static buffer. */ #define ERRSTR_FMT "undocumented error #%d" static char xstrerror_buf[sizeof ERRSTR_FMT + 20]; /* Like strerror, but result is never a null pointer. */ char * xstrerror (errnum) int errnum; { char *errstr; #ifdef VMS char *(*vmslib_strerror) PARAMS ((int,...)); /* Override any possibly-conflicting declaration from system header. */ vmslib_strerror = (char *(*) PARAMS ((int,...))) strerror; /* Second argument matters iff first is EVMSERR, but it's simpler to pass it unconditionally. `vaxc$errno' is declared in and maintained by the run-time library in parallel to `errno'. We assume that `errnum' corresponds to the last value assigned to errno by the run-time library, hence vaxc$errno will be relevant. */ errstr = (*vmslib_strerror) (errnum, vaxc$errno); #else errstr = strerror (errnum); #endif /* If `errnum' is out of range, result might be NULL. We'll fix that. */ if (!errstr) { sprintf (xstrerror_buf, ERRSTR_FMT, errnum); errstr = xstrerror_buf; } return errstr; } /* Copyright (C) 1991, 1992, 1996, 1998, 2004 Free Software Foundation, Inc. This file is derived from mkstemp.c from the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with the GNU C Library; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_CONFIG_H #endif #include #ifdef HAVE_STDLIB_H #include #endif #ifdef HAVE_STRING_H #include #endif #include #include #include #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_SYS_TIME_H #include #endif /* We need to provide a type for gcc_uint64_t. */ #ifdef __GNUC__ __extension__ typedef unsigned long long gcc_uint64_t; #else typedef unsigned long gcc_uint64_t; #endif #ifndef TMP_MAX #define TMP_MAX 16384 #endif /* @deftypefn Replacement int mkstemps (char *@var{template}, int @var{suffix_len}) Generate a unique temporary file name from @var{template}. @var{template} has the form: @example @var{path}/ccXXXXXX@var{suffix} @end example @var{suffix_len} tells us how long @var{suffix} is (it can be zero length). The last six characters of @var{template} before @var{suffix} must be @samp{XXXXXX}; they are replaced with a string that makes the filename unique. Returns a file descriptor open on the file for reading and writing. 
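For example (an illustrative sketch; the directory and suffix are arbitrary):

@example
  char fname[] = "/tmp/ccXXXXXX.s";
  int fd = mkstemps (fname, 2);
@end example

Here @var{suffix_len} is 2 because the suffix @samp{.s} is two characters long, and @var{template} must be a writable buffer since the @samp{XXXXXX} characters are overwritten in place.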
@end deftypefn */ int mkstemps (template, suffix_len) char *template; int suffix_len; { static const char letters[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; static gcc_uint64_t value; #ifdef HAVE_GETTIMEOFDAY struct timeval tv; #endif char *XXXXXX; size_t len; int count; len = strlen (template); if ((int) len < 6 + suffix_len || strncmp (&template[len - 6 - suffix_len], "XXXXXX", 6)) { return -1; } XXXXXX = &template[len - 6 - suffix_len]; #ifdef HAVE_GETTIMEOFDAY /* Get some more or less random data. */ gettimeofday (&tv, NULL); value += ((gcc_uint64_t) tv.tv_usec << 16) ^ tv.tv_sec ^ getpid (); #else value += getpid (); #endif for (count = 0; count < TMP_MAX; ++count) { gcc_uint64_t v = value; int fd; /* Fill in the random bits. */ XXXXXX[0] = letters[v % 62]; v /= 62; XXXXXX[1] = letters[v % 62]; v /= 62; XXXXXX[2] = letters[v % 62]; v /= 62; XXXXXX[3] = letters[v % 62]; v /= 62; XXXXXX[4] = letters[v % 62]; v /= 62; XXXXXX[5] = letters[v % 62]; fd = open (template, O_RDWR|O_CREAT|O_EXCL, 0600); if (fd >= 0) /* The file does not exist. */ return fd; /* This is a random value. It is only necessary that the next TMP_MAX values generated by adding 7777 to VALUE are different with (module 2^32). */ value += 7777; } /* We return the null string if we can't find a unique file name. */ template[0] = '\0'; return -1; } /* CPP Library - charsets Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Broken out of c-lex.c Apr 2003, adding valid C99 UCN ranges. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Get common system includes and various definitions and declarations based on autoconf macros. Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_SYSTEM_H #define GCC_SYSTEM_H /* We must include stdarg.h before stdio.h. */ #include #ifndef va_copy # ifdef __va_copy # define va_copy(d,s) __va_copy((d),(s)) # else # define va_copy(d,s) ((d) = (s)) # endif #endif #ifdef HAVE_STDDEF_H # include #endif #include /* Define a generic NULL if one hasn't already been defined. */ #ifndef NULL #define NULL 0 #endif /* The compiler is not a multi-threaded application and therefore we do not have to use the locking functions. 
In fact, using the locking functions can cause the compiler to be significantly slower under I/O bound conditions (such as -g -O0 on very large source files). HAVE_DECL_PUTC_UNLOCKED actually indicates whether or not the stdio code is multi-thread safe by default. If it is set to 0, then do not worry about using the _unlocked functions. fputs_unlocked, fwrite_unlocked, and fprintf_unlocked are extensions and need to be prototyped by hand (since we do not define _GNU_SOURCE). */ #if defined HAVE_DECL_PUTC_UNLOCKED && HAVE_DECL_PUTC_UNLOCKED # ifdef HAVE_PUTC_UNLOCKED # undef putc # define putc(C, Stream) putc_unlocked (C, Stream) # endif # ifdef HAVE_FPUTC_UNLOCKED # undef fputc # define fputc(C, Stream) fputc_unlocked (C, Stream) # endif # ifdef HAVE_FPUTS_UNLOCKED # undef fputs # define fputs(String, Stream) fputs_unlocked (String, Stream) # if defined (HAVE_DECL_FPUTS_UNLOCKED) && !HAVE_DECL_FPUTS_UNLOCKED extern int fputs_unlocked (const char *, FILE *); # endif # endif # ifdef HAVE_FWRITE_UNLOCKED # undef fwrite # define fwrite(Ptr, Size, N, Stream) fwrite_unlocked (Ptr, Size, N, Stream) # if defined (HAVE_DECL_FWRITE_UNLOCKED) && !HAVE_DECL_FWRITE_UNLOCKED extern int fwrite_unlocked (const void *, size_t, size_t, FILE *); # endif # endif # ifdef HAVE_FPRINTF_UNLOCKED # undef fprintf /* We can't use a function-like macro here because we don't know if we have varargs macros. */ # define fprintf fprintf_unlocked # if defined (HAVE_DECL_FPRINTF_UNLOCKED) && !HAVE_DECL_FPRINTF_UNLOCKED extern int fprintf_unlocked (FILE *, const char *, ...); # endif # endif #endif /* ??? Glibc's fwrite/fread_unlocked macros cause "warning: signed and unsigned type in conditional expression". */ #undef fread_unlocked #undef fwrite_unlocked /* There are an extraordinary number of issues with . The last straw is that it varies with the locale. Use libiberty's replacement instead. */ #include #include #if !defined (errno) && defined (HAVE_DECL_ERRNO) && !HAVE_DECL_ERRNO extern int errno; #endif /* Some of glibc's string inlines cause warnings. Plus we'd rather rely on (and therefore test) GCC's string builtins. */ #define __NO_STRING_INLINES #ifdef STRING_WITH_STRINGS # include # include #else # ifdef HAVE_STRING_H # include # else # ifdef HAVE_STRINGS_H # include # endif # endif #endif #ifdef HAVE_STDLIB_H # include #endif /* If we don't have an overriding definition, set SUCCESS_EXIT_CODE and FATAL_EXIT_CODE to EXIT_SUCCESS and EXIT_FAILURE respectively, or 0 and 1 if those macros are not defined. */ #ifndef SUCCESS_EXIT_CODE # ifdef EXIT_SUCCESS # define SUCCESS_EXIT_CODE EXIT_SUCCESS # else # define SUCCESS_EXIT_CODE 0 # endif #endif #ifndef FATAL_EXIT_CODE # ifdef EXIT_FAILURE # define FATAL_EXIT_CODE EXIT_FAILURE # else # define FATAL_EXIT_CODE 1 # endif #endif #ifdef HAVE_UNISTD_H # include #endif #ifdef HAVE_SYS_PARAM_H # include /* We use this identifier later and it appears in some vendor param.h's. */ # undef PREFETCH #endif #if HAVE_LIMITS_H # include #endif /* Get definitions of HOST_WIDE_INT and HOST_WIDEST_INT. */ /* HOST_WIDE_INT definitions for the GNU compiler. Copyright (C) 1998, 2002 Free Software Foundation, Inc. This file is part of GCC. Provide definitions for macros which depend on HOST_BITS_PER_INT and HOST_BITS_PER_LONG. */ #ifndef GCC_HWINT_H #define GCC_HWINT_H /* This describes the machine the compiler is hosted on. 
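For example, on a typical ILP32 host SIZEOF_INT and SIZEOF_LONG are both 4, so HOST_BITS_PER_INT and HOST_BITS_PER_LONG both come out as 32, while an LP64 host has SIZEOF_LONG == 8 and hence HOST_BITS_PER_LONG == 64.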
*/ #define HOST_BITS_PER_CHAR CHAR_BIT #define HOST_BITS_PER_SHORT (CHAR_BIT * SIZEOF_SHORT) #define HOST_BITS_PER_INT (CHAR_BIT * SIZEOF_INT) #define HOST_BITS_PER_LONG (CHAR_BIT * SIZEOF_LONG) /* If HAVE_LONG_LONG and SIZEOF_LONG_LONG aren't defined, but GCC_VERSION >= 3000, assume this is the second or later stage of a bootstrap, we do have long long, and it's 64 bits. (This is required by C99; we do have some ports that violate that assumption but they're all cross-compile-only.) Just in case, force a constraint violation if that assumption is incorrect. */ #if !defined HAVE_LONG_LONG # if GCC_VERSION >= 3000 # define HAVE_LONG_LONG 1 # define SIZEOF_LONG_LONG 8 extern char sizeof_long_long_must_be_8[sizeof(long long) == 8 ? 1 : -1]; # endif #endif #ifdef HAVE_LONG_LONG # define HOST_BITS_PER_LONGLONG (CHAR_BIT * SIZEOF_LONG_LONG) #endif #ifdef HAVE___INT64 # define HOST_BITS_PER___INT64 (CHAR_BIT * SIZEOF___INT64) #endif /* Set HOST_WIDE_INT. This should be the widest efficient host integer type. It can be 32 or 64 bits, except that if we are targeting a machine with 64-bit size_t then it has to be 64 bits. With a sane ABI, 'long' is the largest efficient host integer type. Thus, we use that unless we have to use 'long long' or '__int64' because we're targeting a 64-bit machine from a 32-bit host. */ #if HOST_BITS_PER_LONG >= 64 || !defined NEED_64BIT_HOST_WIDE_INT # define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_LONG # define HOST_WIDE_INT long #else # if HOST_BITS_PER_LONGLONG >= 64 # define HOST_BITS_PER_WIDE_INT HOST_BITS_PER_LONGLONG # define HOST_WIDE_INT long long # else # if HOST_BITS_PER___INT64 >= 64 # define HOST_BITS_PER_WIDE_INT HOST_BITS_PER___INT64 # define HOST_WIDE_INT __int64 # else #error "Unable to find a suitable type for HOST_WIDE_INT" # endif # endif #endif /* Various printf format strings for HOST_WIDE_INT. */ #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG # define HOST_WIDE_INT_PRINT "l" # define HOST_WIDE_INT_PRINT_C "L" /* 'long' might be 32 or 64 bits, and the number of leading zeroes must be tweaked accordingly. */ # if HOST_BITS_PER_WIDE_INT == 64 # define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%lx%016lx" # else # define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%lx%08lx" # endif #else # define HOST_WIDE_INT_PRINT "ll" # define HOST_WIDE_INT_PRINT_C "LL" /* We can assume that 'long long' is at least 64 bits. */ # define HOST_WIDE_INT_PRINT_DOUBLE_HEX "0x%llx%016llx" #endif /* HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG */ #define HOST_WIDE_INT_PRINT_DEC "%" HOST_WIDE_INT_PRINT "d" #define HOST_WIDE_INT_PRINT_DEC_C HOST_WIDE_INT_PRINT_DEC HOST_WIDE_INT_PRINT_C #define HOST_WIDE_INT_PRINT_UNSIGNED "%" HOST_WIDE_INT_PRINT "u" #define HOST_WIDE_INT_PRINT_HEX "0x%" HOST_WIDE_INT_PRINT "x" /* Set HOST_WIDEST_INT. This is a 64-bit type unless the compiler in use has no 64-bit type at all; in that case it's 32 bits. 
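Its companion printf format macros below follow the same pattern as the HOST_WIDE_INT ones above; a hypothetical diagnostic using them would be written as, e.g.

     fprintf (stderr, "offset = " HOST_WIDEST_INT_PRINT_DEC "\n",
              (HOST_WIDEST_INT) off);

where off is a placeholder for the caller's own variable.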
*/ #if HOST_BITS_PER_WIDE_INT >= 64 \ || (HOST_BITS_PER_LONGLONG < 64 && HOST_BITS_PER___INT64 < 64) # define HOST_WIDEST_INT HOST_WIDE_INT # define HOST_BITS_PER_WIDEST_INT HOST_BITS_PER_WIDE_INT # define HOST_WIDEST_INT_PRINT_DEC HOST_WIDE_INT_PRINT_DEC # define HOST_WIDEST_INT_PRINT_DEC_C HOST_WIDE_INT_PRINT_DEC_C # define HOST_WIDEST_INT_PRINT_UNSIGNED HOST_WIDE_INT_PRINT_UNSIGNED # define HOST_WIDEST_INT_PRINT_HEX HOST_WIDE_INT_PRINT_HEX # define HOST_WIDEST_INT_PRINT_DOUBLE_HEX HOST_WIDE_INT_PRINT_DOUBLE_HEX #else # if HOST_BITS_PER_LONGLONG >= 64 # define HOST_BITS_PER_WIDEST_INT HOST_BITS_PER_LONGLONG # define HOST_WIDEST_INT long long # else # if HOST_BITS_PER___INT64 >= 64 # define HOST_BITS_PER_WIDEST_INT HOST_BITS_PER___INT64 # define HOST_WIDEST_INT __int64 # else #error "This line should be impossible to reach" # endif # endif # define HOST_WIDEST_INT_PRINT_DEC "%lld" # define HOST_WIDEST_INT_PRINT_DEC_C "%lldLL" # define HOST_WIDEST_INT_PRINT_UNSIGNED "%llu" # define HOST_WIDEST_INT_PRINT_HEX "0x%llx" # define HOST_WIDEST_INT_PRINT_DOUBLE_HEX "0x%llx%016llx" #endif #endif /* ! GCC_HWINT_H */ /* A macro to determine whether a VALUE lies inclusively within a certain range without evaluating the VALUE more than once. This macro won't warn if the VALUE is unsigned and the LOWER bound is zero, as it would e.g. with "VALUE >= 0 && ...". Note the LOWER bound *is* evaluated twice, and LOWER must not be greater than UPPER. However the bounds themselves can be either positive or negative. */ #define IN_RANGE(VALUE, LOWER, UPPER) \ ((unsigned HOST_WIDE_INT) ((VALUE) - (LOWER)) <= ((UPPER) - (LOWER))) /* Infrastructure for defining missing _MAX and _MIN macros. Note that macros defined with these cannot be used in #if. */ /* The extra casts work around common compiler bugs. */ #define INTTYPE_SIGNED(t) (! ((t) 0 < (t) -1)) /* The outer cast is needed to work around a bug in Cray C 5.0.3.0. It is necessary at least when t == time_t. */ #define INTTYPE_MINIMUM(t) ((t) (INTTYPE_SIGNED (t) \ ? ~ (t) 0 << (sizeof(t) * CHAR_BIT - 1) : (t) 0)) #define INTTYPE_MAXIMUM(t) ((t) (~ (t) 0 - INTTYPE_MINIMUM (t))) /* Use that infrastructure to provide a few constants. */ #ifndef UCHAR_MAX # define UCHAR_MAX INTTYPE_MAXIMUM (unsigned char) #endif #ifdef TIME_WITH_SYS_TIME # include # include #else # if HAVE_SYS_TIME_H # include # else # ifdef HAVE_TIME_H # include # endif # endif #endif #ifdef HAVE_FCNTL_H # include #else # ifdef HAVE_SYS_FILE_H # include # endif #endif #ifndef SEEK_SET # define SEEK_SET 0 # define SEEK_CUR 1 # define SEEK_END 2 #endif #ifndef F_OK # define F_OK 0 # define X_OK 1 # define W_OK 2 # define R_OK 4 #endif #ifndef O_RDONLY # define O_RDONLY 0 #endif #ifndef O_WRONLY # define O_WRONLY 1 #endif /* Some systems define these in, e.g., param.h. We undefine these names here to avoid the warnings. We prefer to use our definitions since we know they are correct. */ #undef MIN #undef MAX #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) /* Returns the least number N such that N * Y >= X. 
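For example, CEIL (10, 4) evaluates to 3 and CEIL (8, 4) to 2.  Note that Y is evaluated twice.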
*/ #define CEIL(x,y) (((x) + (y) - 1) / (y)) #ifdef HAVE_SYS_WAIT_H #define __thread __something_other_than__thread #include #undef __thread #endif #ifndef WIFSIGNALED #define WIFSIGNALED(S) (((S) & 0xff) != 0 && ((S) & 0xff) != 0x7f) #endif #ifndef WTERMSIG #define WTERMSIG(S) ((S) & 0x7f) #endif #ifndef WIFEXITED #define WIFEXITED(S) (((S) & 0xff) == 0) #endif #ifndef WEXITSTATUS #define WEXITSTATUS(S) (((S) & 0xff00) >> 8) #endif #ifndef WSTOPSIG #define WSTOPSIG WEXITSTATUS #endif #ifndef WCOREDUMP #define WCOREDUMP(S) ((S) & WCOREFLG) #endif #ifndef WCOREFLG #define WCOREFLG 0200 #endif /* The HAVE_DECL_* macros are three-state, undefined, 0 or 1. If they are defined to 0 then we must provide the relevant declaration here. These checks will be in the undefined state while configure is running so be careful to test "defined (HAVE_DECL_*)". */ #if defined (HAVE_DECL_ATOF) && !HAVE_DECL_ATOF extern double atof (const char *); #endif #if defined (HAVE_DECL_ATOL) && !HAVE_DECL_ATOL extern long atol (const char *); #endif #if defined (HAVE_DECL_FREE) && !HAVE_DECL_FREE extern void free (void *); #endif #if defined (HAVE_DECL_GETCWD) && !HAVE_DECL_GETCWD extern char *getcwd (char *, size_t); #endif #if defined (HAVE_DECL_GETENV) && !HAVE_DECL_GETENV extern char *getenv (const char *); #endif #if defined (HAVE_DECL_GETOPT) && !HAVE_DECL_GETOPT extern int getopt (int, char * const *, const char *); #endif #if defined (HAVE_DECL_GETWD) && !HAVE_DECL_GETWD extern char *getwd (char *); #endif #if defined (HAVE_DECL_SBRK) && !HAVE_DECL_SBRK extern void *sbrk (int); #endif #if defined (HAVE_DECL_STRSTR) && !HAVE_DECL_STRSTR extern char *strstr (const char *, const char *); #endif #ifdef HAVE_MALLOC_H #include #endif #if defined (HAVE_DECL_MALLOC) && !HAVE_DECL_MALLOC extern void *malloc (size_t); #endif #if defined (HAVE_DECL_CALLOC) && !HAVE_DECL_CALLOC extern void *calloc (size_t, size_t); #endif #if defined (HAVE_DECL_REALLOC) && !HAVE_DECL_REALLOC extern void *realloc (void *, size_t); #endif /* If the system doesn't provide strsignal, we get it defined in libiberty but no declaration is supplied. */ #if !defined (HAVE_STRSIGNAL) \ || (defined (HAVE_DECL_STRSIGNAL) && !HAVE_DECL_STRSIGNAL) # ifndef strsignal extern const char *strsignal (int); # endif #endif #ifdef HAVE_GETRLIMIT # if defined (HAVE_DECL_GETRLIMIT) && !HAVE_DECL_GETRLIMIT # ifndef getrlimit struct rlimit; extern int getrlimit (int, struct rlimit *); # endif # endif #endif #ifdef HAVE_SETRLIMIT # if defined (HAVE_DECL_SETRLIMIT) && !HAVE_DECL_SETRLIMIT # ifndef setrlimit struct rlimit; extern int setrlimit (int, const struct rlimit *); # endif # endif #endif #if defined (HAVE_DECL_ABORT) && !HAVE_DECL_ABORT extern void abort (void); #endif #if defined (HAVE_DECL_SNPRINTF) && !HAVE_DECL_SNPRINTF extern int snprintf (char *, size_t, const char *, ...); #endif /* 1 if we have C99 designated initializers. */ #if !defined(HAVE_DESIGNATED_INITIALIZERS) #define HAVE_DESIGNATED_INITIALIZERS \ ((GCC_VERSION >= 2007) || (__STDC_VERSION__ >= 199901L)) #endif #if HAVE_SYS_STAT_H # include #endif /* Test if something is a normal file. */ #ifndef S_ISREG #define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) #endif /* Test if something is a directory. */ #ifndef S_ISDIR #define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) #endif /* Test if something is a character special file. */ #ifndef S_ISCHR #define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) #endif /* Test if something is a block special file. 
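Like the other S_IS* predicates here, these are applied to the st_mode field filled in by stat; a hypothetical check for a regular file (path and its handling are the caller's own) looks like

     struct stat st;

     if (stat (path, &st) == 0 && S_ISREG (st.st_mode))
       read_the_file (path);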
*/ #ifndef S_ISBLK #define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK) #endif /* Test if something is a socket. */ #ifndef S_ISSOCK # ifdef S_IFSOCK # define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK) # else # define S_ISSOCK(m) 0 # endif #endif /* Test if something is a FIFO. */ #ifndef S_ISFIFO # ifdef S_IFIFO # define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO) # else # define S_ISFIFO(m) 0 # endif #endif /* Define well known filenos if the system does not define them. */ #ifndef STDIN_FILENO # define STDIN_FILENO 0 #endif #ifndef STDOUT_FILENO # define STDOUT_FILENO 1 #endif #ifndef STDERR_FILENO # define STDERR_FILENO 2 #endif /* Some systems have mkdir that takes a single argument. */ #ifdef MKDIR_TAKES_ONE_ARG # define mkdir(a,b) mkdir(a) #endif /* Provide a way to print an address via printf. */ #ifndef HOST_PTR_PRINTF # ifdef HAVE_PRINTF_PTR # define HOST_PTR_PRINTF "%p" # elif SIZEOF_INT == SIZEOF_VOID_P # define HOST_PTR_PRINTF "%x" # elif SIZEOF_LONG == SIZEOF_VOID_P # define HOST_PTR_PRINTF "%lx" # else # define HOST_PTR_PRINTF "%llx" # endif #endif /* ! HOST_PTR_PRINTF */ /* By default, colon separates directories in a path. */ #ifndef PATH_SEPARATOR #define PATH_SEPARATOR ':' #endif /* Filename handling macros. */ /* These should be phased out in favor of IS_DIR_SEPARATOR, where possible. */ #ifndef DIR_SEPARATOR # define DIR_SEPARATOR '/' # ifdef HAVE_DOS_BASED_FILE_SYSTEM # define DIR_SEPARATOR_2 '\\' # endif #endif /* Get libiberty declarations. */ /* Provide a default for the HOST_BIT_BUCKET. This suffices for POSIX-like hosts. */ #ifndef HOST_BIT_BUCKET #define HOST_BIT_BUCKET "/dev/null" #endif /* Be conservative and only use enum bitfields with GCC. FIXME: provide a complete autoconf test for buggy enum bitfields. */ #if (GCC_VERSION > 2000) #define ENUM_BITFIELD(TYPE) __extension__ enum TYPE #else #define ENUM_BITFIELD(TYPE) unsigned int #endif #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *) 0)->MEMBER) #endif /* Various error reporting routines want to use __FUNCTION__. */ #if (GCC_VERSION < 2007) #ifndef __FUNCTION__ #define __FUNCTION__ "?" #endif /* ! __FUNCTION__ */ #endif /* __builtin_expect(A, B) evaluates to A, but notifies the compiler that the most likely value of A is B. This feature was added at some point between 2.95 and 3.0. Let's use 3.0 as the lower bound for now. */ #if (GCC_VERSION < 3000) #define __builtin_expect(a, b) (a) #endif /* Provide a fake boolean type. We make no attempt to use the C99 _Bool, as it may not be available in the bootstrap compiler, and even if it is, it is liable to be buggy. This must be after all inclusion of system headers, as some of them will mess us up. */ #undef bool #undef true #undef false #undef TRUE #undef FALSE #define bool unsigned char #define true 1 #define false 0 #define TRUE true #define FALSE false /* Some compilers do not allow the use of unsigned char in bitfields. */ #define BOOL_BITFIELD unsigned int /* As the last action in this file, we poison the identifiers that shouldn't be used. Note, luckily gcc-3.0's token-based integrated preprocessor won't trip on poisoned identifiers that arrive from the expansion of macros. E.g. #define strrchr rindex, won't error if rindex is poisoned after this directive is issued and later on strrchr is called. Note: We define bypass macros for the few cases where we really want to use the libc memory allocation routines. Otherwise we insist you use the "x" versions from libiberty. 
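For instance, once malloc has been poisoned below, a direct call such as

     p = malloc (n);

draws an "attempt to use poisoned" error from GCC 3.0 and later, whereas the rare code that genuinely needs the libc allocator writes

     p = really_call_malloc (n);

which survives because the poisoned identifier only arrives via macro expansion (p and n are placeholder names in this sketch).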
*/ #define really_call_malloc malloc #define really_call_calloc calloc #define really_call_realloc realloc #if defined(FLEX_SCANNER) || defined(YYBISON) || defined(YYBYACC) /* Flex and bison use malloc and realloc. Yuk. Note that this means really_call_* cannot be used in a .l or .y file. */ #define malloc xmalloc #define realloc xrealloc #endif #if (GCC_VERSION >= 3000) /* Note autoconf checks for prototype declarations and includes system.h while doing so. Only poison these tokens if actually compiling gcc, so that the autoconf declaration tests for malloc etc don't spuriously fail. */ #ifdef IN_GCC #undef calloc #undef strdup #pragma GCC poison calloc strdup #if !defined(FLEX_SCANNER) && !defined(YYBISON) #undef malloc #undef realloc #pragma GCC poison malloc realloc #endif /* Old target macros that have moved to the target hooks structure. */ #pragma GCC poison ASM_OPEN_PAREN ASM_CLOSE_PAREN \ FUNCTION_PROLOGUE FUNCTION_EPILOGUE \ FUNCTION_END_PROLOGUE FUNCTION_BEGIN_EPILOGUE \ DECL_MACHINE_ATTRIBUTES COMP_TYPE_ATTRIBUTES INSERT_ATTRIBUTES \ VALID_MACHINE_DECL_ATTRIBUTE VALID_MACHINE_TYPE_ATTRIBUTE \ SET_DEFAULT_TYPE_ATTRIBUTES SET_DEFAULT_DECL_ATTRIBUTES \ MERGE_MACHINE_TYPE_ATTRIBUTES MERGE_MACHINE_DECL_ATTRIBUTES \ MD_INIT_BUILTINS MD_EXPAND_BUILTIN ASM_OUTPUT_CONSTRUCTOR \ ASM_OUTPUT_DESTRUCTOR SIGNED_CHAR_SPEC MAX_CHAR_TYPE_SIZE \ WCHAR_UNSIGNED UNIQUE_SECTION SELECT_SECTION SELECT_RTX_SECTION \ ENCODE_SECTION_INFO STRIP_NAME_ENCODING ASM_GLOBALIZE_LABEL \ ASM_OUTPUT_MI_THUNK CONST_COSTS RTX_COSTS DEFAULT_RTX_COSTS \ ADDRESS_COST MACHINE_DEPENDENT_REORG ASM_FILE_START ASM_FILE_END \ ASM_SIMPLIFY_DWARF_ADDR INIT_TARGET_OPTABS INIT_SUBTARGET_OPTABS \ INIT_GOFAST_OPTABS MULSI3_LIBCALL MULDI3_LIBCALL DIVSI3_LIBCALL \ DIVDI3_LIBCALL UDIVSI3_LIBCALL UDIVDI3_LIBCALL MODSI3_LIBCALL \ MODDI3_LIBCALL UMODSI3_LIBCALL UMODDI3_LIBCALL BUILD_VA_LIST_TYPE \ PRETEND_OUTGOING_VARARGS_NAMED STRUCT_VALUE_INCOMING_REGNUM \ ASM_OUTPUT_SECTION_NAME PROMOTE_FUNCTION_ARGS \ STRUCT_VALUE_INCOMING STRICT_ARGUMENT_NAMING \ PROMOTE_FUNCTION_RETURN PROMOTE_PROTOTYPES STRUCT_VALUE_REGNUM \ SETUP_INCOMING_VARARGS EXPAND_BUILTIN_SAVEREGS \ DEFAULT_SHORT_ENUMS SPLIT_COMPLEX_ARGS MD_ASM_CLOBBERS \ HANDLE_PRAGMA_REDEFINE_EXTNAME HANDLE_PRAGMA_EXTERN_PREFIX /* Other obsolete target macros, or macros that used to be in target headers and were not used, and may be obsolete or may never have been used. 
*/ #pragma GCC poison INT_ASM_OP ASM_OUTPUT_EH_REGION_BEG CPP_PREDEFINES \ ASM_OUTPUT_EH_REGION_END ASM_OUTPUT_LABELREF_AS_INT SMALL_STACK \ DOESNT_NEED_UNWINDER EH_TABLE_LOOKUP OBJC_SELECTORS_WITHOUT_LABELS \ OMIT_EH_TABLE EASY_DIV_EXPR IMPLICIT_FIX_EXPR \ LONGJMP_RESTORE_FROM_STACK MAX_INT_TYPE_SIZE ASM_IDENTIFY_GCC \ STDC_VALUE TRAMPOLINE_ALIGN ASM_IDENTIFY_GCC_AFTER_SOURCE \ SLOW_ZERO_EXTEND SUBREG_REGNO_OFFSET DWARF_LINE_MIN_INSTR_LENGTH \ TRADITIONAL_RETURN_FLOAT NO_BUILTIN_SIZE_TYPE \ NO_BUILTIN_PTRDIFF_TYPE NO_BUILTIN_WCHAR_TYPE NO_BUILTIN_WINT_TYPE \ BLOCK_PROFILER BLOCK_PROFILER_CODE FUNCTION_BLOCK_PROFILER \ FUNCTION_BLOCK_PROFILER_EXIT MACHINE_STATE_SAVE \ MACHINE_STATE_RESTORE SCCS_DIRECTIVE SECTION_ASM_OP \ ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL ASM_OUTPUT_INTERNAL_LABEL \ OBJC_PROLOGUE ALLOCATE_TRAMPOLINE HANDLE_PRAGMA ROUND_TYPE_SIZE \ ROUND_TYPE_SIZE_UNIT CONST_SECTION_ASM_OP CRT_GET_RFIB_TEXT \ DBX_LBRAC_FIRST DBX_OUTPUT_ENUM DBX_OUTPUT_SOURCE_FILENAME \ DBX_WORKING_DIRECTORY INSN_CACHE_DEPTH INSN_CACHE_SIZE \ INSN_CACHE_LINE_WIDTH INIT_SECTION_PREAMBLE NEED_ATEXIT ON_EXIT \ EXIT_BODY OBJECT_FORMAT_ROSE MULTIBYTE_CHARS MAP_CHARACTER \ LIBGCC_NEEDS_DOUBLE FINAL_PRESCAN_LABEL DEFAULT_CALLER_SAVES \ LOAD_ARGS_REVERSED MAX_INTEGER_COMPUTATION_MODE \ CONVERT_HARD_REGISTER_TO_SSA_P ASM_OUTPUT_MAIN_SOURCE_FILENAME \ FIRST_INSN_ADDRESS TEXT_SECTION SHARED_BSS_SECTION_ASM_OP \ PROMOTED_MODE EXPAND_BUILTIN_VA_END \ LINKER_DOES_NOT_WORK_WITH_DWARF2 FUNCTION_ARG_KEEP_AS_REFERENCE \ GIV_SORT_CRITERION MAX_LONG_TYPE_SIZE MAX_LONG_DOUBLE_TYPE_SIZE \ MAX_WCHAR_TYPE_SIZE GCOV_TYPE_SIZE SHARED_SECTION_ASM_OP \ INTEGRATE_THRESHOLD \ FINAL_REG_PARM_STACK_SPACE MAYBE_REG_PARM_STACK_SPACE \ TRADITIONAL_PIPELINE_INTERFACE DFA_PIPELINE_INTERFACE \ DBX_OUTPUT_STANDARD_TYPES BUILTIN_SETJMP_FRAME_VALUE \ SUNOS4_SHARED_LIBRARIES PROMOTE_FOR_CALL_ONLY \ SPACE_AFTER_L_OPTION NO_RECURSIVE_FUNCTION_CSE \ DEFAULT_MAIN_RETURN TARGET_MEM_FUNCTIONS /* Hooks that are no longer used. */ #pragma GCC poison LANG_HOOKS_FUNCTION_MARK LANG_HOOKS_FUNCTION_FREE \ LANG_HOOKS_MARK_TREE LANG_HOOKS_INSERT_DEFAULT_ATTRIBUTES /* Libiberty macros that are no longer used in GCC. */ #undef ANSI_PROTOTYPES #undef PTR_CONST #undef LONG_DOUBLE #undef VPARAMS #undef VA_OPEN #undef VA_FIXEDARG #undef VA_CLOSE #undef VA_START #pragma GCC poison ANSI_PROTOTYPES PTR_CONST LONG_DOUBLE VPARAMS VA_OPEN \ VA_FIXEDARG VA_CLOSE VA_START #endif /* IN_GCC */ /* Note: not all uses of the `index' token (e.g. variable names and structure members) have been eliminated. */ #undef bcopy #undef bzero #undef bcmp #undef rindex #pragma GCC poison bcopy bzero bcmp rindex #endif /* GCC >= 3.0 */ #endif /* ! GCC_SYSTEM_H */ /* Definitions for CPP library. Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Written by Per Bothner, 1994-95. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ #ifndef LIBCPP_CPPLIB_H #define LIBCPP_CPPLIB_H #include /* Hash tables. Copyright (C) 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef LIBCPP_SYMTAB_H #define LIBCPP_SYMTAB_H #ifndef GTY #define GTY(x) /* nothing */ #endif /* This is what each hash table entry points to. It may be embedded deeply within another object. */ typedef struct ht_identifier ht_identifier; struct ht_identifier GTY(()) { const unsigned char *str; unsigned int len; unsigned int hash_value; }; #define HT_LEN(NODE) ((NODE)->len) #define HT_STR(NODE) ((NODE)->str) typedef struct ht hash_table; typedef struct ht_identifier *hashnode; enum ht_lookup_option {HT_NO_INSERT = 0, HT_ALLOC, HT_ALLOCED}; /* An identifier hash table for cpplib and the front ends. */ struct ht { /* Identifiers are allocated from here. */ struct obstack stack; hashnode *entries; /* Call back, allocate a node. */ hashnode (*alloc_node) (hash_table *); /* Call back, allocate something that hangs off a node like a cpp_macro. NULL means use the usual allocator. */ void * (*alloc_subobject) (size_t); unsigned int nslots; /* Total slots in the entries array. */ unsigned int nelements; /* Number of live elements. */ /* Link to reader, if any. For the benefit of cpplib. */ struct cpp_reader *pfile; /* Table usage statistics. */ unsigned int searches; unsigned int collisions; /* Should 'entries' be freed when it is no longer needed? */ bool entries_owned; }; /* Initialize the hashtable with 2 ^ order entries. */ extern hash_table *ht_create (unsigned int order); /* Frees all memory associated with a hash table. */ extern void ht_destroy (hash_table *); extern hashnode ht_lookup (hash_table *, const unsigned char *, size_t, enum ht_lookup_option); extern hashnode ht_lookup_with_hash (hash_table *, const unsigned char *, size_t, unsigned int, enum ht_lookup_option); #define HT_HASHSTEP(r, c) ((r) * 67 + ((c) - 113)); #define HT_HASHFINISH(r, len) ((r) + (len)) /* For all nodes in TABLE, make a callback. The callback takes TABLE->PFILE, the node, and a PTR, and the callback sequence stops if the callback returns zero. */ typedef int (*ht_cb) (struct cpp_reader *, hashnode, const void *); extern void ht_forall (hash_table *, ht_cb, const void *); /* Restore the hash table. */ extern void ht_load (hash_table *ht, hashnode *entries, unsigned int nslots, unsigned int nelements, bool own); /* Dump allocation statistics to stderr. */ extern void ht_dump_statistics (hash_table *); #endif /* LIBCPP_SYMTAB_H */ /* Map logical line numbers to (source file, line number) pairs. Copyright (C) 2001, 2003, 2004 Free Software Foundation, Inc. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ #ifndef LIBCPP_LINE_MAP_H #define LIBCPP_LINE_MAP_H /* Reason for adding a line change with add_line_map (). LC_ENTER is when including a new file, e.g. a #include directive in C. LC_LEAVE is when reaching a file's end. LC_RENAME is when a file name or line number changes for neither of the above reasons (e.g. a #line directive in C). */ enum lc_reason {LC_ENTER = 0, LC_LEAVE, LC_RENAME}; /* A logical line/column number, i.e. an "index" into a line_map. */ /* Long-term, we want to use this to replace struct location_s (in input.h), and effectively typedef source_location location_t. */ typedef unsigned int source_location; /* Physical source file TO_FILE at line TO_LINE at column 0 is represented by the logical START_LOCATION. TO_LINE+L at column C is represented by START_LOCATION+(L*(1<start_location) >> (MAP)->column_bits) + (MAP)->to_line) #define SOURCE_COLUMN(MAP, LINE) \ (((LINE) - (MAP)->start_location) & ((1 << (MAP)->column_bits) - 1)) /* Returns the last source line within a map. This is the (last) line of the #include, or other directive, that caused a map change. */ #define LAST_SOURCE_LINE(MAP) \ SOURCE_LINE (MAP, LAST_SOURCE_LINE_LOCATION (MAP)) #define LAST_SOURCE_LINE_LOCATION(MAP) \ ((((MAP)[1].start_location - 1 - (MAP)->start_location) \ & ~((1 << (MAP)->column_bits) - 1)) \ + (MAP)->start_location) /* Returns the map a given map was included from. */ #define INCLUDED_FROM(SET, MAP) (&(SET)->maps[(MAP)->included_from]) /* Nonzero if the map is at the bottom of the include stack. */ #define MAIN_FILE_P(MAP) ((MAP)->included_from < 0) /* Set LOC to a source position that is the same line as the most recent linemap_line_start, but with the specified TO_COLUMN column number. 
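As a concrete (hypothetical) illustration of the encoding these macros rely on: with column_bits == 7, a map whose start_location is 1000 and whose to_line is 5 represents line 5, column 3 as the source_location 1000 + 0 * 128 + 3 == 1003, from which SOURCE_LINE and SOURCE_COLUMN above recover 5 and 3 respectively.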
*/ #define LINEMAP_POSITION_FOR_COLUMN(LOC, SET, TO_COLUMN) { \ unsigned int to_column = (TO_COLUMN); \ struct line_maps *set = (SET); \ if (__builtin_expect (to_column >= set->max_column_hint, 0)) \ (LOC) = linemap_position_for_column (set, to_column); \ else { \ source_location r = set->highest_line; \ r = r + to_column; \ if (r >= set->highest_location) \ set->highest_location = r; \ (LOC) = r; \ }} extern source_location linemap_position_for_column (struct line_maps *set, unsigned int to_column); #endif /* !LIBCPP_LINE_MAP_H */ #ifdef __cplusplus extern "C" { #endif typedef struct cpp_reader cpp_reader; typedef struct cpp_buffer cpp_buffer; typedef struct cpp_options cpp_options; typedef struct cpp_token cpp_token; typedef struct cpp_string cpp_string; typedef struct cpp_hashnode cpp_hashnode; typedef struct cpp_macro cpp_macro; typedef struct cpp_callbacks cpp_callbacks; typedef struct cpp_dir cpp_dir; struct answer; struct _cpp_file; /* The first three groups, apart from '=', can appear in preprocessor expressions (+= and -= are used to indicate unary + and - resp.). This allows a lookup table to be implemented in _cpp_parse_expr. The first group, to CPP_LAST_EQ, can be immediately followed by an '='. The lexer needs operators ending in '=', like ">>=", to be in the same order as their counterparts without the '=', like ">>". */ /* Positions in the table. */ #define CPP_LAST_EQ CPP_MAX #define CPP_FIRST_DIGRAPH CPP_HASH #define CPP_LAST_PUNCTUATOR CPP_DOT_STAR #define CPP_LAST_CPP_OP CPP_LESS_EQ #define TTYPE_TABLE \ OP(CPP_EQ = 0, "=") \ OP(CPP_NOT, "!") \ OP(CPP_GREATER, ">") /* compare */ \ OP(CPP_LESS, "<") \ OP(CPP_PLUS, "+") /* math */ \ OP(CPP_MINUS, "-") \ OP(CPP_MULT, "*") \ OP(CPP_DIV, "/") \ OP(CPP_MOD, "%") \ OP(CPP_AND, "&") /* bit ops */ \ OP(CPP_OR, "|") \ OP(CPP_XOR, "^") \ OP(CPP_RSHIFT, ">>") \ OP(CPP_LSHIFT, "<<") \ OP(CPP_MIN, "?") \ \ OP(CPP_COMPL, "~") \ OP(CPP_AND_AND, "&&") /* logical */ \ OP(CPP_OR_OR, "||") \ OP(CPP_QUERY, "?") \ OP(CPP_COLON, ":") \ OP(CPP_COMMA, ",") /* grouping */ \ OP(CPP_OPEN_PAREN, "(") \ OP(CPP_CLOSE_PAREN, ")") \ TK(CPP_EOF, SPELL_NONE) \ OP(CPP_EQ_EQ, "==") /* compare */ \ OP(CPP_NOT_EQ, "!=") \ OP(CPP_GREATER_EQ, ">=") \ OP(CPP_LESS_EQ, "<=") \ \ /* These two are unary + / - in preprocessor expressions. */ \ OP(CPP_PLUS_EQ, "+=") /* math */ \ OP(CPP_MINUS_EQ, "-=") \ \ OP(CPP_MULT_EQ, "*=") \ OP(CPP_DIV_EQ, "/=") \ OP(CPP_MOD_EQ, "%=") \ OP(CPP_AND_EQ, "&=") /* bit ops */ \ OP(CPP_OR_EQ, "|=") \ OP(CPP_XOR_EQ, "^=") \ OP(CPP_RSHIFT_EQ, ">>=") \ OP(CPP_LSHIFT_EQ, "<<=") \ OP(CPP_MIN_EQ, "?=") \ /* Digraphs together, beginning with CPP_FIRST_DIGRAPH. */ \ OP(CPP_HASH, "#") /* digraphs */ \ OP(CPP_PASTE, "##") \ OP(CPP_OPEN_SQUARE, "[") \ OP(CPP_CLOSE_SQUARE, "]") \ OP(CPP_OPEN_BRACE, "{") \ OP(CPP_CLOSE_BRACE, "}") \ /* The remainder of the punctuation. Order is not significant. 
*/ \ OP(CPP_SEMICOLON, ";") /* structure */ \ OP(CPP_ELLIPSIS, "...") \ OP(CPP_PLUS_PLUS, "++") /* increment */ \ OP(CPP_MINUS_MINUS, "--") \ OP(CPP_DEREF, "->") /* accessors */ \ OP(CPP_DOT, ".") \ OP(CPP_SCOPE, "::") \ OP(CPP_DEREF_STAR, "->*") \ OP(CPP_DOT_STAR, ".*") \ OP(CPP_ATSIGN, "@") /* used in Objective-C */ \ \ TK(CPP_NAME, SPELL_IDENT) /* word */ \ TK(CPP_AT_NAME, SPELL_IDENT) /* @word - Objective-C */ \ TK(CPP_NUMBER, SPELL_LITERAL) /* 34_be+ta */ \ \ TK(CPP_CHAR, SPELL_LITERAL) /* 'char' */ \ TK(CPP_WCHAR, SPELL_LITERAL) /* L'char' */ \ TK(CPP_OTHER, SPELL_LITERAL) /* stray punctuation */ \ \ TK(CPP_STRING, SPELL_LITERAL) /* "string" */ \ TK(CPP_WSTRING, SPELL_LITERAL) /* L"string" */ \ TK(CPP_OBJC_STRING, SPELL_LITERAL) /* @"string" - Objective-C */ \ TK(CPP_HEADER_NAME, SPELL_LITERAL) /* in #include */ \ \ TK(CPP_COMMENT, SPELL_LITERAL) /* Only if output comments. */ \ /* SPELL_LITERAL happens to DTRT. */ \ TK(CPP_MACRO_ARG, SPELL_NONE) /* Macro argument. */ \ TK(CPP_PADDING, SPELL_NONE) /* Whitespace for cpp0. */ #define OP(e, s) e, #define TK(e, s) e, enum cpp_ttype { TTYPE_TABLE N_TTYPES }; #undef OP #undef TK /* C language kind, used when calling cpp_reader_init. */ enum c_lang {CLK_GNUC89 = 0, CLK_GNUC99, CLK_STDC89, CLK_STDC94, CLK_STDC99, CLK_GNUCXX, CLK_CXX98, CLK_ASM}; /* Payload of a NUMBER, STRING, CHAR or COMMENT token. */ struct cpp_string GTY(()) { unsigned int len; const unsigned char *text; }; /* Flags for the cpp_token structure. */ #define PREV_WHITE (1 << 0) /* If whitespace before this token. */ #define DIGRAPH (1 << 1) /* If it was a digraph. */ #define STRINGIFY_ARG (1 << 2) /* If macro argument to be stringified. */ #define PASTE_LEFT (1 << 3) /* If on LHS of a ## operator. */ #define NAMED_OP (1 << 4) /* C++ named operators. */ #define NO_EXPAND (1 << 5) /* Do not macro-expand this token. */ #define BOL (1 << 6) /* Token at beginning of line. */ /* Specify which field, if any, of the cpp_token union is used. */ enum cpp_token_fld_kind { CPP_TOKEN_FLD_NODE, CPP_TOKEN_FLD_SOURCE, CPP_TOKEN_FLD_STR, CPP_TOKEN_FLD_ARG_NO, CPP_TOKEN_FLD_NONE }; /* A preprocessing token. This has been carefully packed and should occupy 16 bytes on 32-bit hosts and 24 bytes on 64-bit hosts. */ struct cpp_token GTY(()) { source_location src_loc; /* Location of first char of token. */ ENUM_BITFIELD(cpp_ttype) type : CHAR_BIT; /* token type */ unsigned char flags; /* flags - see above */ union cpp_token_u { /* An identifier. */ cpp_hashnode * GTY ((nested_ptr (union tree_node, "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL", "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL"), tag ("CPP_TOKEN_FLD_NODE"))) node; /* Inherit padding from this token. */ cpp_token * GTY ((tag ("CPP_TOKEN_FLD_SOURCE"))) source; /* A string, or number. */ struct cpp_string GTY ((tag ("CPP_TOKEN_FLD_STR"))) str; /* Argument no. for a CPP_MACRO_ARG. */ unsigned int GTY ((tag ("CPP_TOKEN_FLD_ARG_NO"))) arg_no; } GTY ((desc ("cpp_token_val_index (&%1)"))) val; }; /* Say which field is in use. */ extern enum cpp_token_fld_kind cpp_token_val_index (cpp_token *tok); /* A type wide enough to hold any multibyte source character. cpplib's character constant interpreter requires an unsigned type. Also, a typedef for the signed equivalent. 
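(On the common hosts where int is at least 32 bits wide, this simply makes cppchar_t an unsigned int and cppchar_signed_t a plain int.)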
The width of this type is capped at 32 bits; there do exist targets where wchar_t is 64 bits, but only in a non-default mode, and there would be no meaningful interpretation for a wchar_t value greater than 2^32 anyway -- the widest wide-character encoding around is ISO 10646, which stops at 2^31. */ #if CHAR_BIT * SIZEOF_INT >= 32 # define CPPCHAR_SIGNED_T int #elif CHAR_BIT * SIZEOF_LONG >= 32 # define CPPCHAR_SIGNED_T long #else # error "Cannot find a least-32-bit signed integer type" #endif typedef unsigned CPPCHAR_SIGNED_T cppchar_t; typedef CPPCHAR_SIGNED_T cppchar_signed_t; /* This structure is nested inside struct cpp_reader, and carries all the options visible to the command line. */ struct cpp_options { /* Characters between tab stops. */ unsigned int tabstop; /* The language we're preprocessing. */ enum c_lang lang; /* Nonzero means use extra default include directories for C++. */ unsigned char cplusplus; /* Nonzero means handle cplusplus style comments. */ unsigned char cplusplus_comments; /* Nonzero means define __OBJC__, treat @ as a special token, and use the OBJC[PLUS]_INCLUDE_PATH environment variable. */ unsigned char objc; /* Nonzero means don't copy comments into the output file. */ unsigned char discard_comments; /* Nonzero means don't copy comments into the output file during macro expansion. */ unsigned char discard_comments_in_macro_exp; /* Nonzero means process the ISO trigraph sequences. */ unsigned char trigraphs; /* Nonzero means process the ISO digraph sequences. */ unsigned char digraphs; /* Nonzero means to allow hexadecimal floats and LL suffixes. */ unsigned char extended_numbers; /* Nonzero means print names of header files (-H). */ unsigned char print_include_names; /* Nonzero means cpp_pedwarn causes a hard error. */ unsigned char pedantic_errors; /* Nonzero means don't print warning messages. */ unsigned char inhibit_warnings; /* Nonzero means complain about deprecated features. */ unsigned char warn_deprecated; /* Nonzero means don't suppress warnings from system headers. */ unsigned char warn_system_headers; /* Nonzero means don't print error messages. Has no option to select it, but can be set by a user of cpplib (e.g. fix-header). */ unsigned char inhibit_errors; /* Nonzero means warn if slash-star appears in a comment. */ unsigned char warn_comments; /* Nonzero means warn if a user-supplied include directory does not exist. */ unsigned char warn_missing_include_dirs; /* Nonzero means warn if there are any trigraphs. */ unsigned char warn_trigraphs; /* Nonzero means warn about multicharacter charconsts. */ unsigned char warn_multichar; /* Nonzero means warn about various incompatibilities with traditional C. */ unsigned char warn_traditional; /* Nonzero means warn about long long numeric constants. */ unsigned char warn_long_long; /* Nonzero means warn about text after an #endif (or #else). */ unsigned char warn_endif_labels; /* Nonzero means warn about implicit sign changes owing to integer promotions. */ unsigned char warn_num_sign_change; /* Zero means don't warn about __VA_ARGS__ usage in c89 pedantic mode. Presumably the usage is protected by the appropriate #ifdef. */ unsigned char warn_variadic_macros; /* Nonzero means turn warnings into errors. */ unsigned char warnings_are_errors; /* Nonzero means we should look for header.gcc files that remap file names. */ unsigned char remap; /* Zero means dollar signs are punctuation. 
*/ unsigned char dollars_in_ident; /* True if we should warn about dollars in identifiers or numbers for this translation unit. */ unsigned char warn_dollars; /* Nonzero means warn if undefined identifiers are evaluated in an #if. */ unsigned char warn_undef; /* Nonzero means warn of unused macros from the main file. */ unsigned char warn_unused_macros; /* Nonzero for the 1999 C Standard, including corrigenda and amendments. */ unsigned char c99; /* Nonzero if we are conforming to a specific C or C++ standard. */ unsigned char std; /* Nonzero means give all the error messages the ANSI standard requires. */ unsigned char pedantic; /* Nonzero means we're looking at already preprocessed code, so don't bother trying to do macro expansion and whatnot. */ unsigned char preprocessed; /* Print column number in error messages. */ unsigned char show_column; /* Nonzero means handle C++ alternate operator names. */ unsigned char operator_names; /* True for traditional preprocessing. */ unsigned char traditional; /* Holds the name of the target (execution) character set. */ const char *narrow_charset; /* Holds the name of the target wide character set. */ const char *wide_charset; /* Holds the name of the input character set. */ const char *input_charset; /* True to warn about precompiled header files we couldn't use. */ bool warn_invalid_pch; /* True if dependencies should be restored from a precompiled header. */ bool restore_pch_deps; /* Dependency generation. */ struct { /* Style of header dependencies to generate. */ enum {DEPS_NONE = 0, DEPS_USER, DEPS_SYSTEM } style; /* Assume missing files are generated files. */ bool missing_files; /* Generate phony targets for each dependency apart from the first one. */ bool phony_targets; /* If true, no dependency is generated on the main file. */ bool ignore_main_file; } deps; /* Target-specific features set by the front end or client. */ /* Precision for target CPP arithmetic, target characters, target ints and target wide characters, respectively. */ size_t precision, char_precision, int_precision, wchar_precision; /* True means chars (wide chars) are unsigned. */ bool unsigned_char, unsigned_wchar; /* True if the most significant byte in a word has the lowest address in memory. */ bool bytes_big_endian; /* Nonzero means __STDC__ should have the value 0 in system headers. */ unsigned char stdc_0_in_system_headers; }; /* Callback for header lookup for HEADER, which is the name of a source file. It is used as a method of last resort to find headers that are not otherwise found during the normal include processing. The return value is the malloced name of a header to try and open, if any, or NULL otherwise. This callback is called only if the header is otherwise unfound. */ typedef const char *(*missing_header_cb)(cpp_reader *, const char *header, cpp_dir **); /* Call backs to cpplib client. */ struct cpp_callbacks { /* Called when a new line of preprocessed output is started. */ void (*line_change) (cpp_reader *, const cpp_token *, int); /* Called when switching to/from a new file. The line_map is for the new file. It is NULL if there is no new file. (In C this happens when done with + and also when done with a main file.) This can be used for resource cleanup. 
*/ void (*file_change) (cpp_reader *, const struct line_map *); void (*dir_change) (cpp_reader *, const char *); void (*include) (cpp_reader *, unsigned int, const unsigned char *, const char *, int); void (*define) (cpp_reader *, unsigned int, cpp_hashnode *); void (*undef) (cpp_reader *, unsigned int, cpp_hashnode *); void (*ident) (cpp_reader *, unsigned int, const cpp_string *); void (*def_pragma) (cpp_reader *, unsigned int); int (*valid_pch) (cpp_reader *, const char *, int); void (*read_pch) (cpp_reader *, const char *, int, const char *); missing_header_cb missing_header; }; /* Chain of directories to look for include files in. */ struct cpp_dir { /* NULL-terminated singly-linked list. */ struct cpp_dir *next; /* NAME of the directory, NUL-terminated. */ char *name; unsigned int len; /* One if a system header, two if a system header that has extern "C" guards for C++. */ unsigned char sysp; /* Mapping of file names for this directory for MS-DOS and related platforms. A NULL-terminated array of (from, to) pairs. */ const char **name_map; /* Routine to construct pathname, given the search path name and the HEADER we are trying to find, return a constructed pathname to try and open. If this is NULL, the constructed pathname is as constructed by append_file_to_dir. */ char *(*construct) (const char *header, cpp_dir *dir); /* The C front end uses these to recognize duplicated directories in the search path. */ ino_t ino; dev_t dev; /* Is this a user-supplied directory? */ bool user_supplied_p; }; /* Name under which this program was invoked. */ extern const char *progname; /* The structure of a node in the hash table. The hash table has entries for all identifiers: either macros defined by #define commands (type NT_MACRO), assertions created with #assert (NT_ASSERTION), or neither of the above (NT_VOID). Builtin macros like __LINE__ are flagged NODE_BUILTIN. Poisoned identifiers are flagged NODE_POISONED. NODE_OPERATOR (C++ only) indicates an identifier that behaves like an operator such as "xor". NODE_DIAGNOSTIC is for speed in lex_token: it indicates a diagnostic may be required for this node. Currently this only applies to __VA_ARGS__ and poisoned identifiers. */ /* Hash node flags. */ #define NODE_OPERATOR (1 << 0) /* C++ named operator. */ #define NODE_POISONED (1 << 1) /* Poisoned identifier. */ #define NODE_BUILTIN (1 << 2) /* Builtin macro. */ #define NODE_DIAGNOSTIC (1 << 3) /* Possible diagnostic when lexed. */ #define NODE_WARN (1 << 4) /* Warn if redefined or undefined. */ #define NODE_DISABLED (1 << 5) /* A disabled macro. */ #define NODE_MACRO_ARG (1 << 6) /* Used during #define processing. */ /* Different flavors of hash node. */ enum ident_node_type { NT_VOID = 0, /* No definition yet. */ NT_MACRO, /* A macro of some form. */ NT_ASSERTION /* Predicate for #assert. */ }; /* Different flavors of builtin macro. _Pragma is an operator, but we handle it with the builtin code for efficiency reasons. */ enum builtin_type { BT_SPECLINE = 0, /* `__LINE__' */ BT_DATE, /* `__DATE__' */ BT_FILE, /* `__FILE__' */ BT_BASE_FILE, /* `__BASE_FILE__' */ BT_INCLUDE_LEVEL, /* `__INCLUDE_LEVEL__' */ BT_TIME, /* `__TIME__' */ BT_STDC, /* `__STDC__' */ BT_PRAGMA /* `_Pragma' operator */ }; #define CPP_HASHNODE(HNODE) ((cpp_hashnode *) (HNODE)) #define HT_NODE(NODE) ((ht_identifier *) (NODE)) #define NODE_LEN(NODE) HT_LEN (&(NODE)->ident) #define NODE_NAME(NODE) HT_STR (&(NODE)->ident) /* Specify which field, if any, of the union is used. 
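/* A sketch of a client filling in these hooks; illustrative only, the
   my_* functions are hypothetical.  The missing_header hook is consulted
   only after normal include processing fails, and must return NULL or a
   malloced file name for cpplib to try instead.  */
static void
my_file_change (cpp_reader *reader, const struct line_map *map)
{
  /* MAP is NULL when there is no new file; otherwise it describes the
     file just entered or returned to.  */
  (void) reader;
  (void) map;
}

static const char *
my_missing_header (cpp_reader *reader, const char *header, cpp_dir **dirp)
{
  /* A real client might consult a project-specific search path here;
     returning NULL lets the usual "header not found" error happen.  */
  (void) reader;
  (void) header;
  (void) dirp;
  return NULL;
}

/* Wiring them up, via cpp_get_callbacks (declared below):

     cpp_callbacks *cb = cpp_get_callbacks (reader);
     cb->file_change    = my_file_change;
     cb->missing_header = my_missing_header;  */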
*/ enum { NTV_MACRO, NTV_ANSWER, NTV_BUILTIN, NTV_ARGUMENT, NTV_NONE }; #define CPP_HASHNODE_VALUE_IDX(HNODE) \ ((HNODE.flags & NODE_MACRO_ARG) ? NTV_ARGUMENT \ : HNODE.type == NT_MACRO ? ((HNODE.flags & NODE_BUILTIN) \ ? NTV_BUILTIN : NTV_MACRO) \ : HNODE.type == NT_ASSERTION ? NTV_ANSWER \ : NTV_NONE) /* The common part of an identifier node shared amongst all 3 C front ends. Also used to store CPP identifiers, which are a superset of identifiers in the grammatical sense. */ struct cpp_hashnode GTY(()) { struct ht_identifier ident; unsigned int is_directive : 1; unsigned int directive_index : 7; /* If is_directive, then index into directive table. Otherwise, a NODE_OPERATOR. */ unsigned char rid_code; /* Rid code - for front ends. */ ENUM_BITFIELD(ident_node_type) type : 8; /* CPP node type. */ unsigned char flags; /* CPP flags. */ union _cpp_hashnode_value { /* If a macro. */ cpp_macro * GTY((tag ("NTV_MACRO"))) macro; /* Answers to an assertion. */ struct answer * GTY ((tag ("NTV_ANSWER"))) answers; /* Code for a builtin macro. */ enum builtin_type GTY ((tag ("NTV_BUILTIN"))) builtin; /* Macro argument index. */ unsigned short GTY ((tag ("NTV_ARGUMENT"))) arg_index; } GTY ((desc ("CPP_HASHNODE_VALUE_IDX (%1)"))) value; }; /* Call this first to get a handle to pass to other functions. If you want cpplib to manage its own hashtable, pass in a NULL pointer. Otherwise you should pass in an initialized hash table that cpplib will share; this technique is used by the C front ends. */ extern cpp_reader *cpp_create_reader (enum c_lang, struct ht *, struct line_maps *); /* Call this to change the selected language standard (e.g. because of command line options). */ extern void cpp_set_lang (cpp_reader *, enum c_lang); /* Set the include paths. */ extern void cpp_set_include_chains (cpp_reader *, cpp_dir *, cpp_dir *, int); /* Call these to get pointers to the options, callback, and deps structures for a given reader. These pointers are good until you call cpp_finish on that reader. You can either edit the callbacks through the pointer returned from cpp_get_callbacks, or set them with cpp_set_callbacks. */ extern cpp_options *cpp_get_options (cpp_reader *); extern cpp_callbacks *cpp_get_callbacks (cpp_reader *); extern void cpp_set_callbacks (cpp_reader *, cpp_callbacks *); extern struct depends *cpp_get_deps (cpp_reader *); /* This function reads the file, but does not start preprocessing. It returns the name of the original file; this is the same as the input file, except for preprocessed input. This will generate at least one file change callback, and possibly a line change callback too. If there was an error opening the file, it returns NULL. */ extern const char *cpp_read_main_file (cpp_reader *, const char *); /* Set up built-ins like __FILE__. */ extern void cpp_init_builtins (cpp_reader *, int); /* This is called after options have been parsed, and partially processed. */ extern void cpp_post_options (cpp_reader *); /* Set up translation to the target character set. */ extern void cpp_init_iconv (cpp_reader *); /* Call this to finish preprocessing. If you requested dependency generation, pass an open stream to write the information to, otherwise NULL. It is your responsibility to close the stream. Returns cpp_errors (pfile). */ extern int cpp_finish (cpp_reader *, FILE *deps_stream); /* Call this to release the handle at the end of preprocessing. Any use of the handle after this function returns is invalid. Returns cpp_errors (pfile). 
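/* A sketch of one plausible start-up sequence built from the entry points
   above; illustrative only.  LINE_TABLE is assumed to be a line_maps
   object already initialized by the client, PATH a file name, and error
   handling is omitted.  */
static cpp_reader *
client_start (struct line_maps *line_table, const char *path)
{
  cpp_reader *reader = cpp_create_reader (CLK_GNUC99, NULL, line_table);
  cpp_options *opts = cpp_get_options (reader);

  /* Options are plain structure fields; set them before preprocessing.  */
  opts->discard_comments = 1;
  opts->warn_trigraphs = 1;

  cpp_post_options (reader);		/* options are now final */

  if (cpp_read_main_file (reader, path) == NULL)
    return NULL;			/* the file could not be opened */

  cpp_init_iconv (reader);		/* execution character set conversion */
  cpp_init_builtins (reader, 1);	/* __FILE__, __LINE__, __DATE__, ... */
  return reader;
}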
*/ extern void cpp_destroy (cpp_reader *); /* Error count. */ extern unsigned int cpp_errors (cpp_reader *); extern unsigned int cpp_token_len (const cpp_token *); extern unsigned char *cpp_token_as_text (cpp_reader *, const cpp_token *); extern unsigned char *cpp_spell_token (cpp_reader *, const cpp_token *, unsigned char *); extern void cpp_register_pragma (cpp_reader *, const char *, const char *, void (*) (cpp_reader *)); extern int cpp_avoid_paste (cpp_reader *, const cpp_token *, const cpp_token *); extern const cpp_token *cpp_get_token (cpp_reader *); extern const unsigned char *cpp_macro_definition (cpp_reader *, const cpp_hashnode *); extern void _cpp_backup_tokens (cpp_reader *, unsigned int); /* Evaluate a CPP_CHAR or CPP_WCHAR token. */ extern cppchar_t cpp_interpret_charconst (cpp_reader *, const cpp_token *, unsigned int *, int *); /* Evaluate a vector of CPP_STRING or CPP_WSTRING tokens. */ extern bool cpp_interpret_string (cpp_reader *, const cpp_string *, size_t, cpp_string *, bool); extern bool cpp_interpret_string_notranslate (cpp_reader *, const cpp_string *, size_t, cpp_string *, bool); /* Used to register macros and assertions, perhaps from the command line. The text is the same as the command line argument. */ extern void cpp_define (cpp_reader *, const char *); extern void cpp_assert (cpp_reader *, const char *); extern void cpp_undef (cpp_reader *, const char *); extern void cpp_unassert (cpp_reader *, const char *); /* Undefine all macros and assertions. */ extern void cpp_undef_all (cpp_reader *); extern cpp_buffer *cpp_push_buffer (cpp_reader *, const unsigned char *, size_t, int); extern int cpp_defined (cpp_reader *, const unsigned char *, int); /* A preprocessing number. Code assumes that any unused high bits of the double integer are set to zero. */ typedef unsigned HOST_WIDE_INT cpp_num_part; typedef struct cpp_num cpp_num; struct cpp_num { cpp_num_part high; cpp_num_part low; bool unsignedp; /* True if value should be treated as unsigned. */ bool overflow; /* True if the most recent calculation overflowed. */ }; /* cpplib provides two interfaces for interpretation of preprocessing numbers. cpp_classify_number categorizes numeric constants according to their field (integer, floating point, or invalid), radix (decimal, octal, hexadecimal), and type suffixes. */ #define CPP_N_CATEGORY 0x000F #define CPP_N_INVALID 0x0000 #define CPP_N_INTEGER 0x0001 #define CPP_N_FLOATING 0x0002 #define CPP_N_WIDTH 0x00F0 #define CPP_N_SMALL 0x0010 /* int, float. */ #define CPP_N_MEDIUM 0x0020 /* long, double. */ #define CPP_N_LARGE 0x0040 /* long long, long double. */ #define CPP_N_RADIX 0x0F00 #define CPP_N_DECIMAL 0x0100 #define CPP_N_HEX 0x0200 #define CPP_N_OCTAL 0x0400 #define CPP_N_UNSIGNED 0x1000 /* Properties. */ #define CPP_N_IMAGINARY 0x2000 /* Classify a CPP_NUMBER token. The return value is a combination of the flags from the above sets. */ extern unsigned cpp_classify_number (cpp_reader *, const cpp_token *); /* Evaluate a token classified as category CPP_N_INTEGER. */ extern cpp_num cpp_interpret_integer (cpp_reader *, const cpp_token *, unsigned int type); /* Sign extend a number, with PRECISION significant bits and all others assumed clear, to fill out a cpp_num structure. */ cpp_num cpp_num_sign_extend (cpp_num, size_t); /* Diagnostic levels. To get a diagnostic without associating a position in the translation unit with it, use cpp_error_with_line with a line number of zero. */ /* Warning, an error with -Werror. 
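/* A sketch of the core token loop and shutdown; illustrative only, assumes
   <stdio.h> and a READER set up as in the sketch above.  */
static int
client_dump_tokens (cpp_reader *reader)
{
  const cpp_token *tok;
  int errs;

  cpp_define (reader, "CLIENT_DEMO=1");	/* as if -DCLIENT_DEMO=1 */

  while ((tok = cpp_get_token (reader))->type != CPP_EOF)
    {
      if (tok->flags & PREV_WHITE)
	putchar (' ');
      fputs ((const char *) cpp_token_as_text (reader, tok), stdout);

      /* CPP_NUMBER tokens are classified on demand; integers can then
	 be evaluated into a cpp_num.  */
      if (tok->type == CPP_NUMBER)
	{
	  unsigned int cat = cpp_classify_number (reader, tok);
	  if ((cat & CPP_N_CATEGORY) == CPP_N_INTEGER)
	    {
	      cpp_num value = cpp_interpret_integer (reader, tok, cat);
	      (void) value;	/* value.low / value.high hold the result */
	    }
	}
    }

  errs = cpp_finish (reader, NULL);	/* no dependency stream requested */
  cpp_destroy (reader);
  return errs == 0;
}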
*/ #define CPP_DL_WARNING 0x00 /* Same as CPP_DL_WARNING, except it is not suppressed in system headers. */ #define CPP_DL_WARNING_SYSHDR 0x01 /* Warning, an error with -pedantic-errors or -Werror. */ #define CPP_DL_PEDWARN 0x02 /* An error. */ #define CPP_DL_ERROR 0x03 /* An internal consistency check failed. Prints "internal error: ", otherwise the same as CPP_DL_ERROR. */ #define CPP_DL_ICE 0x04 /* Extracts a diagnostic level from an int. */ #define CPP_DL_EXTRACT(l) (l & 0xf) /* Nonzero if a diagnostic level is one of the warnings. */ #define CPP_DL_WARNING_P(l) (CPP_DL_EXTRACT (l) >= CPP_DL_WARNING \ && CPP_DL_EXTRACT (l) <= CPP_DL_PEDWARN) /* N.B. The error-message-printer prototypes have not been nicely formatted because exgettext needs to see 'msgid' on the same line as the name of the function in order to work properly. Only the string argument gets a name in an effort to keep the lines from getting ridiculously oversized. */ /* Output a diagnostic of some kind. */ extern void cpp_error (cpp_reader *, int, const char *msgid, ...) ATTRIBUTE_PRINTF_3; /* Output a diagnostic with "MSGID: " preceding the error string of errno. No location is printed. */ extern void cpp_errno (cpp_reader *, int, const char *msgid); /* Same as cpp_error, except additionally specifies a position as a (translation unit) physical line and physical column. If the line is zero, then no location is printed. */ extern void cpp_error_with_line (cpp_reader *, int, source_location, unsigned, const char *msgid, ...) ATTRIBUTE_PRINTF_5; /* In cpplex.c */ extern int cpp_ideq (const cpp_token *, const char *); extern void cpp_output_line (cpp_reader *, FILE *); extern void cpp_output_token (const cpp_token *, FILE *); extern const char *cpp_type2name (enum cpp_ttype); /* Returns the value of an escape sequence, truncated to the correct target precision. PSTR points to the input pointer, which is just after the backslash. LIMIT is how much text we have. WIDE is true if the escape sequence is part of a wide character constant or string literal. Handles all relevant diagnostics. */ extern cppchar_t cpp_parse_escape (cpp_reader *, const unsigned char ** pstr, const unsigned char *limit, int wide); /* In cpphash.c */ /* Lookup an identifier in the hashtable. Puts the identifier in the table if it is not already there. 
*/ extern cpp_hashnode *cpp_lookup (cpp_reader *, const unsigned char *, unsigned int); typedef int (*cpp_cb) (cpp_reader *, cpp_hashnode *, void *); extern void cpp_forall_identifiers (cpp_reader *, cpp_cb, void *); /* In cppmacro.c */ extern void cpp_scan_nooutput (cpp_reader *); extern int cpp_sys_macro_p (cpp_reader *); extern unsigned char *cpp_quote_string (unsigned char *, const unsigned char *, unsigned int); /* In cppfiles.c */ extern bool cpp_included (cpp_reader *, const char *); extern void cpp_make_system_header (cpp_reader *, int, int); extern bool cpp_push_include (cpp_reader *, const char *); extern void cpp_change_file (cpp_reader *, enum lc_reason, const char *); extern const char *cpp_get_path (struct _cpp_file *); extern cpp_dir *cpp_get_dir (struct _cpp_file *); extern cpp_buffer *cpp_get_buffer (cpp_reader *); extern struct _cpp_file *cpp_get_file (cpp_buffer *); extern cpp_buffer *cpp_get_prev (cpp_buffer *); /* In cpppch.c */ struct save_macro_data; extern int cpp_save_state (cpp_reader *, FILE *); extern int cpp_write_pch_deps (cpp_reader *, FILE *); extern int cpp_write_pch_state (cpp_reader *, FILE *); extern int cpp_valid_state (cpp_reader *, const char *, int); extern void cpp_prepare_state (cpp_reader *, struct save_macro_data **); extern int cpp_read_state (cpp_reader *, const char *, FILE *, struct save_macro_data *); #ifdef __cplusplus } #endif #endif /* ! LIBCPP_CPPLIB_H */ /* Part of CPP library. Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This header defines all the internal data structures and functions that need to be visible across files. It should not be used outside cpplib. */ #ifndef LIBCPP_INTERNAL_H #define LIBCPP_INTERNAL_H /* Structures that hang off cpp_identifier, for PCH. Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_CPP_ID_DATA #define GCC_CPP_ID_DATA #ifndef HAVE_UCHAR typedef unsigned char uchar; #endif #define USTR (const uchar *) /* Intended use: USTR"string" */ /* Chained list of answers to an assertion. 
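/* A sketch of walking the identifier table with the cpp_forall_identifiers
   and cpp_cb interface declared above; illustrative only.  Counts the
   identifiers currently defined as non-builtin macros.  */
static int
client_count_macro (cpp_reader *reader, cpp_hashnode *node, void *data)
{
  (void) reader;
  if (node->type == NT_MACRO && !(node->flags & NODE_BUILTIN))
    ++*(unsigned int *) data;
  return 1;			/* nonzero keeps the walk going */
}

/* Usage:

     unsigned int count = 0;
     cpp_forall_identifiers (reader, client_count_macro, &count);  */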
*/ struct answer GTY(()) { struct answer *next; unsigned int count; cpp_token GTY ((length ("%h.count"))) first[1]; }; /* Each macro definition is recorded in a cpp_macro structure. Variadic macros cannot occur with traditional cpp. */ struct cpp_macro GTY(()) { /* Parameters, if any. */ cpp_hashnode ** GTY ((nested_ptr (union tree_node, "%h ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT (%h)) : NULL", "%h ? HT_IDENT_TO_GCC_IDENT (HT_NODE (%h)) : NULL"), length ("%h.paramc"))) params; /* Replacement tokens (ISO) or replacement text (traditional). See comment at top of cpptrad.c for how traditional function-like macros are encoded. */ union cpp_macro_u { cpp_token * GTY ((tag ("0"), length ("%0.count"))) tokens; const uchar * GTY ((tag ("1"))) text; } GTY ((desc ("%1.traditional"))) exp; /* Definition line number. */ source_location line; /* Number of tokens in expansion, or bytes for traditional macros. */ unsigned int count; /* Number of parameters. */ unsigned short paramc; /* If a function-like macro. */ unsigned int fun_like : 1; /* If a variadic macro. */ unsigned int variadic : 1; /* If macro defined in system header. */ unsigned int syshdr : 1; /* Nonzero if it has been expanded or had its existence tested. */ unsigned int used : 1; /* Indicate which field of 'exp' is in use. */ unsigned int traditional : 1; }; #endif /* GCC_CPP_ID_DATA */ #if defined HAVE_ICONV_H && defined HAVE_ICONV #include <iconv.h> #else #define HAVE_ICONV 0 typedef int iconv_t; /* dummy */ #endif struct directive; /* Deliberately incomplete. */ struct pending_option; struct op; struct _cpp_strbuf; typedef bool (*convert_f) (iconv_t, const unsigned char *, size_t, struct _cpp_strbuf *); struct cset_converter { convert_f func; iconv_t cd; }; #define BITS_PER_CPPCHAR_T (CHAR_BIT * sizeof (cppchar_t)) /* Test if a sign is valid within a preprocessing number. */ #define VALID_SIGN(c, prevc) \ (((c) == '+' || (c) == '-') && \ ((prevc) == 'e' || (prevc) == 'E' \ || (((prevc) == 'p' || (prevc) == 'P') \ && CPP_OPTION (pfile, extended_numbers)))) #define CPP_OPTION(PFILE, OPTION) ((PFILE)->opts.OPTION) #define CPP_BUFFER(PFILE) ((PFILE)->buffer) #define CPP_BUF_COLUMN(BUF, CUR) ((CUR) - (BUF)->line_base) #define CPP_BUF_COL(BUF) CPP_BUF_COLUMN(BUF, (BUF)->cur) #define CPP_INCREMENT_LINE(PFILE, COLS_HINT) do { \ const struct line_maps *line_table = PFILE->line_table; \ const struct line_map *map = &line_table->maps[line_table->used-1]; \ unsigned int line = SOURCE_LINE (map, line_table->highest_line); \ linemap_line_start (PFILE->line_table, line + 1, COLS_HINT); \ } while (0) /* Maximum nesting of cpp_buffers. We use a static limit, partly for efficiency, and partly to limit runaway recursion. */ #define CPP_STACK_MAX 200 /* Host alignment handling. */ struct dummy { char c; union { double d; int *p; } u; }; #define DEFAULT_ALIGNMENT offsetof (struct dummy, u) #define CPP_ALIGN2(size, align) (((size) + ((align) - 1)) & ~((align) - 1)) #define CPP_ALIGN(size) CPP_ALIGN2 (size, DEFAULT_ALIGNMENT) #define _cpp_mark_macro_used(NODE) do { \ if ((NODE)->type == NT_MACRO && !((NODE)->flags & NODE_BUILTIN)) \ (NODE)->value.macro->used = 1; } while (0) /* A generic memory buffer, and operations on it.
*/ typedef struct _cpp_buff _cpp_buff; struct _cpp_buff { struct _cpp_buff *next; unsigned char *base, *cur, *limit; }; extern _cpp_buff *_cpp_get_buff (cpp_reader *, size_t); extern void _cpp_release_buff (cpp_reader *, _cpp_buff *); extern void _cpp_extend_buff (cpp_reader *, _cpp_buff **, size_t); extern _cpp_buff *_cpp_append_extend_buff (cpp_reader *, _cpp_buff *, size_t); extern void _cpp_free_buff (_cpp_buff *); extern unsigned char *_cpp_aligned_alloc (cpp_reader *, size_t); extern unsigned char *_cpp_unaligned_alloc (cpp_reader *, size_t); #define BUFF_ROOM(BUFF) (size_t) ((BUFF)->limit - (BUFF)->cur) #define BUFF_FRONT(BUFF) ((BUFF)->cur) #define BUFF_LIMIT(BUFF) ((BUFF)->limit) /* #include types. */ enum include_type {IT_INCLUDE, IT_INCLUDE_NEXT, IT_IMPORT, IT_CMDLINE}; union utoken { const cpp_token *token; const cpp_token **ptoken; }; /* A "run" of tokens; part of a chain of runs. */ typedef struct tokenrun tokenrun; struct tokenrun { tokenrun *next, *prev; cpp_token *base, *limit; }; /* Accessor macros for struct cpp_context. */ #define FIRST(c) ((c)->u.iso.first) #define LAST(c) ((c)->u.iso.last) #define CUR(c) ((c)->u.trad.cur) #define RLIMIT(c) ((c)->u.trad.rlimit) typedef struct cpp_context cpp_context; struct cpp_context { /* Doubly-linked list. */ cpp_context *next, *prev; union { /* For ISO macro expansion. Contexts other than the base context are contiguous tokens. e.g. macro expansions, expanded argument tokens. */ struct { union utoken first; union utoken last; } iso; /* For traditional macro expansion. */ struct { const uchar *cur; const uchar *rlimit; } trad; } u; /* If non-NULL, a buffer used for storage related to this context. When the context is popped, the buffer is released. */ _cpp_buff *buff; /* For a macro context, the macro node, otherwise NULL. */ cpp_hashnode *macro; /* True if utoken element is token, else ptoken. */ bool direct_p; }; struct lexer_state { /* Nonzero if first token on line is CPP_HASH. */ unsigned char in_directive; /* Nonzero if in a directive that will handle padding tokens itself. #include needs this to avoid problems with computed include and spacing between tokens. */ unsigned char directive_wants_padding; /* True if we are skipping a failed conditional group. */ unsigned char skipping; /* Nonzero if in a directive that takes angle-bracketed headers. */ unsigned char angled_headers; /* Nonzero if in a #if or #elif directive. */ unsigned char in_expression; /* Nonzero to save comments. Turned off if discard_comments, and in all directives apart from #define. */ unsigned char save_comments; /* Nonzero if lexing __VA_ARGS__ is valid. */ unsigned char va_args_ok; /* Nonzero if lexing poisoned identifiers is valid. */ unsigned char poisoned_ok; /* Nonzero to prevent macro expansion. */ unsigned char prevent_expansion; /* Nonzero when parsing arguments to a function-like macro. */ unsigned char parsing_args; /* Nonzero if prevent_expansion is true only because output is being discarded. */ unsigned char discarding_output; /* Nonzero to skip evaluating part of an expression. */ unsigned int skip_eval; }; /* Special nodes - identifiers with predefined significance. */ struct spec_nodes { cpp_hashnode *n_defined; /* defined operator */ cpp_hashnode *n_true; /* C++ keyword true */ cpp_hashnode *n_false; /* C++ keyword false */ cpp_hashnode *n__VA_ARGS__; /* C99 vararg macros */ }; typedef struct _cpp_line_note _cpp_line_note; struct _cpp_line_note { /* Location in the clean line the note refers to. */ const uchar *pos; /* Type of note. 
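/* Worked example (illustrative): CPP_ALIGN2 above rounds SIZE up to a
   multiple of ALIGN, which must be a power of two, e.g.
     CPP_ALIGN2 (13, 8) = (13 + 7) & ~7 = 16,
   and DEFAULT_ALIGNMENT is the offset of the union member of struct dummy,
   i.e. the strictest alignment demanded by double and int * on the host.

   A sketch of the usual _cpp_buff idiom built on these primitives;
   illustrative only, assumes <string.h>: make room, write at BUFF_FRONT,
   then advance the front past what was written.  */
static unsigned char *
client_append_bytes (cpp_reader *reader, _cpp_buff **pbuff,
		     const unsigned char *src, size_t len)
{
  if (BUFF_ROOM (*pbuff) < len)
    _cpp_extend_buff (reader, pbuff, len);
  memcpy (BUFF_FRONT (*pbuff), src, len);
  BUFF_FRONT (*pbuff) += len;
  return BUFF_FRONT (*pbuff) - len;
}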
The 9 'from' trigraph characters represent those trigraphs, '\\' an escaped newline, ' ' an escaped newline with intervening space, and anything else is invalid. */ unsigned int type; }; /* Represents the contents of a file cpplib has read in. */ struct cpp_buffer { const uchar *cur; /* Current location. */ const uchar *line_base; /* Start of current physical line. */ const uchar *next_line; /* Start of to-be-cleaned logical line. */ const uchar *buf; /* Entire character buffer. */ const uchar *rlimit; /* Writable byte at end of file. */ _cpp_line_note *notes; /* Array of notes. */ unsigned int cur_note; /* Next note to process. */ unsigned int notes_used; /* Number of notes. */ unsigned int notes_cap; /* Size of allocated array. */ struct cpp_buffer *prev; /* Pointer into the file table; non-NULL if this is a file buffer. Used for include_next and to record control macros. */ struct _cpp_file *file; /* Value of if_stack at start of this file. Used to prohibit unmatched #endif (etc) in an include file. */ struct if_stack *if_stack; /* True if we need to get the next clean line. */ bool need_line; /* True if we have already warned about C++ comments in this file. The warning happens only for C89 extended mode with -pedantic on, or for -Wtraditional, and only once per file (otherwise it would be far too noisy). */ unsigned int warned_cplusplus_comments : 1; /* True if we don't process trigraphs and escaped newlines. True for preprocessed input, command line directives, and _Pragma buffers. */ unsigned int from_stage3 : 1; /* At EOF, a buffer is automatically popped. If RETURN_AT_EOF is true, a CPP_EOF token is then returned. Otherwise, the next token from the enclosing buffer is returned. */ unsigned int return_at_eof : 1; /* One for a system header, two for a C system header file that therefore needs to be extern "C" protected in C++, and zero otherwise. */ unsigned char sysp; /* The directory of the this buffer's file. Its NAME member is not allocated, so we don't need to worry about freeing it. */ struct cpp_dir dir; /* Descriptor for converting from the input character set to the source character set. */ struct cset_converter input_cset_desc; }; /* A cpp_reader encapsulates the "state" of a pre-processor run. Applying cpp_get_token repeatedly yields a stream of pre-processor tokens. Usually, there is only one cpp_reader object active. */ struct cpp_reader { /* Top of buffer stack. */ cpp_buffer *buffer; /* Overlaid buffer (can be different after processing #include). */ cpp_buffer *overlaid_buffer; /* Lexer state. */ struct lexer_state state; /* Source line tracking. */ struct line_maps *line_table; /* The line of the '#' of the current directive. */ source_location directive_line; /* Memory buffers. */ _cpp_buff *a_buff; /* Aligned permanent storage. */ _cpp_buff *u_buff; /* Unaligned permanent storage. */ _cpp_buff *free_buffs; /* Free buffer chain. */ /* Context stack. */ struct cpp_context base_context; struct cpp_context *context; /* If in_directive, the directive if known. */ const struct directive *directive; /* Search paths for include files. */ struct cpp_dir *quote_include; /* "" */ struct cpp_dir *bracket_include; /* <> */ struct cpp_dir no_search_path; /* No path. */ /* Chain of all hashed _cpp_file instances. */ struct _cpp_file *all_files; struct _cpp_file *main_file; /* File and directory hash table. 
*/ struct htab *file_hash; struct file_hash_entry *file_hash_entries; unsigned int file_hash_entries_allocated, file_hash_entries_used; /* Nonzero means don't look for #include "foo" the source-file directory. */ bool quote_ignores_source_dir; /* Nonzero if any file has contained #pragma once or #import has been used. */ bool seen_once_only; /* Multiple include optimization. */ const cpp_hashnode *mi_cmacro; const cpp_hashnode *mi_ind_cmacro; bool mi_valid; /* Lexing. */ cpp_token *cur_token; tokenrun base_run, *cur_run; unsigned int lookaheads; /* Nonzero prevents the lexer from re-using the token runs. */ unsigned int keep_tokens; /* Error counter for exit code. */ unsigned int errors; /* Buffer to hold macro definition string. */ unsigned char *macro_buffer; unsigned int macro_buffer_len; /* Descriptor for converting from the source character set to the execution character set. */ struct cset_converter narrow_cset_desc; /* Descriptor for converting from the source character set to the wide execution character set. */ struct cset_converter wide_cset_desc; /* Date and time text. Calculated together if either is requested. */ const uchar *date; const uchar *time; /* EOF token, and a token forcing paste avoidance. */ cpp_token avoid_paste; cpp_token eof; /* Opaque handle to the dependencies of mkdeps.c. */ struct depends *deps; /* Obstack holding all macro hash nodes. This never shrinks. See cpphash.c */ struct obstack hash_ob; /* Obstack holding buffer and conditional structures. This is a real stack. See cpplib.c. */ struct obstack buffer_ob; /* Pragma table - dynamic, because a library user can add to the list of recognized pragmas. */ struct pragma_entry *pragmas; /* Call backs to cpplib client. */ struct cpp_callbacks cb; /* Identifier hash table. */ struct ht *hash_table; /* Expression parser stack. */ struct op *op_stack, *op_limit; /* User visible options. */ struct cpp_options opts; /* Special nodes - identifiers with predefined significance to the preprocessor. */ struct spec_nodes spec_nodes; /* Whether cpplib owns the hashtable. */ bool our_hashtable; /* Traditional preprocessing output buffer (a logical line). */ struct { uchar *base; uchar *limit; uchar *cur; source_location first_line; } out; /* Used for buffer overlays by cpptrad.c. */ const uchar *saved_cur, *saved_rlimit, *saved_line_base; /* A saved list of the defined macros, for dependency checking of precompiled headers. */ struct cpp_savedstate *savedstate; }; /* Character classes. Based on the more primitive macros in safe-ctype.h. If the definition of `numchar' looks odd to you, please look up the definition of a pp-number in the C standard [section 6.4.8 of C99]. In the unlikely event that characters other than \r and \n enter the set is_vspace, the macro handle_newline() in cpplex.c must be updated. */ #define _dollar_ok(x) ((x) == '$' && CPP_OPTION (pfile, dollars_in_ident)) #define is_idchar(x) (ISIDNUM(x) || _dollar_ok(x)) #define is_numchar(x) ISIDNUM(x) #define is_idstart(x) (ISIDST(x) || _dollar_ok(x)) #define is_numstart(x) ISDIGIT(x) #define is_hspace(x) ISBLANK(x) #define is_vspace(x) IS_VSPACE(x) #define is_nvspace(x) IS_NVSPACE(x) #define is_space(x) IS_SPACE_OR_NUL(x) /* This table is constant if it can be initialized at compile time, which is the case if cpp was compiled with GCC >=2.7, or another compiler that supports C99. */ #if HAVE_DESIGNATED_INITIALIZERS extern const unsigned char _cpp_trigraph_map[UCHAR_MAX + 1]; #else extern unsigned char _cpp_trigraph_map[UCHAR_MAX + 1]; #endif /* Macros. 
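/* A sketch of how the character-class macros above are used; illustrative
   only.  Note that is_idstart and is_idchar expand _dollar_ok, which
   refers to a variable literally named "pfile", so one must be in scope.  */
static const uchar *
client_skip_identifier (cpp_reader *pfile, const uchar *cur)
{
  if (!is_idstart (*cur))
    return cur;
  do
    ++cur;
  while (is_idchar (*cur));
  return cur;
}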
*/ static inline int cpp_in_system_header (cpp_reader *); static inline int cpp_in_system_header (cpp_reader *pfile) { return pfile->buffer ? pfile->buffer->sysp : 0; } #define CPP_PEDANTIC(PF) CPP_OPTION (PF, pedantic) #define CPP_WTRADITIONAL(PF) CPP_OPTION (PF, warn_traditional) /* In cpperror.c */ extern int _cpp_begin_message (cpp_reader *, int, source_location, unsigned int); /* In cppmacro.c */ extern void _cpp_free_definition (cpp_hashnode *); extern bool _cpp_create_definition (cpp_reader *, cpp_hashnode *); extern void _cpp_pop_context (cpp_reader *); extern void _cpp_push_text_context (cpp_reader *, cpp_hashnode *, const uchar *, size_t); extern bool _cpp_save_parameter (cpp_reader *, cpp_macro *, cpp_hashnode *); extern bool _cpp_arguments_ok (cpp_reader *, cpp_macro *, const cpp_hashnode *, unsigned int); extern const uchar *_cpp_builtin_macro_text (cpp_reader *, cpp_hashnode *); int _cpp_warn_if_unused_macro (cpp_reader *, cpp_hashnode *, void *); /* In cpphash.c */ extern void _cpp_init_hashtable (cpp_reader *, hash_table *); extern void _cpp_destroy_hashtable (cpp_reader *); /* In cppfiles.c */ typedef struct _cpp_file _cpp_file; extern _cpp_file *_cpp_find_file (cpp_reader *, const char *fname, cpp_dir *start_dir, bool fake); extern bool _cpp_find_failed (_cpp_file *); extern void _cpp_mark_file_once_only (cpp_reader *, struct _cpp_file *); extern void _cpp_fake_include (cpp_reader *, const char *); extern bool _cpp_stack_file (cpp_reader *, _cpp_file*, bool); extern bool _cpp_stack_include (cpp_reader *, const char *, int, enum include_type); extern int _cpp_compare_file_date (cpp_reader *, const char *, int); extern void _cpp_report_missing_guards (cpp_reader *); extern void _cpp_init_files (cpp_reader *); extern void _cpp_cleanup_files (cpp_reader *); extern void _cpp_pop_file_buffer (cpp_reader *, struct _cpp_file *); extern bool _cpp_save_file_entries (cpp_reader *pfile, FILE *f); extern bool _cpp_read_file_entries (cpp_reader *, FILE *); /* In cppexp.c */ extern bool _cpp_parse_expr (cpp_reader *); extern struct op *_cpp_expand_op_stack (cpp_reader *); /* In cpplex.c */ extern void _cpp_process_line_notes (cpp_reader *, int); extern void _cpp_clean_line (cpp_reader *); extern bool _cpp_get_fresh_line (cpp_reader *); extern bool _cpp_skip_block_comment (cpp_reader *); extern cpp_token *_cpp_temp_token (cpp_reader *); extern const cpp_token *_cpp_lex_token (cpp_reader *); extern cpp_token *_cpp_lex_direct (cpp_reader *); extern int _cpp_equiv_tokens (const cpp_token *, const cpp_token *); extern void _cpp_init_tokenrun (tokenrun *, unsigned int); /* In cppinit.c. */ extern void _cpp_maybe_push_include_file (cpp_reader *); /* In cpplib.c */ extern int _cpp_test_assertion (cpp_reader *, unsigned int *); extern int _cpp_handle_directive (cpp_reader *, int); extern void _cpp_define_builtin (cpp_reader *, const char *); extern char ** _cpp_save_pragma_names (cpp_reader *); extern void _cpp_restore_pragma_names (cpp_reader *, char **); extern void _cpp_do__Pragma (cpp_reader *); extern void _cpp_init_directives (cpp_reader *); extern void _cpp_init_internal_pragmas (cpp_reader *); extern void _cpp_do_file_change (cpp_reader *, enum lc_reason, const char *, unsigned int, unsigned int); extern void _cpp_pop_buffer (cpp_reader *); /* In cpptrad.c. 
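/* A sketch of the diagnostic idiom these macros support; illustrative
   only: issue a pedantic warning, but not for system headers.  */
static void
client_maybe_pedwarn (cpp_reader *pfile, const char *msg)
{
  if (CPP_PEDANTIC (pfile) && !cpp_in_system_header (pfile))
    cpp_error (pfile, CPP_DL_PEDWARN, "%s", msg);
}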
*/ extern bool _cpp_scan_out_logical_line (cpp_reader *, cpp_macro *); extern bool _cpp_read_logical_line_trad (cpp_reader *); extern void _cpp_overlay_buffer (cpp_reader *pfile, const uchar *, size_t); extern void _cpp_remove_overlay (cpp_reader *); extern bool _cpp_create_trad_definition (cpp_reader *, cpp_macro *); extern bool _cpp_expansions_different_trad (const cpp_macro *, const cpp_macro *); extern uchar *_cpp_copy_replacement_text (const cpp_macro *, uchar *); extern size_t _cpp_replacement_text_len (const cpp_macro *); /* In cppcharset.c. */ extern cppchar_t _cpp_valid_ucn (cpp_reader *, const uchar **, const uchar *, int); extern void _cpp_destroy_iconv (cpp_reader *); extern uchar *_cpp_convert_input (cpp_reader *, const char *, uchar *, size_t, size_t, off_t *); extern const char *_cpp_default_encoding (void); /* Utility routines and macros. */ #define DSC(str) (const uchar *)str, sizeof str - 1 #define xnew(T) (T *) xmalloc (sizeof(T)) #define xcnew(T) (T *) xcalloc (1, sizeof(T)) #define xnewvec(T, N) (T *) xmalloc (sizeof(T) * (N)) #define xcnewvec(T, N) (T *) xcalloc (N, sizeof(T)) #define xobnew(O, T) (T *) obstack_alloc (O, sizeof(T)) /* These are inline functions instead of macros so we can get type checking. */ static inline int ustrcmp (const uchar *, const uchar *); static inline int ustrncmp (const uchar *, const uchar *, size_t); static inline size_t ustrlen (const uchar *); static inline uchar *uxstrdup (const uchar *); static inline uchar *ustrchr (const uchar *, int); static inline int ufputs (const uchar *, FILE *); static inline int ustrcmp (const uchar *s1, const uchar *s2) { return strcmp ((const char *)s1, (const char *)s2); } static inline int ustrncmp (const uchar *s1, const uchar *s2, size_t n) { return strncmp ((const char *)s1, (const char *)s2, n); } static inline size_t ustrlen (const uchar *s1) { return strlen ((const char *)s1); } static inline uchar * uxstrdup (const uchar *s1) { return (uchar *) xstrdup ((const char *)s1); } static inline uchar * ustrchr (const uchar *s1, int c) { return (uchar *) strchr ((const char *)s1, c); } static inline int ufputs (const uchar *s, FILE *f) { return fputs ((const char *)s, f); } #endif /* ! LIBCPP_INTERNAL_H */ /* Table of UCNs which are valid in identifiers. Copyright (C) 2003 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Automatically generated from cppucnid.tab, do not edit */ /* This file reproduces the table in ISO/IEC 9899:1999 (C99) Annex D, which is itself a reproduction from ISO/IEC TR 10176:1998, and the similar table from ISO/IEC 14882:1988 (C++98) Annex E, which is a reproduction of ISO/IEC PDTR 10176. Unfortunately these tables are not identical. 
*/ #ifndef LIBCPP_UCNID_H #define LIBCPP_UCNID_H #define C99 1 #define CXX 2 #define DIG 4 struct ucnrange { unsigned short lo, hi; unsigned short flags; }; static const struct ucnrange ucnranges[] = { { 0x00aa, 0x00aa, C99 }, /* Latin */ { 0x00b5, 0x00b5, C99 }, /* Special characters */ { 0x00b7, 0x00b7, C99 }, { 0x00ba, 0x00ba, C99 }, /* Latin */ { 0x00c0, 0x00d6, CXX|C99 }, { 0x00d8, 0x00f6, CXX|C99 }, { 0x00f8, 0x01f5, CXX|C99 }, { 0x01fa, 0x0217, CXX|C99 }, { 0x0250, 0x02a8, CXX|C99 }, { 0x02b0, 0x02b8, C99 }, /* Special characters */ { 0x02bb, 0x02bb, C99 }, { 0x02bd, 0x02c1, C99 }, { 0x02d0, 0x02d1, C99 }, { 0x02e0, 0x02e4, C99 }, { 0x037a, 0x037a, C99 }, { 0x0384, 0x0384, CXX }, /* Greek */ { 0x0386, 0x0386, C99 }, { 0x0388, 0x038a, CXX|C99 }, { 0x038c, 0x038c, CXX|C99 }, { 0x038e, 0x03a1, CXX|C99 }, { 0x03a3, 0x03ce, CXX|C99 }, { 0x03d0, 0x03d6, CXX|C99 }, { 0x03da, 0x03da, CXX|C99 }, { 0x03dc, 0x03dc, CXX|C99 }, { 0x03de, 0x03de, CXX|C99 }, { 0x03e0, 0x03e0, CXX|C99 }, { 0x03e2, 0x03f3, CXX|C99 }, { 0x0401, 0x040c, CXX|C99 }, /* Cyrillic */ { 0x040d, 0x040d, CXX }, { 0x040e, 0x040e, C99 }, { 0x040f, 0x044f, CXX|C99 }, { 0x0451, 0x045c, CXX|C99 }, { 0x045e, 0x0481, CXX|C99 }, { 0x0490, 0x04c4, CXX|C99 }, { 0x04c7, 0x04c8, CXX|C99 }, { 0x04cb, 0x04cc, CXX|C99 }, { 0x04d0, 0x04eb, CXX|C99 }, { 0x04ee, 0x04f5, CXX|C99 }, { 0x04f8, 0x04f9, CXX|C99 }, { 0x0531, 0x0556, CXX|C99 }, /* Armenian */ { 0x0559, 0x0559, C99 }, /* Special characters */ { 0x0561, 0x0587, CXX|C99 }, /* Armenian */ { 0x05b0, 0x05b9, C99 }, /* Hebrew */ { 0x05bb, 0x05bd, C99 }, { 0x05bf, 0x05bf, C99 }, { 0x05c1, 0x05c2, C99 }, { 0x05d0, 0x05ea, CXX|C99 }, { 0x05f0, 0x05f2, CXX|C99 }, { 0x05f3, 0x05f4, CXX }, { 0x0621, 0x063a, CXX|C99 }, /* Arabic */ { 0x0640, 0x0652, CXX|C99 }, { 0x0660, 0x0669, C99|DIG }, /* Digits */ { 0x0670, 0x06b7, CXX|C99 }, /* Arabic */ { 0x06ba, 0x06be, CXX|C99 }, { 0x06c0, 0x06ce, CXX|C99 }, { 0x06d0, 0x06dc, C99 }, { 0x06e5, 0x06e7, CXX|C99 }, { 0x06e8, 0x06e8, C99 }, { 0x06ea, 0x06ed, C99 }, { 0x06f0, 0x06f9, C99|DIG }, /* Digits */ { 0x0901, 0x0903, C99 }, /* Devanagari */ { 0x0905, 0x0939, CXX|C99 }, { 0x093d, 0x093d, C99 }, /* Special characters */ { 0x093e, 0x094d, C99 }, /* Devanagari */ { 0x0950, 0x0952, C99 }, { 0x0958, 0x0962, CXX|C99 }, { 0x0963, 0x0963, C99 }, { 0x0966, 0x096f, C99|DIG }, /* Digits */ { 0x0981, 0x0983, C99 }, /* Bengali */ { 0x0985, 0x098c, CXX|C99 }, { 0x098f, 0x0990, CXX|C99 }, { 0x0993, 0x09a8, CXX|C99 }, { 0x09aa, 0x09b0, CXX|C99 }, { 0x09b2, 0x09b2, CXX|C99 }, { 0x09b6, 0x09b9, CXX|C99 }, { 0x09be, 0x09c4, C99 }, { 0x09c7, 0x09c8, C99 }, { 0x09cb, 0x09cd, C99 }, { 0x09dc, 0x09dd, CXX|C99 }, { 0x09df, 0x09e1, CXX|C99 }, { 0x09e2, 0x09e3, C99 }, { 0x09e6, 0x09ef, C99|DIG }, /* Digits */ { 0x09f0, 0x09f1, CXX|C99 }, /* Bengali */ { 0x0a02, 0x0a02, C99 }, /* Gurmukhi */ { 0x0a05, 0x0a0a, CXX|C99 }, { 0x0a0f, 0x0a10, CXX|C99 }, { 0x0a13, 0x0a28, CXX|C99 }, { 0x0a2a, 0x0a30, CXX|C99 }, { 0x0a32, 0x0a33, CXX|C99 }, { 0x0a35, 0x0a36, CXX|C99 }, { 0x0a38, 0x0a39, CXX|C99 }, { 0x0a3e, 0x0a42, C99 }, { 0x0a47, 0x0a48, C99 }, { 0x0a4b, 0x0a4d, C99 }, { 0x0a59, 0x0a5c, CXX|C99 }, { 0x0a5e, 0x0a5e, CXX|C99 }, { 0x0a66, 0x0a6f, C99|DIG }, /* Digits */ { 0x0a74, 0x0a74, C99 }, /* Gurmukhi */ { 0x0a81, 0x0a83, C99 }, /* Gujarati */ { 0x0a85, 0x0a8b, CXX|C99 }, { 0x0a8d, 0x0a8d, CXX|C99 }, { 0x0a8f, 0x0a91, CXX|C99 }, { 0x0a93, 0x0aa8, CXX|C99 }, { 0x0aaa, 0x0ab0, CXX|C99 }, { 0x0ab2, 0x0ab3, CXX|C99 }, { 0x0ab5, 0x0ab9, CXX|C99 }, { 0x0abd, 0x0ac5, C99 }, { 0x0ac7, 0x0ac9, 
C99 }, { 0x0acb, 0x0acd, C99 }, { 0x0ad0, 0x0ad0, C99 }, { 0x0ae0, 0x0ae0, CXX|C99 }, { 0x0ae6, 0x0aef, C99|DIG }, /* Digits */ { 0x0b01, 0x0b03, C99 }, /* Oriya */ { 0x0b05, 0x0b0c, CXX|C99 }, { 0x0b0f, 0x0b10, CXX|C99 }, { 0x0b13, 0x0b28, CXX|C99 }, { 0x0b2a, 0x0b30, CXX|C99 }, { 0x0b32, 0x0b33, CXX|C99 }, { 0x0b36, 0x0b39, CXX|C99 }, { 0x0b3d, 0x0b3d, C99 }, /* Special characters */ { 0x0b3e, 0x0b43, C99 }, /* Oriya */ { 0x0b47, 0x0b48, C99 }, { 0x0b4b, 0x0b4d, C99 }, { 0x0b5c, 0x0b5d, CXX|C99 }, { 0x0b5f, 0x0b61, CXX|C99 }, { 0x0b66, 0x0b6f, C99|DIG }, /* Digits */ { 0x0b82, 0x0b83, C99 }, /* Tamil */ { 0x0b85, 0x0b8a, CXX|C99 }, { 0x0b8e, 0x0b90, CXX|C99 }, { 0x0b92, 0x0b95, CXX|C99 }, { 0x0b99, 0x0b9a, CXX|C99 }, { 0x0b9c, 0x0b9c, CXX|C99 }, { 0x0b9e, 0x0b9f, CXX|C99 }, { 0x0ba3, 0x0ba4, CXX|C99 }, { 0x0ba8, 0x0baa, CXX|C99 }, { 0x0bae, 0x0bb5, CXX|C99 }, { 0x0bb7, 0x0bb9, CXX|C99 }, { 0x0bbe, 0x0bc2, C99 }, { 0x0bc6, 0x0bc8, C99 }, { 0x0bca, 0x0bcd, C99 }, { 0x0be7, 0x0bef, C99|DIG }, /* Digits */ { 0x0c01, 0x0c03, C99 }, /* Telugu */ { 0x0c05, 0x0c0c, CXX|C99 }, { 0x0c0e, 0x0c10, CXX|C99 }, { 0x0c12, 0x0c28, CXX|C99 }, { 0x0c2a, 0x0c33, CXX|C99 }, { 0x0c35, 0x0c39, CXX|C99 }, { 0x0c3e, 0x0c44, C99 }, { 0x0c46, 0x0c48, C99 }, { 0x0c4a, 0x0c4d, C99 }, { 0x0c60, 0x0c61, CXX|C99 }, { 0x0c66, 0x0c6f, C99|DIG }, /* Digits */ { 0x0c82, 0x0c83, C99 }, /* Kannada */ { 0x0c85, 0x0c8c, CXX|C99 }, { 0x0c8e, 0x0c90, CXX|C99 }, { 0x0c92, 0x0ca8, CXX|C99 }, { 0x0caa, 0x0cb3, CXX|C99 }, { 0x0cb5, 0x0cb9, CXX|C99 }, { 0x0cbe, 0x0cc4, C99 }, { 0x0cc6, 0x0cc8, C99 }, { 0x0cca, 0x0ccd, C99 }, { 0x0cde, 0x0cde, C99 }, { 0x0ce0, 0x0ce1, CXX|C99 }, { 0x0ce6, 0x0cef, C99|DIG }, /* Digits */ { 0x0d02, 0x0d03, C99 }, /* Malayalam */ { 0x0d05, 0x0d0c, CXX|C99 }, { 0x0d0e, 0x0d10, CXX|C99 }, { 0x0d12, 0x0d28, CXX|C99 }, { 0x0d2a, 0x0d39, CXX|C99 }, { 0x0d3e, 0x0d43, C99 }, { 0x0d46, 0x0d48, C99 }, { 0x0d4a, 0x0d4d, C99 }, { 0x0d60, 0x0d61, CXX|C99 }, { 0x0d66, 0x0d6f, C99|DIG }, /* Digits */ { 0x0e01, 0x0e30, CXX|C99 }, /* Thai */ { 0x0e31, 0x0e31, C99 }, { 0x0e32, 0x0e33, CXX|C99 }, { 0x0e34, 0x0e3a, C99 }, { 0x0e40, 0x0e46, CXX|C99 }, { 0x0e47, 0x0e49, C99 }, { 0x0e50, 0x0e59, CXX|C99|DIG }, /* Digits */ { 0x0e5a, 0x0e5b, CXX|C99 }, /* Thai */ { 0x0e81, 0x0e82, CXX|C99 }, /* Lao */ { 0x0e84, 0x0e84, CXX|C99 }, { 0x0e87, 0x0e88, CXX|C99 }, { 0x0e8a, 0x0e8a, CXX|C99 }, { 0x0e8d, 0x0e8d, CXX|C99 }, { 0x0e94, 0x0e97, CXX|C99 }, { 0x0e99, 0x0e9f, CXX|C99 }, { 0x0ea1, 0x0ea3, CXX|C99 }, { 0x0ea5, 0x0ea5, CXX|C99 }, { 0x0ea7, 0x0ea7, CXX|C99 }, { 0x0eaa, 0x0eab, CXX|C99 }, { 0x0ead, 0x0eae, CXX|C99 }, { 0x0eaf, 0x0eaf, CXX }, { 0x0eb0, 0x0eb0, CXX|C99 }, { 0x0eb1, 0x0eb1, C99 }, { 0x0eb2, 0x0eb3, CXX|C99 }, { 0x0eb4, 0x0eb9, C99 }, { 0x0ebb, 0x0ebc, C99 }, { 0x0ebd, 0x0ebd, CXX|C99 }, { 0x0ec0, 0x0ec4, CXX|C99 }, { 0x0ec6, 0x0ec6, CXX|C99 }, { 0x0ec8, 0x0ecd, C99 }, { 0x0ed0, 0x0ed9, C99|DIG }, /* Digits */ { 0x0edc, 0x0edd, C99 }, /* Lao */ { 0x0f00, 0x0f00, C99 }, /* Tibetan */ { 0x0f18, 0x0f19, C99 }, { 0x0f20, 0x0f33, C99|DIG }, /* Digits */ { 0x0f35, 0x0f35, C99 }, /* Tibetan */ { 0x0f37, 0x0f37, C99 }, { 0x0f39, 0x0f39, C99 }, { 0x0f3e, 0x0f47, C99 }, { 0x0f49, 0x0f69, C99 }, { 0x0f71, 0x0f84, C99 }, { 0x0f86, 0x0f8b, C99 }, { 0x0f90, 0x0f95, C99 }, { 0x0f97, 0x0f97, C99 }, { 0x0f99, 0x0fad, C99 }, { 0x0fb1, 0x0fb7, C99 }, { 0x0fb9, 0x0fb9, C99 }, { 0x10a0, 0x10c5, CXX|C99 }, /* Georgian */ { 0x10d0, 0x10f6, CXX|C99 }, { 0x1100, 0x1159, CXX }, /* Hangul */ { 0x1161, 0x11a2, CXX }, { 0x11a8, 0x11f9, CXX }, { 
0x1e00, 0x1e9a, CXX|C99 }, /* Latin */ { 0x1e9b, 0x1e9b, C99 }, { 0x1ea0, 0x1ef9, CXX|C99 }, { 0x1f00, 0x1f15, CXX|C99 }, /* Greek */ { 0x1f18, 0x1f1d, CXX|C99 }, { 0x1f20, 0x1f45, CXX|C99 }, { 0x1f48, 0x1f4d, CXX|C99 }, { 0x1f50, 0x1f57, CXX|C99 }, { 0x1f59, 0x1f59, CXX|C99 }, { 0x1f5b, 0x1f5b, CXX|C99 }, { 0x1f5d, 0x1f5d, CXX|C99 }, { 0x1f5f, 0x1f7d, CXX|C99 }, { 0x1f80, 0x1fb4, CXX|C99 }, { 0x1fb6, 0x1fbc, CXX|C99 }, { 0x1fbe, 0x1fbe, C99 }, /* Special characters */ { 0x1fc2, 0x1fc4, CXX|C99 }, /* Greek */ { 0x1fc6, 0x1fcc, CXX|C99 }, { 0x1fd0, 0x1fd3, CXX|C99 }, { 0x1fd6, 0x1fdb, CXX|C99 }, { 0x1fe0, 0x1fec, CXX|C99 }, { 0x1ff2, 0x1ff4, CXX|C99 }, { 0x1ff6, 0x1ffc, CXX|C99 }, { 0x203f, 0x2040, C99 }, /* Special characters */ { 0x207f, 0x207f, C99 }, /* Latin */ { 0x2102, 0x2102, C99 }, /* Special characters */ { 0x2107, 0x2107, C99 }, { 0x210a, 0x2113, C99 }, { 0x2115, 0x2115, C99 }, { 0x2118, 0x211d, C99 }, { 0x2124, 0x2124, C99 }, { 0x2126, 0x2126, C99 }, { 0x2128, 0x2128, C99 }, { 0x212a, 0x2131, C99 }, { 0x2133, 0x2138, C99 }, { 0x2160, 0x2182, C99 }, { 0x3005, 0x3007, C99 }, { 0x3021, 0x3029, C99 }, { 0x3041, 0x3093, CXX|C99 }, /* Hiragana */ { 0x3094, 0x3094, CXX }, { 0x309b, 0x309c, CXX|C99 }, { 0x309d, 0x309e, CXX }, { 0x30a1, 0x30f6, CXX|C99 }, /* Katakana */ { 0x30f7, 0x30fa, CXX }, { 0x30fb, 0x30fc, CXX|C99 }, { 0x30fd, 0x30fe, CXX }, { 0x3105, 0x312c, CXX|C99 }, /* Bopomofo */ { 0x4e00, 0x9fa5, CXX|C99 }, /* CJK Unified Ideographs */ { 0xac00, 0xd7a3, C99 }, /* Hangul */ { 0xf900, 0xfa2d, CXX }, /* CJK Unified Ideographs */ { 0xfb1f, 0xfb36, CXX }, { 0xfb38, 0xfb3c, CXX }, { 0xfb3e, 0xfb3e, CXX }, { 0xfb40, 0xfb44, CXX }, { 0xfb46, 0xfbb1, CXX }, { 0xfbd3, 0xfd3f, CXX }, { 0xfd50, 0xfd8f, CXX }, { 0xfd92, 0xfdc7, CXX }, { 0xfdf0, 0xfdfb, CXX }, { 0xfe70, 0xfe72, CXX }, { 0xfe74, 0xfe74, CXX }, { 0xfe76, 0xfefc, CXX }, { 0xff21, 0xff3a, CXX }, { 0xff41, 0xff5a, CXX }, { 0xff66, 0xffbe, CXX }, { 0xffc2, 0xffc7, CXX }, { 0xffca, 0xffcf, CXX }, { 0xffd2, 0xffd7, CXX }, { 0xffda, 0xffdc, CXX }, }; #endif /* LIBCPP_UCNID_H */ /* Character set handling for C-family languages. Terminological note: In what follows, "charset" or "character set" will be taken to mean both an abstract set of characters and an encoding for that set. The C99 standard discusses two character sets: source and execution. The source character set is used for internal processing in translation phases 1 through 4; the execution character set is used thereafter. Both are required by 5.2.1.2p1 to be multibyte encodings, not wide character encodings (see 3.7.2, 3.7.3 for the standardese meanings of these terms). Furthermore, the "basic character set" (listed in 5.2.1p3) is to be encoded in each with values one byte wide, and is to appear in the initial shift state. It is not explicitly mentioned, but there is also a "wide execution character set" used to encode wide character constants and wide string literals; this is supposed to be the result of applying the standard library function mbstowcs() to an equivalent narrow string (6.4.5p5). However, the behavior of hexadecimal and octal \-escapes is at odds with this; they are supposed to be translated directly to wchar_t values (6.4.4.4p5,6). The source character set is not necessarily the character set used to encode physical source files on disk; translation phase 1 converts from whatever that encoding is to the source character set. The presence of universal character names in C99 (6.4.3 et seq.) 
forces the source character set to be isomorphic to ISO 10646, that is, Unicode. There is no such constraint on the execution character set; note also that the conversion from source to execution character set does not occur for identifiers (5.1.1.2p1#5). For convenience of implementation, the source character set's encoding of the basic character set should be identical to the execution character set OF THE HOST SYSTEM's encoding of the basic character set, and it should not be a state-dependent encoding. cpplib uses UTF-8 or UTF-EBCDIC for the source character set, depending on whether the host is based on ASCII or EBCDIC (see respectively Unicode section 2.3/ISO10646 Amendment 2, and Unicode Technical Report #16). With limited exceptions, it relies on the system library's iconv() primitive to do charset conversion (specified in SUSv2). */ #if !HAVE_ICONV /* Make certain that the uses of iconv(), iconv_open(), iconv_close() below, which are guarded only by if statements with compile-time constant conditions, do not cause link errors. */ #define iconv_open(x, y) (errno = EINVAL, (iconv_t)-1) #define iconv(a,b,c,d,e) (errno = EINVAL, (size_t)-1) #define iconv_close(x) (void)0 #define ICONV_CONST #endif #if HOST_CHARSET == HOST_CHARSET_ASCII #define SOURCE_CHARSET "UTF-8" #elif HOST_CHARSET == HOST_CHARSET_EBCDIC #define SOURCE_CHARSET "UTF-EBCDIC" #else #error "Unrecognized basic host character set" #endif #ifndef EILSEQ #define EILSEQ EINVAL #endif /* This structure is used for a resizable string buffer throughout. */ /* Don't call it strbuf, as that conflicts with unistd.h on systems such as DYNIX/ptx where unistd.h includes stropts.h. */ struct _cpp_strbuf { uchar *text; size_t asize; size_t len; }; /* This is enough to hold any string that fits on a single 80-column line, even if iconv quadruples its size (e.g. conversion from ASCII to UTF-32) rounded up to a power of two. */ #define OUTBUF_BLOCK_SIZE 256 /* Conversions between UTF-8 and UTF-16/32 are implemented by custom logic. This is because a depressing number of systems lack iconv, or have have iconv libraries that do not do these conversions, so we need a fallback implementation for them. To ensure the fallback doesn't break due to neglect, it is used on all systems. UTF-32 encoding is nice and simple: a four-byte binary number, constrained to the range 00000000-7FFFFFFF to avoid questions of signedness. We do have to cope with big- and little-endian variants. UTF-16 encoding uses two-byte binary numbers, again in big- and little-endian variants, for all values in the 00000000-0000FFFF range. Values in the 00010000-0010FFFF range are encoded as pairs of two-byte numbers, called "surrogate pairs": given a number S in this range, it is mapped to a pair (H, L) as follows: H = (S - 0x10000) / 0x400 + 0xD800 L = (S - 0x10000) % 0x400 + 0xDC00 Two-byte values in the D800...DFFF range are ill-formed except as a component of a surrogate pair. Even if the encoding within a two-byte value is little-endian, the H member of the surrogate pair comes first. There is no way to encode values in the 00110000-7FFFFFFF range, which is not currently a problem as there are no assigned code points in that range; however, the author expects that it will eventually become necessary to abandon UTF-16 due to this limitation. Note also that, because of these pairs, UTF-16 does not meet the requirements of the C standard for a wide character encoding (see 3.7.3 and 6.4.4.4p11). 
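/* Worked example (illustrative): the code point U+1D11E is above 0xFFFF,
   so in UTF-16 it becomes the surrogate pair
     H = (0x1D11E - 0x10000) / 0x400 + 0xD800 = 0xD834
     L = (0x1D11E - 0x10000) % 0x400 + 0xDC00 = 0xDD1E
   and is stored, in the big-endian variant, as the bytes D8 34 DD 1E.  */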
UTF-8 encoding looks like this: value range encoded as 00000000-0000007F 0xxxxxxx 00000080-000007FF 110xxxxx 10xxxxxx 00000800-0000FFFF 1110xxxx 10xxxxxx 10xxxxxx 00010000-001FFFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx 00200000-03FFFFFF 111110xx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx 04000000-7FFFFFFF 1111110x 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx Values in the 0000D800 ... 0000DFFF range (surrogates) are invalid, which means that three-byte sequences ED xx yy, with A0 <= xx <= BF, never occur. Note also that any value that can be encoded by a given row of the table can also be encoded by all successive rows, but this is not done; only the shortest possible encoding for any given value is valid. For instance, the character 07C0 could be encoded as any of DF 80, E0 9F 80, F0 80 9F 80, F8 80 80 9F 80, or FC 80 80 80 9F 80. Only the first is valid. An implementation note: the transformation from UTF-16 to UTF-8, or vice versa, is easiest done by using UTF-32 as an intermediary. */ /* Internal primitives which go from an UTF-8 byte stream to native-endian UTF-32 in a cppchar_t, or vice versa; this avoids an extra marshal/unmarshal operation in several places below. */ static inline int one_utf8_to_cppchar (const uchar **inbufp, size_t *inbytesleftp, cppchar_t *cp) { static const uchar masks[6] = { 0x7F, 0x1F, 0x0F, 0x07, 0x02, 0x01 }; static const uchar patns[6] = { 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC }; cppchar_t c; const uchar *inbuf = *inbufp; size_t nbytes, i; if (*inbytesleftp < 1) return EINVAL; c = *inbuf; if (c < 0x80) { *cp = c; *inbytesleftp -= 1; *inbufp += 1; return 0; } /* The number of leading 1-bits in the first byte indicates how many bytes follow. */ for (nbytes = 2; nbytes < 7; nbytes++) if ((c & ~masks[nbytes-1]) == patns[nbytes-1]) goto found; return EILSEQ; found: if (*inbytesleftp < nbytes) return EINVAL; c = (c & masks[nbytes-1]); inbuf++; for (i = 1; i < nbytes; i++) { cppchar_t n = *inbuf++; if ((n & 0xC0) != 0x80) return EILSEQ; c = ((c << 6) + (n & 0x3F)); } /* Make sure the shortest possible encoding was used. */ if (c <= 0x7F && nbytes > 1) return EILSEQ; if (c <= 0x7FF && nbytes > 2) return EILSEQ; if (c <= 0xFFFF && nbytes > 3) return EILSEQ; if (c <= 0x1FFFFF && nbytes > 4) return EILSEQ; if (c <= 0x3FFFFFF && nbytes > 5) return EILSEQ; /* Make sure the character is valid. */ if (c > 0x7FFFFFFF || (c >= 0xD800 && c <= 0xDFFF)) return EILSEQ; *cp = c; *inbufp = inbuf; *inbytesleftp -= nbytes; return 0; } static inline int one_cppchar_to_utf8 (cppchar_t c, uchar **outbufp, size_t *outbytesleftp) { static const uchar masks[6] = { 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC }; static const uchar limits[6] = { 0x80, 0xE0, 0xF0, 0xF8, 0xFC, 0xFE }; size_t nbytes; uchar buf[6], *p = &buf[6]; uchar *outbuf = *outbufp; nbytes = 1; if (c < 0x80) *--p = c; else { do { *--p = ((c & 0x3F) | 0x80); c >>= 6; nbytes++; } while (c >= 0x3F || (c & limits[nbytes-1])); *--p = (c | masks[nbytes-1]); } if (*outbytesleftp < nbytes) return E2BIG; while (p < &buf[6]) *outbuf++ = *p++; *outbytesleftp -= nbytes; *outbufp = outbuf; return 0; } /* The following four functions transform one character between the two encodings named in the function name. All have the signature int (*)(iconv_t bigend, const uchar **inbufp, size_t *inbytesleftp, uchar **outbufp, size_t *outbytesleftp) BIGEND must have the value 0 or 1, coerced to (iconv_t); it is interpreted as a boolean indicating whether big-endian or little-endian encoding is to be used for the member of the pair that is not UTF-8. 
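/* A usage sketch for the decoder above; illustrative only: pull a single
   character out of a UTF-8 byte sequence.  */
static void
client_decode_example (void)
{
  static const uchar euro[3] = { 0xE2, 0x82, 0xAC };	/* U+20AC in UTF-8 */
  const uchar *in = euro;
  size_t left = sizeof euro;
  cppchar_t c;

  if (one_utf8_to_cppchar (&in, &left, &c) == 0)
    {
      /* Here c == 0x20AC, IN has advanced by three bytes, LEFT == 0.  */
    }
}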
INBUFP, INBYTESLEFTP, OUTBUFP, OUTBYTESLEFTP work exactly as they do for iconv. The return value is either 0 for success, or an errno value for failure, which may be E2BIG (need more space), EILSEQ (ill-formed input sequence), ir EINVAL (incomplete input sequence). */ static inline int one_utf8_to_utf32 (iconv_t bigend, const uchar **inbufp, size_t *inbytesleftp, uchar **outbufp, size_t *outbytesleftp) { uchar *outbuf; cppchar_t s = 0; int rval; /* Check for space first, since we know exactly how much we need. */ if (*outbytesleftp < 4) return E2BIG; rval = one_utf8_to_cppchar (inbufp, inbytesleftp, &s); if (rval) return rval; outbuf = *outbufp; outbuf[bigend ? 3 : 0] = (s & 0x000000FF); outbuf[bigend ? 2 : 1] = (s & 0x0000FF00) >> 8; outbuf[bigend ? 1 : 2] = (s & 0x00FF0000) >> 16; outbuf[bigend ? 0 : 3] = (s & 0xFF000000) >> 24; *outbufp += 4; *outbytesleftp -= 4; return 0; } static inline int one_utf32_to_utf8 (iconv_t bigend, const uchar **inbufp, size_t *inbytesleftp, uchar **outbufp, size_t *outbytesleftp) { cppchar_t s; int rval; const uchar *inbuf; if (*inbytesleftp < 4) return EINVAL; inbuf = *inbufp; s = inbuf[bigend ? 0 : 3] << 24; s += inbuf[bigend ? 1 : 2] << 16; s += inbuf[bigend ? 2 : 1] << 8; s += inbuf[bigend ? 3 : 0]; if (s >= 0x7FFFFFFF || (s >= 0xD800 && s <= 0xDFFF)) return EILSEQ; rval = one_cppchar_to_utf8 (s, outbufp, outbytesleftp); if (rval) return rval; *inbufp += 4; *inbytesleftp -= 4; return 0; } static inline int one_utf8_to_utf16 (iconv_t bigend, const uchar **inbufp, size_t *inbytesleftp, uchar **outbufp, size_t *outbytesleftp) { int rval; cppchar_t s = 0; const uchar *save_inbuf = *inbufp; size_t save_inbytesleft = *inbytesleftp; uchar *outbuf = *outbufp; rval = one_utf8_to_cppchar (inbufp, inbytesleftp, &s); if (rval) return rval; if (s > 0x0010FFFF) { *inbufp = save_inbuf; *inbytesleftp = save_inbytesleft; return EILSEQ; } if (s < 0xFFFF) { if (*outbytesleftp < 2) { *inbufp = save_inbuf; *inbytesleftp = save_inbytesleft; return E2BIG; } outbuf[bigend ? 1 : 0] = (s & 0x00FF); outbuf[bigend ? 0 : 1] = (s & 0xFF00) >> 8; *outbufp += 2; *outbytesleftp -= 2; return 0; } else { cppchar_t hi, lo; if (*outbytesleftp < 4) { *inbufp = save_inbuf; *inbytesleftp = save_inbytesleft; return E2BIG; } hi = (s - 0x10000) / 0x400 + 0xD800; lo = (s - 0x10000) % 0x400 + 0xDC00; /* Even if we are little-endian, put the high surrogate first. ??? Matches practice? */ outbuf[bigend ? 1 : 0] = (hi & 0x00FF); outbuf[bigend ? 0 : 1] = (hi & 0xFF00) >> 8; outbuf[bigend ? 3 : 2] = (lo & 0x00FF); outbuf[bigend ? 2 : 3] = (lo & 0xFF00) >> 8; *outbufp += 4; *outbytesleftp -= 4; return 0; } } static inline int one_utf16_to_utf8 (iconv_t bigend, const uchar **inbufp, size_t *inbytesleftp, uchar **outbufp, size_t *outbytesleftp) { cppchar_t s; const uchar *inbuf = *inbufp; int rval; if (*inbytesleftp < 2) return EINVAL; s = inbuf[bigend ? 0 : 1] << 8; s += inbuf[bigend ? 1 : 0]; /* Low surrogate without immediately preceding high surrogate is invalid. */ if (s >= 0xDC00 && s <= 0xDFFF) return EILSEQ; /* High surrogate must have a following low surrogate. */ else if (s >= 0xD800 && s <= 0xDBFF) { cppchar_t hi = s, lo; if (*inbytesleftp < 4) return EINVAL; lo = inbuf[bigend ? 2 : 3] << 8; lo += inbuf[bigend ? 3 : 2]; if (lo < 0xDC00 || lo > 0xDFFF) return EILSEQ; s = (hi - 0xD800) * 0x400 + (lo - 0xDC00) + 0x10000; } rval = one_cppchar_to_utf8 (s, outbufp, outbytesleftp); if (rval) return rval; /* Success - update the input pointers (one_cppchar_to_utf8 has done the output pointers for us). 
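Two bytes of UTF-16 input were consumed for a BMP character, four for a surrogate pair.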
*/ if (s <= 0xFFFF) { *inbufp += 2; *inbytesleftp -= 2; } else { *inbufp += 4; *inbytesleftp -= 4; } return 0; } /* Helper routine for the next few functions. The 'const' on one_conversion means that we promise not to modify what function is pointed to, which lets the inliner see through it. */ static inline bool conversion_loop (int (*const one_conversion)(iconv_t, const uchar **, size_t *, uchar **, size_t *), iconv_t cd, const uchar *from, size_t flen, struct _cpp_strbuf *to) { const uchar *inbuf; uchar *outbuf; size_t inbytesleft, outbytesleft; int rval; inbuf = from; inbytesleft = flen; outbuf = to->text + to->len; outbytesleft = to->asize - to->len; for (;;) { do rval = one_conversion (cd, &inbuf, &inbytesleft, &outbuf, &outbytesleft); while (inbytesleft && !rval); if (__builtin_expect (inbytesleft == 0, 1)) { to->len = to->asize - outbytesleft; return true; } if (rval != E2BIG) { errno = rval; return false; } outbytesleft += OUTBUF_BLOCK_SIZE; to->asize += OUTBUF_BLOCK_SIZE; to->text = xrealloc (to->text, to->asize); outbuf = to->text + to->asize - outbytesleft; } } /* These functions convert entire strings between character sets. They all have the signature bool (*)(iconv_t cd, const uchar *from, size_t flen, struct _cpp_strbuf *to); The input string FROM is converted as specified by the function name plus the iconv descriptor CD (which may be fake), and the result appended to TO. On any error, false is returned, otherwise true. */ /* These four use the custom conversion code above. */ static bool convert_utf8_utf16 (iconv_t cd, const uchar *from, size_t flen, struct _cpp_strbuf *to) { return conversion_loop (one_utf8_to_utf16, cd, from, flen, to); } static bool convert_utf8_utf32 (iconv_t cd, const uchar *from, size_t flen, struct _cpp_strbuf *to) { return conversion_loop (one_utf8_to_utf32, cd, from, flen, to); } static bool convert_utf16_utf8 (iconv_t cd, const uchar *from, size_t flen, struct _cpp_strbuf *to) { return conversion_loop (one_utf16_to_utf8, cd, from, flen, to); } static bool convert_utf32_utf8 (iconv_t cd, const uchar *from, size_t flen, struct _cpp_strbuf *to) { return conversion_loop (one_utf32_to_utf8, cd, from, flen, to); } /* Identity conversion, used when we have no alternative. */ static bool convert_no_conversion (iconv_t cd ATTRIBUTE_UNUSED, const uchar *from, size_t flen, struct _cpp_strbuf *to) { if (to->len + flen > to->asize) { to->asize = to->len + flen; to->text = xrealloc (to->text, to->asize); } memcpy (to->text + to->len, from, flen); to->len += flen; return true; } /* And this one uses the system iconv primitive. It's a little different, since iconv's interface is a little different. */ #if HAVE_ICONV static bool convert_using_iconv (iconv_t cd, const uchar *from, size_t flen, struct _cpp_strbuf *to) { ICONV_CONST char *inbuf; char *outbuf; size_t inbytesleft, outbytesleft; /* Reset conversion descriptor and check that it is valid. 
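Calling iconv with all of its buffer arguments null resets CD to its initial conversion state; the call fails if CD is not a valid open descriptor.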
*/ if (iconv (cd, 0, 0, 0, 0) == (size_t)-1) return false; inbuf = (ICONV_CONST char *)from; inbytesleft = flen; outbuf = (char *)to->text + to->len; outbytesleft = to->asize - to->len; for (;;) { iconv (cd, &inbuf, &inbytesleft, &outbuf, &outbytesleft); if (__builtin_expect (inbytesleft == 0, 1)) { to->len = to->asize - outbytesleft; return true; } if (errno != E2BIG) return false; outbytesleft += OUTBUF_BLOCK_SIZE; to->asize += OUTBUF_BLOCK_SIZE; to->text = xrealloc (to->text, to->asize); outbuf = (char *)to->text + to->asize - outbytesleft; } } #else #define convert_using_iconv 0 /* prevent undefined symbol error below */ #endif /* Arrange for the above custom conversion logic to be used automatically when conversion between a suitable pair of character sets is requested. */ #define APPLY_CONVERSION(CONVERTER, FROM, FLEN, TO) \ CONVERTER.func (CONVERTER.cd, FROM, FLEN, TO) struct conversion { const char *pair; convert_f func; iconv_t fake_cd; }; static const struct conversion conversion_tab[] = { { "UTF-8/UTF-32LE", convert_utf8_utf32, (iconv_t)0 }, { "UTF-8/UTF-32BE", convert_utf8_utf32, (iconv_t)1 }, { "UTF-8/UTF-16LE", convert_utf8_utf16, (iconv_t)0 }, { "UTF-8/UTF-16BE", convert_utf8_utf16, (iconv_t)1 }, { "UTF-32LE/UTF-8", convert_utf32_utf8, (iconv_t)0 }, { "UTF-32BE/UTF-8", convert_utf32_utf8, (iconv_t)1 }, { "UTF-16LE/UTF-8", convert_utf16_utf8, (iconv_t)0 }, { "UTF-16BE/UTF-8", convert_utf16_utf8, (iconv_t)1 }, }; /* Subroutine of cpp_init_iconv: initialize and return a cset_converter structure for conversion from FROM to TO. If iconv_open() fails, issue an error and return an identity converter. Silently return an identity converter if FROM and TO are identical. */ static struct cset_converter init_iconv_desc (cpp_reader *pfile, const char *to, const char *from) { struct cset_converter ret; char *pair; size_t i; if (!strcasecmp (to, from)) { ret.func = convert_no_conversion; ret.cd = (iconv_t) -1; return ret; } pair = alloca(strlen(to) + strlen(from) + 2); strcpy(pair, from); strcat(pair, "/"); strcat(pair, to); for (i = 0; i < ARRAY_SIZE (conversion_tab); i++) if (!strcasecmp (pair, conversion_tab[i].pair)) { ret.func = conversion_tab[i].func; ret.cd = conversion_tab[i].fake_cd; return ret; } /* No custom converter - try iconv. */ if (HAVE_ICONV) { ret.func = convert_using_iconv; ret.cd = iconv_open (to, from); if (ret.cd == (iconv_t) -1) { if (errno == EINVAL) cpp_error (pfile, CPP_DL_ERROR, /* FIXME should be DL_SORRY */ "conversion from %s to %s not supported by iconv", from, to); else cpp_errno (pfile, CPP_DL_ERROR, "iconv_open"); ret.func = convert_no_conversion; } } else { cpp_error (pfile, CPP_DL_ERROR, /* FIXME: should be DL_SORRY */ "no iconv implementation, cannot convert from %s to %s", from, to); ret.func = convert_no_conversion; ret.cd = (iconv_t) -1; } return ret; } /* If charset conversion is requested, initialize iconv(3) descriptors for conversion from the source character set to the execution character sets. If iconv is not present in the C library, and conversion is requested, issue an error. */ void cpp_init_iconv (cpp_reader *pfile) { const char *ncset = CPP_OPTION (pfile, narrow_charset); const char *wcset = CPP_OPTION (pfile, wide_charset); const char *default_wcset; bool be = CPP_OPTION (pfile, bytes_big_endian); if (CPP_OPTION (pfile, wchar_precision) >= 32) default_wcset = be ? "UTF-32BE" : "UTF-32LE"; else if (CPP_OPTION (pfile, wchar_precision) >= 16) default_wcset = be ? 
"UTF-16BE" : "UTF-16LE"; else /* This effectively means that wide strings are not supported, so don't do any conversion at all. */ default_wcset = SOURCE_CHARSET; if (!ncset) ncset = SOURCE_CHARSET; if (!wcset) wcset = default_wcset; pfile->narrow_cset_desc = init_iconv_desc (pfile, ncset, SOURCE_CHARSET); pfile->wide_cset_desc = init_iconv_desc (pfile, wcset, SOURCE_CHARSET); } void _cpp_destroy_iconv (cpp_reader *pfile) { if (HAVE_ICONV) { if (pfile->narrow_cset_desc.func == convert_using_iconv) iconv_close (pfile->narrow_cset_desc.cd); if (pfile->wide_cset_desc.func == convert_using_iconv) iconv_close (pfile->wide_cset_desc.cd); } } /* Utility routine that computes a mask of the form 0000...111... with WIDTH 1-bits. */ static inline size_t width_to_mask (size_t width) { width = MIN (width, BITS_PER_CPPCHAR_T); if (width >= CHAR_BIT * sizeof (size_t)) return ~(size_t) 0; else return ((size_t) 1 << width) - 1; } /* Returns 1 if C is valid in an identifier, 2 if C is valid except at the start of an identifier, and 0 if C is not valid in an identifier. We assume C has already gone through the checks of _cpp_valid_ucn. The algorithm is a simple binary search on the table defined in cppucnid.h. */ static int ucn_valid_in_identifier (cpp_reader *pfile, cppchar_t c) { int mn, mx, md; mn = -1; mx = ARRAY_SIZE (ucnranges); while (mx - mn > 1) { md = (mn + mx) / 2; if (c < ucnranges[md].lo) mx = md; else if (c > ucnranges[md].hi) mn = md; else goto found; } return 0; found: /* When -pedantic, we require the character to have been listed by the standard for the current language. Otherwise, we accept the union of the acceptable sets for C++98 and C99. */ if (CPP_PEDANTIC (pfile) && ((CPP_OPTION (pfile, c99) && !(ucnranges[md].flags & C99)) || (CPP_OPTION (pfile, cplusplus) && !(ucnranges[md].flags & CXX)))) return 0; /* In C99, UCN digits may not begin identifiers. */ if (CPP_OPTION (pfile, c99) && (ucnranges[md].flags & DIG)) return 2; return 1; } /* [lex.charset]: The character designated by the universal character name \UNNNNNNNN is that character whose character short name in ISO/IEC 10646 is NNNNNNNN; the character designated by the universal character name \uNNNN is that character whose character short name in ISO/IEC 10646 is 0000NNNN. If the hexadecimal value for a universal character name is less than 0x20 or in the range 0x7F-0x9F (inclusive), or if the universal character name designates a character in the basic source character set, then the program is ill-formed. *PSTR must be preceded by "\u" or "\U"; it is assumed that the buffer end is delimited by a non-hex digit. Returns zero if UCNs are not part of the relevant standard, or if the string beginning at *PSTR doesn't syntactically match the form 'NNNN' or 'NNNNNNNN'. Otherwise the nonzero value of the UCN, whether valid or invalid, is returned. Diagnostics are emitted for invalid values. PSTR is updated to point one beyond the UCN, or to the syntactically invalid character. IDENTIFIER_POS is 0 when not in an identifier, 1 for the start of an identifier, or 2 otherwise. 
*/ cppchar_t _cpp_valid_ucn (cpp_reader *pfile, const uchar **pstr, const uchar *limit, int identifier_pos) { cppchar_t result, c; unsigned int length; const uchar *str = *pstr; const uchar *base = str - 2; if (!CPP_OPTION (pfile, cplusplus) && !CPP_OPTION (pfile, c99)) cpp_error (pfile, CPP_DL_WARNING, "universal character names are only valid in C++ and C99"); else if (CPP_WTRADITIONAL (pfile) && identifier_pos == 0) cpp_error (pfile, CPP_DL_WARNING, "the meaning of '\\%c' is different in traditional C", (int) str[-1]); if (str[-1] == 'u') length = 4; else if (str[-1] == 'U') length = 8; else abort(); result = 0; do { c = *str; if (!ISXDIGIT (c)) break; str++; result = (result << 4) + hex_value (c); } while (--length && str < limit); *pstr = str; if (length) { /* We'll error when we try it out as the start of an identifier. */ cpp_error (pfile, CPP_DL_ERROR, "incomplete universal character name %.*s", (int) (str - base), base); result = 1; } /* The standard permits $, @ and ` to be specified as UCNs. We use hex escapes so that this also works with EBCDIC hosts. */ else if ((result < 0xa0 && (result != 0x24 && result != 0x40 && result != 0x60)) || (result & 0x80000000) || (result >= 0xD800 && result <= 0xDFFF)) { cpp_error (pfile, CPP_DL_ERROR, "%.*s is not a valid universal character", (int) (str - base), base); result = 1; } else if (identifier_pos) { int validity = ucn_valid_in_identifier (pfile, result); if (validity == 0) cpp_error (pfile, CPP_DL_ERROR, "universal character %.*s is not valid in an identifier", (int) (str - base), base); else if (validity == 2 && identifier_pos == 1) cpp_error (pfile, CPP_DL_ERROR, "universal character %.*s is not valid at the start of an identifier", (int) (str - base), base); } if (result == 0) result = 1; return result; } /* Convert an UCN, pointed to by FROM, to UTF-8 encoding, then translate it to the execution character set and write the result into TBUF. An advanced pointer is returned. Issues all relevant diagnostics. */ static const uchar * convert_ucn (cpp_reader *pfile, const uchar *from, const uchar *limit, struct _cpp_strbuf *tbuf, bool wide) { cppchar_t ucn; uchar buf[6]; uchar *bufp = buf; size_t bytesleft = 6; int rval; struct cset_converter cvt = wide ? pfile->wide_cset_desc : pfile->narrow_cset_desc; from++; /* Skip u/U. */ ucn = _cpp_valid_ucn (pfile, &from, limit, 0); rval = one_cppchar_to_utf8 (ucn, &bufp, &bytesleft); if (rval) { errno = rval; cpp_errno (pfile, CPP_DL_ERROR, "converting UCN to source character set"); } else if (!APPLY_CONVERSION (cvt, buf, 6 - bytesleft, tbuf)) cpp_errno (pfile, CPP_DL_ERROR, "converting UCN to execution character set"); return from; } static void emit_numeric_escape (cpp_reader *pfile, cppchar_t n, struct _cpp_strbuf *tbuf, bool wide) { if (wide) { /* We have to render this into the target byte order, which may not be our byte order. */ bool bigend = CPP_OPTION (pfile, bytes_big_endian); size_t width = CPP_OPTION (pfile, wchar_precision); size_t cwidth = CPP_OPTION (pfile, char_precision); size_t cmask = width_to_mask (cwidth); size_t nbwc = width / cwidth; size_t i; size_t off = tbuf->len; cppchar_t c; if (tbuf->len + nbwc > tbuf->asize) { tbuf->asize += OUTBUF_BLOCK_SIZE; tbuf->text = xrealloc (tbuf->text, tbuf->asize); } for (i = 0; i < nbwc; i++) { c = n & cmask; n >>= cwidth; tbuf->text[off + (bigend ? 
nbwc - i - 1 : i)] = c; } tbuf->len += nbwc; } else { if (tbuf->len + 1 > tbuf->asize) { tbuf->asize += OUTBUF_BLOCK_SIZE; tbuf->text = xrealloc (tbuf->text, tbuf->asize); } tbuf->text[tbuf->len++] = n; } } /* Convert a hexadecimal escape, pointed to by FROM, to the execution character set and write it into the string buffer TBUF. Returns an advanced pointer, and issues diagnostics as necessary. No character set translation occurs; this routine always produces the execution-set character with numeric value equal to the given hex number. You can, e.g. generate surrogate pairs this way. */ static const uchar * convert_hex (cpp_reader *pfile, const uchar *from, const uchar *limit, struct _cpp_strbuf *tbuf, bool wide) { cppchar_t c, n = 0, overflow = 0; int digits_found = 0; size_t width = (wide ? CPP_OPTION (pfile, wchar_precision) : CPP_OPTION (pfile, char_precision)); size_t mask = width_to_mask (width); if (CPP_WTRADITIONAL (pfile)) cpp_error (pfile, CPP_DL_WARNING, "the meaning of '\\x' is different in traditional C"); from++; /* Skip 'x'. */ while (from < limit) { c = *from; if (! hex_p (c)) break; from++; overflow |= n ^ (n << 4 >> 4); n = (n << 4) + hex_value (c); digits_found = 1; } if (!digits_found) { cpp_error (pfile, CPP_DL_ERROR, "\\x used with no following hex digits"); return from; } if (overflow | (n != (n & mask))) { cpp_error (pfile, CPP_DL_PEDWARN, "hex escape sequence out of range"); n &= mask; } emit_numeric_escape (pfile, n, tbuf, wide); return from; } /* Convert an octal escape, pointed to by FROM, to the execution character set and write it into the string buffer TBUF. Returns an advanced pointer, and issues diagnostics as necessary. No character set translation occurs; this routine always produces the execution-set character with numeric value equal to the given octal number. */ static const uchar * convert_oct (cpp_reader *pfile, const uchar *from, const uchar *limit, struct _cpp_strbuf *tbuf, bool wide) { size_t count = 0; cppchar_t c, n = 0; size_t width = (wide ? CPP_OPTION (pfile, wchar_precision) : CPP_OPTION (pfile, char_precision)); size_t mask = width_to_mask (width); bool overflow = false; while (from < limit && count++ < 3) { c = *from; if (c < '0' || c > '7') break; from++; overflow |= n ^ (n << 3 >> 3); n = (n << 3) + c - '0'; } if (n != (n & mask)) { cpp_error (pfile, CPP_DL_PEDWARN, "octal escape sequence out of range"); n &= mask; } emit_numeric_escape (pfile, n, tbuf, wide); return from; } /* Convert an escape sequence (pointed to by FROM) to its value on the target, and to the execution character set. Do not scan past LIMIT. Write the converted value into TBUF. Returns an advanced pointer. Handles all relevant diagnostics. */ static const uchar * convert_escape (cpp_reader *pfile, const uchar *from, const uchar *limit, struct _cpp_strbuf *tbuf, bool wide) { /* Values of \a \b \e \f \n \r \t \v respectively. */ #if HOST_CHARSET == HOST_CHARSET_ASCII static const uchar charconsts[] = { 7, 8, 27, 12, 10, 13, 9, 11 }; #elif HOST_CHARSET == HOST_CHARSET_EBCDIC static const uchar charconsts[] = { 47, 22, 39, 12, 21, 13, 5, 11 }; #else #error "unknown host character set" #endif uchar c; struct cset_converter cvt = wide ? pfile->wide_cset_desc : pfile->narrow_cset_desc; c = *from; switch (c) { /* UCNs, hex escapes, and octal escapes are processed separately. 
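They are handled by convert_ucn, convert_hex and convert_oct above, which issue their own diagnostics.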
*/ case 'u': case 'U': return convert_ucn (pfile, from, limit, tbuf, wide); case 'x': return convert_hex (pfile, from, limit, tbuf, wide); break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': return convert_oct (pfile, from, limit, tbuf, wide); /* Various letter escapes. Get the appropriate host-charset value into C. */ case '\\': case '\'': case '"': case '?': break; case '(': case '{': case '[': case '%': /* '\(', etc, can be used at the beginning of a line in a long string split onto multiple lines with \-newline, to prevent Emacs or other text editors from getting confused. '\%' can be used to prevent SCCS from mangling printf format strings. */ if (CPP_PEDANTIC (pfile)) goto unknown; break; case 'b': c = charconsts[1]; break; case 'f': c = charconsts[3]; break; case 'n': c = charconsts[4]; break; case 'r': c = charconsts[5]; break; case 't': c = charconsts[6]; break; case 'v': c = charconsts[7]; break; case 'a': if (CPP_WTRADITIONAL (pfile)) cpp_error (pfile, CPP_DL_WARNING, "the meaning of '\\a' is different in traditional C"); c = charconsts[0]; break; case 'e': case 'E': if (CPP_PEDANTIC (pfile)) cpp_error (pfile, CPP_DL_PEDWARN, "non-ISO-standard escape sequence, '\\%c'", (int) c); c = charconsts[2]; break; default: unknown: if (ISGRAPH (c)) cpp_error (pfile, CPP_DL_PEDWARN, "unknown escape sequence '\\%c'", (int) c); else cpp_error (pfile, CPP_DL_PEDWARN, "unknown escape sequence: '\\%03o'", (int) c); } /* Now convert what we have to the execution character set. */ if (!APPLY_CONVERSION (cvt, &c, 1, tbuf)) cpp_errno (pfile, CPP_DL_ERROR, "converting escape sequence to execution character set"); return from + 1; } /* FROM is an array of cpp_string structures of length COUNT. These are to be converted from the source to the execution character set, escape sequences translated, and finally all are to be concatenated. WIDE indicates whether or not to produce a wide string. The result is written into TO. Returns true for success, false for failure. */ bool cpp_interpret_string (cpp_reader *pfile, const cpp_string *from, size_t count, cpp_string *to, bool wide) { struct _cpp_strbuf tbuf; const uchar *p, *base, *limit; size_t i; struct cset_converter cvt = wide ? pfile->wide_cset_desc : pfile->narrow_cset_desc; tbuf.asize = MAX (OUTBUF_BLOCK_SIZE, from->len); tbuf.text = xmalloc (tbuf.asize); tbuf.len = 0; for (i = 0; i < count; i++) { p = from[i].text; if (*p == 'L') p++; p++; /* Skip leading quote. */ limit = from[i].text + from[i].len - 1; /* Skip trailing quote. */ for (;;) { base = p; while (p < limit && *p != '\\') p++; if (p > base) { /* We have a run of normal characters; these can be fed directly to convert_cset. */ if (!APPLY_CONVERSION (cvt, base, p - base, &tbuf)) goto fail; } if (p == limit) break; p = convert_escape (pfile, p + 1, limit, &tbuf, wide); } } /* NUL-terminate the 'to' buffer and translate it to a cpp_string structure. */ emit_numeric_escape (pfile, 0, &tbuf, wide); tbuf.text = xrealloc (tbuf.text, tbuf.len); to->text = tbuf.text; to->len = tbuf.len; return true; fail: cpp_errno (pfile, CPP_DL_ERROR, "converting to execution character set"); free (tbuf.text); return false; } /* Subroutine of do_line and do_linemarker. Convert escape sequences in a string, but do not perform character set conversion. 
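This is done by temporarily swapping in the identity converter (convert_no_conversion) for the narrow character-set descriptor and then calling cpp_interpret_string.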
*/ bool cpp_interpret_string_notranslate (cpp_reader *pfile, const cpp_string *from, size_t count, cpp_string *to, bool wide) { struct cset_converter save_narrow_cset_desc = pfile->narrow_cset_desc; bool retval; pfile->narrow_cset_desc.func = convert_no_conversion; pfile->narrow_cset_desc.cd = (iconv_t) -1; retval = cpp_interpret_string (pfile, from, count, to, wide); pfile->narrow_cset_desc = save_narrow_cset_desc; return retval; } /* Subroutine of cpp_interpret_charconst which performs the conversion to a number, for narrow strings. STR is the string structure returned by cpp_interpret_string. PCHARS_SEEN and UNSIGNEDP are as for cpp_interpret_charconst. */ static cppchar_t narrow_str_to_charconst (cpp_reader *pfile, cpp_string str, unsigned int *pchars_seen, int *unsignedp) { size_t width = CPP_OPTION (pfile, char_precision); size_t max_chars = CPP_OPTION (pfile, int_precision) / width; size_t mask = width_to_mask (width); size_t i; cppchar_t result, c; bool unsigned_p; /* The value of a multi-character character constant, or a single-character character constant whose representation in the execution character set is more than one byte long, is implementation defined. This implementation defines it to be the number formed by interpreting the byte sequence in memory as a big-endian binary number. If overflow occurs, the high bytes are lost, and a warning is issued. We don't want to process the NUL terminator handed back by cpp_interpret_string. */ result = 0; for (i = 0; i < str.len - 1; i++) { c = str.text[i] & mask; if (width < BITS_PER_CPPCHAR_T) result = (result << width) | c; else result = c; } if (i > max_chars) { i = max_chars; cpp_error (pfile, CPP_DL_WARNING, "character constant too long for its type"); } else if (i > 1 && CPP_OPTION (pfile, warn_multichar)) cpp_error (pfile, CPP_DL_WARNING, "multi-character character constant"); /* Multichar constants are of type int and therefore signed. */ if (i > 1) unsigned_p = 0; else unsigned_p = CPP_OPTION (pfile, unsigned_char); /* Truncate the constant to its natural width, and simultaneously sign- or zero-extend to the full width of cppchar_t. For single-character constants, the value is WIDTH bits wide. For multi-character constants, the value is INT_PRECISION bits wide. */ if (i > 1) width = CPP_OPTION (pfile, int_precision); if (width < BITS_PER_CPPCHAR_T) { mask = ((cppchar_t) 1 << width) - 1; if (unsigned_p || !(result & (1 << (width - 1)))) result &= mask; else result |= ~mask; } *pchars_seen = i; *unsignedp = unsigned_p; return result; } /* Subroutine of cpp_interpret_charconst which performs the conversion to a number, for wide strings. STR is the string structure returned by cpp_interpret_string. PCHARS_SEEN and UNSIGNEDP are as for cpp_interpret_charconst. */ static cppchar_t wide_str_to_charconst (cpp_reader *pfile, cpp_string str, unsigned int *pchars_seen, int *unsignedp) { bool bigend = CPP_OPTION (pfile, bytes_big_endian); size_t width = CPP_OPTION (pfile, wchar_precision); size_t cwidth = CPP_OPTION (pfile, char_precision); size_t mask = width_to_mask (width); size_t cmask = width_to_mask (cwidth); size_t nbwc = width / cwidth; size_t off, i; cppchar_t result = 0, c; /* This is finicky because the string is in the target's byte order, which may not be our byte order. Only the last character, ignoring the NUL terminator, is relevant. */ off = str.len - (nbwc * 2); result = 0; for (i = 0; i < nbwc; i++) { c = bigend ? 
str.text[off + i] : str.text[off + nbwc - i - 1]; result = (result << cwidth) | (c & cmask); } /* Wide character constants have type wchar_t, and a single character exactly fills a wchar_t, so a multi-character wide character constant is guaranteed to overflow. */ if (off > 0) cpp_error (pfile, CPP_DL_WARNING, "character constant too long for its type"); /* Truncate the constant to its natural width, and simultaneously sign- or zero-extend to the full width of cppchar_t. */ if (width < BITS_PER_CPPCHAR_T) { if (CPP_OPTION (pfile, unsigned_wchar) || !(result & (1 << (width - 1)))) result &= mask; else result |= ~mask; } *unsignedp = CPP_OPTION (pfile, unsigned_wchar); *pchars_seen = 1; return result; } /* Interpret a (possibly wide) character constant in TOKEN. PCHARS_SEEN points to a variable that is filled in with the number of characters seen, and UNSIGNEDP to a variable that indicates whether the result has signed type. */ cppchar_t cpp_interpret_charconst (cpp_reader *pfile, const cpp_token *token, unsigned int *pchars_seen, int *unsignedp) { cpp_string str = { 0, 0 }; bool wide = (token->type == CPP_WCHAR); cppchar_t result; /* an empty constant will appear as L'' or '' */ if (token->val.str.len == (size_t) (2 + wide)) { cpp_error (pfile, CPP_DL_ERROR, "empty character constant"); return 0; } else if (!cpp_interpret_string (pfile, &token->val.str, 1, &str, wide)) return 0; if (wide) result = wide_str_to_charconst (pfile, str, pchars_seen, unsignedp); else result = narrow_str_to_charconst (pfile, str, pchars_seen, unsignedp); if (str.text != token->val.str.text) free ((void *)str.text); return result; } uchar * _cpp_convert_input (cpp_reader *pfile, const char *input_charset, uchar *input, size_t size, size_t len, off_t *st_size) { struct cset_converter input_cset; struct _cpp_strbuf to; input_cset = init_iconv_desc (pfile, SOURCE_CHARSET, input_charset); if (input_cset.func == convert_no_conversion) { to.text = input; to.asize = size; to.len = len; } else { to.asize = MAX (65536, len); to.text = xmalloc (to.asize); to.len = 0; if (!APPLY_CONVERSION (input_cset, input, len, &to)) cpp_error (pfile, CPP_DL_ERROR, "failure to convert %s to %s", CPP_OPTION (pfile, input_charset), SOURCE_CHARSET); free (input); } /* Clean up the mess. */ if (input_cset.func == convert_using_iconv) iconv_close (input_cset.cd); /* Resize buffer if we allocated substantially too much, or if we haven't enough space for the \n-terminator. */ if (to.len + 4096 < to.asize || to.len >= to.asize) to.text = xrealloc (to.text, to.len + 1); to.text[to.len] = '\n'; *st_size = to.len; return to.text; } const char * _cpp_default_encoding (void) { const char *current_encoding = NULL; /* We disable this because the default codeset is 7-bit ASCII on most platforms, and this causes conversion failures on every file in GCC that happens to have one of the upper 128 characters in it -- most likely, as part of the name of a contributor. We should definitely recognize in-band markers of file encoding, like: - the appropriate Unicode byte-order mark (FE FF) to recognize UTF16 and UCS4 (in both big-endian and little-endian flavors) and UTF8 - a "#i", "#d", "/ *", "//", " #p" or "#p" (for #pragma) to distinguish ASCII and EBCDIC. - now we can parse something like "#pragma GCC encoding on the first line, or even Emacs/VIM's mode line tags (there's a problem here in that VIM uses the last line, and Emacs has its more elaborate "Local variables:" convention). 
- investigate whether Java has another common convention, which would be friendly to support. (Zack Weinberg and Paolo Bonzini, May 20th 2004) */ #if defined (HAVE_LOCALE_H) && defined (HAVE_LANGINFO_CODESET) && 0 setlocale (LC_CTYPE, ""); current_encoding = nl_langinfo (CODESET); #endif if (current_encoding == NULL || *current_encoding == '\0') current_encoding = SOURCE_CHARSET; return current_encoding; } /* CPP Library. (Directive handling.) Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Per Bothner, 1994-95. Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Dependency generator for Makefile fragments. Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc. Contributed by Zack Weinberg, Mar 2000 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ #ifndef LIBCPP_MKDEPS_H #define LIBCPP_MKDEPS_H /* This is the data structure used by all the functions in mkdeps.c. It's quite straightforward, but should be treated as opaque. */ struct depends; /* Create a deps buffer. */ extern struct depends *deps_init (void); /* Destroy a deps buffer. */ extern void deps_free (struct depends *); /* Add a set of "vpath" directories. The second argument is a colon- separated list of pathnames, like you would set Make's VPATH variable to. If a dependency or target name begins with any of these pathnames (and the next path element is not "..") that pathname is stripped off. */ extern void deps_add_vpath (struct depends *, const char *); /* Add a target (appears on left side of the colon) to the deps list. Takes a boolean indicating whether to quote the target for MAKE. */ extern void deps_add_target (struct depends *, const char *, int); /* Sets the default target if none has been given already. An empty string as the default target is interpreted as stdin. */ extern void deps_add_default_target (struct depends *, const char *); /* Add a dependency (appears on the right side of the colon) to the deps list. 
Dependencies will be printed in the order that they were entered with this function. By convention, the first dependency entered should be the primary source file. */ extern void deps_add_dep (struct depends *, const char *); /* Write out a deps buffer to a specified file. The third argument is the number of columns to word-wrap at (0 means don't wrap). */ extern void deps_write (const struct depends *, FILE *, unsigned int); /* Write out a deps buffer to a file, in a form that can be read back with deps_restore. Returns nonzero on error, in which case the error number will be in errno. */ extern int deps_save (struct depends *, FILE *); /* Read back dependency information written with deps_save into the deps buffer. The third argument may be NULL, in which case the dependency information is just skipped, or it may be a filename, in which case that filename is skipped. */ extern int deps_restore (struct depends *, FILE *, const char *); /* For each dependency *except the first*, emit a dummy rule for that file, causing it to depend on nothing. This is used to work around the intermediate-file deletion misfeature in Make, in some automatic dependency schemes. */ extern void deps_phony_targets (const struct depends *, FILE *); #endif /* ! LIBCPP_MKDEPS_H */ /* Stack of conditionals currently in progress (including both successful and failing conditionals). */ struct if_stack { struct if_stack *next; unsigned int line; /* Line where condition started. */ const cpp_hashnode *mi_cmacro;/* macro name for #ifndef around entire file */ bool skip_elses; /* Can future #else / #elif be skipped? */ bool was_skipping; /* If were skipping on entry. */ int type; /* Most recent conditional for diagnostics. */ }; /* Contains a registered pragma or pragma namespace. */ typedef void (*pragma_cb) (cpp_reader *); struct pragma_entry { struct pragma_entry *next; const cpp_hashnode *pragma; /* Name and length. */ int is_nspace; union { pragma_cb handler; struct pragma_entry *space; } u; }; /* Values for the origin field of struct directive. KANDR directives come from traditional (K&R) C. STDC89 directives come from the 1989 C standard. EXTENSION directives are extensions. */ #define KANDR 0 #define STDC89 1 #define EXTENSION 2 /* Values for the flags field of struct directive. CONDIT indicates a conditional; IF_COND an opening conditional. INCL means to treat "..." and <...> as q-char and h-char sequences respectively. IN_I means this directive should be handled even if -fpreprocessed is in effect (these are the directives with callback hooks). EXPAND is set on directives that are always macro-expanded. */ #define CONDIT (1 << 0) #define IF_COND (1 << 1) #define INCL (1 << 2) #define IN_I (1 << 3) #define EXPAND (1 << 4) /* Defines one #-directive, including how to handle it. */ typedef void (*directive_handler) (cpp_reader *); typedef struct directive directive; struct directive { directive_handler handler; /* Function to handle directive. */ const uchar *name; /* Name of directive. */ unsigned short length; /* Length of name. */ unsigned char origin; /* Origin of directive. */ unsigned char flags; /* Flags describing this directive. */ }; /* Forward declarations. 
*/ static void skip_rest_of_line (cpp_reader *); static void check_eol (cpp_reader *); static void start_directive (cpp_reader *); static void prepare_directive_trad (cpp_reader *); static void end_directive (cpp_reader *, int); static void directive_diagnostics (cpp_reader *, const directive *, int); static void run_directive (cpp_reader *, int, const char *, size_t); static char *glue_header_name (cpp_reader *); static const char *parse_include (cpp_reader *, int *); static void push_conditional (cpp_reader *, int, int, const cpp_hashnode *); static unsigned int read_flag (cpp_reader *, unsigned int); static int strtoul_for_line (const uchar *, unsigned int, unsigned long *); static void do_diagnostic (cpp_reader *, int, int); static cpp_hashnode *lex_macro_node (cpp_reader *); static int undefine_macros (cpp_reader *, cpp_hashnode *, void *); static void do_include_common (cpp_reader *, enum include_type); static struct pragma_entry *lookup_pragma_entry (struct pragma_entry *, const cpp_hashnode *); static struct pragma_entry *insert_pragma_entry (cpp_reader *, struct pragma_entry **, const cpp_hashnode *, pragma_cb); static int count_registered_pragmas (struct pragma_entry *); static char ** save_registered_pragmas (struct pragma_entry *, char **); static char ** restore_registered_pragmas (cpp_reader *, struct pragma_entry *, char **); static void do_pragma_once (cpp_reader *); static void do_pragma_poison (cpp_reader *); static void do_pragma_system_header (cpp_reader *); static void do_pragma_dependency (cpp_reader *); static void do_linemarker (cpp_reader *); static const cpp_token *get_token_no_padding (cpp_reader *); static const cpp_token *get__Pragma_string (cpp_reader *); static void destringize_and_run (cpp_reader *, const cpp_string *); static int parse_answer (cpp_reader *, struct answer **, int); static cpp_hashnode *parse_assertion (cpp_reader *, struct answer **, int); static struct answer ** find_answer (cpp_hashnode *, const struct answer *); static void handle_assertion (cpp_reader *, const char *, int); /* This is the table of directive handlers. It is ordered by frequency of occurrence; the numbers at the end are directive counts from all the source code I have lying around (egcs and libc CVS as of 1999-05-18, plus grub-0.5.91, linux-2.2.9, and pcmcia-cs-3.0.9). This is no longer important as directive lookup is now O(1). All extensions other than #warning and #include_next are deprecated. The name is where the extension appears to have come from. */ #define DIRECTIVE_TABLE \ D(define, T_DEFINE = 0, KANDR, IN_I) /* 270554 */ \ D(include, T_INCLUDE, KANDR, INCL | EXPAND) /* 52262 */ \ D(endif, T_ENDIF, KANDR, CONDIT) /* 45855 */ \ D(ifdef, T_IFDEF, KANDR, CONDIT | IF_COND) /*22000 */ \ D(if, T_IF, KANDR, CONDIT | IF_COND | EXPAND) /*18162*/ \ D(else, T_ELSE, KANDR, CONDIT) /* 9863 */ \ D(ifndef, T_IFNDEF, KANDR, CONDIT | IF_COND) /* 9675 */ \ D(undef, T_UNDEF, KANDR, IN_I) /* 4837 */ \ D(line, T_LINE, KANDR, EXPAND) /* 2465 */ \ D(elif, T_ELIF, STDC89, CONDIT | EXPAND) /* 610 */ \ D(error, T_ERROR, STDC89, 0) /* 475 */ \ D(pragma, T_PRAGMA, STDC89, IN_I) /* 195 */ \ D(warning, T_WARNING, EXTENSION, 0) /* 22 */ \ D(include_next, T_INCLUDE_NEXT, EXTENSION, INCL | EXPAND) /* 19 */ \ D(ident, T_IDENT, EXTENSION, IN_I) /* 11 */ \ D(import, T_IMPORT, EXTENSION, INCL | EXPAND) /* 0 ObjC */ \ D(assert, T_ASSERT, EXTENSION, 0) /* 0 SVR4 */ \ D(unassert, T_UNASSERT, EXTENSION, 0) /* 0 SVR4 */ \ D(sccs, T_SCCS, EXTENSION, 0) /* 0 SVR4? 
*/ /* Use the table to generate a series of prototypes, an enum for the directive names, and an array of directive handlers. */ #define D(name, t, o, f) static void do_##name (cpp_reader *); DIRECTIVE_TABLE #undef D #define D(n, tag, o, f) tag, enum { DIRECTIVE_TABLE N_DIRECTIVES }; #undef D #define D(name, t, origin, flags) \ { do_##name, (const uchar *) #name, \ sizeof #name - 1, origin, flags }, static const directive dtable[] = { DIRECTIVE_TABLE }; #undef D #undef DIRECTIVE_TABLE /* Wrapper struct directive for linemarkers. The origin is more or less true - the original K+R cpp did use this notation in its preprocessed output. */ static const directive linemarker_dir = { do_linemarker, USTR"#", 1, KANDR, IN_I }; #define SEEN_EOL() (pfile->cur_token[-1].type == CPP_EOF) /* Skip any remaining tokens in a directive. */ static void skip_rest_of_line (cpp_reader *pfile) { /* Discard all stacked contexts. */ while (pfile->context->prev) _cpp_pop_context (pfile); /* Sweep up all tokens remaining on the line. */ if (! SEEN_EOL ()) while (_cpp_lex_token (pfile)->type != CPP_EOF) ; } /* Ensure there are no stray tokens at the end of a directive. */ static void check_eol (cpp_reader *pfile) { if (! SEEN_EOL () && _cpp_lex_token (pfile)->type != CPP_EOF) cpp_error (pfile, CPP_DL_PEDWARN, "extra tokens at end of #%s directive", pfile->directive->name); } /* Called when entering a directive, _Pragma or command-line directive. */ static void start_directive (cpp_reader *pfile) { /* Setup in-directive state. */ pfile->state.in_directive = 1; pfile->state.save_comments = 0; /* Some handlers need the position of the # for diagnostics. */ pfile->directive_line = pfile->line_table->highest_line; } /* Called when leaving a directive, _Pragma or command-line directive. */ static void end_directive (cpp_reader *pfile, int skip_line) { if (CPP_OPTION (pfile, traditional)) { /* Revert change of prepare_directive_trad. */ pfile->state.prevent_expansion--; if (pfile->directive != &dtable[T_DEFINE]) _cpp_remove_overlay (pfile); } /* We don't skip for an assembler #. */ else if (skip_line) { skip_rest_of_line (pfile); if (!pfile->keep_tokens) { pfile->cur_run = &pfile->base_run; pfile->cur_token = pfile->base_run.base; } } /* Restore state. */ pfile->state.save_comments = ! CPP_OPTION (pfile, discard_comments); pfile->state.in_directive = 0; pfile->state.in_expression = 0; pfile->state.angled_headers = 0; pfile->directive = 0; } /* Prepare to handle the directive in pfile->directive. */ static void prepare_directive_trad (cpp_reader *pfile) { if (pfile->directive != &dtable[T_DEFINE]) { bool no_expand = (pfile->directive && ! (pfile->directive->flags & EXPAND)); bool was_skipping = pfile->state.skipping; pfile->state.in_expression = (pfile->directive == &dtable[T_IF] || pfile->directive == &dtable[T_ELIF]); if (pfile->state.in_expression) pfile->state.skipping = false; if (no_expand) pfile->state.prevent_expansion++; _cpp_scan_out_logical_line (pfile, NULL); if (no_expand) pfile->state.prevent_expansion--; pfile->state.skipping = was_skipping; _cpp_overlay_buffer (pfile, pfile->out.base, pfile->out.cur - pfile->out.base); } /* Stop ISO C from expanding anything. */ pfile->state.prevent_expansion++; } /* Output diagnostics for a directive DIR. INDENTED is nonzero if the '#' was indented. */ static void directive_diagnostics (cpp_reader *pfile, const directive *dir, int indented) { /* Issue -pedantic warnings for extensions. */ if (CPP_PEDANTIC (pfile) && ! 
pfile->state.skipping && dir->origin == EXTENSION) cpp_error (pfile, CPP_DL_PEDWARN, "#%s is a GCC extension", dir->name); /* Traditionally, a directive is ignored unless its # is in column 1. Therefore in code intended to work with K+R compilers, directives added by C89 must have their # indented, and directives present in traditional C must not. This is true even of directives in skipped conditional blocks. #elif cannot be used at all. */ if (CPP_WTRADITIONAL (pfile)) { if (dir == &dtable[T_ELIF]) cpp_error (pfile, CPP_DL_WARNING, "suggest not using #elif in traditional C"); else if (indented && dir->origin == KANDR) cpp_error (pfile, CPP_DL_WARNING, "traditional C ignores #%s with the # indented", dir->name); else if (!indented && dir->origin != KANDR) cpp_error (pfile, CPP_DL_WARNING, "suggest hiding #%s from traditional C with an indented #", dir->name); } } /* Check if we have a known directive. INDENTED is nonzero if the '#' of the directive was indented. This function is in this file to save unnecessarily exporting dtable etc. to cpplex.c. Returns nonzero if the line of tokens has been handled, zero if we should continue processing the line. */ int _cpp_handle_directive (cpp_reader *pfile, int indented) { const directive *dir = 0; const cpp_token *dname; bool was_parsing_args = pfile->state.parsing_args; bool was_discarding_output = pfile->state.discarding_output; int skip = 1; if (was_discarding_output) pfile->state.prevent_expansion = 0; if (was_parsing_args) { if (CPP_OPTION (pfile, pedantic)) cpp_error (pfile, CPP_DL_PEDWARN, "embedding a directive within macro arguments is not portable"); pfile->state.parsing_args = 0; pfile->state.prevent_expansion = 0; } start_directive (pfile); dname = _cpp_lex_token (pfile); if (dname->type == CPP_NAME) { if (dname->val.node->is_directive) dir = &dtable[dname->val.node->directive_index]; } /* We do not recognize the # followed by a number extension in assembler code. */ else if (dname->type == CPP_NUMBER && CPP_OPTION (pfile, lang) != CLK_ASM) { dir = &linemarker_dir; if (CPP_PEDANTIC (pfile) && ! CPP_OPTION (pfile, preprocessed) && ! pfile->state.skipping) cpp_error (pfile, CPP_DL_PEDWARN, "style of line directive is a GCC extension"); } if (dir) { /* If we have a directive that is not an opening conditional, invalidate any control macro. */ if (! (dir->flags & IF_COND)) pfile->mi_valid = false; /* Kluge alert. In order to be sure that code like this #define HASH # HASH define foo bar does not cause '#define foo bar' to get executed when compiled with -save-temps, we recognize directives in -fpreprocessed mode only if the # is in column 1. cppmacro.c puts a space in front of any '#' at the start of a macro. */ if (CPP_OPTION (pfile, preprocessed) && (indented || !(dir->flags & IN_I))) { skip = 0; dir = 0; } else { /* In failed conditional groups, all non-conditional directives are ignored. Before doing that, whether skipping or not, we should lex angle-bracketed headers correctly, and maybe output some diagnostics. */ pfile->state.angled_headers = dir->flags & INCL; pfile->state.directive_wants_padding = dir->flags & INCL; if (! CPP_OPTION (pfile, preprocessed)) directive_diagnostics (pfile, dir, indented); if (pfile->state.skipping && !(dir->flags & CONDIT)) dir = 0; } } else if (dname->type == CPP_EOF) ; /* CPP_EOF is the "null directive". */ else { /* An unknown directive. Don't complain about it in assembly source: we don't know where the comments are, and # may introduce assembler pseudo-ops. 
Don't complain about invalid directives in skipped conditional groups (6.10 p4). */ if (CPP_OPTION (pfile, lang) == CLK_ASM) skip = 0; else if (!pfile->state.skipping) cpp_error (pfile, CPP_DL_ERROR, "invalid preprocessing directive #%s", cpp_token_as_text (pfile, dname)); } pfile->directive = dir; if (CPP_OPTION (pfile, traditional)) prepare_directive_trad (pfile); if (dir) pfile->directive->handler (pfile); else if (skip == 0) _cpp_backup_tokens (pfile, 1); end_directive (pfile, skip); if (was_parsing_args) { /* Restore state when within macro args. */ pfile->state.parsing_args = 2; pfile->state.prevent_expansion = 1; } if (was_discarding_output) pfile->state.prevent_expansion = 1; return skip; } /* Directive handler wrapper used by the command line option processor. BUF is \n terminated. */ static void run_directive (cpp_reader *pfile, int dir_no, const char *buf, size_t count) { cpp_push_buffer (pfile, (const uchar *) buf, count, /* from_stage3 */ true); /* Disgusting hack. */ if (dir_no == T_PRAGMA) pfile->buffer->file = pfile->buffer->prev->file; start_directive (pfile); /* This is a short-term fix to prevent a leading '#' being interpreted as a directive. */ _cpp_clean_line (pfile); pfile->directive = &dtable[dir_no]; if (CPP_OPTION (pfile, traditional)) prepare_directive_trad (pfile); pfile->directive->handler (pfile); end_directive (pfile, 1); if (dir_no == T_PRAGMA) pfile->buffer->file = NULL; _cpp_pop_buffer (pfile); } /* Checks for validity the macro name in #define, #undef, #ifdef and #ifndef directives. */ static cpp_hashnode * lex_macro_node (cpp_reader *pfile) { const cpp_token *token = _cpp_lex_token (pfile); /* The token immediately after #define must be an identifier. That identifier may not be "defined", per C99 6.10.8p4. In C++, it may not be any of the "named operators" either, per C++98 [lex.digraph], [lex.key]. Finally, the identifier may not have been poisoned. (In that case the lexer has issued the error message for us.) */ if (token->type == CPP_NAME) { cpp_hashnode *node = token->val.node; if (node == pfile->spec_nodes.n_defined) cpp_error (pfile, CPP_DL_ERROR, "\"defined\" cannot be used as a macro name"); else if (! (node->flags & NODE_POISONED)) return node; } else if (token->flags & NAMED_OP) cpp_error (pfile, CPP_DL_ERROR, "\"%s\" cannot be used as a macro name as it is an operator in C++", NODE_NAME (token->val.node)); else if (token->type == CPP_EOF) cpp_error (pfile, CPP_DL_ERROR, "no macro name given in #%s directive", pfile->directive->name); else cpp_error (pfile, CPP_DL_ERROR, "macro names must be identifiers"); return NULL; } /* Process a #define directive. Most work is done in cppmacro.c. */ static void do_define (cpp_reader *pfile) { cpp_hashnode *node = lex_macro_node (pfile); if (node) { /* If we have been requested to expand comments into macros, then re-enable saving of comments. */ pfile->state.save_comments = ! CPP_OPTION (pfile, discard_comments_in_macro_exp); if (_cpp_create_definition (pfile, node)) if (pfile->cb.define) pfile->cb.define (pfile, pfile->directive_line, node); } } /* Handle #undef. Mark the identifier NT_VOID in the hash table. */ static void do_undef (cpp_reader *pfile) { cpp_hashnode *node = lex_macro_node (pfile); if (node) { if (pfile->cb.undef) pfile->cb.undef (pfile, pfile->directive_line, node); /* 6.10.3.5 paragraph 2: [#undef] is ignored if the specified identifier is not currently defined as a macro name. 
*/ if (node->type == NT_MACRO) { if (node->flags & NODE_WARN) cpp_error (pfile, CPP_DL_WARNING, "undefining \"%s\"", NODE_NAME (node)); if (CPP_OPTION (pfile, warn_unused_macros)) _cpp_warn_if_unused_macro (pfile, node, NULL); _cpp_free_definition (node); } } check_eol (pfile); } /* Undefine a single macro/assertion/whatever. */ static int undefine_macros (cpp_reader *pfile ATTRIBUTE_UNUSED, cpp_hashnode *h, void *data_p ATTRIBUTE_UNUSED) { /* Body of _cpp_free_definition inlined here for speed. Macros and assertions no longer have anything to free. */ h->type = NT_VOID; h->flags &= ~(NODE_POISONED|NODE_BUILTIN|NODE_DISABLED); return 1; } /* Undefine all macros and assertions. */ void cpp_undef_all (cpp_reader *pfile) { cpp_forall_identifiers (pfile, undefine_macros, NULL); } /* Helper routine used by parse_include. Reinterpret the current line as an h-char-sequence (< ... >); we are looking at the first token after the <. Returns a malloced filename. */ static char * glue_header_name (cpp_reader *pfile) { const cpp_token *token; char *buffer; size_t len, total_len = 0, capacity = 1024; /* To avoid lexed tokens overwriting our glued name, we can only allocate from the string pool once we've lexed everything. */ buffer = xmalloc (capacity); for (;;) { token = get_token_no_padding (pfile); if (token->type == CPP_GREATER) break; if (token->type == CPP_EOF) { cpp_error (pfile, CPP_DL_ERROR, "missing terminating > character"); break; } len = cpp_token_len (token) + 2; /* Leading space, terminating \0. */ if (total_len + len > capacity) { capacity = (capacity + len) * 2; buffer = xrealloc (buffer, capacity); } if (token->flags & PREV_WHITE) buffer[total_len++] = ' '; total_len = (cpp_spell_token (pfile, token, (uchar *) &buffer[total_len]) - (uchar *) buffer); } buffer[total_len] = '\0'; return buffer; } /* Returns the file name of #include, #include_next, #import and #pragma dependency. The string is malloced and the caller should free it. Returns NULL on error. */ static const char * parse_include (cpp_reader *pfile, int *pangle_brackets) { char *fname; const cpp_token *header; /* Allow macro expansion. */ header = get_token_no_padding (pfile); if (header->type == CPP_STRING || header->type == CPP_HEADER_NAME) { fname = xmalloc (header->val.str.len - 1); memcpy (fname, header->val.str.text + 1, header->val.str.len - 2); fname[header->val.str.len - 2] = '\0'; *pangle_brackets = header->type == CPP_HEADER_NAME; } else if (header->type == CPP_LESS) { fname = glue_header_name (pfile); *pangle_brackets = 1; } else { const unsigned char *dir; if (pfile->directive == &dtable[T_PRAGMA]) dir = USTR"pragma dependency"; else dir = pfile->directive->name; cpp_error (pfile, CPP_DL_ERROR, "#%s expects \"FILENAME\" or ", dir); return NULL; } check_eol (pfile); return fname; } /* Handle #include, #include_next and #import. */ static void do_include_common (cpp_reader *pfile, enum include_type type) { const char *fname; int angle_brackets; fname = parse_include (pfile, &angle_brackets); if (!fname) return; /* Prevent #include recursion. */ if (pfile->line_table->depth >= CPP_STACK_MAX) cpp_error (pfile, CPP_DL_ERROR, "#include nested too deeply"); else { /* Get out of macro context, if we are. 
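skip_rest_of_line pops any pending macro contexts and discards the remaining tokens of the directive before the include is stacked.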
*/ skip_rest_of_line (pfile); if (pfile->cb.include) pfile->cb.include (pfile, pfile->directive_line, pfile->directive->name, fname, angle_brackets); _cpp_stack_include (pfile, fname, angle_brackets, type); } free ((void *) fname); } static void do_include (cpp_reader *pfile) { do_include_common (pfile, IT_INCLUDE); } static void do_import (cpp_reader *pfile) { do_include_common (pfile, IT_IMPORT); } static void do_include_next (cpp_reader *pfile) { enum include_type type = IT_INCLUDE_NEXT; /* If this is the primary source file, warn and use the normal search logic. */ if (! pfile->buffer->prev) { cpp_error (pfile, CPP_DL_WARNING, "#include_next in primary source file"); type = IT_INCLUDE; } do_include_common (pfile, type); } /* Subroutine of do_linemarker. Read possible flags after file name. LAST is the last flag seen; 0 if this is the first flag. Return the flag if it is valid, 0 at the end of the directive. Otherwise complain. */ static unsigned int read_flag (cpp_reader *pfile, unsigned int last) { const cpp_token *token = _cpp_lex_token (pfile); if (token->type == CPP_NUMBER && token->val.str.len == 1) { unsigned int flag = token->val.str.text[0] - '0'; if (flag > last && flag <= 4 && (flag != 4 || last == 3) && (flag != 2 || last == 0)) return flag; } if (token->type != CPP_EOF) cpp_error (pfile, CPP_DL_ERROR, "invalid flag \"%s\" in line directive", cpp_token_as_text (pfile, token)); return 0; } /* Subroutine of do_line and do_linemarker. Convert a number in STR, of length LEN, to binary; store it in NUMP, and return 0 if the number was well-formed, 1 if not. Temporary, hopefully. */ static int strtoul_for_line (const uchar *str, unsigned int len, long unsigned int *nump) { unsigned long reg = 0; uchar c; while (len--) { c = *str++; if (!ISDIGIT (c)) return 1; reg *= 10; reg += c - '0'; } *nump = reg; return 0; } /* Interpret #line command. Note that the filename string (if any) is a true string constant (escapes are interpreted), unlike in #line. */ static void do_line (cpp_reader *pfile) { const struct line_maps *line_table = pfile->line_table; const struct line_map *map = &line_table->maps[line_table->used - 1]; const cpp_token *token; const char *new_file = map->to_file; unsigned long new_lineno; /* C99 raised the minimum limit on #line numbers. */ unsigned int cap = CPP_OPTION (pfile, c99) ? 2147483647 : 32767; /* #line commands expand macros. */ token = cpp_get_token (pfile); if (token->type != CPP_NUMBER || strtoul_for_line (token->val.str.text, token->val.str.len, &new_lineno)) { cpp_error (pfile, CPP_DL_ERROR, "\"%s\" after #line is not a positive integer", cpp_token_as_text (pfile, token)); return; } if (CPP_PEDANTIC (pfile) && (new_lineno == 0 || new_lineno > cap)) cpp_error (pfile, CPP_DL_PEDWARN, "line number out of range"); token = cpp_get_token (pfile); if (token->type == CPP_STRING) { cpp_string s = { 0, 0 }; if (cpp_interpret_string_notranslate (pfile, &token->val.str, 1, &s, false)) new_file = (const char *)s.text; check_eol (pfile); } else if (token->type != CPP_EOF) { cpp_error (pfile, CPP_DL_ERROR, "\"%s\" is not a valid filename", cpp_token_as_text (pfile, token)); return; } skip_rest_of_line (pfile); _cpp_do_file_change (pfile, LC_RENAME, new_file, new_lineno, map->sysp); } /* Interpret the # 44 "file" [flags] notation, which has slightly different syntax and semantics from #line: Flags are allowed, and we never complain about the line number being too big. 
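For example, a marker such as # 1 "/usr/include/stdio.h" 1 3 records entry into a new file (flag 1) that is a system header (flag 3).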
*/ static void do_linemarker (cpp_reader *pfile) { const struct line_maps *line_table = pfile->line_table; const struct line_map *map = &line_table->maps[line_table->used - 1]; const cpp_token *token; const char *new_file = map->to_file; unsigned long new_lineno; unsigned int new_sysp = map->sysp; enum lc_reason reason = LC_RENAME; int flag; /* Back up so we can get the number again. Putting this in _cpp_handle_directive risks two calls to _cpp_backup_tokens in some circumstances, which can segfault. */ _cpp_backup_tokens (pfile, 1); /* #line commands expand macros. */ token = cpp_get_token (pfile); if (token->type != CPP_NUMBER || strtoul_for_line (token->val.str.text, token->val.str.len, &new_lineno)) { cpp_error (pfile, CPP_DL_ERROR, "\"%s\" after # is not a positive integer", cpp_token_as_text (pfile, token)); return; } token = cpp_get_token (pfile); if (token->type == CPP_STRING) { cpp_string s = { 0, 0 }; if (cpp_interpret_string_notranslate (pfile, &token->val.str, 1, &s, false)) new_file = (const char *)s.text; new_sysp = 0; flag = read_flag (pfile, 0); if (flag == 1) { reason = LC_ENTER; /* Fake an include for cpp_included (). */ _cpp_fake_include (pfile, new_file); flag = read_flag (pfile, flag); } else if (flag == 2) { reason = LC_LEAVE; flag = read_flag (pfile, flag); } if (flag == 3) { new_sysp = 1; flag = read_flag (pfile, flag); if (flag == 4) new_sysp = 2; pfile->buffer->sysp = new_sysp; } check_eol (pfile); } else if (token->type != CPP_EOF) { cpp_error (pfile, CPP_DL_ERROR, "\"%s\" is not a valid filename", cpp_token_as_text (pfile, token)); return; } skip_rest_of_line (pfile); _cpp_do_file_change (pfile, reason, new_file, new_lineno, new_sysp); } /* Arrange the file_change callback. pfile->line has changed to FILE_LINE of TO_FILE, for reason REASON. SYSP is 1 for a system header, 2 for a system header that needs to be extern "C" protected, and zero otherwise. */ void _cpp_do_file_change (cpp_reader *pfile, enum lc_reason reason, const char *to_file, unsigned int file_line, unsigned int sysp) { const struct line_map *map = linemap_add (pfile->line_table, reason, sysp, to_file, file_line); if (map != NULL) linemap_line_start (pfile->line_table, map->to_line, 127); if (pfile->cb.file_change) pfile->cb.file_change (pfile, map); } /* Report a warning or error detected by the program we are processing. Use the directive's tokens in the error message. */ static void do_diagnostic (cpp_reader *pfile, int code, int print_dir) { if (_cpp_begin_message (pfile, code, pfile->cur_token[-1].src_loc, 0)) { if (print_dir) fprintf (stderr, "#%s ", pfile->directive->name); pfile->state.prevent_expansion++; cpp_output_line (pfile, stderr); pfile->state.prevent_expansion--; } } static void do_error (cpp_reader *pfile) { do_diagnostic (pfile, CPP_DL_ERROR, 1); } static void do_warning (cpp_reader *pfile) { /* We want #warning diagnostics to be emitted in system headers too. */ do_diagnostic (pfile, CPP_DL_WARNING_SYSHDR, 1); } /* Report program identification. */ static void do_ident (cpp_reader *pfile) { const cpp_token *str = cpp_get_token (pfile); if (str->type != CPP_STRING) cpp_error (pfile, CPP_DL_ERROR, "invalid #ident directive"); else if (pfile->cb.ident) pfile->cb.ident (pfile, pfile->directive_line, &str->val.str); check_eol (pfile); } /* Lookup a PRAGMA name in a singly-linked CHAIN. Returns the matching entry, or NULL if none is found. The returned entry could be the start of a namespace chain, or a pragma. 
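   For example (illustrative): "#pragma GCC poison" is resolved in two steps -
   the node for "GCC" is looked up in pfile->pragmas and yields a namespace
   entry, then the node for "poison" is looked up in that entry's u.space
   chain to reach the handler registered by _cpp_init_internal_pragmas below.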
*/ static struct pragma_entry * lookup_pragma_entry (struct pragma_entry *chain, const cpp_hashnode *pragma) { while (chain && chain->pragma != pragma) chain = chain->next; return chain; } /* Create and insert a pragma entry for NAME at the beginning of a singly-linked CHAIN. If handler is NULL, it is a namespace, otherwise it is a pragma and its handler. */ static struct pragma_entry * insert_pragma_entry (cpp_reader *pfile, struct pragma_entry **chain, const cpp_hashnode *pragma, pragma_cb handler) { struct pragma_entry *new; new = (struct pragma_entry *) _cpp_aligned_alloc (pfile, sizeof (struct pragma_entry)); new->pragma = pragma; if (handler) { new->is_nspace = 0; new->u.handler = handler; } else { new->is_nspace = 1; new->u.space = NULL; } new->next = *chain; *chain = new; return new; } /* Register a pragma NAME in namespace SPACE. If SPACE is null, it goes in the global namespace. HANDLER is the handler it will call, which must be non-NULL. */ void cpp_register_pragma (cpp_reader *pfile, const char *space, const char *name, pragma_cb handler) { struct pragma_entry **chain = &pfile->pragmas; struct pragma_entry *entry; const cpp_hashnode *node; if (!handler) abort (); if (space) { node = cpp_lookup (pfile, USTR space, strlen (space)); entry = lookup_pragma_entry (*chain, node); if (!entry) entry = insert_pragma_entry (pfile, chain, node, NULL); else if (!entry->is_nspace) goto clash; chain = &entry->u.space; } /* Check for duplicates. */ node = cpp_lookup (pfile, USTR name, strlen (name)); entry = lookup_pragma_entry (*chain, node); if (entry) { if (entry->is_nspace) clash: cpp_error (pfile, CPP_DL_ICE, "registering \"%s\" as both a pragma and a pragma namespace", NODE_NAME (node)); else if (space) cpp_error (pfile, CPP_DL_ICE, "#pragma %s %s is already registered", space, name); else cpp_error (pfile, CPP_DL_ICE, "#pragma %s is already registered", name); } else insert_pragma_entry (pfile, chain, node, handler); } /* Register the pragmas the preprocessor itself handles. */ void _cpp_init_internal_pragmas (cpp_reader *pfile) { /* Pragmas in the global namespace. */ cpp_register_pragma (pfile, 0, "once", do_pragma_once); /* New GCC-specific pragmas should be put in the GCC namespace. */ cpp_register_pragma (pfile, "GCC", "poison", do_pragma_poison); cpp_register_pragma (pfile, "GCC", "system_header", do_pragma_system_header); cpp_register_pragma (pfile, "GCC", "dependency", do_pragma_dependency); } /* Return the number of registered pragmas in PE. */ static int count_registered_pragmas (struct pragma_entry *pe) { int ct = 0; for (; pe != NULL; pe = pe->next) { if (pe->is_nspace) ct += count_registered_pragmas (pe->u.space); ct++; } return ct; } /* Save into SD the names of the registered pragmas referenced by PE, and return a pointer to the next free space in SD. */ static char ** save_registered_pragmas (struct pragma_entry *pe, char **sd) { for (; pe != NULL; pe = pe->next) { if (pe->is_nspace) sd = save_registered_pragmas (pe->u.space, sd); *sd++ = xmemdup (HT_STR (&pe->pragma->ident), HT_LEN (&pe->pragma->ident), HT_LEN (&pe->pragma->ident) + 1); } return sd; } /* Return a newly-allocated array which saves the names of the registered pragmas. 
*/ char ** _cpp_save_pragma_names (cpp_reader *pfile) { int ct = count_registered_pragmas (pfile->pragmas); char **result = xnewvec (char *, ct); (void) save_registered_pragmas (pfile->pragmas, result); return result; } /* Restore from SD the names of the registered pragmas referenced by PE, and return a pointer to the next unused name in SD. */ static char ** restore_registered_pragmas (cpp_reader *pfile, struct pragma_entry *pe, char **sd) { for (; pe != NULL; pe = pe->next) { if (pe->is_nspace) sd = restore_registered_pragmas (pfile, pe->u.space, sd); pe->pragma = cpp_lookup (pfile, USTR *sd, strlen (*sd)); free (*sd); sd++; } return sd; } /* Restore the names of the registered pragmas from SAVED. */ void _cpp_restore_pragma_names (cpp_reader *pfile, char **saved) { (void) restore_registered_pragmas (pfile, pfile->pragmas, saved); free (saved); } /* Pragmata handling. We handle some, and pass the rest on to the front end. C99 defines three pragmas and says that no macro expansion is to be performed on them; whether or not macro expansion happens for other pragmas is implementation defined. This implementation never macro-expands the text after #pragma. */ static void do_pragma (cpp_reader *pfile) { const struct pragma_entry *p = NULL; const cpp_token *token, *pragma_token = pfile->cur_token; unsigned int count = 1; pfile->state.prevent_expansion++; token = cpp_get_token (pfile); if (token->type == CPP_NAME) { p = lookup_pragma_entry (pfile->pragmas, token->val.node); if (p && p->is_nspace) { count = 2; token = cpp_get_token (pfile); if (token->type == CPP_NAME) p = lookup_pragma_entry (p->u.space, token->val.node); else p = NULL; } } if (p) { /* Since the handler below doesn't get the line number, that it might need for diagnostics, make sure it has the right numbers in place. */ if (pfile->cb.line_change) (*pfile->cb.line_change) (pfile, pragma_token, false); (*p->u.handler) (pfile); } else if (pfile->cb.def_pragma) { _cpp_backup_tokens (pfile, count); pfile->cb.def_pragma (pfile, pfile->directive_line); } pfile->state.prevent_expansion--; } /* Handle #pragma once. */ static void do_pragma_once (cpp_reader *pfile) { if (pfile->buffer->prev == NULL) cpp_error (pfile, CPP_DL_WARNING, "#pragma once in main file"); check_eol (pfile); _cpp_mark_file_once_only (pfile, pfile->buffer->file); } /* Handle #pragma GCC poison, to poison one or more identifiers so that the lexer produces a hard error for each subsequent usage. */ static void do_pragma_poison (cpp_reader *pfile) { const cpp_token *tok; cpp_hashnode *hp; pfile->state.poisoned_ok = 1; for (;;) { tok = _cpp_lex_token (pfile); if (tok->type == CPP_EOF) break; if (tok->type != CPP_NAME) { cpp_error (pfile, CPP_DL_ERROR, "invalid #pragma GCC poison directive"); break; } hp = tok->val.node; if (hp->flags & NODE_POISONED) continue; if (hp->type == NT_MACRO) cpp_error (pfile, CPP_DL_WARNING, "poisoning existing macro \"%s\"", NODE_NAME (hp)); _cpp_free_definition (hp); hp->flags |= NODE_POISONED | NODE_DIAGNOSTIC; } pfile->state.poisoned_ok = 0; } /* Mark the current header as a system header. This will suppress some categories of warnings (notably those from -pedantic). It is intended for use in system libraries that cannot be implemented in conforming C, but cannot be certain that their headers appear in a system include directory. To prevent abuse, it is rejected in the primary source file. 
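   Typical use (illustrative, not from the original comment) is a line near
   the top of a library header:

       #pragma GCC system_header

   after which the rest of that header is treated as system-header text.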
*/ static void do_pragma_system_header (cpp_reader *pfile) { cpp_buffer *buffer = pfile->buffer; if (buffer->prev == 0) cpp_error (pfile, CPP_DL_WARNING, "#pragma system_header ignored outside include file"); else { check_eol (pfile); skip_rest_of_line (pfile); cpp_make_system_header (pfile, 1, 0); } } /* Check the modified date of the current include file against a specified file. Issue a diagnostic, if the specified file is newer. We use this to determine if a fixed header should be refixed. */ static void do_pragma_dependency (cpp_reader *pfile) { const char *fname; int angle_brackets, ordering; fname = parse_include (pfile, &angle_brackets); if (!fname) return; ordering = _cpp_compare_file_date (pfile, fname, angle_brackets); if (ordering < 0) cpp_error (pfile, CPP_DL_WARNING, "cannot find source file %s", fname); else if (ordering > 0) { cpp_error (pfile, CPP_DL_WARNING, "current file is older than %s", fname); if (cpp_get_token (pfile)->type != CPP_EOF) { _cpp_backup_tokens (pfile, 1); do_diagnostic (pfile, CPP_DL_WARNING, 0); } } free ((void *) fname); } /* Get a token but skip padding. */ static const cpp_token * get_token_no_padding (cpp_reader *pfile) { for (;;) { const cpp_token *result = cpp_get_token (pfile); if (result->type != CPP_PADDING) return result; } } /* Check syntax is "(string-literal)". Returns the string on success, or NULL on failure. */ static const cpp_token * get__Pragma_string (cpp_reader *pfile) { const cpp_token *string; if (get_token_no_padding (pfile)->type != CPP_OPEN_PAREN) return NULL; string = get_token_no_padding (pfile); if (string->type != CPP_STRING && string->type != CPP_WSTRING) return NULL; if (get_token_no_padding (pfile)->type != CPP_CLOSE_PAREN) return NULL; return string; } /* Destringize IN into a temporary buffer, by removing the first \ of \" and \\ sequences, and process the result as a #pragma directive. */ static void destringize_and_run (cpp_reader *pfile, const cpp_string *in) { const unsigned char *src, *limit; char *dest, *result; dest = result = alloca (in->len - 1); src = in->text + 1 + (in->text[0] == 'L'); limit = in->text + in->len - 1; while (src < limit) { /* We know there is a character following the backslash. */ if (*src == '\\' && (src[1] == '\\' || src[1] == '"')) src++; *dest++ = *src++; } *dest = '\n'; /* Ugh; an awful kludge. We are really not set up to be lexing tokens when in the middle of a macro expansion. Use a new context to force cpp_get_token to lex, and so skip_rest_of_line doesn't go beyond the end of the text. Also, remember the current lexing position so we can return to it later. Something like line-at-a-time lexing should remove the need for this. */ { cpp_context *saved_context = pfile->context; cpp_token *saved_cur_token = pfile->cur_token; tokenrun *saved_cur_run = pfile->cur_run; pfile->context = xnew (cpp_context); pfile->context->macro = 0; pfile->context->prev = 0; run_directive (pfile, T_PRAGMA, result, dest - result); free (pfile->context); pfile->context = saved_context; pfile->cur_token = saved_cur_token; pfile->cur_run = saved_cur_run; } /* See above comment. For the moment, we'd like token1 _Pragma ("foo") token2 to be output as token1 # 7 "file.c" #pragma foo # 7 "file.c" token2 Getting the line markers is a little tricky. */ if (pfile->cb.line_change) pfile->cb.line_change (pfile, pfile->cur_token, false); } /* Handle the _Pragma operator. 
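   For example, the C99 operator form

       _Pragma ("GCC system_header")

   is destringized and processed as if the line "#pragma GCC system_header"
   had appeared in the source, which is what lets pragmas be produced by
   macro expansion.  (Illustrative example, not part of the original comment.)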
*/ void _cpp_do__Pragma (cpp_reader *pfile) { const cpp_token *string = get__Pragma_string (pfile); if (string) destringize_and_run (pfile, &string->val.str); else cpp_error (pfile, CPP_DL_ERROR, "_Pragma takes a parenthesized string literal"); } /* Ignore #sccs on all systems. */ static void do_sccs (cpp_reader *pfile ATTRIBUTE_UNUSED) { } /* Handle #ifdef. */ static void do_ifdef (cpp_reader *pfile) { int skip = 1; if (! pfile->state.skipping) { const cpp_hashnode *node = lex_macro_node (pfile); if (node) { skip = node->type != NT_MACRO; _cpp_mark_macro_used (node); check_eol (pfile); } } push_conditional (pfile, skip, T_IFDEF, 0); } /* Handle #ifndef. */ static void do_ifndef (cpp_reader *pfile) { int skip = 1; const cpp_hashnode *node = 0; if (! pfile->state.skipping) { node = lex_macro_node (pfile); if (node) { skip = node->type == NT_MACRO; _cpp_mark_macro_used (node); check_eol (pfile); } } push_conditional (pfile, skip, T_IFNDEF, node); } /* _cpp_parse_expr puts a macro in a "#if !defined ()" expression in pfile->mi_ind_cmacro so we can handle multiple-include optimizations. If macro expansion occurs in the expression, we cannot treat it as a controlling conditional, since the expansion could change in the future. That is handled by cpp_get_token. */ static void do_if (cpp_reader *pfile) { int skip = 1; if (! pfile->state.skipping) skip = _cpp_parse_expr (pfile) == false; push_conditional (pfile, skip, T_IF, pfile->mi_ind_cmacro); } /* Flip skipping state if appropriate and continue without changing if_stack; this is so that the error message for missing #endif's etc. will point to the original #if. */ static void do_else (cpp_reader *pfile) { cpp_buffer *buffer = pfile->buffer; struct if_stack *ifs = buffer->if_stack; if (ifs == NULL) cpp_error (pfile, CPP_DL_ERROR, "#else without #if"); else { if (ifs->type == T_ELSE) { cpp_error (pfile, CPP_DL_ERROR, "#else after #else"); cpp_error_with_line (pfile, CPP_DL_ERROR, ifs->line, 0, "the conditional began here"); } ifs->type = T_ELSE; /* Skip any future (erroneous) #elses or #elifs. */ pfile->state.skipping = ifs->skip_elses; ifs->skip_elses = true; /* Invalidate any controlling macro. */ ifs->mi_cmacro = 0; /* Only check EOL if was not originally skipping. */ if (!ifs->was_skipping && CPP_OPTION (pfile, warn_endif_labels)) check_eol (pfile); } } /* Handle a #elif directive by not changing if_stack either. See the comment above do_else. */ static void do_elif (cpp_reader *pfile) { cpp_buffer *buffer = pfile->buffer; struct if_stack *ifs = buffer->if_stack; if (ifs == NULL) cpp_error (pfile, CPP_DL_ERROR, "#elif without #if"); else { if (ifs->type == T_ELSE) { cpp_error (pfile, CPP_DL_ERROR, "#elif after #else"); cpp_error_with_line (pfile, CPP_DL_ERROR, ifs->line, 0, "the conditional began here"); } ifs->type = T_ELIF; /* Only evaluate this if we aren't skipping elses. During evaluation, set skipping to false to get lexer warnings. */ if (ifs->skip_elses) pfile->state.skipping = 1; else { pfile->state.skipping = 0; pfile->state.skipping = ! _cpp_parse_expr (pfile); ifs->skip_elses = ! pfile->state.skipping; } /* Invalidate any controlling macro. */ ifs->mi_cmacro = 0; } } /* #endif pops the if stack and resets pfile->state.skipping. */ static void do_endif (cpp_reader *pfile) { cpp_buffer *buffer = pfile->buffer; struct if_stack *ifs = buffer->if_stack; if (ifs == NULL) cpp_error (pfile, CPP_DL_ERROR, "#endif without #if"); else { /* Only check EOL if was not originally skipping. 
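      For example, a labelled "#endif FOO" is only diagnosed (under
      -Wendif-labels) when the enclosing region was actually being processed.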
*/ if (!ifs->was_skipping && CPP_OPTION (pfile, warn_endif_labels)) check_eol (pfile); /* If potential control macro, we go back outside again. */ if (ifs->next == 0 && ifs->mi_cmacro) { pfile->mi_valid = true; pfile->mi_cmacro = ifs->mi_cmacro; } buffer->if_stack = ifs->next; pfile->state.skipping = ifs->was_skipping; obstack_free (&pfile->buffer_ob, ifs); } } /* Push an if_stack entry for a preprocessor conditional, and set pfile->state.skipping to SKIP. If TYPE indicates the conditional is #if or #ifndef, CMACRO is a potentially controlling macro, and we need to check here that we are at the top of the file. */ static void push_conditional (cpp_reader *pfile, int skip, int type, const cpp_hashnode *cmacro) { struct if_stack *ifs; cpp_buffer *buffer = pfile->buffer; ifs = xobnew (&pfile->buffer_ob, struct if_stack); ifs->line = pfile->directive_line; ifs->next = buffer->if_stack; ifs->skip_elses = pfile->state.skipping || !skip; ifs->was_skipping = pfile->state.skipping; ifs->type = type; /* This condition is effectively a test for top-of-file. */ if (pfile->mi_valid && pfile->mi_cmacro == 0) ifs->mi_cmacro = cmacro; else ifs->mi_cmacro = 0; pfile->state.skipping = skip; buffer->if_stack = ifs; } /* Read the tokens of the answer into the macro pool, in a directive of type TYPE. Only commit the memory if we intend it as permanent storage, i.e. the #assert case. Returns 0 on success, and sets ANSWERP to point to the answer. */ static int parse_answer (cpp_reader *pfile, struct answer **answerp, int type) { const cpp_token *paren; struct answer *answer; unsigned int acount; /* In a conditional, it is legal to not have an open paren. We should save the following token in this case. */ paren = cpp_get_token (pfile); /* If not a paren, see if we're OK. */ if (paren->type != CPP_OPEN_PAREN) { /* In a conditional no answer is a test for any answer. It could be followed by any token. */ if (type == T_IF) { _cpp_backup_tokens (pfile, 1); return 0; } /* #unassert with no answer is valid - it removes all answers. */ if (type == T_UNASSERT && paren->type == CPP_EOF) return 0; cpp_error (pfile, CPP_DL_ERROR, "missing '(' after predicate"); return 1; } for (acount = 0;; acount++) { size_t room_needed; const cpp_token *token = cpp_get_token (pfile); cpp_token *dest; if (token->type == CPP_CLOSE_PAREN) break; if (token->type == CPP_EOF) { cpp_error (pfile, CPP_DL_ERROR, "missing ')' to complete answer"); return 1; } /* struct answer includes the space for one token. */ room_needed = (sizeof (struct answer) + acount * sizeof (cpp_token)); if (BUFF_ROOM (pfile->a_buff) < room_needed) _cpp_extend_buff (pfile, &pfile->a_buff, sizeof (struct answer)); dest = &((struct answer *) BUFF_FRONT (pfile->a_buff))->first[acount]; *dest = *token; /* Drop whitespace at start, for answer equivalence purposes. */ if (acount == 0) dest->flags &= ~PREV_WHITE; } if (acount == 0) { cpp_error (pfile, CPP_DL_ERROR, "predicate's answer is empty"); return 1; } answer = (struct answer *) BUFF_FRONT (pfile->a_buff); answer->count = acount; answer->next = NULL; *answerp = answer; return 0; } /* Parses an assertion directive of type TYPE, returning a pointer to the hash node of the predicate, or 0 on error. If an answer was supplied, it is placed in ANSWERP, otherwise it is set to 0. */ static cpp_hashnode * parse_assertion (cpp_reader *pfile, struct answer **answerp, int type) { cpp_hashnode *result = 0; const cpp_token *predicate; /* We don't expand predicates or answers. 
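     For example, for an (illustrative) directive "#assert machine(i386)" the
     predicate "machine" and the answer tokens "(i386)" are recorded exactly
     as spelled; no macro expansion is performed on either.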
*/ pfile->state.prevent_expansion++; *answerp = 0; predicate = cpp_get_token (pfile); if (predicate->type == CPP_EOF) cpp_error (pfile, CPP_DL_ERROR, "assertion without predicate"); else if (predicate->type != CPP_NAME) cpp_error (pfile, CPP_DL_ERROR, "predicate must be an identifier"); else if (parse_answer (pfile, answerp, type) == 0) { unsigned int len = NODE_LEN (predicate->val.node); unsigned char *sym = alloca (len + 1); /* Prefix '#' to get it out of macro namespace. */ sym[0] = '#'; memcpy (sym + 1, NODE_NAME (predicate->val.node), len); result = cpp_lookup (pfile, sym, len + 1); } pfile->state.prevent_expansion--; return result; } /* Returns a pointer to the pointer to CANDIDATE in the answer chain, or a pointer to NULL if the answer is not in the chain. */ static struct answer ** find_answer (cpp_hashnode *node, const struct answer *candidate) { unsigned int i; struct answer **result; for (result = &node->value.answers; *result; result = &(*result)->next) { struct answer *answer = *result; if (answer->count == candidate->count) { for (i = 0; i < answer->count; i++) if (! _cpp_equiv_tokens (&answer->first[i], &candidate->first[i])) break; if (i == answer->count) break; } } return result; } /* Test an assertion within a preprocessor conditional. Returns nonzero on failure, zero on success. On success, the result of the test is written into VALUE, otherwise the value 0. */ int _cpp_test_assertion (cpp_reader *pfile, unsigned int *value) { struct answer *answer; cpp_hashnode *node; node = parse_assertion (pfile, &answer, T_IF); /* For recovery, an erroneous assertion expression is handled as a failing assertion. */ *value = 0; if (node) *value = (node->type == NT_ASSERTION && (answer == 0 || *find_answer (node, answer) != 0)); else if (pfile->cur_token[-1].type == CPP_EOF) _cpp_backup_tokens (pfile, 1); /* We don't commit the memory for the answer - it's temporary only. */ return node == 0; } /* Handle #assert. */ static void do_assert (cpp_reader *pfile) { struct answer *new_answer; cpp_hashnode *node; node = parse_assertion (pfile, &new_answer, T_ASSERT); if (node) { size_t answer_size; /* Place the new answer in the answer list. First check there is not a duplicate. */ new_answer->next = 0; if (node->type == NT_ASSERTION) { if (*find_answer (node, new_answer)) { cpp_error (pfile, CPP_DL_WARNING, "\"%s\" re-asserted", NODE_NAME (node) + 1); return; } new_answer->next = node->value.answers; } answer_size = sizeof (struct answer) + ((new_answer->count - 1) * sizeof (cpp_token)); /* Commit or allocate storage for the object. */ if (pfile->hash_table->alloc_subobject) { struct answer *temp_answer = new_answer; new_answer = pfile->hash_table->alloc_subobject (answer_size); memcpy (new_answer, temp_answer, answer_size); } else BUFF_FRONT (pfile->a_buff) += answer_size; node->type = NT_ASSERTION; node->value.answers = new_answer; check_eol (pfile); } } /* Handle #unassert. */ static void do_unassert (cpp_reader *pfile) { cpp_hashnode *node; struct answer *answer; node = parse_assertion (pfile, &answer, T_UNASSERT); /* It isn't an error to #unassert something that isn't asserted. */ if (node && node->type == NT_ASSERTION) { if (answer) { struct answer **p = find_answer (node, answer), *temp; /* Remove the answer from the list. */ temp = *p; if (temp) *p = temp->next; /* Did we free the last answer? */ if (node->value.answers == 0) node->type = NT_VOID; check_eol (pfile); } else _cpp_free_definition (node); } /* We don't commit the memory for the answer - it's temporary only. 
*/ } /* These are for -D, -U, -A. */ /* Process the string STR as if it appeared as the body of a #define. If STR is just an identifier, define it with value 1. If STR has anything after the identifier, then it should be identifier=definition. */ void cpp_define (cpp_reader *pfile, const char *str) { char *buf, *p; size_t count; /* Copy the entire option so we can modify it. Change the first "=" in the string to a space. If there is none, tack " 1" on the end. */ count = strlen (str); buf = alloca (count + 3); memcpy (buf, str, count); p = strchr (str, '='); if (p) buf[p - str] = ' '; else { buf[count++] = ' '; buf[count++] = '1'; } buf[count] = '\n'; run_directive (pfile, T_DEFINE, buf, count); } /* Slight variant of the above for use by initialize_builtins. */ void _cpp_define_builtin (cpp_reader *pfile, const char *str) { size_t len = strlen (str); char *buf = alloca (len + 1); memcpy (buf, str, len); buf[len] = '\n'; run_directive (pfile, T_DEFINE, buf, len); } /* Process MACRO as if it appeared as the body of an #undef. */ void cpp_undef (cpp_reader *pfile, const char *macro) { size_t len = strlen (macro); char *buf = alloca (len + 1); memcpy (buf, macro, len); buf[len] = '\n'; run_directive (pfile, T_UNDEF, buf, len); } /* Process the string STR as if it appeared as the body of a #assert. */ void cpp_assert (cpp_reader *pfile, const char *str) { handle_assertion (pfile, str, T_ASSERT); } /* Process STR as if it appeared as the body of an #unassert. */ void cpp_unassert (cpp_reader *pfile, const char *str) { handle_assertion (pfile, str, T_UNASSERT); } /* Common code for cpp_assert (-A) and cpp_unassert (-A-). */ static void handle_assertion (cpp_reader *pfile, const char *str, int type) { size_t count = strlen (str); const char *p = strchr (str, '='); /* Copy the entire option so we can modify it. Change the first "=" in the string to a '(', and tack a ')' on the end. */ char *buf = alloca (count + 2); memcpy (buf, str, count); if (p) { buf[p - str] = '('; buf[count++] = ')'; } buf[count] = '\n'; str = buf; run_directive (pfile, type, str, count); } /* The number of errors for a given reader. */ unsigned int cpp_errors (cpp_reader *pfile) { return pfile->errors; } /* The options structure. */ cpp_options * cpp_get_options (cpp_reader *pfile) { return &pfile->opts; } /* The callbacks structure. */ cpp_callbacks * cpp_get_callbacks (cpp_reader *pfile) { return &pfile->cb; } /* Copy the given callbacks structure to our own. */ void cpp_set_callbacks (cpp_reader *pfile, cpp_callbacks *cb) { pfile->cb = *cb; } /* The dependencies structure. (Creates one if it hasn't already been.) */ struct depends * cpp_get_deps (cpp_reader *pfile) { if (!pfile->deps) pfile->deps = deps_init (); return pfile->deps; } /* Push a new buffer on the buffer stack. Returns the new buffer; it doesn't fail. It does not generate a file change call back; that is the responsibility of the caller. */ cpp_buffer * cpp_push_buffer (cpp_reader *pfile, const uchar *buffer, size_t len, int from_stage3) { cpp_buffer *new = xobnew (&pfile->buffer_ob, cpp_buffer); /* Clears, amongst other things, if_stack and mi_cmacro. */ memset (new, 0, sizeof (cpp_buffer)); new->next_line = new->buf = buffer; new->rlimit = buffer + len; new->from_stage3 = from_stage3; new->prev = pfile->buffer; new->need_line = true; pfile->buffer = new; return new; } /* Pops a single buffer, with a file change call-back if appropriate. Then pushes the next -include file, if any remain. 
*/ void _cpp_pop_buffer (cpp_reader *pfile) { cpp_buffer *buffer = pfile->buffer; struct _cpp_file *inc = buffer->file; struct if_stack *ifs; /* Walk back up the conditional stack till we reach its level at entry to this file, issuing error messages. */ for (ifs = buffer->if_stack; ifs; ifs = ifs->next) cpp_error_with_line (pfile, CPP_DL_ERROR, ifs->line, 0, "unterminated #%s", dtable[ifs->type].name); /* In case of a missing #endif. */ pfile->state.skipping = 0; /* _cpp_do_file_change expects pfile->buffer to be the new one. */ pfile->buffer = buffer->prev; free (buffer->notes); /* Free the buffer object now; we may want to push a new buffer in _cpp_push_next_include_file. */ obstack_free (&pfile->buffer_ob, buffer); if (inc) { _cpp_pop_file_buffer (pfile, inc); _cpp_do_file_change (pfile, LC_LEAVE, 0, 0, 0); } } /* Enter all recognized directives in the hash table. */ void _cpp_init_directives (cpp_reader *pfile) { unsigned int i; cpp_hashnode *node; for (i = 0; i < (unsigned int) N_DIRECTIVES; i++) { node = cpp_lookup (pfile, dtable[i].name, dtable[i].length); node->is_directive = 1; node->directive_index = i; } } #undef KANDR #undef STDC89 #undef EXTENSION #undef CONDIT #undef IF_COND #undef INCL #undef IN_I #undef EXPAND /* Default error handlers for CPP Library. Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1998, 1999, 2000, 2001, 2002, 2004 Free Software Foundation, Inc. Written by Per Bothner, 1994. Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ static void print_location (cpp_reader *, source_location, unsigned int); /* Print the logical file location (LINE, COL) in preparation for a diagnostic. Outputs the #include chain if it has changed. A line of zero suppresses the include stack, and outputs the program name instead. */ static void print_location (cpp_reader *pfile, source_location line, unsigned int col) { if (line == 0) fprintf (stderr, "%s: ", progname); else { const struct line_map *map; unsigned int lin; map = linemap_lookup (pfile->line_table, line); linemap_print_containing_files (pfile->line_table, map); lin = SOURCE_LINE (map, line); if (col == 0) { col = SOURCE_COLUMN (map, line); if (col == 0) col = 1; } if (lin == 0) fprintf (stderr, "%s:", map->to_file); else if (CPP_OPTION (pfile, show_column) == 0) fprintf (stderr, "%s:%u:", map->to_file, lin); else fprintf (stderr, "%s:%u:%u:", map->to_file, lin, col); fputc (' ', stderr); } } #ifdef ONE_COMPILATION_UNIT #define _(msg) msg #endif /* Set up for a diagnostic: print the file and line, bump the error counter, etc. 
SRC_LOC is the logical line number; zero means to print at the location of the previously lexed token, which tends to be the correct place by default. The column number can be specified either using COLUMN or (if COLUMN==0) extracting SOURCE_COLUMN from SRC_LOC. (This may seem redundant, but is useful when pre-scanning (cleaning) a line, when we haven't yet verified whether the current line_map has a big enough max_column_hint.) Returns 0 if the error has been suppressed. */ int _cpp_begin_message (cpp_reader *pfile, int code, source_location src_loc, unsigned int column) { int level = CPP_DL_EXTRACT (code); switch (level) { case CPP_DL_WARNING: case CPP_DL_PEDWARN: if (cpp_in_system_header (pfile) && ! CPP_OPTION (pfile, warn_system_headers)) return 0; /* Fall through. */ case CPP_DL_WARNING_SYSHDR: if (CPP_OPTION (pfile, warnings_are_errors) || (level == CPP_DL_PEDWARN && CPP_OPTION (pfile, pedantic_errors))) { if (CPP_OPTION (pfile, inhibit_errors)) return 0; level = CPP_DL_ERROR; pfile->errors++; } else if (CPP_OPTION (pfile, inhibit_warnings)) return 0; break; case CPP_DL_ERROR: if (CPP_OPTION (pfile, inhibit_errors)) return 0; /* ICEs cannot be inhibited. */ case CPP_DL_ICE: pfile->errors++; break; } print_location (pfile, src_loc, column); if (CPP_DL_WARNING_P (level)) fputs (_("warning: "), stderr); else if (level == CPP_DL_ICE) fputs (_("internal error: "), stderr); return 1; } /* Don't remove the blank before do, as otherwise the exgettext script will mistake this as a function definition */ #define v_message(msgid, ap) \ do { vfprintf (stderr, _(msgid), ap); putc ('\n', stderr); } while (0) /* Exported interface. */ /* Print an error at the location of the previously lexed token. */ void cpp_error (cpp_reader * pfile, int level, const char *msgid, ...) { source_location src_loc; va_list ap; va_start (ap, msgid); if (CPP_OPTION (pfile, traditional)) { if (pfile->state.in_directive) src_loc = pfile->directive_line; else src_loc = pfile->line_table->highest_line; } else { src_loc = pfile->cur_token[-1].src_loc; } if (_cpp_begin_message (pfile, level, src_loc, 0)) v_message (msgid, ap); va_end (ap); } /* Print an error at a specific location. */ void cpp_error_with_line (cpp_reader *pfile, int level, source_location src_loc, unsigned int column, const char *msgid, ...) { va_list ap; va_start (ap, msgid); if (_cpp_begin_message (pfile, level, src_loc, column)) v_message (msgid, ap); va_end (ap); } void cpp_errno (cpp_reader *pfile, int level, const char *msgid) { if (msgid[0] == '\0') msgid = _("stdout"); cpp_error (pfile, level, "%s: %s", msgid, xstrerror (errno)); } /* Parse C expressions for cpplib. Copyright (C) 1987, 1992, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2004 Free Software Foundation. Contributed by Per Bothner, 1994. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #define PART_PRECISION (sizeof (cpp_num_part) * CHAR_BIT) #define HALF_MASK (~(cpp_num_part) 0 >> (PART_PRECISION / 2)) #define LOW_PART(num_part) (num_part & HALF_MASK) #define HIGH_PART(num_part) (num_part >> (PART_PRECISION / 2)) struct op { const cpp_token *token; /* The token forming op (for diagnostics). */ cpp_num value; /* The value logically "right" of op. */ enum cpp_ttype op; }; /* Some simple utility routines on double integers. */ #define num_zerop(num) ((num.low | num.high) == 0) #define num_eq(num1, num2) (num1.low == num2.low && num1.high == num2.high) static bool num_positive (cpp_num, size_t); static bool num_greater_eq (cpp_num, cpp_num, size_t); static cpp_num num_trim (cpp_num, size_t); static cpp_num num_part_mul (cpp_num_part, cpp_num_part); static cpp_num num_unary_op (cpp_reader *, cpp_num, enum cpp_ttype); static cpp_num num_binary_op (cpp_reader *, cpp_num, cpp_num, enum cpp_ttype); static cpp_num num_negate (cpp_num, size_t); static cpp_num num_bitwise_op (cpp_reader *, cpp_num, cpp_num, enum cpp_ttype); static cpp_num num_inequality_op (cpp_reader *, cpp_num, cpp_num, enum cpp_ttype); static cpp_num num_equality_op (cpp_reader *, cpp_num, cpp_num, enum cpp_ttype); static cpp_num num_mul (cpp_reader *, cpp_num, cpp_num); static cpp_num num_div_op (cpp_reader *, cpp_num, cpp_num, enum cpp_ttype); static cpp_num num_lshift (cpp_num, size_t, size_t); static cpp_num num_rshift (cpp_num, size_t, size_t); static cpp_num append_digit (cpp_num, int, int, size_t); static cpp_num parse_defined (cpp_reader *); static cpp_num eval_token (cpp_reader *, const cpp_token *); static struct op *reduce (cpp_reader *, struct op *, enum cpp_ttype); static unsigned int interpret_float_suffix (const uchar *, size_t); static unsigned int interpret_int_suffix (const uchar *, size_t); static void check_promotion (cpp_reader *, const struct op *); /* Token type abuse to create unary plus and minus operators. */ #define CPP_UPLUS (CPP_LAST_CPP_OP + 1) #define CPP_UMINUS (CPP_LAST_CPP_OP + 2) /* With -O2, gcc appears to produce nice code, moving the error message load and subsequent jump completely out of the main path. */ #define SYNTAX_ERROR(msgid) \ do { cpp_error (pfile, CPP_DL_ERROR, msgid); goto syntax_error; } while(0) #define SYNTAX_ERROR2(msgid, arg) \ do { cpp_error (pfile, CPP_DL_ERROR, msgid, arg); goto syntax_error; } \ while(0) /* Subroutine of cpp_classify_number. S points to a float suffix of length LEN, possibly zero. Returns 0 for an invalid suffix, or a flag vector describing the suffix. */ static unsigned int interpret_float_suffix (const uchar *s, size_t len) { size_t f = 0, l = 0, i = 0; while (len--) switch (s[len]) { case 'f': case 'F': f++; break; case 'l': case 'L': l++; break; case 'i': case 'I': case 'j': case 'J': i++; break; default: return 0; } if (f + l > 1 || i > 1) return 0; return ((i ? CPP_N_IMAGINARY : 0) | (f ? CPP_N_SMALL : l ? CPP_N_LARGE : CPP_N_MEDIUM)); } /* Subroutine of cpp_classify_number. S points to an integer suffix of length LEN, possibly zero. Returns 0 for an invalid suffix, or a flag vector describing the suffix. */ static unsigned int interpret_int_suffix (const uchar *s, size_t len) { size_t u, l, i; u = l = i = 0; while (len--) switch (s[len]) { case 'u': case 'U': u++; break; case 'i': case 'I': case 'j': case 'J': i++; break; case 'l': case 'L': l++; /* If there are two Ls, they must be adjacent and the same case. 
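	   Thus the suffixes "ll" and "LL" are accepted, while the mixed-case
	   "lL" and "Ll" are rejected by the check below.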
*/ if (l == 2 && s[len] != s[len + 1]) return 0; break; default: return 0; } if (l > 2 || u > 1 || i > 1) return 0; return ((i ? CPP_N_IMAGINARY : 0) | (u ? CPP_N_UNSIGNED : 0) | ((l == 0) ? CPP_N_SMALL : (l == 1) ? CPP_N_MEDIUM : CPP_N_LARGE)); } /* Categorize numeric constants according to their field (integer, floating point, or invalid), radix (decimal, octal, hexadecimal), and type suffixes. */ unsigned int cpp_classify_number (cpp_reader *pfile, const cpp_token *token) { const uchar *str = token->val.str.text; const uchar *limit; unsigned int max_digit, result, radix; enum {NOT_FLOAT = 0, AFTER_POINT, AFTER_EXPON} float_flag; /* If the lexer has done its job, length one can only be a single digit. Fast-path this very common case. */ if (token->val.str.len == 1) return CPP_N_INTEGER | CPP_N_SMALL | CPP_N_DECIMAL; limit = str + token->val.str.len; float_flag = NOT_FLOAT; max_digit = 0; radix = 10; /* First, interpret the radix. */ if (*str == '0') { radix = 8; str++; /* Require at least one hex digit to classify it as hex. */ if ((*str == 'x' || *str == 'X') && (str[1] == '.' || ISXDIGIT (str[1]))) { radix = 16; str++; } } /* Now scan for a well-formed integer or float. */ for (;;) { unsigned int c = *str++; if (ISDIGIT (c) || (ISXDIGIT (c) && radix == 16)) { c = hex_value (c); if (c > max_digit) max_digit = c; } else if (c == '.') { if (float_flag == NOT_FLOAT) float_flag = AFTER_POINT; else SYNTAX_ERROR ("too many decimal points in number"); } else if ((radix <= 10 && (c == 'e' || c == 'E')) || (radix == 16 && (c == 'p' || c == 'P'))) { float_flag = AFTER_EXPON; break; } else { /* Start of suffix. */ str--; break; } } if (float_flag != NOT_FLOAT && radix == 8) radix = 10; if (max_digit >= radix) SYNTAX_ERROR2 ("invalid digit \"%c\" in octal constant", '0' + max_digit); if (float_flag != NOT_FLOAT) { if (radix == 16 && CPP_PEDANTIC (pfile) && !CPP_OPTION (pfile, c99)) cpp_error (pfile, CPP_DL_PEDWARN, "use of C99 hexadecimal floating constant"); if (float_flag == AFTER_EXPON) { if (*str == '+' || *str == '-') str++; /* Exponent is decimal, even if string is a hex float. */ if (!ISDIGIT (*str)) SYNTAX_ERROR ("exponent has no digits"); do str++; while (ISDIGIT (*str)); } else if (radix == 16) SYNTAX_ERROR ("hexadecimal floating constants require an exponent"); result = interpret_float_suffix (str, limit - str); if (result == 0) { cpp_error (pfile, CPP_DL_ERROR, "invalid suffix \"%.*s\" on floating constant", (int) (limit - str), str); return CPP_N_INVALID; } /* Traditional C didn't accept any floating suffixes. */ if (limit != str && CPP_WTRADITIONAL (pfile) && ! cpp_sys_macro_p (pfile)) cpp_error (pfile, CPP_DL_WARNING, "traditional C rejects the \"%.*s\" suffix", (int) (limit - str), str); result |= CPP_N_FLOATING; } else { result = interpret_int_suffix (str, limit - str); if (result == 0) { cpp_error (pfile, CPP_DL_ERROR, "invalid suffix \"%.*s\" on integer constant", (int) (limit - str), str); return CPP_N_INVALID; } /* Traditional C only accepted the 'L' suffix. Suppress warning about 'LL' with -Wno-long-long. */ if (CPP_WTRADITIONAL (pfile) && ! cpp_sys_macro_p (pfile)) { int u_or_i = (result & (CPP_N_UNSIGNED|CPP_N_IMAGINARY)); int large = (result & CPP_N_WIDTH) == CPP_N_LARGE; if (u_or_i || (large && CPP_OPTION (pfile, warn_long_long))) cpp_error (pfile, CPP_DL_WARNING, "traditional C rejects the \"%.*s\" suffix", (int) (limit - str), str); } if ((result & CPP_N_WIDTH) == CPP_N_LARGE && ! 
CPP_OPTION (pfile, c99) && CPP_OPTION (pfile, warn_long_long)) cpp_error (pfile, CPP_DL_PEDWARN, "use of C99 long long integer constant"); result |= CPP_N_INTEGER; } if ((result & CPP_N_IMAGINARY) && CPP_PEDANTIC (pfile)) cpp_error (pfile, CPP_DL_PEDWARN, "imaginary constants are a GCC extension"); if (radix == 10) result |= CPP_N_DECIMAL; else if (radix == 16) result |= CPP_N_HEX; else result |= CPP_N_OCTAL; return result; syntax_error: return CPP_N_INVALID; } /* cpp_interpret_integer converts an integer constant into a cpp_num, of precision options->precision. We do not provide any interface for decimal->float conversion, because the preprocessor doesn't need it and we don't want to drag in GCC's floating point emulator. */ cpp_num cpp_interpret_integer (cpp_reader *pfile, const cpp_token *token, unsigned int type) { const uchar *p, *end; cpp_num result; result.low = 0; result.high = 0; result.unsignedp = !!(type & CPP_N_UNSIGNED); result.overflow = false; p = token->val.str.text; end = p + token->val.str.len; /* Common case of a single digit. */ if (token->val.str.len == 1) result.low = p[0] - '0'; else { cpp_num_part max; size_t precision = CPP_OPTION (pfile, precision); unsigned int base = 10, c = 0; bool overflow = false; if ((type & CPP_N_RADIX) == CPP_N_OCTAL) { base = 8; p++; } else if ((type & CPP_N_RADIX) == CPP_N_HEX) { base = 16; p += 2; } /* We can add a digit to numbers strictly less than this without needing the precision and slowness of double integers. */ max = ~(cpp_num_part) 0; if (precision < PART_PRECISION) max >>= PART_PRECISION - precision; max = (max - base + 1) / base + 1; for (; p < end; p++) { c = *p; if (ISDIGIT (c) || (base == 16 && ISXDIGIT (c))) c = hex_value (c); else break; /* Strict inequality for when max is set to zero. */ if (result.low < max) result.low = result.low * base + c; else { result = append_digit (result, c, base, precision); overflow |= result.overflow; max = 0; } } if (overflow) cpp_error (pfile, CPP_DL_PEDWARN, "integer constant is too large for its type"); /* If too big to be signed, consider it unsigned. Only warn for decimal numbers. Traditional numbers were always signed (but we still honor an explicit U suffix); but we only have traditional semantics in directives. */ else if (!result.unsignedp && !(CPP_OPTION (pfile, traditional) && pfile->state.in_directive) && !num_positive (result, precision)) { if (base == 10) cpp_error (pfile, CPP_DL_WARNING, "integer constant is so large that it is unsigned"); result.unsignedp = true; } } return result; } /* Append DIGIT to NUM, a number of PRECISION bits being read in base BASE. */ static cpp_num append_digit (cpp_num num, int digit, int base, size_t precision) { cpp_num result; unsigned int shift = 3 + (base == 16); bool overflow; cpp_num_part add_high, add_low; /* Multiply by 8 or 16. Catching this overflow here means we don't need to worry about add_high overflowing. 
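     For octal and hex a single shift by 3 or 4 bits is enough; for base 10
     the value wanted is num * 10 + digit, which is formed below as
     (num << 3) + (num << 1) + digit.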
*/ overflow = !!(num.high >> (PART_PRECISION - shift)); result.high = num.high << shift; result.low = num.low << shift; result.high |= num.low >> (PART_PRECISION - shift); result.unsignedp = num.unsignedp; if (base == 10) { add_low = num.low << 1; add_high = (num.high << 1) + (num.low >> (PART_PRECISION - 1)); } else add_high = add_low = 0; if (add_low + digit < add_low) add_high++; add_low += digit; if (result.low + add_low < result.low) add_high++; if (result.high + add_high < result.high) overflow = true; result.low += add_low; result.high += add_high; result.overflow = overflow; /* The above code catches overflow of a cpp_num type. This catches overflow of the (possibly shorter) target precision. */ num.low = result.low; num.high = result.high; result = num_trim (result, precision); if (!num_eq (result, num)) result.overflow = true; return result; } /* Handle meeting "defined" in a preprocessor expression. */ static cpp_num parse_defined (cpp_reader *pfile) { cpp_num result; int paren = 0; cpp_hashnode *node = 0; const cpp_token *token; cpp_context *initial_context = pfile->context; /* Don't expand macros. */ pfile->state.prevent_expansion++; token = cpp_get_token (pfile); if (token->type == CPP_OPEN_PAREN) { paren = 1; token = cpp_get_token (pfile); } if (token->type == CPP_NAME) { node = token->val.node; if (paren && cpp_get_token (pfile)->type != CPP_CLOSE_PAREN) { cpp_error (pfile, CPP_DL_ERROR, "missing ')' after \"defined\""); node = 0; } } else { cpp_error (pfile, CPP_DL_ERROR, "operator \"defined\" requires an identifier"); if (token->flags & NAMED_OP) { cpp_token op; op.flags = 0; op.type = token->type; cpp_error (pfile, CPP_DL_ERROR, "(\"%s\" is an alternative token for \"%s\" in C++)", cpp_token_as_text (pfile, token), cpp_token_as_text (pfile, &op)); } } if (node) { if (pfile->context != initial_context && CPP_PEDANTIC (pfile)) cpp_error (pfile, CPP_DL_WARNING, "this use of \"defined\" may not be portable"); _cpp_mark_macro_used (node); /* A possible controlling macro of the form #if !defined (). _cpp_parse_expr checks there was no other junk on the line. */ pfile->mi_ind_cmacro = node; } pfile->state.prevent_expansion--; result.unsignedp = false; result.high = 0; result.overflow = false; result.low = node && node->type == NT_MACRO; return result; } /* Convert a token into a CPP_NUMBER (an interpreted preprocessing number or character constant, or the result of the "defined" or "#" operators). */ static cpp_num eval_token (cpp_reader *pfile, const cpp_token *token) { cpp_num result; unsigned int temp; int unsignedp = 0; result.unsignedp = false; result.overflow = false; switch (token->type) { case CPP_NUMBER: temp = cpp_classify_number (pfile, token); switch (temp & CPP_N_CATEGORY) { case CPP_N_FLOATING: cpp_error (pfile, CPP_DL_ERROR, "floating constant in preprocessor expression"); break; case CPP_N_INTEGER: if (!(temp & CPP_N_IMAGINARY)) return cpp_interpret_integer (pfile, token, temp); cpp_error (pfile, CPP_DL_ERROR, "imaginary number in preprocessor expression"); break; case CPP_N_INVALID: /* Error already issued. */ break; } result.high = result.low = 0; break; case CPP_WCHAR: case CPP_CHAR: { cppchar_t cc = cpp_interpret_charconst (pfile, token, &temp, &unsignedp); result.high = 0; result.low = cc; /* Sign-extend the result if necessary. 
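	   For example, on a target where plain char is signed, the constant
	   '\377' reaches this point as a negative value, and the upper bits
	   must be filled in so that it still equals -1 at the full precision
	   used for #if arithmetic.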
*/ if (!unsignedp && (cppchar_signed_t) cc < 0) { if (PART_PRECISION > BITS_PER_CPPCHAR_T) result.low |= ~(~(cpp_num_part) 0 >> (PART_PRECISION - BITS_PER_CPPCHAR_T)); result.high = ~(cpp_num_part) 0; result = num_trim (result, CPP_OPTION (pfile, precision)); } } break; case CPP_NAME: if (token->val.node == pfile->spec_nodes.n_defined) return parse_defined (pfile); else if (CPP_OPTION (pfile, cplusplus) && (token->val.node == pfile->spec_nodes.n_true || token->val.node == pfile->spec_nodes.n_false)) { result.high = 0; result.low = (token->val.node == pfile->spec_nodes.n_true); } else { result.high = 0; result.low = 0; if (CPP_OPTION (pfile, warn_undef) && !pfile->state.skip_eval) cpp_error (pfile, CPP_DL_WARNING, "\"%s\" is not defined", NODE_NAME (token->val.node)); } break; default: /* CPP_HASH */ _cpp_test_assertion (pfile, &temp); result.high = 0; result.low = temp; } result.unsignedp = !!unsignedp; return result; } /* Operator precedence and flags table. After an operator is returned from the lexer, if it has priority less than the operator on the top of the stack, we reduce the stack by one operator and repeat the test. Since equal priorities do not reduce, this is naturally right-associative. We handle left-associative operators by decrementing the priority of just-lexed operators by one, but retaining the priority of operators already on the stack. The remaining cases are '(' and ')'. We handle '(' by skipping the reduction phase completely. ')' is given lower priority than everything else, including '(', effectively forcing a reduction of the parenthesized expression. If there is a matching '(', the routine reduce() exits immediately. If the normal exit route sees a ')', then there cannot have been a matching '(' and an error message is output. The parser assumes all shifted operators require a left operand unless the flag NO_L_OPERAND is set. These semantics are automatic; any extra semantics need to be handled with operator-specific code. */ /* Flags. If CHECK_PROMOTION, we warn if the effective sign of an operand changes because of integer promotions. */ #define NO_L_OPERAND (1 << 0) #define LEFT_ASSOC (1 << 1) #define CHECK_PROMOTION (1 << 2) /* Operator to priority map. Must be in the same order as the first N entries of enum cpp_ttype. */ static const struct operator { uchar prio; uchar flags; } op_table[] = { /* EQ */ {0, 0}, /* Shouldn't happen. 
*/ /* NOT */ {16, NO_L_OPERAND}, /* GREATER */ {12, LEFT_ASSOC | CHECK_PROMOTION}, /* LESS */ {12, LEFT_ASSOC | CHECK_PROMOTION}, /* PLUS */ {14, LEFT_ASSOC | CHECK_PROMOTION}, /* MINUS */ {14, LEFT_ASSOC | CHECK_PROMOTION}, /* MULT */ {15, LEFT_ASSOC | CHECK_PROMOTION}, /* DIV */ {15, LEFT_ASSOC | CHECK_PROMOTION}, /* MOD */ {15, LEFT_ASSOC | CHECK_PROMOTION}, /* AND */ {9, LEFT_ASSOC | CHECK_PROMOTION}, /* OR */ {7, LEFT_ASSOC | CHECK_PROMOTION}, /* XOR */ {8, LEFT_ASSOC | CHECK_PROMOTION}, /* RSHIFT */ {13, LEFT_ASSOC}, /* LSHIFT */ {13, LEFT_ASSOC}, /* MIN */ {10, LEFT_ASSOC | CHECK_PROMOTION}, /* MAX */ {10, LEFT_ASSOC | CHECK_PROMOTION}, /* COMPL */ {16, NO_L_OPERAND}, /* AND_AND */ {6, LEFT_ASSOC}, /* OR_OR */ {5, LEFT_ASSOC}, /* QUERY */ {3, 0}, /* COLON */ {4, LEFT_ASSOC | CHECK_PROMOTION}, /* COMMA */ {2, LEFT_ASSOC}, /* OPEN_PAREN */ {1, NO_L_OPERAND}, /* CLOSE_PAREN */ {0, 0}, /* EOF */ {0, 0}, /* EQ_EQ */ {11, LEFT_ASSOC}, /* NOT_EQ */ {11, LEFT_ASSOC}, /* GREATER_EQ */ {12, LEFT_ASSOC | CHECK_PROMOTION}, /* LESS_EQ */ {12, LEFT_ASSOC | CHECK_PROMOTION}, /* UPLUS */ {16, NO_L_OPERAND}, /* UMINUS */ {16, NO_L_OPERAND} }; /* Parse and evaluate a C expression, reading from PFILE. Returns the truth value of the expression. The implementation is an operator precedence parser, i.e. a bottom-up parser, using a stack for not-yet-reduced tokens. The stack base is op_stack, and the current stack pointer is 'top'. There is a stack element for each operator (only), and the most recently pushed operator is 'top->op'. An operand (value) is stored in the 'value' field of the stack element of the operator that precedes it. */ bool _cpp_parse_expr (cpp_reader *pfile) { struct op *top = pfile->op_stack; unsigned int lex_count; bool saw_leading_not, want_value = true; pfile->state.skip_eval = 0; /* Set up detection of #if ! defined(). */ pfile->mi_ind_cmacro = 0; saw_leading_not = false; lex_count = 0; /* Lowest priority operator prevents further reductions. */ top->op = CPP_EOF; for (;;) { struct op op; lex_count++; op.token = cpp_get_token (pfile); op.op = op.token->type; switch (op.op) { /* These tokens convert into values. */ case CPP_NUMBER: case CPP_CHAR: case CPP_WCHAR: case CPP_NAME: case CPP_HASH: if (!want_value) SYNTAX_ERROR2 ("missing binary operator before token \"%s\"", cpp_token_as_text (pfile, op.token)); want_value = false; top->value = eval_token (pfile, op.token); continue; case CPP_NOT: saw_leading_not = lex_count == 1; break; case CPP_PLUS: if (want_value) op.op = CPP_UPLUS; break; case CPP_MINUS: if (want_value) op.op = CPP_UMINUS; break; default: if ((int) op.op <= (int) CPP_EQ || (int) op.op >= (int) CPP_PLUS_EQ) SYNTAX_ERROR2 ("token \"%s\" is not valid in preprocessor expressions", cpp_token_as_text (pfile, op.token)); break; } /* Check we have a value or operator as appropriate. */ if (op_table[op.op].flags & NO_L_OPERAND) { if (!want_value) SYNTAX_ERROR2 ("missing binary operator before token \"%s\"", cpp_token_as_text (pfile, op.token)); } else if (want_value) { /* We want a number (or expression) and haven't got one. Try to emit a specific diagnostic. 
*/ if (op.op == CPP_CLOSE_PAREN && top->op == CPP_OPEN_PAREN) SYNTAX_ERROR ("missing expression between '(' and ')'"); if (op.op == CPP_EOF && top->op == CPP_EOF) SYNTAX_ERROR ("#if with no expression"); if (top->op != CPP_EOF && top->op != CPP_OPEN_PAREN) SYNTAX_ERROR2 ("operator '%s' has no right operand", cpp_token_as_text (pfile, top->token)); else if (op.op == CPP_CLOSE_PAREN || op.op == CPP_EOF) /* Complain about missing paren during reduction. */; else SYNTAX_ERROR2 ("operator '%s' has no left operand", cpp_token_as_text (pfile, op.token)); } top = reduce (pfile, top, op.op); if (!top) goto syntax_error; if (op.op == CPP_EOF) break; switch (op.op) { case CPP_CLOSE_PAREN: continue; case CPP_OR_OR: if (!num_zerop (top->value)) pfile->state.skip_eval++; break; case CPP_AND_AND: case CPP_QUERY: if (num_zerop (top->value)) pfile->state.skip_eval++; break; case CPP_COLON: if (top->op != CPP_QUERY) SYNTAX_ERROR (" ':' without preceding '?'"); if (!num_zerop (top[-1].value)) /* Was '?' condition true? */ pfile->state.skip_eval++; else pfile->state.skip_eval--; default: break; } want_value = true; /* Check for and handle stack overflow. */ if (++top == pfile->op_limit) top = _cpp_expand_op_stack (pfile); top->op = op.op; top->token = op.token; } /* The controlling macro expression is only valid if we called lex 3 times: <!> <defined expression> and <EOF>. push_conditional () checks that we are at top-of-file. */ if (pfile->mi_ind_cmacro && !(saw_leading_not && lex_count == 3)) pfile->mi_ind_cmacro = 0; if (top != pfile->op_stack) { cpp_error (pfile, CPP_DL_ICE, "unbalanced stack in #if"); syntax_error: return false; /* Return false on syntax error. */ } return !num_zerop (top->value); } /* Reduce the operator / value stack if possible, in preparation for pushing operator OP. Returns NULL on error, otherwise the top of the stack. */ static struct op * reduce (cpp_reader *pfile, struct op *top, enum cpp_ttype op) { unsigned int prio; if (top->op <= CPP_EQ || top->op > CPP_LAST_CPP_OP + 2) { bad_op: cpp_error (pfile, CPP_DL_ICE, "impossible operator '%u'", top->op); return 0; } if (op == CPP_OPEN_PAREN) return top; /* Decrement the priority of left-associative operators to force a reduction with operators of otherwise equal priority.
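     For instance, when a '-' has just been lexed and another '-' is already
     on the stack, both have priority 14, but the incoming one is treated as
     13; since 13 < 14 the stacked subtraction is reduced first, so
     "8 - 3 - 2" groups as "(8 - 3) - 2".  (Worked example added for
     illustration.)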
*/ prio = op_table[op].prio - ((op_table[op].flags & LEFT_ASSOC) != 0); while (prio < op_table[top->op].prio) { if (CPP_OPTION (pfile, warn_num_sign_change) && op_table[top->op].flags & CHECK_PROMOTION) check_promotion (pfile, top); switch (top->op) { case CPP_UPLUS: case CPP_UMINUS: case CPP_NOT: case CPP_COMPL: top[-1].value = num_unary_op (pfile, top->value, top->op); break; case CPP_PLUS: case CPP_MINUS: case CPP_RSHIFT: case CPP_LSHIFT: case CPP_MIN: case CPP_MAX: case CPP_COMMA: top[-1].value = num_binary_op (pfile, top[-1].value, top->value, top->op); break; case CPP_GREATER: case CPP_LESS: case CPP_GREATER_EQ: case CPP_LESS_EQ: top[-1].value = num_inequality_op (pfile, top[-1].value, top->value, top->op); break; case CPP_EQ_EQ: case CPP_NOT_EQ: top[-1].value = num_equality_op (pfile, top[-1].value, top->value, top->op); break; case CPP_AND: case CPP_OR: case CPP_XOR: top[-1].value = num_bitwise_op (pfile, top[-1].value, top->value, top->op); break; case CPP_MULT: top[-1].value = num_mul (pfile, top[-1].value, top->value); break; case CPP_DIV: case CPP_MOD: top[-1].value = num_div_op (pfile, top[-1].value, top->value, top->op); break; case CPP_OR_OR: top--; if (!num_zerop (top->value)) pfile->state.skip_eval--; top->value.low = (!num_zerop (top->value) || !num_zerop (top[1].value)); top->value.high = 0; top->value.unsignedp = false; top->value.overflow = false; continue; case CPP_AND_AND: top--; if (num_zerop (top->value)) pfile->state.skip_eval--; top->value.low = (!num_zerop (top->value) && !num_zerop (top[1].value)); top->value.high = 0; top->value.unsignedp = false; top->value.overflow = false; continue; case CPP_OPEN_PAREN: if (op != CPP_CLOSE_PAREN) { cpp_error (pfile, CPP_DL_ERROR, "missing ')' in expression"); return 0; } top--; top->value = top[1].value; return top; case CPP_COLON: top -= 2; if (!num_zerop (top->value)) { pfile->state.skip_eval--; top->value = top[1].value; } else top->value = top[2].value; top->value.unsignedp = (top[1].value.unsignedp || top[2].value.unsignedp); continue; case CPP_QUERY: cpp_error (pfile, CPP_DL_ERROR, "'?' without following ':'"); return 0; default: goto bad_op; } top--; if (top->value.overflow && !pfile->state.skip_eval) cpp_error (pfile, CPP_DL_PEDWARN, "integer overflow in preprocessor expression"); } if (op == CPP_CLOSE_PAREN) { cpp_error (pfile, CPP_DL_ERROR, "missing '(' in expression"); return 0; } return top; } /* Returns the position of the old top of stack after expansion. */ struct op * _cpp_expand_op_stack (cpp_reader *pfile) { size_t old_size = (size_t) (pfile->op_limit - pfile->op_stack); size_t new_size = old_size * 2 + 20; pfile->op_stack = xrealloc (pfile->op_stack, new_size * sizeof (struct op)); pfile->op_limit = pfile->op_stack + new_size; return pfile->op_stack + old_size; } /* Emits a warning if the effective sign of either operand of OP changes because of integer promotions. */ static void check_promotion (cpp_reader *pfile, const struct op *op) { if (op->value.unsignedp == op[-1].value.unsignedp) return; if (op->value.unsignedp) { if (!num_positive (op[-1].value, CPP_OPTION (pfile, precision))) cpp_error (pfile, CPP_DL_WARNING, "the left operand of \"%s\" changes sign when promoted", cpp_token_as_text (pfile, op->token)); } else if (!num_positive (op->value, CPP_OPTION (pfile, precision))) cpp_error (pfile, CPP_DL_WARNING, "the right operand of \"%s\" changes sign when promoted", cpp_token_as_text (pfile, op->token)); } /* Clears the unused high order bits of the number pointed to by PNUM. 
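   For example, if cpp_num_part is 64 bits wide and the target precision is
   32, only the low 32 bits of num.low are kept and num.high is cleared
   entirely.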
*/ static cpp_num num_trim (cpp_num num, size_t precision) { if (precision > PART_PRECISION) { precision -= PART_PRECISION; if (precision < PART_PRECISION) num.high &= ((cpp_num_part) 1 << precision) - 1; } else { if (precision < PART_PRECISION) num.low &= ((cpp_num_part) 1 << precision) - 1; num.high = 0; } return num; } /* True iff A (presumed signed) >= 0. */ static bool num_positive (cpp_num num, size_t precision) { if (precision > PART_PRECISION) { precision -= PART_PRECISION; return (num.high & (cpp_num_part) 1 << (precision - 1)) == 0; } return (num.low & (cpp_num_part) 1 << (precision - 1)) == 0; } /* Sign extend a number, with PRECISION significant bits and all others assumed clear, to fill out a cpp_num structure. */ cpp_num cpp_num_sign_extend (cpp_num num, size_t precision) { if (!num.unsignedp) { if (precision > PART_PRECISION) { precision -= PART_PRECISION; if (precision < PART_PRECISION && (num.high & (cpp_num_part) 1 << (precision - 1))) num.high |= ~(~(cpp_num_part) 0 >> (PART_PRECISION - precision)); } else if (num.low & (cpp_num_part) 1 << (precision - 1)) { if (precision < PART_PRECISION) num.low |= ~(~(cpp_num_part) 0 >> (PART_PRECISION - precision)); num.high = ~(cpp_num_part) 0; } } return num; } /* Returns the negative of NUM. */ static cpp_num num_negate (cpp_num num, size_t precision) { cpp_num copy; copy = num; num.high = ~num.high; num.low = ~num.low; if (++num.low == 0) num.high++; num = num_trim (num, precision); num.overflow = (!num.unsignedp && num_eq (num, copy) && !num_zerop (num)); return num; } /* Returns true if A >= B. */ static bool num_greater_eq (cpp_num pa, cpp_num pb, size_t precision) { bool unsignedp; unsignedp = pa.unsignedp || pb.unsignedp; if (!unsignedp) { /* Both numbers have signed type. If they are of different sign, the answer is the sign of A. */ unsignedp = num_positive (pa, precision); if (unsignedp != num_positive (pb, precision)) return unsignedp; /* Otherwise we can do an unsigned comparison. */ } return (pa.high > pb.high) || (pa.high == pb.high && pa.low >= pb.low); } /* Returns LHS OP RHS, where OP is a bit-wise operation. */ static cpp_num num_bitwise_op (cpp_reader *pfile ATTRIBUTE_UNUSED, cpp_num lhs, cpp_num rhs, enum cpp_ttype op) { lhs.overflow = false; lhs.unsignedp = lhs.unsignedp || rhs.unsignedp; /* As excess precision is zeroed, there is no need to num_trim () as these operations cannot introduce a set bit there. */ if (op == CPP_AND) { lhs.low &= rhs.low; lhs.high &= rhs.high; } else if (op == CPP_OR) { lhs.low |= rhs.low; lhs.high |= rhs.high; } else { lhs.low ^= rhs.low; lhs.high ^= rhs.high; } return lhs; } /* Returns LHS OP RHS, where OP is an inequality. */ static cpp_num num_inequality_op (cpp_reader *pfile, cpp_num lhs, cpp_num rhs, enum cpp_ttype op) { bool gte = num_greater_eq (lhs, rhs, CPP_OPTION (pfile, precision)); if (op == CPP_GREATER_EQ) lhs.low = gte; else if (op == CPP_LESS) lhs.low = !gte; else if (op == CPP_GREATER) lhs.low = gte && !num_eq (lhs, rhs); else /* CPP_LESS_EQ. */ lhs.low = !gte || num_eq (lhs, rhs); lhs.high = 0; lhs.overflow = false; lhs.unsignedp = false; return lhs; } /* Returns LHS OP RHS, where OP is == or !=. */ static cpp_num num_equality_op (cpp_reader *pfile ATTRIBUTE_UNUSED, cpp_num lhs, cpp_num rhs, enum cpp_ttype op) { /* Work around a 3.0.4 bug; see PR 6950. */ bool eq = num_eq (lhs, rhs); if (op == CPP_NOT_EQ) eq = !eq; lhs.low = eq; lhs.high = 0; lhs.overflow = false; lhs.unsignedp = false; return lhs; } /* Shift NUM, of width PRECISION, right by N bits. 
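*/

/* A stand-alone sketch of the double-width right shift used below: the
   low part receives the bits falling out of the high part, and SIGN_MASK
   (all-zeros for unsigned or non-negative values, all-ones otherwise)
   fills the vacated high bits.  The demo_* names and the fixed 64-bit
   part width are assumptions of the example; the block is excluded from
   the build.  */
#if 0
#include <stdint.h>

static void
demo_rshift_parts (uint64_t *high, uint64_t *low, unsigned int n,
		   uint64_t sign_mask)
{
  /* N must be smaller than the part width; shifting a 64-bit value by
     64 would be undefined, which is why num_rshift () below handles
     whole-part shifts separately.  */
  if (n > 0 && n < 64)
    {
      *low = (*low >> n) | (*high << (64 - n));
      *high = (*high >> n) | (sign_mask << (64 - n));
    }
}
#endif

/*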
*/ static cpp_num num_rshift (cpp_num num, size_t precision, size_t n) { cpp_num_part sign_mask; bool x = num_positive (num, precision); if (num.unsignedp || x) sign_mask = 0; else sign_mask = ~(cpp_num_part) 0; if (n >= precision) num.high = num.low = sign_mask; else { /* Sign-extend. */ if (precision < PART_PRECISION) num.high = sign_mask, num.low |= sign_mask << precision; else if (precision < 2 * PART_PRECISION) num.high |= sign_mask << (precision - PART_PRECISION); if (n >= PART_PRECISION) { n -= PART_PRECISION; num.low = num.high; num.high = sign_mask; } if (n) { num.low = (num.low >> n) | (num.high << (PART_PRECISION - n)); num.high = (num.high >> n) | (sign_mask << (PART_PRECISION - n)); } } num = num_trim (num, precision); num.overflow = false; return num; } /* Shift NUM, of width PRECISION, left by N bits. */ static cpp_num num_lshift (cpp_num num, size_t precision, size_t n) { if (n >= precision) { num.overflow = !num.unsignedp && !num_zerop (num); num.high = num.low = 0; } else { cpp_num orig, maybe_orig; size_t m = n; orig = num; if (m >= PART_PRECISION) { m -= PART_PRECISION; num.high = num.low; num.low = 0; } if (m) { num.high = (num.high << m) | (num.low >> (PART_PRECISION - m)); num.low <<= m; } num = num_trim (num, precision); if (num.unsignedp) num.overflow = false; else { maybe_orig = num_rshift (num, precision, n); num.overflow = !num_eq (orig, maybe_orig); } } return num; } /* The four unary operators: +, -, ! and ~. */ static cpp_num num_unary_op (cpp_reader *pfile, cpp_num num, enum cpp_ttype op) { switch (op) { case CPP_UPLUS: if (CPP_WTRADITIONAL (pfile) && !pfile->state.skip_eval) cpp_error (pfile, CPP_DL_WARNING, "traditional C rejects the unary plus operator"); num.overflow = false; break; case CPP_UMINUS: num = num_negate (num, CPP_OPTION (pfile, precision)); break; case CPP_COMPL: num.high = ~num.high; num.low = ~num.low; num = num_trim (num, CPP_OPTION (pfile, precision)); num.overflow = false; break; default: /* case CPP_NOT: */ num.low = num_zerop (num); num.high = 0; num.overflow = false; num.unsignedp = false; break; } return num; } /* The various binary operators. */ static cpp_num num_binary_op (cpp_reader *pfile, cpp_num lhs, cpp_num rhs, enum cpp_ttype op) { cpp_num result; size_t precision = CPP_OPTION (pfile, precision); bool gte; size_t n; switch (op) { /* Shifts. */ case CPP_LSHIFT: case CPP_RSHIFT: if (!rhs.unsignedp && !num_positive (rhs, precision)) { /* A negative shift is a positive shift the other way. */ if (op == CPP_LSHIFT) op = CPP_RSHIFT; else op = CPP_LSHIFT; rhs = num_negate (rhs, precision); } if (rhs.high) n = ~0; /* Maximal. */ else n = rhs.low; if (op == CPP_LSHIFT) lhs = num_lshift (lhs, precision, n); else lhs = num_rshift (lhs, precision, n); break; /* Min / Max. */ case CPP_MIN: case CPP_MAX: { bool unsignedp = lhs.unsignedp || rhs.unsignedp; gte = num_greater_eq (lhs, rhs, precision); if (op == CPP_MIN) gte = !gte; if (!gte) lhs = rhs; lhs.unsignedp = unsignedp; } break; /* Arithmetic. */ case CPP_MINUS: rhs = num_negate (rhs, precision); case CPP_PLUS: result.low = lhs.low + rhs.low; result.high = lhs.high + rhs.high; if (result.low < lhs.low) result.high++; result.unsignedp = lhs.unsignedp || rhs.unsignedp; result.overflow = false; result = num_trim (result, precision); if (!result.unsignedp) { bool lhsp = num_positive (lhs, precision); result.overflow = (lhsp == num_positive (rhs, precision) && lhsp != num_positive (result, precision)); } return result; /* Comma. 
*/ default: /* case CPP_COMMA: */ if (CPP_PEDANTIC (pfile) && (!CPP_OPTION (pfile, c99) || !pfile->state.skip_eval)) cpp_error (pfile, CPP_DL_PEDWARN, "comma operator in operand of #if"); lhs = rhs; break; } return lhs; } /* Multiplies two unsigned cpp_num_parts to give a cpp_num. This cannot overflow. */ static cpp_num num_part_mul (cpp_num_part lhs, cpp_num_part rhs) { cpp_num result; cpp_num_part middle[2], temp; result.low = LOW_PART (lhs) * LOW_PART (rhs); result.high = HIGH_PART (lhs) * HIGH_PART (rhs); middle[0] = LOW_PART (lhs) * HIGH_PART (rhs); middle[1] = HIGH_PART (lhs) * LOW_PART (rhs); temp = result.low; result.low += LOW_PART (middle[0]) << (PART_PRECISION / 2); if (result.low < temp) result.high++; temp = result.low; result.low += LOW_PART (middle[1]) << (PART_PRECISION / 2); if (result.low < temp) result.high++; result.high += HIGH_PART (middle[0]); result.high += HIGH_PART (middle[1]); result.unsignedp = true; result.overflow = false; return result; } /* Multiply two preprocessing numbers. */ static cpp_num num_mul (cpp_reader *pfile, cpp_num lhs, cpp_num rhs) { cpp_num result, temp; bool unsignedp = lhs.unsignedp || rhs.unsignedp; bool overflow, negate = false; size_t precision = CPP_OPTION (pfile, precision); /* Prepare for unsigned multiplication. */ if (!unsignedp) { if (!num_positive (lhs, precision)) negate = !negate, lhs = num_negate (lhs, precision); if (!num_positive (rhs, precision)) negate = !negate, rhs = num_negate (rhs, precision); } overflow = lhs.high && rhs.high; result = num_part_mul (lhs.low, rhs.low); temp = num_part_mul (lhs.high, rhs.low); result.high += temp.low; if (temp.high) overflow = true; temp = num_part_mul (lhs.low, rhs.high); result.high += temp.low; if (temp.high) overflow = true; temp.low = result.low, temp.high = result.high; result = num_trim (result, precision); if (!num_eq (result, temp)) overflow = true; if (negate) result = num_negate (result, precision); if (unsignedp) result.overflow = false; else result.overflow = overflow || (num_positive (result, precision) ^ !negate && !num_zerop (result)); result.unsignedp = unsignedp; return result; } /* Divide two preprocessing numbers, returning the answer or the remainder depending upon OP. */ static cpp_num num_div_op (cpp_reader *pfile, cpp_num lhs, cpp_num rhs, enum cpp_ttype op) { cpp_num result, sub; cpp_num_part mask; bool unsignedp = lhs.unsignedp || rhs.unsignedp; bool negate = false, lhs_neg = false; size_t i, precision = CPP_OPTION (pfile, precision); /* Prepare for unsigned division. */ if (!unsignedp) { if (!num_positive (lhs, precision)) negate = !negate, lhs_neg = true, lhs = num_negate (lhs, precision); if (!num_positive (rhs, precision)) negate = !negate, rhs = num_negate (rhs, precision); } /* Find the high bit. */ if (rhs.high) { i = precision - 1; mask = (cpp_num_part) 1 << (i - PART_PRECISION); for (; ; i--, mask >>= 1) if (rhs.high & mask) break; } else if (rhs.low) { if (precision > PART_PRECISION) i = precision - PART_PRECISION - 1; else i = precision - 1; mask = (cpp_num_part) 1 << i; for (; ; i--, mask >>= 1) if (rhs.low & mask) break; } else { if (!pfile->state.skip_eval) cpp_error (pfile, CPP_DL_ERROR, "division by zero in #if"); return lhs; } /* First nonzero bit of RHS is bit I. Do naive division by shifting the RHS fully left, and subtracting from LHS if LHS is at least as big, and then repeating but with one less shift. This is not very efficient, but is easy to understand. 
*/ rhs.unsignedp = true; lhs.unsignedp = true; i = precision - i - 1; sub = num_lshift (rhs, precision, i); result.high = result.low = 0; for (;;) { if (num_greater_eq (lhs, sub, precision)) { lhs = num_binary_op (pfile, lhs, sub, CPP_MINUS); if (i >= PART_PRECISION) result.high |= (cpp_num_part) 1 << (i - PART_PRECISION); else result.low |= (cpp_num_part) 1 << i; } if (i-- == 0) break; sub.low = (sub.low >> 1) | (sub.high << (PART_PRECISION - 1)); sub.high >>= 1; } /* We divide so that the remainder has the sign of the LHS. */ if (op == CPP_DIV) { result.unsignedp = unsignedp; result.overflow = false; if (!unsignedp) { if (negate) result = num_negate (result, precision); result.overflow = num_positive (result, precision) ^ !negate; } return result; } /* CPP_MOD. */ lhs.unsignedp = unsignedp; lhs.overflow = false; if (lhs_neg) lhs = num_negate (lhs, precision); return lhs; } /* Part of CPP library. File handling. Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Written by Per Bothner, 1994. Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 Split out of cpplib.c, Zack Weinberg, Oct 1998 Reimplemented, Neil Booth, Jul 2003 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <dirent.h> /* Variable length record files on VMS will have a stat size that includes record control characters that won't be included in the read size. */ #ifdef VMS # define FAB_C_VAR 2 /* variable length records (see Starlet fabdef.h) */ # define STAT_SIZE_RELIABLE(ST) ((ST).st_fab_rfm != FAB_C_VAR) #else # define STAT_SIZE_RELIABLE(ST) true #endif #ifdef __DJGPP__ /* For DJGPP redirected input is opened in text mode. */ # define set_stdin_to_binary_mode() \ if (! isatty (0)) setmode (0, O_BINARY) #else # define set_stdin_to_binary_mode() /* Nothing */ #endif /* This structure represents a file searched for by CPP, whether it exists or not. An instance may be pointed to by more than one file_hash_entry; at present no reference count is kept. */ struct _cpp_file { /* Filename as given to #include or command line switch. */ const char *name; /* The full path used to find the file. */ const char *path; /* The full path of the pch file. */ const char *pchname; /* The file's path with the basename stripped. NULL if it hasn't been calculated yet. */ const char *dir_name; /* Chain through all files. */ struct _cpp_file *next_file; /* The contents of NAME after calling read_file(). */ const uchar *buffer; /* The macro, if any, preventing re-inclusion. */ const cpp_hashnode *cmacro; /* The directory in the search path where FILE was found. Used for #include_next and determining whether a header is a system header. */ cpp_dir *dir; /* As filled in by stat(2) for the file. */ struct stat st; /* File descriptor. Invalid if -1, otherwise open.
*/ int fd; /* Zero if this file was successfully opened and stat()-ed, otherwise errno obtained from failure. */ int err_no; /* Number of times the file has been stacked for preprocessing. */ unsigned short stack_count; /* If opened with #import or contains #pragma once. */ bool once_only; /* If read() failed before. */ bool dont_read; /* If this file is the main file. */ bool main_file; /* If BUFFER above contains the true contents of the file. */ bool buffer_valid; /* File is a PCH (on return from find_include_file). */ bool pch; }; /* A singly-linked list for all searches for a given file name, with its head pointed to by a slot in FILE_HASH. The file name is what appeared between the quotes in a #include directive; it can be determined implicitly from the hash table location or explicitly from FILE->name. FILE is a structure containing details about the file that was found with that search, or details of how the search failed. START_DIR is the starting location of the search in the include chain. The current directories for "" includes are also hashed in the hash table and therefore unique. Files that are looked up without using a search path, such as absolute filenames and file names from the command line share a special starting directory so they don't cause cache hits with normal include-chain lookups. If START_DIR is NULL then the entry is for a directory, not a file, and the directory is in DIR. Since the starting point in a file lookup chain is never NULL, this means that simple pointer comparisons against START_DIR can be made to determine cache hits in file lookups. If a cache lookup fails because of e.g. an extra "./" in the path, then nothing will break. It is just less efficient as CPP will have to do more work re-preprocessing the file, and/or comparing its contents against earlier once-only files. 
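*/

/* A compact sketch of the lookup described above: each hash-table slot
   holds a chain of entries for one file name, and a cache hit requires
   the same starting directory, compared by pointer identity.  The demo_*
   names are illustrative only; the block is excluded from the build.  */
#if 0
struct demo_entry
{
  struct demo_entry *next;	/* Chain of searches for this name.  */
  const void *start_dir;	/* Where the search began.  */
  void *result;			/* The file (or dir) that was found.  */
};

static void *
demo_cache_lookup (struct demo_entry *chain, const void *start_dir)
{
  for (; chain; chain = chain->next)
    if (chain->start_dir == start_dir)
      return chain->result;
  return 0;			/* Miss: fall back to the file system.  */
}
#endif

/*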
*/ struct file_hash_entry { struct file_hash_entry *next; cpp_dir *start_dir; union { _cpp_file *file; cpp_dir *dir; } u; }; static bool open_file (_cpp_file *file); static bool pch_open_file (cpp_reader *pfile, _cpp_file *file, bool *invalid_pch); static bool find_file_in_dir (cpp_reader *pfile, _cpp_file *file, bool *invalid_pch); static bool read_file_guts (cpp_reader *pfile, _cpp_file *file); static bool read_file (cpp_reader *pfile, _cpp_file *file); static bool should_stack_file (cpp_reader *, _cpp_file *file, bool import); static struct cpp_dir *search_path_head (cpp_reader *, const char *fname, int angle_brackets, enum include_type); static const char *dir_name_of_file (_cpp_file *file); static void open_file_failed (cpp_reader *pfile, _cpp_file *file); static struct file_hash_entry *search_cache (struct file_hash_entry *head, const cpp_dir *start_dir); static _cpp_file *make_cpp_file (cpp_reader *, cpp_dir *, const char *fname); static cpp_dir *make_cpp_dir (cpp_reader *, const char *dir_name, int sysp); static void allocate_file_hash_entries (cpp_reader *pfile); static struct file_hash_entry *new_file_hash_entry (cpp_reader *pfile); static int report_missing_guard (void **slot, void *b); static hashval_t file_hash_hash (const void *p); static int file_hash_eq (const void *p, const void *q); static char *read_filename_string (int ch, FILE *f); static void read_name_map (cpp_dir *dir); static char *remap_filename (cpp_reader *pfile, _cpp_file *file); static char *append_file_to_dir (const char *fname, cpp_dir *dir); static bool validate_pch (cpp_reader *, _cpp_file *file, const char *pchname); static int pchf_adder (void **slot, void *data); static int pchf_save_compare (const void *e1, const void *e2); static int pchf_compare (const void *d_p, const void *e_p); static bool check_file_against_entries (cpp_reader *, _cpp_file *, bool); #ifdef ONE_COMPILATION_UNIT #ifndef O_BINARY # define O_BINARY 0 #endif #endif /* Given a filename in FILE->PATH, with the empty string interpreted as , open it. On success FILE contains an open file descriptor and stat information for the file. On failure the file descriptor is -1 and the appropriate errno is also stored in FILE. Returns TRUE iff successful. We used to open files in nonblocking mode, but that caused more problems than it solved. Do take care not to acquire a controlling terminal by mistake (this can't happen on sane systems, but paranoia is a virtue). Use the three-argument form of open even though we aren't specifying O_CREAT, to defend against broken system headers. O_BINARY tells some runtime libraries (notably DJGPP) not to do newline translation; we can handle DOS line breaks just fine ourselves. */ static bool open_file (_cpp_file *file) { if (file->path[0] == '\0') { file->fd = 0; set_stdin_to_binary_mode (); } else file->fd = open (file->path, O_RDONLY | O_NOCTTY | O_BINARY, 0666); if (file->fd != -1) { if (fstat (file->fd, &file->st) == 0) { if (!S_ISDIR (file->st.st_mode)) { file->err_no = 0; return true; } /* Ignore a directory and continue the search. The file we're looking for may be elsewhere in the search path. */ errno = ENOENT; } close (file->fd); file->fd = -1; } else if (errno == ENOTDIR) errno = ENOENT; file->err_no = errno; return false; } /* Temporary PCH intercept of opening a file. Try to find a PCH file based on FILE->name and FILE->dir, and test those found for validity using PFILE->cb.valid_pch. Return true iff a valid file is found. Set *INVALID_PCH if a PCH file is found but wasn't valid. 
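*/

/* A stand-alone sketch of the candidate-name construction performed by
   pch_open_file () below: the ".gch" extension is appended to the path
   that would have been used for the ordinary header.  malloc/abort stand
   in for libiberty's xmalloc; the block is excluded from the build.  */
#if 0
#include <stdlib.h>
#include <string.h>

static char *
demo_pch_name (const char *path)
{
  static const char extension[] = ".gch";
  size_t flen = strlen (path);
  char *pchname = malloc (flen + sizeof (extension));
  if (!pchname)
    abort ();
  memcpy (pchname, path, flen);
  memcpy (pchname + flen, extension, sizeof (extension));  /* Copies the NUL.  */
  return pchname;
}
#endif

/*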
*/ static bool pch_open_file (cpp_reader *pfile, _cpp_file *file, bool *invalid_pch) { static const char extension[] = ".gch"; const char *path = file->path; size_t len, flen; char *pchname; struct stat st; bool valid = false; /* No PCH on <stdin> or if not requested. */ if (file->name[0] == '\0' || !pfile->cb.valid_pch) return false; flen = strlen (path); len = flen + sizeof (extension); pchname = xmalloc (len); memcpy (pchname, path, flen); memcpy (pchname + flen, extension, sizeof (extension)); if (stat (pchname, &st) == 0) { DIR *pchdir; struct dirent *d; size_t dlen, plen = len; if (!S_ISDIR (st.st_mode)) valid = validate_pch (pfile, file, pchname); else if ((pchdir = opendir (pchname)) != NULL) { pchname[plen - 1] = '/'; while ((d = readdir (pchdir)) != NULL) { dlen = strlen (d->d_name) + 1; if ((strcmp (d->d_name, ".") == 0) || (strcmp (d->d_name, "..") == 0)) continue; if (dlen + plen > len) { len += dlen + 64; pchname = xrealloc (pchname, len); } memcpy (pchname + plen, d->d_name, dlen); valid = validate_pch (pfile, file, pchname); if (valid) break; } closedir (pchdir); } if (valid) file->pch = true; else *invalid_pch = true; } if (valid) file->pchname = pchname; else free (pchname); return valid; } /* Try to open the path FILE->name appended to FILE->dir. This is where remap and PCH intercept the file lookup process. Return true if the file was found, whether or not the open was successful. Set *INVALID_PCH to true if a PCH file is found but wasn't valid. */ static bool find_file_in_dir (cpp_reader *pfile, _cpp_file *file, bool *invalid_pch) { char *path; if (CPP_OPTION (pfile, remap) && (path = remap_filename (pfile, file))) ; else if (file->dir->construct) path = file->dir->construct (file->name, file->dir); else path = append_file_to_dir (file->name, file->dir); if (path) { file->path = path; if (pch_open_file (pfile, file, invalid_pch)) return true; if (open_file (file)) return true; if (file->err_no != ENOENT) { open_file_failed (pfile, file); return true; } free (path); file->path = file->name; } else { file->err_no = ENOENT; file->path = NULL; } return false; } /* Return true if the missing_header callback found the given HEADER. */ static bool search_path_exhausted (cpp_reader *pfile, const char *header, _cpp_file *file) { missing_header_cb func = pfile->cb.missing_header; /* When the regular search path doesn't work, try context-dependent header search paths. */ if (func && file->dir == NULL) { if ((file->path = func (pfile, header, &file->dir)) != NULL) { if (open_file (file)) return true; free ((void *)file->path); } file->path = file->name; } return false; } bool _cpp_find_failed (_cpp_file *file) { return file->err_no != 0; } /* Given a filename FNAME search for such a file in the include path starting from START_DIR. If FNAME is the empty string it is interpreted as STDIN if START_DIR is PFILE->no_search_path. If the file is not found in the file cache fall back to the O/S and add the result to our cache. If the file was not found in the filesystem, or there was an error opening it, then ERR_NO is nonzero and FD is -1. If the file was found, then ERR_NO is zero and FD could be -1 or an open file descriptor. FD can be -1 if the file was found in the cache and had previously been closed. To open it again pass the return value to open_file().
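*/

/* A hypothetical caller of the lookup routine defined below, following
   the contract spelled out above: a nonzero err_no means the search
   failed; on success fd may still be -1 if the cached file had been
   closed, in which case open_file () reopens it.  The surrounding code
   is imagined; the block is excluded from the build.  */
#if 0
static void
demo_lookup_header (cpp_reader *pfile, const char *fname, cpp_dir *start_dir)
{
  _cpp_file *file = _cpp_find_file (pfile, fname, start_dir, false);

  if (file->err_no != 0)
    return;			/* Not found, or could not be opened.  */

  if (file->fd == -1)
    open_file (file);		/* Reopen a previously cached file.  */
}
#endif

/*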
*/ _cpp_file * _cpp_find_file (cpp_reader *pfile, const char *fname, cpp_dir *start_dir, bool fake) { struct file_hash_entry *entry, **hash_slot; _cpp_file *file; bool invalid_pch = false; /* Ensure we get no confusion between cached files and directories. */ if (start_dir == NULL) cpp_error (pfile, CPP_DL_ICE, "NULL directory in find_file"); hash_slot = (struct file_hash_entry **) htab_find_slot_with_hash (pfile->file_hash, fname, htab_hash_string (fname), INSERT); /* First check the cache before we resort to memory allocation. */ entry = search_cache (*hash_slot, start_dir); if (entry) return entry->u.file; file = make_cpp_file (pfile, start_dir, fname); /* Try each path in the include chain. */ for (; !fake ;) { if (file->dir == pfile->quote_include || file->dir == pfile->bracket_include) { entry = search_cache (*hash_slot, file->dir); if (entry) { /* Found the same file again. Record it as reachable from this position, too. */ free ((char *) file->name); free (file); file = entry->u.file; goto found; } } if (find_file_in_dir (pfile, file, &invalid_pch)) break; file->dir = file->dir->next; if (file->dir == NULL) { if (search_path_exhausted (pfile, fname, file)) return file; open_file_failed (pfile, file); if (invalid_pch) { cpp_error (pfile, CPP_DL_ERROR, "one or more PCH files were found, but they were invalid"); if (!cpp_get_options (pfile)->warn_invalid_pch) cpp_error (pfile, CPP_DL_ERROR, "use -Winvalid-pch for more information"); } break; } } /* This is a new file; put it in the list. */ file->next_file = pfile->all_files; pfile->all_files = file; /* If this file was found in the directory-of-the-current-file, check whether that directory is reachable via one of the normal search paths. If so, we must record this entry as being reachable that way, otherwise we will mistakenly reprocess this file if it is included later from the normal search path. */ if (file->dir && start_dir->next == pfile->quote_include) { cpp_dir *d; cpp_dir *proper_start_dir = pfile->quote_include; for (d = proper_start_dir;; d = d->next) { if (d == pfile->bracket_include) proper_start_dir = d; if (d == 0) { proper_start_dir = 0; break; } /* file->dir->name will have a trailing slash. */ if (!strncmp (d->name, file->dir->name, file->dir->len - 1)) break; } if (proper_start_dir) start_dir = proper_start_dir; } found: /* Store this new result in the hash table. */ entry = new_file_hash_entry (pfile); entry->next = *hash_slot; entry->start_dir = start_dir; entry->u.file = file; *hash_slot = entry; return file; } /* Read a file into FILE->buffer, returning true on success. If FILE->fd is something weird, like a block device, we don't want to read it at all. Don't even try to figure out what something is, except for plain files and block devices, since there is no reliable portable way of doing this. FIXME: Flush file cache and try again if we run out of memory. */ static bool read_file_guts (cpp_reader *pfile, _cpp_file *file) { ssize_t size, total, count; uchar *buf; bool regular; if (S_ISBLK (file->st.st_mode)) { cpp_error (pfile, CPP_DL_ERROR, "%s is a block device", file->path); return false; } regular = S_ISREG (file->st.st_mode); if (regular) { /* off_t might have a wider range than ssize_t - in other words, the max size of a file might be bigger than the address space. We can't handle a file that large. (Anyone with a single source file bigger than 2GB needs to rethink their coding style.) Some systems (e.g. AIX 4.1) define SSIZE_MAX to be much smaller than the actual range of the type. 
Use INTTYPE_MAXIMUM unconditionally to ensure this does not bite us. */ if (file->st.st_size > INTTYPE_MAXIMUM (ssize_t)) { cpp_error (pfile, CPP_DL_ERROR, "%s is too large", file->path); return false; } size = file->st.st_size; } else /* 8 kilobytes is a sensible starting size. It ought to be bigger than the kernel pipe buffer, and it's definitely bigger than the majority of C source files. */ size = 8 * 1024; buf = xmalloc (size + 1); total = 0; while ((count = read (file->fd, buf + total, size - total)) > 0) { total += count; if (total == size) { if (regular) break; size *= 2; buf = xrealloc (buf, size + 1); } } if (count < 0) { cpp_errno (pfile, CPP_DL_ERROR, file->path); return false; } if (regular && total != size && STAT_SIZE_RELIABLE (file->st)) cpp_error (pfile, CPP_DL_WARNING, "%s is shorter than expected", file->path); file->buffer = _cpp_convert_input (pfile, CPP_OPTION (pfile, input_charset), buf, size, total, &file->st.st_size); file->buffer_valid = true; return true; } /* Convenience wrapper around read_file_guts that opens the file if necessary and closes the file descriptor after reading. FILE must have been passed through find_file() at some stage. */ static bool read_file (cpp_reader *pfile, _cpp_file *file) { /* If we already have its contents in memory, succeed immediately. */ if (file->buffer_valid) return true; /* If an earlier read failed for some reason don't try again. */ if (file->dont_read || file->err_no) return false; if (file->fd == -1 && !open_file (file)) { open_file_failed (pfile, file); return false; } file->dont_read = !read_file_guts (pfile, file); close (file->fd); file->fd = -1; return !file->dont_read; } /* Returns TRUE if FILE's contents have been successfully placed in FILE->buffer and the file should be stacked, otherwise false. */ static bool should_stack_file (cpp_reader *pfile, _cpp_file *file, bool import) { _cpp_file *f; /* Skip once-only files. */ if (file->once_only) return false; /* We must mark the file once-only if #import now, before header guard checks. Otherwise, undefining the header guard might cause the file to be re-stacked. */ if (import) { _cpp_mark_file_once_only (pfile, file); /* Don't stack files that have been stacked before. */ if (file->stack_count) return false; } /* Skip if the file had a header guard and the macro is defined. PCH relies on this appearing before the PCH handler below. */ if (file->cmacro && file->cmacro->type == NT_MACRO) return false; /* Handle PCH files immediately; don't stack them. */ if (file->pch) { pfile->cb.read_pch (pfile, file->pchname, file->fd, file->path); close (file->fd); file->fd = -1; return false; } if (!read_file (pfile, file)) return false; /* Check the file against the PCH file. This is done before checking against files we've already seen, since it may save on I/O. */ if (check_file_against_entries (pfile, file, import)) { /* If this isn't a #import, but yet we can't include the file, that means that it was #import-ed in the PCH file, so we can never include it again. */ if (! import) _cpp_mark_file_once_only (pfile, file); return false; } /* Now we've read the file's contents, we can stack it if there are no once-only files. */ if (!pfile->seen_once_only) return true; /* We may have read the file under a different name. Look for likely candidates and compare file contents to be sure. 
*/ for (f = pfile->all_files; f; f = f->next_file) { if (f == file) continue; if ((import || f->once_only) && f->err_no == 0 && f->st.st_mtime == file->st.st_mtime && f->st.st_size == file->st.st_size && read_file (pfile, f) /* Size might have changed in read_file(). */ && f->st.st_size == file->st.st_size && !memcmp (f->buffer, file->buffer, f->st.st_size)) break; } return f == NULL; } /* Place the file referenced by FILE into a new buffer on the buffer stack if possible. IMPORT is true if this stacking attempt is because of a #import directive. Returns true if a buffer is stacked. */ bool _cpp_stack_file (cpp_reader *pfile, _cpp_file *file, bool import) { cpp_buffer *buffer; int sysp; if (!should_stack_file (pfile, file, import)) return false; if (pfile->buffer == NULL || file->dir == NULL) sysp = 0; else sysp = MAX (pfile->buffer->sysp, file->dir->sysp); /* Add the file to the dependencies on its first inclusion. */ if (CPP_OPTION (pfile, deps.style) > !!sysp && !file->stack_count) { if (!file->main_file || !CPP_OPTION (pfile, deps.ignore_main_file)) deps_add_dep (pfile->deps, file->path); } /* Clear buffer_valid since _cpp_clean_line messes it up. */ file->buffer_valid = false; file->stack_count++; /* Stack the buffer. */ buffer = cpp_push_buffer (pfile, file->buffer, file->st.st_size, CPP_OPTION (pfile, preprocessed)); buffer->file = file; buffer->sysp = sysp; /* Initialize controlling macro state. */ pfile->mi_valid = true; pfile->mi_cmacro = 0; /* Generate the call back. */ _cpp_do_file_change (pfile, LC_ENTER, file->path, 1, sysp); return true; } /* Mark FILE to be included once only. */ void _cpp_mark_file_once_only (cpp_reader *pfile, _cpp_file *file) { pfile->seen_once_only = true; file->once_only = true; } /* Return the directory from which searching for FNAME should start, considering the directive TYPE and ANGLE_BRACKETS. If there is nothing left in the path, returns NULL. */ static struct cpp_dir * search_path_head (cpp_reader *pfile, const char *fname, int angle_brackets, enum include_type type) { cpp_dir *dir; _cpp_file *file; if (IS_ABSOLUTE_PATH (fname)) return &pfile->no_search_path; /* pfile->buffer is NULL when processing an -include command-line flag. */ file = pfile->buffer == NULL ? pfile->main_file : pfile->buffer->file; /* For #include_next, skip in the search path past the dir in which the current file was found, but if it was found via an absolute path use the normal search logic. */ if (type == IT_INCLUDE_NEXT && file->dir) dir = file->dir->next; else if (angle_brackets) dir = pfile->bracket_include; else if (type == IT_CMDLINE) /* -include and -imacros use the #include "" chain with the preprocessor's cwd prepended. */ return make_cpp_dir (pfile, "./", false); else if (pfile->quote_ignores_source_dir) dir = pfile->quote_include; else return make_cpp_dir (pfile, dir_name_of_file (file), pfile->buffer ? pfile->buffer->sysp : 0); if (dir == NULL) cpp_error (pfile, CPP_DL_ERROR, "no include path in which to search for %s", fname); return dir; } /* Strip the basename from the file's path. It ends with a slash if of nonzero length. Note that this procedure also works for <stdin>, which is represented by the empty string.
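*/

/* A minimal stand-alone version of the stripping described above,
   assuming plain '/' separators (lbasename () in libiberty also knows
   about DOS drive letters and backslashes).  malloc/abort stand in for
   xmalloc; the block is excluded from the build.  */
#if 0
#include <stdlib.h>
#include <string.h>

static char *
demo_dir_name (const char *path)
{
  const char *slash = strrchr (path, '/');
  size_t len = slash ? (size_t) (slash - path) + 1 : 0;
  char *dir_name = malloc (len + 1);
  if (!dir_name)
    abort ();
  memcpy (dir_name, path, len);
  dir_name[len] = '\0';		/* "" for <stdin> or a bare file name.  */
  return dir_name;
}
#endif

/*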
*/ static const char * dir_name_of_file (_cpp_file *file) { if (!file->dir_name) { size_t len = lbasename (file->path) - file->path; char *dir_name = xmalloc (len + 1); memcpy (dir_name, file->path, len); dir_name[len] = '\0'; file->dir_name = dir_name; } return file->dir_name; } /* Handles #include-family directives (distinguished by TYPE), including HEADER, and the command line -imacros and -include. Returns true if a buffer was stacked. */ bool _cpp_stack_include (cpp_reader *pfile, const char *fname, int angle_brackets, enum include_type type) { struct cpp_dir *dir; _cpp_file *file; dir = search_path_head (pfile, fname, angle_brackets, type); if (!dir) return false; file = _cpp_find_file (pfile, fname, dir, false); /* Compensate for the increment in linemap_add. In the case of a normal #include, we're currently at the start of the line *following* the #include. A separate source_location for this location makes no sense (until we do the LC_LEAVE), and complicates LAST_SOURCE_LINE_LOCATION. This does not apply if we found a PCH file (in which case linemap_add is not called) or we were included from the command-line. */ if (! file->pch && file->err_no == 0 && type != IT_CMDLINE) pfile->line_table->highest_location--; return _cpp_stack_file (pfile, file, type == IT_IMPORT); } /* Could not open FILE. The complication is dependency output. */ static void open_file_failed (cpp_reader *pfile, _cpp_file *file) { int sysp = pfile->line_table->highest_line > 1 && pfile->buffer ? pfile->buffer->sysp : 0; bool print_dep = CPP_OPTION (pfile, deps.style) > !!sysp; errno = file->err_no; if (print_dep && CPP_OPTION (pfile, deps.missing_files) && errno == ENOENT) deps_add_dep (pfile->deps, file->name); else { /* If we are outputting dependencies but not for this file then don't error because we can still produce correct output. */ if (CPP_OPTION (pfile, deps.style) && ! print_dep) cpp_errno (pfile, CPP_DL_WARNING, file->path); else cpp_errno (pfile, CPP_DL_ERROR, file->path); } } /* Search in the chain beginning at HEAD for a file whose search path started at START_DIR != NULL. */ static struct file_hash_entry * search_cache (struct file_hash_entry *head, const cpp_dir *start_dir) { struct file_hash_entry *p; /* Look for a file that was found from a search starting at the given location. */ for (p = head; p; p = p->next) if (p->start_dir == start_dir) return p; return 0; } /* Allocate a new _cpp_file structure. */ static _cpp_file * make_cpp_file (cpp_reader *pfile, cpp_dir *dir, const char *fname) { _cpp_file *file; file = xcalloc (1, sizeof (_cpp_file)); file->main_file = !pfile->buffer; file->fd = -1; file->dir = dir; file->name = xstrdup (fname); return file; } /* A hash of directory names. The directory names are the path names of files which contain a #include "", the included file name is appended to this directories. To avoid duplicate entries we follow the convention that all non-empty directory names should end in a '/'. DIR_NAME must be stored in permanently allocated memory. */ static cpp_dir * make_cpp_dir (cpp_reader *pfile, const char *dir_name, int sysp) { struct file_hash_entry *entry, **hash_slot; cpp_dir *dir; hash_slot = (struct file_hash_entry **) htab_find_slot_with_hash (pfile->file_hash, dir_name, htab_hash_string (dir_name), INSERT); /* Have we already hashed this directory? 
*/ for (entry = *hash_slot; entry; entry = entry->next) if (entry->start_dir == NULL) return entry->u.dir; dir = xcalloc (1, sizeof (cpp_dir)); dir->next = pfile->quote_include; dir->name = (char *) dir_name; dir->len = strlen (dir_name); dir->sysp = sysp; dir->construct = 0; /* Store this new result in the hash table. */ entry = new_file_hash_entry (pfile); entry->next = *hash_slot; entry->start_dir = NULL; entry->u.dir = dir; *hash_slot = entry; return dir; } /* Create a new block of memory for file hash entries. */ static void allocate_file_hash_entries (cpp_reader *pfile) { pfile->file_hash_entries_used = 0; pfile->file_hash_entries_allocated = 127; pfile->file_hash_entries = xmalloc (pfile->file_hash_entries_allocated * sizeof (struct file_hash_entry)); } /* Return a new file hash entry. */ static struct file_hash_entry * new_file_hash_entry (cpp_reader *pfile) { if (pfile->file_hash_entries_used == pfile->file_hash_entries_allocated) allocate_file_hash_entries (pfile); return &pfile->file_hash_entries[pfile->file_hash_entries_used++]; } /* Returns TRUE if a file FNAME has ever been successfully opened. This routine is not intended to correctly handle filenames aliased by links or redundant . or .. traversals etc. */ bool cpp_included (cpp_reader *pfile, const char *fname) { struct file_hash_entry *entry; entry = htab_find_with_hash (pfile->file_hash, fname, htab_hash_string (fname)); while (entry && (entry->start_dir == NULL || entry->u.file->err_no)) entry = entry->next; return entry != NULL; } /* Calculate the hash value of a file hash entry P. */ static hashval_t file_hash_hash (const void *p) { struct file_hash_entry *entry = (struct file_hash_entry *) p; const char *hname; if (entry->start_dir) hname = entry->u.file->name; else hname = entry->u.dir->name; return htab_hash_string (hname); } /* Compare a string Q against a file hash entry P. */ static int file_hash_eq (const void *p, const void *q) { struct file_hash_entry *entry = (struct file_hash_entry *) p; const char *fname = (const char *) q; const char *hname; if (entry->start_dir) hname = entry->u.file->name; else hname = entry->u.dir->name; return strcmp (hname, fname) == 0; } /* Initialize everything in this source file. */ void _cpp_init_files (cpp_reader *pfile) { pfile->file_hash = htab_create_alloc (127, file_hash_hash, file_hash_eq, NULL, xcalloc, free); allocate_file_hash_entries (pfile); } /* Finalize everything in this source file. */ void _cpp_cleanup_files (cpp_reader *pfile) { htab_delete (pfile->file_hash); } /* Enter a file name in the hash for the sake of cpp_included. */ void _cpp_fake_include (cpp_reader *pfile, const char *fname) { _cpp_find_file (pfile, fname, pfile->buffer->file->dir, true); } /* Not everyone who wants to set system-header-ness on a buffer can see the details of a buffer. This is an exported interface because fix-header needs it. */ void cpp_make_system_header (cpp_reader *pfile, int syshdr, int externc) { int flags = 0; const struct line_maps *line_table = pfile->line_table; const struct line_map *map = &line_table->maps[line_table->used-1]; /* 1 = system header, 2 = system header to be treated as C. */ if (syshdr) flags = 1 + (externc != 0); pfile->buffer->sysp = flags; _cpp_do_file_change (pfile, LC_RENAME, map->to_file, SOURCE_LINE (map, pfile->line_table->highest_line), flags); } /* Allow the client to change the current file. Used by the front end to achieve pseudo-file names like <built-in>. If REASON is LC_LEAVE, then NEW_NAME must be NULL.
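*/

/* A hypothetical front-end use of the entry point defined below: switch
   to the pseudo-file "<built-in>" while built-in macros are emitted, then
   leave it again.  Whether the real front end uses exactly these reason
   codes is an assumption of the sketch; per the comment above, NEW_NAME
   must be NULL for LC_LEAVE.  The block is excluded from the build.  */
#if 0
static void
demo_enter_builtins (cpp_reader *pfile)
{
  cpp_change_file (pfile, LC_RENAME, "<built-in>");
  /* ... install built-in macro definitions here ...  */
  cpp_change_file (pfile, LC_LEAVE, NULL);
}
#endif

/*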
*/ void cpp_change_file (cpp_reader *pfile, enum lc_reason reason, const char *new_name) { _cpp_do_file_change (pfile, reason, new_name, 1, 0); } /* Callback function for htab_traverse. */ static int report_missing_guard (void **slot, void *b) { struct file_hash_entry *entry = (struct file_hash_entry *) *slot; int *bannerp = (int *) b; /* Skip directories. */ if (entry->start_dir != NULL) { _cpp_file *file = entry->u.file; /* We don't want MI guard advice for the main file. */ if (file->cmacro == NULL && file->stack_count == 1 && !file->main_file) { if (*bannerp == 0) { fputs (_("Multiple include guards may be useful for:\n"), stderr); *bannerp = 1; } fputs (entry->u.file->path, stderr); putc ('\n', stderr); } } return 0; } /* Report on all files that might benefit from a multiple include guard. Triggered by -H. */ void _cpp_report_missing_guards (cpp_reader *pfile) { int banner = 0; htab_traverse (pfile->file_hash, report_missing_guard, &banner); } /* Locate HEADER, and determine whether it is newer than the current file. If it cannot be located or dated, return -1, if it is newer, return 1, otherwise 0. */ int _cpp_compare_file_date (cpp_reader *pfile, const char *fname, int angle_brackets) { _cpp_file *file; struct cpp_dir *dir; dir = search_path_head (pfile, fname, angle_brackets, IT_INCLUDE); if (!dir) return -1; file = _cpp_find_file (pfile, fname, dir, false); if (file->err_no) return -1; if (file->fd != -1) { close (file->fd); file->fd = -1; } return file->st.st_mtime > pfile->buffer->file->st.st_mtime; } /* Pushes the given file onto the buffer stack. Returns nonzero if successful. */ bool cpp_push_include (cpp_reader *pfile, const char *fname) { return _cpp_stack_include (pfile, fname, false, IT_CMDLINE); } /* Do appropriate cleanup when a file INC's buffer is popped off the input stack. */ void _cpp_pop_file_buffer (cpp_reader *pfile, _cpp_file *file) { /* Record the inclusion-preventing macro, which could be NULL meaning no controlling macro. */ if (pfile->mi_valid && file->cmacro == NULL) file->cmacro = pfile->mi_cmacro; /* Invalidate control macros in the #including file. */ pfile->mi_valid = false; if (file->buffer) { free ((void *) file->buffer); file->buffer = NULL; } } /* Set the include chain for "" to QUOTE, for <> to BRACKET. If QUOTE_IGNORES_SOURCE_DIR, then "" includes do not look in the directory of the including file. If BRACKET does not lie in the QUOTE chain, it is set to QUOTE. */ void cpp_set_include_chains (cpp_reader *pfile, cpp_dir *quote, cpp_dir *bracket, int quote_ignores_source_dir) { pfile->quote_include = quote; pfile->bracket_include = quote; pfile->quote_ignores_source_dir = quote_ignores_source_dir; for (; quote; quote = quote->next) { quote->name_map = NULL; quote->len = strlen (quote->name); if (quote == bracket) pfile->bracket_include = bracket; } } /* Append the file name to the directory to create the path, but don't turn / into // or // into ///; // may be a namespace escape. */ static char * append_file_to_dir (const char *fname, cpp_dir *dir) { size_t dlen, flen; char *path; dlen = dir->len; flen = strlen (fname); path = xmalloc (dlen + 1 + flen + 1); memcpy (path, dir->name, dlen); if (dlen && path[dlen - 1] != '/') path[dlen++] = '/'; memcpy (&path[dlen], fname, flen + 1); return path; } /* Read a space delimited string of unlimited length from a stdio file F. */ static char * read_filename_string (int ch, FILE *f) { char *alloc, *set; int len; len = 20; set = alloc = xmalloc (len + 1); if (! 
is_space (ch)) { *set++ = ch; while ((ch = getc (f)) != EOF && ! is_space (ch)) { if (set - alloc == len) { len *= 2; alloc = xrealloc (alloc, len + 1); set = alloc + len / 2; } *set++ = ch; } } *set = '\0'; ungetc (ch, f); return alloc; } /* Read the file name map file for DIR. */ static void read_name_map (cpp_dir *dir) { static const char FILE_NAME_MAP_FILE[] = "header.gcc"; char *name; FILE *f; size_t len, count = 0, room = 9; len = dir->len; name = alloca (len + sizeof (FILE_NAME_MAP_FILE) + 1); memcpy (name, dir->name, len); if (len && name[len - 1] != '/') name[len++] = '/'; strcpy (name + len, FILE_NAME_MAP_FILE); f = fopen (name, "r"); dir->name_map = xmalloc (room * sizeof (char *)); /* Silently return NULL if we cannot open. */ if (f) { int ch; while ((ch = getc (f)) != EOF) { char *to; if (is_space (ch)) continue; if (count + 2 > room) { room += 8; dir->name_map = xrealloc (dir->name_map, room * sizeof (char *)); } dir->name_map[count] = read_filename_string (ch, f); while ((ch = getc (f)) != EOF && is_hspace (ch)) ; to = read_filename_string (ch, f); if (IS_ABSOLUTE_PATH (to)) dir->name_map[count + 1] = to; else { dir->name_map[count + 1] = append_file_to_dir (to, dir); free (to); } count += 2; while ((ch = getc (f)) != '\n') if (ch == EOF) break; } fclose (f); } /* Terminate the list of maps. */ dir->name_map[count] = NULL; } /* Remap a FILE's name based on the file_name_map, if any, for FILE->dir. If the file name has any directory separators, recursively check those directories too. */ static char * remap_filename (cpp_reader *pfile, _cpp_file *file) { const char *fname, *p; char *new_dir; cpp_dir *dir; size_t index, len; dir = file->dir; fname = file->name; for (;;) { if (!dir->name_map) read_name_map (dir); for (index = 0; dir->name_map[index]; index += 2) if (!strcmp (dir->name_map[index], fname)) return xstrdup (dir->name_map[index + 1]); p = strchr (fname, '/'); if (!p || p == fname) return NULL; len = dir->len + (p - fname + 1); new_dir = xmalloc (len + 1); memcpy (new_dir, dir->name, dir->len); memcpy (new_dir + dir->len, fname, p - fname + 1); new_dir[len] = '\0'; dir = make_cpp_dir (pfile, new_dir, dir->sysp); fname = p + 1; } } /* Returns true if PCHNAME is a valid PCH file for FILE. */ static bool validate_pch (cpp_reader *pfile, _cpp_file *file, const char *pchname) { const char *saved_path = file->path; bool valid = false; file->path = pchname; if (open_file (file)) { valid = 1 & pfile->cb.valid_pch (pfile, pchname, file->fd); if (!valid) { close (file->fd); file->fd = -1; } if (CPP_OPTION (pfile, print_include_names)) { unsigned int i; for (i = 1; i < pfile->line_table->depth; i++) putc ('.', stderr); fprintf (stderr, "%c %s\n", valid ? '!' : 'x', pchname); } } file->path = saved_path; return valid; } /* Get the path associated with the _cpp_file F. The path includes the base name from the include directive and the directory it was found in via the search path. */ const char * cpp_get_path (struct _cpp_file *f) { return f->path; } /* Get the directory associated with the _cpp_file F. */ cpp_dir * cpp_get_dir (struct _cpp_file *f) { return f->dir; } /* Get the cpp_buffer currently associated with the cpp_reader PFILE. */ cpp_buffer * cpp_get_buffer (cpp_reader *pfile) { return pfile->buffer; } /* Get the _cpp_file associated with the cpp_buffer B. */ _cpp_file * cpp_get_file (cpp_buffer *b) { return b->file; } /* Get the previous cpp_buffer given a cpp_buffer B. The previous buffer is the buffer that included the given buffer. 
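*/

/* A hypothetical client of the accessors defined nearby: walk the stack
   of nested buffers from the innermost one outwards, printing the path
   of each file-backed buffer.  The block is excluded from the build.  */
#if 0
#include <stdio.h>

static void
demo_print_include_stack (cpp_reader *pfile)
{
  cpp_buffer *b;

  for (b = cpp_get_buffer (pfile); b != NULL; b = cpp_get_prev (b))
    {
      _cpp_file *file = cpp_get_file (b);
      if (file != NULL)
	fprintf (stderr, "  %s\n", cpp_get_path (file));
    }
}
#endif

/*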
*/ cpp_buffer * cpp_get_prev (cpp_buffer *b) { return b->prev; } /* This data structure holds the list of header files that were seen while the PCH was being built. The 'entries' field is kept sorted in memcmp() order; yes, this means that on little-endian systems, it's sorted initially by the least-significant byte of 'size', but that's OK. The code does rely on having entries with the same size next to each other. */ struct pchf_data { /* Number of pchf_entry structures. */ size_t count; /* Are there any values with once_only set? This is used as an optimisation, it means we don't have to search the structure if we're processing a regular #include. */ bool have_once_only; struct pchf_entry { /* The size of this file. This is used to save running a MD5 checksum if the sizes don't match. */ off_t size; /* The MD5 checksum of this file. */ unsigned char sum[16]; /* Is this file to be included only once? */ bool once_only; } entries[1]; }; static struct pchf_data *pchf; /* Data for pchf_addr. */ struct pchf_adder_info { cpp_reader *pfile; struct pchf_data *d; }; /* A hash traversal function to add entries into DATA->D. */ static int pchf_adder (void **slot, void *data) { struct file_hash_entry *h = (struct file_hash_entry *) *slot; struct pchf_adder_info *i = (struct pchf_adder_info *) data; if (h->start_dir != NULL && h->u.file->stack_count != 0) { struct pchf_data *d = i->d; _cpp_file *f = h->u.file; size_t count = d->count++; /* This should probably never happen, since if a read error occurred the PCH file shouldn't be written... */ if (f->dont_read || f->err_no) return 1; d->entries[count].once_only = f->once_only; /* |= is avoided in the next line because of an HP C compiler bug */ d->have_once_only = d->have_once_only | f->once_only; if (f->buffer_valid) md5_buffer ((const char *)f->buffer, f->st.st_size, d->entries[count].sum); else { FILE *ff; int oldfd = f->fd; if (!open_file (f)) { open_file_failed (i->pfile, f); return 0; } ff = fdopen (f->fd, "rb"); md5_stream (ff, d->entries[count].sum); fclose (ff); f->fd = oldfd; } d->entries[count].size = f->st.st_size; } return 1; } /* A qsort ordering function for pchf_entry structures. */ static int pchf_save_compare (const void *e1, const void *e2) { return memcmp (e1, e2, sizeof (struct pchf_entry)); } /* Create and write to F a pchf_data structure. */ bool _cpp_save_file_entries (cpp_reader *pfile, FILE *f) { size_t count = 0; struct pchf_data *result; size_t result_size; struct pchf_adder_info pai; count = htab_elements (pfile->file_hash); result_size = (sizeof (struct pchf_data) + sizeof (struct pchf_entry) * (count - 1)); result = xcalloc (result_size, 1); result->count = 0; result->have_once_only = false; pai.pfile = pfile; pai.d = result; htab_traverse (pfile->file_hash, pchf_adder, &pai); result_size = (sizeof (struct pchf_data) + sizeof (struct pchf_entry) * (result->count - 1)); qsort (result->entries, result->count, sizeof (struct pchf_entry), pchf_save_compare); return fwrite (result, result_size, 1, f) == 1; } /* Read the pchf_data structure from F. 
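*/

/* The image read below is a fixed header followed by COUNT fixed-size
   records, declared C89-style with a one-element trailing array.  A
   stand-alone sketch of the allocation arithmetic, using hypothetical
   demo_* types, is kept here for illustration and excluded from the
   build.  */
#if 0
#include <stdlib.h>

struct demo_record { long size; unsigned char sum[16]; };
struct demo_image { size_t count; struct demo_record entries[1]; };

/* Allocate room for a header plus N records (N >= 1 assumed, since one
   record is already part of the struct).  */
static struct demo_image *
demo_alloc_image (size_t n)
{
  size_t bytes = sizeof (struct demo_image)
		 + sizeof (struct demo_record) * (n - 1);
  struct demo_image *image = calloc (1, bytes);
  if (image != NULL)
    image->count = n;
  return image;
}
#endif

/*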
*/ bool _cpp_read_file_entries (cpp_reader *pfile ATTRIBUTE_UNUSED, FILE *f) { struct pchf_data d; if (fread (&d, sizeof (struct pchf_data) - sizeof (struct pchf_entry), 1, f) != 1) return false; pchf = xmalloc (sizeof (struct pchf_data) + sizeof (struct pchf_entry) * (d.count - 1)); memcpy (pchf, &d, sizeof (struct pchf_data) - sizeof (struct pchf_entry)); if (fread (pchf->entries, sizeof (struct pchf_entry), d.count, f) != d.count) return false; return true; } /* The parameters for pchf_compare. */ struct pchf_compare_data { /* The size of the file we're looking for. */ off_t size; /* The MD5 checksum of the file, if it's been computed. */ unsigned char sum[16]; /* Is SUM valid? */ bool sum_computed; /* Do we need to worry about entries that don't have ONCE_ONLY set? */ bool check_included; /* The file that we're searching for. */ _cpp_file *f; }; /* bsearch comparison function; look for D_P in E_P. */ static int pchf_compare (const void *d_p, const void *e_p) { const struct pchf_entry *e = (const struct pchf_entry *)e_p; struct pchf_compare_data *d = (struct pchf_compare_data *)d_p; int result; result = memcmp (&d->size, &e->size, sizeof (off_t)); if (result != 0) return result; if (! d->sum_computed) { _cpp_file *const f = d->f; md5_buffer ((const char *)f->buffer, f->st.st_size, d->sum); d->sum_computed = true; } result = memcmp (d->sum, e->sum, 16); if (result != 0) return result; if (d->check_included || e->once_only) return 0; else return 1; } /* Check that F is not in a list read from a PCH file (if any). Assumes that f->buffer_valid is true. Return TRUE if the file should not be read. */ static bool check_file_against_entries (cpp_reader *pfile ATTRIBUTE_UNUSED, _cpp_file *f, bool check_included) { struct pchf_compare_data d; if (pchf == NULL || (! check_included && ! pchf->have_once_only)) return false; d.size = f->st.st_size; d.sum_computed = false; d.f = f; d.check_included = check_included; return bsearch (&d, pchf->entries, pchf->count, sizeof (struct pchf_entry), pchf_compare) != NULL; } /* Hash tables for the CPP library. Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc. Written by Per Bothner, 1994. Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ static cpp_hashnode *alloc_ident_node (hash_table *); /* Return an identifier node for hashtable.c. Used by cpplib except when integrated with the C front ends. 
*/ static cpp_hashnode * alloc_ident_node (hash_table *table) { cpp_hashnode *node; node = obstack_alloc (&table->pfile->hash_ob, sizeof (cpp_hashnode)); memset (node, 0, sizeof (cpp_hashnode)); return node; } /* Set up the identifier hash table. Use TABLE if non-null, otherwise create our own. */ void _cpp_init_hashtable (cpp_reader *pfile, hash_table *table) { struct spec_nodes *s; if (table == NULL) { pfile->our_hashtable = 1; table = ht_create (13); /* 8K (=2^13) entries. */ table->alloc_node = (hashnode (*) (hash_table *)) alloc_ident_node; _obstack_begin (&pfile->hash_ob, 0, 0, (void *(*) (long)) xmalloc, (void (*) (void *)) free); } table->pfile = pfile; pfile->hash_table = table; /* Now we can initialize things that use the hash table. */ _cpp_init_directives (pfile); _cpp_init_internal_pragmas (pfile); s = &pfile->spec_nodes; s->n_defined = cpp_lookup (pfile, DSC("defined")); s->n_true = cpp_lookup (pfile, DSC("true")); s->n_false = cpp_lookup (pfile, DSC("false")); s->n__VA_ARGS__ = cpp_lookup (pfile, DSC("__VA_ARGS__")); s->n__VA_ARGS__->flags |= NODE_DIAGNOSTIC; } /* Tear down the identifier hash table. */ void _cpp_destroy_hashtable (cpp_reader *pfile) { if (pfile->our_hashtable) { ht_destroy (pfile->hash_table); obstack_free (&pfile->hash_ob, 0); } } /* Returns the hash entry for the STR of length LEN, creating one if necessary. */ cpp_hashnode * cpp_lookup (cpp_reader *pfile, const unsigned char *str, unsigned int len) { /* ht_lookup cannot return NULL. */ return CPP_HASHNODE (ht_lookup (pfile->hash_table, str, len, HT_ALLOC)); } /* Determine whether the str STR, of length LEN, is a defined macro. */ int cpp_defined (cpp_reader *pfile, const unsigned char *str, int len) { cpp_hashnode *node; node = CPP_HASHNODE (ht_lookup (pfile->hash_table, str, len, HT_NO_INSERT)); /* If it's of type NT_MACRO, it cannot be poisoned. */ return node && node->type == NT_MACRO; } /* For all nodes in the hashtable, callback CB with parameters PFILE, the node, and V. */ void cpp_forall_identifiers (cpp_reader *pfile, cpp_cb cb, void *v) { /* We don't need a proxy since the hash table's identifier comes first in cpp_hashnode. */ ht_forall (pfile->hash_table, (ht_cb) cb, v); } /* CPP Library. Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Per Bothner, 1994-95. Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #ifdef ONE_COMPILATION_UNIT #define LOCALEDIR "/scratch2/smcc-extras/build/gcc-cvs/install/share/locale" #endif static void init_library (void); static void mark_named_operators (cpp_reader *); static void read_original_filename (cpp_reader *); static void read_original_directory (cpp_reader *); static void post_options (cpp_reader *); /* If we have designated initializers (GCC >2.7) these tables can be initialized, constant data. Otherwise, they have to be filled in at runtime. */ #if HAVE_DESIGNATED_INITIALIZERS #define init_trigraph_map() /* Nothing. */ #define TRIGRAPH_MAP \ __extension__ const uchar _cpp_trigraph_map[UCHAR_MAX + 1] = { #define END }; #define s(p, v) [p] = v, #else #define TRIGRAPH_MAP uchar _cpp_trigraph_map[UCHAR_MAX + 1] = { 0 }; \ static void init_trigraph_map (void) { \ unsigned char *x = _cpp_trigraph_map; #define END } #define s(p, v) x[p] = v; #endif TRIGRAPH_MAP s('=', '#') s(')', ']') s('!', '|') s('(', '[') s('\'', '^') s('>', '}') s('/', '\\') s('<', '{') s('-', '~') END #undef s #undef END #undef TRIGRAPH_MAP /* A set of booleans indicating what CPP features each source language requires. */ struct lang_flags { char c99; char cplusplus; char extended_numbers; char std; char cplusplus_comments; char digraphs; }; static const struct lang_flags lang_defaults[] = { /* c99 c++ xnum std // digr */ /* GNUC89 */ { 0, 0, 1, 0, 1, 1 }, /* GNUC99 */ { 1, 0, 1, 0, 1, 1 }, /* STDC89 */ { 0, 0, 0, 1, 0, 0 }, /* STDC94 */ { 0, 0, 0, 1, 0, 1 }, /* STDC99 */ { 1, 0, 1, 1, 1, 1 }, /* GNUCXX */ { 0, 1, 1, 0, 1, 1 }, /* CXX98 */ { 0, 1, 1, 1, 1, 1 }, /* ASM */ { 0, 0, 1, 0, 1, 0 } }; /* Sets internal flags correctly for a given language. */ void cpp_set_lang (cpp_reader *pfile, enum c_lang lang) { const struct lang_flags *l = &lang_defaults[(int) lang]; CPP_OPTION (pfile, lang) = lang; CPP_OPTION (pfile, c99) = l->c99; CPP_OPTION (pfile, cplusplus) = l->cplusplus; CPP_OPTION (pfile, extended_numbers) = l->extended_numbers; CPP_OPTION (pfile, std) = l->std; CPP_OPTION (pfile, trigraphs) = l->std; CPP_OPTION (pfile, cplusplus_comments) = l->cplusplus_comments; CPP_OPTION (pfile, digraphs) = l->digraphs; } /* Initialize library global state. */ static void init_library (void) { static int initialized = 0; if (! initialized) { initialized = 1; /* Set up the trigraph map. This doesn't need to do anything if we were compiled with a compiler that supports C99 designated initializers. */ init_trigraph_map (); #ifdef ENABLE_NLS (void) bindtextdomain ("gcc", LOCALEDIR); #endif } } /* Initialize a cpp_reader structure. */ cpp_reader * cpp_create_reader (enum c_lang lang, hash_table *table, struct line_maps *line_table) { cpp_reader *pfile; /* Initialize this instance of the library if it hasn't been already. */ init_library (); pfile = xcalloc (1, sizeof (cpp_reader)); cpp_set_lang (pfile, lang); CPP_OPTION (pfile, warn_multichar) = 1; CPP_OPTION (pfile, discard_comments) = 1; CPP_OPTION (pfile, discard_comments_in_macro_exp) = 1; CPP_OPTION (pfile, show_column) = 1; CPP_OPTION (pfile, tabstop) = 8; CPP_OPTION (pfile, operator_names) = 1; CPP_OPTION (pfile, warn_trigraphs) = 2; CPP_OPTION (pfile, warn_endif_labels) = 1; CPP_OPTION (pfile, warn_deprecated) = 1; CPP_OPTION (pfile, warn_long_long) = !CPP_OPTION (pfile, c99); CPP_OPTION (pfile, dollars_in_ident) = 1; CPP_OPTION (pfile, warn_dollars) = 1; CPP_OPTION (pfile, warn_variadic_macros) = 1; /* Default CPP arithmetic to something sensible for the host for the benefit of dumb users like fix-header. 
*/ CPP_OPTION (pfile, precision) = CHAR_BIT * sizeof (long); CPP_OPTION (pfile, char_precision) = CHAR_BIT; CPP_OPTION (pfile, wchar_precision) = CHAR_BIT * sizeof (int); CPP_OPTION (pfile, int_precision) = CHAR_BIT * sizeof (int); CPP_OPTION (pfile, unsigned_char) = 0; CPP_OPTION (pfile, unsigned_wchar) = 1; CPP_OPTION (pfile, bytes_big_endian) = 1; /* does not matter */ /* Default to no charset conversion. */ CPP_OPTION (pfile, narrow_charset) = _cpp_default_encoding (); CPP_OPTION (pfile, wide_charset) = 0; /* Default the input character set to UTF-8. */ CPP_OPTION (pfile, input_charset) = _cpp_default_encoding (); /* A fake empty "directory" used as the starting point for files looked up without a search path. Name cannot be '/' because we don't want to prepend anything at all to filenames using it. All other entries are correct zero-initialized. */ pfile->no_search_path.name = (char *) ""; /* Initialize the line map. */ pfile->line_table = line_table; /* Initialize lexer state. */ pfile->state.save_comments = ! CPP_OPTION (pfile, discard_comments); /* Set up static tokens. */ pfile->avoid_paste.type = CPP_PADDING; pfile->avoid_paste.val.source = NULL; pfile->eof.type = CPP_EOF; pfile->eof.flags = 0; /* Create a token buffer for the lexer. */ _cpp_init_tokenrun (&pfile->base_run, 250); pfile->cur_run = &pfile->base_run; pfile->cur_token = pfile->base_run.base; /* Initialize the base context. */ pfile->context = &pfile->base_context; pfile->base_context.macro = 0; pfile->base_context.prev = pfile->base_context.next = 0; /* Aligned and unaligned storage. */ pfile->a_buff = _cpp_get_buff (pfile, 0); pfile->u_buff = _cpp_get_buff (pfile, 0); /* The expression parser stack. */ _cpp_expand_op_stack (pfile); /* Initialize the buffer obstack. */ _obstack_begin (&pfile->buffer_ob, 0, 0, (void *(*) (long)) xmalloc, (void (*) (void *)) free); _cpp_init_files (pfile); _cpp_init_hashtable (pfile, table); return pfile; } /* Free resources used by PFILE. Accessing PFILE after this function returns leads to undefined behavior. Returns the error count. */ void cpp_destroy (cpp_reader *pfile) { cpp_context *context, *contextn; tokenrun *run, *runn; free (pfile->op_stack); while (CPP_BUFFER (pfile) != NULL) _cpp_pop_buffer (pfile); if (pfile->out.base) free (pfile->out.base); if (pfile->macro_buffer) { free (pfile->macro_buffer); pfile->macro_buffer = NULL; pfile->macro_buffer_len = 0; } if (pfile->deps) deps_free (pfile->deps); obstack_free (&pfile->buffer_ob, 0); _cpp_destroy_hashtable (pfile); _cpp_cleanup_files (pfile); _cpp_destroy_iconv (pfile); _cpp_free_buff (pfile->a_buff); _cpp_free_buff (pfile->u_buff); _cpp_free_buff (pfile->free_buffs); for (run = &pfile->base_run; run; run = runn) { runn = run->next; free (run->base); if (run != &pfile->base_run) free (run); } for (context = pfile->base_context.next; context; context = contextn) { contextn = context->next; free (context); } free (pfile); } /* This structure defines one built-in identifier. A node will be entered in the hash table under the name NAME, with value VALUE. There are two tables of these. builtin_array holds all the "builtin" macros: these are handled by builtin_macro() in cppmacro.c. Builtin is somewhat of a misnomer -- the property of interest is that these macros require special code to compute their expansions. The value is a "builtin_type" enumerator. operator_array holds the C++ named operators. These are keywords which act as aliases for punctuators. 
In C++, they cannot be altered through #define, and #if recognizes them as operators. In C, these are not entered into the hash table at all (but see ). The value is a token-type enumerator. */ struct builtin { const uchar *name; unsigned short len; unsigned short value; }; #define B(n, t) { DSC(n), t } static const struct builtin builtin_array[] = { B("__TIME__", BT_TIME), B("__DATE__", BT_DATE), B("__FILE__", BT_FILE), B("__BASE_FILE__", BT_BASE_FILE), B("__LINE__", BT_SPECLINE), B("__INCLUDE_LEVEL__", BT_INCLUDE_LEVEL), /* Keep builtins not used for -traditional-cpp at the end, and update init_builtins() if any more are added. */ B("_Pragma", BT_PRAGMA), B("__STDC__", BT_STDC), }; static const struct builtin operator_array[] = { B("and", CPP_AND_AND), B("and_eq", CPP_AND_EQ), B("bitand", CPP_AND), B("bitor", CPP_OR), B("compl", CPP_COMPL), B("not", CPP_NOT), B("not_eq", CPP_NOT_EQ), B("or", CPP_OR_OR), B("or_eq", CPP_OR_EQ), B("xor", CPP_XOR), B("xor_eq", CPP_XOR_EQ) }; #undef B /* Mark the C++ named operators in the hash table. */ static void mark_named_operators (cpp_reader *pfile) { const struct builtin *b; for (b = operator_array; b < (operator_array + ARRAY_SIZE (operator_array)); b++) { cpp_hashnode *hp = cpp_lookup (pfile, b->name, b->len); hp->flags |= NODE_OPERATOR; hp->is_directive = 0; hp->directive_index = b->value; } } /* Read the builtins table above and enter them, and language-specific macros, into the hash table. HOSTED is true if this is a hosted environment. */ void cpp_init_builtins (cpp_reader *pfile, int hosted) { const struct builtin *b; size_t n = ARRAY_SIZE (builtin_array); if (CPP_OPTION (pfile, traditional)) n -= 2; for(b = builtin_array; b < builtin_array + n; b++) { cpp_hashnode *hp = cpp_lookup (pfile, b->name, b->len); hp->type = NT_MACRO; hp->flags |= NODE_BUILTIN | NODE_WARN; hp->value.builtin = b->value; } if (CPP_OPTION (pfile, cplusplus)) _cpp_define_builtin (pfile, "__cplusplus 1"); else if (CPP_OPTION (pfile, lang) == CLK_ASM) _cpp_define_builtin (pfile, "__ASSEMBLER__ 1"); else if (CPP_OPTION (pfile, lang) == CLK_STDC94) _cpp_define_builtin (pfile, "__STDC_VERSION__ 199409L"); else if (CPP_OPTION (pfile, c99)) _cpp_define_builtin (pfile, "__STDC_VERSION__ 199901L"); if (hosted) _cpp_define_builtin (pfile, "__STDC_HOSTED__ 1"); else _cpp_define_builtin (pfile, "__STDC_HOSTED__ 0"); if (CPP_OPTION (pfile, objc)) _cpp_define_builtin (pfile, "__OBJC__ 1"); } /* Sanity-checks are dependent on command-line options, so it is called as a subroutine of cpp_read_main_file (). */ #if ENABLE_CHECKING static void sanity_checks (cpp_reader *); static void sanity_checks (cpp_reader *pfile) { cppchar_t test = 0; size_t max_precision = 2 * CHAR_BIT * sizeof (cpp_num_part); /* Sanity checks for assumptions about CPP arithmetic and target type precisions made by cpplib. 
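
   The first check below relies on unsigned wrap-around: decrementing a
   zero-valued object of an unsigned type yields the type's maximum value,
   which can never compare below 1, whereas a (wrongly) signed cppchar_t
   would simply become -1.  In miniature, with an illustrative stand-in
   type:

       typedef unsigned int sketch_char_t;

       static int
       sketch_type_is_unsigned (void)
       {
         sketch_char_t test = 0;
         test--;
         return test >= 1;
       }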
*/ test--; if (test < 1) cpp_error (pfile, CPP_DL_ICE, "cppchar_t must be an unsigned type"); if (CPP_OPTION (pfile, precision) > max_precision) cpp_error (pfile, CPP_DL_ICE, "preprocessor arithmetic has maximum precision of %lu bits;" " target requires %lu bits", (unsigned long) max_precision, (unsigned long) CPP_OPTION (pfile, precision)); if (CPP_OPTION (pfile, precision) < CPP_OPTION (pfile, int_precision)) cpp_error (pfile, CPP_DL_ICE, "CPP arithmetic must be at least as precise as a target int"); if (CPP_OPTION (pfile, char_precision) < 8) cpp_error (pfile, CPP_DL_ICE, "target char is less than 8 bits wide"); if (CPP_OPTION (pfile, wchar_precision) < CPP_OPTION (pfile, char_precision)) cpp_error (pfile, CPP_DL_ICE, "target wchar_t is narrower than target char"); if (CPP_OPTION (pfile, int_precision) < CPP_OPTION (pfile, char_precision)) cpp_error (pfile, CPP_DL_ICE, "target int is narrower than target char"); /* This is assumed in eval_token() and could be fixed if necessary. */ if (sizeof (cppchar_t) > sizeof (cpp_num_part)) cpp_error (pfile, CPP_DL_ICE, "CPP half-integer narrower than CPP character"); if (CPP_OPTION (pfile, wchar_precision) > BITS_PER_CPPCHAR_T) cpp_error (pfile, CPP_DL_ICE, "CPP on this host cannot handle wide character constants over" " %lu bits, but the target requires %lu bits", (unsigned long) BITS_PER_CPPCHAR_T, (unsigned long) CPP_OPTION (pfile, wchar_precision)); } #else # define sanity_checks(PFILE) #endif /* This is called after options have been parsed, and partially processed. */ void cpp_post_options (cpp_reader *pfile) { sanity_checks (pfile); post_options (pfile); /* Mark named operators before handling command line macros. */ if (CPP_OPTION (pfile, cplusplus) && CPP_OPTION (pfile, operator_names)) mark_named_operators (pfile); } /* Setup for processing input from the file named FNAME, or stdin if it is the empty string. Return the original filename on success (e.g. foo.i->foo.c), or NULL on failure. */ const char * cpp_read_main_file (cpp_reader *pfile, const char *fname) { if (CPP_OPTION (pfile, deps.style) != DEPS_NONE) { if (!pfile->deps) pfile->deps = deps_init (); /* Set the default target (if there is none already). */ deps_add_default_target (pfile->deps, fname); } pfile->main_file = _cpp_find_file (pfile, fname, &pfile->no_search_path, false); if (_cpp_find_failed (pfile->main_file)) return NULL; _cpp_stack_file (pfile, pfile->main_file, false); /* For foo.i, read the original filename foo.c now, for the benefit of the front ends. */ if (CPP_OPTION (pfile, preprocessed)) { read_original_filename (pfile); fname = pfile->line_table->maps[pfile->line_table->used-1].to_file; } return fname; } /* For preprocessed files, if the first tokens are of the form # NUM. handle the directive so we know the original file name. This will generate file_change callbacks, which the front ends must handle appropriately given their state of initialization. */ static void read_original_filename (cpp_reader *pfile) { const cpp_token *token, *token1; /* Lex ahead; if the first tokens are of the form # NUM, then process the directive, otherwise back up. */ token = _cpp_lex_direct (pfile); if (token->type == CPP_HASH) { token1 = _cpp_lex_direct (pfile); _cpp_backup_tokens (pfile, 1); /* If it's a #line directive, handle it. */ if (token1->type == CPP_NUMBER) { _cpp_handle_directive (pfile, token->flags & PREV_WHITE); read_original_directory (pfile); return; } } /* Backup as if nothing happened. 
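
   The same lex-ahead-and-restore pattern in miniature, over a plain array
   of token codes rather than a cpp_reader (the real code restores its
   position with _cpp_backup_tokens; the sketch_* names below are
   illustrative only, not cpplib interfaces):

       enum sketch_ttype { SK_HASH, SK_NUMBER, SK_OTHER };

       struct sketch_stream
       {
         const enum sketch_ttype *tok;
         unsigned int cur;
       };

       static int
       sketch_take_linemarker (struct sketch_stream *s)
       {
         unsigned int saved = s->cur;

         if (s->tok[s->cur++] == SK_HASH
             && s->tok[s->cur++] == SK_NUMBER)
           return 1;

         s->cur = saved;
         return 0;
       }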
*/ _cpp_backup_tokens (pfile, 1); } /* For preprocessed files, if the tokens following the first filename line are of the form # "/path/name//", handle the directive so we know the original current directory. */ static void read_original_directory (cpp_reader *pfile) { const cpp_token *hash, *token; /* Lex ahead; if the first tokens are of the form # NUM, then process the directive, otherwise back up. */ hash = _cpp_lex_direct (pfile); if (hash->type != CPP_HASH) { _cpp_backup_tokens (pfile, 1); return; } token = _cpp_lex_direct (pfile); if (token->type != CPP_NUMBER) { _cpp_backup_tokens (pfile, 2); return; } token = _cpp_lex_direct (pfile); if (token->type != CPP_STRING || ! (token->val.str.len >= 5 && token->val.str.text[token->val.str.len-2] == '/' && token->val.str.text[token->val.str.len-3] == '/')) { _cpp_backup_tokens (pfile, 3); return; } if (pfile->cb.dir_change) { char *debugdir = alloca (token->val.str.len - 3); memcpy (debugdir, (const char *) token->val.str.text + 1, token->val.str.len - 4); debugdir[token->val.str.len - 4] = '\0'; pfile->cb.dir_change (pfile, debugdir); } } /* This is called at the end of preprocessing. It pops the last buffer and writes dependency output, and returns the number of errors. Maybe it should also reset state, such that you could call cpp_start_read with a new filename to restart processing. */ int cpp_finish (cpp_reader *pfile, FILE *deps_stream) { /* Warn about unused macros before popping the final buffer. */ if (CPP_OPTION (pfile, warn_unused_macros)) cpp_forall_identifiers (pfile, _cpp_warn_if_unused_macro, NULL); /* cpplex.c leaves the final buffer on the stack. This is so that it returns an unending stream of CPP_EOFs to the client. If we popped the buffer, we'd dereference a NULL buffer pointer and segfault. It's nice to allow the client to do worry-free excess cpp_get_token calls. */ while (pfile->buffer) _cpp_pop_buffer (pfile); /* Don't write the deps file if there are errors. */ if (CPP_OPTION (pfile, deps.style) != DEPS_NONE && deps_stream && pfile->errors == 0) { deps_write (pfile->deps, deps_stream, 72); if (CPP_OPTION (pfile, deps.phony_targets)) deps_phony_targets (pfile->deps, deps_stream); } /* Report on headers that could use multiple include guards. */ if (CPP_OPTION (pfile, print_include_names)) _cpp_report_missing_guards (pfile); return pfile->errors; } static void post_options (cpp_reader *pfile) { /* -Wtraditional is not useful in C++ mode. */ if (CPP_OPTION (pfile, cplusplus)) CPP_OPTION (pfile, warn_traditional) = 0; /* Permanently disable macro expansion if we are rescanning preprocessed text. Read preprocessed source in ISO mode. */ if (CPP_OPTION (pfile, preprocessed)) { pfile->state.prevent_expansion = 1; CPP_OPTION (pfile, traditional) = 0; } if (CPP_OPTION (pfile, warn_trigraphs) == 2) CPP_OPTION (pfile, warn_trigraphs) = !CPP_OPTION (pfile, trigraphs); if (CPP_OPTION (pfile, traditional)) { CPP_OPTION (pfile, cplusplus_comments) = 0; /* Traditional CPP does not accurately track column information. */ CPP_OPTION (pfile, show_column) = 0; CPP_OPTION (pfile, trigraphs) = 0; CPP_OPTION (pfile, warn_trigraphs) = 0; } } /* CPP Library - lexical analysis. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Per Bothner, 1994-95.
Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 Broken out to separate file, Zack Weinberg, Mar 2000 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ enum spell_type { SPELL_OPERATOR = 0, SPELL_IDENT, SPELL_LITERAL, SPELL_NONE }; struct token_spelling { enum spell_type category; const unsigned char *name; }; static const unsigned char *const digraph_spellings[] = { USTR"%:", USTR"%:%:", USTR"<:", USTR":>", USTR"<%", USTR"%>" }; #define OP(e, s) { SPELL_OPERATOR, USTR s }, #define TK(e, s) { s, USTR #e }, static const struct token_spelling token_spellings[N_TTYPES] = { TTYPE_TABLE }; #undef OP #undef TK #define TOKEN_SPELL(token) (token_spellings[(token)->type].category) #define TOKEN_NAME(token) (token_spellings[(token)->type].name) static void add_line_note (cpp_buffer *, const uchar *, unsigned int); static int skip_line_comment (cpp_reader *); static void skip_whitespace (cpp_reader *, cppchar_t); static cpp_hashnode *lex_identifier (cpp_reader *, const uchar *); static void lex_number (cpp_reader *, cpp_string *); static bool forms_identifier_p (cpp_reader *, int); static void lex_preproc_string (cpp_reader *, cpp_token *, const uchar *); static void save_comment (cpp_reader *, cpp_token *, const uchar *, cppchar_t); static void create_literal (cpp_reader *, cpp_token *, const uchar *, unsigned int, enum cpp_ttype); static bool warn_in_comment (cpp_reader *, _cpp_line_note *); static int name_p (cpp_reader *, const cpp_string *); static tokenrun *next_tokenrun (tokenrun *); static _cpp_buff *new_buff (size_t); /* Utility routine: Compares, the token TOKEN to the NUL-terminated string STRING. TOKEN must be a CPP_NAME. Returns 1 for equal, 0 for unequal. */ int cpp_ideq (const cpp_token *token, const char *string) { if (token->type != CPP_NAME) return 0; return !ustrcmp (NODE_NAME (token->val.node), (const uchar *) string); } /* Record a note TYPE at byte POS into the current cleaned logical line. */ static void add_line_note (cpp_buffer *buffer, const uchar *pos, unsigned int type) { if (buffer->notes_used == buffer->notes_cap) { buffer->notes_cap = buffer->notes_cap * 2 + 200; buffer->notes = xrealloc (buffer->notes, buffer->notes_cap * sizeof (_cpp_line_note)); } buffer->notes[buffer->notes_used].pos = pos; buffer->notes[buffer->notes_used].type = type; buffer->notes_used++; } /* Returns with a logical line that contains no escaped newlines or trigraphs. This is a time-critical inner loop. */ void _cpp_clean_line (cpp_reader *pfile) { cpp_buffer *buffer; const uchar *s; uchar c, *d, *p; buffer = pfile->buffer; buffer->cur_note = buffer->notes_used = 0; buffer->cur = buffer->line_base = buffer->next_line; buffer->need_line = false; s = buffer->next_line - 1; if (!buffer->from_stage3) { /* Short circuit for the common case of an un-escaped line with no trigraphs. The primary win here is by not writing any data back to memory until we have to. 
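
   In miniature, the same fast-path/slow-path split applied to splicing
   backslash-newlines out of a NUL-terminated string (illustrative only;
   the real loops below also handle trigraphs, CR/LF line endings and the
   line-note bookkeeping):

       static void
       sketch_splice (char *line)
       {
         char *s = line, *d;

         while (*s != '\0' && !(s[0] == '\\' && s[1] == '\n'))
           s++;
         if (*s == '\0')
           return;

         for (d = s; *s != '\0'; )
           {
             if (s[0] == '\\' && s[1] == '\n')
               s += 2;
             else
               *d++ = *s++;
           }
         *d = '\0';
       }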
*/ for (;;) { c = *++s; if (c == '\n' || c == '\r') { d = (uchar *) s; if (s == buffer->rlimit) goto done; /* DOS line ending? */ if (c == '\r' && s[1] == '\n') s++; if (s == buffer->rlimit) goto done; /* check for escaped newline */ p = d; while (p != buffer->next_line && is_nvspace (p[-1])) p--; if (p == buffer->next_line || p[-1] != '\\') goto done; /* Have an escaped newline; process it and proceed to the slow path. */ add_line_note (buffer, p - 1, p != d ? ' ' : '\\'); d = p - 2; buffer->next_line = p - 1; break; } if (c == '?' && s[1] == '?' && _cpp_trigraph_map[s[2]]) { /* Have a trigraph. We may or may not have to convert it. Add a line note regardless, for -Wtrigraphs. */ add_line_note (buffer, s, s[2]); if (CPP_OPTION (pfile, trigraphs)) { /* We do, and that means we have to switch to the slow path. */ d = (uchar *) s; *d = _cpp_trigraph_map[s[2]]; s += 2; break; } } } for (;;) { c = *++s; *++d = c; if (c == '\n' || c == '\r') { /* Handle DOS line endings. */ if (c == '\r' && s != buffer->rlimit && s[1] == '\n') s++; if (s == buffer->rlimit) break; /* Escaped? */ p = d; while (p != buffer->next_line && is_nvspace (p[-1])) p--; if (p == buffer->next_line || p[-1] != '\\') break; add_line_note (buffer, p - 1, p != d ? ' ': '\\'); d = p - 2; buffer->next_line = p - 1; } else if (c == '?' && s[1] == '?' && _cpp_trigraph_map[s[2]]) { /* Add a note regardless, for the benefit of -Wtrigraphs. */ add_line_note (buffer, d, s[2]); if (CPP_OPTION (pfile, trigraphs)) { *d = _cpp_trigraph_map[s[2]]; s += 2; } } } } else { do s++; while (*s != '\n' && *s != '\r'); d = (uchar *) s; /* Handle DOS line endings. */ if (*s == '\r' && s != buffer->rlimit && s[1] == '\n') s++; } done: *d = '\n'; /* A sentinel note that should never be processed. */ add_line_note (buffer, d + 1, '\n'); buffer->next_line = s + 1; } /* Return true if the trigraph indicated by NOTE should be warned about in a comment. */ static bool warn_in_comment (cpp_reader *pfile, _cpp_line_note *note) { const uchar *p; /* Within comments we don't warn about trigraphs, unless the trigraph forms an escaped newline, as that may change behavior. */ if (note->type != '/') return false; /* If -trigraphs, then this was an escaped newline iff the next note is coincident. */ if (CPP_OPTION (pfile, trigraphs)) return note[1].pos == note->pos; /* Otherwise, see if this forms an escaped newline. */ p = note->pos + 3; while (is_nvspace (*p)) p++; /* There might have been escaped newlines between the trigraph and the newline we found. Hence the position test. */ return (*p == '\n' && p < note[1].pos); } /* Process the notes created by add_line_note as far as the current location. */ void _cpp_process_line_notes (cpp_reader *pfile, int in_comment) { cpp_buffer *buffer = pfile->buffer; for (;;) { _cpp_line_note *note = &buffer->notes[buffer->cur_note]; unsigned int col; if (note->pos > buffer->cur) break; buffer->cur_note++; col = CPP_BUF_COLUMN (buffer, note->pos + 1); if (note->type == '\\' || note->type == ' ') { if (note->type == ' ' && !in_comment) cpp_error_with_line (pfile, CPP_DL_WARNING, pfile->line_table->highest_line, col, "backslash and newline separated by space"); if (buffer->next_line > buffer->rlimit) { cpp_error_with_line (pfile, CPP_DL_PEDWARN, pfile->line_table->highest_line, col, "backslash-newline at end of file"); /* Prevent "no newline at end of file" warning. 
*/ buffer->next_line = buffer->rlimit; } buffer->line_base = note->pos; CPP_INCREMENT_LINE (pfile, 0); } else if (_cpp_trigraph_map[note->type]) { if (CPP_OPTION (pfile, warn_trigraphs) && (!in_comment || warn_in_comment (pfile, note))) { if (CPP_OPTION (pfile, trigraphs)) cpp_error_with_line (pfile, CPP_DL_WARNING, pfile->line_table->highest_line, col, "trigraph ??%c converted to %c", note->type, (int) _cpp_trigraph_map[note->type]); else { cpp_error_with_line (pfile, CPP_DL_WARNING, pfile->line_table->highest_line, col, "trigraph ??%c ignored, use -trigraphs to enable", note->type); } } } else abort (); } } /* Skip a C-style block comment. We find the end of the comment by seeing if an asterisk is before every '/' we encounter. Returns nonzero if comment terminated by EOF, zero otherwise. Buffer->cur points to the initial asterisk of the comment. */ bool _cpp_skip_block_comment (cpp_reader *pfile) { cpp_buffer *buffer = pfile->buffer; const uchar *cur = buffer->cur; uchar c; cur++; if (*cur == '/') cur++; for (;;) { /* People like decorating comments with '*', so check for '/' instead for efficiency. */ c = *cur++; if (c == '/') { if (cur[-2] == '*') break; /* Warn about potential nested comments, but not if the '/' comes immediately before the true comment delimiter. Don't bother to get it right across escaped newlines. */ if (CPP_OPTION (pfile, warn_comments) && cur[0] == '*' && cur[1] != '/') { buffer->cur = cur; cpp_error_with_line (pfile, CPP_DL_WARNING, pfile->line_table->highest_line, CPP_BUF_COL (buffer), "\"/*\" within comment"); } } else if (c == '\n') { unsigned int cols; buffer->cur = cur - 1; _cpp_process_line_notes (pfile, true); if (buffer->next_line >= buffer->rlimit) return true; _cpp_clean_line (pfile); cols = buffer->next_line - buffer->line_base; CPP_INCREMENT_LINE (pfile, cols); cur = buffer->cur; } } buffer->cur = cur; _cpp_process_line_notes (pfile, true); return false; } /* Skip a C++ line comment, leaving buffer->cur pointing to the terminating newline. Handles escaped newlines. Returns nonzero if a multiline comment. */ static int skip_line_comment (cpp_reader *pfile) { cpp_buffer *buffer = pfile->buffer; unsigned int orig_line = pfile->line_table->highest_line; while (*buffer->cur != '\n') buffer->cur++; _cpp_process_line_notes (pfile, true); return orig_line != pfile->line_table->highest_line; } /* Skips whitespace, saving the next non-whitespace character. */ static void skip_whitespace (cpp_reader *pfile, cppchar_t c) { cpp_buffer *buffer = pfile->buffer; bool saw_NUL = false; do { /* Horizontal space always OK. */ if (c == ' ' || c == '\t') ; /* Just \f \v or \0 left. */ else if (c == '\0') saw_NUL = true; else if (pfile->state.in_directive && CPP_PEDANTIC (pfile)) cpp_error_with_line (pfile, CPP_DL_PEDWARN, pfile->line_table->highest_line, CPP_BUF_COL (buffer), "%s in preprocessing directive", c == '\f' ? "form feed" : "vertical tab"); c = *buffer->cur++; } /* We only want non-vertical space, i.e. ' ' \t \f \v \0. */ while (is_nvspace (c)); if (saw_NUL) cpp_error (pfile, CPP_DL_WARNING, "null character(s) ignored"); buffer->cur--; } /* See if the characters of a number token are valid in a name (no '.', '+' or '-'). */ static int name_p (cpp_reader *pfile, const cpp_string *string) { unsigned int i; for (i = 0; i < string->len; i++) if (!is_idchar (string->text[i])) return 0; return 1; } /* Returns TRUE if the sequence starting at buffer->cur is valid in an identifier. FIRST is TRUE if this starts an identifier.
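
   For example, when CPP_OPTION (pfile, dollars_in_ident) is set the input
   "foo$bar" is lexed as a single CPP_NAME, because this function accepts
   the '$' and lets lex_identifier keep going; with the option clear it
   returns false at the '$', so "foo" ends the identifier and the '$' is
   lexed on its own (as a CPP_OTHER token).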
*/ static bool forms_identifier_p (cpp_reader *pfile, int first) { cpp_buffer *buffer = pfile->buffer; if (*buffer->cur == '$') { if (!CPP_OPTION (pfile, dollars_in_ident)) return false; buffer->cur++; if (CPP_OPTION (pfile, warn_dollars) && !pfile->state.skipping) { CPP_OPTION (pfile, warn_dollars) = 0; cpp_error (pfile, CPP_DL_PEDWARN, "'$' in identifier or number"); } return true; } /* Is this a syntactically valid UCN? */ if (0 && *buffer->cur == '\\' && (buffer->cur[1] == 'u' || buffer->cur[1] == 'U')) { buffer->cur += 2; if (_cpp_valid_ucn (pfile, &buffer->cur, buffer->rlimit, 1 + !first)) return true; buffer->cur -= 2; } return false; } /* Lex an identifier starting at BUFFER->CUR - 1. */ static cpp_hashnode * lex_identifier (cpp_reader *pfile, const uchar *base) { cpp_hashnode *result; const uchar *cur, *limit; unsigned int len; unsigned int hash = HT_HASHSTEP (0, *base); cur = pfile->buffer->cur; for (;;) { /* N.B. ISIDNUM does not include $. */ while (ISIDNUM (*cur)) { hash = HT_HASHSTEP (hash, *cur); cur++; } pfile->buffer->cur = cur; if (!forms_identifier_p (pfile, false)) break; limit = pfile->buffer->cur; while (cur < limit) { hash = HT_HASHSTEP (hash, *cur); cur++; } } len = cur - base; hash = HT_HASHFINISH (hash, len); result = (cpp_hashnode *) ht_lookup_with_hash (pfile->hash_table, base, len, hash, HT_ALLOC); /* Rarely, identifiers require diagnostics when lexed. */ if (__builtin_expect ((result->flags & NODE_DIAGNOSTIC) && !pfile->state.skipping, 0)) { /* It is allowed to poison the same identifier twice. */ if ((result->flags & NODE_POISONED) && !pfile->state.poisoned_ok) cpp_error (pfile, CPP_DL_ERROR, "attempt to use poisoned \"%s\"", NODE_NAME (result)); /* Constraint 6.10.3.5: __VA_ARGS__ should only appear in the replacement list of a variadic macro. */ if (result == pfile->spec_nodes.n__VA_ARGS__ && !pfile->state.va_args_ok) cpp_error (pfile, CPP_DL_PEDWARN, "__VA_ARGS__ can only appear in the expansion" " of a C99 variadic macro"); } return result; } /* Lex a number to NUMBER starting at BUFFER->CUR - 1. */ static void lex_number (cpp_reader *pfile, cpp_string *number) { const uchar *cur; const uchar *base; uchar *dest; base = pfile->buffer->cur - 1; do { cur = pfile->buffer->cur; /* N.B. ISIDNUM does not include $. */ while (ISIDNUM (*cur) || *cur == '.' || VALID_SIGN (*cur, cur[-1])) cur++; pfile->buffer->cur = cur; } while (forms_identifier_p (pfile, false)); number->len = cur - base; dest = _cpp_unaligned_alloc (pfile, number->len + 1); memcpy (dest, base, number->len); dest[number->len] = '\0'; number->text = dest; } /* Create a token of type TYPE with a literal spelling. */ static void create_literal (cpp_reader *pfile, cpp_token *token, const uchar *base, unsigned int len, enum cpp_ttype type) { uchar *dest = _cpp_unaligned_alloc (pfile, len + 1); memcpy (dest, base, len); dest[len] = '\0'; token->type = type; token->val.str.len = len; token->val.str.text = dest; } /* Lexes a string, character constant, or angle-bracketed header file name. The stored string contains the spelling, including opening quote and leading any leading 'L'. It returns the type of the literal, or CPP_OTHER if it was not properly terminated. The spelling is NUL-terminated, but it is not guaranteed that this is the first NUL since embedded NULs are preserved. 
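
   Stripped of the wide-literal, header-name and diagnostic handling, the
   scanning loop below has this shape (illustrative only, for a plain
   NUL-terminated buffer and double-quoted strings; it returns a pointer
   just past the closing quote, or 0 if the literal runs into a newline or
   the end of the buffer):

       static const char *
       sketch_skip_string (const char *cur)
       {
         cur++;
         for (;;)
           {
             char c = *cur++;

             if (c == '\\' && *cur != '\n' && *cur != '\0')
               cur++;
             else if (c == '"')
               return cur;
             else if (c == '\n' || c == '\0')
               return 0;
           }
       }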
*/ static void lex_preproc_string (cpp_reader *pfile, cpp_token *token, const uchar *base) { bool saw_NUL = false; const uchar *cur; cppchar_t terminator; enum cpp_ttype type; cur = base; terminator = *cur++; if (terminator == 'L') terminator = *cur++; if (terminator == '\"') type = *base == 'L' ? CPP_WSTRING: CPP_STRING; else if (terminator == '\'') type = *base == 'L' ? CPP_WCHAR: CPP_CHAR; else terminator = '>', type = CPP_HEADER_NAME; for (;;) { cppchar_t c = *cur++; /* In #include-style directives, terminators are not escapable. */ if (c == '\\' && !pfile->state.angled_headers && *cur != '\n') cur++; else if (c == terminator) break; else if (c == '\n') { cur--; type = CPP_OTHER; break; } else if (c == '\0') saw_NUL = true; } if (saw_NUL && !pfile->state.skipping) cpp_error (pfile, CPP_DL_WARNING, "null character(s) preserved in literal"); pfile->buffer->cur = cur; create_literal (pfile, token, base, cur - base, type); } /* The stored comment includes the comment start and any terminator. */ static void save_comment (cpp_reader *pfile, cpp_token *token, const unsigned char *from, cppchar_t type) { unsigned char *buffer; unsigned int len, clen; len = pfile->buffer->cur - from + 1; /* + 1 for the initial '/'. */ /* C++ comments probably (not definitely) have moved past a new line, which we don't want to save in the comment. */ if (is_vspace (pfile->buffer->cur[-1])) len--; /* If we are currently in a directive, then we need to store all C++ comments as C comments internally, and so we need to allocate a little extra space in that case. Note that the only time we encounter a directive here is when we are saving comments in a "#define". */ clen = (pfile->state.in_directive && type == '/') ? len + 2 : len; buffer = _cpp_unaligned_alloc (pfile, clen); token->type = CPP_COMMENT; token->val.str.len = clen; token->val.str.text = buffer; buffer[0] = '/'; memcpy (buffer + 1, from, len - 1); /* Finish conversion to a C comment, if necessary. */ if (pfile->state.in_directive && type == '/') { buffer[1] = '*'; buffer[clen - 2] = '*'; buffer[clen - 1] = '/'; } } /* Allocate COUNT tokens for RUN. */ void _cpp_init_tokenrun (tokenrun *run, unsigned int count) { run->base = xnewvec (cpp_token, count); run->limit = run->base + count; run->next = NULL; } /* Returns the next tokenrun, or creates one if there is none. */ static tokenrun * next_tokenrun (tokenrun *run) { if (run->next == NULL) { run->next = xnew (tokenrun); run->next->prev = run; _cpp_init_tokenrun (run->next, 250); } return run->next; } /* Allocate a single token that is invalidated at the same time as the rest of the tokens on the line. Has its line and col set to the same as the last lexed token, so that diagnostics appear in the right place. */ cpp_token * _cpp_temp_token (cpp_reader *pfile) { cpp_token *old, *result; old = pfile->cur_token - 1; if (pfile->cur_token == pfile->cur_run->limit) { pfile->cur_run = next_tokenrun (pfile->cur_run); pfile->cur_token = pfile->cur_run->base; } result = pfile->cur_token++; result->src_loc = old->src_loc; return result; } /* Lex a token into RESULT (external interface). Takes care of issues like directive handling, token lookahead, multiple include optimization and skipping. 
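
   Tokens are stored in a chain of fixed-size "tokenruns"; when the current
   run fills up, the lexer simply steps to the next run (creating it on
   demand), so individual tokens are never reallocated and pointers to them
   stay valid until the whole logical line has been consumed.  The
   allocation pattern in miniature (illustrative names; uses calloc from
   <stdlib.h>, error handling omitted):

       struct sketch_run
       {
         int slot[250];
         struct sketch_run *next;
       };

       static struct sketch_run *
       sketch_next_run (struct sketch_run *run)
       {
         if (run->next == NULL)
           run->next = calloc (1, sizeof (struct sketch_run));
         return run->next;
       }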
*/ const cpp_token * _cpp_lex_token (cpp_reader *pfile) { cpp_token *result; for (;;) { if (pfile->cur_token == pfile->cur_run->limit) { pfile->cur_run = next_tokenrun (pfile->cur_run); pfile->cur_token = pfile->cur_run->base; } if (pfile->lookaheads) { pfile->lookaheads--; result = pfile->cur_token++; } else result = _cpp_lex_direct (pfile); if (result->flags & BOL) { /* Is this a directive. If _cpp_handle_directive returns false, it is an assembler #. */ if (result->type == CPP_HASH /* 6.10.3 p 11: Directives in a list of macro arguments gives undefined behavior. This implementation handles the directive as normal. */ && pfile->state.parsing_args != 1 && _cpp_handle_directive (pfile, result->flags & PREV_WHITE)) continue; if (pfile->cb.line_change && !pfile->state.skipping) pfile->cb.line_change (pfile, result, pfile->state.parsing_args); } /* We don't skip tokens in directives. */ if (pfile->state.in_directive) break; /* Outside a directive, invalidate controlling macros. At file EOF, _cpp_lex_direct takes care of popping the buffer, so we never get here and MI optimization works. */ pfile->mi_valid = false; if (!pfile->state.skipping || result->type == CPP_EOF) break; } return result; } /* Returns true if a fresh line has been loaded. */ bool _cpp_get_fresh_line (cpp_reader *pfile) { int return_at_eof; /* We can't get a new line until we leave the current directive. */ if (pfile->state.in_directive) return false; for (;;) { cpp_buffer *buffer = pfile->buffer; if (!buffer->need_line) return true; if (buffer->next_line < buffer->rlimit) { _cpp_clean_line (pfile); return true; } /* First, get out of parsing arguments state. */ if (pfile->state.parsing_args) return false; /* End of buffer. Non-empty files should end in a newline. */ if (buffer->buf != buffer->rlimit && buffer->next_line > buffer->rlimit && !buffer->from_stage3) { /* Only warn once. */ buffer->next_line = buffer->rlimit; cpp_error_with_line (pfile, CPP_DL_PEDWARN, pfile->line_table->highest_line, CPP_BUF_COLUMN (buffer, buffer->cur), "no newline at end of file"); } return_at_eof = buffer->return_at_eof; _cpp_pop_buffer (pfile); if (pfile->buffer == NULL || return_at_eof) return false; } } #define IF_NEXT_IS(CHAR, THEN_TYPE, ELSE_TYPE) \ do \ { \ result->type = ELSE_TYPE; \ if (*buffer->cur == CHAR) \ buffer->cur++, result->type = THEN_TYPE; \ } \ while (0) /* Lex a token into pfile->cur_token, which is also incremented, to get diagnostics pointing to the correct location. Does not handle issues such as token lookahead, multiple-include optimization, directives, skipping etc. This function is only suitable for use by _cpp_lex_token, and in special cases like lex_expansion_token which doesn't care for any of these issues. When meeting a newline, returns CPP_EOF if parsing a directive, otherwise returns to the start of the token buffer if permissible. Returns the location of the lexed token. */ cpp_token * _cpp_lex_direct (cpp_reader *pfile) { cppchar_t c; cpp_buffer *buffer; const unsigned char *comment_start; cpp_token *result = pfile->cur_token++; fresh_line: result->flags = 0; buffer = pfile->buffer; if (buffer->need_line) { if (!_cpp_get_fresh_line (pfile)) { result->type = CPP_EOF; if (!pfile->state.in_directive) { /* Tell the compiler the line number of the EOF token. 
*/ result->src_loc = pfile->line_table->highest_line; result->flags = BOL; } return result; } if (!pfile->keep_tokens) { pfile->cur_run = &pfile->base_run; result = pfile->base_run.base; pfile->cur_token = result + 1; } result->flags = BOL; if (pfile->state.parsing_args == 2) result->flags |= PREV_WHITE; } buffer = pfile->buffer; update_tokens_line: result->src_loc = pfile->line_table->highest_line; skipped_white: if (buffer->cur >= buffer->notes[buffer->cur_note].pos && !pfile->overlaid_buffer) { _cpp_process_line_notes (pfile, false); result->src_loc = pfile->line_table->highest_line; } c = *buffer->cur++; LINEMAP_POSITION_FOR_COLUMN (result->src_loc, pfile->line_table, CPP_BUF_COLUMN (buffer, buffer->cur)); switch (c) { case ' ': case '\t': case '\f': case '\v': case '\0': result->flags |= PREV_WHITE; skip_whitespace (pfile, c); goto skipped_white; case '\n': if (buffer->cur < buffer->rlimit) CPP_INCREMENT_LINE (pfile, 0); buffer->need_line = true; goto fresh_line; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': result->type = CPP_NUMBER; lex_number (pfile, &result->val.str); break; case 'L': /* 'L' may introduce wide characters or strings. */ if (*buffer->cur == '\'' || *buffer->cur == '"') { lex_preproc_string (pfile, result, buffer->cur - 1); break; } /* Fall through. */ case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': result->type = CPP_NAME; result->val.node = lex_identifier (pfile, buffer->cur - 1); /* Convert named operators to their proper types. */ if (result->val.node->flags & NODE_OPERATOR) { result->flags |= NAMED_OP; result->type = result->val.node->directive_index; } break; case '\'': case '"': lex_preproc_string (pfile, result, buffer->cur - 1); break; case '/': /* A potential block or line comment. */ comment_start = buffer->cur; c = *buffer->cur; if (c == '*') { if (_cpp_skip_block_comment (pfile)) cpp_error (pfile, CPP_DL_ERROR, "unterminated comment"); } else if (c == '/' && (CPP_OPTION (pfile, cplusplus_comments) || cpp_in_system_header (pfile))) { /* Warn about comments only if pedantically GNUC89, and not in system headers. */ if (CPP_OPTION (pfile, lang) == CLK_GNUC89 && CPP_PEDANTIC (pfile) && ! buffer->warned_cplusplus_comments) { cpp_error (pfile, CPP_DL_PEDWARN, "C++ style comments are not allowed in ISO C90"); cpp_error (pfile, CPP_DL_PEDWARN, "(this will be reported only once per input file)"); buffer->warned_cplusplus_comments = 1; } if (skip_line_comment (pfile) && CPP_OPTION (pfile, warn_comments)) cpp_error (pfile, CPP_DL_WARNING, "multi-line comment"); } else if (c == '=') { buffer->cur++; result->type = CPP_DIV_EQ; break; } else { result->type = CPP_DIV; break; } if (!pfile->state.save_comments) { result->flags |= PREV_WHITE; goto update_tokens_line; } /* Save the comment as a token in its own right. 
*/ save_comment (pfile, result, comment_start, c); break; case '<': if (pfile->state.angled_headers) { lex_preproc_string (pfile, result, buffer->cur - 1); break; } result->type = CPP_LESS; if (*buffer->cur == '=') buffer->cur++, result->type = CPP_LESS_EQ; else if (*buffer->cur == '<') { buffer->cur++; IF_NEXT_IS ('=', CPP_LSHIFT_EQ, CPP_LSHIFT); } else if (*buffer->cur == '?' && CPP_OPTION (pfile, cplusplus)) { buffer->cur++; IF_NEXT_IS ('=', CPP_MIN_EQ, CPP_MIN); } else if (CPP_OPTION (pfile, digraphs)) { if (*buffer->cur == ':') { buffer->cur++; result->flags |= DIGRAPH; result->type = CPP_OPEN_SQUARE; } else if (*buffer->cur == '%') { buffer->cur++; result->flags |= DIGRAPH; result->type = CPP_OPEN_BRACE; } } break; case '>': result->type = CPP_GREATER; if (*buffer->cur == '=') buffer->cur++, result->type = CPP_GREATER_EQ; else if (*buffer->cur == '>') { buffer->cur++; IF_NEXT_IS ('=', CPP_RSHIFT_EQ, CPP_RSHIFT); } else if (*buffer->cur == '?' && CPP_OPTION (pfile, cplusplus)) { buffer->cur++; IF_NEXT_IS ('=', CPP_MAX_EQ, CPP_MAX); } break; case '%': result->type = CPP_MOD; if (*buffer->cur == '=') buffer->cur++, result->type = CPP_MOD_EQ; else if (CPP_OPTION (pfile, digraphs)) { if (*buffer->cur == ':') { buffer->cur++; result->flags |= DIGRAPH; result->type = CPP_HASH; if (*buffer->cur == '%' && buffer->cur[1] == ':') buffer->cur += 2, result->type = CPP_PASTE; } else if (*buffer->cur == '>') { buffer->cur++; result->flags |= DIGRAPH; result->type = CPP_CLOSE_BRACE; } } break; case '.': result->type = CPP_DOT; if (ISDIGIT (*buffer->cur)) { result->type = CPP_NUMBER; lex_number (pfile, &result->val.str); } else if (*buffer->cur == '.' && buffer->cur[1] == '.') buffer->cur += 2, result->type = CPP_ELLIPSIS; else if (*buffer->cur == '*' && CPP_OPTION (pfile, cplusplus)) buffer->cur++, result->type = CPP_DOT_STAR; break; case '+': result->type = CPP_PLUS; if (*buffer->cur == '+') buffer->cur++, result->type = CPP_PLUS_PLUS; else if (*buffer->cur == '=') buffer->cur++, result->type = CPP_PLUS_EQ; break; case '-': result->type = CPP_MINUS; if (*buffer->cur == '>') { buffer->cur++; result->type = CPP_DEREF; if (*buffer->cur == '*' && CPP_OPTION (pfile, cplusplus)) buffer->cur++, result->type = CPP_DEREF_STAR; } else if (*buffer->cur == '-') buffer->cur++, result->type = CPP_MINUS_MINUS; else if (*buffer->cur == '=') buffer->cur++, result->type = CPP_MINUS_EQ; break; case '&': result->type = CPP_AND; if (*buffer->cur == '&') buffer->cur++, result->type = CPP_AND_AND; else if (*buffer->cur == '=') buffer->cur++, result->type = CPP_AND_EQ; break; case '|': result->type = CPP_OR; if (*buffer->cur == '|') buffer->cur++, result->type = CPP_OR_OR; else if (*buffer->cur == '=') buffer->cur++, result->type = CPP_OR_EQ; break; case ':': result->type = CPP_COLON; if (*buffer->cur == ':' && CPP_OPTION (pfile, cplusplus)) buffer->cur++, result->type = CPP_SCOPE; else if (*buffer->cur == '>' && CPP_OPTION (pfile, digraphs)) { buffer->cur++; result->flags |= DIGRAPH; result->type = CPP_CLOSE_SQUARE; } break; case '*': IF_NEXT_IS ('=', CPP_MULT_EQ, CPP_MULT); break; case '=': IF_NEXT_IS ('=', CPP_EQ_EQ, CPP_EQ); break; case '!': IF_NEXT_IS ('=', CPP_NOT_EQ, CPP_NOT); break; case '^': IF_NEXT_IS ('=', CPP_XOR_EQ, CPP_XOR); break; case '#': IF_NEXT_IS ('#', CPP_PASTE, CPP_HASH); break; case '?': result->type = CPP_QUERY; break; case '~': result->type = CPP_COMPL; break; case ',': result->type = CPP_COMMA; break; case '(': result->type = CPP_OPEN_PAREN; break; case ')': result->type = CPP_CLOSE_PAREN; break; 
case '[': result->type = CPP_OPEN_SQUARE; break; case ']': result->type = CPP_CLOSE_SQUARE; break; case '{': result->type = CPP_OPEN_BRACE; break; case '}': result->type = CPP_CLOSE_BRACE; break; case ';': result->type = CPP_SEMICOLON; break; /* @ is a punctuator in Objective-C. */ case '@': result->type = CPP_ATSIGN; break; case '$': case '\\': { const uchar *base = --buffer->cur; if (forms_identifier_p (pfile, true)) { result->type = CPP_NAME; result->val.node = lex_identifier (pfile, base); break; } buffer->cur++; } default: create_literal (pfile, result, buffer->cur - 1, 1, CPP_OTHER); break; } return result; } /* An upper bound on the number of bytes needed to spell TOKEN. Does not include preceding whitespace. */ unsigned int cpp_token_len (const cpp_token *token) { unsigned int len; switch (TOKEN_SPELL (token)) { default: len = 4; break; case SPELL_LITERAL: len = token->val.str.len; break; case SPELL_IDENT: len = NODE_LEN (token->val.node); break; } return len; } /* Write the spelling of a token TOKEN to BUFFER. The buffer must already contain the enough space to hold the token's spelling. Returns a pointer to the character after the last character written. FIXME: Would be nice if we didn't need the PFILE argument. */ unsigned char * cpp_spell_token (cpp_reader *pfile, const cpp_token *token, unsigned char *buffer) { switch (TOKEN_SPELL (token)) { case SPELL_OPERATOR: { const unsigned char *spelling; unsigned char c; if (token->flags & DIGRAPH) spelling = digraph_spellings[(int) token->type - (int) CPP_FIRST_DIGRAPH]; else if (token->flags & NAMED_OP) goto spell_ident; else spelling = TOKEN_NAME (token); while ((c = *spelling++) != '\0') *buffer++ = c; } break; spell_ident: case SPELL_IDENT: memcpy (buffer, NODE_NAME (token->val.node), NODE_LEN (token->val.node)); buffer += NODE_LEN (token->val.node); break; case SPELL_LITERAL: memcpy (buffer, token->val.str.text, token->val.str.len); buffer += token->val.str.len; break; case SPELL_NONE: cpp_error (pfile, CPP_DL_ICE, "unspellable token %s", TOKEN_NAME (token)); break; } return buffer; } /* Returns TOKEN spelt as a null-terminated string. The string is freed when the reader is destroyed. Useful for diagnostics. */ unsigned char * cpp_token_as_text (cpp_reader *pfile, const cpp_token *token) { unsigned int len = cpp_token_len (token) + 1; unsigned char *start = _cpp_unaligned_alloc (pfile, len), *end; end = cpp_spell_token (pfile, token, start); end[0] = '\0'; return start; } /* Used by C front ends, which really should move to using cpp_token_as_text. */ const char * cpp_type2name (enum cpp_ttype type) { return (const char *) token_spellings[type].name; } /* Writes the spelling of token to FP, without any preceding space. Separated from cpp_spell_token for efficiency - to avoid stdio double-buffering. */ void cpp_output_token (const cpp_token *token, FILE *fp) { switch (TOKEN_SPELL (token)) { case SPELL_OPERATOR: { const unsigned char *spelling; int c; if (token->flags & DIGRAPH) spelling = digraph_spellings[(int) token->type - (int) CPP_FIRST_DIGRAPH]; else if (token->flags & NAMED_OP) goto spell_ident; else spelling = TOKEN_NAME (token); c = *spelling; do putc (c, fp); while ((c = *++spelling) != '\0'); } break; spell_ident: case SPELL_IDENT: fwrite (NODE_NAME (token->val.node), 1, NODE_LEN (token->val.node), fp); break; case SPELL_LITERAL: fwrite (token->val.str.text, 1, token->val.str.len, fp); break; case SPELL_NONE: /* An error, most probably. */ break; } } /* Compare two tokens. 
*/ int _cpp_equiv_tokens (const cpp_token *a, const cpp_token *b) { if (a->type == b->type && a->flags == b->flags) switch (TOKEN_SPELL (a)) { default: /* Keep compiler happy. */ case SPELL_OPERATOR: return 1; case SPELL_NONE: return (a->type != CPP_MACRO_ARG || a->val.arg_no == b->val.arg_no); case SPELL_IDENT: return a->val.node == b->val.node; case SPELL_LITERAL: return (a->val.str.len == b->val.str.len && !memcmp (a->val.str.text, b->val.str.text, a->val.str.len)); } return 0; } /* Returns nonzero if a space should be inserted to avoid an accidental token paste for output. For simplicity, it is conservative, and occasionally advises a space where one is not needed, e.g. "." and ".2". */ int cpp_avoid_paste (cpp_reader *pfile, const cpp_token *token1, const cpp_token *token2) { enum cpp_ttype a = token1->type, b = token2->type; cppchar_t c; if (token1->flags & NAMED_OP) a = CPP_NAME; if (token2->flags & NAMED_OP) b = CPP_NAME; c = EOF; if (token2->flags & DIGRAPH) c = digraph_spellings[(int) b - (int) CPP_FIRST_DIGRAPH][0]; else if (token_spellings[b].category == SPELL_OPERATOR) c = token_spellings[b].name[0]; /* Quickly get everything that can paste with an '='. */ if ((int) a <= (int) CPP_LAST_EQ && c == '=') return 1; switch (a) { case CPP_GREATER: return c == '>' || c == '?'; case CPP_LESS: return c == '<' || c == '?' || c == '%' || c == ':'; case CPP_PLUS: return c == '+'; case CPP_MINUS: return c == '-' || c == '>'; case CPP_DIV: return c == '/' || c == '*'; /* Comments. */ case CPP_MOD: return c == ':' || c == '>'; case CPP_AND: return c == '&'; case CPP_OR: return c == '|'; case CPP_COLON: return c == ':' || c == '>'; case CPP_DEREF: return c == '*'; case CPP_DOT: return c == '.' || c == '%' || b == CPP_NUMBER; case CPP_HASH: return c == '#' || c == '%'; /* Digraph form. */ case CPP_NAME: return ((b == CPP_NUMBER && name_p (pfile, &token2->val.str)) || b == CPP_NAME || b == CPP_CHAR || b == CPP_STRING); /* L */ case CPP_NUMBER: return (b == CPP_NUMBER || b == CPP_NAME || c == '.' || c == '+' || c == '-'); /* UCNs */ case CPP_OTHER: return ((token1->val.str.text[0] == '\\' && b == CPP_NAME) || (CPP_OPTION (pfile, objc) && token1->val.str.text[0] == '@' && (b == CPP_NAME || b == CPP_STRING))); default: break; } return 0; } /* Output all the remaining tokens on the current line, and a newline character, to FP. Leading whitespace is removed. If there are macros, special token padding is not performed. */ void cpp_output_line (cpp_reader *pfile, FILE *fp) { const cpp_token *token; token = cpp_get_token (pfile); while (token->type != CPP_EOF) { cpp_output_token (token, fp); token = cpp_get_token (pfile); if (token->flags & PREV_WHITE) putc (' ', fp); } putc ('\n', fp); } /* Memory buffers. Changing these three constants can have a dramatic effect on performance. The values here are reasonable defaults, but might be tuned. If you adjust them, be sure to test across a range of uses of cpplib, including heavy nested function-like macro expansion. Also check the change in peak memory usage (NJAMD is a good tool for this). */ #define MIN_BUFF_SIZE 8000 #define BUFF_SIZE_UPPER_BOUND(MIN_SIZE) (MIN_BUFF_SIZE + (MIN_SIZE) * 3 / 2) #define EXTENDED_BUFF_SIZE(BUFF, MIN_EXTRA) \ (MIN_EXTRA + ((BUFF)->limit - (BUFF)->cur) * 2) #if MIN_BUFF_SIZE > BUFF_SIZE_UPPER_BOUND (0) #error BUFF_SIZE_UPPER_BOUND must be at least as large as MIN_BUFF_SIZE! #endif /* Create a new allocation buffer. Place the control block at the end of the buffer, so that buffer overflows will cause immediate chaos. 
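
   The resulting layout is a single allocation with the data area first and
   the control block at its very end:

       base                                      base + len
       |  data bytes ...                         | struct _cpp_buff |
       ^ result->base, result->cur               ^ result

   so a buffer overrun tramples the _cpp_buff itself (its base, cur, limit
   and next fields) before anything else, which tends to turn the overrun
   into an immediate, visible failure rather than silent corruption.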
*/ static _cpp_buff * new_buff (size_t len) { _cpp_buff *result; unsigned char *base; if (len < MIN_BUFF_SIZE) len = MIN_BUFF_SIZE; len = CPP_ALIGN (len); base = xmalloc (len + sizeof (_cpp_buff)); result = (_cpp_buff *) (base + len); result->base = base; result->cur = base; result->limit = base + len; result->next = NULL; return result; } /* Place a chain of unwanted allocation buffers on the free list. */ void _cpp_release_buff (cpp_reader *pfile, _cpp_buff *buff) { _cpp_buff *end = buff; while (end->next) end = end->next; end->next = pfile->free_buffs; pfile->free_buffs = buff; } /* Return a free buffer of size at least MIN_SIZE. */ _cpp_buff * _cpp_get_buff (cpp_reader *pfile, size_t min_size) { _cpp_buff *result, **p; for (p = &pfile->free_buffs;; p = &(*p)->next) { size_t size; if (*p == NULL) return new_buff (min_size); result = *p; size = result->limit - result->base; /* Return a buffer that's big enough, but don't waste one that's way too big. */ if (size >= min_size && size <= BUFF_SIZE_UPPER_BOUND (min_size)) break; } *p = result->next; result->next = NULL; result->cur = result->base; return result; } /* Creates a new buffer with enough space to hold the uncommitted remaining bytes of BUFF, and at least MIN_EXTRA more bytes. Copies the excess bytes to the new buffer. Chains the new buffer after BUFF, and returns the new buffer. */ _cpp_buff * _cpp_append_extend_buff (cpp_reader *pfile, _cpp_buff *buff, size_t min_extra) { size_t size = EXTENDED_BUFF_SIZE (buff, min_extra); _cpp_buff *new_buff = _cpp_get_buff (pfile, size); buff->next = new_buff; memcpy (new_buff->base, buff->cur, BUFF_ROOM (buff)); return new_buff; } /* Creates a new buffer with enough space to hold the uncommitted remaining bytes of the buffer pointed to by BUFF, and at least MIN_EXTRA more bytes. Copies the excess bytes to the new buffer. Chains the new buffer before the buffer pointed to by BUFF, and updates the pointer to point to the new buffer. */ void _cpp_extend_buff (cpp_reader *pfile, _cpp_buff **pbuff, size_t min_extra) { _cpp_buff *new_buff, *old_buff = *pbuff; size_t size = EXTENDED_BUFF_SIZE (old_buff, min_extra); new_buff = _cpp_get_buff (pfile, size); memcpy (new_buff->base, old_buff->cur, BUFF_ROOM (old_buff)); new_buff->next = old_buff; *pbuff = new_buff; } /* Free a chain of buffers starting at BUFF. */ void _cpp_free_buff (_cpp_buff *buff) { _cpp_buff *next; for (; buff; buff = next) { next = buff->next; free (buff->base); } } /* Allocate permanent, unaligned storage of length LEN. */ unsigned char * _cpp_unaligned_alloc (cpp_reader *pfile, size_t len) { _cpp_buff *buff = pfile->u_buff; unsigned char *result = buff->cur; if (len > (size_t) (buff->limit - result)) { buff = _cpp_get_buff (pfile, len); buff->next = pfile->u_buff; pfile->u_buff = buff; result = buff->cur; } buff->cur = result + len; return result; } /* Allocate permanent, unaligned storage of length LEN from a_buff. That buffer is used for growing allocations when saving macro replacement lists in a #define, and when parsing an answer to an assertion in #assert, #unassert or #if (and therefore possibly whilst expanding macros). It therefore must not be used by any code that they might call: specifically the lexer and the guts of the macro expander. All existing other uses clearly fit this restriction: storing registered pragmas during initialization. 
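
   The reason for the restriction is that those callers grow an object at
   the front of a_buff a little at a time (extending the buffer with
   _cpp_extend_buff as needed) and only commit it once complete; a nested
   allocation from a_buff while such growth is in progress would either
   hand out the very bytes being grown into, or swap a_buff for a new
   buffer, invalidating the caller's partially built object.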
*/ unsigned char * _cpp_aligned_alloc (cpp_reader *pfile, size_t len) { _cpp_buff *buff = pfile->a_buff; unsigned char *result = buff->cur; if (len > (size_t) (buff->limit - result)) { buff = _cpp_get_buff (pfile, len); buff->next = pfile->a_buff; pfile->a_buff = buff; result = buff->cur; } buff->cur = result + len; return result; } /* Say which field of TOK is in use. */ enum cpp_token_fld_kind cpp_token_val_index (cpp_token *tok) { switch (TOKEN_SPELL (tok)) { case SPELL_IDENT: return CPP_TOKEN_FLD_NODE; case SPELL_LITERAL: return CPP_TOKEN_FLD_STR; case SPELL_NONE: if (tok->type == CPP_MACRO_ARG) return CPP_TOKEN_FLD_ARG_NO; else if (tok->type == CPP_PADDING) return CPP_TOKEN_FLD_SOURCE; /* else fall through */ default: return CPP_TOKEN_FLD_NONE; } } /* Map logical line numbers to (source file, line number) pairs. Copyright (C) 2001, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ static void trace_include (const struct line_maps *, const struct line_map *); /* Initialize a line map set. */ void linemap_init (struct line_maps *set) { set->maps = NULL; set->allocated = 0; set->used = 0; set->last_listed = -1; set->trace_includes = false; set->depth = 0; set->cache = 0; set->highest_location = 0; set->highest_line = 0; set->max_column_hint = 0; } /* Check for and warn about line_maps entered but not exited. */ void linemap_check_files_exited (struct line_maps *set) { struct line_map *map; /* Depending upon whether we are handling preprocessed input or not, this can be a user error or an ICE. */ for (map = &set->maps[set->used - 1]; ! MAIN_FILE_P (map); map = INCLUDED_FROM (set, map)) fprintf (stderr, "line-map.c: file \"%s\" entered but not left\n", map->to_file); } /* Free a line map set. */ void linemap_free (struct line_maps *set) { if (set->maps) { linemap_check_files_exited (set); free (set->maps); } } /* Add a mapping of logical source line to physical source file and line number. The text pointed to by TO_FILE must have a lifetime at least as long as the final call to lookup_line (). An empty TO_FILE means standard input. If reason is LC_LEAVE, and TO_FILE is NULL, then TO_FILE, TO_LINE and SYSP are given their natural values considering the file we are returning to. FROM_LINE should be monotonic increasing across calls to this function. A call to this function can relocate the previous set of maps, so any stored line_map pointers should not be used.
*/ const struct line_map * linemap_add (struct line_maps *set, enum lc_reason reason, unsigned int sysp, const char *to_file, unsigned int to_line) { struct line_map *map; source_location start_location = set->highest_location + 1; if (set->used && start_location < set->maps[set->used - 1].start_location) abort (); if (set->used == set->allocated) { set->allocated = 2 * set->allocated + 256; set->maps = xrealloc (set->maps, set->allocated * sizeof (struct line_map)); } map = &set->maps[set->used]; if (to_file && *to_file == '\0') to_file = ""; /* If we don't keep our line maps consistent, we can easily segfault. Don't rely on the client to do it for us. */ if (set->depth == 0) reason = LC_ENTER; else if (reason == LC_LEAVE) { struct line_map *from; bool error; if (MAIN_FILE_P (map - 1)) { if (to_file == NULL) { set->depth--; return NULL; } error = true; reason = LC_RENAME; from = map - 1; } else { from = INCLUDED_FROM (set, map - 1); error = to_file && strcmp (from->to_file, to_file); } /* Depending upon whether we are handling preprocessed input or not, this can be a user error or an ICE. */ if (error) fprintf (stderr, "line-map.c: file \"%s\" left but not entered\n", to_file); /* A TO_FILE of NULL is special - we use the natural values. */ if (error || to_file == NULL) { to_file = from->to_file; to_line = SOURCE_LINE (from, from[1].start_location); sysp = from->sysp; } } map->reason = reason; map->sysp = sysp; map->start_location = start_location; map->to_file = to_file; map->to_line = to_line; set->cache = set->used++; map->column_bits = 0; set->highest_location = start_location; set->highest_line = start_location; set->max_column_hint = 0; if (reason == LC_ENTER) { map->included_from = set->depth == 0 ? -1 : (int) (set->used - 2); set->depth++; if (set->trace_includes) trace_include (set, map); } else if (reason == LC_RENAME) map->included_from = map[-1].included_from; else if (reason == LC_LEAVE) { set->depth--; map->included_from = INCLUDED_FROM (set, map - 1)->included_from; } return map; } source_location linemap_line_start (struct line_maps *set, unsigned int to_line, unsigned int max_column_hint) { struct line_map *map = &set->maps[set->used - 1]; source_location highest = set->highest_location; source_location r; unsigned int last_line = SOURCE_LINE (map, set->highest_line); int line_delta = to_line - last_line; bool add_map = false; if (line_delta < 0 || (line_delta > 10 && line_delta * map->column_bits > 1000) || (max_column_hint >= (1U << map->column_bits)) || (max_column_hint <= 80 && map->column_bits >= 10)) { add_map = true; } else max_column_hint = set->max_column_hint; if (add_map) { int column_bits; if (max_column_hint > 100000 || highest > 0xC0000000) { max_column_hint = 0; if (highest >0xF0000000) return 0; column_bits = 0; } else { column_bits = 7; while (max_column_hint >= (1U << column_bits)) column_bits++; max_column_hint = 1U << column_bits; } if (line_delta < 0 || last_line != map->to_line || SOURCE_COLUMN (map, highest) >= (1U << column_bits)) map = (struct line_map*) linemap_add (set, LC_RENAME, map->sysp, map->to_file, to_line); map->column_bits = column_bits; r = map->start_location; } else r = highest - SOURCE_COLUMN (map, highest) + (line_delta << map->column_bits); set->highest_line = r; if (r > set->highest_location) set->highest_location = r; set->max_column_hint = max_column_hint; return r; } source_location linemap_position_for_column (struct line_maps *set, unsigned int to_column) { source_location r = set->highest_line; if (to_column >= 
set->max_column_hint) { if (r >= 0xC000000 || to_column > 100000) { /* Running low on source_locations - disable column numbers. */ return r; } else { struct line_map *map = &set->maps[set->used - 1]; r = linemap_line_start (set, SOURCE_LINE (map, r), to_column + 50); } } r = r + to_column; if (r >= set->highest_location) set->highest_location = r; return r; } /* Given a logical line, returns the map from which the corresponding (source file, line) pair can be deduced. Since the set is built chronologically, the logical lines are monotonic increasing, and so the list is sorted and we can use a binary search. */ const struct line_map * linemap_lookup (struct line_maps *set, source_location line) { unsigned int md, mn, mx; const struct line_map *cached; mn = set->cache; mx = set->used; cached = &set->maps[mn]; /* We should get a segfault if no line_maps have been added yet. */ if (line >= cached->start_location) { if (mn + 1 == mx || line < cached[1].start_location) return cached; } else { mx = mn; mn = 0; } while (mx - mn > 1) { md = (mn + mx) / 2; if (set->maps[md].start_location > line) mx = md; else mn = md; } set->cache = mn; return &set->maps[mn]; } /* Print the file names and line numbers of the #include commands which led to the map MAP, if any, to stderr. Nothing is output if the most recently listed stack is the same as the current one. */ void linemap_print_containing_files (struct line_maps *set, const struct line_map *map) { if (MAIN_FILE_P (map) || set->last_listed == map->included_from) return; set->last_listed = map->included_from; map = INCLUDED_FROM (set, map); fprintf (stderr, _("In file included from %s:%u"), map->to_file, LAST_SOURCE_LINE (map)); while (! MAIN_FILE_P (map)) { map = INCLUDED_FROM (set, map); /* Translators note: this message is used in conjunction with "In file included from %s:%ld" and some other tricks. We want something like this: | In file included from sys/select.h:123, | from sys/types.h:234, | from userfile.c:31: | bits/select.h:45: with all the "from"s lined up. The trailing comma is at the beginning of this message, and the trailing colon is not translated. */ fprintf (stderr, _(",\n from %s:%u"), map->to_file, LAST_SOURCE_LINE (map)); } fputs (":\n", stderr); } /* Print an include trace, for e.g. the -H option of the preprocessor. */ static void trace_include (const struct line_maps *set, const struct line_map *map) { unsigned int i = set->depth; while (--i) putc ('.', stderr); fprintf (stderr, " %s\n", map->to_file); } /* Part of CPP library. (Macro and #define handling.) Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Written by Per Bothner, 1994. Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. 
You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ typedef struct macro_arg macro_arg; struct macro_arg { const cpp_token **first; /* First token in unexpanded argument. */ const cpp_token **expanded; /* Macro-expanded argument. */ const cpp_token *stringified; /* Stringified argument. */ unsigned int count; /* # of tokens in argument. */ unsigned int expanded_count; /* # of tokens in expanded argument. */ }; /* Macro expansion. */ static int enter_macro_context (cpp_reader *, cpp_hashnode *); static int builtin_macro (cpp_reader *, cpp_hashnode *); static void push_token_context (cpp_reader *, cpp_hashnode *, const cpp_token *, unsigned int); static void push_ptoken_context (cpp_reader *, cpp_hashnode *, _cpp_buff *, const cpp_token **, unsigned int); static _cpp_buff *collect_args (cpp_reader *, const cpp_hashnode *); static cpp_context *next_context (cpp_reader *); static const cpp_token *padding_token (cpp_reader *, const cpp_token *); static void expand_arg (cpp_reader *, macro_arg *); static const cpp_token *new_string_token (cpp_reader *, uchar *, unsigned int); static const cpp_token *stringify_arg (cpp_reader *, macro_arg *); static void paste_all_tokens (cpp_reader *, const cpp_token *); static bool paste_tokens (cpp_reader *, const cpp_token **, const cpp_token *); static void replace_macro_args (cpp_reader *, cpp_hashnode *, cpp_macro *, macro_arg *); static _cpp_buff *funlike_invocation_p (cpp_reader *, cpp_hashnode *); static bool create_iso_definition (cpp_reader *, cpp_macro *); /* #define directive parsing and handling. */ static cpp_token *alloc_expansion_token (cpp_reader *, cpp_macro *); static cpp_token *lex_expansion_token (cpp_reader *, cpp_macro *); static bool warn_of_redefinition (cpp_reader *, const cpp_hashnode *, const cpp_macro *); static bool parse_params (cpp_reader *, cpp_macro *); static void check_trad_stringification (cpp_reader *, const cpp_macro *, const cpp_string *); /* Emits a warning if NODE is a macro defined in the main file that has not been used. */ int _cpp_warn_if_unused_macro (cpp_reader *pfile, cpp_hashnode *node, void *v ATTRIBUTE_UNUSED) { if (node->type == NT_MACRO && !(node->flags & NODE_BUILTIN)) { cpp_macro *macro = node->value.macro; if (!macro->used && MAIN_FILE_P (linemap_lookup (pfile->line_table, macro->line))) cpp_error_with_line (pfile, CPP_DL_WARNING, macro->line, 0, "macro \"%s\" is not used", NODE_NAME (node)); } return 1; } /* Allocates and returns a CPP_STRING token, containing TEXT of length LEN, after null-terminating it. TEXT must be in permanent storage. */ static const cpp_token * new_string_token (cpp_reader *pfile, unsigned char *text, unsigned int len) { cpp_token *token = _cpp_temp_token (pfile); text[len] = '\0'; token->type = CPP_STRING; token->val.str.len = len; token->val.str.text = text; token->flags = 0; return token; } static const char * const monthnames[] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; /* Handle builtin macros like __FILE__, and push the resulting token on the context stack. Also handles _Pragma, for which no new token is created. Returns 1 if it generates a new token context, 0 to return the token to the caller. 
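For illustration (file names invented): expanded inside a header included from foo.c, __FILE__ yields the quoted name of that header while __BASE_FILE__ walks the include chain back to foo.c; numeric builtins such as __LINE__ and __INCLUDE_LEVEL__ just set "number" and are printed by the sprintf fallback at the end of the function.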
*/ const uchar * _cpp_builtin_macro_text (cpp_reader *pfile, cpp_hashnode *node) { const struct line_map *map; const uchar *result = NULL; unsigned int number = 1; switch (node->value.builtin) { default: cpp_error (pfile, CPP_DL_ICE, "invalid built-in macro \"%s\"", NODE_NAME (node)); break; case BT_FILE: case BT_BASE_FILE: { unsigned int len; const char *name; uchar *buf; map = linemap_lookup (pfile->line_table, pfile->line_table->highest_line); if (node->value.builtin == BT_BASE_FILE) while (! MAIN_FILE_P (map)) map = INCLUDED_FROM (pfile->line_table, map); name = map->to_file; len = strlen (name); buf = _cpp_unaligned_alloc (pfile, len * 4 + 3); result = buf; *buf = '"'; buf = cpp_quote_string (buf + 1, (const unsigned char *) name, len); *buf++ = '"'; *buf = '\0'; } break; case BT_INCLUDE_LEVEL: /* The line map depth counts the primary source as level 1, but historically __INCLUDE_DEPTH__ has called the primary source level 0. */ number = pfile->line_table->depth - 1; break; case BT_SPECLINE: map = &pfile->line_table->maps[pfile->line_table->used-1]; /* If __LINE__ is embedded in a macro, it must expand to the line of the macro's invocation, not its definition. Otherwise things like assert() will not work properly. */ if (CPP_OPTION (pfile, traditional)) number = pfile->line_table->highest_line; else number = pfile->cur_token[-1].src_loc; number = SOURCE_LINE (map, number); break; /* __STDC__ has the value 1 under normal circumstances. However, if (a) we are in a system header, (b) the option stdc_0_in_system_headers is true (set by target config), and (c) we are not in strictly conforming mode, then it has the value 0. */ case BT_STDC: { if (cpp_in_system_header (pfile) && CPP_OPTION (pfile, stdc_0_in_system_headers) && !CPP_OPTION (pfile,std)) number = 0; else number = 1; } break; case BT_DATE: case BT_TIME: if (pfile->date == NULL) { /* Allocate __DATE__ and __TIME__ strings from permanent storage. We only do this once, and don't generate them at init time, because time() and localtime() are very slow on some systems. */ time_t tt; struct tm *tb = NULL; /* (time_t) -1 is a legitimate value for "number of seconds since the Epoch", so we have to do a little dance to distinguish that from a genuine error. */ errno = 0; tt = time(NULL); if (tt != (time_t)-1 || errno == 0) tb = localtime (&tt); if (tb) { pfile->date = _cpp_unaligned_alloc (pfile, sizeof ("\"Oct 11 1347\"")); sprintf ((char *) pfile->date, "\"%s %2d %4d\"", monthnames[tb->tm_mon], tb->tm_mday, tb->tm_year + 1900); pfile->time = _cpp_unaligned_alloc (pfile, sizeof ("\"12:34:56\"")); sprintf ((char *) pfile->time, "\"%02d:%02d:%02d\"", tb->tm_hour, tb->tm_min, tb->tm_sec); } else { cpp_errno (pfile, CPP_DL_WARNING, "could not determine date and time"); pfile->date = USTR"\"??? ?? ????\""; pfile->time = USTR"\"??:??:??\""; } } if (node->value.builtin == BT_DATE) result = pfile->date; else result = pfile->time; break; } if (result == NULL) { /* 21 bytes holds all NUL-terminated unsigned 64-bit numbers. */ result = _cpp_unaligned_alloc (pfile, 21); sprintf ((char *) result, "%u", number); } return result; } /* Convert builtin macros like __FILE__ to a token and push it on the context stack. Also handles _Pragma, for which no new token is created. Returns 1 if it generates a new token context, 0 to return the token to the caller. */ static int builtin_macro (cpp_reader *pfile, cpp_hashnode *node) { const uchar *buf; size_t len; char *nbuf; if (node->value.builtin == BT_PRAGMA) { /* Don't interpret _Pragma within directives. 
The standard is not clear on this, but to me this makes most sense. */ if (pfile->state.in_directive) return 0; _cpp_do__Pragma (pfile); return 1; } buf = _cpp_builtin_macro_text (pfile, node); len = ustrlen (buf); nbuf = alloca (len + 1); memcpy (nbuf, buf, len); nbuf[len]='\n'; cpp_push_buffer (pfile, (uchar *) nbuf, len, /* from_stage3 */ true); _cpp_clean_line (pfile); /* Set pfile->cur_token as required by _cpp_lex_direct. */ pfile->cur_token = _cpp_temp_token (pfile); push_token_context (pfile, NULL, _cpp_lex_direct (pfile), 1); if (pfile->buffer->cur != pfile->buffer->rlimit) cpp_error (pfile, CPP_DL_ICE, "invalid built-in macro \"%s\"", NODE_NAME (node)); _cpp_pop_buffer (pfile); return 1; } /* Copies SRC, of length LEN, to DEST, adding backslashes before all backslashes and double quotes. Non-printable characters are converted to octal. DEST must be of sufficient size. Returns a pointer to the end of the string. */ uchar * cpp_quote_string (uchar *dest, const uchar *src, unsigned int len) { while (len--) { uchar c = *src++; if (c == '\\' || c == '"') { *dest++ = '\\'; *dest++ = c; } else { if (ISPRINT (c)) *dest++ = c; else { sprintf ((char *) dest, "\\%03o", c); dest += 4; } } } return dest; } /* Convert a token sequence ARG to a single string token according to the rules of the ISO C #-operator. */ static const cpp_token * stringify_arg (cpp_reader *pfile, macro_arg *arg) { unsigned char *dest; unsigned int i, escape_it, backslash_count = 0; const cpp_token *source = NULL; size_t len; if (BUFF_ROOM (pfile->u_buff) < 3) _cpp_extend_buff (pfile, &pfile->u_buff, 3); dest = BUFF_FRONT (pfile->u_buff); *dest++ = '"'; /* Loop, reading in the argument's tokens. */ for (i = 0; i < arg->count; i++) { const cpp_token *token = arg->first[i]; if (token->type == CPP_PADDING) { if (source == NULL) source = token->val.source; continue; } escape_it = (token->type == CPP_STRING || token->type == CPP_WSTRING || token->type == CPP_CHAR || token->type == CPP_WCHAR); /* Room for each char being written in octal, initial space and final quote and NUL. */ len = cpp_token_len (token); if (escape_it) len *= 4; len += 3; if ((size_t) (BUFF_LIMIT (pfile->u_buff) - dest) < len) { size_t len_so_far = dest - BUFF_FRONT (pfile->u_buff); _cpp_extend_buff (pfile, &pfile->u_buff, len); dest = BUFF_FRONT (pfile->u_buff) + len_so_far; } /* Leading white space? */ if (dest - 1 != BUFF_FRONT (pfile->u_buff)) { if (source == NULL) source = token; if (source->flags & PREV_WHITE) *dest++ = ' '; } source = NULL; if (escape_it) { _cpp_buff *buff = _cpp_get_buff (pfile, len); unsigned char *buf = BUFF_FRONT (buff); len = cpp_spell_token (pfile, token, buf) - buf; dest = cpp_quote_string (dest, buf, len); _cpp_release_buff (pfile, buff); } else dest = cpp_spell_token (pfile, token, dest); if (token->type == CPP_OTHER && token->val.str.text[0] == '\\') backslash_count++; else backslash_count = 0; } /* Ignore the final \ of invalid string literals. */ if (backslash_count & 1) { cpp_error (pfile, CPP_DL_WARNING, "invalid string literal, ignoring final '\\'"); dest--; } /* Commit the memory, including NUL, and return the token. */ *dest++ = '"'; len = dest - BUFF_FRONT (pfile->u_buff); BUFF_FRONT (pfile->u_buff) = dest + 1; return new_string_token (pfile, dest - len, len); } /* Try to paste two tokens. On success, return nonzero. In any case, PLHS is updated to point to the pasted token, which is guaranteed to not have the PASTE_LEFT flag set. 
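For example (an illustrative pair of operands): pasting 12 and 34 lexes back as the single pp-number 1234 and succeeds, whereas pasting x and + leaves the '+' unconsumed in the pushed buffer, so the cursor never reaches rlimit and the function returns false for the caller to diagnose.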
*/ static bool paste_tokens (cpp_reader *pfile, const cpp_token **plhs, const cpp_token *rhs) { unsigned char *buf, *end; const cpp_token *lhs; unsigned int len; bool valid; lhs = *plhs; len = cpp_token_len (lhs) + cpp_token_len (rhs) + 1; buf = alloca (len); end = cpp_spell_token (pfile, lhs, buf); /* Avoid comment headers, since they are still processed in stage 3. It is simpler to insert a space here, rather than modifying the lexer to ignore comments in some circumstances. Simply returning false doesn't work, since we want to clear the PASTE_LEFT flag. */ if (lhs->type == CPP_DIV && rhs->type != CPP_EQ) *end++ = ' '; end = cpp_spell_token (pfile, rhs, end); *end = '\n'; cpp_push_buffer (pfile, buf, end - buf, /* from_stage3 */ true); _cpp_clean_line (pfile); /* Set pfile->cur_token as required by _cpp_lex_direct. */ pfile->cur_token = _cpp_temp_token (pfile); *plhs = _cpp_lex_direct (pfile); valid = pfile->buffer->cur == pfile->buffer->rlimit; _cpp_pop_buffer (pfile); return valid; } /* Handles an arbitrarily long sequence of ## operators, with initial operand LHS. This implementation is left-associative, non-recursive, and finishes a paste before handling succeeding ones. If a paste fails, we back up to the RHS of the failing ## operator before pushing the context containing the result of prior successful pastes, with the effect that the RHS appears in the output stream after the pasted LHS normally. */ static void paste_all_tokens (cpp_reader *pfile, const cpp_token *lhs) { const cpp_token *rhs; cpp_context *context = pfile->context; do { /* Take the token directly from the current context. We can do this, because we are in the replacement list of either an object-like macro, or a function-like macro with arguments inserted. In either case, the constraints to #define guarantee we have at least one more token. */ if (context->direct_p) rhs = FIRST (context).token++; else rhs = *FIRST (context).ptoken++; if (rhs->type == CPP_PADDING) abort (); if (!paste_tokens (pfile, &lhs, rhs)) { _cpp_backup_tokens (pfile, 1); /* Mandatory error for all apart from assembler. */ if (CPP_OPTION (pfile, lang) != CLK_ASM) cpp_error (pfile, CPP_DL_ERROR, "pasting \"%s\" and \"%s\" does not give a valid preprocessing token", cpp_token_as_text (pfile, lhs), cpp_token_as_text (pfile, rhs)); break; } } while (rhs->flags & PASTE_LEFT); /* Put the resulting token in its own context. */ push_token_context (pfile, NULL, lhs, 1); } /* Returns TRUE if the number of arguments ARGC supplied in an invocation of the MACRO referenced by NODE is valid. An empty invocation to a macro with no parameters should pass ARGC as zero. Note that MACRO cannot necessarily be deduced from NODE, in case NODE was redefined whilst collecting arguments. */ bool _cpp_arguments_ok (cpp_reader *pfile, cpp_macro *macro, const cpp_hashnode *node, unsigned int argc) { if (argc == macro->paramc) return true; if (argc < macro->paramc) { /* As an extension, a rest argument is allowed to not appear in the invocation at all. e.g. #define debug(format, args...) something debug("string"); This is exactly the same as if there had been an empty rest argument - debug("string", ). */ if (argc + 1 == macro->paramc && macro->variadic) { if (CPP_PEDANTIC (pfile) && ! 
macro->syshdr) cpp_error (pfile, CPP_DL_PEDWARN, "ISO C99 requires rest arguments to be used"); return true; } cpp_error (pfile, CPP_DL_ERROR, "macro \"%s\" requires %u arguments, but only %u given", NODE_NAME (node), macro->paramc, argc); } else cpp_error (pfile, CPP_DL_ERROR, "macro \"%s\" passed %u arguments, but takes just %u", NODE_NAME (node), argc, macro->paramc); return false; } /* Reads and returns the arguments to a function-like macro invocation. Assumes the opening parenthesis has been processed. If there is an error, emits an appropriate diagnostic and returns NULL. Each argument is terminated by a CPP_EOF token, for the future benefit of expand_arg(). */ static _cpp_buff * collect_args (cpp_reader *pfile, const cpp_hashnode *node) { _cpp_buff *buff, *base_buff; cpp_macro *macro; macro_arg *args, *arg; const cpp_token *token; unsigned int argc; macro = node->value.macro; if (macro->paramc) argc = macro->paramc; else argc = 1; buff = _cpp_get_buff (pfile, argc * (50 * sizeof (cpp_token *) + sizeof (macro_arg))); base_buff = buff; args = (macro_arg *) buff->base; memset (args, 0, argc * sizeof (macro_arg)); buff->cur = (unsigned char *) &args[argc]; arg = args, argc = 0; /* Collect the tokens making up each argument. We don't yet know how many arguments have been supplied, whether too many or too few. Hence the slightly bizarre usage of "argc" and "arg". */ do { unsigned int paren_depth = 0; unsigned int ntokens = 0; argc++; arg->first = (const cpp_token **) buff->cur; for (;;) { /* Require space for 2 new tokens (including a CPP_EOF). */ if ((unsigned char *) &arg->first[ntokens + 2] > buff->limit) { buff = _cpp_append_extend_buff (pfile, buff, 1000 * sizeof (cpp_token *)); arg->first = (const cpp_token **) buff->cur; } token = cpp_get_token (pfile); if (token->type == CPP_PADDING) { /* Drop leading padding. */ if (ntokens == 0) continue; } else if (token->type == CPP_OPEN_PAREN) paren_depth++; else if (token->type == CPP_CLOSE_PAREN) { if (paren_depth-- == 0) break; } else if (token->type == CPP_COMMA) { /* A comma does not terminate an argument within parentheses or as part of a variable argument. */ if (paren_depth == 0 && ! (macro->variadic && argc == macro->paramc)) break; } else if (token->type == CPP_EOF || (token->type == CPP_HASH && token->flags & BOL)) break; arg->first[ntokens++] = token; } /* Drop trailing padding. */ while (ntokens > 0 && arg->first[ntokens - 1]->type == CPP_PADDING) ntokens--; arg->count = ntokens; arg->first[ntokens] = &pfile->eof; /* Terminate the argument. Excess arguments loop back and overwrite the final legitimate argument, before failing. */ if (argc <= macro->paramc) { buff->cur = (unsigned char *) &arg->first[ntokens + 1]; if (argc != macro->paramc) arg++; } } while (token->type != CPP_CLOSE_PAREN && token->type != CPP_EOF); if (token->type == CPP_EOF) { /* We still need the CPP_EOF to end directives, and to end pre-expansion of a macro argument. Step back is not unconditional, since we don't want to return a CPP_EOF to our callers at the end of an -include-d file. */ if (pfile->context->prev || pfile->state.in_directive) _cpp_backup_tokens (pfile, 1); cpp_error (pfile, CPP_DL_ERROR, "unterminated argument list invoking macro \"%s\"", NODE_NAME (node)); } else { /* A single empty argument is counted as no argument. 
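For instance (an invented macro): with #define F() foo, the invocation F() collects one argument of zero tokens, which the check below converts to argc == 0 so that it matches F's empty parameter list; F(x) is still rejected by _cpp_arguments_ok.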
*/ if (argc == 1 && macro->paramc == 0 && args[0].count == 0) argc = 0; if (_cpp_arguments_ok (pfile, macro, node, argc)) { /* GCC has special semantics for , ## b where b is a varargs parameter: we remove the comma if b was omitted entirely. If b was merely an empty argument, the comma is retained. If the macro takes just one (varargs) parameter, then we retain the comma only if we are standards conforming. If FIRST is NULL replace_macro_args () swallows the comma. */ if (macro->variadic && (argc < macro->paramc || (argc == 1 && args[0].count == 0 && !CPP_OPTION (pfile, std)))) args[macro->paramc - 1].first = NULL; return base_buff; } } /* An error occurred. */ _cpp_release_buff (pfile, base_buff); return NULL; } /* Search for an opening parenthesis to the macro of NODE, in such a way that, if none is found, we don't lose the information in any intervening padding tokens. If we find the parenthesis, collect the arguments and return the buffer containing them. */ static _cpp_buff * funlike_invocation_p (cpp_reader *pfile, cpp_hashnode *node) { const cpp_token *token, *padding = NULL; for (;;) { token = cpp_get_token (pfile); if (token->type != CPP_PADDING) break; if (padding == NULL || (!(padding->flags & PREV_WHITE) && token->val.source == NULL)) padding = token; } if (token->type == CPP_OPEN_PAREN) { pfile->state.parsing_args = 2; return collect_args (pfile, node); } /* CPP_EOF can be the end of macro arguments, or the end of the file. We mustn't back up over the latter. Ugh. */ if (token->type != CPP_EOF || token == &pfile->eof) { /* Back up. We may have skipped padding, in which case backing up more than one token when expanding macros is in general too difficult. We re-insert it in its own context. */ _cpp_backup_tokens (pfile, 1); if (padding) push_token_context (pfile, NULL, padding, 1); } return NULL; } /* Push the context of a macro with hash entry NODE onto the context stack. If we can successfully expand the macro, we push a context containing its yet-to-be-rescanned replacement list and return one. Otherwise, we don't push a context and return zero. */ static int enter_macro_context (cpp_reader *pfile, cpp_hashnode *node) { /* The presence of a macro invalidates a file's controlling macro. */ pfile->mi_valid = false; pfile->state.angled_headers = false; /* Handle standard macros. */ if (! (node->flags & NODE_BUILTIN)) { cpp_macro *macro = node->value.macro; if (macro->fun_like) { _cpp_buff *buff; pfile->state.prevent_expansion++; pfile->keep_tokens++; pfile->state.parsing_args = 1; buff = funlike_invocation_p (pfile, node); pfile->state.parsing_args = 0; pfile->keep_tokens--; pfile->state.prevent_expansion--; if (buff == NULL) { if (CPP_WTRADITIONAL (pfile) && ! node->value.macro->syshdr) cpp_error (pfile, CPP_DL_WARNING, "function-like macro \"%s\" must be used with arguments in traditional C", NODE_NAME (node)); return 0; } if (macro->paramc > 0) replace_macro_args (pfile, node, macro, (macro_arg *) buff->base); _cpp_release_buff (pfile, buff); } /* Disable the macro within its expansion. */ node->flags |= NODE_DISABLED; macro->used = 1; if (macro->paramc == 0) push_token_context (pfile, node, macro->exp.tokens, macro->count); return 1; } /* Handle built-in macros and the _Pragma operator. */ return builtin_macro (pfile, node); } /* Replace the parameters in a function-like macro of NODE with the actual ARGS, and place the result in a newly pushed token context. Expand each argument before replacing, unless it is operated upon by the # or ## operators. 
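A small illustrative case (macro names invented): with #define ONE 1 and #define CAT(a, b) a ## b, the invocation CAT(ONE, 2) substitutes the argument tokens unexpanded because they are operands of ##, giving ONE2 after pasting; a parameter used outside # and ## receives the fully expanded argument, here 1.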
*/ static void replace_macro_args (cpp_reader *pfile, cpp_hashnode *node, cpp_macro *macro, macro_arg *args) { unsigned int i, total; const cpp_token *src, *limit; const cpp_token **dest, **first; macro_arg *arg; _cpp_buff *buff; /* First, fully macro-expand arguments, calculating the number of tokens in the final expansion as we go. The ordering of the if statements below is subtle; we must handle stringification before pasting. */ total = macro->count; limit = macro->exp.tokens + macro->count; for (src = macro->exp.tokens; src < limit; src++) if (src->type == CPP_MACRO_ARG) { /* Leading and trailing padding tokens. */ total += 2; /* We have an argument. If it is not being stringified or pasted it is macro-replaced before insertion. */ arg = &args[src->val.arg_no - 1]; if (src->flags & STRINGIFY_ARG) { if (!arg->stringified) arg->stringified = stringify_arg (pfile, arg); } else if ((src->flags & PASTE_LEFT) || (src > macro->exp.tokens && (src[-1].flags & PASTE_LEFT))) total += arg->count - 1; else { if (!arg->expanded) expand_arg (pfile, arg); total += arg->expanded_count - 1; } } /* Now allocate space for the expansion, copy the tokens and replace the arguments. */ buff = _cpp_get_buff (pfile, total * sizeof (cpp_token *)); first = (const cpp_token **) buff->base; dest = first; for (src = macro->exp.tokens; src < limit; src++) { unsigned int count; const cpp_token **from, **paste_flag; if (src->type != CPP_MACRO_ARG) { *dest++ = src; continue; } paste_flag = 0; arg = &args[src->val.arg_no - 1]; if (src->flags & STRINGIFY_ARG) count = 1, from = &arg->stringified; else if (src->flags & PASTE_LEFT) count = arg->count, from = arg->first; else if (src != macro->exp.tokens && (src[-1].flags & PASTE_LEFT)) { count = arg->count, from = arg->first; if (dest != first) { if (dest[-1]->type == CPP_COMMA && macro->variadic && src->val.arg_no == macro->paramc) { /* Swallow a pasted comma if from == NULL, otherwise drop the paste flag. */ if (from == NULL) dest--; else paste_flag = dest - 1; } /* Remove the paste flag if the RHS is a placemarker. */ else if (count == 0) paste_flag = dest - 1; } } else count = arg->expanded_count, from = arg->expanded; /* Padding on the left of an argument (unless RHS of ##). */ if ((!pfile->state.in_directive || pfile->state.directive_wants_padding) && src != macro->exp.tokens && !(src[-1].flags & PASTE_LEFT)) *dest++ = padding_token (pfile, src); if (count) { memcpy (dest, from, count * sizeof (cpp_token *)); dest += count; /* With a non-empty argument on the LHS of ##, the last token should be flagged PASTE_LEFT. */ if (src->flags & PASTE_LEFT) paste_flag = dest - 1; } /* Avoid paste on RHS (even case count == 0). */ if (!pfile->state.in_directive && !(src->flags & PASTE_LEFT)) *dest++ = &pfile->avoid_paste; /* Add a new paste flag, or remove an unwanted one. */ if (paste_flag) { cpp_token *token = _cpp_temp_token (pfile); token->type = (*paste_flag)->type; token->val.str = (*paste_flag)->val.str; if (src->flags & PASTE_LEFT) token->flags = (*paste_flag)->flags | PASTE_LEFT; else token->flags = (*paste_flag)->flags & ~PASTE_LEFT; *paste_flag = token; } } /* Free the expanded arguments. */ for (i = 0; i < macro->paramc; i++) if (args[i].expanded) free (args[i].expanded); push_ptoken_context (pfile, node, buff, first, dest - first); } /* Return a special padding token, with padding inherited from SOURCE. 
*/ static const cpp_token * padding_token (cpp_reader *pfile, const cpp_token *source) { cpp_token *result = _cpp_temp_token (pfile); result->type = CPP_PADDING; result->val.source = (cpp_token *) source; result->flags = 0; return result; } /* Get a new uninitialized context. Create a new one if we cannot re-use an old one. */ static cpp_context * next_context (cpp_reader *pfile) { cpp_context *result = pfile->context->next; if (result == 0) { result = xnew (cpp_context); result->prev = pfile->context; result->next = 0; pfile->context->next = result; } pfile->context = result; return result; } /* Push a list of pointers to tokens. */ static void push_ptoken_context (cpp_reader *pfile, cpp_hashnode *macro, _cpp_buff *buff, const cpp_token **first, unsigned int count) { cpp_context *context = next_context (pfile); context->direct_p = false; context->macro = macro; context->buff = buff; FIRST (context).ptoken = first; LAST (context).ptoken = first + count; } /* Push a list of tokens. */ static void push_token_context (cpp_reader *pfile, cpp_hashnode *macro, const cpp_token *first, unsigned int count) { cpp_context *context = next_context (pfile); context->direct_p = true; context->macro = macro; context->buff = NULL; FIRST (context).token = first; LAST (context).token = first + count; } /* Push a traditional macro's replacement text. */ void _cpp_push_text_context (cpp_reader *pfile, cpp_hashnode *macro, const uchar *start, size_t len) { cpp_context *context = next_context (pfile); context->direct_p = true; context->macro = macro; context->buff = NULL; CUR (context) = start; RLIMIT (context) = start + len; macro->flags |= NODE_DISABLED; } /* Expand an argument ARG before replacing parameters in a function-like macro. This works by pushing a context with the argument's tokens, and then expanding that into a temporary buffer as if it were a normal part of the token stream. collect_args() has terminated the argument's tokens with a CPP_EOF so that we know when we have fully expanded the argument. */ static void expand_arg (cpp_reader *pfile, macro_arg *arg) { unsigned int capacity; bool saved_warn_trad; if (arg->count == 0) return; /* Don't warn about funlike macros when pre-expanding. */ saved_warn_trad = CPP_WTRADITIONAL (pfile); CPP_WTRADITIONAL (pfile) = 0; /* Loop, reading in the arguments. */ capacity = 256; arg->expanded = xmalloc (capacity * sizeof (cpp_token *)); push_ptoken_context (pfile, NULL, NULL, arg->first, arg->count + 1); for (;;) { const cpp_token *token; if (arg->expanded_count + 1 >= capacity) { capacity *= 2; arg->expanded = xrealloc (arg->expanded, capacity * sizeof (cpp_token *)); } token = cpp_get_token (pfile); if (token->type == CPP_EOF) break; arg->expanded[arg->expanded_count++] = token; } _cpp_pop_context (pfile); CPP_WTRADITIONAL (pfile) = saved_warn_trad; } /* Pop the current context off the stack, re-enabling the macro if the context represented a macro's replacement list. The context structure is not freed so that we can re-use it later. */ void _cpp_pop_context (cpp_reader *pfile) { cpp_context *context = pfile->context; if (context->macro) context->macro->flags &= ~NODE_DISABLED; if (context->buff) _cpp_release_buff (pfile, context->buff); pfile->context = context->prev; } /* External routine to get a token. Also used nearly everywhere internally, except for places where we know we can safely call _cpp_lex_token directly, such as lexing a directive name. Macro expansions and directives are transparently handled, including entering included files. 
Thus tokens are post-macro expansion, and after any intervening directives. External callers see CPP_EOF only at EOF. Internal callers also see it when meeting a directive inside a macro call, when at the end of a directive and state.in_directive is still 1, and at the end of argument pre-expansion. */ const cpp_token * cpp_get_token (cpp_reader *pfile) { const cpp_token *result; for (;;) { cpp_hashnode *node; cpp_context *context = pfile->context; /* Context->prev == 0 <=> base context. */ if (!context->prev) result = _cpp_lex_token (pfile); else if (FIRST (context).token != LAST (context).token) { if (context->direct_p) result = FIRST (context).token++; else result = *FIRST (context).ptoken++; if (result->flags & PASTE_LEFT) { paste_all_tokens (pfile, result); if (pfile->state.in_directive) continue; return padding_token (pfile, result); } } else { _cpp_pop_context (pfile); if (pfile->state.in_directive) continue; return &pfile->avoid_paste; } if (pfile->state.in_directive && result->type == CPP_COMMENT) continue; if (result->type != CPP_NAME) break; node = result->val.node; if (node->type != NT_MACRO || (result->flags & NO_EXPAND)) break; if (!(node->flags & NODE_DISABLED)) { if (!pfile->state.prevent_expansion && enter_macro_context (pfile, node)) { if (pfile->state.in_directive) continue; return padding_token (pfile, result); } } else { /* Flag this token as always unexpandable. FIXME: move this to collect_args()?. */ cpp_token *t = _cpp_temp_token (pfile); t->type = result->type; t->flags = result->flags | NO_EXPAND; t->val.str = result->val.str; result = t; } break; } return result; } /* Returns true if we're expanding an object-like macro that was defined in a system header. Just checks the macro at the top of the stack. Used for diagnostic suppression. */ int cpp_sys_macro_p (cpp_reader *pfile) { cpp_hashnode *node = pfile->context->macro; return node && node->value.macro && node->value.macro->syshdr; } /* Read each token in, until end of the current file. Directives are transparently processed. */ void cpp_scan_nooutput (cpp_reader *pfile) { /* Request a CPP_EOF token at the end of this file, rather than transparently continuing with the including file. */ pfile->buffer->return_at_eof = true; pfile->state.discarding_output++; pfile->state.prevent_expansion++; if (CPP_OPTION (pfile, traditional)) while (_cpp_read_logical_line_trad (pfile)) ; else while (cpp_get_token (pfile)->type != CPP_EOF) ; pfile->state.discarding_output--; pfile->state.prevent_expansion--; } /* Step back one (or more) tokens. Can only step back more than 1 if they are from the lexer, and not from macro expansion. */ void _cpp_backup_tokens (cpp_reader *pfile, unsigned int count) { if (pfile->context->prev == NULL) { pfile->lookaheads += count; while (count--) { pfile->cur_token--; if (pfile->cur_token == pfile->cur_run->base /* Possible with -fpreprocessed and no leading #line. */ && pfile->cur_run->prev != NULL) { pfile->cur_run = pfile->cur_run->prev; pfile->cur_token = pfile->cur_run->limit; } } } else { if (count != 1) abort (); if (pfile->context->direct_p) FIRST (pfile->context).token--; else FIRST (pfile->context).ptoken--; } } /* #define directive parsing and handling. */ /* Returns nonzero if a macro redefinition warning is required. */ static bool warn_of_redefinition (cpp_reader *pfile, const cpp_hashnode *node, const cpp_macro *macro2) { const cpp_macro *macro1; unsigned int i; /* Some redefinitions need to be warned about regardless.
*/ if (node->flags & NODE_WARN) return true; /* Redefinition of a macro is allowed if and only if the old and new definitions are the same. (6.10.3 paragraph 2). */ macro1 = node->value.macro; /* Don't check count here as it can be different in valid traditional redefinitions with just whitespace differences. */ if (macro1->paramc != macro2->paramc || macro1->fun_like != macro2->fun_like || macro1->variadic != macro2->variadic) return true; /* Check parameter spellings. */ for (i = 0; i < macro1->paramc; i++) if (macro1->params[i] != macro2->params[i]) return true; /* Check the replacement text or tokens. */ if (CPP_OPTION (pfile, traditional)) return _cpp_expansions_different_trad (macro1, macro2); if (macro1->count != macro2->count) return true; for (i = 0; i < macro1->count; i++) if (!_cpp_equiv_tokens (&macro1->exp.tokens[i], &macro2->exp.tokens[i])) return true; return false; } /* Free the definition of hashnode H. */ void _cpp_free_definition (cpp_hashnode *h) { /* Macros and assertions no longer have anything to free. */ h->type = NT_VOID; /* Clear builtin flag in case of redefinition. */ h->flags &= ~(NODE_BUILTIN | NODE_DISABLED); } /* Save parameter NODE to the parameter list of macro MACRO. Returns zero on success, nonzero if the parameter is a duplicate. */ bool _cpp_save_parameter (cpp_reader *pfile, cpp_macro *macro, cpp_hashnode *node) { unsigned int len; /* Constraint 6.10.3.6 - duplicate parameter names. */ if (node->flags & NODE_MACRO_ARG) { cpp_error (pfile, CPP_DL_ERROR, "duplicate macro parameter \"%s\"", NODE_NAME (node)); return true; } if (BUFF_ROOM (pfile->a_buff) < (macro->paramc + 1) * sizeof (cpp_hashnode *)) _cpp_extend_buff (pfile, &pfile->a_buff, sizeof (cpp_hashnode *)); ((cpp_hashnode **) BUFF_FRONT (pfile->a_buff))[macro->paramc++] = node; node->flags |= NODE_MACRO_ARG; len = macro->paramc * sizeof (union _cpp_hashnode_value); if (len > pfile->macro_buffer_len) { pfile->macro_buffer = xrealloc (pfile->macro_buffer, len); pfile->macro_buffer_len = len; } ((union _cpp_hashnode_value *) pfile->macro_buffer)[macro->paramc - 1] = node->value; node->value.arg_index = macro->paramc; return false; } /* Check the syntax of the parameters in a MACRO definition. Returns false if an error occurs. */ static bool parse_params (cpp_reader *pfile, cpp_macro *macro) { unsigned int prev_ident = 0; for (;;) { const cpp_token *token = _cpp_lex_token (pfile); switch (token->type) { default: /* Allow/ignore comments in parameter lists if we are preserving comments in macro expansions. */ if (token->type == CPP_COMMENT && ! CPP_OPTION (pfile, discard_comments_in_macro_exp)) continue; cpp_error (pfile, CPP_DL_ERROR, "\"%s\" may not appear in macro parameter list", cpp_token_as_text (pfile, token)); return false; case CPP_NAME: if (prev_ident) { cpp_error (pfile, CPP_DL_ERROR, "macro parameters must be comma-separated"); return false; } prev_ident = 1; if (_cpp_save_parameter (pfile, macro, token->val.node)) return false; continue; case CPP_CLOSE_PAREN: if (prev_ident || macro->paramc == 0) return true; /* Fall through to pick up the error. */ case CPP_COMMA: if (!prev_ident) { cpp_error (pfile, CPP_DL_ERROR, "parameter name missing"); return false; } prev_ident = 0; continue; case CPP_ELLIPSIS: macro->variadic = 1; if (!prev_ident) { _cpp_save_parameter (pfile, macro, pfile->spec_nodes.n__VA_ARGS__); pfile->state.va_args_ok = 1; if (!
CPP_OPTION (pfile, c99) && CPP_OPTION (pfile, pedantic) && CPP_OPTION (pfile, warn_variadic_macros)) cpp_error (pfile, CPP_DL_PEDWARN, "anonymous variadic macros were introduced in C99"); } else if (CPP_OPTION (pfile, pedantic) && CPP_OPTION (pfile, warn_variadic_macros)) cpp_error (pfile, CPP_DL_PEDWARN, "ISO C does not permit named variadic macros"); /* We're at the end, and just expect a closing parenthesis. */ token = _cpp_lex_token (pfile); if (token->type == CPP_CLOSE_PAREN) return true; /* Fall through. */ case CPP_EOF: cpp_error (pfile, CPP_DL_ERROR, "missing ')' in macro parameter list"); return false; } } } /* Allocate room for a token from a macro's replacement list. */ static cpp_token * alloc_expansion_token (cpp_reader *pfile, cpp_macro *macro) { if (BUFF_ROOM (pfile->a_buff) < (macro->count + 1) * sizeof (cpp_token)) _cpp_extend_buff (pfile, &pfile->a_buff, sizeof (cpp_token)); return &((cpp_token *) BUFF_FRONT (pfile->a_buff))[macro->count++]; } /* Lex a token from the expansion of MACRO, but mark parameters as we find them and warn of traditional stringification. */ static cpp_token * lex_expansion_token (cpp_reader *pfile, cpp_macro *macro) { cpp_token *token; pfile->cur_token = alloc_expansion_token (pfile, macro); token = _cpp_lex_direct (pfile); /* Is this a parameter? */ if (token->type == CPP_NAME && (token->val.node->flags & NODE_MACRO_ARG) != 0) { token->type = CPP_MACRO_ARG; token->val.arg_no = token->val.node->value.arg_index; } else if (CPP_WTRADITIONAL (pfile) && macro->paramc > 0 && (token->type == CPP_STRING || token->type == CPP_CHAR)) check_trad_stringification (pfile, macro, &token->val.str); return token; } static bool create_iso_definition (cpp_reader *pfile, cpp_macro *macro) { cpp_token *token; const cpp_token *ctoken; /* Get the first token of the expansion (or the '(' of a function-like macro). */ ctoken = _cpp_lex_token (pfile); if (ctoken->type == CPP_OPEN_PAREN && !(ctoken->flags & PREV_WHITE)) { bool ok = parse_params (pfile, macro); macro->params = (cpp_hashnode **) BUFF_FRONT (pfile->a_buff); if (!ok) return false; /* Success. Commit or allocate the parameter array. */ if (pfile->hash_table->alloc_subobject) { cpp_token *tokns = pfile->hash_table->alloc_subobject (sizeof (cpp_token) * macro->paramc); memcpy (tokns, macro->params, sizeof (cpp_token) * macro->paramc); macro->params = (void *) tokns; } else BUFF_FRONT (pfile->a_buff) = (uchar *) &macro->params[macro->paramc]; macro->fun_like = 1; } else if (ctoken->type != CPP_EOF && !(ctoken->flags & PREV_WHITE)) cpp_error (pfile, CPP_DL_PEDWARN, "ISO C requires whitespace after the macro name"); if (macro->fun_like) token = lex_expansion_token (pfile, macro); else { token = alloc_expansion_token (pfile, macro); *token = *ctoken; } for (;;) { /* Check the stringifying # constraint 6.10.3.2.1 of function-like macros when lexing the subsequent token. */ if (macro->count > 1 && token[-1].type == CPP_HASH && macro->fun_like) { if (token->type == CPP_MACRO_ARG) { token->flags &= ~PREV_WHITE; token->flags |= STRINGIFY_ARG; token->flags |= token[-1].flags & PREV_WHITE; token[-1] = token[0]; macro->count--; } /* Let assembler get away with murder. */ else if (CPP_OPTION (pfile, lang) != CLK_ASM) { cpp_error (pfile, CPP_DL_ERROR, "'#' is not followed by a macro parameter"); return false; } } if (token->type == CPP_EOF) break; /* Paste operator constraint 6.10.3.3.1. */ if (token->type == CPP_PASTE) { /* Token-paste ##, can appear in both object-like and function-like macros, but not at the ends.
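e.g. (invented definitions) #define GLUE(x) x ## _tail is accepted, while #define BAD(x) ## x and #define WORSE(x) x ## both trip the "cannot appear at either end" error issued just below.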
*/ if (--macro->count > 0) token = lex_expansion_token (pfile, macro); if (macro->count == 0 || token->type == CPP_EOF) { cpp_error (pfile, CPP_DL_ERROR, "'##' cannot appear at either end of a macro expansion"); return false; } token[-1].flags |= PASTE_LEFT; } token = lex_expansion_token (pfile, macro); } macro->exp.tokens = (cpp_token *) BUFF_FRONT (pfile->a_buff); macro->traditional = 0; /* Don't count the CPP_EOF. */ macro->count--; /* Clear whitespace on first token for warn_of_redefinition(). */ if (macro->count) macro->exp.tokens[0].flags &= ~PREV_WHITE; /* Commit or allocate the memory. */ if (pfile->hash_table->alloc_subobject) { cpp_token *tokns = pfile->hash_table->alloc_subobject (sizeof (cpp_token) * macro->count); memcpy (tokns, macro->exp.tokens, sizeof (cpp_token) * macro->count); macro->exp.tokens = tokns; } else BUFF_FRONT (pfile->a_buff) = (uchar *) &macro->exp.tokens[macro->count]; return true; } /* Parse a macro and save its expansion. Returns nonzero on success. */ bool _cpp_create_definition (cpp_reader *pfile, cpp_hashnode *node) { cpp_macro *macro; unsigned int i; bool ok; if (pfile->hash_table->alloc_subobject) macro = pfile->hash_table->alloc_subobject (sizeof (cpp_macro)); else macro = (cpp_macro *) _cpp_aligned_alloc (pfile, sizeof (cpp_macro)); macro->line = pfile->directive_line; macro->params = 0; macro->paramc = 0; macro->variadic = 0; macro->used = !CPP_OPTION (pfile, warn_unused_macros); macro->count = 0; macro->fun_like = 0; /* To suppress some diagnostics. */ macro->syshdr = pfile->buffer && pfile->buffer->sysp != 0; if (CPP_OPTION (pfile, traditional)) ok = _cpp_create_trad_definition (pfile, macro); else { cpp_token *saved_cur_token = pfile->cur_token; ok = create_iso_definition (pfile, macro); /* Restore lexer position because of games lex_expansion_token() plays lexing the macro. We set the type for SEEN_EOL() in cpplib.c. Longer term we should lex the whole line before coming here, and just copy the expansion. */ saved_cur_token[-1].type = pfile->cur_token[-1].type; pfile->cur_token = saved_cur_token; /* Stop the lexer accepting __VA_ARGS__. */ pfile->state.va_args_ok = 0; } /* Clear the fast argument lookup indices. */ for (i = macro->paramc; i-- > 0; ) { struct cpp_hashnode *node = macro->params[i]; node->flags &= ~ NODE_MACRO_ARG; node->value = ((union _cpp_hashnode_value *) pfile->macro_buffer)[i]; } if (!ok) return ok; if (node->type == NT_MACRO) { if (CPP_OPTION (pfile, warn_unused_macros)) _cpp_warn_if_unused_macro (pfile, node, NULL); if (warn_of_redefinition (pfile, node, macro)) { cpp_error_with_line (pfile, CPP_DL_PEDWARN, pfile->directive_line, 0, "\"%s\" redefined", NODE_NAME (node)); if (node->type == NT_MACRO && !(node->flags & NODE_BUILTIN)) cpp_error_with_line (pfile, CPP_DL_PEDWARN, node->value.macro->line, 0, "this is the location of the previous definition"); } } if (node->type != NT_VOID) _cpp_free_definition (node); /* Enter definition in hash table. */ node->type = NT_MACRO; node->value.macro = macro; if (! ustrncmp (NODE_NAME (node), DSC ("__STDC_"))) node->flags |= NODE_WARN; return ok; } /* Warn if a token in STRING matches one of a function-like MACRO's parameters. */ static void check_trad_stringification (cpp_reader *pfile, const cpp_macro *macro, const cpp_string *string) { unsigned int i, len; const uchar *p, *q, *limit; /* Loop over the string. */ limit = string->text + string->len - 1; for (p = string->text + 1; p < limit; p = q) { /* Find the start of an identifier.
*/ while (p < limit && !is_idstart (*p)) p++; /* Find the end of the identifier. */ q = p; while (q < limit && is_idchar (*q)) q++; len = q - p; /* Loop over the function macro arguments to see if the identifier inside the string matches one of them. */ for (i = 0; i < macro->paramc; i++) { const cpp_hashnode *node = macro->params[i]; if (NODE_LEN (node) == len && !memcmp (p, NODE_NAME (node), len)) { cpp_error (pfile, CPP_DL_WARNING, "macro argument \"%s\" would be stringified in traditional C", NODE_NAME (node)); break; } } } } /* Returns the name, arguments and expansion of a macro, in a format suitable to be read back in again, and therefore also for DWARF 2 debugging info. e.g. "PASTE(X, Y) X ## Y", or "MACNAME EXPANSION". Caller is expected to generate the "#define" bit if needed. The returned text is temporary, and automatically freed later. */ const unsigned char * cpp_macro_definition (cpp_reader *pfile, const cpp_hashnode *node) { unsigned int i, len; const cpp_macro *macro = node->value.macro; unsigned char *buffer; if (node->type != NT_MACRO || (node->flags & NODE_BUILTIN)) { cpp_error (pfile, CPP_DL_ICE, "invalid hash type %d in cpp_macro_definition", node->type); return 0; } /* Calculate length. */ len = NODE_LEN (node) + 2; /* ' ' and NUL. */ if (macro->fun_like) { len += 4; /* "()" plus possible final ".." of named varargs (we have + 1 below). */ for (i = 0; i < macro->paramc; i++) len += NODE_LEN (macro->params[i]) + 1; /* "," */ } if (CPP_OPTION (pfile, traditional)) len += _cpp_replacement_text_len (macro); else { for (i = 0; i < macro->count; i++) { cpp_token *token = &macro->exp.tokens[i]; if (token->type == CPP_MACRO_ARG) len += NODE_LEN (macro->params[token->val.arg_no - 1]); else len += cpp_token_len (token) + 1; /* Includes room for ' '. */ if (token->flags & STRINGIFY_ARG) len++; /* "#" */ if (token->flags & PASTE_LEFT) len += 3; /* " ##" */ } } if (len > pfile->macro_buffer_len) { pfile->macro_buffer = xrealloc (pfile->macro_buffer, len); pfile->macro_buffer_len = len; } /* Fill in the buffer. Start with the macro name. */ buffer = pfile->macro_buffer; memcpy (buffer, NODE_NAME (node), NODE_LEN (node)); buffer += NODE_LEN (node); /* Parameter names. */ if (macro->fun_like) { *buffer++ = '('; for (i = 0; i < macro->paramc; i++) { cpp_hashnode *param = macro->params[i]; if (param != pfile->spec_nodes.n__VA_ARGS__) { memcpy (buffer, NODE_NAME (param), NODE_LEN (param)); buffer += NODE_LEN (param); } if (i + 1 < macro->paramc) /* Don't emit a space after the comma here; we're trying to emit a Dwarf-friendly definition, and the Dwarf spec forbids spaces in the argument list. */ *buffer++ = ','; else if (macro->variadic) *buffer++ = '.', *buffer++ = '.', *buffer++ = '.'; } *buffer++ = ')'; } /* The Dwarf spec requires a space after the macro name, even if the definition is the empty string. */ *buffer++ = ' '; if (CPP_OPTION (pfile, traditional)) buffer = _cpp_copy_replacement_text (macro, buffer); else if (macro->count) /* Expansion tokens.
*/ { for (i = 0; i < macro->count; i++) { cpp_token *token = &macro->exp.tokens[i]; if (token->flags & PREV_WHITE) *buffer++ = ' '; if (token->flags & STRINGIFY_ARG) *buffer++ = '#'; if (token->type == CPP_MACRO_ARG) { len = NODE_LEN (macro->params[token->val.arg_no - 1]); memcpy (buffer, NODE_NAME (macro->params[token->val.arg_no - 1]), len); buffer += len; } else buffer = cpp_spell_token (pfile, token, buffer); if (token->flags & PASTE_LEFT) { *buffer++ = ' '; *buffer++ = '#'; *buffer++ = '#'; /* Next has PREV_WHITE; see _cpp_create_definition. */ } } } *buffer = '\0'; return pfile->macro_buffer; } /* Dependency generator for Makefile fragments. Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc. Contributed by Zack Weinberg, Mar 2000 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ /* Keep this structure local to this file, so clients don't find it easy to start making assumptions. */ struct depends { const char **targetv; unsigned int ntargets; /* number of slots actually occupied */ unsigned int targets_size; /* amt of allocated space - in words */ const char **depv; unsigned int ndeps; unsigned int deps_size; const char **vpathv; size_t *vpathlv; unsigned int nvpaths; unsigned int vpaths_size; }; static const char *munge (const char *); /* Given a filename, quote characters in that filename which are significant to Make. Note that it's not possible to quote all such characters - e.g. \n, %, *, ?, [, \ (in some contexts), and ~ are not properly handled. It isn't possible to get this right in any current version of Make. (??? Still true? Old comment referred to 3.76.1.) */ static const char * munge (const char *filename) { int len; const char *p, *q; char *dst, *buffer; for (p = filename, len = 0; *p; p++, len++) { switch (*p) { case ' ': case '\t': /* GNU make uses a weird quoting scheme for white space. A space or tab preceded by 2N+1 backslashes represents N backslashes followed by space; a space or tab preceded by 2N backslashes represents N backslashes at the end of a file name; and backslashes in other contexts should not be doubled. */ for (q = p - 1; filename <= q && *q == '\\'; q--) len++; len++; break; case '$': /* '$' is quoted by doubling it. */ len++; break; } } /* Now we know how big to make the buffer. */ buffer = xmalloc (len + 1); for (p = filename, dst = buffer; *p; p++, dst++) { switch (*p) { case ' ': case '\t': for (q = p - 1; filename <= q && *q == '\\'; q--) *dst++ = '\\'; *dst++ = '\\'; break; case '$': *dst++ = '$'; break; default: /* nothing */; } *dst = *p; } *dst = '\0'; return buffer; } /* If T begins with any of the partial pathnames listed in d->vpathv, then advance T to point beyond that pathname.
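For illustration (paths invented): with a recorded VPATH entry of "../src", a dependency written as "../src/cpplib.c" is shortened to "cpplib.c"; "../src/../other.c" is deliberately left alone by the ".." check, and a plain "./foo.c" merely loses its leading "./".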
*/ static const char * apply_vpath (struct depends *d, const char *t) { if (d->vpathv) { unsigned int i; for (i = 0; i < d->nvpaths; i++) { if (!strncmp (d->vpathv[i], t, d->vpathlv[i])) { const char *p = t + d->vpathlv[i]; if (!IS_DIR_SEPARATOR (*p)) goto not_this_one; /* Do not simplify $(vpath)/../whatever. ??? Might not be necessary. */ if (p[1] == '.' && p[2] == '.' && IS_DIR_SEPARATOR (p[3])) goto not_this_one; /* found a match */ t = t + d->vpathlv[i] + 1; break; } not_this_one:; } } /* Remove leading ./ in any case. */ while (t[0] == '.' && IS_DIR_SEPARATOR (t[1])) t += 2; return t; } /* Public routines. */ struct depends * deps_init (void) { return xcalloc (sizeof (struct depends), 1); } void deps_free (struct depends *d) { unsigned int i; if (d->targetv) { for (i = 0; i < d->ntargets; i++) free ((void *) d->targetv[i]); free (d->targetv); } if (d->depv) { for (i = 0; i < d->ndeps; i++) free ((void *) d->depv[i]); free (d->depv); } if (d->vpathv) { for (i = 0; i < d->nvpaths; i++) free ((void *) d->vpathv[i]); free (d->vpathv); free (d->vpathlv); } free (d); } /* Adds a target T. We make a copy, so it need not be a permanent string. QUOTE is true if the string should be quoted. */ void deps_add_target (struct depends *d, const char *t, int quote) { if (d->ntargets == d->targets_size) { d->targets_size = d->targets_size * 2 + 4; d->targetv = xrealloc (d->targetv, d->targets_size * sizeof (const char *)); } t = apply_vpath (d, t); if (quote) t = munge (t); /* Also makes permanent copy. */ else t = xstrdup (t); d->targetv[d->ntargets++] = t; } /* Sets the default target if none has been given already. An empty string as the default target is interpreted as stdin. The string is quoted for MAKE. */ void deps_add_default_target (struct depends *d, const char *tgt) { /* Only if we have no targets. */ if (d->ntargets) return; if (tgt[0] == '\0') deps_add_target (d, "-", 1); else { #ifndef TARGET_OBJECT_SUFFIX # define TARGET_OBJECT_SUFFIX ".o" #endif const char *start = lbasename (tgt); char *o = alloca (strlen (start) + strlen (TARGET_OBJECT_SUFFIX) + 1); char *suffix; strcpy (o, start); suffix = strrchr (o, '.'); if (!suffix) suffix = o + strlen (o); strcpy (suffix, TARGET_OBJECT_SUFFIX); deps_add_target (d, o, 1); } } void deps_add_dep (struct depends *d, const char *t) { t = munge (apply_vpath (d, t)); /* Also makes permanent copy.
*/ if (d->ndeps == d->deps_size) { d->deps_size = d->deps_size * 2 + 8; d->depv = xrealloc (d->depv, d->deps_size * sizeof (const char *)); } d->depv[d->ndeps++] = t; } void deps_add_vpath (struct depends *d, const char *vpath) { const char *elem, *p; char *copy; size_t len; for (elem = vpath; *elem; elem = p) { for (p = elem; *p && *p != ':'; p++); len = p - elem; copy = xmalloc (len + 1); memcpy (copy, elem, len); copy[len] = '\0'; if (*p == ':') p++; if (d->nvpaths == d->vpaths_size) { d->vpaths_size = d->vpaths_size * 2 + 8; d->vpathv = xrealloc (d->vpathv, d->vpaths_size * sizeof (const char *)); d->vpathlv = xrealloc (d->vpathlv, d->vpaths_size * sizeof (size_t)); } d->vpathv[d->nvpaths] = copy; d->vpathlv[d->nvpaths] = len; d->nvpaths++; } } void deps_write (const struct depends *d, FILE *fp, unsigned int colmax) { unsigned int size, i, column; column = 0; if (colmax && colmax < 34) colmax = 34; for (i = 0; i < d->ntargets; i++) { size = strlen (d->targetv[i]); column += size; if (colmax && column > colmax) { fputs (" \\\n ", fp); column = 1 + size; } if (i) { putc (' ', fp); column++; } fputs (d->targetv[i], fp); } putc (':', fp); putc (' ', fp); column += 2; for (i = 0; i < d->ndeps; i++) { size = strlen (d->depv[i]); column += size; if (colmax && column > colmax) { fputs (" \\\n ", fp); column = 1 + size; } if (i) { putc (' ', fp); column++; } fputs (d->depv[i], fp); } putc ('\n', fp); } void deps_phony_targets (const struct depends *d, FILE *fp) { unsigned int i; for (i = 1; i < d->ndeps; i++) { putc ('\n', fp); fputs (d->depv[i], fp); putc (':', fp); putc ('\n', fp); } } /* Write out a deps buffer to a file, in a form that can be read back with deps_restore. Returns nonzero on error, in which case the error number will be in errno. */ int deps_save (struct depends *deps, FILE *f) { unsigned int i; /* The cppreader structure contains makefile dependences. Write out this structure. */ /* The number of dependences. */ if (fwrite (&deps->ndeps, sizeof (deps->ndeps), 1, f) != 1) return -1; /* The length of each dependence followed by the string. */ for (i = 0; i < deps->ndeps; i++) { size_t num_to_write = strlen (deps->depv[i]); if (fwrite (&num_to_write, sizeof (size_t), 1, f) != 1) return -1; if (fwrite (deps->depv[i], num_to_write, 1, f) != 1) return -1; } return 0; } /* Read back dependency information written with deps_save into the deps buffer. The third argument may be NULL, in which case the dependency information is just skipped, or it may be a filename, in which case that filename is skipped. */ int deps_restore (struct depends *deps, FILE *fd, const char *self) { unsigned int i, count; size_t num_to_read; size_t buf_size = 512; char *buf = xmalloc (buf_size); /* Number of dependences. */ if (fread (&count, 1, sizeof (count), fd) != sizeof (count)) return -1; /* The length of each dependence string, followed by the string. */ for (i = 0; i < count; i++) { /* Read in # bytes in string. */ if (fread (&num_to_read, 1, sizeof (size_t), fd) != sizeof (size_t)) return -1; if (buf_size < num_to_read + 1) { buf_size = num_to_read + 1 + 127; buf = xrealloc (buf, buf_size); } if (fread (buf, 1, num_to_read, fd) != num_to_read) return -1; buf[num_to_read] = '\0'; /* Generate makefile dependencies from .pch if -nopch-deps. */ if (self != NULL && strcmp (buf, self) != 0) deps_add_dep (deps, buf); } free (buf); return 0; } /* Part of CPP library. (Precompiled header reading/writing.) Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static int write_macdef (cpp_reader *, cpp_hashnode *, void *); static int save_idents (cpp_reader *, cpp_hashnode *, void *); static hashval_t hashmem (const void *, size_t); static hashval_t cpp_string_hash (const void *); static int cpp_string_eq (const void *, const void *); static int count_defs (cpp_reader *, cpp_hashnode *, void *); static int comp_hashnodes (const void *, const void *); static int collect_ht_nodes (cpp_reader *, cpp_hashnode *, void *); static int write_defs (cpp_reader *, cpp_hashnode *, void *); static int save_macros (cpp_reader *, cpp_hashnode *, void *); /* This structure represents a macro definition on disk. */ struct macrodef_struct { unsigned int definition_length; unsigned short name_length; unsigned short flags; }; /* This is how we write out a macro definition. Suitable for being called by cpp_forall_identifiers. */ static int write_macdef (cpp_reader *pfile, cpp_hashnode *hn, void *file_p) { FILE *f = (FILE *) file_p; switch (hn->type) { case NT_VOID: if (! (hn->flags & NODE_POISONED)) return 1; case NT_MACRO: if ((hn->flags & NODE_BUILTIN)) return 1; { struct macrodef_struct s; const unsigned char *defn; s.name_length = NODE_LEN (hn); s.flags = hn->flags & NODE_POISONED; if (hn->type == NT_MACRO) { defn = cpp_macro_definition (pfile, hn); s.definition_length = ustrlen (defn); } else { defn = NODE_NAME (hn); s.definition_length = s.name_length; } if (fwrite (&s, sizeof (s), 1, f) != 1 || fwrite (defn, 1, s.definition_length, f) != s.definition_length) { cpp_errno (pfile, CPP_DL_ERROR, "while writing precompiled header"); return 0; } } return 1; case NT_ASSERTION: /* Not currently implemented. */ return 1; default: abort (); } } /* This structure records the names of the defined macros. It's also used as a callback structure for size_initial_idents and save_idents. */ struct cpp_savedstate { /* A hash table of the defined identifiers. */ htab_t definedhash; /* The size of the definitions of those identifiers (the size of 'definedstrs'). */ size_t hashsize; /* Number of definitions */ size_t n_defs; /* Array of definitions. In cpp_write_pch_deps it is used for sorting. */ cpp_hashnode **defs; /* Space for the next definition. Definitions are null-terminated strings. */ unsigned char *definedstrs; }; /* Save this identifier into the state: put it in the hash table, put the definition in 'definedstrs'. 
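The table built here records every identifier that already had a definition (or assertion) when the PCH was being created; count_defs and write_defs below consult it so that cpp_write_pch_deps can write out the identifiers that were seen but had no definition at this point, and cpp_valid_state can then detect a clash when the PCH is reused.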
*/ static int save_idents (cpp_reader *pfile ATTRIBUTE_UNUSED, cpp_hashnode *hn, void *ss_p) { struct cpp_savedstate *const ss = (struct cpp_savedstate *)ss_p; if (hn->type != NT_VOID) { struct cpp_string news; void **slot; news.len = NODE_LEN (hn); news.text= NODE_NAME (hn); slot = htab_find_slot (ss->definedhash, &news, INSERT); if (*slot == NULL) { struct cpp_string *sp; unsigned char *text; sp = xmalloc (sizeof (struct cpp_string)); *slot = sp; sp->len = NODE_LEN (hn); sp->text = text = xmalloc (NODE_LEN (hn)); memcpy (text, NODE_NAME (hn), NODE_LEN (hn)); } } return 1; } /* Hash some memory in a generic way. */ static hashval_t hashmem (const void *p_p, size_t sz) { const unsigned char *p = (const unsigned char *)p_p; size_t i; hashval_t h; h = 0; for (i = 0; i < sz; i++) h = h * 67 - (*p++ - 113); return h; } /* Hash a cpp string for the hashtable machinery. */ static hashval_t cpp_string_hash (const void *a_p) { const struct cpp_string *a = (const struct cpp_string *) a_p; return hashmem (a->text, a->len); } /* Compare two cpp strings for the hashtable machinery. */ static int cpp_string_eq (const void *a_p, const void *b_p) { const struct cpp_string *a = (const struct cpp_string *) a_p; const struct cpp_string *b = (const struct cpp_string *) b_p; return (a->len == b->len && memcmp (a->text, b->text, a->len) == 0); } /* Save the current definitions of the cpp_reader for dependency checking purposes. When writing a precompiled header, this should be called at the same point in the compilation as cpp_valid_state would be called when reading the precompiled header back in. */ int cpp_save_state (cpp_reader *r, FILE *f) { /* Save the list of non-void identifiers for the dependency checking. */ r->savedstate = xmalloc (sizeof (struct cpp_savedstate)); r->savedstate->definedhash = htab_create (100, cpp_string_hash, cpp_string_eq, NULL); cpp_forall_identifiers (r, save_idents, r->savedstate); /* Write out the list of defined identifiers. */ cpp_forall_identifiers (r, write_macdef, f); return 0; } /* Calculate the 'hashsize' field of the saved state. */ static int count_defs (cpp_reader *pfile ATTRIBUTE_UNUSED, cpp_hashnode *hn, void *ss_p) { struct cpp_savedstate *const ss = (struct cpp_savedstate *)ss_p; switch (hn->type) { case NT_MACRO: if (hn->flags & NODE_BUILTIN) return 1; /* else fall through. */ case NT_VOID: { struct cpp_string news; void **slot; news.len = NODE_LEN (hn); news.text = NODE_NAME (hn); slot = htab_find (ss->definedhash, &news); if (slot == NULL) { ss->hashsize += NODE_LEN (hn) + 1; ss->n_defs += 1; } } return 1; case NT_ASSERTION: /* Not currently implemented. */ return 1; default: abort (); } } /* Collect the identifiers into the state's string table. */ static int write_defs (cpp_reader *pfile ATTRIBUTE_UNUSED, cpp_hashnode *hn, void *ss_p) { struct cpp_savedstate *const ss = (struct cpp_savedstate *)ss_p; switch (hn->type) { case NT_MACRO: if (hn->flags & NODE_BUILTIN) return 1; /* else fall through. */ case NT_VOID: { struct cpp_string news; void **slot; news.len = NODE_LEN (hn); news.text = NODE_NAME (hn); slot = htab_find (ss->definedhash, &news); if (slot == NULL) { ss->defs[ss->n_defs] = hn; ss->n_defs += 1; } } return 1; case NT_ASSERTION: /* Not currently implemented. */ return 1; default: abort (); } } /* Comparison function for qsort. The arguments point to pointers of type ht_hashnode *. 
*/ static int comp_hashnodes (const void *px, const void *py) { cpp_hashnode *x = *(cpp_hashnode **) px; cpp_hashnode *y = *(cpp_hashnode **) py; return ustrcmp (NODE_NAME (x), NODE_NAME (y)); } /* Write out the remainder of the dependency information. This should be called after the PCH is ready to be saved. */ int cpp_write_pch_deps (cpp_reader *r, FILE *f) { struct macrodef_struct z; struct cpp_savedstate *const ss = r->savedstate; unsigned char *definedstrs; size_t i; /* Collect the list of identifiers which have been seen and weren't defined to anything previously. */ ss->hashsize = 0; ss->n_defs = 0; cpp_forall_identifiers (r, count_defs, ss); ss->defs = xmalloc (ss->n_defs * sizeof (cpp_hashnode *)); ss->n_defs = 0; cpp_forall_identifiers (r, write_defs, ss); /* Sort the list, copy it into a buffer, and write it out. */ qsort (ss->defs, ss->n_defs, sizeof (cpp_hashnode *), &comp_hashnodes); definedstrs = ss->definedstrs = xmalloc (ss->hashsize); for (i = 0; i < ss->n_defs; ++i) { size_t len = NODE_LEN (ss->defs[i]); memcpy (definedstrs, NODE_NAME (ss->defs[i]), len + 1); definedstrs += len + 1; } memset (&z, 0, sizeof (z)); z.definition_length = ss->hashsize; if (fwrite (&z, sizeof (z), 1, f) != 1 || fwrite (ss->definedstrs, ss->hashsize, 1, f) != 1) { cpp_errno (r, CPP_DL_ERROR, "while writing precompiled header"); return -1; } free (ss->definedstrs); /* Free the saved state. */ free (ss); r->savedstate = NULL; return 0; } /* Write out the definitions of the preprocessor, in a form suitable for cpp_read_state. */ int cpp_write_pch_state (cpp_reader *r, FILE *f) { if (!r->deps) r->deps = deps_init (); if (deps_save (r->deps, f) != 0) { cpp_errno (r, CPP_DL_ERROR, "while writing precompiled header"); return -1; } if (! _cpp_save_file_entries (r, f)) { cpp_errno (r, CPP_DL_ERROR, "while writing precompiled header"); return -1; } return 0; } /* Data structure to transform hash table nodes into a sorted list */ struct ht_node_list { /* Array of nodes */ cpp_hashnode **defs; /* Number of nodes in the array */ size_t n_defs; /* Size of the allocated array */ size_t asize; }; /* Callback for collecting identifiers from hash table */ static int collect_ht_nodes (cpp_reader *pfile ATTRIBUTE_UNUSED, cpp_hashnode *hn, void *nl_p) { struct ht_node_list *const nl = (struct ht_node_list *)nl_p; if (hn->type != NT_VOID || hn->flags & NODE_POISONED) { if (nl->n_defs == nl->asize) { nl->asize *= 2; nl->defs = xrealloc (nl->defs, nl->asize * sizeof (cpp_hashnode *)); } nl->defs[nl->n_defs] = hn; ++nl->n_defs; } return 1; } /* Return nonzero if FD is a precompiled header which is consistent with the preprocessor's current definitions. It will be consistent when: - anything that was defined just before the PCH was generated is defined the same way now; and - anything that was not defined then, but is defined now, was not used by the PCH. NAME is used to print warnings if `warn_invalid_pch' is set in the reader's flags. */ int cpp_valid_state (cpp_reader *r, const char *name, int fd) { struct macrodef_struct m; size_t namebufsz = 256; unsigned char *namebuf = xmalloc (namebufsz); unsigned char *undeftab = NULL; struct ht_node_list nl = { 0, 0, 0 }; unsigned char *first, *last; unsigned int i; /* Read in the list of identifiers that must be defined Check that they are defined in the same way. 
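(The layout matches what write_macdef and cpp_write_pch_deps emit above: a sequence of macrodef_struct records, each followed by definition_length bytes of definition text, terminated by a record whose name_length is zero and whose definition_length gives the size of the following table of null-terminated identifier names that must not be defined. Schematically, a PCH built after `#define N 1' might contain { 3, 1, 0 } "N 1" followed by the terminating record and the undef table; the numbers are illustrative only.)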
*/ for (;;) { cpp_hashnode *h; const unsigned char *newdefn; if (read (fd, &m, sizeof (m)) != sizeof (m)) goto error; if (m.name_length == 0) break; /* If this file is already preprocessed, there won't be any macros defined, and that's OK. */ if (CPP_OPTION (r, preprocessed)) { if (lseek (fd, m.definition_length, SEEK_CUR) == -1) goto error; continue; } if (m.definition_length > namebufsz) { free (namebuf); namebufsz = m.definition_length + 256; namebuf = xmalloc (namebufsz); } if ((size_t)read (fd, namebuf, m.definition_length) != m.definition_length) goto error; h = cpp_lookup (r, namebuf, m.name_length); if (m.flags & NODE_POISONED || h->type != NT_MACRO || h->flags & NODE_POISONED) { if (CPP_OPTION (r, warn_invalid_pch)) cpp_error (r, CPP_DL_WARNING_SYSHDR, "%s: not used because `%.*s' not defined", name, m.name_length, namebuf); goto fail; } newdefn = cpp_macro_definition (r, h); if (m.definition_length != ustrlen (newdefn) || memcmp (namebuf, newdefn, m.definition_length) != 0) { if (CPP_OPTION (r, warn_invalid_pch)) cpp_error (r, CPP_DL_WARNING_SYSHDR, "%s: not used because `%.*s' defined as `%s' not `%.*s'", name, m.name_length, namebuf, newdefn + m.name_length, m.definition_length - m.name_length, namebuf + m.name_length); goto fail; } } free (namebuf); namebuf = NULL; /* Read in the list of identifiers that must not be defined. Check that they really aren't. */ undeftab = xmalloc (m.definition_length); if ((size_t) read (fd, undeftab, m.definition_length) != m.definition_length) goto error; /* Collect identifiers from the current hash table. */ nl.n_defs = 0; nl.asize = 10; nl.defs = xmalloc (nl.asize * sizeof (cpp_hashnode *)); cpp_forall_identifiers (r, &collect_ht_nodes, &nl); qsort (nl.defs, nl.n_defs, sizeof (cpp_hashnode *), &comp_hashnodes); /* Loop through nl.defs and undeftab, both of which are sorted lists. There should be no matches. */ first = undeftab; last = undeftab + m.definition_length; i = 0; while (first < last && i < nl.n_defs) { int cmp = ustrcmp (first, NODE_NAME (nl.defs[i])); if (cmp < 0) first += ustrlen (first) + 1; else if (cmp > 0) ++i; else { if (CPP_OPTION (r, warn_invalid_pch)) cpp_error (r, CPP_DL_WARNING_SYSHDR, "%s: not used because `%s' is defined", name, first); goto fail; } } free(nl.defs); free (undeftab); /* We win! */ return 0; error: cpp_errno (r, CPP_DL_ERROR, "while reading precompiled header"); return -1; fail: if (namebuf != NULL) free (namebuf); if (undeftab != NULL) free (undeftab); if (nl.defs != NULL) free (nl.defs); return 1; } /* Save all the existing macros. */ struct save_macro_data { uchar **defns; size_t count; size_t array_size; char **saved_pragmas; }; /* Save the definition of a single macro, so that it will persist across a PCH restore. Because macro data is in GCed memory, which will be blown away by PCH, it must be temporarily copied to malloced memory. (The macros will refer to identifier nodes which are also GCed and so on, so the copying is done by turning them into self-contained strings.) The assumption is that most macro definitions will come from the PCH file, not from the compilation before the PCH file is loaded, so it doesn't matter that this is a little expensive. It would reduce the cost even further if macros defined in the PCH file were not saved in this way, but this is not done (yet), except for builtins, and for #assert by default. 
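For instance, a macro defined before the PCH is loaded as `#define PAIR(x, y) x, y' is saved here roughly as the string "PAIR(x, y) x, y" plus a trailing newline (the exact text comes from cpp_macro_definition), and cpp_read_state below re-enters it through _cpp_create_definition once the PCH has been read.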
*/ static int save_macros (cpp_reader *r, cpp_hashnode *h, void *data_p) { struct save_macro_data *data = (struct save_macro_data *)data_p; if (h->type != NT_VOID && (h->flags & NODE_BUILTIN) == 0) { if (data->count == data->array_size) { data->array_size *= 2; data->defns = xrealloc (data->defns, (data->array_size * sizeof (uchar *))); } switch (h->type) { case NT_ASSERTION: /* Not currently implemented. */ return 1; case NT_MACRO: { const uchar * defn = cpp_macro_definition (r, h); size_t defnlen = ustrlen (defn); data->defns[data->count] = xmemdup (defn, defnlen, defnlen + 2); data->defns[data->count][defnlen] = '\n'; } break; default: abort (); } data->count++; } return 1; } /* Prepare to restore the state, by saving the currently-defined macros in 'data'. */ void cpp_prepare_state (cpp_reader *r, struct save_macro_data **data) { struct save_macro_data *d = xmalloc (sizeof (struct save_macro_data)); d->array_size = 512; d->defns = xmalloc (d->array_size * sizeof (d->defns[0])); d->count = 0; cpp_forall_identifiers (r, save_macros, d); d->saved_pragmas = _cpp_save_pragma_names (r); *data = d; } /* Given a precompiled header that was previously determined to be valid, apply all its definitions (and undefinitions) to the current state. DEPNAME is passed to deps_restore. */ int cpp_read_state (cpp_reader *r, const char *name, FILE *f, struct save_macro_data *data) { size_t i; struct lexer_state old_state; /* Restore spec_nodes, which will be full of references to the old hashtable entries and so will now be invalid. */ { struct spec_nodes *s = &r->spec_nodes; s->n_defined = cpp_lookup (r, DSC("defined")); s->n_true = cpp_lookup (r, DSC("true")); s->n_false = cpp_lookup (r, DSC("false")); s->n__VA_ARGS__ = cpp_lookup (r, DSC("__VA_ARGS__")); } old_state = r->state; r->state.in_directive = 1; r->state.prevent_expansion = 1; r->state.angled_headers = 0; /* Run through the carefully-saved macros, insert them. */ for (i = 0; i < data->count; i++) { cpp_hashnode *h; size_t namelen; uchar *defn; namelen = strcspn (data->defns[i], "( \n"); h = cpp_lookup (r, data->defns[i], namelen); defn = data->defns[i] + namelen; /* The PCH file is valid, so we know that if there is a definition from the PCH file it must be the same as the one we had originally, and so do not need to restore it. */ if (h->type == NT_VOID) { if (cpp_push_buffer (r, defn, ustrchr (defn, '\n') - defn, true) != NULL) { _cpp_clean_line (r); if (!_cpp_create_definition (r, h)) abort (); _cpp_pop_buffer (r); } else abort (); } free (data->defns[i]); } r->state = old_state; _cpp_restore_pragma_names (r, data->saved_pragmas); free (data); if (deps_restore (r->deps, f, CPP_OPTION (r, restore_pch_deps) ? name : NULL) != 0) goto error; if (! _cpp_read_file_entries (r, f)) goto error; return 0; error: cpp_errno (r, CPP_DL_ERROR, "while reading precompiled header"); return -1; } /* Hash tables. Copyright (C) 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ /* The code below is a specialization of Vladimir Makarov's expandable hash tables (see libiberty/hashtab.c). The abstraction penalty was too high to continue using the generic form. This code knows intrinsically how to calculate a hash value, and how to compare an existing entry with a potential new one. Also, the ability to delete members from the table has been removed. */ static unsigned int calc_hash (const unsigned char *, size_t); static void ht_expand (hash_table *); static double approx_sqrt (double); /* Calculate the hash of the string STR of length LEN. */ static unsigned int calc_hash (const unsigned char *str, size_t len) { size_t n = len; unsigned int r = 0; while (n--) r = HT_HASHSTEP (r, *str++); return HT_HASHFINISH (r, len); } /* Initialize an identifier hashtable. */ hash_table * ht_create (unsigned int order) { unsigned int nslots = 1 << order; hash_table *table; table = xcalloc (1, sizeof (hash_table)); /* Strings need no alignment. */ _obstack_begin (&table->stack, 0, 0, (void *(*) (long)) xmalloc, (void (*) (void *)) free); obstack_alignment_mask (&table->stack) = 0; table->entries = xcalloc (nslots, sizeof (hashnode)); table->entries_owned = true; table->nslots = nslots; return table; } /* Frees all memory associated with a hash table. */ void ht_destroy (hash_table *table) { obstack_free (&table->stack, NULL); if (table->entries_owned) free (table->entries); free (table); } /* Returns the hash entry for a STR of length LEN. If that string already exists in the table, returns the existing entry, and, if INSERT is HT_ALLOCED, frees the last obstack object. If the identifier hasn't been seen before, and INSERT is HT_NO_INSERT, returns NULL. Otherwise inserts and returns a new entry. A new string is alloced if INSERT is HT_ALLOC, otherwise INSERT is HT_ALLOCED and the item is assumed to be at the top of the obstack. */ hashnode ht_lookup (hash_table *table, const unsigned char *str, size_t len, enum ht_lookup_option insert) { return ht_lookup_with_hash (table, str, len, calc_hash (str, len), insert); } hashnode ht_lookup_with_hash (hash_table *table, const unsigned char *str, size_t len, unsigned int hash, enum ht_lookup_option insert) { unsigned int hash2; unsigned int index; size_t sizemask; hashnode node; sizemask = table->nslots - 1; index = hash & sizemask; table->searches++; node = table->entries[index]; if (node != NULL) { if (node->hash_value == hash && HT_LEN (node) == (unsigned int) len && !memcmp (HT_STR (node), str, len)) { if (insert == HT_ALLOCED) /* The string we search for was placed at the end of the obstack. Release it. */ obstack_free (&table->stack, (void *) str); return node; } /* hash2 must be odd, so we're guaranteed to visit every possible location in the table during rehashing. */ hash2 = ((hash * 17) & sizemask) | 1; for (;;) { table->collisions++; index = (index + hash2) & sizemask; node = table->entries[index]; if (node == NULL) break; if (node->hash_value == hash && HT_LEN (node) == (unsigned int) len && !memcmp (HT_STR (node), str, len)) { if (insert == HT_ALLOCED) /* The string we search for was placed at the end of the obstack. Release it. 
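(With HT_ALLOCED the caller has already copied STR to the top of the obstack; since an equal entry already exists, that copy is redundant and is freed here.)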
*/ obstack_free (&table->stack, (void *) str); return node; } } } if (insert == HT_NO_INSERT) return NULL; node = (*table->alloc_node) (table); table->entries[index] = node; HT_LEN (node) = (unsigned int) len; node->hash_value = hash; if (insert == HT_ALLOC) HT_STR (node) = obstack_copy0 (&table->stack, str, len); else HT_STR (node) = str; if (++table->nelements * 4 >= table->nslots * 3) /* Must expand the string table. */ ht_expand (table); return node; } /* Double the size of a hash table, re-hashing existing entries. */ static void ht_expand (hash_table *table) { hashnode *nentries, *p, *limit; unsigned int size, sizemask; size = table->nslots * 2; nentries = xcalloc (size, sizeof (hashnode)); sizemask = size - 1; p = table->entries; limit = p + table->nslots; do if (*p) { unsigned int index, hash, hash2; hash = (*p)->hash_value; index = hash & sizemask; if (nentries[index]) { hash2 = ((hash * 17) & sizemask) | 1; do { index = (index + hash2) & sizemask; } while (nentries[index]); } nentries[index] = *p; } while (++p < limit); if (table->entries_owned) free (table->entries); table->entries_owned = true; table->entries = nentries; table->nslots = size; } /* For all nodes in TABLE, callback CB with parameters TABLE->PFILE, the node, and V. */ void ht_forall (hash_table *table, ht_cb cb, const void *v) { hashnode *p, *limit; p = table->entries; limit = p + table->nslots; do if (*p) { if ((*cb) (table->pfile, *p, v) == 0) break; } while (++p < limit); } /* Restore the hash table. */ void ht_load (hash_table *ht, hashnode *entries, unsigned int nslots, unsigned int nelements, bool own) { if (ht->entries_owned) free (ht->entries); ht->entries = entries; ht->nslots = nslots; ht->nelements = nelements; ht->entries_owned = own; } /* Dump allocation statistics to stderr. */ void ht_dump_statistics (hash_table *table) { size_t nelts, nids, overhead, headers; size_t total_bytes, longest, sum_of_squares; double exp_len, exp_len2, exp2_len; hashnode *p, *limit; #define SCALE(x) ((unsigned long) ((x) < 1024*10 \ ? (x) \ : ((x) < 1024*1024*10 \ ? (x) / 1024 \ : (x) / (1024*1024)))) #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M')) total_bytes = longest = sum_of_squares = nids = 0; p = table->entries; limit = p + table->nslots; do if (*p) { size_t n = HT_LEN (*p); total_bytes += n; sum_of_squares += n * n; if (n > longest) longest = n; nids++; } while (++p < limit); nelts = table->nelements; overhead = obstack_memory_used (&table->stack) - total_bytes; headers = table->nslots * sizeof (hashnode); fprintf (stderr, "\nString pool\nentries\t\t%lu\n", (unsigned long) nelts); fprintf (stderr, "identifiers\t%lu (%.2f%%)\n", (unsigned long) nids, nids * 100.0 / nelts); fprintf (stderr, "slots\t\t%lu\n", (unsigned long) table->nslots); fprintf (stderr, "bytes\t\t%lu%c (%lu%c overhead)\n", SCALE (total_bytes), LABEL (total_bytes), SCALE (overhead), LABEL (overhead)); fprintf (stderr, "table size\t%lu%c\n", SCALE (headers), LABEL (headers)); exp_len = (double)total_bytes / (double)nelts; exp2_len = exp_len * exp_len; exp_len2 = (double) sum_of_squares / (double) nelts; fprintf (stderr, "coll/search\t%.4f\n", (double) table->collisions / (double) table->searches); fprintf (stderr, "ins/search\t%.4f\n", (double) nelts / (double) table->searches); fprintf (stderr, "avg. 
entry\t%.2f bytes (+/- %.2f)\n", exp_len, approx_sqrt (exp_len2 - exp2_len)); fprintf (stderr, "longest entry\t%lu\n", (unsigned long) longest); #undef SCALE #undef LABEL } /* Return the approximate positive square root of a number N. This is for statistical reports, not code generation. */ static double approx_sqrt (double x) { double s, d; if (x < 0) abort (); if (x == 0) return 0; s = x; do { d = (s * s - x) / (2 * s); s -= d; } while (d > .0001); return s; } /* CPP Library - traditional lexical analysis and macro expansion. Copyright (C) 2002, 2004 Free Software Foundation, Inc. Contributed by Neil Booth, May 2002 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The replacement text of a function-like macro is stored as a contiguous sequence of aligned blocks, each representing the text between subsequent parameters. Each block comprises the text between its surrounding parameters, the length of that text, and the one-based index of the following parameter. The final block in the replacement text is easily recognizable as it has an argument index of zero. */ struct block { unsigned int text_len; unsigned short arg_index; uchar text[1]; }; #define BLOCK_HEADER_LEN offsetof (struct block, text) #define BLOCK_LEN(TEXT_LEN) CPP_ALIGN (BLOCK_HEADER_LEN + (TEXT_LEN)) /* Structure holding information about a function-like macro invocation. */ struct fun_macro { /* Memory buffer holding the trad_arg array. */ _cpp_buff *buff; /* An array of size the number of macro parameters + 1, containing the offsets of the start of each macro argument in the output buffer. The argument continues until the character before the start of the next one. */ size_t *args; /* The hashnode of the macro. */ cpp_hashnode *node; /* The offset of the macro name in the output buffer. */ size_t offset; /* The line the macro name appeared on. */ unsigned int line; /* Zero-based index of argument being currently lexed. */ unsigned int argc; }; /* Lexing state. It is mostly used to prevent macro expansion. */ enum ls {ls_none = 0, /* Normal state. */ ls_fun_open, /* When looking for '('. */ ls_fun_close, /* When looking for ')'. */ ls_defined, /* After defined. */ ls_defined_close, /* Looking for ')' of defined(). */ ls_hash, /* After # in preprocessor conditional. */ ls_predicate, /* After the predicate, maybe paren? */ ls_answer}; /* In answer to predicate. */ /* Lexing TODO: Maybe handle space in escaped newlines. Stop cpplex.c from recognizing comments and directives during its lexing pass. 
*/ static const uchar *skip_whitespace_trad (cpp_reader *, const uchar *, int); static cpp_hashnode *lex_identifier_trad (cpp_reader *, const uchar *); static const uchar *copy_comment (cpp_reader *, const uchar *, int); static void check_output_buffer (cpp_reader *, size_t); static void push_replacement_text (cpp_reader *, cpp_hashnode *); static bool scan_parameters (cpp_reader *, cpp_macro *); static bool recursive_macro (cpp_reader *, cpp_hashnode *); static void save_replacement_text (cpp_reader *, cpp_macro *, unsigned int); static void maybe_start_funlike (cpp_reader *, cpp_hashnode *, const uchar *, struct fun_macro *); static void save_argument (struct fun_macro *, size_t); static void replace_args_and_push (cpp_reader *, struct fun_macro *); static size_t canonicalize_text (uchar *, const uchar *, size_t, uchar *); /* Ensures we have N bytes' space in the output buffer, and reallocates it if not. */ static void check_output_buffer (cpp_reader *pfile, size_t n) { /* We might need two bytes to terminate an unterminated comment, and one more to terminate the line with a NUL. */ n += 2 + 1; if (n > (size_t) (pfile->out.limit - pfile->out.cur)) { size_t size = pfile->out.cur - pfile->out.base; size_t new_size = (size + n) * 3 / 2; pfile->out.base = xrealloc (pfile->out.base, new_size); pfile->out.limit = pfile->out.base + new_size; pfile->out.cur = pfile->out.base + size; } } /* Skip a C-style block comment in a macro as a result of -CC. Buffer->cur points to the initial asterisk of the comment. */ static void skip_macro_block_comment (cpp_reader *pfile) { const uchar *cur = pfile->buffer->cur; cur++; if (*cur == '/') cur++; /* People like decorating comments with '*', so check for '/' instead for efficiency. */ while(! (*cur++ == '/' && cur[-2] == '*') ) ; pfile->buffer->cur = cur; } /* CUR points to the asterisk introducing a comment in the current context. IN_DEFINE is true if we are in the replacement text of a macro. The asterisk and following comment is copied to the buffer pointed to by pfile->out.cur, which must be of sufficient size. Unterminated comments are diagnosed, and correctly terminated in the output. pfile->out.cur is updated depending upon IN_DEFINE, -C, -CC and pfile->state.in_directive. Returns a pointer to the first character after the comment in the input buffer. */ static const uchar * copy_comment (cpp_reader *pfile, const uchar *cur, int in_define) { bool unterminated, copy = false; source_location src_loc = pfile->line_table->highest_line; cpp_buffer *buffer = pfile->buffer; buffer->cur = cur; if (pfile->context->prev) unterminated = false, skip_macro_block_comment (pfile); else unterminated = _cpp_skip_block_comment (pfile); if (unterminated) cpp_error_with_line (pfile, CPP_DL_ERROR, src_loc, 0, "unterminated comment"); /* Comments in directives become spaces so that tokens are properly separated when the ISO preprocessor re-lexes the line. The exception is #define. */ if (pfile->state.in_directive) { if (in_define) { if (CPP_OPTION (pfile, discard_comments_in_macro_exp)) pfile->out.cur--; else copy = true; } else pfile->out.cur[-1] = ' '; } else if (CPP_OPTION (pfile, discard_comments)) pfile->out.cur--; else copy = true; if (copy) { size_t len = (size_t) (buffer->cur - cur); memcpy (pfile->out.cur, cur, len); pfile->out.cur += len; if (unterminated) { *pfile->out.cur++ = '*'; *pfile->out.cur++ = '/'; } } return buffer->cur; } /* CUR points to any character in the input buffer. 
Skips over all contiguous horizontal white space and NULs, including comments if SKIP_COMMENTS, until reaching the first non-horizontal-whitespace character or the end of the current context. Escaped newlines are removed. The whitespace is copied verbatim to the output buffer, except that comments are handled as described in copy_comment(). pfile->out.cur is updated. Returns a pointer to the first character after the whitespace in the input buffer. */ static const uchar * skip_whitespace_trad (cpp_reader *pfile, const uchar *cur, int skip_comments) { uchar *out = pfile->out.cur; for (;;) { unsigned int c = *cur++; *out++ = c; if (is_nvspace (c)) continue; if (c == '/' && *cur == '*' && skip_comments) { pfile->out.cur = out; cur = copy_comment (pfile, cur, false /* in_define */); out = pfile->out.cur; continue; } out--; break; } pfile->out.cur = out; return cur - 1; } /* Lexes and outputs an identifier starting at CUR, which is assumed to point to a valid first character of an identifier. Returns the hashnode, and updates out.cur. */ static cpp_hashnode * lex_identifier_trad (cpp_reader *pfile, const uchar *cur) { size_t len; uchar *out = pfile->out.cur; cpp_hashnode *result; do *out++ = *cur++; while (is_numchar (*cur)); CUR (pfile->context) = cur; len = out - pfile->out.cur; result = (cpp_hashnode *) ht_lookup (pfile->hash_table, pfile->out.cur, len, HT_ALLOC); pfile->out.cur = out; return result; } /* Overlays the true file buffer temporarily with text of length LEN starting at START. The true buffer is restored upon calling restore_buff(). */ void _cpp_overlay_buffer (cpp_reader *pfile, const uchar *start, size_t len) { cpp_buffer *buffer = pfile->buffer; pfile->overlaid_buffer = buffer; pfile->saved_cur = buffer->cur; pfile->saved_rlimit = buffer->rlimit; pfile->saved_line_base = buffer->next_line; buffer->need_line = false; buffer->cur = start; buffer->line_base = start; buffer->rlimit = start + len; } /* Restores a buffer overlaid by _cpp_overlay_buffer(). */ void _cpp_remove_overlay (cpp_reader *pfile) { cpp_buffer *buffer = pfile->overlaid_buffer; buffer->cur = pfile->saved_cur; buffer->rlimit = pfile->saved_rlimit; buffer->line_base = pfile->saved_line_base; buffer->need_line = true; pfile->overlaid_buffer = NULL; } /* Reads a logical line into the output buffer. Returns TRUE if there is more text left in the buffer. */ bool _cpp_read_logical_line_trad (cpp_reader *pfile) { do { if (pfile->buffer->need_line && !_cpp_get_fresh_line (pfile)) return false; } while (!_cpp_scan_out_logical_line (pfile, NULL) || pfile->state.skipping); return pfile->buffer != NULL; } /* Set up state for finding the opening '(' of a function-like macro. */ static void maybe_start_funlike (cpp_reader *pfile, cpp_hashnode *node, const uchar *start, struct fun_macro *macro) { unsigned int n = node->value.macro->paramc + 1; if (macro->buff) _cpp_release_buff (pfile, macro->buff); macro->buff = _cpp_get_buff (pfile, n * sizeof (size_t)); macro->args = (size_t *) BUFF_FRONT (macro->buff); macro->node = node; macro->offset = start - pfile->out.base; macro->argc = 0; } /* Save the OFFSET of the start of the next argument to MACRO. */ static void save_argument (struct fun_macro *macro, size_t offset) { macro->argc++; if (macro->argc <= macro->node->value.macro->paramc) macro->args[macro->argc] = offset; } /* Copies the next logical line in the current buffer (starting at buffer->cur) to the output buffer. The output is guaranteed to terminate with a NUL character. buffer->cur is updated. 
If MACRO is non-NULL, then we are scanning the replacement list of MACRO, and we call save_replacement_text() every time we meet an argument. */ bool _cpp_scan_out_logical_line (cpp_reader *pfile, cpp_macro *macro) { bool result = true; cpp_context *context; const uchar *cur; uchar *out; struct fun_macro fmacro; unsigned int c, paren_depth = 0, quote; enum ls lex_state = ls_none; bool header_ok; const uchar *start_of_input_line; fmacro.buff = NULL; quote = 0; header_ok = pfile->state.angled_headers; CUR (pfile->context) = pfile->buffer->cur; RLIMIT (pfile->context) = pfile->buffer->rlimit; pfile->out.cur = pfile->out.base; pfile->out.first_line = pfile->line_table->highest_line; /* start_of_input_line is needed to make sure that directives really, really start at the first character of the line. */ start_of_input_line = pfile->buffer->cur; new_context: context = pfile->context; cur = CUR (context); check_output_buffer (pfile, RLIMIT (context) - cur); out = pfile->out.cur; for (;;) { if (!context->prev && cur >= pfile->buffer->notes[pfile->buffer->cur_note].pos) { pfile->buffer->cur = cur; _cpp_process_line_notes (pfile, false); } c = *cur++; *out++ = c; /* Whitespace should "continue" out of the switch, non-whitespace should "break" out of it. */ switch (c) { case ' ': case '\t': case '\f': case '\v': case '\0': continue; case '\n': /* If this is a macro's expansion, pop it. */ if (context->prev) { pfile->out.cur = out - 1; _cpp_pop_context (pfile); goto new_context; } /* Omit the newline from the output buffer. */ pfile->out.cur = out - 1; pfile->buffer->cur = cur; pfile->buffer->need_line = true; CPP_INCREMENT_LINE (pfile, 0); if ((lex_state == ls_fun_open || lex_state == ls_fun_close) && !pfile->state.in_directive && _cpp_get_fresh_line (pfile)) { /* Newlines in arguments become a space, but we don't clear any in-progress quote. */ if (lex_state == ls_fun_close) out[-1] = ' '; cur = pfile->buffer->cur; continue; } goto done; case '<': if (header_ok) quote = '>'; break; case '>': if (c == quote) quote = 0; break; case '"': case '\'': if (c == quote) quote = 0; else if (!quote) quote = c; break; case '\\': /* Skip escaped quotes here, it's easier than above. */ if (*cur == '\\' || *cur == '"' || *cur == '\'') *out++ = *cur++; break; case '/': /* Traditional CPP does not recognize comments within literals. */ if (!quote && *cur == '*') { pfile->out.cur = out; cur = copy_comment (pfile, cur, macro != 0); out = pfile->out.cur; continue; } break; case '_': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': if (!pfile->state.skipping && (quote == 0 || macro)) { cpp_hashnode *node; uchar *out_start = out - 1; pfile->out.cur = out_start; node = lex_identifier_trad (pfile, cur - 1); out = pfile->out.cur; cur = CUR (context); if (node->type == NT_MACRO /* Should we expand for ls_answer? */ && (lex_state == ls_none || lex_state == ls_fun_open) && !pfile->state.prevent_expansion) { /* Macros invalidate MI optimization. */ pfile->mi_valid = false; if (! 
(node->flags & NODE_BUILTIN) && node->value.macro->fun_like) { maybe_start_funlike (pfile, node, out_start, &fmacro); lex_state = ls_fun_open; fmacro.line = pfile->line_table->highest_line; continue; } else if (!recursive_macro (pfile, node)) { /* Remove the object-like macro's name from the output, and push its replacement text. */ pfile->out.cur = out_start; push_replacement_text (pfile, node); lex_state = ls_none; goto new_context; } } else if (macro && (node->flags & NODE_MACRO_ARG) != 0) { /* Found a parameter in the replacement text of a #define. Remove its name from the output. */ pfile->out.cur = out_start; save_replacement_text (pfile, macro, node->value.arg_index); out = pfile->out.base; } else if (lex_state == ls_hash) { lex_state = ls_predicate; continue; } else if (pfile->state.in_expression && node == pfile->spec_nodes.n_defined) { lex_state = ls_defined; continue; } } break; case '(': if (quote == 0) { paren_depth++; if (lex_state == ls_fun_open) { if (recursive_macro (pfile, fmacro.node)) lex_state = ls_none; else { lex_state = ls_fun_close; paren_depth = 1; out = pfile->out.base + fmacro.offset; fmacro.args[0] = fmacro.offset; } } else if (lex_state == ls_predicate) lex_state = ls_answer; else if (lex_state == ls_defined) lex_state = ls_defined_close; } break; case ',': if (quote == 0 && lex_state == ls_fun_close && paren_depth == 1) save_argument (&fmacro, out - pfile->out.base); break; case ')': if (quote == 0) { paren_depth--; if (lex_state == ls_fun_close && paren_depth == 0) { cpp_macro *m = fmacro.node->value.macro; m->used = 1; lex_state = ls_none; save_argument (&fmacro, out - pfile->out.base); /* A single zero-length argument is no argument. */ if (fmacro.argc == 1 && m->paramc == 0 && out == pfile->out.base + fmacro.offset + 1) fmacro.argc = 0; if (_cpp_arguments_ok (pfile, m, fmacro.node, fmacro.argc)) { /* Remove the macro's invocation from the output, and push its replacement text. */ pfile->out.cur = (pfile->out.base + fmacro.offset); CUR (context) = cur; replace_args_and_push (pfile, &fmacro); goto new_context; } } else if (lex_state == ls_answer || lex_state == ls_defined_close) lex_state = ls_none; } break; case '#': if (cur - 1 == start_of_input_line /* A '#' from a macro doesn't start a directive. */ && !pfile->context->prev && !pfile->state.in_directive) { /* A directive. With the way _cpp_handle_directive currently works, we only want to call it if either we know the directive is OK, or we want it to fail and be removed from the output. If we want it to be passed through (the assembler case) then we must not call _cpp_handle_directive. */ pfile->out.cur = out; cur = skip_whitespace_trad (pfile, cur, true /* skip_comments */); out = pfile->out.cur; if (*cur == '\n') { /* Null directive. Ignore it and don't invalidate the MI optimization. */ pfile->buffer->need_line = true; CPP_INCREMENT_LINE (pfile, 0); result = false; goto done; } else { bool do_it = false; if (is_numstart (*cur) && CPP_OPTION (pfile, lang) != CLK_ASM) do_it = true; else if (is_idstart (*cur)) /* Check whether we know this directive, but don't advance. */ do_it = lex_identifier_trad (pfile, cur)->is_directive; if (do_it || CPP_OPTION (pfile, lang) != CLK_ASM) { /* This is a kludge. We want to have the ISO preprocessor lex the next token. 
*/ pfile->buffer->cur = cur; _cpp_handle_directive (pfile, false /* indented */); result = false; goto done; } } } if (pfile->state.in_expression) { lex_state = ls_hash; continue; } break; default: break; } /* Non-whitespace disables MI optimization and stops treating '<' as a quote in #include. */ header_ok = false; if (!pfile->state.in_directive) pfile->mi_valid = false; if (lex_state == ls_none) continue; /* Some of these transitions of state are syntax errors. The ISO preprocessor will issue errors later. */ if (lex_state == ls_fun_open) /* Missing '('. */ lex_state = ls_none; else if (lex_state == ls_hash || lex_state == ls_predicate || lex_state == ls_defined) lex_state = ls_none; /* ls_answer and ls_defined_close keep going until ')'. */ } done: if (fmacro.buff) _cpp_release_buff (pfile, fmacro.buff); if (lex_state == ls_fun_close) cpp_error_with_line (pfile, CPP_DL_ERROR, fmacro.line, 0, "unterminated argument list invoking macro \"%s\"", NODE_NAME (fmacro.node)); return result; } /* Push a context holding the replacement text of the macro NODE on the context stack. NODE is either object-like, or a function-like macro with no arguments. */ static void push_replacement_text (cpp_reader *pfile, cpp_hashnode *node) { size_t len; const uchar *text; uchar *buf; if (node->flags & NODE_BUILTIN) { text = _cpp_builtin_macro_text (pfile, node); len = ustrlen (text); buf = _cpp_unaligned_alloc (pfile, len + 1); memcpy (buf, text, len); buf[len]='\n'; text = buf; } else { cpp_macro *macro = node->value.macro; macro->used = 1; text = macro->exp.text; macro->traditional = 1; len = macro->count; } _cpp_push_text_context (pfile, node, text, len); } /* Returns TRUE if traditional macro recursion is detected. */ static bool recursive_macro (cpp_reader *pfile, cpp_hashnode *node) { bool recursing = !!(node->flags & NODE_DISABLED); /* Object-like macros that are already expanding are necessarily recursive. However, it is possible to have traditional function-like macros that are not infinitely recursive but recurse to any given depth. Further, it is easy to construct examples that get ever longer until the point they stop recursing. So there is no easy way to detect true recursion; instead we assume any expansion more than 20 deep since the first invocation of this macro must be recursing. */ if (recursing && node->value.macro->fun_like) { size_t depth = 0; cpp_context *context = pfile->context; do { depth++; if (context->macro == node && depth > 20) break; context = context->prev; } while (context); recursing = context != NULL; } if (recursing) cpp_error (pfile, CPP_DL_ERROR, "detected recursion whilst expanding macro \"%s\"", NODE_NAME (node)); return recursing; } /* Return the length of the replacement text of a function-like or object-like non-builtin macro. */ size_t _cpp_replacement_text_len (const cpp_macro *macro) { size_t len; if (macro->fun_like && (macro->paramc != 0)) { const uchar *exp; len = 0; for (exp = macro->exp.text;;) { struct block *b = (struct block *) exp; len += b->text_len; if (b->arg_index == 0) break; len += NODE_LEN (macro->params[b->arg_index - 1]); exp += BLOCK_LEN (b->text_len); } } else len = macro->count; return len; } /* Copy the replacement text of MACRO to DEST, which must be of sufficient size. It is not NUL-terminated. The next character is returned. 
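For example (an illustrative case of the block representation described above): a traditional `#define f(x, y) (x) + (y)' is stored as the blocks {1, 1, "("} {5, 2, ") + ("} {1, 0, ")"} (text_len, arg_index, text), and this routine rebuilds "(x) + (y)" by emitting each block's text followed by the name of the parameter it indexes.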
*/ uchar * _cpp_copy_replacement_text (const cpp_macro *macro, uchar *dest) { if (macro->fun_like && (macro->paramc != 0)) { const uchar *exp; for (exp = macro->exp.text;;) { struct block *b = (struct block *) exp; cpp_hashnode *param; memcpy (dest, b->text, b->text_len); dest += b->text_len; if (b->arg_index == 0) break; param = macro->params[b->arg_index - 1]; memcpy (dest, NODE_NAME (param), NODE_LEN (param)); dest += NODE_LEN (param); exp += BLOCK_LEN (b->text_len); } } else { memcpy (dest, macro->exp.text, macro->count); dest += macro->count; } return dest; } /* Expand the function-like macro invocation described by FMACRO: substitute the argument text collected in the output buffer into the macro's stored replacement text, and push the result on the context stack as a new text buffer. A macro with no parameters is handled by push_replacement_text. */ static void replace_args_and_push (cpp_reader *pfile, struct fun_macro *fmacro) { cpp_macro *macro = fmacro->node->value.macro; if (macro->paramc == 0) push_replacement_text (pfile, fmacro->node); else { const uchar *exp; uchar *p; _cpp_buff *buff; size_t len = 0; /* Calculate the length of the argument-replaced text. */ for (exp = macro->exp.text;;) { struct block *b = (struct block *) exp; len += b->text_len; if (b->arg_index == 0) break; len += (fmacro->args[b->arg_index] - fmacro->args[b->arg_index - 1] - 1); exp += BLOCK_LEN (b->text_len); } /* Allocate room for the expansion plus \n. */ buff = _cpp_get_buff (pfile, len + 1); /* Copy the expansion and replace arguments. */ p = BUFF_FRONT (buff); for (exp = macro->exp.text;;) { struct block *b = (struct block *) exp; size_t arglen; memcpy (p, b->text, b->text_len); p += b->text_len; if (b->arg_index == 0) break; arglen = (fmacro->args[b->arg_index] - fmacro->args[b->arg_index - 1] - 1); memcpy (p, pfile->out.base + fmacro->args[b->arg_index - 1], arglen); p += arglen; exp += BLOCK_LEN (b->text_len); } /* \n-terminate. */ *p = '\n'; _cpp_push_text_context (pfile, fmacro->node, BUFF_FRONT (buff), len); /* So we free buffer allocation when macro is left. */ pfile->context->buff = buff; } } /* Read and record the parameters, if any, of a function-like macro definition. Destroys pfile->out.cur. Returns true on success, false on failure (syntax error or a duplicate parameter). On success, CUR (pfile->context) is just past the closing parenthesis. */ static bool scan_parameters (cpp_reader *pfile, cpp_macro *macro) { const uchar *cur = CUR (pfile->context) + 1; bool ok; for (;;) { cur = skip_whitespace_trad (pfile, cur, true /* skip_comments */); if (is_idstart (*cur)) { ok = false; if (_cpp_save_parameter (pfile, macro, lex_identifier_trad (pfile, cur))) break; cur = skip_whitespace_trad (pfile, CUR (pfile->context), true /* skip_comments */); if (*cur == ',') { cur++; continue; } ok = (*cur == ')'); break; } ok = (*cur == ')' && macro->paramc == 0); break; } if (!ok) cpp_error (pfile, CPP_DL_ERROR, "syntax error in macro parameter list"); CUR (pfile->context) = cur + (*cur == ')'); return ok; } /* Save the text from pfile->out.base to pfile->out.cur as the replacement text for the current macro, followed by argument ARG_INDEX, with zero indicating the end of the replacement text. */ static void save_replacement_text (cpp_reader *pfile, cpp_macro *macro, unsigned int arg_index) { size_t len = pfile->out.cur - pfile->out.base; uchar *exp; if (macro->paramc == 0) { /* Object-like and function-like macros without parameters simply store their \n-terminated replacement text. 
*/ exp = _cpp_unaligned_alloc (pfile, len + 1); memcpy (exp, pfile->out.base, len); exp[len] = '\n'; macro->exp.text = exp; macro->traditional = 1; macro->count = len; } else { /* Store the text's length (unsigned int), the argument index (unsigned short, base 1) and then the text. */ size_t blen = BLOCK_LEN (len); struct block *block; if (macro->count + blen > BUFF_ROOM (pfile->a_buff)) _cpp_extend_buff (pfile, &pfile->a_buff, macro->count + blen); exp = BUFF_FRONT (pfile->a_buff); block = (struct block *) (exp + macro->count); macro->exp.text = exp; macro->traditional = 1; /* Write out the block information. */ block->text_len = len; block->arg_index = arg_index; memcpy (block->text, pfile->out.base, len); /* Lex the rest into the start of the output buffer. */ pfile->out.cur = pfile->out.base; macro->count += blen; /* If we've finished, commit the memory. */ if (arg_index == 0) BUFF_FRONT (pfile->a_buff) += macro->count; } } /* Analyze and save the replacement text of a macro. Returns true on success. */ bool _cpp_create_trad_definition (cpp_reader *pfile, cpp_macro *macro) { const uchar *cur; uchar *limit; cpp_context *context = pfile->context; /* The context has not been set up for command line defines, and CUR has not been updated for the macro name for in-file defines. */ pfile->out.cur = pfile->out.base; CUR (context) = pfile->buffer->cur; RLIMIT (context) = pfile->buffer->rlimit; check_output_buffer (pfile, RLIMIT (context) - CUR (context)); /* Is this a function-like macro? */ if (* CUR (context) == '(') { bool ok = scan_parameters (pfile, macro); /* Remember the params so we can clear NODE_MACRO_ARG flags. */ macro->params = (cpp_hashnode **) BUFF_FRONT (pfile->a_buff); /* Setting macro to NULL indicates an error occurred, and prevents unnecessary work in _cpp_scan_out_logical_line. */ if (!ok) macro = NULL; else { BUFF_FRONT (pfile->a_buff) = (uchar *) &macro->params[macro->paramc]; macro->fun_like = 1; } } /* Skip leading whitespace in the replacement text. */ pfile->buffer->cur = skip_whitespace_trad (pfile, CUR (context), CPP_OPTION (pfile, discard_comments_in_macro_exp)); pfile->state.prevent_expansion++; _cpp_scan_out_logical_line (pfile, macro); pfile->state.prevent_expansion--; if (!macro) return false; /* Skip trailing white space. */ cur = pfile->out.base; limit = pfile->out.cur; while (limit > cur && is_space (limit[-1])) limit--; pfile->out.cur = limit; save_replacement_text (pfile, macro, 0); return true; } /* Copy SRC of length LEN to DEST, but convert all contiguous whitespace to a single space, provided it is not in quotes. The quote currently in effect is pointed to by PQUOTE, and is updated by the function. Returns the number of bytes copied. */ static size_t canonicalize_text (uchar *dest, const uchar *src, size_t len, uchar *pquote) { uchar *orig_dest = dest; uchar quote = *pquote; while (len) { if (is_space (*src) && !quote) { do src++, len--; while (len && is_space (*src)); *dest++ = ' '; } else { if (*src == '\'' || *src == '"') { if (!quote) quote = *src; else if (quote == *src) quote = 0; } *dest++ = *src++, len--; } } *pquote = quote; return dest - orig_dest; } /* Returns true if MACRO1 and MACRO2 have expansions that differ other than in the form of their whitespace. 
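For instance, `a   +  b' and `a + b' are not considered different, whereas `"a   b"' and `"a b"' are, since canonicalize_text above folds whitespace only outside string and character constants.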
*/ bool _cpp_expansions_different_trad (const cpp_macro *macro1, const cpp_macro *macro2) { uchar *p1 = xmalloc (macro1->count + macro2->count); uchar *p2 = p1 + macro1->count; uchar quote1 = 0, quote2 = 0; bool mismatch; size_t len1, len2; if (macro1->paramc > 0) { const uchar *exp1 = macro1->exp.text, *exp2 = macro2->exp.text; mismatch = true; for (;;) { struct block *b1 = (struct block *) exp1; struct block *b2 = (struct block *) exp2; if (b1->arg_index != b2->arg_index) break; len1 = canonicalize_text (p1, b1->text, b1->text_len, &quote1); len2 = canonicalize_text (p2, b2->text, b2->text_len, &quote2); if (len1 != len2 || memcmp (p1, p2, len1)) break; if (b1->arg_index == 0) { mismatch = false; break; } exp1 += BLOCK_LEN (b1->text_len); exp2 += BLOCK_LEN (b2->text_len); } } else { len1 = canonicalize_text (p1, macro1->exp.text, macro1->count, &quote1); len2 = canonicalize_text (p2, macro2->exp.text, macro2->count, &quote2); mismatch = (len1 != len2 || memcmp (p1, p2, len1)); } free (p1); return mismatch; } /* Mudflap: narrow-pointer bounds-checking by tree rewriting. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Frank Ch. Eigler and Graydon Hoare This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Basic error reporting routines. Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* warning, error, and fatal. These definitions are suitable for use in the generator programs; eventually we would like to use them in cc1 too, but that's a longer term project. N.B. We cannot presently use ATTRIBUTE_PRINTF with these functions, because they can be extended with additional format specifiers which GCC does not know about. */ #ifndef GCC_ERRORS_H #define GCC_ERRORS_H extern void warning (const char *, ...); extern void error (const char *, ...); extern void fatal (const char *, ...) ATTRIBUTE_NORETURN; extern void internal_error (const char *, ...) ATTRIBUTE_NORETURN; extern const char *trim_filename (const char *); extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN; extern int have_error; extern const char *progname; #endif /* ! GCC_ERRORS_H */ /* GCC core type declarations. Copyright (C) 2002 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Provide forward declarations of core types which are referred to by most of the compiler. This allows header files to use these types (e.g. in function prototypes) without concern for whether the full definitions are visible. Some other declarations that need to be universally visible are here, too. In the context of tconfig.h, most of these have special definitions which prevent them from being used except in further type declarations. This is a kludge; the right thing is to avoid including the "tm.h" header set in the context of tconfig.h, but we're not there yet. */ #ifndef GCC_CORETYPES_H #define GCC_CORETYPES_H #ifndef GTY #define GTY(x) /* nothing - marker for gengtype */ #endif #ifndef USED_FOR_TARGET struct rtx_def; typedef struct rtx_def *rtx; struct rtvec_def; typedef struct rtvec_def *rtvec; union tree_node; typedef union tree_node *tree; /* Provide forward struct declaration so that we don't have to include all of cpplib.h whenever a random prototype includes a pointer. Note that the cpp_reader typedef remains part of cpplib.h. */ struct cpp_reader; #else struct _dont_use_rtx_here_; struct _dont_use_rtvec_here_; union _dont_use_tree_here_; #define rtx struct _dont_use_rtx_here_ * #define rtvec struct _dont_use_rtvec_here * #define tree union _dont_use_tree_here_ * #endif #endif /* coretypes.h */ #ifndef GCC_TM_H #define GCC_TM_H #ifdef IN_GCC /* Definitions of target machine for GCC for IA-32. Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The purpose of this file is to define the characteristics of the i386, independent of assembler syntax or operating system. Three other files build on this one to describe a specific assembler syntax: bsd386.h, att386.h, and sun386.h. The actual tm.h file for a particular system should include this file, and then the file for the appropriate assembler syntax. Many macros that specify assembler syntax are omitted entirely from this file because they really belong in the files for particular assemblers. These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR, ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many that start with ASM_ or end in ASM_OP. 
*/ /* Define the specific costs for a given cpu */ struct processor_costs { const int add; /* cost of an add instruction */ const int lea; /* cost of a lea instruction */ const int shift_var; /* variable shift costs */ const int shift_const; /* constant shift costs */ const int mult_init[5]; /* cost of starting a multiply in QImode, HImode, SImode, DImode, TImode*/ const int mult_bit; /* cost of multiply per each bit set */ const int divide[5]; /* cost of a divide/mod in QImode, HImode, SImode, DImode, TImode*/ int movsx; /* The cost of movsx operation. */ int movzx; /* The cost of movzx operation. */ const int large_insn; /* insns larger than this cost more */ const int move_ratio; /* The threshold of number of scalar memory-to-memory move insns. */ const int movzbl_load; /* cost of loading using movzbl */ const int int_load[3]; /* cost of loading integer registers in QImode, HImode and SImode relative to reg-reg move (2). */ const int int_store[3]; /* cost of storing integer register in QImode, HImode and SImode */ const int fp_move; /* cost of reg,reg fld/fst */ const int fp_load[3]; /* cost of loading FP register in SFmode, DFmode and XFmode */ const int fp_store[3]; /* cost of storing FP register in SFmode, DFmode and XFmode */ const int mmx_move; /* cost of moving MMX register. */ const int mmx_load[2]; /* cost of loading MMX register in SImode and DImode */ const int mmx_store[2]; /* cost of storing MMX register in SImode and DImode */ const int sse_move; /* cost of moving SSE register. */ const int sse_load[3]; /* cost of loading SSE register in SImode, DImode and TImode*/ const int sse_store[3]; /* cost of storing SSE register in SImode, DImode and TImode*/ const int mmxsse_to_integer; /* cost of moving mmxsse register to integer and vice versa. */ const int prefetch_block; /* bytes moved to cache for prefetch. */ const int simultaneous_prefetches; /* number of parallel prefetch operations. */ const int branch_cost; /* Default value for BRANCH_COST. */ const int fadd; /* cost of FADD and FSUB instructions. */ const int fmul; /* cost of FMUL instruction. */ const int fdiv; /* cost of FDIV instruction. */ const int fabs; /* cost of FABS instruction. */ const int fchs; /* cost of FCHS instruction. */ const int fsqrt; /* cost of FSQRT instruction. */ }; extern const struct processor_costs *ix86_cost; /* Run-time compilation parameters selecting different hardware subsets. */ extern int target_flags; /* Macros used in the machine description to test the flags. */ /* configure can arrange to make this 2, to force a 486. */ #ifndef TARGET_CPU_DEFAULT #ifdef TARGET_64BIT_DEFAULT #define TARGET_CPU_DEFAULT TARGET_CPU_DEFAULT_k8 #else #define TARGET_CPU_DEFAULT 0 #endif #endif /* Masks for the -m switches */ #define MASK_80387 0x00000001 /* Hardware floating point */ #define MASK_RTD 0x00000002 /* Use ret that pops args */ #define MASK_ALIGN_DOUBLE 0x00000004 /* align doubles to 2 word boundary */ #define MASK_SVR3_SHLIB 0x00000008 /* Uninit locals into bss */ #define MASK_IEEE_FP 0x00000010 /* IEEE fp comparisons */ #define MASK_FLOAT_RETURNS 0x00000020 /* Return float in st(0) */ #define MASK_NO_FANCY_MATH_387 0x00000040 /* Disable sin, cos, sqrt */ #define MASK_OMIT_LEAF_FRAME_POINTER 0x080 /* omit leaf frame pointers */ #define MASK_STACK_PROBE 0x00000100 /* Enable stack probing */ #define MASK_NO_ALIGN_STROPS 0x00000200 /* Enable aligning of string ops. 
*/ #define MASK_INLINE_ALL_STROPS 0x00000400 /* Inline stringops in all cases */ #define MASK_NO_PUSH_ARGS 0x00000800 /* Use push instructions */ #define MASK_ACCUMULATE_OUTGOING_ARGS 0x00001000/* Accumulate outgoing args */ #define MASK_MMX 0x00002000 /* Support MMX regs/builtins */ #define MASK_SSE 0x00004000 /* Support SSE regs/builtins */ #define MASK_SSE2 0x00008000 /* Support SSE2 regs/builtins */ #define MASK_SSE3 0x00010000 /* Support SSE3 regs/builtins */ #define MASK_3DNOW 0x00020000 /* Support 3Dnow builtins */ #define MASK_3DNOW_A 0x00040000 /* Support Athlon 3Dnow builtins */ #define MASK_128BIT_LONG_DOUBLE 0x00080000 /* long double size is 128bit */ #define MASK_64BIT 0x00100000 /* Produce 64bit code */ #define MASK_MS_BITFIELD_LAYOUT 0x00200000 /* Use native (MS) bitfield layout */ #define MASK_TLS_DIRECT_SEG_REFS 0x00400000 /* Avoid adding %gs:0 */ /* Unused: 0x03e0000 */ /* ... overlap with subtarget options starts by 0x04000000. */ #define MASK_NO_RED_ZONE 0x04000000 /* Do not use red zone */ /* Use the floating point instructions */ #define TARGET_80387 (target_flags & MASK_80387) /* Compile using ret insn that pops args. This will not work unless you use prototypes at least for all functions that can take varying numbers of args. */ #define TARGET_RTD (target_flags & MASK_RTD) /* Align doubles to a two word boundary. This breaks compatibility with the published ABI's for structures containing doubles, but produces faster code on the pentium. */ #define TARGET_ALIGN_DOUBLE (target_flags & MASK_ALIGN_DOUBLE) /* Use push instructions to save outgoing args. */ #define TARGET_PUSH_ARGS (!(target_flags & MASK_NO_PUSH_ARGS)) /* Accumulate stack adjustments to prologue/epilogue. */ #define TARGET_ACCUMULATE_OUTGOING_ARGS \ (target_flags & MASK_ACCUMULATE_OUTGOING_ARGS) /* Put uninitialized locals into bss, not data. Meaningful only on svr3. */ #define TARGET_SVR3_SHLIB (target_flags & MASK_SVR3_SHLIB) /* Use IEEE floating point comparisons. These handle correctly the cases where the result of a comparison is unordered. Normally SIGFPE is generated in such cases, in which case this isn't needed. */ #define TARGET_IEEE_FP (target_flags & MASK_IEEE_FP) /* Functions that return a floating point value may return that value in the 387 FPU or in 386 integer registers. If set, this flag causes the 387 to be used, which is compatible with most calling conventions. */ #define TARGET_FLOAT_RETURNS_IN_80387 (target_flags & MASK_FLOAT_RETURNS) /* Long double is 128bit instead of 96bit, even when only 80bits are used. This mode wastes cache, but avoid misaligned data accesses and simplifies address calculations. */ #define TARGET_128BIT_LONG_DOUBLE (target_flags & MASK_128BIT_LONG_DOUBLE) /* Disable generation of FP sin, cos and sqrt operations for 387. This is because FreeBSD lacks these in the math-emulator-code */ #define TARGET_NO_FANCY_MATH_387 (target_flags & MASK_NO_FANCY_MATH_387) /* Don't create frame pointers for leaf functions */ #define TARGET_OMIT_LEAF_FRAME_POINTER \ (target_flags & MASK_OMIT_LEAF_FRAME_POINTER) /* Debug GO_IF_LEGITIMATE_ADDRESS */ #define TARGET_DEBUG_ADDR (ix86_debug_addr_string != 0) /* Debug FUNCTION_ARG macros */ #define TARGET_DEBUG_ARG (ix86_debug_arg_string != 0) /* 64bit Sledgehammer mode. For libgcc2 we make sure this is a compile-time constant. 
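   As an illustrative aside (not part of the original header), every
   TARGET_* feature macro above reduces to a bit test against target_flags;
   the hypothetical helper below only demonstrates that pattern.  */

#if 0 /* Example only -- ix86_example_flags_demo is not a GCC function.  */
static int
ix86_example_flags_demo (void)
{
  /* Turn on hardware floating point and IEEE-conformant comparisons...  */
  target_flags |= MASK_80387 | MASK_IEEE_FP;
  /* ...and the corresponding accessor macros now evaluate nonzero.  */
  return TARGET_80387 && TARGET_IEEE_FP;
}
#endif

/*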
*/ #ifdef IN_LIBGCC2 #ifdef __x86_64__ #define TARGET_64BIT 1 #else #define TARGET_64BIT 0 #endif #else #ifdef TARGET_BI_ARCH #define TARGET_64BIT (target_flags & MASK_64BIT) #else #if TARGET_64BIT_DEFAULT #define TARGET_64BIT 1 #else #define TARGET_64BIT 0 #endif #endif #endif #define HAS_LONG_COND_BRANCH 1 #define HAS_LONG_UNCOND_BRANCH 1 /* Avoid adding %gs:0 in TLS references; use %gs:address directly. */ #define TARGET_TLS_DIRECT_SEG_REFS (target_flags & MASK_TLS_DIRECT_SEG_REFS) #define TARGET_386 (ix86_tune == PROCESSOR_I386) #define TARGET_486 (ix86_tune == PROCESSOR_I486) #define TARGET_PENTIUM (ix86_tune == PROCESSOR_PENTIUM) #define TARGET_PENTIUMPRO (ix86_tune == PROCESSOR_PENTIUMPRO) #define TARGET_K6 (ix86_tune == PROCESSOR_K6) #define TARGET_ATHLON (ix86_tune == PROCESSOR_ATHLON) #define TARGET_PENTIUM4 (ix86_tune == PROCESSOR_PENTIUM4) #define TARGET_K8 (ix86_tune == PROCESSOR_K8) #define TARGET_ATHLON_K8 (TARGET_K8 || TARGET_ATHLON) #define TARGET_NOCONA (ix86_tune == PROCESSOR_NOCONA) #define TUNEMASK (1 << ix86_tune) extern const int x86_use_leave, x86_push_memory, x86_zero_extend_with_and; extern const int x86_use_bit_test, x86_cmove, x86_deep_branch; extern const int x86_branch_hints, x86_unroll_strlen; extern const int x86_double_with_add, x86_partial_reg_stall, x86_movx; extern const int x86_use_loop, x86_use_fiop, x86_use_mov0; extern const int x86_use_cltd, x86_read_modify_write; extern const int x86_read_modify, x86_split_long_moves; extern const int x86_promote_QImode, x86_single_stringop, x86_fast_prefix; extern const int x86_himode_math, x86_qimode_math, x86_promote_qi_regs; extern const int x86_promote_hi_regs, x86_integer_DFmode_moves; extern const int x86_add_esp_4, x86_add_esp_8, x86_sub_esp_4, x86_sub_esp_8; extern const int x86_partial_reg_dependency, x86_memory_mismatch_stall; extern const int x86_accumulate_outgoing_args, x86_prologue_using_move; extern const int x86_epilogue_using_move, x86_decompose_lea; extern const int x86_arch_always_fancy_math_387, x86_shift1; extern const int x86_sse_partial_reg_dependency, x86_sse_partial_regs; extern const int x86_sse_typeless_stores, x86_sse_load0_by_pxor; extern const int x86_use_ffreep, x86_sse_partial_regs_for_cvtsd2ss; extern const int x86_inter_unit_moves; extern int x86_prefetch_sse; #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK) #define TARGET_PUSH_MEMORY (x86_push_memory & TUNEMASK) #define TARGET_ZERO_EXTEND_WITH_AND (x86_zero_extend_with_and & TUNEMASK) #define TARGET_USE_BIT_TEST (x86_use_bit_test & TUNEMASK) #define TARGET_UNROLL_STRLEN (x86_unroll_strlen & TUNEMASK) /* For sane SSE instruction set generation we need fcomi instruction. It is safe to enable all CMOVE instructions. 
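   As an illustrative aside (not part of the original header), the tuning
   macros below test whether the bit for the CPU selected by ix86_tune is
   set in the matching x86_* bitmask, i.e. TUNEMASK is (1 << ix86_tune).  */

#if 0 /* Example only -- ix86_example_tuned_for_leave is hypothetical.  */
static int
ix86_example_tuned_for_leave (void)
{
  /* Equivalent to TARGET_USE_LEAVE: bit ix86_tune of x86_use_leave.  */
  return (x86_use_leave & (1 << ix86_tune)) != 0;
}
#endif

/*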
*/ #define TARGET_CMOVE ((x86_cmove & (1 << ix86_arch)) || TARGET_SSE) #define TARGET_DEEP_BRANCH_PREDICTION (x86_deep_branch & TUNEMASK) #define TARGET_BRANCH_PREDICTION_HINTS (x86_branch_hints & TUNEMASK) #define TARGET_DOUBLE_WITH_ADD (x86_double_with_add & TUNEMASK) #define TARGET_USE_SAHF ((x86_use_sahf & TUNEMASK) && !TARGET_64BIT) #define TARGET_MOVX (x86_movx & TUNEMASK) #define TARGET_PARTIAL_REG_STALL (x86_partial_reg_stall & TUNEMASK) #define TARGET_USE_LOOP (x86_use_loop & TUNEMASK) #define TARGET_USE_FIOP (x86_use_fiop & TUNEMASK) #define TARGET_USE_MOV0 (x86_use_mov0 & TUNEMASK) #define TARGET_USE_CLTD (x86_use_cltd & TUNEMASK) #define TARGET_SPLIT_LONG_MOVES (x86_split_long_moves & TUNEMASK) #define TARGET_READ_MODIFY_WRITE (x86_read_modify_write & TUNEMASK) #define TARGET_READ_MODIFY (x86_read_modify & TUNEMASK) #define TARGET_PROMOTE_QImode (x86_promote_QImode & TUNEMASK) #define TARGET_FAST_PREFIX (x86_fast_prefix & TUNEMASK) #define TARGET_SINGLE_STRINGOP (x86_single_stringop & TUNEMASK) #define TARGET_QIMODE_MATH (x86_qimode_math & TUNEMASK) #define TARGET_HIMODE_MATH (x86_himode_math & TUNEMASK) #define TARGET_PROMOTE_QI_REGS (x86_promote_qi_regs & TUNEMASK) #define TARGET_PROMOTE_HI_REGS (x86_promote_hi_regs & TUNEMASK) #define TARGET_ADD_ESP_4 (x86_add_esp_4 & TUNEMASK) #define TARGET_ADD_ESP_8 (x86_add_esp_8 & TUNEMASK) #define TARGET_SUB_ESP_4 (x86_sub_esp_4 & TUNEMASK) #define TARGET_SUB_ESP_8 (x86_sub_esp_8 & TUNEMASK) #define TARGET_INTEGER_DFMODE_MOVES (x86_integer_DFmode_moves & TUNEMASK) #define TARGET_PARTIAL_REG_DEPENDENCY (x86_partial_reg_dependency & TUNEMASK) #define TARGET_SSE_PARTIAL_REG_DEPENDENCY \ (x86_sse_partial_reg_dependency & TUNEMASK) #define TARGET_SSE_PARTIAL_REGS (x86_sse_partial_regs & TUNEMASK) #define TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS \ (x86_sse_partial_regs_for_cvtsd2ss & TUNEMASK) #define TARGET_SSE_TYPELESS_STORES (x86_sse_typeless_stores & TUNEMASK) #define TARGET_SSE_TYPELESS_LOAD0 (x86_sse_typeless_load0 & TUNEMASK) #define TARGET_SSE_LOAD0_BY_PXOR (x86_sse_load0_by_pxor & TUNEMASK) #define TARGET_MEMORY_MISMATCH_STALL (x86_memory_mismatch_stall & TUNEMASK) #define TARGET_PROLOGUE_USING_MOVE (x86_prologue_using_move & TUNEMASK) #define TARGET_EPILOGUE_USING_MOVE (x86_epilogue_using_move & TUNEMASK) #define TARGET_DECOMPOSE_LEA (x86_decompose_lea & TUNEMASK) #define TARGET_PREFETCH_SSE (x86_prefetch_sse) #define TARGET_SHIFT1 (x86_shift1 & TUNEMASK) #define TARGET_USE_FFREEP (x86_use_ffreep & TUNEMASK) #define TARGET_REP_MOVL_OPTIMAL (x86_rep_movl_optimal & TUNEMASK) #define TARGET_INTER_UNIT_MOVES (x86_inter_unit_moves & TUNEMASK) #define TARGET_FOUR_JUMP_LIMIT (x86_four_jump_limit & TUNEMASK) #define TARGET_STACK_PROBE (target_flags & MASK_STACK_PROBE) #define TARGET_ALIGN_STRINGOPS (!(target_flags & MASK_NO_ALIGN_STROPS)) #define TARGET_INLINE_ALL_STRINGOPS (target_flags & MASK_INLINE_ALL_STROPS) #define ASSEMBLER_DIALECT (ix86_asm_dialect) #define TARGET_SSE ((target_flags & MASK_SSE) != 0) #define TARGET_SSE2 ((target_flags & MASK_SSE2) != 0) #define TARGET_SSE3 ((target_flags & MASK_SSE3) != 0) #define TARGET_SSE_MATH ((ix86_fpmath & FPMATH_SSE) != 0) #define TARGET_MIX_SSE_I387 ((ix86_fpmath & FPMATH_SSE) \ && (ix86_fpmath & FPMATH_387)) #define TARGET_MMX ((target_flags & MASK_MMX) != 0) #define TARGET_3DNOW ((target_flags & MASK_3DNOW) != 0) #define TARGET_3DNOW_A ((target_flags & MASK_3DNOW_A) != 0) #define TARGET_RED_ZONE (!(target_flags & MASK_NO_RED_ZONE)) #define TARGET_USE_MS_BITFIELD_LAYOUT (target_flags & 
MASK_MS_BITFIELD_LAYOUT) #define TARGET_GNU_TLS (ix86_tls_dialect == TLS_DIALECT_GNU) #define TARGET_SUN_TLS (ix86_tls_dialect == TLS_DIALECT_SUN) /* WARNING: Do not mark empty strings for translation, as calling gettext on an empty string does NOT return an empty string. */ #define TARGET_SWITCHES \ { { "80387", MASK_80387, N_("Use hardware fp") }, \ { "no-80387", -MASK_80387, N_("Do not use hardware fp") }, \ { "hard-float", MASK_80387, N_("Use hardware fp") }, \ { "soft-float", -MASK_80387, N_("Do not use hardware fp") }, \ { "no-soft-float", MASK_80387, N_("Use hardware fp") }, \ { "386", 0, "" /*Deprecated.*/}, \ { "486", 0, "" /*Deprecated.*/}, \ { "pentium", 0, "" /*Deprecated.*/}, \ { "pentiumpro", 0, "" /*Deprecated.*/}, \ { "intel-syntax", 0, "" /*Deprecated.*/}, \ { "no-intel-syntax", 0, "" /*Deprecated.*/}, \ { "rtd", MASK_RTD, \ N_("Alternate calling convention") }, \ { "no-rtd", -MASK_RTD, \ N_("Use normal calling convention") }, \ { "align-double", MASK_ALIGN_DOUBLE, \ N_("Align some doubles on dword boundary") }, \ { "no-align-double", -MASK_ALIGN_DOUBLE, \ N_("Align doubles on word boundary") }, \ { "svr3-shlib", MASK_SVR3_SHLIB, \ N_("Uninitialized locals in .bss") }, \ { "no-svr3-shlib", -MASK_SVR3_SHLIB, \ N_("Uninitialized locals in .data") }, \ { "ieee-fp", MASK_IEEE_FP, \ N_("Use IEEE math for fp comparisons") }, \ { "no-ieee-fp", -MASK_IEEE_FP, \ N_("Do not use IEEE math for fp comparisons") }, \ { "fp-ret-in-387", MASK_FLOAT_RETURNS, \ N_("Return values of functions in FPU registers") }, \ { "no-fp-ret-in-387", -MASK_FLOAT_RETURNS , \ N_("Do not return values of functions in FPU registers")}, \ { "no-fancy-math-387", MASK_NO_FANCY_MATH_387, \ N_("Do not generate sin, cos, sqrt for FPU") }, \ { "fancy-math-387", -MASK_NO_FANCY_MATH_387, \ N_("Generate sin, cos, sqrt for FPU")}, \ { "omit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER, \ N_("Omit the frame pointer in leaf functions") }, \ { "no-omit-leaf-frame-pointer",-MASK_OMIT_LEAF_FRAME_POINTER, "" }, \ { "stack-arg-probe", MASK_STACK_PROBE, \ N_("Enable stack probing") }, \ { "no-stack-arg-probe", -MASK_STACK_PROBE, "" }, \ { "windows", 0, 0 /* undocumented */ }, \ { "dll", 0, 0 /* undocumented */ }, \ { "align-stringops", -MASK_NO_ALIGN_STROPS, \ N_("Align destination of the string operations") }, \ { "no-align-stringops", MASK_NO_ALIGN_STROPS, \ N_("Do not align destination of the string operations") }, \ { "inline-all-stringops", MASK_INLINE_ALL_STROPS, \ N_("Inline all known string operations") }, \ { "no-inline-all-stringops", -MASK_INLINE_ALL_STROPS, \ N_("Do not inline all known string operations") }, \ { "push-args", -MASK_NO_PUSH_ARGS, \ N_("Use push instructions to save outgoing arguments") }, \ { "no-push-args", MASK_NO_PUSH_ARGS, \ N_("Do not use push instructions to save outgoing arguments") }, \ { "accumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS, \ N_("Use push instructions to save outgoing arguments") }, \ { "no-accumulate-outgoing-args",-MASK_ACCUMULATE_OUTGOING_ARGS, \ N_("Do not use push instructions to save outgoing arguments") }, \ { "mmx", MASK_MMX, \ N_("Support MMX built-in functions") }, \ { "no-mmx", -MASK_MMX, \ N_("Do not support MMX built-in functions") }, \ { "3dnow", MASK_3DNOW, \ N_("Support 3DNow! built-in functions") }, \ { "no-3dnow", -MASK_3DNOW, \ N_("Do not support 3DNow! 
built-in functions") }, \ { "sse", MASK_SSE, \ N_("Support MMX and SSE built-in functions and code generation") }, \ { "no-sse", -MASK_SSE, \ N_("Do not support MMX and SSE built-in functions and code generation") },\ { "sse2", MASK_SSE2, \ N_("Support MMX, SSE and SSE2 built-in functions and code generation") }, \ { "no-sse2", -MASK_SSE2, \ N_("Do not support MMX, SSE and SSE2 built-in functions and code generation") }, \ { "sse3", MASK_SSE3, \ N_("Support MMX, SSE, SSE2 and SSE3 built-in functions and code generation") },\ { "no-sse3", -MASK_SSE3, \ N_("Do not support MMX, SSE, SSE2 and SSE3 built-in functions and code generation") },\ { "128bit-long-double", MASK_128BIT_LONG_DOUBLE, \ N_("sizeof(long double) is 16") }, \ { "96bit-long-double", -MASK_128BIT_LONG_DOUBLE, \ N_("sizeof(long double) is 12") }, \ { "64", MASK_64BIT, \ N_("Generate 64bit x86-64 code") }, \ { "32", -MASK_64BIT, \ N_("Generate 32bit i386 code") }, \ { "ms-bitfields", MASK_MS_BITFIELD_LAYOUT, \ N_("Use native (MS) bitfield layout") }, \ { "no-ms-bitfields", -MASK_MS_BITFIELD_LAYOUT, \ N_("Use gcc default bitfield layout") }, \ { "red-zone", -MASK_NO_RED_ZONE, \ N_("Use red-zone in the x86-64 code") }, \ { "no-red-zone", MASK_NO_RED_ZONE, \ N_("Do not use red-zone in the x86-64 code") }, \ { "tls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS, \ N_("Use direct references against %gs when accessing tls data") }, \ { "no-tls-direct-seg-refs", -MASK_TLS_DIRECT_SEG_REFS, \ N_("Do not use direct references against %gs when accessing tls data") }, \ SUBTARGET_SWITCHES \ { "", \ TARGET_DEFAULT | TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_DEFAULT \ | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT, 0 }} #ifndef TARGET_64BIT_DEFAULT #define TARGET_64BIT_DEFAULT 0 #endif #ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT #define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0 #endif /* Once GDB has been enhanced to deal with functions without frame pointers, we can change this to allow for elimination of the frame pointer in leaf functions. */ #define TARGET_DEFAULT 0 /* This is not really a target flag, but is done this way so that it's analogous to similar code for Mach-O on PowerPC. darwin.h redefines this to 1. */ #define TARGET_MACHO 0 /* This macro is similar to `TARGET_SWITCHES' but defines names of command options that have values. Its definition is an initializer with a subgrouping for each command option. Each subgrouping contains a string constant, that defines the fixed part of the option name, and the address of a variable. The variable, type `char *', is set to the variable part of the given option if the fixed part matches. The actual option name is made by appending `-m' to the specified name. 
*/ #define TARGET_OPTIONS \ { { "tune=", &ix86_tune_string, \ N_("Schedule code for given CPU"), 0}, \ { "fpmath=", &ix86_fpmath_string, \ N_("Generate floating point mathematics using given instruction set"), 0},\ { "arch=", &ix86_arch_string, \ N_("Generate code for given CPU"), 0}, \ { "regparm=", &ix86_regparm_string, \ N_("Number of registers used to pass integer arguments"), 0},\ { "align-loops=", &ix86_align_loops_string, \ N_("Loop code aligned to this power of 2"), 0}, \ { "align-jumps=", &ix86_align_jumps_string, \ N_("Jump targets are aligned to this power of 2"), 0}, \ { "align-functions=", &ix86_align_funcs_string, \ N_("Function starts are aligned to this power of 2"), 0}, \ { "preferred-stack-boundary=", \ &ix86_preferred_stack_boundary_string, \ N_("Attempt to keep stack aligned to this power of 2"), 0}, \ { "branch-cost=", &ix86_branch_cost_string, \ N_("Branches are this expensive (1-5, arbitrary units)"), 0},\ { "cmodel=", &ix86_cmodel_string, \ N_("Use given x86-64 code model"), 0}, \ { "debug-arg", &ix86_debug_arg_string, \ "" /* Undocumented. */, 0}, \ { "debug-addr", &ix86_debug_addr_string, \ "" /* Undocumented. */, 0}, \ { "asm=", &ix86_asm_string, \ N_("Use given assembler dialect"), 0}, \ { "tls-dialect=", &ix86_tls_dialect_string, \ N_("Use given thread-local storage dialect"), 0}, \ SUBTARGET_OPTIONS \ } /* Sometimes certain combinations of command options do not make sense on a particular target machine. You can define a macro `OVERRIDE_OPTIONS' to take account of this. This macro, if defined, is executed once just after all the command options have been parsed. Don't use this macro to turn on various extra optimizations for `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */ #define OVERRIDE_OPTIONS override_options () /* These are meant to be redefined in the host dependent files */ #define SUBTARGET_SWITCHES #define SUBTARGET_OPTIONS /* Define this to change the optimizations performed by default. */ #define OPTIMIZATION_OPTIONS(LEVEL, SIZE) \ optimization_options ((LEVEL), (SIZE)) /* Support for configure-time defaults of some command line options. */ #define OPTION_DEFAULT_SPECS \ {"arch", "%{!march=*:-march=%(VALUE)}"}, \ {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \ {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" } /* Specs for the compiler proper */ #ifndef CC1_CPU_SPEC #define CC1_CPU_SPEC "\ %{!mtune*: \ %{m386:mtune=i386 \ %n`-m386' is deprecated. Use `-march=i386' or `-mtune=i386' instead.\n} \ %{m486:-mtune=i486 \ %n`-m486' is deprecated. Use `-march=i486' or `-mtune=i486' instead.\n} \ %{mpentium:-mtune=pentium \ %n`-mpentium' is deprecated. Use `-march=pentium' or `-mtune=pentium' instead.\n} \ %{mpentiumpro:-mtune=pentiumpro \ %n`-mpentiumpro' is deprecated. Use `-march=pentiumpro' or `-mtune=pentiumpro' instead.\n} \ %{mcpu=*:-mtune=%* \ %n`-mcpu=' is deprecated. Use `-mtune=' or '-march=' instead.\n}} \ % STACK_BOUNDARY && !TARGET_64BIT) /* Minimum allocation boundary for the code of a function. */ #define FUNCTION_BOUNDARY 8 /* C++ stores the virtual bit in the lowest bit of function pointers. */ #define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn /* Alignment of field after `int : 0' in a structure. */ #define EMPTY_FIELD_BOUNDARY BITS_PER_WORD /* Minimum size in bits of the largest boundary to which any and all fundamental data types supported by the hardware might need to be aligned. No data type wants to be aligned rounder than this. 
Pentium+ prefers DFmode values to be aligned to 64 bit boundary and Pentium Pro XFmode values at 128 bit boundaries. */ #define BIGGEST_ALIGNMENT 128 /* Decide whether a variable of mode MODE should be 128 bit aligned. */ #define ALIGN_MODE_128(MODE) \ ((MODE) == XFmode || (MODE) == TFmode || SSE_REG_MODE_P (MODE)) /* The published ABIs say that doubles should be aligned on word boundaries, so lower the alignment for structure fields unless -malign-double is set. */ /* ??? Blah -- this macro is used directly by libobjc. Since it supports no vector modes, cut out the complexity and fall back on BIGGEST_FIELD_ALIGNMENT. */ #ifdef IN_TARGET_LIBS #ifdef __x86_64__ #define BIGGEST_FIELD_ALIGNMENT 128 #else #define BIGGEST_FIELD_ALIGNMENT 32 #endif #else #define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \ x86_field_alignment (FIELD, COMPUTED) #endif /* If defined, a C expression to compute the alignment given to a constant that is being placed in memory. EXP is the constant and ALIGN is the alignment that the object would ordinarily have. The value of this macro is used instead of that alignment to align the object. If this macro is not defined, then ALIGN is used. The typical use of this macro is to increase alignment for string constants to be word aligned so that `strcpy' calls that copy constants can be done inline. */ #define CONSTANT_ALIGNMENT(EXP, ALIGN) ix86_constant_alignment ((EXP), (ALIGN)) /* If defined, a C expression to compute the alignment for a static variable. TYPE is the data type, and ALIGN is the alignment that the object would ordinarily have. The value of this macro is used instead of that alignment to align the object. If this macro is not defined, then ALIGN is used. One use of this macro is to increase alignment of medium-size data to make it all fit in fewer cache lines. Another is to cause character arrays to be word-aligned so that `strcpy' calls that copy constants to character arrays can be done inline. */ #define DATA_ALIGNMENT(TYPE, ALIGN) ix86_data_alignment ((TYPE), (ALIGN)) /* If defined, a C expression to compute the alignment for a local variable. TYPE is the data type, and ALIGN is the alignment that the object would ordinarily have. The value of this macro is used instead of that alignment to align the object. If this macro is not defined, then ALIGN is used. One use of this macro is to increase alignment of medium-size data to make it all fit in fewer cache lines. */ #define LOCAL_ALIGNMENT(TYPE, ALIGN) ix86_local_alignment ((TYPE), (ALIGN)) /* If defined, a C expression that gives the alignment boundary, in bits, of an argument with the specified mode and type. If it is not defined, `PARM_BOUNDARY' is used for all arguments. */ #define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \ ix86_function_arg_boundary ((MODE), (TYPE)) /* Set this nonzero if move instructions will actually fail to work when given unaligned data. */ #define STRICT_ALIGNMENT 0 /* If bit field type is int, don't let it cross an int, and give entire struct the alignment of an int. */ /* Required on the 386 since it doesn't have bit-field insns. */ #define PCC_BITFIELD_TYPE_MATTERS 1 /* Standard register usage. */ /* This processor has special stack-like registers. See reg-stack.c for details. */ #define STACK_REGS #define IS_STACK_MODE(MODE) \ ((MODE) == DFmode || (MODE) == SFmode || (MODE) == XFmode) \ /* Number of actual hardware registers. The hardware registers are assigned numbers for the compiler from 0 to just below FIRST_PSEUDO_REGISTER. 
All registers that the compiler knows about must be given numbers, even those that are not normally considered general registers. In the 80386 we give the 8 general purpose registers the numbers 0-7. We number the floating point registers 8-15. Note that registers 0-7 can be accessed as a short or int, while only 0-3 may be used with byte `mov' instructions. Reg 16 does not correspond to any hardware register, but instead appears in the RTL as an argument pointer prior to reload, and is eliminated during reloading in favor of either the stack or frame pointer. */ #define FIRST_PSEUDO_REGISTER 53 /* Number of hardware registers that go into the DWARF-2 unwind info. If not defined, equals FIRST_PSEUDO_REGISTER. */ #define DWARF_FRAME_REGISTERS 17 /* 1 for registers that have pervasive standard uses and are not available for the register allocator. On the 80386, the stack pointer is such, as is the arg pointer. The value is a mask - bit 1 is set for fixed registers for 32bit target, while 2 is set for fixed registers for 64bit. Proper value is computed in the CONDITIONAL_REGISTER_USAGE. */ #define FIXED_REGISTERS \ /*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \ { 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, \ /*arg,flags,fpsr,dir,frame*/ \ 3, 3, 3, 3, 3, \ /*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \ 0, 0, 0, 0, 0, 0, 0, 0, \ /*mmx0,mmx1,mmx2,mmx3,mmx4,mmx5,mmx6,mmx7*/ \ 0, 0, 0, 0, 0, 0, 0, 0, \ /* r8, r9, r10, r11, r12, r13, r14, r15*/ \ 1, 1, 1, 1, 1, 1, 1, 1, \ /*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \ 1, 1, 1, 1, 1, 1, 1, 1} /* 1 for registers not available across function calls. These must include the FIXED_REGISTERS and also any registers that can be used without being saved. The latter must include the registers where values are returned and the register where structure-value addresses are passed. Aside from that, you can include as many other registers as you like. The value is a mask - bit 1 is set for call used for 32bit target, while 2 is set for call used for 64bit. Proper value is computed in the CONDITIONAL_REGISTER_USAGE. */ #define CALL_USED_REGISTERS \ /*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \ { 3, 3, 3, 0, 2, 2, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, \ /*arg,flags,fpsr,dir,frame*/ \ 3, 3, 3, 3, 3, \ /*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \ 3, 3, 3, 3, 3, 3, 3, 3, \ /*mmx0,mmx1,mmx2,mmx3,mmx4,mmx5,mmx6,mmx7*/ \ 3, 3, 3, 3, 3, 3, 3, 3, \ /* r8, r9, r10, r11, r12, r13, r14, r15*/ \ 3, 3, 3, 3, 1, 1, 1, 1, \ /*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \ 3, 3, 3, 3, 3, 3, 3, 3} \ /* Order in which to allocate registers. Each register must be listed once, even those in FIXED_REGISTERS. List frame pointer late and fixed registers last. Note that, in general, we prefer registers listed in CALL_USED_REGISTERS, keeping the others available for storage of persistent values. The ORDER_REGS_FOR_LOCAL_ALLOC actually overwrite the order, so this is just empty initializer for array. */ #define REG_ALLOC_ORDER \ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,\ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, \ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \ 48, 49, 50, 51, 52 } /* ORDER_REGS_FOR_LOCAL_ALLOC is a macro which permits reg_alloc_order to be rearranged based on a particular function. When using sse math, we want to allocate SSE before x87 registers and vice vera. 
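   As an illustrative aside (not part of the original header), the entries
   in FIXED_REGISTERS and CALL_USED_REGISTERS above are small bit masks:
   bit 0 applies to the 32-bit target and bit 1 to the 64-bit target, and
   CONDITIONAL_REGISTER_USAGE below collapses them to plain 0/1 values.  */

#if 0 /* Example only -- ix86_example_reg_fixed_p is hypothetical.  */
static int
ix86_example_reg_fixed_p (int mask_from_table)
{
  /* The same decode step CONDITIONAL_REGISTER_USAGE performs per register:
     keep bit 1 (value 2) when compiling for 64-bit, bit 0 otherwise.  */
  return (mask_from_table & (TARGET_64BIT ? 2 : 1)) != 0;
}
#endif

/*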
*/ #define ORDER_REGS_FOR_LOCAL_ALLOC x86_order_regs_for_local_alloc () /* Macro to conditionally modify fixed_regs/call_used_regs. */ #define CONDITIONAL_REGISTER_USAGE \ do { \ int i; \ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \ { \ fixed_regs[i] = (fixed_regs[i] & (TARGET_64BIT ? 2 : 1)) != 0; \ call_used_regs[i] = (call_used_regs[i] \ & (TARGET_64BIT ? 2 : 1)) != 0; \ } \ if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \ { \ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \ } \ if (! TARGET_MMX) \ { \ int i; \ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \ if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i)) \ fixed_regs[i] = call_used_regs[i] = 1; \ } \ if (! TARGET_SSE) \ { \ int i; \ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \ if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i)) \ fixed_regs[i] = call_used_regs[i] = 1; \ } \ if (! TARGET_80387 && ! TARGET_FLOAT_RETURNS_IN_80387) \ { \ int i; \ HARD_REG_SET x; \ COPY_HARD_REG_SET (x, reg_class_contents[(int)FLOAT_REGS]); \ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) \ if (TEST_HARD_REG_BIT (x, i)) \ fixed_regs[i] = call_used_regs[i] = 1; \ } \ } while (0) /* Return number of consecutive hard regs needed starting at reg REGNO to hold something of mode MODE. This is ordinarily the length in words of a value of mode MODE but can be less for certain modes in special long registers. Actually there are no two word move instructions for consecutive registers. And only registers 0-3 may have mov byte instructions applied to them. */ #define HARD_REGNO_NREGS(REGNO, MODE) \ (FP_REGNO_P (REGNO) || SSE_REGNO_P (REGNO) || MMX_REGNO_P (REGNO) \ ? (COMPLEX_MODE_P (MODE) ? 2 : 1) \ : ((MODE) == XFmode \ ? (TARGET_64BIT ? 2 : 3) \ : (MODE) == XCmode \ ? (TARGET_64BIT ? 4 : 6) \ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))) #define VALID_SSE2_REG_MODE(MODE) \ ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \ || (MODE) == V2DImode) #define VALID_SSE_REG_MODE(MODE) \ ((MODE) == TImode || (MODE) == V4SFmode || (MODE) == V4SImode \ || (MODE) == SFmode || (MODE) == TFmode \ /* Always accept SSE2 modes so that xmmintrin.h compiles. */ \ || VALID_SSE2_REG_MODE (MODE) \ || (TARGET_SSE2 && ((MODE) == DFmode || VALID_MMX_REG_MODE (MODE)))) #define VALID_MMX_REG_MODE_3DNOW(MODE) \ ((MODE) == V2SFmode || (MODE) == SFmode) #define VALID_MMX_REG_MODE(MODE) \ ((MODE) == DImode || (MODE) == V8QImode || (MODE) == V4HImode \ || (MODE) == V2SImode || (MODE) == SImode) #define VECTOR_MODE_SUPPORTED_P(MODE) \ (VALID_SSE_REG_MODE (MODE) && TARGET_SSE ? 1 \ : VALID_MMX_REG_MODE (MODE) && TARGET_MMX ? 1 \ : VALID_MMX_REG_MODE_3DNOW (MODE) && TARGET_3DNOW ? 1 : 0) #define VALID_FP_MODE_P(MODE) \ ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode \ || (MODE) == SCmode || (MODE) == DCmode || (MODE) == XCmode) \ #define VALID_INT_MODE_P(MODE) \ ((MODE) == QImode || (MODE) == HImode || (MODE) == SImode \ || (MODE) == DImode \ || (MODE) == CQImode || (MODE) == CHImode || (MODE) == CSImode \ || (MODE) == CDImode \ || (TARGET_64BIT && ((MODE) == TImode || (MODE) == CTImode \ || (MODE) == TFmode || (MODE) == TCmode))) /* Return true for modes passed in SSE registers. */ #define SSE_REG_MODE_P(MODE) \ ((MODE) == TImode || (MODE) == V16QImode || (MODE) == TFmode \ || (MODE) == V8HImode || (MODE) == V2DFmode || (MODE) == V2DImode \ || (MODE) == V4SFmode || (MODE) == V4SImode) /* Return true for modes passed in MMX registers. 
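   An illustrative aside before that macro (not part of the original
   header): HARD_REGNO_NREGS above gives the number of consecutive hard
   registers a value needs, so with the default 96-bit long double an
   XFmode value takes one x87 register but three 32-bit general registers
   (12 bytes in 4-byte words).  */

#if 0 /* Example only -- ix86_example_xfmode_gpr_nregs is hypothetical.  */
static int
ix86_example_xfmode_gpr_nregs (void)
{
  /* Matches the explicit XFmode case in HARD_REGNO_NREGS above.  */
  return TARGET_64BIT ? 2 : 3;
}
#endif

/*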
*/ #define MMX_REG_MODE_P(MODE) \ ((MODE) == V8QImode || (MODE) == V4HImode || (MODE) == V2SImode \ || (MODE) == V2SFmode) /* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. */ #define HARD_REGNO_MODE_OK(REGNO, MODE) \ ix86_hard_regno_mode_ok ((REGNO), (MODE)) /* Value is 1 if it is a good idea to tie two pseudo registers when one has mode MODE1 and one has mode MODE2. If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2, for any hard reg, then this must be 0 for correct output. */ #define MODES_TIEABLE_P(MODE1, MODE2) \ ((MODE1) == (MODE2) \ || (((MODE1) == HImode || (MODE1) == SImode \ || ((MODE1) == QImode \ && (TARGET_64BIT || !TARGET_PARTIAL_REG_STALL)) \ || ((MODE1) == DImode && TARGET_64BIT)) \ && ((MODE2) == HImode || (MODE2) == SImode \ || ((MODE2) == QImode \ && (TARGET_64BIT || !TARGET_PARTIAL_REG_STALL)) \ || ((MODE2) == DImode && TARGET_64BIT)))) /* It is possible to write patterns to move flags; but until someone does it, */ #define AVOID_CCMODE_COPIES /* Specify the modes required to caller save a given hard regno. We do this on i386 to prevent flags from being saved at all. Kill any attempts to combine saving of modes. */ #define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ (CC_REGNO_P (REGNO) ? VOIDmode \ : (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \ : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), false)\ : (MODE) == HImode && !TARGET_PARTIAL_REG_STALL ? SImode \ : (MODE) == QImode && (REGNO) >= 4 && !TARGET_64BIT ? SImode \ : (MODE)) /* Specify the registers used for certain standard purposes. The values of these macros are register numbers. */ /* on the 386 the pc register is %eip, and is not usable as a general register. The ordinary mov instructions won't work */ /* #define PC_REGNUM */ /* Register to use for pushing function arguments. */ #define STACK_POINTER_REGNUM 7 /* Base register for access to local variables of the function. */ #define HARD_FRAME_POINTER_REGNUM 6 /* Base register for access to local variables of the function. */ #define FRAME_POINTER_REGNUM 20 /* First floating point reg */ #define FIRST_FLOAT_REG 8 /* First & last stack-like regs */ #define FIRST_STACK_REG FIRST_FLOAT_REG #define LAST_STACK_REG (FIRST_FLOAT_REG + 7) #define FIRST_SSE_REG (FRAME_POINTER_REGNUM + 1) #define LAST_SSE_REG (FIRST_SSE_REG + 7) #define FIRST_MMX_REG (LAST_SSE_REG + 1) #define LAST_MMX_REG (FIRST_MMX_REG + 7) #define FIRST_REX_INT_REG (LAST_MMX_REG + 1) #define LAST_REX_INT_REG (FIRST_REX_INT_REG + 7) #define FIRST_REX_SSE_REG (LAST_REX_INT_REG + 1) #define LAST_REX_SSE_REG (FIRST_REX_SSE_REG + 7) /* Value should be nonzero if functions must have frame pointers. Zero means the frame pointer need not be set up (and parms may be accessed via the stack pointer) in functions that seem suitable. This is computed in `reload', in reload1.c. */ #define FRAME_POINTER_REQUIRED ix86_frame_pointer_required () /* Override this in other tm.h files to cope with various OS losage requiring a frame pointer. */ #ifndef SUBTARGET_FRAME_POINTER_REQUIRED #define SUBTARGET_FRAME_POINTER_REQUIRED 0 #endif /* Make sure we can access arbitrary call frames. */ #define SETUP_FRAME_ADDRESSES() ix86_setup_frame_addresses () /* Base register for access to arguments of the function. */ #define ARG_POINTER_REGNUM 16 /* Register in which static-chain is passed to a function. We do use ECX as static chain register for 32 bit ABI. On the 64bit ABI, ECX is an argument register, so we use R10 instead. 
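   As an illustrative aside (not part of the original header), the
   arithmetic in the macro below follows from the register numbering
   above: FIRST_REX_INT_REG is hard register 37 (%r8), so
   FIRST_REX_INT_REG + 10 - 8 is 39, i.e. %r10, while register 2 is %ecx
   in the 32-bit ordering ax, dx, cx, ...  */

#if 0 /* Example only -- ix86_example_static_chain_regno is hypothetical.  */
static int
ix86_example_static_chain_regno (void)
{
  /* 39 (%r10) for the 64-bit ABI, 2 (%ecx) for the 32-bit ABI.  */
  return TARGET_64BIT ? 39 : 2;
}
#endif

/*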
*/ #define STATIC_CHAIN_REGNUM (TARGET_64BIT ? FIRST_REX_INT_REG + 10 - 8 : 2) /* Register to hold the addressing base for position independent code access to data items. We don't use PIC pointer for 64bit mode. Define the regnum to dummy value to prevent gcc from pessimizing code dealing with EBX. To avoid clobbering a call-saved register unnecessarily, we renumber the pic register when possible. The change is visible after the prologue has been emitted. */ #define REAL_PIC_OFFSET_TABLE_REGNUM 3 #define PIC_OFFSET_TABLE_REGNUM \ (TARGET_64BIT || !flag_pic ? INVALID_REGNUM \ : reload_completed ? REGNO (pic_offset_table_rtx) \ : REAL_PIC_OFFSET_TABLE_REGNUM) #define GOT_SYMBOL_NAME "_GLOBAL_OFFSET_TABLE_" /* A C expression which can inhibit the returning of certain function values in registers, based on the type of value. A nonzero value says to return the function value in memory, just as large structures are always returned. Here TYPE will be a C expression of type `tree', representing the data type of the value. Note that values of mode `BLKmode' must be explicitly handled by this macro. Also, the option `-fpcc-struct-return' takes effect regardless of this macro. On most systems, it is possible to leave the macro undefined; this causes a default definition to be used, whose value is the constant 1 for `BLKmode' values, and 0 otherwise. Do not use this macro to indicate that structures and unions should always be returned in memory. You should instead use `DEFAULT_PCC_STRUCT_RETURN' to indicate this. */ #define RETURN_IN_MEMORY(TYPE) \ ix86_return_in_memory (TYPE) /* This is overridden by . */ #define MS_AGGREGATE_RETURN 0 /* Define the classes of registers for register constraints in the machine description. Also define ranges of constants. One of the classes must always be named ALL_REGS and include all hard regs. If there is more than one class, another class must be named NO_REGS and contain no registers. The name GENERAL_REGS must be the name of a class (or an alias for another name such as ALL_REGS). This is the class of registers that is allowed by "g" or "r" in a register constraint. Also, registers outside this class are allocated only when instructions express preferences for them. The classes must be numbered in nondecreasing order; that is, a larger-numbered class must never be contained completely in a smaller-numbered class. For any two classes, it is very desirable that there be another class that represents their union. It might seem that class BREG is unnecessary, since no useful 386 opcode needs reg %ebx. But some systems pass args to the OS in ebx, and the "b" register constraint is useful in asms for syscalls. The flags and fpsr registers are in no class. 
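   As an illustrative aside (not part of the original header), the helper
   predicates defined after the enumeration below are thin wrappers around
   reg_class_subset_p and reg_classes_intersect_p; for instance Q_REGS is
   contained in GENERAL_REGS but is disjoint from FLOAT_REGS.  */

#if 0 /* Example only -- not GCC code; uses the macros defined below.  */
static int
ix86_example_class_checks (void)
{
  return INTEGER_CLASS_P (Q_REGS)     /* Q_REGS within GENERAL_REGS.  */
         && !FLOAT_CLASS_P (Q_REGS);  /* Disjoint from FLOAT_REGS.  */
}
#endif

/*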
*/ enum reg_class { NO_REGS, AREG, DREG, CREG, BREG, SIREG, DIREG, AD_REGS, /* %eax/%edx for DImode */ Q_REGS, /* %eax %ebx %ecx %edx */ NON_Q_REGS, /* %esi %edi %ebp %esp */ INDEX_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp */ LEGACY_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp */ GENERAL_REGS, /* %eax %ebx %ecx %edx %esi %edi %ebp %esp %r8 - %r15*/ FP_TOP_REG, FP_SECOND_REG, /* %st(0) %st(1) */ FLOAT_REGS, SSE_REGS, MMX_REGS, FP_TOP_SSE_REGS, FP_SECOND_SSE_REGS, FLOAT_SSE_REGS, FLOAT_INT_REGS, INT_SSE_REGS, FLOAT_INT_SSE_REGS, ALL_REGS, LIM_REG_CLASSES }; #define N_REG_CLASSES ((int) LIM_REG_CLASSES) #define INTEGER_CLASS_P(CLASS) \ reg_class_subset_p ((CLASS), GENERAL_REGS) #define FLOAT_CLASS_P(CLASS) \ reg_class_subset_p ((CLASS), FLOAT_REGS) #define SSE_CLASS_P(CLASS) \ reg_class_subset_p ((CLASS), SSE_REGS) #define MMX_CLASS_P(CLASS) \ reg_class_subset_p ((CLASS), MMX_REGS) #define MAYBE_INTEGER_CLASS_P(CLASS) \ reg_classes_intersect_p ((CLASS), GENERAL_REGS) #define MAYBE_FLOAT_CLASS_P(CLASS) \ reg_classes_intersect_p ((CLASS), FLOAT_REGS) #define MAYBE_SSE_CLASS_P(CLASS) \ reg_classes_intersect_p (SSE_REGS, (CLASS)) #define MAYBE_MMX_CLASS_P(CLASS) \ reg_classes_intersect_p (MMX_REGS, (CLASS)) #define Q_CLASS_P(CLASS) \ reg_class_subset_p ((CLASS), Q_REGS) /* Give names of register classes as strings for dump file. */ #define REG_CLASS_NAMES \ { "NO_REGS", \ "AREG", "DREG", "CREG", "BREG", \ "SIREG", "DIREG", \ "AD_REGS", \ "Q_REGS", "NON_Q_REGS", \ "INDEX_REGS", \ "LEGACY_REGS", \ "GENERAL_REGS", \ "FP_TOP_REG", "FP_SECOND_REG", \ "FLOAT_REGS", \ "SSE_REGS", \ "MMX_REGS", \ "FP_TOP_SSE_REGS", \ "FP_SECOND_SSE_REGS", \ "FLOAT_SSE_REGS", \ "FLOAT_INT_REGS", \ "INT_SSE_REGS", \ "FLOAT_INT_SSE_REGS", \ "ALL_REGS" } /* Define which registers fit in which classes. This is an initializer for a vector of HARD_REG_SET of length N_REG_CLASSES. */ #define REG_CLASS_CONTENTS \ { { 0x00, 0x0 }, \ { 0x01, 0x0 }, { 0x02, 0x0 }, /* AREG, DREG */ \ { 0x04, 0x0 }, { 0x08, 0x0 }, /* CREG, BREG */ \ { 0x10, 0x0 }, { 0x20, 0x0 }, /* SIREG, DIREG */ \ { 0x03, 0x0 }, /* AD_REGS */ \ { 0x0f, 0x0 }, /* Q_REGS */ \ { 0x1100f0, 0x1fe0 }, /* NON_Q_REGS */ \ { 0x7f, 0x1fe0 }, /* INDEX_REGS */ \ { 0x1100ff, 0x0 }, /* LEGACY_REGS */ \ { 0x1100ff, 0x1fe0 }, /* GENERAL_REGS */ \ { 0x100, 0x0 }, { 0x0200, 0x0 },/* FP_TOP_REG, FP_SECOND_REG */\ { 0xff00, 0x0 }, /* FLOAT_REGS */ \ { 0x1fe00000,0x1fe000 }, /* SSE_REGS */ \ { 0xe0000000, 0x1f }, /* MMX_REGS */ \ { 0x1fe00100,0x1fe000 }, /* FP_TOP_SSE_REG */ \ { 0x1fe00200,0x1fe000 }, /* FP_SECOND_SSE_REG */ \ { 0x1fe0ff00,0x1fe000 }, /* FLOAT_SSE_REGS */ \ { 0x1ffff, 0x1fe0 }, /* FLOAT_INT_REGS */ \ { 0x1fe100ff,0x1fffe0 }, /* INT_SSE_REGS */ \ { 0x1fe1ffff,0x1fffe0 }, /* FLOAT_INT_SSE_REGS */ \ { 0xffffffff,0x1fffff } \ } /* The same information, inverted: Return the class number of the smallest class containing reg number REGNO. This could be a conditional expression or could index an array. */ #define REGNO_REG_CLASS(REGNO) (regclass_map[REGNO]) /* When defined, the compiler allows registers explicitly used in the rtl to be used as spill registers but prevents the compiler from extending the lifetime of these registers. */ #define SMALL_REGISTER_CLASSES 1 #define QI_REG_P(X) \ (REG_P (X) && REGNO (X) < 4) #define GENERAL_REGNO_P(N) \ ((N) < 8 || REX_INT_REGNO_P (N)) #define GENERAL_REG_P(X) \ (REG_P (X) && GENERAL_REGNO_P (REGNO (X))) #define ANY_QI_REG_P(X) (TARGET_64BIT ? 
GENERAL_REG_P(X) : QI_REG_P (X)) #define NON_QI_REG_P(X) \ (REG_P (X) && REGNO (X) >= 4 && REGNO (X) < FIRST_PSEUDO_REGISTER) #define REX_INT_REGNO_P(N) ((N) >= FIRST_REX_INT_REG && (N) <= LAST_REX_INT_REG) #define REX_INT_REG_P(X) (REG_P (X) && REX_INT_REGNO_P (REGNO (X))) #define FP_REG_P(X) (REG_P (X) && FP_REGNO_P (REGNO (X))) #define FP_REGNO_P(N) ((N) >= FIRST_STACK_REG && (N) <= LAST_STACK_REG) #define ANY_FP_REG_P(X) (REG_P (X) && ANY_FP_REGNO_P (REGNO (X))) #define ANY_FP_REGNO_P(N) (FP_REGNO_P (N) || SSE_REGNO_P (N)) #define SSE_REGNO_P(N) \ (((N) >= FIRST_SSE_REG && (N) <= LAST_SSE_REG) \ || ((N) >= FIRST_REX_SSE_REG && (N) <= LAST_REX_SSE_REG)) #define REX_SSE_REGNO_P(N) \ ((N) >= FIRST_REX_SSE_REG && (N) <= LAST_REX_SSE_REG) #define SSE_REGNO(N) \ ((N) < 8 ? FIRST_SSE_REG + (N) : FIRST_REX_SSE_REG + (N) - 8) #define SSE_REG_P(N) (REG_P (N) && SSE_REGNO_P (REGNO (N))) #define SSE_FLOAT_MODE_P(MODE) \ ((TARGET_SSE && (MODE) == SFmode) || (TARGET_SSE2 && (MODE) == DFmode)) #define MMX_REGNO_P(N) ((N) >= FIRST_MMX_REG && (N) <= LAST_MMX_REG) #define MMX_REG_P(XOP) (REG_P (XOP) && MMX_REGNO_P (REGNO (XOP))) #define STACK_REG_P(XOP) \ (REG_P (XOP) && \ REGNO (XOP) >= FIRST_STACK_REG && \ REGNO (XOP) <= LAST_STACK_REG) #define NON_STACK_REG_P(XOP) (REG_P (XOP) && ! STACK_REG_P (XOP)) #define STACK_TOP_P(XOP) (REG_P (XOP) && REGNO (XOP) == FIRST_STACK_REG) #define CC_REG_P(X) (REG_P (X) && CC_REGNO_P (REGNO (X))) #define CC_REGNO_P(X) ((X) == FLAGS_REG || (X) == FPSR_REG) /* The class value for index registers, and the one for base regs. */ #define INDEX_REG_CLASS INDEX_REGS #define BASE_REG_CLASS GENERAL_REGS /* Get reg_class from a letter such as appears in the machine description. */ #define REG_CLASS_FROM_LETTER(C) \ ((C) == 'r' ? GENERAL_REGS : \ (C) == 'R' ? LEGACY_REGS : \ (C) == 'q' ? TARGET_64BIT ? GENERAL_REGS : Q_REGS : \ (C) == 'Q' ? Q_REGS : \ (C) == 'f' ? (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 \ ? FLOAT_REGS \ : NO_REGS) : \ (C) == 't' ? (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 \ ? FP_TOP_REG \ : NO_REGS) : \ (C) == 'u' ? (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387 \ ? FP_SECOND_REG \ : NO_REGS) : \ (C) == 'a' ? AREG : \ (C) == 'b' ? BREG : \ (C) == 'c' ? CREG : \ (C) == 'd' ? DREG : \ (C) == 'x' ? TARGET_SSE ? SSE_REGS : NO_REGS : \ (C) == 'Y' ? TARGET_SSE2? SSE_REGS : NO_REGS : \ (C) == 'y' ? TARGET_MMX ? MMX_REGS : NO_REGS : \ (C) == 'A' ? AD_REGS : \ (C) == 'D' ? DIREG : \ (C) == 'S' ? SIREG : NO_REGS) /* The letters I, J, K, L and M in a register constraint string can be used to stand for particular ranges of immediate operands. This macro defines what the ranges are. C is the letter, and VALUE is a constant value. Return 1 if VALUE is in the range specified by C. I is for non-DImode shifts. J is for DImode shifts. K is for signed imm8 operands. L is for andsi as zero-extending move. M is for shifts that can be executed by the "lea" opcode. N is for immediate operands for out/in instructions (0-255) */ #define CONST_OK_FOR_LETTER_P(VALUE, C) \ ((C) == 'I' ? (VALUE) >= 0 && (VALUE) <= 31 \ : (C) == 'J' ? (VALUE) >= 0 && (VALUE) <= 63 \ : (C) == 'K' ? (VALUE) >= -128 && (VALUE) <= 127 \ : (C) == 'L' ? (VALUE) == 0xff || (VALUE) == 0xffff \ : (C) == 'M' ? (VALUE) >= 0 && (VALUE) <= 3 \ : (C) == 'N' ? (VALUE) >= 0 && (VALUE) <= 255 \ : 0) /* Similar, but for floating constants, and defining letters G and H. Here VALUE is the CONST_DOUBLE rtx itself. 
We allow constants even if TARGET_387 isn't set, because the stack register converter may need to load 0.0 into the function value register. */ #define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \ ((C) == 'G' ? standard_80387_constant_p (VALUE) \ : 0) /* A C expression that defines the optional machine-dependent constraint letters that can be used to segregate specific types of operands, usually memory references, for the target machine. Any letter that is not elsewhere defined and not matched by `REG_CLASS_FROM_LETTER' may be used. Normally this macro will not be defined. If it is required for a particular target machine, it should return 1 if VALUE corresponds to the operand type represented by the constraint letter C. If C is not defined as an extra constraint, the value returned should be 0 regardless of VALUE. */ #define EXTRA_CONSTRAINT(VALUE, D) \ ((D) == 'e' ? x86_64_sign_extended_value (VALUE) \ : (D) == 'Z' ? x86_64_zero_extended_value (VALUE) \ : (D) == 'C' ? standard_sse_constant_p (VALUE) \ : 0) /* Place additional restrictions on the register class to use when it is necessary to be able to hold a value of mode MODE in a reload register for which class CLASS would ordinarily be used. */ #define LIMIT_RELOAD_CLASS(MODE, CLASS) \ ((MODE) == QImode && !TARGET_64BIT \ && ((CLASS) == ALL_REGS || (CLASS) == GENERAL_REGS \ || (CLASS) == LEGACY_REGS || (CLASS) == INDEX_REGS) \ ? Q_REGS : (CLASS)) /* Given an rtx X being reloaded into a reg required to be in class CLASS, return the class of reg to actually use. In general this is just CLASS; but on some machines in some cases it is preferable to use a more restrictive class. On the 80386 series, we prevent floating constants from being reloaded into floating registers (since no move-insn can do that) and we ensure that QImodes aren't reloaded into the esi or edi reg. */ /* Put float CONST_DOUBLE in the constant pool instead of fp regs. QImode must go into class Q_REGS. Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and movdf to do mem-to-mem moves through integer regs. */ #define PREFERRED_RELOAD_CLASS(X, CLASS) \ ix86_preferred_reload_class ((X), (CLASS)) /* If we are copying between general and FP registers, we need a memory location. The same is true for SSE and MMX registers. */ #define SECONDARY_MEMORY_NEEDED(CLASS1, CLASS2, MODE) \ ix86_secondary_memory_needed ((CLASS1), (CLASS2), (MODE), 1) /* QImode spills from non-QI registers need a scratch. This does not happen often -- the only example so far requires an uninitialized pseudo. */ #define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, OUT) \ (((CLASS) == GENERAL_REGS || (CLASS) == LEGACY_REGS \ || (CLASS) == INDEX_REGS) && !TARGET_64BIT && (MODE) == QImode \ ? Q_REGS : NO_REGS) /* Return the maximum number of consecutive registers needed to represent mode MODE in a register of class CLASS. */ /* On the 80386, this is the size of MODE in words, except in the FP regs, where a single reg is always enough. */ #define CLASS_MAX_NREGS(CLASS, MODE) \ (!MAYBE_INTEGER_CLASS_P (CLASS) \ ? (COMPLEX_MODE_P (MODE) ? 2 : 1) \ : (((((MODE) == XFmode ? 12 : GET_MODE_SIZE (MODE))) \ + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) /* A C expression whose value is nonzero if pseudos that have been assigned to registers of class CLASS would likely be spilled because registers of CLASS are needed for spill registers. The default value of this macro returns 1 if CLASS has exactly one register and zero otherwise. On most machines, this default should be used. 
Only define this macro to some other expression if pseudo allocated by `local-alloc.c' end up in memory because their hard registers were needed for spill registers. If this macro returns nonzero for those classes, those pseudos will only be allocated by `global.c', which knows how to reallocate the pseudo to another register. If there would not be another register available for reallocation, you should not change the definition of this macro since the only effect of such a definition would be to slow down register allocation. */ #define CLASS_LIKELY_SPILLED_P(CLASS) \ (((CLASS) == AREG) \ || ((CLASS) == DREG) \ || ((CLASS) == CREG) \ || ((CLASS) == BREG) \ || ((CLASS) == AD_REGS) \ || ((CLASS) == SIREG) \ || ((CLASS) == DIREG) \ || ((CLASS) == FP_TOP_REG) \ || ((CLASS) == FP_SECOND_REG)) /* Return a class of registers that cannot change FROM mode to TO mode. x87 registers can't do subreg as all values are reformated to extended precision. XMM registers does not support with nonzero offsets equal to 4, 8 and 12 otherwise valid for integer registers. Since we can't determine these, prohibit all nonparadoxical subregs changing size. */ #define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \ (GET_MODE_SIZE (TO) < GET_MODE_SIZE (FROM) \ ? reg_classes_intersect_p (FLOAT_SSE_REGS, (CLASS)) \ || MAYBE_MMX_CLASS_P (CLASS) \ : GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \ ? reg_classes_intersect_p (FLOAT_REGS, (CLASS)) : 0) /* Stack layout; function entry, exit and calling. */ /* Define this if pushing a word on the stack makes the stack pointer a smaller address. */ #define STACK_GROWS_DOWNWARD /* Define this if the nominal address of the stack frame is at the high-address end of the local variables; that is, each additional local variable allocated goes at a more negative offset in the frame. */ #define FRAME_GROWS_DOWNWARD /* Offset within stack frame to start allocating local variables at. If FRAME_GROWS_DOWNWARD, this is the offset to the END of the first local allocated. Otherwise, it is the offset to the BEGINNING of the first local allocated. */ #define STARTING_FRAME_OFFSET 0 /* If we generate an insn to push BYTES bytes, this says how many the stack pointer really advances by. On 386 pushw decrements by exactly 2 no matter what the position was. On the 386 there is no pushb; we use pushw instead, and this has the effect of rounding up to 2. For 64bit ABI we round up to 8 bytes. */ #define PUSH_ROUNDING(BYTES) \ (TARGET_64BIT \ ? (((BYTES) + 7) & (-8)) \ : (((BYTES) + 1) & (-2))) /* If defined, the maximum amount of space required for outgoing arguments will be computed and placed into the variable `current_function_outgoing_args_size'. No space will be pushed onto the stack for each call; instead, the function prologue should increase the stack frame size by this amount. */ #define ACCUMULATE_OUTGOING_ARGS TARGET_ACCUMULATE_OUTGOING_ARGS /* If defined, a C expression whose value is nonzero when we want to use PUSH instructions to pass outgoing arguments. */ #define PUSH_ARGS (TARGET_PUSH_ARGS && !ACCUMULATE_OUTGOING_ARGS) /* We want the stack and args grow in opposite directions, even if PUSH_ARGS is 0. */ #define PUSH_ARGS_REVERSED 1 /* Offset of first parameter from the argument pointer register value. */ #define FIRST_PARM_OFFSET(FNDECL) 0 /* Define this macro if functions should assume that stack space has been allocated for arguments even when their values are passed in registers. 
The value of this macro is the size, in bytes, of the area reserved for arguments passed in registers for the function represented by FNDECL. This space can be allocated by the caller, or be a part of the machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE' says which. */ #define REG_PARM_STACK_SPACE(FNDECL) 0 /* Define as a C expression that evaluates to nonzero if we do not know how to pass TYPE solely in registers. The file expr.h defines a definition that is usually appropriate, refer to expr.h for additional documentation. If `REG_PARM_STACK_SPACE' is defined, the argument will be computed in the stack and then loaded into a register. */ #define MUST_PASS_IN_STACK(MODE, TYPE) ix86_must_pass_in_stack ((MODE), (TYPE)) /* Value is the number of bytes of arguments automatically popped when returning from a subroutine call. FUNDECL is the declaration node of the function (as a tree), FUNTYPE is the data type of the function (as a tree), or for a library call it is an identifier node for the subroutine name. SIZE is the number of bytes of arguments passed on the stack. On the 80386, the RTD insn may be used to pop them if the number of args is fixed, but if the number is variable then the caller must pop them all. RTD can't be used for library calls now because the library is compiled with the Unix compiler. Use of RTD is a selectable option, since it is incompatible with standard Unix calling sequences. If the option is not selected, the caller must always pop the args. The attribute stdcall is equivalent to RTD on a per module basis. */ #define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, SIZE) \ ix86_return_pops_args ((FUNDECL), (FUNTYPE), (SIZE)) /* Define how to find the value returned by a function. VALTYPE is the data type of the value (as a tree). If the precise function being called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. */ #define FUNCTION_VALUE(VALTYPE, FUNC) \ ix86_function_value (VALTYPE) #define FUNCTION_VALUE_REGNO_P(N) \ ix86_function_value_regno_p (N) /* Define how to find the value returned by a library function assuming the value has mode MODE. */ #define LIBCALL_VALUE(MODE) \ ix86_libcall_value (MODE) /* Define the size of the result block used for communication between untyped_call and untyped_return. The block contains a DImode value followed by the block used by fnsave and frstor. */ #define APPLY_RESULT_SIZE (8+108) /* 1 if N is a possible register number for function argument passing. */ #define FUNCTION_ARG_REGNO_P(N) ix86_function_arg_regno_p (N) /* Define a data type for recording info about an argument list during the scan of that argument list. This data type should hold all necessary information about the function itself and about the args processed so far, enough to enable macros such as FUNCTION_ARG to determine where the next arg should go. */ typedef struct ix86_args { int words; /* # words passed so far */ int nregs; /* # registers available for passing */ int regno; /* next available register number */ int fastcall; /* fastcall calling convention is used */ int sse_words; /* # sse words passed so far */ int sse_nregs; /* # sse registers available for passing */ int warn_sse; /* True when we want to warn about SSE ABI. */ int warn_mmx; /* True when we want to warn about MMX ABI. 
*/ int sse_regno; /* next available sse register number */ int mmx_words; /* # mmx words passed so far */ int mmx_nregs; /* # mmx registers available for passing */ int mmx_regno; /* next available mmx register number */ int maybe_vaarg; /* true for calls to possibly vardic fncts. */ } CUMULATIVE_ARGS; /* Initialize a variable CUM of type CUMULATIVE_ARGS for a call to a function whose data type is FNTYPE. For a library call, FNTYPE is 0. */ #define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \ init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL)) /* Update the data in CUM to advance over an argument of mode MODE and data type TYPE. (TYPE is null for libcalls where that information may not be available.) */ #define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \ function_arg_advance (&(CUM), (MODE), (TYPE), (NAMED)) /* Define where to put the arguments to a function. Value is zero to push the argument on the stack, or a hard register in which to store the argument. MODE is the argument's machine mode. TYPE is the data type of the argument (as a tree). This is null for libcalls where that information may not be available. CUM is a variable of type CUMULATIVE_ARGS which gives info about the preceding args and about the function being called. NAMED is nonzero if this argument is a named parameter (otherwise it is an extra parameter matching an ellipsis). */ #define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \ function_arg (&(CUM), (MODE), (TYPE), (NAMED)) /* For an arg passed partly in registers and partly in memory, this is the number of registers used. For args passed entirely in registers or entirely in memory, zero. */ #define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) 0 /* A C expression that indicates when an argument must be passed by reference. If nonzero for an argument, a copy of that argument is made in memory and a pointer to the argument is passed instead of the argument itself. The pointer is passed in whatever way is appropriate for passing a pointer to that type. */ #define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \ function_arg_pass_by_reference(&CUM, MODE, TYPE, NAMED) /* Implement `va_start' for varargs and stdarg. */ #define EXPAND_BUILTIN_VA_START(VALIST, NEXTARG) \ ix86_va_start (VALIST, NEXTARG) /* Implement `va_arg'. */ #define EXPAND_BUILTIN_VA_ARG(VALIST, TYPE) (abort (), NULL_RTX) #define TARGET_ASM_FILE_END ix86_file_end #define NEED_INDICATE_EXEC_STACK 0 /* Output assembler code to FILE to increment profiler label # LABELNO for profiling a function entry. */ #define FUNCTION_PROFILER(FILE, LABELNO) x86_function_profiler (FILE, LABELNO) #define MCOUNT_NAME "_mcount" #define PROFILE_COUNT_REGISTER "edx" /* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, the stack pointer does not matter. The value is tested only in functions that have frame pointers. No definition is equivalent to always zero. */ /* Note on the 386 it might be more efficient not to define this since we have to restore it ourselves from the frame pointer, in order to use pop */ #define EXIT_IGNORE_STACK 1 /* Output assembler code for a block containing the constant parts of a trampoline, leaving space for the variable parts. */ /* On the 386, the trampoline contains two instructions: mov #STATIC,ecx jmp FUNCTION The trampoline is generated entirely at runtime. The operand of JMP is the address of FUNCTION relative to the instruction following the JMP (which is 5 bytes long). 
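   As an illustrative aside (not part of the original header), on the
   32-bit target both instructions are five bytes (B9 imm32 for the mov,
   E9 rel32 for the jmp), which is where the TRAMPOLINE_SIZE of 10 below
   comes from; the stored jmp operand is measured from the end of the
   trampoline.  */

#if 0 /* Example only -- ix86_example_jmp_displacement is hypothetical.  */
static long
ix86_example_jmp_displacement (long tramp, long fnaddr)
{
  /* Displacement placed after the E9 opcode: target address minus the
     address of the instruction following the 10-byte trampoline.  */
  return fnaddr - (tramp + 10);
}
#endif

/*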
*/ /* Length in units of the trampoline for entering a nested function. */ #define TRAMPOLINE_SIZE (TARGET_64BIT ? 23 : 10) /* Emit RTL insns to initialize the variable parts of a trampoline. FNADDR is an RTX for the address of the function's pure code. CXT is an RTX for the static chain value for the function. */ #define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \ x86_initialize_trampoline ((TRAMP), (FNADDR), (CXT)) /* Definitions for register eliminations. This is an array of structures. Each structure initializes one pair of eliminable registers. The "from" register number is given first, followed by "to". Eliminations of the same "from" register are listed in order of preference. There are two registers that can always be eliminated on the i386. The frame pointer and the arg pointer can be replaced by either the hard frame pointer or to the stack pointer, depending upon the circumstances. The hard frame pointer is not used before reload and so it is not eligible for elimination. */ #define ELIMINABLE_REGS \ {{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \ /* Given FROM and TO register numbers, say whether this elimination is allowed. Frame pointer elimination is automatically handled. All other eliminations are valid. */ #define CAN_ELIMINATE(FROM, TO) \ ((TO) == STACK_POINTER_REGNUM ? ! frame_pointer_needed : 1) /* Define the offset between two registers, one to be eliminated, and the other its replacement, at the start of a routine. */ #define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ ((OFFSET) = ix86_initial_elimination_offset ((FROM), (TO))) /* Addressing modes, and classification of registers for them. */ /* Macros to check register numbers against specific register classes. */ /* These assume that REGNO is a hard or pseudo reg number. They give nonzero only if REGNO is a hard reg of the suitable class or a pseudo reg currently allocated to a suitable hard reg. Since they use reg_renumber, they are safe only once reg_renumber has been allocated, which happens in local-alloc.c. */ #define REGNO_OK_FOR_INDEX_P(REGNO) \ ((REGNO) < STACK_POINTER_REGNUM \ || (REGNO >= FIRST_REX_INT_REG \ && (REGNO) <= LAST_REX_INT_REG) \ || ((unsigned) reg_renumber[(REGNO)] >= FIRST_REX_INT_REG \ && (unsigned) reg_renumber[(REGNO)] <= LAST_REX_INT_REG) \ || (unsigned) reg_renumber[(REGNO)] < STACK_POINTER_REGNUM) #define REGNO_OK_FOR_BASE_P(REGNO) \ ((REGNO) <= STACK_POINTER_REGNUM \ || (REGNO) == ARG_POINTER_REGNUM \ || (REGNO) == FRAME_POINTER_REGNUM \ || (REGNO >= FIRST_REX_INT_REG \ && (REGNO) <= LAST_REX_INT_REG) \ || ((unsigned) reg_renumber[(REGNO)] >= FIRST_REX_INT_REG \ && (unsigned) reg_renumber[(REGNO)] <= LAST_REX_INT_REG) \ || (unsigned) reg_renumber[(REGNO)] <= STACK_POINTER_REGNUM) #define REGNO_OK_FOR_SIREG_P(REGNO) \ ((REGNO) == 4 || reg_renumber[(REGNO)] == 4) #define REGNO_OK_FOR_DIREG_P(REGNO) \ ((REGNO) == 5 || reg_renumber[(REGNO)] == 5) /* The macros REG_OK_FOR..._P assume that the arg is a REG rtx and check its validity for a certain class. We have two alternate definitions for each of them. The usual definition accepts all pseudo regs; the other rejects them unless they have been allocated suitable hard regs. The symbol REG_OK_STRICT causes the latter definition to be used. Most source files want to accept pseudo regs in the hope that they will get allocated to the class that the insn wants them to be in. 
Source files for reload pass need to be strict. After reload, it makes no difference, since pseudo regs have been eliminated by then. */ /* Non strict versions, pseudos are ok. */ #define REG_OK_FOR_INDEX_NONSTRICT_P(X) \ (REGNO (X) < STACK_POINTER_REGNUM \ || (REGNO (X) >= FIRST_REX_INT_REG \ && REGNO (X) <= LAST_REX_INT_REG) \ || REGNO (X) >= FIRST_PSEUDO_REGISTER) #define REG_OK_FOR_BASE_NONSTRICT_P(X) \ (REGNO (X) <= STACK_POINTER_REGNUM \ || REGNO (X) == ARG_POINTER_REGNUM \ || REGNO (X) == FRAME_POINTER_REGNUM \ || (REGNO (X) >= FIRST_REX_INT_REG \ && REGNO (X) <= LAST_REX_INT_REG) \ || REGNO (X) >= FIRST_PSEUDO_REGISTER) /* Strict versions, hard registers only */ #define REG_OK_FOR_INDEX_STRICT_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X)) #define REG_OK_FOR_BASE_STRICT_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) #ifndef REG_OK_STRICT #define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_NONSTRICT_P (X) #define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_NONSTRICT_P (X) #else #define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_STRICT_P (X) #define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P (X) #endif /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid memory address for an instruction. The MODE argument is the machine mode for the MEM expression that wants to use this address. The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS, except for CONSTANT_ADDRESS_P which is usually machine-independent. See legitimize_pic_address in i386.c for details as to what constitutes a legitimate address when -fpic is used. */ #define MAX_REGS_PER_ADDRESS 2 #define CONSTANT_ADDRESS_P(X) constant_address_p (X) /* Nonzero if the constant value X is a legitimate general operand. It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */ #define LEGITIMATE_CONSTANT_P(X) legitimate_constant_p (X) #ifdef REG_OK_STRICT #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ do { \ if (legitimate_address_p ((MODE), (X), 1)) \ goto ADDR; \ } while (0) #else #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ do { \ if (legitimate_address_p ((MODE), (X), 0)) \ goto ADDR; \ } while (0) #endif /* If defined, a C expression to determine the base term of address X. This macro is used in only one place: `find_base_term' in alias.c. It is always safe for this macro to not be defined. It exists so that alias analysis can understand machine-dependent addresses. The typical use of this macro is to handle addresses containing a label_ref or symbol_ref within an UNSPEC. */ #define FIND_BASE_TERM(X) ix86_find_base_term (X) /* Try machine-dependent ways of modifying an illegitimate address to be legitimate. If we find one, return the new, valid address. This macro is used in only one place: `memory_address' in explow.c. OLDX is the address as it was before break_out_memory_refs was called. In some cases it is useful to look at this to decide what needs to be done. MODE and WIN are passed so that this macro can use GO_IF_LEGITIMATE_ADDRESS. It is always safe for this macro to do nothing. It exists to recognize opportunities to optimize the output. For the 80386, we handle X+REG by loading X into a register R and using R+REG. R will go in a general reg and indexing will be used. However, if REG is a broken-out memory address or multiplication, nothing needs to be done because REG can certainly go in a general reg. When -fpic is used, special handling is needed for symbolic references. See comments by legitimize_pic_address in i386.c for details. 
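Both GO_IF_LEGITIMATE_ADDRESS above and LEGITIMIZE_ADDRESS below follow the
   same goto-label protocol: jump to the supplied label on success, fall through
   on failure.  A hypothetical consumer (relying on GCC-internal types) might
   look like this:  */

#if 0
/* Hypothetical sketch of consuming the goto-label protocol; rtx and
   enum machine_mode are GCC-internal types.  */
static int
sketch_address_is_legitimate (enum machine_mode mode, rtx x)
{
  GO_IF_LEGITIMATE_ADDRESS (mode, x, win);
  return 0;     /* fell through: X is not a valid address for MODE */
 win:
  return 1;     /* the macro jumped here: X is a valid address */
}
#endif
/*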
*/ #define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \ do { \ (X) = legitimize_address ((X), (OLDX), (MODE)); \ if (memory_address_p ((MODE), (X))) \ goto WIN; \ } while (0) #define REWRITE_ADDRESS(X) rewrite_address (X) /* Nonzero if the constant value X is a legitimate general operand when generating PIC code. It is given that flag_pic is on and that X satisfies CONSTANT_P or is a CONST_DOUBLE. */ #define LEGITIMATE_PIC_OPERAND_P(X) legitimate_pic_operand_p (X) #define SYMBOLIC_CONST(X) \ (GET_CODE (X) == SYMBOL_REF \ || GET_CODE (X) == LABEL_REF \ || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X))) /* Go to LABEL if ADDR (a legitimate address expression) has an effect that depends on the machine mode it is used for. On the 80386, only postdecrement and postincrement address depend thus (the amount of decrement or increment being the length of the operand). */ #define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \ do { \ if (GET_CODE (ADDR) == POST_INC \ || GET_CODE (ADDR) == POST_DEC) \ goto LABEL; \ } while (0) /* Codes for all the SSE/MMX builtins. */ enum ix86_builtins { IX86_BUILTIN_ADDPS, IX86_BUILTIN_ADDSS, IX86_BUILTIN_DIVPS, IX86_BUILTIN_DIVSS, IX86_BUILTIN_MULPS, IX86_BUILTIN_MULSS, IX86_BUILTIN_SUBPS, IX86_BUILTIN_SUBSS, IX86_BUILTIN_CMPEQPS, IX86_BUILTIN_CMPLTPS, IX86_BUILTIN_CMPLEPS, IX86_BUILTIN_CMPGTPS, IX86_BUILTIN_CMPGEPS, IX86_BUILTIN_CMPNEQPS, IX86_BUILTIN_CMPNLTPS, IX86_BUILTIN_CMPNLEPS, IX86_BUILTIN_CMPNGTPS, IX86_BUILTIN_CMPNGEPS, IX86_BUILTIN_CMPORDPS, IX86_BUILTIN_CMPUNORDPS, IX86_BUILTIN_CMPNEPS, IX86_BUILTIN_CMPEQSS, IX86_BUILTIN_CMPLTSS, IX86_BUILTIN_CMPLESS, IX86_BUILTIN_CMPNEQSS, IX86_BUILTIN_CMPNLTSS, IX86_BUILTIN_CMPNLESS, IX86_BUILTIN_CMPORDSS, IX86_BUILTIN_CMPUNORDSS, IX86_BUILTIN_CMPNESS, IX86_BUILTIN_COMIEQSS, IX86_BUILTIN_COMILTSS, IX86_BUILTIN_COMILESS, IX86_BUILTIN_COMIGTSS, IX86_BUILTIN_COMIGESS, IX86_BUILTIN_COMINEQSS, IX86_BUILTIN_UCOMIEQSS, IX86_BUILTIN_UCOMILTSS, IX86_BUILTIN_UCOMILESS, IX86_BUILTIN_UCOMIGTSS, IX86_BUILTIN_UCOMIGESS, IX86_BUILTIN_UCOMINEQSS, IX86_BUILTIN_CVTPI2PS, IX86_BUILTIN_CVTPS2PI, IX86_BUILTIN_CVTSI2SS, IX86_BUILTIN_CVTSI642SS, IX86_BUILTIN_CVTSS2SI, IX86_BUILTIN_CVTSS2SI64, IX86_BUILTIN_CVTTPS2PI, IX86_BUILTIN_CVTTSS2SI, IX86_BUILTIN_CVTTSS2SI64, IX86_BUILTIN_MAXPS, IX86_BUILTIN_MAXSS, IX86_BUILTIN_MINPS, IX86_BUILTIN_MINSS, IX86_BUILTIN_LOADAPS, IX86_BUILTIN_LOADUPS, IX86_BUILTIN_STOREAPS, IX86_BUILTIN_STOREUPS, IX86_BUILTIN_LOADSS, IX86_BUILTIN_STORESS, IX86_BUILTIN_MOVSS, IX86_BUILTIN_MOVHLPS, IX86_BUILTIN_MOVLHPS, IX86_BUILTIN_LOADHPS, IX86_BUILTIN_LOADLPS, IX86_BUILTIN_STOREHPS, IX86_BUILTIN_STORELPS, IX86_BUILTIN_MASKMOVQ, IX86_BUILTIN_MOVMSKPS, IX86_BUILTIN_PMOVMSKB, IX86_BUILTIN_MOVNTPS, IX86_BUILTIN_MOVNTQ, IX86_BUILTIN_LOADDQA, IX86_BUILTIN_LOADDQU, IX86_BUILTIN_STOREDQA, IX86_BUILTIN_STOREDQU, IX86_BUILTIN_MOVQ, IX86_BUILTIN_LOADD, IX86_BUILTIN_STORED, IX86_BUILTIN_CLRTI, IX86_BUILTIN_PACKSSWB, IX86_BUILTIN_PACKSSDW, IX86_BUILTIN_PACKUSWB, IX86_BUILTIN_PADDB, IX86_BUILTIN_PADDW, IX86_BUILTIN_PADDD, IX86_BUILTIN_PADDQ, IX86_BUILTIN_PADDSB, IX86_BUILTIN_PADDSW, IX86_BUILTIN_PADDUSB, IX86_BUILTIN_PADDUSW, IX86_BUILTIN_PSUBB, IX86_BUILTIN_PSUBW, IX86_BUILTIN_PSUBD, IX86_BUILTIN_PSUBQ, IX86_BUILTIN_PSUBSB, IX86_BUILTIN_PSUBSW, IX86_BUILTIN_PSUBUSB, IX86_BUILTIN_PSUBUSW, IX86_BUILTIN_PAND, IX86_BUILTIN_PANDN, IX86_BUILTIN_POR, IX86_BUILTIN_PXOR, IX86_BUILTIN_PAVGB, IX86_BUILTIN_PAVGW, IX86_BUILTIN_PCMPEQB, IX86_BUILTIN_PCMPEQW, IX86_BUILTIN_PCMPEQD, IX86_BUILTIN_PCMPGTB, IX86_BUILTIN_PCMPGTW, 
IX86_BUILTIN_PCMPGTD, IX86_BUILTIN_PEXTRW, IX86_BUILTIN_PINSRW, IX86_BUILTIN_PMADDWD, IX86_BUILTIN_PMAXSW, IX86_BUILTIN_PMAXUB, IX86_BUILTIN_PMINSW, IX86_BUILTIN_PMINUB, IX86_BUILTIN_PMULHUW, IX86_BUILTIN_PMULHW, IX86_BUILTIN_PMULLW, IX86_BUILTIN_PSADBW, IX86_BUILTIN_PSHUFW, IX86_BUILTIN_PSLLW, IX86_BUILTIN_PSLLD, IX86_BUILTIN_PSLLQ, IX86_BUILTIN_PSRAW, IX86_BUILTIN_PSRAD, IX86_BUILTIN_PSRLW, IX86_BUILTIN_PSRLD, IX86_BUILTIN_PSRLQ, IX86_BUILTIN_PSLLWI, IX86_BUILTIN_PSLLDI, IX86_BUILTIN_PSLLQI, IX86_BUILTIN_PSRAWI, IX86_BUILTIN_PSRADI, IX86_BUILTIN_PSRLWI, IX86_BUILTIN_PSRLDI, IX86_BUILTIN_PSRLQI, IX86_BUILTIN_PUNPCKHBW, IX86_BUILTIN_PUNPCKHWD, IX86_BUILTIN_PUNPCKHDQ, IX86_BUILTIN_PUNPCKLBW, IX86_BUILTIN_PUNPCKLWD, IX86_BUILTIN_PUNPCKLDQ, IX86_BUILTIN_SHUFPS, IX86_BUILTIN_RCPPS, IX86_BUILTIN_RCPSS, IX86_BUILTIN_RSQRTPS, IX86_BUILTIN_RSQRTSS, IX86_BUILTIN_SQRTPS, IX86_BUILTIN_SQRTSS, IX86_BUILTIN_UNPCKHPS, IX86_BUILTIN_UNPCKLPS, IX86_BUILTIN_ANDPS, IX86_BUILTIN_ANDNPS, IX86_BUILTIN_ORPS, IX86_BUILTIN_XORPS, IX86_BUILTIN_EMMS, IX86_BUILTIN_LDMXCSR, IX86_BUILTIN_STMXCSR, IX86_BUILTIN_SFENCE, /* 3DNow! Original */ IX86_BUILTIN_FEMMS, IX86_BUILTIN_PAVGUSB, IX86_BUILTIN_PF2ID, IX86_BUILTIN_PFACC, IX86_BUILTIN_PFADD, IX86_BUILTIN_PFCMPEQ, IX86_BUILTIN_PFCMPGE, IX86_BUILTIN_PFCMPGT, IX86_BUILTIN_PFMAX, IX86_BUILTIN_PFMIN, IX86_BUILTIN_PFMUL, IX86_BUILTIN_PFRCP, IX86_BUILTIN_PFRCPIT1, IX86_BUILTIN_PFRCPIT2, IX86_BUILTIN_PFRSQIT1, IX86_BUILTIN_PFRSQRT, IX86_BUILTIN_PFSUB, IX86_BUILTIN_PFSUBR, IX86_BUILTIN_PI2FD, IX86_BUILTIN_PMULHRW, /* 3DNow! Athlon Extensions */ IX86_BUILTIN_PF2IW, IX86_BUILTIN_PFNACC, IX86_BUILTIN_PFPNACC, IX86_BUILTIN_PI2FW, IX86_BUILTIN_PSWAPDSI, IX86_BUILTIN_PSWAPDSF, IX86_BUILTIN_SSE_ZERO, IX86_BUILTIN_MMX_ZERO, /* SSE2 */ IX86_BUILTIN_ADDPD, IX86_BUILTIN_ADDSD, IX86_BUILTIN_DIVPD, IX86_BUILTIN_DIVSD, IX86_BUILTIN_MULPD, IX86_BUILTIN_MULSD, IX86_BUILTIN_SUBPD, IX86_BUILTIN_SUBSD, IX86_BUILTIN_CMPEQPD, IX86_BUILTIN_CMPLTPD, IX86_BUILTIN_CMPLEPD, IX86_BUILTIN_CMPGTPD, IX86_BUILTIN_CMPGEPD, IX86_BUILTIN_CMPNEQPD, IX86_BUILTIN_CMPNLTPD, IX86_BUILTIN_CMPNLEPD, IX86_BUILTIN_CMPNGTPD, IX86_BUILTIN_CMPNGEPD, IX86_BUILTIN_CMPORDPD, IX86_BUILTIN_CMPUNORDPD, IX86_BUILTIN_CMPNEPD, IX86_BUILTIN_CMPEQSD, IX86_BUILTIN_CMPLTSD, IX86_BUILTIN_CMPLESD, IX86_BUILTIN_CMPNEQSD, IX86_BUILTIN_CMPNLTSD, IX86_BUILTIN_CMPNLESD, IX86_BUILTIN_CMPORDSD, IX86_BUILTIN_CMPUNORDSD, IX86_BUILTIN_CMPNESD, IX86_BUILTIN_COMIEQSD, IX86_BUILTIN_COMILTSD, IX86_BUILTIN_COMILESD, IX86_BUILTIN_COMIGTSD, IX86_BUILTIN_COMIGESD, IX86_BUILTIN_COMINEQSD, IX86_BUILTIN_UCOMIEQSD, IX86_BUILTIN_UCOMILTSD, IX86_BUILTIN_UCOMILESD, IX86_BUILTIN_UCOMIGTSD, IX86_BUILTIN_UCOMIGESD, IX86_BUILTIN_UCOMINEQSD, IX86_BUILTIN_MAXPD, IX86_BUILTIN_MAXSD, IX86_BUILTIN_MINPD, IX86_BUILTIN_MINSD, IX86_BUILTIN_ANDPD, IX86_BUILTIN_ANDNPD, IX86_BUILTIN_ORPD, IX86_BUILTIN_XORPD, IX86_BUILTIN_SQRTPD, IX86_BUILTIN_SQRTSD, IX86_BUILTIN_UNPCKHPD, IX86_BUILTIN_UNPCKLPD, IX86_BUILTIN_SHUFPD, IX86_BUILTIN_LOADAPD, IX86_BUILTIN_LOADUPD, IX86_BUILTIN_STOREAPD, IX86_BUILTIN_STOREUPD, IX86_BUILTIN_LOADSD, IX86_BUILTIN_STORESD, IX86_BUILTIN_MOVSD, IX86_BUILTIN_LOADHPD, IX86_BUILTIN_LOADLPD, IX86_BUILTIN_STOREHPD, IX86_BUILTIN_STORELPD, IX86_BUILTIN_CVTDQ2PD, IX86_BUILTIN_CVTDQ2PS, IX86_BUILTIN_CVTPD2DQ, IX86_BUILTIN_CVTPD2PI, IX86_BUILTIN_CVTPD2PS, IX86_BUILTIN_CVTTPD2DQ, IX86_BUILTIN_CVTTPD2PI, IX86_BUILTIN_CVTPI2PD, IX86_BUILTIN_CVTSI2SD, IX86_BUILTIN_CVTSI642SD, IX86_BUILTIN_CVTSD2SI, IX86_BUILTIN_CVTSD2SI64, IX86_BUILTIN_CVTSD2SS, IX86_BUILTIN_CVTSS2SD, 
IX86_BUILTIN_CVTTSD2SI, IX86_BUILTIN_CVTTSD2SI64, IX86_BUILTIN_CVTPS2DQ, IX86_BUILTIN_CVTPS2PD, IX86_BUILTIN_CVTTPS2DQ, IX86_BUILTIN_MOVNTI, IX86_BUILTIN_MOVNTPD, IX86_BUILTIN_MOVNTDQ, IX86_BUILTIN_SETPD1, IX86_BUILTIN_SETPD, IX86_BUILTIN_CLRPD, IX86_BUILTIN_SETRPD, IX86_BUILTIN_LOADPD1, IX86_BUILTIN_LOADRPD, IX86_BUILTIN_STOREPD1, IX86_BUILTIN_STORERPD, /* SSE2 MMX */ IX86_BUILTIN_MASKMOVDQU, IX86_BUILTIN_MOVMSKPD, IX86_BUILTIN_PMOVMSKB128, IX86_BUILTIN_MOVQ2DQ, IX86_BUILTIN_MOVDQ2Q, IX86_BUILTIN_PACKSSWB128, IX86_BUILTIN_PACKSSDW128, IX86_BUILTIN_PACKUSWB128, IX86_BUILTIN_PADDB128, IX86_BUILTIN_PADDW128, IX86_BUILTIN_PADDD128, IX86_BUILTIN_PADDQ128, IX86_BUILTIN_PADDSB128, IX86_BUILTIN_PADDSW128, IX86_BUILTIN_PADDUSB128, IX86_BUILTIN_PADDUSW128, IX86_BUILTIN_PSUBB128, IX86_BUILTIN_PSUBW128, IX86_BUILTIN_PSUBD128, IX86_BUILTIN_PSUBQ128, IX86_BUILTIN_PSUBSB128, IX86_BUILTIN_PSUBSW128, IX86_BUILTIN_PSUBUSB128, IX86_BUILTIN_PSUBUSW128, IX86_BUILTIN_PAND128, IX86_BUILTIN_PANDN128, IX86_BUILTIN_POR128, IX86_BUILTIN_PXOR128, IX86_BUILTIN_PAVGB128, IX86_BUILTIN_PAVGW128, IX86_BUILTIN_PCMPEQB128, IX86_BUILTIN_PCMPEQW128, IX86_BUILTIN_PCMPEQD128, IX86_BUILTIN_PCMPGTB128, IX86_BUILTIN_PCMPGTW128, IX86_BUILTIN_PCMPGTD128, IX86_BUILTIN_PEXTRW128, IX86_BUILTIN_PINSRW128, IX86_BUILTIN_PMADDWD128, IX86_BUILTIN_PMAXSW128, IX86_BUILTIN_PMAXUB128, IX86_BUILTIN_PMINSW128, IX86_BUILTIN_PMINUB128, IX86_BUILTIN_PMULUDQ, IX86_BUILTIN_PMULUDQ128, IX86_BUILTIN_PMULHUW128, IX86_BUILTIN_PMULHW128, IX86_BUILTIN_PMULLW128, IX86_BUILTIN_PSADBW128, IX86_BUILTIN_PSHUFHW, IX86_BUILTIN_PSHUFLW, IX86_BUILTIN_PSHUFD, IX86_BUILTIN_PSLLW128, IX86_BUILTIN_PSLLD128, IX86_BUILTIN_PSLLQ128, IX86_BUILTIN_PSRAW128, IX86_BUILTIN_PSRAD128, IX86_BUILTIN_PSRLW128, IX86_BUILTIN_PSRLD128, IX86_BUILTIN_PSRLQ128, IX86_BUILTIN_PSLLDQI128, IX86_BUILTIN_PSLLWI128, IX86_BUILTIN_PSLLDI128, IX86_BUILTIN_PSLLQI128, IX86_BUILTIN_PSRAWI128, IX86_BUILTIN_PSRADI128, IX86_BUILTIN_PSRLDQI128, IX86_BUILTIN_PSRLWI128, IX86_BUILTIN_PSRLDI128, IX86_BUILTIN_PSRLQI128, IX86_BUILTIN_PUNPCKHBW128, IX86_BUILTIN_PUNPCKHWD128, IX86_BUILTIN_PUNPCKHDQ128, IX86_BUILTIN_PUNPCKHQDQ128, IX86_BUILTIN_PUNPCKLBW128, IX86_BUILTIN_PUNPCKLWD128, IX86_BUILTIN_PUNPCKLDQ128, IX86_BUILTIN_PUNPCKLQDQ128, IX86_BUILTIN_CLFLUSH, IX86_BUILTIN_MFENCE, IX86_BUILTIN_LFENCE, /* Prescott New Instructions. */ IX86_BUILTIN_ADDSUBPS, IX86_BUILTIN_HADDPS, IX86_BUILTIN_HSUBPS, IX86_BUILTIN_MOVSHDUP, IX86_BUILTIN_MOVSLDUP, IX86_BUILTIN_ADDSUBPD, IX86_BUILTIN_HADDPD, IX86_BUILTIN_HSUBPD, IX86_BUILTIN_LOADDDUP, IX86_BUILTIN_MOVDDUP, IX86_BUILTIN_LDDQU, IX86_BUILTIN_MONITOR, IX86_BUILTIN_MWAIT, IX86_BUILTIN_MAX }; /* Max number of args passed in registers. If this is more than 3, we will have problems with ebx (register #4), since it is a caller save register and is also used as the pic register in ELF. So for now, don't allow more than 3 registers to be passed in registers. */ #define REGPARM_MAX (TARGET_64BIT ? 6 : 3) #define SSE_REGPARM_MAX (TARGET_64BIT ? 8 : (TARGET_SSE ? 3 : 0)) #define MMX_REGPARM_MAX (TARGET_64BIT ? 0 : (TARGET_MMX ? 3 : 0)) /* Specify the machine mode that this machine uses for the index in the tablejump instruction. */ #define CASE_VECTOR_MODE (!TARGET_64BIT || flag_pic ? SImode : DImode) /* Define this as 1 if `char' should by default be signed; else as 0. */ #define DEFAULT_SIGNED_CHAR 1 /* Number of bytes moved into a data cache for a single prefetch operation. 
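At the source level, prefetches are typically requested with
   __builtin_prefetch; a purely illustrative use follows (the look-ahead
   distance of 16 elements is an arbitrary choice for the example, not a value
   derived from ix86_cost).  */

#if 0
/* Illustrative only: prefetch a block of the array a few iterations ahead
   of its use.  */
static long
sketch_sum (const long *a, int n)
{
  long s = 0;
  int i;

  for (i = 0; i < n; i++)
    {
      __builtin_prefetch (&a[i + 16], 0, 1);   /* read access, low temporal locality */
      s += a[i];
    }
  return s;
}
#endif
/*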
*/ #define PREFETCH_BLOCK ix86_cost->prefetch_block /* Number of prefetch operations that can be done in parallel. */ #define SIMULTANEOUS_PREFETCHES ix86_cost->simultaneous_prefetches /* Max number of bytes we can move from memory to memory in one reasonably fast instruction. */ #define MOVE_MAX 16 /* MOVE_MAX_PIECES is the number of bytes at a time which we can move efficiently, as opposed to MOVE_MAX which is the maximum number of bytes we can move with a single instruction. */ #define MOVE_MAX_PIECES (TARGET_64BIT ? 8 : 4) /* If a memory-to-memory move would take MOVE_RATIO or more simple move-instruction pairs, we will do a movstr or libcall instead. Increasing the value will always make code faster, but eventually incurs high cost in increased code size. If you don't define this, a reasonable default is used. */ #define MOVE_RATIO (optimize_size ? 3 : ix86_cost->move_ratio) /* Define if shifts truncate the shift count which implies one can omit a sign-extension or zero-extension of a shift count. */ /* On i386, shifts do truncate the count. But bit opcodes don't. */ /* #define SHIFT_COUNT_TRUNCATED */ /* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits is done just by pretending it is already truncated. */ #define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1 /* A macro to update M and UNSIGNEDP when an object whose type is TYPE and which has the specified mode and signedness is to be stored in a register. This macro is only called when TYPE is a scalar type. On i386 it is sometimes useful to promote HImode and QImode quantities to SImode. The choice depends on target type. */ #define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ do { \ if (((MODE) == HImode && TARGET_PROMOTE_HI_REGS) \ || ((MODE) == QImode && TARGET_PROMOTE_QI_REGS)) \ (MODE) = SImode; \ } while (0) /* Specify the machine mode that pointers have. After generation of rtl, the compiler makes no further distinction between pointers and any other objects of this machine mode. */ #define Pmode (TARGET_64BIT ? DImode : SImode) /* A function address in a call instruction is a byte address (for indexing purposes) so give the MEM rtx a byte's mode. */ #define FUNCTION_MODE QImode /* A C expression for the cost of moving data from a register in class FROM to one in class TO. The classes are expressed using the enumeration values such as `GENERAL_REGS'. A value of 2 is the default; other values are interpreted relative to that. It is not required that the cost always equal 2 when FROM is the same as TO; on some machines it is expensive to move between registers if they are not general registers. */ #define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \ ix86_register_move_cost ((MODE), (CLASS1), (CLASS2)) /* A C expression for the cost of moving data of mode M between a register and memory. A value of 2 is the default; this cost is relative to those in `REGISTER_MOVE_COST'. If moving between registers and memory is more expensive than between two registers, you should define this macro to express the relative cost. */ #define MEMORY_MOVE_COST(MODE, CLASS, IN) \ ix86_memory_move_cost ((MODE), (CLASS), (IN)) /* A C expression for the cost of a branch instruction. A value of 1 is the default; other values are interpreted relative to that. */ #define BRANCH_COST ix86_branch_cost /* Define this macro as a C expression which is nonzero if accessing less than a word of memory (i.e. 
a `char' or a `short') is no faster than accessing a word of memory, i.e., if such access require more than one instruction or if there is no difference in cost between byte and (aligned) word loads. When this macro is not defined, the compiler will access a field by finding the smallest containing object; when it is defined, a fullword load will be used if alignment permits. Unless bytes accesses are faster than word accesses, using word accesses is preferable since it may eliminate subsequent memory access if subsequent accesses occur to other fields in the same word of the structure, but to different bytes. */ #define SLOW_BYTE_ACCESS 0 /* Nonzero if access to memory by shorts is slow and undesirable. */ #define SLOW_SHORT_ACCESS 0 /* Define this macro to be the value 1 if unaligned accesses have a cost many times greater than aligned accesses, for example if they are emulated in a trap handler. When this macro is nonzero, the compiler will act as if `STRICT_ALIGNMENT' were nonzero when generating code for block moves. This can cause significantly more instructions to be produced. Therefore, do not set this macro nonzero if unaligned accesses only add a cycle or two to the time for a memory access. If the value of this macro is always zero, it need not be defined. */ /* #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 0 */ /* Define this macro if it is as good or better to call a constant function address than to call an address kept in a register. Desirable on the 386 because a CALL with a constant address is faster than one with a register address. */ #define NO_FUNCTION_CSE /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE, return the mode to be used for the comparison. For floating-point equality comparisons, CCFPEQmode should be used. VOIDmode should be used in all other cases. For integer comparisons against zero, reduce to CCNOmode or CCZmode if possible, to allow for more combinations. */ #define SELECT_CC_MODE(OP, X, Y) ix86_cc_mode ((OP), (X), (Y)) /* Return nonzero if MODE implies a floating point inequality can be reversed. */ #define REVERSIBLE_CC_MODE(MODE) 1 /* A C expression whose value is reversed condition code of the CODE for comparison done in CC_MODE mode. */ #define REVERSE_CONDITION(CODE, MODE) ix86_reverse_condition ((CODE), (MODE)) /* Control the assembler format that we output, to the extent this does not vary between assemblers. */ /* How to refer to registers in assembler output. This sequence is indexed by compiler's hard-register-number (see above). */ /* In order to refer to the first 8 regs as 32 bit regs prefix an "e" For non floating point regs, the following are the HImode names. For float regs, the stack top is sometimes referred to as "%st(0)" instead of just "%st". PRINT_OPERAND handles this with the "y" code. */ #define HI_REGISTER_NAMES \ {"ax","dx","cx","bx","si","di","bp","sp", \ "st","st(1)","st(2)","st(3)","st(4)","st(5)","st(6)","st(7)", \ "argp", "flags", "fpsr", "dirflag", "frame", \ "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7", \ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7" , \ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"} #define REGISTER_NAMES HI_REGISTER_NAMES /* Table of additional register names to use in user input. 
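These spellings also reach the compiler through user code, for example in
   explicit register variables and asm clobber lists; a purely illustrative
   example:  */

#if 0
/* Illustrative only: user code spelling registers with these names.  */
static int
sketch_named_registers (int x)
{
  register int tmp asm ("ebx");                      /* explicit register variable */

  tmp = x + 1;
  asm volatile ("" : "+r" (tmp) : : "eax", "edx");   /* clobbers use the same names */
  return tmp;
}
#endif
/*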
*/ #define ADDITIONAL_REGISTER_NAMES \ { { "eax", 0 }, { "edx", 1 }, { "ecx", 2 }, { "ebx", 3 }, \ { "esi", 4 }, { "edi", 5 }, { "ebp", 6 }, { "esp", 7 }, \ { "rax", 0 }, { "rdx", 1 }, { "rcx", 2 }, { "rbx", 3 }, \ { "rsi", 4 }, { "rdi", 5 }, { "rbp", 6 }, { "rsp", 7 }, \ { "al", 0 }, { "dl", 1 }, { "cl", 2 }, { "bl", 3 }, \ { "ah", 0 }, { "dh", 1 }, { "ch", 2 }, { "bh", 3 }, \ { "mm0", 8}, { "mm1", 9}, { "mm2", 10}, { "mm3", 11}, \ { "mm4", 12}, { "mm5", 13}, { "mm6", 14}, { "mm7", 15} } /* Note we are omitting these since currently I don't know how to get gcc to use these, since they want the same but different number as al, and ax. */ #define QI_REGISTER_NAMES \ {"al", "dl", "cl", "bl", "sil", "dil", "bpl", "spl",} /* These parallel the array above, and can be used to access bits 8:15 of regs 0 through 3. */ #define QI_HIGH_REGISTER_NAMES \ {"ah", "dh", "ch", "bh", } /* How to renumber registers for dbx and gdb. */ #define DBX_REGISTER_NUMBER(N) \ (TARGET_64BIT ? dbx64_register_map[(N)] : dbx_register_map[(N)]) extern int const dbx_register_map[FIRST_PSEUDO_REGISTER]; extern int const dbx64_register_map[FIRST_PSEUDO_REGISTER]; extern int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER]; /* Before the prologue, RA is at 0(%esp). */ #define INCOMING_RETURN_ADDR_RTX \ gen_rtx_MEM (VOIDmode, gen_rtx_REG (VOIDmode, STACK_POINTER_REGNUM)) /* After the prologue, RA is at -4(AP) in the current frame. */ #define RETURN_ADDR_RTX(COUNT, FRAME) \ ((COUNT) == 0 \ ? gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, -UNITS_PER_WORD)) \ : gen_rtx_MEM (Pmode, plus_constant (FRAME, UNITS_PER_WORD))) /* PC is dbx register 8; let's use that column for RA. */ #define DWARF_FRAME_RETURN_COLUMN (TARGET_64BIT ? 16 : 8) /* Before the prologue, the top of the frame is at 4(%esp). */ #define INCOMING_FRAME_SP_OFFSET UNITS_PER_WORD /* Describe how we implement __builtin_eh_return. */ #define EH_RETURN_DATA_REGNO(N) ((N) < 2 ? (N) : INVALID_REGNUM) #define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 2) /* Select a format to encode pointers in exception handling data. CODE is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is true if the symbol may be affected by dynamic relocations. ??? All x86 object file formats are capable of representing this. After all, the relocation needed is the same as for the call insn. Whether or not a particular assembler allows us to enter such, I guess we'll have to see. */ #define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ (flag_pic \ ? ((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4\ : DW_EH_PE_absptr) /* This is how to output an insn to push a register on the stack. It need not be very fast code. */ #define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \ do { \ if (TARGET_64BIT) \ asm_fprintf ((FILE), "\tpush{q}\t%%r%s\n", \ reg_names[(REGNO)] + (REX_INT_REGNO_P (REGNO) != 0)); \ else \ asm_fprintf ((FILE), "\tpush{l}\t%%e%s\n", reg_names[(REGNO)]); \ } while (0) /* This is how to output an insn to pop a register from the stack. It need not be very fast code. */ #define ASM_OUTPUT_REG_POP(FILE, REGNO) \ do { \ if (TARGET_64BIT) \ asm_fprintf ((FILE), "\tpop{q}\t%%r%s\n", \ reg_names[(REGNO)] + (REX_INT_REGNO_P (REGNO) != 0)); \ else \ asm_fprintf ((FILE), "\tpop{l}\t%%e%s\n", reg_names[(REGNO)]); \ } while (0) /* This is how to output an element of a case-vector that is absolute. */ #define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \ ix86_output_addr_vec_elt ((FILE), (VALUE)) /* This is how to output an element of a case-vector that is relative. 
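Case vectors of either kind come from dense switch statements, when the
   compiler decides a table is cheaper than a compare-and-branch tree; a purely
   illustrative source-level example:  */

#if 0
/* Illustrative only: a dense switch that typically becomes a case vector
   emitted through the macros above and below.  */
int
sketch_dispatch (int op)
{
  switch (op)
    {
    case 0: return 10;
    case 1: return 11;
    case 2: return 12;
    case 3: return 13;
    case 4: return 14;
    case 5: return 15;
    default: return -1;
    }
}
#endif
/*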
*/ #define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \ ix86_output_addr_diff_elt ((FILE), (VALUE), (REL)) /* Under some conditions we need jump tables in the text section, because the assembler cannot handle label differences between sections. */ #define JUMP_TABLES_IN_TEXT_SECTION \ (!TARGET_64BIT && flag_pic && !HAVE_AS_GOTOFF_IN_DATA) /* A C statement that outputs an address constant appropriate to for DWARF debugging. */ #define ASM_OUTPUT_DWARF_ADDR_CONST(FILE, X) \ i386_dwarf_output_addr_const ((FILE), (X)) /* Emit a dtp-relative reference to a TLS variable. */ #ifdef HAVE_AS_TLS #define ASM_OUTPUT_DWARF_DTPREL(FILE, SIZE, X) \ i386_output_dwarf_dtprel (FILE, SIZE, X) #endif /* Switch to init or fini section via SECTION_OP, emit a call to FUNC, and switch back. For x86 we do this only to save a few bytes that would otherwise be unused in the text section. */ #define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ asm (SECTION_OP "\n\t" \ "call " USER_LABEL_PREFIX #FUNC "\n" \ TEXT_SECTION_ASM_OP); /* Print operand X (an rtx) in assembler syntax to file FILE. CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified. Effect of various CODE letters is described in i386.c near print_operand function. */ #define PRINT_OPERAND_PUNCT_VALID_P(CODE) \ ((CODE) == '*' || (CODE) == '+' || (CODE) == '&') #define PRINT_OPERAND(FILE, X, CODE) \ print_operand ((FILE), (X), (CODE)) #define PRINT_OPERAND_ADDRESS(FILE, ADDR) \ print_operand_address ((FILE), (ADDR)) #define OUTPUT_ADDR_CONST_EXTRA(FILE, X, FAIL) \ do { \ if (! output_addr_const_extra (FILE, (X))) \ goto FAIL; \ } while (0); /* a letter which is not needed by the normal asm syntax, which we can use for operand syntax in the extended asm */ #define ASM_OPERAND_LETTER '#' #define RET return "" #define AT_SP(MODE) (gen_rtx_MEM ((MODE), stack_pointer_rtx)) /* Define the codes that are matched by predicates in i386.c. 
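Each entry below pairs a predicate function defined in i386.c with the RTX
   codes it can possibly accept; genrecog uses that list when building the insn
   recognizer.  As a sketch (not the actual i386.c body), a simple range
   predicate of this kind looks roughly like this:  */

#if 0
/* Hypothetical sketch of a predicate such as const_0_to_3_operand;
   rtx and enum machine_mode are GCC-internal types.  */
static int
sketch_const_0_to_3_operand (rtx op, enum machine_mode mode)
{
  (void) mode;
  return GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) <= 3;
}
#endif
/*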
*/ #define PREDICATE_CODES \ {"x86_64_immediate_operand", {CONST_INT, SUBREG, REG, \ SYMBOL_REF, LABEL_REF, CONST}}, \ {"x86_64_nonmemory_operand", {CONST_INT, SUBREG, REG, \ SYMBOL_REF, LABEL_REF, CONST}}, \ {"x86_64_movabs_operand", {CONST_INT, SUBREG, REG, \ SYMBOL_REF, LABEL_REF, CONST}}, \ {"x86_64_szext_nonmemory_operand", {CONST_INT, SUBREG, REG, \ SYMBOL_REF, LABEL_REF, CONST}}, \ {"x86_64_general_operand", {CONST_INT, SUBREG, REG, MEM, \ SYMBOL_REF, LABEL_REF, CONST}}, \ {"x86_64_szext_general_operand", {CONST_INT, SUBREG, REG, MEM, \ SYMBOL_REF, LABEL_REF, CONST}}, \ {"x86_64_zext_immediate_operand", {CONST_INT, CONST_DOUBLE, CONST, \ SYMBOL_REF, LABEL_REF}}, \ {"shiftdi_operand", {SUBREG, REG, MEM}}, \ {"const_int_1_31_operand", {CONST_INT}}, \ {"symbolic_operand", {SYMBOL_REF, LABEL_REF, CONST}}, \ {"aligned_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \ LABEL_REF, SUBREG, REG, MEM}}, \ {"pic_symbolic_operand", {CONST}}, \ {"call_insn_operand", {REG, SUBREG, MEM, SYMBOL_REF}}, \ {"sibcall_insn_operand", {REG, SUBREG, SYMBOL_REF}}, \ {"constant_call_address_operand", {SYMBOL_REF, CONST}}, \ {"const0_operand", {CONST_INT, CONST_DOUBLE}}, \ {"const1_operand", {CONST_INT}}, \ {"const248_operand", {CONST_INT}}, \ {"const_0_to_3_operand", {CONST_INT}}, \ {"const_0_to_7_operand", {CONST_INT}}, \ {"const_0_to_15_operand", {CONST_INT}}, \ {"const_0_to_255_operand", {CONST_INT}}, \ {"incdec_operand", {CONST_INT}}, \ {"mmx_reg_operand", {REG}}, \ {"reg_no_sp_operand", {SUBREG, REG}}, \ {"general_no_elim_operand", {CONST_INT, CONST_DOUBLE, CONST, \ SYMBOL_REF, LABEL_REF, SUBREG, REG, MEM}}, \ {"nonmemory_no_elim_operand", {CONST_INT, REG, SUBREG}}, \ {"index_register_operand", {SUBREG, REG}}, \ {"flags_reg_operand", {REG}}, \ {"q_regs_operand", {SUBREG, REG}}, \ {"non_q_regs_operand", {SUBREG, REG}}, \ {"fcmov_comparison_operator", {EQ, NE, LTU, GTU, LEU, GEU, UNORDERED, \ ORDERED, LT, UNLT, GT, UNGT, LE, UNLE, \ GE, UNGE, LTGT, UNEQ}}, \ {"sse_comparison_operator", {EQ, LT, LE, UNORDERED, NE, UNGE, UNGT, \ ORDERED, UNEQ, UNLT, UNLE, LTGT, GE, GT \ }}, \ {"ix86_comparison_operator", {EQ, NE, LE, LT, GE, GT, LEU, LTU, GEU, \ GTU, UNORDERED, ORDERED, UNLE, UNLT, \ UNGE, UNGT, LTGT, UNEQ }}, \ {"ix86_carry_flag_operator", {LTU, LT, UNLT, GT, UNGT, LE, UNLE, \ GE, UNGE, LTGT, UNEQ}}, \ {"cmp_fp_expander_operand", {CONST_DOUBLE, SUBREG, REG, MEM}}, \ {"ext_register_operand", {SUBREG, REG}}, \ {"binary_fp_operator", {PLUS, MINUS, MULT, DIV}}, \ {"mult_operator", {MULT}}, \ {"div_operator", {DIV}}, \ {"arith_or_logical_operator", {PLUS, MULT, AND, IOR, XOR, SMIN, SMAX, \ UMIN, UMAX, COMPARE, MINUS, DIV, MOD, \ UDIV, UMOD, ASHIFT, ROTATE, ASHIFTRT, \ LSHIFTRT, ROTATERT}}, \ {"promotable_binary_operator", {PLUS, MULT, AND, IOR, XOR, ASHIFT}}, \ {"memory_displacement_operand", {MEM}}, \ {"cmpsi_operand", {CONST_INT, CONST_DOUBLE, CONST, SYMBOL_REF, \ LABEL_REF, SUBREG, REG, MEM, AND}}, \ {"long_memory_operand", {MEM}}, \ {"tls_symbolic_operand", {SYMBOL_REF}}, \ {"global_dynamic_symbolic_operand", {SYMBOL_REF}}, \ {"local_dynamic_symbolic_operand", {SYMBOL_REF}}, \ {"initial_exec_symbolic_operand", {SYMBOL_REF}}, \ {"local_exec_symbolic_operand", {SYMBOL_REF}}, \ {"any_fp_register_operand", {REG}}, \ {"register_and_not_any_fp_reg_operand", {REG}}, \ {"fp_register_operand", {REG}}, \ {"register_and_not_fp_reg_operand", {REG}}, \ {"zero_extended_scalar_load_operand", {MEM}}, \ {"vector_move_operand", {CONST_VECTOR, SUBREG, REG, MEM}}, \ {"no_seg_address_operand", {CONST_INT, CONST_DOUBLE, 
CONST, SYMBOL_REF, \ LABEL_REF, SUBREG, REG, MEM, PLUS, MULT}}, /* A list of predicates that do special things with modes, and so should not elicit warnings for VOIDmode match_operand. */ #define SPECIAL_MODE_PREDICATES \ "ext_register_operand", /* Which processor to schedule for. The cpu attribute defines a list that mirrors this list, so changes to i386.md must be made at the same time. */ enum processor_type { PROCESSOR_I386, /* 80386 */ PROCESSOR_I486, /* 80486DX, 80486SX, 80486DX[24] */ PROCESSOR_PENTIUM, PROCESSOR_PENTIUMPRO, PROCESSOR_K6, PROCESSOR_ATHLON, PROCESSOR_PENTIUM4, PROCESSOR_K8, PROCESSOR_NOCONA, PROCESSOR_max }; extern enum processor_type ix86_tune; extern const char *ix86_tune_string; extern enum processor_type ix86_arch; extern const char *ix86_arch_string; enum fpmath_unit { FPMATH_387 = 1, FPMATH_SSE = 2 }; extern enum fpmath_unit ix86_fpmath; extern const char *ix86_fpmath_string; enum tls_dialect { TLS_DIALECT_GNU, TLS_DIALECT_SUN }; extern enum tls_dialect ix86_tls_dialect; extern const char *ix86_tls_dialect_string; enum cmodel { CM_32, /* The traditional 32-bit ABI. */ CM_SMALL, /* Assumes all code and data fits in the low 31 bits. */ CM_KERNEL, /* Assumes all code and data fits in the high 31 bits. */ CM_MEDIUM, /* Assumes code fits in the low 31 bits; data unlimited. */ CM_LARGE, /* No assumptions. */ CM_SMALL_PIC /* Assumes code+data+got/plt fits in a 31 bit region. */ }; extern enum cmodel ix86_cmodel; extern const char *ix86_cmodel_string; /* Size of the RED_ZONE area. */ #define RED_ZONE_SIZE 128 /* Reserved area of the red zone for temporaries. */ #define RED_ZONE_RESERVE 8 enum asm_dialect { ASM_ATT, ASM_INTEL }; extern const char *ix86_asm_string; extern enum asm_dialect ix86_asm_dialect; extern int ix86_regparm; extern const char *ix86_regparm_string; extern int ix86_preferred_stack_boundary; extern const char *ix86_preferred_stack_boundary_string; extern int ix86_branch_cost; extern const char *ix86_branch_cost_string; extern const char *ix86_debug_arg_string; extern const char *ix86_debug_addr_string; /* Obsoleted by -f options. Remove before 3.2 ships. */ extern const char *ix86_align_loops_string; extern const char *ix86_align_jumps_string; extern const char *ix86_align_funcs_string; /* Smallest class containing REGNO. */ extern enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER]; extern rtx ix86_compare_op0; /* operand 0 for comparisons */ extern rtx ix86_compare_op1; /* operand 1 for comparisons */ /* To properly truncate FP values into integers, we need to set i387 control word. We can't emit proper mode switching code before reload, as spills generated by reload may truncate values incorrectly, but we still can avoid redundant computation of new control word by the mode switching pass. The fldcw instructions are still emitted redundantly, but this is probably not going to be noticeable problem, as most CPUs do have fast path for the sequence. The machinery is to emit simple truncation instructions and split them before reload to instructions having USEs of two memory locations that are filled by this code to old and new control word. Post-reload pass may be later used to eliminate the redundant fildcw if needed. */ enum fp_cw_mode {FP_CW_STORED, FP_CW_UNINITIALIZED, FP_CW_ANY}; /* Define this macro if the port needs extra instructions inserted for mode switching in an optimizing compilation. 
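Here the mode-switched entity is the i387 control word discussed above; the
   source construct that needs it is an ordinary float-to-integer conversion,
   e.g.:  */

#if 0
/* Illustrative only: C requires this conversion to truncate toward zero
   ((int) 2.9 == 2, (int) -2.9 == -2), while the i387 rounds according to
   its current control word; hence the control-word switching described
   above.  */
static int
sketch_truncate (double d)
{
  return (int) d;
}
#endif
/*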
*/ #define OPTIMIZE_MODE_SWITCHING(ENTITY) ix86_optimize_mode_switching /* If you define `OPTIMIZE_MODE_SWITCHING', you have to define this as initializer for an array of integers. Each initializer element N refers to an entity that needs mode switching, and specifies the number of different modes that might need to be set for this entity. The position of the initializer in the initializer - starting counting at zero - determines the integer that is used to refer to the mode-switched entity in question. */ #define NUM_MODES_FOR_MODE_SWITCHING { FP_CW_ANY } /* ENTITY is an integer specifying a mode-switched entity. If `OPTIMIZE_MODE_SWITCHING' is defined, you must define this macro to return an integer value not larger than the corresponding element in `NUM_MODES_FOR_MODE_SWITCHING', to denote the mode that ENTITY must be switched into prior to the execution of INSN. */ #define MODE_NEEDED(ENTITY, I) \ (GET_CODE (I) == CALL_INSN \ || (GET_CODE (I) == INSN && (asm_noperands (PATTERN (I)) >= 0 \ || GET_CODE (PATTERN (I)) == ASM_INPUT))\ ? FP_CW_UNINITIALIZED \ : recog_memoized (I) < 0 || get_attr_type (I) != TYPE_FISTP \ ? FP_CW_ANY \ : FP_CW_STORED) /* This macro specifies the order in which modes for ENTITY are processed. 0 is the highest priority. */ #define MODE_PRIORITY_TO_MODE(ENTITY, N) (N) /* Generate one or more insns to set ENTITY to MODE. HARD_REG_LIVE is the set of hard registers live at the point where the insn(s) are to be inserted. */ #define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \ ((MODE) == FP_CW_STORED \ ? emit_i387_cw_initialization (assign_386_stack_local (HImode, 1), \ assign_386_stack_local (HImode, 2)), 0\ : 0) /* Avoid renaming of stack registers, as doing so in combination with scheduling just increases amount of live registers at time and in the turn amount of fxch instructions needed. ??? Maybe Pentium chips benefits from renaming, someone can try.... */ #define HARD_REGNO_RENAME_OK(SRC, TARGET) \ ((SRC) < FIRST_STACK_REG || (SRC) > LAST_STACK_REG) #define DLL_IMPORT_EXPORT_PREFIX '#' #define FASTCALL_PREFIX '@' struct machine_function GTY(()) { struct stack_local_entry *stack_locals; const char *some_ld_name; int save_varrargs_registers; int accesses_prev_frame; int optimize_mode_switching; /* Set by ix86_compute_frame_layout and used by prologue/epilogue expander to determine the style used. */ int use_fast_prologue_epilogue; /* Number of saved registers USE_FAST_PROLOGUE_EPILOGUE has been computed for. */ int use_fast_prologue_epilogue_nregs; }; #define ix86_stack_locals (cfun->machine->stack_locals) #define ix86_save_varrargs_registers (cfun->machine->save_varrargs_registers) #define ix86_optimize_mode_switching (cfun->machine->optimize_mode_switching) /* Control behavior of x86_file_start. */ #define X86_FILE_START_VERSION_DIRECTIVE false #define X86_FILE_START_FLTUSED false /* Local variables: version-control: t End: */ /* Definitions for Unix assembler syntax for the Intel 80386. Copyright (C) 1988, 1994, 1999, 2000, 2001, 2002 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file defines the aspects of assembler syntax that are the same for all the i386 Unix systems (though they may differ in non-Unix systems). */ /* Define macro used to output shift-double opcodes when the shift count is in %cl. Some assemblers require %cl as an argument; some don't. This macro controls what to do: by default, don't print %cl. */ #define SHIFT_DOUBLE_OMITS_COUNT 1 /* Define the syntax of pseudo-ops, labels and comments. */ /* String containing the assembler's comment-starter. */ #define ASM_COMMENT_START "/" /* Output to assembler file text saying following lines may contain character constants, extra white space, comments, etc. */ #define ASM_APP_ON "/APP\n" /* Output to assembler file text saying following lines no longer contain unusual constructs. */ #define ASM_APP_OFF "/NO_APP\n" /* Output before read-only data. */ #define TEXT_SECTION_ASM_OP "\t.text" /* Output before writable (initialized) data. */ #define DATA_SECTION_ASM_OP "\t.data" /* Output before writable (uninitialized) data. */ #define BSS_SECTION_ASM_OP "\t.bss" /* Globalizing directive for a label. */ #define GLOBAL_ASM_OP ".globl " /* By default, target has a 80387, uses IEEE compatible arithmetic, and returns float values in the 387. */ #define TARGET_SUBTARGET_DEFAULT (MASK_80387 | MASK_IEEE_FP | MASK_FLOAT_RETURNS) /* Definitions for AT&T assembler syntax for the Intel 80386. Copyright (C) 1988, 1996, 2000, 2001, 2002 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Define the syntax of instructions and addresses. */ /* Prefix for internally generated assembler labels. */ #define LPREFIX ".L" /* Assembler pseudos to introduce constants of various size. */ #define ASM_SHORT "\t.value\t" #define ASM_LONG "\t.long\t" #define ASM_QUAD "\t.quad\t" /* Should not be used for 32bit compilation. */ /* How to output an ASCII string constant. */ #define ASM_OUTPUT_ASCII(FILE, PTR, SIZE) \ do \ { size_t i = 0, limit = (SIZE); \ while (i < limit) \ { if (i%10 == 0) { if (i!=0) fprintf ((FILE), "\n"); \ fputs ("\t.byte\t", (FILE)); } \ else fprintf ((FILE), ","); \ fprintf ((FILE), "0x%x", ((PTR)[i++] & 0377)) ;} \ fprintf ((FILE), "\n"); \ } while (0) /* Output at beginning of assembler file. */ #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true /* This is how to output an assembler line that says to advance the location counter to a multiple of 2**LOG bytes. */ #define ASM_OUTPUT_ALIGN(FILE,LOG) \ if ((LOG)!=0) fprintf ((FILE), "\t.align %d\n", 1<<(LOG)) /* This is how to output an assembler line that says to advance the location counter by SIZE bytes. 
*/ #define ASM_OUTPUT_SKIP(FILE,SIZE) \ fprintf ((FILE), "\t.set .,.+%u\n", (int)(SIZE)) /* Can't use ASM_OUTPUT_SKIP in text section; it doesn't leave 0s. */ #define ASM_NO_SKIP_IN_TEXT 1 /* Define the syntax of labels and symbol definitions/declarations. */ /* The prefix to add for compiler private assembler symbols. */ #undef LOCAL_LABEL_PREFIX #define LOCAL_LABEL_PREFIX "." /* This is how to store into the string BUF the symbol_ref name of an internal numbered label where PREFIX is the class of label and NUM is the number within the class. This is suitable for output with `assemble_name'. */ #undef ASM_GENERATE_INTERNAL_LABEL #define ASM_GENERATE_INTERNAL_LABEL(BUF,PREFIX,NUMBER) \ sprintf ((BUF), "%s%s%ld", LOCAL_LABEL_PREFIX, (PREFIX), (long)(NUMBER)) /* The prefix to add to user-visible assembler symbols. */ #undef USER_LABEL_PREFIX #define USER_LABEL_PREFIX "" /* Definitions needed when using stabs embedded in ELF sections. Copyright (C) 1999 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file may be included by any ELF target which wishes to support -gstabs generating stabs in sections, as produced by gas and understood by gdb. */ #ifndef GCC_DBX_ELF_H #define GCC_DBX_ELF_H /* Output DBX (stabs) debugging information if doing -gstabs. */ #define DBX_DEBUGGING_INFO 1 /* Make LBRAC and RBRAC addresses relative to the start of the function. The native Solaris stabs debugging format works this way, gdb expects it, and it reduces the number of relocation entries... */ #undef DBX_BLOCKS_FUNCTION_RELATIVE #define DBX_BLOCKS_FUNCTION_RELATIVE 1 /* ... but, to make this work, functions must appear prior to line info. */ #undef DBX_FUNCTION_FIRST #define DBX_FUNCTION_FIRST /* When generating stabs debugging, use N_BINCL entries. */ #undef DBX_USE_BINCL #define DBX_USE_BINCL /* There is no limit to the length of stabs strings. */ #ifndef DBX_CONTIN_LENGTH #define DBX_CONTIN_LENGTH 0 #endif /* Like block addresses, stabs line numbers are relative to the current function. */ #undef ASM_OUTPUT_SOURCE_LINE #define ASM_OUTPUT_SOURCE_LINE(FILE, LINE, COUNTER) \ do \ { \ char temp[256]; \ ASM_GENERATE_INTERNAL_LABEL (temp, "LM", COUNTER); \ fprintf (FILE, "\t.stabn 68,0,%d,", LINE); \ assemble_name (FILE, temp); \ putc ('-', FILE); \ assemble_name (FILE, \ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));\ putc ('\n', FILE); \ (*targetm.asm_out.internal_label) (FILE, "LM", COUNTER); \ } \ while (0) /* Generate a blank trailing N_SO to mark the end of the .o file, since we can't depend upon the linker to mark .o file boundaries with embedded stabs. */ #undef DBX_OUTPUT_MAIN_SOURCE_FILE_END #define DBX_OUTPUT_MAIN_SOURCE_FILE_END(FILE, FILENAME) \ asm_fprintf (FILE, \ "\t.text\n\t.stabs \"\",%d,0,0,%LLetext\n%LLetext:\n", N_SO) #endif /* ! 
GCC_DBX_ELF_H */ /* elfos.h -- operating system specific defines to be used when targeting GCC for some generic ELF system Copyright (C) 1991, 1994, 1995, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. Based on svr4.h contributed by Ron Guilmette (rfg@netcom.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define TARGET_OBJFMT_CPP_BUILTINS() \ do \ { \ builtin_define ("__ELF__"); \ } \ while (0) /* Define a symbol indicating that we are using elfos.h. Some CPU specific configuration files use this. */ #define USING_ELFOS_H /* The prefix to add to user-visible assembler symbols. For ELF systems the convention is *not* to prepend a leading underscore onto user-level symbol names. */ #undef USER_LABEL_PREFIX #define USER_LABEL_PREFIX "" /* Biggest alignment supported by the object file format of this machine. Use this macro to limit the alignment which can be specified using the `__attribute__ ((aligned (N)))' construct. If not defined, the default value is `BIGGEST_ALIGNMENT'. */ #ifndef MAX_OFILE_ALIGNMENT #define MAX_OFILE_ALIGNMENT (32768 * 8) #endif /* Use periods rather than dollar signs in special g++ assembler names. */ #define NO_DOLLAR_IN_LABEL /* Writing `int' for a bit-field forces int alignment for the structure. */ #ifndef PCC_BITFIELD_TYPE_MATTERS #define PCC_BITFIELD_TYPE_MATTERS 1 #endif /* Handle #pragma weak and #pragma pack. */ #define HANDLE_SYSV_PRAGMA 1 /* All ELF targets can support DWARF-2. */ #define DWARF2_DEBUGGING_INFO 1 /* The GNU tools operate better with dwarf2, and it is required by some psABI's. Since we don't have any native tools to be compatible with, default to dwarf2. */ #ifndef PREFERRED_DEBUGGING_TYPE #define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG #endif /* All SVR4 targets use the ELF object file format. */ #define OBJECT_FORMAT_ELF /* Output #ident as a .ident. */ #define ASM_OUTPUT_IDENT(FILE, NAME) \ fprintf (FILE, "%s\"%s\"\n", IDENT_ASM_OP, NAME); #define IDENT_ASM_OP "\t.ident\t" #undef SET_ASM_OP #define SET_ASM_OP "\t.set\t" /* Most svr4 assemblers want a .file directive at the beginning of their input file. */ #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true /* This is how to allocate empty space in some section. The .zero pseudo-op is used for this on most svr4 assemblers. */ #define SKIP_ASM_OP "\t.zero\t" #undef ASM_OUTPUT_SKIP #define ASM_OUTPUT_SKIP(FILE, SIZE) \ fprintf ((FILE), "%s"HOST_WIDE_INT_PRINT_UNSIGNED"\n",\ SKIP_ASM_OP, (SIZE)) /* This is how to store into the string LABEL the symbol_ref name of an internal numbered label where PREFIX is the class of label and NUM is the number within the class. This is suitable for output with `assemble_name'. For most svr4 systems, the convention is that any symbol which begins with a period is not put into the linker symbol table by the assembler. 
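As a worked example, the definition below maps PREFIX "LC" and NUM 42 to the
   string "*.LC42": the leading '*' asks assemble_name to emit the rest
   verbatim, and the '.' keeps the label out of the linker symbol table as just
   described.  */

#if 0
/* Illustrative only: what the definition below produces for PREFIX "LC"
   and NUM 42.  */
#include <stdio.h>

int
main (void)
{
  char label[32];

  sprintf (label, "*.%s%u", "LC", (unsigned) 42);
  puts (label);                 /* prints "*.LC42" */
  return 0;
}
#endif
/*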
*/ #undef ASM_GENERATE_INTERNAL_LABEL #define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \ do \ { \ sprintf (LABEL, "*.%s%u", PREFIX, (unsigned) (NUM)); \ } \ while (0) /* Output the label which precedes a jumptable. Note that for all svr4 systems where we actually generate jumptables (which is to say every svr4 target except i386, where we use casesi instead) we put the jump- tables into the .rodata section and since other stuff could have been put into the .rodata section prior to any given jumptable, we have to make sure that the location counter for the .rodata section gets pro- perly re-aligned prior to the actual beginning of the jump table. */ #undef ALIGN_ASM_OP #define ALIGN_ASM_OP "\t.align\t" #ifndef ASM_OUTPUT_BEFORE_CASE_LABEL #define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \ ASM_OUTPUT_ALIGN ((FILE), 2); #endif #undef ASM_OUTPUT_CASE_LABEL #define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \ do \ { \ ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE) \ (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); \ } \ while (0) /* The standard SVR4 assembler seems to require that certain builtin library routines (e.g. .udiv) be explicitly declared as .globl in each assembly file where they are referenced. */ #define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \ (*targetm.asm_out.globalize_label) (FILE, XSTR (FUN, 0)) /* This says how to output assembler code to declare an uninitialized external linkage data object. Under SVR4, the linker seems to want the alignment of data objects to depend on their types. We do exactly that here. */ #define COMMON_ASM_OP "\t.comm\t" #undef ASM_OUTPUT_ALIGNED_COMMON #define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \ do \ { \ fprintf ((FILE), "%s", COMMON_ASM_OP); \ assemble_name ((FILE), (NAME)); \ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \ (SIZE), (ALIGN) / BITS_PER_UNIT); \ } \ while (0) /* This says how to output assembler code to declare an uninitialized internal linkage data object. Under SVR4, the linker seems to want the alignment of data objects to depend on their types. We do exactly that here. */ #define LOCAL_ASM_OP "\t.local\t" #undef ASM_OUTPUT_ALIGNED_LOCAL #define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \ do \ { \ fprintf ((FILE), "%s", LOCAL_ASM_OP); \ assemble_name ((FILE), (NAME)); \ fprintf ((FILE), "\n"); \ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \ } \ while (0) /* This is the pseudo-op used to generate a contiguous sequence of byte values from a double-quoted string WITHOUT HAVING A TERMINATING NUL AUTOMATICALLY APPENDED. This is the same for most svr4 assemblers. */ #undef ASCII_DATA_ASM_OP #define ASCII_DATA_ASM_OP "\t.ascii\t" /* Support a read-only data section. */ #define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata" /* On svr4, we *do* have support for the .init and .fini sections, and we can put stuff in there to be executed before and after `main'. We let crtstuff.c and other files know this by defining the following symbols. The definitions say how to change sections to the .init and .fini sections. This is the same for all known svr4 assemblers. */ #define INIT_SECTION_ASM_OP "\t.section\t.init" #define FINI_SECTION_ASM_OP "\t.section\t.fini" /* Output assembly directive to move to the beginning of current section. 
*/ #ifdef HAVE_GAS_SUBSECTION_ORDERING # define ASM_SECTION_START_OP "\t.subsection\t-1" # define ASM_OUTPUT_SECTION_START(FILE) \ fprintf ((FILE), "%s\n", ASM_SECTION_START_OP) #endif #define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1) /* Switch into a generic section. */ #define TARGET_ASM_NAMED_SECTION default_elf_asm_named_section #undef TARGET_ASM_SELECT_RTX_SECTION #define TARGET_ASM_SELECT_RTX_SECTION default_elf_select_rtx_section #undef TARGET_ASM_SELECT_SECTION #define TARGET_ASM_SELECT_SECTION default_elf_select_section /* Define the strings used for the special svr4 .type and .size directives. These strings generally do not vary from one system running svr4 to another, but if a given system (e.g. m88k running svr) needs to use different pseudo-op names for these, they may be overridden in the file which includes this one. */ #define TYPE_ASM_OP "\t.type\t" #define SIZE_ASM_OP "\t.size\t" /* This is how we tell the assembler that a symbol is weak. */ #define ASM_WEAKEN_LABEL(FILE, NAME) \ do \ { \ fputs ("\t.weak\t", (FILE)); \ assemble_name ((FILE), (NAME)); \ fputc ('\n', (FILE)); \ } \ while (0) /* The following macro defines the format used to output the second operand of the .type assembler directive. Different svr4 assemblers expect various different forms for this operand. The one given here is just a default. You may need to override it in your machine- specific tm.h file (depending upon the particulars of your assembler). */ #define TYPE_OPERAND_FMT "@%s" /* Write the extra assembler code needed to declare a function's result. Most svr4 assemblers don't require any special declaration of the result value, but there are exceptions. */ #ifndef ASM_DECLARE_RESULT #define ASM_DECLARE_RESULT(FILE, RESULT) #endif /* These macros generate the special .type and .size directives which are used to set the corresponding fields of the linker symbol table entries in an ELF object file under SVR4. These macros also output the starting labels for the relevant functions/objects. */ /* Write the extra assembler code needed to declare a function properly. Some svr4 assemblers need to also have something extra said about the function's return value. We allow for that here. */ #ifndef ASM_DECLARE_FUNCTION_NAME #define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \ do \ { \ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \ ASM_OUTPUT_LABEL (FILE, NAME); \ } \ while (0) #endif /* Write the extra assembler code needed to declare an object properly. */ #define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \ do \ { \ HOST_WIDE_INT size; \ \ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \ \ size_directive_output = 0; \ if (!flag_inhibit_size_directive \ && (DECL) && DECL_SIZE (DECL)) \ { \ size_directive_output = 1; \ size = int_size_in_bytes (TREE_TYPE (DECL)); \ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, size); \ } \ \ ASM_OUTPUT_LABEL (FILE, NAME); \ } \ while (0) /* Output the size directive for a decl in rest_of_decl_compilation in the case where we did not do so before the initializer. Once we find the error_mark_node, we know that the value of size_directive_output was set by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */ #undef ASM_FINISH_DECLARE_OBJECT #define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END)\ do \ { \ const char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \ HOST_WIDE_INT size; \ \ if (!flag_inhibit_size_directive \ && DECL_SIZE (DECL) \ && ! 
AT_END && TOP_LEVEL \ && DECL_INITIAL (DECL) == error_mark_node \ && !size_directive_output) \ { \ size_directive_output = 1; \ size = int_size_in_bytes (TREE_TYPE (DECL)); \ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \ } \ } \ while (0) /* This is how to declare the size of a function. */ #ifndef ASM_DECLARE_FUNCTION_SIZE #define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \ do \ { \ if (!flag_inhibit_size_directive) \ ASM_OUTPUT_MEASURED_SIZE (FILE, FNAME); \ } \ while (0) #endif /* A table of bytes codes used by the ASM_OUTPUT_ASCII and ASM_OUTPUT_LIMITED_STRING macros. Each byte in the table corresponds to a particular byte value [0..255]. For any given byte value, if the value in the corresponding table position is zero, the given character can be output directly. If the table value is 1, the byte must be output as a \ooo octal escape. If the tables value is anything else, then the byte value should be output as a \ followed by the value in the table. Note that we can use standard UN*X escape sequences for many control characters, but we don't use \a to represent BEL because some svr4 assemblers (e.g. on the i386) don't know about that. Also, we don't use \v since some versions of gas, such as 2.2 did not accept it. */ #define ESCAPES \ "\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\ \0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\ \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\ \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\ \1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\ \1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\ \1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\ \1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1" /* Some svr4 assemblers have a limit on the number of characters which can appear in the operand of a .string directive. If your assembler has such a limitation, you should define STRING_LIMIT to reflect that limit. Note that at least some svr4 assemblers have a limit on the actual number of bytes in the double-quoted string, and that they count each character in an escape sequence as one byte. Thus, an escape sequence like \377 would count as four bytes. If your target assembler doesn't support the .string directive, you should define this to zero. */ #define STRING_LIMIT ((unsigned) 256) #define STRING_ASM_OP "\t.string\t" /* The routine used to output NUL terminated strings. We use a special version of this for most svr4 targets because doing so makes the generated assembly code more compact (and thus faster to assemble) as well as more readable, especially for targets like the i386 (where the only alternative is to output character sequences as comma separated lists of numbers). */ #define ASM_OUTPUT_LIMITED_STRING(FILE, STR) \ do \ { \ register const unsigned char *_limited_str = \ (const unsigned char *) (STR); \ register unsigned ch; \ \ fprintf ((FILE), "%s\"", STRING_ASM_OP); \ \ for (; (ch = *_limited_str); _limited_str++) \ { \ register int escape; \ \ switch (escape = ESCAPES[ch]) \ { \ case 0: \ putc (ch, (FILE)); \ break; \ case 1: \ fprintf ((FILE), "\\%03o", ch); \ break; \ default: \ putc ('\\', (FILE)); \ putc (escape, (FILE)); \ break; \ } \ } \ \ fprintf ((FILE), "\"\n"); \ } \ while (0) /* The routine used to output sequences of byte values. We use a special version of this for most svr4 targets because doing so makes the generated assembly code more compact (and thus faster to assemble) as well as more readable. 
Note that if we find subparts of the character sequence which end with NUL (and which are shorter than STRING_LIMIT) we output those using ASM_OUTPUT_LIMITED_STRING. */ #undef ASM_OUTPUT_ASCII #define ASM_OUTPUT_ASCII(FILE, STR, LENGTH) \ do \ { \ register const unsigned char *_ascii_bytes = \ (const unsigned char *) (STR); \ register const unsigned char *limit = _ascii_bytes + (LENGTH); \ register unsigned bytes_in_chunk = 0; \ \ for (; _ascii_bytes < limit; _ascii_bytes++) \ { \ register const unsigned char *p; \ \ if (bytes_in_chunk >= 60) \ { \ fprintf ((FILE), "\"\n"); \ bytes_in_chunk = 0; \ } \ \ for (p = _ascii_bytes; p < limit && *p != '\0'; p++) \ continue; \ \ if (p < limit && (p - _ascii_bytes) <= (long)STRING_LIMIT) \ { \ if (bytes_in_chunk > 0) \ { \ fprintf ((FILE), "\"\n"); \ bytes_in_chunk = 0; \ } \ \ ASM_OUTPUT_LIMITED_STRING ((FILE), _ascii_bytes); \ _ascii_bytes = p; \ } \ else \ { \ register int escape; \ register unsigned ch; \ \ if (bytes_in_chunk == 0) \ fprintf ((FILE), "%s\"", ASCII_DATA_ASM_OP); \ \ switch (escape = ESCAPES[ch = *_ascii_bytes]) \ { \ case 0: \ putc (ch, (FILE)); \ bytes_in_chunk++; \ break; \ case 1: \ fprintf ((FILE), "\\%03o", ch); \ bytes_in_chunk += 4; \ break; \ default: \ putc ('\\', (FILE)); \ putc (escape, (FILE)); \ bytes_in_chunk += 2; \ break; \ } \ } \ } \ \ if (bytes_in_chunk > 0) \ fprintf ((FILE), "\"\n"); \ } \ while (0) /* Operating system specific defines to be used when targeting GCC for some generic System V Release 4 system. Copyright (C) 1991, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc. Contributed by Ron Guilmette (rfg@monkeys.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. To use this file, make up a line like that in config.gcc: tm_file="$tm_file elfos.h svr4.h MACHINE/svr4.h" where MACHINE is replaced by the name of the basic hardware that you are targeting for. Then, in the file MACHINE/svr4.h, put any really system-specific defines (or overrides of defines) which you find that you need. */ /* Define a symbol indicating that we are using svr4.h. */ #define USING_SVR4_H /* Cpp, assembler, linker, library, and startfile spec's. */ /* This defines which switch letters take arguments. On svr4, most of the normal cases (defined in gcc.c) apply, and we also have -h* and -z* options (for the linker). Note however that there is no such thing as a -T option for svr4. */ #undef SWITCH_TAKES_ARG #define SWITCH_TAKES_ARG(CHAR) \ (DEFAULT_SWITCH_TAKES_ARG (CHAR) \ || (CHAR) == 'h' \ || (CHAR) == 'x' \ || (CHAR) == 'z') /* This defines which multi-letter switches take arguments. On svr4, there are no such switches except those implemented by GCC itself. */ #define WORD_SWITCH_TAKES_ARG(STR) \ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \ && strcmp (STR, "Tdata") && strcmp (STR, "Ttext") \ && strcmp (STR, "Tbss")) /* Provide an ASM_SPEC appropriate for svr4. 
Here we try to support as many of the specialized svr4 assembler options as seems reasonable, given that there are certain options which we can't (or shouldn't) support directly due to the fact that they conflict with other options for other svr4 tools (e.g. ld) or with other options for GCC itself. For example, we don't support the -o (output file) or -R (remove input file) options because GCC already handles these things. We also don't support the -m (run m4) option for the assembler because that conflicts with the -m (produce load map) option of the svr4 linker. We do however allow passing arbitrary options to the svr4 assembler via the -Wa, option. Note that gcc doesn't allow a space to follow -Y in a -Ym,* or -Yd,* option. The svr4 assembler wants '-' on the command line if it's expected to read its stdin. */ #undef ASM_SPEC #define ASM_SPEC \ "%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}" #define AS_NEEDS_DASH_FOR_PIPED_INPUT /* Under svr4, the normal location of the `ld' and `as' programs is the /usr/ccs/bin directory. */ #ifndef CROSS_COMPILE #undef MD_EXEC_PREFIX #define MD_EXEC_PREFIX "/usr/ccs/bin/" #endif /* Under svr4, the normal location of the various *crt*.o files is the /usr/ccs/lib directory. */ #ifndef CROSS_COMPILE #undef MD_STARTFILE_PREFIX #define MD_STARTFILE_PREFIX "/usr/ccs/lib/" #endif /* Provide a LIB_SPEC appropriate for svr4. Here we tack on the default standard C library (unless we are building a shared library). */ #undef LIB_SPEC #define LIB_SPEC "%{!shared:%{!symbolic:-lc}}" /* Provide an ENDFILE_SPEC appropriate for svr4. Here we tack on our own magical crtend.o file (see crtstuff.c) which provides part of the support for getting C++ file-scope static object constructed before entering `main', followed by the normal svr3/svr4 "finalizer" file, which is either `gcrtn.o' or `crtn.o'. */ #undef ENDFILE_SPEC #define ENDFILE_SPEC "crtend.o%s %{pg:gcrtn.o%s}%{!pg:crtn.o%s}" /* Provide a LINK_SPEC appropriate for svr4. Here we provide support for the special GCC options -static, -shared, and -symbolic which allow us to link things in one of these three modes by applying the appropriate combinations of options at link-time. We also provide support here for as many of the other svr4 linker options as seems reasonable, given that some of them conflict with options for other svr4 tools (e.g. the assembler). In particular, we do support the -z*, -V, -b, -t, -Qy, -Qn, and -YP* options here, and the -e*, -l*, -o*, -r, -s, -u*, and -L* options are directly supported by gcc.c itself. We don't directly support the -m (generate load map) option because that conflicts with the -m (run m4) option of the svr4 assembler. We also don't directly support the svr4 linker's -I* or -M* options because these conflict with existing GCC options. We do however allow passing arbitrary options to the svr4 linker via the -Wl, option, in gcc.c. We don't support the svr4 linker's -a option at all because it is totally useless and because it conflicts with GCC's own -a option. Note that gcc doesn't allow a space to follow -Y in a -YP,* option. When the -G link option is used (-shared and -symbolic) a final link is not being done. 
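As a rough illustration of the spec below (a sketch of the resulting linker command line, not verbatim GCC output): `-shared' contributes `-G -dy -z text', `-symbolic' contributes `-Bsymbolic -G -dy -z text', and `-static' contributes `-dn -Bstatic', with `-Qy' added unless `-Qn' was given.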
*/ #undef LINK_SPEC #ifdef CROSS_COMPILE #define LINK_SPEC "%{h*} %{v:-V} \ %{b} \ %{static:-dn -Bstatic} \ %{shared:-G -dy -z text} \ %{symbolic:-Bsymbolic -G -dy -z text} \ %{G:-G} \ %{YP,*} \ %{Qy:} %{!Qn:-Qy}" #else #define LINK_SPEC "%{h*} %{v:-V} \ %{b} \ %{static:-dn -Bstatic} \ %{shared:-G -dy -z text} \ %{symbolic:-Bsymbolic -G -dy -z text} \ %{G:-G} \ %{YP,*} \ %{!YP,*:%{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \ %{!p:-Y P,/usr/ccs/lib:/usr/lib}} \ %{Qy:} %{!Qn:-Qy}" #endif /* GCC automatically adds in one of the files /usr/ccs/lib/values-Xc.o or /usr/ccs/lib/values-Xa.o for each final link step (depending upon the other gcc options selected, such as -ansi). These files each contain one (initialized) copy of a special variable called `_lib_version'. Each one of these files has `_lib_version' initialized to a different (enum) value. The SVR4 library routines query the value of `_lib_version' at run time to decide how they should behave. Specifically, they decide (based upon the value of `_lib_version') if they will act in a strictly ANSI conforming manner or not. */ #undef STARTFILE_SPEC #define STARTFILE_SPEC "%{!shared: \ %{!symbolic: \ %{pg:gcrt1.o%s}%{!pg:%{p:mcrt1.o%s}%{!p:crt1.o%s}}}}\ %{pg:gcrti.o%s}%{!pg:crti.o%s} \ %{ansi:values-Xc.o%s} \ %{!ansi:values-Xa.o%s} \ crtbegin.o%s" /* The numbers used to denote specific machine registers in the System V Release 4 DWARF debugging information are quite likely to be totally different from the numbers used in BSD stabs debugging information for the same kind of target machine. Thus, we undefine the macro DBX_REGISTER_NUMBER here as an extra inducement to get people to provide proper machine-specific definitions of DBX_REGISTER_NUMBER (which is also used to provide DWARF register numbers in dwarfout.c) in their tm.h files which include this file. */ #undef DBX_REGISTER_NUMBER /* Define the actual types of some ANSI-mandated types. (These definitions should work for most SVR4 systems). */ #undef SIZE_TYPE #define SIZE_TYPE "unsigned int" #undef PTRDIFF_TYPE #define PTRDIFF_TYPE "int" #undef WCHAR_TYPE #define WCHAR_TYPE "long int" #undef WCHAR_TYPE_SIZE #define WCHAR_TYPE_SIZE BITS_PER_WORD #define TARGET_HAS_F_SETLKW /* Definitions for Linux-based GNU systems with ELF format Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2003, 2004 Free Software Foundation, Inc. Contributed by Eric Youngdale. Modified for stabs-in-ELF by H.J. Lu (hjl@lucon.org). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Don't assume anything about the header files. */ #define NO_IMPLICIT_EXTERN_C #undef ASM_APP_ON #define ASM_APP_ON "#APP\n" #undef ASM_APP_OFF #define ASM_APP_OFF "#NO_APP\n" #undef MD_EXEC_PREFIX #undef MD_STARTFILE_PREFIX /* Provide a STARTFILE_SPEC appropriate for GNU/Linux.
Here we add the GNU/Linux magical crtbegin.o file (see crtstuff.c) which provides part of the support for getting C++ file-scope static objects constructed before entering `main'. */ #undef STARTFILE_SPEC #ifdef USE_GNULIBC_1 #define STARTFILE_SPEC \ "%{!shared: \ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \ %{!p:%{profile:gcrt1.o%s} \ %{!profile:crt1.o%s}}}} \ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}" #elif defined HAVE_LD_PIE #define STARTFILE_SPEC \ "%{!shared: %{pg|p|profile:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}} \ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}" #else #define STARTFILE_SPEC \ "%{!shared: %{pg|p|profile:gcrt1.o%s;:crt1.o%s}} \ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}" #endif /* Provide an ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on the GNU/Linux magical crtend.o file (see crtstuff.c) which provides part of the support for getting C++ file-scope static objects constructed before entering `main', followed by a normal GNU/Linux "finalizer" file, `crtn.o'. */ #undef ENDFILE_SPEC #define ENDFILE_SPEC \ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s" /* This is for -profile to use -lc_p instead of -lc. */ #ifndef CC1_SPEC #define CC1_SPEC "%{profile:-p}" #endif /* The GNU C++ standard library requires that these macros be defined. */ #undef CPLUSPLUS_CPP_SPEC #define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)" #undef LIB_SPEC /* We no longer link with libc_p.a or libg.a by default. If you want to profile or debug the GNU/Linux C library, please add -profile or -ggdb to LDFLAGS at the link time, respectively. */ #if 1 #ifdef USE_GNULIBC_1 #define LIB_SPEC \ "%{!shared: %{p:-lgmon} %{pg:-lgmon} %{profile:-lgmon -lc_p} \ %{!profile:%{!ggdb:-lc} %{ggdb:-lg}}}" #else #define LIB_SPEC \ "%{pthread:-lpthread} \ %{shared:-lc} \ %{!shared:%{mieee-fp:-lieee} %{profile:-lc_p}%{!profile:-lc}}" #endif #else #define LIB_SPEC \ "%{!shared: \ %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \ %{!p:%{!pg:%{!g*:-lc} %{g*:-lg}}}}" #endif #define LINUX_TARGET_OS_CPP_BUILTINS() \ do { \ builtin_define ("__gnu_linux__"); \ builtin_define_std ("linux"); \ builtin_define_std ("unix"); \ builtin_assert ("system=linux"); \ builtin_assert ("system=unix"); \ builtin_assert ("system=posix"); \ } while (0) #if !defined(USE_GNULIBC_1) && defined(HAVE_LD_EH_FRAME_HDR) #define LINK_EH_SPEC "%{!static:--eh-frame-hdr} " #endif /* Define this so we can compile MS code for use with WINE. */ #define HANDLE_PRAGMA_PACK_PUSH_POP #define LINK_GCC_C_SEQUENCE_SPEC \ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}" /* Use --as-needed -lgcc_s for eh support. */ #ifdef HAVE_LD_AS_NEEDED #define USE_LD_AS_NEEDED 1 #endif /* Determine whether the entire c99 runtime is present in the runtime library. */ #ifndef USE_GNULIBC_1 #define TARGET_C99_FUNCTIONS 1 #endif #define TARGET_HAS_F_SETLKW /* Definitions for Intel 386 running Linux-based GNU systems with ELF format. Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2001, 2002 Free Software Foundation, Inc. Contributed by Eric Youngdale. Modified for stabs-in-ELF by H.J. Lu. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Output at beginning of assembler file. */ /* The .file command should always begin the output. */ #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true #define TARGET_VERSION fprintf (stderr, " (i386 Linux/ELF)"); /* The svr4 ABI for the i386 says that records and unions are returned in memory. */ #undef DEFAULT_PCC_STRUCT_RETURN #define DEFAULT_PCC_STRUCT_RETURN 1 /* We arrange for the whole %gs segment to map the tls area. */ #undef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT #define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT MASK_TLS_DIRECT_SEG_REFS #undef ASM_COMMENT_START #define ASM_COMMENT_START "#" #undef DBX_REGISTER_NUMBER #define DBX_REGISTER_NUMBER(n) \ (TARGET_64BIT ? dbx64_register_map[n] : svr4_dbx_register_map[n]) /* Output assembler code to FILE to call the profiler. To the best of my knowledge, no Linux libc has required the label argument to mcount. */ #define NO_PROFILE_COUNTERS 1 #undef MCOUNT_NAME #define MCOUNT_NAME "mcount" /* The GLIBC version of mcount for the x86 assumes that there is a frame, so we cannot allow profiling without a frame pointer. */ #undef SUBTARGET_FRAME_POINTER_REQUIRED #define SUBTARGET_FRAME_POINTER_REQUIRED current_function_profile #undef SIZE_TYPE #define SIZE_TYPE "unsigned int" #undef PTRDIFF_TYPE #define PTRDIFF_TYPE "int" #undef WCHAR_TYPE #define WCHAR_TYPE "long int" #undef WCHAR_TYPE_SIZE #define WCHAR_TYPE_SIZE BITS_PER_WORD #define TARGET_OS_CPP_BUILTINS() \ do \ { \ LINUX_TARGET_OS_CPP_BUILTINS(); \ if (flag_pic) \ { \ builtin_define ("__PIC__"); \ builtin_define ("__pic__"); \ } \ } \ while (0) #undef CPP_SPEC #ifdef USE_GNULIBC_1 #define CPP_SPEC "%{posix:-D_POSIX_SOURCE}" #else #define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" #endif #undef CC1_SPEC #define CC1_SPEC "%(cc1_cpu) %{profile:-p}" /* Provide a LINK_SPEC appropriate for Linux. Here we provide support for the special GCC options -static and -shared, which allow us to link things in one of these modes by applying the appropriate combinations of options at link-time. We would also like to support as many of the other GNU linker options as possible. But I don't have the time to search for those flags. I am sure how to add support for -soname shared_object_name. H.J. I took out %{v:%{!V:-V}}. It is too much :-(. They can use -Wl,-V. When the -shared link option is used a final link is not being done. */ /* If ELF is the default format, we should not use /lib/elf. */ #define LINK_EMULATION "elf_i386" #ifdef USE_GNULIBC_1 # define DYNAMIC_LINKER "/lib/ld-linux.so.1" #else # define DYNAMIC_LINKER "/lib/ld-linux.so.2" #endif #undef SUBTARGET_EXTRA_SPECS #define SUBTARGET_EXTRA_SPECS \ { "link_emulation", LINK_EMULATION },\ { "dynamic_linker", DYNAMIC_LINKER } #undef LINK_SPEC #define LINK_SPEC "-m %(link_emulation) %{shared:-shared} \ %{!shared: \ %{!ibcs: \ %{!static: \ %{rdynamic:-export-dynamic} \ %{!dynamic-linker:-dynamic-linker %(dynamic_linker)}} \ %{static:-static}}}" /* A C statement (sans semicolon) to output to the stdio stream FILE the assembler definition of uninitialized global DECL named NAME whose size is SIZE bytes and alignment is ALIGN bytes. Try to use asm_output_aligned_bss to implement this macro.
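Roughly speaking, asm_output_aligned_bss switches to the .bss section, emits the requested alignment, outputs the label, and reserves SIZE bytes; for a hypothetical 64-byte object `buf' aligned to 4 bytes this comes out along the lines of an alignment directive, the label `buf:', and a skip/zero directive for 64 bytes (the exact pseudo-ops depend on the target's ALIGN and SKIP macros).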
*/ #define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN) /* A C statement to output to the stdio stream FILE an assembler command to advance the location counter to a multiple of 1<<LOG bytes if it is within MAX_SKIP bytes. */ #ifdef IN_LIBGCC2 #ifndef USE_GNULIBC_1 #include <signal.h> #include <sys/ucontext.h> #define REG_NAME(reg) reg #define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \ do { \ unsigned char *pc_ = (CONTEXT)->ra; \ struct sigcontext *sc_; \ long new_cfa_; \ \ /* popl %eax ; movl $__NR_sigreturn,%eax ; int $0x80 */ \ if (*(unsigned short *)(pc_+0) == 0xb858 \ && *(unsigned int *)(pc_+2) == 119 \ && *(unsigned short *)(pc_+6) == 0x80cd) \ sc_ = (CONTEXT)->cfa + 4; \ /* movl $__NR_rt_sigreturn,%eax ; int $0x80 */ \ else if (*(unsigned char *)(pc_+0) == 0xb8 \ && *(unsigned int *)(pc_+1) == 173 \ && *(unsigned short *)(pc_+5) == 0x80cd) \ { \ struct rt_sigframe { \ int sig; \ struct siginfo *pinfo; \ void *puc; \ struct siginfo info; \ struct ucontext uc; \ } *rt_ = (CONTEXT)->cfa; \ sc_ = (struct sigcontext *) &rt_->uc.uc_mcontext; \ } \ else \ break; \ \ new_cfa_ = sc_->REG_NAME(esp); \ (FS)->cfa_how = CFA_REG_OFFSET; \ (FS)->cfa_reg = 4; \ (FS)->cfa_offset = new_cfa_ - (long) (CONTEXT)->cfa; \ \ /* The SVR4 register numbering macros aren't usable in libgcc. */ \ (FS)->regs.reg[0].how = REG_SAVED_OFFSET; \ (FS)->regs.reg[0].loc.offset = (long)&sc_->REG_NAME(eax) - new_cfa_; \ (FS)->regs.reg[3].how = REG_SAVED_OFFSET; \ (FS)->regs.reg[3].loc.offset = (long)&sc_->REG_NAME(ebx) - new_cfa_; \ (FS)->regs.reg[1].how = REG_SAVED_OFFSET; \ (FS)->regs.reg[1].loc.offset = (long)&sc_->REG_NAME(ecx) - new_cfa_; \ (FS)->regs.reg[2].how = REG_SAVED_OFFSET; \ (FS)->regs.reg[2].loc.offset = (long)&sc_->REG_NAME(edx) - new_cfa_; \ (FS)->regs.reg[6].how = REG_SAVED_OFFSET; \ (FS)->regs.reg[6].loc.offset = (long)&sc_->REG_NAME(esi) - new_cfa_; \ (FS)->regs.reg[7].how = REG_SAVED_OFFSET; \ (FS)->regs.reg[7].loc.offset = (long)&sc_->REG_NAME(edi) - new_cfa_; \ (FS)->regs.reg[5].how = REG_SAVED_OFFSET; \ (FS)->regs.reg[5].loc.offset = (long)&sc_->REG_NAME(ebp) - new_cfa_; \ (FS)->regs.reg[8].how = REG_SAVED_OFFSET; \ (FS)->regs.reg[8].loc.offset = (long)&sc_->REG_NAME(eip) - new_cfa_; \ (FS)->retaddr_column = 8; \ goto SUCCESS; \ } while (0) #endif /* not USE_GNULIBC_1 */ #endif /* IN_LIBGCC2 */ /* Definitions of various defaults for tm.h macros. Copyright (C) 1992, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Ron Guilmette (rfg@monkeys.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ #ifndef GCC_DEFAULTS_H #define GCC_DEFAULTS_H #ifndef GET_ENVIRONMENT #define GET_ENVIRONMENT(VALUE, NAME) do { (VALUE) = getenv (NAME); } while (0) #endif #define obstack_chunk_alloc ((void *(*) (long)) xmalloc) #define obstack_chunk_free ((void (*) (void *)) free) #define OBSTACK_CHUNK_SIZE 0 #define gcc_obstack_init(OBSTACK) \ _obstack_begin ((OBSTACK), OBSTACK_CHUNK_SIZE, 0, \ obstack_chunk_alloc, \ obstack_chunk_free) /* Define default standard character escape sequences. */ #ifndef TARGET_BELL # define TARGET_BELL 007 # define TARGET_BS 010 # define TARGET_CR 015 # define TARGET_DIGIT0 060 # define TARGET_ESC 033 # define TARGET_FF 014 # define TARGET_NEWLINE 012 # define TARGET_TAB 011 # define TARGET_VT 013 #endif /* Store in OUTPUT a string (made with alloca) containing an assembler-name for a local static variable or function named NAME. LABELNO is an integer which is different for each call. */ #ifndef ASM_PN_FORMAT # ifndef NO_DOT_IN_LABEL # define ASM_PN_FORMAT "%s.%lu" # else # ifndef NO_DOLLAR_IN_LABEL # define ASM_PN_FORMAT "%s$%lu" # else # define ASM_PN_FORMAT "__%s_%lu" # endif # endif #endif /* ! ASM_PN_FORMAT */ #ifndef ASM_FORMAT_PRIVATE_NAME # define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \ do { const char *const name_ = (NAME); \ char *const output_ = (OUTPUT) = alloca (strlen (name_) + 32);\ sprintf (output_, ASM_PN_FORMAT, name_, (unsigned long)(LABELNO)); \ } while (0) #endif #ifndef ASM_STABD_OP #define ASM_STABD_OP "\t.stabd\t" #endif /* This is how to output an element of a case-vector that is absolute. Some targets don't use this, but we have to define it anyway. */ #ifndef ASM_OUTPUT_ADDR_VEC_ELT #define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \ do { fputs (integer_asm_op (POINTER_SIZE / BITS_PER_UNIT, TRUE), FILE); \ (*targetm.asm_out.internal_label) (FILE, "L", (VALUE)); \ fputc ('\n', FILE); \ } while (0) #endif /* Choose a reasonable default for ASM_OUTPUT_ASCII. */ #ifndef ASM_OUTPUT_ASCII #define ASM_OUTPUT_ASCII(MYFILE, MYSTRING, MYLENGTH) \ do { \ FILE *_hide_asm_out_file = (MYFILE); \ const unsigned char *_hide_p = (const unsigned char *) (MYSTRING); \ int _hide_thissize = (MYLENGTH); \ { \ FILE *asm_out_file = _hide_asm_out_file; \ const unsigned char *p = _hide_p; \ int thissize = _hide_thissize; \ int i; \ fprintf (asm_out_file, "\t.ascii \""); \ \ for (i = 0; i < thissize; i++) \ { \ int c = p[i]; \ if (c == '\"' || c == '\\') \ putc ('\\', asm_out_file); \ if (ISPRINT(c)) \ putc (c, asm_out_file); \ else \ { \ fprintf (asm_out_file, "\\%o", c); \ /* After an octal-escape, if a digit follows, \ terminate one string constant and start another. \ The VAX assembler fails to stop reading the escape \ after three digits, so this is the only way we \ can get it to parse the data properly. */ \ if (i < thissize - 1 && ISDIGIT(p[i + 1])) \ fprintf (asm_out_file, "\"\n\t.ascii \""); \ } \ } \ fprintf (asm_out_file, "\"\n"); \ } \ } \ while (0) #endif /* This is how we tell the assembler to equate two values. */ #ifdef SET_ASM_OP #ifndef ASM_OUTPUT_DEF #define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \ do { fprintf ((FILE), "%s", SET_ASM_OP); \ assemble_name (FILE, LABEL1); \ fprintf (FILE, ","); \ assemble_name (FILE, LABEL2); \ fprintf (FILE, "\n"); \ } while (0) #endif #endif /* This is how to output the definition of a user-level label named NAME, such as the label on a static function or variable NAME. 
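For example, given the name `foo' (and the default empty USER_LABEL_PREFIX), the definition below simply writes `foo:' followed by a newline to the assembly file.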
*/ #ifndef ASM_OUTPUT_LABEL #define ASM_OUTPUT_LABEL(FILE,NAME) \ do { assemble_name ((FILE), (NAME)); fputs (":\n", (FILE)); } while (0) #endif /* This is how to output a reference to a user-level label named NAME. */ #ifndef ASM_OUTPUT_LABELREF #define ASM_OUTPUT_LABELREF(FILE,NAME) asm_fprintf ((FILE), "%U%s", (NAME)) #endif /* Allow target to print debug info labels specially. This is useful for VLIW targets, since debug info labels should go into the middle of instruction bundles instead of breaking them. */ #ifndef ASM_OUTPUT_DEBUG_LABEL #define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM) \ (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM) #endif /* This is how we tell the assembler that a symbol is weak. */ #ifndef ASM_OUTPUT_WEAK_ALIAS #if defined (ASM_WEAKEN_LABEL) && defined (ASM_OUTPUT_DEF) #define ASM_OUTPUT_WEAK_ALIAS(STREAM, NAME, VALUE) \ do \ { \ ASM_WEAKEN_LABEL (STREAM, NAME); \ if (VALUE) \ ASM_OUTPUT_DEF (STREAM, NAME, VALUE); \ } \ while (0) #endif #endif /* How to emit a .type directive. */ #ifndef ASM_OUTPUT_TYPE_DIRECTIVE #if defined TYPE_ASM_OP && defined TYPE_OPERAND_FMT #define ASM_OUTPUT_TYPE_DIRECTIVE(STREAM, NAME, TYPE) \ do \ { \ fputs (TYPE_ASM_OP, STREAM); \ assemble_name (STREAM, NAME); \ fputs (", ", STREAM); \ fprintf (STREAM, TYPE_OPERAND_FMT, TYPE); \ putc ('\n', STREAM); \ } \ while (0) #endif #endif /* How to emit a .size directive. */ #ifndef ASM_OUTPUT_SIZE_DIRECTIVE #ifdef SIZE_ASM_OP #define ASM_OUTPUT_SIZE_DIRECTIVE(STREAM, NAME, SIZE) \ do \ { \ HOST_WIDE_INT size_ = (SIZE); \ fputs (SIZE_ASM_OP, STREAM); \ assemble_name (STREAM, NAME); \ fprintf (STREAM, ", " HOST_WIDE_INT_PRINT_DEC "\n", size_); \ } \ while (0) #define ASM_OUTPUT_MEASURED_SIZE(STREAM, NAME) \ do \ { \ fputs (SIZE_ASM_OP, STREAM); \ assemble_name (STREAM, NAME); \ fputs (", .-", STREAM); \ assemble_name (STREAM, NAME); \ putc ('\n', STREAM); \ } \ while (0) #endif #endif /* This determines whether or not we support weak symbols. */ #ifndef SUPPORTS_WEAK #if defined (ASM_WEAKEN_LABEL) || defined (ASM_WEAKEN_DECL) #define SUPPORTS_WEAK 1 #else #define SUPPORTS_WEAK 0 #endif #endif /* This determines whether or not we support link-once semantics. */ #ifndef SUPPORTS_ONE_ONLY #ifdef MAKE_DECL_ONE_ONLY #define SUPPORTS_ONE_ONLY 1 #else #define SUPPORTS_ONE_ONLY 0 #endif #endif /* This determines whether weak symbols must be left out of a static archive's table of contents. Defining this macro to be nonzero has the consequence that certain symbols will not be made weak that otherwise would be. The C++ ABI requires this macro to be zero; see the documentation. */ #ifndef TARGET_WEAK_NOT_IN_ARCHIVE_TOC #define TARGET_WEAK_NOT_IN_ARCHIVE_TOC 0 #endif /* This determines whether or not we need linkonce unwind information */ #ifndef TARGET_USES_WEAK_UNWIND_INFO #define TARGET_USES_WEAK_UNWIND_INFO 0 #endif /* By default, there is no prefix on user-defined symbols. */ #ifndef USER_LABEL_PREFIX #define USER_LABEL_PREFIX "" #endif /* If the target supports weak symbols, define TARGET_ATTRIBUTE_WEAK to provide a weak attribute. Else define it to nothing. This would normally belong in ansidecl.h, but SUPPORTS_WEAK is not available at that time. Note, this is only for use by target files which we know are to be compiled by GCC. */ #ifndef TARGET_ATTRIBUTE_WEAK # if SUPPORTS_WEAK # define TARGET_ATTRIBUTE_WEAK __attribute__ ((weak)) # else # define TARGET_ATTRIBUTE_WEAK # endif #endif /* This determines whether this target supports hidden visibility. 
This is a weaker condition than HAVE_GAS_HIDDEN, which probes for specific assembler syntax. */ #ifndef TARGET_SUPPORTS_HIDDEN # ifdef HAVE_GAS_HIDDEN # define TARGET_SUPPORTS_HIDDEN 1 # else # define TARGET_SUPPORTS_HIDDEN 0 # endif #endif /* Determines whether we may use common symbols to represent one-only semantics (a.k.a. "vague linkage"). */ #ifndef USE_COMMON_FOR_ONE_ONLY # define USE_COMMON_FOR_ONE_ONLY 1 #endif /* If the target supports init_priority C++ attribute, give SUPPORTS_INIT_PRIORITY a nonzero value. */ #ifndef SUPPORTS_INIT_PRIORITY #define SUPPORTS_INIT_PRIORITY 1 #endif /* SUPPORTS_INIT_PRIORITY */ /* If duplicate library search directories can be removed from a linker command without changing the linker's semantics, give this symbol a nonzero. */ #ifndef LINK_ELIMINATE_DUPLICATE_LDIRECTORIES #define LINK_ELIMINATE_DUPLICATE_LDIRECTORIES 0 #endif /* LINK_ELIMINATE_DUPLICATE_LDIRECTORIES */ /* If we have a definition of INCOMING_RETURN_ADDR_RTX, assume that the rest of the DWARF 2 frame unwind support is also provided. */ #if !defined (DWARF2_UNWIND_INFO) && defined (INCOMING_RETURN_ADDR_RTX) #define DWARF2_UNWIND_INFO 1 #endif /* If we have named sections, and we're using crtstuff to run ctors, use them for registering eh frame information. */ #if defined (TARGET_ASM_NAMED_SECTION) && DWARF2_UNWIND_INFO \ && !defined(EH_FRAME_IN_DATA_SECTION) #ifndef EH_FRAME_SECTION_NAME #define EH_FRAME_SECTION_NAME ".eh_frame" #endif #endif /* If we have named section and we support weak symbols, then use the .jcr section for recording java classes which need to be registered at program start-up time. */ #if defined (TARGET_ASM_NAMED_SECTION) && SUPPORTS_WEAK #ifndef JCR_SECTION_NAME #define JCR_SECTION_NAME ".jcr" #endif #endif /* By default, we generate a label at the beginning and end of the text section, and compute the size of the text section by subtracting the two. However, on some platforms that doesn't work, and we use the section itself, rather than a label at the beginning of it, to indicate the start of the section. On such platforms, define this to zero. */ #ifndef DWARF2_GENERATE_TEXT_SECTION_LABEL #define DWARF2_GENERATE_TEXT_SECTION_LABEL 1 #endif /* Number of hardware registers that go into the DWARF-2 unwind info. If not defined, equals FIRST_PSEUDO_REGISTER */ #ifndef DWARF_FRAME_REGISTERS #define DWARF_FRAME_REGISTERS FIRST_PSEUDO_REGISTER #endif /* How to renumber registers for dbx and gdb. If not defined, assume no renumbering is necessary. */ #ifndef DBX_REGISTER_NUMBER #define DBX_REGISTER_NUMBER(REGNO) (REGNO) #endif /* Default sizes for base C types. If the sizes are different for your target, you should override these values by defining the appropriate symbols in your tm.h file. */ #ifndef BITS_PER_UNIT #define BITS_PER_UNIT 8 #endif #ifndef BITS_PER_WORD #define BITS_PER_WORD (BITS_PER_UNIT * UNITS_PER_WORD) #endif #ifndef CHAR_TYPE_SIZE #define CHAR_TYPE_SIZE BITS_PER_UNIT #endif #ifndef BOOL_TYPE_SIZE /* `bool' has size and alignment `1', on almost all platforms. 
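With the default CHAR_TYPE_SIZE of BITS_PER_UNIT, i.e. 8 bits on ordinary byte-addressed targets, the definition below therefore makes `bool' one byte wide.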
*/ #define BOOL_TYPE_SIZE CHAR_TYPE_SIZE #endif #ifndef SHORT_TYPE_SIZE #define SHORT_TYPE_SIZE (BITS_PER_UNIT * MIN ((UNITS_PER_WORD + 1) / 2, 2)) #endif #ifndef INT_TYPE_SIZE #define INT_TYPE_SIZE BITS_PER_WORD #endif #ifndef LONG_TYPE_SIZE #define LONG_TYPE_SIZE BITS_PER_WORD #endif #ifndef LONG_LONG_TYPE_SIZE #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2) #endif #ifndef WCHAR_TYPE_SIZE #define WCHAR_TYPE_SIZE INT_TYPE_SIZE #endif #ifndef FLOAT_TYPE_SIZE #define FLOAT_TYPE_SIZE BITS_PER_WORD #endif #ifndef DOUBLE_TYPE_SIZE #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2) #endif #ifndef LONG_DOUBLE_TYPE_SIZE #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2) #endif /* Width in bits of a pointer. Mind the value of the macro `Pmode'. */ #ifndef POINTER_SIZE #define POINTER_SIZE BITS_PER_WORD #endif #ifndef PIC_OFFSET_TABLE_REGNUM #define PIC_OFFSET_TABLE_REGNUM INVALID_REGNUM #endif /* By default, the preprocessor should be invoked the same way in C++ as in C. */ #ifndef CPLUSPLUS_CPP_SPEC #ifdef CPP_SPEC #define CPLUSPLUS_CPP_SPEC CPP_SPEC #endif #endif #ifndef ACCUMULATE_OUTGOING_ARGS #define ACCUMULATE_OUTGOING_ARGS 0 #endif /* Supply a default definition for PUSH_ARGS. */ #ifndef PUSH_ARGS #ifdef PUSH_ROUNDING #define PUSH_ARGS !ACCUMULATE_OUTGOING_ARGS #else #define PUSH_ARGS 0 #endif #endif /* Decide whether a function's arguments should be processed from first to last or from last to first. They should if the stack and args grow in opposite directions, but only if we have push insns. */ #ifdef PUSH_ROUNDING #ifndef PUSH_ARGS_REVERSED #if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD) #define PUSH_ARGS_REVERSED PUSH_ARGS #endif #endif #endif #ifndef PUSH_ARGS_REVERSED #define PUSH_ARGS_REVERSED 0 #endif /* If PREFERRED_STACK_BOUNDARY is not defined, set it to STACK_BOUNDARY. STACK_BOUNDARY is required. */ #ifndef PREFERRED_STACK_BOUNDARY #define PREFERRED_STACK_BOUNDARY STACK_BOUNDARY #endif /* By default, the C++ compiler will use function addresses in the vtable entries. Setting this nonzero tells the compiler to use function descriptors instead. The value of this macro says how many words wide the descriptor is (normally 2). It is assumed that the address of a function descriptor may be treated as a pointer to a function. */ #ifndef TARGET_VTABLE_USES_DESCRIPTORS #define TARGET_VTABLE_USES_DESCRIPTORS 0 #endif /* By default, the vtable entries are void pointers, so the alignment is the same as pointer alignment. The value of this macro specifies the alignment of the vtable entry in bits. It should be defined only when special alignment is necessary. */ #ifndef TARGET_VTABLE_ENTRY_ALIGN #define TARGET_VTABLE_ENTRY_ALIGN POINTER_SIZE #endif /* There are a few non-descriptor entries in the vtable at offsets below zero. If these entries must be padded (say, to preserve the alignment specified by TARGET_VTABLE_ENTRY_ALIGN), set this to the number of words in each data entry. */ #ifndef TARGET_VTABLE_DATA_ENTRY_DISTANCE #define TARGET_VTABLE_DATA_ENTRY_DISTANCE 1 #endif /* Decide whether it is safe to use a local alias for a virtual function when constructing thunks. */ #ifndef TARGET_USE_LOCAL_THUNK_ALIAS_P #ifdef ASM_OUTPUT_DEF #define TARGET_USE_LOCAL_THUNK_ALIAS_P(DECL) 1 #else #define TARGET_USE_LOCAL_THUNK_ALIAS_P(DECL) 0 #endif #endif /* Select a format to encode pointers in exception handling data. We prefer those that result in fewer dynamic relocations. Assume no special support here and encode direct references.
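DW_EH_PE_absptr stores each pointer directly as an absolute address, with no pc-relative, section-relative, or indirect encoding applied.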
*/ #ifndef ASM_PREFERRED_EH_DATA_FORMAT #define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) DW_EH_PE_absptr #endif /* By default, the C++ compiler will use the lowest bit of the pointer to function to indicate a pointer-to-member-function points to a virtual member function. However, if FUNCTION_BOUNDARY indicates function addresses aren't always even, the lowest bit of the delta field will be used. */ #ifndef TARGET_PTRMEMFUNC_VBIT_LOCATION #define TARGET_PTRMEMFUNC_VBIT_LOCATION \ (FUNCTION_BOUNDARY >= 2 * BITS_PER_UNIT \ ? ptrmemfunc_vbit_in_pfn : ptrmemfunc_vbit_in_delta) #endif #ifndef DEFAULT_GDB_EXTENSIONS #define DEFAULT_GDB_EXTENSIONS 1 #endif /* If more than one debugging type is supported, you must define PREFERRED_DEBUGGING_TYPE to choose a format in a system-dependent way. This is one long line cause VAXC can't handle a \-newline. */ #if 1 < (defined (DBX_DEBUGGING_INFO) + defined (SDB_DEBUGGING_INFO) + defined (DWARF2_DEBUGGING_INFO) + defined (XCOFF_DEBUGGING_INFO) + defined (VMS_DEBUGGING_INFO)) #ifndef PREFERRED_DEBUGGING_TYPE You Lose! You must define PREFERRED_DEBUGGING_TYPE! #endif /* no PREFERRED_DEBUGGING_TYPE */ #else /* Only one debugging format supported. Define PREFERRED_DEBUGGING_TYPE so other code needn't care. */ #ifdef DBX_DEBUGGING_INFO #define PREFERRED_DEBUGGING_TYPE DBX_DEBUG #endif #ifdef SDB_DEBUGGING_INFO #define PREFERRED_DEBUGGING_TYPE SDB_DEBUG #endif #ifdef DWARF_DEBUGGING_INFO #define PREFERRED_DEBUGGING_TYPE DWARF_DEBUG #endif #ifdef DWARF2_DEBUGGING_INFO #define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG #endif #ifdef VMS_DEBUGGING_INFO #define PREFERRED_DEBUGGING_TYPE VMS_AND_DWARF2_DEBUG #endif #ifdef XCOFF_DEBUGGING_INFO #define PREFERRED_DEBUGGING_TYPE XCOFF_DEBUG #endif #endif /* More than one debugger format enabled. */ /* If still not defined, must have been because no debugging formats are supported. */ #ifndef PREFERRED_DEBUGGING_TYPE #define PREFERRED_DEBUGGING_TYPE NO_DEBUG #endif /* Define codes for all the float formats that we know of. */ #define UNKNOWN_FLOAT_FORMAT 0 #define IEEE_FLOAT_FORMAT 1 #define VAX_FLOAT_FORMAT 2 #define IBM_FLOAT_FORMAT 3 #define C4X_FLOAT_FORMAT 4 /* Default to IEEE float if not specified. Nearly all machines use it. */ #ifndef TARGET_FLOAT_FORMAT #define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT #endif /* Determine the register class for registers suitable to be the base address register in a MEM. Allow the choice to be dependent upon the mode of the memory access. 
*/ #ifndef MODE_BASE_REG_CLASS #define MODE_BASE_REG_CLASS(MODE) BASE_REG_CLASS #endif #ifndef LARGEST_EXPONENT_IS_NORMAL #define LARGEST_EXPONENT_IS_NORMAL(SIZE) 0 #endif #ifndef ROUND_TOWARDS_ZERO #define ROUND_TOWARDS_ZERO 0 #endif #ifndef MODE_HAS_NANS #define MODE_HAS_NANS(MODE) \ (FLOAT_MODE_P (MODE) \ && TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT \ && !LARGEST_EXPONENT_IS_NORMAL (GET_MODE_BITSIZE (MODE))) #endif #ifndef MODE_HAS_INFINITIES #define MODE_HAS_INFINITIES(MODE) \ (FLOAT_MODE_P (MODE) \ && TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT \ && !LARGEST_EXPONENT_IS_NORMAL (GET_MODE_BITSIZE (MODE))) #endif #ifndef MODE_HAS_SIGNED_ZEROS #define MODE_HAS_SIGNED_ZEROS(MODE) \ (FLOAT_MODE_P (MODE) && TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT) #endif #ifndef MODE_HAS_SIGN_DEPENDENT_ROUNDING #define MODE_HAS_SIGN_DEPENDENT_ROUNDING(MODE) \ (FLOAT_MODE_P (MODE) \ && TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT \ && !ROUND_TOWARDS_ZERO) #endif #ifndef FLOAT_LIB_COMPARE_RETURNS_BOOL #define FLOAT_LIB_COMPARE_RETURNS_BOOL(MODE, COMPARISON) false #endif /* If FLOAT_WORDS_BIG_ENDIAN is not defined in the header files, then the word-endianness is the same as for integers. */ #ifndef FLOAT_WORDS_BIG_ENDIAN #define FLOAT_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN #endif #ifndef TARGET_FLT_EVAL_METHOD #define TARGET_FLT_EVAL_METHOD 0 #endif #ifndef HOT_TEXT_SECTION_NAME #define HOT_TEXT_SECTION_NAME ".text.hot" #endif #ifndef NORMAL_TEXT_SECTION_NAME #define NORMAL_TEXT_SECTION_NAME ".text" #endif #ifndef UNLIKELY_EXECUTED_TEXT_SECTION_NAME #define UNLIKELY_EXECUTED_TEXT_SECTION_NAME ".text.unlikely" #endif #ifndef HAS_LONG_COND_BRANCH #define HAS_LONG_COND_BRANCH 0 #endif #ifndef HAS_LONG_UNCOND_BRANCH #define HAS_LONG_UNCOND_BRANCH 0 #endif #ifndef VECTOR_MODE_SUPPORTED_P #define VECTOR_MODE_SUPPORTED_P(MODE) 0 #endif /* Determine whether __cxa_atexit, rather than atexit, is used to register C++ destructors for local statics and global objects. */ #ifndef DEFAULT_USE_CXA_ATEXIT #define DEFAULT_USE_CXA_ATEXIT 0 #endif /* Determine whether extra constraint letter should be handled via address reload (like 'o'). */ #ifndef EXTRA_MEMORY_CONSTRAINT #define EXTRA_MEMORY_CONSTRAINT(C,STR) 0 #endif /* Determine whether extra constraint letter should be handled as an address (like 'p'). */ #ifndef EXTRA_ADDRESS_CONSTRAINT #define EXTRA_ADDRESS_CONSTRAINT(C,STR) 0 #endif /* When a port defines CONSTRAINT_LEN, it should use DEFAULT_CONSTRAINT_LEN for all the characters that it does not want to change, so things like the 'length' of a digit in a matching constraint is an implementation detail, and not part of the interface. */ #define DEFAULT_CONSTRAINT_LEN(C,STR) 1 #ifndef CONSTRAINT_LEN #define CONSTRAINT_LEN(C,STR) DEFAULT_CONSTRAINT_LEN (C, STR) #endif #if defined (CONST_OK_FOR_LETTER_P) && ! defined (CONST_OK_FOR_CONSTRAINT_P) #define CONST_OK_FOR_CONSTRAINT_P(VAL,C,STR) CONST_OK_FOR_LETTER_P (VAL, C) #endif #if defined (CONST_DOUBLE_OK_FOR_LETTER_P) && ! defined (CONST_DOUBLE_OK_FOR_CONSTRAINT_P) #define CONST_DOUBLE_OK_FOR_CONSTRAINT_P(OP,C,STR) \ CONST_DOUBLE_OK_FOR_LETTER_P (OP, C) #endif #ifndef REG_CLASS_FROM_CONSTRAINT #define REG_CLASS_FROM_CONSTRAINT(C,STR) REG_CLASS_FROM_LETTER (C) #endif #if defined (EXTRA_CONSTRAINT) && ! defined (EXTRA_CONSTRAINT_STR) #define EXTRA_CONSTRAINT_STR(OP, C,STR) EXTRA_CONSTRAINT (OP, C) #endif #ifndef REGISTER_MOVE_COST #define REGISTER_MOVE_COST(m, x, y) 2 #endif /* Determine whether the entire c99 runtime is present in the runtime library.
*/ #ifndef TARGET_C99_FUNCTIONS #define TARGET_C99_FUNCTIONS 0 #endif /* Indicate that CLZ and CTZ are undefined at zero. */ #ifndef CLZ_DEFINED_VALUE_AT_ZERO #define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) 0 #endif #ifndef CTZ_DEFINED_VALUE_AT_ZERO #define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) 0 #endif /* Provide a default value for STORE_FLAG_VALUE. */ #ifndef STORE_FLAG_VALUE #define STORE_FLAG_VALUE 1 #endif /* This macro is used to determine what the largest unit size that move_by_pieces can use is. */ /* MOVE_MAX_PIECES is the number of bytes at a time which we can move efficiently, as opposed to MOVE_MAX which is the maximum number of bytes we can move with a single instruction. */ #ifndef MOVE_MAX_PIECES #define MOVE_MAX_PIECES MOVE_MAX #endif #ifndef STACK_POINTER_OFFSET #define STACK_POINTER_OFFSET 0 #endif #ifndef LOCAL_REGNO #define LOCAL_REGNO(REGNO) 0 #endif /* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, the stack pointer does not matter. The value is tested only in functions that have frame pointers. */ #ifndef EXIT_IGNORE_STACK #define EXIT_IGNORE_STACK 0 #endif /* Assume that case vectors are not pc-relative. */ #ifndef CASE_VECTOR_PC_RELATIVE #define CASE_VECTOR_PC_RELATIVE 0 #endif /* Assume that trampolines need function alignment. */ #ifndef TRAMPOLINE_ALIGNMENT #define TRAMPOLINE_ALIGNMENT FUNCTION_BOUNDARY #endif /* Register mappings for target machines without register windows. */ #ifndef INCOMING_REGNO #define INCOMING_REGNO(N) (N) #endif #ifndef OUTGOING_REGNO #define OUTGOING_REGNO(N) (N) #endif #ifndef SHIFT_COUNT_TRUNCATED #define SHIFT_COUNT_TRUNCATED 0 #endif #ifndef LEGITIMIZE_ADDRESS #define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) #endif #ifndef REVERSIBLE_CC_MODE #define REVERSIBLE_CC_MODE(MODE) 0 #endif #endif /* ! GCC_DEFAULTS_H */ #endif #if defined IN_GCC && !defined GENERATOR_FILE && !defined USED_FOR_TARGET /* Generated automatically by the program `genconstants' from the machine description file `md'. 
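The values below are the numeric codes the i386 machine description assigns to its unspec and unspec_volatile operations (UNSPEC_*, UNSPECV_*) and to a few fixed hard registers (BP_REG, SP_REG, FLAGS_REG, and so on); for instance, an RTL pattern written with UNSPEC_GOT in the md file refers to the value 0 defined here.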
*/ #ifndef GCC_INSN_CONSTANTS_H #define GCC_INSN_CONSTANTS_H #define BP_REG 6 #define UNSPEC_PAVGUSB 49 #define UNSPEC_SHUFFLE 41 #define UNSPEC_PFRCPIT2 52 #define UNSPECV_MWAIT 70 #define UNSPEC_TPOFF 4 #define UNSPEC_PFRSQIT1 54 #define UNSPECV_EMMS 31 #define UNSPEC_GOTOFF 1 #define UNSPEC_MOVSLDUP 75 #define UNSPEC_TP 15 #define UNSPEC_ADDSUB 71 #define UNSPEC_MOVNT 34 #define UNSPEC_XTRACT_FRACT 84 #define SP_REG 7 #define UNSPEC_SAHF 25 #define UNSPEC_SIN 21 #define UNSPECV_LDMXCSR 37 #define UNSPEC_COS 22 #define UNSPEC_TAN_ONE 82 #define UNSPEC_GOT 0 #define UNSPEC_SINCOS_SIN 81 #define UNSPEC_LFENCE 60 #define UNSPEC_PFRCP 50 #define UNSPEC_FSCALE_FRACT 86 #define UNSPEC_SINCOS_COS 80 #define UNSPEC_PSADBW 61 #define UNSPEC_PFRSQRT 53 #define UNSPEC_MOVA 38 #define UNSPEC_FYL2X 66 #define UNSPEC_SCAS 20 #define UNSPECV_CLFLUSH 57 #define UNSPEC_STACK_ALLOC 11 #define UNSPEC_FLDCW 28 #define UNSPEC_EH_RETURN 76 #define UNSPECV_STMXCSR 40 #define UNSPEC_FRNDINT 68 #define UNSPEC_PSHUFHW 56 #define UNSPEC_SSE_PROLOGUE_SAVE 13 #define UNSPEC_FSCALE_EXP 87 #define UNSPEC_TLS_LD_BASE 17 #define UNSPEC_SET_GOT 12 #define UNSPEC_GOTTPOFF 3 #define UNSPEC_FYL2XP1 67 #define UNSPECV_STACK_PROBE 10 #define UNSPEC_ADD_CARRY 27 #define UNSPEC_MASKMOV 32 #define UNSPEC_HADD 72 #define UNSPEC_XTRACT_EXP 85 #define UNSPEC_DTPOFF 6 #define UNSPEC_LDQQU 76 #define UNSPEC_GOTPCREL 2 #define UNSPEC_NTPOFF 5 #define UNSPEC_RCP 42 #define UNSPEC_F2XM1 69 #define UNSPEC_REP 75 #define UNSPEC_GOTNTPOFF 7 #define UNSPEC_TAN_TAN 83 #define UNSPEC_FPREM1_U 91 #define UNSPEC_FIX 30 #define UNSPEC_RSQRT 43 #define UNSPEC_NOP 45 #define UNSPECV_FEMMS 46 #define UNSPEC_FSTCW 26 #define UNSPEC_MFENCE 59 #define FPSR_REG 18 #define UNSPEC_TLS_GD 16 #define UNSPEC_SFENCE 44 #define UNSPEC_PFRCPIT1 51 #define UNSPEC_MOVU 39 #define UNSPEC_HSUB 73 #define UNSPEC_MOVMSK 33 #define UNSPECV_MONITOR 69 #define UNSPEC_FPATAN 65 #define UNSPEC_FPREM_F 88 #define UNSPEC_INDNTPOFF 8 #define UNSPEC_FNSTSW 24 #define UNSPEC_MOVSHDUP 74 #define DIRFLAG_REG 19 #define UNSPEC_MOVDDUP 77 #define UNSPEC_FPREM_U 89 #define UNSPECV_ALIGN 68 #define UNSPEC_FPREM1_F 90 #define FLAGS_REG 17 #define UNSPECV_BLOCKAGE 0 #define UNSPEC_PSHUFLW 55 #endif /* GCC_INSN_CONSTANTS_H */ /* Generated automatically by the program `genflags' from the machine description file `md'. 
*/ #ifndef GCC_INSN_FLAGS_H #define GCC_INSN_FLAGS_H #define HAVE_cmpdi_ccno_1_rex64 (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode)) #define HAVE_cmpdi_1_insn_rex64 (TARGET_64BIT && ix86_match_ccmode (insn, CCmode)) #define HAVE_cmpqi_ext_3_insn (!TARGET_64BIT && ix86_match_ccmode (insn, CCmode)) #define HAVE_cmpqi_ext_3_insn_rex64 (TARGET_64BIT && ix86_match_ccmode (insn, CCmode)) #define HAVE_x86_fnstsw_1 (TARGET_80387) #define HAVE_x86_sahf_1 (!TARGET_64BIT) #define HAVE_popsi1 (!TARGET_64BIT) #define HAVE_movsi_insv_1 (!TARGET_64BIT) #define HAVE_movdi_insv_1_rex64 (TARGET_64BIT) #define HAVE_pushdi2_rex64 (TARGET_64BIT) #define HAVE_popdi1 (TARGET_64BIT) #define HAVE_swapxf 1 #define HAVE_zero_extendhisi2_and (TARGET_ZERO_EXTEND_WITH_AND && !optimize_size) #define HAVE_zero_extendsidi2_32 (!TARGET_64BIT && !TARGET_INTER_UNIT_MOVES) #define HAVE_zero_extendsidi2_rex64 (TARGET_64BIT && !TARGET_INTER_UNIT_MOVES) #define HAVE_zero_extendhidi2 (TARGET_64BIT) #define HAVE_zero_extendqidi2 (TARGET_64BIT) #define HAVE_extendsidi2_rex64 (TARGET_64BIT) #define HAVE_extendhidi2 (TARGET_64BIT) #define HAVE_extendqidi2 (TARGET_64BIT) #define HAVE_extendhisi2 1 #define HAVE_extendqihi2 1 #define HAVE_extendqisi2 1 #define HAVE_truncdfsf2_noop (TARGET_80387 && flag_unsafe_math_optimizations) #define HAVE_truncdfsf2_sse_only (!TARGET_80387 && TARGET_SSE2 && !TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS) #define HAVE_truncxfsf2_noop (TARGET_80387 && flag_unsafe_math_optimizations) #define HAVE_truncxfdf2_noop (TARGET_80387 && flag_unsafe_math_optimizations) #define HAVE_fix_truncdi_nomemory (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) \ && (!SSE_FLOAT_MODE_P (GET_MODE (operands[1])) || !TARGET_64BIT)) #define HAVE_fix_truncdi_memory (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) \ && (!SSE_FLOAT_MODE_P (GET_MODE (operands[1])) || !TARGET_64BIT)) #define HAVE_fix_truncsfdi_sse (TARGET_64BIT && TARGET_SSE) #define HAVE_fix_truncdfdi_sse (TARGET_64BIT && TARGET_SSE2) #define HAVE_fix_truncsi_nomemory (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) \ && !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))) #define HAVE_fix_truncsi_memory (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) \ && !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))) #define HAVE_fix_truncsfsi_sse (TARGET_SSE) #define HAVE_fix_truncdfsi_sse (TARGET_SSE2) #define HAVE_fix_trunchi_nomemory (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) \ && !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))) #define HAVE_fix_trunchi_memory (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) \ && !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))) #define HAVE_x86_fnstcw_1 (TARGET_80387) #define HAVE_x86_fldcw_1 (TARGET_80387) #define HAVE_floathixf2 (TARGET_80387) #define HAVE_floatsixf2 (TARGET_80387) #define HAVE_floatdixf2 (TARGET_80387) #define HAVE_adddi3_carry_rex64 (TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)) #define HAVE_addqi3_carry (ix86_binary_operator_ok (PLUS, QImode, operands)) #define HAVE_addhi3_carry (ix86_binary_operator_ok (PLUS, HImode, operands)) #define HAVE_addsi3_carry (ix86_binary_operator_ok (PLUS, SImode, operands)) #define HAVE_addqi3_cc (ix86_binary_operator_ok (PLUS, QImode, operands)) #define HAVE_addsi_1_zext (TARGET_64BIT && ix86_binary_operator_ok (PLUS, SImode, operands)) #define HAVE_addqi_ext_1 (!TARGET_64BIT) #define HAVE_subdi3_carry_rex64 (TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands)) #define HAVE_subqi3_carry (ix86_binary_operator_ok (MINUS, QImode, operands)) 
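/* Each HAVE_* macro gives the C condition under which the named insn pattern may be used; the compiler proper tests it before calling the corresponding gen_* function. An illustrative sketch (not code from this header): if (HAVE_nop) emit_insn (gen_nop ()); */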
#define HAVE_subhi3_carry (ix86_binary_operator_ok (MINUS, HImode, operands)) #define HAVE_subsi3_carry (ix86_binary_operator_ok (MINUS, SImode, operands)) #define HAVE_subsi3_carry_zext (TARGET_64BIT && ix86_binary_operator_ok (MINUS, SImode, operands)) #define HAVE_divqi3 (TARGET_QIMODE_MATH) #define HAVE_udivqi3 (TARGET_QIMODE_MATH) #define HAVE_divmodhi4 (TARGET_HIMODE_MATH) #define HAVE_udivmoddi4 (TARGET_64BIT) #define HAVE_udivmodsi4 1 #define HAVE_testsi_1 (ix86_match_ccmode (insn, CCNOmode) \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_andqi_ext_0 1 #define HAVE_iorqi_ext_0 ((!TARGET_PARTIAL_REG_STALL || optimize_size)) #define HAVE_xorqi_ext_0 ((!TARGET_PARTIAL_REG_STALL || optimize_size)) #define HAVE_negsf2_memory (ix86_unary_operator_ok (NEG, SFmode, operands)) #define HAVE_negsf2_ifs (TARGET_SSE \ && (reload_in_progress || reload_completed \ || (register_operand (operands[0], VOIDmode) \ && register_operand (operands[1], VOIDmode)))) #define HAVE_negdf2_memory (ix86_unary_operator_ok (NEG, DFmode, operands)) #define HAVE_negdf2_ifs (!TARGET_64BIT && TARGET_SSE2 \ && (reload_in_progress || reload_completed \ || (register_operand (operands[0], VOIDmode) \ && register_operand (operands[1], VOIDmode)))) #define HAVE_abssf2_memory (ix86_unary_operator_ok (ABS, SFmode, operands)) #define HAVE_abssf2_ifs (TARGET_SSE \ && (reload_in_progress || reload_completed \ || (register_operand (operands[0], VOIDmode) \ && register_operand (operands[1], VOIDmode)))) #define HAVE_absdf2_memory (ix86_unary_operator_ok (ABS, DFmode, operands)) #define HAVE_absdf2_ifs (!TARGET_64BIT && TARGET_SSE2 \ && (reload_in_progress || reload_completed \ || (register_operand (operands[0], VOIDmode) \ && register_operand (operands[1], VOIDmode)))) #define HAVE_ashldi3_1 (!TARGET_64BIT && TARGET_CMOVE) #define HAVE_x86_shld_1 1 #define HAVE_ashrdi3_63_rex64 (TARGET_64BIT && INTVAL (operands[2]) == 63 && (TARGET_USE_CLTD || optimize_size) \ && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)) #define HAVE_ashrdi3_1 (!TARGET_64BIT && TARGET_CMOVE) #define HAVE_x86_shrd_1 1 #define HAVE_ashrsi3_31 (INTVAL (operands[2]) == 31 && (TARGET_USE_CLTD || optimize_size) \ && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)) #define HAVE_lshrdi3_1 (!TARGET_64BIT && TARGET_CMOVE) #define HAVE_setcc_2 1 #define HAVE_jump 1 #define HAVE_doloop_end_internal (!TARGET_64BIT && TARGET_USE_LOOP \ && (reload_in_progress || reload_completed \ || register_operand (operands[2], VOIDmode))) #define HAVE_blockage 1 #define HAVE_return_internal (reload_completed) #define HAVE_return_internal_long (reload_completed) #define HAVE_return_pop_internal (reload_completed) #define HAVE_return_indirect_internal (reload_completed) #define HAVE_nop 1 #define HAVE_align 1 #define HAVE_set_got (!TARGET_64BIT) #define HAVE_eh_return_si (!TARGET_64BIT) #define HAVE_eh_return_di (TARGET_64BIT) #define HAVE_leave (!TARGET_64BIT) #define HAVE_leave_rex64 (TARGET_64BIT) #define HAVE_ctzsi2 1 #define HAVE_ctzdi2 (TARGET_64BIT) #define HAVE_sqrtsf2_1 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && (TARGET_SSE_MATH && TARGET_MIX_SSE_I387)) #define HAVE_sqrtsf2_1_sse_only (TARGET_SSE_MATH && (!TARGET_80387 || !TARGET_MIX_SSE_I387)) #define HAVE_sqrtsf2_i387 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && !TARGET_SSE_MATH) #define HAVE_sqrtdf2_1 (! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && (TARGET_SSE2 && TARGET_SSE_MATH && TARGET_MIX_SSE_I387)) #define HAVE_sqrtdf2_1_sse_only (TARGET_SSE2 && TARGET_SSE_MATH && (!TARGET_80387 || !TARGET_MIX_SSE_I387)) #define HAVE_sqrtdf2_i387 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && (!TARGET_SSE2 || !TARGET_SSE_MATH)) #define HAVE_sqrtxf2 (TARGET_80387 && !TARGET_NO_FANCY_MATH_387 \ && (TARGET_IEEE_FP || flag_unsafe_math_optimizations) ) #define HAVE_fpremxf4 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_fprem1xf4 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_sincosdf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_sincossf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_sincosxf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_atan2df3_1 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_atan2sf3_1 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_atan2xf3_1 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_fyl2x_xf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_fyl2xp1_xf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_cld 1 #define HAVE_x86_movdicc_0_m1_rex64 (TARGET_64BIT) #define HAVE_movdicc_c_rex64 (TARGET_64BIT && TARGET_CMOVE \ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)) #define HAVE_x86_movsicc_0_m1 1 #define HAVE_pro_epilogue_adjust_stack_1 (!TARGET_64BIT) #define HAVE_pro_epilogue_adjust_stack_rex64 (TARGET_64BIT) #define HAVE_pro_epilogue_adjust_stack_rex64_2 (TARGET_64BIT) #define HAVE_sse_movsfcc (TARGET_SSE \ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM) \ /* Avoid combine from being smart and converting min/max \ instruction patterns into conditional moves. */ \ && ((GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != GT \ && GET_CODE (operands[1]) != UNLE && GET_CODE (operands[1]) != UNGE) \ || !rtx_equal_p (operands[4], operands[2]) \ || !rtx_equal_p (operands[5], operands[3])) \ && (!TARGET_IEEE_FP \ || (GET_CODE (operands[1]) != EQ && GET_CODE (operands[1]) != NE))) #define HAVE_sse_movsfcc_eq (TARGET_SSE \ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)) #define HAVE_sse_movdfcc (TARGET_SSE2 \ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM) \ /* Avoid combine from being smart and converting min/max \ instruction patterns into conditional moves. 
*/ \ && ((GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != GT \ && GET_CODE (operands[1]) != UNLE && GET_CODE (operands[1]) != UNGE) \ || !rtx_equal_p (operands[4], operands[2]) \ || !rtx_equal_p (operands[5], operands[3])) \ && (!TARGET_IEEE_FP \ || (GET_CODE (operands[1]) != EQ && GET_CODE (operands[1]) != NE))) #define HAVE_sse_movdfcc_eq (TARGET_SSE \ && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)) #define HAVE_allocate_stack_worker_1 (!TARGET_64BIT && TARGET_STACK_PROBE) #define HAVE_allocate_stack_worker_rex64 (TARGET_64BIT && TARGET_STACK_PROBE) #define HAVE_trap 1 #define HAVE_movv4sf_internal (TARGET_SSE) #define HAVE_movv4si_internal (TARGET_SSE) #define HAVE_movv2di_internal (TARGET_SSE) #define HAVE_movv8qi_internal (TARGET_MMX \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_movv4hi_internal (TARGET_MMX \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_movv2si_internal (TARGET_MMX \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_movv2sf_internal (TARGET_3DNOW \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_movv2df_internal (TARGET_SSE2 \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_movv8hi_internal (TARGET_SSE2 \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_movv16qi_internal (TARGET_SSE2 \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_movti_internal (TARGET_SSE && !TARGET_64BIT \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_sse_movmskps (TARGET_SSE) #define HAVE_mmx_pmovmskb (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_mmx_maskmovq ((TARGET_SSE || TARGET_3DNOW_A) && !TARGET_64BIT) #define HAVE_mmx_maskmovq_rex ((TARGET_SSE || TARGET_3DNOW_A) && TARGET_64BIT) #define HAVE_sse_movntv4sf (TARGET_SSE) #define HAVE_sse_movntdi (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_sse_movhlps (TARGET_SSE) #define HAVE_sse_movlhps (TARGET_SSE) #define HAVE_sse_movhps (TARGET_SSE \ && (GET_CODE (operands[1]) == MEM || GET_CODE (operands[2]) == MEM)) #define HAVE_sse_movlps (TARGET_SSE \ && (GET_CODE (operands[1]) == MEM || GET_CODE (operands[2]) == MEM)) #define HAVE_sse_loadss_1 (TARGET_SSE) #define HAVE_sse_movss (TARGET_SSE) #define HAVE_sse_storess (TARGET_SSE) #define HAVE_sse_shufps (TARGET_SSE) #define HAVE_addv4sf3 (TARGET_SSE) #define HAVE_vmaddv4sf3 (TARGET_SSE) #define HAVE_subv4sf3 (TARGET_SSE) #define HAVE_vmsubv4sf3 (TARGET_SSE) #define HAVE_mulv4sf3 (TARGET_SSE) #define HAVE_vmmulv4sf3 (TARGET_SSE) #define HAVE_divv4sf3 (TARGET_SSE) #define HAVE_vmdivv4sf3 (TARGET_SSE) #define HAVE_rcpv4sf2 (TARGET_SSE) #define HAVE_vmrcpv4sf2 (TARGET_SSE) #define HAVE_rsqrtv4sf2 (TARGET_SSE) #define HAVE_vmrsqrtv4sf2 (TARGET_SSE) #define HAVE_sqrtv4sf2 (TARGET_SSE) #define HAVE_vmsqrtv4sf2 (TARGET_SSE) #define HAVE_sse2_andv2di3 (TARGET_SSE2 \ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) #define HAVE_sse2_nandv2di3 (TARGET_SSE2 \ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) #define HAVE_sse2_iorv2di3 (TARGET_SSE2 \ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) #define HAVE_sse2_xorv2di3 (TARGET_SSE2 \ && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) #define HAVE_sse_clrv4sf (TARGET_SSE) #define HAVE_sse_clrv2df (TARGET_SSE2) #define HAVE_maskcmpv4sf3 (TARGET_SSE) #define HAVE_maskncmpv4sf3 
(TARGET_SSE) #define HAVE_vmmaskcmpv4sf3 (TARGET_SSE) #define HAVE_vmmaskncmpv4sf3 (TARGET_SSE) #define HAVE_sse_comi (TARGET_SSE) #define HAVE_sse_ucomi (TARGET_SSE) #define HAVE_sse_unpckhps (TARGET_SSE) #define HAVE_sse_unpcklps (TARGET_SSE) #define HAVE_smaxv4sf3 (TARGET_SSE) #define HAVE_vmsmaxv4sf3 (TARGET_SSE) #define HAVE_sminv4sf3 (TARGET_SSE) #define HAVE_vmsminv4sf3 (TARGET_SSE) #define HAVE_cvtpi2ps (TARGET_SSE) #define HAVE_cvtps2pi (TARGET_SSE) #define HAVE_cvttps2pi (TARGET_SSE) #define HAVE_cvtsi2ss (TARGET_SSE) #define HAVE_cvtsi2ssq (TARGET_SSE && TARGET_64BIT) #define HAVE_cvtss2si (TARGET_SSE) #define HAVE_cvtss2siq (TARGET_SSE) #define HAVE_cvttss2si (TARGET_SSE) #define HAVE_cvttss2siq (TARGET_SSE && TARGET_64BIT) #define HAVE_addv8qi3 (TARGET_MMX) #define HAVE_addv4hi3 (TARGET_MMX) #define HAVE_addv2si3 (TARGET_MMX) #define HAVE_mmx_adddi3 (TARGET_MMX) #define HAVE_ssaddv8qi3 (TARGET_MMX) #define HAVE_ssaddv4hi3 (TARGET_MMX) #define HAVE_usaddv8qi3 (TARGET_MMX) #define HAVE_usaddv4hi3 (TARGET_MMX) #define HAVE_subv8qi3 (TARGET_MMX) #define HAVE_subv4hi3 (TARGET_MMX) #define HAVE_subv2si3 (TARGET_MMX) #define HAVE_mmx_subdi3 (TARGET_MMX) #define HAVE_sssubv8qi3 (TARGET_MMX) #define HAVE_sssubv4hi3 (TARGET_MMX) #define HAVE_ussubv8qi3 (TARGET_MMX) #define HAVE_ussubv4hi3 (TARGET_MMX) #define HAVE_mulv4hi3 (TARGET_MMX) #define HAVE_smulv4hi3_highpart (TARGET_MMX) #define HAVE_umulv4hi3_highpart (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_mmx_pmaddwd (TARGET_MMX) #define HAVE_mmx_iordi3 (TARGET_MMX) #define HAVE_mmx_xordi3 (TARGET_MMX) #define HAVE_mmx_clrdi (TARGET_MMX) #define HAVE_mmx_anddi3 (TARGET_MMX) #define HAVE_mmx_nanddi3 (TARGET_MMX) #define HAVE_mmx_uavgv8qi3 (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_mmx_uavgv4hi3 (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_mmx_psadbw (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_mmx_pinsrw (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_mmx_pextrw (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_mmx_pshufw (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_eqv8qi3 (TARGET_MMX) #define HAVE_eqv4hi3 (TARGET_MMX) #define HAVE_eqv2si3 (TARGET_MMX) #define HAVE_gtv8qi3 (TARGET_MMX) #define HAVE_gtv4hi3 (TARGET_MMX) #define HAVE_gtv2si3 (TARGET_MMX) #define HAVE_umaxv8qi3 (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_smaxv4hi3 (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_uminv8qi3 (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_sminv4hi3 (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_ashrv4hi3 (TARGET_MMX) #define HAVE_ashrv2si3 (TARGET_MMX) #define HAVE_lshrv4hi3 (TARGET_MMX) #define HAVE_lshrv2si3 (TARGET_MMX) #define HAVE_mmx_lshrdi3 (TARGET_MMX) #define HAVE_ashlv4hi3 (TARGET_MMX) #define HAVE_ashlv2si3 (TARGET_MMX) #define HAVE_mmx_ashldi3 (TARGET_MMX) #define HAVE_mmx_packsswb (TARGET_MMX) #define HAVE_mmx_packssdw (TARGET_MMX) #define HAVE_mmx_packuswb (TARGET_MMX) #define HAVE_mmx_punpckhbw (TARGET_MMX) #define HAVE_mmx_punpckhwd (TARGET_MMX) #define HAVE_mmx_punpckhdq (TARGET_MMX) #define HAVE_mmx_punpcklbw (TARGET_MMX) #define HAVE_mmx_punpcklwd (TARGET_MMX) #define HAVE_mmx_punpckldq (TARGET_MMX) #define HAVE_emms (TARGET_MMX) #define HAVE_ldmxcsr (TARGET_SSE) #define HAVE_stmxcsr (TARGET_SSE) #define HAVE_addv2sf3 (TARGET_3DNOW) #define HAVE_subv2sf3 (TARGET_3DNOW) #define HAVE_subrv2sf3 (TARGET_3DNOW) #define HAVE_gtv2sf3 (TARGET_3DNOW) #define HAVE_gev2sf3 (TARGET_3DNOW) #define HAVE_eqv2sf3 (TARGET_3DNOW) #define HAVE_pfmaxv2sf3 (TARGET_3DNOW) #define HAVE_pfminv2sf3 (TARGET_3DNOW) #define HAVE_mulv2sf3 (TARGET_3DNOW) #define HAVE_femms 
(TARGET_3DNOW) #define HAVE_pf2id (TARGET_3DNOW) #define HAVE_pf2iw (TARGET_3DNOW_A) #define HAVE_pfacc (TARGET_3DNOW) #define HAVE_pfnacc (TARGET_3DNOW_A) #define HAVE_pfpnacc (TARGET_3DNOW_A) #define HAVE_pi2fw (TARGET_3DNOW_A) #define HAVE_floatv2si2 (TARGET_3DNOW) #define HAVE_pavgusb (TARGET_3DNOW) #define HAVE_pfrcpv2sf2 (TARGET_3DNOW) #define HAVE_pfrcpit1v2sf3 (TARGET_3DNOW) #define HAVE_pfrcpit2v2sf3 (TARGET_3DNOW) #define HAVE_pfrsqrtv2sf2 (TARGET_3DNOW) #define HAVE_pfrsqit1v2sf3 (TARGET_3DNOW) #define HAVE_pmulhrwv4hi3 (TARGET_3DNOW) #define HAVE_pswapdv2si2 (TARGET_3DNOW_A) #define HAVE_pswapdv2sf2 (TARGET_3DNOW_A) #define HAVE_addv2df3 (TARGET_SSE2) #define HAVE_vmaddv2df3 (TARGET_SSE2) #define HAVE_subv2df3 (TARGET_SSE2) #define HAVE_vmsubv2df3 (TARGET_SSE2) #define HAVE_mulv2df3 (TARGET_SSE2) #define HAVE_vmmulv2df3 (TARGET_SSE2) #define HAVE_divv2df3 (TARGET_SSE2) #define HAVE_vmdivv2df3 (TARGET_SSE2) #define HAVE_smaxv2df3 (TARGET_SSE2) #define HAVE_vmsmaxv2df3 (TARGET_SSE2) #define HAVE_sminv2df3 (TARGET_SSE2) #define HAVE_vmsminv2df3 (TARGET_SSE2) #define HAVE_sqrtv2df2 (TARGET_SSE2) #define HAVE_vmsqrtv2df2 (TARGET_SSE2) #define HAVE_maskcmpv2df3 (TARGET_SSE2) #define HAVE_maskncmpv2df3 (TARGET_SSE2) #define HAVE_vmmaskcmpv2df3 (TARGET_SSE2) #define HAVE_vmmaskncmpv2df3 (TARGET_SSE2) #define HAVE_sse2_comi (TARGET_SSE2) #define HAVE_sse2_ucomi (TARGET_SSE2) #define HAVE_sse2_movmskpd (TARGET_SSE2) #define HAVE_sse2_pmovmskb (TARGET_SSE2) #define HAVE_sse2_maskmovdqu (TARGET_SSE2) #define HAVE_sse2_maskmovdqu_rex64 (TARGET_SSE2) #define HAVE_sse2_movntv2df (TARGET_SSE2) #define HAVE_sse2_movntv2di (TARGET_SSE2) #define HAVE_sse2_movntsi (TARGET_SSE2) #define HAVE_cvtdq2ps (TARGET_SSE2) #define HAVE_cvtps2dq (TARGET_SSE2) #define HAVE_cvttps2dq (TARGET_SSE2) #define HAVE_cvtdq2pd (TARGET_SSE2) #define HAVE_cvtpd2dq (TARGET_SSE2) #define HAVE_cvttpd2dq (TARGET_SSE2) #define HAVE_cvtpd2pi (TARGET_SSE2) #define HAVE_cvttpd2pi (TARGET_SSE2) #define HAVE_cvtpi2pd (TARGET_SSE2) #define HAVE_cvtsd2si (TARGET_SSE2) #define HAVE_cvtsd2siq (TARGET_SSE2 && TARGET_64BIT) #define HAVE_cvttsd2si (TARGET_SSE2) #define HAVE_cvttsd2siq (TARGET_SSE2 && TARGET_64BIT) #define HAVE_cvtsi2sd (TARGET_SSE2) #define HAVE_cvtsi2sdq (TARGET_SSE2 && TARGET_64BIT) #define HAVE_cvtsd2ss (TARGET_SSE2) #define HAVE_cvtss2sd (TARGET_SSE2) #define HAVE_cvtpd2ps (TARGET_SSE2) #define HAVE_cvtps2pd (TARGET_SSE2) #define HAVE_addv16qi3 (TARGET_SSE2) #define HAVE_addv8hi3 (TARGET_SSE2) #define HAVE_addv4si3 (TARGET_SSE2) #define HAVE_addv2di3 (TARGET_SSE2) #define HAVE_ssaddv16qi3 (TARGET_SSE2) #define HAVE_ssaddv8hi3 (TARGET_SSE2) #define HAVE_usaddv16qi3 (TARGET_SSE2) #define HAVE_usaddv8hi3 (TARGET_SSE2) #define HAVE_subv16qi3 (TARGET_SSE2) #define HAVE_subv8hi3 (TARGET_SSE2) #define HAVE_subv4si3 (TARGET_SSE2) #define HAVE_subv2di3 (TARGET_SSE2) #define HAVE_sssubv16qi3 (TARGET_SSE2) #define HAVE_sssubv8hi3 (TARGET_SSE2) #define HAVE_ussubv16qi3 (TARGET_SSE2) #define HAVE_ussubv8hi3 (TARGET_SSE2) #define HAVE_mulv8hi3 (TARGET_SSE2) #define HAVE_smulv8hi3_highpart (TARGET_SSE2) #define HAVE_umulv8hi3_highpart (TARGET_SSE2) #define HAVE_sse2_umulsidi3 (TARGET_SSE2) #define HAVE_sse2_umulv2siv2di3 (TARGET_SSE2) #define HAVE_sse2_pmaddwd (TARGET_SSE2) #define HAVE_sse2_clrti (TARGET_SSE2) #define HAVE_sse2_uavgv16qi3 (TARGET_SSE2) #define HAVE_sse2_uavgv8hi3 (TARGET_SSE2) #define HAVE_sse2_psadbw (TARGET_SSE2) #define HAVE_sse2_pinsrw (TARGET_SSE2) #define HAVE_sse2_pextrw (TARGET_SSE2) #define 
HAVE_sse2_pshufd (TARGET_SSE2) #define HAVE_sse2_pshuflw (TARGET_SSE2) #define HAVE_sse2_pshufhw (TARGET_SSE2) #define HAVE_eqv16qi3 (TARGET_SSE2) #define HAVE_eqv8hi3 (TARGET_SSE2) #define HAVE_eqv4si3 (TARGET_SSE2) #define HAVE_gtv16qi3 (TARGET_SSE2) #define HAVE_gtv8hi3 (TARGET_SSE2) #define HAVE_gtv4si3 (TARGET_SSE2) #define HAVE_umaxv16qi3 (TARGET_SSE2) #define HAVE_smaxv8hi3 (TARGET_SSE2) #define HAVE_uminv16qi3 (TARGET_SSE2) #define HAVE_sminv8hi3 (TARGET_SSE2) #define HAVE_ashrv8hi3 (TARGET_SSE2) #define HAVE_ashrv4si3 (TARGET_SSE2) #define HAVE_lshrv8hi3 (TARGET_SSE2) #define HAVE_lshrv4si3 (TARGET_SSE2) #define HAVE_lshrv2di3 (TARGET_SSE2) #define HAVE_ashlv8hi3 (TARGET_SSE2) #define HAVE_ashlv4si3 (TARGET_SSE2) #define HAVE_ashlv2di3 (TARGET_SSE2) #define HAVE_ashrv8hi3_ti (TARGET_SSE2) #define HAVE_ashrv4si3_ti (TARGET_SSE2) #define HAVE_lshrv8hi3_ti (TARGET_SSE2) #define HAVE_lshrv4si3_ti (TARGET_SSE2) #define HAVE_lshrv2di3_ti (TARGET_SSE2) #define HAVE_ashlv8hi3_ti (TARGET_SSE2) #define HAVE_ashlv4si3_ti (TARGET_SSE2) #define HAVE_ashlv2di3_ti (TARGET_SSE2) #define HAVE_sse2_ashlti3 (TARGET_SSE2) #define HAVE_sse2_lshrti3 (TARGET_SSE2) #define HAVE_sse2_unpckhpd (TARGET_SSE2) #define HAVE_sse2_unpcklpd (TARGET_SSE2) #define HAVE_sse2_packsswb (TARGET_SSE2) #define HAVE_sse2_packssdw (TARGET_SSE2) #define HAVE_sse2_packuswb (TARGET_SSE2) #define HAVE_sse2_punpckhbw (TARGET_SSE2) #define HAVE_sse2_punpckhwd (TARGET_SSE2) #define HAVE_sse2_punpckhdq (TARGET_SSE2) #define HAVE_sse2_punpcklbw (TARGET_SSE2) #define HAVE_sse2_punpcklwd (TARGET_SSE2) #define HAVE_sse2_punpckldq (TARGET_SSE2) #define HAVE_sse2_punpcklqdq (TARGET_SSE2) #define HAVE_sse2_punpckhqdq (TARGET_SSE2) #define HAVE_sse2_movapd (TARGET_SSE2 \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_sse2_movupd (TARGET_SSE2 \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_sse2_movdqa (TARGET_SSE2 \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_sse2_movdqu (TARGET_SSE2 \ && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) #define HAVE_sse2_movdq2q (TARGET_SSE2 && !TARGET_64BIT) #define HAVE_sse2_movdq2q_rex64 (TARGET_SSE2 && TARGET_64BIT) #define HAVE_sse2_movq2dq (TARGET_SSE2 && !TARGET_64BIT) #define HAVE_sse2_movq2dq_rex64 (TARGET_SSE2 && TARGET_64BIT) #define HAVE_sse2_movq (TARGET_SSE2) #define HAVE_sse2_loadd (TARGET_SSE2) #define HAVE_sse2_stored (TARGET_SSE2) #define HAVE_sse2_movhpd (TARGET_SSE2 && (GET_CODE (operands[1]) == MEM || GET_CODE (operands[2]) == MEM)) #define HAVE_sse2_loadsd_1 (TARGET_SSE2) #define HAVE_sse2_movsd (TARGET_SSE2 && ix86_binary_operator_ok (UNKNOWN, V2DFmode, operands)) #define HAVE_sse2_storesd (TARGET_SSE2) #define HAVE_sse2_shufpd (TARGET_SSE2) #define HAVE_sse2_clflush (TARGET_SSE2) #define HAVE_mwait (TARGET_SSE3) #define HAVE_monitor (TARGET_SSE3) #define HAVE_addsubv4sf3 (TARGET_SSE3) #define HAVE_addsubv2df3 (TARGET_SSE3) #define HAVE_haddv4sf3 (TARGET_SSE3) #define HAVE_haddv2df3 (TARGET_SSE3) #define HAVE_hsubv4sf3 (TARGET_SSE3) #define HAVE_hsubv2df3 (TARGET_SSE3) #define HAVE_movshdup (TARGET_SSE3) #define HAVE_movsldup (TARGET_SSE3) #define HAVE_lddqu (TARGET_SSE3) #define HAVE_loadddup (TARGET_SSE3) #define HAVE_movddup (TARGET_SSE3) #define HAVE_cmpdi 1 #define HAVE_cmpsi 1 #define HAVE_cmphi 1 #define HAVE_cmpqi (TARGET_QIMODE_MATH) #define HAVE_cmpdi_1_rex64 (TARGET_64BIT) #define HAVE_cmpsi_1 1 #define HAVE_cmpqi_ext_3 1 #define HAVE_cmpxf 
(TARGET_80387) #define HAVE_cmpdf (TARGET_80387 || TARGET_SSE2) #define HAVE_cmpsf (TARGET_80387 || TARGET_SSE) #define HAVE_movsi 1 #define HAVE_movhi 1 #define HAVE_movstricthi (! TARGET_PARTIAL_REG_STALL || optimize_size) #define HAVE_movqi 1 #define HAVE_reload_outqi 1 #define HAVE_movstrictqi (! TARGET_PARTIAL_REG_STALL || optimize_size) #define HAVE_movdi 1 #define HAVE_movsf 1 #define HAVE_movdf 1 #define HAVE_movxf 1 #define HAVE_zero_extendhisi2 1 #define HAVE_zero_extendqihi2 1 #define HAVE_zero_extendqisi2 1 #define HAVE_zero_extendsidi2 1 #define HAVE_extendsidi2 1 #define HAVE_extendsfdf2 (TARGET_80387 || TARGET_SSE2) #define HAVE_extendsfxf2 (TARGET_80387) #define HAVE_extenddfxf2 (TARGET_80387) #define HAVE_truncdfsf2 (TARGET_80387 || TARGET_SSE2) #define HAVE_truncxfsf2 (TARGET_80387) #define HAVE_truncxfdf2 (TARGET_80387) #define HAVE_fix_truncxfdi2 (TARGET_80387) #define HAVE_fix_truncdfdi2 (TARGET_80387 || (TARGET_SSE2 && TARGET_64BIT)) #define HAVE_fix_truncsfdi2 (TARGET_80387 || (TARGET_SSE && TARGET_64BIT)) #define HAVE_fix_truncxfsi2 (TARGET_80387) #define HAVE_fix_truncdfsi2 (TARGET_80387 || TARGET_SSE2) #define HAVE_fix_truncsfsi2 (TARGET_80387 || TARGET_SSE) #define HAVE_fix_truncxfhi2 (TARGET_80387) #define HAVE_fix_truncdfhi2 (TARGET_80387 && !TARGET_SSE2) #define HAVE_fix_truncsfhi2 (TARGET_80387 && !TARGET_SSE) #define HAVE_floathisf2 (TARGET_SSE || TARGET_80387) #define HAVE_floatsisf2 (TARGET_SSE || TARGET_80387) #define HAVE_floatdisf2 ((TARGET_64BIT && TARGET_SSE) || TARGET_80387) #define HAVE_floathidf2 (TARGET_SSE2 || TARGET_80387) #define HAVE_floatsidf2 (TARGET_80387 || TARGET_SSE2) #define HAVE_floatdidf2 ((TARGET_64BIT && TARGET_SSE2) || TARGET_80387) #define HAVE_floatunssisf2 (TARGET_SSE && TARGET_SSE_MATH && !TARGET_64BIT) #define HAVE_floatunsdisf2 (TARGET_SSE && TARGET_SSE_MATH && TARGET_64BIT) #define HAVE_floatunsdidf2 (TARGET_SSE2 && TARGET_SSE_MATH && TARGET_64BIT) #define HAVE_vec_setv2df (TARGET_SSE2) #define HAVE_vec_extractv2df (TARGET_SSE2) #define HAVE_vec_initv2df (TARGET_SSE2) #define HAVE_vec_setv4sf (TARGET_SSE) #define HAVE_vec_extractv4sf (TARGET_SSE) #define HAVE_vec_initv4sf (TARGET_SSE) #define HAVE_adddi3 1 #define HAVE_addsi3 1 #define HAVE_addhi3 (TARGET_HIMODE_MATH) #define HAVE_addqi3 (TARGET_QIMODE_MATH) #define HAVE_addxf3 (TARGET_80387) #define HAVE_adddf3 (TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)) #define HAVE_addsf3 (TARGET_80387 || TARGET_SSE_MATH) #define HAVE_subdi3 1 #define HAVE_subsi3 1 #define HAVE_subhi3 (TARGET_HIMODE_MATH) #define HAVE_subqi3 (TARGET_QIMODE_MATH) #define HAVE_subxf3 (TARGET_80387) #define HAVE_subdf3 (TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)) #define HAVE_subsf3 (TARGET_80387 || TARGET_SSE_MATH) #define HAVE_muldi3 (TARGET_64BIT) #define HAVE_mulsi3 1 #define HAVE_mulhi3 (TARGET_HIMODE_MATH) #define HAVE_mulqi3 (TARGET_QIMODE_MATH) #define HAVE_umulqihi3 (TARGET_QIMODE_MATH) #define HAVE_mulqihi3 (TARGET_QIMODE_MATH) #define HAVE_umulditi3 (TARGET_64BIT) #define HAVE_umulsidi3 (!TARGET_64BIT) #define HAVE_mulditi3 (TARGET_64BIT) #define HAVE_mulsidi3 (!TARGET_64BIT) #define HAVE_umuldi3_highpart (TARGET_64BIT) #define HAVE_umulsi3_highpart 1 #define HAVE_smuldi3_highpart (TARGET_64BIT) #define HAVE_smulsi3_highpart 1 #define HAVE_mulxf3 (TARGET_80387) #define HAVE_muldf3 (TARGET_80387 || (TARGET_SSE2 && TARGET_SSE_MATH)) #define HAVE_mulsf3 (TARGET_80387 || TARGET_SSE_MATH) #define HAVE_divxf3 (TARGET_80387) #define HAVE_divdf3 (TARGET_80387 || (TARGET_SSE2 && 
TARGET_SSE_MATH)) #define HAVE_divsf3 (TARGET_80387 || TARGET_SSE_MATH) #define HAVE_divmoddi4 (TARGET_64BIT) #define HAVE_divmodsi4 1 #define HAVE_udivmodhi4 (TARGET_HIMODE_MATH) #define HAVE_testsi_ccno_1 1 #define HAVE_testqi_ccz_1 1 #define HAVE_testqi_ext_ccno_0 1 #define HAVE_anddi3 (TARGET_64BIT) #define HAVE_andsi3 1 #define HAVE_andhi3 (TARGET_HIMODE_MATH) #define HAVE_andqi3 (TARGET_QIMODE_MATH) #define HAVE_iordi3 (TARGET_64BIT) #define HAVE_iorsi3 1 #define HAVE_iorhi3 (TARGET_HIMODE_MATH) #define HAVE_iorqi3 (TARGET_QIMODE_MATH) #define HAVE_xordi3 (TARGET_64BIT) #define HAVE_xorsi3 1 #define HAVE_xorhi3 (TARGET_HIMODE_MATH) #define HAVE_xorqi3 (TARGET_QIMODE_MATH) #define HAVE_xorqi_cc_ext_1 1 #define HAVE_negdi2 1 #define HAVE_negsi2 1 #define HAVE_neghi2 (TARGET_HIMODE_MATH) #define HAVE_negqi2 (TARGET_QIMODE_MATH) #define HAVE_negsf2 (TARGET_80387) #define HAVE_negdf2 (TARGET_80387) #define HAVE_negxf2 (TARGET_80387) #define HAVE_abssf2 (TARGET_80387) #define HAVE_absdf2 (TARGET_80387) #define HAVE_absxf2 (TARGET_80387) #define HAVE_one_cmpldi2 (TARGET_64BIT) #define HAVE_one_cmplsi2 1 #define HAVE_one_cmplhi2 (TARGET_HIMODE_MATH) #define HAVE_one_cmplqi2 (TARGET_QIMODE_MATH) #define HAVE_ashldi3 1 #define HAVE_x86_shift_adj_1 (TARGET_CMOVE) #define HAVE_x86_shift_adj_2 1 #define HAVE_ashlsi3 1 #define HAVE_ashlhi3 (TARGET_HIMODE_MATH) #define HAVE_ashlqi3 (TARGET_QIMODE_MATH) #define HAVE_ashrdi3 1 #define HAVE_x86_shift_adj_3 1 #define HAVE_ashrsi3 1 #define HAVE_ashrhi3 (TARGET_HIMODE_MATH) #define HAVE_ashrqi3 (TARGET_QIMODE_MATH) #define HAVE_lshrdi3 1 #define HAVE_lshrsi3 1 #define HAVE_lshrhi3 (TARGET_HIMODE_MATH) #define HAVE_lshrqi3 (TARGET_QIMODE_MATH) #define HAVE_rotldi3 (TARGET_64BIT) #define HAVE_rotlsi3 1 #define HAVE_rotlhi3 (TARGET_HIMODE_MATH) #define HAVE_rotlqi3 (TARGET_QIMODE_MATH) #define HAVE_rotrdi3 (TARGET_64BIT) #define HAVE_rotrsi3 1 #define HAVE_rotrhi3 (TARGET_HIMODE_MATH) #define HAVE_rotrqi3 (TARGET_QIMODE_MATH) #define HAVE_extv 1 #define HAVE_extzv 1 #define HAVE_insv 1 #define HAVE_seq 1 #define HAVE_sne 1 #define HAVE_sgt 1 #define HAVE_sgtu 1 #define HAVE_slt 1 #define HAVE_sltu 1 #define HAVE_sge 1 #define HAVE_sgeu 1 #define HAVE_sle 1 #define HAVE_sleu 1 #define HAVE_sunordered (TARGET_80387 || TARGET_SSE) #define HAVE_sordered (TARGET_80387) #define HAVE_suneq (TARGET_80387 || TARGET_SSE) #define HAVE_sunge (TARGET_80387 || TARGET_SSE) #define HAVE_sungt (TARGET_80387 || TARGET_SSE) #define HAVE_sunle (TARGET_80387 || TARGET_SSE) #define HAVE_sunlt (TARGET_80387 || TARGET_SSE) #define HAVE_sltgt (TARGET_80387 || TARGET_SSE) #define HAVE_beq 1 #define HAVE_bne 1 #define HAVE_bgt 1 #define HAVE_bgtu 1 #define HAVE_blt 1 #define HAVE_bltu 1 #define HAVE_bge 1 #define HAVE_bgeu 1 #define HAVE_ble 1 #define HAVE_bleu 1 #define HAVE_bunordered (TARGET_80387 || TARGET_SSE) #define HAVE_bordered (TARGET_80387 || TARGET_SSE) #define HAVE_buneq (TARGET_80387 || TARGET_SSE) #define HAVE_bunge (TARGET_80387 || TARGET_SSE) #define HAVE_bungt (TARGET_80387 || TARGET_SSE) #define HAVE_bunle (TARGET_80387 || TARGET_SSE) #define HAVE_bunlt (TARGET_80387 || TARGET_SSE) #define HAVE_bltgt (TARGET_80387 || TARGET_SSE) #define HAVE_indirect_jump 1 #define HAVE_tablejump 1 #define HAVE_doloop_end (!TARGET_64BIT && TARGET_USE_LOOP) #define HAVE_call_pop (!TARGET_64BIT) #define HAVE_call 1 #define HAVE_sibcall 1 #define HAVE_call_value_pop (!TARGET_64BIT) #define HAVE_call_value 1 #define HAVE_sibcall_value 1 #define HAVE_untyped_call 1 #define HAVE_return 
(ix86_can_use_return_insn_p ()) #define HAVE_prologue 1 #define HAVE_epilogue 1 #define HAVE_sibcall_epilogue 1 #define HAVE_eh_return 1 #define HAVE_ffssi2 1 #define HAVE_ffsdi2 (TARGET_64BIT && TARGET_CMOVE) #define HAVE_clzsi2 1 #define HAVE_clzdi2 (TARGET_64BIT) #define HAVE_tls_global_dynamic_32 1 #define HAVE_tls_global_dynamic_64 1 #define HAVE_tls_local_dynamic_base_32 1 #define HAVE_tls_local_dynamic_base_64 1 #define HAVE_sqrtsf2 ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387) || TARGET_SSE_MATH) #define HAVE_sqrtdf2 ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387) \ || (TARGET_SSE2 && TARGET_SSE_MATH)) #define HAVE_fmodsf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_fmoddf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_fmodxf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_dremsf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_dremdf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_dremxf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_tandf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_tansf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_tanxf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_atan2df3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_atandf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_atan2sf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_atansf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_atan2xf3 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_atanxf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_asindf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_asinsf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_asinxf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_acosdf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_acossf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_acosxf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_logsf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_logdf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_logxf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_log10sf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_log10df2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_log10xf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_log2sf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_log2df2 (! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_log2xf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_log1psf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_log1pdf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_log1pxf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_logbsf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_logbdf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_logbxf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_ilogbsi2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_expsf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_expdf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_expxf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_exp10sf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_exp10df2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_exp10xf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_exp2sf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_exp2df2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_exp2xf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_expm1df2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_expm1sf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_expm1xf2 (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 \ && flag_unsafe_math_optimizations) #define HAVE_movstrsi (! optimize_size) #define HAVE_movstrdi (TARGET_64BIT) #define HAVE_strmov 1 #define HAVE_strmov_singleop (TARGET_SINGLE_STRINGOP || optimize_size) #define HAVE_rep_mov 1 #define HAVE_clrstrsi 1 #define HAVE_clrstrdi (TARGET_64BIT) #define HAVE_strset 1 #define HAVE_strset_singleop (TARGET_SINGLE_STRINGOP || optimize_size) #define HAVE_rep_stos 1 #define HAVE_cmpstrsi (! 
optimize_size || TARGET_INLINE_ALL_STRINGOPS) #define HAVE_cmpintqi 1 #define HAVE_cmpstrqi_nz_1 1 #define HAVE_cmpstrqi_1 1 #define HAVE_strlensi 1 #define HAVE_strlendi 1 #define HAVE_strlenqi_1 1 #define HAVE_movdicc (TARGET_64BIT) #define HAVE_movsicc 1 #define HAVE_movhicc (TARGET_HIMODE_MATH) #define HAVE_movqicc (TARGET_QIMODE_MATH) #define HAVE_movsfcc (TARGET_CMOVE) #define HAVE_movdfcc (TARGET_CMOVE) #define HAVE_movxfcc (TARGET_CMOVE) #define HAVE_minsf3 (TARGET_SSE) #define HAVE_addqicc 1 #define HAVE_addhicc 1 #define HAVE_addsicc 1 #define HAVE_adddicc (TARGET_64BIT) #define HAVE_mindf3 (TARGET_SSE2 && TARGET_SSE_MATH) #define HAVE_maxsf3 (TARGET_SSE) #define HAVE_maxdf3 (TARGET_SSE2 && TARGET_SSE_MATH) #define HAVE_allocate_stack_worker (TARGET_STACK_PROBE) #define HAVE_allocate_stack_worker_postreload 1 #define HAVE_allocate_stack_worker_rex64_postreload 1 #define HAVE_allocate_stack (TARGET_STACK_PROBE) #define HAVE_builtin_setjmp_receiver (!TARGET_64BIT && flag_pic) #define HAVE_conditional_trap 1 #define HAVE_movti (TARGET_SSE || TARGET_64BIT) #define HAVE_movtf (TARGET_64BIT) #define HAVE_movv2df (TARGET_SSE2) #define HAVE_movv8hi (TARGET_SSE2) #define HAVE_movv16qi (TARGET_SSE2) #define HAVE_movv4sf (TARGET_SSE) #define HAVE_movv4si (TARGET_SSE) #define HAVE_movv2di (TARGET_SSE) #define HAVE_movv2si (TARGET_MMX) #define HAVE_movv4hi (TARGET_MMX) #define HAVE_movv8qi (TARGET_MMX) #define HAVE_movv2sf (TARGET_3DNOW) #define HAVE_sse_movaps (TARGET_SSE) #define HAVE_sse_movups (TARGET_SSE) #define HAVE_sse_loadss (TARGET_SSE) #define HAVE_negv4sf2 (TARGET_SSE) #define HAVE_sse_andv4sf3 (TARGET_SSE) #define HAVE_sse_nandv4sf3 (TARGET_SSE) #define HAVE_sse_iorv4sf3 (TARGET_SSE) #define HAVE_sse_xorv4sf3 (TARGET_SSE) #define HAVE_sse2_andv2df3 (TARGET_SSE2) #define HAVE_sse2_nandv2df3 (TARGET_SSE2) #define HAVE_sse2_iorv2df3 (TARGET_SSE2) #define HAVE_sse2_xorv2df3 (TARGET_SSE2) #define HAVE_sfence (TARGET_SSE || TARGET_3DNOW_A) #define HAVE_sse_prologue_save (TARGET_64BIT) #define HAVE_prefetch (TARGET_PREFETCH_SSE || TARGET_3DNOW) #define HAVE_sse2_loadsd (TARGET_SSE2) #define HAVE_sse2_mfence (TARGET_SSE2) #define HAVE_sse2_lfence (TARGET_SSE2) extern rtx gen_cmpdi_ccno_1_rex64 (rtx, rtx); extern rtx gen_cmpdi_1_insn_rex64 (rtx, rtx); extern rtx gen_cmpqi_ext_3_insn (rtx, rtx); extern rtx gen_cmpqi_ext_3_insn_rex64 (rtx, rtx); extern rtx gen_x86_fnstsw_1 (rtx); extern rtx gen_x86_sahf_1 (rtx); extern rtx gen_popsi1 (rtx); extern rtx gen_movsi_insv_1 (rtx, rtx); extern rtx gen_movdi_insv_1_rex64 (rtx, rtx); extern rtx gen_pushdi2_rex64 (rtx, rtx); extern rtx gen_popdi1 (rtx); extern rtx gen_swapxf (rtx, rtx); extern rtx gen_zero_extendhisi2_and (rtx, rtx); extern rtx gen_zero_extendsidi2_32 (rtx, rtx); extern rtx gen_zero_extendsidi2_rex64 (rtx, rtx); extern rtx gen_zero_extendhidi2 (rtx, rtx); extern rtx gen_zero_extendqidi2 (rtx, rtx); extern rtx gen_extendsidi2_rex64 (rtx, rtx); extern rtx gen_extendhidi2 (rtx, rtx); extern rtx gen_extendqidi2 (rtx, rtx); extern rtx gen_extendhisi2 (rtx, rtx); extern rtx gen_extendqihi2 (rtx, rtx); extern rtx gen_extendqisi2 (rtx, rtx); extern rtx gen_truncdfsf2_noop (rtx, rtx); extern rtx gen_truncdfsf2_sse_only (rtx, rtx); extern rtx gen_truncxfsf2_noop (rtx, rtx); extern rtx gen_truncxfdf2_noop (rtx, rtx); extern rtx gen_fix_truncdi_nomemory (rtx, rtx, rtx, rtx, rtx); extern rtx gen_fix_truncdi_memory (rtx, rtx, rtx, rtx); extern rtx gen_fix_truncsfdi_sse (rtx, rtx); extern rtx gen_fix_truncdfdi_sse (rtx, rtx); extern rtx 
gen_fix_truncsi_nomemory (rtx, rtx, rtx, rtx, rtx); extern rtx gen_fix_truncsi_memory (rtx, rtx, rtx, rtx); extern rtx gen_fix_truncsfsi_sse (rtx, rtx); extern rtx gen_fix_truncdfsi_sse (rtx, rtx); extern rtx gen_fix_trunchi_nomemory (rtx, rtx, rtx, rtx, rtx); extern rtx gen_fix_trunchi_memory (rtx, rtx, rtx, rtx); extern rtx gen_x86_fnstcw_1 (rtx); extern rtx gen_x86_fldcw_1 (rtx); extern rtx gen_floathixf2 (rtx, rtx); extern rtx gen_floatsixf2 (rtx, rtx); extern rtx gen_floatdixf2 (rtx, rtx); extern rtx gen_adddi3_carry_rex64 (rtx, rtx, rtx, rtx); extern rtx gen_addqi3_carry (rtx, rtx, rtx, rtx); extern rtx gen_addhi3_carry (rtx, rtx, rtx, rtx); extern rtx gen_addsi3_carry (rtx, rtx, rtx, rtx); extern rtx gen_addqi3_cc (rtx, rtx, rtx); extern rtx gen_addsi_1_zext (rtx, rtx, rtx); extern rtx gen_addqi_ext_1 (rtx, rtx, rtx); extern rtx gen_subdi3_carry_rex64 (rtx, rtx, rtx, rtx); extern rtx gen_subqi3_carry (rtx, rtx, rtx, rtx); extern rtx gen_subhi3_carry (rtx, rtx, rtx, rtx); extern rtx gen_subsi3_carry (rtx, rtx, rtx, rtx); extern rtx gen_subsi3_carry_zext (rtx, rtx, rtx, rtx); extern rtx gen_divqi3 (rtx, rtx, rtx); extern rtx gen_udivqi3 (rtx, rtx, rtx); extern rtx gen_divmodhi4 (rtx, rtx, rtx, rtx); extern rtx gen_udivmoddi4 (rtx, rtx, rtx, rtx); extern rtx gen_udivmodsi4 (rtx, rtx, rtx, rtx); extern rtx gen_testsi_1 (rtx, rtx); extern rtx gen_andqi_ext_0 (rtx, rtx, rtx); extern rtx gen_iorqi_ext_0 (rtx, rtx, rtx); extern rtx gen_xorqi_ext_0 (rtx, rtx, rtx); extern rtx gen_negsf2_memory (rtx, rtx); extern rtx gen_negsf2_ifs (rtx, rtx, rtx); extern rtx gen_negdf2_memory (rtx, rtx); extern rtx gen_negdf2_ifs (rtx, rtx, rtx); extern rtx gen_abssf2_memory (rtx, rtx); extern rtx gen_abssf2_ifs (rtx, rtx, rtx); extern rtx gen_absdf2_memory (rtx, rtx); extern rtx gen_absdf2_ifs (rtx, rtx, rtx); extern rtx gen_ashldi3_1 (rtx, rtx, rtx); extern rtx gen_x86_shld_1 (rtx, rtx, rtx); extern rtx gen_ashrdi3_63_rex64 (rtx, rtx, rtx); extern rtx gen_ashrdi3_1 (rtx, rtx, rtx); extern rtx gen_x86_shrd_1 (rtx, rtx, rtx); extern rtx gen_ashrsi3_31 (rtx, rtx, rtx); extern rtx gen_lshrdi3_1 (rtx, rtx, rtx); extern rtx gen_setcc_2 (rtx, rtx); extern rtx gen_jump (rtx); extern rtx gen_doloop_end_internal (rtx, rtx, rtx); extern rtx gen_blockage (rtx); extern rtx gen_return_internal (void); extern rtx gen_return_internal_long (void); extern rtx gen_return_pop_internal (rtx); extern rtx gen_return_indirect_internal (rtx); extern rtx gen_nop (void); extern rtx gen_align (rtx); extern rtx gen_set_got (rtx); extern rtx gen_eh_return_si (rtx); extern rtx gen_eh_return_di (rtx); extern rtx gen_leave (void); extern rtx gen_leave_rex64 (void); extern rtx gen_ctzsi2 (rtx, rtx); extern rtx gen_ctzdi2 (rtx, rtx); extern rtx gen_sqrtsf2_1 (rtx, rtx); extern rtx gen_sqrtsf2_1_sse_only (rtx, rtx); extern rtx gen_sqrtsf2_i387 (rtx, rtx); extern rtx gen_sqrtdf2_1 (rtx, rtx); extern rtx gen_sqrtdf2_1_sse_only (rtx, rtx); extern rtx gen_sqrtdf2_i387 (rtx, rtx); extern rtx gen_sqrtxf2 (rtx, rtx); extern rtx gen_fpremxf4 (rtx, rtx, rtx, rtx); extern rtx gen_fprem1xf4 (rtx, rtx, rtx, rtx); extern rtx gen_sincosdf3 (rtx, rtx, rtx); extern rtx gen_sincossf3 (rtx, rtx, rtx); extern rtx gen_sincosxf3 (rtx, rtx, rtx); extern rtx gen_atan2df3_1 (rtx, rtx, rtx); extern rtx gen_atan2sf3_1 (rtx, rtx, rtx); extern rtx gen_atan2xf3_1 (rtx, rtx, rtx); extern rtx gen_fyl2x_xf3 (rtx, rtx, rtx); extern rtx gen_fyl2xp1_xf3 (rtx, rtx, rtx); extern rtx gen_cld (void); extern rtx gen_x86_movdicc_0_m1_rex64 (rtx, rtx); extern rtx gen_movdicc_c_rex64 
(rtx, rtx, rtx, rtx); extern rtx gen_x86_movsicc_0_m1 (rtx, rtx); extern rtx gen_pro_epilogue_adjust_stack_1 (rtx, rtx, rtx); extern rtx gen_pro_epilogue_adjust_stack_rex64 (rtx, rtx, rtx); extern rtx gen_pro_epilogue_adjust_stack_rex64_2 (rtx, rtx, rtx, rtx); extern rtx gen_sse_movsfcc (rtx, rtx, rtx, rtx, rtx, rtx); extern rtx gen_sse_movsfcc_eq (rtx, rtx, rtx, rtx, rtx); extern rtx gen_sse_movdfcc (rtx, rtx, rtx, rtx, rtx, rtx); extern rtx gen_sse_movdfcc_eq (rtx, rtx, rtx, rtx, rtx); extern rtx gen_allocate_stack_worker_1 (rtx); extern rtx gen_allocate_stack_worker_rex64 (rtx); extern rtx gen_trap (void); extern rtx gen_movv4sf_internal (rtx, rtx); extern rtx gen_movv4si_internal (rtx, rtx); extern rtx gen_movv2di_internal (rtx, rtx); extern rtx gen_movv8qi_internal (rtx, rtx); extern rtx gen_movv4hi_internal (rtx, rtx); extern rtx gen_movv2si_internal (rtx, rtx); extern rtx gen_movv2sf_internal (rtx, rtx); extern rtx gen_movv2df_internal (rtx, rtx); extern rtx gen_movv8hi_internal (rtx, rtx); extern rtx gen_movv16qi_internal (rtx, rtx); extern rtx gen_movti_internal (rtx, rtx); extern rtx gen_sse_movmskps (rtx, rtx); extern rtx gen_mmx_pmovmskb (rtx, rtx); extern rtx gen_mmx_maskmovq (rtx, rtx, rtx); extern rtx gen_mmx_maskmovq_rex (rtx, rtx, rtx); extern rtx gen_sse_movntv4sf (rtx, rtx); extern rtx gen_sse_movntdi (rtx, rtx); extern rtx gen_sse_movhlps (rtx, rtx, rtx); extern rtx gen_sse_movlhps (rtx, rtx, rtx); extern rtx gen_sse_movhps (rtx, rtx, rtx); extern rtx gen_sse_movlps (rtx, rtx, rtx); extern rtx gen_sse_loadss_1 (rtx, rtx, rtx); extern rtx gen_sse_movss (rtx, rtx, rtx); extern rtx gen_sse_storess (rtx, rtx); extern rtx gen_sse_shufps (rtx, rtx, rtx, rtx); extern rtx gen_addv4sf3 (rtx, rtx, rtx); extern rtx gen_vmaddv4sf3 (rtx, rtx, rtx); extern rtx gen_subv4sf3 (rtx, rtx, rtx); extern rtx gen_vmsubv4sf3 (rtx, rtx, rtx); extern rtx gen_mulv4sf3 (rtx, rtx, rtx); extern rtx gen_vmmulv4sf3 (rtx, rtx, rtx); extern rtx gen_divv4sf3 (rtx, rtx, rtx); extern rtx gen_vmdivv4sf3 (rtx, rtx, rtx); extern rtx gen_rcpv4sf2 (rtx, rtx); extern rtx gen_vmrcpv4sf2 (rtx, rtx, rtx); extern rtx gen_rsqrtv4sf2 (rtx, rtx); extern rtx gen_vmrsqrtv4sf2 (rtx, rtx, rtx); extern rtx gen_sqrtv4sf2 (rtx, rtx); extern rtx gen_vmsqrtv4sf2 (rtx, rtx, rtx); extern rtx gen_sse2_andv2di3 (rtx, rtx, rtx); extern rtx gen_sse2_nandv2di3 (rtx, rtx, rtx); extern rtx gen_sse2_iorv2di3 (rtx, rtx, rtx); extern rtx gen_sse2_xorv2di3 (rtx, rtx, rtx); extern rtx gen_sse_clrv4sf (rtx, rtx); extern rtx gen_sse_clrv2df (rtx); extern rtx gen_maskcmpv4sf3 (rtx, rtx, rtx, rtx); extern rtx gen_maskncmpv4sf3 (rtx, rtx, rtx, rtx); extern rtx gen_vmmaskcmpv4sf3 (rtx, rtx, rtx, rtx); extern rtx gen_vmmaskncmpv4sf3 (rtx, rtx, rtx, rtx); extern rtx gen_sse_comi (rtx, rtx); extern rtx gen_sse_ucomi (rtx, rtx); extern rtx gen_sse_unpckhps (rtx, rtx, rtx); extern rtx gen_sse_unpcklps (rtx, rtx, rtx); extern rtx gen_smaxv4sf3 (rtx, rtx, rtx); extern rtx gen_vmsmaxv4sf3 (rtx, rtx, rtx); extern rtx gen_sminv4sf3 (rtx, rtx, rtx); extern rtx gen_vmsminv4sf3 (rtx, rtx, rtx); extern rtx gen_cvtpi2ps (rtx, rtx, rtx); extern rtx gen_cvtps2pi (rtx, rtx); extern rtx gen_cvttps2pi (rtx, rtx); extern rtx gen_cvtsi2ss (rtx, rtx, rtx); extern rtx gen_cvtsi2ssq (rtx, rtx, rtx); extern rtx gen_cvtss2si (rtx, rtx); extern rtx gen_cvtss2siq (rtx, rtx); extern rtx gen_cvttss2si (rtx, rtx); extern rtx gen_cvttss2siq (rtx, rtx); extern rtx gen_addv8qi3 (rtx, rtx, rtx); extern rtx gen_addv4hi3 (rtx, rtx, rtx); extern rtx gen_addv2si3 (rtx, rtx, rtx); extern 
rtx gen_mmx_adddi3 (rtx, rtx, rtx); extern rtx gen_ssaddv8qi3 (rtx, rtx, rtx); extern rtx gen_ssaddv4hi3 (rtx, rtx, rtx); extern rtx gen_usaddv8qi3 (rtx, rtx, rtx); extern rtx gen_usaddv4hi3 (rtx, rtx, rtx); extern rtx gen_subv8qi3 (rtx, rtx, rtx); extern rtx gen_subv4hi3 (rtx, rtx, rtx); extern rtx gen_subv2si3 (rtx, rtx, rtx); extern rtx gen_mmx_subdi3 (rtx, rtx, rtx); extern rtx gen_sssubv8qi3 (rtx, rtx, rtx); extern rtx gen_sssubv4hi3 (rtx, rtx, rtx); extern rtx gen_ussubv8qi3 (rtx, rtx, rtx); extern rtx gen_ussubv4hi3 (rtx, rtx, rtx); extern rtx gen_mulv4hi3 (rtx, rtx, rtx); extern rtx gen_smulv4hi3_highpart (rtx, rtx, rtx); extern rtx gen_umulv4hi3_highpart (rtx, rtx, rtx); extern rtx gen_mmx_pmaddwd (rtx, rtx, rtx); extern rtx gen_mmx_iordi3 (rtx, rtx, rtx); extern rtx gen_mmx_xordi3 (rtx, rtx, rtx); extern rtx gen_mmx_clrdi (rtx); extern rtx gen_mmx_anddi3 (rtx, rtx, rtx); extern rtx gen_mmx_nanddi3 (rtx, rtx, rtx); extern rtx gen_mmx_uavgv8qi3 (rtx, rtx, rtx); extern rtx gen_mmx_uavgv4hi3 (rtx, rtx, rtx); extern rtx gen_mmx_psadbw (rtx, rtx, rtx); extern rtx gen_mmx_pinsrw (rtx, rtx, rtx, rtx); extern rtx gen_mmx_pextrw (rtx, rtx, rtx); extern rtx gen_mmx_pshufw (rtx, rtx, rtx); extern rtx gen_eqv8qi3 (rtx, rtx, rtx); extern rtx gen_eqv4hi3 (rtx, rtx, rtx); extern rtx gen_eqv2si3 (rtx, rtx, rtx); extern rtx gen_gtv8qi3 (rtx, rtx, rtx); extern rtx gen_gtv4hi3 (rtx, rtx, rtx); extern rtx gen_gtv2si3 (rtx, rtx, rtx); extern rtx gen_umaxv8qi3 (rtx, rtx, rtx); extern rtx gen_smaxv4hi3 (rtx, rtx, rtx); extern rtx gen_uminv8qi3 (rtx, rtx, rtx); extern rtx gen_sminv4hi3 (rtx, rtx, rtx); extern rtx gen_ashrv4hi3 (rtx, rtx, rtx); extern rtx gen_ashrv2si3 (rtx, rtx, rtx); extern rtx gen_lshrv4hi3 (rtx, rtx, rtx); extern rtx gen_lshrv2si3 (rtx, rtx, rtx); extern rtx gen_mmx_lshrdi3 (rtx, rtx, rtx); extern rtx gen_ashlv4hi3 (rtx, rtx, rtx); extern rtx gen_ashlv2si3 (rtx, rtx, rtx); extern rtx gen_mmx_ashldi3 (rtx, rtx, rtx); extern rtx gen_mmx_packsswb (rtx, rtx, rtx); extern rtx gen_mmx_packssdw (rtx, rtx, rtx); extern rtx gen_mmx_packuswb (rtx, rtx, rtx); extern rtx gen_mmx_punpckhbw (rtx, rtx, rtx); extern rtx gen_mmx_punpckhwd (rtx, rtx, rtx); extern rtx gen_mmx_punpckhdq (rtx, rtx, rtx); extern rtx gen_mmx_punpcklbw (rtx, rtx, rtx); extern rtx gen_mmx_punpcklwd (rtx, rtx, rtx); extern rtx gen_mmx_punpckldq (rtx, rtx, rtx); extern rtx gen_emms (void); extern rtx gen_ldmxcsr (rtx); extern rtx gen_stmxcsr (rtx); extern rtx gen_addv2sf3 (rtx, rtx, rtx); extern rtx gen_subv2sf3 (rtx, rtx, rtx); extern rtx gen_subrv2sf3 (rtx, rtx, rtx); extern rtx gen_gtv2sf3 (rtx, rtx, rtx); extern rtx gen_gev2sf3 (rtx, rtx, rtx); extern rtx gen_eqv2sf3 (rtx, rtx, rtx); extern rtx gen_pfmaxv2sf3 (rtx, rtx, rtx); extern rtx gen_pfminv2sf3 (rtx, rtx, rtx); extern rtx gen_mulv2sf3 (rtx, rtx, rtx); extern rtx gen_femms (void); extern rtx gen_pf2id (rtx, rtx); extern rtx gen_pf2iw (rtx, rtx); extern rtx gen_pfacc (rtx, rtx, rtx); extern rtx gen_pfnacc (rtx, rtx, rtx); extern rtx gen_pfpnacc (rtx, rtx, rtx); extern rtx gen_pi2fw (rtx, rtx); extern rtx gen_floatv2si2 (rtx, rtx); extern rtx gen_pavgusb (rtx, rtx, rtx); extern rtx gen_pfrcpv2sf2 (rtx, rtx); extern rtx gen_pfrcpit1v2sf3 (rtx, rtx, rtx); extern rtx gen_pfrcpit2v2sf3 (rtx, rtx, rtx); extern rtx gen_pfrsqrtv2sf2 (rtx, rtx); extern rtx gen_pfrsqit1v2sf3 (rtx, rtx, rtx); extern rtx gen_pmulhrwv4hi3 (rtx, rtx, rtx); extern rtx gen_pswapdv2si2 (rtx, rtx); extern rtx gen_pswapdv2sf2 (rtx, rtx); extern rtx gen_addv2df3 (rtx, rtx, rtx); extern rtx gen_vmaddv2df3 
(rtx, rtx, rtx); extern rtx gen_subv2df3 (rtx, rtx, rtx); extern rtx gen_vmsubv2df3 (rtx, rtx, rtx); extern rtx gen_mulv2df3 (rtx, rtx, rtx); extern rtx gen_vmmulv2df3 (rtx, rtx, rtx); extern rtx gen_divv2df3 (rtx, rtx, rtx); extern rtx gen_vmdivv2df3 (rtx, rtx, rtx); extern rtx gen_smaxv2df3 (rtx, rtx, rtx); extern rtx gen_vmsmaxv2df3 (rtx, rtx, rtx); extern rtx gen_sminv2df3 (rtx, rtx, rtx); extern rtx gen_vmsminv2df3 (rtx, rtx, rtx); extern rtx gen_sqrtv2df2 (rtx, rtx); extern rtx gen_vmsqrtv2df2 (rtx, rtx, rtx); extern rtx gen_maskcmpv2df3 (rtx, rtx, rtx, rtx); extern rtx gen_maskncmpv2df3 (rtx, rtx, rtx, rtx); extern rtx gen_vmmaskcmpv2df3 (rtx, rtx, rtx, rtx); extern rtx gen_vmmaskncmpv2df3 (rtx, rtx, rtx, rtx); extern rtx gen_sse2_comi (rtx, rtx); extern rtx gen_sse2_ucomi (rtx, rtx); extern rtx gen_sse2_movmskpd (rtx, rtx); extern rtx gen_sse2_pmovmskb (rtx, rtx); extern rtx gen_sse2_maskmovdqu (rtx, rtx, rtx); extern rtx gen_sse2_maskmovdqu_rex64 (rtx, rtx, rtx); extern rtx gen_sse2_movntv2df (rtx, rtx); extern rtx gen_sse2_movntv2di (rtx, rtx); extern rtx gen_sse2_movntsi (rtx, rtx); extern rtx gen_cvtdq2ps (rtx, rtx); extern rtx gen_cvtps2dq (rtx, rtx); extern rtx gen_cvttps2dq (rtx, rtx); extern rtx gen_cvtdq2pd (rtx, rtx); extern rtx gen_cvtpd2dq (rtx, rtx); extern rtx gen_cvttpd2dq (rtx, rtx); extern rtx gen_cvtpd2pi (rtx, rtx); extern rtx gen_cvttpd2pi (rtx, rtx); extern rtx gen_cvtpi2pd (rtx, rtx); extern rtx gen_cvtsd2si (rtx, rtx); extern rtx gen_cvtsd2siq (rtx, rtx); extern rtx gen_cvttsd2si (rtx, rtx); extern rtx gen_cvttsd2siq (rtx, rtx); extern rtx gen_cvtsi2sd (rtx, rtx, rtx); extern rtx gen_cvtsi2sdq (rtx, rtx, rtx); extern rtx gen_cvtsd2ss (rtx, rtx, rtx); extern rtx gen_cvtss2sd (rtx, rtx, rtx); extern rtx gen_cvtpd2ps (rtx, rtx); extern rtx gen_cvtps2pd (rtx, rtx); extern rtx gen_addv16qi3 (rtx, rtx, rtx); extern rtx gen_addv8hi3 (rtx, rtx, rtx); extern rtx gen_addv4si3 (rtx, rtx, rtx); extern rtx gen_addv2di3 (rtx, rtx, rtx); extern rtx gen_ssaddv16qi3 (rtx, rtx, rtx); extern rtx gen_ssaddv8hi3 (rtx, rtx, rtx); extern rtx gen_usaddv16qi3 (rtx, rtx, rtx); extern rtx gen_usaddv8hi3 (rtx, rtx, rtx); extern rtx gen_subv16qi3 (rtx, rtx, rtx); extern rtx gen_subv8hi3 (rtx, rtx, rtx); extern rtx gen_subv4si3 (rtx, rtx, rtx); extern rtx gen_subv2di3 (rtx, rtx, rtx); extern rtx gen_sssubv16qi3 (rtx, rtx, rtx); extern rtx gen_sssubv8hi3 (rtx, rtx, rtx); extern rtx gen_ussubv16qi3 (rtx, rtx, rtx); extern rtx gen_ussubv8hi3 (rtx, rtx, rtx); extern rtx gen_mulv8hi3 (rtx, rtx, rtx); extern rtx gen_smulv8hi3_highpart (rtx, rtx, rtx); extern rtx gen_umulv8hi3_highpart (rtx, rtx, rtx); extern rtx gen_sse2_umulsidi3 (rtx, rtx, rtx); extern rtx gen_sse2_umulv2siv2di3 (rtx, rtx, rtx); extern rtx gen_sse2_pmaddwd (rtx, rtx, rtx); extern rtx gen_sse2_clrti (rtx); extern rtx gen_sse2_uavgv16qi3 (rtx, rtx, rtx); extern rtx gen_sse2_uavgv8hi3 (rtx, rtx, rtx); extern rtx gen_sse2_psadbw (rtx, rtx, rtx); extern rtx gen_sse2_pinsrw (rtx, rtx, rtx, rtx); extern rtx gen_sse2_pextrw (rtx, rtx, rtx); extern rtx gen_sse2_pshufd (rtx, rtx, rtx); extern rtx gen_sse2_pshuflw (rtx, rtx, rtx); extern rtx gen_sse2_pshufhw (rtx, rtx, rtx); extern rtx gen_eqv16qi3 (rtx, rtx, rtx); extern rtx gen_eqv8hi3 (rtx, rtx, rtx); extern rtx gen_eqv4si3 (rtx, rtx, rtx); extern rtx gen_gtv16qi3 (rtx, rtx, rtx); extern rtx gen_gtv8hi3 (rtx, rtx, rtx); extern rtx gen_gtv4si3 (rtx, rtx, rtx); extern rtx gen_umaxv16qi3 (rtx, rtx, rtx); extern rtx gen_smaxv8hi3 (rtx, rtx, rtx); extern rtx gen_uminv16qi3 (rtx, rtx, 
rtx); extern rtx gen_sminv8hi3 (rtx, rtx, rtx); extern rtx gen_ashrv8hi3 (rtx, rtx, rtx); extern rtx gen_ashrv4si3 (rtx, rtx, rtx); extern rtx gen_lshrv8hi3 (rtx, rtx, rtx); extern rtx gen_lshrv4si3 (rtx, rtx, rtx); extern rtx gen_lshrv2di3 (rtx, rtx, rtx); extern rtx gen_ashlv8hi3 (rtx, rtx, rtx); extern rtx gen_ashlv4si3 (rtx, rtx, rtx); extern rtx gen_ashlv2di3 (rtx, rtx, rtx); extern rtx gen_ashrv8hi3_ti (rtx, rtx, rtx); extern rtx gen_ashrv4si3_ti (rtx, rtx, rtx); extern rtx gen_lshrv8hi3_ti (rtx, rtx, rtx); extern rtx gen_lshrv4si3_ti (rtx, rtx, rtx); extern rtx gen_lshrv2di3_ti (rtx, rtx, rtx); extern rtx gen_ashlv8hi3_ti (rtx, rtx, rtx); extern rtx gen_ashlv4si3_ti (rtx, rtx, rtx); extern rtx gen_ashlv2di3_ti (rtx, rtx, rtx); extern rtx gen_sse2_ashlti3 (rtx, rtx, rtx); extern rtx gen_sse2_lshrti3 (rtx, rtx, rtx); extern rtx gen_sse2_unpckhpd (rtx, rtx, rtx); extern rtx gen_sse2_unpcklpd (rtx, rtx, rtx); extern rtx gen_sse2_packsswb (rtx, rtx, rtx); extern rtx gen_sse2_packssdw (rtx, rtx, rtx); extern rtx gen_sse2_packuswb (rtx, rtx, rtx); extern rtx gen_sse2_punpckhbw (rtx, rtx, rtx); extern rtx gen_sse2_punpckhwd (rtx, rtx, rtx); extern rtx gen_sse2_punpckhdq (rtx, rtx, rtx); extern rtx gen_sse2_punpcklbw (rtx, rtx, rtx); extern rtx gen_sse2_punpcklwd (rtx, rtx, rtx); extern rtx gen_sse2_punpckldq (rtx, rtx, rtx); extern rtx gen_sse2_punpcklqdq (rtx, rtx, rtx); extern rtx gen_sse2_punpckhqdq (rtx, rtx, rtx); extern rtx gen_sse2_movapd (rtx, rtx); extern rtx gen_sse2_movupd (rtx, rtx); extern rtx gen_sse2_movdqa (rtx, rtx); extern rtx gen_sse2_movdqu (rtx, rtx); extern rtx gen_sse2_movdq2q (rtx, rtx); extern rtx gen_sse2_movdq2q_rex64 (rtx, rtx); extern rtx gen_sse2_movq2dq (rtx, rtx); extern rtx gen_sse2_movq2dq_rex64 (rtx, rtx); extern rtx gen_sse2_movq (rtx, rtx); extern rtx gen_sse2_loadd (rtx, rtx); extern rtx gen_sse2_stored (rtx, rtx); extern rtx gen_sse2_movhpd (rtx, rtx, rtx); extern rtx gen_sse2_loadsd_1 (rtx, rtx, rtx); extern rtx gen_sse2_movsd (rtx, rtx, rtx); extern rtx gen_sse2_storesd (rtx, rtx); extern rtx gen_sse2_shufpd (rtx, rtx, rtx, rtx); extern rtx gen_sse2_clflush (rtx); extern rtx gen_mwait (rtx, rtx); extern rtx gen_monitor (rtx, rtx, rtx); extern rtx gen_addsubv4sf3 (rtx, rtx, rtx); extern rtx gen_addsubv2df3 (rtx, rtx, rtx); extern rtx gen_haddv4sf3 (rtx, rtx, rtx); extern rtx gen_haddv2df3 (rtx, rtx, rtx); extern rtx gen_hsubv4sf3 (rtx, rtx, rtx); extern rtx gen_hsubv2df3 (rtx, rtx, rtx); extern rtx gen_movshdup (rtx, rtx); extern rtx gen_movsldup (rtx, rtx); extern rtx gen_lddqu (rtx, rtx); extern rtx gen_loadddup (rtx, rtx); extern rtx gen_movddup (rtx, rtx); extern rtx gen_cmpdi (rtx, rtx); extern rtx gen_cmpsi (rtx, rtx); extern rtx gen_cmphi (rtx, rtx); extern rtx gen_cmpqi (rtx, rtx); extern rtx gen_cmpdi_1_rex64 (rtx, rtx); extern rtx gen_cmpsi_1 (rtx, rtx); extern rtx gen_cmpqi_ext_3 (rtx, rtx); extern rtx gen_cmpxf (rtx, rtx); extern rtx gen_cmpdf (rtx, rtx); extern rtx gen_cmpsf (rtx, rtx); extern rtx gen_movsi (rtx, rtx); extern rtx gen_movhi (rtx, rtx); extern rtx gen_movstricthi (rtx, rtx); extern rtx gen_movqi (rtx, rtx); extern rtx gen_reload_outqi (rtx, rtx, rtx); extern rtx gen_movstrictqi (rtx, rtx); extern rtx gen_movdi (rtx, rtx); extern rtx gen_movsf (rtx, rtx); extern rtx gen_movdf (rtx, rtx); extern rtx gen_movxf (rtx, rtx); extern rtx gen_zero_extendhisi2 (rtx, rtx); extern rtx gen_zero_extendqihi2 (rtx, rtx); extern rtx gen_zero_extendqisi2 (rtx, rtx); extern rtx gen_zero_extendsidi2 (rtx, rtx); extern rtx gen_extendsidi2 (rtx, 
rtx); extern rtx gen_extendsfdf2 (rtx, rtx); extern rtx gen_extendsfxf2 (rtx, rtx); extern rtx gen_extenddfxf2 (rtx, rtx); extern rtx gen_truncdfsf2 (rtx, rtx); extern rtx gen_truncxfsf2 (rtx, rtx); extern rtx gen_truncxfdf2 (rtx, rtx); extern rtx gen_fix_truncxfdi2 (rtx, rtx); extern rtx gen_fix_truncdfdi2 (rtx, rtx); extern rtx gen_fix_truncsfdi2 (rtx, rtx); extern rtx gen_fix_truncxfsi2 (rtx, rtx); extern rtx gen_fix_truncdfsi2 (rtx, rtx); extern rtx gen_fix_truncsfsi2 (rtx, rtx); extern rtx gen_fix_truncxfhi2 (rtx, rtx); extern rtx gen_fix_truncdfhi2 (rtx, rtx); extern rtx gen_fix_truncsfhi2 (rtx, rtx); extern rtx gen_floathisf2 (rtx, rtx); extern rtx gen_floatsisf2 (rtx, rtx); extern rtx gen_floatdisf2 (rtx, rtx); extern rtx gen_floathidf2 (rtx, rtx); extern rtx gen_floatsidf2 (rtx, rtx); extern rtx gen_floatdidf2 (rtx, rtx); extern rtx gen_floatunssisf2 (rtx, rtx); extern rtx gen_floatunsdisf2 (rtx, rtx); extern rtx gen_floatunsdidf2 (rtx, rtx); extern rtx gen_vec_setv2df (rtx, rtx, rtx); extern rtx gen_vec_extractv2df (rtx, rtx, rtx); extern rtx gen_vec_initv2df (rtx, rtx); extern rtx gen_vec_setv4sf (rtx, rtx, rtx); extern rtx gen_vec_extractv4sf (rtx, rtx, rtx); extern rtx gen_vec_initv4sf (rtx, rtx); extern rtx gen_adddi3 (rtx, rtx, rtx); extern rtx gen_addsi3 (rtx, rtx, rtx); extern rtx gen_addhi3 (rtx, rtx, rtx); extern rtx gen_addqi3 (rtx, rtx, rtx); extern rtx gen_addxf3 (rtx, rtx, rtx); extern rtx gen_adddf3 (rtx, rtx, rtx); extern rtx gen_addsf3 (rtx, rtx, rtx); extern rtx gen_subdi3 (rtx, rtx, rtx); extern rtx gen_subsi3 (rtx, rtx, rtx); extern rtx gen_subhi3 (rtx, rtx, rtx); extern rtx gen_subqi3 (rtx, rtx, rtx); extern rtx gen_subxf3 (rtx, rtx, rtx); extern rtx gen_subdf3 (rtx, rtx, rtx); extern rtx gen_subsf3 (rtx, rtx, rtx); extern rtx gen_muldi3 (rtx, rtx, rtx); extern rtx gen_mulsi3 (rtx, rtx, rtx); extern rtx gen_mulhi3 (rtx, rtx, rtx); extern rtx gen_mulqi3 (rtx, rtx, rtx); extern rtx gen_umulqihi3 (rtx, rtx, rtx); extern rtx gen_mulqihi3 (rtx, rtx, rtx); extern rtx gen_umulditi3 (rtx, rtx, rtx); extern rtx gen_umulsidi3 (rtx, rtx, rtx); extern rtx gen_mulditi3 (rtx, rtx, rtx); extern rtx gen_mulsidi3 (rtx, rtx, rtx); extern rtx gen_umuldi3_highpart (rtx, rtx, rtx); extern rtx gen_umulsi3_highpart (rtx, rtx, rtx); extern rtx gen_smuldi3_highpart (rtx, rtx, rtx); extern rtx gen_smulsi3_highpart (rtx, rtx, rtx); extern rtx gen_mulxf3 (rtx, rtx, rtx); extern rtx gen_muldf3 (rtx, rtx, rtx); extern rtx gen_mulsf3 (rtx, rtx, rtx); extern rtx gen_divxf3 (rtx, rtx, rtx); extern rtx gen_divdf3 (rtx, rtx, rtx); extern rtx gen_divsf3 (rtx, rtx, rtx); extern rtx gen_divmoddi4 (rtx, rtx, rtx, rtx); extern rtx gen_divmodsi4 (rtx, rtx, rtx, rtx); extern rtx gen_udivmodhi4 (rtx, rtx, rtx, rtx); extern rtx gen_testsi_ccno_1 (rtx, rtx); extern rtx gen_testqi_ccz_1 (rtx, rtx); extern rtx gen_testqi_ext_ccno_0 (rtx, rtx); extern rtx gen_anddi3 (rtx, rtx, rtx); extern rtx gen_andsi3 (rtx, rtx, rtx); extern rtx gen_andhi3 (rtx, rtx, rtx); extern rtx gen_andqi3 (rtx, rtx, rtx); extern rtx gen_iordi3 (rtx, rtx, rtx); extern rtx gen_iorsi3 (rtx, rtx, rtx); extern rtx gen_iorhi3 (rtx, rtx, rtx); extern rtx gen_iorqi3 (rtx, rtx, rtx); extern rtx gen_xordi3 (rtx, rtx, rtx); extern rtx gen_xorsi3 (rtx, rtx, rtx); extern rtx gen_xorhi3 (rtx, rtx, rtx); extern rtx gen_xorqi3 (rtx, rtx, rtx); extern rtx gen_xorqi_cc_ext_1 (rtx, rtx, rtx); extern rtx gen_negdi2 (rtx, rtx); extern rtx gen_negsi2 (rtx, rtx); extern rtx gen_neghi2 (rtx, rtx); extern rtx gen_negqi2 (rtx, rtx); extern rtx gen_negsf2 
(rtx, rtx); extern rtx gen_negdf2 (rtx, rtx); extern rtx gen_negxf2 (rtx, rtx); extern rtx gen_abssf2 (rtx, rtx); extern rtx gen_absdf2 (rtx, rtx); extern rtx gen_absxf2 (rtx, rtx); extern rtx gen_one_cmpldi2 (rtx, rtx); extern rtx gen_one_cmplsi2 (rtx, rtx); extern rtx gen_one_cmplhi2 (rtx, rtx); extern rtx gen_one_cmplqi2 (rtx, rtx); extern rtx gen_ashldi3 (rtx, rtx, rtx); extern rtx gen_x86_shift_adj_1 (rtx, rtx, rtx, rtx); extern rtx gen_x86_shift_adj_2 (rtx, rtx, rtx); extern rtx gen_ashlsi3 (rtx, rtx, rtx); extern rtx gen_ashlhi3 (rtx, rtx, rtx); extern rtx gen_ashlqi3 (rtx, rtx, rtx); extern rtx gen_ashrdi3 (rtx, rtx, rtx); extern rtx gen_x86_shift_adj_3 (rtx, rtx, rtx); extern rtx gen_ashrsi3 (rtx, rtx, rtx); extern rtx gen_ashrhi3 (rtx, rtx, rtx); extern rtx gen_ashrqi3 (rtx, rtx, rtx); extern rtx gen_lshrdi3 (rtx, rtx, rtx); extern rtx gen_lshrsi3 (rtx, rtx, rtx); extern rtx gen_lshrhi3 (rtx, rtx, rtx); extern rtx gen_lshrqi3 (rtx, rtx, rtx); extern rtx gen_rotldi3 (rtx, rtx, rtx); extern rtx gen_rotlsi3 (rtx, rtx, rtx); extern rtx gen_rotlhi3 (rtx, rtx, rtx); extern rtx gen_rotlqi3 (rtx, rtx, rtx); extern rtx gen_rotrdi3 (rtx, rtx, rtx); extern rtx gen_rotrsi3 (rtx, rtx, rtx); extern rtx gen_rotrhi3 (rtx, rtx, rtx); extern rtx gen_rotrqi3 (rtx, rtx, rtx); extern rtx gen_extv (rtx, rtx, rtx, rtx); extern rtx gen_extzv (rtx, rtx, rtx, rtx); extern rtx gen_insv (rtx, rtx, rtx, rtx); extern rtx gen_seq (rtx); extern rtx gen_sne (rtx); extern rtx gen_sgt (rtx); extern rtx gen_sgtu (rtx); extern rtx gen_slt (rtx); extern rtx gen_sltu (rtx); extern rtx gen_sge (rtx); extern rtx gen_sgeu (rtx); extern rtx gen_sle (rtx); extern rtx gen_sleu (rtx); extern rtx gen_sunordered (rtx); extern rtx gen_sordered (rtx); extern rtx gen_suneq (rtx); extern rtx gen_sunge (rtx); extern rtx gen_sungt (rtx); extern rtx gen_sunle (rtx); extern rtx gen_sunlt (rtx); extern rtx gen_sltgt (rtx); extern rtx gen_beq (rtx); extern rtx gen_bne (rtx); extern rtx gen_bgt (rtx); extern rtx gen_bgtu (rtx); extern rtx gen_blt (rtx); extern rtx gen_bltu (rtx); extern rtx gen_bge (rtx); extern rtx gen_bgeu (rtx); extern rtx gen_ble (rtx); extern rtx gen_bleu (rtx); extern rtx gen_bunordered (rtx); extern rtx gen_bordered (rtx); extern rtx gen_buneq (rtx); extern rtx gen_bunge (rtx); extern rtx gen_bungt (rtx); extern rtx gen_bunle (rtx); extern rtx gen_bunlt (rtx); extern rtx gen_bltgt (rtx); extern rtx gen_indirect_jump (rtx); extern rtx gen_tablejump (rtx, rtx); extern rtx gen_doloop_end (rtx, rtx, rtx, rtx, rtx); #define GEN_CALL_POP(A, B, C, D) gen_call_pop ((A), (B), (C), (D)) extern rtx gen_call_pop (rtx, rtx, rtx, rtx); #define GEN_CALL(A, B, C, D) gen_call ((A), (B), (C)) extern rtx gen_call (rtx, rtx, rtx); #define GEN_SIBCALL(A, B, C, D) gen_sibcall ((A), (B), (C)) extern rtx gen_sibcall (rtx, rtx, rtx); #define GEN_CALL_VALUE_POP(A, B, C, D, E) gen_call_value_pop ((A), (B), (C), (D), (E)) extern rtx gen_call_value_pop (rtx, rtx, rtx, rtx, rtx); #define GEN_CALL_VALUE(A, B, C, D, E) gen_call_value ((A), (B), (C), (D)) extern rtx gen_call_value (rtx, rtx, rtx, rtx); #define GEN_SIBCALL_VALUE(A, B, C, D, E) gen_sibcall_value ((A), (B), (C), (D)) extern rtx gen_sibcall_value (rtx, rtx, rtx, rtx); extern rtx gen_untyped_call (rtx, rtx, rtx); extern rtx gen_return (void); extern rtx gen_prologue (void); extern rtx gen_epilogue (void); extern rtx gen_sibcall_epilogue (void); extern rtx gen_eh_return (rtx); extern rtx gen_ffssi2 (rtx, rtx); extern rtx gen_ffsdi2 (rtx, rtx); extern rtx gen_clzsi2 (rtx, rtx); extern 
rtx gen_clzdi2 (rtx, rtx); extern rtx gen_tls_global_dynamic_32 (rtx, rtx); extern rtx gen_tls_global_dynamic_64 (rtx, rtx); extern rtx gen_tls_local_dynamic_base_32 (rtx); extern rtx gen_tls_local_dynamic_base_64 (rtx); extern rtx gen_sqrtsf2 (rtx, rtx); extern rtx gen_sqrtdf2 (rtx, rtx); extern rtx gen_fmodsf3 (rtx, rtx, rtx); extern rtx gen_fmoddf3 (rtx, rtx, rtx); extern rtx gen_fmodxf3 (rtx, rtx, rtx); extern rtx gen_dremsf3 (rtx, rtx, rtx); extern rtx gen_dremdf3 (rtx, rtx, rtx); extern rtx gen_dremxf3 (rtx, rtx, rtx); extern rtx gen_tandf2 (rtx, rtx); extern rtx gen_tansf2 (rtx, rtx); extern rtx gen_tanxf2 (rtx, rtx); extern rtx gen_atan2df3 (rtx, rtx, rtx); extern rtx gen_atandf2 (rtx, rtx); extern rtx gen_atan2sf3 (rtx, rtx, rtx); extern rtx gen_atansf2 (rtx, rtx); extern rtx gen_atan2xf3 (rtx, rtx, rtx); extern rtx gen_atanxf2 (rtx, rtx); extern rtx gen_asindf2 (rtx, rtx); extern rtx gen_asinsf2 (rtx, rtx); extern rtx gen_asinxf2 (rtx, rtx); extern rtx gen_acosdf2 (rtx, rtx); extern rtx gen_acossf2 (rtx, rtx); extern rtx gen_acosxf2 (rtx, rtx); extern rtx gen_logsf2 (rtx, rtx); extern rtx gen_logdf2 (rtx, rtx); extern rtx gen_logxf2 (rtx, rtx); extern rtx gen_log10sf2 (rtx, rtx); extern rtx gen_log10df2 (rtx, rtx); extern rtx gen_log10xf2 (rtx, rtx); extern rtx gen_log2sf2 (rtx, rtx); extern rtx gen_log2df2 (rtx, rtx); extern rtx gen_log2xf2 (rtx, rtx); extern rtx gen_log1psf2 (rtx, rtx); extern rtx gen_log1pdf2 (rtx, rtx); extern rtx gen_log1pxf2 (rtx, rtx); extern rtx gen_logbsf2 (rtx, rtx); extern rtx gen_logbdf2 (rtx, rtx); extern rtx gen_logbxf2 (rtx, rtx); extern rtx gen_ilogbsi2 (rtx, rtx, rtx, rtx); extern rtx gen_expsf2 (rtx, rtx); extern rtx gen_expdf2 (rtx, rtx); extern rtx gen_expxf2 (rtx, rtx); extern rtx gen_exp10sf2 (rtx, rtx); extern rtx gen_exp10df2 (rtx, rtx); extern rtx gen_exp10xf2 (rtx, rtx); extern rtx gen_exp2sf2 (rtx, rtx); extern rtx gen_exp2df2 (rtx, rtx); extern rtx gen_exp2xf2 (rtx, rtx); extern rtx gen_expm1df2 (rtx, rtx); extern rtx gen_expm1sf2 (rtx, rtx); extern rtx gen_expm1xf2 (rtx, rtx); extern rtx gen_movstrsi (rtx, rtx, rtx, rtx); extern rtx gen_movstrdi (rtx, rtx, rtx, rtx); extern rtx gen_strmov (rtx, rtx, rtx, rtx); extern rtx gen_strmov_singleop (rtx, rtx, rtx, rtx, rtx, rtx); extern rtx gen_rep_mov (rtx, rtx, rtx, rtx, rtx, rtx, rtx); extern rtx gen_clrstrsi (rtx, rtx, rtx); extern rtx gen_clrstrdi (rtx, rtx, rtx); extern rtx gen_strset (rtx, rtx, rtx); extern rtx gen_strset_singleop (rtx, rtx, rtx, rtx); extern rtx gen_rep_stos (rtx, rtx, rtx, rtx, rtx); extern rtx gen_cmpstrsi (rtx, rtx, rtx, rtx, rtx); extern rtx gen_cmpintqi (rtx); extern rtx gen_cmpstrqi_nz_1 (rtx, rtx, rtx, rtx, rtx, rtx); extern rtx gen_cmpstrqi_1 (rtx, rtx, rtx, rtx, rtx, rtx); extern rtx gen_strlensi (rtx, rtx, rtx, rtx); extern rtx gen_strlendi (rtx, rtx, rtx, rtx); extern rtx gen_strlenqi_1 (rtx, rtx, rtx); extern rtx gen_movdicc (rtx, rtx, rtx, rtx); extern rtx gen_movsicc (rtx, rtx, rtx, rtx); extern rtx gen_movhicc (rtx, rtx, rtx, rtx); extern rtx gen_movqicc (rtx, rtx, rtx, rtx); extern rtx gen_movsfcc (rtx, rtx, rtx, rtx); extern rtx gen_movdfcc (rtx, rtx, rtx, rtx); extern rtx gen_movxfcc (rtx, rtx, rtx, rtx); extern rtx gen_minsf3 (rtx, rtx, rtx); extern rtx gen_addqicc (rtx, rtx, rtx, rtx); extern rtx gen_addhicc (rtx, rtx, rtx, rtx); extern rtx gen_addsicc (rtx, rtx, rtx, rtx); extern rtx gen_adddicc (rtx, rtx, rtx, rtx); extern rtx gen_mindf3 (rtx, rtx, rtx); extern rtx gen_maxsf3 (rtx, rtx, rtx); extern rtx gen_maxdf3 (rtx, rtx, rtx); extern rtx 
gen_allocate_stack_worker (rtx); extern rtx gen_allocate_stack_worker_postreload (rtx); extern rtx gen_allocate_stack_worker_rex64_postreload (rtx); extern rtx gen_allocate_stack (rtx, rtx); extern rtx gen_builtin_setjmp_receiver (rtx); extern rtx gen_conditional_trap (rtx, rtx); extern rtx gen_movti (rtx, rtx); extern rtx gen_movtf (rtx, rtx); extern rtx gen_movv2df (rtx, rtx); extern rtx gen_movv8hi (rtx, rtx); extern rtx gen_movv16qi (rtx, rtx); extern rtx gen_movv4sf (rtx, rtx); extern rtx gen_movv4si (rtx, rtx); extern rtx gen_movv2di (rtx, rtx); extern rtx gen_movv2si (rtx, rtx); extern rtx gen_movv4hi (rtx, rtx); extern rtx gen_movv8qi (rtx, rtx); extern rtx gen_movv2sf (rtx, rtx); extern rtx gen_sse_movaps (rtx, rtx); extern rtx gen_sse_movups (rtx, rtx); extern rtx gen_sse_loadss (rtx, rtx); extern rtx gen_negv4sf2 (rtx, rtx); extern rtx gen_sse_andv4sf3 (rtx, rtx, rtx); extern rtx gen_sse_nandv4sf3 (rtx, rtx, rtx); extern rtx gen_sse_iorv4sf3 (rtx, rtx, rtx); extern rtx gen_sse_xorv4sf3 (rtx, rtx, rtx); extern rtx gen_sse2_andv2df3 (rtx, rtx, rtx); extern rtx gen_sse2_nandv2df3 (rtx, rtx, rtx); extern rtx gen_sse2_iorv2df3 (rtx, rtx, rtx); extern rtx gen_sse2_xorv2df3 (rtx, rtx, rtx); extern rtx gen_sfence (void); extern rtx gen_sse_prologue_save (rtx, rtx, rtx, rtx); extern rtx gen_prefetch (rtx, rtx, rtx); extern rtx gen_sse2_loadsd (rtx, rtx); extern rtx gen_sse2_mfence (void); extern rtx gen_sse2_lfence (void); #endif /* GCC_INSN_FLAGS_H */ #endif #endif /* GCC_TM_H */ /* Sets (bit vectors) of hard registers, and operations on them. Copyright (C) 1987, 1992, 1994, 2000, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_HARD_REG_SET_H #define GCC_HARD_REG_SET_H /* Define the type of a set of hard registers. */ /* HARD_REG_ELT_TYPE is a typedef of the unsigned integral type which will be used for hard reg sets, either alone or in an array. If HARD_REG_SET is a macro, its definition is HARD_REG_ELT_TYPE, and it has enough bits to represent all the target machine's hard registers. Otherwise, it is a typedef for a suitably sized array of HARD_REG_ELT_TYPEs. HARD_REG_SET_LONGS is defined as how many. Note that lots of code assumes that the first part of a regset is the same format as a HARD_REG_SET. To help make sure this is true, we only try the widest integer mode (HOST_WIDE_INT) instead of all the smaller types. This approach loses only if there are a very few registers and then only in the few cases where we have an array of HARD_REG_SETs, so it needn't be as complex as it used to be.
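As an illustrative sketch (added by the editor, not part of the original header), typical use of the accessor macros defined below looks like this, and compiles the same way whether HARD_REG_SET ends up as a single scalar or as an array:

    HARD_REG_SET live_hard_regs;
    CLEAR_HARD_REG_SET (live_hard_regs);
    SET_HARD_REG_BIT (live_hard_regs, 0);
    if (TEST_HARD_REG_BIT (live_hard_regs, 0))
      CLEAR_HARD_REG_BIT (live_hard_regs, 0);

The variable name live_hard_regs is hypothetical; the macro layer exists precisely so that callers never need to know which representation was chosen.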
*/ typedef unsigned HOST_WIDE_INT HARD_REG_ELT_TYPE; #if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDE_INT #define HARD_REG_SET HARD_REG_ELT_TYPE #else #define HARD_REG_SET_LONGS \ ((FIRST_PSEUDO_REGISTER + HOST_BITS_PER_WIDE_INT - 1) \ / HOST_BITS_PER_WIDE_INT) typedef HARD_REG_ELT_TYPE HARD_REG_SET[HARD_REG_SET_LONGS]; #endif /* HARD_CONST is used to cast a constant to the appropriate type for use with a HARD_REG_SET. */ #define HARD_CONST(X) ((HARD_REG_ELT_TYPE) (X)) /* Define macros SET_HARD_REG_BIT, CLEAR_HARD_REG_BIT and TEST_HARD_REG_BIT to set, clear or test one bit in a hard reg set of type HARD_REG_SET. All three take two arguments: the set and the register number. In the case where sets are arrays of longs, the first argument is actually a pointer to a long. Define two macros for initializing a set: CLEAR_HARD_REG_SET and SET_HARD_REG_SET. These take just one argument. Also define macros for copying hard reg sets: COPY_HARD_REG_SET and COMPL_HARD_REG_SET. These take two arguments TO and FROM; they read from FROM and store into TO. COMPL_HARD_REG_SET complements each bit. Also define macros for combining hard reg sets: IOR_HARD_REG_SET and AND_HARD_REG_SET. These take two arguments TO and FROM; they read from FROM and combine bitwise into TO. Define also two variants IOR_COMPL_HARD_REG_SET and AND_COMPL_HARD_REG_SET which use the complement of the set FROM. Also define GO_IF_HARD_REG_SUBSET (X, Y, TO): if X is a subset of Y, go to TO. */ #ifdef HARD_REG_SET #define SET_HARD_REG_BIT(SET, BIT) \ ((SET) |= HARD_CONST (1) << (BIT)) #define CLEAR_HARD_REG_BIT(SET, BIT) \ ((SET) &= ~(HARD_CONST (1) << (BIT))) #define TEST_HARD_REG_BIT(SET, BIT) \ (!!((SET) & (HARD_CONST (1) << (BIT)))) #define CLEAR_HARD_REG_SET(TO) ((TO) = HARD_CONST (0)) #define SET_HARD_REG_SET(TO) ((TO) = ~ HARD_CONST (0)) #define COPY_HARD_REG_SET(TO, FROM) ((TO) = (FROM)) #define COMPL_HARD_REG_SET(TO, FROM) ((TO) = ~(FROM)) #define IOR_HARD_REG_SET(TO, FROM) ((TO) |= (FROM)) #define IOR_COMPL_HARD_REG_SET(TO, FROM) ((TO) |= ~ (FROM)) #define AND_HARD_REG_SET(TO, FROM) ((TO) &= (FROM)) #define AND_COMPL_HARD_REG_SET(TO, FROM) ((TO) &= ~ (FROM)) #define GO_IF_HARD_REG_SUBSET(X,Y,TO) if (HARD_CONST (0) == ((X) & ~(Y))) goto TO #define GO_IF_HARD_REG_EQUAL(X,Y,TO) if ((X) == (Y)) goto TO #else #define UHOST_BITS_PER_WIDE_INT ((unsigned) HOST_BITS_PER_WIDE_INT) #define SET_HARD_REG_BIT(SET, BIT) \ ((SET)[(BIT) / UHOST_BITS_PER_WIDE_INT] \ |= HARD_CONST (1) << ((BIT) % UHOST_BITS_PER_WIDE_INT)) #define CLEAR_HARD_REG_BIT(SET, BIT) \ ((SET)[(BIT) / UHOST_BITS_PER_WIDE_INT] \ &= ~(HARD_CONST (1) << ((BIT) % UHOST_BITS_PER_WIDE_INT))) #define TEST_HARD_REG_BIT(SET, BIT) \ (!!((SET)[(BIT) / UHOST_BITS_PER_WIDE_INT] \ & (HARD_CONST (1) << ((BIT) % UHOST_BITS_PER_WIDE_INT)))) #if FIRST_PSEUDO_REGISTER <= 2*HOST_BITS_PER_WIDE_INT #define CLEAR_HARD_REG_SET(TO) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO); \ scan_tp_[0] = 0; \ scan_tp_[1] = 0; } while (0) #define SET_HARD_REG_SET(TO) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO); \ scan_tp_[0] = -1; \ scan_tp_[1] = -1; } while (0) #define COPY_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] = scan_fp_[0]; \ scan_tp_[1] = scan_fp_[1]; } while (0) #define COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] = ~ scan_fp_[0]; \ scan_tp_[1] = ~ scan_fp_[1]; } while (0) #define AND_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] &= 
scan_fp_[0]; \ scan_tp_[1] &= scan_fp_[1]; } while (0) #define AND_COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] &= ~ scan_fp_[0]; \ scan_tp_[1] &= ~ scan_fp_[1]; } while (0) #define IOR_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] |= scan_fp_[0]; \ scan_tp_[1] |= scan_fp_[1]; } while (0) #define IOR_COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] |= ~ scan_fp_[0]; \ scan_tp_[1] |= ~ scan_fp_[1]; } while (0) #define GO_IF_HARD_REG_SUBSET(X,Y,TO) \ do { HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \ if ((0 == (scan_xp_[0] & ~ scan_yp_[0])) \ && (0 == (scan_xp_[1] & ~ scan_yp_[1]))) \ goto TO; } while (0) #define GO_IF_HARD_REG_EQUAL(X,Y,TO) \ do { HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \ if ((scan_xp_[0] == scan_yp_[0]) \ && (scan_xp_[1] == scan_yp_[1])) \ goto TO; } while (0) #else #if FIRST_PSEUDO_REGISTER <= 3*HOST_BITS_PER_WIDE_INT #define CLEAR_HARD_REG_SET(TO) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO); \ scan_tp_[0] = 0; \ scan_tp_[1] = 0; \ scan_tp_[2] = 0; } while (0) #define SET_HARD_REG_SET(TO) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO); \ scan_tp_[0] = -1; \ scan_tp_[1] = -1; \ scan_tp_[2] = -1; } while (0) #define COPY_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] = scan_fp_[0]; \ scan_tp_[1] = scan_fp_[1]; \ scan_tp_[2] = scan_fp_[2]; } while (0) #define COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] = ~ scan_fp_[0]; \ scan_tp_[1] = ~ scan_fp_[1]; \ scan_tp_[2] = ~ scan_fp_[2]; } while (0) #define AND_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] &= scan_fp_[0]; \ scan_tp_[1] &= scan_fp_[1]; \ scan_tp_[2] &= scan_fp_[2]; } while (0) #define AND_COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] &= ~ scan_fp_[0]; \ scan_tp_[1] &= ~ scan_fp_[1]; \ scan_tp_[2] &= ~ scan_fp_[2]; } while (0) #define IOR_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] |= scan_fp_[0]; \ scan_tp_[1] |= scan_fp_[1]; \ scan_tp_[2] |= scan_fp_[2]; } while (0) #define IOR_COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] |= ~ scan_fp_[0]; \ scan_tp_[1] |= ~ scan_fp_[1]; \ scan_tp_[2] |= ~ scan_fp_[2]; } while (0) #define GO_IF_HARD_REG_SUBSET(X,Y,TO) \ do { HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \ if ((0 == (scan_xp_[0] & ~ scan_yp_[0])) \ && (0 == (scan_xp_[1] & ~ scan_yp_[1])) \ && (0 == (scan_xp_[2] & ~ scan_yp_[2]))) \ goto TO; } while (0) #define GO_IF_HARD_REG_EQUAL(X,Y,TO) \ do { HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \ if ((scan_xp_[0] == scan_yp_[0]) \ && (scan_xp_[1] == scan_yp_[1]) \ && (scan_xp_[2] == scan_yp_[2])) \ goto TO; } while (0) #else #if FIRST_PSEUDO_REGISTER <= 4*HOST_BITS_PER_WIDE_INT #define CLEAR_HARD_REG_SET(TO) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO); \ scan_tp_[0] = 0; \ scan_tp_[1] = 0; \ scan_tp_[2] = 0; \ scan_tp_[3] = 0; } while (0) #define SET_HARD_REG_SET(TO) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO); \ scan_tp_[0] = -1; \ scan_tp_[1] = -1; \ scan_tp_[2] = -1; \ scan_tp_[3] = -1; } while (0) #define COPY_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] = scan_fp_[0]; \ scan_tp_[1] = 
scan_fp_[1]; \ scan_tp_[2] = scan_fp_[2]; \ scan_tp_[3] = scan_fp_[3]; } while (0) #define COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] = ~ scan_fp_[0]; \ scan_tp_[1] = ~ scan_fp_[1]; \ scan_tp_[2] = ~ scan_fp_[2]; \ scan_tp_[3] = ~ scan_fp_[3]; } while (0) #define AND_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] &= scan_fp_[0]; \ scan_tp_[1] &= scan_fp_[1]; \ scan_tp_[2] &= scan_fp_[2]; \ scan_tp_[3] &= scan_fp_[3]; } while (0) #define AND_COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] &= ~ scan_fp_[0]; \ scan_tp_[1] &= ~ scan_fp_[1]; \ scan_tp_[2] &= ~ scan_fp_[2]; \ scan_tp_[3] &= ~ scan_fp_[3]; } while (0) #define IOR_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] |= scan_fp_[0]; \ scan_tp_[1] |= scan_fp_[1]; \ scan_tp_[2] |= scan_fp_[2]; \ scan_tp_[3] |= scan_fp_[3]; } while (0) #define IOR_COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ scan_tp_[0] |= ~ scan_fp_[0]; \ scan_tp_[1] |= ~ scan_fp_[1]; \ scan_tp_[2] |= ~ scan_fp_[2]; \ scan_tp_[3] |= ~ scan_fp_[3]; } while (0) #define GO_IF_HARD_REG_SUBSET(X,Y,TO) \ do { HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \ if ((0 == (scan_xp_[0] & ~ scan_yp_[0])) \ && (0 == (scan_xp_[1] & ~ scan_yp_[1])) \ && (0 == (scan_xp_[2] & ~ scan_yp_[2])) \ && (0 == (scan_xp_[3] & ~ scan_yp_[3]))) \ goto TO; } while (0) #define GO_IF_HARD_REG_EQUAL(X,Y,TO) \ do { HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \ if ((scan_xp_[0] == scan_yp_[0]) \ && (scan_xp_[1] == scan_yp_[1]) \ && (scan_xp_[2] == scan_yp_[2]) \ && (scan_xp_[3] == scan_yp_[3])) \ goto TO; } while (0) #else /* FIRST_PSEUDO_REGISTER > 3*HOST_BITS_PER_WIDE_INT */ #define CLEAR_HARD_REG_SET(TO) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ *scan_tp_++ = 0; } while (0) #define SET_HARD_REG_SET(TO) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ *scan_tp_++ = -1; } while (0) #define COPY_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ *scan_tp_++ = *scan_fp_++; } while (0) #define COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ *scan_tp_++ = ~ *scan_fp_++; } while (0) #define AND_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ *scan_tp_++ &= *scan_fp_++; } while (0) #define AND_COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ *scan_tp_++ &= ~ *scan_fp_++; } while (0) #define IOR_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ *scan_tp_++ |= *scan_fp_++; } while (0) #define IOR_COMPL_HARD_REG_SET(TO, FROM) \ do { HARD_REG_ELT_TYPE *scan_tp_ = (TO), *scan_fp_ = (FROM); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ *scan_tp_++ |= ~ *scan_fp_++; } while (0) #define GO_IF_HARD_REG_SUBSET(X,Y,TO) \ do { HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ if (0 != (*scan_xp_++ & ~ *scan_yp_++)) 
break; \ if (i == HARD_REG_SET_LONGS) goto TO; } while (0) #define GO_IF_HARD_REG_EQUAL(X,Y,TO) \ do { HARD_REG_ELT_TYPE *scan_xp_ = (X), *scan_yp_ = (Y); \ int i; \ for (i = 0; i < HARD_REG_SET_LONGS; i++) \ if (*scan_xp_++ != *scan_yp_++) break; \ if (i == HARD_REG_SET_LONGS) goto TO; } while (0) #endif #endif #endif #endif /* Define some standard sets of registers. */ /* Indexed by hard register number, contains 1 for registers that are fixed use (stack pointer, pc, frame pointer, etc.). These are the registers that cannot be used to allocate a pseudo reg whose life does not cross calls. */ extern char fixed_regs[FIRST_PSEUDO_REGISTER]; /* The same info as a HARD_REG_SET. */ extern HARD_REG_SET fixed_reg_set; /* Indexed by hard register number, contains 1 for registers that are fixed use or are clobbered by function calls. These are the registers that cannot be used to allocate a pseudo reg whose life crosses calls. */ extern char call_used_regs[FIRST_PSEUDO_REGISTER]; #ifdef CALL_REALLY_USED_REGISTERS extern char call_really_used_regs[]; #endif /* The same info as a HARD_REG_SET. */ extern HARD_REG_SET call_used_reg_set; /* Registers that we don't want to caller save. */ extern HARD_REG_SET losing_caller_save_reg_set; /* Indexed by hard register number, contains 1 for registers that are fixed use -- i.e. in fixed_regs -- or a function value return register or TARGET_STRUCT_VALUE_RTX or STATIC_CHAIN_REGNUM. These are the registers that cannot hold quantities across calls even if we are willing to save and restore them. */ extern char call_fixed_regs[FIRST_PSEUDO_REGISTER]; /* The same info as a HARD_REG_SET. */ extern HARD_REG_SET call_fixed_reg_set; /* Indexed by hard register number, contains 1 for registers that are being used for global register decls. These must be exempt from ordinary flow analysis and are also considered fixed. */ extern char global_regs[FIRST_PSEUDO_REGISTER]; /* Contains 1 for registers that are set or clobbered by calls. */ /* ??? Ideally, this would be just call_used_regs plus global_regs, but for someone's bright idea to have call_used_regs strictly include fixed_regs. Which leaves us guessing as to the set of fixed_regs that are actually preserved. We know for sure that those associated with the local stack frame are safe, but scant others. */ extern HARD_REG_SET regs_invalidated_by_call; #ifdef REG_ALLOC_ORDER /* Table of register numbers in the order in which to try to use them. */ extern int reg_alloc_order[FIRST_PSEUDO_REGISTER]; /* The inverse of reg_alloc_order. */ extern int inv_reg_alloc_order[FIRST_PSEUDO_REGISTER]; #endif /* For each reg class, a HARD_REG_SET saying which registers are in it. */ extern HARD_REG_SET reg_class_contents[N_REG_CLASSES]; /* For each reg class, number of regs it contains. */ extern unsigned int reg_class_size[N_REG_CLASSES]; /* For each reg class, table listing all the containing classes. */ extern enum reg_class reg_class_superclasses[N_REG_CLASSES][N_REG_CLASSES]; /* For each reg class, table listing all the classes contained in it. */ extern enum reg_class reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES]; /* For each pair of reg classes, a largest reg class contained in their union. */ extern enum reg_class reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES]; /* For each pair of reg classes, the smallest reg class that contains their union. */ extern enum reg_class reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES]; /* Number of non-fixed registers. 
*/ extern int n_non_fixed_regs; /* Vector indexed by hardware reg giving its name. */ extern const char * reg_names[FIRST_PSEUDO_REGISTER]; /* Given a hard REGN, a FROM mode and a TO mode, return nonzero if REGN cannot change modes between the specified modes. */ #define REG_CANNOT_CHANGE_MODE_P(REGN, FROM, TO) \ CANNOT_CHANGE_MODE_CLASS (FROM, TO, REGNO_REG_CLASS (REGN)) #endif /* ! GCC_HARD_REG_SET_H */ /* Register Transfer Language (RTL) definitions for GCC. Copyright (C) 1987, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_RTL_H #define GCC_RTL_H /* Memory statistics helpers. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Cygnus Solutions. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_STATISTICS #define GCC_STATISTICS #ifdef GATHER_STATISTICS #define MEM_STAT_DECL , const char *_loc_name ATTRIBUTE_UNUSED, int _loc_line ATTRIBUTE_UNUSED, const char *_loc_function ATTRIBUTE_UNUSED #define PASS_MEM_STAT , _loc_name, _loc_line, _loc_function #define MEM_STAT_INFO , __FILE__, __LINE__, __FUNCTION__ #else #define MEM_STAT_DECL #define PASS_MEM_STAT #define MEM_STAT_INFO #endif #endif struct function; /* Machine mode definitions for GCC; included by rtl.h and tree.h. Copyright (C) 1991, 1993, 1994, 1996, 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef HAVE_MACHINE_MODES #define HAVE_MACHINE_MODES /* Make an enum class that gives all the machine modes.
*/ /* Generated automatically from machmode.def and config/i386/i386-modes.def by genmodes. */ #ifndef GCC_INSN_MODES_H #define GCC_INSN_MODES_H enum machine_mode { VOIDmode, /* machmode.def:146 */ BLKmode, /* machmode.def:150 */ CCmode, /* machmode.def:178 */ CCGCmode, /* config/i386/i386-modes.def:57 */ CCGOCmode, /* config/i386/i386-modes.def:58 */ CCNOmode, /* config/i386/i386-modes.def:59 */ CCZmode, /* config/i386/i386-modes.def:60 */ CCFPmode, /* config/i386/i386-modes.def:61 */ CCFPUmode, /* config/i386/i386-modes.def:62 */ BImode, /* machmode.def:153 */ QImode, /* machmode.def:158 */ HImode, /* machmode.def:159 */ SImode, /* machmode.def:160 */ DImode, /* machmode.def:161 */ TImode, /* machmode.def:162 */ SFmode, /* machmode.def:173 */ DFmode, /* machmode.def:174 */ XFmode, /* config/i386/i386-modes.def:29 */ TFmode, /* config/i386/i386-modes.def:35 */ CQImode, /* machmode.def:186 */ CHImode, /* machmode.def:186 */ CSImode, /* machmode.def:186 */ CDImode, /* machmode.def:186 */ CTImode, /* machmode.def:186 */ SCmode, /* machmode.def:187 */ DCmode, /* machmode.def:187 */ XCmode, /* machmode.def:187 */ TCmode, /* machmode.def:187 */ V2QImode, /* machmode.def:190 */ V4QImode, /* machmode.def:191 */ V2HImode, /* machmode.def:191 */ V8QImode, /* machmode.def:192 */ V4HImode, /* machmode.def:192 */ V2SImode, /* machmode.def:192 */ V1DImode, /* machmode.def:206 */ V16QImode, /* machmode.def:193 */ V8HImode, /* machmode.def:193 */ V4SImode, /* machmode.def:193 */ V2DImode, /* machmode.def:193 */ V8SImode, /* machmode.def:197 */ V4DImode, /* machmode.def:198 */ V8DImode, /* machmode.def:199 */ V2SFmode, /* machmode.def:209 */ V4SFmode, /* machmode.def:210 */ V2DFmode, /* machmode.def:210 */ V8SFmode, /* machmode.def:214 */ V4DFmode, /* machmode.def:216 */ V16SFmode, /* machmode.def:215 */ V8DFmode, /* machmode.def:217 */ MAX_MACHINE_MODE, MIN_MODE_RANDOM = VOIDmode, MAX_MODE_RANDOM = BLKmode, MIN_MODE_CC = CCmode, MAX_MODE_CC = CCFPUmode, MIN_MODE_INT = QImode, MAX_MODE_INT = TImode, MIN_MODE_PARTIAL_INT = VOIDmode, MAX_MODE_PARTIAL_INT = VOIDmode, MIN_MODE_FLOAT = SFmode, MAX_MODE_FLOAT = TFmode, MIN_MODE_COMPLEX_INT = CQImode, MAX_MODE_COMPLEX_INT = CTImode, MIN_MODE_COMPLEX_FLOAT = SCmode, MAX_MODE_COMPLEX_FLOAT = TCmode, MIN_MODE_VECTOR_INT = V2QImode, MAX_MODE_VECTOR_INT = V8DImode, MIN_MODE_VECTOR_FLOAT = V2SFmode, MAX_MODE_VECTOR_FLOAT = V8DFmode, NUM_MACHINE_MODES = MAX_MACHINE_MODE }; #define CONST_MODE_SIZE #define CONST_MODE_BASE_ALIGN #endif /* insn-modes.h */ /* Get the name of mode MODE as a string. */ extern const char * const mode_name[NUM_MACHINE_MODES]; #define GET_MODE_NAME(MODE) mode_name[MODE] /* Mode classes. */ /* Machine mode class definitions for GCC. Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #define MODE_CLASSES \ DEF_MODE_CLASS (MODE_RANDOM), /* other */ \ DEF_MODE_CLASS (MODE_CC), /* condition code in a register */ \ DEF_MODE_CLASS (MODE_INT), /* integer */ \ DEF_MODE_CLASS (MODE_PARTIAL_INT), /* integer with padding bits */ \ DEF_MODE_CLASS (MODE_FLOAT), /* floating point */ \ DEF_MODE_CLASS (MODE_COMPLEX_INT), /* complex numbers */ \ DEF_MODE_CLASS (MODE_COMPLEX_FLOAT), \ DEF_MODE_CLASS (MODE_VECTOR_INT), /* SIMD vectors */ \ DEF_MODE_CLASS (MODE_VECTOR_FLOAT) #define DEF_MODE_CLASS(M) M enum mode_class { MODE_CLASSES, MAX_MODE_CLASS }; #undef DEF_MODE_CLASS #undef MODE_CLASSES /* Get the general kind of object that mode MODE represents (integer, floating, complex, etc.) */ extern const unsigned char mode_class[NUM_MACHINE_MODES]; #define GET_MODE_CLASS(MODE) mode_class[MODE] /* Nonzero if MODE is an integral mode. */ #define INTEGRAL_MODE_P(MODE) \ (GET_MODE_CLASS (MODE) == MODE_INT \ || GET_MODE_CLASS (MODE) == MODE_PARTIAL_INT \ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_INT \ || GET_MODE_CLASS (MODE) == MODE_VECTOR_INT) /* Nonzero if MODE is a floating-point mode. */ #define FLOAT_MODE_P(MODE) \ (GET_MODE_CLASS (MODE) == MODE_FLOAT \ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT \ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT) /* Nonzero if MODE is a complex mode. */ #define COMPLEX_MODE_P(MODE) \ (GET_MODE_CLASS (MODE) == MODE_COMPLEX_INT \ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT) /* Nonzero if MODE is a vector mode. */ #define VECTOR_MODE_P(MODE) \ (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \ || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT) /* Nonzero if MODE is a scalar integral mode. */ #define SCALAR_INT_MODE_P(MODE) \ (GET_MODE_CLASS (MODE) == MODE_INT \ || GET_MODE_CLASS (MODE) == MODE_PARTIAL_INT) /* Nonzero if MODE is a scalar floating point mode. */ #define SCALAR_FLOAT_MODE_P(MODE) \ (GET_MODE_CLASS (MODE) == MODE_FLOAT) /* Get the size in bytes and bits of an object of mode MODE. */ extern CONST_MODE_SIZE unsigned char mode_size[NUM_MACHINE_MODES]; #define GET_MODE_SIZE(MODE) ((unsigned short) mode_size[MODE]) #define GET_MODE_BITSIZE(MODE) ((unsigned short) (GET_MODE_SIZE (MODE) * BITS_PER_UNIT)) /* Get the number of value bits of an object of mode MODE. */ extern const unsigned short mode_precision[NUM_MACHINE_MODES]; #define GET_MODE_PRECISION(MODE) mode_precision[MODE] /* Get a bitmask containing 1 for all bits in a word that fit within mode MODE. */ extern const unsigned HOST_WIDE_INT mode_mask_array[NUM_MACHINE_MODES]; #define GET_MODE_MASK(MODE) mode_mask_array[MODE] /* Return the mode of the inner elements in a vector. */ extern const unsigned char mode_inner[NUM_MACHINE_MODES]; #define GET_MODE_INNER(MODE) mode_inner[MODE] /* Get the size in bytes of the basic parts of an object of mode MODE. */ #define GET_MODE_UNIT_SIZE(MODE) \ (GET_MODE_INNER (MODE) == VOIDmode \ ? GET_MODE_SIZE (MODE) \ : GET_MODE_SIZE (GET_MODE_INNER (MODE))) /* Get the number of units in the object. */ extern const unsigned char mode_nunits[NUM_MACHINE_MODES]; #define GET_MODE_NUNITS(MODE) mode_nunits[MODE] /* Get the next wider natural mode (eg, QI -> HI -> SI -> DI -> TI). */ extern const unsigned char mode_wider[NUM_MACHINE_MODES]; #define GET_MODE_WIDER_MODE(MODE) mode_wider[MODE] /* Return the mode for data of a given size SIZE and mode class CLASS. If LIMIT is nonzero, then don't use modes bigger than MAX_FIXED_MODE_SIZE. The value is BLKmode if no other mode is found. 
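As a worked example (an illustrative sketch by the editor for this i386 configuration, not text from the original header): mode_for_size (32, MODE_INT, 0) yields SImode, for which GET_MODE_SIZE (SImode) is 4, GET_MODE_BITSIZE (SImode) is 32, and SCALAR_INT_MODE_P (SImode) is nonzero, while mode_for_size (24, MODE_INT, 0) finds no matching integer mode and returns BLKmode.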
*/ extern enum machine_mode mode_for_size (unsigned int, enum mode_class, int); /* Similar, but find the smallest mode for a given width. */ extern enum machine_mode smallest_mode_for_size (unsigned int, enum mode_class); /* Return an integer mode of the exact same size as the input mode, or BLKmode on failure. */ extern enum machine_mode int_mode_for_mode (enum machine_mode); /* Find the best mode to use to access a bit field. */ extern enum machine_mode get_best_mode (int, int, unsigned int, enum machine_mode, int); /* Determine alignment, 1<=result<=BIGGEST_ALIGNMENT. */ extern CONST_MODE_BASE_ALIGN unsigned char mode_base_align[NUM_MACHINE_MODES]; extern unsigned get_mode_alignment (enum machine_mode); #define GET_MODE_ALIGNMENT(MODE) get_mode_alignment (MODE) /* For each class, get the narrowest mode in that class. */ extern const unsigned char class_narrowest_mode[MAX_MODE_CLASS]; #define GET_CLASS_NARROWEST_MODE(CLASS) class_narrowest_mode[CLASS] /* Define the integer modes whose sizes are BITS_PER_UNIT and BITS_PER_WORD and the mode whose class is Pmode and whose size is POINTER_SIZE. */ extern enum machine_mode byte_mode; extern enum machine_mode word_mode; extern enum machine_mode ptr_mode; /* Target-dependent machine mode initialization - in insn-modes.c. */ extern void init_adjust_machine_modes (void); #endif /* not HAVE_MACHINE_MODES */ /* Declarations for variables relating to reading the source file. Used by parsers, lexical analyzers, and error message routines. Copyright (C) 1993, 1997, 1998, 2000, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_INPUT_H #define GCC_INPUT_H extern struct line_maps line_table; /* The location for declarations in "<built-in>" */ #define BUILTINS_LOCATION ((source_location) 2) typedef struct location_s GTY(()) { /* The name of the source file involved. */ const char *file; /* The line-location in the source file. */ int line; /* FUTURE (but confuses gengtype): int column. */ } expanded_location; #ifdef USE_MAPPED_LOCATION extern expanded_location expand_location (source_location); #define UNKNOWN_LOCATION ((source_location) 0) typedef source_location location_t; /* deprecated typedef */ typedef source_location source_locus; /* to be removed */ #else /* ! USE_MAPPED_LOCATION */ typedef struct location_s location_t; typedef location_t *source_locus; #define expand_location(FILELINE) (FILELINE) extern location_t unknown_location; #define UNKNOWN_LOCATION unknown_location #endif /* ! USE_MAPPED_LOCATION */ struct file_stack { struct file_stack *next; location_t location; }; /* Top-level source file. */ extern const char *main_input_filename; extern location_t input_location; #ifdef USE_MAPPED_LOCATION extern void push_srcloc (location_t); #else /* ! USE_MAPPED_LOCATION */ extern void push_srcloc (const char *name, int line); #endif /* !
USE_MAPPED_LOCATION */ extern void pop_srcloc (void); #define LOCATION_FILE(LOC) ((expand_location (LOC)).file) #define LOCATION_LINE(LOC) ((expand_location (LOC)).line) #define input_line LOCATION_LINE(input_location) #define input_filename LOCATION_FILE(input_location) /* Stack of currently pending input files. The line member is not accurate for the innermost file on the stack. */ extern struct file_stack *input_file_stack; /* Incremented on each change to input_file_stack. */ extern int input_file_stack_tick; #endif #undef FFS /* Some systems predefine this symbol; don't let it interfere. */ #undef FLOAT /* Likewise. */ #undef ABS /* Likewise. */ #undef PC /* Likewise. */ /* Value used by some passes to "recognize" noop moves as valid instructions. */ #define NOOP_MOVE_INSN_CODE INT_MAX /* Register Transfer Language EXPRESSIONS CODES */ #define RTX_CODE enum rtx_code enum rtx_code { #define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) ENUM , /* This file contains the definitions and documentation for the Register Transfer Expressions (rtx's) that make up the Register Transfer Language (rtl) used in the Back End of the GNU compiler. Copyright (C) 1987, 1988, 1992, 1994, 1995, 1997, 1998, 1999, 2000, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Expression definitions and descriptions for all targets are in this file. Some will not be used for some targets. The fields in the cpp macro call "DEF_RTL_EXPR()" are used to create declarations in the C source of the compiler. The fields are: 1. The internal name of the rtx used in the C source. It is a tag in the enumeration "enum rtx_code" defined in "rtl.h". By convention these are in UPPER_CASE. 2. The name of the rtx in the external ASCII format read by read_rtx(), and printed by print_rtx(). These names are stored in rtx_name[]. By convention these are the internal (field 1) names in lower_case. 3. The print format, and type of each rtx->u.fld[] (field) in this rtx. These formats are stored in rtx_format[]. The meaning of the formats is documented in front of this array in rtl.c 4. The class of the rtx. These are stored in rtx_class and are accessed via the GET_RTX_CLASS macro. 
They are defined as follows: RTX_CONST_OBJ an rtx code that can be used to represent a constant object (e.g, CONST_INT) RTX_OBJ an rtx code that can be used to represent an object (e.g, REG, MEM) RTX_COMPARE an rtx code for a comparison (e.g, LT, GT) RTX_COMM_COMPARE an rtx code for a commutative comparison (e.g, EQ, NE, ORDERED) RTX_UNARY an rtx code for a unary arithmetic expression (e.g, NEG, NOT) RTX_COMM_ARITH an rtx code for a commutative binary operation (e.g,, PLUS, MULT) RTX_TERNARY an rtx code for a non-bitfield three input operation (IF_THEN_ELSE) RTX_BIN_ARITH an rtx code for a non-commutative binary operation (e.g., MINUS, DIV) RTX_BITFIELD_OPS an rtx code for a bit-field operation (ZERO_EXTRACT, SIGN_EXTRACT) RTX_INSN an rtx code for a machine insn (INSN, JUMP_INSN, CALL_INSN) RTX_MATCH an rtx code for something that matches in insns (e.g, MATCH_DUP) RTX_AUTOINC an rtx code for autoincrement addressing modes (e.g. POST_DEC) RTX_EXTRA everything else */ /* --------------------------------------------------------------------- Expressions (and "meta" expressions) used for structuring the rtl representation of a program. --------------------------------------------------------------------- */ /* an expression code name unknown to the reader */ DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", RTX_EXTRA) /* (NIL) is used by rtl reader and printer to represent a null pointer. */ DEF_RTL_EXPR(NIL, "nil", "*", RTX_EXTRA) /* include a file */ DEF_RTL_EXPR(INCLUDE, "include", "s", RTX_EXTRA) /* --------------------------------------------------------------------- Expressions used in constructing lists. --------------------------------------------------------------------- */ /* a linked list of expressions */ DEF_RTL_EXPR(EXPR_LIST, "expr_list", "ee", RTX_EXTRA) /* a linked list of instructions. The insns are represented in print by their uids. */ DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types for machine descriptions. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Appears only in machine descriptions. Means use the function named by the second arg (the string) as a predicate; if matched, store the structure that was matched in the operand table at index specified by the first arg (the integer). If the second arg is the null string, the structure is just stored. A third string argument indicates to the register allocator restrictions on where the operand can be allocated. If the target needs no restriction on any instruction this field should be the null string. The string is prepended by: '=' to indicate the operand is only written to. '+' to indicate the operand is both read and written to. Each character in the string represents an allocable class for an operand. 'g' indicates the operand can be any valid class. 'i' indicates the operand can be immediate (in the instruction) data. 'r' indicates the operand can be in a register. 'm' indicates the operand can be in memory. 'o' a subset of the 'm' class. Those memory addressing modes that can be offset at compile time (have a constant added to them). Other characters indicate target dependent operand classes and are described in each target's machine description. For instructions with more than one operand, sets of classes can be separated by a comma to indicate the appropriate multi-operand constraints. 
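For instance (an illustrative sketch, not a quotation from any particular machine description), an output operand constrained by "=r,m" together with an input operand constrained by "rm,r" describes two alternatives: in the first the output must be a register and the input may be a register or memory, while in the second the output must be memory and the input must be a register.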
There must be a 1 to 1 correspondence between these sets of classes in all operands for an instruction. */ DEF_RTL_EXPR(MATCH_OPERAND, "match_operand", "iss", RTX_MATCH) /* Appears only in machine descriptions. Means match a SCRATCH or a register. When used to generate rtl, a SCRATCH is generated. As for MATCH_OPERAND, the mode specifies the desired mode and the first argument is the operand number. The second argument is the constraint. */ DEF_RTL_EXPR(MATCH_SCRATCH, "match_scratch", "is", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. */ DEF_RTL_EXPR(MATCH_DUP, "match_dup", "i", RTX_MATCH) /* Appears only in machine descriptions. Means apply a predicate, AND match recursively the operands of the rtx. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply (as a string, a function name). Operand 2 is a vector of expressions, each of which must match one subexpression of the rtx this construct is matching. */ DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means to match a PARALLEL of arbitrary length. The predicate is applied to the PARALLEL and the initial expressions in the PARALLEL are matched. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply to the PARALLEL. Operand 2 is a vector of expressions, each of which must match the corresponding element in the PARALLEL. */ DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_OPERATOR. */ DEF_RTL_EXPR(MATCH_OP_DUP, "match_op_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_PARALLEL. */ DEF_RTL_EXPR(MATCH_PAR_DUP, "match_par_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Defines the pattern for one kind of instruction. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN, "define_insn", "sEsTV", RTX_EXTRA) /* Definition of a peephole optimization. 1st operand: vector of insn patterns to match 2nd operand: C expression that must be true 3rd operand: template or C code to produce assembler output. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE, "define_peephole", "EsTV", RTX_EXTRA) /* Definition of a split operation. 1st operand: insn pattern to match 2nd operand: C expression that must be true 3rd operand: vector of insn patterns to place into a SEQUENCE 4th operand: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. 
(`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", RTX_EXTRA) /* Definition of an insn and associated split. This is the concatenation, with a few modifications, of a define_insn and a define_split which share the same pattern. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: C expression that must be true for split. This may start with "&&" in which case the split condition is the logical and of the insn condition and what follows the "&&" of this operand. 5: vector of insn patterns to place into a SEQUENCE 6: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). 7: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN_AND_SPLIT, "define_insn_and_split", "sEsTsESV", RTX_EXTRA) /* Definition of an RTL peephole operation. Follows the same arguments as define_split. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE2, "define_peephole2", "EsES", RTX_EXTRA) /* Define how to generate multiple insns for a standard insn name. 1st operand: the insn name. 2nd operand: vector of insn-patterns. Use match_operand to substitute an element of `recog_data.operand'. 3rd operand: C expression that must be true for this to be available. This may not test any operands. 4th operand: Extra C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", RTX_EXTRA) /* Define a requirement for delay slots. 1st operand: Condition involving insn attributes that, if true, indicates that the insn requires the number of delay slots shown. 2nd operand: Vector whose length is the three times the number of delay slots required. Each entry gives three conditions, each involving attributes. The first must be true for an insn to occupy that delay slot location. The second is true for all insns that can be annulled if the branch is true and the third is true for all insns that can be annulled if the branch is false. Multiple DEFINE_DELAYs may be present. They indicate differing requirements for delay slots. */ DEF_RTL_EXPR(DEFINE_DELAY, "define_delay", "eE", RTX_EXTRA) /* Define a set of insns that requires a function unit. This means that these insns produce their result after a delay and that there may be restrictions on the number of insns of this type that can be scheduled simultaneously. More than one DEFINE_FUNCTION_UNIT can be specified for a function unit. Each gives a set of operations and associated delays. The first three operands must be the same for each operation for the same function unit. All delays are specified in cycles. 
1st operand: Name of function unit (mostly for documentation) 2nd operand: Number of identical function units in CPU 3rd operand: Total number of simultaneous insns that can execute on this function unit; 0 if unlimited. 4th operand: Condition involving insn attribute, that, if true, specifies those insns that this expression applies to. 5th operand: Constant delay after which insn result will be available. 6th operand: Delay until next insn can be scheduled on the function unit executing this operation. The meaning depends on whether or not the next operand is supplied. 7th operand: If this operand is not specified, the 6th operand gives the number of cycles after the instruction matching the 4th operand begins using the function unit until a subsequent insn can begin. A value of zero should be used for a unit with no issue constraints. If only one operation can be executed at a time and the unit is busy for the entire time, the 3rd operand should be specified as 1, the 6th operand should be specified as 0, and the 7th operand should not be specified. If this operand is specified, it is a list of attribute expressions. If an insn for which any of these expressions is true is currently executing on the function unit, the issue delay will be given by the 6th operand. Otherwise, the insn can be immediately scheduled (subject to the limit on the number of simultaneous operations executing on the unit.) */ DEF_RTL_EXPR(DEFINE_FUNCTION_UNIT, "define_function_unit", "siieiiV", RTX_EXTRA) /* Define attribute computation for `asm' instructions. */ DEF_RTL_EXPR(DEFINE_ASM_ATTRIBUTES, "define_asm_attributes", "V", RTX_EXTRA) /* Definition of a conditional execution meta operation. Automatically generates new instances of DEFINE_INSN, selected by having attribute "predicable" true. The new pattern will contain a COND_EXEC and the predicate at top-level. Operand: 0: The predicate pattern. The top-level form should match a relational operator. Operands should have only one alternative. 1: A C expression giving an additional condition for recognizing the generated pattern. 2: A template or C code to produce assembler output. */ DEF_RTL_EXPR(DEFINE_COND_EXEC, "define_cond_exec", "Ess", RTX_EXTRA) /* SEQUENCE appears in the result of a `gen_...' function for a DEFINE_EXPAND that wants to make several insns. Its elements are the bodies of the insns that should be made. `emit_insn' takes the SEQUENCE apart and makes separate insns. */ DEF_RTL_EXPR(SEQUENCE, "sequence", "E", RTX_EXTRA) /* Refers to the address of its argument. This is only used in alias.c. */ DEF_RTL_EXPR(ADDRESS, "address", "e", RTX_MATCH) /* ---------------------------------------------------------------------- Constructions for CPU pipeline description described by NDFAs. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* (define_cpu_unit string [string]) describes cpu functional units (separated by comma). 1st operand: Names of cpu functional units. 2nd operand: Name of automaton (see comments for DEFINE_AUTOMATON). All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_CPU_UNIT, "define_cpu_unit", "sS", RTX_EXTRA) /* (define_query_cpu_unit string [string]) describes cpu functional units analogously to define_cpu_unit. The reservation of such units can be queried for automaton state.
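For example (an illustrative sketch by the editor, not taken from any real port), (define_query_cpu_unit "slot0,slot1" "vliw") would define two queryable units slot0 and slot1 belonging to the hypothetical automaton "vliw".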
*/ DEF_RTL_EXPR(DEFINE_QUERY_CPU_UNIT, "define_query_cpu_unit", "sS", RTX_EXTRA) /* (exclusion_set string string) means that each CPU functional unit in the first string can not be reserved simultaneously with any unit whose name is in the second string and vice versa. CPU units in the string are separated by commas. For example, it is useful for describing a CPU with a fully pipelined floating point functional unit which can execute simultaneously only single floating point insns or only double floating point insns. All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(EXCLUSION_SET, "exclusion_set", "ss", RTX_EXTRA) /* (presence_set string string) means that each CPU functional unit in the first string can not be reserved unless at least one of the unit patterns whose names are in the second string is reserved. This is an asymmetric relation. CPU units or unit patterns in the strings are separated by commas. A pattern is one unit name or unit names separated by white-spaces. For example, it is useful for describing that slot1 is reserved after a slot0 reservation for a VLIW processor. We could describe it by the following construction (presence_set "slot1" "slot0") Or slot1 is reserved only after a slot0 and unit b0 reservation. In this case we could write (presence_set "slot1" "slot0 b0") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(PRESENCE_SET, "presence_set", "ss", RTX_EXTRA) /* (final_presence_set string string) is analogous to `presence_set'. The difference between them is when checking is done. When an instruction is issued in a given automaton state reflecting all current and planned unit reservations, the automaton state is changed. The first state is a source state, the second one is a result state. Checking for `presence_set' is done on the source state reservation, checking for `final_presence_set' is done on the result reservation. This construction is useful to describe a reservation which is actually two subsequent reservations. For example, if we use (presence_set "slot1" "slot0") the following insn will never be issued (because slot1 requires slot0 which is absent in the source state). (define_reservation "insn_and_nop" "slot0 + slot1") but it can be issued if we use analogous `final_presence_set'. */ DEF_RTL_EXPR(FINAL_PRESENCE_SET, "final_presence_set", "ss", RTX_EXTRA) /* (absence_set string string) means that each CPU functional unit in the first string can be reserved only if each pattern of units whose names are in the second string is not reserved. This is an asymmetric relation (actually exclusion set is analogous to this one but it is symmetric). CPU units or unit patterns in the string are separated by commas. A pattern is one unit name or unit names separated by white-spaces. For example, it is useful for describing that slot0 can not be reserved after a slot1 or slot2 reservation for a VLIW processor. We could describe it by the following construction (absence_set "slot2" "slot0, slot1") Or slot2 can not be reserved if slot0 and unit b0 are reserved or slot1 and unit b1 are reserved. In this case we could write (absence_set "slot2" "slot0 b0, slot1 b1") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(ABSENCE_SET, "absence_set", "ss", RTX_EXTRA) /* (final_absence_set string string) is analogous to `absence_set' but checking is done on the result (state) reservation. See comments for `final_presence_set'.
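As an illustrative sketch (editor's example, not from the original description), (final_absence_set "slot2" "slot0 b0") forbids a slot2 reservation whenever slot0 and b0 are both reserved in the result state, whereas the plain absence_set form above performs the same check on the source state instead.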
*/ DEF_RTL_EXPR(FINAL_ABSENCE_SET, "final_absence_set", "ss", RTX_EXTRA) /* (define_bypass number out_insn_names in_insn_names) names a bypass with the given latency (the first number) from insns given by the first string (see define_insn_reservation) into insns given by the second string. Insn names in the strings are separated by commas. The third operand is an optional name of a function which is an additional guard for the bypass. The function will get the two insns as parameters. If the function returns zero, the bypass will be ignored for this case. An additional guard is necessary to recognize complicated bypasses, e.g. when the consumer is a load address. */ DEF_RTL_EXPR(DEFINE_BYPASS, "define_bypass", "issS", RTX_EXTRA) /* (define_automaton string) describes names of automata generated and used for pipeline hazards recognition. The names are separated by commas. Actually it is possible to generate a single automaton, but unfortunately it can be very large. If we use more than one automaton, the total size of the automata is usually less than that of the single one. The automaton name is used in define_cpu_unit and define_query_cpu_unit. All automata should have unique names. */ DEF_RTL_EXPR(DEFINE_AUTOMATON, "define_automaton", "s", RTX_EXTRA) /* (automata_option string) describes an option for generation of automata. Currently there are the following options: o "no-minimization" which makes no minimization of automata. This is only worth doing when we are debugging the description and need to look more accurately at reservations of states. o "time" which means printing additional time statistics about generation of automata. o "v" which means generation of a file describing the result automata. The file has suffix `.dfa' and can be used for the description verification and debugging. o "w" which means generation of a warning instead of an error for non-critical errors. o "ndfa" which makes nondeterministic finite state automata. o "progress" which means output of a progress bar showing how many states were generated so far for the automaton being processed. */ DEF_RTL_EXPR(AUTOMATA_OPTION, "automata_option", "s", RTX_EXTRA) /* (define_reservation string string) names a reservation (the first string) of cpu functional units (the 2nd string). Sometimes unit reservations for different insns contain common parts. In such a case, you can describe the common part and use its name (the 1st parameter) in a regular expression in define_insn_reservation. All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_RESERVATION, "define_reservation", "ss", RTX_EXTRA) /* (define_insn_reservation name default_latency condition regexpr) describes a reservation of cpu functional units (the 3rd operand) for an instruction which is selected by the condition (the 2nd parameter). The first parameter is used for output of debugging information. The reservations are described by a regular expression according to the following syntax: regexp = regexp "," oneof | oneof oneof = oneof "|" allof | allof allof = allof "+" repeat | repeat repeat = element "*" number | element element = cpu_function_unit_name | reservation_name | result_name | "nothing" | "(" regexp ")" 1. "," is used for describing the start of the next cycle in the reservation. 2. "|" is used for describing the reservation described by the first regular expression *or* the reservation described by the second regular expression *or* etc. 3.
"+" is used for describing the reservation described by the first regular expression *and* the reservation described by the second regular expression *and* etc. 4. "*" is used for convenience and simply means sequence in which the regular expression are repeated NUMBER times with cycle advancing (see ","). 5. cpu functional unit name which means its reservation. 6. reservation name -- see define_reservation. 7. string "nothing" means no units reservation. */ DEF_RTL_EXPR(DEFINE_INSN_RESERVATION, "define_insn_reservation", "sies", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions used for insn attributes. These also do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Definition of an insn attribute. 1st operand: name of the attribute 2nd operand: comma-separated list of possible attribute values 3rd operand: expression for the default value of the attribute. */ DEF_RTL_EXPR(DEFINE_ATTR, "define_attr", "sse", RTX_EXTRA) /* Marker for the name of an attribute. */ DEF_RTL_EXPR(ATTR, "attr", "s", RTX_EXTRA) /* For use in the last (optional) operand of DEFINE_INSN or DEFINE_PEEPHOLE and in DEFINE_ASM_INSN to specify an attribute to assign to insns matching that pattern. (set_attr "name" "value") is equivalent to (set (attr "name") (const_string "value")) */ DEF_RTL_EXPR(SET_ATTR, "set_attr", "ss", RTX_EXTRA) /* In the last operand of DEFINE_INSN and DEFINE_PEEPHOLE, this can be used to specify that attribute values are to be assigned according to the alternative matched. The following three expressions are equivalent: (set (attr "att") (cond [(eq_attrq "alternative" "1") (const_string "a1") (eq_attrq "alternative" "2") (const_string "a2")] (const_string "a3"))) (set_attr_alternative "att" [(const_string "a1") (const_string "a2") (const_string "a3")]) (set_attr "att" "a1,a2,a3") */ DEF_RTL_EXPR(SET_ATTR_ALTERNATIVE, "set_attr_alternative", "sE", RTX_EXTRA) /* A conditional expression true if the value of the specified attribute of the current insn equals the specified value. The first operand is the attribute name and the second is the comparison value. */ DEF_RTL_EXPR(EQ_ATTR, "eq_attr", "ss", RTX_EXTRA) /* A special case of the above representing a set of alternatives. The first operand is bitmap of the set, the second one is the default value. */ DEF_RTL_EXPR(EQ_ATTR_ALT, "eq_attr_alt", "ii", RTX_EXTRA) /* A conditional expression which is true if the specified flag is true for the insn being scheduled in reorg. genattr.c defines the following flags which can be tested by (attr_flag "foo") expressions in eligible_for_delay. forward, backward, very_likely, likely, very_unlikely, and unlikely. */ DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types used for things in the instruction chain. All formats must start with "iuu" to handle the chain. Each insn expression holds an rtl instruction and its semantics during back-end processing. See macros's in "rtl.h" for the meaning of each rtx->u.fld[]. ---------------------------------------------------------------------- */ /* An instruction that cannot jump. */ DEF_RTL_EXPR(INSN, "insn", "iuuBieiee", RTX_INSN) /* An instruction that can possibly jump. Fields ( rtx->u.fld[] ) have exact same meaning as INSN's. 
*/ DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "iuuBieiee0", RTX_INSN) /* An instruction that can possibly call a subroutine but which will not change which instruction comes next in the current function. Field ( rtx->u.fld[9] ) is CALL_INSN_FUNCTION_USAGE. All other fields ( rtx->u.fld[] ) have exact same meaning as INSN's. */ DEF_RTL_EXPR(CALL_INSN, "call_insn", "iuuBieieee", RTX_INSN) /* A marker that indicates that control will not flow through. */ DEF_RTL_EXPR(BARRIER, "barrier", "iuu000000", RTX_EXTRA) /* Holds a label that is followed by instructions. Operand: 4: is used in jump.c for the use-count of the label. 5: is used in flow.c to point to the chain of label_ref's to this label. 6: is a number that is unique in the entire compilation. 7: is the user-given name of the label, if any. */ DEF_RTL_EXPR(CODE_LABEL, "code_label", "iuuB00is", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: unused if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: CODE_LABEL_NUMBER if line number == NOTE_INSN_DELETED_LABEL. */ #else /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: filename, if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: unique number if line number == note_insn_deleted_label. */ #endif DEF_RTL_EXPR(NOTE, "note", "iuuB0ni", RTX_EXTRA) /* ---------------------------------------------------------------------- Top level constituents of INSN, JUMP_INSN and CALL_INSN. ---------------------------------------------------------------------- */ /* Conditionally execute code. Operand 0 is the condition that if true, the code is executed. Operand 1 is the code to be executed (typically a SET). Semantics are that there are no side effects if the condition is false. This pattern is created automatically by the if_convert pass run after reload or by target-specific splitters. */ DEF_RTL_EXPR(COND_EXEC, "cond_exec", "ee", RTX_EXTRA) /* Several operations to be done in parallel (perhaps under COND_EXEC). */ DEF_RTL_EXPR(PARALLEL, "parallel", "E", RTX_EXTRA) /* A string that is passed through to the assembler as input. One can obviously pass comments through by using the assembler comment syntax. These occur in an insn all by themselves as the PATTERN. They also appear inside an ASM_OPERANDS as a convenient way to hold a string. */ DEF_RTL_EXPR(ASM_INPUT, "asm_input", "s", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEi", RTX_EXTRA) #else /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 
4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the name of the containing source file. 7th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEsi", RTX_EXTRA) #endif /* A machine-specific operation. 1st operand is a vector of operands being used by the operation so that any needed reloads can be done. 2nd operand is a unique value saying which of a number of machine-specific operations is to be performed. (Note that the vector must be the first operand because of the way that genrecog.c record positions within an insn.) This can occur all by itself in a PATTERN, as a component of a PARALLEL, or inside an expression. */ DEF_RTL_EXPR(UNSPEC, "unspec", "Ei", RTX_EXTRA) /* Similar, but a volatile operation and one which may trap. */ DEF_RTL_EXPR(UNSPEC_VOLATILE, "unspec_volatile", "Ei", RTX_EXTRA) /* Vector of addresses, stored as full words. */ /* Each element is a LABEL_REF to a CODE_LABEL whose address we want. */ DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", RTX_EXTRA) /* Vector of address differences X0 - BASE, X1 - BASE, ... First operand is BASE; the vector contains the X's. The machine mode of this rtx says how much space to leave for each difference and is adjusted by branch shortening if CASE_VECTOR_SHORTEN_MODE is defined. The third and fourth operands store the target labels with the minimum and maximum addresses respectively. The fifth operand stores flags for use by branch shortening. Set at the start of shorten_branches: min_align: the minimum alignment for any of the target labels. base_after_vec: true iff BASE is after the ADDR_DIFF_VEC. min_after_vec: true iff minimum addr target label is after the ADDR_DIFF_VEC. max_after_vec: true iff maximum addr target label is after the ADDR_DIFF_VEC. min_after_base: true iff minimum address target label is after BASE. max_after_base: true iff maximum address target label is after BASE. Set by the actual branch shortening process: offset_unsigned: true iff offsets have to be treated as unsigned. scale: scaling that is necessary to make offsets fit into the mode. The third, fourth and fifth operands are only valid when CASE_VECTOR_SHORTEN_MODE is defined, and only in an optimizing compilations. */ DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEee0", RTX_EXTRA) /* Memory prefetch, with attributes supported on some targets. Operand 1 is the address of the memory to fetch. Operand 2 is 1 for a write access, 0 otherwise. Operand 3 is the level of temporal locality; 0 means there is no temporal locality and 1, 2, and 3 are for increasing levels of temporal locality. The attributes specified by operands 2 and 3 are ignored for targets whose prefetch instructions do not support them. */ DEF_RTL_EXPR(PREFETCH, "prefetch", "eee", RTX_EXTRA) /* ---------------------------------------------------------------------- At the top level of an instruction (perhaps under PARALLEL). ---------------------------------------------------------------------- */ /* Assignment. Operand 1 is the location (REG, MEM, PC, CC0 or whatever) assigned to. Operand 2 is the value stored there. ALL assignment must use SET. Instructions that do multiple assignments must use multiple SET, under PARALLEL. */ DEF_RTL_EXPR(SET, "set", "ee", RTX_EXTRA) /* Indicate something is used in a way that we don't want to explain. 
For example, subroutine calls will use the register in which the static chain is passed. */ DEF_RTL_EXPR(USE, "use", "e", RTX_EXTRA) /* Indicate something is clobbered in a way that we don't want to explain. For example, subroutine calls will clobber some physical registers (the ones that are by convention not saved). */ DEF_RTL_EXPR(CLOBBER, "clobber", "e", RTX_EXTRA) /* Call a subroutine. Operand 1 is the address to call. Operand 2 is the number of arguments. */ DEF_RTL_EXPR(CALL, "call", "ee", RTX_EXTRA) /* Return from a subroutine. */ DEF_RTL_EXPR(RETURN, "return", "", RTX_EXTRA) /* Conditional trap. Operand 1 is the condition. Operand 2 is the trap code. For an unconditional trap, make the condition (const_int 1). */ DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA) /* Placeholder for _Unwind_Resume before we know if a function call or a branch is needed. Operand 1 is the exception region from which control is flowing. */ DEF_RTL_EXPR(RESX, "resx", "i", RTX_EXTRA) /* ---------------------------------------------------------------------- Primitive values for use in expressions. ---------------------------------------------------------------------- */ /* numeric integer constant */ DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ) /* numeric floating point constant. Operands hold the value. They are all 'w' and there may be from 2 to 6; see real.h. */ DEF_RTL_EXPR(CONST_DOUBLE, "const_double", CONST_DOUBLE_FORMAT, RTX_CONST_OBJ) /* Describes a vector constant. */ DEF_RTL_EXPR(CONST_VECTOR, "const_vector", "E", RTX_EXTRA) /* String constant. Used only for attributes right now. */ DEF_RTL_EXPR(CONST_STRING, "const_string", "s", RTX_OBJ) /* This is used to encapsulate an expression whose value is constant (such as the sum of a SYMBOL_REF and a CONST_INT) so that it will be recognized as a constant operand rather than by arithmetic instructions. */ DEF_RTL_EXPR(CONST, "const", "e", RTX_CONST_OBJ) /* program counter. Ordinary jumps are represented by a SET whose first operand is (PC). */ DEF_RTL_EXPR(PC, "pc", "", RTX_OBJ) /* Used in the cselib routines to describe a value. */ DEF_RTL_EXPR(VALUE, "value", "0", RTX_OBJ) /* A register. The "operand" is the register number, accessed with the REGNO macro. If this number is less than FIRST_PSEUDO_REGISTER than a hardware register is being referred to. The second operand holds the original register number - this will be different for a pseudo register that got turned into a hard register. This rtx needs to have as many (or more) fields as a MEM, since we can change REG rtx's into MEMs during reload. */ DEF_RTL_EXPR(REG, "reg", "i00", RTX_OBJ) /* A scratch register. This represents a register used only within a single insn. It will be turned into a REG during register allocation or reload unless the constraint indicates that the register won't be needed, in which case it can remain a SCRATCH. This code is marked as having one operand so it can be turned into a REG. */ DEF_RTL_EXPR(SCRATCH, "scratch", "0", RTX_OBJ) /* One word of a multi-word value. The first operand is the complete value; the second says which word. The WORDS_BIG_ENDIAN flag controls whether word number 0 (as numbered in a SUBREG) is the most or least significant word. This is also used to refer to a value in a different machine mode. For example, it can be used to refer to a SImode value as if it were Qimode, or vice versa. Then the word number is always 0. 
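   For example (an illustrative sketch; pseudo register number 100 is
   made up), the low-numbered word of a DImode pseudo can be referred to
   in SImode as

       (subreg:SI (reg:DI 100) 0)

   and reinterpreting an SImode pseudo as an equal-sized SFmode value,
   the case just described, is written (subreg:SF (reg:SI 100) 0).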
*/ DEF_RTL_EXPR(SUBREG, "subreg", "ei", RTX_EXTRA) /* This one-argument rtx is used for move instructions that are guaranteed to alter only the low part of a destination. Thus, (SET (SUBREG:HI (REG...)) (MEM:HI ...)) has an unspecified effect on the high part of REG, but (SET (STRICT_LOW_PART (SUBREG:HI (REG...))) (MEM:HI ...)) is guaranteed to alter only the bits of REG that are in HImode. The actual instruction used is probably the same in both cases, but the register constraints may be tighter when STRICT_LOW_PART is in use. */ DEF_RTL_EXPR(STRICT_LOW_PART, "strict_low_part", "e", RTX_EXTRA) /* (CONCAT a b) represents the virtual concatenation of a and b to make a value that has as many bits as a and b put together. This is used for complex values. Normally it appears only in DECL_RTLs and during RTL generation, but not in the insn chain. */ DEF_RTL_EXPR(CONCAT, "concat", "ee", RTX_OBJ) /* A memory location; operand is the address. The second operand is the alias set to which this MEM belongs. We use `0' instead of `w' for this field so that the field need not be specified in machine descriptions. */ DEF_RTL_EXPR(MEM, "mem", "e0", RTX_OBJ) /* Reference to an assembler label in the code for this function. The operand is a CODE_LABEL found in the insn chain. The unprinted fields 1 and 2 are used in flow.c for the LABEL_NEXTREF and CONTAINING_INSN. */ DEF_RTL_EXPR(LABEL_REF, "label_ref", "u00", RTX_CONST_OBJ) /* Reference to a named label: Operand 0: label name Operand 1: flags (see SYMBOL_FLAG_* in rtl.h) Operand 2: tree from which this symbol is derived, or null. This is either a DECL node, or some kind of constant. */ DEF_RTL_EXPR(SYMBOL_REF, "symbol_ref", "s00", RTX_CONST_OBJ) /* The condition code register is represented, in our imagination, as a register holding a value that can be compared to zero. In fact, the machine has already compared them and recorded the results; but instructions that look at the condition code pretend to be looking at the entire value and comparing it. */ DEF_RTL_EXPR(CC0, "cc0", "", RTX_OBJ) /* ===================================================================== A QUEUED expression really points to a member of the queue of instructions to be output later for postincrement/postdecrement. QUEUED expressions never become part of instructions. When a QUEUED expression would be put into an instruction, instead either the incremented variable or a copy of its previous value is used. Operands are: 0. the variable to be incremented (a REG rtx). 1. the incrementing instruction, or 0 if it hasn't been output yet. 2. A REG rtx for a copy of the old value of the variable, or 0 if none yet. 3. the body to use for the incrementing instruction 4. the next QUEUED expression in the queue. ====================================================================== */ DEF_RTL_EXPR(QUEUED, "queued", "eeeee", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions for operators in an rtl pattern ---------------------------------------------------------------------- */ /* if_then_else. This is used in representing ordinary conditional jump instructions. Operand: 0: condition 1: then expr 2: else expr */ DEF_RTL_EXPR(IF_THEN_ELSE, "if_then_else", "eee", RTX_TERNARY) /* General conditional. The first operand is a vector composed of pairs of expressions. The first element of each pair is evaluated, in turn. The value of the conditional is the second expression of the first pair whose first expression evaluates nonzero. 
If none of the expressions is true, the second operand will be used as the value of the conditional. This should be replaced with use of IF_THEN_ELSE. */ DEF_RTL_EXPR(COND, "cond", "Ee", RTX_EXTRA) /* Comparison, produces a condition code result. */ DEF_RTL_EXPR(COMPARE, "compare", "ee", RTX_BIN_ARITH) /* plus */ DEF_RTL_EXPR(PLUS, "plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1. */ DEF_RTL_EXPR(MINUS, "minus", "ee", RTX_BIN_ARITH) /* Minus operand 0. */ DEF_RTL_EXPR(NEG, "neg", "e", RTX_UNARY) DEF_RTL_EXPR(MULT, "mult", "ee", RTX_COMM_ARITH) /* Operand 0 divided by operand 1. */ DEF_RTL_EXPR(DIV, "div", "ee", RTX_BIN_ARITH) /* Remainder of operand 0 divided by operand 1. */ DEF_RTL_EXPR(MOD, "mod", "ee", RTX_BIN_ARITH) /* Unsigned divide and remainder. */ DEF_RTL_EXPR(UDIV, "udiv", "ee", RTX_BIN_ARITH) DEF_RTL_EXPR(UMOD, "umod", "ee", RTX_BIN_ARITH) /* Bitwise operations. */ DEF_RTL_EXPR(AND, "and", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(IOR, "ior", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(XOR, "xor", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(NOT, "not", "e", RTX_UNARY) /* Operand: 0: value to be shifted. 1: number of bits. */ DEF_RTL_EXPR(ASHIFT, "ashift", "ee", RTX_BIN_ARITH) /* shift left */ DEF_RTL_EXPR(ROTATE, "rotate", "ee", RTX_BIN_ARITH) /* rotate left */ DEF_RTL_EXPR(ASHIFTRT, "ashiftrt", "ee", RTX_BIN_ARITH) /* arithmetic shift right */ DEF_RTL_EXPR(LSHIFTRT, "lshiftrt", "ee", RTX_BIN_ARITH) /* logical shift right */ DEF_RTL_EXPR(ROTATERT, "rotatert", "ee", RTX_BIN_ARITH) /* rotate right */ /* Minimum and maximum values of two operands. We need both signed and unsigned forms. (We cannot use MIN for SMIN because it conflicts with a macro of the same name.) */ DEF_RTL_EXPR(SMIN, "smin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(SMAX, "smax", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMIN, "umin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMAX, "umax", "ee", RTX_COMM_ARITH) /* These unary operations are used to represent incrementation and decrementation as they occur in memory addresses. The amount of increment or decrement are not represented because they can be understood from the machine-mode of the containing MEM. These operations exist in only two cases: 1. pushes onto the stack. 2. created automatically by the life_analysis pass in flow.c. */ DEF_RTL_EXPR(PRE_DEC, "pre_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(PRE_INC, "pre_inc", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_DEC, "post_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_INC, "post_inc", "e", RTX_AUTOINC) /* These binary operations are used to represent generic address side-effects in memory addresses, except for simple incrementation or decrementation which use the above operations. They are created automatically by the life_analysis pass in flow.c. The first operand is a REG which is used as the address. The second operand is an expression that is assigned to the register, either before (PRE_MODIFY) or after (POST_MODIFY) evaluating the address. Currently, the compiler can only handle second operands of the form (plus (reg) (reg)) and (plus (reg) (const_int)), where the first operand of the PLUS has to be the same register as the first operand of the *_MODIFY. */ DEF_RTL_EXPR(PRE_MODIFY, "pre_modify", "ee", RTX_AUTOINC) DEF_RTL_EXPR(POST_MODIFY, "post_modify", "ee", RTX_AUTOINC) /* Comparison operations. The ordered comparisons exist in two flavors, signed and unsigned. 
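   For example (illustrative operands; pseudo register number 100 is
   made up), if an SImode register holds the bit pattern 0xffffffff then

       (gt  (reg:SI 100) (const_int 0))   is false -- the value is -1
                                          when read as signed, while
       (gtu (reg:SI 100) (const_int 0))   is true  -- the same bits are
                                          the largest unsigned value,

   so choosing between GT/LT/GE/LE and GTU/LTU/GEU/LEU is what encodes
   the signedness of a comparison; the operand modes alone do not.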
*/ DEF_RTL_EXPR(NE, "ne", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(EQ, "eq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(GE, "ge", "ee", RTX_COMPARE) DEF_RTL_EXPR(GT, "gt", "ee", RTX_COMPARE) DEF_RTL_EXPR(LE, "le", "ee", RTX_COMPARE) DEF_RTL_EXPR(LT, "lt", "ee", RTX_COMPARE) DEF_RTL_EXPR(GEU, "geu", "ee", RTX_COMPARE) DEF_RTL_EXPR(GTU, "gtu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LEU, "leu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LTU, "ltu", "ee", RTX_COMPARE) /* Additional floating point unordered comparison flavors. */ DEF_RTL_EXPR(UNORDERED, "unordered", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(ORDERED, "ordered", "ee", RTX_COMM_COMPARE) /* These are equivalent to unordered or ... */ DEF_RTL_EXPR(UNEQ, "uneq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(UNGE, "unge", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNGT, "ungt", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLE, "unle", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLT, "unlt", "ee", RTX_COMPARE) /* This is an ordered NE, ie !UNEQ, ie false for NaN. */ DEF_RTL_EXPR(LTGT, "ltgt", "ee", RTX_COMM_COMPARE) /* Represents the result of sign-extending the sole operand. The machine modes of the operand and of the SIGN_EXTEND expression determine how much sign-extension is going on. */ DEF_RTL_EXPR(SIGN_EXTEND, "sign_extend", "e", RTX_UNARY) /* Similar for zero-extension (such as unsigned short to int). */ DEF_RTL_EXPR(ZERO_EXTEND, "zero_extend", "e", RTX_UNARY) /* Similar but here the operand has a wider mode. */ DEF_RTL_EXPR(TRUNCATE, "truncate", "e", RTX_UNARY) /* Similar for extending floating-point values (such as SFmode to DFmode). */ DEF_RTL_EXPR(FLOAT_EXTEND, "float_extend", "e", RTX_UNARY) DEF_RTL_EXPR(FLOAT_TRUNCATE, "float_truncate", "e", RTX_UNARY) /* Conversion of fixed point operand to floating point value. */ DEF_RTL_EXPR(FLOAT, "float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to fixed point value. Value is defined only when the operand's value is an integer. With floating-point machine mode (and operand with same mode): Operand is rounded toward zero to produce an integer value represented in floating point. */ DEF_RTL_EXPR(FIX, "fix", "e", RTX_UNARY) /* Conversion of unsigned fixed point operand to floating point value. */ DEF_RTL_EXPR(UNSIGNED_FLOAT, "unsigned_float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to *unsigned* fixed point value. Value is defined only when the operand's value is an integer. */ DEF_RTL_EXPR(UNSIGNED_FIX, "unsigned_fix", "e", RTX_UNARY) /* Absolute value */ DEF_RTL_EXPR(ABS, "abs", "e", RTX_UNARY) /* Square root */ DEF_RTL_EXPR(SQRT, "sqrt", "e", RTX_UNARY) /* Find first bit that is set. Value is 1 + number of trailing zeros in the arg., or 0 if arg is 0. */ DEF_RTL_EXPR(FFS, "ffs", "e", RTX_UNARY) /* Count leading zeros. */ DEF_RTL_EXPR(CLZ, "clz", "e", RTX_UNARY) /* Count trailing zeros. */ DEF_RTL_EXPR(CTZ, "ctz", "e", RTX_UNARY) /* Population count (number of 1 bits). */ DEF_RTL_EXPR(POPCOUNT, "popcount", "e", RTX_UNARY) /* Population parity (number of 1 bits modulo 2). */ DEF_RTL_EXPR(PARITY, "parity", "e", RTX_UNARY) /* Reference to a signed bit-field of specified size and position. Operand 0 is the memory unit (usually SImode or QImode) which contains the field's first bit. Operand 1 is the width, in bits. Operand 2 is the number of bits in the memory unit before the first bit of this field. If BITS_BIG_ENDIAN is defined, the first bit is the msb and operand 2 counts from the msb of the memory unit. 
Otherwise, the first bit is the lsb and operand 2 counts from the lsb of the memory unit. */ DEF_RTL_EXPR(SIGN_EXTRACT, "sign_extract", "eee", RTX_BITFIELD_OPS) /* Similar for unsigned bit-field. */ DEF_RTL_EXPR(ZERO_EXTRACT, "zero_extract", "eee", RTX_BITFIELD_OPS) /* For RISC machines. These save memory when splitting insns. */ /* HIGH are the high-order bits of a constant expression. */ DEF_RTL_EXPR(HIGH, "high", "e", RTX_CONST_OBJ) /* LO_SUM is the sum of a register and the low-order bits of a constant expression. */ DEF_RTL_EXPR(LO_SUM, "lo_sum", "ee", RTX_OBJ) /* Header for range information. Operand 0 is the NOTE_INSN_RANGE_BEG insn. Operand 1 is the NOTE_INSN_RANGE_END insn. Operand 2 is a vector of all of the registers that can be substituted within this range. Operand 3 is the number of calls in the range. Operand 4 is the number of insns in the range. Operand 5 is the unique range number for this range. Operand 6 is the basic block # of the start of the live range. Operand 7 is the basic block # of the end of the live range. Operand 8 is the loop depth. Operand 9 is a bitmap of the registers live at the start of the range. Operand 10 is a bitmap of the registers live at the end of the range. Operand 11 is marker number for the start of the range. Operand 12 is the marker number for the end of the range. */ DEF_RTL_EXPR(RANGE_INFO, "range_info", "uuEiiiiiibbii", RTX_EXTRA) /* Registers that can be substituted within the range. Operand 0 is the original pseudo register number. Operand 1 will be filled in with the pseudo register the value is copied for the duration of the range. Operand 2 is the number of references within the range to the register. Operand 3 is the number of sets or clobbers of the register in the range. Operand 4 is the number of deaths the register has. Operand 5 is the copy flags that give the status of whether a copy is needed from the original register to the new register at the beginning of the range, or whether a copy from the new register back to the original at the end of the range. Operand 6 is the live length. Operand 7 is the number of calls that this register is live across. Operand 8 is the symbol node of the variable if the register is a user variable. Operand 9 is the block node that the variable is declared in if the register is a user variable. */ DEF_RTL_EXPR(RANGE_REG, "range_reg", "iiiiiiiitt", RTX_EXTRA) /* Information about a local variable's ranges. Operand 0 is an EXPR_LIST of the different ranges a variable is in where it is copied to a different pseudo register. Operand 1 is the block that the variable is declared in. Operand 2 is the number of distinct ranges. */ DEF_RTL_EXPR(RANGE_VAR, "range_var", "eti", RTX_EXTRA) /* Information about the registers that are live at the current point. Operand 0 is the live bitmap. Operand 1 is the original block number. */ DEF_RTL_EXPR(RANGE_LIVE, "range_live", "bi", RTX_EXTRA) /* Describes a merge operation between two vector values. Operands 0 and 1 are the vectors to be merged, operand 2 is a bitmask that specifies where the parts of the result are taken from. Set bits indicate operand 0, clear bits indicate operand 1. The parts are defined by the mode of the vectors. */ DEF_RTL_EXPR(VEC_MERGE, "vec_merge", "eee", RTX_TERNARY) /* Describes an operation that selects parts of a vector. Operands 0 is the source vector, operand 1 is a PARALLEL that contains a CONST_INT for each of the subparts of the result vector, giving the number of the source subpart that should be stored into it. 
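   For example (a sketch with made-up operands), reversing the elements
   of a V4SImode vector could be written

       (vec_select:V4SI (reg:V4SI 100)
                        (parallel [(const_int 3) (const_int 2)
                                   (const_int 1) (const_int 0)]))

   and extracting a single element into a scalar mode could be written
   (vec_select:SI (reg:V4SI 100) (parallel [(const_int 0)])).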
*/ DEF_RTL_EXPR(VEC_SELECT, "vec_select", "ee", RTX_BIN_ARITH) /* Describes a vector concat operation. Operands 0 and 1 are the source vectors, the result is a vector that is as long as operands 0 and 1 combined and is the concatenation of the two source vectors. */ DEF_RTL_EXPR(VEC_CONCAT, "vec_concat", "ee", RTX_BIN_ARITH) /* Describes an operation that converts a small vector into a larger one by duplicating the input values. The output vector mode must have the same submodes as the input vector mode, and the number of output parts must be an integer multiple of the number of input parts. */ DEF_RTL_EXPR(VEC_DUPLICATE, "vec_duplicate", "e", RTX_UNARY) /* Addition with signed saturation */ DEF_RTL_EXPR(SS_PLUS, "ss_plus", "ee", RTX_COMM_ARITH) /* Addition with unsigned saturation */ DEF_RTL_EXPR(US_PLUS, "us_plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1, with signed saturation. */ DEF_RTL_EXPR(SS_MINUS, "ss_minus", "ee", RTX_BIN_ARITH) /* Operand 0 minus operand 1, with unsigned saturation. */ DEF_RTL_EXPR(US_MINUS, "us_minus", "ee", RTX_BIN_ARITH) /* Signed saturating truncate. */ DEF_RTL_EXPR(SS_TRUNCATE, "ss_truncate", "e", RTX_UNARY) /* Unsigned saturating truncate. */ DEF_RTL_EXPR(US_TRUNCATE, "us_truncate", "e", RTX_UNARY) /* Information about the variable and its location. */ DEF_RTL_EXPR(VAR_LOCATION, "var_location", "te", RTX_EXTRA) /* Local variables: mode:c End: */ #undef DEF_RTL_EXPR LAST_AND_UNUSED_RTX_CODE}; /* A convenient way to get a value for NUM_RTX_CODE. Assumes default enum value assignment. */ #define NUM_RTX_CODE ((int) LAST_AND_UNUSED_RTX_CODE) /* The cast here, saves many elsewhere. */ /* Register Transfer Language EXPRESSIONS CODE CLASSES */ enum rtx_class { /* We check bit 0-1 of some rtx class codes in the predicates below. */ /* Bit 0 = comparison if 0, arithmetic is 1 Bit 1 = 1 if commutative. */ RTX_COMPARE, /* 0 */ RTX_COMM_COMPARE, RTX_BIN_ARITH, RTX_COMM_ARITH, /* Must follow the four preceding values. */ RTX_UNARY, /* 4 */ RTX_EXTRA, RTX_MATCH, RTX_INSN, /* Bit 0 = 1 if constant. */ RTX_OBJ, /* 8 */ RTX_CONST_OBJ, RTX_TERNARY, RTX_BITFIELD_OPS, RTX_AUTOINC }; #define RTX_OBJ_MASK (~1) #define RTX_OBJ_RESULT (RTX_OBJ & RTX_OBJ_MASK) #define RTX_COMPARE_MASK (~1) #define RTX_COMPARE_RESULT (RTX_COMPARE & RTX_COMPARE_MASK) #define RTX_ARITHMETIC_MASK (~1) #define RTX_ARITHMETIC_RESULT (RTX_COMM_ARITH & RTX_ARITHMETIC_MASK) #define RTX_BINARY_MASK (~3) #define RTX_BINARY_RESULT (RTX_COMPARE & RTX_BINARY_MASK) #define RTX_COMMUTATIVE_MASK (~2) #define RTX_COMMUTATIVE_RESULT (RTX_COMM_COMPARE & RTX_COMMUTATIVE_MASK) #define RTX_NON_COMMUTATIVE_RESULT (RTX_COMPARE & RTX_COMMUTATIVE_MASK) #define RTX_EXPR_FIRST (RTX_COMPARE) #define RTX_EXPR_LAST (RTX_UNARY) extern const unsigned char rtx_length[NUM_RTX_CODE]; #define GET_RTX_LENGTH(CODE) (rtx_length[(int) (CODE)]) extern const char * const rtx_name[NUM_RTX_CODE]; #define GET_RTX_NAME(CODE) (rtx_name[(int) (CODE)]) extern const char * const rtx_format[NUM_RTX_CODE]; #define GET_RTX_FORMAT(CODE) (rtx_format[(int) (CODE)]) extern const enum rtx_class rtx_class[NUM_RTX_CODE]; #define GET_RTX_CLASS(CODE) (rtx_class[(int) (CODE)]) extern const unsigned char rtx_size[NUM_RTX_CODE]; extern const unsigned char rtx_next[NUM_RTX_CODE]; /* The flags and bitfields of an ADDR_DIFF_VEC. BASE is the base label relative to which the offsets are calculated, as explained in rtl.def. 
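   As a sketch of how these bits might be consulted (not code taken from
   the branch-shortening pass itself; `body' stands for the ADDR_DIFF_VEC
   pattern of a jump-table insn):

       addr_diff_vec_flags flags = ADDR_DIFF_VEC_FLAGS (body);
       if (flags.offset_unsigned)
         ... all offsets may be treated as non-negative ...

   ADDR_DIFF_VEC_FLAGS is the accessor defined further below.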
*/ typedef struct { /* Set at the start of shorten_branches - ONLY WHEN OPTIMIZING - : */ unsigned min_align: 8; /* Flags: */ unsigned base_after_vec: 1; /* BASE is after the ADDR_DIFF_VEC. */ unsigned min_after_vec: 1; /* minimum address target label is after the ADDR_DIFF_VEC. */ unsigned max_after_vec: 1; /* maximum address target label is after the ADDR_DIFF_VEC. */ unsigned min_after_base: 1; /* minimum address target label is after BASE. */ unsigned max_after_base: 1; /* maximum address target label is after BASE. */ /* Set by the actual branch shortening process - ONLY WHEN OPTIMIZING - : */ unsigned offset_unsigned: 1; /* offsets have to be treated as unsigned. */ unsigned : 2; unsigned scale : 8; } addr_diff_vec_flags; /* Structure used to describe the attributes of a MEM. These are hashed so MEMs that the same attributes share a data structure. This means they cannot be modified in place. If any element is nonzero, it means the value of the corresponding attribute is unknown. */ /* ALIGN and SIZE are the alignment and size of the MEM itself, while EXPR can describe a larger underlying object, which might have a stricter alignment; OFFSET is the offset of the MEM within that object. */ typedef struct mem_attrs GTY(()) { HOST_WIDE_INT alias; /* Memory alias set. */ tree expr; /* expr corresponding to MEM. */ rtx offset; /* Offset from start of DECL, as CONST_INT. */ rtx size; /* Size in bytes, as a CONST_INT. */ unsigned int align; /* Alignment of MEM in bits. */ } mem_attrs; /* Structure used to describe the attributes of a REG in similar way as mem_attrs does for MEM above. */ typedef struct reg_attrs GTY(()) { tree decl; /* decl corresponding to REG. */ HOST_WIDE_INT offset; /* Offset from start of DECL. */ } reg_attrs; /* Common union for an element of an rtx. */ union rtunion_def { int rtint; unsigned int rtuint; const char *rtstr; rtx rtx; rtvec rtvec; enum machine_mode rttype; addr_diff_vec_flags rt_addr_diff_vec_flags; struct cselib_val_struct *rt_cselib; struct bitmap_head_def *rtbit; tree rttree; struct basic_block_def *bb; mem_attrs *rtmem; reg_attrs *rtreg; }; typedef union rtunion_def rtunion; /* RTL expression ("rtx"). */ struct rtx_def GTY((chain_next ("RTX_NEXT (&%h)"), chain_prev ("RTX_PREV (&%h)"))) { /* The kind of expression this is. */ ENUM_BITFIELD(rtx_code) code: 16; /* The kind of value the expression has. */ ENUM_BITFIELD(machine_mode) mode : 8; /* 1 in a MEM if we should keep the alias set for this mem unchanged when we access a component. 1 in a CALL_INSN if it is a sibling call. 1 in a SET that is for a return. In a CODE_LABEL, part of the two-bit alternate entry field. */ unsigned int jump : 1; /* In a CODE_LABEL, part of the two-bit alternate entry field. 1 in a MEM if it cannot trap. */ unsigned int call : 1; /* 1 in a REG, MEM, or CONCAT if the value is set at most once, anywhere. 1 in a SUBREG if it references an unsigned object whose mode has been from a promoted to a wider mode. 1 in a SYMBOL_REF if it addresses something in the per-function constants pool. 1 in a CALL_INSN, NOTE, or EXPR_LIST for a const or pure call. 1 in a JUMP_INSN, CALL_INSN, or INSN of an annulling branch. */ unsigned int unchanging : 1; /* 1 in a MEM or ASM_OPERANDS expression if the memory reference is volatile. 1 in an INSN, CALL_INSN, JUMP_INSN, CODE_LABEL, BARRIER, or NOTE if it has been deleted. 1 in a REG expression if corresponds to a variable declared by the user, 0 for an internally generated temporary. 1 in a SUBREG with a negative value. 
1 in a LABEL_REF or in a REG_LABEL note for a non-local label. In a SYMBOL_REF, this flag is used for machine-specific purposes. */ unsigned int volatil : 1; /* 1 in a MEM referring to a field of an aggregate. 0 if the MEM was a variable or the result of a * operator in C; 1 if it was the result of a . or -> operator (on a struct) in C. 1 in a REG if the register is used only in exit code a loop. 1 in a SUBREG expression if was generated from a variable with a promoted mode. 1 in a CODE_LABEL if the label is used for nonlocal gotos and must not be deleted even if its count is zero. 1 in a LABEL_REF if this is a reference to a label outside the current loop. 1 in an INSN, JUMP_INSN or CALL_INSN if this insn must be scheduled together with the preceding insn. Valid only within sched. 1 in an INSN, JUMP_INSN, or CALL_INSN if insn is in a delay slot and from the target of a branch. Valid from reorg until end of compilation; cleared before used. 1 in an INSN, JUMP_INSN or CALL_INSN or related rtx if this insn is dead code. Valid only during dead-code elimination phase; cleared before use. */ unsigned int in_struct : 1; /* At the end of RTL generation, 1 if this rtx is used. This is used for copying shared structure. See `unshare_all_rtl'. In a REG, this is not needed for that purpose, and used instead in `leaf_renumber_regs_insn'. 1 in a SYMBOL_REF, means that emit_library_call has used it as the function. */ unsigned int used : 1; /* 1 in an INSN or a SET if this rtx is related to the call frame, either changing how we compute the frame address or saving and restoring registers in the prologue and epilogue. 1 in a REG or MEM if it is a pointer. 1 in a SYMBOL_REF if it addresses something in the per-function constant string pool. */ unsigned frame_related : 1; /* 1 in a REG or PARALLEL that is the current function's return value. 1 in a MEM if it refers to a scalar. 1 in a SYMBOL_REF for a weak symbol. */ unsigned return_val : 1; /* The first element of the operands of this rtx. The number of operands and their types are controlled by the `code' field, according to rtl.def. */ union u { rtunion fld[1]; HOST_WIDE_INT hwint[1]; } GTY ((special ("rtx_def"), desc ("GET_CODE (&%0)"))) u; }; /* The size in bytes of an rtx header (code, mode and flags). */ #define RTX_HDR_SIZE offsetof (struct rtx_def, u) /* The size in bytes of an rtx with code CODE. */ #define RTX_SIZE(CODE) rtx_size[CODE] #define NULL_RTX (rtx) 0 /* The "next" and "previous" RTX, relative to this one. */ #define RTX_NEXT(X) (rtx_next[GET_CODE (X)] == 0 ? NULL \ : *(rtx *)(((char *)X) + rtx_next[GET_CODE (X)])) /* FIXME: the "NEXT_INSN (PREV_INSN (X)) == X" condition shouldn't be needed. */ #define RTX_PREV(X) ((GET_CODE (X) == INSN \ || GET_CODE (X) == CALL_INSN \ || GET_CODE (X) == JUMP_INSN \ || GET_CODE (X) == NOTE \ || GET_CODE (X) == BARRIER \ || GET_CODE (X) == CODE_LABEL) \ && PREV_INSN (X) != NULL \ && NEXT_INSN (PREV_INSN (X)) == X \ ? PREV_INSN (X) : NULL) /* Define macros to access the `code' field of the rtx. */ #define GET_CODE(RTX) ((enum rtx_code) (RTX)->code) #define PUT_CODE(RTX, CODE) ((RTX)->code = (CODE)) #define GET_MODE(RTX) ((enum machine_mode) (RTX)->mode) #define PUT_MODE(RTX, MODE) ((RTX)->mode = (MODE)) /* RTL vector. These appear inside RTX's when there is a need for a variable number of things. The principle use is inside PARALLEL expressions. 
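   For example (register numbers purely illustrative), an insn whose
   pattern both performs an addition and clobbers a flags register holds
   a two-element rtvec inside a PARALLEL:

       (parallel [(set (reg:SI 100)
                       (plus:SI (reg:SI 101) (reg:SI 102)))
                  (clobber (reg:CC 17))])

   XVECLEN and XVECEXP, defined below, yield the number of elements and
   the individual elements of such a vector.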
*/ struct rtvec_def GTY(()) { int num_elem; /* number of elements */ rtx GTY ((length ("%h.num_elem"))) elem[1]; }; #define NULL_RTVEC (rtvec) 0 #define GET_NUM_ELEM(RTVEC) ((RTVEC)->num_elem) #define PUT_NUM_ELEM(RTVEC, NUM) ((RTVEC)->num_elem = (NUM)) /* Predicate yielding nonzero iff X is an rtx for a register. */ #define REG_P(X) (GET_CODE (X) == REG) /* Predicate yielding nonzero iff X is an rtx for a memory location. */ #define MEM_P(X) (GET_CODE (X) == MEM) /* Predicate yielding nonzero iff X is a label insn. */ #define LABEL_P(X) (GET_CODE (X) == CODE_LABEL) /* Predicate yielding nonzero iff X is a jump insn. */ #define JUMP_P(X) (GET_CODE (X) == JUMP_INSN) /* Predicate yielding nonzero iff X is a call insn. */ #define CALL_P(X) (GET_CODE (X) == CALL_INSN) /* Predicate yielding nonzero iff X is an insn that cannot jump. */ #define NONJUMP_INSN_P(X) (GET_CODE (X) == INSN) /* Predicate yielding nonzero iff X is a real insn. */ #define INSN_P(X) \ (NONJUMP_INSN_P (X) || JUMP_P (X) || CALL_P (X)) /* Predicate yielding nonzero iff X is a note insn. */ #define NOTE_P(X) (GET_CODE (X) == NOTE) /* Predicate yielding nonzero iff X is a barrier insn. */ #define BARRIER_P(X) (GET_CODE (X) == BARRIER) /* Predicate yielding nonzero iff X is a data for a jump table. */ #define JUMP_TABLE_DATA_P(INSN) \ (JUMP_P (INSN) && (GET_CODE (PATTERN (INSN)) == ADDR_VEC || \ GET_CODE (PATTERN (INSN)) == ADDR_DIFF_VEC)) /* 1 if X is a unary operator. */ #define UNARY_P(X) \ (GET_RTX_CLASS (GET_CODE (X)) == RTX_UNARY) /* 1 if X is a binary operator. */ #define BINARY_P(X) \ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_BINARY_MASK) == RTX_BINARY_RESULT) /* 1 if X is an arithmetic operator. */ #define ARITHMETIC_P(X) \ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_ARITHMETIC_MASK) \ == RTX_ARITHMETIC_RESULT) /* 1 if X is an arithmetic operator. */ #define COMMUTATIVE_ARITH_P(X) \ (GET_RTX_CLASS (GET_CODE (X)) == RTX_COMM_ARITH) /* 1 if X is a commutative arithmetic operator or a comparison operator. These two are sometimes selected together because it is possible to swap the two operands. */ #define SWAPPABLE_OPERANDS_P(X) \ ((1 << GET_RTX_CLASS (GET_CODE (X))) \ & ((1 << RTX_COMM_ARITH) | (1 << RTX_COMM_COMPARE) \ | (1 << RTX_COMPARE))) /* 1 if X is a non-commutative operator. */ #define NON_COMMUTATIVE_P(X) \ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_COMMUTATIVE_MASK) \ == RTX_NON_COMMUTATIVE_RESULT) /* 1 if X is a commutative operator on integers. */ #define COMMUTATIVE_P(X) \ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_COMMUTATIVE_MASK) \ == RTX_COMMUTATIVE_RESULT) /* 1 if X is a relational operator. */ #define COMPARISON_P(X) \ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_COMPARE_MASK) == RTX_COMPARE_RESULT) /* 1 if X is a constant value that is an integer. */ #define CONSTANT_P(X) \ (GET_RTX_CLASS (GET_CODE (X)) == RTX_CONST_OBJ \ || GET_CODE (X) == CONST_VECTOR) /* 1 if X can be used to represent an object. */ #define OBJECT_P(X) \ ((GET_RTX_CLASS (GET_CODE (X)) & RTX_OBJ_MASK) == RTX_OBJ_RESULT) /* General accessor macros for accessing the fields of an rtx. */ #if defined ENABLE_RTL_CHECKING && (GCC_VERSION >= 2007) /* The bit with a star outside the statement expr and an & inside is so that N can be evaluated only once. 
*/
*/ #define RTL_CHECK1(RTX, N, C1) __extension__ \ (*({ rtx const _rtx = (RTX); const int _n = (N); \ const enum rtx_code _code = GET_CODE (_rtx); \ if (_n < 0 || _n >= GET_RTX_LENGTH (_code)) \ rtl_check_failed_bounds (_rtx, _n, __FILE__, __LINE__, \ __FUNCTION__); \ if (GET_RTX_FORMAT(_code)[_n] != C1) \ rtl_check_failed_type1 (_rtx, _n, C1, __FILE__, __LINE__, \ __FUNCTION__); \ &_rtx->u.fld[_n]; })) #define RTL_CHECK2(RTX, N, C1, C2) __extension__ \ (*({ rtx const _rtx = (RTX); const int _n = (N); \ const enum rtx_code _code = GET_CODE (_rtx); \ if (_n < 0 || _n >= GET_RTX_LENGTH (_code)) \ rtl_check_failed_bounds (_rtx, _n, __FILE__, __LINE__, \ __FUNCTION__); \ if (GET_RTX_FORMAT(_code)[_n] != C1 \ && GET_RTX_FORMAT(_code)[_n] != C2) \ rtl_check_failed_type2 (_rtx, _n, C1, C2, __FILE__, __LINE__, \ __FUNCTION__); \ &_rtx->u.fld[_n]; })) #define RTL_CHECKC1(RTX, N, C) __extension__ \ (*({ rtx const _rtx = (RTX); const int _n = (N); \ if (GET_CODE (_rtx) != (C)) \ rtl_check_failed_code1 (_rtx, (C), __FILE__, __LINE__, \ __FUNCTION__); \ &_rtx->u.fld[_n]; })) #define RTL_CHECKC2(RTX, N, C1, C2) __extension__ \ (*({ rtx const _rtx = (RTX); const int _n = (N); \ const enum rtx_code _code = GET_CODE (_rtx); \ if (_code != (C1) && _code != (C2)) \ rtl_check_failed_code2 (_rtx, (C1), (C2), __FILE__, __LINE__, \ __FUNCTION__); \ &_rtx->u.fld[_n]; })) #define RTVEC_ELT(RTVEC, I) __extension__ \ (*({ rtvec const _rtvec = (RTVEC); const int _i = (I); \ if (_i < 0 || _i >= GET_NUM_ELEM (_rtvec)) \ rtvec_check_failed_bounds (_rtvec, _i, __FILE__, __LINE__, \ __FUNCTION__); \ &_rtvec->elem[_i]; })) #define XWINT(RTX, N) __extension__ \ (*({ rtx const _rtx = (RTX); const int _n = (N); \ const enum rtx_code _code = GET_CODE (_rtx); \ if (_n < 0 || _n >= GET_RTX_LENGTH (_code)) \ rtl_check_failed_bounds (_rtx, _n, __FILE__, __LINE__, \ __FUNCTION__); \ if (GET_RTX_FORMAT(_code)[_n] != 'w') \ rtl_check_failed_type1 (_rtx, _n, 'w', __FILE__, __LINE__, \ __FUNCTION__); \ &_rtx->u.hwint[_n]; })) #define XCWINT(RTX, N, C) __extension__ \ (*({ rtx const _rtx = (RTX); \ if (GET_CODE (_rtx) != (C)) \ rtl_check_failed_code1 (_rtx, (C), __FILE__, __LINE__, \ __FUNCTION__); \ &_rtx->u.hwint[N]; })) extern void rtl_check_failed_bounds (rtx, int, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void rtl_check_failed_type1 (rtx, int, int, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void rtl_check_failed_type2 (rtx, int, int, int, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void rtl_check_failed_code1 (rtx, enum rtx_code, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void rtl_check_failed_code2 (rtx, enum rtx_code, enum rtx_code, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void rtvec_check_failed_bounds (rtvec, int, const char *, int, const char *) ATTRIBUTE_NORETURN; #else /* not ENABLE_RTL_CHECKING */ #define RTL_CHECK1(RTX, N, C1) ((RTX)->u.fld[N]) #define RTL_CHECK2(RTX, N, C1, C2) ((RTX)->u.fld[N]) #define RTL_CHECKC1(RTX, N, C) ((RTX)->u.fld[N]) #define RTL_CHECKC2(RTX, N, C1, C2) ((RTX)->u.fld[N]) #define RTVEC_ELT(RTVEC, I) ((RTVEC)->elem[I]) #define XWINT(RTX, N) ((RTX)->u.hwint[N]) #define XCWINT(RTX, N, C) ((RTX)->u.hwint[N]) #endif /* General accessor macros for accessing the flags of an rtx. */ /* Access an individual rtx flag, with no checking of any kind. 
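   For example (illustrative), RTX_FLAG (x, volatil) expands to
   ((x)->volatil) and may read or write that bit on any rtx whatsoever;
   the RTL_FLAG_CHECK* macros that follow are what the flag-specific
   accessors use to verify the rtx code first when flag checking is
   enabled.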
*/ #define RTX_FLAG(RTX, FLAG) ((RTX)->FLAG) #if defined ENABLE_RTL_FLAG_CHECKING && (GCC_VERSION >= 2007) #define RTL_FLAG_CHECK1(NAME, RTX, C1) __extension__ \ ({ rtx const _rtx = (RTX); \ if (GET_CODE(_rtx) != C1) \ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \ __FUNCTION__); \ _rtx; }) #define RTL_FLAG_CHECK2(NAME, RTX, C1, C2) __extension__ \ ({ rtx const _rtx = (RTX); \ if (GET_CODE(_rtx) != C1 && GET_CODE(_rtx) != C2) \ rtl_check_failed_flag (NAME,_rtx, __FILE__, __LINE__, \ __FUNCTION__); \ _rtx; }) #define RTL_FLAG_CHECK3(NAME, RTX, C1, C2, C3) __extension__ \ ({ rtx const _rtx = (RTX); \ if (GET_CODE(_rtx) != C1 && GET_CODE(_rtx) != C2 \ && GET_CODE(_rtx) != C3) \ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \ __FUNCTION__); \ _rtx; }) #define RTL_FLAG_CHECK4(NAME, RTX, C1, C2, C3, C4) __extension__ \ ({ rtx const _rtx = (RTX); \ if (GET_CODE(_rtx) != C1 && GET_CODE(_rtx) != C2 \ && GET_CODE(_rtx) != C3 && GET_CODE(_rtx) != C4) \ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \ __FUNCTION__); \ _rtx; }) #define RTL_FLAG_CHECK5(NAME, RTX, C1, C2, C3, C4, C5) __extension__ \ ({ rtx const _rtx = (RTX); \ if (GET_CODE(_rtx) != C1 && GET_CODE(_rtx) != C2 \ && GET_CODE(_rtx) != C3 && GET_CODE(_rtx) != C4 \ && GET_CODE(_rtx) != C5) \ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \ __FUNCTION__); \ _rtx; }) #define RTL_FLAG_CHECK6(NAME, RTX, C1, C2, C3, C4, C5, C6) \ __extension__ \ ({ rtx const _rtx = (RTX); \ if (GET_CODE(_rtx) != C1 && GET_CODE(_rtx) != C2 \ && GET_CODE(_rtx) != C3 && GET_CODE(_rtx) != C4 \ && GET_CODE(_rtx) != C5 && GET_CODE(_rtx) != C6) \ rtl_check_failed_flag (NAME,_rtx, __FILE__, __LINE__, \ __FUNCTION__); \ _rtx; }) #define RTL_FLAG_CHECK7(NAME, RTX, C1, C2, C3, C4, C5, C6, C7) \ __extension__ \ ({ rtx const _rtx = (RTX); \ if (GET_CODE(_rtx) != C1 && GET_CODE(_rtx) != C2 \ && GET_CODE(_rtx) != C3 && GET_CODE(_rtx) != C4 \ && GET_CODE(_rtx) != C5 && GET_CODE(_rtx) != C6 \ && GET_CODE(_rtx) != C7) \ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \ __FUNCTION__); \ _rtx; }) #define RTL_FLAG_CHECK8(NAME, RTX, C1, C2, C3, C4, C5, C6, C7, C8) \ __extension__ \ ({ rtx const _rtx = (RTX); \ if (GET_CODE(_rtx) != C1 && GET_CODE(_rtx) != C2 \ && GET_CODE(_rtx) != C3 && GET_CODE(_rtx) != C4 \ && GET_CODE(_rtx) != C5 && GET_CODE(_rtx) != C6 \ && GET_CODE(_rtx) != C7 && GET_CODE(_rtx) != C8) \ rtl_check_failed_flag (NAME, _rtx, __FILE__, __LINE__, \ __FUNCTION__); \ _rtx; }) extern void rtl_check_failed_flag (const char *, rtx, const char *, int, const char *) ATTRIBUTE_NORETURN ; #else /* not ENABLE_RTL_FLAG_CHECKING */ #define RTL_FLAG_CHECK1(NAME, RTX, C1) (RTX) #define RTL_FLAG_CHECK2(NAME, RTX, C1, C2) (RTX) #define RTL_FLAG_CHECK3(NAME, RTX, C1, C2, C3) (RTX) #define RTL_FLAG_CHECK4(NAME, RTX, C1, C2, C3, C4) (RTX) #define RTL_FLAG_CHECK5(NAME, RTX, C1, C2, C3, C4, C5) (RTX) #define RTL_FLAG_CHECK6(NAME, RTX, C1, C2, C3, C4, C5, C6) (RTX) #define RTL_FLAG_CHECK7(NAME, RTX, C1, C2, C3, C4, C5, C6, C7) (RTX) #define RTL_FLAG_CHECK8(NAME, RTX, C1, C2, C3, C4, C5, C6, C7, C8) (RTX) #endif #define CLEAR_RTX_FLAGS(RTX) \ do { \ rtx const _rtx = (RTX); \ _rtx->jump = 0; \ _rtx->call = 0; \ _rtx->unchanging = 0; \ _rtx->volatil = 0; \ _rtx->in_struct = 0; \ _rtx->used = 0; \ _rtx->frame_related = 0; \ _rtx->return_val = 0; \ } while (0) #define XINT(RTX, N) (RTL_CHECK2 (RTX, N, 'i', 'n').rtint) #define XSTR(RTX, N) (RTL_CHECK2 (RTX, N, 's', 'S').rtstr) #define XEXP(RTX, N) (RTL_CHECK2 (RTX, N, 'e', 'u').rtx) #define XVEC(RTX, N) 
(RTL_CHECK2 (RTX, N, 'E', 'V').rtvec) #define XMODE(RTX, N) (RTL_CHECK1 (RTX, N, 'M').rttype) #define XBITMAP(RTX, N) (RTL_CHECK1 (RTX, N, 'b').rtbit) #define XTREE(RTX, N) (RTL_CHECK1 (RTX, N, 't').rttree) #define XBBDEF(RTX, N) (RTL_CHECK1 (RTX, N, 'B').bb) #define XTMPL(RTX, N) (RTL_CHECK1 (RTX, N, 'T').rtstr) #define XVECEXP(RTX, N, M) RTVEC_ELT (XVEC (RTX, N), M) #define XVECLEN(RTX, N) GET_NUM_ELEM (XVEC (RTX, N)) /* These are like XINT, etc. except that they expect a '0' field instead of the normal type code. */ #define X0INT(RTX, N) (RTL_CHECK1 (RTX, N, '0').rtint) #define X0UINT(RTX, N) (RTL_CHECK1 (RTX, N, '0').rtuint) #define X0STR(RTX, N) (RTL_CHECK1 (RTX, N, '0').rtstr) #define X0EXP(RTX, N) (RTL_CHECK1 (RTX, N, '0').rtx) #define X0VEC(RTX, N) (RTL_CHECK1 (RTX, N, '0').rtvec) #define X0MODE(RTX, N) (RTL_CHECK1 (RTX, N, '0').rttype) #define X0BITMAP(RTX, N) (RTL_CHECK1 (RTX, N, '0').rtbit) #define X0TREE(RTX, N) (RTL_CHECK1 (RTX, N, '0').rttree) #define X0BBDEF(RTX, N) (RTL_CHECK1 (RTX, N, '0').bb) #define X0ADVFLAGS(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_addr_diff_vec_flags) #define X0CSELIB(RTX, N) (RTL_CHECK1 (RTX, N, '0').rt_cselib) #define X0MEMATTR(RTX, N) (RTL_CHECKC1 (RTX, N, MEM).rtmem) #define X0REGATTR(RTX, N) (RTL_CHECKC1 (RTX, N, REG).rtreg) /* Access a '0' field with any type. */ #define X0ANY(RTX, N) RTL_CHECK1 (RTX, N, '0') #define XCINT(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rtint) #define XCUINT(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rtuint) #define XCSTR(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rtstr) #define XCEXP(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rtx) #define XCVEC(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rtvec) #define XCMODE(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rttype) #define XCBITMAP(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rtbit) #define XCTREE(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rttree) #define XCBBDEF(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).bb) #define XCADVFLAGS(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_addr_diff_vec_flags) #define XCCSELIB(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_cselib) #define XCVECEXP(RTX, N, M, C) RTVEC_ELT (XCVEC (RTX, N, C), M) #define XCVECLEN(RTX, N, C) GET_NUM_ELEM (XCVEC (RTX, N, C)) #define XC2EXP(RTX, N, C1, C2) (RTL_CHECKC2 (RTX, N, C1, C2).rtx) /* ACCESS MACROS for particular fields of insns. */ /* Holds a unique number for each insn. These are not necessarily sequentially increasing. */ #define INSN_UID(INSN) XINT (INSN, 0) /* Chain insns together in sequence. */ #define PREV_INSN(INSN) XEXP (INSN, 1) #define NEXT_INSN(INSN) XEXP (INSN, 2) #define BLOCK_FOR_INSN(INSN) XBBDEF (INSN, 3) #define INSN_LOCATOR(INSN) XINT (INSN, 4) /* The body of an insn. */ #define PATTERN(INSN) XEXP (INSN, 5) /* Code number of instruction, from when it was recognized. -1 means this instruction has not been recognized yet. */ #define INSN_CODE(INSN) XINT (INSN, 6) /* Set up in flow.c; empty before then. Holds a chain of INSN_LIST rtx's whose first operands point at previous insns with direct data-flow connections to this one. That means that those insns set variables whose next use is in this insn. They are always in the same basic block as this insn. */ #define LOG_LINKS(INSN) XEXP(INSN, 7) #define RTX_UNCHANGING_P(RTX) \ (RTL_FLAG_CHECK3("RTX_UNCHANGING_P", (RTX), REG, MEM, CONCAT)->unchanging) #define RTX_FRAME_RELATED_P(RTX) \ (RTL_FLAG_CHECK5("RTX_FRAME_RELATED_P", (RTX), INSN, CALL_INSN, \ JUMP_INSN, BARRIER, SET)->frame_related) /* 1 if RTX is an insn that has been deleted. 
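   For example (a minimal sketch, not taken from any particular pass), a
   walk over the insn chain that skips deleted insns might look like

       rtx insn;
       for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
         if (INSN_P (insn) && !INSN_DELETED_P (insn))
           ... examine PATTERN (insn) ...

   using the NEXT_INSN and PATTERN accessors defined above.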
*/ #define INSN_DELETED_P(RTX) \ (RTL_FLAG_CHECK6("INSN_DELETED_P", (RTX), INSN, CALL_INSN, JUMP_INSN, \ CODE_LABEL, BARRIER, NOTE)->volatil) /* 1 if RTX is a call to a const or pure function. */ #define CONST_OR_PURE_CALL_P(RTX) \ (RTL_FLAG_CHECK3("CONST_OR_PURE_CALL_P", (RTX), CALL_INSN, NOTE, \ EXPR_LIST)->unchanging) /* 1 if RTX is a call_insn for a sibling call. */ #define SIBLING_CALL_P(RTX) \ (RTL_FLAG_CHECK1("SIBLING_CALL_P", (RTX), CALL_INSN)->jump) /* 1 if RTX is a jump_insn, call_insn, or insn that is an annulling branch. */ #define INSN_ANNULLED_BRANCH_P(RTX) \ (RTL_FLAG_CHECK3("INSN_ANNULLED_BRANCH_P", (RTX), JUMP_INSN, CALL_INSN, INSN)->unchanging) /* 1 if RTX is an insn that is dead code. Valid only for dead-code elimination phase. */ #define INSN_DEAD_CODE_P(RTX) \ (RTL_FLAG_CHECK3("INSN_DEAD_CODE_P", (RTX), INSN, CALL_INSN, JUMP_INSN)->in_struct) /* 1 if RTX is an insn in a delay slot and is from the target of the branch. If the branch insn has INSN_ANNULLED_BRANCH_P set, this insn should only be executed if the branch is taken. For annulled branches with this bit clear, the insn should be executed only if the branch is not taken. */ #define INSN_FROM_TARGET_P(RTX) \ (RTL_FLAG_CHECK3("INSN_FROM_TARGET_P", (RTX), INSN, JUMP_INSN, CALL_INSN)->in_struct) #define ADDR_DIFF_VEC_FLAGS(RTX) X0ADVFLAGS(RTX, 4) #define CSELIB_VAL_PTR(RTX) X0CSELIB(RTX, 0) /* Holds a list of notes on what this insn does to various REGs. It is a chain of EXPR_LIST rtx's, where the second operand is the chain pointer and the first operand is the REG being described. The mode field of the EXPR_LIST contains not a real machine mode but a value from enum reg_note. */ #define REG_NOTES(INSN) XEXP(INSN, 8) /* Don't forget to change reg_note_name in rtl.c. */ enum reg_note { /* The value in REG dies in this insn (i.e., it is not needed past this insn). If REG is set in this insn, the REG_DEAD note may, but need not, be omitted. */ REG_DEAD = 1, /* The REG is autoincremented or autodecremented. */ REG_INC, /* Describes the insn as a whole; it says that the insn sets a register to a constant value or to be equivalent to a memory address. If the register is spilled to the stack then the constant value should be substituted for it. The contents of the REG_EQUIV is the constant value or memory address, which may be different from the source of the SET although it has the same value. A REG_EQUIV note may also appear on an insn which copies a register parameter to a pseudo-register, if there is a memory address which could be used to hold that pseudo-register throughout the function. */ REG_EQUIV, /* Like REG_EQUIV except that the destination is only momentarily equal to the specified rtx. Therefore, it cannot be used for substitution; but it can be used for cse. */ REG_EQUAL, /* This insn copies the return-value of a library call out of the hard reg for return values. This note is actually an INSN_LIST and it points to the first insn involved in setting up arguments for the call. flow.c uses this to delete the entire library call when its result is dead. */ REG_RETVAL, /* The inverse of REG_RETVAL: it goes on the first insn of the library call and points at the one that has the REG_RETVAL. This note is also an INSN_LIST. */ REG_LIBCALL, /* The register is always nonnegative during the containing loop. This is used in branches so that decrement and branch instructions terminating on zero can be matched. 
There must be an insn pattern in the md file named `decrement_and_branch_until_zero' or else this will never be added to any instructions. */ REG_NONNEG, /* There is no conflict *after this insn* between the register in the note and the destination of this insn. */ REG_NO_CONFLICT, /* Identifies a register set in this insn and never used. */ REG_UNUSED, /* REG_CC_SETTER and REG_CC_USER link a pair of insns that set and use CC0, respectively. Normally, these are required to be consecutive insns, but we permit putting a cc0-setting insn in the delay slot of a branch as long as only one copy of the insn exists. In that case, these notes point from one to the other to allow code generation to determine what any require information and to properly update CC_STATUS. These notes are INSN_LISTs. */ REG_CC_SETTER, REG_CC_USER, /* Points to a CODE_LABEL. Used by non-JUMP_INSNs to say that the CODE_LABEL contained in the REG_LABEL note is used by the insn. This note is an INSN_LIST. */ REG_LABEL, /* REG_DEP_ANTI and REG_DEP_OUTPUT are used in LOG_LINKS to represent write-after-read and write-after-write dependencies respectively. Data dependencies, which are the only type of LOG_LINK created by flow, are represented by a 0 reg note kind. */ REG_DEP_ANTI, REG_DEP_OUTPUT, /* REG_BR_PROB is attached to JUMP_INSNs and CALL_INSNs. It has an integer value. For jumps, it is the probability that this is a taken branch. For calls, it is the probability that this call won't return. */ REG_BR_PROB, /* REG_VALUE_PROFILE is attached when the profile is read in to an insn before that the code to profile the value is inserted. It contains the results of profiling. */ REG_VALUE_PROFILE, /* Attached to a call insn; indicates that the call is malloc-like and that the pointer returned cannot alias anything else. */ REG_NOALIAS, /* Used to optimize rtl generated by dynamic stack allocations for targets where SETJMP_VIA_SAVE_AREA is true. */ REG_SAVE_AREA, /* REG_BR_PRED is attached to JUMP_INSNs and CALL_INSNSs. It contains CONCAT of two integer value. First specifies the branch predictor that added the note, second specifies the predicted hitrate of branch in the same format as REG_BR_PROB note uses. */ REG_BR_PRED, /* Attached to insns that are RTX_FRAME_RELATED_P, but are too complex for DWARF to interpret what they imply. The attached rtx is used instead of intuition. */ REG_FRAME_RELATED_EXPR, /* Indicates that REG holds the exception context for the function. This context is shared by inline functions, so the code to acquire the real exception context is delayed until after inlining. */ REG_EH_CONTEXT, /* Indicates what exception region an INSN belongs in. This is used to indicate what region to which a call may throw. REGION 0 indicates that a call cannot throw at all. REGION -1 indicates that it cannot throw, nor will it execute a non-local goto. */ REG_EH_REGION, /* Used by haifa-sched to save NOTE_INSN notes across scheduling. */ REG_SAVE_NOTE, /* Indicates that this insn (which is part of the prologue) computes a value which might not be used later, and if so it's OK to delete the insn. Normally, deleting any insn in the prologue is an error. At present the parameter is unused and set to (const_int 0). */ REG_MAYBE_DEAD, /* Indicates that a call does not return. */ REG_NORETURN, /* Indicates that an indirect jump is a non-local goto instead of a computed goto. 
*/ REG_NON_LOCAL_GOTO, /* Indicates that a jump crosses between hot and cold sections in a (partitioned) assembly or .o file, and therefore should not be reduced to a simpler jump by optimizations. */ REG_CROSSING_JUMP, /* This kind of note is generated at each to `setjmp', and similar functions that can return twice. */ REG_SETJMP, /* Indicate calls that always returns. */ REG_ALWAYS_RETURN, /* Indicate that the memory load references a vtable. The expression is of the form (plus (symbol_ref vtable_sym) (const_int offset)). */ REG_VTABLE_REF }; /* The base value for branch probability notes. */ #define REG_BR_PROB_BASE 10000 /* Define macros to extract and insert the reg-note kind in an EXPR_LIST. */ #define REG_NOTE_KIND(LINK) ((enum reg_note) GET_MODE (LINK)) #define PUT_REG_NOTE_KIND(LINK, KIND) \ PUT_MODE (LINK, (enum machine_mode) (KIND)) /* Names for REG_NOTE's in EXPR_LIST insn's. */ extern const char * const reg_note_name[]; #define GET_REG_NOTE_NAME(MODE) (reg_note_name[(int) (MODE)]) /* This field is only present on CALL_INSNs. It holds a chain of EXPR_LIST of USE and CLOBBER expressions. USE expressions list the registers filled with arguments that are passed to the function. CLOBBER expressions document the registers explicitly clobbered by this CALL_INSN. Pseudo registers can not be mentioned in this list. */ #define CALL_INSN_FUNCTION_USAGE(INSN) XEXP(INSN, 9) /* The label-number of a code-label. The assembler label is made from `L' and the label-number printed in decimal. Label numbers are unique in a compilation. */ #define CODE_LABEL_NUMBER(INSN) XINT (INSN, 6) #define LINE_NUMBER NOTE /* In a NOTE that is a line number, this is a string for the file name that the line is in. We use the same field to record block numbers temporarily in NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes. (We avoid lots of casts between ints and pointers if we use a different macro for the block number.) */ /* Opaque data. */ #define NOTE_DATA(INSN) RTL_CHECKC1 (INSN, 4, NOTE) #define NOTE_DELETED_LABEL_NAME(INSN) XCSTR (INSN, 4, NOTE) #ifdef USE_MAPPED_LOCATION #define NOTE_SOURCE_LOCATION(INSN) XCUINT (INSN, 5, NOTE) #define NOTE_EXPANDED_LOCATION(XLOC, INSN) \ (XLOC) = expand_location (NOTE_SOURCE_LOCATION (INSN)) #define SET_INSN_DELETED(INSN) \ (PUT_CODE (INSN, NOTE), NOTE_LINE_NUMBER (INSN) = NOTE_INSN_DELETED) #else #define NOTE_EXPANDED_LOCATION(XLOC, INSN) \ ((XLOC).file = NOTE_SOURCE_FILE (INSN), \ (XLOC).line = NOTE_LINE_NUMBER (INSN)) #define NOTE_SOURCE_FILE(INSN) XCSTR (INSN, 4, NOTE) #define SET_INSN_DELETED(INSN) \ (PUT_CODE (INSN, NOTE), NOTE_SOURCE_FILE (INSN) = 0, \ NOTE_LINE_NUMBER (INSN) = NOTE_INSN_DELETED) #endif #define NOTE_BLOCK(INSN) XCTREE (INSN, 4, NOTE) #define NOTE_EH_HANDLER(INSN) XCINT (INSN, 4, NOTE) #define NOTE_BASIC_BLOCK(INSN) XCBBDEF (INSN, 4, NOTE) #define NOTE_EXPECTED_VALUE(INSN) XCEXP (INSN, 4, NOTE) #define NOTE_PREDICTION(INSN) XCINT (INSN, 4, NOTE) #define NOTE_PRECONDITIONED(INSN) XCINT (INSN, 4, NOTE) #define NOTE_VAR_LOCATION(INSN) XCEXP (INSN, 4, NOTE) /* In a NOTE that is a line number, this is the line number. Other kinds of NOTEs are identified by negative numbers here. */ #define NOTE_LINE_NUMBER(INSN) XCINT (INSN, 5, NOTE) /* Nonzero if INSN is a note marking the beginning of a basic block. */ #define NOTE_INSN_BASIC_BLOCK_P(INSN) \ (GET_CODE (INSN) == NOTE \ && NOTE_LINE_NUMBER (INSN) == NOTE_INSN_BASIC_BLOCK) /* Algorithm and flags for prediction. 
*/
*/ #define NOTE_PREDICTION_ALG(INSN) (XCINT(INSN, 4, NOTE)>>8) #define NOTE_PREDICTION_FLAGS(INSN) (XCINT(INSN, 4, NOTE)&0xff) #define NOTE_PREDICT(ALG,FLAGS) ((ALG<<8)+(FLAGS)) /* Variable declaration and the location of a variable. */ #define NOTE_VAR_LOCATION_DECL(INSN) (XCTREE (XCEXP (INSN, 4, NOTE), \ 0, VAR_LOCATION)) #define NOTE_VAR_LOCATION_LOC(INSN) (XCEXP (XCEXP (INSN, 4, NOTE), \ 1, VAR_LOCATION)) /* Codes that appear in the NOTE_LINE_NUMBER field for kinds of notes that are not line numbers. Notice that we do not try to use zero here for any of the special note codes because sometimes the source line actually can be zero! This happens (for example) when we are generating code for the per-translation-unit constructor and destructor routines for some C++ translation unit. If you should change any of the following values, or if you should add a new value here, don't forget to change the note_insn_name array in rtl.c. */ enum insn_note { /* Keep all of these numbers negative. Adjust as needed. */ NOTE_INSN_BIAS = -100, /* This note is used to get rid of an insn when it isn't safe to patch the insn out of the chain. */ NOTE_INSN_DELETED, /* These are used to mark the beginning and end of a lexical block. See NOTE_BLOCK and reorder_blocks. */ NOTE_INSN_BLOCK_BEG, NOTE_INSN_BLOCK_END, /* These mark the extremes of a loop. */ NOTE_INSN_LOOP_BEG, NOTE_INSN_LOOP_END, /* Generated at the place in a loop that `continue' jumps to. */ NOTE_INSN_LOOP_CONT, /* Generated at the start of a duplicated exit test. */ NOTE_INSN_LOOP_VTOP, /* Generated at the end of a conditional at the top of the loop. This is used to perform a lame form of loop rotation in lieu of actually understanding the loop structure. The note is discarded after rotation is complete. */ NOTE_INSN_LOOP_END_TOP_COND, /* This kind of note is generated at the end of the function body, just before the return insn or return label. In an optimizing compilation it is deleted by the first jump optimization, after enabling that optimizer to determine whether control can fall off the end of the function body without a return statement. */ NOTE_INSN_FUNCTION_END, /* This marks the point immediately after the last prologue insn. */ NOTE_INSN_PROLOGUE_END, /* This marks the point immediately prior to the first epilogue insn. */ NOTE_INSN_EPILOGUE_BEG, /* Generated in place of user-declared labels when they are deleted. */ NOTE_INSN_DELETED_LABEL, /* This note indicates the start of the real body of the function, i.e. the point just after all of the parms have been moved into their homes, etc. */ NOTE_INSN_FUNCTION_BEG, /* These note where exception handling regions begin and end. Uses NOTE_EH_HANDLER to identify the region in question. */ NOTE_INSN_EH_REGION_BEG, NOTE_INSN_EH_REGION_END, /* Generated whenever a duplicate line number note is output. For example, one is output after the end of an inline function, in order to prevent the line containing the inline call from being counted twice in gcov. */ NOTE_INSN_REPEATED_LINE_NUMBER, /* Record the struct for the following basic block. Uses NOTE_BASIC_BLOCK. */ NOTE_INSN_BASIC_BLOCK, /* Record the expected value of a register at a location. Uses NOTE_EXPECTED_VALUE; stored as (eq (reg) (const_int)). */ NOTE_INSN_EXPECTED_VALUE, /* Record a prediction. Uses NOTE_PREDICTION. */ NOTE_INSN_PREDICTION, /* Record that the current basic block is unlikely to be executed and should be moved to the UNLIKELY_EXECUTED_TEXT_SECTION. */ NOTE_INSN_UNLIKELY_EXECUTED_CODE, /* The location of a variable. 
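   For such a note N, NOTE_VAR_LOCATION (N) is a VAR_LOCATION rtx; NOTE_VAR_LOCATION_DECL (N) and NOTE_VAR_LOCATION_LOC (N), defined above, extract the variable's declaration and the rtx describing where it currently lives.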
*/ NOTE_INSN_VAR_LOCATION, NOTE_INSN_MAX }; /* Names for NOTE insn's other than line numbers. */ extern const char * const note_insn_name[NOTE_INSN_MAX - NOTE_INSN_BIAS]; #define GET_NOTE_INSN_NAME(NOTE_CODE) \ (note_insn_name[(NOTE_CODE) - (int) NOTE_INSN_BIAS]) /* The name of a label, in case it corresponds to an explicit label in the input source code. */ #define LABEL_NAME(RTX) XCSTR (RTX, 7, CODE_LABEL) /* In jump.c, each label contains a count of the number of LABEL_REFs that point at it, so unused labels can be deleted. */ #define LABEL_NUSES(RTX) XCINT (RTX, 4, CODE_LABEL) /* Labels carry a two-bit field composed of the ->jump and ->call bits. This field indicates whether the label is an alternate entry point, and if so, what kind. */ enum label_kind { LABEL_NORMAL = 0, /* ordinary label */ LABEL_STATIC_ENTRY, /* alternate entry point, not exported */ LABEL_GLOBAL_ENTRY, /* alternate entry point, exported */ LABEL_WEAK_ENTRY /* alternate entry point, exported as weak symbol */ }; #if defined ENABLE_RTL_FLAG_CHECKING && (GCC_VERSION > 2007) /* Retrieve the kind of LABEL. */ #define LABEL_KIND(LABEL) __extension__ \ ({ rtx const _label = (LABEL); \ if (GET_CODE (_label) != CODE_LABEL) \ rtl_check_failed_flag ("LABEL_KIND", _label, __FILE__, __LINE__, \ __FUNCTION__); \ (enum label_kind) ((_label->jump << 1) | _label->call); }) /* Set the kind of LABEL. */ #define SET_LABEL_KIND(LABEL, KIND) do { \ rtx _label = (LABEL); \ unsigned int _kind = (KIND); \ if (GET_CODE (_label) != CODE_LABEL) \ rtl_check_failed_flag ("SET_LABEL_KIND", _label, __FILE__, __LINE__, \ __FUNCTION__); \ _label->jump = ((_kind >> 1) & 1); \ _label->call = (_kind & 1); \ } while (0) #else /* Retrieve the kind of LABEL. */ #define LABEL_KIND(LABEL) \ ((enum label_kind) (((LABEL)->jump << 1) | (LABEL)->call)) /* Set the kind of LABEL. */ #define SET_LABEL_KIND(LABEL, KIND) do { \ rtx _label = (LABEL); \ unsigned int _kind = (KIND); \ _label->jump = ((_kind >> 1) & 1); \ _label->call = (_kind & 1); \ } while (0) #endif /* rtl flag checking */ #define LABEL_ALT_ENTRY_P(LABEL) (LABEL_KIND (LABEL) != LABEL_NORMAL) /* In jump.c, each JUMP_INSN can point to a label that it can jump to, so that if the JUMP_INSN is deleted, the label's LABEL_NUSES can be decremented and possibly the label can be deleted. */ #define JUMP_LABEL(INSN) XCEXP (INSN, 9, JUMP_INSN) /* Once basic blocks are found in flow.c, each CODE_LABEL starts a chain that goes through all the LABEL_REFs that jump to that label. The chain eventually winds up at the CODE_LABEL: it is circular. */ #define LABEL_REFS(LABEL) XCEXP (LABEL, 5, CODE_LABEL) /* This is the field in the LABEL_REF through which the circular chain of references to a particular label is linked. This chain is set up in flow.c. */ #define LABEL_NEXTREF(REF) XCEXP (REF, 1, LABEL_REF) /* Once basic blocks are found in flow.c, Each LABEL_REF points to its containing instruction with this field. */ #define CONTAINING_INSN(RTX) XCEXP (RTX, 2, LABEL_REF) /* For a REG rtx, REGNO extracts the register number. ORIGINAL_REGNO holds the number the register originally had; for a pseudo register turned into a hard reg this will hold the old pseudo register number. */ #define REGNO(RTX) XCUINT (RTX, 0, REG) #define ORIGINAL_REGNO(RTX) X0UINT (RTX, 1) /* 1 if RTX is a reg or parallel that is the current function's return value. 
*/ #define REG_FUNCTION_VALUE_P(RTX) \ (RTL_FLAG_CHECK2("REG_FUNCTION_VALUE_P", (RTX), REG, PARALLEL)->return_val) /* 1 if RTX is a reg that corresponds to a variable declared by the user. */ #define REG_USERVAR_P(RTX) \ (RTL_FLAG_CHECK1("REG_USERVAR_P", (RTX), REG)->volatil) /* 1 if RTX is a reg that holds a pointer value. */ #define REG_POINTER(RTX) \ (RTL_FLAG_CHECK1("REG_POINTER", (RTX), REG)->frame_related) /* 1 if RTX is a mem that holds a pointer value. */ #define MEM_POINTER(RTX) \ (RTL_FLAG_CHECK1("MEM_POINTER", (RTX), MEM)->frame_related) /* 1 if the given register REG corresponds to a hard register. */ #define HARD_REGISTER_P(REG) (HARD_REGISTER_NUM_P (REGNO (REG))) /* 1 if the given register number REG_NO corresponds to a hard register. */ #define HARD_REGISTER_NUM_P(REG_NO) ((REG_NO) < FIRST_PSEUDO_REGISTER) /* For a CONST_INT rtx, INTVAL extracts the integer. */ #define INTVAL(RTX) XCWINT(RTX, 0, CONST_INT) /* For a CONST_DOUBLE: For a DImode, there are two integers CONST_DOUBLE_LOW is the low-order word and ..._HIGH the high-order. For a float, there is a REAL_VALUE_TYPE structure, and CONST_DOUBLE_REAL_VALUE(r) is a pointer to it. */ #define CONST_DOUBLE_LOW(r) XCWINT (r, 0, CONST_DOUBLE) #define CONST_DOUBLE_HIGH(r) XCWINT (r, 1, CONST_DOUBLE) #define CONST_DOUBLE_REAL_VALUE(r) ((struct real_value *)&CONST_DOUBLE_LOW(r)) /* For a CONST_VECTOR, return element #n. */ #define CONST_VECTOR_ELT(RTX, N) XCVECEXP (RTX, 0, N, CONST_VECTOR) /* For a CONST_VECTOR, return the number of elements in a vector. */ #define CONST_VECTOR_NUNITS(RTX) XCVECLEN (RTX, 0, CONST_VECTOR) /* For a SUBREG rtx, SUBREG_REG extracts the value we want a subreg of. SUBREG_BYTE extracts the byte-number. */ #define SUBREG_REG(RTX) XCEXP (RTX, 0, SUBREG) #define SUBREG_BYTE(RTX) XCUINT (RTX, 1, SUBREG) /* in rtlanal.c */ /* Return the right cost to give to an operation to make the cost of the corresponding register-to-register instruction N times that of a fast register-to-register instruction. */ #define COSTS_N_INSNS(N) ((N) * 4) /* Maximum cost of an rtl expression. This value has the special meaning not to use an rtx with this cost under any circumstances. */ #define MAX_COST INT_MAX extern int rtx_cost (rtx, enum rtx_code); extern int address_cost (rtx, enum machine_mode); extern unsigned int subreg_lsb (rtx); extern unsigned int subreg_lsb_1 (enum machine_mode, enum machine_mode, unsigned int); extern unsigned int subreg_regno_offset (unsigned int, enum machine_mode, unsigned int, enum machine_mode); extern bool subreg_offset_representable_p (unsigned int, enum machine_mode, unsigned int, enum machine_mode); extern unsigned int subreg_regno (rtx); extern unsigned HOST_WIDE_INT nonzero_bits (rtx, enum machine_mode); extern unsigned int num_sign_bit_copies (rtx, enum machine_mode); /* 1 if RTX is a subreg containing a reg that is already known to be sign- or zero-extended from the mode of the subreg to the mode of the reg. SUBREG_PROMOTED_UNSIGNED_P gives the signedness of the extension. When used as a LHS, is means that this extension must be done when assigning to SUBREG_REG. 
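   A minimal illustrative check (x being an arbitrary rtx):

     if (GET_CODE (x) == SUBREG
         && SUBREG_PROMOTED_VAR_P (x)
         && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
       ... SUBREG_REG (x) is already zero-extended from the mode of the
           subreg, so the upper bits need not be recomputed ...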
*/
#define SUBREG_PROMOTED_VAR_P(RTX) \
  (RTL_FLAG_CHECK1("SUBREG_PROMOTED", (RTX), SUBREG)->in_struct)
#define SUBREG_PROMOTED_UNSIGNED_SET(RTX, VAL) \
do { \
  rtx const _rtx = RTL_FLAG_CHECK1("SUBREG_PROMOTED_UNSIGNED_SET", (RTX), SUBREG); \
  if ((VAL) < 0) \
    _rtx->volatil = 1; \
  else { \
    _rtx->volatil = 0; \
    _rtx->unchanging = (VAL); \
  } \
} while (0)
#define SUBREG_PROMOTED_UNSIGNED_P(RTX) \
  ((RTL_FLAG_CHECK1("SUBREG_PROMOTED_UNSIGNED_P", (RTX), SUBREG)->volatil) \
   ? -1 : (RTX)->unchanging)

/* Access various components of an ASM_OPERANDS rtx. */
#define ASM_OPERANDS_TEMPLATE(RTX) XCSTR (RTX, 0, ASM_OPERANDS)
#define ASM_OPERANDS_OUTPUT_CONSTRAINT(RTX) XCSTR (RTX, 1, ASM_OPERANDS)
#define ASM_OPERANDS_OUTPUT_IDX(RTX) XCINT (RTX, 2, ASM_OPERANDS)
#define ASM_OPERANDS_INPUT_VEC(RTX) XCVEC (RTX, 3, ASM_OPERANDS)
#define ASM_OPERANDS_INPUT_CONSTRAINT_VEC(RTX) XCVEC (RTX, 4, ASM_OPERANDS)
#define ASM_OPERANDS_INPUT(RTX, N) XCVECEXP (RTX, 3, N, ASM_OPERANDS)
#define ASM_OPERANDS_INPUT_LENGTH(RTX) XCVECLEN (RTX, 3, ASM_OPERANDS)
#define ASM_OPERANDS_INPUT_CONSTRAINT_EXP(RTX, N) \
  XCVECEXP (RTX, 4, N, ASM_OPERANDS)
#define ASM_OPERANDS_INPUT_CONSTRAINT(RTX, N) \
  XSTR (XCVECEXP (RTX, 4, N, ASM_OPERANDS), 0)
#define ASM_OPERANDS_INPUT_MODE(RTX, N) \
  GET_MODE (XCVECEXP (RTX, 4, N, ASM_OPERANDS))
#ifdef USE_MAPPED_LOCATION
#define ASM_OPERANDS_SOURCE_LOCATION(RTX) XCUINT (RTX, 5, ASM_OPERANDS)
#else
#define ASM_OPERANDS_SOURCE_FILE(RTX) XCSTR (RTX, 5, ASM_OPERANDS)
#define ASM_OPERANDS_SOURCE_LINE(RTX) XCINT (RTX, 6, ASM_OPERANDS)
#endif

/* 1 if RTX is a mem and we should keep the alias set for this mem unchanged when we access a component. Set to 1, for example, when we are already in a non-addressable component of an aggregate. */
#define MEM_KEEP_ALIAS_SET_P(RTX) \
  (RTL_FLAG_CHECK1("MEM_KEEP_ALIAS_SET_P", (RTX), MEM)->jump)

/* 1 if RTX is a mem or asm_operand for a volatile reference. */
#define MEM_VOLATILE_P(RTX) \
  (RTL_FLAG_CHECK3("MEM_VOLATILE_P", (RTX), MEM, ASM_OPERANDS, \
                   ASM_INPUT)->volatil)

/* 1 if RTX is a mem that refers to an aggregate, either to the aggregate itself or to a field of the aggregate. If zero, RTX may or may not be such a reference. */
#define MEM_IN_STRUCT_P(RTX) \
  (RTL_FLAG_CHECK1("MEM_IN_STRUCT_P", (RTX), MEM)->in_struct)

/* 1 if RTX is a MEM that refers to a scalar. If zero, RTX may or may not refer to a scalar. */
#define MEM_SCALAR_P(RTX) \
  (RTL_FLAG_CHECK1("MEM_SCALAR_P", (RTX), MEM)->return_val)

/* 1 if RTX is a mem that cannot trap. */
#define MEM_NOTRAP_P(RTX) \
  (RTL_FLAG_CHECK1("MEM_NOTRAP_P", (RTX), MEM)->call)

/* If VAL is nonzero, set MEM_IN_STRUCT_P and clear MEM_SCALAR_P in RTX. Otherwise, vice versa. Use this macro only when you are *sure* that you know that the MEM is in a structure, or is a scalar. VAL is evaluated only once. */
#define MEM_SET_IN_STRUCT_P(RTX, VAL) \
do { \
  if (VAL) \
    { \
      MEM_IN_STRUCT_P (RTX) = 1; \
      MEM_SCALAR_P (RTX) = 0; \
    } \
  else \
    { \
      MEM_IN_STRUCT_P (RTX) = 0; \
      MEM_SCALAR_P (RTX) = 1; \
    } \
} while (0)

/* The memory attribute block. We provide access macros for each value in the block and provide defaults if none is specified. */
#define MEM_ATTRS(RTX) X0MEMATTR (RTX, 1)

/* The register attribute block. We provide access macros for each value in the block and provide defaults if none is specified. */
#define REG_ATTRS(RTX) X0REGATTR (RTX, 2)

/* For a MEM rtx, the alias set. If 0, this MEM is not in any alias set, and may alias anything. Otherwise, the MEM can only alias MEMs in a conflicting alias set.
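   For example (an illustrative test), when MEM_ALIAS_SET (a) and MEM_ALIAS_SET (b) are both nonzero and alias_sets_conflict_p (MEM_ALIAS_SET (a), MEM_ALIAS_SET (b)) is false, the two references are guaranteed not to overlap.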
This value is set in a language-dependent manner in the front-end, and should not be altered in the back-end. These set numbers are tested with alias_sets_conflict_p. */ #define MEM_ALIAS_SET(RTX) (MEM_ATTRS (RTX) == 0 ? 0 : MEM_ATTRS (RTX)->alias) /* For a MEM rtx, the decl it is known to refer to, if it is known to refer to part of a DECL. It may also be a COMPONENT_REF. */ #define MEM_EXPR(RTX) (MEM_ATTRS (RTX) == 0 ? 0 : MEM_ATTRS (RTX)->expr) /* For a MEM rtx, the offset from the start of MEM_EXPR, if known, as a RTX that is always a CONST_INT. */ #define MEM_OFFSET(RTX) (MEM_ATTRS (RTX) == 0 ? 0 : MEM_ATTRS (RTX)->offset) /* For a MEM rtx, the size in bytes of the MEM, if known, as an RTX that is always a CONST_INT. */ #define MEM_SIZE(RTX) \ (MEM_ATTRS (RTX) != 0 ? MEM_ATTRS (RTX)->size \ : GET_MODE (RTX) != BLKmode ? GEN_INT (GET_MODE_SIZE (GET_MODE (RTX))) \ : 0) /* For a MEM rtx, the alignment in bits. We can use the alignment of the mode as a default when STRICT_ALIGNMENT, but not if not. */ #define MEM_ALIGN(RTX) \ (MEM_ATTRS (RTX) != 0 ? MEM_ATTRS (RTX)->align \ : (STRICT_ALIGNMENT && GET_MODE (RTX) != BLKmode \ ? GET_MODE_ALIGNMENT (GET_MODE (RTX)) : BITS_PER_UNIT)) /* For a REG rtx, the decl it is known to refer to, if it is known to refer to part of a DECL. */ #define REG_EXPR(RTX) (REG_ATTRS (RTX) == 0 ? 0 : REG_ATTRS (RTX)->decl) /* For a MEM rtx, the offset from the start of MEM_DECL, if known, as a RTX that is always a CONST_INT. */ #define REG_OFFSET(RTX) (REG_ATTRS (RTX) == 0 ? 0 : REG_ATTRS (RTX)->offset) /* Copy the attributes that apply to memory locations from RHS to LHS. */ #define MEM_COPY_ATTRIBUTES(LHS, RHS) \ (MEM_VOLATILE_P (LHS) = MEM_VOLATILE_P (RHS), \ MEM_IN_STRUCT_P (LHS) = MEM_IN_STRUCT_P (RHS), \ MEM_SCALAR_P (LHS) = MEM_SCALAR_P (RHS), \ MEM_NOTRAP_P (LHS) = MEM_NOTRAP_P (RHS), \ RTX_UNCHANGING_P (LHS) = RTX_UNCHANGING_P (RHS), \ MEM_KEEP_ALIAS_SET_P (LHS) = MEM_KEEP_ALIAS_SET_P (RHS), \ MEM_ATTRS (LHS) = MEM_ATTRS (RHS)) /* 1 if RTX is a label_ref to a label outside the loop containing the reference. */ #define LABEL_OUTSIDE_LOOP_P(RTX) \ (RTL_FLAG_CHECK1("LABEL_OUTSIDE_LOOP_P", (RTX), LABEL_REF)->in_struct) /* 1 if RTX is a label_ref for a nonlocal label. */ /* Likewise in an expr_list for a reg_label note. */ #define LABEL_REF_NONLOCAL_P(RTX) \ (RTL_FLAG_CHECK2("LABEL_REF_NONLOCAL_P", (RTX), LABEL_REF, \ REG_LABEL)->volatil) /* 1 if RTX is a code_label that should always be considered to be needed. */ #define LABEL_PRESERVE_P(RTX) \ (RTL_FLAG_CHECK2("LABEL_PRESERVE_P", (RTX), CODE_LABEL, NOTE)->in_struct) /* 1 if RTX is a reg that is used only in an exit test of a loop. */ #define REG_LOOP_TEST_P(RTX) \ (RTL_FLAG_CHECK1("REG_LOOP_TEST_P", (RTX), REG)->in_struct) /* During sched, 1 if RTX is an insn that must be scheduled together with the preceding insn. */ #define SCHED_GROUP_P(RTX) \ (RTL_FLAG_CHECK3("SCHED_GROUP_P", (RTX), INSN, JUMP_INSN, CALL_INSN \ )->in_struct) /* For a SET rtx, SET_DEST is the place that is set and SET_SRC is the value it is set to. */ #define SET_DEST(RTX) XC2EXP(RTX, 0, SET, CLOBBER) #define SET_SRC(RTX) XCEXP(RTX, 1, SET) #define SET_IS_RETURN_P(RTX) \ (RTL_FLAG_CHECK1("SET_IS_RETURN_P", (RTX), SET)->jump) /* For a TRAP_IF rtx, TRAP_CONDITION is an expression. 
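   For example (illustrative operands), (trap_if (eq (reg:SI 1) (const_int 0)) (const_int 5)) traps with code 5 whenever register 1 is zero; TRAP_CONDITION yields the comparison and TRAP_CODE, defined below, the code operand.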
*/ #define TRAP_CONDITION(RTX) XCEXP (RTX, 0, TRAP_IF) #define TRAP_CODE(RTX) XCEXP (RTX, 1, TRAP_IF) /* For a COND_EXEC rtx, COND_EXEC_TEST is the condition to base conditionally executing the code on, COND_EXEC_CODE is the code to execute if the condition is true. */ #define COND_EXEC_TEST(RTX) XCEXP (RTX, 0, COND_EXEC) #define COND_EXEC_CODE(RTX) XCEXP (RTX, 1, COND_EXEC) /* 1 if RTX is a symbol_ref that addresses this function's rtl constants pool. */ #define CONSTANT_POOL_ADDRESS_P(RTX) \ (RTL_FLAG_CHECK1("CONSTANT_POOL_ADDRESS_P", (RTX), SYMBOL_REF)->unchanging) /* 1 if RTX is a symbol_ref that addresses a value in the file's tree constant pool. This information is private to varasm.c. */ #define TREE_CONSTANT_POOL_ADDRESS_P(RTX) \ (RTL_FLAG_CHECK1("TREE_CONSTANT_POOL_ADDRESS_P", \ (RTX), SYMBOL_REF)->frame_related) /* Used if RTX is a symbol_ref, for machine-specific purposes. */ #define SYMBOL_REF_FLAG(RTX) \ (RTL_FLAG_CHECK1("SYMBOL_REF_FLAG", (RTX), SYMBOL_REF)->volatil) /* 1 if RTX is a symbol_ref that has been the library function in emit_library_call. */ #define SYMBOL_REF_USED(RTX) \ (RTL_FLAG_CHECK1("SYMBOL_REF_USED", (RTX), SYMBOL_REF)->used) /* 1 if RTX is a symbol_ref for a weak symbol. */ #define SYMBOL_REF_WEAK(RTX) \ (RTL_FLAG_CHECK1("SYMBOL_REF_WEAK", (RTX), SYMBOL_REF)->return_val) /* The tree (decl or constant) associated with the symbol, or null. */ #define SYMBOL_REF_DECL(RTX) X0TREE ((RTX), 2) /* A set of flags on a symbol_ref that are, in some respects, redundant with information derivable from the tree decl associated with this symbol. Except that we build a *lot* of SYMBOL_REFs that aren't associated with a decl. In some cases this is a bug. But beyond that, it's nice to cache this information to avoid recomputing it. Finally, this allows space for the target to store more than one bit of information, as with SYMBOL_REF_FLAG. */ #define SYMBOL_REF_FLAGS(RTX) X0INT ((RTX), 1) /* These flags are common enough to be defined for all targets. They are computed by the default version of targetm.encode_section_info. */ /* Set if this symbol is a function. */ #define SYMBOL_FLAG_FUNCTION (1 << 0) #define SYMBOL_REF_FUNCTION_P(RTX) \ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_FUNCTION) != 0) /* Set if targetm.binds_local_p is true. */ #define SYMBOL_FLAG_LOCAL (1 << 1) #define SYMBOL_REF_LOCAL_P(RTX) \ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_LOCAL) != 0) /* Set if targetm.in_small_data_p is true. */ #define SYMBOL_FLAG_SMALL (1 << 2) #define SYMBOL_REF_SMALL_P(RTX) \ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_SMALL) != 0) /* The three-bit field at [5:3] is true for TLS variables; use SYMBOL_REF_TLS_MODEL to extract the field as an enum tls_model. */ #define SYMBOL_FLAG_TLS_SHIFT 3 #define SYMBOL_REF_TLS_MODEL(RTX) \ ((enum tls_model) ((SYMBOL_REF_FLAGS (RTX) >> SYMBOL_FLAG_TLS_SHIFT) & 7)) /* Set if this symbol is not defined in this translation unit. */ #define SYMBOL_FLAG_EXTERNAL (1 << 6) #define SYMBOL_REF_EXTERNAL_P(RTX) \ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_EXTERNAL) != 0) /* Subsequent bits are available for the target to use. */ #define SYMBOL_FLAG_MACH_DEP_SHIFT 7 #define SYMBOL_FLAG_MACH_DEP (1 << SYMBOL_FLAG_MACH_DEP_SHIFT) /* Define a macro to look for REG_INC notes, but save time on machines where they never exist. */ #if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT)) #define FIND_REG_INC_NOTE(INSN, REG) \ ((REG) != NULL_RTX && REG_P ((REG)) \ ? 
find_regno_note ((INSN), REG_INC, REGNO (REG)) \ : find_reg_note ((INSN), REG_INC, (REG))) #else #define FIND_REG_INC_NOTE(INSN, REG) 0 #endif /* Indicate whether the machine has any sort of auto increment addressing. If not, we can avoid checking for REG_INC notes. */ #if (defined (HAVE_PRE_INCREMENT) || defined (HAVE_PRE_DECREMENT) || defined (HAVE_POST_INCREMENT) || defined (HAVE_POST_DECREMENT)) #define AUTO_INC_DEC #endif #ifndef HAVE_PRE_INCREMENT #define HAVE_PRE_INCREMENT 0 #endif #ifndef HAVE_PRE_DECREMENT #define HAVE_PRE_DECREMENT 0 #endif #ifndef HAVE_POST_INCREMENT #define HAVE_POST_INCREMENT 0 #endif #ifndef HAVE_POST_DECREMENT #define HAVE_POST_DECREMENT 0 #endif #ifndef HAVE_POST_MODIFY_DISP #define HAVE_POST_MODIFY_DISP 0 #endif #ifndef HAVE_POST_MODIFY_REG #define HAVE_POST_MODIFY_REG 0 #endif #ifndef HAVE_PRE_MODIFY_DISP #define HAVE_PRE_MODIFY_DISP 0 #endif #ifndef HAVE_PRE_MODIFY_REG #define HAVE_PRE_MODIFY_REG 0 #endif /* Some architectures do not have complete pre/post increment/decrement instruction sets, or only move some modes efficiently. These macros allow us to tune autoincrement generation. */ #ifndef USE_LOAD_POST_INCREMENT #define USE_LOAD_POST_INCREMENT(MODE) HAVE_POST_INCREMENT #endif #ifndef USE_LOAD_POST_DECREMENT #define USE_LOAD_POST_DECREMENT(MODE) HAVE_POST_DECREMENT #endif #ifndef USE_LOAD_PRE_INCREMENT #define USE_LOAD_PRE_INCREMENT(MODE) HAVE_PRE_INCREMENT #endif #ifndef USE_LOAD_PRE_DECREMENT #define USE_LOAD_PRE_DECREMENT(MODE) HAVE_PRE_DECREMENT #endif #ifndef USE_STORE_POST_INCREMENT #define USE_STORE_POST_INCREMENT(MODE) HAVE_POST_INCREMENT #endif #ifndef USE_STORE_POST_DECREMENT #define USE_STORE_POST_DECREMENT(MODE) HAVE_POST_DECREMENT #endif #ifndef USE_STORE_PRE_INCREMENT #define USE_STORE_PRE_INCREMENT(MODE) HAVE_PRE_INCREMENT #endif #ifndef USE_STORE_PRE_DECREMENT #define USE_STORE_PRE_DECREMENT(MODE) HAVE_PRE_DECREMENT #endif /* Nonzero if we need to distinguish between the return value of this function and the return value of a function called by this function. This helps integrate.c. This is 1 until after the rtl generation pass. ??? It appears that this is 1 only when expanding trees to RTL. */ extern int rtx_equal_function_value_matters; /* Nonzero when we are generating CONCATs. */ extern int generating_concat_p; /* Generally useful functions. 
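   For example (illustrative), plus_constant (stack_pointer_rtx, 4), defined just below, yields an rtx for the stack pointer plus 4, folding the addend into any existing constant term where possible.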
*/ /* In expmed.c */ extern int ceil_log2 (unsigned HOST_WIDE_INT); #define plus_constant(X, C) plus_constant_wide ((X), (HOST_WIDE_INT) (C)) /* In builtins.c */ extern rtx expand_builtin_expect_jump (tree, rtx, rtx); extern void purge_builtin_constant_p (void); /* In explow.c */ extern void set_stack_check_libfunc (rtx); extern HOST_WIDE_INT trunc_int_for_mode (HOST_WIDE_INT, enum machine_mode); extern rtx plus_constant_wide (rtx, HOST_WIDE_INT); extern rtx plus_constant_for_output_wide (rtx, HOST_WIDE_INT); extern void optimize_save_area_alloca (rtx); /* In emit-rtl.c */ extern rtvec gen_rtvec (int, ...); extern rtx copy_insn_1 (rtx); extern rtx copy_insn (rtx); extern rtx gen_int_mode (HOST_WIDE_INT, enum machine_mode); extern rtx emit_copy_of_insn_after (rtx, rtx); extern void set_reg_attrs_from_mem (rtx, rtx); extern void set_mem_attrs_from_reg (rtx, rtx); extern void set_reg_attrs_for_parm (rtx, rtx); extern void set_reg_pointer_align (rtx, unsigned int); extern int mem_expr_equal_p (tree, tree); /* In rtl.c */ extern rtx rtx_alloc_stat (RTX_CODE MEM_STAT_DECL); #define rtx_alloc(c) rtx_alloc_stat (c MEM_STAT_INFO) extern rtvec rtvec_alloc (int); extern rtx copy_rtx (rtx); extern void dump_rtx_statistics (void); /* In emit-rtl.c */ extern rtx copy_rtx_if_shared (rtx); /* In rtl.c */ extern rtx copy_most_rtx (rtx, rtx); extern rtx shallow_copy_rtx_stat (rtx MEM_STAT_DECL); #define shallow_copy_rtx(a) shallow_copy_rtx_stat (a MEM_STAT_INFO) extern int rtx_equal_p (rtx, rtx); /* In emit-rtl.c */ extern rtvec gen_rtvec_v (int, rtx *); extern rtx gen_reg_rtx (enum machine_mode); extern rtx gen_rtx_REG_offset (rtx, enum machine_mode, unsigned int, int); extern rtx gen_label_rtx (void); extern int subreg_hard_regno (rtx, int); extern rtx gen_lowpart_common (enum machine_mode, rtx); /* In cse.c */ extern rtx gen_lowpart_if_possible (enum machine_mode, rtx); /* In emit-rtl.c */ extern rtx gen_highpart (enum machine_mode, rtx); extern rtx gen_highpart_mode (enum machine_mode, enum machine_mode, rtx); extern rtx gen_realpart (enum machine_mode, rtx); extern rtx gen_imagpart (enum machine_mode, rtx); extern rtx operand_subword (rtx, unsigned int, int, enum machine_mode); /* In emit-rtl.c */ extern rtx operand_subword_force (rtx, unsigned int, enum machine_mode); extern int subreg_lowpart_p (rtx); extern unsigned int subreg_lowpart_offset (enum machine_mode, enum machine_mode); extern unsigned int subreg_highpart_offset (enum machine_mode, enum machine_mode); extern rtx make_safe_from (rtx, rtx); extern rtx convert_memory_address (enum machine_mode, rtx); extern rtx get_insns (void); extern const char *get_insn_name (int); extern rtx get_last_insn (void); extern rtx get_last_insn_anywhere (void); extern rtx get_first_nonnote_insn (void); extern rtx get_last_nonnote_insn (void); extern void start_sequence (void); extern void push_to_sequence (rtx); extern void end_sequence (void); extern void push_to_full_sequence (rtx, rtx); extern rtx immed_double_const (HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode); /* In varasm.c */ extern rtx force_const_mem (enum machine_mode, rtx); /* In varasm.c */ extern rtx get_pool_constant (rtx); extern rtx get_pool_constant_mark (rtx, bool *); extern enum machine_mode get_pool_mode (rtx); extern rtx get_pool_constant_for_function (struct function *, rtx); extern enum machine_mode get_pool_mode_for_function (struct function *, rtx); extern int get_pool_offset (rtx); extern rtx simplify_subtraction (rtx); /* In function.c */ extern rtx assign_stack_local (enum 
machine_mode, HOST_WIDE_INT, int); extern rtx assign_stack_temp (enum machine_mode, HOST_WIDE_INT, int); extern rtx assign_stack_temp_for_type (enum machine_mode, HOST_WIDE_INT, int, tree); extern rtx assign_temp (tree, int, int, int); /* In emit-rtl.c */ extern rtx emit_insn_before (rtx, rtx); extern rtx emit_insn_before_setloc (rtx, rtx, int); extern rtx emit_jump_insn_before (rtx, rtx); extern rtx emit_jump_insn_before_setloc (rtx, rtx, int); extern rtx emit_call_insn_before (rtx, rtx); extern rtx emit_call_insn_before_setloc (rtx, rtx, int); extern rtx emit_barrier_before (rtx); extern rtx emit_label_before (rtx, rtx); extern rtx emit_note_before (int, rtx); extern rtx emit_insn_after (rtx, rtx); extern rtx emit_insn_after_setloc (rtx, rtx, int); extern rtx emit_jump_insn_after (rtx, rtx); extern rtx emit_jump_insn_after_setloc (rtx, rtx, int); extern rtx emit_call_insn_after (rtx, rtx); extern rtx emit_call_insn_after_setloc (rtx, rtx, int); extern rtx emit_barrier_after (rtx); extern rtx emit_label_after (rtx, rtx); extern rtx emit_note_after (int, rtx); extern rtx emit_note_copy_after (rtx, rtx); extern rtx emit_insn (rtx); extern rtx emit_jump_insn (rtx); extern rtx emit_call_insn (rtx); extern rtx emit_label (rtx); extern rtx emit_barrier (void); extern rtx emit_note (int); extern rtx emit_note_copy (rtx); extern rtx emit_line_note (location_t); extern rtx make_insn_raw (rtx); extern void add_function_usage_to (rtx, rtx); extern rtx last_call_insn (void); extern rtx previous_insn (rtx); extern rtx next_insn (rtx); extern rtx prev_nonnote_insn (rtx); extern rtx next_nonnote_insn (rtx); extern rtx prev_real_insn (rtx); extern rtx next_real_insn (rtx); extern rtx prev_active_insn (rtx); extern rtx next_active_insn (rtx); extern int active_insn_p (rtx); extern rtx prev_label (rtx); extern rtx next_label (rtx); extern rtx skip_consecutive_labels (rtx); extern rtx next_cc0_user (rtx); extern rtx prev_cc0_setter (rtx); #define emit_insn_before_sameloc(INSN, BEFORE) \ emit_insn_before_setloc (INSN, BEFORE, INSN_LOCATOR (BEFORE)) #define emit_jump_insn_before_sameloc(INSN, BEFORE) \ emit_jump_insn_before_setloc (INSN, BEFORE, INSN_LOCATOR (BEFORE)) #define emit_call_insn_before_sameloc(INSN, BEFORE) \ emit_call_insn_before_setloc (INSN, BEFORE, INSN_LOCATOR (BEFORE)) #define emit_insn_after_sameloc(INSN, AFTER) \ emit_insn_after_setloc (INSN, AFTER, INSN_LOCATOR (AFTER)) #define emit_jump_insn_after_sameloc(INSN, AFTER) \ emit_jump_insn_after_setloc (INSN, AFTER, INSN_LOCATOR (AFTER)) #define emit_call_insn_after_sameloc(INSN, AFTER) \ emit_call_insn_after_setloc (INSN, AFTER, INSN_LOCATOR (AFTER)) /* In cfglayout.c */ extern tree choose_inner_scope (tree, tree); extern int insn_line (rtx); extern const char * insn_file (rtx); extern int locator_line (int); extern const char * locator_file (int); extern int prologue_locator, epilogue_locator; /* In jump.c */ extern enum rtx_code reverse_condition (enum rtx_code); extern enum rtx_code reverse_condition_maybe_unordered (enum rtx_code); extern enum rtx_code swap_condition (enum rtx_code); extern enum rtx_code unsigned_condition (enum rtx_code); extern enum rtx_code signed_condition (enum rtx_code); extern void mark_jump_label (rtx, rtx, int); extern void cleanup_barriers (void); /* In jump.c */ extern bool squeeze_notes (rtx *, rtx *); extern rtx delete_related_insns (rtx); extern void delete_jump (rtx); extern void delete_barrier (rtx); extern rtx get_label_before (rtx); extern rtx get_label_after (rtx); extern rtx follow_jumps (rtx); /* In 
recog.c */ extern rtx *find_constant_term_loc (rtx *); /* In emit-rtl.c */ extern rtx try_split (rtx, rtx, int); extern int split_branch_probability; /* In unknown file */ extern rtx split_insns (rtx, rtx); /* In simplify-rtx.c */ extern rtx simplify_unary_operation (enum rtx_code, enum machine_mode, rtx, enum machine_mode); extern rtx simplify_binary_operation (enum rtx_code, enum machine_mode, rtx, rtx); extern rtx simplify_ternary_operation (enum rtx_code, enum machine_mode, enum machine_mode, rtx, rtx, rtx); extern rtx simplify_const_relational_operation (enum rtx_code, enum machine_mode, rtx, rtx); extern rtx simplify_relational_operation (enum rtx_code, enum machine_mode, enum machine_mode, rtx, rtx); extern rtx simplify_gen_binary (enum rtx_code, enum machine_mode, rtx, rtx); extern rtx simplify_gen_unary (enum rtx_code, enum machine_mode, rtx, enum machine_mode); extern rtx simplify_gen_ternary (enum rtx_code, enum machine_mode, enum machine_mode, rtx, rtx, rtx); extern rtx simplify_gen_relational (enum rtx_code, enum machine_mode, enum machine_mode, rtx, rtx); extern rtx simplify_subreg (enum machine_mode, rtx, enum machine_mode, unsigned int); extern rtx simplify_gen_subreg (enum machine_mode, rtx, enum machine_mode, unsigned int); extern rtx simplify_replace_rtx (rtx, rtx, rtx); extern rtx simplify_rtx (rtx); extern rtx avoid_constant_pool_reference (rtx); /* In regclass.c */ extern enum machine_mode choose_hard_reg_mode (unsigned int, unsigned int, bool); /* In emit-rtl.c */ extern rtx set_unique_reg_note (rtx, enum reg_note, rtx); /* Functions in rtlanal.c */ /* Single set is implemented as macro for performance reasons. */ #define single_set(I) (INSN_P (I) \ ? (GET_CODE (PATTERN (I)) == SET \ ? PATTERN (I) : single_set_1 (I)) \ : NULL_RTX) #define single_set_1(I) single_set_2 (I, PATTERN (I)) /* Structure used for passing data to REPLACE_LABEL. 
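   An illustrative sketch (OLD_LABEL and NEW_LABEL are placeholders): to redirect every reference to OLD_LABEL inside insn X to NEW_LABEL, a pass would typically do

     replace_label_data d;
     d.r1 = OLD_LABEL;
     d.r2 = NEW_LABEL;
     d.update_label_nuses = true;
     for_each_rtx (&PATTERN (X), replace_label, &d);

   using replace_label and for_each_rtx declared below.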
*/ typedef struct replace_label_data { rtx r1; rtx r2; bool update_label_nuses; } replace_label_data; extern int rtx_addr_can_trap_p (rtx); extern bool nonzero_address_p (rtx); extern int rtx_unstable_p (rtx); extern int rtx_varies_p (rtx, int); extern int rtx_addr_varies_p (rtx, int); extern HOST_WIDE_INT get_integer_term (rtx); extern rtx get_related_value (rtx); extern rtx get_jump_table_offset (rtx, rtx *); extern int global_reg_mentioned_p (rtx); extern int reg_mentioned_p (rtx, rtx); extern int count_occurrences (rtx, rtx, int); extern int reg_referenced_p (rtx, rtx); extern int reg_used_between_p (rtx, rtx, rtx); extern int reg_referenced_between_p (rtx, rtx, rtx); extern int reg_set_between_p (rtx, rtx, rtx); extern int regs_set_between_p (rtx, rtx, rtx); extern int commutative_operand_precedence (rtx); extern int swap_commutative_operands_p (rtx, rtx); extern int modified_between_p (rtx, rtx, rtx); extern int no_labels_between_p (rtx, rtx); extern int no_jumps_between_p (rtx, rtx); extern int modified_in_p (rtx, rtx); extern int insn_dependent_p (rtx, rtx); extern int reg_set_p (rtx, rtx); extern rtx single_set_2 (rtx, rtx); extern int multiple_sets (rtx); extern int set_noop_p (rtx); extern int noop_move_p (rtx); extern rtx find_last_value (rtx, rtx *, rtx, int); extern int refers_to_regno_p (unsigned int, unsigned int, rtx, rtx *); extern int reg_overlap_mentioned_p (rtx, rtx); extern rtx set_of (rtx, rtx); extern void note_stores (rtx, void (*) (rtx, rtx, void *), void *); extern void note_uses (rtx *, void (*) (rtx *, void *), void *); extern int dead_or_set_p (rtx, rtx); extern int dead_or_set_regno_p (rtx, unsigned int); extern rtx find_reg_note (rtx, enum reg_note, rtx); extern rtx find_regno_note (rtx, enum reg_note, unsigned int); extern rtx find_reg_equal_equiv_note (rtx); extern int find_reg_fusage (rtx, enum rtx_code, rtx); extern int find_regno_fusage (rtx, enum rtx_code, unsigned int); extern int pure_call_p (rtx); extern void remove_note (rtx, rtx); extern int side_effects_p (rtx); extern int volatile_refs_p (rtx); extern int volatile_insn_p (rtx); extern int may_trap_p (rtx); extern int inequality_comparisons_p (rtx); extern rtx replace_rtx (rtx, rtx, rtx); extern rtx replace_regs (rtx, rtx *, unsigned int, int); extern int replace_label (rtx *, void *); extern int rtx_referenced_p (rtx, rtx); extern bool tablejump_p (rtx, rtx *, rtx *); extern int computed_jump_p (rtx); typedef int (*rtx_function) (rtx *, void *); extern int for_each_rtx (rtx *, rtx_function, void *); extern rtx regno_use_in (unsigned int, rtx); extern int auto_inc_p (rtx); extern int in_expr_list_p (rtx, rtx); extern void remove_node_from_expr_list (rtx, rtx *); extern int insns_safe_to_move_p (rtx, rtx, rtx *); extern int loc_mentioned_in_p (rtx *, rtx); extern rtx find_first_parameter_load (rtx, rtx); extern bool keep_with_call_p (rtx); extern bool label_is_jump_target_p (rtx, rtx); /* flow.c */ extern rtx find_use_as_address (rtx, rtx, HOST_WIDE_INT); /* lists.c */ void free_EXPR_LIST_list (rtx *); void free_INSN_LIST_list (rtx *); void free_EXPR_LIST_node (rtx); void free_INSN_LIST_node (rtx); rtx alloc_INSN_LIST (rtx, rtx); rtx alloc_EXPR_LIST (int, rtx, rtx); /* regclass.c */ /* Maximum number of parallel sets and clobbers in any insn in this fn. Always at least 3, since the combiner could put that many together and we want this to remain correct for all the remaining passes. */ extern int max_parallel; /* Free up register info memory. 
*/ extern void free_reg_info (void); /* recog.c */ extern int asm_noperands (rtx); extern const char *decode_asm_operands (rtx, rtx *, rtx **, const char **, enum machine_mode *); extern enum reg_class reg_preferred_class (int); extern enum reg_class reg_alternate_class (int); extern void split_all_insns (int); extern void split_all_insns_noflow (void); #define MAX_SAVED_CONST_INT 64 extern GTY(()) rtx const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1]; #define const0_rtx (const_int_rtx[MAX_SAVED_CONST_INT]) #define const1_rtx (const_int_rtx[MAX_SAVED_CONST_INT+1]) #define const2_rtx (const_int_rtx[MAX_SAVED_CONST_INT+2]) #define constm1_rtx (const_int_rtx[MAX_SAVED_CONST_INT-1]) extern GTY(()) rtx const_true_rtx; extern GTY(()) rtx const_tiny_rtx[3][(int) MAX_MACHINE_MODE]; /* Returns a constant 0 rtx in mode MODE. Integer modes are treated the same as VOIDmode. */ #define CONST0_RTX(MODE) (const_tiny_rtx[0][(int) (MODE)]) /* Likewise, for the constants 1 and 2. */ #define CONST1_RTX(MODE) (const_tiny_rtx[1][(int) (MODE)]) #define CONST2_RTX(MODE) (const_tiny_rtx[2][(int) (MODE)]) /* If HARD_FRAME_POINTER_REGNUM is defined, then a special dummy reg is used to represent the frame pointer. This is because the hard frame pointer and the automatic variables are separated by an amount that cannot be determined until after register allocation. We can assume that in this case ELIMINABLE_REGS will be defined, one action of which will be to eliminate FRAME_POINTER_REGNUM into HARD_FRAME_POINTER_REGNUM. */ #ifndef HARD_FRAME_POINTER_REGNUM #define HARD_FRAME_POINTER_REGNUM FRAME_POINTER_REGNUM #endif /* Index labels for global_rtl. */ enum global_rtl_index { GR_PC, GR_CC0, GR_STACK_POINTER, GR_FRAME_POINTER, /* For register elimination to work properly these hard_frame_pointer_rtx, frame_pointer_rtx, and arg_pointer_rtx must be the same if they refer to the same register. */ #if FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM GR_ARG_POINTER = GR_FRAME_POINTER, #endif #if HARD_FRAME_POINTER_REGNUM == FRAME_POINTER_REGNUM GR_HARD_FRAME_POINTER = GR_FRAME_POINTER, #else GR_HARD_FRAME_POINTER, #endif #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM #if HARD_FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM GR_ARG_POINTER = GR_HARD_FRAME_POINTER, #else GR_ARG_POINTER, #endif #endif GR_VIRTUAL_INCOMING_ARGS, GR_VIRTUAL_STACK_ARGS, GR_VIRTUAL_STACK_DYNAMIC, GR_VIRTUAL_OUTGOING_ARGS, GR_VIRTUAL_CFA, GR_MAX }; /* Pointers to standard pieces of rtx are stored here. */ extern GTY(()) rtx global_rtl[GR_MAX]; /* Standard pieces of rtx, to be substituted directly into things. */ #define pc_rtx (global_rtl[GR_PC]) #define cc0_rtx (global_rtl[GR_CC0]) /* All references to certain hard regs, except those created by allocating pseudo regs into them (when that's possible), go through these unique rtx objects. */ #define stack_pointer_rtx (global_rtl[GR_STACK_POINTER]) #define frame_pointer_rtx (global_rtl[GR_FRAME_POINTER]) #define hard_frame_pointer_rtx (global_rtl[GR_HARD_FRAME_POINTER]) #define arg_pointer_rtx (global_rtl[GR_ARG_POINTER]) extern GTY(()) rtx pic_offset_table_rtx; extern GTY(()) rtx static_chain_rtx; extern GTY(()) rtx static_chain_incoming_rtx; extern GTY(()) rtx return_address_pointer_rtx; /* Include the RTL generation functions. */ #ifndef NO_GENRTL_H /* Generated automatically by gengenrtl from rtl.def. 
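   The declarations that follow provide one low-level constructor per rtx format string; the gen_rtx_FOO convenience macros further below simply wrap them. For example, gen_rtx_PLUS (SImode, a, b) expands to gen_rtx_fmt_ee (PLUS, SImode, a, b) and builds the rtx (plus:SI a b).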
*/ #ifndef GCC_GENRTL_H #define GCC_GENRTL_H extern rtx gen_rtx_fmt_s (RTX_CODE, enum machine_mode mode, const char *arg0); extern rtx gen_rtx_fmt_ee (RTX_CODE, enum machine_mode mode, rtx arg0, rtx arg1); extern rtx gen_rtx_fmt_ue (RTX_CODE, enum machine_mode mode, rtx arg0, rtx arg1); extern rtx gen_rtx_fmt_iss (RTX_CODE, enum machine_mode mode, int arg0, const char *arg1, const char *arg2); extern rtx gen_rtx_fmt_is (RTX_CODE, enum machine_mode mode, int arg0, const char *arg1); extern rtx gen_rtx_fmt_i (RTX_CODE, enum machine_mode mode, int arg0); extern rtx gen_rtx_fmt_isE (RTX_CODE, enum machine_mode mode, int arg0, const char *arg1, rtvec arg2); extern rtx gen_rtx_fmt_iE (RTX_CODE, enum machine_mode mode, int arg0, rtvec arg1); extern rtx gen_rtx_fmt_sEss (RTX_CODE, enum machine_mode mode, const char *arg0, rtvec arg1, const char *arg2, const char *arg3); extern rtx gen_rtx_fmt_eE (RTX_CODE, enum machine_mode mode, rtx arg0, rtvec arg1); extern rtx gen_rtx_fmt_Ess (RTX_CODE, enum machine_mode mode, rtvec arg0, const char *arg1, const char *arg2); extern rtx gen_rtx_fmt_E (RTX_CODE, enum machine_mode mode, rtvec arg0); extern rtx gen_rtx_fmt_e (RTX_CODE, enum machine_mode mode, rtx arg0); extern rtx gen_rtx_fmt_ss (RTX_CODE, enum machine_mode mode, const char *arg0, const char *arg1); extern rtx gen_rtx_fmt_sies (RTX_CODE, enum machine_mode mode, const char *arg0, int arg1, rtx arg2, const char *arg3); extern rtx gen_rtx_fmt_sse (RTX_CODE, enum machine_mode mode, const char *arg0, const char *arg1, rtx arg2); extern rtx gen_rtx_fmt_sE (RTX_CODE, enum machine_mode mode, const char *arg0, rtvec arg1); extern rtx gen_rtx_fmt_ii (RTX_CODE, enum machine_mode mode, int arg0, int arg1); extern rtx gen_rtx_fmt_iuuBieiee (RTX_CODE, enum machine_mode mode, int arg0, rtx arg1, rtx arg2, struct basic_block_def *arg3, int arg4, rtx arg5, int arg6, rtx arg7, rtx arg8); extern rtx gen_rtx_fmt_iuuBieiee0 (RTX_CODE, enum machine_mode mode, int arg0, rtx arg1, rtx arg2, struct basic_block_def *arg3, int arg4, rtx arg5, int arg6, rtx arg7, rtx arg8); extern rtx gen_rtx_fmt_iuuBieieee (RTX_CODE, enum machine_mode mode, int arg0, rtx arg1, rtx arg2, struct basic_block_def *arg3, int arg4, rtx arg5, int arg6, rtx arg7, rtx arg8, rtx arg9); extern rtx gen_rtx_fmt_iuu000000 (RTX_CODE, enum machine_mode mode, int arg0, rtx arg1, rtx arg2); extern rtx gen_rtx_fmt_iuuB00is (RTX_CODE, enum machine_mode mode, int arg0, rtx arg1, rtx arg2, struct basic_block_def *arg3, int arg4, const char *arg5); extern rtx gen_rtx_fmt_ssiEEsi (RTX_CODE, enum machine_mode mode, const char *arg0, const char *arg1, int arg2, rtvec arg3, rtvec arg4, const char *arg5, int arg6); extern rtx gen_rtx_fmt_Ei (RTX_CODE, enum machine_mode mode, rtvec arg0, int arg1); extern rtx gen_rtx_fmt_eEee0 (RTX_CODE, enum machine_mode mode, rtx arg0, rtvec arg1, rtx arg2, rtx arg3); extern rtx gen_rtx_fmt_eee (RTX_CODE, enum machine_mode mode, rtx arg0, rtx arg1, rtx arg2); extern rtx gen_rtx_fmt_ (RTX_CODE, enum machine_mode mode); extern rtx gen_rtx_fmt_w (RTX_CODE, enum machine_mode mode, HOST_WIDE_INT arg0); extern rtx gen_rtx_fmt_0 (RTX_CODE, enum machine_mode mode); extern rtx gen_rtx_fmt_i00 (RTX_CODE, enum machine_mode mode, int arg0); extern rtx gen_rtx_fmt_ei (RTX_CODE, enum machine_mode mode, rtx arg0, int arg1); extern rtx gen_rtx_fmt_e0 (RTX_CODE, enum machine_mode mode, rtx arg0); extern rtx gen_rtx_fmt_u00 (RTX_CODE, enum machine_mode mode, rtx arg0); extern rtx gen_rtx_fmt_s00 (RTX_CODE, enum machine_mode mode, const char *arg0); 
extern rtx gen_rtx_fmt_eeeee (RTX_CODE, enum machine_mode mode, rtx arg0, rtx arg1, rtx arg2, rtx arg3, rtx arg4); extern rtx gen_rtx_fmt_Ee (RTX_CODE, enum machine_mode mode, rtvec arg0, rtx arg1); extern rtx gen_rtx_fmt_uuEiiiiiibbii (RTX_CODE, enum machine_mode mode, rtx arg0, rtx arg1, rtvec arg2, int arg3, int arg4, int arg5, int arg6, int arg7, int arg8, struct bitmap_head_def *arg9, struct bitmap_head_def *arg10, int arg11, int arg12); extern rtx gen_rtx_fmt_iiiiiiiitt (RTX_CODE, enum machine_mode mode, int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7, union tree_node *arg8, union tree_node *arg9); extern rtx gen_rtx_fmt_eti (RTX_CODE, enum machine_mode mode, rtx arg0, union tree_node *arg1, int arg2); extern rtx gen_rtx_fmt_bi (RTX_CODE, enum machine_mode mode, struct bitmap_head_def *arg0, int arg1); extern rtx gen_rtx_fmt_te (RTX_CODE, enum machine_mode mode, union tree_node *arg0, rtx arg1); #define gen_rtx_INCLUDE(MODE, ARG0) \ gen_rtx_fmt_s (INCLUDE, (MODE), (ARG0)) #define gen_rtx_EXPR_LIST(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (EXPR_LIST, (MODE), (ARG0), (ARG1)) #define gen_rtx_INSN_LIST(MODE, ARG0, ARG1) \ gen_rtx_fmt_ue (INSN_LIST, (MODE), (ARG0), (ARG1)) #define gen_rtx_MATCH_OPERAND(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_iss (MATCH_OPERAND, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_MATCH_SCRATCH(MODE, ARG0, ARG1) \ gen_rtx_fmt_is (MATCH_SCRATCH, (MODE), (ARG0), (ARG1)) #define gen_rtx_MATCH_DUP(MODE, ARG0) \ gen_rtx_fmt_i (MATCH_DUP, (MODE), (ARG0)) #define gen_rtx_MATCH_OPERATOR(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_isE (MATCH_OPERATOR, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_MATCH_PARALLEL(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_isE (MATCH_PARALLEL, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_MATCH_OP_DUP(MODE, ARG0, ARG1) \ gen_rtx_fmt_iE (MATCH_OP_DUP, (MODE), (ARG0), (ARG1)) #define gen_rtx_MATCH_PAR_DUP(MODE, ARG0, ARG1) \ gen_rtx_fmt_iE (MATCH_PAR_DUP, (MODE), (ARG0), (ARG1)) #define gen_rtx_DEFINE_EXPAND(MODE, ARG0, ARG1, ARG2, ARG3) \ gen_rtx_fmt_sEss (DEFINE_EXPAND, (MODE), (ARG0), (ARG1), (ARG2), (ARG3)) #define gen_rtx_DEFINE_DELAY(MODE, ARG0, ARG1) \ gen_rtx_fmt_eE (DEFINE_DELAY, (MODE), (ARG0), (ARG1)) #define gen_rtx_DEFINE_COND_EXEC(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_Ess (DEFINE_COND_EXEC, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_SEQUENCE(MODE, ARG0) \ gen_rtx_fmt_E (SEQUENCE, (MODE), (ARG0)) #define gen_rtx_ADDRESS(MODE, ARG0) \ gen_rtx_fmt_e (ADDRESS, (MODE), (ARG0)) #define gen_rtx_EXCLUSION_SET(MODE, ARG0, ARG1) \ gen_rtx_fmt_ss (EXCLUSION_SET, (MODE), (ARG0), (ARG1)) #define gen_rtx_PRESENCE_SET(MODE, ARG0, ARG1) \ gen_rtx_fmt_ss (PRESENCE_SET, (MODE), (ARG0), (ARG1)) #define gen_rtx_FINAL_PRESENCE_SET(MODE, ARG0, ARG1) \ gen_rtx_fmt_ss (FINAL_PRESENCE_SET, (MODE), (ARG0), (ARG1)) #define gen_rtx_ABSENCE_SET(MODE, ARG0, ARG1) \ gen_rtx_fmt_ss (ABSENCE_SET, (MODE), (ARG0), (ARG1)) #define gen_rtx_FINAL_ABSENCE_SET(MODE, ARG0, ARG1) \ gen_rtx_fmt_ss (FINAL_ABSENCE_SET, (MODE), (ARG0), (ARG1)) #define gen_rtx_DEFINE_AUTOMATON(MODE, ARG0) \ gen_rtx_fmt_s (DEFINE_AUTOMATON, (MODE), (ARG0)) #define gen_rtx_AUTOMATA_OPTION(MODE, ARG0) \ gen_rtx_fmt_s (AUTOMATA_OPTION, (MODE), (ARG0)) #define gen_rtx_DEFINE_RESERVATION(MODE, ARG0, ARG1) \ gen_rtx_fmt_ss (DEFINE_RESERVATION, (MODE), (ARG0), (ARG1)) #define gen_rtx_DEFINE_INSN_RESERVATION(MODE, ARG0, ARG1, ARG2, ARG3) \ gen_rtx_fmt_sies (DEFINE_INSN_RESERVATION, (MODE), (ARG0), (ARG1), (ARG2), (ARG3)) #define gen_rtx_DEFINE_ATTR(MODE, ARG0, ARG1, ARG2) \ 
gen_rtx_fmt_sse (DEFINE_ATTR, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_ATTR(MODE, ARG0) \ gen_rtx_fmt_s (ATTR, (MODE), (ARG0)) #define gen_rtx_SET_ATTR(MODE, ARG0, ARG1) \ gen_rtx_fmt_ss (SET_ATTR, (MODE), (ARG0), (ARG1)) #define gen_rtx_SET_ATTR_ALTERNATIVE(MODE, ARG0, ARG1) \ gen_rtx_fmt_sE (SET_ATTR_ALTERNATIVE, (MODE), (ARG0), (ARG1)) #define gen_rtx_EQ_ATTR(MODE, ARG0, ARG1) \ gen_rtx_fmt_ss (EQ_ATTR, (MODE), (ARG0), (ARG1)) #define gen_rtx_EQ_ATTR_ALT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ii (EQ_ATTR_ALT, (MODE), (ARG0), (ARG1)) #define gen_rtx_ATTR_FLAG(MODE, ARG0) \ gen_rtx_fmt_s (ATTR_FLAG, (MODE), (ARG0)) #define gen_rtx_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8) \ gen_rtx_fmt_iuuBieiee (INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6), (ARG7), (ARG8)) #define gen_rtx_JUMP_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8) \ gen_rtx_fmt_iuuBieiee0 (JUMP_INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6), (ARG7), (ARG8)) #define gen_rtx_CALL_INSN(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8, ARG9) \ gen_rtx_fmt_iuuBieieee (CALL_INSN, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6), (ARG7), (ARG8), (ARG9)) #define gen_rtx_BARRIER(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_iuu000000 (BARRIER, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_CODE_LABEL(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) \ gen_rtx_fmt_iuuB00is (CODE_LABEL, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5)) #define gen_rtx_COND_EXEC(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (COND_EXEC, (MODE), (ARG0), (ARG1)) #define gen_rtx_PARALLEL(MODE, ARG0) \ gen_rtx_fmt_E (PARALLEL, (MODE), (ARG0)) #define gen_rtx_ASM_INPUT(MODE, ARG0) \ gen_rtx_fmt_s (ASM_INPUT, (MODE), (ARG0)) #define gen_rtx_ASM_OPERANDS(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6) \ gen_rtx_fmt_ssiEEsi (ASM_OPERANDS, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6)) #define gen_rtx_UNSPEC(MODE, ARG0, ARG1) \ gen_rtx_fmt_Ei (UNSPEC, (MODE), (ARG0), (ARG1)) #define gen_rtx_UNSPEC_VOLATILE(MODE, ARG0, ARG1) \ gen_rtx_fmt_Ei (UNSPEC_VOLATILE, (MODE), (ARG0), (ARG1)) #define gen_rtx_ADDR_VEC(MODE, ARG0) \ gen_rtx_fmt_E (ADDR_VEC, (MODE), (ARG0)) #define gen_rtx_ADDR_DIFF_VEC(MODE, ARG0, ARG1, ARG2, ARG3) \ gen_rtx_fmt_eEee0 (ADDR_DIFF_VEC, (MODE), (ARG0), (ARG1), (ARG2), (ARG3)) #define gen_rtx_PREFETCH(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_eee (PREFETCH, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_SET(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (SET, (MODE), (ARG0), (ARG1)) #define gen_rtx_USE(MODE, ARG0) \ gen_rtx_fmt_e (USE, (MODE), (ARG0)) #define gen_rtx_CLOBBER(MODE, ARG0) \ gen_rtx_fmt_e (CLOBBER, (MODE), (ARG0)) #define gen_rtx_CALL(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (CALL, (MODE), (ARG0), (ARG1)) #define gen_rtx_RETURN(MODE) \ gen_rtx_fmt_ (RETURN, (MODE)) #define gen_rtx_TRAP_IF(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (TRAP_IF, (MODE), (ARG0), (ARG1)) #define gen_rtx_RESX(MODE, ARG0) \ gen_rtx_fmt_i (RESX, (MODE), (ARG0)) #define gen_rtx_raw_CONST_INT(MODE, ARG0) \ gen_rtx_fmt_w (CONST_INT, (MODE), (ARG0)) #define gen_rtx_raw_CONST_VECTOR(MODE, ARG0) \ gen_rtx_fmt_E (CONST_VECTOR, (MODE), (ARG0)) #define gen_rtx_CONST_STRING(MODE, ARG0) \ gen_rtx_fmt_s (CONST_STRING, (MODE), (ARG0)) #define gen_rtx_CONST(MODE, ARG0) \ gen_rtx_fmt_e (CONST, (MODE), (ARG0)) #define gen_rtx_PC(MODE) \ gen_rtx_fmt_ (PC, (MODE)) #define gen_rtx_VALUE(MODE) \ gen_rtx_fmt_0 (VALUE, (MODE)) #define gen_rtx_raw_REG(MODE, ARG0) \ gen_rtx_fmt_i00 (REG, (MODE), (ARG0)) 
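/* For illustration (arbitrary hard register numbers): with the wrappers above, a register-to-register move can be built and emitted as

     emit_insn (gen_rtx_SET (VOIDmode,
                             gen_rtx_REG (SImode, 0),
                             gen_rtx_REG (SImode, 1)));

   which creates (set (reg:SI 0) (reg:SI 1)) and adds it to the current insn sequence via emit_insn, declared earlier in this file. */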
#define gen_rtx_SCRATCH(MODE) \ gen_rtx_fmt_0 (SCRATCH, (MODE)) #define gen_rtx_raw_SUBREG(MODE, ARG0, ARG1) \ gen_rtx_fmt_ei (SUBREG, (MODE), (ARG0), (ARG1)) #define gen_rtx_STRICT_LOW_PART(MODE, ARG0) \ gen_rtx_fmt_e (STRICT_LOW_PART, (MODE), (ARG0)) #define gen_rtx_CONCAT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (CONCAT, (MODE), (ARG0), (ARG1)) #define gen_rtx_raw_MEM(MODE, ARG0) \ gen_rtx_fmt_e0 (MEM, (MODE), (ARG0)) #define gen_rtx_LABEL_REF(MODE, ARG0) \ gen_rtx_fmt_u00 (LABEL_REF, (MODE), (ARG0)) #define gen_rtx_SYMBOL_REF(MODE, ARG0) \ gen_rtx_fmt_s00 (SYMBOL_REF, (MODE), (ARG0)) #define gen_rtx_CC0(MODE) \ gen_rtx_fmt_ (CC0, (MODE)) #define gen_rtx_QUEUED(MODE, ARG0, ARG1, ARG2, ARG3, ARG4) \ gen_rtx_fmt_eeeee (QUEUED, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4)) #define gen_rtx_IF_THEN_ELSE(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_eee (IF_THEN_ELSE, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_COND(MODE, ARG0, ARG1) \ gen_rtx_fmt_Ee (COND, (MODE), (ARG0), (ARG1)) #define gen_rtx_COMPARE(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (COMPARE, (MODE), (ARG0), (ARG1)) #define gen_rtx_PLUS(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (PLUS, (MODE), (ARG0), (ARG1)) #define gen_rtx_MINUS(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (MINUS, (MODE), (ARG0), (ARG1)) #define gen_rtx_NEG(MODE, ARG0) \ gen_rtx_fmt_e (NEG, (MODE), (ARG0)) #define gen_rtx_MULT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (MULT, (MODE), (ARG0), (ARG1)) #define gen_rtx_DIV(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (DIV, (MODE), (ARG0), (ARG1)) #define gen_rtx_MOD(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (MOD, (MODE), (ARG0), (ARG1)) #define gen_rtx_UDIV(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UDIV, (MODE), (ARG0), (ARG1)) #define gen_rtx_UMOD(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UMOD, (MODE), (ARG0), (ARG1)) #define gen_rtx_AND(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (AND, (MODE), (ARG0), (ARG1)) #define gen_rtx_IOR(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (IOR, (MODE), (ARG0), (ARG1)) #define gen_rtx_XOR(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (XOR, (MODE), (ARG0), (ARG1)) #define gen_rtx_NOT(MODE, ARG0) \ gen_rtx_fmt_e (NOT, (MODE), (ARG0)) #define gen_rtx_ASHIFT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (ASHIFT, (MODE), (ARG0), (ARG1)) #define gen_rtx_ROTATE(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (ROTATE, (MODE), (ARG0), (ARG1)) #define gen_rtx_ASHIFTRT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (ASHIFTRT, (MODE), (ARG0), (ARG1)) #define gen_rtx_LSHIFTRT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (LSHIFTRT, (MODE), (ARG0), (ARG1)) #define gen_rtx_ROTATERT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (ROTATERT, (MODE), (ARG0), (ARG1)) #define gen_rtx_SMIN(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (SMIN, (MODE), (ARG0), (ARG1)) #define gen_rtx_SMAX(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (SMAX, (MODE), (ARG0), (ARG1)) #define gen_rtx_UMIN(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UMIN, (MODE), (ARG0), (ARG1)) #define gen_rtx_UMAX(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UMAX, (MODE), (ARG0), (ARG1)) #define gen_rtx_PRE_DEC(MODE, ARG0) \ gen_rtx_fmt_e (PRE_DEC, (MODE), (ARG0)) #define gen_rtx_PRE_INC(MODE, ARG0) \ gen_rtx_fmt_e (PRE_INC, (MODE), (ARG0)) #define gen_rtx_POST_DEC(MODE, ARG0) \ gen_rtx_fmt_e (POST_DEC, (MODE), (ARG0)) #define gen_rtx_POST_INC(MODE, ARG0) \ gen_rtx_fmt_e (POST_INC, (MODE), (ARG0)) #define gen_rtx_PRE_MODIFY(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (PRE_MODIFY, (MODE), (ARG0), (ARG1)) #define gen_rtx_POST_MODIFY(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (POST_MODIFY, (MODE), (ARG0), (ARG1)) #define gen_rtx_NE(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (NE, (MODE), (ARG0), (ARG1)) #define gen_rtx_EQ(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (EQ, 
(MODE), (ARG0), (ARG1)) #define gen_rtx_GE(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (GE, (MODE), (ARG0), (ARG1)) #define gen_rtx_GT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (GT, (MODE), (ARG0), (ARG1)) #define gen_rtx_LE(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (LE, (MODE), (ARG0), (ARG1)) #define gen_rtx_LT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (LT, (MODE), (ARG0), (ARG1)) #define gen_rtx_GEU(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (GEU, (MODE), (ARG0), (ARG1)) #define gen_rtx_GTU(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (GTU, (MODE), (ARG0), (ARG1)) #define gen_rtx_LEU(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (LEU, (MODE), (ARG0), (ARG1)) #define gen_rtx_LTU(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (LTU, (MODE), (ARG0), (ARG1)) #define gen_rtx_UNORDERED(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UNORDERED, (MODE), (ARG0), (ARG1)) #define gen_rtx_ORDERED(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (ORDERED, (MODE), (ARG0), (ARG1)) #define gen_rtx_UNEQ(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UNEQ, (MODE), (ARG0), (ARG1)) #define gen_rtx_UNGE(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UNGE, (MODE), (ARG0), (ARG1)) #define gen_rtx_UNGT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UNGT, (MODE), (ARG0), (ARG1)) #define gen_rtx_UNLE(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UNLE, (MODE), (ARG0), (ARG1)) #define gen_rtx_UNLT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (UNLT, (MODE), (ARG0), (ARG1)) #define gen_rtx_LTGT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (LTGT, (MODE), (ARG0), (ARG1)) #define gen_rtx_SIGN_EXTEND(MODE, ARG0) \ gen_rtx_fmt_e (SIGN_EXTEND, (MODE), (ARG0)) #define gen_rtx_ZERO_EXTEND(MODE, ARG0) \ gen_rtx_fmt_e (ZERO_EXTEND, (MODE), (ARG0)) #define gen_rtx_TRUNCATE(MODE, ARG0) \ gen_rtx_fmt_e (TRUNCATE, (MODE), (ARG0)) #define gen_rtx_FLOAT_EXTEND(MODE, ARG0) \ gen_rtx_fmt_e (FLOAT_EXTEND, (MODE), (ARG0)) #define gen_rtx_FLOAT_TRUNCATE(MODE, ARG0) \ gen_rtx_fmt_e (FLOAT_TRUNCATE, (MODE), (ARG0)) #define gen_rtx_FLOAT(MODE, ARG0) \ gen_rtx_fmt_e (FLOAT, (MODE), (ARG0)) #define gen_rtx_FIX(MODE, ARG0) \ gen_rtx_fmt_e (FIX, (MODE), (ARG0)) #define gen_rtx_UNSIGNED_FLOAT(MODE, ARG0) \ gen_rtx_fmt_e (UNSIGNED_FLOAT, (MODE), (ARG0)) #define gen_rtx_UNSIGNED_FIX(MODE, ARG0) \ gen_rtx_fmt_e (UNSIGNED_FIX, (MODE), (ARG0)) #define gen_rtx_ABS(MODE, ARG0) \ gen_rtx_fmt_e (ABS, (MODE), (ARG0)) #define gen_rtx_SQRT(MODE, ARG0) \ gen_rtx_fmt_e (SQRT, (MODE), (ARG0)) #define gen_rtx_FFS(MODE, ARG0) \ gen_rtx_fmt_e (FFS, (MODE), (ARG0)) #define gen_rtx_CLZ(MODE, ARG0) \ gen_rtx_fmt_e (CLZ, (MODE), (ARG0)) #define gen_rtx_CTZ(MODE, ARG0) \ gen_rtx_fmt_e (CTZ, (MODE), (ARG0)) #define gen_rtx_POPCOUNT(MODE, ARG0) \ gen_rtx_fmt_e (POPCOUNT, (MODE), (ARG0)) #define gen_rtx_PARITY(MODE, ARG0) \ gen_rtx_fmt_e (PARITY, (MODE), (ARG0)) #define gen_rtx_SIGN_EXTRACT(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_eee (SIGN_EXTRACT, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_ZERO_EXTRACT(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_eee (ZERO_EXTRACT, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_HIGH(MODE, ARG0) \ gen_rtx_fmt_e (HIGH, (MODE), (ARG0)) #define gen_rtx_LO_SUM(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (LO_SUM, (MODE), (ARG0), (ARG1)) #define gen_rtx_RANGE_INFO(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8, ARG9, ARG10, ARG11, ARG12) \ gen_rtx_fmt_uuEiiiiiibbii (RANGE_INFO, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6), (ARG7), (ARG8), (ARG9), (ARG10), (ARG11), (ARG12)) #define gen_rtx_RANGE_REG(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7, ARG8, ARG9) \ gen_rtx_fmt_iiiiiiiitt (RANGE_REG, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (ARG5), (ARG6), (ARG7), (ARG8), 
(ARG9)) #define gen_rtx_RANGE_VAR(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_eti (RANGE_VAR, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_RANGE_LIVE(MODE, ARG0, ARG1) \ gen_rtx_fmt_bi (RANGE_LIVE, (MODE), (ARG0), (ARG1)) #define gen_rtx_VEC_MERGE(MODE, ARG0, ARG1, ARG2) \ gen_rtx_fmt_eee (VEC_MERGE, (MODE), (ARG0), (ARG1), (ARG2)) #define gen_rtx_VEC_SELECT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (VEC_SELECT, (MODE), (ARG0), (ARG1)) #define gen_rtx_VEC_CONCAT(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (VEC_CONCAT, (MODE), (ARG0), (ARG1)) #define gen_rtx_VEC_DUPLICATE(MODE, ARG0) \ gen_rtx_fmt_e (VEC_DUPLICATE, (MODE), (ARG0)) #define gen_rtx_SS_PLUS(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (SS_PLUS, (MODE), (ARG0), (ARG1)) #define gen_rtx_US_PLUS(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (US_PLUS, (MODE), (ARG0), (ARG1)) #define gen_rtx_SS_MINUS(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (SS_MINUS, (MODE), (ARG0), (ARG1)) #define gen_rtx_US_MINUS(MODE, ARG0, ARG1) \ gen_rtx_fmt_ee (US_MINUS, (MODE), (ARG0), (ARG1)) #define gen_rtx_SS_TRUNCATE(MODE, ARG0) \ gen_rtx_fmt_e (SS_TRUNCATE, (MODE), (ARG0)) #define gen_rtx_US_TRUNCATE(MODE, ARG0) \ gen_rtx_fmt_e (US_TRUNCATE, (MODE), (ARG0)) #define gen_rtx_VAR_LOCATION(MODE, ARG0, ARG1) \ gen_rtx_fmt_te (VAR_LOCATION, (MODE), (ARG0), (ARG1)) #endif /* GCC_GENRTL_H */ #ifndef USE_MAPPED_LOCATION #undef gen_rtx_ASM_OPERANDS #define gen_rtx_ASM_OPERANDS(MODE, ARG0, ARG1, ARG2, ARG3, ARG4, LOC) \ gen_rtx_fmt_ssiEEsi (ASM_OPERANDS, (MODE), (ARG0), (ARG1), (ARG2), (ARG3), (ARG4), (LOC).file, (LOC).line) #endif #endif /* There are some RTL codes that require special attention; the generation functions included above do the raw handling. If you add to this list, modify special_rtx in gengenrtl.c as well. */ extern rtx gen_rtx_CONST_INT (enum machine_mode, HOST_WIDE_INT); extern rtx gen_rtx_CONST_VECTOR (enum machine_mode, rtvec); extern rtx gen_raw_REG (enum machine_mode, int); extern rtx gen_rtx_REG (enum machine_mode, unsigned); extern rtx gen_rtx_SUBREG (enum machine_mode, rtx, int); extern rtx gen_rtx_MEM (enum machine_mode, rtx); /* We need the cast here to ensure that we get the same result both with and without prototypes. */ #define GEN_INT(N) gen_rtx_CONST_INT (VOIDmode, (HOST_WIDE_INT) (N)) /* Virtual registers are used during RTL generation to refer to locations into the stack frame when the actual location isn't known until RTL generation is complete. The routine instantiate_virtual_regs replaces these with the proper value, which is normally {frame,arg,stack}_pointer_rtx plus a constant. */ #define FIRST_VIRTUAL_REGISTER (FIRST_PSEUDO_REGISTER) /* This points to the first word of the incoming arguments passed on the stack, either by the caller or by the callee when pretending it was passed by the caller. */ #define virtual_incoming_args_rtx (global_rtl[GR_VIRTUAL_INCOMING_ARGS]) #define VIRTUAL_INCOMING_ARGS_REGNUM (FIRST_VIRTUAL_REGISTER) /* If FRAME_GROWS_DOWNWARD, this points to immediately above the first variable on the stack. Otherwise, it points to the first variable on the stack. */ #define virtual_stack_vars_rtx (global_rtl[GR_VIRTUAL_STACK_ARGS]) #define VIRTUAL_STACK_VARS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 1) /* This points to the location of dynamically-allocated memory on the stack immediately after the stack pointer has been adjusted by the amount desired. 
*/ #define virtual_stack_dynamic_rtx (global_rtl[GR_VIRTUAL_STACK_DYNAMIC]) #define VIRTUAL_STACK_DYNAMIC_REGNUM ((FIRST_VIRTUAL_REGISTER) + 2) /* This points to the location in the stack at which outgoing arguments should be written when the stack is pre-pushed (arguments pushed using push insns always use sp). */ #define virtual_outgoing_args_rtx (global_rtl[GR_VIRTUAL_OUTGOING_ARGS]) #define VIRTUAL_OUTGOING_ARGS_REGNUM ((FIRST_VIRTUAL_REGISTER) + 3) /* This points to the Canonical Frame Address of the function. This should correspond to the CFA produced by INCOMING_FRAME_SP_OFFSET, but is calculated relative to the arg pointer for simplicity; the frame pointer nor stack pointer are necessarily fixed relative to the CFA until after reload. */ #define virtual_cfa_rtx (global_rtl[GR_VIRTUAL_CFA]) #define VIRTUAL_CFA_REGNUM ((FIRST_VIRTUAL_REGISTER) + 4) #define LAST_VIRTUAL_REGISTER ((FIRST_VIRTUAL_REGISTER) + 4) /* Nonzero if REGNUM is a pointer into the stack frame. */ #define REGNO_PTR_FRAME_P(REGNUM) \ ((REGNUM) == STACK_POINTER_REGNUM \ || (REGNUM) == FRAME_POINTER_REGNUM \ || (REGNUM) == HARD_FRAME_POINTER_REGNUM \ || (REGNUM) == ARG_POINTER_REGNUM \ || ((REGNUM) >= FIRST_VIRTUAL_REGISTER \ && (REGNUM) <= LAST_VIRTUAL_REGISTER)) /* REGNUM never really appearing in the INSN stream. */ #define INVALID_REGNUM (~(unsigned int) 0) extern rtx output_constant_def (tree, int); extern rtx lookup_constant_def (tree); /* Nonzero after the second flow pass has completed. Set to 1 or 0 by toplev.c */ extern int flow2_completed; /* Nonzero after end of reload pass. Set to 1 or 0 by reload1.c. */ extern int reload_completed; /* Nonzero after thread_prologue_and_epilogue_insns has run. */ extern int epilogue_completed; /* Set to 1 while reload_as_needed is operating. Required by some machines to handle any generated moves differently. */ extern int reload_in_progress; /* If this is nonzero, we do not bother generating VOLATILE around volatile memory references, and we are willing to output indirect addresses. If cse is to follow, we reject indirect addresses so a useful potential cse is generated; if it is used only once, instruction combination will produce the same indirect address eventually. */ extern int cse_not_expected; /* Set to nonzero before life analysis to indicate that it is unsafe to generate any new pseudo registers. */ extern int no_new_pseudos; /* Translates rtx code to tree code, for those codes needed by REAL_ARITHMETIC. The function returns an int because the caller may not know what `enum tree_code' means. 
*/ extern int rtx_to_tree_code (enum rtx_code); /* In cse.c */ extern int delete_trivially_dead_insns (rtx, int); #ifdef BUFSIZ extern int cse_main (rtx, int, int, FILE *); #endif extern void cse_condition_code_reg (void); /* In jump.c */ extern int comparison_dominates_p (enum rtx_code, enum rtx_code); extern int condjump_p (rtx); extern int any_condjump_p (rtx); extern int any_uncondjump_p (rtx); extern int safe_to_remove_jump_p (rtx); extern rtx pc_set (rtx); extern rtx condjump_label (rtx); extern int simplejump_p (rtx); extern int returnjump_p (rtx); extern int onlyjump_p (rtx); extern int only_sets_cc0_p (rtx); extern int sets_cc0_p (rtx); extern int invert_jump_1 (rtx, rtx); extern int invert_jump (rtx, rtx, int); extern int rtx_renumbered_equal_p (rtx, rtx); extern int true_regnum (rtx); extern unsigned int reg_or_subregno (rtx); extern int redirect_jump_1 (rtx, rtx); extern int redirect_jump (rtx, rtx, int); extern void rebuild_jump_labels (rtx); extern enum rtx_code reversed_comparison_code (rtx, rtx); extern enum rtx_code reversed_comparison_code_parts (enum rtx_code, rtx, rtx, rtx); extern void delete_for_peephole (rtx, rtx); extern int condjump_in_parallel_p (rtx); extern void purge_line_number_notes (rtx); /* In emit-rtl.c. */ extern int max_reg_num (void); extern int max_label_num (void); extern int get_first_label_num (void); extern void maybe_set_first_label_num (rtx); extern void delete_insns_since (rtx); extern void mark_reg_pointer (rtx, int); extern void mark_user_reg (rtx); extern void reset_used_flags (rtx); extern void set_used_flags (rtx); extern void reorder_insns (rtx, rtx, rtx); extern void reorder_insns_nobb (rtx, rtx, rtx); extern int get_max_uid (void); extern int in_sequence_p (void); extern void force_next_line_note (void); extern void init_emit (void); extern void init_emit_once (int); extern void push_topmost_sequence (void); extern void pop_topmost_sequence (void); extern void reverse_comparison (rtx); extern void set_new_first_and_last_insn (rtx, rtx); extern void set_new_last_label_num (int); extern void unshare_all_rtl (void); extern void unshare_all_rtl_again (rtx); extern void unshare_all_rtl_in_chain (rtx); extern void verify_rtl_sharing (void); extern void set_first_insn (rtx); extern void set_last_insn (rtx); extern void link_cc0_insns (rtx); extern void add_insn (rtx); extern void add_insn_before (rtx, rtx); extern void add_insn_after (rtx, rtx); extern void remove_insn (rtx); extern void emit_insn_after_with_line_notes (rtx, rtx, rtx); extern enum rtx_code classify_insn (rtx); extern rtx emit (rtx); /* Query and clear/ restore no_line_numbers. This is used by the switch / case handling in stmt.c to give proper line numbers in warnings about unreachable code. 
*/ int force_line_numbers (void); void restore_line_number_status (int old_value); extern void renumber_insns (FILE *); extern void remove_unnecessary_notes (void); extern rtx delete_insn (rtx); extern rtx entry_of_function (void); extern void delete_insn_chain (rtx, rtx); extern rtx unlink_insn_chain (rtx, rtx); extern rtx delete_insn_and_edges (rtx); extern void delete_insn_chain_and_edges (rtx, rtx); extern rtx gen_lowpart_SUBREG (enum machine_mode, rtx); /* In combine.c */ extern int combine_instructions (rtx, unsigned int); extern unsigned int extended_count (rtx, enum machine_mode, int); extern rtx remove_death (unsigned int, rtx); #ifdef BUFSIZ extern void dump_combine_stats (FILE *); extern void dump_combine_total_stats (FILE *); #endif /* In web.c */ extern void web_main (void); /* In sched-rgn.c. */ #ifdef BUFSIZ extern void schedule_insns (FILE *); #endif /* In sched-ebb.c. */ #ifdef BUFSIZ extern void schedule_ebbs (FILE *); #endif /* In haifa-sched.c. */ extern void fix_sched_param (const char *, const char *); /* In print-rtl.c */ extern const char *print_rtx_head; extern void debug_rtx (rtx); extern void debug_rtx_list (rtx, int); extern void debug_rtx_range (rtx, rtx); extern rtx debug_rtx_find (rtx, int); #ifdef BUFSIZ extern void print_mem_expr (FILE *, tree); extern void print_rtl (FILE *, rtx); extern void print_simple_rtl (FILE *, rtx); extern int print_rtl_single (FILE *, rtx); extern void print_inline_rtx (FILE *, rtx, int); #endif /* In loop.c */ extern void init_loop (void); #ifdef BUFSIZ extern void loop_optimize (rtx, FILE *, int); #endif extern void branch_target_load_optimize (bool); /* In function.c */ extern void reposition_prologue_and_epilogue_notes (rtx); extern void thread_prologue_and_epilogue_insns (rtx); extern int prologue_epilogue_contains (rtx); extern int sibcall_epilogue_contains (rtx); extern void mark_temp_addr_taken (rtx); extern void update_temp_slot_address (rtx, rtx); extern void purge_hard_subreg_sets (rtx); /* In stmt.c */ extern void set_file_and_line_for_stmt (location_t); extern void expand_null_return (void); extern void expand_naked_return (void); extern void emit_jump (rtx); extern int preserve_subexpressions_p (void); /* In expr.c */ extern rtx move_by_pieces (rtx, rtx, unsigned HOST_WIDE_INT, unsigned int, int); /* In flow.c */ extern void recompute_reg_usage (rtx, int); extern int initialize_uninitialized_subregs (void); extern void delete_dead_jumptables (void); #ifdef BUFSIZ extern void print_rtl_with_bb (FILE *, rtx); extern void dump_flow_info (FILE *); #endif /* In expmed.c */ extern void init_expmed (void); extern void expand_inc (rtx, rtx); extern void expand_dec (rtx, rtx); extern rtx expand_mult_highpart (enum machine_mode, rtx, unsigned HOST_WIDE_INT, rtx, int, int); /* In gcse.c */ extern bool can_copy_p (enum machine_mode); extern rtx fis_get_condition (rtx); #ifdef BUFSIZ extern int gcse_main (rtx, FILE *); extern int bypass_jumps (FILE *); extern void gcse_after_reload_main (rtx, FILE *); #endif /* In global.c */ extern void mark_elimination (int, int); #ifdef BUFSIZ extern int global_alloc (FILE *); extern void dump_global_regs (FILE *); #endif #ifdef HARD_CONST /* Yes, this ifdef is silly, but HARD_REG_SET is not always defined. 
*/ extern void retry_global_alloc (int, HARD_REG_SET); #endif extern void build_insn_chain (rtx); /* In regclass.c */ extern int reg_classes_intersect_p (enum reg_class, enum reg_class); extern int reg_class_subset_p (enum reg_class, enum reg_class); extern void globalize_reg (int); extern void init_reg_modes_once (void); extern void init_regs (void); extern void init_fake_stack_mems (void); extern void init_reg_sets (void); extern void regset_release_memory (void); extern void regclass_init (void); extern void regclass (rtx, int, FILE *); extern void reg_scan (rtx, unsigned int, int); extern void reg_scan_update (rtx, rtx, unsigned int); extern void fix_register (const char *, int, int); #ifdef HARD_CONST extern void cannot_change_mode_set_regs (HARD_REG_SET *, enum machine_mode, unsigned int); #endif extern bool invalid_mode_change_p (unsigned int, enum reg_class, enum machine_mode); /* In regmove.c */ #ifdef BUFSIZ extern void regmove_optimize (rtx, int, FILE *); #endif extern void combine_stack_adjustments (void); /* In reorg.c */ #ifdef BUFSIZ extern void dbr_schedule (rtx, FILE *); #endif /* In local-alloc.c */ #ifdef BUFSIZ extern void dump_local_alloc (FILE *); #endif extern int local_alloc (void); /* In reg-stack.c */ #ifdef BUFSIZ extern bool reg_to_stack (FILE *); #endif /* In calls.c */ enum libcall_type { LCT_NORMAL = 0, LCT_CONST = 1, LCT_PURE = 2, LCT_CONST_MAKE_BLOCK = 3, LCT_PURE_MAKE_BLOCK = 4, LCT_NORETURN = 5, LCT_THROW = 6, LCT_ALWAYS_RETURN = 7, LCT_RETURNS_TWICE = 8 }; extern void emit_library_call (rtx, enum libcall_type, enum machine_mode, int, ...); extern rtx emit_library_call_value (rtx, rtx, enum libcall_type, enum machine_mode, int, ...); /* In unroll.c */ extern int set_dominates_use (int, int, int, rtx, rtx); /* In varasm.c */ extern int in_data_section (void); extern void init_varasm_once (void); /* In rtl.c */ extern void init_rtl (void); extern void traverse_md_constants (int (*) (void **, void *), void *); struct md_constant { char *name, *value; }; #ifdef BUFSIZ extern int read_skip_spaces (FILE *); extern rtx read_rtx (FILE *); #endif extern const char *read_rtx_filename; extern int read_rtx_lineno; /* Redefine abort to report an internal error w/o coredump, and reporting the location of the error in the source file. This logic is duplicated in rtl.h and tree.h because every file that needs the special abort includes one or both. toplev.h gets too few files, system.h gets too many. 
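   (A hypothetical usage sketch follows.)  */
#if 0
/* Illustrative sketch of the emit_library_call interface declared
   earlier in this header: after the fixed arguments (callee rtx,
   libcall type, return mode, argument count), the variable arguments
   are (value, mode) pairs, one pair per argument.  The function name
   and operands are made up for illustration; CALLEE would typically be
   a SYMBOL_REF for the library routine being called.  */
static void
example_call_library_helper (rtx callee, rtx op0, rtx op1)
{
  emit_library_call (callee, LCT_NORMAL, VOIDmode, 2,
                     op0, SImode,
                     op1, SImode);
}
#endif
/*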
*/ extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN; #define abort() fancy_abort (__FILE__, __LINE__, __FUNCTION__) /* In alias.c */ extern void clear_reg_alias_info (rtx); extern rtx canon_rtx (rtx); extern int true_dependence (rtx, enum machine_mode, rtx, int (*)(rtx, int)); extern rtx get_addr (rtx); extern int canon_true_dependence (rtx, enum machine_mode, rtx, rtx, int (*)(rtx, int)); extern int read_dependence (rtx, rtx); extern int anti_dependence (rtx, rtx); extern int output_dependence (rtx, rtx); extern int unchanging_anti_dependence (rtx, rtx); extern void mark_constant_function (void); extern void init_alias_once (void); extern void init_alias_analysis (void); extern void end_alias_analysis (void); extern rtx addr_side_effect_eval (rtx, int, int); extern bool memory_modified_in_insn_p (rtx, rtx); extern rtx find_base_term (rtx); extern rtx gen_hard_reg_clobber (enum machine_mode, unsigned int); extern rtx get_reg_known_value (unsigned int); extern bool get_reg_known_equiv_p (unsigned int); #ifdef STACK_REGS extern int stack_regs_mentioned (rtx insn); #endif /* In toplev.c */ extern GTY(()) rtx stack_limit_rtx; /* In regrename.c */ extern void regrename_optimize (void); extern void copyprop_hardreg_forward (void); /* In ifcvt.c */ extern void if_convert (int); /* In predict.c */ extern void invert_br_probabilities (rtx); extern bool expensive_function_p (int); /* In tracer.c */ extern void tracer (void); /* In var-tracking.c */ extern void variable_tracking_main (void); /* In stor-layout.c. */ extern void get_mode_bounds (enum machine_mode, int, enum machine_mode, rtx *, rtx *); /* In loop-unswitch.c */ extern rtx reversed_condition (rtx); extern rtx compare_and_jump_seq (rtx, rtx, enum rtx_code, rtx, int, rtx); /* In loop-iv.c */ extern rtx canon_condition (rtx); extern void simplify_using_condition (rtx, rtx *, struct bitmap_head_def *); /* In ra.c. */ extern void reg_alloc (void); /* In modulo-sched.c. */ #ifdef BUFSIZ extern void sms_schedule (FILE *); #endif struct rtl_hooks { rtx (*gen_lowpart) (enum machine_mode, rtx); rtx (*reg_nonzero_bits) (rtx, enum machine_mode, rtx, enum machine_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT *); rtx (*reg_num_sign_bit_copies) (rtx, enum machine_mode, rtx, enum machine_mode, unsigned int, unsigned int *); /* Whenever you add entries here, make sure you adjust hosthooks-def.h. */ }; /* Each pass can provide its own. */ extern struct rtl_hooks rtl_hooks; /* ... but then it has to restore these. */ extern const struct rtl_hooks general_rtl_hooks; /* Keep this for the nonce. */ #define gen_lowpart rtl_hooks.gen_lowpart #endif /* ! GCC_RTL_H */ /* Front-end tree definitions for GNU compiler. Copyright (C) 1989, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #ifndef GCC_TREE_H #define GCC_TREE_H /* Codes of tree nodes */ #define DEFTREECODE(SYM, STRING, TYPE, NARGS) SYM, enum tree_code { /* This file contains the definitions and documentation for the tree codes used in GCC. Copyright (C) 1987, 1988, 1993, 1995, 1997, 1998, 2000, 2001, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The third argument can be: 'x' for an exceptional code (fits no category). 't' for a type object code. 'c' for codes for constants. 'd' for codes for declarations (also serving as variable refs). 'r' for codes for references to storage. '<' for codes for comparison expressions. '1' for codes for unary arithmetic expressions. '2' for codes for binary arithmetic expressions. 's' for codes for "statement" expressions, which have side-effects, but usually no interesting value. 'e' for codes for other kinds of expressions. */ /* For `r', `e', `<', `1', `2', and `s' nodes, which use struct tree_exp, the 4th element is the number of argument slots to allocate. This determines the size of the tree node object. Other nodes use different structures, and the size is determined by the tree_union member structure; the 4th element should be zero. Languages that define language-specific 'x' or 'c' codes must define the tree_size langhook to say how big they are. */ /* Any erroneous construct is parsed into a node of this type. This type of node is accepted without complaint in all contexts by later parsing activities, to avoid multiple error messages for one error. No fields in these nodes are used except the TREE_CODE. */ DEFTREECODE (ERROR_MARK, "error_mark", 'x', 0) /* Used to represent a name (such as, in the DECL_NAME of a decl node). Internally it looks like a STRING_CST node. There is only one IDENTIFIER_NODE ever made for any particular name. Use `get_identifier' to get it (or create it, the first time). */ DEFTREECODE (IDENTIFIER_NODE, "identifier_node", 'x', 0) /* Has the TREE_VALUE and TREE_PURPOSE fields. */ /* These nodes are made into lists by chaining through the TREE_CHAIN field. The elements of the list live in the TREE_VALUE fields, while TREE_PURPOSE fields are occasionally used as well to get the effect of Lisp association lists. */ DEFTREECODE (TREE_LIST, "tree_list", 'x', 0) /* These nodes contain an array of tree nodes. */ DEFTREECODE (TREE_VEC, "tree_vec", 'x', 0) /* A symbol binding block. These are arranged in a tree, where the BLOCK_SUBBLOCKS field contains a chain of subblocks chained through the BLOCK_CHAIN field. BLOCK_SUPERCONTEXT points to the parent block. For a block which represents the outermost scope of a function, it points to the FUNCTION_DECL node. BLOCK_VARS points to a chain of decl nodes. BLOCK_TYPE_TAGS points to a chain of types which have their own names. BLOCK_CHAIN points to the next BLOCK at the same level. 
BLOCK_ABSTRACT_ORIGIN points to the original (abstract) tree node which this block is an instance of, or else is NULL to indicate that this block is not an instance of anything else. When non-NULL, the value could either point to another BLOCK node or it could point to a FUNCTION_DECL node (e.g. in the case of a block representing the outermost scope of a particular inlining of a function). BLOCK_ABSTRACT is nonzero if the block represents an abstract instance of a block (i.e. one which is nested within an abstract instance of an inline function). TREE_ASM_WRITTEN is nonzero if the block was actually referenced in the generated assembly. */ DEFTREECODE (BLOCK, "block", 'x', 0) /* Each data type is represented by a tree node whose code is one of the following: */ /* Each node that represents a data type has a component TYPE_SIZE containing a tree that is an expression for the size in bits. The TYPE_MODE contains the machine mode for values of this type. The TYPE_POINTER_TO field contains a type for a pointer to this type, or zero if no such has been created yet. The TYPE_NEXT_VARIANT field is used to chain together types that are variants made by type modifiers such as "const" and "volatile". The TYPE_MAIN_VARIANT field, in any member of such a chain, points to the start of the chain. The TYPE_NONCOPIED_PARTS field is a list specifying which parts of an object of this type should *not* be copied by assignment. The TREE_VALUE of each is a FIELD_DECL that should not be copied. The TREE_PURPOSE is an initial value for that field when an object of this type is initialized via an INIT_EXPR. It may be NULL if no special value is required. Even the things in this list are copied if the right-hand side of an assignment is known to be a complete object (rather than being, perhaps, a subobject of some other object.) The determination of what constitutes a complete object is done by fixed_type_p. The TYPE_NAME field contains info on the name used in the program for this type (for GDB symbol table output). It is either a TYPE_DECL node, for types that are typedefs, or an IDENTIFIER_NODE in the case of structs, unions or enums that are known with a tag, or zero for types that have no special name. The TYPE_CONTEXT for any sort of type which could have a name or which could have named members (e.g. tagged types in C/C++) will point to the node which represents the scope of the given type, or will be NULL_TREE if the type has "file scope". For most types, this will point to a BLOCK node or a FUNCTION_DECL node, but it could also point to a FUNCTION_TYPE node (for types whose scope is limited to the formal parameter list of some function type specification) or it could point to a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE node (for C++ "member" types). For non-tagged-types, TYPE_CONTEXT need not be set to anything in particular, since any type which is of some type category (e.g. an array type or a function type) which cannot either have a name itself or have named members doesn't really have a "scope" per se. The TREE_CHAIN field is used as a forward-references to names for ENUMERAL_TYPE, RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE nodes; see below. */ DEFTREECODE (VOID_TYPE, "void_type", 't', 0) /* The void type in C */ /* Integer types in all languages, including char in C. Also used for sub-ranges of other discrete types. Has components TYPE_MIN_VALUE, TYPE_MAX_VALUE (expressions, inclusive) and TYPE_PRECISION (number of bits used by this type). 
In the case of a subrange type in Pascal, the TREE_TYPE of this will point at the supertype (another INTEGER_TYPE, or an ENUMERAL_TYPE, CHAR_TYPE, or BOOLEAN_TYPE). Otherwise, the TREE_TYPE is zero. */ DEFTREECODE (INTEGER_TYPE, "integer_type", 't', 0) /* C's float and double. Different floating types are distinguished by machine mode and by the TYPE_SIZE and the TYPE_PRECISION. */ DEFTREECODE (REAL_TYPE, "real_type", 't', 0) /* Complex number types. The TREE_TYPE field is the data type of the real and imaginary parts. */ DEFTREECODE (COMPLEX_TYPE, "complex_type", 't', 0) /* Vector types. The TREE_TYPE field is the data type of the vector elements. */ DEFTREECODE (VECTOR_TYPE, "vector_type", 't', 0) /* C enums. The type node looks just like an INTEGER_TYPE node. The symbols for the values of the enum type are defined by CONST_DECL nodes, but the type does not point to them; however, the TYPE_VALUES is a list in which each element's TREE_PURPOSE is a name and the TREE_VALUE is the value (an INTEGER_CST node). */ /* A forward reference `enum foo' when no enum named foo is defined yet has zero (a null pointer) in its TYPE_SIZE. The tag name is in the TYPE_NAME field. If the type is later defined, the normal fields are filled in. RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE forward refs are treated similarly. */ DEFTREECODE (ENUMERAL_TYPE, "enumeral_type", 't', 0) /* Pascal's boolean type (true or false are the only values); no special fields needed. */ DEFTREECODE (BOOLEAN_TYPE, "boolean_type", 't', 0) /* CHAR in Pascal; not used in C. No special fields needed. */ DEFTREECODE (CHAR_TYPE, "char_type", 't', 0) /* All pointer-to-x types have code POINTER_TYPE. The TREE_TYPE points to the node for the type pointed to. */ DEFTREECODE (POINTER_TYPE, "pointer_type", 't', 0) /* An offset is a pointer relative to an object. The TREE_TYPE field is the type of the object at the offset. The TYPE_OFFSET_BASETYPE points to the node for the type of object that the offset is relative to. */ DEFTREECODE (OFFSET_TYPE, "offset_type", 't', 0) /* A reference is like a pointer except that it is coerced automatically to the value it points to. Used in C++. */ DEFTREECODE (REFERENCE_TYPE, "reference_type", 't', 0) /* METHOD_TYPE is the type of a function which takes an extra first argument for "self", which is not present in the declared argument list. The TREE_TYPE is the return type of the method. The TYPE_METHOD_BASETYPE is the type of "self". TYPE_ARG_TYPES is the real argument list, which includes the hidden argument for "self". */ DEFTREECODE (METHOD_TYPE, "method_type", 't', 0) /* Used for Pascal; details not determined right now. */ DEFTREECODE (FILE_TYPE, "file_type", 't', 0) /* Types of arrays. Special fields: TREE_TYPE Type of an array element. TYPE_DOMAIN Type to index by. Its range of values specifies the array length. The field TYPE_POINTER_TO (TREE_TYPE (array_type)) is always nonzero and holds the type to coerce a value of that array type to in C. TYPE_STRING_FLAG indicates a string (in contrast to an array of chars) in languages (such as Chill) that make a distinction. */ /* Array types in C or Pascal */ DEFTREECODE (ARRAY_TYPE, "array_type", 't', 0) /* Types of sets for Pascal. Special fields are the same as in an array type. The target type is always a boolean type. Used for both bitstrings and powersets in Chill; TYPE_STRING_FLAG indicates a bitstring. */ DEFTREECODE (SET_TYPE, "set_type", 't', 0) /* Struct in C, or record in Pascal. 
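   (A hypothetical usage sketch follows.)  */
#if 0
/* Illustrative sketch: how a front end might build the type node for a
   C array such as "int a[10]".  It assumes the type constructors that
   stock tree.h declares further below (build_index_type,
   build_array_type) together with size_int and integer_type_node.  The
   TYPE_DOMAIN of the result is an index type whose range, 0 .. 9,
   determines the array length as described above.  */
static tree
example_int_array_10_type (void)
{
  tree index = build_index_type (size_int (9));   /* index range 0 .. 9 */
  return build_array_type (integer_type_node, index);
}
#endif
/*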
*/ /* Special fields: TYPE_FIELDS chain of FIELD_DECLs for the fields of the struct, and VAR_DECLs, TYPE_DECLs and CONST_DECLs for record-scope variables, types and enumerators. A few may need to be added for Pascal. */ /* See the comment above, before ENUMERAL_TYPE, for how forward references to struct tags are handled in C. */ DEFTREECODE (RECORD_TYPE, "record_type", 't', 0) /* Union in C. Like a struct, except that the offsets of the fields will all be zero. */ /* See the comment above, before ENUMERAL_TYPE, for how forward references to union tags are handled in C. */ DEFTREECODE (UNION_TYPE, "union_type", 't', 0) /* C union type */ /* Similar to UNION_TYPE, except that the expressions in DECL_QUALIFIER in each FIELD_DECL determine what the union contains. The first field whose DECL_QUALIFIER expression is true is deemed to occupy the union. */ DEFTREECODE (QUAL_UNION_TYPE, "qual_union_type", 't', 0) /* Type of functions. Special fields: TREE_TYPE type of value returned. TYPE_ARG_TYPES list of types of arguments expected. this list is made of TREE_LIST nodes. Types of "Procedures" in languages where they are different from functions have code FUNCTION_TYPE also, but then TREE_TYPE is zero or void type. */ DEFTREECODE (FUNCTION_TYPE, "function_type", 't', 0) /* This is a language-specific kind of type. Its meaning is defined by the language front end. layout_type does not know how to lay this out, so the front-end must do so manually. */ DEFTREECODE (LANG_TYPE, "lang_type", 't', 0) /* Expressions */ /* First, the constants. */ /* Contents are in TREE_INT_CST_LOW and TREE_INT_CST_HIGH fields, 32 bits each, giving us a 64 bit constant capability. Note: constants of type char in Pascal are INTEGER_CST, and so are pointer constants such as nil in Pascal or NULL in C. `(int *) 1' in C also results in an INTEGER_CST. */ DEFTREECODE (INTEGER_CST, "integer_cst", 'c', 0) /* Contents are in TREE_REAL_CST field. */ DEFTREECODE (REAL_CST, "real_cst", 'c', 0) /* Contents are in TREE_REALPART and TREE_IMAGPART fields, whose contents are other constant nodes. */ DEFTREECODE (COMPLEX_CST, "complex_cst", 'c', 0) /* Contents are in TREE_VECTOR_CST_ELTS field. */ DEFTREECODE (VECTOR_CST, "vector_cst", 'c', 0) /* Contents are TREE_STRING_LENGTH and TREE_STRING_POINTER fields. */ DEFTREECODE (STRING_CST, "string_cst", 'c', 0) /* Declarations. All references to names are represented as ..._DECL nodes. The decls in one binding context are chained through the TREE_CHAIN field. Each DECL has a DECL_NAME field which contains an IDENTIFIER_NODE. (Some decls, most often labels, may have zero as the DECL_NAME). DECL_CONTEXT points to the node representing the context in which this declaration has its scope. For FIELD_DECLs, this is the RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node that the field is a member of. For VAR_DECL, PARM_DECL, FUNCTION_DECL, LABEL_DECL, and CONST_DECL nodes, this points to either the FUNCTION_DECL for the containing function, the RECORD_TYPE or UNION_TYPE for the containing type, or NULL_TREE or a TRANSLATION_UNIT_DECL if the given decl has "file scope". DECL_ABSTRACT_ORIGIN, if non-NULL, points to the original (abstract) ..._DECL node of which this decl is an (inlined or template expanded) instance. The TREE_TYPE field holds the data type of the object, when relevant. LABEL_DECLs have no data type. For TYPE_DECL, the TREE_TYPE field contents are the type whose name is being declared. The DECL_ALIGN, DECL_SIZE, and DECL_MODE fields exist in decl nodes just as in type nodes. 
They are unused in LABEL_DECL, TYPE_DECL and CONST_DECL nodes. DECL_FIELD_BIT_OFFSET holds an integer number of bits offset for the location. DECL_VOFFSET holds an expression for a variable offset; it is to be multiplied by DECL_VOFFSET_UNIT (an integer). These fields are relevant only in FIELD_DECLs and PARM_DECLs. DECL_INITIAL holds the value to initialize a variable to, or the value of a constant. For a function, it holds the body (a node of type BLOCK representing the function's binding contour and whose body contains the function's statements.) For a LABEL_DECL in C, it is a flag, nonzero if the label's definition has been seen. PARM_DECLs use a special field: DECL_ARG_TYPE is the type in which the argument is actually passed, which may be different from its type within the function. FUNCTION_DECLs use four special fields: DECL_ARGUMENTS holds a chain of PARM_DECL nodes for the arguments. DECL_RESULT holds a RESULT_DECL node for the value of a function, or it is 0 for a function that returns no value. (C functions returning void have zero here.) The TREE_TYPE field is the type in which the result is actually returned. This is usually the same as the return type of the FUNCTION_DECL, but it may be a wider integer type because of promotion. DECL_FUNCTION_CODE is a code number that is nonzero for built-in functions. Its value is an enum built_in_function that says which built-in function it is. DECL_SOURCE_FILE holds a filename string and DECL_SOURCE_LINE holds a line number. In some cases these can be the location of a reference, if no definition has been seen. DECL_ABSTRACT is nonzero if the decl represents an abstract instance of a decl (i.e. one which is nested within an abstract instance of a inline function. */ DEFTREECODE (FUNCTION_DECL, "function_decl", 'd', 0) DEFTREECODE (LABEL_DECL, "label_decl", 'd', 0) DEFTREECODE (CONST_DECL, "const_decl", 'd', 0) DEFTREECODE (TYPE_DECL, "type_decl", 'd', 0) DEFTREECODE (VAR_DECL, "var_decl", 'd', 0) DEFTREECODE (PARM_DECL, "parm_decl", 'd', 0) DEFTREECODE (RESULT_DECL, "result_decl", 'd', 0) DEFTREECODE (FIELD_DECL, "field_decl", 'd', 0) /* A namespace declaration. Namespaces appear in DECL_CONTEXT of other _DECLs, providing a hierarchy of names. */ DEFTREECODE (NAMESPACE_DECL, "namespace_decl", 'd', 0) /* A translation unit. This is not technically a declaration, since it can't be looked up, but it's close enough. */ DEFTREECODE (TRANSLATION_UNIT_DECL, "translation_unit_decl", 'd', 0) /* References to storage. */ /* Value is structure or union component. Operand 0 is the structure or union (an expression). Operand 1 is the field (a node of type FIELD_DECL). Operand 2, if present, is the value of DECL_FIELD_OFFSET, measured in units of DECL_OFFSET_ALIGN / BITS_PER_UNIT. */ DEFTREECODE (COMPONENT_REF, "component_ref", 'r', 3) /* Reference to a group of bits within an object. Similar to COMPONENT_REF except the position is given explicitly rather than via a FIELD_DECL. Operand 0 is the structure or union expression; operand 1 is a tree giving the number of bits being referenced; operand 2 is a tree giving the position of the first referenced bit. The field can be either a signed or unsigned field; BIT_FIELD_REF_UNSIGNED says which. */ DEFTREECODE (BIT_FIELD_REF, "bit_field_ref", 'r', 3) /* C unary `*' or Pascal `^'. One operand, an expression for a pointer. */ DEFTREECODE (INDIRECT_REF, "indirect_ref", 'r', 1) /* Pascal `^` on a file. One operand, an expression for the file. 
*/ DEFTREECODE (BUFFER_REF, "buffer_ref", 'r', 1) /* Array indexing. Operand 0 is the array; operand 1 is a (single) array index. Operand 2, if present, is a copy of TYPE_MIN_VALUE of the index. Operand 3, if present, is the element size, measured in units of the alignment of the element type. */ DEFTREECODE (ARRAY_REF, "array_ref", 'r', 4) /* Likewise, except that the result is a range ("slice") of the array. The starting index of the resulting array is taken from operand 1 and the size of the range is taken from the type of the expression. */ DEFTREECODE (ARRAY_RANGE_REF, "array_range_ref", 'r', 4) /* Used to represent lookup of runtime type dependent data. Often this is a reference to a vtable, but it needn't be. Operands are: OBJ_TYPE_REF_EXPR: An expression that evaluates the value to use. OBJ_TYPE_REF_OBJECT: Is the object on whose behalf the lookup is being performed. Through this the optimizers may be able to statically determine the dynamic type of the object. OBJ_TYPE_REF_TOKEN: Something front-end specific used to resolve the reference to something simpler, usually to the address of a DECL. Never touched by the middle-end. Good choices would be either an identifier or a vtable index. */ DEFTREECODE (OBJ_TYPE_REF, "obj_type_ref", 'e', 3) /* The exception object from the runtime. */ DEFTREECODE (EXC_PTR_EXPR, "exc_ptr_expr", 'e', 0) /* The filter object from the runtime. */ DEFTREECODE (FILTER_EXPR, "filter_expr", 'e', 0) /* Constructor: return an aggregate value made from specified components. In C, this is used only for structure and array initializers. Also used for SET_TYPE in Chill (and potentially Pascal). The operand is a list of component values made out of a chain of TREE_LIST nodes. For ARRAY_TYPE: The TREE_PURPOSE of each node is the corresponding index. If the TREE_PURPOSE is a RANGE_EXPR, it is a short-hand for many nodes, one for each index in the range. (If the corresponding TREE_VALUE has side-effects, they are evaluated once for each element. Wrap the value in a SAVE_EXPR if you want to evaluate side effects only once.) For RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE: The TREE_PURPOSE of each node is a FIELD_DECL. For SET_TYPE: The TREE_VALUE specifies a value (index) in the set that is true. If TREE_PURPOSE is non-NULL, it specifies the lower limit of a range of true values. Elements not listed are false (not in the set). */ DEFTREECODE (CONSTRUCTOR, "constructor", 'e', 1) /* The expression types are mostly straightforward, with the fourth argument of DEFTREECODE saying how many operands there are. Unless otherwise specified, the operands are expressions and the types of all the operands and the expression must all be the same. */ /* Contains two expressions to compute, one followed by the other. the first value is ignored. The second one's value is used. The type of the first expression need not agree with the other types. */ DEFTREECODE (COMPOUND_EXPR, "compound_expr", 'e', 2) /* Assignment expression. Operand 0 is the what to set; 1, the new value. */ DEFTREECODE (MODIFY_EXPR, "modify_expr", 'e', 2) /* Initialization expression. Operand 0 is the variable to initialize; Operand 1 is the initializer. */ DEFTREECODE (INIT_EXPR, "init_expr", 'e', 2) /* For TARGET_EXPR, operand 0 is the target of an initialization, operand 1 is the initializer for the target, which may be void if simply expanding it initializes the target. operand 2 is the cleanup for this node, if any. 
operand 3 is the saved initializer after this node has been expanded once; this is so we can re-expand the tree later. */ DEFTREECODE (TARGET_EXPR, "target_expr", 'e', 4) /* Conditional expression ( ... ? ... : ... in C). Operand 0 is the condition. Operand 1 is the then-value. Operand 2 is the else-value. Operand 0 may be of any type. Operand 1 must have the same type as the entire expression, unless it unconditionally throws an exception, in which case it should have VOID_TYPE. The same constraints apply to operand 2. */ DEFTREECODE (COND_EXPR, "cond_expr", 'e', 3) /* Declare local variables, including making RTL and allocating space. BIND_EXPR_VARS is a chain of VAR_DECL nodes for the variables. BIND_EXPR_BODY is the body, the expression to be computed using the variables. The value of operand 1 becomes that of the BIND_EXPR. BIND_EXPR_BLOCK is the BLOCK that corresponds to these bindings for debugging purposes. If this BIND_EXPR is actually expanded, that sets the TREE_USED flag in the BLOCK. The BIND_EXPR is not responsible for informing parsers about these variables. If the body is coming from the input file, then the code that creates the BIND_EXPR is also responsible for informing the parser of the variables. If the BIND_EXPR is ever expanded, its TREE_USED flag is set. This tells the code for debugging symbol tables not to ignore the BIND_EXPR. If the BIND_EXPR should be output for debugging but will not be expanded, set the TREE_USED flag by hand. In order for the BIND_EXPR to be known at all, the code that creates it must also install it as a subblock in the tree of BLOCK nodes for the function. */ DEFTREECODE (BIND_EXPR, "bind_expr", 'e', 3) /* A labeled block. Operand 0 is the label that will be generated to mark the end of the block. Operand 1 is the labeled block body. */ DEFTREECODE (LABELED_BLOCK_EXPR, "labeled_block_expr", 'e', 2) /* Function call. Operand 0 is the function. Operand 1 is the argument list, a list of expressions made out of a chain of TREE_LIST nodes. Operand 2 is the static chain argument, or NULL. */ DEFTREECODE (CALL_EXPR, "call_expr", 'e', 3) /* Specify a value to compute along with its corresponding cleanup. Operand 0 argument is an expression whose value needs a cleanup. Operand 1 is the cleanup expression for the object. Operand 2 is unused. The cleanup is executed by the first enclosing CLEANUP_POINT_EXPR, if it exists, otherwise it is the responsibility of the caller to manually call expand_start_target_temps/expand_end_target_temps, as needed. This differs from TRY_CATCH_EXPR in that operand 2 is always evaluated when an exception isn't thrown when cleanups are run. */ DEFTREECODE (WITH_CLEANUP_EXPR, "with_cleanup_expr", 'e', 3) /* Specify a cleanup point. Operand 0 is an expression that may have cleanups. If it does, those cleanups are executed after the expression is expanded. Note that if the expression is a reference to storage, it is forced out of memory before the cleanups are run. This is necessary to handle cases where the cleanups modify the storage referenced; in the expression 't.i', if 't' is a struct with an integer member 'i' and a cleanup which modifies 'i', the value of the expression depends on whether the cleanup is run before or after 't.i' is evaluated. When expand_expr is run on 't.i', it returns a MEM. This is not good enough; the value of 't.i' must be forced out of memory. As a consequence, the operand of a CLEANUP_POINT_EXPR must not have BLKmode, because it will not be forced out of memory. 
*/ DEFTREECODE (CLEANUP_POINT_EXPR, "cleanup_point_expr", 'e', 1) /* The following two codes are used in languages that have types where some field in an object of the type contains a value that is used in the computation of another field's offset or size and/or the size of the type. The positions and/or sizes of fields can vary from object to object of the same type or even for one and the same object within its scope. Record types with discriminants in Ada or schema types in Pascal are examples of such types. This mechanism is also used to create "fat pointers" for unconstrained array types in Ada; the fat pointer is a structure one of whose fields is a pointer to the actual array type and the other field is a pointer to a template, which is a structure containing the bounds of the array. The bounds in the type pointed to by the first field in the fat pointer refer to the values in the template. When you wish to construct such a type you need "self-references" that allow you to reference the object having this type from the TYPE node, i.e. without having a variable instantiating this type. Such a "self-reference" is done using a PLACEHOLDER_EXPR. This is a node that will later be replaced with the object being referenced. Its type is that of the object and selects which object to use from a chain of references (see below). No other slots are used in the PLACEHOLDER_EXPR. For example, if your type FOO is a RECORD_TYPE with a field BAR, and you need the value of <variable>.BAR to calculate TYPE_SIZE (FOO), just substitute <variable> above with a PLACEHOLDER_EXPR whose TREE_TYPE is FOO. Then construct your COMPONENT_REF with the PLACEHOLDER_EXPR as the first operand (which has the correct type). Later, when the size is needed in the program, the back-end will find this PLACEHOLDER_EXPR and generate code to calculate the actual size at run-time. In the following, we describe how this calculation is done. When we wish to evaluate a size or offset, we check whether it contains a PLACEHOLDER_EXPR. If it does, we call substitute_placeholder_in_expr passing both that tree and an expression within which the object may be found. The latter expression is the object itself in the simple case of an Ada record with discriminant, but it can be the array in the case of an unconstrained array. In the latter case, we need the fat pointer, because the bounds of the array can only be accessed from it. However, we rely here on the fact that the expression for the array contains the dereference of the fat pointer that obtained the array pointer. */ /* Denotes a record to later be substituted before evaluating this expression. The type of this expression is used to find the record to replace it. */ DEFTREECODE (PLACEHOLDER_EXPR, "placeholder_expr", 'x', 0) /* Simple arithmetic. */ DEFTREECODE (PLUS_EXPR, "plus_expr", '2', 2) DEFTREECODE (MINUS_EXPR, "minus_expr", '2', 2) DEFTREECODE (MULT_EXPR, "mult_expr", '2', 2) /* Division for integer result that rounds the quotient toward zero. */ DEFTREECODE (TRUNC_DIV_EXPR, "trunc_div_expr", '2', 2) /* Division for integer result that rounds the quotient toward infinity. */ DEFTREECODE (CEIL_DIV_EXPR, "ceil_div_expr", '2', 2) /* Division for integer result that rounds toward minus infinity. */ DEFTREECODE (FLOOR_DIV_EXPR, "floor_div_expr", '2', 2) /* Division for integer result that rounds toward nearest integer. */ DEFTREECODE (ROUND_DIV_EXPR, "round_div_expr", '2', 2) /* Four kinds of remainder that go with the four kinds of division. 
*/ DEFTREECODE (TRUNC_MOD_EXPR, "trunc_mod_expr", '2', 2) DEFTREECODE (CEIL_MOD_EXPR, "ceil_mod_expr", '2', 2) DEFTREECODE (FLOOR_MOD_EXPR, "floor_mod_expr", '2', 2) DEFTREECODE (ROUND_MOD_EXPR, "round_mod_expr", '2', 2) /* Division for real result. */ DEFTREECODE (RDIV_EXPR, "rdiv_expr", '2', 2) /* Division which is not supposed to need rounding. Used for pointer subtraction in C. */ DEFTREECODE (EXACT_DIV_EXPR, "exact_div_expr", '2', 2) /* Conversion of real to fixed point: four ways to round, like the four ways to divide. CONVERT_EXPR can also be used to convert a real to an integer, and that is what is used in languages that do not have ways of specifying which of these is wanted. Maybe these are not needed. */ DEFTREECODE (FIX_TRUNC_EXPR, "fix_trunc_expr", '1', 1) DEFTREECODE (FIX_CEIL_EXPR, "fix_ceil_expr", '1', 1) DEFTREECODE (FIX_FLOOR_EXPR, "fix_floor_expr", '1', 1) DEFTREECODE (FIX_ROUND_EXPR, "fix_round_expr", '1', 1) /* Conversion of an integer to a real. */ DEFTREECODE (FLOAT_EXPR, "float_expr", '1', 1) /* Unary negation. */ DEFTREECODE (NEGATE_EXPR, "negate_expr", '1', 1) DEFTREECODE (MIN_EXPR, "min_expr", '2', 2) DEFTREECODE (MAX_EXPR, "max_expr", '2', 2) /* Represents the absolute value of the operand. An ABS_EXPR must have either an INTEGER_TYPE or a REAL_TYPE. The operand of the ABS_EXPR must have the same type. */ DEFTREECODE (ABS_EXPR, "abs_expr", '1', 1) /* Shift operations for shift and rotate. Shift means logical shift if done on an unsigned type, arithmetic shift if done on a signed type. The second operand is the number of bits to shift by; it need not be the same type as the first operand and result. Note that the result is undefined if the second operand is larger than the first operand's type size. */ DEFTREECODE (LSHIFT_EXPR, "lshift_expr", '2', 2) DEFTREECODE (RSHIFT_EXPR, "rshift_expr", '2', 2) DEFTREECODE (LROTATE_EXPR, "lrotate_expr", '2', 2) DEFTREECODE (RROTATE_EXPR, "rrotate_expr", '2', 2) /* Bitwise operations. Operands have same mode as result. */ DEFTREECODE (BIT_IOR_EXPR, "bit_ior_expr", '2', 2) DEFTREECODE (BIT_XOR_EXPR, "bit_xor_expr", '2', 2) DEFTREECODE (BIT_AND_EXPR, "bit_and_expr", '2', 2) DEFTREECODE (BIT_NOT_EXPR, "bit_not_expr", '1', 1) /* ANDIF and ORIF allow the second operand not to be computed if the value of the expression is determined from the first operand. AND, OR, and XOR always compute the second operand whether its value is needed or not (for side effects). The operand may have BOOLEAN_TYPE or INTEGER_TYPE. In either case, the argument will be either zero or one. For example, a TRUTH_NOT_EXPR will never have an INTEGER_TYPE VAR_DECL as its argument; instead, a NE_EXPR will be used to compare the VAR_DECL to zero, thereby obtaining a node with value zero or one. */ DEFTREECODE (TRUTH_ANDIF_EXPR, "truth_andif_expr", 'e', 2) DEFTREECODE (TRUTH_ORIF_EXPR, "truth_orif_expr", 'e', 2) DEFTREECODE (TRUTH_AND_EXPR, "truth_and_expr", 'e', 2) DEFTREECODE (TRUTH_OR_EXPR, "truth_or_expr", 'e', 2) DEFTREECODE (TRUTH_XOR_EXPR, "truth_xor_expr", 'e', 2) DEFTREECODE (TRUTH_NOT_EXPR, "truth_not_expr", 'e', 1) /* Relational operators. `EQ_EXPR' and `NE_EXPR' are allowed for any types. The others are allowed only for integer (or pointer or enumeral) or real types. In all cases the operands will have the same type, and the value is always the type used by the language for booleans. 
*/ DEFTREECODE (LT_EXPR, "lt_expr", '<', 2) DEFTREECODE (LE_EXPR, "le_expr", '<', 2) DEFTREECODE (GT_EXPR, "gt_expr", '<', 2) DEFTREECODE (GE_EXPR, "ge_expr", '<', 2) DEFTREECODE (EQ_EXPR, "eq_expr", '<', 2) DEFTREECODE (NE_EXPR, "ne_expr", '<', 2) /* Additional relational operators for floating point unordered. */ DEFTREECODE (UNORDERED_EXPR, "unordered_expr", '<', 2) DEFTREECODE (ORDERED_EXPR, "ordered_expr", '<', 2) /* These are equivalent to unordered or ... */ DEFTREECODE (UNLT_EXPR, "unlt_expr", '<', 2) DEFTREECODE (UNLE_EXPR, "unle_expr", '<', 2) DEFTREECODE (UNGT_EXPR, "ungt_expr", '<', 2) DEFTREECODE (UNGE_EXPR, "unge_expr", '<', 2) DEFTREECODE (UNEQ_EXPR, "uneq_expr", '<', 2) /* This is the reverse of uneq_expr. */ DEFTREECODE (LTGT_EXPR, "ltgt_expr", '<', 2) /* Operations for Pascal sets. Not used now. */ DEFTREECODE (IN_EXPR, "in_expr", '2', 2) DEFTREECODE (SET_LE_EXPR, "set_le_expr", '<', 2) DEFTREECODE (CARD_EXPR, "card_expr", '1', 1) DEFTREECODE (RANGE_EXPR, "range_expr", '2', 2) /* Represents a conversion of type of a value. All conversions, including implicit ones, must be represented by CONVERT_EXPR or NOP_EXPR nodes. */ DEFTREECODE (CONVERT_EXPR, "convert_expr", '1', 1) /* Represents a conversion expected to require no code to be generated. */ DEFTREECODE (NOP_EXPR, "nop_expr", '1', 1) /* Value is same as argument, but guaranteed not an lvalue. */ DEFTREECODE (NON_LVALUE_EXPR, "non_lvalue_expr", '1', 1) /* Represents viewing something of one type as being of a second type. This corresponds to an "Unchecked Conversion" in Ada and roughly to the idiom *(type2 *)&X in C. The only operand is the value to be viewed as being of another type. It is undefined if the type of the input and of the expression have different sizes. This code may also be used within the LHS of a MODIFY_EXPR, in which case no actual data motion may occur. TREE_ADDRESSABLE will be set in this case and GCC must abort if it could not do the operation without generating insns. */ DEFTREECODE (VIEW_CONVERT_EXPR, "view_convert_expr", '1', 1) /* Represents something we computed once and will use multiple times. First operand is that expression. After it is evaluated once, it will be replaced by the temporary variable that holds the value. */ DEFTREECODE (SAVE_EXPR, "save_expr", 'e', 1) /* For a UNSAVE_EXPR, operand 0 is the value to unsave. By unsave, we mean that all _EXPRs such as TARGET_EXPRs, SAVE_EXPRs, CALL_EXPRs, that are protected from being evaluated more than once should be reset so that a new expand_expr call of this expr will cause those to be re-evaluated. This is useful when we want to reuse a tree in different places, but where we must re-expand. */ DEFTREECODE (UNSAVE_EXPR, "unsave_expr", 'e', 1) /* & in C. Value is the address at which the operand's value resides. Operand may have any mode. Result mode is Pmode. */ DEFTREECODE (ADDR_EXPR, "addr_expr", 'e', 1) /* Non-lvalue reference or pointer to an object. */ DEFTREECODE (REFERENCE_EXPR, "reference_expr", 'e', 1) /* Operand is a function constant; result is a function variable value of type EPmode. Used only for languages that need static chains. */ DEFTREECODE (ENTRY_VALUE_EXPR, "entry_value_expr", 'e', 1) /* Operand0 is a function constant; result is part N of a function descriptor of type ptr_mode. */ DEFTREECODE (FDESC_EXPR, "fdesc_expr", 'e', 2) /* Given two real or integer operands of the same type, returns a complex value of the corresponding complex type. 
*/ DEFTREECODE (COMPLEX_EXPR, "complex_expr", '2', 2) /* Complex conjugate of operand. Used only on complex types. */ DEFTREECODE (CONJ_EXPR, "conj_expr", '1', 1) /* Used only on an operand of complex type, these return a value of the corresponding component type. */ DEFTREECODE (REALPART_EXPR, "realpart_expr", 'r', 1) DEFTREECODE (IMAGPART_EXPR, "imagpart_expr", 'r', 1) /* Nodes for ++ and -- in C. The second arg is how much to increment or decrement by. For a pointer, it would be the size of the object pointed to. */ DEFTREECODE (PREDECREMENT_EXPR, "predecrement_expr", 'e', 2) DEFTREECODE (PREINCREMENT_EXPR, "preincrement_expr", 'e', 2) DEFTREECODE (POSTDECREMENT_EXPR, "postdecrement_expr", 'e', 2) DEFTREECODE (POSTINCREMENT_EXPR, "postincrement_expr", 'e', 2) /* Used to implement `va_arg'. */ DEFTREECODE (VA_ARG_EXPR, "va_arg_expr", 'e', 1) /* Evaluate operand 1. If and only if an exception is thrown during the evaluation of operand 1, evaluate operand 2. This differs from TRY_FINALLY_EXPR in that operand 2 is not evaluated on a normal or jump exit, only on an exception. */ DEFTREECODE (TRY_CATCH_EXPR, "try_catch_expr", 's', 2) /* Evaluate the first operand. The second operand is a cleanup expression which is evaluated on any exit (normal, exception, or jump out) from this expression. */ DEFTREECODE (TRY_FINALLY_EXPR, "try_finally", 's', 2) /* These types of expressions have no useful value, and always have side effects. */ /* Used to represent a local declaration. The operand is DECL_EXPR_DECL. */ DEFTREECODE (DECL_EXPR, "decl_expr", 's', 1) /* A label definition, encapsulated as a statement. Operand 0 is the LABEL_DECL node for the label that appears here. The type should be void and the value should be ignored. */ DEFTREECODE (LABEL_EXPR, "label_expr", 's', 1) /* GOTO. Operand 0 is a LABEL_DECL node or an expression. The type should be void and the value should be ignored. */ DEFTREECODE (GOTO_EXPR, "goto_expr", 's', 1) /* Used internally for cleanups in the implementation of TRY_FINALLY_EXPR. (Specifically, it is created by expand_expr, not front-ends.) Operand 0 is the rtx for the start of the subroutine we need to call. Operand 1 is the rtx for a variable in which to store the address of where the subroutine should return to. */ DEFTREECODE (GOTO_SUBROUTINE_EXPR, "goto_subroutine", 's', 2) /* RETURN. Evaluates operand 0, then returns from the current function. Presumably that operand is an assignment that stores into the RESULT_DECL that holds the value to be returned. The operand may be null. The type should be void and the value should be ignored. */ DEFTREECODE (RETURN_EXPR, "return_expr", 's', 1) /* Exit the innermost loop conditionally. Operand 0 is the condition. The type should be void and the value should be ignored. */ DEFTREECODE (EXIT_EXPR, "exit_expr", 's', 1) /* A loop. Operand 0 is the body of the loop. It must contain an EXIT_EXPR or be an infinite loop. The type should be void and the value should be ignored. */ DEFTREECODE (LOOP_EXPR, "loop_expr", 's', 1) /* Exit a labeled block, possibly returning a value. Operand 0 is a LABELED_BLOCK_EXPR to exit. Operand 1 is the value to return. It may be left null. */ DEFTREECODE (EXIT_BLOCK_EXPR, "exit_block_expr", 's', 2) /* Switch expression. TREE_TYPE is the original type of the condition, before any language required type conversions. It may be NULL, in which case the original type and final types are assumed to be the same. 
Operand 0 is the expression used to perform the branch, Operand 1 is the body of the switch, which probably contains CASE_LABEL_EXPRs. It may also be NULL, in which case operand 2 must not be NULL. Operand 2 is either NULL_TREE or a TREE_VEC of the CASE_LABEL_EXPRs of all the cases. */ DEFTREECODE (SWITCH_EXPR, "switch_expr", 's', 3) /* Used to represent a case label. The operands are CASE_LOW and CASE_HIGH, respectively. If CASE_LOW is NULL_TREE, the label is a 'default' label. If CASE_HIGH is NULL_TREE, the label is a normal case label. CASE_LABEL is the corresponding LABEL_DECL. */ DEFTREECODE (CASE_LABEL_EXPR, "case_label_expr", 's', 3) /* RESX. Resume execution after an exception. Operand 0 is a number indicating the exception region that is being left. */ DEFTREECODE (RESX_EXPR, "resx_expr", 's', 1) /* Used to represent an inline assembly statement. ASM_STRING returns a STRING_CST for the instruction (e.g., "mov x, y"). ASM_OUTPUTS, ASM_INPUTS, and ASM_CLOBBERS represent the outputs, inputs, and clobbers for the statement. */ DEFTREECODE (ASM_EXPR, "asm_expr", 's', 4) /* Variable references for SSA analysis. New SSA names are created every time a variable is assigned a new value. The SSA builder uses SSA_NAME nodes to implement SSA versioning. */ DEFTREECODE (SSA_NAME, "ssa_name", 'x', 0) /* SSA PHI operator. PHI_RESULT is the new SSA_NAME node created by the PHI node. PHI_ARG_LENGTH is the number of arguments. PHI_ARG_ELT returns the Ith tuple from the argument list. Each tuple contains the incoming reaching definition (SSA_NAME node) and the edge via which that definition is coming through. */ DEFTREECODE (PHI_NODE, "phi_node", 'x', 0) /* Used to represent a typed exception handler. CATCH_TYPES is the type (or list of types) handled, and CATCH_BODY is the code for the handler. */ DEFTREECODE (CATCH_EXPR, "catch_expr", 's', 2) /* Used to represent an exception specification. EH_FILTER_TYPES is a list of allowed types, and EH_FILTER_FAILURE is an expression to evaluate on failure. EH_FILTER_MUST_NOT_THROW controls which range type to use when expanding. */ DEFTREECODE (EH_FILTER_EXPR, "eh_filter_expr", 's', 2) /* Node used for describing a property that is known at compile time. */ DEFTREECODE (SCEV_KNOWN, "scev_known", 'e', 0) /* Node used for describing a property that is not known at compile time. */ DEFTREECODE (SCEV_NOT_KNOWN, "scev_not_known", 'e', 0) /* Polynomial chains of recurrences. Under the form: cr = {CHREC_LEFT (cr), +, CHREC_RIGHT (cr)}. */ DEFTREECODE (POLYNOMIAL_CHREC, "polynomial_chrec", 'e', 3) /* Used to chain children of container statements together. Use the interface in tree-iterator.h to access this node. */ DEFTREECODE (STATEMENT_LIST, "statement_list", 'x', 0) /* Value handles. Artificial nodes to represent expressions in partial redundancy elimination (tree-ssa-pre.c). These nodes are used for expression canonicalization. If two expressions compute the same value, they will be assigned the same value handle. */ DEFTREECODE (VALUE_HANDLE, "value_handle", 'x', 0) /* Base class information. Holds information about a class as a baseclass of itself or another class. */ DEFTREECODE (TREE_BINFO, "tree_binfo", 'x', 0) /* Local variables: mode:c End: */ LAST_AND_UNUSED_TREE_CODE /* A convenient way to get a value for NUM_TREE_CODES. */ }; #undef DEFTREECODE /* Number of language-independent tree codes. 
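   (A hypothetical sketch of how these codes are reused follows.)  */
#if 0
/* Illustrative sketch of how the DEFTREECODE entries above are reused:
   re-including the same tree.def contents with a different DEFTREECODE
   definition yields the per-code tables declared just below, roughly
   the way tree.c builds them in stock GCC.  With the tables filled in
   this way, TREE_CODE_CLASS (PLUS_EXPR) is '2' and
   TREE_CODE_LENGTH (PLUS_EXPR) is 2, matching the
   DEFTREECODE (PLUS_EXPR, "plus_expr", '2', 2) entry above.  */

#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) TYPE,
const char tree_code_type[] = {
#include "tree.def"
  'x'           /* slot for LAST_AND_UNUSED_TREE_CODE */
};
#undef DEFTREECODE

#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) LENGTH,
const unsigned char tree_code_length[] = {
#include "tree.def"
  0
};
#undef DEFTREECODE
#endif
/*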
*/ #define NUM_TREE_CODES ((int) LAST_AND_UNUSED_TREE_CODE) /* Indexed by enum tree_code, contains a character which is `<' for a comparison expression, `1' for a unary arithmetic expression, `2' for a binary arithmetic expression, `e' for other types of expressions, `r' for a reference, `c' for a constant, `d' for a decl, `t' for a type, `s' for a statement, and `x' for anything else (TREE_LIST, IDENTIFIER, etc). */ #define MAX_TREE_CODES 256 extern const char tree_code_type[]; #define TREE_CODE_CLASS(CODE) tree_code_type[(int) (CODE)] /* Returns nonzero iff CLASS is not the tree code of a type. */ #define IS_NON_TYPE_CODE_CLASS(CLASS) (strchr ("xbcdr<12se", (CLASS)) != 0) /* Returns nonzero iff CLASS is the tree-code class of an expression. */ #define IS_EXPR_CODE_CLASS(CLASS) (strchr ("<12ers", (CLASS)) != 0) /* Returns nonzero iff NODE is an expression of some kind. */ #define EXPR_P(NODE) IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (NODE))) /* Number of argument-words in each kind of tree-node. */ extern const unsigned char tree_code_length[]; #define TREE_CODE_LENGTH(CODE) tree_code_length[(int) (CODE)] /* Names of tree components. */ extern const char *const tree_code_name[]; /* Classify which part of the compiler has defined a given builtin function. Note that we assume below that this is no more than two bits. */ enum built_in_class { NOT_BUILT_IN = 0, BUILT_IN_FRONTEND, BUILT_IN_MD, BUILT_IN_NORMAL }; /* Names for the above. */ extern const char *const built_in_class_names[4]; /* Codes that identify the various built in functions so that expand_call can identify them quickly. */ #define DEF_BUILTIN(ENUM, N, C, T, LT, B, F, NA, AT, IM) ENUM, enum built_in_function { /* This file contains the definitions and documentation for the builtins used in the GNU compiler. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Before including this file, you should define a macro: DEF_BUILTIN (ENUM, NAME, CLASS, TYPE, LIBTYPE, BOTH_P, FALLBACK_P, NONANSI_P, ATTRS, IMPLICIT) This macro will be called once for each builtin function. The ENUM will be of type `enum built_in_function', and will indicate which builtin function is being processed. The NAME of the builtin function (which will always start with `__builtin_') is a string literal. The CLASS is of type `enum built_in_class' and indicates what kind of builtin is being processed. Some builtins are actually two separate functions. For example, for `strcmp' there are two builtin functions: `__builtin_strcmp' and `strcmp' itself. Both behave identically. Other builtins define only the `__builtin' variant. If BOTH_P is TRUE, then this builtin has both variants; otherwise, it has only the first variant. TYPE indicates the type of the function. The symbols correspond to enumerals from builtin-types.def. 
If BOTH_P is true, then LIBTYPE is the type of the non-`__builtin_' variant. Otherwise, LIBTYPE should be ignored. If FALLBACK_P is true then, if for some reason the compiler cannot expand the builtin function directly, it will call the corresponding library function (which does not have the `__builtin_' prefix). If NONANSI_P is true, then the non-`__builtin_' variant is not an ANSI/ISO library function, and so we should pretend it does not exist when compiling in ANSI conformant mode. ATTRS is an attribute list as defined in builtin-attrs.def that describes the attributes of this builtin function. IMPLICIT specifies the condition under which the builtin can be produced implicitly by the compiler. For instance, C90 reserves the name floorf but does not define its meaning. When the user calls floorf we may assume it has the meaning we expect, but we can't introduce a call to floorf ourselves (say, by simplifying floor ((double) x) for a float x) since the runtime need not implement it. */ /* A GCC builtin (like __builtin_saveregs) is provided by the compiler, but does not correspond to a function in the standard library. */ #undef DEF_GCC_BUILTIN #define DEF_GCC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \
false, false, false, ATTRS, true)
/* A library builtin (like __builtin_strchr) is a builtin equivalent of an ANSI/ISO standard library function. In addition to the `__builtin' version, we will create an ordinary version (e.g., `strchr') as well. If we cannot compute the answer using the builtin function, we will fall back to the standard library version. */ #undef DEF_LIB_BUILTIN #define DEF_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, false, ATTRS, true)
/* Like DEF_LIB_BUILTIN, except that the function is not one that is specified by ANSI/ISO C. So, when we're being fully conformant we ignore the version of these builtins that does not begin with __builtin. */ #undef DEF_EXT_LIB_BUILTIN #define DEF_EXT_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, true, ATTRS, false)
/* Like DEF_LIB_BUILTIN, except that the function is only a part of the standard in C94 or above. */ #undef DEF_C94_BUILTIN #define DEF_C94_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, !flag_isoc94, ATTRS, TARGET_C99_FUNCTIONS)
/* Like DEF_LIB_BUILTIN, except that the function is only a part of the standard in C99 or above. */ #undef DEF_C99_BUILTIN #define DEF_C99_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS)
/* A builtin that is specified by C99, and whose name C90 merely reserves for future use. We can still recognize the builtin in C90 mode but we can't produce it implicitly. */ #undef DEF_C99_C90RES_BUILTIN #define DEF_C99_C90RES_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS)
/* Define an attribute list for math functions that are normally "impure" because some of them may write into global memory for `errno'. If !flag_errno_math they are instead "const". */ #undef ATTR_MATHFN_ERRNO #define ATTR_MATHFN_ERRNO (flag_errno_math ?
\ ATTR_NOTHROW_LIST : ATTR_CONST_NOTHROW_LIST) /* Define an attribute list for math functions that are normally "pure" but if flag_unsafe_math_optimizations is set they are instead "const". This distinction accounts for the fact that some math functions check the rounding mode which is akin to examining global memory. In "unsafe" mode we can be less careful. */ #undef ATTR_MATHFN_FPROUNDING #define ATTR_MATHFN_FPROUNDING (flag_unsafe_math_optimizations ? \ ATTR_CONST_NOTHROW_LIST : ATTR_PURE_NOTHROW_LIST) /* Define an attribute list for math functions that are normally "impure" because some of them may write into global memory for `errno'. If !flag_errno_math, we can possibly use "pure" or "const" depending on whether we care about FP rounding. */ #undef ATTR_MATHFN_FPROUNDING_ERRNO #define ATTR_MATHFN_FPROUNDING_ERRNO (flag_errno_math ? \ ATTR_NOTHROW_LIST : ATTR_MATHFN_FPROUNDING) /* Define an attribute list for math functions that need to mind FP rounding, but because they store into memory they are never "const" or "pure". Use of this macro is mainly for documentation and maintenance purposes. */ #undef ATTR_MATHFN_FPROUNDING_STORE #define ATTR_MATHFN_FPROUNDING_STORE ATTR_NOTHROW_LIST /* Category: math builtins. */ DEF_LIB_BUILTIN (BUILT_IN_ACOS, "acos", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSF, "acosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ACOSH, "acosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ACOSHF, "acoshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ACOSHL, "acoshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSL, "acosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_ASIN, "asin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINF, "asinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ASINH, "asinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ASINHF, "asinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ASINHL, "asinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINL, "asinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_ATAN, "atan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_ATAN2, "atan2", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2F, "atan2f", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2L, "atan2l", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANF, "atanf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ATANH, "atanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ATANHF, "atanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ATANHL, "atanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANL, "atanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CBRT, "cbrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CBRTF, "cbrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CBRTL, "cbrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) 
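/* A note on the attribute-list macros defined just above (an editorial
   illustration, not part of the original builtins.def): acos, fmod and the
   other entries carrying ATTR_MATHFN_ERRNO or ATTR_MATHFN_FPROUNDING_ERRNO
   cannot be treated as "const" by default, because a domain error may store
   EDOM into the global errno, so two identical calls are not interchangeable
   unless -fno-math-errno (which clears flag_errno_math) is in effect.  A
   minimal user-level sketch of the behaviour being protected, assuming a
   typical glibc target:

       #include <errno.h>
       #include <math.h>
       #include <stdio.h>

       int main (void)
       {
         errno = 0;
         double r = acos (2.0);       // domain error: r is NaN
         if (errno == EDOM)           // observable side effect on errno
           printf ("acos set errno; r = %f\n", r);
         return 0;
       }
*/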
DEF_LIB_BUILTIN (BUILT_IN_CEIL, "ceil", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILF, "ceilf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILL, "ceill", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_COPYSIGN, "copysign", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_COPYSIGNF, "copysignf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_COPYSIGNL, "copysignl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_COS, "cos", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSF, "cosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_COSH, "cosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHF, "coshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHL, "coshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSL, "cosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_EXT_LIB_BUILTIN (BUILT_IN_DREM, "drem", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_DREMF, "dremf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_DREML, "dreml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERF, "erf", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ERFC, "erfc", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERFCF, "erfcf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERFCL, "erfcl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERFF, "erff", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ERFL, "erfl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_EXP, "exp", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXP10, "exp10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXP10F, "exp10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXP10L, "exp10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXP2, "exp2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXP2F, "exp2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXP2L, "exp2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPF, "expf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPL, "expl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXPM1, "expm1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXPM1F, "expm1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXPM1L, "expm1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_FABS, "fabs", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSF, "fabsf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSL, "fabsl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FDIM, "fdim", BT_FN_DOUBLE_DOUBLE_DOUBLE, 
ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_FDIMF, "fdimf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_FDIML, "fdiml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_FLOOR, "floor", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORF, "floorf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORL, "floorl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMA, "fma", BT_FN_DOUBLE_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_FMAF, "fmaf", BT_FN_FLOAT_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_FMAL, "fmal", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_FMAX, "fmax", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMAXF, "fmaxf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMAXL, "fmaxl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMIN, "fmin", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMINF, "fminf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMINL, "fminl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_FMOD, "fmod", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODF, "fmodf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODL, "fmodl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_FREXP, "frexp", BT_FN_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPF, "frexpf", BT_FN_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPL, "frexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMA, "gamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAF, "gammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAL, "gammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_GCC_BUILTIN (BUILT_IN_HUGE_VAL, "huge_val", BT_FN_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALF, "huge_valf", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALL, "huge_vall", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_HYPOT, "hypot", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_HYPOTF, "hypotf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_HYPOTL, "hypotl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ILOGB, "ilogb", BT_FN_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ILOGBF, "ilogbf", BT_FN_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ILOGBL, "ilogbl", BT_FN_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_GCC_BUILTIN (BUILT_IN_INF, "inf", BT_FN_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INFF, "inff", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INFL, "infl", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_J0, "j0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) 
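/* Illustration (a sketch, not part of the original builtins.def): the
   DEF_LIB_BUILTIN and DEF_C99_C90RES_BUILTIN entries in this list set both
   BOTH_P and FALLBACK_P, so each `__builtin_' form has a plain library twin
   and may fall back to it when the call cannot be expanded.  For example,
   on a typical target:

       #include <math.h>

       double a (void)     { return __builtin_floor (3.7); }  // folded to 3.0
                                                               // at compile time
       double b (double x) { return __builtin_floor (x); }    // usually emitted
                                                               // as a call to floor
*/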
DEF_EXT_LIB_BUILTIN (BUILT_IN_J0F, "j0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J0L, "j0l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J1, "j1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J1F, "j1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J1L, "j1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_JN, "jn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_JNF, "jnf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_JNL, "jnl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_LDEXP, "ldexp", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPF, "ldexpf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPL, "ldexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LGAMMA, "lgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LGAMMAF, "lgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LGAMMAL, "lgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLRINT, "llrint", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLRINTF, "llrintf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLRINTL, "llrintl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLROUND, "llround", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLROUNDF, "llroundf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLROUNDL, "llroundl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_MATHFN_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_LOG, "log", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_LOG10, "log10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10F, "log10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10L, "log10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG1P, "log1p", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG1PF, "log1pf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG1PL, "log1pl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG2, "log2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG2F, "log2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG2L, "log2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOGB, "logb", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOGBF, "logbf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOGBL, "logbl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGF, "logf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGL, "logl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LRINT, "lrint", BT_FN_LONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LRINTF, "lrintf", BT_FN_LONG_FLOAT, 
ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LRINTL, "lrintl", BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LROUND, "lround", BT_FN_LONG_DOUBLE, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LROUNDF, "lroundf", BT_FN_LONG_FLOAT, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LROUNDL, "lroundl", BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_MODF, "modf", BT_FN_DOUBLE_DOUBLE_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFF, "modff", BT_FN_FLOAT_FLOAT_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFL, "modfl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_GCC_BUILTIN (BUILT_IN_NAN, "nan", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANF, "nanf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANL, "nanl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANS, "nans", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANSF, "nansf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANSL, "nansl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_C99_BUILTIN (BUILT_IN_NEARBYINT, "nearbyint", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_NEARBYINTF, "nearbyintf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_NEARBYINTL, "nearbyintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_NEXTAFTER, "nextafter", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERF, "nextafterf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERL, "nextafterl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARD, "nexttoward", BT_FN_DOUBLE_DOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDF, "nexttowardf", BT_FN_FLOAT_FLOAT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDL, "nexttowardl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_POW, "pow", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10, "pow10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10F, "pow10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10L, "pow10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_POWF, "powf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_POWL, "powl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMAINDER, "remainder", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMAINDERF, "remainderf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMAINDERL, "remainderl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMQUO, "remquo", BT_FN_DOUBLE_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_BUILTIN (BUILT_IN_REMQUOF, "remquof", BT_FN_FLOAT_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_BUILTIN (BUILT_IN_REMQUOL, "remquol", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_INTPTR, 
ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_BUILTIN (BUILT_IN_RINT, "rint", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_RINTF, "rintf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_RINTL, "rintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ROUND, "round", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ROUNDF, "roundf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ROUNDL, "roundl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALB, "scalb", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBF, "scalbf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBL, "scalbl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBLN, "scalbln", BT_FN_DOUBLE_DOUBLE_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBLNF, "scalblnf", BT_FN_FLOAT_FLOAT_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBLNL, "scalblnl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBN, "scalbn", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBNF, "scalbnf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBNL, "scalbnl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBIT, "signbit", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITF, "signbitf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITL, "signbitl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICAND, "significand", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDF, "significandf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDL, "significandl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_SIN, "sin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOS, "sincos", BT_FN_VOID_DOUBLE_DOUBLEPTR_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSF, "sincosf", BT_FN_VOID_FLOAT_FLOATPTR_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSL, "sincosl", BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINF, "sinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_SINH, "sinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHF, "sinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHL, "sinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINL, "sinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_SQRT, "sqrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTF, "sqrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTL, "sqrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_TAN, "tan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANF, "tanf", BT_FN_FLOAT_FLOAT, 
ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_TANH, "tanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHF, "tanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHL, "tanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANL, "tanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_TGAMMA, "tgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_TGAMMAF, "tgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_TGAMMAL, "tgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_TRUNC, "trunc", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_TRUNCF, "truncf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_TRUNCL, "truncl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0, "y0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0F, "y0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0L, "y0l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1, "y1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1F, "y1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1L, "y1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_YN, "yn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_YNF, "ynf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_YNL, "ynl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) /* Category: _Complex math builtins. 
*/ /* The C99 clog function conflicts with C++ iostreams clog, see http://gcc.gnu.org/ml/gcc-patches/2003-09/msg00510.html */ DEF_C99_BUILTIN (BUILT_IN_CABS, "cabs", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CABSF, "cabsf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CABSL, "cabsl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOS, "cacos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSF, "cacosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSH, "cacosh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSHF, "cacoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSHL, "cacoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSL, "cacosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CARG, "carg", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CARGF, "cargf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CARGL, "cargl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASIN, "casin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINF, "casinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINH, "casinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINHF, "casinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINHL, "casinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINL, "casinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATAN, "catan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANF, "catanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANH, "catanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANHF, "catanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANHL, "catanhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANL, "catanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOS, "ccos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSF, "ccosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSH, "ccosh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSHF, "ccoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSHL, "ccoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSL, "ccosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CEXP, "cexp", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CEXPF, "cexpf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CEXPL, "cexpl", 
BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CIMAG, "cimag", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CIMAGF, "cimagf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CIMAGL, "cimagl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) /*DEF_C99_BUILTIN (BUILT_IN_CLOG, "clog", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)*/ /*DEF_C99_BUILTIN (BUILT_IN_CLOGF, "clogf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)*/ /*DEF_C99_BUILTIN (BUILT_IN_CLOGL, "clogl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)*/ DEF_C99_BUILTIN (BUILT_IN_CONJ, "conj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CONJF, "conjf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CONJL, "conjl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CPOW, "cpow", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPOWF, "cpowf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPOWL, "cpowl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPROJ, "cproj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPROJF, "cprojf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPROJL, "cprojl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CREAL, "creal", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CREALF, "crealf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CREALL, "creall", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CSIN, "csin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINF, "csinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINH, "csinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINHF, "csinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINHL, "csinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINL, "csinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSQRT, "csqrt", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSQRTF, "csqrtf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSQRTL, "csqrtl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTAN, "ctan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANF, "ctanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANH, "ctanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANHF, "ctanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANHL, "ctanhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANL, "ctanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, 
ATTR_MATHFN_FPROUNDING) /* Category: string/memory builtins. */ /* bcmp, bcopy and bzero have traditionally accepted NULL pointers when the length parameter is zero, so don't apply attribute "nonnull". */ DEF_EXT_LIB_BUILTIN (BUILT_IN_BCMP, "bcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_BCOPY, "bcopy", BT_FN_VOID_CONST_PTR_PTR_SIZE, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_BZERO, "bzero", BT_FN_VOID_PTR_SIZE, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FFS, "ffs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSL, "ffsl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_INDEX, "index", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_MEMCMP, "memcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_MEMCPY, "memcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_MEMMOVE, "memmove", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMPCPY, "mempcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_MEMSET, "memset", BT_FN_PTR_PTR_INT_SIZE, ATTR_NOTHROW_NONNULL_1) DEF_EXT_LIB_BUILTIN (BUILT_IN_RINDEX, "rindex", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_EXT_LIB_BUILTIN (BUILT_IN_STPCPY, "stpcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCAT, "strcat", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCHR, "strchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRCMP, "strcmp", BT_FN_INT_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCPY, "strcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCSPN, "strcspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_STRDUP, "strdup", BT_FN_STRING_CONST_STRING, ATTR_MALLOC_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRLEN, "strlen", BT_FN_SIZE_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRNCAT, "strncat", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRNCMP, "strncmp", BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRNCPY, "strncpy", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRPBRK, "strpbrk", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRRCHR, "strrchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRSPN, "strspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRSTR, "strstr", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) /* Category: stdio builtins. 
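   The entries in this category attach format attributes such as
   ATTR_FORMAT_PRINTF_2_3, which correspond to the user-visible
   __attribute__ ((format (printf, M, N))) annotation: argument M is the
   format string and checking of the variadic arguments starts at argument N.
   A minimal sketch of the same machinery applied to an ordinary function
   (log_msg is an illustrative name, not part of GCC):

       #include <stdarg.h>
       #include <stdio.h>

       // Argument 1 is the format string; checking starts at argument 2.
       void log_msg (const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));

       void log_msg (const char *fmt, ...)
       {
         va_list ap;
         va_start (ap, fmt);
         vfprintf (stderr, fmt, ap);
         va_end (ap);
       }

       // log_msg ("%s", 42) would then be diagnosed by -Wformat.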
*/ DEF_LIB_BUILTIN (BUILT_IN_FPRINTF, "fprintf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) DEF_EXT_LIB_BUILTIN (BUILT_IN_FPRINTF_UNLOCKED, "fprintf_unlocked", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) DEF_LIB_BUILTIN (BUILT_IN_FPUTC, "fputc", BT_FN_INT_INT_FILEPTR, ATTR_NOTHROW_NONNULL_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTC_UNLOCKED, "fputc_unlocked", BT_FN_INT_INT_FILEPTR, ATTR_NOTHROW_NONNULL_2) DEF_LIB_BUILTIN (BUILT_IN_FPUTS, "fputs", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NOTHROW_NONNULL_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTS_UNLOCKED, "fputs_unlocked", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_FSCANF, "fscanf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_SCANF_2_3) DEF_LIB_BUILTIN (BUILT_IN_FWRITE, "fwrite", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NOTHROW_NONNULL_1_4) DEF_EXT_LIB_BUILTIN (BUILT_IN_FWRITE_UNLOCKED, "fwrite_unlocked", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NOTHROW_NONNULL_1_4) DEF_LIB_BUILTIN (BUILT_IN_PRINTF, "printf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_PRINTF_UNLOCKED, "printf_unlocked", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_1_2) DEF_LIB_BUILTIN (BUILT_IN_PUTCHAR, "putchar", BT_FN_INT_INT, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTCHAR_UNLOCKED, "putchar_unlocked", BT_FN_INT_INT, ATTR_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_PUTS, "puts", BT_FN_INT_CONST_STRING, ATTR_NOTHROW_NONNULL_1) DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTS_UNLOCKED, "puts_unlocked", BT_FN_INT_CONST_STRING, ATTR_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_SCANF, "scanf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_SCANF_1_2) DEF_C99_BUILTIN (BUILT_IN_SNPRINTF, "snprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_3_4) DEF_LIB_BUILTIN (BUILT_IN_SPRINTF, "sprintf", BT_FN_INT_STRING_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) DEF_LIB_BUILTIN (BUILT_IN_SSCANF, "sscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_FORMAT_SCANF_2_3) DEF_LIB_BUILTIN (BUILT_IN_VFPRINTF, "vfprintf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_2_0) DEF_C99_BUILTIN (BUILT_IN_VFSCANF, "vfscanf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_2_0) DEF_LIB_BUILTIN (BUILT_IN_VPRINTF, "vprintf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_1_0) DEF_C99_BUILTIN (BUILT_IN_VSCANF, "vscanf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_1_0) DEF_C99_BUILTIN (BUILT_IN_VSNPRINTF, "vsnprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_3_0) DEF_LIB_BUILTIN (BUILT_IN_VSPRINTF, "vsprintf", BT_FN_INT_STRING_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_2_0) DEF_C99_BUILTIN (BUILT_IN_VSSCANF, "vsscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_2_0) /* Category: ctype builtins. 
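   Most of the classification functions in this category are only "pure"
   rather than "const" because their result may depend on the current
   locale, which behaves like global memory; isdigit, isxdigit, isascii and
   toascii are locale-independent and therefore "const".  In user-visible
   terms the two attributes look like this (illustrative function names,
   not part of GCC):

       // const: the result depends only on the argument values.
       int classify_ascii (int c) __attribute__ ((const));

       // pure: the result may also depend on global state (here, the
       // locale), but the call has no side effects.
       int classify_locale (int c) __attribute__ ((pure));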
*/ DEF_LIB_BUILTIN (BUILT_IN_ISALNUM, "isalnum", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISALPHA, "isalpha", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISASCII, "isascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ISBLANK, "isblank", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISCNTRL, "iscntrl", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISDIGIT, "isdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISGRAPH, "isgraph", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISLOWER, "islower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISPRINT, "isprint", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISPUNCT, "ispunct", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISSPACE, "isspace", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISUPPER, "isupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISXDIGIT, "isxdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_TOASCII, "toascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_TOLOWER, "tolower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_TOUPPER, "toupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) /* Category: wctype builtins. */ DEF_C94_BUILTIN (BUILT_IN_ISWALNUM, "iswalnum", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWALPHA, "iswalpha", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ISWBLANK, "iswblank", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWCNTRL, "iswcntrl", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWDIGIT, "iswdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWGRAPH, "iswgraph", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWLOWER, "iswlower", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWPRINT, "iswprint", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWPUNCT, "iswpunct", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWSPACE, "iswspace", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWUPPER, "iswupper", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWXDIGIT, "iswxdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_TOWLOWER, "towlower", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_TOWUPPER, "towupper", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LIST) /* Category: miscellaneous builtins. 
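   Several of the entries in this category are pure compiler hooks with no
   library counterpart, e.g. __builtin_expect (a branch-prediction hint with
   signature long (long, long)) and __builtin_constant_p (which evaluates to
   1 only when its argument folds to a compile-time constant).  A minimal
   usage sketch:

       // Mark the error path as unlikely so GCC lays out the hot path first.
       #define unlikely(x) __builtin_expect ((x) != 0, 0)

       int process (int fd)
       {
         if (unlikely (fd < 0))
           return -1;
         return __builtin_constant_p (fd) ? 0 : 1;
       }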
*/ DEF_LIB_BUILTIN (BUILT_IN_ABORT, "abort", BT_FN_VOID, ATTR_NORETURN_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ABS, "abs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_AGGREGATE_INCOMING_ADDRESS, "aggregate_incoming_address", BT_FN_PTR_VAR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_ALLOCA, "alloca", BT_FN_PTR_SIZE, ATTR_MALLOC_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_APPLY, "apply", BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_APPLY_ARGS, "apply_args", BT_FN_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_ARGS_INFO, "args_info", BT_FN_INT_INT, ATTR_NULL) DEF_LIB_BUILTIN (BUILT_IN_CALLOC, "calloc", BT_FN_PTR_SIZE_SIZE, ATTR_MALLOC_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CLASSIFY_TYPE, "classify_type", BT_FN_INT_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_CLZ, "clz", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CLZL, "clzl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CLZLL, "clzll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CONSTANT_P, "constant_p", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CTZ, "ctz", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CTZL, "ctzl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CTZLL, "ctzll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_DCGETTEXT, "dcgettext", BT_FN_STRING_CONST_STRING_CONST_STRING_INT, ATTR_FORMAT_ARG_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_DGETTEXT, "dgettext", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_FORMAT_ARG_2) DEF_GCC_BUILTIN (BUILT_IN_DWARF_CFA, "dwarf_cfa", BT_FN_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_DWARF_SP_COLUMN, "dwarf_sp_column", BT_FN_UNSIGNED, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN, "eh_return", BT_FN_VOID_PTRMODE_PTR, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN_DATA_REGNO, "eh_return_data_regno", BT_FN_INT_INT, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECL, "execl", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLP, "execlp", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLE, "execle", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECV, "execv", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVP, "execvp", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVE, "execve", BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_EXIT, "exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EXPECT, "expect", BT_FN_LONG_LONG_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EXTEND_POINTER, "extend_pointer", BT_FN_WORD_PTR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EXTRACT_RETURN_ADDR, "extract_return_addr", BT_FN_PTR_PTR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_FRAME_ADDRESS, "frame_address", BT_FN_PTR_UNSIGNED, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_GETTEXT, "gettext", BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1) DEF_C99_BUILTIN (BUILT_IN_IMAXABS, "imaxabs", BT_FN_INTMAX_INTMAX, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INIT_DWARF_REG_SIZES, "init_dwarf_reg_size_table", BT_FN_VOID_PTR, ATTR_NULL) DEF_EXT_LIB_BUILTIN 
(BUILT_IN_FINITE, "finite", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEF, "finitef", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEL, "finitel", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_ISINF, "isinf", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFF, "isinff", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFL, "isinfl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_ISNAN, "isnan", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANF, "isnanf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANL, "isnanl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISGREATER, "isgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISGREATEREQUAL, "isgreaterequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISLESS, "isless", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISLESSEQUAL, "islessequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISLESSGREATER, "islessgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISUNORDERED, "isunordered", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_LABS, "labs", BT_FN_LONG_LONG, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_LLABS, "llabs", BT_FN_LONGLONG_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_LONGJMP, "longjmp", BT_FN_VOID_PTR_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_MALLOC, "malloc", BT_FN_PTR_SIZE, ATTR_MALLOC_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_NEXT_ARG, "next_arg", BT_FN_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_PARITY, "parity", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_PARITYL, "parityl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_PARITYLL, "parityll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_POPCOUNT, "popcount", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTL, "popcountl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTLL, "popcountll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_PREFETCH, "prefetch", BT_FN_VOID_CONST_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_RETURN, "return", BT_FN_VOID_PTR, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_RETURN_ADDRESS, "return_address", BT_FN_PTR_UNSIGNED, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_SAVEREGS, "saveregs", BT_FN_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_SETJMP, "setjmp", BT_FN_INT_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STACK_ALLOC, "stack_alloc", BT_FN_VOID_PTR_SIZE, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STACK_SAVE, "stack_save", BT_FN_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STACK_RESTORE, "stack_restore", BT_FN_VOID_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STDARG_START, "stdarg_start", BT_FN_VOID_VALIST_REF_VAR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_STRFMON, "strfmon", BT_FN_SSIZE_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_STRFMON_3_4) DEF_LIB_BUILTIN (BUILT_IN_STRFTIME, "strftime", BT_FN_SIZE_STRING_SIZE_CONST_STRING_CONST_PTR, ATTR_FORMAT_STRFTIME_3_0) DEF_GCC_BUILTIN (BUILT_IN_TRAP, "trap", BT_FN_VOID, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_UNWIND_INIT, "unwind_init", BT_FN_VOID, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_UPDATE_SETJMP_BUF, "update_setjmp_buf", 
BT_FN_VOID_PTR_INT, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_VA_COPY, "va_copy", BT_FN_VOID_VALIST_REF_VALIST_ARG, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_VA_END, "va_end", BT_FN_VOID_VALIST_REF, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_VA_START, "va_start", BT_FN_VOID_VALIST_REF_VAR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN__EXIT, "_exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN__EXIT2, "_Exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INIT_TRAMPOLINE, "init_trampoline", BT_FN_VOID_PTR_PTR_PTR, ATTR_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ADJUST_TRAMPOLINE, "adjust_trampoline", BT_FN_PTR_PTR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_NONLOCAL_GOTO, "nonlocal_goto", BT_FN_PTR_PTR, ATTR_NORETURN_NOTHROW_LIST) /* Profiling hooks. */ DEF_GCC_BUILTIN (BUILT_IN_PROFILE_FUNC_ENTER, "profile_func_enter", BT_FN_VOID, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_PROFILE_FUNC_EXIT, "profile_func_exit", BT_FN_VOID, ATTR_NULL) /* Upper bound on non-language-specific builtins. */ END_BUILTINS }; #undef DEF_BUILTIN /* Names for the above. */ extern const char *const built_in_names[(int) END_BUILTINS]; /* Helper macros for math builtins. */ #define BUILTIN_EXP10_P(FN) \
((FN) == BUILT_IN_EXP10 || (FN) == BUILT_IN_EXP10F || (FN) == BUILT_IN_EXP10L \
|| (FN) == BUILT_IN_POW10 || (FN) == BUILT_IN_POW10F || (FN) == BUILT_IN_POW10L)
#define BUILTIN_EXPONENT_P(FN) (BUILTIN_EXP10_P (FN) \
|| (FN) == BUILT_IN_EXP || (FN) == BUILT_IN_EXPF || (FN) == BUILT_IN_EXPL \
|| (FN) == BUILT_IN_EXP2 || (FN) == BUILT_IN_EXP2F || (FN) == BUILT_IN_EXP2L)
#define BUILTIN_SQRT_P(FN) \
((FN) == BUILT_IN_SQRT || (FN) == BUILT_IN_SQRTF || (FN) == BUILT_IN_SQRTL)
#define BUILTIN_CBRT_P(FN) \
((FN) == BUILT_IN_CBRT || (FN) == BUILT_IN_CBRTF || (FN) == BUILT_IN_CBRTL)
#define BUILTIN_ROOT_P(FN) (BUILTIN_SQRT_P (FN) || BUILTIN_CBRT_P (FN))
/* An array of _DECL trees for the above. */ extern GTY(()) tree built_in_decls[(int) END_BUILTINS]; extern GTY(()) tree implicit_built_in_decls[(int) END_BUILTINS]; /* The definition of tree nodes fills the next several pages. */ /* A tree node can represent a data type, a variable, an expression or a statement. Each node has a TREE_CODE which says what kind of thing it represents. Some common codes are: INTEGER_TYPE -- represents a type of integers. ARRAY_TYPE -- represents an array type. VAR_DECL -- represents a declared variable. INTEGER_CST -- represents a constant integer value. PLUS_EXPR -- represents a sum (an expression). As for the contents of a tree node: there are some fields that all nodes share. Each TREE_CODE has various special-purpose fields as well. The fields of a node are never accessed directly, always through accessor macros. */ /* Every kind of tree node starts with this structure, so all nodes have these fields. See the accessor macros, defined below, for documentation of the fields.
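   As a concrete illustration of the accessor-macro convention (the function
   below is hypothetical, not part of GCC), code that walks a TREE_LIST
   chain never touches the fields directly:

       static int
       count_list_elements (tree list)
       {
         tree t;
         int n = 0;
         for (t = list; t != NULL_TREE; t = TREE_CHAIN (t))
           if (TREE_CODE (t) == TREE_LIST)
             n++;
         return n;
       }

   TREE_CODE reads the `code' bit-field of tree_common declared just below,
   and TREE_CHAIN (defined with the other accessors in this header) follows
   the `chain' field.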
*/ union tree_ann_d; struct tree_common GTY(()) { tree chain; tree type; union tree_ann_d *ann; ENUM_BITFIELD(tree_code) code : 8; unsigned side_effects_flag : 1; unsigned constant_flag : 1; unsigned addressable_flag : 1; unsigned volatile_flag : 1; unsigned readonly_flag : 1; unsigned unsigned_flag : 1; unsigned asm_written_flag: 1; unsigned nowarning_flag : 1; unsigned used_flag : 1; unsigned nothrow_flag : 1; unsigned static_flag : 1; unsigned public_flag : 1; unsigned private_flag : 1; unsigned protected_flag : 1; unsigned deprecated_flag : 1; unsigned invariant_flag : 1; unsigned lang_flag_0 : 1; unsigned lang_flag_1 : 1; unsigned lang_flag_2 : 1; unsigned lang_flag_3 : 1; unsigned lang_flag_4 : 1; unsigned lang_flag_5 : 1; unsigned lang_flag_6 : 1; unsigned visited : 1; }; /* The following table lists the uses of each of the above flags and for which types of nodes they are defined. Note that expressions include decls. addressable_flag: TREE_ADDRESSABLE in VAR_DECL, FUNCTION_DECL, FIELD_DECL, CONSTRUCTOR, LABEL_DECL, ..._TYPE, IDENTIFIER_NODE. In a STMT_EXPR, it means we want the result of the enclosed expression. CALL_EXPR_TAILCALL in CALL_EXPR static_flag: TREE_STATIC in VAR_DECL, FUNCTION_DECL, CONSTRUCTOR, ADDR_EXPR TREE_VIA_VIRTUAL in TREE_BINFO TREE_CONSTANT_OVERFLOW in INTEGER_CST, REAL_CST, COMPLEX_CST, VECTOR_CST TREE_SYMBOL_REFERENCED in IDENTIFIER_NODE CLEANUP_EH_ONLY in TARGET_EXPR, WITH_CLEANUP_EXPR ASM_INPUT_P in ASM_EXPR EH_FILTER_MUST_NOT_THROW in EH_FILTER_EXPR TYPE_REF_CAN_ALIAS_ALL in POINTER_TYPE, REFERENCE_TYPE public_flag: TREE_OVERFLOW in INTEGER_CST, REAL_CST, COMPLEX_CST, VECTOR_CST ??? and other expressions? TREE_PUBLIC in VAR_DECL or FUNCTION_DECL or IDENTIFIER_NODE TREE_VIA_PUBLIC in TREE_LIST or TREE_VEC ASM_VOLATILE_P in ASM_EXPR private_flag: TREE_PRIVATE in ..._DECL CALL_EXPR_HAS_RETURN_SLOT_ADDR in CALL_EXPR protected_flag: TREE_PROTECTED in BLOCK ..._DECL CALL_FROM_THUNK_P in CALL_EXPR side_effects_flag: TREE_SIDE_EFFECTS in all expressions all decls all constants FORCED_LABEL in LABEL_DECL volatile_flag: TREE_THIS_VOLATILE in all expressions TYPE_VOLATILE in ..._TYPE readonly_flag: TREE_READONLY in all expressions TYPE_READONLY in ..._TYPE constant_flag: TREE_CONSTANT in all expressions all decls all constants unsigned_flag: TYPE_UNSIGNED in all types DECL_UNSIGNED in all decls BIT_FIELD_REF_UNSIGNED in BIT_FIELD_REF asm_written_flag: TREE_ASM_WRITTEN in VAR_DECL, FUNCTION_DECL, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE BLOCK, SSA_NAME used_flag: TREE_USED in expressions, IDENTIFIER_NODE nothrow_flag: TREE_NOTHROW in CALL_EXPR, FUNCTION_DECL TYPE_ALIGN_OK in ..._TYPE TREE_THIS_NOTRAP in INDIRECT_REF deprecated_flag: TREE_DEPRECATED in ..._DECL visited: Used in tree traversals to mark visited nodes. invariant_flag: TREE_INVARIANT in all expressions. nowarning_flag: TREE_NO_WARNING in ... any expr node */ /* Define accessors for the fields that all tree nodes have (though some fields are not used for all kinds of nodes). */ /* The tree-code says what kind of node it is. Codes are defined in tree.def. */ #define TREE_CODE(NODE) ((enum tree_code) (NODE)->common.code) #define TREE_SET_CODE(NODE, VALUE) ((NODE)->common.code = (VALUE)) /* When checking is enabled, errors will be generated if a tree node is accessed incorrectly. The macros abort with a fatal error. 
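   For example, with checking enabled, VAR_DECL_CHECK (t) evaluates t,
   verifies that TREE_CODE (t) == VAR_DECL, and on a mismatch calls
   tree_check_failed, which reports the file, line and function and does not
   return; with checking disabled it expands to just (t).  The GNU C
   statement-expression form used below is what lets the check be embedded
   inside larger expressions.  A reduced sketch of the same pattern
   (illustrative names, not the real definitions):

       #define CHECKED_NONNULL(P) __extension__                  \
         ({ void *p_ = (P);                                      \
            if (p_ == 0)                                         \
              report_check_failure (__FILE__, __LINE__,          \
                                    __FUNCTION__);               \
            p_; })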
*/ #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define TREE_CHECK(T, CODE) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) != (CODE)) \
 tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE), 0); \
 __t; })
#define TREE_NOT_CHECK(T, CODE) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) == (CODE)) \
 tree_not_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE), 0); \
 __t; })
#define TREE_CHECK2(T, CODE1, CODE2) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) != (CODE1) \
 && TREE_CODE (__t) != (CODE2)) \
 tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE1), (CODE2), 0); \
 __t; })
#define TREE_NOT_CHECK2(T, CODE1, CODE2) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) == (CODE1) \
 || TREE_CODE (__t) == (CODE2)) \
 tree_not_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE1), (CODE2), 0); \
 __t; })
#define TREE_CHECK3(T, CODE1, CODE2, CODE3) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) != (CODE1) \
 && TREE_CODE (__t) != (CODE2) \
 && TREE_CODE (__t) != (CODE3)) \
 tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE1), (CODE2), (CODE3), 0); \
 __t; })
#define TREE_NOT_CHECK3(T, CODE1, CODE2, CODE3) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) == (CODE1) \
 || TREE_CODE (__t) == (CODE2) \
 || TREE_CODE (__t) == (CODE3)) \
 tree_not_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE1), (CODE2), (CODE3), 0); \
 __t; })
#define TREE_CHECK4(T, CODE1, CODE2, CODE3, CODE4) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) != (CODE1) \
 && TREE_CODE (__t) != (CODE2) \
 && TREE_CODE (__t) != (CODE3) \
 && TREE_CODE (__t) != (CODE4)) \
 tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE1), (CODE2), (CODE3), (CODE4), 0); \
 __t; })
#define TREE_NOT_CHECK4(T, CODE1, CODE2, CODE3, CODE4) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) == (CODE1) \
 || TREE_CODE (__t) == (CODE2) \
 || TREE_CODE (__t) == (CODE3) \
 || TREE_CODE (__t) == (CODE4)) \
 tree_not_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE1), (CODE2), (CODE3), (CODE4), 0); \
 __t; })
#define TREE_CHECK5(T, CODE1, CODE2, CODE3, CODE4, CODE5) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) != (CODE1) \
 && TREE_CODE (__t) != (CODE2) \
 && TREE_CODE (__t) != (CODE3) \
 && TREE_CODE (__t) != (CODE4) \
 && TREE_CODE (__t) != (CODE5)) \
 tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE1), (CODE2), (CODE3), (CODE4), (CODE5), 0);\
 __t; })
#define TREE_NOT_CHECK5(T, CODE1, CODE2, CODE3, CODE4, CODE5) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE (__t) == (CODE1) \
 || TREE_CODE (__t) == (CODE2) \
 || TREE_CODE (__t) == (CODE3) \
 || TREE_CODE (__t) == (CODE4) \
 || TREE_CODE (__t) == (CODE5)) \
 tree_not_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \
 (CODE1), (CODE2), (CODE3), (CODE4), (CODE5), 0);\
 __t; })
#define TREE_CLASS_CHECK(T, CLASS) __extension__ \
({ const tree __t = (T); \
 if (TREE_CODE_CLASS (TREE_CODE(__t)) != (CLASS)) \
 tree_class_check_failed (__t, (CLASS), __FILE__, __LINE__, \
 __FUNCTION__); \
 __t; })
/* These checks have to be special cased. */
#define EXPR_CHECK(T) __extension__ \
({ const tree __t = (T); \
 char const __c = TREE_CODE_CLASS (TREE_CODE (__t)); \
 if (!IS_EXPR_CODE_CLASS (__c)) \
 tree_class_check_failed (__t, 'E', __FILE__, __LINE__, \
 __FUNCTION__); \
 __t; })
/* These checks have to be special cased.
*/ #define NON_TYPE_CHECK(T) __extension__ \ ({ const tree __t = (T); \ char const __c = TREE_CODE_CLASS (TREE_CODE (__t)); \ if (!IS_NON_TYPE_CODE_CLASS (__c)) \ tree_class_check_failed (__t, 'T', __FILE__, __LINE__, \ __FUNCTION__); \ __t; }) #define TREE_VEC_ELT_CHECK(T, I) __extension__ \ (*({const tree __t = (T); \ const int __i = (I); \ if (TREE_CODE (__t) != TREE_VEC) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \ TREE_VEC, 0); \ if (__i < 0 || __i >= __t->vec.length) \ tree_vec_elt_check_failed (__i, __t->vec.length, \ __FILE__, __LINE__, __FUNCTION__); \ &__t->vec.a[__i]; })) #define PHI_NODE_ELT_CHECK(t, i) __extension__ \ (*({const tree __t = t; \ const int __i = (i); \ if (TREE_CODE (__t) != PHI_NODE) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, \ PHI_NODE, 0); \ if (__i < 0 || __i >= __t->phi.capacity) \ phi_node_elt_check_failed (__i, __t->phi.num_args, \ __FILE__, __LINE__, __FUNCTION__); \ &__t->phi.a[__i]; })) /* Special checks for TREE_OPERANDs. */ #define TREE_OPERAND_CHECK(T, I) __extension__ \ (*({const tree __t = EXPR_CHECK (T); \ const int __i = (I); \ if (__i < 0 || __i >= TREE_CODE_LENGTH (TREE_CODE (__t))) \ tree_operand_check_failed (__i, TREE_CODE (__t), \ __FILE__, __LINE__, __FUNCTION__); \ &__t->exp.operands[__i]; })) #define TREE_OPERAND_CHECK_CODE(T, CODE, I) __extension__ \ (*({const tree __t = (T); \ const int __i = (I); \ if (TREE_CODE (__t) != CODE) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, (CODE), 0);\ if (__i < 0 || __i >= TREE_CODE_LENGTH (CODE)) \ tree_operand_check_failed (__i, (CODE), \ __FILE__, __LINE__, __FUNCTION__); \ &__t->exp.operands[__i]; })) #define TREE_RTL_OPERAND_CHECK(T, CODE, I) __extension__ \ (*(rtx *) \ ({const tree __t = (T); \ const int __i = (I); \ if (TREE_CODE (__t) != (CODE)) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, (CODE), 0); \ if (__i < 0 || __i >= TREE_CODE_LENGTH ((CODE))) \ tree_operand_check_failed (__i, (CODE), \ __FILE__, __LINE__, __FUNCTION__); \ &__t->exp.operands[__i]; })) extern void tree_check_failed (const tree, const char *, int, const char *, ...) ATTRIBUTE_NORETURN; extern void tree_not_check_failed (const tree, const char *, int, const char *, ...) 
ATTRIBUTE_NORETURN; extern void tree_class_check_failed (const tree, int, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void tree_vec_elt_check_failed (int, int, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void phi_node_elt_check_failed (int, int, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void tree_operand_check_failed (int, enum tree_code, const char *, int, const char *) ATTRIBUTE_NORETURN; #else /* not ENABLE_TREE_CHECKING, or not gcc */ #define TREE_CHECK(T, CODE) (T) #define TREE_NOT_CHECK(T, CODE) (T) #define TREE_CHECK2(T, CODE1, CODE2) (T) #define TREE_NOT_CHECK2(T, CODE1, CODE2) (T) #define TREE_CHECK3(T, CODE1, CODE2, CODE3) (T) #define TREE_NOT_CHECK3(T, CODE1, CODE2, CODE3) (T) #define TREE_CHECK4(T, CODE1, CODE2, CODE3, CODE4) (T) #define TREE_NOT_CHECK4(T, CODE1, CODE2, CODE3, CODE4) (T) #define TREE_CHECK5(T, CODE1, CODE2, CODE3, CODE4, CODE5) (T) #define TREE_NOT_CHECK5(T, CODE1, CODE2, CODE3, CODE4, CODE5) (T) #define TREE_CLASS_CHECK(T, CODE) (T) #define EXPR_CHECK(T) (T) #define NON_TYPE_CHECK(T) (T) #define TREE_VEC_ELT_CHECK(T, I) ((T)->vec.a[I]) #define TREE_OPERAND_CHECK(T, I) ((T)->exp.operands[I]) #define TREE_OPERAND_CHECK_CODE(T, CODE, I) ((T)->exp.operands[I]) #define TREE_RTL_OPERAND_CHECK(T, CODE, I) (*(rtx *) &((T)->exp.operands[I])) #define PHI_NODE_ELT_CHECK(T, i) ((T)->phi.a[i]) #endif #define TREE_BLOCK(NODE) ((NODE)->exp.block) /* This file is generated using gencheck. Do not edit. */ #ifndef GCC_TREE_CHECK_H #define GCC_TREE_CHECK_H #define ERROR_MARK_CHECK(t) TREE_CHECK (t, ERROR_MARK) #define IDENTIFIER_NODE_CHECK(t) TREE_CHECK (t, IDENTIFIER_NODE) #define TREE_LIST_CHECK(t) TREE_CHECK (t, TREE_LIST) #define TREE_VEC_CHECK(t) TREE_CHECK (t, TREE_VEC) #define BLOCK_CHECK(t) TREE_CHECK (t, BLOCK) #define VOID_TYPE_CHECK(t) TREE_CHECK (t, VOID_TYPE) #define INTEGER_TYPE_CHECK(t) TREE_CHECK (t, INTEGER_TYPE) #define REAL_TYPE_CHECK(t) TREE_CHECK (t, REAL_TYPE) #define COMPLEX_TYPE_CHECK(t) TREE_CHECK (t, COMPLEX_TYPE) #define VECTOR_TYPE_CHECK(t) TREE_CHECK (t, VECTOR_TYPE) #define ENUMERAL_TYPE_CHECK(t) TREE_CHECK (t, ENUMERAL_TYPE) #define BOOLEAN_TYPE_CHECK(t) TREE_CHECK (t, BOOLEAN_TYPE) #define CHAR_TYPE_CHECK(t) TREE_CHECK (t, CHAR_TYPE) #define POINTER_TYPE_CHECK(t) TREE_CHECK (t, POINTER_TYPE) #define OFFSET_TYPE_CHECK(t) TREE_CHECK (t, OFFSET_TYPE) #define REFERENCE_TYPE_CHECK(t) TREE_CHECK (t, REFERENCE_TYPE) #define METHOD_TYPE_CHECK(t) TREE_CHECK (t, METHOD_TYPE) #define FILE_TYPE_CHECK(t) TREE_CHECK (t, FILE_TYPE) #define ARRAY_TYPE_CHECK(t) TREE_CHECK (t, ARRAY_TYPE) #define SET_TYPE_CHECK(t) TREE_CHECK (t, SET_TYPE) #define RECORD_TYPE_CHECK(t) TREE_CHECK (t, RECORD_TYPE) #define UNION_TYPE_CHECK(t) TREE_CHECK (t, UNION_TYPE) #define QUAL_UNION_TYPE_CHECK(t) TREE_CHECK (t, QUAL_UNION_TYPE) #define FUNCTION_TYPE_CHECK(t) TREE_CHECK (t, FUNCTION_TYPE) #define LANG_TYPE_CHECK(t) TREE_CHECK (t, LANG_TYPE) #define INTEGER_CST_CHECK(t) TREE_CHECK (t, INTEGER_CST) #define REAL_CST_CHECK(t) TREE_CHECK (t, REAL_CST) #define COMPLEX_CST_CHECK(t) TREE_CHECK (t, COMPLEX_CST) #define VECTOR_CST_CHECK(t) TREE_CHECK (t, VECTOR_CST) #define STRING_CST_CHECK(t) TREE_CHECK (t, STRING_CST) #define FUNCTION_DECL_CHECK(t) TREE_CHECK (t, FUNCTION_DECL) #define LABEL_DECL_CHECK(t) TREE_CHECK (t, LABEL_DECL) #define CONST_DECL_CHECK(t) TREE_CHECK (t, CONST_DECL) #define TYPE_DECL_CHECK(t) TREE_CHECK (t, TYPE_DECL) #define VAR_DECL_CHECK(t) TREE_CHECK (t, VAR_DECL) #define PARM_DECL_CHECK(t) TREE_CHECK (t, PARM_DECL) 
#define RESULT_DECL_CHECK(t) TREE_CHECK (t, RESULT_DECL) #define FIELD_DECL_CHECK(t) TREE_CHECK (t, FIELD_DECL) #define NAMESPACE_DECL_CHECK(t) TREE_CHECK (t, NAMESPACE_DECL) #define TRANSLATION_UNIT_DECL_CHECK(t) TREE_CHECK (t, TRANSLATION_UNIT_DECL) #define COMPONENT_REF_CHECK(t) TREE_CHECK (t, COMPONENT_REF) #define BIT_FIELD_REF_CHECK(t) TREE_CHECK (t, BIT_FIELD_REF) #define INDIRECT_REF_CHECK(t) TREE_CHECK (t, INDIRECT_REF) #define BUFFER_REF_CHECK(t) TREE_CHECK (t, BUFFER_REF) #define ARRAY_REF_CHECK(t) TREE_CHECK (t, ARRAY_REF) #define ARRAY_RANGE_REF_CHECK(t) TREE_CHECK (t, ARRAY_RANGE_REF) #define OBJ_TYPE_REF_CHECK(t) TREE_CHECK (t, OBJ_TYPE_REF) #define EXC_PTR_EXPR_CHECK(t) TREE_CHECK (t, EXC_PTR_EXPR) #define FILTER_EXPR_CHECK(t) TREE_CHECK (t, FILTER_EXPR) #define CONSTRUCTOR_CHECK(t) TREE_CHECK (t, CONSTRUCTOR) #define COMPOUND_EXPR_CHECK(t) TREE_CHECK (t, COMPOUND_EXPR) #define MODIFY_EXPR_CHECK(t) TREE_CHECK (t, MODIFY_EXPR) #define INIT_EXPR_CHECK(t) TREE_CHECK (t, INIT_EXPR) #define TARGET_EXPR_CHECK(t) TREE_CHECK (t, TARGET_EXPR) #define COND_EXPR_CHECK(t) TREE_CHECK (t, COND_EXPR) #define BIND_EXPR_CHECK(t) TREE_CHECK (t, BIND_EXPR) #define LABELED_BLOCK_EXPR_CHECK(t) TREE_CHECK (t, LABELED_BLOCK_EXPR) #define CALL_EXPR_CHECK(t) TREE_CHECK (t, CALL_EXPR) #define WITH_CLEANUP_EXPR_CHECK(t) TREE_CHECK (t, WITH_CLEANUP_EXPR) #define CLEANUP_POINT_EXPR_CHECK(t) TREE_CHECK (t, CLEANUP_POINT_EXPR) #define PLACEHOLDER_EXPR_CHECK(t) TREE_CHECK (t, PLACEHOLDER_EXPR) #define PLUS_EXPR_CHECK(t) TREE_CHECK (t, PLUS_EXPR) #define MINUS_EXPR_CHECK(t) TREE_CHECK (t, MINUS_EXPR) #define MULT_EXPR_CHECK(t) TREE_CHECK (t, MULT_EXPR) #define TRUNC_DIV_EXPR_CHECK(t) TREE_CHECK (t, TRUNC_DIV_EXPR) #define CEIL_DIV_EXPR_CHECK(t) TREE_CHECK (t, CEIL_DIV_EXPR) #define FLOOR_DIV_EXPR_CHECK(t) TREE_CHECK (t, FLOOR_DIV_EXPR) #define ROUND_DIV_EXPR_CHECK(t) TREE_CHECK (t, ROUND_DIV_EXPR) #define TRUNC_MOD_EXPR_CHECK(t) TREE_CHECK (t, TRUNC_MOD_EXPR) #define CEIL_MOD_EXPR_CHECK(t) TREE_CHECK (t, CEIL_MOD_EXPR) #define FLOOR_MOD_EXPR_CHECK(t) TREE_CHECK (t, FLOOR_MOD_EXPR) #define ROUND_MOD_EXPR_CHECK(t) TREE_CHECK (t, ROUND_MOD_EXPR) #define RDIV_EXPR_CHECK(t) TREE_CHECK (t, RDIV_EXPR) #define EXACT_DIV_EXPR_CHECK(t) TREE_CHECK (t, EXACT_DIV_EXPR) #define FIX_TRUNC_EXPR_CHECK(t) TREE_CHECK (t, FIX_TRUNC_EXPR) #define FIX_CEIL_EXPR_CHECK(t) TREE_CHECK (t, FIX_CEIL_EXPR) #define FIX_FLOOR_EXPR_CHECK(t) TREE_CHECK (t, FIX_FLOOR_EXPR) #define FIX_ROUND_EXPR_CHECK(t) TREE_CHECK (t, FIX_ROUND_EXPR) #define FLOAT_EXPR_CHECK(t) TREE_CHECK (t, FLOAT_EXPR) #define NEGATE_EXPR_CHECK(t) TREE_CHECK (t, NEGATE_EXPR) #define MIN_EXPR_CHECK(t) TREE_CHECK (t, MIN_EXPR) #define MAX_EXPR_CHECK(t) TREE_CHECK (t, MAX_EXPR) #define ABS_EXPR_CHECK(t) TREE_CHECK (t, ABS_EXPR) #define LSHIFT_EXPR_CHECK(t) TREE_CHECK (t, LSHIFT_EXPR) #define RSHIFT_EXPR_CHECK(t) TREE_CHECK (t, RSHIFT_EXPR) #define LROTATE_EXPR_CHECK(t) TREE_CHECK (t, LROTATE_EXPR) #define RROTATE_EXPR_CHECK(t) TREE_CHECK (t, RROTATE_EXPR) #define BIT_IOR_EXPR_CHECK(t) TREE_CHECK (t, BIT_IOR_EXPR) #define BIT_XOR_EXPR_CHECK(t) TREE_CHECK (t, BIT_XOR_EXPR) #define BIT_AND_EXPR_CHECK(t) TREE_CHECK (t, BIT_AND_EXPR) #define BIT_NOT_EXPR_CHECK(t) TREE_CHECK (t, BIT_NOT_EXPR) #define TRUTH_ANDIF_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_ANDIF_EXPR) #define TRUTH_ORIF_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_ORIF_EXPR) #define TRUTH_AND_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_AND_EXPR) #define TRUTH_OR_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_OR_EXPR) #define TRUTH_XOR_EXPR_CHECK(t) 
TREE_CHECK (t, TRUTH_XOR_EXPR) #define TRUTH_NOT_EXPR_CHECK(t) TREE_CHECK (t, TRUTH_NOT_EXPR) #define LT_EXPR_CHECK(t) TREE_CHECK (t, LT_EXPR) #define LE_EXPR_CHECK(t) TREE_CHECK (t, LE_EXPR) #define GT_EXPR_CHECK(t) TREE_CHECK (t, GT_EXPR) #define GE_EXPR_CHECK(t) TREE_CHECK (t, GE_EXPR) #define EQ_EXPR_CHECK(t) TREE_CHECK (t, EQ_EXPR) #define NE_EXPR_CHECK(t) TREE_CHECK (t, NE_EXPR) #define UNORDERED_EXPR_CHECK(t) TREE_CHECK (t, UNORDERED_EXPR) #define ORDERED_EXPR_CHECK(t) TREE_CHECK (t, ORDERED_EXPR) #define UNLT_EXPR_CHECK(t) TREE_CHECK (t, UNLT_EXPR) #define UNLE_EXPR_CHECK(t) TREE_CHECK (t, UNLE_EXPR) #define UNGT_EXPR_CHECK(t) TREE_CHECK (t, UNGT_EXPR) #define UNGE_EXPR_CHECK(t) TREE_CHECK (t, UNGE_EXPR) #define UNEQ_EXPR_CHECK(t) TREE_CHECK (t, UNEQ_EXPR) #define LTGT_EXPR_CHECK(t) TREE_CHECK (t, LTGT_EXPR) #define IN_EXPR_CHECK(t) TREE_CHECK (t, IN_EXPR) #define SET_LE_EXPR_CHECK(t) TREE_CHECK (t, SET_LE_EXPR) #define CARD_EXPR_CHECK(t) TREE_CHECK (t, CARD_EXPR) #define RANGE_EXPR_CHECK(t) TREE_CHECK (t, RANGE_EXPR) #define CONVERT_EXPR_CHECK(t) TREE_CHECK (t, CONVERT_EXPR) #define NOP_EXPR_CHECK(t) TREE_CHECK (t, NOP_EXPR) #define NON_LVALUE_EXPR_CHECK(t) TREE_CHECK (t, NON_LVALUE_EXPR) #define VIEW_CONVERT_EXPR_CHECK(t) TREE_CHECK (t, VIEW_CONVERT_EXPR) #define SAVE_EXPR_CHECK(t) TREE_CHECK (t, SAVE_EXPR) #define UNSAVE_EXPR_CHECK(t) TREE_CHECK (t, UNSAVE_EXPR) #define ADDR_EXPR_CHECK(t) TREE_CHECK (t, ADDR_EXPR) #define REFERENCE_EXPR_CHECK(t) TREE_CHECK (t, REFERENCE_EXPR) #define ENTRY_VALUE_EXPR_CHECK(t) TREE_CHECK (t, ENTRY_VALUE_EXPR) #define FDESC_EXPR_CHECK(t) TREE_CHECK (t, FDESC_EXPR) #define COMPLEX_EXPR_CHECK(t) TREE_CHECK (t, COMPLEX_EXPR) #define CONJ_EXPR_CHECK(t) TREE_CHECK (t, CONJ_EXPR) #define REALPART_EXPR_CHECK(t) TREE_CHECK (t, REALPART_EXPR) #define IMAGPART_EXPR_CHECK(t) TREE_CHECK (t, IMAGPART_EXPR) #define PREDECREMENT_EXPR_CHECK(t) TREE_CHECK (t, PREDECREMENT_EXPR) #define PREINCREMENT_EXPR_CHECK(t) TREE_CHECK (t, PREINCREMENT_EXPR) #define POSTDECREMENT_EXPR_CHECK(t) TREE_CHECK (t, POSTDECREMENT_EXPR) #define POSTINCREMENT_EXPR_CHECK(t) TREE_CHECK (t, POSTINCREMENT_EXPR) #define VA_ARG_EXPR_CHECK(t) TREE_CHECK (t, VA_ARG_EXPR) #define TRY_CATCH_EXPR_CHECK(t) TREE_CHECK (t, TRY_CATCH_EXPR) #define TRY_FINALLY_EXPR_CHECK(t) TREE_CHECK (t, TRY_FINALLY_EXPR) #define DECL_EXPR_CHECK(t) TREE_CHECK (t, DECL_EXPR) #define LABEL_EXPR_CHECK(t) TREE_CHECK (t, LABEL_EXPR) #define GOTO_EXPR_CHECK(t) TREE_CHECK (t, GOTO_EXPR) #define GOTO_SUBROUTINE_EXPR_CHECK(t) TREE_CHECK (t, GOTO_SUBROUTINE_EXPR) #define RETURN_EXPR_CHECK(t) TREE_CHECK (t, RETURN_EXPR) #define EXIT_EXPR_CHECK(t) TREE_CHECK (t, EXIT_EXPR) #define LOOP_EXPR_CHECK(t) TREE_CHECK (t, LOOP_EXPR) #define EXIT_BLOCK_EXPR_CHECK(t) TREE_CHECK (t, EXIT_BLOCK_EXPR) #define SWITCH_EXPR_CHECK(t) TREE_CHECK (t, SWITCH_EXPR) #define CASE_LABEL_EXPR_CHECK(t) TREE_CHECK (t, CASE_LABEL_EXPR) #define RESX_EXPR_CHECK(t) TREE_CHECK (t, RESX_EXPR) #define ASM_EXPR_CHECK(t) TREE_CHECK (t, ASM_EXPR) #define SSA_NAME_CHECK(t) TREE_CHECK (t, SSA_NAME) #define PHI_NODE_CHECK(t) TREE_CHECK (t, PHI_NODE) #define CATCH_EXPR_CHECK(t) TREE_CHECK (t, CATCH_EXPR) #define EH_FILTER_EXPR_CHECK(t) TREE_CHECK (t, EH_FILTER_EXPR) #define SCEV_KNOWN_CHECK(t) TREE_CHECK (t, SCEV_KNOWN) #define SCEV_NOT_KNOWN_CHECK(t) TREE_CHECK (t, SCEV_NOT_KNOWN) #define POLYNOMIAL_CHREC_CHECK(t) TREE_CHECK (t, POLYNOMIAL_CHREC) #define STATEMENT_LIST_CHECK(t) TREE_CHECK (t, STATEMENT_LIST) #define VALUE_HANDLE_CHECK(t) TREE_CHECK (t, VALUE_HANDLE) 
#define TREE_BINFO_CHECK(t) TREE_CHECK (t, TREE_BINFO) #define SIZEOF_EXPR_CHECK(t) TREE_CHECK (t, SIZEOF_EXPR) #define ARROW_EXPR_CHECK(t) TREE_CHECK (t, ARROW_EXPR) #define ALIGNOF_EXPR_CHECK(t) TREE_CHECK (t, ALIGNOF_EXPR) #define EXPR_STMT_CHECK(t) TREE_CHECK (t, EXPR_STMT) #define FOR_STMT_CHECK(t) TREE_CHECK (t, FOR_STMT) #define WHILE_STMT_CHECK(t) TREE_CHECK (t, WHILE_STMT) #define DO_STMT_CHECK(t) TREE_CHECK (t, DO_STMT) #define BREAK_STMT_CHECK(t) TREE_CHECK (t, BREAK_STMT) #define CONTINUE_STMT_CHECK(t) TREE_CHECK (t, CONTINUE_STMT) #define SWITCH_STMT_CHECK(t) TREE_CHECK (t, SWITCH_STMT) #define STMT_EXPR_CHECK(t) TREE_CHECK (t, STMT_EXPR) #define COMPOUND_LITERAL_EXPR_CHECK(t) TREE_CHECK (t, COMPOUND_LITERAL_EXPR) #endif /* GCC_TREE_CHECK_H */ #define TYPE_CHECK(T) TREE_CLASS_CHECK (T, 't') #define DECL_CHECK(T) TREE_CLASS_CHECK (T, 'd') #define CST_CHECK(T) TREE_CLASS_CHECK (T, 'c') #define STMT_CHECK(T) TREE_CLASS_CHECK (T, 's') #define FUNC_OR_METHOD_CHECK(T) TREE_CHECK2 (T, FUNCTION_TYPE, METHOD_TYPE) #define PTR_OR_REF_CHECK(T) TREE_CHECK2 (T, POINTER_TYPE, REFERENCE_TYPE) #define SET_OR_ARRAY_CHECK(T) \ TREE_CHECK2 (T, ARRAY_TYPE, SET_TYPE) #define RECORD_OR_UNION_CHECK(T) \ TREE_CHECK3 (T, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE) #define NOT_RECORD_OR_UNION_CHECK(T) \ TREE_NOT_CHECK3 (T, RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE) #define NUMERICAL_TYPE_CHECK(T) \ TREE_CHECK5 (T, INTEGER_TYPE, ENUMERAL_TYPE, BOOLEAN_TYPE, \ CHAR_TYPE, REAL_TYPE) /* In all nodes that are expressions, this is the data type of the expression. In POINTER_TYPE nodes, this is the type that the pointer points to. In ARRAY_TYPE nodes, this is the type of the elements. In VECTOR_TYPE nodes, this is the type of the elements. */ #define TREE_TYPE(NODE) ((NODE)->common.type) /* Here is how primitive or already-canonicalized types' hash codes are made. */ #define TYPE_HASH(TYPE) (TYPE_UID (TYPE)) /* A simple hash function for an arbitrary tree node. This must not be used in hash tables which are saved to a PCH. */ #define TREE_HASH(NODE) ((size_t) (NODE) & 0777777) /* Nodes are chained together for many purposes. Types are chained together to record them for being output to the debugger (see the function `chain_type'). Decls in the same scope are chained together to record the contents of the scope. Statement nodes for successive statements used to be chained together. Often lists of things are represented by TREE_LIST nodes that are chained together. */ #define TREE_CHAIN(NODE) ((NODE)->common.chain) /* Given an expression as a tree, strip any NON_LVALUE_EXPRs and NOP_EXPRs that don't change the machine mode. */ #define STRIP_NOPS(EXP) \ while ((TREE_CODE (EXP) == NOP_EXPR \ || TREE_CODE (EXP) == CONVERT_EXPR \ || TREE_CODE (EXP) == NON_LVALUE_EXPR) \ && TREE_OPERAND (EXP, 0) != error_mark_node \ && (TYPE_MODE (TREE_TYPE (EXP)) \ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (EXP, 0))))) \ (EXP) = TREE_OPERAND (EXP, 0) /* Like STRIP_NOPS, but don't let the signedness change either. */ #define STRIP_SIGN_NOPS(EXP) \ while ((TREE_CODE (EXP) == NOP_EXPR \ || TREE_CODE (EXP) == CONVERT_EXPR \ || TREE_CODE (EXP) == NON_LVALUE_EXPR) \ && TREE_OPERAND (EXP, 0) != error_mark_node \ && (TYPE_MODE (TREE_TYPE (EXP)) \ == TYPE_MODE (TREE_TYPE (TREE_OPERAND (EXP, 0)))) \ && (TYPE_UNSIGNED (TREE_TYPE (EXP)) \ == TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (EXP, 0))))) \ (EXP) = TREE_OPERAND (EXP, 0) /* Like STRIP_NOPS, but don't alter the TREE_TYPE main variant either. 
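   All of the STRIP_*_NOPS macros modify their argument in place; a typical
   (purely illustrative) use, where `expr' stands for some expression
   obtained elsewhere, is

	tree inner = expr;
	STRIP_NOPS (inner);
	if (TREE_CODE (inner) == INTEGER_CST)
	  ...

   so that useless conversions do not hide the node underneath.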
*/ #define STRIP_MAIN_TYPE_NOPS(EXP) \ while ((TREE_CODE (EXP) == NOP_EXPR \ || TREE_CODE (EXP) == CONVERT_EXPR \ || TREE_CODE (EXP) == NON_LVALUE_EXPR) \ && TREE_OPERAND (EXP, 0) != error_mark_node \ && (TYPE_MAIN_VARIANT (TREE_TYPE (EXP)) \ == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (EXP, 0))))) \ (EXP) = TREE_OPERAND (EXP, 0) /* Like STRIP_NOPS, but don't alter the TREE_TYPE either. */ #define STRIP_TYPE_NOPS(EXP) \ while ((TREE_CODE (EXP) == NOP_EXPR \ || TREE_CODE (EXP) == CONVERT_EXPR \ || TREE_CODE (EXP) == NON_LVALUE_EXPR) \ && TREE_OPERAND (EXP, 0) != error_mark_node \ && (TREE_TYPE (EXP) \ == TREE_TYPE (TREE_OPERAND (EXP, 0)))) \ (EXP) = TREE_OPERAND (EXP, 0) /* Remove unnecessary type conversions according to tree_ssa_useless_type_conversion. */ #define STRIP_USELESS_TYPE_CONVERSION(EXP) \ while (tree_ssa_useless_type_conversion (EXP)) \ EXP = TREE_OPERAND (EXP, 0) /* Nonzero if TYPE represents an integral type. Note that we do not include COMPLEX types here. */ #define INTEGRAL_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == INTEGER_TYPE || TREE_CODE (TYPE) == ENUMERAL_TYPE \ || TREE_CODE (TYPE) == BOOLEAN_TYPE || TREE_CODE (TYPE) == CHAR_TYPE) /* Nonzero if TYPE represents a scalar floating-point type. */ #define SCALAR_FLOAT_TYPE_P(TYPE) (TREE_CODE (TYPE) == REAL_TYPE) /* Nonzero if TYPE represents a complex floating-point type. */ #define COMPLEX_FLOAT_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == COMPLEX_TYPE \ && TREE_CODE (TREE_TYPE (TYPE)) == REAL_TYPE) /* Nonzero if TYPE represents a floating-point type, including complex floating-point types. */ #define FLOAT_TYPE_P(TYPE) \ (SCALAR_FLOAT_TYPE_P (TYPE) || COMPLEX_FLOAT_TYPE_P (TYPE)) /* Nonzero if TYPE represents an aggregate (multi-component) type. */ #define AGGREGATE_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == ARRAY_TYPE || TREE_CODE (TYPE) == RECORD_TYPE \ || TREE_CODE (TYPE) == UNION_TYPE || TREE_CODE (TYPE) == QUAL_UNION_TYPE \ || TREE_CODE (TYPE) == SET_TYPE) /* Nonzero if TYPE represents a pointer or reference type. (It should be renamed to INDIRECT_TYPE_P.) */ #define POINTER_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == POINTER_TYPE || TREE_CODE (TYPE) == REFERENCE_TYPE) /* Nonzero if this type is a complete type. */ #define COMPLETE_TYPE_P(NODE) (TYPE_SIZE (NODE) != NULL_TREE) /* Nonzero if this type is the (possibly qualified) void type. */ #define VOID_TYPE_P(NODE) (TREE_CODE (NODE) == VOID_TYPE) /* Nonzero if this type is complete or is cv void. */ #define COMPLETE_OR_VOID_TYPE_P(NODE) \ (COMPLETE_TYPE_P (NODE) || VOID_TYPE_P (NODE)) /* Nonzero if this type is complete or is an array with unspecified bound. */ #define COMPLETE_OR_UNBOUND_ARRAY_TYPE_P(NODE) \ (COMPLETE_TYPE_P (TREE_CODE (NODE) == ARRAY_TYPE ? TREE_TYPE (NODE) : (NODE))) /* Nonzero if TYPE represents a type. */ #define TYPE_P(TYPE) (TREE_CODE_CLASS (TREE_CODE (TYPE)) == 't') /* Define many boolean fields that all tree nodes have. */ /* In VAR_DECL nodes, nonzero means address of this is needed. So it cannot be in a register. In a FUNCTION_DECL, nonzero means its address is needed. So it must be compiled even if it is an inline function. In a FIELD_DECL node, it means that the programmer is permitted to construct the address of this field. This is used for aliasing purposes: see record_component_aliases. In CONSTRUCTOR nodes, it means object constructed must be in memory. In LABEL_DECL nodes, it means a goto for this label has been seen from a place outside all binding contours that restore stack levels. 
In ..._TYPE nodes, it means that objects of this type must be fully addressable. This means that pieces of this object cannot go into register parameters, for example. In IDENTIFIER_NODEs, this means that some extern decl for this name had its address taken. That matters for inline functions. */ #define TREE_ADDRESSABLE(NODE) ((NODE)->common.addressable_flag) #define CALL_EXPR_TAILCALL(NODE) (CALL_EXPR_CHECK(NODE)->common.addressable_flag) /* In a VAR_DECL, nonzero means allocate static storage. In a FUNCTION_DECL, nonzero if function has been defined. In a CONSTRUCTOR, nonzero means allocate static storage. ??? This is also used in lots of other nodes in unclear ways which should be cleaned up some day. */ #define TREE_STATIC(NODE) ((NODE)->common.static_flag) /* In a TARGET_EXPR, WITH_CLEANUP_EXPR, means that the pertinent cleanup should only be executed if an exception is thrown, not on normal exit of its scope. */ #define CLEANUP_EH_ONLY(NODE) ((NODE)->common.static_flag) /* In an expr node (usually a conversion) this means the node was made implicitly and should not lead to any sort of warning. */ #define TREE_NO_WARNING(NODE) ((NODE)->common.nowarning_flag) /* In an INTEGER_CST, REAL_CST, COMPLEX_CST, or VECTOR_CST this means there was an overflow in folding. This is distinct from TREE_OVERFLOW because ANSI C requires a diagnostic when overflows occur in constant expressions. */ #define TREE_CONSTANT_OVERFLOW(NODE) (CST_CHECK (NODE)->common.static_flag) /* In an IDENTIFIER_NODE, this means that assemble_name was called with this string as an argument. */ #define TREE_SYMBOL_REFERENCED(NODE) \ (IDENTIFIER_NODE_CHECK (NODE)->common.static_flag) /* Nonzero in a pointer or reference type means the data pointed to by this type can alias anything. */ #define TYPE_REF_CAN_ALIAS_ALL(NODE) \ (PTR_OR_REF_CHECK (NODE)->common.static_flag) /* In an INTEGER_CST, REAL_CST, COMPLEX_CST, or VECTOR_CST, this means there was an overflow in folding, and no warning has been issued for this subexpression. TREE_OVERFLOW implies TREE_CONSTANT_OVERFLOW, but not vice versa. ??? Apparently, lots of code assumes this is defined in all expressions. */ #define TREE_OVERFLOW(NODE) ((NODE)->common.public_flag) /* In a VAR_DECL or FUNCTION_DECL, nonzero means name is to be accessible from outside this module. In an IDENTIFIER_NODE, nonzero means an external declaration accessible from outside this module was previously seen for this name in an inner scope. */ #define TREE_PUBLIC(NODE) ((NODE)->common.public_flag) /* In any expression, decl, or constant, nonzero means it has side effects or reevaluation of the whole expression could produce a different value. This is set if any subexpression is a function call, a side effect or a reference to a volatile variable. In a ..._DECL, this is set only if the declaration said `volatile'. This will never be set for a constant. */ #define TREE_SIDE_EFFECTS(NODE) \ (NON_TYPE_CHECK (NODE)->common.side_effects_flag) /* In a LABEL_DECL, nonzero means this label had its address taken and therefore can never be deleted and is a jump target for computed gotos. */ #define FORCED_LABEL(NODE) ((NODE)->common.side_effects_flag) /* Nonzero means this expression is volatile in the C sense: its address should be of type `volatile WHATEVER *'. In other words, the declared item is volatile qualified. This is used in _DECL nodes and _REF nodes. In a ..._TYPE node, means this type is volatile-qualified. 
But use TYPE_VOLATILE instead of this macro when the node is a type, because eventually we may make that a different bit. If this bit is set in an expression, so is TREE_SIDE_EFFECTS. */ #define TREE_THIS_VOLATILE(NODE) ((NODE)->common.volatile_flag) /* Nonzero means this node will not trap. In an INDIRECT_REF, means accessing the memory pointed to won't generate a trap. However, this only applies to an object when used appropriately: it doesn't mean that writing a READONLY mem won't trap. */ #define TREE_THIS_NOTRAP(NODE) ((NODE)->common.nothrow_flag) /* In a VAR_DECL, PARM_DECL or FIELD_DECL, or any kind of ..._REF node, nonzero means it may not be the lhs of an assignment. */ #define TREE_READONLY(NODE) (NON_TYPE_CHECK (NODE)->common.readonly_flag) /* Nonzero if NODE is a _DECL with TREE_READONLY set. */ #define TREE_READONLY_DECL_P(NODE) (DECL_P (NODE) && TREE_READONLY (NODE)) /* Value of expression is constant. Always on in all ..._CST nodes. May also appear in an expression or decl where the value is constant. */ #define TREE_CONSTANT(NODE) (NON_TYPE_CHECK (NODE)->common.constant_flag) /* In a decl (most significantly a FIELD_DECL), means an unsigned field. */ #define DECL_UNSIGNED(NODE) (DECL_CHECK (NODE)->common.unsigned_flag) /* In a BIT_FIELD_REF, means the bitfield is to be interpreted as unsigned. */ #define BIT_FIELD_REF_UNSIGNED(NODE) \ (BIT_FIELD_REF_CHECK (NODE)->common.unsigned_flag) /* In integral and pointer types, means an unsigned type. */ #define TYPE_UNSIGNED(NODE) (TYPE_CHECK (NODE)->common.unsigned_flag) #define TYPE_TRAP_SIGNED(NODE) \ (flag_trapv && ! TYPE_UNSIGNED (NODE)) /* Nonzero in a VAR_DECL means assembler code has been written. Nonzero in a FUNCTION_DECL means that the function has been compiled. This is interesting in an inline function, since it might not need to be compiled separately. Nonzero in a RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE or ENUMERAL_TYPE if the sdb debugging info for the type has been written. In a BLOCK node, nonzero if reorder_blocks has already seen this block. In an SSA_NAME node, nonzero if the SSA_NAME occurs in an abnormal PHI node. */ #define TREE_ASM_WRITTEN(NODE) ((NODE)->common.asm_written_flag) /* Nonzero in a _DECL if the name is used in its scope. Nonzero in an expr node means inhibit warning if value is unused. In IDENTIFIER_NODEs, this means that some extern decl for this name was used. */ #define TREE_USED(NODE) ((NODE)->common.used_flag) /* In a FUNCTION_DECL, nonzero means a call to the function cannot throw an exception. In a CALL_EXPR, nonzero means the call cannot throw. */ #define TREE_NOTHROW(NODE) ((NODE)->common.nothrow_flag) /* In a CALL_EXPR, means that the address of the return slot is part of the argument list. */ #define CALL_EXPR_HAS_RETURN_SLOT_ADDR(NODE) ((NODE)->common.private_flag) /* In a CALL_EXPR, means that the call is the jump from a thunk to the thunked-to function. */ #define CALL_FROM_THUNK_P(NODE) ((NODE)->common.protected_flag) /* In a type, nonzero means that all objects of the type are guaranteed by the language or front-end to be properly aligned, so we can indicate that a MEM of this type is aligned at least to the alignment of the type, even if it doesn't appear that it is. We see this, for example, in object-oriented languages where a tag field may show this is an object of a more-aligned variant of the more generic type. In an SSA_NAME node, nonzero if the SSA_NAME node is on the SSA_NAME freelist. 
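   Both meanings share the nothrow_flag bit in tree_common; which one
   applies depends on TREE_CODE, so the corresponding accessors
   (TYPE_ALIGN_OK just below, SSA_NAME_IN_FREE_LIST further down) are
   wrapped in TYPE_CHECK and SSA_NAME_CHECK respectively.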
*/ #define TYPE_ALIGN_OK(NODE) (TYPE_CHECK (NODE)->common.nothrow_flag) /* Used in classes in C++. */ #define TREE_PRIVATE(NODE) ((NODE)->common.private_flag) /* Used in classes in C++. In a BLOCK node, this is BLOCK_HANDLER_BLOCK. */ #define TREE_PROTECTED(NODE) ((NODE)->common.protected_flag) /* Nonzero in an IDENTIFIER_NODE if the use of the name is defined as a deprecated feature by __attribute__((deprecated)). */ #define TREE_DEPRECATED(NODE) ((NODE)->common.deprecated_flag) /* Value of expression is function invariant. A strict subset of TREE_CONSTANT, such an expression is constant over any one function invocation, though not across different invocations. May appear in any expression node. */ #define TREE_INVARIANT(NODE) ((NODE)->common.invariant_flag) /* These flags are available for each language front end to use internally. */ #define TREE_LANG_FLAG_0(NODE) ((NODE)->common.lang_flag_0) #define TREE_LANG_FLAG_1(NODE) ((NODE)->common.lang_flag_1) #define TREE_LANG_FLAG_2(NODE) ((NODE)->common.lang_flag_2) #define TREE_LANG_FLAG_3(NODE) ((NODE)->common.lang_flag_3) #define TREE_LANG_FLAG_4(NODE) ((NODE)->common.lang_flag_4) #define TREE_LANG_FLAG_5(NODE) ((NODE)->common.lang_flag_5) #define TREE_LANG_FLAG_6(NODE) ((NODE)->common.lang_flag_6) /* Define additional fields and accessors for nodes representing constants. */ /* In an INTEGER_CST node. These two together make a 2-word integer. If the data type is signed, the value is sign-extended to 2 words even though not all of them may really be in use. In an unsigned constant shorter than 2 words, the extra bits are 0. */ #define TREE_INT_CST(NODE) (INTEGER_CST_CHECK (NODE)->int_cst.int_cst) #define TREE_INT_CST_LOW(NODE) (TREE_INT_CST (NODE).low) #define TREE_INT_CST_HIGH(NODE) (TREE_INT_CST (NODE).high) #define INT_CST_LT(A, B) \ (TREE_INT_CST_HIGH (A) < TREE_INT_CST_HIGH (B) \ || (TREE_INT_CST_HIGH (A) == TREE_INT_CST_HIGH (B) \ && TREE_INT_CST_LOW (A) < TREE_INT_CST_LOW (B))) #define INT_CST_LT_UNSIGNED(A, B) \ (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \ < (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \ || (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \ == (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \ && TREE_INT_CST_LOW (A) < TREE_INT_CST_LOW (B))) struct tree_int_cst GTY(()) { struct tree_common common; /* A sub-struct is necessary here because the function `const_hash' wants to scan both words as a unit and taking the address of the sub-struct yields the properly inclusive bounded pointer. */ struct tree_int_cst_lowhi { unsigned HOST_WIDE_INT low; HOST_WIDE_INT high; } int_cst; }; /* In a REAL_CST node. struct real_value is an opaque entity, with manipulators defined in real.h. We don't want tree.h depending on real.h and transitively on tm.h. */ struct real_value; #define TREE_REAL_CST_PTR(NODE) (REAL_CST_CHECK (NODE)->real_cst.real_cst_ptr) #define TREE_REAL_CST(NODE) (*TREE_REAL_CST_PTR (NODE)) struct tree_real_cst GTY(()) { struct tree_common common; struct real_value * real_cst_ptr; }; /* In a STRING_CST */ #define TREE_STRING_LENGTH(NODE) (STRING_CST_CHECK (NODE)->string.length) #define TREE_STRING_POINTER(NODE) (STRING_CST_CHECK (NODE)->string.pointer) struct tree_string GTY(()) { struct tree_common common; int length; const char *pointer; }; /* In a COMPLEX_CST node. 
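   Both parts are themselves constant nodes, e.g. a pair of REAL_CSTs for
   a complex floating constant or a pair of INTEGER_CSTs for a complex
   integer constant.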
*/ #define TREE_REALPART(NODE) (COMPLEX_CST_CHECK (NODE)->complex.real) #define TREE_IMAGPART(NODE) (COMPLEX_CST_CHECK (NODE)->complex.imag) struct tree_complex GTY(()) { struct tree_common common; tree real; tree imag; }; /* In a VECTOR_CST node. */ #define TREE_VECTOR_CST_ELTS(NODE) (VECTOR_CST_CHECK (NODE)->vector.elements) struct tree_vector GTY(()) { struct tree_common common; tree elements; }; /* Define fields and accessors for some special-purpose tree nodes. */ #define IDENTIFIER_LENGTH(NODE) \ (IDENTIFIER_NODE_CHECK (NODE)->identifier.id.len) #define IDENTIFIER_POINTER(NODE) \ ((const char *) IDENTIFIER_NODE_CHECK (NODE)->identifier.id.str) #define IDENTIFIER_HASH_VALUE(NODE) \ (IDENTIFIER_NODE_CHECK (NODE)->identifier.id.hash_value) /* Translate a hash table identifier pointer to a tree_identifier pointer, and vice versa. */ #define HT_IDENT_TO_GCC_IDENT(NODE) \ ((tree) ((char *) (NODE) - sizeof (struct tree_common))) #define GCC_IDENT_TO_HT_IDENT(NODE) (&((struct tree_identifier *) (NODE))->id) struct tree_identifier GTY(()) { struct tree_common common; struct ht_identifier id; }; /* In a TREE_LIST node. */ #define TREE_PURPOSE(NODE) (TREE_LIST_CHECK (NODE)->list.purpose) #define TREE_VALUE(NODE) (TREE_LIST_CHECK (NODE)->list.value) struct tree_list GTY(()) { struct tree_common common; tree purpose; tree value; }; /* In a TREE_VEC node. */ #define TREE_VEC_LENGTH(NODE) (TREE_VEC_CHECK (NODE)->vec.length) #define TREE_VEC_END(NODE) \ ((void) TREE_VEC_CHECK (NODE), &((NODE)->vec.a[(NODE)->vec.length])) #define TREE_VEC_ELT(NODE,I) TREE_VEC_ELT_CHECK (NODE, I) struct tree_vec GTY(()) { struct tree_common common; int length; tree GTY ((length ("TREE_VEC_LENGTH ((tree)&%h)"))) a[1]; }; /* Define fields and accessors for some nodes that represent expressions. */ /* Nonzero if NODE is an empty statement (NOP_EXPR <0>). */ #define IS_EMPTY_STMT(NODE) (TREE_CODE (NODE) == NOP_EXPR \ && VOID_TYPE_P (TREE_TYPE (NODE)) \ && integer_zerop (TREE_OPERAND (NODE, 0))) /* In a WITH_CLEANUP_EXPR node. */ #define WITH_CLEANUP_EXPR_RTL(NODE) \ TREE_RTL_OPERAND_CHECK (NODE, WITH_CLEANUP_EXPR, 2) /* In a CONSTRUCTOR node. */ #define CONSTRUCTOR_ELTS(NODE) TREE_OPERAND_CHECK_CODE (NODE, CONSTRUCTOR, 0) /* In ordinary expression nodes. */ #define TREE_OPERAND(NODE, I) TREE_OPERAND_CHECK (NODE, I) #define TREE_COMPLEXITY(NODE) (EXPR_CHECK (NODE)->exp.complexity) /* In a LABELED_BLOCK_EXPR node. */ #define LABELED_BLOCK_LABEL(NODE) \ TREE_OPERAND_CHECK_CODE (NODE, LABELED_BLOCK_EXPR, 0) #define LABELED_BLOCK_BODY(NODE) \ TREE_OPERAND_CHECK_CODE (NODE, LABELED_BLOCK_EXPR, 1) /* In an EXIT_BLOCK_EXPR node. */ #define EXIT_BLOCK_LABELED_BLOCK(NODE) \ TREE_OPERAND_CHECK_CODE (NODE, EXIT_BLOCK_EXPR, 0) #define EXIT_BLOCK_RETURN(NODE) TREE_OPERAND_CHECK_CODE (NODE, EXIT_BLOCK_EXPR, 1) /* In a LOOP_EXPR node. */ #define LOOP_EXPR_BODY(NODE) TREE_OPERAND_CHECK_CODE (NODE, LOOP_EXPR, 0) #ifdef USE_MAPPED_LOCATION /* The source location of this expression. Non-tree_exp nodes such as decls and constants can be shared among multiple locations, so return nothing. */ #define EXPR_LOCATION(NODE) \ (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (NODE))) \ ? (NODE)->exp.locus \ : UNKNOWN_LOCATION) #define SET_EXPR_LOCATION(NODE, FROM) \ (EXPR_CHECK (NODE)->exp.locus = (FROM)) #define EXPR_HAS_LOCATION(NODE) (EXPR_LOCATION (NODE) != UNKNOWN_LOCATION) /* EXPR_LOCUS and SET_EXPR_LOCUS are deprecated. */ #define EXPR_LOCUS(NODE) \ (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (NODE))) \ ? 
&(NODE)->exp.locus \ : (location_t *)NULL) #define SET_EXPR_LOCUS(NODE, FROM) \ do { source_location *loc_tmp = FROM; \ EXPR_CHECK (NODE)->exp.locus \ = loc_tmp == NULL ? UNKNOWN_LOCATION : *loc_tmp; } while (0) #define EXPR_FILENAME(NODE) \ LOCATION_FILE (EXPR_CHECK (NODE)->exp.locus) #define EXPR_LINENO(NODE) \ LOCATION_LINE (EXPR_CHECK (NODE)->exp.locus) #else /* The source location of this expression. Non-tree_exp nodes such as decls and constants can be shared among multiple locations, so return nothing. */ #define EXPR_LOCUS(NODE) \ (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (NODE))) \ ? (NODE)->exp.locus \ : (location_t *)NULL) #define SET_EXPR_LOCUS(NODE, FROM) \ (EXPR_CHECK (NODE)->exp.locus = (FROM)) #define SET_EXPR_LOCATION(NODE, FROM) annotate_with_locus (NODE, FROM) #define EXPR_FILENAME(NODE) \ (EXPR_CHECK (NODE)->exp.locus->file) #define EXPR_LINENO(NODE) \ (EXPR_CHECK (NODE)->exp.locus->line) #define EXPR_HAS_LOCATION(NODE) (EXPR_LOCUS (NODE) != NULL) #define EXPR_LOCATION(NODE) \ (EXPR_HAS_LOCATION(NODE) ? *(NODE)->exp.locus : UNKNOWN_LOCATION) #endif /* In a TARGET_EXPR node. */ #define TARGET_EXPR_SLOT(NODE) TREE_OPERAND_CHECK_CODE (NODE, TARGET_EXPR, 0) #define TARGET_EXPR_INITIAL(NODE) TREE_OPERAND_CHECK_CODE (NODE, TARGET_EXPR, 1) #define TARGET_EXPR_CLEANUP(NODE) TREE_OPERAND_CHECK_CODE (NODE, TARGET_EXPR, 2) /* DECL_EXPR accessor. This gives access to the DECL associated with the given declaration statement. */ #define DECL_EXPR_DECL(NODE) TREE_OPERAND (DECL_EXPR_CHECK (NODE), 0) #define EXIT_EXPR_COND(NODE) TREE_OPERAND (EXIT_EXPR_CHECK (NODE), 0) /* SWITCH_EXPR accessors. These give access to the condition, body and original condition type (before any compiler conversions) of the switch statement, respectively. */ #define SWITCH_COND(NODE) TREE_OPERAND ((NODE), 0) #define SWITCH_BODY(NODE) TREE_OPERAND ((NODE), 1) #define SWITCH_LABELS(NODE) TREE_OPERAND ((NODE), 2) /* CASE_LABEL_EXPR accessors. These give access to the high and low values of a case label, respectively. */ #define CASE_LOW(NODE) TREE_OPERAND ((NODE), 0) #define CASE_HIGH(NODE) TREE_OPERAND ((NODE), 1) #define CASE_LABEL(NODE) TREE_OPERAND ((NODE), 2) /* The operands of a BIND_EXPR. */ #define BIND_EXPR_VARS(NODE) (TREE_OPERAND (BIND_EXPR_CHECK (NODE), 0)) #define BIND_EXPR_BODY(NODE) (TREE_OPERAND (BIND_EXPR_CHECK (NODE), 1)) #define BIND_EXPR_BLOCK(NODE) (TREE_OPERAND (BIND_EXPR_CHECK (NODE), 2)) /* GOTO_EXPR accessor. This gives access to the label associated with a goto statement. */ #define GOTO_DESTINATION(NODE) TREE_OPERAND ((NODE), 0) /* ASM_EXPR accessors. ASM_STRING returns a STRING_CST for the instruction (e.g., "mov x, y"). ASM_OUTPUTS, ASM_INPUTS, and ASM_CLOBBERS represent the outputs, inputs, and clobbers for the statement. */ #define ASM_STRING(NODE) TREE_OPERAND ((NODE), 0) #define ASM_OUTPUTS(NODE) TREE_OPERAND ((NODE), 1) #define ASM_INPUTS(NODE) TREE_OPERAND ((NODE), 2) #define ASM_CLOBBERS(NODE) TREE_OPERAND ((NODE), 3) /* Nonzero if we want to create an ASM_INPUT instead of an ASM_OPERAND with no operands. */ #define ASM_INPUT_P(NODE) (TREE_STATIC (NODE)) #define ASM_VOLATILE_P(NODE) (TREE_PUBLIC (NODE)) /* COND_EXPR accessors. */ #define COND_EXPR_COND(NODE) (TREE_OPERAND (COND_EXPR_CHECK (NODE), 0)) #define COND_EXPR_THEN(NODE) (TREE_OPERAND (COND_EXPR_CHECK (NODE), 1)) #define COND_EXPR_ELSE(NODE) (TREE_OPERAND (COND_EXPR_CHECK (NODE), 2)) /* LABEL_EXPR accessor. This gives access to the label associated with the given label expression. 
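   That label is the LABEL_DECL being defined at this point in the
   statement stream; it is stored as operand 0.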
*/ #define LABEL_EXPR_LABEL(NODE) TREE_OPERAND (LABEL_EXPR_CHECK (NODE), 0) /* VDEF_EXPR accessors are specified in tree-flow.h, along with the other accessors for SSA operands. */ /* CATCH_EXPR accessors. */ #define CATCH_TYPES(NODE) TREE_OPERAND (CATCH_EXPR_CHECK (NODE), 0) #define CATCH_BODY(NODE) TREE_OPERAND (CATCH_EXPR_CHECK (NODE), 1) /* EH_FILTER_EXPR accessors. */ #define EH_FILTER_TYPES(NODE) TREE_OPERAND (EH_FILTER_EXPR_CHECK (NODE), 0) #define EH_FILTER_FAILURE(NODE) TREE_OPERAND (EH_FILTER_EXPR_CHECK (NODE), 1) #define EH_FILTER_MUST_NOT_THROW(NODE) TREE_STATIC (EH_FILTER_EXPR_CHECK (NODE)) /* OBJ_TYPE_REF accessors. */ #define OBJ_TYPE_REF_EXPR(NODE) TREE_OPERAND (OBJ_TYPE_REF_CHECK (NODE), 0) #define OBJ_TYPE_REF_OBJECT(NODE) TREE_OPERAND (OBJ_TYPE_REF_CHECK (NODE), 1) #define OBJ_TYPE_REF_TOKEN(NODE) TREE_OPERAND (OBJ_TYPE_REF_CHECK (NODE), 2) struct tree_exp GTY(()) { struct tree_common common; source_locus locus; int complexity; tree block; tree GTY ((special ("tree_exp"), desc ("TREE_CODE ((tree) &%0)"))) operands[1]; }; /* SSA_NAME accessors. */ /* Returns the variable being referenced. Once released, this is the only field that can be relied upon. */ #define SSA_NAME_VAR(NODE) SSA_NAME_CHECK (NODE)->ssa_name.var /* Returns the statement which defines this reference. Note that we use the same field when chaining SSA_NAME nodes together on the SSA_NAME freelist. */ #define SSA_NAME_DEF_STMT(NODE) SSA_NAME_CHECK (NODE)->common.chain /* Returns the SSA version number of this SSA name. Note that in tree SSA, version numbers are not per variable and may be recycled. */ #define SSA_NAME_VERSION(NODE) SSA_NAME_CHECK (NODE)->ssa_name.version /* Nonzero if this SSA name occurs in an abnormal PHI. SSA_NAMES are never output, so we can safely use the ASM_WRITTEN_FLAG for this status bit. */ #define SSA_NAME_OCCURS_IN_ABNORMAL_PHI(NODE) \ SSA_NAME_CHECK (NODE)->common.asm_written_flag /* Nonzero if this SSA_NAME expression is currently on the free list of SSA_NAMES. Using NOTHROW_FLAG seems reasonably safe since throwing has no meaning for an SSA_NAME. */ #define SSA_NAME_IN_FREE_LIST(NODE) \ SSA_NAME_CHECK (NODE)->common.nothrow_flag /* Attributes for SSA_NAMEs for pointer-type variables. */ #define SSA_NAME_PTR_INFO(N) \ SSA_NAME_CHECK (N)->ssa_name.ptr_info /* Get the value of this SSA_NAME, if available. */ #define SSA_NAME_VALUE(N) \ SSA_NAME_CHECK (N)->ssa_name.value_handle /* Auxiliary pass-specific data. */ #define SSA_NAME_AUX(N) \ SSA_NAME_CHECK (N)->ssa_name.aux #ifndef _TREE_FLOW_H struct ptr_info_def; #endif struct tree_ssa_name GTY(()) { struct tree_common common; /* _DECL wrapped by this SSA name. */ tree var; /* SSA version number. */ unsigned int version; /* Pointer attributes used for alias analysis. */ struct ptr_info_def *ptr_info; /* Value for SSA name used by GVN. */ tree GTY((skip)) value_handle; /* Auxiliary information stored with the ssa name. */ PTR GTY((skip)) aux; }; /* In a PHI_NODE node. */ #define PHI_RESULT_TREE(NODE) PHI_NODE_CHECK (NODE)->phi.result #define PHI_ARG_DEF_TREE(NODE, I) PHI_NODE_ELT_CHECK (NODE, I).def /* PHI_NODEs for each basic block are chained together in a single linked list. The head of the list is linked from the block annotation, and the link to the next PHI is in PHI_CHAIN. */ #define PHI_CHAIN(NODE) TREE_CHAIN (PHI_NODE_CHECK (NODE)) /* Nonzero if the PHI node was rewritten by a previous pass through the SSA renamer. 
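   A typical argument walk over one PHI node, using the accessors defined
   above and just below (purely illustrative; `phi', `i' and `visit' are
   placeholders), is

	for (i = 0; i < PHI_NUM_ARGS (phi); i++)
	  visit (PHI_ARG_DEF_TREE (phi, i));

   and successive PHI nodes of the same block are reached through
   PHI_CHAIN.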
*/ #define PHI_REWRITTEN(NODE) PHI_NODE_CHECK (NODE)->phi.rewritten #define PHI_NUM_ARGS(NODE) PHI_NODE_CHECK (NODE)->phi.num_args #define PHI_ARG_CAPACITY(NODE) PHI_NODE_CHECK (NODE)->phi.capacity #define PHI_ARG_ELT(NODE, I) PHI_NODE_ELT_CHECK (NODE, I) #define PHI_ARG_EDGE(NODE, I) PHI_NODE_ELT_CHECK (NODE, I).e #define PHI_ARG_NONZERO(NODE, I) PHI_NODE_ELT_CHECK (NODE, I).nonzero struct edge_def; struct phi_arg_d GTY(()) { tree def; struct edge_def * GTY((skip (""))) e; bool nonzero; }; struct tree_phi_node GTY(()) { struct tree_common common; tree result; int num_args; int capacity; /* Nonzero if the PHI node was rewritten by a previous pass through the SSA renamer. */ int rewritten; struct phi_arg_d GTY ((length ("((tree)&%h)->phi.capacity"))) a[1]; }; struct varray_head_tag; /* In a BLOCK node. */ #define BLOCK_VARS(NODE) (BLOCK_CHECK (NODE)->block.vars) #define BLOCK_SUBBLOCKS(NODE) (BLOCK_CHECK (NODE)->block.subblocks) #define BLOCK_SUPERCONTEXT(NODE) (BLOCK_CHECK (NODE)->block.supercontext) /* Note: when changing this, make sure to find the places that use chainon or nreverse. */ #define BLOCK_CHAIN(NODE) TREE_CHAIN (BLOCK_CHECK (NODE)) #define BLOCK_ABSTRACT_ORIGIN(NODE) (BLOCK_CHECK (NODE)->block.abstract_origin) #define BLOCK_ABSTRACT(NODE) (BLOCK_CHECK (NODE)->block.abstract_flag) /* Nonzero means that this block is prepared to handle exceptions listed in the BLOCK_VARS slot. */ #define BLOCK_HANDLER_BLOCK(NODE) \ (BLOCK_CHECK (NODE)->block.handler_block_flag) /* An index number for this block. These values are not guaranteed to be unique across functions -- whether or not they are depends on the debugging output format in use. */ #define BLOCK_NUMBER(NODE) (BLOCK_CHECK (NODE)->block.block_num) /* If block reordering splits a lexical block into discontiguous address ranges, we'll make a copy of the original block. Note that this is logically distinct from BLOCK_ABSTRACT_ORIGIN. In that case, we have one source block that has been replicated (through inlining or unrolling) into many logical blocks, and that these logical blocks have different physical variables in them. In this case, we have one logical block split into several non-contiguous address ranges. Most debug formats can't actually represent this idea directly, so we fake it by creating multiple logical blocks with the same variables in them. However, for those that do support non-contiguous regions, these allow the original logical block to be reconstructed, along with the set of address ranges. One of the logical block fragments is arbitrarily chosen to be the ORIGIN. The other fragments will point to the origin via BLOCK_FRAGMENT_ORIGIN; the origin itself will have this pointer be null. The list of fragments will be chained through BLOCK_FRAGMENT_CHAIN from the origin. */ #define BLOCK_FRAGMENT_ORIGIN(NODE) (BLOCK_CHECK (NODE)->block.fragment_origin) #define BLOCK_FRAGMENT_CHAIN(NODE) (BLOCK_CHECK (NODE)->block.fragment_chain) struct tree_block GTY(()) { struct tree_common common; unsigned handler_block_flag : 1; unsigned abstract_flag : 1; unsigned block_num : 30; tree vars; tree subblocks; tree supercontext; tree abstract_origin; tree fragment_origin; tree fragment_chain; }; /* Define fields and accessors for nodes representing data types. */ /* See tree.def for documentation of the use of these fields. Look at the documentation of the various ..._TYPE tree codes. Note that the type.values, type.minval, and type.maxval fields are overloaded and used for different macros in different kinds of types. 
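   For example, the values slot is read as TYPE_VALUES for an
   ENUMERAL_TYPE, as TYPE_FIELDS for a RECORD_TYPE or UNION_TYPE, as
   TYPE_DOMAIN for an ARRAY_TYPE, and as TYPE_ARG_TYPES for a
   FUNCTION_TYPE.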
Each macro must check to ensure the tree node is of the proper kind of type. Note also that some of the front-ends also overload these fields, so they must be checked as well. */ #define TYPE_UID(NODE) (TYPE_CHECK (NODE)->type.uid) #define TYPE_SIZE(NODE) (TYPE_CHECK (NODE)->type.size) #define TYPE_SIZE_UNIT(NODE) (TYPE_CHECK (NODE)->type.size_unit) #define TYPE_MODE(NODE) (TYPE_CHECK (NODE)->type.mode) #define TYPE_ORIG_SIZE_TYPE(NODE) (INTEGER_TYPE_CHECK (NODE)->type.values) #define TYPE_VALUES(NODE) (ENUMERAL_TYPE_CHECK (NODE)->type.values) #define TYPE_DOMAIN(NODE) (SET_OR_ARRAY_CHECK (NODE)->type.values) #define TYPE_FIELDS(NODE) (RECORD_OR_UNION_CHECK (NODE)->type.values) #define TYPE_METHODS(NODE) (RECORD_OR_UNION_CHECK (NODE)->type.maxval) #define TYPE_VFIELD(NODE) (RECORD_OR_UNION_CHECK (NODE)->type.minval) #define TYPE_ARG_TYPES(NODE) (FUNC_OR_METHOD_CHECK (NODE)->type.values) #define TYPE_METHOD_BASETYPE(NODE) (FUNC_OR_METHOD_CHECK (NODE)->type.maxval) #define TYPE_OFFSET_BASETYPE(NODE) (OFFSET_TYPE_CHECK (NODE)->type.maxval) #define TYPE_POINTER_TO(NODE) (TYPE_CHECK (NODE)->type.pointer_to) #define TYPE_REFERENCE_TO(NODE) (TYPE_CHECK (NODE)->type.reference_to) #define TYPE_NEXT_PTR_TO(NODE) (POINTER_TYPE_CHECK (NODE)->type.minval) #define TYPE_NEXT_REF_TO(NODE) (REFERENCE_TYPE_CHECK (NODE)->type.minval) #define TYPE_MIN_VALUE(NODE) (NUMERICAL_TYPE_CHECK (NODE)->type.minval) #define TYPE_MAX_VALUE(NODE) (NUMERICAL_TYPE_CHECK (NODE)->type.maxval) #define TYPE_PRECISION(NODE) (TYPE_CHECK (NODE)->type.precision) #define TYPE_SYMTAB_ADDRESS(NODE) (TYPE_CHECK (NODE)->type.symtab.address) #define TYPE_SYMTAB_POINTER(NODE) (TYPE_CHECK (NODE)->type.symtab.pointer) #define TYPE_SYMTAB_DIE(NODE) (TYPE_CHECK (NODE)->type.symtab.die) #define TYPE_NAME(NODE) (TYPE_CHECK (NODE)->type.name) #define TYPE_NEXT_VARIANT(NODE) (TYPE_CHECK (NODE)->type.next_variant) #define TYPE_MAIN_VARIANT(NODE) (TYPE_CHECK (NODE)->type.main_variant) #define TYPE_CONTEXT(NODE) (TYPE_CHECK (NODE)->type.context) #define TYPE_LANG_SPECIFIC(NODE) (TYPE_CHECK (NODE)->type.lang_specific) /* For a VECTOR_TYPE node, this describes a different type which is emitted in the debugging output. We use this to describe a vector as a structure containing an array. */ #define TYPE_DEBUG_REPRESENTATION_TYPE(NODE) (VECTOR_TYPE_CHECK (NODE)->type.values) /* For record and union types, information about this type, as a base type for itself. */ #define TYPE_BINFO(NODE) (RECORD_OR_UNION_CHECK(NODE)->type.binfo) /* For non record and union types, used in a language-dependent way. */ #define TYPE_LANG_SLOT_1(NODE) (NOT_RECORD_OR_UNION_CHECK(NODE)->type.binfo) /* The (language-specific) typed-based alias set for this type. Objects whose TYPE_ALIAS_SETs are different cannot alias each other. If the TYPE_ALIAS_SET is -1, no alias set has yet been assigned to this type. If the TYPE_ALIAS_SET is 0, objects of this type can alias objects of any type. */ #define TYPE_ALIAS_SET(NODE) (TYPE_CHECK (NODE)->type.alias_set) /* Nonzero iff the typed-based alias set for this type has been calculated. */ #define TYPE_ALIAS_SET_KNOWN_P(NODE) (TYPE_CHECK (NODE)->type.alias_set != -1) /* A TREE_LIST of IDENTIFIER nodes of the attributes that apply to this type. */ #define TYPE_ATTRIBUTES(NODE) (TYPE_CHECK (NODE)->type.attributes) /* The alignment necessary for objects of this type. The value is an int, measured in bits. 
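   For example, a type that must be aligned to a 4 byte boundary has
   TYPE_ALIGN of 32 and, with the usual 8 bit unit, TYPE_ALIGN_UNIT of 4.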
*/ #define TYPE_ALIGN(NODE) (TYPE_CHECK (NODE)->type.align) /* 1 if the alignment for this type was requested by "aligned" attribute, 0 if it is the default for this type. */ #define TYPE_USER_ALIGN(NODE) (TYPE_CHECK (NODE)->type.user_align) /* The alignment for NODE, in bytes. */ #define TYPE_ALIGN_UNIT(NODE) (TYPE_ALIGN (NODE) / BITS_PER_UNIT) /* If your language allows you to declare types, and you want debug info for them, then you need to generate corresponding TYPE_DECL nodes. These "stub" TYPE_DECL nodes have no name, and simply point at the type node. You then set the TYPE_STUB_DECL field of the type node to point back at the TYPE_DECL node. This allows the debug routines to know that the two nodes represent the same type, so that we only get one debug info record for them. */ #define TYPE_STUB_DECL(NODE) TREE_CHAIN (NODE) /* In a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE, it means the type has BLKmode only because it lacks the alignment requirement for its size. */ #define TYPE_NO_FORCE_BLK(NODE) (TYPE_CHECK (NODE)->type.no_force_blk_flag) /* In an INTEGER_TYPE, it means the type represents a size. We use this both for validity checking and to permit optimizations that are unsafe for other types. Note that the C `size_t' type should *not* have this flag set. The `size_t' type is simply a typedef for an ordinary integer type that happens to be the type of an expression returned by `sizeof'; `size_t' has no special properties. Expressions whose type have TYPE_IS_SIZETYPE set are always actual sizes. */ #define TYPE_IS_SIZETYPE(NODE) \ (INTEGER_TYPE_CHECK (NODE)->type.no_force_blk_flag) /* In a FUNCTION_TYPE, indicates that the function returns with the stack pointer depressed. */ #define TYPE_RETURNS_STACK_DEPRESSED(NODE) \ (FUNCTION_TYPE_CHECK (NODE)->type.no_force_blk_flag) /* Nonzero in a type considered volatile as a whole. */ #define TYPE_VOLATILE(NODE) (TYPE_CHECK (NODE)->common.volatile_flag) /* Means this type is const-qualified. */ #define TYPE_READONLY(NODE) (TYPE_CHECK (NODE)->common.readonly_flag) /* If nonzero, this type is `restrict'-qualified, in the C sense of the term. */ #define TYPE_RESTRICT(NODE) (TYPE_CHECK (NODE)->type.restrict_flag) /* There is a TYPE_QUAL value for each type qualifier. They can be combined by bitwise-or to form the complete set of qualifiers for a type. */ #define TYPE_UNQUALIFIED 0x0 #define TYPE_QUAL_CONST 0x1 #define TYPE_QUAL_VOLATILE 0x2 #define TYPE_QUAL_RESTRICT 0x4 /* The set of type qualifiers for this type. */ #define TYPE_QUALS(NODE) \ ((TYPE_READONLY (NODE) * TYPE_QUAL_CONST) \ | (TYPE_VOLATILE (NODE) * TYPE_QUAL_VOLATILE) \ | (TYPE_RESTRICT (NODE) * TYPE_QUAL_RESTRICT)) /* These flags are available for each language front end to use internally. */ #define TYPE_LANG_FLAG_0(NODE) (TYPE_CHECK (NODE)->type.lang_flag_0) #define TYPE_LANG_FLAG_1(NODE) (TYPE_CHECK (NODE)->type.lang_flag_1) #define TYPE_LANG_FLAG_2(NODE) (TYPE_CHECK (NODE)->type.lang_flag_2) #define TYPE_LANG_FLAG_3(NODE) (TYPE_CHECK (NODE)->type.lang_flag_3) #define TYPE_LANG_FLAG_4(NODE) (TYPE_CHECK (NODE)->type.lang_flag_4) #define TYPE_LANG_FLAG_5(NODE) (TYPE_CHECK (NODE)->type.lang_flag_5) #define TYPE_LANG_FLAG_6(NODE) (TYPE_CHECK (NODE)->type.lang_flag_6) /* Used to keep track of visited nodes in tree traversals. This is set to 0 by copy_node and make_node. */ #define TREE_VISITED(NODE) ((NODE)->common.visited) /* If set in an ARRAY_TYPE, indicates a string type (for languages that distinguish string from array of char). 
If set in a SET_TYPE, indicates a bitstring type. */ #define TYPE_STRING_FLAG(NODE) (TYPE_CHECK (NODE)->type.string_flag) /* If non-NULL, this is an upper bound of the size (in bytes) of an object of the given ARRAY_TYPE. This allows temporaries to be allocated. */ #define TYPE_ARRAY_MAX_SIZE(ARRAY_TYPE) \ (ARRAY_TYPE_CHECK (ARRAY_TYPE)->type.maxval) /* For a VECTOR_TYPE, this is the number of sub-parts of the vector. */ #define TYPE_VECTOR_SUBPARTS(VECTOR_TYPE) \ GET_MODE_NUNITS (VECTOR_TYPE_CHECK (VECTOR_TYPE)->type.mode) /* Indicates that objects of this type must be initialized by calling a function when they are created. */ #define TYPE_NEEDS_CONSTRUCTING(NODE) \ (TYPE_CHECK (NODE)->type.needs_constructing_flag) /* Indicates that objects of this type (a UNION_TYPE), should be passed the same way that the first union alternative would be passed. */ #define TYPE_TRANSPARENT_UNION(NODE) \ (UNION_TYPE_CHECK (NODE)->type.transparent_union_flag) /* For an ARRAY_TYPE, indicates that it is not permitted to take the address of a component of the type. */ #define TYPE_NONALIASED_COMPONENT(NODE) \ (ARRAY_TYPE_CHECK (NODE)->type.transparent_union_flag) /* Indicated that objects of this type should be laid out in as compact a way as possible. */ #define TYPE_PACKED(NODE) (TYPE_CHECK (NODE)->type.packed_flag) struct die_struct; struct tree_type GTY(()) { struct tree_common common; tree values; tree size; tree size_unit; tree attributes; unsigned int uid; unsigned int precision : 9; ENUM_BITFIELD(machine_mode) mode : 7; unsigned string_flag : 1; unsigned no_force_blk_flag : 1; unsigned needs_constructing_flag : 1; unsigned transparent_union_flag : 1; unsigned packed_flag : 1; unsigned restrict_flag : 1; unsigned spare : 2; unsigned lang_flag_0 : 1; unsigned lang_flag_1 : 1; unsigned lang_flag_2 : 1; unsigned lang_flag_3 : 1; unsigned lang_flag_4 : 1; unsigned lang_flag_5 : 1; unsigned lang_flag_6 : 1; unsigned user_align : 1; unsigned int align; tree pointer_to; tree reference_to; union tree_type_symtab { int GTY ((tag ("0"))) address; char * GTY ((tag ("1"))) pointer; struct die_struct * GTY ((tag ("2"))) die; } GTY ((desc ("debug_hooks == &sdb_debug_hooks ? 1 : debug_hooks == &dwarf2_debug_hooks ? 2 : 0"), descbits ("2"))) symtab; tree name; tree minval; tree maxval; tree next_variant; tree main_variant; tree binfo; tree context; HOST_WIDE_INT alias_set; /* Points to a structure whose details depend on the language in use. */ struct lang_type *lang_specific; }; /* Define accessor macros for information about type inheritance and basetypes. A "basetype" means a particular usage of a data type for inheritance in another type. Each such basetype usage has its own "binfo" object to describe it. The binfo object is a TREE_VEC node. Inheritance is represented by the binfo nodes allocated for a given type. For example, given types C and D, such that D is inherited by C, 3 binfo nodes will be allocated: one for describing the binfo properties of C, similarly one for D, and one for describing the binfo properties of D as a base type for C. Thus, given a pointer to class C, one can get a pointer to the binfo of D acting as a basetype for C by looking at C's binfo's basetypes. */ /* BINFO specific flags. */ /* Nonzero means that the derivation chain is via a `virtual' declaration. */ #define BINFO_VIRTUAL_P(NODE) (TREE_BINFO_CHECK (NODE)->common.static_flag) /* Flags for language dependent use. 
*/ #define BINFO_MARKED(NODE) TREE_LANG_FLAG_0(TREE_BINFO_CHECK(NODE)) #define BINFO_FLAG_1(NODE) TREE_LANG_FLAG_1(TREE_BINFO_CHECK(NODE)) #define BINFO_FLAG_2(NODE) TREE_LANG_FLAG_2(TREE_BINFO_CHECK(NODE)) #define BINFO_FLAG_3(NODE) TREE_LANG_FLAG_3(TREE_BINFO_CHECK(NODE)) #define BINFO_FLAG_4(NODE) TREE_LANG_FLAG_4(TREE_BINFO_CHECK(NODE)) #define BINFO_FLAG_5(NODE) TREE_LANG_FLAG_5(TREE_BINFO_CHECK(NODE)) #define BINFO_FLAG_6(NODE) TREE_LANG_FLAG_6(TREE_BINFO_CHECK(NODE)) /* The actual data type node being inherited in this basetype. */ #define BINFO_TYPE(NODE) TREE_TYPE (TREE_BINFO_CHECK(NODE)) /* The offset where this basetype appears in its containing type. BINFO_OFFSET slot holds the offset (in bytes) from the base of the complete object to the base of the part of the object that is allocated on behalf of this `type'. This is always 0 except when there is multiple inheritance. */ #define BINFO_OFFSET(NODE) (TREE_BINFO_CHECK(NODE)->binfo.offset) #define TYPE_BINFO_OFFSET(NODE) BINFO_OFFSET (TYPE_BINFO (NODE)) #define BINFO_OFFSET_ZEROP(NODE) (integer_zerop (BINFO_OFFSET (NODE))) /* The virtual function table belonging to this basetype. Virtual function tables provide a mechanism for run-time method dispatching. The entries of a virtual function table are language-dependent. */ #define BINFO_VTABLE(NODE) (TREE_BINFO_CHECK(NODE)->binfo.vtable) #define TYPE_BINFO_VTABLE(NODE) BINFO_VTABLE (TYPE_BINFO (NODE)) /* The virtual functions in the virtual function table. This is a TREE_LIST that is used as an initial approximation for building a virtual function table for this basetype. */ #define BINFO_VIRTUALS(NODE) (TREE_BINFO_CHECK(NODE)->binfo.virtuals) #define TYPE_BINFO_VIRTUALS(NODE) BINFO_VIRTUALS (TYPE_BINFO (NODE)) /* A vector of binfos for the direct basetypes inherited by this basetype. If this basetype describes type D as inherited in C, and if the basetypes of D are E and F, then this vector contains binfos for inheritance of E and F by C. ??? This could probably be done by just allocating the base types at the end of this TREE_VEC (instead of using another TREE_VEC). This would simplify the calculation of how many basetypes a given type had. */ #define BINFO_BASETYPES(NODE) (TREE_BINFO_CHECK(NODE)->binfo.base_types) #define TYPE_BINFO_BASETYPES(NODE) BINFO_BASETYPES (TYPE_BINFO (NODE)) /* The number of basetypes for NODE. */ #define BINFO_N_BASETYPES(NODE) \ (BINFO_BASETYPES (NODE) ? TREE_VEC_LENGTH (BINFO_BASETYPES (NODE)) : 0) /* Accessor macro to get to the Nth basetype of this basetype. */ #define BINFO_BASETYPE(NODE,N) TREE_VEC_ELT (BINFO_BASETYPES (NODE), (N)) #define TYPE_BINFO_BASETYPE(NODE,N) \ BINFO_TYPE (TREE_VEC_ELT (BINFO_BASETYPES (TYPE_BINFO (NODE)), (N))) /* For a BINFO record describing a virtual base class, i.e., one where TREE_VIA_VIRTUAL is set, this field assists in locating the virtual base. The actual contents are language-dependent. In the C++ front-end this field is an INTEGER_CST giving an offset into the vtable where the offset to the virtual base can be found. */ #define BINFO_VPTR_FIELD(NODE) (TREE_BINFO_CHECK(NODE)->binfo.vptr_field) /* Indicates the accesses this binfo has to its bases. The values are access_public_node, access_protected_node or access_private_node. If this array is not present, public access is implied. */ #define BINFO_BASEACCESSES(NODE) (TREE_BINFO_CHECK(NODE)->binfo.base_accesses) #define BINFO_BASEACCESS(NODE,N) TREE_VEC_ELT (BINFO_BASEACCESSES(NODE), (N)) /* Number of language independent elements in a binfo. 
Languages may add additional trailing elements. */ #define BINFO_LANG_SLOT(NODE,N) (TREE_BINFO_CHECK(NODE)->binfo.lang_slots[N]) /* Slot used to build a chain that represents a use of inheritance. For example, if X is derived from Y, and Y is derived from Z, then this field can be used to link the binfo node for X to the binfo node for X's Y to represent the use of inheritance from X to Y. Similarly, this slot of the binfo node for X's Y can point to the Z from which Y is inherited (in X's inheritance hierarchy). In this fashion, one can represent and traverse specific uses of inheritance using the binfo nodes themselves (instead of consing new space pointing to binfo nodes). It is up to the language-dependent front-ends to maintain this information as necessary. */ #define BINFO_INHERITANCE_CHAIN(NODE) \ (TREE_BINFO_CHECK(NODE)->binfo.inheritance) struct tree_binfo GTY (()) { struct tree_common common; tree offset; tree vtable; tree virtuals; tree base_types; tree vptr_field; tree base_accesses; tree inheritance; tree GTY ((length ("binfo_lang_slots"))) lang_slots[1]; }; extern GTY (()) unsigned binfo_lang_slots; /* Define fields and accessors for nodes representing declared names. */ /* Nonzero if DECL represents a decl. */ #define DECL_P(DECL) (TREE_CODE_CLASS (TREE_CODE (DECL)) == 'd') /* Nonzero if DECL represents a variable for the SSA passes. */ #define SSA_VAR_P(DECL) \ (TREE_CODE (DECL) == VAR_DECL \ || TREE_CODE (DECL) == PARM_DECL \ || TREE_CODE (DECL) == RESULT_DECL \ || (TREE_CODE (DECL) == SSA_NAME \ && (TREE_CODE (SSA_NAME_VAR (DECL)) == VAR_DECL \ || TREE_CODE (SSA_NAME_VAR (DECL)) == PARM_DECL \ || TREE_CODE (SSA_NAME_VAR (DECL)) == RESULT_DECL))) /* This is the name of the object as written by the user. It is an IDENTIFIER_NODE. */ #define DECL_NAME(NODE) (DECL_CHECK (NODE)->decl.name) /* The name of the object as the assembler will see it (but before any translations made by ASM_OUTPUT_LABELREF). Often this is the same as DECL_NAME. It is an IDENTIFIER_NODE. */ #define DECL_ASSEMBLER_NAME(NODE) decl_assembler_name (NODE) /* Returns nonzero if the DECL_ASSEMBLER_NAME for NODE has been set. If zero, the NODE might still have a DECL_ASSEMBLER_NAME -- it just hasn't been set yet. */ #define DECL_ASSEMBLER_NAME_SET_P(NODE) \ (DECL_CHECK (NODE)->decl.assembler_name != NULL_TREE) /* Set the DECL_ASSEMBLER_NAME for NODE to NAME. */ #define SET_DECL_ASSEMBLER_NAME(NODE, NAME) \ (DECL_CHECK (NODE)->decl.assembler_name = (NAME)) /* Copy the DECL_ASSEMBLER_NAME from DECL1 to DECL2. Note that if DECL1's DECL_ASSEMBLER_NAME has not yet been set, using this macro will not cause the DECL_ASSEMBLER_NAME of either DECL to be set. In other words, the semantics of using this macro, are different than saying: SET_DECL_ASSEMBLER_NAME(DECL2, DECL_ASSEMBLER_NAME (DECL1)) which will try to set the DECL_ASSEMBLER_NAME for DECL1. */ #define COPY_DECL_ASSEMBLER_NAME(DECL1, DECL2) \ (DECL_ASSEMBLER_NAME_SET_P (DECL1) \ ? (void) SET_DECL_ASSEMBLER_NAME (DECL2, \ DECL_ASSEMBLER_NAME (DECL1)) \ : (void) 0) /* Records the section name in a section attribute. Used to pass the name from decl_attributes to make_function_rtl and make_decl_rtl. */ #define DECL_SECTION_NAME(NODE) (DECL_CHECK (NODE)->decl.section_name) /* For FIELD_DECLs, this is the RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node that the field is a member of. 
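*/

/* An illustrative sketch, not part of the original header, contrasting the
   two ways of propagating an assembler name described above.  OLDDECL and
   NEWDECL are hypothetical.  */
static inline void
example_propagate_asm_name (tree olddecl, tree newdecl)
{
  /* COPY_DECL_ASSEMBLER_NAME is a no-op when OLDDECL's name has not been
     computed yet, so neither decl is forced to compute one lazily.
     Writing SET_DECL_ASSEMBLER_NAME (newdecl, DECL_ASSEMBLER_NAME (olddecl))
     instead would force the name to be computed via decl_assembler_name.  */
  COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl);
}

/*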
For VAR_DECL, PARM_DECL, FUNCTION_DECL, LABEL_DECL, and CONST_DECL nodes, this points to either the FUNCTION_DECL for the containing function, the RECORD_TYPE or UNION_TYPE for the containing type, or NULL_TREE or a TRANSLATION_UNIT_DECL if the given decl has "file scope". */ #define DECL_CONTEXT(NODE) (DECL_CHECK (NODE)->decl.context) #define DECL_FIELD_CONTEXT(NODE) (FIELD_DECL_CHECK (NODE)->decl.context) /* In a DECL this is the field where attributes are stored. */ #define DECL_ATTRIBUTES(NODE) (DECL_CHECK (NODE)->decl.attributes) /* In a FIELD_DECL, this is the field position, counting in bytes, of the byte containing the bit closest to the beginning of the structure. */ #define DECL_FIELD_OFFSET(NODE) (FIELD_DECL_CHECK (NODE)->decl.arguments) /* In a FIELD_DECL, this is the offset, in bits, of the first bit of the field from DECL_FIELD_OFFSET. */ #define DECL_FIELD_BIT_OFFSET(NODE) (FIELD_DECL_CHECK (NODE)->decl.u2.t) /* In a FIELD_DECL, this indicates whether the field was a bit-field and if so, the type that was originally specified for it. TREE_TYPE may have been modified (in finish_struct). */ #define DECL_BIT_FIELD_TYPE(NODE) (FIELD_DECL_CHECK (NODE)->decl.result) /* In FUNCTION_DECL, a chain of ..._DECL nodes. VAR_DECL and PARM_DECL reserve the arguments slot for language-specific uses. */ #define DECL_ARGUMENTS(NODE) (DECL_CHECK (NODE)->decl.arguments) /* This field is used to reference anything in decl.result and is meant only for use by the garbage collector. */ #define DECL_RESULT_FLD(NODE) (DECL_CHECK (NODE)->decl.result) /* In FUNCTION_DECL, holds the decl for the return value. */ #define DECL_RESULT(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.result) /* For a TYPE_DECL, holds the "original" type. (TREE_TYPE has the copy.) */ #define DECL_ORIGINAL_TYPE(NODE) (TYPE_DECL_CHECK (NODE)->decl.result) /* In PARM_DECL, holds the type as written (perhaps a function or array). */ #define DECL_ARG_TYPE_AS_WRITTEN(NODE) (PARM_DECL_CHECK (NODE)->decl.result) /* For a FUNCTION_DECL, holds the tree of BINDINGs. For a TRANSLATION_UNIT_DECL, holds the namespace's BLOCK. For a VAR_DECL, holds the initial value. For a PARM_DECL, not used--default values for parameters are encoded in the type of the function, not in the PARM_DECL slot. ??? Need to figure out some way to check this isn't a PARM_DECL. */ #define DECL_INITIAL(NODE) (DECL_CHECK (NODE)->decl.initial) /* For a PARM_DECL, records the data type used to pass the argument, which may be different from the type seen in the program. */ #define DECL_ARG_TYPE(NODE) (PARM_DECL_CHECK (NODE)->decl.initial) /* For a FIELD_DECL in a QUAL_UNION_TYPE, records the expression, which if nonzero, indicates that the field occupies the type. */ #define DECL_QUALIFIER(NODE) (FIELD_DECL_CHECK (NODE)->decl.initial) /* These two fields describe where in the source code the declaration was. If the declaration appears in several places (as for a C function that is declared first and then defined later), this information should refer to the definition. */ #define DECL_SOURCE_LOCATION(NODE) (DECL_CHECK (NODE)->decl.locus) #define DECL_SOURCE_FILE(NODE) LOCATION_FILE (DECL_SOURCE_LOCATION (NODE)) #define DECL_SOURCE_LINE(NODE) LOCATION_LINE (DECL_SOURCE_LOCATION (NODE)) #ifdef USE_MAPPED_LOCATION #define DECL_IS_BUILTIN(DECL) \ (DECL_SOURCE_LOCATION (DECL) <= BUILTINS_LOCATION) #else #define DECL_IS_BUILTIN(DECL) (DECL_SOURCE_LINE(DECL) == 0) #endif /* Holds the size of the datum, in bits, as a tree expression. Need not be constant. 
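*/

/* An illustrative sketch, not part of the original header.  A field's
   position in bits combines DECL_FIELD_OFFSET (bytes) with
   DECL_FIELD_BIT_OFFSET (bits).  tree_low_cst is declared further down in
   this header, BITS_PER_UNIT comes from the target headers, and the helper
   name is hypothetical; int_bit_position (also below) wraps the same idea.  */
static inline HOST_WIDE_INT
example_field_bit_position (tree field)
{
  /* Only meaningful when both offsets are compile-time host integers.  */
  return (tree_low_cst (DECL_FIELD_OFFSET (field), 1) * BITS_PER_UNIT
          + tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
}

/*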
*/ #define DECL_SIZE(NODE) (DECL_CHECK (NODE)->decl.size) /* Likewise for the size in bytes. */ #define DECL_SIZE_UNIT(NODE) (DECL_CHECK (NODE)->decl.size_unit) /* Holds the alignment required for the datum, in bits. */ #define DECL_ALIGN(NODE) (DECL_CHECK (NODE)->decl.u1.a.align) /* The alignment of NODE, in bytes. */ #define DECL_ALIGN_UNIT(NODE) (DECL_ALIGN (NODE) / BITS_PER_UNIT) /* For FIELD_DECLs, off_align holds the number of low-order bits of DECL_FIELD_OFFSET which are known to be always zero. DECL_OFFSET_ALIGN thus returns the alignment that DECL_FIELD_OFFSET has. */ #define DECL_OFFSET_ALIGN(NODE) \ (((unsigned HOST_WIDE_INT)1) << FIELD_DECL_CHECK (NODE)->decl.u1.a.off_align) /* Specify that DECL_ALIGN(NODE) is a multiple of X. */ #define SET_DECL_OFFSET_ALIGN(NODE, X) \ (FIELD_DECL_CHECK (NODE)->decl.u1.a.off_align = exact_log2 ((X) & -(X))) /* 1 if the alignment for this type was requested by "aligned" attribute, 0 if it is the default for this type. */ #define DECL_USER_ALIGN(NODE) (DECL_CHECK (NODE)->decl.user_align) /* Holds the machine mode corresponding to the declaration of a variable or field. Always equal to TYPE_MODE (TREE_TYPE (decl)) except for a FIELD_DECL. */ #define DECL_MODE(NODE) (DECL_CHECK (NODE)->decl.mode) /* Holds the RTL expression for the value of a variable or function. This value can be evaluated lazily for functions, variables with static storage duration, and labels. */ #define DECL_RTL(NODE) \ (DECL_CHECK (NODE)->decl.rtl \ ? (NODE)->decl.rtl \ : (make_decl_rtl (NODE, NULL), (NODE)->decl.rtl)) /* Set the DECL_RTL for NODE to RTL. */ #define SET_DECL_RTL(NODE, RTL) set_decl_rtl (NODE, RTL) /* Returns nonzero if the DECL_RTL for NODE has already been set. */ #define DECL_RTL_SET_P(NODE) (DECL_CHECK (NODE)->decl.rtl != NULL) /* Copy the RTL from NODE1 to NODE2. If the RTL was not set for NODE1, it will not be set for NODE2; this is a lazy copy. */ #define COPY_DECL_RTL(NODE1, NODE2) \ (DECL_CHECK (NODE2)->decl.rtl = DECL_CHECK (NODE1)->decl.rtl) /* The DECL_RTL for NODE, if it is set, or NULL, if it is not set. */ #define DECL_RTL_IF_SET(NODE) (DECL_RTL_SET_P (NODE) ? DECL_RTL (NODE) : NULL) /* For PARM_DECL, holds an RTL for the stack slot or register where the data was actually passed. */ #define DECL_INCOMING_RTL(NODE) (PARM_DECL_CHECK (NODE)->decl.u2.r) /* For FUNCTION_DECL, this holds a pointer to a structure ("struct function") that describes the status of this function. */ #define DECL_STRUCT_FUNCTION(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.u2.f) /* For FUNCTION_DECL, if it is built-in, this identifies which built-in operation it is. */ #define DECL_FUNCTION_CODE(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.u1.f) /* In a FUNCTION_DECL for which DECL_BUILT_IN does not hold, this is the approximate number of statements in this function. There is no need for this number to be exact; it is only used in various heuristics regarding optimization. */ #define DECL_NUM_STMTS(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.u1.i) /* The DECL_VINDEX is used for FUNCTION_DECLS in two different ways. Before the struct containing the FUNCTION_DECL is laid out, DECL_VINDEX may point to a FUNCTION_DECL in a base class which is the FUNCTION_DECL which this FUNCTION_DECL will replace as a virtual function. When the class is laid out, this pointer is changed to an INTEGER_CST node which is suitable for use as an index into the virtual function table. 
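*/

/* An illustrative sketch, not part of the original header.  DECL_RTL above
   creates the RTL lazily (via make_decl_rtl) on first use, so code that only
   wants to inspect existing RTL should use DECL_RTL_IF_SET or test
   DECL_RTL_SET_P first.  The helper name is hypothetical.  */
static inline rtx
example_peek_decl_rtl (tree decl)
{
  /* Returns NULL instead of forcing the RTL to be created.  */
  return DECL_RTL_IF_SET (decl);
}

/*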
*/ #define DECL_VINDEX(NODE) (DECL_CHECK (NODE)->decl.vindex) /* For FIELD_DECLS, DECL_FCONTEXT is the *first* baseclass in which this FIELD_DECL is defined. This information is needed when writing debugging information about vfield and vbase decls for C++. */ #define DECL_FCONTEXT(NODE) (FIELD_DECL_CHECK (NODE)->decl.vindex) /* Every ..._DECL node gets a unique number. */ #define DECL_UID(NODE) (DECL_CHECK (NODE)->decl.uid) /* For any sort of a ..._DECL node, this points to the original (abstract) decl node which this decl is an instance of, or else it is NULL indicating that this decl is not an instance of some other decl. For example, in a nested declaration of an inline function, this points back to the definition. */ #define DECL_ABSTRACT_ORIGIN(NODE) (DECL_CHECK (NODE)->decl.abstract_origin) /* Like DECL_ABSTRACT_ORIGIN, but returns NODE if there's no abstract origin. This is useful when setting the DECL_ABSTRACT_ORIGIN. */ #define DECL_ORIGIN(NODE) \ (DECL_ABSTRACT_ORIGIN (NODE) ? DECL_ABSTRACT_ORIGIN (NODE) : (NODE)) /* Nonzero for any sort of ..._DECL node means this decl node represents an inline instance of some original (abstract) decl from an inline function; suppress any warnings about shadowing some other variable. FUNCTION_DECL nodes can also have their abstract origin set to themselves. */ #define DECL_FROM_INLINE(NODE) (DECL_ABSTRACT_ORIGIN (NODE) != NULL_TREE \ && DECL_ABSTRACT_ORIGIN (NODE) != (NODE)) /* Nonzero if a _DECL means that the name of this decl should be ignored for symbolic debug purposes. */ #define DECL_IGNORED_P(NODE) (DECL_CHECK (NODE)->decl.ignored_flag) /* Nonzero for a given ..._DECL node means that this node represents an "abstract instance" of the given declaration (e.g. in the original declaration of an inline function). When generating symbolic debugging information, we mustn't try to generate any address information for nodes marked as "abstract instances" because we don't actually generate any code or allocate any data space for such instances. */ #define DECL_ABSTRACT(NODE) (DECL_CHECK (NODE)->decl.abstract_flag) /* Nonzero if a _DECL means that no warnings should be generated just because this decl is unused. */ #define DECL_IN_SYSTEM_HEADER(NODE) \ (DECL_CHECK (NODE)->decl.in_system_header_flag) /* Nonzero for a given ..._DECL node means that this node should be put in .common, if possible. If a DECL_INITIAL is given, and it is not error_mark_node, then the decl cannot be put in .common. */ #define DECL_COMMON(NODE) (DECL_CHECK (NODE)->decl.common_flag) /* Language-specific decl information. */ #define DECL_LANG_SPECIFIC(NODE) (DECL_CHECK (NODE)->decl.lang_specific) /* In a VAR_DECL or FUNCTION_DECL, nonzero means external reference: do not allocate storage, and refer to a definition elsewhere. */ #define DECL_EXTERNAL(NODE) (DECL_CHECK (NODE)->decl.external_flag) /* In a VAR_DECL for a RECORD_TYPE, sets number for non-init_priority initializations. */ #define DEFAULT_INIT_PRIORITY 65535 #define MAX_INIT_PRIORITY 65535 #define MAX_RESERVED_INIT_PRIORITY 100 /* In a TYPE_DECL nonzero means the detail info about this type is not dumped into stabs. Instead it will generate cross reference ('x') of names. This uses the same flag as DECL_EXTERNAL. */ #define TYPE_DECL_SUPPRESS_DEBUG(NODE) \ (TYPE_DECL_CHECK (NODE)->decl.external_flag) /* In VAR_DECL and PARM_DECL nodes, nonzero means declared `register'. 
*/ #define DECL_REGISTER(NODE) (DECL_CHECK (NODE)->decl.regdecl_flag) /* In LABEL_DECL nodes, nonzero means that an error message about jumping into such a binding contour has been printed for this label. */ #define DECL_ERROR_ISSUED(NODE) (LABEL_DECL_CHECK (NODE)->decl.regdecl_flag) /* In a FIELD_DECL, indicates this field should be bit-packed. */ #define DECL_PACKED(NODE) (FIELD_DECL_CHECK (NODE)->decl.regdecl_flag) /* In a FUNCTION_DECL with a nonzero DECL_CONTEXT, indicates that a static chain is not needed. */ #define DECL_NO_STATIC_CHAIN(NODE) \ (FUNCTION_DECL_CHECK (NODE)->decl.regdecl_flag) /* Nonzero in a ..._DECL means this variable is ref'd from a nested function. For VAR_DECL nodes, PARM_DECL nodes, and FUNCTION_DECL nodes. For LABEL_DECL nodes, nonzero if nonlocal gotos to the label are permitted. Also set in some languages for variables, etc., outside the normal lexical scope, such as class instance variables. */ #define DECL_NONLOCAL(NODE) (DECL_CHECK (NODE)->decl.nonlocal_flag) /* Nonzero in a FUNCTION_DECL means this function can be substituted where it is called. */ #define DECL_INLINE(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.inline_flag) /* Nonzero in a FUNCTION_DECL means that this function was declared inline, such as via the `inline' keyword in C/C++. This flag controls the linkage semantics of 'inline'; whether or not the function is inlined is controlled by DECL_INLINE. */ #define DECL_DECLARED_INLINE_P(NODE) \ (FUNCTION_DECL_CHECK (NODE)->decl.declared_inline_flag) /* Nonzero in a decl means that the gimplifier has seen (or placed) this variable in a BIND_EXPR. */ #define DECL_SEEN_IN_BIND_EXPR_P(NODE) \ (DECL_CHECK (NODE)->decl.seen_in_bind_expr) /* In a VAR_DECL, nonzero if the decl is a register variable with an explicit asm specification. */ #define DECL_HARD_REGISTER(NODE) (DECL_CHECK (NODE)->decl.inline_flag) /* Value of the decls's visibility attribute */ #define DECL_VISIBILITY(NODE) (DECL_CHECK (NODE)->decl.visibility) /* In a FUNCTION_DECL, nonzero if the function cannot be inlined. */ #define DECL_UNINLINABLE(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.uninlinable) /* In a VAR_DECL, nonzero if the data should be allocated from thread-local storage. */ #define DECL_THREAD_LOCAL(NODE) (VAR_DECL_CHECK (NODE)->decl.thread_local_flag) /* In a FUNCTION_DECL, the saved representation of the body of the entire function. */ #define DECL_SAVED_TREE(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.saved_tree) /* List of FUNCTION_DECLs inlined into this function's body. */ #define DECL_INLINED_FNS(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.inlined_fns) /* Nonzero in a FUNCTION_DECL means this function should be treated as if it were a malloc, meaning it returns a pointer that is not an alias. */ #define DECL_IS_MALLOC(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.malloc_flag) /* Nonzero in a FUNCTION_DECL means this function should be treated as "pure" function (like const function, but may read global memory). */ #define DECL_IS_PURE(NODE) (FUNCTION_DECL_CHECK (NODE)->decl.pure_flag) /* Nonzero in a FIELD_DECL means it is a bit field, and must be accessed specially. */ #define DECL_BIT_FIELD(NODE) (FIELD_DECL_CHECK (NODE)->decl.bit_field_flag) /* In a LABEL_DECL, nonzero means label was defined inside a binding contour that restored a stack level and which is now exited. */ #define DECL_TOO_LATE(NODE) (LABEL_DECL_CHECK (NODE)->decl.bit_field_flag) /* Unused in FUNCTION_DECL. */ /* In a VAR_DECL that's static, nonzero if the space is in the text section. 
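*/

/* An illustrative sketch, not part of the original header, separating the
   two inline-related bits documented above: DECL_DECLARED_INLINE_P records
   what the user wrote, while DECL_INLINE is the compiler's decision that the
   body may be substituted.  The helper name is hypothetical.  */
static inline int
example_inline_request_not_honored (tree fndecl)
{
  return (DECL_DECLARED_INLINE_P (fndecl)
          && (DECL_UNINLINABLE (fndecl) || !DECL_INLINE (fndecl)));
}

/*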
*/ #define DECL_IN_TEXT_SECTION(NODE) (VAR_DECL_CHECK (NODE)->decl.bit_field_flag) /* In a FUNCTION_DECL, nonzero means a built in function. */ #define DECL_BUILT_IN(NODE) (DECL_BUILT_IN_CLASS (NODE) != NOT_BUILT_IN) /* For a builtin function, identify which part of the compiler defined it. */ #define DECL_BUILT_IN_CLASS(NODE) \ (FUNCTION_DECL_CHECK (NODE)->decl.built_in_class) /* Used in VAR_DECLs to indicate that the variable is a vtable. Used in FIELD_DECLs for vtable pointers. Used in FUNCTION_DECLs to indicate that the function is virtual. */ #define DECL_VIRTUAL_P(NODE) (DECL_CHECK (NODE)->decl.virtual_flag) /* Used to indicate that the linkage status of this DECL is not yet known, so it should not be output now. */ #define DECL_DEFER_OUTPUT(NODE) (DECL_CHECK (NODE)->decl.defer_output) /* Used in PARM_DECLs whose type are unions to indicate that the argument should be passed in the same way that the first union alternative would be passed. */ #define DECL_TRANSPARENT_UNION(NODE) \ (PARM_DECL_CHECK (NODE)->decl.transparent_union) /* Used in FUNCTION_DECLs to indicate that they should be run automatically at the beginning or end of execution. */ #define DECL_STATIC_CONSTRUCTOR(NODE) \ (FUNCTION_DECL_CHECK (NODE)->decl.static_ctor_flag) #define DECL_STATIC_DESTRUCTOR(NODE) \ (FUNCTION_DECL_CHECK (NODE)->decl.static_dtor_flag) /* Used to indicate that this DECL represents a compiler-generated entity. */ #define DECL_ARTIFICIAL(NODE) (DECL_CHECK (NODE)->decl.artificial_flag) /* Used to indicate that this DECL has weak linkage. */ #define DECL_WEAK(NODE) (DECL_CHECK (NODE)->decl.weak_flag) /* Used in TREE_PUBLIC decls to indicate that copies of this DECL in multiple translation units should be merged. */ #define DECL_ONE_ONLY(NODE) (DECL_CHECK (NODE)->decl.transparent_union) /* Used in a DECL to indicate that, even if it TREE_PUBLIC, it need not be put out unless it is needed in this translation unit. Entities like this are shared across translation units (like weak entities), but are guaranteed to be generated by any translation unit that needs them, and therefore need not be put out anywhere where they are not needed. DECL_COMDAT is just a hint to the back-end; it is up to front-ends which set this flag to ensure that there will never be any harm, other than bloat, in putting out something which is DECL_COMDAT. */ #define DECL_COMDAT(NODE) (DECL_CHECK (NODE)->decl.comdat_flag) /* Used in FUNCTION_DECLs to indicate that function entry and exit should be instrumented with calls to support routines. */ #define DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT(NODE) \ (FUNCTION_DECL_CHECK (NODE)->decl.no_instrument_function_entry_exit) /* Used in FUNCTION_DECLs to indicate that limit-stack-* should be disabled in this function. */ #define DECL_NO_LIMIT_STACK(NODE) \ (FUNCTION_DECL_CHECK (NODE)->decl.no_limit_stack) /* Additional flags for language-specific uses. */ #define DECL_LANG_FLAG_0(NODE) (DECL_CHECK (NODE)->decl.lang_flag_0) #define DECL_LANG_FLAG_1(NODE) (DECL_CHECK (NODE)->decl.lang_flag_1) #define DECL_LANG_FLAG_2(NODE) (DECL_CHECK (NODE)->decl.lang_flag_2) #define DECL_LANG_FLAG_3(NODE) (DECL_CHECK (NODE)->decl.lang_flag_3) #define DECL_LANG_FLAG_4(NODE) (DECL_CHECK (NODE)->decl.lang_flag_4) #define DECL_LANG_FLAG_5(NODE) (DECL_CHECK (NODE)->decl.lang_flag_5) #define DECL_LANG_FLAG_6(NODE) (DECL_CHECK (NODE)->decl.lang_flag_6) #define DECL_LANG_FLAG_7(NODE) (DECL_CHECK (NODE)->decl.lang_flag_7) /* Used to indicate that the pointer to this DECL cannot be treated as an address constant. 
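*/

/* An illustrative sketch, not part of the original header.  The usual way to
   recognize one particular builtin combines DECL_BUILT_IN_CLASS with
   DECL_FUNCTION_CODE (defined earlier above); BUILT_IN_MEMCPY is one of the
   codes from builtins.def.  The helper name is hypothetical.  */
static inline int
example_is_builtin_memcpy (tree fndecl)
{
  return (TREE_CODE (fndecl) == FUNCTION_DECL
          && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
          && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMCPY);
}

/*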
*/ #define DECL_NON_ADDR_CONST_P(NODE) (DECL_CHECK (NODE)->decl.non_addr_const_p) /* Used in a FIELD_DECL to indicate that we cannot form the address of this component. */ #define DECL_NONADDRESSABLE_P(NODE) \ (FIELD_DECL_CHECK (NODE)->decl.non_addressable) /* Used to indicate an alias set for the memory pointed to by this particular FIELD_DECL, PARM_DECL, or VAR_DECL, which must have pointer (or reference) type. */ #define DECL_POINTER_ALIAS_SET(NODE) \ (DECL_CHECK (NODE)->decl.pointer_alias_set) /* Used to store the alias_var for a DECL node. */ #define DECL_PTA_ALIASVAR(NODE) \ (DECL_CHECK (NODE)->decl.alias_var) /* A numeric unique identifier for a LABEL_DECL. The UID allocation is dense, unique within any one function, and may be used to index arrays. If the value is -1, then no UID has been assigned. */ #define LABEL_DECL_UID(NODE) \ (LABEL_DECL_CHECK (NODE)->decl.pointer_alias_set) /* Nonzero if an alias set has been assigned to this declaration. */ #define DECL_POINTER_ALIAS_SET_KNOWN_P(NODE) \ (DECL_POINTER_ALIAS_SET (NODE) != - 1) /* Nonzero for a decl which is at file scope. */ #define DECL_FILE_SCOPE_P(EXP) \ (! DECL_CONTEXT (EXP) \ || TREE_CODE (DECL_CONTEXT (EXP)) == TRANSLATION_UNIT_DECL) /* Nonzero for a decl that has been marked as needing a memory slot. NOTE: Never use this macro directly. It will give you incomplete information. Most of the time this bit will only be set after alias analysis in the tree optimizers. It's always better to call needs_to_live_in_memory instead. To mark memory variables use mark_call_clobbered. */ #define DECL_NEEDS_TO_LIVE_IN_MEMORY_INTERNAL(DECL) \ DECL_CHECK (DECL)->decl.needs_to_live_in_memory /* Nonzero for a decl that cgraph has decided should be inlined into at least one call site. It is not meaningful to look at this directly; always use cgraph_function_possibly_inlined_p. */ #define DECL_POSSIBLY_INLINED(DECL) \ FUNCTION_DECL_CHECK (DECL)->decl.possibly_inlined /* Enumerate visibility settings. */ enum symbol_visibility { VISIBILITY_DEFAULT, VISIBILITY_INTERNAL, VISIBILITY_HIDDEN, VISIBILITY_PROTECTED }; struct function; union alias_var_def; struct tree_decl GTY(()) { struct tree_common common; location_t locus; unsigned int uid; tree size; ENUM_BITFIELD(machine_mode) mode : 8; unsigned external_flag : 1; unsigned nonlocal_flag : 1; unsigned regdecl_flag : 1; unsigned inline_flag : 1; unsigned bit_field_flag : 1; unsigned virtual_flag : 1; unsigned ignored_flag : 1; unsigned abstract_flag : 1; unsigned in_system_header_flag : 1; unsigned common_flag : 1; unsigned defer_output : 1; unsigned transparent_union : 1; unsigned static_ctor_flag : 1; unsigned static_dtor_flag : 1; unsigned artificial_flag : 1; unsigned weak_flag : 1; unsigned non_addr_const_p : 1; unsigned no_instrument_function_entry_exit : 1; unsigned comdat_flag : 1; unsigned malloc_flag : 1; unsigned no_limit_stack : 1; ENUM_BITFIELD(built_in_class) built_in_class : 2; unsigned pure_flag : 1; unsigned non_addressable : 1; unsigned user_align : 1; unsigned uninlinable : 1; unsigned thread_local_flag : 1; unsigned declared_inline_flag : 1; unsigned seen_in_bind_expr : 1; ENUM_BITFIELD(symbol_visibility) visibility : 2; unsigned lang_flag_0 : 1; unsigned lang_flag_1 : 1; unsigned lang_flag_2 : 1; unsigned lang_flag_3 : 1; unsigned lang_flag_4 : 1; unsigned lang_flag_5 : 1; unsigned lang_flag_6 : 1; unsigned lang_flag_7 : 1; unsigned needs_to_live_in_memory : 1; unsigned possibly_inlined : 1; /* 14 unused bits. 
*/ union tree_decl_u1 { /* In a FUNCTION_DECL for which DECL_BUILT_IN holds, this is DECL_FUNCTION_CODE. */ enum built_in_function f; /* In a FUNCTION_DECL for which DECL_BUILT_IN does not hold, this is used by language-dependent code. */ HOST_WIDE_INT i; /* DECL_ALIGN and DECL_OFFSET_ALIGN. (These are not used for FUNCTION_DECLs). */ struct tree_decl_u1_a { unsigned int align : 24; unsigned int off_align : 8; } a; } GTY ((skip)) u1; tree size_unit; tree name; tree context; tree arguments; /* Also used for DECL_FIELD_OFFSET */ tree result; /* Also used for DECL_BIT_FIELD_TYPE */ tree initial; /* Also used for DECL_QUALIFIER */ tree abstract_origin; tree assembler_name; tree section_name; tree attributes; rtx rtl; /* RTL representation for object. */ /* In FUNCTION_DECL, if it is inline, holds the saved insn chain. In FIELD_DECL, is DECL_FIELD_BIT_OFFSET. In PARM_DECL, holds an RTL for the stack slot of register where the data was actually passed. Used by Chill and Java in LABEL_DECL and by C++ and Java in VAR_DECL. */ union tree_decl_u2 { struct function * GTY ((tag ("FUNCTION_DECL"))) f; rtx GTY ((tag ("PARM_DECL"))) r; tree GTY ((tag ("FIELD_DECL"))) t; int GTY ((tag ("VAR_DECL"))) i; } GTY ((desc ("TREE_CODE((tree) &(%0))"))) u2; /* In a FUNCTION_DECL, this is DECL_SAVED_TREE. */ tree saved_tree; /* In a FUNCTION_DECL, these are function data which is to be kept as long as FUNCTION_DECL is kept. */ tree inlined_fns; tree vindex; HOST_WIDE_INT pointer_alias_set; union alias_var_def *GTY ((skip(""))) alias_var; /* Points to a structure whose details depend on the language in use. */ struct lang_decl *lang_specific; }; /* A STATEMENT_LIST chains statements together in GENERIC and GIMPLE. To reduce overhead, the nodes containing the statements are not trees. This avoids the overhead of tree_common on all linked list elements. Use the interface in tree-iterator.h to access this node. */ #define STATEMENT_LIST_HEAD(NODE) \ (STATEMENT_LIST_CHECK (NODE)->stmt_list.head) #define STATEMENT_LIST_TAIL(NODE) \ (STATEMENT_LIST_CHECK (NODE)->stmt_list.tail) struct tree_statement_list_node GTY ((chain_next ("%h.next"), chain_prev ("%h.prev"))) { struct tree_statement_list_node *prev; struct tree_statement_list_node *next; tree stmt; }; struct tree_statement_list GTY(()) { struct tree_common common; struct tree_statement_list_node *head; struct tree_statement_list_node *tail; }; #define VALUE_HANDLE_ID(NODE) \ (VALUE_HANDLE_CHECK (NODE)->value_handle.id) #define VALUE_HANDLE_EXPR_SET(NODE) \ (VALUE_HANDLE_CHECK (NODE)->value_handle.expr_set) /* Defined and used in tree-ssa-pre.c. */ struct value_set; struct tree_value_handle GTY(()) { struct tree_common common; /* The set of expressions represented by this handle. */ struct value_set * GTY ((skip)) expr_set; /* Unique ID for this value handle. IDs are handed out in a conveniently dense form starting at 0, so that we can make bitmaps of value handles. */ unsigned int id; }; enum tree_node_structure_enum { TS_COMMON, TS_INT_CST, TS_REAL_CST, TS_VECTOR, TS_STRING, TS_COMPLEX, TS_IDENTIFIER, TS_DECL, TS_TYPE, TS_LIST, TS_VEC, TS_EXP, TS_SSA_NAME, TS_PHI_NODE, TS_BLOCK, TS_BINFO, TS_STATEMENT_LIST, TS_VALUE_HANDLE, LAST_TS_ENUM }; /* Define the overall contents of a tree node. It may be any of the structures declared above for various types of node. 
*/ union tree_node GTY ((ptr_alias (union lang_tree_node), desc ("tree_node_structure (&%h)"))) { struct tree_common GTY ((tag ("TS_COMMON"))) common; struct tree_int_cst GTY ((tag ("TS_INT_CST"))) int_cst; struct tree_real_cst GTY ((tag ("TS_REAL_CST"))) real_cst; struct tree_vector GTY ((tag ("TS_VECTOR"))) vector; struct tree_string GTY ((tag ("TS_STRING"))) string; struct tree_complex GTY ((tag ("TS_COMPLEX"))) complex; struct tree_identifier GTY ((tag ("TS_IDENTIFIER"))) identifier; struct tree_decl GTY ((tag ("TS_DECL"))) decl; struct tree_type GTY ((tag ("TS_TYPE"))) type; struct tree_list GTY ((tag ("TS_LIST"))) list; struct tree_vec GTY ((tag ("TS_VEC"))) vec; struct tree_exp GTY ((tag ("TS_EXP"))) exp; struct tree_ssa_name GTY ((tag ("TS_SSA_NAME"))) ssa_name; struct tree_phi_node GTY ((tag ("TS_PHI_NODE"))) phi; struct tree_block GTY ((tag ("TS_BLOCK"))) block; struct tree_binfo GTY ((tag ("TS_BINFO"))) binfo; struct tree_statement_list GTY ((tag ("TS_STATEMENT_LIST"))) stmt_list; struct tree_value_handle GTY ((tag ("TS_VALUE_HANDLE"))) value_handle; }; /* Standard named or nameless data types of the C compiler. */ enum tree_index { TI_ERROR_MARK, TI_INTQI_TYPE, TI_INTHI_TYPE, TI_INTSI_TYPE, TI_INTDI_TYPE, TI_INTTI_TYPE, TI_UINTQI_TYPE, TI_UINTHI_TYPE, TI_UINTSI_TYPE, TI_UINTDI_TYPE, TI_UINTTI_TYPE, TI_INTEGER_ZERO, TI_INTEGER_ONE, TI_INTEGER_MINUS_ONE, TI_NULL_POINTER, TI_SIZE_ZERO, TI_SIZE_ONE, TI_BITSIZE_ZERO, TI_BITSIZE_ONE, TI_BITSIZE_UNIT, TI_PUBLIC, TI_PROTECTED, TI_PRIVATE, TI_BOOLEAN_FALSE, TI_BOOLEAN_TRUE, TI_COMPLEX_INTEGER_TYPE, TI_COMPLEX_FLOAT_TYPE, TI_COMPLEX_DOUBLE_TYPE, TI_COMPLEX_LONG_DOUBLE_TYPE, TI_FLOAT_TYPE, TI_DOUBLE_TYPE, TI_LONG_DOUBLE_TYPE, TI_FLOAT_PTR_TYPE, TI_DOUBLE_PTR_TYPE, TI_LONG_DOUBLE_PTR_TYPE, TI_INTEGER_PTR_TYPE, TI_VOID_TYPE, TI_PTR_TYPE, TI_CONST_PTR_TYPE, TI_SIZE_TYPE, TI_PID_TYPE, TI_PTRDIFF_TYPE, TI_VA_LIST_TYPE, TI_BOOLEAN_TYPE, TI_FILEPTR_TYPE, TI_VOID_LIST_NODE, TI_MAIN_IDENTIFIER, TI_MAX }; extern GTY(()) tree global_trees[TI_MAX]; #define error_mark_node global_trees[TI_ERROR_MARK] #define intQI_type_node global_trees[TI_INTQI_TYPE] #define intHI_type_node global_trees[TI_INTHI_TYPE] #define intSI_type_node global_trees[TI_INTSI_TYPE] #define intDI_type_node global_trees[TI_INTDI_TYPE] #define intTI_type_node global_trees[TI_INTTI_TYPE] #define unsigned_intQI_type_node global_trees[TI_UINTQI_TYPE] #define unsigned_intHI_type_node global_trees[TI_UINTHI_TYPE] #define unsigned_intSI_type_node global_trees[TI_UINTSI_TYPE] #define unsigned_intDI_type_node global_trees[TI_UINTDI_TYPE] #define unsigned_intTI_type_node global_trees[TI_UINTTI_TYPE] #define integer_zero_node global_trees[TI_INTEGER_ZERO] #define integer_one_node global_trees[TI_INTEGER_ONE] #define integer_minus_one_node global_trees[TI_INTEGER_MINUS_ONE] #define size_zero_node global_trees[TI_SIZE_ZERO] #define size_one_node global_trees[TI_SIZE_ONE] #define bitsize_zero_node global_trees[TI_BITSIZE_ZERO] #define bitsize_one_node global_trees[TI_BITSIZE_ONE] #define bitsize_unit_node global_trees[TI_BITSIZE_UNIT] /* Base access nodes. 
*/ #define access_public_node global_trees[TI_PUBLIC] #define access_protected_node global_trees[TI_PROTECTED] #define access_private_node global_trees[TI_PRIVATE] #define null_pointer_node global_trees[TI_NULL_POINTER] #define float_type_node global_trees[TI_FLOAT_TYPE] #define double_type_node global_trees[TI_DOUBLE_TYPE] #define long_double_type_node global_trees[TI_LONG_DOUBLE_TYPE] #define float_ptr_type_node global_trees[TI_FLOAT_PTR_TYPE] #define double_ptr_type_node global_trees[TI_DOUBLE_PTR_TYPE] #define long_double_ptr_type_node global_trees[TI_LONG_DOUBLE_PTR_TYPE] #define integer_ptr_type_node global_trees[TI_INTEGER_PTR_TYPE] #define complex_integer_type_node global_trees[TI_COMPLEX_INTEGER_TYPE] #define complex_float_type_node global_trees[TI_COMPLEX_FLOAT_TYPE] #define complex_double_type_node global_trees[TI_COMPLEX_DOUBLE_TYPE] #define complex_long_double_type_node global_trees[TI_COMPLEX_LONG_DOUBLE_TYPE] #define void_type_node global_trees[TI_VOID_TYPE] /* The C type `void *'. */ #define ptr_type_node global_trees[TI_PTR_TYPE] /* The C type `const void *'. */ #define const_ptr_type_node global_trees[TI_CONST_PTR_TYPE] /* The C type `size_t'. */ #define size_type_node global_trees[TI_SIZE_TYPE] #define pid_type_node global_trees[TI_PID_TYPE] #define ptrdiff_type_node global_trees[TI_PTRDIFF_TYPE] #define va_list_type_node global_trees[TI_VA_LIST_TYPE] /* The C type `FILE *'. */ #define fileptr_type_node global_trees[TI_FILEPTR_TYPE] #define boolean_type_node global_trees[TI_BOOLEAN_TYPE] #define boolean_false_node global_trees[TI_BOOLEAN_FALSE] #define boolean_true_node global_trees[TI_BOOLEAN_TRUE] /* The node that should be placed at the end of a parameter list to indicate that the function does not take a variable number of arguments. The TREE_VALUE will be void_type_node and there will be no TREE_CHAIN. Language-independent code should not assume anything else about this node. */ #define void_list_node global_trees[TI_VOID_LIST_NODE] #define main_identifier_node global_trees[TI_MAIN_IDENTIFIER] #define MAIN_NAME_P(NODE) (IDENTIFIER_NODE_CHECK (NODE) == main_identifier_node) /* An enumeration of the standard C integer types. These must be ordered so that shorter types appear before longer ones, and so that signed types appear before unsigned ones, for the correct functioning of interpret_integer() in c-lex.c. */ enum integer_type_kind { itk_char, itk_signed_char, itk_unsigned_char, itk_short, itk_unsigned_short, itk_int, itk_unsigned_int, itk_long, itk_unsigned_long, itk_long_long, itk_unsigned_long_long, itk_none }; typedef enum integer_type_kind integer_type_kind; /* The standard C integer types. Use integer_type_kind to index into this array. */ extern GTY(()) tree integer_types[itk_none]; #define char_type_node integer_types[itk_char] #define signed_char_type_node integer_types[itk_signed_char] #define unsigned_char_type_node integer_types[itk_unsigned_char] #define short_integer_type_node integer_types[itk_short] #define short_unsigned_type_node integer_types[itk_unsigned_short] #define integer_type_node integer_types[itk_int] #define unsigned_type_node integer_types[itk_unsigned_int] #define long_integer_type_node integer_types[itk_long] #define long_unsigned_type_node integer_types[itk_unsigned_long] #define long_long_integer_type_node integer_types[itk_long_long] #define long_long_unsigned_type_node integer_types[itk_unsigned_long_long] /* Set to the default thread-local storage (tls) model to use. 
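*/

/* An illustrative sketch, not part of the original header.  MAIN_NAME_P only
   compares an IDENTIFIER_NODE against main_identifier_node, so a check for
   the program's `main' usually also verifies that the decl is a file-scope
   FUNCTION_DECL.  The helper name is hypothetical.  */
static inline int
example_decl_is_main (tree fndecl)
{
  return (TREE_CODE (fndecl) == FUNCTION_DECL
          && DECL_NAME (fndecl)
          && MAIN_NAME_P (DECL_NAME (fndecl))
          && DECL_FILE_SCOPE_P (fndecl));
}

/*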
*/ enum tls_model { TLS_MODEL_GLOBAL_DYNAMIC = 1, TLS_MODEL_LOCAL_DYNAMIC, TLS_MODEL_INITIAL_EXEC, TLS_MODEL_LOCAL_EXEC }; extern enum tls_model flag_tls_default; /* A pointer-to-function member type looks like: struct { __P __pfn; ptrdiff_t __delta; }; If __pfn is NULL, it is a NULL pointer-to-member-function. (Because the vtable is always the first thing in the object, we don't need its offset.) If the function is virtual, then PFN is one plus twice the index into the vtable; otherwise, it is just a pointer to the function. Unfortunately, using the lowest bit of PFN doesn't work in architectures that don't impose alignment requirements on function addresses, or that use the lowest bit to tell one ISA from another, for example. For such architectures, we use the lowest bit of DELTA instead of the lowest bit of the PFN, and DELTA will be multiplied by 2. */ enum ptrmemfunc_vbit_where_t { ptrmemfunc_vbit_in_pfn, ptrmemfunc_vbit_in_delta }; #define NULL_TREE (tree) NULL extern GTY(()) tree frame_base_decl; extern tree decl_assembler_name (tree); /* Compute the number of bytes occupied by 'node'. This routine only looks at TREE_CODE and, if the code is TREE_VEC, TREE_VEC_LENGTH. */ extern size_t tree_size (tree); /* Lowest level primitive for allocating a node. The TREE_CODE is the only argument. Contents are initialized to zero except for a few of the common fields. */ extern tree make_node_stat (enum tree_code MEM_STAT_DECL); #define make_node(t) make_node_stat (t MEM_STAT_INFO) /* Make a copy of a node, with all the same contents. */ extern tree copy_node_stat (tree MEM_STAT_DECL); #define copy_node(t) copy_node_stat (t MEM_STAT_INFO) /* Make a copy of a chain of TREE_LIST nodes. */ extern tree copy_list (tree); /* Make a BINFO. */ extern tree make_tree_binfo_stat (unsigned MEM_STAT_DECL); #define make_tree_binfo(t) make_tree_binfo_stat (t MEM_STAT_INFO) /* Make a TREE_VEC. */ extern tree make_tree_vec_stat (int MEM_STAT_DECL); #define make_tree_vec(t) make_tree_vec_stat (t MEM_STAT_INFO) /* Tree nodes for SSA analysis. */ extern tree make_phi_node (tree, int); extern void init_phinodes (void); extern void fini_phinodes (void); extern void release_phi_node (tree); #ifdef GATHER_STATISTICS extern void phinodes_print_statistics (void); #endif extern void init_ssanames (void); extern void fini_ssanames (void); extern tree make_ssa_name (tree, tree); extern tree duplicate_ssa_name (tree, tree); extern void release_ssa_name (tree); #ifdef GATHER_STATISTICS extern void ssanames_print_statistics (void); #endif /* Return the (unique) IDENTIFIER_NODE node for a given name. The name is supplied as a char *. */ extern tree get_identifier (const char *); #if GCC_VERSION >= 3000 #define get_identifier(str) \ (__builtin_constant_p (str) \ ? get_identifier_with_length ((str), strlen (str)) \ : get_identifier (str)) #endif /* Identical to get_identifier, except that the length is assumed known. */ extern tree get_identifier_with_length (const char *, size_t); /* If an identifier with the name TEXT (a null-terminated string) has previously been referred to, return that node; otherwise return NULL_TREE. */ extern tree maybe_get_identifier (const char *); /* Construct various types of nodes. 
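*/

/* An illustrative sketch, not part of the original header; it uses the node
   constructors declared immediately below (build2 and friends) together with
   the global type nodes above to build the expression EXPR + 1.  The helper
   name is hypothetical; build_int_2 below is the raw way to make a fresh
   INTEGER_CST when integer_one_node does not fit.  */
static inline tree
example_build_plus_one (tree expr)
{
  /* build2 takes the tree code, the type of the result, then the two
     operands.  */
  return build2 (PLUS_EXPR, integer_type_node, expr, integer_one_node);
}

/*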
*/ #define build_int_2(LO, HI) \ build_int_2_wide ((unsigned HOST_WIDE_INT) (LO), (HOST_WIDE_INT) (HI)) extern tree build (enum tree_code, tree, ...); extern tree build_nt (enum tree_code, ...); #if GCC_VERSION >= 3000 || __STDC_VERSION__ >= 199901L /* Use preprocessor trickery to map "build" to "buildN" where N is the expected number of arguments. This is used for both efficiency (no varargs), and checking (verifying number of passed arguments). */ #define build(code, ...) \ _buildN1(build, _buildC1(__VA_ARGS__))(code, __VA_ARGS__) #define _buildN1(BASE, X) _buildN2(BASE, X) #define _buildN2(BASE, X) BASE##X #define _buildC1(...) _buildC2(__VA_ARGS__,9,8,7,6,5,4,3,2,1,0,0) #define _buildC2(x,a1,a2,a3,a4,a5,a6,a7,a8,a9,c,...) c #endif extern tree build0_stat (enum tree_code, tree MEM_STAT_DECL); #define build0(c,t) build0_stat (c,t MEM_STAT_INFO) extern tree build1_stat (enum tree_code, tree, tree MEM_STAT_DECL); #define build1(c,t1,t2) build1_stat (c,t1,t2 MEM_STAT_INFO) extern tree build2_stat (enum tree_code, tree, tree, tree MEM_STAT_DECL); #define build2(c,t1,t2,t3) build2_stat (c,t1,t2,t3 MEM_STAT_INFO) extern tree build3_stat (enum tree_code, tree, tree, tree, tree MEM_STAT_DECL); #define build3(c,t1,t2,t3,t4) build3_stat (c,t1,t2,t3,t4 MEM_STAT_INFO) extern tree build4_stat (enum tree_code, tree, tree, tree, tree, tree MEM_STAT_DECL); #define build4(c,t1,t2,t3,t4,t5) build4_stat (c,t1,t2,t3,t4,t5 MEM_STAT_INFO) extern tree build_int_2_wide (unsigned HOST_WIDE_INT, HOST_WIDE_INT); extern tree build_vector (tree, tree); extern tree build_constructor (tree, tree); extern tree build_real_from_int_cst (tree, tree); extern tree build_complex (tree, tree, tree); extern tree build_string (int, const char *); extern tree build_tree_list_stat (tree, tree MEM_STAT_DECL); #define build_tree_list(t,q) build_tree_list_stat(t,q MEM_STAT_INFO) extern tree build_decl_stat (enum tree_code, tree, tree MEM_STAT_DECL); #define build_decl(c,t,q) build_decl_stat (c,t,q MEM_STAT_INFO) extern tree build_block (tree, tree, tree, tree, tree); #ifndef USE_MAPPED_LOCATION extern void annotate_with_file_line (tree, const char *, int); extern void annotate_with_locus (tree, location_t); #endif extern tree build_empty_stmt (void); /* Construct various nodes representing data types. 
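*/

/* An illustrative sketch, not part of the original header; it uses the type
   constructors declared immediately below to build the type
   `int (*) (int, int)', a pointer to a function taking two ints and
   returning int.  The helper name is hypothetical.  */
static inline tree
example_build_binary_int_fn_ptr_type (void)
{
  /* build_function_type_list takes the return type followed by the argument
     types, terminated by NULL_TREE.  */
  tree fntype = build_function_type_list (integer_type_node,
                                          integer_type_node,
                                          integer_type_node,
                                          NULL_TREE);
  return build_pointer_type (fntype);
}

/*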
*/ extern tree make_signed_type (int); extern tree make_unsigned_type (int); extern void initialize_sizetypes (void); extern void set_sizetype (tree); extern void fixup_unsigned_type (tree); extern tree build_pointer_type_for_mode (tree, enum machine_mode, bool); extern tree build_pointer_type (tree); extern tree build_reference_type_for_mode (tree, enum machine_mode, bool); extern tree build_reference_type (tree); extern tree build_vector_type_for_mode (tree, enum machine_mode); extern tree build_vector_type (tree innertype, int nunits); extern tree build_type_no_quals (tree); extern tree build_index_type (tree); extern tree build_index_2_type (tree, tree); extern tree build_array_type (tree, tree); extern tree build_function_type (tree, tree); extern tree build_function_type_list (tree, ...); extern tree build_method_type_directly (tree, tree, tree); extern tree build_method_type (tree, tree); extern tree build_offset_type (tree, tree); extern tree build_complex_type (tree); extern tree array_type_nelts (tree); extern tree value_member (tree, tree); extern tree purpose_member (tree, tree); extern tree binfo_member (tree, tree); extern int attribute_list_equal (tree, tree); extern int attribute_list_contained (tree, tree); extern int tree_int_cst_equal (tree, tree); extern int tree_int_cst_lt (tree, tree); extern int tree_int_cst_compare (tree, tree); extern int host_integerp (tree, int); extern HOST_WIDE_INT tree_low_cst (tree, int); extern int tree_int_cst_msb (tree); extern int tree_int_cst_sgn (tree); extern int tree_expr_nonnegative_p (tree); extern int rtl_expr_nonnegative_p (rtx); extern tree get_inner_array_type (tree); /* From expmed.c. Since rtl.h is included after tree.h, we can't put the prototype here. Rtl.h does declare the prototype if tree.h had been included. */ extern tree make_tree (tree, rtx); /* Return a type like TTYPE except that its TYPE_ATTRIBUTES is ATTRIBUTE. Such modified types already made are recorded so that duplicates are not made. */ extern tree build_type_attribute_variant (tree, tree); extern tree build_decl_attribute_variant (tree, tree); /* Structure describing an attribute and a function to handle it. */ struct attribute_spec { /* The name of the attribute (without any leading or trailing __), or NULL to mark the end of a table of attributes. */ const char *const name; /* The minimum length of the list of arguments of the attribute. */ const int min_length; /* The maximum length of the list of arguments of the attribute (-1 for no maximum). */ const int max_length; /* Whether this attribute requires a DECL. If it does, it will be passed from types of DECLs, function return types and array element types to the DECLs, function types and array types respectively; but when applied to a type in any other circumstances, it will be ignored with a warning. (If greater control is desired for a given attribute, this should be false, and the flags argument to the handler may be used to gain greater control in that case.) */ const bool decl_required; /* Whether this attribute requires a type. If it does, it will be passed from a DECL to the type of that DECL. */ const bool type_required; /* Whether this attribute requires a function (or method) type. If it does, it will be passed from a function pointer type to the target type, and from a function return type (which is not itself a function pointer type) to the function type. */ const bool function_type_required; /* Function to handle this attribute. 
NODE points to the node to which the attribute is to be applied. If a DECL, it should be modified in place; if a TYPE, a copy should be created. NAME is the name of the attribute (possibly with leading or trailing __). ARGS is the TREE_LIST of the arguments (which may be NULL). FLAGS gives further information about the context of the attribute. Afterwards, the attributes will be added to the DECL_ATTRIBUTES or TYPE_ATTRIBUTES, as appropriate, unless *NO_ADD_ATTRS is set to true (which should be done on error, as well as in any other cases when the attributes should not be added to the DECL or TYPE). Depending on FLAGS, any attributes to be applied to another type or DECL later may be returned; otherwise the return value should be NULL_TREE. This pointer may be NULL if no special handling is required beyond the checks implied by the rest of this structure. */ tree (*const handler) (tree *node, tree name, tree args, int flags, bool *no_add_attrs); }; /* Flags that may be passed in the third argument of decl_attributes, and to handler functions for attributes. */ enum attribute_flags { /* The type passed in is the type of a DECL, and any attributes that should be passed in again to be applied to the DECL rather than the type should be returned. */ ATTR_FLAG_DECL_NEXT = 1, /* The type passed in is a function return type, and any attributes that should be passed in again to be applied to the function type rather than the return type should be returned. */ ATTR_FLAG_FUNCTION_NEXT = 2, /* The type passed in is an array element type, and any attributes that should be passed in again to be applied to the array type rather than the element type should be returned. */ ATTR_FLAG_ARRAY_NEXT = 4, /* The type passed in is a structure, union or enumeration type being created, and should be modified in place. */ ATTR_FLAG_TYPE_IN_PLACE = 8, /* The attributes are being applied by default to a library function whose name indicates known behavior, and should be silently ignored if they are not in fact compatible with the function type. */ ATTR_FLAG_BUILT_IN = 16 }; /* Default versions of target-overridable functions. */ extern tree merge_decl_attributes (tree, tree); extern tree merge_type_attributes (tree, tree); extern void default_register_cpp_builtins (struct cpp_reader *); /* Split a list of declspecs and attributes into two. */ extern void split_specs_attrs (tree, tree *, tree *); /* Strip attributes from a list of combined specs and attrs. */ extern tree strip_attrs (tree); /* Return 1 if an attribute and its arguments are valid for a decl or type. */ extern int valid_machine_attribute (tree, tree, tree, tree); /* Given a tree node and a string, return nonzero if the tree node is a valid attribute name for the string. */ extern int is_attribute_p (const char *, tree); /* Given an attribute name and a list of attributes, return the list element of the attribute or NULL_TREE if not found. */ extern tree lookup_attribute (const char *, tree); /* Given two attributes lists, return a list of their union. */ extern tree merge_attributes (tree, tree); #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES /* Given two Windows decl attributes lists, possibly including dllimport, return a list of their union . */ extern tree merge_dllimport_decl_attributes (tree, tree); #endif /* Check whether CAND is suitable to be returned from get_qualified_type (BASE, TYPE_QUALS). */ extern bool check_qualified_type (tree, tree, int); /* Return a version of the TYPE, qualified as indicated by the TYPE_QUALS, if one exists. 
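*/

/* An illustrative sketch, not part of the original header, showing the shape
   of an attribute handler and of a table entry as described above.  The
   attribute name "example_nocheck" and both identifiers are hypothetical;
   real tables are consumed by decl_attributes.  */
static tree
example_handle_nocheck_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  /* Accept the attribute only on declarations; setting *NO_ADD_ATTRS keeps
     it out of DECL_ATTRIBUTES / TYPE_ATTRIBUTES.  */
  if (!DECL_P (*node))
    *no_add_attrs = true;
  return NULL_TREE;
}

static const struct attribute_spec example_attribute_table[] =
{
  /* name,             min, max, decl,  type,  fn-type, handler */
  { "example_nocheck", 0,   0,   true,  false, false,
    example_handle_nocheck_attribute },
  { NULL,              0,   0,   false, false, false,   NULL }
};

/*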
If no qualified version exists yet, return NULL_TREE. */ extern tree get_qualified_type (tree, int); /* Like get_qualified_type, but creates the type if it does not exist. This function never returns NULL_TREE. */ extern tree build_qualified_type (tree, int); /* Like build_qualified_type, but only deals with the `const' and `volatile' qualifiers. This interface is retained for backwards compatibility with the various front-ends; new code should use build_qualified_type instead. */ #define build_type_variant(TYPE, CONST_P, VOLATILE_P) \ build_qualified_type ((TYPE), \ ((CONST_P) ? TYPE_QUAL_CONST : 0) \ | ((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0)) /* Make a copy of a type node. */ extern tree build_type_copy (tree); /* Finish up a builtin RECORD_TYPE. Give it a name and provide its fields. Optionally specify an alignment, and then lsy it out. */ extern void finish_builtin_struct (tree, const char *, tree, tree); /* Given a ..._TYPE node, calculate the TYPE_SIZE, TYPE_SIZE_UNIT, TYPE_ALIGN and TYPE_MODE fields. If called more than once on one node, does nothing except for the first time. */ extern void layout_type (tree); /* These functions allow a front-end to perform a manual layout of a RECORD_TYPE. (For instance, if the placement of subsequent fields depends on the placement of fields so far.) Begin by calling start_record_layout. Then, call place_field for each of the fields. Then, call finish_record_layout. See layout_type for the default way in which these functions are used. */ typedef struct record_layout_info_s { /* The RECORD_TYPE that we are laying out. */ tree t; /* The offset into the record so far, in bytes, not including bits in BITPOS. */ tree offset; /* The last known alignment of SIZE. */ unsigned int offset_align; /* The bit position within the last OFFSET_ALIGN bits, in bits. */ tree bitpos; /* The alignment of the record so far, in bits. */ unsigned int record_align; /* The alignment of the record so far, ignoring #pragma pack and __attribute__ ((packed)), in bits. */ unsigned int unpacked_align; /* The previous field layed out. */ tree prev_field; /* The static variables (i.e., class variables, as opposed to instance variables) encountered in T. */ tree pending_statics; /* Bits remaining in the current alignment group */ int remaining_in_alignment; /* True if we've seen a packed field that didn't have normal alignment anyway. */ int packed_maybe_necessary; } *record_layout_info; extern void set_lang_adjust_rli (void (*) (record_layout_info)); extern record_layout_info start_record_layout (tree); extern tree bit_from_pos (tree, tree); extern tree byte_from_pos (tree, tree); extern void pos_from_bit (tree *, tree *, unsigned int, tree); extern void normalize_offset (tree *, tree *, unsigned int); extern tree rli_size_unit_so_far (record_layout_info); extern tree rli_size_so_far (record_layout_info); extern void normalize_rli (record_layout_info); extern void place_field (record_layout_info, tree); extern void compute_record_mode (tree); extern void finish_record_layout (record_layout_info, int); /* Given a hashcode and a ..._TYPE node (for which the hashcode was made), return a canonicalized ..._TYPE node, so that duplicates are not made. How the hash code is computed is up to the caller, as long as any two callers that could hash identical-looking type nodes agree. */ extern tree type_hash_canon (unsigned int, tree); /* Given a VAR_DECL, PARM_DECL, RESULT_DECL or FIELD_DECL node, calculates the DECL_SIZE, DECL_SIZE_UNIT, DECL_ALIGN and DECL_MODE fields. 
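*/

/* An illustrative sketch, not part of the original header, of the manual
   layout protocol described above: start_record_layout, place_field for each
   field in order, then finish_record_layout.  It assumes make_node,
   build_decl, get_identifier, DECL_CONTEXT and the TREE_CHAIN / TYPE_FIELDS
   accessors from elsewhere in this header; the helper name is hypothetical.
   finish_builtin_struct above offers a packaged alternative for simple
   cases.  */
static inline tree
example_layout_pair_struct (void)
{
  tree t = make_node (RECORD_TYPE);
  tree f1 = build_decl (FIELD_DECL, get_identifier ("first"),
                        integer_type_node);
  tree f2 = build_decl (FIELD_DECL, get_identifier ("second"),
                        integer_type_node);
  record_layout_info rli;

  DECL_CONTEXT (f1) = t;
  DECL_CONTEXT (f2) = t;
  TREE_CHAIN (f1) = f2;
  TYPE_FIELDS (t) = f1;

  rli = start_record_layout (t);
  place_field (rli, f1);
  place_field (rli, f2);
  /* A nonzero second argument lets finish_record_layout free RLI.  */
  finish_record_layout (rli, 1);
  return t;
}

/*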
Call this only once for any given decl node. Second argument is the boundary that this field can be assumed to be starting at (in bits). Zero means it can be assumed aligned on any boundary that may be needed. */ extern void layout_decl (tree, unsigned); /* Return the mode for data of a given size SIZE and mode class CLASS. If LIMIT is nonzero, then don't use modes bigger than MAX_FIXED_MODE_SIZE. The value is BLKmode if no other mode is found. This is like mode_for_size, but is passed a tree. */ extern enum machine_mode mode_for_size_tree (tree, enum mode_class, int); /* Return an expr equal to X but certainly not valid as an lvalue. */ extern tree non_lvalue (tree); extern tree pedantic_non_lvalue (tree); extern tree convert (tree, tree); extern unsigned int expr_align (tree); extern tree expr_first (tree); extern tree expr_last (tree); extern tree expr_only (tree); extern tree size_in_bytes (tree); extern HOST_WIDE_INT int_size_in_bytes (tree); extern tree bit_position (tree); extern HOST_WIDE_INT int_bit_position (tree); extern tree byte_position (tree); extern HOST_WIDE_INT int_byte_position (tree); /* Define data structures, macros, and functions for handling sizes and the various types used to represent sizes. */ enum size_type_kind { SIZETYPE, /* Normal representation of sizes in bytes. */ SSIZETYPE, /* Signed representation of sizes in bytes. */ USIZETYPE, /* Unsigned representation of sizes in bytes. */ BITSIZETYPE, /* Normal representation of sizes in bits. */ SBITSIZETYPE, /* Signed representation of sizes in bits. */ UBITSIZETYPE, /* Unsigned representation of sizes in bits. */ TYPE_KIND_LAST}; extern GTY(()) tree sizetype_tab[(int) TYPE_KIND_LAST]; #define sizetype sizetype_tab[(int) SIZETYPE] #define bitsizetype sizetype_tab[(int) BITSIZETYPE] #define ssizetype sizetype_tab[(int) SSIZETYPE] #define usizetype sizetype_tab[(int) USIZETYPE] #define sbitsizetype sizetype_tab[(int) SBITSIZETYPE] #define ubitsizetype sizetype_tab[(int) UBITSIZETYPE] extern tree size_binop (enum tree_code, tree, tree); extern tree size_diffop (tree, tree); extern tree size_int_wide (HOST_WIDE_INT, enum size_type_kind); extern tree size_int_type_wide (HOST_WIDE_INT, tree); #define size_int_type(L, T) size_int_type_wide ((HOST_WIDE_INT) (L), T) #define size_int(L) size_int_wide ((HOST_WIDE_INT) (L), SIZETYPE) #define ssize_int(L) size_int_wide ((HOST_WIDE_INT) (L), SSIZETYPE) #define bitsize_int(L) size_int_wide ((HOST_WIDE_INT) (L), BITSIZETYPE) #define sbitsize_int(L) size_int_wide ((HOST_WIDE_INT) (L), SBITSIZETYPE) extern tree round_up (tree, int); extern tree round_down (tree, int); extern tree get_pending_sizes (void); extern void put_pending_size (tree); extern void put_pending_sizes (tree); /* Type for sizes of data-type. */ #define BITS_PER_UNIT_LOG \ ((BITS_PER_UNIT > 1) + (BITS_PER_UNIT > 2) + (BITS_PER_UNIT > 4) \ + (BITS_PER_UNIT > 8) + (BITS_PER_UNIT > 16) + (BITS_PER_UNIT > 32) \ + (BITS_PER_UNIT > 64) + (BITS_PER_UNIT > 128) + (BITS_PER_UNIT > 256)) /* If nonzero, an upper limit on alignment of structure fields, in bits. */ extern unsigned int maximum_field_alignment; /* If nonzero, the alignment of a bitstring or (power-)set value, in bits. */ extern unsigned int set_alignment; /* Concatenate two lists (chains of TREE_LIST nodes) X and Y by making the last node in X point to Y. Returns X, except if X is 0 returns Y. */ extern tree chainon (tree, tree); /* Make a new TREE_LIST node from specified PURPOSE, VALUE and CHAIN. 
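*/

/* An illustrative sketch, not part of the original header, of sizetype
   arithmetic with the helpers declared above: the byte size of COUNT objects
   of ELT_TYPE, rounded up to a multiple of ALIGN_BYTES.  The helper name is
   hypothetical.  */
static inline tree
example_array_size_in_bytes (tree elt_type, HOST_WIDE_INT count,
                             int align_bytes)
{
  /* size_binop folds its operands; size_int makes a sizetype INTEGER_CST.  */
  tree total = size_binop (MULT_EXPR, size_in_bytes (elt_type),
                           size_int (count));
  return round_up (total, align_bytes);
}

/*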
*/ extern tree tree_cons_stat (tree, tree, tree MEM_STAT_DECL); #define tree_cons(t,q,w) tree_cons_stat (t,q,w MEM_STAT_INFO) /* Return the last tree node in a chain. */ extern tree tree_last (tree); /* Reverse the order of elements in a chain, and return the new head. */ extern tree nreverse (tree); /* Returns the length of a chain of nodes (number of chain pointers to follow before reaching a null pointer). */ extern int list_length (tree); /* Returns the number of FIELD_DECLs in a type. */ extern int fields_length (tree); /* Given an initializer INIT, return TRUE if INIT is zero or some aggregate of zeros. Otherwise return FALSE. */ extern bool initializer_zerop (tree); extern void categorize_ctor_elements (tree, HOST_WIDE_INT *, HOST_WIDE_INT *); extern HOST_WIDE_INT count_type_elements (tree); extern int mostly_zeros_p (tree); /* add_var_to_bind_expr (bind_expr, var) binds var to bind_expr. */ extern void add_var_to_bind_expr (tree, tree); /* integer_zerop (tree x) is nonzero if X is an integer constant of value 0 */ extern int integer_zerop (tree); /* integer_onep (tree x) is nonzero if X is an integer constant of value 1 */ extern int integer_onep (tree); /* integer_all_onesp (tree x) is nonzero if X is an integer constant all of whose significant bits are 1. */ extern int integer_all_onesp (tree); /* integer_pow2p (tree x) is nonzero is X is an integer constant with exactly one bit 1. */ extern int integer_pow2p (tree); /* integer_nonzerop (tree x) is nonzero if X is an integer constant with a nonzero value. */ extern int integer_nonzerop (tree); /* staticp (tree x) is nonzero if X is a reference to data allocated at a fixed address in memory. */ extern int staticp (tree); /* save_expr (EXP) returns an expression equivalent to EXP but it can be used multiple times within context CTX and only evaluate EXP once. */ extern tree save_expr (tree); /* Look inside EXPR and into any simple arithmetic operations. Return the innermost non-arithmetic node. */ extern tree skip_simple_arithmetic (tree); /* Returns the index of the first non-tree operand for CODE, or the number of operands if all are trees. */ extern int first_rtl_op (enum tree_code); /* Return which tree structure is used by T. */ enum tree_node_structure_enum tree_node_structure (tree); /* unsave_expr (EXP) returns an expression equivalent to EXP but it can be used multiple times and will evaluate EXP in its entirety each time. */ extern tree unsave_expr (tree); /* Reset EXP in place so that it can be expanded again. Does not recurse into subtrees. */ extern void unsave_expr_1 (tree); /* Return 0 if it is safe to evaluate EXPR multiple times, return 1 if it is safe if EXPR is unsaved afterward, or return 2 if it is completely unsafe. */ extern int unsafe_for_reeval (tree); /* Return 1 if EXP contains a PLACEHOLDER_EXPR; i.e., if it represents a size or offset that depends on a field within a record. Note that we only allow such expressions within simple arithmetic or a COND_EXPR. */ extern bool contains_placeholder_p (tree); /* This macro calls the above function but short-circuits the common case of a constant to save time. Also check for null. */ #define CONTAINS_PLACEHOLDER_P(EXP) \ ((EXP) != 0 && ! TREE_CONSTANT (EXP) && contains_placeholder_p (EXP)) /* Return 1 if any part of the computation of TYPE involves a PLACEHOLDER_EXPR. This includes size, bounds, qualifiers (for QUAL_UNION_TYPE) and field positions. 
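*/

/* An illustrative sketch, not part of the original header, of the common
   cons-then-nreverse idiom for building a TREE_LIST in source order with the
   functions declared above.  The helper name is hypothetical.  */
static inline tree
example_build_value_list (tree *values, int n)
{
  tree list = NULL_TREE;
  int i;

  /* tree_cons pushes at the head, so the chain comes out reversed and is put
     back into order with nreverse.  */
  for (i = 0; i < n; i++)
    list = tree_cons (NULL_TREE, values[i], list);
  return nreverse (list);
}

/*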
*/ extern bool type_contains_placeholder_p (tree); /* Return 1 if EXP contains any expressions that produce cleanups for an outer scope to deal with. Used by fold. */ extern int has_cleanups (tree); /* Given a tree EXP, a FIELD_DECL F, and a replacement value R, return a tree with all occurrences of references to F in a PLACEHOLDER_EXPR replaced by R. Note that we assume here that EXP contains only arithmetic expressions. */ extern tree substitute_in_expr (tree, tree, tree); /* This macro calls the above function but short-circuits the common case of a constant to save time and also checks for NULL. */ #define SUBSTITUTE_IN_EXPR(EXP, F, R) \ ((EXP) == 0 || TREE_CONSTANT (EXP) ? (EXP) : substitute_in_expr (EXP, F, R)) /* Similar, but look for a PLACEHOLDER_EXPR in EXP and find a replacement for it within OBJ, a tree that is an object or a chain of references. */ extern tree substitute_placeholder_in_expr (tree, tree); /* This macro calls the above function but short-circuits the common case of a constant to save time and also checks for NULL. */ #define SUBSTITUTE_PLACEHOLDER_IN_EXPR(EXP, OBJ) \ ((EXP) == 0 || TREE_CONSTANT (EXP) ? (EXP) \ : substitute_placeholder_in_expr (EXP, OBJ)) /* variable_size (EXP) is like save_expr (EXP) except that it is for the special case of something that is part of a variable size for a data type. It makes special arrangements to compute the value at the right time when the data type belongs to a function parameter. */ extern tree variable_size (tree); /* stabilize_reference (EXP) returns a reference equivalent to EXP but it can be used multiple times and only evaluate the subexpressions once. */ extern tree stabilize_reference (tree); /* Subroutine of stabilize_reference; this is called for subtrees of references. Any expression with side-effects must be put in a SAVE_EXPR to ensure that it is only evaluated once. */ extern tree stabilize_reference_1 (tree); /* Return EXP, stripped of any conversions to wider types in such a way that the result of converting to type FOR_TYPE is the same as if EXP were converted to FOR_TYPE. If FOR_TYPE is 0, it signifies EXP's type. */ extern tree get_unwidened (tree, tree); /* Return OP or a simpler expression for a narrower value which can be sign-extended or zero-extended to give back OP. Store in *UNSIGNEDP_PTR either 1 if the value should be zero-extended or 0 if the value should be sign-extended. */ extern tree get_narrower (tree, int *); /* Given an expression EXP that may be a COMPONENT_REF or an ARRAY_REF, look for nested component-refs or array-refs at constant positions and find the ultimate containing object, which is returned. */ extern tree get_inner_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *, tree *, enum machine_mode *, int *, int *); /* Return 1 if T is an expression that get_inner_reference handles. */ extern int handled_component_p (tree); /* Return a tree of sizetype representing the size, in bytes, of the element of EXP, an ARRAY_REF. */ extern tree array_ref_element_size (tree); /* Return a tree representing the lower bound of the array mentioned in EXP, an ARRAY_REF. */ extern tree array_ref_low_bound (tree); /* Return a tree representing the offset, in bytes, of the field referenced by EXP. This does not include any offset in DECL_FIELD_BIT_OFFSET. */ extern tree component_ref_field_offset (tree); /* Given a DECL or TYPE, return the scope in which it was declared, or NULL_TREE if there is no containing scope.
*/ extern tree get_containing_scope (tree); /* Return the FUNCTION_DECL which provides this _DECL with its context, or zero if none. */ extern tree decl_function_context (tree); /* Return the RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE which provides this _DECL with its context, or zero if none. */ extern tree decl_type_context (tree); /* Return 1 if EXPR is the real constant zero. */ extern int real_zerop (tree); /* Declare commonly used variables for tree structure. */ /* Nonzero means lvalues are limited to those valid in pedantic ANSI C. Zero means allow extended lvalues. */ extern int pedantic_lvalues; /* Points to the FUNCTION_DECL of the function whose body we are reading. */ extern GTY(()) tree current_function_decl; /* Nonzero means a FUNC_BEGIN label was emitted. */ extern GTY(()) tree current_function_func_begin_label; /* A DECL for the current file-scope context. When using IMA, this heads a chain of FILE_DECLs; currently only C uses it. */ extern GTY(()) tree current_file_decl; /* Nonzero means all ..._TYPE nodes should be allocated permanently. */ extern int all_types_permanent; /* Exit a binding level. This function is provided by each language frontend. */ extern tree poplevel (int, int, int); /* Declare a predefined function. Return the declaration. This function is provided by each language frontend. */ extern tree builtin_function (const char *, tree, int, enum built_in_class, const char *, tree); /* In tree.c */ extern unsigned crc32_string (unsigned, const char *); extern void clean_symbol_name (char *); extern tree get_file_function_name_long (const char *); extern tree get_set_constructor_bits (tree, char *, int); extern tree get_set_constructor_bytes (tree, unsigned char *, int); extern tree get_callee_fndecl (tree); extern void change_decl_assembler_name (tree, tree); extern int type_num_arguments (tree); extern tree lhd_unsave_expr_now (tree); extern bool associative_tree_code (enum tree_code); extern bool commutative_tree_code (enum tree_code); /* In stmt.c */ extern void expand_fixups (rtx); extern void expand_expr_stmt (tree); extern void expand_expr_stmt_value (tree, int, int); extern int warn_if_unused_value (tree, location_t); extern void expand_decl_init (tree); extern void expand_label (tree); extern void expand_goto (tree); extern void expand_asm (tree, int); extern void expand_start_cond (tree, int); extern void expand_end_cond (void); extern void expand_start_else (void); extern void expand_start_elseif (tree); extern void expand_stack_alloc (tree, tree); extern rtx expand_stack_save (void); extern void expand_stack_restore (tree); extern void expand_return (tree); extern void expand_start_bindings_and_block (int, tree); #define expand_start_bindings(flags) \ expand_start_bindings_and_block(flags, NULL_TREE) extern void expand_end_bindings (tree, int, int); extern void warn_about_unused_variables (tree); extern void start_cleanup_deferral (void); extern void end_cleanup_deferral (void); extern int is_body_block (tree); extern int conditional_context (void); extern struct nesting * current_nesting_level (void); extern tree last_cleanup_this_contour (void); extern void expand_start_case (int, tree, tree, const char *); extern void expand_end_case_type (tree, tree); #define expand_end_case(cond) expand_end_case_type (cond, NULL) extern int add_case_node (tree, tree, tree, tree *, bool); extern int pushcase (tree, tree (*) (tree, tree), tree, tree *); extern int pushcase_range (tree, tree, tree (*) (tree, tree), tree, tree *); extern void 
using_eh_for_cleanups (void); /* In fold-const.c */ /* Fold constants as much as possible in an expression. Returns the simplified expression. Acts only on the top level of the expression; if the argument itself cannot be simplified, its subexpressions are not changed. */ extern tree fold (tree); extern tree fold_initializer (tree); extern tree fold_convert (tree, tree); extern tree fold_single_bit_test (enum tree_code, tree, tree, tree); extern tree fold_abs_const (tree, tree); extern int force_fit_type (tree, int); extern int add_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT, unsigned HOST_WIDE_INT, HOST_WIDE_INT, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *); extern int neg_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *); extern int mul_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT, unsigned HOST_WIDE_INT, HOST_WIDE_INT, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *); extern void lshift_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, unsigned int, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *, int); extern void rshift_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, unsigned int, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *, int); extern void lrotate_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, unsigned int, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *); extern void rrotate_double (unsigned HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, unsigned int, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *); extern int div_and_round_double (enum tree_code, int, unsigned HOST_WIDE_INT, HOST_WIDE_INT, unsigned HOST_WIDE_INT, HOST_WIDE_INT, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *); enum operand_equal_flag { OEP_ONLY_CONST = 1, OEP_PURE_SAME = 2 }; extern int operand_equal_p (tree, tree, unsigned int); extern tree omit_one_operand (tree, tree, tree); extern tree omit_two_operands (tree, tree, tree, tree); extern tree invert_truthvalue (tree); extern tree nondestructive_fold_unary_to_constant (enum tree_code, tree, tree); extern tree nondestructive_fold_binary_to_constant (enum tree_code, tree, tree, tree); extern tree fold_read_from_constant_string (tree); extern tree int_const_binop (enum tree_code, tree, tree, int); extern tree build_fold_addr_expr (tree); extern tree build_fold_addr_expr_with_type (tree, tree); extern tree build_fold_indirect_ref (tree); extern bool tree_swap_operands_p (tree, tree, bool); extern enum tree_code swap_tree_comparison (enum tree_code); /* In builtins.c */ extern tree fold_builtin (tree); extern enum built_in_function builtin_mathfn_code (tree); extern tree build_function_call_expr (tree, tree); extern tree mathfn_built_in (tree, enum built_in_function fn); extern tree strip_float_extensions (tree); extern tree simplify_builtin (tree, int); extern tree c_strlen (tree, int); extern tree std_gimplify_va_arg_expr (tree, tree, tree *, tree *); /* In convert.c */ extern tree strip_float_extensions (tree); /* In alias.c */ extern void record_component_aliases (tree); extern HOST_WIDE_INT get_alias_set (tree); extern int alias_sets_conflict_p (HOST_WIDE_INT, HOST_WIDE_INT); extern int alias_sets_might_conflict_p (HOST_WIDE_INT, HOST_WIDE_INT); extern int readonly_fields_p (tree); extern int objects_must_conflict_p (tree, tree); /* In tree.c */ extern int really_constant_p (tree); extern int int_fits_type_p (tree, tree); extern bool variably_modified_type_p (tree, tree); extern int tree_log2 (tree); extern int tree_floor_log2 (tree); extern int simple_cst_equal (tree, 
tree); extern unsigned int iterative_hash_expr (tree, unsigned int); extern int compare_tree_int (tree, unsigned HOST_WIDE_INT); extern int type_list_equal (tree, tree); extern int chain_member (tree, tree); extern tree type_hash_lookup (unsigned int, tree); extern void type_hash_add (unsigned int, tree); extern int simple_cst_list_equal (tree, tree); extern void dump_tree_statistics (void); extern void expand_function_end (void); extern void expand_function_start (tree); extern void expand_pending_sizes (tree); extern void recompute_tree_invarant_for_addr_expr (tree); extern bool needs_to_live_in_memory (tree); extern tree make_vector (enum machine_mode, tree, int); extern tree reconstruct_complex_type (tree, tree); extern int real_onep (tree); extern int real_twop (tree); extern int real_minus_onep (tree); extern void init_ttree (void); extern void build_common_tree_nodes (int); extern void build_common_tree_nodes_2 (int); extern tree build_range_type (tree, tree, tree); /* In function.c */ extern void expand_main_function (void); extern void init_dummy_function_start (void); extern void expand_dummy_function_end (void); extern void init_function_for_compilation (void); extern void allocate_struct_function (tree); extern void init_function_start (tree); extern bool use_register_for_decl (tree); extern void assign_parms (tree); extern void setjmp_vars_warning (tree); extern void setjmp_args_warning (void); extern void init_temp_slots (void); extern void combine_temp_slots (void); extern void free_temp_slots (void); extern void pop_temp_slots (void); extern void push_temp_slots (void); extern void preserve_temp_slots (rtx); extern void preserve_rtl_expr_temps (tree); extern int aggregate_value_p (tree, tree); extern void push_function_context (void); extern void pop_function_context (void); extern void push_function_context_to (tree); extern void pop_function_context_from (tree); /* In print-rtl.c */ #ifdef BUFSIZ extern void print_rtl (FILE *, rtx); #endif /* In print-tree.c */ extern void debug_tree (tree); #ifdef BUFSIZ extern void print_node (FILE *, const char *, tree, int); extern void print_node_brief (FILE *, const char *, tree, int); extern void indent_to (FILE *, int); #endif /* In tree-inline.c: */ extern bool debug_find_tree (tree, tree); /* In expr.c */ extern rtx expand_builtin_return_addr (enum built_in_function, int, rtx); extern void check_max_integer_computation_mode (tree); /* In emit-rtl.c */ extern rtx emit_line_note (location_t); /* In calls.c */ /* Nonzero if this is a call to a `const' function. */ #define ECF_CONST 1 /* Nonzero if this is a call to a `noreturn' function (historically called a `volatile' function). */ #define ECF_NORETURN 2 /* Nonzero if this is a call to malloc or a related function. */ #define ECF_MALLOC 4 /* Nonzero if it is plausible that this is a call to alloca. */ #define ECF_MAY_BE_ALLOCA 8 /* Nonzero if this is a call to a function that won't throw an exception. */ #define ECF_NOTHROW 16 /* Nonzero if this is a call to setjmp or a related function. */ #define ECF_RETURNS_TWICE 32 /* Nonzero if this is a call to `longjmp'. */ #define ECF_LONGJMP 64 /* Nonzero if this call is a candidate for a sibling (tail) call. */ #define ECF_SIBCALL 128 /* Nonzero if this is a call to a "pure" function (like a `const' function, but it may read memory). */ #define ECF_PURE 256 /* Nonzero if this is a call to a function that returns with the stack pointer depressed. */ #define ECF_SP_DEPRESSED 512 /* Nonzero if this call is known to always return.
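   (Each ECF_* value is a single bit; call_expr_flags and
   flags_from_decl_or_type, declared below, return a mask of these bits
   which callers test with `&'.  For example, a sketch where `exp' is any
   CALL_EXPR:

     int flags = call_expr_flags (exp);
     if (flags & (ECF_CONST | ECF_PURE))
       ... the call cannot clobber memory ...)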
*/ #define ECF_ALWAYS_RETURN 1024 /* Create libcall block around the call. */ #define ECF_LIBCALL_BLOCK 2048 extern int flags_from_decl_or_type (tree); extern int call_expr_flags (tree); extern int setjmp_call_p (tree); extern bool alloca_call_p (tree); /* In attribs.c. */ /* Process the attributes listed in ATTRIBUTES and install them in *NODE, which is either a DECL (including a TYPE_DECL) or a TYPE. If a DECL, it should be modified in place; if a TYPE, a copy should be created unless ATTR_FLAG_TYPE_IN_PLACE is set in FLAGS. FLAGS gives further information, in the form of a bitwise OR of flags in enum attribute_flags from tree.h. Depending on these flags, some attributes may be returned to be applied at a later stage (for example, to apply a decl attribute to the declaration rather than to its type). */ extern tree decl_attributes (tree *, tree, int); /* In integrate.c */ extern void set_decl_abstract_flags (tree, int); extern void set_decl_origin_self (tree); /* In stor-layout.c */ extern void set_min_and_max_values_for_integral_type (tree, int, bool); extern void fixup_signed_type (tree); extern void internal_reference_types (void); extern unsigned int update_alignment_for_field (record_layout_info, tree, unsigned int); /* varasm.c */ extern void make_decl_rtl (tree, const char *); extern void make_decl_one_only (tree); extern int supports_one_only (void); extern void variable_section (tree, int); enum tls_model decl_tls_model (tree); extern void resolve_unique_section (tree, int, int); extern void mark_referenced (tree); extern void mark_decl_referenced (tree); extern void notice_global_symbol (tree); /* In stmt.c */ extern void emit_nop (void); extern void expand_computed_goto (tree); extern bool parse_output_constraint (const char **, int, int, int, bool *, bool *, bool *); extern bool parse_input_constraint (const char **, int, int, int, int, const char * const *, bool *, bool *); extern void expand_asm_operands (tree, tree, tree, tree, int, location_t); extern void expand_asm_expr (tree); extern bool asm_op_is_mem_input (tree, tree); extern tree resolve_asm_operand_names (tree, tree, tree); extern int any_pending_cleanups (void); extern void init_stmt_for_function (void); extern void expand_start_target_temps (void); extern void expand_end_target_temps (void); extern void expand_elseif (tree); extern void save_stack_pointer (void); extern void expand_decl (tree); extern int expand_decl_cleanup (tree, tree); extern int expand_decl_cleanup_eh (tree, tree, int); extern void expand_anon_union_decl (tree, tree, tree); extern int containing_blocks_have_cleanups_or_stack_level (void); /* In gimplify.c. */ extern tree create_artificial_label (void); extern void gimplify_function_tree (tree); extern const char *get_name (tree); extern tree unshare_expr (tree); extern void sort_case_labels (tree); /* If KIND=='I', return a suitable global initializer (constructor) name. If KIND=='D', return a suitable global clean-up (destructor) name. */ extern tree get_file_function_name (int); /* Interface of the DWARF2 unwind info support. */ /* Generate a new label for the CFI info to refer to. */ extern char *dwarf2out_cfi_label (void); /* Entry point to update the canonical frame address (CFA). */ extern void dwarf2out_def_cfa (const char *, unsigned, HOST_WIDE_INT); /* Add the CFI for saving a register window. */ extern void dwarf2out_window_save (const char *); /* Add a CFI to update the running total of the size of arguments pushed onto the stack. 
*/ extern void dwarf2out_args_size (const char *, HOST_WIDE_INT); /* Entry point for saving a register to the stack. */ extern void dwarf2out_reg_save (const char *, unsigned, HOST_WIDE_INT); /* Entry point for saving the return address in the stack. */ extern void dwarf2out_return_save (const char *, HOST_WIDE_INT); /* Entry point for saving the return address in a register. */ extern void dwarf2out_return_reg (const char *, unsigned); /* The type of a callback function for walking over tree structure. */ typedef tree (*walk_tree_fn) (tree *, int *, void *); tree walk_tree (tree*, walk_tree_fn, void*, void*); tree walk_tree_without_duplicates (tree*, walk_tree_fn, void*); /* In tree-dump.c */ /* Different tree dump places. When you add new tree dump places, extend the DUMP_FILES array in tree-dump.c. */ enum tree_dump_index { TDI_none, /* No dump */ TDI_tu, /* dump the whole translation unit. */ TDI_class, /* dump class hierarchy. */ TDI_original, /* dump each function before optimizing it */ TDI_generic, /* dump each function after genericizing it */ TDI_nested, /* dump each function after unnesting it */ TDI_inlined, /* dump each function after inlining within it. */ TDI_vcg, /* create a VCG graph file for each function's flowgraph. */ TDI_xml, /* dump function call graph. */ TDI_all, /* enable all the dumps. */ TDI_end }; /* Bit masks to control tree dumping. Not all values are applicable to all tree dumps. Add new ones at the end. When you define new values, extend the DUMP_OPTIONS array in tree-dump.c */ #define TDF_ADDRESS (1 << 0) /* dump node addresses */ #define TDF_SLIM (1 << 1) /* don't go wild following links */ #define TDF_RAW (1 << 2) /* don't unparse the function */ #define TDF_DETAILS (1 << 3) /* show more detailed info about each pass */ #define TDF_STATS (1 << 4) /* dump various statistics about each pass */ #define TDF_BLOCKS (1 << 5) /* display basic block boundaries */ #define TDF_VOPS (1 << 6) /* display virtual operands */ #define TDF_LINENO (1 << 7) /* display statement line numbers */ #define TDF_UID (1 << 8) /* display decl UIDs */ typedef struct dump_info *dump_info_p; extern int dump_flag (dump_info_p, int, tree); extern int dump_enabled_p (enum tree_dump_index); extern FILE *dump_begin (enum tree_dump_index, int *); extern void dump_end (enum tree_dump_index, FILE *); extern void dump_node (tree, int, FILE *); extern int dump_switch_p (const char *); extern const char *dump_flag_name (enum tree_dump_index); /* Assign the RTX to declaration. */ extern void set_decl_rtl (tree, rtx); extern void set_decl_incoming_rtl (tree, rtx); /* Redefine abort to report an internal error w/o coredump, and reporting the location of the error in the source file. This logic is duplicated in rtl.h and tree.h because every file that needs the special abort includes one or both. toplev.h gets too few files, system.h gets too many. */ extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN; #define abort() fancy_abort (__FILE__, __LINE__, __FUNCTION__) /* Enum and arrays used for tree allocation stats. Keep in sync with tree.c:tree_node_kind_names. */ typedef enum { d_kind, t_kind, b_kind, s_kind, r_kind, e_kind, c_kind, id_kind, perm_list_kind, temp_list_kind, vec_kind, binfo_kind, phi_kind, ssa_name_kind, x_kind, lang_decl, lang_type, all_kinds } tree_node_kind; extern int tree_node_counts[]; extern int tree_node_sizes[]; /* True if we are in gimple form and the actions of the folders need to be restricted. 
False if we are not in gimple form and folding is not restricted to creating gimple expressions. */ extern bool in_gimple_form; #endif /* GCC_TREE_H */ #ifndef GCC_TM_P_H #define GCC_TM_P_H #ifdef IN_GCC /* Definitions of target machine for GCC for IA-32. Copyright (C) 1988, 1992, 1994, 1995, 1996, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Functions in i386.c */ extern void override_options (void); extern void optimization_options (int, int); extern int ix86_can_use_return_insn_p (void); extern int ix86_frame_pointer_required (void); extern void ix86_setup_frame_addresses (void); extern void ix86_file_end (void); extern HOST_WIDE_INT ix86_initial_elimination_offset (int, int); extern void ix86_expand_prologue (void); extern void ix86_expand_epilogue (int); extern void ix86_output_addr_vec_elt (FILE *, int); extern void ix86_output_addr_diff_elt (FILE *, int, int); #ifdef RTX_CODE extern int ix86_aligned_p (rtx); extern int standard_80387_constant_p (rtx); extern const char *standard_80387_constant_opcode (rtx); extern rtx standard_80387_constant_rtx (int); extern int standard_sse_constant_p (rtx); extern int symbolic_reference_mentioned_p (rtx); extern bool extended_reg_mentioned_p (rtx); extern bool x86_extended_QIreg_mentioned_p (rtx); extern bool x86_extended_reg_mentioned_p (rtx); extern int any_fp_register_operand (rtx, enum machine_mode); extern int register_and_not_any_fp_reg_operand (rtx, enum machine_mode); extern int fp_register_operand (rtx, enum machine_mode); extern int register_and_not_fp_reg_operand (rtx, enum machine_mode); extern int x86_64_general_operand (rtx, enum machine_mode); extern int x86_64_szext_general_operand (rtx, enum machine_mode); extern int x86_64_nonmemory_operand (rtx, enum machine_mode); extern int x86_64_szext_nonmemory_operand (rtx, enum machine_mode); extern int x86_64_immediate_operand (rtx, enum machine_mode); extern int x86_64_zext_immediate_operand (rtx, enum machine_mode); extern int symbolic_operand (rtx, enum machine_mode); extern int tls_symbolic_operand (rtx, enum machine_mode); extern int global_dynamic_symbolic_operand (rtx, enum machine_mode); extern int local_dynamic_symbolic_operand (rtx, enum machine_mode); extern int initial_exec_symbolic_operand (rtx, enum machine_mode); extern int local_exec_symbolic_operand (rtx, enum machine_mode); extern int pic_symbolic_operand (rtx, enum machine_mode); extern int call_insn_operand (rtx, enum machine_mode); extern int sibcall_insn_operand (rtx, enum machine_mode); extern int constant_call_address_operand (rtx, enum machine_mode); extern int const0_operand (rtx, enum machine_mode); extern int const1_operand (rtx, enum machine_mode); extern int const248_operand (rtx, enum machine_mode); extern int incdec_operand (rtx, enum machine_mode); extern int reg_no_sp_operand (rtx, enum 
machine_mode); extern int mmx_reg_operand (rtx, enum machine_mode); extern int general_no_elim_operand (rtx, enum machine_mode); extern int nonmemory_no_elim_operand (rtx, enum machine_mode); extern int q_regs_operand (rtx, enum machine_mode); extern int non_q_regs_operand (rtx, enum machine_mode); extern int sse_comparison_operator (rtx, enum machine_mode); extern int fcmov_comparison_operator (rtx, enum machine_mode); extern int cmp_fp_expander_operand (rtx, enum machine_mode); extern int ix86_comparison_operator (rtx, enum machine_mode); extern int ext_register_operand (rtx, enum machine_mode); extern int binary_fp_operator (rtx, enum machine_mode); extern int mult_operator (rtx, enum machine_mode); extern int div_operator (rtx, enum machine_mode); extern int arith_or_logical_operator (rtx, enum machine_mode); extern int promotable_binary_operator (rtx, enum machine_mode); extern int memory_displacement_operand (rtx, enum machine_mode); extern int cmpsi_operand (rtx, enum machine_mode); extern int long_memory_operand (rtx, enum machine_mode); extern int aligned_operand (rtx, enum machine_mode); extern enum machine_mode ix86_cc_mode (enum rtx_code, rtx, rtx); extern int ix86_expand_movstr (rtx, rtx, rtx, rtx); extern int ix86_expand_clrstr (rtx, rtx, rtx); extern int ix86_expand_strlen (rtx, rtx, rtx, rtx); extern bool legitimate_constant_p (rtx); extern bool constant_address_p (rtx); extern bool legitimate_pic_operand_p (rtx); extern int legitimate_pic_address_disp_p (rtx); extern int legitimate_address_p (enum machine_mode, rtx, int); extern rtx legitimize_pic_address (rtx, rtx); extern rtx legitimize_address (rtx, rtx, enum machine_mode); extern void print_reg (rtx, int, FILE*); extern void print_operand (FILE*, rtx, int); extern void print_operand_address (FILE*, rtx); extern bool output_addr_const_extra (FILE*, rtx); extern void split_di (rtx[], int, rtx[], rtx[]); extern void split_ti (rtx[], int, rtx[], rtx[]); extern const char *output_set_got (rtx); extern const char *output_387_binary_op (rtx, rtx*); extern const char *output_387_reg_move (rtx, rtx*); extern const char *output_fix_trunc (rtx, rtx*); extern const char *output_fp_compare (rtx, rtx*, int, int); extern void i386_dwarf_output_addr_const (FILE*, rtx); extern void i386_output_dwarf_dtprel (FILE*, int, rtx); extern void ix86_expand_clear (rtx); extern void ix86_expand_move (enum machine_mode, rtx[]); extern void ix86_expand_vector_move (enum machine_mode, rtx[]); extern void ix86_expand_binary_operator (enum rtx_code, enum machine_mode, rtx[]); extern int ix86_binary_operator_ok (enum rtx_code, enum machine_mode, rtx[]); extern void ix86_expand_unary_operator (enum rtx_code, enum machine_mode, rtx[]); extern int ix86_unary_operator_ok (enum rtx_code, enum machine_mode, rtx[]); extern int ix86_match_ccmode (rtx, enum machine_mode); extern rtx ix86_expand_compare (enum rtx_code, rtx *, rtx *); extern int ix86_use_fcomi_compare (enum rtx_code); extern void ix86_expand_branch (enum rtx_code, rtx); extern int ix86_expand_setcc (enum rtx_code, rtx); extern int ix86_expand_int_movcc (rtx[]); extern int ix86_expand_fp_movcc (rtx[]); extern int ix86_expand_int_addcc (rtx[]); extern void ix86_expand_call (rtx, rtx, rtx, rtx, rtx, int); extern void x86_initialize_trampoline (rtx, rtx, rtx); extern rtx ix86_zero_extend_to_Pmode (rtx); extern void ix86_split_long_move (rtx[]); extern void ix86_split_ashldi (rtx *, rtx); extern void ix86_split_ashrdi (rtx *, rtx); extern void ix86_split_lshrdi (rtx *, rtx); extern rtx 
ix86_find_base_term (rtx); extern int ix86_check_movabs (rtx, int); extern rtx assign_386_stack_local (enum machine_mode, int); extern int ix86_attr_length_immediate_default (rtx, int); extern int ix86_attr_length_address_default (rtx); extern enum machine_mode ix86_fp_compare_mode (enum rtx_code); extern int x86_64_sign_extended_value (rtx); extern int x86_64_zero_extended_value (rtx); extern rtx ix86_libcall_value (enum machine_mode); extern bool ix86_function_value_regno_p (int); extern bool ix86_function_arg_regno_p (int); extern int ix86_function_arg_boundary (enum machine_mode, tree); extern int ix86_return_in_memory (tree); extern void ix86_va_start (tree, rtx); extern rtx ix86_va_arg (tree, tree); extern rtx ix86_force_to_memory (enum machine_mode, rtx); extern void ix86_free_from_memory (enum machine_mode); extern void ix86_split_fp_branch (enum rtx_code code, rtx, rtx, rtx, rtx, rtx); extern int ix86_hard_regno_mode_ok (int, enum machine_mode); extern int ix86_register_move_cost (enum machine_mode, enum reg_class, enum reg_class); extern int ix86_secondary_memory_needed (enum reg_class, enum reg_class, enum machine_mode, int); extern enum reg_class ix86_preferred_reload_class (rtx, enum reg_class); extern int ix86_memory_move_cost (enum machine_mode, enum reg_class, int); extern void emit_i387_cw_initialization (rtx, rtx); extern bool ix86_fp_jump_nontrivial_p (enum rtx_code); extern void x86_order_regs_for_local_alloc (void); extern void x86_function_profiler (FILE *, int); extern void x86_emit_floatuns (rtx [2]); extern void ix86_emit_fp_unordered_jump (rtx); extern void ix86_emit_i387_log1p (rtx, rtx); extern enum rtx_code ix86_reverse_condition (enum rtx_code, enum machine_mode); #ifdef TREE_CODE extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree); extern rtx function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int); extern int function_arg_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode, tree, int); extern void function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode, tree, int); extern rtx ix86_function_value (tree); extern void ix86_init_builtins (void); extern rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int); #endif #endif #ifdef TREE_CODE extern int ix86_return_pops_args (tree, tree, int); extern int ix86_data_alignment (tree, int); extern int ix86_local_alignment (tree, int); extern int ix86_constant_alignment (tree, int); extern tree ix86_handle_dll_attribute (tree *, tree, tree, int, bool *); extern tree ix86_handle_shared_attribute (tree *, tree, tree, int, bool *); extern unsigned int i386_pe_section_type_flags (tree, const char *, int); extern void i386_pe_asm_named_section (const char *, unsigned int); extern int x86_field_alignment (tree, int); #endif extern rtx ix86_tls_get_addr (void); extern bool ix86_must_pass_in_stack (enum machine_mode mode, tree); extern void ix86_expand_vector_init (rtx, rtx); /* In winnt.c */ extern int i386_pe_dllexport_name_p (const char *); extern int i386_pe_dllimport_name_p (const char *); extern void i386_pe_unique_section (tree, int); extern void i386_pe_declare_function_type (FILE *, const char *, int); extern void i386_pe_record_external_function (const char *); extern void i386_pe_record_exported_symbol (const char *, int); extern void i386_pe_asm_file_end (FILE *); extern void i386_pe_encode_section_info (tree, rtx, int); extern const char *i386_pe_strip_name_encoding (const char *); extern const char *i386_pe_strip_name_encoding_full (const char *); extern void 
i386_pe_output_labelref (FILE *, const char *); /* Generated automatically by the program `genpreds'. */ #ifndef GCC_TM_PREDS_H #define GCC_TM_PREDS_H #ifdef RTX_CODE extern int x86_64_immediate_operand (rtx, enum machine_mode); extern int x86_64_nonmemory_operand (rtx, enum machine_mode); extern int x86_64_movabs_operand (rtx, enum machine_mode); extern int x86_64_szext_nonmemory_operand (rtx, enum machine_mode); extern int x86_64_general_operand (rtx, enum machine_mode); extern int x86_64_szext_general_operand (rtx, enum machine_mode); extern int x86_64_zext_immediate_operand (rtx, enum machine_mode); extern int shiftdi_operand (rtx, enum machine_mode); extern int const_int_1_31_operand (rtx, enum machine_mode); extern int symbolic_operand (rtx, enum machine_mode); extern int aligned_operand (rtx, enum machine_mode); extern int pic_symbolic_operand (rtx, enum machine_mode); extern int call_insn_operand (rtx, enum machine_mode); extern int sibcall_insn_operand (rtx, enum machine_mode); extern int constant_call_address_operand (rtx, enum machine_mode); extern int const0_operand (rtx, enum machine_mode); extern int const1_operand (rtx, enum machine_mode); extern int const248_operand (rtx, enum machine_mode); extern int const_0_to_3_operand (rtx, enum machine_mode); extern int const_0_to_7_operand (rtx, enum machine_mode); extern int const_0_to_15_operand (rtx, enum machine_mode); extern int const_0_to_255_operand (rtx, enum machine_mode); extern int incdec_operand (rtx, enum machine_mode); extern int mmx_reg_operand (rtx, enum machine_mode); extern int reg_no_sp_operand (rtx, enum machine_mode); extern int general_no_elim_operand (rtx, enum machine_mode); extern int nonmemory_no_elim_operand (rtx, enum machine_mode); extern int index_register_operand (rtx, enum machine_mode); extern int flags_reg_operand (rtx, enum machine_mode); extern int q_regs_operand (rtx, enum machine_mode); extern int non_q_regs_operand (rtx, enum machine_mode); extern int fcmov_comparison_operator (rtx, enum machine_mode); extern int sse_comparison_operator (rtx, enum machine_mode); extern int ix86_comparison_operator (rtx, enum machine_mode); extern int ix86_carry_flag_operator (rtx, enum machine_mode); extern int cmp_fp_expander_operand (rtx, enum machine_mode); extern int ext_register_operand (rtx, enum machine_mode); extern int binary_fp_operator (rtx, enum machine_mode); extern int mult_operator (rtx, enum machine_mode); extern int div_operator (rtx, enum machine_mode); extern int arith_or_logical_operator (rtx, enum machine_mode); extern int promotable_binary_operator (rtx, enum machine_mode); extern int memory_displacement_operand (rtx, enum machine_mode); extern int cmpsi_operand (rtx, enum machine_mode); extern int long_memory_operand (rtx, enum machine_mode); extern int tls_symbolic_operand (rtx, enum machine_mode); extern int global_dynamic_symbolic_operand (rtx, enum machine_mode); extern int local_dynamic_symbolic_operand (rtx, enum machine_mode); extern int initial_exec_symbolic_operand (rtx, enum machine_mode); extern int local_exec_symbolic_operand (rtx, enum machine_mode); extern int any_fp_register_operand (rtx, enum machine_mode); extern int register_and_not_any_fp_reg_operand (rtx, enum machine_mode); extern int fp_register_operand (rtx, enum machine_mode); extern int register_and_not_fp_reg_operand (rtx, enum machine_mode); extern int zero_extended_scalar_load_operand (rtx, enum machine_mode); extern int vector_move_operand (rtx, enum machine_mode); extern int no_seg_address_operand (rtx, enum 
machine_mode); #endif /* RTX_CODE */ #endif /* GCC_TM_PREDS_H */ #endif #endif /* GCC_TM_P_H */ /* Define control and data flow tables, and regsets. Copyright (C) 1987, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_BASIC_BLOCK_H #define GCC_BASIC_BLOCK_H /* Functions to support general ended bitmaps. Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_BITMAP_H #define GCC_BITMAP_H /* Fundamental storage type for bitmap. */ /* typedef unsigned HOST_WIDE_INT BITMAP_WORD; */ /* #define nBITMAP_WORD_BITS HOST_BITS_PER_WIDE_INT */ typedef unsigned long BITMAP_WORD; #define nBITMAP_WORD_BITS (CHAR_BIT * SIZEOF_LONG) #define BITMAP_WORD_BITS (unsigned) nBITMAP_WORD_BITS /* Number of words to use for each element in the linked list. */ #ifndef BITMAP_ELEMENT_WORDS #define BITMAP_ELEMENT_WORDS ((128 + nBITMAP_WORD_BITS - 1) / nBITMAP_WORD_BITS) #endif /* Number of bits in each actual element of a bitmap. We get slightly better code for bit % BITMAP_ELEMENT_ALL_BITS and bit / BITMAP_ELEMENT_ALL_BITS if bits is unsigned, assuming it is a power of 2. */ #define BITMAP_ELEMENT_ALL_BITS \ ((unsigned) (BITMAP_ELEMENT_WORDS * BITMAP_WORD_BITS)) /* Bitmap set element. We use a linked list to hold only the bits that are set. This allows for use to grow the bitset dynamically without having to realloc and copy a giant bit array. The `prev' field is undefined for an element on the free list. */ typedef struct bitmap_element_def GTY(()) { struct bitmap_element_def *next; /* Next element. */ struct bitmap_element_def *prev; /* Previous element. */ unsigned int indx; /* regno/BITMAP_ELEMENT_ALL_BITS. */ BITMAP_WORD bits[BITMAP_ELEMENT_WORDS]; /* Bits that are set. */ } bitmap_element; /* Head of bitmap linked list. */ typedef struct bitmap_head_def GTY(()) { bitmap_element *first; /* First element in linked list. */ bitmap_element *current; /* Last element looked at. */ unsigned int indx; /* Index of last element looked at. */ int using_obstack; /* Are we using an obstack or ggc for allocation? 
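   (Nonzero means the element list was carved from the bitmap obstack;
   zero means the elements are ggc-allocated.  bitmap_initialize records
   the caller's choice here.)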
*/ } bitmap_head; typedef struct bitmap_head_def *bitmap; /* Enumeration giving the various operations we support. */ enum bitmap_bits { BITMAP_AND, /* TO = FROM1 & FROM2 */ BITMAP_AND_COMPL, /* TO = FROM1 & ~ FROM2 */ BITMAP_IOR, /* TO = FROM1 | FROM2 */ BITMAP_XOR, /* TO = FROM1 ^ FROM2 */ BITMAP_IOR_COMPL /* TO = FROM1 | ~FROM2 */ }; /* Global data */ extern bitmap_element bitmap_zero_bits; /* Zero bitmap element */ /* Clear a bitmap by freeing up the linked list. */ extern void bitmap_clear (bitmap); /* Copy a bitmap to another bitmap. */ extern void bitmap_copy (bitmap, bitmap); /* True if two bitmaps are identical. */ extern int bitmap_equal_p (bitmap, bitmap); /* Perform an operation on two bitmaps, yielding a third. */ extern int bitmap_operation (bitmap, bitmap, bitmap, enum bitmap_bits); /* `or' into one bitmap the `and' of a second bitmap witih the complement of a third. */ extern void bitmap_ior_and_compl (bitmap, bitmap, bitmap); /* Clear a single register in a register set. */ extern void bitmap_clear_bit (bitmap, int); /* Set a single register in a register set. */ extern void bitmap_set_bit (bitmap, int); /* Return true if a register is set in a register set. */ extern int bitmap_bit_p (bitmap, int); /* Debug functions to print a bitmap linked list. */ extern void debug_bitmap (bitmap); extern void debug_bitmap_file (FILE *, bitmap); /* Print a bitmap. */ extern void bitmap_print (FILE *, bitmap, const char *, const char *); /* Initialize a bitmap header. If HEAD is NULL, a new header will be allocated. USING_OBSTACK indicates how elements should be allocated. */ extern bitmap bitmap_initialize (bitmap head, int using_obstack); /* Release all memory used by the bitmap obstack. */ extern void bitmap_release_memory (void); /* A few compatibility/functions macros for compatibility with sbitmaps */ #define dump_bitmap(file, bitmap) bitmap_print (file, bitmap, "", "\n") #define bitmap_zero(a) bitmap_clear (a) #define bitmap_a_or_b(a,b,c) bitmap_operation (a, b, c, BITMAP_IOR) #define bitmap_a_and_b(a,b,c) bitmap_operation (a, b, c, BITMAP_AND) extern int bitmap_union_of_diff (bitmap, bitmap, bitmap, bitmap); extern int bitmap_first_set_bit (bitmap); extern int bitmap_last_set_bit (bitmap); /* Allocate a bitmap with oballoc. */ #define BITMAP_OBSTACK_ALLOC(OBSTACK) \ bitmap_initialize (obstack_alloc (OBSTACK, sizeof (bitmap_head)), 1) /* Allocate a bitmap with ggc_alloc. */ #define BITMAP_GGC_ALLOC() \ bitmap_initialize (NULL, 0) /* Allocate a bitmap with xmalloc. */ #define BITMAP_XMALLOC() \ bitmap_initialize (xmalloc (sizeof (bitmap_head)), 1) /* Do any cleanup needed on a bitmap when it is no longer used. */ #define BITMAP_FREE(BITMAP) \ do { \ if (BITMAP) \ { \ bitmap_clear (BITMAP); \ (BITMAP) = 0; \ } \ } while (0) /* Do any cleanup needed on an xmalloced bitmap when it is no longer used. */ #define BITMAP_XFREE(BITMAP) \ do { \ if (BITMAP) \ { \ bitmap_clear (BITMAP); \ free (BITMAP); \ (BITMAP) = 0; \ } \ } while (0) /* Do any one-time initializations needed for bitmaps. */ #define BITMAP_INIT_ONCE() /* Loop over all bits in BITMAP, starting with MIN, setting BITNUM to the bit number and executing CODE for all bits that are set. */ #define EXECUTE_IF_SET_IN_BITMAP(BITMAP, MIN, BITNUM, CODE) \ do { \ bitmap_element *ptr_ = (BITMAP)->first; \ unsigned int indx_ = (MIN) / BITMAP_ELEMENT_ALL_BITS; \ unsigned bit_num_ = (MIN) % BITMAP_WORD_BITS; \ unsigned word_num_ = (MIN) / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS; \ \ \ /* Find the block the minimum bit is in. 
*/ \ while (ptr_ != 0 && ptr_->indx < indx_) \ ptr_ = ptr_->next; \ \ if (ptr_ != 0 && ptr_->indx != indx_) \ { \ bit_num_ = 0; \ word_num_ = 0; \ } \ \ for (; ptr_ != 0; ptr_ = ptr_->next) \ { \ for (; word_num_ < BITMAP_ELEMENT_WORDS; word_num_++) \ { \ BITMAP_WORD word_ = ptr_->bits[word_num_]; \ \ if (word_ != 0) \ { \ for (; bit_num_ < BITMAP_WORD_BITS; bit_num_++) \ { \ BITMAP_WORD mask_ = ((BITMAP_WORD) 1) << bit_num_; \ \ if ((word_ & mask_) != 0) \ { \ word_ &= ~ mask_; \ (BITNUM) = (ptr_->indx * BITMAP_ELEMENT_ALL_BITS \ + word_num_ * BITMAP_WORD_BITS \ + bit_num_); \ CODE; \ \ if (word_ == 0) \ break; \ } \ } \ } \ \ bit_num_ = 0; \ } \ \ word_num_ = 0; \ } \ } while (0) /* Loop over all bits in BITMAP1 and BITMAP2, starting with MIN, setting BITNUM to the bit number and executing CODE for all bits that are set in the first bitmap and not set in the second. */ #define EXECUTE_IF_AND_COMPL_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, CODE) \ do { \ bitmap_element *ptr1_ = (BITMAP1)->first; \ bitmap_element *ptr2_ = (BITMAP2)->first; \ unsigned int indx_ = (MIN) / BITMAP_ELEMENT_ALL_BITS; \ unsigned bit_num_ = (MIN) % BITMAP_WORD_BITS; \ unsigned word_num_ = (MIN) / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS; \ \ /* Find the block the minimum bit is in in the first bitmap. */ \ while (ptr1_ != 0 && ptr1_->indx < indx_) \ ptr1_ = ptr1_->next; \ \ if (ptr1_ != 0 && ptr1_->indx != indx_) \ { \ bit_num_ = 0; \ word_num_ = 0; \ } \ \ for (; ptr1_ != 0 ; ptr1_ = ptr1_->next) \ { \ /* Advance BITMAP2 to the equivalent link, using an all \ zero element if an equivalent link doesn't exist. */ \ bitmap_element *tmp2_; \ \ while (ptr2_ != 0 && ptr2_->indx < ptr1_->indx) \ ptr2_ = ptr2_->next; \ \ tmp2_ = ((ptr2_ != 0 && ptr2_->indx == ptr1_->indx) \ ? ptr2_ : &bitmap_zero_bits); \ \ for (; word_num_ < BITMAP_ELEMENT_WORDS; word_num_++) \ { \ BITMAP_WORD word_ = (ptr1_->bits[word_num_] \ & ~ tmp2_->bits[word_num_]); \ if (word_ != 0) \ { \ for (; bit_num_ < BITMAP_WORD_BITS; bit_num_++) \ { \ BITMAP_WORD mask_ = ((BITMAP_WORD) 1) << bit_num_; \ \ if ((word_ & mask_) != 0) \ { \ word_ &= ~ mask_; \ (BITNUM) = (ptr1_->indx * BITMAP_ELEMENT_ALL_BITS \ + word_num_ * BITMAP_WORD_BITS \ + bit_num_); \ \ CODE; \ if (word_ == 0) \ break; \ } \ } \ } \ \ bit_num_ = 0; \ } \ \ word_num_ = 0; \ } \ } while (0) /* Loop over all bits in BITMAP1 and BITMAP2, starting with MIN, setting BITNUM to the bit number and executing CODE for all bits that are set in the both bitmaps. */ #define EXECUTE_IF_AND_IN_BITMAP(BITMAP1, BITMAP2, MIN, BITNUM, CODE) \ do { \ bitmap_element *ptr1_ = (BITMAP1)->first; \ bitmap_element *ptr2_ = (BITMAP2)->first; \ unsigned int indx_ = (MIN) / BITMAP_ELEMENT_ALL_BITS; \ unsigned bit_num_ = (MIN) % BITMAP_WORD_BITS; \ unsigned word_num_ = (MIN) / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS; \ \ /* Find the block the minimum bit is in in the first bitmap. */ \ while (ptr1_ != 0 && ptr1_->indx < indx_) \ ptr1_ = ptr1_->next; \ \ if (ptr1_ != 0 && ptr1_->indx != indx_) \ { \ bit_num_ = 0; \ word_num_ = 0; \ } \ \ for (; ptr1_ != 0 ; ptr1_ = ptr1_->next) \ { \ /* Advance BITMAP2 to the equivalent link. */ \ while (ptr2_ != 0 && ptr2_->indx < ptr1_->indx) \ ptr2_ = ptr2_->next; \ \ if (ptr2_ == 0) \ { \ /* If there are no more elements in BITMAP2, exit loop now. 
*/ \ ptr1_ = (bitmap_element *)0; \ break; \ } \ else if (ptr2_->indx > ptr1_->indx) \ { \ bit_num_ = word_num_ = 0; \ continue; \ } \ \ for (; word_num_ < BITMAP_ELEMENT_WORDS; word_num_++) \ { \ BITMAP_WORD word_ = (ptr1_->bits[word_num_] \ & ptr2_->bits[word_num_]); \ if (word_ != 0) \ { \ for (; bit_num_ < BITMAP_WORD_BITS; bit_num_++) \ { \ BITMAP_WORD mask_ = ((BITMAP_WORD) 1) << bit_num_; \ \ if ((word_ & mask_) != 0) \ { \ word_ &= ~ mask_; \ (BITNUM) = (ptr1_->indx * BITMAP_ELEMENT_ALL_BITS \ + word_num_ * BITMAP_WORD_BITS \ + bit_num_); \ \ CODE; \ if (word_ == 0) \ break; \ } \ } \ } \ \ bit_num_ = 0; \ } \ \ word_num_ = 0; \ } \ } while (0) #endif /* GCC_BITMAP_H */ /* Simple bitmaps. Copyright (C) 1999, 2000, 2002, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_SBITMAP_H #define GCC_SBITMAP_H /* It's not clear yet whether using bitmap.[ch] will be a win. It should be straightforward to convert so for now we keep things simple while more important issues are dealt with. */ #define SBITMAP_ELT_BITS ((unsigned) HOST_BITS_PER_WIDE_INT) #define SBITMAP_ELT_TYPE unsigned HOST_WIDE_INT typedef struct simple_bitmap_def { unsigned int n_bits; /* Number of bits. */ unsigned int size; /* Size in elements. */ unsigned int bytes; /* Size in bytes. */ SBITMAP_ELT_TYPE elms[1]; /* The elements. */ } *sbitmap; typedef SBITMAP_ELT_TYPE *sbitmap_ptr; /* Return the set size needed for N elements. */ #define SBITMAP_SET_SIZE(N) (((N) + SBITMAP_ELT_BITS - 1) / SBITMAP_ELT_BITS) /* Set bit number bitno in the bitmap. */ #define SET_BIT(BITMAP, BITNO) \ ((BITMAP)->elms [(BITNO) / SBITMAP_ELT_BITS] \ |= (SBITMAP_ELT_TYPE) 1 << (BITNO) % SBITMAP_ELT_BITS) /* Test if bit number bitno in the bitmap is set. */ #define TEST_BIT(BITMAP, BITNO) \ ((BITMAP)->elms [(BITNO) / SBITMAP_ELT_BITS] >> (BITNO) % SBITMAP_ELT_BITS & 1) /* Reset bit number bitno in the bitmap. */ #define RESET_BIT(BITMAP, BITNO) \ ((BITMAP)->elms [(BITNO) / SBITMAP_ELT_BITS] \ &= ~((SBITMAP_ELT_TYPE) 1 << (BITNO) % SBITMAP_ELT_BITS)) /* Loop over all elements of SBITSET, starting with MIN. 
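   For example (a hypothetical sketch; `live' is any sbitmap), the
   following prints every set bit:

     unsigned int i;
     EXECUTE_IF_SET_IN_SBITMAP (live, 0, i,
       {
         fprintf (stderr, "bit %u is set\n", i);
       });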
*/ #define EXECUTE_IF_SET_IN_SBITMAP(SBITMAP, MIN, N, CODE) \ do { \ unsigned int word_num_; \ unsigned int bit_num_ = (MIN) % (unsigned int) SBITMAP_ELT_BITS; \ unsigned int size_ = (SBITMAP)->size; \ SBITMAP_ELT_TYPE *ptr_ = (SBITMAP)->elms; \ \ for (word_num_ = (MIN) / (unsigned int) SBITMAP_ELT_BITS; \ word_num_ < size_; word_num_++, bit_num_ = 0) \ { \ SBITMAP_ELT_TYPE word_ = ptr_[word_num_]; \ \ if (word_ != 0) \ for (; bit_num_ < SBITMAP_ELT_BITS; bit_num_++) \ { \ SBITMAP_ELT_TYPE _mask = (SBITMAP_ELT_TYPE) 1 << bit_num_; \ \ if ((word_ & _mask) != 0) \ { \ word_ &= ~ _mask; \ (N) = word_num_ * SBITMAP_ELT_BITS + bit_num_; \ CODE; \ if (word_ == 0) \ break; \ } \ } \ } \ } while (0) #define EXECUTE_IF_SET_IN_SBITMAP_REV(SBITMAP, N, CODE) \ do { \ unsigned int word_num_; \ unsigned int bit_num_; \ unsigned int size_ = (SBITMAP)->size; \ SBITMAP_ELT_TYPE *ptr_ = (SBITMAP)->elms; \ \ for (word_num_ = size_; word_num_ > 0; word_num_--) \ { \ SBITMAP_ELT_TYPE word_ = ptr_[word_num_ - 1]; \ \ if (word_ != 0) \ for (bit_num_ = SBITMAP_ELT_BITS; bit_num_ > 0; bit_num_--) \ { \ SBITMAP_ELT_TYPE _mask = (SBITMAP_ELT_TYPE)1 << (bit_num_ - 1);\ \ if ((word_ & _mask) != 0) \ { \ word_ &= ~ _mask; \ (N) = (word_num_ - 1) * SBITMAP_ELT_BITS + bit_num_ - 1;\ CODE; \ if (word_ == 0) \ break; \ } \ } \ } \ } while (0) #define sbitmap_free(MAP) free(MAP) #define sbitmap_vector_free(VEC) free(VEC) struct int_list; extern void dump_sbitmap (FILE *, sbitmap); extern void dump_sbitmap_file (FILE *, sbitmap); extern void dump_sbitmap_vector (FILE *, const char *, const char *, sbitmap *, int); extern sbitmap sbitmap_alloc (unsigned int); extern sbitmap *sbitmap_vector_alloc (unsigned int, unsigned int); extern sbitmap sbitmap_resize (sbitmap, unsigned int, int); extern void sbitmap_copy (sbitmap, sbitmap); extern int sbitmap_equal (sbitmap, sbitmap); extern void sbitmap_zero (sbitmap); extern void sbitmap_ones (sbitmap); extern void sbitmap_vector_zero (sbitmap *, unsigned int); extern void sbitmap_vector_ones (sbitmap *, unsigned int); extern void sbitmap_union_of_diff (sbitmap, sbitmap, sbitmap, sbitmap); extern bool sbitmap_union_of_diff_cg (sbitmap, sbitmap, sbitmap, sbitmap); extern void sbitmap_difference (sbitmap, sbitmap, sbitmap); extern void sbitmap_not (sbitmap, sbitmap); extern void sbitmap_a_or_b_and_c (sbitmap, sbitmap, sbitmap, sbitmap); extern bool sbitmap_a_or_b_and_c_cg (sbitmap, sbitmap, sbitmap, sbitmap); extern void sbitmap_a_and_b_or_c (sbitmap, sbitmap, sbitmap, sbitmap); extern bool sbitmap_a_and_b_or_c_cg (sbitmap, sbitmap, sbitmap, sbitmap); extern void sbitmap_a_and_b (sbitmap, sbitmap, sbitmap); extern bool sbitmap_a_and_b_cg (sbitmap, sbitmap, sbitmap); extern void sbitmap_a_or_b (sbitmap, sbitmap, sbitmap); extern bool sbitmap_a_or_b_cg (sbitmap, sbitmap, sbitmap); extern void sbitmap_a_xor_b (sbitmap, sbitmap, sbitmap); extern bool sbitmap_a_xor_b_cg (sbitmap, sbitmap, sbitmap); extern bool sbitmap_a_subset_b_p (sbitmap, sbitmap); extern int sbitmap_first_set_bit (sbitmap); extern int sbitmap_last_set_bit (sbitmap); extern void sbitmap_intersect_of_predsucc (sbitmap, sbitmap *, int, struct int_list **); #define sbitmap_intersect_of_predecessors sbitmap_intersect_of_predsucc #define sbitmap_intersect_of_successors sbitmap_intersect_of_predsucc extern void sbitmap_union_of_predsucc (sbitmap, sbitmap *, int, struct int_list **); #define sbitmap_union_of_predecessors sbitmap_union_of_predsucc #define sbitmap_union_of_successors sbitmap_union_of_predsucc /* Intersection and 
Union of preds/succs using the new flow graph structure instead of the pred/succ arrays. */ extern void sbitmap_intersection_of_succs (sbitmap, sbitmap *, int); extern void sbitmap_intersection_of_preds (sbitmap, sbitmap *, int); extern void sbitmap_union_of_succs (sbitmap, sbitmap *, int); extern void sbitmap_union_of_preds (sbitmap, sbitmap *, int); extern void debug_sbitmap (sbitmap); extern sbitmap sbitmap_realloc (sbitmap, unsigned int); #endif /* ! GCC_SBITMAP_H */ /* Virtual array support. Copyright (C) 1998, 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Cygnus Solutions. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_VARRAY_H #define GCC_VARRAY_H #ifndef HOST_WIDE_INT #endif #ifndef GCC_SYSTEM_H #endif /* Auxiliary structure used inside the varray structure, used for function integration data. */ struct const_equiv_data GTY(()) { /* Map pseudo reg number in calling function to equivalent constant. We cannot in general substitute constants into parameter pseudo registers, since some machine descriptions (many RISCs) won't always handle the resulting insns. So if an incoming parameter has a constant equivalent, we record it here, and if the resulting insn is recognizable, we go with it. We also use this mechanism to convert references to incoming arguments and stacked variables. copy_rtx_and_substitute will replace the virtual incoming argument and virtual stacked variables registers with new pseudos that contain pointers into the replacement area allocated for this inline instance. These pseudos are then marked as being equivalent to the appropriate address and substituted if valid. */ rtx rtx; /* Record the valid age for each entry. The entry is invalid if its age is less than const_age. */ unsigned age; }; /* Enum indicating what the varray contains. If this is changed, `element' in varray.c needs to be updated. */ enum varray_data_enum { VARRAY_DATA_C, VARRAY_DATA_UC, VARRAY_DATA_S, VARRAY_DATA_US, VARRAY_DATA_I, VARRAY_DATA_U, VARRAY_DATA_L, VARRAY_DATA_UL, VARRAY_DATA_HINT, VARRAY_DATA_UHINT, VARRAY_DATA_GENERIC, VARRAY_DATA_GENERIC_NOGC, VARRAY_DATA_CPTR, VARRAY_DATA_RTX, VARRAY_DATA_RTVEC, VARRAY_DATA_TREE, VARRAY_DATA_BITMAP, VARRAY_DATA_REG, VARRAY_DATA_CONST_EQUIV, VARRAY_DATA_BB, VARRAY_DATA_TE, VARRAY_DATA_EDGE, VARRAY_DATA_TREE_PTR, NUM_VARRAY_DATA }; /* Union of various array types that are used. 
*/ typedef union varray_data_tag GTY (()) { char GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_C"))) c[1]; unsigned char GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_UC"))) uc[1]; short GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_S"))) s[1]; unsigned short GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_US"))) us[1]; int GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_I"))) i[1]; unsigned int GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_U"))) u[1]; long GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_L"))) l[1]; unsigned long GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_UL"))) ul[1]; HOST_WIDE_INT GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_HINT"))) hint[1]; unsigned HOST_WIDE_INT GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_UHINT"))) uhint[1]; PTR GTY ((length ("%0.num_elements"), use_param, tag ("VARRAY_DATA_GENERIC"))) generic[1]; PTR GTY ((length ("%0.num_elements"), skip (""), tag ("VARRAY_DATA_GENERIC_NOGC"))) generic_nogc[1]; char *GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_CPTR"))) cptr[1]; rtx GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_RTX"))) rtx[1]; rtvec GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_RTVEC"))) rtvec[1]; tree GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_TREE"))) tree[1]; struct bitmap_head_def *GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_BITMAP"))) bitmap[1]; struct reg_info_def *GTY ((length ("%0.num_elements"), skip, tag ("VARRAY_DATA_REG"))) reg[1]; struct const_equiv_data GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_CONST_EQUIV"))) const_equiv[1]; struct basic_block_def *GTY ((length ("%0.num_elements"), skip, tag ("VARRAY_DATA_BB"))) bb[1]; struct elt_list *GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_TE"))) te[1]; struct edge_def *GTY ((length ("%0.num_elements"), tag ("VARRAY_DATA_EDGE"))) e[1]; tree *GTY ((length ("%0.num_elements"), skip (""), tag ("VARRAY_DATA_TREE_PTR"))) tp[1]; } varray_data; /* Virtual array of pointers header. */ struct varray_head_tag GTY(()) { size_t num_elements; /* Maximum element number allocated. */ size_t elements_used; /* The number of elements used, if using VARRAY_PUSH/VARRAY_POP. */ enum varray_data_enum type; /* The kind of elements in the varray. */ const char *name; /* name of the varray for reporting errors */ varray_data GTY ((desc ("%0.type"))) data; /* The data elements follow, must be last. */ }; typedef struct varray_head_tag *varray_type; /* Allocate a virtual array with NUM elements, each of which is SIZE bytes long, named NAME. Array elements are zeroed. 
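   For example (a sketch; `decl' stands for any tree), this creates a
   varray of 32 zero-initialized tree slots and fills the first one:

     varray_type decls;
     VARRAY_TREE_INIT (decls, 32, "decls");
     VARRAY_TREE (decls, 0) = decl;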
*/ extern varray_type varray_init (size_t, enum varray_data_enum, const char *); #define VARRAY_CHAR_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_C, name) #define VARRAY_UCHAR_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_UC, name) #define VARRAY_SHORT_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_S, name) #define VARRAY_USHORT_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_US, name) #define VARRAY_INT_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_I, name) #define VARRAY_UINT_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_U, name) #define VARRAY_LONG_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_L, name) #define VARRAY_ULONG_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_UL, name) #define VARRAY_WIDE_INT_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_HINT, name) #define VARRAY_UWIDE_INT_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_UHINT, name) #define VARRAY_GENERIC_PTR_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_GENERIC, name) #define VARRAY_GENERIC_PTR_NOGC_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_GENERIC_NOGC, name) #define VARRAY_CHAR_PTR_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_CPTR, name) #define VARRAY_RTX_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_RTX, name) #define VARRAY_RTVEC_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_RTVEC, name) #define VARRAY_TREE_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_TREE, name) #define VARRAY_BITMAP_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_BITMAP, name) #define VARRAY_REG_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_REG, name) #define VARRAY_CONST_EQUIV_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_CONST_EQUIV, name) #define VARRAY_BB_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_BB, name) #define VARRAY_ELT_LIST_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_TE, name) #define VARRAY_EDGE_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_EDGE, name) #define VARRAY_TREE_PTR_INIT(va, num, name) \ va = varray_init (num, VARRAY_DATA_TREE_PTR, name) /* Free up memory allocated by the virtual array, but do not free any of the elements involved. */ #define VARRAY_FREE(vp) \ do { if (vp) { free (vp); vp = (varray_type) 0; } } while (0) /* Grow/shrink the virtual array VA to N elements. */ extern varray_type varray_grow (varray_type, size_t); #define VARRAY_GROW(VA, N) ((VA) = varray_grow (VA, N)) #define VARRAY_SIZE(VA) ((VA)->num_elements) #define VARRAY_ACTIVE_SIZE(VA) ((VA)->elements_used) #define VARRAY_POP_ALL(VA) ((VA)->elements_used = 0) #define VARRAY_CLEAR(VA) varray_clear(VA) extern void varray_clear (varray_type); extern void varray_copy (varray_type v1, varray_type v2); extern void dump_varray_statistics (void); /* Check for VARRAY_xxx macros being in bound. 
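   (With ENABLE_CHECKING defined and a recent enough GCC building the compiler, an out-of-range access such as VARRAY_INT (va, n) with n >= VARRAY_SIZE (va) calls varray_check_failed and aborts rather than silently reading past the end, and VARRAY_POP on an empty varray calls varray_underflow; without checking, the accessors expand to plain array references.)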
*/ #if defined ENABLE_CHECKING && (GCC_VERSION >= 2007) extern void varray_check_failed (varray_type, size_t, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void varray_underflow (varray_type, const char *, int, const char *) ATTRIBUTE_NORETURN; #define VARRAY_CHECK(VA, N, T) __extension__ \ (*({ varray_type const _va = (VA); \ const size_t _n = (N); \ if (_n >= _va->num_elements) \ varray_check_failed (_va, _n, __FILE__, __LINE__, __FUNCTION__); \ &_va->data.T[_n]; })) #define VARRAY_POP(VA) do { \ varray_type const _va = (VA); \ if (_va->elements_used == 0) \ varray_underflow (_va, __FILE__, __LINE__, __FUNCTION__); \ else \ _va->elements_used--; \ } while (0) #else #define VARRAY_CHECK(VA, N, T) ((VA)->data.T[N]) /* Pop the top element of VA. */ #define VARRAY_POP(VA) do { ((VA)->elements_used--); } while (0) #endif /* Push X onto VA. T is the name of the field in varray_data corresponding to the type of X. */ #define VARRAY_PUSH(VA, T, X) \ do \ { \ if ((VA)->elements_used >= (VA)->num_elements) \ VARRAY_GROW ((VA), 2 * (VA)->num_elements); \ (VA)->data.T[(VA)->elements_used++] = (X); \ } \ while (0) #define VARRAY_CHAR(VA, N) VARRAY_CHECK (VA, N, c) #define VARRAY_UCHAR(VA, N) VARRAY_CHECK (VA, N, uc) #define VARRAY_SHORT(VA, N) VARRAY_CHECK (VA, N, s) #define VARRAY_USHORT(VA, N) VARRAY_CHECK (VA, N, us) #define VARRAY_INT(VA, N) VARRAY_CHECK (VA, N, i) #define VARRAY_UINT(VA, N) VARRAY_CHECK (VA, N, u) #define VARRAY_LONG(VA, N) VARRAY_CHECK (VA, N, l) #define VARRAY_ULONG(VA, N) VARRAY_CHECK (VA, N, ul) #define VARRAY_WIDE_INT(VA, N) VARRAY_CHECK (VA, N, hint) #define VARRAY_UWIDE_INT(VA, N) VARRAY_CHECK (VA, N, uhint) #define VARRAY_GENERIC_PTR(VA,N) VARRAY_CHECK (VA, N, generic) #define VARRAY_GENERIC_PTR_NOGC(VA,N) VARRAY_CHECK (VA, N, generic_nogc) #define VARRAY_CHAR_PTR(VA,N) VARRAY_CHECK (VA, N, cptr) #define VARRAY_RTX(VA, N) VARRAY_CHECK (VA, N, rtx) #define VARRAY_RTVEC(VA, N) VARRAY_CHECK (VA, N, rtvec) #define VARRAY_TREE(VA, N) VARRAY_CHECK (VA, N, tree) #define VARRAY_BITMAP(VA, N) VARRAY_CHECK (VA, N, bitmap) #define VARRAY_REG(VA, N) VARRAY_CHECK (VA, N, reg) #define VARRAY_CONST_EQUIV(VA, N) VARRAY_CHECK (VA, N, const_equiv) #define VARRAY_BB(VA, N) VARRAY_CHECK (VA, N, bb) #define VARRAY_ELT_LIST(VA, N) VARRAY_CHECK (VA, N, te) #define VARRAY_EDGE(VA, N) VARRAY_CHECK (VA, N, e) #define VARRAY_TREE_PTR(VA, N) VARRAY_CHECK (VA, N, tp) /* Push a new element on the end of VA, extending it if necessary. 
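   VARRAY_PUSH doubles the allocation via VARRAY_GROW when the varray is full.  As an illustrative sketch only (not taken from GCC itself; the worklist name is made up), the push/pop interface is typically used like this:

     varray_type worklist;
     VARRAY_INT_INIT (worklist, 32, "worklist");
     VARRAY_PUSH_INT (worklist, bb->index);
     while (VARRAY_ACTIVE_SIZE (worklist) > 0)
       {
         int i = VARRAY_TOP_INT (worklist);
         VARRAY_POP (worklist);
         ... process block i, possibly pushing more indices ...
       }
     VARRAY_FREE (worklist);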
*/ #define VARRAY_PUSH_CHAR(VA, X) VARRAY_PUSH (VA, c, X) #define VARRAY_PUSH_UCHAR(VA, X) VARRAY_PUSH (VA, uc, X) #define VARRAY_PUSH_SHORT(VA, X) VARRAY_PUSH (VA, s, X) #define VARRAY_PUSH_USHORT(VA, X) VARRAY_PUSH (VA, us, X) #define VARRAY_PUSH_INT(VA, X) VARRAY_PUSH (VA, i, X) #define VARRAY_PUSH_UINT(VA, X) VARRAY_PUSH (VA, u, X) #define VARRAY_PUSH_LONG(VA, X) VARRAY_PUSH (VA, l, X) #define VARRAY_PUSH_ULONG(VA, X) VARRAY_PUSH (VA, ul, X) #define VARRAY_PUSH_WIDE_INT(VA, X) VARRAY_PUSH (VA, hint, X) #define VARRAY_PUSH_UWIDE_INT(VA, X) VARRAY_PUSH (VA, uhint, X) #define VARRAY_PUSH_GENERIC_PTR(VA, X) VARRAY_PUSH (VA, generic, X) #define VARRAY_PUSH_GENERIC_PTR_NOGC(VA, X) VARRAY_PUSH (VA, generic_nogc, X) #define VARRAY_PUSH_CHAR_PTR(VA, X) VARRAY_PUSH (VA, cptr, X) #define VARRAY_PUSH_RTX(VA, X) VARRAY_PUSH (VA, rtx, X) #define VARRAY_PUSH_RTVEC(VA, X) VARRAY_PUSH (VA, rtvec, X) #define VARRAY_PUSH_TREE(VA, X) VARRAY_PUSH (VA, tree, X) #define VARRAY_PUSH_BITMAP(VA, X) VARRAY_PUSH (VA, bitmap, X) #define VARRAY_PUSH_REG(VA, X) VARRAY_PUSH (VA, reg, X) #define VARRAY_PUSH_CONST_EQUIV(VA, X) VARRAY_PUSH (VA, const_equiv, X) #define VARRAY_PUSH_BB(VA, X) VARRAY_PUSH (VA, bb, X) #define VARRAY_PUSH_EDGE(VA, X) VARRAY_PUSH (VA, e, X) #define VARRAY_PUSH_TREE_PTR(VA, X) VARRAY_PUSH (VA, tp, X) /* Return the last element of VA. */ #define VARRAY_TOP(VA, T) VARRAY_CHECK(VA, (VA)->elements_used - 1, T) #define VARRAY_TOP_CHAR(VA) VARRAY_TOP (VA, c) #define VARRAY_TOP_UCHAR(VA) VARRAY_TOP (VA, uc) #define VARRAY_TOP_SHORT(VA) VARRAY_TOP (VA, s) #define VARRAY_TOP_USHORT(VA) VARRAY_TOP (VA, us) #define VARRAY_TOP_INT(VA) VARRAY_TOP (VA, i) #define VARRAY_TOP_UINT(VA) VARRAY_TOP (VA, u) #define VARRAY_TOP_LONG(VA) VARRAY_TOP (VA, l) #define VARRAY_TOP_ULONG(VA) VARRAY_TOP (VA, ul) #define VARRAY_TOP_WIDE_INT(VA) VARRAY_TOP (VA, hint) #define VARRAY_TOP_UWIDE_INT(VA) VARRAY_TOP (VA, uhint) #define VARRAY_TOP_GENERIC_PTR(VA) VARRAY_TOP (VA, generic) #define VARRAY_TOP_GENERIC_PTR_NOGC(VA) VARRAY_TOP (VA, generic_nogc) #define VARRAY_TOP_CHAR_PTR(VA) VARRAY_TOP (VA, cptr) #define VARRAY_TOP_RTX(VA) VARRAY_TOP (VA, rtx) #define VARRAY_TOP_RTVEC(VA) VARRAY_TOP (VA, rtvec) #define VARRAY_TOP_TREE(VA) VARRAY_TOP (VA, tree) #define VARRAY_TOP_BITMAP(VA) VARRAY_TOP (VA, bitmap) #define VARRAY_TOP_REG(VA) VARRAY_TOP (VA, reg) #define VARRAY_TOP_CONST_EQUIV(VA) VARRAY_TOP (VA, const_equiv) #define VARRAY_TOP_BB(VA) VARRAY_TOP (VA, bb) #define VARRAY_TOP_EDGE(VA) VARRAY_TOP (VA, e) #define VARRAY_TOP_TREE_PTR(VA) VARRAY_TOP (VA, tp) #endif /* ! GCC_VARRAY_H */ /* Definitions for branch prediction routines in the GNU compiler. Copyright (C) 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #ifndef GCC_PREDICT_H #define GCC_PREDICT_H #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) ENUM, enum br_predictor { /* Definitions for the branch prediction routines in the GNU compiler. Copyright (C) 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Before including this file, you should define a macro: DEF_PREDICTOR (ENUM, NAME, HITRATE, FLAGS) This macro will be called once for each predictor. The ENUM will be of type `enum br_predictor', and will enumerate all supported predictors. The order of DEF_PREDICTOR calls is important, as in the first-match combining heuristics, the predictor appearing first in this file will win. NAME is used in the debugging output to identify the predictor type. HITRATE is the probability that an edge predicted as taken by the predictor really is taken (so it should always be above REG_BR_PROB_BASE / 2). FLAGS gives additional information about the predictor, such as PRED_FLAG_FIRST_MATCH. */ /* A value used as the final outcome of all heuristics. */ DEF_PREDICTOR (PRED_COMBINED, "combined", PROB_ALWAYS, 0) /* An outcome estimated by Dempster-Shafer theory. */ DEF_PREDICTOR (PRED_DS_THEORY, "DS theory", PROB_ALWAYS, 0) /* A combined heuristic using the probability determined by the first matching heuristic from this list. */ DEF_PREDICTOR (PRED_FIRST_MATCH, "first match", PROB_ALWAYS, 0) /* Heuristic applying when no heuristic below applies. */ DEF_PREDICTOR (PRED_NO_PREDICTION, "no prediction", PROB_ALWAYS, 0) /* Mark unconditional jump as taken. */ DEF_PREDICTOR (PRED_UNCONDITIONAL, "unconditional jump", PROB_ALWAYS, PRED_FLAG_FIRST_MATCH) /* Use the number of loop iterations determined by the loop unroller to set the probability. We don't want to use Dempster-Shafer theory here, as the prediction is exact. */ DEF_PREDICTOR (PRED_LOOP_ITERATIONS, "loop iterations", PROB_ALWAYS, PRED_FLAG_FIRST_MATCH) /* Hints supplied by the user via the __builtin_expect feature. */ DEF_PREDICTOR (PRED_BUILTIN_EXPECT, "__builtin_expect", PROB_VERY_LIKELY, PRED_FLAG_FIRST_MATCH) /* Branch containing goto is probably not taken. */ DEF_PREDICTOR (PRED_CONTINUE, "continue", HITRATE (56), 0) /* Branch to basic block containing call marked by noreturn attribute. */ DEF_PREDICTOR (PRED_NORETURN, "noreturn call", HITRATE (99), PRED_FLAG_FIRST_MATCH) /* Loopback edge is taken. */ DEF_PREDICTOR (PRED_LOOP_BRANCH, "loop branch", HITRATE (89), PRED_FLAG_FIRST_MATCH) /* Edge causing loop to terminate is probably not taken. */ DEF_PREDICTOR (PRED_LOOP_EXIT, "loop exit", HITRATE (90), PRED_FLAG_FIRST_MATCH) /* Condition emitted by preconditioning code to ensure that the variable setting the number of iterations is greater than the initial value of the iterator. */ DEF_PREDICTOR (PRED_LOOP_CONDITION, "loop condition", PROB_VERY_LIKELY, 0) /* Preconditioning makes a linear list of branches. */ DEF_PREDICTOR (PRED_LOOP_PRECONDITIONING, "loop preconditioning", PROB_VERY_LIKELY, 0) /* Copied condition for the first iteration of loop is probably true.
*/ DEF_PREDICTOR (PRED_LOOP_HEADER, "loop header", HITRATE (64), 0) /* Pointers are usually not NULL. */ DEF_PREDICTOR (PRED_POINTER, "pointer", HITRATE (81), 0) DEF_PREDICTOR (PRED_TREE_POINTER, "pointer (on trees)", HITRATE (81), 0) /* NE is probable, EQ not etc... */ DEF_PREDICTOR (PRED_OPCODE_POSITIVE, "opcode values positive", HITRATE (79), 0) DEF_PREDICTOR (PRED_OPCODE_NONEQUAL, "opcode values nonequal", HITRATE (71), 0) DEF_PREDICTOR (PRED_FPOPCODE, "fp_opcode", HITRATE (90), 0) DEF_PREDICTOR (PRED_TREE_OPCODE_POSITIVE, "opcode values positive (on trees)", HITRATE (79), 0) DEF_PREDICTOR (PRED_TREE_OPCODE_NONEQUAL, "opcode values nonequal (on trees)", HITRATE (71), 0) DEF_PREDICTOR (PRED_TREE_FPOPCODE, "fp_opcode (on trees)", HITRATE (90), 0) /* Branch guarding call is probably taken. */ DEF_PREDICTOR (PRED_CALL, "call", HITRATE (70), 0) /* Branch causing function to terminate is probably not taken. */ DEF_PREDICTOR (PRED_EARLY_RETURN, "early return", HITRATE (67), 0) /* Branch containing goto is probably not taken. */ DEF_PREDICTOR (PRED_GOTO, "goto", HITRATE (70), 0) /* Branch ending with return constant is probably not taken. */ DEF_PREDICTOR (PRED_CONST_RETURN, "const return", HITRATE (95), 0) /* Branch ending with return negative constant is probably not taken. */ DEF_PREDICTOR (PRED_NEGATIVE_RETURN, "negative return", HITRATE (96), 0) /* Branch ending with return; is probably not taken */ DEF_PREDICTOR (PRED_NULL_RETURN, "null return", HITRATE (90), 0) /* Branches to a mudflap bounds check are extremely unlikely. */ DEF_PREDICTOR (PRED_MUDFLAP, "mudflap check", HITRATE (99), 0) /* Upper bound on non-language-specific builtins. */ END_PREDICTORS }; #undef DEF_PREDICTOR enum prediction { NOT_TAKEN, TAKEN }; /* Flags for NOTE_PREDICTION */ #define IS_TAKEN 1 /* Predict edges to the block as taken. */ extern void predict_insn_def (rtx, enum br_predictor, enum prediction); extern void predict_insn (rtx, enum br_predictor, int); #endif /* GCC_PREDICT_H */ /* Head of register set linked list. */ typedef bitmap_head regset_head; /* A pointer to a regset_head. */ typedef bitmap regset; /* Initialize a new regset. */ #define INIT_REG_SET(HEAD) bitmap_initialize (HEAD, 1) /* Clear a register set by freeing up the linked list. */ #define CLEAR_REG_SET(HEAD) bitmap_clear (HEAD) /* Copy a register set to another register set. */ #define COPY_REG_SET(TO, FROM) bitmap_copy (TO, FROM) /* Compare two register sets. */ #define REG_SET_EQUAL_P(A, B) bitmap_equal_p (A, B) /* `and' a register set with a second register set. */ #define AND_REG_SET(TO, FROM) bitmap_operation (TO, TO, FROM, BITMAP_AND) /* `and' the complement of a register set with a register set. */ #define AND_COMPL_REG_SET(TO, FROM) \ bitmap_operation (TO, TO, FROM, BITMAP_AND_COMPL) /* Inclusive or a register set with a second register set. */ #define IOR_REG_SET(TO, FROM) bitmap_operation (TO, TO, FROM, BITMAP_IOR) /* Exclusive or a register set with a second register set. */ #define XOR_REG_SET(TO, FROM) bitmap_operation (TO, TO, FROM, BITMAP_XOR) /* Or into TO the register set FROM1 `and'ed with the complement of FROM2. */ #define IOR_AND_COMPL_REG_SET(TO, FROM1, FROM2) \ bitmap_ior_and_compl (TO, FROM1, FROM2) /* Clear a single register in a register set. */ #define CLEAR_REGNO_REG_SET(HEAD, REG) bitmap_clear_bit (HEAD, REG) /* Set a single register in a register set. */ #define SET_REGNO_REG_SET(HEAD, REG) bitmap_set_bit (HEAD, REG) /* Return true if a register is set in a register set. 
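   As an illustrative sketch only (not taken from GCC itself; the names are made up), a typical regset lifecycle is:

     regset_head live_head;
     regset live = &live_head;
     INIT_REG_SET (live);
     SET_REGNO_REG_SET (live, regno);
     if (REGNO_REG_SET_P (live, regno))
       ... the register is in the set ...
     CLEAR_REGNO_REG_SET (live, regno);
     CLEAR_REG_SET (live);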
*/ #define REGNO_REG_SET_P(TO, REG) bitmap_bit_p (TO, REG) /* Copy the hard registers in a register set to the hard register set. */ extern void reg_set_to_hard_reg_set (HARD_REG_SET *, bitmap); #define REG_SET_TO_HARD_REG_SET(TO, FROM) \ do { \ CLEAR_HARD_REG_SET (TO); \ reg_set_to_hard_reg_set (&TO, FROM); \ } while (0) /* Loop over all registers in REGSET, starting with MIN, setting REGNUM to the register number and executing CODE for all registers that are set. */ #define EXECUTE_IF_SET_IN_REG_SET(REGSET, MIN, REGNUM, CODE) \ EXECUTE_IF_SET_IN_BITMAP (REGSET, MIN, REGNUM, CODE) /* Loop over all registers in REGSET1 and REGSET2, starting with MIN, setting REGNUM to the register number and executing CODE for all registers that are set in the first regset and not set in the second. */ #define EXECUTE_IF_AND_COMPL_IN_REG_SET(REGSET1, REGSET2, MIN, REGNUM, CODE) \ EXECUTE_IF_AND_COMPL_IN_BITMAP (REGSET1, REGSET2, MIN, REGNUM, CODE) /* Loop over all registers in REGSET1 and REGSET2, starting with MIN, setting REGNUM to the register number and executing CODE for all registers that are set in both regsets. */ #define EXECUTE_IF_AND_IN_REG_SET(REGSET1, REGSET2, MIN, REGNUM, CODE) \ EXECUTE_IF_AND_IN_BITMAP (REGSET1, REGSET2, MIN, REGNUM, CODE) /* Allocate a register set with oballoc. */ #define OBSTACK_ALLOC_REG_SET(OBSTACK) BITMAP_OBSTACK_ALLOC (OBSTACK) /* Initialize a register set. Returns the new register set. */ #define INITIALIZE_REG_SET(HEAD) bitmap_initialize (&HEAD, 1) /* Do any cleanup needed on a regset when it is no longer used. */ #define FREE_REG_SET(REGSET) BITMAP_FREE(REGSET) /* Do any one-time initializations needed for regsets. */ #define INIT_ONCE_REG_SET() BITMAP_INIT_ONCE () /* Grow any tables needed when the number of registers is calculated or extended. For the linked list allocation, nothing needs to be done, other than zero the statistics on the first allocation. */ #define MAX_REGNO_REG_SET(NUM_REGS, NEW_P, RENUMBER_P) /* Type we use to hold basic block counters. Should be at least 64bit. Although a counter cannot be negative, we use a signed type, because erroneous negative counts can be generated when the flow graph is manipulated by various optimizations. A signed type makes those easy to detect. */ typedef HOST_WIDEST_INT gcov_type; /* Control flow edge information. */ struct edge_def GTY((chain_next ("%h.pred_next"))) { /* Links through the predecessor and successor lists. */ struct edge_def *pred_next; struct edge_def *succ_next; /* The two blocks at the ends of the edge. */ struct basic_block_def *src; struct basic_block_def *dest; /* Instructions queued on the edge. */ union edge_def_insns { rtx GTY ((tag ("0"))) r; tree GTY ((tag ("1"))) t; } GTY ((desc ("ir_type ()"))) insns; /* Auxiliary info specific to a pass. */ PTR GTY ((skip (""))) aux; /* Location of any goto implicit in the edge, during tree-ssa. */ source_locus goto_locus; int flags; /* see EDGE_* below */ int probability; /* biased by REG_BR_PROB_BASE */ gcov_type count; /* Expected number of executions calculated in profile.c */ bool crossing_edge; /* Crosses between hot and cold sections, when we do partitioning. 
*/ }; typedef struct edge_def *edge; #define EDGE_FALLTHRU 1 /* 'Straight line' flow */ #define EDGE_ABNORMAL 2 /* Strange flow, like computed label, or eh */ #define EDGE_ABNORMAL_CALL 4 /* Call with abnormal exit like an exception, or sibcall */ #define EDGE_EH 8 /* Exception throw */ #define EDGE_FAKE 16 /* Not a real edge (profile.c) */ #define EDGE_DFS_BACK 32 /* A backwards edge */ #define EDGE_CAN_FALLTHRU 64 /* Candidate for straight line flow. */ #define EDGE_IRREDUCIBLE_LOOP 128 /* Part of irreducible loop. */ #define EDGE_SIBCALL 256 /* Edge from sibcall to exit. */ #define EDGE_LOOP_EXIT 512 /* Exit of a loop. */ #define EDGE_TRUE_VALUE 1024 /* Edge taken when controlling predicate is non zero. */ #define EDGE_FALSE_VALUE 2048 /* Edge taken when controlling predicate is zero. */ #define EDGE_EXECUTABLE 4096 /* Edge is executable. Only valid during SSA-CCP. */ #define EDGE_ALL_FLAGS 8191 #define EDGE_COMPLEX (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH) /* Counter summary from the last set of coverage counts read by profile.c. */ extern const struct gcov_ctr_summary *profile_info; /* Declared in cfgloop.h. */ struct loop; struct loops; /* Declared in tree-flow.h. */ struct bb_ann_d; /* A basic block is a sequence of instructions with only one entry and only one exit. If any one of the instructions is executed, they will all be executed, and in sequence from first to last. There may be COND_EXEC instructions in the basic block. The COND_EXEC *instructions* will be executed -- but if the condition is false the conditionally executed *expressions* will of course not be executed. We don't consider the conditionally executed expression (which might have side-effects) to be in a separate basic block because the program counter will always be at the same location after the COND_EXEC instruction, regardless of whether the condition is true or not. Basic blocks need not start with a label nor end with a jump insn. For example, a previous basic block may just "conditionally fall" into the succeeding basic block, and the last basic block need not end with a jump insn. Block 0 is a descendant of the entry block. A basic block beginning with two labels cannot have notes between the labels. Data for jump tables are stored in jump_insns that occur in no basic block even though these insns can follow or precede insns in basic blocks. */ /* Basic block information indexed by block number. */ struct basic_block_def GTY((chain_next ("%h.next_bb"), chain_prev ("%h.prev_bb"))) { /* The first and last insns of the block. */ rtx head_; rtx end_; /* Pointers to the first and last trees of the block. */ tree stmt_list; /* The edges into and out of the block. */ edge pred; edge succ; /* Liveness info. */ /* The registers that are modified within this block. */ bitmap GTY ((skip (""))) local_set; /* The registers that are conditionally modified within this block. In other words, registers that are set only as part of a COND_EXEC. */ bitmap GTY ((skip (""))) cond_local_set; /* The registers that are live on entry to this block. Note that in SSA form, global_live_at_start does not reflect the use of regs in phi functions, since the liveness of these regs may depend on which edge was taken into the block. */ bitmap GTY ((skip (""))) global_live_at_start; /* The registers that are live on exit from this block. */ bitmap GTY ((skip (""))) global_live_at_end; /* Auxiliary info specific to a pass. */ PTR GTY ((skip (""))) aux; /* The index of this block. */ int index; /* Previous and next blocks in the chain.
*/ struct basic_block_def *prev_bb; struct basic_block_def *next_bb; /* The loop depth of this block. */ int loop_depth; /* Innermost loop containing the block. */ struct loop * GTY ((skip (""))) loop_father; /* The dominance and postdominance information node. */ struct et_node * GTY ((skip (""))) dom[2]; /* Expected number of executions: calculated in profile.c. */ gcov_type count; /* Expected frequency. Normalized to be in range 0 to BB_FREQ_MAX. */ int frequency; /* Various flags. See BB_* below. */ int flags; /* Which section block belongs in, when partitioning basic blocks. */ int partition; /* The data used by basic block copying and reordering functions. */ struct reorder_block_def * GTY ((skip (""))) rbi; /* Annotations used at the tree level. */ struct bb_ann_d *tree_annotations; }; typedef struct basic_block_def *basic_block; /* Structure to hold information about the blocks during reordering and copying. */ typedef struct reorder_block_def { rtx header; rtx footer; basic_block next; basic_block original; /* Used by loop copying. */ basic_block copy; int duplicated; /* These fields are used by bb-reorder pass. */ int visited; } *reorder_block_def; #define BB_FREQ_MAX 10000 /* Masks for basic_block.flags. */ #define BB_DIRTY 1 #define BB_NEW 2 #define BB_REACHABLE 4 #define BB_VISITED 8 #define BB_IRREDUCIBLE_LOOP 16 #define BB_SUPERBLOCK 32 /* Partitions, to be used when partitioning hot and cold basic blocks into separate sections. */ #define UNPARTITIONED 0 #define HOT_PARTITION 1 #define COLD_PARTITION 2 /* Number of basic blocks in the current function. */ extern int n_basic_blocks; /* First free basic block number. */ extern int last_basic_block; /* Number of edges in the current function. */ extern int n_edges; /* Index by basic block number, get basic block struct info. */ extern GTY(()) varray_type basic_block_info; #define BASIC_BLOCK(N) (VARRAY_BB (basic_block_info, (N))) /* For iterating over basic blocks. */ #define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \ for (BB = FROM; BB != TO; BB = BB->DIR) #define FOR_EACH_BB(BB) \ FOR_BB_BETWEEN (BB, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb) #define FOR_EACH_BB_REVERSE(BB) \ FOR_BB_BETWEEN (BB, EXIT_BLOCK_PTR->prev_bb, ENTRY_BLOCK_PTR, prev_bb) /* For iterating over insns in basic block. */ #define FOR_BB_INSNS(BB, INSN) \ for ((INSN) = BB_HEAD (BB); \ (INSN) != NEXT_INSN (BB_END (BB)); \ (INSN) = NEXT_INSN (INSN)) #define FOR_BB_INSNS_REVERSE(BB, INSN) \ for ((INSN) = BB_END (BB); \ (INSN) != PREV_INSN (BB_HEAD (BB)); \ (INSN) = PREV_INSN (INSN)) /* Cycles through _all_ basic blocks, even the fake ones (entry and exit block). */ #define FOR_ALL_BB(BB) \ for (BB = ENTRY_BLOCK_PTR; BB; BB = BB->next_bb) /* What registers are live at the setjmp call. */ extern regset regs_live_at_setjmp; /* Special labels found during CFG build. */ extern GTY(()) rtx label_value_list; extern struct obstack flow_obstack; /* Indexed by n, gives number of basic block that (REG n) is used in. If the value is REG_BLOCK_GLOBAL (-2), it means (REG n) is used in more than one basic block. REG_BLOCK_UNKNOWN (-1) means it hasn't been seen yet so we don't know. This information remains valid for the rest of the compilation of the current function; it is used to control register allocation. */ #define REG_BLOCK_UNKNOWN -1 #define REG_BLOCK_GLOBAL -2 #define REG_BASIC_BLOCK(N) (VARRAY_REG (reg_n_info, N)->basic_block) /* Stuff for recording basic block info. 
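   As an illustrative sketch only (not taken from GCC itself), the iterators above combine with the edge chains in basic_block_def to walk the whole CFG:

     basic_block bb;
     rtx insn;
     edge e;
     FOR_EACH_BB (bb)
       {
         FOR_BB_INSNS (bb, insn)
           ... examine insn, from BB_HEAD (bb) up to BB_END (bb) ...
         for (e = bb->succ; e; e = e->succ_next)
           if (e->flags & EDGE_FALLTHRU)
             ... e falls through to e->dest ...
       }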
*/ #define BB_HEAD(B) (B)->head_ #define BB_END(B) (B)->end_ /* Special block numbers [markers] for entry and exit. */ #define ENTRY_BLOCK (-1) #define EXIT_BLOCK (-2) /* Special block number not valid for any block. */ #define INVALID_BLOCK (-3) /* Similarly, block pointers for the edge list. */ extern GTY(()) basic_block ENTRY_BLOCK_PTR; extern GTY(()) basic_block EXIT_BLOCK_PTR; #define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->index + 0) #define set_block_for_insn(INSN, BB) (BLOCK_FOR_INSN (INSN) = BB) extern void compute_bb_for_insn (void); extern void free_bb_for_insn (void); extern void update_bb_for_insn (basic_block); extern void free_basic_block_vars (void); extern void insert_insn_on_edge (rtx, edge); bool safe_insert_insn_on_edge (rtx, edge); extern void commit_edge_insertions (void); extern void commit_edge_insertions_watch_calls (void); extern void remove_fake_edges (void); extern void add_noreturn_fake_exit_edges (void); extern void connect_infinite_loops_to_exit (void); extern edge unchecked_make_edge (basic_block, basic_block, int); extern edge cached_make_edge (sbitmap *, basic_block, basic_block, int); extern edge make_edge (basic_block, basic_block, int); extern edge make_single_succ_edge (basic_block, basic_block, int); extern void remove_edge (edge); extern void redirect_edge_succ (edge, basic_block); extern edge redirect_edge_succ_nodup (edge, basic_block); extern void redirect_edge_pred (edge, basic_block); extern basic_block create_basic_block_structure (rtx, rtx, rtx, basic_block); extern void clear_bb_flags (void); extern void flow_reverse_top_sort_order_compute (int *); extern int flow_depth_first_order_compute (int *, int *); extern void flow_preorder_transversal_compute (int *); extern int dfs_enumerate_from (basic_block, int, bool (*)(basic_block, void *), basic_block *, int, void *); extern void dump_edge_info (FILE *, edge, int); extern void brief_dump_cfg (FILE *); extern void clear_edges (void); extern void mark_critical_edges (void); extern rtx first_insn_after_basic_block_note (basic_block); /* Structure to group all of the information to process IF-THEN and IF-THEN-ELSE blocks for the conditional execution support. This needs to be in a public file in case the IFCVT macros call functions passing the ce_if_block data structure. */ typedef struct ce_if_block { basic_block test_bb; /* First test block. */ basic_block then_bb; /* THEN block. */ basic_block else_bb; /* ELSE block or NULL. */ basic_block join_bb; /* Join THEN/ELSE blocks. */ basic_block last_test_bb; /* Last bb to hold && or || tests. */ int num_multiple_test_blocks; /* # of && and || basic blocks. */ int num_and_and_blocks; /* # of && blocks. */ int num_or_or_blocks; /* # of || blocks. */ int num_multiple_test_insns; /* # of insns in && and || blocks. */ int and_and_p; /* Complex test is &&. */ int num_then_insns; /* # of insns in THEN block. */ int num_else_insns; /* # of insns in ELSE block. */ int pass; /* Pass number. */ #ifdef IFCVT_EXTRA_FIELDS IFCVT_EXTRA_FIELDS /* Any machine dependent fields. */ #endif } ce_if_block_t; /* This structure maintains an edge list vector. */ struct edge_list { int num_blocks; int num_edges; edge *index_to_edge; }; /* This is the value which indicates no edge is present. */ #define EDGE_INDEX_NO_EDGE -1 /* EDGE_INDEX returns an integer index for an edge, or EDGE_INDEX_NO_EDGE if there is no edge between the 2 basic blocks. 
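   As an illustrative sketch only (not taken from GCC itself; bb1 and bb2 are made-up names), a pass that needs stable edge indices builds the compressed list once and then maps between edges and indices:

     struct edge_list *el = create_edge_list ();
     int i = EDGE_INDEX (el, bb1, bb2);
     if (i != EDGE_INDEX_NO_EDGE)
       {
         edge e = INDEX_EDGE (el, i);
         ... INDEX_EDGE_PRED_BB (el, i) is e->src and INDEX_EDGE_SUCC_BB (el, i) is e->dest ...
       }
     free_edge_list (el);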
*/ #define EDGE_INDEX(el, pred, succ) (find_edge_index ((el), (pred), (succ))) /* INDEX_EDGE_PRED_BB and INDEX_EDGE_SUCC_BB return a pointer to the basic block which is either the pred or succ end of the indexed edge. */ #define INDEX_EDGE_PRED_BB(el, index) ((el)->index_to_edge[(index)]->src) #define INDEX_EDGE_SUCC_BB(el, index) ((el)->index_to_edge[(index)]->dest) /* INDEX_EDGE returns a pointer to the edge. */ #define INDEX_EDGE(el, index) ((el)->index_to_edge[(index)]) /* Number of edges in the compressed edge list. */ #define NUM_EDGES(el) ((el)->num_edges) /* BB is assumed to contain conditional jump. Return the fallthru edge. */ #define FALLTHRU_EDGE(bb) ((bb)->succ->flags & EDGE_FALLTHRU \ ? (bb)->succ : (bb)->succ->succ_next) /* BB is assumed to contain conditional jump. Return the branch edge. */ #define BRANCH_EDGE(bb) ((bb)->succ->flags & EDGE_FALLTHRU \ ? (bb)->succ->succ_next : (bb)->succ) /* Return expected execution frequency of the edge E. */ #define EDGE_FREQUENCY(e) (((e)->src->frequency \ * (e)->probability \ + REG_BR_PROB_BASE / 2) \ / REG_BR_PROB_BASE) /* Return nonzero if edge is critical. */ #define EDGE_CRITICAL_P(e) ((e)->src->succ->succ_next \ && (e)->dest->pred->pred_next) struct edge_list * create_edge_list (void); void free_edge_list (struct edge_list *); void print_edge_list (FILE *, struct edge_list *); void verify_edge_list (FILE *, struct edge_list *); int find_edge_index (struct edge_list *, basic_block, basic_block); edge find_edge (basic_block, basic_block); enum update_life_extent { UPDATE_LIFE_LOCAL = 0, UPDATE_LIFE_GLOBAL = 1, UPDATE_LIFE_GLOBAL_RM_NOTES = 2 }; /* Flags for life_analysis and update_life_info. */ #define PROP_DEATH_NOTES 1 /* Create DEAD and UNUSED notes. */ #define PROP_LOG_LINKS 2 /* Create LOG_LINKS. */ #define PROP_REG_INFO 4 /* Update regs_ever_live et al. */ #define PROP_KILL_DEAD_CODE 8 /* Remove dead code. */ #define PROP_SCAN_DEAD_CODE 16 /* Scan for dead code. */ #define PROP_ALLOW_CFG_CHANGES 32 /* Allow the CFG to be changed by dead code removal. */ #define PROP_AUTOINC 64 /* Create autoinc mem references. */ #define PROP_EQUAL_NOTES 128 /* Take into account REG_EQUAL notes. */ #define PROP_SCAN_DEAD_STORES 256 /* Scan for dead code. */ #define PROP_ASM_SCAN 512 /* Internal flag used within flow.c to flag analysis of asms. */ #define PROP_FINAL (PROP_DEATH_NOTES | PROP_LOG_LINKS \ | PROP_REG_INFO | PROP_KILL_DEAD_CODE \ | PROP_SCAN_DEAD_CODE | PROP_AUTOINC \ | PROP_ALLOW_CFG_CHANGES \ | PROP_SCAN_DEAD_STORES) #define PROP_POSTRELOAD (PROP_DEATH_NOTES \ | PROP_KILL_DEAD_CODE \ | PROP_SCAN_DEAD_CODE | PROP_AUTOINC \ | PROP_SCAN_DEAD_STORES) #define CLEANUP_EXPENSIVE 1 /* Do relatively expensive optimizations except for edge forwarding */ #define CLEANUP_CROSSJUMP 2 /* Do crossjumping. */ #define CLEANUP_POST_REGSTACK 4 /* We run after reg-stack and need to care REG_DEAD notes. */ #define CLEANUP_PRE_LOOP 8 /* Take care to preserve syntactic loop notes. */ #define CLEANUP_UPDATE_LIFE 16 /* Keep life information up to date. */ #define CLEANUP_THREADING 32 /* Do jump threading. */ #define CLEANUP_NO_INSN_DEL 64 /* Do not try to delete trivially dead insns. */ #define CLEANUP_CFGLAYOUT 128 /* Do cleanup in cfglayout mode. */ #define CLEANUP_LOG_LINKS 256 /* Update log links. 
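   (Passes combine these flags into a mask: the PROP_* flags above are passed to life_analysis and update_life_info, and the CLEANUP_* flags to cleanup_cfg, declared below.  An illustrative call, not taken from any particular pass, might be cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE).)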
*/ extern void life_analysis (FILE *, int); extern int update_life_info (sbitmap, enum update_life_extent, int); extern int update_life_info_in_dirty_blocks (enum update_life_extent, int); extern int count_or_remove_death_notes (sbitmap, int); extern int propagate_block (basic_block, regset, regset, regset, int); struct propagate_block_info; extern rtx propagate_one_insn (struct propagate_block_info *, rtx); extern struct propagate_block_info *init_propagate_block_info (basic_block, regset, regset, regset, int); extern void free_propagate_block_info (struct propagate_block_info *); /* In lcm.c */ extern struct edge_list *pre_edge_lcm (FILE *, int, sbitmap *, sbitmap *, sbitmap *, sbitmap *, sbitmap **, sbitmap **); extern struct edge_list *pre_edge_rev_lcm (FILE *, int, sbitmap *, sbitmap *, sbitmap *, sbitmap *, sbitmap **, sbitmap **); extern void compute_available (sbitmap *, sbitmap *, sbitmap *, sbitmap *); extern int optimize_mode_switching (FILE *); /* In emit-rtl.c. */ extern rtx emit_block_insn_after (rtx, rtx, basic_block); extern rtx emit_block_insn_before (rtx, rtx, basic_block); /* In predict.c */ extern void estimate_probability (struct loops *); extern void note_prediction_to_br_prob (void); extern void expected_value_to_br_prob (void); extern bool maybe_hot_bb_p (basic_block); extern bool probably_cold_bb_p (basic_block); extern bool probably_never_executed_bb_p (basic_block); extern bool tree_predicted_by_p (basic_block, enum br_predictor); extern bool rtl_predicted_by_p (basic_block, enum br_predictor); extern void tree_predict_edge (edge, enum br_predictor, int); extern void rtl_predict_edge (edge, enum br_predictor, int); extern void predict_edge_def (edge, enum br_predictor, enum prediction); /* In flow.c */ extern void init_flow (void); extern void debug_bb (basic_block); extern basic_block debug_bb_n (int); extern void dump_regset (regset, FILE *); extern void debug_regset (regset); extern void allocate_reg_life_data (void); extern void allocate_bb_life_data (void); extern void expunge_block (basic_block); extern void link_block (basic_block, basic_block); extern void unlink_block (basic_block); extern void compact_blocks (void); extern basic_block alloc_block (void); extern void find_unreachable_blocks (void); extern int delete_noop_moves (void); extern basic_block force_nonfallthru (edge); extern rtx block_label (basic_block); extern bool forwarder_block_p (basic_block); extern bool purge_all_dead_edges (int); extern bool purge_dead_edges (basic_block); extern void find_sub_basic_blocks (basic_block); extern void find_many_sub_basic_blocks (sbitmap); extern void rtl_make_eh_edge (sbitmap *, basic_block, rtx); extern bool can_fallthru (basic_block, basic_block); extern bool could_fall_through (basic_block, basic_block); extern void flow_nodes_print (const char *, const sbitmap, FILE *); extern void flow_edge_list_print (const char *, const edge *, int, FILE *); extern void alloc_aux_for_block (basic_block, int); extern void alloc_aux_for_blocks (int); extern void clear_aux_for_blocks (void); extern void free_aux_for_blocks (void); extern void alloc_aux_for_edge (edge, int); extern void alloc_aux_for_edges (int); extern void clear_aux_for_edges (void); extern void free_aux_for_edges (void); extern void find_basic_blocks (rtx, int, FILE *); extern bool cleanup_cfg (int); extern bool delete_unreachable_blocks (void); extern bool merge_seq_blocks (void); typedef struct conflict_graph_def *conflict_graph; /* Callback function when enumerating conflicts. 
The arguments are the smaller and larger regno in the conflict. Returns zero if enumeration is to continue, nonzero to halt enumeration. */ typedef int (*conflict_graph_enum_fn) (int, int, void *); /* Prototypes of operations on conflict graphs. */ extern conflict_graph conflict_graph_new (int); extern void conflict_graph_delete (conflict_graph); extern int conflict_graph_add (conflict_graph, int, int); extern int conflict_graph_conflict_p (conflict_graph, int, int); extern void conflict_graph_enum (conflict_graph, int, conflict_graph_enum_fn, void *); extern void conflict_graph_merge_regs (conflict_graph, int, int); extern void conflict_graph_print (conflict_graph, FILE*); extern conflict_graph conflict_graph_compute (regset, partition); extern bool mark_dfs_back_edges (void); extern void set_edge_can_fallthru_flag (void); extern void update_br_prob_note (basic_block); extern void fixup_abnormal_edges (void); extern bool can_hoist_insn_p (rtx, rtx, regset); extern rtx hoist_insn_after (rtx, rtx, rtx, rtx); extern rtx hoist_insn_to_edge (rtx, edge, rtx, rtx); extern bool inside_basic_block_p (rtx); extern bool control_flow_insn_p (rtx); /* In bb-reorder.c */ extern void reorder_basic_blocks (void); extern void partition_hot_cold_basic_blocks (void); /* In cfg.c */ extern void alloc_rbi_pool (void); extern void initialize_bb_rbi (basic_block bb); extern void free_rbi_pool (void); /* In dominance.c */ enum cdi_direction { CDI_DOMINATORS, CDI_POST_DOMINATORS }; enum dom_state { DOM_NONE, /* Not computed at all. */ DOM_CONS_OK, /* The data is conservatively OK, i.e. if it says you that A dominates B, it indeed does. */ DOM_NO_FAST_QUERY, /* The data is OK, but the fast query data are not usable. */ DOM_OK /* Everything is ok. */ }; extern enum dom_state dom_computed[2]; extern void calculate_dominance_info (enum cdi_direction); extern void free_dominance_info (enum cdi_direction); extern basic_block nearest_common_dominator (enum cdi_direction, basic_block, basic_block); extern void set_immediate_dominator (enum cdi_direction, basic_block, basic_block); extern basic_block get_immediate_dominator (enum cdi_direction, basic_block); extern bool dominated_by_p (enum cdi_direction, basic_block, basic_block); extern int get_dominated_by (enum cdi_direction, basic_block, basic_block **); extern void add_to_dominance_info (enum cdi_direction, basic_block); extern void delete_from_dominance_info (enum cdi_direction, basic_block); basic_block recount_dominator (enum cdi_direction, basic_block); extern void redirect_immediate_dominators (enum cdi_direction, basic_block, basic_block); extern void iterate_fix_dominators (enum cdi_direction, basic_block *, int); extern void verify_dominators (enum cdi_direction); extern basic_block first_dom_son (enum cdi_direction, basic_block); extern basic_block next_dom_son (enum cdi_direction, basic_block); extern edge try_redirect_by_replacing_jump (edge, basic_block, bool); extern void break_superblocks (void); /* Hooks for cfg representation specific functions. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Sebastian Pop This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_CFGHOOKS_H #define GCC_CFGHOOKS_H struct cfg_hooks { /* Name of the corresponding IR. */ const char *name; /* Debugging. */ int (*verify_flow_info) (void); void (*dump_bb) (basic_block, FILE *, int); /* Basic CFG manipulation. */ /* Return a new basic block. */ basic_block (*create_basic_block) (void *head, void *end, basic_block after); /* Redirect edge E to the given basic block B and update underlying program representation. Returns the edge representing the redirected branch (which may not be equivalent to E in the case of duplicate edges being removed) or NULL if the edge is not easily redirectable for whatever reason. */ edge (*redirect_edge_and_branch) (edge e, basic_block b); /* Same as the above but allows redirecting of fallthru edges. In that case the newly created forwarder basic block is returned. It aborts when called on an abnormal edge. */ basic_block (*redirect_edge_and_branch_force) (edge, basic_block); /* Remove statements corresponding to a given basic block. */ void (*delete_basic_block) (basic_block); /* Creates a new basic block just after basic block B by splitting everything after the specified instruction I. */ basic_block (*split_block) (basic_block b, void * i); /* Move block B immediately after block A. */ bool (*move_block_after) (basic_block b, basic_block a); /* Return true when blocks A and B can be merged into a single basic block. */ bool (*can_merge_blocks_p) (basic_block a, basic_block b); /* Merge blocks A and B. */ void (*merge_blocks) (basic_block a, basic_block b); /* Predict edge E using PREDICTOR with the given PROBABILITY. */ void (*predict_edge) (edge e, enum br_predictor predictor, int probability); /* Return true if one of the outgoing edges is already predicted by PREDICTOR. */ bool (*predicted_by_p) (basic_block bb, enum br_predictor predictor); /* Return true when block A can be duplicated. */ bool (*can_duplicate_block_p) (basic_block a); /* Duplicate block A. */ basic_block (*duplicate_block) (basic_block a); /* Higher-level functions that would be representable by the primitive operations above if we didn't have some oddities in the RTL and Tree representations. */ basic_block (*split_edge) (edge); void (*make_forwarder_block) (edge); /* Tries to make the edge fallthru. */ void (*tidy_fallthru_edge) (edge); /* Say whether a block ends with a call, possibly followed by some other code that must stay with the call. */ bool (*block_ends_with_call_p) (basic_block); /* Say whether a block ends with a conditional branch. Switches and unconditional branches do not qualify. */ bool (*block_ends_with_condjump_p) (basic_block); /* Add fake edges to the function exit for any non-constant and non-noreturn calls, and for volatile inline assembly, in the bitmap of blocks specified by BLOCKS, or in the whole CFG if BLOCKS is zero. Return the number of blocks that were split. The goal is to expose cases in which entering a basic block does not imply that all subsequent instructions must be executed.
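   Passes normally do not call these hooks directly: the IR-independent wrappers declared below (verify_flow_info, split_edge, merge_blocks and so on) dispatch through whichever hooks table was most recently installed with tree_register_cfg_hooks, rtl_register_cfg_hooks or cfg_layout_rtl_register_cfg_hooks, and ir_type reports which representation is current.  As an illustrative sketch only, not a recipe from this file:

     rtl_register_cfg_hooks ();
     if (can_merge_blocks_p (a, b))
       merge_blocks (a, b);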
*/ int (*flow_call_edges_add) (sbitmap); }; extern void verify_flow_info (void); extern void dump_bb (basic_block, FILE *, int); extern edge redirect_edge_and_branch (edge, basic_block); extern basic_block redirect_edge_and_branch_force (edge, basic_block); extern edge split_block (basic_block, void *); extern edge split_block_after_labels (basic_block); extern bool move_block_after (basic_block, basic_block); extern void delete_basic_block (basic_block); extern basic_block split_edge (edge); extern basic_block create_basic_block (void *, void *, basic_block); extern basic_block create_empty_bb (basic_block); extern bool can_merge_blocks_p (basic_block, basic_block); extern void merge_blocks (basic_block, basic_block); extern edge make_forwarder_block (basic_block, bool (*)(edge), void (*) (basic_block)); extern void tidy_fallthru_edge (edge); extern void tidy_fallthru_edges (void); extern void predict_edge (edge e, enum br_predictor predictor, int probability); extern bool predicted_by_p (basic_block bb, enum br_predictor predictor); extern bool can_duplicate_block_p (basic_block); extern basic_block duplicate_block (basic_block, edge); extern bool block_ends_with_call_p (basic_block bb); extern bool block_ends_with_condjump_p (basic_block bb); extern int flow_call_edges_add (sbitmap); /* Hooks containers. */ extern struct cfg_hooks tree_cfg_hooks; extern struct cfg_hooks rtl_cfg_hooks; extern struct cfg_hooks cfg_layout_rtl_cfg_hooks; /* Declarations. */ extern int ir_type (void); extern void rtl_register_cfg_hooks (void); extern void cfg_layout_rtl_register_cfg_hooks (void); extern void tree_register_cfg_hooks (void); #endif /* GCC_CFGHOOKS_H */ #endif /* GCC_BASIC_BLOCK_H */ /* Compilation switch flag definitions for GCC. Copyright (C) 1987, 1988, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_FLAGS_H #define GCC_FLAGS_H /* This file is auto-generated by opts.sh. */ #ifndef OPTIONS_H #define OPTIONS_H /* Set by -Wabi. Warn about things that will change when compiling with an ABI-compliant compiler */ extern int warn_abi; /* Set by -Waggregate-return. Warn about returning structures, unions or arrays */ extern int warn_aggregate_return; /* Set by -Wbad-function-cast. Warn about casting functions to incompatible types */ extern int warn_bad_function_cast; /* Set by -Wcast-align. Warn about pointer casts which increase alignment */ extern int warn_cast_align; /* Set by -Wcast-qual. Warn about casts which discard qualifiers */ extern int warn_cast_qual; /* Set by -Wchar-subscripts. Warn about subscripts whose type is \"char\" */ extern int warn_char_subscripts; /* Set by -Wconversion. Warn about possibly confusing type conversions */ extern int warn_conversion; /* Set by -Wctor-dtor-privacy. 
Warn when all constructors and destructors are private */ extern int warn_ctor_dtor_privacy; /* Set by -Wdeclaration-after-statement. Warn when a declaration is found after a statement */ extern int warn_declaration_after_statement; /* Set by -Wdeprecated. Warn about deprecated compiler features */ extern int warn_deprecated; /* Set by -Wdeprecated-declarations. Warn about uses of __attribute__((deprecated)) declarations */ extern int warn_deprecated_decl; /* Set by -Wdisabled-optimization. Warn when an optimization pass is disabled */ extern int warn_disabled_optimization; /* Set by -Wdiv-by-zero. Warn about compile-time integer division by zero */ extern int warn_div_by_zero; /* Set by -Weffc++. Warn about violations of Effective C++ style rules */ extern int warn_ecpp; /* Set by -Werror. Treat all warnings as errors */ extern int warnings_are_errors; /* Set by -Wfatal-errors. Exit on the first error occurred */ extern int flag_fatal_errors; /* Set by -Wfloat-equal. Warn if testing floating point numbers for equality */ extern int warn_float_equal; /* Set by -Wformat-extra-args. Warn if passing too many arguments to a function for its format string */ extern int warn_format_extra_args; /* Set by -Wformat-nonliteral. Warn about format strings that are not literals */ extern int warn_format_nonliteral; /* Set by -Wformat-security. Warn about possible security problems with format functions */ extern int warn_format_security; /* Set by -Wformat-y2k. Warn about strftime formats yielding 2-digit years */ extern int warn_format_y2k; /* Set by -Wformat-zero-length. Warn about zero-length formats */ extern int warn_format_zero_length; /* Set by -Wimplicit-function-declaration. Warn about implicit function declarations */ extern int mesg_implicit_function_declaration; /* Set by -Wimplicit-int. Warn when a declaration does not specify a type */ extern int warn_implicit_int; /* Set by -Winit-self. Warn about variables which are initialized to themselves. */ extern int warn_init_self; /* Set by -Winline. Warn when an inlined function cannot be inlined */ extern int warn_inline; /* Set by -Winvalid-offsetof. Warn about invalid uses of the \"offsetof\" macro */ extern int warn_invalid_offsetof; /* Set by -Wlong-long. Do not warn about using \"long long\" when -pedantic */ extern int warn_long_long; /* Set by -Wmissing-braces. Warn about possibly missing braces around initializers */ extern int warn_missing_braces; /* Set by -Wmissing-declarations. Warn about global functions without previous declarations */ extern int warn_missing_declarations; /* Set by -Wmissing-format-attribute. Warn about functions which might be candidates for format attributes */ extern int warn_missing_format_attribute; /* Set by -Wmissing-noreturn. Warn about functions which might be candidates for __attribute__((noreturn)) */ extern int warn_missing_noreturn; /* Set by -Wmissing-prototypes. Warn about global functions without prototypes */ extern int warn_missing_prototypes; /* Set by -Wnested-externs. Warn about \"extern\" declarations not at file scope */ extern int warn_nested_externs; /* Set by -Wnon-template-friend. Warn when non-templatized friend functions are declared within a template */ extern int warn_nontemplate_friend; /* Set by -Wnon-virtual-dtor. Warn about non-virtual destructors */ extern int warn_nonvdtor; /* Set by -Wnonnull. Warn about NULL being passed to argument slots marked as requiring non-NULL */ extern int warn_nonnull; /* Set by -Wold-style-cast. 
Warn if a C-style cast is used in a program */ extern int warn_old_style_cast; /* Set by -Wold-style-definition. Warn if an old-style parameter definition is used */ extern int warn_old_style_definition; /* Set by -Woverloaded-virtual. Warn about overloaded virtual function names */ extern int warn_overloaded_virtual; /* Set by -Wpacked. Warn when the packed attribute has no effect on struct layout */ extern int warn_packed; /* Set by -Wpadded. Warn when padding is required to align structure members */ extern int warn_padded; /* Set by -Wparentheses. Warn about possibly missing parentheses */ extern int warn_parentheses; /* Set by -Wpmf-conversions. Warn when converting the type of pointers to member functions */ extern int warn_pmf2ptr; /* Set by -Wpointer-arith. Warn about function pointer arithmetic */ extern int warn_pointer_arith; /* Set by -Wprotocol. Warn if inherited methods are unimplemented */ extern int warn_protocol; /* Set by -Wredundant-decls. Warn about multiple declarations of the same object */ extern int warn_redundant_decls; /* Set by -Wreorder. Warn when the compiler reorders code */ extern int warn_reorder; /* Set by -Wselector. Warn if a selector has multiple methods */ extern int warn_selector; /* Set by -Wsequence-point. Warn about possible violations of sequence point rules */ extern int warn_sequence_point; /* Set by -Wshadow. Warn when one local variable shadows another */ extern int warn_shadow; /* Set by -Wsign-compare. Warn about signed-unsigned comparisons */ extern int warn_sign_compare; /* Set by -Wsign-promo. Warn when overload promotes from unsigned to signed */ extern int warn_sign_promo; /* Set by -Wstrict-prototypes. Warn about unprototyped function declarations */ extern int warn_strict_prototypes; /* Set by -Wswitch. Warn about enumerated switches, with no default, missing a case */ extern int warn_switch; /* Set by -Wswitch-default. Warn about enumerated switches missing a \"default:\" statement */ extern int warn_switch_default; /* Set by -Wswitch-enum. Warn about all enumerated switches missing a specific case */ extern int warn_switch_enum; /* Set by -Wsynth. Warn when synthesis behavior differs from Cfront */ extern int warn_synth; /* Set by -Wsystem-headers. Do not suppress warnings from system headers */ extern int warn_system_headers; /* Set by -Wtraditional. Warn about features not present in traditional C */ extern int warn_traditional; /* Set by -Wundeclared-selector. Warn about @selector()s without previously declared methods */ extern int warn_undeclared_selector; /* Set by -Wuninitialized. Warn about uninitialized automatic variables */ extern int warn_uninitialized; /* Set by -Wunreachable-code. Warn about code that will never be executed */ extern int warn_notreached; /* Set by -Wunused-function. Warn when a function is unused */ extern int warn_unused_function; /* Set by -Wunused-label. Warn when a label is unused */ extern int warn_unused_label; /* Set by -Wunused-parameter. Warn when a function parameter is unused */ extern int warn_unused_parameter; /* Set by -Wunused-value. Warn when an expression value is unused */ extern int warn_unused_value; /* Set by -Wunused-variable. Warn when a variable is unused */ extern int warn_unused_variable; /* Set by -fPIC. */ extern int flag_pic; /* Set by -fPIE. */ extern int flag_pie; /* Set by -fabi-version=. */ extern int flag_abi_version; /* Set by -falign-functions. Align the start of functions */ extern int align_functions; /* Set by -falign-jumps. 
Align labels which are only reached by jumping */ extern int align_jumps; /* Set by -falign-labels. Align all labels */ extern int align_labels; /* Set by -falign-loops. Align the start of loops */ extern int align_loops; /* Set by -fargument-alias. Specify that arguments may alias each other and globals */ extern int flag_argument_noalias; /* Set by -fargument-noalias. Assume arguments may alias globals but not each other */ extern int flag_argument_noalias; /* Set by -fargument-noalias-global. Assume arguments alias neither each other nor globals */ extern int flag_argument_noalias; /* Set by -fasynchronous-unwind-tables. Generate unwind tables that are exact at each instruction boundary */ extern int flag_asynchronous_unwind_tables; /* Set by -fbounds-check. Generate code to check bounds before indexing arrays */ extern int flag_bounds_check; /* Set by -fbranch-count-reg. Replace add, compare, branch with branch on count register */ extern int flag_branch_on_count_reg; /* Set by -fbranch-probabilities. Use profiling information for branch probabilities */ extern int flag_branch_probabilities; /* Set by -fbranch-target-load-optimize. Perform branch target load optimization before prologue / epilogue threading */ extern int flag_branch_target_load_optimize; /* Set by -fbranch-target-load-optimize2. Perform branch target load optimization after prologue / epilogue threading */ extern int flag_branch_target_load_optimize2; /* Set by -fbtr-bb-exclusive. Restrict target load migration not to re-use registers in any basic block */ extern int flag_btr_bb_exclusive; /* Set by -fcaller-saves. Save registers around function calls */ extern int flag_caller_saves; /* Set by -fcommon. Do not put uninitialized globals in the common section */ extern int flag_no_common; /* Set by -fcprop-registers. Perform a register copy-propagation optimization pass */ extern int flag_cprop_registers; /* Set by -fcrossjumping. Perform cross-jumping optimization */ extern int flag_crossjumping; /* Set by -fcse-follow-jumps. When running CSE, follow jumps to their targets */ extern int flag_cse_follow_jumps; /* Set by -fcse-skip-blocks. When running CSE, follow conditional jumps */ extern int flag_cse_skip_blocks; /* Set by -fdata-sections. Place data items into their own section */ extern int flag_data_sections; /* Set by -fdefer-pop. Defer popping functions args from stack until later */ extern int flag_defer_pop; /* Set by -fdelayed-branch. Attempt to fill delay slots of branch instructions */ extern int flag_delayed_branch; /* Set by -fdelete-null-pointer-checks. Delete useless null pointer checks */ extern int flag_delete_null_pointer_checks; /* Set by -fdump-unnumbered. Suppress output of instruction numbers and line number notes in debugging dumps */ extern int flag_dump_unnumbered; /* Set by -feliminate-dwarf2-dups. Perform DWARF2 duplicate elimination */ extern int flag_eliminate_dwarf2_dups; /* Set by -feliminate-unused-debug-symbols. Perform unused type elimination in debug info */ extern int flag_debug_only_used_symbols; /* Set by -feliminate-unused-debug-types. Perform unused type elimination in debug info */ extern int flag_eliminate_unused_debug_types; /* Set by -fexceptions. Enable exception handling */ extern int flag_exceptions; /* Set by -fexpensive-optimizations. Perform a number of minor, expensive optimizations */ extern int flag_expensive_optimizations; /* Set by -ffinite-math-only. Assume no NaNs or infinities are generated */ extern int flag_finite_math_only; /* Set by -ffloat-store. 
Do not store floats in registers */ extern int flag_float_store; /* Set by -fforce-addr. Copy memory address constants into registers before use */ extern int flag_force_addr; /* Set by -fforce-mem. Copy memory operands into registers before use */ extern int flag_force_mem; /* Set by -ffunction-cse. Allow function addresses to be held in registers */ extern int flag_no_function_cse; /* Set by -ffunction-sections. Place each function into its own section */ extern int flag_function_sections; /* Set by -fgcse. Perform global common subexpression elimination */ extern int flag_gcse; /* Set by -fgcse-after-reload. Perform global common subexpression elimination after register allocation */ extern int flag_gcse_after_reload; /* Set by -fgcse-las. Perform redundant load after store elimination in global common subexpression */ extern int flag_gcse_las; /* Set by -fgcse-lm. Perform enhanced load motion during global common subexpression elimination */ extern int flag_gcse_lm; /* Set by -fgcse-sm. Perform store motion after global common subexpression elimination */ extern int flag_gcse_sm; /* Set by -fguess-branch-probability. Enable guessing of branch probabilities */ extern int flag_guess_branch_prob; /* Set by -fident. Process #ident directives */ extern int flag_no_ident; /* Set by -fif-conversion. Perform conversion of conditional jumps to branchless equivalents */ extern int flag_if_conversion; /* Set by -fif-conversion2. Perform conversion of conditional jumps to conditional execution */ extern int flag_if_conversion2; /* Set by -finhibit-size-directive. Do not generate .size directives */ extern int flag_inhibit_size_directive; /* Set by -finline. Pay attention to the \"inline\" keyword */ extern int flag_no_inline; /* Set by -finline-functions. Integrate simple functions into their callers */ extern int flag_inline_functions; /* Set by -finstrument-functions. Instrument function entry and exit with profiling calls */ extern int flag_instrument_function_entry_exit; /* Set by -fkeep-inline-functions. Generate code for functions even if they are fully inlined */ extern int flag_keep_inline_functions; /* Set by -fkeep-static-consts. Emit static const variables even if they are not used */ extern int flag_keep_static_consts; /* Set by -fleading-underscore. Give external symbols a leading underscore */ extern int flag_leading_underscore; /* Set by -floop-optimize. Perform loop optimizations */ extern int flag_loop_optimize; /* Set by -floop-optimize2. Perform loop optimizations using the new loop optimizer */ extern int flag_loop_optimize2; /* Set by -fmath-errno. Set errno after built-in math functions */ extern int flag_errno_math; /* Set by -fmem-report. Report on permanent memory allocation */ extern int mem_report; /* Set by -fmerge-all-constants. Attempt to merge identical constants and constant variables */ extern int flag_merge_constants; /* Set by -fmerge-constants. Attempt to merge identical constants across compilation units */ extern int flag_merge_constants; /* Set by -fmodulo-sched. Perform SMS based modulo scheduling before the first scheduling pass */ extern int flag_modulo_sched; /* Set by -fmove-all-movables. Force all loop invariant computations out of loops */ extern int flag_move_all_movables; /* Set by -fmove-loop-invariants. Move loop invariant computations out of loops */ extern int flag_move_loop_invariants; /* Set by -fmudflap. Add mudflap bounds-checking instrumentation for single-threaded program. */ extern int flag_mudflap; /* Set by -fmudflapir. 
Ignore read operations when inserting mudflap instrumentation. */ extern int flag_mudflap_ignore_reads; /* Set by -fmudflapth. Add mudflap bounds-checking instrumentation for multi-threaded program. */ extern int flag_mudflap_threads; /* Set by -fnew-ra. Use graph-coloring register allocation */ extern int flag_new_regalloc; /* Set by -fnon-call-exceptions. Support synchronous non-call exceptions */ extern int flag_non_call_exceptions; /* Set by -fold-unroll-all-loops. Perform loop unrolling for all loops */ extern int flag_old_unroll_all_loops; /* Set by -fold-unroll-loops. Perform loop unrolling when iteration count is known */ extern int flag_old_unroll_loops; /* Set by -fomit-frame-pointer. When possible do not generate stack frames */ extern int flag_omit_frame_pointer; /* Set by -foptimize-register-move. Do the full register move optimization pass */ extern int flag_regmove; /* Set by -foptimize-sibling-calls. Optimize sibling and tail recursive calls */ extern int flag_optimize_sibling_calls; /* Set by -fpack-struct. Pack structure members together without holes */ extern int flag_pack_struct; /* Set by -fpcc-struct-return. Return small aggregates in memory, not registers */ extern int flag_pcc_struct_return; /* Set by -fpeel-loops. Perform loop peeling */ extern int flag_peel_loops; /* Set by -fpeephole. Enable machine specific peephole optimizations */ extern int flag_no_peephole; /* Set by -fpeephole2. Enable an RTL peephole pass before sched2 */ extern int flag_peephole2; /* Set by -fpic. Generate position-independent code if possible */ extern int flag_pic; /* Set by -fpie. Generate position-independent code for executables if possible */ extern int flag_pie; /* Set by -fprefetch-loop-arrays. Generate prefetch instructions, if available, for arrays in loops */ extern int flag_prefetch_loop_arrays; /* Set by -fprofile. Enable basic program profiling code */ extern int profile_flag; /* Set by -fprofile-arcs. Insert arc-based program profiling code */ extern int profile_arc_flag; /* Set by -fprofile-values. Insert code to profile values of expressions */ extern int flag_profile_values; /* Set by -freduce-all-givs. Strength reduce all loop general induction variables */ extern int flag_reduce_all_givs; /* Set by -freg-struct-return. Return small aggregates in registers */ extern int flag_pcc_struct_return; /* Set by -fregmove. Enables a register move optimization */ extern int flag_regmove; /* Set by -frename-registers. Perform a register renaming optimization pass */ extern int flag_rename_registers; /* Set by -freorder-blocks. Reorder basic blocks to improve code placement */ extern int flag_reorder_blocks; /* Set by -freorder-blocks-and-partition. Reorder basic blocks and partition into hot and cold sections */ extern int flag_reorder_blocks_and_partition; /* Set by -freorder-functions. Reorder functions to improve code placement */ extern int flag_reorder_functions; /* Set by -frerun-cse-after-loop. Add a common subexpression elimination pass after loop optimizations */ extern int flag_rerun_cse_after_loop; /* Set by -frerun-loop-opt. Run the loop optimizer twice */ extern int flag_rerun_loop_opt; /* Set by -frounding-math. Disable optimizations that assume default FP rounding behavior */ extern int flag_rounding_math; /* Set by -fsched-interblock. Enable scheduling across basic blocks */ extern int flag_schedule_interblock; /* Set by -fsched-spec. Allow speculative motion of non-loads */ extern int flag_schedule_speculative; /* Set by -fsched-spec-load. 
Allow speculative motion of some loads */ extern int flag_schedule_speculative_load; /* Set by -fsched-spec-load-dangerous. Allow speculative motion of more loads */ extern int flag_schedule_speculative_load_dangerous; /* Set by -fsched-stalled-insns. Allow premature scheduling of queued insns */ extern int flag_sched_stalled_insns; /* Set by -fsched-stalled-insns-dep. Set dependence distance checking in premature scheduling of queued insns */ extern int flag_sched_stalled_insns_dep; /* Set by -fsched2-use-superblocks. If scheduling post reload, do superblock scheduling */ extern int flag_sched2_use_superblocks; /* Set by -fsched2-use-traces. If scheduling post reload, do trace scheduling */ extern int flag_sched2_use_traces; /* Set by -fschedule-insns. Reschedule instructions before register allocation */ extern int flag_schedule_insns; /* Set by -fschedule-insns2. Reschedule instructions after register allocation */ extern int flag_schedule_insns_after_reload; /* Set by -fshared-data. Mark data as shared rather than private */ extern int flag_shared_data; /* Set by -fsignaling-nans. Disable optimizations observable by IEEE signaling NaNs */ extern int flag_signaling_nans; /* Set by -fsingle-precision-constant. Convert floating point constants to single precision constants */ extern int flag_single_precision_constant; /* Set by -fstack-check. Insert stack checking code into the program */ extern int flag_stack_check; /* Set by -fstrength-reduce. Perform strength reduction optimizations */ extern int flag_strength_reduce; /* Set by -fstrict-aliasing. Assume strict aliasing rules apply */ extern int flag_strict_aliasing; /* Set by -fsyntax-only. Check for syntax errors, then stop */ extern int flag_syntax_only; /* Set by -ftest-coverage. Create data files needed by \"gcov\" */ extern int flag_test_coverage; /* Set by -fthread-jumps. Perform jump threading optimizations */ extern int flag_thread_jumps; /* Set by -ftime-report. Report the time taken by each compiler pass */ extern int time_report; /* Set by -ftracer. Perform superblock formation via tail duplication */ extern int flag_tracer; /* Set by -ftrapping-math. Assume floating-point operations can trap */ extern int flag_trapping_math; /* Set by -ftrapv. Trap for signed overflow in addition, subtraction and multiplication */ extern int flag_trapv; /* Set by -ftree-based-profiling. Use tree-ssa based implementation of profiling */ extern int flag_tree_based_profiling; /* Set by -ftree-ccp. Enable SSA-CCP optimization on trees */ extern int flag_tree_ccp; /* Set by -ftree-ch. Enable loop header copying on trees */ extern int flag_tree_ch; /* Set by -ftree-combine-temps. Coalesce memory temporaries in the SSA->normal pass */ extern int flag_tree_combine_temps; /* Set by -ftree-copyrename. Replace SSA temporaries with better names in copies. */ extern int flag_tree_copyrename; /* Set by -ftree-dce. Enable SSA dead code elimination optimization on trees */ extern int flag_tree_dce; /* Set by -ftree-dominator-opts. Enable dominator optimizations */ extern int flag_tree_dom; /* Set by -ftree-dse. Enable dead store elimination */ extern int flag_tree_dse; /* Set by -ftree-fre. Enable Full Redundancy Elimination (FRE) on trees */ extern int flag_tree_fre; /* Set by -ftree-loop-optimize. Enable loop optimizations on tree level */ extern int flag_tree_loop_optimize; /* Set by -ftree-lrs. Perform live range splitting during the SSA->normal pass. */ extern int flag_tree_live_range_split; /* Set by -ftree-pre. 
Enable SSA-PRE optimization on trees */ extern int flag_tree_pre; /* Set by -ftree-sra. Perform scalar replacement of aggregates */ extern int flag_tree_sra; /* Set by -ftree-ter. Replace temporary expressions in the SSA->normal pass */ extern int flag_tree_ter; /* Set by -funit-at-a-time. Compile whole compilation unit at a time */ extern int flag_unit_at_a_time; /* Set by -funroll-all-loops. Perform loop unrolling for all loops */ extern int flag_unroll_all_loops; /* Set by -funroll-loops. Perform loop unrolling when iteration count is known */ extern int flag_unroll_loops; /* Set by -funsafe-math-optimizations. Allow math optimizations that may violate IEEE or ISO standards */ extern int flag_unsafe_math_optimizations; /* Set by -funswitch-loops. Perform loop unswitching */ extern int flag_unswitch_loops; /* Set by -funwind-tables. Just generate unwind tables for exception handling */ extern int flag_unwind_tables; /* Set by -fvar-tracking. Perform variable tracking */ extern int flag_var_tracking; /* Set by -fverbose-asm. Add extra commentary to assembler output */ extern int flag_verbose_asm; /* Set by -fvpt. Use expression value profiles in optimizations */ extern int flag_value_profile_transformations; /* Set by -fweb. Construct webs and split unrelated uses of single variable */ extern int flag_web; /* Set by -fwrapv. Assume signed arithmetic overflow wraps around */ extern int flag_wrapv; /* Set by -fzero-initialized-in-bss. Put zero initialized data in the bss section */ extern int flag_zero_initialized_in_bss; /* Set by -p. Enable function profiling */ extern int profile_flag; /* Set by -pedantic. Issue warnings needed for strict compliance to the standard */ extern int pedantic; /* Set by -quiet. Do not display functions compiled or elapsed time */ extern int quiet_flag; /* Set by -version. Display the compiler's version */ extern int version_flag; /* Set by -w. 
Suppress warnings */ extern int inhibit_warnings; #define CL_C (1 << 0) #define CL_CXX (1 << 1) #define CL_ObjC (1 << 2) #define CL_ObjCXX (1 << 3) enum opt_code { OPT__help, /* --help */ OPT__output_pch_, /* --output-pch= */ OPT__param, /* --param */ OPT__target_help, /* --target-help */ OPT__version, /* --version */ OPT_A, /* -A */ OPT_C, /* -C */ OPT_CC, /* -CC */ OPT_D, /* -D */ OPT_E, /* -E */ OPT_F, /* -F */ OPT_G, /* -G */ OPT_H, /* -H */ OPT_I, /* -I */ OPT_M, /* -M */ OPT_MD, /* -MD */ OPT_MF, /* -MF */ OPT_MG, /* -MG */ OPT_MM, /* -MM */ OPT_MMD, /* -MMD */ OPT_MP, /* -MP */ OPT_MQ, /* -MQ */ OPT_MT, /* -MT */ OPT_O, /* -O */ OPT_Os, /* -Os */ OPT_P, /* -P */ OPT_U, /* -U */ OPT_W, /* -W */ OPT_Wabi, /* -Wabi */ OPT_Waggregate_return, /* -Waggregate-return */ OPT_Wall, /* -Wall */ OPT_Wbad_function_cast, /* -Wbad-function-cast */ OPT_Wcast_align, /* -Wcast-align */ OPT_Wcast_qual, /* -Wcast-qual */ OPT_Wchar_subscripts, /* -Wchar-subscripts */ OPT_Wcomment, /* -Wcomment */ OPT_Wcomments, /* -Wcomments */ OPT_Wconversion, /* -Wconversion */ OPT_Wctor_dtor_privacy, /* -Wctor-dtor-privacy */ OPT_Wdeclaration_after_statement, /* -Wdeclaration-after-statement */ OPT_Wdeprecated, /* -Wdeprecated */ OPT_Wdeprecated_declarations, /* -Wdeprecated-declarations */ OPT_Wdisabled_optimization, /* -Wdisabled-optimization */ OPT_Wdiv_by_zero, /* -Wdiv-by-zero */ OPT_Weffc__, /* -Weffc++ */ OPT_Wendif_labels, /* -Wendif-labels */ OPT_Werror, /* -Werror */ OPT_Werror_implicit_function_declaration, /* -Werror-implicit-function-declaration */ OPT_Wextra, /* -Wextra */ OPT_Wfatal_errors, /* -Wfatal-errors */ OPT_Wfloat_equal, /* -Wfloat-equal */ OPT_Wformat, /* -Wformat */ OPT_Wformat_extra_args, /* -Wformat-extra-args */ OPT_Wformat_nonliteral, /* -Wformat-nonliteral */ OPT_Wformat_security, /* -Wformat-security */ OPT_Wformat_y2k, /* -Wformat-y2k */ OPT_Wformat_zero_length, /* -Wformat-zero-length */ OPT_Wformat_, /* -Wformat= */ OPT_Wimplicit, /* -Wimplicit */ OPT_Wimplicit_function_declaration, /* -Wimplicit-function-declaration */ OPT_Wimplicit_int, /* -Wimplicit-int */ OPT_Wimport, /* -Wimport */ OPT_Winit_self, /* -Winit-self */ OPT_Winline, /* -Winline */ OPT_Winvalid_offsetof, /* -Winvalid-offsetof */ OPT_Winvalid_pch, /* -Winvalid-pch */ OPT_Wlarger_than_, /* -Wlarger-than- */ OPT_Wlong_long, /* -Wlong-long */ OPT_Wmain, /* -Wmain */ OPT_Wmissing_braces, /* -Wmissing-braces */ OPT_Wmissing_declarations, /* -Wmissing-declarations */ OPT_Wmissing_format_attribute, /* -Wmissing-format-attribute */ OPT_Wmissing_include_dirs, /* -Wmissing-include-dirs */ OPT_Wmissing_noreturn, /* -Wmissing-noreturn */ OPT_Wmissing_prototypes, /* -Wmissing-prototypes */ OPT_Wmultichar, /* -Wmultichar */ OPT_Wnested_externs, /* -Wnested-externs */ OPT_Wnon_template_friend, /* -Wnon-template-friend */ OPT_Wnon_virtual_dtor, /* -Wnon-virtual-dtor */ OPT_Wnonnull, /* -Wnonnull */ OPT_Wold_style_cast, /* -Wold-style-cast */ OPT_Wold_style_definition, /* -Wold-style-definition */ OPT_Woverloaded_virtual, /* -Woverloaded-virtual */ OPT_Wpacked, /* -Wpacked */ OPT_Wpadded, /* -Wpadded */ OPT_Wparentheses, /* -Wparentheses */ OPT_Wpmf_conversions, /* -Wpmf-conversions */ OPT_Wpointer_arith, /* -Wpointer-arith */ OPT_Wprotocol, /* -Wprotocol */ OPT_Wredundant_decls, /* -Wredundant-decls */ OPT_Wreorder, /* -Wreorder */ OPT_Wreturn_type, /* -Wreturn-type */ OPT_Wselector, /* -Wselector */ OPT_Wsequence_point, /* -Wsequence-point */ OPT_Wshadow, /* -Wshadow */ OPT_Wsign_compare, /* -Wsign-compare */ OPT_Wsign_promo, /* 
-Wsign-promo */ OPT_Wstrict_aliasing, /* -Wstrict-aliasing */ OPT_Wstrict_aliasing_, /* -Wstrict-aliasing= */ OPT_Wstrict_prototypes, /* -Wstrict-prototypes */ OPT_Wswitch, /* -Wswitch */ OPT_Wswitch_default, /* -Wswitch-default */ OPT_Wswitch_enum, /* -Wswitch-enum */ OPT_Wsynth, /* -Wsynth */ OPT_Wsystem_headers, /* -Wsystem-headers */ OPT_Wtraditional, /* -Wtraditional */ OPT_Wtrigraphs, /* -Wtrigraphs */ OPT_Wundeclared_selector, /* -Wundeclared-selector */ OPT_Wundef, /* -Wundef */ OPT_Wuninitialized, /* -Wuninitialized */ OPT_Wunknown_pragmas, /* -Wunknown-pragmas */ OPT_Wunreachable_code, /* -Wunreachable-code */ OPT_Wunused, /* -Wunused */ OPT_Wunused_function, /* -Wunused-function */ OPT_Wunused_label, /* -Wunused-label */ OPT_Wunused_macros, /* -Wunused-macros */ OPT_Wunused_parameter, /* -Wunused-parameter */ OPT_Wunused_value, /* -Wunused-value */ OPT_Wunused_variable, /* -Wunused-variable */ OPT_Wvariadic_macros, /* -Wvariadic-macros */ OPT_Wwrite_strings, /* -Wwrite-strings */ OPT_ansi, /* -ansi */ OPT_aux_info, /* -aux-info */ OPT_aux_info_, /* -aux-info= */ OPT_auxbase, /* -auxbase */ OPT_auxbase_strip, /* -auxbase-strip */ OPT_d, /* -d */ OPT_dumpbase, /* -dumpbase */ OPT_fPIC, /* -fPIC */ OPT_fPIE, /* -fPIE */ OPT_fabi_version_, /* -fabi-version= */ OPT_faccess_control, /* -faccess-control */ OPT_falign_functions, /* -falign-functions */ OPT_falign_functions_, /* -falign-functions= */ OPT_falign_jumps, /* -falign-jumps */ OPT_falign_jumps_, /* -falign-jumps= */ OPT_falign_labels, /* -falign-labels */ OPT_falign_labels_, /* -falign-labels= */ OPT_falign_loops, /* -falign-loops */ OPT_falign_loops_, /* -falign-loops= */ OPT_fall_virtual, /* -fall-virtual */ OPT_falt_external_templates, /* -falt-external-templates */ OPT_fargument_alias, /* -fargument-alias */ OPT_fargument_noalias, /* -fargument-noalias */ OPT_fargument_noalias_global, /* -fargument-noalias-global */ OPT_fasm, /* -fasm */ OPT_fasynchronous_unwind_tables, /* -fasynchronous-unwind-tables */ OPT_fbounds_check, /* -fbounds-check */ OPT_fbranch_count_reg, /* -fbranch-count-reg */ OPT_fbranch_probabilities, /* -fbranch-probabilities */ OPT_fbranch_target_load_optimize, /* -fbranch-target-load-optimize */ OPT_fbranch_target_load_optimize2, /* -fbranch-target-load-optimize2 */ OPT_fbtr_bb_exclusive, /* -fbtr-bb-exclusive */ OPT_fbuiltin, /* -fbuiltin */ OPT_fbuiltin_, /* -fbuiltin- */ OPT_fcall_saved_, /* -fcall-saved- */ OPT_fcall_used_, /* -fcall-used- */ OPT_fcaller_saves, /* -fcaller-saves */ OPT_fcheck_new, /* -fcheck-new */ OPT_fcommon, /* -fcommon */ OPT_fcond_mismatch, /* -fcond-mismatch */ OPT_fconserve_space, /* -fconserve-space */ OPT_fconst_strings, /* -fconst-strings */ OPT_fconstant_string_class_, /* -fconstant-string-class= */ OPT_fcprop_registers, /* -fcprop-registers */ OPT_fcrossjumping, /* -fcrossjumping */ OPT_fcse_follow_jumps, /* -fcse-follow-jumps */ OPT_fcse_skip_blocks, /* -fcse-skip-blocks */ OPT_fdata_sections, /* -fdata-sections */ OPT_fdefault_inline, /* -fdefault-inline */ OPT_fdefer_pop, /* -fdefer-pop */ OPT_fdelayed_branch, /* -fdelayed-branch */ OPT_fdelete_null_pointer_checks, /* -fdelete-null-pointer-checks */ OPT_fdiagnostics_show_location_, /* -fdiagnostics-show-location= */ OPT_fdollars_in_identifiers, /* -fdollars-in-identifiers */ OPT_fdump_, /* -fdump- */ OPT_fdump_unnumbered, /* -fdump-unnumbered */ OPT_felide_constructors, /* -felide-constructors */ OPT_feliminate_dwarf2_dups, /* -feliminate-dwarf2-dups */ OPT_feliminate_unused_debug_symbols, /* 
-feliminate-unused-debug-symbols */ OPT_feliminate_unused_debug_types, /* -feliminate-unused-debug-types */ OPT_fenforce_eh_specs, /* -fenforce-eh-specs */ OPT_fenum_int_equiv, /* -fenum-int-equiv */ OPT_fexceptions, /* -fexceptions */ OPT_fexec_charset_, /* -fexec-charset= */ OPT_fexpensive_optimizations, /* -fexpensive-optimizations */ OPT_fexternal_templates, /* -fexternal-templates */ OPT_ffast_math, /* -ffast-math */ OPT_ffinite_math_only, /* -ffinite-math-only */ OPT_ffixed_, /* -ffixed- */ OPT_ffixed_form, /* -ffixed-form */ OPT_ffixed_line_length_, /* -ffixed-line-length- */ OPT_ffloat_store, /* -ffloat-store */ OPT_ffor_scope, /* -ffor-scope */ OPT_fforce_addr, /* -fforce-addr */ OPT_fforce_mem, /* -fforce-mem */ OPT_ffreestanding, /* -ffreestanding */ OPT_ffunction_cse, /* -ffunction-cse */ OPT_ffunction_sections, /* -ffunction-sections */ OPT_fgcse, /* -fgcse */ OPT_fgcse_after_reload, /* -fgcse-after-reload */ OPT_fgcse_las, /* -fgcse-las */ OPT_fgcse_lm, /* -fgcse-lm */ OPT_fgcse_sm, /* -fgcse-sm */ OPT_fgnu_keywords, /* -fgnu-keywords */ OPT_fgnu_runtime, /* -fgnu-runtime */ OPT_fguess_branch_probability, /* -fguess-branch-probability */ OPT_fguiding_decls, /* -fguiding-decls */ OPT_fhandle_exceptions, /* -fhandle-exceptions */ OPT_fhonor_std, /* -fhonor-std */ OPT_fhosted, /* -fhosted */ OPT_fhuge_objects, /* -fhuge-objects */ OPT_fident, /* -fident */ OPT_fif_conversion, /* -fif-conversion */ OPT_fif_conversion2, /* -fif-conversion2 */ OPT_fimplement_inlines, /* -fimplement-inlines */ OPT_fimplicit_inline_templates, /* -fimplicit-inline-templates */ OPT_fimplicit_templates, /* -fimplicit-templates */ OPT_finhibit_size_directive, /* -finhibit-size-directive */ OPT_finline, /* -finline */ OPT_finline_functions, /* -finline-functions */ OPT_finline_limit_, /* -finline-limit- */ OPT_finline_limit_eq, /* -finline-limit= */ OPT_finput_charset_, /* -finput-charset= */ OPT_finstrument_functions, /* -finstrument-functions */ OPT_fkeep_inline_functions, /* -fkeep-inline-functions */ OPT_fkeep_static_consts, /* -fkeep-static-consts */ OPT_flabels_ok, /* -flabels-ok */ OPT_fleading_underscore, /* -fleading-underscore */ OPT_floop_optimize, /* -floop-optimize */ OPT_floop_optimize2, /* -floop-optimize2 */ OPT_fmath_errno, /* -fmath-errno */ OPT_fmem_report, /* -fmem-report */ OPT_fmerge_all_constants, /* -fmerge-all-constants */ OPT_fmerge_constants, /* -fmerge-constants */ OPT_fmessage_length_, /* -fmessage-length= */ OPT_fmodulo_sched, /* -fmodulo-sched */ OPT_fmove_all_movables, /* -fmove-all-movables */ OPT_fmove_loop_invariants, /* -fmove-loop-invariants */ OPT_fms_extensions, /* -fms-extensions */ OPT_fmudflap, /* -fmudflap */ OPT_fmudflapir, /* -fmudflapir */ OPT_fmudflapth, /* -fmudflapth */ OPT_fname_mangling_version_, /* -fname-mangling-version- */ OPT_fnew_abi, /* -fnew-abi */ OPT_fnew_ra, /* -fnew-ra */ OPT_fnext_runtime, /* -fnext-runtime */ OPT_fnil_receivers, /* -fnil-receivers */ OPT_fnon_call_exceptions, /* -fnon-call-exceptions */ OPT_fnonansi_builtins, /* -fnonansi-builtins */ OPT_fnonnull_objects, /* -fnonnull-objects */ OPT_fobjc_exceptions, /* -fobjc-exceptions */ OPT_fobjc_sjlj_exceptions, /* -fobjc-sjlj-exceptions */ OPT_fold_unroll_all_loops, /* -fold-unroll-all-loops */ OPT_fold_unroll_loops, /* -fold-unroll-loops */ OPT_fomit_frame_pointer, /* -fomit-frame-pointer */ OPT_foperator_names, /* -foperator-names */ OPT_foptimize_register_move, /* -foptimize-register-move */ OPT_foptimize_sibling_calls, /* -foptimize-sibling-calls */ OPT_foptional_diags, /* 
-foptional-diags */ OPT_fpack_struct, /* -fpack-struct */ OPT_fpcc_struct_return, /* -fpcc-struct-return */ OPT_fpch_deps, /* -fpch-deps */ OPT_fpch_preprocess, /* -fpch-preprocess */ OPT_fpeel_loops, /* -fpeel-loops */ OPT_fpeephole, /* -fpeephole */ OPT_fpeephole2, /* -fpeephole2 */ OPT_fpermissive, /* -fpermissive */ OPT_fpic, /* -fpic */ OPT_fpie, /* -fpie */ OPT_fprefetch_loop_arrays, /* -fprefetch-loop-arrays */ OPT_fpreprocessed, /* -fpreprocessed */ OPT_fprofile, /* -fprofile */ OPT_fprofile_arcs, /* -fprofile-arcs */ OPT_fprofile_generate, /* -fprofile-generate */ OPT_fprofile_use, /* -fprofile-use */ OPT_fprofile_values, /* -fprofile-values */ OPT_frandom_seed, /* -frandom-seed */ OPT_frandom_seed_, /* -frandom-seed= */ OPT_freduce_all_givs, /* -freduce-all-givs */ OPT_freg_struct_return, /* -freg-struct-return */ OPT_fregmove, /* -fregmove */ OPT_frename_registers, /* -frename-registers */ OPT_freorder_blocks, /* -freorder-blocks */ OPT_freorder_blocks_and_partition, /* -freorder-blocks-and-partition */ OPT_freorder_functions, /* -freorder-functions */ OPT_freplace_objc_classes, /* -freplace-objc-classes */ OPT_frepo, /* -frepo */ OPT_frerun_cse_after_loop, /* -frerun-cse-after-loop */ OPT_frerun_loop_opt, /* -frerun-loop-opt */ OPT_frounding_math, /* -frounding-math */ OPT_frtti, /* -frtti */ OPT_fsched_interblock, /* -fsched-interblock */ OPT_fsched_spec, /* -fsched-spec */ OPT_fsched_spec_load, /* -fsched-spec-load */ OPT_fsched_spec_load_dangerous, /* -fsched-spec-load-dangerous */ OPT_fsched_stalled_insns, /* -fsched-stalled-insns */ OPT_fsched_stalled_insns_dep, /* -fsched-stalled-insns-dep */ OPT_fsched_stalled_insns_dep_, /* -fsched-stalled-insns-dep= */ OPT_fsched_stalled_insns_, /* -fsched-stalled-insns= */ OPT_fsched_verbose_, /* -fsched-verbose= */ OPT_fsched2_use_superblocks, /* -fsched2-use-superblocks */ OPT_fsched2_use_traces, /* -fsched2-use-traces */ OPT_fschedule_insns, /* -fschedule-insns */ OPT_fschedule_insns2, /* -fschedule-insns2 */ OPT_fshared_data, /* -fshared-data */ OPT_fshort_double, /* -fshort-double */ OPT_fshort_enums, /* -fshort-enums */ OPT_fshort_wchar, /* -fshort-wchar */ OPT_fshow_column, /* -fshow-column */ OPT_fsignaling_nans, /* -fsignaling-nans */ OPT_fsigned_bitfields, /* -fsigned-bitfields */ OPT_fsigned_char, /* -fsigned-char */ OPT_fsingle_precision_constant, /* -fsingle-precision-constant */ OPT_fsquangle, /* -fsquangle */ OPT_fstack_check, /* -fstack-check */ OPT_fstack_limit, /* -fstack-limit */ OPT_fstack_limit_register_, /* -fstack-limit-register= */ OPT_fstack_limit_symbol_, /* -fstack-limit-symbol= */ OPT_fstats, /* -fstats */ OPT_fstrength_reduce, /* -fstrength-reduce */ OPT_fstrict_aliasing, /* -fstrict-aliasing */ OPT_fstrict_prototype, /* -fstrict-prototype */ OPT_fsyntax_only, /* -fsyntax-only */ OPT_ftabstop_, /* -ftabstop= */ OPT_ftemplate_depth_, /* -ftemplate-depth- */ OPT_ftest_coverage, /* -ftest-coverage */ OPT_fthis_is_variable, /* -fthis-is-variable */ OPT_fthread_jumps, /* -fthread-jumps */ OPT_ftime_report, /* -ftime-report */ OPT_ftls_model_, /* -ftls-model= */ OPT_ftracer, /* -ftracer */ OPT_ftrapping_math, /* -ftrapping-math */ OPT_ftrapv, /* -ftrapv */ OPT_ftree_based_profiling, /* -ftree-based-profiling */ OPT_ftree_ccp, /* -ftree-ccp */ OPT_ftree_ch, /* -ftree-ch */ OPT_ftree_combine_temps, /* -ftree-combine-temps */ OPT_ftree_copyrename, /* -ftree-copyrename */ OPT_ftree_dce, /* -ftree-dce */ OPT_ftree_dominator_opts, /* -ftree-dominator-opts */ OPT_ftree_dse, /* -ftree-dse */ OPT_ftree_fre, /* -ftree-fre 
*/ OPT_ftree_loop_optimize, /* -ftree-loop-optimize */ OPT_ftree_lrs, /* -ftree-lrs */ OPT_ftree_points_to_, /* -ftree-points-to= */ OPT_ftree_pre, /* -ftree-pre */ OPT_ftree_sra, /* -ftree-sra */ OPT_ftree_ter, /* -ftree-ter */ OPT_funit_at_a_time, /* -funit-at-a-time */ OPT_funroll_all_loops, /* -funroll-all-loops */ OPT_funroll_loops, /* -funroll-loops */ OPT_funsafe_math_optimizations, /* -funsafe-math-optimizations */ OPT_funsigned_bitfields, /* -funsigned-bitfields */ OPT_funsigned_char, /* -funsigned-char */ OPT_funswitch_loops, /* -funswitch-loops */ OPT_funwind_tables, /* -funwind-tables */ OPT_fuse_cxa_atexit, /* -fuse-cxa-atexit */ OPT_fvar_tracking, /* -fvar-tracking */ OPT_fverbose_asm, /* -fverbose-asm */ OPT_fvpt, /* -fvpt */ OPT_fvtable_gc, /* -fvtable-gc */ OPT_fvtable_thunks, /* -fvtable-thunks */ OPT_fweak, /* -fweak */ OPT_fweb, /* -fweb */ OPT_fwide_exec_charset_, /* -fwide-exec-charset= */ OPT_fworking_directory, /* -fworking-directory */ OPT_fwrapv, /* -fwrapv */ OPT_fxref, /* -fxref */ OPT_fzero_initialized_in_bss, /* -fzero-initialized-in-bss */ OPT_fzero_link, /* -fzero-link */ OPT_g, /* -g */ OPT_gcoff, /* -gcoff */ OPT_gdwarf_2, /* -gdwarf-2 */ OPT_gen_decls, /* -gen-decls */ OPT_ggdb, /* -ggdb */ OPT_gstabs, /* -gstabs */ OPT_gstabs_, /* -gstabs+ */ OPT_gvms, /* -gvms */ OPT_gxcoff, /* -gxcoff */ OPT_gxcoff_, /* -gxcoff+ */ OPT_idirafter, /* -idirafter */ OPT_imacros, /* -imacros */ OPT_include, /* -include */ OPT_iprefix, /* -iprefix */ OPT_iquote, /* -iquote */ OPT_isysroot, /* -isysroot */ OPT_isystem, /* -isystem */ OPT_iwithprefix, /* -iwithprefix */ OPT_iwithprefixbefore, /* -iwithprefixbefore */ OPT_lang_asm, /* -lang-asm */ OPT_lang_objc, /* -lang-objc */ OPT_m, /* -m */ OPT_nostdinc, /* -nostdinc */ OPT_nostdinc__, /* -nostdinc++ */ OPT_o, /* -o */ OPT_p, /* -p */ OPT_pedantic, /* -pedantic */ OPT_pedantic_errors, /* -pedantic-errors */ OPT_print_objc_runtime_info, /* -print-objc-runtime-info */ OPT_quiet, /* -quiet */ OPT_remap, /* -remap */ OPT_std_c__98, /* -std=c++98 */ OPT_std_c89, /* -std=c89 */ OPT_std_c99, /* -std=c99 */ OPT_std_c9x, /* -std=c9x */ OPT_std_gnu__98, /* -std=gnu++98 */ OPT_std_gnu89, /* -std=gnu89 */ OPT_std_gnu99, /* -std=gnu99 */ OPT_std_gnu9x, /* -std=gnu9x */ OPT_std_iso9899_1990, /* -std=iso9899:1990 */ OPT_std_iso9899_199409, /* -std=iso9899:199409 */ OPT_std_iso9899_1999, /* -std=iso9899:1999 */ OPT_std_iso9899_199x, /* -std=iso9899:199x */ OPT_traditional_cpp, /* -traditional-cpp */ OPT_trigraphs, /* -trigraphs */ OPT_undef, /* -undef */ OPT_v, /* -v */ OPT_version, /* -version */ OPT_w, /* -w */ N_OPTS }; #endif /* OPTIONS_H */ enum debug_info_type { NO_DEBUG, /* Write no debug info. */ DBX_DEBUG, /* Write BSD .stabs for DBX (using dbxout.c). */ SDB_DEBUG, /* Write COFF for (old) SDB (using sdbout.c). */ DWARF_DEBUG, /* Write Dwarf debug info (using dwarfout.c). */ DWARF2_DEBUG, /* Write Dwarf v2 debug info (using dwarf2out.c). */ XCOFF_DEBUG, /* Write IBM/Xcoff debug info (using dbxout.c). */ VMS_DEBUG, /* Write VMS debug info (using vmsdbgout.c). */ VMS_AND_DWARF2_DEBUG /* Write VMS debug info (using vmsdbgout.c). and DWARF v2 debug info (using dwarf2out.c). */ }; /* Specify which kind of debugging info to generate. */ extern enum debug_info_type write_symbols; /* Names of debug_info_type, for error messages. */ extern const char *const debug_type_names[]; enum debug_info_level { DINFO_LEVEL_NONE, /* Write no debugging info. */ DINFO_LEVEL_TERSE, /* Write minimal info to support tracebacks only. 
*/ DINFO_LEVEL_NORMAL, /* Write info for all declarations (and line table). */ DINFO_LEVEL_VERBOSE /* Write normal info plus #define/#undef info. */ }; /* Specify how much debugging info to generate. */ extern enum debug_info_level debug_info_level; /* Nonzero means use GNU-only extensions in the generated symbolic debugging information. */ extern bool use_gnu_debug_info_extensions; /* Nonzero means emit debugging information only for symbols which are used. */ extern int flag_debug_only_used_symbols; /* Nonzero means do optimizations. -opt. */ extern int optimize; /* Nonzero means optimize for size. -Os. */ extern int optimize_size; /* Don't print functions as they are compiled and don't print times taken by the various passes. -quiet. */ extern int quiet_flag; /* Print memory still in use at end of compilation (which may have little to do with peak memory consumption). -fmem-report. */ extern int mem_report; /* Do print extra warnings (such as for uninitialized variables). -W/-Wextra. */ extern bool extra_warnings; /* Nonzero to warn about unused variables, functions et.al. Use set_Wunused() to update the -Wunused-* flags that correspond to the -Wunused option. */ extern void set_Wunused (int setting); /* Nonzero to warn about variables used before they are initialized. */ extern int warn_uninitialized; /* Nonzero means warn about function definitions that default the return type or that use a null return and have a return-type other than void. */ extern int warn_return_type; /* Nonzero means warn about any objects definitions whose size is larger than N bytes. Also want about function definitions whose returned values are larger than N bytes. The value N is in `larger_than_size'. */ extern bool warn_larger_than; extern HOST_WIDE_INT larger_than_size; /* Nonzero means warn about constructs which might not be strict aliasing safe. */ extern int warn_strict_aliasing; /* Nonzero if generating code to do profiling. */ extern int profile_flag; /* Nonzero if generating code to profile program flow graph arcs. */ extern int profile_arc_flag; /* Nonzero if value profile should be measured. */ extern int flag_profile_values; /* Nonzero if generating info for gcov to calculate line test coverage. */ extern int flag_test_coverage; /* Nonzero indicates that branch taken probabilities should be calculated. */ extern int flag_branch_probabilities; /* Nonzero if basic blocks should be reordered. */ extern int flag_reorder_blocks; /* Nonzero if basic blocks should be partitioned into hot and cold sections of the .o file, in addition to being reordered. */ extern int flag_reorder_blocks_and_partition; /* Nonzero if functions should be reordered. */ extern int flag_reorder_functions; /* Nonzero if registers should be renamed. */ extern int flag_rename_registers; /* Nonzero for -pedantic switch: warn about anything that standard C forbids. */ extern int pedantic; /* Temporarily suppress certain warnings. This is set while reading code from a system header file. */ extern int in_system_header; /* Nonzero for -dp: annotate the assembly with a comment describing the pattern and alternative used. */ extern int flag_print_asm_name; /* Now the symbols that are set with `-f' switches. */ /* Nonzero means `char' should be signed. */ extern int flag_signed_char; /* Nonzero means give an enum type only as many bytes as it needs. A value of 2 means it has not yet been initialized. 
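   That sentinel convention is illustrated by the sketch below, which is not
   part of the original source; the example_* names are hypothetical.  The
   value 2 lets later option processing distinguish "flag left at its
   default" from an explicit -fshort-enums / -fno-short-enums choice, so a
   default can be chosen late, once other options are known.  */

static int example_short_enums = 2;	/* 2 = not yet initialized.  */

static void
example_finalize_short_enums (int optimize_size)
{
  /* Neither -fshort-enums nor -fno-short-enums was given: pick a default
     here (this sketch arbitrarily follows -Os).  */
  if (example_short_enums == 2)
    example_short_enums = optimize_size ? 1 : 0;
}

/* The declaration documented above: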
*/ extern int flag_short_enums; /* Nonzero for -fcaller-saves: allocate values in regs that need to be saved across function calls, if that produces overall better code. Optional now, so people can test it. */ extern int flag_caller_saves; /* Nonzero for -fpcc-struct-return: return values the same way PCC does. */ extern int flag_pcc_struct_return; /* Nonzero for -fforce-mem: load memory value into a register before arithmetic on it. This makes better cse but slower compilation. */ extern int flag_force_mem; /* Nonzero for -fforce-addr: load memory address into a register before reference to memory. This makes better cse but slower compilation. */ extern int flag_force_addr; /* Nonzero for -fdefer-pop: don't pop args after each function call; instead save them up to pop many calls' args with one insns. */ extern int flag_defer_pop; /* Nonzero for -ffloat-store: don't allocate floats and doubles in extended-precision registers. */ extern int flag_float_store; /* Nonzero enables strength-reduction in loop.c. */ extern int flag_strength_reduce; /* Nonzero enables loop unrolling in unroll.c. Only loops for which the number of iterations can be calculated at compile-time (UNROLL_COMPLETELY, UNROLL_MODULO) or at run-time (preconditioned to be UNROLL_MODULO) are unrolled. */ extern int flag_old_unroll_loops; /* Nonzero enables loop unrolling in unroll.c. All loops are unrolled. This is generally not a win. */ extern int flag_old_unroll_all_loops; /* Nonzero forces all invariant computations in loops to be moved outside the loop. */ extern int flag_move_all_movables; /* Nonzero enables prefetch optimizations for arrays in loops. */ extern int flag_prefetch_loop_arrays; /* Nonzero forces all general induction variables in loops to be strength reduced. */ extern int flag_reduce_all_givs; /* Nonzero for -fcse-follow-jumps: have cse follow jumps to do a more extensive job. */ extern int flag_cse_follow_jumps; /* Nonzero for -fcse-skip-blocks: have cse follow a branch around a block. */ extern int flag_cse_skip_blocks; /* Nonzero for -fexpensive-optimizations: perform miscellaneous relatively-expensive optimizations. */ extern int flag_expensive_optimizations; /* Nonzero means to use global dataflow analysis to eliminate useless null pointer tests. */ extern int flag_delete_null_pointer_checks; /* Nonzero means don't put addresses of constant functions in registers. Used for compiling the Unix kernel, where strange substitutions are done on the assembly output. */ extern int flag_no_function_cse; /* Nonzero for -fomit-frame-pointer: don't make a frame pointer in simple functions that don't require one. */ extern int flag_omit_frame_pointer; /* Nonzero to inhibit use of define_optimization peephole opts. */ extern int flag_no_peephole; /* Nonzero allows GCC to optimize sibling and tail recursive calls. */ extern int flag_optimize_sibling_calls; /* Nonzero means the front end generally wants `errno' maintained by math operations, like built-in SQRT. */ extern int flag_errno_math; /* Nonzero means that unsafe floating-point math optimizations are allowed for the sake of speed. IEEE compliance is not guaranteed, and operations are allowed to assume that their arguments and results are "normal" (e.g., nonnegative for SQRT). */ extern int flag_unsafe_math_optimizations; /* Nonzero means that no NaNs or +-Infs are expected. */ extern int flag_finite_math_only; /* Zero means that floating-point math operations cannot generate a (user-visible) trap. This is the case, for example, in nonstop IEEE 754 arithmetic. 
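   The sketch below (not part of the original source; the example_* name is
   hypothetical) shows the kind of guard these floating-point flags imply:
   the classic fold of "x != x" to false is only legal when NaNs need not be
   honored.  */

static int
example_can_fold_self_unequal (int finite_math_only)
{
  /* A NaN compares unequal to itself, so "x != x" may be folded to 0
     only when no NaNs are expected (-ffinite-math-only).  */
  return finite_math_only;
}

/* The declaration documented above: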
*/ extern int flag_trapping_math; /* Nonzero means disable transformations that assume default floating point rounding behavior. */ extern int flag_rounding_math; /* 0 means straightforward implementation of complex divide acceptable. 1 means wide ranges of inputs must work for complex divide. 2 means C99-like requirements for complex divide (not yet implemented). */ extern int flag_complex_divide_method; /* Nonzero means to run loop optimizations twice. */ extern int flag_rerun_loop_opt; /* Nonzero means make functions that look like good inline candidates go inline. */ extern int flag_inline_functions; /* Nonzero for -fkeep-inline-functions: even if we make a function go inline everywhere, keep its definition around for debugging purposes. */ extern int flag_keep_inline_functions; /* Nonzero means that functions declared `inline' will be treated as `static'. Prevents generation of zillions of copies of unused static inline functions; instead, `inlines' are written out only when actually used. Used in conjunction with -g. Also does the right thing with #pragma interface. */ extern int flag_no_inline; /* Nonzero means that we don't want inlining by virtue of -fno-inline, not just because the tree inliner turned us off. */ extern int flag_really_no_inline; /* Nonzero if we are only using compiler to check syntax errors. */ extern int flag_syntax_only; extern int rtl_dump_and_exit; /* Nonzero if we are exiting on the first error occurred. */ extern int flag_fatal_errors; /* Nonzero means we should save auxiliary info into a .X file. */ extern int flag_gen_aux_info; /* Nonzero means make the text shared if supported. */ extern int flag_shared_data; /* Controls the activation of SMS modulo scheduling. */ extern int flag_modulo_sched; /* flag_schedule_insns means schedule insns within basic blocks (before local_alloc). flag_schedule_insns_after_reload means schedule insns after global_alloc. */ extern int flag_schedule_insns; extern int flag_schedule_insns_after_reload; extern int flag_sched2_use_superblocks; extern int flag_sched2_use_traces; /* The following flags have effect only for scheduling before register allocation: flag_schedule_interblock means schedule insns across basic blocks. flag_schedule_speculative means allow speculative motion of non-load insns. flag_schedule_speculative_load means allow speculative motion of some load insns. flag_schedule_speculative_load_dangerous allows speculative motion of more load insns. */ extern int flag_schedule_interblock; extern int flag_schedule_speculative; extern int flag_schedule_speculative_load; extern int flag_schedule_speculative_load_dangerous; /* The following flags have an effect during scheduling after register allocation: sched_stalled_insns means that insns can be moved prematurely from the queue of stalled insns into the ready list. sched_stalled_insns_dep controls how many recently scheduled cycles will be examined for a dependency on a stalled insn that is candidate for premature removal from the queue of stalled insns into the ready list (has an effect only if the flag 'sched_stalled_insns' is set). */ extern int flag_sched_stalled_insns; extern int flag_sched_stalled_insns_dep; /* flag_branch_on_count_reg means try to replace add-1,compare,branch tupple by a cheaper branch, on a count register. */ extern int flag_branch_on_count_reg; /* This option is set to 1 on -fsingle-precision-constant option which is used to convert the floating point constants to single precision constants. 
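   An illustrative sketch of the visible effect follows; it is not part of
   the original source and the example_* names are hypothetical.  With the
   option enabled, an unsuffixed constant is narrowed to float precision
   before it is used.  */

static double
example_fp_constant (double value, int single_precision_constants)
{
  /* When enabled, 1.1 behaves like 1.1f: the value is rounded to float
     and widened back before entering the computation.  */
  return single_precision_constants ? (double) (float) value : value;
}

/* The declaration documented above: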
*/ extern int flag_single_precision_constant; /* Nonzero means put things in delayed-branch slots if supported. */ extern int flag_delayed_branch; /* Nonzero means suppress output of instruction numbers and line number notes in debugging dumps. */ extern int flag_dump_unnumbered; /* Nonzero means change certain warnings into errors. Usually these are warnings about failure to conform to some standard. */ extern int flag_pedantic_errors; /* Nonzero means generate position-independent code. 1 vs 2 for a target-dependent "small" or "large" mode. */ extern int flag_pic; /* Nonzero if we are compiling position independent code for executable. 1 vs 2 for a target-dependent "small" or "large" mode. */ extern int flag_pie; /* Nonzero if we are compiling code for a shared library, zero for executable. */ extern int flag_shlib; /* Nonzero means generate extra code for exception handling and enable exception handling. */ extern int flag_exceptions; /* Nonzero means generate frame unwind info table when supported. */ extern int flag_unwind_tables; /* Nonzero means generate frame unwind info table exact at each insn boundary. */ extern int flag_asynchronous_unwind_tables; /* Nonzero means don't place uninitialized global data in common storage by default. */ extern int flag_no_common; /* -finhibit-size-directive inhibits output of .size for ELF. This is used only for compiling crtstuff.c, and it may be extended to other effects needed for crtstuff.c on other systems. */ extern int flag_inhibit_size_directive; /* Nonzero means place each function into its own section on those platforms which support arbitrary section names and unlimited numbers of sections. */ extern int flag_function_sections; /* ... and similar for data. */ extern int flag_data_sections; /* -fverbose-asm causes extra commentary information to be produced in the generated assembly code (to make it more readable). This option is generally only of use to those who actually need to read the generated assembly code (perhaps while debugging the compiler itself). -fno-verbose-asm, the default, causes the extra information to not be added and is useful when comparing two assembler files. */ extern int flag_verbose_asm; /* -dA causes debug information to be produced in the generated assembly code (to make it more readable). This option is generally only of use to those who actually need to read the generated assembly code (perhaps while debugging the compiler itself). Currently, this switch is only used by dwarfout.c; however, it is intended to be a catchall for printing debug information in the assembler file. */ extern int flag_debug_asm; extern int flag_dump_rtl_in_asm; /* Greater than zero if user symbols are prepended by a leading underscore in generated assembly code. */ extern int flag_leading_underscore; /* Tag all structures with __attribute__(packed) */ extern int flag_pack_struct; /* This flag is only tested if alias checking is enabled. 0 if pointer arguments may alias each other. True in C. 1 if pointer arguments may not alias each other but may alias global variables. 2 if pointer arguments may not alias each other and may not alias global variables. True in Fortran. The value is ignored if flag_alias_check is 0. */ extern int flag_argument_noalias; /* Nonzero if we should do (language-dependent) alias analysis. Typically, this analysis will assume that expressions of certain types do not alias expressions of certain other types. Only used if alias analysis (in general) is enabled. 
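   A minimal sketch of that assumption follows; it is illustrative only, not
   part of the original source, and the example_* names are hypothetical.  */

static int
example_accesses_may_alias (int strict_aliasing,
			    int compatible_types, int through_char_type)
{
  /* Without type-based aliasing the answer is conservatively "maybe".
     With it, accesses are assumed disjoint unless the types are
     compatible or one access is made through a character type.  */
  if (!strict_aliasing)
    return 1;
  return compatible_types || through_char_type;
}

/* The declaration documented above: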
*/ extern int flag_strict_aliasing; /* Emit code to probe the stack, to help detect stack overflow; also may cause large objects to be allocated dynamically. */ extern int flag_stack_check; /* Do the full regmove optimization pass. */ extern int flag_regmove; /* Instrument functions with calls at entry and exit, for profiling. */ extern int flag_instrument_function_entry_exit; /* Perform a peephole pass before sched2. */ extern int flag_peephole2; /* Try to guess branch probabilities. */ extern int flag_guess_branch_prob; /* -fcheck-bounds causes gcc to generate array bounds checks. For C, C++ and ObjC: defaults off. For Java: defaults to on. For Fortran: defaults to off. */ extern int flag_bounds_check; /* This will attempt to merge constant section constants, if 1 only string constants and constants from constant pool, if 2 also constant variables. */ extern int flag_merge_constants; /* If one, renumber instruction UIDs to reduce the number of unused UIDs if there are a lot of instructions. If greater than one, unconditionally renumber instruction UIDs. */ extern int flag_renumber_insns; /* Other basic status info about current function. */ /* Nonzero means current function must be given a frame pointer. Set in stmt.c if anything is allocated on the stack there. Set in reload1.c if anything is allocated on the stack there. */ extern int frame_pointer_needed; /* Nonzero if the generated code should trap on signed overflow for PLUS / SUB / MULT. */ extern int flag_trapv; /* Nonzero if the signed arithmetic overflow should wrap around. */ extern int flag_wrapv; /* Nonzero if subexpressions must be evaluated from left-to-right. */ extern int flag_evaluation_order; /* Value of the -G xx switch, and whether it was passed or not. */ extern unsigned HOST_WIDE_INT g_switch_value; extern bool g_switch_set; /* Values of the -falign-* flags: how much to align labels in code. 0 means `use default', 1 means `don't align'. For each variable, there is an _log variant which is the power of two not less than the variable, for .align output. */ extern int align_loops; extern int align_loops_log; extern int align_loops_max_skip; extern int align_jumps; extern int align_jumps_log; extern int align_jumps_max_skip; extern int align_labels; extern int align_labels_log; extern int align_labels_max_skip; extern int align_functions; extern int align_functions_log; /* Like align_functions_log above, but used by front-ends to force the minimum function alignment. Zero means no alignment is forced. */ extern int force_align_functions_log; /* Nonzero if we dump in VCG format, not plain text. */ extern int dump_for_graph; /* Selection of the graph form. */ enum graph_dump_types { no_graph = 0, vcg }; extern enum graph_dump_types graph_dump_format; /* Nonzero means ignore `#ident' directives. 0 means handle them. On SVR4 targets, it also controls whether or not to emit a string identifying the compiler. */ extern int flag_no_ident; /* Nonzero means perform global CSE. */ extern int flag_gcse; /* Nonzero if we want to perform enhanced load motion during gcse. */ extern int flag_gcse_lm; /* Nonzero if we want to perform store motion after gcse. */ extern int flag_gcse_sm; /* Nonzero if we want to perform redundant load-after-store elimination in gcse. */ extern int flag_gcse_las; /* Nonzero if we want to perform global redundancy elimination after register allocation. */ extern int flag_gcse_after_reload; /* Nonzero if value histograms should be used to optimize code. 
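   One such transformation is sketched below; the sketch is not part of the
   original source and its names are hypothetical.  A division whose divisor
   is, according to the profile, almost always one known value can be
   specialized: test for that value, divide by the constant in the hot case,
   and keep the general division as a fallback.  */

#define EXAMPLE_COMMON_DIVISOR 8	/* value suggested by the profile  */

static int
example_value_profiled_divide (int x, int y)
{
  if (y == EXAMPLE_COMMON_DIVISOR)
    return x / EXAMPLE_COMMON_DIVISOR;	/* constant divisor: much cheaper  */
  return x / y;				/* general, rarely taken fallback  */
}

/* The declaration documented above: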
*/ extern int flag_value_profile_transformations; /* Perform branch target register optimization before prologue / epilogue threading. */ extern int flag_branch_target_load_optimize; /* Perform branch target register optimization after prologue / epilogue threading and jump2. */ extern int flag_branch_target_load_optimize2; /* For the bt-load pass, nonzero means don't re-use branch target registers in any basic block. */ extern int flag_btr_bb_exclusive; /* Nonzero means we should do dwarf2 duplicate elimination. */ extern int flag_eliminate_dwarf2_dups; /* Nonzero means we should do unused type elimination. */ extern int flag_eliminate_unused_debug_types; /* Nonzero means to collect statistics which might be expensive and to print them when we are done. */ extern int flag_detailed_statistics; /* Nonzero means enable synchronous exceptions for non-call instructions. */ extern int flag_non_call_exceptions; /* Nonzero means enable mudflap bounds-checking transforms; >1 means also to include multithreading locks. */ extern int flag_mudflap; extern int flag_mudflap_threads; extern int flag_mudflap_ignore_reads; /* Enable SSA-PRE on trees. */ extern int flag_tree_pre; /* Enable SSA-CCP on trees. */ extern int flag_tree_ccp; /* Enable SSA-DCE on trees. */ extern int flag_tree_dce; /* Enable SSA->normal pass memory location coalescing. */ extern int flag_tree_combine_temps; /* Enable SSA->normal pass expression replacement. */ extern int flag_tree_ter; /* Enable SSA_>normal live range splitting. */ extern int flag_tree_live_range_split; /* Enable dominator optimizations. */ extern int flag_tree_dom; /* Enable loop header copying on tree-ssa. */ extern int flag_tree_ch; /* Enable dead store and redundant load elimination */ extern int flag_tree_dse; /* Enable scalar replacement of aggregates. */ extern int flag_tree_sra; /* Enable copy rename optimization. */ extern int flag_tree_copyrename; /* Enable points-to analysis on trees. */ enum pta_type { PTA_NONE, PTA_ANDERSEN }; extern enum pta_type flag_tree_points_to; /* Enable FRE (Full Redundancy Elimination) on trees. */ extern int flag_tree_fre; /* Nonzero means put zero initialized data in the bss section. */ extern int flag_zero_initialized_in_bss; /* Nonzero means disable transformations observable by signaling NaNs. */ extern int flag_signaling_nans; extern int flag_unit_at_a_time; extern int flag_web; /* Nonzero means that we defer emitting functions until they are actually used. */ extern int flag_remove_unreachable_functions; /* Nonzero if we should track variables. */ extern int flag_var_tracking; /* A string that's used when a random name is required. NULL means to make it really random. */ extern const char *flag_random_seed; /* The version of the C++ ABI in use. The following values are allowed: 0: The version of the ABI believed most conformant with the C++ ABI specification. This ABI may change as bugs are discovered and fixed. Therefore, 0 will not necessarily indicate the same ABI in different versions of G++. 1: The version of the ABI first used in G++ 3.2. Additional positive integers will be assigned as new versions of the ABI become the default version of the ABI. */ extern int flag_abi_version; /* Returns TRUE if generated code should match ABI version N or greater is in use. */ #define abi_version_at_least(N) \ (flag_abi_version == 0 || flag_abi_version >= (N)) /* True if the given mode has a NaN representation and the treatment of NaN operands is important. 
Certain optimizations, such as folding x * 0 into x, are not correct for NaN operands, and are normally disabled for modes with NaNs. The user can ask for them to be done anyway using the -funsafe-math-optimizations switch. */ #define HONOR_NANS(MODE) \ (MODE_HAS_NANS (MODE) && !flag_finite_math_only) /* Like HONOR_NANs, but true if we honor signaling NaNs (or sNaNs). */ #define HONOR_SNANS(MODE) (flag_signaling_nans && HONOR_NANS (MODE)) /* As for HONOR_NANS, but true if the mode can represent infinity and the treatment of infinite values is important. */ #define HONOR_INFINITIES(MODE) \ (MODE_HAS_INFINITIES (MODE) && !flag_finite_math_only) /* Like HONOR_NANS, but true if the given mode distinguishes between positive and negative zero, and the sign of zero is important. */ #define HONOR_SIGNED_ZEROS(MODE) \ (MODE_HAS_SIGNED_ZEROS (MODE) && !flag_unsafe_math_optimizations) /* Like HONOR_NANS, but true if given mode supports sign-dependent rounding, and the rounding mode is important. */ #define HONOR_SIGN_DEPENDENT_ROUNDING(MODE) \ (MODE_HAS_SIGN_DEPENDENT_ROUNDING (MODE) && flag_rounding_math) #endif /* ! GCC_FLAGS_H */ /* Structure for saving state for a nested function. Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_FUNCTION_H #define GCC_FUNCTION_H struct var_refs_queue GTY(()) { rtx modified; enum machine_mode promoted_mode; int unsignedp; struct var_refs_queue *next; }; /* Stack of pending (incomplete) sequences saved by `start_sequence'. Each element describes one pending sequence. The main insn-chain is saved in the last element of the chain, unless the chain is empty. */ struct sequence_stack GTY(()) { /* First and last insns in the chain of the saved sequence. */ rtx first; rtx last; struct sequence_stack *next; }; extern struct sequence_stack *sequence_stack; /* Stack of single obstacks. */ struct simple_obstack_stack { struct obstack *obstack; struct simple_obstack_stack *next; }; struct emit_status GTY(()) { /* This is reset to LAST_VIRTUAL_REGISTER + 1 at the start of each function. After rtl generation, it is 1 plus the largest register number used. */ int x_reg_rtx_no; /* Lowest label number in current function. */ int x_first_label_num; /* The ends of the doubly-linked chain of rtl for the current function. Both are reset to null at the start of rtl generation for the function. start_sequence saves both of these on `sequence_stack' and then starts a new, nested sequence of insns. */ rtx x_first_insn; rtx x_last_insn; /* Stack of pending (incomplete) sequences saved by `start_sequence'. Each element describes one pending sequence. The main insn-chain is saved in the last element of the chain, unless the chain is empty. */ struct sequence_stack *sequence_stack; /* INSN_UID for next insn emitted. 
Reset to 1 for each function compiled. */ int x_cur_insn_uid; /* Location the last line-number NOTE emitted. This is used to avoid generating duplicates. */ location_t x_last_location; /* The length of the regno_pointer_align, regno_decl, and x_regno_reg_rtx vectors. Since these vectors are needed during the expansion phase when the total number of registers in the function is not yet known, the vectors are copied and made bigger when necessary. */ int regno_pointer_align_length; /* Indexed by pseudo register number, if nonzero gives the known alignment for that pseudo (if REG_POINTER is set in x_regno_reg_rtx). Allocated in parallel with x_regno_reg_rtx. */ unsigned char * GTY ((length ("%h.x_reg_rtx_no"))) regno_pointer_align; /* Indexed by pseudo register number, gives the rtx for that pseudo. Allocated in parallel with regno_pointer_align. */ rtx * GTY ((length ("%h.x_reg_rtx_no"))) x_regno_reg_rtx; }; /* For backward compatibility... eventually these should all go away. */ #define reg_rtx_no (cfun->emit->x_reg_rtx_no) #define regno_reg_rtx (cfun->emit->x_regno_reg_rtx) #define seq_stack (cfun->emit->sequence_stack) #define REGNO_POINTER_ALIGN(REGNO) (cfun->emit->regno_pointer_align[REGNO]) struct expr_status GTY(()) { /* Number of units that we should eventually pop off the stack. These are the arguments to function calls that have already returned. */ int x_pending_stack_adjust; /* Under some ABIs, it is the caller's responsibility to pop arguments pushed for function calls. A naive implementation would simply pop the arguments immediately after each call. However, if several function calls are made in a row, it is typically cheaper to pop all the arguments after all of the calls are complete since a single pop instruction can be used. Therefore, GCC attempts to defer popping the arguments until absolutely necessary. (For example, at the end of a conditional, the arguments must be popped, since code outside the conditional won't know whether or not the arguments need to be popped.) When INHIBIT_DEFER_POP is nonzero, however, the compiler does not attempt to defer pops. Instead, the stack is popped immediately after each call. Rather then setting this variable directly, use NO_DEFER_POP and OK_DEFER_POP. */ int x_inhibit_defer_pop; /* If PREFERRED_STACK_BOUNDARY and PUSH_ROUNDING are defined, the stack boundary can be momentarily unaligned while pushing the arguments. Record the delta since last aligned boundary here in order to get stack alignment in the nested function calls working right. */ int x_stack_pointer_delta; /* Nonzero means __builtin_saveregs has already been done in this function. The value is the pseudoreg containing the value __builtin_saveregs returned. */ rtx x_saveregs_value; /* Similarly for __builtin_apply_args. */ rtx x_apply_args_value; /* List of labels that must never be deleted. */ rtx x_forced_labels; /* Postincrements that still need to be expanded. */ rtx x_pending_chain; }; #define pending_stack_adjust (cfun->expr->x_pending_stack_adjust) #define inhibit_defer_pop (cfun->expr->x_inhibit_defer_pop) #define saveregs_value (cfun->expr->x_saveregs_value) #define apply_args_value (cfun->expr->x_apply_args_value) #define forced_labels (cfun->expr->x_forced_labels) #define pending_chain (cfun->expr->x_pending_chain) #define stack_pointer_delta (cfun->expr->x_stack_pointer_delta) /* This structure can save all the important global and static variables describing the status of the current function. 
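   Before the structure itself, a sketch of the defer-pop discipline
   described above for x_inhibit_defer_pop; the sketch is not part of the
   original source, and the example_* names are hypothetical stand-ins for
   NO_DEFER_POP / OK_DEFER_POP and pending_stack_adjust.  */

static int example_inhibit_defer_pop;
static int example_pending_stack_adjust;

#define EXAMPLE_NO_DEFER_POP (example_inhibit_defer_pop += 1)
#define EXAMPLE_OK_DEFER_POP (example_inhibit_defer_pop -= 1)

static void
example_after_call_pop (int bytes)
{
  example_pending_stack_adjust += bytes;
  if (example_inhibit_defer_pop)
    {
      /* Inside a region (e.g. a conditional) where pops may not be
	 deferred: flush the accumulated adjustment immediately.  */
      /* ... emit the stack-pointer adjustment here ...  */
      example_pending_stack_adjust = 0;
    }
  /* Otherwise the adjustment stays pending, so one instruction can later
     pop the arguments of several calls at once.  */
}

/* The structure documented above: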
*/ struct function GTY(()) { struct eh_status *eh; struct stmt_status *stmt; struct expr_status *expr; struct emit_status *emit; struct varasm_status *varasm; /* For tree-optimize.c. */ /* Saved tree and arguments during tree optimization. Used later for inlining */ tree saved_tree; tree saved_args; /* For function.c. */ /* Points to the FUNCTION_DECL of this function. */ tree decl; /* Function containing this function, if any. */ struct function *outer; /* Number of bytes of args popped by function being compiled on its return. Zero if no bytes are to be popped. May affect compilation of return insn or of function epilogue. */ int pops_args; /* If function's args have a fixed size, this is that size, in bytes. Otherwise, it is -1. May affect compilation of return insn or of function epilogue. */ int args_size; /* # bytes the prologue should push and pretend that the caller pushed them. The prologue must do this, but only if parms can be passed in registers. */ int pretend_args_size; /* # of bytes of outgoing arguments. If ACCUMULATE_OUTGOING_ARGS is defined, the needed space is pushed by the prologue. */ int outgoing_args_size; /* This is the offset from the arg pointer to the place where the first anonymous arg can be found, if there is one. */ rtx arg_offset_rtx; /* Quantities of various kinds of registers used for the current function's args. */ CUMULATIVE_ARGS args_info; /* If nonzero, an RTL expression for the location at which the current function returns its result. If the current function returns its result in a register, current_function_return_rtx will always be the hard register containing the result. */ rtx return_rtx; /* The arg pointer hard register, or the pseudo into which it was copied. */ rtx internal_arg_pointer; /* Opaque pointer used by get_hard_reg_initial_val and has_hard_reg_initial_val (see integrate.[hc]). */ struct initial_value_struct *hard_reg_initial_vals; /* List (chain of EXPR_LIST) of labels heading the current handlers for nonlocal gotos. */ rtx x_nonlocal_goto_handler_labels; /* Label that will go on function epilogue. Jumping to this label serves as a "return" instruction on machines which require execution of the epilogue on all returns. */ rtx x_return_label; /* Label that will go on the end of function epilogue. Jumping to this label serves as a "naked return" instruction on machines which require execution of the epilogue on all returns. */ rtx x_naked_return_label; /* List (chain of EXPR_LISTs) of all stack slots in this function. Made for the sake of unshare_all_rtl. */ rtx x_stack_slot_list; /* Place after which to insert the tail_recursion_label if we need one. */ rtx x_tail_recursion_reentry; /* Location at which to save the argument pointer if it will need to be referenced. There are two cases where this is done: if nonlocal gotos exist, or if vars stored at an offset from the argument pointer will be needed by inner routines. */ rtx x_arg_pointer_save_area; /* Offset to end of allocated area of stack frame. If stack grows down, this is the address of the last stack slot allocated. If stack grows up, this is the address for the next slot. */ HOST_WIDE_INT x_frame_offset; /* A VAR_DECL that should contain the static chain for this function. It will be initialized at the beginning of the function. */ tree static_chain_decl; /* An expression that contains the non-local goto save area. The first word is the saved frame pointer and the second is the saved stack pointer. 
*/ tree nonlocal_goto_save_area; /* Insn after which register parms and SAVE_EXPRs are born, if nonopt. */ rtx x_parm_birth_insn; /* List of all used temporaries allocated, by level. */ struct varray_head_tag * GTY((param_is (struct temp_slot))) x_used_temp_slots; /* List of available temp slots. */ struct temp_slot *x_avail_temp_slots; /* Current nesting level for temporaries. */ int x_temp_slot_level; /* Current nesting level for variables in a block. */ int x_var_temp_slot_level; /* When temporaries are created by TARGET_EXPRs, they are created at this level of temp_slot_level, so that they can remain allocated until no longer needed. CLEANUP_POINT_EXPRs define the lifetime of TARGET_EXPRs. */ int x_target_temp_slot_level; /* This slot is initialized as 0 and is added to during the nested function. */ struct var_refs_queue *fixup_var_refs_queue; /* For integrate.c. */ int inlinable; int no_debugging_symbols; rtvec original_arg_vector; tree original_decl_initial; /* Highest label number in current function. */ int inl_max_label_num; /* Function sequence number for profiling, debugging, etc. */ int funcdef_no; /* For md files. */ /* tm.h can use this to store whatever it likes. */ struct machine_function * GTY ((maybe_undef)) machine; /* The largest alignment of slot allocated on the stack. */ int stack_alignment_needed; /* Preferred alignment of the end of stack frame. */ int preferred_stack_boundary; /* Set when the call to function itself has been emit. */ bool recursive_call_emit; /* Set when the tail call has been produced. */ bool tail_call_emit; /* Language-specific code can use this to store whatever it likes. */ struct language_function * language; /* For reorg. */ /* If some insns can be deferred to the delay slots of the epilogue, the delay list for them is recorded here. */ rtx epilogue_delay_list; /* How commonly executed the function is. Initialized during branch probabilities pass. */ enum function_frequency { /* This function most likely won't be executed at all. (set only when profile feedback is available). */ FUNCTION_FREQUENCY_UNLIKELY_EXECUTED, /* The default value. */ FUNCTION_FREQUENCY_NORMAL, /* Optimize this function hard (set only when profile feedback is available). */ FUNCTION_FREQUENCY_HOT } function_frequency; /* Maximal number of entities in the single jumptable. Used to estimate final flowgraph size. */ int max_jumptable_ents; /* UIDs for LABEL_DECLs. */ int last_label_uid; /* Line number of the end of the function. */ location_t function_end_locus; /* Array mapping insn uids to blocks. */ struct varray_head_tag *ib_boundaries_block; /* The variables unexpanded so far. */ tree unexpanded_var_list; /* Collected bit flags. */ /* Nonzero if function being compiled needs to be given an address where the value should be stored. */ unsigned int returns_struct : 1; /* Nonzero if function being compiled needs to return the address of where it has put a structure value. */ unsigned int returns_pcc_struct : 1; /* Nonzero if the current function returns a pointer type. */ unsigned int returns_pointer : 1; /* Nonzero if function being compiled can call setjmp. */ unsigned int calls_setjmp : 1; /* Nonzero if function being compiled can call longjmp. */ unsigned int calls_longjmp : 1; /* Nonzero if function being compiled can call alloca, either as a subroutine or builtin. */ unsigned int calls_alloca : 1; /* Nonzero if the function calls __builtin_eh_return. 
*/ unsigned int calls_eh_return : 1; /* Nonzero if function being compiled receives nonlocal gotos from nested functions. */ unsigned int has_nonlocal_label : 1; /* Nonzero if function being compiled has nonlocal gotos to parent function. */ unsigned int has_nonlocal_goto : 1; /* Nonzero if function being compiled contains nested functions. */ unsigned int contains_functions : 1; /* Nonzero if the function being compiled issues a computed jump. */ unsigned int has_computed_jump : 1; /* Nonzero if the current function is a thunk, i.e., a lightweight function implemented by the output_mi_thunk hook) that just adjusts one of its arguments and forwards to another function. */ unsigned int is_thunk : 1; /* This bit is used by the exception handling logic. It is set if all calls (if any) are sibling calls. Such functions do not have to have EH tables generated, as they cannot throw. A call to such a function, however, should be treated as throwing if any of its callees can throw. */ unsigned int all_throwers_are_sibcalls : 1; /* Nonzero if instrumentation calls for function entry and exit should be generated. */ unsigned int instrument_entry_exit : 1; /* Nonzero if profiling code should be generated. */ unsigned int profile : 1; /* Nonzero if stack limit checking should be enabled in the current function. */ unsigned int limit_stack : 1; /* Nonzero if current function uses stdarg.h or equivalent. */ unsigned int stdarg : 1; /* Nonzero if the back-end should not keep track of expressions that determine the size of variable-sized objects. Normally, such expressions are saved away, and then expanded when the next function is started. For example, if a parameter has a variable-sized type, then the size of the parameter is computed when the function body is entered. However, some front-ends do not desire this behavior. */ unsigned int x_dont_save_pending_sizes_p : 1; /* Nonzero if the current function uses the constant pool. */ unsigned int uses_const_pool : 1; /* Nonzero if the current function uses pic_offset_table_rtx. */ unsigned int uses_pic_offset_table : 1; /* Nonzero if the current function needs an lsda for exception handling. */ unsigned int uses_eh_lsda : 1; /* Nonzero if code to initialize arg_pointer_save_area has been emitted. */ unsigned int arg_pointer_save_area_init : 1; }; /* The function currently being compiled. */ extern GTY(()) struct function *cfun; /* Pointer to chain of `struct function' for containing functions. */ extern GTY(()) struct function *outer_function_chain; /* Nonzero if we've already converted virtual regs to hard regs. */ extern int virtuals_instantiated; /* Nonzero if at least one trampoline has been created. */ extern int trampolines_created; /* For backward compatibility... eventually these should all go away. 
*/

#define current_function_pops_args (cfun->pops_args)
#define current_function_returns_struct (cfun->returns_struct)
#define current_function_returns_pcc_struct (cfun->returns_pcc_struct)
#define current_function_returns_pointer (cfun->returns_pointer)
#define current_function_calls_setjmp (cfun->calls_setjmp)
#define current_function_calls_alloca (cfun->calls_alloca)
#define current_function_calls_longjmp (cfun->calls_longjmp)
#define current_function_calls_eh_return (cfun->calls_eh_return)
#define current_function_has_computed_jump (cfun->has_computed_jump)
#define current_function_contains_functions (cfun->contains_functions)
#define current_function_is_thunk (cfun->is_thunk)
#define current_function_args_info (cfun->args_info)
#define current_function_args_size (cfun->args_size)
#define current_function_pretend_args_size (cfun->pretend_args_size)
#define current_function_outgoing_args_size (cfun->outgoing_args_size)
#define current_function_arg_offset_rtx (cfun->arg_offset_rtx)
#define current_function_stdarg (cfun->stdarg)
#define current_function_internal_arg_pointer (cfun->internal_arg_pointer)
#define current_function_return_rtx (cfun->return_rtx)
#define current_function_instrument_entry_exit (cfun->instrument_entry_exit)
#define current_function_profile (cfun->profile)
#define current_function_funcdef_no (cfun->funcdef_no)
#define current_function_limit_stack (cfun->limit_stack)
#define current_function_uses_pic_offset_table (cfun->uses_pic_offset_table)
#define current_function_uses_const_pool (cfun->uses_const_pool)
#define current_function_epilogue_delay_list (cfun->epilogue_delay_list)
#define current_function_has_nonlocal_label (cfun->has_nonlocal_label)
#define current_function_has_nonlocal_goto (cfun->has_nonlocal_goto)

#define return_label (cfun->x_return_label)
#define naked_return_label (cfun->x_naked_return_label)
#define stack_slot_list (cfun->x_stack_slot_list)
#define parm_birth_insn (cfun->x_parm_birth_insn)
#define frame_offset (cfun->x_frame_offset)
#define tail_recursion_reentry (cfun->x_tail_recursion_reentry)
#define arg_pointer_save_area (cfun->x_arg_pointer_save_area)
#define used_temp_slots (cfun->x_used_temp_slots)
#define avail_temp_slots (cfun->x_avail_temp_slots)
#define temp_slot_level (cfun->x_temp_slot_level)
#define target_temp_slot_level (cfun->x_target_temp_slot_level)
#define var_temp_slot_level (cfun->x_var_temp_slot_level)
#define nonlocal_labels (cfun->x_nonlocal_labels)
#define nonlocal_goto_handler_labels (cfun->x_nonlocal_goto_handler_labels)

/* Given a function decl for a containing function, return the
   `struct function' for it.  */
struct function *find_function_data (tree);

/* Identify BLOCKs referenced by more than one NOTE_INSN_BLOCK_{BEG,END},
   and create duplicate blocks.  */
extern void reorder_blocks (void);

/* Set BLOCK_NUMBER for all the blocks in FN.  */
extern void number_blocks (tree);

extern void clear_block_marks (tree);
extern tree blocks_nreverse (tree);
extern void reset_block_changes (void);
extern void record_block_change (tree);
extern void finalize_block_changes (void);
extern void check_block_change (rtx, tree *);
extern void free_block_changes (void);

/* Return size needed for stack frame based on slots so far allocated.
   This size counts from zero.  It is not rounded to STACK_BOUNDARY;
   the caller may have to do that.  */
extern HOST_WIDE_INT get_frame_size (void);

/* Likewise, but for a function other than the current one.
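   As noted above, neither entry point rounds the result.  For
   illustration only (hypothetical caller code, not part of this
   header), a caller that needs the size rounded up to the stack
   boundary might write:

       HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
       HOST_WIDE_INT size = (get_frame_size () + align - 1) / align * align;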
*/ extern HOST_WIDE_INT get_func_frame_size (struct function *); /* A pointer to a function to create target specific, per-function data structures. */ extern struct machine_function * (*init_machine_status) (void); /* Save and restore status information for a nested function. */ extern void restore_emit_status (struct function *); extern void free_after_parsing (struct function *); extern void free_after_compilation (struct function *); extern void init_varasm_status (struct function *); #ifdef RTX_CODE extern void diddle_return_value (void (*)(rtx, void*), void*); extern void clobber_return_register (void); extern void use_return_register (void); #endif extern rtx get_arg_pointer_save_area (struct function *); extern void init_virtual_regs (struct emit_status *); extern void instantiate_virtual_regs (void); /* Returns the name of the current function. */ extern const char *current_function_name (void); /* Called once, at initialization, to initialize function.c. */ extern void init_function_once (void); extern void do_warn_unused_parameter (tree); #endif /* GCC_FUNCTION_H */ /* Tree inlining hooks and declarations. Copyright 2001, 2003 Free Software Foundation, Inc. Contributed by Alexandre Oliva This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_TREE_INLINE_H #define GCC_TREE_INLINE_H /* Function prototypes. */ void optimize_inline_calls (tree); bool tree_inlinable_function_p (tree); tree copy_tree_r (tree*, int*, void*); void clone_body (tree, tree, void*); tree save_body (tree, tree *); void remap_save_expr (tree*, void*, int*); int estimate_num_insns (tree expr); /* 0 if we should not perform inlining. 1 if we should expand functions calls inline at the tree level. 2 if we should consider *all* functions to be inline candidates. */ extern int flag_inline_trees; #endif /* GCC_TREE_INLINE_H */ /* Functions to analyze and validate GIMPLE trees. Copyright (C) 2002, 2003 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef _TREE_SIMPLE_H #define _TREE_SIMPLE_H 1 /* Iterator routines for manipulating GENERIC and GIMPLE tree statements. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Andrew MacLeod This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to the Free
   Software Foundation, 59 Temple Place - Suite 330, Boston, MA
   02111-1307, USA.  */

/* This file is dependent upon the implementation of trees.  It provides
   an abstract interface to the tree objects such that if all tree
   creation and manipulation is done through this interface, we can
   easily change the implementation of trees without impacting other
   code.  */

#ifndef GCC_TREE_ITERATOR_H
#define GCC_TREE_ITERATOR_H 1

/* Iterator object for GENERIC or GIMPLE TREE statements.  */

typedef struct {
  struct tree_statement_list_node *ptr;
  tree container;
} tree_stmt_iterator;

static inline tree_stmt_iterator
tsi_start (tree t)
{
  tree_stmt_iterator i;
  i.ptr = STATEMENT_LIST_HEAD (t);
  i.container = t;
  return i;
}

static inline tree_stmt_iterator
tsi_last (tree t)
{
  tree_stmt_iterator i;
  i.ptr = STATEMENT_LIST_TAIL (t);
  i.container = t;
  return i;
}

static inline bool
tsi_end_p (tree_stmt_iterator i)
{
  return i.ptr == NULL;
}

static inline bool
tsi_one_before_end_p (tree_stmt_iterator i)
{
  return i.ptr != NULL && i.ptr->next == NULL;
}

static inline void
tsi_next (tree_stmt_iterator *i)
{
  i->ptr = i->ptr->next;
}

static inline void
tsi_prev (tree_stmt_iterator *i)
{
  i->ptr = i->ptr->prev;
}

static inline tree *
tsi_stmt_ptr (tree_stmt_iterator i)
{
  return &i.ptr->stmt;
}

static inline tree
tsi_stmt (tree_stmt_iterator i)
{
  return i.ptr->stmt;
}

enum tsi_iterator_update
{
  /* Only valid when a single statement is added; move the iterator
     to it.  */
  TSI_NEW_STMT,
  /* Leave the iterator at the same statement.  */
  TSI_SAME_STMT,
  /* Only valid when a chain of statements is added; move the iterator
     to the first statement in the chain.  */
  TSI_CHAIN_START,
  /* Only valid when a chain of statements is added; move the iterator
     to the last statement in the chain.  */
  TSI_CHAIN_END,
  /* Move the iterator to whatever position is suitable for linking
     other statements/chains of statements in the same direction.  */
  TSI_CONTINUE_LINKING
};

extern void tsi_link_before (tree_stmt_iterator *, tree,
                             enum tsi_iterator_update);
extern void tsi_link_after (tree_stmt_iterator *, tree,
                            enum tsi_iterator_update);

void tsi_delink (tree_stmt_iterator *);
tree tsi_split_statement_list_after (const tree_stmt_iterator *);
tree tsi_split_statement_list_before (tree_stmt_iterator *);

void append_to_statement_list (tree, tree *);
void append_to_statement_list_force (tree, tree *);

#endif /* GCC_TREE_ITERATOR_H */

extern tree create_tmp_var_raw (tree, const char *);
extern tree create_tmp_var_name (const char *);
extern tree create_tmp_var (tree, const char *);
extern bool is_gimple_tmp_var (tree);
extern tree get_initialized_tmp_var (tree, tree *, tree *);
extern tree get_formal_tmp_var (tree, tree *);
extern void declare_tmp_vars (tree, tree);

extern void annotate_all_with_locus (tree *, location_t);

/* Validation of GIMPLE expressions.  Note that these predicates only
   check the basic form of the expression; they don't recurse to make
   sure that underlying nodes are also of the right form.
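   For illustration only (hypothetical gimplification code, not part of
   this header), a caller that needs an operand in GIMPLE rvalue form
   would typically test it with one of these predicates and only force
   it into a temporary when the test fails:

       if (!is_gimple_val (op))
         op = get_formal_tmp_var (op, pre_p);

   where PRE_P points to the statement list that receives any new
   assignments.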
*/ /* Returns true iff T is a valid GIMPLE statement. */ extern bool is_gimple_stmt (tree); /* Returns true iff TYPE is a valid type for a scalar register variable. */ extern bool is_gimple_reg_type (tree); /* Returns true iff T is a scalar register variable. */ extern bool is_gimple_reg (tree); /* Returns true iff T is any sort of variable. */ extern bool is_gimple_variable (tree); /* Returns true iff T is a variable or an INDIRECT_REF (of a variable). */ extern bool is_gimple_min_lval (tree); /* Returns true iff T is an lvalue other than an INDIRECT_REF. */ extern bool is_gimple_addr_expr_arg (tree); /* Returns true iff T is any valid GIMPLE lvalue. */ extern bool is_gimple_lvalue (tree); /* Returns true iff T is a GIMPLE restricted function invariant. */ extern bool is_gimple_min_invariant (tree); /* Returns true iff T is a GIMPLE rvalue. */ extern bool is_gimple_val (tree); /* Returns true iff T is a valid rhs for a MODIFY_EXPR. */ extern bool is_gimple_rhs (tree); /* Returns true iff T is a valid if-statement condition. */ extern bool is_gimple_condexpr (tree); /* Returns true iff T is a type conversion. */ extern bool is_gimple_cast (tree); /* Returns true iff T is a valid CONSTRUCTOR element (either an rvalue or another CONSTRUCTOR). */ extern bool is_gimple_constructor_elt (tree); /* Returns true iff T is a variable that does not need to live in memory. */ extern bool is_gimple_non_addressable (tree t); /* Returns true iff T is a valid call address expression. */ extern bool is_gimple_call_addr (tree); /* If T makes a function call, returns the CALL_EXPR operand. */ extern tree get_call_expr_in (tree t); extern void recalculate_side_effects (tree); /* FIXME we should deduce this from the predicate. */ typedef enum fallback_t { fb_none = 0, fb_rvalue = 1, fb_lvalue = 2, fb_mayfail = 4, fb_either= fb_rvalue | fb_lvalue } fallback_t; enum gimplify_status { GS_ERROR = -2, /* Something Bad Seen. */ GS_UNHANDLED = -1, /* A langhook result for "I dunno". */ GS_OK = 0, /* We did something, maybe more to do. */ GS_ALL_DONE = 1 /* The expression is fully gimplified. */ }; extern enum gimplify_status gimplify_expr (tree *, tree *, tree *, bool (*) (tree), fallback_t); extern void gimplify_type_sizes (tree, tree *); extern void gimplify_one_sizepos (tree *, tree *); extern void gimplify_stmt (tree *); extern void gimplify_to_stmt_list (tree *); extern void gimplify_body (tree *, tree); extern void push_gimplify_context (void); extern void pop_gimplify_context (tree); extern void gimplify_and_add (tree, tree *); /* Miscellaneous helpers. */ extern tree get_base_address (tree t); extern void gimple_add_tmp_var (tree); extern tree gimple_current_bind_expr (void); extern void gimple_push_bind_expr (tree); extern void gimple_pop_bind_expr (void); extern void unshare_all_trees (tree); extern tree voidify_wrapper_expr (tree, tree); extern tree gimple_build_eh_filter (tree, tree, tree); extern tree build_and_jump (tree *); extern tree alloc_stmt_list (void); extern void free_stmt_list (tree); extern tree force_labels_r (tree *, int *, void *); extern enum gimplify_status gimplify_va_arg_expr (tree *, tree *, tree *); /* In tree-nested.c. */ extern void lower_nested_functions (tree); #endif /* _TREE_SIMPLE_H */ /* Data and Control Flow Analysis for Trees. Copyright (C) 2001, 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef _TREE_FLOW_H #define _TREE_FLOW_H 1 /* SSA operand management for trees. Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_TREE_SSA_OPERANDS_H #define GCC_TREE_SSA_OPERANDS_H /* Interface to SSA operands. */ /* This represents a pointer to a DEF operand. */ typedef struct def_operand_ptr GTY(()) { tree * GTY((skip(""))) def; } def_operand_p; /* This represents a pointer to a USE operand. */ typedef struct use_operand_ptr GTY(()) { tree * GTY((skip(""))) use; } use_operand_p; /* This represents the DEF operands of a stmt. */ typedef struct def_optype_d GTY(()) { unsigned num_defs; struct def_operand_ptr GTY((length("%h.num_defs"))) defs[1]; } def_optype_t; typedef def_optype_t *def_optype; /* This represents the USE operands of a stmt. */ typedef struct use_optype_d GTY(()) { unsigned num_uses; struct use_operand_ptr GTY((length("%h.num_uses"))) uses[1]; } use_optype_t; typedef use_optype_t *use_optype; /* This represents the MAY_DEFS for a stmt. */ typedef struct v_may_def_optype_d GTY(()) { unsigned num_v_may_defs; tree GTY((length ("%h.num_v_may_defs * 2"))) v_may_defs[1]; } v_may_def_optype_t; typedef v_may_def_optype_t *v_may_def_optype; /* This represents the VUSEs for a stmt. */ typedef struct vuse_optype_d GTY(()) { unsigned num_vuses; tree GTY((length ("%h.num_vuses"))) vuses[1]; } vuse_optype_t; typedef vuse_optype_t *vuse_optype; /* This represents the V_MUST_DEFS for a stmt. */ typedef struct v_must_def_optype_d GTY(()) { unsigned num_v_must_defs; tree GTY((length("%h.num_v_must_defs"))) v_must_defs[1]; } v_must_def_optype_t; typedef v_must_def_optype_t *v_must_def_optype; #define USE_FROM_PTR(OP) get_use_from_ptr (OP) #define DEF_FROM_PTR(OP) get_def_from_ptr (OP) #define SET_USE(OP, V) ((*((OP).use)) = (V)) #define SET_DEF(OP, V) ((*((OP).def)) = (V)) #define USE_OPS(ANN) get_use_ops (ANN) #define STMT_USE_OPS(STMT) get_use_ops (stmt_ann (STMT)) #define NUM_USES(OPS) ((OPS) ? 
(OPS)->num_uses : 0) #define USE_OP_PTR(OPS, I) get_use_op_ptr ((OPS), (I)) #define USE_OP(OPS, I) (USE_FROM_PTR (USE_OP_PTR ((OPS), (I)))) #define SET_USE_OP(OPS, I, V) (SET_USE (USE_OP_PTR ((OPS), (I)), (V))) #define DEF_OPS(ANN) get_def_ops (ANN) #define STMT_DEF_OPS(STMT) get_def_ops (stmt_ann (STMT)) #define NUM_DEFS(OPS) ((OPS) ? (OPS)->num_defs : 0) #define DEF_OP_PTR(OPS, I) get_def_op_ptr ((OPS), (I)) #define DEF_OP(OPS, I) (DEF_FROM_PTR (DEF_OP_PTR ((OPS), (I)))) #define SET_DEF_OP(OPS, I, V) (SET_DEF (DEF_OP_PTR ((OPS), (I)), (V))) #define V_MAY_DEF_OPS(ANN) get_v_may_def_ops (ANN) #define STMT_V_MAY_DEF_OPS(STMT) get_v_may_def_ops (stmt_ann(STMT)) #define NUM_V_MAY_DEFS(OPS) ((OPS) ? (OPS)->num_v_may_defs : 0) #define V_MAY_DEF_RESULT_PTR(OPS, I) get_v_may_def_result_ptr ((OPS), (I)) #define V_MAY_DEF_RESULT(OPS, I) \ (DEF_FROM_PTR (V_MAY_DEF_RESULT_PTR ((OPS), (I)))) #define SET_V_MAY_DEF_RESULT(OPS, I, V) \ (SET_DEF (V_MAY_DEF_RESULT_PTR ((OPS), (I)), (V))) #define V_MAY_DEF_OP_PTR(OPS, I) get_v_may_def_op_ptr ((OPS), (I)) #define V_MAY_DEF_OP(OPS, I) \ (USE_FROM_PTR (V_MAY_DEF_OP_PTR ((OPS), (I)))) #define SET_V_MAY_DEF_OP(OPS, I, V) \ (SET_USE (V_MAY_DEF_OP_PTR ((OPS), (I)), (V))) #define VUSE_OPS(ANN) get_vuse_ops (ANN) #define STMT_VUSE_OPS(STMT) get_vuse_ops (stmt_ann(STMT)) #define NUM_VUSES(OPS) ((OPS) ? (OPS)->num_vuses : 0) #define VUSE_OP_PTR(OPS, I) get_vuse_op_ptr ((OPS), (I)) #define VUSE_OP(OPS, I) (USE_FROM_PTR (VUSE_OP_PTR ((OPS), (I)))) #define SET_VUSE_OP(OPS, I, V) (SET_USE (VUSE_OP_PTR ((OPS), (I)), (V))) #define V_MUST_DEF_OPS(ANN) get_v_must_def_ops (ANN) #define STMT_V_MUST_DEF_OPS(STMT) get_v_must_def_ops (stmt_ann (STMT)) #define NUM_V_MUST_DEFS(OPS) ((OPS) ? (OPS)->num_v_must_defs : 0) #define V_MUST_DEF_OP_PTR(OPS, I) get_v_must_def_op_ptr ((OPS), (I)) #define V_MUST_DEF_OP(OPS, I) \ (DEF_FROM_PTR (V_MUST_DEF_OP_PTR ((OPS), (I)))) #define SET_V_MUST_DEF_OP(OPS, I, V) \ (SET_DEF (V_MUST_DEF_OP_PTR ((OPS), (I)), (V))) #define PHI_RESULT_PTR(PHI) get_phi_result_ptr (PHI) #define PHI_RESULT(PHI) DEF_FROM_PTR (PHI_RESULT_PTR (PHI)) #define SET_PHI_RESULT(PHI, V) SET_DEF (PHI_RESULT_PTR (PHI), (V)) #define PHI_ARG_DEF_PTR(PHI, I) get_phi_arg_def_ptr ((PHI), (I)) #define PHI_ARG_DEF(PHI, I) USE_FROM_PTR (PHI_ARG_DEF_PTR ((PHI), (I))) #define SET_PHI_ARG_DEF(PHI, I, V) \ SET_USE (PHI_ARG_DEF_PTR ((PHI), (I)), (V)) #define PHI_ARG_DEF_FROM_EDGE(PHI, E) \ PHI_ARG_DEF ((PHI), \ phi_arg_from_edge ((PHI),(E))) #define PHI_ARG_DEF_PTR_FROM_EDGE(PHI, E) \ PHI_ARG_DEF_PTR ((PHI), \ phi_arg_from_edge ((PHI),(E))) extern void init_ssa_operands (void); extern void fini_ssa_operands (void); extern void verify_start_operands (tree); extern void finalize_ssa_stmt_operands (tree); void add_vuse (tree, tree); extern void get_stmt_operands (tree); extern void remove_vuses (tree); extern void remove_v_may_defs (tree); extern void remove_v_must_defs (tree); extern void copy_virtual_operands (tree, tree); #endif /* GCC_TREE_SSA_OPERANDS_H */ /* Forward declare structures for the garbage collector GTY markers. */ #ifndef GCC_BASIC_BLOCK_H struct edge_def; typedef struct edge_def *edge; struct basic_block_def; typedef struct basic_block_def *basic_block; #endif /*--------------------------------------------------------------------------- Attributes for SSA_NAMEs. NOTE: These structures are stored in struct tree_ssa_name but are only used by the tree optimizers, so it makes better sense to declare them here to avoid recompiling unrelated files when making changes. 
---------------------------------------------------------------------------*/ /* Aliasing information for SSA_NAMEs representing pointer variables. */ struct ptr_info_def GTY(()) { /* Nonzero if points-to analysis couldn't determine where this pointer is pointing to. */ unsigned int pt_anything : 1; /* Nonzero if this pointer is the result of a call to malloc. */ unsigned int pt_malloc : 1; /* Nonzero if the value of this pointer escapes the current function. */ unsigned int value_escapes_p : 1; /* Set of variables that this pointer may point to. */ bitmap pt_vars; /* If this pointer has been dereferenced, and points-to information is more precise than type-based aliasing, indirect references to this pointer will be represented by this memory tag, instead of the type tag computed by TBAA. */ tree name_mem_tag; }; /*--------------------------------------------------------------------------- Tree annotations stored in tree_common.ann ---------------------------------------------------------------------------*/ enum tree_ann_type { TREE_ANN_COMMON, VAR_ANN, STMT_ANN }; struct tree_ann_common_d GTY(()) { /* Annotation type. */ enum tree_ann_type type; /* The value handle for this expression. Used by GVN-PRE. */ tree GTY((skip)) value_handle; }; /* It is advantageous to avoid things like life analysis for variables which do not need PHI nodes. This enum describes whether or not a particular variable may need a PHI node. */ enum need_phi_state { /* This is the default. If we are still in this state after finding all the definition and use sites, then we will assume the variable needs PHI nodes. This is probably an overly conservative assumption. */ NEED_PHI_STATE_UNKNOWN, /* This state indicates that we have seen one or more sets of the variable in a single basic block and that the sets dominate all uses seen so far. If after finding all definition and use sites we are still in this state, then the variable does not need any PHI nodes. */ NEED_PHI_STATE_NO, /* This state indicates that we have either seen multiple definitions of the variable in multiple blocks, or that we encountered a use in a block that was not dominated by the block containing the set(s) of this variable. This variable is assumed to need PHI nodes. */ NEED_PHI_STATE_MAYBE }; /* When computing aliasing information, we represent the memory pointed-to by pointers with artificial variables called "memory tags" (MT). There are two kinds of tags: type and name. Type tags (TMT) are used in type-based alias analysis, they represent all the pointed-to locations and variables of the same alias set class. Name tags (NMT) are used in flow-sensitive points-to alias analysis, they represent the variables and memory locations pointed-to by a specific SSA_NAME pointer. */ enum mem_tag_kind { /* This variable is not a memory tag. */ NOT_A_TAG, /* This variable is a type memory tag (TMT). */ TYPE_TAG, /* This variable is a name memory tag (NMT). */ NAME_TAG }; struct var_ann_d GTY(()) { struct tree_ann_common_d common; /* Nonzero if this variable has uses which may not appear in the IL. This can happen in the following cases: 1. If the variable is used in a variable length array declaration. 2. If the variable is the return value in a C++ function where the named return value optimization has been performed. */ unsigned has_hidden_use : 1; /* Used by the out of SSA pass to determine whether this variable has been seen yet or not. */ unsigned out_of_ssa_tag : 1; /* Used when building root_var structures in tree_ssa_live.[ch]. 
*/ unsigned root_var_processed : 1; /* If nonzero, this variable is a memory tag. */ ENUM_BITFIELD (mem_tag_kind) mem_tag_kind : 2; /* Nonzero if this variable is an alias tag that represents references to other variables (i.e., this variable appears in the MAY_ALIASES array of other variables). */ unsigned is_alias_tag : 1; /* Nonzero if this variable was used after SSA optimizations were applied. We set this when translating out of SSA form. */ unsigned used : 1; /* This field indicates whether or not the variable may need PHI nodes. See the enum's definition for more detailed information about the states. */ ENUM_BITFIELD (need_phi_state) need_phi_state : 2; /* An artificial variable representing the memory location pointed-to by all the pointers that TBAA (type-based alias analysis) considers to be aliased. If the variable is not a pointer or if it is never dereferenced, this must be NULL. */ tree type_mem_tag; /* Variables that may alias this variable. */ varray_type may_aliases; /* Unique ID of this variable. */ size_t uid; /* Used when going out of SSA form to indicate which partition this variable represents storage for. */ unsigned partition; /* Used by the root-var object in tree-ssa-live.[ch]. */ unsigned root_index; /* Default definition for this symbol. If this field is not NULL, it means that the first reference to this variable in the function is a USE or a VUSE. In those cases, the SSA renamer creates an SSA name for this variable with an empty defining statement. */ tree default_def; /* During into-ssa and the dominator optimizer, this field holds the current version of this variable (an SSA_NAME). This was previously two varrays (one in into-ssa the other in the dominator optimizer). That is wasteful, particularly since the dominator optimizer calls into-ssa resulting in having two varrays live at the same time and this can happen for each call to the dominator optimizer. */ tree current_def; }; struct dataflow_d GTY(()) { /* Immediate uses. This is a list of all the statements and PHI nodes that are immediately reached by the definitions made in this statement. */ varray_type immediate_uses; /* Use this array for very small numbers of uses instead of the varray. */ tree uses[2]; /* Reached uses. This is a list of all the possible program statements that may be reached directly or indirectly by definitions made in this statement. Notice that this is a superset of IMMEDIATE_USES. For instance, given the following piece of code: 1 a1 = 10; 2 if (a1 > 3) 3 a2 = a1 + 5; 4 a3 = PHI (a1, a2) 5 b1 = a3 - 2; IMMEDIATE_USES for statement #1 are all those statements that use a1 directly (i.e., #2, #3 and #4). REACHED_USES for statement #1 also includes statement #5 because 'a1' could reach 'a3' via the PHI node at statement #4. The set of REACHED_USES is then the transitive closure over all the PHI nodes in the IMMEDIATE_USES set. */ /* Reaching definitions. Similarly to REACHED_USES, the set REACHING_DEFS is the set of all the statements that make definitions that may reach this statement. Notice that we don't need to have a similar entry for immediate definitions, as these are represented by the SSA_NAME nodes themselves (each SSA_NAME node contains a pointer to the statement that makes that definition). */ }; typedef struct dataflow_d *dataflow_t; struct stmt_ann_d GTY(()) { struct tree_ann_common_d common; /* Nonzero if the statement has been modified (meaning that the operands need to be scanned again). 
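     For illustration only (hypothetical pass code, not part of this
     header), the usual protocol after rewriting part of a statement in
     place is to set this flag through modify_stmt and let the next
     operand scan pick up the change:

         propagate_value (use_p, new_val);
         modify_stmt (stmt);
         ...
         get_stmt_operands (stmt);

     where the final call rescans STMT precisely because this bit is set.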
*/ unsigned modified : 1; /* Nonzero if the statement is in the CCP worklist and has not been "cancelled". If we ever need to use this bit outside CCP, then it should be renamed. */ unsigned in_ccp_worklist: 1; /* Nonzero if the statement makes aliased loads. */ unsigned makes_aliased_loads : 1; /* Nonzero if the statement makes aliased stores. */ unsigned makes_aliased_stores : 1; /* Nonzero if the statement makes references to volatile storage. */ unsigned has_volatile_ops : 1; /* Nonzero if the statement makes a function call that may clobber global and local addressable variables. */ unsigned makes_clobbering_call : 1; /* Basic block that contains this statement. */ basic_block GTY ((skip (""))) bb; /* Statement operands. */ struct def_optype_d * GTY (()) def_ops; struct use_optype_d * GTY (()) use_ops; /* Virtual operands (V_MAY_DEF, VUSE, and V_MUST_DEF). */ struct v_may_def_optype_d * GTY (()) v_may_def_ops; struct vuse_optype_d * GTY (()) vuse_ops; struct v_must_def_optype_d * GTY (()) v_must_def_ops; /* Dataflow information. */ dataflow_t df; /* Set of variables that have had their address taken in the statement. */ bitmap addresses_taken; /* Unique identifier for this statement. These ID's are to be created by each pass on an as-needed basis in any order convenient for the pass which needs statement UIDs. */ unsigned int uid; }; union tree_ann_d GTY((desc ("ann_type ((tree_ann_t)&%h)"))) { struct tree_ann_common_d GTY((tag ("TREE_ANN_COMMON"))) common; struct var_ann_d GTY((tag ("VAR_ANN"))) decl; struct stmt_ann_d GTY((tag ("STMT_ANN"))) stmt; }; typedef union tree_ann_d *tree_ann_t; typedef struct var_ann_d *var_ann_t; typedef struct stmt_ann_d *stmt_ann_t; static inline tree_ann_t tree_ann (tree); static inline tree_ann_t get_tree_ann (tree); static inline var_ann_t var_ann (tree); static inline var_ann_t get_var_ann (tree); static inline stmt_ann_t stmt_ann (tree); static inline stmt_ann_t get_stmt_ann (tree); static inline enum tree_ann_type ann_type (tree_ann_t); static inline basic_block bb_for_stmt (tree); extern void set_bb_for_stmt (tree, basic_block); static inline void modify_stmt (tree); static inline void unmodify_stmt (tree); static inline bool stmt_modified_p (tree); static inline varray_type may_aliases (tree); static inline int get_lineno (tree); static inline const char *get_filename (tree); static inline bool is_exec_stmt (tree); static inline bool is_label_stmt (tree); static inline v_may_def_optype get_v_may_def_ops (stmt_ann_t); static inline vuse_optype get_vuse_ops (stmt_ann_t); static inline use_optype get_use_ops (stmt_ann_t); static inline def_optype get_def_ops (stmt_ann_t); static inline bitmap addresses_taken (tree); static inline int num_immediate_uses (dataflow_t); static inline tree immediate_use (dataflow_t, int); static inline dataflow_t get_immediate_uses (tree); static inline bool has_hidden_use (tree); static inline void set_has_hidden_use (tree); static inline void set_default_def (tree, tree); static inline tree default_def (tree); static inline bool may_be_aliased (tree); /*--------------------------------------------------------------------------- Structure representing predictions in tree level. 
---------------------------------------------------------------------------*/ struct edge_prediction GTY((chain_next ("%h.next"))) { struct edge_prediction *next; edge edge; enum br_predictor predictor; int probability; }; /*--------------------------------------------------------------------------- Block annotations stored in basic_block.tree_annotations ---------------------------------------------------------------------------*/ struct bb_ann_d GTY(()) { /* Chain of PHI nodes for this block. */ tree phi_nodes; /* Number of predecessors for this block. This is only valid during SSA rewriting. It is not maintained after conversion into SSA form. */ int num_preds; /* Nonzero if this block is forwardable during cfg cleanups. This is also used to detect loops during cfg cleanups. */ unsigned forwardable: 1; /* Nonzero if this block contains an escape point (see is_escape_site). */ unsigned has_escape_site : 1; struct edge_prediction *predictions; }; typedef struct bb_ann_d *bb_ann_t; /* Accessors for basic block annotations. */ static inline bb_ann_t bb_ann (basic_block); static inline tree phi_nodes (basic_block); static inline void set_phi_nodes (basic_block, tree); /*--------------------------------------------------------------------------- Global declarations ---------------------------------------------------------------------------*/ /* Array of all variables referenced in the function. */ extern GTY(()) varray_type referenced_vars; #define num_referenced_vars VARRAY_ACTIVE_SIZE (referenced_vars) #define referenced_var(i) VARRAY_TREE (referenced_vars, i) /* Array of all SSA_NAMEs used in the function. */ extern GTY(()) varray_type ssa_names; #define num_ssa_names VARRAY_ACTIVE_SIZE (ssa_names) #define ssa_name(i) VARRAY_TREE (ssa_names, i) /* Artificial variable used to model the effects of function calls. */ extern GTY(()) tree global_var; /* Call clobbered variables in the function. If bit I is set, then REFERENCED_VARS (I) is call-clobbered. */ extern bitmap call_clobbered_vars; /* 'true' after aliases have been computed (see compute_may_aliases). */ extern bool aliases_computed_p; /* Macros for showing usage statistics. */ #define SCALE(x) ((unsigned long) ((x) < 1024*10 \ ? (x) \ : ((x) < 1024*1024*10 \ ? (x) / 1024 \ : (x) / (1024*1024)))) #define LABEL(x) ((x) < 1024*10 ? 'b' : ((x) < 1024*1024*10 ? 'k' : 'M')) #define PERCENT(x,y) ((float)(x) * 100.0 / (float)(y)) /*--------------------------------------------------------------------------- Block iterators ---------------------------------------------------------------------------*/ typedef struct { tree_stmt_iterator tsi; basic_block bb; } block_stmt_iterator; static inline block_stmt_iterator bsi_start (basic_block); static inline block_stmt_iterator bsi_last (basic_block); static inline block_stmt_iterator bsi_after_labels (basic_block); static inline bool bsi_end_p (block_stmt_iterator); static inline void bsi_next (block_stmt_iterator *); static inline void bsi_prev (block_stmt_iterator *); static inline tree bsi_stmt (block_stmt_iterator); static inline tree * bsi_stmt_ptr (block_stmt_iterator); extern void bsi_remove (block_stmt_iterator *); extern void bsi_move_before (block_stmt_iterator *, block_stmt_iterator *); extern void bsi_move_after (block_stmt_iterator *, block_stmt_iterator *); extern void bsi_move_to_bb_end (block_stmt_iterator *, basic_block); enum bsi_iterator_update { /* Note that these are intentionally in the same order as TSI_FOO. They mean exactly the same as their TSI_* counterparts. 
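     For illustration only (hypothetical pass code, not part of this
     header), inserting NEW_STMT in front of the statement the iterator
     currently points to, while leaving the iterator where it is, would
     be written:

         bsi_insert_before (&bsi, new_stmt, BSI_SAME_STMT);

     whereas passing BSI_NEW_STMT instead would move the iterator to
     NEW_STMT.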
*/ BSI_NEW_STMT, BSI_SAME_STMT, BSI_CHAIN_START, BSI_CHAIN_END, BSI_CONTINUE_LINKING }; extern void bsi_insert_before (block_stmt_iterator *, tree, enum bsi_iterator_update); extern void bsi_insert_after (block_stmt_iterator *, tree, enum bsi_iterator_update); extern void bsi_replace (const block_stmt_iterator *, tree, bool); /*--------------------------------------------------------------------------- Function prototypes ---------------------------------------------------------------------------*/ /* In tree-cfg.c */ /* Location to track pending stmt for edge insertion. */ #define PENDING_STMT(e) ((e)->insns.t) extern void delete_tree_cfg_annotations (void); extern void disband_implicit_edges (void); extern bool stmt_ends_bb_p (tree); extern bool is_ctrl_stmt (tree); extern bool is_ctrl_altering_stmt (tree); extern bool computed_goto_p (tree); extern bool simple_goto_p (tree); extern void tree_dump_bb (basic_block, FILE *, int); extern void debug_tree_bb (basic_block); extern basic_block debug_tree_bb_n (int); extern void dump_tree_cfg (FILE *, int); extern void debug_tree_cfg (int); extern void dump_cfg_stats (FILE *); extern void debug_cfg_stats (void); extern void debug_loop_ir (void); extern void print_loop_ir (FILE *); extern void cleanup_tree_cfg (void); extern tree first_stmt (basic_block); extern tree last_stmt (basic_block); extern tree *last_stmt_ptr (basic_block); extern tree last_and_only_stmt (basic_block); extern edge find_taken_edge (basic_block, tree); extern void cfg_remove_useless_stmts (void); extern edge thread_edge (edge, basic_block); extern basic_block label_to_block (tree); extern void tree_optimize_tail_calls (bool, enum tree_dump_index); extern edge tree_block_forwards_to (basic_block bb); extern void bsi_insert_on_edge (edge, tree); extern void bsi_commit_edge_inserts (int *); extern void notice_special_calls (tree); extern void clear_special_calls (void); extern void compute_dominance_frontiers (bitmap *); extern void verify_stmts (void); extern tree tree_block_label (basic_block bb); extern void extract_true_false_edges_from_block (basic_block, edge *, edge *); extern bool tree_purge_dead_eh_edges (basic_block); extern bool tree_purge_all_dead_eh_edges (bitmap); /* In tree-pretty-print.c. 
*/ extern void dump_generic_bb (FILE *, basic_block, int, int); /* In tree-dfa.c */ extern var_ann_t create_var_ann (tree); extern stmt_ann_t create_stmt_ann (tree); extern tree_ann_t create_tree_ann (tree); extern tree create_phi_node (tree, basic_block); extern void add_phi_arg (tree *, tree, edge); extern void remove_phi_arg (tree, basic_block); extern void remove_phi_arg_num (tree, int); extern void remove_phi_node (tree, tree, basic_block); extern void remove_all_phi_nodes_for (bitmap); extern void dump_dfa_stats (FILE *); extern void debug_dfa_stats (void); extern void debug_referenced_vars (void); extern void dump_referenced_vars (FILE *); extern void dump_variable (FILE *, tree); extern void debug_variable (tree); extern void dump_immediate_uses (FILE *); extern void debug_immediate_uses (void); extern void dump_immediate_uses_for (FILE *, tree); extern void debug_immediate_uses_for (tree); extern void compute_immediate_uses (int, bool (*)(tree)); extern void free_df (void); extern tree get_virtual_var (tree); extern void add_referenced_tmp_var (tree var); extern void mark_new_vars_to_rename (tree, bitmap); extern void redirect_immediate_uses (tree, tree); extern tree make_rename_temp (tree, const char *); /* Flags used when computing reaching definitions and reached uses. */ #define TDFA_USE_OPS 1 << 0 #define TDFA_USE_VOPS 1 << 1 /* In gimple-low.c */ struct lower_data; extern void lower_stmt_body (tree, struct lower_data *); extern void expand_used_vars (void); extern void record_vars (tree); extern bool block_may_fallthru (tree block); /* In tree-ssa-alias.c */ extern void dump_may_aliases_for (FILE *, tree); extern void debug_may_aliases_for (tree); extern void dump_alias_info (FILE *); extern void debug_alias_info (void); extern void dump_points_to_info (FILE *); extern void debug_points_to_info (void); /* Call-back function for walk_use_def_chains(). At each reaching definition, a function with this prototype is called. 
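   For illustration only (hypothetical debugging code, not part of this
   header; it assumes print_generic_stmt from tree-pretty-print.c and
   the usual convention that returning true stops the walk), a callback
   that simply dumps every reaching definition could look like:

       static bool
       dump_reaching_def (tree var ATTRIBUTE_UNUSED, tree def_stmt,
                          void *data ATTRIBUTE_UNUSED)
       {
         print_generic_stmt (stderr, def_stmt, 0);
         return false;
       }

   and would be invoked as walk_use_def_chains (name, dump_reaching_def,
   NULL).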
*/ typedef bool (*walk_use_def_chains_fn) (tree, tree, void *); /* In tree-ssa.c */ extern void init_tree_ssa (void); extern void rewrite_vars_out_of_ssa (bitmap); extern void dump_reaching_defs (FILE *); extern void debug_reaching_defs (void); extern void dump_tree_ssa (FILE *); extern void debug_tree_ssa (void); extern void debug_def_blocks (void); extern void dump_tree_ssa_stats (FILE *); extern void debug_tree_ssa_stats (void); extern void ssa_remove_edge (edge); extern edge ssa_redirect_edge (edge, basic_block); extern void set_is_used (tree); extern bool tree_ssa_useless_type_conversion (tree); extern bool tree_ssa_useless_type_conversion_1 (tree, tree); extern void verify_ssa (void); extern void delete_tree_ssa (void); extern void register_new_def (tree, varray_type *); extern void walk_use_def_chains (tree, walk_use_def_chains_fn, void *); extern void kill_redundant_phi_nodes (void); /* In tree-into-ssa.c */ extern void rewrite_into_ssa (bool); extern void rewrite_ssa_into_ssa (bitmap); void compute_global_livein (bitmap, bitmap); tree duplicate_ssa_name (tree, tree); /* In tree-ssa-ccp.c */ bool fold_stmt (tree *); tree widen_bitfield (tree, tree, tree); /* In tree-ssa-dom.c */ extern void dump_dominator_optimization_stats (FILE *); extern void debug_dominator_optimization_stats (void); /* In tree-ssa-copy.c */ extern void propagate_value (use_operand_p, tree); extern void propagate_tree_value (tree *, tree); extern void replace_exp (use_operand_p, tree); /* In tree-flow-inline.h */ static inline int phi_arg_from_edge (tree, edge); static inline bool may_propagate_copy (tree, tree); static inline bool is_call_clobbered (tree); static inline void mark_call_clobbered (tree); /* In tree-eh.c */ extern void make_eh_edges (tree); extern bool tree_could_trap_p (tree); extern bool tree_could_throw_p (tree); extern bool tree_can_throw_internal (tree); extern bool tree_can_throw_external (tree); extern int lookup_stmt_eh_region (tree); extern void add_stmt_to_eh_region (tree, int); extern bool remove_stmt_from_eh_region (tree); extern bool maybe_clean_eh_stmt (tree); /* In tree-ssa-pre.c */ void add_to_value (tree, tree); void debug_value_expressions (tree); void print_value_expressions (FILE *, tree); /* In tree-vn.c */ bool expressions_equal_p (tree, tree); tree get_value_handle (tree); hashval_t vn_compute (tree, hashval_t, vuse_optype); tree vn_lookup_or_add (tree, vuse_optype); void vn_add (tree, tree, vuse_optype); tree vn_lookup (tree, vuse_optype); void vn_init (void); void vn_delete (void); /* In tree-sra.c */ void insert_edge_copies (tree stmt, basic_block bb); /* Inline functions for tree-flow.h Copyright (C) 2001, 2003 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #ifndef _TREE_FLOW_INLINE_H #define _TREE_FLOW_INLINE_H 1 /* Inline functions for manipulating various data structures defined in tree-flow.h. See tree-flow.h for documentation. */ /* Return the variable annotation for T, which must be a _DECL node. Return NULL if the variable annotation doesn't already exist. */ static inline var_ann_t var_ann (tree t) { #if defined ENABLE_CHECKING if (t == NULL_TREE || !DECL_P (t) || (t->common.ann && t->common.ann->common.type != VAR_ANN)) abort (); #endif return (var_ann_t) t->common.ann; } /* Return the variable annotation for T, which must be a _DECL node. Create the variable annotation if it doesn't exist. */ static inline var_ann_t get_var_ann (tree var) { var_ann_t ann = var_ann (var); return (ann) ? ann : create_var_ann (var); } /* Return the statement annotation for T, which must be a statement node. Return NULL if the statement annotation doesn't exist. */ static inline stmt_ann_t stmt_ann (tree t) { #if defined ENABLE_CHECKING if (!is_gimple_stmt (t)) abort (); #endif return (stmt_ann_t) t->common.ann; } /* Return the statement annotation for T, which must be a statement node. Create the statement annotation if it doesn't exist. */ static inline stmt_ann_t get_stmt_ann (tree stmt) { stmt_ann_t ann = stmt_ann (stmt); return (ann) ? ann : create_stmt_ann (stmt); } /* Return the annotation type for annotation ANN. */ static inline enum tree_ann_type ann_type (tree_ann_t ann) { return ann->common.type; } /* Return the basic block for statement T. */ static inline basic_block bb_for_stmt (tree t) { stmt_ann_t ann = stmt_ann (t); return ann ? ann->bb : NULL; } /* Return the may_aliases varray for variable VAR, or NULL if it has no may aliases. */ static inline varray_type may_aliases (tree var) { var_ann_t ann = var_ann (var); return ann ? ann->may_aliases : NULL; } /* Return true if VAR has a hidden use, false if it does not. */ static inline bool has_hidden_use (tree var) { var_ann_t ann = var_ann (var); return ann ? ann->has_hidden_use : false; } /* Set the hidden use flag on VAR. */ static inline void set_has_hidden_use (tree var) { var_ann_t ann = var_ann (var); if (ann == NULL) ann = create_var_ann (var); ann->has_hidden_use = 1; } /* Return the line number for EXPR, or return -1 if we have no line number information for it. */ static inline int get_lineno (tree expr) { if (expr == NULL_TREE) return -1; if (TREE_CODE (expr) == COMPOUND_EXPR) expr = TREE_OPERAND (expr, 0); if (! EXPR_HAS_LOCATION (expr)) return -1; return EXPR_LINENO (expr); } /* Return the file name for EXPR, or return "???" if we have no filename information. */ static inline const char * get_filename (tree expr) { const char *filename; if (expr == NULL_TREE) return "???"; if (TREE_CODE (expr) == COMPOUND_EXPR) expr = TREE_OPERAND (expr, 0); if (EXPR_HAS_LOCATION (expr) && (filename = EXPR_FILENAME (expr))) return filename; else return "???"; } /* Mark statement T as modified. */ static inline void modify_stmt (tree t) { stmt_ann_t ann = stmt_ann (t); if (ann == NULL) ann = create_stmt_ann (t); ann->modified = 1; } /* Mark statement T as unmodified. */ static inline void unmodify_stmt (tree t) { stmt_ann_t ann = stmt_ann (t); if (ann == NULL) ann = create_stmt_ann (t); ann->modified = 0; } /* Return true if T is marked as modified, false otherwise. */ static inline bool stmt_modified_p (tree t) { stmt_ann_t ann = stmt_ann (t); /* Note that if the statement doesn't yet have an annotation, we consider it modified. 
This will force the next call to get_stmt_operands to scan the statement. */ return ann ? ann->modified : true; } /* Return the definitions present in ANN, a statement annotation. Return NULL if this annotation contains no definitions. */ static inline def_optype get_def_ops (stmt_ann_t ann) { return ann ? ann->def_ops : NULL; } /* Return the uses present in ANN, a statement annotation. Return NULL if this annotation contains no uses. */ static inline use_optype get_use_ops (stmt_ann_t ann) { return ann ? ann->use_ops : NULL; } /* Return the virtual may-defs present in ANN, a statement annotation. Return NULL if this annotation contains no virtual may-defs. */ static inline v_may_def_optype get_v_may_def_ops (stmt_ann_t ann) { return ann ? ann->v_may_def_ops : NULL; } /* Return the virtual uses present in ANN, a statement annotation. Return NULL if this annotation contains no virtual uses. */ static inline vuse_optype get_vuse_ops (stmt_ann_t ann) { return ann ? ann->vuse_ops : NULL; } /* Return the virtual must-defs present in ANN, a statement annotation. Return NULL if this annotation contains no must-defs.*/ static inline v_must_def_optype get_v_must_def_ops (stmt_ann_t ann) { return ann ? ann->v_must_def_ops : NULL; } /* Return the tree pointer to by USE. */ static inline tree get_use_from_ptr (use_operand_p use) { return *(use.use); } /* Return the tree pointer to by DEF. */ static inline tree get_def_from_ptr (def_operand_p def) { return *(def.def); } /* Return a pointer to the tree that is at INDEX in the USES array. */ static inline use_operand_p get_use_op_ptr (use_optype uses, unsigned int index) { #ifdef ENABLE_CHECKING if (index >= uses->num_uses) abort(); #endif return uses->uses[index]; } /* Return a def_operand_p pointer for element INDEX of DEFS. */ static inline def_operand_p get_def_op_ptr (def_optype defs, unsigned int index) { #ifdef ENABLE_CHECKING if (index >= defs->num_defs) abort(); #endif return defs->defs[index]; } /* Return the def_operand_p that is the V_MAY_DEF_RESULT for the V_MAY_DEF at INDEX in the V_MAY_DEFS array. */ static inline def_operand_p get_v_may_def_result_ptr(v_may_def_optype v_may_defs, unsigned int index) { def_operand_p op; #ifdef ENABLE_CHECKING if (index >= v_may_defs->num_v_may_defs) abort(); #endif op.def = &(v_may_defs->v_may_defs[index * 2]); return op; } /* Return a use_operand_p that is the V_MAY_DEF_OP for the V_MAY_DEF at INDEX in the V_MAY_DEFS array. */ static inline use_operand_p get_v_may_def_op_ptr(v_may_def_optype v_may_defs, unsigned int index) { use_operand_p op; #ifdef ENABLE_CHECKING if (index >= v_may_defs->num_v_may_defs) abort(); #endif op.use = &(v_may_defs->v_may_defs[index * 2 + 1]); return op; } /* Return a use_operand_p that is at INDEX in the VUSES array. */ static inline use_operand_p get_vuse_op_ptr(vuse_optype vuses, unsigned int index) { use_operand_p op; #ifdef ENABLE_CHECKING if (index >= vuses->num_vuses) abort(); #endif op.use = &(vuses->vuses[index]); return op; } /* Return a def_operand_p that is the V_MUST_DEF_OP for the V_MUST_DEF at INDEX in the V_MUST_DEFS array. */ static inline def_operand_p get_v_must_def_op_ptr (v_must_def_optype v_must_defs, unsigned int index) { def_operand_p op; #ifdef ENABLE_CHECKING if (index >= v_must_defs->num_v_must_defs) abort(); #endif op.def = &(v_must_defs->v_must_defs[index]); return op; } /* Return a def_operand_p pointer for the result of PHI. 
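   For illustration only (hypothetical pass code, not part of this
   header), replacing the result of a PHI node through this interface is
   simply:

       SET_DEF (get_phi_result_ptr (phi), new_name);

   which is exactly what the SET_PHI_RESULT macro expands to.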
*/ static inline def_operand_p get_phi_result_ptr (tree phi) { def_operand_p op; op.def = &(PHI_RESULT_TREE (phi)); return op; } /* Return a use_operand_p pointer for argument I of phinode PHI. */ static inline use_operand_p get_phi_arg_def_ptr (tree phi, int i) { use_operand_p op; op.use = &(PHI_ARG_DEF_TREE (phi, i)); return op; } /* Mark the beginning of changes to the SSA operands for STMT. */ static inline void start_ssa_stmt_operands (tree stmt ATTRIBUTE_UNUSED) { #ifdef ENABLE_CHECKING verify_start_operands (stmt); #endif } /* Return the bitmap of addresses taken by STMT, or NULL if it takes no addresses. */ static inline bitmap addresses_taken (tree stmt) { stmt_ann_t ann = stmt_ann (stmt); return ann ? ann->addresses_taken : NULL; } /* Return the immediate uses of STMT, or NULL if this information is not computed. */ static dataflow_t get_immediate_uses (tree stmt) { stmt_ann_t ann = stmt_ann (stmt); return ann ? ann->df : NULL; } /* Return the number of immediate uses present in the dataflow information at DF. */ static inline int num_immediate_uses (dataflow_t df) { varray_type imm; if (!df) return 0; imm = df->immediate_uses; if (!imm) return df->uses[1] ? 2 : 1; return VARRAY_ACTIVE_SIZE (imm) + 2; } /* Return the tree that is at NUM in the immediate use DF array. */ static inline tree immediate_use (dataflow_t df, int num) { if (!df) return NULL_TREE; #ifdef ENABLE_CHECKING if (num >= num_immediate_uses (df)) abort (); #endif if (num < 2) return df->uses[num]; return VARRAY_TREE (df->immediate_uses, num - 2); } /* Return the basic_block annotation for BB. */ static inline bb_ann_t bb_ann (basic_block bb) { return (bb_ann_t)bb->tree_annotations; } /* Return the PHI nodes for basic block BB, or NULL if there are no PHI nodes. */ static inline tree phi_nodes (basic_block bb) { if (bb->index < 0) return NULL; return bb_ann (bb)->phi_nodes; } /* Set list of phi nodes of a basic block BB to L. */ static inline void set_phi_nodes (basic_block bb, tree l) { tree phi; bb_ann (bb)->phi_nodes = l; for (phi = l; phi; phi = PHI_CHAIN (phi)) set_bb_for_stmt (phi, bb); } /* Return the phi index number for an edge. */ static inline int phi_arg_from_edge (tree phi, edge e) { int i; #if defined ENABLE_CHECKING if (!phi || TREE_CODE (phi) != PHI_NODE) abort(); #endif for (i = 0; i < PHI_NUM_ARGS (phi); i++) if (PHI_ARG_EDGE (phi, i) == e) return i; return -1; } /* ----------------------------------------------------------------------- */ /* Return true if T is an executable statement. */ static inline bool is_exec_stmt (tree t) { return (t && !IS_EMPTY_STMT (t) && t != error_mark_node); } /* Return true if this stmt can be the target of a control transfer stmt such as a goto. */ static inline bool is_label_stmt (tree t) { if (t) switch (TREE_CODE (t)) { case LABEL_DECL: case LABEL_EXPR: case CASE_LABEL_EXPR: return true; default: return false; } return false; } /* Return true if we may propagate ORIG into DEST, false otherwise. */ static inline bool may_propagate_copy (tree dest, tree orig) { /* FIXME. GIMPLE is allowing pointer assignments and comparisons of pointers that have different alias sets. This means that these pointers will have different memory tags associated to them. If we allow copy propagation in these cases, statements de-referencing the new pointer will now have a reference to a different memory tag with potentially incorrect SSA information. 
This was showing up in libjava/java/util/zip/ZipFile.java with code like: struct java.io.BufferedInputStream *T.660; struct java.io.BufferedInputStream *T.647; struct java.io.InputStream *is; struct java.io.InputStream *is.662; [ ... ] T.660 = T.647; is = T.660; <-- This ought to be type-casted is.662 = is; Also, f/name.c exposed a similar problem with a COND_EXPR predicate that was causing DOM to generate and equivalence with two pointers of alias-incompatible types: struct _ffename_space *n; struct _ffename *ns; [ ... ] if (n == ns) goto lab; ... lab: return n; I think that GIMPLE should emit the appropriate type-casts. For the time being, blocking copy-propagation in these cases is the safe thing to do. */ if (TREE_CODE (dest) == SSA_NAME && TREE_CODE (orig) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (dest)) && POINTER_TYPE_P (TREE_TYPE (orig))) { tree mt_dest = var_ann (SSA_NAME_VAR (dest))->type_mem_tag; tree mt_orig = var_ann (SSA_NAME_VAR (orig))->type_mem_tag; if (mt_dest && mt_orig && mt_dest != mt_orig) return false; } /* If the destination is a SSA_NAME for a virtual operand, then we have some special cases to handle. */ if (TREE_CODE (dest) == SSA_NAME && !is_gimple_reg (dest)) { /* If both operands are SSA_NAMEs referring to virtual operands, then we can always propagate. */ if (TREE_CODE (orig) == SSA_NAME) { if (!is_gimple_reg (orig)) return true; #ifdef ENABLE_CHECKING /* If we have one real and one virtual operand, then something has gone terribly wrong. */ if (is_gimple_reg (orig)) abort (); #endif } /* We have a "copy" from something like a constant into a virtual operand. Reject these. */ return false; } /* If ORIG flows in from an abnormal edge, it cannot be propagated. */ if (TREE_CODE (orig) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig)) return false; /* If DEST is an SSA_NAME that flows from an abnormal edge or if it represents a hard register, then it cannot be replaced. */ if (TREE_CODE (dest) == SSA_NAME && (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest) || DECL_HARD_REGISTER (SSA_NAME_VAR (dest)))) return false; /* Anything else is OK. */ return true; } /* Set the default definition for VAR to DEF. */ static inline void set_default_def (tree var, tree def) { var_ann_t ann = var_ann (var); if (ann == NULL) ann = create_var_ann (var); ann->default_def = def; } /* Return the default definition for variable VAR, or NULL if none exists. */ static inline tree default_def (tree var) { var_ann_t ann = var_ann (var); return ann ? ann->default_def : NULL_TREE; } /* PHI nodes should contain only ssa_names and invariants. A test for ssa_name is definitely simpler; don't let invalid contents slip in in the meantime. */ static inline bool phi_ssa_name_p (tree t) { if (TREE_CODE (t) == SSA_NAME) return true; #ifdef ENABLE_CHECKING if (!is_gimple_min_invariant (t)) abort (); #endif return false; } /* ----------------------------------------------------------------------- */ /* Return a block_stmt_iterator that points to beginning of basic block BB. */ static inline block_stmt_iterator bsi_start (basic_block bb) { block_stmt_iterator bsi; if (bb->stmt_list) bsi.tsi = tsi_start (bb->stmt_list); else { #ifdef ENABLE_CHECKING if (bb->index >= 0) abort (); #endif bsi.tsi.ptr = NULL; bsi.tsi.container = NULL; } bsi.bb = bb; return bsi; } /* Return a block statement iterator that points to the last label in block BB. 
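   The iterator returned here is what a pass normally uses as an insertion point just after the leading labels of BB.  As a hedged sketch (illustrative only; `process_stmt' is a hypothetical callback), a plain forward walk over the statements of BB uses the primitives defined below:

     block_stmt_iterator bsi;

     for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
       process_stmt (bsi_stmt (bsi));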
*/
static inline block_stmt_iterator bsi_after_labels (basic_block bb)
{
  block_stmt_iterator bsi;
  tree_stmt_iterator next;
  bsi.bb = bb;
  if (!bb->stmt_list)
    {
#ifdef ENABLE_CHECKING
      if (bb->index >= 0) abort ();
#endif
      bsi.tsi.ptr = NULL;
      bsi.tsi.container = NULL;
      return bsi;
    }
  bsi.tsi = tsi_start (bb->stmt_list);
  if (tsi_end_p (bsi.tsi))
    return bsi;
  /* Ensure that there are some labels. The rationale is that we want to insert after the bsi that is returned, and these insertions should be placed at the start of the basic block. This would not work if the first statement was not a label; rather fail here than let the user proceed in the wrong way. */
  if (TREE_CODE (tsi_stmt (bsi.tsi)) != LABEL_EXPR) abort ();
  next = bsi.tsi;
  tsi_next (&next);
  while (!tsi_end_p (next) && TREE_CODE (tsi_stmt (next)) == LABEL_EXPR)
    {
      bsi.tsi = next;
      tsi_next (&next);
    }
  return bsi;
}

/* Return a block statement iterator that points to the end of basic block BB. */
static inline block_stmt_iterator bsi_last (basic_block bb)
{
  block_stmt_iterator bsi;
  if (bb->stmt_list)
    bsi.tsi = tsi_last (bb->stmt_list);
  else
    {
#ifdef ENABLE_CHECKING
      if (bb->index >= 0) abort ();
#endif
      bsi.tsi.ptr = NULL;
      bsi.tsi.container = NULL;
    }
  bsi.bb = bb;
  return bsi;
}

/* Return true if block statement iterator I has reached the end of the basic block. */
static inline bool bsi_end_p (block_stmt_iterator i) { return tsi_end_p (i.tsi); }

/* Modify block statement iterator I so that it is at the next statement in the basic block. */
static inline void bsi_next (block_stmt_iterator *i) { tsi_next (&i->tsi); }

/* Modify block statement iterator I so that it is at the previous statement in the basic block. */
static inline void bsi_prev (block_stmt_iterator *i) { tsi_prev (&i->tsi); }

/* Return the statement that block statement iterator I is currently at. */
static inline tree bsi_stmt (block_stmt_iterator i) { return tsi_stmt (i.tsi); }

/* Return a pointer to the statement that block statement iterator I is currently at. */
static inline tree * bsi_stmt_ptr (block_stmt_iterator i) { return tsi_stmt_ptr (i.tsi); }

/* Return true if VAR may be aliased. */
static inline bool may_be_aliased (tree var) { return (TREE_ADDRESSABLE (var) || decl_function_context (var) != current_function_decl); }

/* Return true if VAR is clobbered by function calls. */
static inline bool is_call_clobbered (tree var) { return needs_to_live_in_memory (var) || bitmap_bit_p (call_clobbered_vars, var_ann (var)->uid); }

/* Mark variable VAR as being clobbered by function calls. */
static inline void mark_call_clobbered (tree var)
{
  var_ann_t ann = var_ann (var);
  /* Call-clobbered variables need to live in memory. */
  DECL_NEEDS_TO_LIVE_IN_MEMORY_INTERNAL (var) = 1;
  bitmap_set_bit (call_clobbered_vars, ann->uid);
}

/* Mark variable VAR as being non-addressable. */
static inline void mark_non_addressable (tree var)
{
  bitmap_clear_bit (call_clobbered_vars, var_ann (var)->uid);
  DECL_NEEDS_TO_LIVE_IN_MEMORY_INTERNAL (var) = 0;
  TREE_ADDRESSABLE (var) = 0;
}

/* Return the common annotation for T. Return NULL if the annotation doesn't already exist. */
static inline tree_ann_t tree_ann (tree t) { return t->common.ann; }

/* Return a common annotation for T. Create the common annotation if it doesn't exist. */
static inline tree_ann_t get_tree_ann (tree t)
{
  tree_ann_t ann = tree_ann (t);
  return (ann) ? ann : create_tree_ann (t);
}

#endif /* _TREE_FLOW_INLINE_H */
#endif /* _TREE_FLOW_H */

/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc. Contributed by Frank Ch. Eigler This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef TREE_MUDFLAP_H #define TREE_MUDFLAP_H /* Instrumentation. */ extern void mudflap_init (void); extern void mudflap_c_function_decls (tree); extern void mudflap_c_function_ops (tree); extern void mudflap_enqueue_decl (tree); extern void mudflap_enqueue_constant (tree); extern void mudflap_finish_file (void); /* Tree node marking. */ extern int mf_marked_p (tree); extern tree mf_mark (tree); #endif /* TREE_MUDFLAP_H */ /* Tree-dumping functionality for intermediate representation. Copyright (C) 1999, 2000, 2003 Free Software Foundation, Inc. Written by Mark Mitchell This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_TREE_DUMP_H #define GCC_TREE_DUMP_H /* Flags used with queue functions. */ #define DUMP_NONE 0 #define DUMP_BINFO 1 /* Information about a node to be dumped. */ typedef struct dump_node_info { /* The index for the node. */ unsigned int index; /* Nonzero if the node is a binfo. */ unsigned int binfo_p : 1; } *dump_node_info_p; /* A dump_queue is a link in the queue of things to be dumped. */ typedef struct dump_queue { /* The queued tree node. */ splay_tree_node node; /* The next node in the queue. */ struct dump_queue *next; } *dump_queue_p; /* A dump_info gives information about how we should perform the dump and about the current state of the dump. */ struct dump_info { /* The stream on which to dump the information. */ FILE *stream; /* The original node. */ tree node; /* User flags. */ int flags; /* The next unused node index. */ unsigned int index; /* The next column. */ unsigned int column; /* The first node in the queue of nodes to be written out. */ dump_queue_p queue; /* The last node in the queue. */ dump_queue_p queue_end; /* Free queue nodes. */ dump_queue_p free_list; /* The tree nodes which we have already written out. The keys are the addresses of the nodes; the values are the integer indices we assigned them. */ splay_tree nodes; }; /* Dump the CHILD and its children. 
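   The macro below expects a dump_info_p variable named `di' to be in scope.  As an illustrative sketch (not from the original sources; `t' is assumed to be the FUNCTION_DECL currently being dumped), a front end's dump routine might emit a named edge to the function body with:

     dump_child ("body", DECL_SAVED_TREE (t));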
*/ #define dump_child(field, child) \ queue_and_dump_index (di, field, child, DUMP_NONE) extern void dump_pointer (dump_info_p, const char *, void *); extern void dump_int (dump_info_p, const char *, int); extern void dump_string (dump_info_p, const char *); extern void dump_stmt (dump_info_p, tree); extern void queue_and_dump_index (dump_info_p, const char *, tree, int); extern void queue_and_dump_type (dump_info_p, tree); extern void dump_function (enum tree_dump_index, tree); extern void dump_function_to_file (tree, FILE *, int); extern unsigned int dump_register (const char *, const char *); #endif /* ! GCC_TREE_DUMP_H */ /* Definitions for describing one tree-ssa optimization pass. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Richard Henderson This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_TREE_PASS_H #define GCC_TREE_PASS_H 1 /* Global variables used to communicate with passes. */ extern FILE *dump_file; extern int dump_flags; extern struct bitmap_head_def *vars_to_rename; /* Describe one pass. */ struct tree_opt_pass { /* Terse name of the pass used as a fragment of the dump file name. */ const char *name; /* If non-null, this pass and all sub-passes are executed only if the function returns true. */ bool (*gate) (void); /* This is the code to run. If null, then there should be sub-passes otherwise this pass does nothing. */ void (*execute) (void); /* A list of sub-passes to run, dependent on gate predicate. */ struct tree_opt_pass *sub; /* Next in the list of passes to run, independent of gate predicate. */ struct tree_opt_pass *next; /* Static pass number, used as a fragment of the dump file name. */ unsigned int static_pass_number; /* The timevar id associated with this pass. */ /* ??? Ideally would be dynamically assigned. */ unsigned int tv_id; /* Sets of properties input and output from this pass. */ unsigned int properties_required; unsigned int properties_provided; unsigned int properties_destroyed; /* Flags indicating common sets things to do before and after. */ unsigned int todo_flags_start; unsigned int todo_flags_finish; }; /* Pass properties. */ #define PROP_gimple_any (1 << 0) /* entire gimple grammar */ #define PROP_gimple_lcf (1 << 1) /* lowered control flow */ #define PROP_gimple_leh (1 << 2) /* lowered eh */ #define PROP_cfg (1 << 3) #define PROP_referenced_vars (1 << 4) #define PROP_pta (1 << 5) #define PROP_ssa (1 << 6) #define PROP_no_crit_edges (1 << 7) #define PROP_rtl (1 << 8) #define PROP_trees \ (PROP_gimple_any | PROP_gimple_lcf | PROP_gimple_leh) /* To-do flags. 
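   Together with the PROP_* bits above, these are what a pass definition fills in.  As an illustrative sketch (hypothetical names; fields are listed in the declaration order of struct tree_opt_pass: name, gate, execute, sub, next, static_pass_number, tv_id, properties required/provided/destroyed, todo_flags_start, todo_flags_finish):

     struct tree_opt_pass pass_example =
     {
       "example",
       gate_example,
       execute_example,
       NULL,
       NULL,
       0,
       0,
       PROP_cfg | PROP_ssa,
       0,
       0,
       0,
       TODO_dump_func | TODO_verify_ssa
     };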
*/ #define TODO_dump_func (1 << 0) /* pass doesn't dump itself */ #define TODO_rename_vars (1 << 1) /* rewrite new vars to ssa */ #define TODO_ggc_collect (1 << 2) /* run the collector */ #define TODO_verify_ssa (1 << 3) #define TODO_verify_flow (1 << 4) #define TODO_verify_stmts (1 << 5) #define TODO_verify_all \ (TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts) extern struct tree_opt_pass pass_mudflap_1; extern struct tree_opt_pass pass_mudflap_2; extern struct tree_opt_pass pass_remove_useless_stmts; extern struct tree_opt_pass pass_lower_cf; extern struct tree_opt_pass pass_lower_eh; extern struct tree_opt_pass pass_build_cfg; extern struct tree_opt_pass pass_tree_profile; extern struct tree_opt_pass pass_referenced_vars; extern struct tree_opt_pass pass_build_pta; extern struct tree_opt_pass pass_del_pta; extern struct tree_opt_pass pass_sra; extern struct tree_opt_pass pass_tail_recursion; extern struct tree_opt_pass pass_tail_calls; extern struct tree_opt_pass pass_loop; extern struct tree_opt_pass pass_loop_init; extern struct tree_opt_pass pass_loop_done; extern struct tree_opt_pass pass_ch; extern struct tree_opt_pass pass_ccp; extern struct tree_opt_pass pass_build_ssa; extern struct tree_opt_pass pass_del_ssa; extern struct tree_opt_pass pass_dominator; extern struct tree_opt_pass pass_dce; extern struct tree_opt_pass pass_cd_dce; extern struct tree_opt_pass pass_may_alias; extern struct tree_opt_pass pass_split_crit_edges; extern struct tree_opt_pass pass_pre; extern struct tree_opt_pass pass_profile; extern struct tree_opt_pass pass_lower_complex; extern struct tree_opt_pass pass_fold_builtins; extern struct tree_opt_pass pass_early_warn_uninitialized; extern struct tree_opt_pass pass_late_warn_uninitialized; extern struct tree_opt_pass pass_warn_function_return; extern struct tree_opt_pass pass_phiopt; extern struct tree_opt_pass pass_forwprop; extern struct tree_opt_pass pass_redundant_phi; extern struct tree_opt_pass pass_dse; extern struct tree_opt_pass pass_nrv; extern struct tree_opt_pass pass_remove_useless_vars; extern struct tree_opt_pass pass_rename_ssa_copies; extern struct tree_opt_pass pass_expand; extern struct tree_opt_pass pass_rest_of_compilation; extern struct tree_opt_pass pass_fre; #endif /* GCC_TREE_PASS_H */ /* Various declarations for language-independent diagnostics subroutines. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Gabriel Dos Reis This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_DIAGNOSTIC_H #define GCC_DIAGNOSTIC_H /* Various declarations for language-independent pretty-print subroutines. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Gabriel Dos Reis This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#ifndef GCC_PRETTY_PRINT_H
#define GCC_PRETTY_PRINT_H

/* The type of a text to be formatted according to a format specification along with a list of things. */
typedef struct { const char *format_spec; va_list *args_ptr; int err_no; /* for %m */ } text_info;

/* How often diagnostics are prefixed by their locations: o DIAGNOSTICS_SHOW_PREFIX_NEVER: never - not yet supported; o DIAGNOSTICS_SHOW_PREFIX_ONCE: emit only once; o DIAGNOSTICS_SHOW_PREFIX_EVERY_LINE: emit each time a physical line is started. */
typedef enum { DIAGNOSTICS_SHOW_PREFIX_ONCE = 0x0, DIAGNOSTICS_SHOW_PREFIX_NEVER = 0x1, DIAGNOSTICS_SHOW_PREFIX_EVERY_LINE = 0x2 } diagnostic_prefixing_rule_t;

/* The output buffer datatype. This is best seen as an abstract datatype whose fields should not be accessed directly by clients. */
typedef struct
{
  /* The obstack where the text is built up. */
  struct obstack obstack;
  /* Where to output formatted text. */
  FILE *stream;
  /* The number of characters output so far. */
  int line_length;
  /* This must be large enough to hold any printed integer or floating-point value. */
  char digit_buffer[128];
} output_buffer;

/* The type of pretty-printer flags passed to clients. */
typedef unsigned int pp_flags;

typedef enum { pp_none, pp_before, pp_after } pp_padding;

/* The type of a hook that formats client-specific data onto a pretty_printer. A client-supplied formatter returns true if everything goes well, otherwise it returns false. */
typedef struct pretty_print_info pretty_printer;
typedef bool (*printer_fn) (pretty_printer *, text_info *);

/* Client supplied function used to decode formats. */
#define pp_format_decoder(PP) pp_base (PP)->format_decoder

/* TRUE if a newline character needs to be added before further formatting. */
#define pp_needs_newline(PP) pp_base (PP)->need_newline

/* Maximum characters per line in automatic line wrapping mode. Zero means don't wrap lines. */
#define pp_line_cutoff(PP) pp_base (PP)->ideal_maximum_length

/* True if PRETTY-PRINTER is in line-wrapping mode. */
#define pp_is_wrapping_line(PP) (pp_line_cutoff (PP) > 0)

/* Prefixing rule used in formatting a diagnostic message. */
#define pp_prefixing_rule(PP) pp_base (PP)->prefixing_rule

/* The amount of whitespace to be emitted when starting a new line. */
#define pp_indentation(PP) pp_base (PP)->indent_skip

/* The data structure that contains the bare minimum required to do proper pretty-printing. Clients may derive from this structure and add additional fields they need. */
struct pretty_print_info
{
  /* Where we print external representation of ENTITY. */
  output_buffer *buffer;
  /* The prefix for each new line. */
  const char *prefix;
  /* Where to put whitespace around the entity being formatted. */
  pp_padding padding;
  /* The real upper bound of number of characters per line, taking into account the case of a very long prefix.
*/ int maximum_length; /* The ideal upper bound of number of characters per line, as suggested by front-end. */ int ideal_maximum_length; /* Indentation count. */ int indent_skip; /* Current prefixing rule. */ diagnostic_prefixing_rule_t prefixing_rule; /* If non-NULL, this function formats a TEXT into the BUFFER. When called, TEXT->format_spec points to a format code. FORMAT_DECODER should call pp_string (and related functions) to add data to the BUFFER. FORMAT_DECODER can read arguments from *TEXT->args_pts using VA_ARG. If the BUFFER needs additional characters from the format string, it should advance the TEXT->format_spec as it goes. When FORMAT_DECODER returns, TEXT->format_spec should point to the last character processed. */ printer_fn format_decoder; /* Nonzero if current PREFIX was emitted at least once. */ bool emitted_prefix; /* Nonzero means one should emit a newline before outputting anything. */ bool need_newline; }; #define pp_set_line_maximum_length(PP, L) \ pp_base_set_line_maximum_length (pp_base (PP), L) #define pp_set_prefix(PP, P) pp_base_set_prefix (pp_base (PP), P) #define pp_destroy_prefix(PP) pp_base_destroy_prefix (pp_base (PP)) #define pp_remaining_character_count_for_line(PP) \ pp_base_remaining_character_count_for_line (pp_base (PP)) #define pp_clear_output_area(PP) \ pp_base_clear_output_area (pp_base (PP)) #define pp_formatted_text(PP) pp_base_formatted_text (pp_base (PP)) #define pp_last_position_in_text(PP) \ pp_base_last_position_in_text (pp_base (PP)) #define pp_emit_prefix(PP) pp_base_emit_prefix (pp_base (PP)) #define pp_append_text(PP, B, E) \ pp_base_append_text (pp_base (PP), B, E) #define pp_flush(PP) pp_base_flush (pp_base (PP)) #define pp_format_text(PP, TI) pp_base_format_text (pp_base (PP), TI) #define pp_format_verbatim(PP, TI) \ pp_base_format_verbatim (pp_base (PP), TI) #define pp_character(PP, C) pp_base_character (pp_base (PP), C) #define pp_string(PP, S) pp_base_string (pp_base (PP), S) #define pp_newline(PP) pp_base_newline (pp_base (PP)) #define pp_space(PP) pp_character (PP, ' ') #define pp_left_paren(PP) pp_character (PP, '(') #define pp_right_paren(PP) pp_character (PP, ')') #define pp_left_bracket(PP) pp_character (PP, '[') #define pp_right_bracket(PP) pp_character (PP, ']') #define pp_left_brace(PP) pp_character (PP, '{') #define pp_right_brace(PP) pp_character (PP, '}') #define pp_semicolon(PP) pp_character (PP, ';') #define pp_comma(PP) pp_string (PP, ", ") #define pp_dot(PP) pp_character (PP, '.') #define pp_colon(PP) pp_character (PP, ':') #define pp_colon_colon(PP) pp_string (PP, "::") #define pp_arrow(PP) pp_string (PP, "->") #define pp_equal(PP) pp_character (PP, '=') #define pp_question(PP) pp_character (PP, '?') #define pp_bar(PP) pp_character (PP, '|') #define pp_carret(PP) pp_character (PP, '^') #define pp_ampersand(PP) pp_character (PP, '&') #define pp_less(PP) pp_character (PP, '<') #define pp_greater(PP) pp_character (PP, '>') #define pp_plus(PP) pp_character (PP, '+') #define pp_minus(PP) pp_character (PP, '-') #define pp_star(PP) pp_character (PP, '*') #define pp_slash(PP) pp_character (PP, '/') #define pp_modulo(PP) pp_character (PP, '%') #define pp_exclamation(PP) pp_character (PP, '!') #define pp_complement(PP) pp_character (PP, '~') #define pp_quote(PP) pp_character (PP, '\'') #define pp_backquote(PP) pp_character (PP, '`') #define pp_doublequote(PP) pp_character (PP, '"') #define pp_newline_and_indent(PP, N) \ do { \ pp_indentation (PP) += N; \ pp_newline (PP); \ pp_base_indent (pp_base (PP)); \ 
pp_needs_newline (PP) = false; \ } while (0) #define pp_maybe_newline_and_indent(PP, N) \ if (pp_needs_newline (PP)) pp_newline_and_indent (PP, N) #define pp_maybe_space(PP) pp_base_maybe_space (pp_base (PP)) #define pp_separate_with(PP, C) \ do { \ pp_character (PP, C); \ pp_space (PP); \ } while (0) #define pp_scalar(PP, FORMAT, SCALAR) \ do \ { \ sprintf (pp_buffer (PP)->digit_buffer, FORMAT, SCALAR); \ pp_string (PP, pp_buffer (PP)->digit_buffer); \ } \ while (0) #define pp_decimal_int(PP, I) pp_scalar (PP, "%d", I) #define pp_wide_integer(PP, I) \ pp_scalar (PP, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) I) #define pp_pointer(PP, P) pp_scalar (PP, "%p", P) #define pp_identifier(PP, ID) pp_string (PP, ID) #define pp_tree_identifier(PP, T) \ pp_append_text(PP, IDENTIFIER_POINTER (T), \ IDENTIFIER_POINTER (T) + IDENTIFIER_LENGTH (T)) #define pp_unsupported_tree(PP, T) \ pp_verbatim (pp_base (PP), "#`%s' not supported by %s#", \ tree_code_name[(int) TREE_CODE (T)], __FUNCTION__) #define pp_buffer(PP) pp_base (PP)->buffer /* Clients that directly derive from pretty_printer need to override this macro to return a pointer to the base pretty_printer structure. */ #define pp_base(PP) (PP) extern void pp_construct (pretty_printer *, const char *, int); extern void pp_base_set_line_maximum_length (pretty_printer *, int); extern void pp_base_set_prefix (pretty_printer *, const char *); extern void pp_base_destroy_prefix (pretty_printer *); extern int pp_base_remaining_character_count_for_line (pretty_printer *); extern void pp_base_clear_output_area (pretty_printer *); extern const char *pp_base_formatted_text (pretty_printer *); extern const char *pp_base_last_position_in_text (const pretty_printer *); extern void pp_base_emit_prefix (pretty_printer *); extern void pp_base_append_text (pretty_printer *, const char *, const char *); extern void pp_printf (pretty_printer *, const char *, ...) ATTRIBUTE_PRINTF_2; extern void pp_verbatim (pretty_printer *, const char *, ...); extern void pp_base_flush (pretty_printer *); extern void pp_base_format_text (pretty_printer *, text_info *); extern void pp_base_format_verbatim (pretty_printer *, text_info *); extern void pp_base_indent (pretty_printer *); extern void pp_base_newline (pretty_printer *); extern void pp_base_character (pretty_printer *, int); extern void pp_base_string (pretty_printer *, const char *); extern void pp_write_text_to_stream (pretty_printer *pp); extern void pp_base_maybe_space (pretty_printer *); #endif /* GCC_PRETTY_PRINT_H */ /* Constants used to discriminate diagnostics. */ typedef enum { #define DEFINE_DIAGNOSTIC_KIND(K, msgid) K, DEFINE_DIAGNOSTIC_KIND (DK_FATAL, "fatal error: ") DEFINE_DIAGNOSTIC_KIND (DK_ICE, "internal compiler error: ") DEFINE_DIAGNOSTIC_KIND (DK_ERROR, "error: ") DEFINE_DIAGNOSTIC_KIND (DK_SORRY, "sorry, unimplemented: ") DEFINE_DIAGNOSTIC_KIND (DK_WARNING, "warning: ") DEFINE_DIAGNOSTIC_KIND (DK_ANACHRONISM, "anachronism: ") DEFINE_DIAGNOSTIC_KIND (DK_NOTE, "note: ") DEFINE_DIAGNOSTIC_KIND (DK_DEBUG, "debug: ") #undef DEFINE_DIAGNOSTIC_KIND DK_LAST_DIAGNOSTIC_KIND } diagnostic_t; /* A diagnostic is described by the MESSAGE to send, the FILE and LINE of its context and its KIND (ice, error, warning, note, ...) See complete list in diagnostic.def. */ typedef struct { text_info message; location_t location; /* The kind of diagnostic it is about. */ diagnostic_t kind; } diagnostic_info; #define pedantic_error_kind() (flag_pedantic_errors ? DK_ERROR : DK_WARNING) /* Forward declarations. 
*/
typedef struct diagnostic_context diagnostic_context;
typedef void (*diagnostic_starter_fn) (diagnostic_context *, diagnostic_info *);
typedef diagnostic_starter_fn diagnostic_finalizer_fn;

/* This data structure bundles together all the information relevant to the context of a diagnostic message. */
struct diagnostic_context
{
  /* Where most of the diagnostic formatting work is done. */
  pretty_printer *printer;
  /* The number of times we have issued diagnostics. */
  int diagnostic_count[DK_LAST_DIAGNOSTIC_KIND];
  /* True if we should display the "warnings are being treated as errors" message, usually displayed once per compiler run. */
  bool warnings_are_errors_message;
  /* True if we should raise a SIGABRT on errors. */
  bool abort_on_error;
  /* This function is called before any message is printed out. It is responsible for preparing message prefix and such. For example, it might say: In file included from "/usr/local/include/curses.h:5: from "/home/gdr/src/nifty_printer.h:56: ... */
  diagnostic_starter_fn begin_diagnostic;
  /* This function is called after the diagnostic message is printed. */
  diagnostic_finalizer_fn end_diagnostic;
  /* Client hook to report an internal error. */
  void (*internal_error) (const char *, va_list *);
  /* Function of the last diagnostic message; more generally, the function such that if the next diagnostic message is in it then we don't have to mention the function name. */
  tree last_function;
  /* Used to detect when input_file_stack has changed since last described. */
  int last_module;
  int lock;
  /* Hook for front-end extensions. */
  void *x_data;
};

/* Client supplied function to announce a diagnostic. */
#define diagnostic_starter(DC) (DC)->begin_diagnostic

/* Client supplied function called after a diagnostic message is displayed. */
#define diagnostic_finalizer(DC) (DC)->end_diagnostic

/* Extension hook for client. */
#define diagnostic_auxiliary_data(DC) (DC)->x_data

/* Same as pp_format_decoder. Works on 'diagnostic_context *'. */
#define diagnostic_format_decoder(DC) ((DC)->printer->format_decoder)

/* Same as output_prefixing_rule. Works on 'diagnostic_context *'. */
#define diagnostic_prefixing_rule(DC) ((DC)->printer->prefixing_rule)

/* Maximum characters per line in automatic line wrapping mode. Zero means don't wrap lines. */
#define diagnostic_line_cutoff(DC) ((DC)->printer->ideal_maximum_length)

#define diagnostic_flush_buffer(DC) pp_base_flush ((DC)->printer)

/* True if the last function in which a diagnostic was reported is different from the current one. */
#define diagnostic_last_function_changed(DC) \
  ((DC)->last_function != current_function_decl)

/* Remember the current function as being the last one in which we report a diagnostic. */
#define diagnostic_set_last_function(DC) \
  (DC)->last_function = current_function_decl

/* True if the last module or file in which a diagnostic was reported is different from the current one. */
#define diagnostic_last_module_changed(DC) \
  ((DC)->last_module != input_file_stack_tick)

/* Remember the current module or file as being the last one in which we report a diagnostic. */
#define diagnostic_set_last_module(DC) \
  (DC)->last_module = input_file_stack_tick

/* Raise SIGABRT on any diagnostic of severity DK_ERROR or higher. */
#define diagnostic_abort_on_error(DC) \
  (DC)->abort_on_error = true

/* This diagnostic_context is used by front-ends that directly output diagnostic messages without going through `error', `warning', and similar functions.
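   As a hedged sketch (not part of the original header; `msgid', `ap' and `loc' stand for a format string, a va_list and a location_t already set up by the caller), such a client typically fills in a diagnostic_info and hands it to the shared context through the helpers declared below:

     diagnostic_info diagnostic;

     diagnostic_set_info (&diagnostic, msgid, &ap, loc, DK_WARNING);
     report_diagnostic (&diagnostic);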
*/ extern diagnostic_context *global_dc; /* The total count of a KIND of diagnostics emitted so far. */ #define diagnostic_kind_count(DC, DK) (DC)->diagnostic_count[(int) (DK)] /* The number of errors that have been issued so far. Ideally, these would take a diagnostic_context as an argument. */ #define errorcount diagnostic_kind_count (global_dc, DK_ERROR) /* Similarly, but for warnings. */ #define warningcount diagnostic_kind_count (global_dc, DK_WARNING) /* Similarly, but for sorrys. */ #define sorrycount diagnostic_kind_count (global_dc, DK_SORRY) /* Returns nonzero if warnings should be emitted. */ #define diagnostic_report_warnings_p() \ (!inhibit_warnings \ && !(in_system_header && !warn_system_headers)) #define report_diagnostic(D) diagnostic_report_diagnostic (global_dc, D) /* Diagnostic related functions. */ extern void diagnostic_initialize (diagnostic_context *); extern void diagnostic_report_current_module (diagnostic_context *); extern void diagnostic_report_current_function (diagnostic_context *); extern void diagnostic_report_diagnostic (diagnostic_context *, diagnostic_info *); extern void diagnostic_set_info (diagnostic_info *, const char *, va_list *, location_t, diagnostic_t); extern char *diagnostic_build_prefix (diagnostic_info *); /* Pure text formatting support functions. */ extern void verbatim (const char *, ...); extern char *file_name_as_prefix (const char *); extern void debug_output_buffer (pretty_printer *); /* In tree-pretty-print.c */ extern int dump_generic_node (pretty_printer *, tree, int, int, bool); extern void print_generic_stmt (FILE *, tree, int); extern void print_generic_stmt_indented (FILE *, tree, int, int); extern void print_generic_expr (FILE *, tree, int); extern void print_generic_decl (FILE *, tree, int); extern void debug_generic_expr (tree); extern void debug_generic_stmt (tree); extern void debug_c_tree (tree); #endif /* ! GCC_DIAGNOSTIC_H */ /* The lang_hooks data structure. Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_LANG_HOOKS_H #define GCC_LANG_HOOKS_H /* This file should be #include-d after tree.h. */ struct diagnostic_context; /* A print hook for print_tree (). */ typedef void (*lang_print_tree_hook) (FILE *, tree, int indent); /* The following hooks are documented in langhooks.c. Must not be NULL. 
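   Language-independent code reaches these hooks through the global `lang_hooks' object declared at the end of this file; an illustrative call (a sketch, not taken from the surrounding code) is

     tree t = lang_hooks.types.type_for_size (32, 1);

   which asks the front end for an unsigned integer type that is at least 32 bits wide.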
*/ struct lang_hooks_for_tree_inlining { tree (*walk_subtrees) (tree *, int *, tree (*) (tree *, int *, void *), void *, void *); int (*cannot_inline_tree_fn) (tree *); int (*disregard_inline_limits) (tree); tree (*add_pending_fn_decls) (void *, tree); int (*auto_var_in_fn_p) (tree, tree); tree (*copy_res_decl_for_inlining) (tree, tree, tree, void *, int *, tree); int (*anon_aggr_type_p) (tree); bool (*var_mod_type_p) (tree, tree); int (*start_inlining) (tree); void (*end_inlining) (tree); tree (*convert_parm_for_inlining) (tree, tree, tree, int); int (*estimate_num_insns) (tree); }; struct lang_hooks_for_callgraph { /* The node passed is a language-specific tree node. If its contents are relevant to use of other declarations, mark them. */ tree (*analyze_expr) (tree *, int *, tree); /* Produce RTL for function passed as argument. */ void (*expand_function) (tree); }; /* Lang hooks for management of language-specific data or status when entering / leaving functions etc. */ struct lang_hooks_for_functions { /* Called when entering a function. */ void (*init) (struct function *); /* Called when leaving a function. */ void (*final) (struct function *); /* Called when entering a nested function. */ void (*enter_nested) (struct function *); /* Called when leaving a nested function. */ void (*leave_nested) (struct function *); /* Determines if it's ok for a function to have no noreturn attribute. */ bool (*missing_noreturn_ok_p) (tree); }; /* The following hooks are used by tree-dump.c. */ struct lang_hooks_for_tree_dump { /* Dump language-specific parts of tree nodes. Returns nonzero if it does not want the usual dumping of the second argument. */ bool (*dump_tree) (void *, tree); /* Determine type qualifiers in a language-specific way. */ int (*type_quals) (tree); }; /* Hooks related to types. */ struct lang_hooks_for_types { /* Return a new type (with the indicated CODE), doing whatever language-specific processing is required. */ tree (*make_type) (enum tree_code); /* Given MODE and UNSIGNEDP, return a suitable type-tree with that mode. */ tree (*type_for_mode) (enum machine_mode, int); /* Given PRECISION and UNSIGNEDP, return a suitable type-tree for an integer type with at least that precision. */ tree (*type_for_size) (unsigned, int); /* Given an integer type T, return a type like T but unsigned. If T is unsigned, the value is T. */ tree (*unsigned_type) (tree); /* Given an integer type T, return a type like T but signed. If T is signed, the value is T. */ tree (*signed_type) (tree); /* Return a type the same as TYPE except unsigned or signed according to UNSIGNEDP. */ tree (*signed_or_unsigned_type) (int, tree); /* Given a type, apply default promotions to unnamed function arguments and return the new type. Return the same type if no change. Required by any language that supports variadic arguments. The default hook aborts. */ tree (*type_promotes_to) (tree); /* Register TYPE as a builtin type with the indicated NAME. The TYPE is placed in the outermost lexical scope. The semantics should be analogous to: typedef TYPE NAME; in C. The default hook ignores the declaration. */ void (*register_builtin_type) (tree, const char *); /* This routine is called in tree.c to print an error message for invalid use of an incomplete type. VALUE is the expression that was used (or 0 if that isn't known) and TYPE is the type that was invalid. */ void (*incomplete_type_error) (tree value, tree type); /* Called from assign_temp to return the maximum size, if there is one, for a type. 
*/ tree (*max_size) (tree); /* Nonzero if types that are identical are to be hashed so that only one copy is kept. If a language requires unique types for each user-specified type, such as Ada, this should be set to TRUE. */ bool hash_types; }; /* Language hooks related to decls and the symbol table. */ struct lang_hooks_for_decls { /* Enter a new lexical scope. Argument is always zero when called from outside the front end. */ void (*pushlevel) (int); /* Exit a lexical scope and return a BINDING for that scope. Takes three arguments: KEEP -- nonzero if there were declarations in this scope. REVERSE -- reverse the order of decls before returning them. FUNCTIONBODY -- nonzero if this level is the body of a function. */ tree (*poplevel) (int, int, int); /* Returns nonzero if we are in the global binding level. Ada returns -1 for an undocumented reason used in stor-layout.c. */ int (*global_bindings_p) (void); /* Insert BLOCK at the end of the list of subblocks of the current binding level. This is used when a BIND_EXPR is expanded, to handle the BLOCK node inside the BIND_EXPR. */ void (*insert_block) (tree); /* Set the BLOCK node for the current scope level. */ void (*set_block) (tree); /* Function to add a decl to the current scope level. Takes one argument, a decl to add. Returns that decl, or, if the same symbol is already declared, may return a different decl for that name. */ tree (*pushdecl) (tree); /* Returns the chain of decls so far in the current scope level. */ tree (*getdecls) (void); /* Returns true when we should warn for an unused global DECL. We will already have checked that it has static binding. */ bool (*warn_unused_global) (tree); /* Obtain a list of globals and do final output on them at end of compilation */ void (*final_write_globals) (void); /* Do necessary preparations before assemble_variable can proceed. */ void (*prepare_assemble_variable) (tree); /* True if this decl may be called via a sibcall. */ bool (*ok_for_sibcall) (tree); }; /* Language-specific hooks. See langhooks-def.h for defaults. */ struct lang_hooks { /* String identifying the front end. e.g. "GNU C++". */ const char *name; /* sizeof (struct lang_identifier), so make_node () creates identifier nodes long enough for the language-specific slots. */ size_t identifier_size; /* Determines the size of any language-specific 'x' or 'c' nodes. Since it is called from make_node, the only information available is the tree code. Expected to abort on unrecognized codes. */ size_t (*tree_size) (enum tree_code); /* The first callback made to the front end, for simple initialization needed before any calls to handle_option. Return the language mask to filter the switch array with. */ unsigned int (*init_options) (unsigned int argc, const char **argv); /* Callback used to perform language-specific initialization for the global diagnostic context structure. */ void (*initialize_diagnostics) (struct diagnostic_context *); /* Handle the switch CODE, which has real type enum opt_code from options.h. If the switch takes an argument, it is passed in ARG which points to permanent storage. The handler is responsible for checking whether ARG is NULL, which indicates that no argument was in fact supplied. For -f and -W switches, VALUE is 1 or 0 for the positive and negative forms respectively. Return 1 if the switch is valid, 0 if invalid, and -1 if it's valid and should not be treated as language-independent too. 
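   A hedged sketch of the shape such a handler usually takes (OPT_fexample and flag_example are hypothetical, purely illustrative names):

     switch (code)
       {
       case OPT_fexample:
         flag_example = value;
         return 1;
       default:
         return 0;
       }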
*/
  int (*handle_option) (size_t code, const char *arg, int value);

  /* Return false to use the default complaint about a missing argument, otherwise output a complaint and return true. */
  bool (*missing_argument) (const char *opt, size_t code);

  /* Called when all command line options have been parsed to allow further processing and initialization. Should return true to indicate that a compiler back-end is not required, such as with the -E option. If errorcount is nonzero after this call the compiler exits immediately and the finish hook is not called. */
  bool (*post_options) (const char **);

  /* Called after post_options to initialize the front end. Return false to indicate that no further compilation be performed, in which case the finish hook is called immediately. */
  bool (*init) (void);

  /* Called at the end of compilation, as a finalizer. */
  void (*finish) (void);

  /* Parses the entire file. The argument is nonzero to cause bison parsers to dump debugging information during parsing. */
  void (*parse_file) (int);

  /* Called immediately after parsing to clear the binding stack. */
  void (*clear_binding_stack) (void);

  /* Called to obtain the alias set to be used for an expression or type. Returns -1 if the language does nothing special for it. */
  HOST_WIDE_INT (*get_alias_set) (tree);

  /* Called with an expression that is to be processed as a constant. Returns either the same expression or a language-independent constant equivalent to its input. */
  tree (*expand_constant) (tree);

  /* Called by expand_expr for language-specific tree codes. Fourth argument is actually an enum expand_modifier. */
  rtx (*expand_expr) (tree, rtx, enum machine_mode, int, rtx *);

  /* Called by expand_expr to generate the definition of a decl. Returns 1 if handled, 0 otherwise. */
  int (*expand_decl) (tree);

  /* Prepare expr to be an argument of a TRUTH_NOT_EXPR or other logical operation. This preparation consists of taking the ordinary representation of an expression expr and producing a valid tree boolean expression describing whether expr is nonzero. We could simply always do build_binary_op (NE_EXPR, expr, integer_zero_node, 1), but we optimize comparisons, &&, ||, and !. The result should be an expression of boolean type (if not an error_mark_node). */
  tree (*truthvalue_conversion) (tree);

  /* Hook called by safe_from_p for language-specific tree codes. It is up to the language front-end to install a hook if it has any such codes that safe_from_p needs to know about. Since safe_from_p will recursively explore the TREE_OPERANDs of an expression, this hook should not reexamine those pieces. This routine may recursively call safe_from_p; it should always pass `0' as the TOP_P parameter. */
  int (*safe_from_p) (rtx, tree);

  /* Function to finish handling an incomplete decl at the end of compilation. The default hook does nothing. */
  void (*finish_incomplete_decl) (tree);

  /* Function used by unsafe_for_reeval. A non-negative number is returned directly from unsafe_for_reeval, a negative number falls through. The default hook returns a negative number. */
  int (*unsafe_for_reeval) (tree);

  /* Mark EXP saying that we need to be able to take the address of it; it should not be allocated in a register. Return true if successful. */
  bool (*mark_addressable) (tree);

  /* Hook called by staticp for language-specific tree codes. */
  int (*staticp) (tree);

  /* Replace the DECL_LANG_SPECIFIC data, which may be NULL, of the DECL_NODE with a newly GC-allocated copy.
*/ void (*dup_lang_specific_decl) (tree); /* Called before its argument, an UNSAVE_EXPR, is to be unsaved. Modify it in-place so that all the evaluate only once things are cleared out. */ tree (*unsave_expr_now) (tree); /* Called by expand_expr to build and return the cleanup-expression for the passed TARGET_EXPR. Return NULL if there is none. */ tree (*maybe_build_cleanup) (tree); /* Set the DECL_ASSEMBLER_NAME for a node. If it is the sort of thing that the assembler should talk about, set DECL_ASSEMBLER_NAME to an appropriate IDENTIFIER_NODE. Otherwise, set it to the ERROR_MARK_NODE to ensure that the assembler does not talk about it. */ void (*set_decl_assembler_name) (tree); /* Return nonzero if fold-const is free to use bit-field optimizations, for instance in fold_truthop(). */ bool (*can_use_bit_fields_p) (void); /* Nonzero if TYPE_READONLY and TREE_READONLY should always be honored. */ bool honor_readonly; /* Nonzero if this front end does not generate a dummy BLOCK between the outermost scope of the function and the FUNCTION_DECL. See is_body_block in stmt.c, and its callers. */ bool no_body_blocks; /* The front end can add its own statistics to -fmem-report with this hook. It should output to stderr. */ void (*print_statistics) (void); /* Called by print_tree when there is a tree of class 'x' that it doesn't know how to display. */ lang_print_tree_hook print_xnode; /* Called to print language-dependent parts of a class 'd', class 't', and IDENTIFIER_NODE nodes. */ lang_print_tree_hook print_decl; lang_print_tree_hook print_type; lang_print_tree_hook print_identifier; /* Computes the name to use to print a declaration. DECL is the non-NULL declaration in question. VERBOSITY determines what information will be printed: 0: DECL_NAME, demangled as necessary. 1: and scope information. 2: and any other information that might be interesting, such as function parameter types in C++. */ const char *(*decl_printable_name) (tree decl, int verbosity); /* This compares two types for equivalence ("compatible" in C-based languages). This routine should only return 1 if it is sure. It should not be used in contexts where erroneously returning 0 causes problems. */ int (*types_compatible_p) (tree x, tree y); /* Given a CALL_EXPR, return a function decl that is its target. */ tree (*lang_get_callee_fndecl) (tree); /* Called by report_error_function to print out function name. */ void (*print_error_function) (struct diagnostic_context *, const char *); /* Called from expr_size to calculate the size of the value of an expression in a language-dependent way. Returns a tree for the size in bytes. A frontend can call lhd_expr_size to get the default semantics in cases that it doesn't want to handle specially. */ tree (*expr_size) (tree); /* Update lang specific fields after duplicating function body. */ void (*update_decl_after_saving) (tree, void *); /* Pointers to machine-independent attribute tables, for front ends using attribs.c. If one is NULL, it is ignored. Respectively, a table of attributes specific to the language, a table of attributes common to two or more languages (to allow easy sharing), and a table of attributes for checking formats. */ const struct attribute_spec *attribute_table; const struct attribute_spec *common_attribute_table; const struct attribute_spec *format_attribute_table; /* Function-related language hooks. 
*/ struct lang_hooks_for_functions function; struct lang_hooks_for_tree_inlining tree_inlining; struct lang_hooks_for_callgraph callgraph; struct lang_hooks_for_tree_dump tree_dump; struct lang_hooks_for_decls decls; struct lang_hooks_for_types types; /* Perform language-specific gimplification on the argument. Returns an enum gimplify_status, though we can't see that type here. */ int (*gimplify_expr) (tree *, tree *, tree *); /* Fold an OBJ_TYPE_REF expression to the address of a function. KNOWN_TYPE carries the true type of the OBJ_TYPE_REF_OBJECT. */ tree (*fold_obj_type_ref) (tree, tree); /* True if the front end has gimplified the function before running the inliner, false if the front end generates GENERIC directly. */ bool gimple_before_inlining; /* Whenever you add entries here, make sure you adjust langhooks-def.h and langhooks.c accordingly. */ }; /* Each front end provides its own. */ extern const struct lang_hooks lang_hooks; #endif /* GCC_LANG_HOOKS_H */ /* Garbage collection for the GNU compiler. Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_GGC_H #define GCC_GGC_H /* Symbols are marked with `ggc' for `gcc gc' so as not to interfere with an external gc library that might be linked in. */ /* Constants for general use. */ extern const char empty_string[]; /* empty string */ extern const char digit_vector[]; /* "0" .. "9" */ #define digit_string(d) (digit_vector + ((d) * 2)) /* Internal functions and data structures used by the GTY machinery. */ /* The first parameter is a pointer to a pointer, the second a cookie. */ typedef void (*gt_pointer_operator) (void *, void *); /* Type information for GCC. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* Enumeration of types known. 
*/ enum gt_types_enum { gt_ggc_e_11align_stack, gt_ggc_e_7c_scope, gt_ggc_e_9c_binding, gt_ggc_e_12aterm_list_a, gt_ggc_e_6aterm_, gt_ggc_e_15throw_stmt_node, gt_ggc_e_15edge_prediction, gt_ggc_e_19v_must_def_optype_d, gt_ggc_e_13vuse_optype_d, gt_ggc_e_18v_may_def_optype_d, gt_ggc_e_12use_optype_d, gt_ggc_e_12def_optype_d, gt_ggc_e_10dataflow_d, gt_ggc_e_23constant_descriptor_rtx, gt_ggc_e_24constant_descriptor_tree, gt_ggc_e_14in_named_entry, gt_ggc_e_17rtx_constant_pool, gt_ggc_e_9type_hash, gt_ggc_e_16string_pool_data, gt_ggc_e_10goto_fixup, gt_ggc_e_11label_chain, gt_ggc_e_7nesting, gt_ggc_e_9case_node, gt_ggc_e_9eh_region, gt_ggc_e_13ehl_map_entry, gt_ggc_e_16var_loc_list_def, gt_ggc_e_12var_loc_node, gt_ggc_e_16limbo_die_struct, gt_ggc_e_16dw_ranges_struct, gt_ggc_e_14pubname_struct, gt_ggc_e_28dw_separate_line_info_struct, gt_ggc_e_19dw_line_info_struct, gt_ggc_e_14dw_attr_struct, gt_ggc_e_18dw_loc_list_struct, gt_ggc_e_15queued_reg_save, gt_ggc_e_20indirect_string_node, gt_ggc_e_19dw_loc_descr_struct, gt_ggc_e_13dw_fde_struct, gt_ggc_e_13dw_cfi_struct, gt_ggc_e_8typeinfo, gt_ggc_e_15alias_set_entry, gt_ggc_e_8c_switch, gt_ggc_e_18sorted_fields_type, gt_ggc_e_19cgraph_varpool_node, gt_ggc_e_11cgraph_edge, gt_ggc_e_11cgraph_node, gt_ggc_e_8bb_ann_d, gt_ggc_e_17reorder_block_def, gt_ggc_e_7et_node, gt_ggc_e_4loop, gt_ggc_e_12elt_loc_list, gt_ggc_e_17cselib_val_struct, gt_ggc_e_8elt_list, gt_ggc_e_12reg_info_def, gt_ggc_e_14lang_tree_node, gt_ggc_e_9value_set, gt_ggc_e_24tree_statement_list_node, gt_ggc_e_9lang_decl, gt_ggc_e_13alias_var_def, gt_ggc_e_9lang_type, gt_ggc_e_10die_struct, gt_ggc_e_8edge_def, gt_ggc_e_12ptr_info_def, gt_ggc_e_10real_value, gt_ggc_e_10tree_ann_d, gt_ggc_e_13convert_optab, gt_ggc_e_5optab, gt_ggc_e_15basic_block_def, gt_ggc_e_9reg_attrs, gt_ggc_e_9mem_attrs, gt_ggc_e_17language_function, gt_ggc_e_9temp_slot, gt_ggc_e_15varray_head_tag, gt_ggc_e_20initial_value_struct, gt_ggc_e_13varasm_status, gt_ggc_e_11stmt_status, gt_ggc_e_9eh_status, gt_ggc_e_8function, gt_ggc_e_11expr_status, gt_ggc_e_11emit_status, gt_ggc_e_14sequence_stack, gt_ggc_e_14var_refs_queue, gt_ggc_e_15bitmap_head_def, gt_ggc_e_18bitmap_element_def, gt_ggc_e_17stack_local_entry, gt_ggc_e_16machine_function, gt_ggc_e_6answer, gt_ggc_e_9cpp_macro, gt_ggc_e_9cpp_token, gt_ggc_e_9tree_node, gt_ggc_e_9rtvec_def, gt_ggc_e_7rtx_def, gt_ggc_e_10location_s, gt_e_II17splay_tree_node_s, gt_e_SP9tree_node17splay_tree_node_s, gt_e_P13alias_var_def15varray_head_tag, gt_e_P15throw_stmt_node4htab, gt_e_P23constant_descriptor_rtx4htab, gt_e_P24constant_descriptor_tree4htab, gt_e_P14in_named_entry4htab, gt_e_P9type_hash4htab, gt_e_P13ehl_map_entry4htab, gt_e_P9tree_node4htab, gt_e_P9reg_attrs4htab, gt_e_P9mem_attrs4htab, gt_e_P7rtx_def4htab, gt_e_SP9tree_node12splay_tree_s, gt_e_P16var_loc_list_def4htab, gt_e_P10die_struct4htab, gt_e_P20indirect_string_node4htab, gt_e_P19cgraph_varpool_node4htab, gt_e_P11cgraph_node4htab, gt_e_P15alias_set_entry15varray_head_tag, gt_e_II12splay_tree_s, gt_e_P9temp_slot15varray_head_tag, gt_types_enum_last }; /* GC marker procedures. 
*/ #define gt_ggc_m_11align_stack(X) do { \ if (X != NULL) gt_ggc_mx_align_stack (X);\ } while (0) extern void gt_ggc_mx_align_stack (void *); #define gt_ggc_m_7c_scope(X) do { \ if (X != NULL) gt_ggc_mx_c_scope (X);\ } while (0) extern void gt_ggc_mx_c_scope (void *); #define gt_ggc_m_9c_binding(X) do { \ if (X != NULL) gt_ggc_mx_c_binding (X);\ } while (0) extern void gt_ggc_mx_c_binding (void *); #define gt_ggc_m_12aterm_list_a(X) do { \ if (X != NULL) gt_ggc_mx_aterm_list_a (X);\ } while (0) extern void gt_ggc_mx_aterm_list_a (void *); #define gt_ggc_m_6aterm_(X) do { \ if (X != NULL) gt_ggc_mx_aterm_ (X);\ } while (0) extern void gt_ggc_mx_aterm_ (void *); #define gt_ggc_m_15throw_stmt_node(X) do { \ if (X != NULL) gt_ggc_mx_throw_stmt_node (X);\ } while (0) extern void gt_ggc_mx_throw_stmt_node (void *); #define gt_ggc_m_15edge_prediction(X) do { \ if (X != NULL) gt_ggc_mx_edge_prediction (X);\ } while (0) extern void gt_ggc_mx_edge_prediction (void *); #define gt_ggc_m_19v_must_def_optype_d(X) do { \ if (X != NULL) gt_ggc_mx_v_must_def_optype_d (X);\ } while (0) extern void gt_ggc_mx_v_must_def_optype_d (void *); #define gt_ggc_m_13vuse_optype_d(X) do { \ if (X != NULL) gt_ggc_mx_vuse_optype_d (X);\ } while (0) extern void gt_ggc_mx_vuse_optype_d (void *); #define gt_ggc_m_18v_may_def_optype_d(X) do { \ if (X != NULL) gt_ggc_mx_v_may_def_optype_d (X);\ } while (0) extern void gt_ggc_mx_v_may_def_optype_d (void *); #define gt_ggc_m_12use_optype_d(X) do { \ if (X != NULL) gt_ggc_mx_use_optype_d (X);\ } while (0) extern void gt_ggc_mx_use_optype_d (void *); #define gt_ggc_m_12def_optype_d(X) do { \ if (X != NULL) gt_ggc_mx_def_optype_d (X);\ } while (0) extern void gt_ggc_mx_def_optype_d (void *); #define gt_ggc_m_10dataflow_d(X) do { \ if (X != NULL) gt_ggc_mx_dataflow_d (X);\ } while (0) extern void gt_ggc_mx_dataflow_d (void *); #define gt_ggc_m_23constant_descriptor_rtx(X) do { \ if (X != NULL) gt_ggc_mx_constant_descriptor_rtx (X);\ } while (0) extern void gt_ggc_mx_constant_descriptor_rtx (void *); #define gt_ggc_m_24constant_descriptor_tree(X) do { \ if (X != NULL) gt_ggc_mx_constant_descriptor_tree (X);\ } while (0) extern void gt_ggc_mx_constant_descriptor_tree (void *); #define gt_ggc_m_14in_named_entry(X) do { \ if (X != NULL) gt_ggc_mx_in_named_entry (X);\ } while (0) extern void gt_ggc_mx_in_named_entry (void *); #define gt_ggc_m_17rtx_constant_pool(X) do { \ if (X != NULL) gt_ggc_mx_rtx_constant_pool (X);\ } while (0) extern void gt_ggc_mx_rtx_constant_pool (void *); #define gt_ggc_m_9type_hash(X) do { \ if (X != NULL) gt_ggc_mx_type_hash (X);\ } while (0) extern void gt_ggc_mx_type_hash (void *); #define gt_ggc_m_16string_pool_data(X) do { \ if (X != NULL) gt_ggc_mx_string_pool_data (X);\ } while (0) extern void gt_ggc_mx_string_pool_data (void *); #define gt_ggc_m_10goto_fixup(X) do { \ if (X != NULL) gt_ggc_mx_goto_fixup (X);\ } while (0) extern void gt_ggc_mx_goto_fixup (void *); #define gt_ggc_m_11label_chain(X) do { \ if (X != NULL) gt_ggc_mx_label_chain (X);\ } while (0) extern void gt_ggc_mx_label_chain (void *); #define gt_ggc_m_7nesting(X) do { \ if (X != NULL) gt_ggc_mx_nesting (X);\ } while (0) extern void gt_ggc_mx_nesting (void *); #define gt_ggc_m_9case_node(X) do { \ if (X != NULL) gt_ggc_mx_case_node (X);\ } while (0) extern void gt_ggc_mx_case_node (void *); #define gt_ggc_m_9eh_region(X) do { \ if (X != NULL) gt_ggc_mx_eh_region (X);\ } while (0) extern void gt_ggc_mx_eh_region (void *); #define gt_ggc_m_13ehl_map_entry(X) do { \ if (X != NULL) 
gt_ggc_mx_ehl_map_entry (X);\ } while (0) extern void gt_ggc_mx_ehl_map_entry (void *); #define gt_ggc_m_16var_loc_list_def(X) do { \ if (X != NULL) gt_ggc_mx_var_loc_list_def (X);\ } while (0) extern void gt_ggc_mx_var_loc_list_def (void *); #define gt_ggc_m_12var_loc_node(X) do { \ if (X != NULL) gt_ggc_mx_var_loc_node (X);\ } while (0) extern void gt_ggc_mx_var_loc_node (void *); #define gt_ggc_m_16limbo_die_struct(X) do { \ if (X != NULL) gt_ggc_mx_limbo_die_struct (X);\ } while (0) extern void gt_ggc_mx_limbo_die_struct (void *); #define gt_ggc_m_16dw_ranges_struct(X) do { \ if (X != NULL) gt_ggc_mx_dw_ranges_struct (X);\ } while (0) extern void gt_ggc_mx_dw_ranges_struct (void *); #define gt_ggc_m_14pubname_struct(X) do { \ if (X != NULL) gt_ggc_mx_pubname_struct (X);\ } while (0) extern void gt_ggc_mx_pubname_struct (void *); #define gt_ggc_m_28dw_separate_line_info_struct(X) do { \ if (X != NULL) gt_ggc_mx_dw_separate_line_info_struct (X);\ } while (0) extern void gt_ggc_mx_dw_separate_line_info_struct (void *); #define gt_ggc_m_19dw_line_info_struct(X) do { \ if (X != NULL) gt_ggc_mx_dw_line_info_struct (X);\ } while (0) extern void gt_ggc_mx_dw_line_info_struct (void *); #define gt_ggc_m_14dw_attr_struct(X) do { \ if (X != NULL) gt_ggc_mx_dw_attr_struct (X);\ } while (0) extern void gt_ggc_mx_dw_attr_struct (void *); #define gt_ggc_m_18dw_loc_list_struct(X) do { \ if (X != NULL) gt_ggc_mx_dw_loc_list_struct (X);\ } while (0) extern void gt_ggc_mx_dw_loc_list_struct (void *); #define gt_ggc_m_15queued_reg_save(X) do { \ if (X != NULL) gt_ggc_mx_queued_reg_save (X);\ } while (0) extern void gt_ggc_mx_queued_reg_save (void *); #define gt_ggc_m_20indirect_string_node(X) do { \ if (X != NULL) gt_ggc_mx_indirect_string_node (X);\ } while (0) extern void gt_ggc_mx_indirect_string_node (void *); #define gt_ggc_m_19dw_loc_descr_struct(X) do { \ if (X != NULL) gt_ggc_mx_dw_loc_descr_struct (X);\ } while (0) extern void gt_ggc_mx_dw_loc_descr_struct (void *); #define gt_ggc_m_13dw_fde_struct(X) do { \ if (X != NULL) gt_ggc_mx_dw_fde_struct (X);\ } while (0) extern void gt_ggc_mx_dw_fde_struct (void *); #define gt_ggc_m_13dw_cfi_struct(X) do { \ if (X != NULL) gt_ggc_mx_dw_cfi_struct (X);\ } while (0) extern void gt_ggc_mx_dw_cfi_struct (void *); #define gt_ggc_m_8typeinfo(X) do { \ if (X != NULL) gt_ggc_mx_typeinfo (X);\ } while (0) extern void gt_ggc_mx_typeinfo (void *); #define gt_ggc_m_15alias_set_entry(X) do { \ if (X != NULL) gt_ggc_mx_alias_set_entry (X);\ } while (0) extern void gt_ggc_mx_alias_set_entry (void *); #define gt_ggc_m_8c_switch(X) do { \ if (X != NULL) gt_ggc_mx_c_switch (X);\ } while (0) extern void gt_ggc_mx_c_switch (void *); #define gt_ggc_m_18sorted_fields_type(X) do { \ if (X != NULL) gt_ggc_mx_sorted_fields_type (X);\ } while (0) extern void gt_ggc_mx_sorted_fields_type (void *); #define gt_ggc_m_19cgraph_varpool_node(X) do { \ if (X != NULL) gt_ggc_mx_cgraph_varpool_node (X);\ } while (0) extern void gt_ggc_mx_cgraph_varpool_node (void *); #define gt_ggc_m_11cgraph_edge(X) do { \ if (X != NULL) gt_ggc_mx_cgraph_edge (X);\ } while (0) extern void gt_ggc_mx_cgraph_edge (void *); #define gt_ggc_m_11cgraph_node(X) do { \ if (X != NULL) gt_ggc_mx_cgraph_node (X);\ } while (0) extern void gt_ggc_mx_cgraph_node (void *); #define gt_ggc_m_8bb_ann_d(X) do { \ if (X != NULL) gt_ggc_mx_bb_ann_d (X);\ } while (0) extern void gt_ggc_mx_bb_ann_d (void *); #define gt_ggc_m_17reorder_block_def(X) do { \ if (X != NULL) gt_ggc_mx_reorder_block_def (X);\ } while (0) extern void 
gt_ggc_mx_reorder_block_def (void *); #define gt_ggc_m_7et_node(X) do { \ if (X != NULL) gt_ggc_mx_et_node (X);\ } while (0) extern void gt_ggc_mx_et_node (void *); #define gt_ggc_m_4loop(X) do { \ if (X != NULL) gt_ggc_mx_loop (X);\ } while (0) extern void gt_ggc_mx_loop (void *); #define gt_ggc_m_12elt_loc_list(X) do { \ if (X != NULL) gt_ggc_mx_elt_loc_list (X);\ } while (0) extern void gt_ggc_mx_elt_loc_list (void *); #define gt_ggc_m_17cselib_val_struct(X) do { \ if (X != NULL) gt_ggc_mx_cselib_val_struct (X);\ } while (0) extern void gt_ggc_mx_cselib_val_struct (void *); #define gt_ggc_m_8elt_list(X) do { \ if (X != NULL) gt_ggc_mx_elt_list (X);\ } while (0) extern void gt_ggc_mx_elt_list (void *); #define gt_ggc_m_12reg_info_def(X) do { \ if (X != NULL) gt_ggc_mx_reg_info_def (X);\ } while (0) extern void gt_ggc_mx_reg_info_def (void *); #define gt_ggc_m_14lang_tree_node(X) do { \ if (X != NULL) gt_ggc_mx_lang_tree_node (X);\ } while (0) extern void gt_ggc_mx_lang_tree_node (void *); #define gt_ggc_m_9value_set(X) do { \ if (X != NULL) gt_ggc_mx_value_set (X);\ } while (0) extern void gt_ggc_mx_value_set (void *); #define gt_ggc_m_24tree_statement_list_node(X) do { \ if (X != NULL) gt_ggc_mx_tree_statement_list_node (X);\ } while (0) extern void gt_ggc_mx_tree_statement_list_node (void *); #define gt_ggc_m_9lang_decl(X) do { \ if (X != NULL) gt_ggc_mx_lang_decl (X);\ } while (0) extern void gt_ggc_mx_lang_decl (void *); #define gt_ggc_m_13alias_var_def(X) do { \ if (X != NULL) gt_ggc_mx_alias_var_def (X);\ } while (0) extern void gt_ggc_mx_alias_var_def (void *); #define gt_ggc_m_9lang_type(X) do { \ if (X != NULL) gt_ggc_mx_lang_type (X);\ } while (0) extern void gt_ggc_mx_lang_type (void *); #define gt_ggc_m_10die_struct(X) do { \ if (X != NULL) gt_ggc_mx_die_struct (X);\ } while (0) extern void gt_ggc_mx_die_struct (void *); #define gt_ggc_m_8edge_def(X) do { \ if (X != NULL) gt_ggc_mx_edge_def (X);\ } while (0) extern void gt_ggc_mx_edge_def (void *); #define gt_ggc_m_12ptr_info_def(X) do { \ if (X != NULL) gt_ggc_mx_ptr_info_def (X);\ } while (0) extern void gt_ggc_mx_ptr_info_def (void *); #define gt_ggc_m_10real_value(X) do { \ if (X != NULL) gt_ggc_mx_real_value (X);\ } while (0) extern void gt_ggc_mx_real_value (void *); #define gt_ggc_m_10tree_ann_d(X) do { \ if (X != NULL) gt_ggc_mx_tree_ann_d (X);\ } while (0) extern void gt_ggc_mx_tree_ann_d (void *); #define gt_ggc_m_13convert_optab(X) do { \ if (X != NULL) gt_ggc_mx_convert_optab (X);\ } while (0) extern void gt_ggc_mx_convert_optab (void *); #define gt_ggc_m_5optab(X) do { \ if (X != NULL) gt_ggc_mx_optab (X);\ } while (0) extern void gt_ggc_mx_optab (void *); #define gt_ggc_m_15basic_block_def(X) do { \ if (X != NULL) gt_ggc_mx_basic_block_def (X);\ } while (0) extern void gt_ggc_mx_basic_block_def (void *); #define gt_ggc_m_9reg_attrs(X) do { \ if (X != NULL) gt_ggc_mx_reg_attrs (X);\ } while (0) extern void gt_ggc_mx_reg_attrs (void *); #define gt_ggc_m_9mem_attrs(X) do { \ if (X != NULL) gt_ggc_mx_mem_attrs (X);\ } while (0) extern void gt_ggc_mx_mem_attrs (void *); #define gt_ggc_m_17language_function(X) do { \ if (X != NULL) gt_ggc_mx_language_function (X);\ } while (0) extern void gt_ggc_mx_language_function (void *); #define gt_ggc_m_9temp_slot(X) do { \ if (X != NULL) gt_ggc_mx_temp_slot (X);\ } while (0) extern void gt_ggc_mx_temp_slot (void *); #define gt_ggc_m_15varray_head_tag(X) do { \ if (X != NULL) gt_ggc_mx_varray_head_tag (X);\ } while (0) extern void gt_ggc_mx_varray_head_tag (void *); #define 
gt_ggc_m_20initial_value_struct(X) do { \ if (X != NULL) gt_ggc_mx_initial_value_struct (X);\ } while (0) extern void gt_ggc_mx_initial_value_struct (void *); #define gt_ggc_m_13varasm_status(X) do { \ if (X != NULL) gt_ggc_mx_varasm_status (X);\ } while (0) extern void gt_ggc_mx_varasm_status (void *); #define gt_ggc_m_11stmt_status(X) do { \ if (X != NULL) gt_ggc_mx_stmt_status (X);\ } while (0) extern void gt_ggc_mx_stmt_status (void *); #define gt_ggc_m_9eh_status(X) do { \ if (X != NULL) gt_ggc_mx_eh_status (X);\ } while (0) extern void gt_ggc_mx_eh_status (void *); #define gt_ggc_m_8function(X) do { \ if (X != NULL) gt_ggc_mx_function (X);\ } while (0) extern void gt_ggc_mx_function (void *); #define gt_ggc_m_11expr_status(X) do { \ if (X != NULL) gt_ggc_mx_expr_status (X);\ } while (0) extern void gt_ggc_mx_expr_status (void *); #define gt_ggc_m_11emit_status(X) do { \ if (X != NULL) gt_ggc_mx_emit_status (X);\ } while (0) extern void gt_ggc_mx_emit_status (void *); #define gt_ggc_m_14sequence_stack(X) do { \ if (X != NULL) gt_ggc_mx_sequence_stack (X);\ } while (0) extern void gt_ggc_mx_sequence_stack (void *); #define gt_ggc_m_14var_refs_queue(X) do { \ if (X != NULL) gt_ggc_mx_var_refs_queue (X);\ } while (0) extern void gt_ggc_mx_var_refs_queue (void *); #define gt_ggc_m_15bitmap_head_def(X) do { \ if (X != NULL) gt_ggc_mx_bitmap_head_def (X);\ } while (0) extern void gt_ggc_mx_bitmap_head_def (void *); #define gt_ggc_m_18bitmap_element_def(X) do { \ if (X != NULL) gt_ggc_mx_bitmap_element_def (X);\ } while (0) extern void gt_ggc_mx_bitmap_element_def (void *); #define gt_ggc_m_17stack_local_entry(X) do { \ if (X != NULL) gt_ggc_mx_stack_local_entry (X);\ } while (0) extern void gt_ggc_mx_stack_local_entry (void *); #define gt_ggc_m_16machine_function(X) do { \ if (X != NULL) gt_ggc_mx_machine_function (X);\ } while (0) extern void gt_ggc_mx_machine_function (void *); #define gt_ggc_m_6answer(X) do { \ if (X != NULL) gt_ggc_mx_answer (X);\ } while (0) extern void gt_ggc_mx_answer (void *); #define gt_ggc_m_9cpp_macro(X) do { \ if (X != NULL) gt_ggc_mx_cpp_macro (X);\ } while (0) extern void gt_ggc_mx_cpp_macro (void *); #define gt_ggc_m_9cpp_token(X) do { \ if (X != NULL) gt_ggc_mx_cpp_token (X);\ } while (0) extern void gt_ggc_mx_cpp_token (void *); #define gt_ggc_m_9tree_node(X) do { \ if (X != NULL) gt_ggc_mx_tree_node (X);\ } while (0) #define gt_ggc_mx_tree_node gt_ggc_mx_lang_tree_node #define gt_ggc_m_9rtvec_def(X) do { \ if (X != NULL) gt_ggc_mx_rtvec_def (X);\ } while (0) extern void gt_ggc_mx_rtvec_def (void *); #define gt_ggc_m_7rtx_def(X) do { \ if (X != NULL) gt_ggc_mx_rtx_def (X);\ } while (0) extern void gt_ggc_mx_rtx_def (void *); #define gt_ggc_m_10location_s(X) do { \ if (X != NULL) gt_ggc_mx_location_s (X);\ } while (0) extern void gt_ggc_mx_location_s (void *); extern void gt_ggc_m_II17splay_tree_node_s (void *); extern void gt_ggc_m_SP9tree_node17splay_tree_node_s (void *); extern void gt_ggc_m_P13alias_var_def15varray_head_tag (void *); extern void gt_ggc_m_P15throw_stmt_node4htab (void *); extern void gt_ggc_m_P23constant_descriptor_rtx4htab (void *); extern void gt_ggc_m_P24constant_descriptor_tree4htab (void *); extern void gt_ggc_m_P14in_named_entry4htab (void *); extern void gt_ggc_m_P9type_hash4htab (void *); extern void gt_ggc_m_P13ehl_map_entry4htab (void *); extern void gt_ggc_m_P9tree_node4htab (void *); extern void gt_ggc_m_P9reg_attrs4htab (void *); extern void gt_ggc_m_P9mem_attrs4htab (void *); extern void gt_ggc_m_P7rtx_def4htab (void *); 
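/* A note on the marker macros above: each gt_ggc_m_<len><typename> macro just
   NULL-checks its argument and then calls the matching gt_ggc_mx_<typename>
   routine, which gengtype emits (into this machine-generated type-information
   section) from the GTY(()) markup on the type.  As an illustrative sketch
   only -- `struct foo' is hypothetical, not one of the types listed here --
   such a generated marker looks roughly like:

     struct foo GTY(()) { struct foo *next; tree decl; };

     void
     gt_ggc_mx_foo (void *x_p)
     {
       struct foo * const x = (struct foo *) x_p;
       if (ggc_test_and_set_mark (x))
         {
           gt_ggc_m_3foo ((*x).next);
           gt_ggc_m_9tree_node ((*x).decl);
         }
     }

   ggc_test_and_set_mark (defined later in this header) marks the object and
   evaluates to true only the first time it sees it, so shared data is walked
   at most once per collection.  */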
extern void gt_ggc_m_SP9tree_node12splay_tree_s (void *); extern void gt_ggc_m_P16var_loc_list_def4htab (void *); extern void gt_ggc_m_P10die_struct4htab (void *); extern void gt_ggc_m_P20indirect_string_node4htab (void *); extern void gt_ggc_m_P19cgraph_varpool_node4htab (void *); extern void gt_ggc_m_P11cgraph_node4htab (void *); extern void gt_ggc_m_P15alias_set_entry15varray_head_tag (void *); extern void gt_ggc_m_II12splay_tree_s (void *); extern void gt_ggc_m_P9temp_slot15varray_head_tag (void *); /* PCH type-walking procedures. */ #define gt_pch_n_11align_stack(X) do { \ if (X != NULL) gt_pch_nx_align_stack (X);\ } while (0) extern void gt_pch_nx_align_stack (void *); #define gt_pch_n_7c_scope(X) do { \ if (X != NULL) gt_pch_nx_c_scope (X);\ } while (0) extern void gt_pch_nx_c_scope (void *); #define gt_pch_n_9c_binding(X) do { \ if (X != NULL) gt_pch_nx_c_binding (X);\ } while (0) extern void gt_pch_nx_c_binding (void *); #define gt_pch_n_12aterm_list_a(X) do { \ if (X != NULL) gt_pch_nx_aterm_list_a (X);\ } while (0) extern void gt_pch_nx_aterm_list_a (void *); #define gt_pch_n_6aterm_(X) do { \ if (X != NULL) gt_pch_nx_aterm_ (X);\ } while (0) extern void gt_pch_nx_aterm_ (void *); #define gt_pch_n_15throw_stmt_node(X) do { \ if (X != NULL) gt_pch_nx_throw_stmt_node (X);\ } while (0) extern void gt_pch_nx_throw_stmt_node (void *); #define gt_pch_n_15edge_prediction(X) do { \ if (X != NULL) gt_pch_nx_edge_prediction (X);\ } while (0) extern void gt_pch_nx_edge_prediction (void *); #define gt_pch_n_19v_must_def_optype_d(X) do { \ if (X != NULL) gt_pch_nx_v_must_def_optype_d (X);\ } while (0) extern void gt_pch_nx_v_must_def_optype_d (void *); #define gt_pch_n_13vuse_optype_d(X) do { \ if (X != NULL) gt_pch_nx_vuse_optype_d (X);\ } while (0) extern void gt_pch_nx_vuse_optype_d (void *); #define gt_pch_n_18v_may_def_optype_d(X) do { \ if (X != NULL) gt_pch_nx_v_may_def_optype_d (X);\ } while (0) extern void gt_pch_nx_v_may_def_optype_d (void *); #define gt_pch_n_12use_optype_d(X) do { \ if (X != NULL) gt_pch_nx_use_optype_d (X);\ } while (0) extern void gt_pch_nx_use_optype_d (void *); #define gt_pch_n_12def_optype_d(X) do { \ if (X != NULL) gt_pch_nx_def_optype_d (X);\ } while (0) extern void gt_pch_nx_def_optype_d (void *); #define gt_pch_n_10dataflow_d(X) do { \ if (X != NULL) gt_pch_nx_dataflow_d (X);\ } while (0) extern void gt_pch_nx_dataflow_d (void *); #define gt_pch_n_23constant_descriptor_rtx(X) do { \ if (X != NULL) gt_pch_nx_constant_descriptor_rtx (X);\ } while (0) extern void gt_pch_nx_constant_descriptor_rtx (void *); #define gt_pch_n_24constant_descriptor_tree(X) do { \ if (X != NULL) gt_pch_nx_constant_descriptor_tree (X);\ } while (0) extern void gt_pch_nx_constant_descriptor_tree (void *); #define gt_pch_n_14in_named_entry(X) do { \ if (X != NULL) gt_pch_nx_in_named_entry (X);\ } while (0) extern void gt_pch_nx_in_named_entry (void *); #define gt_pch_n_17rtx_constant_pool(X) do { \ if (X != NULL) gt_pch_nx_rtx_constant_pool (X);\ } while (0) extern void gt_pch_nx_rtx_constant_pool (void *); #define gt_pch_n_9type_hash(X) do { \ if (X != NULL) gt_pch_nx_type_hash (X);\ } while (0) extern void gt_pch_nx_type_hash (void *); #define gt_pch_n_16string_pool_data(X) do { \ if (X != NULL) gt_pch_nx_string_pool_data (X);\ } while (0) extern void gt_pch_nx_string_pool_data (void *); #define gt_pch_n_10goto_fixup(X) do { \ if (X != NULL) gt_pch_nx_goto_fixup (X);\ } while (0) extern void gt_pch_nx_goto_fixup (void *); #define gt_pch_n_11label_chain(X) do { \ if (X != NULL) 
gt_pch_nx_label_chain (X);\ } while (0) extern void gt_pch_nx_label_chain (void *); #define gt_pch_n_7nesting(X) do { \ if (X != NULL) gt_pch_nx_nesting (X);\ } while (0) extern void gt_pch_nx_nesting (void *); #define gt_pch_n_9case_node(X) do { \ if (X != NULL) gt_pch_nx_case_node (X);\ } while (0) extern void gt_pch_nx_case_node (void *); #define gt_pch_n_9eh_region(X) do { \ if (X != NULL) gt_pch_nx_eh_region (X);\ } while (0) extern void gt_pch_nx_eh_region (void *); #define gt_pch_n_13ehl_map_entry(X) do { \ if (X != NULL) gt_pch_nx_ehl_map_entry (X);\ } while (0) extern void gt_pch_nx_ehl_map_entry (void *); #define gt_pch_n_16var_loc_list_def(X) do { \ if (X != NULL) gt_pch_nx_var_loc_list_def (X);\ } while (0) extern void gt_pch_nx_var_loc_list_def (void *); #define gt_pch_n_12var_loc_node(X) do { \ if (X != NULL) gt_pch_nx_var_loc_node (X);\ } while (0) extern void gt_pch_nx_var_loc_node (void *); #define gt_pch_n_16limbo_die_struct(X) do { \ if (X != NULL) gt_pch_nx_limbo_die_struct (X);\ } while (0) extern void gt_pch_nx_limbo_die_struct (void *); #define gt_pch_n_16dw_ranges_struct(X) do { \ if (X != NULL) gt_pch_nx_dw_ranges_struct (X);\ } while (0) extern void gt_pch_nx_dw_ranges_struct (void *); #define gt_pch_n_14pubname_struct(X) do { \ if (X != NULL) gt_pch_nx_pubname_struct (X);\ } while (0) extern void gt_pch_nx_pubname_struct (void *); #define gt_pch_n_28dw_separate_line_info_struct(X) do { \ if (X != NULL) gt_pch_nx_dw_separate_line_info_struct (X);\ } while (0) extern void gt_pch_nx_dw_separate_line_info_struct (void *); #define gt_pch_n_19dw_line_info_struct(X) do { \ if (X != NULL) gt_pch_nx_dw_line_info_struct (X);\ } while (0) extern void gt_pch_nx_dw_line_info_struct (void *); #define gt_pch_n_14dw_attr_struct(X) do { \ if (X != NULL) gt_pch_nx_dw_attr_struct (X);\ } while (0) extern void gt_pch_nx_dw_attr_struct (void *); #define gt_pch_n_18dw_loc_list_struct(X) do { \ if (X != NULL) gt_pch_nx_dw_loc_list_struct (X);\ } while (0) extern void gt_pch_nx_dw_loc_list_struct (void *); #define gt_pch_n_15queued_reg_save(X) do { \ if (X != NULL) gt_pch_nx_queued_reg_save (X);\ } while (0) extern void gt_pch_nx_queued_reg_save (void *); #define gt_pch_n_20indirect_string_node(X) do { \ if (X != NULL) gt_pch_nx_indirect_string_node (X);\ } while (0) extern void gt_pch_nx_indirect_string_node (void *); #define gt_pch_n_19dw_loc_descr_struct(X) do { \ if (X != NULL) gt_pch_nx_dw_loc_descr_struct (X);\ } while (0) extern void gt_pch_nx_dw_loc_descr_struct (void *); #define gt_pch_n_13dw_fde_struct(X) do { \ if (X != NULL) gt_pch_nx_dw_fde_struct (X);\ } while (0) extern void gt_pch_nx_dw_fde_struct (void *); #define gt_pch_n_13dw_cfi_struct(X) do { \ if (X != NULL) gt_pch_nx_dw_cfi_struct (X);\ } while (0) extern void gt_pch_nx_dw_cfi_struct (void *); #define gt_pch_n_8typeinfo(X) do { \ if (X != NULL) gt_pch_nx_typeinfo (X);\ } while (0) extern void gt_pch_nx_typeinfo (void *); #define gt_pch_n_15alias_set_entry(X) do { \ if (X != NULL) gt_pch_nx_alias_set_entry (X);\ } while (0) extern void gt_pch_nx_alias_set_entry (void *); #define gt_pch_n_8c_switch(X) do { \ if (X != NULL) gt_pch_nx_c_switch (X);\ } while (0) extern void gt_pch_nx_c_switch (void *); #define gt_pch_n_18sorted_fields_type(X) do { \ if (X != NULL) gt_pch_nx_sorted_fields_type (X);\ } while (0) extern void gt_pch_nx_sorted_fields_type (void *); #define gt_pch_n_19cgraph_varpool_node(X) do { \ if (X != NULL) gt_pch_nx_cgraph_varpool_node (X);\ } while (0) extern void gt_pch_nx_cgraph_varpool_node (void 
*); #define gt_pch_n_11cgraph_edge(X) do { \ if (X != NULL) gt_pch_nx_cgraph_edge (X);\ } while (0) extern void gt_pch_nx_cgraph_edge (void *); #define gt_pch_n_11cgraph_node(X) do { \ if (X != NULL) gt_pch_nx_cgraph_node (X);\ } while (0) extern void gt_pch_nx_cgraph_node (void *); #define gt_pch_n_8bb_ann_d(X) do { \ if (X != NULL) gt_pch_nx_bb_ann_d (X);\ } while (0) extern void gt_pch_nx_bb_ann_d (void *); #define gt_pch_n_17reorder_block_def(X) do { \ if (X != NULL) gt_pch_nx_reorder_block_def (X);\ } while (0) extern void gt_pch_nx_reorder_block_def (void *); #define gt_pch_n_7et_node(X) do { \ if (X != NULL) gt_pch_nx_et_node (X);\ } while (0) extern void gt_pch_nx_et_node (void *); #define gt_pch_n_4loop(X) do { \ if (X != NULL) gt_pch_nx_loop (X);\ } while (0) extern void gt_pch_nx_loop (void *); #define gt_pch_n_12elt_loc_list(X) do { \ if (X != NULL) gt_pch_nx_elt_loc_list (X);\ } while (0) extern void gt_pch_nx_elt_loc_list (void *); #define gt_pch_n_17cselib_val_struct(X) do { \ if (X != NULL) gt_pch_nx_cselib_val_struct (X);\ } while (0) extern void gt_pch_nx_cselib_val_struct (void *); #define gt_pch_n_8elt_list(X) do { \ if (X != NULL) gt_pch_nx_elt_list (X);\ } while (0) extern void gt_pch_nx_elt_list (void *); #define gt_pch_n_12reg_info_def(X) do { \ if (X != NULL) gt_pch_nx_reg_info_def (X);\ } while (0) extern void gt_pch_nx_reg_info_def (void *); #define gt_pch_n_14lang_tree_node(X) do { \ if (X != NULL) gt_pch_nx_lang_tree_node (X);\ } while (0) extern void gt_pch_nx_lang_tree_node (void *); #define gt_pch_n_9value_set(X) do { \ if (X != NULL) gt_pch_nx_value_set (X);\ } while (0) extern void gt_pch_nx_value_set (void *); #define gt_pch_n_24tree_statement_list_node(X) do { \ if (X != NULL) gt_pch_nx_tree_statement_list_node (X);\ } while (0) extern void gt_pch_nx_tree_statement_list_node (void *); #define gt_pch_n_9lang_decl(X) do { \ if (X != NULL) gt_pch_nx_lang_decl (X);\ } while (0) extern void gt_pch_nx_lang_decl (void *); #define gt_pch_n_13alias_var_def(X) do { \ if (X != NULL) gt_pch_nx_alias_var_def (X);\ } while (0) extern void gt_pch_nx_alias_var_def (void *); #define gt_pch_n_9lang_type(X) do { \ if (X != NULL) gt_pch_nx_lang_type (X);\ } while (0) extern void gt_pch_nx_lang_type (void *); #define gt_pch_n_10die_struct(X) do { \ if (X != NULL) gt_pch_nx_die_struct (X);\ } while (0) extern void gt_pch_nx_die_struct (void *); #define gt_pch_n_8edge_def(X) do { \ if (X != NULL) gt_pch_nx_edge_def (X);\ } while (0) extern void gt_pch_nx_edge_def (void *); #define gt_pch_n_12ptr_info_def(X) do { \ if (X != NULL) gt_pch_nx_ptr_info_def (X);\ } while (0) extern void gt_pch_nx_ptr_info_def (void *); #define gt_pch_n_10real_value(X) do { \ if (X != NULL) gt_pch_nx_real_value (X);\ } while (0) extern void gt_pch_nx_real_value (void *); #define gt_pch_n_10tree_ann_d(X) do { \ if (X != NULL) gt_pch_nx_tree_ann_d (X);\ } while (0) extern void gt_pch_nx_tree_ann_d (void *); #define gt_pch_n_13convert_optab(X) do { \ if (X != NULL) gt_pch_nx_convert_optab (X);\ } while (0) extern void gt_pch_nx_convert_optab (void *); #define gt_pch_n_5optab(X) do { \ if (X != NULL) gt_pch_nx_optab (X);\ } while (0) extern void gt_pch_nx_optab (void *); #define gt_pch_n_15basic_block_def(X) do { \ if (X != NULL) gt_pch_nx_basic_block_def (X);\ } while (0) extern void gt_pch_nx_basic_block_def (void *); #define gt_pch_n_9reg_attrs(X) do { \ if (X != NULL) gt_pch_nx_reg_attrs (X);\ } while (0) extern void gt_pch_nx_reg_attrs (void *); #define gt_pch_n_9mem_attrs(X) do { \ if (X != NULL) 
gt_pch_nx_mem_attrs (X);\ } while (0) extern void gt_pch_nx_mem_attrs (void *); #define gt_pch_n_17language_function(X) do { \ if (X != NULL) gt_pch_nx_language_function (X);\ } while (0) extern void gt_pch_nx_language_function (void *); #define gt_pch_n_9temp_slot(X) do { \ if (X != NULL) gt_pch_nx_temp_slot (X);\ } while (0) extern void gt_pch_nx_temp_slot (void *); #define gt_pch_n_15varray_head_tag(X) do { \ if (X != NULL) gt_pch_nx_varray_head_tag (X);\ } while (0) extern void gt_pch_nx_varray_head_tag (void *); #define gt_pch_n_20initial_value_struct(X) do { \ if (X != NULL) gt_pch_nx_initial_value_struct (X);\ } while (0) extern void gt_pch_nx_initial_value_struct (void *); #define gt_pch_n_13varasm_status(X) do { \ if (X != NULL) gt_pch_nx_varasm_status (X);\ } while (0) extern void gt_pch_nx_varasm_status (void *); #define gt_pch_n_11stmt_status(X) do { \ if (X != NULL) gt_pch_nx_stmt_status (X);\ } while (0) extern void gt_pch_nx_stmt_status (void *); #define gt_pch_n_9eh_status(X) do { \ if (X != NULL) gt_pch_nx_eh_status (X);\ } while (0) extern void gt_pch_nx_eh_status (void *); #define gt_pch_n_8function(X) do { \ if (X != NULL) gt_pch_nx_function (X);\ } while (0) extern void gt_pch_nx_function (void *); #define gt_pch_n_11expr_status(X) do { \ if (X != NULL) gt_pch_nx_expr_status (X);\ } while (0) extern void gt_pch_nx_expr_status (void *); #define gt_pch_n_11emit_status(X) do { \ if (X != NULL) gt_pch_nx_emit_status (X);\ } while (0) extern void gt_pch_nx_emit_status (void *); #define gt_pch_n_14sequence_stack(X) do { \ if (X != NULL) gt_pch_nx_sequence_stack (X);\ } while (0) extern void gt_pch_nx_sequence_stack (void *); #define gt_pch_n_14var_refs_queue(X) do { \ if (X != NULL) gt_pch_nx_var_refs_queue (X);\ } while (0) extern void gt_pch_nx_var_refs_queue (void *); #define gt_pch_n_15bitmap_head_def(X) do { \ if (X != NULL) gt_pch_nx_bitmap_head_def (X);\ } while (0) extern void gt_pch_nx_bitmap_head_def (void *); #define gt_pch_n_18bitmap_element_def(X) do { \ if (X != NULL) gt_pch_nx_bitmap_element_def (X);\ } while (0) extern void gt_pch_nx_bitmap_element_def (void *); #define gt_pch_n_17stack_local_entry(X) do { \ if (X != NULL) gt_pch_nx_stack_local_entry (X);\ } while (0) extern void gt_pch_nx_stack_local_entry (void *); #define gt_pch_n_16machine_function(X) do { \ if (X != NULL) gt_pch_nx_machine_function (X);\ } while (0) extern void gt_pch_nx_machine_function (void *); #define gt_pch_n_6answer(X) do { \ if (X != NULL) gt_pch_nx_answer (X);\ } while (0) extern void gt_pch_nx_answer (void *); #define gt_pch_n_9cpp_macro(X) do { \ if (X != NULL) gt_pch_nx_cpp_macro (X);\ } while (0) extern void gt_pch_nx_cpp_macro (void *); #define gt_pch_n_9cpp_token(X) do { \ if (X != NULL) gt_pch_nx_cpp_token (X);\ } while (0) extern void gt_pch_nx_cpp_token (void *); #define gt_pch_n_9tree_node(X) do { \ if (X != NULL) gt_pch_nx_tree_node (X);\ } while (0) #define gt_pch_nx_tree_node gt_pch_nx_lang_tree_node #define gt_pch_n_9rtvec_def(X) do { \ if (X != NULL) gt_pch_nx_rtvec_def (X);\ } while (0) extern void gt_pch_nx_rtvec_def (void *); #define gt_pch_n_7rtx_def(X) do { \ if (X != NULL) gt_pch_nx_rtx_def (X);\ } while (0) extern void gt_pch_nx_rtx_def (void *); #define gt_pch_n_10location_s(X) do { \ if (X != NULL) gt_pch_nx_location_s (X);\ } while (0) extern void gt_pch_nx_location_s (void *); extern void gt_pch_n_II17splay_tree_node_s (void *); extern void gt_pch_n_SP9tree_node17splay_tree_node_s (void *); extern void gt_pch_n_P13alias_var_def15varray_head_tag (void *); 
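/* The gt_pch_n_<typename> routines in this section parallel the GC markers
   above: when a precompiled header is written, each one registers the object
   with gt_pch_note_object (declared later in this header) rather than marking
   it, passing the corresponding gt_pch_p_<typename> walker so that the
   pointers inside the object can be adjusted for the address at which the PCH
   image will be mapped.  An illustrative sketch only, again for a
   hypothetical `struct foo':

     void
     gt_pch_nx_foo (void *x_p)
     {
       struct foo * const x = (struct foo *) x_p;
       if (gt_pch_note_object (x, x, gt_pch_p_3foo))
         {
           gt_pch_n_3foo ((*x).next);
           gt_pch_n_9tree_node ((*x).decl);
         }
     }
   */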
extern void gt_pch_n_P15throw_stmt_node4htab (void *); extern void gt_pch_n_P23constant_descriptor_rtx4htab (void *); extern void gt_pch_n_P24constant_descriptor_tree4htab (void *); extern void gt_pch_n_P14in_named_entry4htab (void *); extern void gt_pch_n_P9type_hash4htab (void *); extern void gt_pch_n_P13ehl_map_entry4htab (void *); extern void gt_pch_n_P9tree_node4htab (void *); extern void gt_pch_n_P9reg_attrs4htab (void *); extern void gt_pch_n_P9mem_attrs4htab (void *); extern void gt_pch_n_P7rtx_def4htab (void *); extern void gt_pch_n_SP9tree_node12splay_tree_s (void *); extern void gt_pch_n_P16var_loc_list_def4htab (void *); extern void gt_pch_n_P10die_struct4htab (void *); extern void gt_pch_n_P20indirect_string_node4htab (void *); extern void gt_pch_n_P19cgraph_varpool_node4htab (void *); extern void gt_pch_n_P11cgraph_node4htab (void *); extern void gt_pch_n_P15alias_set_entry15varray_head_tag (void *); extern void gt_pch_n_II12splay_tree_s (void *); extern void gt_pch_n_P9temp_slot15varray_head_tag (void *); /* Local pointer-walking routines. */ extern void gt_pch_p_11align_stack (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_7c_scope (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9c_binding (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_15throw_stmt_node (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_15edge_prediction (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_19v_must_def_optype_d (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_13vuse_optype_d (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_18v_may_def_optype_d (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_12use_optype_d (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_12def_optype_d (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_10dataflow_d (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_23constant_descriptor_rtx (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_24constant_descriptor_tree (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_14in_named_entry (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_17rtx_constant_pool (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9type_hash (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_16string_pool_data (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_10goto_fixup (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_11label_chain (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_7nesting (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9case_node (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9eh_region (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_13ehl_map_entry (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_16var_loc_list_def (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_12var_loc_node (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_16limbo_die_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_16dw_ranges_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_14pubname_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_28dw_separate_line_info_struct (void *, void *, gt_pointer_operator, void *); extern void 
gt_pch_p_19dw_line_info_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_14dw_attr_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_18dw_loc_list_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_15queued_reg_save (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_20indirect_string_node (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_19dw_loc_descr_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_13dw_fde_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_13dw_cfi_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_8typeinfo (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_15alias_set_entry (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_18sorted_fields_type (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_19cgraph_varpool_node (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_11cgraph_edge (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_11cgraph_node (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_8bb_ann_d (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_12elt_loc_list (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_17cselib_val_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_8elt_list (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_14lang_tree_node (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_24tree_statement_list_node (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9lang_decl (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_13alias_var_def (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9lang_type (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_10die_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_8edge_def (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_12ptr_info_def (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_10real_value (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_10tree_ann_d (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_13convert_optab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_5optab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_15basic_block_def (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9reg_attrs (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9mem_attrs (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_17language_function (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9temp_slot (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_15varray_head_tag (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_20initial_value_struct (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_13varasm_status (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_11stmt_status (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9eh_status (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_8function (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_11expr_status (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_11emit_status (void *, void *, 
gt_pointer_operator, void *); extern void gt_pch_p_14sequence_stack (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_14var_refs_queue (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_15bitmap_head_def (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_18bitmap_element_def (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_17stack_local_entry (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_16machine_function (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_6answer (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9cpp_macro (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_9cpp_token (void *, void *, gt_pointer_operator, void *); #define gt_pch_p_9tree_node gt_pch_p_14lang_tree_node extern void gt_pch_p_9rtvec_def (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_7rtx_def (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_10location_s (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_II17splay_tree_node_s (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_SP9tree_node17splay_tree_node_s (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P13alias_var_def15varray_head_tag (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P15throw_stmt_node4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P23constant_descriptor_rtx4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P24constant_descriptor_tree4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P14in_named_entry4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P9type_hash4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P13ehl_map_entry4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P9tree_node4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P9reg_attrs4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P9mem_attrs4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P7rtx_def4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_SP9tree_node12splay_tree_s (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P16var_loc_list_def4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P10die_struct4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P20indirect_string_node4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P19cgraph_varpool_node4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P11cgraph_node4htab (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P15alias_set_entry15varray_head_tag (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_II12splay_tree_s (void *, void *, gt_pointer_operator, void *); extern void gt_pch_p_P9temp_slot15varray_head_tag (void *, void *, gt_pointer_operator, void *); /* One of these applies its third parameter (with cookie in the fourth parameter) to each pointer in the object pointed to by the first parameter, using the second parameter. */ typedef void (*gt_note_pointers) (void *, void *, gt_pointer_operator, void *); /* One of these is called before objects are re-ordered in memory. 
The first parameter is the original object, the second is the subobject that has had its pointers reordered, the third parameter can compute the new values of a pointer when given the cookie in the fourth parameter. */ typedef void (*gt_handle_reorder) (void *, void *, gt_pointer_operator, void *); /* Used by the gt_pch_n_* routines. Register an object in the hash table. */ extern int gt_pch_note_object (void *, void *, gt_note_pointers); /* Used by the gt_pch_n_* routines. Register that an object has a reorder function. */ extern void gt_pch_note_reorder (void *, void *, gt_handle_reorder); /* Mark the object in the first parameter and anything it points to. */ typedef void (*gt_pointer_walker) (void *); /* Structures for the easy way to mark roots. In an array, terminated by having base == NULL. */ struct ggc_root_tab { void *base; size_t nelt; size_t stride; gt_pointer_walker cb; gt_pointer_walker pchw; }; #define LAST_GGC_ROOT_TAB { NULL, 0, 0, NULL, NULL } /* Pointers to arrays of ggc_root_tab, terminated by NULL. */ extern const struct ggc_root_tab * const gt_ggc_rtab[]; extern const struct ggc_root_tab * const gt_ggc_deletable_rtab[]; extern const struct ggc_root_tab * const gt_pch_cache_rtab[]; extern const struct ggc_root_tab * const gt_pch_scalar_rtab[]; /* Structure for hash table cache marking. */ struct htab; struct ggc_cache_tab { struct htab * *base; size_t nelt; size_t stride; gt_pointer_walker cb; gt_pointer_walker pchw; int (*marked_p) (const void *); }; #define LAST_GGC_CACHE_TAB { NULL, 0, 0, NULL, NULL, NULL } /* Pointers to arrays of ggc_cache_tab, terminated by NULL. */ extern const struct ggc_cache_tab * const gt_ggc_cache_rtab[]; /* If EXPR is not NULL and previously unmarked, mark it and evaluate to true. Otherwise evaluate to false. */ #define ggc_test_and_set_mark(EXPR) \ ((EXPR) != NULL && ((void *) (EXPR)) != (void *) 1 && ! ggc_set_mark (EXPR)) #define ggc_mark(EXPR) \ do { \ const void *const a__ = (EXPR); \ if (a__ != NULL && a__ != (void *) 1) \ ggc_set_mark (a__); \ } while (0) /* Actually set the mark on a particular region of memory, but don't follow pointers. This function is called by ggc_mark_*. It returns zero if the object was not previously marked; nonzero if the object was already marked, or if, for any other reason, pointers in this data structure should not be traversed. */ extern int ggc_set_mark (const void *); /* Return 1 if P has been marked, zero otherwise. P must have been allocated by the GC allocator; it mustn't point to static objects, stack variables, or memory allocated with malloc. */ extern int ggc_marked_p (const void *); /* Mark the entries in the string pool. */ extern void ggc_mark_stringpool (void); /* Call ggc_set_mark on all the roots. */ extern void ggc_mark_roots (void); /* Save and restore the string pool entries for PCH. */ extern void gt_pch_save_stringpool (void); extern void gt_pch_fixup_stringpool (void); extern void gt_pch_restore_stringpool (void); /* PCH and GGC handling for strings, mostly trivial. */ extern void gt_pch_p_S (void *, void *, gt_pointer_operator, void *); extern void gt_pch_n_S (const void *); extern void gt_ggc_m_S (void *); /* Initialize the string pool. */ extern void init_stringpool (void); /* A GC implementation must provide these functions. They are internal to the GC system. */ /* Forward declare the zone structure. Only ggc_zone implements this. */ struct alloc_zone; /* Initialize the garbage collector. */ extern void init_ggc (void); /* Start a new GGC zone. 
*/ extern struct alloc_zone *new_ggc_zone (const char *); /* Free a complete GGC zone, destroying everything in it. */ extern void destroy_ggc_zone (struct alloc_zone *); /* Start a new GGC context. Memory allocated in previous contexts will not be collected while the new context is active. */ extern void ggc_push_context (void); /* Finish a GC context. Any uncollected memory in the new context will be merged with the old context. */ extern void ggc_pop_context (void); struct ggc_pch_data; /* Return a new ggc_pch_data structure. */ extern struct ggc_pch_data *init_ggc_pch (void); /* The second and third parameters give the address and size of an object. Update the ggc_pch_data structure with as much of that information as is necessary. The last argument should be true if the object is a string. */ extern void ggc_pch_count_object (struct ggc_pch_data *, void *, size_t, bool); /* Return the total size of the data to be written to hold all the objects previously passed to ggc_pch_count_object. */ extern size_t ggc_pch_total_size (struct ggc_pch_data *); /* The objects, when read, will most likely be at the address in the second parameter. */ extern void ggc_pch_this_base (struct ggc_pch_data *, void *); /* Assuming that the objects really do end up at the address passed to ggc_pch_this_base, return the address of this object. The last argument should be true if the object is a string. */ extern char *ggc_pch_alloc_object (struct ggc_pch_data *, void *, size_t, bool); /* Write out any initial information required. */ extern void ggc_pch_prepare_write (struct ggc_pch_data *, FILE *); /* Write out this object, including any padding. The last argument should be true if the object is a string. */ extern void ggc_pch_write_object (struct ggc_pch_data *, FILE *, void *, void *, size_t, bool); /* All objects have been written, write out any final information required. */ extern void ggc_pch_finish (struct ggc_pch_data *, FILE *); /* A PCH file has just been read in at the address specified by the second parameter. Set up the GC implementation for the new objects. */ extern void ggc_pch_read (FILE *, void *); /* Allocation. */ /* For single pass garbage. */ extern struct alloc_zone *garbage_zone; /* For regular rtl allocations. */ extern struct alloc_zone *rtl_zone; /* For regular tree allocations. */ extern struct alloc_zone *tree_zone; /* The internal primitive. */ extern void *ggc_alloc_stat (size_t MEM_STAT_DECL); #define ggc_alloc(s) ggc_alloc_stat (s MEM_STAT_INFO) /* Allocate an object into the specified allocation zone. */ extern void *ggc_alloc_zone_stat (size_t, struct alloc_zone * MEM_STAT_DECL); #define ggc_alloc_zone(s,z) ggc_alloc_zone_stat (s,z MEM_STAT_INFO) /* Allocate an object of the specified type and size. */ extern void *ggc_alloc_typed_stat (enum gt_types_enum, size_t MEM_STAT_DECL); #define ggc_alloc_typed(s,z) ggc_alloc_typed_stat (s,z MEM_STAT_INFO) /* Like ggc_alloc, but allocates cleared memory. */ extern void *ggc_alloc_cleared_stat (size_t MEM_STAT_DECL); #define ggc_alloc_cleared(s) ggc_alloc_cleared_stat (s MEM_STAT_INFO) /* Like ggc_alloc_zone, but allocates cleared memory. */ extern void *ggc_alloc_cleared_zone (size_t, struct alloc_zone * MEM_STAT_DECL); #define ggc_alloc_cleared_zone(s,z) ggc_alloc_cleared_stat (s,z MEM_STAT_INFO) /* Resize a block. */ extern void *ggc_realloc_stat (void *, size_t MEM_STAT_DECL); #define ggc_realloc(s,z) ggc_realloc_stat (s,z MEM_STAT_INFO) /* Like ggc_alloc_cleared, but performs a multiplication.
*/ extern void *ggc_calloc (size_t, size_t); /* Free a block. To be used when known for certain it's not reachable. */ extern void ggc_free (void *); extern void ggc_record_overhead (size_t, size_t MEM_STAT_DECL); extern void dump_ggc_loc_statistics (void); #define ggc_alloc_rtx(CODE) \ ((rtx) ggc_alloc_typed (gt_ggc_e_7rtx_def, RTX_SIZE (CODE))) #define ggc_alloc_rtvec(NELT) \ ((rtvec) ggc_alloc_typed (gt_ggc_e_9rtvec_def, sizeof (struct rtvec_def) \ + ((NELT) - 1) * sizeof (rtx))) #define ggc_alloc_tree(LENGTH) ((tree) ggc_alloc_zone (LENGTH, tree_zone)) #define htab_create_ggc(SIZE, HASH, EQ, DEL) \ htab_create_alloc (SIZE, HASH, EQ, DEL, ggc_calloc, NULL) #define splay_tree_new_ggc(COMPARE) \ splay_tree_new_with_allocator (COMPARE, NULL, NULL, \ &ggc_splay_alloc, &ggc_splay_dont_free, \ NULL) extern void *ggc_splay_alloc (int, void *); extern void ggc_splay_dont_free (void *, void *); /* Allocate a gc-able string, and fill it with LENGTH bytes from CONTENTS. If LENGTH is -1, then CONTENTS is assumed to be a null-terminated string and the memory sized accordingly. */ extern const char *ggc_alloc_string (const char *contents, int length); /* Make a copy of S, in GC-able memory. */ #define ggc_strdup(S) ggc_alloc_string((S), -1) /* Invoke the collector. Garbage collection occurs only when this function is called, not during allocations. */ extern void ggc_collect (void); /* Return the number of bytes allocated at the indicated address. */ extern size_t ggc_get_size (const void *); /* Write out all GCed objects to F. */ extern void gt_pch_save (FILE *f); /* Read objects previously saved with gt_pch_save from F. */ extern void gt_pch_restore (FILE *f); /* Statistics. */ /* This structure contains the statistics common to all collectors. Particular collectors can extend this structure. */ typedef struct ggc_statistics { /* At present, we don't really gather any interesting statistics. */ int unused; } ggc_statistics; /* Used by the various collectors to gather and print statistics that do not depend on the collector in use. */ extern void ggc_print_common_statistics (FILE *, ggc_statistics *); /* Print allocation statistics. */ extern void ggc_print_statistics (void); extern void stringpool_statistics (void); /* Heuristics. */ extern int ggc_min_expand_heuristic (void); extern int ggc_min_heapsize_heuristic (void); extern void init_ggc_heuristics (void); #endif /* Callgraph handling code. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Jan Hubicka This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_CGRAPH_H #define GCC_CGRAPH_H /* Information about the function collected locally. Available after the function is analyzed. */ struct cgraph_local_info GTY(()) { /* Size of the function before inlining. */ int self_insns; /* Set when the function is visible in the current compilation unit only and its address is never taken.
*/ bool local; /* Set once it has been finalized so we consider it to be output. */ bool finalized; /* False when something makes inlining impossible (such as va_arg). */ bool inlinable; /* True when the function should be inlined regardless of its size. */ bool disregard_inline_limits; /* True when the function was originally extern inline, but it has since been redefined. */ bool redefined_extern_inline; }; /* Information about the function that needs to be computed globally once compilation is finished. Available only with -funit-at-a-time. */ struct cgraph_global_info GTY(()) { /* For inline clones this points to the function they will be inlined into. */ struct cgraph_node *inlined_to; /* Estimated size of the function after inlining. */ int insns; /* Set iff the function has been inlined at least once. */ bool inlined; }; /* Information about the function that is propagated by the RTL backend. Available only for functions that have already been assembled. */ struct cgraph_rtl_info GTY(()) { int preferred_incoming_stack_boundary; bool const_function; bool pure_function; }; /* The cgraph data structure. Each function decl has an associated cgraph_node listing its callees and callers. */ struct cgraph_node GTY((chain_next ("%h.next"), chain_prev ("%h.previous"))) { tree decl; struct cgraph_edge *callees; struct cgraph_edge *callers; struct cgraph_node *next; struct cgraph_node *previous; /* For nested functions, points to the function the node is nested in. */ struct cgraph_node *origin; /* Points to the first nested function, if any. */ struct cgraph_node *nested; /* Pointer to the next function with the same origin, if any. */ struct cgraph_node *next_nested; /* Pointer to the next function in cgraph_nodes_queue. */ struct cgraph_node *next_needed; /* Pointer to the next clone. */ struct cgraph_node *next_clone; PTR GTY ((skip)) aux; struct cgraph_local_info local; struct cgraph_global_info global; struct cgraph_rtl_info rtl; /* Unique id of the node. */ int uid; /* Set when the function must be output - it is externally visible or its address is taken. */ bool needed; /* Set when the function is reachable by a call from another function that is either reachable or needed. */ bool reachable; /* Set once the function has been instantiated and its callee lists created. */ bool analyzed; /* Set when the function is scheduled to be assembled. */ bool output; }; struct cgraph_edge GTY((chain_next ("%h.next_caller"))) { struct cgraph_node *caller; struct cgraph_node *callee; struct cgraph_edge *next_caller; struct cgraph_edge *next_callee; tree call_expr; PTR GTY ((skip (""))) aux; /* When NULL, inline this call. When non-NULL, points to the explanation of why the function was not inlined. */ const char *inline_failed; }; /* The cgraph_varpool data structure. Each static variable decl has an associated cgraph_varpool_node. */ struct cgraph_varpool_node GTY(()) { tree decl; /* Pointer to the next node in cgraph_varpool_nodes_queue. */ struct cgraph_varpool_node *next_needed; /* Set when the variable must be output - it is externally visible or its address is taken. */ bool needed; /* Set once it has been finalized so we consider it to be output. */ bool finalized; /* Set when the variable is scheduled to be assembled.
*/ bool output; }; extern GTY(()) struct cgraph_node *cgraph_nodes; extern GTY(()) int cgraph_n_nodes; extern GTY(()) int cgraph_max_uid; extern bool cgraph_global_info_ready; extern GTY(()) struct cgraph_node *cgraph_nodes_queue; extern FILE *cgraph_dump_file; extern GTY(()) int cgraph_varpool_n_nodes; extern GTY(()) struct cgraph_varpool_node *cgraph_varpool_nodes_queue; /* In cgraph.c */ void dump_cgraph (FILE *); void dump_cgraph_node (FILE *, struct cgraph_node *); void cgraph_remove_edge (struct cgraph_edge *); void cgraph_remove_node (struct cgraph_node *); struct cgraph_edge *cgraph_create_edge (struct cgraph_node *, struct cgraph_node *, tree); struct cgraph_node *cgraph_node (tree decl); struct cgraph_edge *cgraph_edge (struct cgraph_node *, tree call_expr); bool cgraph_calls_p (tree, tree); struct cgraph_local_info *cgraph_local_info (tree); struct cgraph_global_info *cgraph_global_info (tree); struct cgraph_rtl_info *cgraph_rtl_info (tree); const char * cgraph_node_name (struct cgraph_node *); struct cgraph_edge * cgraph_clone_edge (struct cgraph_edge *, struct cgraph_node *, tree); struct cgraph_node * cgraph_clone_node (struct cgraph_node *); struct cgraph_varpool_node *cgraph_varpool_node (tree decl); void cgraph_varpool_mark_needed_node (struct cgraph_varpool_node *); void cgraph_varpool_finalize_decl (tree); bool cgraph_varpool_assemble_pending_decls (void); void cgraph_redirect_edge_callee (struct cgraph_edge *, struct cgraph_node *); bool cgraph_function_possibly_inlined_p (tree); /* In cgraphunit.c */ bool cgraph_assemble_pending_functions (void); void cgraph_finalize_function (tree, bool); void cgraph_finalize_compilation_unit (void); void cgraph_create_edges (struct cgraph_node *, tree); void cgraph_optimize (void); void cgraph_mark_needed_node (struct cgraph_node *); void cgraph_mark_reachable_node (struct cgraph_node *); bool cgraph_inline_p (struct cgraph_edge *, const char **reason); bool cgraph_preserve_function_body_p (tree); void verify_cgraph (void); void verify_cgraph_node (struct cgraph_node *); void cgraph_mark_inline_edge (struct cgraph_edge *e); void cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate); void cgraph_build_static_cdtor (char which, tree body); #endif /* GCC_CGRAPH_H */ /* Internal function decls */ /* Helpers. */ static tree mf_build_string (const char *string); static tree mf_varname_tree (tree); static tree mf_file_function_line_tree (location_t); /* Indirection-related instrumentation. */ static void mf_decl_cache_locals (void); static void mf_decl_clear_locals (void); static void mf_xform_derefs (void); static void execute_mudflap_function_ops (void); /* Addressable variables instrumentation. */ static void mf_xform_decls (tree, tree); static tree mx_xfn_xform_decls (tree *, int *, void *); static void mx_register_decls (tree, tree *); static void execute_mudflap_function_decls (void); /* ------------------------------------------------------------------------ */ /* Some generally helpful functions for mudflap instrumentation. */ /* Build a reference to a literal string. 
*/ static tree mf_build_string (const char *string) { size_t len = strlen (string); tree result = mf_mark (build_string (len + 1, string)); TREE_TYPE (result) = build_array_type (char_type_node, build_index_type (build_int_2 (len, 0))); TREE_CONSTANT (result) = 1; TREE_INVARIANT (result) = 1; TREE_READONLY (result) = 1; TREE_STATIC (result) = 1; result = build1 (ADDR_EXPR, build_pointer_type (char_type_node), result); return mf_mark (result); } /* Create a properly typed STRING_CST node that describes the given declaration. It will be used as an argument for __mf_register(). Try to construct a helpful string, including file/function/variable name. */ static tree mf_varname_tree (tree decl) { static pretty_printer buf_rec; static int initialized = 0; pretty_printer *buf = & buf_rec; const char *buf_contents; tree result; if (decl == NULL_TREE) abort (); if (!initialized) { pp_construct (buf, /* prefix */ NULL, /* line-width */ 0); initialized = 1; } pp_clear_output_area (buf); /* Add FILENAME[:LINENUMBER]. */ { expanded_location xloc = expand_location (DECL_SOURCE_LOCATION (decl)); const char *sourcefile; unsigned sourceline = xloc.line; sourcefile = xloc.file; if (sourcefile == NULL && current_function_decl != NULL_TREE) sourcefile = DECL_SOURCE_FILE (current_function_decl); if (sourcefile == NULL) sourcefile = ""; pp_string (buf, sourcefile); if (sourceline != 0) { pp_string (buf, ":"); pp_decimal_int (buf, sourceline); } } if (current_function_decl != NULL_TREE) { /* Add (FUNCTION): */ pp_string (buf, " ("); { const char *funcname = NULL; if (DECL_NAME (current_function_decl)) funcname = lang_hooks.decl_printable_name (current_function_decl, 1); if (funcname == NULL) funcname = "anonymous fn"; pp_string (buf, funcname); } pp_string (buf, ") "); } else pp_string (buf, " "); /* Add the variable declaration, possibly demangled. */ { const char *declname = NULL; if (strcmp ("GNU C++", lang_hooks.name) == 0 && DECL_NAME (decl) != NULL) { /* The gcc/cp decl_printable_name hook doesn't do as good a job as the libiberty demangler. */ declname = cplus_demangle (IDENTIFIER_POINTER (DECL_NAME (decl)), DMGL_AUTO | DMGL_VERBOSE); } if (declname == NULL) declname = lang_hooks.decl_printable_name (decl, 3); if (declname == NULL) declname = ""; pp_string (buf, declname); } /* Return the lot as a new STRING_CST. */ buf_contents = pp_base_formatted_text (buf); result = mf_build_string (buf_contents); pp_clear_output_area (buf); return result; } /* And another friend, for producing a simpler message. */ static tree mf_file_function_line_tree (location_t location) { expanded_location xloc = expand_location (location); const char *file = NULL, *colon, *line, *op, *name, *cp; char linebuf[18]; char *string; tree result; /* Add FILENAME[:LINENUMBER]. */ if (xloc.file == NULL && current_function_decl != NULL_TREE) xloc.file = DECL_SOURCE_FILE (current_function_decl); if (xloc.file == NULL) xloc.file = ""; file = xloc.file; if (xloc.line > 0) { sprintf (linebuf, "%d", xloc.line); colon = ":"; line = linebuf; } else colon = line = ""; /* Add (FUNCTION). */ name = lang_hooks.decl_printable_name (current_function_decl, 1); if (name) { op = " ("; cp = ")"; } else op = name = cp = ""; string = concat (file, colon, line, op, name, cp, NULL); result = mf_build_string (string); free (string); return result; } /* global tree nodes */ /* Global tree objects for global variables and functions exported by the mudflap runtime library. mf_init_extern_trees must be called before using these.
*/ /* uintptr_t (usually "unsigned long") */ static GTY (()) tree mf_uintptr_type; /* struct __mf_cache { uintptr_t low; uintptr_t high; }; */ static GTY (()) tree mf_cache_struct_type; /* struct __mf_cache * const */ static GTY (()) tree mf_cache_structptr_type; /* extern struct __mf_cache __mf_lookup_cache []; */ static GTY (()) tree mf_cache_array_decl; /* extern unsigned char __mf_lc_shift; */ static GTY (()) tree mf_cache_shift_decl; /* extern uintptr_t __mf_lc_mask; */ static GTY (()) tree mf_cache_mask_decl; /* Their function-scope local shadows, used in single-threaded mode only. */ /* auto const unsigned char __mf_lc_shift_l; */ static GTY (()) tree mf_cache_shift_decl_l; /* auto const uintptr_t __mf_lc_mask_l; */ static GTY (()) tree mf_cache_mask_decl_l; /* extern void __mf_check (void *ptr, size_t sz, int type, const char *); */ static GTY (()) tree mf_check_fndecl; /* extern void __mf_register (void *ptr, size_t sz, int type, const char *); */ static GTY (()) tree mf_register_fndecl; /* extern void __mf_unregister (void *ptr, size_t sz, int type); */ static GTY (()) tree mf_unregister_fndecl; /* Helper for mudflap_init: construct a decl with the given category, name, and type, mark it an external reference, and pushdecl it. */ static inline tree mf_make_builtin (enum tree_code category, const char *name, tree type) { tree decl = mf_mark (build_decl (category, get_identifier (name), type)); TREE_PUBLIC (decl) = 1; DECL_EXTERNAL (decl) = 1; lang_hooks.decls.pushdecl (decl); return decl; } /* Helper for mudflap_init: construct a tree corresponding to the type struct __mf_cache { uintptr_t low; uintptr_t high; }; where uintptr_t is the FIELD_TYPE argument. */ static inline tree mf_make_mf_cache_struct_type (tree field_type) { /* There is, abominably, no language-independent way to construct a RECORD_TYPE. So we have to call the basic type construction primitives by hand. */ tree fieldlo = build_decl (FIELD_DECL, get_identifier ("low"), field_type); tree fieldhi = build_decl (FIELD_DECL, get_identifier ("high"), field_type); tree struct_type = make_node (RECORD_TYPE); DECL_CONTEXT (fieldlo) = struct_type; DECL_CONTEXT (fieldhi) = struct_type; TREE_CHAIN (fieldlo) = fieldhi; TYPE_FIELDS (struct_type) = fieldlo; TYPE_NAME (struct_type) = get_identifier ("__mf_cache"); layout_type (struct_type); return struct_type; } #define build_function_type_3(rtype, arg1, arg2, arg3) \ build_function_type (rtype, tree_cons (0, arg1, tree_cons (0, arg2, \ tree_cons (0, arg3, void_list_node)))) #define build_function_type_4(rtype, arg1, arg2, arg3, arg4) \ build_function_type (rtype, tree_cons (0, arg1, tree_cons (0, arg2, \ tree_cons (0, arg3, tree_cons (0, arg4, \ void_list_node))))) /* Initialize the global tree nodes that correspond to mf-runtime.h declarations. 
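As a rough sketch only (the pass emits GIMPLE trees, not C source, and the real call also passes an access-direction flag and a location string), the check that mf_build_check_statement_for constructs further below for an access to ADDR of SIZE bytes corresponds to

      uintptr_t base = (uintptr_t) ADDR;
      struct __mf_cache *elem
        = &__mf_lookup_cache[(base >> __mf_lc_shift) & __mf_lc_mask];
      if (elem->low > base || elem->high < base + SIZE - 1)
        __mf_check (ADDR, SIZE, ...);

   so a hit in the lookup cache lets the access proceed without entering the runtime; in single-threaded mode the shift and mask are read from the function-local shadow copies declared above.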
*/ void mudflap_init (void) { static bool done = false; tree mf_const_string_type; tree mf_cache_array_type; tree mf_check_register_fntype; tree mf_unregister_fntype; if (done) return; done = true; mf_uintptr_type = lang_hooks.types.type_for_mode (ptr_mode, /*unsignedp=*/true); mf_const_string_type = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST)); mf_cache_struct_type = mf_make_mf_cache_struct_type (mf_uintptr_type); mf_cache_structptr_type = build_pointer_type (mf_cache_struct_type); mf_cache_array_type = build_array_type (mf_cache_struct_type, 0); mf_check_register_fntype = build_function_type_4 (void_type_node, ptr_type_node, size_type_node, integer_type_node, mf_const_string_type); mf_unregister_fntype = build_function_type_3 (void_type_node, ptr_type_node, size_type_node, integer_type_node); mf_cache_array_decl = mf_make_builtin (VAR_DECL, "__mf_lookup_cache", mf_cache_array_type); mf_cache_shift_decl = mf_make_builtin (VAR_DECL, "__mf_lc_shift", unsigned_char_type_node); mf_cache_mask_decl = mf_make_builtin (VAR_DECL, "__mf_lc_mask", mf_uintptr_type); mf_check_fndecl = mf_make_builtin (FUNCTION_DECL, "__mf_check", mf_check_register_fntype); mf_register_fndecl = mf_make_builtin (FUNCTION_DECL, "__mf_register", mf_check_register_fntype); mf_unregister_fndecl = mf_make_builtin (FUNCTION_DECL, "__mf_unregister", mf_unregister_fntype); } #undef build_function_type_4 #undef build_function_type_3 /* ------------------------------------------------------------------------ */ /* Memory reference transforms. Perform the mudflap indirection-related tree transforms on the current function. This is the second part of the mudflap instrumentation. It works on low-level GIMPLE using the CFG, because we want to run this pass after tree optimizations have been performed, but we have to preserve the CFG for expansion from trees to RTL. */ static void execute_mudflap_function_ops (void) { if (mf_marked_p (current_function_decl)) return; push_gimplify_context (); /* In multithreaded mode, don't cache the lookup cache parameters. */ if (! flag_mudflap_threads) mf_decl_cache_locals (); mf_xform_derefs (); if (! flag_mudflap_threads) mf_decl_clear_locals (); pop_gimplify_context (NULL); } /* Create and initialize local shadow variables for the lookup cache globals. Put their decls in the *_l globals for use by mf_build_check_statement_for. */ static void mf_decl_cache_locals (void) { tree t, shift_init_stmts, mask_init_stmts; tree_stmt_iterator tsi; /* Build the cache vars. */ mf_cache_shift_decl_l = mf_mark (create_tmp_var (TREE_TYPE (mf_cache_shift_decl), "__mf_lookup_shift_l")); mf_cache_mask_decl_l = mf_mark (create_tmp_var (TREE_TYPE (mf_cache_mask_decl), "__mf_lookup_mask_l")); /* Build initialization nodes for the cache vars. We just load the globals into the cache variables. */ t = build (MODIFY_EXPR, TREE_TYPE (mf_cache_shift_decl_l), mf_cache_shift_decl_l, mf_cache_shift_decl); SET_EXPR_LOCATION (t, DECL_SOURCE_LOCATION (current_function_decl)); gimplify_to_stmt_list (&t); shift_init_stmts = t; t = build (MODIFY_EXPR, TREE_TYPE (mf_cache_mask_decl_l), mf_cache_mask_decl_l, mf_cache_mask_decl); SET_EXPR_LOCATION (t, DECL_SOURCE_LOCATION (current_function_decl)); gimplify_to_stmt_list (&t); mask_init_stmts = t; /* Anticipating multiple entry points, we insert the cache vars initializers in each successor of the ENTRY_BLOCK_PTR. */ for (tsi = tsi_start (shift_init_stmts); ! 
tsi_end_p (tsi); tsi_next (&tsi)) insert_edge_copies (tsi_stmt (tsi), ENTRY_BLOCK_PTR); for (tsi = tsi_start (mask_init_stmts); ! tsi_end_p (tsi); tsi_next (&tsi)) insert_edge_copies (tsi_stmt (tsi), ENTRY_BLOCK_PTR); bsi_commit_edge_inserts (NULL); } static void mf_decl_clear_locals (void) { /* Unset local shadows. */ mf_cache_shift_decl_l = NULL_TREE; mf_cache_mask_decl_l = NULL_TREE; } static void mf_build_check_statement_for (tree addr, tree size, block_stmt_iterator *instr_bsi, location_t *locus, tree dirflag) { tree_stmt_iterator head, tsi; tree ptrtype = TREE_TYPE (addr); block_stmt_iterator bsi; basic_block cond_bb, then_bb, join_bb; edge e; tree cond, t, u, v, l1, l2; tree mf_value; tree mf_base; tree mf_elem; /* We first need to split the current basic block, and start altering the CFG. This allows us to insert the statements we're about to construct into the right basic blocks. The label l1 is the label of the block for the THEN clause of the conditional jump we're about to construct, and l2 is the ELSE clause, which is just the continuation of the old statement stream. */ l1 = create_artificial_label (); l2 = create_artificial_label (); cond_bb = bb_for_stmt (bsi_stmt (*instr_bsi)); bsi = *instr_bsi; bsi_prev (&bsi); if (! bsi_end_p (bsi)) { e = split_block (cond_bb, bsi_stmt (bsi)); cond_bb = e->src; join_bb = e->dest; } else { join_bb = cond_bb; cond_bb = create_empty_bb (join_bb->prev_bb); e = make_edge (cond_bb, join_bb, 0); } e->flags = EDGE_FALSE_VALUE; then_bb = create_empty_bb (cond_bb); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (then_bb, join_bb, EDGE_FALLTHRU); /* We expect that the conditional jump we will construct will not be taken very often as it basically is an exception condition. */ predict_edge_def (then_bb->pred, PRED_MUDFLAP, NOT_TAKEN); /* Update dominance info. Note that bb_join's data was updated by split_block. */ if (dom_computed[CDI_DOMINATORS] >= DOM_CONS_OK) { set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb); } /* Build our local variables. */ mf_value = create_tmp_var (ptrtype, "__mf_value"); mf_elem = create_tmp_var (mf_cache_structptr_type, "__mf_elem"); mf_base = create_tmp_var (mf_uintptr_type, "__mf_base"); /* Build: __mf_value =
. */ t = build (MODIFY_EXPR, void_type_node, mf_value, unshare_expr (addr)); SET_EXPR_LOCUS (t, locus); gimplify_to_stmt_list (&t); head = tsi_start (t); tsi = tsi_last (t); /* Build: __mf_base = (uintptr_t)__mf_value. */ t = build (MODIFY_EXPR, void_type_node, mf_base, build1 (NOP_EXPR, mf_uintptr_type, mf_value)); SET_EXPR_LOCUS (t, locus); gimplify_to_stmt_list (&t); tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING); /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift) & __mf_mask]. */ t = build (RSHIFT_EXPR, mf_uintptr_type, mf_base, (flag_mudflap_threads ? mf_cache_shift_decl : mf_cache_shift_decl_l)); t = build (BIT_AND_EXPR, mf_uintptr_type, t, (flag_mudflap_threads ? mf_cache_mask_decl : mf_cache_mask_decl_l)); t = build (ARRAY_REF, TREE_TYPE (TREE_TYPE (mf_cache_array_decl)), mf_cache_array_decl, t, NULL_TREE, NULL_TREE); t = build1 (ADDR_EXPR, mf_cache_structptr_type, t); t = build (MODIFY_EXPR, void_type_node, mf_elem, t); SET_EXPR_LOCUS (t, locus); gimplify_to_stmt_list (&t); tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING); /* Quick validity check. if (__mf_elem->low > __mf_base || (__mf_elem_high < __mf_base + sizeof(T) - 1)) { __mf_check (); ... and only if single-threaded: __mf_lookup_shift_1 = f...; __mf_lookup_mask_l = ...; } It is expected that this body of code is rarely executed so we mark the edge to the THEN clause of the conditional jump as unlikely. */ /* Construct t <-- '__mf_elem->low > __mf_base'. */ t = build (COMPONENT_REF, mf_uintptr_type, build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem), TYPE_FIELDS (mf_cache_struct_type), NULL_TREE); t = build (GT_EXPR, boolean_type_node, t, mf_base); /* Construct '__mf_elem->high < __mf_base + sizeof(T) - 1'. First build: 1) u <-- '__mf_elem->high' 2) v <-- '__mf_base + sizeof (T) - 1'. Then build 'u <-- (u < v). */ u = build (COMPONENT_REF, mf_uintptr_type, build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem), TREE_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE); v = convert (mf_uintptr_type, size_binop (MINUS_EXPR, size, size_one_node)); v = fold (build (PLUS_EXPR, mf_uintptr_type, mf_base, v)); u = build (LT_EXPR, boolean_type_node, u, v); /* Build the composed conditional: t <-- 't || u'. Then store the result of the evaluation of 't' in a temporary variable which we can use as the condition for the conditional jump. */ t = build (TRUTH_OR_EXPR, boolean_type_node, t, u); cond = create_tmp_var (boolean_type_node, "__mf_unlikely_cond"); t = build (MODIFY_EXPR, boolean_type_node, cond, t); gimplify_to_stmt_list (&t); tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING); /* Build the conditional jump. 'cond' is just a temporary so we can simply build a void COND_EXPR. We do need labels in both arms though. */ t = build (COND_EXPR, void_type_node, cond, build (GOTO_EXPR, void_type_node, tree_block_label (then_bb)), build (GOTO_EXPR, void_type_node, tree_block_label (join_bb))); SET_EXPR_LOCUS (t, locus); tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING); /* At this point, after so much hard work, we have only constructed the conditional jump, if (__mf_elem->low > __mf_base || (__mf_elem_high < __mf_base + sizeof(T) - 1)) The lowered GIMPLE tree representing this code is in the statement list starting at 'head'. We can insert this now in the current basic block, ie. the one that the statement we're instrumenting was originally in. */ bsi = bsi_last (cond_bb); for (tsi = head; ! 
tsi_end_p (tsi); tsi_next (&tsi)) bsi_insert_after (&bsi, tsi_stmt (tsi), BSI_CONTINUE_LINKING); /* Now build up the body of the cache-miss handling: __mf_check(); refresh *_l vars. This is the body of the conditional. */ u = tree_cons (NULL_TREE, mf_file_function_line_tree (locus == NULL ? UNKNOWN_LOCATION : *locus), NULL_TREE); u = tree_cons (NULL_TREE, dirflag, u); u = tree_cons (NULL_TREE, size, u); u = tree_cons (NULL_TREE, mf_value, u); t = build_function_call_expr (mf_check_fndecl, u); gimplify_to_stmt_list (&t); head = tsi_start (t); tsi = tsi_last (t); if (! flag_mudflap_threads) { t = build (MODIFY_EXPR, void_type_node, mf_cache_shift_decl_l, mf_cache_shift_decl); tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING); t = build (MODIFY_EXPR, void_type_node, mf_cache_mask_decl_l, mf_cache_mask_decl); tsi_link_after (&tsi, t, TSI_CONTINUE_LINKING); } /* Insert the check code in the THEN block. */ bsi = bsi_start (then_bb); for (tsi = head; ! tsi_end_p (tsi); tsi_next (&tsi)) bsi_insert_after (&bsi, tsi_stmt (tsi), BSI_CONTINUE_LINKING); *instr_bsi = bsi_start (join_bb); bsi_next (instr_bsi); } static void mf_xform_derefs_1 (block_stmt_iterator *iter, tree *tp, location_t *locus, tree dirflag) { tree type, ptr_type, addr, size, t; /* Don't instrument read operations. */ if (dirflag == integer_zero_node && flag_mudflap_ignore_reads) return; t = *tp; type = TREE_TYPE (t); size = TYPE_SIZE_UNIT (type); switch (TREE_CODE (t)) { case ARRAY_REF: { /* Omit checking if we can statically determine that the access is valid. For non-addressable local arrays this is not optional, since we won't have called __mf_register for the object. */ tree op0, op1; op0 = TREE_OPERAND (t, 0); op1 = TREE_OPERAND (t, 1); while (TREE_CODE (op1) == INTEGER_CST) { tree dom = TYPE_DOMAIN (TREE_TYPE (op0)); /* Test for index in range. Break if not. */ if (!dom || (! TYPE_MIN_VALUE (dom) || ! really_constant_p (TYPE_MIN_VALUE (dom))) || (! TYPE_MAX_VALUE (dom) || ! really_constant_p (TYPE_MAX_VALUE (dom))) || (tree_int_cst_lt (op1, TYPE_MIN_VALUE (dom)) || tree_int_cst_lt (TYPE_MAX_VALUE (dom), op1))) break; /* If we're looking at a non-external VAR_DECL, then the access must be ok. */ if (TREE_CODE (op0) == VAR_DECL && !DECL_EXTERNAL (op0)) return; /* Only continue if we're still looking at an array. */ if (TREE_CODE (op0) != ARRAY_REF) break; op1 = TREE_OPERAND (op0, 1); op0 = TREE_OPERAND (op0, 0); } /* If we got here, we couldn't statically the check. */ ptr_type = build_pointer_type (type); addr = build1 (ADDR_EXPR, ptr_type, t); } break; case INDIRECT_REF: addr = TREE_OPERAND (t, 0); ptr_type = TREE_TYPE (addr); break; case ARRAY_RANGE_REF: warning ("mudflap checking not yet implemented for ARRAY_RANGE_REF"); return; case COMPONENT_REF: { tree field; /* If we're not dereferencing something, then the access must be ok. */ if (TREE_CODE (TREE_OPERAND (t, 0)) != INDIRECT_REF) return; field = TREE_OPERAND (t, 1); /* If we're looking at a bit field, then we can't take its address with ADDR_EXPR -- lang_hooks.mark_addressable will error. Do things the hard way with PLUS. 
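(The BIT_FIELD_REF case further below does the analogous arithmetic explicitly, rounding the bit offset and width up to whole bytes: for example, with BITS_PER_UNIT == 8, a 3-bit field at bit offset 13 is checked as 1 byte starting at byte offset 1, since 13 / 8 == 1 and (3 + 13 % 8 + 7) / 8 == 1.)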
*/ if (DECL_BIT_FIELD_TYPE (field)) { if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST) size = DECL_SIZE_UNIT (field); addr = TREE_OPERAND (TREE_OPERAND (t, 0), 0); addr = fold_convert (ptr_type_node, addr); addr = fold (build (PLUS_EXPR, ptr_type_node, addr, fold_convert (ptr_type_node, byte_position (field)))); } else { ptr_type = build_pointer_type (type); addr = build1 (ADDR_EXPR, ptr_type, t); } } break; case BIT_FIELD_REF: { tree ofs, rem, bpu; /* If we're not dereferencing something, then the access must be ok. */ if (TREE_CODE (TREE_OPERAND (t, 0)) != INDIRECT_REF) return; bpu = bitsize_int (BITS_PER_UNIT); ofs = convert (bitsizetype, TREE_OPERAND (t, 2)); rem = size_binop (TRUNC_MOD_EXPR, ofs, bpu); ofs = size_binop (TRUNC_DIV_EXPR, ofs, bpu); size = convert (bitsizetype, TREE_OPERAND (t, 1)); size = size_binop (PLUS_EXPR, size, rem); size = size_binop (CEIL_DIV_EXPR, size, bpu); size = convert (sizetype, size); addr = TREE_OPERAND (TREE_OPERAND (t, 0), 0); addr = convert (ptr_type_node, addr); addr = fold (build (PLUS_EXPR, ptr_type_node, addr, ofs)); } break; default: return; } mf_build_check_statement_for (addr, size, iter, locus, dirflag); } static void mf_xform_derefs (void) { basic_block bb, next; block_stmt_iterator i; int saved_last_basic_block = last_basic_block; bb = ENTRY_BLOCK_PTR ->next_bb; do { next = bb->next_bb; for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i)) { tree s = bsi_stmt (i); /* Only a few GIMPLE statements can reference memory. */ switch (TREE_CODE (s)) { case MODIFY_EXPR: mf_xform_derefs_1 (&i, &TREE_OPERAND (s, 0), EXPR_LOCUS (s), integer_one_node); mf_xform_derefs_1 (&i, &TREE_OPERAND (s, 1), EXPR_LOCUS (s), integer_zero_node); break; case RETURN_EXPR: if (TREE_OPERAND (s, 0) != NULL_TREE) { if (TREE_CODE (TREE_OPERAND (s, 0)) == MODIFY_EXPR) mf_xform_derefs_1 (&i, &TREE_OPERAND (TREE_OPERAND (s, 0), 1), EXPR_LOCUS (s), integer_zero_node); else mf_xform_derefs_1 (&i, &TREE_OPERAND (s, 0), EXPR_LOCUS (s), integer_zero_node); } break; default: ; } } bb = next; } while (bb && bb->index <= saved_last_basic_block); } /* ------------------------------------------------------------------------ */ /* ADDR_EXPR transforms. Perform the declaration-related mudflap tree transforms on the current function. This is the first part of the mudflap instrumentation. It works on high-level GIMPLE because after lowering, all variables are moved out of their BIND_EXPR binding context, and we lose liveness information for the declarations we wish to instrument. */ static void execute_mudflap_function_decls (void) { if (mf_marked_p (current_function_decl)) return; push_gimplify_context (); mf_xform_decls (DECL_SAVED_TREE (current_function_decl), DECL_ARGUMENTS (current_function_decl)); pop_gimplify_context (NULL); } /* This struct is passed between mf_xform_decls to store state needed during the traversal searching for objects that have their addresses taken. */ struct mf_xform_decls_data { tree param_decls; }; /* Synthesize a CALL_EXPR and a TRY_FINALLY_EXPR, for this chain of _DECLs if appropriate. Arrange to call the __mf_register function now, and the __mf_unregister function later for each. */ static void mx_register_decls (tree decl, tree *stmt_list) { tree finally_stmts = NULL_TREE; tree_stmt_iterator initially_stmts = tsi_start (*stmt_list); while (decl != NULL_TREE) { /* Eligible decl? */ if ((TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL) /* It must be a non-external, automatic variable. */ && ! DECL_EXTERNAL (decl) && ! 
TREE_STATIC (decl) /* The decl must have its address taken. */ && TREE_ADDRESSABLE (decl) /* The type of the variable must be complete. */ && COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (decl)) /* Don't process the same decl twice. */ && ! mf_marked_p (decl)) { tree size = NULL_TREE, variable_name; tree unregister_fncall, unregister_fncall_params; tree register_fncall, register_fncall_params; if (DECL_DEFER_OUTPUT (decl)) { /* Oh no ... it's probably a variable-length array (VLA). The size and address cannot be computed by merely looking at the DECL. See gimplify_decl_stmt for the method by which VLA declarations turn into calls to BUILT_IN_STACK_ALLOC. We assume that multiple VLAs declared later in the same block get allocation code later than the others. */ tree stack_alloc_call = NULL_TREE; while(! tsi_end_p (initially_stmts)) { tree t = tsi_stmt (initially_stmts); tree call = NULL_TREE; if (TREE_CODE (t) == CALL_EXPR) call = t; else if (TREE_CODE (t) == MODIFY_EXPR && TREE_CODE (TREE_OPERAND (t, 1)) == CALL_EXPR) call = TREE_OPERAND (t, 1); else if (TREE_CODE (t) == TRY_FINALLY_EXPR) { /* We hope that this is the try/finally block sometimes constructed by gimplify_bind_expr() for a BIND_EXPR that contains VLAs. This very naive recursion appears to be sufficient. */ initially_stmts = tsi_start (TREE_OPERAND (t, 0)); } if (call != NULL_TREE) { if (TREE_CODE (TREE_OPERAND(call, 0)) == ADDR_EXPR && TREE_OPERAND (TREE_OPERAND (call, 0), 0) == implicit_built_in_decls [BUILT_IN_STACK_ALLOC]) { tree stack_alloc_args = TREE_OPERAND (call, 1); tree stack_alloc_op1 = TREE_VALUE (stack_alloc_args); tree stack_alloc_op2 = TREE_VALUE (TREE_CHAIN (stack_alloc_args)); if (TREE_CODE (stack_alloc_op1) == ADDR_EXPR && TREE_OPERAND (stack_alloc_op1, 0) == decl) { /* Got it! */ size = stack_alloc_op2; stack_alloc_call = call; /* Advance iterator to point past this allocation call. */ tsi_next (&initially_stmts); break; } } } tsi_next (&initially_stmts); } if (stack_alloc_call == NULL_TREE) { warning ("mudflap cannot handle variable-sized declaration `%s'", IDENTIFIER_POINTER (DECL_NAME (decl))); break; } } else { size = convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (decl))); } /* (& VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK) */ unregister_fncall_params = tree_cons (NULL_TREE, convert (ptr_type_node, mf_mark (build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (decl)), decl))), tree_cons (NULL_TREE, size, tree_cons (NULL_TREE, build_int_2 (3, 0), /* __MF_TYPE_STACK */ NULL_TREE))); /* __mf_unregister (...) */ unregister_fncall = build_function_call_expr (mf_unregister_fndecl, unregister_fncall_params); /* (& VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK, "name") */ variable_name = mf_varname_tree (decl); register_fncall_params = tree_cons (NULL_TREE, convert (ptr_type_node, mf_mark (build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (decl)), decl))), tree_cons (NULL_TREE, size, tree_cons (NULL_TREE, build_int_2 (3, 0), /* __MF_TYPE_STACK */ tree_cons (NULL_TREE, variable_name, NULL_TREE)))); /* __mf_register (...) */ register_fncall = build_function_call_expr (mf_register_fndecl, register_fncall_params); /* Accumulate the two calls. */ /* ??? Set EXPR_LOCATION. */ gimplify_stmt (&register_fncall); gimplify_stmt (&unregister_fncall); /* Add the __mf_register call at the current appending point. */ if (tsi_end_p (initially_stmts)) internal_error ("mudflap ran off end of BIND_EXPR body"); tsi_link_before (&initially_stmts, register_fncall, TSI_SAME_STMT); /* Accumulate the FINALLY piece.
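Schematically, for a body such as

         {
           int a[10];
           ... code that takes &a ...
         }

      the accumulated calls turn it into roughly (a TRY_FINALLY_EXPR in GENERIC, written here as pseudo-C, with an approximate name string)

         {
           int a[10];
           __mf_register (&a, sizeof (a), __MF_TYPE_STACK, "file:line (fn) a");
           try { ... code that takes &a ... }
           finally { __mf_unregister (&a, sizeof (a), __MF_TYPE_STACK); }
         }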
*/ append_to_statement_list (unregister_fncall, &finally_stmts); mf_mark (decl); } decl = TREE_CHAIN (decl); } /* Actually, (initially_stmts!=NULL) <=> (finally_stmts!=NULL) */ if (finally_stmts != NULL_TREE) { tree t = build (TRY_FINALLY_EXPR, void_type_node, *stmt_list, finally_stmts); *stmt_list = NULL; append_to_statement_list (t, stmt_list); } } /* Process every variable mentioned in BIND_EXPRs. */ static tree mx_xfn_xform_decls (tree *t, int *continue_p, void *data) { struct mf_xform_decls_data* d = (struct mf_xform_decls_data*) data; if (*t == NULL_TREE || *t == error_mark_node) { *continue_p = 0; return NULL_TREE; } *continue_p = 1; switch (TREE_CODE (*t)) { case BIND_EXPR: { /* Process function parameters now (but only once). */ mx_register_decls (d->param_decls, &BIND_EXPR_BODY (*t)); d->param_decls = NULL_TREE; mx_register_decls (BIND_EXPR_VARS (*t), &BIND_EXPR_BODY (*t)); } break; default: break; } return NULL; } /* Perform the object lifetime tracking mudflap transform on the given function tree. The tree is mutated in place, with possibly copied subtree nodes. For every auto variable declared, if its address is ever taken within the function, then supply its lifetime to the mudflap runtime with the __mf_register and __mf_unregister calls. */ static void mf_xform_decls (tree fnbody, tree fnparams) { struct mf_xform_decls_data d; d.param_decls = fnparams; walk_tree_without_duplicates (&fnbody, mx_xfn_xform_decls, &d); } /* ------------------------------------------------------------------------ */ /* Externally visible mudflap functions. */ /* Mark and return the given tree node to prevent further mudflap transforms. */ static GTY ((param_is (union tree_node))) htab_t marked_trees = NULL; tree mf_mark (tree t) { void **slot; if (marked_trees == NULL) marked_trees = htab_create_ggc (31, htab_hash_pointer, htab_eq_pointer, NULL); slot = htab_find_slot (marked_trees, t, INSERT); *slot = t; return t; } int mf_marked_p (tree t) { void *entry; if (marked_trees == NULL) return 0; entry = htab_find (marked_trees, t); return (entry != NULL); } /* Remember given node as a static of some kind: global data, function-scope static, or an anonymous constant. Its assembler label is given. */ /* A list of globals whose incomplete declarations we encountered. Instead of emitting the __mf_register call for them here, it's delayed until program finish time. If they're still incomplete by then, warnings are emitted. */ static GTY (()) varray_type deferred_static_decls; /* A list of statements for calling __mf_register() at startup time. */ static GTY (()) tree enqueued_call_stmt_chain; static void mudflap_register_call (tree obj, tree object_size, tree varname) { tree arg, args, call_stmt; args = tree_cons (NULL_TREE, varname, NULL_TREE); arg = build_int_2 (4, 0); /* __MF_TYPE_STATIC */ args = tree_cons (NULL_TREE, arg, args); arg = convert (size_type_node, object_size); args = tree_cons (NULL_TREE, arg, args); arg = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (obj)), obj); arg = convert (ptr_type_node, arg); args = tree_cons (NULL_TREE, arg, args); call_stmt = build_function_call_expr (mf_register_fndecl, args); append_to_statement_list (call_stmt, &enqueued_call_stmt_chain); } void mudflap_enqueue_decl (tree obj) { if (mf_marked_p (obj)) return; /* We don't need to process variable decls that are internally generated extern. If we did, we'd end up with warnings for them during mudflap_finish_file (). 
That would confuse the user, since the text would refer to variables that don't show up in the user's source code. */ if (DECL_P (obj) && DECL_EXTERNAL (obj) && DECL_ARTIFICIAL (obj)) return; if (COMPLETE_TYPE_P (TREE_TYPE (obj))) { tree object_size; mf_mark (obj); object_size = size_in_bytes (TREE_TYPE (obj)); if (dump_file) { fprintf (dump_file, "enqueue_decl obj=`"); print_generic_expr (dump_file, obj, dump_flags); fprintf (dump_file, "' size="); print_generic_expr (dump_file, object_size, dump_flags); fprintf (dump_file, "\n"); } /* NB: the above condition doesn't require TREE_USED or TREE_ADDRESSABLE. That's because this object may be a global only used from other compilation units. XXX: Maybe static objects could require those attributes being set. */ mudflap_register_call (obj, object_size, mf_varname_tree (obj)); } else { size_t i; if (! deferred_static_decls) VARRAY_TREE_INIT (deferred_static_decls, 10, "deferred static list"); /* Ugh, linear search... */ for (i = 0; i < VARRAY_ACTIVE_SIZE (deferred_static_decls); i++) if (VARRAY_TREE (deferred_static_decls, i) == obj) { warning ("mudflap cannot track lifetime of `%s'", IDENTIFIER_POINTER (DECL_NAME (obj))); return; } VARRAY_PUSH_TREE (deferred_static_decls, obj); } } void mudflap_enqueue_constant (tree obj) { tree object_size, varname; if (mf_marked_p (obj)) return; if (TREE_CODE (obj) == STRING_CST) object_size = build_int_2 (TREE_STRING_LENGTH (obj), 0); else object_size = size_in_bytes (TREE_TYPE (obj)); if (dump_file) { fprintf (dump_file, "enqueue_constant obj=`"); print_generic_expr (dump_file, obj, dump_flags); fprintf (dump_file, "' size="); print_generic_expr (dump_file, object_size, dump_flags); fprintf (dump_file, "\n"); } if (TREE_CODE (obj) == STRING_CST) varname = mf_build_string ("string literal"); else varname = mf_build_string ("constant"); mudflap_register_call (obj, object_size, varname); } /* Emit any file-wide instrumentation. */ void mudflap_finish_file (void) { /* Try to give the deferred objects one final try. */ if (deferred_static_decls) { size_t i; for (i = 0; i < VARRAY_ACTIVE_SIZE (deferred_static_decls); i++) { tree obj = VARRAY_TREE (deferred_static_decls, i); /* Call enqueue_decl again on the same object it has previously put into the table. (It won't modify the table this time, so infinite iteration is not a problem.) */ mudflap_enqueue_decl (obj); } VARRAY_CLEAR (deferred_static_decls); } if (enqueued_call_stmt_chain) { cgraph_build_static_cdtor ('I', enqueued_call_stmt_chain); enqueued_call_stmt_chain = 0; } } static bool gate_mudflap (void) { return flag_mudflap != 0; } struct tree_opt_pass pass_mudflap_1 = { "mudflap1", /* name */ gate_mudflap, /* gate */ execute_mudflap_function_decls, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_gimple_any, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func /* todo_flags_finish */ }; struct tree_opt_pass pass_mudflap_2 = { "mudflap2", /* name */ gate_mudflap, /* gate */ execute_mudflap_function_ops, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_gimple_leh, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_verify_flow | TODO_verify_stmts | TODO_dump_func /* todo_flags_finish */ }; /* Type information for tree-mudflap.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_tree_mudflap_h[] = { { &enqueued_call_stmt_chain, 1, sizeof (enqueued_call_stmt_chain), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &deferred_static_decls, 1, sizeof (deferred_static_decls), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &marked_trees, 1, sizeof (marked_trees), &gt_ggc_m_P9tree_node4htab, &gt_pch_n_P9tree_node4htab }, { &mf_unregister_fndecl, 1, sizeof (mf_unregister_fndecl), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_register_fndecl, 1, sizeof (mf_register_fndecl), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_check_fndecl, 1, sizeof (mf_check_fndecl), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_cache_mask_decl_l, 1, sizeof (mf_cache_mask_decl_l), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_cache_shift_decl_l, 1, sizeof (mf_cache_shift_decl_l), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_cache_mask_decl, 1, sizeof (mf_cache_mask_decl), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_cache_shift_decl, 1, sizeof (mf_cache_shift_decl), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_cache_array_decl, 1, sizeof (mf_cache_array_decl), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_cache_structptr_type, 1, sizeof (mf_cache_structptr_type), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_cache_struct_type, 1, sizeof (mf_cache_struct_type), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &mf_uintptr_type, 1, sizeof (mf_uintptr_type), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; /* Chains of recurrences. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Sebastian Pop This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file implements operations on chains of recurrences. Chains of recurrences are used for modeling evolution functions of scalar variables. */ /* Chains of recurrences. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Sebastian Pop This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_TREE_CHREC_H #define GCC_TREE_CHREC_H /* Accessors for the chains of recurrences. */ #define CHREC_VAR(NODE) TREE_OPERAND (NODE, 0) #define CHREC_LEFT(NODE) TREE_OPERAND (NODE, 1) #define CHREC_RIGHT(NODE) TREE_OPERAND (NODE, 2) #define CHREC_VARIABLE(NODE) TREE_INT_CST_LOW (CHREC_VAR (NODE)) /* The following trees are unique elements. Thus the comparison of another element to these elements should be done on the pointer to these trees, and not on their value. */ extern tree chrec_not_analyzed_yet; extern GTY(()) tree chrec_dont_know; extern GTY(()) tree chrec_known; /* After having added an automatically generated element, please include it in the following function. */ static inline bool automatically_generated_chrec_p (tree chrec) { return (chrec == chrec_not_analyzed_yet || chrec == chrec_dont_know || chrec == chrec_known); } /* The tree nodes aka. CHRECs. */ static inline bool tree_is_chrec (tree expr) { if (TREE_CODE (expr) == POLYNOMIAL_CHREC || automatically_generated_chrec_p (expr)) return true; else return false; } /* Chrec folding functions. */ extern tree chrec_fold_plus (tree, tree, tree); extern tree chrec_fold_minus (tree, tree, tree); extern tree chrec_fold_multiply (tree, tree, tree); extern tree chrec_convert (tree, tree); extern tree count_ev_in_wider_type (tree, tree); extern tree chrec_type (tree); /* Operations. */ extern tree chrec_apply (unsigned, tree, tree); extern tree chrec_replace_initial_condition (tree, tree); extern tree update_initial_condition_to_origin (tree); extern tree initial_condition (tree); extern tree evolution_part_in_loop_num (tree, unsigned); extern tree hide_evolution_in_other_loops_than_loop (tree, unsigned); extern tree reset_evolution_in_loop (unsigned, tree, tree); extern tree chrec_merge (tree, tree); /* Observers. */ extern bool is_multivariate_chrec (tree); extern bool chrec_is_positive (tree, bool *); extern bool chrec_contains_symbols (tree); extern bool chrec_contains_symbols_defined_in_loop (tree, unsigned); extern bool chrec_contains_undetermined (tree); extern bool tree_contains_chrecs (tree); extern bool evolution_function_is_affine_multivariate_p (tree); extern bool evolution_function_is_univariate_p (tree); /* Build a polynomial chain of recurrence. */ static inline tree build_polynomial_chrec (unsigned loop_num, tree left, tree right) { if (left == chrec_dont_know || right == chrec_dont_know) return chrec_dont_know; return build (POLYNOMIAL_CHREC, TREE_TYPE (left), build_int_2 (loop_num, 0), left, right); } /* Observers. */ /* Determines whether CHREC is equal to zero. */ static inline bool chrec_zerop (tree chrec) { if (chrec == NULL_TREE) return false; if (TREE_CODE (chrec) == INTEGER_CST) return integer_zerop (chrec); return false; } /* Determines whether the expression CHREC is a constant. */ static inline bool evolution_function_is_constant_p (tree chrec) { if (chrec == NULL_TREE) return false; switch (TREE_CODE (chrec)) { case INTEGER_CST: case REAL_CST: return true; default: return false; } } /* Determine whether the given tree is an affine evolution function or not. 
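For instance, {2, +, 3}_1 is affine, whereas {2, +, {0, +, 1}_1}_1 is not, because its step is itself an evolution rather than a constant.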
*/ static inline bool evolution_function_is_affine_p (tree chrec) { if (chrec == NULL_TREE) return false; switch (TREE_CODE (chrec)) { case POLYNOMIAL_CHREC: if (evolution_function_is_constant_p (CHREC_LEFT (chrec)) && evolution_function_is_constant_p (CHREC_RIGHT (chrec))) return true; else return false; default: return false; } } /* Determine whether the given tree is an affine or constant evolution function. */ static inline bool evolution_function_is_affine_or_constant_p (tree chrec) { return evolution_function_is_affine_p (chrec) || evolution_function_is_constant_p (chrec); } /* Determines whether EXPR does not contains chrec expressions. */ static inline bool tree_does_not_contain_chrecs (tree expr) { return !tree_contains_chrecs (expr); } /* Determines whether CHREC is a loop invariant with respect to LOOP_NUM. Set the result in RES and return true when the property can be computed. */ static inline bool no_evolution_in_loop_p (tree chrec, unsigned loop_num, bool *res) { tree scev; if (chrec == chrec_not_analyzed_yet || chrec == chrec_dont_know || chrec_contains_symbols_defined_in_loop (chrec, loop_num)) return false; scev = hide_evolution_in_other_loops_than_loop (chrec, loop_num); *res = !tree_is_chrec (scev); return true; } #endif /* GCC_TREE_CHREC_H */ /* This part will be removed once the merging is finished. */ /* The following trees are unique elements. Thus the comparison of another element to these elements should be done on the pointer to these trees, and not on their value. */ /* The SSA_NAMEs that are not yet analyzed are qualified with NULL_TREE. */ tree chrec_not_analyzed_yet; /* Reserved to the cases where the analyzer has detected an undecidable property at compile time. */ tree chrec_dont_know; /* When the analyzer has detected that a property will never happen, then it qualifies it with chrec_known. */ tree chrec_known; /* Empty hook. Will be replaced by the main function from tree-scalar-evolution.c. */ tree count_ev_in_wider_type (tree foo ATTRIBUTE_UNUSED, tree bar ATTRIBUTE_UNUSED) { return NULL_TREE; } /* Empty hook. Will be replaced by the main function from tree-scalar-evolution.c. */ bool chrec_contains_symbols_defined_in_loop (tree chrec ATTRIBUTE_UNUSED, unsigned loop_nb ATTRIBUTE_UNUSED) { return true; } /* Extended folder for chrecs. */ /* Determines whether CST is not a constant evolution. */ static inline bool is_not_constant_evolution (tree cst) { return (TREE_CODE (cst) == POLYNOMIAL_CHREC); } /* Fold CODE for a polynomial function and a constant. */ static inline tree chrec_fold_poly_cst (enum tree_code code, tree type, tree poly, tree cst) { #if defined ENABLE_CHECKING if (poly == NULL_TREE || cst == NULL_TREE || TREE_CODE (poly) != POLYNOMIAL_CHREC || is_not_constant_evolution (cst)) abort (); #endif switch (code) { case PLUS_EXPR: return build_polynomial_chrec (CHREC_VARIABLE (poly), chrec_fold_plus (type, CHREC_LEFT (poly), cst), CHREC_RIGHT (poly)); case MINUS_EXPR: return build_polynomial_chrec (CHREC_VARIABLE (poly), chrec_fold_minus (type, CHREC_LEFT (poly), cst), CHREC_RIGHT (poly)); case MULT_EXPR: return build_polynomial_chrec (CHREC_VARIABLE (poly), chrec_fold_multiply (type, CHREC_LEFT (poly), cst), chrec_fold_multiply (type, CHREC_RIGHT (poly), cst)); default: return chrec_dont_know; } } /* Fold the addition of two polynomial functions. 
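For instance, {1, +, 2}_1 + {3, +, 4}_1 folds to {4, +, 6}_1, while {1, +, 2}_1 + {3, +, 4}_2 folds to {{4, +, 2}_1, +, 4}_2 because the evolution in loop 1 is a constant with respect to loop 2.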
*/ static inline tree chrec_fold_plus_poly_poly (enum tree_code code, tree type, tree poly0, tree poly1) { tree left, right; #if defined ENABLE_CHECKING if (poly0 == NULL_TREE || poly1 == NULL_TREE || TREE_CODE (poly0) != POLYNOMIAL_CHREC || TREE_CODE (poly1) != POLYNOMIAL_CHREC) abort (); #endif /* {a, +, b}_1 + {c, +, d}_2 -> {{a, +, b}_1 + c, +, d}_2, {a, +, b}_2 + {c, +, d}_1 -> {{c, +, d}_1 + a, +, b}_2, {a, +, b}_x + {c, +, d}_x -> {a+c, +, b+d}_x. */ if (CHREC_VARIABLE (poly0) < CHREC_VARIABLE (poly1)) { if (code == PLUS_EXPR) return build_polynomial_chrec (CHREC_VARIABLE (poly1), chrec_fold_plus (type, poly0, CHREC_LEFT (poly1)), CHREC_RIGHT (poly1)); else return build_polynomial_chrec (CHREC_VARIABLE (poly1), chrec_fold_minus (type, poly0, CHREC_LEFT (poly1)), chrec_fold_multiply (type, CHREC_RIGHT (poly1), convert (type, integer_minus_one_node))); } if (CHREC_VARIABLE (poly0) > CHREC_VARIABLE (poly1)) { if (code == PLUS_EXPR) return build_polynomial_chrec (CHREC_VARIABLE (poly0), chrec_fold_plus (type, CHREC_LEFT (poly0), poly1), CHREC_RIGHT (poly0)); else return build_polynomial_chrec (CHREC_VARIABLE (poly0), chrec_fold_minus (type, CHREC_LEFT (poly0), poly1), CHREC_RIGHT (poly0)); } if (code == PLUS_EXPR) { left = chrec_fold_plus (type, CHREC_LEFT (poly0), CHREC_LEFT (poly1)); right = chrec_fold_plus (type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1)); } else { left = chrec_fold_minus (type, CHREC_LEFT (poly0), CHREC_LEFT (poly1)); right = chrec_fold_minus (type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1)); } if (chrec_zerop (right)) return left; else return build_polynomial_chrec (CHREC_VARIABLE (poly0), left, right); } /* Fold the multiplication of two polynomial functions. */ static inline tree chrec_fold_multiply_poly_poly (tree type, tree poly0, tree poly1) { #if defined ENABLE_CHECKING if (poly0 == NULL_TREE || poly1 == NULL_TREE || TREE_CODE (poly0) != POLYNOMIAL_CHREC || TREE_CODE (poly1) != POLYNOMIAL_CHREC) abort (); #endif /* {a, +, b}_1 * {c, +, d}_2 -> {c*{a, +, b}_1, +, d}_2, {a, +, b}_2 * {c, +, d}_1 -> {a*{c, +, d}_1, +, b}_2, {a, +, b}_x * {c, +, d}_x -> {a*c, +, a*d + b*c + b*d, +, 2*b*d}_x. */ if (CHREC_VARIABLE (poly0) < CHREC_VARIABLE (poly1)) /* poly0 is a constant wrt. poly1. */ return build_polynomial_chrec (CHREC_VARIABLE (poly1), chrec_fold_multiply (type, CHREC_LEFT (poly1), poly0), CHREC_RIGHT (poly1)); if (CHREC_VARIABLE (poly1) < CHREC_VARIABLE (poly0)) /* poly1 is a constant wrt. poly0. */ return build_polynomial_chrec (CHREC_VARIABLE (poly0), chrec_fold_multiply (type, CHREC_LEFT (poly0), poly1), CHREC_RIGHT (poly0)); /* poly0 and poly1 are two polynomials in the same variable, {a, +, b}_x * {c, +, d}_x -> {a*c, +, a*d + b*c + b*d, +, 2*b*d}_x. */ return build_polynomial_chrec (CHREC_VARIABLE (poly0), build_polynomial_chrec (CHREC_VARIABLE (poly0), /* "a*c". */ chrec_fold_multiply (type, CHREC_LEFT (poly0), CHREC_LEFT (poly1)), /* "a*d + b*c + b*d". */ chrec_fold_plus (type, chrec_fold_multiply (type, CHREC_LEFT (poly0), CHREC_RIGHT (poly1)), chrec_fold_plus (type, chrec_fold_multiply (type, CHREC_RIGHT (poly0), CHREC_LEFT (poly1)), chrec_fold_multiply (type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1))))), /* "2*b*d". */ chrec_fold_multiply (type, build_int_2 (2, 0), chrec_fold_multiply (type, CHREC_RIGHT (poly0), CHREC_RIGHT (poly1)))); } /* When the operands are automatically_generated_chrec_p, the fold has to respect the semantics of the operands. 
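Concretely, chrec_dont_know absorbs everything, then chrec_known, then chrec_not_analyzed_yet: folding {1, +, 2}_1 with chrec_dont_know yields chrec_dont_know.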
*/ static inline tree chrec_fold_automatically_generated_operands (tree op0, tree op1) { if (op0 == chrec_dont_know || op1 == chrec_dont_know) return chrec_dont_know; if (op0 == chrec_known || op1 == chrec_known) return chrec_known; if (op0 == chrec_not_analyzed_yet || op1 == chrec_not_analyzed_yet) return chrec_not_analyzed_yet; /* The default case produces a safe result. */ return chrec_dont_know; } /* Fold the addition of two chrecs. */ static tree chrec_fold_plus_1 (enum tree_code code, tree type, tree op0, tree op1) { if (automatically_generated_chrec_p (op0) || automatically_generated_chrec_p (op1)) return chrec_fold_automatically_generated_operands (op0, op1); switch (TREE_CODE (op0)) { case POLYNOMIAL_CHREC: switch (TREE_CODE (op1)) { case POLYNOMIAL_CHREC: return chrec_fold_plus_poly_poly (code, type, op0, op1); default: if (code == PLUS_EXPR) return build_polynomial_chrec (CHREC_VARIABLE (op0), chrec_fold_plus (type, CHREC_LEFT (op0), op1), CHREC_RIGHT (op0)); else return build_polynomial_chrec (CHREC_VARIABLE (op0), chrec_fold_minus (type, CHREC_LEFT (op0), op1), CHREC_RIGHT (op0)); } default: switch (TREE_CODE (op1)) { case POLYNOMIAL_CHREC: if (code == PLUS_EXPR) return build_polynomial_chrec (CHREC_VARIABLE (op1), chrec_fold_plus (type, op0, CHREC_LEFT (op1)), CHREC_RIGHT (op1)); else return build_polynomial_chrec (CHREC_VARIABLE (op1), chrec_fold_minus (type, op0, CHREC_LEFT (op1)), chrec_fold_multiply (type, CHREC_RIGHT (op1), convert (type, integer_minus_one_node))); default: if (tree_contains_chrecs (op0) || tree_contains_chrecs (op1)) return build (code, type, op0, op1); else return fold (build (code, type, op0, op1)); } } } /* Fold the addition of two chrecs. */ tree chrec_fold_plus (tree type, tree op0, tree op1) { if (integer_zerop (op0)) return op1; if (integer_zerop (op1)) return op0; return chrec_fold_plus_1 (PLUS_EXPR, type, op0, op1); } /* Fold the subtraction of two chrecs. */ tree chrec_fold_minus (tree type, tree op0, tree op1) { if (integer_zerop (op1)) return op0; return chrec_fold_plus_1 (MINUS_EXPR, type, op0, op1); } /* Fold the multiplication of two chrecs. */ tree chrec_fold_multiply (tree type, tree op0, tree op1) { if (automatically_generated_chrec_p (op0) || automatically_generated_chrec_p (op1)) return chrec_fold_automatically_generated_operands (op0, op1); switch (TREE_CODE (op0)) { case POLYNOMIAL_CHREC: switch (TREE_CODE (op1)) { case POLYNOMIAL_CHREC: return chrec_fold_multiply_poly_poly (type, op0, op1); default: if (integer_onep (op1)) return op0; if (integer_zerop (op1)) return convert (type, integer_zero_node); return build_polynomial_chrec (CHREC_VARIABLE (op0), chrec_fold_multiply (type, CHREC_LEFT (op0), op1), chrec_fold_multiply (type, CHREC_RIGHT (op0), op1)); } default: if (integer_onep (op0)) return op1; if (integer_zerop (op0)) return convert (type, integer_zero_node); switch (TREE_CODE (op1)) { case POLYNOMIAL_CHREC: return build_polynomial_chrec (CHREC_VARIABLE (op1), chrec_fold_multiply (type, CHREC_LEFT (op1), op0), chrec_fold_multiply (type, CHREC_RIGHT (op1), op0)); default: if (integer_onep (op1)) return op0; if (integer_zerop (op1)) return convert (type, integer_zero_node); return fold (build (MULT_EXPR, type, op0, op1)); } } } /* Operations. */ /* The factorial. 
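tree_fold_factorial (f) folds to f! for a positive INTEGER_CST f, and to 1 otherwise; it is used only to build the binomial coefficients n! / (k! * (n - k)!) that appear in the Newton interpolation performed by chrec_evaluate below. For example, tree_fold_binomial (10, 1) folds to 10.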
*/ static tree tree_fold_factorial (tree f) { if (tree_int_cst_sgn (f) <= 0) return integer_one_node; else return fold (build (MULT_EXPR, integer_type_node, f, tree_fold_factorial (fold (build (MINUS_EXPR, integer_type_node, f, integer_one_node))))); } /* The binomial coefficient. */ static tree tree_fold_binomial (tree n, tree k) { return fold (build (EXACT_DIV_EXPR, integer_type_node, tree_fold_factorial (n), fold (build (MULT_EXPR, integer_type_node, tree_fold_factorial (k), tree_fold_factorial (fold (build (MINUS_EXPR, integer_type_node, n, k))))))); } /* Helper function. Use the Newton's interpolating formula for evaluating the value of the evolution function. */ static tree chrec_evaluate (unsigned var, tree chrec, tree n, tree k) { tree type = chrec_type (chrec); tree binomial_n_k = tree_fold_binomial (n, k); if (TREE_CODE (chrec) == POLYNOMIAL_CHREC) { if (CHREC_VARIABLE (chrec) > var) return chrec_evaluate (var, CHREC_LEFT (chrec), n, k); if (CHREC_VARIABLE (chrec) == var) return chrec_fold_plus (type, fold (build (MULT_EXPR, type, binomial_n_k, CHREC_LEFT (chrec))), chrec_evaluate (var, CHREC_RIGHT (chrec), n, fold (build (PLUS_EXPR, type, k, integer_one_node)))); return fold (build (MULT_EXPR, type, binomial_n_k, chrec)); } else return fold (build (MULT_EXPR, type, binomial_n_k, chrec)); } /* Evaluates "CHREC (X)" when the varying variable is VAR. Example: Given the following parameters, var = 1 chrec = {3, +, 4}_1 x = 10 The result is given by the Newton's interpolating formula: 3 * \binom{10}{0} + 4 * \binom{10}{1}. */ tree chrec_apply (unsigned var, tree chrec, tree x) { tree type = chrec_type (chrec); tree res = chrec_dont_know; if (automatically_generated_chrec_p (chrec) || automatically_generated_chrec_p (x) /* When the symbols are defined in an outer loop, it is possible to symbolically compute the apply, since the symbols are constants with respect to the varying loop. */ || chrec_contains_symbols_defined_in_loop (chrec, var) || chrec_contains_symbols (x)) return chrec_dont_know; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "(chrec_apply \n"); if (evolution_function_is_affine_p (chrec)) { /* "{a, +, b} (x)" -> "a + b*x". */ if (TREE_CODE (CHREC_LEFT (chrec)) == INTEGER_CST && integer_zerop (CHREC_LEFT (chrec))) res = chrec_fold_multiply (type, CHREC_RIGHT (chrec), x); else res = chrec_fold_plus (type, CHREC_LEFT (chrec), chrec_fold_multiply (type, CHREC_RIGHT (chrec), x)); } else if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) res = chrec; else if (TREE_CODE (x) == INTEGER_CST && tree_int_cst_sgn (x) == 1) /* testsuite/.../ssa-chrec-38.c. */ res = chrec_evaluate (var, chrec, x, integer_zero_node); else res = chrec_dont_know; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " (varying_loop = %d\n", var); fprintf (dump_file, ")\n (chrec = "); print_generic_expr (dump_file, chrec, 0); fprintf (dump_file, ")\n (x = "); print_generic_expr (dump_file, x, 0); fprintf (dump_file, ")\n (res = "); print_generic_expr (dump_file, res, 0); fprintf (dump_file, "))\n"); } return res; } /* Replaces the initial condition in CHREC with INIT_COND. */ tree chrec_replace_initial_condition (tree chrec, tree init_cond) { if (automatically_generated_chrec_p (chrec)) return chrec; switch (TREE_CODE (chrec)) { case POLYNOMIAL_CHREC: return build_polynomial_chrec (CHREC_VARIABLE (chrec), chrec_replace_initial_condition (CHREC_LEFT (chrec), init_cond), CHREC_RIGHT (chrec)); default: return init_cond; } } /* Returns the initial condition of a given CHREC. 
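For instance, initial_condition ({{0, +, 1}_1, +, 2}_2) is 0; a tree that is not a chrec is returned unchanged.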
*/ tree initial_condition (tree chrec) { if (automatically_generated_chrec_p (chrec)) return chrec; if (TREE_CODE (chrec) == POLYNOMIAL_CHREC) return initial_condition (CHREC_LEFT (chrec)); else return chrec; } /* Returns a univariate function that represents the evolution in LOOP_NUM. Mask the evolution of any other loop. */ tree hide_evolution_in_other_loops_than_loop (tree chrec, unsigned loop_num) { if (automatically_generated_chrec_p (chrec)) return chrec; switch (TREE_CODE (chrec)) { case POLYNOMIAL_CHREC: if (CHREC_VARIABLE (chrec) == loop_num) return build_polynomial_chrec (loop_num, hide_evolution_in_other_loops_than_loop (CHREC_LEFT (chrec), loop_num), CHREC_RIGHT (chrec)); else if (CHREC_VARIABLE (chrec) < loop_num) /* There is no evolution in this loop. */ return initial_condition (chrec); else return hide_evolution_in_other_loops_than_loop (CHREC_LEFT (chrec), loop_num); default: return chrec; } } /* Returns the evolution part in LOOP_NUM. Example: the call get_evolution_in_loop (1, {{0, +, 1}_1, +, 2}_1) returns {1, +, 2}_1 */ tree evolution_part_in_loop_num (tree chrec, unsigned loop_num) { if (automatically_generated_chrec_p (chrec)) return chrec; switch (TREE_CODE (chrec)) { case POLYNOMIAL_CHREC: if (CHREC_VARIABLE (chrec) == loop_num) { if (TREE_CODE (CHREC_LEFT (chrec)) != POLYNOMIAL_CHREC || CHREC_VARIABLE (CHREC_LEFT (chrec)) != CHREC_VARIABLE (chrec)) return CHREC_RIGHT (chrec); else return build_polynomial_chrec (loop_num, evolution_part_in_loop_num (CHREC_LEFT (chrec), loop_num), CHREC_RIGHT (chrec)); } else if (CHREC_VARIABLE (chrec) < loop_num) /* There is no evolution part in this loop. */ return NULL_TREE; else return evolution_part_in_loop_num (CHREC_LEFT (chrec), loop_num); default: return NULL_TREE; } } /* Set or reset the evolution of CHREC to NEW_EVOL in loop LOOP_NUM. This function is essentially used for setting the evolution to chrec_dont_know, for example after having determined that it is impossible to say how many times a loop will execute. */ tree reset_evolution_in_loop (unsigned loop_num, tree chrec, tree new_evol) { if (TREE_CODE (chrec) == POLYNOMIAL_CHREC && CHREC_VARIABLE (chrec) > loop_num) return build (TREE_CODE (chrec), build_int_2 (CHREC_VARIABLE (chrec), 0), reset_evolution_in_loop (loop_num, CHREC_LEFT (chrec), new_evol), reset_evolution_in_loop (loop_num, CHREC_RIGHT (chrec), new_evol)); while (TREE_CODE (chrec) == POLYNOMIAL_CHREC && CHREC_VARIABLE (chrec) == loop_num) chrec = CHREC_LEFT (chrec); return build_polynomial_chrec (loop_num, chrec, new_evol); } /* Merges two evolution functions that were found by following two alternate paths of a conditional expression. */ tree chrec_merge (tree chrec1, tree chrec2) { if (chrec1 == chrec_dont_know || chrec2 == chrec_dont_know) return chrec_dont_know; if (chrec1 == chrec_known || chrec2 == chrec_known) return chrec_known; if (chrec1 == chrec_not_analyzed_yet) return chrec2; if (chrec2 == chrec_not_analyzed_yet) return chrec1; if (operand_equal_p (chrec1, chrec2, 0)) return chrec1; return chrec_dont_know; } /* Observers. */ /* Helper function for is_multivariate_chrec. */ static bool is_multivariate_chrec_rec (tree chrec, unsigned int rec_var) { if (chrec == NULL_TREE) return false; if (TREE_CODE (chrec) == POLYNOMIAL_CHREC) { if (CHREC_VARIABLE (chrec) != rec_var) return true; else return (is_multivariate_chrec_rec (CHREC_LEFT (chrec), rec_var) || is_multivariate_chrec_rec (CHREC_RIGHT (chrec), rec_var)); } else return false; } /* Determine whether the given chrec is multivariate or not. 
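For instance, {{0, +, 1}_1, +, 2}_2 is multivariate, since it evolves in both loop 1 and loop 2, whereas {{0, +, 1}_1, +, 2}_1 is not.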
*/ bool is_multivariate_chrec (tree chrec) { if (chrec == NULL_TREE) return false; if (TREE_CODE (chrec) == POLYNOMIAL_CHREC) return (is_multivariate_chrec_rec (CHREC_LEFT (chrec), CHREC_VARIABLE (chrec)) || is_multivariate_chrec_rec (CHREC_RIGHT (chrec), CHREC_VARIABLE (chrec))); else return false; } /* Determines whether the chrec contains symbolic names or not. */ bool chrec_contains_symbols (tree chrec) { if (chrec == NULL_TREE) return false; if (TREE_CODE (chrec) == SSA_NAME || TREE_CODE (chrec) == VAR_DECL || TREE_CODE (chrec) == PARM_DECL || TREE_CODE (chrec) == FUNCTION_DECL || TREE_CODE (chrec) == LABEL_DECL || TREE_CODE (chrec) == RESULT_DECL || TREE_CODE (chrec) == FIELD_DECL) return true; switch (TREE_CODE_LENGTH (TREE_CODE (chrec))) { case 3: if (chrec_contains_symbols (TREE_OPERAND (chrec, 2))) return true; case 2: if (chrec_contains_symbols (TREE_OPERAND (chrec, 1))) return true; case 1: if (chrec_contains_symbols (TREE_OPERAND (chrec, 0))) return true; default: return false; } } /* Determines whether the chrec contains undetermined coefficients. */ bool chrec_contains_undetermined (tree chrec) { if (chrec == chrec_dont_know || chrec == chrec_not_analyzed_yet || chrec == NULL_TREE) return true; switch (TREE_CODE_LENGTH (TREE_CODE (chrec))) { case 3: if (chrec_contains_undetermined (TREE_OPERAND (chrec, 2))) return true; case 2: if (chrec_contains_undetermined (TREE_OPERAND (chrec, 1))) return true; case 1: if (chrec_contains_undetermined (TREE_OPERAND (chrec, 0))) return true; default: return false; } } /* Determines whether the tree EXPR contains chrecs. */ bool tree_contains_chrecs (tree expr) { if (expr == NULL_TREE) return false; if (tree_is_chrec (expr)) return true; switch (TREE_CODE_LENGTH (TREE_CODE (expr))) { case 3: if (tree_contains_chrecs (TREE_OPERAND (expr, 2))) return true; case 2: if (tree_contains_chrecs (TREE_OPERAND (expr, 1))) return true; case 1: if (tree_contains_chrecs (TREE_OPERAND (expr, 0))) return true; default: return false; } } /* Determine whether the given tree is an affine multivariate evolution. */ bool evolution_function_is_affine_multivariate_p (tree chrec) { if (chrec == NULL_TREE) return false; switch (TREE_CODE (chrec)) { case POLYNOMIAL_CHREC: if (evolution_function_is_constant_p (CHREC_LEFT (chrec))) { if (evolution_function_is_constant_p (CHREC_RIGHT (chrec))) return true; else { if (TREE_CODE (CHREC_RIGHT (chrec)) == POLYNOMIAL_CHREC && CHREC_VARIABLE (CHREC_RIGHT (chrec)) != CHREC_VARIABLE (chrec) && evolution_function_is_affine_multivariate_p (CHREC_RIGHT (chrec))) return true; else return false; } } else { if (evolution_function_is_constant_p (CHREC_RIGHT (chrec)) && TREE_CODE (CHREC_LEFT (chrec)) == POLYNOMIAL_CHREC && CHREC_VARIABLE (CHREC_LEFT (chrec)) != CHREC_VARIABLE (chrec) && evolution_function_is_affine_multivariate_p (CHREC_LEFT (chrec))) return true; else return false; } default: return false; } } /* Determine whether the given tree is a function in zero or one variables. 
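For instance, {0, +, {1, +, 2}_1}_1 depends on loop 1 only and is univariate, whereas {{0, +, 1}_1, +, 2}_2 is not.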
*/ bool evolution_function_is_univariate_p (tree chrec) { if (chrec == NULL_TREE) return true; switch (TREE_CODE (chrec)) { case POLYNOMIAL_CHREC: switch (TREE_CODE (CHREC_LEFT (chrec))) { case POLYNOMIAL_CHREC: if (CHREC_VARIABLE (chrec) != CHREC_VARIABLE (CHREC_LEFT (chrec))) return false; if (!evolution_function_is_univariate_p (CHREC_LEFT (chrec))) return false; break; default: break; } switch (TREE_CODE (CHREC_RIGHT (chrec))) { case POLYNOMIAL_CHREC: if (CHREC_VARIABLE (chrec) != CHREC_VARIABLE (CHREC_RIGHT (chrec))) return false; if (!evolution_function_is_univariate_p (CHREC_RIGHT (chrec))) return false; break; default: break; } default: return true; } } /* Convert the initial condition of chrec to type. */ tree chrec_convert (tree type, tree chrec) { tree ct; if (automatically_generated_chrec_p (chrec)) return chrec; ct = chrec_type (chrec); if (ct == type) return chrec; if (TYPE_PRECISION (ct) < TYPE_PRECISION (type)) return count_ev_in_wider_type (type, chrec); switch (TREE_CODE (chrec)) { case POLYNOMIAL_CHREC: return build_polynomial_chrec (CHREC_VARIABLE (chrec), chrec_convert (type, CHREC_LEFT (chrec)), chrec_convert (type, CHREC_RIGHT (chrec))); default: { tree res = convert (type, chrec); /* Don't propagate overflows. */ TREE_OVERFLOW (res) = 0; if (TREE_CODE_CLASS (TREE_CODE (res)) == 'c') TREE_CONSTANT_OVERFLOW (res) = 0; return res; } } } /* Returns the type of the chrec. */ tree chrec_type (tree chrec) { if (automatically_generated_chrec_p (chrec)) return NULL_TREE; return TREE_TYPE (chrec); } extern void initialize_scalar_evolutions_analyzer (void); /* Initializer. */ void initialize_scalar_evolutions_analyzer (void) { /* The elements below are unique. */ if (chrec_dont_know == NULL_TREE) { chrec_not_analyzed_yet = NULL_TREE; chrec_dont_know = make_node (SCEV_NOT_KNOWN); chrec_known = make_node (SCEV_KNOWN); TREE_TYPE (chrec_dont_know) = NULL_TREE; TREE_TYPE (chrec_known) = NULL_TREE; } } /* Control flow functions for trees. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Declarations for insn-output.c. These functions are defined in recog.c, final.c, and varasm.c. Copyright (C) 1987, 1991, 1994, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_OUTPUT_H #define GCC_OUTPUT_H /* Compute branch alignments based on frequency information in the CFG. */ extern void compute_alignments (void); /* Initialize data in final at the beginning of a compilation. */ extern void init_final (const char *); /* Enable APP processing of subsequent output. Used before the output from an `asm' statement. */ extern void app_enable (void); /* Disable APP processing of subsequent output. Called from varasm.c before most kinds of output. */ extern void app_disable (void); /* Return the number of slots filled in the current delayed branch sequence (we don't count the insn needing the delay slot). Zero if not in a delayed branch sequence. */ extern int dbr_sequence_length (void); /* Indicate that branch shortening hasn't yet been done. */ extern void init_insn_lengths (void); /* Obtain the current length of an insn. If branch shortening has been done, get its actual length. Otherwise, get its maximum length. */ extern int get_attr_length (rtx); /* Make a pass over all insns and compute their actual lengths by shortening any branches of variable length if possible. */ extern void shorten_branches (rtx); /* Output assembler code for the start of a function, and initialize some of the variables in this file for the new function. The label for the function and associated assembler pseudo-ops have already been output in `assemble_start_function'. */ extern void final_start_function (rtx, FILE *, int); /* Output assembler code for the end of a function. For clarity, args are same as those of `final_start_function' even though not all of them are needed. */ extern void final_end_function (void); /* Output assembler code for some insns: all or part of a function. */ extern void final (rtx, FILE *, int, int); /* The final scan for one insn, INSN. Args are same as in `final', except that INSN is the insn being scanned. Value returned is the next insn to be scanned. */ extern rtx final_scan_insn (rtx, FILE *, int, int, int, int *); /* Replace a SUBREG with a REG or a MEM, based on the thing it is a subreg of. */ extern rtx alter_subreg (rtx *); /* Report inconsistency between the assembler template and the operands. In an `asm', it's the user's fault; otherwise, the compiler's fault. */ extern void output_operand_lossage (const char *, ...) ATTRIBUTE_PRINTF_1; /* Output a string of assembler code, substituting insn operands. Defined in final.c. */ extern void output_asm_insn (const char *, rtx *); /* Compute a worst-case reference address of a branch so that it can be safely used in the presence of aligned labels. Defined in final.c. */ extern int insn_current_reference_address (rtx); /* Find the alignment associated with a CODE_LABEL. Defined in final.c. */ extern int label_to_alignment (rtx); /* Output a LABEL_REF, or a bare CODE_LABEL, as an assembler symbol. */ extern void output_asm_label (rtx); /* Print a memory reference operand for address X using machine-dependent assembler syntax. */ extern void output_address (rtx); /* Print an integer constant expression in assembler syntax. Addition and subtraction are the only arithmetic that may appear in these expressions. */ extern void output_addr_const (FILE *, rtx); /* Output a string of assembler code, substituting numbers, strings and fixed syntactic prefixes. 
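   A hedged usage sketch (FNNAME and SIZE are hypothetical locals; %U
   prints the user label prefix and %wd prints a HOST_WIDE_INT in
   decimal):

     asm_fprintf (asm_out_file, "\t.size\t%U%s, %wd\n",
                  fnname, (HOST_WIDE_INT) size);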
*/ #if GCC_VERSION >= 3004 #define ATTRIBUTE_ASM_FPRINTF(m, n) __attribute__ ((__format__ (__asm_fprintf__, m, n))) ATTRIBUTE_NONNULL(m) /* This is a magic identifier which allows GCC to figure out the type of HOST_WIDE_INT for %wd specifier checks. You must issue this typedef before using the __asm_fprintf__ format attribute. */ typedef HOST_WIDE_INT __gcc_host_wide_int__; #else #define ATTRIBUTE_ASM_FPRINTF(m, n) ATTRIBUTE_NONNULL(m) #endif extern void asm_fprintf (FILE *file, const char *p, ...) ATTRIBUTE_ASM_FPRINTF(2, 3); /* Split up a CONST_DOUBLE or integer constant rtx into two rtx's for single words. */ extern void split_double (rtx, rtx *, rtx *); /* Return nonzero if this function has no function calls. */ extern int leaf_function_p (void); /* Return 1 if branch is a forward branch. Uses insn_shuid array, so it works only in the final pass. May be used by output templates to add branch prediction hints, for example. */ extern int final_forward_branch_p (rtx); /* Return 1 if this function uses only the registers that can be safely renumbered. */ extern int only_leaf_regs_used (void); /* Scan IN_RTX and its subexpressions, and renumber all regs into those available in leaf functions. */ extern void leaf_renumber_regs_insn (rtx); /* Locate the proper template for the given insn-code. */ extern const char *get_insn_template (int, rtx); /* Add function NAME to the weak symbols list. VALUE is a weak alias associated with NAME. */ extern int add_weak (tree, const char *, const char *); /* Functions in flow.c */ extern void allocate_for_life_analysis (void); extern int regno_clobbered_at_setjmp (int); /* Functions in varasm.c. */ /* Tell assembler to switch to text section. */ extern void text_section (void); /* Tell assembler to switch to unlikely-to-be-executed text section. */ extern void unlikely_text_section (void); /* Tell assembler to switch to data section. */ extern void data_section (void); /* Tell assembler to switch to read-only data section. This is normally the text section. */ extern void readonly_data_section (void); /* Determine if we're in the text section. */ extern int in_text_section (void); /* Determine if we're in the unlikely-to-be-executed text section. */ extern int in_unlikely_text_section (void); #ifdef CTORS_SECTION_ASM_OP extern void ctors_section (void); #endif #ifdef DTORS_SECTION_ASM_OP extern void dtors_section (void); #endif #ifdef BSS_SECTION_ASM_OP extern void bss_section (void); #endif #ifdef INIT_SECTION_ASM_OP extern void init_section (void); #endif #ifdef FINI_SECTION_ASM_OP extern void fini_section (void); #endif #ifdef EXPORTS_SECTION_ASM_OP extern void exports_section (void); #endif #ifdef DRECTVE_SECTION_ASM_OP extern void drectve_section (void); #endif #ifdef SDATA_SECTION_ASM_OP extern void sdata_section (void); #endif /* Tell assembler to change to section NAME for DECL. If DECL is NULL, just switch to section NAME. If NAME is NULL, get the name from DECL. If RELOC is 1, the initializer for DECL contains relocs. */ extern void named_section (tree, const char *, int); /* Tell assembler to switch to the section for function DECL. */ extern void function_section (tree); /* Tell assembler to switch to the section for string merging. */ extern void mergeable_string_section (tree, unsigned HOST_WIDE_INT, unsigned int); /* Tell assembler to switch to the section for constant merging. */ extern void mergeable_constant_section (enum machine_mode, unsigned HOST_WIDE_INT, unsigned int); /* Declare DECL to be a weak symbol. 
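   (A hedged sketch: a front end that has just seen __attribute__ ((weak))
   on DECL might simply call

     declare_weak (decl);

   before the declaration is assembled.)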
*/ extern void declare_weak (tree); /* Merge weak status. */ extern void merge_weak (tree, tree); /* Emit any pending weak declarations. */ extern void weak_finish (void); /* Decode an `asm' spec for a declaration as a register name. Return the register number, or -1 if nothing specified, or -2 if the ASMSPEC is not `cc' or `memory' and is not recognized, or -3 if ASMSPEC is `cc' and is not recognized, or -4 if ASMSPEC is `memory' and is not recognized. Accept an exact spelling or a decimal number. Prefixes such as % are optional. */ extern int decode_reg_name (const char *); /* Make the rtl for variable VAR be volatile. Use this only for static variables. */ extern void make_var_volatile (tree); extern void assemble_alias (tree, tree); extern void default_assemble_visibility (tree, int); /* Output a string of literal assembler code for an `asm' keyword used between functions. */ extern void assemble_asm (tree); /* Output assembler code for the constant pool of a function and associated with defining the name of the function. DECL describes the function. NAME is the function's name. For the constant pool, we use the current constant pool data. */ extern void assemble_start_function (tree, const char *); /* Output assembler code associated with defining the size of the function. DECL describes the function. NAME is the function's name. */ extern void assemble_end_function (tree, const char *); /* Assemble everything that is needed for a variable or function declaration. Not used for automatic variables, and not used for function definitions. Should not be called for variables of incomplete structure type. TOP_LEVEL is nonzero if this variable has file scope. AT_END is nonzero if this is the special handling, at end of compilation, to define things that have had only tentative definitions. DONT_OUTPUT_DATA if nonzero means don't actually output the initial value (that will be done by the caller). */ extern void assemble_variable (tree, int, int, int); /* Output something to declare an external symbol to the assembler. (Most assemblers don't need this, so we normally output nothing.) Do nothing if DECL is not external. */ extern void assemble_external (tree); /* Assemble code to leave SIZE bytes of zeros. */ extern void assemble_zeros (unsigned HOST_WIDE_INT); /* Assemble an alignment pseudo op for an ALIGN-bit boundary. */ extern void assemble_align (int); extern void assemble_eh_align (int); /* Assemble a string constant with the specified C string as contents. */ extern void assemble_string (const char *, int); /* Similar, for calling a library function FUN. */ extern void assemble_external_libcall (rtx); /* Assemble a label named NAME. */ extern void assemble_label (const char *); extern void assemble_eh_label (const char *); /* Output to FILE a reference to the assembler name of a C-level name NAME. If NAME starts with a *, the rest of NAME is output verbatim. Otherwise NAME is transformed in an implementation-defined way (usually by the addition of an underscore). Many macros in the tm file are defined to call this function. */ extern void assemble_name (FILE *, const char *); /* Return the assembler directive for creating a given kind of integer object. SIZE is the number of bytes in the object and ALIGNED_P indicates whether it is known to be aligned. Return NULL if the assembly dialect has no such directive. The returned string should be printed at the start of a new line and be followed immediately by the object's initial value. 
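   A hedged sketch (X is a hypothetical rtx constant):

     const char *op = integer_asm_op (POINTER_SIZE / BITS_PER_UNIT, 1);
     if (op)
       assemble_integer_with_op (op, x);

   assemble_integer_with_op, declared just below, prints OP and the
   value of X on one line.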
*/ extern const char *integer_asm_op (int, int); /* Use directive OP to assemble an integer object X. Print OP at the start of the line, followed immediately by the value of X. */ extern void assemble_integer_with_op (const char *, rtx); /* The default implementation of the asm_out.integer target hook. */ extern bool default_assemble_integer (rtx, unsigned int, int); /* Assemble the integer constant X into an object of SIZE bytes. ALIGN is the alignment of the integer in bits. Return 1 if we were able to output the constant, otherwise 0. If FORCE is nonzero, abort if we can't output the constant. */ extern bool assemble_integer (rtx, unsigned, unsigned, int); /* An interface to assemble_integer for the common case in which a value is fully aligned and must be printed. VALUE is the value of the integer object and SIZE is the number of bytes it contains. */ #define assemble_aligned_integer(SIZE, VALUE) \ assemble_integer (VALUE, SIZE, (SIZE) * BITS_PER_UNIT, 1) #ifdef REAL_VALUE_TYPE_SIZE /* Assemble the floating-point constant D into an object of size MODE. */ extern void assemble_real (REAL_VALUE_TYPE, enum machine_mode, unsigned); #endif /* Return the size of the constant pool. */ extern int get_pool_size (void); #ifdef HAVE_peephole extern rtx peephole (rtx); #endif /* Write all the constants in the constant pool. */ extern void output_constant_pool (const char *, tree); /* Return nonzero if VALUE is a valid constant-valued expression for use in initializing a static variable; one that can be an element of a "constant" initializer. Return null_pointer_node if the value is absolute; if it is relocatable, return the variable that determines the relocation. We assume that VALUE has been folded as much as possible; therefore, we do not need to check for such things as arithmetic-combinations of integers. */ extern tree initializer_constant_valid_p (tree, tree); /* Output assembler code for constant EXP to FILE, with no label. This includes the pseudo-op such as ".int" or ".byte", and a newline. Assumes output_addressed_constants has been done on EXP already. Generate exactly SIZE bytes of assembler data, padding at the end with zeros if necessary. SIZE must always be specified. ALIGN is the alignment in bits that may be assumed for the data. */ extern void output_constant (tree, unsigned HOST_WIDE_INT, unsigned int); /* When outputting delayed branch sequences, this rtx holds the sequence being output. It is null when no delayed branch sequence is being output, so it can be used as a test in the insn output code. This variable is defined in final.c. */ extern rtx final_sequence; /* The line number of the beginning of the current function. Various md code needs this so that it can output relative linenumbers. */ #ifdef SDB_DEBUGGING_INFO /* Avoid undef sym in certain broken linkers. */ extern int sdb_begin_function_line; #endif /* File in which assembler code is being written. */ #ifdef BUFSIZ extern FILE *asm_out_file; #endif /* The first global object in the file. */ extern const char *first_global_object_name; /* The first weak object in the file. */ extern const char *weak_global_object_name; /* Nonzero if function being compiled doesn't contain any calls (ignoring the prologue and epilogue). This is set prior to local register allocation and is valid for the remaining compiler passes. */ extern int current_function_is_leaf; /* Nonzero if function being compiled doesn't contain any instructions that can throw an exception. This is set prior to final. 
*/ extern int current_function_nothrow; /* Nonzero if function being compiled doesn't modify the stack pointer (ignoring the prologue and epilogue). This is only valid after life_analysis has run. */ extern int current_function_sp_is_unchanging; /* Nonzero if the function being compiled is a leaf function which only uses leaf registers. This is valid after reload (specifically after sched2) and is useful only if the port defines LEAF_REGISTERS. */ extern int current_function_uses_only_leaf_regs; /* Default file in which to dump debug output. */ #ifdef BUFSIZ extern FILE *dump_file; #endif /* Nonnull if the insn currently being emitted was a COND_EXEC pattern. */ extern rtx current_insn_predicate; /* Last insn processed by final_scan_insn. */ extern rtx current_output_insn; /* Nonzero while outputting an `asm' with operands. This means that inconsistencies are the user's fault, so don't abort. The precise value is the insn being output, to pass to error_for_asm. */ extern rtx this_is_asm_operands; /* Carry information from ASM_DECLARE_OBJECT_NAME to ASM_FINISH_DECLARE_OBJECT. */ extern int size_directive_output; extern tree last_assemble_variable_decl; /* Decide whether DECL needs to be in a writable section. RELOC is the same as for SELECT_SECTION. */ extern bool decl_readonly_section (tree, int); extern bool decl_readonly_section_1 (tree, int, int); /* This can be used to compute RELOC for the function above, when given a constant expression. */ extern int compute_reloc_for_constant (tree); /* User label prefix in effect for this compilation. */ extern const char *user_label_prefix; /* Default target function prologue and epilogue assembler output. */ extern void default_function_pro_epilogue (FILE *, HOST_WIDE_INT); /* Tell assembler to switch to the section for the exception table. */ extern void default_exception_section (void); /* Tell assembler to switch to the section for the EH frames. */ extern void named_section_eh_frame_section (void); extern void collect2_eh_frame_section (void); extern void default_eh_frame_section (void); /* Default target hook that outputs nothing to a stream. */ extern void no_asm_to_stream (FILE *); /* Flags controlling properties of a section. 
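   The SECTION_* bits below are or-ed together and handed to the named
   section routines.  A hedged sketch (the section name is made up):

     named_section_flags (".gnu.linkonce.t.example",
                          SECTION_CODE | SECTION_LINKONCE);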
*/ #define SECTION_ENTSIZE 0x000ff /* entity size in section */ #define SECTION_CODE 0x00100 /* contains code */ #define SECTION_WRITE 0x00200 /* data is writable */ #define SECTION_DEBUG 0x00400 /* contains debug data */ #define SECTION_LINKONCE 0x00800 /* is linkonce */ #define SECTION_SMALL 0x01000 /* contains "small data" */ #define SECTION_BSS 0x02000 /* contains zeros only */ #define SECTION_FORGET 0x04000 /* forget that we've entered the section */ #define SECTION_MERGE 0x08000 /* contains mergeable data */ #define SECTION_STRINGS 0x10000 /* contains zero terminated strings without embedded zeros */ #define SECTION_OVERRIDE 0x20000 /* allow override of default flags */ #define SECTION_TLS 0x40000 /* contains thread-local storage */ #define SECTION_NOTYPE 0x80000 /* don't output @progbits */ #define SECTION_MACH_DEP 0x100000 /* subsequent bits reserved for target */ extern unsigned int get_named_section_flags (const char *); extern bool set_named_section_flags (const char *, unsigned int); extern void named_section_flags (const char *, unsigned int); extern bool named_section_first_declaration (const char *); extern unsigned int default_section_type_flags (tree, const char *, int); extern unsigned int default_section_type_flags_1 (tree, const char *, int, int); extern void default_no_named_section (const char *, unsigned int); extern void default_elf_asm_named_section (const char *, unsigned int); extern void default_coff_asm_named_section (const char *, unsigned int); extern void default_pe_asm_named_section (const char *, unsigned int); extern void default_stabs_asm_out_destructor (rtx, int); extern void default_named_section_asm_out_destructor (rtx, int); extern void default_dtor_section_asm_out_destructor (rtx, int); extern void default_stabs_asm_out_constructor (rtx, int); extern void default_named_section_asm_out_constructor (rtx, int); extern void default_ctor_section_asm_out_constructor (rtx, int); extern void default_select_section (tree, int, unsigned HOST_WIDE_INT); extern void default_elf_select_section (tree, int, unsigned HOST_WIDE_INT); extern void default_elf_select_section_1 (tree, int, unsigned HOST_WIDE_INT, int); extern void default_unique_section (tree, int); extern void default_unique_section_1 (tree, int, int); extern void default_select_rtx_section (enum machine_mode, rtx, unsigned HOST_WIDE_INT); extern void default_elf_select_rtx_section (enum machine_mode, rtx, unsigned HOST_WIDE_INT); extern void default_encode_section_info (tree, rtx, int); extern const char *default_strip_name_encoding (const char *); extern bool default_binds_local_p (tree); extern bool default_binds_local_p_1 (tree, int); extern void default_globalize_label (FILE *, const char *); extern void default_emit_unwind_label (FILE *, tree, int, int); extern void default_internal_label (FILE *, const char *, unsigned long); extern void default_file_start (void); extern void file_end_indicate_exec_stack (void); extern bool default_valid_pointer_mode (enum machine_mode); extern int default_address_cost (rtx); #endif /* ! GCC_OUTPUT_H */ /* Definitions for code generation pass of GNU compiler. Copyright (C) 1987, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_EXPR_H #define GCC_EXPR_H /* For inhibit_defer_pop */ /* For XEXP, GEN_INT, rtx_code */ /* For optimize_size */ /* For host_integerp, tree_low_cst, convert, size_binop, ssize_int, TREE_CODE, TYPE_SIZE, int_size_in_bytes, */ /* For GET_MODE_BITSIZE, word_mode */ /* The default branch cost is 1. */ #ifndef BRANCH_COST #define BRANCH_COST 1 #endif /* Macros to access the slots of a QUEUED rtx. Here rather than in rtl.h because only the expansion pass should ever encounter a QUEUED. */ /* The variable for which an increment is queued. */ #define QUEUED_VAR(P) XEXP (P, 0) /* If the increment has been emitted, this is the insn that does the increment. It is zero before the increment is emitted. If more than one insn is emitted, this is the first insn. */ #define QUEUED_INSN(P) XEXP (P, 1) /* If a pre-increment copy has been generated, this is the copy (it is a temporary reg). Zero if no copy made yet. */ #define QUEUED_COPY(P) XEXP (P, 2) /* This is the body to use for the insn to do the increment. It is used to emit the increment. */ #define QUEUED_BODY(P) XEXP (P, 3) /* Next QUEUED in the queue. */ #define QUEUED_NEXT(P) XEXP (P, 4) /* This is the 4th arg to `expand_expr'. EXPAND_STACK_PARM means we are possibly expanding a call param onto the stack. Choosing a value of 2 isn't special; It just allows some code optimization in store_expr. EXPAND_SUM means it is ok to return a PLUS rtx or MULT rtx. EXPAND_INITIALIZER is similar but also record any labels on forced_labels. EXPAND_CONST_ADDRESS means it is ok to return a MEM whose address is a constant that is not a legitimate address. EXPAND_WRITE means we are only going to write to the resulting rtx. EXPAND_MEMORY means we are interested in a memory result, even if the memory is constant and we could have propagated a constant value. */ enum expand_modifier {EXPAND_NORMAL = 0, EXPAND_STACK_PARM = 2, EXPAND_SUM, EXPAND_CONST_ADDRESS, EXPAND_INITIALIZER, EXPAND_WRITE, EXPAND_MEMORY}; /* Prevent the compiler from deferring stack pops. See inhibit_defer_pop for more information. */ #define NO_DEFER_POP (inhibit_defer_pop += 1) /* Allow the compiler to defer stack pops. See inhibit_defer_pop for more information. */ #define OK_DEFER_POP (inhibit_defer_pop -= 1) /* If a memory-to-memory move would take MOVE_RATIO or more simple move-instruction sequences, we will do a movstr or libcall instead. */ #ifndef MOVE_RATIO #if defined (HAVE_movstrqi) || defined (HAVE_movstrhi) || defined (HAVE_movstrsi) || defined (HAVE_movstrdi) || defined (HAVE_movstrti) #define MOVE_RATIO 2 #else /* If we are optimizing for space (-Os), cut down the default move ratio. */ #define MOVE_RATIO (optimize_size ? 3 : 15) #endif #endif /* If a clear memory operation would take CLEAR_RATIO or more simple move-instruction sequences, we will do a clrstr or libcall instead. */ #ifndef CLEAR_RATIO #if defined (HAVE_clrstrqi) || defined (HAVE_clrstrhi) || defined (HAVE_clrstrsi) || defined (HAVE_clrstrdi) || defined (HAVE_clrstrti) #define CLEAR_RATIO 2 #else /* If we are optimizing for space, cut down the default clear ratio. 
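   (As with MOVE_RATIO above, a target header can pre-empt this default;
   a purely hypothetical port with a cheap block-clear pattern might
   supply

     #define CLEAR_RATIO 2

   before this point.)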
*/ #define CLEAR_RATIO (optimize_size ? 3 : 15) #endif #endif enum direction {none, upward, downward}; /* Structure to record the size of a sequence of arguments as the sum of a tree-expression and a constant. This structure is also used to store offsets from the stack, which might be negative, so the variable part must be ssizetype, not sizetype. */ struct args_size { HOST_WIDE_INT constant; tree var; }; /* Package up various arg related fields of struct args for locate_and_pad_parm. */ struct locate_and_pad_arg_data { /* Size of this argument on the stack, rounded up for any padding it gets. If REG_PARM_STACK_SPACE is defined, then register parms are counted here, otherwise they aren't. */ struct args_size size; /* Offset of this argument from beginning of stack-args. */ struct args_size offset; /* Offset to the start of the stack slot. Different from OFFSET if this arg pads downward. */ struct args_size slot_offset; /* The amount that the stack pointer needs to be adjusted to force alignment for the next argument. */ struct args_size alignment_pad; /* Which way we should pad this arg. */ enum direction where_pad; }; /* Add the value of the tree INC to the `struct args_size' TO. */ #define ADD_PARM_SIZE(TO, INC) \ do { \ tree inc = (INC); \ if (host_integerp (inc, 0)) \ (TO).constant += tree_low_cst (inc, 0); \ else if ((TO).var == 0) \ (TO).var = convert (ssizetype, inc); \ else \ (TO).var = size_binop (PLUS_EXPR, (TO).var, \ convert (ssizetype, inc)); \ } while (0) #define SUB_PARM_SIZE(TO, DEC) \ do { \ tree dec = (DEC); \ if (host_integerp (dec, 0)) \ (TO).constant -= tree_low_cst (dec, 0); \ else if ((TO).var == 0) \ (TO).var = size_binop (MINUS_EXPR, ssize_int (0), \ convert (ssizetype, dec)); \ else \ (TO).var = size_binop (MINUS_EXPR, (TO).var, \ convert (ssizetype, dec)); \ } while (0) /* Convert the implicit sum in a `struct args_size' into a tree of type ssizetype. */ #define ARGS_SIZE_TREE(SIZE) \ ((SIZE).var == 0 ? ssize_int ((SIZE).constant) \ : size_binop (PLUS_EXPR, convert (ssizetype, (SIZE).var), \ ssize_int ((SIZE).constant))) /* Convert the implicit sum in a `struct args_size' into an rtx. */ #define ARGS_SIZE_RTX(SIZE) \ ((SIZE).var == 0 ? GEN_INT ((SIZE).constant) \ : expand_expr (ARGS_SIZE_TREE (SIZE), NULL_RTX, VOIDmode, 0)) /* Supply a default definition for FUNCTION_ARG_PADDING: usually pad upward, but pad short args downward on big-endian machines. */ #define DEFAULT_FUNCTION_ARG_PADDING(MODE, TYPE) \ (! BYTES_BIG_ENDIAN \ ? upward \ : (((MODE) == BLKmode \ ? ((TYPE) && TREE_CODE (TYPE_SIZE (TYPE)) == INTEGER_CST \ && int_size_in_bytes (TYPE) < (PARM_BOUNDARY / BITS_PER_UNIT)) \ : GET_MODE_BITSIZE (MODE) < PARM_BOUNDARY) \ ? downward : upward)) #ifndef FUNCTION_ARG_PADDING #define FUNCTION_ARG_PADDING(MODE, TYPE) \ DEFAULT_FUNCTION_ARG_PADDING ((MODE), (TYPE)) #endif /* Supply a default definition for FUNCTION_ARG_BOUNDARY. Normally, we let FUNCTION_ARG_PADDING, which also pads the length, handle any needed alignment. */ #ifndef FUNCTION_ARG_BOUNDARY #define FUNCTION_ARG_BOUNDARY(MODE, TYPE) PARM_BOUNDARY #endif tree split_complex_types (tree); tree split_complex_values (tree); /* Nonzero if we do not know how to pass TYPE solely in registers. */ extern bool default_must_pass_in_stack (enum machine_mode, tree); #ifndef MUST_PASS_IN_STACK #define MUST_PASS_IN_STACK(MODE,TYPE) default_must_pass_in_stack(MODE, TYPE) #endif /* Supply a default definition of STACK_SAVEAREA_MODE for emit_stack_save. Normally move_insn, so Pmode stack pointer. 
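   (A port whose stack save and restore patterns work on a wider mode
   can override this; a purely hypothetical example:

     #define STACK_SAVEAREA_MODE(LEVEL) \
       ((LEVEL) == SAVE_NONLOCAL ? DImode : Pmode)

   where SAVE_NONLOCAL is one of the enum save_level values used by
   emit_stack_save.)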
*/ #ifndef STACK_SAVEAREA_MODE #define STACK_SAVEAREA_MODE(LEVEL) Pmode #endif /* Supply a default definition of STACK_SIZE_MODE for allocate_dynamic_stack_space. Normally PLUS/MINUS, so word_mode. */ #ifndef STACK_SIZE_MODE #define STACK_SIZE_MODE word_mode #endif /* Provide default values for the macros controlling stack checking. */ #ifndef STACK_CHECK_BUILTIN #define STACK_CHECK_BUILTIN 0 #endif /* The default interval is one page. */ #ifndef STACK_CHECK_PROBE_INTERVAL #define STACK_CHECK_PROBE_INTERVAL 4096 #endif /* The default is to do a store into the stack. */ #ifndef STACK_CHECK_PROBE_LOAD #define STACK_CHECK_PROBE_LOAD 0 #endif /* This value is arbitrary, but should be sufficient for most machines. */ #ifndef STACK_CHECK_PROTECT #define STACK_CHECK_PROTECT (75 * UNITS_PER_WORD) #endif /* Make the maximum frame size be the largest we can and still only need one probe per function. */ #ifndef STACK_CHECK_MAX_FRAME_SIZE #define STACK_CHECK_MAX_FRAME_SIZE \ (STACK_CHECK_PROBE_INTERVAL - UNITS_PER_WORD) #endif /* This is arbitrary, but should be large enough everywhere. */ #ifndef STACK_CHECK_FIXED_FRAME_SIZE #define STACK_CHECK_FIXED_FRAME_SIZE (4 * UNITS_PER_WORD) #endif /* Provide a reasonable default for the maximum size of an object to allocate in the fixed frame. We may need to be able to make this controllable by the user at some point. */ #ifndef STACK_CHECK_MAX_VAR_SIZE #define STACK_CHECK_MAX_VAR_SIZE (STACK_CHECK_MAX_FRAME_SIZE / 100) #endif /* Functions from optabs.c, commonly used, and without need for the optabs tables: */ /* Passed to expand_simple_binop and expand_binop to say which options to try to use if the requested operation can't be open-coded on the requisite mode. Either OPTAB_LIB or OPTAB_LIB_WIDEN says try using a library call. Either OPTAB_WIDEN or OPTAB_LIB_WIDEN says try using a wider mode. OPTAB_MUST_WIDEN says try widening and don't try anything else. */ enum optab_methods { OPTAB_DIRECT, OPTAB_LIB, OPTAB_WIDEN, OPTAB_LIB_WIDEN, OPTAB_MUST_WIDEN }; /* Generate code for a simple binary or unary operation. "Simple" in this case means "can be unambiguously described by a (mode, code) pair and mapped to a single optab." */ extern rtx expand_simple_binop (enum machine_mode, enum rtx_code, rtx, rtx, rtx, int, enum optab_methods); extern rtx expand_simple_unop (enum machine_mode, enum rtx_code, rtx, rtx, int); /* Report whether the machine description contains an insn which can perform the operation described by CODE and MODE. */ extern int have_insn_for (enum rtx_code, enum machine_mode); /* Emit code to make a call to a constant function or a library call. */ extern void emit_libcall_block (rtx, rtx, rtx, rtx); /* Create but don't emit one rtl instruction to perform certain operations. Modes must match; operands must meet the operation's predicates. Likewise for subtraction and for just copying. These do not call protect_from_queue; caller must do so. */ extern rtx gen_add2_insn (rtx, rtx); extern rtx gen_add3_insn (rtx, rtx, rtx); extern rtx gen_sub2_insn (rtx, rtx); extern rtx gen_sub3_insn (rtx, rtx, rtx); extern rtx gen_move_insn (rtx, rtx); extern int have_add2_insn (rtx, rtx); extern int have_sub2_insn (rtx, rtx); /* Emit a pair of rtl insns to compare two rtx's and to jump to a label if the comparison is true. */ extern void emit_cmp_and_jump_insns (rtx, rtx, enum rtx_code, rtx, enum machine_mode, int, rtx); /* Generate code to indirectly jump to a location given in the rtx LOC. 
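   A hedged sketch (FUNADDR is a hypothetical rtx holding the target
   address):

     emit_indirect_jump (copy_to_mode_reg (Pmode, funaddr));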
*/ extern void emit_indirect_jump (rtx); /* Generated automatically by the program `genconfig' from the machine description file `md'. */ #ifndef GCC_INSN_CONFIG_H #define GCC_INSN_CONFIG_H #define MAX_RECOG_OPERANDS 30 #define MAX_DUP_OPERANDS 4 #ifndef MAX_INSNS_PER_SPLIT #define MAX_INSNS_PER_SPLIT 5 #endif #define CC0_P(X) ((X) ? 0 : 0) #define HAVE_conditional_move 1 #define HAVE_peephole2 1 #define MAX_INSNS_PER_PEEP2 4 #endif /* GCC_INSN_CONFIG_H */ #ifdef HAVE_conditional_move /* Emit a conditional move operation. */ rtx emit_conditional_move (rtx, enum rtx_code, rtx, rtx, enum machine_mode, rtx, rtx, enum machine_mode, int); /* Return nonzero if the conditional move is supported. */ int can_conditionally_move_p (enum machine_mode mode); #endif rtx emit_conditional_add (rtx, enum rtx_code, rtx, rtx, enum machine_mode, rtx, rtx, enum machine_mode, int); /* Functions from expmed.c: */ /* Arguments MODE, RTX: return an rtx for the negation of that value. May emit insns. */ extern rtx negate_rtx (enum machine_mode, rtx); /* Expand a logical AND operation. */ extern rtx expand_and (enum machine_mode, rtx, rtx, rtx); /* Emit a store-flag operation. */ extern rtx emit_store_flag (rtx, enum rtx_code, rtx, rtx, enum machine_mode, int, int); /* Like emit_store_flag, but always succeeds. */ extern rtx emit_store_flag_force (rtx, enum rtx_code, rtx, rtx, enum machine_mode, int, int); /* Functions from loop.c: */ /* Given an insn and condition, return a canonical description of the test being made. */ extern rtx canonicalize_condition (rtx, rtx, int, rtx *, rtx, int); /* Given a JUMP_INSN, return a canonical description of the test being made. */ extern rtx get_condition (rtx, rtx *, int); /* Generate a conditional trap instruction. */ extern rtx gen_cond_trap (enum rtx_code, rtx, rtx, rtx); /* Functions from builtins.c: */ extern rtx expand_builtin (tree, rtx, rtx, enum machine_mode, int); extern tree std_build_builtin_va_list (void); extern void std_expand_builtin_va_start (tree, rtx); extern rtx std_expand_builtin_va_arg (tree, tree); extern rtx expand_builtin_va_arg (tree, tree); extern rtx default_expand_builtin (tree, rtx, rtx, enum machine_mode, int); extern void expand_builtin_setjmp_setup (rtx, rtx); extern void expand_builtin_setjmp_receiver (rtx); extern void expand_builtin_longjmp (rtx, rtx); extern rtx expand_builtin_saveregs (void); extern void expand_builtin_trap (void); extern tree simplify_builtin_fputs (tree, int, int, tree); extern tree simplify_builtin_strcpy (tree, tree); extern tree simplify_builtin_strncpy (tree, tree); /* Functions from expr.c: */ /* This is run once per compilation to set up which modes can be used directly in memory and to initialize the block move optab. */ extern void init_expr_once (void); /* This is run at the start of compiling a function. */ extern void init_expr (void); /* This is run at the end of compiling a function. */ extern void finish_expr_for_function (void); /* Use protect_from_queue to convert a QUEUED expression into something that you can put immediately into an instruction. */ extern rtx protect_from_queue (rtx, int); /* Perform all the pending incrementations. */ extern void emit_queue (void); /* Tell if something has a queued subexpression. */ extern int queued_subexp_p (rtx); /* Emit some rtl insns to move data between rtx's, converting machine modes. Both modes must be floating or both fixed. */ extern void convert_move (rtx, rtx, int); /* Convert an rtx to specified machine mode and return the result. 
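   A hedged sketch (VAL is a hypothetical HImode rtx; the last argument
   is nonzero for an unsigned conversion):

     rtx wide = convert_to_mode (SImode, val, 1);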
*/ extern rtx convert_to_mode (enum machine_mode, rtx, int); /* Convert an rtx to MODE from OLDMODE and return the result. */ extern rtx convert_modes (enum machine_mode, enum machine_mode, rtx, int); /* Emit code to move a block Y to a block X. */ enum block_op_methods { BLOCK_OP_NORMAL, BLOCK_OP_NO_LIBCALL, BLOCK_OP_CALL_PARM }; extern void init_block_move_fn (const char *); extern void init_block_clear_fn (const char *); extern rtx emit_block_move (rtx, rtx, rtx, enum block_op_methods); /* Copy all or part of a value X into registers starting at REGNO. The number of registers to be filled is NREGS. */ extern void move_block_to_reg (int, rtx, int, enum machine_mode); /* Copy all or part of a BLKmode value X out of registers starting at REGNO. The number of registers to be filled is NREGS. */ extern void move_block_from_reg (int, rtx, int); /* Generate a non-consecutive group of registers represented by a PARALLEL. */ extern rtx gen_group_rtx (rtx); /* Load a BLKmode value into non-consecutive registers represented by a PARALLEL. */ extern void emit_group_load (rtx, rtx, tree, int); /* Move a non-consecutive group of registers represented by a PARALLEL into a non-consecutive group of registers represented by a PARALLEL. */ extern void emit_group_move (rtx, rtx); /* Store a BLKmode value from non-consecutive registers represented by a PARALLEL. */ extern void emit_group_store (rtx, rtx, tree, int); /* Copy BLKmode object from a set of registers. */ extern rtx copy_blkmode_from_reg (rtx, rtx, tree); /* Mark REG as holding a parameter for the next CALL_INSN. */ extern void use_reg (rtx *, rtx); /* Mark NREGS consecutive regs, starting at REGNO, as holding parameters for the next CALL_INSN. */ extern void use_regs (rtx *, int, int); /* Mark a PARALLEL as holding a parameter for the next CALL_INSN. */ extern void use_group_regs (rtx *, rtx); /* Write zeros through the storage of OBJECT. If OBJECT has BLKmode, SIZE is its length in bytes. */ extern rtx clear_storage (rtx, rtx); /* Determine whether the LEN bytes can be moved by using several move instructions. Return nonzero if a call to move_by_pieces should succeed. */ extern int can_move_by_pieces (unsigned HOST_WIDE_INT, unsigned int); /* Return nonzero if it is desirable to store LEN bytes generated by CONSTFUN with several move instructions by store_by_pieces function. CONSTFUNDATA is a pointer which will be passed as argument in every CONSTFUN call. ALIGN is maximum alignment we can assume. */ extern int can_store_by_pieces (unsigned HOST_WIDE_INT, rtx (*) (void *, HOST_WIDE_INT, enum machine_mode), void *, unsigned int); /* Generate several move instructions to store LEN bytes generated by CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a pointer which will be passed as argument in every CONSTFUN call. ALIGN is maximum alignment we can assume. Returns TO + LEN. */ extern rtx store_by_pieces (rtx, unsigned HOST_WIDE_INT, rtx (*) (void *, HOST_WIDE_INT, enum machine_mode), void *, unsigned int, int); /* Emit insns to set X from Y. */ extern rtx emit_move_insn (rtx, rtx); /* Emit insns to set X from Y, with no frills. */ extern rtx emit_move_insn_1 (rtx, rtx); /* Push a block of length SIZE (perhaps variable) and return an rtx to address the beginning of the block. */ extern rtx push_block (rtx, int, int); /* Generate code to push something onto the stack, given its mode and type. 
*/ extern void emit_push_insn (rtx, enum machine_mode, tree, rtx, unsigned int, int, rtx, int, rtx, rtx, int, rtx); /* Expand an assignment that stores the value of FROM into TO. */ extern rtx expand_assignment (tree, tree, int); /* Generate code for computing expression EXP, and storing the value into TARGET. If SUGGEST_REG is nonzero, copy the value through a register and return that register, if that is possible. */ extern rtx store_expr (tree, rtx, int); /* Given an rtx that may include add and multiply operations, generate them as insns and return a pseudo-reg containing the value. Useful after calling expand_expr with 1 as sum_ok. */ extern rtx force_operand (rtx, rtx); /* Work horse for expand_expr. */ extern rtx expand_expr_real (tree, rtx, enum machine_mode, enum expand_modifier, rtx *); /* Generate code for computing expression EXP. An rtx for the computed value is returned. The value is never null. In the case of a void EXP, const0_rtx is returned. */ static inline rtx expand_expr (tree exp, rtx target, enum machine_mode mode, enum expand_modifier modifier) { return expand_expr_real (exp, target, mode, modifier, NULL); } extern void expand_var (tree); /* At the start of a function, record that we have no previously-pushed arguments waiting to be popped. */ extern void init_pending_stack_adjust (void); /* When exiting from function, if safe, clear out any pending stack adjust so the adjustment won't get done. */ extern void clear_pending_stack_adjust (void); /* Pop any previously-pushed arguments that have not been popped yet. */ extern void do_pending_stack_adjust (void); /* Return the tree node and offset if a given argument corresponds to a string constant. */ extern tree string_constant (tree, tree *); /* Generate code to evaluate EXP and jump to LABEL if the value is zero. */ extern void jumpifnot (tree, rtx); /* Generate code to evaluate EXP and jump to LABEL if the value is nonzero. */ extern void jumpif (tree, rtx); /* Generate code to evaluate EXP and jump to IF_FALSE_LABEL if the result is zero, or IF_TRUE_LABEL if the result is one. */ extern void do_jump (tree, rtx, rtx); /* Generate rtl to compare two rtx's, will call emit_cmp_insn. */ extern rtx compare_from_rtx (rtx, rtx, enum rtx_code, int, enum machine_mode, rtx); extern void do_compare_rtx_and_jump (rtx, rtx, enum rtx_code, int, enum machine_mode, rtx, rtx, rtx); /* Two different ways of generating switch statements. */ extern int try_casesi (tree, tree, tree, tree, rtx, rtx); extern int try_tablejump (tree, tree, tree, tree, rtx, rtx); /* Smallest number of adjacent cases before we use a jump table. XXX Should be a target hook. */ extern unsigned int case_values_threshold (void); /* Functions from alias.c */ /* Exported functions from alias.c Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #ifndef GCC_ALIAS_H #define GCC_ALIAS_H extern HOST_WIDE_INT new_alias_set (void); extern void record_alias_subset (HOST_WIDE_INT, HOST_WIDE_INT); extern HOST_WIDE_INT get_varargs_alias_set (void); extern HOST_WIDE_INT get_frame_alias_set (void); extern void record_base_value (unsigned int, rtx, int); extern int can_address_p (tree); #endif /* GCC_ALIAS_H */ /* extern HOST_WIDE_INT get_varargs_alias_set (void); */ /* extern HOST_WIDE_INT get_frame_alias_set (void); */ /* extern void record_base_value (unsigned int, rtx, int); */ /* extern void record_alias_subset (HOST_WIDE_INT, HOST_WIDE_INT); */ /* extern HOST_WIDE_INT new_alias_set (void); */ /* extern int can_address_p (tree); */ /* rtl.h and tree.h were included. */ /* Return an rtx for the size in bytes of the value of an expr. */ extern rtx expr_size (tree); /* Return a wide integer for the size in bytes of the value of EXP, or -1 if the size can vary or is larger than an integer. */ extern HOST_WIDE_INT int_expr_size (tree); /* Convert a stack slot address ADDR valid in function FNDECL into an address valid in this function (using a static chain). */ extern rtx fix_lexical_addr (rtx, tree); /* Return the address of the trampoline for entering nested fn FUNCTION. */ extern rtx trampoline_address (tree); /* Return an rtx that refers to the value returned by a function in its original home. This becomes invalid if any more code is emitted. */ extern rtx hard_function_value (tree, tree, int); extern rtx prepare_call_address (rtx, rtx, rtx *, int, int); extern rtx expand_call (tree, rtx, int); extern void fixup_tail_calls (void); #ifdef TREE_CODE extern rtx expand_shift (enum tree_code, enum machine_mode, rtx, tree, rtx, int); extern rtx expand_divmod (int, enum tree_code, enum machine_mode, rtx, rtx, rtx, int); #endif extern void locate_and_pad_parm (enum machine_mode, tree, int, int, tree, struct args_size *, struct locate_and_pad_arg_data *); /* Return the CODE_LABEL rtx for a LABEL_DECL, creating it if necessary. */ extern rtx label_rtx (tree); /* As label_rtx, but additionally the label is placed on the forced label list of its containing function (i.e. it is treated as reachable even if how is not obvious). */ extern rtx force_label_rtx (tree); /* Indicate how an input argument register was promoted. */ extern rtx promoted_input_arg (unsigned int, enum machine_mode *, int *); /* Return an rtx like arg but sans any constant terms. Returns the original rtx if it has no constant terms. The constant terms are added and stored via a second arg. */ extern rtx eliminate_constant_term (rtx, rtx *); /* Convert arg to a valid memory address for specified machine mode, by emitting insns to perform arithmetic if nec. */ extern rtx memory_address (enum machine_mode, rtx); /* Like `memory_address' but pretent `flag_force_addr' is 0. */ extern rtx memory_address_noforce (enum machine_mode, rtx); /* Return a memory reference like MEMREF, but with its mode changed to MODE and its address changed to ADDR. (VOIDmode means don't change the mode. NULL for ADDR means don't change the address.) */ extern rtx change_address (rtx, enum machine_mode, rtx); /* Return a memory reference like MEMREF, but with its mode changed to MODE and its address offset by OFFSET bytes. */ #define adjust_address(MEMREF, MODE, OFFSET) \ adjust_address_1 (MEMREF, MODE, OFFSET, 1, 1) /* Likewise, but the reference is not required to be valid. 
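   A hedged sketch (MEM is a hypothetical DImode memory reference being
   split into word-sized pieces; the _nv variant defined below is for
   addresses that have not been validated yet):

     rtx word0 = adjust_address (mem, SImode, 0);
     rtx word1 = adjust_address_nv (mem, SImode, 4);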
*/ #define adjust_address_nv(MEMREF, MODE, OFFSET) \ adjust_address_1 (MEMREF, MODE, OFFSET, 0, 1) /* Return a memory reference like MEMREF, but with its mode changed to MODE and its address changed to ADDR, which is assumed to be increased by OFFSET bytes from MEMREF. */ #define adjust_automodify_address(MEMREF, MODE, ADDR, OFFSET) \ adjust_automodify_address_1 (MEMREF, MODE, ADDR, OFFSET, 1) /* Likewise, but the reference is not required to be valid. */ #define adjust_automodify_address_nv(MEMREF, MODE, ADDR, OFFSET) \ adjust_automodify_address_1 (MEMREF, MODE, ADDR, OFFSET, 0) extern rtx adjust_address_1 (rtx, enum machine_mode, HOST_WIDE_INT, int, int); extern rtx adjust_automodify_address_1 (rtx, enum machine_mode, rtx, HOST_WIDE_INT, int); /* Return a memory reference like MEMREF, but whose address is changed by adding OFFSET, an RTX, to it. POW2 is the highest power of two factor known to be in OFFSET (possibly 1). */ extern rtx offset_address (rtx, rtx, unsigned HOST_WIDE_INT); /* Definitions from emit-rtl.c */ /* Exported functions from emit-rtl.c Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_EMIT_RTL_H #define GCC_EMIT_RTL_H /* Set the alias set of MEM to SET. */ extern void set_mem_alias_set (rtx, HOST_WIDE_INT); /* Set the alignment of MEM to ALIGN bits. */ extern void set_mem_align (rtx, unsigned int); /* Set the expr for MEM to EXPR. */ extern void set_mem_expr (rtx, tree); /* Set the offset for MEM to OFFSET. */ extern void set_mem_offset (rtx, rtx); /* Set the size for MEM to SIZE. */ extern void set_mem_size (rtx, rtx); /* Return a memory reference like MEMREF, but with its address changed to ADDR. The caller is asserting that the actual piece of memory pointed to is the same, just the form of the address is being changed, such as by putting something into a register. */ extern rtx replace_equiv_address (rtx, rtx); /* Likewise, but the reference is not required to be valid. */ extern rtx replace_equiv_address_nv (rtx, rtx); #endif /* GCC_EMIT_RTL_H */ /* Return a memory reference like MEMREF, but with its mode widened to MODE and adjusted by OFFSET. */ extern rtx widen_memory_access (rtx, enum machine_mode, HOST_WIDE_INT); /* Return a memory reference like MEMREF, but which is known to have a valid address. */ extern rtx validize_mem (rtx); /* Given REF, either a MEM or a REG, and T, either the type of X or the expression corresponding to REF, set RTX_UNCHANGING_P if appropriate. */ extern void maybe_set_unchanging (rtx, tree); /* Given REF, a MEM, and T, either the type of X or the expression corresponding to REF, set the memory attributes. OBJECTP is nonzero if we are making a new object of this type. 
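   A hedged sketch (MEM is a hypothetical freshly generated MEM rtx and
   EXP the tree expression it corresponds to):

     set_mem_attributes (mem, exp, 1);

   If the caller later learns a stricter alignment, it can follow up
   with set_mem_align, declared above.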
*/ extern void set_mem_attributes (rtx, tree, int); /* Similar, except that BITPOS has not yet been applied to REF, so if we alter MEM_OFFSET according to T then we should subtract BITPOS expecting that it'll be added back in later. */ extern void set_mem_attributes_minus_bitpos (rtx, tree, int, HOST_WIDE_INT); /* Assemble the static constant template for function entry trampolines. */ extern rtx assemble_trampoline_template (void); /* Given rtx, return new rtx whose address won't be affected by any side effects. It has been copied to a new temporary reg. */ extern rtx stabilize (rtx); /* Given an rtx, copy all regs it refers to into new temps and return a modified copy that refers to the new temps. */ extern rtx copy_all_regs (rtx); /* Copy given rtx to a new temp reg and return that. */ extern rtx copy_to_reg (rtx); /* Like copy_to_reg but always make the reg Pmode. */ extern rtx copy_addr_to_reg (rtx); /* Like copy_to_reg but always make the reg the specified mode MODE. */ extern rtx copy_to_mode_reg (enum machine_mode, rtx); /* Copy given rtx to given temp reg and return that. */ extern rtx copy_to_suggested_reg (rtx, rtx, enum machine_mode); /* Copy a value to a register if it isn't already a register. Args are mode (in case value is a constant) and the value. */ extern rtx force_reg (enum machine_mode, rtx); /* Return given rtx, copied into a new temp reg if it was in memory. */ extern rtx force_not_mem (rtx); /* Return mode and signedness to use when object is promoted. */ extern enum machine_mode promote_mode (tree, enum machine_mode, int *, int); /* Remove some bytes from the stack. An rtx says how many. */ extern void adjust_stack (rtx); /* Add some bytes to the stack. An rtx says how many. */ extern void anti_adjust_stack (rtx); /* This enum is used for the following two functions. */ enum save_level {SAVE_BLOCK, SAVE_FUNCTION, SAVE_NONLOCAL}; /* Save the stack pointer at the specified level. */ extern void emit_stack_save (enum save_level, rtx *, rtx); /* Restore the stack pointer from a save area of the specified level. */ extern void emit_stack_restore (enum save_level, rtx, rtx); /* Invoke emit_stack_save for the nonlocal_goto_save_area. */ extern void update_nonlocal_goto_save_area (void); /* Allocate some space on the stack dynamically and return its address. An rtx says how many bytes. */ extern rtx allocate_dynamic_stack_space (rtx, rtx, int); /* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive. FIRST is a constant and size is a Pmode RTX. These are offsets from the current stack pointer. STACK_GROWS_DOWNWARD says whether to add or subtract from the stack. If SIZE is constant, this is done with a fixed number of probes. Otherwise, we must make a loop. */ extern void probe_stack_range (HOST_WIDE_INT, rtx); /* Return an rtx that refers to the value returned by a library call in its original home. This becomes invalid if any more code is emitted. */ extern rtx hard_libcall_value (enum machine_mode); /* Given an rtx, return an rtx for a value rounded up to a multiple of STACK_BOUNDARY / BITS_PER_UNIT. */ extern rtx round_push (rtx); /* Return the mode desired by operand N of a particular bitfield insert/extract insn, or MAX_MACHINE_MODE if no such insn is available. 
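   A hedged sketch, asking which mode operand 1 of the target's extzv
   pattern wants:

     enum machine_mode maxmode = mode_for_extraction (EP_extzv, 1);
     if (maxmode == MAX_MACHINE_MODE)
       ... no extzv pattern: fall back to shifts and masks ...

   EP_extzv is one of the extraction_pattern values defined just below.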
*/ enum extraction_pattern { EP_insv, EP_extv, EP_extzv }; extern enum machine_mode mode_for_extraction (enum extraction_pattern, int); extern rtx store_bit_field (rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, enum machine_mode, rtx, HOST_WIDE_INT); extern rtx extract_bit_field (rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, int, rtx, enum machine_mode, enum machine_mode, HOST_WIDE_INT); extern rtx expand_mult (enum machine_mode, rtx, rtx, rtx, int); extern bool const_mult_add_overflow_p (rtx, rtx, rtx, enum machine_mode, int); extern rtx expand_mult_add (rtx, rtx, rtx, rtx,enum machine_mode, int); extern rtx expand_mult_highpart_adjust (enum machine_mode, rtx, rtx, rtx, rtx, int); extern rtx assemble_static_space (unsigned HOST_WIDE_INT); extern int safe_from_p (rtx, tree, int); /* Call this once to initialize the contents of the optabs appropriately for the current target machine. */ extern void init_optabs (void); extern void init_all_optabs (void); /* Call this to initialize an optab function entry. */ extern rtx init_one_libfunc (const char *); extern void do_jump_by_parts_equality_rtx (rtx, rtx, rtx); extern void do_jump_by_parts_greater_rtx (enum machine_mode, int, rtx, rtx, rtx, rtx); extern int vector_mode_valid_p (enum machine_mode); #endif /* GCC_EXPR_H */ /* Timing variables for measuring compiler performance. Copyright (C) 2000, 2003 Free Software Foundation, Inc. Contributed by Alex Samuel This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_TIMEVAR_H #define GCC_TIMEVAR_H /* Timing variables are used to measure elapsed time in various portions of the compiler. Each measures elapsed user, system, and wall-clock time, as appropriate to and supported by the host system. Timing variables are defined using the DEFTIMEVAR macro in timevar.def. Each has an enumeral identifier, used when referring to the timing variable in code, and a character string name. Timing variables can be used in two ways: - On the timing stack, using timevar_push and timevar_pop. Timing variables may be pushed onto the stack; elapsed time is attributed to the topmost timing variable on the stack. When another variable is pushed on, the previous topmost variable is `paused' until the pushed variable is popped back off. - As a standalone timer, using timevar_start and timevar_stop. All time elapsed between the two calls is attributed to the variable. */ /* This structure stores the various varieties of time that can be measured. Times are stored in seconds. The time may be an absolute time or a time difference; in the former case, the time base is undefined, except that the difference between two times produces a valid time difference. */ struct timevar_time_def { /* User time in this process. */ double user; /* System time (if applicable for this host platform) in this process. */ double sys; /* Wall clock time. 
*/ double wall; }; /* An enumeration of timing variable identifiers. Constructed from the contents of timevar.def. */ #define DEFTIMEVAR(identifier__, name__) \ identifier__, typedef enum { /* This file contains the definitions for timing variables used to measure run-time performance of the compiler. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Alex Samuel This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains timing variable definitions, used by timevar.h and timevar.c. Syntax: DEFTIMEVAR (id, name) where ID is the enumeral value used to identify the timing variable, and NAME is a character string describing its purpose. */ /* The total execution time. */ DEFTIMEVAR (TV_TOTAL , "total time") /* Time spent garbage-collecting. */ DEFTIMEVAR (TV_GC , "garbage collection") /* Time spent generating dump files. */ DEFTIMEVAR (TV_DUMP , "dump files") DEFTIMEVAR (TV_CGRAPH , "callgraph construction") DEFTIMEVAR (TV_CGRAPHOPT , "callgraph optimization") /* Time spent by constructing CFG. */ DEFTIMEVAR (TV_CFG , "cfg construction") /* Time spent by cleaning up CFG. */ DEFTIMEVAR (TV_CLEANUP_CFG , "cfg cleanup") DEFTIMEVAR (TV_CFG_VERIFY , "CFG verifier") DEFTIMEVAR (TV_DELETE_TRIVIALLY_DEAD , "trivially dead code") /* Time spent by life analysis. */ DEFTIMEVAR (TV_LIFE , "life analysis") DEFTIMEVAR (TV_LIFE_UPDATE , "life info update") DEFTIMEVAR (TV_ALIAS_ANALYSIS , "alias analysis") DEFTIMEVAR (TV_REG_SCAN , "register scan") DEFTIMEVAR (TV_REBUILD_JUMP , "rebuild jump labels") /* Timing in various stages of the compiler. 
*/ DEFTIMEVAR (TV_CPP , "preprocessing") DEFTIMEVAR (TV_LEX , "lexical analysis") DEFTIMEVAR (TV_PARSE , "parser") DEFTIMEVAR (TV_NAME_LOOKUP , "name lookup") DEFTIMEVAR (TV_INTEGRATION , "integration") DEFTIMEVAR (TV_TREE_GIMPLIFY , "tree gimplify") DEFTIMEVAR (TV_TREE_EH , "tree eh") DEFTIMEVAR (TV_TREE_CFG , "tree CFG construction") DEFTIMEVAR (TV_TREE_CLEANUP_CFG , "tree CFG cleanup") DEFTIMEVAR (TV_TREE_PTA , "tree PTA") DEFTIMEVAR (TV_TREE_MAY_ALIAS , "tree alias analysis") DEFTIMEVAR (TV_TREE_INSERT_PHI_NODES , "tree PHI insertion") DEFTIMEVAR (TV_TREE_SSA_REWRITE_BLOCKS, "tree SSA rewrite") DEFTIMEVAR (TV_TREE_SSA_OTHER , "tree SSA other") DEFTIMEVAR (TV_TREE_OPS , "tree operand scan") DEFTIMEVAR (TV_TREE_SSA_DOMINATOR_OPTS , "dominator optimization") DEFTIMEVAR (TV_TREE_SRA , "tree SRA") DEFTIMEVAR (TV_TREE_CCP , "tree CCP") DEFTIMEVAR (TV_TREE_SPLIT_EDGES , "tree split crit edges") DEFTIMEVAR (TV_TREE_PRE , "tree PRE") DEFTIMEVAR (TV_TREE_FRE , "tree FRE") DEFTIMEVAR (TV_TREE_PHIOPT , "tree linearize phis") DEFTIMEVAR (TV_TREE_FORWPROP , "tree forward propagate") DEFTIMEVAR (TV_TREE_DCE , "tree conservative DCE") DEFTIMEVAR (TV_TREE_CD_DCE , "tree aggressive DCE") DEFTIMEVAR (TV_TREE_DSE , "tree DSE") DEFTIMEVAR (TV_TREE_LOOP , "tree loop optimization") DEFTIMEVAR (TV_TREE_CH , "tree copy headers") DEFTIMEVAR (TV_TREE_SSA_TO_NORMAL , "tree SSA to normal") DEFTIMEVAR (TV_TREE_NRV , "tree NRV optimization") DEFTIMEVAR (TV_TREE_COPY_RENAME , "tree rename SSA copies") DEFTIMEVAR (TV_TREE_SSA_VERIFY , "tree SSA verifier") DEFTIMEVAR (TV_TREE_STMT_VERIFY , "tree STMT verifier") DEFTIMEVAR (TV_CGRAPH_VERIFY , "callgraph verifier") DEFTIMEVAR (TV_DOM_FRONTIERS , "dominance frontiers") DEFTIMEVAR (TV_CONTROL_DEPENDENCES , "control dependences") DEFTIMEVAR (TV_OVERLOAD , "overload resolution") DEFTIMEVAR (TV_TEMPLATE_INSTANTIATION, "template instantiation") DEFTIMEVAR (TV_EXPAND , "expand") DEFTIMEVAR (TV_VARCONST , "varconst") DEFTIMEVAR (TV_JUMP , "jump") DEFTIMEVAR (TV_CSE , "CSE") DEFTIMEVAR (TV_GCSE , "global CSE") DEFTIMEVAR (TV_LOOP , "loop analysis") DEFTIMEVAR (TV_BYPASS , "bypass jumps") DEFTIMEVAR (TV_TRACER , "tracer") DEFTIMEVAR (TV_WEB , "web") DEFTIMEVAR (TV_CSE2 , "CSE 2") DEFTIMEVAR (TV_BRANCH_PROB , "branch prediction") DEFTIMEVAR (TV_VPT , "value profile opts") DEFTIMEVAR (TV_FLOW , "flow analysis") DEFTIMEVAR (TV_COMBINE , "combiner") DEFTIMEVAR (TV_IFCVT , "if-conversion") DEFTIMEVAR (TV_REGMOVE , "regmove") DEFTIMEVAR (TV_MODE_SWITCH , "mode switching") DEFTIMEVAR (TV_SMS , "sms modulo scheduling") DEFTIMEVAR (TV_SCHED , "scheduling") DEFTIMEVAR (TV_LOCAL_ALLOC , "local alloc") DEFTIMEVAR (TV_GLOBAL_ALLOC , "global alloc") DEFTIMEVAR (TV_RELOAD_CSE_REGS , "reload CSE regs") DEFTIMEVAR (TV_FLOW2 , "flow 2") DEFTIMEVAR (TV_IFCVT2 , "if-conversion 2") DEFTIMEVAR (TV_PEEPHOLE2 , "peephole 2") DEFTIMEVAR (TV_RENAME_REGISTERS , "rename registers") DEFTIMEVAR (TV_SCHED2 , "scheduling 2") DEFTIMEVAR (TV_MACH_DEP , "machine dep reorg") DEFTIMEVAR (TV_DBR_SCHED , "delay branch sched") DEFTIMEVAR (TV_REORDER_BLOCKS , "reorder blocks") DEFTIMEVAR (TV_SHORTEN_BRANCH , "shorten branches") DEFTIMEVAR (TV_REG_STACK , "reg stack") DEFTIMEVAR (TV_FINAL , "final") DEFTIMEVAR (TV_SYMOUT , "symout") DEFTIMEVAR (TV_VAR_TRACKING , "variable tracking") /* Everything else in rest_of_compilation not included above. 
*/ DEFTIMEVAR (TV_REST_OF_COMPILATION , "rest of compilation") TIMEVAR_LAST } timevar_id_t; #undef DEFTIMEVAR /* Execute the sequence: timevar_pop (TV), return (E); */ #define POP_TIMEVAR_AND_RETURN(TV, E) return (timevar_pop (TV), (E)) extern void timevar_init (void); extern void timevar_push (timevar_id_t); extern void timevar_pop (timevar_id_t); extern void timevar_start (timevar_id_t); extern void timevar_stop (timevar_id_t); extern void timevar_get (timevar_id_t, struct timevar_time_def *); extern void timevar_print (FILE *); /* Provided for backward compatibility. */ extern void print_time (const char *, long); #endif /* ! GCC_TIMEVAR_H */ /* toplev.h - Various declarations for functions found in toplev.c Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_TOPLEV_H #define GCC_TOPLEV_H /* If non-NULL, return one past-the-end of the matching SUBPART of the WHOLE string. */ #define skip_leading_substring(whole, part) \ (strncmp (whole, part, strlen (part)) ? NULL : whole + strlen (part)) extern int toplev_main (unsigned int, const char **); extern int read_integral_parameter (const char *, const char *, const int); extern void strip_off_ending (char *, int); extern const char *trim_filename (const char *); extern void _fatal_insn_not_found (rtx, const char *, int, const char *) ATTRIBUTE_NORETURN; extern void _fatal_insn (const char *, rtx, const char *, int, const char *) ATTRIBUTE_NORETURN; #define fatal_insn(msgid, insn) \ _fatal_insn (msgid, insn, __FILE__, __LINE__, __FUNCTION__) #define fatal_insn_not_found(insn) \ _fatal_insn_not_found (insn, __FILE__, __LINE__, __FUNCTION__) /* If we haven't already defined a frontend specific diagnostics style, use the generic one. */ #ifndef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_diag__ #endif /* None of these functions are suitable for ATTRIBUTE_PRINTF, because each language front end can extend them with its own set of format specifiers. We must use custom format checks. */ #if GCC_VERSION >= 3005 #define ATTRIBUTE_GCC_DIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m, n))) ATTRIBUTE_NONNULL(m) #else #define ATTRIBUTE_GCC_DIAG(m, n) ATTRIBUTE_NONNULL(m) #endif extern void internal_error (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2) ATTRIBUTE_NORETURN; extern void warning (const char *, ...); extern void error (const char *, ...); extern void fatal_error (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2) ATTRIBUTE_NORETURN; extern void pedwarn (const char *, ...); extern void sorry (const char *, ...); extern void inform (const char *, ...) 
ATTRIBUTE_GCC_DIAG(1,2); extern void rest_of_decl_compilation (tree, const char *, int, int); extern void rest_of_type_compilation (tree, int); extern void rest_of_compilation (void); extern void tree_rest_of_compilation (tree, bool); extern void init_tree_optimization_passes (void); extern void init_optimization_passes (void); extern void finish_optimization_passes (void); extern bool enable_rtl_dump_file (int); extern void announce_function (tree); extern void error_for_asm (rtx, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); extern void warning_for_asm (rtx, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); extern void warn_deprecated_use (tree); #ifdef BUFSIZ extern void output_quoted_string (FILE *, const char *); extern void output_file_directive (FILE *, const char *); #endif #ifdef BUFSIZ /* N.B. Unlike all the others, fnotice is just gettext+fprintf, and therefore it can have ATTRIBUTE_PRINTF. */ extern void fnotice (FILE *, const char *, ...) ATTRIBUTE_PRINTF_2; #endif extern int wrapup_global_declarations (tree *, int); extern void check_global_declarations (tree *, int); extern void write_global_declarations (void); /* A unique local time stamp, might be zero if none is available. */ extern unsigned local_tick; extern const char *progname; extern const char *dump_base_name; extern const char *aux_base_name; extern const char *aux_info_file_name; extern const char *asm_file_name; extern bool exit_after_options; extern int target_flags_explicit; /* See toplev.c. */ extern int flag_loop_optimize; extern int flag_crossjumping; extern int flag_if_conversion; extern int flag_if_conversion2; extern int flag_keep_static_consts; extern int flag_peel_loops; extern int flag_rerun_cse_after_loop; extern int flag_thread_jumps; extern int flag_tracer; extern int flag_unroll_loops; extern int flag_unroll_all_loops; extern int flag_unswitch_loops; extern int flag_cprop_registers; extern int time_report; extern int flag_new_regalloc; extern int flag_tree_based_profiling; /* Things to do with target switches. */ extern void display_target_options (void); extern void print_version (FILE *, const char *); extern void set_target_switch (const char *); extern void * default_get_pch_validity (size_t *); extern const char * default_pch_valid_p (const void *, size_t); /* The hashtable, so that the C front ends can pass it to cpplib. */ extern struct ht *ident_hash; /* This function can be used by targets to set the flags originally implied by -ffast-math and -fno-fast-math. */ extern void set_fast_math_flags (int); /* Handle -d switch. */ extern void decode_d_option (const char *); /* Return true iff flags are set as if -ffast-math. */ extern bool fast_math_flags_set_p (void); /* The following functions accept a wide integer argument. Rather than having to cast on every function call, we use a macro instead. 
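   (An illustrative aside added for exposition, not part of the original header; the concrete values are assumptions about the helpers' usual behaviour rather than anything stated here.  exact_log2 is expected to return the base-2 logarithm only when its argument is an exact power of two and -1 otherwise, while floor_log2 rounds down and returns -1 for zero.  For example:

     exact_log2 (16) == 4      exact_log2 (12) == -1
     floor_log2 (16) == 4      floor_log2 (12) == 3      floor_log2 (0) == -1

   When the host compiler provides __builtin_clz and friends, the inline floor_log2 defined below behaves exactly this way.)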
*/ #ifndef exact_log2 #define exact_log2(N) exact_log2_wide ((unsigned HOST_WIDE_INT) (N)) #if (__GNUC__ * 1000 + __GNUC_MINOR__) >= 3004 #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONGLONG #define FL2T__ HOST_WIDE_INT #define FL2T_CLZ__ __builtin_clzll #else #if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG #define FL2T__ HOST_WIDE_INT #define FL2T_CLZ__ __builtin_clzl #else #define FL2T__ int #define FL2T_CLZ__ __builtin_clz #endif #endif static inline int floor_log2(FL2T__ n) { if (n) return (sizeof(FL2T__)*8-1) - (int)FL2T_CLZ__(n); return -1; } #else #define floor_log2(N) floor_log2_wide ((unsigned HOST_WIDE_INT) (N)) #endif #endif extern int exact_log2_wide (unsigned HOST_WIDE_INT); extern int floor_log2_wide (unsigned HOST_WIDE_INT); /* Functions used to get and set GCC's notion of in what directory compilation was started. */ extern const char *get_src_pwd (void); extern bool set_src_pwd (const char *); #endif /* ! GCC_TOPLEV_H */ /* Exception Handling interface routines. Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Mike Stump . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ struct function; struct inline_remap; /* Per-function EH data. Used only in except.c, but GC and others manipulate pointers to the opaque type. */ struct eh_status; /* Internal structure describing a region. */ struct eh_region; /* Test: is exception handling turned on? */ extern int doing_eh (int); /* Start an exception handling region. All instructions emitted after this point are considered to be part of the region until an expand_eh_region_end variant is invoked. */ extern void expand_eh_region_start (void); /* End an exception handling region for a cleanup. HANDLER is an expression to expand for the cleanup. */ extern void expand_eh_region_end_cleanup (tree); /* End an exception handling region for a try block, and prepares for subsequent calls to expand_start_catch. */ extern void expand_start_all_catch (void); /* Begin a catch clause. TYPE is an object to be matched by the runtime, or a list of such objects, or null if this is a catch-all clause. */ extern void expand_start_catch (tree); /* End a catch clause. Control will resume after the try/catch block. */ extern void expand_end_catch (void); /* End a sequence of catch handlers for a try block. */ extern void expand_end_all_catch (void); /* End an exception region for an exception type filter. ALLOWED is a TREE_LIST of TREE_VALUE objects to be matched by the runtime. FAILURE is a function to invoke if a mismatch occurs. */ extern void expand_eh_region_end_allowed (tree, tree); /* End an exception region for a must-not-throw filter. FAILURE is a function to invoke if an uncaught exception propagates this far. */ extern void expand_eh_region_end_must_not_throw (tree); /* End an exception region for a throw. 
No handling goes on here, but it's the easiest way for the front-end to indicate what type is being thrown. */ extern void expand_eh_region_end_throw (tree); /* End a fixup region. Within this region the cleanups for the immediately enclosing region are _not_ run. This is used for goto cleanup to avoid destroying an object twice. */ extern void expand_eh_region_end_fixup (tree); /* End some sort of EH region, depending on the argument. */ extern void expand_eh_handler (tree); /* Note that the current EH region (if any) may contain a throw, or a call to a function which itself may contain a throw. */ extern void note_eh_region_may_contain_throw (struct eh_region *); extern void note_current_region_may_contain_throw (void); /* Invokes CALLBACK for every exception handler label. Only used by old loop hackery; should not be used by new code. */ extern void for_each_eh_label (void (*) (rtx)); /* Invokes CALLBACK for every exception region in the current function. */ extern void for_each_eh_region (void (*) (struct eh_region *)); /* Determine if the given INSN can throw an exception. */ extern bool can_throw_internal_1 (int); extern bool can_throw_internal (rtx); extern bool can_throw_external_1 (int); extern bool can_throw_external (rtx); /* Set current_function_nothrow and cfun->all_throwers_are_sibcalls. */ extern void set_nothrow_function_flags (void); /* After initial rtl generation, call back to finish generating exception support code. */ extern void finish_eh_generation (void); extern void init_eh (void); extern void init_eh_for_function (void); extern rtx reachable_handlers (rtx); extern void maybe_remove_eh_handler (rtx); extern void convert_from_eh_region_ranges (void); extern void convert_to_eh_region_ranges (void); extern void find_exception_handler_labels (void); extern bool current_function_has_exception_handlers (void); extern void output_function_exception_table (void); extern void expand_builtin_unwind_init (void); extern rtx expand_builtin_eh_return_data_regno (tree); extern rtx expand_builtin_extract_return_addr (tree); extern void expand_builtin_init_dwarf_reg_sizes (tree); extern rtx expand_builtin_frob_return_addr (tree); extern rtx expand_builtin_dwarf_sp_column (void); extern void expand_builtin_eh_return (tree, tree); extern void expand_eh_return (void); extern rtx expand_builtin_extend_pointer (tree); extern rtx get_exception_pointer (struct function *); extern rtx get_exception_filter (struct function *); extern int duplicate_eh_regions (struct function *, struct inline_remap *); extern int check_handled (tree, tree); extern void sjlj_emit_function_exit_after (rtx); extern struct eh_region *gen_eh_region_cleanup (struct eh_region *, struct eh_region *); extern struct eh_region *gen_eh_region_try (struct eh_region *); extern struct eh_region *gen_eh_region_catch (struct eh_region *, tree); extern struct eh_region *gen_eh_region_allowed (struct eh_region *, tree); extern struct eh_region *gen_eh_region_must_not_throw (struct eh_region *); extern int get_eh_region_number (struct eh_region *); extern bool get_eh_region_may_contain_throw (struct eh_region *); extern tree get_eh_region_tree_label (struct eh_region *); extern void set_eh_region_tree_label (struct eh_region *, tree); extern void foreach_reachable_handler (int, bool, void (*) (struct eh_region *, void *), void *); extern void collect_eh_region_array (void); extern void expand_resx_expr (tree); /* tree-eh.c */ extern int lookup_stmt_eh_region (tree); /* If non-NULL, this is a function that returns an expression 
to be executed if an unhandled exception is propagated out of a cleanup region. For example, in C++, an exception thrown by a destructor during stack unwinding is required to result in a call to `std::terminate', so the C++ version of this function returns a CALL_EXPR for `std::terminate'. */ extern tree (*lang_protect_cleanup_actions) (void); /* Return true if type A catches type B. */ extern int (*lang_eh_type_covers) (tree a, tree b); /* Map a type to a runtime object to match type. */ extern tree (*lang_eh_runtime_type) (tree); /* Just because the user configured --with-sjlj-exceptions=no doesn't mean that we can use call frame exceptions. Detect that the target has appropriate support. */ #ifndef MUST_USE_SJLJ_EXCEPTIONS # if !(defined (EH_RETURN_DATA_REGNO) \ && (defined (IA64_UNWIND_INFO) \ || (DWARF2_UNWIND_INFO \ && (defined (EH_RETURN_HANDLER_RTX) \ || defined (HAVE_eh_return))))) # define MUST_USE_SJLJ_EXCEPTIONS 1 # else # define MUST_USE_SJLJ_EXCEPTIONS 0 # endif #endif #ifdef CONFIG_SJLJ_EXCEPTIONS # if CONFIG_SJLJ_EXCEPTIONS == 1 # define USING_SJLJ_EXCEPTIONS 1 # endif # if CONFIG_SJLJ_EXCEPTIONS == 0 # define USING_SJLJ_EXCEPTIONS 0 # ifndef EH_RETURN_DATA_REGNO #error "EH_RETURN_DATA_REGNO required" # endif # if !defined(EH_RETURN_HANDLER_RTX) && !defined(HAVE_eh_return) #error "EH_RETURN_HANDLER_RTX or eh_return required" # endif # if !defined(DWARF2_UNWIND_INFO) && !defined(IA64_UNWIND_INFO) #error "{DWARF2,IA64}_UNWIND_INFO required" # endif # endif #else # define USING_SJLJ_EXCEPTIONS MUST_USE_SJLJ_EXCEPTIONS #endif /* Natural loop functions Copyright (C) 1987, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_CFGLOOP_H #define GCC_CFGLOOP_H /* For rtx_code. */ /* Structure to hold decision about unrolling/peeling. */ enum lpt_dec { LPT_NONE, LPT_PEEL_COMPLETELY, LPT_PEEL_SIMPLE, LPT_UNROLL_CONSTANT, LPT_UNROLL_RUNTIME, LPT_UNROLL_STUPID }; struct lpt_decision { enum lpt_dec decision; unsigned times; }; /* Structure to hold information for each natural loop. */ struct loop { /* Index into loops array. */ int num; /* Basic block of loop header. */ basic_block header; /* Basic block of loop latch. */ basic_block latch; /* Basic block of loop preheader or NULL if it does not exist. */ basic_block pre_header; /* For loop unrolling/peeling decision. */ struct lpt_decision lpt_decision; /* Number of loop insns. */ unsigned ninsns; /* Average number of executed insns per iteration. */ unsigned av_ninsns; /* Array of edges along the preheader extended basic block trace. The source of the first edge is the root node of preheader extended basic block, if it exists. */ edge *pre_header_edges; /* Number of edges along the pre_header extended basic block trace. */ int num_pre_header_edges; /* The first block in the loop. This is not necessarily the same as the loop header. 
*/ basic_block first; /* The last block in the loop. This is not necessarily the same as the loop latch. */ basic_block last; /* Bitmap of blocks contained within the loop. */ sbitmap nodes; /* Number of blocks contained within the loop. */ unsigned num_nodes; /* Array of edges that enter the loop. */ edge *entry_edges; /* Number of edges that enter the loop. */ int num_entries; /* Array of edges that exit the loop. */ edge *exit_edges; /* Number of edges that exit the loop. */ int num_exits; /* Bitmap of blocks that dominate all exits of the loop. */ sbitmap exits_doms; /* The loop nesting depth. */ int depth; /* Superloops of the loop. */ struct loop **pred; /* The height of the loop (enclosed loop levels) within the loop hierarchy tree. */ int level; /* The outer (parent) loop or NULL if outermost loop. */ struct loop *outer; /* The first inner (child) loop or NULL if innermost loop. */ struct loop *inner; /* Link to the next (sibling) loop. */ struct loop *next; /* Loop that is copy of this loop. */ struct loop *copy; /* Nonzero if the loop is invalid (e.g., contains setjmp.). */ int invalid; /* Auxiliary info specific to a pass. */ void *aux; /* The following are currently used by loop.c but they are likely to disappear as loop.c is converted to use the CFG. */ /* Nonzero if the loop has a NOTE_INSN_LOOP_VTOP. */ rtx vtop; /* Nonzero if the loop has a NOTE_INSN_LOOP_CONT. A continue statement will generate a branch to NEXT_INSN (cont). */ rtx cont; /* The dominator of cont. */ rtx cont_dominator; /* The NOTE_INSN_LOOP_BEG. */ rtx start; /* The NOTE_INSN_LOOP_END. */ rtx end; /* For a rotated loop that is entered near the bottom, this is the label at the top. Otherwise it is zero. */ rtx top; /* Place in the loop where control enters. */ rtx scan_start; /* The position where to sink insns out of the loop. */ rtx sink; /* List of all LABEL_REFs which refer to code labels outside the loop. Used by routines that need to know all loop exits, such as final_biv_value and final_giv_value. This does not include loop exits due to return instructions. This is because all bivs and givs are pseudos, and hence must be dead after a return, so the presence of a return does not affect any of the optimizations that use this info. It is simpler to just not include return instructions on this list. */ rtx exit_labels; /* The number of LABEL_REFs on exit_labels for this loop and all loops nested inside it. */ int exit_count; }; /* Flags for state of loop structure. */ enum { LOOPS_HAVE_PREHEADERS = 1, LOOPS_HAVE_SIMPLE_LATCHES = 2, LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS = 4 }; /* Structure to hold CFG information about natural loops within a function. */ struct loops { /* Number of natural loops in the function. */ unsigned num; /* Maximum nested loop level in the function. */ unsigned levels; /* Array of natural loop descriptors (scanning this array in reverse order will find the inner loops before their enclosing outer loops). */ struct loop *array; /* The above array is unused in new loop infrastructure and is kept only for purposes of the old loop optimizer. Instead we store just pointers to loops here. */ struct loop **parray; /* Pointer to root of loop hierarchy tree. */ struct loop *tree_root; /* Information derived from the CFG. */ struct cfg { /* The ordering of the basic blocks in a depth first search. */ int *dfs_order; /* The reverse completion ordering of the basic blocks found in a depth first search. */ int *rc_order; } cfg; /* Headers shared by multiple loops that should be merged. 
*/ sbitmap shared_headers; /* State of loops. */ int state; }; /* Flags for loop discovery. */ #define LOOP_TREE 1 /* Build loop hierarchy tree. */ #define LOOP_PRE_HEADER 2 /* Analyze loop preheader. */ #define LOOP_ENTRY_EDGES 4 /* Find entry edges. */ #define LOOP_EXIT_EDGES 8 /* Find exit edges. */ #define LOOP_EDGES (LOOP_ENTRY_EDGES | LOOP_EXIT_EDGES) #define LOOP_ALL 15 /* All of the above */ /* Loop recognition. */ extern int flow_loops_find (struct loops *, int flags); extern int flow_loops_update (struct loops *, int flags); extern void flow_loops_free (struct loops *); extern void flow_loops_dump (const struct loops *, FILE *, void (*)(const struct loop *, FILE *, int), int); extern void flow_loop_dump (const struct loop *, FILE *, void (*)(const struct loop *, FILE *, int), int); extern int flow_loop_scan (struct loop *, int); extern void flow_loop_free (struct loop *); void mark_irreducible_loops (struct loops *); extern void create_loop_notes (void); /* Loop data structure manipulation/querying. */ extern void flow_loop_tree_node_add (struct loop *, struct loop *); extern void flow_loop_tree_node_remove (struct loop *); extern bool flow_loop_outside_edge_p (const struct loop *, edge); extern bool flow_loop_nested_p (const struct loop *, const struct loop *); extern bool flow_bb_inside_loop_p (const struct loop *, const basic_block); extern struct loop * find_common_loop (struct loop *, struct loop *); extern int num_loop_insns (struct loop *); extern int average_num_loop_insns (struct loop *); extern unsigned get_loop_level (const struct loop *); /* Loops & cfg manipulation. */ extern basic_block *get_loop_body (const struct loop *); extern basic_block *get_loop_body_in_dom_order (const struct loop *); extern edge *get_loop_exit_edges (const struct loop *, unsigned *); extern unsigned num_loop_branches (const struct loop *); extern edge loop_preheader_edge (const struct loop *); extern edge loop_latch_edge (const struct loop *); extern void add_bb_to_loop (basic_block, struct loop *); extern void remove_bb_from_loops (basic_block); extern void cancel_loop (struct loops *, struct loop *); extern void cancel_loop_tree (struct loops *, struct loop *); extern basic_block loop_split_edge_with (edge, rtx); extern int fix_loop_placement (struct loop *); enum { CP_SIMPLE_PREHEADERS = 1 }; extern void create_preheaders (struct loops *, int); extern void force_single_succ_latches (struct loops *); extern void verify_loop_structure (struct loops *); /* Loop analysis. */ extern bool just_once_each_iteration_p (struct loop *, basic_block); extern unsigned expected_loop_iterations (const struct loop *); /* Loop manipulation. */ extern bool can_duplicate_loop_p (struct loop *loop); #define DLTHE_FLAG_UPDATE_FREQ 1 /* Update frequencies in duplicate_loop_to_header_edge. */ extern int duplicate_loop_to_header_edge (struct loop *, edge, struct loops *, unsigned, sbitmap, edge, edge *, unsigned *, int); extern struct loop *loopify (struct loops *, edge, edge, basic_block); extern void unloop (struct loops *, struct loop *); extern bool remove_path (struct loops *, edge); extern edge split_loop_bb (basic_block, rtx); /* Induction variable analysis. */ /* The description of induction variable. The things are a bit complicated due to need to handle subregs and extends. The value of the object described by it can be obtained as follows (all computations are done in extend_mode): Value in i-th iteration is delta + mult * extend_{extend_mode} (subreg_{mode} (base + i * step)). 
If first_special is true, the value in the first iteration is delta + mult * base If extend = NIL, first_special must be false, delta 0, mult 1 and value is subreg_{mode} (base + i * step) The get_iv_value function can be used to obtain these expressions. ??? Add a third mode field that would specify the mode in that inner computation is done, which would enable it to be different from the outer one? */ struct rtx_iv { /* Its base and step (mode of base and step is supposed to be extend_mode, see the description above). */ rtx base, step; /* The type of extend applied to it (SIGN_EXTEND, ZERO_EXTEND or NIL). */ enum rtx_code extend; /* Operations applied in the extended mode. */ rtx delta, mult; /* The mode it is extended to. */ enum machine_mode extend_mode; /* The mode the variable iterates in. */ enum machine_mode mode; /* Whether we have already filled the remaining fields. */ unsigned analysed : 1; /* Whether the first iteration needs to be handled specially. */ unsigned first_special : 1; }; /* The description of an exit from the loop and of the number of iterations till we take the exit. */ struct niter_desc { /* The edge out of the loop. */ edge out_edge; /* The other edge leading from the condition. */ edge in_edge; /* True if we are able to say anything about number of iterations of the loop. */ bool simple_p; /* True if the loop iterates the constant number of times. */ bool const_iter; /* Number of iterations if constant. */ unsigned HOST_WIDEST_INT niter; /* Upper bound on the number of iterations. */ unsigned HOST_WIDEST_INT niter_max; /* Assumptions under that the rest of the information is valid. */ rtx assumptions; /* Assumptions under that the loop ends before reaching the latch, even if value of niter_expr says otherwise. */ rtx noloop_assumptions; /* Condition under that the loop is infinite. */ rtx infinite; /* Whether the comparison is signed. */ bool signed_p; /* The mode in that niter_expr should be computed. */ enum machine_mode mode; /* The number of iterations of the loop. */ rtx niter_expr; }; extern void iv_analysis_loop_init (struct loop *); extern rtx iv_get_reaching_def (rtx, rtx); extern bool iv_analyze (rtx, rtx, struct rtx_iv *); extern rtx get_iv_value (struct rtx_iv *, rtx); extern void find_simple_exit (struct loop *, struct niter_desc *); extern void iv_number_of_iterations (struct loop *, rtx, rtx, struct niter_desc *); extern void iv_analysis_done (void); extern struct niter_desc *get_simple_loop_desc (struct loop *loop); extern void free_simple_loop_desc (struct loop *loop); static inline struct niter_desc * simple_loop_desc (struct loop *loop) { return loop->aux; } /* Register pressure estimation for induction variable optimizations & loop invariant motion. */ extern unsigned global_cost_for_size (unsigned, unsigned, unsigned); extern void init_set_costs (void); /* Loop optimizer initialization. */ extern struct loops *loop_optimizer_init (FILE *); extern void loop_optimizer_finalize (struct loops *, FILE *); /* Optimization passes. */ extern void unswitch_loops (struct loops *); enum { UAP_PEEL = 1, /* Enables loop peeling. */ UAP_UNROLL = 2, /* Enables peeling of loops if it seems profitable. */ UAP_UNROLL_ALL = 4 /* Enables peeling of all loops. */ }; extern void unroll_and_peel_loops (struct loops *, int); extern void doloop_optimize_loops (struct loops *); extern void move_loop_invariants (struct loops *); #endif /* GCC_CFGLOOP_H */ /* This file contains functions for building the Control Flow Graph (CFG) for a function tree. 
*/ /* Local declarations. */ /* Initial capacity for the basic block array. */ static const int initial_cfg_capacity = 20; /* Mapping of labels to their associated blocks. This can greatly speed up building of the CFG in code with lots of gotos. */ static GTY(()) varray_type label_to_block_map; /* CFG statistics. */ struct cfg_stats_d { long num_merged_labels; }; static struct cfg_stats_d cfg_stats; /* Nonzero if we found a computed goto while building basic blocks. */ static bool found_computed_goto; /* Basic blocks and flowgraphs. */ static basic_block create_bb (void *, void *, basic_block); static void create_block_annotation (basic_block); static void free_blocks_annotations (void); static void clear_blocks_annotations (void); static void make_blocks (tree); static void factor_computed_gotos (void); /* Edges. */ static void make_cfg_edges (void); static void make_ctrl_stmt_edges (basic_block); static void make_exit_edges (basic_block); static void make_cond_expr_edges (basic_block); static void make_switch_expr_edges (basic_block); static void make_goto_expr_edges (basic_block); static edge tree_redirect_edge_and_branch (edge, basic_block); static edge tree_try_redirect_by_replacing_jump (edge, basic_block); static void split_critical_edges (void); /* Various helpers. */ static inline bool stmt_starts_bb_p (tree, tree); static int tree_verify_flow_info (void); static void tree_make_forwarder_block (edge); static bool thread_jumps (void); static bool tree_forwarder_block_p (basic_block); static void bsi_commit_edge_inserts_1 (edge e); static void tree_cfg2vcg (FILE *); /* Flowgraph optimization and cleanup. */ static void tree_merge_blocks (basic_block, basic_block); static bool tree_can_merge_blocks_p (basic_block, basic_block); static void remove_bb (basic_block); static void group_case_labels (void); static void cleanup_dead_labels (void); static bool cleanup_control_flow (void); static bool cleanup_control_expr_graph (basic_block, block_stmt_iterator); static edge find_taken_edge_cond_expr (basic_block, tree); static edge find_taken_edge_switch_expr (basic_block, tree); static tree find_case_label_for_value (tree, tree); static bool phi_alternatives_equal (basic_block, edge, edge); /*--------------------------------------------------------------------------- Create basic blocks ---------------------------------------------------------------------------*/ /* Entry point to the CFG builder for trees. TP points to the list of statements to be added to the flowgraph. */ static void build_tree_cfg (tree *tp) { /* Register specific tree functions. */ tree_register_cfg_hooks (); /* Initialize rbi_pool. */ alloc_rbi_pool (); /* Initialize the basic block array. */ init_flow (); n_basic_blocks = 0; last_basic_block = 0; VARRAY_BB_INIT (basic_block_info, initial_cfg_capacity, "basic_block_info"); memset ((void *) &cfg_stats, 0, sizeof (cfg_stats)); /* Build a mapping of labels to their associated blocks. */ VARRAY_BB_INIT (label_to_block_map, initial_cfg_capacity, "label to block map"); ENTRY_BLOCK_PTR->next_bb = EXIT_BLOCK_PTR; EXIT_BLOCK_PTR->prev_bb = ENTRY_BLOCK_PTR; found_computed_goto = 0; make_blocks (*tp); /* Computed gotos are hell to deal with, especially if there are lots of them with a large number of destinations. So we factor them to a common computed goto location before we build the edge list. After we convert back to normal form, we will un-factor the computed gotos since factoring introduces an unwanted jump. 
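   As a rough sketch of the factoring (hypothetical pseudo-GIMPLE, with spellings invented for illustration; the temporary really is named "gotovar" by factor_computed_gotos below), two computed gotos such as

     goto *p_1;          ...          goto *p_2;

   are rewritten as

     gotovar = p_1;                   gotovar = p_2;
     goto <factored>;    ...          goto <factored>;

   together with a single new block

     <factored>: goto *gotovar;

   so that only that one block needs edges to every address-taken label.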
*/ if (found_computed_goto) factor_computed_gotos (); /* Make sure there is always at least one block, even if its empty. */ if (n_basic_blocks == 0) create_empty_bb (ENTRY_BLOCK_PTR); create_block_annotation (ENTRY_BLOCK_PTR); create_block_annotation (EXIT_BLOCK_PTR); /* Adjust the size of the array. */ VARRAY_GROW (basic_block_info, n_basic_blocks); /* To speed up statement iterator walks, we first purge dead labels. */ cleanup_dead_labels (); /* Group case nodes to reduce the number of edges. We do this after cleaning up dead labels because otherwise we miss a lot of obvious case merging opportunities. */ group_case_labels (); /* Create the edges of the flowgraph. */ make_cfg_edges (); /* Debugging dumps. */ /* Write the flowgraph to a VCG file. */ { int local_dump_flags; FILE *dump_file = dump_begin (TDI_vcg, &local_dump_flags); if (dump_file) { tree_cfg2vcg (dump_file); dump_end (TDI_vcg, dump_file); } } /* Dump a textual representation of the flowgraph. */ if (dump_file) dump_tree_cfg (dump_file, dump_flags); } static void execute_build_cfg (void) { build_tree_cfg (&DECL_SAVED_TREE (current_function_decl)); } struct tree_opt_pass pass_build_cfg = { "cfg", /* name */ NULL, /* gate */ execute_build_cfg, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_CFG, /* tv_id */ PROP_gimple_leh, /* properties_required */ PROP_cfg, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_verify_stmts /* todo_flags_finish */ }; /* Search the CFG for any computed gotos. If found, factor them to a common computed goto site. Also record the location of that site so that we can un-factor the gotos after we have converted back to normal form. */ static void factor_computed_gotos (void) { basic_block bb; tree factored_label_decl = NULL; tree var = NULL; tree factored_computed_goto_label = NULL; tree factored_computed_goto = NULL; /* We know there are one or more computed gotos in this function. Examine the last statement in each basic block to see if the block ends with a computed goto. */ FOR_EACH_BB (bb) { block_stmt_iterator bsi = bsi_last (bb); tree last; if (bsi_end_p (bsi)) continue; last = bsi_stmt (bsi); /* Ignore the computed goto we create when we factor the original computed gotos. */ if (last == factored_computed_goto) continue; /* If the last statement is a computed goto, factor it. */ if (computed_goto_p (last)) { tree assignment; /* The first time we find a computed goto we need to create the factored goto block and the variable each original computed goto will use for their goto destination. */ if (! factored_computed_goto) { basic_block new_bb = create_empty_bb (bb); block_stmt_iterator new_bsi = bsi_start (new_bb); /* Create the destination of the factored goto. Each original computed goto will put its desired destination into this variable and jump to the label we create immediately below. */ var = create_tmp_var (ptr_type_node, "gotovar"); /* Build a label for the new block which will contain the factored computed goto. */ factored_label_decl = create_artificial_label (); factored_computed_goto_label = build1 (LABEL_EXPR, void_type_node, factored_label_decl); bsi_insert_after (&new_bsi, factored_computed_goto_label, BSI_NEW_STMT); /* Build our new computed goto. */ factored_computed_goto = build1 (GOTO_EXPR, void_type_node, var); bsi_insert_after (&new_bsi, factored_computed_goto, BSI_NEW_STMT); } /* Copy the original computed goto's destination into VAR. 
*/ assignment = build (MODIFY_EXPR, ptr_type_node, var, GOTO_DESTINATION (last)); bsi_insert_before (&bsi, assignment, BSI_SAME_STMT); /* And re-vector the computed goto to the new destination. */ GOTO_DESTINATION (last) = factored_label_decl; } } } /* Create annotations for a single basic block. */ static void create_block_annotation (basic_block bb) { /* Verify that the tree_annotations field is clear. */ if (bb->tree_annotations) abort (); bb->tree_annotations = ggc_alloc_cleared (sizeof (struct bb_ann_d)); } /* Free the annotations for all the basic blocks. */ static void free_blocks_annotations (void) { clear_blocks_annotations (); } /* Clear the annotations for all the basic blocks. */ static void clear_blocks_annotations (void) { basic_block bb; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) bb->tree_annotations = NULL; } /* Build a flowgraph for the statement_list STMT_LIST. */ static void make_blocks (tree stmt_list) { tree_stmt_iterator i = tsi_start (stmt_list); tree stmt = NULL; bool start_new_block = true; bool first_stmt_of_list = true; basic_block bb = ENTRY_BLOCK_PTR; while (!tsi_end_p (i)) { tree prev_stmt; prev_stmt = stmt; stmt = tsi_stmt (i); /* If the statement starts a new basic block or if we have determined in a previous pass that we need to create a new block for STMT, do so now. */ if (start_new_block || stmt_starts_bb_p (stmt, prev_stmt)) { if (!first_stmt_of_list) stmt_list = tsi_split_statement_list_before (&i); bb = create_basic_block (stmt_list, NULL, bb); start_new_block = false; } /* Now add STMT to BB and create the subgraphs for special statement codes. */ set_bb_for_stmt (stmt, bb); if (computed_goto_p (stmt)) found_computed_goto = true; /* If STMT is a basic block terminator, set START_NEW_BLOCK for the next iteration. */ if (stmt_ends_bb_p (stmt)) start_new_block = true; tsi_next (&i); first_stmt_of_list = false; } } /* Create and return a new empty basic block after bb AFTER. */ static basic_block create_bb (void *h, void *e, basic_block after) { basic_block bb; if (e) abort (); /* Create and initialize a new basic block. */ bb = alloc_block (); memset (bb, 0, sizeof (*bb)); bb->index = last_basic_block; bb->flags = BB_NEW; bb->stmt_list = h ? h : alloc_stmt_list (); /* Add the new block to the linked list of blocks. */ link_block (bb, after); /* Grow the basic block array if needed. */ if ((size_t) last_basic_block == VARRAY_SIZE (basic_block_info)) { size_t new_size = last_basic_block + (last_basic_block + 3) / 4; VARRAY_GROW (basic_block_info, new_size); } /* Add the newly created block to the array. */ BASIC_BLOCK (last_basic_block) = bb; create_block_annotation (bb); n_basic_blocks++; last_basic_block++; initialize_bb_rbi (bb); return bb; } /*--------------------------------------------------------------------------- Edge creation ---------------------------------------------------------------------------*/ /* Join all the blocks in the flowgraph. */ static void make_cfg_edges (void) { basic_block bb; /* Create an edge from entry to the first block with executable statements in it. */ make_edge (ENTRY_BLOCK_PTR, BASIC_BLOCK (0), EDGE_FALLTHRU); /* Traverse basic block array placing edges. */ FOR_EACH_BB (bb) { tree first = first_stmt (bb); tree last = last_stmt (bb); if (first) { /* Edges for statements that always alter flow control. */ if (is_ctrl_stmt (last)) make_ctrl_stmt_edges (bb); /* Edges for statements that sometimes alter flow control. 
*/ if (is_ctrl_altering_stmt (last)) make_exit_edges (bb); } /* Finally, if no edges were created above, this is a regular basic block that only needs a fallthru edge. */ if (bb->succ == NULL) make_edge (bb, bb->next_bb, EDGE_FALLTHRU); } /* We do not care about fake edges, so remove any that the CFG builder inserted for completeness. */ remove_fake_edges (); /* Clean up the graph and warn for unreachable code. */ cleanup_tree_cfg (); } /* Create edges for control statement at basic block BB. */ static void make_ctrl_stmt_edges (basic_block bb) { tree last = last_stmt (bb); tree first = first_stmt (bb); #if defined ENABLE_CHECKING if (last == NULL_TREE) abort(); #endif if (TREE_CODE (first) == LABEL_EXPR && DECL_NONLOCAL (LABEL_EXPR_LABEL (first))) make_edge (ENTRY_BLOCK_PTR, bb, EDGE_ABNORMAL); switch (TREE_CODE (last)) { case GOTO_EXPR: make_goto_expr_edges (bb); break; case RETURN_EXPR: make_edge (bb, EXIT_BLOCK_PTR, 0); break; case COND_EXPR: make_cond_expr_edges (bb); break; case SWITCH_EXPR: make_switch_expr_edges (bb); break; case RESX_EXPR: make_eh_edges (last); /* Yet another NORETURN hack. */ if (bb->succ == NULL) make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); break; default: abort (); } } /* Create exit edges for statements in block BB that alter the flow of control. Statements that alter the control flow are 'goto', 'return' and calls to non-returning functions. */ static void make_exit_edges (basic_block bb) { tree last = last_stmt (bb); if (last == NULL_TREE) abort (); switch (TREE_CODE (last)) { case CALL_EXPR: /* If this function receives a nonlocal goto, then we need to make edges from this call site to all the nonlocal goto handlers. */ if (TREE_SIDE_EFFECTS (last) && current_function_has_nonlocal_label) make_goto_expr_edges (bb); /* If this statement has reachable exception handlers, then create abnormal edges to them. */ make_eh_edges (last); /* Some calls are known not to return. For such calls we create a fake edge. We really need to revamp how we build edges so that it's not such a bloody pain to avoid creating edges for this case since all we do is remove these edges when we're done building the CFG. */ if (call_expr_flags (last) & (ECF_NORETURN | ECF_LONGJMP)) { make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); return; } /* Don't forget the fall-thru edge. */ make_edge (bb, bb->next_bb, EDGE_FALLTHRU); break; case MODIFY_EXPR: /* A MODIFY_EXPR may have a CALL_EXPR on its RHS and the CALL_EXPR may have an abnormal edge. Search the RHS for this case and create any required edges. */ if (TREE_CODE (TREE_OPERAND (last, 1)) == CALL_EXPR && TREE_SIDE_EFFECTS (TREE_OPERAND (last, 1)) && current_function_has_nonlocal_label) make_goto_expr_edges (bb); make_eh_edges (last); make_edge (bb, bb->next_bb, EDGE_FALLTHRU); break; default: abort (); } } /* Create the edges for a COND_EXPR starting at block BB. At this point, both clauses must contain only simple gotos. */ static void make_cond_expr_edges (basic_block bb) { tree entry = last_stmt (bb); basic_block then_bb, else_bb; tree then_label, else_label; #if defined ENABLE_CHECKING if (entry == NULL_TREE || TREE_CODE (entry) != COND_EXPR) abort (); #endif /* Entry basic blocks for each component. */ then_label = GOTO_DESTINATION (COND_EXPR_THEN (entry)); else_label = GOTO_DESTINATION (COND_EXPR_ELSE (entry)); then_bb = label_to_block (then_label); else_bb = label_to_block (else_label); make_edge (bb, then_bb, EDGE_TRUE_VALUE); make_edge (bb, else_bb, EDGE_FALSE_VALUE); } /* Create the edges for a SWITCH_EXPR starting at block BB. 
At this point, the switch body has been lowered and the SWITCH_LABELS filled in, so this is in effect a multi-way branch. */ static void make_switch_expr_edges (basic_block bb) { tree entry = last_stmt (bb); size_t i, n; tree vec; vec = SWITCH_LABELS (entry); n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i)); basic_block label_bb = label_to_block (lab); make_edge (bb, label_bb, 0); } } /* Return the basic block holding label DEST. */ basic_block label_to_block (tree dest) { int uid = LABEL_DECL_UID (dest); /* We would die hard when faced by undefined label. Emit label to very first basic block. This will hopefully make even the dataflow and undefined variable warnings quite right. */ if ((errorcount || sorrycount) && uid < 0) { block_stmt_iterator bsi = bsi_start (BASIC_BLOCK (0)); tree stmt; stmt = build1 (LABEL_EXPR, void_type_node, dest); bsi_insert_before (&bsi, stmt, BSI_NEW_STMT); uid = LABEL_DECL_UID (dest); } return VARRAY_BB (label_to_block_map, uid); } /* Create edges for a goto statement at block BB. */ static void make_goto_expr_edges (basic_block bb) { tree goto_t, dest; basic_block target_bb; int for_call; block_stmt_iterator last = bsi_last (bb); goto_t = bsi_stmt (last); /* If the last statement is not a GOTO (i.e., it is a RETURN_EXPR, CALL_EXPR or MODIFY_EXPR), then the edge is an abnormal edge resulting from a nonlocal goto. */ if (TREE_CODE (goto_t) != GOTO_EXPR) { dest = error_mark_node; for_call = 1; } else { dest = GOTO_DESTINATION (goto_t); for_call = 0; /* A GOTO to a local label creates normal edges. */ if (simple_goto_p (goto_t)) { edge e = make_edge (bb, label_to_block (dest), EDGE_FALLTHRU); #ifdef USE_MAPPED_LOCATION e->goto_locus = EXPR_LOCATION (goto_t); #else e->goto_locus = EXPR_LOCUS (goto_t); #endif bsi_remove (&last); return; } /* Nothing more to do for nonlocal gotos. */ if (TREE_CODE (dest) == LABEL_DECL) return; /* Computed gotos remain. */ } /* Look for the block starting with the destination label. In the case of a computed goto, make an edge to any label block we find in the CFG. */ FOR_EACH_BB (target_bb) { block_stmt_iterator bsi; for (bsi = bsi_start (target_bb); !bsi_end_p (bsi); bsi_next (&bsi)) { tree target = bsi_stmt (bsi); if (TREE_CODE (target) != LABEL_EXPR) break; if ( /* Computed GOTOs. Make an edge to every label block that has been marked as a potential target for a computed goto. */ (FORCED_LABEL (LABEL_EXPR_LABEL (target)) && for_call == 0) /* Nonlocal GOTO target. Make an edge to every label block that has been marked as a potential target for a nonlocal goto. */ || (DECL_NONLOCAL (LABEL_EXPR_LABEL (target)) && for_call == 1)) { make_edge (bb, target_bb, EDGE_ABNORMAL); break; } } } /* Degenerate case of computed goto with no labels. */ if (!for_call && !bb->succ) make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); } /*--------------------------------------------------------------------------- Flowgraph analysis ---------------------------------------------------------------------------*/ /* Remove unreachable blocks and other miscellaneous clean up work. */ void cleanup_tree_cfg (void) { bool something_changed = true; timevar_push (TV_TREE_CLEANUP_CFG); /* These three transformations can cascade, so we iterate on them until nothing changes. 
*/ while (something_changed) { something_changed = cleanup_control_flow (); something_changed |= thread_jumps (); something_changed |= delete_unreachable_blocks (); } /* Merging the blocks creates no new opportunities for the other optimizations, so do it here. */ merge_seq_blocks (); compact_blocks (); #ifdef ENABLE_CHECKING verify_flow_info (); #endif timevar_pop (TV_TREE_CLEANUP_CFG); } /* Cleanup useless labels in basic blocks. This is something we wish to do early because it allows us to group case labels before creating the edges for the CFG, and it speeds up block statement iterators in all passes later on. We only run this pass once; running it more than once is probably not profitable. */ /* A map from basic block index to the leading label of that block. */ static tree *label_for_bb; /* Callback for for_each_eh_region. Helper for cleanup_dead_labels. */ static void update_eh_label (struct eh_region *region) { tree old_label = get_eh_region_tree_label (region); if (old_label) { tree new_label = label_for_bb[label_to_block (old_label)->index]; set_eh_region_tree_label (region, new_label); } } /* Given LABEL, return the first label in the same basic block. */ static tree main_block_label (tree label) { basic_block bb = label_to_block (label); /* label_to_block may have inserted an undefined label into the chain. */ if (!label_for_bb[bb->index]) label_for_bb[bb->index] = label; return label_for_bb[bb->index]; } /* Cleanup redundant labels. This is a three-step process: 1) Find the leading label for each block. 2) Redirect all references to labels to the leading labels. 3) Cleanup all useless labels. */ static void cleanup_dead_labels (void) { basic_block bb; label_for_bb = xcalloc (last_basic_block, sizeof (tree)); /* Find a suitable label for each block. We use the first user-defined label if there is one, or otherwise just the first label we see. */ FOR_EACH_BB (bb) { block_stmt_iterator i; for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i)) { tree label, stmt = bsi_stmt (i); if (TREE_CODE (stmt) != LABEL_EXPR) break; label = LABEL_EXPR_LABEL (stmt); /* If we have not yet seen a label for the current block, remember this one and see if there are more labels. */ if (! label_for_bb[bb->index]) { label_for_bb[bb->index] = label; continue; } /* If we did see a label for the current block already, but it is an artificially created label, replace it if the current label is a user-defined label. */ if (! DECL_ARTIFICIAL (label) && DECL_ARTIFICIAL (label_for_bb[bb->index])) { label_for_bb[bb->index] = label; break; } } } /* Now redirect all jumps/branches to the selected label. First do so for each block ending in a control statement. */ FOR_EACH_BB (bb) { tree stmt = last_stmt (bb); if (!stmt) continue; switch (TREE_CODE (stmt)) { case COND_EXPR: { tree true_branch, false_branch; true_branch = COND_EXPR_THEN (stmt); false_branch = COND_EXPR_ELSE (stmt); GOTO_DESTINATION (true_branch) = main_block_label (GOTO_DESTINATION (true_branch)); GOTO_DESTINATION (false_branch) = main_block_label (GOTO_DESTINATION (false_branch)); break; } case SWITCH_EXPR: { size_t i; tree vec = SWITCH_LABELS (stmt); size_t n = TREE_VEC_LENGTH (vec); /* Replace all destination labels. */ for (i = 0; i < n; ++i) CASE_LABEL (TREE_VEC_ELT (vec, i)) = main_block_label (CASE_LABEL (TREE_VEC_ELT (vec, i))); break; } /* We have to handle GOTO_EXPRs until they're removed, and we don't remove them until after we've created the CFG edges. */ case GOTO_EXPR: if (!
computed_goto_p (stmt)) { GOTO_DESTINATION (stmt) = main_block_label (GOTO_DESTINATION (stmt)); break; } default: break; } } for_each_eh_region (update_eh_label); /* Finally, purge dead labels. All user-defined labels and labels that can be the target of non-local gotos are preserved. */ FOR_EACH_BB (bb) { block_stmt_iterator i; tree label_for_this_bb = label_for_bb[bb->index]; if (! label_for_this_bb) continue; for (i = bsi_start (bb); !bsi_end_p (i); ) { tree label, stmt = bsi_stmt (i); if (TREE_CODE (stmt) != LABEL_EXPR) break; label = LABEL_EXPR_LABEL (stmt); if (label == label_for_this_bb || ! DECL_ARTIFICIAL (label) || DECL_NONLOCAL (label)) bsi_next (&i); else bsi_remove (&i); } } free (label_for_bb); } /* Look for blocks ending in a multiway branch (a SWITCH_EXPR in GIMPLE), and scan the sorted vector of cases. Combine the ones jumping to the same label. Eg. three separate entries 1: 2: 3: become one entry 1..3: */ static void group_case_labels (void) { basic_block bb; FOR_EACH_BB (bb) { tree stmt = last_stmt (bb); if (stmt && TREE_CODE (stmt) == SWITCH_EXPR) { tree labels = SWITCH_LABELS (stmt); int old_size = TREE_VEC_LENGTH (labels); int i, j, new_size = old_size; /* Look for possible opportunities to merge cases. Ignore the last element of the label vector because it must be the default case. */ i = 0; while (i < old_size - 2) { tree base_case, base_label, base_high, type; base_case = TREE_VEC_ELT (labels, i); if (! base_case) abort (); type = TREE_TYPE (CASE_LOW (base_case)); base_label = CASE_LABEL (base_case); base_high = CASE_HIGH (base_case) ? CASE_HIGH (base_case) : CASE_LOW (base_case); /* Try to merge case labels. Break out when we reach the end of the label vector or when we cannot merge the next case label with the current one. */ while (i < old_size - 2) { tree merge_case = TREE_VEC_ELT (labels, ++i); tree merge_label = CASE_LABEL (merge_case); tree t = int_const_binop (PLUS_EXPR, base_high, integer_one_node, 1); /* Merge the cases if they jump to the same place, and their ranges are consecutive. */ if (merge_label == base_label && tree_int_cst_equal (CASE_LOW (merge_case), t)) { base_high = CASE_HIGH (merge_case) ? CASE_HIGH (merge_case) : CASE_LOW (merge_case); CASE_HIGH (base_case) = base_high; TREE_VEC_ELT (labels, i) = NULL_TREE; new_size--; } else break; } } /* Compress the case labels in the label vector, and adjust the length of the vector. */ for (i = 0, j = 0; i < new_size; i++) { while (! TREE_VEC_ELT (labels, j)) j++; TREE_VEC_ELT (labels, i) = TREE_VEC_ELT (labels, j++); } TREE_VEC_LENGTH (labels) = new_size; } } } /* Checks whether we can merge block B into block A. */ static bool tree_can_merge_blocks_p (basic_block a, basic_block b) { tree stmt; block_stmt_iterator bsi; if (!a->succ || a->succ->succ_next) return false; if (a->succ->flags & EDGE_ABNORMAL) return false; if (a->succ->dest != b) return false; if (b == EXIT_BLOCK_PTR) return false; if (b->pred->pred_next) return false; /* If A ends by a statement causing exceptions or something similar, we cannot merge the blocks. */ stmt = last_stmt (a); if (stmt && stmt_ends_bb_p (stmt)) return false; /* Do not allow a block with only a non-local label to be merged. */ if (stmt && TREE_CODE (stmt) == LABEL_EXPR && DECL_NONLOCAL (LABEL_EXPR_LABEL (stmt))) return false; /* There may be no phi nodes at the start of b. Most of these degenerate phi nodes should be cleaned up by kill_redundant_phi_nodes. */ if (phi_nodes (b)) return false; /* Do not remove user labels. 
*/ for (bsi = bsi_start (b); !bsi_end_p (bsi); bsi_next (&bsi)) { stmt = bsi_stmt (bsi); if (TREE_CODE (stmt) != LABEL_EXPR) break; if (!DECL_ARTIFICIAL (LABEL_EXPR_LABEL (stmt))) return false; } return true; } /* Merge block B into block A. */ static void tree_merge_blocks (basic_block a, basic_block b) { block_stmt_iterator bsi; tree_stmt_iterator last; if (dump_file) fprintf (dump_file, "Merging blocks %d and %d\n", a->index, b->index); /* Ensure that B follows A. */ move_block_after (b, a); if (!(a->succ->flags & EDGE_FALLTHRU)) abort (); if (last_stmt (a) && stmt_ends_bb_p (last_stmt (a))) abort (); /* Remove labels from B and set bb_for_stmt to A for other statements. */ for (bsi = bsi_start (b); !bsi_end_p (bsi);) { if (TREE_CODE (bsi_stmt (bsi)) == LABEL_EXPR) bsi_remove (&bsi); else { set_bb_for_stmt (bsi_stmt (bsi), a); bsi_next (&bsi); } } /* Merge the chains. */ last = tsi_last (a->stmt_list); tsi_link_after (&last, b->stmt_list, TSI_NEW_STMT); b->stmt_list = NULL; } /* Walk the function tree removing unnecessary statements. * Empty statement nodes are removed * Unnecessary TRY_FINALLY and TRY_CATCH blocks are removed * Unnecessary COND_EXPRs are removed * Some unnecessary BIND_EXPRs are removed Clearly more work could be done. The trick is doing the analysis and removal fast enough to be a net improvement in compile times. Note that when we remove a control structure such as a COND_EXPR BIND_EXPR, or TRY block, we will need to repeat this optimization pass to ensure we eliminate all the useless code. */ struct rus_data { tree *last_goto; bool repeat; bool may_throw; bool may_branch; bool has_label; }; static void remove_useless_stmts_1 (tree *, struct rus_data *); static bool remove_useless_stmts_warn_notreached (tree stmt) { if (EXPR_HAS_LOCATION (stmt)) { location_t loc = EXPR_LOCATION (stmt); warning ("%Hwill never be executed", &loc); return true; } switch (TREE_CODE (stmt)) { case STATEMENT_LIST: { tree_stmt_iterator i; for (i = tsi_start (stmt); !tsi_end_p (i); tsi_next (&i)) if (remove_useless_stmts_warn_notreached (tsi_stmt (i))) return true; } break; case COND_EXPR: if (remove_useless_stmts_warn_notreached (COND_EXPR_COND (stmt))) return true; if (remove_useless_stmts_warn_notreached (COND_EXPR_THEN (stmt))) return true; if (remove_useless_stmts_warn_notreached (COND_EXPR_ELSE (stmt))) return true; break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: if (remove_useless_stmts_warn_notreached (TREE_OPERAND (stmt, 0))) return true; if (remove_useless_stmts_warn_notreached (TREE_OPERAND (stmt, 1))) return true; break; case CATCH_EXPR: return remove_useless_stmts_warn_notreached (CATCH_BODY (stmt)); case EH_FILTER_EXPR: return remove_useless_stmts_warn_notreached (EH_FILTER_FAILURE (stmt)); case BIND_EXPR: return remove_useless_stmts_warn_notreached (BIND_EXPR_BLOCK (stmt)); default: /* Not a live container. 
*/ break; } return false; } static void remove_useless_stmts_cond (tree *stmt_p, struct rus_data *data) { tree then_clause, else_clause, cond; bool save_has_label, then_has_label, else_has_label; save_has_label = data->has_label; data->has_label = false; data->last_goto = NULL; remove_useless_stmts_1 (&COND_EXPR_THEN (*stmt_p), data); then_has_label = data->has_label; data->has_label = false; data->last_goto = NULL; remove_useless_stmts_1 (&COND_EXPR_ELSE (*stmt_p), data); else_has_label = data->has_label; data->has_label = save_has_label | then_has_label | else_has_label; fold_stmt (stmt_p); then_clause = COND_EXPR_THEN (*stmt_p); else_clause = COND_EXPR_ELSE (*stmt_p); cond = COND_EXPR_COND (*stmt_p); /* If neither arm does anything at all, we can remove the whole IF. */ if (!TREE_SIDE_EFFECTS (then_clause) && !TREE_SIDE_EFFECTS (else_clause)) { *stmt_p = build_empty_stmt (); data->repeat = true; } /* If there are no reachable statements in an arm, then we can zap the entire conditional. */ else if (integer_nonzerop (cond) && !else_has_label) { if (warn_notreached) remove_useless_stmts_warn_notreached (else_clause); *stmt_p = then_clause; data->repeat = true; } else if (integer_zerop (cond) && !then_has_label) { if (warn_notreached) remove_useless_stmts_warn_notreached (then_clause); *stmt_p = else_clause; data->repeat = true; } /* Check a couple of simple things on then/else with single stmts. */ else { tree then_stmt = expr_only (then_clause); tree else_stmt = expr_only (else_clause); /* Notice branches to a common destination. */ if (then_stmt && else_stmt && TREE_CODE (then_stmt) == GOTO_EXPR && TREE_CODE (else_stmt) == GOTO_EXPR && (GOTO_DESTINATION (then_stmt) == GOTO_DESTINATION (else_stmt))) { *stmt_p = then_stmt; data->repeat = true; } /* If the THEN/ELSE clause merely assigns a value to a variable or parameter which is already known to contain that value, then remove the useless THEN/ELSE clause. */ else if (TREE_CODE (cond) == VAR_DECL || TREE_CODE (cond) == PARM_DECL) { if (else_stmt && TREE_CODE (else_stmt) == MODIFY_EXPR && TREE_OPERAND (else_stmt, 0) == cond && integer_zerop (TREE_OPERAND (else_stmt, 1))) COND_EXPR_ELSE (*stmt_p) = alloc_stmt_list (); } else if ((TREE_CODE (cond) == EQ_EXPR || TREE_CODE (cond) == NE_EXPR) && (TREE_CODE (TREE_OPERAND (cond, 0)) == VAR_DECL || TREE_CODE (TREE_OPERAND (cond, 0)) == PARM_DECL) && TREE_CONSTANT (TREE_OPERAND (cond, 1))) { tree stmt = (TREE_CODE (cond) == EQ_EXPR ? then_stmt : else_stmt); tree *location = (TREE_CODE (cond) == EQ_EXPR ? &COND_EXPR_THEN (*stmt_p) : &COND_EXPR_ELSE (*stmt_p)); if (stmt && TREE_CODE (stmt) == MODIFY_EXPR && TREE_OPERAND (stmt, 0) == TREE_OPERAND (cond, 0) && TREE_OPERAND (stmt, 1) == TREE_OPERAND (cond, 1)) *location = alloc_stmt_list (); } } /* Protect GOTOs in the arm of COND_EXPRs from being removed. They would be re-introduced during lowering. */ data->last_goto = NULL; } static void remove_useless_stmts_tf (tree *stmt_p, struct rus_data *data) { bool save_may_branch, save_may_throw; bool this_may_branch, this_may_throw; /* Collect may_branch and may_throw information for the body only. 
*/ save_may_branch = data->may_branch; save_may_throw = data->may_throw; data->may_branch = false; data->may_throw = false; data->last_goto = NULL; remove_useless_stmts_1 (&TREE_OPERAND (*stmt_p, 0), data); this_may_branch = data->may_branch; this_may_throw = data->may_throw; data->may_branch |= save_may_branch; data->may_throw |= save_may_throw; data->last_goto = NULL; remove_useless_stmts_1 (&TREE_OPERAND (*stmt_p, 1), data); /* If the body is empty, then we can emit the FINALLY block without the enclosing TRY_FINALLY_EXPR. */ if (!TREE_SIDE_EFFECTS (TREE_OPERAND (*stmt_p, 0))) { *stmt_p = TREE_OPERAND (*stmt_p, 1); data->repeat = true; } /* If the handler is empty, then we can emit the TRY block without the enclosing TRY_FINALLY_EXPR. */ else if (!TREE_SIDE_EFFECTS (TREE_OPERAND (*stmt_p, 1))) { *stmt_p = TREE_OPERAND (*stmt_p, 0); data->repeat = true; } /* If the body neither throws, nor branches, then we can safely string the TRY and FINALLY blocks together. */ else if (!this_may_branch && !this_may_throw) { tree stmt = *stmt_p; *stmt_p = TREE_OPERAND (stmt, 0); append_to_statement_list (TREE_OPERAND (stmt, 1), stmt_p); data->repeat = true; } } static void remove_useless_stmts_tc (tree *stmt_p, struct rus_data *data) { bool save_may_throw, this_may_throw; tree_stmt_iterator i; tree stmt; /* Collect may_throw information for the body only. */ save_may_throw = data->may_throw; data->may_throw = false; data->last_goto = NULL; remove_useless_stmts_1 (&TREE_OPERAND (*stmt_p, 0), data); this_may_throw = data->may_throw; data->may_throw = save_may_throw; /* If the body cannot throw, then we can drop the entire TRY_CATCH_EXPR. */ if (!this_may_throw) { if (warn_notreached) remove_useless_stmts_warn_notreached (TREE_OPERAND (*stmt_p, 1)); *stmt_p = TREE_OPERAND (*stmt_p, 0); data->repeat = true; return; } /* Process the catch clause specially. We may be able to tell that no exceptions propagate past this point. */ this_may_throw = true; i = tsi_start (TREE_OPERAND (*stmt_p, 1)); stmt = tsi_stmt (i); data->last_goto = NULL; switch (TREE_CODE (stmt)) { case CATCH_EXPR: for (; !tsi_end_p (i); tsi_next (&i)) { stmt = tsi_stmt (i); /* If we catch all exceptions, then the body does not propagate exceptions past this point. */ if (CATCH_TYPES (stmt) == NULL) this_may_throw = false; data->last_goto = NULL; remove_useless_stmts_1 (&CATCH_BODY (stmt), data); } break; case EH_FILTER_EXPR: if (EH_FILTER_MUST_NOT_THROW (stmt)) this_may_throw = false; else if (EH_FILTER_TYPES (stmt) == NULL) this_may_throw = false; remove_useless_stmts_1 (&EH_FILTER_FAILURE (stmt), data); break; default: /* Otherwise this is a cleanup. */ remove_useless_stmts_1 (&TREE_OPERAND (*stmt_p, 1), data); /* If the cleanup is empty, then we can emit the TRY block without the enclosing TRY_CATCH_EXPR. */ if (!TREE_SIDE_EFFECTS (TREE_OPERAND (*stmt_p, 1))) { *stmt_p = TREE_OPERAND (*stmt_p, 0); data->repeat = true; } break; } data->may_throw |= this_may_throw; } static void remove_useless_stmts_bind (tree *stmt_p, struct rus_data *data) { tree block; /* First remove anything underneath the BIND_EXPR. */ remove_useless_stmts_1 (&BIND_EXPR_BODY (*stmt_p), data); /* If the BIND_EXPR has no variables, then we can pull everything up one level and remove the BIND_EXPR, unless this is the toplevel BIND_EXPR for the current function or an inlined function. When this situation occurs we will want to apply this optimization again. 
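For illustration, a BIND_EXPR left over from a braced block that declared
   no variables, say { x = 1; foo (); } with an empty BIND_EXPR_VARS chain,
   simply becomes the statement list 'x = 1; foo ();' spliced into its parent.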
*/ block = BIND_EXPR_BLOCK (*stmt_p); if (BIND_EXPR_VARS (*stmt_p) == NULL_TREE && *stmt_p != DECL_SAVED_TREE (current_function_decl) && (! block || ! BLOCK_ABSTRACT_ORIGIN (block) || (TREE_CODE (BLOCK_ABSTRACT_ORIGIN (block)) != FUNCTION_DECL))) { *stmt_p = BIND_EXPR_BODY (*stmt_p); data->repeat = true; } } static void remove_useless_stmts_goto (tree *stmt_p, struct rus_data *data) { tree dest = GOTO_DESTINATION (*stmt_p); data->may_branch = true; data->last_goto = NULL; /* Record the last goto expr, so that we can delete it if unnecessary. */ if (TREE_CODE (dest) == LABEL_DECL) data->last_goto = stmt_p; } static void remove_useless_stmts_label (tree *stmt_p, struct rus_data *data) { tree label = LABEL_EXPR_LABEL (*stmt_p); data->has_label = true; /* We do want to jump across non-local label receiver code. */ if (DECL_NONLOCAL (label)) data->last_goto = NULL; else if (data->last_goto && GOTO_DESTINATION (*data->last_goto) == label) { *data->last_goto = build_empty_stmt (); data->repeat = true; } /* ??? Add something here to delete unused labels. */ } /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its decl. This allows us to eliminate redundant or useless calls to "const" functions. Gimplifier already does the same operation, but we may notice functions being const and pure once their calls has been gimplified, so we need to update the flag. */ static void update_call_expr_flags (tree call) { tree decl = get_callee_fndecl (call); if (!decl) return; if (call_expr_flags (call) & (ECF_CONST | ECF_PURE)) TREE_SIDE_EFFECTS (call) = 0; if (TREE_NOTHROW (decl)) TREE_NOTHROW (call) = 1; } /* T is CALL_EXPR. Set current_function_calls_* flags. */ void notice_special_calls (tree t) { int flags = call_expr_flags (t); if (flags & ECF_MAY_BE_ALLOCA) current_function_calls_alloca = true; if (flags & ECF_RETURNS_TWICE) current_function_calls_setjmp = true; } /* Clear flags set by notice_special_calls. Used by dead code removal to update the flags. 
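A caller would typically pair the two routines roughly like this (a sketch
   only; the loop and the bb/bsi variables are assumed to be set up by the
   pass doing the removal):

     clear_special_calls ();
     FOR_EACH_BB (bb)
       for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
         {
           tree t = bsi_stmt (bsi);
           if (TREE_CODE (t) == CALL_EXPR)
             notice_special_calls (t);
           else if (TREE_CODE (t) == MODIFY_EXPR
                    && TREE_CODE (TREE_OPERAND (t, 1)) == CALL_EXPR)
             notice_special_calls (TREE_OPERAND (t, 1));
         }

   so that current_function_calls_alloca and current_function_calls_setjmp
   end up reflecting only the calls that survived.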
*/ void clear_special_calls (void) { current_function_calls_alloca = false; current_function_calls_setjmp = false; } static void remove_useless_stmts_1 (tree *tp, struct rus_data *data) { tree t = *tp; switch (TREE_CODE (t)) { case COND_EXPR: remove_useless_stmts_cond (tp, data); break; case TRY_FINALLY_EXPR: remove_useless_stmts_tf (tp, data); break; case TRY_CATCH_EXPR: remove_useless_stmts_tc (tp, data); break; case BIND_EXPR: remove_useless_stmts_bind (tp, data); break; case GOTO_EXPR: remove_useless_stmts_goto (tp, data); break; case LABEL_EXPR: remove_useless_stmts_label (tp, data); break; case RETURN_EXPR: fold_stmt (tp); data->last_goto = NULL; data->may_branch = true; break; case CALL_EXPR: fold_stmt (tp); data->last_goto = NULL; notice_special_calls (t); update_call_expr_flags (t); if (tree_could_throw_p (t)) data->may_throw = true; break; case MODIFY_EXPR: data->last_goto = NULL; fold_stmt (tp); if (TREE_CODE (TREE_OPERAND (t, 1)) == CALL_EXPR) { update_call_expr_flags (TREE_OPERAND (t, 1)); notice_special_calls (TREE_OPERAND (t, 1)); } if (tree_could_throw_p (t)) data->may_throw = true; break; case STATEMENT_LIST: { tree_stmt_iterator i = tsi_start (t); while (!tsi_end_p (i)) { t = tsi_stmt (i); if (IS_EMPTY_STMT (t)) { tsi_delink (&i); continue; } remove_useless_stmts_1 (tsi_stmt_ptr (i), data); t = tsi_stmt (i); if (TREE_CODE (t) == STATEMENT_LIST) { tsi_link_before (&i, t, TSI_SAME_STMT); tsi_delink (&i); } else tsi_next (&i); } } break; case SWITCH_EXPR: fold_stmt (tp); data->last_goto = NULL; break; default: data->last_goto = NULL; break; } } static void remove_useless_stmts (void) { struct rus_data data; clear_special_calls (); do { memset (&data, 0, sizeof (data)); remove_useless_stmts_1 (&DECL_SAVED_TREE (current_function_decl), &data); } while (data.repeat); } struct tree_opt_pass pass_remove_useless_stmts = { "useless", /* name */ NULL, /* gate */ remove_useless_stmts, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_gimple_any, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func /* todo_flags_finish */ }; /* Remove obviously useless statements in basic block BB. */ static void cfg_remove_useless_stmts_bb (basic_block bb) { block_stmt_iterator bsi; tree stmt = NULL_TREE; tree cond, var = NULL_TREE, val = NULL_TREE; struct var_ann_d *ann; /* Check whether we come here from a condition, and if so, get the condition. */ if (!bb->pred || bb->pred->pred_next || !(bb->pred->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) return; cond = COND_EXPR_COND (last_stmt (bb->pred->src)); if (TREE_CODE (cond) == VAR_DECL || TREE_CODE (cond) == PARM_DECL) { var = cond; val = (bb->pred->flags & EDGE_FALSE_VALUE ? boolean_false_node : boolean_true_node); } else if (TREE_CODE (cond) == TRUTH_NOT_EXPR && (TREE_CODE (TREE_OPERAND (cond, 0)) == VAR_DECL || TREE_CODE (TREE_OPERAND (cond, 0)) == PARM_DECL)) { var = TREE_OPERAND (cond, 0); val = (bb->pred->flags & EDGE_FALSE_VALUE ? 
boolean_true_node : boolean_false_node); } else { if (bb->pred->flags & EDGE_FALSE_VALUE) cond = invert_truthvalue (cond); if (TREE_CODE (cond) == EQ_EXPR && (TREE_CODE (TREE_OPERAND (cond, 0)) == VAR_DECL || TREE_CODE (TREE_OPERAND (cond, 0)) == PARM_DECL) && (TREE_CODE (TREE_OPERAND (cond, 1)) == VAR_DECL || TREE_CODE (TREE_OPERAND (cond, 1)) == PARM_DECL || TREE_CONSTANT (TREE_OPERAND (cond, 1)))) { var = TREE_OPERAND (cond, 0); val = TREE_OPERAND (cond, 1); } else return; } /* Only work for normal local variables. */ ann = var_ann (var); if (!ann || ann->may_aliases || TREE_ADDRESSABLE (var)) return; if (! TREE_CONSTANT (val)) { ann = var_ann (val); if (!ann || ann->may_aliases || TREE_ADDRESSABLE (val)) return; } /* Ignore floating point variables, since comparison behaves weird for them. */ if (FLOAT_TYPE_P (TREE_TYPE (var))) return; for (bsi = bsi_start (bb); !bsi_end_p (bsi);) { stmt = bsi_stmt (bsi); /* If the THEN/ELSE clause merely assigns a value to a variable/parameter which is already known to contain that value, then remove the useless THEN/ELSE clause. */ if (TREE_CODE (stmt) == MODIFY_EXPR && TREE_OPERAND (stmt, 0) == var && operand_equal_p (val, TREE_OPERAND (stmt, 1), 0)) { bsi_remove (&bsi); continue; } /* Invalidate the var if we encounter something that could modify it. */ if (TREE_CODE (stmt) == ASM_EXPR || TREE_CODE (stmt) == VA_ARG_EXPR || (TREE_CODE (stmt) == MODIFY_EXPR && (TREE_OPERAND (stmt, 0) == var || TREE_OPERAND (stmt, 0) == val || TREE_CODE (TREE_OPERAND (stmt, 1)) == VA_ARG_EXPR))) return; bsi_next (&bsi); } } /* A CFG-aware version of remove_useless_stmts. */ void cfg_remove_useless_stmts (void) { basic_block bb; #ifdef ENABLE_CHECKING verify_flow_info (); #endif FOR_EACH_BB (bb) { cfg_remove_useless_stmts_bb (bb); } } /* Remove PHI nodes associated with basic block BB and all edges out of BB. */ static void remove_phi_nodes_and_edges_for_unreachable_block (basic_block bb) { tree phi; /* Since this block is no longer reachable, we can just delete all of its PHI nodes. */ phi = phi_nodes (bb); while (phi) { tree next = PHI_CHAIN (phi); remove_phi_node (phi, NULL_TREE, bb); phi = next; } /* Remove edges to BB's successors. */ while (bb->succ != NULL) ssa_remove_edge (bb->succ); } /* Remove statements of basic block BB. */ static void remove_bb (basic_block bb) { block_stmt_iterator i; source_locus loc = 0; if (dump_file) { fprintf (dump_file, "Removing basic block %d\n", bb->index); if (dump_flags & TDF_DETAILS) { dump_bb (bb, dump_file, 0); fprintf (dump_file, "\n"); } } /* Remove all the instructions in the block. */ for (i = bsi_start (bb); !bsi_end_p (i); bsi_remove (&i)) { tree stmt = bsi_stmt (i); set_bb_for_stmt (stmt, NULL); /* Don't warn for removed gotos. Gotos are often removed due to jump threading, thus resulting in bogus warnings. Not great, since this way we lose warnings for gotos in the original program that are indeed unreachable. */ if (TREE_CODE (stmt) != GOTO_EXPR && EXPR_HAS_LOCATION (stmt) && !loc) #ifdef USE_MAPPED_LOCATION loc = EXPR_LOCATION (stmt); #else loc = EXPR_LOCUS (stmt); #endif } /* If requested, give a warning that the first statement in the block is unreachable. We walk statements backwards in the loop above, so the last statement we process is the first statement in the block. 
*/ if (warn_notreached && loc) #ifdef USE_MAPPED_LOCATION warning ("%Hwill never be executed", &loc); #else warning ("%Hwill never be executed", loc); #endif remove_phi_nodes_and_edges_for_unreachable_block (bb); } /* Examine BB to determine if it is a forwarding block (a block which only transfers control to a new destination). If BB is a forwarding block, then return the edge leading to the ultimate destination. */ edge tree_block_forwards_to (basic_block bb) { block_stmt_iterator bsi; bb_ann_t ann = bb_ann (bb); tree stmt; /* If this block is not forwardable, then avoid useless work. */ if (! ann->forwardable) return NULL; /* Set this block to not be forwardable. This prevents infinite loops since any block currently under examination is considered non-forwardable. */ ann->forwardable = 0; /* No forwarding is possible if this block is a special block (ENTRY/EXIT), this block has more than one successor, this block's single successor is reached via an abnormal edge, this block has phi nodes, or this block's single successor has phi nodes. */ if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR || !bb->succ || bb->succ->succ_next || bb->succ->dest == EXIT_BLOCK_PTR || (bb->succ->flags & EDGE_ABNORMAL) != 0 || phi_nodes (bb) || phi_nodes (bb->succ->dest)) return NULL; /* Walk past any labels at the start of this block. */ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { stmt = bsi_stmt (bsi); if (TREE_CODE (stmt) != LABEL_EXPR) break; } /* If we reached the end of this block we may be able to optimize this case. */ if (bsi_end_p (bsi)) { edge dest; /* Recursive call to pick up chains of forwarding blocks. */ dest = tree_block_forwards_to (bb->succ->dest); /* If none found, we forward to bb->succ at minimum. */ if (!dest) dest = bb->succ; ann->forwardable = 1; return dest; } /* No forwarding possible. */ return NULL; } /* Try to remove superfluous control structures. */ static bool cleanup_control_flow (void) { basic_block bb; block_stmt_iterator bsi; bool retval = false; tree stmt; FOR_EACH_BB (bb) { bsi = bsi_last (bb); if (bsi_end_p (bsi)) continue; stmt = bsi_stmt (bsi); if (TREE_CODE (stmt) == COND_EXPR || TREE_CODE (stmt) == SWITCH_EXPR) retval |= cleanup_control_expr_graph (bb, bsi); } return retval; } /* Disconnect an unreachable block in the control expression starting at block BB. */ static bool cleanup_control_expr_graph (basic_block bb, block_stmt_iterator bsi) { edge taken_edge; bool retval = false; tree expr = bsi_stmt (bsi), val; if (bb->succ->succ_next) { edge e, next; switch (TREE_CODE (expr)) { case COND_EXPR: val = COND_EXPR_COND (expr); break; case SWITCH_EXPR: val = SWITCH_COND (expr); if (TREE_CODE (val) != INTEGER_CST) return false; break; default: abort (); } taken_edge = find_taken_edge (bb, val); if (!taken_edge) return false; /* Remove all the edges except the one that is always executed. */ for (e = bb->succ; e; e = next) { next = e->succ_next; if (e != taken_edge) { taken_edge->probability += e->probability; taken_edge->count += e->count; ssa_remove_edge (e); retval = true; } } if (taken_edge->probability > REG_BR_PROB_BASE) taken_edge->probability = REG_BR_PROB_BASE; } else taken_edge = bb->succ; bsi_remove (&bsi); taken_edge->flags = EDGE_FALLTHRU; /* We removed some paths from the cfg. */ if (dom_computed[CDI_DOMINATORS] >= DOM_CONS_OK) dom_computed[CDI_DOMINATORS] = DOM_CONS_OK; return retval; } /* Given a control block BB and a constant value VAL, return the edge that will be taken out of the block. 
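For a COND_EXPR ending the block this is the true or the false edge,
   depending on whether VAL is nonzero; for a SWITCH_EXPR it is the edge
   leading to the block of the case label that VAL selects; any other
   control statement simply yields the single successor.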
If VAL does not match a unique edge, NULL is returned. */ edge find_taken_edge (basic_block bb, tree val) { tree stmt; stmt = last_stmt (bb); #if defined ENABLE_CHECKING if (stmt == NULL_TREE || !is_ctrl_stmt (stmt)) abort (); #endif /* If VAL is not a constant, we can't determine which edge might be taken. */ if (val == NULL || !really_constant_p (val)) return NULL; if (TREE_CODE (stmt) == COND_EXPR) return find_taken_edge_cond_expr (bb, val); if (TREE_CODE (stmt) == SWITCH_EXPR) return find_taken_edge_switch_expr (bb, val); return bb->succ; } /* Given a constant value VAL and the entry block BB to a COND_EXPR statement, determine which of the two edges will be taken out of the block. Return NULL if either edge may be taken. */ static edge find_taken_edge_cond_expr (basic_block bb, tree val) { edge true_edge, false_edge; extract_true_false_edges_from_block (bb, &true_edge, &false_edge); /* If both edges of the branch lead to the same basic block, it doesn't matter which edge is taken. */ if (true_edge->dest == false_edge->dest) return true_edge; /* Otherwise, try to determine which branch of the if() will be taken. If VAL is a constant but it can't be reduced to a 0 or a 1, then we don't really know which edge will be taken at runtime. This may happen when comparing addresses (e.g., if (&var1 == 4)). */ if (integer_nonzerop (val)) return true_edge; else if (integer_zerop (val)) return false_edge; else return NULL; } /* Given a constant value VAL and the entry block BB to a SWITCH_EXPR statement, determine which edge will be taken out of the block. Return NULL if any edge may be taken. */ static edge find_taken_edge_switch_expr (basic_block bb, tree val) { tree switch_expr, taken_case; basic_block dest_bb; edge e; if (TREE_CODE (val) != INTEGER_CST) return NULL; switch_expr = last_stmt (bb); taken_case = find_case_label_for_value (switch_expr, val); dest_bb = label_to_block (CASE_LABEL (taken_case)); e = find_edge (bb, dest_bb); if (!e) abort (); return e; } /* Return the CASE_LABEL_EXPR that SWITCH_EXPR will take for VAL. We can make optimal use here of the fact that the case labels are sorted: We can do a binary search for a case matching VAL. */ static tree find_case_label_for_value (tree switch_expr, tree val) { tree vec = SWITCH_LABELS (switch_expr); size_t low, high, n = TREE_VEC_LENGTH (vec); tree default_case = TREE_VEC_ELT (vec, n - 1); for (low = -1, high = n - 1; high - low > 1; ) { size_t i = (high + low) / 2; tree t = TREE_VEC_ELT (vec, i); int cmp; /* Cache the result of comparing CASE_LOW and val. */ cmp = tree_int_cst_compare (CASE_LOW (t), val); if (cmp > 0) high = i; else low = i; if (CASE_HIGH (t) == NULL) { /* A singe-valued case label. */ if (cmp == 0) return t; } else { /* A case range. We can only handle integer ranges. */ if (cmp <= 0 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0) return t; } } return default_case; } /* If all the PHI nodes in DEST have alternatives for E1 and E2 and those alternatives are equal in each of the PHI nodes, then return true, else return false. 
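For instance, given 'x_3 = PHI <x_1 (E1), x_1 (E2), x_2 (E3)>' in DEST,
   the alternatives for E1 and E2 agree, so merging or redirecting one of
   those edges onto the other cannot change the value the PHI produces.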
*/ static bool phi_alternatives_equal (basic_block dest, edge e1, edge e2) { tree phi, val1, val2; int n1, n2; for (phi = phi_nodes (dest); phi; phi = PHI_CHAIN (phi)) { n1 = phi_arg_from_edge (phi, e1); n2 = phi_arg_from_edge (phi, e2); #ifdef ENABLE_CHECKING if (n1 < 0 || n2 < 0) abort (); #endif val1 = PHI_ARG_DEF (phi, n1); val2 = PHI_ARG_DEF (phi, n2); if (!operand_equal_p (val1, val2, 0)) return false; } return true; } /* Computing the Dominance Frontier: As described in Morgan, section 3.5, this may be done simply by walking the dominator tree bottom-up, computing the frontier for the children before the parent. When considering a block B, there are two cases: (1) A flow graph edge leaving B that does not lead to a child of B in the dominator tree must be a block that is either equal to B or not dominated by B. Such blocks belong in the frontier of B. (2) Consider a block X in the frontier of one of the children C of B. If X is not equal to B and is not dominated by B, it is in the frontier of B. */ static void compute_dominance_frontiers_1 (bitmap *frontiers, basic_block bb, sbitmap done) { edge e; basic_block c; SET_BIT (done, bb->index); /* Do the frontier of the children first. Not all children in the dominator tree (blocks dominated by this one) are children in the CFG, so check all blocks. */ for (c = first_dom_son (CDI_DOMINATORS, bb); c; c = next_dom_son (CDI_DOMINATORS, c)) { if (! TEST_BIT (done, c->index)) compute_dominance_frontiers_1 (frontiers, c, done); } /* Find blocks conforming to rule (1) above. */ for (e = bb->succ; e; e = e->succ_next) { if (e->dest == EXIT_BLOCK_PTR) continue; if (get_immediate_dominator (CDI_DOMINATORS, e->dest) != bb) bitmap_set_bit (frontiers[bb->index], e->dest->index); } /* Find blocks conforming to rule (2). */ for (c = first_dom_son (CDI_DOMINATORS, bb); c; c = next_dom_son (CDI_DOMINATORS, c)) { int x; EXECUTE_IF_SET_IN_BITMAP (frontiers[c->index], 0, x, { if (get_immediate_dominator (CDI_DOMINATORS, BASIC_BLOCK (x)) != bb) bitmap_set_bit (frontiers[bb->index], x); }); } } void compute_dominance_frontiers (bitmap *frontiers) { sbitmap done = sbitmap_alloc (last_basic_block); timevar_push (TV_DOM_FRONTIERS); sbitmap_zero (done); compute_dominance_frontiers_1 (frontiers, ENTRY_BLOCK_PTR->succ->dest, done); sbitmap_free (done); timevar_pop (TV_DOM_FRONTIERS); } /*--------------------------------------------------------------------------- Debugging functions ---------------------------------------------------------------------------*/ /* Dump tree-specific information of block BB to file OUTF. */ void tree_dump_bb (basic_block bb, FILE *outf, int indent) { dump_generic_bb (outf, bb, indent, TDF_VOPS); } /* Dump a basic block on stderr. */ void debug_tree_bb (basic_block bb) { dump_bb (bb, stderr, 0); } /* Dump basic block with index N on stderr. */ basic_block debug_tree_bb_n (int n) { debug_tree_bb (BASIC_BLOCK (n)); return BASIC_BLOCK (n); } /* Dump the CFG on stderr. FLAGS are the same used by the tree dumping functions (see TDF_* in tree.h). */ void debug_tree_cfg (int flags) { dump_tree_cfg (stderr, flags); } /* Dump the program showing basic block boundaries on the given FILE. FLAGS are the same used by the tree dumping functions (see TDF_* in tree.h). 
*/ void dump_tree_cfg (FILE *file, int flags) { if (flags & TDF_DETAILS) { const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); fputc ('\n', file); fprintf (file, ";; Function %s\n\n", funcname); fprintf (file, ";; \n%d basic blocks, %d edges, last basic block %d.\n\n", n_basic_blocks, n_edges, last_basic_block); brief_dump_cfg (file); fprintf (file, "\n"); } if (flags & TDF_STATS) dump_cfg_stats (file); dump_function_to_file (current_function_decl, file, flags | TDF_BLOCKS); } /* Dump CFG statistics on FILE. */ void dump_cfg_stats (FILE *file) { static long max_num_merged_labels = 0; unsigned long size, total = 0; long n_edges; basic_block bb; const char * const fmt_str = "%-30s%-13s%12s\n"; const char * const fmt_str_1 = "%-30s%13lu%11lu%c\n"; const char * const fmt_str_3 = "%-43s%11lu%c\n"; const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); fprintf (file, "\nCFG Statistics for %s\n\n", funcname); fprintf (file, "---------------------------------------------------------\n"); fprintf (file, fmt_str, "", " Number of ", "Memory"); fprintf (file, fmt_str, "", " instances ", "used "); fprintf (file, "---------------------------------------------------------\n"); size = n_basic_blocks * sizeof (struct basic_block_def); total += size; fprintf (file, fmt_str_1, "Basic blocks", n_basic_blocks, SCALE (size), LABEL (size)); n_edges = 0; FOR_EACH_BB (bb) { edge e; for (e = bb->succ; e; e = e->succ_next) n_edges++; } size = n_edges * sizeof (struct edge_def); total += size; fprintf (file, fmt_str_1, "Edges", n_edges, SCALE (size), LABEL (size)); size = n_basic_blocks * sizeof (struct bb_ann_d); total += size; fprintf (file, fmt_str_1, "Basic block annotations", n_basic_blocks, SCALE (size), LABEL (size)); fprintf (file, "---------------------------------------------------------\n"); fprintf (file, fmt_str_3, "Total memory used by CFG data", SCALE (total), LABEL (total)); fprintf (file, "---------------------------------------------------------\n"); fprintf (file, "\n"); if (cfg_stats.num_merged_labels > max_num_merged_labels) max_num_merged_labels = cfg_stats.num_merged_labels; fprintf (file, "Coalesced label blocks: %ld (Max so far: %ld)\n", cfg_stats.num_merged_labels, max_num_merged_labels); fprintf (file, "\n"); } /* Dump CFG statistics on stderr. Keep extern so that it's always linked in the final executable. */ void debug_cfg_stats (void) { dump_cfg_stats (stderr); } /* Dump the flowgraph to a .vcg FILE. */ static void tree_cfg2vcg (FILE *file) { edge e; basic_block bb; const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); /* Write the file header. */ fprintf (file, "graph: { title: \"%s\"\n", funcname); fprintf (file, "node: { title: \"ENTRY\" label: \"ENTRY\" }\n"); fprintf (file, "node: { title: \"EXIT\" label: \"EXIT\" }\n"); /* Write blocks and edges. 
*/ for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) { fprintf (file, "edge: { sourcename: \"ENTRY\" targetname: \"%d\"", e->dest->index); if (e->flags & EDGE_FAKE) fprintf (file, " linestyle: dotted priority: 10"); else fprintf (file, " linestyle: solid priority: 100"); fprintf (file, " }\n"); } fputc ('\n', file); FOR_EACH_BB (bb) { enum tree_code head_code, end_code; const char *head_name, *end_name; int head_line = 0; int end_line = 0; tree first = first_stmt (bb); tree last = last_stmt (bb); if (first) { head_code = TREE_CODE (first); head_name = tree_code_name[head_code]; head_line = get_lineno (first); } else head_name = "no-statement"; if (last) { end_code = TREE_CODE (last); end_name = tree_code_name[end_code]; end_line = get_lineno (last); } else end_name = "no-statement"; fprintf (file, "node: { title: \"%d\" label: \"#%d\\n%s (%d)\\n%s (%d)\"}\n", bb->index, bb->index, head_name, head_line, end_name, end_line); for (e = bb->succ; e; e = e->succ_next) { if (e->dest == EXIT_BLOCK_PTR) fprintf (file, "edge: { sourcename: \"%d\" targetname: \"EXIT\"", bb->index); else fprintf (file, "edge: { sourcename: \"%d\" targetname: \"%d\"", bb->index, e->dest->index); if (e->flags & EDGE_FAKE) fprintf (file, " priority: 10 linestyle: dotted"); else fprintf (file, " priority: 100 linestyle: solid"); fprintf (file, " }\n"); } if (bb->next_bb != EXIT_BLOCK_PTR) fputc ('\n', file); } fputs ("}\n\n", file); } /*--------------------------------------------------------------------------- Miscellaneous helpers ---------------------------------------------------------------------------*/ /* Return true if T represents a stmt that always transfers control. */ bool is_ctrl_stmt (tree t) { return (TREE_CODE (t) == COND_EXPR || TREE_CODE (t) == SWITCH_EXPR || TREE_CODE (t) == GOTO_EXPR || TREE_CODE (t) == RETURN_EXPR || TREE_CODE (t) == RESX_EXPR); } /* Return true if T is a statement that may alter the flow of control (e.g., a call to a non-returning function). */ bool is_ctrl_altering_stmt (tree t) { tree call = t; #if defined ENABLE_CHECKING if (t == NULL) abort (); #endif switch (TREE_CODE (t)) { case MODIFY_EXPR: /* A MODIFY_EXPR with a rhs of a call has the characteristics of the call. */ call = TREE_OPERAND (t, 1); if (TREE_CODE (call) != CALL_EXPR) break; /* FALLTHRU */ case CALL_EXPR: /* A non-pure/const CALL_EXPR alters flow control if the current function has nonlocal labels. */ if (TREE_SIDE_EFFECTS (t) && current_function_has_nonlocal_label) return true; /* A CALL_EXPR also alters control flow if it does not return. */ if (call_expr_flags (call) & (ECF_NORETURN | ECF_LONGJMP)) return true; break; default: return false; } /* If a statement can throw, it alters control flow. */ return tree_can_throw_internal (t); } /* Return true if T is a computed goto. */ bool computed_goto_p (tree t) { return (TREE_CODE (t) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (t)) != LABEL_DECL); } /* Checks whether EXPR is a simple local goto. */ bool simple_goto_p (tree expr) { return (TREE_CODE (expr) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (expr)) == LABEL_DECL && (decl_function_context (GOTO_DESTINATION (expr)) == current_function_decl)); } /* Return true if T should start a new basic block. PREV_T is the statement preceding T. It is used when T is a label or a case label. Labels should only start a new basic block if their previous statement wasn't a label. Otherwise, sequence of labels would generate unnecessary basic blocks that only contain a single label. 
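For example, given the sequence 'L1: L2: x = 1;', only L1 opens a new
   block; L2 is folded into the same block (and counted in
   cfg_stats.num_merged_labels), unless L2 is a nonlocal or forced label,
   or L1 is a nonlocal label, in which case a fresh block is still required.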
*/ static inline bool stmt_starts_bb_p (tree t, tree prev_t) { enum tree_code code; if (t == NULL_TREE) return false; /* LABEL_EXPRs start a new basic block only if the preceding statement wasn't a label of the same type. This prevents the creation of consecutive blocks that have nothing but a single label. */ code = TREE_CODE (t); if (code == LABEL_EXPR) { /* Nonlocal and computed GOTO targets always start a new block. */ if (code == LABEL_EXPR && (DECL_NONLOCAL (LABEL_EXPR_LABEL (t)) || FORCED_LABEL (LABEL_EXPR_LABEL (t)))) return true; if (prev_t && TREE_CODE (prev_t) == code) { if (DECL_NONLOCAL (LABEL_EXPR_LABEL (prev_t))) return true; cfg_stats.num_merged_labels++; return false; } else return true; } return false; } /* Return true if T should end a basic block. */ bool stmt_ends_bb_p (tree t) { return is_ctrl_stmt (t) || is_ctrl_altering_stmt (t); } /* Add gotos that used to be represented implicitly in the CFG. */ void disband_implicit_edges (void) { basic_block bb; block_stmt_iterator last; edge e; tree stmt, label; FOR_EACH_BB (bb) { last = bsi_last (bb); stmt = last_stmt (bb); if (stmt && TREE_CODE (stmt) == COND_EXPR) { /* Remove superfluous gotos from COND_EXPR branches. Moved from cfg_remove_useless_stmts here since it violates the invariants for tree--cfg correspondence and thus fits better here where we do it anyway. */ for (e = bb->succ; e; e = e->succ_next) { if (e->dest != bb->next_bb) continue; if (e->flags & EDGE_TRUE_VALUE) COND_EXPR_THEN (stmt) = build_empty_stmt (); else if (e->flags & EDGE_FALSE_VALUE) COND_EXPR_ELSE (stmt) = build_empty_stmt (); else abort (); e->flags |= EDGE_FALLTHRU; } continue; } if (stmt && TREE_CODE (stmt) == RETURN_EXPR) { /* Remove the RETURN_EXPR if we may fall though to the exit instead. */ if (!bb->succ || bb->succ->succ_next || bb->succ->dest != EXIT_BLOCK_PTR) abort (); if (bb->next_bb == EXIT_BLOCK_PTR && !TREE_OPERAND (stmt, 0)) { bsi_remove (&last); bb->succ->flags |= EDGE_FALLTHRU; } continue; } /* There can be no fallthru edge if the last statement is a control one. */ if (stmt && is_ctrl_stmt (stmt)) continue; /* Find a fallthru edge and emit the goto if necessary. */ for (e = bb->succ; e; e = e->succ_next) if (e->flags & EDGE_FALLTHRU) break; if (!e || e->dest == bb->next_bb) continue; if (e->dest == EXIT_BLOCK_PTR) abort (); label = tree_block_label (e->dest); stmt = build1 (GOTO_EXPR, void_type_node, label); #ifdef USE_MAPPED_LOCATION SET_EXPR_LOCATION (stmt, e->goto_locus); #else SET_EXPR_LOCUS (stmt, e->goto_locus); #endif bsi_insert_after (&last, stmt, BSI_NEW_STMT); e->flags &= ~EDGE_FALLTHRU; } } /* Remove block annotations and other datastructures. */ void delete_tree_cfg_annotations (void) { basic_block bb; if (n_basic_blocks > 0) free_blocks_annotations (); label_to_block_map = NULL; free_rbi_pool (); FOR_EACH_BB (bb) bb->rbi = NULL; } /* Return the first statement in basic block BB. */ tree first_stmt (basic_block bb) { block_stmt_iterator i = bsi_start (bb); return !bsi_end_p (i) ? bsi_stmt (i) : NULL_TREE; } /* Return the last statement in basic block BB. */ tree last_stmt (basic_block bb) { block_stmt_iterator b = bsi_last (bb); return !bsi_end_p (b) ? bsi_stmt (b) : NULL_TREE; } /* Return a pointer to the last statement in block BB. */ tree * last_stmt_ptr (basic_block bb) { block_stmt_iterator last = bsi_last (bb); return !bsi_end_p (last) ? bsi_stmt_ptr (last) : NULL; } /* Return the last statement of an otherwise empty block. Return NULL if the block is totally empty, or if it contains more than one statement. 
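So a block consisting of 'L1: return x;' yields the RETURN_EXPR, while a
   block holding two executable statements, or anything other than a single
   label before the last statement, yields NULL_TREE.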
*/ tree last_and_only_stmt (basic_block bb) { block_stmt_iterator i = bsi_last (bb); tree last, prev; if (bsi_end_p (i)) return NULL_TREE; last = bsi_stmt (i); bsi_prev (&i); if (bsi_end_p (i)) return last; /* Empty statements should no longer appear in the instruction stream. Everything that might have appeared before should be deleted by remove_useless_stmts, and the optimizers should just bsi_remove instead of smashing with build_empty_stmt. Thus the only thing that should appear here in a block containing one executable statement is a label. */ prev = bsi_stmt (i); if (TREE_CODE (prev) == LABEL_EXPR) return last; else return NULL_TREE; } /* Mark BB as the basic block holding statement T. */ void set_bb_for_stmt (tree t, basic_block bb) { if (TREE_CODE (t) == STATEMENT_LIST) { tree_stmt_iterator i; for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i)) set_bb_for_stmt (tsi_stmt (i), bb); } else { stmt_ann_t ann = get_stmt_ann (t); ann->bb = bb; /* If the statement is a label, add the label to block-to-labels map so that we can speed up edge creation for GOTO_EXPRs. */ if (TREE_CODE (t) == LABEL_EXPR) { int uid; t = LABEL_EXPR_LABEL (t); uid = LABEL_DECL_UID (t); if (uid == -1) { LABEL_DECL_UID (t) = uid = cfun->last_label_uid++; if (VARRAY_SIZE (label_to_block_map) <= (unsigned) uid) VARRAY_GROW (label_to_block_map, 3 * uid / 2); } else { #ifdef ENABLE_CHECKING /* We're moving an existing label. Make sure that we've removed it from the old block. */ if (bb && VARRAY_BB (label_to_block_map, uid)) abort (); #endif } VARRAY_BB (label_to_block_map, uid) = bb; } } } /* Insert statement (or statement list) T before the statement pointed-to by iterator I. M specifies how to update iterator I after insertion (see enum bsi_iterator_update). */ void bsi_insert_before (block_stmt_iterator *i, tree t, enum bsi_iterator_update m) { set_bb_for_stmt (t, i->bb); modify_stmt (t); tsi_link_before (&i->tsi, t, m); } /* Insert statement (or statement list) T after the statement pointed-to by iterator I. M specifies how to update iterator I after insertion (see enum bsi_iterator_update). */ void bsi_insert_after (block_stmt_iterator *i, tree t, enum bsi_iterator_update m) { set_bb_for_stmt (t, i->bb); modify_stmt (t); tsi_link_after (&i->tsi, t, m); } /* Remove the statement pointed to by iterator I. The iterator is updated to the next statement. */ void bsi_remove (block_stmt_iterator *i) { tree t = bsi_stmt (*i); set_bb_for_stmt (t, NULL); modify_stmt (t); tsi_delink (&i->tsi); } /* Move the statement at FROM so it comes right after the statement at TO. */ void bsi_move_after (block_stmt_iterator *from, block_stmt_iterator *to) { tree stmt = bsi_stmt (*from); bsi_remove (from); bsi_insert_after (to, stmt, BSI_SAME_STMT); } /* Move the statement at FROM so it comes right before the statement at TO. */ void bsi_move_before (block_stmt_iterator *from, block_stmt_iterator *to) { tree stmt = bsi_stmt (*from); bsi_remove (from); bsi_insert_before (to, stmt, BSI_SAME_STMT); } /* Move the statement at FROM to the end of basic block BB. */ void bsi_move_to_bb_end (block_stmt_iterator *from, basic_block bb) { block_stmt_iterator last = bsi_last (bb); /* Have to check bsi_end_p because it could be an empty block. */ if (!bsi_end_p (last) && is_ctrl_stmt (bsi_stmt (last))) bsi_move_before (from, &last); else bsi_move_after (from, &last); } /* Replace the contents of the statement pointed to by iterator BSI with STMT. 
If PRESERVE_EH_INFO is true, the exception handling information of the original statement is preserved. */ void bsi_replace (const block_stmt_iterator *bsi, tree stmt, bool preserve_eh_info) { int eh_region; tree orig_stmt = bsi_stmt (*bsi); SET_EXPR_LOCUS (stmt, EXPR_LOCUS (orig_stmt)); set_bb_for_stmt (stmt, bsi->bb); /* Preserve EH region information from the original statement, if requested by the caller. */ if (preserve_eh_info) { eh_region = lookup_stmt_eh_region (orig_stmt); if (eh_region >= 0) add_stmt_to_eh_region (stmt, eh_region); } *bsi_stmt_ptr (*bsi) = stmt; modify_stmt (stmt); } /* Insert the statement pointed-to by BSI into edge E. Every attempt is made to place the statement in an existing basic block, but sometimes that isn't possible. When it isn't possible, the edge is split and the statement is added to the new block. In all cases, the returned *BSI points to the correct location. The return value is true if insertion should be done after the location, or false if it should be done before the location. */ static bool tree_find_edge_insert_loc (edge e, block_stmt_iterator *bsi) { basic_block dest, src; tree tmp; dest = e->dest; restart: /* If the destination has one predecessor which has no PHI nodes, insert there. Except for the exit block. The requirement for no PHI nodes could be relaxed. Basically we would have to examine the PHIs to prove that none of them used the value set by the statement we want to insert on E. That hardly seems worth the effort. */ if (dest->pred->pred_next == NULL && ! phi_nodes (dest) && dest != EXIT_BLOCK_PTR) { *bsi = bsi_start (dest); if (bsi_end_p (*bsi)) return true; /* Make sure we insert after any leading labels. */ tmp = bsi_stmt (*bsi); while (TREE_CODE (tmp) == LABEL_EXPR) { bsi_next (bsi); if (bsi_end_p (*bsi)) break; tmp = bsi_stmt (*bsi); } if (bsi_end_p (*bsi)) { *bsi = bsi_last (dest); return true; } else return false; } /* If the source has one successor, the edge is not abnormal and the last statement does not end a basic block, insert there. Except for the entry block. */ src = e->src; if ((e->flags & EDGE_ABNORMAL) == 0 && src->succ->succ_next == NULL && src != ENTRY_BLOCK_PTR) { *bsi = bsi_last (src); if (bsi_end_p (*bsi)) return true; tmp = bsi_stmt (*bsi); if (!stmt_ends_bb_p (tmp)) return true; /* Insert code just before returning the value. We may need to decompose the return in the case it contains non-trivial operand. */ if (TREE_CODE (tmp) == RETURN_EXPR) { tree op = TREE_OPERAND (tmp, 0); if (!is_gimple_val (op)) { if (TREE_CODE (op) != MODIFY_EXPR) abort (); bsi_insert_before (bsi, op, BSI_NEW_STMT); TREE_OPERAND (tmp, 0) = TREE_OPERAND (op, 0); } bsi_prev (bsi); return true; } } /* Otherwise, create a new basic block, and split this edge. */ dest = split_edge (e); e = dest->pred; goto restart; } /* This routine will commit all pending edge insertions, creating any new basic blocks which are necessary. If specified, NEW_BLOCKS returns a count of the number of new basic blocks which were created. */ void bsi_commit_edge_inserts (int *new_blocks) { basic_block bb; edge e; int blocks; blocks = n_basic_blocks; bsi_commit_edge_inserts_1 (ENTRY_BLOCK_PTR->succ); FOR_EACH_BB (bb) for (e = bb->succ; e; e = e->succ_next) bsi_commit_edge_inserts_1 (e); if (new_blocks) *new_blocks = n_basic_blocks - blocks; } /* Commit insertions pending at edge E. 
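The usual pattern (a sketch of the intended use, not a requirement) is

     bsi_insert_on_edge (e, stmt);    queue STMT on PENDING_STMT (e)
     ...
     bsi_commit_edge_inserts (NULL);  place every queued statement,
                                      splitting edges where needed

   so nothing is placed into a block, and no block is created, until the
   commit runs.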
*/ static void bsi_commit_edge_inserts_1 (edge e) { if (PENDING_STMT (e)) { block_stmt_iterator bsi; tree stmt = PENDING_STMT (e); PENDING_STMT (e) = NULL_TREE; if (tree_find_edge_insert_loc (e, &bsi)) bsi_insert_after (&bsi, stmt, BSI_NEW_STMT); else bsi_insert_before (&bsi, stmt, BSI_NEW_STMT); } } /* Add STMT to the pending list of edge E. No actual insertion is made until a call to bsi_commit_edge_inserts () is made. */ void bsi_insert_on_edge (edge e, tree stmt) { append_to_statement_list (stmt, &PENDING_STMT (e)); } /* Specialized edge insertion for SSA-PRE. FIXME: This should probably disappear. The only reason it's here is because PRE needs the call to tree_find_edge_insert_loc(). */ void pre_insert_on_edge (edge e, tree stmt); void pre_insert_on_edge (edge e, tree stmt) { block_stmt_iterator bsi; if (PENDING_STMT (e)) abort (); if (tree_find_edge_insert_loc (e, &bsi)) bsi_insert_after (&bsi, stmt, BSI_NEW_STMT); else bsi_insert_before (&bsi, stmt, BSI_NEW_STMT); } /*--------------------------------------------------------------------------- Tree specific functions for CFG manipulation ---------------------------------------------------------------------------*/ /* Split a (typically critical) edge EDGE_IN. Return the new block. Abort on abnormal edges. */ static basic_block tree_split_edge (edge edge_in) { basic_block new_bb, after_bb, dest, src; edge new_edge, e; tree phi; int i, num_elem; /* Abnormal edges cannot be split. */ if (edge_in->flags & EDGE_ABNORMAL) abort (); src = edge_in->src; dest = edge_in->dest; /* Place the new block in the block list. Try to keep the new block near its "logical" location. This is of most help to humans looking at debugging dumps. */ for (e = dest->pred; e; e = e->pred_next) if (e->src->next_bb == dest) break; if (!e) after_bb = dest->prev_bb; else after_bb = edge_in->src; new_bb = create_empty_bb (after_bb); new_edge = make_edge (new_bb, dest, EDGE_FALLTHRU); /* Find all the PHI arguments on the original edge, and change them to the new edge. Do it before redirection, so that the argument does not get removed. */ for (phi = phi_nodes (dest); phi; phi = PHI_CHAIN (phi)) { num_elem = PHI_NUM_ARGS (phi); for (i = 0; i < num_elem; i++) if (PHI_ARG_EDGE (phi, i) == edge_in) { PHI_ARG_EDGE (phi, i) = new_edge; break; } } if (!redirect_edge_and_branch (edge_in, new_bb)) abort (); if (PENDING_STMT (edge_in)) abort (); return new_bb; } /* Return true when BB has label LABEL in it. */ static bool has_label_p (basic_block bb, tree label) { block_stmt_iterator bsi; for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { tree stmt = bsi_stmt (bsi); if (TREE_CODE (stmt) != LABEL_EXPR) return false; if (LABEL_EXPR_LABEL (stmt) == label) return true; } return false; } /* Callback for walk_tree, check that all elements with address taken are properly noticed as such. */ static tree verify_expr (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) { tree t = *tp, x; if (TYPE_P (t)) *walk_subtrees = 0; /* Check operand N for being valid GIMPLE and give error MSG if not. We check for constants explicitly since they are not considered gimple invariants if they overflowed. 
*/ #define CHECK_OP(N, MSG) \ do { if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, N))) != 'c' \ && !is_gimple_val (TREE_OPERAND (t, N))) \ { error (MSG); return TREE_OPERAND (t, N); }} while (0) switch (TREE_CODE (t)) { case SSA_NAME: if (SSA_NAME_IN_FREE_LIST (t)) { error ("SSA name in freelist but still referenced"); return *tp; } break; case MODIFY_EXPR: x = TREE_OPERAND (t, 0); if (TREE_CODE (x) == BIT_FIELD_REF && is_gimple_reg (TREE_OPERAND (x, 0))) { error ("GIMPLE register modified with BIT_FIELD_REF"); return t; } break; case ADDR_EXPR: /* Skip any references (they will be checked when we recurse down the tree) and ensure that any variable used as a prefix is marked addressable. */ for (x = TREE_OPERAND (t, 0); (handled_component_p (x) || TREE_CODE (x) == REALPART_EXPR || TREE_CODE (x) == IMAGPART_EXPR); x = TREE_OPERAND (x, 0)) ; if (TREE_CODE (x) != VAR_DECL && TREE_CODE (x) != PARM_DECL) return NULL; if (!TREE_ADDRESSABLE (x)) { error ("address taken, but ADDRESSABLE bit not set"); return x; } break; case COND_EXPR: x = TREE_OPERAND (t, 0); if (TREE_CODE (TREE_TYPE (x)) != BOOLEAN_TYPE) { error ("non-boolean used in condition"); return x; } break; case NOP_EXPR: case CONVERT_EXPR: case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: case FLOAT_EXPR: case NEGATE_EXPR: case ABS_EXPR: case BIT_NOT_EXPR: case NON_LVALUE_EXPR: case TRUTH_NOT_EXPR: CHECK_OP (0, "Invalid operand to unary operator"); break; case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case ARRAY_REF: case ARRAY_RANGE_REF: case BIT_FIELD_REF: case VIEW_CONVERT_EXPR: /* We have a nest of references. Verify that each of the operands that determine where to reference is either a constant or a variable, verify that the base is valid, and then show we've already checked the subtrees. */ while (TREE_CODE (t) == REALPART_EXPR || TREE_CODE (t) == IMAGPART_EXPR || handled_component_p (t)) { if (TREE_CODE (t) == COMPONENT_REF && TREE_OPERAND (t, 2)) CHECK_OP (2, "Invalid COMPONENT_REF offset operator"); else if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { CHECK_OP (1, "Invalid array index."); if (TREE_OPERAND (t, 2)) CHECK_OP (2, "Invalid array lower bound."); if (TREE_OPERAND (t, 3)) CHECK_OP (3, "Invalid array stride."); } else if (TREE_CODE (t) == BIT_FIELD_REF) { CHECK_OP (1, "Invalid operand to BIT_FIELD_REF"); CHECK_OP (2, "Invalid operand to BIT_FIELD_REF"); } t = TREE_OPERAND (t, 0); } if (TREE_CODE_CLASS (TREE_CODE (t)) != 'c' && !is_gimple_lvalue (t)) { error ("Invalid reference prefix."); return t; } *walk_subtrees = 0; break; case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNORDERED_EXPR: case ORDERED_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case MIN_EXPR: case MAX_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: CHECK_OP (0, "Invalid operand to binary operator"); CHECK_OP (1, "Invalid operand to binary operator"); break; default: break; } return NULL; #undef CHECK_OP } /* Verify STMT, return true if STMT is not in GIMPLE form. TODO: Implement type checking. 
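LAST_IN_BLOCK tells us whether STMT is the final statement of its basic
   block; a statement that can throw internally is only valid in that
   position, and the check below rejects throwing statements found in the
   middle of a block.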
*/ static bool verify_stmt (tree stmt, bool last_in_block) { tree addr; if (!is_gimple_stmt (stmt)) { error ("Is not a valid GIMPLE statement."); goto fail; } addr = walk_tree (&stmt, verify_expr, NULL, NULL); if (addr) { debug_generic_stmt (addr); return true; } /* If the statement is marked as part of an EH region, then it is expected that the statement could throw. Verify that when we have optimizations that simplify statements such that we prove that they cannot throw, that we update other data structures to match. */ if (lookup_stmt_eh_region (stmt) >= 0) { if (!tree_could_throw_p (stmt)) { error ("Statement marked for throw, but doesn't."); goto fail; } if (!last_in_block && tree_can_throw_internal (stmt)) { error ("Statement marked for throw in middle of block."); goto fail; } } return false; fail: debug_generic_stmt (stmt); return true; } /* Return true when the T can be shared. */ static bool tree_node_can_be_shared (tree t) { if (TYPE_P (t) || DECL_P (t) /* We check for constants explicitly since they are not considered gimple invariants if they overflowed. */ || TREE_CODE_CLASS (TREE_CODE (t)) == 'c' || is_gimple_min_invariant (t) || TREE_CODE (t) == SSA_NAME) return true; while (((TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) /* We check for constants explicitly since they are not considered gimple invariants if they overflowed. */ && (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 1))) == 'c' || is_gimple_min_invariant (TREE_OPERAND (t, 1)))) || (TREE_CODE (t) == COMPONENT_REF || TREE_CODE (t) == REALPART_EXPR || TREE_CODE (t) == IMAGPART_EXPR)) t = TREE_OPERAND (t, 0); if (DECL_P (t)) return true; return false; } /* Called via walk_trees. Verify tree sharing. */ static tree verify_node_sharing (tree * tp, int *walk_subtrees, void *data) { htab_t htab = (htab_t) data; void **slot; if (tree_node_can_be_shared (*tp)) { *walk_subtrees = false; return NULL; } slot = htab_find_slot (htab, *tp, INSERT); if (*slot) return *slot; *slot = *tp; return NULL; } /* Verify the GIMPLE statement chain. */ void verify_stmts (void) { basic_block bb; block_stmt_iterator bsi; bool err = false; htab_t htab; tree addr; timevar_push (TV_TREE_STMT_VERIFY); htab = htab_create (37, htab_hash_pointer, htab_eq_pointer, NULL); FOR_EACH_BB (bb) { tree phi; int i; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { int phi_num_args = PHI_NUM_ARGS (phi); for (i = 0; i < phi_num_args; i++) { tree t = PHI_ARG_DEF (phi, i); tree addr; /* Addressable variables do have SSA_NAMEs but they are not considered gimple values. */ if (TREE_CODE (t) != SSA_NAME && TREE_CODE (t) != FUNCTION_DECL && !is_gimple_val (t)) { error ("PHI def is not a GIMPLE value"); debug_generic_stmt (phi); debug_generic_stmt (t); err |= true; } addr = walk_tree (&t, verify_expr, NULL, NULL); if (addr) { debug_generic_stmt (addr); err |= true; } addr = walk_tree (&t, verify_node_sharing, htab, NULL); if (addr) { error ("Incorrect sharing of tree nodes"); debug_generic_stmt (phi); debug_generic_stmt (addr); err |= true; } } } for (bsi = bsi_start (bb); !bsi_end_p (bsi); ) { tree stmt = bsi_stmt (bsi); bsi_next (&bsi); err |= verify_stmt (stmt, bsi_end_p (bsi)); addr = walk_tree (&stmt, verify_node_sharing, htab, NULL); if (addr) { error ("Incorrect sharing of tree nodes"); debug_generic_stmt (stmt); debug_generic_stmt (addr); err |= true; } } } if (err) internal_error ("verify_stmts failed."); htab_delete (htab); timevar_pop (TV_TREE_STMT_VERIFY); } /* Verifies that the flow information is OK. 
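Roughly: ENTRY and EXIT must carry no statement lists, no fallthru edge
   may reach EXIT, labels may appear only at the start of their own block
   and must map back to it, control statements may appear only at the end
   of a block, and the outgoing edge flags must match the COND_EXPR,
   GOTO_EXPR, RETURN_EXPR or SWITCH_EXPR that ends the block.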
*/ static int tree_verify_flow_info (void) { int err = 0; basic_block bb; block_stmt_iterator bsi; tree stmt; edge e; if (ENTRY_BLOCK_PTR->stmt_list) { error ("ENTRY_BLOCK has a statement list associated with it\n"); err = 1; } if (EXIT_BLOCK_PTR->stmt_list) { error ("EXIT_BLOCK has a statement list associated with it\n"); err = 1; } for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) if (e->flags & EDGE_FALLTHRU) { error ("Fallthru to exit from bb %d\n", e->src->index); err = 1; } FOR_EACH_BB (bb) { bool found_ctrl_stmt = false; /* Skip labels on the start of basic block. */ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { if (TREE_CODE (bsi_stmt (bsi)) != LABEL_EXPR) break; if (label_to_block (LABEL_EXPR_LABEL (bsi_stmt (bsi))) != bb) { error ("Label %s to block does not match in bb %d\n", IDENTIFIER_POINTER (DECL_NAME (bsi_stmt (bsi))), bb->index); err = 1; } if (decl_function_context (LABEL_EXPR_LABEL (bsi_stmt (bsi))) != current_function_decl) { error ("Label %s has incorrect context in bb %d\n", IDENTIFIER_POINTER (DECL_NAME (bsi_stmt (bsi))), bb->index); err = 1; } } /* Verify that body of basic block BB is free of control flow. */ for (; !bsi_end_p (bsi); bsi_next (&bsi)) { tree stmt = bsi_stmt (bsi); if (found_ctrl_stmt) { error ("Control flow in the middle of basic block %d\n", bb->index); err = 1; } if (stmt_ends_bb_p (stmt)) found_ctrl_stmt = true; if (TREE_CODE (stmt) == LABEL_EXPR) { error ("Label %s in the middle of basic block %d\n", IDENTIFIER_POINTER (DECL_NAME (stmt)), bb->index); err = 1; } } bsi = bsi_last (bb); if (bsi_end_p (bsi)) continue; stmt = bsi_stmt (bsi); if (is_ctrl_stmt (stmt)) { for (e = bb->succ; e; e = e->succ_next) if (e->flags & EDGE_FALLTHRU) { error ("Fallthru edge after a control statement in bb %d \n", bb->index); err = 1; } } switch (TREE_CODE (stmt)) { case COND_EXPR: { edge true_edge; edge false_edge; if (TREE_CODE (COND_EXPR_THEN (stmt)) != GOTO_EXPR || TREE_CODE (COND_EXPR_ELSE (stmt)) != GOTO_EXPR) { error ("Structured COND_EXPR at the end of bb %d\n", bb->index); err = 1; } extract_true_false_edges_from_block (bb, &true_edge, &false_edge); if (!true_edge || !false_edge || !(true_edge->flags & EDGE_TRUE_VALUE) || !(false_edge->flags & EDGE_FALSE_VALUE) || (true_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL)) || (false_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL)) || bb->succ->succ_next->succ_next) { error ("Wrong outgoing edge flags at end of bb %d\n", bb->index); err = 1; } if (!has_label_p (true_edge->dest, GOTO_DESTINATION (COND_EXPR_THEN (stmt)))) { error ("`then' label does not match edge at end of bb %d\n", bb->index); err = 1; } if (!has_label_p (false_edge->dest, GOTO_DESTINATION (COND_EXPR_ELSE (stmt)))) { error ("`else' label does not match edge at end of bb %d\n", bb->index); err = 1; } } break; case GOTO_EXPR: if (simple_goto_p (stmt)) { error ("Explicit goto at end of bb %d\n", bb->index); err = 1; } else { /* FIXME. We should double check that the labels in the destination blocks have their address taken. 
*/ for (e = bb->succ; e; e = e->succ_next) if ((e->flags & (EDGE_FALLTHRU | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)) || !(e->flags & EDGE_ABNORMAL)) { error ("Wrong outgoing edge flags at end of bb %d\n", bb->index); err = 1; } } break; case RETURN_EXPR: if (!bb->succ || bb->succ->succ_next || (bb->succ->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) { error ("Wrong outgoing edge flags at end of bb %d\n", bb->index); err = 1; } if (bb->succ->dest != EXIT_BLOCK_PTR) { error ("Return edge does not point to exit in bb %d\n", bb->index); err = 1; } break; case SWITCH_EXPR: { tree prev; edge e; size_t i, n; tree vec; vec = SWITCH_LABELS (stmt); n = TREE_VEC_LENGTH (vec); /* Mark all the destination basic blocks. */ for (i = 0; i < n; ++i) { tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i)); basic_block label_bb = label_to_block (lab); if (label_bb->aux && label_bb->aux != (void *)1) abort (); label_bb->aux = (void *)1; } /* Verify that the case labels are sorted. */ prev = TREE_VEC_ELT (vec, 0); for (i = 1; i < n - 1; ++i) { tree c = TREE_VEC_ELT (vec, i); if (! CASE_LOW (c)) { error ("Found default case not at end of case vector"); err = 1; continue; } if (! tree_int_cst_lt (CASE_LOW (prev), CASE_LOW (c))) { error ("Case labels not sorted:\n "); print_generic_expr (stderr, prev, 0); fprintf (stderr," is greater than "); print_generic_expr (stderr, c, 0); fprintf (stderr," but comes before it.\n"); err = 1; } prev = c; } if (CASE_LOW (TREE_VEC_ELT (vec, n - 1))) { error ("No default case found at end of case vector"); err = 1; } for (e = bb->succ; e; e = e->succ_next) { if (!e->dest->aux) { error ("Extra outgoing edge %d->%d\n", bb->index, e->dest->index); err = 1; } e->dest->aux = (void *)2; if ((e->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) { error ("Wrong outgoing edge flags at end of bb %d\n", bb->index); err = 1; } } /* Check that we have all of them. */ for (i = 0; i < n; ++i) { tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i)); basic_block label_bb = label_to_block (lab); if (label_bb->aux != (void *)2) { error ("Missing edge %i->%i\n", bb->index, label_bb->index); err = 1; } } for (e = bb->succ; e; e = e->succ_next) e->dest->aux = (void *)0; } default: ; } } if (dom_computed[CDI_DOMINATORS] >= DOM_NO_FAST_QUERY) verify_dominators (CDI_DOMINATORS); return err; } /* Updates phi nodes after creating forwarder block joined by edge FALLTHRU. */ static void tree_make_forwarder_block (edge fallthru) { edge e; basic_block dummy, bb; tree phi, new_phi, var, prev, next; dummy = fallthru->src; bb = fallthru->dest; if (!bb->pred->pred_next) return; /* If we redirected a branch we must create new phi nodes at the start of BB. */ for (phi = phi_nodes (dummy); phi; phi = PHI_CHAIN (phi)) { var = PHI_RESULT (phi); new_phi = create_phi_node (var, bb); SSA_NAME_DEF_STMT (var) = new_phi; SET_PHI_RESULT (phi, make_ssa_name (SSA_NAME_VAR (var), phi)); add_phi_arg (&new_phi, PHI_RESULT (phi), fallthru); } /* Ensure that the PHI node chain is in the same order. */ prev = NULL; for (phi = phi_nodes (bb); phi; phi = next) { next = PHI_CHAIN (phi); PHI_CHAIN (phi) = prev; prev = phi; } set_phi_nodes (bb, prev); /* Add the arguments we have stored on edges. 
*/ for (e = bb->pred; e; e = e->pred_next) { if (e == fallthru) continue; for (phi = phi_nodes (bb), var = PENDING_STMT (e); phi; phi = PHI_CHAIN (phi), var = TREE_CHAIN (var)) add_phi_arg (&phi, TREE_VALUE (var), e); PENDING_STMT (e) = NULL; } } /* Return true if basic block BB does nothing except pass control flow to another block and that we can safely insert a label at the start of the successor block. */ static bool tree_forwarder_block_p (basic_block bb) { block_stmt_iterator bsi; edge e; /* If we have already determined that this block is not forwardable, then no further checks are necessary. */ if (! bb_ann (bb)->forwardable) return false; /* BB must have a single outgoing normal edge. Otherwise it can not be a forwarder block. */ if (!bb->succ || bb->succ->succ_next || bb->succ->dest == EXIT_BLOCK_PTR || (bb->succ->flags & EDGE_ABNORMAL) || bb == ENTRY_BLOCK_PTR) { bb_ann (bb)->forwardable = 0; return false; } /* Successors of the entry block are not forwarders. */ for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) if (e->dest == bb) { bb_ann (bb)->forwardable = 0; return false; } /* BB can not have any PHI nodes. This could potentially be relaxed early in compilation if we re-rewrote the variables appearing in any PHI nodes in forwarder blocks. */ if (phi_nodes (bb)) { bb_ann (bb)->forwardable = 0; return false; } /* Now walk through the statements. We can ignore labels, anything else means this is not a forwarder block. */ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { tree stmt = bsi_stmt (bsi); switch (TREE_CODE (stmt)) { case LABEL_EXPR: if (DECL_NONLOCAL (LABEL_EXPR_LABEL (stmt))) return false; break; default: bb_ann (bb)->forwardable = 0; return false; } } return true; } /* Thread jumps over empty statements. This code should _not_ thread over obviously equivalent conditions as that requires nontrivial updates to the SSA graph. */ static bool thread_jumps (void) { edge e, next, last, old; basic_block bb, dest, tmp; tree phi; int arg; bool retval = false; FOR_EACH_BB (bb) bb_ann (bb)->forwardable = 1; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { /* Don't waste time on unreachable blocks. */ if (!bb->pred) continue; /* Nor on forwarders. */ if (tree_forwarder_block_p (bb)) continue; /* This block is now part of a forwarding path, mark it as not forwardable so that we can detect loops. This bit will be reset below. */ bb_ann (bb)->forwardable = 0; /* Examine each of our block's successors to see if it is forwardable. */ for (e = bb->succ; e; e = next) { next = e->succ_next; /* If the edge is abnormal or its destination is not forwardable, then there's nothing to do. */ if ((e->flags & EDGE_ABNORMAL) || !tree_forwarder_block_p (e->dest)) continue; /* Now walk through as many forwarder block as possible to find the ultimate destination we want to thread our jump to. */ last = e->dest->succ; bb_ann (e->dest)->forwardable = 0; for (dest = e->dest->succ->dest; tree_forwarder_block_p (dest); last = dest->succ, dest = dest->succ->dest) { /* An infinite loop detected. We redirect the edge anyway, so that the loop is shrunk into single basic block. */ if (!bb_ann (dest)->forwardable) break; if (dest->succ->dest == EXIT_BLOCK_PTR) break; bb_ann (dest)->forwardable = 0; } /* Reset the forwardable marks to 1. */ for (tmp = e->dest; tmp != dest; tmp = tmp->succ->dest) bb_ann (tmp)->forwardable = 1; if (dest == e->dest) continue; old = find_edge (bb, dest); if (old) { /* If there already is an edge, check whether the values in phi nodes differ. 
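If they do differ, the PHI arguments already present for OLD cannot also
   serve the redirected jump (which would have to supply the value flowing
   in via LAST), so we back off and thread only as far as LAST->src, the
   last forwarder visited before DEST, which has no PHI nodes to update.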
*/ if (!phi_alternatives_equal (dest, last, old)) { /* The previous block is forwarder. Redirect our jump to that target instead since we know it has no PHI nodes that will need updating. */ dest = last->src; /* That might mean that no forwarding at all is possible. */ if (dest == e->dest) continue; old = find_edge (bb, dest); } } /* Perform the redirection. */ retval = true; e = redirect_edge_and_branch (e, dest); /* TODO -- updating dominators in this case is simple. */ free_dominance_info (CDI_DOMINATORS); if (!old) { /* Update PHI nodes. We know that the new argument should have the same value as the argument associated with LAST. Otherwise we would have changed our target block above. */ for (phi = phi_nodes (dest); phi; phi = PHI_CHAIN (phi)) { arg = phi_arg_from_edge (phi, last); if (arg < 0) abort (); add_phi_arg (&phi, PHI_ARG_DEF (phi, arg), e); } } } /* Reset the forwardable bit on our block since it's no longer in a forwarding chain path. */ bb_ann (bb)->forwardable = 1; } return retval; } /* Return a non-special label in the head of basic block BLOCK. Create one if it doesn't exist. */ tree tree_block_label (basic_block bb) { block_stmt_iterator i, s = bsi_start (bb); bool first = true; tree label, stmt; for (i = s; !bsi_end_p (i); first = false, bsi_next (&i)) { stmt = bsi_stmt (i); if (TREE_CODE (stmt) != LABEL_EXPR) break; label = LABEL_EXPR_LABEL (stmt); if (!DECL_NONLOCAL (label)) { if (!first) bsi_move_before (&i, &s); return label; } } label = create_artificial_label (); stmt = build1 (LABEL_EXPR, void_type_node, label); bsi_insert_before (&s, stmt, BSI_NEW_STMT); return label; } /* Attempt to perform edge redirection by replacing a possibly complex jump instruction by a goto or by removing the jump completely. This can apply only if all edges now point to the same block. The parameters and return values are equivalent to redirect_edge_and_branch. */ static edge tree_try_redirect_by_replacing_jump (edge e, basic_block target) { basic_block src = e->src; edge tmp; block_stmt_iterator b; tree stmt; /* Verify that all targets will be TARGET. */ for (tmp = src->succ; tmp; tmp = tmp->succ_next) if (tmp->dest != target && tmp != e) break; if (tmp) return NULL; b = bsi_last (src); if (bsi_end_p (b)) return NULL; stmt = bsi_stmt (b); if (TREE_CODE (stmt) == COND_EXPR || TREE_CODE (stmt) == SWITCH_EXPR) { bsi_remove (&b); e = ssa_redirect_edge (e, target); e->flags = EDGE_FALLTHRU; return e; } return NULL; } /* Redirect E to DEST. Return NULL on failure. Otherwise, return the edge representing the redirected branch. */ static edge tree_redirect_edge_and_branch (edge e, basic_block dest) { basic_block bb = e->src; block_stmt_iterator bsi; edge ret; tree label, stmt; if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) return NULL; if (e->src != ENTRY_BLOCK_PTR && (ret = tree_try_redirect_by_replacing_jump (e, dest))) return ret; if (e->dest == dest) return NULL; label = tree_block_label (dest); bsi = bsi_last (bb); stmt = bsi_end_p (bsi) ? NULL : bsi_stmt (bsi); switch (stmt ? TREE_CODE (stmt) : ERROR_MARK) { case COND_EXPR: stmt = (e->flags & EDGE_TRUE_VALUE ? COND_EXPR_THEN (stmt) : COND_EXPR_ELSE (stmt)); GOTO_DESTINATION (stmt) = label; break; case GOTO_EXPR: /* No non-abnormal edges should lead from a non-simple goto, and simple ones should be represented implicitly. 
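   Concretely (plain C, shown only to illustrate the invariant): a simple
   jump such as

     goto done;

   does not survive as an explicit GOTO_EXPR at the end of its block; it
   is represented purely by the outgoing CFG edge.  A computed jump such
   as

     goto *labelptr;

   does keep its GOTO_EXPR, but all of its outgoing edges are abnormal,
   so being asked to redirect a normal edge here means the CFG is
   corrupted and we abort.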
*/ abort (); case SWITCH_EXPR: { tree vec = SWITCH_LABELS (stmt); size_t i, n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (vec, i); if (label_to_block (CASE_LABEL (elt)) == e->dest) CASE_LABEL (elt) = label; } } break; case RETURN_EXPR: bsi_remove (&bsi); e->flags |= EDGE_FALLTHRU; break; default: /* Otherwise it must be a fallthru edge, and we don't need to do anything besides redirecting it. */ if (!(e->flags & EDGE_FALLTHRU)) abort (); break; } /* Update/insert PHI nodes as necessary. */ /* Now update the edges in the CFG. */ e = ssa_redirect_edge (e, dest); return e; } /* Simple wrapper, as we can always redirect fallthru edges. */ static basic_block tree_redirect_edge_and_branch_force (edge e, basic_block dest) { e = tree_redirect_edge_and_branch (e, dest); if (!e) abort (); return NULL; } /* Splits basic block BB after statement STMT (but at least after the labels). If STMT is NULL, BB is split just after the labels. */ static basic_block tree_split_block (basic_block bb, void *stmt) { block_stmt_iterator bsi, bsi_tgt; tree act; basic_block new_bb; edge e; new_bb = create_empty_bb (bb); /* Redirect the outgoing edges. */ new_bb->succ = bb->succ; bb->succ = NULL; for (e = new_bb->succ; e; e = e->succ_next) e->src = new_bb; if (stmt && TREE_CODE ((tree) stmt) == LABEL_EXPR) stmt = NULL; /* Move everything from BSI to the new basic block. */ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { act = bsi_stmt (bsi); if (TREE_CODE (act) == LABEL_EXPR) continue; if (!stmt) break; if (stmt == act) { bsi_next (&bsi); break; } } bsi_tgt = bsi_start (new_bb); while (!bsi_end_p (bsi)) { act = bsi_stmt (bsi); bsi_remove (&bsi); bsi_insert_after (&bsi_tgt, act, BSI_NEW_STMT); } return new_bb; } /* Moves basic block BB after block AFTER. */ static bool tree_move_block_after (basic_block bb, basic_block after) { if (bb->prev_bb == after) return true; unlink_block (bb); link_block (bb, after); return true; } /* Return true if basic_block can be duplicated. */ static bool tree_can_duplicate_bb_p (basic_block bb ATTRIBUTE_UNUSED) { return true; } /* Create a duplicate of the basic block BB. NOTE: This does not preserve SSA form. */ static basic_block tree_duplicate_bb (basic_block bb) { basic_block new_bb; block_stmt_iterator bsi, bsi_tgt; new_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb); bsi_tgt = bsi_start (new_bb); for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { tree stmt = bsi_stmt (bsi); tree copy; if (TREE_CODE (stmt) == LABEL_EXPR) continue; copy = unshare_expr (stmt); /* Copy also the virtual operands. */ get_stmt_ann (copy); copy_virtual_operands (copy, stmt); bsi_insert_after (&bsi_tgt, copy, BSI_NEW_STMT); } return new_bb; } /* Dump FUNCTION_DECL FN to file FILE using FLAGS (see TDF_* in tree.h) */ void dump_function_to_file (tree fn, FILE *file, int flags) { tree arg, vars, var; bool ignore_topmost_bind = false, any_var = false; basic_block bb; tree chain; fprintf (file, "%s (", lang_hooks.decl_printable_name (fn, 2)); arg = DECL_ARGUMENTS (fn); while (arg) { print_generic_expr (file, arg, dump_flags); if (TREE_CHAIN (arg)) fprintf (file, ", "); arg = TREE_CHAIN (arg); } fprintf (file, ")\n"); if (flags & TDF_RAW) { dump_node (fn, TDF_SLIM | flags, file); return; } /* When GIMPLE is lowered, the variables are no longer available in BIND_EXPRs, so display them separately. 
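   The effect on the dump is roughly the following (layout sketched by
   hand; the exact text comes from print_generic_decl and dump_generic_bb):

     foo (a)
     {
       int tmp;
       int D.1234;

       <bb 0>:
       ...
     }

   i.e. the declarations from cfun->unexpanded_var_list are printed once
   at the top instead of inside the BIND_EXPR that used to hold them.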
*/ if (cfun && cfun->unexpanded_var_list) { ignore_topmost_bind = true; fprintf (file, "{\n"); for (vars = cfun->unexpanded_var_list; vars; vars = TREE_CHAIN (vars)) { var = TREE_VALUE (vars); print_generic_decl (file, var, flags); fprintf (file, "\n"); any_var = true; } } if (basic_block_info) { /* Make a CFG based dump. */ if (!ignore_topmost_bind) fprintf (file, "{\n"); if (any_var && n_basic_blocks) fprintf (file, "\n"); FOR_EACH_BB (bb) dump_generic_bb (file, bb, 2, flags); fprintf (file, "}\n"); } else { int indent; /* Make a tree based dump. */ chain = DECL_SAVED_TREE (fn); if (TREE_CODE (chain) == BIND_EXPR) { if (ignore_topmost_bind) { chain = BIND_EXPR_BODY (chain); indent = 2; } else indent = 0; } else { if (!ignore_topmost_bind) fprintf (file, "{\n"); indent = 2; } if (any_var) fprintf (file, "\n"); print_generic_stmt_indented (file, chain, flags, indent); if (ignore_topmost_bind) fprintf (file, "}\n"); } fprintf (file, "\n\n"); } /* Pretty print of the loops intermediate representation. */ static void print_loop (FILE *, struct loop *, int); static void print_pred_bbs (FILE *, edge); static void print_succ_bbs (FILE *, edge); /* Print the predecessors indexes of edge E on FILE. */ static void print_pred_bbs (FILE *file, edge e) { if (e == NULL) return; else if (e->pred_next == NULL) fprintf (file, "bb_%d", e->src->index); else { fprintf (file, "bb_%d, ", e->src->index); print_pred_bbs (file, e->pred_next); } } /* Print the successors indexes of edge E on FILE. */ static void print_succ_bbs (FILE *file, edge e) { if (e == NULL) return; else if (e->succ_next == NULL) fprintf (file, "bb_%d", e->dest->index); else { fprintf (file, "bb_%d, ", e->dest->index); print_succ_bbs (file, e->succ_next); } } /* Pretty print LOOP on FILE, indented INDENT spaces. */ static void print_loop (FILE *file, struct loop *loop, int indent) { char *s_indent; basic_block bb; if (loop == NULL) return; s_indent = (char *) alloca ((size_t) indent + 1); memset ((void *) s_indent, ' ', (size_t) indent); s_indent[indent] = '\0'; /* Print the loop's header. */ fprintf (file, "%sloop_%d\n", s_indent, loop->num); /* Print the loop's body. */ fprintf (file, "%s{\n", s_indent); FOR_EACH_BB (bb) if (bb->loop_father == loop) { /* Print the basic_block's header. */ fprintf (file, "%s bb_%d (preds = {", s_indent, bb->index); print_pred_bbs (file, bb->pred); fprintf (file, "}, succs = {"); print_succ_bbs (file, bb->succ); fprintf (file, "})\n"); /* Print the basic_block's body. */ fprintf (file, "%s {\n", s_indent); tree_dump_bb (bb, file, indent + 4); fprintf (file, "%s }\n", s_indent); } print_loop (file, loop->inner, indent + 2); fprintf (file, "%s}\n", s_indent); print_loop (file, loop->next, indent); } /* Follow a CFG edge from the entry point of the program, and on entry of a loop, pretty print the loop structure on FILE. */ void print_loop_ir (FILE *file) { basic_block bb; bb = BASIC_BLOCK (0); if (bb && bb->loop_father) print_loop (file, bb->loop_father, 0); } /* Debugging loops structure at tree level. */ void debug_loop_ir (void) { print_loop_ir (stderr); } /* Return true if BB ends with a call, possibly followed by some instructions that must stay with the call. Return false, otherwise. 
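   In GIMPLE terms the shapes accepted here are, for instance
   (hand-written examples):

     foo (x);                    a plain CALL_EXPR
     tmp_1 = foo (x);            a MODIFY_EXPR with the call on its RHS
     return tmp_2 = foo (x);     a RETURN_EXPR wrapping such an assignment

   anything else makes the predicate return false.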
*/ static bool tree_block_ends_with_call_p (basic_block bb) { block_stmt_iterator bsi = bsi_last (bb); tree t = tsi_stmt (bsi.tsi); if (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == MODIFY_EXPR) t = TREE_OPERAND (t, 1); return TREE_CODE (t) == CALL_EXPR; } /* Return true if BB ends with a conditional branch. Return false, otherwise. */ static bool tree_block_ends_with_condjump_p (basic_block bb) { tree stmt = tsi_stmt (bsi_last (bb).tsi); return (TREE_CODE (stmt) == COND_EXPR); } /* Return true if we need to add fake edge to exit at statement T. Helper function for tree_flow_call_edges_add. */ static bool need_fake_tree_edge_p (tree t) { if (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == MODIFY_EXPR) t = TREE_OPERAND (t, 1); /* NORETURN and LONGJMP calls already have an edge to exit. CONST, PURE and ALWAYS_RETURN calls do not need one. We don't currently check for CONST and PURE here, although it would be a good idea, because those attributes are figured out from the RTL in mark_constant_function, and the counter incrementation code from -fprofile-arcs leads to different results from -fbranch-probabilities. */ if (TREE_CODE (t) == CALL_EXPR && !(call_expr_flags (t) & (ECF_NORETURN | ECF_LONGJMP | ECF_ALWAYS_RETURN))) return true; if (TREE_CODE (t) == ASM_EXPR && (ASM_VOLATILE_P (t) || ASM_INPUT_P (t))) return true; return false; } /* Add fake edges to the function exit for any non constant and non noreturn calls, volatile inline assembly in the bitmap of blocks specified by BLOCKS or to the whole CFG if BLOCKS is zero. Return the number of blocks that were split. The goal is to expose cases in which entering a basic block does not imply that all subsequent instructions must be executed. */ static int tree_flow_call_edges_add (sbitmap blocks) { int i; int blocks_split = 0; int last_bb = last_basic_block; bool check_last_block = false; if (n_basic_blocks == 0) return 0; if (! blocks) check_last_block = true; else check_last_block = TEST_BIT (blocks, EXIT_BLOCK_PTR->prev_bb->index); /* In the last basic block, before epilogue generation, there will be a fallthru edge to EXIT. Special care is required if the last insn of the last basic block is a call because make_edge folds duplicate edges, which would result in the fallthru edge also being marked fake, which would result in the fallthru edge being removed by remove_fake_edges, which would result in an invalid CFG. Moreover, we can't elide the outgoing fake edge, since the block profiler needs to take this into account in order to solve the minimal spanning tree in the case that the call doesn't return. Handle this by adding a dummy instruction in a new last basic block. */ if (check_last_block) { basic_block bb = EXIT_BLOCK_PTR->prev_bb; block_stmt_iterator bsi = bsi_last (bb); tree t = NULL_TREE; if (!bsi_end_p (bsi)) t = bsi_stmt (bsi); if (need_fake_tree_edge_p (t)) { edge e; for (e = bb->succ; e; e = e->succ_next) if (e->dest == EXIT_BLOCK_PTR) { bsi_insert_on_edge (e, build_empty_stmt ()); bsi_commit_edge_inserts ((int *)NULL); break; } } } /* Now add fake edges to the function exit for any non constant calls since there is no way that we can determine if they will return or not... 
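   A tiny C example of why this is needed (illustrative only):

     extern void g (void);
     int x;

     void f (void)
     {
       g ();       g may call exit, so control may never reach the store
       x = 1;
     }

   entering the block does not guarantee that the store is executed, so
   the block is split after the call and a fake edge to EXIT is added,
   keeping the profiler's spanning-tree computation honest.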
*/ for (i = 0; i < last_bb; i++) { basic_block bb = BASIC_BLOCK (i); block_stmt_iterator bsi; tree stmt, last_stmt; if (!bb) continue; if (blocks && !TEST_BIT (blocks, i)) continue; bsi = bsi_last (bb); if (!bsi_end_p (bsi)) { last_stmt = bsi_stmt (bsi); do { stmt = bsi_stmt (bsi); if (need_fake_tree_edge_p (stmt)) { edge e; /* The handling above of the final block before the epilogue should be enough to verify that there is no edge to the exit block in CFG already. Calling make_edge in such case would cause us to mark that edge as fake and remove it later. */ #ifdef ENABLE_CHECKING if (stmt == last_stmt) for (e = bb->succ; e; e = e->succ_next) if (e->dest == EXIT_BLOCK_PTR) abort (); #endif /* Note that the following may create a new basic block and renumber the existing basic blocks. */ if (stmt != last_stmt) { e = split_block (bb, stmt); if (e) blocks_split++; } make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); } bsi_prev (&bsi); } while (!bsi_end_p (bsi)); } } if (blocks_split) verify_flow_info (); return blocks_split; } bool tree_purge_dead_eh_edges (basic_block bb) { bool changed = false; edge e, next; tree stmt = last_stmt (bb); if (stmt && tree_can_throw_internal (stmt)) return false; for (e = bb->succ; e ; e = next) { next = e->succ_next; if (e->flags & EDGE_EH) { ssa_remove_edge (e); changed = true; } } return changed; } bool tree_purge_all_dead_eh_edges (bitmap blocks) { bool changed = false; size_t i; EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, { changed |= tree_purge_dead_eh_edges (BASIC_BLOCK (i)); }); return changed; } struct cfg_hooks tree_cfg_hooks = { "tree", tree_verify_flow_info, tree_dump_bb, /* dump_bb */ create_bb, /* create_basic_block */ tree_redirect_edge_and_branch,/* redirect_edge_and_branch */ tree_redirect_edge_and_branch_force,/* redirect_edge_and_branch_force */ remove_bb, /* delete_basic_block */ tree_split_block, /* split_block */ tree_move_block_after, /* move_block_after */ tree_can_merge_blocks_p, /* can_merge_blocks_p */ tree_merge_blocks, /* merge_blocks */ tree_predict_edge, /* predict_edge */ tree_predicted_by_p, /* predicted_by_p */ tree_can_duplicate_bb_p, /* can_duplicate_block_p */ tree_duplicate_bb, /* duplicate_block */ tree_split_edge, /* split_edge */ tree_make_forwarder_block, /* make_forward_block */ NULL, /* tidy_fallthru_edge */ tree_block_ends_with_call_p, /* block_ends_with_call_p */ tree_block_ends_with_condjump_p, /* block_ends_with_condjump_p */ tree_flow_call_edges_add /* flow_call_edges_add */ }; /* Split all critical edges. */ static void split_critical_edges (void) { basic_block bb; edge e; FOR_ALL_BB (bb) { for (e = bb->succ; e ; e = e->succ_next) if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL)) { split_edge (e); } } } struct tree_opt_pass pass_split_crit_edges = { "crited", /* name */ NULL, /* gate */ split_critical_edges, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_SPLIT_EDGES, /* tv_id */ PROP_cfg, /* properties required */ PROP_no_crit_edges, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func, /* todo_flags_finish */ }; /* Emit return warnings. 
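   Two hand-written C examples of what the checks below diagnose:

     extern void fatal (void) __attribute__ ((noreturn));
     void fatal (void) { }          `noreturn' function does return

     int answer (int flag)
     {
       if (flag)
         return 42;
     }                              control reaches end of non-void
                                    function (with -Wreturn-type)

   and, conversely, a function that can never reach EXIT may be flagged
   as a possible candidate for attribute `noreturn' under
   -Wmissing-noreturn.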
*/ static void execute_warn_function_return (void) { #ifdef USE_MAPPED_LOCATION source_location location; #else location_t *locus; #endif tree last; edge e; if (warn_missing_noreturn && !TREE_THIS_VOLATILE (cfun->decl) && EXIT_BLOCK_PTR->pred == NULL && !lang_hooks.function.missing_noreturn_ok_p (cfun->decl)) warning ("%Jfunction might be possible candidate for attribute `noreturn'", cfun->decl); /* If we have a path to EXIT, then we do return. */ if (TREE_THIS_VOLATILE (cfun->decl) && EXIT_BLOCK_PTR->pred != NULL) { #ifdef USE_MAPPED_LOCATION location = UNKNOWN_LOCATION; #else locus = NULL; #endif for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next) { last = last_stmt (e->src); if (TREE_CODE (last) == RETURN_EXPR #ifdef USE_MAPPED_LOCATION && (location = EXPR_LOCATION (last)) != UNKNOWN_LOCATION) #else && (locus = EXPR_LOCUS (last)) != NULL) #endif break; } #ifdef USE_MAPPED_LOCATION if (location == UNKNOWN_LOCATION) location = cfun->function_end_locus; warning ("%H`noreturn' function does return", &location); #else if (!locus) locus = &cfun->function_end_locus; warning ("%H`noreturn' function does return", locus); #endif } /* If we see "return;" in some basic block, then we do reach the end without returning a value. */ else if (warn_return_type && EXIT_BLOCK_PTR->pred != NULL && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl)))) { for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next) { tree last = last_stmt (e->src); if (TREE_CODE (last) == RETURN_EXPR && TREE_OPERAND (last, 0) == NULL) { #ifdef USE_MAPPED_LOCATION location = EXPR_LOCATION (last); if (location == UNKNOWN_LOCATION) location = cfun->function_end_locus; warning ("%Hcontrol reaches end of non-void function", &location); #else locus = EXPR_LOCUS (last); if (!locus) locus = &cfun->function_end_locus; warning ("%Hcontrol reaches end of non-void function", locus); #endif break; } } } } /* Given a basic block B which ends with a conditional and has precisely two successors, determine which of the edges is taken if the conditional is true and which is taken if the conditional is false. Set TRUE_EDGE and FALSE_EDGE appropriately. */ void extract_true_false_edges_from_block (basic_block b, edge *true_edge, edge *false_edge) { edge e = b->succ; if (e->flags & EDGE_TRUE_VALUE) { *true_edge = e; *false_edge = e->succ_next; } else { *false_edge = e; *true_edge = e->succ_next; } } struct tree_opt_pass pass_warn_function_return = { NULL, /* name */ NULL, /* gate */ execute_warn_function_return, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Type information for tree-cfg.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. 
*/ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_tree_cfg_h[] = { { &label_to_block_map, 1, sizeof (label_to_block_map), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, LAST_GGC_ROOT_TAB }; /* Data flow functions for trees. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Tree based points-to analysis Copyright (C) 2002, 2003 Free Software Foundation, Inc. Contributed by Daniel Berlin This file is part of GCC. GCC is free software; you can redistribute it and/or modify under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef TREE_ALIAS_COMMON #define TREE_ALIAS_COMMON /* Tree based linear points-to analysis Copyright (C) 2002, 2003 Free Software Foundation, Inc. Contributed by Daniel Berlin This file is part of GCC. GCC is free software; you can redistribute it and/or modify under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with GCC; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef TREE_ALIAS_TYPE_H #define TREE_ALIAS_TYPE_H union alias_var_def; struct aterm_; struct aterm_list_a; enum alias_var_kind { ATERM_AVAR }; struct alias_var_common GTY (()) { enum alias_var_kind kind; unsigned int varnum; tree decl; }; struct alias_var_aterm GTY (()) { struct alias_var_common common; struct aterm_ * GTY((skip (""))) term; struct aterm_list_a *GTY ((skip (""))) ptset; }; union alias_var_def GTY ((desc ("%0.common.kind"))) { struct alias_var_common GTY ((tag ("-1"))) common; struct alias_var_aterm GTY ((tag ("ATERM_AVAR"))) aterm; }; typedef union alias_var_def *alias_var; #define ALIAS_VAR_KIND(x) ((x)->common.kind) #define ALIAS_VAR_VARNUM(x) ((x)->common.varnum) #define ALIAS_VAR_DECL(x) ((x)->common.decl) #define ALIAS_VAR_ATERM(x) ((x)->aterm.term) #define ALIAS_VAR_PTSET(x) ((x)->aterm.ptset) union alias_type_def; typedef union alias_type_def *alias_type; alias_var alias_var_new_with_aterm (tree, struct aterm_ *); #endif /* TREE_ALIAS_TYPE_H */ /* Alias analysis function pointers. Functions implemented by the actual alias analysis algorithms in order for them to work with the common points-to structure. */ struct tree_alias_ops { /* Initialization. Called right before we start using the other functions. */ void (*init) (struct tree_alias_ops *); /* Cleanup. Called when we are finished with the alias analyzer. */ void (*cleanup) (struct tree_alias_ops *); /* Add variable. Called when we want to inform the alias analyzer about a new variable we've found. */ alias_var (*add_var) (struct tree_alias_ops *, tree); /* Add variable equivalent to existing one. Called when we want to inform the alias analyzer about a new variable that has the same points-to set as an existing variable. */ alias_var (*add_var_same) (struct tree_alias_ops *, tree, alias_var); /* Process a simple assignment (a = b). Called to process simple assignment statements of the form a = b, where a and b are both variables. */ void (*simple_assign) (struct tree_alias_ops *, alias_var, alias_var); /* Process an address assignment (a = &b). Called to process address assignment statements of the form a = &b, where a and b are both variables. */ void (*addr_assign) (struct tree_alias_ops *, alias_var, alias_var); /* Process a pointer assignment (a = *b). Called to process pointer assignment statements of the form a = *b, where a and b are both variables. */ void (*ptr_assign) (struct tree_alias_ops *, alias_var, alias_var); /* Process an operator assignment (a = op (...)) Called to process operators of the form a = op(...), where a is a variable. */ void (*op_assign) (struct tree_alias_ops *, alias_var, varray_type, tree, bitmap); /* Process a heap assignment (a = alloc (...)) Called to process a heap assignment of the form a = alloc (...), where a is a variable, and *alloc is a function that returns new memory. */ void (*heap_assign) (struct tree_alias_ops *, alias_var); /* Process an assignment to a pointer (*a = b) Called to process assignment to pointer statements of the form *a = b, where a and b are both variables. */ void (*assign_ptr) (struct tree_alias_ops *, alias_var, alias_var); /* Process a function definition. Called to inform the alias analyzer about a new function definition. */ void (*function_def) (struct tree_alias_ops *, alias_var, varray_type, alias_var); /* Process a function call. 
Return 1 if we need to assume conservative side-effects. */ int (*function_call) (struct tree_alias_ops *, alias_var, alias_var, varray_type, bitmap); /* Determine if two alias variables may alias. */ bool (*may_alias) (struct tree_alias_ops *, alias_var, alias_var); /* Determine if two alias variables have the same points-to set. */ bool (*same_points_to_set) (struct tree_alias_ops *, alias_var, alias_var); /* Determine if the alias variable has an empty points-to set. */ bool (*empty_points_to_set) (struct tree_alias_ops *, alias_var); /* Private data. */ void *data; /* Interprocedural. */ unsigned int ip:1; /* Can do conservative interprocedural analysis if we save the * info. */ unsigned int ip_partial:1; }; extern struct tree_alias_ops *current_alias_ops; extern void init_alias_vars (void); extern bool ptr_may_alias_var (tree, tree); extern bool same_points_to_set (tree, tree); extern bool empty_points_to_set (tree); extern const char *alias_get_name (tree); #endif /* TREE_ALIAS_COMMON */ /* Definition of functions in convert.c. Copyright (C) 1993, 2000, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_CONVERT_H #define GCC_CONVERT_H extern tree convert_to_integer (tree, tree); extern tree convert_to_pointer (tree, tree); extern tree convert_to_real (tree, tree); extern tree convert_to_complex (tree, tree); extern tree convert_to_vector (tree, tree); #endif /* GCC_CONVERT_H */ /* params.h - Run-time parameters. Copyright (C) 2001, 2003, 2004 Free Software Foundation, Inc. Written by Mark Mitchell . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This module provides a means for setting integral parameters dynamically. Instead of encoding magic numbers in various places, use this module to organize all the magic numbers in a single place. The values of the parameters can be set on the command-line, thereby providing a way to control the amount of effort spent on particular optimization passes, or otherwise tune the behavior of the compiler. Since their values can be set on the command-line, these parameters should not be used for non-dynamic memory allocation. */ #ifndef GCC_PARAMS_H #define GCC_PARAMS_H /* No parameter shall have this value. 
*/ #define INVALID_PARAM_VAL (-1) /* The information associated with each parameter. */ typedef struct param_info { /* The name used with the `--param =' switch to set this value. */ const char *const option; /* The associated value. */ int value; /* A short description of the option. */ const char *const help; } param_info; /* An array containing the compiler parameters and their current values. */ extern param_info *compiler_params; /* Add the N PARAMS to the current list of compiler parameters. */ extern void add_params (const param_info params[], size_t n); /* Set the VALUE associated with the parameter given by NAME. */ extern void set_param_value (const char *name, int value); /* The parameters in use by language-independent code. */ typedef enum compiler_param { #define DEFPARAM(enumerator, option, msgid, default) \ enumerator, /* params.def - Run-time parameters. Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc. Written by Mark Mitchell . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains definitions for language-independent parameters. The DEFPARAM macro takes 4 arguments: - The enumeral corresponding to this parameter. - The name that can be used to set this parameter using the command-line option `--param ='. - A help string explaining how the parameter is used. - A default value for the parameter. Be sure to add an entry to invoke.texi summarizing the parameter. */ /* The single function inlining limit. This is the maximum size of a function counted in internal gcc instructions (not in real machine instructions) that is eligible for inlining by the tree inliner. The default value is 500. Only functions marked inline (or methods defined in the class definition for C++) are affected by this, unless you set the -finline-functions (included in -O3) compiler option. There are more restrictions to inlining: If inlined functions call other functions, the already inlined instructions are counted and once the recursive inline limit (see "max-inline-insns" parameter) is exceeded, the acceptable size gets decreased. */ DEFPARAM (PARAM_MAX_INLINE_INSNS_SINGLE, "max-inline-insns-single", "The maximum number of instructions in a single function eligible for inlining", 500) /* The single function inlining limit for functions that are inlined by virtue of -finline-functions (-O3). This limit should be chosen to be below or equal to the limit that is applied to functions marked inlined (or defined in the class declaration in C++) given by the "max-inline-insns-single" parameter. The default value is 150. 
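   As an illustration of how such a parameter is consumed (standard
   --param usage, not specific to this file): the limit can be changed
   on the command line with

     gcc -O3 --param max-inline-insns-auto=50 foo.c

   and is read back inside the compiler through the PARAM_VALUE macro
   defined at the end of this header, i.e.
   PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO), which the convenience
   macro MAX_INLINE_INSNS_AUTO expands to.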
*/ DEFPARAM (PARAM_MAX_INLINE_INSNS_AUTO, "max-inline-insns-auto", "The maximum number of instructions when automatically inlining", 120) DEFPARAM (PARAM_MAX_INLINE_INSNS_RECURSIVE, "max-inline-insns-recursive", "The maximum number of instructions inline function can grow to via recursive inlining", 500) DEFPARAM (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO, "max-inline-insns-recursive-auto", "The maximum number of instructions non-inline function can grow to via recursive inlining", 500) DEFPARAM (PARAM_MAX_INLINE_RECURSIVE_DEPTH, "max-inline-recursive-depth", "The maximum depth of recursive inlining for inline functions", 8) DEFPARAM (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO, "max-inline-recursive-depth-auto", "The maximum depth of recursive inlining for non-inline functions", 8) /* For languages that (still) use the RTL inliner, we can specify limits for the RTL inliner separately. The parameter here defines the maximum number of RTL instructions a function may have to be eligible for inlining in the RTL inliner. The default value is 600. */ DEFPARAM (PARAM_MAX_INLINE_INSNS_RTL, "max-inline-insns-rtl", "The maximum number of instructions for the RTL inliner", 600) /* The maximum number of instructions to consider when looking for an instruction to fill a delay slot. If more than this arbitrary number of instructions is searched, the time savings from filling the delay slot will be minimal so stop searching. Increasing values mean more aggressive optimization, making the compile time increase with probably small improvement in executable run time. */ DEFPARAM (PARAM_MAX_DELAY_SLOT_INSN_SEARCH, "max-delay-slot-insn-search", "The maximum number of instructions to consider to fill a delay slot", 100) /* When trying to fill delay slots, the maximum number of instructions to consider when searching for a block with valid live register information. Increasing this arbitrarily chosen value means more aggressive optimization, increasing the compile time. This parameter should be removed when the delay slot code is rewritten to maintain the control-flow graph. */ DEFPARAM(PARAM_MAX_DELAY_SLOT_LIVE_SEARCH, "max-delay-slot-live-search", "The maximum number of instructions to consider to find accurate live register information", 333) /* This parameter limits the number of branch elements that the scheduler will track anti-dependencies through without resetting the tracking mechanism. Large functions with few calls or barriers can generate lists containing many 1000's of dependencies. Generally the compiler either uses all available memory, or runs for far too long. */ DEFPARAM(PARAM_MAX_PENDING_LIST_LENGTH, "max-pending-list-length", "The maximum length of scheduling's pending operations list", 32) DEFPARAM(PARAM_LARGE_FUNCTION_INSNS, "large-function-insns", "The size of function body to be considered large", 3000) DEFPARAM(PARAM_LARGE_FUNCTION_GROWTH, "large-function-growth", "Maximal growth due to inlining of large function (in percent)", 100) DEFPARAM(PARAM_INLINE_UNIT_GROWTH, "inline-unit-growth", "how much can given compilation unit grow because of the inlining (in percent)", 50) /* The GCSE optimization will be disabled if it would require significantly more memory than this value. */ DEFPARAM(PARAM_MAX_GCSE_MEMORY, "max-gcse-memory", "The maximum amount of memory to be allocated by GCSE", 50 * 1024 * 1024) /* The number of repetitions of copy/const prop and PRE to run. 
*/ DEFPARAM(PARAM_MAX_GCSE_PASSES, "max-gcse-passes", "The maximum number of passes to make when doing GCSE", 1) /* This is the threshold ratio when to perform partial redundancy elimination after reload. We perform partial redundancy elimination when the following holds: (Redundant load execution count) ------------------------------- >= GCSE_AFTER_RELOAD_PARTIAL_FRACTION (Added loads execution count) */ DEFPARAM(PARAM_GCSE_AFTER_RELOAD_PARTIAL_FRACTION, "gcse-after-reload-partial-fraction", "The threshold ratio for performing partial redundancy elimination \ after reload.", 3) /* This is the threshold ratio of the critical edges execution count compared to the redundant loads execution count that permits performing the load redundancy elimination in gcse after reload. */ DEFPARAM(PARAM_GCSE_AFTER_RELOAD_CRITICAL_FRACTION, "gcse-after-reload-critical-fraction", "The threshold ratio of critical edges execution count that permit \ performing redundancy elimination after reload.", 10) /* This parameter limits the number of insns in a loop that will be unrolled, and by how much the loop is unrolled. This limit should be at most half of the peeling limits: loop unroller decides to not unroll loops that iterate fewer than 2*number of allowed unrollings and thus we would have loops that are neither peeled or unrolled otherwise. */ DEFPARAM(PARAM_MAX_UNROLLED_INSNS, "max-unrolled-insns", "The maximum number of instructions to consider to unroll in a loop", 200) /* This parameter limits how many times the loop is unrolled depending on number of insns really executed in each iteration. */ DEFPARAM(PARAM_MAX_AVERAGE_UNROLLED_INSNS, "max-average-unrolled-insns", "The maximum number of instructions to consider to unroll in a loop on average", 80) /* The maximum number of unrollings of a single loop. */ DEFPARAM(PARAM_MAX_UNROLL_TIMES, "max-unroll-times", "The maximum number of unrollings of a single loop", 8) /* The maximum number of insns of a peeled loop. */ DEFPARAM(PARAM_MAX_PEELED_INSNS, "max-peeled-insns", "The maximum number of insns of a peeled loop", 400) /* The maximum number of peelings of a single loop. */ DEFPARAM(PARAM_MAX_PEEL_TIMES, "max-peel-times", "The maximum number of peelings of a single loop", 16) /* The maximum number of insns of a peeled loop. */ DEFPARAM(PARAM_MAX_COMPLETELY_PEELED_INSNS, "max-completely-peeled-insns", "The maximum number of insns of a completely peeled loop", 400) /* The maximum number of peelings of a single loop that is peeled completely. */ DEFPARAM(PARAM_MAX_COMPLETELY_PEEL_TIMES, "max-completely-peel-times", "The maximum number of peelings of a single loop that is peeled completely", 16) /* The maximum number of insns of a peeled loop that rolls only once. */ DEFPARAM(PARAM_MAX_ONCE_PEELED_INSNS, "max-once-peeled-insns", "The maximum number of insns of a peeled loop that rolls only once", 400) /* The maximum number of insns of an unswitched loop. */ DEFPARAM(PARAM_MAX_UNSWITCH_INSNS, "max-unswitch-insns", "The maximum number of insns of an unswitched loop", 50) /* The maximum level of recursion in unswitch_single_loop. */ DEFPARAM(PARAM_MAX_UNSWITCH_LEVEL, "max-unswitch-level", "The maximum number of unswitchings in a single loop", 3) DEFPARAM(PARAM_MAX_SMS_LOOP_NUMBER, "max-sms-loop-number", "Maximum number of loops to perform swing modulo scheduling on \ (mainly for debugging)", -1) /* This parameter is used to tune SMS MAX II calculations. 
*/ DEFPARAM(PARAM_SMS_MAX_II_FACTOR, "sms-max-ii-factor", "A factor for tuning the upper bound that swing modulo scheduler uses \ for scheduling a loop", 100) DEFPARAM(PARAM_SMS_DFA_HISTORY, "sms-dfa-history", "The number of cycles the swing modulo scheduler considers when \ checking conflicts using DFA", 0) DEFPARAM(PARAM_SMS_LOOP_AVERAGE_COUNT_THRESHOLD, "sms-loop-average-count-threshold", "A threshold on the average loop count considered by the swing modulo \ scheduler", 0) DEFPARAM(HOT_BB_COUNT_FRACTION, "hot-bb-count-fraction", "Select fraction of the maximal count of repetitions of basic block in \ program given basic block needs to have to be considered hot", 10000) DEFPARAM(HOT_BB_FREQUENCY_FRACTION, "hot-bb-frequency-fraction", "Select fraction of the maximal frequency of executions of basic \ block in function given basic block needs to have to be considered hot", 1000) DEFPARAM(TRACER_DYNAMIC_COVERAGE_FEEDBACK, "tracer-dynamic-coverage-feedback", "The percentage of function, weighted by execution frequency, that \ must be covered by trace formation. Used when profile feedback is available", 95) DEFPARAM(TRACER_DYNAMIC_COVERAGE, "tracer-dynamic-coverage", "The percentage of function, weighted by execution frequency, that \ must be covered by trace formation. Used when profile feedback is not available", 75) DEFPARAM(TRACER_MAX_CODE_GROWTH, "tracer-max-code-growth", "Maximal code growth caused by tail duplication (in percent)", 100) DEFPARAM(TRACER_MIN_BRANCH_RATIO, "tracer-min-branch-ratio", "Stop reverse growth if the reverse probability of best edge is less \ than this threshold (in percent)", 10) DEFPARAM(TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK, "tracer-min-branch-probability-feedback", "Stop forward growth if the probability of best edge is less than \ this threshold (in percent). Used when profile feedback is available", 80) DEFPARAM(TRACER_MIN_BRANCH_PROBABILITY, "tracer-min-branch-probability", "Stop forward growth if the probability of best edge is less than \ this threshold (in percent). Used when profile feedback is not available", 50) /* The maximum number of incoming edges to consider for crossjumping. */ DEFPARAM(PARAM_MAX_CROSSJUMP_EDGES, "max-crossjump-edges", "The maximum number of incoming edges to consider for crossjumping", 100) /* The maximum length of path considered in cse. */ DEFPARAM(PARAM_MAX_CSE_PATH_LENGTH, "max-cse-path-length", "The maximum length of path considered in cse", 10) /* The product of the next two is used to decide whether or not to use .GLOBAL_VAR. See tree-dfa.c. */ DEFPARAM(PARAM_GLOBAL_VAR_THRESHOLD, "global-var-threshold", "Given N calls and V call-clobbered vars in a function. 
Use .GLOBAL_VAR if NxV is larger than this limit", 500000) DEFPARAM(PARAM_MAX_CSELIB_MEMORY_LOCATIONS, "max-cselib-memory-locations", "The maximum memory locations recorded by cselib", 500) #ifdef ENABLE_GC_ALWAYS_COLLECT # define GGC_MIN_EXPAND_DEFAULT 0 # define GGC_MIN_HEAPSIZE_DEFAULT 0 #else # define GGC_MIN_EXPAND_DEFAULT 30 # define GGC_MIN_HEAPSIZE_DEFAULT 4096 #endif DEFPARAM(GGC_MIN_EXPAND, "ggc-min-expand", "Minimum heap expansion to trigger garbage collection, as \ a percentage of the total size of the heap", GGC_MIN_EXPAND_DEFAULT) DEFPARAM(GGC_MIN_HEAPSIZE, "ggc-min-heapsize", "Minimum heap size before we start collecting garbage, in kilobytes", GGC_MIN_HEAPSIZE_DEFAULT) #undef GGC_MIN_EXPAND_DEFAULT #undef GGC_MIN_HEAPSIZE_DEFAULT DEFPARAM(PARAM_MAX_RELOAD_SEARCH_INSNS, "max-reload-search-insns", "The maximum number of instructions to search backward when looking for equivalent reload", 100) DEFPARAM(PARAM_MAX_ALIASED_VOPS, "max-aliased-vops", "The maximum number of virtual operands allowed to represent aliases before triggering alias grouping.", 500) DEFPARAM(PARAM_MAX_SCHED_REGION_BLOCKS, "max-sched-region-blocks", "The maximum number of blocks in a region to be considered for interblock scheduling", 10) DEFPARAM(PARAM_MAX_SCHED_REGION_INSNS, "max-sched-region-insns", "The maximum number of insns in a region to be considered for interblock scheduling", 100) /* Local variables: mode:c End: */ #undef DEFPARAM LAST_PARAM } compiler_param; /* The value of the parameter given by ENUM. */ #define PARAM_VALUE(ENUM) \ (compiler_params[(int) ENUM].value) /* Macros for the various parameters. */ #define MAX_INLINE_INSNS_SINGLE \ PARAM_VALUE (PARAM_MAX_INLINE_INSNS_SINGLE) #define MAX_INLINE_INSNS \ PARAM_VALUE (PARAM_MAX_INLINE_INSNS) #define MAX_INLINE_SLOPE \ PARAM_VALUE (PARAM_MAX_INLINE_SLOPE) #define MIN_INLINE_INSNS \ PARAM_VALUE (PARAM_MIN_INLINE_INSNS) #define MAX_INLINE_INSNS_AUTO \ PARAM_VALUE (PARAM_MAX_INLINE_INSNS_AUTO) #define MAX_INLINE_INSNS_RTL \ PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RTL) #define MAX_DELAY_SLOT_INSN_SEARCH \ PARAM_VALUE (PARAM_MAX_DELAY_SLOT_INSN_SEARCH) #define MAX_DELAY_SLOT_LIVE_SEARCH \ PARAM_VALUE (PARAM_MAX_DELAY_SLOT_LIVE_SEARCH) #define MAX_PENDING_LIST_LENGTH \ PARAM_VALUE (PARAM_MAX_PENDING_LIST_LENGTH) #define MAX_GCSE_MEMORY \ ((size_t) PARAM_VALUE (PARAM_MAX_GCSE_MEMORY)) #define MAX_GCSE_PASSES \ PARAM_VALUE (PARAM_MAX_GCSE_PASSES) #define GCSE_AFTER_RELOAD_PARTIAL_FRACTION \ PARAM_VALUE (PARAM_GCSE_AFTER_RELOAD_PARTIAL_FRACTION) #define GCSE_AFTER_RELOAD_CRITICAL_FRACTION \ PARAM_VALUE (PARAM_GCSE_AFTER_RELOAD_CRITICAL_FRACTION) #define MAX_UNROLLED_INSNS \ PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) #define MAX_SMS_LOOP_NUMBER \ PARAM_VALUE (PARAM_MAX_SMS_LOOP_NUMBER) #define SMS_MAX_II_FACTOR \ PARAM_VALUE (PARAM_SMS_MAX_II_FACTOR) #define SMS_DFA_HISTORY \ PARAM_VALUE (PARAM_SMS_DFA_HISTORY) #define SMS_LOOP_AVERAGE_COUNT_THRESHOLD \ PARAM_VALUE (PARAM_SMS_LOOP_AVERAGE_COUNT_THRESHOLD) #define GLOBAL_VAR_THRESHOLD \ PARAM_VALUE (PARAM_GLOBAL_VAR_THRESHOLD) #define MAX_ALIASED_VOPS \ PARAM_VALUE (PARAM_MAX_ALIASED_VOPS) #endif /* ! GCC_PARAMS_H */ /* Build and maintain data flow information for trees. */ /* Counters used to display DFA and SSA statistics. */ struct dfa_stats_d { long num_stmt_anns; long num_var_anns; long num_defs; long num_uses; long num_phis; long num_phi_args; int max_num_phi_args; long num_v_may_defs; long num_vuses; long num_v_must_defs; }; /* State information for find_vars_r. 
*/ struct walk_state { /* Hash table used to avoid adding the same variable more than once. */ htab_t vars_found; }; /* Local functions. */ static void collect_dfa_stats (struct dfa_stats_d *); static tree collect_dfa_stats_r (tree *, int *, void *); static void add_immediate_use (tree, tree); static tree find_vars_r (tree *, int *, void *); static void add_referenced_var (tree, struct walk_state *); static void compute_immediate_uses_for_phi (tree, bool (*)(tree)); static void compute_immediate_uses_for_stmt (tree, int, bool (*)(tree)); static void find_hidden_use_vars (tree); static tree find_hidden_use_vars_r (tree *, int *, void *); /* Global declarations. */ /* Array of all variables referenced in the function. */ varray_type referenced_vars; /*--------------------------------------------------------------------------- Dataflow analysis (DFA) routines ---------------------------------------------------------------------------*/ /* Find all the variables referenced in the function. This function builds the global arrays REFERENCED_VARS and CALL_CLOBBERED_VARS. Note that this function does not look for statement operands, it simply determines what variables are referenced in the program and detects various attributes for each variable used by alias analysis and the optimizer. */ static void find_referenced_vars (void) { htab_t vars_found; basic_block bb; block_stmt_iterator si; struct walk_state walk_state; tree block; /* Walk the lexical blocks in the function looking for variables that may have been used to declare VLAs and for nested functions. Both constructs create hidden uses of variables. Note that at this point we may have multiple blocks hung off DECL_INITIAL chained through the BLOCK_CHAIN field due to how inlining works. Egad. */ block = DECL_INITIAL (current_function_decl); while (block) { find_hidden_use_vars (block); block = BLOCK_CHAIN (block); } vars_found = htab_create (50, htab_hash_pointer, htab_eq_pointer, NULL); memset (&walk_state, 0, sizeof (walk_state)); walk_state.vars_found = vars_found; FOR_EACH_BB (bb) for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si)) { tree *stmt_p = bsi_stmt_ptr (si); walk_tree (stmt_p, find_vars_r, &walk_state, NULL); } htab_delete (vars_found); } struct tree_opt_pass pass_referenced_vars = { NULL, /* name */ NULL, /* gate */ find_referenced_vars, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_gimple_leh | PROP_cfg, /* properties_required */ PROP_referenced_vars, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0, /* todo_flags_finish */ }; /* Compute immediate uses. CALC_FOR is an optional function pointer which indicates whether immediate uses information should be calculated for a given SSA variable. If NULL, then information is computed for all variables. FLAGS is one of {TDFA_USE_OPS, TDFA_USE_VOPS}. It is used by compute_immediate_uses_for_stmt to determine whether to look at virtual and/or real operands while computing def-use chains. */ void compute_immediate_uses (int flags, bool (*calc_for)(tree)) { basic_block bb; block_stmt_iterator si; FOR_EACH_BB (bb) { tree phi; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) compute_immediate_uses_for_phi (phi, calc_for); for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si)) { tree stmt = bsi_stmt (si); get_stmt_operands (stmt); compute_immediate_uses_for_stmt (stmt, flags, calc_for); } } } /* Invalidates dataflow information for a statement STMT. 
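   The usual life cycle in a consumer pass looks roughly like this
   (illustrative usage, not quoted from any particular pass):

     compute_immediate_uses (TDFA_USE_OPS, NULL);
     ... walk the statements, querying get_immediate_uses,
         num_immediate_uses and immediate_use ...
     free_df ();

   free_df, defined just after this helper, calls it on every PHI node
   and statement in the function.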
*/ static void free_df_for_stmt (tree stmt) { stmt_ann_t ann = stmt_ann (stmt); if (ann && ann->df) { /* If we have a varray of immediate uses, then go ahead and release it for re-use. */ if (ann->df->immediate_uses) ggc_free (ann->df->immediate_uses); /* Similarly for the main dataflow structure. */ ggc_free (ann->df); ann->df = NULL; } } /* Invalidate dataflow information for the whole function. */ void free_df (void) { basic_block bb; block_stmt_iterator si; FOR_EACH_BB (bb) { tree phi; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) free_df_for_stmt (phi); for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si)) { tree stmt = bsi_stmt (si); free_df_for_stmt (stmt); } } } /* Helper for compute_immediate_uses. Check all the USE and/or VUSE operands in phi node PHI and add a def-use edge between their defining statement and PHI. CALC_FOR is as in compute_immediate_uses. PHI nodes are easy, we only need to look at their arguments. */ static void compute_immediate_uses_for_phi (tree phi, bool (*calc_for)(tree)) { int i; #ifdef ENABLE_CHECKING if (TREE_CODE (phi) != PHI_NODE) abort (); #endif for (i = 0; i < PHI_NUM_ARGS (phi); i++) { tree arg = PHI_ARG_DEF (phi, i); if (TREE_CODE (arg) == SSA_NAME && (!calc_for || calc_for (arg))) { tree imm_rdef_stmt = SSA_NAME_DEF_STMT (PHI_ARG_DEF (phi, i)); if (!IS_EMPTY_STMT (imm_rdef_stmt)) add_immediate_use (imm_rdef_stmt, phi); } } } /* Another helper for compute_immediate_uses. Depending on the value of FLAGS, check all the USE and/or VUSE operands in STMT and add a def-use edge between their defining statement and STMT. CALC_FOR is as in compute_immediate_uses. */ static void compute_immediate_uses_for_stmt (tree stmt, int flags, bool (*calc_for)(tree)) { size_t i; use_optype uses; vuse_optype vuses; v_may_def_optype v_may_defs; stmt_ann_t ann; #ifdef ENABLE_CHECKING /* PHI nodes are handled elsewhere. */ if (TREE_CODE (stmt) == PHI_NODE) abort (); #endif /* Look at USE_OPS or VUSE_OPS according to FLAGS. */ ann = stmt_ann (stmt); if (flags & TDFA_USE_OPS) { uses = USE_OPS (ann); for (i = 0; i < NUM_USES (uses); i++) { tree use = USE_OP (uses, i); tree imm_stmt = SSA_NAME_DEF_STMT (use); if (!IS_EMPTY_STMT (imm_stmt) && (!calc_for || calc_for (use))) add_immediate_use (imm_stmt, stmt); } } if (flags & TDFA_USE_VOPS) { vuses = VUSE_OPS (ann); for (i = 0; i < NUM_VUSES (vuses); i++) { tree vuse = VUSE_OP (vuses, i); tree imm_rdef_stmt = SSA_NAME_DEF_STMT (vuse); if (!IS_EMPTY_STMT (imm_rdef_stmt) && (!calc_for || calc_for (vuse))) add_immediate_use (imm_rdef_stmt, stmt); } v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { tree vuse = V_MAY_DEF_OP (v_may_defs, i); tree imm_rdef_stmt = SSA_NAME_DEF_STMT (vuse); if (!IS_EMPTY_STMT (imm_rdef_stmt) && (!calc_for || calc_for (vuse))) add_immediate_use (imm_rdef_stmt, stmt); } } } /* Add statement USE_STMT to the list of statements that use definitions made by STMT. */ static void add_immediate_use (tree stmt, tree use_stmt) { stmt_ann_t ann = get_stmt_ann (stmt); struct dataflow_d *df; df = ann->df; if (df == NULL) { df = ann->df = ggc_alloc (sizeof (struct dataflow_d)); memset ((void *) df, 0, sizeof (struct dataflow_d)); df->uses[0] = use_stmt; return; } if (!df->uses[1]) { df->uses[1] = use_stmt; return; } if (ann->df->immediate_uses == NULL) VARRAY_TREE_INIT (ann->df->immediate_uses, 4, "immediate_uses"); VARRAY_PUSH_TREE (ann->df->immediate_uses, use_stmt); } /* If the immediate use of USE points to OLD, then redirect it to NEW. 
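   Note the storage convention shared with add_immediate_use above: the
   first two users of a statement live directly in df->uses[0] and
   df->uses[1], and user number I (for I >= 2) lives in varray slot
   I - 2, e.g.

     use 0  ->  df->uses[0]
     use 1  ->  df->uses[1]
     use 4  ->  VARRAY_TREE (df->immediate_uses, 2)

   which is why the loop below distinguishes the two cases when storing
   NEW.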
*/ static void redirect_immediate_use (tree use, tree old, tree new) { tree imm_stmt = SSA_NAME_DEF_STMT (use); struct dataflow_d *df = get_stmt_ann (imm_stmt)->df; unsigned int num_uses = num_immediate_uses (df); unsigned int i; for (i = 0; i < num_uses; i++) { if (immediate_use (df, i) == old) { if (i == 0 || i == 1) df->uses[i] = new; else VARRAY_TREE (df->immediate_uses, i - 2) = new; } } } /* Redirect all immediate uses for operands in OLD so that they point to NEW. This routine should have no knowledge of how immediate uses are stored. */ void redirect_immediate_uses (tree old, tree new) { stmt_ann_t ann = get_stmt_ann (old); use_optype uses = USE_OPS (ann); vuse_optype vuses = VUSE_OPS (ann); v_may_def_optype v_may_defs = V_MAY_DEF_OPS (ann); unsigned int i; /* Look at USE_OPS or VUSE_OPS according to FLAGS. */ for (i = 0; i < NUM_USES (uses); i++) redirect_immediate_use (USE_OP (uses, i), old, new); for (i = 0; i < NUM_VUSES (vuses); i++) redirect_immediate_use (VUSE_OP (vuses, i), old, new); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) redirect_immediate_use (V_MAY_DEF_OP (v_may_defs, i), old, new); } /*--------------------------------------------------------------------------- Manage annotations ---------------------------------------------------------------------------*/ /* Create a new annotation for a _DECL node T. */ var_ann_t create_var_ann (tree t) { var_ann_t ann; #if defined ENABLE_CHECKING if (t == NULL_TREE || !DECL_P (t) || (t->common.ann && t->common.ann->common.type != VAR_ANN)) abort (); #endif ann = ggc_alloc (sizeof (*ann)); memset ((void *) ann, 0, sizeof (*ann)); ann->common.type = VAR_ANN; t->common.ann = (tree_ann_t) ann; return ann; } /* Create a new annotation for a statement node T. */ stmt_ann_t create_stmt_ann (tree t) { stmt_ann_t ann; #if defined ENABLE_CHECKING if ((!is_gimple_stmt (t)) || (t->common.ann && t->common.ann->common.type != STMT_ANN)) abort (); #endif ann = ggc_alloc (sizeof (*ann)); memset ((void *) ann, 0, sizeof (*ann)); ann->common.type = STMT_ANN; /* Since we just created the annotation, mark the statement modified. */ ann->modified = true; t->common.ann = (tree_ann_t) ann; return ann; } /* Create a new annotation for a tree T. */ tree_ann_t create_tree_ann (tree t) { tree_ann_t ann; #if defined ENABLE_CHECKING if (t == NULL_TREE || (t->common.ann && t->common.ann->common.type != TREE_ANN_COMMON)) abort (); #endif ann = ggc_alloc (sizeof (*ann)); memset ((void *) ann, 0, sizeof (*ann)); ann->common.type = TREE_ANN_COMMON; t->common.ann = ann; return ann; } /* Build a temporary. Make sure and register it to be renamed. */ tree make_rename_temp (tree type, const char *prefix) { tree t = create_tmp_var (type, prefix); add_referenced_tmp_var (t); bitmap_set_bit (vars_to_rename, var_ann (t)->uid); return t; } /*--------------------------------------------------------------------------- Debugging functions ---------------------------------------------------------------------------*/ /* Dump the list of all the referenced variables in the current function to FILE. */ void dump_variable_dfa (FILE *file, tree var); void dump_referenced_vars (FILE *file) { size_t i; fprintf (file, "\nReferenced variables in %s: %u\n\n", get_name (current_function_decl), (unsigned) num_referenced_vars); for (i = 0; i < num_referenced_vars; i++) { tree var = referenced_var (i); fprintf (file, "Variable: "); dump_variable_dfa (file, var); fprintf (file, "\n"); } } /* Dump the list of all the referenced variables to stderr. 
*/ void debug_referenced_vars (void) { dump_referenced_vars (stderr); } /* Dump variable VAR and its may-aliases to FILE. */ void dump_variable_dfa (FILE *file, tree var) { var_ann_t ann; if (var == NULL_TREE) { fprintf (file, ""); return; } print_generic_expr (file, var, dump_flags); if (TREE_CODE (var) == SSA_NAME) var = SSA_NAME_VAR (var); ann = var_ann (var); fprintf (file, ", UID %u", (unsigned) ann->uid); if (ann->has_hidden_use) fprintf (file, ", has hidden uses"); if (ann->type_mem_tag) { fprintf (file, ", type memory tag: "); print_generic_expr (file, ann->type_mem_tag, dump_flags); } if (ann->is_alias_tag) fprintf (file, ", is an alias tag"); if (needs_to_live_in_memory (var)) fprintf (file, ", is %s", TREE_STATIC (var) ? "static" : "global"); if (is_call_clobbered (var)) fprintf (file, ", call clobbered"); if (ann->default_def) { fprintf (file, ", default def: "); print_generic_expr (file, ann->default_def, dump_flags); } if (ann->may_aliases) { fprintf (file, ", may aliases: "); dump_may_aliases_for (file, var); } fprintf (file, "\n"); } /* Dump variable VAR and its may-aliases to stderr. */ void debug_variable (tree var) { dump_variable_dfa (stderr, var); } /* Dump def-use edges on FILE. */ void dump_immediate_uses (FILE *file) { basic_block bb; block_stmt_iterator si; const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); fprintf (file, "\nDef-use edges for function %s\n", funcname); FOR_EACH_BB (bb) { tree phi; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) dump_immediate_uses_for (file, phi); for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si)) dump_immediate_uses_for (file, bsi_stmt (si)); } fprintf (file, "\n"); } /* Dump def-use edges on stderr. */ void debug_immediate_uses (void) { dump_immediate_uses (stderr); } /* Dump all immediate uses for STMT on FILE. */ void dump_immediate_uses_for (FILE *file, tree stmt) { dataflow_t df = get_immediate_uses (stmt); int num_imm_uses = num_immediate_uses (df); if (num_imm_uses > 0) { int i; fprintf (file, "-> "); print_generic_stmt (file, stmt, TDF_SLIM); fprintf (file, "\n"); for (i = 0; i < num_imm_uses; i++) { fprintf (file, "\t"); print_generic_stmt (file, immediate_use (df, i), TDF_SLIM); fprintf (file, "\n"); } fprintf (file, "\n"); } } /* Dump immediate uses for STMT on stderr. */ void debug_immediate_uses_for (tree stmt) { dump_immediate_uses_for (stderr, stmt); } /* Dump various DFA statistics to FILE. 
*/ void dump_dfa_stats (FILE *file) { struct dfa_stats_d dfa_stats; unsigned long size, total = 0; const char * const fmt_str = "%-30s%-13s%12s\n"; const char * const fmt_str_1 = "%-30s%13lu%11lu%c\n"; const char * const fmt_str_3 = "%-43s%11lu%c\n"; const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); collect_dfa_stats (&dfa_stats); fprintf (file, "\nDFA Statistics for %s\n\n", funcname); fprintf (file, "---------------------------------------------------------\n"); fprintf (file, fmt_str, "", " Number of ", "Memory"); fprintf (file, fmt_str, "", " instances ", "used "); fprintf (file, "---------------------------------------------------------\n"); size = num_referenced_vars * sizeof (tree); total += size; fprintf (file, fmt_str_1, "Referenced variables", num_referenced_vars, SCALE (size), LABEL (size)); size = dfa_stats.num_stmt_anns * sizeof (struct stmt_ann_d); total += size; fprintf (file, fmt_str_1, "Statements annotated", dfa_stats.num_stmt_anns, SCALE (size), LABEL (size)); size = dfa_stats.num_var_anns * sizeof (struct var_ann_d); total += size; fprintf (file, fmt_str_1, "Variables annotated", dfa_stats.num_var_anns, SCALE (size), LABEL (size)); size = dfa_stats.num_uses * sizeof (tree *); total += size; fprintf (file, fmt_str_1, "USE operands", dfa_stats.num_uses, SCALE (size), LABEL (size)); size = dfa_stats.num_defs * sizeof (tree *); total += size; fprintf (file, fmt_str_1, "DEF operands", dfa_stats.num_defs, SCALE (size), LABEL (size)); size = dfa_stats.num_vuses * sizeof (tree *); total += size; fprintf (file, fmt_str_1, "VUSE operands", dfa_stats.num_vuses, SCALE (size), LABEL (size)); size = dfa_stats.num_v_may_defs * sizeof (tree *); total += size; fprintf (file, fmt_str_1, "V_MAY_DEF operands", dfa_stats.num_v_may_defs, SCALE (size), LABEL (size)); size = dfa_stats.num_v_must_defs * sizeof (tree *); total += size; fprintf (file, fmt_str_1, "V_MUST_DEF operands", dfa_stats.num_v_must_defs, SCALE (size), LABEL (size)); size = dfa_stats.num_phis * sizeof (struct tree_phi_node); total += size; fprintf (file, fmt_str_1, "PHI nodes", dfa_stats.num_phis, SCALE (size), LABEL (size)); size = dfa_stats.num_phi_args * sizeof (struct phi_arg_d); total += size; fprintf (file, fmt_str_1, "PHI arguments", dfa_stats.num_phi_args, SCALE (size), LABEL (size)); fprintf (file, "---------------------------------------------------------\n"); fprintf (file, fmt_str_3, "Total memory used by DFA/SSA data", SCALE (total), LABEL (total)); fprintf (file, "---------------------------------------------------------\n"); fprintf (file, "\n"); if (dfa_stats.num_phis) fprintf (file, "Average number of arguments per PHI node: %.1f (max: %d)\n", (float) dfa_stats.num_phi_args / (float) dfa_stats.num_phis, dfa_stats.max_num_phi_args); fprintf (file, "\n"); } /* Dump DFA statistics on stderr. */ void debug_dfa_stats (void) { dump_dfa_stats (stderr); } /* Collect DFA statistics and store them in the structure pointed by DFA_STATS_P. */ static void collect_dfa_stats (struct dfa_stats_d *dfa_stats_p) { htab_t htab; basic_block bb; block_stmt_iterator i; if (dfa_stats_p == NULL) abort (); memset ((void *)dfa_stats_p, 0, sizeof (struct dfa_stats_d)); /* Walk all the trees in the function counting references. Start at basic block 0, but don't stop at block boundaries. 
*/ htab = htab_create (30, htab_hash_pointer, htab_eq_pointer, NULL); for (i = bsi_start (BASIC_BLOCK (0)); !bsi_end_p (i); bsi_next (&i)) walk_tree (bsi_stmt_ptr (i), collect_dfa_stats_r, (void *) dfa_stats_p, (void *) htab); htab_delete (htab); FOR_EACH_BB (bb) { tree phi; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { dfa_stats_p->num_phis++; dfa_stats_p->num_phi_args += PHI_NUM_ARGS (phi); if (PHI_NUM_ARGS (phi) > dfa_stats_p->max_num_phi_args) dfa_stats_p->max_num_phi_args = PHI_NUM_ARGS (phi); } } } /* Callback for walk_tree to collect DFA statistics for a tree and its children. */ static tree collect_dfa_stats_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data) { tree t = *tp; struct dfa_stats_d *dfa_stats_p = (struct dfa_stats_d *)data; if (t->common.ann) { switch (ann_type (t->common.ann)) { case STMT_ANN: { stmt_ann_t ann = (stmt_ann_t) t->common.ann; dfa_stats_p->num_stmt_anns++; dfa_stats_p->num_defs += NUM_DEFS (DEF_OPS (ann)); dfa_stats_p->num_uses += NUM_USES (USE_OPS (ann)); dfa_stats_p->num_v_may_defs += NUM_V_MAY_DEFS (V_MAY_DEF_OPS (ann)); dfa_stats_p->num_vuses += NUM_VUSES (VUSE_OPS (ann)); dfa_stats_p->num_v_must_defs += NUM_V_MUST_DEFS (V_MUST_DEF_OPS (ann)); break; } case VAR_ANN: dfa_stats_p->num_var_anns++; break; default: break; } } return NULL; } /*--------------------------------------------------------------------------- Miscellaneous helpers ---------------------------------------------------------------------------*/ /* Callback for walk_tree. Used to collect variables referenced in the function. */ static tree find_vars_r (tree *tp, int *walk_subtrees, void *data) { struct walk_state *walk_state = (struct walk_state *) data; /* If T is a regular variable that the optimizers are interested in, add it to the list of variables. */ if (SSA_VAR_P (*tp)) add_referenced_var (*tp, walk_state); /* Type, _DECL and constant nodes have no interesting children. Ignore them. */ else if (DECL_P (*tp) || TYPE_P (*tp) || TREE_CODE_CLASS (TREE_CODE (*tp)) == 'c') *walk_subtrees = 0; return NULL_TREE; } /* Add VAR to the list of dereferenced variables. WALK_STATE contains a hash table used to avoid adding the same variable more than once. Note that this function assumes that VAR is a valid SSA variable. If WALK_STATE is NULL, no duplicate checking is done. */ static void add_referenced_var (tree var, struct walk_state *walk_state) { void **slot; var_ann_t v_ann; v_ann = get_var_ann (var); if (walk_state) slot = htab_find_slot (walk_state->vars_found, (void *) var, INSERT); else slot = NULL; if (slot == NULL || *slot == NULL) { /* This is the first time we find this variable, add it to the REFERENCED_VARS array and annotate it with attributes that are intrinsic to the variable. */ if (slot) *slot = (void *) var; v_ann->uid = num_referenced_vars; VARRAY_PUSH_TREE (referenced_vars, var); /* Global and static variables are call-clobbered, always. */ if (needs_to_live_in_memory (var)) mark_call_clobbered (var); /* DECL_NONLOCAL variables should not be removed, as they are needed to emit nested functions. */ if (DECL_NONLOCAL (var)) v_ann->used = 1; } } /* Return the virtual variable associated to the non-scalar variable VAR. */ tree get_virtual_var (tree var) { STRIP_NOPS (var); if (TREE_CODE (var) == SSA_NAME) var = SSA_NAME_VAR (var); while (TREE_CODE (var) == REALPART_EXPR || TREE_CODE (var) == IMAGPART_EXPR || handled_component_p (var)) var = TREE_OPERAND (var, 0); #ifdef ENABLE_CHECKING /* Treating GIMPLE registers as virtual variables makes no sense. 
Also complain if we couldn't extract a _DECL out of the original expression. */ if (!SSA_VAR_P (var) || is_gimple_reg (var)) abort (); #endif return var; } /* Mark variables in BLOCK that have hidden uses. A hidden use can occur due to VLA declarations or nested functions. */ static void find_hidden_use_vars (tree block) { tree sub, decl, tem; /* Check all the arrays declared in the block for VLAs. While scanning the block's variables, also see if there is a nested function at this scope. */ for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl)) { int inside_vla = 0; walk_tree (&decl, find_hidden_use_vars_r, &inside_vla, NULL); } /* Now repeat the search in any sub-blocks. */ for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub)) find_hidden_use_vars (sub); /* A VLA parameter may use a variable which was set from another parameter to declare the size of the VLA. We need to mark the variable as having a hidden use since it is used to declare the VLA parameter and that declaration is not seen by the SSA code. Note get_pending_sizes clears the PENDING_SIZES chain, so we must restore it. */ tem = get_pending_sizes (); put_pending_sizes (tem); for (; tem; tem = TREE_CHAIN (tem)) { int inside_vla = 1; walk_tree (&TREE_VALUE (tem), find_hidden_use_vars_r, &inside_vla, NULL); } } /* Callback for walk_tree used by find_hidden_use_vars to analyze each variable in a lexical block. If the variable's size is not a compile-time constant, then mark all objects needed to compute the variable's size as having hidden uses. */ static tree find_hidden_use_vars_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { int *inside_vla = (int *) data; /* We need to look for hidden uses due to VLAs in variable definitions. We originally used to look for these hidden uses in the variable's type, but that's unreliable if the type's size contains a SAVE_EXPR for a different function context than the variable is used within. */ if (SSA_VAR_P (*tp) && ((DECL_SIZE (*tp) && ! really_constant_p (DECL_SIZE (*tp))) || (DECL_SIZE_UNIT (*tp) && ! really_constant_p (DECL_SIZE_UNIT (*tp))))) { int save = *inside_vla; *inside_vla = 1; walk_tree (&DECL_SIZE (*tp), find_hidden_use_vars_r, inside_vla, NULL); walk_tree (&DECL_SIZE_UNIT (*tp), find_hidden_use_vars_r, inside_vla, NULL); *inside_vla = save; } else if (*inside_vla && SSA_VAR_P (*tp)) set_has_hidden_use (*tp); return NULL_TREE; } /* Add a temporary variable to REFERENCED_VARS. This is similar to add_referenced_var, but is used by passes that need to add new temps to the REFERENCED_VARS array after the program has been scanned for variables. The variable will just receive a new UID and be added to the REFERENCED_VARS array without checking for duplicates. */ void add_referenced_tmp_var (tree var) { add_referenced_var (var, NULL); } /* Return true if V_MAY_DEFS_AFTER contains fewer entries than V_MAY_DEFS_BEFORE. Note that this assumes that both varrays are V_MAY_DEF operands for the same statement. */ static inline bool v_may_defs_disappeared_p (v_may_def_optype v_may_defs_before, v_may_def_optype v_may_defs_after) { /* If there was nothing before, nothing could've disappeared. */ if (v_may_defs_before == NULL) return false; /* All/some of them gone. */ if (v_may_defs_after == NULL || NUM_V_MAY_DEFS (v_may_defs_before) > NUM_V_MAY_DEFS (v_may_defs_after)) return true; return false; } /* Return true if V_MUST_DEFS_AFTER contains fewer entries than V_MUST_DEFS_BEFORE.
Note that this assumes that both varrays are V_MUST_DEF operands for the same statement. */ static inline bool v_must_defs_disappeared_p (v_must_def_optype v_must_defs_before, v_must_def_optype v_must_defs_after) { /* If there was nothing before, nothing could've disappeared. */ if (v_must_defs_before == NULL) return false; /* All/some of them gone. */ if (v_must_defs_after == NULL || NUM_V_MUST_DEFS (v_must_defs_before) > NUM_V_MUST_DEFS (v_must_defs_after)) return true; return false; } /* Add all the non-SSA variables found in STMT's operands to the bitmap VARS_TO_RENAME. */ void mark_new_vars_to_rename (tree stmt, bitmap vars_to_rename) { def_optype defs; use_optype uses; v_may_def_optype v_may_defs; vuse_optype vuses; v_must_def_optype v_must_defs; size_t i; bitmap vars_in_vops_to_rename; bool found_exposed_symbol = false; v_may_def_optype v_may_defs_before, v_may_defs_after; v_must_def_optype v_must_defs_before, v_must_defs_after; stmt_ann_t ann; vars_in_vops_to_rename = BITMAP_XMALLOC (); /* Before re-scanning the statement for operands, mark the existing virtual operands to be renamed again. We do this because when new symbols are exposed, the virtual operands that were here before due to aliasing will probably be removed by the call to get_stmt_operand. Therefore, we need to flag them to be renamed beforehand. We flag them in a separate bitmap because we don't really want to rename them if there are not any newly exposed symbols in the statement operands. */ ann = stmt_ann (stmt); v_may_defs_before = v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { tree var = V_MAY_DEF_RESULT (v_may_defs, i); if (!DECL_P (var)) var = SSA_NAME_VAR (var); bitmap_set_bit (vars_in_vops_to_rename, var_ann (var)->uid); } vuses = VUSE_OPS (ann); for (i = 0; i < NUM_VUSES (vuses); i++) { tree var = VUSE_OP (vuses, i); if (!DECL_P (var)) var = SSA_NAME_VAR (var); bitmap_set_bit (vars_in_vops_to_rename, var_ann (var)->uid); } v_must_defs_before = v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { tree var = V_MUST_DEF_OP (v_must_defs, i); if (!DECL_P (var)) var = SSA_NAME_VAR (var); bitmap_set_bit (vars_in_vops_to_rename, var_ann (var)->uid); } /* Now force an operand re-scan on the statement and mark any newly exposed variables. 
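As a purely illustrative example of a newly exposed symbol (not drawn from any particular test case): if constant propagation rewrites the indirect store "*p_3 = 5" into the direct store "a = 5", the re-scan below will find the bare _DECL 'a' as a real DEF operand where previously only virtual operands mentioned it, and 'a' must therefore be added to VARS_TO_RENAME.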
*/ modify_stmt (stmt); get_stmt_operands (stmt); defs = DEF_OPS (ann); for (i = 0; i < NUM_DEFS (defs); i++) { tree var = DEF_OP (defs, i); if (DECL_P (var)) { found_exposed_symbol = true; bitmap_set_bit (vars_to_rename, var_ann (var)->uid); } } uses = USE_OPS (ann); for (i = 0; i < NUM_USES (uses); i++) { tree var = USE_OP (uses, i); if (DECL_P (var)) { found_exposed_symbol = true; bitmap_set_bit (vars_to_rename, var_ann (var)->uid); } } v_may_defs_after = v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { tree var = V_MAY_DEF_RESULT (v_may_defs, i); if (DECL_P (var)) { found_exposed_symbol = true; bitmap_set_bit (vars_to_rename, var_ann (var)->uid); } } vuses = VUSE_OPS (ann); for (i = 0; i < NUM_VUSES (vuses); i++) { tree var = VUSE_OP (vuses, i); if (DECL_P (var)) { found_exposed_symbol = true; bitmap_set_bit (vars_to_rename, var_ann (var)->uid); } } v_must_defs_after = v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { tree var = V_MUST_DEF_OP (v_must_defs, i); if (DECL_P (var)) { found_exposed_symbol = true; bitmap_set_bit (vars_to_rename, var_ann (var)->uid); } } /* If we found any newly exposed symbols, or if there are fewer VDEF operands in the statement, add the variables we had set in VARS_IN_VOPS_TO_RENAME to VARS_TO_RENAME. We need to check for vanishing VDEFs because in those cases, the names that were formerly generated by this statement are not going to be available anymore. */ if (found_exposed_symbol || v_may_defs_disappeared_p (v_may_defs_before, v_may_defs_after) || v_must_defs_disappeared_p (v_must_defs_before, v_must_defs_after)) bitmap_a_or_b (vars_to_rename, vars_to_rename, vars_in_vops_to_rename); BITMAP_XFREE (vars_in_vops_to_rename); } /* Exception handling semantics and decomposition for trees. Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* HACK */ extern int using_eh_for_cleanups_p; /* Misc functions used in this file. */ /* Compare and hash for any structure which begins with a canonical pointer. Assumes all pointers are interchangable, which is sort of already assumed by gcc elsewhere IIRC. */ static int struct_ptr_eq (const void *a, const void *b) { const void * const * x = a; const void * const * y = b; return *x == *y; } static hashval_t struct_ptr_hash (const void *a) { const void * const * x = a; return (size_t)*x >> 4; } /* Remember and lookup EH region data for arbitrary statements. Really this means any statement that could_throw_p. We could stuff this information into the stmt_ann data structure, but: (1) We absolutely rely on this information being kept until we get to rtl. Once we're done with lowering here, if we lose the information there's no way to recover it! (2) There are many more statements that *cannot* throw as compared to those that can. 
We should be saving some amount of space by only allocating memory for those that can throw. */ struct throw_stmt_node GTY(()) { tree stmt; int region_nr; }; static GTY((param_is (struct throw_stmt_node))) htab_t throw_stmt_table; static void record_stmt_eh_region (struct eh_region *region, tree t) { struct throw_stmt_node *n; void **slot; if (!region) return; n = ggc_alloc (sizeof (*n)); n->stmt = t; n->region_nr = get_eh_region_number (region); slot = htab_find_slot (throw_stmt_table, n, INSERT); if (*slot) abort (); *slot = n; } void add_stmt_to_eh_region (tree t, int num) { struct throw_stmt_node *n; void **slot; if (num < 0) abort (); n = ggc_alloc (sizeof (*n)); n->stmt = t; n->region_nr = num; slot = htab_find_slot (throw_stmt_table, n, INSERT); if (*slot) abort (); *slot = n; } bool remove_stmt_from_eh_region (tree t) { struct throw_stmt_node dummy; void **slot; if (!throw_stmt_table) return false; dummy.stmt = t; slot = htab_find_slot (throw_stmt_table, &dummy, NO_INSERT); if (slot) { htab_clear_slot (throw_stmt_table, slot); return true; } else return false; } int lookup_stmt_eh_region (tree t) { struct throw_stmt_node *p, n; if (!throw_stmt_table) return -2; n.stmt = t; p = htab_find (throw_stmt_table, &n); return (p ? p->region_nr : -1); } /* First pass of EH node decomposition. Build up a tree of TRY_FINALLY_EXPR nodes and LABEL_DECL nodes. We will use this during the second phase to determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */ struct finally_tree_node { tree child, parent; }; /* Note that this table is *not* marked GTY. It is short-lived. */ static htab_t finally_tree; static void record_in_finally_tree (tree child, tree parent) { struct finally_tree_node *n; void **slot; n = xmalloc (sizeof (*n)); n->child = child; n->parent = parent; slot = htab_find_slot (finally_tree, n, INSERT); if (*slot) abort (); *slot = n; } static void collect_finally_tree (tree t, tree region) { tailrecurse: switch (TREE_CODE (t)) { case LABEL_EXPR: record_in_finally_tree (LABEL_EXPR_LABEL (t), region); break; case TRY_FINALLY_EXPR: record_in_finally_tree (t, region); collect_finally_tree (TREE_OPERAND (t, 0), t); t = TREE_OPERAND (t, 1); goto tailrecurse; case TRY_CATCH_EXPR: collect_finally_tree (TREE_OPERAND (t, 0), region); t = TREE_OPERAND (t, 1); goto tailrecurse; case CATCH_EXPR: t = CATCH_BODY (t); goto tailrecurse; case EH_FILTER_EXPR: t = EH_FILTER_FAILURE (t); goto tailrecurse; case STATEMENT_LIST: { tree_stmt_iterator i; for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i)) collect_finally_tree (tsi_stmt (i), region); } break; default: /* A type, a decl, or some kind of statement that we're not interested in. Don't walk them. */ break; } } /* Use the finally tree to determine if a jump from START to TARGET would leave the try_finally node that START lives in. */ static bool outside_finally_tree (tree start, tree target) { struct finally_tree_node n, *p; do { n.child = start; p = htab_find (finally_tree, &n); if (!p) return true; start = p->parent; } while (start != target); return false; } /* Second pass of EH node decomposition. Actually transform the TRY_FINALLY and TRY_CATCH nodes into a set of gotos, magic labels, and eh regions. The eh region creation is straight-forward, but frobbing all the gotos and such into shape isn't. */ /* State of the world while lowering. */ struct leh_state { /* What's "current" while constructing the eh region tree. These correspond to variables of the same name in cfun->eh, which we don't have easy access to. 
*/ struct eh_region *cur_region; struct eh_region *prev_try; /* Processing of TRY_FINALLY requires a bit more state. This is split out into a separate structure so that we don't have to copy so much when processing other nodes. */ struct leh_tf_state *tf; }; struct leh_tf_state { /* Pointer to the TRY_FINALLY node under discussion. The try_finally_expr is the original TRY_FINALLY_EXPR. We need to retain this so that outside_finally_tree can reliably reference the tree used in the collect_finally_tree data structures. */ tree try_finally_expr; tree *top_p; /* The state outside this try_finally node. */ struct leh_state *outer; /* The exception region created for it. */ struct eh_region *region; /* The GOTO_QUEUE is an array of GOTO_EXPR and RETURN_EXPR statements that are seen to escape this TRY_FINALLY_EXPR node. */ struct goto_queue_node { tree stmt; tree repl_stmt; tree cont_stmt; int index; } *goto_queue; size_t goto_queue_size; size_t goto_queue_active; /* The set of unique labels seen as entries in the goto queue. */ varray_type dest_array; /* A label to be added at the end of the completed transformed sequence. It will be set if may_fallthru was true *at one time*, though subsequent transformations may have cleared that flag. */ tree fallthru_label; /* A label that has been registered with except.c to be the landing pad for this try block. */ tree eh_label; /* True if it is possible to fall out the bottom of the try block. Cleared if the fallthru is converted to a goto. */ bool may_fallthru; /* True if any entry in goto_queue is a RETURN_EXPR. */ bool may_return; /* True if the finally block can receive an exception edge. Cleared if the exception case is handled by code duplication. */ bool may_throw; }; static void lower_eh_filter (struct leh_state *, tree *); static void lower_eh_constructs_1 (struct leh_state *, tree *); /* Comparison function for qsort/bsearch. We're interested in searching goto queue elements for source statements. */ static int goto_queue_cmp (const void *x, const void *y) { tree a = ((const struct goto_queue_node *)x)->stmt; tree b = ((const struct goto_queue_node *)y)->stmt; return (a == b ? 0 : a < b ? -1 : 1); } /* Search for STMT in the goto queue. Return the replacement, or null if the statement isn't in the queue. */ static tree find_goto_replacement (struct leh_tf_state *tf, tree stmt) { struct goto_queue_node tmp, *ret; tmp.stmt = stmt; ret = bsearch (&tmp, tf->goto_queue, tf->goto_queue_active, sizeof (struct goto_queue_node), goto_queue_cmp); return (ret ? ret->repl_stmt : NULL); } /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a lowered COND_EXPR. If, by chance, the replacement is a simple goto, then we can just splat it in, otherwise we add the new stmts immediately after the COND_EXPR and redirect. */ static void replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf, tree_stmt_iterator *tsi) { tree new, one, label; new = find_goto_replacement (tf, *tp); if (!new) return; one = expr_only (new); if (one && TREE_CODE (one) == GOTO_EXPR) { *tp = one; return; } label = build1 (LABEL_EXPR, void_type_node, NULL_TREE); *tp = build_and_jump (&LABEL_EXPR_LABEL (label)); tsi_link_after (tsi, label, TSI_CONTINUE_LINKING); tsi_link_after (tsi, new, TSI_CONTINUE_LINKING); } /* The real work of replace_goto_queue. Returns with TSI updated to point to the next statement.
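As a rough illustration (the exact text depends on which lowering strategy filled in repl_stmt), a queued "goto D;" is spliced out and replaced by its repl_stmt, typically of the form "finally_tmp = N; goto finally_label;" for the switch-based lowering, or simply a goto to a per-destination label when the finally block is duplicated.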
*/ static void replace_goto_queue_stmt_list (tree, struct leh_tf_state *); static void replace_goto_queue_1 (tree t, struct leh_tf_state *tf, tree_stmt_iterator *tsi) { switch (TREE_CODE (t)) { case GOTO_EXPR: case RETURN_EXPR: t = find_goto_replacement (tf, t); if (t) { tsi_link_before (tsi, t, TSI_SAME_STMT); tsi_delink (tsi); return; } break; case COND_EXPR: replace_goto_queue_cond_clause (&COND_EXPR_THEN (t), tf, tsi); replace_goto_queue_cond_clause (&COND_EXPR_ELSE (t), tf, tsi); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: replace_goto_queue_stmt_list (TREE_OPERAND (t, 0), tf); replace_goto_queue_stmt_list (TREE_OPERAND (t, 1), tf); break; case CATCH_EXPR: replace_goto_queue_stmt_list (CATCH_BODY (t), tf); break; case EH_FILTER_EXPR: replace_goto_queue_stmt_list (EH_FILTER_FAILURE (t), tf); break; case STATEMENT_LIST: abort (); default: /* These won't have gotos in them. */ break; } tsi_next (tsi); } /* A subroutine of replace_goto_queue. Handles STATEMENT_LISTs. */ static void replace_goto_queue_stmt_list (tree t, struct leh_tf_state *tf) { tree_stmt_iterator i = tsi_start (t); while (!tsi_end_p (i)) replace_goto_queue_1 (tsi_stmt (i), tf, &i); } /* Replace all goto queue members. */ static void replace_goto_queue (struct leh_tf_state *tf) { replace_goto_queue_stmt_list (*tf->top_p, tf); } /* For any GOTO_EXPR or RETURN_EXPR, decide whether it leaves a try_finally node, and if so record that fact in the goto queue associated with that try_finally node. */ static void maybe_record_in_goto_queue (struct leh_state *state, tree stmt) { struct leh_tf_state *tf = state->tf; struct goto_queue_node *q; size_t active, size; int index; if (!tf) return; switch (TREE_CODE (stmt)) { case GOTO_EXPR: { tree lab = GOTO_DESTINATION (stmt); /* Computed and non-local gotos do not get processed. Given their nature we can neither tell whether we've escaped the finally block nor redirect them if we knew. */ if (TREE_CODE (lab) != LABEL_DECL) return; /* No need to record gotos that don't leave the try block. */ if (! outside_finally_tree (lab, tf->try_finally_expr)) return; if (! tf->dest_array) { VARRAY_TREE_INIT (tf->dest_array, 10, "dest_array"); VARRAY_PUSH_TREE (tf->dest_array, lab); index = 0; } else { int n = VARRAY_ACTIVE_SIZE (tf->dest_array); for (index = 0; index < n; ++index) if (VARRAY_TREE (tf->dest_array, index) == lab) break; if (index == n) VARRAY_PUSH_TREE (tf->dest_array, lab); } } break; case RETURN_EXPR: tf->may_return = true; index = -1; break; default: abort (); } active = tf->goto_queue_active; size = tf->goto_queue_size; if (active >= size) { size = (size ? size * 2 : 32); tf->goto_queue_size = size; tf->goto_queue = xrealloc (tf->goto_queue, size * sizeof (struct goto_queue_node)); } q = &tf->goto_queue[active]; tf->goto_queue_active = active + 1; memset (q, 0, sizeof (*q)); q->stmt = stmt; q->index = index; } #ifdef ENABLE_CHECKING /* We do not process SWITCH_EXPRs for now. As long as the original source was in fact structured, and we've not yet done jump threading, then none of the labels will leave outer TRY_FINALLY_EXPRs. Verify this. 
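For example, given structured input such as "try { switch (x) { case 0: f (); break; default: g (); } } finally { h (); }", every CASE_LABEL of the switch is defined inside the try body, so no branch generated for the SWITCH_EXPR can leave the enclosing TRY_FINALLY_EXPR; the check below merely asserts that this property holds.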
*/ static void verify_norecord_switch_expr (struct leh_state *state, tree switch_expr) { struct leh_tf_state *tf = state->tf; size_t i, n; tree vec; if (!tf) return; vec = SWITCH_LABELS (switch_expr); n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i)); if (outside_finally_tree (lab, tf->try_finally_expr)) abort (); } } #else #define verify_norecord_switch_expr(state, switch_expr) #endif /* Redirect a RETURN_EXPR pointed to by Q->STMT to FINLAB. Place in Q->CONT_STMT whatever is needed to finish the return. If MOD is non-null, insert it before the new branch. RETURN_VALUE_P is a cache containing a temporary variable to be used in manipulating the value returned from the function. */ static void do_return_redirection (struct goto_queue_node *q, tree finlab, tree mod, tree *return_value_p) { tree ret_expr = TREE_OPERAND (q->stmt, 0); tree x; if (ret_expr) { /* The nasty part about redirecting the return value is that the return value itself is to be computed before the FINALLY block is executed. e.g. int x; int foo (void) { x = 0; try { return x; } finally { x++; } } should return 0, not 1. Arrange for this to happen by copying the computed return value into a local temporary. This also allows us to redirect multiple return statements through the same destination block; whether this is a net win or not really depends, I guess, but it does make generation of the switch in lower_try_finally_switch easier. */ if (TREE_CODE (ret_expr) == RESULT_DECL) { if (!*return_value_p) *return_value_p = ret_expr; else if (*return_value_p != ret_expr) abort (); q->cont_stmt = q->stmt; } else if (TREE_CODE (ret_expr) == MODIFY_EXPR) { tree result = TREE_OPERAND (ret_expr, 0); tree new, old = TREE_OPERAND (ret_expr, 1); if (!*return_value_p) { if (aggregate_value_p (TREE_TYPE (result), TREE_TYPE (current_function_decl))) /* If this function returns in memory, copy the argument into the return slot now. Otherwise, we might need to worry about magic return semantics, so we need to use a temporary to hold the value until we're actually ready to return. */ new = result; else new = create_tmp_var (TREE_TYPE (old), "rettmp"); *return_value_p = new; } else new = *return_value_p; x = build (MODIFY_EXPR, TREE_TYPE (new), new, old); append_to_statement_list (x, &q->repl_stmt); if (new == result) x = result; else x = build (MODIFY_EXPR, TREE_TYPE (result), result, new); q->cont_stmt = build1 (RETURN_EXPR, void_type_node, x); } else abort (); } else { /* If we don't return a value, all return statements are the same. */ q->cont_stmt = q->stmt; } if (mod) append_to_statement_list (mod, &q->repl_stmt); x = build1 (GOTO_EXPR, void_type_node, finlab); append_to_statement_list (x, &q->repl_stmt); } /* Similar, but easier, for GOTO_EXPR. */ static void do_goto_redirection (struct goto_queue_node *q, tree finlab, tree mod) { tree x; q->cont_stmt = q->stmt; if (mod) append_to_statement_list (mod, &q->repl_stmt); x = build1 (GOTO_EXPR, void_type_node, finlab); append_to_statement_list (x, &q->repl_stmt); } /* We want to transform try { body; } catch { stuff; } to body; goto over; lab: stuff; over: T is a TRY_FINALLY or TRY_CATCH node. LAB is the label that should be placed before the second operand, or NULL. OVER is an existing label that should be put at the exit, or NULL.
*/ static void frob_into_branch_around (tree *tp, tree lab, tree over) { tree x, op1; op1 = TREE_OPERAND (*tp, 1); *tp = TREE_OPERAND (*tp, 0); if (block_may_fallthru (*tp)) { if (!over) over = create_artificial_label (); x = build1 (GOTO_EXPR, void_type_node, over); append_to_statement_list (x, tp); } if (lab) { x = build1 (LABEL_EXPR, void_type_node, lab); append_to_statement_list (x, tp); } append_to_statement_list (op1, tp); if (over) { x = build1 (LABEL_EXPR, void_type_node, over); append_to_statement_list (x, tp); } } /* A subroutine of lower_try_finally. Duplicate the tree rooted at T. Make sure to record all new labels found. */ static tree lower_try_finally_dup_block (tree t, struct leh_state *outer_state) { tree region = NULL; t = lhd_unsave_expr_now (t); if (outer_state->tf) region = outer_state->tf->try_finally_expr; collect_finally_tree (t, region); return t; } /* A subroutine of lower_try_finally. Create a fallthru label for the given try_finally state. The only tricky bit here is that we have to make sure to record the label in our outer context. */ static tree lower_try_finally_fallthru_label (struct leh_tf_state *tf) { tree label = tf->fallthru_label; if (!label) { label = create_artificial_label (); tf->fallthru_label = label; if (tf->outer->tf) record_in_finally_tree (label, tf->outer->tf->try_finally_expr); } return label; } /* A subroutine of lower_try_finally. If lang_protect_cleanup_actions returns non-null, then the language requires that the exception path out of a try_finally be treated specially. To wit: the code within the finally block may not itself throw an exception. We have two choices here. First we can duplicate the finally block and wrap it in a must_not_throw region. Second, we can generate code like try { finally_block; } catch { if (fintmp == eh_edge) protect_cleanup_actions; } where "fintmp" is the temporary used in the switch statement generation alternative considered below. For the nonce, we always choose the first option. THIS_STATE may be null if this is a try-cleanup, not a try-finally. */ static void honor_protect_cleanup_actions (struct leh_state *outer_state, struct leh_state *this_state, struct leh_tf_state *tf) { tree protect_cleanup_actions, finally, x; tree_stmt_iterator i; bool finally_may_fallthru; /* First check for nothing to do. */ if (lang_protect_cleanup_actions) protect_cleanup_actions = lang_protect_cleanup_actions (); else protect_cleanup_actions = NULL; finally = TREE_OPERAND (*tf->top_p, 1); /* If the EH case of the finally block can fall through, this may be a structure of the form try { try { throw ...; } cleanup { try { throw ...; } catch (...) { } } } catch (...) { yyy; } E.g. with an inline destructor with an embedded try block. In this case we must save the runtime EH data around the nested exception. This complication means that any time the previous runtime data might be used (via fallthru from the finally) we handle the eh case here, whether or not protect_cleanup_actions is active. */ finally_may_fallthru = block_may_fallthru (finally); if (!finally_may_fallthru && !protect_cleanup_actions) return; /* Duplicate the FINALLY block. Only need to do this for try-finally, and not for cleanups. */ if (this_state) finally = lower_try_finally_dup_block (finally, outer_state); /* Resume execution after the exception. Adding this now lets lower_eh_filter not add unnecessary gotos, as it is clear that we never fallthru from this copy of the finally block.
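Schematically (the temporary names are simply the ones created by the code that follows, and this is only a sketch), the EH copy of the finally block becomes

     save_eptr = (exception pointer);
     save_filt = (exception filter);
     ... finally block ...
     (exception pointer) = save_eptr;
     (exception filter) = save_filt;
     resx (region number);

so the exception currently being handled is preserved across any nested throw that the finally block itself handles.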
*/ if (finally_may_fallthru) { tree save_eptr, save_filt; save_eptr = create_tmp_var (ptr_type_node, "save_eptr"); save_filt = create_tmp_var (integer_type_node, "save_filt"); i = tsi_start (finally); x = build (EXC_PTR_EXPR, ptr_type_node); x = build (MODIFY_EXPR, void_type_node, save_eptr, x); tsi_link_before (&i, x, TSI_CONTINUE_LINKING); x = build (FILTER_EXPR, integer_type_node); x = build (MODIFY_EXPR, void_type_node, save_filt, x); tsi_link_before (&i, x, TSI_CONTINUE_LINKING); i = tsi_last (finally); x = build (EXC_PTR_EXPR, ptr_type_node); x = build (MODIFY_EXPR, void_type_node, x, save_eptr); tsi_link_after (&i, x, TSI_CONTINUE_LINKING); x = build (FILTER_EXPR, integer_type_node); x = build (MODIFY_EXPR, void_type_node, x, save_filt); tsi_link_after (&i, x, TSI_CONTINUE_LINKING); x = build1 (RESX_EXPR, void_type_node, build_int_2 (get_eh_region_number (tf->region), 0)); tsi_link_after (&i, x, TSI_CONTINUE_LINKING); } /* Wrap the block with protect_cleanup_actions as the action. */ if (protect_cleanup_actions) { x = build (EH_FILTER_EXPR, void_type_node, NULL, NULL); append_to_statement_list (protect_cleanup_actions, &EH_FILTER_FAILURE (x)); EH_FILTER_MUST_NOT_THROW (x) = 1; finally = build (TRY_CATCH_EXPR, void_type_node, finally, x); lower_eh_filter (outer_state, &finally); } else lower_eh_constructs_1 (outer_state, &finally); /* Hook this up to the end of the existing try block. If we previously fell through the end, we'll have to branch around. This means adding a new goto, and adding it to the queue. */ i = tsi_last (TREE_OPERAND (*tf->top_p, 0)); if (tf->may_fallthru) { x = lower_try_finally_fallthru_label (tf); x = build1 (GOTO_EXPR, void_type_node, x); tsi_link_after (&i, x, TSI_CONTINUE_LINKING); if (this_state) maybe_record_in_goto_queue (this_state, x); tf->may_fallthru = false; } x = build1 (LABEL_EXPR, void_type_node, tf->eh_label); tsi_link_after (&i, x, TSI_CONTINUE_LINKING); tsi_link_after (&i, finally, TSI_CONTINUE_LINKING); /* Having now been handled, EH isn't to be considered with the rest of the outgoing edges. */ tf->may_throw = false; } /* A subroutine of lower_try_finally. We have determined that there is no fallthru edge out of the finally block. This means that there is no outgoing edge corresponding to any incoming edge. Restructure the try_finally node for this special case. */ static void lower_try_finally_nofallthru (struct leh_state *state, struct leh_tf_state *tf) { tree x, finally, lab, return_val; struct goto_queue_node *q, *qe; if (tf->may_throw) lab = tf->eh_label; else lab = create_artificial_label (); finally = TREE_OPERAND (*tf->top_p, 1); *tf->top_p = TREE_OPERAND (*tf->top_p, 0); x = build1 (LABEL_EXPR, void_type_node, lab); append_to_statement_list (x, tf->top_p); return_val = NULL; q = tf->goto_queue; qe = q + tf->goto_queue_active; for (; q < qe; ++q) if (q->index < 0) do_return_redirection (q, lab, NULL, &return_val); else do_goto_redirection (q, lab, NULL); replace_goto_queue (tf); lower_eh_constructs_1 (state, &finally); append_to_statement_list (finally, tf->top_p); } /* A subroutine of lower_try_finally. We have determined that there is exactly one destination of the finally block. Restructure the try_finally node for this special case. 
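Roughly, and only as a sketch: when the one destination is a single "goto D;" that leaves the construct, try { body; ... goto D; } finally { fin; } is rewritten as the body with that goto redirected to a fresh finally_label, followed by finally_label:, the lowered fin, and then the original goto D. The fallthru-only and throw-only cases handled below follow the same idea but need no such label, or use the EH label and a RESX instead.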
*/ static void lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf) { struct goto_queue_node *q, *qe; tree x, finally, finally_label; finally = TREE_OPERAND (*tf->top_p, 1); *tf->top_p = TREE_OPERAND (*tf->top_p, 0); lower_eh_constructs_1 (state, &finally); if (tf->may_throw) { /* Only reachable via the exception edge. Add the given label to the head of the FINALLY block. Append a RESX at the end. */ x = build1 (LABEL_EXPR, void_type_node, tf->eh_label); append_to_statement_list (x, tf->top_p); append_to_statement_list (finally, tf->top_p); x = build1 (RESX_EXPR, void_type_node, build_int_2 (get_eh_region_number (tf->region), 0)); append_to_statement_list (x, tf->top_p); return; } if (tf->may_fallthru) { /* Only reachable via the fallthru edge. Do nothing but let the two blocks run together; we'll fall out the bottom. */ append_to_statement_list (finally, tf->top_p); return; } finally_label = create_artificial_label (); x = build1 (LABEL_EXPR, void_type_node, finally_label); append_to_statement_list (x, tf->top_p); append_to_statement_list (finally, tf->top_p); q = tf->goto_queue; qe = q + tf->goto_queue_active; if (tf->may_return) { /* Reachable by return expressions only. Redirect them. */ tree return_val = NULL; for (; q < qe; ++q) do_return_redirection (q, finally_label, NULL, &return_val); replace_goto_queue (tf); } else { /* Reachable by goto expressions only. Redirect them. */ for (; q < qe; ++q) do_goto_redirection (q, finally_label, NULL); replace_goto_queue (tf); if (VARRAY_TREE (tf->dest_array, 0) == tf->fallthru_label) { /* Reachable by goto to fallthru label only. Redirect it to the new label (already created, sadly), and do not emit the final branch out, or the fallthru label. */ tf->fallthru_label = NULL; return; } } append_to_statement_list (tf->goto_queue[0].cont_stmt, tf->top_p); maybe_record_in_goto_queue (state, tf->goto_queue[0].cont_stmt); } /* A subroutine of lower_try_finally. There are multiple edges incoming and outgoing from the finally block. Implement this by duplicating the finally block for every destination. */ static void lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf) { tree finally, new_stmt; tree x; finally = TREE_OPERAND (*tf->top_p, 1); *tf->top_p = TREE_OPERAND (*tf->top_p, 0); new_stmt = NULL_TREE; if (tf->may_fallthru) { x = lower_try_finally_dup_block (finally, state); lower_eh_constructs_1 (state, &x); append_to_statement_list (x, &new_stmt); x = lower_try_finally_fallthru_label (tf); x = build1 (GOTO_EXPR, void_type_node, x); append_to_statement_list (x, &new_stmt); } if (tf->may_throw) { x = build1 (LABEL_EXPR, void_type_node, tf->eh_label); append_to_statement_list (x, &new_stmt); x = lower_try_finally_dup_block (finally, state); lower_eh_constructs_1 (state, &x); append_to_statement_list (x, &new_stmt); x = build1 (RESX_EXPR, void_type_node, build_int_2 (get_eh_region_number (tf->region), 0)); append_to_statement_list (x, &new_stmt); } if (tf->goto_queue) { struct goto_queue_node *q, *qe; tree return_val = NULL; int return_index; tree *labels; if (tf->dest_array) return_index = VARRAY_ACTIVE_SIZE (tf->dest_array); else return_index = 0; labels = xcalloc (sizeof (tree), return_index + 1); q = tf->goto_queue; qe = q + tf->goto_queue_active; for (; q < qe; q++) { int index = q->index < 0 ? 
return_index : q->index; tree lab = labels[index]; bool build_p = false; if (!lab) { labels[index] = lab = create_artificial_label (); build_p = true; } if (index == return_index) do_return_redirection (q, lab, NULL, &return_val); else do_goto_redirection (q, lab, NULL); if (build_p) { x = build1 (LABEL_EXPR, void_type_node, lab); append_to_statement_list (x, &new_stmt); x = lower_try_finally_dup_block (finally, state); lower_eh_constructs_1 (state, &x); append_to_statement_list (x, &new_stmt); append_to_statement_list (q->cont_stmt, &new_stmt); maybe_record_in_goto_queue (state, q->cont_stmt); } } replace_goto_queue (tf); free (labels); } /* Need to link new stmts after running replace_goto_queue due to not wanting to process the same goto stmts twice. */ append_to_statement_list (new_stmt, tf->top_p); } /* A subroutine of lower_try_finally. There are multiple edges incoming and outgoing from the finally block. Implement this by instrumenting each incoming edge and creating a switch statement at the end of the finally block that branches to the appropriate destination. */ static void lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf) { struct goto_queue_node *q, *qe; tree return_val = NULL; tree finally, finally_tmp, finally_label; int return_index, eh_index, fallthru_index; int nlabels, ndests, j, last_case_index; tree case_label_vec, switch_stmt, last_case, switch_body; tree x; /* Mash the TRY block to the head of the chain. */ finally = TREE_OPERAND (*tf->top_p, 1); *tf->top_p = TREE_OPERAND (*tf->top_p, 0); /* Lower the finally block itself. */ lower_eh_constructs_1 (state, &finally); /* Prepare for switch statement generation. */ if (tf->dest_array) nlabels = VARRAY_ACTIVE_SIZE (tf->dest_array); else nlabels = 0; return_index = nlabels; eh_index = return_index + tf->may_return; fallthru_index = eh_index + tf->may_throw; ndests = fallthru_index + tf->may_fallthru; finally_tmp = create_tmp_var (integer_type_node, "finally_tmp"); finally_label = create_artificial_label (); case_label_vec = make_tree_vec (ndests); switch_stmt = build (SWITCH_EXPR, integer_type_node, finally_tmp, NULL_TREE, case_label_vec); switch_body = NULL; last_case = NULL; last_case_index = 0; /* Begin inserting code for getting to the finally block. Things are done in this order to correspond to the sequence the code is laid out.
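The overall shape being assembled is, roughly (a sketch only; the fallthru and EH cases are emitted only when actually reachable):

     ... try body, with every exit setting finally_tmp and branching to finally_label ...
     finally_label:
     ... lowered finally block ...
     switch (finally_tmp)
       {
       case fallthru_index: goto fallthru_label;
       case eh_index: resx;
       case k: goto D_k;   (one case per destination recorded in the goto queue, or the original return)
       }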
*/ if (tf->may_fallthru) { x = build (MODIFY_EXPR, void_type_node, finally_tmp, build_int_2 (fallthru_index, 0)); append_to_statement_list (x, tf->top_p); if (tf->may_throw) { x = build1 (GOTO_EXPR, void_type_node, finally_label); append_to_statement_list (x, tf->top_p); } last_case = build (CASE_LABEL_EXPR, void_type_node, build_int_2 (fallthru_index, 0), NULL, create_artificial_label ()); TREE_VEC_ELT (case_label_vec, last_case_index) = last_case; last_case_index++; x = build (LABEL_EXPR, void_type_node, CASE_LABEL (last_case)); append_to_statement_list (x, &switch_body); x = lower_try_finally_fallthru_label (tf); x = build1 (GOTO_EXPR, void_type_node, x); append_to_statement_list (x, &switch_body); } if (tf->may_throw) { x = build1 (LABEL_EXPR, void_type_node, tf->eh_label); append_to_statement_list (x, tf->top_p); x = build (MODIFY_EXPR, void_type_node, finally_tmp, build_int_2 (eh_index, 0)); append_to_statement_list (x, tf->top_p); last_case = build (CASE_LABEL_EXPR, void_type_node, build_int_2 (eh_index, 0), NULL, create_artificial_label ()); TREE_VEC_ELT (case_label_vec, last_case_index) = last_case; last_case_index++; x = build (LABEL_EXPR, void_type_node, CASE_LABEL (last_case)); append_to_statement_list (x, &switch_body); x = build1 (RESX_EXPR, void_type_node, build_int_2 (get_eh_region_number (tf->region), 0)); append_to_statement_list (x, &switch_body); } x = build1 (LABEL_EXPR, void_type_node, finally_label); append_to_statement_list (x, tf->top_p); append_to_statement_list (finally, tf->top_p); /* Redirect each incoming goto edge. */ q = tf->goto_queue; qe = q + tf->goto_queue_active; j = last_case_index + tf->may_return; last_case_index += nlabels; for (; q < qe; ++q) { tree mod; int switch_id, case_index; if (q->index < 0) { mod = build (MODIFY_EXPR, void_type_node, finally_tmp, build_int_2 (return_index, 0)); do_return_redirection (q, finally_label, mod, &return_val); switch_id = return_index; } else { mod = build (MODIFY_EXPR, void_type_node, finally_tmp, build_int_2 (q->index, 0)); do_goto_redirection (q, finally_label, mod); switch_id = q->index; } case_index = j + q->index; if (!TREE_VEC_ELT (case_label_vec, case_index)) { last_case = build (CASE_LABEL_EXPR, void_type_node, build_int_2 (switch_id, 0), NULL, create_artificial_label ()); TREE_VEC_ELT (case_label_vec, case_index) = last_case; x = build (LABEL_EXPR, void_type_node, CASE_LABEL (last_case)); append_to_statement_list (x, &switch_body); append_to_statement_list (q->cont_stmt, &switch_body); maybe_record_in_goto_queue (state, q->cont_stmt); } } replace_goto_queue (tf); last_case_index += nlabels; /* Make sure that the last case is the default label, as one is required. Then sort the labels, which is also required in GIMPLE. */ CASE_LOW (last_case) = NULL; sort_case_labels (case_label_vec); /* Need to link switch_stmt after running replace_goto_queue due to not wanting to process the same goto stmts twice. */ append_to_statement_list (switch_stmt, tf->top_p); append_to_statement_list (switch_body, tf->top_p); } /* Decide whether or not we are going to duplicate the finally block. There are several considerations. First, if this is Java, then the finally block contains code written by the user. It has line numbers associated with it, so duplicating the block means it's difficult to set a breakpoint. Since controlling code generation via -g is verboten, we simply never duplicate code without optimization. Second, we'd like to prevent egregious code growth. 
One way to do this is to estimate the size of the finally block, multiply that by the number of copies we'd need to make, and compare against the estimate of the size of the switch machinery we'd have to add. */ static bool decide_copy_try_finally (int ndests, tree finally) { int f_estimate, sw_estimate; if (!optimize) return false; /* Finally estimate N times, plus N gotos. */ f_estimate = estimate_num_insns (finally); f_estimate = (f_estimate + 1) * ndests; /* Switch statement (cost 10), N variable assignments, N gotos. */ sw_estimate = 10 + 2 * ndests; /* Optimize for size clearly wants our best guess. */ if (optimize_size) return f_estimate < sw_estimate; /* ??? These numbers are completely made up so far. */ if (optimize > 1) return f_estimate < 100 || f_estimate < sw_estimate * 2; else return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3; } /* A subroutine of lower_eh_constructs_1. Lower a TRY_FINALLY_EXPR nodes to a sequence of labels and blocks, plus the exception region trees that record all the magic. This is complicated by the need to arrange for the FINALLY block to be executed on all exits. */ static void lower_try_finally (struct leh_state *state, tree *tp) { struct leh_tf_state this_tf; struct leh_state this_state; int ndests; /* Process the try block. */ memset (&this_tf, 0, sizeof (this_tf)); this_tf.try_finally_expr = *tp; this_tf.top_p = tp; this_tf.outer = state; if (using_eh_for_cleanups_p) this_tf.region = gen_eh_region_cleanup (state->cur_region, state->prev_try); else this_tf.region = NULL; this_state.cur_region = this_tf.region; this_state.prev_try = state->prev_try; this_state.tf = &this_tf; lower_eh_constructs_1 (&this_state, &TREE_OPERAND (*tp, 0)); /* Determine if the try block is escaped through the bottom. */ this_tf.may_fallthru = block_may_fallthru (TREE_OPERAND (*tp, 0)); /* Determine if any exceptions are possible within the try block. */ if (using_eh_for_cleanups_p) this_tf.may_throw = get_eh_region_may_contain_throw (this_tf.region); if (this_tf.may_throw) { this_tf.eh_label = create_artificial_label (); set_eh_region_tree_label (this_tf.region, this_tf.eh_label); honor_protect_cleanup_actions (state, &this_state, &this_tf); } /* Sort the goto queue for efficient searching later. */ if (this_tf.goto_queue_active > 1) qsort (this_tf.goto_queue, this_tf.goto_queue_active, sizeof (struct goto_queue_node), goto_queue_cmp); /* Determine how many edges (still) reach the finally block. Or rather, how many destinations are reached by the finally block. Use this to determine how we process the finally block itself. */ if (this_tf.dest_array) ndests = VARRAY_ACTIVE_SIZE (this_tf.dest_array); else ndests = 0; ndests += this_tf.may_fallthru; ndests += this_tf.may_return; ndests += this_tf.may_throw; /* If the FINALLY block is not reachable, dike it out. */ if (ndests == 0) *tp = TREE_OPERAND (*tp, 0); /* If the finally block doesn't fall through, then any destination we might try to impose there isn't reached either. There may be some minor amount of cleanup and redirection still needed. */ else if (!block_may_fallthru (TREE_OPERAND (*tp, 1))) lower_try_finally_nofallthru (state, &this_tf); /* We can easily special-case redirection to a single destination. 
*/ else if (ndests == 1) lower_try_finally_onedest (state, &this_tf); else if (decide_copy_try_finally (ndests, TREE_OPERAND (*tp, 1))) lower_try_finally_copy (state, &this_tf); else lower_try_finally_switch (state, &this_tf); /* If someone requested we add a label at the end of the transformed block, do so. */ if (this_tf.fallthru_label) { tree x = build1 (LABEL_EXPR, void_type_node, this_tf.fallthru_label); append_to_statement_list (x, tp); } if (this_tf.goto_queue) free (this_tf.goto_queue); } /* A subroutine of lower_eh_constructs_1. Lower a TRY_CATCH_EXPR with a list of CATCH_EXPR nodes to a sequence of labels and blocks, plus the exception region trees that record all the magic. */ static void lower_catch (struct leh_state *state, tree *tp) { struct eh_region *try_region; struct leh_state this_state; tree_stmt_iterator i; tree out_label; try_region = gen_eh_region_try (state->cur_region); this_state.cur_region = try_region; this_state.prev_try = try_region; this_state.tf = state->tf; lower_eh_constructs_1 (&this_state, &TREE_OPERAND (*tp, 0)); if (!get_eh_region_may_contain_throw (try_region)) { *tp = TREE_OPERAND (*tp, 0); return; } out_label = NULL; for (i = tsi_start (TREE_OPERAND (*tp, 1)); !tsi_end_p (i); ) { struct eh_region *catch_region; tree catch, x, eh_label; catch = tsi_stmt (i); catch_region = gen_eh_region_catch (try_region, CATCH_TYPES (catch)); this_state.cur_region = catch_region; this_state.prev_try = state->prev_try; lower_eh_constructs_1 (&this_state, &CATCH_BODY (catch)); eh_label = create_artificial_label (); set_eh_region_tree_label (catch_region, eh_label); x = build1 (LABEL_EXPR, void_type_node, eh_label); tsi_link_before (&i, x, TSI_SAME_STMT); if (block_may_fallthru (CATCH_BODY (catch))) { if (!out_label) out_label = create_artificial_label (); x = build1 (GOTO_EXPR, void_type_node, out_label); append_to_statement_list (x, &CATCH_BODY (catch)); } tsi_link_before (&i, CATCH_BODY (catch), TSI_SAME_STMT); tsi_delink (&i); } frob_into_branch_around (tp, NULL, out_label); } /* A subroutine of lower_eh_constructs_1. Lower a TRY_CATCH_EXPR with a EH_FILTER_EXPR to a sequence of labels and blocks, plus the exception region trees that record all the magic. */ static void lower_eh_filter (struct leh_state *state, tree *tp) { struct leh_state this_state; struct eh_region *this_region; tree inner = expr_first (TREE_OPERAND (*tp, 1)); tree eh_label; if (EH_FILTER_MUST_NOT_THROW (inner)) this_region = gen_eh_region_must_not_throw (state->cur_region); else this_region = gen_eh_region_allowed (state->cur_region, EH_FILTER_TYPES (inner)); this_state = *state; this_state.cur_region = this_region; lower_eh_constructs_1 (&this_state, &TREE_OPERAND (*tp, 0)); if (!get_eh_region_may_contain_throw (this_region)) { *tp = TREE_OPERAND (*tp, 0); return; } lower_eh_constructs_1 (state, &EH_FILTER_FAILURE (inner)); TREE_OPERAND (*tp, 1) = EH_FILTER_FAILURE (inner); eh_label = create_artificial_label (); set_eh_region_tree_label (this_region, eh_label); frob_into_branch_around (tp, eh_label, NULL); } /* Implement a cleanup expression. This is similar to try-finally, except that we only execute the cleanup block for exception edges. */ static void lower_cleanup (struct leh_state *state, tree *tp) { struct leh_state this_state; struct eh_region *this_region; struct leh_tf_state fake_tf; /* If not using eh, then exception-only cleanups are no-ops. 
*/ if (!flag_exceptions) { *tp = TREE_OPERAND (*tp, 0); lower_eh_constructs_1 (state, tp); return; } this_region = gen_eh_region_cleanup (state->cur_region, state->prev_try); this_state = *state; this_state.cur_region = this_region; lower_eh_constructs_1 (&this_state, &TREE_OPERAND (*tp, 0)); if (!get_eh_region_may_contain_throw (this_region)) { *tp = TREE_OPERAND (*tp, 0); return; } /* Build enough of a try-finally state so that we can reuse honor_protect_cleanup_actions. */ memset (&fake_tf, 0, sizeof (fake_tf)); fake_tf.top_p = tp; fake_tf.outer = state; fake_tf.region = this_region; fake_tf.may_fallthru = block_may_fallthru (TREE_OPERAND (*tp, 0)); fake_tf.may_throw = true; fake_tf.eh_label = create_artificial_label (); set_eh_region_tree_label (this_region, fake_tf.eh_label); honor_protect_cleanup_actions (state, NULL, &fake_tf); if (fake_tf.may_throw) { /* In this case honor_protect_cleanup_actions had nothing to do, and we should process this normally. */ lower_eh_constructs_1 (state, &TREE_OPERAND (*tp, 1)); frob_into_branch_around (tp, fake_tf.eh_label, fake_tf.fallthru_label); } else { /* In this case honor_protect_cleanup_actions did nearly all of the work. All we have left is to append the fallthru_label. */ *tp = TREE_OPERAND (*tp, 0); if (fake_tf.fallthru_label) { tree x = build1 (LABEL_EXPR, void_type_node, fake_tf.fallthru_label); append_to_statement_list (x, tp); } } } /* Main loop for lowering eh constructs. */ static void lower_eh_constructs_1 (struct leh_state *state, tree *tp) { tree_stmt_iterator i; tree t = *tp; switch (TREE_CODE (t)) { case COND_EXPR: lower_eh_constructs_1 (state, &COND_EXPR_THEN (t)); lower_eh_constructs_1 (state, &COND_EXPR_ELSE (t)); break; case CALL_EXPR: /* Look for things that can throw exceptions, and record them. */ if (state->cur_region && tree_could_throw_p (t)) { record_stmt_eh_region (state->cur_region, t); note_eh_region_may_contain_throw (state->cur_region); } break; case MODIFY_EXPR: /* Look for things that can throw exceptions, and record them. */ if (state->cur_region && tree_could_throw_p (t)) { record_stmt_eh_region (state->cur_region, t); note_eh_region_may_contain_throw (state->cur_region); /* ??? For the benefit of calls.c, converting all this to rtl, we need to record the call expression, not just the outer modify statement. */ if (TREE_CODE (TREE_OPERAND (t, 1)) == CALL_EXPR) record_stmt_eh_region (state->cur_region, TREE_OPERAND (t, 1)); } break; case GOTO_EXPR: case RETURN_EXPR: maybe_record_in_goto_queue (state, t); break; case SWITCH_EXPR: verify_norecord_switch_expr (state, t); break; case TRY_FINALLY_EXPR: lower_try_finally (state, tp); break; case TRY_CATCH_EXPR: i = tsi_start (TREE_OPERAND (t, 1)); switch (TREE_CODE (tsi_stmt (i))) { case CATCH_EXPR: lower_catch (state, tp); break; case EH_FILTER_EXPR: lower_eh_filter (state, tp); break; default: lower_cleanup (state, tp); break; } break; case STATEMENT_LIST: for (i = tsi_start (t); !tsi_end_p (i); ) { lower_eh_constructs_1 (state, tsi_stmt_ptr (i)); t = tsi_stmt (i); if (TREE_CODE (t) == STATEMENT_LIST) { tsi_link_before (&i, t, TSI_SAME_STMT); tsi_delink (&i); } else tsi_next (&i); } break; default: /* A type, a decl, or some kind of statement that we're not interested in. Don't walk them. 
*/ break; } } static void lower_eh_constructs (void) { struct leh_state null_state; tree *tp = &DECL_SAVED_TREE (current_function_decl); finally_tree = htab_create (31, struct_ptr_hash, struct_ptr_eq, free); throw_stmt_table = htab_create_ggc (31, struct_ptr_hash, struct_ptr_eq, ggc_free); collect_finally_tree (*tp, NULL); memset (&null_state, 0, sizeof (null_state)); lower_eh_constructs_1 (&null_state, tp); htab_delete (finally_tree); collect_eh_region_array (); } struct tree_opt_pass pass_lower_eh = { "eh", /* name */ NULL, /* gate */ lower_eh_constructs, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_EH, /* tv_id */ PROP_gimple_lcf, /* properties_required */ PROP_gimple_leh, /* properties_provided */ PROP_gimple_lcf, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func /* todo_flags_finish */ }; /* Construct EH edges for STMT. */ static void make_eh_edge (struct eh_region *region, void *data) { tree stmt, lab; basic_block src, dst; stmt = data; lab = get_eh_region_tree_label (region); src = bb_for_stmt (stmt); dst = label_to_block (lab); make_edge (src, dst, EDGE_ABNORMAL | EDGE_EH); } void make_eh_edges (tree stmt) { int region_nr; bool is_resx; if (TREE_CODE (stmt) == RESX_EXPR) { region_nr = TREE_INT_CST_LOW (TREE_OPERAND (stmt, 0)); is_resx = true; } else { region_nr = lookup_stmt_eh_region (stmt); if (region_nr < 0) return; is_resx = false; } foreach_reachable_handler (region_nr, is_resx, make_eh_edge, stmt); } /* Return true if the expr can trap, as in dereferencing an invalid pointer location or floating point arithmetic. C.f. the rtl version, may_trap_p. This routine expects only GIMPLE lhs or rhs input. */ bool tree_could_trap_p (tree expr) { enum tree_code code = TREE_CODE (expr); bool honor_nans = false; bool honor_snans = false; bool fp_operation = false; tree t; if (TREE_CODE_CLASS (code) == '<' || TREE_CODE_CLASS (code) == '1' || TREE_CODE_CLASS (code) == '2') { t = TREE_TYPE (expr); fp_operation = FLOAT_TYPE_P (t); if (fp_operation) { honor_nans = flag_trapping_math && !flag_finite_math_only; honor_snans = flag_signaling_nans != 0; } } switch (code) { case ARRAY_REF: case ARRAY_RANGE_REF: case COMPONENT_REF: case REALPART_EXPR: case IMAGPART_EXPR: case BIT_FIELD_REF: t = get_base_address (expr); return !t || tree_could_trap_p (t); case INDIRECT_REF: return !TREE_THIS_NOTRAP (expr); case ASM_EXPR: return TREE_THIS_VOLATILE (expr); case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case TRUNC_MOD_EXPR: case RDIV_EXPR: if (honor_snans) return true; if (fp_operation && flag_trapping_math) return true; t = TREE_OPERAND (expr, 1); if (!TREE_CONSTANT (t) || integer_zerop (t)) return true; return false; case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case LTGT_EXPR: /* Some floating point comparisons may trap. */ return honor_nans; case EQ_EXPR: case NE_EXPR: case UNORDERED_EXPR: case ORDERED_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: return honor_snans; case CONVERT_EXPR: case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: /* Conversion of floating point might trap. */ return honor_nans; case NEGATE_EXPR: case ABS_EXPR: case CONJ_EXPR: /* These operations don't trap even with floating point. */ return false; default: /* Any floating arithmetic may trap. 
*/ if (fp_operation && flag_trapping_math) return true; return false; } } bool tree_could_throw_p (tree t) { if (!flag_exceptions) return false; if (TREE_CODE (t) == MODIFY_EXPR) { if (flag_non_call_exceptions && tree_could_trap_p (TREE_OPERAND (t, 0))) return true; t = TREE_OPERAND (t, 1); } if (TREE_CODE (t) == CALL_EXPR) return (call_expr_flags (t) & ECF_NOTHROW) == 0; if (flag_non_call_exceptions) return tree_could_trap_p (t); return false; } bool tree_can_throw_internal (tree stmt) { int region_nr = lookup_stmt_eh_region (stmt); if (region_nr < 0) return false; return can_throw_internal_1 (region_nr); } bool tree_can_throw_external (tree stmt) { int region_nr = lookup_stmt_eh_region (stmt); if (region_nr < 0) return false; return can_throw_external_1 (region_nr); } bool maybe_clean_eh_stmt (tree stmt) { if (!tree_could_throw_p (stmt)) if (remove_stmt_from_eh_region (stmt)) return true; return false; } /* Type information for tree-eh.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_throw_stmt_node (void *x_p) { struct throw_stmt_node * const x = (struct throw_stmt_node *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).stmt); } } void gt_ggc_m_P15throw_stmt_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_15throw_stmt_node ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_pch_nx_throw_stmt_node (void *x_p) { struct throw_stmt_node * const x = (struct throw_stmt_node *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_15throw_stmt_node)) { gt_pch_n_9tree_node ((*x).stmt); } } void gt_pch_n_P15throw_stmt_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P15throw_stmt_node4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_15throw_stmt_node ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P15throw_stmt_node4htab); } } } void gt_pch_p_15throw_stmt_node (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct throw_stmt_node * const x ATTRIBUTE_UNUSED = (struct throw_stmt_node *)x_p; if ((void *)(x) == this_obj) op (&((*x).stmt), cookie); } void gt_pch_p_P15throw_stmt_node4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } /* GC roots. 
*/ const struct ggc_root_tab gt_ggc_r_gt_tree_eh_h[] = { { &throw_stmt_table, 1, sizeof (throw_stmt_table), &gt_ggc_m_P15throw_stmt_node4htab, &gt_pch_n_P15throw_stmt_node4htab }, LAST_GGC_ROOT_TAB }; /* Miscellaneous SSA utility functions. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Remove edge E and remove the corresponding arguments from the PHI nodes in E's destination block. */ void ssa_remove_edge (edge e) { tree phi, next; /* Remove the appropriate PHI arguments in E's destination block. */ for (phi = phi_nodes (e->dest); phi; phi = next) { next = PHI_CHAIN (phi); remove_phi_arg (phi, e->src); } remove_edge (e); } /* Remove the corresponding arguments from the PHI nodes in E's destination block and redirect it to DEST. Return redirected edge. The list of removed arguments is stored in PENDING_STMT (e). */ edge ssa_redirect_edge (edge e, basic_block dest) { tree phi, next; tree list = NULL, *last = &list; tree src, dst, node; int i; /* Remove the appropriate PHI arguments in E's destination block. */ for (phi = phi_nodes (e->dest); phi; phi = next) { next = PHI_CHAIN (phi); i = phi_arg_from_edge (phi, e); if (i < 0) continue; src = PHI_ARG_DEF (phi, i); dst = PHI_RESULT (phi); node = build_tree_list (dst, src); *last = node; last = &TREE_CHAIN (node); remove_phi_arg_num (phi, i); } e = redirect_edge_succ_nodup (e, dest); PENDING_STMT (e) = list; return e; } /* Return true if the definition of SSA_NAME at block BB is malformed. STMT is the statement where SSA_NAME is created. DEFINITION_BLOCK is an array of basic blocks indexed by SSA_NAME version numbers. If DEFINITION_BLOCK[SSA_NAME_VERSION] is set, it means that the block in that array slot contains the definition of SSA_NAME. */ static bool verify_def (basic_block bb, basic_block *definition_block, tree ssa_name, tree stmt) { bool err = false; if (TREE_CODE (ssa_name) != SSA_NAME) { error ("Expected an SSA_NAME object"); debug_generic_stmt (ssa_name); debug_generic_stmt (stmt); } if (definition_block[SSA_NAME_VERSION (ssa_name)]) { error ("SSA_NAME created in two different blocks %i and %i", definition_block[SSA_NAME_VERSION (ssa_name)]->index, bb->index); fprintf (stderr, "SSA_NAME: "); debug_generic_stmt (ssa_name); debug_generic_stmt (stmt); err = true; } definition_block[SSA_NAME_VERSION (ssa_name)] = bb; if (SSA_NAME_DEF_STMT (ssa_name) != stmt) { error ("SSA_NAME_DEF_STMT is wrong"); fprintf (stderr, "SSA_NAME: "); debug_generic_stmt (ssa_name); fprintf (stderr, "Expected definition statement:\n"); debug_generic_stmt (SSA_NAME_DEF_STMT (ssa_name)); fprintf (stderr, "\nActual definition statement:\n"); debug_generic_stmt (stmt); err = true; } return err; } /* Return true if the use of SSA_NAME at statement STMT in block BB is malformed. DEF_BB is the block where SSA_NAME was found to be created.
IDOM contains immediate dominator information for the flowgraph. CHECK_ABNORMAL is true if the caller wants to check whether this use is flowing through an abnormal edge (only used when checking PHI arguments). */ static bool verify_use (basic_block bb, basic_block def_bb, tree ssa_name, tree stmt, bool check_abnormal) { bool err = false; if (IS_EMPTY_STMT (SSA_NAME_DEF_STMT (ssa_name))) ; /* Nothing to do. */ else if (!def_bb) { error ("Missing definition"); err = true; } else if (bb != def_bb && !dominated_by_p (CDI_DOMINATORS, bb, def_bb)) { error ("Definition in block %i does not dominate use in block %i", def_bb->index, bb->index); err = true; } if (check_abnormal && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ssa_name)) { error ("SSA_NAME_OCCURS_IN_ABNORMAL_PHI should be set"); err = true; } if (err) { fprintf (stderr, "for SSA_NAME: "); debug_generic_stmt (ssa_name); fprintf (stderr, "in statement:\n"); debug_generic_stmt (stmt); } return err; } /* Return true if any of the arguments for PHI node PHI at block BB is malformed. IDOM contains immediate dominator information for the flowgraph. DEFINITION_BLOCK is an array of basic blocks indexed by SSA_NAME version numbers. If DEFINITION_BLOCK[SSA_NAME_VERSION] is set, it means that the block in that array slot contains the definition of SSA_NAME. */ static bool verify_phi_args (tree phi, basic_block bb, basic_block *definition_block) { edge e; bool err = false; int i, phi_num_args = PHI_NUM_ARGS (phi); /* Mark all the incoming edges. */ for (e = bb->pred; e; e = e->pred_next) e->aux = (void *) 1; for (i = 0; i < phi_num_args; i++) { tree op = PHI_ARG_DEF (phi, i); e = PHI_ARG_EDGE (phi, i); if (TREE_CODE (op) == SSA_NAME) err |= verify_use (e->src, definition_block[SSA_NAME_VERSION (op)], op, phi, e->flags & EDGE_ABNORMAL); if (e->dest != bb) { error ("Wrong edge %d->%d for PHI argument\n", e->src->index, e->dest->index, bb->index); err = true; } if (e->aux == (void *) 0) { error ("PHI argument flowing through dead edge %d->%d\n", e->src->index, e->dest->index); err = true; } if (e->aux == (void *) 2) { error ("PHI argument duplicated for edge %d->%d\n", e->src->index, e->dest->index); err = true; } if (err) { fprintf (stderr, "PHI argument\n"); debug_generic_stmt (op); } e->aux = (void *) 2; } for (e = bb->pred; e; e = e->pred_next) { if (e->aux != (void *) 2) { error ("No argument flowing through edge %d->%d\n", e->src->index, e->dest->index); err = true; } e->aux = (void *) 0; } if (err) { fprintf (stderr, "for PHI node\n"); debug_generic_stmt (phi); } return err; } /* Verify common invariants in the SSA web. TODO: verify the variable annotations. */ void verify_ssa (void) { bool err = false; basic_block bb; basic_block *definition_block = xcalloc (num_ssa_names, sizeof (basic_block)); timevar_push (TV_TREE_SSA_VERIFY); calculate_dominance_info (CDI_DOMINATORS); /* Verify and register all the SSA_NAME definitions found in the function. 
*/ FOR_EACH_BB (bb) { tree phi; block_stmt_iterator bsi; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) err |= verify_def (bb, definition_block, PHI_RESULT (phi), phi); for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { tree stmt; stmt_ann_t ann; unsigned int j; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; def_optype defs; stmt = bsi_stmt (bsi); ann = stmt_ann (stmt); get_stmt_operands (stmt); v_may_defs = V_MAY_DEF_OPS (ann); if (ann->makes_aliased_stores && NUM_V_MAY_DEFS (v_may_defs) == 0) error ("Makes aliased stores, but no V_MAY_DEFS"); for (j = 0; j < NUM_V_MAY_DEFS (v_may_defs); j++) { tree op = V_MAY_DEF_RESULT (v_may_defs, j); if (is_gimple_reg (op)) { error ("Found a virtual definition for a GIMPLE register"); debug_generic_stmt (op); debug_generic_stmt (stmt); err = true; } err |= verify_def (bb, definition_block, op, stmt); } v_must_defs = STMT_V_MUST_DEF_OPS (stmt); for (j = 0; j < NUM_V_MUST_DEFS (v_must_defs); j++) { tree op = V_MUST_DEF_OP (v_must_defs, j); if (is_gimple_reg (op)) { error ("Found a virtual must-def for a GIMPLE register"); debug_generic_stmt (op); debug_generic_stmt (stmt); err = true; } err |= verify_def (bb, definition_block, op, stmt); } defs = DEF_OPS (ann); for (j = 0; j < NUM_DEFS (defs); j++) { tree op = DEF_OP (defs, j); if (TREE_CODE (op) == SSA_NAME && !is_gimple_reg (op)) { error ("Found a real definition for a non-GIMPLE register"); debug_generic_stmt (op); debug_generic_stmt (stmt); err = true; } err |= verify_def (bb, definition_block, op, stmt); } } } /* Now verify all the uses and make sure they agree with the definitions found in the previous pass. */ FOR_EACH_BB (bb) { edge e; tree phi; block_stmt_iterator bsi; /* Make sure that all edges have a clear 'aux' field. */ for (e = bb->pred; e; e = e->pred_next) { if (e->aux) { error ("AUX pointer initialized for edge %d->%d\n", e->src->index, e->dest->index); err = true; } } /* Verify the arguments for every PHI node in the block. */ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) err |= verify_phi_args (phi, bb, definition_block); /* Now verify all the uses and vuses in every statement of the block. Remember, the RHS of a V_MAY_DEF is a use as well. 
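   For instance, an aliased store through a pointer shows up in the virtual operands as something like

      a_5 = V_MAY_DEF <a_4>

   where a_5 is a definition of the virtual variable and a_4 is at the same time a use of its previous version, which is why the loop below checks V_MAY_DEF_OP with verify_use just like a plain VUSE.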
*/ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { tree stmt = bsi_stmt (bsi); stmt_ann_t ann = stmt_ann (stmt); unsigned int j; vuse_optype vuses; v_may_def_optype v_may_defs; use_optype uses; vuses = VUSE_OPS (ann); for (j = 0; j < NUM_VUSES (vuses); j++) { tree op = VUSE_OP (vuses, j); if (is_gimple_reg (op)) { error ("Found a virtual use for a GIMPLE register"); debug_generic_stmt (op); debug_generic_stmt (stmt); err = true; } err |= verify_use (bb, definition_block[SSA_NAME_VERSION (op)], op, stmt, false); } v_may_defs = V_MAY_DEF_OPS (ann); for (j = 0; j < NUM_V_MAY_DEFS (v_may_defs); j++) { tree op = V_MAY_DEF_OP (v_may_defs, j); if (is_gimple_reg (op)) { error ("Found a virtual use for a GIMPLE register"); debug_generic_stmt (op); debug_generic_stmt (stmt); err = true; } err |= verify_use (bb, definition_block[SSA_NAME_VERSION (op)], op, stmt, false); } uses = USE_OPS (ann); for (j = 0; j < NUM_USES (uses); j++) { tree op = USE_OP (uses, j); if (TREE_CODE (op) == SSA_NAME && !is_gimple_reg (op)) { error ("Found a real use of a non-GIMPLE register"); debug_generic_stmt (op); debug_generic_stmt (stmt); err = true; } err |= verify_use (bb, definition_block[SSA_NAME_VERSION (op)], op, stmt, false); } } } free (definition_block); timevar_pop (TV_TREE_SSA_VERIFY); if (err) internal_error ("verify_ssa failed."); } /* Set the USED bit in the annotation for T. */ void set_is_used (tree t) { while (1) { if (SSA_VAR_P (t)) break; if (TREE_CODE (t) == REALPART_EXPR || TREE_CODE (t) == IMAGPART_EXPR) t = TREE_OPERAND (t, 0); else while (handled_component_p (t)) t = TREE_OPERAND (t, 0); } if (TREE_CODE (t) == SSA_NAME) t = SSA_NAME_VAR (t); var_ann (t)->used = 1; } /* Initialize global DFA and SSA structures. */ void init_tree_ssa (void) { VARRAY_TREE_INIT (referenced_vars, 20, "referenced_vars"); call_clobbered_vars = BITMAP_XMALLOC (); init_ssa_operands (); init_ssanames (); init_phinodes (); global_var = NULL_TREE; aliases_computed_p = false; } /* Deallocate memory associated with SSA data structures for FNDECL. */ void delete_tree_ssa (void) { size_t i; basic_block bb; block_stmt_iterator bsi; /* Remove annotations from every tree in the function. */ FOR_EACH_BB (bb) for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) bsi_stmt (bsi)->common.ann = NULL; /* Remove annotations from every referenced variable. */ if (referenced_vars) { for (i = 0; i < num_referenced_vars; i++) referenced_var (i)->common.ann = NULL; referenced_vars = NULL; } fini_ssanames (); fini_phinodes (); fini_ssa_operands (); global_var = NULL_TREE; BITMAP_XFREE (call_clobbered_vars); call_clobbered_vars = NULL; aliases_computed_p = false; } /* Return true if EXPR is a useless type conversion, otherwise return false. */ bool tree_ssa_useless_type_conversion_1 (tree outer_type, tree inner_type) { /* If the inner and outer types are effectively the same, then strip the type conversion and enter the equivalence into the table. */ if (inner_type == outer_type || (lang_hooks.types_compatible_p (inner_type, outer_type))) return true; /* If both types are pointers and the outer type is a (void *), then the conversion is not necessary. The opposite is not true since that conversion would result in a loss of information if the equivalence was used. Consider an indirect function call where we need to know the exact type of the function to correctly implement the ABI. 
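   For example, given

      void (*fp) (int);
      void *p;
      p = (void *) fp;

   the cast of fp to "void *" is useless and can be stripped, whereas stripping a cast from "void *" back to the function pointer type would lose the type information needed to call through the result correctly.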
*/ else if (POINTER_TYPE_P (inner_type) && POINTER_TYPE_P (outer_type) && TREE_CODE (TREE_TYPE (outer_type)) == VOID_TYPE) return true; /* Pointers and references are equivalent once we get to GENERIC, so strip conversions that just switch between them. */ else if (POINTER_TYPE_P (inner_type) && POINTER_TYPE_P (outer_type) && lang_hooks.types_compatible_p (TREE_TYPE (inner_type), TREE_TYPE (outer_type))) return true; /* If both the inner and outer types are integral types, then the conversion is not necessary if they have the same mode and signedness and precision. Note that type _Bool can have size of 4 (only happens on powerpc-darwin right now but can happen on any target that defines BOOL_TYPE_SIZE to be INT_TYPE_SIZE) and a precision of 1 while unsigned int is the same except for a precision of 32, so testing of precision is necessary. */ else if (INTEGRAL_TYPE_P (inner_type) && INTEGRAL_TYPE_P (outer_type) && TYPE_MODE (inner_type) == TYPE_MODE (outer_type) && TYPE_UNSIGNED (inner_type) == TYPE_UNSIGNED (outer_type) && TYPE_PRECISION (inner_type) == TYPE_PRECISION (outer_type)) return true; /* Recurse for complex types. */ else if (TREE_CODE (inner_type) == COMPLEX_TYPE && TREE_CODE (outer_type) == COMPLEX_TYPE && tree_ssa_useless_type_conversion_1 (TREE_TYPE (outer_type), TREE_TYPE (inner_type))) return true; return false; } /* Return true if EXPR is a useless type conversion, otherwise return false. */ bool tree_ssa_useless_type_conversion (tree expr) { /* If we have an assignment that merely uses a NOP_EXPR to change the top of the RHS to the type of the LHS and the type conversion is "safe", then strip away the type conversion so that we can enter LHS = RHS into the const_and_copies table. */ if (TREE_CODE (expr) == NOP_EXPR || TREE_CODE (expr) == CONVERT_EXPR || TREE_CODE (expr) == VIEW_CONVERT_EXPR || TREE_CODE (expr) == NON_LVALUE_EXPR) return tree_ssa_useless_type_conversion_1 (TREE_TYPE (expr), TREE_TYPE (TREE_OPERAND (expr, 0))); return false; } /* Internal helper for walk_use_def_chains. VAR, FN and DATA are as described in walk_use_def_chains. VISITED is a bitmap used to mark visited SSA_NAMEs to avoid infinite loops. */ static bool walk_use_def_chains_1 (tree var, walk_use_def_chains_fn fn, void *data, bitmap visited) { tree def_stmt; if (bitmap_bit_p (visited, SSA_NAME_VERSION (var))) return false; bitmap_set_bit (visited, SSA_NAME_VERSION (var)); def_stmt = SSA_NAME_DEF_STMT (var); if (TREE_CODE (def_stmt) != PHI_NODE) { /* If we reached the end of the use-def chain, call FN. */ return (*fn) (var, def_stmt, data); } else { int i; /* Otherwise, follow use-def links out of each PHI argument and call FN after visiting each one. */ for (i = 0; i < PHI_NUM_ARGS (def_stmt); i++) { tree arg = PHI_ARG_DEF (def_stmt, i); if (TREE_CODE (arg) == SSA_NAME && walk_use_def_chains_1 (arg, fn, data, visited)) return true; if ((*fn) (arg, def_stmt, data)) return true; } } return false; } /* Walk use-def chains starting at the SSA variable VAR. Call function FN at each reaching definition found. FN takes three arguments: VAR, its defining statement (DEF_STMT) and a generic pointer to whatever state information FN may want to maintain (DATA). FN can stop the walk by returning true; otherwise, to continue the walk, FN should return false. Note that if DEF_STMT is a PHI node, the semantics are slightly different. For each argument ARG of the PHI node, this function will: 1- Walk the use-def chains for ARG. 2- Call (*FN) (ARG, PHI, DATA).
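   For example, a client that simply dumps every reaching definition of an SSA name could be written as (the callback and variable names here are illustrative only):

      static bool
      dump_def (tree var ATTRIBUTE_UNUSED, tree def_stmt, void *data)
      {
        print_generic_stmt ((FILE *) data, def_stmt, 0);
        return false;
      }

      walk_use_def_chains (name, dump_def, dump_file);

   Returning false from the callback keeps the walk going.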
Note how the first argument to FN is no longer the original variable VAR, but the PHI argument currently being examined. If FN wants to get at VAR, it should call PHI_RESULT (PHI). */ void walk_use_def_chains (tree var, walk_use_def_chains_fn fn, void *data) { tree def_stmt; #if defined ENABLE_CHECKING if (TREE_CODE (var) != SSA_NAME) abort (); #endif def_stmt = SSA_NAME_DEF_STMT (var); /* We only need to recurse if the reaching definition comes from a PHI node. */ if (TREE_CODE (def_stmt) != PHI_NODE) (*fn) (var, def_stmt, data); else { bitmap visited = BITMAP_XMALLOC (); walk_use_def_chains_1 (var, fn, data, visited); BITMAP_XFREE (visited); } } /* Replaces VAR with REPL in memory reference expression *X in statement STMT. */ static void propagate_into_addr (tree stmt, tree var, tree *x, tree repl) { tree new_var, ass_stmt, addr_var; basic_block bb; block_stmt_iterator bsi; /* There is nothing special to handle in the other cases. */ if (TREE_CODE (repl) != ADDR_EXPR) return; addr_var = TREE_OPERAND (repl, 0); while (TREE_CODE (*x) == ARRAY_REF || TREE_CODE (*x) == COMPONENT_REF || TREE_CODE (*x) == BIT_FIELD_REF) x = &TREE_OPERAND (*x, 0); if (TREE_CODE (*x) != INDIRECT_REF || TREE_OPERAND (*x, 0) != var) return; modify_stmt (stmt); if (TREE_TYPE (*x) == TREE_TYPE (addr_var)) { *x = addr_var; mark_new_vars_to_rename (stmt, vars_to_rename); return; } /* Frontends sometimes produce expressions like *&a instead of a[0]. Create a temporary variable to handle this case. */ ass_stmt = build2 (MODIFY_EXPR, void_type_node, NULL_TREE, repl); new_var = duplicate_ssa_name (var, ass_stmt); TREE_OPERAND (*x, 0) = new_var; TREE_OPERAND (ass_stmt, 0) = new_var; bb = bb_for_stmt (stmt); tree_block_label (bb); bsi = bsi_after_labels (bb); bsi_insert_after (&bsi, ass_stmt, BSI_NEW_STMT); mark_new_vars_to_rename (stmt, vars_to_rename); } /* Replaces immediate uses of VAR by REPL. */ static void replace_immediate_uses (tree var, tree repl) { use_optype uses; vuse_optype vuses; v_may_def_optype v_may_defs; int i, j, n; dataflow_t df; tree stmt; stmt_ann_t ann; bool mark_new_vars; df = get_immediate_uses (SSA_NAME_DEF_STMT (var)); n = num_immediate_uses (df); for (i = 0; i < n; i++) { stmt = immediate_use (df, i); ann = stmt_ann (stmt); if (TREE_CODE (stmt) == PHI_NODE) { for (j = 0; j < PHI_NUM_ARGS (stmt); j++) if (PHI_ARG_DEF (stmt, j) == var) { SET_PHI_ARG_DEF (stmt, j, repl); if (TREE_CODE (repl) == SSA_NAME && PHI_ARG_EDGE (stmt, j)->flags & EDGE_ABNORMAL) SSA_NAME_OCCURS_IN_ABNORMAL_PHI (repl) = 1; } continue; } get_stmt_operands (stmt); mark_new_vars = false; if (is_gimple_reg (SSA_NAME_VAR (var))) { if (TREE_CODE (stmt) == MODIFY_EXPR) { propagate_into_addr (stmt, var, &TREE_OPERAND (stmt, 0), repl); propagate_into_addr (stmt, var, &TREE_OPERAND (stmt, 1), repl); } uses = USE_OPS (ann); for (j = 0; j < (int) NUM_USES (uses); j++) if (USE_OP (uses, j) == var) { propagate_value (USE_OP_PTR (uses, j), repl); mark_new_vars = POINTER_TYPE_P (TREE_TYPE (repl)); } } else { vuses = VUSE_OPS (ann); for (j = 0; j < (int) NUM_VUSES (vuses); j++) if (VUSE_OP (vuses, j) == var) propagate_value (VUSE_OP_PTR (vuses, j), repl); v_may_defs = V_MAY_DEF_OPS (ann); for (j = 0; j < (int) NUM_V_MAY_DEFS (v_may_defs); j++) if (V_MAY_DEF_OP (v_may_defs, j) == var) propagate_value (V_MAY_DEF_OP_PTR (v_may_defs, j), repl); } /* If REPL is a pointer, it may have different memory tags associated with it. For instance, VAR may have had a name tag while REPL only had a type tag. 
In these cases, the virtual operands (if any) in the statement will refer to different symbols which need to be renamed. */ if (mark_new_vars) mark_new_vars_to_rename (stmt, vars_to_rename); else modify_stmt (stmt); } } /* Gets the value VAR is equivalent to according to EQ_TO. */ static tree get_eq_name (tree *eq_to, tree var) { unsigned ver; tree val = var; while (TREE_CODE (val) == SSA_NAME) { ver = SSA_NAME_VERSION (val); if (!eq_to[ver]) break; val = eq_to[ver]; } while (TREE_CODE (var) == SSA_NAME) { ver = SSA_NAME_VERSION (var); if (!eq_to[ver]) break; var = eq_to[ver]; eq_to[ver] = val; } return val; } /* Checks whether phi node PHI is redundant and if it is, records the ssa name its result is redundant to to EQ_TO array. */ static void check_phi_redundancy (tree phi, tree *eq_to) { tree val = NULL_TREE, def, res = PHI_RESULT (phi), stmt; unsigned i, ver = SSA_NAME_VERSION (res), n; dataflow_t df; /* It is unlikely that such large phi node would be redundant. */ if (PHI_NUM_ARGS (phi) > 16) return; for (i = 0; i < (unsigned) PHI_NUM_ARGS (phi); i++) { def = PHI_ARG_DEF (phi, i); if (TREE_CODE (def) == SSA_NAME) { def = get_eq_name (eq_to, def); if (def == res) continue; } if (val && !operand_equal_p (val, def, 0)) return; val = def; } /* At least one of the arguments should not be equal to the result, or something strange is happening. */ if (!val) abort (); if (get_eq_name (eq_to, res) == val) return; if (!may_propagate_copy (res, val)) return; eq_to[ver] = val; df = get_immediate_uses (SSA_NAME_DEF_STMT (res)); n = num_immediate_uses (df); for (i = 0; i < n; i++) { stmt = immediate_use (df, i); if (TREE_CODE (stmt) == PHI_NODE) check_phi_redundancy (stmt, eq_to); } } /* Removes redundant phi nodes. A redundant PHI node is a PHI node where all of its PHI arguments are the same value, excluding any PHI arguments which are the same as the PHI result. A redundant PHI node is effectively a copy, so we forward copy propagate which removes all uses of the destination of the PHI node then finally we delete the redundant PHI node. Note that if we can not copy propagate the PHI node, then the PHI will not be removed. Thus we do not have to worry about dependencies between PHIs and the problems serializing PHIs into copies creates. The most important effect of this pass is to remove degenerate PHI nodes created by removing unreachable code. */ void kill_redundant_phi_nodes (void) { tree *eq_to; unsigned i, old_num_ssa_names; basic_block bb; tree phi, var, repl, stmt; /* The EQ_TO[VER] holds the value by that the ssa name VER should be replaced. If EQ_TO[VER] is ssa name and it is decided to replace it by other value, it may be necessary to follow the chain till the final value. We perform path shortening (replacing the entries of the EQ_TO array with heads of these chains) whenever we access the field to prevent quadratic complexity (probably would not occur in practice anyway, but let us play it safe). */ eq_to = xcalloc (num_ssa_names, sizeof (tree)); /* We have had cases where computing immediate uses takes a significant amount of compile time. If we run into such problems here, we may want to only compute immediate uses for a subset of all the SSA_NAMEs instead of computing it for all of the SSA_NAMEs. */ compute_immediate_uses (TDFA_USE_OPS | TDFA_USE_VOPS, NULL); old_num_ssa_names = num_ssa_names; FOR_EACH_BB (bb) { for (phi = phi_nodes (bb); phi; phi = TREE_CHAIN (phi)) { var = PHI_RESULT (phi); check_phi_redundancy (phi, eq_to); } } /* Now propagate the values. 
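   For example, a PHI node such as

      x_3 = PHI <x_1, x_1, x_3>

   is redundant: every argument is either x_1 or the PHI result itself, so check_phi_redundancy records eq_to[3] = x_1, and the loop below rewrites every immediate use of x_3 into a use of x_1 before the dead PHI node is removed by the second loop.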
*/ for (i = 0; i < old_num_ssa_names; i++) { if (!ssa_name (i)) continue; repl = get_eq_name (eq_to, ssa_name (i)); if (repl != ssa_name (i)) replace_immediate_uses (ssa_name (i), repl); } /* And remove the dead phis. */ for (i = 0; i < old_num_ssa_names; i++) { if (!ssa_name (i)) continue; repl = get_eq_name (eq_to, ssa_name (i)); if (repl != ssa_name (i)) { stmt = SSA_NAME_DEF_STMT (ssa_name (i)); remove_phi_node (stmt, NULL_TREE, bb_for_stmt (stmt)); } } free_df (); free (eq_to); } struct tree_opt_pass pass_redundant_phi = { "redphi", /* name */ NULL, /* gate */ kill_redundant_phi_nodes, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_rename_vars | TODO_ggc_collect | TODO_verify_ssa /* todo_flags_finish */ }; /* Emit warnings for uninitialized variables. This is done in two passes. The first pass notices real uses of SSA names with default definitions. Such uses are unconditionally uninitialized, and we can be certain that such a use is a mistake. This pass is run before most optimizations, so that we catch as many as we can. The second pass follows PHI nodes to find uses that are potentially uninitialized. In this case we can't necessarily prove that the use is really uninitialized. This pass is run after most optimizations, so that we thread as many jumps as possible, and delete as much dead code as possible, in order to reduce false positives. We also look again for plain uninitialized variables, since optimization may have changed conditionally uninitialized to unconditionally uninitialized. */ /* Emit a warning for T, an SSA_NAME, being uninitialized. The exact warning text is in MSGID and LOCUS may contain a location or be null. */ static void warn_uninit (tree t, const char *msgid, location_t *locus) { tree var = SSA_NAME_VAR (t); tree def = SSA_NAME_DEF_STMT (t); /* Default uses (indicated by an empty definition statement) are uninitialized. */ if (!IS_EMPTY_STMT (def)) return; /* Except for PARMs of course, which are always initialized. */ if (TREE_CODE (var) == PARM_DECL) return; /* Hard register variables get their initial value from the ether. */ if (DECL_HARD_REGISTER (var)) return; /* TREE_NO_WARNING either means we already warned, or the front end wishes to suppress the warning. */ if (TREE_NO_WARNING (var)) return; if (!locus) locus = &DECL_SOURCE_LOCATION (var); warning (msgid, locus, var); TREE_NO_WARNING (var) = 1; } /* Called via walk_tree, look for SSA_NAMEs that have empty definitions and warn about them. */ static tree warn_uninitialized_var (tree *tp, int *walk_subtrees, void *data) { location_t *locus = data; tree t = *tp; /* We only do data flow with SSA_NAMEs, so that's all we can warn about. */ if (TREE_CODE (t) == SSA_NAME) { warn_uninit (t, "%H'%D' is used uninitialized in this function", locus); *walk_subtrees = 0; } else if (DECL_P (t) || TYPE_P (t)) *walk_subtrees = 0; return NULL_TREE; } /* Look for inputs to PHI that are SSA_NAMEs that have empty definitions and warn about them. */ static void warn_uninitialized_phi (tree phi) { int i, n = PHI_NUM_ARGS (phi); /* Don't look at memory tags.
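   Memory tags are artificial variables that only appear in virtual operands, so warning about them would not point at any user variable. The kind of code this second pass catches is, for instance,

      int y;
      if (x)
        y = 1;
      return y;

   where the PHI node merging the two paths before the return has the default definition y_1(D) as one of its arguments, producing the "may be used uninitialized" form of the warning.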
*/ if (!is_gimple_reg (PHI_RESULT (phi))) return; for (i = 0; i < n; ++i) { tree op = PHI_ARG_DEF (phi, i); if (TREE_CODE (op) == SSA_NAME) warn_uninit (op, "%H'%D' may be used uninitialized in this function", NULL); } } static void execute_early_warn_uninitialized (void) { block_stmt_iterator bsi; basic_block bb; FOR_EACH_BB (bb) for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) walk_tree (bsi_stmt_ptr (bsi), warn_uninitialized_var, EXPR_LOCUS (bsi_stmt (bsi)), NULL); } static void execute_late_warn_uninitialized (void) { basic_block bb; tree phi; /* Re-do the plain uninitialized variable check, as optimization may have straightened control flow. Do this first so that we don't accidentally get a "may be" warning when we'd have seen an "is" warning later. */ execute_early_warn_uninitialized (); FOR_EACH_BB (bb) for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) warn_uninitialized_phi (phi); } static bool gate_warn_uninitialized (void) { return warn_uninitialized != 0; } struct tree_opt_pass pass_early_warn_uninitialized = { NULL, /* name */ gate_warn_uninitialized, /* gate */ execute_early_warn_uninitialized, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; struct tree_opt_pass pass_late_warn_uninitialized = { NULL, /* name */ gate_warn_uninitialized, /* gate */ execute_late_warn_uninitialized, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Top-level control of tree optimizations. Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Global variables used to communicate with passes. */ int dump_flags; bitmap vars_to_rename; bool in_gimple_form; /* The root of the compilation pass tree, once constructed. */ static struct tree_opt_pass *all_passes; /* Pass: gimplify the function if it's not been done. */ static void execute_gimple (void) { /* We have this test here rather than as the gate because we always want to dump the original gimplified function. */ if (!lang_hooks.gimple_before_inlining) gimplify_function_tree (current_function_decl); } static struct tree_opt_pass pass_gimple = { "gimple", /* name */ NULL, /* gate */ execute_gimple, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ 0, /* properties_required */ PROP_gimple_any, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func /* todo_flags_finish */ }; /* Gate: execute, or not, all of the non-trivial optimizations. 
*/ static bool gate_all_optimizations (void) { return (optimize >= 1 /* Don't bother doing anything if the program has errors. */ && !(errorcount || sorrycount)); } static struct tree_opt_pass pass_all_optimizations = { NULL, /* name */ gate_all_optimizations, /* gate */ NULL, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ 0, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Pass: do the actions required to finish with tree-ssa optimization passes. */ static void execute_free_datastructures (void) { tree *chain; /* ??? This isn't the right place for this. Worse, it got computed more or less at random in various passes. */ free_dominance_info (CDI_DOMINATORS); /* Emit gotos for implicit jumps. */ disband_implicit_edges (); /* Remove the ssa structures. Do it here since this includes statement annotations that need to be intact during disband_implicit_edges. */ delete_tree_ssa (); /* Re-chain the statements from the blocks. */ chain = &DECL_SAVED_TREE (current_function_decl); *chain = alloc_stmt_list (); /* And get rid of annotations we no longer need. */ delete_tree_cfg_annotations (); } static struct tree_opt_pass pass_free_datastructures = { NULL, /* name */ NULL, /* gate */ execute_free_datastructures, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Do the actions required to initialize internal data structures used in tree-ssa optimization passes. */ static void execute_init_datastructures (void) { /* Allocate hash tables, arrays and other structures. */ init_tree_ssa (); } static struct tree_opt_pass pass_init_datastructures = { NULL, /* name */ NULL, /* gate */ execute_init_datastructures, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Iterate over the pass tree allocating dump file numbers. We want to do this depth first, and independent of whether the pass is enabled or not. */ static void register_one_dump_file (struct tree_opt_pass *pass) { char *dot_name, *flag_name; char num[10]; if (!pass->name) return; /* See below in dup_pass_1. */ num[0] = '\0'; if (pass->static_pass_number) sprintf (num, "%d", ((int) pass->static_pass_number < 0 ? 1 : pass->static_pass_number)); dot_name = concat (".", pass->name, num, NULL); flag_name = concat ("tree-", pass->name, num, NULL); pass->static_pass_number = dump_register (dot_name, flag_name); } static int register_dump_files (struct tree_opt_pass *pass, int properties) { do { /* Verify that all required properties are present. */ if (pass->properties_required & ~properties) abort (); if (pass->properties_destroyed & pass->properties_provided) abort (); pass->properties_required = properties; pass->properties_provided = properties = (properties | pass->properties_provided) & ~pass->properties_destroyed; if (properties & PROP_trees) register_one_dump_file (pass); if (pass->sub) properties = register_dump_files (pass->sub, properties); pass = pass->next; } while (pass); return properties; } /* Duplicate a pass that's to be run more than once. 
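   The original and its copies are told apart through static_pass_number, as described in the body below. For instance, if DUP_PASS were applied to pass_dce three times, the original would end up with static_pass_number -3 and the copies with 2, 3 and 4, which register_one_dump_file turns into the dump names dce1 through dce4 (with the "tree-" prefix for the corresponding dump flags).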
*/ static struct tree_opt_pass * dup_pass_1 (struct tree_opt_pass *pass) { struct tree_opt_pass *new; new = xmalloc (sizeof (*new)); memcpy (new, pass, sizeof (*new)); /* Indicate to register_dump_files that this pass has duplicates, and so it should rename the dump file. The first instance will be < 0, and be number of duplicates = -static_pass_number + 1. Subsequent instances will be > 0 and just the duplicate number. */ if (pass->name) { int n, p = pass->static_pass_number; if (p) n = -(--p) + 1; else n = 2, p = -1; pass->static_pass_number = p; new->static_pass_number = n; } return new; } /* Construct the pass tree. */ void init_tree_optimization_passes (void) { struct tree_opt_pass **p; #define NEXT_PASS(PASS) (*p = &PASS, p = &(*p)->next) #define DUP_PASS(PASS) (*dup_pass_1 (&PASS)) p = &all_passes; NEXT_PASS (pass_gimple); NEXT_PASS (pass_remove_useless_stmts); NEXT_PASS (pass_mudflap_1); NEXT_PASS (pass_lower_cf); NEXT_PASS (pass_lower_eh); NEXT_PASS (pass_build_cfg); NEXT_PASS (pass_tree_profile); NEXT_PASS (pass_init_datastructures); NEXT_PASS (pass_all_optimizations); NEXT_PASS (pass_mudflap_2); NEXT_PASS (pass_free_datastructures); NEXT_PASS (pass_expand); NEXT_PASS (pass_rest_of_compilation); *p = NULL; p = &pass_all_optimizations.sub; NEXT_PASS (pass_referenced_vars); NEXT_PASS (pass_build_pta); NEXT_PASS (pass_build_ssa); NEXT_PASS (pass_rename_ssa_copies); NEXT_PASS (pass_early_warn_uninitialized); NEXT_PASS (pass_dce); NEXT_PASS (pass_dominator); NEXT_PASS (pass_redundant_phi); NEXT_PASS (DUP_PASS (pass_dce)); NEXT_PASS (pass_forwprop); NEXT_PASS (pass_phiopt); NEXT_PASS (pass_may_alias); NEXT_PASS (pass_tail_recursion); NEXT_PASS (pass_ch); NEXT_PASS (pass_del_pta); NEXT_PASS (pass_profile); NEXT_PASS (pass_lower_complex); NEXT_PASS (pass_sra); NEXT_PASS (DUP_PASS (pass_rename_ssa_copies)); NEXT_PASS (DUP_PASS (pass_dominator)); NEXT_PASS (DUP_PASS (pass_redundant_phi)); NEXT_PASS (DUP_PASS (pass_dce)); NEXT_PASS (pass_dse); NEXT_PASS (DUP_PASS (pass_forwprop)); NEXT_PASS (DUP_PASS (pass_phiopt)); NEXT_PASS (pass_ccp); NEXT_PASS (DUP_PASS (pass_redundant_phi)); NEXT_PASS (pass_fold_builtins); NEXT_PASS (pass_split_crit_edges); NEXT_PASS (pass_pre); NEXT_PASS (pass_loop); NEXT_PASS (DUP_PASS (pass_dominator)); NEXT_PASS (DUP_PASS (pass_redundant_phi)); NEXT_PASS (pass_cd_dce); NEXT_PASS (DUP_PASS (pass_dse)); NEXT_PASS (DUP_PASS (pass_forwprop)); NEXT_PASS (DUP_PASS (pass_phiopt)); NEXT_PASS (pass_tail_calls); NEXT_PASS (pass_late_warn_uninitialized); NEXT_PASS (pass_warn_function_return); NEXT_PASS (pass_del_ssa); NEXT_PASS (pass_nrv); NEXT_PASS (pass_remove_useless_vars); *p = NULL; p = &pass_loop.sub; NEXT_PASS (pass_loop_init); NEXT_PASS (pass_loop_done); *p = NULL; #undef NEXT_PASS #undef DUP_PASS /* Register the passes with the tree dump code. */ register_dump_files (all_passes, 0); } static void execute_pass_list (struct tree_opt_pass *); static unsigned int last_verified; static void execute_todo (unsigned int flags) { if (flags & TODO_rename_vars) { rewrite_into_ssa (false); bitmap_clear (vars_to_rename); } if ((flags & TODO_dump_func) && dump_file) dump_function_to_file (current_function_decl, dump_file, dump_flags); if (flags & TODO_ggc_collect) ggc_collect (); #ifdef ENABLE_CHECKING if (flags & TODO_verify_ssa) verify_ssa (); if (flags & TODO_verify_flow) verify_flow_info (); if (flags & TODO_verify_stmts) verify_stmts (); #endif } static bool execute_one_pass (struct tree_opt_pass *pass) { unsigned int todo; /* See if we're supposed to run this pass. 
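   A gate that returns false skips not only the pass itself but, via execute_pass_list below, all of its sub-passes as well; this is how gate_all_optimizations switches the whole SSA pipeline off at -O0 or when errors have been seen.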
*/ if (pass->gate && !pass->gate ()) return false; /* Note that the folders should only create gimple expressions. This is a hack until the new folder is ready. */ in_gimple_form = (pass->properties_provided & PROP_trees) != 0; /* Run pre-pass verification. */ todo = pass->todo_flags_start & ~last_verified; if (todo) execute_todo (todo); /* If a dump file name is present, open it if enabled. */ if (pass->static_pass_number) { dump_file = dump_begin (pass->static_pass_number, &dump_flags); if (dump_file) { const char *dname, *aname; dname = lang_hooks.decl_printable_name (current_function_decl, 2); aname = (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl))); fprintf (dump_file, "\n;; Function %s (%s)\n\n", dname, aname); } } /* If a timevar is present, start it. */ if (pass->tv_id) timevar_push (pass->tv_id); /* Do it! */ if (pass->execute) pass->execute (); /* Run post-pass cleanup and verification. */ todo = pass->todo_flags_finish; last_verified = todo & TODO_verify_all; if (todo) execute_todo (todo); /* Close down timevar and dump file. */ if (pass->tv_id) timevar_pop (pass->tv_id); if (dump_file) { dump_end (pass->static_pass_number, dump_file); dump_file = NULL; } return true; } static void execute_pass_list (struct tree_opt_pass *pass) { do { if (execute_one_pass (pass) && pass->sub) execute_pass_list (pass->sub); pass = pass->next; } while (pass); } /* For functions-as-trees languages, this performs all optimization and compilation for FNDECL. */ void tree_rest_of_compilation (tree fndecl, bool nested_p) { location_t saved_loc; struct cgraph_node *saved_node = NULL, *node; timevar_push (TV_EXPAND); if (flag_unit_at_a_time && !cgraph_global_info_ready) abort (); /* Initialize the RTL code for the function. */ current_function_decl = fndecl; saved_loc = input_location; input_location = DECL_SOURCE_LOCATION (fndecl); init_function_start (fndecl); /* Even though we're inside a function body, we still don't want to call expand_expr to calculate the size of a variable-sized array. We haven't necessarily assigned RTL to all variables yet, so it's not safe to try to expand expressions involving them. */ cfun->x_dont_save_pending_sizes_p = 1; node = cgraph_node (fndecl); /* We might need the body of this function so that we can expand it inline somewhere else. This means not lowering some constructs such as exception handling. */ if (cgraph_preserve_function_body_p (fndecl)) { if (!flag_unit_at_a_time) { struct cgraph_edge *e; saved_node = cgraph_clone_node (node); for (e = saved_node->callees; e; e = e->next_callee) if (!e->inline_failed) cgraph_clone_inlined_nodes (e, true); } cfun->saved_tree = save_body (fndecl, &cfun->saved_args); } if (flag_inline_trees) { struct cgraph_edge *e; for (e = node->callees; e; e = e->next_callee) if (!e->inline_failed || warn_inline) break; if (e) { timevar_push (TV_INTEGRATION); optimize_inline_calls (fndecl); timevar_pop (TV_INTEGRATION); } } if (!vars_to_rename) vars_to_rename = BITMAP_XMALLOC (); /* If this is a nested function, protect the local variables in the stack above us from being collected while we're compiling this function. */ if (nested_p) ggc_push_context (); /* Perform all tree transforms and optimizations. */ execute_pass_list (all_passes); /* Restore original body if still needed. */ if (cfun->saved_tree) { DECL_SAVED_TREE (fndecl) = cfun->saved_tree; DECL_ARGUMENTS (fndecl) = cfun->saved_args; /* When not in unit-at-a-time mode, we must preserve out of line copy representing node before inlining. 
Restore original outgoing edges using clone we created earlier. */ if (!flag_unit_at_a_time) { struct cgraph_edge *e; while (node->callees) cgraph_remove_edge (node->callees); node->callees = saved_node->callees; saved_node->callees = NULL; for (e = saved_node->callees; e; e = e->next_callee) e->caller = node; cgraph_remove_node (saved_node); } } else DECL_SAVED_TREE (fndecl) = NULL; cfun = 0; /* If requested, warn about function definitions where the function will return a value (usually of some struct or union type) which itself will take up a lot of stack space. */ if (warn_larger_than && !DECL_EXTERNAL (fndecl) && TREE_TYPE (fndecl)) { tree ret_type = TREE_TYPE (TREE_TYPE (fndecl)); if (ret_type && TYPE_SIZE_UNIT (ret_type) && TREE_CODE (TYPE_SIZE_UNIT (ret_type)) == INTEGER_CST && 0 < compare_tree_int (TYPE_SIZE_UNIT (ret_type), larger_than_size)) { unsigned int size_as_int = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (ret_type)); if (compare_tree_int (TYPE_SIZE_UNIT (ret_type), size_as_int) == 0) warning ("%Jsize of return value of '%D' is %u bytes", fndecl, fndecl, size_as_int); else warning ("%Jsize of return value of '%D' is larger than %wd bytes", fndecl, fndecl, larger_than_size); } } if (!nested_p && !flag_inline_trees) { DECL_SAVED_TREE (fndecl) = NULL; if (DECL_STRUCT_FUNCTION (fndecl) == 0 && !cgraph_node (fndecl)->origin) { /* Stop pointing to the local nodes about to be freed. But DECL_INITIAL must remain nonzero so we know this was an actual function definition. For a nested function, this is done in c_pop_function_context. If rest_of_compilation set this to 0, leave it 0. */ if (DECL_INITIAL (fndecl) != 0) DECL_INITIAL (fndecl) = error_mark_node; } } input_location = saved_loc; ggc_collect (); /* Undo the GC context switch. */ if (nested_p) ggc_pop_context (); timevar_pop (TV_EXPAND); } /* Functions to analyze and validate GIMPLE trees. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo Rewritten by Jason Merrill This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* GCC GIMPLE structure Inspired by the SIMPLE C grammar at http://www-acaps.cs.mcgill.ca/info/McCAT/McCAT.html function : FUNCTION_DECL DECL_SAVED_TREE -> compound-stmt compound-stmt: STATEMENT_LIST members -> stmt stmt : block | if-stmt | switch-stmt | goto-stmt | return-stmt | resx-stmt | label-stmt | try-stmt | modify-stmt | call-stmt block : BIND_EXPR BIND_EXPR_VARS -> chain of DECLs BIND_EXPR_BLOCK -> BLOCK BIND_EXPR_BODY -> compound-stmt if-stmt : COND_EXPR op0 -> condition op1 -> compound-stmt op2 -> compound-stmt switch-stmt : SWITCH_EXPR op0 -> val op1 -> NULL op2 -> TREE_VEC of CASE_LABEL_EXPRs The CASE_LABEL_EXPRs are sorted by CASE_LOW, and default is last. 
goto-stmt : GOTO_EXPR op0 -> LABEL_DECL | val return-stmt : RETURN_EXPR op0 -> return-value return-value : NULL | RESULT_DECL | MODIFY_EXPR op0 -> RESULT_DECL op1 -> lhs resx-stmt : RESX_EXPR label-stmt : LABEL_EXPR op0 -> LABEL_DECL try-stmt : TRY_CATCH_EXPR op0 -> compound-stmt op1 -> handler | TRY_FINALLY_EXPR op0 -> compound-stmt op1 -> compound-stmt handler : catch-seq | EH_FILTER_EXPR | compound-stmt catch-seq : STATEMENT_LIST members -> CATCH_EXPR modify-stmt : MODIFY_EXPR op0 -> lhs op1 -> rhs call-stmt : CALL_EXPR op0 -> val | OBJ_TYPE_REF op1 -> call-arg-list call-arg-list: TREE_LIST members -> lhs addr-expr-arg: ID | compref lhs : addr-expr-arg | '*' val | bitfieldref min-lval : ID | '*' val bitfieldref : BIT_FIELD_REF op0 -> inner-compref op1 -> CONST op2 -> var compref : inner-compref | REALPART_EXPR op0 -> inner-compref | IMAGPART_EXPR op0 -> inner-compref inner-compref: min-lval | COMPONENT_REF op0 -> inner-compref op1 -> FIELD_DECL op2 -> val | ARRAY_REF op0 -> inner-compref op1 -> val op2 -> val op3 -> val | ARRAY_RANGE_REF op0 -> inner-compref op1 -> val op2 -> val op3 -> val | VIEW_CONVERT_EXPR op0 -> inner-compref condition : val | val RELOP val val : ID | CONST rhs : lhs | CONST | '&' addr-expr-arg | call_expr | UNOP val | val BINOP val | val RELOP val */ static inline bool is_gimple_id (tree); /* Validation of GIMPLE expressions. */ /* Return true if T is a GIMPLE RHS. */ bool is_gimple_rhs (tree t) { enum tree_code code = TREE_CODE (t); switch (TREE_CODE_CLASS (code)) { case '1': case '2': case '<': return true; default: break; } switch (code) { case TRUTH_NOT_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case ADDR_EXPR: case CALL_EXPR: case CONSTRUCTOR: case COMPLEX_EXPR: /* FIXME lower VA_ARG_EXPR. */ case VA_ARG_EXPR: case INTEGER_CST: case REAL_CST: case STRING_CST: case COMPLEX_CST: case VECTOR_CST: case OBJ_TYPE_REF: return true; default: break; } return is_gimple_lvalue (t) || is_gimple_val (t); } /* Returns true if T is a valid CONSTRUCTOR component in GIMPLE, either a val or another CONSTRUCTOR. */ bool is_gimple_constructor_elt (tree t) { return (is_gimple_val (t) || TREE_CODE (t) == CONSTRUCTOR); } /* Return true if T is a valid LHS for a GIMPLE assignment expression. */ bool is_gimple_lvalue (tree t) { return (is_gimple_addr_expr_arg (t) || TREE_CODE (t) == INDIRECT_REF /* These are complex lvalues, but don't have addresses, so they go here. */ || TREE_CODE (t) == BIT_FIELD_REF); } /* Return true if T is a GIMPLE condition. */ bool is_gimple_condexpr (tree t) { return (is_gimple_val (t) || TREE_CODE_CLASS (TREE_CODE (t)) == '<'); } /* Return true if T is a valid operand for ADDR_EXPR. */ bool is_gimple_addr_expr_arg (tree t) { return (is_gimple_id (t) || TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF || TREE_CODE (t) == COMPONENT_REF || TREE_CODE (t) == REALPART_EXPR || TREE_CODE (t) == IMAGPART_EXPR || TREE_CODE (t) == INDIRECT_REF); } /* Return true if T is function invariant. Or rather a restricted form of function invariant. */ bool is_gimple_min_invariant (tree t) { switch (TREE_CODE (t)) { case ADDR_EXPR: return TREE_INVARIANT (t); case INTEGER_CST: case REAL_CST: case STRING_CST: case COMPLEX_CST: case VECTOR_CST: return !TREE_OVERFLOW (t); default: return false; } } /* Return true if T looks like a valid GIMPLE statement. 
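   As an illustration of the grammar above, the C statement

      a = b + c * d;

   is not a valid GIMPLE statement on its own, because the rhs rule only allows "val BINOP val"; the gimplifier introduces a temporary (shown here as T1) and emits

      T1 = c * d;
      a = b + T1;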
*/ bool is_gimple_stmt (tree t) { enum tree_code code = TREE_CODE (t); if (IS_EMPTY_STMT (t)) return 1; switch (code) { case BIND_EXPR: case COND_EXPR: /* These are only valid if they're void. */ return TREE_TYPE (t) == NULL || VOID_TYPE_P (TREE_TYPE (t)); case SWITCH_EXPR: case GOTO_EXPR: case RETURN_EXPR: case LABEL_EXPR: case CASE_LABEL_EXPR: case TRY_CATCH_EXPR: case TRY_FINALLY_EXPR: case EH_FILTER_EXPR: case CATCH_EXPR: case ASM_EXPR: case RESX_EXPR: case PHI_NODE: case STATEMENT_LIST: /* These are always void. */ return true; case VA_ARG_EXPR: /* FIXME this should be lowered. */ return true; case CALL_EXPR: case MODIFY_EXPR: /* These are valid regardless of their type. */ return true; default: return false; } } /* Return true if T is a variable. */ bool is_gimple_variable (tree t) { return (TREE_CODE (t) == VAR_DECL || TREE_CODE (t) == PARM_DECL || TREE_CODE (t) == RESULT_DECL || TREE_CODE (t) == SSA_NAME); } /* Return true if T is a GIMPLE identifier (something with an address). */ static inline bool is_gimple_id (tree t) { return (is_gimple_variable (t) || TREE_CODE (t) == FUNCTION_DECL || TREE_CODE (t) == LABEL_DECL /* Allow string constants, since they are addressable. */ || TREE_CODE (t) == STRING_CST); } /* Return true if TYPE is a suitable type for a scalar register variable. */ bool is_gimple_reg_type (tree type) { return (!AGGREGATE_TYPE_P (type) && TREE_CODE (type) != COMPLEX_TYPE); } /* Return true if T is a scalar register variable. */ bool is_gimple_reg (tree t) { if (TREE_CODE (t) == SSA_NAME) t = SSA_NAME_VAR (t); return (is_gimple_variable (t) && is_gimple_reg_type (TREE_TYPE (t)) /* A volatile decl is not acceptable because we can't reuse it as needed. We need to copy it into a temp first. */ && ! TREE_THIS_VOLATILE (t) && ! TREE_ADDRESSABLE (t) && ! needs_to_live_in_memory (t)); } /* Return true if T is a GIMPLE variable whose address is not needed. */ bool is_gimple_non_addressable (tree t) { if (TREE_CODE (t) == SSA_NAME) t = SSA_NAME_VAR (t); return (is_gimple_variable (t) && ! TREE_ADDRESSABLE (t) && ! needs_to_live_in_memory (t)); } /* Return true if T is a GIMPLE rvalue, i.e. an identifier or a constant. */ bool is_gimple_val (tree t) { /* Make loads from volatiles and memory vars explicit. */ if (is_gimple_variable (t) && is_gimple_reg_type (TREE_TYPE (t)) && !is_gimple_reg (t)) return false; /* FIXME make these decls. That can happen only when we expose the entire landing-pad construct at the tree level. */ if (TREE_CODE (t) == EXC_PTR_EXPR || TREE_CODE (t) == FILTER_EXPR) return 1; return (is_gimple_variable (t) || is_gimple_min_invariant (t)); } /* Return true if T is a GIMPLE minimal lvalue. */ bool is_gimple_min_lval (tree t) { return (is_gimple_id (t) || TREE_CODE (t) == INDIRECT_REF); } /* Return true if T is a typecast operation. */ bool is_gimple_cast (tree t) { return (TREE_CODE (t) == NOP_EXPR || TREE_CODE (t) == CONVERT_EXPR || TREE_CODE (t) == FIX_TRUNC_EXPR || TREE_CODE (t) == FIX_CEIL_EXPR || TREE_CODE (t) == FIX_FLOOR_EXPR || TREE_CODE (t) == FIX_ROUND_EXPR); } /* Return true if T is a valid op0 of a CALL_EXPR. */ bool is_gimple_call_addr (tree t) { return (TREE_CODE (t) == OBJ_TYPE_REF || is_gimple_val (t)); } /* If T makes a function call, return the corresponding CALL_EXPR operand. Otherwise, return NULL_TREE. */ tree get_call_expr_in (tree t) { if (TREE_CODE (t) == MODIFY_EXPR) t = TREE_OPERAND (t, 1); if (TREE_CODE (t) == CALL_EXPR) return t; return NULL_TREE; } /* Given a memory reference expression, return the base address. 
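   For a reference such as A.B[I].C all the handled components are stripped and the VAR_DECL A is returned.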
Note that, in contrast with get_base_var, this will not recurse inside INDIRECT_REF expressions. Therefore, given the reference PTR->FIELD, this function will return *PTR. Whereas get_base_var would've returned PTR. */ tree get_base_address (tree t) { while (TREE_CODE (t) == REALPART_EXPR || TREE_CODE (t) == IMAGPART_EXPR || handled_component_p (t)) t = TREE_OPERAND (t, 0); if (SSA_VAR_P (t) || TREE_CODE (t) == STRING_CST || TREE_CODE (t) == CONSTRUCTOR || TREE_CODE (t) == INDIRECT_REF) return t; else return NULL_TREE; } void recalculate_side_effects (tree t) { enum tree_code code = TREE_CODE (t); int fro = first_rtl_op (code); int i; switch (TREE_CODE_CLASS (code)) { case 'e': switch (code) { case INIT_EXPR: case MODIFY_EXPR: case VA_ARG_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: /* All of these have side-effects, no matter what their operands are. */ return; default: break; } /* Fall through. */ case '<': /* a comparison expression */ case '1': /* a unary arithmetic expression */ case '2': /* a binary arithmetic expression */ case 'r': /* a reference */ TREE_SIDE_EFFECTS (t) = TREE_THIS_VOLATILE (t); for (i = 0; i < fro; ++i) { tree op = TREE_OPERAND (t, i); if (op && TREE_SIDE_EFFECTS (op)) TREE_SIDE_EFFECTS (t) = 1; } break; } } /* Tree based linear points-to analysis Copyright (C) 2002, 2003 Free Software Foundation, Inc. Contributed by Daniel Berlin This file is part of GCC. GCC is free software; you can redistribute it and/or modify under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ alias_var alias_var_new_with_aterm (tree decl, struct aterm_ *term) { alias_var ret = ggc_alloc (sizeof (struct alias_var_aterm)); ALIAS_VAR_KIND (ret) = ATERM_AVAR; ALIAS_VAR_DECL (ret) = decl; ALIAS_VAR_ATERM (ret) = term; return ret; } /* Tree lowering pass. This pass converts the GENERIC functions-as-trees tree representation into the GIMPLE form. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Major work done by Sebastian Pop , Diego Novillo and Jason Merrill . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Default macros to initialize the lang_hooks data structure. Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Alexandre Oliva This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_LANG_HOOKS_DEF_H #define GCC_LANG_HOOKS_DEF_H /* General-purpose hooks. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ #ifndef GCC_HOOKS_H #define GCC_HOOKS_H extern bool hook_bool_void_false (void); extern bool hook_bool_bool_false (bool); extern bool hook_bool_tree_false (tree); extern bool hook_bool_tree_true (tree); extern bool hook_bool_tree_hwi_hwi_tree_false (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree); extern bool hook_bool_tree_hwi_hwi_tree_true (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree); extern bool hook_bool_rtx_false (rtx); extern bool hook_bool_uintp_uintp_false (unsigned int *, unsigned int *); extern bool hook_bool_rtx_int_int_intp_false (rtx, int, int, int *); extern bool hook_bool_constcharptr_size_t_false (const char *, size_t); extern void hook_void_void (void); extern void hook_void_int (int); extern void hook_void_charptr (char *); extern void hook_void_FILEptr_constcharptr (FILE *, const char *); extern void hook_void_tree (tree); extern void hook_void_tree_treeptr (tree, tree *); extern int hook_int_tree_tree_1 (tree, tree); extern int hook_int_rtx_0 (rtx); extern int hook_int_size_t_constcharptr_int_0 (size_t, const char *, int); extern int hook_int_void_no_regs (void); extern int hook_int_void_1 (void); extern unsigned hook_uint_uint_constcharptrptr_0 (unsigned, const char **); extern bool default_can_output_mi_thunk_no_vcall (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree); extern bool hook_bool_tree_tree_false (tree, tree); extern rtx hook_rtx_rtx_identity (rtx); extern rtx hook_rtx_rtx_null (rtx); extern rtx hook_rtx_tree_int_null (tree, int); extern tree hook_tree_tree_identity (tree a); extern const char *hook_constcharptr_tree_null (tree); #endif struct diagnostic_context; /* Provide a hook routine for alias sets that always returns 1. This is used by languages that haven't deal with alias sets yet. 
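   (An editorially added note on the generic hooks declared above follows;
   the declaration this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources.  The hook_* names above
   encode both the signature and the constant result: hook_bool_rtx_false,
   for instance, takes an rtx, ignores it, and returns false.  A target or
   front end wanting the trivial behavior for some hook can plug one of
   these in directly; TARGET_EXAMPLE_PREDICATE_P below is a hypothetical
   hook macro used only for illustration.  */
#if 0
#undef  TARGET_EXAMPLE_PREDICATE_P
#define TARGET_EXAMPLE_PREDICATE_P hook_bool_rtx_false
#endif

/* End of editorial sketch.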
*/ extern HOST_WIDE_INT hook_get_alias_set_0 (tree); /* Note to creators of new hooks: The macros in this file should NOT be surrounded by a #ifdef...#endif pair, since this file declares the defaults. Each front end overrides any hooks it wishes to, in the file containing its struct lang_hooks, AFTER including this file. */ /* See langhooks.h for the definition and documentation of each hook. */ extern void lhd_do_nothing (void); extern void lhd_do_nothing_t (tree); extern void lhd_do_nothing_i (int); extern void lhd_do_nothing_f (struct function *); extern bool lhd_post_options (const char **); extern HOST_WIDE_INT lhd_get_alias_set (tree); extern tree lhd_return_tree (tree); extern tree lhd_return_null_tree_v (void); extern tree lhd_return_null_tree (tree); extern tree lhd_do_nothing_iii_return_null_tree (int, int, int); extern int lhd_safe_from_p (rtx, tree); extern int lhd_staticp (tree); extern int lhd_unsafe_for_reeval (tree); extern void lhd_clear_binding_stack (void); extern void lhd_print_tree_nothing (FILE *, tree, int); extern const char *lhd_decl_printable_name (tree, int); extern int lhd_types_compatible_p (tree, tree); extern rtx lhd_expand_expr (tree, rtx, enum machine_mode, int, rtx *); extern int lhd_expand_decl (tree); extern void lhd_print_error_function (struct diagnostic_context *, const char *); extern void lhd_set_decl_assembler_name (tree); extern bool lhd_can_use_bit_fields_p (void); extern bool lhd_warn_unused_global_decl (tree); extern void lhd_incomplete_type_error (tree, tree); extern tree lhd_type_promotes_to (tree); extern void lhd_register_builtin_type (tree, const char *); extern bool lhd_decl_ok_for_sibcall (tree); extern tree lhd_expr_size (tree); extern bool lhd_decl_uninit (tree); extern tree lhd_get_callee_fndecl (tree); extern size_t lhd_tree_size (enum tree_code); /* Declarations of default tree inlining hooks. */ extern tree lhd_tree_inlining_walk_subtrees (tree *, int *, walk_tree_fn, void *, void *); extern int lhd_tree_inlining_cannot_inline_tree_fn (tree *); extern int lhd_tree_inlining_disregard_inline_limits (tree); extern tree lhd_tree_inlining_add_pending_fn_decls (void *, tree); extern int lhd_tree_inlining_auto_var_in_fn_p (tree, tree); extern tree lhd_tree_inlining_copy_res_decl_for_inlining (tree, tree, tree, void *, int *, tree); extern int lhd_tree_inlining_anon_aggr_type_p (tree); extern int lhd_tree_inlining_start_inlining (tree); extern void lhd_tree_inlining_end_inlining (tree); extern tree lhd_tree_inlining_convert_parm_for_inlining (tree, tree, tree, int); extern void lhd_initialize_diagnostics (struct diagnostic_context *); extern tree lhd_callgraph_analyze_expr (tree *, int *, tree); /* Declarations for tree gimplification hooks. 
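   (An editorially added sketch of how a front end consumes these defaults
   follows.)  */

/* Illustrative sketch, not from the GCC sources: a front end includes this
   header, redefines only the hooks it cares about, and then instantiates
   its whole hook vector from LANG_HOOKS_INITIALIZER (defined further below
   in this header).  "GNU Example" and example_parse_file are hypothetical
   names.  */
#if 0
#undef  LANG_HOOKS_NAME
#define LANG_HOOKS_NAME "GNU Example"
#undef  LANG_HOOKS_PARSE_FILE
#define LANG_HOOKS_PARSE_FILE example_parse_file

const struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER;
#endif

/* End of editorial sketch.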
*/ extern int lhd_gimplify_expr (tree *, tree *, tree *); #define LANG_HOOKS_NAME "GNU unknown" #define LANG_HOOKS_IDENTIFIER_SIZE sizeof (struct lang_identifier) #define LANG_HOOKS_INIT hook_bool_void_false #define LANG_HOOKS_FINISH lhd_do_nothing #define LANG_HOOKS_PARSE_FILE lhd_do_nothing_i #define LANG_HOOKS_CLEAR_BINDING_STACK lhd_clear_binding_stack #define LANG_HOOKS_INIT_OPTIONS hook_uint_uint_constcharptrptr_0 #define LANG_HOOKS_INITIALIZE_DIAGNOSTICS lhd_initialize_diagnostics #define LANG_HOOKS_HANDLE_OPTION hook_int_size_t_constcharptr_int_0 #define LANG_HOOKS_MISSING_ARGUMENT hook_bool_constcharptr_size_t_false #define LANG_HOOKS_POST_OPTIONS lhd_post_options #define LANG_HOOKS_GET_ALIAS_SET lhd_get_alias_set #define LANG_HOOKS_EXPAND_CONSTANT lhd_return_tree #define LANG_HOOKS_EXPAND_EXPR lhd_expand_expr #define LANG_HOOKS_EXPAND_DECL lhd_expand_decl #define LANG_HOOKS_SAFE_FROM_P lhd_safe_from_p #define LANG_HOOKS_FINISH_INCOMPLETE_DECL lhd_do_nothing_t #define LANG_HOOKS_UNSAFE_FOR_REEVAL lhd_unsafe_for_reeval #define LANG_HOOKS_STATICP lhd_staticp #define LANG_HOOKS_DUP_LANG_SPECIFIC_DECL lhd_do_nothing_t #define LANG_HOOKS_UNSAVE_EXPR_NOW lhd_unsave_expr_now #define LANG_HOOKS_MAYBE_BUILD_CLEANUP lhd_return_null_tree #define LANG_HOOKS_SET_DECL_ASSEMBLER_NAME lhd_set_decl_assembler_name #define LANG_HOOKS_CAN_USE_BIT_FIELDS_P lhd_can_use_bit_fields_p #define LANG_HOOKS_HONOR_READONLY false #define LANG_HOOKS_NO_BODY_BLOCKS false #define LANG_HOOKS_PRINT_STATISTICS lhd_do_nothing #define LANG_HOOKS_PRINT_XNODE lhd_print_tree_nothing #define LANG_HOOKS_PRINT_DECL lhd_print_tree_nothing #define LANG_HOOKS_PRINT_TYPE lhd_print_tree_nothing #define LANG_HOOKS_PRINT_IDENTIFIER lhd_print_tree_nothing #define LANG_HOOKS_PRINT_ERROR_FUNCTION lhd_print_error_function #define LANG_HOOKS_DECL_PRINTABLE_NAME lhd_decl_printable_name #define LANG_HOOKS_GET_CALLEE_FNDECL lhd_return_null_tree #define LANG_HOOKS_EXPR_SIZE lhd_expr_size #define LANG_HOOKS_TREE_SIZE lhd_tree_size #define LANG_HOOKS_TYPES_COMPATIBLE_P lhd_types_compatible_p #define LANG_HOOKS_UPDATE_DECL_AFTER_SAVING NULL #define LANG_HOOKS_FUNCTION_INIT lhd_do_nothing_f #define LANG_HOOKS_FUNCTION_FINAL lhd_do_nothing_f #define LANG_HOOKS_FUNCTION_ENTER_NESTED lhd_do_nothing_f #define LANG_HOOKS_FUNCTION_LEAVE_NESTED lhd_do_nothing_f #define LANG_HOOKS_FUNCTION_MISSING_NORETURN_OK_P hook_bool_tree_true /* Attribute hooks. */ #define LANG_HOOKS_ATTRIBUTE_TABLE NULL #define LANG_HOOKS_COMMON_ATTRIBUTE_TABLE NULL #define LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE NULL /* Tree inlining hooks. 
*/ #define LANG_HOOKS_TREE_INLINING_WALK_SUBTREES lhd_tree_inlining_walk_subtrees #define LANG_HOOKS_TREE_INLINING_CANNOT_INLINE_TREE_FN \ lhd_tree_inlining_cannot_inline_tree_fn #define LANG_HOOKS_TREE_INLINING_DISREGARD_INLINE_LIMITS \ lhd_tree_inlining_disregard_inline_limits #define LANG_HOOKS_TREE_INLINING_ADD_PENDING_FN_DECLS \ lhd_tree_inlining_add_pending_fn_decls #define LANG_HOOKS_TREE_INLINING_AUTO_VAR_IN_FN_P \ lhd_tree_inlining_auto_var_in_fn_p #define LANG_HOOKS_TREE_INLINING_COPY_RES_DECL_FOR_INLINING \ lhd_tree_inlining_copy_res_decl_for_inlining #define LANG_HOOKS_TREE_INLINING_ANON_AGGR_TYPE_P \ lhd_tree_inlining_anon_aggr_type_p #define LANG_HOOKS_TREE_INLINING_VAR_MOD_TYPE_P \ hook_bool_tree_tree_false #define LANG_HOOKS_TREE_INLINING_START_INLINING \ lhd_tree_inlining_start_inlining #define LANG_HOOKS_TREE_INLINING_END_INLINING \ lhd_tree_inlining_end_inlining #define LANG_HOOKS_TREE_INLINING_CONVERT_PARM_FOR_INLINING \ lhd_tree_inlining_convert_parm_for_inlining #define LANG_HOOKS_TREE_INLINING_ESTIMATE_NUM_INSNS \ NULL #define LANG_HOOKS_TREE_INLINING_INITIALIZER { \ LANG_HOOKS_TREE_INLINING_WALK_SUBTREES, \ LANG_HOOKS_TREE_INLINING_CANNOT_INLINE_TREE_FN, \ LANG_HOOKS_TREE_INLINING_DISREGARD_INLINE_LIMITS, \ LANG_HOOKS_TREE_INLINING_ADD_PENDING_FN_DECLS, \ LANG_HOOKS_TREE_INLINING_AUTO_VAR_IN_FN_P, \ LANG_HOOKS_TREE_INLINING_COPY_RES_DECL_FOR_INLINING, \ LANG_HOOKS_TREE_INLINING_ANON_AGGR_TYPE_P, \ LANG_HOOKS_TREE_INLINING_VAR_MOD_TYPE_P, \ LANG_HOOKS_TREE_INLINING_START_INLINING, \ LANG_HOOKS_TREE_INLINING_END_INLINING, \ LANG_HOOKS_TREE_INLINING_CONVERT_PARM_FOR_INLINING, \ LANG_HOOKS_TREE_INLINING_ESTIMATE_NUM_INSNS \ } #define LANG_HOOKS_CALLGRAPH_ANALYZE_EXPR lhd_callgraph_analyze_expr #define LANG_HOOKS_CALLGRAPH_EXPAND_FUNCTION NULL #define LANG_HOOKS_CALLGRAPH_INITIALIZER { \ LANG_HOOKS_CALLGRAPH_ANALYZE_EXPR, \ LANG_HOOKS_CALLGRAPH_EXPAND_FUNCTION, \ } #define LANG_HOOKS_FUNCTION_INITIALIZER { \ LANG_HOOKS_FUNCTION_INIT, \ LANG_HOOKS_FUNCTION_FINAL, \ LANG_HOOKS_FUNCTION_ENTER_NESTED, \ LANG_HOOKS_FUNCTION_LEAVE_NESTED, \ LANG_HOOKS_FUNCTION_MISSING_NORETURN_OK_P \ } /* Hooks for tree gimplification. */ #define LANG_HOOKS_GIMPLIFY_EXPR lhd_gimplify_expr #define LANG_HOOKS_FOLD_OBJ_TYPE_REF NULL #define LANG_HOOKS_GIMPLE_BEFORE_INLINING true /* Tree dump hooks. */ extern bool lhd_tree_dump_dump_tree (void *, tree); extern int lhd_tree_dump_type_quals (tree); extern tree lhd_make_node (enum tree_code); #define LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN lhd_tree_dump_dump_tree #define LANG_HOOKS_TREE_DUMP_TYPE_QUALS_FN lhd_tree_dump_type_quals #define LANG_HOOKS_TREE_DUMP_INITIALIZER { \ LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN, \ LANG_HOOKS_TREE_DUMP_TYPE_QUALS_FN \ } /* Types hooks. There are no reasonable defaults for most of them, so we create a compile-time error instead. 
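   (An editorially added note on the grouped initializer macros above
   follows.)  */

/* Illustrative sketch, not from the GCC sources: because the aggregate
   initializers expand to the individual LANG_HOOKS_* macros only at their
   point of use, a front end overrides a single member by redefining the
   leaf macro first; the aggregate then picks up the override automatically.
   example_dump_tree is a hypothetical function.  */
#if 0
#undef  LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN
#define LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN example_dump_tree
/* LANG_HOOKS_TREE_DUMP_INITIALIZER, and hence LANG_HOOKS_INITIALIZER,
   now use example_dump_tree in place of lhd_tree_dump_dump_tree.  */
#endif

/* End of editorial sketch.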
*/ #define LANG_HOOKS_MAKE_TYPE lhd_make_node #define LANG_HOOKS_INCOMPLETE_TYPE_ERROR lhd_incomplete_type_error #define LANG_HOOKS_TYPE_PROMOTES_TO lhd_type_promotes_to #define LANG_HOOKS_REGISTER_BUILTIN_TYPE lhd_register_builtin_type #define LANG_HOOKS_TYPE_MAX_SIZE lhd_return_null_tree #define LANG_HOOKS_HASH_TYPES true #define LANG_HOOKS_FOR_TYPES_INITIALIZER { \ LANG_HOOKS_MAKE_TYPE, \ LANG_HOOKS_TYPE_FOR_MODE, \ LANG_HOOKS_TYPE_FOR_SIZE, \ LANG_HOOKS_UNSIGNED_TYPE, \ LANG_HOOKS_SIGNED_TYPE, \ LANG_HOOKS_SIGNED_OR_UNSIGNED_TYPE, \ LANG_HOOKS_TYPE_PROMOTES_TO, \ LANG_HOOKS_REGISTER_BUILTIN_TYPE, \ LANG_HOOKS_INCOMPLETE_TYPE_ERROR, \ LANG_HOOKS_TYPE_MAX_SIZE, \ LANG_HOOKS_HASH_TYPES \ } /* Declaration hooks. */ #define LANG_HOOKS_PUSHLEVEL pushlevel #define LANG_HOOKS_POPLEVEL poplevel #define LANG_HOOKS_GLOBAL_BINDINGS_P global_bindings_p #define LANG_HOOKS_INSERT_BLOCK insert_block #define LANG_HOOKS_SET_BLOCK set_block #define LANG_HOOKS_PUSHDECL pushdecl #define LANG_HOOKS_GETDECLS getdecls #define LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL lhd_warn_unused_global_decl #define LANG_HOOKS_WRITE_GLOBALS write_global_declarations #define LANG_HOOKS_PREPARE_ASSEMBLE_VARIABLE NULL #define LANG_HOOKS_DECL_OK_FOR_SIBCALL lhd_decl_ok_for_sibcall #define LANG_HOOKS_DECLS { \ LANG_HOOKS_PUSHLEVEL, \ LANG_HOOKS_POPLEVEL, \ LANG_HOOKS_GLOBAL_BINDINGS_P, \ LANG_HOOKS_INSERT_BLOCK, \ LANG_HOOKS_SET_BLOCK, \ LANG_HOOKS_PUSHDECL, \ LANG_HOOKS_GETDECLS, \ LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL, \ LANG_HOOKS_WRITE_GLOBALS, \ LANG_HOOKS_PREPARE_ASSEMBLE_VARIABLE, \ LANG_HOOKS_DECL_OK_FOR_SIBCALL, \ } /* The whole thing. The structure is defined in langhooks.h. */ #define LANG_HOOKS_INITIALIZER { \ LANG_HOOKS_NAME, \ LANG_HOOKS_IDENTIFIER_SIZE, \ LANG_HOOKS_TREE_SIZE, \ LANG_HOOKS_INIT_OPTIONS, \ LANG_HOOKS_INITIALIZE_DIAGNOSTICS, \ LANG_HOOKS_HANDLE_OPTION, \ LANG_HOOKS_MISSING_ARGUMENT, \ LANG_HOOKS_POST_OPTIONS, \ LANG_HOOKS_INIT, \ LANG_HOOKS_FINISH, \ LANG_HOOKS_PARSE_FILE, \ LANG_HOOKS_CLEAR_BINDING_STACK, \ LANG_HOOKS_GET_ALIAS_SET, \ LANG_HOOKS_EXPAND_CONSTANT, \ LANG_HOOKS_EXPAND_EXPR, \ LANG_HOOKS_EXPAND_DECL, \ LANG_HOOKS_TRUTHVALUE_CONVERSION, \ LANG_HOOKS_SAFE_FROM_P, \ LANG_HOOKS_FINISH_INCOMPLETE_DECL, \ LANG_HOOKS_UNSAFE_FOR_REEVAL, \ LANG_HOOKS_MARK_ADDRESSABLE, \ LANG_HOOKS_STATICP, \ LANG_HOOKS_DUP_LANG_SPECIFIC_DECL, \ LANG_HOOKS_UNSAVE_EXPR_NOW, \ LANG_HOOKS_MAYBE_BUILD_CLEANUP, \ LANG_HOOKS_SET_DECL_ASSEMBLER_NAME, \ LANG_HOOKS_CAN_USE_BIT_FIELDS_P, \ LANG_HOOKS_HONOR_READONLY, \ LANG_HOOKS_NO_BODY_BLOCKS, \ LANG_HOOKS_PRINT_STATISTICS, \ LANG_HOOKS_PRINT_XNODE, \ LANG_HOOKS_PRINT_DECL, \ LANG_HOOKS_PRINT_TYPE, \ LANG_HOOKS_PRINT_IDENTIFIER, \ LANG_HOOKS_DECL_PRINTABLE_NAME, \ LANG_HOOKS_TYPES_COMPATIBLE_P, \ LANG_HOOKS_GET_CALLEE_FNDECL, \ LANG_HOOKS_PRINT_ERROR_FUNCTION, \ LANG_HOOKS_EXPR_SIZE, \ LANG_HOOKS_UPDATE_DECL_AFTER_SAVING, \ LANG_HOOKS_ATTRIBUTE_TABLE, \ LANG_HOOKS_COMMON_ATTRIBUTE_TABLE, \ LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE, \ LANG_HOOKS_FUNCTION_INITIALIZER, \ LANG_HOOKS_TREE_INLINING_INITIALIZER, \ LANG_HOOKS_CALLGRAPH_INITIALIZER, \ LANG_HOOKS_TREE_DUMP_INITIALIZER, \ LANG_HOOKS_DECLS, \ LANG_HOOKS_FOR_TYPES_INITIALIZER, \ LANG_HOOKS_GIMPLIFY_EXPR, \ LANG_HOOKS_FOLD_OBJ_TYPE_REF, \ LANG_HOOKS_GIMPLE_BEFORE_INLINING \ } #endif /* GCC_LANG_HOOKS_DEF_H */ /* Definitions of floating-point access for GNU compiler. Copyright (C) 1989, 1991, 1994, 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_REAL_H #define GCC_REAL_H /* An expanded form of the represented number. */ /* Enumerate the special cases of numbers that we encounter. */ enum real_value_class { rvc_zero, rvc_normal, rvc_inf, rvc_nan }; #define SIGNIFICAND_BITS (128 + HOST_BITS_PER_LONG) #define EXP_BITS (32 - 5) #define MAX_EXP ((1 << (EXP_BITS - 1)) - 1) #define SIGSZ (SIGNIFICAND_BITS / HOST_BITS_PER_LONG) #define SIG_MSB ((unsigned long)1 << (HOST_BITS_PER_LONG - 1)) struct real_value GTY(()) { /* Use the same underlying type for all bit-fields, so as to make sure they're packed together, otherwise REAL_VALUE_TYPE_SIZE will be miscomputed. */ unsigned int /* ENUM_BITFIELD (real_value_class) */ class : 2; unsigned int sign : 1; unsigned int signalling : 1; unsigned int canonical : 1; unsigned int uexp : EXP_BITS; unsigned long sig[SIGSZ]; }; #define REAL_EXP(REAL) \ ((int)((REAL)->uexp ^ (unsigned int)(1 << (EXP_BITS - 1))) \ - (1 << (EXP_BITS - 1))) #define SET_REAL_EXP(REAL, EXP) \ ((REAL)->uexp = ((unsigned int)(EXP) & (unsigned int)((1 << EXP_BITS) - 1))) /* Various headers condition prototypes on #ifdef REAL_VALUE_TYPE, so it needs to be a macro. We do need to continue to have a structure tag so that other headers can forward declare it. */ #define REAL_VALUE_TYPE struct real_value /* We store a REAL_VALUE_TYPE into an rtx, and we do this by putting it in consecutive "w" slots. Moreover, we've got to compute the number of "w" slots at preprocessor time, which means we can't use sizeof. Guess. */ #define REAL_VALUE_TYPE_SIZE (SIGNIFICAND_BITS + 32) #define REAL_WIDTH \ (REAL_VALUE_TYPE_SIZE/HOST_BITS_PER_WIDE_INT \ + (REAL_VALUE_TYPE_SIZE%HOST_BITS_PER_WIDE_INT ? 1 : 0)) /* round up */ /* Verify the guess. */ extern char test_real_width [sizeof(REAL_VALUE_TYPE) <= REAL_WIDTH*sizeof(HOST_WIDE_INT) ? 1 : -1]; /* Calculate the format for CONST_DOUBLE. We need as many slots as are necessary to overlay a REAL_VALUE_TYPE on them. This could be as many as four (32-bit HOST_WIDE_INT, 128-bit REAL_VALUE_TYPE). A number of places assume that there are always at least two 'w' slots in a CONST_DOUBLE, so we provide them even if one would suffice. */ #if REAL_WIDTH == 1 # define CONST_DOUBLE_FORMAT "ww" #else # if REAL_WIDTH == 2 # define CONST_DOUBLE_FORMAT "ww" # else # if REAL_WIDTH == 3 # define CONST_DOUBLE_FORMAT "www" # else # if REAL_WIDTH == 4 # define CONST_DOUBLE_FORMAT "wwww" # else # if REAL_WIDTH == 5 # define CONST_DOUBLE_FORMAT "wwwww" # else # if REAL_WIDTH == 6 # define CONST_DOUBLE_FORMAT "wwwwww" # else #error "REAL_WIDTH > 6 not supported" # endif # endif # endif # endif # endif #endif /* Describes the properties of the specific target format in use. */ struct real_format { /* Move to and from the target bytes. 
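   (An editorially added note on the exponent macros above follows; the
   field this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: REAL_EXP and SET_REAL_EXP
   keep a signed exponent in the unsigned EXP_BITS-wide bit-field uexp.
   SET_REAL_EXP truncates on the way in; REAL_EXP sign-extends on the way
   out via the xor-and-subtract trick, so negative exponents round-trip.
   "r" and the function name are hypothetical.  */
#if 0
static void
example_exponent_roundtrip (void)
{
  REAL_VALUE_TYPE r;

  SET_REAL_EXP (&r, -5);       /* stored as (-5) & ((1 << EXP_BITS) - 1) */
  if (REAL_EXP (&r) != -5)     /* xor with 1 << (EXP_BITS - 1), then
                                  subtract the same bias: -5 again */
    abort ();
}
#endif

/* End of editorial sketch.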
*/ void (*encode) (const struct real_format *, long *, const REAL_VALUE_TYPE *); void (*decode) (const struct real_format *, REAL_VALUE_TYPE *, const long *); /* The radix of the exponent and digits of the significand. */ int b; /* log2(b). */ int log2_b; /* Size of the significand in digits of radix B. */ int p; /* Size of the significant of a NaN, in digits of radix B. */ int pnan; /* The minimum negative integer, x, such that b**(x-1) is normalized. */ int emin; /* The maximum integer, x, such that b**(x-1) is representable. */ int emax; /* The bit position of the sign bit, or -1 for a complex encoding. */ int signbit; /* Properties of the format. */ bool has_nans; bool has_inf; bool has_denorm; bool has_signed_zero; bool qnan_msb_set; }; /* The target format used for each floating floating point mode. Indexed by MODE - QFmode. */ extern const struct real_format * real_format_for_mode[MAX_MODE_FLOAT - MIN_MODE_FLOAT + 1]; #define REAL_MODE_FORMAT(MODE) (real_format_for_mode[(MODE) - MIN_MODE_FLOAT]) /* Declare functions in real.c. */ /* Binary or unary arithmetic on tree_code. */ extern void real_arithmetic (REAL_VALUE_TYPE *, int, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); /* Compare reals by tree_code. */ extern bool real_compare (int, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); /* Determine whether a floating-point value X is infinite. */ extern bool real_isinf (const REAL_VALUE_TYPE *); /* Determine whether a floating-point value X is a NaN. */ extern bool real_isnan (const REAL_VALUE_TYPE *); /* Determine whether a floating-point value X is negative. */ extern bool real_isneg (const REAL_VALUE_TYPE *); /* Determine whether a floating-point value X is minus zero. */ extern bool real_isnegzero (const REAL_VALUE_TYPE *); /* Compare two floating-point objects for bitwise identity. */ extern bool real_identical (const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); /* Extend or truncate to a new mode. */ extern void real_convert (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALUE_TYPE *); /* Return true if truncating to NEW is exact. */ extern bool exact_real_truncate (enum machine_mode, const REAL_VALUE_TYPE *); /* Render R as a decimal floating point constant. */ extern void real_to_decimal (char *, const REAL_VALUE_TYPE *, size_t, size_t, int); /* Render R as a hexadecimal floating point constant. */ extern void real_to_hexadecimal (char *, const REAL_VALUE_TYPE *, size_t, size_t, int); /* Render R as an integer. */ extern HOST_WIDE_INT real_to_integer (const REAL_VALUE_TYPE *); extern void real_to_integer2 (HOST_WIDE_INT *, HOST_WIDE_INT *, const REAL_VALUE_TYPE *); /* Initialize R from a decimal or hexadecimal string. */ extern void real_from_string (REAL_VALUE_TYPE *, const char *); /* Initialize R from an integer pair HIGH/LOW. 
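   (An editorially added usage sketch for the interfaces above follows; the
   declaration this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: parse two decimal
   literals, add them, render the sum, and compare them, using only the
   declarations above.  "x", "y", "sum" and "buf" are hypothetical locals;
   the real_to_decimal argument values follow the common GCC idiom and are
   assumed here.  */
#if 0
static void
example_real_usage (void)
{
  REAL_VALUE_TYPE x, y, sum;
  char buf[64];

  real_from_string (&x, "1.5");
  real_from_string (&y, "2.25");
  real_arithmetic (&sum, PLUS_EXPR, &x, &y);        /* sum = x + y */
  real_to_decimal (buf, &sum, sizeof (buf), 0, 1);  /* e.g. "3.75" */
  if (!real_compare (LT_EXPR, &x, &y))              /* 1.5 < 2.25 */
    abort ();
}
#endif

/* End of editorial sketch.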
*/ extern void real_from_integer (REAL_VALUE_TYPE *, enum machine_mode, unsigned HOST_WIDE_INT, HOST_WIDE_INT, int); extern long real_to_target_fmt (long *, const REAL_VALUE_TYPE *, const struct real_format *); extern long real_to_target (long *, const REAL_VALUE_TYPE *, enum machine_mode); extern void real_from_target_fmt (REAL_VALUE_TYPE *, const long *, const struct real_format *); extern void real_from_target (REAL_VALUE_TYPE *, const long *, enum machine_mode); extern void real_inf (REAL_VALUE_TYPE *); extern bool real_nan (REAL_VALUE_TYPE *, const char *, int, enum machine_mode); extern void real_maxval (REAL_VALUE_TYPE *, int, enum machine_mode); extern void real_2expN (REAL_VALUE_TYPE *, int); extern unsigned int real_hash (const REAL_VALUE_TYPE *); /* Target formats defined in real.c. */ extern const struct real_format ieee_single_format; extern const struct real_format mips_single_format; extern const struct real_format ieee_double_format; extern const struct real_format mips_double_format; extern const struct real_format ieee_extended_motorola_format; extern const struct real_format ieee_extended_intel_96_format; extern const struct real_format ieee_extended_intel_96_round_53_format; extern const struct real_format ieee_extended_intel_128_format; extern const struct real_format ibm_extended_format; extern const struct real_format mips_extended_format; extern const struct real_format ieee_quad_format; extern const struct real_format mips_quad_format; extern const struct real_format vax_f_format; extern const struct real_format vax_d_format; extern const struct real_format vax_g_format; extern const struct real_format i370_single_format; extern const struct real_format i370_double_format; extern const struct real_format c4x_single_format; extern const struct real_format c4x_extended_format; extern const struct real_format real_internal_format; /* ====================================================================== */ /* Crap. */ #define REAL_ARITHMETIC(value, code, d1, d2) \ real_arithmetic (&(value), code, &(d1), &(d2)) #define REAL_VALUES_IDENTICAL(x, y) real_identical (&(x), &(y)) #define REAL_VALUES_EQUAL(x, y) real_compare (EQ_EXPR, &(x), &(y)) #define REAL_VALUES_LESS(x, y) real_compare (LT_EXPR, &(x), &(y)) /* Determine whether a floating-point value X is infinite. */ #define REAL_VALUE_ISINF(x) real_isinf (&(x)) /* Determine whether a floating-point value X is a NaN. */ #define REAL_VALUE_ISNAN(x) real_isnan (&(x)) /* Determine whether a floating-point value X is negative. */ #define REAL_VALUE_NEGATIVE(x) real_isneg (&(x)) /* Determine whether a floating-point value X is minus zero. */ #define REAL_VALUE_MINUS_ZERO(x) real_isnegzero (&(x)) /* IN is a REAL_VALUE_TYPE. OUT is an array of longs. */ #define REAL_VALUE_TO_TARGET_LONG_DOUBLE(IN, OUT) \ real_to_target (OUT, &(IN), \ mode_for_size (LONG_DOUBLE_TYPE_SIZE, MODE_FLOAT, 0)) #define REAL_VALUE_TO_TARGET_DOUBLE(IN, OUT) \ real_to_target (OUT, &(IN), mode_for_size (64, MODE_FLOAT, 0)) /* IN is a REAL_VALUE_TYPE. OUT is a long. 
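   (An editorially added usage sketch for the conversion macros above
   follows; the macro this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: obtaining the target image
   of a double-precision constant.  real_to_target is assumed here to pack
   32 bits per element, the usual convention, so two host longs hold a
   64-bit double; "r", "words" and the function name are hypothetical.  */
#if 0
static void
example_encode_double (REAL_VALUE_TYPE r)
{
  long words[2];

  /* Fill WORDS with R in the target's double format, independently of the
     host's own floating-point representation.  */
  REAL_VALUE_TO_TARGET_DOUBLE (r, words);
}
#endif

/* End of editorial sketch.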
*/ #define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) \ ((OUT) = real_to_target (NULL, &(IN), mode_for_size (32, MODE_FLOAT, 0))) #define REAL_VALUE_FROM_INT(r, lo, hi, mode) \ real_from_integer (&(r), mode, lo, hi, 0) #define REAL_VALUE_FROM_UNSIGNED_INT(r, lo, hi, mode) \ real_from_integer (&(r), mode, lo, hi, 1) extern REAL_VALUE_TYPE real_value_truncate (enum machine_mode, REAL_VALUE_TYPE); #define REAL_VALUE_TO_INT(plow, phigh, r) \ real_to_integer2 (plow, phigh, &(r)) extern REAL_VALUE_TYPE real_arithmetic2 (int, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); #define REAL_VALUE_NEGATE(X) \ real_arithmetic2 (NEGATE_EXPR, &(X), NULL) #define REAL_VALUE_ABS(X) \ real_arithmetic2 (ABS_EXPR, &(X), NULL) extern int significand_size (enum machine_mode); extern REAL_VALUE_TYPE real_from_string2 (const char *, enum machine_mode); #define REAL_VALUE_ATOF(s, m) \ real_from_string2 (s, m) #define CONST_DOUBLE_ATOF(s, m) \ CONST_DOUBLE_FROM_REAL_VALUE (real_from_string2 (s, m), m) #define REAL_VALUE_FIX(r) \ real_to_integer (&(r)) /* ??? Not quite right. */ #define REAL_VALUE_UNSIGNED_FIX(r) \ real_to_integer (&(r)) /* ??? These were added for Paranoia support. */ /* Return floor log2(R). */ extern int real_exponent (const REAL_VALUE_TYPE *); /* R = A * 2**EXP. */ extern void real_ldexp (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, int); /* **** End of software floating point emulator interface macros **** */ /* Constant real values 0, 1, 2, 3, 10, -1, -2, 0.5 and 1/3. */ extern REAL_VALUE_TYPE dconst0; extern REAL_VALUE_TYPE dconst1; extern REAL_VALUE_TYPE dconst2; extern REAL_VALUE_TYPE dconst3; extern REAL_VALUE_TYPE dconst10; extern REAL_VALUE_TYPE dconstm1; extern REAL_VALUE_TYPE dconstm2; extern REAL_VALUE_TYPE dconsthalf; extern REAL_VALUE_TYPE dconstthird; extern REAL_VALUE_TYPE dconstpi; extern REAL_VALUE_TYPE dconste; /* Function to return a real value (not a tree node) from a given integer constant. */ REAL_VALUE_TYPE real_value_from_int_cst (tree, tree); /* Given a CONST_DOUBLE in FROM, store into TO the value it represents. */ #define REAL_VALUE_FROM_CONST_DOUBLE(to, from) \ memcpy (&(to), &CONST_DOUBLE_LOW ((from)), sizeof (REAL_VALUE_TYPE)) /* Return a CONST_DOUBLE with value R and mode M. */ #define CONST_DOUBLE_FROM_REAL_VALUE(r, m) \ const_double_from_real_value (r, m) extern rtx const_double_from_real_value (REAL_VALUE_TYPE, enum machine_mode); /* Replace R by 1/R in the given machine mode, if the result is exact. */ extern bool exact_real_inverse (enum machine_mode, REAL_VALUE_TYPE *); /* In tree.c: wrap up a REAL_VALUE_TYPE in a tree node. */ extern tree build_real (tree, REAL_VALUE_TYPE); /* Calculate R as the square root of X in the given machine mode. */ extern bool real_sqrt (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALUE_TYPE *); /* Calculate R as X raised to the integer exponent N in mode MODE. */ extern bool real_powi (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALUE_TYPE *, HOST_WIDE_INT); /* Standard round to integer value functions. */ extern void real_trunc (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALUE_TYPE *); extern void real_floor (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALUE_TYPE *); extern void real_ceil (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALUE_TYPE *); extern void real_round (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALUE_TYPE *); /* Set the sign of R to the sign of X. */ extern void real_copysign (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); #endif /* ! 
GCC_REAL_H */ /* Data structure definitions for a generic GCC target. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ /* This file contains a data structure that describes a GCC target. At present it is incomplete, but in future it should grow to contain most or all target machine and target O/S specific information. This structure has its initializer declared in target-def.h in the form of large macro TARGET_INITIALIZER that expands to many smaller macros. The smaller macros each initialize one component of the structure, and each has a default. Each target should have a file that includes target.h and target-def.h, and overrides any inappropriate defaults by undefining the relevant macro and defining a suitable replacement. That file should then contain the definition of "targetm" like so: struct gcc_target targetm = TARGET_INITIALIZER; Doing things this way allows us to bring together everything that defines a GCC target. By supplying a default that is appropriate to most targets, we can easily add new items without needing to edit dozens of target configuration files. It should also allow us to gradually reduce the amount of conditional compilation that is scattered throughout GCC. */ #ifndef GCC_TARGET_H #define GCC_TARGET_H struct gcc_target { /* Functions that output assembler for the target. */ struct asm_out { /* Opening and closing parentheses for asm expression grouping. */ const char *open_paren, *close_paren; /* Assembler instructions for creating various kinds of integer object. */ const char *byte_op; struct asm_int_op { const char *hi; const char *si; const char *di; const char *ti; } aligned_op, unaligned_op; /* Try to output the assembler code for an integer object whose value is given by X. SIZE is the size of the object in bytes and ALIGNED_P indicates whether it is aligned. Return true if successful. Only handles cases for which BYTE_OP, ALIGNED_OP and UNALIGNED_OP are NULL. */ bool (* integer) (rtx x, unsigned int size, int aligned_p); /* Output code that will globalize a label. */ void (* globalize_label) (FILE *, const char *); /* Output code that will emit a label for unwind info, if this target requires such labels. Second argument is the decl the unwind info is associated with, third is a boolean: true if this is for exception handling, fourth is a boolean: true if this is only a placeholder for an omitted FDE. */ void (* unwind_label) (FILE *, tree, int, int); /* Output an internal label. */ void (* internal_label) (FILE *, const char *, unsigned long); /* Emit an assembler directive to set visibility for the symbol associated with the tree decl. 
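   (An editorially added sketch of the targetm pattern described near the
   top of this header follows; the hook fields then resume.)  */

/* Illustrative sketch, not from the GCC sources: a target's own source file
   includes target.h and target-def.h, overrides the macros it needs, and
   then instantiates the vector exactly as the comment above describes.
   example_globalize_label is a hypothetical function, and the macro
   spelling TARGET_ASM_GLOBALIZE_LABEL is the target-def.h convention
   assumed here.  */
#if 0
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL example_globalize_label

struct gcc_target targetm = TARGET_INITIALIZER;
#endif

/* End of editorial sketch.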
*/ void (* visibility) (tree, int); /* Output the assembler code for entry to a function. */ void (* function_prologue) (FILE *, HOST_WIDE_INT); /* Output the assembler code for end of prologue. */ void (* function_end_prologue) (FILE *); /* Output the assembler code for start of epilogue. */ void (* function_begin_epilogue) (FILE *); /* Output the assembler code for function exit. */ void (* function_epilogue) (FILE *, HOST_WIDE_INT); /* Switch to an arbitrary section NAME with attributes as specified by FLAGS. */ void (* named_section) (const char *, unsigned int); /* Switch to the section that holds the exception table. */ void (* exception_section) (void); /* Switch to the section that holds the exception frames. */ void (* eh_frame_section) (void); /* Select and switch to a section for EXP. It may be a DECL or a constant. RELOC is nonzero if runtime relocations must be applied; bit 1 will be set if the runtime relocations require non-local name resolution. ALIGN is the required alignment of the data. */ void (* select_section) (tree, int, unsigned HOST_WIDE_INT); /* Select and switch to a section for X with MODE. ALIGN is the desired alignment of the data. */ void (* select_rtx_section) (enum machine_mode, rtx, unsigned HOST_WIDE_INT); /* Select a unique section name for DECL. RELOC is the same as for SELECT_SECTION. */ void (* unique_section) (tree, int); /* Output a constructor for a symbol with a given priority. */ void (* constructor) (rtx, int); /* Output a destructor for a symbol with a given priority. */ void (* destructor) (rtx, int); /* Output the assembler code for a thunk function. THUNK_DECL is the declaration for the thunk function itself, FUNCTION is the decl for the target function. DELTA is an immediate constant offset to be added to THIS. If VCALL_OFFSET is nonzero, the word at *(*this + vcall_offset) should be added to THIS. */ void (* output_mi_thunk) (FILE *file, tree thunk_decl, HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, tree function_decl); /* Determine whether output_mi_thunk would succeed. */ /* ??? Ideally, this hook would not exist, and success or failure would be returned from output_mi_thunk directly. But there's too much undo-able setup involved in invoking output_mi_thunk. Could be fixed by making output_mi_thunk emit rtl instead of text to the output file. */ bool (* can_output_mi_thunk) (tree thunk_decl, HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, tree function_decl); /* Output any boilerplate text needed at the beginning of a translation unit. */ void (*file_start) (void); /* Output any boilerplate text needed at the end of a translation unit. */ void (*file_end) (void); /* Output an assembler pseudo-op to declare a library function name external. */ void (*external_libcall) (rtx); } asm_out; /* Functions relating to instruction scheduling. */ struct sched { /* Given the current cost, COST, of an insn, INSN, calculate and return a new cost based on its relationship to DEP_INSN through the dependence LINK. The default is to make no adjustment. */ int (* adjust_cost) (rtx insn, rtx link, rtx def_insn, int cost); /* Adjust the priority of an insn as you see fit. Returns the new priority. */ int (* adjust_priority) (rtx, int); /* Function which returns the maximum number of insns that can be scheduled in the same machine cycle. This must be constant over an entire compilation. The default is 1. */ int (* issue_rate) (void); /* Calculate how much this insn affects how many more insns we can emit this cycle. Default is they all cost the same. 
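   (An editorially added sketch for the scheduling hooks above follows; the
   field this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: a dual-issue target
   supplies an issue_rate hook that returns 2.  example_issue_rate is a
   hypothetical function, and the TARGET_SCHED_ISSUE_RATE macro spelling is
   the target-def.h convention assumed here.  */
#if 0
static int
example_issue_rate (void)
{
  return 2;   /* at most two insns per machine cycle */
}

#undef  TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE example_issue_rate
#endif

/* End of editorial sketch.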
*/ int (* variable_issue) (FILE *, int, rtx, int); /* Initialize machine-dependent scheduling code. */ void (* md_init) (FILE *, int, int); /* Finalize machine-dependent scheduling code. */ void (* md_finish) (FILE *, int); /* Initialize machine-dependent function while scheduling code. */ void (* md_init_global) (FILE *, int, int); /* Finalize machine-dependent function wide scheduling code. */ void (* md_finish_global) (FILE *, int); /* Reorder insns in a machine-dependent fashion, in two different places. Default does nothing. */ int (* reorder) (FILE *, int, rtx *, int *, int); int (* reorder2) (FILE *, int, rtx *, int *, int); /* The following member value is a pointer to a function called after evaluation forward dependencies of insns in chain given by two parameter values (head and tail correspondingly). */ void (* dependencies_evaluation_hook) (rtx, rtx); /* The following member value is a pointer to a function returning nonzero if we should use DFA based scheduling. The default is to use the old pipeline scheduler. */ int (* use_dfa_pipeline_interface) (void); /* The values of all the following members are used only for the DFA based scheduler: */ /* The values of the following four members are pointers to functions used to simplify the automaton descriptions. dfa_pre_cycle_insn and dfa_post_cycle_insn give functions returning insns which are used to change the pipeline hazard recognizer state when the new simulated processor cycle correspondingly starts and finishes. The function defined by init_dfa_pre_cycle_insn and init_dfa_post_cycle_insn are used to initialize the corresponding insns. The default values of the members result in not changing the automaton state when the new simulated processor cycle correspondingly starts and finishes. */ void (* init_dfa_pre_cycle_insn) (void); rtx (* dfa_pre_cycle_insn) (void); void (* init_dfa_post_cycle_insn) (void); rtx (* dfa_post_cycle_insn) (void); /* The following member value is a pointer to a function returning value which defines how many insns in queue `ready' will we try for multi-pass scheduling. If the member value is nonzero and the function returns positive value, the DFA based scheduler will make multi-pass scheduling for the first cycle. In other words, we will try to choose ready insn which permits to start maximum number of insns on the same cycle. */ int (* first_cycle_multipass_dfa_lookahead) (void); /* The following member value is pointer to a function controlling what insns from the ready insn queue will be considered for the multipass insn scheduling. If the hook returns zero for insn passed as the parameter, the insn will be not chosen to be issued. */ int (* first_cycle_multipass_dfa_lookahead_guard) (rtx); /* The following member value is pointer to a function called by the insn scheduler before issuing insn passed as the third parameter on given cycle. If the hook returns nonzero, the insn is not issued on given processors cycle. Instead of that, the processor cycle is advanced. If the value passed through the last parameter is zero, the insn ready queue is not sorted on the new cycle start as usually. The first parameter passes file for debugging output. The second one passes the scheduler verbose level of the debugging output. The forth and the fifth parameter values are correspondingly processor cycle on which the previous insn has been issued and the current processor cycle. 
*/ int (* dfa_new_cycle) (FILE *, int, rtx, int, int, int *); /* The values of the following members are pointers to functions used to improve the first cycle multipass scheduling by inserting nop insns. dfa_scheduler_bubble gives a function returning a nop insn with given index. The indexes start with zero. The function should return NULL if there are no more nop insns with indexes greater than given index. To initialize the nop insn the function given by member init_dfa_scheduler_bubbles is used. The default values of the members result in not inserting nop insns during the multipass scheduling. */ void (* init_dfa_bubbles) (void); rtx (* dfa_bubble) (int); /* The following member value is a pointer to a function called by the insn scheduler. It should return true if there exists a dependence which is considered costly by the target, between the insn passed as the first parameter, and the insn passed as the second parameter. The third parameter is the INSN_DEPEND link that represents the dependence between the two insns. The fourth argument is the cost of the dependence as estimated by the scheduler. The last argument is the distance in cycles between the already scheduled insn (first parameter) and the the second insn (second parameter). */ bool (* is_costly_dependence) (rtx, rtx, rtx, int, int); } sched; /* Given two decls, merge their attributes and return the result. */ tree (* merge_decl_attributes) (tree, tree); /* Given two types, merge their attributes and return the result. */ tree (* merge_type_attributes) (tree, tree); /* Table of machine attributes and functions to handle them. Ignored if NULL. */ const struct attribute_spec *attribute_table; /* Return zero if the attributes on TYPE1 and TYPE2 are incompatible, one if they are compatible and two if they are nearly compatible (which causes a warning to be generated). */ int (* comp_type_attributes) (tree type1, tree type2); /* Assign default attributes to the newly defined TYPE. */ void (* set_default_type_attributes) (tree type); /* Insert attributes on the newly created DECL. */ void (* insert_attributes) (tree decl, tree *attributes); /* Return true if FNDECL (which has at least one machine attribute) can be inlined despite its machine attributes, false otherwise. */ bool (* function_attribute_inlinable_p) (tree fndecl); /* Return true if bitfields in RECORD_TYPE should follow the Microsoft Visual C++ bitfield layout rules. */ bool (* ms_bitfield_layout_p) (tree record_type); /* Return true if anonymous bitfields affect structure alignment. */ bool (* align_anon_bitfield) (void); /* Set up target-specific built-in functions. */ void (* init_builtins) (void); /* Expand a target-specific builtin. */ rtx (* expand_builtin) (tree exp, rtx target, rtx subtarget, enum machine_mode mode, int ignore); /* For a vendor-specific fundamental TYPE, return a pointer to a statically-allocated string containing the C++ mangling for TYPE. In all other cases, return NULL. */ const char * (* mangle_fundamental_type) (tree type); /* Make any adjustments to libfunc names needed for this target. */ void (* init_libfuncs) (void); /* Given a decl, a section name, and whether the decl initializer has relocs, choose attributes for the section. */ /* ??? Should be merged with SELECT_SECTION and UNIQUE_SECTION. */ unsigned int (* section_type_flags) (tree, const char *, int); /* True if new jumps cannot be created, to replace existing ones or not, at the current point in the compilation. 
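   (An editorially added sketch for the attribute hooks above follows; the
   field this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: a comp_type_attributes
   implementation following the documented convention, where 0 means the
   attributes are incompatible, 1 compatible, and 2 nearly compatible
   (which draws a warning).  The function name and the policy are
   hypothetical.  */
#if 0
static int
example_comp_type_attributes (tree type1 ATTRIBUTE_UNUSED,
                              tree type2 ATTRIBUTE_UNUSED)
{
  /* Hypothetical policy: every attribute combination is compatible.  */
  return 1;
}
#endif

/* End of editorial sketch.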
*/ bool (* cannot_modify_jumps_p) (void); /* Return a register class for which branch target register optimizations should be applied. */ int (* branch_target_register_class) (void); /* Return true if branch target register optimizations should include callee-saved registers that are not already live during the current function. AFTER_PE_GEN is true if prologues and epilogues have already been generated. */ bool (* branch_target_register_callee_saved) (bool after_pe_gen); /* True if the constant X cannot be placed in the constant pool. */ bool (* cannot_force_const_mem) (rtx); /* True if the insn X cannot be duplicated. */ bool (* cannot_copy_insn_p) (rtx); /* Given an address RTX, undo the effects of LEGITIMIZE_ADDRESS. */ rtx (* delegitimize_address) (rtx); /* True if it is OK to do sibling call optimization for the specified call expression EXP. DECL will be the called function, or NULL if this is an indirect call. */ bool (*function_ok_for_sibcall) (tree decl, tree exp); /* True if EXP should be placed in a "small data" section. */ bool (* in_small_data_p) (tree); /* True if EXP names an object for which name resolution must resolve to the current module. */ bool (* binds_local_p) (tree); /* Do something target-specific to record properties of the DECL into the associated SYMBOL_REF. */ void (* encode_section_info) (tree, rtx, int); /* Undo the effects of encode_section_info on the symbol string. */ const char * (* strip_name_encoding) (const char *); /* True if MODE is valid for a pointer in __attribute__((mode("MODE"))). */ bool (* valid_pointer_mode) (enum machine_mode mode); /* True if a vector is opaque. */ bool (* vector_opaque_p) (tree); /* Compute a (partial) cost for rtx X. Return true if the complete cost has been computed, and false if subexpressions should be scanned. In either case, *TOTAL contains the cost result. */ /* Note that CODE and OUTER_CODE ought to be RTX_CODE, but that's not necessarily defined at this point. */ bool (* rtx_costs) (rtx x, int code, int outer_code, int *total); /* Compute the cost of X, used as an address. Never called with invalid addresses. */ int (* address_cost) (rtx x); /* Given a register, this hook should return a parallel of registers to represent where to find the register pieces. Define this hook if the register and its mode are represented in Dwarf in non-contiguous locations, or if the register should be represented in more than one register in Dwarf. Otherwise, this hook should return NULL_RTX. */ rtx (* dwarf_register_span) (rtx); /* Fetch the fixed register(s) which hold condition codes, for targets where it makes sense to look for duplicate assignments to the condition codes. This should return true if there is such a register, false otherwise. The arguments should be set to the fixed register numbers. Up to two condition code registers are supported. If there is only one for this target, the int pointed at by the second argument should be set to -1. */ bool (* fixed_condition_code_regs) (unsigned int *, unsigned int *); /* If two condition code modes are compatible, return a condition code mode which is compatible with both, such that a comparison done in the returned mode will work for both of the original modes. If the condition code modes are not compatible, return VOIDmode. */ enum machine_mode (* cc_modes_compatible) (enum machine_mode, enum machine_mode); /* Do machine-dependent code transformations. Called just before delayed-branch scheduling. 
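   (An editorially added sketch for the cost hooks above follows; the field
   this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: an rtx_costs hook that
   prices multiplications and defers everything else to the generic
   subexpression scan.  The function name and the cost of 4 are
   hypothetical; COSTS_N_INSNS is the usual rtl.h helper, assumed here.  */
#if 0
static bool
example_rtx_costs (rtx x ATTRIBUTE_UNUSED, int code,
                   int outer_code ATTRIBUTE_UNUSED, int *total)
{
  switch (code)
    {
    case MULT:
      /* A multiply costs roughly four simple insns; its operands still
         need costing, so report false and let the caller scan them.  */
      *total = COSTS_N_INSNS (4);
      return false;

    default:
      return false;   /* generic handling for everything else */
    }
}
#endif

/* End of editorial sketch.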
*/ void (* machine_dependent_reorg) (void); /* Create the __builtin_va_list type. */ tree (* build_builtin_va_list) (void); /* Validity-checking routines for PCH files, target-specific. get_pch_validity returns a pointer to the data to be stored, and stores the size in its argument. pch_valid_p gets the same information back and returns NULL if the PCH is valid, or an error message if not. */ void * (* get_pch_validity) (size_t *); const char * (* pch_valid_p) (const void *, size_t); /* True if the compiler should give an enum type only as many bytes as it takes to represent the range of possible values of that type. */ bool (* default_short_enums) (void); /* This target hook returns an rtx that is used to store the address of the current frame into the built-in setjmp buffer. */ rtx (* builtin_setjmp_frame_value) (void); /* This target hook should add STRING_CST trees for any hard regs the port wishes to automatically clobber for all asms. */ tree (* md_asm_clobbers) (tree); /* Functions relating to calls - argument passing, returns, etc. */ struct calls { bool (*promote_function_args) (tree fntype); bool (*promote_function_return) (tree fntype); bool (*promote_prototypes) (tree fntype); rtx (*struct_value_rtx) (tree fndecl, int incoming); bool (*return_in_memory) (tree type, tree fndecl); bool (*return_in_msb) (tree type); rtx (*expand_builtin_saveregs) (void); /* Returns pretend_argument_size. */ void (*setup_incoming_varargs) (CUMULATIVE_ARGS *ca, enum machine_mode mode, tree type, int *pretend_arg_size, int second_time); bool (*strict_argument_naming) (CUMULATIVE_ARGS *ca); /* Returns true if we should use targetm.calls.setup_incoming_varargs() and/or targetm.calls.strict_argument_naming(). */ bool (*pretend_outgoing_varargs_named) (CUMULATIVE_ARGS *ca); /* Given a complex type T, return true if a parameter of type T should be passed as two scalars. */ bool (* split_complex_arg) (tree type); /* Gimplifies a VA_ARG_EXPR. */ tree (* gimplify_va_arg_expr) (tree valist, tree type, tree *pre_p, tree *post_p); } calls; /* Functions specific to the C++ frontend. */ struct cxx { /* Return the integer type used for guard variables. */ tree (*guard_type) (void); /* Return true if only the low bit of the guard should be tested. */ bool (*guard_mask_bit) (void); /* Returns the size of the array cookie for an array of type. */ tree (*get_cookie_size) (tree); /* Returns true if the element size should be stored in the array cookie. */ bool (*cookie_has_size) (void); /* Allows backends to perform additional processing when deciding if a class should be exported or imported. */ int (*import_export_class) (tree, int); } cxx; /* Leave the boolean fields at the end. */ /* True if arbitrary sections are supported. */ bool have_named_sections; /* True if "native" constructors and destructors are supported, false if we're using collect2 for the job. */ bool have_ctors_dtors; /* True if thread-local storage is supported. */ bool have_tls; /* True if a small readonly data section is supported. */ bool have_srodata_section; /* True if EH frame info sections should be zero-terminated. */ bool terminate_dw2_eh_frame_info; /* True if #NO_APP should be emitted at the beginning of assembly output. */ bool file_start_app_off; /* True if output_file_directive should be called for main_input_filename at the beginning of assembly output. */ bool file_start_file_directive; /* True if #pragma redefine_extname is to be supported. */ bool handle_pragma_redefine_extname; /* True if #pragma extern_prefix is to be supported. 
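   (An editorially added sketch for the calls hooks above follows; the field
   this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: a return_in_memory hook
   implementing a made-up ABI rule that aggregates wider than two words are
   returned through a hidden pointer.  int_size_in_bytes and UNITS_PER_WORD
   are the usual helpers, assumed here; the function name is hypothetical.  */
#if 0
static bool
example_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
{
  return int_size_in_bytes (type) > 2 * UNITS_PER_WORD;
}
#endif

/* End of editorial sketch.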
*/ bool handle_pragma_extern_prefix; /* True if the RTL prologue and epilogue should be expanded after all passes that modify the instructions (and not merely reorder them) have been run. */ bool late_rtl_prologue_epilogue; /* Leave the boolean fields at the end. */ }; extern struct gcc_target targetm; #endif /* GCC_TARGET_H */ static struct gimplify_ctx { tree current_bind_expr; bool save_stack; tree temps; tree conditional_cleanups; int conditions; tree exit_label; tree return_temp; varray_type case_labels; /* The formal temporary table. Should this be persistent? */ htab_t temp_htab; } *gimplify_ctxp; /* Formal (expression) temporary table handling: Multiple occurrences of the same scalar expression are evaluated into the same temporary. */ typedef struct gimple_temp_hash_elt { tree val; /* Key */ tree temp; /* Value */ } elt_t; /* Forward declarations. */ static enum gimplify_status gimplify_modify_expr_rhs (tree *, tree *, tree *, tree *, tree *, bool); static enum gimplify_status gimplify_compound_expr (tree *, tree *, bool); /* Return a hash value for a formal temporary table entry. */ static hashval_t gimple_tree_hash (const void *p) { tree t = ((const elt_t *) p)->val; return iterative_hash_expr (t, 0); } /* Compare two formal temporary table entries. */ static int gimple_tree_eq (const void *p1, const void *p2) { tree t1 = ((const elt_t *) p1)->val; tree t2 = ((const elt_t *) p2)->val; enum tree_code code = TREE_CODE (t1); if (TREE_CODE (t2) != code || TREE_TYPE (t1) != TREE_TYPE (t2)) return 0; if (!operand_equal_p (t1, t2, 0)) return 0; /* Only allow them to compare equal if they also hash equal; otherwise results are nondeterminate, and we fail bootstrap comparison. */ if (gimple_tree_hash (p1) != gimple_tree_hash (p2)) abort (); return 1; } /* Set up a context for the gimplifier. */ void push_gimplify_context (void) { if (gimplify_ctxp) abort (); gimplify_ctxp = (struct gimplify_ctx *) xcalloc (1, sizeof (struct gimplify_ctx)); gimplify_ctxp->temp_htab = htab_create (1000, gimple_tree_hash, gimple_tree_eq, free); } /* Tear down a context for the gimplifier. If BODY is non-null, then put the temporaries into the outer BIND_EXPR. Otherwise, put them in the unexpanded_var_list. */ void pop_gimplify_context (tree body) { if (!gimplify_ctxp || gimplify_ctxp->current_bind_expr) abort (); if (body) declare_tmp_vars (gimplify_ctxp->temps, body); else record_vars (gimplify_ctxp->temps); #if 0 if (!quiet_flag) fprintf (stderr, " collisions: %f ", htab_collisions (gimplify_ctxp->temp_htab)); #endif htab_delete (gimplify_ctxp->temp_htab); free (gimplify_ctxp); gimplify_ctxp = NULL; } void gimple_push_bind_expr (tree bind) { TREE_CHAIN (bind) = gimplify_ctxp->current_bind_expr; gimplify_ctxp->current_bind_expr = bind; } void gimple_pop_bind_expr (void) { gimplify_ctxp->current_bind_expr = TREE_CHAIN (gimplify_ctxp->current_bind_expr); } tree gimple_current_bind_expr (void) { return gimplify_ctxp->current_bind_expr; } /* Returns true iff there is a COND_EXPR between us and the innermost CLEANUP_POINT_EXPR. This info is used by gimple_push_cleanup. */ static bool gimple_conditional_context (void) { return gimplify_ctxp->conditions > 0; } /* Note that we've entered a COND_EXPR. */ static void gimple_push_condition (void) { ++(gimplify_ctxp->conditions); } /* Note that we've left a COND_EXPR. If we're back at unconditional scope now, add any conditional cleanups we've seen to the prequeue. 
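   (An editorially added sketch of the gimplifier context life cycle
   follows; the function this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: the usual bracketing of a
   gimplification run.  Temporaries created in between accumulate in the
   context; passing the outermost BIND_EXPR to pop_gimplify_context declares
   them there, while passing NULL_TREE records them in unexpanded_var_list
   instead.  example_gimplify_one_body is a hypothetical caller.  */
#if 0
static void
example_gimplify_one_body (tree body)
{
  push_gimplify_context ();

  /* ... gimplify statements here; create_tmp_var and friends add their
     temporaries to the context set up above ...  */

  pop_gimplify_context (body);
}
#endif

/* End of editorial sketch.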
*/ static void gimple_pop_condition (tree *pre_p) { int conds = --(gimplify_ctxp->conditions); if (conds == 0) { append_to_statement_list (gimplify_ctxp->conditional_cleanups, pre_p); gimplify_ctxp->conditional_cleanups = NULL_TREE; } else if (conds < 0) abort (); } /* A subroutine of append_to_statement_list{,_force}. */ static void append_to_statement_list_1 (tree t, tree *list_p, bool side_effects) { tree list = *list_p; tree_stmt_iterator i; if (!side_effects) return; if (!list) { if (t && TREE_CODE (t) == STATEMENT_LIST) { *list_p = t; return; } *list_p = list = alloc_stmt_list (); } i = tsi_last (list); tsi_link_after (&i, t, TSI_CONTINUE_LINKING); } /* Add T to the end of the list container pointed by LIST_P. If T is an expression with no effects, it is ignored. */ void append_to_statement_list (tree t, tree *list_p) { append_to_statement_list_1 (t, list_p, t ? TREE_SIDE_EFFECTS (t) : false); } /* Similar, but the statement is always added, regardless of side effects. */ void append_to_statement_list_force (tree t, tree *list_p) { append_to_statement_list_1 (t, list_p, t != NULL); } /* Both gimplify the statement T and append it to LIST_P. */ void gimplify_and_add (tree t, tree *list_p) { gimplify_stmt (&t); append_to_statement_list (t, list_p); } /* Strip off a legitimate source ending from the input string NAME of length LEN. Rather than having to know the names used by all of our front ends, we strip off an ending of a period followed by up to five characters. (Java uses ".class".) */ static inline void remove_suffix (char *name, int len) { int i; for (i = 2; i < 8 && len > i; i++) { if (name[len - i] == '.') { name[len - i] = '\0'; break; } } } /* Create a nameless artificial label and put it in the current function context. Returns the newly created label. */ tree create_artificial_label (void) { tree lab = build_decl (LABEL_DECL, NULL_TREE, void_type_node); DECL_ARTIFICIAL (lab) = 1; DECL_CONTEXT (lab) = current_function_decl; return lab; } /* Create a new temporary name with PREFIX. Returns an identifier. */ static GTY(()) unsigned int tmp_var_id_num; tree create_tmp_var_name (const char *prefix) { char *tmp_name; if (prefix) { char *preftmp = ASTRDUP (prefix); remove_suffix (preftmp, strlen (preftmp)); prefix = preftmp; } ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? prefix : "T", tmp_var_id_num++); return get_identifier (tmp_name); } /* Create a new temporary variable declaration of type TYPE. Does NOT push it into the current binding. */ tree create_tmp_var_raw (tree type, const char *prefix) { tree tmp_var; tree new_type; /* Make the type of the variable writable. */ new_type = build_type_variant (type, 0, 0); TYPE_ATTRIBUTES (new_type) = TYPE_ATTRIBUTES (type); tmp_var = build_decl (VAR_DECL, create_tmp_var_name (prefix), type); /* The variable was declared by the compiler. */ DECL_ARTIFICIAL (tmp_var) = 1; /* And we don't want debug info for it. */ DECL_IGNORED_P (tmp_var) = 1; /* Make the variable writable. */ TREE_READONLY (tmp_var) = 0; DECL_EXTERNAL (tmp_var) = 0; TREE_STATIC (tmp_var) = 0; TREE_USED (tmp_var) = 1; return tmp_var; } /* Create a new temporary variable declaration of type TYPE. DOES push the variable into the current binding. Further, assume that this is called only from gimplification or optimization, at which point the creation of certain types are bugs. 
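   (An editorially added usage sketch follows; the function this comment
   describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: forcing an expression into
   a fresh temporary by hand, using the helpers above.  "sketchtmp" is an
   arbitrary name prefix and example_force_into_temp is hypothetical.  */
#if 0
static tree
example_force_into_temp (tree expr, tree *pre_p)
{
  /* A fresh, compiler-generated variable of EXPR's type.  */
  tree tmp = create_tmp_var (TREE_TYPE (expr), "sketchtmp");

  /* Queue "tmp = expr" onto the pre-queue, gimplifying it as we go.  */
  gimplify_and_add (build (MODIFY_EXPR, TREE_TYPE (tmp), tmp, expr), pre_p);

  /* The temporary now stands in for the original expression.  */
  return tmp;
}
#endif

/* End of editorial sketch.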
*/ tree create_tmp_var (tree type, const char *prefix) { tree tmp_var; #if defined ENABLE_CHECKING /* We don't allow types that are addressable (meaning we can't make copies), incomplete, or of variable size. */ if (TREE_ADDRESSABLE (type) || !COMPLETE_TYPE_P (type) || TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST) abort (); #endif tmp_var = create_tmp_var_raw (type, prefix); gimple_add_tmp_var (tmp_var); return tmp_var; } /* Given a tree, try to return a useful variable name that we can use to prefix a temporary that is being assigned the value of the tree. I.E. given = &A, return A. */ const char * get_name (tree t) { tree stripped_decl; stripped_decl = t; STRIP_NOPS (stripped_decl); if (DECL_P (stripped_decl) && DECL_NAME (stripped_decl)) return IDENTIFIER_POINTER (DECL_NAME (stripped_decl)); else { switch (TREE_CODE (stripped_decl)) { case ADDR_EXPR: return get_name (TREE_OPERAND (stripped_decl, 0)); break; default: return NULL; } } } /* Create a temporary with a name derived from VAL. Subroutine of lookup_tmp_var; nobody else should call this function. */ static inline tree create_tmp_from_val (tree val) { return create_tmp_var (TREE_TYPE (val), get_name (val)); } /* Create a temporary to hold the value of VAL. If IS_FORMAL, try to reuse an existing expression temporary. */ static tree lookup_tmp_var (tree val, bool is_formal) { if (!is_formal || TREE_SIDE_EFFECTS (val)) return create_tmp_from_val (val); else { elt_t elt, *elt_p; void **slot; elt.val = val; slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT); if (*slot == NULL) { elt_p = xmalloc (sizeof (*elt_p)); elt_p->val = val; elt_p->temp = create_tmp_from_val (val); TREE_READONLY (elt_p->temp) = 1; *slot = (void *) elt_p; } else elt_p = (elt_t *) *slot; return elt_p->temp; } } /* Returns a formal temporary variable initialized with VAL. PRE_P is as in gimplify_expr. Only use this function if: 1) The value of the unfactored expression represented by VAL will not change between the initialization and use of the temporary, and 2) The temporary will not be otherwise modified. For instance, #1 means that this is inappropriate for SAVE_EXPR temps, and #2 means it is inappropriate for && temps. For other cases, use get_initialized_tmp_var instead. */ static tree internal_get_tmp_var (tree val, tree *pre_p, tree *post_p, bool is_formal) { tree t, mod; char class; gimplify_expr (&val, pre_p, post_p, is_gimple_rhs, fb_rvalue); t = lookup_tmp_var (val, is_formal); mod = build (MODIFY_EXPR, TREE_TYPE (t), t, val); class = TREE_CODE_CLASS (TREE_CODE (val)); if (EXPR_HAS_LOCATION (val)) SET_EXPR_LOCUS (mod, EXPR_LOCUS (val)); else SET_EXPR_LOCATION (mod, input_location); /* gimplify_modify_expr might want to reduce this further. */ gimplify_and_add (mod, pre_p); return t; } tree get_formal_tmp_var (tree val, tree *pre_p) { return internal_get_tmp_var (val, pre_p, NULL, true); } /* Returns a temporary variable initialized with VAL. PRE_P and POST_P are as in gimplify_expr. */ tree get_initialized_tmp_var (tree val, tree *pre_p, tree *post_p) { return internal_get_tmp_var (val, pre_p, post_p, false); } /* Returns true if T is a GIMPLE temporary variable, false otherwise. */ bool is_gimple_tmp_var (tree t) { /* FIXME this could trigger for other local artificials, too. */ return (TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t) && !TREE_STATIC (t) && !DECL_EXTERNAL (t)); } /* Declares all the variables in VARS in SCOPE. 
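   (An editorially added note on the temporary helpers above follows; the
   function this comment describes appears after it.)  */

/* Illustrative sketch, not from the GCC sources: choosing between the two
   temporary-creating interfaces above, per their documented contracts.
   example_temp_choices and its parameters are hypothetical.  */
#if 0
static void
example_temp_choices (tree val, tree *pre_p, tree *post_p)
{
  /* Only safe when VAL cannot change between initialization and use and
     the temporary is never written again; identical VALs may then share
     one read-only formal temporary.  */
  tree t1 = get_formal_tmp_var (val, pre_p);

  /* Always a fresh temporary; use this for SAVE_EXPR-like or otherwise
     mutable cases where reuse would be wrong.  */
  tree t2 = get_initialized_tmp_var (val, pre_p, post_p);

  (void) t1;
  (void) t2;
}
#endif

/* End of editorial sketch.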
*/ void declare_tmp_vars (tree vars, tree scope) { tree last = vars; if (last) { tree temps; /* C99 mode puts the default 'return 0;' for main outside the outer braces. So drill down until we find an actual scope. */ while (TREE_CODE (scope) == COMPOUND_EXPR) scope = TREE_OPERAND (scope, 0); if (TREE_CODE (scope) != BIND_EXPR) abort (); temps = nreverse (last); TREE_CHAIN (last) = BIND_EXPR_VARS (scope); BIND_EXPR_VARS (scope) = temps; } } void gimple_add_tmp_var (tree tmp) { if (TREE_CHAIN (tmp) || DECL_SEEN_IN_BIND_EXPR_P (tmp)) abort (); DECL_CONTEXT (tmp) = current_function_decl; DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1; if (gimplify_ctxp) { TREE_CHAIN (tmp) = gimplify_ctxp->temps; gimplify_ctxp->temps = tmp; } else if (cfun) record_vars (tmp); else declare_tmp_vars (tmp, DECL_SAVED_TREE (current_function_decl)); } /* Determines whether to assign a locus to the statement STMT. */ static bool should_carry_locus_p (tree stmt) { /* Don't emit a line note for a label. We particularly don't want to emit one for the break label, since it doesn't actually correspond to the beginning of the loop/switch. */ if (TREE_CODE (stmt) == LABEL_EXPR) return false; /* Do not annotate empty statements, since it confuses gcov. */ if (!TREE_SIDE_EFFECTS (stmt)) return false; return true; } static void annotate_one_with_locus (tree t, location_t locus) { if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (t))) && ! EXPR_HAS_LOCATION (t) && should_carry_locus_p (t)) SET_EXPR_LOCATION (t, locus); } void annotate_all_with_locus (tree *stmt_p, location_t locus) { tree_stmt_iterator i; if (!*stmt_p) return; for (i = tsi_start (*stmt_p); !tsi_end_p (i); tsi_next (&i)) { tree t = tsi_stmt (i); #ifdef ENABLE_CHECKING /* Assuming we've already been gimplified, we shouldn't see nested chaining constructs anymore. */ if (TREE_CODE (t) == STATEMENT_LIST || TREE_CODE (t) == COMPOUND_EXPR) abort (); #endif annotate_one_with_locus (t, locus); } } /* Similar to copy_tree_r() but do not copy SAVE_EXPR or TARGET_EXPR nodes. These nodes model computations that should only be done once. If we were to unshare something like SAVE_EXPR(i++), the gimplification process would create wrong code. */ static tree mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data) { enum tree_code code = TREE_CODE (*tp); /* Don't unshare types, decls, constants and SAVE_EXPR nodes. */ if (TREE_CODE_CLASS (code) == 't' || TREE_CODE_CLASS (code) == 'd' || TREE_CODE_CLASS (code) == 'c' || code == SAVE_EXPR || code == TARGET_EXPR /* We can't do anything sensible with a BLOCK used as an expression, but we also can't abort when we see it because of non-expression uses. So just avert our eyes and cross our fingers. Silly Java. */ || code == BLOCK) *walk_subtrees = 0; else if (code == BIND_EXPR) abort (); else copy_tree_r (tp, walk_subtrees, data); return NULL_TREE; } /* Mark all the _DECL nodes under *TP as volatile. FIXME: This must die after VA_ARG_EXPRs are properly lowered. */ static tree mark_decls_volatile_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { if (SSA_VAR_P (*tp)) TREE_THIS_VOLATILE (*tp) = 1; return NULL_TREE; } /* Callback for walk_tree to unshare most of the shared trees rooted at *TP. If *TP has been visited already (i.e., TREE_VISITED (*TP) == 1), then *TP is deep copied by calling copy_tree_r. This unshares the same trees as copy_tree_r with the exception of SAVE_EXPR nodes. These nodes model computations that should only be done once. 
If we were to unshare something like SAVE_EXPR(i++), the gimplification process would create wrong code. */ static tree copy_if_shared_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { tree t = *tp; enum tree_code code = TREE_CODE (t); /* Skip types, decls, and constants. But we do want to look at their types and the bounds of types. Mark them as visited so we properly unmark their subtrees on the unmark pass. If we've already seen them, don't look down further. */ if (TREE_CODE_CLASS (code) == 't' || TREE_CODE_CLASS (code) == 'd' || TREE_CODE_CLASS (code) == 'c') { if (TREE_VISITED (t)) *walk_subtrees = 0; else TREE_VISITED (t) = 1; } /* If this node has been visited already, unshare it and don't look any deeper. */ else if (TREE_VISITED (t)) { walk_tree (tp, mostly_copy_tree_r, NULL, NULL); *walk_subtrees = 0; } /* Otherwise, mark the tree as visited and keep looking. */ else { TREE_VISITED (t) = 1; if (TREE_CODE (*tp) == VA_ARG_EXPR && targetm.calls.gimplify_va_arg_expr == NULL) { /* Mark any _DECL inside the operand as volatile to avoid the optimizers messing around with it. We have to do this early, otherwise we might mark a variable as volatile after we gimplify other statements that use the variable assuming it's not volatile. */ /* FIXME once most targets define the above hook, this should go away (perhaps along with the #include "target.h"). */ walk_tree (&TREE_OPERAND (*tp, 0), mark_decls_volatile_r, NULL, NULL); } } return NULL_TREE; } static tree unmark_visited_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { if (TREE_VISITED (*tp)) TREE_VISITED (*tp) = 0; else *walk_subtrees = 0; return NULL_TREE; } /* Unshare all the trees in BODY_P, a pointer into the body of FNDECL, and the bodies of any nested functions if we are unsharing the entire body of FNDECL. */ static void unshare_body (tree *body_p, tree fndecl) { struct cgraph_node *cgn = cgraph_node (fndecl); walk_tree (body_p, copy_if_shared_r, NULL, NULL); if (body_p == &DECL_SAVED_TREE (fndecl)) for (cgn = cgn->nested; cgn; cgn = cgn->next_nested) unshare_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl); } /* Likewise, but mark all trees as not visited. */ static void unvisit_body (tree *body_p, tree fndecl) { struct cgraph_node *cgn = cgraph_node (fndecl); walk_tree (body_p, unmark_visited_r, NULL, NULL); if (body_p == &DECL_SAVED_TREE (fndecl)) for (cgn = cgn->nested; cgn; cgn = cgn->next_nested) unvisit_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl); } /* Unshare T and all the trees reached from T via TREE_CHAIN. */ void unshare_all_trees (tree t) { walk_tree (&t, copy_if_shared_r, NULL, NULL); walk_tree (&t, unmark_visited_r, NULL, NULL); } /* Unconditionally make an unshared copy of EXPR. This is used when using stored expressions which span multiple functions, such as BINFO_VTABLE, as the normal unsharing process can't tell that they're shared. */ tree unshare_expr (tree expr) { walk_tree (&expr, mostly_copy_tree_r, NULL, NULL); return expr; } /* A terser interface for building a representation of a exception specification. */ tree gimple_build_eh_filter (tree body, tree allowed, tree failure) { tree t; /* FIXME should the allowed types go in TREE_TYPE? 
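Either way, the node built below has (as a sketch) the shape

     TRY_CATCH_EXPR <BODY,
                     EH_FILTER_EXPR <ALLOWED, FAILURE>>

where FAILURE is the statement list to run when an exception not matching ALLOWED escapes BODY.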
*/ t = build (EH_FILTER_EXPR, void_type_node, allowed, NULL_TREE); append_to_statement_list (failure, &EH_FILTER_FAILURE (t)); t = build (TRY_CATCH_EXPR, void_type_node, NULL_TREE, t); append_to_statement_list (body, &TREE_OPERAND (t, 0)); return t; } /* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both contain statements and have a value. Assign its value to a temporary and give it void_type_node. Returns the temporary, or NULL_TREE if WRAPPER was already void. */ tree voidify_wrapper_expr (tree wrapper, tree temp) { if (!VOID_TYPE_P (TREE_TYPE (wrapper))) { tree *p, sub = wrapper; restart: /* Set p to point to the body of the wrapper. */ switch (TREE_CODE (sub)) { case BIND_EXPR: /* For a BIND_EXPR, the body is operand 1. */ p = &BIND_EXPR_BODY (sub); break; default: p = &TREE_OPERAND (sub, 0); break; } /* Advance to the last statement. Set all container types to void. */ if (TREE_CODE (*p) == STATEMENT_LIST) { tree_stmt_iterator i = tsi_last (*p); p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i); } else { for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1)) { TREE_SIDE_EFFECTS (*p) = 1; TREE_TYPE (*p) = void_type_node; } } if (p == NULL || IS_EMPTY_STMT (*p)) ; /* Look through exception handling. */ else if (TREE_CODE (*p) == TRY_FINALLY_EXPR || TREE_CODE (*p) == TRY_CATCH_EXPR) { sub = *p; goto restart; } /* The C++ frontend already did this for us. */ else if (TREE_CODE (*p) == INIT_EXPR || TREE_CODE (*p) == TARGET_EXPR) temp = TREE_OPERAND (*p, 0); /* If we're returning a dereference, move the dereference outside the wrapper. */ else if (TREE_CODE (*p) == INDIRECT_REF) { tree ptr = TREE_OPERAND (*p, 0); temp = create_tmp_var (TREE_TYPE (ptr), "retval"); *p = build (MODIFY_EXPR, TREE_TYPE (ptr), temp, ptr); temp = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (temp)), temp); /* If this is a BIND_EXPR for a const inline function, it might not have TREE_SIDE_EFFECTS set. That is no longer accurate. */ TREE_SIDE_EFFECTS (wrapper) = 1; } else { if (!temp) temp = create_tmp_var (TREE_TYPE (wrapper), "retval"); *p = build (MODIFY_EXPR, TREE_TYPE (temp), temp, *p); TREE_SIDE_EFFECTS (wrapper) = 1; } TREE_TYPE (wrapper) = void_type_node; return temp; } return NULL_TREE; } /* Prepare calls to builtins to SAVE and RESTORE the stack as well as a temporary through which they communicate. */ static void build_stack_save_restore (tree *save, tree *restore) { tree save_call, tmp_var; save_call = build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_SAVE], NULL_TREE); tmp_var = create_tmp_var (ptr_type_node, "saved_stack"); *save = build (MODIFY_EXPR, ptr_type_node, tmp_var, save_call); *restore = build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_RESTORE], tree_cons (NULL_TREE, tmp_var, NULL_TREE)); } /* Gimplify a BIND_EXPR. Just voidify and recurse. */ static enum gimplify_status gimplify_bind_expr (tree *expr_p, tree temp, tree *pre_p) { tree bind_expr = *expr_p; bool old_save_stack = gimplify_ctxp->save_stack; tree t; temp = voidify_wrapper_expr (bind_expr, temp); /* Mark variables seen in this bind expr. */ for (t = BIND_EXPR_VARS (bind_expr); t ; t = TREE_CHAIN (t)) DECL_SEEN_IN_BIND_EXPR_P (t) = 1; gimple_push_bind_expr (bind_expr); gimplify_ctxp->save_stack = false; gimplify_to_stmt_list (&BIND_EXPR_BODY (bind_expr)); if (gimplify_ctxp->save_stack) { tree stack_save, stack_restore; /* Save stack on entry and restore it on exit. Add a try_finally block to achieve this. 
Note that mudflap depends on the format of the emitted code: see mx_register_decls(). */ build_stack_save_restore (&stack_save, &stack_restore); t = build (TRY_FINALLY_EXPR, void_type_node, BIND_EXPR_BODY (bind_expr), NULL_TREE); append_to_statement_list (stack_restore, &TREE_OPERAND (t, 1)); BIND_EXPR_BODY (bind_expr) = NULL_TREE; append_to_statement_list (stack_save, &BIND_EXPR_BODY (bind_expr)); append_to_statement_list (t, &BIND_EXPR_BODY (bind_expr)); } gimplify_ctxp->save_stack = old_save_stack; gimple_pop_bind_expr (); if (temp) { *expr_p = temp; append_to_statement_list (bind_expr, pre_p); return GS_OK; } else return GS_ALL_DONE; } /* Gimplify a RETURN_EXPR. If the expression to be returned is not a GIMPLE value, it is assigned to a new temporary and the statement is re-written to return the temporary. PRE_P points to the list where side effects that must happen before STMT should be stored. */ static enum gimplify_status gimplify_return_expr (tree stmt, tree *pre_p) { tree ret_expr = TREE_OPERAND (stmt, 0); tree result_decl, result; if (!ret_expr || TREE_CODE (ret_expr) == RESULT_DECL) return GS_ALL_DONE; if (ret_expr == error_mark_node) return GS_ERROR; if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl)))) result_decl = NULL_TREE; else { result_decl = TREE_OPERAND (ret_expr, 0); #ifdef ENABLE_CHECKING if ((TREE_CODE (ret_expr) != MODIFY_EXPR && TREE_CODE (ret_expr) != INIT_EXPR) || TREE_CODE (result_decl) != RESULT_DECL) abort (); #endif } /* If aggregate_value_p is true, then we can return the bare RESULT_DECL. Recall that aggregate_value_p is FALSE for any aggregate type that is returned in registers. If we're returning values in registers, then we don't want to extend the lifetime of the RESULT_DECL, particularly across another call. In addition, for those aggregates for which hard_function_value generates a PARALLEL, we'll abort during normal expansion of structure assignments; there's special code in expand_return to handle this case that does not exist in expand_expr. */ if (!result_decl || aggregate_value_p (result_decl, TREE_TYPE (current_function_decl))) result = result_decl; else if (gimplify_ctxp->return_temp) result = gimplify_ctxp->return_temp; else { result = create_tmp_var (TREE_TYPE (result_decl), NULL); /* ??? With complex control flow (usually involving abnormal edges), we can wind up warning about an uninitialized value for this. Due to how this variable is constructed and initialized, this is never true. Give up and never warn. */ TREE_NO_WARNING (result) = 1; gimplify_ctxp->return_temp = result; } /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use. Then gimplify the whole thing. */ if (result != result_decl) TREE_OPERAND (ret_expr, 0) = result; gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p); /* If we didn't use a temporary, then the result is just the result_decl. Otherwise we need a simple copy. This should already be gimple. */ if (result == result_decl) ret_expr = result; else ret_expr = build (MODIFY_EXPR, TREE_TYPE (result), result_decl, result); TREE_OPERAND (stmt, 0) = ret_expr; return GS_ALL_DONE; } /* Gimplifies a DECL_EXPR node *STMT_P by making any necessary allocation and initialization explicit. 
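As a sketch (made-up names), a variable-sized local such as `char buf[n]' turns into gimplified size computations followed by

     __builtin_stack_alloc (&buf, D.1234);

where D.1234 holds the gimplified DECL_SIZE_UNIT of buf, and any initializer is emitted separately as a MODIFY_EXPR.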
*/ static enum gimplify_status gimplify_decl_expr (tree *stmt_p) { tree stmt = *stmt_p; tree decl = DECL_EXPR_DECL (stmt); *stmt_p = NULL_TREE; if (TREE_TYPE (decl) == error_mark_node) return GS_ERROR; else if (TREE_CODE (decl) == TYPE_DECL) gimplify_type_sizes (TREE_TYPE (decl), stmt_p); else if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl)) { tree init = DECL_INITIAL (decl); if (!TREE_CONSTANT (DECL_SIZE (decl))) { /* This is a variable-sized decl. Simplify its size and mark it for deferred expansion. Note that mudflap depends on the format of the emitted code: see mx_register_decls(). */ tree t, args; gimplify_type_sizes (TREE_TYPE (decl), stmt_p); gimplify_one_sizepos (&DECL_SIZE (decl), stmt_p); gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), stmt_p); args = tree_cons (NULL, DECL_SIZE_UNIT (decl), NULL); t = build_fold_addr_expr (decl); args = tree_cons (NULL, t, args); t = implicit_built_in_decls[BUILT_IN_STACK_ALLOC]; t = build_function_call_expr (t, args); gimplify_and_add (t, stmt_p); DECL_DEFER_OUTPUT (decl) = 1; } if (init && init != error_mark_node) { if (!TREE_STATIC (decl)) { DECL_INITIAL (decl) = NULL_TREE; init = build (MODIFY_EXPR, void_type_node, decl, init); gimplify_and_add (init, stmt_p); } else /* We must still examine initializers for static variables as they may contain a label address. */ walk_tree (&init, force_labels_r, NULL, NULL); } /* This decl isn't mentioned in the enclosing block, so add it to the list of temps. FIXME it seems a bit of a kludge to say that anonymous artificial vars aren't pushed, but everything else is. */ if (DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE) gimple_add_tmp_var (decl); } return GS_ALL_DONE; } /* Gimplify a LOOP_EXPR. Normally this just involves gimplifying the body and replacing the LOOP_EXPR with goto, but if the loop contains an EXIT_EXPR, we need to append a label for it to jump to. */ static enum gimplify_status gimplify_loop_expr (tree *expr_p, tree *pre_p) { tree saved_label = gimplify_ctxp->exit_label; tree start_label = build1 (LABEL_EXPR, void_type_node, NULL_TREE); tree jump_stmt = build_and_jump (&LABEL_EXPR_LABEL (start_label)); append_to_statement_list (start_label, pre_p); gimplify_ctxp->exit_label = NULL_TREE; gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p); if (gimplify_ctxp->exit_label) { append_to_statement_list (jump_stmt, pre_p); *expr_p = build1 (LABEL_EXPR, void_type_node, gimplify_ctxp->exit_label); } else *expr_p = jump_stmt; gimplify_ctxp->exit_label = saved_label; return GS_ALL_DONE; } /* Compare two case labels. Because the front end should already have made sure that case ranges do not overlap, it is enough to only compare the CASE_LOW values of each case label. */ static int compare_case_labels (const void *p1, const void *p2) { tree case1 = *(tree *)p1; tree case2 = *(tree *)p2; return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2)); } /* Sort the case labels in LABEL_VEC in ascending order. */ void sort_case_labels (tree label_vec) { size_t len = TREE_VEC_LENGTH (label_vec); tree default_case = TREE_VEC_ELT (label_vec, len - 1); if (CASE_LOW (default_case)) { size_t i; /* The last label in the vector should be the default case but it is not. 
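Find it and swap it into the last slot before sorting. For instance (sketch), { case 7, default, case 2 } becomes { case 7, case 2, default } after the swap below, and the qsort over the first len - 1 entries then gives { case 2, case 7, default }.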
*/ for (i = 0; i < len; ++i) { tree t = TREE_VEC_ELT (label_vec, i); if (!CASE_LOW (t)) { default_case = t; TREE_VEC_ELT (label_vec, i) = TREE_VEC_ELT (label_vec, len - 1); TREE_VEC_ELT (label_vec, len - 1) = default_case; break; } } } qsort (&TREE_VEC_ELT (label_vec, 0), len - 1, sizeof (tree), compare_case_labels); } /* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can branch to. */ static enum gimplify_status gimplify_switch_expr (tree *expr_p, tree *pre_p) { tree switch_expr = *expr_p; enum gimplify_status ret; ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL, is_gimple_val, fb_rvalue); if (SWITCH_BODY (switch_expr)) { varray_type labels, saved_labels; tree label_vec, default_case = NULL_TREE; size_t i, len; /* If someone can be bothered to fill in the labels, they can be bothered to null out the body too. */ if (SWITCH_LABELS (switch_expr)) abort (); saved_labels = gimplify_ctxp->case_labels; VARRAY_TREE_INIT (gimplify_ctxp->case_labels, 8, "case_labels"); gimplify_to_stmt_list (&SWITCH_BODY (switch_expr)); labels = gimplify_ctxp->case_labels; gimplify_ctxp->case_labels = saved_labels; len = VARRAY_ACTIVE_SIZE (labels); for (i = 0; i < len; ++i) { tree t = VARRAY_TREE (labels, i); if (!CASE_LOW (t)) { /* The default case must be the last label in the list. */ default_case = t; VARRAY_TREE (labels, i) = VARRAY_TREE (labels, len - 1); len--; break; } } label_vec = make_tree_vec (len + 1); SWITCH_LABELS (*expr_p) = label_vec; append_to_statement_list (switch_expr, pre_p); if (! default_case) { /* If the switch has no default label, add one, so that we jump around the switch body. */ default_case = build (CASE_LABEL_EXPR, void_type_node, NULL_TREE, NULL_TREE, create_artificial_label ()); append_to_statement_list (SWITCH_BODY (switch_expr), pre_p); *expr_p = build (LABEL_EXPR, void_type_node, CASE_LABEL (default_case)); } else *expr_p = SWITCH_BODY (switch_expr); for (i = 0; i < len; ++i) TREE_VEC_ELT (label_vec, i) = VARRAY_TREE (labels, i); TREE_VEC_ELT (label_vec, len) = default_case; sort_case_labels (label_vec); SWITCH_BODY (switch_expr) = NULL; } else if (!SWITCH_LABELS (switch_expr)) abort (); return ret; } static enum gimplify_status gimplify_case_label_expr (tree *expr_p) { tree expr = *expr_p; if (gimplify_ctxp->case_labels) VARRAY_PUSH_TREE (gimplify_ctxp->case_labels, expr); else abort (); *expr_p = build (LABEL_EXPR, void_type_node, CASE_LABEL (expr)); return GS_ALL_DONE; } /* Gimplify a LABELED_BLOCK_EXPR into a LABEL_EXPR following a (possibly empty) body. */ static enum gimplify_status gimplify_labeled_block_expr (tree *expr_p) { tree body = LABELED_BLOCK_BODY (*expr_p); tree label = LABELED_BLOCK_LABEL (*expr_p); tree t; DECL_CONTEXT (label) = current_function_decl; t = build (LABEL_EXPR, void_type_node, label); if (body != NULL_TREE) t = build (COMPOUND_EXPR, void_type_node, body, t); *expr_p = t; return GS_OK; } /* Gimplify a EXIT_BLOCK_EXPR into a GOTO_EXPR. */ static enum gimplify_status gimplify_exit_block_expr (tree *expr_p) { tree labeled_block = TREE_OPERAND (*expr_p, 0); tree label; /* First operand must be a LABELED_BLOCK_EXPR, which should already be lowered (or partially lowered) when we get here. */ #if defined ENABLE_CHECKING if (TREE_CODE (labeled_block) != LABELED_BLOCK_EXPR) abort (); #endif label = LABELED_BLOCK_LABEL (labeled_block); *expr_p = build1 (GOTO_EXPR, void_type_node, label); return GS_OK; } /* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first if necessary. 
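For example, gimplify_exit_expr below uses

     expr = build_and_jump (&gimplify_ctxp->exit_label);

so the shared exit label is created on first use and reused for every later EXIT_EXPR in the loop.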
*/ tree build_and_jump (tree *label_p) { if (label_p == NULL) /* If there's nowhere to jump, just fall through. */ return NULL_TREE; if (*label_p == NULL_TREE) { tree label = create_artificial_label (); *label_p = label; } return build1 (GOTO_EXPR, void_type_node, *label_p); } /* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR. This also involves building a label to jump to and communicating it to gimplify_loop_expr through gimplify_ctxp->exit_label. */ static enum gimplify_status gimplify_exit_expr (tree *expr_p) { tree cond = TREE_OPERAND (*expr_p, 0); tree expr; expr = build_and_jump (&gimplify_ctxp->exit_label); expr = build (COND_EXPR, void_type_node, cond, expr, NULL_TREE); *expr_p = expr; return GS_OK; } /* A helper function to be called via walk_tree. Mark all labels under *TP as being forced. To be called for DECL_INITIAL of static variables. */ tree force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) { if (TYPE_P (*tp)) *walk_subtrees = 0; if (TREE_CODE (*tp) == LABEL_DECL) FORCED_LABEL (*tp) = 1; return NULL_TREE; } /* *EXPR_P is a COMPONENT_REF being used as an rvalue. If its type is different from its canonical type, wrap the whole thing inside a NOP_EXPR and force the type of the COMPONENT_REF to be the canonical type. The canonical type of a COMPONENT_REF is the type of the field being referenced--unless the field is a bit-field which can be read directly in a smaller mode, in which case the canonical type is the sign-appropriate type corresponding to that mode. */ static void canonicalize_component_ref (tree *expr_p) { tree expr = *expr_p; tree type; if (TREE_CODE (expr) != COMPONENT_REF) abort (); if (INTEGRAL_TYPE_P (TREE_TYPE (expr))) type = TREE_TYPE (get_unwidened (expr, NULL_TREE)); else type = TREE_TYPE (TREE_OPERAND (expr, 1)); if (TREE_TYPE (expr) != type) { tree old_type = TREE_TYPE (expr); /* Set the type of the COMPONENT_REF to the underlying type. */ TREE_TYPE (expr) = type; /* And wrap the whole thing inside a NOP_EXPR. */ expr = build1 (NOP_EXPR, old_type, expr); *expr_p = expr; } } /* If a NOP conversion is changing a pointer to array of foo to a pointer to foo, embed that change in the ADDR_EXPR by converting T array[U]; (T *)&array ==> &array[L] where L is the lower bound. For simplicity, only do this for constant lower bound. */ static void canonicalize_addr_expr (tree *expr_p) { tree expr = *expr_p; tree ctype = TREE_TYPE (expr); tree addr_expr = TREE_OPERAND (expr, 0); tree atype = TREE_TYPE (addr_expr); tree dctype, datype, ddatype, otype, obj_expr; /* Both cast and addr_expr types should be pointers. */ if (!POINTER_TYPE_P (ctype) || !POINTER_TYPE_P (atype)) return; /* The addr_expr type should be a pointer to an array. */ datype = TREE_TYPE (atype); if (TREE_CODE (datype) != ARRAY_TYPE) return; /* Both cast and addr_expr types should address the same object type. */ dctype = TREE_TYPE (ctype); ddatype = TREE_TYPE (datype); if (!lang_hooks.types_compatible_p (ddatype, dctype)) return; /* The addr_expr and the object type should match. */ obj_expr = TREE_OPERAND (addr_expr, 0); otype = TREE_TYPE (obj_expr); if (!lang_hooks.types_compatible_p (otype, datype)) return; /* The lower bound and element sizes must be constant. */ if (TREE_CODE (TYPE_SIZE_UNIT (dctype)) != INTEGER_CST || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype)) || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST) return; /* All checks succeeded. Build a new node to merge the cast. 
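E.g. for `int a[10]', the conversion `(int *) &a' is rewritten as `&a[0]' (in general &a[L] for lower bound L), built below as an ADDR_EXPR of an ARRAY_REF.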
*/ *expr_p = build4 (ARRAY_REF, dctype, obj_expr, TYPE_MIN_VALUE (TYPE_DOMAIN (datype)), TYPE_MIN_VALUE (TYPE_DOMAIN (datype)), size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (dctype), size_int (TYPE_ALIGN (dctype) / BITS_PER_UNIT))); *expr_p = build1 (ADDR_EXPR, ctype, *expr_p); } /* *EXPR_P is a NOP_EXPR or CONVERT_EXPR. Remove it and/or other conversions underneath as appropriate. */ static enum gimplify_status gimplify_conversion (tree *expr_p) { /* If we still have a conversion at the toplevel, then strip away all but the outermost conversion. */ if (TREE_CODE (*expr_p) == NOP_EXPR || TREE_CODE (*expr_p) == CONVERT_EXPR) { STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0)); /* And remove the outermost conversion if it's useless. */ if (tree_ssa_useless_type_conversion (*expr_p)) *expr_p = TREE_OPERAND (*expr_p, 0); } /* If we still have a conversion at the toplevel, then canonicalize some constructs. */ if (TREE_CODE (*expr_p) == NOP_EXPR || TREE_CODE (*expr_p) == CONVERT_EXPR) { tree sub = TREE_OPERAND (*expr_p, 0); /* If a NOP conversion is changing the type of a COMPONENT_REF expression, then canonicalize its type now in order to expose more redundant conversions. */ if (TREE_CODE (sub) == COMPONENT_REF) canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0)); /* If a NOP conversion is changing a pointer to array of foo to a pointer to foo, embed that change in the ADDR_EXPR. */ else if (TREE_CODE (sub) == ADDR_EXPR) canonicalize_addr_expr (expr_p); } return GS_OK; } /* Reduce MIN/MAX_EXPR to a COND_EXPR for further gimplification. */ static enum gimplify_status gimplify_minimax_expr (tree *expr_p, tree *pre_p, tree *post_p) { tree op1 = TREE_OPERAND (*expr_p, 0); tree op2 = TREE_OPERAND (*expr_p, 1); enum tree_code code; enum gimplify_status r0, r1; if (TREE_CODE (*expr_p) == MIN_EXPR) code = LE_EXPR; else code = GE_EXPR; r0 = gimplify_expr (&op1, pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&op2, pre_p, post_p, is_gimple_val, fb_rvalue); *expr_p = build (COND_EXPR, TREE_TYPE (*expr_p), build (code, boolean_type_node, op1, op2), op1, op2); if (r0 == GS_ERROR || r1 == GS_ERROR) return GS_ERROR; else return GS_OK; } /* Subroutine of gimplify_compound_lval. Converts an ARRAY_REF to the equivalent *(&array + offset) form. */ static enum gimplify_status gimplify_array_ref_to_plus (tree *expr_p, tree *pre_p, tree *post_p) { tree array = TREE_OPERAND (*expr_p, 0); tree arrtype = TREE_TYPE (array); tree elttype = TREE_TYPE (arrtype); tree size = array_ref_element_size (*expr_p); tree ptrtype = build_pointer_type (elttype); enum tree_code add_code = PLUS_EXPR; tree idx = TREE_OPERAND (*expr_p, 1); tree minidx = unshare_expr (array_ref_low_bound (*expr_p)); tree offset, addr, result; enum gimplify_status ret; /* If the array domain does not start at zero, apply the offset. */ if (!integer_zerop (minidx)) { idx = convert (TREE_TYPE (minidx), idx); idx = fold (build (MINUS_EXPR, TREE_TYPE (minidx), idx, minidx)); } /* If the index is negative -- a technically invalid situation now that we've biased the index back to zero -- then casting it to unsigned has ill effects. In particular, -1*4U/4U != -1. Represent this as a subtraction of a positive rather than addition of a negative. This will prevent any conversion back to ARRAY_REF from getting the wrong results from the division. */ if (TREE_CODE (idx) == INTEGER_CST && tree_int_cst_sgn (idx) < 0) { idx = fold (build1 (NEGATE_EXPR, TREE_TYPE (idx), idx)); add_code = MINUS_EXPR; } /* Pointer arithmetic must be done in sizetype. 
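As a sketch, an access `a[i]' with 4-byte elements ends up as

     *((T *) &a + i * 4)

where the addition is a byte-offset PLUS_EXPR (or a MINUS_EXPR when the biased index was negative, as arranged above).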
*/ idx = fold_convert (sizetype, idx); /* Convert the index to a byte offset. */ offset = size_binop (MULT_EXPR, size, idx); ret = gimplify_expr (&array, pre_p, post_p, is_gimple_min_lval, fb_lvalue); if (ret == GS_ERROR) return ret; addr = build_fold_addr_expr_with_type (array, ptrtype); result = fold (build (add_code, ptrtype, addr, offset)); *expr_p = build1 (INDIRECT_REF, elttype, result); return GS_OK; } /* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR node pointed by EXPR_P. compound_lval : min_lval '[' val ']' | min_lval '.' ID | compound_lval '[' val ']' | compound_lval '.' ID This is not part of the original SIMPLE definition, which separates array and member references, but it seems reasonable to handle them together. Also, this way we don't run into problems with union aliasing; gcc requires that for accesses through a union to alias, the union reference must be explicit, which was not always the case when we were splitting up array and member refs. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. POST_P points to the list where side effects that must happen after *EXPR_P should be stored. */ static enum gimplify_status gimplify_compound_lval (tree *expr_p, tree *pre_p, tree *post_p, fallback_t fallback) { tree *p; varray_type stack; enum gimplify_status ret = GS_OK, tret; int i; #if defined ENABLE_CHECKING if (TREE_CODE (*expr_p) != ARRAY_REF && TREE_CODE (*expr_p) != ARRAY_RANGE_REF && TREE_CODE (*expr_p) != COMPONENT_REF && TREE_CODE (*expr_p) != BIT_FIELD_REF && TREE_CODE (*expr_p) != REALPART_EXPR && TREE_CODE (*expr_p) != IMAGPART_EXPR) abort (); #endif /* Create a stack of the subexpressions so later we can walk them in order from inner to outer. */ VARRAY_TREE_INIT (stack, 10, "stack"); /* We can either handle REALPART_EXPR, IMAGEPART_EXPR anything that handled_components can deal with. */ for (p = expr_p; (handled_component_p (*p) || TREE_CODE (*p) == REALPART_EXPR || TREE_CODE (*p) == IMAGPART_EXPR); p = &TREE_OPERAND (*p, 0)) VARRAY_PUSH_TREE (stack, *p); /* Now STACK is a stack of pointers to all the refs we've walked through and P points to the innermost expression. Java requires that we elaborated nodes in source order. That means we must gimplify the inner expression followed by each of the indices, in order. But we can't gimplify the inner expression until we deal with any variable bounds, sizes, or positions in order to deal with PLACEHOLDER_EXPRs. So we do this in three steps. First we deal with the annotations for any variables in the components, then we gimplify the base, then we gimplify any indices, from left to right. */ for (i = VARRAY_ACTIVE_SIZE (stack) - 1; i >= 0; i--) { tree t = VARRAY_TREE (stack, i); if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { /* Gimplify the low bound and element type size and put them into the ARRAY_REF. If these values are set, they have already been gimplified. */ if (!TREE_OPERAND (t, 2)) { tree low = unshare_expr (array_ref_low_bound (t)); if (!is_gimple_min_invariant (low)) { TREE_OPERAND (t, 2) = low; tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_tmp_var, fb_rvalue); ret = MIN (ret, tret); } } if (!TREE_OPERAND (t, 3)) { tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0))); tree elmt_size = unshare_expr (array_ref_element_size (t)); tree factor = size_int (TYPE_ALIGN (elmt_type) / BITS_PER_UNIT); /* Divide the element size by the alignment of the element type (above). 
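For instance, a 16-byte element type aligned to 8 bytes stores 16 / 8 = 2 here; that is, operand 3 keeps the element size in units of the element alignment rather than in bytes.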
*/ elmt_size = size_binop (EXACT_DIV_EXPR, elmt_size, factor); if (!is_gimple_min_invariant (elmt_size)) { TREE_OPERAND (t, 3) = elmt_size; tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p, is_gimple_tmp_var, fb_rvalue); ret = MIN (ret, tret); } } } else if (TREE_CODE (t) == COMPONENT_REF) { /* Set the field offset into T and gimplify it. */ if (!TREE_OPERAND (t, 2)) { tree offset = unshare_expr (component_ref_field_offset (t)); tree field = TREE_OPERAND (t, 1); tree factor = size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT); /* Divide the offset by its alignment. */ offset = size_binop (EXACT_DIV_EXPR, offset, factor); if (!is_gimple_min_invariant (offset)) { TREE_OPERAND (t, 2) = offset; tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_tmp_var, fb_rvalue); ret = MIN (ret, tret); } } } } /* Step 2 is to gimplify the base expression. */ tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval, fallback); ret = MIN (ret, tret); /* And finally, the indices and operands to BIT_FIELD_REF. During this loop we also remove any useless conversions. */ for (; VARRAY_ACTIVE_SIZE (stack) > 0; ) { tree t = VARRAY_TOP_TREE (stack); if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { /* Gimplify the dimension. Temporary fix for gcc.c-torture/execute/20040313-1.c. Gimplify non-constant array indices into a temporary variable. FIXME - The real fix is to gimplify post-modify expressions into a minimal gimple lvalue. However, that exposes bugs in alias analysis. The alias analyzer does not handle &PTR->FIELD very well. Will fix after the branch is merged into mainline (dnovillo 2004-05-03). */ if (!is_gimple_min_invariant (TREE_OPERAND (t, 1))) { tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p, is_gimple_tmp_var, fb_rvalue); ret = MIN (ret, tret); } } else if (TREE_CODE (t) == BIT_FIELD_REF) { tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (ret, tret); tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (ret, tret); } STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0)); /* The innermost expression P may have originally had TREE_SIDE_EFFECTS set which would have caused all the outer expressions in EXPR_P leading to P to also have had TREE_SIDE_EFFECTS set. */ recalculate_side_effects (t); VARRAY_POP (stack); } tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval, fallback); ret = MIN (ret, tret); /* If the outermost expression is a COMPONENT_REF, canonicalize its type. */ if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF) { canonicalize_component_ref (expr_p); ret = MIN (ret, GS_OK); } return ret; } /* Gimplify the self modifying expression pointed by EXPR_P (++, --, +=, -=). PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. POST_P points to the list where side effects that must happen after *EXPR_P should be stored. WANT_VALUE is nonzero iff we want to use the value of this expression in another expression. */ static enum gimplify_status gimplify_self_mod_expr (tree *expr_p, tree *pre_p, tree *post_p, bool want_value) { enum tree_code code; tree lhs, lvalue, rhs, t1; bool postfix; enum tree_code arith_code; enum gimplify_status ret; code = TREE_CODE (*expr_p); #if defined ENABLE_CHECKING if (code != POSTINCREMENT_EXPR && code != POSTDECREMENT_EXPR && code != PREINCREMENT_EXPR && code != PREDECREMENT_EXPR) abort (); #endif /* Prefix or postfix? 
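As a sketch of the difference: `b = a++' must yield the old value, so it gimplifies to roughly

     b = a;
     a = a + 1;    (queued on POST_P)

while `++a', or a postfix whose value is not used, simply becomes `a = a + 1' in place.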
*/ if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR) /* Faster to treat as prefix if result is not used. */ postfix = want_value; else postfix = false; /* Add or subtract? */ if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR) arith_code = PLUS_EXPR; else arith_code = MINUS_EXPR; /* Gimplify the LHS into a GIMPLE lvalue. */ lvalue = TREE_OPERAND (*expr_p, 0); ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue); if (ret == GS_ERROR) return ret; /* Extract the operands to the arithmetic operation. */ lhs = lvalue; rhs = TREE_OPERAND (*expr_p, 1); /* For postfix operator, we evaluate the LHS to an rvalue and then use that as the result value and in the postqueue operation. */ if (postfix) { ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue); if (ret == GS_ERROR) return ret; } t1 = build (arith_code, TREE_TYPE (*expr_p), lhs, rhs); t1 = build (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1); if (postfix) { gimplify_and_add (t1, post_p); *expr_p = lhs; return GS_ALL_DONE; } else { *expr_p = t1; return GS_OK; } } /* Gimplify the CALL_EXPR node pointed by EXPR_P. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. WANT_VALUE is true if the result of the call is desired. */ static enum gimplify_status gimplify_call_expr (tree *expr_p, tree *pre_p, bool want_value) { tree decl; tree arglist; enum gimplify_status ret; #if defined ENABLE_CHECKING if (TREE_CODE (*expr_p) != CALL_EXPR) abort (); #endif /* For reliable diagnostics during inlining, it is necessary that every call_expr be annotated with file and line. */ if (! EXPR_HAS_LOCATION (*expr_p)) SET_EXPR_LOCATION (*expr_p, input_location); /* This may be a call to a builtin function. Builtin function calls may be transformed into different (and more efficient) builtin function calls under certain circumstances. Unfortunately, gimplification can muck things up enough that the builtin expanders are not aware that certain transformations are still valid. So we attempt transformation/gimplification of the call before we gimplify the CALL_EXPR. At this time we do not manage to transform all calls in the same manner as the expanders do, but we do transform most of them. */ decl = get_callee_fndecl (*expr_p); if (decl && DECL_BUILT_IN (decl)) { tree new; /* If it is allocation of stack, record the need to restore the memory when the enclosing bind_expr is exited. */ if (DECL_FUNCTION_CODE (decl) == BUILT_IN_STACK_ALLOC) gimplify_ctxp->save_stack = true; /* If it is restore of the stack, reset it, since it means we are regimplifying the bind_expr. Note that we use the fact that for try_finally_expr, try part is processed first. */ if (DECL_FUNCTION_CODE (decl) == BUILT_IN_STACK_RESTORE) gimplify_ctxp->save_stack = false; new = simplify_builtin (*expr_p, !want_value); if (new && new != *expr_p) { /* There was a transformation of this call which computes the same value, but in a more efficient way. Return and try again. */ *expr_p = new; return GS_OK; } } /* There is a sequence point before the call, so any side effects in the calling expression must occur before the actual call. Force gimplify_expr to use an internal post queue. 
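A sketch of the effect on a call such as `foo (bar (), i + 1)' (made-up names):

     T.1 = bar ();
     T.2 = i + 1;
     foo (T.1, T.2);

Register-sized arguments are reduced to GIMPLE values like this, while large aggregates may stay as lvalues to avoid extra copies; see the argument loop below.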
*/ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, NULL, is_gimple_call_addr, fb_rvalue); if (PUSH_ARGS_REVERSED) TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1)); for (arglist = TREE_OPERAND (*expr_p, 1); arglist; arglist = TREE_CHAIN (arglist)) { enum gimplify_status t; bool (*test) (tree); fallback_t fb; /* In general, we allow lvalues for function arguments to avoid extra overhead of copying large aggregates out of even larger aggregates into temporaries only to copy the temporaries to the argument list. Make optimizers happy by pulling out to temporaries those types that fit in registers. */ if (is_gimple_reg_type (TREE_TYPE (TREE_VALUE (arglist)))) test = is_gimple_val, fb = fb_rvalue; else test = is_gimple_lvalue, fb = fb_either; /* There is a sequence point before a function call. Side effects in the argument list must occur before the actual call. So, when gimplifying arguments, force gimplify_expr to use an internal post queue which is then appended to the end of PRE_P. */ t = gimplify_expr (&TREE_VALUE (arglist), pre_p, NULL, test, fb); if (t == GS_ERROR) ret = GS_ERROR; } if (PUSH_ARGS_REVERSED) TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1)); /* Try this again in case gimplification exposed something. */ if (ret != GS_ERROR && decl && DECL_BUILT_IN (decl)) { tree new = simplify_builtin (*expr_p, !want_value); if (new && new != *expr_p) { /* There was a transformation of this call which computes the same value, but in a more efficient way. Return and try again. */ *expr_p = new; return GS_OK; } } /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its decl. This allows us to eliminate redundant or useless calls to "const" functions. */ if (TREE_CODE (*expr_p) == CALL_EXPR && (call_expr_flags (*expr_p) & (ECF_CONST | ECF_PURE))) TREE_SIDE_EFFECTS (*expr_p) = 0; return ret; } /* Handle shortcut semantics in the predicate operand of a COND_EXPR by rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs. TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the condition is true or false, respectively. If null, we should generate our own to skip over the evaluation of this specific expression. This function is the tree equivalent of do_jump. shortcut_cond_r should only be called by shortcut_cond_expr. */ static tree shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p) { tree local_label = NULL_TREE; tree t, expr = NULL; /* OK, it's not a simple case; we need to pull apart the COND_EXPR to retain the shortcut semantics. Just insert the gotos here; shortcut_cond_expr will append the real blocks later. 
*/ if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR) { /* Turn if (a && b) into if (a); else goto no; if (b) goto yes; else goto no; (no:) */ if (false_label_p == NULL) false_label_p = &local_label; t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p); append_to_statement_list (t, &expr); t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p); append_to_statement_list (t, &expr); } else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR) { /* Turn if (a || b) into if (a) goto yes; if (b) goto yes; else goto no; (yes:) */ if (true_label_p == NULL) true_label_p = &local_label; t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL); append_to_statement_list (t, &expr); t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p); append_to_statement_list (t, &expr); } else if (TREE_CODE (pred) == COND_EXPR) { /* As long as we're messing with gotos, turn if (a ? b : c) into if (a) if (b) goto yes; else goto no; else if (c) goto yes; else goto no; */ expr = build (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0), shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p), shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p, false_label_p)); } else { expr = build (COND_EXPR, void_type_node, pred, build_and_jump (true_label_p), build_and_jump (false_label_p)); } if (local_label) { t = build1 (LABEL_EXPR, void_type_node, local_label); append_to_statement_list (t, &expr); } return expr; } static tree shortcut_cond_expr (tree expr) { tree pred = TREE_OPERAND (expr, 0); tree then_ = TREE_OPERAND (expr, 1); tree else_ = TREE_OPERAND (expr, 2); tree true_label, false_label, end_label, t; tree *true_label_p; tree *false_label_p; bool emit_end, emit_false; bool then_se = then_ && TREE_SIDE_EFFECTS (then_); bool else_se = else_ && TREE_SIDE_EFFECTS (else_); /* First do simple transformations. */ if (!else_se) { /* If there is no 'else', turn (a && b) into if (a) if (b). */ while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR) { TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1); then_ = shortcut_cond_expr (expr); pred = TREE_OPERAND (pred, 0); expr = build (COND_EXPR, void_type_node, pred, then_, NULL_TREE); } } if (!then_se) { /* If there is no 'then', turn if (a || b); else d into if (a); else if (b); else d. */ while (TREE_CODE (pred) == TRUTH_ORIF_EXPR) { TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1); else_ = shortcut_cond_expr (expr); pred = TREE_OPERAND (pred, 0); expr = build (COND_EXPR, void_type_node, pred, NULL_TREE, else_); } } /* If we're done, great. */ if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR && TREE_CODE (pred) != TRUTH_ORIF_EXPR) return expr; /* Otherwise we need to mess with gotos. Change if (a) c; else d; to if (a); else goto no; c; goto end; no: d; end: and recursively gimplify the condition. */ true_label = false_label = end_label = NULL_TREE; /* If our arms just jump somewhere, hijack those labels so we don't generate jumps to jumps. */ if (then_ && TREE_CODE (then_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL) { true_label = GOTO_DESTINATION (then_); then_ = NULL; then_se = false; } if (else_ && TREE_CODE (else_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL) { false_label = GOTO_DESTINATION (else_); else_ = NULL; else_se = false; } /* If we aren't hijacking a label for the 'then' branch, it falls through. */ if (true_label) true_label_p = &true_label; else true_label_p = NULL; /* The 'else' branch also needs a label if it contains interesting code. 
*/ if (false_label || else_se) false_label_p = &false_label; else false_label_p = NULL; /* If there was nothing else in our arms, just forward the label(s). */ if (!then_se && !else_se) return shortcut_cond_r (pred, true_label_p, false_label_p); /* If our last subexpression already has a terminal label, reuse it. */ if (else_se) expr = expr_last (else_); else if (then_se) expr = expr_last (then_); else expr = NULL; if (expr && TREE_CODE (expr) == LABEL_EXPR) end_label = LABEL_EXPR_LABEL (expr); /* If we don't care about jumping to the 'else' branch, jump to the end if the condition is false. */ if (!false_label_p) false_label_p = &end_label; /* We only want to emit these labels if we aren't hijacking them. */ emit_end = (end_label == NULL_TREE); emit_false = (false_label == NULL_TREE); pred = shortcut_cond_r (pred, true_label_p, false_label_p); expr = NULL; append_to_statement_list (pred, &expr); append_to_statement_list (then_, &expr); if (else_se) { t = build_and_jump (&end_label); append_to_statement_list (t, &expr); if (emit_false) { t = build1 (LABEL_EXPR, void_type_node, false_label); append_to_statement_list (t, &expr); } append_to_statement_list (else_, &expr); } if (emit_end && end_label) { t = build1 (LABEL_EXPR, void_type_node, end_label); append_to_statement_list (t, &expr); } return expr; } /* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE. */ static tree gimple_boolify (tree expr) { tree type = TREE_TYPE (expr); if (TREE_CODE (type) == BOOLEAN_TYPE) return expr; /* If this is the predicate of a COND_EXPR, it might not even be a truthvalue yet. */ expr = lang_hooks.truthvalue_conversion (expr); switch (TREE_CODE (expr)) { case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: /* Also boolify the arguments of truth exprs. */ TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1)); /* FALLTHRU */ case TRUTH_NOT_EXPR: TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0)); /* FALLTHRU */ case EQ_EXPR: case NE_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: /* These expressions always produce boolean results. */ TREE_TYPE (expr) = boolean_type_node; return expr; default: /* Other expressions that get here must have boolean values, but might need to be converted to the appropriate mode. */ return convert (boolean_type_node, expr); } } /* Convert the conditional expression pointed by EXPR_P '(p) ? a : b;' into if (p) if (p) t1 = a; a; else or else t1 = b; b; t1; The second form is used when *EXPR_P is of type void. TARGET is the tree for T1 above. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. */ static enum gimplify_status gimplify_cond_expr (tree *expr_p, tree *pre_p, tree target) { tree expr = *expr_p; tree tmp, type; enum gimplify_status ret; type = TREE_TYPE (expr); if (!type) TREE_TYPE (expr) = void_type_node; /* If this COND_EXPR has a value, copy the values into a temporary within the arms. */ else if (! VOID_TYPE_P (type)) { if (target) { tmp = target; ret = GS_OK; } else { tmp = create_tmp_var (TREE_TYPE (expr), "iftmp"); ret = GS_ALL_DONE; } /* Build the then clause, 't1 = a;'. But don't build an assignment if this branch is void; in C++ it can be, if it's a throw. */ if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node) TREE_OPERAND (expr, 1) = build (MODIFY_EXPR, void_type_node, tmp, TREE_OPERAND (expr, 1)); /* Build the else clause, 't1 = b;'. 
*/ if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node) TREE_OPERAND (expr, 2) = build (MODIFY_EXPR, void_type_node, tmp, TREE_OPERAND (expr, 2)); TREE_TYPE (expr) = void_type_node; recalculate_side_effects (expr); /* Move the COND_EXPR to the prequeue and use the temp in its place. */ gimplify_and_add (expr, pre_p); *expr_p = tmp; return ret; } /* Make sure the condition has BOOLEAN_TYPE. */ TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0)); /* Break apart && and || conditions. */ if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR) { expr = shortcut_cond_expr (expr); if (expr != *expr_p) { *expr_p = expr; /* We can't rely on gimplify_expr to re-gimplify the expanded form properly, as cleanups might cause the target labels to be wrapped in a TRY_FINALLY_EXPR. To prevent that, we need to set up a conditional context. */ gimple_push_condition (); gimplify_stmt (expr_p); gimple_pop_condition (pre_p); return GS_ALL_DONE; } } /* Now do the normal gimplification. */ ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr, fb_rvalue); gimple_push_condition (); gimplify_to_stmt_list (&TREE_OPERAND (expr, 1)); gimplify_to_stmt_list (&TREE_OPERAND (expr, 2)); recalculate_side_effects (expr); gimple_pop_condition (pre_p); if (ret == GS_ERROR) ; else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1))) ret = GS_ALL_DONE; else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 2))) /* Rewrite "if (a); else b" to "if (!a) b" */ { TREE_OPERAND (expr, 0) = invert_truthvalue (TREE_OPERAND (expr, 0)); ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr, fb_rvalue); tmp = TREE_OPERAND (expr, 1); TREE_OPERAND (expr, 1) = TREE_OPERAND (expr, 2); TREE_OPERAND (expr, 2) = tmp; } else /* Both arms are empty; replace the COND_EXPR with its predicate. */ expr = TREE_OPERAND (expr, 0); *expr_p = expr; return ret; } /* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with a call to __builtin_memcpy. */ static enum gimplify_status gimplify_modify_expr_to_memcpy (tree *expr_p, bool want_value) { tree args, t, to, to_ptr, from; to = TREE_OPERAND (*expr_p, 0); from = TREE_OPERAND (*expr_p, 1); t = TYPE_SIZE_UNIT (TREE_TYPE (from)); t = unshare_expr (t); t = SUBSTITUTE_PLACEHOLDER_IN_EXPR (t, to); t = SUBSTITUTE_PLACEHOLDER_IN_EXPR (t, from); args = tree_cons (NULL, t, NULL); t = build_fold_addr_expr (from); args = tree_cons (NULL, t, args); to_ptr = build_fold_addr_expr (to); args = tree_cons (NULL, to_ptr, args); t = implicit_built_in_decls[BUILT_IN_MEMCPY]; t = build_function_call_expr (t, args); if (want_value) { t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t); t = build1 (INDIRECT_REF, TREE_TYPE (to), t); } *expr_p = t; return GS_OK; } /* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with a call to __builtin_memset. In this case we know that the RHS is a CONSTRUCTOR with an empty element list. 
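Sketch: for a variable-sized destination OBJ whose size expression is SZ, `OBJ = {}' becomes

     __builtin_memset (&OBJ, 0, SZ);

and when WANT_VALUE is set the returned pointer is cast back and dereferenced so the whole expression still yields the destination.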
*/ static enum gimplify_status gimplify_modify_expr_to_memset (tree *expr_p, bool want_value) { tree args, t, to, to_ptr; to = TREE_OPERAND (*expr_p, 0); t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_OPERAND (*expr_p, 1))); t = unshare_expr (t); t = SUBSTITUTE_PLACEHOLDER_IN_EXPR (t, to); args = tree_cons (NULL, t, NULL); args = tree_cons (NULL, integer_zero_node, args); to_ptr = build_fold_addr_expr (to); args = tree_cons (NULL, to_ptr, args); t = implicit_built_in_decls[BUILT_IN_MEMSET]; t = build_function_call_expr (t, args); if (want_value) { t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t); t = build1 (INDIRECT_REF, TREE_TYPE (to), t); } *expr_p = t; return GS_OK; } /* A subroutine of gimplify_modify_expr. Break out elements of a CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs. Note that we still need to clear any elements that don't have explicit initializers, so if not all elements are initialized we keep the original MODIFY_EXPR, we just remove all of the constructor elements. */ static enum gimplify_status gimplify_init_constructor (tree *expr_p, tree *pre_p, tree *post_p, bool want_value) { tree object = TREE_OPERAND (*expr_p, 0); tree ctor = TREE_OPERAND (*expr_p, 1); tree type = TREE_TYPE (ctor); enum gimplify_status ret; tree elt_list; if (TREE_CODE (ctor) != CONSTRUCTOR) return GS_UNHANDLED; elt_list = CONSTRUCTOR_ELTS (ctor); ret = GS_ALL_DONE; switch (TREE_CODE (type)) { case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: case ARRAY_TYPE: { HOST_WIDE_INT i, num_elements, num_nonzero_elements; HOST_WIDE_INT num_nonconstant_elements; bool cleared; /* Aggregate types must lower constructors to initialization of individual elements. The exception is that a CONSTRUCTOR node with no elements indicates zero-initialization of the whole. */ if (elt_list == NULL) { if (want_value) { *expr_p = object; return GS_OK; } else return GS_UNHANDLED; } categorize_ctor_elements (ctor, &num_nonzero_elements, &num_nonconstant_elements); num_elements = count_type_elements (TREE_TYPE (ctor)); /* If a const aggregate variable is being initialized, then it should never be a lose to promote the variable to be static. */ if (num_nonconstant_elements == 0 && TREE_READONLY (object) && TREE_CODE (object) == VAR_DECL) { DECL_INITIAL (object) = ctor; TREE_STATIC (object) = 1; if (!DECL_NAME (object)) DECL_NAME (object) = create_tmp_var_name ("C"); walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL); /* ??? C++ doesn't automatically append a . to the assembler name, and even when it does, it looks a FE private data structures to figure out what that number should be, which are not set for this variable. I suppose this is important for local statics for inline functions, which aren't "local" in the object file sense. So in order to get a unique TU-local symbol, we must invoke the lhd version now. */ lhd_set_decl_assembler_name (object); *expr_p = NULL_TREE; break; } /* If there are "lots" of initialized elements, and all of them are valid address constants, then the entire initializer can be dropped to memory, and then memcpy'd out. */ if (num_nonconstant_elements == 0) { HOST_WIDE_INT size = int_size_in_bytes (type); unsigned int align; /* ??? We can still get unbounded array types, at least from the C++ front end. This seems wrong, but attempt to work around it for now. */ if (size < 0) { size = int_size_in_bytes (TREE_TYPE (object)); if (size >= 0) TREE_TYPE (ctor) = type = TREE_TYPE (object); } /* Find the maximum alignment we can assume for the object. */ /* ??? 
Make use of DECL_OFFSET_ALIGN. */ if (DECL_P (object)) align = DECL_ALIGN (object); else align = TYPE_ALIGN (type); if (size > 0 && !can_move_by_pieces (size, align)) { tree new = create_tmp_var_raw (type, "C"); gimple_add_tmp_var (new); TREE_STATIC (new) = 1; TREE_READONLY (new) = 1; DECL_INITIAL (new) = ctor; if (align > DECL_ALIGN (new)) { DECL_ALIGN (new) = align; DECL_USER_ALIGN (new) = 1; } walk_tree (&DECL_INITIAL (new), force_labels_r, NULL, NULL); TREE_OPERAND (*expr_p, 1) = new; break; } } /* If there are "lots" of initialized elements, even discounting those that are not address constants (and thus *must* be computed at runtime), then partition the constructor into constant and non-constant parts. Block copy the constant parts in, then generate code for the non-constant parts. */ /* TODO. There's code in cp/typeck.c to do this. */ /* If there are "lots" of zeros, then block clear the object first. */ cleared = false; if (num_elements - num_nonzero_elements > CLEAR_RATIO && num_nonzero_elements < num_elements/4) cleared = true; /* ??? This bit ought not be needed. For any element not present in the initializer, we should simply set them to zero. Except we'd need to *find* the elements that are not present, and that requires trickery to avoid quadratic compile-time behavior in large cases or excessive memory use in small cases. */ else { HOST_WIDE_INT len = list_length (elt_list); if (TREE_CODE (type) == ARRAY_TYPE) { tree nelts = array_type_nelts (type); if (!host_integerp (nelts, 1) || tree_low_cst (nelts, 1) + 1 != len) cleared = 1;; } else if (len != fields_length (type)) cleared = 1; } if (cleared) { /* Zap the CONSTRUCTOR element list, which simplifies this case. Note that we still have to gimplify, in order to handle the case of variable sized types. Make an unshared copy of OBJECT before that so we can match a PLACEHOLDER_EXPR to it later, if needed. */ CONSTRUCTOR_ELTS (ctor) = NULL_TREE; object = unshare_expr (TREE_OPERAND (*expr_p, 0)); gimplify_stmt (expr_p); append_to_statement_list (*expr_p, pre_p); } for (i = 0; elt_list; i++, elt_list = TREE_CHAIN (elt_list)) { tree purpose, value, cref, init; purpose = TREE_PURPOSE (elt_list); value = TREE_VALUE (elt_list); if (cleared && initializer_zerop (value)) continue; if (TREE_CODE (type) == ARRAY_TYPE) { tree t = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object))); /* ??? Here's to hoping the front end fills in all of the indicies, so we don't have to figure out what's missing ourselves. */ if (!purpose) abort (); /* ??? Need to handle this. */ if (TREE_CODE (purpose) == RANGE_EXPR) abort (); cref = build (ARRAY_REF, t, unshare_expr (object), purpose, NULL_TREE, NULL_TREE); } else cref = build (COMPONENT_REF, TREE_TYPE (purpose), unshare_expr (object), purpose, NULL_TREE); init = build (MODIFY_EXPR, TREE_TYPE (purpose), cref, value); /* Each member initialization is a full-expression. */ gimplify_and_add (init, pre_p); } *expr_p = NULL_TREE; } break; case COMPLEX_TYPE: { tree r, i; /* Extract the real and imaginary parts out of the ctor. */ r = i = NULL_TREE; if (elt_list) { r = TREE_VALUE (elt_list); elt_list = TREE_CHAIN (elt_list); if (elt_list) { i = TREE_VALUE (elt_list); if (TREE_CHAIN (elt_list)) abort (); } } if (r == NULL || i == NULL) { tree zero = convert (TREE_TYPE (type), integer_zero_node); if (r == NULL) r = zero; if (i == NULL) i = zero; } /* Complex types have either COMPLEX_CST or COMPLEX_EXPR to represent creation of a complex value. 
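E.g. a _Complex double built from { 1.0, 2.0 } folds to the constant COMPLEX_CST <1.0, 2.0>, while a non-constant part forces a COMPLEX_EXPR <r, i> that is then gimplified as an ordinary rhs below.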
*/ if (TREE_CONSTANT (r) && TREE_CONSTANT (i)) { ctor = build_complex (type, r, i); TREE_OPERAND (*expr_p, 1) = ctor; } else { ctor = build (COMPLEX_EXPR, type, r, i); TREE_OPERAND (*expr_p, 1) = ctor; ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_rhs, fb_rvalue); } } break; case VECTOR_TYPE: /* Go ahead and simplify constant constructors to VECTOR_CST. */ if (TREE_CONSTANT (ctor)) TREE_OPERAND (*expr_p, 1) = build_vector (type, elt_list); else { /* Vector types use CONSTRUCTOR all the way through gimple compilation as a general initializer. */ for (; elt_list; elt_list = TREE_CHAIN (elt_list)) { enum gimplify_status tret; tret = gimplify_expr (&TREE_VALUE (elt_list), pre_p, post_p, is_gimple_constructor_elt, fb_rvalue); if (tret == GS_ERROR) ret = GS_ERROR; } } break; default: /* So how did we get a CONSTRUCTOR for a scalar type? */ abort (); } if (ret == GS_ERROR) return GS_ERROR; else if (want_value) { append_to_statement_list (*expr_p, pre_p); *expr_p = object; return GS_OK; } else return GS_ALL_DONE; } /* Subroutine of gimplify_modify_expr to do simplifications of MODIFY_EXPRs based on the code of the RHS. We loop for as long as something changes. */ static enum gimplify_status gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p, tree *pre_p, tree *post_p, bool want_value) { enum gimplify_status ret = GS_OK; while (ret != GS_UNHANDLED) switch (TREE_CODE (*from_p)) { case TARGET_EXPR: { /* If we are initializing something from a TARGET_EXPR, strip the TARGET_EXPR and initialize it directly, if possible. This can't be done if the initializer is void, since that implies that the temporary is set in some non-trivial way. ??? What about code that pulls out the temp and uses it elsewhere? I think that such code never uses the TARGET_EXPR as an initializer. If I'm wrong, we'll abort because the temp won't have any RTL. In that case, I guess we'll need to replace references somehow. */ tree init = TARGET_EXPR_INITIAL (*from_p); if (!VOID_TYPE_P (TREE_TYPE (init))) { *from_p = init; ret = GS_OK; } else ret = GS_UNHANDLED; } break; case COMPOUND_EXPR: /* Remove any COMPOUND_EXPR in the RHS so the following cases will be caught. */ gimplify_compound_expr (from_p, pre_p, true); ret = GS_OK; break; case CONSTRUCTOR: /* If we're initializing from a CONSTRUCTOR, break this into individual MODIFY_EXPRs. */ return gimplify_init_constructor (expr_p, pre_p, post_p, want_value); case COND_EXPR: /* If we're assigning from a ?: expression with ADDRESSABLE type, push the assignment down into the branches, since we can't generate a temporary of such a type. */ if (TREE_ADDRESSABLE (TREE_TYPE (*from_p))) { *expr_p = *from_p; return gimplify_cond_expr (expr_p, pre_p, *to_p); } else ret = GS_UNHANDLED; break; default: ret = GS_UNHANDLED; break; } return ret; } /* Gimplify the MODIFY_EXPR node pointed by EXPR_P. modify_expr : varname '=' rhs | '*' ID '=' rhs PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. POST_P points to the list where side effects that must happen after *EXPR_P should be stored. WANT_VALUE is nonzero iff we want to use the value of this expression in another expression. 
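When WANT_VALUE is set, the assignment itself is appended to PRE_P and *EXPR_P is replaced by the LHS; so, roughly, and possibly modulo an extra temporary for the call, b = (a = f ()) ends up as a = f (); b = a; (a, b and f are invented names used only for this illustration).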
*/ static enum gimplify_status gimplify_modify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool want_value) { tree *from_p = &TREE_OPERAND (*expr_p, 1); tree *to_p = &TREE_OPERAND (*expr_p, 0); enum gimplify_status ret = GS_UNHANDLED; #if defined ENABLE_CHECKING if (TREE_CODE (*expr_p) != MODIFY_EXPR && TREE_CODE (*expr_p) != INIT_EXPR) abort (); #endif /* The distinction between MODIFY_EXPR and INIT_EXPR is no longer useful. */ if (TREE_CODE (*expr_p) == INIT_EXPR) TREE_SET_CODE (*expr_p, MODIFY_EXPR); /* See if any simplifications can be done based on what the RHS is. */ ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p, want_value); if (ret != GS_UNHANDLED) return ret; /* If the value being copied is of variable width, expose the length if the copy by converting the whole thing to a memcpy/memset. Note that we need to do this before gimplifying any of the operands so that we can resolve any PLACEHOLDER_EXPRs in the size. Also note that the RTL expander uses the size of the expression to be copied, not of the destination, so that is what we must here. The types on both sides of the MODIFY_EXPR should be the same, but they aren't always and there are problems with class-wide types in Ada where it's hard to make it "correct". */ if (TREE_CODE (TREE_TYPE (*from_p)) != ERROR_MARK && TYPE_SIZE_UNIT (TREE_TYPE (*from_p)) && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (*from_p))) != INTEGER_CST) { if (TREE_CODE (*from_p) == CONSTRUCTOR) return gimplify_modify_expr_to_memset (expr_p, want_value); else return gimplify_modify_expr_to_memcpy (expr_p, want_value); } ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue); if (ret == GS_ERROR) return ret; ret = gimplify_expr (from_p, pre_p, post_p, is_gimple_rhs, fb_rvalue); if (ret == GS_ERROR) return ret; /* Now see if the above changed *from_p to something we handle specially. */ ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p, want_value); if (ret != GS_UNHANDLED) return ret; /* If the destination is already simple, nothing else needed. */ if (is_gimple_tmp_var (*to_p)) ret = GS_ALL_DONE; else { /* If the RHS of the MODIFY_EXPR may throw or make a nonlocal goto and the LHS is a user variable, then we need to introduce a temporary. ie temp = RHS; LHS = temp. This way the optimizers can determine that the user variable is only modified if evaluation of the RHS does not throw. FIXME this should be handled by the is_gimple_rhs predicate. */ if (aggregate_value_p (TREE_TYPE (*from_p), NULL_TREE)) /* Don't force a temp of a large aggregate type; the copy could be arbitrarily expensive. Instead we will generate a V_MAY_DEF for the assignment. */; else if (TREE_CODE (*from_p) == CALL_EXPR || (flag_non_call_exceptions && tree_could_trap_p (*from_p)) /* If we're dealing with a renamable type, either source or dest must be a renamed variable. */ || (is_gimple_reg_type (TREE_TYPE (*from_p)) && !is_gimple_reg (*to_p))) gimplify_expr (from_p, pre_p, post_p, is_gimple_val, fb_rvalue); ret = want_value ? GS_OK : GS_ALL_DONE; } if (want_value) { append_to_statement_list (*expr_p, pre_p); *expr_p = *to_p; } return ret; } /* Gimplify a comparison between two variable-sized objects. Do this with a call to BUILT_IN_MEMCMP. 
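As a rough illustration with invented names: for S1 and S2 of the same variable-sized record type, S1 == S2 is rewritten to something like memcmp (&S1, &S2, size) == 0, where size is the unshared TYPE_SIZE_UNIT of the type with any PLACEHOLDER_EXPR substituted from the first operand.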
*/ static enum gimplify_status gimplify_variable_sized_compare (tree *expr_p) { tree op0 = TREE_OPERAND (*expr_p, 0); tree op1 = TREE_OPERAND (*expr_p, 1); tree args, t, dest; t = TYPE_SIZE_UNIT (TREE_TYPE (op0)); t = unshare_expr (t); t = SUBSTITUTE_PLACEHOLDER_IN_EXPR (t, op0); args = tree_cons (NULL, t, NULL); t = build_fold_addr_expr (op1); args = tree_cons (NULL, t, args); dest = build_fold_addr_expr (op0); args = tree_cons (NULL, dest, args); t = implicit_built_in_decls[BUILT_IN_MEMCMP]; t = build_function_call_expr (t, args); *expr_p = build (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), t, integer_zero_node); return GS_OK; } /* Gimplify TRUTH_ANDIF_EXPR and TRUTH_ORIF_EXPR expressions. EXPR_P points to the expression to gimplify. Expressions of the form 'a && b' are gimplified to: a && b ? true : false gimplify_cond_expr will do the rest. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. */ static enum gimplify_status gimplify_boolean_expr (tree *expr_p) { /* Preserve the original type of the expression. */ tree type = TREE_TYPE (*expr_p); *expr_p = build (COND_EXPR, type, *expr_p, convert (type, boolean_true_node), convert (type, boolean_false_node)); return GS_OK; } /* Gimplifies an expression sequence. This function gimplifies each expression and re-writes the original expression with the last expression of the sequence in GIMPLE form. PRE_P points to the list where the side effects for all the expressions in the sequence will be emitted. WANT_VALUE is true when the result of the last COMPOUND_EXPR is used. */ /* ??? Should rearrange to share the pre-queue with all the indirect invocations of gimplify_expr. Would probably save on creations of statement_list nodes. */ static enum gimplify_status gimplify_compound_expr (tree *expr_p, tree *pre_p, bool want_value) { tree t = *expr_p; do { tree *sub_p = &TREE_OPERAND (t, 0); if (TREE_CODE (*sub_p) == COMPOUND_EXPR) gimplify_compound_expr (sub_p, pre_p, false); else gimplify_stmt (sub_p); append_to_statement_list (*sub_p, pre_p); t = TREE_OPERAND (t, 1); } while (TREE_CODE (t) == COMPOUND_EXPR); *expr_p = t; if (want_value) return GS_OK; else { gimplify_stmt (expr_p); return GS_ALL_DONE; } } /* Gimplifies a statement list. These may be created either by an enlightened front-end, or by shortcut_cond_expr. */ static enum gimplify_status gimplify_statement_list (tree *expr_p) { tree_stmt_iterator i = tsi_start (*expr_p); while (!tsi_end_p (i)) { tree t; gimplify_stmt (tsi_stmt_ptr (i)); t = tsi_stmt (i); if (t == NULL) tsi_delink (&i); else if (TREE_CODE (t) == STATEMENT_LIST) { tsi_link_before (&i, t, TSI_SAME_STMT); tsi_delink (&i); } else tsi_next (&i); } return GS_ALL_DONE; } /* Gimplify a SAVE_EXPR node. EXPR_P points to the expression to gimplify. After gimplification, EXPR_P will point to a new temporary that holds the original value of the SAVE_EXPR node. PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. */ static enum gimplify_status gimplify_save_expr (tree *expr_p, tree *pre_p, tree *post_p) { enum gimplify_status ret = GS_ALL_DONE; tree val; #if defined ENABLE_CHECKING if (TREE_CODE (*expr_p) != SAVE_EXPR) abort (); #endif val = TREE_OPERAND (*expr_p, 0); /* If the operand is already a GIMPLE temporary, just re-write the SAVE_EXPR node. */ if (is_gimple_tmp_var (val)) *expr_p = val; /* The operand may be a void-valued expression such as SAVE_EXPRs generated by the Java frontend for class initialization. 
It is being executed only for its side-effects. */ else if (TREE_TYPE (val) == void_type_node) { tree body = TREE_OPERAND (*expr_p, 0); ret = gimplify_expr (& body, pre_p, post_p, is_gimple_stmt, fb_none); append_to_statement_list (body, pre_p); *expr_p = NULL; } else *expr_p = TREE_OPERAND (*expr_p, 0) = get_initialized_tmp_var (val, pre_p, post_p); return ret; } /* Re-write the ADDR_EXPR node pointed by EXPR_P unary_expr : ... | '&' varname ... PRE_P points to the list where side effects that must happen before *EXPR_P should be stored. POST_P points to the list where side effects that must happen after *EXPR_P should be stored. */ static enum gimplify_status gimplify_addr_expr (tree *expr_p, tree *pre_p, tree *post_p) { tree expr = *expr_p; tree op0 = TREE_OPERAND (expr, 0); enum gimplify_status ret; switch (TREE_CODE (op0)) { case INDIRECT_REF: /* Check if we are dealing with an expression of the form '&*ptr'. While the front end folds away '&*ptr' into 'ptr', these expressions may be generated internally by the compiler (e.g., builtins like __builtin_va_end). */ *expr_p = TREE_OPERAND (op0, 0); ret = GS_OK; break; case ARRAY_REF: /* Fold &a[6] to (&a + 6). */ ret = gimplify_array_ref_to_plus (&TREE_OPERAND (expr, 0), pre_p, post_p); /* This added an INDIRECT_REF. Fold it away. */ *expr_p = TREE_OPERAND (TREE_OPERAND (expr, 0), 0); break; case VIEW_CONVERT_EXPR: /* Take the address of our operand and then convert it to the type of this ADDR_EXPR. ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at all clear. The impact of this transformation is even less clear. */ *expr_p = fold_convert (TREE_TYPE (expr), build_fold_addr_expr (TREE_OPERAND (op0, 0))); ret = GS_OK; break; default: /* We use fb_either here because the C frontend sometimes takes the address of a call that returns a struct. */ ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p, is_gimple_addr_expr_arg, fb_either); if (ret != GS_ERROR) { /* Make sure TREE_INVARIANT, TREE_CONSTANT, and TREE_SIDE_EFFECTS is set properly. */ recompute_tree_invarant_for_addr_expr (expr); /* Mark the RHS addressable. */ lang_hooks.mark_addressable (TREE_OPERAND (expr, 0)); } break; } /* If the operand is gimplified into a _DECL, mark the address expression as TREE_INVARIANT. */ if (DECL_P (TREE_OPERAND (expr, 0))) TREE_INVARIANT (expr) = 1; return ret; } /* Gimplify the operands of an ASM_EXPR. Input operands should be a gimple value; output operands should be a gimple lvalue. */ static enum gimplify_status gimplify_asm_expr (tree *expr_p, tree *pre_p, tree *post_p) { tree expr = *expr_p; int noutputs = list_length (ASM_OUTPUTS (expr)); const char **oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *)); int i; tree link; const char *constraint; bool allows_mem, allows_reg, is_inout; enum gimplify_status ret, tret; ASM_STRING (expr) = resolve_asm_operand_names (ASM_STRING (expr), ASM_OUTPUTS (expr), ASM_INPUTS (expr)); ret = GS_ALL_DONE; for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = TREE_CHAIN (link)) { oconstraints[i] = constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link))); parse_output_constraint (&constraint, i, 0, 0, &allows_mem, &allows_reg, &is_inout); if (!allows_reg && allows_mem) lang_hooks.mark_addressable (TREE_VALUE (link)); tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p, is_inout ? 
is_gimple_min_lval : is_gimple_lvalue, fb_lvalue | fb_mayfail); if (tret == GS_ERROR) { error ("invalid lvalue in asm output %d", i); ret = tret; } if (is_inout) { /* An input/output operand. To give the optimizers more flexibility, split it into separate input and output operands. */ tree input; char buf[10]; size_t constraint_len = strlen (constraint); /* Turn the in/out constraint into an output constraint. */ char *p = xstrdup (constraint); p[0] = '='; TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p); free (p); /* And add a matching input constraint. */ if (allows_reg) { sprintf (buf, "%d", i); input = build_string (strlen (buf), buf); } else input = build_string (constraint_len - 1, constraint + 1); input = build_tree_list (build_tree_list (NULL_TREE, input), unshare_expr (TREE_VALUE (link))); ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input); } } for (link = ASM_INPUTS (expr); link; ++i, link = TREE_CHAIN (link)) { constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link))); parse_input_constraint (&constraint, 0, 0, noutputs, 0, oconstraints, &allows_mem, &allows_reg); /* If the operand is a memory input, it should be an lvalue. */ if (!allows_reg && allows_mem) { lang_hooks.mark_addressable (TREE_VALUE (link)); tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p, is_gimple_lvalue, fb_lvalue | fb_mayfail); if (tret == GS_ERROR) { error ("memory input %d is not directly addressable", i); ret = tret; } } else { tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p, is_gimple_val, fb_rvalue); if (tret == GS_ERROR) ret = tret; } } return ret; } /* Gimplify a CLEANUP_POINT_EXPR. Currently this works by adding WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we return to this function. FIXME should we complexify the prequeue handling instead? Or use flags for all the cleanups and let the optimizer tighten them up? The current code seems pretty fragile; it will break on a cleanup within any non-conditional nesting. But any such nesting would be broken, anyway; we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct and continues out of it. We can do that at the RTL level, though, so having an optimizer to tighten up try/finally regions would be a Good Thing. */ static enum gimplify_status gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p) { tree_stmt_iterator iter; tree body; tree temp = voidify_wrapper_expr (*expr_p, NULL); /* We only care about the number of conditions between the innermost CLEANUP_POINT_EXPR and the cleanup. So save and reset the count. 
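The conversion itself happens in the loop below. As a rough illustration with invented names, a gimplified body of the form T.1 = f (); WITH_CLEANUP_EXPR <destroy T.1>; use (T.1); is rewritten so that the cleanup marker is replaced by a TRY_FINALLY_EXPR wrapping the remainder of the list, i.e. T.1 = f (); try { use (T.1); } finally { destroy T.1; }.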
*/ int old_conds = gimplify_ctxp->conditions; gimplify_ctxp->conditions = 0; body = TREE_OPERAND (*expr_p, 0); gimplify_to_stmt_list (&body); gimplify_ctxp->conditions = old_conds; for (iter = tsi_start (body); !tsi_end_p (iter); ) { tree *wce_p = tsi_stmt_ptr (iter); tree wce = *wce_p; if (TREE_CODE (wce) == WITH_CLEANUP_EXPR) { if (tsi_one_before_end_p (iter)) { tsi_link_before (&iter, TREE_OPERAND (wce, 1), TSI_SAME_STMT); tsi_delink (&iter); break; } else { tree sl, tfe; sl = tsi_split_statement_list_after (&iter); tfe = build (TRY_FINALLY_EXPR, void_type_node, sl, NULL_TREE); append_to_statement_list (TREE_OPERAND (wce, 1), &TREE_OPERAND (tfe, 1)); *wce_p = tfe; iter = tsi_start (sl); } } else tsi_next (&iter); } if (temp) { *expr_p = temp; append_to_statement_list (body, pre_p); return GS_OK; } else { *expr_p = body; return GS_ALL_DONE; } } /* Insert a cleanup marker for gimplify_cleanup_point_expr. CLEANUP is the cleanup action required. */ static void gimple_push_cleanup (tree var, tree cleanup, tree *pre_p) { tree wce; /* Errors can result in improperly nested cleanups. Which results in confusion when trying to resolve the WITH_CLEANUP_EXPR. */ if (errorcount || sorrycount) return; if (gimple_conditional_context ()) { /* If we're in a conditional context, this is more complex. We only want to run the cleanup if we actually ran the initialization that necessitates it, but we want to run it after the end of the conditional context. So we wrap the try/finally around the condition and use a flag to determine whether or not to actually run the destructor. Thus test ? f(A()) : 0 becomes (approximately) flag = 0; try { if (test) { A::A(temp); flag = 1; val = f(temp); } else { val = 0; } } finally { if (flag) A::~A(temp); } val */ tree flag = create_tmp_var (boolean_type_node, "cleanup"); tree ffalse = build (MODIFY_EXPR, void_type_node, flag, boolean_false_node); tree ftrue = build (MODIFY_EXPR, void_type_node, flag, boolean_true_node); cleanup = build (COND_EXPR, void_type_node, flag, cleanup, NULL); wce = build (WITH_CLEANUP_EXPR, void_type_node, NULL_TREE, cleanup, NULL_TREE); append_to_statement_list (ffalse, &gimplify_ctxp->conditional_cleanups); append_to_statement_list (wce, &gimplify_ctxp->conditional_cleanups); append_to_statement_list (ftrue, pre_p); /* Because of this manipulation, and the EH edges that jump threading cannot redirect, the temporary (VAR) will appear to be used uninitialized. Don't warn. */ TREE_NO_WARNING (var) = 1; } else { wce = build (WITH_CLEANUP_EXPR, void_type_node, NULL_TREE, cleanup, NULL_TREE); append_to_statement_list (wce, pre_p); } gimplify_stmt (&TREE_OPERAND (wce, 1)); } /* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR. */ static enum gimplify_status gimplify_target_expr (tree *expr_p, tree *pre_p, tree *post_p) { tree targ = *expr_p; tree temp = TARGET_EXPR_SLOT (targ); tree init = TARGET_EXPR_INITIAL (targ); enum gimplify_status ret; if (init) { /* TARGET_EXPR temps aren't part of the enclosing block, so add it to the temps list. */ gimple_add_tmp_var (temp); /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the expression is supposed to initialize the slot. */ if (VOID_TYPE_P (TREE_TYPE (init))) ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none); else { /* Special handling for BIND_EXPR can result in fewer temps. 
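The idea is that the BIND_EXPR is gimplified with TEMP as its wrapper variable, so that when possible the value is constructed directly in the TARGET_EXPR slot instead of in yet another temporary that would then have to be copied into TEMP (descriptive note; the details are in gimplify_bind_expr and voidify_wrapper_expr).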
*/ ret = GS_OK; if (TREE_CODE (init) == BIND_EXPR) gimplify_bind_expr (&init, temp, pre_p); if (init != temp) { init = build (MODIFY_EXPR, void_type_node, temp, init); ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none); } } if (ret == GS_ERROR) return GS_ERROR; append_to_statement_list (init, pre_p); /* If needed, push the cleanup for the temp. */ if (TARGET_EXPR_CLEANUP (targ)) { gimplify_stmt (&TARGET_EXPR_CLEANUP (targ)); gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ), pre_p); } /* Only expand this once. */ TREE_OPERAND (targ, 3) = init; TARGET_EXPR_INITIAL (targ) = NULL_TREE; } else if (!DECL_SEEN_IN_BIND_EXPR_P (temp)) /* We should have expanded this before. */ abort (); *expr_p = temp; return GS_OK; } /* Gimplification of expression trees. */ /* Gimplify an expression which appears at statement context; usually, this means replacing it with a suitably gimple STATEMENT_LIST. */ void gimplify_stmt (tree *stmt_p) { gimplify_expr (stmt_p, NULL, NULL, is_gimple_stmt, fb_none); } /* Similarly, but force the result to be a STATEMENT_LIST. */ void gimplify_to_stmt_list (tree *stmt_p) { gimplify_stmt (stmt_p); if (!*stmt_p) *stmt_p = alloc_stmt_list (); else if (TREE_CODE (*stmt_p) != STATEMENT_LIST) { tree t = *stmt_p; *stmt_p = alloc_stmt_list (); append_to_statement_list (t, stmt_p); } } /* Gimplifies the expression tree pointed by EXPR_P. Return 0 if gimplification failed. PRE_P points to the list where side effects that must happen before EXPR should be stored. POST_P points to the list where side effects that must happen after EXPR should be stored, or NULL if there is no suitable list. In that case, we copy the result to a temporary, emit the post-effects, and then return the temporary. GIMPLE_TEST_F points to a function that takes a tree T and returns nonzero if T is in the GIMPLE form requested by the caller. The GIMPLE predicates are in tree-gimple.c. This test is used twice. Before gimplification, the test is invoked to determine whether *EXPR_P is already gimple enough. If that fails, *EXPR_P is gimplified according to its code and GIMPLE_TEST_F is called again. If the test still fails, then a new temporary variable is created and assigned the value of the gimplified expression. FALLBACK tells the function what sort of a temporary we want. If the 1 bit is set, an rvalue is OK. If the 2 bit is set, an lvalue is OK. If both are set, either is OK, but an lvalue is preferable. The return value is either GS_ERROR or GS_ALL_DONE, since this function iterates until solution. */ enum gimplify_status gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool (* gimple_test_f) (tree), fallback_t fallback) { tree tmp; tree internal_pre = NULL_TREE; tree internal_post = NULL_TREE; tree save_expr; int is_statement = (pre_p == NULL); location_t saved_location; enum gimplify_status ret; save_expr = *expr_p; if (save_expr == NULL_TREE) return GS_ALL_DONE; /* We used to check the predicate here and return immediately if it succeeds. This is wrong; the design is for gimplification to be idempotent, and for the predicates to only test for valid forms, not whether they are fully simplified. */ /* Set up our internal queues if needed. */ if (pre_p == NULL) pre_p = &internal_pre; if (post_p == NULL) post_p = &internal_post; saved_location = input_location; if (save_expr != error_mark_node && EXPR_HAS_LOCATION (*expr_p)) input_location = EXPR_LOCATION (*expr_p); /* Loop over the specific gimplifiers until the toplevel node remains the same. 
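For example, a TRUTH_ANDIF_EXPR is rewritten below into a COND_EXPR; since that replaces the node, the next iteration of this loop dispatches on COND_EXPR and continues from there. The loop stops once an iteration leaves the node unchanged (or NULL), or an error is reported.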
*/ do { /* Strip away as many useless type conversions as possible at the toplevel. */ STRIP_USELESS_TYPE_CONVERSION (*expr_p); /* Remember the expr. */ save_expr = *expr_p; /* Die, die, die, my darling. */ if (save_expr == error_mark_node || (TREE_TYPE (save_expr) && TREE_TYPE (save_expr) == error_mark_node)) { ret = GS_ERROR; break; } /* Do any language-specific gimplification. */ ret = lang_hooks.gimplify_expr (expr_p, pre_p, post_p); if (ret == GS_OK) { if (*expr_p == NULL_TREE) break; if (*expr_p != save_expr) continue; } else if (ret != GS_UNHANDLED) break; ret = GS_OK; switch (TREE_CODE (*expr_p)) { /* First deal with the special cases. */ case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: ret = gimplify_self_mod_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case ARRAY_REF: case ARRAY_RANGE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: ret = gimplify_compound_lval (expr_p, pre_p, post_p, fallback ? fallback : fb_rvalue); break; case COND_EXPR: ret = gimplify_cond_expr (expr_p, pre_p, NULL_TREE); break; case CALL_EXPR: ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none); break; case TREE_LIST: abort (); case COMPOUND_EXPR: ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none); break; case MODIFY_EXPR: case INIT_EXPR: ret = gimplify_modify_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: ret = gimplify_boolean_expr (expr_p); break; case TRUTH_NOT_EXPR: TREE_OPERAND (*expr_p, 0) = gimple_boolify (TREE_OPERAND (*expr_p, 0)); ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case ADDR_EXPR: ret = gimplify_addr_expr (expr_p, pre_p, post_p); break; case VA_ARG_EXPR: ret = gimplify_va_arg_expr (expr_p, pre_p, post_p); break; case VIEW_CONVERT_EXPR: if (VOID_TYPE_P (TREE_TYPE (*expr_p)) || fallback == fb_none) { /* Just strip a conversion to void (or in void context) and try again. */ *expr_p = TREE_OPERAND (*expr_p, 0); break; } /* If both types are BLKmode or if one type is of variable size, convert this into a pointer punning operation. This avoids copies of large data or making a variable-size temporary. ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at all clear. The impact of this transformation is even less clear. */ if ((TYPE_MODE (TREE_TYPE (*expr_p)) == BLKmode && TYPE_MODE (TREE_TYPE (TREE_OPERAND (*expr_p, 0))) == BLKmode) || !TREE_CONSTANT (TYPE_SIZE (TREE_TYPE (*expr_p))) || !TREE_CONSTANT (TYPE_SIZE (TREE_TYPE (TREE_OPERAND (*expr_p,0))))) { tree restype = TREE_TYPE (*expr_p); *expr_p = build1 (INDIRECT_REF, TREE_TYPE (*expr_p), fold_convert (build_pointer_type (restype), build_fold_addr_expr (TREE_OPERAND (*expr_p, 0)))); break; } goto unary; case CONVERT_EXPR: case NOP_EXPR: if (IS_EMPTY_STMT (*expr_p)) { ret = GS_ALL_DONE; break; } if (VOID_TYPE_P (TREE_TYPE (*expr_p)) || fallback == fb_none) { /* Just strip a conversion to void (or in void context) and try again. */ *expr_p = TREE_OPERAND (*expr_p, 0); break; } ret = gimplify_conversion (expr_p); if (ret == GS_ERROR) break; if (*expr_p != save_expr) break; /* FALLTHRU */ case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: unary: /* unary_expr: ... | '(' cast ')' val | ... 
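For instance (purely illustrative), (float) (a + b) has its operand gimplified to a value first, yielding roughly T.1 = a + b; with (float) T.1 left in place.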
*/ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case INDIRECT_REF: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_reg, fb_rvalue); recalculate_side_effects (*expr_p); break; /* Constants need not be gimplified. */ case INTEGER_CST: case REAL_CST: case STRING_CST: case COMPLEX_CST: case VECTOR_CST: ret = GS_ALL_DONE; break; case CONST_DECL: *expr_p = DECL_INITIAL (*expr_p); break; case DECL_EXPR: ret = gimplify_decl_expr (expr_p); break; case EXC_PTR_EXPR: /* FIXME make this a decl. */ ret = GS_ALL_DONE; break; case BIND_EXPR: ret = gimplify_bind_expr (expr_p, NULL, pre_p); break; case LOOP_EXPR: ret = gimplify_loop_expr (expr_p, pre_p); break; case SWITCH_EXPR: ret = gimplify_switch_expr (expr_p, pre_p); break; case LABELED_BLOCK_EXPR: ret = gimplify_labeled_block_expr (expr_p); break; case EXIT_BLOCK_EXPR: ret = gimplify_exit_block_expr (expr_p); break; case EXIT_EXPR: ret = gimplify_exit_expr (expr_p); break; case GOTO_EXPR: /* If the target is not LABEL, then it is a computed jump and the target needs to be gimplified. */ if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL) ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p, NULL, is_gimple_val, fb_rvalue); break; case LABEL_EXPR: ret = GS_ALL_DONE; #ifdef ENABLE_CHECKING if (decl_function_context (LABEL_EXPR_LABEL (*expr_p)) != current_function_decl) abort (); #endif break; case CASE_LABEL_EXPR: ret = gimplify_case_label_expr (expr_p); break; case RETURN_EXPR: ret = gimplify_return_expr (*expr_p, pre_p); break; case CONSTRUCTOR: /* Don't reduce this in place; let gimplify_init_constructor work its magic. Buf if we're just elaborating this for side effects, just gimplify any element that has side-effects. */ if (fallback == fb_none) { for (tmp = CONSTRUCTOR_ELTS (*expr_p); tmp; tmp = TREE_CHAIN (tmp)) if (TREE_SIDE_EFFECTS (TREE_VALUE (tmp))) gimplify_expr (&TREE_VALUE (tmp), pre_p, post_p, gimple_test_f, fallback); *expr_p = NULL_TREE; } ret = GS_ALL_DONE; break; /* The following are special cases that are not handled by the original GIMPLE grammar. */ /* SAVE_EXPR nodes are converted into a GIMPLE identifier and eliminated. */ case SAVE_EXPR: ret = gimplify_save_expr (expr_p, pre_p, post_p); break; case BIT_FIELD_REF: { enum gimplify_status r0, r1, r2; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_lvalue, fb_either); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); ret = MIN (r0, MIN (r1, r2)); } break; case NON_LVALUE_EXPR: /* This should have been stripped above. 
*/ abort (); break; case ASM_EXPR: ret = gimplify_asm_expr (expr_p, pre_p, post_p); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 0)); gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 1)); ret = GS_ALL_DONE; break; case CLEANUP_POINT_EXPR: ret = gimplify_cleanup_point_expr (expr_p, pre_p); break; case TARGET_EXPR: ret = gimplify_target_expr (expr_p, pre_p, post_p); break; case CATCH_EXPR: gimplify_to_stmt_list (&CATCH_BODY (*expr_p)); ret = GS_ALL_DONE; break; case EH_FILTER_EXPR: gimplify_to_stmt_list (&EH_FILTER_FAILURE (*expr_p)); ret = GS_ALL_DONE; break; case OBJ_TYPE_REF: { enum gimplify_status r0, r1; r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); } break; case MIN_EXPR: case MAX_EXPR: ret = gimplify_minimax_expr (expr_p, pre_p, post_p); break; case LABEL_DECL: /* We get here when taking the address of a label. We mark the label as "forced"; meaning it can never be removed and it is a potential target for any computed goto. */ FORCED_LABEL (*expr_p) = 1; ret = GS_ALL_DONE; break; case STATEMENT_LIST: ret = gimplify_statement_list (expr_p); break; case VAR_DECL: /* ??? If this is a local variable, and it has not been seen in any outer BIND_EXPR, then it's probably the result of a duplicate declaration, for which we've already issued an error. It would be really nice if the front end wouldn't leak these at all. Currently the only known culprit is C++ destructors, as seen in g++.old-deja/g++.jason/binding.C. */ tmp = *expr_p; if (!TREE_STATIC (tmp) && !DECL_EXTERNAL (tmp) && decl_function_context (tmp) == current_function_decl && !DECL_SEEN_IN_BIND_EXPR_P (tmp)) { #ifdef ENABLE_CHECKING if (!errorcount && !sorrycount) abort (); #endif ret = GS_ERROR; } else ret = GS_ALL_DONE; break; default: /* If this is a comparison of objects of aggregate type, handle it specially (by converting to a call to memcmp). It would be nice to only have to do this for variable-sized objects, but then we'd have to allow the same nest of reference nodes we allow for MODIFY_EXPR and that's too complex. */ if (TREE_CODE_CLASS (TREE_CODE (*expr_p)) == '<' && (AGGREGATE_TYPE_P (TREE_TYPE (TREE_OPERAND (*expr_p, 1))))) ret = gimplify_variable_sized_compare (expr_p); /* If *EXPR_P does not need to be special-cased, handle it according to its class. */ else if (TREE_CODE_CLASS (TREE_CODE (*expr_p)) == '1') ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); else if (TREE_CODE_CLASS (TREE_CODE (*expr_p)) == '2' || TREE_CODE_CLASS (TREE_CODE (*expr_p)) == '<' || TREE_CODE (*expr_p) == TRUTH_AND_EXPR || TREE_CODE (*expr_p) == TRUTH_OR_EXPR || TREE_CODE (*expr_p) == TRUTH_XOR_EXPR) { enum gimplify_status r0, r1; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); } else if (TREE_CODE_CLASS (TREE_CODE (*expr_p)) == 'd' || TREE_CODE_CLASS (TREE_CODE (*expr_p)) == 'c') { ret = GS_ALL_DONE; break; } else /* Fail if we don't know how to handle this tree code. */ abort (); recalculate_side_effects (*expr_p); break; } /* If we replaced *expr_p, gimplify again. 
*/ if (ret == GS_OK && (*expr_p == NULL || *expr_p == save_expr)) ret = GS_ALL_DONE; } while (ret == GS_OK); /* If we encountered an error_mark somewhere nested inside, either stub out the statement or propagate the error back out. */ if (ret == GS_ERROR) { if (is_statement) *expr_p = NULL; goto out; } #ifdef ENABLE_CHECKING /* This was only valid as a return value from the langhook, which we handled. Make sure it doesn't escape from any other context. */ if (ret == GS_UNHANDLED) abort (); #endif if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p)) { /* We aren't looking for a value, and we don't have a valid statement. If it doesn't have side-effects, throw it away. */ if (!TREE_SIDE_EFFECTS (*expr_p)) *expr_p = NULL; else if (!TREE_THIS_VOLATILE (*expr_p)) { /* This is probably a _REF that contains something nested that has side effects. Recurse through the operands to find it. */ enum tree_code code = TREE_CODE (*expr_p); if (code == COMPONENT_REF || code == REALPART_EXPR || code == IMAGPART_EXPR) gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); else if (code == ARRAY_REF || code == ARRAY_RANGE_REF) { gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, gimple_test_f, fallback); } else /* Anything else with side-effects must be converted to a valid statement before we get here. */ abort (); *expr_p = NULL; } else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p))) { /* Historically, the compiler has treated a bare reference to a volatile lvalue as forcing a load. */ tree tmp = create_tmp_var (TREE_TYPE (*expr_p), "vol"); *expr_p = build (MODIFY_EXPR, TREE_TYPE (tmp), tmp, *expr_p); } else /* We can't do anything useful with a volatile reference to incomplete type, so just throw it away. */ *expr_p = NULL; } /* If we are gimplifying at the statement level, we're done. Tack everything together and replace the original statement with the gimplified form. */ if (fallback == fb_none || is_statement) { if (internal_pre || internal_post) { append_to_statement_list (*expr_p, &internal_pre); append_to_statement_list (internal_post, &internal_pre); annotate_all_with_locus (&internal_pre, input_location); *expr_p = internal_pre; } else if (!*expr_p) ; else if (TREE_CODE (*expr_p) == STATEMENT_LIST) annotate_all_with_locus (expr_p, input_location); else annotate_one_with_locus (*expr_p, input_location); goto out; } /* Otherwise we're gimplifying a subexpression, so the resulting value is interesting. */ /* If it's sufficiently simple already, we're done. Unless we are handling some post-effects internally; if that's the case, we need to copy into a temp before adding the post-effects to the tree. */ if (!internal_post && (*gimple_test_f) (*expr_p)) goto out; /* Otherwise, we need to create a new temporary for the gimplified expression. */ /* We can't return an lvalue if we have an internal postqueue. The object the lvalue refers to would (probably) be modified by the postqueue; we need to copy the value out first, which means an rvalue. */ if ((fallback & fb_lvalue) && !internal_post && is_gimple_addr_expr_arg (*expr_p)) { /* An lvalue will do. Take the address of the expression, store it in a temporary, and replace the expression with an INDIRECT_REF of that temporary. 
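Roughly, and with an invented temporary name: we emit T.1 = &EXPR (itself gimplified to a register value) and then use *T.1 in place of EXPR.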
*/ tmp = build_fold_addr_expr (*expr_p); gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue); *expr_p = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (tmp)), tmp); } else if ((fallback & fb_rvalue) && is_gimple_rhs (*expr_p)) { #if defined ENABLE_CHECKING if (VOID_TYPE_P (TREE_TYPE (*expr_p))) abort (); #endif /* An rvalue will do. Assign the gimplified expression into a new temporary TMP and replace the original expression with TMP. */ if (internal_post || (fallback & fb_lvalue)) /* The postqueue might change the value of the expression between the initialization and use of the temporary, so we can't use a formal temp. FIXME do we care? */ *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); else *expr_p = get_formal_tmp_var (*expr_p, pre_p); } else if (fallback & fb_mayfail) { /* If this is an asm statement, and the user asked for the impossible, don't abort. Fail and let gimplify_asm_expr issue an error. */ ret = GS_ERROR; goto out; } else { fprintf (stderr, "gimplification failed:\n"); print_generic_expr (stderr, *expr_p, 0); debug_tree (*expr_p); abort (); } #if defined ENABLE_CHECKING /* Make sure the temporary matches our predicate. */ if (!(*gimple_test_f) (*expr_p)) abort (); #endif if (internal_post) { annotate_all_with_locus (&internal_post, input_location); append_to_statement_list (internal_post, pre_p); } out: input_location = saved_location; return ret; } /* Look through TYPE for variable-sized objects and gimplify each such size that we find. Add to LIST_P any statements generated. */ void gimplify_type_sizes (tree type, tree *list_p) { tree field; switch (TREE_CODE (type)) { case ERROR_MARK: return; case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: case REAL_TYPE: gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p); gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p); break; case ARRAY_TYPE: /* These anonymous types don't have declarations, so handle them here. */ gimplify_type_sizes (TYPE_DOMAIN (type), list_p); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL) gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p); break; default: break; } gimplify_one_sizepos (&TYPE_SIZE (type), list_p); gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p); } /* Subroutine of the above to gimplify one size or position, *EXPR_P. We add any required statements to STMT_P. */ void gimplify_one_sizepos (tree *expr_p, tree *stmt_p) { /* We don't do anything if the value isn't there, is constant, or contains A PLACEHOLDER_EXPR. We also don't want to do anything if it's already a VAR_DECL. If it's a VAR_DECL from another function, the gimplfier will want to replace it with a new variable, but that will cause problems if this type is from outside the function. It's OK to have that here. */ if (*expr_p == NULL_TREE || TREE_CONSTANT (*expr_p) || TREE_CODE (*expr_p) == VAR_DECL || CONTAINS_PLACEHOLDER_P (*expr_p)) return; gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue); } #ifdef ENABLE_CHECKING /* Compare types A and B for a "close enough" match. */ static bool cpt_same_type (tree a, tree b) { if (lang_hooks.types_compatible_p (a, b)) return true; /* ??? The C++ FE decomposes METHOD_TYPES to FUNCTION_TYPES and doesn't link them together. This routine is intended to catch type errors that will affect the optimizers, and the optimizers don't add new dereferences of function pointers, so ignore it. 
*/ if ((TREE_CODE (a) == FUNCTION_TYPE || TREE_CODE (a) == METHOD_TYPE) && (TREE_CODE (b) == FUNCTION_TYPE || TREE_CODE (b) == METHOD_TYPE)) return true; /* ??? The C FE pushes type qualifiers after the fact into the type of the element from the type of the array. See build_unary_op's handling of ADDR_EXPR. This seems wrong -- if we were going to do this, we should have done it when creating the variable in the first place. Alternately, why aren't the two array types made variants? */ if (TREE_CODE (a) == ARRAY_TYPE && TREE_CODE (b) == ARRAY_TYPE) return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b)); /* And because of those, we have to recurse down through pointers. */ if (POINTER_TYPE_P (a) && POINTER_TYPE_P (b)) return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b)); return false; } /* Check for some cases of the front end missing cast expressions. The type of a dereference should correspond to the pointer type; similarly the type of an address should match its object. */ static tree check_pointer_types_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { tree t = *tp; tree ptype, otype, dtype; switch (TREE_CODE (t)) { case INDIRECT_REF: case ARRAY_REF: otype = TREE_TYPE (t); ptype = TREE_TYPE (TREE_OPERAND (t, 0)); dtype = TREE_TYPE (ptype); if (!cpt_same_type (otype, dtype)) abort (); break; case ADDR_EXPR: ptype = TREE_TYPE (t); otype = TREE_TYPE (TREE_OPERAND (t, 0)); dtype = TREE_TYPE (ptype); if (!cpt_same_type (otype, dtype)) { /* &array is allowed to produce a pointer to the element, rather than a pointer to the array type. We must allow this in order to properly represent assigning the address of an array in C into pointer to the element type. */ if (TREE_CODE (otype) == ARRAY_TYPE && POINTER_TYPE_P (ptype) && cpt_same_type (TREE_TYPE (otype), dtype)) break; abort (); } break; default: return NULL_TREE; } return NULL_TREE; } #endif /* Gimplify the body of statements pointed by BODY_P. FNDECL is the function decl containing BODY. */ void gimplify_body (tree *body_p, tree fndecl) { location_t saved_location = input_location; tree body; timevar_push (TV_TREE_GIMPLIFY); push_gimplify_context (); /* Unshare most shared trees in the body and in that of any nested functions. It would seem we don't have to do this for nested functions because they are supposed to be output and then the outer function gimplified first, but the g++ front end doesn't always do it that way. */ unshare_body (body_p, fndecl); unvisit_body (body_p, fndecl); /* Make sure input_location isn't set to something wierd. */ input_location = DECL_SOURCE_LOCATION (fndecl); /* Gimplify the function's body. */ gimplify_stmt (body_p); body = *body_p; /* Unshare again, in case gimplification was sloppy. */ unshare_all_trees (body); if (!body) body = alloc_stmt_list (); else if (TREE_CODE (body) == STATEMENT_LIST) { tree t = expr_only (*body_p); if (t) body = t; } /* If there isn't an outer BIND_EXPR, add one. */ if (TREE_CODE (body) != BIND_EXPR) { tree b = build (BIND_EXPR, void_type_node, NULL_TREE, NULL_TREE, NULL_TREE); TREE_SIDE_EFFECTS (b) = 1; append_to_statement_list_force (body, &BIND_EXPR_BODY (b)); body = b; } *body_p = body; pop_gimplify_context (body); #ifdef ENABLE_CHECKING walk_tree (body_p, check_pointer_types_r, NULL, NULL); #endif timevar_pop (TV_TREE_GIMPLIFY); input_location = saved_location; } /* Entry point to the gimplification pass. FNDECL is the FUNCTION_DECL node for the function we want to gimplify. 
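Besides gimplifying DECL_SAVED_TREE, this is also where -finstrument-functions support is wired in: schematically, the saved body becomes a BIND_EXPR of the form { call the profiling entry builtin; try { original body } finally { call the profiling exit builtin; } } (the braces here are only schematic; see the code below for the exact trees built).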
*/ void gimplify_function_tree (tree fndecl) { tree oldfn; oldfn = current_function_decl; current_function_decl = fndecl; gimplify_body (&DECL_SAVED_TREE (fndecl), fndecl); /* If we're instrumenting function entry/exit, then prepend the call to the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to catch the exit hook. */ /* ??? Add some way to ignore exceptions for this TFE. */ if (flag_instrument_function_entry_exit && ! DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl)) { tree tf, x, bind; tf = build (TRY_FINALLY_EXPR, void_type_node, NULL, NULL); TREE_SIDE_EFFECTS (tf) = 1; x = DECL_SAVED_TREE (fndecl); append_to_statement_list (x, &TREE_OPERAND (tf, 0)); x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_EXIT]; x = build_function_call_expr (x, NULL); append_to_statement_list (x, &TREE_OPERAND (tf, 1)); bind = build (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_ENTER]; x = build_function_call_expr (x, NULL); append_to_statement_list (x, &BIND_EXPR_BODY (bind)); append_to_statement_list (tf, &BIND_EXPR_BODY (bind)); DECL_SAVED_TREE (fndecl) = bind; } current_function_decl = oldfn; } /* Type information for gimplify.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_gimplify_h[] = { LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_gimplify_h[] = { { &tmp_var_id_num, 1, sizeof (tmp_var_id_num), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Pretty formatting of GENERIC trees in C syntax. Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc. Adapted from c-pretty-print.c by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Local functions, macros and variables. 
*/ static int op_prio (tree); static const char *op_symbol (tree); static void pretty_print_string (pretty_printer *, const char*); static void print_call_name (pretty_printer *, tree); static void newline_and_indent (pretty_printer *, int); static void maybe_init_pretty_print (FILE *); static void print_declaration (pretty_printer *, tree, int, int); static void print_struct_decl (pretty_printer *, tree, int, int); static void do_niy (pretty_printer *, tree); static void dump_vops (pretty_printer *, tree, int, int); static void dump_generic_bb_buff (pretty_printer *, basic_block, int, int); #define INDENT(SPACE) do { \ int i; for (i = 0; i>>\n"); } void debug_generic_expr (tree t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_UID); fprintf (stderr, "\n"); } void debug_generic_stmt (tree t) { print_generic_stmt (stderr, t, TDF_VOPS|TDF_UID); fprintf (stderr, "\n"); } /* Prints declaration DECL to the FILE with details specified by FLAGS. */ void print_generic_decl (FILE *file, tree decl, int flags) { maybe_init_pretty_print (file); dumping_stmts = true; print_declaration (&buffer, decl, 2, flags); pp_write_text_to_stream (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree.h. */ void print_generic_stmt (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dumping_stmts = true; dump_generic_node (&buffer, t, 0, flags, true); pp_flush (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree.h. The output is indented by INDENT spaces. */ void print_generic_stmt_indented (FILE *file, tree t, int flags, int indent) { int i; maybe_init_pretty_print (file); dumping_stmts = true; for (i = 0; i < indent; i++) pp_space (&buffer); dump_generic_node (&buffer, t, indent, flags, true); pp_flush (&buffer); } /* Print a single expression T on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree.h. */ void print_generic_expr (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dumping_stmts = false; dump_generic_node (&buffer, t, 0, flags, false); } /* Dump the name of a _DECL node and its DECL_UID if TDF_UID is set in FLAGS. */ static void dump_decl_name (pretty_printer *buffer, tree node, int flags) { if (DECL_NAME (node)) pp_tree_identifier (buffer, DECL_NAME (node)); if ((flags & TDF_UID) || DECL_NAME (node) == NULL_TREE) { if (TREE_CODE (node) == LABEL_DECL && LABEL_DECL_UID (node) != -1) pp_printf (buffer, "", LABEL_DECL_UID (node)); else pp_printf (buffer, "", DECL_UID (node)); } } /* Dump a function declaration. NODE is the FUNCTION_TYPE. BUFFER, SPC and FLAGS are as in dump_generic_node. */ static void dump_function_declaration (pretty_printer *buffer, tree node, int spc, int flags) { bool wrote_arg = false; tree arg; pp_space (buffer); pp_character (buffer, '('); /* Print the argument types. The last element in the list is a VOID_TYPE. The following avoids printing the last element. */ arg = TYPE_ARG_TYPES (node); while (arg && TREE_CHAIN (arg) && arg != error_mark_node) { wrote_arg = true; dump_generic_node (buffer, TREE_VALUE (arg), spc, flags, false); arg = TREE_CHAIN (arg); if (TREE_CHAIN (arg) && TREE_CODE (TREE_CHAIN (arg)) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } if (!wrote_arg) pp_string (buffer, "void"); pp_character (buffer, ')'); } /* Dump the node NODE on the pretty_printer BUFFER, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree.h). 
If IS_STMT is true, the object printed is considered to be a statement and it is terminated by ';' if appropriate. */ int dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, bool is_stmt) { tree type; tree op0, op1; const char *str; bool is_expr; if (node == NULL_TREE) return spc; is_expr = EXPR_P (node); if (TREE_CODE (node) != ERROR_MARK && is_gimple_stmt (node) && (flags & TDF_VOPS) && stmt_ann (node)) dump_vops (buffer, node, spc, flags); if (dumping_stmts && (flags & TDF_LINENO) && EXPR_HAS_LOCATION (node)) { expanded_location xloc = expand_location (EXPR_LOCATION (node)); pp_character (buffer, '['); if (xloc.file) { pp_string (buffer, xloc.file); pp_string (buffer, " : "); } pp_decimal_int (buffer, xloc.line); pp_string (buffer, "] "); } switch (TREE_CODE (node)) { case ERROR_MARK: pp_string (buffer, "<<< error >>>"); break; case IDENTIFIER_NODE: pp_tree_identifier (buffer, node); break; case TREE_LIST: while (node && node != error_mark_node) { if (TREE_PURPOSE (node)) { dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags, false); pp_space (buffer); } dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false); node = TREE_CHAIN (node); if (node && TREE_CODE (node) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } break; case TREE_VEC: dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false); break; case BLOCK: NIY; break; case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: { unsigned int quals = TYPE_QUALS (node); char class; if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); else if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); else if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, "restrict "); class = TREE_CODE_CLASS (TREE_CODE (node)); if (class == 'd') { if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else pp_string (buffer, ""); } else if (class == 't') { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) pp_tree_identifier (buffer, TYPE_NAME (node)); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_string (buffer, ""); } else pp_string (buffer, ""); } break; } case POINTER_TYPE: case REFERENCE_TYPE: str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&"); if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE) { tree fnode = TREE_TYPE (node); dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false); pp_space (buffer); pp_character (buffer, '('); pp_string (buffer, str); if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_printf (buffer, "", TYPE_UID (node)); pp_character (buffer, ')'); dump_function_declaration (buffer, fnode, spc, flags); } else { unsigned int quals = TYPE_QUALS (node); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); pp_string (buffer, str); if (quals & TYPE_QUAL_CONST) pp_string (buffer, " const"); else if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile"); else if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, " restrict"); } break; case OFFSET_TYPE: NIY; break; case METHOD_TYPE: dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)), flags); pp_string (buffer, "::"); break; case FILE_TYPE: NIY; break; case ARRAY_TYPE: { tree tmp; /* Print the innermost component type. 
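(For a multi-dimensional array only that innermost element type is written here; the loop after this comment then prints one pair of brackets per dimension, filled in from the dimension's TYPE_DOMAIN bounds.)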
*/ for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) ; dump_generic_node (buffer, tmp, spc, flags, false); /* Print the dimensions. */ for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) { tree domain = TYPE_DOMAIN (tmp); pp_character (buffer, '['); if (domain) { if (TYPE_MIN_VALUE (domain) && !integer_zerop (TYPE_MIN_VALUE (domain))) { dump_generic_node (buffer, TYPE_MIN_VALUE (domain), spc, flags, false); pp_string (buffer, " .. "); } if (TYPE_MAX_VALUE (domain)) dump_generic_node (buffer, TYPE_MAX_VALUE (domain), spc, flags, false); } else pp_string (buffer, ""); pp_character (buffer, ']'); } break; } case SET_TYPE: NIY; break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: /* Print the name of the structure. */ if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if (TREE_CODE (node) == UNION_TYPE) pp_string (buffer, "union "); if (TYPE_NAME (node)) dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false); else print_struct_decl (buffer, node, spc, flags); break; case LANG_TYPE: NIY; break; case INTEGER_CST: if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE) { /* In the case of a pointer, one may want to divide by the size of the pointed-to type. Unfortunately, this not straightforward. The C front-end maps expressions (int *) 5 int *p; (p + 5) in such a way that the two INTEGER_CST nodes for "5" have different values but identical types. In the latter case, the 5 is multiplied by sizeof (int) in c-common.c (pointer_int_sum) to convert it to a byte address, and yet the type of the node is left unchanged. Argh. What is consistent though is that the number value corresponds to bytes (UNITS) offset. NB: Neither of the following divisors can be trivially used to recover the original literal: TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); pp_string (buffer, "B"); /* pseudo-unit */ } else if (! host_integerp (node, 0)) { tree val = node; if (tree_int_cst_sgn (val) < 0) { pp_character (buffer, '-'); val = build_int_2 (-TREE_INT_CST_LOW (val), ~TREE_INT_CST_HIGH (val) + !TREE_INT_CST_LOW (val)); } /* Would "%x%0*x" or "%x%*0x" get zero-padding on all systems? */ { static char format[10]; /* "%x%09999x\0" */ if (!format[0]) sprintf (format, "%%x%%0%dx", HOST_BITS_PER_INT / 4); sprintf (pp_buffer (buffer)->digit_buffer, format, TREE_INT_CST_HIGH (val), TREE_INT_CST_LOW (val)); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } } else pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); break; case REAL_CST: /* Code copied from print_node. 
*/ { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) pp_string (buffer, " overflow"); #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC) d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) pp_string (buffer, " Inf"); else if (REAL_VALUE_ISNAN (d)) pp_string (buffer, " Nan"); else { char string[100]; real_to_decimal (string, &d, sizeof (string), 0, 1); pp_string (buffer, string); } #else { HOST_WIDE_INT i; unsigned char *p = (unsigned char *) &TREE_REAL_CST (node); pp_string (buffer, "0x"); for (i = 0; i < sizeof TREE_REAL_CST (node); i++) output_formatted_integer (buffer, "%02x", *p++); } #endif break; } case COMPLEX_CST: pp_string (buffer, "__complex__ ("); dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false); pp_string (buffer, ")"); break; case STRING_CST: pp_string (buffer, "\""); pretty_print_string (buffer, TREE_STRING_POINTER (node)); pp_string (buffer, "\""); break; case VECTOR_CST: { tree elt; pp_string (buffer, "{ "); for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt)) { dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false); if (TREE_CHAIN (elt)) pp_string (buffer, ", "); } pp_string (buffer, " }"); } break; case FUNCTION_TYPE: break; case FUNCTION_DECL: case CONST_DECL: dump_decl_name (buffer, node, flags); break; case LABEL_DECL: if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else if (LABEL_DECL_UID (node) != -1) pp_printf (buffer, "", LABEL_DECL_UID (node)); else pp_printf (buffer, "", DECL_UID (node)); break; case TYPE_DECL: if (DECL_IS_BUILTIN (node)) { /* Don't print the declaration of built-in types. */ break; } if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else { if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) && TYPE_METHODS (TREE_TYPE (node))) { /* The type is a c++ class: all structures have at least 4 methods. */ pp_string (buffer, "class "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else { pp_string (buffer, (TREE_CODE (TREE_TYPE (node)) == UNION_TYPE ? 
"union" : "struct ")); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } } break; case VAR_DECL: case PARM_DECL: case FIELD_DECL: case NAMESPACE_DECL: dump_decl_name (buffer, node, flags); break; case RESULT_DECL: pp_string (buffer, ""); break; case COMPONENT_REF: op0 = TREE_OPERAND (node, 0); str = "."; if (TREE_CODE (op0) == INDIRECT_REF) { op0 = TREE_OPERAND (op0, 0); str = "->"; } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_string (buffer, str); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); op0 = component_ref_field_offset (node); if (op0 && TREE_CODE (op0) != INTEGER_CST) { pp_string (buffer, "{off: "); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, '}'); } break; case BIT_FIELD_REF: pp_string (buffer, "BIT_FIELD_REF <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case BUFFER_REF: NIY; break; case ARRAY_REF: case ARRAY_RANGE_REF: op0 = TREE_OPERAND (node, 0); if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_character (buffer, '['); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (node) == ARRAY_RANGE_REF) pp_string (buffer, " ..."); pp_character (buffer, ']'); op0 = array_ref_low_bound (node); op1 = array_ref_element_size (node); if (!integer_zerop (op0) || (TYPE_SIZE_UNIT (TREE_TYPE (node)) && !operand_equal_p (op1, TYPE_SIZE_UNIT (TREE_TYPE (node)), 0))) { pp_string (buffer, "{lb: "); dump_generic_node (buffer, op0, spc, flags, false); pp_string (buffer, " sz: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, '}'); } break; case CONSTRUCTOR: { tree lnode; bool is_struct_init = FALSE; pp_character (buffer, '{'); lnode = CONSTRUCTOR_ELTS (node); if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) is_struct_init = TRUE; while (lnode && lnode != error_mark_node) { tree val; if (TREE_PURPOSE (lnode) && is_struct_init) { pp_character (buffer, '.'); dump_generic_node (buffer, TREE_PURPOSE (lnode), spc, flags, false); pp_string (buffer, "="); } val = TREE_VALUE (lnode); if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); if (val && TREE_CODE (val) == FUNCTION_DECL) { dump_decl_name (buffer, val, flags); } else { dump_generic_node (buffer, TREE_VALUE (lnode), spc, flags, false); } lnode = TREE_CHAIN (lnode); if (lnode && TREE_CODE (lnode) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } pp_character (buffer, '}'); } break; case COMPOUND_EXPR: { tree *tp; if (flags & TDF_SLIM) { pp_string (buffer, ""); break; } dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, dumping_stmts); if (dumping_stmts) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } for (tp = &TREE_OPERAND (node, 1); TREE_CODE (*tp) == COMPOUND_EXPR; tp = &TREE_OPERAND (*tp, 1)) { dump_generic_node (buffer, TREE_OPERAND (*tp, 0), spc, flags, dumping_stmts); if (dumping_stmts) newline_and_indent 
(buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } } dump_generic_node (buffer, *tp, spc, flags, dumping_stmts); } break; case STATEMENT_LIST: { tree_stmt_iterator si; bool first = true; if ((flags & TDF_SLIM) || !dumping_stmts) { pp_string (buffer, ""); break; } for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si)) { if (!first) newline_and_indent (buffer, spc); else first = false; dump_generic_node (buffer, tsi_stmt (si), spc, flags, true); } } break; case MODIFY_EXPR: case INIT_EXPR: dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case TARGET_EXPR: pp_string (buffer, "TARGET_EXPR <"); dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false); pp_character (buffer, '>'); break; case DECL_EXPR: print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags); is_stmt = false; break; case COND_EXPR: if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node) { pp_string (buffer, "if ("); dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false); pp_character (buffer, ')'); /* The lowered cond_exprs should always be printed in full. */ if (COND_EXPR_THEN (node) && TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR && COND_EXPR_ELSE (node) && TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR) { pp_space (buffer); dump_generic_node (buffer, COND_EXPR_THEN (node), 0, flags, true); pp_string (buffer, " else "); dump_generic_node (buffer, COND_EXPR_ELSE (node), 0, flags, true); } else if (!(flags & TDF_SLIM)) { /* Output COND_EXPR_THEN. */ if (COND_EXPR_THEN (node)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } /* Output COND_EXPR_ELSE. */ if (COND_EXPR_ELSE (node)) { newline_and_indent (buffer, spc); pp_string (buffer, "else"); newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } } is_expr = false; } else { dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '?'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_space (buffer); pp_character (buffer, ':'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); } break; case BIND_EXPR: pp_character (buffer, '{'); if (!(flags & TDF_SLIM)) { if (BIND_EXPR_VARS (node)) { pp_newline (buffer); for (op0 = BIND_EXPR_VARS (node); op0; op0 = TREE_CHAIN (op0)) { print_declaration (buffer, op0, spc+2, flags); pp_newline (buffer); } } newline_and_indent (buffer, spc+2); dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true); newline_and_indent (buffer, spc); pp_character (buffer, '}'); } is_expr = false; break; case CALL_EXPR: print_call_name (buffer, node); /* Print parameters. 
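   The printed form is, e.g., "foo (a, b)"; a " [static-chain: ...]"
   annotation follows when the static-chain operand (operand 2) is
   present, and " [tail call]" when the call has been marked as a
   tail call.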
*/ pp_space (buffer); pp_character (buffer, '('); op1 = TREE_OPERAND (node, 1); if (op1) dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ')'); op1 = TREE_OPERAND (node, 2); if (op1) { pp_string (buffer, " [static-chain: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ']'); } if (CALL_EXPR_TAILCALL (node)) pp_string (buffer, " [tail call]"); break; case WITH_CLEANUP_EXPR: NIY; break; case CLEANUP_POINT_EXPR: pp_string (buffer, "<>"); break; case PLACEHOLDER_EXPR: pp_string (buffer, "'); break; /* Binary arithmetic and logic expressions. */ case MULT_EXPR: case PLUS_EXPR: case MINUS_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: { const char *op = op_symbol (node); op0 = TREE_OPERAND (node, 0); op1 = TREE_OPERAND (node, 1); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op0) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op0, spc, flags, false); pp_space (buffer); pp_string (buffer, op); pp_space (buffer); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op1) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op1, spc, flags, false); } break; /* Unary arithmetic and logic expressions. */ case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case ADDR_EXPR: case REFERENCE_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case INDIRECT_REF: if (TREE_CODE (node) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST || TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL)) ; /* Do not output '&' for strings and function pointers. 
*/ else pp_string (buffer, op_symbol (node)); if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); break; case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, op_symbol (node)); break; case MIN_EXPR: pp_string (buffer, "MIN_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case MAX_EXPR: pp_string (buffer, "MAX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case ABS_EXPR: pp_string (buffer, "ABS_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case IN_EXPR: NIY; break; case SET_LE_EXPR: NIY; break; case CARD_EXPR: NIY; break; case RANGE_EXPR: NIY; break; case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: case FLOAT_EXPR: case CONVERT_EXPR: case NOP_EXPR: type = TREE_TYPE (node); op0 = TREE_OPERAND (node, 0); if (type != TREE_TYPE (op0)) { pp_character (buffer, '('); dump_generic_node (buffer, type, spc, flags, false); pp_string (buffer, ")"); } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); break; case VIEW_CONVERT_EXPR: pp_string (buffer, "VIEW_CONVERT_EXPR<"); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_string (buffer, ">("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); break; case NON_LVALUE_EXPR: pp_string (buffer, "NON_LVALUE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case SAVE_EXPR: pp_string (buffer, "SAVE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case UNSAVE_EXPR: pp_string (buffer, "UNSAVE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case ENTRY_VALUE_EXPR: NIY; break; case COMPLEX_EXPR: pp_string (buffer, "COMPLEX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case CONJ_EXPR: pp_string (buffer, "CONJ_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case REALPART_EXPR: pp_string (buffer, "REALPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case IMAGPART_EXPR: pp_string (buffer, "IMAGPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case VA_ARG_EXPR: pp_string (buffer, "VA_ARG_EXPR <"); dump_generic_node 
(buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: pp_string (buffer, "try"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); newline_and_indent (buffer, spc); pp_string (buffer, (TREE_CODE (node) == TRY_CATCH_EXPR) ? "catch" : "finally"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case CATCH_EXPR: pp_string (buffer, "catch ("); dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false); pp_string (buffer, ")"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case EH_FILTER_EXPR: pp_string (buffer, "<<>>"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case GOTO_SUBROUTINE_EXPR: NIY; break; case LABEL_EXPR: op0 = TREE_OPERAND (node, 0); /* If this is for break or continue, don't bother printing it. */ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) break; } dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ':'); if (DECL_NONLOCAL (op0)) pp_string (buffer, " [non-local]"); break; case LABELED_BLOCK_EXPR: op0 = LABELED_BLOCK_LABEL (node); /* If this is for break or continue, don't bother printing it. */ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { dump_generic_node (buffer, LABELED_BLOCK_BODY (node), spc, flags, false); break; } } dump_generic_node (buffer, LABELED_BLOCK_LABEL (node), spc, flags, false); pp_string (buffer, ": {"); if (!(flags & TDF_SLIM)) newline_and_indent (buffer, spc+2); dump_generic_node (buffer, LABELED_BLOCK_BODY (node), spc+2, flags, true); if (!flags) newline_and_indent (buffer, spc); pp_character (buffer, '}'); is_expr = false; break; case EXIT_BLOCK_EXPR: op0 = LABELED_BLOCK_LABEL (EXIT_BLOCK_LABELED_BLOCK (node)); /* If this is for a break or continue, print it accordingly. 
*/ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { pp_string (buffer, name); break; } } pp_string (buffer, "<<>>"); break; case EXC_PTR_EXPR: pp_string (buffer, "<<>>"); break; case FILTER_EXPR: pp_string (buffer, "<<>>"); break; case LOOP_EXPR: pp_string (buffer, "while (1)"); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case RETURN_EXPR: pp_string (buffer, "return"); op0 = TREE_OPERAND (node, 0); if (op0) { pp_space (buffer); if (TREE_CODE (op0) == MODIFY_EXPR) dump_generic_node (buffer, TREE_OPERAND (op0, 1), spc, flags, false); else dump_generic_node (buffer, op0, spc, flags, false); } break; case EXIT_EXPR: pp_string (buffer, "if ("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ") break"); break; case SWITCH_EXPR: pp_string (buffer, "switch ("); dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false); pp_character (buffer, ')'); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); if (SWITCH_BODY (node)) { newline_and_indent (buffer, spc+4); dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags, true); } else { tree vec = SWITCH_LABELS (node); size_t i, n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (vec, i); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, elt, spc+4, flags, false); pp_string (buffer, " goto "); dump_generic_node (buffer, CASE_LABEL (elt), spc+4, flags, true); pp_semicolon (buffer); } } newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case GOTO_EXPR: op0 = GOTO_DESTINATION (node); if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { pp_string (buffer, name); break; } } pp_string (buffer, "goto "); dump_generic_node (buffer, op0, spc, flags, false); break; case RESX_EXPR: pp_string (buffer, "resx"); /* ??? Any sensible way to present the eh region? */ break; case ASM_EXPR: pp_string (buffer, "__asm__"); if (ASM_VOLATILE_P (node)) pp_string (buffer, " __volatile__"); pp_character (buffer, '('); dump_generic_node (buffer, ASM_STRING (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false); if (ASM_CLOBBERS (node)) { pp_character (buffer, ':'); dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false); } pp_string (buffer, ")"); break; case CASE_LABEL_EXPR: if (CASE_LOW (node) && CASE_HIGH (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); pp_string (buffer, " ... 
"); dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false); } else if (CASE_LOW (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); } else pp_string (buffer, "default "); pp_character (buffer, ':'); break; case OBJ_TYPE_REF: pp_string (buffer, "OBJ_TYPE_REF("); dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false); pp_character (buffer, ';'); dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false); pp_character (buffer, '-'); pp_character (buffer, '>'); dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false); pp_character (buffer, ')'); break; case PHI_NODE: { int i; dump_generic_node (buffer, PHI_RESULT (node), spc, flags, false); pp_string (buffer, " = PHI <"); for (i = 0; i < PHI_NUM_ARGS (node); i++) { dump_generic_node (buffer, PHI_ARG_DEF (node, i), spc, flags, false); pp_string (buffer, "("); pp_decimal_int (buffer, PHI_ARG_EDGE (node, i)->src->index); pp_string (buffer, ")"); if (i < PHI_NUM_ARGS (node) - 1) pp_string (buffer, ", "); } pp_string (buffer, ">;"); } break; case SSA_NAME: dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false); pp_string (buffer, "_"); pp_decimal_int (buffer, SSA_NAME_VERSION (node)); break; case VALUE_HANDLE: pp_printf (buffer, "VH.%d", VALUE_HANDLE_ID (node)); break; default: NIY; } if (is_stmt && is_expr) pp_semicolon (buffer); pp_write_text_to_stream (buffer); return spc; } /* Print the declaration of a variable. */ static void print_declaration (pretty_printer *buffer, tree t, int spc, int flags) { INDENT (spc); if (TREE_CODE (t) == TYPE_DECL) pp_string (buffer, "typedef "); if (DECL_REGISTER (t)) pp_string (buffer, "register "); if (TREE_PUBLIC (t) && DECL_EXTERNAL (t)) pp_string (buffer, "extern "); else if (TREE_STATIC (t)) pp_string (buffer, "static "); /* Print the type and name. */ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { tree tmp; /* Print array's type. */ tmp = TREE_TYPE (t); while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE) tmp = TREE_TYPE (tmp); dump_generic_node (buffer, TREE_TYPE (tmp), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); /* Print the dimensions. */ tmp = TREE_TYPE (t); while (TREE_CODE (tmp) == ARRAY_TYPE) { pp_character (buffer, '['); if (TYPE_DOMAIN (tmp)) { if (TREE_CODE (TYPE_SIZE (tmp)) == INTEGER_CST) pp_wide_integer (buffer, TREE_INT_CST_LOW (TYPE_SIZE (tmp)) / TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (tmp)))); else dump_generic_node (buffer, TYPE_SIZE_UNIT (tmp), spc, flags, false); } pp_character (buffer, ']'); tmp = TREE_TYPE (tmp); } } else if (TREE_CODE (t) == FUNCTION_DECL) { dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false); pp_space (buffer); dump_decl_name (buffer, t, flags); dump_function_declaration (buffer, TREE_TYPE (t), spc, flags); } else { /* Print type declaration. */ dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); } /* The initial value of a function serves to determine wether the function is declared or defined. So the following does not apply to function nodes. */ if (TREE_CODE (t) != FUNCTION_DECL) { /* Print the initial value. */ if (DECL_INITIAL (t)) { pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false); } } pp_character (buffer, ';'); } /* Prints a structure: name, fields, and methods. 
FIXME: Still incomplete. */ static void print_struct_decl (pretty_printer *buffer, tree node, int spc, int flags) { /* Print the name of the structure. */ if (TYPE_NAME (node)) { INDENT (spc); if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if ((TREE_CODE (node) == UNION_TYPE || TREE_CODE (node) == QUAL_UNION_TYPE)) pp_string (buffer, "union "); dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false); } /* Print the contents of the structure. */ pp_newline (buffer); INDENT (spc); pp_character (buffer, '{'); pp_newline (buffer); /* Print the fields of the structure. */ { tree tmp; tmp = TYPE_FIELDS (node); while (tmp) { /* Avoid to print recursively the structure. */ /* FIXME : Not implemented correctly..., what about the case when we have a cycle in the contain graph? ... Maybe this could be solved by looking at the scope in which the structure was declared. */ if (TREE_TYPE (tmp) != node || (TREE_CODE (TREE_TYPE (tmp)) == POINTER_TYPE && TREE_TYPE (TREE_TYPE (tmp)) != node)) { print_declaration (buffer, tmp, spc+2, flags); pp_newline (buffer); } tmp = TREE_CHAIN (tmp); } } INDENT (spc); pp_character (buffer, '}'); } /* Return the priority of the operator OP. From lowest to highest precedence with either left-to-right (L-R) or right-to-left (R-L) associativity]: 1 [L-R] , 2 [R-L] = += -= *= /= %= &= ^= |= <<= >>= 3 [R-L] ?: 4 [L-R] || 5 [L-R] && 6 [L-R] | 7 [L-R] ^ 8 [L-R] & 9 [L-R] == != 10 [L-R] < <= > >= 11 [L-R] << >> 12 [L-R] + - 13 [L-R] * / % 14 [R-L] ! ~ ++ -- + - * & (type) sizeof 15 [L-R] fn() [] -> . unary +, - and * have higher precedence than the corresponding binary operators. */ static int op_prio (tree op) { if (op == NULL) return 9999; switch (TREE_CODE (op)) { case TREE_LIST: case COMPOUND_EXPR: case BIND_EXPR: return 1; case MODIFY_EXPR: case INIT_EXPR: return 2; case COND_EXPR: return 3; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return 4; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return 5; case BIT_IOR_EXPR: return 6; case BIT_XOR_EXPR: case TRUTH_XOR_EXPR: return 7; case BIT_AND_EXPR: return 8; case EQ_EXPR: case NE_EXPR: return 9; case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: return 10; case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: return 11; case PLUS_EXPR: case MINUS_EXPR: return 12; case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: return 13; case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case NEGATE_EXPR: case INDIRECT_REF: case ADDR_EXPR: case FLOAT_EXPR: case NOP_EXPR: case CONVERT_EXPR: case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: case TARGET_EXPR: return 14; case CALL_EXPR: case ARRAY_REF: case ARRAY_RANGE_REF: case COMPONENT_REF: return 15; /* Special expressions. */ case MIN_EXPR: case MAX_EXPR: case ABS_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: return 16; case SAVE_EXPR: case NON_LVALUE_EXPR: return op_prio (TREE_OPERAND (op, 0)); default: /* Return an arbitrarily high precedence to avoid surrounding single VAR_DECLs in ()s. */ return 9999; } } /* Return the symbol associated with operator OP. 
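*/

/* The binary-operator cases of dump_generic_node consume op_prio in a
   simple way: an operand is wrapped in parentheses only when it binds
   less tightly than its parent, so the printed text keeps the semantics
   of the tree without over-parenthesizing.  Below is a stand-alone
   sketch of that rule on a toy expression node; the demo_* names are
   illustrative only.  */

#include <stdio.h>

struct demo_expr
{
  const char *op;              /* operator symbol, or leaf text        */
  int prio;                    /* as in op_prio: larger binds tighter;
                                  leaves should use a very high value
                                  (e.g. 9999) so they never get parens */
  struct demo_expr *lhs, *rhs; /* NULL for leaves                      */
};

static void demo_dump_expr (FILE *, struct demo_expr *);

static void
demo_dump_operand (FILE *file, struct demo_expr *child,
                   struct demo_expr *parent)
{
  int parens = child->prio < parent->prio;

  if (parens)
    fputc ('(', file);
  demo_dump_expr (file, child);
  if (parens)
    fputc (')', file);
}

static void
demo_dump_expr (FILE *file, struct demo_expr *e)
{
  if (e->lhs == NULL)
    {
      fputs (e->op, file);
      return;
    }
  demo_dump_operand (file, e->lhs, e);
  fprintf (file, " %s ", e->op);
  demo_dump_operand (file, e->rhs, e);
}

/* With the priorities above, (a + b) * c has a PLUS child (priority 12)
   under a MULT parent (priority 13), so the left operand is printed in
   parentheses, while a * b + c needs none.  */

/*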
*/ static const char * op_symbol (tree op) { if (op == NULL) abort (); switch (TREE_CODE (op)) { case MODIFY_EXPR: return "="; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return "||"; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return "&&"; case BIT_IOR_EXPR: return "|"; case TRUTH_XOR_EXPR: case BIT_XOR_EXPR: return "^"; case ADDR_EXPR: case BIT_AND_EXPR: return "&"; case ORDERED_EXPR: return "ord"; case UNORDERED_EXPR: return "unord"; case EQ_EXPR: return "=="; case UNEQ_EXPR: return "u=="; case NE_EXPR: return "!="; case LT_EXPR: return "<"; case UNLT_EXPR: return "u<"; case LE_EXPR: return "<="; case UNLE_EXPR: return "u<="; case GT_EXPR: return ">"; case UNGT_EXPR: return "u>"; case GE_EXPR: return ">="; case UNGE_EXPR: return "u>="; case LTGT_EXPR: return "<>"; case LSHIFT_EXPR: return "<<"; case RSHIFT_EXPR: return ">>"; case PLUS_EXPR: return "+"; case NEGATE_EXPR: case MINUS_EXPR: return "-"; case BIT_NOT_EXPR: return "~"; case TRUTH_NOT_EXPR: return "!"; case MULT_EXPR: case INDIRECT_REF: return "*"; case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: return "/"; case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: return "%"; case PREDECREMENT_EXPR: return " --"; case PREINCREMENT_EXPR: return " ++"; case POSTDECREMENT_EXPR: return "-- "; case POSTINCREMENT_EXPR: return "++ "; case REFERENCE_EXPR: return ""; default: return "<<< ??? >>>"; } } /* Prints the name of a CALL_EXPR. */ static void print_call_name (pretty_printer *buffer, tree node) { tree op0; if (TREE_CODE (node) != CALL_EXPR) abort (); op0 = TREE_OPERAND (node, 0); if (TREE_CODE (op0) == NON_LVALUE_EXPR) op0 = TREE_OPERAND (op0, 0); switch (TREE_CODE (op0)) { case VAR_DECL: case PARM_DECL: PRINT_FUNCTION_NAME (op0); break; case ADDR_EXPR: case INDIRECT_REF: case NOP_EXPR: dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); break; case COND_EXPR: pp_string (buffer, "("); dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); pp_string (buffer, ") ? "); dump_generic_node (buffer, TREE_OPERAND (op0, 1), 0, 0, false); pp_string (buffer, " : "); dump_generic_node (buffer, TREE_OPERAND (op0, 2), 0, 0, false); break; case COMPONENT_REF: /* The function is a pointer contained in a structure. */ if (TREE_CODE (TREE_OPERAND (op0, 0)) == INDIRECT_REF || TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL) PRINT_FUNCTION_NAME (TREE_OPERAND (op0, 1)); else dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); /* else We can have several levels of structures and a function pointer inside. This is not implemented yet... */ /* NIY;*/ break; case ARRAY_REF: if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL) PRINT_FUNCTION_NAME (TREE_OPERAND (op0, 0)); else dump_generic_node (buffer, op0, 0, 0, false); break; case SSA_NAME: case OBJ_TYPE_REF: dump_generic_node (buffer, op0, 0, 0, false); break; default: NIY; } } /* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ... 
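*/

/* A stand-alone version of the escaping idea implemented by
   pretty_print_string below: control characters, quotes and backslashes
   are rewritten as C escape sequences so that a dumped STRING_CST stays
   on a single line.  demo_print_escaped is an illustrative name, not a
   GCC routine, and it omits the octal cases handled by the real code.  */

#include <stdio.h>

static void
demo_print_escaped (FILE *file, const char *str)
{
  for (; *str; str++)
    switch (*str)
      {
      case '\b': fputs ("\\b", file); break;
      case '\f': fputs ("\\f", file); break;
      case '\n': fputs ("\\n", file); break;
      case '\r': fputs ("\\r", file); break;
      case '\t': fputs ("\\t", file); break;
      case '\v': fputs ("\\v", file); break;
      case '\\': fputs ("\\\\", file); break;
      case '"':  fputs ("\\\"", file); break;
      case '\'': fputs ("\\'", file); break;
      default:   fputc (*str, file); break;
      }
}

/* demo_print_escaped (stdout, "a\tb\n") writes the six characters
   a \ t b \ n, i.e. the text a\tb\n on one line.  */

/*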
*/ static void pretty_print_string (pretty_printer *buffer, const char *str) { if (str == NULL) return; while (*str) { switch (str[0]) { case '\b': pp_string (buffer, "\\b"); break; case '\f': pp_string (buffer, "\\f"); break; case '\n': pp_string (buffer, "\\n"); break; case '\r': pp_string (buffer, "\\r"); break; case '\t': pp_string (buffer, "\\t"); break; case '\v': pp_string (buffer, "\\v"); break; case '\\': pp_string (buffer, "\\\\"); break; case '\"': pp_string (buffer, "\\\""); break; case '\'': pp_string (buffer, "\\'"); break; case '\0': pp_string (buffer, "\\0"); break; case '\1': pp_string (buffer, "\\1"); break; case '\2': pp_string (buffer, "\\2"); break; case '\3': pp_string (buffer, "\\3"); break; case '\4': pp_string (buffer, "\\4"); break; case '\5': pp_string (buffer, "\\5"); break; case '\6': pp_string (buffer, "\\6"); break; case '\7': pp_string (buffer, "\\7"); break; default: pp_character (buffer, str[0]); break; } str++; } } static void maybe_init_pretty_print (FILE *file) { if (!initialized) { pp_construct (&buffer, /* prefix */NULL, /* line-width */0); pp_needs_newline (&buffer) = true; initialized = 1; } buffer.buffer->stream = file; } static void newline_and_indent (pretty_printer *buffer, int spc) { pp_newline (buffer); INDENT (spc); } static void dump_vops (pretty_printer *buffer, tree stmt, int spc, int flags) { size_t i; stmt_ann_t ann = stmt_ann (stmt); v_may_def_optype v_may_defs = V_MAY_DEF_OPS (ann); v_must_def_optype v_must_defs = V_MUST_DEF_OPS (ann); vuse_optype vuses = VUSE_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { pp_string (buffer, "# "); dump_generic_node (buffer, V_MAY_DEF_RESULT (v_may_defs, i), spc + 2, flags, false); pp_string (buffer, " = V_MAY_DEF <"); dump_generic_node (buffer, V_MAY_DEF_OP (v_may_defs, i), spc + 2, flags, false); pp_string (buffer, ">;"); newline_and_indent (buffer, spc); } for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { tree v_must_def = V_MUST_DEF_OP (v_must_defs, i); pp_string (buffer, "# V_MUST_DEF <"); dump_generic_node (buffer, v_must_def, spc + 2, flags, false); pp_string (buffer, ">;"); newline_and_indent (buffer, spc); } for (i = 0; i < NUM_VUSES (vuses); i++) { tree vuse = VUSE_OP (vuses, i); pp_string (buffer, "# VUSE <"); dump_generic_node (buffer, vuse, spc + 2, flags, false); pp_string (buffer, ">;"); newline_and_indent (buffer, spc); } } /* Dumps basic block BB to FILE with details described by FLAGS and indented by INDENT spaces. */ void dump_generic_bb (FILE *file, basic_block bb, int indent, int flags) { maybe_init_pretty_print (file); dumping_stmts = true; dump_generic_bb_buff (&buffer, bb, indent, flags); pp_flush (&buffer); } /* Dumps header of basic block BB to buffer BUFFER indented by INDENT spaces and details described by flags. 
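*/

/* A stand-alone sketch of the "# BLOCK / # PRED / # SUCC" lines that
   dump_bb_header and dump_bb_end emit around each basic block, using a
   toy block record with plain index arrays instead of edge lists.  The
   real dumper prints "# SUCC:" after the block's statements; this
   sketch, with its illustrative demo_* names, prints everything as one
   banner for brevity.  */

#include <stdio.h>

struct demo_bb
{
  int index;
  int n_preds, n_succs;
  int preds[8], succs[8];       /* -1 stands for ENTRY or EXIT */
};

static void
demo_dump_bb_banner (FILE *file, const struct demo_bb *bb, int indent)
{
  int i;

  fprintf (file, "%*s# BLOCK %d\n", indent, "", bb->index);
  fprintf (file, "%*s# PRED:", indent, "");
  for (i = 0; i < bb->n_preds; i++)
    {
      if (bb->preds[i] < 0)
        fputs (" ENTRY", file);
      else
        fprintf (file, " %d", bb->preds[i]);
    }
  fputc ('\n', file);
  fprintf (file, "%*s# SUCC:", indent, "");
  for (i = 0; i < bb->n_succs; i++)
    {
      if (bb->succs[i] < 0)
        fputs (" EXIT", file);
      else
        fprintf (file, " %d", bb->succs[i]);
    }
  fputc ('\n', file);
}

/* A block 2 with predecessors { ENTRY, 1 } and successor { 3 } prints,
   at indent 0:
       # BLOCK 2
       # PRED: ENTRY 1
       # SUCC: 3                                                       */

/*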
*/ static void dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; tree stmt; if (flags & TDF_BLOCKS) { INDENT (indent); pp_string (buffer, "# BLOCK "); pp_decimal_int (buffer, bb->index); if (flags & TDF_LINENO) { block_stmt_iterator bsi; for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) if (get_lineno (bsi_stmt (bsi)) != -1) { pp_string (buffer, ", starting at line "); pp_decimal_int (buffer, get_lineno (bsi_stmt (bsi))); break; } } newline_and_indent (buffer, indent); pp_string (buffer, "# PRED:"); pp_write_text_to_stream (buffer); for (e = bb->pred; e; e = e->pred_next) if (flags & TDF_SLIM) { pp_string (buffer, " "); if (e->src == ENTRY_BLOCK_PTR) pp_string (buffer, "ENTRY"); else pp_decimal_int (buffer, e->src->index); } else dump_edge_info (buffer->buffer->stream, e, 0); pp_newline (buffer); } else { stmt = first_stmt (bb); if (!stmt || TREE_CODE (stmt) != LABEL_EXPR) { INDENT (indent - 2); pp_string (buffer, "index); pp_string (buffer, ">:"); pp_newline (buffer); } } } /* Dumps end of basic block BB to buffer BUFFER indented by INDENT spaces. */ static void dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; INDENT (indent); pp_string (buffer, "# SUCC:"); pp_write_text_to_stream (buffer); for (e = bb->succ; e; e = e->succ_next) if (flags & TDF_SLIM) { pp_string (buffer, " "); if (e->dest == EXIT_BLOCK_PTR) pp_string (buffer, "EXIT"); else pp_decimal_int (buffer, e->dest->index); } else dump_edge_info (buffer->buffer->stream, e, 1); pp_newline (buffer); } /* Dumps phi nodes of basic block BB to buffer BUFFER with details described by FLAGS indented by INDENT spaces. */ static void dump_phi_nodes (pretty_printer *buffer, basic_block bb, int indent, int flags) { tree phi = phi_nodes (bb); if (!phi) return; for (; phi; phi = PHI_CHAIN (phi)) { if (is_gimple_reg (PHI_RESULT (phi)) || (flags & TDF_VOPS)) { INDENT (indent); pp_string (buffer, "# "); dump_generic_node (buffer, phi, indent, flags, false); pp_newline (buffer); } } } /* Dump jump to basic block BB that is represented implicitly in the cfg to BUFFER. */ static void pp_cfg_jump (pretty_printer *buffer, basic_block bb) { tree stmt; stmt = first_stmt (bb); pp_string (buffer, "goto index); pp_string (buffer, ">"); if (stmt && TREE_CODE (stmt) == LABEL_EXPR) { pp_string (buffer, " ("); dump_generic_node (buffer, LABEL_EXPR_LABEL (stmt), 0, 0, false); pp_string (buffer, ")"); } pp_semicolon (buffer); } /* Dump edges represented implicitly in basic block BB to BUFFER, indented by INDENT spaces, with details given by FLAGS. */ static void dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; /* If there is a fallthru edge, we may need to add an artificial goto to the dump. */ for (e = bb->succ; e; e = e->succ_next) if (e->flags & EDGE_FALLTHRU) break; if (e && e->dest != bb->next_bb) { INDENT (indent); if ((flags & TDF_LINENO) #ifdef USE_MAPPED_LOCATION && e->goto_locus != UNKNOWN_LOCATION #else && e->goto_locus #endif ) { expanded_location goto_xloc; #ifdef USE_MAPPED_LOCATION goto_xloc = expand_location (e->goto_locus); #else goto_xloc = *e->goto_locus; #endif pp_character (buffer, '['); if (goto_xloc.file) { pp_string (buffer, goto_xloc.file); pp_string (buffer, " : "); } pp_decimal_int (buffer, goto_xloc.line); pp_string (buffer, "] "); } pp_cfg_jump (buffer, e->dest); pp_newline (buffer); } } /* Dumps basic block BB to buffer BUFFER with details described by FLAGS and indented by INDENT spaces. 
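*/

/* dump_implicit_edges above materializes control flow that the statement
   list leaves implicit: a fallthrough edge only needs a printed "goto"
   when its destination is not the block that follows in layout order.
   A stand-alone sketch with toy block numbers (demo_* names are
   illustrative only):  */

#include <stdio.h>

/* FALLTHRU_DEST is the index of the block's fallthrough successor (or
   -1 if it has none); NEXT_BB_INDEX is the block that physically
   follows it in the dump.  */
static void
demo_dump_implicit_goto (FILE *file, int fallthru_dest, int next_bb_index)
{
  if (fallthru_dest < 0 || fallthru_dest == next_bb_index)
    return;                     /* control simply falls through */
  fprintf (file, "goto <bb %d>;\n", fallthru_dest);
}

/* A block laid out right before block 7 that falls through to block 7
   prints nothing, while one falling through to block 4 prints
   "goto <bb 4>;".  */

/*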
*/ static void dump_generic_bb_buff (pretty_printer *buffer, basic_block bb, int indent, int flags) { block_stmt_iterator bsi; tree stmt; int label_indent = indent - 2; if (label_indent < 0) label_indent = 0; dump_bb_header (buffer, bb, indent, flags); if (bb_ann (bb)) dump_phi_nodes (buffer, bb, indent, flags); for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { int curr_indent; stmt = bsi_stmt (bsi); curr_indent = TREE_CODE (stmt) == LABEL_EXPR ? label_indent : indent; INDENT (curr_indent); dump_generic_node (buffer, stmt, curr_indent, flags, true); pp_newline (buffer); } dump_implicit_edges (buffer, bb, indent, flags); if (flags & TDF_BLOCKS) dump_bb_end (buffer, bb, indent, flags); } /* Rewrite a program in Normal form into SSA. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Generic dominator tree walker Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_DOMWALK_H #define GCC_DOMWALK_H /* This is the main data structure for the dominator walker. It provides the callback hooks as well as a convenient place to hang block local data and pass-global data. */ struct dom_walk_data { /* This is the direction of the dominator tree we want to walk. ie, if it is set to CDI_DOMINATORS, then we walk the dominator tree, if it is set to CDI_POST_DOMINATORS, then we walk the post dominator tree. */ ENUM_BITFIELD (cdi_direction) dom_direction : 2; /* Nonzero if the statement walker should walk the statements from last to first within a basic block instead of first to last. Note this affects both statement walkers. We haven't yet needed to use the second statement walker for anything, so it's hard to predict if we really need the ability to select their direction independently. */ BOOL_BITFIELD walk_stmts_backward : 1; /* Function to initialize block local data. Note that the dominator walker infrastructure may provide a new fresh, and zero'd block local data structure, or it may re-use an existing block local data structure. 
If the block local structure has items such as virtual arrays, then that allows your optimizer to re-use those arrays rather than creating new ones. */ void (*initialize_block_local_data) (struct dom_walk_data *, basic_block, bool); /* Function to call before the statement walk occurring before the recursive walk of the dominator children. This typically initializes an block local data and pushes that data onto BLOCK_DATA_STACK. */ void (*before_dom_children_before_stmts) (struct dom_walk_data *, basic_block); /* Function to call to walk statements before the recursive walk of the dominator children. */ void (*before_dom_children_walk_stmts) (struct dom_walk_data *, basic_block, block_stmt_iterator); /* Function to call after the statement walk occurring before the recursive walk of the dominator children. */ void (*before_dom_children_after_stmts) (struct dom_walk_data *, basic_block); /* Function to call before the statement walk occurring after the recursive walk of the dominator children. */ void (*after_dom_children_before_stmts) (struct dom_walk_data *, basic_block); /* Function to call to walk statements after the recursive walk of the dominator children. */ void (*after_dom_children_walk_stmts) (struct dom_walk_data *, basic_block, block_stmt_iterator); /* Function to call after the statement walk occurring after the recursive walk of the dominator children. This typically finalizes any block local data and pops that data from BLOCK_DATA_STACK. */ void (*after_dom_children_after_stmts) (struct dom_walk_data *, basic_block); /* Global data for a walk through the dominator tree. */ void *global_data; /* Stack of any data we need to keep on a per-block basis. If you have no local data, then BLOCK_DATA_STACK will be NULL. */ varray_type block_data_stack; /* Size of the block local data. If this is zero, then it is assumed you have no local data and thus no BLOCK_DATA_STACK as well. */ size_t block_local_data_size; /* From here below are private data. Please do not use this information/data outside domwalk.c. */ /* Stack of available block local structures. */ varray_type free_block_data; }; void walk_dominator_tree (struct dom_walk_data *, basic_block); void init_walk_dominator_tree (struct dom_walk_data *); void fini_walk_dominator_tree (struct dom_walk_data *); #endif /* GCC_DOMWALK_H */ /* This file builds the SSA form for a function as described in: R. Cytron, J. Ferrante, B. Rosen, M. Wegman, and K. Zadeck. Efficiently Computing Static Single Assignment Form and the Control Dependence Graph. ACM Transactions on Programming Languages and Systems, 13(4):451-490, October 1991. */ /* Structure to map a variable VAR to the set of blocks that contain definitions for VAR. */ struct def_blocks_d { /* The variable. */ tree var; /* Blocks that contain definitions of VAR. Bit I will be set if the Ith block contains a definition of VAR. */ bitmap def_blocks; /* Blocks that contain a phi node for VAR. */ bitmap phi_blocks; /* Blocks where VAR is live-on-entry. Similar semantics as DEF_BLOCKS. */ bitmap livein_blocks; }; /* Each entry in DEF_BLOCKS contains an element of type STRUCT DEF_BLOCKS_D, mapping a variable VAR to a bitmap describing all the basic blocks where VAR is defined (assigned a new value). It also contains a bitmap of all the blocks where VAR is live-on-entry (i.e., there is a use of VAR in block B without a preceding definition in B). The live-on-entry information is used when computing PHI pruning heuristics. 
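*/

/* A stand-alone sketch of the bookkeeping kept in struct def_blocks_d:
   for one variable we record the set of blocks that define it, the set
   that contain a PHI node for it, and the set where it is live on entry.
   Plain unsigned longs stand in for GCC's bitmap type, so this sketch
   (demo_* names, illustrative only) handles at most 32 blocks.  */

struct demo_def_blocks
{
  unsigned long def_blocks;      /* bit I set: block I defines the var  */
  unsigned long phi_blocks;      /* bit I set: block I has a PHI for it */
  unsigned long livein_blocks;   /* bit I set: live on entry to block I */
};

static void
demo_set_def_block (struct demo_def_blocks *db, int block_index, int phi_p)
{
  db->def_blocks |= 1UL << block_index;
  if (phi_p)
    db->phi_blocks |= 1UL << block_index;
}

static void
demo_set_livein_block (struct demo_def_blocks *db, int block_index)
{
  db->livein_blocks |= 1UL << block_index;
}

/*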
*/ static htab_t def_blocks; /* Global data to attach to the main dominator walk structure. */ struct mark_def_sites_global_data { /* This sbitmap contains the variables which are set before they are used in a basic block. We keep it as a global variable solely to avoid the overhead of allocating and deallocating the bitmap. */ sbitmap kills; /* Bitmap of names to rename. */ sbitmap names_to_rename; }; struct rewrite_block_data { varray_type block_defs; }; /* Information stored for ssa names. */ struct ssa_name_info { /* This field indicates whether or not the variable may need PHI nodes. See the enum's definition for more detailed information about the states. */ ENUM_BITFIELD (need_phi_state) need_phi_state : 2; /* The actual definition of the ssa name. */ tree current_def; }; /* Local functions. */ static void rewrite_finalize_block (struct dom_walk_data *, basic_block); static void rewrite_initialize_block_local_data (struct dom_walk_data *, basic_block, bool); static void rewrite_initialize_block (struct dom_walk_data *, basic_block); static void rewrite_add_phi_arguments (struct dom_walk_data *, basic_block); static void mark_def_sites (struct dom_walk_data *walk_data, basic_block bb, block_stmt_iterator); static void mark_def_sites_initialize_block (struct dom_walk_data *walk_data, basic_block bb); static void set_def_block (tree, basic_block, bool, bool); static void set_livein_block (tree, basic_block); static bool prepare_use_operand_for_rename (use_operand_p, size_t *uid_p); static bool prepare_def_operand_for_rename (tree def, size_t *uid_p); static void insert_phi_nodes (bitmap *, bitmap); static void rewrite_stmt (struct dom_walk_data *, basic_block, block_stmt_iterator); static inline void rewrite_operand (use_operand_p); static void insert_phi_nodes_for (tree, bitmap *, varray_type *); static tree get_reaching_def (tree); static hashval_t def_blocks_hash (const void *); static int def_blocks_eq (const void *, const void *); static void def_blocks_free (void *); static int debug_def_blocks_r (void **, void *); static inline struct def_blocks_d *get_def_blocks_for (tree); static inline struct def_blocks_d *find_def_blocks_for (tree); static void htab_statistics (FILE *, htab_t); /* Get the information associated with NAME. */ static inline struct ssa_name_info * get_ssa_name_ann (tree name) { if (!SSA_NAME_AUX (name)) SSA_NAME_AUX (name) = xcalloc (1, sizeof (struct ssa_name_info)); return SSA_NAME_AUX (name); } /* Gets phi_state field for VAR. */ static inline enum need_phi_state get_phi_state (tree var) { if (TREE_CODE (var) == SSA_NAME) return get_ssa_name_ann (var)->need_phi_state; else return var_ann (var)->need_phi_state; } /* Sets phi_state field for VAR to STATE. */ static inline void set_phi_state (tree var, enum need_phi_state state) { if (TREE_CODE (var) == SSA_NAME) get_ssa_name_ann (var)->need_phi_state = state; else var_ann (var)->need_phi_state = state; } /* Return the current definition for VAR. */ static inline tree get_current_def (tree var) { if (TREE_CODE (var) == SSA_NAME) return get_ssa_name_ann (var)->current_def; else return var_ann (var)->current_def; } /* Sets current definition of VAR to DEF. 
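*/

/* A stand-alone sketch of the "current definition" discipline used by
   the renamer: each variable has a current reaching definition, and
   before a block overwrites it the old value is pushed on a per-block
   stack so it can be restored, in reverse order, when the dominator
   walk leaves the block.  Variables and definitions are plain ints
   here; the demo_* names are illustrative only.  */

#define DEMO_MAX_VARS  64
#define DEMO_MAX_DEFS  256

static int demo_current_def[DEMO_MAX_VARS];       /* 0 means "none" */

struct demo_block_defs
{
  int var[DEMO_MAX_DEFS];        /* which variable was overwritten   */
  int saved_def[DEMO_MAX_DEFS];  /* its previous reaching definition */
  int depth;
};

/* Register DEF as the new reaching definition of VAR, saving the old
   one so demo_finalize_block can undo the change.  */
static void
demo_register_new_def (struct demo_block_defs *bd, int var, int def)
{
  bd->var[bd->depth] = var;
  bd->saved_def[bd->depth] = demo_current_def[var];
  bd->depth++;
  demo_current_def[var] = def;
}

/* Restore, in reverse order, every definition saved in this block.  */
static void
demo_finalize_block (struct demo_block_defs *bd)
{
  while (bd->depth > 0)
    {
      bd->depth--;
      demo_current_def[bd->var[bd->depth]] = bd->saved_def[bd->depth];
    }
}

/*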
*/ static inline void set_current_def (tree var, tree def) { if (TREE_CODE (var) == SSA_NAME) get_ssa_name_ann (var)->current_def = def; else var_ann (var)->current_def = def; } /* Compute global livein information given the set of blockx where an object is locally live at the start of the block (LIVEIN) and the set of blocks where the object is defined (DEF_BLOCKS). Note: This routine augments the existing local livein information to include global livein (i.e., it modifies the underlying bitmap for LIVEIN). */ void compute_global_livein (bitmap livein, bitmap def_blocks) { basic_block bb, *worklist, *tos; int i; tos = worklist = (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1)); EXECUTE_IF_SET_IN_BITMAP (livein, 0, i, { *tos++ = BASIC_BLOCK (i); }); /* Iterate until the worklist is empty. */ while (tos != worklist) { edge e; /* Pull a block off the worklist. */ bb = *--tos; /* For each predecessor block. */ for (e = bb->pred; e; e = e->pred_next) { basic_block pred = e->src; int pred_index = pred->index; /* None of this is necessary for the entry block. */ if (pred != ENTRY_BLOCK_PTR && ! bitmap_bit_p (livein, pred_index) && ! bitmap_bit_p (def_blocks, pred_index)) { *tos++ = pred; bitmap_set_bit (livein, pred_index); } } } free (worklist); } /* Block initialization routine for mark_def_sites. Clear the KILLS bitmap at the start of each block. */ static void mark_def_sites_initialize_block (struct dom_walk_data *walk_data, basic_block bb ATTRIBUTE_UNUSED) { struct mark_def_sites_global_data *gd = walk_data->global_data; sbitmap kills = gd->kills; sbitmap_zero (kills); } /* Block initialization routine for mark_def_sites. Clear the KILLS bitmap at the start of each block. */ static void ssa_mark_def_sites_initialize_block (struct dom_walk_data *walk_data, basic_block bb) { struct mark_def_sites_global_data *gd = walk_data->global_data; sbitmap kills = gd->kills; tree phi, def; unsigned def_uid; sbitmap_zero (kills); for (phi = phi_nodes (bb); phi; phi = TREE_CHAIN (phi)) { def = PHI_RESULT (phi); def_uid = SSA_NAME_VERSION (def); if (!TEST_BIT (gd->names_to_rename, def_uid)) continue; set_def_block (def, bb, true, true); SET_BIT (kills, def_uid); } } /* Marks ssa names used as arguments of phis at the end of BB. */ static void ssa_mark_phi_uses (struct dom_walk_data *walk_data, basic_block bb) { struct mark_def_sites_global_data *gd = walk_data->global_data; sbitmap kills = gd->kills; edge e; tree phi, use; unsigned uid; for (e = bb->succ; e; e = e->succ_next) { if (e->dest == EXIT_BLOCK_PTR) continue; for (phi = phi_nodes (e->dest); phi; phi = TREE_CHAIN (phi)) { use = PHI_ARG_DEF_FROM_EDGE (phi, e); if (TREE_CODE (use) != SSA_NAME) continue; uid = SSA_NAME_VERSION (use); if (TEST_BIT (gd->names_to_rename, uid) && !TEST_BIT (kills, uid)) set_livein_block (use, bb); } } } /* Call back for walk_dominator_tree used to collect definition sites for every variable in the function. For every statement S in block BB: 1- Variables defined by S in DEF_OPS(S) are marked in the bitmap WALK_DATA->GLOBAL_DATA->KILLS. 2- If S uses a variable VAR and there is no preceding kill of VAR, then it is marked in marked in the LIVEIN_BLOCKS bitmap associated with VAR. This information is used to determine which variables are live across block boundaries to reduce the number of PHI nodes we create. 
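*/

/* A stand-alone sketch of the worklist propagation performed by
   compute_global_livein above: starting from the blocks where the
   variable is locally live on entry, liveness is pushed backwards
   through predecessors until a block that defines the variable is
   reached (the real routine also stops at the entry block).  Bitmaps
   are plain unsigned longs and the CFG is a toy predecessor table;
   demo_* names are illustrative only.  */

#define DEMO_MAX_BLOCKS 32

struct demo_cfg
{
  int n_blocks;
  int n_preds[DEMO_MAX_BLOCKS];
  int preds[DEMO_MAX_BLOCKS][4];   /* predecessor indices per block */
};

static void
demo_compute_global_livein (const struct demo_cfg *cfg,
                            unsigned long *livein, unsigned long def_blocks)
{
  int worklist[DEMO_MAX_BLOCKS];
  int tos = 0, i, p;

  /* Seed the worklist with every block already known to be live-in.  */
  for (i = 0; i < cfg->n_blocks; i++)
    if (*livein & (1UL << i))
      worklist[tos++] = i;

  while (tos > 0)
    {
      int bb = worklist[--tos];
      for (p = 0; p < cfg->n_preds[bb]; p++)
        {
          int pred = cfg->preds[bb][p];
          if (!(*livein & (1UL << pred)) && !(def_blocks & (1UL << pred)))
            {
              *livein |= 1UL << pred;
              worklist[tos++] = pred;
            }
        }
    }
}

/*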
*/ static void mark_def_sites (struct dom_walk_data *walk_data, basic_block bb, block_stmt_iterator bsi) { struct mark_def_sites_global_data *gd = walk_data->global_data; sbitmap kills = gd->kills; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; vuse_optype vuses; def_optype defs; use_optype uses; size_t i, uid; tree stmt; stmt_ann_t ann; /* Mark all the blocks that have definitions for each variable in the VARS_TO_RENAME bitmap. */ stmt = bsi_stmt (bsi); get_stmt_operands (stmt); ann = stmt_ann (stmt); /* If a variable is used before being set, then the variable is live across a block boundary, so mark it live-on-entry to BB. */ uses = USE_OPS (ann); for (i = 0; i < NUM_USES (uses); i++) { use_operand_p use_p = USE_OP_PTR (uses, i); if (prepare_use_operand_for_rename (use_p, &uid) && !TEST_BIT (kills, uid)) set_livein_block (USE_FROM_PTR (use_p), bb); } /* Similarly for virtual uses. */ vuses = VUSE_OPS (ann); for (i = 0; i < NUM_VUSES (vuses); i++) { use_operand_p use_p = VUSE_OP_PTR (vuses, i); if (prepare_use_operand_for_rename (use_p, &uid) && !TEST_BIT (kills, uid)) set_livein_block (USE_FROM_PTR (use_p), bb); } /* Note that virtual definitions are irrelevant for computing KILLS because a V_MAY_DEF does not constitute a killing definition of the variable. However, the operand of a virtual definitions is a use of the variable, so it may cause the variable to be considered live-on-entry. */ v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { use_operand_p use_p = V_MAY_DEF_OP_PTR (v_may_defs, i); if (prepare_use_operand_for_rename (use_p, &uid)) { /* If we do not already have an SSA_NAME for our destination, then set the destination to the source. */ if (TREE_CODE (V_MAY_DEF_RESULT (v_may_defs, i)) != SSA_NAME) SET_V_MAY_DEF_RESULT (v_may_defs, i, USE_FROM_PTR (use_p)); set_livein_block (USE_FROM_PTR (use_p), bb); set_def_block (V_MAY_DEF_RESULT (v_may_defs, i), bb, false, false); } } /* Now process the virtual must-defs made by this statement. */ v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { tree def = V_MUST_DEF_OP (v_must_defs, i); if (prepare_def_operand_for_rename (def, &uid)) { set_def_block (def, bb, false, false); SET_BIT (kills, uid); } } /* Now process the definition made by this statement. Mark the variables in KILLS. */ defs = DEF_OPS (ann); for (i = 0; i < NUM_DEFS (defs); i++) { tree def = DEF_OP (defs, i); if (prepare_def_operand_for_rename (def, &uid)) { set_def_block (def, bb, false, false); SET_BIT (kills, uid); } } } /* Ditto, but works over ssa names. */ static void ssa_mark_def_sites (struct dom_walk_data *walk_data, basic_block bb, block_stmt_iterator bsi) { struct mark_def_sites_global_data *gd = walk_data->global_data; sbitmap kills = gd->kills; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; vuse_optype vuses; def_optype defs; use_optype uses; size_t i, uid, def_uid; tree stmt, use, def; stmt_ann_t ann; /* Mark all the blocks that have definitions for each variable in the names_to_rename bitmap. */ stmt = bsi_stmt (bsi); get_stmt_operands (stmt); ann = stmt_ann (stmt); /* If a variable is used before being set, then the variable is live across a block boundary, so mark it live-on-entry to BB. */ uses = USE_OPS (ann); for (i = 0; i < NUM_USES (uses); i++) { use = USE_OP (uses, i); uid = SSA_NAME_VERSION (use); if (TEST_BIT (gd->names_to_rename, uid) && !TEST_BIT (kills, uid)) set_livein_block (use, bb); } /* Similarly for virtual uses. 
*/ vuses = VUSE_OPS (ann); for (i = 0; i < NUM_VUSES (vuses); i++) { use = VUSE_OP (vuses, i); uid = SSA_NAME_VERSION (use); if (TEST_BIT (gd->names_to_rename, uid) && !TEST_BIT (kills, uid)) set_livein_block (use, bb); } v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { use = V_MAY_DEF_OP (v_may_defs, i); uid = SSA_NAME_VERSION (use); if (TEST_BIT (gd->names_to_rename, uid) && !TEST_BIT (kills, uid)) set_livein_block (use, bb); } /* Now process the definition made by this statement. Mark the variables in KILLS. */ defs = DEF_OPS (ann); for (i = 0; i < NUM_DEFS (defs); i++) { def = DEF_OP (defs, i); def_uid = SSA_NAME_VERSION (def); if (TEST_BIT (gd->names_to_rename, def_uid)) { set_def_block (def, bb, false, true); SET_BIT (kills, def_uid); } } v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { def = V_MAY_DEF_RESULT (v_may_defs, i); def_uid = SSA_NAME_VERSION (def); if (TEST_BIT (gd->names_to_rename, def_uid)) { set_def_block (def, bb, false, true); SET_BIT (kills, def_uid); } } v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { def = V_MUST_DEF_OP (v_must_defs, i); def_uid = SSA_NAME_VERSION (def); if (TEST_BIT (gd->names_to_rename, def_uid)) { set_def_block (def, bb, false, true); SET_BIT (kills, def_uid); } } } /* Mark block BB as the definition site for variable VAR. PHI_P is true if VAR is defined by a phi node. SSA_P is true if we are called from rewrite_ssa_into_ssa. */ static void set_def_block (tree var, basic_block bb, bool phi_p, bool ssa_p) { struct def_blocks_d *db_p; enum need_phi_state state; if (!ssa_p && TREE_CODE (var) == SSA_NAME) var = SSA_NAME_VAR (var); state = get_phi_state (var); db_p = get_def_blocks_for (var); /* Set the bit corresponding to the block where VAR is defined. */ bitmap_set_bit (db_p->def_blocks, bb->index); if (phi_p) bitmap_set_bit (db_p->phi_blocks, bb->index); /* Keep track of whether or not we may need to insert phi nodes. If we are in the UNKNOWN state, then this is the first definition of VAR. Additionally, we have not seen any uses of VAR yet, so we do not need a phi node for this variable at this time (i.e., transition to NEED_PHI_STATE_NO). If we are in any other state, then we either have multiple definitions of this variable occurring in different blocks or we saw a use of the variable which was not dominated by the block containing the definition(s). In this case we may need a PHI node, so enter state NEED_PHI_STATE_MAYBE. */ if (state == NEED_PHI_STATE_UNKNOWN) set_phi_state (var, NEED_PHI_STATE_NO); else set_phi_state (var, NEED_PHI_STATE_MAYBE); } /* Mark block BB as having VAR live at the entry to BB. */ static void set_livein_block (tree var, basic_block bb) { struct def_blocks_d *db_p; enum need_phi_state state = get_phi_state (var); db_p = get_def_blocks_for (var); /* Set the bit corresponding to the block where VAR is live in. */ bitmap_set_bit (db_p->livein_blocks, bb->index); /* Keep track of whether or not we may need to insert phi nodes. If we reach here in NEED_PHI_STATE_NO, see if this use is dominated by the single block containing the definition(s) of this variable. If it is, then we remain in NEED_PHI_STATE_NO, otherwise we transition to NEED_PHI_STATE_MAYBE. */ if (state == NEED_PHI_STATE_NO) { int def_block_index = bitmap_first_set_bit (db_p->def_blocks); if (def_block_index == -1 || ! 
dominated_by_p (CDI_DOMINATORS, bb, BASIC_BLOCK (def_block_index))) set_phi_state (var, NEED_PHI_STATE_MAYBE); } else set_phi_state (var, NEED_PHI_STATE_MAYBE); } /* If the use operand pointed to by OP_P needs to be renamed, then strip away any SSA_NAME wrapping the operand, set *UID_P to the underlying variable's uid, and return true. Otherwise return false. If the operand was an SSA_NAME, change it to the stipped name. */ static bool prepare_use_operand_for_rename (use_operand_p op_p, size_t *uid_p) { tree use = USE_FROM_PTR (op_p); tree var = (TREE_CODE (use) != SSA_NAME) ? use : SSA_NAME_VAR (use); *uid_p = var_ann (var)->uid; /* Ignore variables that don't need to be renamed. */ if (vars_to_rename && !bitmap_bit_p (vars_to_rename, *uid_p)) return false; /* The variable needs to be renamed. If this is a use which already has an SSA_NAME, then strip it off. By not throwing away SSA_NAMEs on assignments, we avoid a lot of useless churn of SSA_NAMEs without having to overly complicate the renamer. */ if (TREE_CODE (use) == SSA_NAME) SET_USE (op_p, var); return true; } /* If the def variable DEF needs to be renamed, then strip away any SSA_NAME wrapping the operand, set *UID_P to the underlying variable's uid and return true. Otherwise return false. */ static bool prepare_def_operand_for_rename (tree def, size_t *uid_p) { tree var = (TREE_CODE (def) != SSA_NAME) ? def : SSA_NAME_VAR (def); *uid_p = var_ann (var)->uid; /* Ignore variables that don't need to be renamed. */ if (vars_to_rename && !bitmap_bit_p (vars_to_rename, *uid_p)) return false; return true; } /* Helper for insert_phi_nodes. If VAR needs PHI nodes, insert them at the dominance frontier (DFS) of blocks defining VAR. WORK_STACK is the varray used to implement the worklist of basic blocks. */ static inline void insert_phi_nodes_1 (tree var, bitmap *dfs, varray_type *work_stack) { if (get_phi_state (var) != NEED_PHI_STATE_NO) insert_phi_nodes_for (var, dfs, work_stack); } /* Insert PHI nodes at the dominance frontier of blocks with variable definitions. DFS contains the dominance frontier information for the flowgraph. PHI nodes will only be inserted at the dominance frontier of definition blocks for variables whose NEED_PHI_STATE annotation is marked as ``maybe'' or ``unknown'' (computed by mark_def_sites). If NAMES_TO_RENAME is not NULL, do the same but for ssa name rewriting. */ static void insert_phi_nodes (bitmap *dfs, bitmap names_to_rename) { size_t i; varray_type work_stack; timevar_push (TV_TREE_INSERT_PHI_NODES); /* Array WORK_STACK is a stack of CFG blocks. Each block that contains an assignment or PHI node will be pushed to this stack. */ VARRAY_GENERIC_PTR_NOGC_INIT (work_stack, last_basic_block, "work_stack"); /* Iterate over all variables in VARS_TO_RENAME. For each variable, add to the work list all the blocks that have a definition for the variable. PHI nodes will be added to the dominance frontier blocks of each definition block. */ if (names_to_rename) { EXECUTE_IF_SET_IN_BITMAP (names_to_rename, 0, i, { if (ssa_name (i)) insert_phi_nodes_1 (ssa_name (i), dfs, &work_stack); }); } else if (vars_to_rename) EXECUTE_IF_SET_IN_BITMAP (vars_to_rename, 0, i, insert_phi_nodes_1 (referenced_var (i), dfs, &work_stack)); else for (i = 0; i < num_referenced_vars; i++) insert_phi_nodes_1 (referenced_var (i), dfs, &work_stack); VARRAY_FREE (work_stack); timevar_pop (TV_TREE_INSERT_PHI_NODES); } /* Perform a depth-first traversal of the dominator tree looking for variables to rename. 
BB is the block where to start searching. Renaming is a five step process: 1- Every definition made by PHI nodes at the start of the blocks is registered as the current definition for the corresponding variable. 2- Every statement in BB is rewritten. USE and VUSE operands are rewritten with their corresponding reaching definition. DEF and VDEF targets are registered as new definitions. 3- All the PHI nodes in successor blocks of BB are visited. The argument corresponding to BB is replaced with its current reaching definition. 4- Recursively rewrite every dominator child block of BB. 5- Restore (in reverse order) the current reaching definition for every new definition introduced in this block. This is done so that when we return from the recursive call, all the current reaching definitions are restored to the names that were valid in the dominator parent of BB. */ /* Initialize the local stacks. BLOCK_DEFS is used to save all the existing reaching definitions for the new SSA names introduced in this block. Before registering a new definition for a variable, the existing reaching definition is pushed into this stack so that we can restore it in Step 5. */ static void rewrite_initialize_block_local_data (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED, basic_block bb ATTRIBUTE_UNUSED, bool recycled ATTRIBUTE_UNUSED) { #ifdef ENABLE_CHECKING struct rewrite_block_data *bd = (struct rewrite_block_data *)VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); /* We get cleared memory from the allocator, so if the memory is not cleared, then we are re-using a previously allocated entry. In that case, we can also re-use the underlying virtual arrays. Just make sure we clear them before using them! */ if (recycled && bd->block_defs && VARRAY_ACTIVE_SIZE (bd->block_defs) > 0) abort (); #endif } /* SSA Rewriting Step 1. Initialization, create a block local stack of reaching definitions for new SSA names produced in this block (BLOCK_DEFS). Register new definitions for every PHI node in the block. */ static void rewrite_initialize_block (struct dom_walk_data *walk_data, basic_block bb) { tree phi; struct rewrite_block_data *bd = (struct rewrite_block_data *)VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\n\nRenaming block #%d\n\n", bb->index); /* Step 1. Register new definitions for every PHI node in the block. Conceptually, all the PHI nodes are executed in parallel and each PHI node introduces a new version for the associated variable. */ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { tree result = PHI_RESULT (phi); register_new_def (result, &bd->block_defs); } } /* Register DEF (an SSA_NAME) to be a new definition for the original ssa name VAR and push VAR's current reaching definition into the stack pointed by BLOCK_DEFS_P. */ static void ssa_register_new_def (tree var, tree def, varray_type *block_defs_p) { tree currdef; /* If this variable is set in a single basic block and all uses are dominated by the set(s) in that single basic block, then there is nothing to do. TODO we should not be called at all, and just keep the original name. */ if (get_phi_state (var) == NEED_PHI_STATE_NO) { set_current_def (var, def); return; } currdef = get_current_def (var); if (! *block_defs_p) VARRAY_TREE_INIT (*block_defs_p, 20, "block_defs"); /* Push the current reaching definition into *BLOCK_DEFS_P. 
This stack is later used by the dominator tree callbacks to restore the reaching definitions for all the variables defined in the block after a recursive visit to all its immediately dominated blocks. */ VARRAY_PUSH_TREE (*block_defs_p, var); VARRAY_PUSH_TREE (*block_defs_p, currdef); /* Set the current reaching definition for VAR to be DEF. */ set_current_def (var, def); } /* Ditto, for rewriting ssa names. */ static void ssa_rewrite_initialize_block (struct dom_walk_data *walk_data, basic_block bb) { tree phi, new_name; struct rewrite_block_data *bd = (struct rewrite_block_data *)VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); sbitmap names_to_rename = walk_data->global_data; edge e; bool abnormal_phi; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\n\nRenaming block #%d\n\n", bb->index); for (e = bb->pred; e; e = e->pred_next) if (e->flags & EDGE_ABNORMAL) break; abnormal_phi = (e != NULL); /* Step 1. Register new definitions for every PHI node in the block. Conceptually, all the PHI nodes are executed in parallel and each PHI node introduces a new version for the associated variable. */ for (phi = phi_nodes (bb); phi; phi = TREE_CHAIN (phi)) { tree result = PHI_RESULT (phi); if (TEST_BIT (names_to_rename, SSA_NAME_VERSION (result))) { new_name = duplicate_ssa_name (result, phi); SET_PHI_RESULT (phi, new_name); if (abnormal_phi) SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_name) = 1; } else new_name = result; ssa_register_new_def (result, new_name, &bd->block_defs); } } /* SSA Rewriting Step 3. Visit all the successor blocks of BB looking for PHI nodes. For every PHI node found, add a new argument containing the current reaching definition for the variable and the edge through which that definition is reaching the PHI node. */ static void rewrite_add_phi_arguments (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED, basic_block bb) { edge e; for (e = bb->succ; e; e = e->succ_next) { tree phi; for (phi = phi_nodes (e->dest); phi; phi = TREE_CHAIN (phi)) { tree currdef; /* If this PHI node has already been rewritten, then there is nothing to do for this PHI or any following PHIs since we always add new PHI nodes at the start of the PHI chain. */ if (PHI_REWRITTEN (phi)) break; currdef = get_reaching_def (SSA_NAME_VAR (PHI_RESULT (phi))); add_phi_arg (&phi, currdef, e); } } } /* Ditto, for ssa name rewriting. */ static void ssa_rewrite_phi_arguments (struct dom_walk_data *walk_data, basic_block bb) { edge e; sbitmap names_to_rename = walk_data->global_data; use_operand_p op; for (e = bb->succ; e; e = e->succ_next) { tree phi; if (e->dest == EXIT_BLOCK_PTR) continue; for (phi = phi_nodes (e->dest); phi; phi = TREE_CHAIN (phi)) { op = PHI_ARG_DEF_PTR_FROM_EDGE (phi, e); if (TREE_CODE (USE_FROM_PTR (op)) != SSA_NAME) continue; if (!TEST_BIT (names_to_rename, SSA_NAME_VERSION (USE_FROM_PTR (op)))) continue; SET_USE (op, get_reaching_def (USE_FROM_PTR (op))); if (e->flags & EDGE_ABNORMAL) SSA_NAME_OCCURS_IN_ABNORMAL_PHI (USE_FROM_PTR (op)) = 1; } } } /* SSA Rewriting Step 5. Restore the current reaching definition for each variable referenced in the block (in reverse order). */ static void rewrite_finalize_block (struct dom_walk_data *walk_data, basic_block bb ATTRIBUTE_UNUSED) { struct rewrite_block_data *bd = (struct rewrite_block_data *)VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); /* Step 5. Restore the current reaching definition for each variable referenced in the block (in reverse order). 
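A hypothetical illustration of the stack layout this loop unwinds (the names a_1, a_3 and b_2 are made up): if this block registered a_3 as a new definition of 'a' (whose previous reaching definition was a_1) and b_2 as the very first definition of 'b', then register_new_def pushed a_1 and then the _DECL for 'b' onto BLOCK_DEFS. Popping in reverse order, the loop below first sees the _DECL and clears b's current definition back to NULL, then sees the SSA_NAME a_1 and restores it as a's current definition.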
*/ while (bd->block_defs && VARRAY_ACTIVE_SIZE (bd->block_defs) > 0) { tree tmp = VARRAY_TOP_TREE (bd->block_defs); tree saved_def, var; VARRAY_POP (bd->block_defs); if (TREE_CODE (tmp) == SSA_NAME) { saved_def = tmp; var = SSA_NAME_VAR (saved_def); } else { saved_def = NULL; var = tmp; } set_current_def (var, saved_def); } } /* Ditto, for rewriting ssa names. */ static void ssa_rewrite_finalize_block (struct dom_walk_data *walk_data, basic_block bb ATTRIBUTE_UNUSED) { struct rewrite_block_data *bd = (struct rewrite_block_data *)VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); /* Step 5. Restore the current reaching definition for each variable referenced in the block (in reverse order). */ while (bd->block_defs && VARRAY_ACTIVE_SIZE (bd->block_defs) > 0) { tree var; tree saved_def = VARRAY_TOP_TREE (bd->block_defs); VARRAY_POP (bd->block_defs); var = VARRAY_TOP_TREE (bd->block_defs); VARRAY_POP (bd->block_defs); set_current_def (var, saved_def); } } /* Dump SSA information to FILE. */ void dump_tree_ssa (FILE *file) { basic_block bb; const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); fprintf (file, "SSA information for %s\n\n", funcname); FOR_EACH_BB (bb) { dump_bb (bb, file, 0); fputs (" ", file); print_generic_stmt (file, phi_nodes (bb), dump_flags); fputs ("\n\n", file); } } /* Dump SSA information to stderr. */ void debug_tree_ssa (void) { dump_tree_ssa (stderr); } /* Dump SSA statistics on FILE. */ void dump_tree_ssa_stats (FILE *file) { fprintf (file, "\nHash table statistics:\n"); fprintf (file, " def_blocks: "); htab_statistics (file, def_blocks); fprintf (file, "\n"); } /* Dump SSA statistics on stderr. */ void debug_tree_ssa_stats (void) { dump_tree_ssa_stats (stderr); } /* Dump statistics for the hash table HTAB. */ static void htab_statistics (FILE *file, htab_t htab) { fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n", (long) htab_size (htab), (long) htab_elements (htab), htab_collisions (htab)); } /* Insert PHI nodes for variable VAR using the dominance frontier information given in DFS. WORK_STACK is the varray used to implement the worklist of basic blocks. */ static void insert_phi_nodes_for (tree var, bitmap *dfs, varray_type *work_stack) { struct def_blocks_d *def_map; bitmap phi_insertion_points; int bb_index; edge e; tree phi; basic_block bb; def_map = find_def_blocks_for (var); if (def_map == NULL) return; phi_insertion_points = BITMAP_XMALLOC (); EXECUTE_IF_SET_IN_BITMAP (def_map->def_blocks, 0, bb_index, { VARRAY_PUSH_GENERIC_PTR_NOGC (*work_stack, BASIC_BLOCK (bb_index)); }); /* Pop a block off the worklist, add every block that appears in the original block's dfs that we have not already processed to the worklist. Iterate until the worklist is empty. Blocks which are added to the worklist are potential sites for PHI nodes. The iteration step could be done during PHI insertion just as easily. We do it here for historical reasons -- we used to have a heuristic which used the potential PHI insertion points to determine if fully pruned or semi pruned SSA form was appropriate. We now always use fully pruned SSA form. 
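A small hypothetical example of the iteration below: suppose VAR is defined in blocks 2 and 3, with dominance frontiers DFS[2] = {5}, DFS[3] = {5, 7} and DFS[5] = DFS[7] = {}. The worklist starts as {2, 3}; processing 2 adds 5 to PHI_INSERTION_POINTS, processing 3 adds 7 (5 is already marked), and 5 and 7 contribute nothing further. Blocks that already have a PHI for VAR are then removed, and the final intersection with LIVEIN_BLOCKS decides where a PHI node is actually created.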
*/ while (VARRAY_ACTIVE_SIZE (*work_stack) > 0) { int dfs_index; bb = VARRAY_TOP_GENERIC_PTR_NOGC (*work_stack); bb_index = bb->index; VARRAY_POP (*work_stack); EXECUTE_IF_AND_COMPL_IN_BITMAP (dfs[bb_index], phi_insertion_points, 0, dfs_index, { basic_block bb = BASIC_BLOCK (dfs_index); VARRAY_PUSH_GENERIC_PTR_NOGC (*work_stack, bb); bitmap_set_bit (phi_insertion_points, dfs_index); }); } /* Remove the blocks where we already have the phis. */ bitmap_operation (phi_insertion_points, phi_insertion_points, def_map->phi_blocks, BITMAP_AND_COMPL); /* Now compute global livein for this variable. Note this modifies def_map->livein_blocks. */ compute_global_livein (def_map->livein_blocks, def_map->def_blocks); /* And insert the PHI nodes. */ EXECUTE_IF_AND_IN_BITMAP (phi_insertion_points, def_map->livein_blocks, 0, bb_index, do { bb = BASIC_BLOCK (bb_index); phi = create_phi_node (var, bb); /* If we are rewriting ssa names, add also the phi arguments. */ if (TREE_CODE (var) == SSA_NAME) { for (e = bb->pred; e; e = e->pred_next) add_phi_arg (&phi, var, e); } } while (0)); BITMAP_XFREE (phi_insertion_points); } /* SSA Rewriting Step 2. Rewrite every variable used in each statement in the block with its immediate reaching definitions. Update the current definition of a variable when a new real or virtual definition is found. */ static void rewrite_stmt (struct dom_walk_data *walk_data, basic_block bb ATTRIBUTE_UNUSED, block_stmt_iterator si) { size_t i; stmt_ann_t ann; tree stmt; vuse_optype vuses; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; def_optype defs; use_optype uses; struct rewrite_block_data *bd; bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); stmt = bsi_stmt (si); ann = stmt_ann (stmt); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Renaming statement "); print_generic_stmt (dump_file, stmt, TDF_SLIM); fprintf (dump_file, "\n"); } #if defined ENABLE_CHECKING /* We have just scanned the code for operands. No statement should be modified. */ if (ann->modified) abort (); #endif defs = DEF_OPS (ann); uses = USE_OPS (ann); vuses = VUSE_OPS (ann); v_may_defs = V_MAY_DEF_OPS (ann); v_must_defs = V_MUST_DEF_OPS (ann); /* Step 1. Rewrite USES and VUSES in the statement. */ for (i = 0; i < NUM_USES (uses); i++) rewrite_operand (USE_OP_PTR (uses, i)); /* Rewrite virtual uses in the statement. */ for (i = 0; i < NUM_VUSES (vuses); i++) rewrite_operand (VUSE_OP_PTR (vuses, i)); /* Step 2. Register the statement's DEF and VDEF operands. */ for (i = 0; i < NUM_DEFS (defs); i++) { def_operand_p def_p = DEF_OP_PTR (defs, i); if (TREE_CODE (DEF_FROM_PTR (def_p)) != SSA_NAME) SET_DEF (def_p, make_ssa_name (DEF_FROM_PTR (def_p), stmt)); /* FIXME: We shouldn't be registering new defs if the variable doesn't need to be renamed. */ register_new_def (DEF_FROM_PTR (def_p), &bd->block_defs); } /* Register new virtual definitions made by the statement. */ for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { rewrite_operand (V_MAY_DEF_OP_PTR (v_may_defs, i)); if (TREE_CODE (V_MAY_DEF_RESULT (v_may_defs, i)) != SSA_NAME) SET_V_MAY_DEF_RESULT (v_may_defs, i, make_ssa_name (V_MAY_DEF_RESULT (v_may_defs, i), stmt)); /* FIXME: We shouldn't be registering new defs if the variable doesn't need to be renamed. */ register_new_def (V_MAY_DEF_RESULT (v_may_defs, i), &bd->block_defs); } /* Register new virtual mustdefs made by the statement. 
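(Note that, in contrast to the V_MAY_DEF loop above, no use operand is rewritten here; each V_MUST_DEF visited below only has its result replaced with a fresh SSA name.)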
*/ for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { def_operand_p v_must_def_p = V_MUST_DEF_OP_PTR (v_must_defs, i); if (TREE_CODE (DEF_FROM_PTR (v_must_def_p)) != SSA_NAME) SET_DEF (v_must_def_p, make_ssa_name (DEF_FROM_PTR (v_must_def_p), stmt)); /* FIXME: We shouldn't be registering new mustdefs if the variable doesn't need to be renamed. */ register_new_def (DEF_FROM_PTR (v_must_def_p), &bd->block_defs); } } /* Ditto, for rewriting ssa names. */ static void ssa_rewrite_stmt (struct dom_walk_data *walk_data, basic_block bb ATTRIBUTE_UNUSED, block_stmt_iterator si) { size_t i; stmt_ann_t ann; tree stmt, var; use_operand_p use_p; def_operand_p def_p; vuse_optype vuses; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; def_optype defs; use_optype uses; struct rewrite_block_data *bd; sbitmap names_to_rename = walk_data->global_data; bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); stmt = bsi_stmt (si); ann = stmt_ann (stmt); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Renaming statement "); print_generic_stmt (dump_file, stmt, TDF_SLIM); fprintf (dump_file, "\n"); } #if defined ENABLE_CHECKING /* We have just scanned the code for operands. No statement should be modified. */ if (ann->modified) abort (); #endif defs = DEF_OPS (ann); uses = USE_OPS (ann); vuses = VUSE_OPS (ann); v_may_defs = V_MAY_DEF_OPS (ann); v_must_defs = V_MUST_DEF_OPS (ann); /* Step 1. Rewrite USES and VUSES in the statement. */ for (i = 0; i < NUM_USES (uses); i++) { use_p = USE_OP_PTR (uses, i); if (TEST_BIT (names_to_rename, SSA_NAME_VERSION (USE_FROM_PTR (use_p)))) SET_USE (use_p, get_reaching_def (USE_FROM_PTR (use_p))); } /* Rewrite virtual uses in the statement. */ for (i = 0; i < NUM_VUSES (vuses); i++) { use_p = VUSE_OP_PTR (vuses, i); if (TEST_BIT (names_to_rename, SSA_NAME_VERSION (USE_FROM_PTR (use_p)))) SET_USE (use_p, get_reaching_def (USE_FROM_PTR (use_p))); } for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { use_p = V_MAY_DEF_OP_PTR (v_may_defs, i); if (TEST_BIT (names_to_rename, SSA_NAME_VERSION (USE_FROM_PTR (use_p)))) SET_USE (use_p, get_reaching_def (USE_FROM_PTR (use_p))); } /* Step 2. Register the statement's DEF and VDEF operands. */ for (i = 0; i < NUM_DEFS (defs); i++) { def_p = DEF_OP_PTR (defs, i); var = DEF_FROM_PTR (def_p); if (!TEST_BIT (names_to_rename, SSA_NAME_VERSION (var))) continue; SET_DEF (def_p, duplicate_ssa_name (var, stmt)); ssa_register_new_def (var, DEF_FROM_PTR (def_p), &bd->block_defs); } /* Register new virtual definitions made by the statement. */ for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { def_p = V_MAY_DEF_RESULT_PTR (v_may_defs, i); var = DEF_FROM_PTR (def_p); if (!TEST_BIT (names_to_rename, SSA_NAME_VERSION (var))) continue; SET_DEF (def_p, duplicate_ssa_name (var, stmt)); ssa_register_new_def (var, DEF_FROM_PTR (def_p), &bd->block_defs); } for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { def_p = V_MUST_DEF_OP_PTR (v_must_defs, i); var = DEF_FROM_PTR (def_p); if (!TEST_BIT (names_to_rename, SSA_NAME_VERSION (var))) continue; SET_DEF (def_p, duplicate_ssa_name (var, stmt)); ssa_register_new_def (var, DEF_FROM_PTR (def_p), &bd->block_defs); } } /* Replace the operand pointed by OP_P with its immediate reaching definition. 
*/ static inline void rewrite_operand (use_operand_p op_p) { if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME) SET_USE (op_p, get_reaching_def (USE_FROM_PTR (op_p))); } /* Register DEF (an SSA_NAME) to be a new definition for its underlying variable (SSA_NAME_VAR (DEF)) and push VAR's current reaching definition into the stack pointed by BLOCK_DEFS_P. */ void register_new_def (tree def, varray_type *block_defs_p) { tree var = SSA_NAME_VAR (def); tree currdef; /* If this variable is set in a single basic block and all uses are dominated by the set(s) in that single basic block, then there is no reason to record anything for this variable in the block local definition stacks. Doing so just wastes time and memory. This is the same test to prune the set of variables which may need PHI nodes. So we just use that information since it's already computed and available for us to use. */ if (get_phi_state (var) == NEED_PHI_STATE_NO) { set_current_def (var, def); return; } currdef = get_current_def (var); if (! *block_defs_p) VARRAY_TREE_INIT (*block_defs_p, 20, "block_defs"); /* Push the current reaching definition into *BLOCK_DEFS_P. This stack is later used by the dominator tree callbacks to restore the reaching definitions for all the variables defined in the block after a recursive visit to all its immediately dominated blocks. If there is no current reaching definition, then just record the underlying _DECL node. */ VARRAY_PUSH_TREE (*block_defs_p, currdef ? currdef : var); /* Set the current reaching definition for VAR to be DEF. */ set_current_def (var, def); } /* Return the current definition for variable VAR. If none is found, create a new SSA name to act as the zeroth definition for VAR. If VAR is call clobbered and there exists a more recent definition of GLOBAL_VAR, return the definition for GLOBAL_VAR. This means that VAR has been clobbered by a function call since its last assignment. */ static tree get_reaching_def (tree var) { tree default_d, currdef_var, avar; /* Lookup the current reaching definition for VAR. */ default_d = NULL_TREE; currdef_var = get_current_def (var); /* If there is no reaching definition for VAR, create and register a default definition for it (if needed). */ if (currdef_var == NULL_TREE) { if (TREE_CODE (var) == SSA_NAME) avar = SSA_NAME_VAR (var); else avar = var; default_d = default_def (avar); if (default_d == NULL_TREE) { default_d = make_ssa_name (avar, build_empty_stmt ()); set_default_def (avar, default_d); } set_current_def (var, default_d); } /* Return the current reaching definition for VAR, or the default definition, if we had to create one. */ return (currdef_var) ? currdef_var : default_d; } /* Hashing and equality functions for DEF_BLOCKS. */ static hashval_t def_blocks_hash (const void *p) { return htab_hash_pointer ((const void *)((const struct def_blocks_d *)p)->var); } static int def_blocks_eq (const void *p1, const void *p2) { return ((const struct def_blocks_d *)p1)->var == ((const struct def_blocks_d *)p2)->var; } /* Free memory allocated by one entry in DEF_BLOCKS. */ static void def_blocks_free (void *p) { struct def_blocks_d *entry = p; BITMAP_XFREE (entry->def_blocks); BITMAP_XFREE (entry->phi_blocks); BITMAP_XFREE (entry->livein_blocks); free (entry); } /* Dump the DEF_BLOCKS hash table on stderr. */ void debug_def_blocks (void) { htab_traverse (def_blocks, debug_def_blocks_r, NULL); } /* Callback for htab_traverse to dump the DEF_BLOCKS hash table. 
*/ static int debug_def_blocks_r (void **slot, void *data ATTRIBUTE_UNUSED) { unsigned long i; struct def_blocks_d *db_p = (struct def_blocks_d *) *slot; fprintf (stderr, "VAR: "); print_generic_expr (stderr, db_p->var, dump_flags); fprintf (stderr, ", DEF_BLOCKS: { "); EXECUTE_IF_SET_IN_BITMAP (db_p->def_blocks, 0, i, fprintf (stderr, "%ld ", i)); fprintf (stderr, "}"); fprintf (stderr, ", LIVEIN_BLOCKS: { "); EXECUTE_IF_SET_IN_BITMAP (db_p->livein_blocks, 0, i, fprintf (stderr, "%ld ", i)); fprintf (stderr, "}\n"); return 1; } /* Return the set of blocks where variable VAR is defined and the blocks where VAR is live on entry (livein). Return NULL, if no entry is found in DEF_BLOCKS. */ static inline struct def_blocks_d * find_def_blocks_for (tree var) { struct def_blocks_d dm; dm.var = var; return (struct def_blocks_d *) htab_find (def_blocks, &dm); } /* Return the set of blocks where variable VAR is defined and the blocks where VAR is live on entry (livein). If no entry is found in DEF_BLOCKS, a new one is created and returned. */ static inline struct def_blocks_d * get_def_blocks_for (tree var) { struct def_blocks_d db, *db_p; void **slot; db.var = var; slot = htab_find_slot (def_blocks, (void *) &db, INSERT); if (*slot == NULL) { db_p = xmalloc (sizeof (*db_p)); db_p->var = var; db_p->def_blocks = BITMAP_XMALLOC (); db_p->phi_blocks = BITMAP_XMALLOC (); db_p->livein_blocks = BITMAP_XMALLOC (); *slot = (void *) db_p; } else db_p = (struct def_blocks_d *) *slot; return db_p; } /* If a variable V in VARS_TO_RENAME is a pointer, the renaming process will cause us to lose the name memory tags that may have been associated with the various SSA_NAMEs of V. This means that the variables aliased to those name tags also need to be renamed again. FIXME 1- We should either have a better scheme for renaming pointers that doesn't lose name tags or re-run alias analysis to recover points-to information. 2- Currently we just invalidate *all* the name tags. This should be more selective. */ static void invalidate_name_tags (bitmap vars_to_rename) { size_t i; bool rename_name_tags_p; rename_name_tags_p = false; EXECUTE_IF_SET_IN_BITMAP (vars_to_rename, 0, i, if (POINTER_TYPE_P (TREE_TYPE (referenced_var (i)))) { rename_name_tags_p = true; break; }); if (rename_name_tags_p) for (i = 0; i < num_referenced_vars; i++) { var_ann_t ann = var_ann (referenced_var (i)); if (ann->mem_tag_kind == NAME_TAG) { size_t j; varray_type may_aliases = ann->may_aliases; bitmap_set_bit (vars_to_rename, ann->uid); if (ann->may_aliases) for (j = 0; j < VARRAY_ACTIVE_SIZE (may_aliases); j++) { tree var = VARRAY_TREE (may_aliases, j); bitmap_set_bit (vars_to_rename, var_ann (var)->uid); } } } } /* Main entry point into the SSA builder. The renaming process proceeds in five main phases: 1- If VARS_TO_RENAME has any entries, any existing PHI nodes for those variables are removed from the flow graph so that they can be computed again. 2- Compute dominance frontier and immediate dominators, needed to insert PHI nodes and rename the function in dominator tree order. 3- Find and mark all the blocks that define variables (mark_def_sites). 4- Insert PHI nodes at dominance frontiers (insert_phi_nodes). 5- Rename all the blocks (rewrite_initialize_block, rewrite_add_phi_arguments) and statements in the program (rewrite_stmt). Steps 3 and 5 are done using the dominator tree walker (walk_dominator_tree). ALL is true if all variables should be renamed (otherwise just those mentioned in vars_to_rename are taken into account). 
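As a hedged illustration of how this entry point is typically driven (the caller sketched here is hypothetical, not part of this file): a pass that introduced a new variable 'tmp' and only wants it put into SSA form would mark its uid and use the incremental mode,

      bitmap_set_bit (vars_to_rename, var_ann (tmp)->uid);
      rewrite_into_ssa (false);

whereas pass_build_ssa later in this file reaches rewrite_into_ssa (true) via rewrite_all_into_ssa so that every referenced variable is renamed.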
*/ void rewrite_into_ssa (bool all) { bitmap *dfs; basic_block bb; struct dom_walk_data walk_data; struct mark_def_sites_global_data mark_def_sites_global_data; bitmap old_vars_to_rename = vars_to_rename; unsigned i; timevar_push (TV_TREE_SSA_OTHER); if (all) vars_to_rename = NULL; else { /* Initialize the array of variables to rename. */ if (vars_to_rename == NULL) abort (); if (bitmap_first_set_bit (vars_to_rename) < 0) { timevar_pop (TV_TREE_SSA_OTHER); return; } invalidate_name_tags (vars_to_rename); /* Now remove all the existing PHI nodes (if any) for the variables that we are about to rename into SSA. */ remove_all_phi_nodes_for (vars_to_rename); } /* Allocate memory for the DEF_BLOCKS hash table. */ def_blocks = htab_create (VARRAY_ACTIVE_SIZE (referenced_vars), def_blocks_hash, def_blocks_eq, def_blocks_free); /* Initialize dominance frontier and immediate dominator bitmaps. Also count the number of predecessors for each block. Doing so can save significant time during PHI insertion for large graphs. */ dfs = (bitmap *) xmalloc (last_basic_block * sizeof (bitmap *)); FOR_EACH_BB (bb) { edge e; int count = 0; for (e = bb->pred; e; e = e->pred_next) count++; bb_ann (bb)->num_preds = count; dfs[bb->index] = BITMAP_XMALLOC (); } for (i = 0; i < num_referenced_vars; i++) set_current_def (referenced_var (i), NULL_TREE); /* Ensure that the dominance information is OK. */ calculate_dominance_info (CDI_DOMINATORS); /* Compute dominance frontiers. */ compute_dominance_frontiers (dfs); /* Setup callbacks for the generic dominator tree walker to find and mark definition sites. */ walk_data.walk_stmts_backward = false; walk_data.dom_direction = CDI_DOMINATORS; walk_data.initialize_block_local_data = NULL; walk_data.before_dom_children_before_stmts = mark_def_sites_initialize_block; walk_data.before_dom_children_walk_stmts = mark_def_sites; walk_data.before_dom_children_after_stmts = NULL; walk_data.after_dom_children_before_stmts = NULL; walk_data.after_dom_children_walk_stmts = NULL; walk_data.after_dom_children_after_stmts = NULL; /* Notice that this bitmap is indexed using variable UIDs, so it must be large enough to accommodate all the variables referenced in the function, not just the ones we are renaming. */ mark_def_sites_global_data.kills = sbitmap_alloc (num_referenced_vars); walk_data.global_data = &mark_def_sites_global_data; /* We do not have any local data. */ walk_data.block_local_data_size = 0; /* Initialize the dominator walker. */ init_walk_dominator_tree (&walk_data); /* Recursively walk the dominator tree. */ walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR); /* Finalize the dominator walker. */ fini_walk_dominator_tree (&walk_data); /* We no longer need this bitmap, clear and free it. */ sbitmap_free (mark_def_sites_global_data.kills); /* Insert PHI nodes at dominance frontiers of definition blocks. */ insert_phi_nodes (dfs, NULL); /* Rewrite all the basic blocks in the program. */ timevar_push (TV_TREE_SSA_REWRITE_BLOCKS); /* Setup callbacks for the generic dominator tree walker. 
*/ walk_data.walk_stmts_backward = false; walk_data.dom_direction = CDI_DOMINATORS; walk_data.initialize_block_local_data = rewrite_initialize_block_local_data; walk_data.before_dom_children_before_stmts = rewrite_initialize_block; walk_data.before_dom_children_walk_stmts = rewrite_stmt; walk_data.before_dom_children_after_stmts = rewrite_add_phi_arguments; walk_data.after_dom_children_before_stmts = NULL; walk_data.after_dom_children_walk_stmts = NULL; walk_data.after_dom_children_after_stmts = rewrite_finalize_block; walk_data.global_data = NULL; walk_data.block_local_data_size = sizeof (struct rewrite_block_data); /* Initialize the dominator walker. */ init_walk_dominator_tree (&walk_data); /* Recursively walk the dominator tree rewriting each statement in each basic block. */ walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR); /* Finalize the dominator walker. */ fini_walk_dominator_tree (&walk_data); timevar_pop (TV_TREE_SSA_REWRITE_BLOCKS); /* Debugging dumps. */ if (dump_file && (dump_flags & TDF_STATS)) { dump_dfa_stats (dump_file); dump_tree_ssa_stats (dump_file); } /* Free allocated memory. */ FOR_EACH_BB (bb) BITMAP_XFREE (dfs[bb->index]); free (dfs); htab_delete (def_blocks); vars_to_rename = old_vars_to_rename; timevar_pop (TV_TREE_SSA_OTHER); } /* The ssa names in NAMES_TO_RENAME may have more than one definition; add phi nodes and rewrite them to fix this. */ void rewrite_ssa_into_ssa (bitmap names_to_rename) { bitmap *dfs; basic_block bb; struct dom_walk_data walk_data; struct mark_def_sites_global_data mark_def_sites_global_data; unsigned i; sbitmap snames_to_rename; tree name; if (bitmap_first_set_bit (names_to_rename) < 0) return; timevar_push (TV_TREE_SSA_OTHER); /* Allocate memory for the DEF_BLOCKS hash table. */ def_blocks = htab_create (num_ssa_names, def_blocks_hash, def_blocks_eq, def_blocks_free); /* Initialize dominance frontier and immediate dominator bitmaps. Also count the number of predecessors for each block. Doing so can save significant time during PHI insertion for large graphs. */ dfs = (bitmap *) xmalloc (last_basic_block * sizeof (bitmap *)); FOR_EACH_BB (bb) { edge e; int count = 0; for (e = bb->pred; e; e = e->pred_next) count++; bb_ann (bb)->num_preds = count; dfs[bb->index] = BITMAP_XMALLOC (); } /* Ensure that the dominance information is OK. */ calculate_dominance_info (CDI_DOMINATORS); /* Compute dominance frontiers. */ compute_dominance_frontiers (dfs); /* Setup callbacks for the generic dominator tree walker to find and mark definition sites. */ walk_data.walk_stmts_backward = false; walk_data.dom_direction = CDI_DOMINATORS; walk_data.initialize_block_local_data = NULL; walk_data.before_dom_children_before_stmts = ssa_mark_def_sites_initialize_block; walk_data.before_dom_children_walk_stmts = ssa_mark_def_sites; walk_data.before_dom_children_after_stmts = ssa_mark_phi_uses; walk_data.after_dom_children_before_stmts = NULL; walk_data.after_dom_children_walk_stmts = NULL; walk_data.after_dom_children_after_stmts = NULL; snames_to_rename = sbitmap_alloc (num_ssa_names); sbitmap_zero (snames_to_rename); EXECUTE_IF_SET_IN_BITMAP (names_to_rename, 0, i, SET_BIT (snames_to_rename, i)); mark_def_sites_global_data.kills = sbitmap_alloc (num_ssa_names); mark_def_sites_global_data.names_to_rename = snames_to_rename; walk_data.global_data = &mark_def_sites_global_data; /* We do not have any local data. */ walk_data.block_local_data_size = 0; /* Initialize the dominator walker. 
*/ init_walk_dominator_tree (&walk_data); /* Recursively walk the dominator tree. */ walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR); /* Finalize the dominator walker. */ fini_walk_dominator_tree (&walk_data); /* We no longer need this bitmap, clear and free it. */ sbitmap_free (mark_def_sites_global_data.kills); for (i = 0; i < num_ssa_names; i++) if (ssa_name (i)) set_current_def (ssa_name (i), NULL_TREE); /* Insert PHI nodes at dominance frontiers of definition blocks. */ insert_phi_nodes (dfs, names_to_rename); /* Rewrite all the basic blocks in the program. */ timevar_push (TV_TREE_SSA_REWRITE_BLOCKS); /* Setup callbacks for the generic dominator tree walker. */ walk_data.walk_stmts_backward = false; walk_data.dom_direction = CDI_DOMINATORS; walk_data.initialize_block_local_data = rewrite_initialize_block_local_data; walk_data.before_dom_children_before_stmts = ssa_rewrite_initialize_block; walk_data.before_dom_children_walk_stmts = ssa_rewrite_stmt; walk_data.before_dom_children_after_stmts = ssa_rewrite_phi_arguments; walk_data.after_dom_children_before_stmts = NULL; walk_data.after_dom_children_walk_stmts = NULL; walk_data.after_dom_children_after_stmts = ssa_rewrite_finalize_block; walk_data.global_data = snames_to_rename; walk_data.block_local_data_size = sizeof (struct rewrite_block_data); /* Initialize the dominator walker. */ init_walk_dominator_tree (&walk_data); /* Recursively walk the dominator tree rewriting each statement in each basic block. */ walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR); /* Finalize the dominator walker. */ fini_walk_dominator_tree (&walk_data); sbitmap_free (snames_to_rename); timevar_pop (TV_TREE_SSA_REWRITE_BLOCKS); /* Debugging dumps. */ if (dump_file && (dump_flags & TDF_STATS)) { dump_dfa_stats (dump_file); dump_tree_ssa_stats (dump_file); } /* Free allocated memory. */ FOR_EACH_BB (bb) BITMAP_XFREE (dfs[bb->index]); free (dfs); htab_delete (def_blocks); for (i = 0; i < num_ssa_names; i++) { name = ssa_name (i); if (!name || !SSA_NAME_AUX (name)) continue; free (SSA_NAME_AUX (name)); SSA_NAME_AUX (name) = NULL; } timevar_pop (TV_TREE_SSA_OTHER); } /* Rewrites all variables into ssa. */ static void rewrite_all_into_ssa (void) { rewrite_into_ssa (true); } struct tree_opt_pass pass_build_ssa = { "ssa", /* name */ NULL, /* gate */ rewrite_all_into_ssa, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg | PROP_referenced_vars, /* properties_required */ PROP_ssa, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_verify_ssa /* todo_flags_finish */ }; /* Convert a program in SSA form into Normal form. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Andrew Macleod This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Routines for liveness in SSA trees. Copyright (C) 2003, 2004 Free Software Foundation, Inc. 
Contributed by Andrew MacLeod This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef _TREE_SSA_LIVE_H #define _TREE_SSA_LIVE_H 1 /* Used to create the variable mapping when we go out of SSA form. */ typedef struct _var_map { /* The partition of all variables. */ partition var_partition; /* Vector for compacting partitions. */ int *partition_to_compact; int *compact_to_partition; /* Mapping of partition numbers to vars. */ tree *partition_to_var; /* Current number of partitions. */ unsigned int num_partitions; /* Original partition size. */ unsigned int partition_size; /* Reference count, if required. */ int *ref_count; } *var_map; #define VAR_ANN_PARTITION(ann) (ann->partition) #define VAR_ANN_ROOT_INDEX(ann) (ann->root_index) #define NO_PARTITION -1 /* Flags to pass to compact_var_map */ #define VARMAP_NORMAL 0 #define VARMAP_NO_SINGLE_DEFS 1 /* Flags to pass to remove_ssa_form. */ #define SSANORM_PERFORM_TER 0x1 #define SSANORM_COMBINE_TEMPS 0x2 #define SSANORM_REMOVE_ALL_PHIS 0x4 #define SSANORM_COALESCE_PARTITIONS 0x8 #define SSANORM_USE_COALESCE_LIST 0x10 extern var_map init_var_map (int); extern void delete_var_map (var_map); extern void dump_var_map (FILE *, var_map); extern int var_union (var_map, tree, tree); extern void change_partition_var (var_map, tree, int); extern void compact_var_map (var_map, int); extern void remove_ssa_form (FILE *, var_map, int); extern void register_ssa_partitions_for_vars (bitmap vars, var_map map); extern tree make_ssa_temp (tree); static inline int num_var_partitions (var_map); static inline tree var_to_partition_to_var (var_map, tree); static inline tree partition_to_var (var_map, int); static inline int var_to_partition (var_map, tree); static inline tree version_to_var (var_map, int); static inline int version_ref_count (var_map, tree); static inline void register_ssa_partition (var_map, tree, bool); #define SSA_VAR_MAP_REF_COUNT 0x01 extern var_map create_ssa_var_map (int); /* Number of partitions in MAP. */ static inline int num_var_partitions (var_map map) { return map->num_partitions; } /* Return the reference count for SSA_VAR's partition in MAP. */ static inline int version_ref_count (var_map map, tree ssa_var) { int version = SSA_NAME_VERSION (ssa_var); #ifdef ENABLE_CHECKING if (!map->ref_count) abort (); #endif return map->ref_count[version]; } /* Given partition index I from MAP, return the variable which represents that partition. */ static inline tree partition_to_var (var_map map, int i) { if (map->compact_to_partition) i = map->compact_to_partition[i]; i = partition_find (map->var_partition, i); return map->partition_to_var[i]; } /* Given ssa_name VERSION, if it has a partition in MAP, return the var it is associated with. Otherwise return NULL. 
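For instance (x_5 is a hypothetical name): after register_ssa_partition (map, x_5, false) has recorded version 5, version_to_var (map, 5) returns x_5, and NULL_TREE is returned for a version that has no partition, as described above.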
*/ static inline tree version_to_var (var_map map, int version) { int part; part = partition_find (map->var_partition, version); if (map->partition_to_compact) part = map->partition_to_compact[part]; if (part == NO_PARTITION) return NULL_TREE; return partition_to_var (map, part); } /* Given VAR, return the partition number in MAP which contains it. NO_PARTITION is returned if it is not in any partition. */ static inline int var_to_partition (var_map map, tree var) { var_ann_t ann; int part; if (TREE_CODE (var) == SSA_NAME) { part = partition_find (map->var_partition, SSA_NAME_VERSION (var)); if (map->partition_to_compact) part = map->partition_to_compact[part]; } else { ann = var_ann (var); if (ann->out_of_ssa_tag) part = VAR_ANN_PARTITION (ann); else part = NO_PARTITION; } return part; } /* Given VAR, return the variable which represents the entire partition it is a member of in MAP. NULL is returned if it is not in a partition. */ static inline tree var_to_partition_to_var (var_map map, tree var) { int part; part = var_to_partition (map, var); if (part == NO_PARTITION) return NULL_TREE; return partition_to_var (map, part); } /* This routine registers a partition for SSA_VAR with MAP. IS_USE is used to count references. Any unregistered partitions may be compacted out later. */ static inline void register_ssa_partition (var_map map, tree ssa_var, bool is_use) { int version; #if defined ENABLE_CHECKING if (TREE_CODE (ssa_var) != SSA_NAME) abort (); if (!is_gimple_reg (SSA_NAME_VAR (ssa_var))) { fprintf (stderr, "Illegally registering a virtual SSA name :"); print_generic_expr (stderr, ssa_var, TDF_SLIM); fprintf (stderr, " in the SSA->Normal phase.\n"); abort(); } #endif version = SSA_NAME_VERSION (ssa_var); if (is_use && map->ref_count) map->ref_count[version]++; if (map->partition_to_var[version] == NULL_TREE) map->partition_to_var[SSA_NAME_VERSION (ssa_var)] = ssa_var; } /* ---------------- live on entry/exit info ------------------------------ This structure is used to represent live range information on SSA based trees. A partition map must be provided, and based on the active partitions, live-on-entry information and live-on-exit information can be calculated. As well, partitions are marked as to whether they are global (live outside the basic block they are defined in). The live-on-entry information is per variable. It provides a bitmap for each variable which has a bit set for each basic block that the variable is live on entry to that block. The live-on-exit information is per block. It provides a bitmap for each block indicating which partitions are live on exit from the block. For the purposes of this implementation, we treat the elements of a PHI as follows: Uses in a PHI are considered LIVE-ON-EXIT to the block from which they originate. They are *NOT* considered live on entry to the block containing the PHI node. The Def of a PHI node is *not* considered live on entry to the block. It is considered to be "defined early" in the block. Picture it as each block having a stmt (or block-preheader) before the first real stmt in the block which defines all the variables that are defined by PHIs. ----------------------------------------------------------------------- */ typedef struct tree_live_info_d { /* Var map this relates to. */ var_map map; /* Bitmap indicating which partitions are global. */ bitmap global; /* Bitmap of live on entry blocks for partition elements. */ bitmap *livein; /* Number of basic blocks when live on exit calculated.
int num_blocks; /* Bitmap of what variables are live on exit for each basic block. */ bitmap *liveout; } *tree_live_info_p; extern tree_live_info_p calculate_live_on_entry (var_map); extern void calculate_live_on_exit (tree_live_info_p); extern void delete_tree_live_info (tree_live_info_p); #define LIVEDUMP_ENTRY 0x01 #define LIVEDUMP_EXIT 0x02 #define LIVEDUMP_ALL (LIVEDUMP_ENTRY | LIVEDUMP_EXIT) extern void dump_live_info (FILE *, tree_live_info_p, int); static inline int partition_is_global (tree_live_info_p, int); static inline bitmap live_entry_blocks (tree_live_info_p, int); static inline bitmap live_on_exit (tree_live_info_p, basic_block); static inline var_map live_var_map (tree_live_info_p); static inline void live_merge_and_clear (tree_live_info_p, int, int); static inline void make_live_on_entry (tree_live_info_p, basic_block, int); /* Return TRUE if P is marked as a global in LIVE. */ static inline int partition_is_global (tree_live_info_p live, int p) { if (!live->global) abort (); return bitmap_bit_p (live->global, p); } /* Return the bitmap from LIVE representing the live on entry blocks for partition P. */ static inline bitmap live_entry_blocks (tree_live_info_p live, int p) { if (!live->livein) abort (); return live->livein[p]; } /* Return the bitmap from LIVE representing the live on exit partitions from block BB. */ static inline bitmap live_on_exit (tree_live_info_p live, basic_block bb) { if (!live->liveout) abort(); if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR) abort (); return live->liveout[bb->index]; } /* Return the partition map which the information in LIVE utilizes. */ static inline var_map live_var_map (tree_live_info_p live) { return live->map; } /* Merge the live on entry information in LIVE for partitions P1 and P2. Place the result into P1. Clear P2. */ static inline void live_merge_and_clear (tree_live_info_p live, int p1, int p2) { bitmap_a_or_b (live->livein[p1], live->livein[p1], live->livein[p2]); bitmap_zero (live->livein[p2]); } /* Mark partition P as live on entry to basic block BB in LIVE. */ static inline void make_live_on_entry (tree_live_info_p live, basic_block bb, int p) { bitmap_set_bit (live->livein[p], bb->index); bitmap_set_bit (live->global, p); } /* A tree_partition_associator (TPA) object is a base structure which allows partitions to be associated with a tree object. A varray of tree elements represents each distinct tree item. A parallel int array represents the first partition number associated with the tree. This partition number is then used as an index into the next_partition array, which returns the index of the next partition which is associated with the tree. TPA_NONE indicates the end of the list. A varray paralleling the partition list 'partition_to_tree_map' is used to indicate which tree index the partition is in. */ typedef struct tree_partition_associator_d { varray_type trees; varray_type first_partition; int *next_partition; int *partition_to_tree_map; int num_trees; int uncompressed_num; var_map map; } *tpa_p; /* Value returned when there are no more partitions associated with a tree.
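A minimal usage sketch of walking such a list with the accessors declared below (the loop variable P and the tree index I are placeholders): each partition associated with tree index I is visited in turn by

      for (p = tpa_first_partition (tpa, i); p != TPA_NONE; p = tpa_next_partition (tpa, p))
        ...process partition p...

and the walk stops when TPA_NONE is reached.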
*/ #define TPA_NONE -1 static inline tree tpa_tree (tpa_p, int); static inline int tpa_first_partition (tpa_p, int); static inline int tpa_next_partition (tpa_p, int); static inline int tpa_num_trees (tpa_p); static inline int tpa_find_tree (tpa_p, int); static inline void tpa_decompact (tpa_p); extern tpa_p tpa_init (var_map); extern void tpa_delete (tpa_p); extern void tpa_dump (FILE *, tpa_p); extern void tpa_remove_partition (tpa_p, int, int); extern int tpa_compact (tpa_p); /* Return the number of distinct tree nodes in TPA. */ static inline int tpa_num_trees (tpa_p tpa) { return tpa->num_trees; } /* Return the tree node for index I in TPA. */ static inline tree tpa_tree (tpa_p tpa, int i) { return VARRAY_TREE (tpa->trees, i); } /* Return the first partition associated with tree list I in TPA. */ static inline int tpa_first_partition (tpa_p tpa, int i) { return VARRAY_INT (tpa->first_partition, i); } /* Return the next partition after partition I in TPA's list. */ static inline int tpa_next_partition (tpa_p tpa, int i) { return tpa->next_partition[i]; } /* Return the tree index from TPA whose list contains partition I. TPA_NONE is returned if I is not associated with any list. */ static inline int tpa_find_tree (tpa_p tpa, int i) { int index; index = tpa->partition_to_tree_map[i]; /* When compressed, any index higher than the number of tree elements is a compressed element, so return TPA_NONE. */ if (index != TPA_NONE && index >= tpa_num_trees (tpa)) { #ifdef ENABLE_CHECKING if (tpa->uncompressed_num == -1) abort (); #endif index = TPA_NONE; } return index; } /* This function removes any compaction which was performed on TPA. */ static inline void tpa_decompact(tpa_p tpa) { #ifdef ENABLE_CHECKING if (tpa->uncompressed_num == -1) abort (); #endif tpa->num_trees = tpa->uncompressed_num; } /* Once a var_map has been created and compressed, a complimentary root_var object can be built. This creates a list of all the root variables from which ssa version names are derived. Each root variable has a list of which partitions are versions of that root. This is implemented using the tree_partition_associator. The tree vector is used to represent the root variable. The list of partitions represent SSA versions of the root variable. */ typedef tpa_p root_var_p; static inline tree root_var (root_var_p, int); static inline int root_var_first_partition (root_var_p, int); static inline int root_var_next_partition (root_var_p, int); static inline int root_var_num (root_var_p); static inline void root_var_dump (FILE *, root_var_p); static inline void root_var_remove_partition (root_var_p, int, int); static inline void root_var_delete (root_var_p); static inline int root_var_find (root_var_p, int); static inline int root_var_compact (root_var_p); static inline void root_var_decompact (tpa_p); extern root_var_p root_var_init (var_map); /* Value returned when there are no more partitions associated with a root variable. */ #define ROOT_VAR_NONE TPA_NONE /* Return the number of distinct root variables in RV. */ static inline int root_var_num (root_var_p rv) { return tpa_num_trees (rv); } /* Return root variable I from RV. */ static inline tree root_var (root_var_p rv, int i) { return tpa_tree (rv, i); } /* Return the first partition in RV belonging to root variable list I. */ static inline int root_var_first_partition (root_var_p rv, int i) { return tpa_first_partition (rv, i); } /* Return the next partition after partition I in a root list from RV. 
*/ static inline int root_var_next_partition (root_var_p rv, int i) { return tpa_next_partition (rv, i); } /* Send debug info for root_var list RV to file F. */ static inline void root_var_dump (FILE *f, root_var_p rv) { fprintf (f, "\nRoot Var dump\n"); tpa_dump (f, rv); fprintf (f, "\n"); } /* Destroy root_var object RV. */ static inline void root_var_delete (root_var_p rv) { tpa_delete (rv); } /* Remove partition PARTITION_INDEX from root_var list ROOT_INDEX in RV. */ static inline void root_var_remove_partition (root_var_p rv, int root_index, int partition_index) { tpa_remove_partition (rv, root_index, partition_index); } /* Return the root_var list index for partition I in RV. */ static inline int root_var_find (root_var_p rv, int i) { return tpa_find_tree (rv, i); } /* Hide single element lists in RV. */ static inline int root_var_compact (root_var_p rv) { return tpa_compact (rv); } /* Expose the single element lists in RV. */ static inline void root_var_decompact (root_var_p rv) { tpa_decompact (rv); } /* A TYPE_VAR object is similar to a root_var object, except this associates partitions with their type rather than their root variable. This is used to coalesce memory locations based on type. */ typedef tpa_p type_var_p; static inline tree type_var (type_var_p, int); static inline int type_var_first_partition (type_var_p, int); static inline int type_var_next_partition (type_var_p, int); static inline int type_var_num (type_var_p); static inline void type_var_dump (FILE *, type_var_p); static inline void type_var_remove_partition (type_var_p, int, int); static inline void type_var_delete (type_var_p); static inline int type_var_find (type_var_p, int); static inline int type_var_compact (type_var_p); static inline void type_var_decompact (type_var_p); extern type_var_p type_var_init (var_map); /* Value returned when there is no partitions associated with a list. */ #define TYPE_VAR_NONE TPA_NONE /* Return the number of distinct type lists in TV. */ static inline int type_var_num (type_var_p tv) { return tpa_num_trees (tv); } /* Return the type of list I in TV. */ static inline tree type_var (type_var_p tv, int i) { return tpa_tree (tv, i); } /* Return the first partition belonging to type list I in TV. */ static inline int type_var_first_partition (type_var_p tv, int i) { return tpa_first_partition (tv, i); } /* Return the next partition after partition I in a type list within TV. */ static inline int type_var_next_partition (type_var_p tv, int i) { return tpa_next_partition (tv, i); } /* Send debug info for type_var object TV to file F. */ static inline void type_var_dump (FILE *f, type_var_p tv) { fprintf (f, "\nType Var dump\n"); tpa_dump (f, tv); fprintf (f, "\n"); } /* Delete type_var object TV. */ static inline void type_var_delete (type_var_p tv) { tpa_delete (tv); } /* Remove partition PARTITION_INDEX from type list TYPE_INDEX in TV. */ static inline void type_var_remove_partition (type_var_p tv, int type_index, int partition_index) { tpa_remove_partition (tv, type_index, partition_index); } /* Return the type index in TV for the list partition I is in. */ static inline int type_var_find (type_var_p tv, int i) { return tpa_find_tree (tv, i); } /* Hide single element lists in TV. */ static inline int type_var_compact (type_var_p tv) { return tpa_compact (tv); } /* Expose single element lists in TV. */ static inline void type_var_decompact (type_var_p tv) { tpa_decompact (tv); } /* This set of routines implements a coalesce_list. 
This is an object which is used to track pairs of partitions which are desirable to coalesce together at some point. Costs are associated with each pair, and when all desired information has been collected, the object can be used to order the pairs for processing. */ /* This structure defines a pair for coalescing. */ typedef struct partition_pair_d { int first_partition; int second_partition; int cost; struct partition_pair_d *next; } *partition_pair_p; /* This structure maintains the list of coalesce pairs. When add_mode is true, list is a triangular shaped list of coalesce pairs. The smaller partition number is used to index the list, and the larger index is located in a partition_pair_p object. These lists are sorted from smallest to largest by 'second_partition'. New coalesce pairs are allowed to be added in this mode. When add_mode is false, the lists have all been merged into list[0]. The rest of the lists are not used. list[0] is ordered from most desirable coalesce to least desirable. pop_best_coalesce() retrieves the pairs one at a time. */ typedef struct coalesce_list_d { var_map map; partition_pair_p *list; bool add_mode; } *coalesce_list_p; extern coalesce_list_p create_coalesce_list (var_map); extern void add_coalesce (coalesce_list_p, int, int, int); extern void sort_coalesce_list (coalesce_list_p); extern void dump_coalesce_list (FILE *, coalesce_list_p); extern void delete_coalesce_list (coalesce_list_p); #define NO_BEST_COALESCE -1 extern int pop_best_coalesce (coalesce_list_p, int *, int *); extern conflict_graph build_tree_conflict_graph (tree_live_info_p, tpa_p, coalesce_list_p); extern void coalesce_tpa_members (tpa_p tpa, conflict_graph graph, var_map map, coalesce_list_p cl, FILE *); #endif /* _TREE_SSA_LIVE_H */ /* Used to hold all the components required to do SSA PHI elimination. The node and pred/succ list is a simple linear list of nodes and edges represented as pairs of nodes. The predecessor and successor list: Nodes are entered in pairs, where [0]->PRED, [1]->SUCC. All the even indexes in the array represent predecessors, all the odd elements are successors. Rationale: When implemented as bitmaps, the SSA->Normal times for very large programs were dominated by clearing the interference graph. Typically this list of edges is extremely small since it only includes PHI results and uses from a single edge which have not coalesced with each other. This means that no virtual PHI nodes are included, and empirical evidence suggests that the number of edges rarely exceeds 3, and in a bootstrap of GCC, the maximum size encountered was 7. This also limits the number of possible nodes that are involved to rarely more than 6, and in the bootstrap of GCC, the maximum number of nodes encountered was 12. */ typedef struct _elim_graph { /* Size of the elimination vectors. */ int size; /* List of nodes in the elimination graph. */ varray_type nodes; /* The predecessor and successor edge list. */ varray_type edge_list; /* Visited vector. */ sbitmap visited; /* Stack for visited nodes. */ varray_type stack; /* The variable partition map. */ var_map map; /* Edge being eliminated by this graph. */ edge e; /* List of constant copies to emit. These are pushed on in pairs. */ varray_type const_copies; } *elim_graph; /* Local functions.
*/ static tree create_temp (tree); static void insert_copy_on_edge (edge, tree, tree); static elim_graph new_elim_graph (int); static inline void delete_elim_graph (elim_graph); static inline void clear_elim_graph (elim_graph); static inline int elim_graph_size (elim_graph); static inline void elim_graph_add_node (elim_graph, tree); static inline void elim_graph_add_edge (elim_graph, int, int); static inline int elim_graph_remove_succ_edge (elim_graph, int); static inline void eliminate_name (elim_graph, tree); static void eliminate_build (elim_graph, basic_block, int); static void elim_forward (elim_graph, int); static int elim_unvisited_predecessor (elim_graph, int); static void elim_backward (elim_graph, int); static void elim_create (elim_graph, int); static void eliminate_phi (edge, int, elim_graph); static tree_live_info_p coalesce_ssa_name (var_map, int); static void assign_vars (var_map); static bool replace_use_variable (var_map, use_operand_p, tree *); static bool replace_def_variable (var_map, def_operand_p, tree *); static void eliminate_virtual_phis (void); static void coalesce_abnormal_edges (var_map, conflict_graph, root_var_p); static void print_exprs (FILE *, const char *, tree, const char *, tree, const char *); static void print_exprs_edge (FILE *, edge, const char *, tree, const char *, tree); /* Create a temporary variable based on the type of variable T. Use T's name as the prefix. */ static tree create_temp (tree t) { tree tmp; const char *name = NULL; tree type; if (TREE_CODE (t) == SSA_NAME) t = SSA_NAME_VAR (t); if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) abort (); type = TREE_TYPE (t); tmp = DECL_NAME (t); if (tmp) name = IDENTIFIER_POINTER (tmp); if (name == NULL) name = "temp"; tmp = create_tmp_var (type, name); DECL_ARTIFICIAL (tmp) = DECL_ARTIFICIAL (t); add_referenced_tmp_var (tmp); /* add_referenced_tmp_var will create the annotation and set up some of the flags in the annotation. However, some flags need to be inherited from our original variable. */ var_ann (tmp)->type_mem_tag = var_ann (t)->type_mem_tag; if (is_call_clobbered (t)) mark_call_clobbered (tmp); return tmp; } /* This helper function will insert a copy from a constant or variable SRC to variable DEST on edge E. */ static void insert_copy_on_edge (edge e, tree dest, tree src) { tree copy; copy = build (MODIFY_EXPR, TREE_TYPE (dest), dest, src); set_is_used (dest); if (TREE_CODE (src) == ADDR_EXPR) src = TREE_OPERAND (src, 0); if (TREE_CODE (src) == VAR_DECL || TREE_CODE (src) == PARM_DECL) set_is_used (src); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Inserting a copy on edge BB%d->BB%d :", e->src->index, e->dest->index); print_generic_expr (dump_file, copy, dump_flags); fprintf (dump_file, "\n"); } bsi_insert_on_edge (e, copy); } /* Create an elimination graph with SIZE nodes and associated data structures. */ static elim_graph new_elim_graph (int size) { elim_graph g = (elim_graph) xmalloc (sizeof (struct _elim_graph)); VARRAY_TREE_INIT (g->nodes, 30, "Elimination Node List"); VARRAY_TREE_INIT (g->const_copies, 20, "Elimination Constant Copies"); VARRAY_INT_INIT (g->edge_list, 20, "Elimination Edge List"); VARRAY_INT_INIT (g->stack, 30, " Elimination Stack"); g->visited = sbitmap_alloc (size); return g; } /* Empty elimination graph G. */ static inline void clear_elim_graph (elim_graph g) { VARRAY_POP_ALL (g->nodes); VARRAY_POP_ALL (g->edge_list); } /* Delete elimination graph G.
*/ static inline void delete_elim_graph (elim_graph g) { sbitmap_free (g->visited); free (g); } /* Return the number of nodes in graph G. */ static inline int elim_graph_size (elim_graph g) { return VARRAY_ACTIVE_SIZE (g->nodes); } /* Add NODE to graph G, if it doesn't exist already. */ static inline void elim_graph_add_node (elim_graph g, tree node) { int x; for (x = 0; x < elim_graph_size (g); x++) if (VARRAY_TREE (g->nodes, x) == node) return; VARRAY_PUSH_TREE (g->nodes, node); } /* Add the edge PRED->SUCC to graph G. */ static inline void elim_graph_add_edge (elim_graph g, int pred, int succ) { VARRAY_PUSH_INT (g->edge_list, pred); VARRAY_PUSH_INT (g->edge_list, succ); } /* Remove an edge from graph G for which NODE is the predecessor, and return the successor node. -1 is returned if there is no such edge. */ static inline int elim_graph_remove_succ_edge (elim_graph g, int node) { int y; unsigned x; for (x = 0; x < VARRAY_ACTIVE_SIZE (g->edge_list); x += 2) if (VARRAY_INT (g->edge_list, x) == node) { VARRAY_INT (g->edge_list, x) = -1; y = VARRAY_INT (g->edge_list, x + 1); VARRAY_INT (g->edge_list, x + 1) = -1; return y; } return -1; } /* Find all the nodes in GRAPH which are successors to NODE in the edge list. VAR will hold the partition number found. CODE is the code fragment executed for every node found. */ #define FOR_EACH_ELIM_GRAPH_SUCC(GRAPH, NODE, VAR, CODE) \ do { \ unsigned x_; \ int y_; \ for (x_ = 0; x_ < VARRAY_ACTIVE_SIZE ((GRAPH)->edge_list); x_ += 2) \ { \ y_ = VARRAY_INT ((GRAPH)->edge_list, x_); \ if (y_ != (NODE)) \ continue; \ (VAR) = VARRAY_INT ((GRAPH)->edge_list, x_ + 1); \ CODE; \ } \ } while (0) /* Find all the nodes which are predecessors of NODE in the edge list for GRAPH. VAR will hold the partition number found. CODE is the code fragment executed for every node found. */ #define FOR_EACH_ELIM_GRAPH_PRED(GRAPH, NODE, VAR, CODE) \ do { \ unsigned x_; \ int y_; \ for (x_ = 0; x_ < VARRAY_ACTIVE_SIZE ((GRAPH)->edge_list); x_ += 2) \ { \ y_ = VARRAY_INT ((GRAPH)->edge_list, x_ + 1); \ if (y_ != (NODE)) \ continue; \ (VAR) = VARRAY_INT ((GRAPH)->edge_list, x_); \ CODE; \ } \ } while (0) /* Add T to elimination graph G. */ static inline void eliminate_name (elim_graph g, tree T) { elim_graph_add_node (g, T); } /* Build elimination graph G for basic block BB on incoming PHI edge I. */ static void eliminate_build (elim_graph g, basic_block B, int i) { tree phi; tree T0, Ti; int p0, pi; clear_elim_graph (g); for (phi = phi_nodes (B); phi; phi = PHI_CHAIN (phi)) { T0 = var_to_partition_to_var (g->map, PHI_RESULT (phi)); /* Ignore results which are not in partitions. */ if (T0 == NULL_TREE) continue; if (PHI_ARG_EDGE (phi, i) == g->e) Ti = PHI_ARG_DEF (phi, i); else { /* On rare occasions, a PHI node may not have the arguments in the same order as all of the other PHI nodes. If they don't match, find the appropriate index here. */ pi = phi_arg_from_edge (phi, g->e); if (pi == -1) abort(); Ti = PHI_ARG_DEF (phi, pi); } /* If this argument is a constant, or a SSA_NAME which is being left in SSA form, just queue a copy to be emitted on this edge. */ if (!phi_ssa_name_p (Ti) || (TREE_CODE (Ti) == SSA_NAME && var_to_partition (g->map, Ti) == NO_PARTITION)) { /* Save constant copies until all other copies have been emitted on this edge. 
*/ VARRAY_PUSH_TREE (g->const_copies, T0); VARRAY_PUSH_TREE (g->const_copies, Ti); } else { Ti = var_to_partition_to_var (g->map, Ti); if (T0 != Ti) { eliminate_name (g, T0); eliminate_name (g, Ti); p0 = var_to_partition (g->map, T0); pi = var_to_partition (g->map, Ti); elim_graph_add_edge (g, p0, pi); } } } } /* Push successors of T onto the elimination stack for G. */ static void elim_forward (elim_graph g, int T) { int S; SET_BIT (g->visited, T); FOR_EACH_ELIM_GRAPH_SUCC (g, T, S, { if (!TEST_BIT (g->visited, S)) elim_forward (g, S); }); VARRAY_PUSH_INT (g->stack, T); } /* Return 1 if there unvisited predecessors of T in graph G. */ static int elim_unvisited_predecessor (elim_graph g, int T) { int P; FOR_EACH_ELIM_GRAPH_PRED (g, T, P, { if (!TEST_BIT (g->visited, P)) return 1; }); return 0; } /* Process predecessors first, and insert a copy. */ static void elim_backward (elim_graph g, int T) { int P; SET_BIT (g->visited, T); FOR_EACH_ELIM_GRAPH_PRED (g, T, P, { if (!TEST_BIT (g->visited, P)) { elim_backward (g, P); insert_copy_on_edge (g->e, partition_to_var (g->map, P), partition_to_var (g->map, T)); } }); } /* Insert required copies for T in graph G. Check for a strongly connected region, and create a temporary to break the cycle if one is found. */ static void elim_create (elim_graph g, int T) { tree U; int P, S; if (elim_unvisited_predecessor (g, T)) { U = create_temp (partition_to_var (g->map, T)); insert_copy_on_edge (g->e, U, partition_to_var (g->map, T)); FOR_EACH_ELIM_GRAPH_PRED (g, T, P, { if (!TEST_BIT (g->visited, P)) { elim_backward (g, P); insert_copy_on_edge (g->e, partition_to_var (g->map, P), U); } }); } else { S = elim_graph_remove_succ_edge (g, T); if (S != -1) { SET_BIT (g->visited, T); insert_copy_on_edge (g->e, partition_to_var (g->map, T), partition_to_var (g->map, S)); } } } /* Eliminate all the phi nodes on edge E in graph G. I is the usual PHI index that edge E's values are found on. */ static void eliminate_phi (edge e, int i, elim_graph g) { int num_nodes = 0; int x; basic_block B = e->dest; #if defined ENABLE_CHECKING if (i == -1) abort (); if (VARRAY_ACTIVE_SIZE (g->const_copies) != 0) abort (); #endif /* Abnormal edges already have everything coalesced, or the coalescer would have aborted. */ if (e->flags & EDGE_ABNORMAL) return; num_nodes = num_var_partitions (g->map); g->e = e; eliminate_build (g, B, i); if (elim_graph_size (g) != 0) { sbitmap_zero (g->visited); VARRAY_POP_ALL (g->stack); for (x = 0; x < elim_graph_size (g); x++) { tree var = VARRAY_TREE (g->nodes, x); int p = var_to_partition (g->map, var); if (!TEST_BIT (g->visited, p)) elim_forward (g, p); } sbitmap_zero (g->visited); while (VARRAY_ACTIVE_SIZE (g->stack) > 0) { x = VARRAY_TOP_INT (g->stack); VARRAY_POP (g->stack); if (!TEST_BIT (g->visited, x)) elim_create (g, x); } } /* If there are any pending constant copies, issue them now. */ while (VARRAY_ACTIVE_SIZE (g->const_copies) > 0) { tree src, dest; src = VARRAY_TOP_TREE (g->const_copies); VARRAY_POP (g->const_copies); dest = VARRAY_TOP_TREE (g->const_copies); VARRAY_POP (g->const_copies); insert_copy_on_edge (e, dest, src); } } /* Shortcut routine to print messages to file F of the form: "STR1 EXPR1 STR2 EXPR2 STR3." 
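   (Hypothetical usage sketch, added for illustration: a call such as
      print_exprs (dump_file, "Must coalesce ", t, " with the root variable ", var, ".\n");
   would emit something like "Must coalesce t_3 with the root variable t." followed by a newline, the two trees being printed through print_generic_expr in TDF_SLIM form; the names shown are made up for the example.)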
*/ static void print_exprs (FILE *f, const char *str1, tree expr1, const char *str2, tree expr2, const char *str3) { fprintf (f, "%s", str1); print_generic_expr (f, expr1, TDF_SLIM); fprintf (f, "%s", str2); print_generic_expr (f, expr2, TDF_SLIM); fprintf (f, "%s", str3); } /* Shortcut routine to print abnormal edge messages to file F of the form: "STR1 EXPR1 STR2 EXPR2 across edge E. */ static void print_exprs_edge (FILE *f, edge e, const char *str1, tree expr1, const char *str2, tree expr2) { print_exprs (f, str1, expr1, str2, expr2, " across an abnormal edge"); fprintf (f, " from BB%d->BB%d\n", e->src->index, e->dest->index); } /* Coalesce partitions in MAP which are live across abnormal edges in GRAPH. RV is the root variable groupings of the partitions in MAP. Since code cannot be inserted on these edges, failure to coalesce something across an abnormal edge is an error. */ static void coalesce_abnormal_edges (var_map map, conflict_graph graph, root_var_p rv) { basic_block bb; edge e; tree phi, var, tmp; int x, y; /* Code cannot be inserted on abnormal edges. Look for all abnormal edges, and coalesce any PHI results with their arguments across that edge. */ FOR_EACH_BB (bb) for (e = bb->succ; e; e = e->succ_next) if (e->dest != EXIT_BLOCK_PTR && e->flags & EDGE_ABNORMAL) for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi)) { /* Visit each PHI on the destination side of this abnormal edge, and attempt to coalesce the argument with the result. */ var = PHI_RESULT (phi); x = var_to_partition (map, var); /* Ignore results which are not relevant. */ if (x == NO_PARTITION) continue; y = phi_arg_from_edge (phi, e); if (y == -1) abort (); tmp = PHI_ARG_DEF (phi, y); if (!phi_ssa_name_p (tmp)) { print_exprs_edge (stderr, e, "\nConstant argument in PHI. Can't insert :", var, " = ", tmp); abort (); } y = var_to_partition (map, tmp); if (x == NO_PARTITION || y == NO_PARTITION) abort (); if (root_var_find (rv, x) != root_var_find (rv, y)) { print_exprs_edge (stderr, e, "\nDifferent root vars: ", root_var (rv, root_var_find (rv, x)), " and ", root_var (rv, root_var_find (rv, y))); abort (); } if (x != y) { if (!conflict_graph_conflict_p (graph, x, y)) { /* Now map the partitions back to their real variables. */ var = partition_to_var (map, x); tmp = partition_to_var (map, y); if (dump_file && (dump_flags & TDF_DETAILS)) { print_exprs_edge (dump_file, e, "ABNORMAL: Coalescing ", var, " and ", tmp); } if (var_union (map, var, tmp) == NO_PARTITION) { print_exprs_edge (stderr, e, "\nUnable to coalesce", partition_to_var (map, x), " and ", partition_to_var (map, y)); abort (); } conflict_graph_merge_regs (graph, x, y); } else { print_exprs_edge (stderr, e, "\n Conflict ", partition_to_var (map, x), " and ", partition_to_var (map, y)); abort (); } } } } /* Reduce the number of live ranges in MAP. Live range information is returned if FLAGS indicates that we are combining temporaries, otherwise NULL is returned. The only partitions which are associated with actual variables at this point are those which are forced to be coalesced for various reason. (live on entry, live across abnormal edges, etc.). */ static tree_live_info_p coalesce_ssa_name (var_map map, int flags) { int num, x, i; sbitmap live; tree var, phi; root_var_p rv; tree_live_info_p liveinfo; var_ann_t ann; conflict_graph graph; basic_block bb; coalesce_list_p cl = NULL; if (num_var_partitions (map) <= 1) return NULL; /* If no preference given, use cheap coalescing of all partitions. 
*/ if ((flags & (SSANORM_COALESCE_PARTITIONS | SSANORM_USE_COALESCE_LIST)) == 0) flags |= SSANORM_COALESCE_PARTITIONS; liveinfo = calculate_live_on_entry (map); calculate_live_on_exit (liveinfo); rv = root_var_init (map); /* Remove single element variable from the list. */ root_var_compact (rv); if (flags & SSANORM_USE_COALESCE_LIST) { cl = create_coalesce_list (map); /* Add all potential copies via PHI arguments to the list. */ FOR_EACH_BB (bb) { for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { tree res = PHI_RESULT (phi); int p = var_to_partition (map, res); if (p == NO_PARTITION) continue; for (x = 0; x < PHI_NUM_ARGS (phi); x++) { tree arg = PHI_ARG_DEF (phi, x); int p2; if (TREE_CODE (arg) != SSA_NAME) continue; if (SSA_NAME_VAR (res) != SSA_NAME_VAR (arg)) continue; p2 = var_to_partition (map, PHI_ARG_DEF (phi, x)); if (p2 != NO_PARTITION) add_coalesce (cl, p, p2, 1); } } } /* Coalesce all the result decls together. */ var = NULL_TREE; i = 0; for (x = 0; x < num_var_partitions (map); x++) { tree p = partition_to_var (map, x); if (TREE_CODE (SSA_NAME_VAR(p)) == RESULT_DECL) { if (var == NULL_TREE) { var = p; i = x; } else add_coalesce (cl, i, x, 1); } } } /* Build a conflict graph. */ graph = build_tree_conflict_graph (liveinfo, rv, cl); if (cl) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Before sorting:\n"); dump_coalesce_list (dump_file, cl); } sort_coalesce_list (cl); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\nAfter sorting:\n"); dump_coalesce_list (dump_file, cl); } } /* Put the single element variables back in. */ root_var_decompact (rv); /* First, coalesce all live on entry variables to their root variable. This will ensure the first use is coming from the correct location. */ live = sbitmap_alloc (num_var_partitions (map)); sbitmap_zero (live); /* Set 'live' vector to indicate live on entry partitions. */ num = num_var_partitions (map); for (x = 0 ; x < num; x++) { var = partition_to_var (map, x); if (default_def (SSA_NAME_VAR (var)) == var) SET_BIT (live, x); } if ((flags & SSANORM_COMBINE_TEMPS) == 0) { delete_tree_live_info (liveinfo); liveinfo = NULL; } /* Assign root variable as partition representative for each live on entry partition. */ EXECUTE_IF_SET_IN_SBITMAP (live, 0, x, { var = root_var (rv, root_var_find (rv, x)); ann = var_ann (var); /* If these aren't already coalesced... */ if (partition_to_var (map, x) != var) { if (ann->out_of_ssa_tag) { /* This root variable has already been assigned to another partition which is not coalesced with this one. */ abort (); } if (dump_file && (dump_flags & TDF_DETAILS)) { print_exprs (dump_file, "Must coalesce ", partition_to_var (map, x), " with the root variable ", var, ".\n"); } change_partition_var (map, var, x); } }); sbitmap_free (live); /* Coalesce partitions live across abnormal edges. */ coalesce_abnormal_edges (map, graph, rv); if (dump_file && (dump_flags & TDF_DETAILS)) dump_var_map (dump_file, map); /* Coalesce partitions. */ if (flags & SSANORM_USE_COALESCE_LIST) coalesce_tpa_members (rv, graph, map, cl, ((dump_flags & TDF_DETAILS) ? dump_file : NULL)); if (flags & SSANORM_COALESCE_PARTITIONS) coalesce_tpa_members (rv, graph, map, NULL, ((dump_flags & TDF_DETAILS) ? dump_file : NULL)); if (cl) delete_coalesce_list (cl); root_var_delete (rv); conflict_graph_delete (graph); return liveinfo; } /* Take the ssa-name var_map MAP, and assign real variables to each partition. 
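   (Clarifying note, added here and not present in the original comment: partitions whose representative is still an SSA_NAME are either given their root variable, when that decl has not yet been handed out to another partition, or a fresh temporary from create_temp, so no two surviving partitions ever share the same decl.)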
*/ static void assign_vars (var_map map) { int x, i, num, rep; tree t, var; var_ann_t ann; root_var_p rv; rv = root_var_init (map); if (!rv) return; /* Coalescing may already have forced some partitions to their root variable. Find these and tag them. */ num = num_var_partitions (map); for (x = 0; x < num; x++) { var = partition_to_var (map, x); if (TREE_CODE (var) != SSA_NAME) { /* Coalescing will already have verified that more than one partition doesn't have the same root variable. Simply marked the variable as assigned. */ ann = var_ann (var); ann->out_of_ssa_tag = 1; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "partition %d has variable ", x); print_generic_expr (dump_file, var, TDF_SLIM); fprintf (dump_file, " assigned to it.\n"); } } } num = root_var_num (rv); for (x = 0; x < num; x++) { var = root_var (rv, x); ann = var_ann (var); for (i = root_var_first_partition (rv, x); i != ROOT_VAR_NONE; i = root_var_next_partition (rv, i)) { t = partition_to_var (map, i); if (t == var || TREE_CODE (t) != SSA_NAME) continue; rep = var_to_partition (map, t); if (!ann->out_of_ssa_tag) { if (dump_file && (dump_flags & TDF_DETAILS)) print_exprs (dump_file, "", t, " --> ", var, "\n"); change_partition_var (map, var, rep); continue; } if (dump_file && (dump_flags & TDF_DETAILS)) print_exprs (dump_file, "", t, " not coalesced with ", var, ""); var = create_temp (t); change_partition_var (map, var, rep); ann = var_ann (var); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " --> New temp: '"); print_generic_expr (dump_file, var, TDF_SLIM); fprintf (dump_file, "'\n"); } } } root_var_delete (rv); } /* Replace use operand P with whatever variable it has been rewritten to based on the partitions in MAP. EXPR is an optional expression vector over SSA versions which is used to replace P with an expression instead of a variable. If the stmt is changed, return true. */ static inline bool replace_use_variable (var_map map, use_operand_p p, tree *expr) { tree new_var; tree var = USE_FROM_PTR (p); /* Check if we are replacing this variable with an expression. */ if (expr) { int version = SSA_NAME_VERSION (var); if (expr[version]) { tree new_expr = TREE_OPERAND (expr[version], 1); SET_USE (p, new_expr); /* Clear the stmt's RHS, or GC might bite us. */ TREE_OPERAND (expr[version], 1) = NULL_TREE; return true; } } new_var = var_to_partition_to_var (map, var); if (new_var) { SET_USE (p, new_var); set_is_used (new_var); return true; } return false; } /* Replace def operand DEF_P with whatever variable it has been rewritten to based on the partitions in MAP. EXPR is an optional expression vector over SSA versions which is used to replace DEF_P with an expression instead of a variable. If the stmt is changed, return true. */ static inline bool replace_def_variable (var_map map, def_operand_p def_p, tree *expr) { tree new_var; tree var = DEF_FROM_PTR (def_p); /* Check if we are replacing this variable with an expression. */ if (expr) { int version = SSA_NAME_VERSION (var); if (expr[version]) { tree new_expr = TREE_OPERAND (expr[version], 1); SET_DEF (def_p, new_expr); /* Clear the stmt's RHS, or GC might bite us. */ TREE_OPERAND (expr[version], 1) = NULL_TREE; return true; } } new_var = var_to_partition_to_var (map, var); if (new_var) { SET_DEF (def_p, new_var); set_is_used (new_var); return true; } return false; } /* Remove any PHI node which is a virtual PHI. 
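   (Added clarification, not in the original sources: a PHI is treated as virtual here when is_gimple_reg is false for the underlying variable of its result, i.e. it merges memory state rather than a scalar register value. Such PHIs never produce copies on edges during out-of-SSA, so they can simply be removed; the ENABLE_CHECKING block below verifies that none of their arguments are real registers.)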
*/ static void eliminate_virtual_phis (void) { basic_block bb; tree phi, next; FOR_EACH_BB (bb) { for (phi = phi_nodes (bb); phi; phi = next) { next = PHI_CHAIN (phi); if (!is_gimple_reg (SSA_NAME_VAR (PHI_RESULT (phi)))) { #ifdef ENABLE_CHECKING int i; /* There should be no arguments of this PHI which are in the partition list, or we get incorrect results. */ for (i = 0; i < PHI_NUM_ARGS (phi); i++) { tree arg = PHI_ARG_DEF (phi, i); if (TREE_CODE (arg) == SSA_NAME && is_gimple_reg (SSA_NAME_VAR (arg))) { fprintf (stderr, "Argument of PHI is not virtual ("); print_generic_expr (stderr, arg, TDF_SLIM); fprintf (stderr, "), but the result is :"); print_generic_stmt (stderr, phi, TDF_SLIM); abort(); } } #endif remove_phi_node (phi, NULL_TREE, bb); } } } } /* This routine will coalesce variables in MAP of the same type which do not interfere with each other. LIVEINFO is the live range info for variables of interest. This will both reduce the memory footprint of the stack, and allow us to coalesce together local copies of globals and scalarized component refs. */ static void coalesce_vars (var_map map, tree_live_info_p liveinfo) { basic_block bb; type_var_p tv; tree var; int x, p, p2; coalesce_list_p cl; conflict_graph graph; cl = create_coalesce_list (map); /* Merge all the live on entry vectors for coalesced partitions. */ for (x = 0; x < num_var_partitions (map); x++) { var = partition_to_var (map, x); p = var_to_partition (map, var); if (p != x) live_merge_and_clear (liveinfo, p, x); } /* When PHI nodes are turned into copies, the result of each PHI node becomes live on entry to the block. Mark these now. */ FOR_EACH_BB (bb) { tree phi, arg; int p; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { p = var_to_partition (map, PHI_RESULT (phi)); /* Skip virtual PHI nodes. */ if (p == NO_PARTITION) continue; make_live_on_entry (liveinfo, bb, p); /* Each argument is a potential copy operation. Add any arguments which are not coalesced to the result to the coalesce list. */ for (x = 0; x < PHI_NUM_ARGS (phi); x++) { arg = PHI_ARG_DEF (phi, x); if (!phi_ssa_name_p (arg)) continue; p2 = var_to_partition (map, arg); if (p2 == NO_PARTITION) continue; if (p != p2) add_coalesce (cl, p, p2, 1); } } } /* Re-calculate live on exit info. */ calculate_live_on_exit (liveinfo); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Live range info for variable memory coalescing.\n"); dump_live_info (dump_file, liveinfo, LIVEDUMP_ALL); fprintf (dump_file, "Coalesce list from phi nodes:\n"); dump_coalesce_list (dump_file, cl); } tv = type_var_init (map); if (dump_file) type_var_dump (dump_file, tv); type_var_compact (tv); if (dump_file) type_var_dump (dump_file, tv); graph = build_tree_conflict_graph (liveinfo, tv, cl); type_var_decompact (tv); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "type var list now looks like:n"); type_var_dump (dump_file, tv); fprintf (dump_file, "Coalesce list after conflict graph build:\n"); dump_coalesce_list (dump_file, cl); } sort_coalesce_list (cl); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Coalesce list after sorting:\n"); dump_coalesce_list (dump_file, cl); } coalesce_tpa_members (tv, graph, map, cl, ((dump_flags & TDF_DETAILS) ? dump_file : NULL)); type_var_delete (tv); delete_coalesce_list (cl); } /* Temporary Expression Replacement (TER) Replace SSA version variables during out-of-ssa with their defining expression if there is only one use of the variable. 
A pass is made through the function, one block at a time. No cross block information is tracked. Variables which only have one use, and whose defining stmt is considered a replaceable expression (see check_replaceable) are entered into consideration by adding a list of dependent partitions to the version_info vector for that ssa_name_version. This information comes from the partition mapping for each USE. At the same time, the partition_dep_list vector for these partitions have this version number entered into their lists. When the use of a replaceable ssa_variable is encountered, the dependence list in version_info[] is moved to the "pending_dependence" list in case the current expression is also replaceable. (To be determined later in processing this stmt.) version_info[] for the version is then updated to point to the defining stmt and the 'replaceable' bit is set. Any partition which is defined by a statement 'kills' any expression which is dependent on this partition. Every ssa version in the partitions' dependence list is removed from future consideration. All virtual references are lumped together. Any expression which is dependent on any virtual variable (via a VUSE) has a dependence added to the special partition defined by VIRTUAL_PARTITION. Whenever a V_MAY_DEF is seen, all expressions dependent this VIRTUAL_PARTITION are removed from consideration. At the end of a basic block, all expression are removed from consideration in preparation for the next block. The end result is a vector over SSA_NAME_VERSION which is passed back to rewrite_out_of_ssa. As the SSA variables are being rewritten, instead of replacing the SSA_NAME tree element with the partition it was assigned, it is replaced with the RHS of the defining expression. */ /* Dependency list element. This can contain either a partition index or a version number, depending on which list it is in. */ typedef struct value_expr_d { int value; struct value_expr_d *next; } *value_expr_p; /* Temporary Expression Replacement (TER) table information. */ typedef struct temp_expr_table_d { var_map map; void **version_info; value_expr_p *partition_dep_list; bitmap replaceable; bool saw_replaceable; int virtual_partition; bitmap partition_in_use; value_expr_p free_list; value_expr_p pending_dependence; } *temp_expr_table_p; /* Used to indicate a dependency on V_MAY_DEFs. */ #define VIRTUAL_PARTITION(table) (table->virtual_partition) static temp_expr_table_p new_temp_expr_table (var_map); static tree *free_temp_expr_table (temp_expr_table_p); static inline value_expr_p new_value_expr (temp_expr_table_p); static inline void free_value_expr (temp_expr_table_p, value_expr_p); static inline value_expr_p find_value_in_list (value_expr_p, int, value_expr_p *); static inline void add_value_to_list (temp_expr_table_p, value_expr_p *, int); static inline void add_info_to_list (temp_expr_table_p, value_expr_p *, value_expr_p); static value_expr_p remove_value_from_list (value_expr_p *, int); static void add_dependance (temp_expr_table_p, int, tree); static bool check_replaceable (temp_expr_table_p, tree); static void finish_expr (temp_expr_table_p, int, bool); static void mark_replaceable (temp_expr_table_p, tree); static inline void kill_expr (temp_expr_table_p, int, bool); static inline void kill_virtual_exprs (temp_expr_table_p, bool); static void find_replaceable_in_bb (temp_expr_table_p, basic_block); static tree *find_replaceable_exprs (var_map); static void dump_replaceable_exprs (FILE *, tree *); /* Create a new TER table for MAP. 
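   (Illustrative example of the replacement scheme described above, not taken from the original sources: given
        a_2 = b_1 + 4;
        c_3 = a_2 + i_7;
   if a_2 has exactly one use and its defining statement is accepted by check_replaceable, the expression vector handed back to rewrite_out_of_ssa maps version 2 to that definition, so rewriting the second statement substitutes the RHS and yields the equivalent of c = (b + 4) + i, while the separate statement defining a_2 is removed.)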
*/ static temp_expr_table_p new_temp_expr_table (var_map map) { temp_expr_table_p t; t = (temp_expr_table_p) xmalloc (sizeof (struct temp_expr_table_d)); t->map = map; t->version_info = xcalloc (num_ssa_names + 1, sizeof (void *)); t->partition_dep_list = xcalloc (num_var_partitions (map) + 1, sizeof (value_expr_p)); t->replaceable = BITMAP_XMALLOC (); t->partition_in_use = BITMAP_XMALLOC (); t->saw_replaceable = false; t->virtual_partition = num_var_partitions (map); t->free_list = NULL; t->pending_dependence = NULL; return t; } /* Free TER table T. If there are valid replacements, return the expression vector. */ static tree * free_temp_expr_table (temp_expr_table_p t) { value_expr_p p; tree *ret = NULL; #ifdef ENABLE_CHECKING int x; for (x = 0; x <= num_var_partitions (t->map); x++) if (t->partition_dep_list[x] != NULL) abort(); #endif while ((p = t->free_list)) { t->free_list = p->next; free (p); } BITMAP_XFREE (t->partition_in_use); BITMAP_XFREE (t->replaceable); free (t->partition_dep_list); if (t->saw_replaceable) ret = (tree *)t->version_info; else free (t->version_info); free (t); return ret; } /* Allocate a new value list node. Take it from the free list in TABLE if possible. */ static inline value_expr_p new_value_expr (temp_expr_table_p table) { value_expr_p p; if (table->free_list) { p = table->free_list; table->free_list = p->next; } else p = (value_expr_p) xmalloc (sizeof (struct value_expr_d)); return p; } /* Add value list node P to the free list in TABLE. */ static inline void free_value_expr (temp_expr_table_p table, value_expr_p p) { p->next = table->free_list; table->free_list = p; } /* Find VALUE if its in LIST. Return a pointer to the list object if found, else return NULL. If LAST_PTR is provided, it will point to the previous item upon return, or NULL if this is the first item in the list. */ static inline value_expr_p find_value_in_list (value_expr_p list, int value, value_expr_p *last_ptr) { value_expr_p curr; value_expr_p last = NULL; for (curr = list; curr; last = curr, curr = curr->next) { if (curr->value == value) break; } if (last_ptr) *last_ptr = last; return curr; } /* Add VALUE to LIST, if it isn't already present. TAB is the expression table */ static inline void add_value_to_list (temp_expr_table_p tab, value_expr_p *list, int value) { value_expr_p info; if (!find_value_in_list (*list, value, NULL)) { info = new_value_expr (tab); info->value = value; info->next = *list; *list = info; } } /* Add value node INFO if it's value isn't already in LIST. Free INFO if it is already in the list. TAB is the expression table. */ static inline void add_info_to_list (temp_expr_table_p tab, value_expr_p *list, value_expr_p info) { if (find_value_in_list (*list, info->value, NULL)) free_value_expr (tab, info); else { info->next = *list; *list = info; } } /* Look for VALUE in LIST. If found, remove it from the list and return it's pointer. */ static value_expr_p remove_value_from_list (value_expr_p *list, int value) { value_expr_p info, last; info = find_value_in_list (*list, value, &last); if (!info) return NULL; if (!last) *list = info->next; else last->next = info->next; return info; } /* Add a dependency between the def of ssa VERSION and VAR. If VAR is replaceable by an expression, add a dependence each of the elements of the expression. These are contained in the pending list. TAB is the expression table. 
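   (Worked illustration, not part of the original comment: suppose version 7 is defined by x_7 = y_3 + 1, and y_3 is not itself being substituted but lives in partition 4. Then 7 is appended to partition_dep_list[4], partition 4 is recorded in version_info[7], and bit 4 is set in partition_in_use, so a later real definition of partition 4 makes kill_expr drop version 7 from replacement consideration.)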
*/ static void add_dependance (temp_expr_table_p tab, int version, tree var) { int i, x; value_expr_p info; i = SSA_NAME_VERSION (var); if (bitmap_bit_p (tab->replaceable, i)) { /* This variable is being substituted, so use whatever dependences were queued up when we marked this as replaceable earlier. */ while ((info = tab->pending_dependence)) { tab->pending_dependence = info->next; /* Get the partition this variable was dependent on. Reuse this object to represent the current expression instead. */ x = info->value; info->value = version; add_info_to_list (tab, &(tab->partition_dep_list[x]), info); add_value_to_list (tab, (value_expr_p *)&(tab->version_info[version]), x); bitmap_set_bit (tab->partition_in_use, x); } } else { i = var_to_partition (tab->map, var); #ifdef ENABLE_CHECKING if (i== NO_PARTITION) abort (); #endif add_value_to_list (tab, &(tab->partition_dep_list[i]), version); add_value_to_list (tab, (value_expr_p *)&(tab->version_info[version]), i); bitmap_set_bit (tab->partition_in_use, i); } } /* Check if expression STMT is suitable for replacement in table TAB. If so, create an expression entry. Return true if this stmt is replaceable. */ static bool check_replaceable (temp_expr_table_p tab, tree stmt) { stmt_ann_t ann; vuse_optype vuseops; def_optype defs; use_optype uses; tree var, def; int num_use_ops, version, i; var_map map = tab->map; if (TREE_CODE (stmt) != MODIFY_EXPR) return false; ann = stmt_ann (stmt); defs = DEF_OPS (ann); /* Punt if there is more than 1 def, or more than 1 use. */ if (NUM_DEFS (defs) != 1) return false; def = DEF_OP (defs, 0); if (version_ref_count (map, def) != 1) return false; /* Assignments to variables assigned to hard registers are not replaceable. */ if (DECL_HARD_REGISTER (SSA_NAME_VAR (def))) return false; /* There must be no V_MAY_DEFS. */ if (NUM_V_MAY_DEFS (V_MAY_DEF_OPS (ann)) != 0) return false; /* There must be no V_MUST_DEFS. */ if (NUM_V_MUST_DEFS (V_MUST_DEF_OPS (ann)) != 0) return false; /* Float expressions must go through memory if float-store is on. */ if (flag_float_store && FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (stmt, 1)))) return false; uses = USE_OPS (ann); num_use_ops = NUM_USES (uses); vuseops = VUSE_OPS (ann); /* Any expression which has no virtual operands and no real operands should have been propagated if it's possible to do anything with them. If this happens here, it probably exists that way for a reason, so we won't touch it. An example is: b_4 = &tab There are no virtual uses nor any real uses, so we just leave this alone to be safe. */ if (num_use_ops == 0 && NUM_VUSES (vuseops) == 0) return false; version = SSA_NAME_VERSION (def); /* Add this expression to the dependency list for each use partition. */ for (i = 0; i < num_use_ops; i++) { var = USE_OP (uses, i); add_dependance (tab, version, var); } /* If there are VUSES, add a dependence on virtual defs. */ if (NUM_VUSES (vuseops) != 0) { add_value_to_list (tab, (value_expr_p *)&(tab->version_info[version]), VIRTUAL_PARTITION (tab)); add_value_to_list (tab, &(tab->partition_dep_list[VIRTUAL_PARTITION (tab)]), version); bitmap_set_bit (tab->partition_in_use, VIRTUAL_PARTITION (tab)); } return true; } /* This function will remove the expression for VERSION from replacement consideration.n table TAB If 'replace' is true, it is marked as replaceable, otherwise not. */ static void finish_expr (temp_expr_table_p tab, int version, bool replace) { value_expr_p info, tmp; int partition; /* Remove this expression from its dependent lists. 
The partition dependence list is retained and transfered later to whomever uses this version. */ for (info = (value_expr_p) tab->version_info[version]; info; info = tmp) { partition = info->value; #ifdef ENABLE_CHECKING if (tab->partition_dep_list[partition] == NULL) abort (); #endif tmp = remove_value_from_list (&(tab->partition_dep_list[partition]), version); #ifdef ENABLE_CHECKING if (!tmp) abort (); #endif free_value_expr (tab, tmp); /* Only clear the bit when the dependency list is emptied via a replacement. Otherwise kill_expr will take care of it. */ if (!(tab->partition_dep_list[partition]) && replace) bitmap_clear_bit (tab->partition_in_use, partition); tmp = info->next; if (!replace) free_value_expr (tab, info); } if (replace) { tab->saw_replaceable = true; bitmap_set_bit (tab->replaceable, version); } else { #ifdef ENABLE_CHECKING if (bitmap_bit_p (tab->replaceable, version)) abort (); #endif tab->version_info[version] = NULL; } } /* Mark the expression associated with VAR as replaceable, and enter the defining stmt into the version_info table TAB. */ static void mark_replaceable (temp_expr_table_p tab, tree var) { value_expr_p info; int version = SSA_NAME_VERSION (var); finish_expr (tab, version, true); /* Move the dependence list to the pending list. */ if (tab->version_info[version]) { info = (value_expr_p) tab->version_info[version]; for ( ; info->next; info = info->next) continue; info->next = tab->pending_dependence; tab->pending_dependence = (value_expr_p)tab->version_info[version]; } tab->version_info[version] = SSA_NAME_DEF_STMT (var); } /* This function marks any expression in TAB which is dependent on PARTITION as NOT replaceable. CLEAR_BIT is used to determine whether partition_in_use should have its bit cleared. Since this routine can be called within an EXECUTE_IF_SET_IN_BITMAP, the bit can't always be cleared. */ static inline void kill_expr (temp_expr_table_p tab, int partition, bool clear_bit) { value_expr_p ptr; /* Mark every active expr dependent on this var as not replaceable. */ while ((ptr = tab->partition_dep_list[partition]) != NULL) finish_expr (tab, ptr->value, false); if (clear_bit) bitmap_clear_bit (tab->partition_in_use, partition); } /* This function kills all expressions in TAB which are dependent on virtual DEFs. CLEAR_BIT determines whether partition_in_use gets cleared. */ static inline void kill_virtual_exprs (temp_expr_table_p tab, bool clear_bit) { kill_expr (tab, VIRTUAL_PARTITION (tab), clear_bit); } /* This function processes basic block BB, and looks for variables which can be replaced by their expressions. Results are stored in TAB. */ static void find_replaceable_in_bb (temp_expr_table_p tab, basic_block bb) { block_stmt_iterator bsi; tree stmt, def; stmt_ann_t ann; int partition, num, i; use_optype uses; def_optype defs; var_map map = tab->map; value_expr_p p; for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { stmt = bsi_stmt (bsi); ann = stmt_ann (stmt); /* Determine if this stmt finishes an existing expression. */ uses = USE_OPS (ann); num = NUM_USES (uses); for (i = 0; i < num; i++) { def = USE_OP (uses, i); if (tab->version_info[SSA_NAME_VERSION (def)]) { /* Mark expression as replaceable unless stmt is volatile. */ if (!ann->has_volatile_ops) mark_replaceable (tab, def); else finish_expr (tab, SSA_NAME_VERSION (def), false); } } /* Next, see if this stmt kills off an active expression. 
*/ defs = DEF_OPS (ann); num = NUM_DEFS (defs); for (i = 0; i < num; i++) { def = DEF_OP (defs, i); partition = var_to_partition (map, def); if (partition != NO_PARTITION && tab->partition_dep_list[partition]) kill_expr (tab, partition, true); } /* Now see if we are creating a new expression or not. */ if (!ann->has_volatile_ops) check_replaceable (tab, stmt); /* Free any unused dependency lists. */ while ((p = tab->pending_dependence)) { tab->pending_dependence = p->next; free_value_expr (tab, p); } /* A V_MAY_DEF kills any expression using a virtual operand. */ if (NUM_V_MAY_DEFS (V_MAY_DEF_OPS (ann)) > 0) kill_virtual_exprs (tab, true); /* A V_MUST_DEF kills any expression using a virtual operand. */ if (NUM_V_MUST_DEFS (V_MUST_DEF_OPS (ann)) > 0) kill_virtual_exprs (tab, true); } } /* This function is the driver routine for replacement of temporary expressions in the SSA->normal phase, operating on MAP. If there are replaceable expressions, a table is returned which maps SSA versions to the expressions they should be replaced with. A NULL_TREE indicates no replacement should take place. If there are no replacements at all, NULL is returned by the function, otherwise an expression vector indexed by SSA_NAME version numbers. */ static tree * find_replaceable_exprs (var_map map) { basic_block bb; int i; temp_expr_table_p table; tree *ret; table = new_temp_expr_table (map); FOR_EACH_BB (bb) { find_replaceable_in_bb (table, bb); EXECUTE_IF_SET_IN_BITMAP ((table->partition_in_use), 0, i, { kill_expr (table, i, false); }); } ret = free_temp_expr_table (table); return ret; } /* Dump TER expression table EXPR to file F. */ static void dump_replaceable_exprs (FILE *f, tree *expr) { tree stmt, var; int x; fprintf (f, "\nReplacing Expressions\n"); for (x = 0; x < (int)num_ssa_names + 1; x++) if (expr[x]) { stmt = expr[x]; var = DEF_OP (STMT_DEF_OPS (stmt), 0); print_generic_expr (f, var, TDF_SLIM); fprintf (f, " replace with --> "); print_generic_expr (f, TREE_OPERAND (stmt, 1), TDF_SLIM); fprintf (f, "\n"); } fprintf (f, "\n"); } /* Helper function for discover_nonconstant_array_refs. Look for ARRAY_REF nodes with non-constant indexes and mark them addressable. */ static tree discover_nonconstant_array_refs_r (tree * tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) { tree t = *tp; if (TYPE_P (t) || DECL_P (t)) *walk_subtrees = 0; else if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { while (((TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) && is_gimple_min_invariant (TREE_OPERAND (t, 1)) && (!TREE_OPERAND (t, 2) || is_gimple_min_invariant (TREE_OPERAND (t, 2)))) || (TREE_CODE (t) == COMPONENT_REF && (!TREE_OPERAND (t,2) || is_gimple_min_invariant (TREE_OPERAND (t, 2)))) || TREE_CODE (t) == BIT_FIELD_REF || TREE_CODE (t) == REALPART_EXPR || TREE_CODE (t) == IMAGPART_EXPR || TREE_CODE (t) == VIEW_CONVERT_EXPR || TREE_CODE (t) == NOP_EXPR || TREE_CODE (t) == CONVERT_EXPR) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { t = get_base_address (t); if (t && DECL_P (t)) TREE_ADDRESSABLE (t) = 1; } *walk_subtrees = 0; } return NULL_TREE; } /* RTL expansion is not able to compile array references with variable offsets for arrays stored in single register. Discover such expressions and mark variables as addressable to avoid this scenario. 
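   (Small illustrative case, not from the original sources: for
        int v[2];
        ...
        x = v[i];
   with a non-constant index i, v might otherwise be promoted into a single register, which the RTL expanders cannot index dynamically. Walking the statement with discover_nonconstant_array_refs_r sets TREE_ADDRESSABLE on v, forcing it to live in memory where the variable-offset access can be expanded.)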
*/ static void discover_nonconstant_array_refs (void) { basic_block bb; block_stmt_iterator bsi; FOR_EACH_BB (bb) { for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) walk_tree (bsi_stmt_ptr (bsi), discover_nonconstant_array_refs_r, NULL , NULL); } } /* This function will rewrite the current program using the variable mapping found in MAP. If the replacement vector VALUES is provided, any occurrences of partitions with non-null entries in the vector will be replaced with the expression in the vector instead of its mapped variable. */ static void rewrite_trees (var_map map, tree *values) { elim_graph g; basic_block bb; block_stmt_iterator si; edge e; tree phi; bool changed; #ifdef ENABLE_CHECKING /* Search for PHIs where the destination has no partition, but one or more arguments has a partition. This should not happen and can create incorrect code. */ FOR_EACH_BB (bb) { tree phi; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { tree T0 = var_to_partition_to_var (map, PHI_RESULT (phi)); if (T0 == NULL_TREE) { int i; for (i = 0; i < PHI_NUM_ARGS (phi); i++) { tree arg = PHI_ARG_DEF (phi, i); if (TREE_CODE (arg) == SSA_NAME && var_to_partition (map, arg) != NO_PARTITION) { fprintf (stderr, "Argument of PHI is in a partition :("); print_generic_expr (stderr, arg, TDF_SLIM); fprintf (stderr, "), but the result is not :"); print_generic_stmt (stderr, phi, TDF_SLIM); abort(); } } } } } #endif /* Replace PHI nodes with any required copies. */ g = new_elim_graph (map->num_partitions); g->map = map; FOR_EACH_BB (bb) { for (si = bsi_start (bb); !bsi_end_p (si); ) { size_t i, num_uses, num_defs; use_optype uses; def_optype defs; tree stmt = bsi_stmt (si); use_operand_p use_p; int remove = 0, is_copy = 0; stmt_ann_t ann; get_stmt_operands (stmt); ann = stmt_ann (stmt); changed = false; if (TREE_CODE (stmt) == MODIFY_EXPR && (TREE_CODE (TREE_OPERAND (stmt, 1)) == SSA_NAME)) is_copy = 1; uses = USE_OPS (ann); num_uses = NUM_USES (uses); for (i = 0; i < num_uses; i++) { use_p = USE_OP_PTR (uses, i); if (replace_use_variable (map, use_p, values)) changed = true; } defs = DEF_OPS (ann); num_defs = NUM_DEFS (defs); /* Mark this stmt for removal if it is the list of replaceable expressions. */ if (values && num_defs == 1) { tree def = DEF_OP (defs, 0); tree val; val = values[SSA_NAME_VERSION (def)]; if (val) remove = 1; } if (!remove) { for (i = 0; i < num_defs; i++) { def_operand_p def_p = DEF_OP_PTR (defs, i); if (replace_def_variable (map, def_p, NULL)) changed = true; /* If both SSA_NAMEs coalesce to the same variable, mark the now redundant copy for removal. */ if (is_copy && num_uses == 1 && (DEF_FROM_PTR (def_p) == USE_OP (uses, 0))) remove = 1; } if (changed) modify_stmt (stmt); } /* Remove any stmts marked for removal. */ if (remove) bsi_remove (&si); else bsi_next (&si); } phi = phi_nodes (bb); if (phi) { for (e = bb->pred; e; e = e->pred_next) eliminate_phi (e, phi_arg_from_edge (phi, e), g); } } delete_elim_graph (g); /* If any copies were inserted on edges, actually insert them now. */ bsi_commit_edge_inserts (NULL); } /* Remove the variables specified in MAP from SSA form. Any debug information is sent to DUMP. FLAGS indicate what options should be used. */ void remove_ssa_form (FILE *dump, var_map map, int flags) { tree_live_info_p liveinfo; basic_block bb; tree phi, next; FILE *save; tree *values = NULL; save = dump_file; dump_file = dump; /* If we are not combining temps, don't calculate live ranges for variables with only one SSA version. 
*/ if ((flags & SSANORM_COMBINE_TEMPS) == 0) compact_var_map (map, VARMAP_NO_SINGLE_DEFS); else compact_var_map (map, VARMAP_NORMAL); if (dump_file && (dump_flags & TDF_DETAILS)) dump_var_map (dump_file, map); liveinfo = coalesce_ssa_name (map, flags); /* Make sure even single occurrence variables are in the list now. */ if ((flags & SSANORM_COMBINE_TEMPS) == 0) compact_var_map (map, VARMAP_NORMAL); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "After Coalescing:\n"); dump_var_map (dump_file, map); } if (flags & SSANORM_PERFORM_TER) { values = find_replaceable_exprs (map); if (values && dump_file && (dump_flags & TDF_DETAILS)) dump_replaceable_exprs (dump_file, values); } /* Assign real variables to the partitions now. */ assign_vars (map); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "After Root variable replacement:\n"); dump_var_map (dump_file, map); } if ((flags & SSANORM_COMBINE_TEMPS) && liveinfo) { coalesce_vars (map, liveinfo); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "After variable memory coalescing:\n"); dump_var_map (dump_file, map); } } if (liveinfo) delete_tree_live_info (liveinfo); rewrite_trees (map, values); if (values) free (values); /* Remove phi nodes which have been translated back to real variables. */ FOR_EACH_BB (bb) { for (phi = phi_nodes (bb); phi; phi = next) { next = PHI_CHAIN (phi); if ((flags & SSANORM_REMOVE_ALL_PHIS) || var_to_partition (map, PHI_RESULT (phi)) != NO_PARTITION) remove_phi_node (phi, NULL_TREE, bb); } } dump_file = save; } /* Take a subset of the variables VARS in the current function out of SSA form. */ void rewrite_vars_out_of_ssa (bitmap vars) { if (bitmap_first_set_bit (vars) >= 0) { var_map map; basic_block bb; tree phi; int i; int ssa_flags; /* Search for PHIs in which one of the PHI arguments is marked for translation out of SSA form, but for which the PHI result is not marked for translation out of SSA form. Our per-variable out of SSA translation can not handle that case; however we can easily handle it here by creating a new instance of the PHI result's underlying variable and initializing it to the offending PHI argument on the edge associated with the PHI argument. We then change the PHI argument to use our new instead of the PHI's underlying variable. You might think we could register partitions for the out-of-ssa translation here and avoid a second walk of the PHI nodes. No such luck since the size of the var map will change if we have to manually take variables out of SSA form here. */ FOR_EACH_BB (bb) { for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { tree result = SSA_NAME_VAR (PHI_RESULT (phi)); /* If the definition is marked for renaming, then we need to do nothing more for this PHI node. */ if (bitmap_bit_p (vars, var_ann (result)->uid)) continue; /* Look at all the arguments and see if any of them are marked for renaming. If so, we need to handle them specially. */ for (i = 0; i < PHI_NUM_ARGS (phi); i++) { tree arg = PHI_ARG_DEF (phi, i); /* If the argument is not an SSA_NAME, then we can ignore this argument. */ if (TREE_CODE (arg) != SSA_NAME) continue; /* If this argument is marked for renaming, then we need to undo the copy propagation so that we can take the argument out of SSA form without taking the result out of SSA form. */ arg = SSA_NAME_VAR (arg); if (bitmap_bit_p (vars, var_ann (arg)->uid)) { tree new_name, copy; /* Get a new SSA_NAME for the copy, it is based on the result, not the argument! 
We use the PHI as the definition since we haven't created the definition statement yet. */ new_name = make_ssa_name (result, phi); /* Now create the copy statement. */ copy = build (MODIFY_EXPR, TREE_TYPE (arg), new_name, PHI_ARG_DEF (phi, i)); /* Now update SSA_NAME_DEF_STMT to point to the newly created statement. */ SSA_NAME_DEF_STMT (new_name) = copy; /* Now make the argument reference our new SSA_NAME. */ SET_PHI_ARG_DEF (phi, i, new_name); /* Queue the statement for insertion. */ bsi_insert_on_edge (PHI_ARG_EDGE (phi, i), copy); modify_stmt (copy); } } } } /* If any copies were inserted on edges, actually insert them now. */ bsi_commit_edge_inserts (NULL); /* Now register partitions for all instances of the variables we are taking out of SSA form. */ map = init_var_map (num_ssa_names + 1); register_ssa_partitions_for_vars (vars, map); /* Now that we have all the partitions registered, translate the appropriate variables out of SSA form. */ ssa_flags = SSANORM_COALESCE_PARTITIONS; if (flag_tree_combine_temps) ssa_flags |= SSANORM_COMBINE_TEMPS; remove_ssa_form (dump_file, map, ssa_flags); /* And finally, reset the out_of_ssa flag for each of the vars we just took out of SSA form. */ EXECUTE_IF_SET_IN_BITMAP (vars, 0, i, { var_ann (referenced_var (i))->out_of_ssa_tag = 0; }); /* Free the map as we are done with it. */ delete_var_map (map); } } /* Take the current function out of SSA form, as described in R. Morgan, ``Building an Optimizing Compiler'', Butterworth-Heinemann, Boston, MA, 1998. pp 176-186. */ static void rewrite_out_of_ssa (void) { var_map map; int var_flags = 0; int ssa_flags = (SSANORM_REMOVE_ALL_PHIS | SSANORM_USE_COALESCE_LIST); if (!flag_tree_live_range_split) ssa_flags |= SSANORM_COALESCE_PARTITIONS; eliminate_virtual_phis (); if (dump_file && (dump_flags & TDF_DETAILS)) dump_tree_cfg (dump_file, dump_flags & ~TDF_DETAILS); /* We cannot allow unssa to un-gimplify trees before we instrument them. */ if (flag_tree_ter && !flag_mudflap) var_flags = SSA_VAR_MAP_REF_COUNT; map = create_ssa_var_map (var_flags); if (flag_tree_combine_temps) ssa_flags |= SSANORM_COMBINE_TEMPS; if (flag_tree_ter && !flag_mudflap) ssa_flags |= SSANORM_PERFORM_TER; remove_ssa_form (dump_file, map, ssa_flags); if (dump_file && (dump_flags & TDF_DETAILS)) dump_tree_cfg (dump_file, dump_flags & ~TDF_DETAILS); /* Do some cleanups which reduce the amount of data the tree->rtl expanders deal with. */ cfg_remove_useless_stmts (); /* Flush out flow graph and SSA data. */ delete_var_map (map); /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE. */ discover_nonconstant_array_refs (); } /* Define the parameters of the out of SSA pass. */ struct tree_opt_pass pass_del_ssa = { "optimized", /* name */ NULL, /* gate */ rewrite_out_of_ssa, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_SSA_TO_NORMAL, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ /* ??? If TER is enabled, we also kill gimple. */ PROP_ssa, /* properties_destroyed */ TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, /* todo_flags_start */ TODO_dump_func | TODO_ggc_collect /* todo_flags_finish */ }; /* Tree based points-to analysis Copyright (C) 2002, 2003 Free Software Foundation, Inc. Contributed by Daniel Berlin This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* If we have andersen's points-to analysis, include it. */ #ifdef HAVE_BANSHEE /* Tree based Andersen points-to analysis Copyright (C) 2002, 2003 Free Software Foundation, Inc. Contributed by Daniel Berlin This file is part of GCC. GCC is free software; you can redistribute it and/or modify under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef TREE_ALIAS_ANDER #define TREE_ALIAS_ANDER extern struct tree_alias_ops *andersen_alias_ops; #endif /* TREE_ALIAS_ANDER */ #endif /* Definitions for c-common.c. Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_C_COMMON_H #define GCC_C_COMMON_H /* Usage of TREE_LANG_FLAG_?: 0: TREE_NEGATED_INT (in INTEGER_CST). IDENTIFIER_MARKED (used by search routines). DECL_PRETTY_FUNCTION_P (in VAR_DECL) STMT_EXPR_NO_SCOPE (in STMT_EXPR) 1: C_DECLARED_LABEL_FLAG (in LABEL_DECL) STMT_IS_FULL_EXPR_P (in _STMT) STATEMENT_LIST_STMT_EXPR (in STATEMENT_LIST) 2: unused 3: unused 4: unused */ /* Reserved identifiers. This is the union of all the keywords for C, C++, and Objective-C. All the type modifiers have to be in one block at the beginning, because they are used as mask bits. There are 27 type modifiers; if we add many more we will have to redesign the mask mechanism. */ enum rid { /* Modifiers: */ /* C, in empirical order of frequency. 
*/ RID_STATIC = 0, RID_UNSIGNED, RID_LONG, RID_CONST, RID_EXTERN, RID_REGISTER, RID_TYPEDEF, RID_SHORT, RID_INLINE, RID_VOLATILE, RID_SIGNED, RID_AUTO, RID_RESTRICT, /* C extensions */ RID_COMPLEX, RID_THREAD, /* C++ */ RID_FRIEND, RID_VIRTUAL, RID_EXPLICIT, RID_EXPORT, RID_MUTABLE, /* ObjC */ RID_IN, RID_OUT, RID_INOUT, RID_BYCOPY, RID_BYREF, RID_ONEWAY, /* C */ RID_INT, RID_CHAR, RID_FLOAT, RID_DOUBLE, RID_VOID, RID_ENUM, RID_STRUCT, RID_UNION, RID_IF, RID_ELSE, RID_WHILE, RID_DO, RID_FOR, RID_SWITCH, RID_CASE, RID_DEFAULT, RID_BREAK, RID_CONTINUE, RID_RETURN, RID_GOTO, RID_SIZEOF, /* C extensions */ RID_ASM, RID_TYPEOF, RID_ALIGNOF, RID_ATTRIBUTE, RID_VA_ARG, RID_EXTENSION, RID_IMAGPART, RID_REALPART, RID_LABEL, RID_PTRBASE, RID_PTREXTENT, RID_PTRVALUE, RID_CHOOSE_EXPR, RID_TYPES_COMPATIBLE_P, /* Too many ways of getting the name of a function as a string */ RID_FUNCTION_NAME, RID_PRETTY_FUNCTION_NAME, RID_C99_FUNCTION_NAME, /* C++ */ RID_BOOL, RID_WCHAR, RID_CLASS, RID_PUBLIC, RID_PRIVATE, RID_PROTECTED, RID_TEMPLATE, RID_NULL, RID_CATCH, RID_DELETE, RID_FALSE, RID_NAMESPACE, RID_NEW, RID_OFFSETOF, RID_OPERATOR, RID_THIS, RID_THROW, RID_TRUE, RID_TRY, RID_TYPENAME, RID_TYPEID, RID_USING, /* casts */ RID_CONSTCAST, RID_DYNCAST, RID_REINTCAST, RID_STATCAST, /* Objective-C */ RID_ID, RID_AT_ENCODE, RID_AT_END, RID_AT_CLASS, RID_AT_ALIAS, RID_AT_DEFS, RID_AT_PRIVATE, RID_AT_PROTECTED, RID_AT_PUBLIC, RID_AT_PROTOCOL, RID_AT_SELECTOR, RID_AT_THROW, RID_AT_TRY, RID_AT_CATCH, RID_AT_FINALLY, RID_AT_SYNCHRONIZED, RID_AT_INTERFACE, RID_AT_IMPLEMENTATION, RID_MAX, RID_FIRST_MODIFIER = RID_STATIC, RID_LAST_MODIFIER = RID_ONEWAY, RID_FIRST_AT = RID_AT_ENCODE, RID_LAST_AT = RID_AT_IMPLEMENTATION, RID_FIRST_PQ = RID_IN, RID_LAST_PQ = RID_ONEWAY }; #define OBJC_IS_AT_KEYWORD(rid) \ ((unsigned int)(rid) >= (unsigned int)RID_FIRST_AT && \ (unsigned int)(rid) <= (unsigned int)RID_LAST_AT) #define OBJC_IS_PQ_KEYWORD(rid) \ ((unsigned int)(rid) >= (unsigned int)RID_FIRST_PQ && \ (unsigned int)(rid) <= (unsigned int)RID_LAST_PQ) /* The elements of `ridpointers' are identifier nodes for the reserved type names and storage classes. It is indexed by a RID_... value. */ extern GTY ((length ("(int)RID_MAX"))) tree *ridpointers; /* Standard named or nameless data types of the C compiler. */ enum c_tree_index { CTI_WCHAR_TYPE, CTI_SIGNED_WCHAR_TYPE, CTI_UNSIGNED_WCHAR_TYPE, CTI_WINT_TYPE, CTI_SIGNED_SIZE_TYPE, /* For format checking only. */ CTI_UNSIGNED_PTRDIFF_TYPE, /* For format checking only. */ CTI_INTMAX_TYPE, CTI_UINTMAX_TYPE, CTI_WIDEST_INT_LIT_TYPE, CTI_WIDEST_UINT_LIT_TYPE, CTI_CHAR_ARRAY_TYPE, CTI_WCHAR_ARRAY_TYPE, CTI_INT_ARRAY_TYPE, CTI_STRING_TYPE, CTI_CONST_STRING_TYPE, /* Type for boolean expressions (bool in C++, int in C). */ CTI_TRUTHVALUE_TYPE, CTI_TRUTHVALUE_TRUE, CTI_TRUTHVALUE_FALSE, CTI_DEFAULT_FUNCTION_TYPE, /* These are not types, but we have to look them up all the time. */ CTI_FUNCTION_NAME_DECL, CTI_PRETTY_FUNCTION_NAME_DECL, CTI_C99_FUNCTION_NAME_DECL, CTI_SAVED_FUNCTION_NAME_DECLS, CTI_VOID_ZERO, C_TREE_IDX_MAX }; #define C_RID_CODE(id) (((struct c_common_identifier *) (id))->node.rid_code) /* Identifier part common to the C front ends. Inherits from tree_identifier, despite appearances. 
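   (Added note: the C_RID_CODE macro defined above relies on this layout; it casts an identifier node to struct c_common_identifier and reads node.rid_code from the embedded cpp_hashnode, which is how the front ends map reserved identifiers back to their enum rid values.)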
*/ struct c_common_identifier GTY(()) { struct tree_common common; struct cpp_hashnode node; }; #define wchar_type_node c_global_trees[CTI_WCHAR_TYPE] #define signed_wchar_type_node c_global_trees[CTI_SIGNED_WCHAR_TYPE] #define unsigned_wchar_type_node c_global_trees[CTI_UNSIGNED_WCHAR_TYPE] #define wint_type_node c_global_trees[CTI_WINT_TYPE] #define signed_size_type_node c_global_trees[CTI_SIGNED_SIZE_TYPE] #define unsigned_ptrdiff_type_node c_global_trees[CTI_UNSIGNED_PTRDIFF_TYPE] #define intmax_type_node c_global_trees[CTI_INTMAX_TYPE] #define uintmax_type_node c_global_trees[CTI_UINTMAX_TYPE] #define widest_integer_literal_type_node c_global_trees[CTI_WIDEST_INT_LIT_TYPE] #define widest_unsigned_literal_type_node c_global_trees[CTI_WIDEST_UINT_LIT_TYPE] #define truthvalue_type_node c_global_trees[CTI_TRUTHVALUE_TYPE] #define truthvalue_true_node c_global_trees[CTI_TRUTHVALUE_TRUE] #define truthvalue_false_node c_global_trees[CTI_TRUTHVALUE_FALSE] #define char_array_type_node c_global_trees[CTI_CHAR_ARRAY_TYPE] #define wchar_array_type_node c_global_trees[CTI_WCHAR_ARRAY_TYPE] #define int_array_type_node c_global_trees[CTI_INT_ARRAY_TYPE] #define string_type_node c_global_trees[CTI_STRING_TYPE] #define const_string_type_node c_global_trees[CTI_CONST_STRING_TYPE] #define default_function_type c_global_trees[CTI_DEFAULT_FUNCTION_TYPE] #define function_name_decl_node c_global_trees[CTI_FUNCTION_NAME_DECL] #define pretty_function_name_decl_node c_global_trees[CTI_PRETTY_FUNCTION_NAME_DECL] #define c99_function_name_decl_node c_global_trees[CTI_C99_FUNCTION_NAME_DECL] #define saved_function_name_decls c_global_trees[CTI_SAVED_FUNCTION_NAME_DECLS] /* A node for `((void) 0)'. */ #define void_zero_node c_global_trees[CTI_VOID_ZERO] extern GTY(()) tree c_global_trees[C_TREE_IDX_MAX]; /* In a RECORD_TYPE, a sorted array of the fields of the type, not a tree for size reasons. */ struct sorted_fields_type GTY(()) { int len; tree GTY((length ("%h.len"))) elts[1]; }; /* Mark which labels are explicitly declared. These may be shadowed, and may be referenced from nested functions. */ #define C_DECLARED_LABEL_FLAG(label) TREE_LANG_FLAG_1 (label) /* Flag strings given by __FUNCTION__ and __PRETTY_FUNCTION__ for a warning if they undergo concatenation. */ #define C_ARTIFICIAL_STRING_P(NODE) TREE_LANG_FLAG_0 (NODE) typedef enum c_language_kind { clk_c = 0, /* C90, C94 or C99 */ clk_objc = 1, /* clk_c with ObjC features. */ clk_cxx = 2, /* ANSI/ISO C++ */ clk_objcxx = 3 /* clk_cxx with ObjC features. */ } c_language_kind; /* To test for a specific language use c_language, defined by each front end. For "ObjC features" or "not C++" use the macros. */ extern c_language_kind c_language; #define c_dialect_cxx() (c_language & clk_cxx) #define c_dialect_objc() (c_language & clk_objc) /* Information about a statement tree. */ struct stmt_tree_s GTY(()) { /* The current statment list being collected. */ tree x_cur_stmt_list; /* In C++, Nonzero if we should treat statements as full expressions. In particular, this variable is no-zero if at the end of a statement we should destroy any temporaries created during that statement. Similarly, if, at the end of a block, we should destroy any local variables in this block. Normally, this variable is nonzero, since those are the normal semantics of C++. However, in order to represent aggregate initialization code as tree structure, we use statement-expressions. 
The statements within the statement expression should not result in cleanups being run until the entire enclosing statement is complete. This flag has no effect in C. */ int stmts_are_full_exprs_p; }; typedef struct stmt_tree_s *stmt_tree; /* Global state pertinent to the current function. Some C dialects extend this structure with additional fields. */ struct c_language_function GTY(()) { /* While we are parsing the function, this contains information about the statement-tree that we are building. */ struct stmt_tree_s x_stmt_tree; }; /* When building a statement-tree, this is the current statment list being collected. It's TREE_CHAIN is a back-pointer to the previous statment list. */ #define cur_stmt_list (current_stmt_tree ()->x_cur_stmt_list) /* Language-specific hooks. */ extern void (*lang_expand_function_end) (void); /* Callback that determines if it's ok for a function to have no noreturn attribute. */ extern int (*lang_missing_noreturn_ok_p) (tree); extern void push_file_scope (void); extern void pop_file_scope (void); extern int yyparse (void); extern stmt_tree current_stmt_tree (void); extern tree push_stmt_list (void); extern tree re_push_stmt_list (tree); extern tree pop_stmt_list (tree); extern tree add_stmt (tree); extern void push_cleanup (tree, tree, bool); extern tree walk_stmt_tree (tree *, walk_tree_fn, void *); extern int c_expand_decl (tree); extern int field_decl_cmp (const void *, const void *); extern void resort_sorted_fields (void *, void *, gt_pointer_operator, void *); extern bool has_c_linkage (tree decl); /* Switches common to the C front ends. */ /* Nonzero if prepreprocessing only. */ extern int flag_preprocess_only; /* Zero means that faster, ...NonNil variants of objc_msgSend... calls will be used in ObjC; passing nil receivers to such calls will most likely result in crashes. */ extern int flag_nil_receivers; /* Nonzero means that we will allow new ObjC exception syntax (@throw, @try, etc.) in source code. */ extern int flag_objc_exceptions; /* Nonzero means that we generate NeXT setjmp based exceptions. */ extern int flag_objc_sjlj_exceptions; /* Nonzero means that code generation will be altered to support "zero-link" execution. This currently affects ObjC only, but may affect other languages in the future. */ extern int flag_zero_link; /* Nonzero means emit an '__OBJC, __image_info' for the current translation unit. It will inform the ObjC runtime that class definition(s) herein contained are to replace one(s) previously loaded. */ extern int flag_replace_objc_classes; /* Nonzero means don't output line number information. */ extern char flag_no_line_commands; /* Nonzero causes -E output not to be done, but directives such as #define that have side effects are still obeyed. */ extern char flag_no_output; /* Nonzero means dump macros in some fashion; contains the 'D', 'M' or 'N' of the command line switch. */ extern char flag_dump_macros; /* Nonzero means pass #include lines through to the output. */ extern char flag_dump_includes; /* Nonzero means process PCH files while preprocessing. */ extern bool flag_pch_preprocess; /* The file name to which we should write a precompiled header, or NULL if no header will be written in this compile. */ extern const char *pch_file; /* Nonzero if an ISO standard was selected. It rejects macros in the user's namespace. */ extern int flag_iso; /* Nonzero if -undef was given. It suppresses target built-in macros and assertions. */ extern int flag_undef; /* Nonzero means don't recognize the non-ANSI builtin functions. 
*/ extern int flag_no_builtin; /* Nonzero means don't recognize the non-ANSI builtin functions. -ansi sets this. */ extern int flag_no_nonansi_builtin; /* Nonzero means give `double' the same size as `float'. */ extern int flag_short_double; /* Nonzero means give `wchar_t' the same size as `short'. */ extern int flag_short_wchar; /* Nonzero means allow Microsoft extensions without warnings or errors. */ extern int flag_ms_extensions; /* Nonzero means don't recognize the keyword `asm'. */ extern int flag_no_asm; /* Nonzero means give string constants the type `const char *', as mandated by the standard. */ extern int flag_const_strings; /* Nonzero means to treat bitfields as signed unless they say `unsigned'. */ extern int flag_signed_bitfields; extern int explicit_flag_signed_bitfields; /* Nonzero means warn about deprecated conversion from string constant to `char *'. */ extern int warn_write_strings; /* Warn about #pragma directives that are not recognized. */ extern int warn_unknown_pragmas; /* Tri state variable. */ /* Warn about format/argument anomalies in calls to formatted I/O functions (*printf, *scanf, strftime, strfmon, etc.). */ extern int warn_format; /* C/ObjC language option variables. */ /* Nonzero means allow type mismatches in conditional expressions; just make their values `void'. */ extern int flag_cond_mismatch; /* Nonzero means enable C89 Amendment 1 features. */ extern int flag_isoc94; /* Nonzero means use the ISO C99 dialect of C. */ extern int flag_isoc99; /* Nonzero means that we have builtin functions, and main is an int. */ extern int flag_hosted; /* Warn if main is suspicious. */ extern int warn_main; /* ObjC language option variables. */ /* Open and close the file for outputting class declarations, if requested (ObjC). */ extern int flag_gen_declaration; /* Generate code for GNU or NeXT runtime environment. */ extern int flag_next_runtime; /* Tells the compiler that this is a special run. Do not perform any compiling, instead we are to test some platform dependent features and output a C header file with appropriate definitions. */ extern int print_struct_values; /* ???. Undocumented. */ extern const char *constant_string_class_name; /* C++ language option variables. */ /* Nonzero means don't recognize any extension keywords. */ extern int flag_no_gnu_keywords; /* Nonzero means do emit exported implementations of functions even if they can be inlined. */ extern int flag_implement_inlines; /* Nonzero means that implicit instantiations will be emitted if needed. */ extern int flag_implicit_templates; /* Nonzero means that implicit instantiations of inline templates will be emitted if needed, even if instantiations of non-inline templates aren't. */ extern int flag_implicit_inline_templates; /* Nonzero means generate separate instantiation control files and juggle them at link time. */ extern int flag_use_repository; /* Nonzero if we want to issue diagnostics that the standard says are not required. */ extern int flag_optional_diags; /* Nonzero means we should attempt to elide constructors when possible. */ extern int flag_elide_constructors; /* Nonzero means that member functions defined in class scope are inline by default. */ extern int flag_default_inline; /* Controls whether compiler generates 'type descriptor' that give run-time type information. */ extern int flag_rtti; /* Nonzero if we want to conserve space in the .o files. 
We do this by putting uninitialized data and runtime initialized data into .common instead of .data at the expense of not flagging multiple definitions. */ extern int flag_conserve_space; /* Nonzero if we want to obey access control semantics. */ extern int flag_access_control; /* Nonzero if we want to check the return value of new and avoid calling constructors if it is a null pointer. */ extern int flag_check_new; /* Nonzero if we want the new ISO rules for pushing a new scope for `for' initialization variables. 0: Old rules, set by -fno-for-scope. 2: New ISO rules, set by -ffor-scope. 1: Try to implement new ISO rules, but with backup compatibility (and warnings). This is the default, for now. */ extern int flag_new_for_scope; /* Nonzero if we want to emit defined symbols with common-like linkage as weak symbols where possible, in order to conform to C++ semantics. Otherwise, emit them as local symbols. */ extern int flag_weak; /* 0 means we want the preprocessor to not emit line directives for the current working directory. 1 means we want it to do it. -1 means we should decide depending on whether debugging information is being emitted or not. */ extern int flag_working_directory; /* Nonzero to use __cxa_atexit, rather than atexit, to register destructors for local statics and global objects. */ extern int flag_use_cxa_atexit; /* Nonzero means make the default pedwarns warnings instead of errors. The value of this flag is ignored if -pedantic is specified. */ extern int flag_permissive; /* Nonzero means to implement standard semantics for exception specifications, calling unexpected if an exception is thrown that doesn't match the specification. Zero means to treat them as assertions and optimize accordingly, but not check them. */ extern int flag_enforce_eh_specs; /* Nonzero means warn about implicit declarations. */ extern int warn_implicit; /* Maximum template instantiation depth. This limit is rather arbitrary, but it exists to limit the time it takes to notice infinite template instantiations. */ extern int max_tinst_depth; /* Nonzero means the expression being parsed will never be evaluated. This is a count, since unevaluated expressions can nest. */ extern int skip_evaluation; /* C types are partitioned into three subsets: object, function, and incomplete types. */ #define C_TYPE_OBJECT_P(type) \ (TREE_CODE (type) != FUNCTION_TYPE && TYPE_SIZE (type)) #define C_TYPE_INCOMPLETE_P(type) \ (TREE_CODE (type) != FUNCTION_TYPE && TYPE_SIZE (type) == 0) #define C_TYPE_FUNCTION_P(type) \ (TREE_CODE (type) == FUNCTION_TYPE) /* For convenience we define a single macro to identify the class of object or incomplete types. */ #define C_TYPE_OBJECT_OR_INCOMPLETE_P(type) \ (!C_TYPE_FUNCTION_P (type)) /* Record in each node resulting from a binary operator what operator was specified for it. */ #define C_EXP_ORIGINAL_CODE(exp) ((enum tree_code) TREE_COMPLEXITY (exp)) /* Attribute table common to the C front ends. */ extern const struct attribute_spec c_common_attribute_table[]; extern const struct attribute_spec c_common_format_attribute_table[]; /* Pointer to function to lazily generate the VAR_DECL for __FUNCTION__ etc. ID is the identifier to use, NAME is the string. TYPE_DEP indicates whether it depends on type of the function or not (i.e. __PRETTY_FUNCTION__). 
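For illustration only (a sketch, not part of this header, assuming stdio.h
has been included), the three spellings behave like this in user code:

    void foo (void)
    {
      puts (__func__);               prints "foo"
      puts (__FUNCTION__);           likewise "foo"
      puts (__PRETTY_FUNCTION__);    "foo" in C, but type-dependent in C++,
                                     where it includes the full signature;
                                     hence the TYPE_DEP parameter
    }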
*/ extern tree (*make_fname_decl) (tree, int); extern tree identifier_global_value (tree); extern void record_builtin_type (enum rid, const char *, tree); extern tree build_void_list_node (void); extern void start_fname_decls (void); extern void finish_fname_decls (void); extern const char *fname_as_string (int); extern tree fname_decl (unsigned, tree); extern void check_function_arguments (tree, tree); extern void check_function_arguments_recurse (void (*) (void *, tree, unsigned HOST_WIDE_INT), void *, tree, unsigned HOST_WIDE_INT); extern void check_function_format (tree, tree); extern void set_Wformat (int); extern tree handle_format_attribute (tree *, tree, tree, int, bool *); extern tree handle_format_arg_attribute (tree *, tree, tree, int, bool *); extern int c_common_handle_option (size_t code, const char *arg, int value); extern bool c_common_missing_argument (const char *opt, size_t code); extern tree c_common_type_for_mode (enum machine_mode, int); extern tree c_common_type_for_size (unsigned int, int); extern tree c_common_unsigned_type (tree); extern tree c_common_signed_type (tree); extern tree c_common_signed_or_unsigned_type (int, tree); extern tree c_common_truthvalue_conversion (tree); extern void c_apply_type_quals_to_decl (int, tree); extern tree c_sizeof_or_alignof_type (tree, enum tree_code, int); extern tree c_alignof_expr (tree); /* Print an error message for invalid operands to arith operation CODE. NOP_EXPR is used as a special case (see truthvalue_conversion). */ extern void binary_op_error (enum tree_code); #define my_friendly_assert(EXP, N) (void) \ (((EXP) == 0) ? (fancy_abort (__FILE__, __LINE__, __FUNCTION__), 0) : 0) /* Validate the expression after `case' and apply default promotions. */ extern tree check_case_value (tree); extern tree fix_string_type (tree); struct varray_head_tag; extern void constant_expression_warning (tree); extern tree convert_and_check (tree, tree); extern void overflow_warning (tree); extern void unsigned_conversion_warning (tree, tree); #define c_sizeof(T) c_sizeof_or_alignof_type (T, SIZEOF_EXPR, 1) #define c_alignof(T) c_sizeof_or_alignof_type (T, ALIGNOF_EXPR, 1) /* Subroutine of build_binary_op, used for comparison operations. See if the operands have both been converted from subword integer types and, if so, perhaps change them both back to their original type. */ extern tree shorten_compare (tree *, tree *, tree *, enum tree_code *); extern tree pointer_int_sum (enum tree_code, tree, tree); extern unsigned int min_precision (tree, int); /* Add qualifiers to a type, in the fashion for C. */ extern tree c_build_qualified_type (tree, int); /* Build tree nodes and builtin functions common to both C and C++ language frontends. */ extern void c_common_nodes_and_builtins (void); extern void disable_builtin_function (const char *); extern tree build_va_arg (tree, tree); extern unsigned int c_common_init_options (unsigned int, const char **); extern bool c_common_post_options (const char **); extern bool c_common_init (void); extern void c_common_finish (void); extern void c_common_parse_file (int); extern HOST_WIDE_INT c_common_get_alias_set (tree); extern void c_register_builtin_type (tree, const char*); extern bool c_promoting_integer_type_p (tree); extern int self_promoting_args_p (tree); extern tree strip_array_types (tree); extern tree strip_pointer_operator (tree); /* This is the basic parsing function. */ extern void c_parse_file (void); /* This is misnamed, it actually performs end-of-compilation processing. 
*/ extern void finish_file (void); /* These macros provide convenient access to the various _STMT nodes. */ /* Nonzero if this statement should be considered a full-expression, i.e., if temporaries created during this statement should have their destructors run at the end of this statement. (In C, this will always be false, since there are no destructors.) */ #define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE)) /* Nonzero if a given STATEMENT_LIST represents the outermost binding if a statement expression. */ #define STATEMENT_LIST_STMT_EXPR(NODE) \ TREE_LANG_FLAG_1 (STATEMENT_LIST_CHECK (NODE)) /* WHILE_STMT accessors. These give access to the condition of the while statement and the body of the while statement, respectively. */ #define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0) #define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1) /* DO_STMT accessors. These give access to the condition of the do statement and the body of the do statement, respectively. */ #define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0) #define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1) /* EXPR_STMT accessor. This gives the expression associated with an expression statement. */ #define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0) /* FOR_STMT accessors. These give access to the init statement, condition, update expression, and body of the for statement, respectively. */ #define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0) #define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1) #define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2) #define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3) #define SWITCH_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2) /* STMT_EXPR accessor. */ #define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0) /* Nonzero if this statement-expression does not have an associated scope. */ #define STMT_EXPR_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE)) /* COMPOUND_LITERAL_EXPR accessors. */ #define COMPOUND_LITERAL_EXPR_DECL_STMT(NODE) \ TREE_OPERAND (COMPOUND_LITERAL_EXPR_CHECK (NODE), 0) #define COMPOUND_LITERAL_EXPR_DECL(NODE) \ DECL_EXPR_DECL (COMPOUND_LITERAL_EXPR_DECL_STMT (NODE)) #define DEFTREECODE(SYM, NAME, TYPE, LENGTH) SYM, enum c_tree_code { C_DUMMY_TREE_CODE = LAST_AND_UNUSED_TREE_CODE, /* This file contains the definitions and documentation for the additional tree codes used in the GNU C++ compiler (see tree.def for the standard codes). Copyright (C) 1987, 1988, 1990, 1993, 1997, 1998, 1999, 2000, 2001, 2004 Free Software Foundation, Inc. Written by Benjamin Chelf This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Tree nodes relevant to both C and C++. These were originally in cp-tree.def in the cp subdir. 
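Illustrative note (a sketch, not part of this file): DEFTREECODE is defined
just above to expand to its first argument followed by a comma, so each
entry below simply contributes an enumerator.  For example

    DEFTREECODE (WHILE_STMT, "while_stmt", 'e', 2)

becomes

    WHILE_STMT,

inside enum c_tree_code.  Such a node is typically created and inspected
with the helpers declared in this file, e.g.

    tree stmt = build_stmt (WHILE_STMT, cond_expr, body_stmt);
    tree cond = WHILE_COND (stmt);
    tree body = WHILE_BODY (stmt);

where cond_expr and body_stmt are whatever trees the caller has built.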
*/ DEFTREECODE (SIZEOF_EXPR, "sizeof_expr", '1', 1) DEFTREECODE (ARROW_EXPR, "arrow_expr", 'e', 1) DEFTREECODE (ALIGNOF_EXPR, "alignof_expr", '1', 1) /* Used to represent an expression statement. Use `EXPR_STMT_EXPR' to obtain the expression. */ DEFTREECODE (EXPR_STMT, "expr_stmt", 'e', 1) /* Used to represent a `for' statement. The operands are FOR_INIT_STMT, FOR_COND, FOR_EXPR, and FOR_BODY, respectively. */ DEFTREECODE (FOR_STMT, "for_stmt", 'e', 4) /* Used to represent a 'while' statement. The operands are WHILE_COND and WHILE_BODY, respectively. */ DEFTREECODE (WHILE_STMT, "while_stmt", 'e', 2) /* Used to represent a 'do' statement. The operands are DO_BODY and DO_COND, respectively. */ DEFTREECODE (DO_STMT, "do_stmt", 'e', 2) /* Used to represent a 'break' statement. */ DEFTREECODE (BREAK_STMT, "break_stmt", 'e', 0) /* Used to represent a 'continue' statement. */ DEFTREECODE (CONTINUE_STMT, "continue_stmt", 'e', 0) /* Used to represent a 'switch' statement. The operands are SWITCH_COND, SWITCH_BODY and SWITCH_TYPE, respectively. */ DEFTREECODE (SWITCH_STMT, "switch_stmt", 'e', 3) /* A STMT_EXPR represents a statement-expression. The STMT_EXPR_STMT is the statement given by the expression. */ DEFTREECODE (STMT_EXPR, "stmt_expr", 'e', 1) /* A COMPOUND_LITERAL_EXPR represents a C99 compound literal. The COMPOUND_LITERAL_EXPR_DECL_STMT is the a DECL_STMT containing the decl for the anonymous object represented by the COMPOUND_LITERAL; the DECL_INITIAL of that decl is the CONSTRUCTOR that initializes the compound literal. */ DEFTREECODE (COMPOUND_LITERAL_EXPR, "compound_literal_expr", 'e', 1) /* Local variables: mode:c End: */ LAST_C_TREE_CODE }; #undef DEFTREECODE #define c_common_stmt_codes \ EXPR_STMT, FOR_STMT, \ WHILE_STMT, DO_STMT, \ BREAK_STMT, CONTINUE_STMT, SWITCH_STMT /* TRUE if a code represents a statement. The front end init langhook should take care of initialization of this array. */ extern bool statement_code_p[MAX_TREE_CODES]; #define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)] #define INIT_STATEMENT_CODES(STMT_CODES) \ do { \ unsigned int i; \ memset (&statement_code_p, 0, sizeof (statement_code_p)); \ for (i = 0; i < ARRAY_SIZE (STMT_CODES); i++) \ statement_code_p[STMT_CODES[i]] = true; \ } while (0) extern int stmts_are_full_exprs_p (void); extern int anon_aggr_type_p (tree); /* For a VAR_DECL that is an anonymous union, these are the various sub-variables that make up the anonymous union. */ #define DECL_ANON_UNION_ELEMS(NODE) DECL_ARGUMENTS ((NODE)) /* In a FIELD_DECL, nonzero if the decl was originally a bitfield. */ #define DECL_C_BIT_FIELD(NODE) \ (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) == 1) #define SET_DECL_C_BIT_FIELD(NODE) \ (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 1) #define CLEAR_DECL_C_BIT_FIELD(NODE) \ (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 0) extern void emit_local_var (tree); extern void make_rtl_for_local_static (tree); extern tree do_case (tree, tree); extern tree build_stmt (enum tree_code, ...); extern tree build_case_label (tree, tree, tree); extern tree build_continue_stmt (void); extern tree build_break_stmt (void); extern void c_expand_asm_operands (tree, tree, tree, tree, int, location_t); /* These functions must be defined by each front-end which implements a variant of the C language. They are used in c-common.c. 
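As an illustrative sketch (not part of this header): the C front end
supplies most of these in c-typeck.c and c-decl.c, while the C++ front end
provides its own versions.  A front end's initialization code also seeds
the statement_code_p table declared above, typically along these lines:

    static const enum tree_code stmt_codes[] = { c_common_stmt_codes };
    INIT_STATEMENT_CODES (stmt_codes);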
*/ extern tree build_unary_op (enum tree_code, tree, int); extern tree build_binary_op (enum tree_code, tree, tree, int); extern int lvalue_p (tree); extern tree default_conversion (tree); /* Given two integer or real types, return the type for their sum. Given two compatible ANSI C types, returns the merged type. */ extern tree common_type (tree, tree); extern tree decl_constant_value (tree); /* Handle increment and decrement of boolean types. */ extern tree boolean_increment (enum tree_code, tree); /* Hook currently used only by the C++ front end to reset internal state after entering or leaving a header file. */ extern void extract_interface_info (void); extern int case_compare (splay_tree_key, splay_tree_key); extern tree c_add_case_label (splay_tree, tree, tree, tree); extern void c_do_switch_warnings (splay_tree, tree); extern tree build_function_call (tree, tree); extern tree finish_label_address_expr (tree); /* Same function prototype, but the C and C++ front ends have different implementations. Used in c-common.c. */ extern tree lookup_label (tree); extern int vector_types_convertible_p (tree t1, tree t2); extern rtx c_expand_expr (tree, rtx, enum machine_mode, int, rtx *); extern int c_staticp (tree); extern int c_common_unsafe_for_reeval (tree); extern void init_c_lex (void); extern void c_cpp_builtins (cpp_reader *); /* Positive if an implicit `extern "C"' scope has just been entered; negative if such a scope has just been exited. */ extern GTY(()) int pending_lang_change; /* Information recorded about each file examined during compilation. */ struct c_fileinfo { int time; /* Time spent in the file. */ short interface_only; /* Flags - used only by C++ */ short interface_unknown; }; struct c_fileinfo *get_fileinfo (const char *); extern void dump_time_statistics (void); extern bool c_dump_tree (void *, tree); extern void c_warn_unused_result (tree *); extern void verify_sequence_points (tree); /* In c-gimplify.c */ extern void c_genericize (tree); extern int c_gimplify_expr (tree *, tree *, tree *); extern tree c_build_bind_expr (tree, tree); /* In c-pch.c */ extern void pch_init (void); extern int c_common_valid_pch (cpp_reader *pfile, const char *name, int fd); extern void c_common_read_pch (cpp_reader *pfile, const char *name, int fd, const char *orig); extern void c_common_write_pch (void); extern void c_common_no_more_pch (void); extern void c_common_pch_pragma (cpp_reader *pfile); extern void builtin_define_with_value (const char *, const char *, int); extern void c_stddef_cpp_builtins (void); extern void fe_file_change (const struct line_map *); extern void c_parse_error (const char *, enum cpp_ttype, tree); /* The following have been moved here from c-tree.h, since they're needed in the ObjC++ world, too. What is more, stub-objc.c could use a few prototypes. */ extern tree lookup_interface (tree); extern tree is_class_name (tree); extern tree objc_is_object_ptr (tree); extern void objc_check_decl (tree); extern int objc_comptypes (tree, tree, int); extern tree objc_message_selector (void); extern tree lookup_objc_ivar (tree); extern void *get_current_scope (void); extern void objc_mark_locals_volatile (void *); extern void objc_clear_super_receiver (void); extern int objc_is_public (tree, tree); /* In c-ppoutput.c */ extern void init_pp_output (FILE *); extern void preprocess_file (cpp_reader *); extern void pp_file_change (const struct line_map *); extern void pp_dir_change (cpp_reader *, const char *); #endif /* ! 
GCC_C_COMMON_H */ /* Definitions for C parsing and type checking. Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* For gc purposes, return the most likely link for the longest chain. */ #define C_LANG_TREE_NODE_CHAIN_NEXT(T) \ ((union lang_tree_node *) \ (TREE_CODE (T) == INTEGER_TYPE ? TYPE_NEXT_VARIANT (T) \ : TREE_CODE (T) == COMPOUND_EXPR ? TREE_OPERAND (T, 1) \ : TREE_CHAIN (T))) /* Language-specific declaration information. */ struct lang_decl GTY(()) { /* The return types and parameter types may have variable size. This is a list of any SAVE_EXPRs that need to be evaluated to compute those sizes. */ tree pending_sizes; }; /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword, and C_RID_YYCODE is the token number wanted by Yacc. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) struct lang_type GTY(()) { /* In a RECORD_TYPE, a sorted array of the fields of the type. */ struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields"))) s; /* In an ENUMERAL_TYPE, the min and max values. */ tree enum_min; tree enum_max; }; /* Record whether a type or decl was written with nonconstant size. Note that TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* Store a value in that field. */ #define C_SET_EXP_ORIGINAL_CODE(EXP, CODE) \ (TREE_COMPLEXITY (EXP) = (int) (CODE)) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. 
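Illustrative examples (a sketch, not part of this header) of when these
flags are set for old-style C code:

    foo (x) int x; { return x; }          no return type was written, so
                                          the FUNCTION_DECL for foo gets
                                          C_FUNCTION_IMPLICIT_INT

    int use (void) { return bar (3); }    bar was never declared; the decl
                                          created by implicitly_declare
                                          gets C_DECL_IMPLICIT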
*/ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) DECL_LANG_FLAG_3 (EXP) /* Record whether a decl was declared register. This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (TYPE_ARG_TYPES (TREE_TYPE (EXP)) == 0 \ && !DECL_BUILT_IN (EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE) /* Save and restore the variables in this file and elsewhere that keep track of the progress of compilation of the current function. Used for nested functions. */ struct language_function GTY(()) { struct c_language_function base; tree x_break_label; tree x_cont_label; struct c_switch * GTY((skip)) x_switch_stack; int returns_value; int returns_null; int returns_abnormally; int warn_about_return_type; int extern_inline; }; /* in c-parse.in */ extern void c_parse_init (void); /* in c-aux-info.c */ extern void gen_aux_info_record (tree, int, int, int); /* in c-decl.c */ extern tree c_break_label; extern tree c_cont_label; extern int global_bindings_p (void); extern void push_scope (void); extern tree pop_scope (void); extern void insert_block (tree); extern tree pushdecl (tree); extern void c_expand_body (tree); extern void c_init_decl_processing (void); extern void c_dup_lang_specific_decl (tree); extern void c_print_identifier (FILE *, tree, int); extern tree build_array_declarator (tree, tree, int, int); extern tree build_enumerator (tree, tree); extern void check_for_loop_decls (void); extern void mark_forward_parm_decls (void); extern int complete_array_type (tree, tree, int); extern void declare_parm_level (void); extern void undeclared_variable (tree); extern tree declare_label (tree); extern tree define_label (location_t, tree); extern void finish_decl (tree, tree, tree); extern tree finish_enum (tree, tree, tree); extern void finish_function (void); extern tree finish_struct (tree, tree, tree); extern tree get_parm_info (bool); extern tree grokfield (tree, tree, tree); extern tree groktypename (tree); extern tree groktypename_in_parm_context (tree); extern tree implicitly_declare (tree); extern void keep_next_level (void); extern tree lookup_name (tree); extern void pending_xref_error (void); extern void c_push_function_context (struct function *); extern void c_pop_function_context (struct function *); extern void push_parm_decl (tree); extern tree pushdecl_top_level (tree); extern tree set_array_declarator_type (tree, tree, int); extern void shadow_tag (tree); extern void shadow_tag_warned (tree, int); extern tree start_enum (tree); extern int start_function (tree, tree, tree); extern tree start_decl (tree, tree, int, tree); extern tree start_struct (enum tree_code, tree); extern void store_parm_decls (void); extern tree xref_tag (enum tree_code, tree); extern int c_expand_decl (tree); extern tree make_pointer_declarator (tree, tree); /* in c-objc-common.c */ extern int c_disregard_inline_limits 
(tree); extern int c_cannot_inline_tree_fn (tree *); extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern tree c_objc_common_truthvalue_conversion (tree expr); extern void c_objc_common_finish_file (void); extern int defer_fn (tree); extern bool c_warn_unused_global_decl (tree); extern void c_initialize_diagnostics (diagnostic_context *); #define c_build_type_variant(TYPE, CONST_P, VOLATILE_P) \ c_build_qualified_type ((TYPE), \ ((CONST_P) ? TYPE_QUAL_CONST : 0) | \ ((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0)) #define c_sizeof_nowarn(T) c_sizeof_or_alignof_type (T, SIZEOF_EXPR, 0) /* in c-typeck.c */ extern struct c_switch *c_switch_stack; extern tree require_complete_type (tree); extern int same_translation_unit_p (tree, tree); extern int comptypes (tree, tree); extern tree c_size_in_bytes (tree); extern bool c_mark_addressable (tree); extern void c_incomplete_type_error (tree, tree); extern tree c_type_promotes_to (tree); extern tree composite_type (tree, tree); extern tree build_component_ref (tree, tree); extern tree build_indirect_ref (tree, const char *); extern tree build_array_ref (tree, tree); extern tree build_external_ref (tree, int); extern tree parser_build_binary_op (enum tree_code, tree, tree); extern void readonly_error (tree, const char *); extern tree build_conditional_expr (tree, tree, tree); extern tree build_compound_expr (tree); extern tree c_cast_expr (tree, tree); extern tree build_c_cast (tree, tree); extern tree build_modify_expr (tree, enum tree_code, tree); extern void store_init_value (tree, tree); extern void error_init (const char *); extern void pedwarn_init (const char *); extern void start_init (tree, tree, int); extern void finish_init (void); extern void really_start_incremental_init (tree); extern void push_init_level (int); extern tree pop_init_level (int); extern void set_init_index (tree, tree); extern void set_init_label (tree); extern void process_init_element (tree); extern tree build_compound_literal (tree, tree); extern void pedwarn_c90 (const char *, ...) ATTRIBUTE_PRINTF_1; extern void pedwarn_c99 (const char *, ...) ATTRIBUTE_PRINTF_1; extern tree c_start_case (tree); extern void c_finish_case (tree); extern tree build_asm_expr (tree, tree, tree, tree, bool); extern tree build_asm_stmt (tree, tree); extern tree c_convert_parm_for_inlining (tree, tree, tree, int); extern int c_types_compatible_p (tree, tree); extern tree c_begin_compound_stmt (bool); extern tree c_end_compound_stmt (tree, bool); extern void c_finish_if_stmt (location_t, tree, tree, tree, bool); extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr (void); extern tree c_finish_stmt_expr (tree); extern tree c_process_expr_stmt (tree); extern tree c_finish_expr_stmt (tree); extern tree c_finish_return (tree); extern tree c_finish_bc_stmt (tree *, bool); extern tree c_finish_goto_label (tree); extern tree c_finish_goto_ptr (tree); extern tree build_offsetof (tree, tree); /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. 
*/ extern int current_function_returns_abnormally; /* Nonzero means we are reading code that came from a system header file. */ extern int system_header_p; /* True means global_bindings_p should return false even if the scope stack says we are in file scope. */ extern bool c_override_global_bindings_to_false; /* True means we've initialized exception handling. */ extern bool c_eh_initialized_p; /* In c-decl.c */ extern void c_finish_incomplete_decl (tree); extern void *get_current_scope (void); extern void objc_mark_locals_volatile (void *); extern void c_write_global_declarations (void); extern GTY(()) tree static_ctors; extern GTY(()) tree static_dtors; /* In order for the format checking to accept the C frontend diagnostic framework extensions, you must include this file before toplev.h, not after. */ #ifndef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_cdiag__ #endif #endif /* ! GCC_C_TREE_H */ /* Reduce ifdefery later. */ #ifndef HAVE_BANSHEE #define HAVE_BANSHEE 0 #endif /* This file contains the implementation of the common parts of the tree points-to analysis infrastructure. Overview: This file contains the points-to analysis driver. It does two main things: 1. Keeps track of the PTA data for each variable (IE the data each specific PTA implementation wants to keep associated with a variable). 2. Walks the function trees, calling the appropriate functions that each PTA implementation has implemented. In order to speed up PTA queries, the PTA specific data is stored in the tree for *_DECL's, in DECL_PTA_ALIASVAR. This way, we only need to use the hash table for non-DECL's. */ #define FIELD_BASED 0 /* Array of all created alias_vars. Note that this should contain all the alias_vars we wanted marked during GC. */ static GTY((param_is (union alias_var_def))) varray_type alias_vars = NULL; struct tree_alias_ops *current_alias_ops; /* Array of local (to a function) alias_vars. Note that this should contain all the alias_vars that are local to this function. We delete these from alias_vars before collection. */ static GTY(()) varray_type local_alias_vars; static GTY(()) varray_type local_alias_varnums; tree pta_global_var; static bitmap addrargs; static alias_var get_alias_var_decl (tree); static alias_var get_alias_var (tree); static void find_func_aliases (tree); static void deal_with_call_aliasing (tree, alias_var); static alias_var create_fun_alias_var_ptf (tree, tree); static alias_var create_fun_alias_var (tree, int); static alias_var create_alias_var (tree); static void intra_function_call (varray_type); static void get_values_from_constructor (tree, varray_type *, bitmap, int *); static bool call_may_clobber (tree); static bool call_may_return (tree); /* Return true if a EXPR, which is a CALL_EXPR, may clobber variables. */ static bool call_may_clobber (tree expr) { int flags; if (TREE_CODE (expr) != CALL_EXPR) return false; flags = call_expr_flags (expr); return (! (flags & (ECF_CONST | ECF_PURE | ECF_NORETURN))); } /* Return true if a EXPR, which is a CALL_EXPR, may return. */ static bool call_may_return (tree expr) { int flags; if (TREE_CODE (expr) != CALL_EXPR) return false; flags = call_expr_flags (expr); return ! (flags & ECF_NORETURN); } /* Get the alias_var for DECL. Creates the alias_var if it does not exist already. Also handles FUNCTION_DECL properly. 
*/ static alias_var get_alias_var_decl (tree decl) { alias_var newvar; if (TREE_CODE (decl) == FIELD_DECL) abort (); if (DECL_P (decl)) { if (DECL_PTA_ALIASVAR (decl)) return DECL_PTA_ALIASVAR (decl); } if (TREE_CODE (decl) == FUNCTION_DECL) newvar = create_fun_alias_var (decl, 0); else { newvar = create_alias_var (decl); /* Assign globals to global var for purposes of intraprocedural analysis. */ if ((DECL_CONTEXT (decl) == NULL || TREE_PUBLIC (decl) || TREE_STATIC (decl) || decl_function_context (decl) == NULL) && decl != pta_global_var) { current_alias_ops->addr_assign (current_alias_ops, get_alias_var (pta_global_var), newvar); /* If the global has some DECL_INITIAL, we need to process it here. */ if (DECL_INITIAL (decl)) find_func_aliases (decl); } } if (!current_alias_ops->ip) { if (!current_alias_ops->ip_partial || (TREE_CODE (decl) != FUNCTION_DECL && TREE_CODE (decl) != PARM_DECL)) { VARRAY_PUSH_INT (local_alias_varnums, ALIAS_VAR_VARNUM (newvar)); VARRAY_PUSH_TREE (local_alias_vars, decl); } } return newvar; } /* Get the alias_var for an expression EXPR. Note that this function expects to only be handed a RHS or LHS, not a MODIFY_EXPR. */ static alias_var get_alias_var (tree expr) { /* If it's a decl, get the alias var of the decl. We farm this off to get_alias_var_decl so it can abort if the alias var doesn't exist, and in case something else *knows* it has a decl, and wants the alias var. */ if (DECL_P (expr)) return get_alias_var_decl (expr); /* True constants have no aliases (unless modifiable strings are on, in which case i don't think we'll end up with a STRING_CST anyway) */ if (TREE_CODE_CLASS (TREE_CODE (expr)) == 'c') return NULL; switch (TREE_CODE (expr)) { case ARRAY_REF: case ARRAY_RANGE_REF: { /* Find the first non-array ref, and return its alias variable. */ tree p; for (p = expr; TREE_CODE (p) == ARRAY_REF || TREE_CODE (p) == ARRAY_RANGE_REF; p = TREE_OPERAND (p, 0)) ; return get_alias_var (p); } break; case COMPONENT_REF: { #if FIELD_BASED bool safe = true; tree p; for (p = expr; TREE_CODE (p) == COMPONENT_REF || TREE_CODE (p) == INDIRECT_REF; p = TREE_OPERAND (p, 0)) { if (TREE_CODE (TREE_TYPE (p)) == UNION_TYPE || TREE_CODE (TREE_TYPE (p)) == QUAL_UNION_TYPE) { safe = false; break; } } if (!safe) { for (p = expr; TREE_CODE (p) == COMPONENT_REF; p = TREE_OPERAND (p, 0)); return get_alias_var (p); } else { return get_alias_var (TREE_OPERAND (expr, 1)); } #else /* Find the first non-component ref, and return its alias variable. */ tree p; for (p = expr; TREE_CODE (p) == COMPONENT_REF; p = TREE_OPERAND (p, 0)); return get_alias_var (p); #endif } break; case REALPART_EXPR: case IMAGPART_EXPR: case NOP_EXPR: case CONVERT_EXPR: case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: case ADDR_EXPR: case INDIRECT_REF: case BIT_FIELD_REF: /* If it's a ref or cast or conversion of something, get the alias var of the something. */ return get_alias_var (TREE_OPERAND (expr, 0)); break; default: return NULL; } } /* Perform conservative aliasing for an intraprocedural mode function call. ARGS are the arguments that were passed to that function call. */ static void intra_function_call (varray_type args) { size_t l = VARRAY_ACTIVE_SIZE (args); size_t i; alias_var av = get_alias_var (pta_global_var); /* We assume assignments among the actual parameters. 
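Illustrative example (a sketch, not part of this file) of why this is
needed when the callee's body is not visible to the analysis:

    extern int *global_ptr;
    extern void callee (int **, int **);

    void caller (int **p, int **q)
    {
      callee (p, q);
    }

After the call, *p and *q may have been made to point to the same object,
or to whatever the globals point to, so pairwise assignments among the
actuals are recorded below, plus an assignment to each actual from the
alias variable that stands for all globals.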
*/ for (i = 0; i < l; i++) { alias_var argi = VARRAY_GENERIC_PTR (args, i); size_t j; for (j = 0; j < l; j++) { alias_var argj; if (i == j) continue; argj = VARRAY_GENERIC_PTR (args, j); /* Restricted pointers can't be aliased with other restricted pointers. */ if (!TYPE_RESTRICT (TREE_TYPE (ALIAS_VAR_DECL (argi))) || !TYPE_RESTRICT (TREE_TYPE (ALIAS_VAR_DECL (argj)))) /* Do a bit of TBAA to avoid pointless assignments. */ if (alias_sets_conflict_p (get_alias_set (ALIAS_VAR_DECL (argi)), get_alias_set (ALIAS_VAR_DECL (argj)))) current_alias_ops->simple_assign (current_alias_ops, argi, argj); } } /* We assume that an actual parameter can point to any global. */ for (i = 0; i < l; i++) { alias_var argav = VARRAY_GENERIC_PTR (args, i); /* Restricted pointers can't be aliased with other restricted pointers. */ if (!TYPE_RESTRICT (TREE_TYPE (ALIAS_VAR_DECL (argav))) || !TYPE_RESTRICT (TREE_TYPE (ALIAS_VAR_DECL (av)))) { /* Arguments can alias globals, and whatever they point to can point to a global as well. */ current_alias_ops->simple_assign (current_alias_ops, argav, av); } } } /* Put all pointers in a constructor in an array. */ static void get_values_from_constructor (tree constructor, varray_type *vals, bitmap addrargs, int *i) { tree elt_list; switch (TREE_CODE (constructor)) { case CONSTRUCTOR: { for (elt_list = CONSTRUCTOR_ELTS (constructor); elt_list; elt_list = TREE_CHAIN (elt_list)) { tree value = TREE_VALUE (elt_list); if (TREE_CODE (value) == TREE_LIST || TREE_CODE (value) == CONSTRUCTOR) { get_values_from_constructor (value, vals, addrargs, i); } else { alias_var aav; aav = get_alias_var (value); if (aav) VARRAY_PUSH_GENERIC_PTR (*vals, aav); if (TREE_CODE (value) == ADDR_EXPR) bitmap_set_bit (addrargs, *i); *i = *i + 1; } } } break; case TREE_LIST: for (elt_list = constructor; elt_list; elt_list = TREE_CHAIN (elt_list)) { get_values_from_constructor (TREE_VALUE (elt_list), vals, addrargs, i); } break; default: abort(); } } /* Deal with the possible return values of a call that we don't have actual PTA info about. */ static void deal_with_call_aliasing (tree callargs, alias_var lhsAV) { tree arg, argp; for (argp = callargs; argp; argp = TREE_CHAIN (argp)) { arg = TREE_VALUE (argp); /* If we take the address of a variable directly in the argument, the return value could be the address of that variable. */ if (TREE_CODE (arg) == ADDR_EXPR) current_alias_ops->addr_assign (current_alias_ops, lhsAV, get_alias_var (arg)); /* If we pass in a pointer, we could return that pointer. */ else if (POINTER_TYPE_P (TREE_TYPE (arg))) { alias_var argtv = get_alias_var (arg); if (argtv) current_alias_ops->simple_assign (current_alias_ops, lhsAV, argtv); } } } /* Find the operand of the component ref that actually is doing something to the DECL */ static tree find_op_of_decl (tree cref) { while (!DECL_P (TREE_OPERAND (cref, 0))) { cref = TREE_OPERAND (cref, 0); } return cref; } /* Tree walker that is the heart of the aliasing infrastructure. TP is a pointer to the current tree. WALK_SUBTREES specifies whether to continue traversing subtrees or not. Returns NULL_TREE when we should stop. This function is the main part of the aliasing infrastructure. It walks the trees, calling the appropriate alias analyzer functions to process various statements. 
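As a rough illustration (a sketch, with argument lists abbreviated), the
main assignment forms handled below map onto the analyzer callbacks like
so:

    x = y;          simple_assign (x, y)
    x = &y;         addr_assign   (x, y)
    x = *y;         ptr_assign    (x, y)
    *x = y;         assign_ptr    (x, y)
    x = f (args);   function_call (x, f, args)

where x and y stand for the operands' alias variables, and the
current_alias_ops argument and the addrargs bitmap are omitted for
brevity.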
*/ static void find_func_aliases (tree stp) { if (TREE_CODE (stp) == RETURN_EXPR) { stp = TREE_OPERAND (stp, 0); if (!stp) return; } if (TREE_CODE (stp) == MODIFY_EXPR || (DECL_P (stp) && DECL_INITIAL (stp) != NULL_TREE )) { tree op0, op1; alias_var lhsAV = NULL; alias_var rhsAV = NULL; if (DECL_P (stp)) { op0 = stp; op1 = DECL_INITIAL (stp); } else { op0 = TREE_OPERAND (stp, 0); op1 = TREE_OPERAND (stp, 1); } /* lhsAV should always have an alias variable */ lhsAV = get_alias_var (op0); if (!lhsAV) return; /* rhsAV might not have one, c.f. c = 5 */ rhsAV = get_alias_var (op1); #if !FIELD_BASED while (TREE_CODE (op1) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (op1, 0)) == COMPONENT_REF) { op1 = TREE_OPERAND (op1, 0); } while (TREE_CODE (op1) == BIT_FIELD_REF) { op1 = TREE_OPERAND (op1, 0); } /* Take care of fact that we may have multi-level component refs. */ if (TREE_CODE (op1) == COMPONENT_REF) op1 = find_op_of_decl (op1); #endif /* You would think we could test rhsAV at the top, rather than 50 separate times, but we can't, because it can be NULL for operator assignments, where we'd still collect the individual alias vars for the operator. */ /* Note that structures are treated as a single alias variable, since we can disambiguate based on TBAA first, and fall back on points-to. */ /* x = */ if (is_gimple_variable (op0)) { /* x = y */ if (is_gimple_variable (op1)) { if (rhsAV != NULL) current_alias_ops->simple_assign (current_alias_ops, lhsAV, rhsAV); } /* x = foo.y */ else if (TREE_CODE (op1) == COMPONENT_REF && DECL_P (TREE_OPERAND (op1, 0))) { if (rhsAV != NULL) current_alias_ops->simple_assign (current_alias_ops, lhsAV, rhsAV); } /* x = (cast) [maybe-addr-expr] y */ else if (is_gimple_cast (op1)) { tree stripped_op1 = op1; STRIP_NOPS (stripped_op1); if (rhsAV != NULL) { if (TREE_CODE (stripped_op1) == ADDR_EXPR) current_alias_ops->addr_assign (current_alias_ops, lhsAV, rhsAV); else current_alias_ops->simple_assign (current_alias_ops, lhsAV, rhsAV); } } /* x = *y or x = foo->y */ else if (TREE_CODE (op1) == INDIRECT_REF || TREE_CODE (op1) == ARRAY_REF || (TREE_CODE (op1) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (op1, 0)) == INDIRECT_REF)) { if (rhsAV != NULL) current_alias_ops->ptr_assign (current_alias_ops, lhsAV, rhsAV); } /* x = &y = x = &foo.y */ else if (TREE_CODE (op1) == ADDR_EXPR) { if (rhsAV != NULL) current_alias_ops->addr_assign (current_alias_ops, lhsAV, rhsAV); } /* x = func(...) */ else if (TREE_CODE (op1) == CALL_EXPR) { /* Heap assignment. These are __attribute__ malloc or something, I'll deal with it later. */ if (0) {} else { /* NORETURN functions have no effect on aliasing. */ if (call_may_return (op1)) { varray_type args; tree arg; tree callop0, callop1; int argnum; /* Collect the arguments */ VARRAY_GENERIC_PTR_INIT (args, 1, "Arguments"); bitmap_clear (addrargs); callop1 = TREE_OPERAND (op1, 1); callop0 = TREE_OPERAND (op1, 0); for (arg = callop1, argnum = 0; arg; arg = TREE_CHAIN (arg), argnum++) { alias_var aav = get_alias_var (TREE_VALUE (arg)); if (aav) { VARRAY_PUSH_GENERIC_PTR (args, aav); if (TREE_CODE (TREE_VALUE (arg)) == ADDR_EXPR) bitmap_set_bit (addrargs, argnum); } } /* Simulate the call */ if (current_alias_ops->function_call (current_alias_ops, lhsAV, get_alias_var (callop0), args, addrargs)) { if (call_may_clobber (op1) && !current_alias_ops->ip && flag_argument_noalias != 2) { intra_function_call (args); } if (POINTER_TYPE_P (TREE_TYPE (op0))) deal_with_call_aliasing (callop1, lhsAV); } } } } /* x = op (...) 
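(for example x = y + z, or an aggregate value built by a CONSTRUCTOR).
Illustrative note, not a specification: the alias variables of all the
operands, or of the constructor elements, are collected into a varray and
handed to op_assign, with addrargs marking which of them were ADDR_EXPRs.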
*/ else { bitmap_clear (addrargs); if (TREE_CODE (op1) == CONSTRUCTOR) { varray_type ops; int i = 0; VARRAY_GENERIC_PTR_INIT (ops, 1, "Operands"); get_values_from_constructor (op1, &ops, addrargs, &i); current_alias_ops->op_assign (current_alias_ops, lhsAV, ops, op1, addrargs); } else switch (TREE_CODE_CLASS (TREE_CODE (op1))) { case 'e': /* an expression */ case 's': /* an expression with side effects */ case '<': /* a comparison expression */ case '1': /* a unary arithmetic expression */ case 'r': /* a reference */ case '2': /* a binary arithmetic expression */ { tree op; varray_type ops; int i; VARRAY_GENERIC_PTR_INIT (ops, 1, "Operands"); for (i = 0; i < TREE_CODE_LENGTH (TREE_CODE (op1)); i++) { alias_var aav; op = TREE_OPERAND (op1, i); aav = get_alias_var (op); if (aav) VARRAY_PUSH_GENERIC_PTR (ops, aav); if (TREE_CODE (op) == ADDR_EXPR) bitmap_set_bit (addrargs, i); } current_alias_ops->op_assign (current_alias_ops, lhsAV, ops, op1, addrargs); } break; default: break; } } } /* *x = */ else { /* x.f = y or x->f = y */ if ((TREE_CODE (op0) == COMPONENT_REF || TREE_CODE (op0) == BIT_FIELD_REF) && is_gimple_variable (op1)) { if (rhsAV != NULL) current_alias_ops->simple_assign (current_alias_ops, lhsAV, rhsAV); } /* x.f = &y or x->f = &y */ else if (TREE_CODE (op0) == COMPONENT_REF && TREE_CODE (op1) == ADDR_EXPR) { if (rhsAV != NULL) current_alias_ops->addr_assign (current_alias_ops, lhsAV, rhsAV); } /* *x.f = y or *x->f = y */ else if ((TREE_CODE (op0) == INDIRECT_REF || TREE_CODE (op0) == ARRAY_REF) && TREE_CODE (TREE_OPERAND (op0, 0)) == COMPONENT_REF && is_gimple_variable (op1)) { if (rhsAV != NULL) current_alias_ops->assign_ptr (current_alias_ops, lhsAV, rhsAV); } /* *x = &y */ else if ((TREE_CODE (op0) == INDIRECT_REF || TREE_CODE (op0) == ARRAY_REF) && TREE_CODE (op1) == ADDR_EXPR) { /* This becomes temp = &y and *x = temp . */ alias_var tempvar; tree temp = create_tmp_var_raw (void_type_node, "aliastmp"); tempvar = current_alias_ops->add_var (current_alias_ops, temp); current_alias_ops->addr_assign (current_alias_ops, tempvar, rhsAV); current_alias_ops->assign_ptr (current_alias_ops, lhsAV, tempvar); } /* *x = *y */ else if ((TREE_CODE (op0) == INDIRECT_REF || TREE_CODE (op0) == ARRAY_REF) && (TREE_CODE (op1) == INDIRECT_REF || TREE_CODE (op1) == ARRAY_REF)) { /* This becomes temp = *y and *x = temp . */ alias_var tempvar; tree temp; temp = create_tmp_var_raw (void_type_node, "aliastmp"); tempvar = current_alias_ops->add_var (current_alias_ops, temp); current_alias_ops->ptr_assign (current_alias_ops, tempvar, rhsAV); current_alias_ops->assign_ptr (current_alias_ops, lhsAV, tempvar); } /* *x = (cast) y */ else if ((TREE_CODE (op0) == INDIRECT_REF || TREE_CODE (op0) == ARRAY_REF) && is_gimple_cast (op1)) { if (rhsAV != NULL) { /* This becomes temp = (cast) y and *x = temp. */ alias_var tempvar; tree temp; temp = create_tmp_var_raw (void_type_node, "aliastmp"); tempvar = current_alias_ops->add_var (current_alias_ops, temp); current_alias_ops->simple_assign (current_alias_ops, tempvar, rhsAV); current_alias_ops->assign_ptr (current_alias_ops, lhsAV, tempvar); } } /* *x = */ else { if (rhsAV != NULL) current_alias_ops->assign_ptr (current_alias_ops, lhsAV, rhsAV); } } } /* Calls without return values. 
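For example a bare statement such as

    update (&counter, step);

has no LHS, so there is no lhsAV; the arguments are still collected and
passed to function_call, and for a call that may clobber memory the
conservative intra_function_call assumptions are applied.  (The names
update, counter and step above are illustrative only.)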
*/ else if (TREE_CODE (stp) == CALL_EXPR) { alias_var callvar; varray_type args; tree arg; callvar = get_alias_var (TREE_OPERAND (stp, 0)); if (callvar != NULL) { /* NORETURN and CONST functions with no return value have no effect on aliasing (as may be seen above, const functions that return a value might have an effect on aliasing, since the return value can point to one of the arguments. */ if (call_may_clobber (stp)) { int argnum; VARRAY_GENERIC_PTR_INIT (args, 1, "Arguments"); bitmap_clear (addrargs); for (arg = TREE_OPERAND (stp, 1), argnum=0; arg; arg = TREE_CHAIN (arg), argnum++) { alias_var aav = get_alias_var (TREE_VALUE (arg)); if (aav) { VARRAY_PUSH_GENERIC_PTR (args, aav); if (TREE_CODE (TREE_VALUE (arg)) == ADDR_EXPR) bitmap_set_bit (addrargs, argnum); } } if (current_alias_ops->function_call (current_alias_ops, NULL, callvar, args, addrargs)) if (!current_alias_ops->ip && flag_argument_noalias != 2) intra_function_call (args); } } } } /* Create the alias_var for a function definition DECL, it's arguments, and it's return value. If FORCE is true, we force creation of the alias_var, regardless of whether one exists already. This includes creation of alias_var's for - The function itself. - The arguments. - The return value. */ static alias_var create_fun_alias_var (tree decl, int force) { alias_var avar, retvar; tree rdecl; varray_type params = NULL; if (!force) { if (DECL_PTA_ALIASVAR (decl)) return DECL_PTA_ALIASVAR (decl); } VARRAY_GENERIC_PTR_INIT (params, 1, "Arguments"); if (DECL_ARGUMENTS (decl) != NULL) { tree arg; for (arg = DECL_ARGUMENTS (decl); arg; arg = TREE_CHAIN (arg)) { alias_var var = get_alias_var (arg); VARRAY_PUSH_GENERIC_PTR (params, var); /* Incoming pointers can point to pta_global_var, unless either we are interprocedural, or we can do ip on all statics + this function has been defined + it's not an external function. */ if (POINTER_TYPE_P (TREE_TYPE (arg)) && !current_alias_ops->ip /* FIXME: Need to let analyzer decide in partial case. */ && (!current_alias_ops->ip_partial || !cgraph_local_info (decl)->local)) current_alias_ops->simple_assign (current_alias_ops, var, get_alias_var (pta_global_var)); } } else if (TYPE_ARG_TYPES (TREE_TYPE (decl)) != NULL) { tree arg; /* FIXME: Handle varargs */ for (arg = TYPE_ARG_TYPES (TREE_TYPE (decl)); arg && TREE_VALUE (arg) != void_type_node; arg = TREE_CHAIN (arg)) { tree fakedecl = create_tmp_var_raw (TREE_VALUE (arg), "normarg"); alias_var var; DECL_CONTEXT (fakedecl) = current_function_decl; var = get_alias_var (fakedecl); VARRAY_PUSH_GENERIC_PTR (params, var); /* Incoming pointers can point to pta_global_var, unless either we are interprocedural, or we can do ip on all statics + this function has been defined + it's not an external function. */ if (POINTER_TYPE_P (TREE_TYPE (fakedecl)) && !current_alias_ops->ip /* FIXME: need to let analyzer decide in partial case. */ && (!current_alias_ops->ip_partial || !TREE_STATIC (decl) || TREE_PUBLIC (decl))) current_alias_ops->simple_assign (current_alias_ops, var, get_alias_var (pta_global_var)); } } /* Functions declared like void f() are *not* equivalent to void f(void). You can pass an argument to them. Thus, we need to create some fake argument that would alias any actuals that get passed to our function. 
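A minimal illustration (not part of this file):

    extern void f ();           unprototyped: the call  f (&x);  is valid C
    extern void g (void);       prototyped:   the call  g (&x);  is rejected

so for the unprototyped case a fake argument is created below to stand for
anything a caller might actually pass.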
*/ else { tree fakedecl = create_tmp_var_raw (void_type_node, "fakearg"); alias_var fakevar; DECL_CONTEXT (fakedecl) = current_function_decl; fakevar = get_alias_var (fakedecl); VARRAY_PUSH_GENERIC_PTR (params, fakevar); } if (!DECL_RESULT (decl)) { rdecl = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (decl)), "_rv_"); retvar = current_alias_ops->add_var (current_alias_ops, rdecl); DECL_PTA_ALIASVAR (rdecl) = retvar; } else { retvar = current_alias_ops->add_var (current_alias_ops, DECL_RESULT (decl)); DECL_PTA_ALIASVAR (DECL_RESULT (decl)) = retvar; } VARRAY_PUSH_GENERIC_PTR (alias_vars, retvar); ALIAS_VAR_VARNUM (retvar) = VARRAY_ACTIVE_SIZE (alias_vars) - 1; avar = current_alias_ops->add_var (current_alias_ops, decl); VARRAY_PUSH_GENERIC_PTR (alias_vars, avar); ALIAS_VAR_VARNUM (avar) = VARRAY_ACTIVE_SIZE (alias_vars) - 1; current_alias_ops->function_def (current_alias_ops, avar, params, retvar); DECL_PTA_ALIASVAR (decl) = avar; /* FIXME: Also, if this is a defining declaration then add the annotation to all extern definitions of the function. */ return avar; } /* Create an alias variable for a pointer-to-member function DECL of type TYPE, it's arguments, and it's return value. Returns the alias_var for the PTF. This includes creating alias_var's for - The function itself. - The arguments. - The return value. */ static alias_var create_fun_alias_var_ptf (tree decl, tree type) { alias_var avar, retvar; tree rdecl; varray_type params = NULL; if (DECL_PTA_ALIASVAR (decl)) return DECL_PTA_ALIASVAR (decl); VARRAY_GENERIC_PTR_INIT (params, 1, "Arguments"); if (TYPE_ARG_TYPES (type) != NULL) { tree arg; /* FIXME: Handle varargs */ for (arg = TYPE_ARG_TYPES (type); arg && TREE_VALUE (arg) != void_type_node; arg = TREE_CHAIN (arg)) { tree fakedecl = create_tmp_var_raw (TREE_VALUE (arg), "ptfarg"); alias_var var; DECL_CONTEXT (fakedecl) = DECL_CONTEXT (decl); var = get_alias_var (fakedecl); VARRAY_PUSH_GENERIC_PTR (params, var); } } /* Functions declared like void f() are *not* equivalent to void f(void). You can pass an argument to them. Thus, we need to create some fake argument that would alias any actuals that get passed to our function. */ else { tree fakedecl = create_tmp_var_raw (void_type_node, "fakearg"); alias_var fakevar; DECL_CONTEXT (fakedecl) = DECL_CONTEXT (decl); fakevar = get_alias_var (fakedecl); VARRAY_PUSH_GENERIC_PTR (params, fakevar); } rdecl = create_tmp_var_raw (TREE_TYPE (type), "_rv_"); retvar = current_alias_ops->add_var (current_alias_ops, rdecl); VARRAY_PUSH_GENERIC_PTR (alias_vars, retvar); ALIAS_VAR_VARNUM (retvar) = VARRAY_ACTIVE_SIZE (alias_vars) - 1; avar = current_alias_ops->add_var (current_alias_ops, decl); VARRAY_PUSH_GENERIC_PTR (alias_vars, avar); ALIAS_VAR_VARNUM (avar) = VARRAY_ACTIVE_SIZE (alias_vars) - 1; current_alias_ops->function_def (current_alias_ops, avar, params, retvar); DECL_PTA_ALIASVAR (decl) = avar; return avar; } /* Create the alias_var for a *_DECL node DECL. Returns the alias_var for DECL. This function also handles creation of alias_var's for PTF variables. 
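For example (illustrative only), a declaration such as

    int (*handler) (int);

is a pointer-to-function (PTF) variable, so it is routed through
create_fun_alias_var_ptf above, which also creates alias variables for the
pointed-to function's parameters and return value.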
*/ static alias_var create_alias_var (tree decl) { alias_var avar; if (!DECL_P (decl)) abort (); if (DECL_P (decl)) { if (DECL_PTA_ALIASVAR (decl)) return DECL_PTA_ALIASVAR (decl); } if (POINTER_TYPE_P (TREE_TYPE (decl)) && TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == FUNCTION_TYPE) { avar = create_fun_alias_var_ptf (decl, TREE_TYPE (TREE_TYPE (decl))); } else avar = current_alias_ops->add_var (current_alias_ops, decl); if (DECL_P (decl)) { DECL_PTA_ALIASVAR (decl) = avar; } VARRAY_PUSH_GENERIC_PTR (alias_vars, avar); ALIAS_VAR_VARNUM (avar) = VARRAY_ACTIVE_SIZE (alias_vars) - 1; return avar; } /* Create points-to sets for the current function. */ static void create_alias_vars (void) { basic_block bb; #if HAVE_BANSHEE if (flag_tree_points_to == PTA_ANDERSEN) current_alias_ops = andersen_alias_ops; else #endif { current_alias_ops = NULL; flag_tree_points_to = PTA_NONE; return; } pta_global_var = build_decl (VAR_DECL, get_identifier (".pta_global_var"), size_type_node); DECL_ARTIFICIAL (pta_global_var) = 1; TREE_READONLY (pta_global_var) = 1; DECL_EXTERNAL (pta_global_var) = 0; TREE_STATIC (pta_global_var) = 1; TREE_USED (pta_global_var) = 1; DECL_CONTEXT (pta_global_var) = current_function_decl; TREE_THIS_VOLATILE (pta_global_var) = 1; TREE_ADDRESSABLE (pta_global_var) = 0; init_alias_vars (); DECL_PTA_ALIASVAR (current_function_decl) = NULL; get_alias_var (current_function_decl); /* First, walk the variables and their DECL_INITIAL's */ if (cfun->unexpanded_var_list) { tree vars, var; for (vars = cfun->unexpanded_var_list; vars; vars = TREE_CHAIN (vars)) { var = TREE_VALUE (vars); if (TREE_CODE (var) != LABEL_DECL && decl_function_context (var) == NULL && DECL_INITIAL (var)) find_func_aliases (var); } } /* Now walk all statements and derive aliases. */ FOR_EACH_BB (bb) { block_stmt_iterator bsi; for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) find_func_aliases (bsi_stmt (bsi)); } pta_global_var = NULL_TREE; } struct tree_opt_pass pass_build_pta = { "pta", /* name */ NULL, /* gate */ create_alias_vars, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_PTA, /* tv_id */ PROP_cfg, /* properties_required */ PROP_pta, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Delete created points-to sets. */ static void delete_alias_vars (void) { size_t i; if (flag_tree_points_to != PTA_ANDERSEN) return; for (i = 0; i < VARRAY_ACTIVE_SIZE (local_alias_vars); i++) { tree key = VARRAY_TREE (local_alias_vars, i); if (DECL_P (key)) DECL_PTA_ALIASVAR (key) = NULL; else abort (); } for (i = 0; i < VARRAY_ACTIVE_SIZE (local_alias_varnums); i ++) VARRAY_GENERIC_PTR (alias_vars, VARRAY_INT (local_alias_varnums, i)) = NULL; if (!current_alias_ops->ip && !current_alias_ops->ip_partial) { /* VARRAY_CLEAR (alias_vars); */ VARRAY_CLEAR (local_alias_vars); VARRAY_CLEAR (local_alias_varnums); } BITMAP_XFREE (addrargs); current_alias_ops->cleanup (current_alias_ops); } struct tree_opt_pass pass_del_pta = { "pta", /* name */ NULL, /* gate */ delete_alias_vars, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_PTA, /* tv_id */ PROP_pta, /* properties_required */ 0, /* properties_provided */ PROP_pta, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Initialize points-to analysis machinery. 
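This sets up the ADDRARGS bitmap and the per-function varrays of alias variables; the global ALIAS_VARS array is only (re)initialized when the analysis is not interprocedural or when it does not exist yet.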
*/ void init_alias_vars (void) { current_alias_ops->init (current_alias_ops); addrargs = BITMAP_XMALLOC (); VARRAY_TREE_INIT (local_alias_vars, 10, "Local alias vars"); VARRAY_INT_INIT (local_alias_varnums, 10, "Local alias varnums"); if ((!current_alias_ops->ip && !current_alias_ops->ip_partial) || alias_vars == NULL) VARRAY_GENERIC_PTR_INIT (alias_vars, 10, "Alias vars"); } /* Return true if PTR can't point to anything (i.e. it has an empty points-to set). */ bool empty_points_to_set (tree ptr) { alias_var ptrtv; #if !FIELD_BASED #else if (TREE_CODE (ptr) == COMPONENT_REF) ptr = TREE_OPERAND (ptr, 1); #endif if (DECL_P (ptr)) { ptrtv = DECL_PTA_ALIASVAR (ptr); if (!ptrtv) return true; } else abort (); return current_alias_ops->empty_points_to_set (current_alias_ops, ptrtv); } /* Return true if PTR and VAR have the same points-to set. */ bool same_points_to_set (tree ptr, tree var) { alias_var ptrtv, vartv; #if !FIELD_BASED #else if (TREE_CODE (ptr) == COMPONENT_REF) ptr = TREE_OPERAND (ptr, 1); if (TREE_CODE (var) == COMPONENT_REF) var = TREE_OPERAND (var, 1); #endif if (ptr == var) return true; if (DECL_P (ptr)) { ptrtv = DECL_PTA_ALIASVAR (ptr); if (!ptrtv) return false; } else abort (); if (DECL_P (var)) { vartv = DECL_PTA_ALIASVAR (var); if (!vartv) return false; } else abort (); return current_alias_ops->same_points_to_set (current_alias_ops, vartv, ptrtv); } /* Determine whether two variables (PTR and VAR) may-alias. Returns TRUE if PTR may-alias VAR. */ bool ptr_may_alias_var (tree ptr, tree var) { alias_var ptrtv, vartv; #if !FIELD_BASED #else if (TREE_CODE (ptr) == COMPONENT_REF) ptr = TREE_OPERAND (ptr, 1); if (TREE_CODE (var) == COMPONENT_REF) var = TREE_OPERAND (var, 1); #endif if (ptr == var) return true; if (DECL_P (ptr)) { ptrtv = DECL_PTA_ALIASVAR (ptr); if (!ptrtv) return false; } else abort (); if (DECL_P (var)) { vartv = DECL_PTA_ALIASVAR (var); if (!vartv) return false; } else abort (); return current_alias_ops->may_alias (current_alias_ops, ptrtv, vartv); } #define MASK_POINTER(P) ((unsigned)((unsigned long)(P) & 0xffff)) const char * alias_get_name (tree t) { const char *name; #if FIELD_BASED if (TREE_CODE (t) == FIELD_DECL) { /* First get the name of the field, then the prefix, then smash them together. */ const char *fieldname = IDENTIFIER_POINTER (DECL_NAME (t)); const char *prefix = alias_get_name (DECL_CONTEXT (t)); char *smashed; size_t neededlen = strlen (fieldname) + strlen (prefix) + 2; smashed = ggc_alloc (neededlen); sprintf (smashed, "%s.%s", prefix, fieldname); name = smashed; } else if (TYPE_P (t)) { if (TYPE_NAME (t) && IDENTIFIER_POINTER (TYPE_NAME (t))) name = IDENTIFIER_POINTER (TYPE_NAME (t)); else name = "<unknown type>"; } else #endif { if (TREE_CODE (t) == FUNCTION_DECL) name = IDENTIFIER_POINTER (DECL_NAME (t)); else if (TREE_CODE (t) == RESULT_DECL) name = "<return value>"; else name = get_name (t); } if (!name) { char *namep; /* 2 = UF 4 = the masked pointer 2 = the <> around it 1 = the terminator. */ namep = ggc_alloc (2 + 4 + 2 + 1); sprintf (namep, "<UF%x>", MASK_POINTER (t)); return namep; } return name; } /* Type information for tree-alias-common.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_tree_alias_common_h[] = { { &local_alias_varnums, 1, sizeof (local_alias_varnums), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &local_alias_vars, 1, sizeof (local_alias_vars), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &alias_vars, 1, sizeof (alias_vars), &gt_ggc_m_P13alias_var_def15varray_head_tag, &gt_pch_n_P13alias_var_def15varray_head_tag }, LAST_GGC_ROOT_TAB }; /* Conditional constant propagation pass for the GNU compiler. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Adapted from original RTL SSA-CCP by Daniel Berlin Adapted to GIMPLE trees by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Conditional constant propagation. References: Constant propagation with conditional branches, Wegman and Zadeck, ACM TOPLAS 13(2):181-210. Building an Optimizing Compiler, Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9. Advanced Compiler Design and Implementation, Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */ /* These RTL headers are needed for basic-block.h. */ /* Possible lattice values. */ typedef enum { UNINITIALIZED = 0, UNDEFINED, CONSTANT, VARYING } latticevalue; /* Use the TREE_VISITED bitflag to mark statements and PHI nodes that have been deemed VARYING and shouldn't be simulated again. */ #define DONT_SIMULATE_AGAIN(T) TREE_VISITED (T) /* Main structure for CCP. Contains the lattice value and, if it's a constant, the constant value. */ typedef struct { latticevalue lattice_val; tree const_val; } value; /* A bitmap to keep track of executable blocks in the CFG. */ static sbitmap executable_blocks; /* Array of control flow edges on the worklist. */ static GTY(()) varray_type cfg_blocks = NULL; static unsigned int cfg_blocks_num = 0; static int cfg_blocks_tail; static int cfg_blocks_head; static sbitmap bb_in_list; /* This is used to track the current value of each variable. */ static value *value_vector; /* Worklist of SSA edges which will need reexamination as their definition has changed. SSA edges are def-use edges in the SSA web. For each edge, we store the definition statement or PHI node D. The destination nodes that need to be visited are accessed using immediate_uses (D). */ static GTY(()) varray_type ssa_edges; /* Identical to SSA_EDGES. For performance reasons, the list of SSA edges is split into two.
One contains all SSA edges who need to be reexamined because their lattice value changed to varying (this worklist), and the other contains all other SSA edges to be reexamined (ssa_edges). Since most values in the program are varying, the ideal situation is to move them to that lattice value as quickly as possible. Thus, it doesn't make sense to process any other type of lattice value until all varying values are propagated fully, which is one thing using the varying worklist achieves. In addition, if you don't use a separate worklist for varying edges, you end up with situations where lattice values move from undefined->constant->varying instead of undefined->varying. */ static GTY(()) varray_type varying_ssa_edges; static void initialize (void); static void finalize_ssa (void); static void visit_phi_node (tree); static tree ccp_fold (tree); static value cp_lattice_meet (value, value); static void visit_stmt (tree); static void visit_cond_stmt (tree); static void visit_assignment (tree); static void add_var_to_ssa_edges_worklist (tree, value); static void add_outgoing_control_edges (basic_block); static void add_control_edge (edge); static void def_to_varying (tree); static void set_lattice_value (tree, value); static void simulate_block (basic_block); static void simulate_stmt (tree); static void substitute_and_fold (void); static value evaluate_stmt (tree); static void dump_lattice_value (FILE *, const char *, value); static bool replace_uses_in (tree, bool *); static latticevalue likely_value (tree); static tree get_rhs (tree); static bool set_rhs (tree *, tree); static value *get_value (tree); static value get_default_value (tree); static tree ccp_fold_builtin (tree, tree); static bool get_strlen (tree, tree *, bitmap); static inline bool cfg_blocks_empty_p (void); static void cfg_blocks_add (basic_block); static basic_block cfg_blocks_get (void); static bool need_imm_uses_for_ccp (tree var); /* Process an SSA edge worklist. WORKLIST is the SSA edge worklist to drain. This pops statements off the given WORKLIST and processes them until there are no more statements on WORKLIST. */ static void process_ssa_edge_worklist (varray_type *worklist) { /* Drain the entire worklist. */ while (VARRAY_ACTIVE_SIZE (*worklist) > 0) { /* Pull the statement to simulate off the worklist. */ tree stmt = VARRAY_TOP_TREE (*worklist); stmt_ann_t ann = stmt_ann (stmt); VARRAY_POP (*worklist); /* visit_stmt can "cancel" reevaluation of some statements. If it does, then in_ccp_worklist will be zero. */ if (ann->in_ccp_worklist) { ann->in_ccp_worklist = 0; simulate_stmt (stmt); } } } /* Main entry point for SSA Conditional Constant Propagation. FNDECL is the declaration for the function to optimize. On exit, VARS_TO_RENAME will contain the symbols that have been exposed by the propagation of ADDR_EXPR expressions into pointer dereferences and need to be renamed into SSA. PHASE indicates which dump file from the DUMP_FILES array to use when dumping debugging information. */ static void tree_ssa_ccp (void) { initialize (); /* Iterate until the worklists are empty. */ while (!cfg_blocks_empty_p () || VARRAY_ACTIVE_SIZE (ssa_edges) > 0 || VARRAY_ACTIVE_SIZE (varying_ssa_edges) > 0) { if (!cfg_blocks_empty_p ()) { /* Pull the next block to simulate off the worklist. */ basic_block dest_block = cfg_blocks_get (); simulate_block (dest_block); } /* In order to move things to varying as quickly as possible,process the VARYING_SSA_EDGES worklist first. 
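That way a statement such as 'x_1 = y_2 + 1' whose operand y_2 has just become VARYING is moved straight from UNDEFINED to VARYING, rather than first being evaluated against a stale constant from the ordinary worklist.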
*/ process_ssa_edge_worklist (&varying_ssa_edges); /* Now process the SSA_EDGES worklist. */ process_ssa_edge_worklist (&ssa_edges); } /* Now perform substitutions based on the known constant values. */ substitute_and_fold (); /* Now cleanup any unreachable code. */ cleanup_tree_cfg (); /* Free allocated memory. */ finalize_ssa (); /* Debugging dumps. */ if (dump_file && (dump_flags & TDF_DETAILS)) { dump_referenced_vars (dump_file); fprintf (dump_file, "\n\n"); } } static bool gate_ccp (void) { return flag_tree_ccp != 0; } struct tree_opt_pass pass_ccp = { "ccp", /* name */ gate_ccp, /* gate */ tree_ssa_ccp, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_CCP, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_rename_vars | TODO_ggc_collect | TODO_verify_ssa | TODO_verify_stmts /* todo_flags_finish */ }; /* Get the constant value associated with variable VAR. */ static value * get_value (tree var) { value *val; #if defined ENABLE_CHECKING if (TREE_CODE (var) != SSA_NAME) abort (); #endif val = &value_vector[SSA_NAME_VERSION (var)]; if (val->lattice_val == UNINITIALIZED) *val = get_default_value (var); return val; } /* Simulate the execution of BLOCK. Evaluate the statement associated with each variable reference inside the block. */ static void simulate_block (basic_block block) { tree phi; /* There is nothing to do for the exit block. */ if (block == EXIT_BLOCK_PTR) return; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\nSimulating block %d\n", block->index); /* Always simulate PHI nodes, even if we have simulated this block before. */ for (phi = phi_nodes (block); phi; phi = PHI_CHAIN (phi)) visit_phi_node (phi); /* If this is the first time we've simulated this block, then we must simulate each of its statements. */ if (!TEST_BIT (executable_blocks, block->index)) { block_stmt_iterator j; unsigned int normal_edge_count; edge e, normal_edge; /* Note that we have simulated this block. */ SET_BIT (executable_blocks, block->index); for (j = bsi_start (block); !bsi_end_p (j); bsi_next (&j)) visit_stmt (bsi_stmt (j)); /* We can not predict when abnormal edges will be executed, so once a block is considered executable, we consider any outgoing abnormal edges as executable. At the same time, if this block has only one successor that is reached by non-abnormal edges, then add that successor to the worklist. */ normal_edge_count = 0; normal_edge = NULL; for (e = block->succ; e; e = e->succ_next) { if (e->flags & EDGE_ABNORMAL) { add_control_edge (e); } else { normal_edge_count++; normal_edge = e; } } if (normal_edge_count == 1) add_control_edge (normal_edge); } } /* Follow the def-use edges for statement DEF_STMT and simulate all the statements reached by it. */ static void simulate_stmt (tree use_stmt) { basic_block use_bb = bb_for_stmt (use_stmt); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\nSimulating statement (from ssa_edges): "); print_generic_stmt (dump_file, use_stmt, dump_flags); } if (TREE_CODE (use_stmt) == PHI_NODE) { /* PHI nodes are always visited, regardless of whether or not the destination block is executable. */ visit_phi_node (use_stmt); } else if (TEST_BIT (executable_blocks, use_bb->index)) { /* Otherwise, visit the statement containing the use reached by DEF, only if the destination block is marked executable. */ visit_stmt (use_stmt); } } /* Perform final substitution and folding. 
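For example, if propagation proved that x_3 is the constant 4, a statement such as 'y_5 = x_3 * 2' is rewritten to 'y_5 = 4 * 2' and then folded to 'y_5 = 8'.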
After this pass the program should still be in SSA form. */ static void substitute_and_fold (void) { basic_block bb; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\nSubstituing constants and folding statements\n\n"); /* Substitute constants in every statement of every basic block. */ FOR_EACH_BB (bb) { block_stmt_iterator i; tree phi; /* Propagate our known constants into PHI nodes. */ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { int i; for (i = 0; i < PHI_NUM_ARGS (phi); i++) { value *new_val; use_operand_p orig_p = PHI_ARG_DEF_PTR (phi, i); tree orig = USE_FROM_PTR (orig_p); if (! SSA_VAR_P (orig)) break; new_val = get_value (orig); if (new_val->lattice_val == CONSTANT && may_propagate_copy (orig, new_val->const_val)) SET_USE (orig_p, new_val->const_val); } } for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i)) { bool replaced_address; tree stmt = bsi_stmt (i); /* Skip statements that have been folded already. */ if (stmt_modified_p (stmt) || !is_exec_stmt (stmt)) continue; /* Replace the statement with its folded version and mark it folded. */ if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Line %d: replaced ", get_lineno (stmt)); print_generic_stmt (dump_file, stmt, TDF_SLIM); } if (replace_uses_in (stmt, &replaced_address)) { bool changed = fold_stmt (bsi_stmt_ptr (i)); stmt = bsi_stmt(i); modify_stmt (stmt); /* If we folded a builtin function, we'll likely need to rename VDEFs. */ if (replaced_address || changed) { mark_new_vars_to_rename (stmt, vars_to_rename); if (maybe_clean_eh_stmt (stmt)) tree_purge_dead_eh_edges (bb); } } if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " with "); print_generic_stmt (dump_file, stmt, TDF_SLIM); fprintf (dump_file, "\n"); } } } } /* Loop through the PHI_NODE's parameters for BLOCK and compare their lattice values to determine PHI_NODE's lattice value. The value of a PHI node is determined calling cp_lattice_meet() with all the arguments of the PHI node that are incoming via executable edges. */ static void visit_phi_node (tree phi) { bool short_circuit = 0; value phi_val, *curr_val; int i; /* If the PHI node has already been deemed to be VARYING, don't simulate it again. */ if (DONT_SIMULATE_AGAIN (phi)) return; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\nVisiting PHI node: "); print_generic_expr (dump_file, phi, dump_flags); } curr_val = get_value (PHI_RESULT (phi)); switch (curr_val->lattice_val) { case VARYING: if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\n Shortcircuit. Default of VARYING."); short_circuit = 1; break; case CONSTANT: phi_val = *curr_val; break; case UNDEFINED: case UNINITIALIZED: phi_val.lattice_val = UNDEFINED; phi_val.const_val = NULL_TREE; break; default: abort (); } /* If the variable is volatile or the variable is never referenced in a real operand, then consider the PHI node VARYING. */ if (short_circuit || TREE_THIS_VOLATILE (SSA_NAME_VAR (PHI_RESULT (phi)))) { phi_val.lattice_val = VARYING; phi_val.const_val = NULL; } else for (i = 0; i < PHI_NUM_ARGS (phi); i++) { /* Compute the meet operator over all the PHI arguments. */ edge e = PHI_ARG_EDGE (phi, i); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\n Argument #%d (%d -> %d %sexecutable)\n", i, e->src->index, e->dest->index, (e->flags & EDGE_EXECUTABLE) ? "" : "not "); } /* If the incoming edge is executable, Compute the meet operator for the existing value of the PHI node and the current PHI argument. 
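For instance, meeting CONSTANT 4 with CONSTANT 4 remains CONSTANT 4, meeting CONSTANT 4 with CONSTANT 5 or with VARYING yields VARYING, and an UNDEFINED argument leaves the accumulated value unchanged.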
*/ if (e->flags & EDGE_EXECUTABLE) { tree rdef = PHI_ARG_DEF (phi, i); value *rdef_val, val; if (is_gimple_min_invariant (rdef)) { val.lattice_val = CONSTANT; val.const_val = rdef; rdef_val = &val; } else rdef_val = get_value (rdef); phi_val = cp_lattice_meet (phi_val, *rdef_val); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\t"); print_generic_expr (dump_file, rdef, dump_flags); dump_lattice_value (dump_file, "\tValue: ", *rdef_val); fprintf (dump_file, "\n"); } if (phi_val.lattice_val == VARYING) break; } } if (dump_file && (dump_flags & TDF_DETAILS)) { dump_lattice_value (dump_file, "\n PHI node value: ", phi_val); fprintf (dump_file, "\n\n"); } set_lattice_value (PHI_RESULT (phi), phi_val); if (phi_val.lattice_val == VARYING) DONT_SIMULATE_AGAIN (phi) = 1; } /* Compute the meet operator between VAL1 and VAL2: any M UNDEFINED = any any M VARYING = VARYING Ci M Cj = Ci if (i == j) Ci M Cj = VARYING if (i != j) */ static value cp_lattice_meet (value val1, value val2) { value result; /* any M UNDEFINED = any. */ if (val1.lattice_val == UNDEFINED) return val2; else if (val2.lattice_val == UNDEFINED) return val1; /* any M VARYING = VARYING. */ if (val1.lattice_val == VARYING || val2.lattice_val == VARYING) { result.lattice_val = VARYING; result.const_val = NULL_TREE; return result; } /* Ci M Cj = Ci if (i == j) Ci M Cj = VARYING if (i != j) */ if (simple_cst_equal (val1.const_val, val2.const_val) == 1) { result.lattice_val = CONSTANT; result.const_val = val1.const_val; } else { result.lattice_val = VARYING; result.const_val = NULL_TREE; } return result; } /* Evaluate statement STMT. If the statement produces an output value and its evaluation changes the lattice value of its output, do the following: - If the statement is an assignment, add all the SSA edges starting at this definition. - If the statement is a conditional branch: . If the statement evaluates to non-constant, add all edges to worklist. . If the statement is constant, add the edge executed as the result of the branch. */ static void visit_stmt (tree stmt) { size_t i; stmt_ann_t ann; def_optype defs; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; /* If the statement has already been deemed to be VARYING, don't simulate it again. */ if (DONT_SIMULATE_AGAIN (stmt)) return; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\nVisiting statement: "); print_generic_stmt (dump_file, stmt, TDF_SLIM); fprintf (dump_file, "\n"); } ann = stmt_ann (stmt); /* If this statement is already in the worklist then "cancel" it. The reevaluation implied by the worklist entry will produce the same value we generate here and thus reevaluating it again from the worklist is pointless. */ if (ann->in_ccp_worklist) ann->in_ccp_worklist = 0; /* Now examine the statement. If the statement is an assignment that produces a single output value, evaluate its RHS to see if the lattice value of its output has changed. */ if (TREE_CODE (stmt) == MODIFY_EXPR && TREE_CODE (TREE_OPERAND (stmt, 0)) == SSA_NAME) visit_assignment (stmt); /* Definitions made by statements other than assignments to SSA_NAMEs represent unknown modifications to their outputs. Mark them VARYING. */ else if (NUM_DEFS (defs = DEF_OPS (ann)) != 0) { DONT_SIMULATE_AGAIN (stmt) = 1; for (i = 0; i < NUM_DEFS (defs); i++) { tree def = DEF_OP (defs, i); def_to_varying (def); } } /* If STMT is a conditional branch, see if we can determine which branch will be taken. 
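For example, once the predicate of 'if (x_2 == 0)' is known to evaluate to a constant, only the corresponding outgoing edge is added to the CFG worklist and the other arm may never be simulated at all.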
*/ else if (TREE_CODE (stmt) == COND_EXPR || TREE_CODE (stmt) == SWITCH_EXPR) visit_cond_stmt (stmt); /* Any other kind of statement is not interesting for constant propagation and, therefore, not worth simulating. */ else { DONT_SIMULATE_AGAIN (stmt) = 1; /* If STMT is a computed goto, then mark all the output edges executable. */ if (computed_goto_p (stmt)) add_outgoing_control_edges (bb_for_stmt (stmt)); } /* Mark all V_MAY_DEF operands VARYING. */ v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) def_to_varying (V_MAY_DEF_RESULT (v_may_defs, i)); /* Mark all V_MUST_DEF operands VARYING. */ v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) def_to_varying (V_MUST_DEF_OP (v_must_defs, i)); } /* Visit the assignment statement STMT. Set the value of its LHS to the value computed by the RHS. */ static void visit_assignment (tree stmt) { value val; tree lhs, rhs; lhs = TREE_OPERAND (stmt, 0); rhs = TREE_OPERAND (stmt, 1); if (TREE_THIS_VOLATILE (SSA_NAME_VAR (lhs))) { /* Volatile variables are always VARYING. */ val.lattice_val = VARYING; val.const_val = NULL_TREE; } else if (TREE_CODE (rhs) == SSA_NAME) { /* For a simple copy operation, we copy the lattice values. */ value *nval = get_value (rhs); val = *nval; } else { /* Evaluate the statement. */ val = evaluate_stmt (stmt); } /* FIXME: Hack. If this was a definition of a bitfield, we need to widen the constant value into the type of the destination variable. This should not be necessary if GCC represented bitfields properly. */ { tree lhs = TREE_OPERAND (stmt, 0); if (val.lattice_val == CONSTANT && TREE_CODE (lhs) == COMPONENT_REF && DECL_BIT_FIELD (TREE_OPERAND (lhs, 1))) { tree w = widen_bitfield (val.const_val, TREE_OPERAND (lhs, 1), lhs); if (w && is_gimple_min_invariant (w)) val.const_val = w; else { val.lattice_val = VARYING; val.const_val = NULL; } } } /* Set the lattice value of the statement's output. */ set_lattice_value (lhs, val); if (val.lattice_val == VARYING) DONT_SIMULATE_AGAIN (stmt) = 1; } /* Visit the conditional statement STMT. If it evaluates to a constant value, mark outgoing edges appropriately. */ static void visit_cond_stmt (tree stmt) { edge e; value val; basic_block block; block = bb_for_stmt (stmt); val = evaluate_stmt (stmt); /* Find which edge out of the conditional block will be taken and add it to the worklist. If no single edge can be determined statically, add all outgoing edges from BLOCK. */ e = find_taken_edge (block, val.const_val); if (e) add_control_edge (e); else { DONT_SIMULATE_AGAIN (stmt) = 1; add_outgoing_control_edges (block); } } /* Add all the edges coming out of BB to the control flow worklist. */ static void add_outgoing_control_edges (basic_block bb) { edge e; for (e = bb->succ; e; e = e->succ_next) add_control_edge (e); } /* Add edge E to the control flow worklist. */ static void add_control_edge (edge e) { basic_block bb = e->dest; if (bb == EXIT_BLOCK_PTR) return; /* If the edge had already been executed, skip it. */ if (e->flags & EDGE_EXECUTABLE) return; e->flags |= EDGE_EXECUTABLE; /* If the block is already in the list, we're done. */ if (TEST_BIT (bb_in_list, bb->index)) return; cfg_blocks_add (bb); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Adding Destination of edge (%d -> %d) to worklist\n\n", e->src->index, e->dest->index); } /* CCP specific front-end to the non-destructive constant folding routines. Attempt to simplify the RHS of STMT knowing that one or more operands are constants. 
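For example, with x_1 known to be 3, an RHS of 'x_1 + 4' simplifies to 7 and 'x_1 < 0' simplifies to 0.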
If simplification is possible, return the simplified RHS, otherwise return the original RHS. */ static tree ccp_fold (tree stmt) { tree rhs = get_rhs (stmt); enum tree_code code = TREE_CODE (rhs); int kind = TREE_CODE_CLASS (code); tree retval = NULL_TREE; /* If the RHS is just a variable, then that variable must now have a constant value that we can return directly. */ if (TREE_CODE (rhs) == SSA_NAME) return get_value (rhs)->const_val; /* Unary operators. Note that we know the single operand must be a constant. So this should almost always return a simplified RHS. */ if (kind == '1') { /* Handle unary operators which can appear in GIMPLE form. */ tree op0 = TREE_OPERAND (rhs, 0); /* Simplify the operand down to a constant. */ if (TREE_CODE (op0) == SSA_NAME) { value *val = get_value (op0); if (val->lattice_val == CONSTANT) op0 = get_value (op0)->const_val; } retval = nondestructive_fold_unary_to_constant (code, TREE_TYPE (rhs), op0); /* If we folded, but did not create an invariant, then we can not use this expression. */ if (retval && ! is_gimple_min_invariant (retval)) return NULL; /* If we could not fold the expression, but the arguments are all constants and gimple values, then build and return the new expression. In some cases the new expression is still something we can use as a replacement for an argument. This happens with NOP conversions of types for example. In other cases the new expression can not be used as a replacement for an argument (as it would create non-gimple code). But the new expression can still be used to derive other constants. */ if (! retval && is_gimple_min_invariant (op0)) return build1 (code, TREE_TYPE (rhs), op0); } /* Binary and comparison operators. We know one or both of the operands are constants. */ else if (kind == '2' || kind == '<' || code == TRUTH_AND_EXPR || code == TRUTH_OR_EXPR || code == TRUTH_XOR_EXPR) { /* Handle binary and comparison operators that can appear in GIMPLE form. */ tree op0 = TREE_OPERAND (rhs, 0); tree op1 = TREE_OPERAND (rhs, 1); /* Simplify the operands down to constants when appropriate. */ if (TREE_CODE (op0) == SSA_NAME) { value *val = get_value (op0); if (val->lattice_val == CONSTANT) op0 = val->const_val; } if (TREE_CODE (op1) == SSA_NAME) { value *val = get_value (op1); if (val->lattice_val == CONSTANT) op1 = val->const_val; } retval = nondestructive_fold_binary_to_constant (code, TREE_TYPE (rhs), op0, op1); /* If we folded, but did not create an invariant, then we can not use this expression. */ if (retval && ! is_gimple_min_invariant (retval)) return NULL; /* If we could not fold the expression, but the arguments are all constants and gimple values, then build and return the new expression. In some cases the new expression is still something we can use as a replacement for an argument. This happens with NOP conversions of types for example. In other cases the new expression can not be used as a replacement for an argument (as it would create non-gimple code). But the new expression can still be used to derive other constants. */ if (! retval && is_gimple_min_invariant (op0) && is_gimple_min_invariant (op1)) return build (code, TREE_TYPE (rhs), op0, op1); } /* We may be able to fold away calls to builtin functions if their arguments are constants. 
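A typical case is a call such as 'strlen (s_4)' where s_4 is known to hold the address of a string constant; substituting that value lets fold_builtin reduce the call to an integer constant.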
*/ else if (code == CALL_EXPR && TREE_CODE (TREE_OPERAND (rhs, 0)) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (rhs, 0), 0)) == FUNCTION_DECL) && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (rhs, 0), 0))) { use_optype uses = STMT_USE_OPS (stmt); if (NUM_USES (uses) != 0) { tree *orig; size_t i; /* Preserve the original values of every operand. */ orig = xmalloc (sizeof (tree) * NUM_USES (uses)); for (i = 0; i < NUM_USES (uses); i++) orig[i] = USE_OP (uses, i); /* Substitute operands with their values and try to fold. */ replace_uses_in (stmt, NULL); retval = fold_builtin (rhs); /* Restore operands to their original form. */ for (i = 0; i < NUM_USES (uses); i++) SET_USE_OP (uses, i, orig[i]); free (orig); } } else return rhs; /* If we got a simplified form, see if we need to convert its type. */ if (retval) { if (TREE_TYPE (retval) != TREE_TYPE (rhs)) retval = fold_convert (TREE_TYPE (rhs), retval); if (TREE_TYPE (retval) == TREE_TYPE (rhs)) return retval; } /* No simplification was possible. */ return rhs; } /* Evaluate statement STMT. */ static value evaluate_stmt (tree stmt) { value val; tree simplified; latticevalue likelyvalue = likely_value (stmt); /* If the statement is likely to have a CONSTANT result, then try to fold the statement to determine the constant value. */ if (likelyvalue == CONSTANT) simplified = ccp_fold (stmt); /* If the statement is likely to have a VARYING result, then do not bother folding the statement. */ else if (likelyvalue == VARYING) simplified = get_rhs (stmt); /* Otherwise the statement is likely to have an UNDEFINED value and there will be nothing to do. */ else simplified = NULL_TREE; if (simplified && is_gimple_min_invariant (simplified)) { /* The statement produced a constant value. */ val.lattice_val = CONSTANT; val.const_val = simplified; } else { /* The statement produced a nonconstant value. If the statement had undefined operands, then the result of the statement should be undefined. Else the result of the statement is VARYING. */ val.lattice_val = (likelyvalue == UNDEFINED ? UNDEFINED : VARYING); val.const_val = NULL_TREE; } return val; } /* Debugging dumps. */ static void dump_lattice_value (FILE *outf, const char *prefix, value val) { switch (val.lattice_val) { case UNDEFINED: fprintf (outf, "%sUNDEFINED", prefix); break; case VARYING: fprintf (outf, "%sVARYING", prefix); break; case CONSTANT: fprintf (outf, "%sCONSTANT ", prefix); print_generic_expr (outf, val.const_val, dump_flags); break; default: abort (); } } /* Given a constant value VAL for bitfield FIELD, and a destination variable VAR, return VAL appropriately widened to fit into VAR. If FIELD is wider than HOST_WIDE_INT, NULL is returned. */ tree widen_bitfield (tree val, tree field, tree var) { unsigned HOST_WIDE_INT var_size, field_size; tree wide_val; unsigned HOST_WIDE_INT mask; unsigned int i; /* We can only do this if the size of the type and field and VAL are all constants representable in HOST_WIDE_INT. */ if (!host_integerp (TYPE_SIZE (TREE_TYPE (var)), 1) || !host_integerp (DECL_SIZE (field), 1) || !host_integerp (val, 0)) return NULL_TREE; var_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (var)), 1); field_size = tree_low_cst (DECL_SIZE (field), 1); /* Give up if either the bitfield or the variable are too wide. 
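Otherwise the widening is a single mask operation: for example, the three-bit pattern 5 (binary 101) stored in a signed bitfield of a 32-bit 'int' has the sign-extension mask 0xfffffff8 ORed in and becomes -3, while the same pattern in an unsigned field is simply ANDed with 7.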
*/ if (field_size > HOST_BITS_PER_WIDE_INT || var_size > HOST_BITS_PER_WIDE_INT) return NULL_TREE; #if defined ENABLE_CHECKING if (var_size < field_size) abort (); #endif /* If the sign bit of the value is not set or the field's type is unsigned, just mask off the high order bits of the value. */ if (DECL_UNSIGNED (field) || !(tree_low_cst (val, 0) & (((HOST_WIDE_INT)1) << (field_size - 1)))) { /* Zero extension. Build a mask with the lower 'field_size' bits set and a BIT_AND_EXPR node to clear the high order bits of the value. */ for (i = 0, mask = 0; i < field_size; i++) mask |= ((HOST_WIDE_INT) 1) << i; wide_val = build (BIT_AND_EXPR, TREE_TYPE (var), val, fold_convert (TREE_TYPE (var), build_int_2 (mask, 0))); } else { /* Sign extension. Create a mask with the upper 'field_size' bits set and a BIT_IOR_EXPR to set the high order bits of the value. */ for (i = 0, mask = 0; i < (var_size - field_size); i++) mask |= ((HOST_WIDE_INT) 1) << (var_size - i - 1); wide_val = build (BIT_IOR_EXPR, TREE_TYPE (var), val, fold_convert (TREE_TYPE (var), build_int_2 (mask, 0))); } return fold (wide_val); } /* Function indicating whether we ought to include information for 'var' when calculating immediate uses. */ static bool need_imm_uses_for_ccp (tree var) { return get_value (var)->lattice_val != VARYING; } /* Initialize local data structures and worklists for CCP. */ static void initialize (void) { edge e; basic_block bb; sbitmap virtual_var; /* Worklists of SSA edges. */ VARRAY_TREE_INIT (ssa_edges, 20, "ssa_edges"); VARRAY_TREE_INIT (varying_ssa_edges, 20, "varying_ssa_edges"); executable_blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (executable_blocks); bb_in_list = sbitmap_alloc (last_basic_block); sbitmap_zero (bb_in_list); value_vector = (value *) xmalloc (num_ssa_names * sizeof (value)); memset (value_vector, 0, num_ssa_names * sizeof (value)); /* 1 if ssa variable is used in a virtual variable context. */ virtual_var = sbitmap_alloc (num_ssa_names); sbitmap_zero (virtual_var); /* Initialize default values and simulation flags for PHI nodes, statements and edges. */ FOR_EACH_BB (bb) { block_stmt_iterator i; tree stmt; stmt_ann_t ann; def_optype defs; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; size_t x; int vary; /* Get the default value for each definition. */ for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i)) { vary = 0; stmt = bsi_stmt (i); get_stmt_operands (stmt); ann = stmt_ann (stmt); defs = DEF_OPS (ann); for (x = 0; x < NUM_DEFS (defs); x++) { tree def = DEF_OP (defs, x); if (get_value (def)->lattice_val == VARYING) vary = 1; } DONT_SIMULATE_AGAIN (stmt) = vary; /* Mark all V_MAY_DEF operands VARYING. */ v_may_defs = V_MAY_DEF_OPS (ann); for (x = 0; x < NUM_V_MAY_DEFS (v_may_defs); x++) { tree res = V_MAY_DEF_RESULT (v_may_defs, x); get_value (res)->lattice_val = VARYING; SET_BIT (virtual_var, SSA_NAME_VERSION (res)); } /* Mark all V_MUST_DEF operands VARYING. */ v_must_defs = V_MUST_DEF_OPS (ann); for (x = 0; x < NUM_V_MUST_DEFS (v_must_defs); x++) { tree v_must_def = V_MUST_DEF_OP (v_must_defs, x); get_value (v_must_def)->lattice_val = VARYING; SET_BIT (virtual_var, SSA_NAME_VERSION (v_must_def)); } } for (e = bb->succ; e; e = e->succ_next) e->flags &= ~EDGE_EXECUTABLE; } /* Now process PHI nodes. 
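A PHI result is forced to VARYING here as soon as one of its arguments is an SSA name that was marked above as appearing in a virtual operand, since such names never carry a useful constant value.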
*/ FOR_EACH_BB (bb) { tree phi, var; int x; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { value *val; val = get_value (PHI_RESULT (phi)); if (val->lattice_val != VARYING) { for (x = 0; x < PHI_NUM_ARGS (phi); x++) { var = PHI_ARG_DEF (phi, x); /* If one argument is virtual, the result is virtual, and therefore varying. */ if (TREE_CODE (var) == SSA_NAME) { if (TEST_BIT (virtual_var, SSA_NAME_VERSION (var))) { val->lattice_val = VARYING; SET_BIT (virtual_var, SSA_NAME_VERSION (PHI_RESULT (phi))); break; } } } } DONT_SIMULATE_AGAIN (phi) = ((val->lattice_val == VARYING) ? 1 : 0); } } sbitmap_free (virtual_var); /* Compute immediate uses for variables we care about. */ compute_immediate_uses (TDFA_USE_OPS, need_imm_uses_for_ccp); if (dump_file && (dump_flags & TDF_DETAILS)) dump_immediate_uses (dump_file); VARRAY_BB_INIT (cfg_blocks, 20, "cfg_blocks"); /* Seed the algorithm by adding the successors of the entry block to the edge worklist. */ for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) { if (e->dest != EXIT_BLOCK_PTR) { e->flags |= EDGE_EXECUTABLE; cfg_blocks_add (e->dest); } } } /* Free allocated storage. */ static void finalize_ssa (void) { ssa_edges = NULL; varying_ssa_edges = NULL; cfg_blocks = NULL; free (value_vector); sbitmap_free (bb_in_list); sbitmap_free (executable_blocks); free_df (); } /* Is the block worklist empty. */ static inline bool cfg_blocks_empty_p (void) { return (cfg_blocks_num == 0); } /* Add a basic block to the worklist. */ static void cfg_blocks_add (basic_block bb) { if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR) return; if (TEST_BIT (bb_in_list, bb->index)) return; if (cfg_blocks_empty_p ()) { cfg_blocks_tail = cfg_blocks_head = 0; cfg_blocks_num = 1; } else { cfg_blocks_num++; if (cfg_blocks_num > VARRAY_SIZE (cfg_blocks)) { /* We have to grow the array now. Adjust to queue to occupy the full space of the original array. */ cfg_blocks_tail = VARRAY_SIZE (cfg_blocks); cfg_blocks_head = 0; VARRAY_GROW (cfg_blocks, 2 * VARRAY_SIZE (cfg_blocks)); } else cfg_blocks_tail = (cfg_blocks_tail + 1) % VARRAY_SIZE (cfg_blocks); } VARRAY_BB (cfg_blocks, cfg_blocks_tail) = bb; SET_BIT (bb_in_list, bb->index); } /* Remove a block from the worklist. */ static basic_block cfg_blocks_get (void) { basic_block bb; bb = VARRAY_BB (cfg_blocks, cfg_blocks_head); #ifdef ENABLE_CHECKING if (cfg_blocks_empty_p () || !bb) abort (); #endif cfg_blocks_head = (cfg_blocks_head + 1) % VARRAY_SIZE (cfg_blocks); --cfg_blocks_num; RESET_BIT (bb_in_list, bb->index); return bb; } /* We have just defined a new value for VAR. Add all immediate uses of VAR to the ssa_edges or varying_ssa_edges worklist. */ static void add_var_to_ssa_edges_worklist (tree var, value val) { tree stmt = SSA_NAME_DEF_STMT (var); dataflow_t df = get_immediate_uses (stmt); int num_uses = num_immediate_uses (df); int i; for (i = 0; i < num_uses; i++) { tree use = immediate_use (df, i); if (!DONT_SIMULATE_AGAIN (use)) { stmt_ann_t ann = stmt_ann (use); if (ann->in_ccp_worklist == 0) { ann->in_ccp_worklist = 1; if (val.lattice_val == VARYING) VARRAY_PUSH_TREE (varying_ssa_edges, use); else VARRAY_PUSH_TREE (ssa_edges, use); } } } } /* Set the lattice value for the variable VAR to VARYING. */ static void def_to_varying (tree var) { value val; val.lattice_val = VARYING; val.const_val = NULL_TREE; set_lattice_value (var, val); } /* Set the lattice value for variable VAR to VAL. 
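Values may only move down the lattice, roughly UNDEFINED to CONSTANT to VARYING; whenever the value of VAR actually changes, the statements that use VAR are pushed onto the appropriate SSA edge worklist so that they get reevaluated.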
*/ static void set_lattice_value (tree var, value val) { value *old = get_value (var); #ifdef ENABLE_CHECKING if (val.lattice_val == UNDEFINED) { /* CONSTANT->UNDEFINED is never a valid state transition. */ if (old->lattice_val == CONSTANT) abort (); /* VARYING->UNDEFINED is generally not a valid state transition, except for values which are initialized to VARYING. */ if (old->lattice_val == VARYING && get_default_value (var).lattice_val != VARYING) abort (); } else if (val.lattice_val == CONSTANT) { /* VARYING -> CONSTANT is an invalid state transition, except for objects which start off in a VARYING state. */ if (old->lattice_val == VARYING && get_default_value (var).lattice_val != VARYING) abort (); } #endif /* If the constant for VAR has changed, then this VAR is really varying. */ if (old->lattice_val == CONSTANT && val.lattice_val == CONSTANT && !simple_cst_equal (old->const_val, val.const_val)) { val.lattice_val = VARYING; val.const_val = NULL_TREE; } if (old->lattice_val != val.lattice_val) { if (dump_file && (dump_flags & TDF_DETAILS)) { dump_lattice_value (dump_file, "Lattice value changed to ", val); fprintf (dump_file, ". Adding definition to SSA edges.\n"); } add_var_to_ssa_edges_worklist (var, val); *old = val; } } /* Replace USE references in statement STMT with their immediate reaching definition. Return true if at least one reference was replaced. If REPLACED_ADDRESSES_P is given, it will be set to true if an address constant was replaced. */ static bool replace_uses_in (tree stmt, bool *replaced_addresses_p) { bool replaced = false; use_optype uses; size_t i; if (replaced_addresses_p) *replaced_addresses_p = false; get_stmt_operands (stmt); uses = STMT_USE_OPS (stmt); for (i = 0; i < NUM_USES (uses); i++) { use_operand_p use = USE_OP_PTR (uses, i); value *val = get_value (USE_FROM_PTR (use)); if (val->lattice_val == CONSTANT) { SET_USE (use, val->const_val); replaced = true; if (POINTER_TYPE_P (TREE_TYPE (USE_FROM_PTR (use))) && replaced_addresses_p) *replaced_addresses_p = true; } } return replaced; } /* Return the likely latticevalue for STMT. If STMT has no operands, then return CONSTANT. Else if any operands of STMT are undefined, then return UNDEFINED. Else if any operands of STMT are constants, then return CONSTANT. Else return VARYING. */ static latticevalue likely_value (tree stmt) { use_optype uses; size_t i; int found_constant = 0; stmt_ann_t ann; /* If the statement makes aliased loads or has volatile operands, it won't fold to a constant value. */ ann = stmt_ann (stmt); if (ann->makes_aliased_loads || ann->has_volatile_ops) return VARYING; /* A CALL_EXPR is assumed to be varying. This may be overly conservative, in the presence of const and pure calls. */ if (get_call_expr_in (stmt) != NULL_TREE) return VARYING; get_stmt_operands (stmt); uses = USE_OPS (ann); for (i = 0; i < NUM_USES (uses); i++) { tree use = USE_OP (uses, i); value *val = get_value (use); if (val->lattice_val == UNDEFINED) return UNDEFINED; if (val->lattice_val == CONSTANT) found_constant = 1; } return ((found_constant || !uses) ? CONSTANT : VARYING); } /* A subroutine of fold_stmt_r. Attempts to fold *(A+O) to A[X]. BASE is an array type. OFFSET is a byte displacement. ORIG_TYPE is the desired result type. 
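For example, given 'int a[10]' on a target where an 'int' is four bytes wide, the reference *(&a + 8) is rewritten as a[2].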
*/ static tree maybe_fold_offset_to_array_ref (tree base, tree offset, tree orig_type) { tree min_idx, idx, elt_offset = integer_zero_node; tree array_type, elt_type, elt_size; /* If BASE is an ARRAY_REF, we can pick up another offset (this time measured in units of the size of elements type) from that ARRAY_REF). We can't do anything if either is variable. The case we handle here is *(&A[N]+O). */ if (TREE_CODE (base) == ARRAY_REF) { tree low_bound = array_ref_low_bound (base); elt_offset = TREE_OPERAND (base, 1); if (TREE_CODE (low_bound) != INTEGER_CST || TREE_CODE (elt_offset) != INTEGER_CST) return NULL_TREE; elt_offset = int_const_binop (MINUS_EXPR, elt_offset, low_bound, 0); base = TREE_OPERAND (base, 0); } /* Ignore stupid user tricks of indexing non-array variables. */ array_type = TREE_TYPE (base); if (TREE_CODE (array_type) != ARRAY_TYPE) return NULL_TREE; elt_type = TREE_TYPE (array_type); if (!lang_hooks.types_compatible_p (orig_type, elt_type)) return NULL_TREE; /* If OFFSET and ELT_OFFSET are zero, we don't care about the size of the element type (so we can use the alignment if it's not constant). Otherwise, compute the offset as an index by using a division. If the division isn't exact, then don't do anything. */ elt_size = TYPE_SIZE_UNIT (elt_type); if (integer_zerop (offset)) { if (TREE_CODE (elt_size) != INTEGER_CST) elt_size = size_int (TYPE_ALIGN (elt_type)); idx = integer_zero_node; } else { unsigned HOST_WIDE_INT lquo, lrem; HOST_WIDE_INT hquo, hrem; if (TREE_CODE (elt_size) != INTEGER_CST || div_and_round_double (TRUNC_DIV_EXPR, 1, TREE_INT_CST_LOW (offset), TREE_INT_CST_HIGH (offset), TREE_INT_CST_LOW (elt_size), TREE_INT_CST_HIGH (elt_size), &lquo, &hquo, &lrem, &hrem) || lrem || hrem) return NULL_TREE; idx = build_int_2_wide (lquo, hquo); } /* Assume the low bound is zero. If there is a domain type, get the low bound, if any, convert the index into that type, and add the low bound. */ min_idx = integer_zero_node; if (TYPE_DOMAIN (array_type)) { if (TYPE_MIN_VALUE (TYPE_DOMAIN (array_type))) min_idx = TYPE_MIN_VALUE (TYPE_DOMAIN (array_type)); else min_idx = fold_convert (TYPE_DOMAIN (array_type), min_idx); if (TREE_CODE (min_idx) != INTEGER_CST) return NULL_TREE; idx = fold_convert (TYPE_DOMAIN (array_type), idx); elt_offset = fold_convert (TYPE_DOMAIN (array_type), elt_offset); } if (!integer_zerop (min_idx)) idx = int_const_binop (PLUS_EXPR, idx, min_idx, 0); if (!integer_zerop (elt_offset)) idx = int_const_binop (PLUS_EXPR, idx, elt_offset, 0); return build (ARRAY_REF, orig_type, base, idx, min_idx, size_int (tree_low_cst (elt_size, 1) / (TYPE_ALIGN (elt_type) / BITS_PER_UNIT))); } /* A subroutine of fold_stmt_r. Attempts to fold *(S+O) to S.X. BASE is a record type. OFFSET is a byte displacement. ORIG_TYPE is the desired result type. */ /* ??? This doesn't handle class inheritance. */ static tree maybe_fold_offset_to_component_ref (tree record_type, tree base, tree offset, tree orig_type, bool base_is_ptr) { tree f, t, field_type, tail_array_field; if (TREE_CODE (record_type) != RECORD_TYPE && TREE_CODE (record_type) != UNION_TYPE && TREE_CODE (record_type) != QUAL_UNION_TYPE) return NULL_TREE; /* Short-circuit silly cases. */ if (lang_hooks.types_compatible_p (record_type, orig_type)) return NULL_TREE; tail_array_field = NULL_TREE; for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f)) { int cmp; if (TREE_CODE (f) != FIELD_DECL) continue; if (DECL_BIT_FIELD (f)) continue; if (TREE_CODE (DECL_FIELD_OFFSET (f)) != INTEGER_CST) continue; /* ??? 
Java creates "interesting" fields for representing base classes. They have no name, and have no context. With no context, we get into trouble with nonoverlapping_component_refs_p. Skip them. */ if (!DECL_FIELD_CONTEXT (f)) continue; /* The previous array field isn't at the end. */ tail_array_field = NULL_TREE; /* Check to see if this offset overlaps with the field. */ cmp = tree_int_cst_compare (DECL_FIELD_OFFSET (f), offset); if (cmp > 0) continue; field_type = TREE_TYPE (f); if (cmp < 0) { /* Don't care about offsets into the middle of scalars. */ if (!AGGREGATE_TYPE_P (field_type)) continue; /* Check for array at the end of the struct. This is often used as for flexible array members. We should be able to turn this into an array access anyway. */ if (TREE_CODE (field_type) == ARRAY_TYPE) tail_array_field = f; /* Check the end of the field against the offset. */ if (!DECL_SIZE_UNIT (f) || TREE_CODE (DECL_SIZE_UNIT (f)) != INTEGER_CST) continue; t = int_const_binop (MINUS_EXPR, offset, DECL_FIELD_OFFSET (f), 1); if (!tree_int_cst_lt (t, DECL_SIZE_UNIT (f))) continue; /* If we matched, then set offset to the displacement into this field. */ offset = t; } /* Here we exactly match the offset being checked. If the types match, then we can return that field. */ else if (lang_hooks.types_compatible_p (orig_type, field_type)) { if (base_is_ptr) base = build1 (INDIRECT_REF, record_type, base); t = build (COMPONENT_REF, field_type, base, f, NULL_TREE); return t; } /* Don't care about type-punning of scalars. */ else if (!AGGREGATE_TYPE_P (field_type)) return NULL_TREE; goto found; } if (!tail_array_field) return NULL_TREE; f = tail_array_field; field_type = TREE_TYPE (f); found: /* If we get here, we've got an aggregate field, and a possibly nonzero offset into them. Recurse and hope for a valid match. */ if (base_is_ptr) base = build1 (INDIRECT_REF, record_type, base); base = build (COMPONENT_REF, field_type, base, f, NULL_TREE); t = maybe_fold_offset_to_array_ref (base, offset, orig_type); if (t) return t; return maybe_fold_offset_to_component_ref (field_type, base, offset, orig_type, false); } /* A subroutine of fold_stmt_r. Attempt to simplify *(BASE+OFFSET). Return the simplified expression, or NULL if nothing could be done. */ static tree maybe_fold_stmt_indirect (tree expr, tree base, tree offset) { tree t; /* We may well have constructed a double-nested PLUS_EXPR via multiple substitutions. Fold that down to one. Remove NON_LVALUE_EXPRs that are sometimes added. */ base = fold (base); STRIP_NOPS (base); TREE_OPERAND (expr, 0) = base; /* One possibility is that the address reduces to a string constant. */ t = fold_read_from_constant_string (expr); if (t) return t; /* Add in any offset from a PLUS_EXPR. */ if (TREE_CODE (base) == PLUS_EXPR) { tree offset2; offset2 = TREE_OPERAND (base, 1); if (TREE_CODE (offset2) != INTEGER_CST) return NULL_TREE; base = TREE_OPERAND (base, 0); offset = int_const_binop (PLUS_EXPR, offset, offset2, 1); } if (TREE_CODE (base) == ADDR_EXPR) { /* Strip the ADDR_EXPR. */ base = TREE_OPERAND (base, 0); /* Try folding *(&B+O) to B[X]. */ t = maybe_fold_offset_to_array_ref (base, offset, TREE_TYPE (expr)); if (t) return t; /* Try folding *(&B+O) to B.X. */ t = maybe_fold_offset_to_component_ref (TREE_TYPE (base), base, offset, TREE_TYPE (expr), false); if (t) return t; /* Fold *&B to B. We can only do this if EXPR is the same type as BASE. We can't do this if EXPR is the element type of an array and BASE is the array. 
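For instance, *&i folds to plain i when both the dereference and i have type 'int', but an 'int'-typed load through &a must not become the array a itself when a is declared 'int a[10]'.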
*/ if (integer_zerop (offset) && lang_hooks.types_compatible_p (TREE_TYPE (base), TREE_TYPE (expr))) return base; } else { /* We can get here for out-of-range string constant accesses, such as "_"[3]. Bail out of the entire substitution search and arrange for the entire statement to be replaced by a call to __builtin_trap. In all likelyhood this will all be constant-folded away, but in the meantime we can't leave with something that get_expr_operands can't understand. */ t = base; STRIP_NOPS (t); if (TREE_CODE (t) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == STRING_CST) { /* FIXME: Except that this causes problems elsewhere with dead code not being deleted, and we abort in the rtl expanders because we failed to remove some ssa_name. In the meantime, just return zero. */ /* FIXME2: This condition should be signaled by fold_read_from_constant_string directly, rather than re-checking for it here. */ return integer_zero_node; } /* Try folding *(B+O) to B->X. Still an improvement. */ if (POINTER_TYPE_P (TREE_TYPE (base))) { t = maybe_fold_offset_to_component_ref (TREE_TYPE (TREE_TYPE (base)), base, offset, TREE_TYPE (expr), true); if (t) return t; } } /* Otherwise we had an offset that we could not simplify. */ return NULL_TREE; } /* A subroutine of fold_stmt_r. EXPR is a PLUS_EXPR. A quaint feature extant in our address arithmetic is that there can be hidden type changes here. The type of the result need not be the same as the type of the input pointer. What we're after here is an expression of the form (T *)(&array + const) where the cast doesn't actually exist, but is implicit in the type of the PLUS_EXPR. We'd like to turn this into &array[x] which may be able to propagate further. */ static tree maybe_fold_stmt_addition (tree expr) { tree op0 = TREE_OPERAND (expr, 0); tree op1 = TREE_OPERAND (expr, 1); tree ptr_type = TREE_TYPE (expr); tree ptd_type; tree t; bool subtract = (TREE_CODE (expr) == MINUS_EXPR); /* We're only interested in pointer arithmetic. */ if (!POINTER_TYPE_P (ptr_type)) return NULL_TREE; /* Canonicalize the integral operand to op1. */ if (INTEGRAL_TYPE_P (TREE_TYPE (op0))) { if (subtract) return NULL_TREE; t = op0, op0 = op1, op1 = t; } /* It had better be a constant. */ if (TREE_CODE (op1) != INTEGER_CST) return NULL_TREE; /* The first operand should be an ADDR_EXPR. */ if (TREE_CODE (op0) != ADDR_EXPR) return NULL_TREE; op0 = TREE_OPERAND (op0, 0); /* If the first operand is an ARRAY_REF, expand it so that we can fold the offset into it. */ while (TREE_CODE (op0) == ARRAY_REF) { tree array_obj = TREE_OPERAND (op0, 0); tree array_idx = TREE_OPERAND (op0, 1); tree elt_type = TREE_TYPE (op0); tree elt_size = TYPE_SIZE_UNIT (elt_type); tree min_idx; if (TREE_CODE (array_idx) != INTEGER_CST) break; if (TREE_CODE (elt_size) != INTEGER_CST) break; /* Un-bias the index by the min index of the array type. */ min_idx = TYPE_DOMAIN (TREE_TYPE (array_obj)); if (min_idx) { min_idx = TYPE_MIN_VALUE (min_idx); if (min_idx) { if (TREE_CODE (min_idx) != INTEGER_CST) break; array_idx = convert (TREE_TYPE (min_idx), array_idx); if (!integer_zerop (min_idx)) array_idx = int_const_binop (MINUS_EXPR, array_idx, min_idx, 0); } } /* Convert the index to a byte offset. */ array_idx = convert (sizetype, array_idx); array_idx = int_const_binop (MULT_EXPR, array_idx, elt_size, 0); /* Update the operands for the next round, or for folding. */ /* If we're manipulating unsigned types, then folding into negative values can produce incorrect results. 
Particularly if the type is smaller than the width of the pointer. */ if (subtract && TYPE_UNSIGNED (TREE_TYPE (op1)) && tree_int_cst_lt (array_idx, op1)) return NULL; op1 = int_const_binop (subtract ? MINUS_EXPR : PLUS_EXPR, array_idx, op1, 0); subtract = false; op0 = array_obj; } /* If we weren't able to fold the subtraction into another array reference, canonicalize the integer for passing to the array and component ref simplification functions. */ if (subtract) { if (TYPE_UNSIGNED (TREE_TYPE (op1))) return NULL; op1 = fold (build1 (NEGATE_EXPR, TREE_TYPE (op1), op1)); /* ??? In theory fold should always produce another integer. */ if (TREE_CODE (op1) != INTEGER_CST) return NULL; } ptd_type = TREE_TYPE (ptr_type); /* At which point we can try some of the same things as for indirects. */ t = maybe_fold_offset_to_array_ref (op0, op1, ptd_type); if (!t) t = maybe_fold_offset_to_component_ref (TREE_TYPE (op0), op0, op1, ptd_type, false); if (t) t = build1 (ADDR_EXPR, ptr_type, t); return t; } /* Subroutine of fold_stmt called via walk_tree. We perform several simplifications of EXPR_P, mostly having to do with pointer arithmetic. */ static tree fold_stmt_r (tree *expr_p, int *walk_subtrees, void *data) { bool *changed_p = data; tree expr = *expr_p, t; /* ??? It'd be nice if walk_tree had a pre-order option. */ switch (TREE_CODE (expr)) { case INDIRECT_REF: t = walk_tree (&TREE_OPERAND (expr, 0), fold_stmt_r, data, NULL); if (t) return t; *walk_subtrees = 0; t = maybe_fold_stmt_indirect (expr, TREE_OPERAND (expr, 0), integer_zero_node); break; /* ??? Could handle ARRAY_REF here, as a variant of INDIRECT_REF. We'd only want to bother decomposing an existing ARRAY_REF if the base array is found to have another offset contained within. Otherwise we'd be wasting time. */ case ADDR_EXPR: t = walk_tree (&TREE_OPERAND (expr, 0), fold_stmt_r, data, NULL); if (t) return t; *walk_subtrees = 0; /* Set TREE_INVARIANT properly so that the value is properly considered constant, and so gets propagated as expected. */ if (*changed_p) recompute_tree_invarant_for_addr_expr (expr); return NULL_TREE; case PLUS_EXPR: case MINUS_EXPR: t = walk_tree (&TREE_OPERAND (expr, 0), fold_stmt_r, data, NULL); if (t) return t; t = walk_tree (&TREE_OPERAND (expr, 1), fold_stmt_r, data, NULL); if (t) return t; *walk_subtrees = 0; t = maybe_fold_stmt_addition (expr); break; case COMPONENT_REF: t = walk_tree (&TREE_OPERAND (expr, 0), fold_stmt_r, data, NULL); if (t) return t; *walk_subtrees = 0; /* Make sure the FIELD_DECL is actually a field in the type on the lhs. In cases with IMA it is possible that it came from another, equivalent type at this point. We have already checked the equivalence in this case. Match on type plus offset, to allow for unnamed fields. We won't necessarily get the corresponding field for unions; this is believed to be harmless. */ if ((current_file_decl && TREE_CHAIN (current_file_decl)) && (DECL_FIELD_CONTEXT (TREE_OPERAND (expr, 1)) != TREE_TYPE (TREE_OPERAND (expr, 0)))) { tree f; tree orig_field = TREE_OPERAND (expr, 1); tree orig_type = TREE_TYPE (orig_field); for (f = TYPE_FIELDS (TREE_TYPE (TREE_OPERAND (expr, 0))); f; f = TREE_CHAIN (f)) { if (lang_hooks.types_compatible_p (TREE_TYPE (f), orig_type) && tree_int_cst_compare (DECL_FIELD_BIT_OFFSET (f), DECL_FIELD_BIT_OFFSET (orig_field)) == 0 && tree_int_cst_compare (DECL_FIELD_OFFSET (f), DECL_FIELD_OFFSET (orig_field)) == 0) { TREE_OPERAND (expr, 1) = f; break; } } /* Fall through is an error; it will be detected in tree-sra. 
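Falling through means that no field of the type actually on the LHS matched the original FIELD_DECL's type and offsets, so the COMPONENT_REF is left referring to a field of a foreign, though equivalent, type.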
*/ } break; default: return NULL_TREE; } if (t) { *expr_p = t; *changed_p = true; } return NULL_TREE; } /* Fold the statement pointed by STMT_P. In some cases, this function may replace the whole statement with a new one. Returns true iff folding makes any changes. */ bool fold_stmt (tree *stmt_p) { tree rhs, result, stmt; bool changed = false; stmt = *stmt_p; /* If we replaced constants and the statement makes pointer dereferences, then we may need to fold instances of *&VAR into VAR, etc. */ if (walk_tree (stmt_p, fold_stmt_r, &changed, NULL)) { *stmt_p = build_function_call_expr (implicit_built_in_decls[BUILT_IN_TRAP], NULL); return true; } rhs = get_rhs (stmt); if (!rhs) return changed; result = NULL_TREE; if (TREE_CODE (rhs) == CALL_EXPR) { tree callee; /* Check for builtins that CCP can handle using information not available in the generic fold routines. */ callee = get_callee_fndecl (rhs); if (callee && DECL_BUILT_IN (callee)) result = ccp_fold_builtin (stmt, rhs); else { /* Check for resolvable OBJ_TYPE_REF. The only sorts we can resolve here are when we've propagated the address of a decl into the object slot. */ /* ??? Should perhaps do this in fold proper. However, doing it there requires that we create a new CALL_EXPR, and that requires copying EH region info to the new node. Easier to just do it here where we can just smash the call operand. */ callee = TREE_OPERAND (rhs, 0); if (TREE_CODE (callee) == OBJ_TYPE_REF && lang_hooks.fold_obj_type_ref && TREE_CODE (OBJ_TYPE_REF_OBJECT (callee)) == ADDR_EXPR && DECL_P (TREE_OPERAND (OBJ_TYPE_REF_OBJECT (callee), 0))) { tree t; t = TREE_TYPE (TREE_OPERAND (OBJ_TYPE_REF_OBJECT (callee), 0)); t = lang_hooks.fold_obj_type_ref (callee, t); if (t) { TREE_OPERAND (rhs, 0) = t; changed = true; } } } } /* If we couldn't fold the RHS, hand over to the generic fold routines. */ if (result == NULL_TREE) result = fold (rhs); /* Strip away useless type conversions. Both the NON_LVALUE_EXPR that may have been added by fold, and "useless" type conversions that might now be apparent due to propagation. */ STRIP_USELESS_TYPE_CONVERSION (result); if (result != rhs) changed |= set_rhs (stmt_p, result); return changed; } /* Get the main expression from statement STMT. */ static tree get_rhs (tree stmt) { enum tree_code code = TREE_CODE (stmt); if (code == MODIFY_EXPR) return TREE_OPERAND (stmt, 1); if (code == COND_EXPR) return COND_EXPR_COND (stmt); else if (code == SWITCH_EXPR) return SWITCH_COND (stmt); else if (code == RETURN_EXPR) { if (!TREE_OPERAND (stmt, 0)) return NULL_TREE; if (TREE_CODE (TREE_OPERAND (stmt, 0)) == MODIFY_EXPR) return TREE_OPERAND (TREE_OPERAND (stmt, 0), 1); else return TREE_OPERAND (stmt, 0); } else if (code == GOTO_EXPR) return GOTO_DESTINATION (stmt); else if (code == LABEL_EXPR) return LABEL_EXPR_LABEL (stmt); else return stmt; } /* Set the main expression of *STMT_P to EXPR. */ static bool set_rhs (tree *stmt_p, tree expr) { tree stmt = *stmt_p; enum tree_code code = TREE_CODE (expr); /* Verify the constant folded result is valid gimple. 
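As a purely illustrative (hypothetical) example, a folded binary result such as 'a_1 + 4' is acceptable because both operands are GIMPLE values, whereas 'a_1 + (b_2 * 2)' is not, since its second operand is a nested expression rather than a GIMPLE value.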
*/ if (TREE_CODE_CLASS (code) == '2') { if (!is_gimple_val (TREE_OPERAND (expr, 0)) || !is_gimple_val (TREE_OPERAND (expr, 1))) return false; } else if (TREE_CODE_CLASS (code) == '1') { if (!is_gimple_val (TREE_OPERAND (expr, 0))) return false; } code = TREE_CODE (stmt); if (code == MODIFY_EXPR) TREE_OPERAND (stmt, 1) = expr; else if (code == COND_EXPR) COND_EXPR_COND (stmt) = expr; else if (code == SWITCH_EXPR) SWITCH_COND (stmt) = expr; else if (code == RETURN_EXPR) { if (TREE_OPERAND (stmt, 0) && TREE_CODE (TREE_OPERAND (stmt, 0)) == MODIFY_EXPR) TREE_OPERAND (TREE_OPERAND (stmt, 0), 1) = expr; else TREE_OPERAND (stmt, 0) = expr; } else if (code == GOTO_EXPR) GOTO_DESTINATION (stmt) = expr; else if (code == LABEL_EXPR) LABEL_EXPR_LABEL (stmt) = expr; else { /* Replace the whole statement with EXPR. If EXPR has no side effects, then replace *STMT_P with an empty statement. */ stmt_ann_t ann = stmt_ann (stmt); *stmt_p = TREE_SIDE_EFFECTS (expr) ? expr : build_empty_stmt (); (*stmt_p)->common.ann = (tree_ann_t) ann; if (TREE_SIDE_EFFECTS (expr)) { def_optype defs; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; size_t i; /* Fix all the SSA_NAMEs created by *STMT_P to point to its new replacement. */ defs = DEF_OPS (ann); for (i = 0; i < NUM_DEFS (defs); i++) { tree var = DEF_OP (defs, i); if (TREE_CODE (var) == SSA_NAME) SSA_NAME_DEF_STMT (var) = *stmt_p; } v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { tree var = V_MAY_DEF_RESULT (v_may_defs, i); if (TREE_CODE (var) == SSA_NAME) SSA_NAME_DEF_STMT (var) = *stmt_p; } v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { tree var = V_MUST_DEF_OP (v_must_defs, i); if (TREE_CODE (var) == SSA_NAME) SSA_NAME_DEF_STMT (var) = *stmt_p; } } } return true; } /* Return a default value for variable VAR using the following rules: 1- Global and static variables are considered VARYING, unless they are declared const. 2- Function arguments are considered VARYING. 3- Any other value is considered UNDEFINED. This is useful when considering PHI nodes. PHI arguments that are undefined do not change the constant value of the PHI node, which allows for more constants to be propagated. */ static value get_default_value (tree var) { value val; tree sym; if (TREE_CODE (var) == SSA_NAME) sym = SSA_NAME_VAR (var); else { #ifdef ENABLE_CHECKING if (!DECL_P (var)) abort (); #endif sym = var; } val.lattice_val = UNDEFINED; val.const_val = NULL_TREE; if (TREE_CODE (sym) == PARM_DECL || TREE_THIS_VOLATILE (sym)) { /* Function arguments and volatile variables are considered VARYING. */ val.lattice_val = VARYING; } else if (decl_function_context (sym) != current_function_decl || TREE_STATIC (sym)) { /* Globals and static variables are considered VARYING, unless they are declared 'const'. */ val.lattice_val = VARYING; if (TREE_READONLY (sym) && DECL_INITIAL (sym) && is_gimple_min_invariant (DECL_INITIAL (sym))) { val.lattice_val = CONSTANT; val.const_val = DECL_INITIAL (sym); } } else { enum tree_code code; tree stmt = SSA_NAME_DEF_STMT (var); if (!IS_EMPTY_STMT (stmt)) { code = TREE_CODE (stmt); if (code != MODIFY_EXPR && code != PHI_NODE) val.lattice_val = VARYING; } } return val; } /* Fold builtin call FN in statement STMT. If it cannot be folded into a constant, return NULL_TREE. Otherwise, return its constant value. 
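As a purely illustrative (hypothetical) example, if CCP has determined that the argument of a strlen call is the string constant "foo", the call can be folded to the constant 3; the actual simplification is done by fold_builtin and the simplify_builtin_* helpers invoked below.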
*/ static tree ccp_fold_builtin (tree stmt, tree fn) { tree result, strlen_val[2]; tree arglist = TREE_OPERAND (fn, 1), a; tree callee = get_callee_fndecl (fn); bitmap visited; int strlen_arg, i; /* Ignore MD builtins. */ if (DECL_BUILT_IN_CLASS (callee) == BUILT_IN_MD) return NULL_TREE; /* First try the generic builtin folder. If that succeeds, return the result directly. */ result = fold_builtin (fn); if (result) return result; /* If the builtin could not be folded, and it has no argument list, we're done. */ if (!arglist) return NULL_TREE; /* Limit the work only for builtins we know how to simplify. */ switch (DECL_FUNCTION_CODE (callee)) { case BUILT_IN_STRLEN: case BUILT_IN_FPUTS: case BUILT_IN_FPUTS_UNLOCKED: strlen_arg = 1; break; case BUILT_IN_STRCPY: case BUILT_IN_STRNCPY: strlen_arg = 2; break; default: return NULL_TREE; } /* Try to use the dataflow information gathered by the CCP process. */ visited = BITMAP_XMALLOC (); memset (strlen_val, 0, sizeof (strlen_val)); for (i = 0, a = arglist; strlen_arg; i++, strlen_arg >>= 1, a = TREE_CHAIN (a)) if (strlen_arg & 1) { bitmap_clear (visited); if (!get_strlen (TREE_VALUE (a), &strlen_val[i], visited)) strlen_val[i] = NULL_TREE; } BITMAP_XFREE (visited); /* FIXME. All this code looks dangerous in the sense that it might create non-gimple expressions. */ switch (DECL_FUNCTION_CODE (callee)) { case BUILT_IN_STRLEN: /* Convert from the internal "sizetype" type to "size_t". */ if (strlen_val[0] && size_type_node) { tree new = convert (size_type_node, strlen_val[0]); /* If the result is not a valid gimple value, or not a cast of a valid gimple value, then we can not use the result. */ if (is_gimple_val (new) || (is_gimple_cast (new) && is_gimple_val (TREE_OPERAND (new, 0)))) return new; else return NULL_TREE; } return strlen_val[0]; case BUILT_IN_STRCPY: if (strlen_val[1] && is_gimple_val (strlen_val[1])) return simplify_builtin_strcpy (arglist, strlen_val[1]); case BUILT_IN_STRNCPY: if (strlen_val[1] && is_gimple_val (strlen_val[1])) return simplify_builtin_strncpy (arglist, strlen_val[1]); case BUILT_IN_FPUTS: return simplify_builtin_fputs (arglist, TREE_CODE (stmt) != MODIFY_EXPR, 0, strlen_val[0]); case BUILT_IN_FPUTS_UNLOCKED: return simplify_builtin_fputs (arglist, TREE_CODE (stmt) != MODIFY_EXPR, 1, strlen_val[0]); default: abort (); } return NULL_TREE; } /* Return the string length of ARG in LENGTH. If ARG is an SSA name variable, follow its use-def chains. If LENGTH is not NULL and its value is not equal to the length we determine, or if we are unable to determine the length, return false. VISITED is a bitmap of visited variables. */ static bool get_strlen (tree arg, tree *length, bitmap visited) { tree var, def_stmt, val; if (TREE_CODE (arg) != SSA_NAME) { val = c_strlen (arg, 1); if (!val) return false; if (*length && simple_cst_equal (val, *length) != 1) return false; *length = val; return true; } /* If we were already here, break the infinite cycle. */ if (bitmap_bit_p (visited, SSA_NAME_VERSION (arg))) return true; bitmap_set_bit (visited, SSA_NAME_VERSION (arg)); var = arg; def_stmt = SSA_NAME_DEF_STMT (var); switch (TREE_CODE (def_stmt)) { case MODIFY_EXPR: { tree len, rhs; /* The RHS of the statement defining VAR must either have a constant length or come from another SSA_NAME with a constant length. */ rhs = TREE_OPERAND (def_stmt, 1); STRIP_NOPS (rhs); if (TREE_CODE (rhs) == SSA_NAME) return get_strlen (rhs, length, visited); /* See if the RHS is a constant length. 
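For example (hypothetically), if the defining statement assigns the string constant "hello", c_strlen will report a constant length of 5.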
*/ len = c_strlen (rhs, 1); if (len) { if (*length && simple_cst_equal (len, *length) != 1) return false; *length = len; return true; } break; } case PHI_NODE: { /* All the arguments of the PHI node must have the same constant length. */ int i; for (i = 0; i < PHI_NUM_ARGS (def_stmt); i++) { tree arg = PHI_ARG_DEF (def_stmt, i); /* If this PHI has itself as an argument, we cannot determine the string length of this argument. However, if we can find a constant string length for the other PHI args then we can still be sure that this is a constant string length. So be optimistic and just continue with the next argument. */ if (arg == PHI_RESULT (def_stmt)) continue; if (!get_strlen (arg, length, visited)) return false; } return true; } default: break; } return false; } /* A simple pass that attempts to fold all builtin functions. This pass is run after we've propagated as many constants as we can. */ static void execute_fold_all_builtins (void) { basic_block bb; FOR_EACH_BB (bb) { block_stmt_iterator i; for (i = bsi_start (bb); !bsi_end_p (i); bsi_next (&i)) { tree *stmtp = bsi_stmt_ptr (i); tree call = get_rhs (*stmtp); tree callee, result; if (!call || TREE_CODE (call) != CALL_EXPR) continue; callee = get_callee_fndecl (call); if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL) continue; result = ccp_fold_builtin (*stmtp, call); if (!result) switch (DECL_FUNCTION_CODE (callee)) { case BUILT_IN_CONSTANT_P: /* Resolve __builtin_constant_p. If it hasn't been folded to integer_one_node by now, it's fairly certain that the value simply isn't constant. */ result = integer_zero_node; break; default: continue; } if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Simplified\n "); print_generic_stmt (dump_file, *stmtp, dump_flags); } if (set_rhs (stmtp, result)) modify_stmt (*stmtp); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "to\n "); print_generic_stmt (dump_file, *stmtp, dump_flags); fprintf (dump_file, "\n"); } } } } struct tree_opt_pass pass_fold_builtins = { "fab", /* name */ NULL, /* gate */ execute_fold_all_builtins, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_verify_ssa /* todo_flags_finish */ }; /* Type information for tree-ssa-ccp.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. 
*/ const struct ggc_root_tab gt_ggc_r_gt_tree_ssa_ccp_h[] = { { &varying_ssa_edges, 1, sizeof (varying_ssa_edges), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &ssa_edges, 1, sizeof (ssa_edges), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &cfg_blocks, 1, sizeof (cfg_blocks), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, LAST_GGC_ROOT_TAB }; /* Value Numbering routines for tree expressions. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Daniel Berlin , Steven Bosscher and Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The value table that maps expressions to values. */ static htab_t value_table; /* Map expressions to values. These are simple pairs of expressions and the values they represent. To find the value represented by an expression, we use a hash table where the elements are {e,v} pairs, and the expression is the key. */ typedef struct val_expr_pair_d { /* Value handle. */ tree v; /* Associated expression. */ tree e; /* Virtual uses in E. */ vuse_optype vuses; /* E's hash value. */ hashval_t hashcode; } *val_expr_pair_t; static void set_value_handle (tree e, tree v); /* Create and return a new value handle node of type TYPE. */ static tree make_value_handle (tree type) { static unsigned int id = 0; tree vh; vh = build0 (VALUE_HANDLE, type); VALUE_HANDLE_ID (vh) = id++; return vh; } /* Given an expression EXPR, compute a hash value number using the code of the expression, its real operands and virtual operands (if any). VAL can be used to iterate by passing previous value numbers (it is used by iterative_hash_expr). VUSES is the set of virtual use operands associated with EXPR. It may be NULL if EXPR has no virtual operands. */ hashval_t vn_compute (tree expr, hashval_t val, vuse_optype vuses) { size_t i; #if defined ENABLE_CHECKING /* EXPR must not be a statement. We are only interested in value numbering expressions on the RHS of assignments. */ if (expr == NULL_TREE || (expr->common.ann && expr->common.ann->common.type == STMT_ANN)) abort (); #endif val = iterative_hash_expr (expr, val); /* If the expression has virtual uses, incorporate them into the hash value computed for EXPR. */ for (i = 0; i < NUM_VUSES (vuses); i++) val = iterative_hash_expr (VUSE_OP (vuses, i), val); return val; } /* Compare two expressions E1 and E2 and return true if they are equal. */ bool expressions_equal_p (tree e1, tree e2) { tree te1, te2; if (e1 == e2) return true; te1 = TREE_TYPE (e1); te2 = TREE_TYPE (e2); if (TREE_CODE (e1) == TREE_CODE (e2) && (te1 == te2 || lang_hooks.types_compatible_p (te1, te2)) && operand_equal_p (e1, e2, OEP_PURE_SAME)) return true; return false; } /* Hash a {v,e} pair that is pointed to by P. The hashcode is cached in the val_expr_pair, so we just return that.
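Caching it also means the hash table can recompute bucket positions, e.g. when it grows, without re-walking the expression and its virtual operands.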
*/ static hashval_t val_expr_pair_hash (const void *p) { const val_expr_pair_t ve = (val_expr_pair_t) p; return ve->hashcode; } /* Given two val_expr_pair_t's, return true if they represent the same expression, false otherwise. P1 and P2 should point to the val_expr_pair_t's to be compared. */ static int val_expr_pair_expr_eq (const void *p1, const void *p2) { const val_expr_pair_t ve1 = (val_expr_pair_t) p1; const val_expr_pair_t ve2 = (val_expr_pair_t) p2; if (expressions_equal_p (ve1->e, ve2->e)) return true; return false; } /* Set the value handle for expression E to value V */ static void set_value_handle (tree e, tree v) { if (TREE_CODE (e) == SSA_NAME) SSA_NAME_VALUE (e) = v; else if (EXPR_P (e) || DECL_P (e)) get_tree_ann (e)->common.value_handle = v; else if (is_gimple_min_invariant (e)) /* Do nothing. Constants are their own value handles. */ ; else abort (); } /* Insert EXPR into VALUE_TABLE with value VAL, and add expression EXPR to the value set for value VAL. VUSES represent the virtual use operands associated with EXPR (if any). They are used when computing the hash value for EXPR. */ void vn_add (tree expr, tree val, vuse_optype vuses) { void **slot; val_expr_pair_t new_pair; new_pair = xmalloc (sizeof (struct val_expr_pair_d)); new_pair->e = expr; new_pair->v = val; new_pair->vuses = vuses; new_pair->hashcode = vn_compute (expr, 0, vuses); slot = htab_find_slot_with_hash (value_table, new_pair, new_pair->hashcode, INSERT); if (*slot) free (*slot); *slot = (void *) new_pair; set_value_handle (expr, val); add_to_value (val, expr); } /* Search in VALUE_TABLE for an existing instance of expression EXPR, and return its value, or NULL if none has been set. VUSES represent the virtual use operands associated with EXPR (if any). They are used when computing the hash value for EXPR. */ tree vn_lookup (tree expr, vuse_optype vuses) { void **slot; struct val_expr_pair_d vep = {NULL, NULL, NULL, 0}; /* Constants are their own value. */ if (is_gimple_min_invariant (expr)) return expr; vep.e = expr; vep.vuses = vuses; vep.hashcode = vn_compute (expr, 0, vuses); slot = htab_find_slot_with_hash (value_table, &vep, vep.hashcode, NO_INSERT); if (!slot) return NULL_TREE; else return ((val_expr_pair_t) *slot)->v; } /* Like vn_lookup, but creates a new value for expression EXPR, if EXPR doesn't already have a value. Return the existing/created value for EXPR. VUSES represent the virtual use operands associated with EXPR (if any). They are used when computing the hash value for EXPR. */ tree vn_lookup_or_add (tree expr, vuse_optype vuses) { tree v = vn_lookup (expr, vuses); if (v == NULL_TREE) { v = make_value_handle (TREE_TYPE (expr)); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Created value "); print_generic_expr (dump_file, v, dump_flags); fprintf (dump_file, " for "); print_generic_expr (dump_file, expr, dump_flags); fprintf (dump_file, "\n"); } vn_add (expr, v, vuses); } set_value_handle (expr, v); return v; } /* Get the value handle of EXPR. This is the only correct way to get the value handle for a "thing". If EXPR does not have a value handle associated, it returns NULL_TREE. */ tree get_value_handle (tree expr) { if (TREE_CODE (expr) == SSA_NAME) return SSA_NAME_VALUE (expr); else if (EXPR_P (expr) || DECL_P (expr)) { tree_ann_t ann = tree_ann (expr); return ((ann) ? ann->common.value_handle : NULL_TREE); } else if (is_gimple_min_invariant (expr)) return expr; abort (); } /* Initialize data structures used in value numbering. 
*/ void vn_init (void) { value_table = htab_create (511, val_expr_pair_hash, val_expr_pair_expr_eq, free); } /* Delete data used for value numbering. */ void vn_delete (void) { htab_delete (value_table); value_table = NULL; } /* Dead code elimination pass for the GNU compiler. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Ben Elliston and Andrew MacLeod Adapted to use control dependence by Steven Bosscher, SUSE Labs. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Dead code elimination. References: Building an Optimizing Compiler, Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9. Advanced Compiler Design and Implementation, Steven Muchnick, Morgan Kaufmann, 1997, Section 18.10. Dead-code elimination is the removal of statements which have no impact on the program's output. "Dead statements" have no impact on the program's output, while "necessary statements" may have impact on the output. The algorithm consists of three phases: 1. Marking as necessary all statements known to be necessary, e.g. most function calls, writing a value to memory, etc; 2. Propagating necessary statements, e.g., the statements giving values to operands in necessary statements; and 3. Removing dead statements. */ /* These RTL headers are needed for basic-block.h. */ static struct stmt_stats { int total; int total_phis; int removed; int removed_phis; } stats; static varray_type worklist; /* Vector indicating an SSA name has already been processed and marked as necessary. */ static sbitmap processed; /* Vector indicating that last_stmt of a basic block has already been marked as necessary. */ static sbitmap last_stmt_necessary; /* Before we can determine whether a control branch is dead, we need to compute which blocks are control dependent on which edges. We expect each block to be control dependent on very few edges so we use a bitmap for each block recording its edges. An array holds the bitmap. The Ith bit in the bitmap is set if that block is dependent on the Ith edge. */ bitmap *control_dependence_map; /* Execute CODE for each edge (given number EDGE_NUMBER within the CODE) for which the block with index N is control dependent. */ #define EXECUTE_IF_CONTROL_DEPENDENT(N, EDGE_NUMBER, CODE) \ EXECUTE_IF_SET_IN_BITMAP (control_dependence_map[N], 0, EDGE_NUMBER, CODE) /* Local function prototypes.
*/ static inline void set_control_dependence_map_bit (basic_block, int); static inline void clear_control_dependence_bitmap (basic_block); static void find_all_control_dependences (struct edge_list *); static void find_control_dependence (struct edge_list *, int); static inline basic_block find_pdom (basic_block); static inline void mark_stmt_necessary (tree, bool); static inline void mark_operand_necessary (tree); static bool need_to_preserve_store (tree); static void mark_stmt_if_obviously_necessary (tree, bool); static void find_obviously_necessary_stmts (struct edge_list *); static void mark_control_dependent_edges_necessary (basic_block, struct edge_list *); static void propagate_necessity (struct edge_list *); static void eliminate_unnecessary_stmts (void); static void remove_dead_phis (basic_block); static void remove_dead_stmt (block_stmt_iterator *, basic_block); static void print_stats (void); static void tree_dce_init (bool); static void tree_dce_done (bool); /* Indicate block BB is control dependent on an edge with index EDGE_INDEX. */ static inline void set_control_dependence_map_bit (basic_block bb, int edge_index) { if (bb == ENTRY_BLOCK_PTR) return; if (bb == EXIT_BLOCK_PTR) abort (); bitmap_set_bit (control_dependence_map[bb->index], edge_index); } /* Clear all control dependences for block BB. */ static inline void clear_control_dependence_bitmap (basic_block bb) { bitmap_clear (control_dependence_map[bb->index]); } /* Record all blocks' control dependences on all edges in the edge list EL, ala Morgan, Section 3.6. */ static void find_all_control_dependences (struct edge_list *el) { int i; for (i = 0; i < NUM_EDGES (el); ++i) find_control_dependence (el, i); } /* Determine all blocks' control dependences on the given edge with edge_list EL index EDGE_INDEX, ala Morgan, Section 3.6. */ static void find_control_dependence (struct edge_list *el, int edge_index) { basic_block current_block; basic_block ending_block; #ifdef ENABLE_CHECKING if (INDEX_EDGE_PRED_BB (el, edge_index) == EXIT_BLOCK_PTR) abort (); #endif if (INDEX_EDGE_PRED_BB (el, edge_index) == ENTRY_BLOCK_PTR) ending_block = ENTRY_BLOCK_PTR->next_bb; else ending_block = find_pdom (INDEX_EDGE_PRED_BB (el, edge_index)); for (current_block = INDEX_EDGE_SUCC_BB (el, edge_index); current_block != ending_block && current_block != EXIT_BLOCK_PTR; current_block = find_pdom (current_block)) { edge e = INDEX_EDGE (el, edge_index); /* For abnormal edges, we don't make current_block control dependent because instructions that throw are always necessary anyway. */ if (e->flags & EDGE_ABNORMAL) continue; set_control_dependence_map_bit (current_block, edge_index); } } /* Find the immediate postdominator PDOM of the specified basic block BLOCK. This function is necessary because some blocks have negative numbers. */ static inline basic_block find_pdom (basic_block block) { if (block == ENTRY_BLOCK_PTR) abort (); else if (block == EXIT_BLOCK_PTR) return EXIT_BLOCK_PTR; else { basic_block bb = get_immediate_dominator (CDI_POST_DOMINATORS, block); if (! bb) return EXIT_BLOCK_PTR; return bb; } } #define NECESSARY(stmt) stmt->common.asm_written_flag /* If STMT is not already marked necessary, mark it, and add it to the worklist if ADD_TO_WORKLIST is true. 
*/ static inline void mark_stmt_necessary (tree stmt, bool add_to_worklist) { #ifdef ENABLE_CHECKING if (stmt == NULL || stmt == error_mark_node || (stmt && DECL_P (stmt))) abort (); #endif if (NECESSARY (stmt)) return; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Marking useful stmt: "); print_generic_stmt (dump_file, stmt, TDF_SLIM); fprintf (dump_file, "\n"); } NECESSARY (stmt) = 1; if (add_to_worklist) VARRAY_PUSH_TREE (worklist, stmt); } /* Mark the statement defining operand OP as necessary. */ static inline void mark_operand_necessary (tree op) { tree stmt; int ver; #ifdef ENABLE_CHECKING if (op == NULL) abort (); #endif ver = SSA_NAME_VERSION (op); if (TEST_BIT (processed, ver)) return; SET_BIT (processed, ver); stmt = SSA_NAME_DEF_STMT (op); #ifdef ENABLE_CHECKING if (stmt == NULL) abort (); #endif if (NECESSARY (stmt) || IS_EMPTY_STMT (stmt)) return; NECESSARY (stmt) = 1; VARRAY_PUSH_TREE (worklist, stmt); } /* Return true if a store to a variable needs to be preserved. */ static inline bool need_to_preserve_store (tree ssa_name) { return (needs_to_live_in_memory (SSA_NAME_VAR (ssa_name))); } /* Mark STMT as necessary if it obviously is. Add it to the worklist if it can make other statements necessary. If AGGRESSIVE is false, control statements are conservatively marked as necessary. */ static void mark_stmt_if_obviously_necessary (tree stmt, bool aggressive) { def_optype defs; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; stmt_ann_t ann; size_t i; /* Statements that are implicitly live. Most function calls, asm and return statements are required. Labels and BIND_EXPR nodes are kept because they are control flow, and we have no way of knowing whether they can be removed. DCE can eliminate all the other statements in a block, and CFG can then remove the block and labels. */ switch (TREE_CODE (stmt)) { case BIND_EXPR: case LABEL_EXPR: case CASE_LABEL_EXPR: mark_stmt_necessary (stmt, false); return; case ASM_EXPR: case RESX_EXPR: case RETURN_EXPR: mark_stmt_necessary (stmt, true); return; case CALL_EXPR: /* Most, but not all, function calls are required. Function calls that produce no result and have no side effects (i.e. const pure functions) are unnecessary. */ if (TREE_SIDE_EFFECTS (stmt)) mark_stmt_necessary (stmt, true); return; case MODIFY_EXPR: if (TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR && TREE_SIDE_EFFECTS (TREE_OPERAND (stmt, 1))) { mark_stmt_necessary (stmt, true); return; } /* These values are mildly magic bits of the EH runtime. We can't see the entire lifetime of these values until landing pads are generated. */ if (TREE_CODE (TREE_OPERAND (stmt, 0)) == EXC_PTR_EXPR || TREE_CODE (TREE_OPERAND (stmt, 0)) == FILTER_EXPR) { mark_stmt_necessary (stmt, true); return; } break; case GOTO_EXPR: if (! simple_goto_p (stmt)) mark_stmt_necessary (stmt, true); return; case COND_EXPR: if (GOTO_DESTINATION (COND_EXPR_THEN (stmt)) == GOTO_DESTINATION (COND_EXPR_ELSE (stmt))) { /* A COND_EXPR is obviously dead if the target labels are the same. We cannot kill the statement at this point, so to prevent the statement from being marked necessary, we replace the condition with a constant. The stmt is killed later on in cfg_cleanup. */ COND_EXPR_COND (stmt) = integer_zero_node; modify_stmt (stmt); return; } /* Fall through. */ case SWITCH_EXPR: if (! aggressive) mark_stmt_necessary (stmt, true); break; default: break; } ann = stmt_ann (stmt); /* If the statement has volatile operands, it needs to be preserved.
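(A volatile load, for instance, must not be deleted even if its result is never used, because the access itself is the observable effect.)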
Same for statements that can alter control flow in unpredictable ways. */ if (ann->has_volatile_ops || is_ctrl_altering_stmt (stmt)) { mark_stmt_necessary (stmt, true); return; } get_stmt_operands (stmt); defs = DEF_OPS (ann); for (i = 0; i < NUM_DEFS (defs); i++) { tree def = DEF_OP (defs, i); if (need_to_preserve_store (def)) { mark_stmt_necessary (stmt, true); return; } } v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { tree v_may_def = V_MAY_DEF_RESULT (v_may_defs, i); if (need_to_preserve_store (v_may_def)) { mark_stmt_necessary (stmt, true); return; } } v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { tree v_must_def = V_MUST_DEF_OP (v_must_defs, i); if (need_to_preserve_store (v_must_def)) { mark_stmt_necessary (stmt, true); return; } } return; } /* Find obviously necessary statements. These are things like most function calls, and stores to file level variables. If EL is NULL, control statements are conservatively marked as necessary. Otherwise it contains the list of edges used by control dependence analysis. */ static void find_obviously_necessary_stmts (struct edge_list *el) { basic_block bb; block_stmt_iterator i; edge e; FOR_EACH_BB (bb) { tree phi; /* Check any PHI nodes in the block. */ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { NECESSARY (phi) = 0; /* PHIs for virtual variables do not directly affect code generation and need not be considered inherently necessary regardless of the bits set in their decl. Thus, we only need to mark PHIs for real variables which need their result preserved as being inherently necessary. */ if (is_gimple_reg (PHI_RESULT (phi)) && need_to_preserve_store (PHI_RESULT (phi))) mark_stmt_necessary (phi, true); } /* Check all statements in the block. */ for (i = bsi_start (bb); ! bsi_end_p (i); bsi_next (&i)) { tree stmt = bsi_stmt (i); NECESSARY (stmt) = 0; mark_stmt_if_obviously_necessary (stmt, el != NULL); } /* Mark this basic block as `not visited'. A block will be marked visited when the edges that it is control dependent on have been marked. */ bb->flags &= ~BB_VISITED; } if (el) { /* Prevent the loops from being removed. We must keep the infinite loops, and we currently do not have a means to recognize the finite ones. */ FOR_EACH_BB (bb) { for (e = bb->succ; e; e = e->succ_next) if (e->flags & EDGE_DFS_BACK) mark_control_dependent_edges_necessary (e->dest, el); } } } /* Make corresponding control dependent edges necessary. We only have to do this once for each basic block, so we clear the bitmap after we're done. */ static void mark_control_dependent_edges_necessary (basic_block bb, struct edge_list *el) { int edge_number; #ifdef ENABLE_CHECKING if (bb == EXIT_BLOCK_PTR) abort (); #endif if (bb == ENTRY_BLOCK_PTR) return; EXECUTE_IF_CONTROL_DEPENDENT (bb->index, edge_number, { tree t; basic_block cd_bb = INDEX_EDGE_PRED_BB (el, edge_number); if (TEST_BIT (last_stmt_necessary, cd_bb->index)) continue; SET_BIT (last_stmt_necessary, cd_bb->index); t = last_stmt (cd_bb); if (t && is_ctrl_stmt (t)) mark_stmt_necessary (t, true); }); } /* Propagate necessity using the operands of necessary statements. Process the uses on each statement in the worklist, and add all feeding statements which contribute to the calculation of this value to the worklist. In conservative mode, EL is NULL. */ static void propagate_necessity (struct edge_list *el) { tree i; bool aggressive = (el ? 
true : false); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\nProcessing worklist:\n"); while (VARRAY_ACTIVE_SIZE (worklist) > 0) { /* Take `i' from worklist. */ i = VARRAY_TOP_TREE (worklist); VARRAY_POP (worklist); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "processing: "); print_generic_stmt (dump_file, i, TDF_SLIM); fprintf (dump_file, "\n"); } if (aggressive) { /* Mark the last statements of the basic blocks that the block containing `i' is control dependent on, but only if we haven't already done so. */ basic_block bb = bb_for_stmt (i); if (! (bb->flags & BB_VISITED)) { bb->flags |= BB_VISITED; mark_control_dependent_edges_necessary (bb, el); } } if (TREE_CODE (i) == PHI_NODE) { /* PHI nodes are somewhat special in that each PHI alternative has data and control dependencies. All the statements feeding the PHI node's arguments are always necessary. In aggressive mode, we also consider the control dependent edges leading to the predecessor block associated with each PHI alternative as necessary. */ int k; for (k = 0; k < PHI_NUM_ARGS (i); k++) { tree arg = PHI_ARG_DEF (i, k); if (TREE_CODE (arg) == SSA_NAME) mark_operand_necessary (arg); } if (aggressive) { for (k = 0; k < PHI_NUM_ARGS (i); k++) { basic_block arg_bb = PHI_ARG_EDGE (i, k)->src; if (! (arg_bb->flags & BB_VISITED)) { arg_bb->flags |= BB_VISITED; mark_control_dependent_edges_necessary (arg_bb, el); } } } } else { /* Propagate through the operands. Examine all the USE, VUSE and V_MAY_DEF operands in this statement. Mark all the statements which feed this statement's uses as necessary. */ vuse_optype vuses; v_may_def_optype v_may_defs; use_optype uses; stmt_ann_t ann; size_t k; get_stmt_operands (i); ann = stmt_ann (i); uses = USE_OPS (ann); for (k = 0; k < NUM_USES (uses); k++) mark_operand_necessary (USE_OP (uses, k)); vuses = VUSE_OPS (ann); for (k = 0; k < NUM_VUSES (vuses); k++) mark_operand_necessary (VUSE_OP (vuses, k)); /* The operands of V_MAY_DEF expressions are also needed as they represent potential definitions that may reach this statement (V_MAY_DEF operands allow us to follow def-def links). */ v_may_defs = V_MAY_DEF_OPS (ann); for (k = 0; k < NUM_V_MAY_DEFS (v_may_defs); k++) mark_operand_necessary (V_MAY_DEF_OP (v_may_defs, k)); } } } /* Eliminate unnecessary statements. Any instruction not marked as necessary contributes nothing to the program, and can be deleted. */ static void eliminate_unnecessary_stmts (void) { basic_block bb; block_stmt_iterator i; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\nEliminating unnecessary statements:\n"); clear_special_calls (); FOR_EACH_BB (bb) { /* Remove dead PHI nodes. */ remove_dead_phis (bb); /* Remove dead statements. */ for (i = bsi_start (bb); ! bsi_end_p (i) ; ) { tree t = bsi_stmt (i); stats.total++; /* If `i' is not necessary then remove it. */ if (! NECESSARY (t)) remove_dead_stmt (&i, bb); else { if (TREE_CODE (t) == CALL_EXPR) notice_special_calls (t); else if (TREE_CODE (t) == MODIFY_EXPR && TREE_CODE (TREE_OPERAND (t, 1)) == CALL_EXPR) notice_special_calls (TREE_OPERAND (t, 1)); bsi_next (&i); } } } } /* Remove dead PHI nodes from block BB. */ static void remove_dead_phis (basic_block bb) { tree prev, phi; prev = NULL_TREE; phi = phi_nodes (bb); while (phi) { stats.total_phis++; if (! 
NECESSARY (phi)) { tree next = PHI_CHAIN (phi); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Deleting : "); print_generic_stmt (dump_file, phi, TDF_SLIM); fprintf (dump_file, "\n"); } remove_phi_node (phi, prev, bb); stats.removed_phis++; phi = next; } else { prev = phi; phi = PHI_CHAIN (phi); } } } /* Remove dead statement pointed to by iterator I. Receives the basic block BB containing I so that we don't have to look it up. */ static void remove_dead_stmt (block_stmt_iterator *i, basic_block bb) { tree t = bsi_stmt (*i); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Deleting : "); print_generic_stmt (dump_file, t, TDF_SLIM); fprintf (dump_file, "\n"); } stats.removed++; /* If we have determined that a conditional branch statement contributes nothing to the program, then we not only remove it, but we also change the flow graph so that the current block will simply fall-thru to its immediate post-dominator. The blocks we are circumventing will be removed by cleanup_cfg if this change in the flow graph makes them unreachable. */ if (is_ctrl_stmt (t)) { basic_block post_dom_bb; edge e; #ifdef ENABLE_CHECKING /* The post dominance info has to be up-to-date. */ if (dom_computed[CDI_POST_DOMINATORS] != DOM_OK) abort (); #endif /* Get the immediate post dominator of bb. */ post_dom_bb = get_immediate_dominator (CDI_POST_DOMINATORS, bb); /* Some blocks don't have an immediate post dominator. This can happen for example with infinite loops. Removing an infinite loop is an inappropriate transformation anyway... */ if (! post_dom_bb) { bsi_next (i); return; } /* Redirect the first edge out of BB to reach POST_DOM_BB. */ redirect_edge_and_branch (bb->succ, post_dom_bb); PENDING_STMT (bb->succ) = NULL; /* The edge is no longer associated with a conditional, so it does not have TRUE/FALSE flags. */ bb->succ->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); /* If the edge reaches any block other than the exit, then it is a fallthru edge; if it reaches the exit, then it is not a fallthru edge. */ if (post_dom_bb != EXIT_BLOCK_PTR) bb->succ->flags |= EDGE_FALLTHRU; else bb->succ->flags &= ~EDGE_FALLTHRU; /* Remove the remaining outgoing edges. */ for (e = bb->succ->succ_next; e != NULL;) { edge tmp = e; e = e->succ_next; remove_edge (tmp); } } bsi_remove (i); } /* Print out removed statement statistics. */ static void print_stats (void) { if (dump_file && (dump_flags & (TDF_STATS|TDF_DETAILS))) { float percg; percg = ((float) stats.removed / (float) stats.total) * 100; fprintf (dump_file, "Removed %d of %d statements (%d%%)\n", stats.removed, stats.total, (int) percg); if (stats.total_phis == 0) percg = 0; else percg = ((float) stats.removed_phis / (float) stats.total_phis) * 100; fprintf (dump_file, "Removed %d of %d PHI nodes (%d%%)\n", stats.removed_phis, stats.total_phis, (int) percg); } } /* Initialization for this pass. Set up the used data structures. */ static void tree_dce_init (bool aggressive) { memset ((void *) &stats, 0, sizeof (stats)); if (aggressive) { int i; control_dependence_map = xmalloc (last_basic_block * sizeof (bitmap)); for (i = 0; i < last_basic_block; ++i) control_dependence_map[i] = BITMAP_XMALLOC (); last_stmt_necessary = sbitmap_alloc (last_basic_block); sbitmap_zero (last_stmt_necessary); } processed = sbitmap_alloc (num_ssa_names + 1); sbitmap_zero (processed); VARRAY_TREE_INIT (worklist, 64, "work list"); } /* Cleanup after this pass.
*/ static void tree_dce_done (bool aggressive) { if (aggressive) { int i; for (i = 0; i < last_basic_block; ++i) BITMAP_XFREE (control_dependence_map[i]); free (control_dependence_map); sbitmap_free (last_stmt_necessary); } sbitmap_free (processed); } /* Main routine to eliminate dead code. AGGRESSIVE controls the aggressiveness of the algorithm. In conservative mode, we ignore control dependence and simply declare all but the most trivially dead branches necessary. This mode is fast. In aggressive mode, control dependences are taken into account, which results in more dead code elimination, but at the cost of some time. FIXME: Aggressive mode before PRE doesn't work currently because the dominance info is not invalidated after DCE1. This is not an issue right now because we only run aggressive DCE as the last tree SSA pass, but keep this in mind when you start experimenting with pass ordering. */ static void perform_tree_ssa_dce (bool aggressive) { struct edge_list *el = NULL; tree_dce_init (aggressive); if (aggressive) { /* Compute control dependence. */ timevar_push (TV_CONTROL_DEPENDENCES); calculate_dominance_info (CDI_POST_DOMINATORS); el = create_edge_list (); find_all_control_dependences (el); timevar_pop (TV_CONTROL_DEPENDENCES); mark_dfs_back_edges (); } find_obviously_necessary_stmts (el); propagate_necessity (el); eliminate_unnecessary_stmts (); if (aggressive) free_dominance_info (CDI_POST_DOMINATORS); cleanup_tree_cfg (); /* Debugging dumps. */ if (dump_file) { dump_function_to_file (current_function_decl, dump_file, dump_flags); print_stats (); } tree_dce_done (aggressive); free_edge_list (el); } /* Pass entry points. */ static void tree_ssa_dce (void) { perform_tree_ssa_dce (/*aggressive=*/false); } static void tree_ssa_cd_dce (void) { perform_tree_ssa_dce (/*aggressive=*/optimize >= 2); } static bool gate_dce (void) { return flag_tree_dce != 0; } struct tree_opt_pass pass_dce = { "dce", /* name */ gate_dce, /* gate */ tree_ssa_dce, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_DCE, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_ggc_collect | TODO_verify_ssa /* todo_flags_finish */ }; struct tree_opt_pass pass_cd_dce = { "cddce", /* name */ gate_dce, /* gate */ tree_ssa_cd_dce, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_CD_DCE, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_ggc_collect | TODO_verify_ssa | TODO_verify_flow /* todo_flags_finish */ }; /* Const/copy propagation and SSA_NAME replacement support routines. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ /* This file provides a handful of interfaces for performing const/copy propagation and simple expression replacement which keep variable annotations up-to-date. We require that for any copy operation where the RHS and LHS have a non-null memory tag, the memory tag be the same. It is OK for one or both of the memory tags to be NULL. We also require tracking if a variable is dereferenced in a load or store operation. We enforce these requirements by having all copy propagation and replacements of one SSA_NAME with a different SSA_NAME use the APIs defined in this file. */ /* Given two SSA_NAMEs, replace the annotations for the one referred to by OP with VAR's annotations. If OP is a pointer, copy the memory tag used originally by OP into VAR. This is needed in cases where VAR had never been dereferenced in the program. If FOR_PROPAGATION is true, then perform additional checks to ensure that const/copy propagation of var for OP is valid. */ static void replace_ssa_names_ann (tree op, tree var, bool for_propagation ATTRIBUTE_UNUSED) { #if defined ENABLE_CHECKING if (for_propagation && !may_propagate_copy (op, var)) abort (); #endif /* If VAR doesn't have a memory tag, copy the one from the original operand. Also copy the dereferenced flags. */ if (POINTER_TYPE_P (TREE_TYPE (op))) { var_ann_t new_ann = var_ann (SSA_NAME_VAR (var)); var_ann_t orig_ann = var_ann (SSA_NAME_VAR (op)); if (new_ann->type_mem_tag == NULL_TREE) new_ann->type_mem_tag = orig_ann->type_mem_tag; else if (orig_ann->type_mem_tag == NULL_TREE) orig_ann->type_mem_tag = new_ann->type_mem_tag; else if (new_ann->type_mem_tag != orig_ann->type_mem_tag) abort (); } } /* Common code for propagate_value and replace_exp. Replace use operand OP_P with VAL. FOR_PROPAGATION indicates if the replacement is done to propagate a value or not. */ static void replace_exp_1 (use_operand_p op_p, tree val, bool for_propagation) { if (TREE_CODE (val) == SSA_NAME) { if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME) replace_ssa_names_ann (USE_FROM_PTR (op_p), val, for_propagation); SET_USE (op_p, val); } else SET_USE (op_p, lhd_unsave_expr_now (val)); } /* Propagate the value VAL (assumed to be a constant or another SSA_NAME) into the operand pointed to by OP_P. Use this version for const/copy propagation as it will perform additional checks to ensure validity of the const/copy propagation. */ void propagate_value (use_operand_p op_p, tree val) { replace_exp_1 (op_p, val, true); } /* Propagate the value VAL (assumed to be a constant or another SSA_NAME) into the tree pointed to by OP_P. Use this version for const/copy propagation when SSA operands are not available. It will perform the additional checks to ensure validity of the const/copy propagation, but will not update any operand information. Be sure to mark the stmt as modified. */ void propagate_tree_value (tree *op_p, tree val) { if (TREE_CODE (val) == SSA_NAME) { if (TREE_CODE (*op_p) == SSA_NAME) replace_ssa_names_ann (*op_p, val, true); *op_p = val; } else *op_p = lhd_unsave_expr_now (val); } /* Replace *OP_P with value VAL (assumed to be a constant or another SSA_NAME). Use this version when not const/copy propagating values. For example, PRE uses this version when building expressions as they would appear in specific blocks taking into account actions of PHI nodes. */ void replace_exp (use_operand_p op_p, tree val) { replace_exp_1 (op_p, val, false); } /* Language independent return value optimizations Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file implements return value optimizations for functions which return aggregate types. Basically this pass searches the function for return statements which return a local aggregate. When converted to RTL such statements will generate a copy from the local aggregate to final return value destination mandated by the target's ABI. That copy can often be avoided by directly constructing the return value into the final destination mandated by the target's ABI. This is basically a generic equivalent to the C++ front-end's Named Return Value optimization. */ struct nrv_data { /* This is the temporary (a VAR_DECL) which appears in all of this function's RETURN_EXPR statements. */ tree var; /* This is the function's RESULT_DECL. We will replace all occurrences of VAR with RESULT_DECL when we apply this optimization. */ tree result; }; static tree finalize_nrv_r (tree *, int *, void *); /* Callback for the tree walker. If TP refers to a RETURN_EXPR, then set the expression being returned to nrv_data->result. If TP refers to nrv_data->var, then replace nrv_data->var with nrv_data->result. If we reach a node where we know all the subtrees are uninteresting, then set *WALK_SUBTREES to zero. */ static tree finalize_nrv_r (tree *tp, int *walk_subtrees, void *data) { struct nrv_data *dp = (struct nrv_data *)data; /* No need to walk into types. */ if (TYPE_P (*tp)) *walk_subtrees = 0; /* If this is a RETURN_EXPR, set the expression being returned to RESULT. */ else if (TREE_CODE (*tp) == RETURN_EXPR) TREE_OPERAND (*tp, 0) = dp->result; /* Otherwise replace all occurrences of VAR with RESULT. */ else if (*tp == dp->var) *tp = dp->result; /* Keep iterating. */ return NULL_TREE; } /* Main entry point for return value optimizations. If this function always returns the same local variable, and that local variable is an aggregate type, then replace the variable with the function's DECL_RESULT. This is the equivalent of the C++ named return value optimization applied to optimized trees in a language independent form. If we ever encounter languages which prevent this kind of optimization, then we could either have the languages register the optimization or we could change the gating function to check the current language. */ static void tree_nrv (void) { tree result = DECL_RESULT (current_function_decl); tree result_type = TREE_TYPE (result); tree found = NULL; basic_block bb; struct nrv_data data; /* If this function does not return an aggregate type in memory, then there is nothing to do. */ if (!aggregate_value_p (result, current_function_decl)) return; /* Look through each block for suitable return expressions. RETURN_EXPRs end basic blocks, so we only have to look at the last statement in each block. That makes this very fast. 
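As a purely illustrative (hypothetical) example, a function of the form 'struct S f (void) { struct S s; ...; return s; }' produces RETURN_EXPRs whose operand has the form '<result> = s'; when every return in the function names the same local 's', that local can be replaced by the RESULT_DECL so the value is constructed directly in the return slot.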
*/ FOR_EACH_BB (bb) { tree stmt = last_stmt (bb); if (stmt && TREE_CODE (stmt) == RETURN_EXPR) { tree ret_expr = TREE_OPERAND (stmt, 0); /* This probably should not happen, but just to be safe, do not perform NRV optimizations if only some of the return statements return a value. */ if (!ret_expr || TREE_CODE (ret_expr) != MODIFY_EXPR || TREE_CODE (TREE_OPERAND (ret_expr, 0)) != RESULT_DECL) return; /* Now verify that this return statement uses the same value as any previously encountered return statement. */ if (found != NULL) { /* If we found a return statement using a different variable than previous return statements, then we can not perform NRV optimizations. */ if (found != TREE_OPERAND (ret_expr, 1)) return; } else found = TREE_OPERAND (ret_expr, 1); /* The returned value must be a local automatic variable of the same type and alignment as the function's result. */ if (TREE_CODE (found) != VAR_DECL || DECL_CONTEXT (found) != current_function_decl || TREE_STATIC (found) || TREE_ADDRESSABLE (found) || DECL_ALIGN (found) > DECL_ALIGN (result) || !lang_hooks.types_compatible_p (TREE_TYPE (found), result_type)) return; } } if (!found) return; /* If dumping details, then note once and only once the NRV replacement. */ if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "NRV Replaced: "); print_generic_expr (dump_file, found, dump_flags); fprintf (dump_file, " with: "); print_generic_expr (dump_file, result, dump_flags); fprintf (dump_file, "\n"); } /* At this point we know that all the return statements return the same local which has suitable attributes for NRV. Copy debugging information from FOUND to RESULT. */ DECL_NAME (result) = DECL_NAME (found); DECL_SOURCE_LOCATION (result) = DECL_SOURCE_LOCATION (found); DECL_ABSTRACT_ORIGIN (result) = DECL_ABSTRACT_ORIGIN (found); TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (found); /* Now walk through the function changing all references to VAR to be RESULT. */ data.var = found; data.result = result; FOR_EACH_BB (bb) { block_stmt_iterator bsi; for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) walk_tree (bsi_stmt_ptr (bsi), finalize_nrv_r, &data, 0); } /* FOUND is no longer used. Ensure it gets removed. */ var_ann (found)->used = 0; } struct tree_opt_pass pass_nrv = { "nrv", /* name */ NULL, /* gate */ tree_nrv, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_NRV, /* tv_id */ PROP_cfg, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_ggc_collect /* todo_flags_finish */ }; /* Rename SSA copies. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Andrew MacLeod This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ extern void rename_ssa_copies (void); /* The following routines implement the SSA copy renaming phase.
This optimization looks for copies between 2 SSA_NAMES, either through a direct copy, or an implicit one via a PHI node result and its arguments. Each copy is examined to determine if it is possible to rename the base variable of one of the operands to the same variable as the other operand, ie. T.3_5 = <expr>; a_1 = T.3_5. If this copy couldn't be copy propagated, it could possibly remain in the program throughout the optimization phases. After SSA->normal, it would become: T.3 = <expr>; a = T.3. Since T.3_5 is distinct from all other SSA versions of T.3, there is no fundamental reason why the base variable needs to be T.3, subject to certain restrictions. This optimization attempts to determine if we can change the base variable on copies like this, and result in code such as: a_5 = <expr>; a_1 = a_5. This gives the SSA->normal pass a shot at coalescing a_1 and a_5. If it is possible, the copy goes away completely. If it isn't possible, a new temp will be created for a_5, and you will end up with the exact same code: a.8 = <expr>; a = a.8. The other benefit of performing this optimization relates to what variables are chosen in copies. Gimplification of the program uses temporaries for a lot of things. Expressions like a_1 = <expr>; <use> = a_1 get turned into T.3_5 = <expr>; a_1 = T.3_5; <use> = a_1. Copy propagation is done in a forward direction, and if we can propagate through the copy, we end up with: T.3_5 = <expr>; <use> = T.3_5. The copy is gone, but so is all reference to the user variable 'a'. By performing this optimization, we would see the sequence: a_5 = <expr>; a_1 = a_5; <use> = a_1, which copy propagation would then turn into: a_5 = <expr>; <use> = a_5, and so we still retain the user variable whenever possible. */ /* Coalesce the partitions in MAP representing VAR1 and VAR2 if it is valid. Choose a representative for the partition, and send debug info to DEBUG. */ static void copy_rename_partition_coalesce (var_map map, tree var1, tree var2, FILE *debug) { int p1, p2, p3; tree root1, root2; var_ann_t ann1, ann2, ann3; bool gimp1, gimp2; #ifdef ENABLE_CHECKING if (TREE_CODE (var1) != SSA_NAME || TREE_CODE (var2) != SSA_NAME) abort (); #endif register_ssa_partition (map, var1, false); register_ssa_partition (map, var2, true); p1 = partition_find (map->var_partition, SSA_NAME_VERSION (var1)); p2 = partition_find (map->var_partition, SSA_NAME_VERSION (var2)); if (debug) { fprintf (debug, "Try : "); print_generic_expr (debug, var1, TDF_SLIM); fprintf (debug, "(P%d) & ", p1); print_generic_expr (debug, var2, TDF_SLIM); fprintf (debug, "(P%d)", p2); } #ifdef ENABLE_CHECKING if (p1 == NO_PARTITION || p2 == NO_PARTITION) abort (); #endif root1 = SSA_NAME_VAR (partition_to_var (map, p1)); root2 = SSA_NAME_VAR (partition_to_var (map, p2)); if (DECL_HARD_REGISTER (root1) || DECL_HARD_REGISTER (root2)) { if (debug) { if (DECL_HARD_REGISTER (root1)) print_generic_expr (debug, var1, TDF_SLIM); else print_generic_expr (debug, var2, TDF_SLIM); fprintf (debug, " is a hardware register. No Coalescing.\n"); } return; } ann1 = var_ann (root1); ann2 = var_ann (root2); if (p1 == p2) { if (debug) fprintf (debug, " : Already coalesced.\n"); return; } /* Partitions already have the same root, simply merge them. */ if (root1 == root2) { p1 = partition_union (map->var_partition, p1, p2); if (debug) fprintf (debug, " : Same root, coalesced --> P%d.\n", p1); return; } /* Never attempt to coalesce 2 different parameters. */ if (TREE_CODE (root1) == PARM_DECL && TREE_CODE (root2) == PARM_DECL) { if (debug) fprintf (debug, " : 2 different PARM_DECLS.
No coalesce.\n"); return; } if ((TREE_CODE (root1) == RESULT_DECL) != (TREE_CODE (root2) == RESULT_DECL)) { if (debug) fprintf (debug, " : One root a RESULT_DECL. No coalesce.\n"); return; } gimp1 = is_gimple_tmp_var (root1); gimp2 = is_gimple_tmp_var (root2); /* Never attempt to coalesce 2 user variables unless one is an inline variable. */ if (!gimp1 && !gimp2) { if (DECL_FROM_INLINE (root2)) gimp2 = true; else if (DECL_FROM_INLINE (root1)) gimp1 = true; else { if (debug) fprintf (debug, " : 2 different USER vars. No coalesce.\n"); return; } } /* Don't coalesce if there are two different memory tags. */ if (ann1->type_mem_tag && ann2->type_mem_tag && ann1->type_mem_tag != ann2->type_mem_tag) { if (debug) fprintf (debug, " : 2 memory tags. No coalesce.\n"); return; } /* If both values have default defs, we can't coalesce. If only one has a tag, make sure that variable is the new root partition. */ if (default_def (root1)) { if (default_def (root2)) { if (debug) fprintf (debug, " : 2 default defs. No coalesce.\n"); return; } else { gimp2 = true; gimp1 = false; } } else if (default_def (root2)) { gimp1 = true; gimp2 = false; } /* Merge the two partitions. */ p3 = partition_union (map->var_partition, p1, p2); /* Set the root variable of the partition to the better choice, if there is one. */ if (!gimp2) SSA_NAME_VAR (partition_to_var (map, p3)) = root2; else if (!gimp1) SSA_NAME_VAR (partition_to_var (map, p3)) = root1; /* Update the various flag widgitry of the current base representative. */ ann3 = var_ann (SSA_NAME_VAR (partition_to_var (map, p3))); if (ann1->type_mem_tag) ann3->type_mem_tag = ann1->type_mem_tag; else ann3->type_mem_tag = ann2->type_mem_tag; if (debug) { fprintf (debug, " --> P%d ", p3); print_generic_expr (debug, SSA_NAME_VAR (partition_to_var (map, p3)), TDF_SLIM); fprintf (debug, "\n"); } } /* This function will make a pass through the IL, and attempt to coalesce any SSA versions which occur in PHI's or copies. Coalescing is accomplished by changing the underlying root variable of all coalesced version. This will then cause the SSA->normal pass to attempt to coalesce them all to the same variable. */ void rename_ssa_copies (void) { var_map map; basic_block bb; block_stmt_iterator bsi; tree phi, stmt, var, part_var; unsigned x; FILE *debug; if (dump_file && (dump_flags & TDF_DETAILS)) debug = dump_file; else debug = NULL; map = init_var_map (num_ssa_names + 1); FOR_EACH_BB (bb) { /* Scan for real copies. */ for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { stmt = bsi_stmt (bsi); if (TREE_CODE (stmt) == MODIFY_EXPR) { tree lhs = TREE_OPERAND (stmt, 0); tree rhs = TREE_OPERAND (stmt, 1); if (TREE_CODE (lhs) == SSA_NAME && !has_hidden_use (SSA_NAME_VAR (lhs)) && TREE_CODE (rhs) == SSA_NAME) copy_rename_partition_coalesce (map, lhs, rhs, debug); } } } FOR_EACH_BB (bb) { /* Treat PHI nodes as copies between the result and each argument. */ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { int i; tree res = PHI_RESULT (phi); /* Do not process virtual SSA_NAMES or variables which have hidden uses. */ if (!is_gimple_reg (SSA_NAME_VAR (res)) || has_hidden_use (SSA_NAME_VAR (res))) continue; for (i = 0; i < PHI_NUM_ARGS (phi); i++) { tree arg = PHI_ARG_DEF (phi, i); if (TREE_CODE (arg) == SSA_NAME) copy_rename_partition_coalesce (map, res, arg, debug); } } } if (debug) dump_var_map (debug, map); /* Now one more pass to make all elements of a partition share the same root variable. 
*/ for (x = 1; x <= num_ssa_names; x++) { part_var = partition_to_var (map, x); if (!part_var) continue; var = map->partition_to_var[x]; if (debug) { if (SSA_NAME_VAR (var) != SSA_NAME_VAR (part_var)) { fprintf (debug, "Coalesced "); print_generic_expr (debug, var, TDF_SLIM); fprintf (debug, " to "); print_generic_expr (debug, part_var, TDF_SLIM); fprintf (debug, "\n"); } } SSA_NAME_VAR (var) = SSA_NAME_VAR (part_var); } delete_var_map (map); } /* Return true if copy rename is to be performed. */ static bool gate_copyrename (void) { return flag_tree_copyrename != 0; } struct tree_opt_pass pass_rename_ssa_copies = { "copyrename", /* name */ gate_copyrename, /* gate */ rename_ssa_copies, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_COPY_RENAME, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_verify_ssa /* todo_flags_finish */ }; /* SSA-PRE for trees. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Daniel Berlin and Steven Bosscher This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Functions to support a pool of allocatable objects Copyright (C) 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. Contributed by Daniel Berlin This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef ALLOC_POOL_H #define ALLOC_POOL_H typedef unsigned long ALLOC_POOL_ID_TYPE; typedef struct alloc_pool_list_def { struct alloc_pool_list_def *next; } *alloc_pool_list; typedef struct alloc_pool_def { const char *name; #ifdef ENABLE_CHECKING ALLOC_POOL_ID_TYPE id; #endif size_t elts_per_block; alloc_pool_list free_list; size_t elts_allocated; size_t elts_free; size_t blocks_allocated; alloc_pool_list block_list; size_t block_size; size_t elt_size; } *alloc_pool; extern alloc_pool create_alloc_pool (const char *, size_t, size_t); extern void free_alloc_pool (alloc_pool); extern void *pool_alloc (alloc_pool); extern void pool_free (alloc_pool, void *); extern void dump_alloc_pool_statistics (void); #endif /* TODO: 1. Avail sets can be shared by making an avail_find_leader that walks up the dominator tree and looks in those avail sets. 
This might affect code optimality, it's unclear right now. 2. Load motion can be performed by value numbering the loads the same as we do other expressions. This requires iterative hashing the vuses into the values. Right now we simply assign a new value every time we see a statement with a vuse. 3. Strength reduction can be performed by anticipating expressions we can repair later on. 4. Our canonicalization of expressions during lookups don't take constants into account very well. In particular, we don't fold anywhere, so we can get situations where we stupidly think something is a new value (a + 1 + 1 vs a + 2). This is somewhat expensive to fix, but it does expose a lot more eliminations. It may or not be worth it, depending on how critical you consider PRE vs just plain GRE. */ /* For ease of terminology, "expression node" in the below refers to every expression node but MODIFY_EXPR, because MODIFY_EXPR's represent the actual statement containing the expressions we care about, and we cache the value number by putting it in the expression. */ /* Basic algorithm First we walk the statements to generate the AVAIL sets, the EXP_GEN sets, and the tmp_gen sets. EXP_GEN sets represent the generation of values/expressions by a given block. We use them when computing the ANTIC sets. The AVAIL sets consist of SSA_NAME's that represent values, so we know what values are available in what blocks. AVAIL is a forward dataflow problem. In SSA, values are never killed, so we don't need a kill set, or a fixpoint iteration, in order to calculate the AVAIL sets. In traditional parlance, AVAIL sets tell us the downsafety of the expressions/values. Next, we generate the ANTIC sets. These sets represent the anticipatable expressions. ANTIC is a backwards dataflow problem.An expression is anticipatable in a given block if it could be generated in that block. This means that if we had to perform an insertion in that block, of the value of that expression, we could. Calculating the ANTIC sets requires phi translation of expressions, because the flow goes backwards through phis. We must iterate to a fixpoint of the ANTIC sets, because we have a kill set. Even in SSA form, values are not live over the entire function, only from their definition point onwards. So we have to remove values from the ANTIC set once we go past the definition point of the leaders that make them up. compute_antic/compute_antic_aux performs this computation. Third, we perform insertions to make partially redundant expressions fully redundant. An expression is partially redundant (excluding partial anticipation) if: 1. It is AVAIL in some, but not all, of the predecessors of a given block. 2. It is ANTIC in all the predecessors. In order to make it fully redundant, we insert the expression into the predecessors where it is not available, but is ANTIC. insert/insert_aux performs this insertion. Fourth, we eliminate fully redundant expressions. This is a simple statement walk that replaces redundant calculations with the now available values. */ /* Representations of value numbers: Value numbers are represented using the "value handle" approach. This means that each SSA_NAME (and for other reasons to be disclosed in a moment, expression nodes) has a value handle that can be retrieved through get_value_handle. This value handle, *is* the value number of the SSA_NAME. You can pointer compare the value handles for equivalence purposes. 
For debugging reasons, the value handle is internally more than just a number, it is a VAR_DECL named "value.x", where x is a unique number for each value number in use. This allows expressions with SSA_NAMES replaced by value handles to still be pretty printed in a sane way. They simply print as "value.3 * value.5", etc. Expression nodes have value handles associated with them as a cache. Otherwise, we'd have to look them up again in the hash table This makes significant difference (factor of two or more) on some test cases. They can be thrown away after the pass is finished. */ /* Representation of expressions on value numbers: In some portions of this code, you will notice we allocate "fake" analogues to the expression we are value numbering, and replace the operands with the values of the expression. Since we work on values, and not just names, we canonicalize expressions to value expressions for use in the ANTIC sets, the EXP_GEN set, etc. This is theoretically unnecessary, it just saves a bunch of repeated get_value_handle and find_leader calls in the remainder of the code, trading off temporary memory usage for speed. The tree nodes aren't actually creating more garbage, since they are allocated in a special pools which are thrown away at the end of this pass. All of this also means that if you print the EXP_GEN or ANTIC sets, you will see "value.5 + value.7" in the set, instead of "a_55 + b_66" or something. The only thing that actually cares about seeing the value leaders is phi translation, and it needs to be able to find the leader for a value in an arbitrary block, so this "value expression" form is perfect for it (otherwise you'd do get_value_handle->find_leader->translate->get_value_handle->find_leader).*/ /* Representation of sets: There are currently two types of sets used, hopefully to be unified soon. The AVAIL sets do not need to be sorted in any particular order, and thus, are simply represented as two bitmaps, one that keeps track of values present in the set, and one that keeps track of expressions present in the set. The other sets are represented as doubly linked lists kept in topological order, with an optional supporting bitmap of values present in the set. The sets represent values, and the elements can be values or expressions. The elements can appear in different sets, but each element can only appear once in each set. Since each node in the set represents a value, we also want to be able to map expression, set pairs to something that tells us whether the value is present is a set. We use a per-set bitmap for that. The value handles also point to a linked list of the expressions they represent via a tree annotation. This is mainly useful only for debugging, since we don't do identity lookups. */ /* A value set element. Basically a single linked list of expressions/values. */ typedef struct value_set_node { /* An expression. */ tree expr; /* A pointer to the next element of the value set. */ struct value_set_node *next; } *value_set_node_t; /* A value set. This is a singly linked list of value_set_node elements with a possible bitmap that tells us what values exist in the set. This set must be kept in topologically sorted order. */ typedef struct value_set { /* The head of the list. Used for iterating over the list in order. */ value_set_node_t head; /* The tail of the list. Used for tail insertions, which are necessary to keep the set in topologically sorted order because of how the set is built. */ value_set_node_t tail; /* The length of the list. 
*/ size_t length; /* True if the set is indexed, which means it contains a backing bitmap for quick determination of whether certain values exist in the set. */ bool indexed; /* The bitmap of values that exist in the set. May be NULL in an empty or non-indexed set. */ bitmap values; } *value_set_t; /* An unordered bitmap set. One bitmap tracks values, the other, expressions. */ typedef struct bitmap_set { bitmap expressions; bitmap values; } *bitmap_set_t; /* Sets that we need to keep track of. */ typedef struct bb_value_sets { /* The EXP_GEN set, which represents expressions/values generated in a basic block. */ value_set_t exp_gen; /* The PHI_GEN set, which represents PHI results generated in a basic block. */ bitmap_set_t phi_gen; /* The TMP_GEN set, which represents results/temporaries generated in a basic block. IE the LHS of an expression. */ bitmap_set_t tmp_gen; /* The AVAIL_OUT set, which represents which values are available in a given basic block. */ bitmap_set_t avail_out; /* The ANTIC_IN set, which represents which values are anticiptable in a given basic block. */ value_set_t antic_in; /* The NEW_SETS set, which is used during insertion to augment the AVAIL_OUT set of blocks with the new insertions performed during the current iteration. */ bitmap_set_t new_sets; } *bb_value_sets_t; #define EXP_GEN(BB) ((bb_value_sets_t) ((BB)->aux))->exp_gen #define PHI_GEN(BB) ((bb_value_sets_t) ((BB)->aux))->phi_gen #define TMP_GEN(BB) ((bb_value_sets_t) ((BB)->aux))->tmp_gen #define AVAIL_OUT(BB) ((bb_value_sets_t) ((BB)->aux))->avail_out #define ANTIC_IN(BB) ((bb_value_sets_t) ((BB)->aux))->antic_in #define NEW_SETS(BB) ((bb_value_sets_t) ((BB)->aux))->new_sets /* This structure is used to keep track of statistics on what optimization PRE was able to perform. */ static struct { /* The number of RHS computations eliminated by PRE. */ int eliminations; /* The number of new expressions/temporaries generated by PRE. */ int insertions; /* The number of new PHI nodes added by PRE. */ int phis; } pre_stats; static tree bitmap_find_leader (bitmap_set_t, tree); static tree find_leader (value_set_t, tree); static void value_insert_into_set (value_set_t, tree); static void bitmap_value_insert_into_set (bitmap_set_t, tree); static void bitmap_value_replace_in_set (bitmap_set_t, tree); static void insert_into_set (value_set_t, tree); static void bitmap_set_copy (bitmap_set_t, bitmap_set_t); static bool bitmap_set_contains_value (bitmap_set_t, tree); static bitmap_set_t bitmap_set_new (void); static value_set_t set_new (bool); static bool is_undefined_value (tree); static tree create_expression_by_pieces (basic_block, tree, tree); /* We can add and remove elements and entries to and from sets and hash tables, so we use alloc pools for them. */ static alloc_pool value_set_pool; static alloc_pool bitmap_set_pool; static alloc_pool value_set_node_pool; static alloc_pool binary_node_pool; static alloc_pool unary_node_pool; /* The phi_translate_table caches phi translations for a given expression and predecessor. */ static htab_t phi_translate_table; /* A three tuple {e, pred, v} used to cache phi translations in the phi_translate_table. */ typedef struct expr_pred_trans_d { /* The expression. */ tree e; /* The predecessor block along which we translated the expression. */ basic_block pred; /* The value that resulted from the translation. */ tree v; /* The hashcode for the expression, pred pair. This is cached for speed reasons. 
*/ hashval_t hashcode; } *expr_pred_trans_t; /* Return the hash value for a phi translation table entry. */ static hashval_t expr_pred_trans_hash (const void *p) { const expr_pred_trans_t ve = (expr_pred_trans_t) p; return ve->hashcode; } /* Return true if two phi translation table entries are the same. P1 and P2 should point to the expr_pred_trans_t's to be compared.*/ static int expr_pred_trans_eq (const void *p1, const void *p2) { const expr_pred_trans_t ve1 = (expr_pred_trans_t) p1; const expr_pred_trans_t ve2 = (expr_pred_trans_t) p2; basic_block b1 = ve1->pred; basic_block b2 = ve2->pred; /* If they are not translations for the same basic block, they can't be equal. */ if (b1 != b2) return false; /* If they are for the same basic block, determine if the expressions are equal. */ if (expressions_equal_p (ve1->e, ve2->e)) return true; return false; } /* Search in the phi translation table for the translation of expression E in basic block PRED. Return the translated value, if found, NULL otherwise. */ static inline tree phi_trans_lookup (tree e, basic_block pred) { void **slot; struct expr_pred_trans_d ept; ept.e = e; ept.pred = pred; ept.hashcode = vn_compute (e, (unsigned long) pred, NULL); slot = htab_find_slot_with_hash (phi_translate_table, &ept, ept.hashcode, NO_INSERT); if (!slot) return NULL; else return ((expr_pred_trans_t) *slot)->v; } /* Add the tuple mapping from {expression E, basic block PRED} to value V, to the phi translation table. */ static inline void phi_trans_add (tree e, tree v, basic_block pred) { void **slot; expr_pred_trans_t new_pair = xmalloc (sizeof (*new_pair)); new_pair->e = e; new_pair->pred = pred; new_pair->v = v; new_pair->hashcode = vn_compute (e, (unsigned long) pred, NULL); slot = htab_find_slot_with_hash (phi_translate_table, new_pair, new_pair->hashcode, INSERT); if (*slot) free (*slot); *slot = (void *) new_pair; } /* Add expression E to the expression set of value V. */ void add_to_value (tree v, tree e) { /* Constants have no expression sets. */ if (is_gimple_min_invariant (v)) return; if (VALUE_HANDLE_EXPR_SET (v) == NULL) VALUE_HANDLE_EXPR_SET (v) = set_new (false); insert_into_set (VALUE_HANDLE_EXPR_SET (v), e); } /* Return true if value V exists in the bitmap for SET. */ static inline bool value_exists_in_set_bitmap (value_set_t set, tree v) { if (!set->values) return false; return bitmap_bit_p (set->values, VALUE_HANDLE_ID (v)); } /* Remove value V from the bitmap for SET. */ static void value_remove_from_set_bitmap (value_set_t set, tree v) { #ifdef ENABLE_CHECKING if (!set->indexed) abort (); #endif if (!set->values) return; bitmap_clear_bit (set->values, VALUE_HANDLE_ID (v)); } /* Insert the value number V into the bitmap of values existing in SET. */ static inline void value_insert_into_set_bitmap (value_set_t set, tree v) { #ifdef ENABLE_CHECKING if (!set->indexed) abort (); #endif if (set->values == NULL) { set->values = BITMAP_GGC_ALLOC (); bitmap_clear (set->values); } bitmap_set_bit (set->values, VALUE_HANDLE_ID (v)); } /* Create a new bitmap set and return it. */ static bitmap_set_t bitmap_set_new (void) { bitmap_set_t ret = pool_alloc (bitmap_set_pool); ret->expressions = BITMAP_GGC_ALLOC (); ret->values = BITMAP_GGC_ALLOC (); bitmap_clear (ret->expressions); bitmap_clear (ret->values); return ret; } /* Create a new set. 
*/ static value_set_t set_new (bool indexed) { value_set_t ret; ret = pool_alloc (value_set_pool); ret->head = ret->tail = NULL; ret->length = 0; ret->indexed = indexed; ret->values = NULL; return ret; } /* Insert an expression EXPR into a bitmapped set. */ static void bitmap_insert_into_set (bitmap_set_t set, tree expr) { tree val; /* XXX: For now, we only let SSA_NAMES into the bitmap sets. */ if (TREE_CODE (expr) != SSA_NAME) abort (); val = get_value_handle (expr); if (val == NULL) abort (); if (!is_gimple_min_invariant (val)) bitmap_set_bit (set->values, VALUE_HANDLE_ID (val)); bitmap_set_bit (set->expressions, SSA_NAME_VERSION (expr)); } /* Insert EXPR into SET. */ static void insert_into_set (value_set_t set, tree expr) { value_set_node_t newnode = pool_alloc (value_set_node_pool); tree val = get_value_handle (expr); if (val == NULL) abort (); /* For indexed sets, insert the value into the set value bitmap. For all sets, add it to the linked list and increment the list length. */ if (set->indexed) value_insert_into_set_bitmap (set, val); newnode->next = NULL; newnode->expr = expr; set->length ++; if (set->head == NULL) { set->head = set->tail = newnode; } else { set->tail->next = newnode; set->tail = newnode; } } /* Copy a bitmapped set ORIG, into bitmapped set DEST. */ static void bitmap_set_copy (bitmap_set_t dest, bitmap_set_t orig) { bitmap_copy (dest->expressions, orig->expressions); bitmap_copy (dest->values, orig->values); } /* Copy the set ORIG to the set DEST. */ static void set_copy (value_set_t dest, value_set_t orig) { value_set_node_t node; if (!orig || !orig->head) return; for (node = orig->head; node; node = node->next) { insert_into_set (dest, node->expr); } } /* Remove EXPR from SET. */ static void set_remove (value_set_t set, tree expr) { value_set_node_t node, prev; /* Remove the value of EXPR from the bitmap, decrement the set length, and remove it from the actual double linked list. */ value_remove_from_set_bitmap (set, get_value_handle (expr)); set->length--; prev = NULL; for (node = set->head; node != NULL; prev = node, node = node->next) { if (node->expr == expr) { if (prev == NULL) set->head = node->next; else prev->next= node->next; if (node == set->tail) set->tail = prev; pool_free (value_set_node_pool, node); return; } } } /* Return true if SET contains the value VAL. */ static bool set_contains_value (value_set_t set, tree val) { /* All constants are in every set. */ if (is_gimple_min_invariant (val)) return true; if (set->length == 0) return false; return value_exists_in_set_bitmap (set, val); } /* Return true if bitmapped set SET contains the expression EXPR. */ static bool bitmap_set_contains (bitmap_set_t set, tree expr) { /* XXX: Bitmapped sets only contain SSA_NAME's for now. */ if (TREE_CODE (expr) != SSA_NAME) return false; return bitmap_bit_p (set->expressions, SSA_NAME_VERSION (expr)); } /* Return true if bitmapped set SET contains the value VAL. */ static bool bitmap_set_contains_value (bitmap_set_t set, tree val) { if (is_gimple_min_invariant (val)) return true; return bitmap_bit_p (set->values, VALUE_HANDLE_ID (val)); } /* Replace an instance of value LOOKFOR with expression EXPR in SET. */ static void bitmap_set_replace_value (bitmap_set_t set, tree lookfor, tree expr) { value_set_t exprset; value_set_node_t node; if (is_gimple_min_invariant (lookfor)) return; if (!bitmap_set_contains_value (set, lookfor)) return; /* The number of expressions having a given value is usually significantly less than the total number of expressions in SET. 
Thus, rather than check, for each expression in SET, whether it has the value LOOKFOR, we walk the reverse mapping that tells us what expressions have a given value, and see if any of those expressions are in our set. For large testcases, this is about 5-10x faster than walking the bitmap. If this is somehow a significant lose for some cases, we can choose which set to walk based on the set size. */ exprset = VALUE_HANDLE_EXPR_SET (lookfor); for (node = exprset->head; node; node = node->next) { if (TREE_CODE (node->expr) == SSA_NAME) { if (bitmap_bit_p (set->expressions, SSA_NAME_VERSION (node->expr))) { bitmap_clear_bit (set->expressions, SSA_NAME_VERSION (node->expr)); bitmap_set_bit (set->expressions, SSA_NAME_VERSION (expr)); return; } } } } /* Subtract bitmapped set B from value set A, and return the new set. */ static value_set_t bitmap_set_subtract_from_value_set (value_set_t a, bitmap_set_t b, bool indexed) { value_set_t ret = set_new (indexed); value_set_node_t node; for (node = a->head; node; node = node->next) { if (!bitmap_set_contains (b, node->expr)) insert_into_set (ret, node->expr); } return ret; } /* Return true if two sets are equal. */ static bool set_equal (value_set_t a, value_set_t b) { value_set_node_t node; if (a->length != b->length) return false; for (node = a->head; node; node = node->next) { if (!set_contains_value (b, get_value_handle (node->expr))) return false; } return true; } /* Replace an instance of EXPR's VALUE with EXPR in SET. */ static void bitmap_value_replace_in_set (bitmap_set_t set, tree expr) { tree val = get_value_handle (expr); bitmap_set_replace_value (set, val, expr); } /* Insert EXPR into SET if EXPR's value is not already present in SET. */ static void bitmap_value_insert_into_set (bitmap_set_t set, tree expr) { tree val = get_value_handle (expr); if (is_gimple_min_invariant (val)) return; if (!bitmap_set_contains_value (set, val)) bitmap_insert_into_set (set, expr); } /* Insert the value for EXPR into SET, if it doesn't exist already. */ static void value_insert_into_set (value_set_t set, tree expr) { tree val = get_value_handle (expr); /* Constant and invariant values exist everywhere, and thus, actually keeping them in the sets is pointless. */ if (is_gimple_min_invariant (val)) return; if (!set_contains_value (set, val)) insert_into_set (set, expr); } /* Print out SET to OUTFILE. */ static void bitmap_print_value_set (FILE *outfile, bitmap_set_t set, const char *setname, int blockindex) { fprintf (outfile, "%s[%d] := { ", setname, blockindex); if (set) { int i; EXECUTE_IF_SET_IN_BITMAP (set->expressions, 0, i, { print_generic_expr (outfile, ssa_name (i), 0); fprintf (outfile, " ("); print_generic_expr (outfile, get_value_handle (ssa_name (i)), 0); fprintf (outfile, ") "); if (bitmap_last_set_bit (set->expressions) != i) fprintf (outfile, ", "); }); } fprintf (outfile, " }\n"); } /* Print out the value_set SET to OUTFILE. */ static void print_value_set (FILE *outfile, value_set_t set, const char *setname, int blockindex) { value_set_node_t node; fprintf (outfile, "%s[%d] := { ", setname, blockindex); if (set) { for (node = set->head; node; node = node->next) { print_generic_expr (outfile, node->expr, 0); fprintf (outfile, " ("); print_generic_expr (outfile, get_value_handle (node->expr), 0); fprintf (outfile, ") "); if (node->next) fprintf (outfile, ", "); } } fprintf (outfile, " }\n"); } /* Print out the expressions that have VAL to OUTFILE. 
*/ void print_value_expressions (FILE *outfile, tree val) { if (VALUE_HANDLE_EXPR_SET (val)) { char s[10]; sprintf (s, "VH.%04d", VALUE_HANDLE_ID (val)); print_value_set (outfile, VALUE_HANDLE_EXPR_SET (val), s, 0); } } void debug_value_expressions (tree val) { print_value_expressions (stderr, val); } void debug_value_set (value_set_t, const char *, int); void debug_value_set (value_set_t set, const char *setname, int blockindex) { print_value_set (stderr, set, setname, blockindex); } /* Translate EXPR using phis in PHIBLOCK, so that it has the values of the phis in PRED. Return NULL if we can't find a leader for each part of the translated expression. */ static tree phi_translate (tree expr, value_set_t set, basic_block pred, basic_block phiblock) { tree phitrans = NULL; tree oldexpr = expr; if (expr == NULL) return NULL; /* Phi translations of a given expression don't change, */ phitrans = phi_trans_lookup (expr, pred); if (phitrans) return phitrans; switch (TREE_CODE_CLASS (TREE_CODE (expr))) { case '2': { tree oldop1 = TREE_OPERAND (expr, 0); tree oldop2 = TREE_OPERAND (expr, 1); tree newop1; tree newop2; tree newexpr; newop1 = phi_translate (find_leader (set, oldop1), set, pred, phiblock); if (newop1 == NULL) return NULL; newop2 = phi_translate (find_leader (set, oldop2), set, pred, phiblock); if (newop2 == NULL) return NULL; if (newop1 != oldop1 || newop2 != oldop2) { newexpr = pool_alloc (binary_node_pool); memcpy (newexpr, expr, tree_size (expr)); create_tree_ann (newexpr); TREE_OPERAND (newexpr, 0) = newop1 == oldop1 ? oldop1 : get_value_handle (newop1); TREE_OPERAND (newexpr, 1) = newop2 == oldop2 ? oldop2 : get_value_handle (newop2); vn_lookup_or_add (newexpr, NULL); expr = newexpr; phi_trans_add (oldexpr, newexpr, pred); } } break; /* XXX: Until we have PRE of loads working, none will be ANTIC. */ case 'r': return NULL; break; case '1': { tree oldop1 = TREE_OPERAND (expr, 0); tree newop1; tree newexpr; newop1 = phi_translate (find_leader (set, oldop1), set, pred, phiblock); if (newop1 == NULL) return NULL; if (newop1 != oldop1) { newexpr = pool_alloc (unary_node_pool); memcpy (newexpr, expr, tree_size (expr)); create_tree_ann (newexpr); TREE_OPERAND (newexpr, 0) = get_value_handle (newop1); vn_lookup_or_add (newexpr, NULL); expr = newexpr; phi_trans_add (oldexpr, newexpr, pred); } } break; case 'd': abort (); case 'x': { tree phi = NULL; int i; if (TREE_CODE (expr) != SSA_NAME) abort (); if (TREE_CODE (SSA_NAME_DEF_STMT (expr)) == PHI_NODE) phi = SSA_NAME_DEF_STMT (expr); else return expr; for (i = 0; i < PHI_NUM_ARGS (phi); i++) if (PHI_ARG_EDGE (phi, i)->src == pred) { tree val; if (is_undefined_value (PHI_ARG_DEF (phi, i))) return NULL; val = vn_lookup_or_add (PHI_ARG_DEF (phi, i), NULL); return PHI_ARG_DEF (phi, i); } } break; } return expr; } static void phi_translate_set (value_set_t dest, value_set_t set, basic_block pred, basic_block phiblock) { value_set_node_t node; for (node = set->head; node; node = node->next) { tree translated; translated = phi_translate (node->expr, set, pred, phiblock); phi_trans_add (node->expr, translated, pred); if (translated != NULL) value_insert_into_set (dest, translated); } } /* Find the leader for a value (i.e., the name representing that value) in a given set, and return it. Return NULL if no leader is found. 
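   As a purely illustrative example (the SSA names and value handle
   numbers below are made up): if the expression set recorded for VH.5
   is { b_7, a_2 }, and only the version bit for a_2 is set in SET's
   expression bitmap, then a_2 is the leader returned for VH.5.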
*/ static tree bitmap_find_leader (bitmap_set_t set, tree val) { if (val == NULL) return NULL; if (is_gimple_min_invariant (val)) return val; if (bitmap_set_contains_value (set, val)) { /* Rather than walk the entire bitmap of expressions, and see whether any of them has the value we are looking for, we look at the reverse mapping, which tells us the set of expressions that have a given value (IE value->expressions with that value) and see if any of those expressions are in our set. The number of expressions per value is usually significantly less than the number of expressions in the set. In fact, for large testcases, doing it this way is roughly 5-10x faster than walking the bitmap. If this is somehow a significant lose for some cases, we can choose which set to walk based on which set is smaller. */ value_set_t exprset; value_set_node_t node; exprset = VALUE_HANDLE_EXPR_SET (val); for (node = exprset->head; node; node = node->next) { if (TREE_CODE (node->expr) == SSA_NAME) { if (bitmap_bit_p (set->expressions, SSA_NAME_VERSION (node->expr))) return node->expr; } } } return NULL; } /* Find the leader for a value (i.e., the name representing that value) in a given set, and return it. Return NULL if no leader is found. */ static tree find_leader (value_set_t set, tree val) { value_set_node_t node; if (val == NULL) return NULL; /* Constants represent themselves. */ if (is_gimple_min_invariant (val)) return val; if (set->length == 0) return NULL; if (value_exists_in_set_bitmap (set, val)) { for (node = set->head; node; node = node->next) { if (get_value_handle (node->expr) == val) return node->expr; } } return NULL; } /* Determine if the expression EXPR is valid in SET. This means that we have a leader for each part of the expression (if it consists of values), or the expression is an SSA_NAME. NB: We never should run into a case where we have SSA_NAME + SSA_NAME or SSA_NAME + value. The sets valid_in_set is called on, the ANTIC sets, will only ever have SSA_NAME's or binary value expression (IE VALUE1 + VALUE2) */ static bool valid_in_set (value_set_t set, tree expr) { switch (TREE_CODE_CLASS (TREE_CODE (expr))) { case '2': { tree op1 = TREE_OPERAND (expr, 0); tree op2 = TREE_OPERAND (expr, 1); return set_contains_value (set, op1) && set_contains_value (set, op2); } break; case '1': { tree op1 = TREE_OPERAND (expr, 0); return set_contains_value (set, op1); } break; /* XXX: Until PRE of loads works, no reference nodes are ANTIC. */ case 'r': { return false; } case 'x': { if (TREE_CODE (expr) == SSA_NAME) return true; abort (); } case 'c': abort (); } return false; } /* Clean the set of expressions that are no longer valid in SET. This means expressions that are made up of values we have no leaders for in SET. */ static void clean (value_set_t set) { value_set_node_t node; value_set_node_t next; node = set->head; while (node) { next = node->next; if (!valid_in_set (set, node->expr)) set_remove (set, node->expr); node = next; } } /* Compute the ANTIC set for BLOCK. ANTIC_OUT[BLOCK] = intersection of ANTIC_IN[b] for all succ(BLOCK), if succs(BLOCK) > 1 ANTIC_OUT[BLOCK] = phi_translate (ANTIC_IN[succ(BLOCK)]) if succs(BLOCK) == 1 ANTIC_IN[BLOCK] = clean(ANTIC_OUT[BLOCK] U EXP_GEN[BLOCK] - TMP_GEN[BLOCK]) Iterate until fixpointed. XXX: It would be nice to either write a set_clear, and use it for antic_out, or to mark the antic_out set as deleted at the end of this routine, so that the pool can hand the same memory back out again for the next antic_out. 
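   A small, purely hypothetical illustration of one step: suppose block
   2 has two successors, with

     ANTIC_IN (3) = { VH.1 + VH.2 }
     ANTIC_IN (4) = { VH.1 + VH.2, VH.7 }

   Then ANTIC_OUT (2) is the intersection { VH.1 + VH.2 }.  If
   EXP_GEN (2) contributes VH.8 and TMP_GEN (2) kills nothing used
   here, ANTIC_IN (2) becomes { VH.1 + VH.2, VH.8 }, and clean ()
   would drop either entry if the set lacked a leader for one of its
   operand values.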
*/ static bool compute_antic_aux (basic_block block) { basic_block son; edge e; bool changed = false; value_set_t S, old, ANTIC_OUT; value_set_node_t node; ANTIC_OUT = S = NULL; /* If any edges from predecessors are abnormal, antic_in is empty, so punt. Remember that the block has an incoming abnormal edge by setting the BB_VISITED flag. */ if (! (block->flags & BB_VISITED)) { for (e = block->pred; e; e = e->pred_next) if (e->flags & EDGE_ABNORMAL) { block->flags |= BB_VISITED; break; } } if (block->flags & BB_VISITED) { S = NULL; goto visit_sons; } old = set_new (false); set_copy (old, ANTIC_IN (block)); ANTIC_OUT = set_new (true); /* If the block has no successors, ANTIC_OUT is empty, because it is the exit block. */ if (block->succ == NULL); /* If we have one successor, we could have some phi nodes to translate through. */ else if (block->succ->succ_next == NULL) { phi_translate_set (ANTIC_OUT, ANTIC_IN(block->succ->dest), block, block->succ->dest); } /* If we have multiple successors, we take the intersection of all of them. */ else { varray_type worklist; edge e; size_t i; basic_block bprime, first; VARRAY_BB_INIT (worklist, 1, "succ"); e = block->succ; while (e) { VARRAY_PUSH_BB (worklist, e->dest); e = e->succ_next; } first = VARRAY_BB (worklist, 0); set_copy (ANTIC_OUT, ANTIC_IN (first)); for (i = 1; i < VARRAY_ACTIVE_SIZE (worklist); i++) { bprime = VARRAY_BB (worklist, i); node = ANTIC_OUT->head; while (node) { tree val; value_set_node_t next = node->next; val = get_value_handle (node->expr); if (!set_contains_value (ANTIC_IN (bprime), val)) set_remove (ANTIC_OUT, node->expr); node = next; } } VARRAY_CLEAR (worklist); } /* Generate ANTIC_OUT - TMP_GEN */ S = bitmap_set_subtract_from_value_set (ANTIC_OUT, TMP_GEN (block), false); /* Start ANTIC_IN with EXP_GEN - TMP_GEN */ ANTIC_IN (block) = bitmap_set_subtract_from_value_set (EXP_GEN (block), TMP_GEN (block), true); /* Then union in the ANTIC_OUT - TMP_GEN values, to get ANTIC_OUT U EXP_GEN - TMP_GEN */ for (node = S->head; node; node = node->next) { value_insert_into_set (ANTIC_IN (block), node->expr); } clean (ANTIC_IN (block)); if (!set_equal (old, ANTIC_IN (block))) changed = true; visit_sons: if (dump_file && (dump_flags & TDF_DETAILS)) { if (ANTIC_OUT) print_value_set (dump_file, ANTIC_OUT, "ANTIC_OUT", block->index); print_value_set (dump_file, ANTIC_IN (block), "ANTIC_IN", block->index); if (S) print_value_set (dump_file, S, "S", block->index); } for (son = first_dom_son (CDI_POST_DOMINATORS, block); son; son = next_dom_son (CDI_POST_DOMINATORS, son)) { changed |= compute_antic_aux (son); } return changed; } /* Compute ANTIC sets. */ static void compute_antic (void) { bool changed = true; basic_block bb; int num_iterations = 0; FOR_ALL_BB (bb) { ANTIC_IN (bb) = set_new (true); if (bb->flags & BB_VISITED) abort (); } while (changed) { num_iterations++; changed = false; changed = compute_antic_aux (EXIT_BLOCK_PTR); } FOR_ALL_BB (bb) { bb->flags &= ~BB_VISITED; } if (num_iterations > 2 && dump_file && (dump_flags & TDF_STATS)) fprintf (dump_file, "compute_antic required %d iterations\n", num_iterations); } /* Find a leader for an expression, or generate one using create_expression_by_pieces if it's ANTIC but complex. BLOCK is the basic_block we are looking for leaders in. EXPR is the expression to find a leader or generate for. STMTS is the statement list to put the inserted expressions on. Returns the SSA_NAME of the LHS of the generated expression or the leader. 
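   A hypothetical example: asked for a leader for VH.9 in BLOCK, and
   finding none in either AVAIL_OUT (BLOCK) or NEW_SETS (BLOCK), we
   take the first expression recorded for VH.9, say VH.3 + VH.4, and
   hand it to create_expression_by_pieces, which recursively finds or
   generates leaders for VH.3 and VH.4, appends a statement of the
   form pretmp.N_M = leader3 + leader4 to STMTS, and returns its LHS.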
*/ static tree find_or_generate_expression (basic_block block, tree expr, tree stmts) { tree genop; genop = bitmap_find_leader (AVAIL_OUT (block), expr); /* Depending on the order we process DOM branches in, the value may not have propagated to all the dom children yet during this iteration. In this case, the value will always be in the NEW_SETS for us already, having been propagated from our dominator. */ if (genop == NULL) genop = bitmap_find_leader (NEW_SETS (block), expr); /* If it's still NULL, see if it is a complex expression, and if so, generate it recursively, otherwise, abort, because it's not really . */ if (genop == NULL) { genop = VALUE_HANDLE_EXPR_SET (expr)->head->expr; if (TREE_CODE_CLASS (TREE_CODE (genop)) != '1' && TREE_CODE_CLASS (TREE_CODE (genop)) != '2') abort (); genop = create_expression_by_pieces (block, genop, stmts); } return genop; } /* Create an expression in pieces, so that we can handle very complex expressions that may be ANTIC, but not necessary GIMPLE. BLOCK is the basic block the expression will be inserted into, EXPR is the expression to insert (in value form) STMTS is a statement list to append the necessary insertions into. This function will abort if we hit some value that shouldn't be ANTIC but is (IE there is no leader for it, or its components). This function may also generate expressions that are themselves partially or fully redundant. Those that are will be either made fully redundant during the next iteration of insert (for partially redundant ones), or eliminated by eliminate (for fully redundant ones). */ static tree create_expression_by_pieces (basic_block block, tree expr, tree stmts) { tree name = NULL_TREE; tree newexpr = NULL_TREE; tree v; switch (TREE_CODE_CLASS (TREE_CODE (expr))) { case '2': { tree_stmt_iterator tsi; tree genop1, genop2; tree temp; tree op1 = TREE_OPERAND (expr, 0); tree op2 = TREE_OPERAND (expr, 1); genop1 = find_or_generate_expression (block, op1, stmts); genop2 = find_or_generate_expression (block, op2, stmts); temp = create_tmp_var (TREE_TYPE (expr), "pretmp"); add_referenced_tmp_var (temp); newexpr = build (TREE_CODE (expr), TREE_TYPE (expr), genop1, genop2); newexpr = build (MODIFY_EXPR, TREE_TYPE (expr), temp, newexpr); name = make_ssa_name (temp, newexpr); TREE_OPERAND (newexpr, 0) = name; tsi = tsi_last (stmts); tsi_link_after (&tsi, newexpr, TSI_CONTINUE_LINKING); pre_stats.insertions++; break; } case '1': { tree_stmt_iterator tsi; tree genop1; tree temp; tree op1 = TREE_OPERAND (expr, 0); genop1 = find_or_generate_expression (block, op1, stmts); temp = create_tmp_var (TREE_TYPE (expr), "pretmp"); add_referenced_tmp_var (temp); newexpr = build (TREE_CODE (expr), TREE_TYPE (expr), genop1); newexpr = build (MODIFY_EXPR, TREE_TYPE (expr), temp, newexpr); name = make_ssa_name (temp, newexpr); TREE_OPERAND (newexpr, 0) = name; tsi = tsi_last (stmts); tsi_link_after (&tsi, newexpr, TSI_CONTINUE_LINKING); pre_stats.insertions++; break; } default: abort (); } v = get_value_handle (expr); vn_add (name, v, NULL); bitmap_insert_into_set (NEW_SETS (block), name); bitmap_value_insert_into_set (AVAIL_OUT (block), name); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Inserted "); print_generic_expr (dump_file, newexpr, 0); fprintf (dump_file, " in predecessor %d\n", block->index); } return name; } /* Perform insertion of partially redundant values. For BLOCK, do the following: 1. Propagate the NEW_SETS of the dominator into the current block. If the block has multiple predecessors, 2a. 
Iterate over the ANTIC expressions for the block to see if any of them are partially redundant. 2b. If so, insert them into the necessary predecessors to make the expression fully redundant. 2c. Insert a new PHI merging the values of the predecessors. 2d. Insert the new PHI, and the new expressions, into the NEW_SETS set. 3. Recursively call ourselves on the dominator children of BLOCK. */ static bool insert_aux (basic_block block) { basic_block son; bool new_stuff = false; if (block) { basic_block dom; dom = get_immediate_dominator (CDI_DOMINATORS, block); if (dom) { int i; bitmap_set_t newset = NEW_SETS (dom); EXECUTE_IF_SET_IN_BITMAP (newset->expressions, 0, i, { bitmap_insert_into_set (NEW_SETS (block), ssa_name (i)); bitmap_value_replace_in_set (AVAIL_OUT (block), ssa_name (i)); }); if (block->pred->pred_next) { value_set_node_t node; for (node = ANTIC_IN (block)->head; node; node = node->next) { if (TREE_CODE_CLASS (TREE_CODE (node->expr)) == '2' || TREE_CODE_CLASS (TREE_CODE (node->expr)) == '1') { tree *avail; tree val; bool by_some = false; bool cant_insert = false; bool all_same = true; tree first_s = NULL; edge pred; basic_block bprime; tree eprime; val = get_value_handle (node->expr); if (bitmap_set_contains_value (PHI_GEN (block), val)) continue; if (bitmap_set_contains_value (AVAIL_OUT (dom), val)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Found fully redundant value\n"); continue; } avail = xcalloc (last_basic_block, sizeof (tree)); for (pred = block->pred; pred; pred = pred->pred_next) { tree vprime; tree edoubleprime; bprime = pred->src; eprime = phi_translate (node->expr, ANTIC_IN (block), bprime, block); /* eprime will generally only be NULL if the value of the expression, translated through the PHI for this predecessor, is undefined. If that is the case, we can't make the expression fully redundant, because its value is undefined along a predecessor path. We can thus break out early because it doesn't matter what the rest of the results are. */ if (eprime == NULL) { cant_insert = true; break; } vprime = get_value_handle (eprime); if (!vprime) abort (); edoubleprime = bitmap_find_leader (AVAIL_OUT (bprime), vprime); if (edoubleprime == NULL) { avail[bprime->index] = eprime; all_same = false; } else { avail[bprime->index] = edoubleprime; by_some = true; if (first_s == NULL) first_s = edoubleprime; else if (first_s != edoubleprime) all_same = false; if (first_s != edoubleprime && operand_equal_p (first_s, edoubleprime, 0)) abort (); } } /* If we can insert it, it's not the same value already existing along every predecessor, and it's defined by some predecessor, it is partially redundant. */ if (!cant_insert && !all_same && by_some) { tree type = TREE_TYPE (avail[block->pred->src->index]); tree temp; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Found partial redundancy for expression "); print_generic_expr (dump_file, node->expr, 0); fprintf (dump_file, "\n"); } /* Make the necessary insertions. */ for (pred = block->pred; pred; pred = pred->pred_next) { tree stmts = alloc_stmt_list (); tree builtexpr; bprime = pred->src; eprime = avail[bprime->index]; if (TREE_CODE_CLASS (TREE_CODE (eprime)) == '2' || TREE_CODE_CLASS (TREE_CODE (eprime)) == '1') { builtexpr = create_expression_by_pieces (bprime, eprime, stmts); bsi_insert_on_edge (pred, stmts); bsi_commit_edge_inserts (NULL); avail[bprime->index] = builtexpr; } } /* Now build a phi for the new variable. 
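   (Illustration, with made-up names: if one predecessor edge already
   supplies the leader a_4 and the other just received the inserted
   pretmp.6_11, the new PHI is prephitmp.7_13 = PHI of a_4 and
   pretmp.6_11, and its result then stands in for the value in
   AVAIL_OUT of BLOCK.)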
*/ temp = create_tmp_var (type, "prephitmp"); add_referenced_tmp_var (temp); temp = create_phi_node (temp, block); vn_add (PHI_RESULT (temp), val, NULL); #if 0 if (!set_contains_value (AVAIL_OUT (block), val)) insert_into_set (AVAIL_OUT (block), PHI_RESULT (temp)); else #endif bitmap_value_replace_in_set (AVAIL_OUT (block), PHI_RESULT (temp)); for (pred = block->pred; pred; pred = pred->pred_next) { add_phi_arg (&temp, avail[pred->src->index], pred); } if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Created phi "); print_generic_expr (dump_file, temp, 0); fprintf (dump_file, " in block %d\n", block->index); } pre_stats.phis++; new_stuff = true; bitmap_insert_into_set (NEW_SETS (block), PHI_RESULT (temp)); bitmap_insert_into_set (PHI_GEN (block), PHI_RESULT (temp)); } free (avail); } } } } } for (son = first_dom_son (CDI_DOMINATORS, block); son; son = next_dom_son (CDI_DOMINATORS, son)) { new_stuff |= insert_aux (son); } return new_stuff; } /* Perform insertion of partially redundant values. */ static void insert_pre (void) { bool new_stuff = true; basic_block bb; int num_iterations = 0; FOR_ALL_BB (bb) NEW_SETS (bb) = bitmap_set_new (); while (new_stuff) { num_iterations++; new_stuff = false; new_stuff = insert_aux (ENTRY_BLOCK_PTR); } if (num_iterations > 2 && dump_file && (dump_flags & TDF_STATS)) fprintf (dump_file, "insert required %d iterations\n", num_iterations); } /* Return true if VAR is an SSA variable with no defining statement in this procedure, *AND* isn't a live-on-entry parameter. */ static bool is_undefined_value (tree expr) { return (TREE_CODE (expr) == SSA_NAME && IS_EMPTY_STMT (SSA_NAME_DEF_STMT (expr)) /* PARM_DECLs and hard registers are always defined. */ && TREE_CODE (SSA_NAME_VAR (expr)) != PARM_DECL && !DECL_HARD_REGISTER (SSA_NAME_VAR (expr))); } /* Given an SSA variable VAR and an expression EXPR, compute the value number for EXPR and create a value handle (VAL) for it. If VAR and EXPR are not the same, associate VAL with VAR. Finally, add VAR to S1 and its value handle to S2. VUSES represent the virtual use operands associated with EXPR (if any). They are used when computing the hash value for EXPR. */ static inline void add_to_sets (tree var, tree expr, vuse_optype vuses, bitmap_set_t s1, bitmap_set_t s2) { tree val = vn_lookup_or_add (expr, vuses); /* VAR and EXPR may be the same when processing statements for which we are not computing value numbers (e.g., non-assignments, or statements that make aliased stores). In those cases, we are only interested in making VAR available as its own value. */ if (var != expr) vn_add (var, val, vuses); bitmap_insert_into_set (s1, var); bitmap_value_insert_into_set (s2, var); } /* Given a unary or binary expression EXPR, create and return a new expression with the same structure as EXPR but with its operands replaced with the value handles of each of the operands of EXPR. Insert EXPR's operands into the EXP_GEN set for BLOCK. VUSES represent the virtual use operands associated with EXPR (if any). They are used when computing the hash value for EXPR. 
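   For example (hypothetical names and value numbers): for the RHS
   a_2 + b_7, where vn_lookup_or_add assigns a_2 the value VH.3 and
   b_7 the value VH.4, the returned value expression is VH.3 + VH.4,
   and both a_2 and b_7 are inserted into EXP_GEN (BLOCK) unless they
   are undefined values.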
*/ static inline tree create_value_expr_from (tree expr, basic_block block, vuse_optype vuses) { int i; enum tree_code code = TREE_CODE (expr); tree vexpr; #if defined ENABLE_CHECKING if (TREE_CODE_CLASS (code) != '1' && TREE_CODE_CLASS (code) != '2') abort (); #endif if (TREE_CODE_CLASS (code) == '1') vexpr = pool_alloc (unary_node_pool); else vexpr = pool_alloc (binary_node_pool); memcpy (vexpr, expr, tree_size (expr)); for (i = 0; i < TREE_CODE_LENGTH (code); i++) { tree op = TREE_OPERAND (expr, i); if (op != NULL) { tree val = vn_lookup_or_add (op, vuses); if (!is_undefined_value (op)) value_insert_into_set (EXP_GEN (block), op); TREE_TYPE (val) = TREE_TYPE (TREE_OPERAND (vexpr, i)); TREE_OPERAND (vexpr, i) = val; } } return vexpr; } /* Compute the AVAIL set for BLOCK. This function performs value numbering of the statements in BLOCK. The AVAIL sets are built from information we glean while doing this value numbering, since the AVAIL sets contain only one entry per value. AVAIL_IN[BLOCK] = AVAIL_OUT[dom(BLOCK)]. AVAIL_OUT[BLOCK] = AVAIL_IN[BLOCK] U PHI_GEN[BLOCK] U TMP_GEN[BLOCK]. */ static void compute_avail (basic_block block) { basic_block son; /* For arguments with default definitions, we pretend they are defined in the entry block. */ if (block == ENTRY_BLOCK_PTR) { tree param; for (param = DECL_ARGUMENTS (current_function_decl); param; param = TREE_CHAIN (param)) { if (default_def (param) != NULL) { tree val; tree def = default_def (param); val = vn_lookup_or_add (def, NULL); bitmap_insert_into_set (TMP_GEN (block), def); bitmap_value_insert_into_set (AVAIL_OUT (block), def); } } } else if (block) { block_stmt_iterator bsi; tree stmt, phi; basic_block dom; /* Initially, the set of available values in BLOCK is that of its immediate dominator. */ dom = get_immediate_dominator (CDI_DOMINATORS, block); if (dom) bitmap_set_copy (AVAIL_OUT (block), AVAIL_OUT (dom)); /* Generate values for PHI nodes. */ for (phi = phi_nodes (block); phi; phi = PHI_CHAIN (phi)) /* We have no need for virtual phis, as they don't represent actual computations. */ if (is_gimple_reg (PHI_RESULT (phi))) add_to_sets (PHI_RESULT (phi), PHI_RESULT (phi), NULL, PHI_GEN (block), AVAIL_OUT (block)); /* Now compute value numbers and populate value sets with all the expressions computed in BLOCK. */ for (bsi = bsi_start (block); !bsi_end_p (bsi); bsi_next (&bsi)) { stmt_ann_t ann; size_t j; stmt = bsi_stmt (bsi); ann = stmt_ann (stmt); get_stmt_operands (stmt); /* We are only interested in assignments of the form X_i = EXPR, where EXPR represents an "interesting" computation, it has no volatile operands and X_i doesn't flow through an abnormal edge. */ if (TREE_CODE (stmt) == MODIFY_EXPR && !ann->has_volatile_ops && TREE_CODE (TREE_OPERAND (stmt, 0)) == SSA_NAME && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (TREE_OPERAND (stmt, 0))) { tree lhs = TREE_OPERAND (stmt, 0); tree rhs = TREE_OPERAND (stmt, 1); vuse_optype vuses = STMT_VUSE_OPS (stmt); STRIP_USELESS_TYPE_CONVERSION (rhs); if (TREE_CODE_CLASS (TREE_CODE (rhs)) == '1' || TREE_CODE_CLASS (TREE_CODE (rhs)) == '2') { /* For binary, unary, and reference expressions, create a duplicate expression with the operands replaced with the value handles of the original RHS. 
*/ tree newt = create_value_expr_from (rhs, block, vuses); add_to_sets (lhs, newt, vuses, TMP_GEN (block), AVAIL_OUT (block)); value_insert_into_set (EXP_GEN (block), newt); continue; } else if (TREE_CODE (rhs) == SSA_NAME || is_gimple_min_invariant (rhs)) { /* Compute a value number for the RHS of the statement and add its value to the AVAIL_OUT set for the block. Add the LHS to TMP_GEN. */ add_to_sets (lhs, rhs, vuses, TMP_GEN (block), AVAIL_OUT (block)); if (TREE_CODE (rhs) == SSA_NAME && !is_undefined_value (rhs)) value_insert_into_set (EXP_GEN (block), rhs); continue; } } /* For any other statement that we don't recognize, simply make the names generated by the statement available in AVAIL_OUT and TMP_GEN. */ for (j = 0; j < NUM_DEFS (STMT_DEF_OPS (stmt)); j++) { tree def = DEF_OP (STMT_DEF_OPS (stmt), j); add_to_sets (def, def, NULL, TMP_GEN (block), AVAIL_OUT (block)); } for (j = 0; j < NUM_USES (STMT_USE_OPS (stmt)); j++) { tree use = USE_OP (STMT_USE_OPS (stmt), j); add_to_sets (use, use, NULL, TMP_GEN (block), AVAIL_OUT (block)); } } } /* Compute available sets for the dominator children of BLOCK. */ for (son = first_dom_son (CDI_DOMINATORS, block); son; son = next_dom_son (CDI_DOMINATORS, son)) compute_avail (son); } /* Eliminate fully redundant computations. */ static void eliminate (void) { basic_block b; FOR_EACH_BB (b) { block_stmt_iterator i; for (i = bsi_start (b); !bsi_end_p (i); bsi_next (&i)) { tree stmt = bsi_stmt (i); /* Lookup the RHS of the expression, see if we have an available computation for it. If so, replace the RHS with the available computation. */ if (TREE_CODE (stmt) == MODIFY_EXPR && TREE_CODE (TREE_OPERAND (stmt, 0)) == SSA_NAME && TREE_CODE (TREE_OPERAND (stmt ,1)) != SSA_NAME && !is_gimple_min_invariant (TREE_OPERAND (stmt, 1)) && !stmt_ann (stmt)->has_volatile_ops) { tree lhs = TREE_OPERAND (stmt, 0); tree *rhs_p = &TREE_OPERAND (stmt, 1); tree sprime; vuse_optype vuses = STMT_VUSE_OPS (stmt); sprime = bitmap_find_leader (AVAIL_OUT (b), vn_lookup (lhs, vuses)); if (sprime && sprime != lhs && (TREE_CODE (*rhs_p) != SSA_NAME || may_propagate_copy (*rhs_p, sprime))) { if (sprime == *rhs_p) abort (); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Replaced "); print_generic_expr (dump_file, *rhs_p, 0); fprintf (dump_file, " with "); print_generic_expr (dump_file, sprime, 0); fprintf (dump_file, " in "); print_generic_stmt (dump_file, stmt, 0); } pre_stats.eliminations++; propagate_tree_value (rhs_p, sprime); modify_stmt (stmt); } } } } } /* Initialize data structures used by PRE. 
*/ static void init_pre (void) { size_t tsize; basic_block bb; vn_init (); memset (&pre_stats, 0, sizeof (pre_stats)); FOR_ALL_BB (bb) bb->aux = xcalloc (1, sizeof (struct bb_value_sets)); phi_translate_table = htab_create (511, expr_pred_trans_hash, expr_pred_trans_eq, free); value_set_pool = create_alloc_pool ("Value sets", sizeof (struct value_set), 30); bitmap_set_pool = create_alloc_pool ("Bitmap sets", sizeof (struct bitmap_set), 30); value_set_node_pool = create_alloc_pool ("Value set nodes", sizeof (struct value_set_node), 30); calculate_dominance_info (CDI_POST_DOMINATORS); calculate_dominance_info (CDI_DOMINATORS); tsize = tree_size (build (PLUS_EXPR, void_type_node, NULL_TREE, NULL_TREE)); binary_node_pool = create_alloc_pool ("Binary tree nodes", tsize, 30); tsize = tree_size (build1 (NEGATE_EXPR, void_type_node, NULL_TREE)); unary_node_pool = create_alloc_pool ("Unary tree nodes", tsize, 30); FOR_ALL_BB (bb) { EXP_GEN (bb) = set_new (true); PHI_GEN (bb) = bitmap_set_new (); TMP_GEN (bb) = bitmap_set_new (); AVAIL_OUT (bb) = bitmap_set_new (); } } /* Deallocate data structures used by PRE. */ static void fini_pre (void) { basic_block bb; free_alloc_pool (value_set_pool); free_alloc_pool (bitmap_set_pool); free_alloc_pool (value_set_node_pool); free_alloc_pool (binary_node_pool); free_alloc_pool (unary_node_pool); htab_delete (phi_translate_table); FOR_ALL_BB (bb) { free (bb->aux); bb->aux = NULL; } free_dominance_info (CDI_POST_DOMINATORS); vn_delete (); } /* Main entry point to the SSA-PRE pass. DO_FRE is true if the caller only wants to do full redundancy elimination. */ static void execute_pre (bool do_fre) { init_pre (); /* Collect and value number expressions computed in each basic block. */ compute_avail (ENTRY_BLOCK_PTR); if (dump_file && (dump_flags & TDF_DETAILS)) { basic_block bb; FOR_ALL_BB (bb) { print_value_set (dump_file, EXP_GEN (bb), "exp_gen", bb->index); bitmap_print_value_set (dump_file, TMP_GEN (bb), "tmp_gen", bb->index); bitmap_print_value_set (dump_file, AVAIL_OUT (bb), "avail_out", bb->index); } } /* Insert can get quite slow on an incredibly large number of basic blocks due to some quadratic behavior. Until this behavior is fixed, don't run it when he have an incredibly large number of bb's. If we aren't going to run insert, there is no point in computing ANTIC, either, even though it's plenty fast. */ if (!do_fre && n_basic_blocks < 4000) { compute_antic (); insert_pre (); } /* Remove all the redundant expressions. */ eliminate (); if (dump_file && (dump_flags & TDF_STATS)) { fprintf (dump_file, "Insertions:%d\n", pre_stats.insertions); fprintf (dump_file, "New PHIs:%d\n", pre_stats.phis); fprintf (dump_file, "Eliminated:%d\n", pre_stats.eliminations); } fini_pre (); } /* Gate and execute functions for PRE. */ static void do_pre (void) { execute_pre (false); } static bool gate_pre (void) { return flag_tree_pre != 0; } struct tree_opt_pass pass_pre = { "pre", /* name */ gate_pre, /* gate */ do_pre, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_PRE, /* tv_id */ PROP_no_crit_edges | PROP_cfg | PROP_ssa,/* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_ggc_collect | TODO_verify_ssa /* todo_flags_finish */ }; /* Gate and execute functions for FRE. 
*/ static void do_fre (void) { execute_pre (true); } static bool gate_fre (void) { return flag_tree_fre != 0; } struct tree_opt_pass pass_fre = { "fre", /* name */ gate_fre, /* gate */ do_fre, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_FRE, /* tv_id */ PROP_no_crit_edges | PROP_cfg | PROP_ssa,/* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_ggc_collect | TODO_verify_ssa /* todo_flags_finish */ }; /* Liveness for SSA trees. Copyright (C) 2003 Free Software Foundation, Inc. Contributed by Andrew MacLeod This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static void live_worklist (tree_live_info_p, varray_type, int); static tree_live_info_p new_tree_live_info (var_map); static inline void set_if_valid (var_map, bitmap, tree); static inline void add_livein_if_notdef (tree_live_info_p, bitmap, tree, basic_block); static inline void register_ssa_partition (var_map, tree, bool); static inline void add_conflicts_if_valid (tpa_p, conflict_graph, var_map, bitmap, tree); static partition_pair_p find_partition_pair (coalesce_list_p, int, int, bool); /* This is where the mapping from SSA version number to real storage variable is tracked. All SSA versions of the same variable may not ultimately be mapped back to the same real variable. In that instance, we need to detect the live range overlap, and give one of the variable new storage. The vector 'partition_to_var' tracks which partition maps to which variable. Given a VAR, it is sometimes desirable to know which partition that VAR represents. There is an additional field in the variable annotation to track that information. */ /* Create a variable partition map of SIZE, initialize and return it. */ var_map init_var_map (int size) { var_map map; map = (var_map) xmalloc (sizeof (struct _var_map)); map->var_partition = partition_new (size); map->partition_to_var = (tree *)xmalloc (size * sizeof (tree)); memset (map->partition_to_var, 0, size * sizeof (tree)); map->partition_to_compact = NULL; map->compact_to_partition = NULL; map->num_partitions = size; map->partition_size = size; map->ref_count = NULL; return map; } /* Free memory associated with MAP. */ void delete_var_map (var_map map) { free (map->partition_to_var); partition_delete (map->var_partition); if (map->partition_to_compact) free (map->partition_to_compact); if (map->compact_to_partition) free (map->compact_to_partition); if (map->ref_count) free (map->ref_count); free (map); } /* This function will combine the partitions in MAP for VAR1 and VAR2. It Returns the partition which represents the new partition. If the two partitions cannot be combined, NO_PARTITION is returned. */ int var_union (var_map map, tree var1, tree var2) { int p1, p2, p3; tree root_var = NULL_TREE; tree other_var = NULL_TREE; /* This is independent of partition_to_compact. 
If partition_to_compact is on, then whichever one of these partitions is absorbed will never have a dereference into the partition_to_compact array any more. */ if (TREE_CODE (var1) == SSA_NAME) p1 = partition_find (map->var_partition, SSA_NAME_VERSION (var1)); else { p1 = var_to_partition (map, var1); if (map->compact_to_partition) p1 = map->compact_to_partition[p1]; root_var = var1; } if (TREE_CODE (var2) == SSA_NAME) p2 = partition_find (map->var_partition, SSA_NAME_VERSION (var2)); else { p2 = var_to_partition (map, var2); if (map->compact_to_partition) p2 = map->compact_to_partition[p2]; /* If there is no root_var set, or it's not a user variable, set the root_var to this one. */ if (!root_var || is_gimple_tmp_var (root_var)) { other_var = root_var; root_var = var2; } else other_var = var2; } if (p1 == NO_PARTITION || p2 == NO_PARTITION) abort (); if (p1 == p2) p3 = p1; else p3 = partition_union (map->var_partition, p1, p2); if (map->partition_to_compact) p3 = map->partition_to_compact[p3]; if (root_var) change_partition_var (map, root_var, p3); if (other_var) change_partition_var (map, other_var, p3); return p3; } /* Compress the partition numbers in MAP such that they fall in the range 0..(num_partitions-1) instead of wherever they turned out during the partitioning exercise. This removes any references to unused partitions, thereby allowing bitmaps and other vectors to be much denser. Compression type is controlled by FLAGS. This is implemented such that compaction doesn't affect partitioning. I.e., once partitions are created and possibly merged, running one or more different kinds of compaction will not affect the partitions themselves. Their index might change, but all the same variables will still be members of the same partition group. This allows work on reduced sets, and no loss of information when a larger set is later desired. In particular, coalescing can work on partitions which have 2 or more definitions, and then 'recompact' later to include all the single definitions for assignment to program variables. */ void compact_var_map (var_map map, int flags) { sbitmap used; int x, limit, count, tmp, root, root_i; tree var; root_var_p rv = NULL; limit = map->partition_size; used = sbitmap_alloc (limit); sbitmap_zero (used); /* Already compressed? Abandon the old one. */ if (map->partition_to_compact) { free (map->partition_to_compact); map->partition_to_compact = NULL; } if (map->compact_to_partition) { free (map->compact_to_partition); map->compact_to_partition = NULL; } map->num_partitions = map->partition_size; if (flags & VARMAP_NO_SINGLE_DEFS) rv = root_var_init (map); map->partition_to_compact = (int *)xmalloc (limit * sizeof (int)); memset (map->partition_to_compact, 0xff, (limit * sizeof (int))); /* Find out which partitions are actually referenced. */ count = 0; for (x = 0; x < limit; x++) { tmp = partition_find (map->var_partition, x); if (!TEST_BIT (used, tmp) && map->partition_to_var[tmp] != NULL_TREE) { /* It is referenced, check to see if there is more than one version in the root_var table, if one is available. */ if (rv) { root = root_var_find (rv, tmp); root_i = root_var_first_partition (rv, root); /* If there is only one, don't include this in the compaction. */ if (root_var_next_partition (rv, root_i) == ROOT_VAR_NONE) continue; } SET_BIT (used, tmp); count++; } } /* Build a compacted partitioning. */ if (count != limit) { map->compact_to_partition = (int *)xmalloc (count * sizeof (int)); count = 0; /* SSA renaming begins at 1, so skip 0 when compacting.
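   As an illustration (these numbers are invented; they are not from the
   original sources): if only partitions 3, 7 and 9 survive the filtering
   above, the loop below produces
      partition_to_compact[3] = 0    compact_to_partition[0] = 3
      partition_to_compact[7] = 1    compact_to_partition[1] = 7
      partition_to_compact[9] = 2    compact_to_partition[2] = 9
   so clients can index dense vectors with 0..count-1 while the underlying
   partition numbers stay unchanged.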
*/ EXECUTE_IF_SET_IN_SBITMAP (used, 1, x, { map->partition_to_compact[x] = count; map->compact_to_partition[count] = x; var = map->partition_to_var[x]; if (TREE_CODE (var) != SSA_NAME) change_partition_var (map, var, count); count++; }); } else { free (map->partition_to_compact); map->partition_to_compact = NULL; } map->num_partitions = count; if (rv) root_var_delete (rv); sbitmap_free (used); } /* This function is used to change the representative variable in MAP for VAR's partition from an SSA_NAME variable to a regular variable. This allows partitions to be mapped back to real variables. */ void change_partition_var (var_map map, tree var, int part) { var_ann_t ann; if (TREE_CODE (var) == SSA_NAME) abort(); ann = var_ann (var); ann->out_of_ssa_tag = 1; VAR_ANN_PARTITION (ann) = part; if (map->compact_to_partition) map->partition_to_var[map->compact_to_partition[part]] = var; } /* This function looks through the program and uses FLAGS to determine what SSA versioned variables are given entries in a new partition table. This new partition map is returned. */ var_map create_ssa_var_map (int flags) { block_stmt_iterator bsi; basic_block bb; tree dest, use; tree stmt; stmt_ann_t ann; vuse_optype vuses; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; use_optype uses; def_optype defs; unsigned x; var_map map; #if defined ENABLE_CHECKING sbitmap used_in_real_ops; sbitmap used_in_virtual_ops; #endif map = init_var_map (num_ssa_names + 1); #if defined ENABLE_CHECKING used_in_real_ops = sbitmap_alloc (num_referenced_vars); sbitmap_zero (used_in_real_ops); used_in_virtual_ops = sbitmap_alloc (num_referenced_vars); sbitmap_zero (used_in_virtual_ops); #endif if (flags & SSA_VAR_MAP_REF_COUNT) { map->ref_count = (int *)xmalloc (((num_ssa_names + 1) * sizeof (int))); memset (map->ref_count, 0, (num_ssa_names + 1) * sizeof (int)); } FOR_EACH_BB (bb) { tree phi, arg; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { int i; register_ssa_partition (map, PHI_RESULT (phi), false); for (i = 0; i < PHI_NUM_ARGS (phi); i++) { arg = PHI_ARG_DEF (phi, i); if (TREE_CODE (arg) == SSA_NAME) register_ssa_partition (map, arg, true); } } for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { stmt = bsi_stmt (bsi); get_stmt_operands (stmt); ann = stmt_ann (stmt); /* Register USE and DEF operands in each statement. */ uses = USE_OPS (ann); for (x = 0; x < NUM_USES (uses); x++) { use = USE_OP (uses, x); register_ssa_partition (map, use, true); #if defined ENABLE_CHECKING SET_BIT (used_in_real_ops, var_ann (SSA_NAME_VAR (use))->uid); #endif } defs = DEF_OPS (ann); for (x = 0; x < NUM_DEFS (defs); x++) { dest = DEF_OP (defs, x); register_ssa_partition (map, dest, false); #if defined ENABLE_CHECKING SET_BIT (used_in_real_ops, var_ann (SSA_NAME_VAR (dest))->uid); #endif } /* While we do not care about virtual operands for out of SSA, we do need to look at them to make sure we mark all the variables which are used. 
*/ vuses = VUSE_OPS (ann); for (x = 0; x < NUM_VUSES (vuses); x++) { tree var = VUSE_OP (vuses, x); set_is_used (var); #if defined ENABLE_CHECKING SET_BIT (used_in_virtual_ops, var_ann (SSA_NAME_VAR (var))->uid); #endif } v_may_defs = V_MAY_DEF_OPS (ann); for (x = 0; x < NUM_V_MAY_DEFS (v_may_defs); x++) { tree var = V_MAY_DEF_OP (v_may_defs, x); set_is_used (var); #if defined ENABLE_CHECKING SET_BIT (used_in_virtual_ops, var_ann (SSA_NAME_VAR (var))->uid); #endif } v_must_defs = V_MUST_DEF_OPS (ann); for (x = 0; x < NUM_V_MUST_DEFS (v_must_defs); x++) { tree var = V_MUST_DEF_OP (v_must_defs, x); set_is_used (var); #if defined ENABLE_CHECKING SET_BIT (used_in_virtual_ops, var_ann (SSA_NAME_VAR (var))->uid); #endif } } } #if defined ENABLE_CHECKING { unsigned i; sbitmap both = sbitmap_alloc (num_referenced_vars); sbitmap_a_and_b (both, used_in_real_ops, used_in_virtual_ops); if (sbitmap_first_set_bit (both) >= 0) { EXECUTE_IF_SET_IN_SBITMAP (both, 0, i, fprintf (stderr, "Variable %s used in real and virtual operands\n", get_name (referenced_var (i)))); abort (); } sbitmap_free (used_in_real_ops); sbitmap_free (used_in_virtual_ops); sbitmap_free (both); } #endif return map; } /* Allocate and return a new live range information object based on MAP. */ static tree_live_info_p new_tree_live_info (var_map map) { tree_live_info_p live; int x; live = (tree_live_info_p) xmalloc (sizeof (struct tree_live_info_d)); live->map = map; live->num_blocks = last_basic_block; live->global = BITMAP_XMALLOC (); live->livein = (bitmap *)xmalloc (num_var_partitions (map) * sizeof (bitmap)); for (x = 0; x < num_var_partitions (map); x++) live->livein[x] = BITMAP_XMALLOC (); /* liveout is deferred until it is actually requested. */ live->liveout = NULL; return live; } /* Free storage for live range info object LIVE. */ void delete_tree_live_info (tree_live_info_p live) { int x; if (live->liveout) { for (x = live->num_blocks - 1; x >= 0; x--) BITMAP_XFREE (live->liveout[x]); free (live->liveout); } if (live->livein) { for (x = num_var_partitions (live->map) - 1; x >= 0; x--) BITMAP_XFREE (live->livein[x]); free (live->livein); } if (live->global) BITMAP_XFREE (live->global); free (live); } /* Using LIVE, fill in all the live-on-entry blocks between the defs and uses for partition I. STACK is a varray used for temporary memory which is passed in rather than being allocated on every call. */ static void live_worklist (tree_live_info_p live, varray_type stack, int i) { int b; tree var; basic_block def_bb = NULL; edge e; var_map map = live->map; var = partition_to_var (map, i); if (SSA_NAME_DEF_STMT (var)) def_bb = bb_for_stmt (SSA_NAME_DEF_STMT (var)); EXECUTE_IF_SET_IN_BITMAP (live->livein[i], 0, b, { VARRAY_PUSH_INT (stack, b); }); while (VARRAY_ACTIVE_SIZE (stack) > 0) { b = VARRAY_TOP_INT (stack); VARRAY_POP (stack); for (e = BASIC_BLOCK (b)->pred; e; e = e->pred_next) if (e->src != ENTRY_BLOCK_PTR) { /* It's not live on entry to the block it's defined in. */ if (e->src == def_bb) continue; if (!bitmap_bit_p (live->livein[i], e->src->index)) { bitmap_set_bit (live->livein[i], e->src->index); VARRAY_PUSH_INT (stack, e->src->index); } } } } /* If VAR is in a partition of MAP, set the bit for that partition in VEC. */ static inline void set_if_valid (var_map map, bitmap vec, tree var) { int p = var_to_partition (map, var); if (p != NO_PARTITION) bitmap_set_bit (vec, p); } /* If VAR is in a partition and it isn't defined in DEF_VEC, set the livein and global bit for it in the LIVE object. BB is the block being processed.
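   A small illustration (hypothetical names, not taken from the original
   comment): while scanning block 7, a use of a_2 whose partition bit is
   still clear in DEF_VEC causes bit 7 to be set in livein for that
   partition, and the partition to be marked in the global bitmap;
   live_worklist later walks predecessor edges from such blocks to finish
   the live-on-entry sets.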
*/ static inline void add_livein_if_notdef (tree_live_info_p live, bitmap def_vec, tree var, basic_block bb) { int p = var_to_partition (live->map, var); if (p == NO_PARTITION || bb == ENTRY_BLOCK_PTR) return; if (!bitmap_bit_p (def_vec, p)) { bitmap_set_bit (live->livein[p], bb->index); bitmap_set_bit (live->global, p); } } /* Given partition map MAP, calculate all the live on entry bitmaps for each basic block. Return a live info object. */ tree_live_info_p calculate_live_on_entry (var_map map) { tree_live_info_p live; int num, i; basic_block bb; bitmap saw_def; tree phi, var, stmt; tree op; edge e; varray_type stack; block_stmt_iterator bsi; use_optype uses; def_optype defs; stmt_ann_t ann; saw_def = BITMAP_XMALLOC (); live = new_tree_live_info (map); FOR_EACH_BB (bb) { bitmap_clear (saw_def); for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { for (i = 0; i < PHI_NUM_ARGS (phi); i++) { var = PHI_ARG_DEF (phi, i); if (!phi_ssa_name_p (var)) continue; stmt = SSA_NAME_DEF_STMT (var); e = PHI_ARG_EDGE (phi, i); /* Any uses in PHIs which either don't have def's or are not defined in the block from which the def comes, will be live on entry to that block. */ if (!stmt || e->src != bb_for_stmt (stmt)) add_livein_if_notdef (live, saw_def, var, e->src); } } /* Don't mark PHI results as defined until all the PHI nodes have been processed. If the PHI sequence is: a_3 = PHI b_3 = PHI The a_3 referred to in b_3's PHI node is the one incoming on the edge, *not* the PHI node just seen. */ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { var = PHI_RESULT (phi); set_if_valid (map, saw_def, var); } for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { stmt = bsi_stmt (bsi); get_stmt_operands (stmt); ann = stmt_ann (stmt); uses = USE_OPS (ann); num = NUM_USES (uses); for (i = 0; i < num; i++) { op = USE_OP (uses, i); add_livein_if_notdef (live, saw_def, op, bb); } defs = DEF_OPS (ann); num = NUM_DEFS (defs); for (i = 0; i < num; i++) { op = DEF_OP (defs, i); set_if_valid (map, saw_def, op); } } } VARRAY_INT_INIT (stack, last_basic_block, "stack"); EXECUTE_IF_SET_IN_BITMAP (live->global, 0, i, { live_worklist (live, stack, i); }); #ifdef ENABLE_CHECKING /* Check for live on entry partitions and report those with a DEF in the program. This will typically mean an optimization has done something wrong. 
*/ bb = ENTRY_BLOCK_PTR; num = 0; for (e = bb->succ; e; e = e->succ_next) { int entry_block = e->dest->index; if (e->dest == EXIT_BLOCK_PTR) continue; for (i = 0; i < num_var_partitions (map); i++) { basic_block tmp; tree d; var = partition_to_var (map, i); stmt = SSA_NAME_DEF_STMT (var); tmp = bb_for_stmt (stmt); d = default_def (SSA_NAME_VAR (var)); if (bitmap_bit_p (live_entry_blocks (live, i), entry_block)) { if (!IS_EMPTY_STMT (stmt)) { num++; print_generic_expr (stderr, var, TDF_SLIM); fprintf (stderr, " is defined "); if (tmp) fprintf (stderr, " in BB%d, ", tmp->index); fprintf (stderr, "by:\n"); print_generic_expr (stderr, stmt, TDF_SLIM); fprintf (stderr, "\nIt is also live-on-entry to entry BB %d", entry_block); fprintf (stderr, " So it appears to have multiple defs.\n"); } else { if (d != var) { num++; print_generic_expr (stderr, var, TDF_SLIM); fprintf (stderr, " is live-on-entry to BB%d ",entry_block); if (d) { fprintf (stderr, " but is not the default def of "); print_generic_expr (stderr, d, TDF_SLIM); fprintf (stderr, "\n"); } else fprintf (stderr, " and there is no default def.\n"); } } } else if (d == var) { /* The only way this var shouldn't be marked live on entry is if it occurs in a PHI argument of the block. */ int z, ok = 0; for (phi = phi_nodes (e->dest); phi && !ok; phi = PHI_CHAIN (phi)) { for (z = 0; z < PHI_NUM_ARGS (phi); z++) if (var == PHI_ARG_DEF (phi, z)) { ok = 1; break; } } if (ok) continue; num++; print_generic_expr (stderr, var, TDF_SLIM); fprintf (stderr, " is not marked live-on-entry to entry BB%d ", entry_block); fprintf (stderr, "but it is a default def so it should be.\n"); } } } if (num > 0) abort (); #endif BITMAP_XFREE (saw_def); return live; } /* Calculate the live on exit vectors based on the entry info in LIVEINFO. */ void calculate_live_on_exit (tree_live_info_p liveinfo) { unsigned b; int i, x; bitmap *on_exit; basic_block bb; edge e; tree t, phi; bitmap on_entry; var_map map = liveinfo->map; on_exit = (bitmap *)xmalloc (last_basic_block * sizeof (bitmap)); for (x = 0; x < last_basic_block; x++) on_exit[x] = BITMAP_XMALLOC (); /* Set all the live-on-exit bits for uses in PHIs. */ FOR_EACH_BB (bb) { for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) for (i = 0; i < PHI_NUM_ARGS (phi); i++) { t = PHI_ARG_DEF (phi, i); e = PHI_ARG_EDGE (phi, i); if (!phi_ssa_name_p (t) || e->src == ENTRY_BLOCK_PTR) continue; set_if_valid (map, on_exit[e->src->index], t); } } /* Set live on exit for all predecessors of live on entry's. */ for (i = 0; i < num_var_partitions (map); i++) { on_entry = live_entry_blocks (liveinfo, i); EXECUTE_IF_SET_IN_BITMAP (on_entry, 0, b, { for (e = BASIC_BLOCK(b)->pred; e; e = e->pred_next) if (e->src != ENTRY_BLOCK_PTR) bitmap_set_bit (on_exit[e->src->index], i); }); } liveinfo->liveout = on_exit; } /* Initialize a tree_partition_associator object using MAP. 
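   Sketch of the layout (this example is illustrative and is not part of
   the original sources): every tree seen so far gets a slot in the
   'trees' varray; first_partition[i] holds one partition associated with
   trees[i] and next_partition[] chains the rest, ending at TPA_NONE.
   For instance
      trees[0] = a   first_partition[0] = 6
      next_partition[6] = 2   next_partition[2] = TPA_NONE
   says partitions 6 and 2 both belong to tree 'a', and
   partition_to_tree_map[6] == partition_to_tree_map[2] == 0.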
*/ tpa_p tpa_init (var_map map) { tpa_p tpa; int num_partitions = num_var_partitions (map); int x; if (num_partitions == 0) return NULL; tpa = (tpa_p) xmalloc (sizeof (struct tree_partition_associator_d)); tpa->num_trees = 0; tpa->uncompressed_num = -1; tpa->map = map; tpa->next_partition = (int *)xmalloc (num_partitions * sizeof (int)); memset (tpa->next_partition, TPA_NONE, num_partitions * sizeof (int)); tpa->partition_to_tree_map = (int *)xmalloc (num_partitions * sizeof (int)); memset (tpa->partition_to_tree_map, TPA_NONE, num_partitions * sizeof (int)); x = MAX (40, (num_partitions / 20)); VARRAY_TREE_INIT (tpa->trees, x, "trees"); VARRAY_INT_INIT (tpa->first_partition, x, "first_partition"); return tpa; } /* Remove PARTITION_INDEX from TREE_INDEX's list in the tpa structure TPA. */ void tpa_remove_partition (tpa_p tpa, int tree_index, int partition_index) { int i; i = tpa_first_partition (tpa, tree_index); if (i == partition_index) { VARRAY_INT (tpa->first_partition, tree_index) = tpa->next_partition[i]; } else { for ( ; i != TPA_NONE; i = tpa_next_partition (tpa, i)) { if (tpa->next_partition[i] == partition_index) { tpa->next_partition[i] = tpa->next_partition[partition_index]; break; } } } } /* Free the memory used by tree_partition_associator object TPA. */ void tpa_delete (tpa_p tpa) { if (!tpa) return; free (tpa->partition_to_tree_map); free (tpa->next_partition); free (tpa); } /* This function will remove any tree entries from TPA which have only a single element. This will help keep the size of the conflict graph down. The function returns the number of remaining tree lists. */ int tpa_compact (tpa_p tpa) { int last, x, y, first, swap_i; tree swap_t; /* Find the last list which has more than 1 partition. */ for (last = tpa->num_trees - 1; last > 0; last--) { first = tpa_first_partition (tpa, last); if (tpa_next_partition (tpa, first) != NO_PARTITION) break; } x = 0; while (x < last) { first = tpa_first_partition (tpa, x); /* If there is not more than one partition, swap with the current end of the tree list. */ if (tpa_next_partition (tpa, first) == NO_PARTITION) { swap_t = VARRAY_TREE (tpa->trees, last); swap_i = VARRAY_INT (tpa->first_partition, last); /* Update the last entry. Since it is known to only have one partition, there is nothing else to update. */ VARRAY_TREE (tpa->trees, last) = VARRAY_TREE (tpa->trees, x); VARRAY_INT (tpa->first_partition, last) = VARRAY_INT (tpa->first_partition, x); tpa->partition_to_tree_map[tpa_first_partition (tpa, last)] = last; /* Since this list is known to have more than one partition, update the list owner entries. */ VARRAY_TREE (tpa->trees, x) = swap_t; VARRAY_INT (tpa->first_partition, x) = swap_i; for (y = tpa_first_partition (tpa, x); y != NO_PARTITION; y = tpa_next_partition (tpa, y)) tpa->partition_to_tree_map[y] = x; /* Ensure last is a list with more than one partition. */ last--; for (; last > x; last--) { first = tpa_first_partition (tpa, last); if (tpa_next_partition (tpa, first) != NO_PARTITION) break; } } x++; } first = tpa_first_partition (tpa, x); if (tpa_next_partition (tpa, first) != NO_PARTITION) x++; tpa->uncompressed_num = tpa->num_trees; tpa->num_trees = x; return last; } /* Initialize a root_var object with SSA partitions from MAP which are based on each root variable. 
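   A hypothetical example (not taken from the sources): given partitions
   for a_1, a_5 and b_2, root_var_init builds two tree lists, one for the
   root variable 'a' holding the partitions of a_1 and a_5, and one for
   'b' holding the partition of b_2.  Later coalescing then only
   considers partitions that share a list.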
*/ root_var_p root_var_init (var_map map) { root_var_p rv; int num_partitions = num_var_partitions (map); int x, p; tree t; var_ann_t ann; sbitmap seen; rv = tpa_init (map); if (!rv) return NULL; seen = sbitmap_alloc (num_partitions); sbitmap_zero (seen); /* Start at the end and work towards the front. This will provide a list that is ordered from smallest to largest. */ for (x = num_partitions - 1; x >= 0; x--) { t = partition_to_var (map, x); /* The var map may not be compacted yet, so check for NULL. */ if (!t) continue; p = var_to_partition (map, t); #ifdef ENABLE_CHECKING if (p == NO_PARTITION) abort (); #endif /* Make sure we only put coalesced partitions into the list once. */ if (TEST_BIT (seen, p)) continue; SET_BIT (seen, p); if (TREE_CODE (t) == SSA_NAME) t = SSA_NAME_VAR (t); ann = var_ann (t); if (ann->root_var_processed) { rv->next_partition[p] = VARRAY_INT (rv->first_partition, VAR_ANN_ROOT_INDEX (ann)); VARRAY_INT (rv->first_partition, VAR_ANN_ROOT_INDEX (ann)) = p; } else { ann->root_var_processed = 1; VAR_ANN_ROOT_INDEX (ann) = rv->num_trees++; VARRAY_PUSH_TREE (rv->trees, t); VARRAY_PUSH_INT (rv->first_partition, p); } rv->partition_to_tree_map[p] = VAR_ANN_ROOT_INDEX (ann); } /* Reset the out_of_ssa_tag flag on each variable for later use. */ for (x = 0; x < rv->num_trees; x++) { t = VARRAY_TREE (rv->trees, x); var_ann (t)->root_var_processed = 0; } sbitmap_free (seen); return rv; } /* Initialize a type_var structure which associates all the partitions in MAP of the same type to the type node's index. Volatiles are ignored. */ type_var_p type_var_init (var_map map) { type_var_p tv; int x, y, p; int num_partitions = num_var_partitions (map); tree t; sbitmap seen; seen = sbitmap_alloc (num_partitions); sbitmap_zero (seen); tv = tpa_init (map); if (!tv) return NULL; for (x = num_partitions - 1; x >= 0; x--) { t = partition_to_var (map, x); /* Disallow coalescing of these types of variables. */ if (!t || TREE_THIS_VOLATILE (t) || TREE_CODE (t) == RESULT_DECL || TREE_CODE (t) == PARM_DECL || (DECL_P (t) && (DECL_REGISTER (t) || !DECL_ARTIFICIAL (t) || DECL_RTL_SET_P (t)))) continue; p = var_to_partition (map, t); #ifdef ENABLE_CHECKING if (p == NO_PARTITION) abort (); #endif /* If partitions have been coalesced, only add the representative for the partition to the list once. */ if (TEST_BIT (seen, p)) continue; SET_BIT (seen, p); t = TREE_TYPE (t); /* Find the list for this type. */ for (y = 0; y < tv->num_trees; y++) if (t == VARRAY_TREE (tv->trees, y)) break; if (y == tv->num_trees) { tv->num_trees++; VARRAY_PUSH_TREE (tv->trees, t); VARRAY_PUSH_INT (tv->first_partition, p); } else { tv->next_partition[p] = VARRAY_INT (tv->first_partition, y); VARRAY_INT (tv->first_partition, y) = p; } tv->partition_to_tree_map[p] = y; } sbitmap_free (seen); return tv; } /* Create a new coalesce list object from MAP and return it. */ coalesce_list_p create_coalesce_list (var_map map) { coalesce_list_p list; list = (coalesce_list_p) xmalloc (sizeof (struct coalesce_list_d)); list->map = map; list->add_mode = true; list->list = (partition_pair_p *) xcalloc (num_var_partitions (map), sizeof (struct partition_pair_d)); return list; } /* Delete coalesce list CL. */ void delete_coalesce_list (coalesce_list_p cl) { free (cl->list); free (cl); } /* Find a matching coalesce pair object in CL for partitions P1 and P2. If one isn't found, return NULL if CREATE is false, otherwise create a new coalesce pair object and return it. 
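   The pairs are kept in singly linked lists indexed by the smaller
   partition number and sorted by the larger one, so (an illustrative
   call, not one from the sources) find_partition_pair (cl, 9, 4, true)
   normalizes the request to the pair (4, 9), walks cl->list[4] until it
   sees a second_partition greater than 9, and either returns the node
   already there or splices in a new node with cost 0 at that point.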
*/ static partition_pair_p find_partition_pair (coalesce_list_p cl, int p1, int p2, bool create) { partition_pair_p node, tmp; int s; /* Normalize so that p1 is the smaller value. */ if (p2 < p1) { s = p1; p1 = p2; p2 = s; } tmp = NULL; /* The list is sorted such that if we find a value greater than p2, p2 is not in the list. */ for (node = cl->list[p1]; node; node = node->next) { if (node->second_partition == p2) return node; else if (node->second_partition > p2) break; tmp = node; } if (!create) return NULL; node = (partition_pair_p) xmalloc (sizeof (struct partition_pair_d)); node->first_partition = p1; node->second_partition = p2; node->cost = 0; if (tmp != NULL) { node->next = tmp->next; tmp->next = node; } else { /* This is now the first node in the list. */ node->next = cl->list[p1]; cl->list[p1] = node; } return node; } /* Add a potential coalesce between P1 and P2 in CL with a cost of VALUE. */ void add_coalesce (coalesce_list_p cl, int p1, int p2, int value) { partition_pair_p node; #ifdef ENABLE_CHECKING if (!cl->add_mode) abort(); #endif if (p1 == p2) return; node = find_partition_pair (cl, p1, p2, true); node->cost += value; } /* Comparison function to allow qsort to sort P1 and P2 in descending order. */ static int compare_pairs (const void *p1, const void *p2) { return (*(partition_pair_p *)p2)->cost - (*(partition_pair_p *)p1)->cost; } /* Prepare CL for removal of preferred pairs. When finished, list element 0 has all the coalesce pairs, sorted in order from most important coalesce to least important. */ void sort_coalesce_list (coalesce_list_p cl) { int x, num, count; partition_pair_p chain, p; partition_pair_p *list; if (!cl->add_mode) abort(); cl->add_mode = false; /* Compact the array of lists to a single list, and count the elements. */ num = 0; chain = NULL; for (x = 0; x < num_var_partitions (cl->map); x++) if (cl->list[x] != NULL) { for (p = cl->list[x]; p->next != NULL; p = p->next) num++; num++; p->next = chain; chain = cl->list[x]; cl->list[x] = NULL; } /* Only call qsort if there are more than 2 items. */ if (num > 2) { list = xmalloc (sizeof (partition_pair_p) * num); count = 0; for (p = chain; p != NULL; p = p->next) list[count++] = p; #ifdef ENABLE_CHECKING if (count != num) abort (); #endif qsort (list, count, sizeof (partition_pair_p), compare_pairs); p = list[0]; for (x = 1; x < num; x++) { p->next = list[x]; p = list[x]; } p->next = NULL; cl->list[0] = list[0]; free (list); } else { cl->list[0] = chain; if (num == 2) { /* Simply swap the two elements if they are in the wrong order. */ if (chain->cost < chain->next->cost) { cl->list[0] = chain->next; cl->list[0]->next = chain; chain->next = NULL; } } } } /* Retrieve the best remaining pair to coalesce from CL. Returns the 2 partitions via P1 and P2. Their calculated cost is returned by the function. NO_BEST_COALESCE is returned if the coalesce list is empty. */ int pop_best_coalesce (coalesce_list_p cl, int *p1, int *p2) { partition_pair_p node; int ret; if (cl->add_mode) abort(); node = cl->list[0]; if (!node) return NO_BEST_COALESCE; cl->list[0] = node->next; *p1 = node->first_partition; *p2 = node->second_partition; ret = node->cost; free (node); return ret; } /* If variable VAR is in a partition in MAP, add a conflict in GRAPH between VAR and any other live partitions in VEC which are associated via TPA. Reset the live bit in VEC. 
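   For instance (purely illustrative): if VAR maps to partition 5, whose
   tpa list also contains partitions 2 and 9, and both 2 and 9 are
   currently live in VEC, the conflicts (5,2) and (5,9) are added to
   GRAPH.  Partitions belonging to a different tpa list are never
   recorded as conflicting here, which keeps the conflict graph small.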
*/ static inline void add_conflicts_if_valid (tpa_p tpa, conflict_graph graph, var_map map, bitmap vec, tree var) { int p, y, first; p = var_to_partition (map, var); if (p != NO_PARTITION) { bitmap_clear_bit (vec, p); first = tpa_find_tree (tpa, p); /* If find returns nothing, this object isn't interesting. */ if (first == TPA_NONE) return; /* Only add interferences between objects in the same list. */ for (y = tpa_first_partition (tpa, first); y != TPA_NONE; y = tpa_next_partition (tpa, y)) { if (bitmap_bit_p (vec, y)) conflict_graph_add (graph, p, y); } } } /* Return a conflict graph for the information contained in LIVE_INFO. Only conflicts between items in the same TPA list are added. If optional coalesce list CL is passed in, any copies encountered are added. */ conflict_graph build_tree_conflict_graph (tree_live_info_p liveinfo, tpa_p tpa, coalesce_list_p cl) { conflict_graph graph; var_map map; bitmap live; int num, x, y, i; basic_block bb; varray_type partition_link, tpa_to_clear, tpa_nodes; def_optype defs; use_optype uses; unsigned l; map = live_var_map (liveinfo); graph = conflict_graph_new (num_var_partitions (map)); if (tpa_num_trees (tpa) == 0) return graph; live = BITMAP_XMALLOC (); VARRAY_INT_INIT (partition_link, num_var_partitions (map) + 1, "part_link"); VARRAY_INT_INIT (tpa_nodes, tpa_num_trees (tpa), "tpa nodes"); VARRAY_INT_INIT (tpa_to_clear, 50, "tpa to clear"); FOR_EACH_BB (bb) { block_stmt_iterator bsi; tree phi; /* Start with live on exit temporaries. */ bitmap_copy (live, live_on_exit (liveinfo, bb)); for (bsi = bsi_last (bb); !bsi_end_p (bsi); bsi_prev (&bsi)) { bool is_a_copy = false; tree stmt = bsi_stmt (bsi); stmt_ann_t ann; get_stmt_operands (stmt); ann = stmt_ann (stmt); /* A copy between 2 partitions does not introduce an interference by itself. If they did, you would never be able to coalesce two things which are copied. If the two variables really do conflict, they will conflict elsewhere in the program. This is handled specially here since we may also be interested in copies between real variables and SSA_NAME variables. We may be interested in trying to coalesce SSA_NAME variables with root variables in some cases. */ if (TREE_CODE (stmt) == MODIFY_EXPR) { tree lhs = TREE_OPERAND (stmt, 0); tree rhs = TREE_OPERAND (stmt, 1); int p1, p2; int bit; if (DECL_P (lhs) || TREE_CODE (lhs) == SSA_NAME) p1 = var_to_partition (map, lhs); else p1 = NO_PARTITION; if (DECL_P (rhs) || TREE_CODE (rhs) == SSA_NAME) p2 = var_to_partition (map, rhs); else p2 = NO_PARTITION; if (p1 != NO_PARTITION && p2 != NO_PARTITION) { is_a_copy = true; bit = bitmap_bit_p (live, p2); /* If the RHS is live, make it not live while we add the conflicts, then make it live again. */ if (bit) bitmap_clear_bit (live, p2); add_conflicts_if_valid (tpa, graph, map, live, lhs); if (bit) bitmap_set_bit (live, p2); if (cl) add_coalesce (cl, p1, p2, 1); set_if_valid (map, live, rhs); } } if (!is_a_copy) { tree var; defs = DEF_OPS (ann); num = NUM_DEFS (defs); for (x = 0; x < num; x++) { var = DEF_OP (defs, x); add_conflicts_if_valid (tpa, graph, map, live, var); } uses = USE_OPS (ann); num = NUM_USES (uses); for (x = 0; x < num; x++) { var = USE_OP (uses, x); set_if_valid (map, live, var); } } } /* If result of a PHI is unused, then the loops over the statements will not record any conflicts. However, since the PHI node is going to be translated out of SSA form we must record a conflict between the result of the PHI and any variables with are live. 
Otherwise the out-of-ssa translation may create incorrect code. */ for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { tree result = PHI_RESULT (phi); int p = var_to_partition (map, result); if (p != NO_PARTITION && ! bitmap_bit_p (live, p)) add_conflicts_if_valid (tpa, graph, map, live, result); } /* Anything which is still live at this point interferes. In order to implement this efficiently, only conflicts between partitions which have the same TPA root need be added. TPA roots which have been seen are tracked in 'tpa_nodes'. A nonzero entry points to an index into 'partition_link', which then indexes into itself forming a linked list of partitions sharing a tpa root which have been seen as live up to this point. Since partitions start at index zero, all entries in partition_link are (partition + 1). Conflicts are added between the current partition and any already seen. tpa_clear contains all the tpa_roots processed, and these are the only entries which need to be zero'd out for a clean restart. */ EXECUTE_IF_SET_IN_BITMAP (live, 0, x, { i = tpa_find_tree (tpa, x); if (i != TPA_NONE) { int start = VARRAY_INT (tpa_nodes, i); /* If start is 0, a new root reference list is being started. Register it to be cleared. */ if (!start) VARRAY_PUSH_INT (tpa_to_clear, i); /* Add interferences to other tpa members seen. */ for (y = start; y != 0; y = VARRAY_INT (partition_link, y)) conflict_graph_add (graph, x, y - 1); VARRAY_INT (tpa_nodes, i) = x + 1; VARRAY_INT (partition_link, x + 1) = start; } }); /* Now clear the used tpa root references. */ for (l = 0; l < VARRAY_ACTIVE_SIZE (tpa_to_clear); l++) VARRAY_INT (tpa_nodes, VARRAY_INT (tpa_to_clear, l)) = 0; VARRAY_POP_ALL (tpa_to_clear); } BITMAP_XFREE (live); return graph; } /* This routine will attempt to coalesce the elements in TPA subject to the conflicts found in GRAPH. If optional coalesce_list CL is provided, only coalesces specified within the coalesce list are attempted. Otherwise an attempt is made to coalesce as many partitions within each TPA grouping as possible. If DEBUG is provided, debug output will be sent there. */ void coalesce_tpa_members (tpa_p tpa, conflict_graph graph, var_map map, coalesce_list_p cl, FILE *debug) { int x, y, z, w; tree var, tmp; /* Attempt to coalesce any items in a coalesce list. */ if (cl) { while (pop_best_coalesce (cl, &x, &y) != NO_BEST_COALESCE) { if (debug) { fprintf (debug, "Coalesce list: (%d)", x); print_generic_expr (debug, partition_to_var (map, x), TDF_SLIM); fprintf (debug, " & (%d)", y); print_generic_expr (debug, partition_to_var (map, y), TDF_SLIM); } w = tpa_find_tree (tpa, x); z = tpa_find_tree (tpa, y); if (w != z || w == TPA_NONE || z == TPA_NONE) { if (debug) { if (w != z) fprintf (debug, ": Fail, Non-matching TPA's\n"); if (w == TPA_NONE) fprintf (debug, ": Fail %d non TPA.\n", x); else fprintf (debug, ": Fail %d non TPA.\n", y); } continue; } var = partition_to_var (map, x); tmp = partition_to_var (map, y); x = var_to_partition (map, var); y = var_to_partition (map, tmp); if (debug) fprintf (debug, " [map: %d, %d] ", x, y); if (x == y) { if (debug) fprintf (debug, ": Already Coalesced.\n"); continue; } if (!conflict_graph_conflict_p (graph, x, y)) { z = var_union (map, var, tmp); if (z == NO_PARTITION) { if (debug) fprintf (debug, ": Unable to perform partition union.\n"); continue; } /* z is the new combined partition. We need to remove the other partition from the list. Set x to be that other partition. 
*/ if (z == x) { conflict_graph_merge_regs (graph, x, y); w = tpa_find_tree (tpa, y); tpa_remove_partition (tpa, w, y); } else { conflict_graph_merge_regs (graph, y, x); w = tpa_find_tree (tpa, x); tpa_remove_partition (tpa, w, x); } if (debug) fprintf (debug, ": Success -> %d\n", z); } else if (debug) fprintf (debug, ": Fail due to conflict\n"); } /* If using a coalesce list, don't try to coalesce anything else. */ return; } for (x = 0; x < tpa_num_trees (tpa); x++) { while (tpa_first_partition (tpa, x) != TPA_NONE) { int p1, p2; /* Coalesce first partition with anything that doesn't conflict. */ y = tpa_first_partition (tpa, x); tpa_remove_partition (tpa, x, y); var = partition_to_var (map, y); /* p1 is the partition representative to which y belongs. */ p1 = var_to_partition (map, var); for (z = tpa_next_partition (tpa, y); z != TPA_NONE; z = tpa_next_partition (tpa, z)) { tmp = partition_to_var (map, z); /* p2 is the partition representative to which z belongs. */ p2 = var_to_partition (map, tmp); if (debug) { fprintf (debug, "Coalesce : "); print_generic_expr (debug, var, TDF_SLIM); fprintf (debug, " &"); print_generic_expr (debug, tmp, TDF_SLIM); fprintf (debug, " (%d ,%d)", p1, p2); } /* If partitions are already merged, don't check for conflict. */ if (tmp == var) { tpa_remove_partition (tpa, x, z); if (debug) fprintf (debug, ": Already coalesced\n"); } else if (!conflict_graph_conflict_p (graph, p1, p2)) { int v; if (tpa_find_tree (tpa, y) == TPA_NONE || tpa_find_tree (tpa, z) == TPA_NONE) { if (debug) fprintf (debug, ": Fail non-TPA member\n"); continue; } if ((v = var_union (map, var, tmp)) == NO_PARTITION) { if (debug) fprintf (debug, ": Fail cannot combine partitions\n"); continue; } tpa_remove_partition (tpa, x, z); if (v == p1) conflict_graph_merge_regs (graph, v, z); else { /* Update the first partition's representative. */ conflict_graph_merge_regs (graph, v, y); p1 = v; } /* The root variable of the partition may be changed now. */ var = partition_to_var (map, p1); if (debug) fprintf (debug, ": Success -> %d\n", v); } else if (debug) fprintf (debug, ": Fail, Conflict\n"); } } } } /* Send debug info for coalesce list CL to file F. */ void dump_coalesce_list (FILE *f, coalesce_list_p cl) { partition_pair_p node; int x, num; tree var; if (cl->add_mode) { fprintf (f, "Coalesce List:\n"); num = num_var_partitions (cl->map); for (x = 0; x < num; x++) { node = cl->list[x]; if (node) { fprintf (f, "["); print_generic_expr (f, partition_to_var (cl->map, x), TDF_SLIM); fprintf (f, "] - "); for ( ; node; node = node->next) { var = partition_to_var (cl->map, node->second_partition); print_generic_expr (f, var, TDF_SLIM); fprintf (f, "(%1d), ", node->cost); } fprintf (f, "\n"); } } } else { fprintf (f, "Sorted Coalesce list:\n"); for (node = cl->list[0]; node; node = node->next) { fprintf (f, "(%d) ", node->cost); var = partition_to_var (cl->map, node->first_partition); print_generic_expr (f, var, TDF_SLIM); fprintf (f, " : "); var = partition_to_var (cl->map, node->second_partition); print_generic_expr (f, var, TDF_SLIM); fprintf (f, "\n"); } } } /* Output tree_partition_associator object TPA to file F.. 
*/ void tpa_dump (FILE *f, tpa_p tpa) { int x, i; if (!tpa) return; for (x = 0; x < tpa_num_trees (tpa); x++) { print_generic_expr (f, tpa_tree (tpa, x), TDF_SLIM); fprintf (f, " : ("); for (i = tpa_first_partition (tpa, x); i != TPA_NONE; i = tpa_next_partition (tpa, i)) { fprintf (f, "(%d)",i); print_generic_expr (f, partition_to_var (tpa->map, i), TDF_SLIM); fprintf (f, " "); #ifdef ENABLE_CHECKING if (tpa_find_tree (tpa, i) != x) fprintf (f, "**find tree incorrectly set** "); #endif } fprintf (f, ")\n"); } fflush (f); } /* Output partition map MAP to file F. */ void dump_var_map (FILE *f, var_map map) { int t; unsigned x, y; int p; fprintf (f, "\nPartition map \n\n"); for (x = 0; x < map->num_partitions; x++) { if (map->compact_to_partition != NULL) p = map->compact_to_partition[x]; else p = x; if (map->partition_to_var[p] == NULL_TREE) continue; t = 0; for (y = 1; y < num_ssa_names; y++) { p = partition_find (map->var_partition, y); if (map->partition_to_compact) p = map->partition_to_compact[p]; if (p == (int)x) { if (t++ == 0) { fprintf(f, "Partition %d (", x); print_generic_expr (f, partition_to_var (map, p), TDF_SLIM); fprintf (f, " - "); } fprintf (f, "%d ", y); } } if (t != 0) fprintf (f, ")\n"); } fprintf (f, "\n"); } /* Output live range info LIVE to file F, controlled by FLAG. */ void dump_live_info (FILE *f, tree_live_info_p live, int flag) { basic_block bb; int i; var_map map = live->map; if ((flag & LIVEDUMP_ENTRY) && live->livein) { FOR_EACH_BB (bb) { fprintf (f, "\nLive on entry to BB%d : ", bb->index); for (i = 0; i < num_var_partitions (map); i++) { if (bitmap_bit_p (live_entry_blocks (live, i), bb->index)) { print_generic_expr (f, partition_to_var (map, i), TDF_SLIM); fprintf (f, " "); } } fprintf (f, "\n"); } } if ((flag & LIVEDUMP_EXIT) && live->liveout) { FOR_EACH_BB (bb) { fprintf (f, "\nLive on exit from BB%d : ", bb->index); EXECUTE_IF_SET_IN_BITMAP (live->liveout[bb->index], 0, i, { print_generic_expr (f, partition_to_var (map, i), TDF_SLIM); fprintf (f, " "); }); fprintf (f, "\n"); } } } /* Register partitions in MAP so that we can take VARS out of SSA form. This requires a walk over all the PHI nodes and all the statements. */ void register_ssa_partitions_for_vars (bitmap vars, var_map map) { basic_block bb; if (bitmap_first_set_bit (vars) >= 0) { /* Find every instance (SSA_NAME) of variables in VARs and register a new partition for them. This requires examining every statement and every PHI node once. */ FOR_EACH_BB (bb) { block_stmt_iterator bsi; tree next; tree phi; /* Register partitions for SSA_NAMEs appearing in the PHI nodes in this basic block. Note we delete PHI nodes in this loop if they are associated with virtual vars which are going to be renamed. */ for (phi = phi_nodes (bb); phi; phi = next) { tree result = SSA_NAME_VAR (PHI_RESULT (phi)); next = PHI_CHAIN (phi); if (bitmap_bit_p (vars, var_ann (result)->uid)) { if (! is_gimple_reg (result)) remove_phi_node (phi, NULL_TREE, bb); else { int i; /* Register a partition for the result. */ register_ssa_partition (map, PHI_RESULT (phi), 0); /* Register a partition for each argument as needed. */ for (i = 0; i < PHI_NUM_ARGS (phi); i++) { tree arg = PHI_ARG_DEF (phi, i); if (TREE_CODE (arg) != SSA_NAME) continue; if (!bitmap_bit_p (vars, var_ann (SSA_NAME_VAR (arg))->uid)) continue; register_ssa_partition (map, arg, 1); } } } } /* Now register partitions for SSA_NAMEs appearing in each statement in this block. */ for (bsi = bsi_start (bb); ! 
bsi_end_p (bsi); bsi_next (&bsi)) { stmt_ann_t ann = stmt_ann (bsi_stmt (bsi)); use_optype uses = USE_OPS (ann); def_optype defs = DEF_OPS (ann); unsigned int i; for (i = 0; i < NUM_USES (uses); i++) { tree op = USE_OP (uses, i); if (TREE_CODE (op) == SSA_NAME && bitmap_bit_p (vars, var_ann (SSA_NAME_VAR (op))->uid)) register_ssa_partition (map, op, 1); } for (i = 0; i < NUM_DEFS (defs); i++) { tree op = DEF_OP (defs, i); if (TREE_CODE (op) == SSA_NAME && bitmap_bit_p (vars, var_ann (SSA_NAME_VAR (op))->uid)) register_ssa_partition (map, op, 0); } } } } } /* SSA operands management for trees. Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Flags to describe operand properties in get_stmt_operands and helpers. */ /* By default, operands are loaded. */ #define opf_none 0 /* Operand is the target of an assignment expression or a call-clobbered variable */ #define opf_is_def (1 << 0) /* Operand is the target of an assignment expression. */ #define opf_kill_def (1 << 2) /* No virtual operands should be created in the expression. This is used when traversing ADDR_EXPR nodes which have different semantics than other expressions. Inside an ADDR_EXPR node, the only operands that we need to consider are indices into arrays. For instance, &a.b[i] should generate a USE of 'i' but it should not generate a VUSE for 'a' nor a VUSE for 'b'. */ #define opf_no_vops (1 << 1) /* Array for building all the def operands. */ static GTY (()) varray_type build_defs; /* Array for building all the use operands. */ static GTY (()) varray_type build_uses; /* Array for building all the v_may_def operands. */ static GTY (()) varray_type build_v_may_defs; /* Array for building all the vuse operands. */ static GTY (()) varray_type build_vuses; /* Array for building all the v_must_def operands. */ static GTY (()) varray_type build_v_must_defs; #ifdef ENABLE_CHECKING tree check_build_stmt; #endif typedef struct voperands_d { v_may_def_optype v_may_def_ops; vuse_optype vuse_ops; v_must_def_optype v_must_def_ops; } *voperands_t; static void note_addressable (tree, stmt_ann_t); static void get_expr_operands (tree, tree *, int, voperands_t); static inline void append_def (tree *, tree); static inline void append_use (tree *, tree); static void append_v_may_def (tree, tree, voperands_t); static void append_v_must_def (tree, tree, voperands_t); static void add_call_clobber_ops (tree, voperands_t); static void add_call_read_ops (tree, voperands_t); static void add_stmt_operand (tree *, tree, int, voperands_t); /* Return a vector of contiguous memory of a specified size. 
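   Each allocator below uses the usual trailing-array idiom: the struct
   declares a one-element array, so a vector of NUM entries is obtained
   by asking ggc_alloc for sizeof (the struct) plus room for NUM - 1
   extra elements, e.g.
      size = sizeof (struct def_optype_d) + sizeof (tree *) * (num - 1);
   (this line merely restates what allocate_def_optype does below and is
   shown for illustration only).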
*/ static inline def_optype allocate_def_optype (unsigned num) { def_optype def_ops; unsigned size; size = sizeof (struct def_optype_d) + sizeof (tree *) * (num - 1); def_ops = ggc_alloc (size); def_ops->num_defs = num; return def_ops; } static inline use_optype allocate_use_optype (unsigned num) { use_optype use_ops; unsigned size; size = sizeof (struct use_optype_d) + sizeof (tree *) * (num - 1); use_ops = ggc_alloc (size); use_ops->num_uses = num; return use_ops; } static inline v_may_def_optype allocate_v_may_def_optype (unsigned num) { v_may_def_optype v_may_def_ops; unsigned size; size = sizeof (struct v_may_def_optype_d) + sizeof (tree) * ((num * 2) - 1); v_may_def_ops = ggc_alloc (size); v_may_def_ops->num_v_may_defs = num; return v_may_def_ops; } static inline vuse_optype allocate_vuse_optype (unsigned num) { vuse_optype vuse_ops; unsigned size; size = sizeof (struct vuse_optype_d) + sizeof (tree) * (num - 1); vuse_ops = ggc_alloc (size); vuse_ops->num_vuses = num; return vuse_ops; } static inline v_must_def_optype allocate_v_must_def_optype (unsigned num) { v_must_def_optype v_must_def_ops; unsigned size; size = sizeof (struct v_must_def_optype_d) + sizeof (tree *) * (num - 1); v_must_def_ops = ggc_alloc (size); v_must_def_ops->num_v_must_defs = num; return v_must_def_ops; } static inline void free_uses (use_optype *uses, bool dealloc) { if (*uses) { if (dealloc) ggc_free (*uses); *uses = NULL; } } static inline void free_defs (def_optype *defs, bool dealloc) { if (*defs) { if (dealloc) ggc_free (*defs); *defs = NULL; } } static inline void free_vuses (vuse_optype *vuses, bool dealloc) { if (*vuses) { if (dealloc) ggc_free (*vuses); *vuses = NULL; } } static inline void free_v_may_defs (v_may_def_optype *v_may_defs, bool dealloc) { if (*v_may_defs) { if (dealloc) ggc_free (*v_may_defs); *v_may_defs = NULL; } } static inline void free_v_must_defs (v_must_def_optype *v_must_defs, bool dealloc) { if (*v_must_defs) { if (dealloc) ggc_free (*v_must_defs); *v_must_defs = NULL; } } void remove_vuses (tree stmt) { stmt_ann_t ann; ann = stmt_ann (stmt); if (ann) free_vuses (&(ann->vuse_ops), true); } void remove_v_may_defs (tree stmt) { stmt_ann_t ann; ann = stmt_ann (stmt); if (ann) free_v_may_defs (&(ann->v_may_def_ops), true); } void remove_v_must_defs (tree stmt) { stmt_ann_t ann; ann = stmt_ann (stmt); if (ann) free_v_must_defs (&(ann->v_must_def_ops), true); } void init_ssa_operands (void) { VARRAY_TREE_PTR_INIT (build_defs, 5, "build defs"); VARRAY_TREE_PTR_INIT (build_uses, 10, "build uses"); VARRAY_TREE_INIT (build_v_may_defs, 10, "build v_may_defs"); VARRAY_TREE_INIT (build_vuses, 10, "build vuses"); VARRAY_TREE_INIT (build_v_must_defs, 10, "build v_must_defs"); } void fini_ssa_operands (void) { } static void finalize_ssa_defs (tree stmt) { unsigned num, x; stmt_ann_t ann; def_optype def_ops; num = VARRAY_ACTIVE_SIZE (build_defs); if (num == 0) return; #ifdef ENABLE_CHECKING /* There should only be a single real definition per assignment. */ if (TREE_CODE (stmt) == MODIFY_EXPR && num > 1) abort (); #endif def_ops = allocate_def_optype (num); for (x = 0; x < num ; x++) def_ops->defs[x].def = VARRAY_TREE_PTR (build_defs, x); VARRAY_POP_ALL (build_defs); ann = stmt_ann (stmt); ann->def_ops = def_ops; } static void finalize_ssa_uses (tree stmt) { unsigned num, x; use_optype use_ops; stmt_ann_t ann; num = VARRAY_ACTIVE_SIZE (build_uses); if (num == 0) return; #ifdef ENABLE_CHECKING { unsigned x; /* If the pointer to the operand is the statement itself, something is wrong. 
It means that we are pointing to a local variable (the initial call to get_stmt_operands does not pass a pointer to a statement). */ for (x = 0; x < num; x++) if (*(VARRAY_TREE_PTR (build_uses, x)) == stmt) abort (); } #endif use_ops = allocate_use_optype (num); for (x = 0; x < num ; x++) use_ops->uses[x].use = VARRAY_TREE_PTR (build_uses, x); VARRAY_POP_ALL (build_uses); ann = stmt_ann (stmt); ann->use_ops = use_ops; } static void finalize_ssa_v_may_defs (tree stmt) { unsigned num, x; v_may_def_optype v_may_def_ops; stmt_ann_t ann; num = VARRAY_ACTIVE_SIZE (build_v_may_defs); if (num == 0) return; #ifdef ENABLE_CHECKING /* V_MAY_DEFs must be entered in pairs of result/uses. */ if (num % 2 != 0) abort(); #endif v_may_def_ops = allocate_v_may_def_optype (num / 2); for (x = 0; x < num; x++) v_may_def_ops->v_may_defs[x] = VARRAY_TREE (build_v_may_defs, x); VARRAY_CLEAR (build_v_may_defs); ann = stmt_ann (stmt); ann->v_may_def_ops = v_may_def_ops; } static inline void finalize_ssa_vuses (tree stmt) { unsigned num, x; stmt_ann_t ann; vuse_optype vuse_ops; v_may_def_optype v_may_defs; #ifdef ENABLE_CHECKING if (VARRAY_ACTIVE_SIZE (build_v_may_defs) > 0) { fprintf (stderr, "Please finalize V_MAY_DEFs before finalize VUSES.\n"); abort (); } #endif num = VARRAY_ACTIVE_SIZE (build_vuses); if (num == 0) return; /* Remove superfluous VUSE operands. If the statement already has a V_MAY_DEF operation for a variable 'a', then a VUSE for 'a' is not needed because V_MAY_DEFs imply a VUSE of the variable. For instance, suppose that variable 'a' is aliased: # VUSE # a_3 = V_MAY_DEF a = a + 1; The VUSE is superfluous because it is implied by the V_MAY_DEF operation. */ ann = stmt_ann (stmt); v_may_defs = V_MAY_DEF_OPS (ann); if (NUM_V_MAY_DEFS (v_may_defs) > 0) { size_t i, j; for (i = 0; i < VARRAY_ACTIVE_SIZE (build_vuses); i++) { bool found = false; for (j = 0; j < NUM_V_MAY_DEFS (v_may_defs); j++) { tree vuse_var, v_may_def_var; tree vuse = VARRAY_TREE (build_vuses, i); tree v_may_def = V_MAY_DEF_OP (v_may_defs, j); if (TREE_CODE (vuse) == SSA_NAME) vuse_var = SSA_NAME_VAR (vuse); else vuse_var = vuse; if (TREE_CODE (v_may_def) == SSA_NAME) v_may_def_var = SSA_NAME_VAR (v_may_def); else v_may_def_var = v_may_def; if (vuse_var == v_may_def_var) { found = true; break; } } /* If we found a useless VUSE operand, remove it from the operand array by replacing it with the last active element in the operand array (unless the useless VUSE was the last operand, in which case we simply remove it. */ if (found) { if (i != VARRAY_ACTIVE_SIZE (build_vuses) - 1) { VARRAY_TREE (build_vuses, i) = VARRAY_TREE (build_vuses, VARRAY_ACTIVE_SIZE (build_vuses) - 1); } VARRAY_POP (build_vuses); /* We want to rescan the element at this index, unless this was the last element, in which case the loop terminates. */ i--; } } } num = VARRAY_ACTIVE_SIZE (build_vuses); /* We could have reduced the size to zero now, however. */ if (num == 0) return; vuse_ops = allocate_vuse_optype (num); for (x = 0; x < num; x++) vuse_ops->vuses[x] = VARRAY_TREE (build_vuses, x); VARRAY_CLEAR (build_vuses); ann->vuse_ops = vuse_ops; } static void finalize_ssa_v_must_defs (tree stmt) { unsigned num, x; stmt_ann_t ann; v_must_def_optype v_must_def_ops; num = VARRAY_ACTIVE_SIZE (build_v_must_defs); if (num == 0) return; #ifdef ENABLE_CHECKING /* There should only be a single V_MUST_DEF per assignment. 
*/ if (TREE_CODE (stmt) == MODIFY_EXPR && num > 1) abort (); #endif v_must_def_ops = allocate_v_must_def_optype (num); for (x = 0; x < num ; x++) v_must_def_ops->v_must_defs[x] = VARRAY_TREE (build_v_must_defs, x); VARRAY_POP_ALL (build_v_must_defs); ann = stmt_ann (stmt); ann->v_must_def_ops = v_must_def_ops; } extern void finalize_ssa_stmt_operands (tree stmt) { #ifdef ENABLE_CHECKING if (check_build_stmt == NULL) abort(); #endif finalize_ssa_defs (stmt); finalize_ssa_uses (stmt); finalize_ssa_v_must_defs (stmt); finalize_ssa_v_may_defs (stmt); finalize_ssa_vuses (stmt); #ifdef ENABLE_CHECKING check_build_stmt = NULL; #endif } extern void verify_start_operands (tree stmt ATTRIBUTE_UNUSED) { #ifdef ENABLE_CHECKING if (VARRAY_ACTIVE_SIZE (build_defs) > 0 || VARRAY_ACTIVE_SIZE (build_uses) > 0 || VARRAY_ACTIVE_SIZE (build_vuses) > 0 || VARRAY_ACTIVE_SIZE (build_v_may_defs) > 0 || VARRAY_ACTIVE_SIZE (build_v_must_defs) > 0) abort (); if (check_build_stmt != NULL) abort(); check_build_stmt = stmt; #endif } /* Add DEF_P to the list of pointers to operands defined by STMT. */ static inline void append_def (tree *def_p, tree stmt ATTRIBUTE_UNUSED) { #ifdef ENABLE_CHECKING if (check_build_stmt != stmt) abort(); #endif VARRAY_PUSH_TREE_PTR (build_defs, def_p); } /* Add USE_P to the list of pointers to operands used by STMT. */ static inline void append_use (tree *use_p, tree stmt ATTRIBUTE_UNUSED) { #ifdef ENABLE_CHECKING if (check_build_stmt != stmt) abort(); #endif VARRAY_PUSH_TREE_PTR (build_uses, use_p); } /* Add a new virtual def for variable VAR to statement STMT. If PREV_VOPS is not NULL, the existing entries are preserved and no new entries are added here. This is done to preserve the SSA numbering of virtual operands. */ static void append_v_may_def (tree var, tree stmt, voperands_t prev_vops) { stmt_ann_t ann; size_t i; tree result, source; #ifdef ENABLE_CHECKING if (check_build_stmt != stmt) abort(); #endif ann = stmt_ann (stmt); /* Don't allow duplicate entries. */ for (i = 0; i < VARRAY_ACTIVE_SIZE (build_v_may_defs); i += 2) { tree result = VARRAY_TREE (build_v_may_defs, i); if (var == result || (TREE_CODE (result) == SSA_NAME && var == SSA_NAME_VAR (result))) return; } /* If the statement already had virtual definitions, see if any of the existing V_MAY_DEFs matches VAR. If so, re-use it, otherwise add a new V_MAY_DEF for VAR. */ result = NULL_TREE; source = NULL_TREE; if (prev_vops) for (i = 0; i < NUM_V_MAY_DEFS (prev_vops->v_may_def_ops); i++) { result = V_MAY_DEF_RESULT (prev_vops->v_may_def_ops, i); if (result == var || (TREE_CODE (result) == SSA_NAME && SSA_NAME_VAR (result) == var)) { source = V_MAY_DEF_OP (prev_vops->v_may_def_ops, i); break; } } /* If no previous V_MAY_DEF operand was found for VAR, create one now. */ if (source == NULL_TREE) { result = var; source = var; } VARRAY_PUSH_TREE (build_v_may_defs, result); VARRAY_PUSH_TREE (build_v_may_defs, source); } /* Add VAR to the list of virtual uses for STMT. If PREV_VOPS is not NULL, the existing entries are preserved and no new entries are added here. This is done to preserve the SSA numbering of virtual operands. */ static void append_vuse (tree var, tree stmt, voperands_t prev_vops) { stmt_ann_t ann; size_t i; bool found; tree vuse; #ifdef ENABLE_CHECKING if (check_build_stmt != stmt) abort(); #endif ann = stmt_ann (stmt); /* Don't allow duplicate entries. 
*/ for (i = 0; i < VARRAY_ACTIVE_SIZE (build_vuses); i++) { tree vuse_var = VARRAY_TREE (build_vuses, i); if (var == vuse_var || (TREE_CODE (vuse_var) == SSA_NAME && var == SSA_NAME_VAR (vuse_var))) return; } /* If the statement already had virtual uses, see if any of the existing VUSEs matches VAR. If so, re-use it, otherwise add a new VUSE for VAR. */ found = false; vuse = NULL_TREE; if (prev_vops) for (i = 0; i < NUM_VUSES (prev_vops->vuse_ops); i++) { vuse = VUSE_OP (prev_vops->vuse_ops, i); if (vuse == var || (TREE_CODE (vuse) == SSA_NAME && SSA_NAME_VAR (vuse) == var)) { found = true; break; } } /* If VAR existed already in PREV_VOPS, re-use it. */ if (found) var = vuse; VARRAY_PUSH_TREE (build_vuses, var); } /* Add VAR to the list of virtual must definitions for STMT. If PREV_VOPS is not NULL, the existing entries are preserved and no new entries are added here. This is done to preserve the SSA numbering of virtual operands. */ static void append_v_must_def (tree var, tree stmt, voperands_t prev_vops) { stmt_ann_t ann; size_t i; bool found; tree v_must_def; #ifdef ENABLE_CHECKING if (check_build_stmt != stmt) abort(); #endif ann = stmt_ann (stmt); /* Don't allow duplicate entries. */ for (i = 0; i < VARRAY_ACTIVE_SIZE (build_v_must_defs); i++) { tree v_must_def_var = VARRAY_TREE (build_v_must_defs, i); if (var == v_must_def_var || (TREE_CODE (v_must_def_var) == SSA_NAME && var == SSA_NAME_VAR (v_must_def_var))) return; } /* If the statement already had virtual must defs, see if any of the existing V_MUST_DEFs matches VAR. If so, re-use it, otherwise add a new V_MUST_DEF for VAR. */ found = false; v_must_def = NULL_TREE; if (prev_vops) for (i = 0; i < NUM_V_MUST_DEFS (prev_vops->v_must_def_ops); i++) { v_must_def = V_MUST_DEF_OP (prev_vops->v_must_def_ops, i); if (v_must_def == var || (TREE_CODE (v_must_def) == SSA_NAME && SSA_NAME_VAR (v_must_def) == var)) { found = true; break; } } /* If VAR existed already in PREV_VOPS, re-use it. */ if (found) var = v_must_def; VARRAY_PUSH_TREE (build_v_must_defs, var); } /* External entry point which by-passes the previous vops mechanism. */ void add_vuse (tree var, tree stmt) { append_vuse (var, stmt, NULL); } /* Get the operands of statement STMT. Note that repeated calls to get_stmt_operands for the same statement will do nothing until the statement is marked modified by a call to modify_stmt(). */ void get_stmt_operands (tree stmt) { enum tree_code code; stmt_ann_t ann; struct voperands_d prev_vops; #if defined ENABLE_CHECKING /* The optimizers cannot handle statements that are nothing but a _DECL. This indicates a bug in the gimplifier. */ if (SSA_VAR_P (stmt)) abort (); #endif /* Ignore error statements. */ if (TREE_CODE (stmt) == ERROR_MARK) return; ann = get_stmt_ann (stmt); /* If the statement has not been modified, the operands are still valid. */ if (!ann->modified) return; timevar_push (TV_TREE_OPS); /* Initially assume that the statement has no volatile operands, nor makes aliased loads or stores. */ ann->has_volatile_ops = false; ann->makes_aliased_stores = false; ann->makes_aliased_loads = false; /* Remove any existing operands as they will be scanned again. */ free_defs (&(ann->def_ops), true); free_uses (&(ann->use_ops), true); /* Before removing existing virtual operands, save them in PREV_VOPS so that we can re-use their SSA versions. 
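   A concrete illustration (hypothetical version numbers, not from the
   original comment): if a previous scan recorded a V_MAY_DEF for
   variable 'a' with result a_5 and source a_4, and the statement is
   rescanned after a small modification, append_v_may_def finds 'a' in
   PREV_VOPS and pushes the same a_5/a_4 pair again rather than creating
   a brand new operand, so the virtual SSA names attached to the
   statement do not change.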
*/ prev_vops.v_may_def_ops = V_MAY_DEF_OPS (ann); prev_vops.vuse_ops = VUSE_OPS (ann); prev_vops.v_must_def_ops = V_MUST_DEF_OPS (ann); /* Don't free the previous values to memory since we're still using them. */ free_v_may_defs (&(ann->v_may_def_ops), false); free_vuses (&(ann->vuse_ops), false); free_v_must_defs (&(ann->v_must_def_ops), false); start_ssa_stmt_operands (stmt); code = TREE_CODE (stmt); switch (code) { case MODIFY_EXPR: get_expr_operands (stmt, &TREE_OPERAND (stmt, 1), opf_none, &prev_vops); if (TREE_CODE (TREE_OPERAND (stmt, 0)) == ARRAY_REF || TREE_CODE (TREE_OPERAND (stmt, 0)) == COMPONENT_REF || TREE_CODE (TREE_OPERAND (stmt, 0)) == REALPART_EXPR || TREE_CODE (TREE_OPERAND (stmt, 0)) == IMAGPART_EXPR /* Use a V_MAY_DEF if the RHS might throw, as the LHS won't be modified in that case. FIXME we should represent somehow that it is killed on the fallthrough path. */ || tree_could_throw_p (TREE_OPERAND (stmt, 1))) get_expr_operands (stmt, &TREE_OPERAND (stmt, 0), opf_is_def, &prev_vops); else get_expr_operands (stmt, &TREE_OPERAND (stmt, 0), opf_is_def | opf_kill_def, &prev_vops); break; case COND_EXPR: get_expr_operands (stmt, &COND_EXPR_COND (stmt), opf_none, &prev_vops); break; case SWITCH_EXPR: get_expr_operands (stmt, &SWITCH_COND (stmt), opf_none, &prev_vops); break; case ASM_EXPR: { int noutputs = list_length (ASM_OUTPUTS (stmt)); const char **oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *)); int i; tree link; const char *constraint; bool allows_mem, allows_reg, is_inout; for (i=0, link = ASM_OUTPUTS (stmt); link; ++i, link = TREE_CHAIN (link)) { oconstraints[i] = constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link))); parse_output_constraint (&constraint, i, 0, 0, &allows_mem, &allows_reg, &is_inout); if (allows_reg && is_inout) /* This should have been split in gimplify_asm_expr. */ abort (); if (!allows_reg && allows_mem) { tree t = get_base_address (TREE_VALUE (link)); if (t && DECL_P (t)) mark_call_clobbered (t); } get_expr_operands (stmt, &TREE_VALUE (link), opf_is_def, &prev_vops); } for (link = ASM_INPUTS (stmt); link; link = TREE_CHAIN (link)) { constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link))); parse_input_constraint (&constraint, 0, 0, noutputs, 0, oconstraints, &allows_mem, &allows_reg); if (!allows_reg && allows_mem) { tree t = get_base_address (TREE_VALUE (link)); if (t && DECL_P (t)) mark_call_clobbered (t); } get_expr_operands (stmt, &TREE_VALUE (link), 0, &prev_vops); } /* Clobber memory for asm ("" : : : "memory"); */ for (link = ASM_CLOBBERS (stmt); link; link = TREE_CHAIN (link)) if (!strcmp (TREE_STRING_POINTER (TREE_VALUE (link)), "memory")) add_call_clobber_ops (stmt, &prev_vops); } break; case RETURN_EXPR: get_expr_operands (stmt, &TREE_OPERAND (stmt, 0), opf_none, &prev_vops); break; case GOTO_EXPR: get_expr_operands (stmt, &GOTO_DESTINATION (stmt), opf_none, &prev_vops); break; case LABEL_EXPR: get_expr_operands (stmt, &LABEL_EXPR_LABEL (stmt), opf_none, &prev_vops); break; /* These nodes contain no variable references. */ case BIND_EXPR: case CASE_LABEL_EXPR: case TRY_CATCH_EXPR: case TRY_FINALLY_EXPR: case EH_FILTER_EXPR: case CATCH_EXPR: case RESX_EXPR: break; default: /* Notice that if get_expr_operands tries to use &STMT as the operand pointer (which may only happen for USE operands), we will abort in append_use. This default will handle statements like empty statements, CALL_EXPRs or VA_ARG_EXPRs that may appear on the RHS of a statement or as statements themselves. 
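An illustrative instance of such a statement is a call executed only for its side effects,

        foo (x_1);

   which reaches this point as a bare CALL_EXPR; passing &STMT is harmless for it because get_expr_operands descends into the callee and argument list and never records the statement itself as a USE.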
*/ get_expr_operands (stmt, &stmt, opf_none, &prev_vops); break; } finalize_ssa_stmt_operands (stmt); /* Now free the previous virtual ops to memory. */ free_v_may_defs (&(prev_vops.v_may_def_ops), true); free_vuses (&(prev_vops.vuse_ops), true); free_v_must_defs (&(prev_vops.v_must_def_ops), true); /* Clear the modified bit for STMT. Subsequent calls to get_stmt_operands for this statement will do nothing until the statement is marked modified by a call to modify_stmt(). */ ann->modified = 0; timevar_pop (TV_TREE_OPS); } /* Recursively scan the expression pointed by EXPR_P in statement STMT. FLAGS is one of the OPF_* constants modifying how to interpret the operands found. PREV_VOPS is as in append_v_may_def and append_vuse. */ static void get_expr_operands (tree stmt, tree *expr_p, int flags, voperands_t prev_vops) { enum tree_code code; char class; tree expr = *expr_p; if (expr == NULL || expr == error_mark_node) return; code = TREE_CODE (expr); class = TREE_CODE_CLASS (code); /* We could have the address of a component, array member, etc which has interesting variable references. */ if (code == ADDR_EXPR) { /* Taking the address of a variable does not represent a reference to it, but the fact that STMT takes its address will be of interest to some passes (e.g. alias resolution). */ add_stmt_operand (expr_p, stmt, 0, NULL); /* If the address is constant (invariant is not sufficient), there will be no interesting variable references inside. */ if (TREE_CONSTANT (expr)) return; /* There should be no VUSEs created, since the referenced objects are not really accessed. The only operands that we should find here are ARRAY_REF indices which will always be real operands (GIMPLE does not allow non-registers as array indices). */ flags |= opf_no_vops; /* Avoid recursion. */ expr_p = &TREE_OPERAND (expr, 0); expr = *expr_p; code = TREE_CODE (expr); class = TREE_CODE_CLASS (code); } /* Expressions that make no memory references. */ if (class == 'c' || class == 't' || code == BLOCK || code == FUNCTION_DECL || code == EXC_PTR_EXPR || code == FILTER_EXPR || code == LABEL_DECL) return; /* If we found a variable, add it to DEFS or USES depending on the operand flags. */ if (SSA_VAR_P (expr)) { add_stmt_operand (expr_p, stmt, flags, prev_vops); return; } /* Pointer dereferences always represent a use of the base pointer. */ if (code == INDIRECT_REF) { tree *pptr = &TREE_OPERAND (expr, 0); tree ptr = *pptr; if (SSA_VAR_P (ptr)) { if (!aliases_computed_p) { /* If the pointer does not have a memory tag and aliases have not been computed yet, mark the statement as having volatile operands to prevent DOM from entering it in equivalence tables and DCE from killing it. */ stmt_ann (stmt)->has_volatile_ops = true; } else { struct ptr_info_def *pi = NULL; /* If we have computed aliasing already, check if PTR has flow-sensitive points-to information. */ if (TREE_CODE (ptr) == SSA_NAME && (pi = SSA_NAME_PTR_INFO (ptr)) != NULL && pi->name_mem_tag) { /* PTR has its own memory tag. Use it. */ add_stmt_operand (&pi->name_mem_tag, stmt, flags, prev_vops); } else { /* If PTR is not an SSA_NAME or it doesn't have a name tag, use its type memory tag. */ var_ann_t ann; /* If we are emitting debugging dumps, display a warning if PTR is an SSA_NAME with no flow-sensitive alias information. That means that we may need to compute aliasing again. 
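An illustrative case: a pointer that merely arrives as a PARM_DECL usually has pt_anything set and therefore no name tag, so a dereference such as

        x_4 = *p_1;

   falls back to the type tag of 'p' and is treated as a potential read of everything that tag may alias, rather than of the smaller set a name tag would describe.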
*/ if (dump_file && TREE_CODE (ptr) == SSA_NAME && pi == NULL) { fprintf (dump_file, "NOTE: no flow-sensitive alias info for "); print_generic_expr (dump_file, ptr, dump_flags); fprintf (dump_file, " in "); print_generic_stmt (dump_file, stmt, dump_flags); } if (TREE_CODE (ptr) == SSA_NAME) ptr = SSA_NAME_VAR (ptr); ann = var_ann (ptr); add_stmt_operand (&ann->type_mem_tag, stmt, flags, prev_vops); } } } /* If a constant is used as a pointer, we can't generate a real operand for it but we mark the statement volatile to prevent optimizations from messing things up. */ else if (TREE_CODE (ptr) == INTEGER_CST) { stmt_ann (stmt)->has_volatile_ops = true; return; } /* Everything else *should* have been folded elsewhere, but users are smarter than we in finding ways to write invalid code. We cannot just abort here. If we were absolutely certain that we do handle all valid cases, then we could just do nothing here. That seems optimistic, so attempt to do something logical... */ else if ((TREE_CODE (ptr) == PLUS_EXPR || TREE_CODE (ptr) == MINUS_EXPR) && TREE_CODE (TREE_OPERAND (ptr, 0)) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (ptr, 1)) == INTEGER_CST) { /* Make sure we know the object is addressable. */ pptr = &TREE_OPERAND (ptr, 0); add_stmt_operand (pptr, stmt, 0, NULL); /* Mark the object itself with a VUSE. */ pptr = &TREE_OPERAND (*pptr, 0); get_expr_operands (stmt, pptr, flags, prev_vops); return; } /* Ok, this isn't even is_gimple_min_invariant. Something's broke. */ else abort (); /* Add a USE operand for the base pointer. */ get_expr_operands (stmt, pptr, opf_none, prev_vops); return; } /* Treat array references as references to the virtual variable representing the array. The virtual variable for an ARRAY_REF is the VAR_DECL for the array. */ if (code == ARRAY_REF || code == ARRAY_RANGE_REF) { /* Add the virtual variable for the ARRAY_REF to VDEFS or VUSES according to the value of IS_DEF. Recurse if the LHS of the ARRAY_REF node is not a regular variable. */ if (SSA_VAR_P (TREE_OPERAND (expr, 0))) add_stmt_operand (expr_p, stmt, flags, prev_vops); else get_expr_operands (stmt, &TREE_OPERAND (expr, 0), flags, prev_vops); get_expr_operands (stmt, &TREE_OPERAND (expr, 1), opf_none, prev_vops); get_expr_operands (stmt, &TREE_OPERAND (expr, 2), opf_none, prev_vops); get_expr_operands (stmt, &TREE_OPERAND (expr, 3), opf_none, prev_vops); return; } /* Similarly to arrays, references to compound variables (complex types and structures/unions) are globbed. FIXME: This means that a.x = 6; a.y = 7; foo (a.x, a.y); will not be constant propagated because the two partial definitions to 'a' will kill each other. Note that SRA may be able to fix this problem if 'a' can be scalarized. */ if (code == IMAGPART_EXPR || code == REALPART_EXPR || code == COMPONENT_REF) { /* If the LHS of the compound reference is not a regular variable, recurse to keep looking for more operands in the subexpression. */ if (SSA_VAR_P (TREE_OPERAND (expr, 0))) add_stmt_operand (expr_p, stmt, flags, prev_vops); else get_expr_operands (stmt, &TREE_OPERAND (expr, 0), flags, prev_vops); if (code == COMPONENT_REF) get_expr_operands (stmt, &TREE_OPERAND (expr, 2), opf_none, prev_vops); return; } /* Function calls. Add every argument to USES. If the callee is neither pure nor const, create a VDEF reference for GLOBAL_VAR (See find_vars_r). */ if (code == CALL_EXPR) { tree op; int call_flags = call_expr_flags (expr); /* Find uses in the called function. 
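For a direct call this operand is just the constant ADDR_EXPR of the callee and contributes nothing, but for an indirect call such as (illustrative)

        x_4 = fp_2 (y_3);

   the function pointer fp_2 becomes a regular USE operand of the statement.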
*/ get_expr_operands (stmt, &TREE_OPERAND (expr, 0), opf_none, prev_vops); for (op = TREE_OPERAND (expr, 1); op; op = TREE_CHAIN (op)) get_expr_operands (stmt, &TREE_VALUE (op), opf_none, prev_vops); get_expr_operands (stmt, &TREE_OPERAND (expr, 2), opf_none, prev_vops); if (bitmap_first_set_bit (call_clobbered_vars) >= 0) { /* A 'pure' or a 'const' functions never call clobber anything. A 'noreturn' function might, but since we don't return anyway there is no point in recording that. */ if (!(call_flags & (ECF_PURE | ECF_CONST | ECF_NORETURN))) add_call_clobber_ops (stmt, prev_vops); else if (!(call_flags & (ECF_CONST | ECF_NORETURN))) add_call_read_ops (stmt, prev_vops); } else if (!aliases_computed_p) stmt_ann (stmt)->has_volatile_ops = true; return; } /* Lists. */ if (code == TREE_LIST) { tree op; for (op = expr; op; op = TREE_CHAIN (op)) get_expr_operands (stmt, &TREE_VALUE (op), flags, prev_vops); return; } /* Assignments. */ if (code == MODIFY_EXPR) { get_expr_operands (stmt, &TREE_OPERAND (expr, 1), opf_none, prev_vops); if (TREE_CODE (TREE_OPERAND (expr, 0)) == ARRAY_REF || TREE_CODE (TREE_OPERAND (expr, 0)) == COMPONENT_REF || TREE_CODE (TREE_OPERAND (expr, 0)) == REALPART_EXPR || TREE_CODE (TREE_OPERAND (expr, 0)) == IMAGPART_EXPR) get_expr_operands (stmt, &TREE_OPERAND (expr, 0), opf_is_def, prev_vops); else get_expr_operands (stmt, &TREE_OPERAND (expr, 0), opf_is_def | opf_kill_def, prev_vops); return; } /* Mark VA_ARG_EXPR nodes as making volatile references. FIXME, this is needed because we currently do not gimplify VA_ARG_EXPR properly. */ if (code == VA_ARG_EXPR) { stmt_ann (stmt)->has_volatile_ops = true; return; } /* Unary expressions. */ if (class == '1' || code == TRUTH_NOT_EXPR || code == BIT_FIELD_REF || code == CONSTRUCTOR) { get_expr_operands (stmt, &TREE_OPERAND (expr, 0), flags, prev_vops); return; } /* Binary expressions. */ if (class == '2' || class == '<' || code == TRUTH_AND_EXPR || code == TRUTH_OR_EXPR || code == TRUTH_XOR_EXPR || code == COMPOUND_EXPR || code == OBJ_TYPE_REF) { tree op0 = TREE_OPERAND (expr, 0); tree op1 = TREE_OPERAND (expr, 1); /* If it would be profitable to swap the operands, then do so to canonicalize the statement, enabling better optimization. By placing canonicalization of such expressions here we transparently keep statements in canonical form, even when the statement is modified. */ if (tree_swap_operands_p (op0, op1, false)) { /* For relationals we need to swap the operands and change the code. */ if (code == LT_EXPR || code == GT_EXPR || code == LE_EXPR || code == GE_EXPR) { TREE_SET_CODE (expr, swap_tree_comparison (code)); TREE_OPERAND (expr, 0) = op1; TREE_OPERAND (expr, 1) = op0; } /* For a commutative operator we can just swap the operands. */ if (commutative_tree_code (code)) { TREE_OPERAND (expr, 0) = op1; TREE_OPERAND (expr, 1) = op0; } } get_expr_operands (stmt, &TREE_OPERAND (expr, 0), flags, prev_vops); get_expr_operands (stmt, &TREE_OPERAND (expr, 1), flags, prev_vops); return; } /* If we get here, something has gone wrong. */ fprintf (stderr, "unhandled expression in get_expr_operands():\n"); debug_tree (expr); fputs ("\n", stderr); abort (); } /* Add *VAR_P to the appropriate operand array of STMT. FLAGS is as in get_expr_operands. If *VAR_P is a GIMPLE register, it will be added to the statement's real operands, otherwise it is added to virtual operands. PREV_VOPS is used when adding virtual operands to statements that already had them (See append_v_may_def and append_vuse). 
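A rough illustration with made-up variables: for

        x_3 = b;

   where 'x' is a GIMPLE register and 'b' an addressable global, x_3 is added as a real DEF while 'b' (or, if 'b' has may-aliases, each of those aliases) contributes VUSE operands; conversely, b = x_3; yields a real USE of x_3 and either a killing V_MUST_DEF of 'b' when it is unaliased or V_MAY_DEFs when it is not.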
*/ static void add_stmt_operand (tree *var_p, tree stmt, int flags, voperands_t prev_vops) { bool is_real_op; tree var, sym; stmt_ann_t s_ann; var_ann_t v_ann; var = *var_p; STRIP_NOPS (var); s_ann = stmt_ann (stmt); /* If the operand is an ADDR_EXPR, add its operand to the list of variables that have had their address taken in this statement. */ if (TREE_CODE (var) == ADDR_EXPR) { note_addressable (TREE_OPERAND (var, 0), s_ann); return; } /* If the original variable is not a scalar, it will be added to the list of virtual operands. In that case, use its base symbol as the virtual variable representing it. */ is_real_op = is_gimple_reg (var); if (!is_real_op && !DECL_P (var)) var = get_virtual_var (var); /* If VAR is not a variable that we care to optimize, do nothing. */ if (var == NULL_TREE || !SSA_VAR_P (var)) return; sym = (TREE_CODE (var) == SSA_NAME ? SSA_NAME_VAR (var) : var); v_ann = var_ann (sym); /* FIXME: We currently refuse to optimize variables that have hidden uses (variables used in VLA declarations, MD builtin calls and variables from the parent function in nested functions). This is because not all uses of these variables are exposed in the IL or the statements that reference them are not in GIMPLE form. If that's the case, mark the statement as having volatile operands and return. */ if (v_ann->has_hidden_use) { s_ann->has_volatile_ops = true; return; } /* Don't expose volatile variables to the optimizers. */ if (TREE_THIS_VOLATILE (sym)) { s_ann->has_volatile_ops = true; return; } if (is_real_op) { /* The variable is a GIMPLE register. Add it to real operands. */ if (flags & opf_is_def) append_def (var_p, stmt); else append_use (var_p, stmt); } else { varray_type aliases; /* The variable is not a GIMPLE register. Add it (or its aliases) to virtual operands, unless the caller has specifically requested not to add virtual operands (used when adding operands inside an ADDR_EXPR expression). */ if (flags & opf_no_vops) return; aliases = v_ann->may_aliases; /* If alias information hasn't been computed yet, then addressable variables will not be an alias tag nor will they have aliases. In this case, mark the statement as having volatile operands. */ if (!aliases_computed_p && may_be_aliased (var)) s_ann->has_volatile_ops = true; if (aliases == NULL) { /* The variable is not aliased or it is an alias tag. */ if (flags & opf_is_def) { if (v_ann->is_alias_tag) { /* Alias tagged vars get regular V_MAY_DEF */ s_ann->makes_aliased_stores = 1; append_v_may_def (var, stmt, prev_vops); } else if ((flags & opf_kill_def) && v_ann->mem_tag_kind == NOT_A_TAG) /* V_MUST_DEF for non-aliased non-GIMPLE register variable definitions. Avoid memory tags. */ append_v_must_def (var, stmt, prev_vops); else /* Call-clobbered variables & memory tags get V_MAY_DEF */ append_v_may_def (var, stmt, prev_vops); } else { append_vuse (var, stmt, prev_vops); if (v_ann->is_alias_tag) s_ann->makes_aliased_loads = 1; } } else { size_t i; /* The variable is aliased. Add its aliases to the virtual operands. */ if (VARRAY_ACTIVE_SIZE (aliases) == 0) abort (); if (flags & opf_is_def) { /* If the variable is also an alias tag, add a virtual operand for it, otherwise we will miss representing references to the members of the variable's alias set. This fixes the bug in gcc.c-torture/execute/20020503-1.c. 
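A sketch of the problem with hypothetical names: suppose 'v' is aliased, with may-aliases(v) = { x }, and is itself an alias tag because some tag T has may-aliases(T) = { v }, so loads through T are represented as VUSE <v>. If a store to 'v' emitted only a V_MAY_DEF for 'x', those VUSE <v> operands would never see the store; adding the V_MAY_DEF for 'v' itself keeps the def-use chains connected.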
*/ if (v_ann->is_alias_tag) append_v_may_def (var, stmt, prev_vops); for (i = 0; i < VARRAY_ACTIVE_SIZE (aliases); i++) append_v_may_def (VARRAY_TREE (aliases, i), stmt, prev_vops); s_ann->makes_aliased_stores = 1; } else { if (v_ann->is_alias_tag) append_vuse (var, stmt, prev_vops); for (i = 0; i < VARRAY_ACTIVE_SIZE (aliases); i++) append_vuse (VARRAY_TREE (aliases, i), stmt, prev_vops); s_ann->makes_aliased_loads = 1; } } } } /* Record that VAR had its address taken in the statement with annotations S_ANN. */ static void note_addressable (tree var, stmt_ann_t s_ann) { var = get_base_address (var); if (var && SSA_VAR_P (var)) { if (s_ann->addresses_taken == NULL) s_ann->addresses_taken = BITMAP_GGC_ALLOC (); bitmap_set_bit (s_ann->addresses_taken, var_ann (var)->uid); } } /* Add clobbering definitions for .GLOBAL_VAR or for each of the call clobbered variables in the function. */ static void add_call_clobber_ops (tree stmt, voperands_t prev_vops) { /* Functions that are not const, pure or never return may clobber call-clobbered variables. */ stmt_ann (stmt)->makes_clobbering_call = true; /* If we had created .GLOBAL_VAR earlier, use it. Otherwise, add a V_MAY_DEF operand for every call clobbered variable. See compute_may_aliases for the heuristic used to decide whether to create .GLOBAL_VAR or not. */ if (global_var) add_stmt_operand (&global_var, stmt, opf_is_def, prev_vops); else { size_t i; EXECUTE_IF_SET_IN_BITMAP (call_clobbered_vars, 0, i, { tree var = referenced_var (i); /* If VAR is read-only, don't add a V_MAY_DEF, just a VUSE operand. */ if (!TREE_READONLY (var)) add_stmt_operand (&var, stmt, opf_is_def, prev_vops); else add_stmt_operand (&var, stmt, opf_none, prev_vops); }); } } /* Add VUSE operands for .GLOBAL_VAR or all call clobbered variables in the function. */ static void add_call_read_ops (tree stmt, voperands_t prev_vops) { /* Otherwise, if the function is not pure, it may reference memory. Add a VUSE for .GLOBAL_VAR if it has been created. Otherwise, add a VUSE for each call-clobbered variable. See add_referenced_var for the heuristic used to decide whether to create .GLOBAL_VAR. */ if (global_var) add_stmt_operand (&global_var, stmt, opf_none, prev_vops); else { size_t i; EXECUTE_IF_SET_IN_BITMAP (call_clobbered_vars, 0, i, { tree var = referenced_var (i); add_stmt_operand (&var, stmt, opf_none, prev_vops); }); } } /* Copies virtual operands from SRC to DST. 
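The intended use (sketched here, not prescriptive) is for passes that replace a statement with an equivalent one touching the same memory:

        copy_virtual_operands (new_stmt, old_stmt);

   carries the existing VUSE, V_MAY_DEF and V_MUST_DEF entries, including their SSA versions, over to NEW_STMT so that the virtual operand web does not have to be re-scanned and renamed.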
*/ void copy_virtual_operands (tree dst, tree src) { vuse_optype vuses = STMT_VUSE_OPS (src); v_may_def_optype v_may_defs = STMT_V_MAY_DEF_OPS (src); v_must_def_optype v_must_defs = STMT_V_MUST_DEF_OPS (src); vuse_optype *vuses_new = &stmt_ann (dst)->vuse_ops; v_may_def_optype *v_may_defs_new = &stmt_ann (dst)->v_may_def_ops; v_must_def_optype *v_must_defs_new = &stmt_ann (dst)->v_must_def_ops; unsigned i; if (vuses) { *vuses_new = allocate_vuse_optype (NUM_VUSES (vuses)); for (i = 0; i < NUM_VUSES (vuses); i++) SET_VUSE_OP (*vuses_new, i, VUSE_OP (vuses, i)); } if (v_may_defs) { *v_may_defs_new = allocate_v_may_def_optype (NUM_V_MAY_DEFS (v_may_defs)); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { SET_V_MAY_DEF_OP (*v_may_defs_new, i, V_MAY_DEF_OP (v_may_defs, i)); SET_V_MAY_DEF_RESULT (*v_may_defs_new, i, V_MAY_DEF_RESULT (v_may_defs, i)); } } if (v_must_defs) { *v_must_defs_new = allocate_v_must_def_optype (NUM_V_MUST_DEFS (v_must_defs)); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) SET_V_MUST_DEF_OP (*v_must_defs_new, i, V_MUST_DEF_OP (v_must_defs, i)); } } /* Type information for tree-ssa-operands.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_tree_ssa_operands_h[] = { { &build_v_must_defs, 1, sizeof (build_v_must_defs), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &build_vuses, 1, sizeof (build_vuses), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &build_v_may_defs, 1, sizeof (build_v_may_defs), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &build_uses, 1, sizeof (build_uses), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &build_defs, 1, sizeof (build_defs), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, LAST_GGC_ROOT_TAB }; /* Alias analysis for trees. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Structure to map a variable to its alias set and keep track of the virtual operands that will be needed to represent it. */ struct alias_map_d { /* Variable and its alias set. 
*/ tree var; HOST_WIDE_INT set; /* Total number of virtual operands that will be needed to represent all the aliases of VAR. */ long total_alias_vops; /* Nonzero if the aliases for this memory tag have been grouped already. Used in group_aliases. */ unsigned int grouped_p : 1; /* Set of variables aliased with VAR. This is the exact same information contained in VAR_ANN (VAR)->MAY_ALIASES, but in bitmap form to speed up alias grouping. */ sbitmap may_aliases; }; /* Alias information used by compute_may_aliases and its helpers. */ struct alias_info { /* SSA names visited while collecting points-to information. If bit I is set, it means that SSA variable with version I has already been visited. */ bitmap ssa_names_visited; /* Array of SSA_NAME pointers processed by the points-to collector. */ varray_type processed_ptrs; /* Variables whose address is still needed. */ bitmap addresses_needed; /* ADDRESSABLE_VARS contains all the global variables and locals that have had their address taken. */ struct alias_map_d **addressable_vars; size_t num_addressable_vars; /* POINTERS contains all the _DECL pointers with unique memory tags that have been referenced in the program. */ struct alias_map_d **pointers; size_t num_pointers; /* Number of function calls found in the program. */ size_t num_calls_found; /* Array of counters to keep track of how many times each pointer has been dereferenced in the program. This is used by the alias grouping heuristic in compute_flow_insensitive_aliasing. */ varray_type num_references; /* Total number of virtual operands that will be needed to represent all the aliases of all the pointers found in the program. */ long total_alias_vops; /* Variables that have been written to. */ bitmap written_vars; /* Pointers that have been used in an indirect store operation. */ bitmap dereferenced_ptrs_store; /* Pointers that have been used in an indirect load operation. */ bitmap dereferenced_ptrs_load; }; /* Counters used to display statistics on alias analysis. */ struct alias_stats_d { unsigned int alias_queries; unsigned int alias_mayalias; unsigned int alias_noalias; unsigned int simple_queries; unsigned int simple_resolved; unsigned int tbaa_queries; unsigned int tbaa_resolved; unsigned int pta_queries; unsigned int pta_resolved; }; /* Local variables. */ static struct alias_stats_d alias_stats; /* Local functions. 
*/ static void compute_flow_insensitive_aliasing (struct alias_info *); static void dump_alias_stats (FILE *); static bool may_alias_p (tree, HOST_WIDE_INT, tree, HOST_WIDE_INT); static tree create_memory_tag (tree type, bool is_type_tag); static tree get_tmt_for (tree, struct alias_info *); static tree get_nmt_for (tree); static void add_may_alias (tree, tree); static struct alias_info *init_alias_info (void); static void delete_alias_info (struct alias_info *); static void compute_points_to_and_addr_escape (struct alias_info *); static void compute_flow_sensitive_aliasing (struct alias_info *); static void setup_pointers_and_addressables (struct alias_info *); static bool collect_points_to_info_r (tree, tree, void *); static bool is_escape_site (tree, size_t *); static void add_pointed_to_var (struct alias_info *, tree, tree); static void add_pointed_to_expr (tree, tree); static void create_global_var (void); static void collect_points_to_info_for (struct alias_info *, tree); static bool ptr_is_dereferenced_by (tree, tree, bool *); static void maybe_create_global_var (struct alias_info *ai); static void group_aliases (struct alias_info *); static struct ptr_info_def *get_ptr_info (tree t); /* Global declarations. */ /* Call clobbered variables in the function. If bit I is set, then REFERENCED_VARS (I) is call-clobbered. */ bitmap call_clobbered_vars; /* 'true' after aliases have been computed (see compute_may_aliases). This is used by get_stmt_operands and its helpers to determine what to do when scanning an operand for a variable that may be aliased. If may-alias information is still not available, the statement is marked as having volatile operands. */ bool aliases_computed_p; /* When the program has too many call-clobbered variables and call-sites, this variable is used to represent the clobbering effects of function calls. In these cases, all the call clobbered variables in the program are forced to alias this variable. This reduces compile times by not having to keep track of too many V_MAY_DEF expressions at call sites. */ tree global_var; /* Compute may-alias information for every variable referenced in function FNDECL. Alias analysis proceeds in 3 main phases: 1- Points-to and escape analysis. This phase walks the use-def chains in the SSA web looking for three things: * Assignments of the form P_i = &VAR * Assignments of the form P_i = malloc() * Pointers and ADDR_EXPR that escape the current function. The concept of 'escaping' is the same one used in the Java world. When a pointer or an ADDR_EXPR escapes, it means that it has been exposed outside of the current function. So, assignment to global variables, function arguments and returning a pointer are all escape sites. This is where we are currently limited. Since not everything is renamed into SSA, we lose track of escape properties when a pointer is stashed inside a field in a structure, for instance. In those cases, we are assuming that the pointer does escape. We use escape analysis to determine whether a variable is call-clobbered. Simply put, if an ADDR_EXPR escapes, then the variable is call-clobbered. If a pointer P_i escapes, then all the variables pointed-to by P_i (and its memory tag) also escape. 2- Compute flow-sensitive aliases We have two classes of memory tags. Memory tags associated with the pointed-to data type of the pointers in the program. These tags are called "type memory tag" (TMT). The other class are those associated with SSA_NAMEs, called "name memory tag" (NMT). 
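(A quick illustration with made-up names: every SSA version p_1, p_2, ... of a pointer 'p' shares one type tag, say TMT.3, derived from the type 'p' points to, while an individual version such as p_5 in p_5 = &a may additionally receive its own name tag, say NMT.8, whose may-aliases are just { a }.)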
The basic idea is that when adding operands for an INDIRECT_REF *P_i, we will first check whether P_i has a name tag, if it does we use it, because that will have more precise aliasing information. Otherwise, we use the standard type tag. In this phase, we go through all the pointers we found in points-to analysis and create alias sets for the name memory tags associated with each pointer P_i. If P_i escapes, we mark call-clobbered the variables it points to and its tag. 3- Compute flow-insensitive aliases This pass will compare the alias set of every type memory tag and every addressable variable found in the program. Given a type memory tag TMT and an addressable variable V. If the alias sets of TMT and V conflict (as computed by may_alias_p), then V is marked as an alias tag and added to the alias set of TMT. For instance, consider the following function: foo (int i) { int *p, *q, a, b; if (i > 10) p = &a; else q = &b; *p = 3; *q = 5; a = b + 2; return *p; } After aliasing analysis has finished, the type memory tag for pointer 'p' will have two aliases, namely variables 'a' and 'b'. Every time pointer 'p' is dereferenced, we want to mark the operation as a potential reference to 'a' and 'b'. foo (int i) { int *p, a, b; if (i_2 > 10) p_4 = &a; else p_6 = &b; # p_1 = PHI ; # a_7 = V_MAY_DEF ; # b_8 = V_MAY_DEF ; *p_1 = 3; # a_9 = V_MAY_DEF # VUSE a_9 = b_8 + 2; # VUSE ; # VUSE ; return *p_1; } In certain cases, the list of may aliases for a pointer may grow too large. This may cause an explosion in the number of virtual operands inserted in the code. Resulting in increased memory consumption and compilation time. When the number of virtual operands needed to represent aliased loads and stores grows too large (configurable with @option{--param max-aliased-vops}), alias sets are grouped to avoid severe compile-time slow downs and memory consumption. See group_aliases. */ static void compute_may_aliases (void) { struct alias_info *ai; memset (&alias_stats, 0, sizeof (alias_stats)); /* Initialize aliasing information. */ ai = init_alias_info (); /* For each pointer P_i, determine the sets of variables that P_i may point-to. For every addressable variable V, determine whether the address of V escapes the current function, making V call-clobbered (i.e., whether &V is stored in a global variable or if its passed as a function call argument). */ compute_points_to_and_addr_escape (ai); /* Collect all pointers and addressable variables, compute alias sets, create memory tags for pointers and promote variables whose address is not needed anymore. */ setup_pointers_and_addressables (ai); /* Compute flow-sensitive, points-to based aliasing for all the name memory tags. Note that this pass needs to be done before flow insensitive analysis because it uses the points-to information gathered before to mark call-clobbered type tags. */ compute_flow_sensitive_aliasing (ai); /* Compute type-based flow-insensitive aliasing for all the type memory tags. */ compute_flow_insensitive_aliasing (ai); /* If the program has too many call-clobbered variables and/or function calls, create .GLOBAL_VAR and use it to model call-clobbering semantics at call sites. This reduces the number of virtual operands considerably, improving compile times at the expense of lost aliasing precision. */ maybe_create_global_var (ai); /* Debugging dumps. 
*/ if (dump_file) { dump_referenced_vars (dump_file); if (dump_flags & TDF_STATS) dump_alias_stats (dump_file); dump_points_to_info (dump_file); dump_alias_info (dump_file); } /* Deallocate memory used by aliasing data structures. */ delete_alias_info (ai); /* Indicate that may-alias information is now available. */ aliases_computed_p = true; } struct tree_opt_pass pass_may_alias = { "alias", /* name */ NULL, /* gate */ compute_may_aliases, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_MAY_ALIAS, /* tv_id */ PROP_cfg | PROP_ssa | PROP_pta, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_rename_vars | TODO_ggc_collect | TODO_verify_ssa /* todo_flags_finish */ }; /* Initialize the data structures used for alias analysis. */ static struct alias_info * init_alias_info (void) { struct alias_info *ai; ai = xcalloc (1, sizeof (struct alias_info)); ai->ssa_names_visited = BITMAP_XMALLOC (); VARRAY_TREE_INIT (ai->processed_ptrs, 50, "processed_ptrs"); ai->addresses_needed = BITMAP_XMALLOC (); VARRAY_UINT_INIT (ai->num_references, num_referenced_vars, "num_references"); ai->written_vars = BITMAP_XMALLOC (); ai->dereferenced_ptrs_store = BITMAP_XMALLOC (); ai->dereferenced_ptrs_load = BITMAP_XMALLOC (); return ai; } /* Deallocate memory used by alias analysis. */ static void delete_alias_info (struct alias_info *ai) { size_t i; BITMAP_XFREE (ai->ssa_names_visited); ai->processed_ptrs = NULL; BITMAP_XFREE (ai->addresses_needed); for (i = 0; i < ai->num_addressable_vars; i++) { sbitmap_free (ai->addressable_vars[i]->may_aliases); free (ai->addressable_vars[i]); } free (ai->addressable_vars); for (i = 0; i < ai->num_pointers; i++) { sbitmap_free (ai->pointers[i]->may_aliases); free (ai->pointers[i]); } free (ai->pointers); ai->num_references = NULL; BITMAP_XFREE (ai->written_vars); BITMAP_XFREE (ai->dereferenced_ptrs_store); BITMAP_XFREE (ai->dereferenced_ptrs_load); free (ai); } /* Walk use-def chains for pointer PTR to determine what variables is PTR pointing to. */ static void collect_points_to_info_for (struct alias_info *ai, tree ptr) { #if defined ENABLE_CHECKING if (!POINTER_TYPE_P (TREE_TYPE (ptr))) abort (); #endif if (!bitmap_bit_p (ai->ssa_names_visited, SSA_NAME_VERSION (ptr))) { struct ptr_info_def *pi; bitmap_set_bit (ai->ssa_names_visited, SSA_NAME_VERSION (ptr)); walk_use_def_chains (ptr, collect_points_to_info_r, ai); VARRAY_PUSH_TREE (ai->processed_ptrs, ptr); /* If we could not determine where PTR was pointing to, clear all the other points-to information. */ pi = SSA_NAME_PTR_INFO (ptr); if (pi->pt_anything) { pi->pt_malloc = 0; pi->pt_vars = NULL; } } } /* Helper for ptr_is_dereferenced_by. Called by walk_tree to look for INDIRECT_REF nodes for the pointer passed in DATA. */ static tree find_ptr_dereference (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data) { tree ptr = (tree) data; if (TREE_CODE (*tp) == INDIRECT_REF && TREE_OPERAND (*tp, 0) == ptr) return *tp; return NULL_TREE; } /* Return true if STMT contains INDIRECT_REF . *IS_STORE is set to 'true' if the dereference is on the LHS of an assignment. */ static bool ptr_is_dereferenced_by (tree ptr, tree stmt, bool *is_store) { *is_store = false; if (TREE_CODE (stmt) == MODIFY_EXPR || (TREE_CODE (stmt) == RETURN_EXPR && TREE_CODE (TREE_OPERAND (stmt, 0)) == MODIFY_EXPR)) { tree e, lhs, rhs; e = (TREE_CODE (stmt) == RETURN_EXPR) ? 
TREE_OPERAND (stmt, 0) : stmt; lhs = TREE_OPERAND (e, 0); rhs = TREE_OPERAND (e, 1); if (EXPR_P (lhs) && walk_tree (&lhs, find_ptr_dereference, ptr, NULL)) { *is_store = true; return true; } else if (EXPR_P (rhs) && walk_tree (&rhs, find_ptr_dereference, ptr, NULL)) { return true; } } else if (TREE_CODE (stmt) == ASM_EXPR) { if (walk_tree (&ASM_OUTPUTS (stmt), find_ptr_dereference, ptr, NULL) || walk_tree (&ASM_CLOBBERS (stmt), find_ptr_dereference, ptr, NULL)) { *is_store = true; return true; } else if (walk_tree (&ASM_INPUTS (stmt), find_ptr_dereference, ptr, NULL)) { return true; } } return false; } /* Traverse use-def links for all the pointers in the program to collect address escape and points-to information. This is loosely based on the same idea described in R. Hasti and S. Horwitz, ``Using static single assignment form to improve flow-insensitive pointer analysis,'' in SIGPLAN Conference on Programming Language Design and Implementation, pp. 97-105, 1998. */ static void compute_points_to_and_addr_escape (struct alias_info *ai) { basic_block bb; size_t i; timevar_push (TV_TREE_PTA); FOR_EACH_BB (bb) { bb_ann_t block_ann = bb_ann (bb); block_stmt_iterator si; for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si)) { use_optype uses; def_optype defs; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; stmt_ann_t ann; bitmap addr_taken; tree stmt = bsi_stmt (si); bool stmt_escapes_p = is_escape_site (stmt, &ai->num_calls_found); /* Mark all the variables whose address are taken by the statement. Note that this will miss all the addresses taken in PHI nodes (those are discovered while following the use-def chains). */ get_stmt_operands (stmt); addr_taken = addresses_taken (stmt); if (addr_taken) EXECUTE_IF_SET_IN_BITMAP (addr_taken, 0, i, { tree var = referenced_var (i); bitmap_set_bit (ai->addresses_needed, var_ann (var)->uid); if (stmt_escapes_p) mark_call_clobbered (var); }); if (stmt_escapes_p) block_ann->has_escape_site = 1; /* Special case for silly ADDR_EXPR tricks (gcc.c-torture/unsorted/pass.c). If this statement is an assignment to a non-pointer variable and the RHS takes the address of a variable, assume that the variable on the RHS is call-clobbered. We could add the LHS to the list of "pointers" and follow it to see if it really escapes, but it's not worth the pain. */ if (addr_taken && TREE_CODE (stmt) == MODIFY_EXPR && !POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (stmt, 0)))) EXECUTE_IF_SET_IN_BITMAP (addr_taken, 0, i, { tree var = referenced_var (i); mark_call_clobbered (var); }); ann = stmt_ann (stmt); uses = USE_OPS (ann); for (i = 0; i < NUM_USES (uses); i++) { tree op = USE_OP (uses, i); var_ann_t v_ann = var_ann (SSA_NAME_VAR (op)); struct ptr_info_def *pi; bool is_store; /* If the operand's variable may be aliased, keep track of how many times we've referenced it. This is used for alias grouping in compute_flow_sensitive_aliasing. Note that we don't need to grow AI->NUM_REFERENCES because we are processing regular variables, not memory tags (the array's initial size is set to NUM_REFERENCED_VARS). */ if (may_be_aliased (SSA_NAME_VAR (op))) (VARRAY_UINT (ai->num_references, v_ann->uid))++; if (!POINTER_TYPE_P (TREE_TYPE (op))) continue; collect_points_to_info_for (ai, op); pi = SSA_NAME_PTR_INFO (op); if (ptr_is_dereferenced_by (op, stmt, &is_store)) { /* If we found OP to point to a set of variables or malloc, then create a name memory tag for it. This gives more precise aliasing information, which helps the optimizers. 
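An illustrative fragment:

        p_5 = &a;
        ...
        x_9 = *p_5;

   Here p_5 is known to point only to 'a', so it receives a name tag whose may-aliases are { a }, and the load is represented as a use of 'a' alone instead of a use of everything that p's type tag may alias.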
FIXME: Cycles in the SSA web and the lack of SSA information for structures will prevent the creation of name tags. Find ways around this limitation. */ if (pi->pt_malloc || pi->pt_vars) pi->name_mem_tag = get_nmt_for (op); /* Keep track of how many time we've dereferenced each pointer. Again, we don't need to grow AI->NUM_REFERENCES because we're processing existing program variables. */ (VARRAY_UINT (ai->num_references, v_ann->uid))++; /* If this is a store operation, mark OP as being dereferenced to store, otherwise mark it as being dereferenced to load. */ if (is_store) bitmap_set_bit (ai->dereferenced_ptrs_store, v_ann->uid); else bitmap_set_bit (ai->dereferenced_ptrs_load, v_ann->uid); } else if (stmt_escapes_p) { /* Note that even if STMT is an escape point, pointer OP will not escape if it is being dereferenced. That's why we only check for escape points if OP is not dereferenced by STMT. */ pi->value_escapes_p = 1; /* If the statement makes a function call, assume that pointer OP will be dereferenced in a store operation inside the called function. */ if (get_call_expr_in (stmt)) bitmap_set_bit (ai->dereferenced_ptrs_store, v_ann->uid); } } /* Update reference counter for definitions to any potentially aliased variable. This is used in the alias grouping heuristics. */ defs = DEF_OPS (ann); for (i = 0; i < NUM_DEFS (defs); i++) { tree op = DEF_OP (defs, i); tree var = SSA_NAME_VAR (op); var_ann_t ann = var_ann (var); bitmap_set_bit (ai->written_vars, ann->uid); if (may_be_aliased (var)) (VARRAY_UINT (ai->num_references, ann->uid))++; } /* Mark variables in V_MAY_DEF operands as being written to. */ v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { tree op = V_MAY_DEF_OP (v_may_defs, i); tree var = SSA_NAME_VAR (op); var_ann_t ann = var_ann (var); bitmap_set_bit (ai->written_vars, ann->uid); } /* Mark variables in V_MUST_DEF operands as being written to. */ v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { tree op = V_MUST_DEF_OP (v_must_defs, i); tree var = SSA_NAME_VAR (op); var_ann_t ann = var_ann (var); bitmap_set_bit (ai->written_vars, ann->uid); } /* After promoting variables and computing aliasing we will need to re-scan most statements. FIXME: Try to minimize the number of statements re-scanned. It's not really necessary to re-scan *all* statements. */ modify_stmt (stmt); } } timevar_pop (TV_TREE_PTA); } /* For every pointer P_i in AI->PROCESSED_PTRS, create may-alias sets for the name memory tag (NMT) associated with P_i. If P_i escapes, then its name tag and the variables it points-to are call-clobbered. Finally, if P_i escapes and we could not determine where it points to, then all the variables in the same alias set as *P_i are marked call-clobbered. This is necessary because we must assume that P_i may take the address of any variable in the same alias set. */ static void compute_flow_sensitive_aliasing (struct alias_info *ai) { size_t i; for (i = 0; i < VARRAY_ACTIVE_SIZE (ai->processed_ptrs); i++) { size_t j; tree ptr = VARRAY_TREE (ai->processed_ptrs, i); struct ptr_info_def *pi = SSA_NAME_PTR_INFO (ptr); var_ann_t v_ann = var_ann (SSA_NAME_VAR (ptr)); if (pi->value_escapes_p || pi->pt_anything) { /* If PTR escapes or may point to anything, then its associated memory tags are call-clobbered. 
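For instance (a made-up case), if p_3 = &a is later stored into a global or returned from the function, 'a' can be modified by any callee; the tags standing for *p_3 (and, through add_may_alias below, 'a' itself) end up call-clobbered, so calls to functions that are neither pure nor const will carry V_MAY_DEF operands for them.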
*/ if (pi->name_mem_tag) mark_call_clobbered (pi->name_mem_tag); if (v_ann->type_mem_tag) mark_call_clobbered (v_ann->type_mem_tag); /* If PTR may point to anything, mark call-clobbered all the addressables with the same alias set as the type pointed-to by PTR. */ if (pi->pt_anything) { HOST_WIDE_INT ptr_set; ptr_set = get_alias_set (TREE_TYPE (TREE_TYPE (ptr))); for (j = 0; j < ai->num_addressable_vars; j++) { struct alias_map_d *alias_map = ai->addressable_vars[j]; if (alias_map->set == ptr_set) mark_call_clobbered (alias_map->var); } } /* If PTR's value may escape and PTR is never dereferenced, we need to mark all the variables PTR points-to as call-clobbered. Note that we only need do this it PTR is never dereferenced. If PTR is dereferenced, it will have a name memory tag, which will have been marked call-clobbered. This will in turn mark the pointed-to variables as call-clobbered when we call add_may_alias below. */ if (pi->value_escapes_p && pi->name_mem_tag == NULL_TREE && pi->pt_vars) EXECUTE_IF_SET_IN_BITMAP (pi->pt_vars, 0, j, mark_call_clobbered (referenced_var (j))); } /* Set up aliasing information for PTR's name memory tag (if it has one). Note that only pointers that have been dereferenced will have a name memory tag. */ if (pi->name_mem_tag && pi->pt_vars) EXECUTE_IF_SET_IN_BITMAP (pi->pt_vars, 0, j, add_may_alias (pi->name_mem_tag, referenced_var (j))); /* If the name tag is call clobbered, so is the type tag associated with the base VAR_DECL. */ if (pi->name_mem_tag && v_ann->type_mem_tag && is_call_clobbered (pi->name_mem_tag)) mark_call_clobbered (v_ann->type_mem_tag); } } /* Compute type-based alias sets. Traverse all the pointers and addressable variables found in setup_pointers_and_addressables. For every pointer P in AI->POINTERS and addressable variable V in AI->ADDRESSABLE_VARS, add V to the may-alias sets of P's type memory tag (TMT) if their alias sets conflict. V is then marked as an alias tag so that the operand scanner knows that statements containing V have aliased operands. */ static void compute_flow_insensitive_aliasing (struct alias_info *ai) { size_t i; /* Initialize counter for the total number of virtual operands that aliasing will introduce. When AI->TOTAL_ALIAS_VOPS goes beyond the threshold set by --params max-alias-vops, we enable alias grouping. */ ai->total_alias_vops = 0; /* For every pointer P, determine which addressable variables may alias with P's type memory tag. */ for (i = 0; i < ai->num_pointers; i++) { size_t j; struct alias_map_d *p_map = ai->pointers[i]; tree tag = var_ann (p_map->var)->type_mem_tag; var_ann_t tag_ann = var_ann (tag); p_map->total_alias_vops = 0; p_map->may_aliases = sbitmap_alloc (num_referenced_vars); sbitmap_zero (p_map->may_aliases); for (j = 0; j < ai->num_addressable_vars; j++) { struct alias_map_d *v_map; var_ann_t v_ann; tree var; bool tag_stored_p, var_stored_p; v_map = ai->addressable_vars[j]; var = v_map->var; v_ann = var_ann (var); /* Skip memory tags and variables that have never been written to. We also need to check if the variables are call-clobbered because they may be overwritten by function calls. 
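An illustrative case: if every reference to VAR and to the tag in the whole function is a load, and neither is call-clobbered, there are no stores for those loads to be ordered against; recording the alias relationship would only add virtual operand traffic without enabling or blocking any transformation, so the pair is skipped.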
*/ tag_stored_p = bitmap_bit_p (ai->written_vars, tag_ann->uid) || is_call_clobbered (tag); var_stored_p = bitmap_bit_p (ai->written_vars, v_ann->uid) || is_call_clobbered (var); if (!tag_stored_p && !var_stored_p) continue; if (may_alias_p (p_map->var, p_map->set, var, v_map->set)) { size_t num_tag_refs, num_var_refs; num_tag_refs = VARRAY_UINT (ai->num_references, tag_ann->uid); num_var_refs = VARRAY_UINT (ai->num_references, v_ann->uid); /* Add VAR to TAG's may-aliases set. */ add_may_alias (tag, var); /* Update the total number of virtual operands due to aliasing. Since we are adding one more alias to TAG's may-aliases set, the total number of virtual operands due to aliasing will be increased by the number of references made to VAR and TAG (every reference to TAG will also count as a reference to VAR). */ ai->total_alias_vops += (num_var_refs + num_tag_refs); p_map->total_alias_vops += (num_var_refs + num_tag_refs); /* Update the bitmap used to represent TAG's alias set in case we need to group aliases. */ SET_BIT (p_map->may_aliases, var_ann (var)->uid); } } } if (dump_file) fprintf (dump_file, "%s: Total number of aliased vops: %ld\n", get_name (current_function_decl), ai->total_alias_vops); /* Determine if we need to enable alias grouping. */ if (ai->total_alias_vops >= MAX_ALIASED_VOPS) group_aliases (ai); } /* Comparison function for qsort used in group_aliases. */ static int total_alias_vops_cmp (const void *p, const void *q) { const struct alias_map_d **p1 = (const struct alias_map_d **)p; const struct alias_map_d **p2 = (const struct alias_map_d **)q; long n1 = (*p1)->total_alias_vops; long n2 = (*p2)->total_alias_vops; /* We want to sort in descending order. */ return (n1 > n2 ? -1 : (n1 == n2) ? 0 : 1); } /* Group all the aliases for TAG to make TAG represent all the variables in its alias set. Update the total number of virtual operands due to aliasing (AI->TOTAL_ALIAS_VOPS). This function will make TAG be the unique alias tag for all the variables in its may-aliases. So, given: may-aliases(TAG) = { V1, V2, V3 } This function will group the variables into: may-aliases(V1) = { TAG } may-aliases(V2) = { TAG } may-aliases(V2) = { TAG } */ static void group_aliases_into (tree tag, sbitmap tag_aliases, struct alias_info *ai) { size_t i; var_ann_t tag_ann = var_ann (tag); size_t num_tag_refs = VARRAY_UINT (ai->num_references, tag_ann->uid); EXECUTE_IF_SET_IN_SBITMAP (tag_aliases, 0, i, { tree var = referenced_var (i); var_ann_t ann = var_ann (var); /* Make TAG the unique alias of VAR. */ ann->is_alias_tag = 0; ann->may_aliases = NULL; /* Note that VAR and TAG may be the same if the function has no addressable variables (see the discussion at the end of setup_pointers_and_addressables). */ if (var != tag) add_may_alias (var, tag); /* Reduce total number of virtual operands contributed by TAG on behalf of VAR. Notice that the references to VAR itself won't be removed. We will merely replace them with references to TAG. */ ai->total_alias_vops -= num_tag_refs; }); /* We have reduced the number of virtual operands that TAG makes on behalf of all the variables formerly aliased with it. However, we have also "removed" all the virtual operands for TAG itself, so we add them back. */ ai->total_alias_vops += num_tag_refs; /* TAG no longer has any aliases. */ tag_ann->may_aliases = NULL; } /* Group may-aliases sets to reduce the number of virtual operands due to aliasing. 1- Sort the list of pointers in decreasing number of contributed virtual operands. 
2- Take the first entry in AI->POINTERS and revert the role of the memory tag and its aliases. Usually, whenever an aliased variable Vi is found to alias with a memory tag T, we add Vi to the may-aliases set for T. Meaning that after alias analysis, we will have: may-aliases(T) = { V1, V2, V3, ..., Vn } This means that every statement that references T, will get 'n' virtual operands for each of the Vi tags. But, when alias grouping is enabled, we make T an alias tag and add it to the alias set of all the Vi variables: may-aliases(V1) = { T } may-aliases(V2) = { T } ... may-aliases(Vn) = { T } This has two effects: (a) statements referencing T will only get a single virtual operand, and, (b) all the variables Vi will now appear to alias each other. So, we lose alias precision to improve compile time. But, in theory, a program with such a high level of aliasing should not be very optimizable in the first place. 3- Since variables may be in the alias set of more than one memory tag, the grouping done in step (2) needs to be extended to all the memory tags that have a non-empty intersection with the may-aliases set of tag T. For instance, if we originally had these may-aliases sets: may-aliases(T) = { V1, V2, V3 } may-aliases(R) = { V2, V4 } In step (2) we would have reverted the aliases for T as: may-aliases(V1) = { T } may-aliases(V2) = { T } may-aliases(V3) = { T } But note that now V2 is no longer aliased with R. We could add R to may-aliases(V2), but we are in the process of grouping aliases to reduce virtual operands so what we do is add V4 to the grouping to obtain: may-aliases(V1) = { T } may-aliases(V2) = { T } may-aliases(V3) = { T } may-aliases(V4) = { T } 4- If the total number of virtual operands due to aliasing is still above the threshold set by max-alias-vops, go back to (2). */ static void group_aliases (struct alias_info *ai) { size_t i; sbitmap res; /* Sort the POINTERS array in descending order of contributed virtual operands. */ qsort (ai->pointers, ai->num_pointers, sizeof (struct alias_map_d *), total_alias_vops_cmp); res = sbitmap_alloc (num_referenced_vars); /* For every pointer in AI->POINTERS, reverse the roles of its tag and the tag's may-aliases set. */ for (i = 0; i < ai->num_pointers; i++) { size_t j; tree tag1 = var_ann (ai->pointers[i]->var)->type_mem_tag; sbitmap tag1_aliases = ai->pointers[i]->may_aliases; /* Skip tags that have been grouped already. */ if (ai->pointers[i]->grouped_p) continue; /* See if TAG1 had any aliases in common with other type tags. If we find a TAG2 with common aliases with TAG1, add TAG2's aliases into TAG1. */ for (j = i + 1; j < ai->num_pointers; j++) { sbitmap tag2_aliases = ai->pointers[j]->may_aliases; sbitmap_a_and_b (res, tag1_aliases, tag2_aliases); if (sbitmap_first_set_bit (res) >= 0) { tree tag2 = var_ann (ai->pointers[j]->var)->type_mem_tag; sbitmap_a_or_b (tag1_aliases, tag1_aliases, tag2_aliases); /* TAG2 does not need its aliases anymore. */ sbitmap_zero (tag2_aliases); var_ann (tag2)->may_aliases = NULL; /* TAG1 is the unique alias of TAG2. */ add_may_alias (tag2, tag1); ai->pointers[j]->grouped_p = true; } } /* Now group all the aliases we collected into TAG1. */ group_aliases_into (tag1, tag1_aliases, ai); /* If we've reduced total number of virtual operands below the threshold, stop. */ if (ai->total_alias_vops < MAX_ALIASED_VOPS) break; } /* Finally, all the variables that have been grouped cannot be in the may-alias set of name memory tags. 
Suppose that we have grouped the aliases in this code so that may-aliases(a) = TMT.20 p_5 = &a; ... # a_9 = V_MAY_DEF p_5->field = 0 ... Several modifications to TMT.20 ... # VUSE x_30 = p_5->field Since p_5 points to 'a', the optimizers will try to propagate 0 into p_5->field, but that is wrong because there have been modifications to 'TMT.20' in between. To prevent this we have to replace 'a' with 'TMT.20' in the name tag of p_5. */ for (i = 0; i < VARRAY_ACTIVE_SIZE (ai->processed_ptrs); i++) { size_t j; tree ptr = VARRAY_TREE (ai->processed_ptrs, i); tree name_tag = SSA_NAME_PTR_INFO (ptr)->name_mem_tag; varray_type aliases; if (name_tag == NULL_TREE) continue; aliases = var_ann (name_tag)->may_aliases; for (j = 0; aliases && j < VARRAY_ACTIVE_SIZE (aliases); j++) { tree alias = VARRAY_TREE (aliases, j); var_ann_t ann = var_ann (alias); if (ann->may_aliases) { #if defined ENABLE_CHECKING if (VARRAY_ACTIVE_SIZE (ann->may_aliases) != 1) abort (); #endif VARRAY_TREE (aliases, j) = VARRAY_TREE (ann->may_aliases, 0); } } } sbitmap_free (res); if (dump_file) fprintf (dump_file, "%s: Total number of aliased vops after grouping: %ld%s\n", get_name (current_function_decl), ai->total_alias_vops, (ai->total_alias_vops < 0) ? " (negative values are OK)" : ""); } /* Create a new alias set entry for VAR in AI->ADDRESSABLE_VARS. */ static void create_alias_map_for (tree var, struct alias_info *ai) { struct alias_map_d *alias_map; alias_map = xcalloc (1, sizeof (*alias_map)); alias_map->var = var; if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE) alias_map->set = get_alias_set (TREE_TYPE (TREE_TYPE (var))); else alias_map->set = get_alias_set (var); ai->addressable_vars[ai->num_addressable_vars++] = alias_map; } /* Create memory tags for all the dereferenced pointers and build the ADDRESSABLE_VARS and POINTERS arrays used for building the may-alias sets. Based on the address escape and points-to information collected earlier, this pass will also clear the TREE_ADDRESSABLE flag from those variables whose address is not needed anymore. */ static void setup_pointers_and_addressables (struct alias_info *ai) { size_t i, n_vars, num_addressable_vars, num_pointers; /* Size up the arrays ADDRESSABLE_VARS and POINTERS. */ num_addressable_vars = num_pointers = 0; for (i = 0; i < num_referenced_vars; i++) { tree var = referenced_var (i); if (may_be_aliased (var)) num_addressable_vars++; if (POINTER_TYPE_P (TREE_TYPE (var))) { /* Since we don't keep track of volatile variables nor variables with hidden uses, assume that these pointers are used in indirect store operations. */ var_ann_t ann = var_ann (var); if (TREE_THIS_VOLATILE (var) || ann->has_hidden_use) bitmap_set_bit (ai->dereferenced_ptrs_store, ann->uid); num_pointers++; } } /* Create ADDRESSABLE_VARS and POINTERS. Note that these arrays are always going to be slightly bigger than we actually need them because some TREE_ADDRESSABLE variables will be marked non-addressable below and only pointers with unique type tags are going to be added to POINTERS. */ ai->addressable_vars = xcalloc (num_addressable_vars, sizeof (struct alias_map_d *)); ai->pointers = xcalloc (num_pointers, sizeof (struct alias_map_d *)); ai->num_addressable_vars = 0; ai->num_pointers = 0; /* Since we will be creating type memory tags within this loop, cache the value of NUM_REFERENCED_VARS to avoid processing the additional tags unnecessarily. 
*/ n_vars = num_referenced_vars; for (i = 0; i < n_vars; i++) { tree var = referenced_var (i); var_ann_t v_ann = var_ann (var); /* Name memory tags already have flow-sensitive aliasing information, so they need not be processed by compute_may_aliases. Similarly, type memory tags are already accounted for when we process their associated pointer. */ if (v_ann->mem_tag_kind != NOT_A_TAG) continue; /* Remove the ADDRESSABLE flag from every addressable variable whose address is not needed anymore. This is caused by the propagation of ADDR_EXPR constants into INDIRECT_REF expressions and the removal of dead pointer assignments done by the early scalar cleanup passes. */ if (TREE_ADDRESSABLE (var)) { if (!bitmap_bit_p (ai->addresses_needed, v_ann->uid) && !v_ann->has_hidden_use && v_ann->mem_tag_kind == NOT_A_TAG && !needs_to_live_in_memory (var)) { /* The address of VAR is not needed, remove the addressable bit, so that it can be optimized as a regular variable. */ mark_non_addressable (var); /* Since VAR is now a regular GIMPLE register, we will need to rename VAR into SSA afterwards. */ bitmap_set_bit (vars_to_rename, v_ann->uid); } } /* Global variables and addressable locals may be aliased. Create an entry in ADDRESSABLE_VARS for VAR. */ if (may_be_aliased (var)) { create_alias_map_for (var, ai); bitmap_set_bit (vars_to_rename, var_ann (var)->uid); } /* Add pointer variables that have been dereferenced to the POINTERS array and create a type memory tag for them. */ if (POINTER_TYPE_P (TREE_TYPE (var)) && (bitmap_bit_p (ai->dereferenced_ptrs_store, v_ann->uid) || bitmap_bit_p (ai->dereferenced_ptrs_load, v_ann->uid))) { tree tag = v_ann->type_mem_tag; var_ann_t t_ann; /* If pointer VAR still doesn't have a memory tag associated with it, create it now or re-use an existing one. */ if (tag == NULL_TREE) tag = get_tmt_for (var, ai); t_ann = var_ann (tag); /* Associate the tag with pointer VAR. */ v_ann->type_mem_tag = tag; /* If pointer VAR has been used in a store operation, then its memory tag must be marked as written-to. */ if (bitmap_bit_p (ai->dereferenced_ptrs_store, v_ann->uid)) bitmap_set_bit (ai->written_vars, t_ann->uid); /* If pointer VAR is a global variable or a PARM_DECL, then its memory tag should be considered a global variable. */ if (TREE_CODE (var) == PARM_DECL || needs_to_live_in_memory (var)) mark_call_clobbered (tag); /* All the dereferences of pointer VAR count as references of TAG. Since TAG can be associated with several pointers, add the dereferences of VAR to the TAG. We may need to grow AI->NUM_REFERENCES because we have been adding name and type tags. */ if (t_ann->uid >= VARRAY_SIZE (ai->num_references)) VARRAY_GROW (ai->num_references, t_ann->uid + 10); VARRAY_UINT (ai->num_references, t_ann->uid) += VARRAY_UINT (ai->num_references, v_ann->uid); } } /* If we found no addressable variables, but we have more than one pointer, we will need to check for conflicts between the pointers. Otherwise, we would miss alias relations as in testsuite/gcc.dg/tree-ssa/20040319-1.c: struct bar { int count; int *arr;}; void foo (struct bar *b) { b->count = 0; *(b->arr) = 2; if (b->count == 0) abort (); } b->count and *(b->arr) could be aliased if b->arr == &b->count. To do this, we add all the memory tags for the pointers in AI->POINTERS to AI->ADDRESSABLE_VARS, so that compute_flow_insensitive_aliasing will naturally compare every pointer to every type tag. 
*/ if (ai->num_addressable_vars == 0 && ai->num_pointers > 1) { free (ai->addressable_vars); ai->addressable_vars = xcalloc (ai->num_pointers, sizeof (struct alias_map_d *)); ai->num_addressable_vars = 0; for (i = 0; i < ai->num_pointers; i++) { struct alias_map_d *p = ai->pointers[i]; tree tag = var_ann (p->var)->type_mem_tag; create_alias_map_for (tag, ai); } } }

/* Determine whether to use .GLOBAL_VAR to model call clobbering semantics. At every call site, we need to emit V_MAY_DEF expressions to represent the clobbering effects of the call for variables whose address escapes the current function. One approach is to group all call-clobbered variables into a single representative that is used as an alias of every call-clobbered variable (.GLOBAL_VAR). This works well, but it ties the optimizer's hands because a reference to any call-clobbered variable becomes a reference to .GLOBAL_VAR. The second approach is to emit a clobbering V_MAY_DEF for every call-clobbered variable at call sites. This is the preferred way in terms of optimization opportunities but it may create too many V_MAY_DEF operands if there are many call-clobbered variables and function calls in the function. To decide whether or not to use .GLOBAL_VAR we multiply the number of function calls found by the number of call-clobbered variables. If that product is beyond a certain threshold, as determined by the parameterized values shown below, we use .GLOBAL_VAR. FIXME. This heuristic should be improved. One idea is to use several .GLOBAL_VARs of different types instead of a single one. The thresholds have been derived from a typical bootstrap cycle, including all target libraries. Compile times were found to increase by ~1% compared to using .GLOBAL_VAR. */ static void maybe_create_global_var (struct alias_info *ai) { size_t i, n_clobbered; /* Count all the call-clobbered variables. */ n_clobbered = 0; EXECUTE_IF_SET_IN_BITMAP (call_clobbered_vars, 0, i, n_clobbered++); /* Create .GLOBAL_VAR if we have too many call-clobbered variables. We also create .GLOBAL_VAR when there are no call-clobbered variables to prevent code motion transformations from re-arranging function calls that may have side effects. For instance, foo () { int a = f (); g (); h (a); } There are no call-clobbered variables in foo(), so it would be entirely possible for a pass to want to move the call to f() after the call to g(). If f() has side effects, that would be wrong. Creating .GLOBAL_VAR in this case will insert VDEFs for it and prevent such transformations. */ if (n_clobbered == 0 || ai->num_calls_found * n_clobbered >= (size_t) GLOBAL_VAR_THRESHOLD) create_global_var (); /* If the function has calls to clobbering functions and .GLOBAL_VAR has been created, make it an alias for all call-clobbered variables. */ if (global_var) EXECUTE_IF_SET_IN_BITMAP (call_clobbered_vars, 0, i, { tree var = referenced_var (i); if (var != global_var) { add_may_alias (var, global_var); bitmap_set_bit (vars_to_rename, var_ann (var)->uid); } }); }

/* Return TRUE if pointer PTR may point to variable VAR. MEM_ALIAS_SET is the alias set for the memory location pointed-to by PTR. This is needed because when checking for type conflicts we are interested in the alias set of the memory location pointed-to by PTR. The alias set of PTR itself is irrelevant. VAR_ALIAS_SET is the alias set for VAR.
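   For instance, in a purely illustrative fragment (not taken from any testcase):

       double *p;
       int i;
       ...
       *p = 3.0;
       return i;

   the set that matters for the store is the alias set of 'double' (what P points to), i.e. MEM_ALIAS_SET, not the alias set of 'double *' (P itself); it is MEM_ALIAS_SET that must be compared against VAR_ALIAS_SET (here, the alias set of 'int') to decide whether '*p' may overlap 'i'.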
*/ static bool may_alias_p (tree ptr, HOST_WIDE_INT mem_alias_set, tree var, HOST_WIDE_INT var_alias_set) { tree mem; var_ann_t v_ann, m_ann; alias_stats.alias_queries++; alias_stats.simple_queries++; /* By convention, a variable cannot alias itself. */ mem = var_ann (ptr)->type_mem_tag; if (mem == var) { alias_stats.alias_noalias++; alias_stats.simple_resolved++; return false; } v_ann = var_ann (var); m_ann = var_ann (mem); #if defined ENABLE_CHECKING if (m_ann->mem_tag_kind != TYPE_TAG) abort (); #endif alias_stats.tbaa_queries++; /* If VAR is a pointer with the same alias set as PTR, then dereferencing PTR can't possibly affect VAR. Note, that we are specifically testing for PTR's alias set here, not its pointed-to type. We also can't do this check with relaxed aliasing enabled. */ if (POINTER_TYPE_P (TREE_TYPE (var)) && var_alias_set != 0) { HOST_WIDE_INT ptr_alias_set = get_alias_set (ptr); if (ptr_alias_set == var_alias_set) { alias_stats.alias_noalias++; alias_stats.tbaa_resolved++; return false; } } /* If the alias sets don't conflict then MEM cannot alias VAR. */ if (!alias_sets_conflict_p (mem_alias_set, var_alias_set)) { /* Handle aliases to structure fields. If either VAR or MEM are aggregate types, they may not have conflicting types, but one of the structures could contain a pointer to the other one. For instance, given MEM -> struct P *p; VAR -> struct Q *q; It may happen that '*p' and '*q' can't alias because 'struct P' and 'struct Q' have non-conflicting alias sets. However, it could happen that one of the fields in 'struct P' is a 'struct Q *' or vice-versa. Therefore, we also need to check if 'struct P' aliases 'struct Q *' or 'struct Q' aliases 'struct P *'. Notice, that since GIMPLE does not have more than one-level pointers, we don't need to recurse into the structures. */ if (AGGREGATE_TYPE_P (TREE_TYPE (mem)) || AGGREGATE_TYPE_P (TREE_TYPE (var))) { tree ptr_to_var; if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE) ptr_to_var = TYPE_POINTER_TO (TREE_TYPE (TREE_TYPE (var))); else ptr_to_var = TYPE_POINTER_TO (TREE_TYPE (var)); /* If no pointer-to VAR exists, then MEM can't alias VAR. */ if (ptr_to_var == NULL_TREE) { alias_stats.alias_noalias++; alias_stats.tbaa_resolved++; return false; } /* If MEM doesn't alias a pointer to VAR and VAR doesn't alias PTR, then PTR can't alias VAR. */ if (!alias_sets_conflict_p (mem_alias_set, get_alias_set (ptr_to_var)) && !alias_sets_conflict_p (var_alias_set, get_alias_set (ptr))) { alias_stats.alias_noalias++; alias_stats.tbaa_resolved++; return false; } } else { alias_stats.alias_noalias++; alias_stats.tbaa_resolved++; return false; } } if (flag_tree_points_to != PTA_NONE) alias_stats.pta_queries++; /* If -ftree-points-to is given, check if PTR may point to VAR. */ if (flag_tree_points_to == PTA_ANDERSEN && !ptr_may_alias_var (ptr, var)) { alias_stats.alias_noalias++; alias_stats.pta_resolved++; return false; } alias_stats.alias_mayalias++; return true; } /* Add ALIAS to the set of variables that may alias VAR. */ static void add_may_alias (tree var, tree alias) { size_t i; var_ann_t v_ann = get_var_ann (var); var_ann_t a_ann = get_var_ann (alias); #if defined ENABLE_CHECKING if (var == alias) abort (); #endif if (v_ann->may_aliases == NULL) VARRAY_TREE_INIT (v_ann->may_aliases, 2, "aliases"); /* Avoid adding duplicates. */ for (i = 0; i < VARRAY_ACTIVE_SIZE (v_ann->may_aliases); i++) if (alias == VARRAY_TREE (v_ann->may_aliases, i)) return; /* If VAR is a call-clobbered variable, so is its new ALIAS. 
*/ if (is_call_clobbered (var)) mark_call_clobbered (alias); /* Likewise. If ALIAS is call-clobbered, so is VAR. */ else if (is_call_clobbered (alias)) mark_call_clobbered (var); VARRAY_PUSH_TREE (v_ann->may_aliases, alias); a_ann->is_alias_tag = 1; } /* Given two pointers DEST and ORIG. Merge the points-to information in ORIG into DEST. AI is as in collect_points_to_info. */ static void merge_pointed_to_info (struct alias_info *ai, tree dest, tree orig) { struct ptr_info_def *dest_pi, *orig_pi; /* Make sure we have points-to information for ORIG. */ collect_points_to_info_for (ai, orig); dest_pi = get_ptr_info (dest); orig_pi = SSA_NAME_PTR_INFO (orig); if (orig_pi) { dest_pi->pt_anything |= orig_pi->pt_anything; dest_pi->pt_malloc |= orig_pi->pt_malloc; if (orig_pi->pt_vars) { if (dest_pi->pt_vars == NULL) { dest_pi->pt_vars = BITMAP_GGC_ALLOC (); bitmap_copy (dest_pi->pt_vars, orig_pi->pt_vars); } else bitmap_a_or_b (dest_pi->pt_vars, dest_pi->pt_vars, orig_pi->pt_vars); } } } /* Add VALUE to the list of expressions pointed-to by PTR. */ static void add_pointed_to_expr (tree ptr, tree value) { struct ptr_info_def *pi; #if defined ENABLE_CHECKING /* Pointer variables should have been handled by merge_pointed_to_info. */ if (TREE_CODE (value) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (value))) abort (); #endif pi = get_ptr_info (ptr); /* If VALUE is the result of a malloc-like call, then the area pointed to PTR is guaranteed to not alias with anything else. */ if (TREE_CODE (value) == CALL_EXPR && (call_expr_flags (value) & (ECF_MALLOC | ECF_MAY_BE_ALLOCA))) pi->pt_malloc = 1; else pi->pt_anything = 1; if (dump_file) { fprintf (dump_file, "Pointer "); print_generic_expr (dump_file, ptr, dump_flags); fprintf (dump_file, " points to "); if (pi->pt_malloc) fprintf (dump_file, "malloc space: "); else fprintf (dump_file, "an arbitrary address: "); print_generic_expr (dump_file, value, dump_flags); fprintf (dump_file, "\n"); } } /* If VALUE is of the form &DECL, add DECL to the set of variables pointed-to by PTR. Otherwise, add VALUE as a pointed-to expression by PTR. AI is as in collect_points_to_info. */ static void add_pointed_to_var (struct alias_info *ai, tree ptr, tree value) { if (TREE_CODE (value) == ADDR_EXPR) { tree pt_var; struct ptr_info_def *pi; size_t uid; pt_var = TREE_OPERAND (value, 0); if (TREE_CODE_CLASS (TREE_CODE (pt_var)) == 'r') pt_var = get_base_address (pt_var); if (pt_var && SSA_VAR_P (pt_var)) { pi = get_ptr_info (ptr); uid = var_ann (pt_var)->uid; if (pi->pt_vars == NULL) pi->pt_vars = BITMAP_GGC_ALLOC (); bitmap_set_bit (pi->pt_vars, uid); bitmap_set_bit (ai->addresses_needed, uid); } else add_pointed_to_expr (ptr, value); } else add_pointed_to_expr (ptr, value); } /* Callback for walk_use_def_chains to gather points-to information from the SSA web. VAR is an SSA variable or a GIMPLE expression. STMT is the statement that generates the SSA variable or, if STMT is a PHI_NODE, VAR is one of the PHI arguments. DATA is a pointer to a structure of type ALIAS_INFO. */ static bool collect_points_to_info_r (tree var, tree stmt, void *data) { struct alias_info *ai = (struct alias_info *) data; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Visiting use-def links for "); print_generic_expr (dump_file, var, dump_flags); fprintf (dump_file, "\n"); } if (TREE_CODE (stmt) == MODIFY_EXPR) { tree rhs = TREE_OPERAND (stmt, 1); STRIP_NOPS (rhs); /* Found P_i = CONST. */ if (is_gimple_min_invariant (rhs)) add_pointed_to_var (ai, var, rhs); /* Found P_i = Q_j. 
*/ else if (TREE_CODE (rhs) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (rhs))) merge_pointed_to_info (ai, var, rhs); /* Found P_i = PLUS_EXPR or P_i = MINUS_EXPR */ else if (TREE_CODE (rhs) == PLUS_EXPR || TREE_CODE (rhs) == MINUS_EXPR) { tree op0 = TREE_OPERAND (rhs, 0); tree op1 = TREE_OPERAND (rhs, 1); if (TREE_CODE (op0) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (op0))) merge_pointed_to_info (ai, var, op0); else if (TREE_CODE (op1) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (op1))) merge_pointed_to_info (ai, var, op1); else if (is_gimple_min_invariant (op0)) add_pointed_to_var (ai, var, op0); else if (is_gimple_min_invariant (op1)) add_pointed_to_var (ai, var, op1); else add_pointed_to_expr (var, rhs); } /* Something else. */ else add_pointed_to_expr (var, rhs); } else if (TREE_CODE (stmt) == ASM_EXPR) { /* Pointers defined by __asm__ statements can point anywhere. */ get_ptr_info (var)->pt_anything = 1; } else if (IS_EMPTY_STMT (stmt)) { tree decl = SSA_NAME_VAR (var); if (TREE_CODE (decl) == PARM_DECL) add_pointed_to_expr (var, decl); else if (DECL_INITIAL (decl)) add_pointed_to_var (ai, var, DECL_INITIAL (decl)); else add_pointed_to_expr (var, decl); } else if (TREE_CODE (stmt) == PHI_NODE) { tree lhs = PHI_RESULT (stmt); if (is_gimple_min_invariant (var)) add_pointed_to_var (ai, lhs, var); else if (TREE_CODE (var) == SSA_NAME) merge_pointed_to_info (ai, lhs, var); else abort (); } else abort (); return false; } /* Return true if STMT is an "escape" site from the current function. Escape sites those statements which might expose the address of a variable outside the current function. STMT is an escape site iff: 1- STMT is a function call, or 2- STMT is an __asm__ expression, or 3- STMT is an assignment to a non-local variable, or 4- STMT is a return statement. If NUM_CALLS_P is not NULL, the counter is incremented if STMT contains a function call. */ static bool is_escape_site (tree stmt, size_t *num_calls_p) { if (get_call_expr_in (stmt) != NULL_TREE) { if (num_calls_p) (*num_calls_p)++; return true; } else if (TREE_CODE (stmt) == ASM_EXPR) return true; else if (TREE_CODE (stmt) == MODIFY_EXPR) { tree lhs = TREE_OPERAND (stmt, 0); /* Get to the base of _REF nodes. */ if (TREE_CODE (lhs) != SSA_NAME) lhs = get_base_address (lhs); /* If we couldn't recognize the LHS of the assignment, assume that it is a non-local store. */ if (lhs == NULL_TREE) return true; /* If the LHS is an SSA name, it can't possibly represent a non-local memory store. */ if (TREE_CODE (lhs) == SSA_NAME) return false; /* FIXME: LHS is not an SSA_NAME. Even if it's an assignment to a local variables we cannot be sure if it will escape, because we don't have information about objects not in SSA form. Need to implement something along the lines of J.-D. Choi, M. Gupta, M. J. Serrano, V. C. Sreedhar, and S. P. Midkiff, ``Escape analysis for java,'' in Proceedings of the Conference on Object-Oriented Programming Systems, Languages, and Applications (OOPSLA), pp. 1-19, 1999. */ return true; } else if (TREE_CODE (stmt) == RETURN_EXPR) return true; return false; } /* Create a new memory tag of type TYPE. If IS_TYPE_TAG is true, the tag is considered to represent all the pointers whose pointed-to types are in the same alias set class. Otherwise, the tag represents a single SSA_NAME pointer variable. */ static tree create_memory_tag (tree type, bool is_type_tag) { var_ann_t ann; tree tag = create_tmp_var_raw (type, (is_type_tag) ? "TMT" : "NMT"); /* By default, memory tags are local variables. 
Alias analysis will determine whether they should be considered globals. */ DECL_CONTEXT (tag) = current_function_decl; /* If the pointed-to type is volatile, so is the tag. */ TREE_THIS_VOLATILE (tag) = TREE_THIS_VOLATILE (type); /* Memory tags are by definition addressable. This also prevents is_gimple_ref frome confusing memory tags with optimizable variables. */ TREE_ADDRESSABLE (tag) = 1; ann = get_var_ann (tag); ann->mem_tag_kind = (is_type_tag) ? TYPE_TAG : NAME_TAG; ann->type_mem_tag = NULL_TREE; /* Add the tag to the symbol table and mark it for renaming. */ add_referenced_tmp_var (tag); bitmap_set_bit (vars_to_rename, ann->uid); return tag; } /* Create a name memory tag to represent a specific SSA_NAME pointer P_i. This is used if P_i has been found to point to a specific set of variables or to a non-aliased memory location like the address returned by malloc functions. */ static tree get_nmt_for (tree ptr) { struct ptr_info_def *pi = get_ptr_info (ptr); tree tag = pi->name_mem_tag; if (tag == NULL_TREE) { tag = create_memory_tag (TREE_TYPE (TREE_TYPE (ptr)), false); /* If PTR is a PARM_DECL, its memory tag should be considered a global variable. */ if (TREE_CODE (SSA_NAME_VAR (ptr)) == PARM_DECL) mark_call_clobbered (tag); /* Similarly, if PTR points to malloc, then TAG is a global. */ if (pi->pt_malloc) mark_call_clobbered (tag); } return tag; } /* Return the type memory tag associated to pointer PTR. A memory tag is an artificial variable that represents the memory location pointed-to by PTR. It is used to model the effects of pointer de-references on addressable variables. AI points to the data gathered during alias analysis. This function populates the array AI->POINTERS. */ static tree get_tmt_for (tree ptr, struct alias_info *ai) { size_t i; tree tag; tree tag_type = TREE_TYPE (TREE_TYPE (ptr)); HOST_WIDE_INT tag_set = get_alias_set (tag_type); /* To avoid creating unnecessary memory tags, only create one memory tag per alias set class. Note that it may be tempting to group memory tags based on conflicting alias sets instead of equivalence. That would be wrong because alias sets are not necessarily transitive (as demonstrated by the libstdc++ test 23_containers/vector/cons/4.cc). Given three alias sets A, B, C such that conflicts (A, B) == true and conflicts (A, C) == true, it does not necessarily follow that conflicts (B, C) == true. */ for (i = 0, tag = NULL_TREE; i < ai->num_pointers; i++) { struct alias_map_d *curr = ai->pointers[i]; if (tag_set == curr->set && (flag_tree_points_to == PTA_NONE || same_points_to_set (curr->var, ptr))) { tag = var_ann (curr->var)->type_mem_tag; break; } } /* If VAR cannot alias with any of the existing memory tags, create a new tag for PTR and add it to the POINTERS array. */ if (tag == NULL_TREE) { struct alias_map_d *alias_map; /* Create a new MT.* artificial variable representing the memory location pointed-to by PTR. */ tag = create_memory_tag (tag_type, true); /* Add PTR to the POINTERS array. Note that we are not interested in PTR's alias set. Instead, we cache the alias set for the memory that PTR points to. */ alias_map = xcalloc (1, sizeof (*alias_map)); alias_map->var = ptr; alias_map->set = tag_set; ai->pointers[ai->num_pointers++] = alias_map; } return tag; } /* Create GLOBAL_VAR, an artificial global variable to act as a representative of all the variables that may be clobbered by function calls. 
*/ static void create_global_var (void) { global_var = build_decl (VAR_DECL, get_identifier (".GLOBAL_VAR"), size_type_node); DECL_ARTIFICIAL (global_var) = 1; TREE_READONLY (global_var) = 0; DECL_EXTERNAL (global_var) = 0; TREE_STATIC (global_var) = 1; TREE_USED (global_var) = 1; DECL_CONTEXT (global_var) = NULL_TREE; TREE_THIS_VOLATILE (global_var) = 0; TREE_ADDRESSABLE (global_var) = 0; add_referenced_tmp_var (global_var); bitmap_set_bit (vars_to_rename, var_ann (global_var)->uid); } /* Dump alias statistics on FILE. */ static void dump_alias_stats (FILE *file) { const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); fprintf (file, "\nAlias statistics for %s\n\n", funcname); fprintf (file, "Total alias queries:\t%u\n", alias_stats.alias_queries); fprintf (file, "Total alias mayalias results:\t%u\n", alias_stats.alias_mayalias); fprintf (file, "Total alias noalias results:\t%u\n", alias_stats.alias_noalias); fprintf (file, "Total simple queries:\t%u\n", alias_stats.simple_queries); fprintf (file, "Total simple resolved:\t%u\n", alias_stats.simple_resolved); fprintf (file, "Total TBAA queries:\t%u\n", alias_stats.tbaa_queries); fprintf (file, "Total TBAA resolved:\t%u\n", alias_stats.tbaa_resolved); fprintf (file, "Total PTA queries:\t%u\n", alias_stats.pta_queries); fprintf (file, "Total PTA resolved:\t%u\n", alias_stats.pta_resolved); } /* Dump alias information on FILE. */ void dump_alias_info (FILE *file) { size_t i; const char *funcname = lang_hooks.decl_printable_name (current_function_decl, 2); fprintf (file, "\nAlias information for %s\n\n", funcname); for (i = 0; i < num_referenced_vars; i++) { tree var = referenced_var (i); var_ann_t ann = var_ann (var); if (ann->may_aliases || ann->type_mem_tag || ann->is_alias_tag || ann->mem_tag_kind != NOT_A_TAG) dump_variable_dfa (file, var); } fprintf (file, "\n"); } /* Dump alias information on stderr. */ void debug_alias_info (void) { dump_alias_info (stderr); } /* Return the alias information associated with pointer T. It creates a new instance if none existed. */ static struct ptr_info_def * get_ptr_info (tree t) { struct ptr_info_def *pi; #if defined ENABLE_CHECKING if (!POINTER_TYPE_P (TREE_TYPE (t))) abort (); #endif pi = SSA_NAME_PTR_INFO (t); if (pi == NULL) { pi = ggc_alloc (sizeof (*pi)); memset ((void *)pi, 0, sizeof (*pi)); SSA_NAME_PTR_INFO (t) = pi; } return pi; } /* Dump points-to information for SSA_NAME PTR into FILE. */ static void dump_points_to_info_for (FILE *file, tree ptr) { struct ptr_info_def *pi = SSA_NAME_PTR_INFO (ptr); fprintf (file, "Pointer "); print_generic_expr (file, ptr, dump_flags); if (pi == NULL) return; if (pi->name_mem_tag) { fprintf (file, ", name memory tag: "); print_generic_expr (file, pi->name_mem_tag, dump_flags); } if (pi->value_escapes_p) fprintf (file, ", its value escapes"); if (pi->pt_anything) fprintf (file, ", points-to anything"); if (pi->pt_malloc) fprintf (file, ", points-to malloc"); if (pi->pt_vars) { unsigned ix; fprintf (file, ", points-to vars: { "); EXECUTE_IF_SET_IN_BITMAP (pi->pt_vars, 0, ix, { print_generic_expr (file, referenced_var (ix), dump_flags); fprintf (file, " "); }); fprintf (file, "}"); } fprintf (file, "\n"); } /* Dump points-to information into FILE. NOTE: This function is slow, as it needs to traverse the whole CFG looking for pointer SSA_NAMEs. 
*/ void dump_points_to_info (FILE *file) { basic_block bb; block_stmt_iterator si; size_t i; const char *fname = lang_hooks.decl_printable_name (current_function_decl, 2); fprintf (file, "\n\nPointed-to sets for pointers in %s\n\n", fname); /* First dump points-to information for the default definitions of pointer variables. This is necessary because default definitions are not part of the code. */ for (i = 0; i < num_referenced_vars; i++) { tree var = referenced_var (i); if (POINTER_TYPE_P (TREE_TYPE (var))) { var_ann_t ann = var_ann (var); if (ann->default_def) dump_points_to_info_for (file, ann->default_def); } } /* Dump points-to information for every pointer defined in the program. */ FOR_EACH_BB (bb) { tree phi; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { tree ptr = PHI_RESULT (phi); if (POINTER_TYPE_P (TREE_TYPE (ptr))) dump_points_to_info_for (file, ptr); } for (si = bsi_start (bb); !bsi_end_p (si); bsi_next (&si)) { stmt_ann_t ann = stmt_ann (bsi_stmt (si)); def_optype defs = DEF_OPS (ann); if (defs) for (i = 0; i < NUM_DEFS (defs); i++) if (POINTER_TYPE_P (TREE_TYPE (DEF_OP (defs, i)))) dump_points_to_info_for (file, DEF_OP (defs, i)); } } fprintf (file, "\n"); } /* Dump points-to info pointed by PTO into STDERR. */ void debug_points_to_info (void) { dump_points_to_info (stderr); } /* Dump to FILE the list of variables that may be aliasing VAR. */ void dump_may_aliases_for (FILE *file, tree var) { varray_type aliases; if (TREE_CODE (var) == SSA_NAME) var = SSA_NAME_VAR (var); aliases = var_ann (var)->may_aliases; if (aliases) { size_t i; fprintf (file, "{ "); for (i = 0; i < VARRAY_ACTIVE_SIZE (aliases); i++) { print_generic_expr (file, VARRAY_TREE (aliases, i), dump_flags); fprintf (file, " "); } fprintf (file, "}"); } } /* Dump to stderr the list of variables that may be aliasing VAR. */ void debug_may_aliases_for (tree var) { dump_may_aliases_for (stderr, var); } /* Optimization of PHI nodes by converting them into straightline code. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static void tree_ssa_phiopt (void); static bool conditional_replacement (basic_block, tree, tree, tree); static bool value_replacement (basic_block, tree, tree, tree); static bool abs_replacement (basic_block, tree, tree, tree); static void replace_phi_with_stmt (block_stmt_iterator, basic_block, basic_block, tree, tree); static bool candidate_bb_for_phi_optimization (basic_block, basic_block *, basic_block *); static bool empty_block_p (basic_block); /* This pass eliminates PHI nodes which can be trivially implemented as an assignment from a conditional expression. ie if we have something like: bb0: if (cond) goto bb2; else goto bb1; bb1: bb2: x = PHI (0 (bb1), 1 (bb0) We can rewrite that as: bb0: bb1: bb2: x = cond; bb1 will become unreachable and bb0 and bb2 will almost always be merged into a single block. 
This occurs often due to gimplification of conditionals. Also done is the following optimization: bb0: if (a != b) goto bb2; else goto bb1; bb1: bb2: x = PHI (a (bb1), b (bb0)) We can rewrite that as: bb0: bb1: bb2: x = b; This can sometimes occur as a result of other optimizations. A similar transformation is done by the ifcvt RTL optimizer. This pass also eliminates PHI nodes which are really absolute values. i.e. if we have something like: bb0: if (a >= 0) goto bb2; else goto bb1; bb1: x = -a; bb2: x = PHI (x (bb1), a (bb0)); We can rewrite that as: bb0: bb1: bb2: x = ABS_EXPR< a >; bb1 will become unreachable and bb0 and bb2 will almost always be merged into a single block. Similar transformations are done by the ifcvt RTL optimizer. */ static void tree_ssa_phiopt (void) { basic_block bb; bool removed_phis = false; /* Search every basic block for PHI nodes we may be able to optimize. */ FOR_EACH_BB (bb) { tree arg0, arg1, phi; /* We're searching for blocks with one PHI node which has two arguments. */ phi = phi_nodes (bb); if (phi && PHI_CHAIN (phi) == NULL && PHI_NUM_ARGS (phi) == 2) { arg0 = PHI_ARG_DEF (phi, 0); arg1 = PHI_ARG_DEF (phi, 1); /* Do the replacement of conditional if it can be done. */ if (conditional_replacement (bb, phi, arg0, arg1) || value_replacement (bb, phi, arg0, arg1) || abs_replacement (bb, phi, arg0, arg1)) { /* We have done the replacement so we need to rebuild the cfg when this pass is complete. */ removed_phis = true; } } } /* If we removed any PHIs, then we have unreachable blocks and blocks which need to be merged in the CFG. */ if (removed_phis) cleanup_tree_cfg (); } /* Return TRUE if block BB has no executable statements, otherwise return FALSE. */ static bool empty_block_p (basic_block bb) { block_stmt_iterator bsi; /* BB must have no executable statements. */ bsi = bsi_start (bb); while (!bsi_end_p (bsi) && (TREE_CODE (bsi_stmt (bsi)) == LABEL_EXPR || IS_EMPTY_STMT (bsi_stmt (bsi)))) bsi_next (&bsi); if (!bsi_end_p (bsi)) return false; return true; } /* BB is a basic block which has only one PHI node with precisely two arguments. Examine both of BB's predecessors to see if one ends with a COND_EXPR and the other is a successor of the COND_EXPR. If so, then we may be able to optimize PHI nodes at the start of BB. If so, mark store the block with the COND_EXPR into COND_BLOCK_P and the other block into OTHER_BLOCK_P and return true, otherwise return false. */ static bool candidate_bb_for_phi_optimization (basic_block bb, basic_block *cond_block_p, basic_block *other_block_p) { tree last0, last1; basic_block cond_block, other_block; /* One of the alternatives must come from a block ending with a COND_EXPR. */ last0 = last_stmt (bb->pred->src); last1 = last_stmt (bb->pred->pred_next->src); if (last0 && TREE_CODE (last0) == COND_EXPR) { cond_block = bb->pred->src; other_block = bb->pred->pred_next->src; } else if (last1 && TREE_CODE (last1) == COND_EXPR) { other_block = bb->pred->src; cond_block = bb->pred->pred_next->src; } else return false; /* COND_BLOCK must have precisely two successors. We indirectly verify that those successors are BB and OTHER_BLOCK. */ if (!cond_block->succ || !cond_block->succ->succ_next || cond_block->succ->succ_next->succ_next || (cond_block->succ->flags & EDGE_ABNORMAL) != 0 || (cond_block->succ->succ_next->flags & EDGE_ABNORMAL) != 0) return false; /* OTHER_BLOCK must have a single predecessor which is COND_BLOCK, OTHER_BLOCK must have a single successor which is BB and OTHER_BLOCK must have no PHI nodes. 
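   A small sketch of the only CFG shape accepted here (BB is the join point):

            COND_BLOCK
             /      \
       OTHER_BLOCK   \
             \        \
              +------> BB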
*/ if (!other_block->pred || other_block->pred->src != cond_block || other_block->pred->pred_next || !other_block->succ || other_block->succ->dest != bb || other_block->succ->succ_next || phi_nodes (other_block)) return false; *cond_block_p = cond_block; *other_block_p = other_block; /* Everything looks OK. */ return true; } /* Replace PHI in block BB with statement NEW. NEW is inserted after BSI. Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK is known to have two edges, one of which must reach BB). */ static void replace_phi_with_stmt (block_stmt_iterator bsi, basic_block bb, basic_block cond_block, tree phi, tree new) { /* Insert our new statement at the head of our block. */ bsi_insert_after (&bsi, new, BSI_NEW_STMT); /* Register our new statement as the defining statement for the result. */ SSA_NAME_DEF_STMT (PHI_RESULT (phi)) = new; /* Remove the now useless PHI node. We do not want to use remove_phi_node since that releases the SSA_NAME as well and the SSA_NAME is still being used. */ release_phi_node (phi); bb_ann (bb)->phi_nodes = NULL; /* Disconnect the edge leading into the empty block. That will make the empty block unreachable and it will be removed later. */ if (cond_block->succ->dest == bb) { cond_block->succ->flags |= EDGE_FALLTHRU; cond_block->succ->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); ssa_remove_edge (cond_block->succ->succ_next); } else { cond_block->succ->succ_next->flags |= EDGE_FALLTHRU; cond_block->succ->succ_next->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); ssa_remove_edge (cond_block->succ); } /* Eliminate the COND_EXPR at the end of COND_BLOCK. */ bsi = bsi_last (cond_block); bsi_remove (&bsi); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n", cond_block->index, bb->index); } /* The function conditional_replacement does the main work of doing the conditional replacement. Return true if the replacement is done. Otherwise return false. BB is the basic block where the replacement is going to be done on. ARG0 is argument 0 from PHI. Likewise for ARG1. */ static bool conditional_replacement (basic_block bb, tree phi, tree arg0, tree arg1) { tree result; tree old_result = NULL; basic_block other_block = NULL; basic_block cond_block = NULL; tree new, cond; block_stmt_iterator bsi; edge true_edge, false_edge; tree new_var = NULL; /* The PHI arguments have the constants 0 and 1, then convert it to the conditional. */ if ((integer_zerop (arg0) && integer_onep (arg1)) || (integer_zerop (arg1) && integer_onep (arg0))) ; else return false; if (!candidate_bb_for_phi_optimization (bb, &cond_block, &other_block) || !empty_block_p (other_block)) return false; /* If the condition is not a naked SSA_NAME and its type does not match the type of the result, then we have to create a new variable to optimize this case as it would likely create non-gimple code when the condition was converted to the result's type. */ cond = COND_EXPR_COND (last_stmt (cond_block)); result = PHI_RESULT (phi); if (TREE_CODE (cond) != SSA_NAME && !lang_hooks.types_compatible_p (TREE_TYPE (cond), TREE_TYPE (result))) { new_var = make_rename_temp (TREE_TYPE (cond), NULL); old_result = cond; cond = new_var; } /* If the condition was a naked SSA_NAME and the type is not the same as the type of the result, then convert the type of the condition. 
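   For example (a hypothetical case), if COND is the _Bool SSA_NAME 'x_1' while the PHI result has type 'int', the condition is rewritten below as '(int) x_1' so that the final 'result = cond' assignment remains type-correct.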
*/ if (!lang_hooks.types_compatible_p (TREE_TYPE (cond), TREE_TYPE (result))) cond = fold_convert (TREE_TYPE (result), cond); /* We need to know which is the true edge and which is the false edge so that we know when to invert the condition below. */ extract_true_false_edges_from_block (cond_block, &true_edge, &false_edge); /* Insert our new statement at the head of our block. */ bsi = bsi_start (bb); if (old_result) { tree new1; if (TREE_CODE_CLASS (TREE_CODE (old_result)) != '<') return false; new1 = build (TREE_CODE (old_result), TREE_TYPE (result), TREE_OPERAND (old_result, 0), TREE_OPERAND (old_result, 1)); new1 = build (MODIFY_EXPR, TREE_TYPE (result), new_var, new1); bsi_insert_after (&bsi, new1, BSI_NEW_STMT); } /* At this point we know we have a COND_EXPR with two successors. One successor is BB, the other successor is an empty block which falls through into BB. There is a single PHI node at the join point (BB) and its arguments are constants (0, 1). So, given the condition COND, and the two PHI arguments, we can rewrite this PHI into non-branching code: dest = (COND) or dest = COND'. We use the condition as-is if the argument associated with the true edge has the value one or the argument associated with the false edge has the value zero. Note that those conditions are not the same since only one of the outgoing edges from the COND_EXPR will directly reach BB and thus be associated with an argument. */ if ((PHI_ARG_EDGE (phi, 0) == true_edge && integer_onep (arg0)) || (PHI_ARG_EDGE (phi, 0) == false_edge && integer_zerop (arg0)) || (PHI_ARG_EDGE (phi, 1) == true_edge && integer_onep (arg1)) || (PHI_ARG_EDGE (phi, 1) == false_edge && integer_zerop (arg1))) { new = build (MODIFY_EXPR, TREE_TYPE (PHI_RESULT (phi)), PHI_RESULT (phi), cond); } else { tree cond1 = invert_truthvalue (cond); cond = cond1; /* If what we get back is a conditional expression, there is no way that it can be gimple. */ if (TREE_CODE (cond) == COND_EXPR) return false; /* If what we get back is not gimple try to create it as gimple by using a temporary variable. */ if (is_gimple_cast (cond) && !is_gimple_val (TREE_OPERAND (cond, 0))) { tree temp = TREE_OPERAND (cond, 0); tree new_var_1 = make_rename_temp (TREE_TYPE (temp), NULL); new = build (MODIFY_EXPR, TREE_TYPE (new_var_1), new_var_1, temp); bsi_insert_after (&bsi, new, BSI_NEW_STMT); cond = fold_convert (TREE_TYPE (result), new_var_1); } if (TREE_CODE (cond) == TRUTH_NOT_EXPR && !is_gimple_val (TREE_OPERAND (cond, 0))) return false; new = build (MODIFY_EXPR, TREE_TYPE (PHI_RESULT (phi)), PHI_RESULT (phi), cond); } replace_phi_with_stmt (bsi, bb, cond_block, phi, new); /* Note that we optimized this PHI. */ return true; }

/* The function value_replacement does the main work of doing the value replacement. Return true if the replacement is done. Otherwise return false. BB is the basic block where the replacement is going to be done on. ARG0 is argument 0 from the PHI. Likewise for ARG1. */ static bool value_replacement (basic_block bb, tree phi, tree arg0, tree arg1) { tree result; basic_block other_block = NULL; basic_block cond_block = NULL; tree new, cond; edge true_edge, false_edge; /* If the type says honor signed zeros we cannot do this optimization.
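   To see why, consider (purely as an illustration) the GIMPLE produced for 'x = (a != 0.0) ? a : 0.0'. The replacement below would turn the PHI into plain 'x = a', but when a is -0.0 the original code produces +0.0 (the false arm) while the rewritten code produces -0.0, so the transformation is unsafe whenever signed zeros must be honored.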
*/ if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1)))) return false; if (!candidate_bb_for_phi_optimization (bb, &cond_block, &other_block) || !empty_block_p (other_block)) return false; cond = COND_EXPR_COND (last_stmt (cond_block)); result = PHI_RESULT (phi); /* This transformation is only valid for equality comparisons. */ if (TREE_CODE (cond) != NE_EXPR && TREE_CODE (cond) != EQ_EXPR) return false; /* We need to know which is the true edge and which is the false edge so that we know which PHI argument to select for the replacement. */ extract_true_false_edges_from_block (cond_block, &true_edge, &false_edge); /* At this point we know we have a COND_EXPR with two successors. One successor is BB, the other successor is an empty block which falls through into BB. The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR. There is a single PHI node at the join point (BB) with two arguments. We now need to verify that the two arguments in the PHI node match the two arguments to the equality comparison. */ if ((operand_equal_p (arg0, TREE_OPERAND (cond, 0), 0) && operand_equal_p (arg1, TREE_OPERAND (cond, 1), 0)) || (operand_equal_p (arg1, TREE_OPERAND (cond, 0), 0) && operand_equal_p (arg0, TREE_OPERAND (cond, 1), 0))) { edge e; tree arg; /* For NE_EXPR, we want to build an assignment result = arg where arg is the PHI argument associated with the true edge. For EQ_EXPR we want the PHI argument associated with the false edge. */ e = (TREE_CODE (cond) == NE_EXPR ? true_edge : false_edge); /* Unfortunately, E may not reach BB (it may instead have gone to OTHER_BLOCK). If that is the case, then we want the single outgoing edge from OTHER_BLOCK which reaches BB and represents the desired path from COND_BLOCK. */ if (e->dest == other_block) e = e->dest->succ; /* Now we know the incoming edge to BB that has the argument for the RHS of our new assignment statement. */ if (PHI_ARG_EDGE (phi, 0) == e) arg = arg0; else arg = arg1; /* Build the new assignment. */ new = build (MODIFY_EXPR, TREE_TYPE (result), result, arg); replace_phi_with_stmt (bsi_start (bb), bb, cond_block, phi, new); /* Note that we optimized this PHI. */ return true; } return false; }

/* The function abs_replacement does the main work of doing the absolute value replacement. Return true if the replacement is done. Otherwise return false. BB is the basic block where the replacement is going to be done on. ARG0 is argument 0 from the PHI. Likewise for ARG1. */ static bool abs_replacement (basic_block bb, tree phi, tree arg0, tree arg1) { tree result; basic_block other_block = NULL; basic_block cond_block = NULL; tree new, cond; block_stmt_iterator bsi; edge true_edge, false_edge; tree assign = NULL; edge e; tree rhs = NULL, lhs = NULL; bool negate; enum tree_code cond_code; /* If the type says honor signed zeros we cannot do this optimization. */ if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1)))) return false; if (!candidate_bb_for_phi_optimization (bb, &cond_block, &other_block)) return false; /* OTHER_BLOCK must have only one executable statement which must have the form arg0 = -arg1 or arg1 = -arg0. */ bsi = bsi_start (other_block); while (!bsi_end_p (bsi)) { tree stmt = bsi_stmt (bsi); /* Empty statements and labels are uninteresting. */ if (TREE_CODE (stmt) == LABEL_EXPR || IS_EMPTY_STMT (stmt)) { bsi_next (&bsi); continue; } /* If we found the assignment, but it was not the only executable statement in OTHER_BLOCK, then we can not optimize.
*/ if (assign) return false; /* If we got here, then we have found the first executable statement in OTHER_BLOCK. If it is anything other than arg = -arg1 or arg1 = -arg0, then we can not optimize. */ if (TREE_CODE (stmt) == MODIFY_EXPR) { lhs = TREE_OPERAND (stmt, 0); rhs = TREE_OPERAND (stmt, 1); if (TREE_CODE (rhs) == NEGATE_EXPR) { rhs = TREE_OPERAND (rhs, 0); /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */ if ((lhs == arg0 && rhs == arg1) || (lhs == arg1 && rhs == arg0)) { assign = stmt; bsi_next (&bsi); } else return false; } else return false; } else return false; } /* If we did not find the proper negation assignment, then we can not optimize. */ if (assign == NULL) return false; cond = COND_EXPR_COND (last_stmt (cond_block)); result = PHI_RESULT (phi); /* Only relationals comparing arg[01] against zero are interesting. */ cond_code = TREE_CODE (cond); if (cond_code != GT_EXPR && cond_code != GE_EXPR && cond_code != LT_EXPR && cond_code != LE_EXPR) return false; /* Make sure the conditional is arg[01] OP y. */ if (TREE_OPERAND (cond, 0) != rhs) return false; if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (cond, 1))) ? real_zerop (TREE_OPERAND (cond, 1)) : integer_zerop (TREE_OPERAND (cond, 1))) ; else return false; /* We need to know which is the true edge and which is the false edge so that we know if have abs or negative abs. */ extract_true_false_edges_from_block (cond_block, &true_edge, &false_edge); /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we will need to negate the result. Similarly for LT_EXPR/LE_EXPR if the false edge goes to OTHER_BLOCK. */ if (cond_code == GT_EXPR || cond_code == GE_EXPR) e = true_edge; else e = false_edge; if (e->dest == other_block) negate = true; else negate = false; if (negate) lhs = make_rename_temp (TREE_TYPE (result), NULL); else lhs = result; /* Build the modify expression with abs expression. */ new = build (MODIFY_EXPR, TREE_TYPE (lhs), lhs, build1 (ABS_EXPR, TREE_TYPE (lhs), rhs)); replace_phi_with_stmt (bsi_start (bb), bb, cond_block, phi, new); if (negate) { /* Get the right BSI. We want to insert after the recently added ABS_EXPR statement (which we know is the first statement in the block. */ bsi = bsi_start (bb); bsi_next (&bsi); new = build (MODIFY_EXPR, TREE_TYPE (result), result, build1 (NEGATE_EXPR, TREE_TYPE (lhs), lhs)); bsi_insert_after (&bsi, new, BSI_NEW_STMT); /* Register the new statement as defining the temporary -- this is normally done by replace_phi_with_stmt, but the link will be wrong if we had to negate the resulting value. */ SSA_NAME_DEF_STMT (result) = new; } /* Note that we optimized this PHI. */ return true; } /* Always do these optimizations if we have SSA trees to work on. */ static bool gate_phiopt (void) { return 1; } struct tree_opt_pass pass_phiopt = { "phiopt", /* name */ gate_phiopt, /* gate */ tree_ssa_phiopt, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_PHIOPT, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_ggc_collect /* todo_flags_finish */ | TODO_verify_ssa | TODO_rename_vars | TODO_verify_flow }; /* Forward propagation of single use variables. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This pass performs simple forward propagation of single use variables from their definition site into their single use site. Right now we only bother forward propagating into COND_EXPRs since those are relatively common cases where forward propagation creates valid gimple code without the expression needing to fold. ie bb0: x = a COND b; if (x) goto ... else goto ... Will be transformed into: bb0: if (a COND b) goto ... else goto ... Similarly for the tests (x == 0), (x != 0), (x == 1) and (x != 1). Or (assuming c1 and c2 are constants): bb0: x = a + c1; if (x EQ/NEQ c2) goto ... else goto ... Will be transformed into: bb0: if (a EQ/NEQ (c2 - c1)) goto ... else goto ... Similarly for x = a - c1. Or bb0: x = !a if (x) goto ... else goto ... Will be transformed into: bb0: if (a == 0) goto ... else goto ... Similarly for the tests (x == 0), (x != 0), (x == 1) and (x != 1). For these cases, we propagate A into all, possibly more than one, COND_EXPRs that use X. Or bb0: x = (typecast) a if (x) goto ... else goto ... Will be transformed into: bb0: if (a != 0) goto ... else goto ... (Assuming a is an integral type and x is a boolean or x is an integral and a is a boolean.) Similarly for the tests (x == 0), (x != 0), (x == 1) and (x != 1). For these cases, we propagate A into all, possibly more than one, COND_EXPRs that use X. In addition to eliminating the variable and the statement which assigns a value to the variable, we may be able to later thread the jump without adding insane complexity in the dominator optimizer. Also note these transformations can cascade. We handle this by having a worklist of COND_EXPR statements to examine. As we make a change to a statement, we put it back on the worklist to examine on the next iteration of the main loop. This will (of course) be extended as other needs arise. */ /* Bitmap of variables for which we want immediate uses. This is set by record_single_argument_cond_exprs and tested in need_imm_uses_for_forwprop. */ static bitmap vars; static bool need_imm_uses_for_forwprop (tree); static void tree_ssa_forward_propagate_single_use_vars (void); static void record_single_argument_cond_exprs (varray_type, varray_type *, bitmap); static void substitute_single_use_vars (varray_type *, varray_type); /* Function indicating whether we ought to include information for 'var' when calculating immediate uses. */ static bool need_imm_uses_for_forwprop (tree var) { return bitmap_bit_p (vars, SSA_NAME_VERSION (var)); } /* Find all COND_EXPRs with a condition that is a naked SSA_NAME or an equality comparison against a constant. Record the identified COND_EXPRs and the SSA_NAME used in the COND_EXPR into a virtual array, which is returned to the caller. Also record into VARS that we will need immediate uses for the identified SSA_NAME. 
The more uninteresting COND_EXPRs and associated SSA_NAMEs we can filter out here, the faster this pass will run since its runtime is dominated by the time to build immediate uses. */ static void record_single_argument_cond_exprs (varray_type cond_worklist, varray_type *vars_worklist, bitmap vars) { /* The first pass over the blocks gathers the set of variables we need immediate uses for as well as the set of interesting COND_EXPRs. A simpler implementation may be appropriate if/when we have a lower overhead means of getting immediate use information. */ while (VARRAY_ACTIVE_SIZE (cond_worklist) > 0) { tree last = VARRAY_TOP_TREE (cond_worklist); VARRAY_POP (cond_worklist); /* See if this block ends in a COND_EXPR. */ if (last && TREE_CODE (last) == COND_EXPR) { tree cond = COND_EXPR_COND (last); enum tree_code cond_code = TREE_CODE (cond); /* If the condition is a lone variable or an equality test of an SSA_NAME against an integral constant, then we may have an optimizable case. Note these conditions also ensure the COND_EXPR has no virtual operands or other side effects. */ if (cond_code == SSA_NAME || ((cond_code == EQ_EXPR || cond_code == NE_EXPR) && TREE_CODE (TREE_OPERAND (cond, 0)) == SSA_NAME && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (cond, 1))) == 'c' && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (cond, 1))))) { tree def; tree test_var; /* Extract the single variable used in the test into TEST_VAR. */ if (cond_code == SSA_NAME) test_var = cond; else test_var = TREE_OPERAND (cond, 0); /* If we have already recorded this SSA_NAME as interesting, do not do so again. */ if (bitmap_bit_p (vars, SSA_NAME_VERSION (test_var))) continue; /* Now get the defining statement for TEST_VAR and see if it something we are interested in. */ def = SSA_NAME_DEF_STMT (test_var); if (TREE_CODE (def) == MODIFY_EXPR) { tree def_rhs = TREE_OPERAND (def, 1); /* If TEST_VAR is set by adding or subtracting a constant from an SSA_NAME, then it is interesting to us as we can adjust the constant in the conditional and thus eliminate the arithmetic operation. */ if (TREE_CODE (def_rhs) == PLUS_EXPR || TREE_CODE (def_rhs) == MINUS_EXPR) { tree op0 = TREE_OPERAND (def_rhs, 0); tree op1 = TREE_OPERAND (def_rhs, 1); /* The first operand must be an SSA_NAME and the second operand must be a constant. */ if (TREE_CODE (op0) != SSA_NAME || TREE_CODE_CLASS (TREE_CODE (op1)) != 'c' || !INTEGRAL_TYPE_P (TREE_TYPE (op1))) continue; } /* These cases require comparisons of a naked SSA_NAME or comparison of an SSA_NAME against zero or one. */ else if (TREE_CODE (cond) == SSA_NAME || integer_zerop (TREE_OPERAND (cond, 1)) || integer_onep (TREE_OPERAND (cond, 1))) { /* If TEST_VAR is set from a relational operation between two SSA_NAMEs or a combination of an SSA_NAME and a constant, then it is interesting. */ if (TREE_CODE_CLASS (TREE_CODE (def_rhs)) == '<') { tree op0 = TREE_OPERAND (def_rhs, 0); tree op1 = TREE_OPERAND (def_rhs, 1); /* Both operands of DEF_RHS must be SSA_NAMEs or constants. */ if ((TREE_CODE (op0) != SSA_NAME && !is_gimple_min_invariant (op0)) || (TREE_CODE (op1) != SSA_NAME && !is_gimple_min_invariant (op1))) continue; } /* If TEST_VAR is set from a TRUTH_NOT_EXPR, then it is interesting. */ else if (TREE_CODE (def_rhs) == TRUTH_NOT_EXPR) { def_rhs = TREE_OPERAND (def_rhs, 0); /* DEF_RHS must be an SSA_NAME or constant. 
*/ if (TREE_CODE (def_rhs) != SSA_NAME && !is_gimple_min_invariant (def_rhs)) continue; } /* If TEST_VAR was set from a cast of an integer type to a boolean type or a cast of a boolean to an integral, then it is interesting. */ else if (TREE_CODE (def_rhs) == NOP_EXPR || TREE_CODE (def_rhs) == CONVERT_EXPR) { tree outer_type; tree inner_type; outer_type = TREE_TYPE (def_rhs); inner_type = TREE_TYPE (TREE_OPERAND (def_rhs, 0)); if ((TREE_CODE (outer_type) == BOOLEAN_TYPE && INTEGRAL_TYPE_P (inner_type)) || (TREE_CODE (inner_type) == BOOLEAN_TYPE && INTEGRAL_TYPE_P (outer_type))) ; else continue; } else continue; } else continue; /* All the tests passed, record TEST_VAR as interesting. */ VARRAY_PUSH_TREE (*vars_worklist, test_var); bitmap_set_bit (vars, SSA_NAME_VERSION (test_var)); } } } } } /* Given FORWPROP_DATA containing SSA_NAMEs which are used in COND_EXPRs that we may be able to optimize, attempt to rewrite the condition in each COND_EXPR to use the RHS of the statement which defines the SSA_NAME used in the COND_EXPR. */ static void substitute_single_use_vars (varray_type *cond_worklist, varray_type vars_worklist) { while (VARRAY_ACTIVE_SIZE (vars_worklist) > 0) { tree test_var = VARRAY_TOP_TREE (vars_worklist); tree def = SSA_NAME_DEF_STMT (test_var); dataflow_t df; int j, num_uses, propagated_uses; block_stmt_iterator bsi; VARRAY_POP (vars_worklist); /* Now compute the immediate uses of TEST_VAR. */ df = get_immediate_uses (def); num_uses = num_immediate_uses (df); propagated_uses = 0; /* If TEST_VAR is used more than once and is not a boolean set via TRUTH_NOT_EXPR with another SSA_NAME as its argument, then we can not optimize. */ if (num_uses == 1 || (TREE_CODE (TREE_TYPE (test_var)) == BOOLEAN_TYPE && TREE_CODE (TREE_OPERAND (def, 1)) == TRUTH_NOT_EXPR && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (def, 1), 0)) == SSA_NAME))) ; else continue; /* Walk over each use and try to forward propagate the RHS of DEF into the use. */ for (j = 0; j < num_uses; j++) { tree cond_stmt; tree cond; enum tree_code cond_code; tree def_rhs; enum tree_code def_rhs_code; tree new_cond; cond_stmt = immediate_use (df, j); /* For now we can only propagate into COND_EXPRs. */ if (TREE_CODE (cond_stmt) != COND_EXPR) continue; cond = COND_EXPR_COND (cond_stmt); cond_code = TREE_CODE (cond); def_rhs = TREE_OPERAND (def, 1); def_rhs_code = TREE_CODE (def_rhs); /* If the definition of the single use variable was from an arithmetic operation, then we just need to adjust the constant in the COND_EXPR_COND and update the variable tested. */ if (def_rhs_code == PLUS_EXPR || def_rhs_code == MINUS_EXPR) { tree op0 = TREE_OPERAND (def_rhs, 0); tree op1 = TREE_OPERAND (def_rhs, 1); enum tree_code new_code; tree t; /* If the variable was defined via X + C, then we must subtract C from the constant in the conditional. Otherwise we add C to the constant in the conditional. The result must fold into a valid gimple operand to be optimizable. */ new_code = def_rhs_code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR; t = int_const_binop (new_code, TREE_OPERAND (cond, 1), op1, 0); if (!is_gimple_val (t)) continue; new_cond = build (cond_code, boolean_type_node, op0, t); } /* If the variable is defined by a conditional expression... */ else if (TREE_CODE_CLASS (def_rhs_code) == '<') { /* TEST_VAR was set from a relational operator. */ tree op0 = TREE_OPERAND (def_rhs, 0); tree op1 = TREE_OPERAND (def_rhs, 1); new_cond = build (def_rhs_code, boolean_type_node, op0, op1); /* Invert the conditional if necessary. 
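   For instance (a hypothetical case), if TEST_VAR was set by 'x = a < b' and the COND_EXPR tests 'x == 0', the condition built from the definition, 'a < b', must be inverted (to 'a >= b' for integer operands) before it can replace the original test.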
*/ if ((cond_code == EQ_EXPR && integer_zerop (TREE_OPERAND (cond, 1))) || (cond_code == NE_EXPR && integer_onep (TREE_OPERAND (cond, 1)))) { new_cond = invert_truthvalue (new_cond); /* If we did not get a simple relational expression or bare SSA_NAME, then we can not optimize this case. */ if (TREE_CODE_CLASS (TREE_CODE (new_cond)) != '<' && TREE_CODE (new_cond) != SSA_NAME) continue; } } else { bool invert = false; enum tree_code new_code; /* TEST_VAR was set from a TRUTH_NOT_EXPR or a NOP_EXPR. */ if (def_rhs_code == TRUTH_NOT_EXPR) invert = true; if (cond_code == SSA_NAME || (cond_code == NE_EXPR && integer_zerop (TREE_OPERAND (cond, 1))) || (cond_code == EQ_EXPR && integer_onep (TREE_OPERAND (cond, 1)))) new_code = NE_EXPR; else new_code = EQ_EXPR; if (invert) new_code = (new_code == EQ_EXPR ? NE_EXPR : EQ_EXPR); new_cond = build (new_code, boolean_type_node, TREE_OPERAND (def_rhs, 0), convert (TREE_TYPE (def_rhs), integer_zero_node)); } /* Dump details. */ if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " Replaced '"); print_generic_expr (dump_file, cond, dump_flags); fprintf (dump_file, "' with '"); print_generic_expr (dump_file, new_cond, dump_flags); fprintf (dump_file, "'\n"); } /* Replace the condition. */ COND_EXPR_COND (cond_stmt) = new_cond; modify_stmt (cond_stmt); propagated_uses++; VARRAY_PUSH_TREE (*cond_worklist, cond_stmt); } /* If we propagated into all the uses, then we can delete DEF. Unfortunately, we have to find the defining statement in whatever block it might be in. */ if (num_uses && num_uses == propagated_uses) for (bsi = bsi_start (bb_for_stmt (def)); !bsi_end_p (bsi); bsi_next (&bsi)) { if (def == bsi_stmt (bsi)) { bsi_remove (&bsi); break; } } } } /* Main entry point for the forward propagation optimizer. */ static void tree_ssa_forward_propagate_single_use_vars (void) { basic_block bb; varray_type vars_worklist, cond_worklist; vars = BITMAP_XMALLOC (); VARRAY_TREE_INIT (vars_worklist, 10, "VARS worklist"); VARRAY_TREE_INIT (cond_worklist, 10, "COND worklist"); /* Prime the COND_EXPR worklist by placing all the COND_EXPRs on the worklist. */ FOR_EACH_BB (bb) { tree last = last_stmt (bb); if (last && TREE_CODE (last) == COND_EXPR) VARRAY_PUSH_TREE (cond_worklist, last); } while (VARRAY_ACTIVE_SIZE (cond_worklist) > 0) { /* First get a list of all the interesting COND_EXPRs and potential single use variables which feed those COND_EXPRs. This will drain COND_WORKLIST and initialize VARS_WORKLIST. */ record_single_argument_cond_exprs (cond_worklist, &vars_worklist, vars); if (VARRAY_ACTIVE_SIZE (vars_worklist) > 0) { /* Now compute immediate uses for all the variables we care about. */ compute_immediate_uses (TDFA_USE_OPS, need_imm_uses_for_forwprop); /* We've computed immediate uses, so we can/must clear the VARS bitmap for the next iteration. */ bitmap_clear (vars); /* And optimize. This will drain VARS_WORKLIST and initialize COND_WORKLIST for the next iteration. */ substitute_single_use_vars (&cond_worklist, vars_worklist); /* We do not incrementally update the dataflow information so we must free it here and recompute the necessary bits on the next iteration. If this turns out to be expensive, methods for incrementally updating the dataflow are known. */ free_df (); } } /* All done. Clean up. 
*/ BITMAP_XFREE (vars); } static bool gate_forwprop (void) { return 1; } struct tree_opt_pass pass_forwprop = { "forwprop", /* name */ gate_forwprop, /* gate */ tree_ssa_forward_propagate_single_use_vars, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_FORWPROP, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_ggc_collect /* todo_flags_finish */ | TODO_verify_ssa };

/* Nested function decomposition for trees. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */

/* The object of this pass is to lower the representation of a set of nested functions in order to expose all of the gory details of the various nonlocal references. We want to do this sooner rather than later, in order to give us more freedom in emitting all of the functions in question. Back in olden times, when gcc was young, we developed an insanely complicated scheme whereby variables which were referenced nonlocally were forced to live in the stack of the declaring function, and then the nested functions magically discovered where these variables were placed. In order for this scheme to function properly, it required that the outer function be partially expanded, then we switch to compiling the inner function, and once done with those we switch back to compiling the outer function. Such delicate ordering requirements make it difficult to do whole translation unit optimizations involving such functions. The implementation here is much more direct. Everything that can be referenced by an inner function is a member of an explicitly created structure herein called the "nonlocal frame struct". The incoming static chain for a nested function is a pointer to this struct in the parent. In this way, we settle on known offsets from a known base, and so are decoupled from the logic that places objects in the function's stack frame. More importantly, we don't have to wait for that to happen -- since the compilation of the inner function is no longer tied to a real stack frame, the nonlocal frame struct can be allocated anywhere. Which means that the outer function is now inlinable. Theory of operation here is very simple. Iterate over all the statements in all the functions (depth first) several times, allocating structures and fields on demand. In general we want to examine inner functions first, so that we can avoid making changes to outer functions which are unnecessary. The order of the passes matters a bit, in that later passes will be skipped if it is discovered that the functions don't actually interact at all. That is, they're nested in the lexical sense but could have been written as independent functions without change.
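   As a concrete (and purely hypothetical) illustration of the end result, a nested pair such as

       int foo (int x)
       {
         int bar (void) { return x + 1; }
         return bar ();
       }

   is conceptually lowered to something along the lines of

       struct FRAME_foo { int x; };
       static int bar (struct FRAME_foo *chain) { return chain->x + 1; }
       int foo (int x)
       {
         struct FRAME_foo frame;
         frame.x = x;
         return bar (&frame);
       }

   where FRAME_foo plays the role of the nonlocal frame struct and CHAIN is the incoming static chain described above; the field and parameter names here are illustrative only.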
*/ struct var_map_elt { tree old; tree new; }; struct nesting_info { struct nesting_info *outer; struct nesting_info *inner; struct nesting_info *next; htab_t var_map; tree context; tree new_local_var_chain; tree frame_type; tree frame_decl; tree chain_field; tree chain_decl; tree nl_goto_field; bool any_parm_remapped; bool any_tramp_created; }; /* Hashing and equality functions for nesting_info->var_map. */ static hashval_t var_map_hash (const void *x) { const struct var_map_elt *a = x; return htab_hash_pointer (a->old); } static int var_map_eq (const void *x, const void *y) { const struct var_map_elt *a = x; const struct var_map_elt *b = y; return a->old == b->old; } /* We're working in so many different function contexts simultaneously, that create_tmp_var is dangerous. Prevent mishap. */ #define create_tmp_var cant_use_create_tmp_var_here_dummy /* Like create_tmp_var, except record the variable for registration at the given nesting level. */ static tree create_tmp_var_for (struct nesting_info *info, tree type, const char *prefix) { tree tmp_var; #if defined ENABLE_CHECKING /* If the type is of variable size or a type which must be created by the frontend, something is wrong. Note that we explicitly allow incomplete types here, since we create them ourselves here. */ if (TREE_ADDRESSABLE (type) || (TYPE_SIZE_UNIT (type) && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)) abort (); #endif tmp_var = create_tmp_var_raw (type, prefix); DECL_CONTEXT (tmp_var) = info->context; TREE_CHAIN (tmp_var) = info->new_local_var_chain; DECL_SEEN_IN_BIND_EXPR_P (tmp_var) = 1; info->new_local_var_chain = tmp_var; return tmp_var; } /* Take the address of EXP. Mark it for addressability as necessary. */ static tree build_addr (tree exp) { tree base = exp; while (TREE_CODE (base) == REALPART_EXPR || TREE_CODE (base) == IMAGPART_EXPR || handled_component_p (base)) base = TREE_OPERAND (base, 0); if (DECL_P (base)) TREE_ADDRESSABLE (base) = 1; return build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (exp)), exp); } /* Insert FIELD into TYPE, sorted by alignment requirements. */ static void insert_field_into_struct (tree type, tree field) { tree *p; DECL_CONTEXT (field) = type; for (p = &TYPE_FIELDS (type); *p ; p = &TREE_CHAIN (*p)) if (DECL_ALIGN (field) >= DECL_ALIGN (*p)) break; TREE_CHAIN (field) = *p; *p = field; } /* Build or return the RECORD_TYPE that describes the frame state that is shared between INFO->CONTEXT and its nested functions. This record will not be complete until finalize_nesting_tree; up until that point we'll be adding fields as necessary. We also build the DECL that represents this frame in the function. */ static tree get_frame_type (struct nesting_info *info) { tree type = info->frame_type; if (!type) { char *name; type = make_node (RECORD_TYPE); name = concat ("FRAME.", IDENTIFIER_POINTER (DECL_NAME (info->context)), NULL); TYPE_NAME (type) = get_identifier (name); free (name); info->frame_type = type; info->frame_decl = create_tmp_var_for (info, type, "FRAME"); } return type; } /* Return true if DECL should be referenced by pointer in the non-local frame structure. */ static bool use_pointer_in_frame (tree decl) { if (TREE_CODE (decl) == PARM_DECL) { /* It's illegal to copy TREE_ADDRESSABLE, impossible to copy variable sized decls, and inefficient to copy large aggregates. Don't bother moving anything but scalar variables. */ return AGGREGATE_TYPE_P (TREE_TYPE (decl)); } else { /* Variable sized types make things "interesting" in the frame. 
*/ return DECL_SIZE (decl) == NULL || !TREE_CONSTANT (DECL_SIZE (decl)); } } /* Given DECL, a non-locally accessed variable, find or create a field in the non-local frame structure for the given nesting context. */ static tree lookup_field_for_decl (struct nesting_info *info, tree decl, enum insert_option insert) { struct var_map_elt *elt, dummy; void **slot; tree field; dummy.old = decl; slot = htab_find_slot (info->var_map, &dummy, insert); if (!slot) { if (insert == INSERT) abort (); return NULL; } elt = *slot; if (!elt && insert == INSERT) { field = make_node (FIELD_DECL); DECL_NAME (field) = DECL_NAME (decl); if (use_pointer_in_frame (decl)) { TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl)); DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field)); DECL_NONADDRESSABLE_P (field) = 1; } else { TREE_TYPE (field) = TREE_TYPE (decl); DECL_SOURCE_LOCATION (field) = DECL_SOURCE_LOCATION (decl); DECL_ALIGN (field) = DECL_ALIGN (decl); DECL_USER_ALIGN (field) = DECL_USER_ALIGN (decl); TREE_ADDRESSABLE (field) = TREE_ADDRESSABLE (decl); DECL_NONADDRESSABLE_P (field) = !TREE_ADDRESSABLE (decl); TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (decl); } insert_field_into_struct (get_frame_type (info), field); elt = xmalloc (sizeof (*elt)); elt->old = decl; elt->new = field; *slot = elt; if (TREE_CODE (decl) == PARM_DECL) info->any_parm_remapped = true; } else field = elt ? elt->new : NULL; return field; } /* Build or return the variable that holds the static chain within INFO->CONTEXT. This variable may only be used within INFO->CONTEXT. */ static tree get_chain_decl (struct nesting_info *info) { tree decl = info->chain_decl; if (!decl) { tree type; type = get_frame_type (info->outer); type = build_pointer_type (type); /* Note that this variable is *not* entered into any BIND_EXPR; the construction of this variable is handled specially in expand_function_start and initialize_inlined_parameters. Note also that it's represented as a parameter. This is more close to the truth, since the initial value does come from the caller. */ decl = build_decl (PARM_DECL, create_tmp_var_name ("CHAIN"), type); DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 1; TREE_USED (decl) = 1; DECL_CONTEXT (decl) = info->context; DECL_ARG_TYPE (decl) = type; /* Tell tree-inline.c that we never write to this variable, so it can copy-prop the replacement value immediately. */ TREE_READONLY (decl) = 1; info->chain_decl = decl; } return decl; } /* Build or return the field within the non-local frame state that holds the static chain for INFO->CONTEXT. This is the way to walk back up multiple nesting levels. */ static tree get_chain_field (struct nesting_info *info) { tree field = info->chain_field; if (!field) { tree type = build_pointer_type (get_frame_type (info->outer)); field = make_node (FIELD_DECL); DECL_NAME (field) = get_identifier ("__chain"); TREE_TYPE (field) = type; DECL_ALIGN (field) = TYPE_ALIGN (type); DECL_NONADDRESSABLE_P (field) = 1; insert_field_into_struct (get_frame_type (info), field); info->chain_field = field; } return field; } /* Copy EXP into a temporary. Allocate the temporary in the context of INFO and insert the initialization statement before TSI. 
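   For instance, if EXP is the component reference CHAIN->__chain, this emits
   a statement of the form

	D.1234 = CHAIN->__chain;

   immediately before *TSI and returns the new temporary (D.1234 is an
   illustrative name; the temporary is created with create_tmp_var_for and so
   ends up on INFO's new_local_var_chain).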
*/ static tree init_tmp_var (struct nesting_info *info, tree exp, tree_stmt_iterator *tsi) { tree t, stmt; t = create_tmp_var_for (info, TREE_TYPE (exp), NULL); stmt = build (MODIFY_EXPR, TREE_TYPE (t), t, exp); SET_EXPR_LOCUS (stmt, EXPR_LOCUS (tsi_stmt (*tsi))); tsi_link_before (tsi, stmt, TSI_SAME_STMT); return t; } /* Similarly, but only do so to force EXP to satisfy is_gimple_val. */ static tree gimplify_val_nested (struct nesting_info *info, tree exp, tree_stmt_iterator *tsi) { if (is_gimple_val (exp)) return exp; else return init_tmp_var (info, exp, tsi); } /* Build or return the type used to represent a nested function trampoline. */ static GTY(()) tree trampoline_type; static tree get_trampoline_type (void) { tree record, t; unsigned align, size; if (trampoline_type) return trampoline_type; align = TRAMPOLINE_ALIGNMENT; size = TRAMPOLINE_SIZE; /* If we won't be able to guarantee alignment simply via TYPE_ALIGN, then allocate extra space so that we can do dynamic alignment. */ if (align > STACK_BOUNDARY) { size += ((align/BITS_PER_UNIT) - 1) & -(STACK_BOUNDARY/BITS_PER_UNIT); align = STACK_BOUNDARY; } t = build_index_type (build_int_2 (size - 1, 0)); t = build_array_type (char_type_node, t); t = build_decl (FIELD_DECL, get_identifier ("__data"), t); DECL_ALIGN (t) = align; DECL_USER_ALIGN (t) = 1; record = make_node (RECORD_TYPE); TYPE_NAME (record) = get_identifier ("__builtin_trampoline"); TYPE_FIELDS (record) = t; layout_type (record); return record; } /* Given DECL, a nested function, find or create a field in the non-local frame structure for a trampoline for this function. */ static tree lookup_tramp_for_decl (struct nesting_info *info, tree decl, enum insert_option insert) { struct var_map_elt *elt, dummy; void **slot; tree field; dummy.old = decl; slot = htab_find_slot (info->var_map, &dummy, insert); if (!slot) { if (insert == INSERT) abort (); return NULL; } elt = *slot; if (!elt && insert == INSERT) { field = make_node (FIELD_DECL); DECL_NAME (field) = DECL_NAME (decl); TREE_TYPE (field) = get_trampoline_type (); TREE_ADDRESSABLE (field) = 1; insert_field_into_struct (get_frame_type (info), field); elt = xmalloc (sizeof (*elt)); elt->old = decl; elt->new = field; *slot = elt; info->any_tramp_created = true; } else field = elt ? elt->new : NULL; return field; } /* Build or return the field within the non-local frame state that holds the non-local goto "jmp_buf". The buffer itself is maintained by the rtl middle-end as dynamic stack space is allocated. */ static tree get_nl_goto_field (struct nesting_info *info) { tree field = info->nl_goto_field; if (!field) { unsigned size; tree type; /* For __builtin_nonlocal_goto, we need N words. The first is the frame pointer, the rest is for the target's stack pointer save area. The number of words is controlled by STACK_SAVEAREA_MODE; not the best interface, but it'll do for now. */ if (Pmode == ptr_mode) type = ptr_type_node; else type = lang_hooks.types.type_for_mode (Pmode, 1); size = GET_MODE_SIZE (STACK_SAVEAREA_MODE (SAVE_NONLOCAL)); size = size / GET_MODE_SIZE (Pmode); size = size + 1; type = build_array_type (type, build_index_type (build_int_2 (size, 0))); field = make_node (FIELD_DECL); DECL_NAME (field) = get_identifier ("__nl_goto_buf"); TREE_TYPE (field) = type; DECL_ALIGN (field) = TYPE_ALIGN (type); TREE_ADDRESSABLE (field) = 1; insert_field_into_struct (get_frame_type (info), field); info->nl_goto_field = field; } return field; } /* Convenience routines to walk all statements of a gimple function. 
For each statement, we invoke CALLBACK via walk_tree. The passed data is a walk_stmt_info structure. Of note here is a TSI that points to the current statement being walked. The VAL_ONLY flag that indicates whether the *TP being examined may be replaced with something that matches is_gimple_val (if true) or something slightly more complicated (if false). "Something" technically means the common subset of is_gimple_lvalue and is_gimple_rhs, but we never try to form anything more complicated than that, so we don't bother checking. */ struct walk_stmt_info { walk_tree_fn callback; tree_stmt_iterator tsi; struct nesting_info *info; bool val_only; }; /* A subroutine of walk_function. Iterate over all sub-statements of *TP. */ static void walk_stmts (struct walk_stmt_info *wi, tree *tp) { tree t = *tp; if (!t) return; switch (TREE_CODE (t)) { case STATEMENT_LIST: { tree_stmt_iterator i; for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i)) { wi->tsi = i; walk_stmts (wi, tsi_stmt_ptr (i)); } } break; case COND_EXPR: walk_tree (&COND_EXPR_COND (t), wi->callback, wi, NULL); walk_stmts (wi, &COND_EXPR_THEN (t)); walk_stmts (wi, &COND_EXPR_ELSE (t)); break; case CATCH_EXPR: walk_stmts (wi, &CATCH_BODY (t)); break; case EH_FILTER_EXPR: walk_stmts (wi, &EH_FILTER_FAILURE (t)); break; case TRY_CATCH_EXPR: case TRY_FINALLY_EXPR: walk_stmts (wi, &TREE_OPERAND (t, 0)); walk_stmts (wi, &TREE_OPERAND (t, 1)); break; case BIND_EXPR: walk_stmts (wi, &BIND_EXPR_BODY (t)); break; case RETURN_EXPR: walk_stmts (wi, &TREE_OPERAND (t, 0)); break; case MODIFY_EXPR: /* The immediate arguments of a MODIFY_EXPR may use COMPONENT_REF. */ wi->val_only = false; walk_tree (&TREE_OPERAND (t, 0), wi->callback, wi, NULL); wi->val_only = false; walk_tree (&TREE_OPERAND (t, 1), wi->callback, wi, NULL); wi->val_only = true; break; default: wi->val_only = true; walk_tree (tp, wi->callback, wi, NULL); break; } } /* Invoke CALLBACK on all statements of INFO->CONTEXT. */ static void walk_function (walk_tree_fn callback, struct nesting_info *info) { struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); wi.callback = callback; wi.info = info; wi.val_only = true; walk_stmts (&wi, &DECL_SAVED_TREE (info->context)); } /* Similarly for ROOT and all functions nested underneath, depth first. */ static void walk_all_functions (walk_tree_fn callback, struct nesting_info *root) { do { if (root->inner) walk_all_functions (callback, root->inner); walk_function (callback, root); root = root->next; } while (root); } /* Construct our local datastructure describing the function nesting tree rooted by CGN. */ static struct nesting_info * create_nesting_tree (struct cgraph_node *cgn) { struct nesting_info *info = xcalloc (1, sizeof (*info)); info->var_map = htab_create (7, var_map_hash, var_map_eq, free); info->context = cgn->decl; for (cgn = cgn->nested; cgn ; cgn = cgn->next_nested) { struct nesting_info *sub = create_nesting_tree (cgn); sub->outer = info; sub->next = info->inner; info->inner = sub; } return info; } /* Return an expression computing the static chain for TARGET_CONTEXT from INFO->CONTEXT. Insert any necessary computations before TSI. 
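   If TARGET_CONTEXT is INFO->CONTEXT itself, the result is simply the
   address of the local FRAME object.  Otherwise we start from the CHAIN
   parameter and follow one __chain field per intervening nesting level;
   for example, reaching a grandparent's frame computes roughly

	T.1 = CHAIN->__chain;

   and returns T.1 (an illustrative temporary), with each step materialized
   before *TSI via init_tmp_var.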
*/ static tree get_static_chain (struct nesting_info *info, tree target_context, tree_stmt_iterator *tsi) { struct nesting_info *i; tree x; if (info->context == target_context) { x = build_addr (info->frame_decl); } else { x = get_chain_decl (info); for (i = info->outer; i->context != target_context; i = i->outer) { tree field = get_chain_field (i); x = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (x)), x); x = build (COMPONENT_REF, TREE_TYPE (field), x, field, NULL_TREE); x = init_tmp_var (info, x, tsi); } } return x; } /* Return an expression referencing FIELD from TARGET_CONTEXT's non-local frame as seen from INFO->CONTEXT. Insert any necessary computations before TSI. */ static tree get_frame_field (struct nesting_info *info, tree target_context, tree field, tree_stmt_iterator *tsi) { struct nesting_info *i; tree x; if (info->context == target_context) { /* Make sure frame_decl gets created. */ (void) get_frame_type (info); x = info->frame_decl; } else { x = get_chain_decl (info); for (i = info->outer; i->context != target_context; i = i->outer) { tree field = get_chain_field (i); x = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (x)), x); x = build (COMPONENT_REF, TREE_TYPE (field), x, field, NULL_TREE); x = init_tmp_var (info, x, tsi); } x = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (x)), x); } x = build (COMPONENT_REF, TREE_TYPE (field), x, field, NULL_TREE); return x; } /* Called via walk_function+walk_tree, rewrite all references to VAR and PARM_DECLs that belong to outer functions. The rewrite will involve some number of structure accesses back up the static chain. E.g. for a variable FOO up one nesting level it'll be CHAIN->FOO. For two levels it'll be CHAIN->__chain->FOO. Further indirections apply to decls for which use_pointer_in_frame is true. */ static tree convert_nonlocal_reference (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; struct nesting_info *info = wi->info; tree t = *tp; *walk_subtrees = 0; switch (TREE_CODE (t)) { case VAR_DECL: /* Non-automatic variables are never processed. */ if (TREE_STATIC (t) || DECL_EXTERNAL (t)) break; /* FALLTHRU */ case PARM_DECL: if (decl_function_context (t) != info->context) { tree target_context = decl_function_context (t); struct nesting_info *i; tree x; for (i = info->outer; i->context != target_context; i = i->outer) continue; x = lookup_field_for_decl (i, t, INSERT); x = get_frame_field (info, target_context, x, &wi->tsi); if (use_pointer_in_frame (t)) { x = init_tmp_var (info, x, &wi->tsi); x = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (x)), x); } if (wi->val_only) x = init_tmp_var (info, x, &wi->tsi); *tp = x; } break; case GOTO_EXPR: /* Don't walk non-local gotos for now. */ if (TREE_CODE (GOTO_DESTINATION (t)) != LABEL_DECL) { *walk_subtrees = 1; wi->val_only = true; } break; case LABEL_DECL: /* We're taking the address of a label from a parent function, but this is not itself a non-local goto. Mark the label such that it will not be deleted, much as we would with a label address in static storage. */ if (decl_function_context (t) != info->context) FORCED_LABEL (t) = 1; break; case ADDR_EXPR: { bool save_val_only = wi->val_only; tree save_sub = TREE_OPERAND (t, 0); wi->val_only = false; walk_tree (&TREE_OPERAND (t, 0), convert_nonlocal_reference, wi, NULL); wi->val_only = true; if (save_sub != TREE_OPERAND (t, 0)) { /* If we changed anything, then TREE_INVARIANT is be wrong, since we're no longer directly referencing a decl. 
*/ TREE_INVARIANT (t) = 0; /* If the callback converted the address argument in a context where we only accept variables (and min_invariant, presumably), then compute the address into a temporary. */ if (save_val_only) *tp = gimplify_val_nested (wi->info, t, &wi->tsi); } } break; case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case ARRAY_REF: case ARRAY_RANGE_REF: case BIT_FIELD_REF: /* Go down this entire nest and just look at the final prefix and anything that describes the references. Otherwise, we lose track of whether a NOP_EXPR or VIEW_CONVERT_EXPR needs a simple value. */ wi->val_only = true; for (; handled_component_p (t) || TREE_CODE (t) == REALPART_EXPR || TREE_CODE (t) == IMAGPART_EXPR; tp = &TREE_OPERAND (t, 0), t = *tp) { if (TREE_CODE (t) == COMPONENT_REF) walk_tree (&TREE_OPERAND (t, 2), convert_nonlocal_reference, wi, NULL); else if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { walk_tree (&TREE_OPERAND (t, 1), convert_nonlocal_reference, wi, NULL); walk_tree (&TREE_OPERAND (t, 2), convert_nonlocal_reference, wi, NULL); walk_tree (&TREE_OPERAND (t, 3), convert_nonlocal_reference, wi, NULL); } else if (TREE_CODE (t) == BIT_FIELD_REF) { walk_tree (&TREE_OPERAND (t, 1), convert_nonlocal_reference, wi, NULL); walk_tree (&TREE_OPERAND (t, 2), convert_nonlocal_reference, wi, NULL); } } wi->val_only = false; walk_tree (tp, convert_nonlocal_reference, wi, NULL); break; default: if (!DECL_P (t) && !TYPE_P (t)) { *walk_subtrees = 1; wi->val_only = true; } break; } return NULL_TREE; } /* Called via walk_function+walk_tree, rewrite all references to VAR and PARM_DECLs that were referenced by inner nested functions. The rewrite will be a structure reference to the local frame variable. */ static tree convert_local_reference (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; struct nesting_info *info = wi->info; tree t = *tp, field, x, y; switch (TREE_CODE (t)) { case VAR_DECL: /* Non-automatic variables are never processed. */ if (TREE_STATIC (t) || DECL_EXTERNAL (t)) break; /* FALLTHRU */ case PARM_DECL: if (decl_function_context (t) == info->context) { /* If we copied a pointer to the frame, then the original decl is used unchanged in the parent function. */ if (use_pointer_in_frame (t)) break; /* No need to transform anything if no child references the variable. */ field = lookup_field_for_decl (info, t, NO_INSERT); if (!field) break; x = get_frame_field (info, info->context, field, &wi->tsi); if (wi->val_only) x = init_tmp_var (info, x, &wi->tsi); *tp = x; } break; case ADDR_EXPR: { bool save_val_only = wi->val_only; tree save_sub = TREE_OPERAND (t, 0); wi->val_only = false; walk_tree (&TREE_OPERAND (t, 0), convert_local_reference, wi, NULL); wi->val_only = save_val_only; /* If we converted anything ... */ if (TREE_OPERAND (t, 0) != save_sub) { /* Then the frame decl is now addressable. */ TREE_ADDRESSABLE (info->frame_decl) = 1; /* If we are in a context where we only accept values, then compute the address into a temporary. */ if (save_val_only) *tp = gimplify_val_nested (wi->info, t, &wi->tsi); } } break; case CALL_EXPR: *walk_subtrees = 1; /* Ready for some fun? We need to recognize __builtin_stack_alloc (&x, n) and insert FRAME.x = &x after that. X should have use_pointer_in_frame set. We can't do this any earlier, since we can't meaningfully evaluate &x. 
*/ x = get_callee_fndecl (t); if (!x || DECL_BUILT_IN_CLASS (x) != BUILT_IN_NORMAL) break; if (DECL_FUNCTION_CODE (x) != BUILT_IN_STACK_ALLOC) break; t = TREE_VALUE (TREE_OPERAND (t, 1)); if (TREE_CODE (t) != ADDR_EXPR) abort (); t = TREE_OPERAND (t, 0); if (TREE_CODE (t) != VAR_DECL) abort (); field = lookup_field_for_decl (info, t, NO_INSERT); if (!field) break; if (!use_pointer_in_frame (t)) abort (); x = build_addr (t); y = get_frame_field (info, info->context, field, &wi->tsi); x = build (MODIFY_EXPR, void_type_node, y, x); SET_EXPR_LOCUS (x, EXPR_LOCUS (tsi_stmt (wi->tsi))); tsi_link_after (&wi->tsi, x, TSI_SAME_STMT); break; case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case ARRAY_REF: case ARRAY_RANGE_REF: case BIT_FIELD_REF: /* Go down this entire nest and just look at the final prefix and anything that describes the references. Otherwise, we lose track of whether a NOP_EXPR or VIEW_CONVERT_EXPR needs a simple value. */ wi->val_only = true; for (; handled_component_p (t) || TREE_CODE (t) == REALPART_EXPR || TREE_CODE (t) == IMAGPART_EXPR; tp = &TREE_OPERAND (t, 0), t = *tp) { if (TREE_CODE (t) == COMPONENT_REF) walk_tree (&TREE_OPERAND (t, 2), convert_local_reference, wi, NULL); else if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { walk_tree (&TREE_OPERAND (t, 1), convert_local_reference, wi, NULL); walk_tree (&TREE_OPERAND (t, 2), convert_local_reference, wi, NULL); walk_tree (&TREE_OPERAND (t, 3), convert_local_reference, wi, NULL); } else if (TREE_CODE (t) == BIT_FIELD_REF) { walk_tree (&TREE_OPERAND (t, 1), convert_local_reference, wi, NULL); walk_tree (&TREE_OPERAND (t, 2), convert_local_reference, wi, NULL); } } wi->val_only = false; walk_tree (tp, convert_local_reference, wi, NULL); break; default: if (!DECL_P (t) && !TYPE_P (t)) { *walk_subtrees = 1; wi->val_only = true; } break; } return NULL_TREE; } /* Called via walk_function+walk_tree, rewrite all GOTO_EXPRs that reference labels from outer functions. The rewrite will be a call to __builtin_nonlocal_goto. */ static tree convert_nl_goto_reference (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; struct nesting_info *info = wi->info, *i; tree t = *tp, label, new_label, target_context, x, arg, field; struct var_map_elt *elt; void **slot; *walk_subtrees = 0; if (TREE_CODE (t) != GOTO_EXPR) return NULL_TREE; label = GOTO_DESTINATION (t); if (TREE_CODE (label) != LABEL_DECL) return NULL_TREE; target_context = decl_function_context (label); if (target_context == info->context) return NULL_TREE; for (i = info->outer; target_context != i->context; i = i->outer) continue; /* The original user label may also be use for a normal goto, therefore we must create a new label that will actually receive the abnormal control transfer. This new label will be marked LABEL_NONLOCAL; this mark will trigger proper behavior in the cfg, as well as cause the (hairy target-specific) non-local goto receiver code to be generated when we expand rtl. */ new_label = create_artificial_label (); DECL_NONLOCAL (new_label) = 1; /* Enter this association into var_map so that we can insert the new label into the IL during a second pass. */ elt = xmalloc (sizeof (*elt)); elt->old = label; elt->new = new_label; slot = htab_find_slot (i->var_map, elt, INSERT); *slot = elt; /* Build: __builtin_nl_goto(new_label, &chain->nl_goto_field). 
*/ field = get_nl_goto_field (i); x = get_frame_field (info, target_context, field, &wi->tsi); x = build_addr (x); x = gimplify_val_nested (info, x, &wi->tsi); arg = tree_cons (NULL, x, NULL); x = build_addr (new_label); arg = tree_cons (NULL, x, arg); x = implicit_built_in_decls[BUILT_IN_NONLOCAL_GOTO]; x = build_function_call_expr (x, arg); SET_EXPR_LOCUS (x, EXPR_LOCUS (tsi_stmt (wi->tsi))); *tsi_stmt_ptr (wi->tsi) = x; return NULL_TREE; } /* Called via walk_function+walk_tree, rewrite all LABEL_EXPRs that are referenced via nonlocal goto from a nested function. The rewrite will involve installing a newly generated DECL_NONLOCAL label, and (potentially) a branch around the rtl gunk that is assumed to be attached to such a label. */ static tree convert_nl_goto_receiver (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; struct nesting_info *info = wi->info; tree t = *tp, label, new_label, x; struct var_map_elt *elt, dummy; tree_stmt_iterator tmp_tsi; *walk_subtrees = 0; if (TREE_CODE (t) != LABEL_EXPR) return NULL_TREE; label = LABEL_EXPR_LABEL (t); dummy.old = label; elt = htab_find (info->var_map, &dummy); if (!elt) return NULL_TREE; new_label = elt->new; /* If there's any possibility that the previous statement falls through, then we must branch around the new non-local label. */ tmp_tsi = wi->tsi; tsi_prev (&tmp_tsi); if (tsi_end_p (tmp_tsi) || block_may_fallthru (tsi_stmt (tmp_tsi))) { x = build1 (GOTO_EXPR, void_type_node, label); tsi_link_before (&wi->tsi, x, TSI_SAME_STMT); } x = build1 (LABEL_EXPR, void_type_node, new_label); tsi_link_before (&wi->tsi, x, TSI_SAME_STMT); return NULL_TREE; } /* Called via walk_function+walk_tree, rewrite all references to addresses of nested functions that require the use of trampolines. The rewrite will involve a reference a trampoline generated for the occasion. */ static tree convert_tramp_reference (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; struct nesting_info *info = wi->info, *i; tree t = *tp, decl, target_context, x, arg; *walk_subtrees = 0; switch (TREE_CODE (t)) { case ADDR_EXPR: /* Build T.1 = &CHAIN->tramp; T.2 = __builtin_adjust_trampoline (T.1); T.3 = (func_type)T.2; */ decl = TREE_OPERAND (t, 0); if (TREE_CODE (decl) != FUNCTION_DECL) break; /* Only need to process nested functions. */ target_context = decl_function_context (decl); if (!target_context) break; /* If the nested function doesn't use a static chain, then it doesn't need a trampoline. */ if (DECL_NO_STATIC_CHAIN (decl)) break; /* Lookup the immediate parent of the callee, as that's where we need to insert the trampoline. */ for (i = info; i->context != target_context; i = i->outer) continue; x = lookup_tramp_for_decl (i, decl, INSERT); /* Compute the address of the field holding the trampoline. */ x = get_frame_field (info, target_context, x, &wi->tsi); x = build_addr (x); x = gimplify_val_nested (info, x, &wi->tsi); arg = tree_cons (NULL, x, NULL); /* Do machine-specific ugliness. Normally this will involve computing extra alignment, but it can really be anything. */ x = implicit_built_in_decls[BUILT_IN_ADJUST_TRAMPOLINE]; x = build_function_call_expr (x, arg); x = init_tmp_var (info, x, &wi->tsi); /* Cast back to the proper function type. */ x = build1 (NOP_EXPR, TREE_TYPE (t), x); x = init_tmp_var (info, x, &wi->tsi); *tp = x; break; case CALL_EXPR: /* Only walk call arguments, lest we generate trampolines for direct calls. 
*/ walk_tree (&TREE_OPERAND (t, 1), convert_tramp_reference, wi, NULL); break; default: if (!DECL_P (t) && !TYPE_P (t)) *walk_subtrees = 1; break; } return NULL_TREE; } /* Called via walk_function+walk_tree, rewrite all CALL_EXPRs that reference nested functions to make sure that the static chain is set up properly for the call. */ static tree convert_call_expr (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = data; struct nesting_info *info = wi->info; tree t = *tp, decl, target_context; *walk_subtrees = 0; switch (TREE_CODE (t)) { case CALL_EXPR: decl = get_callee_fndecl (t); if (!decl) break; target_context = decl_function_context (decl); if (target_context && !DECL_NO_STATIC_CHAIN (decl)) TREE_OPERAND (t, 2) = get_static_chain (info, target_context, &wi->tsi); break; case RETURN_EXPR: case MODIFY_EXPR: /* Only return and modify may contain calls. */ *walk_subtrees = 1; break; default: break; } return NULL_TREE; } /* Walk the nesting tree starting with ROOT, depth first. Convert all trampolines and call expressions. On the way back up, determine if a nested function actually uses its static chain; if not, remember that. */ static void convert_all_function_calls (struct nesting_info *root) { do { if (root->inner) convert_all_function_calls (root->inner); walk_function (convert_tramp_reference, root); walk_function (convert_call_expr, root); /* If the function does not use a static chain, then remember that. */ if (root->outer && !root->chain_decl && !root->chain_field) DECL_NO_STATIC_CHAIN (root->context) = 1; else { #ifdef ENABLE_CHECKING if (DECL_NO_STATIC_CHAIN (root->context)) abort (); #endif } root = root->next; } while (root); } /* Do "everything else" to clean up or complete state collected by the various walking passes -- lay out the types and decls, generate code to initialize the frame decl, store critical expressions in the struct function for rtl to find. */ static void finalize_nesting_tree_1 (struct nesting_info *root) { tree stmt_list = NULL; tree context = root->context; struct function *sf; /* If we created a non-local frame type or decl, we need to lay them out at this time. */ if (root->frame_type) { layout_type (root->frame_type); layout_decl (root->frame_decl, 0); } /* If any parameters were referenced non-locally, then we need to insert a copy. Likewise, if any variables were referenced by pointer, we need to initialize the address. */ if (root->any_parm_remapped) { tree p; for (p = DECL_ARGUMENTS (context); p ; p = TREE_CHAIN (p)) { tree field, x, y; field = lookup_field_for_decl (root, p, NO_INSERT); if (!field) continue; if (use_pointer_in_frame (p)) x = build_addr (p); else x = p; y = build (COMPONENT_REF, TREE_TYPE (field), root->frame_decl, field, NULL_TREE); x = build (MODIFY_EXPR, TREE_TYPE (field), y, x); append_to_statement_list (x, &stmt_list); } } /* If a chain_field was created, then it needs to be initialized from chain_decl. */ if (root->chain_field) { tree x = build (COMPONENT_REF, TREE_TYPE (root->chain_field), root->frame_decl, root->chain_field, NULL_TREE); x = build (MODIFY_EXPR, TREE_TYPE (x), x, get_chain_decl (root)); append_to_statement_list (x, &stmt_list); } /* If trampolines were created, then we need to initialize them. 
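   Concretely, for each nested function I for which a trampoline field was
   created, the loop below appends a call roughly of the form

	__builtin_init_trampoline (&FRAME.<field for I>, &I, &FRAME);

   where the final argument is a null pointer instead when I does not use a
   static chain (the exact operands are built from the frame decl and the
   nested function's FUNCTION_DECL, as the code shows).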
*/ if (root->any_tramp_created) { struct nesting_info *i; for (i = root->inner; i ; i = i->next) { tree arg, x, field; field = lookup_tramp_for_decl (root, i->context, NO_INSERT); if (!field) continue; if (DECL_NO_STATIC_CHAIN (i->context)) x = null_pointer_node; else x = build_addr (root->frame_decl); arg = tree_cons (NULL, x, NULL); x = build_addr (i->context); arg = tree_cons (NULL, x, arg); x = build (COMPONENT_REF, TREE_TYPE (field), root->frame_decl, field, NULL_TREE); x = build_addr (x); arg = tree_cons (NULL, x, arg); x = implicit_built_in_decls[BUILT_IN_INIT_TRAMPOLINE]; x = build_function_call_expr (x, arg); append_to_statement_list (x, &stmt_list); } } /* If we created initialization statements, insert them. */ if (stmt_list) { annotate_all_with_locus (&stmt_list, DECL_SOURCE_LOCATION (context)); append_to_statement_list (BIND_EXPR_BODY (DECL_SAVED_TREE (context)), &stmt_list); BIND_EXPR_BODY (DECL_SAVED_TREE (context)) = stmt_list; } /* If a chain_decl was created, then it needs to be registered with struct function so that it gets initialized from the static chain register at the beginning of the function. */ sf = DECL_STRUCT_FUNCTION (root->context); sf->static_chain_decl = root->chain_decl; /* Similarly for the non-local goto save area. */ if (root->nl_goto_field) { sf->nonlocal_goto_save_area = get_frame_field (root, context, root->nl_goto_field, NULL); sf->has_nonlocal_label = 1; } /* Make sure all new local variables get inserted into the proper BIND_EXPR. */ if (root->new_local_var_chain) declare_tmp_vars (root->new_local_var_chain, DECL_SAVED_TREE (root->context)); /* Dump the translated tree function. */ dump_function (TDI_nested, root->context); } static void finalize_nesting_tree (struct nesting_info *root) { do { if (root->inner) finalize_nesting_tree (root->inner); finalize_nesting_tree_1 (root); root = root->next; } while (root); } /* Free the data structures allocated during this pass. */ static void free_nesting_tree (struct nesting_info *root) { struct nesting_info *next; do { if (root->inner) free_nesting_tree (root->inner); htab_delete (root->var_map); next = root->next; free (root); root = next; } while (root); } /* Main entry point for this pass. Process FNDECL and all of its nested subroutines and turn them into something less tightly bound. */ void lower_nested_functions (tree fndecl) { struct nesting_info *root; struct cgraph_node *cgn; /* If there are no nested functions, there's nothing to do. */ cgn = cgraph_node (fndecl); if (!cgn->nested) return; root = create_nesting_tree (cgn); walk_all_functions (convert_nonlocal_reference, root); walk_all_functions (convert_local_reference, root); walk_all_functions (convert_nl_goto_reference, root); walk_all_functions (convert_nl_goto_receiver, root); convert_all_function_calls (root); finalize_nesting_tree (root); free_nesting_tree (root); } /* Type information for tree-nested.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_tree_nested_h[] = { { &trampoline_type, 1, sizeof (trampoline_type), >_ggc_mx_tree_node, >_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; #undef create_tmp_var /* Dead store elimination Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file implements dead store elimination. A dead store is a store into a memory location which will later be overwritten by another store without any intervening loads. In this case the earlier store can be deleted. In our SSA + virtual operand world we use immediate uses of virtual operands to detect dead stores. If a store's virtual definition is used precisely once by a later store to the same location which post dominates the first store, then the first store is dead. The single use of the store's virtual definition ensures that there are no intervening aliased loads and the requirement that the second load post dominate the first ensures that if the earlier store executes, then the later stores will execute before the function exits. It may help to think of this as first moving the earlier store to the point immediately before the later store. Again, the single use of the virtual definition and the post-dominance relationship ensure that such movement would be safe. Clearly if there are back to back stores, then the second is redundant. Reviewing section 10.7.2 in Morgan's "Building an Optimizing Compiler" may also help in understanding this code since it discusses the relationship between dead store and redundant load elimination. In fact, they are the same transformation applied to different views of the CFG. */ struct dse_global_data { /* This is the global bitmap for store statements. Each statement has a unique ID. When we encounter a store statement that we want to record, set the bit corresponding to the statement's unique ID in this bitmap. */ bitmap stores; }; /* We allocate a bitmap-per-block for stores which are encountered during the scan of that block. This allows us to restore the global bitmap of stores when we finish processing a block. 
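   For illustration (the UIDs are hypothetical): if scanning block B records
   the statements with UIDs 4 and 9, those bits are set both in the global
   bitmap, where blocks post-dominated by B can see them, and in B's local
   bitmap; when the walk leaves B, dse_finalize_block clears exactly those
   bits from the global bitmap again, so blocks that B does not post-dominate
   never treat B's stores as potential overwriting stores.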
*/ struct dse_block_local_data { bitmap stores; }; static bool gate_dse (void); static void tree_ssa_dse (void); static void dse_initialize_block_local_data (struct dom_walk_data *, basic_block, bool); static void dse_optimize_stmt (struct dom_walk_data *, basic_block, block_stmt_iterator); static void dse_record_phis (struct dom_walk_data *, basic_block); static void dse_finalize_block (struct dom_walk_data *, basic_block); static void fix_phi_uses (tree, tree); static void fix_stmt_v_may_defs (tree, tree); static void record_voperand_set (bitmap, bitmap *, unsigned int); /* Function indicating whether we ought to include information for 'var' when calculating immediate uses. For this pass we only want use information for virtual variables. */ static bool need_imm_uses_for_dse (tree var) { return !is_gimple_reg (var); } /* Replace uses in PHI which match V_MAY_DEF_RESULTs in STMT with the corresponding V_MAY_DEF_OP in STMT. */ static void fix_phi_uses (tree phi, tree stmt) { stmt_ann_t ann = stmt_ann (stmt); v_may_def_optype v_may_defs; unsigned int i; int j; get_stmt_operands (stmt); v_may_defs = V_MAY_DEF_OPS (ann); /* Walk each V_MAY_DEF in STMT. */ for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { tree v_may_def = V_MAY_DEF_RESULT (v_may_defs, i); /* Find any uses in the PHI which match V_MAY_DEF and replace them with the appropriate V_MAY_DEF_OP. */ for (j = 0; j < PHI_NUM_ARGS (phi); j++) if (v_may_def == PHI_ARG_DEF (phi, j)) SET_PHI_ARG_DEF (phi, j, V_MAY_DEF_OP (v_may_defs, i)); } } /* Replace the V_MAY_DEF_OPs in STMT1 which match V_MAY_DEF_RESULTs in STMT2 with the appropriate V_MAY_DEF_OPs from STMT2. */ static void fix_stmt_v_may_defs (tree stmt1, tree stmt2) { stmt_ann_t ann1 = stmt_ann (stmt1); stmt_ann_t ann2 = stmt_ann (stmt2); v_may_def_optype v_may_defs1; v_may_def_optype v_may_defs2; unsigned int i, j; get_stmt_operands (stmt1); get_stmt_operands (stmt2); v_may_defs1 = V_MAY_DEF_OPS (ann1); v_may_defs2 = V_MAY_DEF_OPS (ann2); /* Walk each V_MAY_DEF_OP in stmt1. */ for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs1); i++) { tree v_may_def1 = V_MAY_DEF_OP (v_may_defs1, i); /* Find the appropriate V_MAY_DEF_RESULT in STMT2. */ for (j = 0; j < NUM_V_MAY_DEFS (v_may_defs2); j++) { if (v_may_def1 == V_MAY_DEF_RESULT (v_may_defs2, j)) { /* Update. */ SET_V_MAY_DEF_OP (v_may_defs1, i, V_MAY_DEF_OP (v_may_defs2, j)); break; } } #ifdef ENABLE_CHECKING /* If we did not find a corresponding V_MAY_DEF_RESULT, then something has gone terribly wrong. */ if (j == NUM_V_MAY_DEFS (v_may_defs2)) abort (); #endif } } /* Set bit UID in bitmaps GLOBAL and *LOCAL, creating *LOCAL as needed. */ static void record_voperand_set (bitmap global, bitmap *local, unsigned int uid) { /* Lazily allocate the bitmap. Note that we do not get a notification when the block local data structures die, so we allocate the local bitmap backed by the GC system. */ if (*local == NULL) *local = BITMAP_GGC_ALLOC (); /* Set the bit in the local and global bitmaps. */ bitmap_set_bit (*local, uid); bitmap_set_bit (global, uid); } /* Initialize block local data structures. */ static void dse_initialize_block_local_data (struct dom_walk_data *walk_data, basic_block bb ATTRIBUTE_UNUSED, bool recycled) { struct dse_block_local_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); /* If we are given a recycled block local data structure, ensure any bitmap associated with the block is cleared. 
*/ if (recycled) { if (bd->stores) bitmap_clear (bd->stores); } } /* Attempt to eliminate dead stores in the statement referenced by BSI. A dead store is a store into a memory location which will later be overwritten by another store without any intervening loads. In this case the earlier store can be deleted. In our SSA + virtual operand world we use immediate uses of virtual operands to detect dead stores. If a store's virtual definition is used precisely once by a later store to the same location which post dominates the first store, then the first store is dead. */ static void dse_optimize_stmt (struct dom_walk_data *walk_data, basic_block bb ATTRIBUTE_UNUSED, block_stmt_iterator bsi) { struct dse_block_local_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); struct dse_global_data *dse_gd = walk_data->global_data; tree stmt = bsi_stmt (bsi); stmt_ann_t ann = stmt_ann (stmt); v_may_def_optype v_may_defs; get_stmt_operands (stmt); v_may_defs = V_MAY_DEF_OPS (ann); /* If this statement has no virtual uses, then there is nothing to do. */ if (NUM_V_MAY_DEFS (v_may_defs) == 0) return; /* We know we have virtual definitions. If this is a MODIFY_EXPR, then record it into our table. */ if (TREE_CODE (stmt) == MODIFY_EXPR && TREE_CODE (TREE_OPERAND (stmt, 1)) != CALL_EXPR) { dataflow_t df = get_immediate_uses (stmt); unsigned int num_uses = num_immediate_uses (df); tree use; tree skipped_phi; /* If there are no uses then there is nothing left to do. */ if (num_uses == 0) { record_voperand_set (dse_gd->stores, &bd->stores, ann->uid); return; } use = immediate_use (df, 0); skipped_phi = NULL; /* Skip through any PHI nodes we have already seen if the PHI represents the only use of this store. Note this does not handle the case where the store has multiple V_MAY_DEFs which all reach a set of PHI nodes in the same block. */ while (num_uses == 1 && TREE_CODE (use) == PHI_NODE && bitmap_bit_p (dse_gd->stores, stmt_ann (use)->uid)) { /* Record the first PHI we skip so that we can fix its uses if we find that STMT is a dead store. */ if (!skipped_phi) skipped_phi = use; /* Skip past this PHI and loop again in case we had a PHI chain. */ df = get_immediate_uses (use); num_uses = num_immediate_uses (df); use = immediate_use (df, 0); } /* If we have precisely one immediate use at this point, then we may have found a redundant store. */ if (num_uses == 1 && bitmap_bit_p (dse_gd->stores, stmt_ann (use)->uid) && operand_equal_p (TREE_OPERAND (stmt, 0), TREE_OPERAND (use, 0), 0)) { /* We need to fix the operands of either the first PHI we skipped, or the store which we are not deleting if we did not skip any PHIs. */ if (skipped_phi) fix_phi_uses (skipped_phi, stmt); else fix_stmt_v_may_defs (use, stmt); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " Deleted dead store '"); print_generic_expr (dump_file, bsi_stmt (bsi), dump_flags); fprintf (dump_file, "'\n"); } /* Any immediate uses which reference STMT need to instead reference the new consumer, either SKIPPED_PHI or USE. This allows us to cascade dead stores. */ redirect_immediate_uses (stmt, skipped_phi ? skipped_phi : use); /* Finally remove the dead store. */ bsi_remove (&bsi); } record_voperand_set (dse_gd->stores, &bd->stores, ann->uid); } } /* Record that we have seen the PHIs at the start of BB which correspond to virtual operands.
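   Recording virtual PHIs matters because the single virtual use of a store
   may be a PHI node rather than the overwriting store itself; the
   PHI-skipping loop in dse_optimize_stmt only walks through PHIs whose UIDs
   have been entered into the store bitmap, so without this step such
   store/PHI/store chains would never be recognized as dead stores.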
*/ static void dse_record_phis (struct dom_walk_data *walk_data, basic_block bb) { struct dse_block_local_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); struct dse_global_data *dse_gd = walk_data->global_data; tree phi; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) if (need_imm_uses_for_dse (PHI_RESULT (phi))) record_voperand_set (dse_gd->stores, &bd->stores, get_stmt_ann (phi)->uid); } static void dse_finalize_block (struct dom_walk_data *walk_data, basic_block bb ATTRIBUTE_UNUSED) { struct dse_block_local_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); struct dse_global_data *dse_gd = walk_data->global_data; bitmap stores = dse_gd->stores; unsigned int i; /* Unwind the stores noted in this basic block. */ if (bd->stores) EXECUTE_IF_SET_IN_BITMAP (bd->stores, 0, i, bitmap_clear_bit (stores, i);); } static void tree_ssa_dse (void) { struct dom_walk_data walk_data; struct dse_global_data dse_gd; unsigned int uid = 0; basic_block bb; /* Create a UID for each statement in the function. Ordering of the UIDs is not important for this pass. */ FOR_EACH_BB (bb) { block_stmt_iterator bsi; tree phi; for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) stmt_ann (bsi_stmt (bsi))->uid = uid++; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) stmt_ann (phi)->uid = uid++; } /* We might consider making this a property of each pass so that it can be [re]computed on an as-needed basis. Particularly since this pass could be seen as an extension of DCE which needs post dominators. */ calculate_dominance_info (CDI_POST_DOMINATORS); /* We also need immediate use information for virtual operands. */ compute_immediate_uses (TDFA_USE_VOPS, need_imm_uses_for_dse); /* Dead store elimination is fundamentally a walk of the post-dominator tree and a backwards walk of statements within each block. */ walk_data.walk_stmts_backward = true; walk_data.dom_direction = CDI_POST_DOMINATORS; walk_data.initialize_block_local_data = dse_initialize_block_local_data; walk_data.before_dom_children_before_stmts = NULL; walk_data.before_dom_children_walk_stmts = dse_optimize_stmt; walk_data.before_dom_children_after_stmts = dse_record_phis; walk_data.after_dom_children_before_stmts = NULL; walk_data.after_dom_children_walk_stmts = NULL; walk_data.after_dom_children_after_stmts = dse_finalize_block; walk_data.block_local_data_size = sizeof (struct dse_block_local_data); /* This is the main hash table for the dead store elimination pass. */ dse_gd.stores = BITMAP_XMALLOC (); walk_data.global_data = &dse_gd; /* Initialize the dominator walker. */ init_walk_dominator_tree (&walk_data); /* Recursively walk the dominator tree. */ walk_dominator_tree (&walk_data, EXIT_BLOCK_PTR); /* Finalize the dominator walker. */ fini_walk_dominator_tree (&walk_data); /* Release the main bitmap. */ BITMAP_XFREE (dse_gd.stores); /* Free dataflow information. It's probably out of date now anyway. */ free_df (); /* For now, just wipe the post-dominator information. 
*/ free_dominance_info (CDI_POST_DOMINATORS); } static bool gate_dse (void) { return flag_tree_dse != 0; } struct tree_opt_pass pass_dse = { "dse", /* name */ gate_dse, /* gate */ tree_ssa_dse, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_DSE, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_ggc_collect /* todo_flags_finish */ | TODO_verify_ssa }; /* SSA Dominator optimizations for trees Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file implements optimizations on the dominator tree. */ /* Hash table with expressions made available during the renaming process. When an assignment of the form X_i = EXPR is found, the statement is stored in this table. If the same expression EXPR is later found on the RHS of another statement, it is replaced with X_i (thus performing global redundancy elimination). Similarly as we pass through conditionals we record the conditional itself as having either a true or false value in this table. */ static htab_t avail_exprs; /* Structure for entries in the expression hash table. This requires more memory for the hash table entries, but allows us to avoid creating silly tree nodes and annotations for conditionals, eliminates 2 global hash tables and two block local varrays. It also allows us to reduce the number of hash table lookups we have to perform in lookup_avail_expr and finally it allows us to significantly reduce the number of calls into the hashing routine itself. */ struct expr_hash_elt { /* The value (lhs) of this expression. */ tree lhs; /* The expression (rhs) we want to record. */ tree rhs; /* The annotation if this element corresponds to a statement. */ stmt_ann_t ann; /* The hash value for RHS/ann. */ hashval_t hash; }; /* Table of constant values and copies indexed by SSA name. When the renaming pass finds an assignment of a constant (X_i = C) or a copy assignment from another SSA variable (X_i = Y_j), it creates a mapping between X_i and the RHS in this table. This mapping is used later on, when renaming uses of X_i. If an assignment to X_i is found in this table, instead of using X_i, we use the RHS of the statement stored in this table (thus performing very simplistic copy and constant propagation). */ static varray_type const_and_copies; /* Bitmap of SSA_NAMEs known to have a nonzero value, even if we do not know their exact value. */ static bitmap nonzero_vars; /* Track whether or not we have changed the control flow graph. */ static bool cfg_altered; /* Bitmap of blocks that have had EH statements cleaned. We should remove their dead edges eventually. */ static bitmap need_eh_cleanup; /* Statistics for dominator optimizations. 
*/ struct opt_stats_d { long num_stmts; long num_exprs_considered; long num_re; }; /* Value range propagation record. Each time we encounter a conditional of the form SSA_NAME COND CONST we create a new vrp_element to record how the condition affects the possible values SSA_NAME may have. Each record contains the condition tested (COND), and the range of values the variable may legitimately have if COND is true. Note the range of values may be a smaller range than COND specifies if we have recorded other ranges for this variable. Each record also contains the block in which the range was recorded for invalidation purposes. Note that the current known range is computed lazily. This allows us to avoid the overhead of computing ranges which are never queried. When we encounter a conditional, we look for records which constrain the SSA_NAME used in the condition. In some cases those records allow us to determine the condition's result at compile time. In other cases they may allow us to simplify the condition. We also use value ranges to do things like transform signed div/mod operations into unsigned div/mod or to simplify ABS_EXPRs. Simple experiments have shown these optimizations to not be all that useful on switch statements (much to my surprise). So switch statement optimizations are not performed. Note carefully we do not propagate information through each statement in the block. I.e., if we know variable X has a known range of [0, 25] and we encounter Y = X + 1, we do not track a value range for Y (which would be [1, 26] if we cared). Similarly we do not constrain values as we encounter narrowing typecasts, etc. */ struct vrp_element { /* The highest and lowest values the variable in COND may contain when COND is true. Note this may not necessarily be the same values tested by COND if the same variable was used in earlier conditionals. Note this is computed lazily and thus can be NULL indicating that the values have not been computed yet. */ tree low; tree high; /* The actual conditional we recorded. This is needed since we compute ranges lazily. */ tree cond; /* The basic block where this record was created. We use this to determine when to remove records. */ basic_block bb; }; static struct opt_stats_d opt_stats; /* This virtual array holds pairs of edges which describe a scheduled edge redirection from jump threading. The first entry in each pair is the edge we are going to redirect. The second entry in each pair is the edge leading to our final destination block. By providing this as an edge rather than the final target block itself we can correctly handle redirections when the target block had PHIs which required edge insertions/splitting to remove the PHIs. */ static GTY(()) varray_type redirection_edges; /* A virtual array holding value range records for the variable identified by the index, SSA_VERSION. */ static varray_type vrp_data; /* Datastructure for block local data used during the dominator walk. We maintain a stack of these as we recursively walk down the dominator tree. */ struct dom_walk_block_data { /* Array of all the expressions entered into the global expression hash table by this block. During finalization we use this array to know what expressions to remove from the global expression hash table. */ varray_type avail_exprs; /* Array of dest, src pairs that need to be restored into the global const/copies table during finalization. */
*/ varray_type const_and_copies; /* Similarly for the nonzero state of variables that needs to be restored during finalization. */ varray_type nonzero_vars; /* Array of statements we need to rescan during finalization for newly exposed variables. */ varray_type stmts_to_rescan; /* Array of variables which have their values constrained by operations in this basic block. We use this during finalization to know which variables need their VRP data updated. */ varray_type vrp_variables; /* Array of tree pairs used to restore the global currdefs to its original state after completing optimization of a block and its dominator children. */ varray_type block_defs; }; struct eq_expr_value { tree src; tree dst; }; /* Local functions. */ static void optimize_stmt (struct dom_walk_data *, basic_block bb, block_stmt_iterator); static inline tree get_value_for (tree, varray_type table); static inline void set_value_for (tree, tree, varray_type table); static tree lookup_avail_expr (tree, varray_type *, bool); static struct eq_expr_value get_eq_expr_value (tree, int, varray_type *, basic_block, varray_type *); static hashval_t avail_expr_hash (const void *); static hashval_t real_avail_expr_hash (const void *); static int avail_expr_eq (const void *, const void *); static void htab_statistics2 (FILE *, htab_t); static void record_cond (tree, tree, varray_type *); static void record_dominating_conditions (tree, varray_type *); static void record_const_or_copy (tree, tree, varray_type *); static void record_equality (tree, tree, varray_type *); static tree update_rhs_and_lookup_avail_expr (tree, tree, varray_type *, stmt_ann_t, bool); static tree simplify_rhs_and_lookup_avail_expr (struct dom_walk_data *, tree, stmt_ann_t, int); static tree simplify_cond_and_lookup_avail_expr (tree, varray_type *, stmt_ann_t, int); static tree simplify_switch_and_lookup_avail_expr (tree, varray_type *, stmt_ann_t, int); static tree find_equivalent_equality_comparison (tree); static void record_range (tree, basic_block, varray_type *); static bool extract_range_from_cond (tree, tree *, tree *, int *); static void record_equivalences_from_phis (struct dom_walk_data *, basic_block); static void record_equivalences_from_incoming_edge (struct dom_walk_data *, basic_block); static bool eliminate_redundant_computations (struct dom_walk_data *, tree, stmt_ann_t); static void record_equivalences_from_stmt (tree, varray_type *, varray_type *, int, stmt_ann_t); static void thread_across_edge (struct dom_walk_data *, edge); static void dom_opt_finalize_block (struct dom_walk_data *, basic_block); static void dom_opt_initialize_block_local_data (struct dom_walk_data *, basic_block, bool); static void dom_opt_initialize_block (struct dom_walk_data *, basic_block); static void cprop_into_phis (struct dom_walk_data *, basic_block); static void remove_local_expressions_from_table (varray_type locals, unsigned limit, htab_t table); static void restore_vars_to_original_value (varray_type locals, unsigned limit, varray_type table); static void restore_currdefs_to_original_value (varray_type locals, unsigned limit); static void register_definitions_for_stmt (stmt_ann_t, varray_type *); static void redirect_edges_and_update_ssa_graph (varray_type); /* Local version of fold that doesn't introduce cruft. */ static tree local_fold (tree t) { t = fold (t); /* Strip away useless type conversions. Both the NON_LVALUE_EXPR that may have been added by fold, and "useless" type conversions that might now be apparent due to propagation. 
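   For example, after a copy or constant has been propagated, an expression
   such as (int) x_1 where x_1 already has type int carries a conversion that
   no longer does anything; leaving it in place would make otherwise
   identical expressions hash differently in the available-expression table.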
*/ STRIP_USELESS_TYPE_CONVERSION (t); return t; } /* Return the value associated with variable VAR in TABLE. */ static inline tree get_value_for (tree var, varray_type table) { return VARRAY_TREE (table, SSA_NAME_VERSION (var)); } /* Associate VALUE to variable VAR in TABLE. */ static inline void set_value_for (tree var, tree value, varray_type table) { VARRAY_TREE (table, SSA_NAME_VERSION (var)) = value; } /* REDIRECTION_EDGES contains edge pairs where we want to revector the destination of the first edge to the destination of the second edge. These redirections may significantly change the SSA graph since we allow redirection through blocks with PHI nodes and blocks with real instructions in some cases. This routine will perform the requested redirections and incrementally update the SSA graph. Note in some cases requested redirections may be ignored as they can not be safely implemented. */ static void redirect_edges_and_update_ssa_graph (varray_type redirection_edges) { basic_block tgt, bb; tree phi; unsigned int i; size_t old_num_referenced_vars = num_referenced_vars; bitmap virtuals_to_rename = BITMAP_XMALLOC (); /* First note any variables which we are going to have to take out of SSA form as well as any virtuals which need updating. */ for (i = 0; i < VARRAY_ACTIVE_SIZE (redirection_edges); i += 2) { block_stmt_iterator bsi; edge e; basic_block tgt; tree phi; e = VARRAY_EDGE (redirection_edges, i); tgt = VARRAY_EDGE (redirection_edges, i + 1)->dest; /* All variables referenced in PHI nodes we bypass must be renamed. */ for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi)) { tree result = SSA_NAME_VAR (PHI_RESULT (phi)); if (is_gimple_reg (PHI_RESULT (phi))) bitmap_set_bit (vars_to_rename, var_ann (result)->uid); else bitmap_set_bit (virtuals_to_rename, var_ann (result)->uid); } /* Any variables set by statements at the start of the block we are bypassing must also be taken out of SSA form. */ for (bsi = bsi_start (e->dest); ! bsi_end_p (bsi); bsi_next (&bsi)) { unsigned int j; def_optype defs; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; tree stmt = bsi_stmt (bsi); stmt_ann_t ann = stmt_ann (stmt); if (TREE_CODE (stmt) == COND_EXPR) break; get_stmt_operands (stmt); defs = DEF_OPS (ann); for (j = 0; j < NUM_DEFS (defs); j++) { tree op = SSA_NAME_VAR (DEF_OP (defs, j)); bitmap_set_bit (vars_to_rename, var_ann (op)->uid); } v_may_defs = STMT_V_MAY_DEF_OPS (stmt); for (j = 0; j < NUM_V_MAY_DEFS (v_may_defs); j++) { tree op = V_MAY_DEF_RESULT (v_may_defs, j); bitmap_set_bit (vars_to_rename, var_ann (op)->uid); } v_must_defs = STMT_V_MUST_DEF_OPS (stmt); for (j = 0; j < NUM_V_MUST_DEFS (v_must_defs); j++) { tree op = V_MUST_DEF_OP (v_must_defs, j); bitmap_set_bit (vars_to_rename, var_ann (op)->uid); } } /* Finally, any variables in PHI nodes at our final destination must also be taken out of SSA form. */ for (phi = phi_nodes (tgt); phi; phi = PHI_CHAIN (phi)) { tree result = SSA_NAME_VAR (PHI_RESULT (phi)); if (is_gimple_reg (PHI_RESULT (phi))) bitmap_set_bit (vars_to_rename, var_ann (result)->uid); else bitmap_set_bit (virtuals_to_rename, var_ann (result)->uid); } } /* Take those selected variables out of SSA form. This must be done before we start redirecting edges. */ if (bitmap_first_set_bit (vars_to_rename) >= 0) rewrite_vars_out_of_ssa (vars_to_rename); /* The out of SSA translation above may split the edge from E->src to E->dest. This could potentially cause us to lose an assignment leading to invalid warnings about uninitialized variables or incorrect code.
Luckily, we can detect this by looking at the last statement in E->dest. If it is not a COND_EXPR or SWITCH_EXPR, then the edge was split and instead of E, we want E->dest->succ. */ for (i = 0; i < VARRAY_ACTIVE_SIZE (redirection_edges); i += 2) { edge e = VARRAY_EDGE (redirection_edges, i); tree last = last_stmt (e->dest); if (last && TREE_CODE (last) != COND_EXPR && TREE_CODE (last) != SWITCH_EXPR) { e = e->dest->succ; #ifdef ENABLE_CHECKING /* There should only be a single successor if the original edge was split. */ if (e->succ_next) abort (); #endif /* Replace the edge in REDIRECTION_EDGES for the loop below. */ VARRAY_EDGE (redirection_edges, i) = e; } } /* If we created any new variables as part of the out-of-ssa translation, then any jump threads must be invalidated if they bypass a block in which we skipped instructions. This is necessary as instructions which appeared to be NOPS may be necessary after the out-of-ssa translation. */ if (num_referenced_vars != old_num_referenced_vars) { for (i = 0; i < VARRAY_ACTIVE_SIZE (redirection_edges); i += 2) { block_stmt_iterator bsi; edge e; e = VARRAY_EDGE (redirection_edges, i); for (bsi = bsi_start (e->dest); ! bsi_end_p (bsi); bsi_next (&bsi)) { tree stmt = bsi_stmt (bsi); if (IS_EMPTY_STMT (stmt) || TREE_CODE (stmt) == LABEL_EXPR) continue; if (TREE_CODE (stmt) == COND_EXPR) break; /* Invalidate the jump thread. */ VARRAY_EDGE (redirection_edges, i) = NULL; VARRAY_EDGE (redirection_edges, i + 1) = NULL; break; } } } /* Now redirect the edges. */ for (i = 0; i < VARRAY_ACTIVE_SIZE (redirection_edges); i += 2) { basic_block src; edge e; e = VARRAY_EDGE (redirection_edges, i); if (!e) continue; tgt = VARRAY_EDGE (redirection_edges, i + 1)->dest; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Threaded jump %d --> %d to %d\n", e->src->index, e->dest->index, tgt->index); src = e->src; e = redirect_edge_and_branch (e, tgt); PENDING_STMT (e) = NULL_TREE; /* Updating the dominance information would be nontrivial. */ free_dominance_info (CDI_DOMINATORS); if ((dump_file && (dump_flags & TDF_DETAILS)) && e->src != src) fprintf (dump_file, " basic block %d created\n", e->src->index); cfg_altered = true; } VARRAY_CLEAR (redirection_edges); for (i = old_num_referenced_vars; i < num_referenced_vars; i++) { bitmap_set_bit (vars_to_rename, i); var_ann (referenced_var (i))->out_of_ssa_tag = 0; } bitmap_a_or_b (vars_to_rename, vars_to_rename, virtuals_to_rename); /* We must remove any PHIs for virtual variables that we are going to re-rename. Hopefully we'll be able to simply update these incrementally soon. */ FOR_EACH_BB (bb) { tree next; for (phi = phi_nodes (bb); phi; phi = next) { tree result = PHI_RESULT (phi); next = PHI_CHAIN (phi); if (bitmap_bit_p (virtuals_to_rename, var_ann (SSA_NAME_VAR (result))->uid)) remove_phi_node (phi, NULL, bb); } } BITMAP_XFREE (virtuals_to_rename); } /* Jump threading, redundancy elimination and const/copy propagation. This pass may expose new symbols that need to be renamed into SSA. For every new symbol exposed, its corresponding bit will be set in VARS_TO_RENAME. */ static void tree_ssa_dominator_optimize (void) { basic_block bb; struct dom_walk_data walk_data; unsigned int i; for (i = 0; i < num_referenced_vars; i++) var_ann (referenced_var (i))->current_def = NULL; /* Mark loop edges so we avoid threading across loop boundaries. This may result in transforming natural loop into irreducible region. */ mark_dfs_back_edges (); /* Create our hash tables. 
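AVAIL_EXPRS hashes an expression's right-hand side (or a condition) and records the value already known for it, while CONST_AND_COPIES and VRP_DATA are indexed by SSA version number.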
*/ avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq, free); VARRAY_TREE_INIT (const_and_copies, num_ssa_names, "const_and_copies"); nonzero_vars = BITMAP_XMALLOC (); VARRAY_EDGE_INIT (redirection_edges, 20, "redirection_edges"); VARRAY_GENERIC_PTR_INIT (vrp_data, num_ssa_names, "vrp_data"); need_eh_cleanup = BITMAP_XMALLOC (); /* Setup callbacks for the generic dominator tree walker. */ walk_data.walk_stmts_backward = false; walk_data.dom_direction = CDI_DOMINATORS; walk_data.initialize_block_local_data = dom_opt_initialize_block_local_data; walk_data.before_dom_children_before_stmts = dom_opt_initialize_block; walk_data.before_dom_children_walk_stmts = optimize_stmt; walk_data.before_dom_children_after_stmts = cprop_into_phis; walk_data.after_dom_children_before_stmts = NULL; walk_data.after_dom_children_walk_stmts = NULL; walk_data.after_dom_children_after_stmts = dom_opt_finalize_block; /* Right now we only attach a dummy COND_EXPR to the global data pointer. When we attach more stuff we'll need to fill this out with a real structure. */ walk_data.global_data = NULL; walk_data.block_local_data_size = sizeof (struct dom_walk_block_data); /* Now initialize the dominator walker. */ init_walk_dominator_tree (&walk_data); /* Reset block_forwardable in each block's annotation. We use that attribute when threading through COND_EXPRs. */ FOR_EACH_BB (bb) bb_ann (bb)->forwardable = 1; calculate_dominance_info (CDI_DOMINATORS); /* If we prove certain blocks are unreachable, then we want to repeat the dominator optimization process as PHI nodes may have turned into copies which allows better propagation of values. So we repeat until we do not identify any new unreachable blocks. */ do { /* Optimize the dominator tree. */ cfg_altered = false; /* Recursively walk the dominator tree optimizing statements. */ walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR); /* Wipe the hash tables. */ if (VARRAY_ACTIVE_SIZE (redirection_edges) > 0) redirect_edges_and_update_ssa_graph (redirection_edges); if (bitmap_first_set_bit (need_eh_cleanup) >= 0) { cfg_altered = tree_purge_all_dead_eh_edges (need_eh_cleanup); bitmap_zero (need_eh_cleanup); } /* We may have made some basic blocks unreachable, remove them. */ cfg_altered |= delete_unreachable_blocks (); /* If the CFG was altered, then recompute the dominator tree. This is not strictly needed if we only removed unreachable blocks, but may produce better results. If we threaded jumps, then rebuilding the dominator tree is strictly necessary. Likewise with EH cleanup. Free the dominance info first so that cleanup_tree_cfg doesn't try to verify it. */ if (cfg_altered) { free_dominance_info (CDI_DOMINATORS); cleanup_tree_cfg (); calculate_dominance_info (CDI_DOMINATORS); } /* If we are going to iterate (CFG_ALTERED is true), then we must perform any queued renaming before the next iteration. */ if (cfg_altered && bitmap_first_set_bit (vars_to_rename) >= 0) { rewrite_into_ssa (false); bitmap_clear (vars_to_rename); /* The into SSA translation may have created new SSA_NAMES whic affect the size of CONST_AND_COPIES and VRP_DATA. */ VARRAY_GROW (const_and_copies, num_ssa_names); VARRAY_GROW (vrp_data, num_ssa_names); } /* Reinitialize the various tables. */ bitmap_clear (nonzero_vars); htab_empty (avail_exprs); VARRAY_CLEAR (const_and_copies); VARRAY_CLEAR (vrp_data); for (i = 0; i < num_referenced_vars; i++) var_ann (referenced_var (i))->current_def = NULL; } while (cfg_altered); /* Remove any unreachable blocks left behind and linearize the CFG. 
*/ cleanup_tree_cfg (); /* Debugging dumps. */ if (dump_file && (dump_flags & TDF_STATS)) dump_dominator_optimization_stats (dump_file); /* We emptied the hash table earlier, now delete it completely. */ htab_delete (avail_exprs); /* It is not necessary to clear CURRDEFS, REDIRECTION_EDGES, VRP_DATA, CONST_AND_COPIES, and NONZERO_VARS as they all get cleared at the bottom of the do-while loop above. */ /* And finalize the dominator walker. */ fini_walk_dominator_tree (&walk_data); /* Free nonzero_vars. */ BITMAP_XFREE (nonzero_vars); BITMAP_XFREE (need_eh_cleanup); } static bool gate_dominator (void) { return flag_tree_dom != 0; } struct tree_opt_pass pass_dominator = { "dom", /* name */ gate_dominator, /* gate */ tree_ssa_dominator_optimize, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_rename_vars | TODO_verify_ssa /* todo_flags_finish */ }; /* We are exiting BB, see if the target block begins with a conditional jump which has a known value when reached via BB. */ static void thread_across_edge (struct dom_walk_data *walk_data, edge e) { struct dom_walk_block_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); block_stmt_iterator bsi; tree stmt = NULL; tree phi; /* Each PHI creates a temporary equivalence, record them. */ for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi)) { tree src = PHI_ARG_DEF_FROM_EDGE (phi, e); tree dst = PHI_RESULT (phi); record_const_or_copy (dst, src, &bd->const_and_copies); register_new_def (dst, &bd->block_defs); } for (bsi = bsi_start (e->dest); ! bsi_end_p (bsi); bsi_next (&bsi)) { tree lhs, cached_lhs; stmt = bsi_stmt (bsi); /* Ignore empty statements and labels. */ if (IS_EMPTY_STMT (stmt) || TREE_CODE (stmt) == LABEL_EXPR) continue; /* If this is not a MODIFY_EXPR which sets an SSA_NAME to a new value, then stop our search here. Ideally when we stop a search we stop on a COND_EXPR or SWITCH_EXPR. */ if (TREE_CODE (stmt) != MODIFY_EXPR || TREE_CODE (TREE_OPERAND (stmt, 0)) != SSA_NAME) break; /* At this point we have a statement which assigns an RHS to an SSA_VAR on the LHS. We want to prove that the RHS is already available and that its value is held in the current definition of the LHS -- meaning that this assignment is a NOP when reached via edge E. */ if (TREE_CODE (TREE_OPERAND (stmt, 1)) == SSA_NAME) cached_lhs = TREE_OPERAND (stmt, 1); else cached_lhs = lookup_avail_expr (stmt, NULL, false); lhs = TREE_OPERAND (stmt, 0); /* This can happen if we thread around to the start of a loop. */ if (lhs == cached_lhs) break; /* If we did not find RHS in the hash table, then try again after temporarily const/copy propagating the operands. */ if (!cached_lhs) { /* Copy the operands. */ stmt_ann_t ann = stmt_ann (stmt); use_optype uses = USE_OPS (ann); vuse_optype vuses = VUSE_OPS (ann); tree *uses_copy = xcalloc (NUM_USES (uses), sizeof (tree)); tree *vuses_copy = xcalloc (NUM_VUSES (vuses), sizeof (tree)); unsigned int i; /* Make a copy of the uses into USES_COPY, then cprop into the use operands. */ for (i = 0; i < NUM_USES (uses); i++) { tree tmp = NULL; uses_copy[i] = USE_OP (uses, i); if (TREE_CODE (USE_OP (uses, i)) == SSA_NAME) tmp = get_value_for (USE_OP (uses, i), const_and_copies); if (tmp) SET_USE_OP (uses, i, tmp); } /* Similarly for virtual uses. 
*/ for (i = 0; i < NUM_VUSES (vuses); i++) { tree tmp = NULL; vuses_copy[i] = VUSE_OP (vuses, i); if (TREE_CODE (VUSE_OP (vuses, i)) == SSA_NAME) tmp = get_value_for (VUSE_OP (vuses, i), const_and_copies); if (tmp) SET_VUSE_OP (vuses, i, tmp); } /* Try to lookup the new expression. */ cached_lhs = lookup_avail_expr (stmt, NULL, false); /* Restore the statement's original uses/defs. */ for (i = 0; i < NUM_USES (uses); i++) SET_USE_OP (uses, i, uses_copy[i]); for (i = 0; i < NUM_VUSES (vuses); i++) SET_VUSE_OP (vuses, i, vuses_copy[i]); free (uses_copy); free (vuses_copy); /* If we still did not find the expression in the hash table, then we can not ignore this statement. */ if (! cached_lhs) break; } /* If the expression in the hash table was not assigned to an SSA_NAME, then we can not ignore this statement. */ if (TREE_CODE (cached_lhs) != SSA_NAME) break; /* If we have different underlying variables, then we can not ignore this statement. */ if (SSA_NAME_VAR (cached_lhs) != SSA_NAME_VAR (lhs)) break; /* If CACHED_LHS does not represent the current value of the underlying variable in CACHED_LHS/LHS, then we can not ignore this statement. */ if (var_ann (SSA_NAME_VAR (lhs))->current_def != cached_lhs) break; /* If we got here, then we can ignore this statement and continue walking through the statements in the block looking for a threadable COND_EXPR. We want to record an equivalence lhs = cached_lhs so that if the result of this statement is used later we can copy propagate suitably. */ record_const_or_copy (lhs, cached_lhs, &bd->const_and_copies); register_new_def (lhs, &bd->block_defs); } /* If we stopped at a COND_EXPR or SWITCH_EXPR, then see if we know which arm will be taken. */ if (stmt && (TREE_CODE (stmt) == COND_EXPR || TREE_CODE (stmt) == SWITCH_EXPR)) { tree cond, cached_lhs; edge e1; /* Do not forward entry edges into the loop. If the loop has multiple entry edges we may end up constructing an irreducible region. ??? We may consider forwarding the edges in the case all incoming edges forward to the same destination block. */ if ((e->flags & EDGE_DFS_BACK) == 0) { for (e1 = e->dest->pred; e1; e1 = e1->pred_next) if (e1->flags & EDGE_DFS_BACK) break; if (e1) return; } /* Now temporarily cprop the operands and try to find the resulting expression in the hash tables. */ if (TREE_CODE (stmt) == COND_EXPR) cond = COND_EXPR_COND (stmt); else cond = SWITCH_COND (stmt); if (TREE_CODE_CLASS (TREE_CODE (cond)) == '<') { tree dummy_cond, op0, op1; enum tree_code cond_code; op0 = TREE_OPERAND (cond, 0); op1 = TREE_OPERAND (cond, 1); cond_code = TREE_CODE (cond); /* Get the current value of both operands. */ if (TREE_CODE (op0) == SSA_NAME) { tree tmp = get_value_for (op0, const_and_copies); if (tmp) op0 = tmp; } if (TREE_CODE (op1) == SSA_NAME) { tree tmp = get_value_for (op1, const_and_copies); if (tmp) op1 = tmp; } /* Stuff the operator and operands into our dummy conditional expression, creating the dummy conditional if necessary. */ dummy_cond = walk_data->global_data; if (! dummy_cond) { dummy_cond = build (cond_code, boolean_type_node, op0, op1); dummy_cond = build (COND_EXPR, void_type_node, dummy_cond, NULL, NULL); walk_data->global_data = dummy_cond; } else { TREE_SET_CODE (TREE_OPERAND (dummy_cond, 0), cond_code); TREE_OPERAND (TREE_OPERAND (dummy_cond, 0), 0) = op0; TREE_OPERAND (TREE_OPERAND (dummy_cond, 0), 1) = op1; } /* If the conditional folds to an invariant, then we are done, otherwise look it up in the hash tables.
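For instance, if both operands were propagated to constants, say 4 and 7 (hypothetical values), local_fold reduces the dummy condition 4 < 7 directly to boolean_true_node and no table lookup is needed.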
*/ cached_lhs = local_fold (COND_EXPR_COND (dummy_cond)); if (! is_gimple_min_invariant (cached_lhs)) cached_lhs = lookup_avail_expr (dummy_cond, NULL, false); if (!cached_lhs || ! is_gimple_min_invariant (cached_lhs)) { stmt_ann_t ann = get_stmt_ann (dummy_cond); cached_lhs = simplify_cond_and_lookup_avail_expr (dummy_cond, NULL, ann, false); } } /* We can have conditionals which just test the state of a variable rather than use a relational operator. These are simpler to handle. */ else if (TREE_CODE (cond) == SSA_NAME) { cached_lhs = cond; cached_lhs = get_value_for (cached_lhs, const_and_copies); if (cached_lhs && ! is_gimple_min_invariant (cached_lhs)) cached_lhs = 0; } else cached_lhs = lookup_avail_expr (stmt, NULL, false); if (cached_lhs) { edge taken_edge = find_taken_edge (e->dest, cached_lhs); basic_block dest = (taken_edge ? taken_edge->dest : NULL); if (dest == e->dest) return; /* If we have a known destination for the conditional, then we can perform this optimization, which saves at least one conditional jump each time it applies since we get to bypass the conditional at our original destination. Note that we can either thread through a block with PHIs or to a block with PHIs, but not both. At this time the bookkeeping to keep the CFG & SSA up-to-date has proven difficult. */ if (dest) { int saved_forwardable = bb_ann (e->src)->forwardable; edge tmp_edge; bb_ann (e->src)->forwardable = 0; tmp_edge = tree_block_forwards_to (dest); taken_edge = (tmp_edge ? tmp_edge : taken_edge); bb_ann (e->src)->forwardable = saved_forwardable; VARRAY_PUSH_EDGE (redirection_edges, e); VARRAY_PUSH_EDGE (redirection_edges, taken_edge); } } } } /* Initialize the local stacks. AVAIL_EXPRS stores all the expressions made available in this block. CONST_AND_COPIES stores var/value pairs to restore at the end of this block. NONZERO_VARS stores the vars which have a nonzero value made in this block. STMTS_TO_RESCAN is a list of statements we will rescan for operands. VRP_VARIABLES is the list of variables which have had their values constrained by an operation in this block. These stacks are cleared in the finalization routine run for each block. */ static void dom_opt_initialize_block_local_data (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED, basic_block bb ATTRIBUTE_UNUSED, bool recycled ATTRIBUTE_UNUSED) { #ifdef ENABLE_CHECKING struct dom_walk_block_data *bd = (struct dom_walk_block_data *)VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); /* We get cleared memory from the allocator, so if the memory is not cleared, then we are re-using a previously allocated entry. In that case, we can also re-use the underlying virtual arrays. Just make sure we clear them before using them! */ if (recycled) { if (bd->avail_exprs && VARRAY_ACTIVE_SIZE (bd->avail_exprs) > 0) abort (); if (bd->const_and_copies && VARRAY_ACTIVE_SIZE (bd->const_and_copies) > 0) abort (); if (bd->nonzero_vars && VARRAY_ACTIVE_SIZE (bd->nonzero_vars) > 0) abort (); if (bd->stmts_to_rescan && VARRAY_ACTIVE_SIZE (bd->stmts_to_rescan) > 0) abort (); if (bd->vrp_variables && VARRAY_ACTIVE_SIZE (bd->vrp_variables) > 0) abort (); if (bd->block_defs && VARRAY_ACTIVE_SIZE (bd->block_defs) > 0) abort (); } #endif } /* Initialize local stacks for this optimizer and record equivalences upon entry to BB. Equivalences can come from the edge traversed to reach BB or they may come from PHI nodes at the start of BB. 
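For instance (hypothetical SSA names), if BB's only incoming edge is the true arm of 'if (a_1 == 5)', then a_1 may be treated as 5 within BB; similarly a degenerate PHI such as b_2 = PHI <7, 7> makes b_2 equivalent to 7.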
*/ static void dom_opt_initialize_block (struct dom_walk_data *walk_data, basic_block bb) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index); record_equivalences_from_incoming_edge (walk_data, bb); /* PHI nodes can create equivalences too. */ record_equivalences_from_phis (walk_data, bb); } /* Given an expression EXPR (a relational expression or a statement), initialize the hash table element pointed by by ELEMENT. */ static void initialize_hash_element (tree expr, tree lhs, struct expr_hash_elt *element) { /* Hash table elements may be based on conditional expressions or statements. For the former case, we have no annotation and we want to hash the conditional expression. In the latter case we have an annotation and we want to record the expression the statement evaluates. */ if (TREE_CODE_CLASS (TREE_CODE (expr)) == '<' || TREE_CODE (expr) == TRUTH_NOT_EXPR) { element->ann = NULL; element->rhs = expr; } else if (TREE_CODE (expr) == COND_EXPR) { element->ann = stmt_ann (expr); element->rhs = COND_EXPR_COND (expr); } else if (TREE_CODE (expr) == SWITCH_EXPR) { element->ann = stmt_ann (expr); element->rhs = SWITCH_COND (expr); } else if (TREE_CODE (expr) == RETURN_EXPR && TREE_OPERAND (expr, 0)) { element->ann = stmt_ann (expr); element->rhs = TREE_OPERAND (TREE_OPERAND (expr, 0), 1); } else { element->ann = stmt_ann (expr); element->rhs = TREE_OPERAND (expr, 1); } element->lhs = lhs; element->hash = avail_expr_hash (element); } /* Remove all the expressions in LOCALS from TABLE, stopping when there are LIMIT entries left in LOCALs. */ static void remove_local_expressions_from_table (varray_type locals, unsigned limit, htab_t table) { if (! locals) return; /* Remove all the expressions made available in this block. */ while (VARRAY_ACTIVE_SIZE (locals) > limit) { struct expr_hash_elt element; tree expr = VARRAY_TOP_TREE (locals); VARRAY_POP (locals); initialize_hash_element (expr, NULL, &element); htab_remove_elt_with_hash (table, &element, element.hash); } } /* Use the SSA_NAMES in LOCALS to restore TABLE to its original state, stopping when there are LIMIT entries left in LOCALs. */ static void restore_nonzero_vars_to_original_value (varray_type locals, unsigned limit, bitmap table) { if (!locals) return; while (VARRAY_ACTIVE_SIZE (locals) > limit) { tree name = VARRAY_TOP_TREE (locals); VARRAY_POP (locals); bitmap_clear_bit (table, SSA_NAME_VERSION (name)); } } /* Use the source/dest pairs in LOCALS to restore TABLE to its original state, stopping when there are LIMIT entries left in LOCALs. */ static void restore_vars_to_original_value (varray_type locals, unsigned limit, varray_type table) { if (! locals) return; while (VARRAY_ACTIVE_SIZE (locals) > limit) { tree prev_value, dest; prev_value = VARRAY_TOP_TREE (locals); VARRAY_POP (locals); dest = VARRAY_TOP_TREE (locals); VARRAY_POP (locals); set_value_for (dest, prev_value, table); } } /* Similar to restore_vars_to_original_value, except that it restores CURRDEFS to its original value. */ static void restore_currdefs_to_original_value (varray_type locals, unsigned limit) { if (!locals) return; /* Restore CURRDEFS to its original state. */ while (VARRAY_ACTIVE_SIZE (locals) > limit) { tree tmp = VARRAY_TOP_TREE (locals); tree saved_def, var; VARRAY_POP (locals); /* If we recorded an SSA_NAME, then make the SSA_NAME the current definition of its underlying variable. 
If we recorded anything else, it must have been an _DECL node and its current reaching definition must have been NULL. */ if (TREE_CODE (tmp) == SSA_NAME) { saved_def = tmp; var = SSA_NAME_VAR (saved_def); } else { saved_def = NULL; var = tmp; } var_ann (var)->current_def = saved_def; } } /* We have finished processing the dominator children of BB, perform any finalization actions in preparation for leaving this node in the dominator tree. */ static void dom_opt_finalize_block (struct dom_walk_data *walk_data, basic_block bb) { struct dom_walk_block_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); tree last; /* If we are at a leaf node in the dominator graph, see if we can thread the edge from BB through its successor. Do this before we remove entries from our equivalence tables. */ if (bb->succ && ! bb->succ->succ_next && (bb->succ->flags & EDGE_ABNORMAL) == 0 && (get_immediate_dominator (CDI_DOMINATORS, bb->succ->dest) != bb || phi_nodes (bb->succ->dest))) { thread_across_edge (walk_data, bb->succ); } else if ((last = last_stmt (bb)) && TREE_CODE (last) == COND_EXPR && (TREE_CODE_CLASS (TREE_CODE (COND_EXPR_COND (last))) == '<' || TREE_CODE (COND_EXPR_COND (last)) == SSA_NAME) && bb->succ && (bb->succ->flags & EDGE_ABNORMAL) == 0 && bb->succ->succ_next && (bb->succ->succ_next->flags & EDGE_ABNORMAL) == 0 && ! bb->succ->succ_next->succ_next) { edge true_edge, false_edge; tree cond, inverted = NULL; enum tree_code cond_code; extract_true_false_edges_from_block (bb, &true_edge, &false_edge); cond = COND_EXPR_COND (last); cond_code = TREE_CODE (cond); if (TREE_CODE_CLASS (cond_code) == '<') inverted = invert_truthvalue (cond); /* If the THEN arm is the end of a dominator tree or has PHI nodes, then try to thread through its edge. */ if (get_immediate_dominator (CDI_DOMINATORS, true_edge->dest) != bb || phi_nodes (true_edge->dest)) { unsigned avail_expr_limit; unsigned const_and_copies_limit; unsigned currdefs_limit; avail_expr_limit = bd->avail_exprs ? VARRAY_ACTIVE_SIZE (bd->avail_exprs) : 0; const_and_copies_limit = bd->const_and_copies ? VARRAY_ACTIVE_SIZE (bd->const_and_copies) : 0; currdefs_limit = bd->block_defs ? VARRAY_ACTIVE_SIZE (bd->block_defs) : 0; /* Record any equivalences created by following this edge. */ if (TREE_CODE_CLASS (cond_code) == '<') { record_cond (cond, boolean_true_node, &bd->avail_exprs); record_dominating_conditions (cond, &bd->avail_exprs); record_cond (inverted, boolean_false_node, &bd->avail_exprs); } else if (cond_code == SSA_NAME) record_const_or_copy (cond, boolean_true_node, &bd->const_and_copies); /* Now thread the edge. */ thread_across_edge (walk_data, true_edge); /* And restore the various tables to their state before we threaded this edge. */ remove_local_expressions_from_table (bd->avail_exprs, avail_expr_limit, avail_exprs); restore_vars_to_original_value (bd->const_and_copies, const_and_copies_limit, const_and_copies); restore_currdefs_to_original_value (bd->block_defs, currdefs_limit); } /* Similarly for the ELSE arm. */ if (get_immediate_dominator (CDI_DOMINATORS, false_edge->dest) != bb || phi_nodes (false_edge->dest)) { /* Record any equivalences created by following this edge. 
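For instance (hypothetical SSA names), if COND is a_1 < b_2, then along the false edge we may record that a_1 < b_2 evaluates to false and that its inversion a_1 >= b_2 evaluates to true.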
*/ if (TREE_CODE_CLASS (cond_code) == '<') { record_cond (cond, boolean_false_node, &bd->avail_exprs); record_cond (inverted, boolean_true_node, &bd->avail_exprs); record_dominating_conditions (inverted, &bd->avail_exprs); } else if (cond_code == SSA_NAME) record_const_or_copy (cond, boolean_false_node, &bd->const_and_copies); thread_across_edge (walk_data, false_edge); /* No need to remove local expressions from our tables or restore vars to their original value as that will be done immediately below. */ } } remove_local_expressions_from_table (bd->avail_exprs, 0, avail_exprs); restore_nonzero_vars_to_original_value (bd->nonzero_vars, 0, nonzero_vars); restore_vars_to_original_value (bd->const_and_copies, 0, const_and_copies); restore_currdefs_to_original_value (bd->block_defs, 0); /* Remove VRP records associated with this basic block. They are no longer valid. To be efficient, we note which variables have had their values constrained in this block. So walk over each variable in the VRP_VARIABLEs array. */ while (bd->vrp_variables && VARRAY_ACTIVE_SIZE (bd->vrp_variables) > 0) { tree var = VARRAY_TOP_TREE (bd->vrp_variables); /* Each variable has a stack of value range records. We want to invalidate those associated with our basic block. So we walk the array backwards popping off records associated with our block. Once we hit a record not associated with our block we are done. */ varray_type var_vrp_records = VARRAY_GENERIC_PTR (vrp_data, SSA_NAME_VERSION (var)); while (VARRAY_ACTIVE_SIZE (var_vrp_records) > 0) { struct vrp_element *element = (struct vrp_element *)VARRAY_TOP_GENERIC_PTR (var_vrp_records); if (element->bb != bb) break; VARRAY_POP (var_vrp_records); } VARRAY_POP (bd->vrp_variables); } /* Re-scan operands in all statements that may have had new symbols exposed. */ while (bd->stmts_to_rescan && VARRAY_ACTIVE_SIZE (bd->stmts_to_rescan) > 0) { tree stmt = VARRAY_TOP_TREE (bd->stmts_to_rescan); VARRAY_POP (bd->stmts_to_rescan); mark_new_vars_to_rename (stmt, vars_to_rename); } } /* PHI nodes can create equivalences too. Ignoring any alternatives which are the same as the result, if all the alternatives are equal, then the PHI node creates an equivalence. Additionally, if all the PHI alternatives are known to have a nonzero value, then the result of this PHI is known to have a nonzero value, even if we do not know its exact value. */ static void record_equivalences_from_phis (struct dom_walk_data *walk_data, basic_block bb) { struct dom_walk_block_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); tree phi; for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { tree lhs = PHI_RESULT (phi); tree rhs = NULL; int i; for (i = 0; i < PHI_NUM_ARGS (phi); i++) { tree t = PHI_ARG_DEF (phi, i); if (TREE_CODE (t) == SSA_NAME || is_gimple_min_invariant (t)) { /* Ignore alternatives which are the same as our LHS. */ if (operand_equal_p (lhs, t, 0)) continue; /* If we have not processed an alternative yet, then set RHS to this alternative. */ if (rhs == NULL) rhs = t; /* If we have processed an alternative (stored in RHS), then see if it is equal to this one. If it isn't, then stop the search. */ else if (! operand_equal_p (rhs, t, 0)) break; } else break; } /* If we had no interesting alternatives, then all the RHS alternatives must have been the same as LHS. */ if (!rhs) rhs = lhs; /* If we managed to iterate through each PHI alternative without breaking out of the loop, then we have a PHI which may create a useful equivalence. 
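For example (hypothetical SSA names), x_3 = PHI <y_7, y_7> allows x_3 to be treated as a copy of y_7.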
We do not need to record unwind data for this, since this is a true assignment and not an equivalence inferred from a comparison. All uses of this ssa name are dominated by this assignment, so unwinding just costs time and space. */ if (i == PHI_NUM_ARGS (phi) && may_propagate_copy (lhs, rhs)) set_value_for (lhs, rhs, const_and_copies); /* Now see if we know anything about the nonzero property for the result of this PHI. */ for (i = 0; i < PHI_NUM_ARGS (phi); i++) { if (!PHI_ARG_NONZERO (phi, i)) break; } if (i == PHI_NUM_ARGS (phi)) bitmap_set_bit (nonzero_vars, SSA_NAME_VERSION (PHI_RESULT (phi))); register_new_def (lhs, &bd->block_defs); } } /* Record any equivalences created by the incoming edge to BB. If BB has more than one incoming edge, then no equivalence is created. */ static void record_equivalences_from_incoming_edge (struct dom_walk_data *walk_data, basic_block bb) { int edge_flags; basic_block parent; struct eq_expr_value eq_expr_value; tree parent_block_last_stmt = NULL; struct dom_walk_block_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); /* If our parent block ended with a control statment, then we may be able to record some equivalences based on which outgoing edge from the parent was followed. */ parent = get_immediate_dominator (CDI_DOMINATORS, bb); if (parent) { parent_block_last_stmt = last_stmt (parent); if (parent_block_last_stmt && !is_ctrl_stmt (parent_block_last_stmt)) parent_block_last_stmt = NULL; } eq_expr_value.src = NULL; eq_expr_value.dst = NULL; /* If we have a single predecessor, then extract EDGE_FLAGS from our single incoming edge. Otherwise clear EDGE_FLAGS and PARENT_BLOCK_LAST_STMT since they're not needed. */ if (bb->pred && ! bb->pred->pred_next && parent_block_last_stmt && bb_for_stmt (parent_block_last_stmt) == bb->pred->src) { edge_flags = bb->pred->flags; } else { edge_flags = 0; parent_block_last_stmt = NULL; } /* If our parent block ended in a COND_EXPR, add any equivalences created by the COND_EXPR to the hash table and initialize EQ_EXPR_VALUE appropriately. EQ_EXPR_VALUE is an assignment expression created when BB's immediate dominator ends in a COND_EXPR statement whose predicate is of the form 'VAR == VALUE', where VALUE may be another variable or a constant. This is used to propagate VALUE on the THEN_CLAUSE of that conditional. This assignment is inserted in CONST_AND_COPIES so that the copy and constant propagator can find more propagation opportunities. */ if (parent_block_last_stmt && bb->pred->pred_next == NULL && TREE_CODE (parent_block_last_stmt) == COND_EXPR && (edge_flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))) eq_expr_value = get_eq_expr_value (parent_block_last_stmt, (edge_flags & EDGE_TRUE_VALUE) != 0, &bd->avail_exprs, bb, &bd->vrp_variables); /* Similarly when the parent block ended in a SWITCH_EXPR. We can only know the value of the switch's condition if the dominator parent is also the only predecessor of this block. */ else if (parent_block_last_stmt && bb->pred->pred_next == NULL && bb->pred->src == parent && TREE_CODE (parent_block_last_stmt) == SWITCH_EXPR) { tree switch_cond = SWITCH_COND (parent_block_last_stmt); /* If the switch's condition is an SSA variable, then we may know its value at each of the case labels. */ if (TREE_CODE (switch_cond) == SSA_NAME) { tree switch_vec = SWITCH_LABELS (parent_block_last_stmt); size_t i, n = TREE_VEC_LENGTH (switch_vec); int case_count = 0; tree match_case = NULL_TREE; /* Search the case labels for those whose destination is the current basic block. 
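For instance (hypothetical values), if the only case label transferring control to BB is 'case 3:', then the switch condition must equal 3 on entry to BB; a case range or the default label yields no such single value.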
*/ for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (switch_vec, i); if (label_to_block (CASE_LABEL (elt)) == bb) { if (++case_count > 1 || CASE_HIGH (elt)) break; match_case = elt; } } /* If we encountered precisely one CASE_LABEL_EXPR and it was not the default case, or a case range, then we know the exact value of SWITCH_COND which caused us to get to this block. Record that equivalence in EQ_EXPR_VALUE. */ if (case_count == 1 && match_case && CASE_LOW (match_case) && !CASE_HIGH (match_case)) { eq_expr_value.dst = switch_cond; eq_expr_value.src = CASE_LOW (match_case); } } } /* If EQ_EXPR_VALUE (VAR == VALUE) is given, register the VALUE as a new value for VAR, so that occurrences of VAR can be replaced with VALUE while re-writing the THEN arm of a COND_EXPR. */ if (eq_expr_value.src && eq_expr_value.dst) record_equality (eq_expr_value.dst, eq_expr_value.src, &bd->const_and_copies); } /* Dump SSA statistics on FILE. */ void dump_dominator_optimization_stats (FILE *file) { long n_exprs; fprintf (file, "Total number of statements: %6ld\n\n", opt_stats.num_stmts); fprintf (file, "Exprs considered for dominator optimizations: %6ld\n", opt_stats.num_exprs_considered); n_exprs = opt_stats.num_exprs_considered; if (n_exprs == 0) n_exprs = 1; fprintf (file, " Redundant expressions eliminated: %6ld (%.0f%%)\n", opt_stats.num_re, PERCENT (opt_stats.num_re, n_exprs)); fprintf (file, "\nHash table statistics:\n"); fprintf (file, " avail_exprs: "); htab_statistics2 (file, avail_exprs); } /* Dump SSA statistics on stderr. */ void debug_dominator_optimization_stats (void) { dump_dominator_optimization_stats (stderr); } /* Dump statistics for the hash table HTAB. */ /* XXX Duplicates htab_statistics in tree-into-ssa.c */ static void htab_statistics2 (FILE *file, htab_t htab) { fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n", (long) htab_size (htab), (long) htab_elements (htab), htab_collisions (htab)); } /* Record the fact that VAR has a nonzero value, though we may not know its exact value. Note that if VAR is already known to have a nonzero value, then we do nothing. */ static void record_var_is_nonzero (tree var, varray_type *block_nonzero_vars_p) { int indx = SSA_NAME_VERSION (var); if (bitmap_bit_p (nonzero_vars, indx)) return; /* Mark it in the global table. */ bitmap_set_bit (nonzero_vars, indx); /* Record this SSA_NAME so that we can reset the global table when we leave this block. */ if (! *block_nonzero_vars_p) VARRAY_TREE_INIT (*block_nonzero_vars_p, 2, "block_nonzero_vars"); VARRAY_PUSH_TREE (*block_nonzero_vars_p, var); } /* Enter a statement into the true/false expression hash table indicating that the condition COND has the value VALUE. */ static void record_cond (tree cond, tree value, varray_type *block_avail_exprs_p) { struct expr_hash_elt *element = xmalloc (sizeof (struct expr_hash_elt)); void **slot; initialize_hash_element (cond, value, element); slot = htab_find_slot_with_hash (avail_exprs, (void *)element, element->hash, true); if (*slot == NULL) { *slot = (void *) element; if (! *block_avail_exprs_p) VARRAY_TREE_INIT (*block_avail_exprs_p, 20, "block_avail_exprs"); VARRAY_PUSH_TREE (*block_avail_exprs_p, cond); } else free (element); } /* COND is a condition which is known to be true. Record variants of COND which must also be true. For example, if a < b is true, then a <= b must also be true. 
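Likewise a != b must be true, and for floating point operands we may record that a and b are ordered (neither one is a NaN).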
*/ static void record_dominating_conditions (tree cond, varray_type *block_avail_exprs_p) { switch (TREE_CODE (cond)) { case LT_EXPR: record_cond (build2 (LE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (ORDERED_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (NE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (LTGT_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); break; case GT_EXPR: record_cond (build2 (GE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (ORDERED_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (NE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (LTGT_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); break; case GE_EXPR: case LE_EXPR: record_cond (build2 (ORDERED_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); break; case EQ_EXPR: record_cond (build2 (ORDERED_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (LE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (GE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); break; case UNORDERED_EXPR: record_cond (build2 (NE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (UNLE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (UNGE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (UNEQ_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (UNLT_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (UNGT_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); break; case UNLT_EXPR: record_cond (build2 (UNLE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (NE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); break; case UNGT_EXPR: record_cond (build2 (UNGE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (NE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); break; case UNEQ_EXPR: record_cond (build2 (UNLE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); 
record_cond (build2 (UNGE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); break; case LTGT_EXPR: record_cond (build2 (NE_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); record_cond (build2 (ORDERED_EXPR, boolean_type_node, TREE_OPERAND (cond, 0), TREE_OPERAND (cond, 1)), boolean_true_node, block_avail_exprs_p); default: break; } } /* A helper function for record_const_or_copy and record_equality. Do the work of recording the value and undo info. */ static void record_const_or_copy_1 (tree x, tree y, tree prev_x, varray_type *block_const_and_copies_p) { set_value_for (x, y, const_and_copies); if (!*block_const_and_copies_p) VARRAY_TREE_INIT (*block_const_and_copies_p, 2, "block_const_and_copies"); VARRAY_PUSH_TREE (*block_const_and_copies_p, x); VARRAY_PUSH_TREE (*block_const_and_copies_p, prev_x); } /* Record that X is equal to Y in const_and_copies. Record undo information in the block-local varray. */ static void record_const_or_copy (tree x, tree y, varray_type *block_const_and_copies_p) { tree prev_x = get_value_for (x, const_and_copies); if (TREE_CODE (y) == SSA_NAME) { tree tmp = get_value_for (y, const_and_copies); if (tmp) y = tmp; } record_const_or_copy_1 (x, y, prev_x, block_const_and_copies_p); } /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR. This constrains the cases in which we may treat this as assignment. */ static void record_equality (tree x, tree y, varray_type *block_const_and_copies_p) { tree prev_x = NULL, prev_y = NULL; if (TREE_CODE (x) == SSA_NAME) prev_x = get_value_for (x, const_and_copies); if (TREE_CODE (y) == SSA_NAME) prev_y = get_value_for (y, const_and_copies); /* If one of the previous values is invariant, then use that. Otherwise it doesn't matter which value we choose, just so long as we canonicalize on one value. */ if (TREE_INVARIANT (y)) ; else if (TREE_INVARIANT (x)) prev_x = x, x = y, y = prev_x, prev_x = prev_y; else if (prev_x && TREE_INVARIANT (prev_x)) x = y, y = prev_x, prev_x = prev_y; else if (prev_y) y = prev_y; /* After the swapping, we must have one SSA_NAME. */ if (TREE_CODE (x) != SSA_NAME) return; /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a variable compared against zero. If we're honoring signed zeros, then we cannot record this value unless we know that the value is nonzero. */ if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x))) && (TREE_CODE (y) != REAL_CST || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y)))) return; record_const_or_copy_1 (x, y, prev_x, block_const_and_copies_p); } /* STMT is a MODIFY_EXPR for which we were unable to find RHS in the hash tables. Try to simplify the RHS using whatever equivalences we may have recorded. If we are able to simplify the RHS, then lookup the simplified form in the hash table and return the result. Otherwise return NULL. */ static tree simplify_rhs_and_lookup_avail_expr (struct dom_walk_data *walk_data, tree stmt, stmt_ann_t ann, int insert) { tree rhs = TREE_OPERAND (stmt, 1); enum tree_code rhs_code = TREE_CODE (rhs); tree result = NULL; struct dom_walk_block_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); /* If we have lhs = ~x, look and see if we earlier had x = ~y. In which case we can change this statement to be lhs = y. Which can then be copy propagated. Similarly for negation. 
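For example (hypothetical SSA names), given b_2 = ~c_3 and a later statement a_1 = ~b_2, the latter can be looked up as a_1 = c_3, which copy propagation can then exploit.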
*/ if ((rhs_code == BIT_NOT_EXPR || rhs_code == NEGATE_EXPR) && TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME) { /* Get the definition statement for our RHS. */ tree rhs_def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (rhs, 0)); /* See if the RHS_DEF_STMT has the same form as our statement. */ if (TREE_CODE (rhs_def_stmt) == MODIFY_EXPR && TREE_CODE (TREE_OPERAND (rhs_def_stmt, 1)) == rhs_code) { tree rhs_def_operand; rhs_def_operand = TREE_OPERAND (TREE_OPERAND (rhs_def_stmt, 1), 0); /* Verify that RHS_DEF_OPERAND is a suitable SSA variable. */ if (TREE_CODE (rhs_def_operand) == SSA_NAME && ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs_def_operand)) result = update_rhs_and_lookup_avail_expr (stmt, rhs_def_operand, &bd->avail_exprs, ann, insert); } } /* If we have z = (x OP C1), see if we earlier had x = y OP C2. If OP is associative, create and fold (y OP C2) OP C1 which should result in (y OP C3), use that as the RHS for the assignment. Add minus to this, as we handle it specially below. */ if ((associative_tree_code (rhs_code) || rhs_code == MINUS_EXPR) && TREE_CODE (TREE_OPERAND (rhs, 0)) == SSA_NAME && is_gimple_min_invariant (TREE_OPERAND (rhs, 1))) { tree rhs_def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (rhs, 0)); /* See if the RHS_DEF_STMT has the same form as our statement. */ if (TREE_CODE (rhs_def_stmt) == MODIFY_EXPR) { tree rhs_def_rhs = TREE_OPERAND (rhs_def_stmt, 1); enum tree_code rhs_def_code = TREE_CODE (rhs_def_rhs); if (rhs_code == rhs_def_code || (rhs_code == PLUS_EXPR && rhs_def_code == MINUS_EXPR) || (rhs_code == MINUS_EXPR && rhs_def_code == PLUS_EXPR)) { tree def_stmt_op0 = TREE_OPERAND (rhs_def_rhs, 0); tree def_stmt_op1 = TREE_OPERAND (rhs_def_rhs, 1); if (TREE_CODE (def_stmt_op0) == SSA_NAME && ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def_stmt_op0) && is_gimple_min_invariant (def_stmt_op1)) { tree outer_const = TREE_OPERAND (rhs, 1); tree type = TREE_TYPE (TREE_OPERAND (stmt, 0)); tree t; /* If we care about correct floating point results, then don't fold x + c1 - c2. Note that we need to take both the codes and the signs to figure this out. */ if (FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations && (rhs_def_code == PLUS_EXPR || rhs_def_code == MINUS_EXPR)) { bool neg = false; neg ^= (rhs_code == MINUS_EXPR); neg ^= (rhs_def_code == MINUS_EXPR); neg ^= real_isneg (TREE_REAL_CST_PTR (outer_const)); neg ^= real_isneg (TREE_REAL_CST_PTR (def_stmt_op1)); if (neg) goto dont_fold_assoc; } /* Ho hum. So fold will only operate on the outermost thingy that we give it, so we have to build the new expression in two pieces. This requires that we handle combinations of plus and minus. */ if (rhs_def_code != rhs_code) { if (rhs_def_code == MINUS_EXPR) t = build (MINUS_EXPR, type, outer_const, def_stmt_op1); else t = build (MINUS_EXPR, type, def_stmt_op1, outer_const); rhs_code = PLUS_EXPR; } else if (rhs_def_code == MINUS_EXPR) t = build (PLUS_EXPR, type, def_stmt_op1, outer_const); else t = build (rhs_def_code, type, def_stmt_op1, outer_const); t = local_fold (t); t = build (rhs_code, type, def_stmt_op0, t); t = local_fold (t); /* If the result is a suitable looking gimple expression, then use it instead of the original for STMT. 
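For example (hypothetical SSA names), with x_4 = y_2 + 3 and STMT z_5 = x_4 + 4, the rebuilt expression is y_2 + 7, which is then used for the lookup.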
*/ if (TREE_CODE (t) == SSA_NAME || (TREE_CODE_CLASS (TREE_CODE (t)) == '1' && TREE_CODE (TREE_OPERAND (t, 0)) == SSA_NAME) || ((TREE_CODE_CLASS (TREE_CODE (t)) == '2' || TREE_CODE_CLASS (TREE_CODE (t)) == '<') && TREE_CODE (TREE_OPERAND (t, 0)) == SSA_NAME && is_gimple_val (TREE_OPERAND (t, 1)))) result = update_rhs_and_lookup_avail_expr (stmt, t, &bd->avail_exprs, ann, insert); } } } dont_fold_assoc:; } /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR and BIT_AND_EXPR respectively if the first operand is greater than zero and the second operand is an exact power of two. */ if ((rhs_code == TRUNC_DIV_EXPR || rhs_code == TRUNC_MOD_EXPR) && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (rhs, 0))) && integer_pow2p (TREE_OPERAND (rhs, 1))) { tree val; tree op = TREE_OPERAND (rhs, 0); if (TYPE_UNSIGNED (TREE_TYPE (op))) { val = integer_one_node; } else { tree dummy_cond = walk_data->global_data; if (! dummy_cond) { dummy_cond = build (GT_EXPR, boolean_type_node, op, integer_zero_node); dummy_cond = build (COND_EXPR, void_type_node, dummy_cond, NULL, NULL); walk_data->global_data = dummy_cond; } else { TREE_SET_CODE (TREE_OPERAND (dummy_cond, 0), GT_EXPR); TREE_OPERAND (TREE_OPERAND (dummy_cond, 0), 0) = op; TREE_OPERAND (TREE_OPERAND (dummy_cond, 0), 1) = integer_zero_node; } val = simplify_cond_and_lookup_avail_expr (dummy_cond, &bd->avail_exprs, NULL, false); } if (val && integer_onep (val)) { tree t; tree op0 = TREE_OPERAND (rhs, 0); tree op1 = TREE_OPERAND (rhs, 1); if (rhs_code == TRUNC_DIV_EXPR) t = build (RSHIFT_EXPR, TREE_TYPE (op0), op0, build_int_2 (tree_log2 (op1), 0)); else t = build (BIT_AND_EXPR, TREE_TYPE (op0), op0, local_fold (build (MINUS_EXPR, TREE_TYPE (op1), op1, integer_one_node))); result = update_rhs_and_lookup_avail_expr (stmt, t, &bd->avail_exprs, ann, insert); } } /* Transform ABS (X) into X or -X as appropriate. */ if (rhs_code == ABS_EXPR && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (rhs, 0)))) { tree val; tree op = TREE_OPERAND (rhs, 0); tree type = TREE_TYPE (op); if (TYPE_UNSIGNED (type)) { val = integer_zero_node; } else { tree dummy_cond = walk_data->global_data; if (! dummy_cond) { dummy_cond = build (LE_EXPR, boolean_type_node, op, integer_zero_node); dummy_cond = build (COND_EXPR, void_type_node, dummy_cond, NULL, NULL); walk_data->global_data = dummy_cond; } else { TREE_SET_CODE (TREE_OPERAND (dummy_cond, 0), LE_EXPR); TREE_OPERAND (TREE_OPERAND (dummy_cond, 0), 0) = op; TREE_OPERAND (TREE_OPERAND (dummy_cond, 0), 1) = fold_convert (type, integer_zero_node); } val = simplify_cond_and_lookup_avail_expr (dummy_cond, &bd->avail_exprs, NULL, false); if (!val) { TREE_SET_CODE (TREE_OPERAND (dummy_cond, 0), GE_EXPR); TREE_OPERAND (TREE_OPERAND (dummy_cond, 0), 0) = op; TREE_OPERAND (TREE_OPERAND (dummy_cond, 0), 1) = fold_convert (type, integer_zero_node); val = simplify_cond_and_lookup_avail_expr (dummy_cond, &bd->avail_exprs, NULL, false); if (val) { if (integer_zerop (val)) val = integer_one_node; else if (integer_onep (val)) val = integer_zero_node; } } } if (val && (integer_onep (val) || integer_zerop (val))) { tree t; if (integer_onep (val)) t = build1 (NEGATE_EXPR, TREE_TYPE (op), op); else t = op; result = update_rhs_and_lookup_avail_expr (stmt, t, &bd->avail_exprs, ann, insert); } } /* Optimize *"foo" into 'f'. This is done here rather than in fold to avoid problems with stuff like &*"foo". 
*/ if (TREE_CODE (rhs) == INDIRECT_REF || TREE_CODE (rhs) == ARRAY_REF) { tree t = fold_read_from_constant_string (rhs); if (t) result = update_rhs_and_lookup_avail_expr (stmt, t, &bd->avail_exprs, ann, insert); } return result; } /* COND is a condition of the form: x == const or x != const Look back to x's defining statement and see if x is defined as x = (type) y; If const is unchanged if we convert it to type, then we can build the equivalent expression: y == const or y != const Which may allow further optimizations. Return the equivalent comparison or NULL if no such equivalent comparison was found. */ static tree find_equivalent_equality_comparison (tree cond) { tree op0 = TREE_OPERAND (cond, 0); tree op1 = TREE_OPERAND (cond, 1); tree def_stmt = SSA_NAME_DEF_STMT (op0); /* OP0 might have been a parameter, so first make sure it was defined by a MODIFY_EXPR. */ if (def_stmt && TREE_CODE (def_stmt) == MODIFY_EXPR) { tree def_rhs = TREE_OPERAND (def_stmt, 1); /* Now make sure the RHS of the MODIFY_EXPR is a typecast. */ if ((TREE_CODE (def_rhs) == NOP_EXPR || TREE_CODE (def_rhs) == CONVERT_EXPR) && TREE_CODE (TREE_OPERAND (def_rhs, 0)) == SSA_NAME) { tree def_rhs_inner = TREE_OPERAND (def_rhs, 0); tree def_rhs_inner_type = TREE_TYPE (def_rhs_inner); tree new; if (TYPE_PRECISION (def_rhs_inner_type) > TYPE_PRECISION (TREE_TYPE (def_rhs))) return NULL; /* What we want to prove is that if we convert OP1 to the type of the object inside the NOP_EXPR that the result is still equivalent to SRC. If that is true, the build and return new equivalent condition which uses the source of the typecast and the new constant (which has only changed its type). */ new = build1 (TREE_CODE (def_rhs), def_rhs_inner_type, op1); new = local_fold (new); if (is_gimple_val (new) && tree_int_cst_equal (new, op1)) return build (TREE_CODE (cond), TREE_TYPE (cond), def_rhs_inner, new); } } return NULL; } /* STMT is a COND_EXPR for which we could not trivially determine its result. This routine attempts to find equivalent forms of the condition which we may be able to optimize better. It also uses simple value range propagation to optimize conditionals. */ static tree simplify_cond_and_lookup_avail_expr (tree stmt, varray_type *block_avail_exprs_p, stmt_ann_t ann, int insert) { tree cond = COND_EXPR_COND (stmt); if (TREE_CODE_CLASS (TREE_CODE (cond)) == '<') { tree op0 = TREE_OPERAND (cond, 0); tree op1 = TREE_OPERAND (cond, 1); if (TREE_CODE (op0) == SSA_NAME && is_gimple_min_invariant (op1)) { int limit; tree low, high, cond_low, cond_high; int lowequal, highequal, swapped, no_overlap, subset, cond_inverted; varray_type vrp_records; struct vrp_element *element; /* First see if we have test of an SSA_NAME against a constant where the SSA_NAME is defined by an earlier typecast which is irrelevant when performing tests against the given constant. */ if (TREE_CODE (cond) == EQ_EXPR || TREE_CODE (cond) == NE_EXPR) { tree new_cond = find_equivalent_equality_comparison (cond); if (new_cond) { /* Update the statement to use the new equivalent condition. */ COND_EXPR_COND (stmt) = new_cond; ann->modified = 1; /* Lookup the condition and return its known value if it exists. */ new_cond = lookup_avail_expr (stmt, block_avail_exprs_p, insert); if (new_cond) return new_cond; /* The operands have changed, so update op0 and op1. */ op0 = TREE_OPERAND (cond, 0); op1 = TREE_OPERAND (cond, 1); } } /* Consult the value range records for this variable (if they exist) to see if we can eliminate or simplify this conditional. 
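For instance (hypothetical values), if a dominating conditional already established that a_1 is at most 10 and the current test is a_1 > 20, the recorded range cannot overlap the tested range and the conditional can be resolved at compile time.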
Note two tests are necessary to determine no records exist. First we have to see if the virtual array exists, if it exists, then we have to check its active size. Also note the vast majority of conditionals are not testing a variable which has had its range constrained by an earlier conditional. So this filter avoids a lot of unnecessary work. */ vrp_records = VARRAY_GENERIC_PTR (vrp_data, SSA_NAME_VERSION (op0)); if (vrp_records == NULL) return NULL; limit = VARRAY_ACTIVE_SIZE (vrp_records); /* If we have no value range records for this variable, or we are unable to extract a range for this condition, then there is nothing to do. */ if (limit == 0 || ! extract_range_from_cond (cond, &cond_high, &cond_low, &cond_inverted)) return NULL; /* We really want to avoid unnecessary computations of range info. So all ranges are computed lazily; this avoids a lot of unnecessary work. ie, we record the conditional, but do not process how it constrains the variable's potential values until we know that processing the condition could be helpful. However, we do not want to have to walk a potentially long list of ranges, nor do we want to compute a variable's range more than once for a given path. Luckily, each time we encounter a conditional that can not be otherwise optimized we will end up here and we will compute the necessary range information for the variable used in this condition. Thus you can conclude that there will never be more than one conditional associated with a variable which has not been processed. So we never need to merge more than one new conditional into the current range. These properties also help us avoid unnecessary work. */ element = (struct vrp_element *)VARRAY_GENERIC_PTR (vrp_records, limit - 1); if (element->high && element->low) { /* The last element has been processed, so there is no range merging to do, we can simply use the high/low values recorded in the last element. */ low = element->low; high = element->high; } else { tree tmp_high, tmp_low; int dummy; /* The last element has not been processed. Process it now. */ extract_range_from_cond (element->cond, &tmp_high, &tmp_low, &dummy); /* If this is the only element, then no merging is necessary, the high/low values from extract_range_from_cond are all we need. */ if (limit == 1) { low = tmp_low; high = tmp_high; } else { /* Get the high/low value from the previous element. */ struct vrp_element *prev = (struct vrp_element *)VARRAY_GENERIC_PTR (vrp_records, limit - 2); low = prev->low; high = prev->high; /* Merge in this element's range with the range from the previous element. The low value for the merged range is the maximum of the previous low value and the low value of this record. Similarly the high value for the merged range is the minimum of the previous high value and the high value of this record. */ low = (tree_int_cst_compare (low, tmp_low) == 1 ? low : tmp_low); high = (tree_int_cst_compare (high, tmp_high) == -1 ? high : tmp_high); } /* And record the computed range. */ element->low = low; element->high = high; } /* After we have constrained this variable's potential values, we try to determine the result of the given conditional. To simplify later tests, first determine if the current low value is the same low value as the conditional. Similarly for the current high value and the high value for the conditional. */ lowequal = tree_int_cst_equal (low, cond_low); highequal = tree_int_cst_equal (high, cond_high); if (lowequal && highequal) return (cond_inverted ? 
boolean_false_node : boolean_true_node); /* To simplify the overlap/subset tests below we may want to swap the two ranges so that the larger of the two ranges occurs "first". */ swapped = 0; if (tree_int_cst_compare (low, cond_low) == 1 || (lowequal && tree_int_cst_compare (cond_high, high) == 1)) { tree temp; swapped = 1; temp = low; low = cond_low; cond_low = temp; temp = high; high = cond_high; cond_high = temp; } /* Now determine if there is no overlap in the ranges or if the second range is a subset of the first range. */ no_overlap = tree_int_cst_lt (high, cond_low); subset = tree_int_cst_compare (cond_high, high) != 1; /* If there was no overlap in the ranges, then this conditional always has a false value (unless we had to invert this conditional, in which case it always has a true value). */ if (no_overlap) return (cond_inverted ? boolean_true_node : boolean_false_node); /* If the current range is a subset of the condition's range, then this conditional always has a true value (unless we had to invert this conditional, in which case it always has a false value). */ if (subset && swapped) return (cond_inverted ? boolean_false_node : boolean_true_node); /* We were unable to determine the result of the conditional. However, we may be able to simplify the conditional. First merge the ranges in the same manner as range merging above. */ low = tree_int_cst_compare (low, cond_low) == 1 ? low : cond_low; high = tree_int_cst_compare (high, cond_high) == -1 ? high : cond_high; /* If the range has converged to a single point, then turn this into an equality comparison. */ if (TREE_CODE (cond) != EQ_EXPR && TREE_CODE (cond) != NE_EXPR && tree_int_cst_equal (low, high)) { TREE_SET_CODE (cond, EQ_EXPR); TREE_OPERAND (cond, 1) = high; } } } return 0; } /* STMT is a SWITCH_EXPR for which we could not trivially determine its result. This routine attempts to find equivalent forms of the condition which we may be able to optimize better. */ static tree simplify_switch_and_lookup_avail_expr (tree stmt, varray_type *block_avail_exprs_p, stmt_ann_t ann, int insert) { tree cond = SWITCH_COND (stmt); tree def, to, ti; /* The optimization that we really care about is removing unnecessary casts. That will let us do much better in propagating the inferred constant at the switch target. */ if (TREE_CODE (cond) == SSA_NAME) { def = SSA_NAME_DEF_STMT (cond); if (TREE_CODE (def) == MODIFY_EXPR) { def = TREE_OPERAND (def, 1); if (TREE_CODE (def) == NOP_EXPR) { def = TREE_OPERAND (def, 0); to = TREE_TYPE (cond); ti = TREE_TYPE (def); /* If we have an extension that preserves sign, then we can copy the source value into the switch. */ if (TYPE_UNSIGNED (to) == TYPE_UNSIGNED (ti) && TYPE_PRECISION (to) >= TYPE_PRECISION (ti) && is_gimple_val (def)) { SWITCH_COND (stmt) = def; ann->modified = 1; return lookup_avail_expr (stmt, block_avail_exprs_p, insert); } } } } return 0; } /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current known value for that SSA_NAME (or NULL if no value is known). NONZERO_VARS is the set of SSA_NAMES known to have a nonzero value, even if we don't know their precise value. Propagate values from CONST_AND_COPIES and NONZERO_VARS into the PHI nodes of the successors of BB. */ static void cprop_into_successor_phis (basic_block bb, varray_type const_and_copies, bitmap nonzero_vars) { edge e; /* This can get rather expensive if the implementation is naive in how it finds the phi alternative associated with a particular edge.
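For example (a hypothetical GIMPLE fragment), if successor block B3 contains x_4 = PHI <x_1(B1), x_2(B2)> and we are leaving B1 with x_1 known to be the constant 5, then the alternative associated with the B1->B3 edge is replaced with 5. The hint used below merely remembers the argument index at which the edge was found in the previous PHI node of the block, so we usually avoid rescanning the whole argument list.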
*/ for (e = bb->succ; e; e = e->succ_next) { tree phi; int phi_num_args; int hint; /* If this is an abnormal edge, then we do not want to copy propagate into the PHI alternative associated with this edge. */ if (e->flags & EDGE_ABNORMAL) continue; phi = phi_nodes (e->dest); if (! phi) continue; /* There is no guarantee that for any two PHI nodes in a block that the phi alternative associated with a particular edge will be at the same index in the phi alternative array. However, it is very likely they will be the same. So we keep track of the index of the alternative where we found the edge in the previous phi node and check that index first in the next phi node. If that hint fails, then we actually search all the entries. */ phi_num_args = PHI_NUM_ARGS (phi); hint = phi_num_args; for ( ; phi; phi = PHI_CHAIN (phi)) { int i; tree new; use_operand_p orig_p; tree orig; /* If the hint is valid (!= phi_num_args), see if it points us to the desired phi alternative. */ if (hint != phi_num_args && PHI_ARG_EDGE (phi, hint) == e) ; else { /* The hint was either invalid or did not point to the correct phi alternative. Search all the alternatives for the correct one. Update the hint. */ for (i = 0; i < phi_num_args; i++) if (PHI_ARG_EDGE (phi, i) == e) break; hint = i; } #ifdef ENABLE_CHECKING /* If we did not find the proper alternative, then something is horribly wrong. */ if (hint == phi_num_args) abort (); #endif /* The alternative may be associated with a constant, so verify it is an SSA_NAME before doing anything with it. */ orig_p = PHI_ARG_DEF_PTR (phi, hint); orig = USE_FROM_PTR (orig_p); if (TREE_CODE (orig) != SSA_NAME) continue; /* If the alternative is known to have a nonzero value, record that fact in the PHI node itself for future use. */ if (bitmap_bit_p (nonzero_vars, SSA_NAME_VERSION (orig))) PHI_ARG_NONZERO (phi, hint) = true; /* If we have *ORIG_P in our constant/copy table, then replace ORIG_P with its value in our constant/copy table. */ new = VARRAY_TREE (const_and_copies, SSA_NAME_VERSION (orig)); if (new && (TREE_CODE (new) == SSA_NAME || is_gimple_min_invariant (new)) && may_propagate_copy (orig, new)) { propagate_value (orig_p, new); } } } } /* Propagate known constants/copies into PHI nodes of BB's successor blocks. */ static void cprop_into_phis (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED, basic_block bb) { cprop_into_successor_phis (bb, const_and_copies, nonzero_vars); } /* Search for redundant computations in STMT. If any are found, then replace them with the variable holding the result of the computation. If safe, record this expression into the available expression hash table. */ static bool eliminate_redundant_computations (struct dom_walk_data *walk_data, tree stmt, stmt_ann_t ann) { v_may_def_optype v_may_defs = V_MAY_DEF_OPS (ann); tree *expr_p, def = NULL_TREE; bool insert = true; tree cached_lhs; bool retval = false; struct dom_walk_block_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); if (TREE_CODE (stmt) == MODIFY_EXPR) def = TREE_OPERAND (stmt, 0); /* Certain expressions on the RHS can be optimized away, but can not themselves be entered into the hash tables. */ if (ann->makes_aliased_stores || ! def || TREE_CODE (def) != SSA_NAME || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def) || NUM_V_MAY_DEFS (v_may_defs) != 0) insert = false; /* Check if the expression has been computed before. 
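For example (made-up SSA names), if a dominating block computed a_1 = b_2 + c_3 and the current statement is d_4 = b_2 + c_3 with the same virtual operands, the lookup below returns a_1 and the redundant right-hand side is replaced with a_1, leaving a copy for the constant/copy propagator to clean up.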
*/ cached_lhs = lookup_avail_expr (stmt, &bd->avail_exprs, insert); /* If this is an assignment and the RHS was not in the hash table, then try to simplify the RHS and lookup the new RHS in the hash table. */ if (! cached_lhs && TREE_CODE (stmt) == MODIFY_EXPR) cached_lhs = simplify_rhs_and_lookup_avail_expr (walk_data, stmt, ann, insert); /* Similarly if this is a COND_EXPR and we did not find its expression in the hash table, simplify the condition and try again. */ else if (! cached_lhs && TREE_CODE (stmt) == COND_EXPR) cached_lhs = simplify_cond_and_lookup_avail_expr (stmt, &bd->avail_exprs, ann, insert); /* Similarly for a SWITCH_EXPR. */ else if (!cached_lhs && TREE_CODE (stmt) == SWITCH_EXPR) cached_lhs = simplify_switch_and_lookup_avail_expr (stmt, &bd->avail_exprs, ann, insert); opt_stats.num_exprs_considered++; /* Get a pointer to the expression we are trying to optimize. */ if (TREE_CODE (stmt) == COND_EXPR) expr_p = &COND_EXPR_COND (stmt); else if (TREE_CODE (stmt) == SWITCH_EXPR) expr_p = &SWITCH_COND (stmt); else if (TREE_CODE (stmt) == RETURN_EXPR && TREE_OPERAND (stmt, 0)) expr_p = &TREE_OPERAND (TREE_OPERAND (stmt, 0), 1); else expr_p = &TREE_OPERAND (stmt, 1); /* It is safe to ignore types here since we have already done type checking in the hashing and equality routines. In fact type checking here merely gets in the way of constant propagation. Also, make sure that it is safe to propagate CACHED_LHS into *EXPR_P. */ if (cached_lhs && (TREE_CODE (cached_lhs) != SSA_NAME || may_propagate_copy (*expr_p, cached_lhs))) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " Replaced redundant expr '"); print_generic_expr (dump_file, *expr_p, dump_flags); fprintf (dump_file, "' with '"); print_generic_expr (dump_file, cached_lhs, dump_flags); fprintf (dump_file, "'\n"); } opt_stats.num_re++; #if defined ENABLE_CHECKING if (TREE_CODE (cached_lhs) != SSA_NAME && !is_gimple_min_invariant (cached_lhs)) abort (); #endif if (TREE_CODE (cached_lhs) == ADDR_EXPR || (POINTER_TYPE_P (TREE_TYPE (*expr_p)) && is_gimple_min_invariant (cached_lhs))) retval = true; propagate_tree_value (expr_p, cached_lhs); ann->modified = 1; } return retval; } /* STMT, a MODIFY_EXPR, may create certain equivalences, in either the available expressions table or the const_and_copies table. Detect and record those equivalences. */ static void record_equivalences_from_stmt (tree stmt, varray_type *block_avail_exprs_p, varray_type *block_nonzero_vars_p, int may_optimize_p, stmt_ann_t ann) { tree lhs = TREE_OPERAND (stmt, 0); enum tree_code lhs_code = TREE_CODE (lhs); int i; if (lhs_code == SSA_NAME) { tree rhs = TREE_OPERAND (stmt, 1); /* Strip away any useless type conversions. */ STRIP_USELESS_TYPE_CONVERSION (rhs); /* If the RHS of the assignment is a constant or another variable that may be propagated, register it in the CONST_AND_COPIES table. We do not need to record unwind data for this, since this is a true assignment and not an equivalence inferred from a comparison. All uses of this ssa name are dominated by this assignment, so unwinding just costs time and space. */ if (may_optimize_p && (TREE_CODE (rhs) == SSA_NAME || is_gimple_min_invariant (rhs))) set_value_for (lhs, rhs, const_and_copies); /* alloca never returns zero and the address of a non-weak symbol is never zero. NOP_EXPRs and CONVERT_EXPRs can be completely stripped as they do not affect this equivalence. 
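For example, after p_1 = &local_var (where local_var is not weak) or p_1 = __builtin_alloca (n_2), p_1 is recorded as nonzero, which lets a dominated test such as p_1 == 0 be resolved to false by lookup_avail_expr (the SSA names here are made up for illustration).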
*/ while (TREE_CODE (rhs) == NOP_EXPR || TREE_CODE (rhs) == CONVERT_EXPR) rhs = TREE_OPERAND (rhs, 0); if (alloca_call_p (rhs) || (TREE_CODE (rhs) == ADDR_EXPR && DECL_P (TREE_OPERAND (rhs, 0)) && ! DECL_WEAK (TREE_OPERAND (rhs, 0)))) record_var_is_nonzero (lhs, block_nonzero_vars_p); /* IOR of any value with a nonzero value will result in a nonzero value. Even if we do not know the exact result recording that the result is nonzero is worth the effort. */ if (TREE_CODE (rhs) == BIT_IOR_EXPR && integer_nonzerop (TREE_OPERAND (rhs, 1))) record_var_is_nonzero (lhs, block_nonzero_vars_p); } /* Look at both sides for pointer dereferences. If we find one, then the pointer must be nonnull and we can enter that equivalence into the hash tables. */ if (flag_delete_null_pointer_checks) for (i = 0; i < 2; i++) { tree t = TREE_OPERAND (stmt, i); /* Strip away any COMPONENT_REFs. */ while (TREE_CODE (t) == COMPONENT_REF) t = TREE_OPERAND (t, 0); /* Now see if this is a pointer dereference. */ if (TREE_CODE (t) == INDIRECT_REF) { tree op = TREE_OPERAND (t, 0); /* If the pointer is a SSA variable, then enter new equivalences into the hash table. */ while (TREE_CODE (op) == SSA_NAME) { tree def = SSA_NAME_DEF_STMT (op); record_var_is_nonzero (op, block_nonzero_vars_p); /* And walk up the USE-DEF chains noting other SSA_NAMEs which are known to have a nonzero value. */ if (def && TREE_CODE (def) == MODIFY_EXPR && TREE_CODE (TREE_OPERAND (def, 1)) == NOP_EXPR) op = TREE_OPERAND (TREE_OPERAND (def, 1), 0); else break; } } } /* A memory store, even an aliased store, creates a useful equivalence. By exchanging the LHS and RHS, creating suitable vops and recording the result in the available expression table, we may be able to expose more redundant loads. */ if (!ann->has_volatile_ops && (TREE_CODE (TREE_OPERAND (stmt, 1)) == SSA_NAME || is_gimple_min_invariant (TREE_OPERAND (stmt, 1))) && !is_gimple_reg (lhs)) { tree rhs = TREE_OPERAND (stmt, 1); tree new; size_t j; /* FIXME: If the LHS of the assignment is a bitfield and the RHS is a constant, we need to adjust the constant to fit into the type of the LHS. If the LHS is a bitfield and the RHS is not a constant, then we can not record any equivalences for this statement since we would need to represent the widening or narrowing of RHS. This fixes gcc.c-torture/execute/921016-1.c and should not be necessary if GCC represented bitfields properly. */ if (lhs_code == COMPONENT_REF && DECL_BIT_FIELD (TREE_OPERAND (lhs, 1))) { if (TREE_CONSTANT (rhs)) rhs = widen_bitfield (rhs, TREE_OPERAND (lhs, 1), lhs); else rhs = NULL; /* If the value overflowed, then we can not use this equivalence. */ if (rhs && ! is_gimple_min_invariant (rhs)) rhs = NULL; } if (rhs) { v_may_def_optype v_may_defs = V_MAY_DEF_OPS (ann); v_must_def_optype v_must_defs = V_MUST_DEF_OPS (ann); /* Build a new statement with the RHS and LHS exchanged. */ new = build (MODIFY_EXPR, TREE_TYPE (stmt), rhs, lhs); /* Get an annotation and set up the real operands. */ get_stmt_ann (new); get_stmt_operands (new); /* Clear out the virtual operands on the new statement, we are going to set them explicitly below. */ remove_vuses (new); remove_v_may_defs (new); remove_v_must_defs (new); start_ssa_stmt_operands (new); /* For each VDEF on the original statement, we want to create a VUSE of the V_MAY_DEF result or V_MUST_DEF op on the new statement. 
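As a sketch of the intent (hypothetical statements), for a store *p_1 = x_2 we enter the exchanged statement x_2 = *p_1, whose VUSEs are built below from the store's virtual definitions, so that a later load y_3 = *p_1 with the same virtual operands is found in the table and its right-hand side is replaced with x_2.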
*/ for (j = 0; j < NUM_V_MAY_DEFS (v_may_defs); j++) { tree op = V_MAY_DEF_RESULT (v_may_defs, j); add_vuse (op, new); } for (j = 0; j < NUM_V_MUST_DEFS (v_must_defs); j++) { tree op = V_MUST_DEF_OP (v_must_defs, j); add_vuse (op, new); } finalize_ssa_stmt_operands (new); /* Finally enter the statement into the available expression table. */ lookup_avail_expr (new, block_avail_exprs_p, true); } } } /* Replace *OP_P in STMT with any known equivalent value for *OP_P from CONST_AND_COPIES. */ static bool cprop_operand (stmt_ann_t ann, use_operand_p op_p, varray_type const_and_copies) { bool may_have_exposed_new_symbols = false; tree val; tree op = USE_FROM_PTR (op_p); /* If the operand has a known constant value or it is known to be a copy of some other variable, use the value or copy stored in CONST_AND_COPIES. */ val = VARRAY_TREE (const_and_copies, SSA_NAME_VERSION (op)); if (val) { tree op_type, val_type; /* Do not change the base variable in the virtual operand tables. That would make it impossible to reconstruct the renamed virtual operand if we later modify this statement. Also only allow the new value to be an SSA_NAME for propagation into virtual operands. */ if (!is_gimple_reg (op) && (get_virtual_var (val) != get_virtual_var (op) || TREE_CODE (val) != SSA_NAME)) return false; /* Get the toplevel type of each operand. */ op_type = TREE_TYPE (op); val_type = TREE_TYPE (val); /* While both types are pointers, get the type of the object pointed to. */ while (POINTER_TYPE_P (op_type) && POINTER_TYPE_P (val_type)) { op_type = TREE_TYPE (op_type); val_type = TREE_TYPE (val_type); } /* Make sure underlying types match before propagating a constant by converting the constant to the proper type. Note that convert may return a non-gimple expression, in which case we ignore this propagation opportunity. */ if (!lang_hooks.types_compatible_p (op_type, val_type) && TREE_CODE (val) != SSA_NAME) { val = fold_convert (TREE_TYPE (op), val); if (!is_gimple_min_invariant (val) && TREE_CODE (val) != SSA_NAME) return false; } /* Certain operands are not allowed to be copy propagated due to their interaction with exception handling and some GCC extensions. */ if (TREE_CODE (val) == SSA_NAME && !may_propagate_copy (op, val)) return false; /* Dump details. */ if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " Replaced '"); print_generic_expr (dump_file, op, dump_flags); fprintf (dump_file, "' with %s '", (TREE_CODE (val) != SSA_NAME ? "constant" : "variable")); print_generic_expr (dump_file, val, dump_flags); fprintf (dump_file, "'\n"); } /* If VAL is an ADDR_EXPR or a constant of pointer type, note that we may have exposed a new symbol for SSA renaming. */ if (TREE_CODE (val) == ADDR_EXPR || (POINTER_TYPE_P (TREE_TYPE (op)) && is_gimple_min_invariant (val))) may_have_exposed_new_symbols = true; propagate_value (op_p, val); /* And note that we modified this statement. This is now safe, even if we changed virtual operands since we will rescan the statement and rewrite its operands again. */ ann->modified = 1; } return may_have_exposed_new_symbols; } /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current known value for that SSA_NAME (or NULL if no value is known). Propagate values from CONST_AND_COPIES into the uses, vuses and v_may_def_ops of STMT. 
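For example (made-up SSA names), if CONST_AND_COPIES records that x_3 has the value 5 and that y_7 is a copy of z_2, then the statement a_9 = x_3 + y_7 is rewritten below as a_9 = 5 + z_2 and flagged as modified so it will be folded and rescanned by optimize_stmt.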
*/ static bool cprop_into_stmt (tree stmt, varray_type const_and_copies) { bool may_have_exposed_new_symbols = false; stmt_ann_t ann = stmt_ann (stmt); size_t i, num_uses, num_vuses, num_v_may_defs; vuse_optype vuses; v_may_def_optype v_may_defs; use_optype uses; uses = USE_OPS (ann); num_uses = NUM_USES (uses); for (i = 0; i < num_uses; i++) { use_operand_p op_p = USE_OP_PTR (uses, i); if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME) may_have_exposed_new_symbols |= cprop_operand (ann, op_p, const_and_copies); } vuses = VUSE_OPS (ann); num_vuses = NUM_VUSES (vuses); for (i = 0; i < num_vuses; i++) { use_operand_p op_p = VUSE_OP_PTR (vuses, i); if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME) may_have_exposed_new_symbols |= cprop_operand (ann, op_p, const_and_copies); } v_may_defs = V_MAY_DEF_OPS (ann); num_v_may_defs = NUM_V_MAY_DEFS (v_may_defs); for (i = 0; i < num_v_may_defs; i++) { use_operand_p op_p = V_MAY_DEF_OP_PTR (v_may_defs, i); if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME) may_have_exposed_new_symbols |= cprop_operand (ann, op_p, const_and_copies); } return may_have_exposed_new_symbols; } /* Optimize the statement pointed by iterator SI. We try to perform some simplistic global redundancy elimination and constant propagation: 1- To detect global redundancy, we keep track of expressions that have been computed in this block and its dominators. If we find that the same expression is computed more than once, we eliminate repeated computations by using the target of the first one. 2- Constant values and copy assignments. This is used to do very simplistic constant and copy propagation. When a constant or copy assignment is found, we map the value on the RHS of the assignment to the variable in the LHS in the CONST_AND_COPIES table. */ static void optimize_stmt (struct dom_walk_data *walk_data, basic_block bb, block_stmt_iterator si) { stmt_ann_t ann; tree stmt; bool may_optimize_p; bool may_have_exposed_new_symbols = false; struct dom_walk_block_data *bd = VARRAY_TOP_GENERIC_PTR (walk_data->block_data_stack); stmt = bsi_stmt (si); get_stmt_operands (stmt); ann = stmt_ann (stmt); opt_stats.num_stmts++; may_have_exposed_new_symbols = false; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Optimizing statement "); print_generic_stmt (dump_file, stmt, TDF_SLIM); } /* Const/copy propagate into USES, VUSES and the RHS of V_MAY_DEFs. */ may_have_exposed_new_symbols = cprop_into_stmt (stmt, const_and_copies); /* If the statement has been modified with constant replacements, fold its RHS before checking for redundant computations. */ if (ann->modified) { /* Try to fold the statement making sure that STMT is kept up to date. */ if (fold_stmt (bsi_stmt_ptr (si))) { stmt = bsi_stmt (si); ann = stmt_ann (stmt); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " Folded to: "); print_generic_stmt (dump_file, stmt, TDF_SLIM); } } /* Constant/copy propagation above may change the set of virtual operands associated with this statement. Folding may remove the need for some virtual operands. Indicate we will need to rescan and rewrite the statement. */ may_have_exposed_new_symbols = true; } /* Check for redundant computations. Do this optimization only for assignments that have no volatile ops and conditionals. */ may_optimize_p = (!ann->has_volatile_ops && ((TREE_CODE (stmt) == RETURN_EXPR && TREE_OPERAND (stmt, 0) && TREE_CODE (TREE_OPERAND (stmt, 0)) == MODIFY_EXPR && ! 
(TREE_SIDE_EFFECTS (TREE_OPERAND (TREE_OPERAND (stmt, 0), 1)))) || (TREE_CODE (stmt) == MODIFY_EXPR && ! TREE_SIDE_EFFECTS (TREE_OPERAND (stmt, 1))) || TREE_CODE (stmt) == COND_EXPR || TREE_CODE (stmt) == SWITCH_EXPR)); if (may_optimize_p) may_have_exposed_new_symbols |= eliminate_redundant_computations (walk_data, stmt, ann); /* Record any additional equivalences created by this statement. */ if (TREE_CODE (stmt) == MODIFY_EXPR) record_equivalences_from_stmt (stmt, &bd->avail_exprs, &bd->nonzero_vars, may_optimize_p, ann); register_definitions_for_stmt (ann, &bd->block_defs); /* If STMT is a COND_EXPR and it was modified, then we may know where it goes. If that is the case, then mark the CFG as altered. This will cause us to later call remove_unreachable_blocks and cleanup_tree_cfg when it is safe to do so. It is not safe to clean things up here since removal of edges and such can trigger the removal of PHI nodes, which in turn can release SSA_NAMEs to the manager. That's all fine and good, except that once SSA_NAMEs are released to the manager, we must not call create_ssa_name until all references to released SSA_NAMEs have been eliminated. All references to the deleted SSA_NAMEs can not be eliminated until we remove unreachable blocks. We can not remove unreachable blocks until after we have completed any queued jump threading. We can not complete any queued jump threads until we have taken appropriate variables out of SSA form. Taking variables out of SSA form can call create_ssa_name and thus we lose. Ultimately I suspect we're going to need to change the interface into the SSA_NAME manager. */ if (ann->modified) { tree val = NULL; if (TREE_CODE (stmt) == COND_EXPR) val = COND_EXPR_COND (stmt); else if (TREE_CODE (stmt) == SWITCH_EXPR) val = SWITCH_COND (stmt); if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val)) cfg_altered = true; /* If we simplified a statement in such a way as to be shown that it cannot trap, update the eh information and the cfg to match. */ if (maybe_clean_eh_stmt (stmt)) { bitmap_set_bit (need_eh_cleanup, bb->index); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Flagged to clear EH edges.\n"); } } if (may_have_exposed_new_symbols) { if (! bd->stmts_to_rescan) VARRAY_TREE_INIT (bd->stmts_to_rescan, 20, "stmts_to_rescan"); VARRAY_PUSH_TREE (bd->stmts_to_rescan, bsi_stmt (si)); } } /* Replace the RHS of STMT with NEW_RHS. If RHS can be found in the available expression hashtable, then return the LHS from the hash table. If INSERT is true, then we also update the available expression hash table to account for the changes made to STMT. */ static tree update_rhs_and_lookup_avail_expr (tree stmt, tree new_rhs, varray_type *block_avail_exprs_p, stmt_ann_t ann, bool insert) { tree cached_lhs = NULL; /* Remove the old entry from the hash table. */ if (insert) { struct expr_hash_elt element; initialize_hash_element (stmt, NULL, &element); htab_remove_elt_with_hash (avail_exprs, &element, element.hash); } /* Now update the RHS of the assignment. */ TREE_OPERAND (stmt, 1) = new_rhs; /* Now lookup the updated statement in the hash table. */ cached_lhs = lookup_avail_expr (stmt, block_avail_exprs_p, insert); /* We have now called lookup_avail_expr twice with two different versions of this same statement, once in optimize_stmt, once here. We know the call in optimize_stmt did not find an existing entry in the hash table, so a new entry was created. At the same time this statement was pushed onto the BLOCK_AVAIL_EXPRS varray. 
If this call failed to find an existing entry on the hash table, then the new version of this statement was entered into the hash table. And this statement was pushed onto BLOCK_AVAIL_EXPR for the second time. So there are two copies on BLOCK_AVAIL_EXPRs If this call succeeded, we still have one copy of this statement on the BLOCK_AVAIL_EXPRs varray. For both cases, we need to pop the most recent entry off the BLOCK_AVAIL_EXPRs varray. For the case where we never found this statement in the hash tables, that will leave precisely one copy of this statement on BLOCK_AVAIL_EXPRs. For the case where we found a copy of this statement in the second hash table lookup we want _no_ copies of this statement in BLOCK_AVAIL_EXPRs. */ if (insert) VARRAY_POP (*block_avail_exprs_p); /* And make sure we record the fact that we modified this statement. */ ann->modified = 1; return cached_lhs; } /* Search for an existing instance of STMT in the AVAIL_EXPRS table. If found, return its LHS. Otherwise insert STMT in the table and return NULL_TREE. Also, when an expression is first inserted in the AVAIL_EXPRS table, it is also added to the stack pointed by BLOCK_AVAIL_EXPRS_P, so that they can be removed when we finish processing this block and its children. NOTE: This function assumes that STMT is a MODIFY_EXPR node that contains no CALL_EXPR on its RHS and makes no volatile nor aliased references. */ static tree lookup_avail_expr (tree stmt, varray_type *block_avail_exprs_p, bool insert) { void **slot; tree lhs; tree temp; struct expr_hash_elt *element = xcalloc (sizeof (struct expr_hash_elt), 1); lhs = TREE_CODE (stmt) == MODIFY_EXPR ? TREE_OPERAND (stmt, 0) : NULL; initialize_hash_element (stmt, lhs, element); /* Don't bother remembering constant assignments and copy operations. Constants and copy operations are handled by the constant/copy propagator in optimize_stmt. */ if (TREE_CODE (element->rhs) == SSA_NAME || is_gimple_min_invariant (element->rhs)) { free (element); return NULL_TREE; } /* If this is an equality test against zero, see if we have recorded a nonzero value for the variable in question. */ if ((TREE_CODE (element->rhs) == EQ_EXPR || TREE_CODE (element->rhs) == NE_EXPR) && TREE_CODE (TREE_OPERAND (element->rhs, 0)) == SSA_NAME && integer_zerop (TREE_OPERAND (element->rhs, 1))) { int indx = SSA_NAME_VERSION (TREE_OPERAND (element->rhs, 0)); if (bitmap_bit_p (nonzero_vars, indx)) { tree t = element->rhs; free (element); if (TREE_CODE (t) == EQ_EXPR) return boolean_false_node; else return boolean_true_node; } } /* Finally try to find the expression in the main expression hash table. */ slot = htab_find_slot_with_hash (avail_exprs, element, element->hash, (insert ? INSERT : NO_INSERT)); if (slot == NULL) { free (element); return NULL_TREE; } if (*slot == NULL) { *slot = (void *) element; if (! *block_avail_exprs_p) VARRAY_TREE_INIT (*block_avail_exprs_p, 20, "block_avail_exprs"); VARRAY_PUSH_TREE (*block_avail_exprs_p, stmt ? stmt : element->rhs); return NULL_TREE; } /* Extract the LHS of the assignment so that it can be used as the current definition of another variable. */ lhs = ((struct expr_hash_elt *)*slot)->lhs; /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then use the value from the const_and_copies table. 
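For example (made-up SSA names), if the table already contains a_1 = b_2 + c_3 and a_1 has since been recorded as a copy of t_9, then looking up d_5 = b_2 + c_3 returns t_9 rather than a_1, so callers propagate the ultimate source of the value.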
*/ if (TREE_CODE (lhs) == SSA_NAME) { temp = get_value_for (lhs, const_and_copies); if (temp) lhs = temp; } free (element); return lhs; } /* Given a condition COND, record into HI_P, LO_P and INVERTED_P the range of values that result in the conditional having a true value. Return true if we are successful in extracting a range from COND and false if we are unsuccessful. */ static bool extract_range_from_cond (tree cond, tree *hi_p, tree *lo_p, int *inverted_p) { tree op1 = TREE_OPERAND (cond, 1); tree high, low, type; int inverted; /* Experiments have shown that it's rarely, if ever useful to record ranges for enumerations. Presumably this is due to the fact that they're rarely used directly. They are typically cast into an integer type and used that way. */ if (TREE_CODE (TREE_TYPE (op1)) != INTEGER_TYPE) return 0; type = TREE_TYPE (op1); switch (TREE_CODE (cond)) { case EQ_EXPR: high = low = op1; inverted = 0; break; case NE_EXPR: high = low = op1; inverted = 1; break; case GE_EXPR: low = op1; high = TYPE_MAX_VALUE (type); inverted = 0; break; case GT_EXPR: low = int_const_binop (PLUS_EXPR, op1, integer_one_node, 1); high = TYPE_MAX_VALUE (type); inverted = 0; break; case LE_EXPR: high = op1; low = TYPE_MIN_VALUE (type); inverted = 0; break; case LT_EXPR: high = int_const_binop (MINUS_EXPR, op1, integer_one_node, 1); low = TYPE_MIN_VALUE (type); inverted = 0; break; default: return 0; } *hi_p = high; *lo_p = low; *inverted_p = inverted; return 1; } /* Record a range created by COND for basic block BB. */ static void record_range (tree cond, basic_block bb, varray_type *vrp_variables_p) { /* We explicitly ignore NE_EXPRs. They rarely allow for meaningful range optimizations and significantly complicate the implementation. */ if (TREE_CODE_CLASS (TREE_CODE (cond)) == '<' && TREE_CODE (cond) != NE_EXPR && TREE_CODE (TREE_TYPE (TREE_OPERAND (cond, 1))) == INTEGER_TYPE) { struct vrp_element *element = ggc_alloc (sizeof (struct vrp_element)); int ssa_version = SSA_NAME_VERSION (TREE_OPERAND (cond, 0)); varray_type *vrp_records_p = (varray_type *)&VARRAY_GENERIC_PTR (vrp_data, ssa_version); element->low = NULL; element->high = NULL; element->cond = cond; element->bb = bb; if (*vrp_records_p == NULL) { VARRAY_GENERIC_PTR_INIT (*vrp_records_p, 2, "vrp records"); VARRAY_GENERIC_PTR (vrp_data, ssa_version) = *vrp_records_p; } VARRAY_PUSH_GENERIC_PTR (*vrp_records_p, element); if (! *vrp_variables_p) VARRAY_TREE_INIT (*vrp_variables_p, 2, "vrp_variables"); VARRAY_PUSH_TREE (*vrp_variables_p, TREE_OPERAND (cond, 0)); } } /* Given a conditional statement IF_STMT, return the assignment 'X = Y' known to be true depending on which arm of IF_STMT is taken. Not all conditional statements will result in a useful assignment. Return NULL_TREE in that case. Also enter into the available expression table statements of the form: TRUE ARM FALSE ARM 1 = cond 1 = cond' 0 = cond' 0 = cond This allows us to lookup the condition in a dominated block and get back a constant indicating if the condition is true. */ static struct eq_expr_value get_eq_expr_value (tree if_stmt, int true_arm, varray_type *block_avail_exprs_p, basic_block bb, varray_type *vrp_variables_p) { tree cond; struct eq_expr_value retval; cond = COND_EXPR_COND (if_stmt); retval.src = NULL; retval.dst = NULL; /* If the conditional is a single variable 'X', return 'X = 1' for the true arm and 'X = 0' on the false arm. */ if (TREE_CODE (cond) == SSA_NAME) { retval.dst = cond; retval.src = (true_arm ? 
integer_one_node : integer_zero_node); return retval; } /* If we have a comparison expression, then record its result into the available expression table. */ if (TREE_CODE_CLASS (TREE_CODE (cond)) == '<') { tree op0 = TREE_OPERAND (cond, 0); tree op1 = TREE_OPERAND (cond, 1); /* Special case comparing booleans against a constant as we know the value of OP0 on both arms of the branch. ie, we can record an equivalence for OP0 rather than COND. */ if ((TREE_CODE (cond) == EQ_EXPR || TREE_CODE (cond) == NE_EXPR) && TREE_CODE (op0) == SSA_NAME && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE && is_gimple_min_invariant (op1)) { if ((TREE_CODE (cond) == EQ_EXPR && true_arm) || (TREE_CODE (cond) == NE_EXPR && ! true_arm)) { retval.src = op1; } else { if (integer_zerop (op1)) retval.src = boolean_true_node; else retval.src = boolean_false_node; } retval.dst = op0; return retval; } if (TREE_CODE (op0) == SSA_NAME && (is_gimple_min_invariant (op1) || TREE_CODE (op1) == SSA_NAME)) { tree inverted = invert_truthvalue (cond); /* When we find an available expression in the hash table, we replace the expression with the LHS of the statement in the hash table. So, we want to build statements such as "1 = " on the true arm and "0 = " on the false arm. That way if we find the expression in the table, we will replace it with its known constant value. Also insert inversions of the result and condition into the hash table. */ if (true_arm) { record_cond (cond, boolean_true_node, block_avail_exprs_p); record_dominating_conditions (cond, block_avail_exprs_p); record_cond (inverted, boolean_false_node, block_avail_exprs_p); if (TREE_CONSTANT (op1)) record_range (cond, bb, vrp_variables_p); /* If the conditional is of the form 'X == Y', return 'X = Y' for the true arm. */ if (TREE_CODE (cond) == EQ_EXPR) { retval.dst = op0; retval.src = op1; return retval; } } else { record_cond (inverted, boolean_true_node, block_avail_exprs_p); record_dominating_conditions (inverted, block_avail_exprs_p); record_cond (cond, boolean_false_node, block_avail_exprs_p); if (TREE_CONSTANT (op1)) record_range (inverted, bb, vrp_variables_p); /* If the conditional is of the form 'X != Y', return 'X = Y' for the false arm. */ if (TREE_CODE (cond) == NE_EXPR) { retval.dst = op0; retval.src = op1; return retval; } } } } return retval; } /* Hashing and equality functions for AVAIL_EXPRS. The table stores MODIFY_EXPR statements. We compute a value number for expressions using the code of the expression and the SSA numbers of its operands. */ static hashval_t avail_expr_hash (const void *p) { stmt_ann_t ann = ((struct expr_hash_elt *)p)->ann; tree rhs = ((struct expr_hash_elt *)p)->rhs; hashval_t val = 0; size_t i; vuse_optype vuses; /* iterative_hash_expr knows how to deal with any expression and deals with commutative operators as well, so just use it instead of duplicating such complexities here. */ val = iterative_hash_expr (rhs, val); /* If the hash table entry is not associated with a statement, then we can just hash the expression and not worry about virtual operands and such. */ if (!ann) return val; /* Add the SSA version numbers of every vuse operand. This is important because compound variables like arrays are not renamed in the operands. Rather, the rename is done on the virtual variable representing all the elements of the array. 
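For example, two loads of a[i_2] separated by a store into the array will carry different SSA versions of the array's virtual variable in their VUSEs; folding those versions into the hash here (and comparing them in avail_expr_eq below) keeps the second load from being treated as redundant with the first.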
*/ vuses = VUSE_OPS (ann); for (i = 0; i < NUM_VUSES (vuses); i++) val = iterative_hash_expr (VUSE_OP (vuses, i), val); return val; } static hashval_t real_avail_expr_hash (const void *p) { return ((const struct expr_hash_elt *)p)->hash; } static int avail_expr_eq (const void *p1, const void *p2) { stmt_ann_t ann1 = ((struct expr_hash_elt *)p1)->ann; tree rhs1 = ((struct expr_hash_elt *)p1)->rhs; stmt_ann_t ann2 = ((struct expr_hash_elt *)p2)->ann; tree rhs2 = ((struct expr_hash_elt *)p2)->rhs; /* If they are the same physical expression, return true. */ if (rhs1 == rhs2 && ann1 == ann2) return true; /* If their codes are not equal, then quit now. */ if (TREE_CODE (rhs1) != TREE_CODE (rhs2)) return false; /* In case of a collision, both RHS have to be identical and have the same VUSE operands. */ if ((TREE_TYPE (rhs1) == TREE_TYPE (rhs2) || lang_hooks.types_compatible_p (TREE_TYPE (rhs1), TREE_TYPE (rhs2))) && operand_equal_p (rhs1, rhs2, OEP_PURE_SAME)) { vuse_optype ops1 = NULL; vuse_optype ops2 = NULL; size_t num_ops1 = 0; size_t num_ops2 = 0; size_t i; if (ann1) { ops1 = VUSE_OPS (ann1); num_ops1 = NUM_VUSES (ops1); } if (ann2) { ops2 = VUSE_OPS (ann2); num_ops2 = NUM_VUSES (ops2); } /* If the number of virtual uses is different, then we consider them not equal. */ if (num_ops1 != num_ops2) return false; for (i = 0; i < num_ops1; i++) if (VUSE_OP (ops1, i) != VUSE_OP (ops2, i)) return false; #ifdef ENABLE_CHECKING if (((struct expr_hash_elt *)p1)->hash != ((struct expr_hash_elt *)p2)->hash) abort (); #endif return true; } return false; } /* Given STMT and a pointer to the block local definitions BLOCK_DEFS_P, register all objects set by this statement into BLOCK_DEFS_P and CURRDEFS. */ static void register_definitions_for_stmt (stmt_ann_t ann, varray_type *block_defs_p) { def_optype defs; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; unsigned int i; defs = DEF_OPS (ann); for (i = 0; i < NUM_DEFS (defs); i++) { tree def = DEF_OP (defs, i); /* FIXME: We shouldn't be registering new defs if the variable doesn't need to be renamed. */ register_new_def (def, block_defs_p); } /* Register new virtual definitions made by the statement. */ v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { /* FIXME: We shouldn't be registering new defs if the variable doesn't need to be renamed. */ register_new_def (V_MAY_DEF_RESULT (v_may_defs, i), block_defs_p); } /* Register new virtual mustdefs made by the statement. */ v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { /* FIXME: We shouldn't be registering new defs if the variable doesn't need to be renamed. */ register_new_def (V_MUST_DEF_OP (v_must_defs, i), block_defs_p); } } /* Generic dominator tree walker Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ /* This file implements a generic walker for dominator trees. To understand the dominator walker one must first have a grasp of dominators, immediate dominators and the dominator tree. Dominators A block B1 is said to dominate B2 if every path from the entry to B2 must pass through B1. Given the dominance relationship, we can proceed to compute immediate dominators. Note it is not important whether or not our definition allows a block to dominate itself. Immediate Dominators: Every block in the CFG has no more than one immediate dominator. The immediate dominator of block BB must dominate BB and must not dominate any other dominator of BB and must not be BB itself. Dominator tree: If we then construct a tree where each node is a basic block and there is an edge from each block's immediate dominator to the block itself, then we have a dominator tree. [ Note this walker can also walk the post-dominator tree, which is defined in a similar manner. ie, block B1 is said to post-dominate block B2 if all paths from B2 to the exit block must pass through B1. ] For example, given the CFG 1 | 2 / \ 3 4 / \ +---------->5 6 | / \ / | +--->8 7 | | / | | +--9 11 | / | +--- 10 ---> 12 We have a dominator tree which looks like 1 | 2 / \ / \ 3 4 / / \ \ | | | | 5 6 7 12 | | 8 11 | 9 | 10 The dominator tree is the basis for a number of analysis, transformation and optimization algorithms that operate on a semi-global basis. The dominator walker is a generic routine which visits blocks in the CFG via a depth first search of the dominator tree. In the example above the dominator walker might visit blocks in the following order 1, 2, 3, 4, 5, 8, 9, 10, 6, 7, 11, 12. The dominator walker has a number of callbacks to perform actions during the walk of the dominator tree. There are two callbacks which walk statements, one before visiting the dominator children, one after visiting the dominator children. There is a callback before and after each statement walk callback. In addition, the dominator walker manages allocation/deallocation of data structures which are local to each block visited. The dominator walker is meant to provide a generic means to build a pass which can analyze or transform/optimize a function based on walking the dominator tree. One simply fills in the dominator walker data structure with the appropriate callbacks and calls the walker. We currently use the dominator walker to prune the set of variables which might need PHI nodes (which can greatly improve compile-time performance in some cases). We also use the dominator walker to rewrite the function into SSA form which reduces code duplication since the rewriting phase is inherently a walk of the dominator tree. And (of course), we use the dominator walker to drive our dominator optimizer, which is a semi-global optimizer. TODO: Walking statements is based on the block statement iterator abstraction, which is currently an abstraction over walking tree statements. Thus the dominator walker is currently only useful for trees. */ /* Recursively walk the dominator tree. WALK_DATA contains a set of callbacks to perform pass-specific actions during the dominator walk as well as a stack of block local data maintained during the dominator walk. BB is the basic block we are currently visiting. */ void walk_dominator_tree (struct dom_walk_data *walk_data, basic_block bb) { void *bd = NULL; basic_block dest; block_stmt_iterator bsi; /* Callback to initialize the local data structure.
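Note that the data handed to the callback may have been recycled from the free list (see the RECYCLED argument passed below), in which case it can still hold values left over from a previously visited block; a typical initializer clears or verifies those fields before this block is processed.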
*/ if (walk_data->initialize_block_local_data) { bool recycled; /* First get some local data, reusing any local data pointer we may have saved. */ if (VARRAY_ACTIVE_SIZE (walk_data->free_block_data) > 0) { bd = VARRAY_TOP_GENERIC_PTR (walk_data->free_block_data); VARRAY_POP (walk_data->free_block_data); recycled = 1; } else { bd = xcalloc (1, walk_data->block_local_data_size); recycled = 0; } /* Push the local data into the local data stack. */ VARRAY_PUSH_GENERIC_PTR (walk_data->block_data_stack, bd); /* Call the initializer. */ walk_data->initialize_block_local_data (walk_data, bb, recycled); } /* Callback for operations to execute before we have walked the dominator children, but before we walk statements. */ if (walk_data->before_dom_children_before_stmts) (*walk_data->before_dom_children_before_stmts) (walk_data, bb); /* Statement walk before walking dominator children. */ if (walk_data->before_dom_children_walk_stmts) { if (walk_data->walk_stmts_backward) for (bsi = bsi_last (bb); !bsi_end_p (bsi); bsi_prev (&bsi)) (*walk_data->before_dom_children_walk_stmts) (walk_data, bb, bsi); else for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) (*walk_data->before_dom_children_walk_stmts) (walk_data, bb, bsi); } /* Callback for operations to execute before we have walked the dominator children, and after we walk statements. */ if (walk_data->before_dom_children_after_stmts) (*walk_data->before_dom_children_after_stmts) (walk_data, bb); /* Recursively call ourselves on the dominator children of BB. */ for (dest = first_dom_son (walk_data->dom_direction, bb); dest; dest = next_dom_son (walk_data->dom_direction, dest)) { /* The destination block may have become unreachable, in which case there's no point in optimizing it. */ if (dest->pred) walk_dominator_tree (walk_data, dest); } /* Callback for operations to execute after we have walked the dominator children, but before we walk statements. */ if (walk_data->after_dom_children_before_stmts) (*walk_data->after_dom_children_before_stmts) (walk_data, bb); /* Statement walk after walking dominator children. */ if (walk_data->after_dom_children_walk_stmts) { if (walk_data->walk_stmts_backward) for (bsi = bsi_last (bb); !bsi_end_p (bsi); bsi_prev (&bsi)) (*walk_data->after_dom_children_walk_stmts) (walk_data, bb, bsi); else for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) (*walk_data->after_dom_children_walk_stmts) (walk_data, bb, bsi); } /* Callback for operations to execute after we have walked the dominator children and after we have walked statements. */ if (walk_data->after_dom_children_after_stmts) (*walk_data->after_dom_children_after_stmts) (walk_data, bb); if (walk_data->initialize_block_local_data) { /* And save the block data so that we can re-use it. */ VARRAY_PUSH_GENERIC_PTR (walk_data->free_block_data, bd); /* And finally pop the record off the block local data stack. 
*/ VARRAY_POP (walk_data->block_data_stack); } } void init_walk_dominator_tree (struct dom_walk_data *walk_data) { if (walk_data->initialize_block_local_data) { VARRAY_GENERIC_PTR_INIT (walk_data->free_block_data, 2, "freelist "); VARRAY_GENERIC_PTR_INIT (walk_data->block_data_stack, 2, "block_data"); } else { walk_data->free_block_data = NULL; walk_data->block_data_stack = NULL; } } void fini_walk_dominator_tree (struct dom_walk_data *walk_data) { if (walk_data->initialize_block_local_data) { while (VARRAY_ACTIVE_SIZE (walk_data->free_block_data) > 0) { free (VARRAY_TOP_GENERIC_PTR (walk_data->free_block_data)); VARRAY_POP (walk_data->free_block_data); } } } /* Tail call optimization on trees. Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file implements tail recursion elimination. It is also used to analyze tail calls in general, passing the results to the rtl level where they are used for sibcall optimization. In addition to the standard tail recursion elimination, we handle the most trivial cases of making the call tail recursive by creating accumulators. For example, the following function int sum (int n) { if (n > 0) return n + sum (n - 1); else return 0; } is transformed into int sum (int n) { int acc = 0; while (n > 0) acc += n--; return acc; } To do this, we maintain two accumulators (a_acc and m_acc) that indicate that when we reach a return x statement, we should return a_acc + x * m_acc instead. They are initialized to 0 and 1, respectively, so the semantics of the function is obviously preserved. If we are guaranteed that the value of the accumulator never changes, we omit the accumulator. There are three ways in which the function may exit. The first one is handled in adjust_return_value, the other two in adjust_accumulator_values (the second case is actually a special case of the third one and we present it separately just for clarity): 1) Just return x, where x is not in any of the remaining special shapes. We rewrite this to a gimple equivalent of return m_acc * x + a_acc. 2) return f (...), where f is the current function, is rewritten in a classical tail-recursion elimination way, into an assignment of the arguments and a jump to the start of the function. Values of the accumulators are unchanged. 3) return a + m * f(...), where a and m do not depend on the call to f. To preserve the semantics described before, we want this to be rewritten in such a way that we finally return a_acc + (a + m * f(...)) * m_acc = (a_acc + a * m_acc) + (m * m_acc) * f(...). I.e. we increase a_acc by a * m_acc, multiply m_acc by m and eliminate the tail call to f. Special cases when the value is just added or just multiplied are obtained by setting a = 0 or m = 1. TODO -- it is possible to do similar tricks for other operations. */ /* A structure that describes the tailcall.
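For instance, for a tail-recursive call of the form return 3 + 2 * f (x), the call is described with add == 3 and mult == 2; adjust_accumulator_values then performs a_acc += 3 * m_acc and m_acc *= 2, as outlined in case 3 above.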
*/ struct tailcall { /* The block in which the call occurs. */ basic_block call_block; /* The iterator pointing to the call statement. */ block_stmt_iterator call_bsi; /* True if it is a call to the current function. */ bool tail_recursion; /* The return value of the caller is mult * f + add, where f is the return value of the call. */ tree mult, add; /* Next tailcall in the chain. */ struct tailcall *next; }; /* The variables holding the values of the multiplicative and additive accumulators. */ static tree m_acc, a_acc; static bool suitable_for_tail_opt_p (void); static bool optimize_tail_call (struct tailcall *, bool); static void eliminate_tail_call (struct tailcall *); static void find_tail_calls (basic_block, struct tailcall **); /* Returns false when the function is not suitable for tail call optimization for some reason (e.g. if it takes a variable number of arguments). */ static bool suitable_for_tail_opt_p (void) { int i; if (current_function_stdarg) return false; /* No local variable should be call-clobbered. We ignore any kind of memory tag, as these are not real variables. */ for (i = 0; i < (int) VARRAY_ACTIVE_SIZE (referenced_vars); i++) { tree var = VARRAY_TREE (referenced_vars, i); if (decl_function_context (var) == current_function_decl && !TREE_STATIC (var) && var_ann (var)->mem_tag_kind == NOT_A_TAG && is_call_clobbered (var)) return false; } return true; } /* Returns false when the function is not suitable for tail call optimization for some reason (e.g. if it takes a variable number of arguments). This test must pass in addition to suitable_for_tail_opt_p in order to make tail call discovery happen. */ static bool suitable_for_tail_call_opt_p (void) { /* alloca (until we have stack slot life analysis) inhibits sibling call optimizations, but not tail recursion. */ if (current_function_calls_alloca) return false; /* If we are using sjlj exceptions, we may need to add a call to _Unwind_SjLj_Unregister at the exit of the function, which means that we cannot do any sibcall transformations. */ if (USING_SJLJ_EXCEPTIONS && current_function_has_exception_handlers ()) return false; /* Any function that calls setjmp might have longjmp called from any called function. ??? We really should represent this properly in the CFG so that this needn't be special cased. */ if (current_function_calls_setjmp) return false; return true; } /* Checks whether the expression EXPR in stmt AT is independent of the statement pointed to by BSI (in the sense that we already know EXPR's value at BSI). We use the fact that we are only called from the chain of basic blocks that have only a single successor. Returns the expression containing the value of EXPR at BSI. */ static tree independent_of_stmt_p (tree expr, tree at, block_stmt_iterator bsi) { basic_block bb, call_bb, at_bb; edge e; if (is_gimple_min_invariant (expr)) return expr; if (TREE_CODE (expr) != SSA_NAME) return NULL_TREE; /* Mark the blocks in the chain leading to the end. */ at_bb = bb_for_stmt (at); call_bb = bb_for_stmt (bsi_stmt (bsi)); for (bb = call_bb; bb != at_bb; bb = bb->succ->dest) bb->aux = &bb->aux; bb->aux = &bb->aux; while (1) { at = SSA_NAME_DEF_STMT (expr); bb = bb_for_stmt (at); /* The default definition or defined before the chain.
*/ if (!bb || !bb->aux) break; if (bb == call_bb) { for (; !bsi_end_p (bsi); bsi_next (&bsi)) if (bsi_stmt (bsi) == at) break; if (!bsi_end_p (bsi)) expr = NULL_TREE; break; } if (TREE_CODE (at) != PHI_NODE) { expr = NULL_TREE; break; } for (e = bb->pred; e; e = e->pred_next) if (e->src->aux) break; if (!e) abort (); expr = PHI_ARG_DEF_FROM_EDGE (at, e); } /* Unmark the blocks. */ for (bb = call_bb; bb != at_bb; bb = bb->succ->dest) bb->aux = NULL; bb->aux = NULL; return expr; } /* Simulates the effect of an assignment of ASS in STMT on the return value of the tail recursive CALL passed in ASS_VAR. M and A are the multiplicative and the additive factor for the real return value. */ static bool process_assignment (tree ass, tree stmt, block_stmt_iterator call, tree *m, tree *a, tree *ass_var) { tree op0, op1, non_ass_var; tree dest = TREE_OPERAND (ass, 0); tree src = TREE_OPERAND (ass, 1); enum tree_code code = TREE_CODE (src); tree src_var = src; /* See if this is a simple copy operation of an SSA name to the function result. In that case we may have a simple tail call. Ignore type conversions that can never produce extra code between the function call and the function return. */ STRIP_NOPS (src_var); if (TREE_CODE (src_var) == SSA_NAME) { if (src_var != *ass_var) return false; *ass_var = dest; return true; } if (TREE_CODE_CLASS (code) != '2') return false; /* We only handle the code like x = call (); y = m * x; z = y + a; return z; TODO -- Extend it for cases where the linear transformation of the output is expressed in a more complicated way. */ op0 = TREE_OPERAND (src, 0); op1 = TREE_OPERAND (src, 1); if (op0 == *ass_var && (non_ass_var = independent_of_stmt_p (op1, stmt, call))) ; else if (op1 == *ass_var && (non_ass_var = independent_of_stmt_p (op0, stmt, call))) ; else return false; switch (code) { case PLUS_EXPR: /* There should be no previous addition. TODO -- it should be fairly straightforward to lift this restriction -- just allow storing more complicated expressions in *A, and gimplify it in adjust_accumulator_values. */ if (*a) return false; *a = non_ass_var; *ass_var = dest; return true; case MULT_EXPR: /* Similar remark applies here. Handling multiplication after addition is just slightly more complicated -- we need to multiply both *A and *M. */ if (*a || *m) return false; *m = non_ass_var; *ass_var = dest; return true; /* TODO -- Handle other codes (NEGATE_EXPR, MINUS_EXPR). */ default: return false; } } /* Propagate VAR through phis on edge E. */ static tree propagate_through_phis (tree var, edge e) { basic_block dest = e->dest; tree phi; for (phi = phi_nodes (dest); phi; phi = PHI_CHAIN (phi)) if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var) return PHI_RESULT (phi); return var; } /* Finds tailcalls falling into basic block BB. The list of found tailcalls is added to the start of RET. */ static void find_tail_calls (basic_block bb, struct tailcall **ret) { tree ass_var, ret_var, stmt, func, param, args, call = NULL_TREE; block_stmt_iterator bsi, absi; bool tail_recursion; struct tailcall *nw; edge e; tree m, a; basic_block abb; stmt_ann_t ann; if (bb->succ->succ_next) return; for (bsi = bsi_last (bb); !bsi_end_p (bsi); bsi_prev (&bsi)) { stmt = bsi_stmt (bsi); /* Ignore labels. */ if (TREE_CODE (stmt) == LABEL_EXPR) continue; get_stmt_operands (stmt); /* Check for a call. 
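The call may appear either as the whole statement, as in foo (x_1), or on the right-hand side of an assignment, as in t_2 = foo (x_1); both shapes are recognized below (the SSA names are made up for illustration).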
*/ if (TREE_CODE (stmt) == MODIFY_EXPR) { ass_var = TREE_OPERAND (stmt, 0); call = TREE_OPERAND (stmt, 1); } else { ass_var = NULL_TREE; call = stmt; } if (TREE_CODE (call) == CALL_EXPR) break; /* If the statement has virtual operands, fail. */ ann = stmt_ann (stmt); if (NUM_V_MAY_DEFS (V_MAY_DEF_OPS (ann)) || NUM_V_MUST_DEFS (V_MUST_DEF_OPS (ann)) || NUM_VUSES (VUSE_OPS (ann))) return; } if (bsi_end_p (bsi)) { /* Recurse to the predecessors. */ for (e = bb->pred; e; e = e->pred_next) find_tail_calls (e->src, ret); return; } /* We found the call, check whether it is suitable. */ tail_recursion = false; func = get_callee_fndecl (call); if (func == current_function_decl) { for (param = DECL_ARGUMENTS (func), args = TREE_OPERAND (call, 1); param && args; param = TREE_CHAIN (param), args = TREE_CHAIN (args)) { tree arg = TREE_VALUE (args); if (param != arg /* Make sure there are no problems with copying. Note we must have a copyable type and the two arguments must have reasonably equivalent types. The latter requirement could be relaxed if we emitted a suitable type conversion statement. */ && (!is_gimple_reg_type (TREE_TYPE (param)) || !lang_hooks.types_compatible_p (TREE_TYPE (param), TREE_TYPE (arg)))) break; } if (!args && !param) tail_recursion = true; } /* Now check the statements after the call. None of them has virtual operands, so they may only depend on the call through its return value. The return value should also be dependent on each of them, since we are running after dce. */ m = NULL_TREE; a = NULL_TREE; abb = bb; absi = bsi; while (1) { bsi_next (&absi); while (bsi_end_p (absi)) { ass_var = propagate_through_phis (ass_var, abb->succ); abb = abb->succ->dest; absi = bsi_start (abb); } stmt = bsi_stmt (absi); if (TREE_CODE (stmt) == LABEL_EXPR) continue; if (TREE_CODE (stmt) == RETURN_EXPR) break; if (TREE_CODE (stmt) != MODIFY_EXPR) return; if (!process_assignment (stmt, stmt, bsi, &m, &a, &ass_var)) return; } /* See if this is a tail call we can handle. */ ret_var = TREE_OPERAND (stmt, 0); if (ret_var && TREE_CODE (ret_var) == MODIFY_EXPR) { tree ret_op = TREE_OPERAND (ret_var, 1); STRIP_NOPS (ret_op); if (!tail_recursion && TREE_CODE (ret_op) != SSA_NAME) return; if (!process_assignment (ret_var, stmt, bsi, &m, &a, &ass_var)) return; ret_var = TREE_OPERAND (ret_var, 0); } /* We may proceed if there either is no return value, or the return value is identical to the call's return. */ if (ret_var && (ret_var != ass_var)) return; /* If this is not a tail recursive call, we cannot handle addends or multiplicands. */ if (!tail_recursion && (m || a)) return; nw = xmalloc (sizeof (struct tailcall)); nw->call_block = bb; nw->call_bsi = bsi; nw->tail_recursion = tail_recursion; nw->mult = m; nw->add = a; nw->next = *ret; *ret = nw; } /* Adjust the accumulator values according to A and M after BSI, and update the phi nodes on edge BACK. 
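In other words, when both A and M are present the new accumulators satisfy a_acc' = a_acc + A * m_acc and m_acc' = m_acc * M, so that eventually returning a_acc' + x * m_acc' for the call's value x yields a_acc + (A + M * x) * m_acc, exactly as described in case 3 of the overview comment above.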
*/ static void adjust_accumulator_values (block_stmt_iterator bsi, tree m, tree a, edge back) { tree stmt, var, phi, tmp; tree ret_type = TREE_TYPE (DECL_RESULT (current_function_decl)); tree a_acc_arg = a_acc, m_acc_arg = m_acc; if (a) { if (m_acc) { if (integer_onep (a)) var = m_acc; else { stmt = build (MODIFY_EXPR, ret_type, NULL_TREE, build (MULT_EXPR, ret_type, m_acc, a)); tmp = create_tmp_var (ret_type, "acc_tmp"); add_referenced_tmp_var (tmp); var = make_ssa_name (tmp, stmt); TREE_OPERAND (stmt, 0) = var; bsi_insert_after (&bsi, stmt, BSI_NEW_STMT); } } else var = a; stmt = build (MODIFY_EXPR, ret_type, NULL_TREE, build (PLUS_EXPR, ret_type, a_acc, var)); var = make_ssa_name (SSA_NAME_VAR (a_acc), stmt); TREE_OPERAND (stmt, 0) = var; bsi_insert_after (&bsi, stmt, BSI_NEW_STMT); a_acc_arg = var; } if (m) { stmt = build (MODIFY_EXPR, ret_type, NULL_TREE, build (MULT_EXPR, ret_type, m_acc, m)); var = make_ssa_name (SSA_NAME_VAR (m_acc), stmt); TREE_OPERAND (stmt, 0) = var; bsi_insert_after (&bsi, stmt, BSI_NEW_STMT); m_acc_arg = var; } if (a_acc) { for (phi = phi_nodes (back->dest); phi; phi = PHI_CHAIN (phi)) if (PHI_RESULT (phi) == a_acc) break; add_phi_arg (&phi, a_acc_arg, back); } if (m_acc) { for (phi = phi_nodes (back->dest); phi; phi = PHI_CHAIN (phi)) if (PHI_RESULT (phi) == m_acc) break; add_phi_arg (&phi, m_acc_arg, back); } } /* Adjust value of the return at the end of BB according to M and A accumulators. */ static void adjust_return_value (basic_block bb, tree m, tree a) { tree ret_stmt = last_stmt (bb), ret_var, var, stmt, tmp; tree ret_type = TREE_TYPE (DECL_RESULT (current_function_decl)); block_stmt_iterator bsi = bsi_last (bb); if (TREE_CODE (ret_stmt) != RETURN_EXPR) abort (); ret_var = TREE_OPERAND (ret_stmt, 0); if (!ret_var) return; if (TREE_CODE (ret_var) == MODIFY_EXPR) { ret_var->common.ann = (tree_ann_t) stmt_ann (ret_stmt); bsi_replace (&bsi, ret_var, true); SSA_NAME_DEF_STMT (TREE_OPERAND (ret_var, 0)) = ret_var; ret_var = TREE_OPERAND (ret_var, 0); ret_stmt = build1 (RETURN_EXPR, TREE_TYPE (ret_stmt), ret_var); bsi_insert_after (&bsi, ret_stmt, BSI_NEW_STMT); } if (m) { stmt = build (MODIFY_EXPR, ret_type, NULL_TREE, build (MULT_EXPR, ret_type, m_acc, ret_var)); tmp = create_tmp_var (ret_type, "acc_tmp"); add_referenced_tmp_var (tmp); var = make_ssa_name (tmp, stmt); TREE_OPERAND (stmt, 0) = var; bsi_insert_before (&bsi, stmt, BSI_NEW_STMT); } else var = ret_var; if (a) { stmt = build (MODIFY_EXPR, ret_type, NULL_TREE, build (PLUS_EXPR, ret_type, a_acc, var)); tmp = create_tmp_var (ret_type, "acc_tmp"); add_referenced_tmp_var (tmp); var = make_ssa_name (tmp, stmt); TREE_OPERAND (stmt, 0) = var; bsi_insert_before (&bsi, stmt, BSI_NEW_STMT); } TREE_OPERAND (ret_stmt, 0) = var; modify_stmt (ret_stmt); } /* Eliminates tail call described by T. TMP_VARS is a list of temporary variables used to copy the function arguments. */ static void eliminate_tail_call (struct tailcall *t) { tree param, stmt, args, rslt, call; basic_block bb, first; edge e; tree phi; stmt_ann_t ann; v_may_def_optype v_may_defs; unsigned i; stmt = bsi_stmt (t->call_bsi); get_stmt_operands (stmt); ann = stmt_ann (stmt); bb = t->call_block; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Eliminated tail recursion in bb %d : ", bb->index); print_generic_stmt (dump_file, stmt, TDF_SLIM); fprintf (dump_file, "\n"); } if (TREE_CODE (stmt) == MODIFY_EXPR) stmt = TREE_OPERAND (stmt, 1); first = ENTRY_BLOCK_PTR->succ->dest; /* Replace the call by a jump to the start of function. 
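For a tail-recursive call such as (purely illustrative) return foo (n - 1, s + n); the CALL_EXPR itself is removed, this block is redirected to the first block of the function body, and the actual arguments n - 1 and s + n are added further below as PHI arguments for the parameters n and s on the newly created back edge.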
*/ e = redirect_edge_and_branch (t->call_block->succ, first); if (!e) abort (); PENDING_STMT (e) = NULL_TREE; /* Add phi node entries for arguments. Not every PHI node corresponds to a function argument (there may be PHI nodes for virtual definitions of the eliminated calls), so we search for a PHI corresponding to each argument rather than searching for which argument a PHI node corresponds to. */ for (param = DECL_ARGUMENTS (current_function_decl), args = TREE_OPERAND (stmt, 1); param; param = TREE_CHAIN (param), args = TREE_CHAIN (args)) { for (phi = phi_nodes (first); phi; phi = PHI_CHAIN (phi)) if (param == SSA_NAME_VAR (PHI_RESULT (phi))) break; /* The phi node indeed does not have to be there, in case the operand is invariant in the function. */ if (!phi) continue; add_phi_arg (&phi, TREE_VALUE (args), e); } /* Add phi nodes for the call clobbered variables. */ v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { param = SSA_NAME_VAR (V_MAY_DEF_RESULT (v_may_defs, i)); for (phi = phi_nodes (first); phi; phi = PHI_CHAIN (phi)) if (param == SSA_NAME_VAR (PHI_RESULT (phi))) break; if (!phi) { tree name = var_ann (param)->default_def; tree new_name = make_ssa_name (param, SSA_NAME_DEF_STMT (name)); var_ann (param)->default_def = new_name; phi = create_phi_node (name, first); SSA_NAME_DEF_STMT (name) = phi; add_phi_arg (&phi, new_name, ENTRY_BLOCK_PTR->succ); /* For all calls the same set of variables should be clobbered. This means that there always should be the appropriate phi node except for the first time we eliminate the call. */ if (first->pred->pred_next->pred_next) abort (); } add_phi_arg (&phi, V_MAY_DEF_OP (v_may_defs, i), e); } /* Update the values of accumulators. */ adjust_accumulator_values (t->call_bsi, t->mult, t->add, e); call = bsi_stmt (t->call_bsi); if (TREE_CODE (call) == MODIFY_EXPR) { rslt = TREE_OPERAND (call, 0); /* Result of the call will no longer be defined. So adjust the SSA_NAME_DEF_STMT accordingly. */ SSA_NAME_DEF_STMT (rslt) = build_empty_stmt (); } bsi_remove (&t->call_bsi); } /* Optimizes the tailcall described by T. If OPT_TAILCALLS is true, also mark the tailcalls for the sibcall optimization. */ static bool optimize_tail_call (struct tailcall *t, bool opt_tailcalls) { if (t->tail_recursion) { eliminate_tail_call (t); return true; } if (opt_tailcalls) { tree stmt = bsi_stmt (t->call_bsi); if (TREE_CODE (stmt) == MODIFY_EXPR) stmt = TREE_OPERAND (stmt, 1); if (TREE_CODE (stmt) != CALL_EXPR) abort (); CALL_EXPR_TAILCALL (stmt) = 1; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Found tail call "); print_generic_expr (dump_file, stmt, dump_flags); fprintf (dump_file, " in bb %i\n", t->call_block->index); } } return false; } /* Optimizes tail calls in the function, turning the tail recursion into iteration. */ static void tree_optimize_tail_calls_1 (bool opt_tailcalls) { edge e; bool phis_constructed = false; struct tailcall *tailcalls = NULL, *act, *next; bool changed = false; basic_block first = ENTRY_BLOCK_PTR->succ->dest; tree stmt, param, ret_type, tmp, phi; if (!suitable_for_tail_opt_p ()) return; if (opt_tailcalls) opt_tailcalls = suitable_for_tail_call_opt_p (); for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) { /* Only traverse the normal exits, i.e. those that end with return statement. */ stmt = last_stmt (e->src); if (stmt && TREE_CODE (stmt) == RETURN_EXPR) find_tail_calls (e->src, &tailcalls); } /* Construct the phi nodes and accumulators if necessary. 
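The additive accumulator is seeded with zero and the multiplicative one with one on the edge leaving the entry block, so a path that never takes the back edge leaves the eventual return value unchanged: conceptually ret = 1 * ret + 0.  (The identity is only a reading aid; the code below emits fold_convert-ed integer_zero_node and integer_one_node.)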
*/ a_acc = m_acc = NULL_TREE; for (act = tailcalls; act; act = act->next) { if (!act->tail_recursion) continue; if (!phis_constructed) { /* Ensure that there is only one predecessor of the block. */ if (first->pred->pred_next) first = split_edge (ENTRY_BLOCK_PTR->succ); /* Copy the args if needed. */ for (param = DECL_ARGUMENTS (current_function_decl); param; param = TREE_CHAIN (param)) if (var_ann (param) /* Also parameters that are only defined but never used need not be copied. */ && (var_ann (param)->default_def && TREE_CODE (var_ann (param)->default_def) == SSA_NAME)) { tree name = var_ann (param)->default_def; tree new_name = make_ssa_name (param, SSA_NAME_DEF_STMT (name)); tree phi; var_ann (param)->default_def = new_name; phi = create_phi_node (name, first); SSA_NAME_DEF_STMT (name) = phi; add_phi_arg (&phi, new_name, first->pred); } phis_constructed = true; } if (act->add && !a_acc) { ret_type = TREE_TYPE (DECL_RESULT (current_function_decl)); tmp = create_tmp_var (ret_type, "add_acc"); add_referenced_tmp_var (tmp); phi = create_phi_node (tmp, first); add_phi_arg (&phi, fold_convert (ret_type, integer_zero_node), first->pred); a_acc = PHI_RESULT (phi); } if (act->mult && !m_acc) { ret_type = TREE_TYPE (DECL_RESULT (current_function_decl)); tmp = create_tmp_var (ret_type, "mult_acc"); add_referenced_tmp_var (tmp); phi = create_phi_node (tmp, first); add_phi_arg (&phi, fold_convert (ret_type, integer_one_node), first->pred); m_acc = PHI_RESULT (phi); } } for (; tailcalls; tailcalls = next) { next = tailcalls->next; changed |= optimize_tail_call (tailcalls, opt_tailcalls); free (tailcalls); } if (a_acc || m_acc) { /* Modify the remaining return statements. */ for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) { stmt = last_stmt (e->src); if (stmt && TREE_CODE (stmt) == RETURN_EXPR) adjust_return_value (e->src, m_acc, a_acc); } } if (changed) { free_dominance_info (CDI_DOMINATORS); cleanup_tree_cfg (); } } static void execute_tail_recursion (void) { tree_optimize_tail_calls_1 (false); } static bool gate_tail_calls (void) { return flag_optimize_sibling_calls != 0; } static void execute_tail_calls (void) { tree_optimize_tail_calls_1 (true); } struct tree_opt_pass pass_tail_recursion = { "tailr", /* name */ NULL, /* gate */ execute_tail_recursion, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_verify_ssa /* todo_flags_finish */ }; struct tree_opt_pass pass_tail_calls = { "tailc", /* name */ gate_tail_calls, /* gate */ execute_tail_calls, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_verify_ssa /* todo_flags_finish */ }; /* Tree lowering pass. Lowers GIMPLE into unstructured form. Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ struct lower_data { /* Block the current statement belongs to. */ tree block; /* A TREE_LIST of label and return statements to be moved to the end of the function. */ tree return_statements; }; static void lower_stmt (tree_stmt_iterator *, struct lower_data *); static void lower_bind_expr (tree_stmt_iterator *, struct lower_data *); static void lower_cond_expr (tree_stmt_iterator *, struct lower_data *); static void lower_return_expr (tree_stmt_iterator *, struct lower_data *); static bool expand_var_p (tree); /* Lowers the body of current_function_decl. */ static void lower_function_body (void) { struct lower_data data; tree *body_p = &DECL_SAVED_TREE (current_function_decl); tree bind = *body_p; tree_stmt_iterator i; tree t, x; if (TREE_CODE (bind) != BIND_EXPR) abort (); data.block = DECL_INITIAL (current_function_decl); BLOCK_SUBBLOCKS (data.block) = NULL_TREE; BLOCK_CHAIN (data.block) = NULL_TREE; TREE_ASM_WRITTEN (data.block) = 1; data.return_statements = NULL_TREE; *body_p = alloc_stmt_list (); i = tsi_start (*body_p); tsi_link_after (&i, bind, TSI_NEW_STMT); lower_bind_expr (&i, &data); i = tsi_last (*body_p); /* If the function falls off the end, we need a null return statement. If we've already got one in the return_statements list, we don't need to do anything special. Otherwise build one by hand. */ if (block_may_fallthru (*body_p) && (data.return_statements == NULL || TREE_OPERAND (TREE_VALUE (data.return_statements), 0) != NULL)) { x = build (RETURN_EXPR, void_type_node, NULL); SET_EXPR_LOCATION (x, cfun->function_end_locus); tsi_link_after (&i, x, TSI_CONTINUE_LINKING); } /* If we lowered any return statements, emit the representative at the end of the function. */ for (t = data.return_statements ; t ; t = TREE_CHAIN (t)) { x = build (LABEL_EXPR, void_type_node, TREE_PURPOSE (t)); tsi_link_after (&i, x, TSI_CONTINUE_LINKING); /* Remove the line number from the representative return statement. It now fills in for many such returns. Failure to remove this will result in incorrect results for coverage analysis. */ x = TREE_VALUE (t); #ifdef USE_MAPPED_LOCATION SET_EXPR_LOCATION (x, UNKNOWN_LOCATION); #else SET_EXPR_LOCUS (x, NULL); #endif tsi_link_after (&i, x, TSI_CONTINUE_LINKING); } if (data.block != DECL_INITIAL (current_function_decl)) abort (); BLOCK_SUBBLOCKS (data.block) = blocks_nreverse (BLOCK_SUBBLOCKS (data.block)); clear_block_marks (data.block); } struct tree_opt_pass pass_lower_cf = { "lower", /* name */ NULL, /* gate */ lower_function_body, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_gimple_any, /* properties_required */ PROP_gimple_lcf, /* properties_provided */ PROP_gimple_any, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func /* todo_flags_finish */ }; /* Lowers the EXPR. Unlike gimplification the statements are not relowered when they are changed -- if this has to be done, the lowering routine must do it explicitly. DATA is passed through the recursion. */ void lower_stmt_body (tree expr, struct lower_data *data) { tree_stmt_iterator tsi; for (tsi = tsi_start (expr); !tsi_end_p (tsi); ) lower_stmt (&tsi, data); } /* Lowers statement TSI. DATA is passed through the recursion. 
*/ static void lower_stmt (tree_stmt_iterator *tsi, struct lower_data *data) { tree stmt = tsi_stmt (*tsi); if (EXPR_HAS_LOCATION (stmt) && data) TREE_BLOCK (stmt) = data->block; switch (TREE_CODE (stmt)) { case BIND_EXPR: lower_bind_expr (tsi, data); return; case COND_EXPR: lower_cond_expr (tsi, data); return; case RETURN_EXPR: lower_return_expr (tsi, data); return; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: lower_stmt_body (TREE_OPERAND (stmt, 0), data); lower_stmt_body (TREE_OPERAND (stmt, 1), data); break; case CATCH_EXPR: lower_stmt_body (CATCH_BODY (stmt), data); break; case EH_FILTER_EXPR: lower_stmt_body (EH_FILTER_FAILURE (stmt), data); break; case NOP_EXPR: case ASM_EXPR: case MODIFY_EXPR: case CALL_EXPR: case GOTO_EXPR: case LABEL_EXPR: case VA_ARG_EXPR: case SWITCH_EXPR: break; default: print_node_brief (stderr, "", stmt, 0); case COMPOUND_EXPR: abort (); } tsi_next (tsi); } /* Lowers a bind_expr TSI. DATA is passed through the recursion. */ static void lower_bind_expr (tree_stmt_iterator *tsi, struct lower_data *data) { tree old_block = data->block; tree stmt = tsi_stmt (*tsi); tree new_block = BIND_EXPR_BLOCK (stmt); if (new_block) { if (new_block == old_block) { /* The outermost block of the original function may not be the outermost statement chain of the gimplified function. So we may see the outermost block just inside the function. */ if (new_block != DECL_INITIAL (current_function_decl)) abort (); new_block = NULL; } else { /* We do not expect to handle duplicate blocks. */ if (TREE_ASM_WRITTEN (new_block)) abort (); TREE_ASM_WRITTEN (new_block) = 1; /* Block tree may get clobbered by inlining. Normally this would be fixed in rest_of_decl_compilation using block notes, but since we are not going to emit them, it is up to us. */ BLOCK_CHAIN (new_block) = BLOCK_SUBBLOCKS (old_block); BLOCK_SUBBLOCKS (old_block) = new_block; BLOCK_SUBBLOCKS (new_block) = NULL_TREE; BLOCK_SUPERCONTEXT (new_block) = old_block; data->block = new_block; } } record_vars (BIND_EXPR_VARS (stmt)); lower_stmt_body (BIND_EXPR_BODY (stmt), data); if (new_block) { if (data->block != new_block) abort (); BLOCK_SUBBLOCKS (new_block) = blocks_nreverse (BLOCK_SUBBLOCKS (new_block)); data->block = old_block; } /* The BIND_EXPR no longer carries any useful information -- kill it. */ tsi_link_before (tsi, BIND_EXPR_BODY (stmt), TSI_SAME_STMT); tsi_delink (tsi); } /* Try to determine if we can fall out of the bottom of BLOCK. This guess need not be 100% accurate; simply be conservative and return true if we don't know. This is used only to avoid stupidly generating extra code. If we're wrong, we'll just delete the extra code later. */ bool block_may_fallthru (tree block) { tree stmt = expr_last (block); switch (stmt ? TREE_CODE (stmt) : ERROR_MARK) { case GOTO_EXPR: case RETURN_EXPR: case RESX_EXPR: case SWITCH_EXPR: /* Easy cases. If the last statement of the block implies control transfer, then we can't fall through. */ return false; case COND_EXPR: if (block_may_fallthru (COND_EXPR_THEN (stmt))) return true; return block_may_fallthru (COND_EXPR_ELSE (stmt)); case BIND_EXPR: return block_may_fallthru (BIND_EXPR_BODY (stmt)); case TRY_FINALLY_EXPR: return block_may_fallthru (TREE_OPERAND (stmt, 1)); case MODIFY_EXPR: if (TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR) stmt = TREE_OPERAND (stmt, 1); else return true; /* FALLTHRU */ case CALL_EXPR: /* Functions that do not return do not fall through. */ return (call_expr_flags (stmt) & ECF_NORETURN) == 0; default: return true; } } /* Lowers a cond_expr TSI. 
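In the general case the conditional is rewritten so that both arms become plain gotos and the arm bodies are emitted after it; schematically (the labels are synthesized, the layout is only illustrative):

        if (p) goto then_lab; else goto else_lab;
        then_lab:  ... then branch ...  goto end_lab;
        else_lab:  ... else branch ...
        end_lab:

Arms that already are simple gotos, or that have no side effects, are short-circuited so the extra labels and jumps are not created.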
DATA is passed through the recursion. */ static void lower_cond_expr (tree_stmt_iterator *tsi, struct lower_data *data) { tree stmt = tsi_stmt (*tsi); bool then_is_goto, else_is_goto; tree then_branch, else_branch; tree then_goto, else_goto; then_branch = COND_EXPR_THEN (stmt); else_branch = COND_EXPR_ELSE (stmt); lower_stmt_body (then_branch, data); lower_stmt_body (else_branch, data); then_goto = expr_only (then_branch); then_is_goto = then_goto && simple_goto_p (then_goto); else_goto = expr_only (else_branch); else_is_goto = else_goto && simple_goto_p (else_goto); if (!then_is_goto || !else_is_goto) { tree then_label, else_label, end_label, t; then_label = NULL_TREE; else_label = NULL_TREE; end_label = NULL_TREE; /* Replace the cond_expr with explicit gotos. */ if (!then_is_goto) { t = build1 (LABEL_EXPR, void_type_node, NULL_TREE); if (TREE_SIDE_EFFECTS (then_branch)) then_label = t; else end_label = t; then_goto = build_and_jump (&LABEL_EXPR_LABEL (t)); } if (!else_is_goto) { t = build1 (LABEL_EXPR, void_type_node, NULL_TREE); if (TREE_SIDE_EFFECTS (else_branch)) else_label = t; else { /* Both THEN and ELSE can be no-ops if one or both contained an empty BIND_EXPR that was associated with the toplevel block of an inlined function. In that case remove_useless_stmts can't have cleaned things up for us; kill the whole conditional now. */ if (end_label) { tsi_delink (tsi); return; } else end_label = t; } else_goto = build_and_jump (&LABEL_EXPR_LABEL (t)); } if (then_label) { bool may_fallthru = block_may_fallthru (then_branch); tsi_link_after (tsi, then_label, TSI_CONTINUE_LINKING); tsi_link_after (tsi, then_branch, TSI_CONTINUE_LINKING); if (else_label && may_fallthru) { end_label = build1 (LABEL_EXPR, void_type_node, NULL_TREE); t = build_and_jump (&LABEL_EXPR_LABEL (end_label)); tsi_link_after (tsi, t, TSI_CONTINUE_LINKING); } } if (else_label) { tsi_link_after (tsi, else_label, TSI_CONTINUE_LINKING); tsi_link_after (tsi, else_branch, TSI_CONTINUE_LINKING); } if (end_label) tsi_link_after (tsi, end_label, TSI_CONTINUE_LINKING); } COND_EXPR_THEN (stmt) = then_goto; COND_EXPR_ELSE (stmt) = else_goto; tsi_next (tsi); } static void lower_return_expr (tree_stmt_iterator *tsi, struct lower_data *data) { tree stmt = tsi_stmt (*tsi); tree value, t, label; /* Extract the value being returned. */ value = TREE_OPERAND (stmt, 0); if (value && TREE_CODE (value) == MODIFY_EXPR) value = TREE_OPERAND (value, 1); /* Match this up with an existing return statement that's been created. */ for (t = data->return_statements; t ; t = TREE_CHAIN (t)) { tree tvalue = TREE_OPERAND (TREE_VALUE (t), 0); if (tvalue && TREE_CODE (tvalue) == MODIFY_EXPR) tvalue = TREE_OPERAND (tvalue, 1); if (value == tvalue) { label = TREE_PURPOSE (t); goto found; } } /* Not found. Create a new label and record the return statement. */ label = create_artificial_label (); data->return_statements = tree_cons (label, stmt, data->return_statements); /* Generate a goto statement and remove the return statement. */ found: t = build (GOTO_EXPR, void_type_node, label); SET_EXPR_LOCUS (t, EXPR_LOCUS (stmt)); tsi_link_before (tsi, t, TSI_SAME_STMT); tsi_delink (tsi); } /* Record the variables in VARS. */ void record_vars (tree vars) { for (; vars; vars = TREE_CHAIN (vars)) { tree var = vars; /* Nothing to do in this case. */ if (DECL_EXTERNAL (var)) continue; if (TREE_CODE (var) == FUNCTION_DECL) continue; /* Record the variable. 
*/ cfun->unexpanded_var_list = tree_cons (NULL_TREE, var, cfun->unexpanded_var_list); } } /* Check whether to expand a variable VAR. */ static bool expand_var_p (tree var) { struct var_ann_d *ann; if (TREE_CODE (var) != VAR_DECL) return true; /* Remove all unused, unaliased temporaries. Also remove unused, unaliased local variables during highly optimizing compilations. */ ann = var_ann (var); if (ann && ! ann->may_aliases && ! ann->used && ! ann->has_hidden_use && ! TREE_ADDRESSABLE (var) && ! TREE_THIS_VOLATILE (var) && (DECL_ARTIFICIAL (var) || optimize >= 2)) return false; return true; } /* Throw away variables that are unused. */ static void remove_useless_vars (void) { tree var, *cell; for (cell = &cfun->unexpanded_var_list; *cell; ) { var = TREE_VALUE (*cell); if (!expand_var_p (var)) { *cell = TREE_CHAIN (*cell); continue; } cell = &TREE_CHAIN (*cell); } } /* Expand variables in the unexpanded_var_list. */ void expand_used_vars (void) { tree cell; cfun->unexpanded_var_list = nreverse (cfun->unexpanded_var_list); for (cell = cfun->unexpanded_var_list; cell; cell = TREE_CHAIN (cell)) expand_var (TREE_VALUE (cell)); cfun->unexpanded_var_list = NULL_TREE; } struct tree_opt_pass pass_remove_useless_vars = { "vars", /* name */ NULL, /* gate */ remove_useless_vars, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ 0, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func /* todo_flags_finish */ }; /* Iterator routines for manipulating GENERIC and GIMPLE tree statements. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Andrew MacLeod This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This is a cache of STATEMENT_LIST nodes. We create and destroy them fairly often during gimplification. */ static GTY ((deletable (""))) tree stmt_list_cache; tree alloc_stmt_list (void) { tree list = stmt_list_cache; if (list) { stmt_list_cache = TREE_CHAIN (list); memset (list, 0, sizeof(struct tree_common)); TREE_SET_CODE (list, STATEMENT_LIST); } else list = make_node (STATEMENT_LIST); TREE_TYPE (list) = void_type_node; return list; } void free_stmt_list (tree t) { #ifdef ENABLE_CHECKING if (STATEMENT_LIST_HEAD (t) || STATEMENT_LIST_TAIL (t)) abort (); #endif TREE_CHAIN (t) = stmt_list_cache; stmt_list_cache = t; } /* Links a statement, or a chain of statements, before the current stmt. */ void tsi_link_before (tree_stmt_iterator *i, tree t, enum tsi_iterator_update mode) { struct tree_statement_list_node *head, *tail, *cur; /* Die on looping. */ if (t == i->container) abort (); if (TREE_CODE (t) == STATEMENT_LIST) { head = STATEMENT_LIST_HEAD (t); tail = STATEMENT_LIST_TAIL (t); STATEMENT_LIST_HEAD (t) = NULL; STATEMENT_LIST_TAIL (t) = NULL; free_stmt_list (t); /* Empty statement lists need no work. 
*/ if (!head || !tail) { if (head != tail) abort (); return; } } else { head = ggc_alloc (sizeof (*head)); head->prev = NULL; head->next = NULL; head->stmt = t; tail = head; } TREE_SIDE_EFFECTS (i->container) = 1; cur = i->ptr; /* Link it into the list. */ if (cur) { head->prev = cur->prev; if (head->prev) head->prev->next = head; else STATEMENT_LIST_HEAD (i->container) = head; tail->next = cur; cur->prev = tail; } else { if (STATEMENT_LIST_TAIL (i->container)) abort (); STATEMENT_LIST_HEAD (i->container) = head; STATEMENT_LIST_TAIL (i->container) = tail; } /* Update the iterator, if requested. */ switch (mode) { case TSI_NEW_STMT: case TSI_CONTINUE_LINKING: case TSI_CHAIN_START: i->ptr = head; break; case TSI_CHAIN_END: i->ptr = tail; break; case TSI_SAME_STMT: if (!cur) abort (); break; } } /* Links a statement, or a chain of statements, after the current stmt. */ void tsi_link_after (tree_stmt_iterator *i, tree t, enum tsi_iterator_update mode) { struct tree_statement_list_node *head, *tail, *cur; /* Die on looping. */ if (t == i->container) abort (); if (TREE_CODE (t) == STATEMENT_LIST) { head = STATEMENT_LIST_HEAD (t); tail = STATEMENT_LIST_TAIL (t); STATEMENT_LIST_HEAD (t) = NULL; STATEMENT_LIST_TAIL (t) = NULL; free_stmt_list (t); /* Empty statement lists need no work. */ if (!head || !tail) { if (head != tail) abort (); return; } } else { head = ggc_alloc (sizeof (*head)); head->prev = NULL; head->next = NULL; head->stmt = t; tail = head; } TREE_SIDE_EFFECTS (i->container) = 1; cur = i->ptr; /* Link it into the list. */ if (cur) { tail->next = cur->next; if (tail->next) tail->next->prev = tail; else STATEMENT_LIST_TAIL (i->container) = tail; head->prev = cur; cur->next = head; } else { if (STATEMENT_LIST_TAIL (i->container)) abort (); STATEMENT_LIST_HEAD (i->container) = head; STATEMENT_LIST_TAIL (i->container) = tail; } /* Update the iterator, if requested. */ switch (mode) { case TSI_NEW_STMT: case TSI_CHAIN_START: i->ptr = head; break; case TSI_CONTINUE_LINKING: case TSI_CHAIN_END: i->ptr = tail; break; case TSI_SAME_STMT: if (!cur) abort (); break; } } /* Remove a stmt from the tree list. The iterator is updated to point to the next stmt. */ void tsi_delink (tree_stmt_iterator *i) { struct tree_statement_list_node *cur, *next, *prev; cur = i->ptr; next = cur->next; prev = cur->prev; if (prev) prev->next = next; else STATEMENT_LIST_HEAD (i->container) = next; if (next) next->prev = prev; else STATEMENT_LIST_TAIL (i->container) = prev; if (!next && !prev) TREE_SIDE_EFFECTS (i->container) = 0; i->ptr = next; } /* Move all statements in the statement list after I to a new statement list. I itself is unchanged. */ tree tsi_split_statement_list_after (const tree_stmt_iterator *i) { struct tree_statement_list_node *cur, *next; tree old_sl, new_sl; cur = i->ptr; /* How can we possibly split after the end, or before the beginning? */ if (cur == NULL) abort (); next = cur->next; old_sl = i->container; new_sl = alloc_stmt_list (); TREE_SIDE_EFFECTS (new_sl) = 1; STATEMENT_LIST_HEAD (new_sl) = next; STATEMENT_LIST_TAIL (new_sl) = STATEMENT_LIST_TAIL (old_sl); STATEMENT_LIST_TAIL (old_sl) = cur; cur->next = NULL; next->prev = NULL; return new_sl; } /* Move all statements in the statement list before I to a new statement list. I is set to the head of the new list. */ tree tsi_split_statement_list_before (tree_stmt_iterator *i) { struct tree_statement_list_node *cur, *prev; tree old_sl, new_sl; cur = i->ptr; /* How can we possibly split after the end, or before the beginning? 
*/ if (cur == NULL) abort (); prev = cur->prev; old_sl = i->container; new_sl = alloc_stmt_list (); TREE_SIDE_EFFECTS (new_sl) = 1; i->container = new_sl; STATEMENT_LIST_HEAD (new_sl) = cur; STATEMENT_LIST_TAIL (new_sl) = STATEMENT_LIST_TAIL (old_sl); STATEMENT_LIST_TAIL (old_sl) = prev; cur->prev = NULL; prev->next = NULL; return new_sl; } /* Return the first expression in a sequence of COMPOUND_EXPRs, or in a STATEMENT_LIST. */ tree expr_first (tree expr) { if (expr == NULL_TREE) return expr; if (TREE_CODE (expr) == STATEMENT_LIST) { struct tree_statement_list_node *n = STATEMENT_LIST_HEAD (expr); return n ? n->stmt : NULL_TREE; } while (TREE_CODE (expr) == COMPOUND_EXPR) expr = TREE_OPERAND (expr, 0); return expr; } /* Return the last expression in a sequence of COMPOUND_EXPRs, or in a STATEMENT_LIST. */ tree expr_last (tree expr) { if (expr == NULL_TREE) return expr; if (TREE_CODE (expr) == STATEMENT_LIST) { struct tree_statement_list_node *n = STATEMENT_LIST_TAIL (expr); return n ? n->stmt : NULL_TREE; } while (TREE_CODE (expr) == COMPOUND_EXPR) expr = TREE_OPERAND (expr, 1); return expr; } /* If EXPR is a single statement, naked or in a STATEMENT_LIST, then return it. Otherwise return NULL. */ tree expr_only (tree expr) { if (expr == NULL_TREE) return NULL_TREE; if (TREE_CODE (expr) == STATEMENT_LIST) { struct tree_statement_list_node *n = STATEMENT_LIST_TAIL (expr); if (n && STATEMENT_LIST_HEAD (expr) == n) return n->stmt; else return NULL_TREE; } if (TREE_CODE (expr) == COMPOUND_EXPR) return NULL_TREE; return expr; } /* Type information for tree-iterator.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_rd_gt_tree_iterator_h[] = { { &stmt_list_cache, 1, sizeof (stmt_list_cache), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Generic routines for manipulating PHIs Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Rewriting a function into SSA form can create a huge number of PHIs many of which may be thrown away shortly after their creation if jumps were threaded through PHI nodes. 
While our garbage collection mechanisms will handle this situation, it is extremely wasteful to create nodes and throw them away, especially when the nodes can be reused. For PR 8361, we can significantly reduce the number of nodes allocated and thus the total amount of memory allocated by managing PHIs a little. This additionally helps reduce the amount of work done by the garbage collector. Similar results have been seen on a wider variety of tests (such as the compiler itself). Right now we maintain our free list on a per-function basis. It may or may not make sense to maintain the free list for the duration of a compilation unit. We could also use a zone allocator for these objects since they have a very well defined lifetime. If someone wants to experiment with that this is the place to try it. PHI nodes have different sizes, so we can't have a single list of all the PHI nodes as it would be too expensive to walk down that list to find a PHI of a suitable size. Instead we have an array of lists of free PHI nodes. The array is indexed by the number of PHI alternatives that PHI node can hold. Except for the last array member, which holds all remaining PHI nodes. So to find a free PHI node, we compute its index into the free PHI node array and see if there are any elements with an exact match. If so, then we are done. Otherwise, we test the next larger size up and continue until we are in the last array element. We do not actually walk members of the last array element. While it might allow us to pick up a few reusable PHI nodes, it could potentially be very expensive if the program has released a bunch of large PHI nodes, but keeps asking for even larger PHI nodes. Experiments have shown that walking the elements of the last array entry would result in finding less than .1% additional reusable PHI nodes. Note that we can never have less than two PHI argument slots. Thus, the -2 on all the calculations below. */ #define NUM_BUCKETS 10 static GTY ((deletable (""))) tree free_phinodes[NUM_BUCKETS - 2]; static unsigned long free_phinode_count; static int ideal_phi_node_len (int); static void resize_phi_node (tree *, int); #ifdef GATHER_STATISTICS unsigned int phi_nodes_reused; unsigned int phi_nodes_created; #endif /* Initialize management of PHIs. */ void init_phinodes (void) { int i; for (i = 0; i < NUM_BUCKETS - 2; i++) free_phinodes[i] = NULL; free_phinode_count = 0; } /* Finalize management of PHIs. */ void fini_phinodes (void) { int i; for (i = 0; i < NUM_BUCKETS - 2; i++) free_phinodes[i] = NULL; free_phinode_count = 0; } /* Dump some simple statistics regarding the re-use of PHI nodes. */ #ifdef GATHER_STATISTICS void phinodes_print_statistics (void) { fprintf (stderr, "PHI nodes allocated: %u\n", phi_nodes_created); fprintf (stderr, "PHI nodes reused: %u\n", phi_nodes_reused); } #endif /* Given LEN, the original number of requested PHI arguments, return a new, "ideal" length for the PHI node. The "ideal" length rounds the total size of the PHI node up to the next power of two bytes. Rounding up will not result in wasting any memory since the size request will be rounded up by the GC system anyway. [ Note this is not entirely true since the original length might have fit on one of the special GC pages. ] By rounding up, we may avoid the need to reallocate the PHI node later if we increase the number of arguments for the PHI. */ static int ideal_phi_node_len (int len) { size_t size, new_size; int log2, new_len; /* We do not support allocations of less than two PHI argument slots. 
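As a worked illustration of the rounding performed below (the byte sizes are invented for the example and are not taken from any real target): if the base phi node took 72 bytes and each argument slot 12 bytes, a request for 4 arguments needs 72 + 3 * 12 = 108 bytes; rounding up to the next power of two gives 128 bytes, and the (128 - 108) / 12 = 1 spare slot bumps the ideal length to 5.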
*/ if (len < 2) len = 2; /* Compute the number of bytes of the original request. */ size = sizeof (struct tree_phi_node) + (len - 1) * sizeof (struct phi_arg_d); /* Round it up to the next power of two. */ log2 = ceil_log2 (size); new_size = 1 << log2; /* Now compute and return the number of PHI argument slots given an ideal size allocation. */ new_len = len + (new_size - size) / sizeof (struct phi_arg_d); return new_len; } /* Return a PHI node for variable VAR defined in statement STMT. STMT may be an empty statement for artificial references (e.g., default definitions created when a variable is used without a preceding definition). */ tree make_phi_node (tree var, int len) { tree phi; int size; int bucket = NUM_BUCKETS - 2; len = ideal_phi_node_len (len); size = sizeof (struct tree_phi_node) + (len - 1) * sizeof (struct phi_arg_d); if (free_phinode_count) for (bucket = len - 2; bucket < NUM_BUCKETS - 2; bucket++) if (free_phinodes[bucket]) break; /* If our free list has an element, then use it. */ if (bucket < NUM_BUCKETS - 2 && PHI_ARG_CAPACITY (free_phinodes[bucket]) >= len) { free_phinode_count--; phi = free_phinodes[bucket]; free_phinodes[bucket] = PHI_CHAIN (free_phinodes[bucket]); #ifdef GATHER_STATISTICS phi_nodes_reused++; #endif } else { phi = ggc_alloc (size); #ifdef GATHER_STATISTICS phi_nodes_created++; tree_node_counts[(int) phi_kind]++; tree_node_sizes[(int) phi_kind] += size; #endif } memset (phi, 0, size); TREE_SET_CODE (phi, PHI_NODE); PHI_ARG_CAPACITY (phi) = len; if (TREE_CODE (var) == SSA_NAME) SET_PHI_RESULT (phi, var); else SET_PHI_RESULT (phi, make_ssa_name (var, phi)); return phi; } /* We no longer need PHI, release it so that it may be reused. */ void release_phi_node (tree phi) { int bucket; int len = PHI_ARG_CAPACITY (phi); bucket = len > NUM_BUCKETS - 1 ? NUM_BUCKETS - 1 : len; bucket -= 2; PHI_CHAIN (phi) = free_phinodes[bucket]; free_phinodes[bucket] = phi; free_phinode_count++; } /* Resize an existing PHI node. The only way is up. Return the possibly relocated phi. */ static void resize_phi_node (tree *phi, int len) { int size, old_size; tree new_phi; int i, old_len, bucket = NUM_BUCKETS - 2; #ifdef ENABLE_CHECKING if (len < PHI_ARG_CAPACITY (*phi)) abort (); #endif /* Note that OLD_SIZE is guaranteed to be smaller than SIZE. */ old_size = (sizeof (struct tree_phi_node) + (PHI_ARG_CAPACITY (*phi) - 1) * sizeof (struct phi_arg_d)); size = sizeof (struct tree_phi_node) + (len - 1) * sizeof (struct phi_arg_d); if (free_phinode_count) for (bucket = len - 2; bucket < NUM_BUCKETS - 2; bucket++) if (free_phinodes[bucket]) break; /* If our free list has an element, then use it. */ if (bucket < NUM_BUCKETS - 2 && PHI_ARG_CAPACITY (free_phinodes[bucket]) >= len) { free_phinode_count--; new_phi = free_phinodes[bucket]; free_phinodes[bucket] = PHI_CHAIN (free_phinodes[bucket]); #ifdef GATHER_STATISTICS phi_nodes_reused++; #endif } else { new_phi = ggc_alloc (size); #ifdef GATHER_STATISTICS phi_nodes_created++; tree_node_counts[(int) phi_kind]++; tree_node_sizes[(int) phi_kind] += size; #endif } memcpy (new_phi, *phi, old_size); old_len = PHI_ARG_CAPACITY (new_phi); PHI_ARG_CAPACITY (new_phi) = len; for (i = old_len; i < len; i++) { SET_PHI_ARG_DEF (new_phi, i, NULL_TREE); PHI_ARG_EDGE (new_phi, i) = NULL; PHI_ARG_NONZERO (new_phi, i) = false; } *phi = new_phi; } /* Create a new PHI node for variable VAR at basic block BB. 
*/ tree create_phi_node (tree var, basic_block bb) { tree phi; phi = make_phi_node (var, bb_ann (bb)->num_preds); /* This is a new phi node, so note that is has not yet been rewritten. */ PHI_REWRITTEN (phi) = 0; /* Add the new PHI node to the list of PHI nodes for block BB. */ PHI_CHAIN (phi) = phi_nodes (bb); bb_ann (bb)->phi_nodes = phi; /* Associate BB to the PHI node. */ set_bb_for_stmt (phi, bb); return phi; } /* Add a new argument to PHI node PHI. DEF is the incoming reaching definition and E is the edge through which DEF reaches PHI. The new argument is added at the end of the argument list. If PHI has reached its maximum capacity, add a few slots. In this case, PHI points to the reallocated phi node when we return. */ void add_phi_arg (tree *phi, tree def, edge e) { int i = PHI_NUM_ARGS (*phi); if (i >= PHI_ARG_CAPACITY (*phi)) { tree old_phi = *phi; /* Resize the phi. Unfortunately, this may also relocate it. */ resize_phi_node (phi, ideal_phi_node_len (i + 4)); /* The result of the phi is defined by this phi node. */ SSA_NAME_DEF_STMT (PHI_RESULT (*phi)) = *phi; /* If the PHI was relocated, update the PHI chains appropriately and release the old PHI node. */ if (*phi != old_phi) { release_phi_node (old_phi); /* Update the list head if replacing the first listed phi. */ if (phi_nodes (e->dest) == old_phi) bb_ann (e->dest)->phi_nodes = *phi; else { /* Traverse the list looking for the phi node to chain to. */ tree p; for (p = phi_nodes (e->dest); p && PHI_CHAIN (p) != old_phi; p = PHI_CHAIN (p)) ; if (!p) abort (); PHI_CHAIN (p) = *phi; } } } /* Copy propagation needs to know what object occur in abnormal PHI nodes. This is a convenient place to record such information. */ if (e->flags & EDGE_ABNORMAL) { SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def) = 1; SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (*phi)) = 1; } SET_PHI_ARG_DEF (*phi, i, def); PHI_ARG_EDGE (*phi, i) = e; PHI_ARG_NONZERO (*phi, i) = false; PHI_NUM_ARGS (*phi)++; } /* Remove a PHI argument from PHI. BLOCK is the predecessor block where the PHI argument is coming from. */ void remove_phi_arg (tree phi, basic_block block) { int i, num_elem = PHI_NUM_ARGS (phi); for (i = 0; i < num_elem; i++) { basic_block src_bb; src_bb = PHI_ARG_EDGE (phi, i)->src; if (src_bb == block) { remove_phi_arg_num (phi, i); return; } } } /* Remove the Ith argument from PHI's argument list. This routine assumes ordering of alternatives in the vector is not important and implements removal by swapping the last alternative with the alternative we want to delete, then shrinking the vector. */ void remove_phi_arg_num (tree phi, int i) { int num_elem = PHI_NUM_ARGS (phi); /* If we are not at the last element, switch the last element with the element we want to delete. */ if (i != num_elem - 1) { SET_PHI_ARG_DEF (phi, i, PHI_ARG_DEF (phi, num_elem - 1)); PHI_ARG_EDGE (phi, i) = PHI_ARG_EDGE (phi, num_elem - 1); PHI_ARG_NONZERO (phi, i) = PHI_ARG_NONZERO (phi, num_elem - 1); } /* Shrink the vector and return. */ SET_PHI_ARG_DEF (phi, num_elem - 1, NULL_TREE); PHI_ARG_EDGE (phi, num_elem - 1) = NULL; PHI_ARG_NONZERO (phi, num_elem - 1) = false; PHI_NUM_ARGS (phi)--; /* If we removed the last PHI argument, then go ahead and remove the PHI node. */ if (PHI_NUM_ARGS (phi) == 0) remove_phi_node (phi, NULL, bb_for_stmt (phi)); } /* Remove PHI node PHI from basic block BB. If PREV is non-NULL, it is used as the node immediately before PHI in the linked list. 
*/ void remove_phi_node (tree phi, tree prev, basic_block bb) { if (prev) { /* Rewire the list if we are given a PREV pointer. */ PHI_CHAIN (prev) = PHI_CHAIN (phi); /* If we are deleting the PHI node, then we should release the SSA_NAME node so that it can be reused. */ release_ssa_name (PHI_RESULT (phi)); release_phi_node (phi); } else if (phi == phi_nodes (bb)) { /* Update the list head if removing the first element. */ bb_ann (bb)->phi_nodes = PHI_CHAIN (phi); /* If we are deleting the PHI node, then we should release the SSA_NAME node so that it can be reused. */ release_ssa_name (PHI_RESULT (phi)); release_phi_node (phi); } else { /* Traverse the list looking for the node to remove. */ tree prev, t; prev = NULL_TREE; for (t = phi_nodes (bb); t && t != phi; t = PHI_CHAIN (t)) prev = t; if (t) remove_phi_node (t, prev, bb); } } /* Remove all the PHI nodes for variables in the VARS bitmap. */ void remove_all_phi_nodes_for (bitmap vars) { basic_block bb; FOR_EACH_BB (bb) { /* Build a new PHI list for BB without variables in VARS. */ tree phi, new_phi_list, last_phi, next; last_phi = new_phi_list = NULL_TREE; for (phi = phi_nodes (bb), next = NULL; phi; phi = next) { tree var = SSA_NAME_VAR (PHI_RESULT (phi)); next = PHI_CHAIN (phi); /* Only add PHI nodes for variables not in VARS. */ if (!bitmap_bit_p (vars, var_ann (var)->uid)) { /* If we're not removing this PHI node, then it must have been rewritten by a previous call into the SSA rewriter. Note that fact in PHI_REWRITTEN. */ PHI_REWRITTEN (phi) = 1; if (new_phi_list == NULL_TREE) new_phi_list = last_phi = phi; else { PHI_CHAIN (last_phi) = phi; last_phi = phi; } } else { /* If we are deleting the PHI node, then we should release the SSA_NAME node so that it can be reused. */ release_ssa_name (PHI_RESULT (phi)); release_phi_node (phi); } } /* Make sure the last node in the new list has no successors. */ if (last_phi) PHI_CHAIN (last_phi) = NULL_TREE; bb_ann (bb)->phi_nodes = new_phi_list; #if defined ENABLE_CHECKING for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi)) { tree var = SSA_NAME_VAR (PHI_RESULT (phi)); if (bitmap_bit_p (vars, var_ann (var)->uid)) abort (); } #endif } } /* Type information for tree-phinodes.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_rd_gt_tree_phinodes_h[] = { { &free_phinodes, 1, sizeof (free_phinodes), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Generic routines for manipulating SSA_NAME expressions Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Rewriting a function into SSA form can create a huge number of SSA_NAMEs, many of which may be thrown away shortly after their creation if jumps were threaded through PHI nodes. While our garbage collection mechanisms will handle this situation, it is extremely wasteful to create nodes and throw them away, especially when the nodes can be reused. For PR 8361, we can significantly reduce the number of nodes allocated and thus the total amount of memory allocated by managing SSA_NAMEs a little. This additionally helps reduce the amount of work done by the garbage collector. Similar results have been seen on a wider variety of tests (such as the compiler itself). Right now we maintain our free list on a per-function basis. It may or may not make sense to maintain the free list for the duration of a compilation unit. External code should rely solely upon HIGHEST_SSA_VERSION and the externally defined functions. External code should not know about the details of the free list management. External code should also not assume the version number on nodes is monotonically increasing. We reuse the version number when we reuse an SSA_NAME expression. This helps keep arrays and bitmaps more compact. We could also use a zone allocator for these objects since they have a very well defined lifetime. If someone wants to experiment with that this is the place to try it. */ /* Array of all SSA_NAMEs used in the function. */ varray_type ssa_names; /* Free list of SSA_NAMEs. This list is wiped at the end of each function after we leave SSA form. */ static GTY (()) tree free_ssanames; /* Version numbers with special meanings. We start allocating new version numbers after the special ones. */ #define UNUSED_NAME_VERSION 0 #ifdef GATHER_STATISTICS unsigned int ssa_name_nodes_reused; unsigned int ssa_name_nodes_created; #endif /* Initialize management of SSA_NAMEs. */ void init_ssanames (void) { VARRAY_TREE_INIT (ssa_names, 50, "ssa_names table"); /* Version 0 is special, so reserve the first slot in the table. Though currently unused, we may use version 0 in alias analysis as part of the heuristics used to group aliases when the alias sets are too large. */ VARRAY_PUSH_TREE (ssa_names, NULL_TREE); free_ssanames = NULL; } /* Finalize management of SSA_NAMEs. */ void fini_ssanames (void) { free_ssanames = NULL; } /* Dump some simple statistics regarding the re-use of SSA_NAME nodes. */ #ifdef GATHER_STATISTICS void ssanames_print_statistics (void) { fprintf (stderr, "SSA_NAME nodes allocated: %u\n", ssa_name_nodes_created); fprintf (stderr, "SSA_NAME nodes reused: %u\n", ssa_name_nodes_reused); } #endif /* Return an SSA_NAME node for variable VAR defined in statement STMT. STMT may be an empty statement for artificial references (e.g., default definitions created when a variable is used without a preceding definition). 
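A typical use, mirroring what callers earlier in this file do (the variable names are illustrative):

        var = make_ssa_name (tmp, stmt);
        TREE_OPERAND (stmt, 0) = var;

after which SSA_NAME_DEF_STMT (var) points back at STMT and SSA_NAME_VAR (var) is TMP.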
*/ tree make_ssa_name (tree var, tree stmt) { tree t; #if defined ENABLE_CHECKING if ((!DECL_P (var) && TREE_CODE (var) != INDIRECT_REF) || (!IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (stmt))) && TREE_CODE (stmt) != PHI_NODE)) abort (); #endif /* If our free list has an element, then use it. Also reuse the SSA version number of the element on the free list which helps keep sbitmaps and arrays sized HIGHEST_SSA_VERSION smaller. */ if (free_ssanames) { unsigned int save_version; t = free_ssanames; free_ssanames = TREE_CHAIN (free_ssanames); #ifdef GATHER_STATISTICS ssa_name_nodes_reused++; #endif /* Clear the node so that it looks just like one we would have received from make_node. */ save_version = SSA_NAME_VERSION (t); memset (t, 0, tree_size (t)); TREE_SET_CODE (t, SSA_NAME); SSA_NAME_VERSION (t) = save_version; } else { t = make_node (SSA_NAME); SSA_NAME_VERSION (t) = num_ssa_names; VARRAY_PUSH_TREE (ssa_names, t); #ifdef GATHER_STATISTICS ssa_name_nodes_created++; #endif } TREE_TYPE (t) = TREE_TYPE (var); SSA_NAME_VAR (t) = var; SSA_NAME_DEF_STMT (t) = stmt; SSA_NAME_PTR_INFO (t) = NULL; return t; } /* We no longer need the SSA_NAME expression VAR, release it so that it may be reused. Note it is assumed that no calls to make_ssa_name will be made until all uses of the ssa name are released and that the only use of the SSA_NAME expression is to check its SSA_NAME_VAR. All other fields must be assumed clobbered. */ void release_ssa_name (tree var) { /* release_ssa_name can be called multiple times on a single SSA_NAME. However, it should only end up on our free list one time. We keep a status bit in the SSA_NAME node itself to indicate it has been put on the free list. Note that once on the freelist you can not reference the SSA_NAME's defining statement. */ if (! SSA_NAME_IN_FREE_LIST (var)) { SSA_NAME_IN_FREE_LIST (var) = 1; TREE_CHAIN (var) = free_ssanames; free_ssanames = var; } } /* Creates a duplicate of a ssa name NAME defined in statement STMT. */ tree duplicate_ssa_name (tree name, tree stmt) { tree new_name = make_ssa_name (SSA_NAME_VAR (name), stmt); struct ptr_info_def *old_ptr_info = SSA_NAME_PTR_INFO (name); struct ptr_info_def *new_ptr_info; if (!old_ptr_info) return new_name; new_ptr_info = ggc_alloc (sizeof (struct ptr_info_def)); *new_ptr_info = *old_ptr_info; if (old_ptr_info->pt_vars) { new_ptr_info->pt_vars = BITMAP_GGC_ALLOC (); bitmap_copy (new_ptr_info->pt_vars, old_ptr_info->pt_vars); } SSA_NAME_PTR_INFO (new_name) = new_ptr_info; return new_name; } /* Type information for tree-ssanames.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. 
*/ const struct ggc_root_tab gt_ggc_r_gt_tree_ssanames_h[] = { { &free_ssanames, 1, sizeof (free_ssanames), >_ggc_mx_tree_node, >_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; /* Scalar Replacement of Aggregates (SRA) converts some structure references into scalar references, exposing them to the scalar optimizers. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* These RTL headers are needed for basic-block.h. */ /* expr.h is needed for MOVE_RATIO. */ /* This object of this pass is to replace a non-addressable aggregate with a set of independent variables. Most of the time, all of these variables will be scalars. But a secondary objective is to break up larger aggregates into smaller aggregates. In the process we may find that some bits of the larger aggregate can be deleted as unreferenced. This substitution is done globally. More localized substitutions would be the purvey of a load-store motion pass. The optimization proceeds in phases: (1) Identify variables that have types that are candidates for decomposition. (2) Scan the function looking for the ways these variables are used. In particular we're interested in the number of times a variable (or member) is needed as a complete unit, and the number of times a variable (or member) is copied. (3) Based on the usage profile, instantiate substitution variables. (4) Scan the function making replacements. */ /* The set of aggregate variables that are candidates for scalarization. */ static bitmap sra_candidates; /* Set of scalarizable PARM_DECLs that need copy-in operations at the beginning of the function. */ static bitmap needs_copy_in; /* Sets of bit pairs that cache type decomposition and instantiation. */ static bitmap sra_type_decomp_cache; static bitmap sra_type_inst_cache; /* One of these structures is created for each candidate aggregate and each (accessed) member of such an aggregate. */ struct sra_elt { /* A tree of the elements. Used when we want to traverse everything. */ struct sra_elt *parent; struct sra_elt *children; struct sra_elt *sibling; /* If this element is a root, then this is the VAR_DECL. If this is a sub-element, this is some token used to identify the reference. In the case of COMPONENT_REF, this is the FIELD_DECL. In the case of an ARRAY_REF, this is the (constant) index. In the case of a complex number, this is a zero or one. */ tree element; /* The type of the element. */ tree type; /* A VAR_DECL, for any sub-element we've decided to replace. */ tree replacement; /* The number of times the element is referenced as a whole. I.e. given "a.b.c", this would be incremented for C, but not for A or B. */ unsigned int n_uses; /* The number of times the element is copied to or from another scalarizable element. */ unsigned int n_copies; /* True if TYPE is scalar. 
*/ bool is_scalar; /* True if we saw something about this element that prevents scalarization, such as non-constant indexing. */ bool cannot_scalarize; /* True if we've decided that structure-to-structure assignment should happen via memcpy and not per-element. */ bool use_block_copy; /* A flag for use with/after random access traversals. */ bool visited; }; /* Random access to the child of a parent is performed by hashing. This prevents quadratic behaviour, and allows SRA to function reasonably on larger records. */ static htab_t sra_map; /* All structures are allocated out of the following obstack. */ static struct obstack sra_obstack; /* Debugging functions. */ static void dump_sra_elt_name (FILE *, struct sra_elt *); extern void debug_sra_elt_name (struct sra_elt *); /* Return true if DECL is an SRA candidate. */ static bool is_sra_candidate_decl (tree decl) { return DECL_P (decl) && bitmap_bit_p (sra_candidates, var_ann (decl)->uid); } /* Return true if TYPE is a scalar type. */ static bool is_sra_scalar_type (tree type) { enum tree_code code = TREE_CODE (type); return (code == INTEGER_TYPE || code == REAL_TYPE || code == VECTOR_TYPE || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE || code == CHAR_TYPE || code == POINTER_TYPE || code == OFFSET_TYPE || code == REFERENCE_TYPE); } /* Return true if TYPE can be decomposed into a set of independent variables. Note that this doesn't imply that all elements of TYPE can be instantiated, just that if we decide to break up the type into separate pieces that it can be done. */ static bool type_can_be_decomposed_p (tree type) { unsigned int cache = TYPE_UID (TYPE_MAIN_VARIANT (type)) * 2; tree t; /* Avoid searching the same type twice. */ if (bitmap_bit_p (sra_type_decomp_cache, cache+0)) return true; if (bitmap_bit_p (sra_type_decomp_cache, cache+1)) return false; /* The type must have a definite non-zero size. */ if (TYPE_SIZE (type) == NULL || integer_zerop (TYPE_SIZE (type))) goto fail; /* The type must be a non-union aggregate. */ switch (TREE_CODE (type)) { case RECORD_TYPE: { bool saw_one_field = false; for (t = TYPE_FIELDS (type); t ; t = TREE_CHAIN (t)) if (TREE_CODE (t) == FIELD_DECL) { /* Reject incorrectly represented bit fields. */ if (DECL_BIT_FIELD (t) && (tree_low_cst (DECL_SIZE (t), 1) != TYPE_PRECISION (TREE_TYPE (t)))) goto fail; saw_one_field = true; } /* Record types must have at least one field. */ if (!saw_one_field) goto fail; } break; case ARRAY_TYPE: /* Array types must have a fixed lower and upper bound. */ t = TYPE_DOMAIN (type); if (t == NULL) goto fail; if (TYPE_MIN_VALUE (t) == NULL || !TREE_CONSTANT (TYPE_MIN_VALUE (t))) goto fail; if (TYPE_MAX_VALUE (t) == NULL || !TREE_CONSTANT (TYPE_MAX_VALUE (t))) goto fail; break; case COMPLEX_TYPE: break; default: goto fail; } bitmap_set_bit (sra_type_decomp_cache, cache+0); return true; fail: bitmap_set_bit (sra_type_decomp_cache, cache+1); return false; } /* Return true if DECL can be decomposed into a set of independent (though not necessarily scalar) variables. */ static bool decl_can_be_decomposed_p (tree var) { /* Early out for scalars. */ if (is_sra_scalar_type (TREE_TYPE (var))) return false; /* The variable must not be aliased. */ if (!is_gimple_non_addressable (var)) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Cannot scalarize variable "); print_generic_expr (dump_file, var, dump_flags); fprintf (dump_file, " because it must live in memory\n"); } return false; } /* The variable must not be volatile. 
*/ if (TREE_THIS_VOLATILE (var)) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Cannot scalarize variable "); print_generic_expr (dump_file, var, dump_flags); fprintf (dump_file, " because it is declared volatile\n"); } return false; } /* We must be able to decompose the variable's type. */ if (!type_can_be_decomposed_p (TREE_TYPE (var))) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Cannot scalarize variable "); print_generic_expr (dump_file, var, dump_flags); fprintf (dump_file, " because its type cannot be decomposed\n"); } return false; } return true; } /* Return true if TYPE can be *completely* decomposed into scalars. */ static bool type_can_instantiate_all_elements (tree type) { if (is_sra_scalar_type (type)) return true; if (!type_can_be_decomposed_p (type)) return false; switch (TREE_CODE (type)) { case RECORD_TYPE: { unsigned int cache = TYPE_UID (TYPE_MAIN_VARIANT (type)) * 2; tree f; if (bitmap_bit_p (sra_type_inst_cache, cache+0)) return true; if (bitmap_bit_p (sra_type_inst_cache, cache+1)) return false; for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f)) if (TREE_CODE (f) == FIELD_DECL) { if (!type_can_instantiate_all_elements (TREE_TYPE (f))) { bitmap_set_bit (sra_type_inst_cache, cache+1); return false; } } bitmap_set_bit (sra_type_inst_cache, cache+0); return true; } case ARRAY_TYPE: return type_can_instantiate_all_elements (TREE_TYPE (type)); case COMPLEX_TYPE: return true; default: abort (); } } /* Test whether ELT or some sub-element cannot be scalarized. */ static bool can_completely_scalarize_p (struct sra_elt *elt) { struct sra_elt *c; if (elt->cannot_scalarize) return false; for (c = elt->children; c ; c = c->sibling) if (!can_completely_scalarize_p (c)) return false; return true; } /* A simplified tree hashing algorithm that only handles the types of trees we expect to find in sra_elt->element. */ static hashval_t sra_hash_tree (tree t) { switch (TREE_CODE (t)) { case VAR_DECL: case PARM_DECL: case RESULT_DECL: case FIELD_DECL: return DECL_UID (t); case INTEGER_CST: return TREE_INT_CST_LOW (t) ^ TREE_INT_CST_HIGH (t); default: abort (); } } /* Hash function for type SRA_PAIR. */ static hashval_t sra_elt_hash (const void *x) { const struct sra_elt *e = x; const struct sra_elt *p; hashval_t h; h = sra_hash_tree (e->element); /* Take into account everything back up the chain. Given that chain lengths are rarely very long, this should be acceptable. If we truely identify this as a performance problem, it should work to hash the pointer value "e->parent". */ for (p = e->parent; p ; p = p->parent) h = (h * 65521) ^ sra_hash_tree (p->element); return h; } /* Equality function for type SRA_PAIR. */ static int sra_elt_eq (const void *x, const void *y) { const struct sra_elt *a = x; const struct sra_elt *b = y; if (a->parent != b->parent) return false; /* All the field/decl stuff is unique. */ if (a->element == b->element) return true; /* The only thing left is integer equality. */ if (TREE_CODE (a->element) == INTEGER_CST && TREE_CODE (b->element) == INTEGER_CST) return tree_int_cst_equal (a->element, b->element); else return false; } /* Create or return the SRA_ELT structure for CHILD in PARENT. PARENT may be null, in which case CHILD must be a DECL. 
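For example, for a reference to a.b, PARENT is the element for the VAR_DECL a and CHILD is the FIELD_DECL for b.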
*/ static struct sra_elt * lookup_element (struct sra_elt *parent, tree child, tree type, enum insert_option insert) { struct sra_elt dummy; struct sra_elt **slot; struct sra_elt *elt; dummy.parent = parent; dummy.element = child; slot = (struct sra_elt **) htab_find_slot (sra_map, &dummy, insert); if (!slot && insert == NO_INSERT) return NULL; elt = *slot; if (!elt && insert == INSERT) { *slot = elt = obstack_alloc (&sra_obstack, sizeof (*elt)); memset (elt, 0, sizeof (*elt)); elt->parent = parent; elt->element = child; elt->type = type; elt->is_scalar = is_sra_scalar_type (type); if (parent) { elt->sibling = parent->children; parent->children = elt; } /* If this is a parameter, then if we want to scalarize, we have one copy from the true function parameter. Count it now. */ if (TREE_CODE (child) == PARM_DECL) { elt->n_copies = 1; bitmap_set_bit (needs_copy_in, var_ann (child)->uid); } } return elt; } /* Return true if the ARRAY_REF in EXPR is a constant, in bounds access. */ static bool is_valid_const_index (tree expr) { tree dom, t, index = TREE_OPERAND (expr, 1); if (TREE_CODE (index) != INTEGER_CST) return false; /* Watch out for stupid user tricks, indexing outside the array. Careful, we're not called only on scalarizable types, so do not assume constant array bounds. We needn't do anything with such cases, since they'll be referring to objects that we should have already rejected for scalarization, so returning false is fine. */ dom = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (expr, 0))); if (dom == NULL) return false; t = TYPE_MIN_VALUE (dom); if (!t || TREE_CODE (t) != INTEGER_CST) return false; if (tree_int_cst_lt (index, t)) return false; t = TYPE_MAX_VALUE (dom); if (!t || TREE_CODE (t) != INTEGER_CST) return false; if (tree_int_cst_lt (t, index)) return false; return true; } /* Create or return the SRA_ELT structure for EXPR if the expression refers to a scalarizable variable. */ static struct sra_elt * maybe_lookup_element_for_expr (tree expr) { struct sra_elt *elt; tree child; switch (TREE_CODE (expr)) { case VAR_DECL: case PARM_DECL: case RESULT_DECL: if (is_sra_candidate_decl (expr)) return lookup_element (NULL, expr, TREE_TYPE (expr), INSERT); return NULL; case ARRAY_REF: /* We can't scalarize variable array indicies. */ if (is_valid_const_index (expr)) child = TREE_OPERAND (expr, 1); else return NULL; break; case COMPONENT_REF: /* Don't look through unions. */ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) != RECORD_TYPE) return NULL; child = TREE_OPERAND (expr, 1); break; case REALPART_EXPR: child = integer_zero_node; break; case IMAGPART_EXPR: child = integer_one_node; break; default: return NULL; } elt = maybe_lookup_element_for_expr (TREE_OPERAND (expr, 0)); if (elt) return lookup_element (elt, child, TREE_TYPE (expr), INSERT); return NULL; } /* Functions to walk just enough of the tree to see all scalarizable references, and categorize them. */ /* A set of callbacks for phases 2 and 4. They'll be invoked for the various kinds of references seen. In all cases, *BSI is an iterator pointing to the statement being processed. */ struct sra_walk_fns { /* Invoked when ELT is required as a unit. Note that ELT might refer to a leaf node, in which case this is a simple scalar reference. *EXPR_P points to the location of the expression. IS_OUTPUT is true if this is a left-hand-side reference. */ void (*use) (struct sra_elt *elt, tree *expr_p, block_stmt_iterator *bsi, bool is_output); /* Invoked when we have a copy between two scalarizable references. 
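E.g. an assignment a = b in which both sides refer to scalarization candidates.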
*/ void (*copy) (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt, block_stmt_iterator *bsi); /* Invoked when ELT is initialized from a constant. VALUE may be NULL, in which case it should be treated as an empty CONSTRUCTOR. Return false if we found a case we couldn't handle. */ bool (*init) (struct sra_elt *elt, tree value, block_stmt_iterator *bsi); /* Invoked when we have a copy between one scalarizable reference ELT and one non-scalarizable reference OTHER. IS_OUTPUT is true if ELT is on the left-hand side. */ void (*ldst) (struct sra_elt *elt, tree other, block_stmt_iterator *bsi, bool is_output); /* True during phase 2, false during phase 4. */ /* ??? This is a hack. */ bool initial_scan; }; #ifdef ENABLE_CHECKING /* Invoked via walk_tree, if *TP contains an candidate decl, return it. */ static tree sra_find_candidate_decl (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) { tree t = *tp; enum tree_code code = TREE_CODE (t); if (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL) { *walk_subtrees = 0; if (is_sra_candidate_decl (t)) return t; } else if (TYPE_P (t)) *walk_subtrees = 0; return NULL; } #endif /* Walk most expressions looking for a scalarizable aggregate. If we find one, invoke FNS->USE. */ static void sra_walk_expr (tree *expr_p, block_stmt_iterator *bsi, bool is_output, const struct sra_walk_fns *fns) { tree expr = *expr_p; tree inner = expr; bool disable_scalarization = false; /* We're looking to collect a reference expression between EXPR and INNER, such that INNER is a scalarizable decl and all other nodes through EXPR are references that we can scalarize. If we come across something that we can't scalarize, we reset EXPR. This has the effect of making it appear that we're referring to the larger expression as a whole. */ while (1) switch (TREE_CODE (inner)) { case VAR_DECL: case PARM_DECL: case RESULT_DECL: /* If there is a scalarizable decl at the bottom, then process it. */ if (is_sra_candidate_decl (inner)) { struct sra_elt *elt = maybe_lookup_element_for_expr (expr); if (disable_scalarization) elt->cannot_scalarize = true; else fns->use (elt, expr_p, bsi, is_output); } return; case ARRAY_REF: /* Non-constant index means any member may be accessed. Prevent the expression from being scalarized. If we were to treat this as a reference to the whole array, we can wind up with a single dynamic index reference inside a loop being overridden by several constant index references during loop setup. It's possible that this could be avoided by using dynamic usage counts based on BB trip counts (based on loop analysis or profiling), but that hardly seems worth the effort. */ /* ??? Hack. Figure out how to push this into the scan routines without duplicating too much code. */ if (!is_valid_const_index (inner)) { disable_scalarization = true; goto use_all; } /* ??? Are we assured that non-constant bounds and stride will have the same value everywhere? I don't think Fortran will... */ if (TREE_OPERAND (inner, 2) || TREE_OPERAND (inner, 3)) goto use_all; inner = TREE_OPERAND (inner, 0); break; case COMPONENT_REF: /* A reference to a union member constitutes a reference to the entire union. */ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (inner, 0))) != RECORD_TYPE) goto use_all; /* ??? See above re non-constant stride. 
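An explicit operand 2 (a variable field offset) likewise forces us to treat the reference as a use of the whole object.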
*/ if (TREE_OPERAND (inner, 2)) goto use_all; inner = TREE_OPERAND (inner, 0); break; case REALPART_EXPR: case IMAGPART_EXPR: inner = TREE_OPERAND (inner, 0); break; case BIT_FIELD_REF: /* A bit field reference (access to *multiple* fields simultaneously) is not currently scalarized. Consider this an access to the complete outer element, to which walk_tree will bring us next. */ goto use_all; case ARRAY_RANGE_REF: /* Similarly, an subrange reference is used to modify indexing. Which means that the canonical element names that we have won't work. */ goto use_all; case VIEW_CONVERT_EXPR: case NOP_EXPR: /* Similarly, a view/nop explicitly wants to look at an object in a type other than the one we've scalarized. */ goto use_all; use_all: expr_p = &TREE_OPERAND (inner, 0); inner = expr = *expr_p; break; default: #ifdef ENABLE_CHECKING /* Validate that we're not missing any references. */ if (walk_tree (&inner, sra_find_candidate_decl, NULL, NULL)) abort (); #endif return; } } /* Walk a TREE_LIST of values looking for scalarizable aggregates. If we find one, invoke FNS->USE. */ static void sra_walk_tree_list (tree list, block_stmt_iterator *bsi, bool is_output, const struct sra_walk_fns *fns) { tree op; for (op = list; op ; op = TREE_CHAIN (op)) sra_walk_expr (&TREE_VALUE (op), bsi, is_output, fns); } /* Walk the arguments of a CALL_EXPR looking for scalarizable aggregates. If we find one, invoke FNS->USE. */ static void sra_walk_call_expr (tree expr, block_stmt_iterator *bsi, const struct sra_walk_fns *fns) { sra_walk_tree_list (TREE_OPERAND (expr, 1), bsi, false, fns); } /* Walk the inputs and outputs of an ASM_EXPR looking for scalarizable aggregates. If we find one, invoke FNS->USE. */ static void sra_walk_asm_expr (tree expr, block_stmt_iterator *bsi, const struct sra_walk_fns *fns) { sra_walk_tree_list (ASM_INPUTS (expr), bsi, false, fns); sra_walk_tree_list (ASM_OUTPUTS (expr), bsi, true, fns); } /* Walk a MODIFY_EXPR and categorize the assignment appropriately. */ static void sra_walk_modify_expr (tree expr, block_stmt_iterator *bsi, const struct sra_walk_fns *fns) { struct sra_elt *lhs_elt, *rhs_elt; tree lhs, rhs; lhs = TREE_OPERAND (expr, 0); rhs = TREE_OPERAND (expr, 1); lhs_elt = maybe_lookup_element_for_expr (lhs); rhs_elt = maybe_lookup_element_for_expr (rhs); /* If both sides are scalarizable, this is a COPY operation. */ if (lhs_elt && rhs_elt) { fns->copy (lhs_elt, rhs_elt, bsi); return; } if (lhs_elt) { /* If this is an assignment from a constant, or constructor, then we have access to all of the elements individually. Invoke INIT. */ if ((TREE_CODE (rhs) == COMPLEX_EXPR || TREE_CODE (rhs) == COMPLEX_CST || TREE_CODE (rhs) == CONSTRUCTOR) && fns->init (lhs_elt, rhs, bsi)) ; /* If this is an assignment from read-only memory, treat this as if we'd been passed the constructor directly. Invoke INIT. */ else if (TREE_CODE (rhs) == VAR_DECL && TREE_STATIC (rhs) && TREE_READONLY (rhs) && targetm.binds_local_p (rhs) && DECL_INITIAL (rhs) && fns->init (lhs_elt, DECL_INITIAL (rhs), bsi)) ; /* If this is a copy from a non-scalarizable lvalue, invoke LDST. The lvalue requirement prevents us from trying to directly scalarize the result of a function call. Which would result in trying to call the function multiple times, and other evil things. */ else if (!lhs_elt->is_scalar && is_gimple_addr_expr_arg (rhs)) fns->ldst (lhs_elt, rhs, bsi, true); /* Otherwise we're being used in some context that requires the aggregate to be seen as a whole. Invoke USE. 
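A common example is an assignment whose right-hand side is a call returning the aggregate.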
*/ else fns->use (lhs_elt, &TREE_OPERAND (expr, 0), bsi, true); } else { /* LHS_ELT being null only means that the LHS as a whole is not a scalarizable reference. There may be occurrences of scalarizable variables within, which implies a USE. */ sra_walk_expr (&TREE_OPERAND (expr, 0), bsi, true, fns); } /* Likewise for the right-hand side. The only difference here is that we don't have to handle constants, and the RHS may be a call. */ if (rhs_elt) { if (!rhs_elt->is_scalar) fns->ldst (rhs_elt, lhs, bsi, false); else fns->use (rhs_elt, &TREE_OPERAND (expr, 1), bsi, false); } else if (TREE_CODE (rhs) == CALL_EXPR) sra_walk_call_expr (rhs, bsi, fns); else sra_walk_expr (&TREE_OPERAND (expr, 1), bsi, false, fns); } /* Entry point to the walk functions. Search the entire function, invoking the callbacks in FNS on each of the references to scalarizable variables. */ static void sra_walk_function (const struct sra_walk_fns *fns) { basic_block bb; block_stmt_iterator si, ni; /* ??? Phase 4 could derive some benefit to walking the function in dominator tree order. */ FOR_EACH_BB (bb) for (si = bsi_start (bb); !bsi_end_p (si); si = ni) { tree stmt, t; stmt_ann_t ann; stmt = bsi_stmt (si); ann = stmt_ann (stmt); ni = si; bsi_next (&ni); /* If the statement has no virtual operands, then it doesn't make any structure references that we care about. */ if (NUM_V_MAY_DEFS (V_MAY_DEF_OPS (ann)) == 0 && NUM_VUSES (VUSE_OPS (ann)) == 0 && NUM_V_MUST_DEFS (V_MUST_DEF_OPS (ann)) == 0) continue; switch (TREE_CODE (stmt)) { case RETURN_EXPR: /* If we have "return " then the return value is already exposed for our pleasure. Walk it as a USE to force all the components back in place for the return. If we have an embedded assignment, then is of a type that gets returned in registers in this ABI, and we do not wish to extend their lifetimes. Treat this as a USE of the variable on the RHS of this assignment. */ t = TREE_OPERAND (stmt, 0); if (TREE_CODE (t) == MODIFY_EXPR) sra_walk_expr (&TREE_OPERAND (t, 1), &si, false, fns); else sra_walk_expr (&TREE_OPERAND (stmt, 0), &si, false, fns); break; case MODIFY_EXPR: sra_walk_modify_expr (stmt, &si, fns); break; case CALL_EXPR: sra_walk_call_expr (stmt, &si, fns); break; case ASM_EXPR: sra_walk_asm_expr (stmt, &si, fns); break; default: break; } } } /* Phase One: Scan all referenced variables in the program looking for structures that could be decomposed. */ static bool find_candidates_for_sra (void) { size_t i; bool any_set = false; for (i = 0; i < num_referenced_vars; i++) { tree var = referenced_var (i); if (decl_can_be_decomposed_p (var)) { bitmap_set_bit (sra_candidates, var_ann (var)->uid); any_set = true; } } return any_set; } /* Phase Two: Scan all references to scalarizable variables. Count the number of times they are used or copied respectively. */ /* Callbacks to fill in SRA_WALK_FNS. Everything but USE is considered a copy, because we can decompose the reference such that the sub-elements needn't be contiguous. 
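Accordingly, the callbacks below do nothing but bump the n_uses and n_copies counters that phase three consults.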
*/ static void scan_use (struct sra_elt *elt, tree *expr_p ATTRIBUTE_UNUSED, block_stmt_iterator *bsi ATTRIBUTE_UNUSED, bool is_output ATTRIBUTE_UNUSED) { elt->n_uses += 1; } static void scan_copy (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt, block_stmt_iterator *bsi ATTRIBUTE_UNUSED) { lhs_elt->n_copies += 1; rhs_elt->n_copies += 1; } static bool scan_init (struct sra_elt *lhs_elt, tree rhs ATTRIBUTE_UNUSED, block_stmt_iterator *bsi ATTRIBUTE_UNUSED) { lhs_elt->n_copies += 1; return true; } static void scan_ldst (struct sra_elt *elt, tree other ATTRIBUTE_UNUSED, block_stmt_iterator *bsi ATTRIBUTE_UNUSED, bool is_output ATTRIBUTE_UNUSED) { elt->n_copies += 1; } /* Dump the values we collected during the scanning phase. */ static void scan_dump (struct sra_elt *elt) { struct sra_elt *c; dump_sra_elt_name (dump_file, elt); fprintf (dump_file, ": n_uses=%u n_copies=%u\n", elt->n_uses, elt->n_copies); for (c = elt->children; c ; c = c->sibling) scan_dump (c); } /* Entry point to phase 2. Scan the entire function, building up scalarization data structures, recording copies and uses. */ static void scan_function (void) { static const struct sra_walk_fns fns = { scan_use, scan_copy, scan_init, scan_ldst, true }; sra_walk_function (&fns); if (dump_file && (dump_flags & TDF_DETAILS)) { size_t i; fputs ("\nScan results:\n", dump_file); EXECUTE_IF_SET_IN_BITMAP (sra_candidates, 0, i, { tree var = referenced_var (i); struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT); if (elt) scan_dump (elt); }); fputc ('\n', dump_file); } } /* Phase Three: Make decisions about which variables to scalarize, if any. All elements to be scalarized have replacement variables made for them. */ /* A subroutine of build_element_name. Recursively build the element name on the obstack. */ static void build_element_name_1 (struct sra_elt *elt) { tree t; char buffer[32]; if (elt->parent) { build_element_name_1 (elt->parent); obstack_1grow (&sra_obstack, '$'); if (TREE_CODE (elt->parent->type) == COMPLEX_TYPE) { if (elt->element == integer_zero_node) obstack_grow (&sra_obstack, "real", 4); else obstack_grow (&sra_obstack, "imag", 4); return; } } t = elt->element; if (TREE_CODE (t) == INTEGER_CST) { /* ??? Eh. Don't bother doing double-wide printing. */ sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, TREE_INT_CST_LOW (t)); obstack_grow (&sra_obstack, buffer, strlen (buffer)); } else { tree name = DECL_NAME (t); if (name) obstack_grow (&sra_obstack, IDENTIFIER_POINTER (name), IDENTIFIER_LENGTH (name)); else { sprintf (buffer, "D%u", DECL_UID (t)); obstack_grow (&sra_obstack, buffer, strlen (buffer)); } } } /* Construct a pretty variable name for an element's replacement variable. The name is built on the obstack. */ static char * build_element_name (struct sra_elt *elt) { build_element_name_1 (elt); obstack_1grow (&sra_obstack, '\0'); return obstack_finish (&sra_obstack); } /* Instantiate an element as an independent variable. 
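The replacement is a fresh temporary from make_rename_temp; if the base declaration has a user-visible name, the temporary is given a pretty name such as a$b via build_element_name.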
*/ static void instantiate_element (struct sra_elt *elt) { struct sra_elt *base_elt; tree var, base; for (base_elt = elt; base_elt->parent; base_elt = base_elt->parent) continue; base = base_elt->element; elt->replacement = var = make_rename_temp (elt->type, "SR"); DECL_SOURCE_LOCATION (var) = DECL_SOURCE_LOCATION (base); TREE_NO_WARNING (var) = TREE_NO_WARNING (base); DECL_ARTIFICIAL (var) = DECL_ARTIFICIAL (base); if (DECL_NAME (base) && !DECL_IGNORED_P (base)) { char *pretty_name = build_element_name (elt); DECL_NAME (var) = get_identifier (pretty_name); obstack_free (&sra_obstack, pretty_name); } if (dump_file) { fputs (" ", dump_file); dump_sra_elt_name (dump_file, elt); fputs (" -> ", dump_file); print_generic_expr (dump_file, var, dump_flags); fputc ('\n', dump_file); } } /* Make one pass across an element tree deciding whether or not it's profitable to instantiate individual leaf scalars. PARENT_USES and PARENT_COPIES are the sum of the N_USES and N_COPIES fields all the way up the tree. */ static void decide_instantiation_1 (struct sra_elt *elt, unsigned int parent_uses, unsigned int parent_copies) { if (dump_file && !elt->parent) { fputs ("Initial instantiation for ", dump_file); dump_sra_elt_name (dump_file, elt); fputc ('\n', dump_file); } if (elt->cannot_scalarize) return; if (elt->is_scalar) { /* The decision is simple: instantiate if we're used more frequently than the parent needs to be seen as a complete unit. */ if (elt->n_uses + elt->n_copies + parent_copies > parent_uses) instantiate_element (elt); } else { struct sra_elt *c; unsigned int this_uses = elt->n_uses + parent_uses; unsigned int this_copies = elt->n_copies + parent_copies; for (c = elt->children; c ; c = c->sibling) decide_instantiation_1 (c, this_uses, this_copies); } } /* Compute the size and number of all instantiated elements below ELT. We will only care about this if the size of the complete structure fits in a HOST_WIDE_INT, so we don't have to worry about overflow. */ static unsigned int sum_instantiated_sizes (struct sra_elt *elt, unsigned HOST_WIDE_INT *sizep) { if (elt->replacement) { *sizep += TREE_INT_CST_LOW (TYPE_SIZE_UNIT (elt->type)); return 1; } else { struct sra_elt *c; unsigned int count = 0; for (c = elt->children; c ; c = c->sibling) count += sum_instantiated_sizes (c, sizep); return count; } } /* Instantiate fields in ELT->TYPE that are not currently present as children of ELT. 
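This is used once decide_block_copy has settled on element copies, at which point every field, every array element within the constant bounds, and both complex parts need replacements.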
*/ static void instantiate_missing_elements (struct sra_elt *elt); static void instantiate_missing_elements_1 (struct sra_elt *elt, tree child, tree type) { struct sra_elt *sub = lookup_element (elt, child, type, INSERT); if (sub->is_scalar) { if (sub->replacement == NULL) instantiate_element (sub); } else instantiate_missing_elements (sub); } static void instantiate_missing_elements (struct sra_elt *elt) { tree type = elt->type; switch (TREE_CODE (type)) { case RECORD_TYPE: { tree f; for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f)) if (TREE_CODE (f) == FIELD_DECL) instantiate_missing_elements_1 (elt, f, TREE_TYPE (f)); break; } case ARRAY_TYPE: { tree i, max, subtype; i = TYPE_MIN_VALUE (TYPE_DOMAIN (type)); max = TYPE_MAX_VALUE (TYPE_DOMAIN (type)); subtype = TREE_TYPE (type); while (1) { instantiate_missing_elements_1 (elt, i, subtype); if (tree_int_cst_equal (i, max)) break; i = int_const_binop (PLUS_EXPR, i, integer_one_node, true); } break; } case COMPLEX_TYPE: type = TREE_TYPE (type); instantiate_missing_elements_1 (elt, integer_zero_node, type); instantiate_missing_elements_1 (elt, integer_one_node, type); break; default: abort (); } } /* Make one pass across an element tree deciding whether to perform block or element copies. If we decide on element copies, instantiate all elements. Return true if there are any instantiated sub-elements. */ static bool decide_block_copy (struct sra_elt *elt) { struct sra_elt *c; bool any_inst; /* If scalarization is disabled, respect it. */ if (elt->cannot_scalarize) { elt->use_block_copy = 1; if (dump_file) { fputs ("Scalarization disabled for ", dump_file); dump_sra_elt_name (dump_file, elt); fputc ('\n', dump_file); } return false; } /* Don't decide if we've no uses. */ if (elt->n_uses == 0 && elt->n_copies == 0) ; else if (!elt->is_scalar) { tree size_tree = TYPE_SIZE_UNIT (elt->type); bool use_block_copy = true; /* Don't bother trying to figure out the rest if the structure is so large we can't do easy arithmetic. This also forces block copies for variable sized structures. */ if (host_integerp (size_tree, 1)) { unsigned HOST_WIDE_INT full_size, inst_size = 0; unsigned int inst_count; full_size = tree_low_cst (size_tree, 1); /* ??? What to do here. If there are two fields, and we've only instantiated one, then instantiating the other is clearly a win. If there are a large number of fields then the size of the copy is much more of a factor. */ /* If the structure is small, and we've made copies, go ahead and instantiate, hoping that the copies will go away. */ if (full_size <= (unsigned) MOVE_RATIO * UNITS_PER_WORD && elt->n_copies > elt->n_uses) use_block_copy = false; else { inst_count = sum_instantiated_sizes (elt, &inst_size); if (inst_size * 4 >= full_size * 3) use_block_copy = false; } /* In order to avoid block copy, we have to be able to instantiate all elements of the type. See if this is possible. */ if (!use_block_copy && (!can_completely_scalarize_p (elt) || !type_can_instantiate_all_elements (elt->type))) use_block_copy = true; } elt->use_block_copy = use_block_copy; if (dump_file) { fprintf (dump_file, "Using %s for ", use_block_copy ? "block-copy" : "element-copy"); dump_sra_elt_name (dump_file, elt); fputc ('\n', dump_file); } if (!use_block_copy) { instantiate_missing_elements (elt); return true; } } any_inst = elt->replacement != NULL; for (c = elt->children; c ; c = c->sibling) any_inst |= decide_block_copy (c); return any_inst; } /* Entry point to phase 3. Instantiate scalar replacement variables. 
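Candidates for which nothing ends up instantiated are dropped from SRA_CANDIDATES and NEEDS_COPY_IN so that phase four ignores them.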
*/ static void decide_instantiations (void) { unsigned int i; bool cleared_any; struct bitmap_head_def done_head; /* We cannot clear bits from a bitmap we're iterating over, so save up all the bits to clear until the end. */ bitmap_initialize (&done_head, 1); cleared_any = false; EXECUTE_IF_SET_IN_BITMAP (sra_candidates, 0, i, { tree var = referenced_var (i); struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT); if (elt) { decide_instantiation_1 (elt, 0, 0); if (!decide_block_copy (elt)) elt = NULL; } if (!elt) { bitmap_set_bit (&done_head, i); cleared_any = true; } }); if (cleared_any) { bitmap_operation (sra_candidates, sra_candidates, &done_head, BITMAP_AND_COMPL); bitmap_operation (needs_copy_in, needs_copy_in, &done_head, BITMAP_AND_COMPL); } bitmap_clear (&done_head); if (dump_file) fputc ('\n', dump_file); } /* Phase Four: Update the function to match the replacements created. */ /* Mark all the variables in V_MAY_DEF or V_MUST_DEF operands for STMT for renaming. This becomes necessary when we modify all of a non-scalar. */ static void mark_all_v_defs (tree stmt) { v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; size_t i, n; get_stmt_operands (stmt); v_may_defs = V_MAY_DEF_OPS (stmt_ann (stmt)); n = NUM_V_MAY_DEFS (v_may_defs); for (i = 0; i < n; i++) { tree sym = V_MAY_DEF_RESULT (v_may_defs, i); if (TREE_CODE (sym) == SSA_NAME) sym = SSA_NAME_VAR (sym); bitmap_set_bit (vars_to_rename, var_ann (sym)->uid); } v_must_defs = V_MUST_DEF_OPS (stmt_ann (stmt)); n = NUM_V_MUST_DEFS (v_must_defs); for (i = 0; i < n; i++) { tree sym = V_MUST_DEF_OP (v_must_defs, i); if (TREE_CODE (sym) == SSA_NAME) sym = SSA_NAME_VAR (sym); bitmap_set_bit (vars_to_rename, var_ann (sym)->uid); } } /* Build a single level component reference to ELT rooted at BASE. */ static tree generate_one_element_ref (struct sra_elt *elt, tree base) { switch (TREE_CODE (TREE_TYPE (base))) { case RECORD_TYPE: return build (COMPONENT_REF, elt->type, base, elt->element, NULL); case ARRAY_TYPE: return build (ARRAY_REF, elt->type, base, elt->element, NULL, NULL); case COMPLEX_TYPE: if (elt->element == integer_zero_node) return build (REALPART_EXPR, elt->type, base); else return build (IMAGPART_EXPR, elt->type, base); default: abort (); } } /* Build a full component reference to ELT rooted at its native variable. */ static tree generate_element_ref (struct sra_elt *elt) { if (elt->parent) return generate_one_element_ref (elt, generate_element_ref (elt->parent)); else return elt->element; } /* Generate a set of assignment statements in *LIST_P to copy all instantiated elements under ELT to or from the equivalent structure rooted at EXPR. COPY_OUT controls the direction of the copy, with true meaning to copy out of EXPR into ELT. */ static void generate_copy_inout (struct sra_elt *elt, bool copy_out, tree expr, tree *list_p) { struct sra_elt *c; tree t; if (elt->replacement) { if (copy_out) t = build (MODIFY_EXPR, void_type_node, elt->replacement, expr); else t = build (MODIFY_EXPR, void_type_node, expr, elt->replacement); append_to_statement_list (t, list_p); } else { for (c = elt->children; c ; c = c->sibling) { t = generate_one_element_ref (c, unshare_expr (expr)); generate_copy_inout (c, copy_out, t, list_p); } } } /* Generate a set of assignment statements in *LIST_P to copy all instantiated elements under SRC to their counterparts under DST. There must be a 1-1 correspondence of instantiated elements. 
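Any mismatch between the two trees is a bug and is caught by the aborts below.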
*/ static void generate_element_copy (struct sra_elt *dst, struct sra_elt *src, tree *list_p) { struct sra_elt *dc, *sc; for (dc = dst->children; dc ; dc = dc->sibling) { sc = lookup_element (src, dc->element, NULL, NO_INSERT); if (sc == NULL) abort (); generate_element_copy (dc, sc, list_p); } if (dst->replacement) { tree t; if (src->replacement == NULL) abort (); t = build (MODIFY_EXPR, void_type_node, dst->replacement, src->replacement); append_to_statement_list (t, list_p); } } /* Generate a set of assignment statements in *LIST_P to zero all instantiated elements under ELT. In addition, do not assign to elements that have been marked VISITED but do reset the visited flag; this allows easy coordination with generate_element_init. */ static void generate_element_zero (struct sra_elt *elt, tree *list_p) { struct sra_elt *c; for (c = elt->children; c ; c = c->sibling) generate_element_zero (c, list_p); if (elt->visited) elt->visited = false; else if (elt->replacement) { tree t; if (elt->is_scalar) t = fold_convert (elt->type, integer_zero_node); else /* We generated a replacement for a non-scalar? */ abort (); t = build (MODIFY_EXPR, void_type_node, elt->replacement, t); append_to_statement_list (t, list_p); } } /* Generate a set of assignment statements in *LIST_P to set all instantiated elements under ELT with the contents of the initializer INIT. In addition, mark all assigned elements VISITED; this allows easy coordination with generate_element_zero. Return false if we found a case we couldn't handle. */ static bool generate_element_init (struct sra_elt *elt, tree init, tree *list_p) { bool result = true; enum tree_code init_code; struct sra_elt *sub; tree t; /* We can be passed DECL_INITIAL of a static variable. It might have a conversion, which we strip off here. */ STRIP_USELESS_TYPE_CONVERSION (init); init_code = TREE_CODE (init); if (elt->is_scalar) { if (elt->replacement) { t = build (MODIFY_EXPR, void_type_node, elt->replacement, init); append_to_statement_list (t, list_p); elt->visited = true; } return result; } switch (init_code) { case COMPLEX_CST: case COMPLEX_EXPR: for (sub = elt->children; sub ; sub = sub->sibling) { if (sub->element == integer_zero_node) t = (init_code == COMPLEX_EXPR ? TREE_OPERAND (init, 0) : TREE_REALPART (init)); else t = (init_code == COMPLEX_EXPR ? TREE_OPERAND (init, 1) : TREE_IMAGPART (init)); result &= generate_element_init (sub, t, list_p); } break; case CONSTRUCTOR: for (t = CONSTRUCTOR_ELTS (init); t ; t = TREE_CHAIN (t)) { sub = lookup_element (elt, TREE_PURPOSE (t), NULL, NO_INSERT); if (sub == NULL) continue; result &= generate_element_init (sub, TREE_VALUE (t), list_p); } break; default: result = false; } return result; } /* Insert STMT on all the outgoing edges out of BB. Note that if BB has more than one edge, STMT will be replicated for each edge. Also, abnormal edges will be ignored. */ void insert_edge_copies (tree stmt, basic_block bb) { edge e; bool first_copy; first_copy = true; for (e = bb->succ; e; e = e->succ_next) { /* We don't need to insert copies on abnormal edges. The value of the scalar replacement is not guaranteed to be valid through an abnormal edge. */ if (!(e->flags & EDGE_ABNORMAL)) { if (first_copy) { bsi_insert_on_edge (e, stmt); first_copy = false; } else bsi_insert_on_edge (e, lhd_unsave_expr_now (stmt)); } } } /* Helper function to insert LIST before BSI, and set up line number info. 
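The inserted statements inherit the source location of the statement currently at BSI, if it has one.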
*/ static void sra_insert_before (block_stmt_iterator *bsi, tree list) { tree stmt = bsi_stmt (*bsi); if (EXPR_HAS_LOCATION (stmt)) annotate_all_with_locus (&list, EXPR_LOCATION (stmt)); bsi_insert_before (bsi, list, BSI_SAME_STMT); } /* Similarly, but insert after BSI. Handles insertion onto edges as well. */ static void sra_insert_after (block_stmt_iterator *bsi, tree list) { tree stmt = bsi_stmt (*bsi); if (EXPR_HAS_LOCATION (stmt)) annotate_all_with_locus (&list, EXPR_LOCATION (stmt)); if (stmt_ends_bb_p (stmt)) insert_edge_copies (list, bsi->bb); else bsi_insert_after (bsi, list, BSI_SAME_STMT); } /* Similarly, but replace the statement at BSI. */ static void sra_replace (block_stmt_iterator *bsi, tree list) { sra_insert_before (bsi, list); bsi_remove (bsi); if (bsi_end_p (*bsi)) *bsi = bsi_last (bsi->bb); else bsi_prev (bsi); } /* Scalarize a USE. To recap, this is either a simple reference to ELT, if elt is scalar, or some occurrence of ELT that requires a complete aggregate. IS_OUTPUT is true if ELT is being modified. */ static void scalarize_use (struct sra_elt *elt, tree *expr_p, block_stmt_iterator *bsi, bool is_output) { tree list = NULL, stmt = bsi_stmt (*bsi); if (elt->replacement) { /* If we have a replacement, then updating the reference is as simple as modifying the existing statement in place. */ if (is_output) mark_all_v_defs (stmt); *expr_p = elt->replacement; modify_stmt (stmt); } else { /* Otherwise we need some copies. If ELT is being read, then we want to store all (modified) sub-elements back into the structure before the reference takes place. If ELT is being written, then we want to load the changed values back into our shadow variables. */ /* ??? We don't check modified for reads, we just always write all of the values. We should be able to record the SSA number of the VOP for which the values were last read. If that number matches the SSA number of the VOP in the current statement, then we needn't emit an assignment. This would also eliminate double writes when a structure is passed as more than one argument to a function call. This optimization would be most effective if sra_walk_function processed the blocks in dominator order. */ generate_copy_inout (elt, is_output, generate_element_ref (elt), &list); if (list == NULL) return; if (is_output) { mark_all_v_defs (expr_first (list)); sra_insert_after (bsi, list); } else sra_insert_before (bsi, list); } } /* Scalarize a COPY. To recap, this is an assignment statement between two scalarizable references, LHS_ELT and RHS_ELT. */ static void scalarize_copy (struct sra_elt *lhs_elt, struct sra_elt *rhs_elt, block_stmt_iterator *bsi) { tree list, stmt; if (lhs_elt->replacement && rhs_elt->replacement) { /* If we have two scalar operands, modify the existing statement. */ stmt = bsi_stmt (*bsi); #ifdef ENABLE_CHECKING /* See the commentary in sra_walk_function concerning RETURN_EXPR, and why we should never see one here. */ if (TREE_CODE (stmt) != MODIFY_EXPR) abort (); #endif TREE_OPERAND (stmt, 0) = lhs_elt->replacement; TREE_OPERAND (stmt, 1) = rhs_elt->replacement; modify_stmt (stmt); } else if (lhs_elt->use_block_copy || rhs_elt->use_block_copy) { /* If either side requires a block copy, then sync the RHS back to the original structure, leave the original assignment statement (which will perform the block copy), then load the LHS values out of its now-updated original structure. */ /* ??? Could perform a modified pair-wise element copy. 
That would at least allow those elements that are instantiated in both structures to be optimized well. */ list = NULL; generate_copy_inout (rhs_elt, false, generate_element_ref (rhs_elt), &list); if (list) { mark_all_v_defs (expr_first (list)); sra_insert_before (bsi, list); } list = NULL; generate_copy_inout (lhs_elt, true, generate_element_ref (lhs_elt), &list); if (list) sra_insert_after (bsi, list); } else { /* Otherwise both sides must be fully instantiated. In which case perform pair-wise element assignments and replace the original block copy statement. */ stmt = bsi_stmt (*bsi); mark_all_v_defs (stmt); list = NULL; generate_element_copy (lhs_elt, rhs_elt, &list); if (list == NULL) abort (); sra_replace (bsi, list); } } /* Scalarize an INIT. To recap, this is an assignment to a scalarizable reference from some form of constructor: CONSTRUCTOR, COMPLEX_CST or COMPLEX_EXPR. If RHS is NULL, it should be treated as an empty CONSTRUCTOR. Return false if we didn't handle this case. */ static bool scalarize_init (struct sra_elt *lhs_elt, tree rhs, block_stmt_iterator *bsi) { bool result = true; tree list = NULL; /* Generate initialization statements for all members extant in the RHS. */ if (rhs) result = generate_element_init (lhs_elt, rhs, &list); /* CONSTRUCTOR is defined such that any member not mentioned is assigned a zero value. Initialize the rest of the instantiated elements. */ generate_element_zero (lhs_elt, &list); /* If we didn't generate anything or couldn't handle this case return. Say which it was. */ if (!result || list == NULL) return result; if (lhs_elt->use_block_copy) { /* Since LHS is not fully instantiated, we must leave the structure assignment in place. Treating this case differently from a USE exposes constants to later optimizations. */ mark_all_v_defs (expr_first (list)); sra_insert_after (bsi, list); } else { /* The LHS is fully instantiated. The list of initializations replaces the original structure assignment. */ mark_all_v_defs (bsi_stmt (*bsi)); sra_replace (bsi, list); } return true; } /* A subroutine of scalarize_ldst called via walk_tree. Set TREE_NO_TRAP on all INDIRECT_REFs. */ static tree mark_notrap (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) { tree t = *tp; if (TREE_CODE (t) == INDIRECT_REF) { TREE_THIS_NOTRAP (t) = 1; *walk_subtrees = 0; } else if (DECL_P (t) || TYPE_P (t)) *walk_subtrees = 0; return NULL; } /* Scalarize a LDST. To recap, this is an assignment between one scalarizable reference ELT and one non-scalarizable reference OTHER. IS_OUTPUT is true if ELT is on the left-hand side. */ static void scalarize_ldst (struct sra_elt *elt, tree other, block_stmt_iterator *bsi, bool is_output) { /* Shouldn't have gotten called for a scalar. */ if (elt->replacement) abort (); if (elt->use_block_copy) { /* Since ELT is not fully instantiated, we have to leave the block copy in place. Treat this as a USE. */ scalarize_use (elt, NULL, bsi, is_output); } else { /* The interesting case is when ELT is fully instantiated. In this case we can have each element stored/loaded directly to/from the corresponding slot in OTHER. This avoids a block copy. */ tree list = NULL, stmt = bsi_stmt (*bsi); mark_all_v_defs (stmt); generate_copy_inout (elt, is_output, other, &list); if (list == NULL) abort (); /* Preserve EH semantics. */ if (stmt_ends_bb_p (stmt)) { tree_stmt_iterator tsi; tree first; /* Extract the first statement from LIST. 
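It replaces the statement at BSI in place, so the EH edges out of this block remain attached to a statement that can still throw.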
*/ tsi = tsi_start (list); first = tsi_stmt (tsi); tsi_delink (&tsi); /* Replace the old statement with this new representative. */ bsi_replace (bsi, first, true); if (!tsi_end_p (tsi)) { /* If any reference would trap, then they all would. And more to the point, the first would. Therefore none of the rest will trap since the first didn't. Indicate this by iterating over the remaining statements and set TREE_THIS_NOTRAP in all INDIRECT_REFs. */ do { walk_tree (tsi_stmt_ptr (tsi), mark_notrap, NULL, NULL); tsi_next (&tsi); } while (!tsi_end_p (tsi)); insert_edge_copies (list, bsi->bb); } } else sra_replace (bsi, list); } } /* Generate initializations for all scalarizable parameters. */ static void scalarize_parms (void) { tree list = NULL; size_t i; EXECUTE_IF_SET_IN_BITMAP (needs_copy_in, 0, i, { tree var = referenced_var (i); struct sra_elt *elt = lookup_element (NULL, var, NULL, NO_INSERT); generate_copy_inout (elt, true, var, &list); }); if (list) insert_edge_copies (list, ENTRY_BLOCK_PTR); } /* Entry point to phase 4. Update the function to match replacements. */ static void scalarize_function (void) { static const struct sra_walk_fns fns = { scalarize_use, scalarize_copy, scalarize_init, scalarize_ldst, false }; sra_walk_function (&fns); scalarize_parms (); bsi_commit_edge_inserts (NULL); } /* Debug helper function. Print ELT in a nice human-readable format. */ static void dump_sra_elt_name (FILE *f, struct sra_elt *elt) { if (elt->parent && TREE_CODE (elt->parent->type) == COMPLEX_TYPE) { fputs (elt->element == integer_zero_node ? "__real__ " : "__imag__ ", f); dump_sra_elt_name (f, elt->parent); } else { if (elt->parent) dump_sra_elt_name (f, elt->parent); if (DECL_P (elt->element)) { if (TREE_CODE (elt->element) == FIELD_DECL) fputc ('.', f); print_generic_expr (f, elt->element, dump_flags); } else fprintf (f, "[" HOST_WIDE_INT_PRINT_DEC "]", TREE_INT_CST_LOW (elt->element)); } } /* Likewise, but callable from the debugger. */ void debug_sra_elt_name (struct sra_elt *elt) { dump_sra_elt_name (stderr, elt); fputc ('\n', stderr); } /* Main entry point. */ static void tree_sra (void) { /* Initialize local variables. */ gcc_obstack_init (&sra_obstack); sra_candidates = BITMAP_XMALLOC (); needs_copy_in = BITMAP_XMALLOC (); sra_type_decomp_cache = BITMAP_XMALLOC (); sra_type_inst_cache = BITMAP_XMALLOC (); sra_map = htab_create (101, sra_elt_hash, sra_elt_eq, NULL); /* Scan. If we find anything, instantiate and scalarize. */ if (find_candidates_for_sra ()) { scan_function (); decide_instantiations (); scalarize_function (); } /* Free allocated memory. */ htab_delete (sra_map); sra_map = NULL; BITMAP_XFREE (sra_candidates); BITMAP_XFREE (needs_copy_in); BITMAP_XFREE (sra_type_decomp_cache); BITMAP_XFREE (sra_type_inst_cache); obstack_free (&sra_obstack, NULL); } static bool gate_sra (void) { return flag_tree_sra != 0; } struct tree_opt_pass pass_sra = { "sra", /* name */ gate_sra, /* gate */ tree_sra, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_SRA, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_rename_vars | TODO_ggc_collect | TODO_verify_ssa /* todo_flags_finish */ }; /* Lower complex operations to scalar operations. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Force EXP to be a gimple_val. */ static tree gimplify_val_complex (block_stmt_iterator *bsi, tree type, tree exp) { tree t, new_stmt, orig_stmt; if (is_gimple_val (exp)) return exp; t = make_rename_temp (type, NULL); new_stmt = build (MODIFY_EXPR, type, t, exp); orig_stmt = bsi_stmt (*bsi); SET_EXPR_LOCUS (new_stmt, EXPR_LOCUS (orig_stmt)); TREE_BLOCK (new_stmt) = TREE_BLOCK (orig_stmt); bsi_insert_before (bsi, new_stmt, BSI_SAME_STMT); return t; } /* Extract the real or imaginary part of a complex variable or constant. Make sure that it's a proper gimple_val and gimplify it if not. Emit any new code before BSI. */ static tree extract_component (block_stmt_iterator *bsi, tree t, bool imagpart_p) { tree ret, inner_type; inner_type = TREE_TYPE (TREE_TYPE (t)); switch (TREE_CODE (t)) { case COMPLEX_CST: ret = (imagpart_p ? TREE_IMAGPART (t) : TREE_REALPART (t)); break; case COMPLEX_EXPR: ret = TREE_OPERAND (t, imagpart_p); break; case VAR_DECL: case PARM_DECL: ret = build1 ((imagpart_p ? IMAGPART_EXPR : REALPART_EXPR), inner_type, t); break; default: abort (); } return gimplify_val_complex (bsi, inner_type, ret); } /* Build a binary operation and gimplify it. Emit code before BSI. Return the gimple_val holding the result. */ static tree do_binop (block_stmt_iterator *bsi, enum tree_code code, tree type, tree a, tree b) { tree ret; ret = fold (build (code, type, a, b)); STRIP_NOPS (ret); return gimplify_val_complex (bsi, type, ret); } /* Build a unary operation and gimplify it. Emit code before BSI. Return the gimple_val holding the result. */ static tree do_unop (block_stmt_iterator *bsi, enum tree_code code, tree type, tree a) { tree ret; ret = fold (build1 (code, type, a)); STRIP_NOPS (ret); return gimplify_val_complex (bsi, type, ret); } /* Update an assignment to a complex variable in place. 
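The right-hand side of the MODIFY_EXPR (possibly found inside a RETURN_EXPR) is rewritten as a COMPLEX_EXPR built from R and I.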
*/ static void update_complex_assignment (block_stmt_iterator *bsi, tree r, tree i) { tree stmt = bsi_stmt (*bsi); tree type; modify_stmt (stmt); if (TREE_CODE (stmt) == RETURN_EXPR) stmt = TREE_OPERAND (stmt, 0); type = TREE_TYPE (TREE_OPERAND (stmt, 1)); TREE_OPERAND (stmt, 1) = build (COMPLEX_EXPR, type, r, i); } /* Expand complex addition to scalars: a + b = (ar + br) + i(ai + bi) a - b = (ar - br) + i(ai - bi) */ static void expand_complex_addition (block_stmt_iterator *bsi, tree inner_type, tree ar, tree ai, tree br, tree bi, enum tree_code code) { tree rr, ri; rr = do_binop (bsi, code, inner_type, ar, br); ri = do_binop (bsi, code, inner_type, ai, bi); update_complex_assignment (bsi, rr, ri); } /* Expand complex multiplication to scalars: a * b = (ar*br - ai*bi) + i(ar*bi + br*ai) */ static void expand_complex_multiplication (block_stmt_iterator *bsi, tree inner_type, tree ar, tree ai, tree br, tree bi) { tree t1, t2, t3, t4, rr, ri; t1 = do_binop (bsi, MULT_EXPR, inner_type, ar, br); t2 = do_binop (bsi, MULT_EXPR, inner_type, ai, bi); t3 = do_binop (bsi, MULT_EXPR, inner_type, ar, bi); /* Avoid expanding redundant multiplication for the common case of squaring a complex number. */ if (ar == br && ai == bi) t4 = t3; else t4 = do_binop (bsi, MULT_EXPR, inner_type, ai, br); rr = do_binop (bsi, MINUS_EXPR, inner_type, t1, t2); ri = do_binop (bsi, PLUS_EXPR, inner_type, t3, t4); update_complex_assignment (bsi, rr, ri); } /* Expand complex division to scalars, straightforward algorithm. a / b = ((ar*br + ai*bi)/t) + i((ai*br - ar*bi)/t) t = br*br + bi*bi */ static void expand_complex_div_straight (block_stmt_iterator *bsi, tree inner_type, tree ar, tree ai, tree br, tree bi, enum tree_code code) { tree rr, ri, div, t1, t2, t3; t1 = do_binop (bsi, MULT_EXPR, inner_type, br, br); t2 = do_binop (bsi, MULT_EXPR, inner_type, bi, bi); div = do_binop (bsi, PLUS_EXPR, inner_type, t1, t2); t1 = do_binop (bsi, MULT_EXPR, inner_type, ar, br); t2 = do_binop (bsi, MULT_EXPR, inner_type, ai, bi); t3 = do_binop (bsi, PLUS_EXPR, inner_type, t1, t2); rr = do_binop (bsi, code, inner_type, t3, div); t1 = do_binop (bsi, MULT_EXPR, inner_type, ai, br); t2 = do_binop (bsi, MULT_EXPR, inner_type, ar, bi); t3 = do_binop (bsi, MINUS_EXPR, inner_type, t1, t2); ri = do_binop (bsi, code, inner_type, t3, div); update_complex_assignment (bsi, rr, ri); } /* Expand complex division to scalars, modified algorithm to minimize overflow with wide input ranges. */ static void expand_complex_div_wide (block_stmt_iterator *bsi, tree inner_type, tree ar, tree ai, tree br, tree bi, enum tree_code code) { tree rr, ri, ratio, div, t1, t2, min, max, cond; /* Examine |br| < |bi|, and branch. */ t1 = do_unop (bsi, ABS_EXPR, inner_type, br); t2 = do_unop (bsi, ABS_EXPR, inner_type, bi); cond = fold (build (LT_EXPR, boolean_type_node, t1, t2)); STRIP_NOPS (cond); if (TREE_CONSTANT (cond)) { if (integer_zerop (cond)) min = bi, max = br; else min = br, max = bi; } else { basic_block bb_cond, bb_true, bb_false, bb_join; tree l1, l2, l3; edge e; l1 = create_artificial_label (); t1 = build (GOTO_EXPR, void_type_node, l1); l2 = create_artificial_label (); t2 = build (GOTO_EXPR, void_type_node, l2); cond = build (COND_EXPR, void_type_node, cond, t1, t2); bsi_insert_before (bsi, cond, BSI_SAME_STMT); min = make_rename_temp (inner_type, NULL); max = make_rename_temp (inner_type, NULL); l3 = create_artificial_label (); /* Split the original block, and create the TRUE and FALSE blocks.
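The result is a diamond: bb_cond branches to bb_true or bb_false, and both fall through to bb_join, which receives the rest of the division.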
*/ e = split_block (bsi->bb, cond); bb_cond = e->src; bb_join = e->dest; bb_true = create_empty_bb (bb_cond); bb_false = create_empty_bb (bb_true); /* Wire the blocks together. */ e->flags = EDGE_TRUE_VALUE; redirect_edge_succ (e, bb_true); make_edge (bb_cond, bb_false, EDGE_FALSE_VALUE); make_edge (bb_true, bb_join, 0); make_edge (bb_false, bb_join, 0); /* Update dominance info. Note that bb_join's data was updated by split_block. */ if (dom_computed[CDI_DOMINATORS] >= DOM_CONS_OK) { set_immediate_dominator (CDI_DOMINATORS, bb_true, bb_cond); set_immediate_dominator (CDI_DOMINATORS, bb_false, bb_cond); } /* Compute min and max for TRUE block. */ *bsi = bsi_start (bb_true); t1 = build (LABEL_EXPR, void_type_node, l1); bsi_insert_after (bsi, t1, BSI_NEW_STMT); t1 = build (MODIFY_EXPR, inner_type, min, br); bsi_insert_after (bsi, t1, BSI_NEW_STMT); t1 = build (MODIFY_EXPR, inner_type, max, bi); bsi_insert_after (bsi, t1, BSI_NEW_STMT); /* Compute min and max for FALSE block. */ *bsi = bsi_start (bb_false); t1 = build (LABEL_EXPR, void_type_node, l2); bsi_insert_after (bsi, t1, BSI_NEW_STMT); t1 = build (MODIFY_EXPR, inner_type, min, bi); bsi_insert_after (bsi, t1, BSI_NEW_STMT); t1 = build (MODIFY_EXPR, inner_type, max, br); bsi_insert_after (bsi, t1, BSI_NEW_STMT); /* Insert the join label into the tail of the original block. */ *bsi = bsi_start (bb_join); t1 = build (LABEL_EXPR, void_type_node, l3); bsi_insert_before (bsi, t1, BSI_SAME_STMT); } /* Now we have MIN(|br|, |bi|) and MAX(|br|, |bi|). We now use the ratio min/max to scale both the dividend and divisor. */ ratio = do_binop (bsi, code, inner_type, min, max); /* Calculate the divisor: min*ratio + max. */ t1 = do_binop (bsi, MULT_EXPR, inner_type, min, ratio); div = do_binop (bsi, PLUS_EXPR, inner_type, t1, max); /* Result is now ((ar + ai*ratio)/div) + i((ai - ar*ratio)/div). */ t1 = do_binop (bsi, MULT_EXPR, inner_type, ai, ratio); t2 = do_binop (bsi, PLUS_EXPR, inner_type, ar, t1); rr = do_binop (bsi, code, inner_type, t2, div); t1 = do_binop (bsi, MULT_EXPR, inner_type, ar, ratio); t2 = do_binop (bsi, MINUS_EXPR, inner_type, ai, t1); ri = do_binop (bsi, code, inner_type, t2, div); update_complex_assignment (bsi, rr, ri); } /* Expand complex division to scalars. */ static void expand_complex_division (block_stmt_iterator *bsi, tree inner_type, tree ar, tree ai, tree br, tree bi, enum tree_code code) { switch (flag_complex_divide_method) { case 0: /* straightforward implementation of complex divide acceptable. */ expand_complex_div_straight (bsi, inner_type, ar, ai, br, bi, code); break; case 1: /* wide ranges of inputs must work for complex divide. */ expand_complex_div_wide (bsi, inner_type, ar, ai, br, bi, code); break; default: /* C99-like requirements for complex divide (not yet implemented). */ abort (); } } /* Expand complex negation to scalars: -a = (-ar) + i(-ai) */ static void expand_complex_negation (block_stmt_iterator *bsi, tree inner_type, tree ar, tree ai) { tree rr, ri; rr = do_unop (bsi, NEGATE_EXPR, inner_type, ar); ri = do_unop (bsi, NEGATE_EXPR, inner_type, ai); update_complex_assignment (bsi, rr, ri); } /* Expand complex conjugate to scalars: ~a = (ar) + i(-ai) */ static void expand_complex_conjugate (block_stmt_iterator *bsi, tree inner_type, tree ar, tree ai) { tree ri; ri = do_unop (bsi, NEGATE_EXPR, inner_type, ai); update_complex_assignment (bsi, ar, ri); } /* Expand complex comparison (EQ or NE only). 
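a == b becomes (ar == br) && (ai == bi), and a != b becomes (ar != br) || (ai != bi).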
*/ static void expand_complex_comparison (block_stmt_iterator *bsi, tree ar, tree ai, tree br, tree bi, enum tree_code code) { tree cr, ci, cc, stmt, type; cr = do_binop (bsi, code, boolean_type_node, ar, br); ci = do_binop (bsi, code, boolean_type_node, ai, bi); cc = do_binop (bsi, (code == EQ_EXPR ? TRUTH_AND_EXPR : TRUTH_OR_EXPR), boolean_type_node, cr, ci); stmt = bsi_stmt (*bsi); modify_stmt (stmt); switch (TREE_CODE (stmt)) { case RETURN_EXPR: stmt = TREE_OPERAND (stmt, 0); /* FALLTHRU */ case MODIFY_EXPR: type = TREE_TYPE (TREE_OPERAND (stmt, 1)); TREE_OPERAND (stmt, 1) = fold_convert (type, cc); break; case COND_EXPR: TREE_OPERAND (stmt, 0) = cc; break; default: abort (); } } /* Process one statement. If we identify a complex operation, expand it. */ static void expand_complex_operations_1 (block_stmt_iterator *bsi) { tree stmt = bsi_stmt (*bsi); tree rhs, type, inner_type; tree ac, ar, ai, bc, br, bi; enum tree_code code; switch (TREE_CODE (stmt)) { case RETURN_EXPR: stmt = TREE_OPERAND (stmt, 0); if (!stmt) return; if (TREE_CODE (stmt) != MODIFY_EXPR) return; /* FALLTHRU */ case MODIFY_EXPR: rhs = TREE_OPERAND (stmt, 1); break; case COND_EXPR: rhs = TREE_OPERAND (stmt, 0); break; default: return; } type = TREE_TYPE (rhs); code = TREE_CODE (rhs); /* Initial filter for operations we handle. */ switch (code) { case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: case NEGATE_EXPR: case CONJ_EXPR: if (TREE_CODE (type) != COMPLEX_TYPE) return; inner_type = TREE_TYPE (type); break; case EQ_EXPR: case NE_EXPR: inner_type = TREE_TYPE (TREE_OPERAND (rhs, 1)); if (TREE_CODE (inner_type) != COMPLEX_TYPE) return; break; default: return; } /* Extract the components of the two complex values. Make sure and handle the common case of the same value used twice specially. */ ac = TREE_OPERAND (rhs, 0); ar = extract_component (bsi, ac, 0); ai = extract_component (bsi, ac, 1); if (TREE_CODE_CLASS (code) == '1') bc = br = bi = NULL; else { bc = TREE_OPERAND (rhs, 1); if (ac == bc) br = ar, bi = ai; else { br = extract_component (bsi, bc, 0); bi = extract_component (bsi, bc, 1); } } switch (code) { case PLUS_EXPR: case MINUS_EXPR: expand_complex_addition (bsi, inner_type, ar, ai, br, bi, code); break; case MULT_EXPR: expand_complex_multiplication (bsi, inner_type, ar, ai, br, bi); break; case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: expand_complex_division (bsi, inner_type, ar, ai, br, bi, code); break; case NEGATE_EXPR: expand_complex_negation (bsi, inner_type, ar, ai); break; case CONJ_EXPR: expand_complex_conjugate (bsi, inner_type, ar, ai); break; case EQ_EXPR: case NE_EXPR: expand_complex_comparison (bsi, ar, ai, br, bi, code); break; default: abort (); } } /* Main loop to process each statement. */ /* ??? Could use dominator bits to propagate from complex_expr at the same time. This might reveal more constants, particularly in cases such as (complex = complex op scalar). This may not be relevant after SRA and subsequent cleanups. Proof of this would be if we verify that the code generated by expand_complex_div_wide is simplified properly to straight-line code. 
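Note that expansion can create new basic blocks (see expand_complex_div_wide); the walk below deliberately skips blocks added during this pass.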
*/ static void expand_complex_operations (void) { int old_last_basic_block = last_basic_block; block_stmt_iterator bsi; basic_block bb; FOR_EACH_BB (bb) { if (bb->index >= old_last_basic_block) continue; for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) expand_complex_operations_1 (&bsi); } } struct tree_opt_pass pass_lower_complex = { "complex", /* name */ NULL, /* gate */ expand_complex_operations, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_dump_func | TODO_rename_vars | TODO_ggc_collect | TODO_verify_ssa | TODO_verify_stmts | TODO_verify_flow /* todo_flags_finish */ }; /* Loop optimizations over tree-ssa. Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The loop tree currently optimized. */ struct loops *current_loops; /* Initializes the loop structures. DUMP is the file to that the details about the analysis should be dumped. */ static struct loops * tree_loop_optimizer_init (FILE *dump) { struct loops *loops = loop_optimizer_init (dump); if (!loops) return NULL; /* Creation of preheaders may create redundant phi nodes if the loop is entered by more than one edge, but the initial value of the induction variable is the same on all of them. */ kill_redundant_phi_nodes (); rewrite_into_ssa (false); bitmap_clear (vars_to_rename); return loops; } /* The loop superpass. */ static bool gate_loop (void) { return flag_tree_loop_optimize != 0; } struct tree_opt_pass pass_loop = { "loop", /* name */ gate_loop, /* gate */ NULL, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_LOOP, /* tv_id */ PROP_cfg, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ TODO_ggc_collect, /* todo_flags_start */ TODO_dump_func | TODO_verify_ssa | TODO_ggc_collect /* todo_flags_finish */ }; /* Loop optimizer initialization. */ static void tree_ssa_loop_init (void) { current_loops = tree_loop_optimizer_init (dump_file); } struct tree_opt_pass pass_loop_init = { "loopinit", /* name */ NULL, /* gate */ tree_ssa_loop_init, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Loop optimizer finalization. */ static void tree_ssa_loop_done (void) { if (!current_loops) return; loop_optimizer_finalize (current_loops, (dump_flags & TDF_DETAILS ? 
dump_file : NULL)); current_loops = NULL; cleanup_tree_cfg (); } struct tree_opt_pass pass_loop_done = { "loopdone", /* name */ NULL, /* gate */ tree_ssa_loop_done, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ 0, /* tv_id */ PROP_cfg, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Generated automatically by the program `genattrtab' from the machine description file `md'. */ /* Declarations for interface to insn recognizer and insn-output.c. Copyright (C) 1987, 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_RECOG_H #define GCC_RECOG_H /* Random number that should be large enough for all purposes. */ #define MAX_RECOG_ALTERNATIVES 30 #define recog_memoized(I) (INSN_CODE (I) >= 0 \ ? INSN_CODE (I) : recog_memoized_1 (I)) /* Types of operands. */ enum op_type { OP_IN, OP_OUT, OP_INOUT }; struct operand_alternative { /* Pointer to the beginning of the constraint string for this alternative, for easier access by alternative number. */ const char *constraint; /* The register class valid for this alternative (possibly NO_REGS). */ enum reg_class class; /* "Badness" of this alternative, computed from number of '?' and '!' characters in the constraint string. */ unsigned int reject; /* -1 if no matching constraint was found, or an operand number. */ int matches; /* The same information, but reversed: -1 if this operand is not matched by any other, or the operand number of the operand that matches this one. */ int matched; /* Nonzero if '&' was found in the constraint string. */ unsigned int earlyclobber:1; /* Nonzero if 'm' was found in the constraint string. */ unsigned int memory_ok:1; /* Nonzero if 'o' was found in the constraint string. */ unsigned int offmem_ok:1; /* Nonzero if 'V' was found in the constraint string. */ unsigned int nonoffmem_ok:1; /* Nonzero if '<' was found in the constraint string. */ unsigned int decmem_ok:1; /* Nonzero if '>' was found in the constraint string. */ unsigned int incmem_ok:1; /* Nonzero if 'p' was found in the constraint string. */ unsigned int is_address:1; /* Nonzero if 'X' was found in the constraint string, or if the constraint string for this alternative was empty. 
*/ unsigned int anything_ok:1; }; extern void init_recog (void); extern void init_recog_no_volatile (void); extern int recog_memoized_1 (rtx); extern int check_asm_operands (rtx); extern int asm_operand_ok (rtx, const char *); extern int validate_change (rtx, rtx *, rtx, int); extern int insn_invalid_p (rtx); extern int apply_change_group (void); extern int num_validated_changes (void); extern void cancel_changes (int); extern int constrain_operands (int); extern int constrain_operands_cached (int); extern int memory_address_p (enum machine_mode, rtx); extern int strict_memory_address_p (enum machine_mode, rtx); extern int validate_replace_rtx_subexp (rtx, rtx, rtx, rtx *); extern int validate_replace_rtx (rtx, rtx, rtx); extern void validate_replace_rtx_group (rtx, rtx, rtx); extern void validate_replace_src_group (rtx, rtx, rtx); extern int num_changes_pending (void); #ifdef HAVE_cc0 extern int next_insn_tests_no_inequality (rtx); #endif extern int reg_fits_class_p (rtx, enum reg_class, int, enum machine_mode); extern rtx *find_single_use (rtx, rtx, rtx *); extern int general_operand (rtx, enum machine_mode); extern int address_operand (rtx, enum machine_mode); extern int register_operand (rtx, enum machine_mode); extern int pmode_register_operand (rtx, enum machine_mode); extern int scratch_operand (rtx, enum machine_mode); extern int immediate_operand (rtx, enum machine_mode); extern int const_int_operand (rtx, enum machine_mode); extern int const_double_operand (rtx, enum machine_mode); extern int nonimmediate_operand (rtx, enum machine_mode); extern int nonmemory_operand (rtx, enum machine_mode); extern int push_operand (rtx, enum machine_mode); extern int pop_operand (rtx, enum machine_mode); extern int memory_operand (rtx, enum machine_mode); extern int indirect_operand (rtx, enum machine_mode); extern int comparison_operator (rtx, enum machine_mode); extern int offsettable_memref_p (rtx); extern int offsettable_nonstrict_memref_p (rtx); extern int offsettable_address_p (int, enum machine_mode, rtx); extern int mode_dependent_address_p (rtx); extern int recog (rtx, rtx, int *); extern void add_clobbers (rtx, int); extern int added_clobbers_hard_reg_p (int); extern void insn_extract (rtx); extern void extract_insn (rtx); extern void extract_constrain_insn_cached (rtx); extern void extract_insn_cached (rtx); extern void preprocess_constraints (void); extern rtx peep2_next_insn (int); extern int peep2_regno_dead_p (int, int); extern int peep2_reg_dead_p (int, rtx); #ifdef CLEAR_HARD_REG_SET extern rtx peep2_find_free_register (int, int, const char *, enum machine_mode, HARD_REG_SET *); #endif extern void peephole2_optimize (FILE *); extern rtx peephole2_insns (rtx, rtx, int *); extern int store_data_bypass_p (rtx, rtx); extern int if_test_bypass_p (rtx, rtx); /* Nonzero means volatile operands are recognized. */ extern int volatile_ok; /* Set by constrain_operands to the number of the alternative that matched. */ extern int which_alternative; /* The following vectors hold the results from insn_extract. */ struct recog_data { /* It is very tempting to make the 5 operand related arrays into a structure and index on that. However, to be source compatible with all of the existing md file insn constraints and output templates, we need `operand' as a flat array. Without that member, making an array for the rest seems pointless. */ /* Gives value of operand N. */ rtx operand[MAX_RECOG_OPERANDS]; /* Gives location where operand N was found. 
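(That is, a pointer back into the insn pattern itself; a pass that wants to substitute a new rtx for operand N would typically hand this location to validate_change, declared above.)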
*/ rtx *operand_loc[MAX_RECOG_OPERANDS]; /* Gives the constraint string for operand N. */ const char *constraints[MAX_RECOG_OPERANDS]; /* Gives the mode of operand N. */ enum machine_mode operand_mode[MAX_RECOG_OPERANDS]; /* Gives the type (in, out, inout) for operand N. */ enum op_type operand_type[MAX_RECOG_OPERANDS]; /* Gives location where the Nth duplicate-appearance of an operand was found. This is something that matched MATCH_DUP. */ rtx *dup_loc[MAX_DUP_OPERANDS]; /* Gives the operand number that was duplicated in the Nth duplicate-appearance of an operand. */ char dup_num[MAX_DUP_OPERANDS]; /* ??? Note that these are `char' instead of `unsigned char' to (try to) avoid certain lossage from K&R C, wherein `unsigned char' default promotes to `unsigned int' instead of `int' as in ISO C. As of 1999, the most common places to bootstrap from K&R C are SunOS and HPUX, both of which have signed characters by default. The only other supported natives that have both K&R C and unsigned characters are ROMP and Irix 3, and neither have been seen for a while, but do continue to consider unsignedness when performing arithmetic inside a comparison. */ /* The number of operands of the insn. */ char n_operands; /* The number of MATCH_DUPs in the insn. */ char n_dups; /* The number of alternatives in the constraints for the insn. */ char n_alternatives; /* In case we are caching, hold insn data was generated for. */ rtx insn; }; extern struct recog_data recog_data; /* Contains a vector of operand_alternative structures for every operand. Set up by preprocess_constraints. */ extern struct operand_alternative recog_op_alt[MAX_RECOG_OPERANDS][MAX_RECOG_ALTERNATIVES]; /* A table defined in insn-output.c that give information about each insn-code value. */ typedef int (*insn_operand_predicate_fn) (rtx, enum machine_mode); typedef const char * (*insn_output_fn) (rtx *, rtx); typedef rtx (*insn_gen_fn) (rtx, ...); struct insn_operand_data { const insn_operand_predicate_fn predicate; const char *const constraint; ENUM_BITFIELD(machine_mode) const mode : 16; const char strict_low; const char eliminable; }; /* Legal values for insn_data.output_format. Indicate what type of data is stored in insn_data.output. */ #define INSN_OUTPUT_FORMAT_NONE 0 /* abort */ #define INSN_OUTPUT_FORMAT_SINGLE 1 /* const char * */ #define INSN_OUTPUT_FORMAT_MULTI 2 /* const char * const * */ #define INSN_OUTPUT_FORMAT_FUNCTION 3 /* const char * (*)(...) */ struct insn_data { const char *const name; #if HAVE_DESIGNATED_INITIALIZERS union { const char *single; const char *const *multi; insn_output_fn function; } output; #else struct { const char *single; const char *const *multi; insn_output_fn function; } output; #endif const insn_gen_fn genfun; const struct insn_operand_data *const operand; const char n_operands; const char n_dups; const char n_alternatives; const char output_format; }; extern const struct insn_data insn_data[]; #endif /* GCC_RECOG_H */ /* Define per-register tables for data flow info and register allocation. Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_REGS_H #define GCC_REGS_H #define REG_BYTES(R) mode_size[(int) GET_MODE (R)] /* When you only have the mode of a pseudo register before it has a hard register chosen for it, this reports the size of each hard register a pseudo in such a mode would get allocated to. A target may override this. */ #ifndef REGMODE_NATURAL_SIZE #define REGMODE_NATURAL_SIZE(MODE) UNITS_PER_WORD #endif #ifndef SMALL_REGISTER_CLASSES #define SMALL_REGISTER_CLASSES 0 #endif /* Maximum register number used in this function, plus one. */ extern int max_regno; /* Register information indexed by register number */ typedef struct reg_info_def { /* fields set by reg_scan */ int first_uid; /* UID of first insn to use (REG n) */ int last_uid; /* UID of last insn to use (REG n) */ int last_note_uid; /* UID of last note to use (REG n) */ /* fields set by reg_scan & flow_analysis */ int sets; /* # of times (REG n) is set */ /* fields set by flow_analysis */ int refs; /* # of times (REG n) is used or set */ int freq; /* # estimated frequency (REG n) is used or set */ int deaths; /* # of times (REG n) dies */ int live_length; /* # of instructions (REG n) is live */ int calls_crossed; /* # of calls (REG n) is live across */ int basic_block; /* # of basic blocks (REG n) is used in */ char changes_mode; /* whether (SUBREG (REG n)) exists and is illegal. */ } reg_info; extern varray_type reg_n_info; extern bitmap_head subregs_of_mode; /* Indexed by n, gives number of times (REG n) is used or set. */ #define REG_N_REFS(N) (VARRAY_REG (reg_n_info, N)->refs) /* Estimate frequency of references to register N. */ #define REG_FREQ(N) (VARRAY_REG (reg_n_info, N)->freq) /* The weights for each insn varies from 0 to REG_FREQ_BASE. This constant does not need to be high, as in infrequently executed regions we want to count instructions equivalently to optimize for size instead of speed. */ #define REG_FREQ_MAX 1000 /* Compute register frequency from the BB frequency. When optimizing for size, or profile driven feedback is available and the function is never executed, frequency is always equivalent. Otherwise rescale the basic block frequency. */ #define REG_FREQ_FROM_BB(bb) (optimize_size \ || (flag_branch_probabilities \ && !ENTRY_BLOCK_PTR->count) \ ? REG_FREQ_MAX \ : ((bb)->frequency * REG_FREQ_MAX / BB_FREQ_MAX)\ ? ((bb)->frequency * REG_FREQ_MAX / BB_FREQ_MAX)\ : 1) /* Indexed by n, gives number of times (REG n) is set. ??? both regscan and flow allocate space for this. We should settle on just one copy. */ #define REG_N_SETS(N) (VARRAY_REG (reg_n_info, N)->sets) /* Indexed by N, gives number of insns in which register N dies. Note that if register N is live around loops, it can die in transitions between basic blocks, and that is not counted here. So this is only a reliable indicator of how many regions of life there are for registers that are contained in one basic block. */ #define REG_N_DEATHS(N) (VARRAY_REG (reg_n_info, N)->deaths) /* Get the number of consecutive words required to hold pseudo-reg N.
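For example, assuming a 32-bit target where UNITS_PER_WORD is 4, a DImode pseudo (GET_MODE_SIZE == 8) needs (8 + 4 - 1) / 4 == 2 consecutive words under the definition below.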
*/ #define PSEUDO_REGNO_SIZE(N) \ ((GET_MODE_SIZE (PSEUDO_REGNO_MODE (N)) + UNITS_PER_WORD - 1) \ / UNITS_PER_WORD) /* Get the number of bytes required to hold pseudo-reg N. */ #define PSEUDO_REGNO_BYTES(N) \ GET_MODE_SIZE (PSEUDO_REGNO_MODE (N)) /* Get the machine mode of pseudo-reg N. */ #define PSEUDO_REGNO_MODE(N) GET_MODE (regno_reg_rtx[N]) /* Indexed by N, gives number of CALL_INSNS across which (REG n) is live. */ #define REG_N_CALLS_CROSSED(N) (VARRAY_REG (reg_n_info, N)->calls_crossed) /* Total number of instructions at which (REG n) is live. The larger this is, the less priority (REG n) gets for allocation in a hard register (in global-alloc). This is set in flow.c and remains valid for the rest of the compilation of the function; it is used to control register allocation. local-alloc.c may alter this number to change the priority. Negative values are special. -1 is used to mark a pseudo reg which has a constant or memory equivalent and is used infrequently enough that it should not get a hard register. -2 is used to mark a pseudo reg for a parameter, when a frame pointer is not required. global.c makes an allocno for this but does not try to assign a hard register to it. */ #define REG_LIVE_LENGTH(N) (VARRAY_REG (reg_n_info, N)->live_length) /* Vector of substitutions of register numbers, used to map pseudo regs into hardware regs. This can't be folded into reg_n_info without changing all of the machine dependent directories, since the reload functions in the machine dependent files access it. */ extern short *reg_renumber; /* Vector indexed by hardware reg saying whether that reg is ever used. */ extern char regs_ever_live[FIRST_PSEUDO_REGISTER]; /* Like regs_ever_live, but saying whether reg is set by asm statements. */ extern char regs_asm_clobbered[FIRST_PSEUDO_REGISTER]; /* For each hard register, the widest mode object that it can contain. This will be a MODE_INT mode if the register can hold integers. Otherwise it will be a MODE_FLOAT or a MODE_CC mode, whichever is valid for the register. */ extern enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER]; /* Vector indexed by regno; gives uid of first insn using that reg. This is computed by reg_scan for use by cse and loop. It is sometimes adjusted for subsequent changes during loop, but not adjusted by cse even if cse invalidates it. */ #define REGNO_FIRST_UID(N) (VARRAY_REG (reg_n_info, N)->first_uid) /* Vector indexed by regno; gives uid of last insn using that reg. This is computed by reg_scan for use by cse and loop. It is sometimes adjusted for subsequent changes during loop, but not adjusted by cse even if cse invalidates it. This is harmless since cse won't scan through a loop end. */ #define REGNO_LAST_UID(N) (VARRAY_REG (reg_n_info, N)->last_uid) /* Similar, but includes insns that mention the reg in their notes. */ #define REGNO_LAST_NOTE_UID(N) (VARRAY_REG (reg_n_info, N)->last_note_uid) /* List made of EXPR_LIST rtx's which gives pairs of pseudo registers that have to go in the same hard reg. */ extern rtx regs_may_share; /* Flag set by local-alloc or global-alloc if they decide to allocate something in a call-clobbered register. */ extern int caller_save_needed; /* Predicate to decide whether to give a hard reg to a pseudo which is referenced REFS times and would need to be saved and restored around a call CALLS times. 
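For instance, with the default definition that follows, a pseudo referenced 9 times and live across 2 calls is considered profitable (4 * 2 < 9), whereas one referenced only 8 times is not.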
*/ #ifndef CALLER_SAVE_PROFITABLE #define CALLER_SAVE_PROFITABLE(REFS, CALLS) (4 * (CALLS) < (REFS)) #endif /* On most machines a register class is likely to be spilled if it only has one register. */ #ifndef CLASS_LIKELY_SPILLED_P #define CLASS_LIKELY_SPILLED_P(CLASS) (reg_class_size[(int) (CLASS)] == 1) #endif /* Select a register mode required for caller save of hard regno REGNO. */ #ifndef HARD_REGNO_CALLER_SAVE_MODE #define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \ choose_hard_reg_mode (REGNO, NREGS, false) #endif /* Registers that get partially clobbered by a call in a given mode. These must not be call used registers. */ #ifndef HARD_REGNO_CALL_PART_CLOBBERED #define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) 0 #endif /* Allocate reg_n_info tables */ extern void allocate_reg_info (size_t, int, int); /* Specify number of hard registers given machine mode occupy. */ extern unsigned char hard_regno_nregs[FIRST_PSEUDO_REGISTER][MAX_MACHINE_MODE]; #endif /* GCC_REGS_H */ /* Generated automatically by the program `genattr' from the machine description file `md'. */ #ifndef GCC_INSN_ATTR_H #define GCC_INSN_ATTR_H #define HAVE_ATTR_alternative #define get_attr_alternative(insn) which_alternative #define HAVE_ATTR_cpu enum attr_cpu {CPU_I386, CPU_I486, CPU_PENTIUM, CPU_PENTIUMPRO, CPU_K6, CPU_ATHLON, CPU_PENTIUM4, CPU_K8, CPU_NOCONA}; extern enum attr_cpu get_attr_cpu (void); #define HAVE_ATTR_type enum attr_type {TYPE_OTHER, TYPE_MULTI, TYPE_ALU, TYPE_ALU1, TYPE_NEGNOT, TYPE_IMOV, TYPE_IMOVX, TYPE_LEA, TYPE_INCDEC, TYPE_ISHIFT, TYPE_ISHIFT1, TYPE_ROTATE, TYPE_ROTATE1, TYPE_IMUL, TYPE_IDIV, TYPE_ICMP, TYPE_TEST, TYPE_IBR, TYPE_SETCC, TYPE_ICMOV, TYPE_PUSH, TYPE_POP, TYPE_CALL, TYPE_CALLV, TYPE_LEAVE, TYPE_STR, TYPE_CLD, TYPE_FMOV, TYPE_FOP, TYPE_FSGN, TYPE_FMUL, TYPE_FDIV, TYPE_FPSPC, TYPE_FCMOV, TYPE_FCMP, TYPE_FXCH, TYPE_FISTP, TYPE_SSELOG, TYPE_SSEIADD, TYPE_SSEISHFT, TYPE_SSEIMUL, TYPE_SSE, TYPE_SSEMOV, TYPE_SSEADD, TYPE_SSEMUL, TYPE_SSECMP, TYPE_SSECOMI, TYPE_SSECVT, TYPE_SSEICVT, TYPE_SSEDIV, TYPE_MMX, TYPE_MMXMOV, TYPE_MMXADD, TYPE_MMXMUL, TYPE_MMXCMP, TYPE_MMXCVT, TYPE_MMXSHFT}; extern enum attr_type get_attr_type (rtx); #define HAVE_ATTR_mode enum attr_mode {MODE_UNKNOWN, MODE_NONE, MODE_QI, MODE_HI, MODE_SI, MODE_DI, MODE_SF, MODE_DF, MODE_XF, MODE_TI, MODE_V4SF, MODE_V2DF, MODE_V2SF}; extern enum attr_mode get_attr_mode (rtx); #define HAVE_ATTR_unit enum attr_unit {UNIT_INTEGER, UNIT_I387, UNIT_SSE, UNIT_MMX, UNIT_UNKNOWN}; extern enum attr_unit get_attr_unit (rtx); #define HAVE_ATTR_length_immediate extern int get_attr_length_immediate (rtx); #define HAVE_ATTR_length_address extern int get_attr_length_address (rtx); #define HAVE_ATTR_prefix_data16 extern int get_attr_prefix_data16 (rtx); #define HAVE_ATTR_prefix_rep extern int get_attr_prefix_rep (rtx); #define HAVE_ATTR_prefix_0f extern int get_attr_prefix_0f (rtx); #define HAVE_ATTR_prefix_rex extern int get_attr_prefix_rex (rtx); #define HAVE_ATTR_modrm extern int get_attr_modrm (rtx); #define HAVE_ATTR_length extern int get_attr_length (rtx); extern void shorten_branches (rtx); extern int insn_default_length (rtx); extern int insn_variable_length_p (rtx); extern int insn_current_length (rtx); /* Macros to support INSN_ADDRESSES Copyright (C) 2000 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_INSN_ADDR_H #define GCC_INSN_ADDR_H extern GTY(()) varray_type insn_addresses_; extern int insn_current_address; #define INSN_ADDRESSES_DEFN() varray_type insn_addresses_ #define INSN_ADDRESSES(id) VARRAY_INT (insn_addresses_, (id)) #define INSN_ADDRESSES_ALLOC(size) \ VARRAY_INT_INIT (insn_addresses_, (size), "insn_addresses") #define INSN_ADDRESSES_FREE() (insn_addresses_ = 0) #define INSN_ADDRESSES_SET_P() (insn_addresses_ != 0) #define INSN_ADDRESSES_SIZE() VARRAY_SIZE (insn_addresses_) #define INSN_ADDRESSES_NEW(insn, addr) do \ { \ unsigned insn_uid__ = INSN_UID ((insn)); \ int insn_addr__ = (addr); \ \ if (INSN_ADDRESSES_SET_P ()) \ { \ if (INSN_ADDRESSES_SIZE () <= insn_uid__) \ VARRAY_GROW (insn_addresses_, insn_uid__ + 1); \ INSN_ADDRESSES (insn_uid__) = insn_addr__; \ } \ } \ while (0) #endif /* ! GCC_INSN_ADDR_H */ #define HAVE_ATTR_memory enum attr_memory {MEMORY_NONE, MEMORY_LOAD, MEMORY_STORE, MEMORY_BOTH, MEMORY_UNKNOWN}; extern enum attr_memory get_attr_memory (rtx); #define HAVE_ATTR_imm_disp enum attr_imm_disp {IMM_DISP_FALSE, IMM_DISP_TRUE, IMM_DISP_UNKNOWN}; extern enum attr_imm_disp get_attr_imm_disp (rtx); #define HAVE_ATTR_fp_int_src enum attr_fp_int_src {FP_INT_SRC_FALSE, FP_INT_SRC_TRUE}; extern enum attr_fp_int_src get_attr_fp_int_src (rtx); #define HAVE_ATTR_pent_prefix enum attr_pent_prefix {PENT_PREFIX_FALSE, PENT_PREFIX_TRUE}; extern enum attr_pent_prefix get_attr_pent_prefix (rtx); #define HAVE_ATTR_pent_pair enum attr_pent_pair {PENT_PAIR_UV, PENT_PAIR_PU, PENT_PAIR_PV, PENT_PAIR_NP}; extern enum attr_pent_pair get_attr_pent_pair (rtx); #define HAVE_ATTR_athlon_decode enum attr_athlon_decode {ATHLON_DECODE_DIRECT, ATHLON_DECODE_VECTOR, ATHLON_DECODE_DOUBLE}; extern enum attr_athlon_decode get_attr_athlon_decode (rtx); #define INSN_SCHEDULING extern int result_ready_cost (rtx); extern int function_units_used (rtx); extern const struct function_unit_desc { const char *const name; const int bitmask; const int multiplicity; const int simultaneity; const int default_cost; const int max_issue_delay; int (*const ready_cost_function) (rtx); int (*const conflict_cost_function) (rtx, rtx); const int max_blockage; unsigned int (*const blockage_range_function) (rtx); int (*const blockage_function) (rtx, rtx); } function_units[]; #define FUNCTION_UNITS_SIZE 0 #define MIN_MULTIPLICITY 100000 #define MAX_MULTIPLICITY -1 #define MIN_SIMULTANEITY 100000 #define MAX_SIMULTANEITY -1 #define MIN_READY_COST 100000 #define MAX_READY_COST -1 #define MIN_ISSUE_DELAY 100000 #define MAX_ISSUE_DELAY -1 #define MIN_BLOCKAGE 100000 #define MAX_BLOCKAGE -1 #define BLOCKAGE_BITS 1 #define INSN_QUEUE_SIZE 1 /* DFA based pipeline interface. 
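   A minimal usage sketch, purely illustrative of the call sequence (the real
   driver loop lives in the scheduler, not here, and error handling is
   omitted):

     state_t state = xmalloc (state_size ());
     state_reset (state);
     if (state_transition (state, insn) < 0)
       the insn may issue on the current cycle, and STATE now reflects its
       reservations;
     else
       state_transition (state, NULL) starts a new processor cycle, after
       which the insn can be retried.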
*/ #ifndef AUTOMATON_ALTS #define AUTOMATON_ALTS 0 #endif #ifndef AUTOMATON_STATE_ALTS #define AUTOMATON_STATE_ALTS 0 #endif #ifndef CPU_UNITS_QUERY #define CPU_UNITS_QUERY 0 #endif extern int max_dfa_issue_rate; /* The following macro value is calculated from the automaton based pipeline description and is equal to maximal number of all insns described in constructions `define_insn_reservation' which can be issued on the same processor cycle. */ #define MAX_DFA_ISSUE_RATE max_dfa_issue_rate /* Insn latency time defined in define_insn_reservation. */ extern int insn_default_latency (rtx); /* Return nonzero if there is a bypass for given insn which is a data producer. */ extern int bypass_p (rtx); /* Insn latency time on data consumed by the 2nd insn. Use the function if bypass_p returns nonzero for the 1st insn. */ extern int insn_latency (rtx, rtx); #if AUTOMATON_ALTS /* The following function returns number of alternative reservations of given insn. It may be used for better insns scheduling heuristics. */ extern int insn_alts (rtx); #endif /* Maximal possible number of insns waiting results being produced by insns whose execution is not finished. */ extern int max_insn_queue_index; /* Pointer to data describing current state of DFA. */ typedef void *state_t; /* Size of the data in bytes. */ extern int state_size (void); /* Initiate given DFA state, i.e. Set up the state as all functional units were not reserved. */ extern void state_reset (state_t); /* The following function returns negative value if given insn can be issued in processor state described by given DFA state. In this case, the DFA state is changed to reflect the current and future reservations by given insn. Otherwise the function returns minimal time delay to issue the insn. This delay may be zero for superscalar or VLIW processors. If the second parameter is NULL the function changes given DFA state as new processor cycle started. */ extern int state_transition (state_t, rtx); #if AUTOMATON_STATE_ALTS /* The following function returns number of possible alternative reservations of given insn in given DFA state. It may be used for better insns scheduling heuristics. By default the function is defined if macro AUTOMATON_STATE_ALTS is defined because its implementation may require much memory. */ extern int state_alts (state_t, rtx); #endif extern int min_issue_delay (state_t, rtx); /* The following function returns nonzero if no one insn can be issued in current DFA state. */ extern int state_dead_lock_p (state_t); /* The function returns minimal delay of issue of the 2nd insn after issuing the 1st insn in given DFA state. The 1st insn should be issued in given state (i.e. state_transition should return negative value for the insn and the state). Data dependencies between the insns are ignored by the function. */ extern int min_insn_conflict_delay (state_t, rtx, rtx); /* The following function outputs reservations for given insn as they are described in the corresponding define_insn_reservation. */ extern void print_reservation (FILE *, rtx); #if CPU_UNITS_QUERY /* The following function returns code of functional unit with given name (see define_cpu_unit). */ extern int get_cpu_unit_code (const char *); /* The following function returns nonzero if functional unit with given code is currently reserved in given DFA state. */ extern int cpu_unit_reservation_p (state_t, int); #endif /* Clean insn code cache. 
It should be called if there is a chance that condition value in a define_insn_reservation will be changed after last call of dfa_start. */ extern void dfa_clean_insn_cache (void); /* Initiate and finish work with DFA. They should be called as the first and the last interface functions. */ extern void dfa_start (void); extern void dfa_finish (void); #define ATTR_FLAG_forward 0x1 #define ATTR_FLAG_backward 0x2 #define ATTR_FLAG_likely 0x4 #define ATTR_FLAG_very_likely 0x8 #define ATTR_FLAG_unlikely 0x10 #define ATTR_FLAG_very_unlikely 0x20 #endif /* GCC_INSN_ATTR_H */ #define operands recog_data.operand int insn_current_length (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 514: extract_constrain_insn_cached (insn); if ((which_alternative == 0) && ((((INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[0]) == LABEL_REF ? XEXP (operands[0], 0) : operands[0])) : 0) - (insn_current_reference_address (insn))) >= (-126)) && (((INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[0]) == LABEL_REF ? XEXP (operands[0], 0) : operands[0])) : 0) - (insn_current_reference_address (insn))) < (128)))) { return 2; } else { return 16 /* 0x10 */; } case 509: extract_insn_cached (insn); if ((((INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[0]) == LABEL_REF ? XEXP (operands[0], 0) : operands[0])) : 0) - (insn_current_reference_address (insn))) >= (-126)) && (((INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[0]) == LABEL_REF ? XEXP (operands[0], 0) : operands[0])) : 0) - (insn_current_reference_address (insn))) < (128))) { return 2; } else { return 5; } case 498: extract_insn_cached (insn); if ((((INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[0]) == LABEL_REF ? XEXP (operands[0], 0) : operands[0])) : 0) - (insn_current_reference_address (insn))) >= (-126)) && (((INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[0]) == LABEL_REF ? XEXP (operands[0], 0) : operands[0])) : 0) - (insn_current_reference_address (insn))) < (128))) { return 2; } else { return 6; } case 497: extract_insn_cached (insn); if ((((INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[0]) == LABEL_REF ? XEXP (operands[0], 0) : operands[0])) : 0) - (insn_current_reference_address (insn))) >= (-126)) && (((INSN_ADDRESSES_SET_P () ? INSN_ADDRESSES (INSN_UID (GET_CODE (operands[0]) == LABEL_REF ? 
XEXP (operands[0], 0) : operands[0])) : 0) - (insn_current_reference_address (insn))) < (128))) { return 2; } else { return 6; } case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 0; } } int insn_variable_length_p (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 514: case 509: case 498: case 497: return 1; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 0; } } int insn_default_length (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 655: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return 16 /* 0x10 */; } else { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } case 580: case 579: case 578: case 577: case 576: case 575: case 574: case 573: case 572: case 571: case 570: case 568: case 567: case 566: case 565: case 563: case 562: if (get_attr_unit (insn) == UNIT_I387) { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 584: case 581: case 559: case 556: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 490: case 489: case 487: case 478: case 477: case 475: case 464: case 463: case 459: case 443: case 440: case 439: case 437: case 435: extract_insn_cached (insn); if (register_operand (operands[0], VOIDmode)) { return 2; } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 484: case 483: case 471: case 467: case 461: case 455: case 451: case 431: case 427: extract_insn_cached (insn); if (register_operand (operands[0], SImode)) { return 2; } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 481: case 469: case 447: case 445: case 420: case 418: extract_insn_cached (insn); if (register_operand (operands[0], DImode)) { return 2; } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 172: case 169: case 166: case 163: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return 16 /* 0x10 */; } else if (which_alternative == 0) { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 137: extract_constrain_insn_cached (insn); if (which_alternative != 
0) { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 136: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 135: case 134: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xe)) { return 16 /* 0x10 */; } else if (which_alternative == 0) { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 176: case 175: case 174: case 171: case 168: case 165: case 162: case 145: case 142: case 133: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return 16 /* 0x10 */; } else { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } case 656: case 654: case 128: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 112: case 111: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return 16 /* 0x10 */; } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 101: case 100: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x7)) { return 16 /* 0x10 */; } else { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } case 96: case 95: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x18)) { return 16 /* 0x10 */; } else if (((1 << which_alternative) & 0x7)) { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 657: case 616: case 615: case 614: case 613: case 612: case 611: case 610: case 609: case 608: case 607: case 606: case 605: case 604: case 603: case 602: case 601: case 600: case 599: case 598: case 597: case 596: case 595: case 594: case 593: case 592: case 591: case 590: case 589: case 588: case 587: case 586: case 583: case 561: case 558: case 555: case 391: case 390: case 389: case 388: case 387: case 386: case 376: case 375: case 374: case 373: case 372: case 371: case 146: case 144: case 143: case 141: case 138: case 132: case 131: case 130: case 102: case 97: case 92: return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); 
case 91: case 90: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return 2 + get_attr_prefix_data16 (insn) + get_attr_length_address (insn); } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 89: case 88: extract_constrain_insn_cached (insn); if (which_alternative != 1) { return 16 /* 0x10 */; } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 84: case 83: extract_constrain_insn_cached (insn); if (which_alternative == 4) { return 16 /* 0x10 */; } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 721: case 720: case 82: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return 16 /* 0x10 */; } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 76: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return 16 /* 0x10 */; } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 34: case 31: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 4; } else { return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } case 33: case 30: case 25: case 23: case 21: case 20: case 19: return 4; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); return 128 /* 0x80 */; case 1022: case 1021: case 526: return 3; case 853: return 135 /* 0x87 */; case 554: case 553: case 552: case 551: return 7; case 549: case 544: case 530: return 12 /* 0xc */; case 548: return 13 /* 0xd */; case 547: return 11 /* 0xb */; case 545: return 14 /* 0xe */; case 528: case 525: case 524: case 29: return 1; case 523: return 0; case 718: case 717: case 716: case 715: case 714: case 713: case 712: case 711: case 710: case 709: case 708: case 697: case 696: case 684: case 683: case 682: case 681: case 680: case 679: case 678: case 677: case 676: case 675: case 674: case 673: case 668: case 667: case 665: case 664: case 662: case 661: case 659: case 658: case 550: case 543: case 542: case 541: case 540: case 539: case 538: case 537: case 536: case 535: case 532: case 531: case 508: case 507: case 506: case 505: case 504: case 503: case 502: case 501: case 500: case 499: case 450: case 449: case 423: case 422: case 405: case 404: case 385: case 384: case 383: case 382: case 381: case 380: case 379: case 378: case 377: case 370: case 369: case 368: case 367: case 366: case 365: case 364: case 363: case 362: case 351: case 287: case 286: case 275: case 273: case 272: case 270: case 269: case 267: case 266: case 225: 
case 177: case 159: case 158: case 157: case 154: case 153: case 152: case 149: case 148: case 147: case 127: case 126: case 117: case 99: case 98: case 94: case 93: case 75: case 27: case 26: case 24: case 22: case 18: case 546: case 529: case 514: return 16 /* 0x10 */; case 686: case 685: case 509: return 5; case 498: case 497: return 6; case 472: case 456: case 452: case 432: case 428: case 161: case 160: case 28: return 2; default: return get_attr_modrm (insn) + get_attr_prefix_0f (insn) + get_attr_prefix_rex (insn) + 1 + get_attr_prefix_rep (insn) + get_attr_prefix_data16 (insn) + get_attr_length_immediate (insn) + get_attr_length_address (insn); } } int bypass_p (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 89: case 88: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (! (memory_operand (operands[1], VOIDmode))))) { return 1; } else { return 0; } case 76: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (! (memory_operand (operands[1], VOIDmode))))) { return 1; } else { return 0; } case 534: case 533: case 79: case 78: case 40: case 39: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else { return 0; } case 77: case 58: case 57: case 49: case 48: case 38: case 37: case 36: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! (memory_operand (operands[1], VOIDmode)))) { return 1; } else { return 0; } case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 0; } } int insn_default_latency (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 1015: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 2; } else { return 0; } case 1009: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((memory_operand (operands[1], DFmode)) || (get_attr_memory (insn) == MEMORY_LOAD)))) { 
return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && (which_alternative == 1)) || (((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 1)) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 1))))) { return 2; } else { return 0; } case 1008: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((memory_operand (operands[1], DFmode)) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((which_alternative == 1) && (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))))) { return 2; } else { return 0; } case 1012: case 1011: case 1010: case 1004: case 1002: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if 
((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON))))) { return 2; } else { return 0; } case 966: case 965: case 964: case 963: case 962: case 961: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 952: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((optimize_size) != (0)))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) { return 2; } else { return 0; } case 903: case 902: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else { return 0; } case 899: case 898: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 891: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 22 /* 0x16 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 20 /* 0x14 */; } else { return 0; } case 890: if ((((ix86_tune) == 
(CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 39 /* 0x27 */; } else { return 0; } case 895: case 893: case 889: case 887: case 885: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 4; } else { return 0; } case 1028: case 1026: case 1024: case 894: case 892: case 888: case 886: case 884: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 7; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 5; } else { return 0; } case 881: case 880: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else { return 0; } case 879: case 878: case 870: case 869: case 865: case 864: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || 
(((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 1020: case 1019: case 1018: case 852: if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else { return 0; } case 851: if (((ix86_tune) == (CPU_K6))) { return 2; } else { return 0; } case 850: if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else if (((ix86_tune) == (CPU_K6))) { return 3; } else { return 0; } case 849: if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 859: case 858: case 857: case 827: case 826: case 825: case 824: case 823: case 822: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 871: case 842: case 841: case 840: case 839: case 838: case 837: case 836: case 835: case 834: case 833: case 832: case 818: case 817: case 816: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 883: case 882: case 863: case 813: case 812: if ((((ix86_tune) == (CPU_PENTIUM))) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 877: case 862: case 810: case 809: case 808: case 807: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && 
(get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 876: case 875: case 874: case 873: case 872: case 868: case 867: case 866: case 861: case 860: case 856: case 855: case 854: case 831: case 830: case 829: case 828: case 815: case 814: case 811: case 806: case 805: case 804: case 803: case 802: case 801: case 800: case 799: case 798: case 797: case 796: case 795: case 794: case 793: case 792: case 791: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 923: case 921: case 788: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))))) { return 9; } else { return 0; } case 922: case 920: case 787: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && 
((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))))) { return 9; } else { return 0; } case 775: case 774: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else { return 0; } case 771: case 770: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if (((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 769: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 768: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 767: case 766: case 765: case 764: case 763: case 762: case 761: case 760: case 759: case 758: case 757: case 756: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 755: case 754: case 753: case 752: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 1030: case 1029: case 780: case 778: case 750: case 748: case 746: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else { return 0; } case 745: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory 
(insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 22 /* 0x16 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 20 /* 0x14 */; } else { return 0; } case 744: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 48 /* 0x30 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 39 /* 0x27 */; } else { return 0; } case 743: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 4; } else { return 0; } case 742: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 5; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 7; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 5; } else { return 0; } case 741: case 739: if ((((ix86_tune) == (CPU_PENTIUM))) && 
(get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 4; } else { return 0; } case 1027: case 1025: case 1023: case 740: case 738: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 7; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 5; } else { return 0; } case 736: case 735: case 734: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if (((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else { return 0; } case 928: case 783: case 782: case 777: 
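/* The cases below appear to be machine-generated insn-attribute code
   (genattrtab output) for the i386 back end: each `case' value is an
   insn code from the machine description, and the integer returned is
   the per-CPU value selected by ix86_tune (CPU_PENTIUM, CPU_PENTIUMPRO,
   CPU_K6, CPU_ATHLON, CPU_K8), by the insn's memory attribute
   (MEMORY_NONE/LOAD/STORE/BOTH), and, for some patterns, by the matched
   constraint alternative (which_alternative).  */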
case 776: case 737: case 733: case 732: case 731: case 730: case 725: case 724: case 723: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else { return 0; } case 721: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((1 << which_alternative) & 0x3)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))))) { return 2; } else { return 0; } case 720: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((1 << which_alternative) & 0x3)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))))) { return 2; } else { return 0; } case 719: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! 
((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7)))))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else { return 0; } case 707: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else { return 0; } case 706: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else { return 0; } case 705: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else { return 0; } case 848: case 847: case 846: case 845: case 844: case 843: case 821: case 820: case 819: case 727: case 726: case 704: case 703: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 729: case 702: case 701: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if (((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 
2; } else { return 0; } case 700: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else { return 0; } case 699: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else { return 0; } case 728: case 722: case 698: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((memory_operand (operands[1], DFmode)) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON))))) { return 2; } else { return 0; } case 695: case 694: case 693: case 692: case 691: case 690: case 689: case 688: case 687: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 10 /* 0xa */; } else { return 6; } case 672: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative == 0))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((which_alternative == 1) && (((ix86_tune) == (CPU_K6)))) || (((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((which_alternative == 1) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 671: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (const0_operand (operands[2], DImode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (const0_operand (operands[2], DImode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (const0_operand (operands[2], DImode))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 1) && (const0_operand (operands[2], DImode)))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (! (const0_operand (operands[2], DImode)))))) || ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative == 0))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (! (const0_operand (operands[2], DImode)))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! 
(const0_operand (operands[2], DImode)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 670: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (const0_operand (operands[2], SImode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (const0_operand (operands[2], SImode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (const0_operand (operands[2], SImode))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 1) && (const0_operand (operands[2], SImode)))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (! 
(const0_operand (operands[2], SImode)))))) || ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative == 0))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (! (const0_operand (operands[2], SImode)))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! 
(const0_operand (operands[2], SImode)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 657: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 2; } else if (((ix86_tune) == (CPU_ATHLON))) { return 7; } else if (((ix86_tune) == (CPU_K8))) { return 15 /* 0xf */; } else { return 0; } case 655: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((1 << which_alternative) & 0x3))) { return 1; } else if (!((1 << which_alternative) & 0x3)) { return 6; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 2; } else if (((ix86_tune) == (CPU_ATHLON))) { return 7; } else if (((ix86_tune) == (CPU_K8))) { return 15 /* 0xf */; } else { return 0; } case 656: case 654: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((1 << which_alternative) & 0x3))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (!((1 << which_alternative) & 0x3)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (!((1 << which_alternative) & 0x3)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (!((1 << which_alternative) & 0x3)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (!((1 << which_alternative) & 0x3)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) 
== (CPU_ATHLON))) && (((1 << which_alternative) & 0x3))) { return 7; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_K8))) && (((1 << which_alternative) & 0x3))) { return 15 /* 0xf */; } else { return 0; } case 653: case 652: case 651: case 649: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 650: case 648: if ((((ix86_tune) == (CPU_PENTIUM))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) || ((((ix86_tune) == (CPU_K6))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))))) { return 1; } else { return 0; } case 647: case 646: case 645: case 644: case 643: case 642: case 641: case 640: case 639: case 638: case 637: case 636: case 635: case 634: case 633: case 632: case 631: case 630: if (((ix86_tune) == (CPU_PENTIUM))) { return 12 /* 0xc */; } else { return 6; } case 629: case 628: case 627: case 626: case 625: case 624: case 623: case 622: case 621: case 620: case 619: case 618: if (((ix86_tune) == (CPU_PENTIUM))) { return 12 /* 0xc */; } else { return 6; } case 617: if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 2; } else if (((ix86_tune) == (CPU_K6))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 616: case 615: case 614: case 613: case 612: case 611: case 610: case 607: case 604: case 600: case 596: case 592: case 591: case 590: case 589: case 588: if (((ix86_tune) == (CPU_PENTIUM))) { return 70 /* 0x46 */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 38 /* 0x26 */; } else if (((ix86_tune) == (CPU_K6))) { return 56 /* 0x38 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 100 /* 0x64 */; } else { return 0; } case 608: case 605: case 603: case 601: case 599: case 597: case 595: case 593: case 587: case 586: if (((ix86_tune) == (CPU_PENTIUM))) { return 70 /* 0x46 */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 32 /* 0x20 */; } else if (((ix86_tune) == (CPU_K6))) { return 56 /* 0x38 */; } else if 
((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 100 /* 0x64 */; } else { return 0; } case 1031: case 1017: case 1013: case 1007: case 1006: case 1005: case 1003: case 1001: case 1000: case 999: case 998: case 997: case 996: case 995: case 994: case 993: case 992: case 991: case 990: case 989: case 960: case 959: case 958: case 957: case 956: case 929: case 919: case 918: case 917: case 916: case 915: case 914: case 913: case 912: case 911: case 910: case 909: case 908: case 907: case 906: case 905: case 904: case 896: case 784: case 669: case 663: case 585: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else { return 0; } case 584: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 0)) { return 70 /* 0x46 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 0)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 103 /* 0x67 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 100 /* 0x64 */; } else { return 0; } case 609: case 606: case 602: case 598: case 594: case 583: if (((ix86_tune) == (CPU_PENTIUM))) { return 70 /* 0x46 */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 18 /* 0x12 */; } else if (((ix86_tune) == (CPU_K6))) { return 56 /* 0x38 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 100 /* 0x64 */; } else { return 0; } case 897: case 781: case 779: case 751: case 749: case 747: case 666: case 660: case 582: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } 
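/* Note: the surrounding switch appears to be genattrtab output (insn-attrtab.c
   style) for the i386 backend.  Each case maps an insn code to a latency in
   cycles, selected by the active -mtune CPU (CPU_PENTIUM, CPU_PENTIUMPRO,
   CPU_K6, CPU_ATHLON, CPU_K8) and by the insn's memory/type/unit attributes. */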
else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else { return 0; } case 581: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 0)) { return 70 /* 0x46 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 19 /* 0x13 */; } else if (((which_alternative == 1) && (((ix86_tune) == (CPU_PENTIUMPRO)))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 0)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 103 /* 0x67 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 100 /* 0x64 */; } else { return 0; } case 580: case 579: case 578: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || (mult_operator (operands[3], XFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) 
&& (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else { return 0; } case 577: case 576: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || (mult_operator (operands[3], XFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == 
(CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else { return 0; } case 575: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || (mult_operator (operands[3], XFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == 
TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 38 /* 0x26 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], 
XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else { return 0; } case 574: case 573: case 572: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || (mult_operator (operands[3], DFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], DFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], DFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if 
(((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], DFmode))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else { return 0; } case 571: case 570: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || (mult_operator (operands[3], DFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], DFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && 
(((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], DFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], DFmode))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else { return 0; } case 569: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { 
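/* Athlon/K8 handling for this case: UNIT_INTEGER forms cost 1 cycle (4 when a
   memory load is involved), while the TYPE_SSEADD and TYPE_SSEDIV branches
   below charge their SSE latencies, with K8 adding cycles when MEMORY_LOAD
   feeds the operation. */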
return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_SSEADD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_SSEADD))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (mult_operator (operands[3], SFmode))) || ((((ix86_tune) == (CPU_K8))) && (mult_operator (operands[3], SFmode)))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 22 /* 0x16 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 20 /* 0x14 */; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_SSEDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 39 /* 0x27 */; } else { return 0; } case 568: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || ((which_alternative != 2) && (mult_operator (operands[3], DFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode)))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == 
MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_SSEADD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == 
TYPE_SSEADD))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 7; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 22 /* 0x16 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 20 /* 0x14 */; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_SSEDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 39 /* 0x27 */; } else { return 0; } case 567: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || (mult_operator (operands[3], DFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], DFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory 
(insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], DFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], DFmode))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else { return 0; } case 566: case 565: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || (mult_operator (operands[3], SFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && 
(get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], SFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else { return 0; } case 564: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == 
(CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEADD)) || ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_SSEADD)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_SSEADD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_SSEADD))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (mult_operator (operands[3], SFmode))) || ((((ix86_tune) == (CPU_K8))) && (mult_operator (operands[3], SFmode)))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 22 /* 0x16 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 20 /* 0x14 */; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_SSEDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 39 /* 0x27 */; } else { return 0; } case 563: extract_constrain_insn_cached (insn); if ((((ix86_tune) == 
(CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || ((which_alternative != 2) && (mult_operator (operands[3], SFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode)))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEADD)) || ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_SSEADD)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if 
(((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_SSEADD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_SSEADD))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 7; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == 
MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 22 /* 0x16 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 20 /* 0x14 */; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_SSEDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 39 /* 0x27 */; } else { return 0; } case 562: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_type (insn) == TYPE_FOP) || (mult_operator (operands[3], SFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], SFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 56 /* 0x38 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } 
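/* (Descriptive note, inferred from the surrounding code: each branch of
   this generated dispatch picks a scheduling latency -- apparently a
   cycle count -- for the current insn, keyed off the active tuning CPU
   in ix86_tune and off insn attributes such as memory access kind,
   execution unit, and operation type; the trailing comments after the
   larger constants merely restate the same value in hex.)  */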
else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 11 /* 0xb */; } else { return 0; } case 561: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (! (mult_operator (operands[3], XFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (! (mult_operator (operands[3], XFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (! (mult_operator (operands[3], XFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (! (mult_operator (operands[3], XFmode)))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((! (mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((! (mult_operator (operands[3], XFmode))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((! 
(mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (mult_operator (operands[3], XFmode)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 4; } else { return 0; } case 560: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (mult_operator (operands[3], SFmode)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (! (mult_operator (operands[3], SFmode)))) || ((((ix86_tune) == (CPU_K8))) && (! (mult_operator (operands[3], SFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (mult_operator (operands[3], SFmode))) || ((((ix86_tune) == (CPU_K8))) && (mult_operator (operands[3], SFmode)))) { return 5; } else { return 0; } case 559: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 1) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && ((which_alternative == 0) && (! 
(mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode))))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode))))) || ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) || ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 7; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (! 
(mult_operator (operands[3], SFmode)))))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 7; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode))))) { return 5; } else { return 0; } case 557: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && (! (mult_operator (operands[3], SFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (! (mult_operator (operands[3], SFmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (mult_operator (operands[3], SFmode)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (! (mult_operator (operands[3], SFmode)))) || ((((ix86_tune) == (CPU_K8))) && (! 
(mult_operator (operands[3], SFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) && (mult_operator (operands[3], SFmode))) || ((((ix86_tune) == (CPU_K8))) && (mult_operator (operands[3], SFmode)))) { return 5; } else { return 0; } case 556: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 1) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! 
(mult_operator (operands[3], SFmode)))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode))))) || ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) || ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 7; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (! 
(mult_operator (operands[3], SFmode)))))) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 7; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode))))) { return 5; } else { return 0; } case 558: case 555: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (! (mult_operator (operands[3], SFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_BOTH) && (! (mult_operator (operands[3], SFmode)))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode))))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((! (mult_operator (operands[3], SFmode))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], SFmode)) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! 
(mult_operator (operands[3], SFmode)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 4; } else { return 0; } case 554: case 552: if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) || (((ix86_tune) == (CPU_K6)))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 4; } else { return 0; } case 534: case 533: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 522: case 521: case 520: case 519: case 518: case 517: case 516: case 515: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 10 /* 0xa */; } else { return 6; } case 527: case 514: case 513: case 512: case 511: case 510: case 509: case 498: case 497: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (memory_operand (operands[0], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[0], VOIDmode))) { return 6; } else if (((ix86_tune) == (CPU_K6))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 0; } else { return 0; } case 901: case 900: case 496: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 773: case 772: case 495: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 494: case 493: extract_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (! (memory_operand (operands[0], VOIDmode)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) || ((((ix86_tune) == (CPU_K6))) && (! 
(memory_operand (operands[0], VOIDmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[0], VOIDmode))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 1; } else { return 0; } case 492: case 490: case 479: case 477: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 491: case 489: case 488: case 487: case 486: case 485: case 484: case 483: case 482: case 481: case 480: case 478: case 476: case 475: case 474: case 473: case 472: case 471: case 470: case 469: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 466: case 464: case 442: case 440: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[1], VOIDmode))) && (memory_operand (operands[1], VOIDmode))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[1], VOIDmode)))) && (memory_operand (operands[1], VOIDmode))))) { return 3; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 424: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 468: case 467: case 465: case 463: case 462: case 461: case 460: case 459: case 458: case 457: case 456: case 455: case 454: case 453: case 452: case 451: case 448: case 447: case 446: case 445: case 444: case 443: case 441: case 439: case 438: case 437: case 436: case 435: case 434: case 433: case 432: case 431: case 430: case 429: case 428: case 427: case 421: case 420: case 419: case 418: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 426: case 425: case 417: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((which_alternative != 1) || (! (const_int_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((which_alternative != 1) || (! (const_int_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((which_alternative != 1) || (! (const_int_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (! 
(get_attr_memory (insn) == MEMORY_NONE)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 414: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2))) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 2) || (get_attr_type (insn) == TYPE_ISHIFT))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! 
(get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 413: case 412: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ISHIFT)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! 
(get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 411: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ISHIFT))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! 
(get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((which_alternative != 0) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 1))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 410: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 408: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 0) || ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && ((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! 
(const1_operand (operands[2], VOIDmode))))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((which_alternative != 0) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 1))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 406: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 416: case 415: case 409: case 403: if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_ALU)) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_ALU)) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_ALU)) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ISHIFT)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 407: case 402: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU))) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ISHIFT))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((which_alternative != 0) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 1))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 391: case 390: case 389: case 388: case 387: case 386: case 376: case 375: case 374: case 373: case 372: case 371: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == 
(CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 400: case 398: case 395: case 394: case 392: case 361: case 360: case 359: case 358: case 357: case 356: case 355: case 354: case 353: case 352: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (memory_operand (operands[1], VOIDmode))) { return 3; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 294: case 290: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x3)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x3)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x3)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0x3)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 288: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x7))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 3)) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x7))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 3)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x7))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 3)) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0x7)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 282: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else { return 0; } case 281: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((!((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_NONE)) || ((!((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) 
&& (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else { return 0; } case 280: case 279: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) && (get_attr_memory (insn) == MEMORY_NONE)) || ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else { return 0; } case 278: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((!((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((!((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else { return 0; } case 277: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_K6))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 19 /* 0x13 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 276: case 271: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 39 /* 0x27 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_K6))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 19 /* 0x13 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 274: case 268: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_K6))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 19 /* 0x13 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 265: case 264: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_K6))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 19 /* 0x13 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 263: case 262: case 260: case 259: case 257: case 255: if (((ix86_tune) == (CPU_PENTIUM))) { return 11 /* 0xb */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 8; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 261: case 258: case 256: case 254: if (((ix86_tune) == (CPU_PENTIUM))) { return 11 /* 0xb */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 8; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 7; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 253: case 252: case 251: if 
(((ix86_tune) == (CPU_PENTIUM))) { return 11 /* 0xb */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 8; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 250: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 11 /* 0xb */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 8; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! 
(((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 249: case 248: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 11 /* 0xb */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 8; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! 
(memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 247: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 11 /* 0xb */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 5; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 8; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 7; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! 
(((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 217: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((! (incdec_operand (operands[2], QImode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((! 
(incdec_operand (operands[2], QImode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 215: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative != 3))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative != 3))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative != 3))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 3) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative != 3) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((which_alternative == 3) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 3))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { 
return 1; } else { return 0; } case 209: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_NONE)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative != 2))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative != 2))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative != 2))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative != 2) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 202: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && 
(((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode))) || ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && (((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 201: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode))) || ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == 
TYPE_INCDEC)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && (((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 196: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode))) || ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) 
== (CPU_K6))) && ((((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && (((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 195: case 194: case 193: case 192: case 191: case 190: case 189: case 188: case 187: case 186: if ((((ix86_tune) == (CPU_PENTIUM))) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 350: case 349: case 348: case 346: case 345: case 344: case 343: case 342: case 340: case 339: case 338: case 337: case 336: case 335: case 334: case 333: case 332: case 331: case 330: case 329: case 328: case 327: case 326: case 325: case 324: case 323: case 322: case 320: case 318: case 317: case 316: case 315: case 314: case 313: case 312: case 311: case 310: case 309: case 308: case 307: case 306: case 305: case 304: case 303: case 302: case 301: case 300: case 298: case 296: case 295: case 293: case 292: case 291: case 289: case 246: case 245: case 243: case 242: case 241: case 240: case 239: case 238: case 237: case 236: case 235: case 234: case 229: case 228: case 227: case 224: case 223: case 222: case 221: case 220: case 219: case 218: case 216: case 214: case 213: case 212: case 211: case 210: case 208: case 207: case 206: case 205: case 204: case 203: case 200: case 199: case 198: case 197: case 185: case 184: case 179: if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 233: case 232: case 231: case 230: case 226: case 183: case 182: case 181: case 180: case 178: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 176: case 175: case 174: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { 
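/* Pentium row, alternative 0: register-only or plain load form. */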
return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (which_alternative != 0) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 10 /* 0xa */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 8; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 925: case 924: case 173: case 170: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 9; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { 
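/* Athlon/K8 row, alternative 0 with no memory operand. */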
return 11 /* 0xb */; } else { return 0; } case 172: case 169: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (which_alternative == 1) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 9; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 11 /* 0xb */; } else { return 0; } case 786: case 785: case 167: case 164: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && 
((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 9; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 11 /* 0xb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 14 /* 0xe */; } else { return 0; } case 166: case 163: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (which_alternative == 1) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (!((1 << which_alternative) & 0x3))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 9; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_NONE))) { return 11 /* 0xb */; } else if (((((ix86_tune) == 
(CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && ((((1 << which_alternative) & 0x6)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 14 /* 0xe */; } else { return 0; } case 171: case 168: case 165: case 162: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (which_alternative != 0) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 156: case 155: case 151: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 9; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 11 /* 0xb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 14 /* 0xe */; } else { return 0; } case 790: case 789: case 150: 
extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 9; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 11 /* 0xb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 14 /* 0xe */; } else { return 0; } case 159: case 158: case 157: case 154: case 153: case 152: case 149: case 148: case 147: if (((ix86_tune) == (CPU_PENTIUM))) { return 3; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 5; } else if (((ix86_tune) == (CPU_K6))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 4; } else { return 0; } case 926: case 139: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 12 /* 0xc */; } else { return 0; } case 137: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE))))) { return 1; } else if ((((ix86_tune) == 
(CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((which_alternative == 1) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 2; } else { return 0; } case 136: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 2) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 2) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && ((get_attr_memory (insn) == 
MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((which_alternative == 2) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 12 /* 0xc */; } else { return 0; } case 135: case 134: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((1 << which_alternative) & 0xe)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 4) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((which_alternative == 4) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 145: case 142: case 133: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { 
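/* Pentium row, alternative 0: immediate source or store form. */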
return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (which_alternative != 0) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 146: case 144: case 143: case 141: case 138: case 132: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } 
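/* No tuning-specific row matched above; the generated fallback is 0. */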
else { return 0; } case 131: case 130: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 10 /* 0xa */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 8; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 1033: case 1032: case 1016: case 1014: case 927: case 140: case 129: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || 
(get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 128: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) && (which_alternative == 1))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x3)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x3))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x3)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) && (which_alternative == 1))) { return 10 /* 0xa */; } else if ((((ix86_tune) == (CPU_K8))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) && (which_alternative == 1))) { return 8; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_STORE) || 
(get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x3)))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((which_alternative == 2) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 114: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 1)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 1)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1)) || ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory 
(insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if (((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x3)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2)) { return 2; } else { return 0; } case 113: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 1)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 1)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1)) || ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && ((memory_operand (operands[1], DFmode)) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && 
(((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((which_alternative == 3) && ((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON))))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x3))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2)) { return 2; } else { return 0; } case 112: case 111: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((1 << which_alternative) & 0x7)) { return 6; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 4) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 4) && ((memory_operand (operands[1], DFmode)) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 4) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((which_alternative == 4) && ((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON))))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else { return 0; } case 109: case 106: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((which_alternative == 1) && (! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 101: case 100: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if 
(!((1 << which_alternative) & 0x7)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 10 /* 0xa */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 8; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 96: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((1 << which_alternative) & 0x18)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && 
(((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode))) || (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f)))))) { return 2; } else { return 0; } case 95: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((1 << which_alternative) & 0x18)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || 
(get_attr_memory (insn) == MEMORY_BOTH)) || ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode))) || (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f)))))) { return 2; } else { return 0; } case 988: case 987: case 986: case 985: case 984: case 983: case 982: case 981: case 980: case 979: case 978: case 977: case 976: case 975: case 974: case 973: case 972: case 971: case 970: case 969: case 968: case 967: case 955: case 954: case 953: case 951: case 950: case 949: case 948: case 947: case 946: case 945: case 944: case 943: case 942: case 941: case 940: case 939: case 938: case 937: case 936: case 935: case 934: case 933: case 932: case 931: case 930: case 102: case 97: case 92: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else { return 0; } case 91: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if 
((((ix86_tune) == (CPU_PENTIUM))) && (((1 << which_alternative) & 0x18))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x18))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x18)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x18))) || ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) || ((get_attr_memory (insn) == MEMORY_STORE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (((((1 << which_alternative) & 0x1e0)) && 
(memory_operand (operands[1], DFmode))) || (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1ff)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1ff))) { return 2; } else { return 0; } case 90: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((1 << which_alternative) & 0x18))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x18))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x18)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x18))) || ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) || ((get_attr_memory (insn) == MEMORY_STORE) && ((((which_alternative == 6) && ((! 
((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == 
(CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 4; } else if (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode))) || (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1ff)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1ff))) { return 2; } else { return 0; } case 89: case 88: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (! (memory_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((which_alternative == 1) && (! (memory_operand (operands[1], VOIDmode)))) && ((which_alternative == 1) && (memory_operand (operands[1], VOIDmode)))) || (((which_alternative != 1) || (memory_operand (operands[1], VOIDmode))) && ((which_alternative == 1) && (memory_operand (operands[1], VOIDmode)))))) { return 3; } else if (which_alternative != 1) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (memory_operand (operands[1], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 84: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if (which_alternative == 4) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode)))))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))))) || ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_BOTH) || (which_alternative == 4)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode))) || ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x7e0)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && (which_alternative == 8)) || (((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 8)) || (((((ix86_tune) == 
(CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7e0)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xe0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xe0))) { return 2; } else { return 0; } case 83: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if (which_alternative == 4) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode)))))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))))) || ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_BOTH) || (which_alternative == 4)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode))) || ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x7e0)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && (which_alternative == 8)) || (((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 8)) || (((((ix86_tune) == 
(CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7e0)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xe0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xe0))) { return 2; } else { return 0; } case 82: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((1 << which_alternative) & 0x3)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((1 << which_alternative) & 0xc)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0xc)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0xc)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0xc)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0xf)) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (((!((1 << which_alternative) & 0xf)) && (memory_operand (operands[1], DFmode))) || ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0xf)) && ((which_alternative != 5) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0xf)) && ((which_alternative != 5) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0xf)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0xf)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xf)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && (which_alternative == 5)) || (((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 5)) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0xf)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } 
else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xc))) { return 2; } else { return 0; } case 76: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (! (memory_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((which_alternative == 0) && (! (memory_operand (operands[1], VOIDmode)))) && ((which_alternative == 0) && (memory_operand (operands[1], VOIDmode)))) || (((which_alternative != 0) || (memory_operand (operands[1], VOIDmode))) && ((which_alternative == 0) && (memory_operand (operands[1], VOIDmode)))))) { return 3; } else if (which_alternative != 0) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (memory_operand (operands[1], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 71: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((! 
(q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 70: case 66: case 65: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_IMOV)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_IMOV)) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_IMOV)) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_IMOV)) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_IMOV)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_IMOV))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_IMOV)) || ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_IMOVX)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_IMOVX))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOVX) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOVX) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 125: case 124: case 123: case 122: case 121: case 120: case 119: case 118: case 116: case 115: case 110: case 107: case 104: case 69: case 64: case 63: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && 
(get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 59: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_BOTH)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_BOTH))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! 
((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! 
((TARGET_QIMODE_MATH) == (0))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))))) || ((get_attr_memory (insn) == MEMORY_NONE) && (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! 
((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! 
((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 74: case 73: case 72: case 61: case 55: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 60: case 54: case 53: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) || ((((ix86_tune) == (CPU_K6))) && 
((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 50: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_IMOV)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_IMOV)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_IMOV))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_IMOV)) || ((get_attr_memory (insn) == MEMORY_NONE) && (((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! 
(aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 87: case 47: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory 
(insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 553: case 551: case 86: case 68: case 52: case 46: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 85: case 67: case 51: case 45: if ((((ix86_tune) == (CPU_PENTIUM))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) || (((ix86_tune) == (CPU_K6))))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 1; } else { return 0; } case 44: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && ((! 
((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode)))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode)))))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))))) || ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode))) || ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xfc)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && (which_alternative == 5)) || (((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 5)) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xfc)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x1c)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x1c))) { return 2; } else { return 0; } case 43: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode)))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode))))))) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode)))))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))))) || ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && (((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode))) || ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 0; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xfc)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && (which_alternative == 5)) || (((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 5)) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xfc)))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x1c)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x1c))) { return 2; } else { return 0; } case 401: case 399: case 397: case 396: case 393: case 347: case 341: case 321: case 319: case 299: case 297: case 244: case 108: case 105: case 103: case 81: case 80: case 62: case 56: case 42: case 41: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (memory_operand (operands[1], VOIDmode))) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (memory_operand (operands[1], VOIDmode))))) { return 3; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else { return 0; } case 79: case 78: case 40: case 39: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (memory_operand (operands[0], VOIDmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[0], VOIDmode))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 4; } else { return 0; } case 77: case 58: case 57: case 49: case 48: case 38: case 37: case 36: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! (memory_operand (operands[1], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (memory_operand (operands[1], VOIDmode))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(memory_operand (operands[1], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 2; } else { return 0; } case 35: case 32: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && (GET_MODE (operands[1]) == SFmode)) || ((get_attr_memory (insn) == MEMORY_LOAD) && (GET_MODE (operands[1]) == SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else { return 0; } case 34: case 31: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (GET_MODE (operands[1]) == SFmode))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (GET_MODE (operands[1]) == SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 5; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else { return 0; } case 33: case 30: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if 
((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 5; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 25: case 23: case 21: case 20: case 19: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 2; } else { return 0; } case 285: case 284: case 283: case 17: case 16: case 15: case 14: case 13: case 12: case 11: case 10: case 9: case 8: case 7: case 6: case 5: case 4: case 3: case 2: case 1: case 0: if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 4; } else { return 0; } case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 6; } } #if AUTOMATON_ALTS int insn_alts (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 1015: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 3; } else { return 0; } case 1009: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) 
&& ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 1)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 1)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 1)) { return 6; } else { return 0; } case 1008: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((which_alternative == 1) && (((ix86_tune) == (CPU_K8)))) { return 12 /* 0xc */; } else if ((which_alternative == 1) && (((ix86_tune) == (CPU_ATHLON)))) { return 4; } else if ((which_alternative == 1) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 6; } else { return 0; } case 1012: case 1011: case 1010: case 1004: case 1002: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 9; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { 
return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((ix86_tune) == (CPU_K8))) { return 12 /* 0xc */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 4; } else { return 0; } case 966: case 965: case 964: case 963: case 962: case 961: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 952: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((optimize_size) != (0))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if (((ix86_tune) == (CPU_K8))) { return 12 /* 0xc */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 4; } else { return 0; } case 903: case 902: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else { return 0; } case 899: case 898: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((ix86_tune) == (CPU_ATHLON))) { return 1; } else if (((ix86_tune) == (CPU_K8))) { return 3; } else { return 0; } case 895: case 893: case 891: case 889: case 887: case 885: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 881: case 880: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else { return 0; } case 879: case 878: case 870: case 869: case 865: case 864: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == 
MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 1020: case 1019: case 1018: case 852: if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 1; } else { return 0; } case 851: if (((ix86_tune) == (CPU_K6))) { return 2; } else { return 0; } case 850: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else { return 0; } case 849: if ((((ix86_tune) == (CPU_PENTIUMPRO))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 6; } else { return 0; } case 859: case 858: case 857: case 827: case 826: case 825: case 824: case 823: case 822: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 871: case 842: case 841: case 840: case 839: case 838: case 837: case 836: case 835: case 834: case 833: case 832: case 818: case 817: case 816: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 883: case 882: case 863: case 813: case 812: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 6; } else { return 0; } case 877: case 862: case 810: case 809: case 808: case 807: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == 
(CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 876: case 875: case 874: case 873: case 872: case 868: case 867: case 866: case 861: case 860: case 856: case 855: case 854: case 831: case 830: case 829: case 828: case 815: case 814: case 811: case 806: case 805: case 804: case 803: case 802: case 801: case 800: case 799: case 798: case 797: case 796: case 795: case 794: case 793: case 792: case 791: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 923: case 921: case 788: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else { return 0; } case 922: case 920: case 787: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else { return 0; } case 775: case 774: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory 
(insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else { return 0; } case 771: case 770: if (((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((ix86_tune) == (CPU_ATHLON))) { return 1; } else if (((ix86_tune) == (CPU_K8))) { return 3; } else { return 0; } case 769: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else if (((ix86_tune) == (CPU_ATHLON))) { return 1; } else if (((ix86_tune) == (CPU_K8))) { return 3; } else { return 0; } case 768: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) { return 3; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || (((ix86_tune) == (CPU_ATHLON)))) { return 1; } else if (((ix86_tune) == (CPU_K8))) { return 3; } else { return 0; } case 1028: case 1026: case 1024: case 894: case 892: case 890: case 888: case 886: case 884: case 767: case 766: case 765: case 764: case 763: case 762: case 761: case 760: case 759: case 758: case 757: case 756: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((ix86_tune) == (CPU_ATHLON))) { return 1; } else if (((ix86_tune) == (CPU_K8))) { return 3; } else { return 0; } case 755: case 754: case 753: case 752: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((ix86_tune) == (CPU_ATHLON))) { 
return 1; } else if (((ix86_tune) == (CPU_K8))) { return 3; } else { return 0; } case 1030: case 1029: case 780: case 778: case 750: case 748: case 746: if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else { return 0; } case 745: if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 744: if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((ix86_tune) == (CPU_ATHLON))) { return 1; } else if (((ix86_tune) == (CPU_K8))) { return 3; } else { return 0; } case 743: case 741: case 739: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 1027: case 1025: case 1023: case 742: case 740: case 738: if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { 
return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if (((ix86_tune) == (CPU_ATHLON))) { return 1; } else if (((ix86_tune) == (CPU_K8))) { return 3; } else { return 0; } case 736: case 735: case 734: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 6; } else { return 0; } case 928: case 783: case 782: case 777: case 776: case 737: case 733: case 732: case 731: case 730: case 725: case 724: case 723: if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else { return 0; } case 721: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((1 << which_alternative) & 0x3))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) 
!= (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! 
((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 720: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((1 << which_alternative) & 0x3))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 719: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! 
((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7)))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! 
((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7)))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 707: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 706: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != 
(0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 705: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 848: case 847: case 846: case 845: case 844: case 843: case 821: case 820: case 819: case 727: case 726: case 704: case 703: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 729: case 702: case 701: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == 
(CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 6; } else { return 0; } case 700: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! 
((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 699: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 728: case 722: case 698: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 9; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if (((ix86_tune) == (CPU_K8))) { return 12 /* 0xc */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 4; } else { return 0; } case 695: case 694: case 693: case 692: case 691: case 690: case 689: case 688: case 687: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else { return 1; } case 672: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0)) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative == 0)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((which_alternative == 1) && (((ix86_tune) == (CPU_K6)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((which_alternative == 1) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 671: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (const0_operand (operands[2], DImode)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (const0_operand (operands[2], DImode))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (const0_operand (operands[2], DImode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 1) && (const0_operand (operands[2], DImode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (! (const0_operand (operands[2], DImode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0)) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative == 0)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (! (const0_operand (operands[2], DImode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (! 
(const0_operand (operands[2], DImode)))) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 670: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (const0_operand (operands[2], SImode)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (const0_operand (operands[2], SImode))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (const0_operand (operands[2], SImode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 1) && (const0_operand (operands[2], SImode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (! 
(const0_operand (operands[2], SImode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0)) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative == 0)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (! (const0_operand (operands[2], SImode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (! 
(const0_operand (operands[2], SImode)))) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 657: if ((((ix86_tune) == (CPU_PENTIUM))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 1; } else { return 0; } case 655: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((1 << which_alternative) & 0x3))) || ((!((1 << which_alternative) & 0x3)) || (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((1 << which_alternative) & 0x3))) || (((((ix86_tune) == (CPU_ATHLON))) && (((1 << which_alternative) & 0x3))) || ((((ix86_tune) == (CPU_K8))) && (((1 << which_alternative) & 0x3))))))) { return 1; } else { return 0; } case 656: case 654: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((1 << which_alternative) & 0x3)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (!((1 << which_alternative) & 0x3)))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (!((1 << which_alternative) & 0x3))) || ((get_attr_memory (insn) == MEMORY_STORE) && (!((1 << which_alternative) & 0x3)))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (!((1 << which_alternative) & 0x3))))) || ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((1 << which_alternative) & 0x3))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K8))) && (((1 << which_alternative) & 0x3))) { return 1; } else { return 0; } case 653: case 652: case 651: case 649: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) 
== MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 650: case 648: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 6; } else if (((ix86_tune) == (CPU_K6))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 617: if ((((ix86_tune) == (CPU_PENTIUM))) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 1; } else { return 0; } case 1031: case 1017: case 1013: case 1007: case 1006: case 1005: case 1003: case 1001: case 1000: case 999: case 998: case 997: case 996: case 995: case 994: case 993: case 992: case 991: case 990: case 989: case 960: case 959: case 958: case 957: case 956: case 929: case 919: case 918: case 917: case 916: case 915: case 914: case 913: case 912: case 911: case 910: case 909: case 908: case 907: case 906: case 905: case 904: case 896: case 784: case 669: case 663: case 585: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else { return 0; } case 584: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((which_alternative != 1) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && ((which_alternative != 1) || (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == 
MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 1; } else { return 0; } case 616: case 615: case 614: case 613: case 612: case 611: case 610: case 609: case 608: case 607: case 606: case 605: case 604: case 603: case 602: case 601: case 600: case 599: case 598: case 597: case 596: case 595: case 594: case 593: case 592: case 591: case 590: case 589: case 588: case 587: case 586: case 583: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 1; } else { return 0; } case 897: case 781: case 779: case 751: case 749: case 747: case 666: case 660: case 582: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else { return 0; } case 581: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((which_alternative != 1) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 1; } else if ((which_alternative == 1) && (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 3; } else if (((((ix86_tune) == (CPU_K6))) && ((which_alternative != 1) || (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 1; } else { return 0; } case 577: case 576: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], XFmode))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_type (insn) == TYPE_FDIV) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator 
(operands[3], XFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (get_attr_type (insn) == TYPE_FDIV))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else { return 0; } case 580: case 579: case 578: case 575: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], XFmode))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_type (insn) == TYPE_FDIV) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && 
(get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (get_attr_type (insn) == TYPE_FDIV))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else { return 0; } case 571: case 570: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && 
(get_attr_type (insn) == TYPE_FOP)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], DFmode))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_type (insn) == TYPE_FDIV) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], DFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], DFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((mult_operator (operands[3], DFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (get_attr_type (insn) == TYPE_FDIV))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], DFmode))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && 
(get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else { return 0; } case 569: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 3; } else { return 0; } case 568: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_type (insn) == TYPE_FDIV) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) || 
((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode))))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (get_attr_type (insn) == TYPE_FDIV))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) 
== MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 3; } else { return 0; } case 574: case 573: case 572: case 567: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], DFmode))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_type (insn) == TYPE_FDIV) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], DFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], DFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if 
((((ix86_tune) == (CPU_K6))) && (((((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((mult_operator (operands[3], DFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (get_attr_type (insn) == TYPE_FDIV))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], DFmode))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else { return 0; } case 566: case 565: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], SFmode))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_type (insn) == TYPE_FDIV) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && 
(((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((mult_operator (operands[3], SFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (get_attr_type (insn) == TYPE_FDIV))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else { return 0; } case 564: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEADD))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_SSEADD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode))) || ((get_attr_memory 
(insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEDIV)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 3; } else { return 0; } case 563: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_type (insn) == TYPE_FDIV) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else if 
((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEADD))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_SSEADD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) || ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEDIV)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (get_attr_type (insn) == TYPE_FDIV))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && 
(get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 3; } else { return 0; } case 562: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], SFmode))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_type (insn) == TYPE_FDIV) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == 
MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((get_attr_type (insn) == TYPE_FOP) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((mult_operator (operands[3], SFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (get_attr_type (insn) == TYPE_FDIV))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV))) { return 3; } else { return 0; } case 561: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! (mult_operator (operands[3], XFmode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], XFmode))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (! (mult_operator (operands[3], XFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && (! (mult_operator (operands[3], XFmode)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (! 
(mult_operator (operands[3], XFmode))))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (! (mult_operator (operands[3], XFmode))))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((! (mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((! (mult_operator (operands[3], XFmode))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (mult_operator (operands[3], XFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 3; } else { return 0; } case 560: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (mult_operator (operands[3], SFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 3; } else { return 0; } case 559: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (! 
(mult_operator (operands[3], SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) || ((get_attr_memory (insn) == MEMORY_BOTH) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! 
(mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 3; } else { return 0; } case 557: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (! (mult_operator (operands[3], SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! 
(mult_operator (operands[3], SFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 3; } else { return 0; } case 556: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) || ((get_attr_memory (insn) == MEMORY_BOTH) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) || ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (! 
(mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 3; } else { return 0; } case 558: case 555: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! 
(mult_operator (operands[3], SFmode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], SFmode))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (! (mult_operator (operands[3], SFmode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((((get_attr_memory (insn) == MEMORY_LOAD) && (! (mult_operator (operands[3], SFmode)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (! (mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (! (mult_operator (operands[3], SFmode))))) || ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((! (mult_operator (operands[3], SFmode))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) || ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((mult_operator (operands[3], SFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! 
(mult_operator (operands[3], SFmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 3; } else { return 0; } case 554: case 552: if (((ix86_tune) == (CPU_PENTIUM))) { return 32 /* 0x20 */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 2; } else if (((ix86_tune) == (CPU_K6))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 553: case 551: if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 534: case 533: if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 4; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if (((ix86_tune) == (CPU_ATHLON))) { return 2; } else if (((ix86_tune) == (CPU_K8))) { return 6; } else { return 0; } case 522: case 521: case 520: case 519: case 518: case 517: case 516: case 515: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else { return 1; } case 527: case 514: case 513: case 512: case 511: case 510: case 509: case 498: case 497: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (memory_operand (operands[0], VOIDmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[0], VOIDmode))) { return 1; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 901: case 900: case 496: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 773: case 772: case 495: if (((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) || ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 494: case 493: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! (memory_operand (operands[0], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(memory_operand (operands[0], VOIDmode)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[0], VOIDmode))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (! (memory_operand (operands[0], VOIDmode)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[0], VOIDmode))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (memory_operand (operands[0], VOIDmode)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[0], VOIDmode))) { return 6; } else { return 0; } case 492: case 490: case 479: case 477: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 491: case 489: case 488: case 487: case 486: case 485: case 484: case 483: case 482: case 481: case 480: case 478: case 476: case 475: case 474: case 473: case 472: case 471: case 470: case 469: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 466: case 464: case 442: case 440: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[1], VOIDmode))) && (memory_operand (operands[1], VOIDmode)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[1], VOIDmode)))) && (memory_operand (operands[1], VOIDmode))) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((memory_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 424: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 468: case 467: case 465: case 463: case 462: case 461: case 460: case 459: case 458: case 457: case 456: case 455: case 454: case 453: case 452: case 451: case 448: case 447: case 446: case 445: case 444: case 443: case 441: case 439: case 438: case 437: case 436: case 435: case 434: case 433: case 432: case 431: case 430: case 429: case 428: case 427: case 421: case 420: case 419: case 418: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! 
(const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 426: case 425: case 417: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((which_alternative != 1) || (! (const_int_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((which_alternative != 1) || (! (const_int_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((which_alternative != 1) || (! (const_int_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0)) || ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (! 
(get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 414: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 2) || (get_attr_type (insn) == TYPE_ISHIFT)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! 
(get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU)))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 2)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 413: case 412: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! 
(get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU)))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 411: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ISHIFT)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU)))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 1)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 0) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 410: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) || ((((ix86_tune) == (CPU_K6))) && (((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! 
(const1_operand (operands[2], VOIDmode)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 408: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 0) || ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && ((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! 
(const1_operand (operands[2], VOIDmode))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) || ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) || ((get_attr_memory (insn) == MEMORY_BOTH) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))))) || ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 1)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 0) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 406: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 416: case 415: case 409: case 403: if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_ALU)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_ALU)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_ALU)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! 
(get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU)))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 407: case 402: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ISHIFT)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU)) || ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU)))) || ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 1)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 0) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 391: case 390: case 389: case 388: case 387: case 386: case 376: case 375: case 374: case 373: case 372: case 371: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 400: case 398: case 395: case 394: 
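/* The case group that resumes below (insn codes 392 and 361-352, grouped
   with 400/398/395/394 above) follows the same shape as the rest of this
   switch, which looks like genattrtab output: it appears to pick a small
   cycle count from ix86_tune, the "memory" attribute, and whether operand 1
   is a memory_operand (for example, 24 on Athlon/K8 when operand 1 is in
   memory).  Which i386.md patterns these insn codes correspond to depends
   on the machine description this table was generated from.  */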
case 392: case 361: case 360: case 359: case 358: case 357: case 356: case 355: case 354: case 353: case 352: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((memory_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) || ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 294: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x3)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x3))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x3)))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0x3))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 290: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x3)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x3))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x3)))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0x3))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 288: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x7))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x7))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x7))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_NONE))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0x7))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 282: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else { return 0; } case 281: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == 
MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else { return 0; } case 280: case 279: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else { return 0; } case 278: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else { return 0; } case 274: case 268: if ((((ix86_tune) == (CPU_PENTIUM))) || (((ix86_tune) == (CPU_K6)))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if 
(((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 277: case 276: case 271: case 265: case 264: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if (((ix86_tune) == (CPU_K6))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 263: case 262: case 261: case 260: case 259: case 258: case 257: case 256: case 255: case 254: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) || (((ix86_tune) == (CPU_K6)))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 253: case 252: case 251: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) || (((ix86_tune) == (CPU_K6)))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! 
(((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 250: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) || (((ix86_tune) == (CPU_K6)))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 249: case 248: case 247: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) || (((ix86_tune) == (CPU_K6)))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else { return 0; } case 217: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K6))) && ((! (incdec_operand (operands[2], QImode))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((! (incdec_operand (operands[2], QImode))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 215: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 3))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative != 3))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative != 3)) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative != 3))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative != 3)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 3) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == 
(CPU_K6))) && (((which_alternative != 3) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 3)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 3) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 339: case 338: case 337: case 317: case 316: case 315: case 295: case 242: case 241: case 240: case 214: case 212: case 211: case 210: if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 209: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE)) || ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 2))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative != 2))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative != 2)) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative != 2))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative != 2)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 2) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative != 2) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 2)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 202: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory 
(insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC))) || ((get_attr_memory (insn) == MEMORY_STORE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) || ((get_attr_memory (insn) == MEMORY_BOTH) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode))) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 201: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == 
MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC))) || ((get_attr_memory (insn) == MEMORY_STORE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) || ((get_attr_memory (insn) == MEMORY_BOTH) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode))) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 196: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 2) || (pic_symbolic_operand 
(operands[2], DImode))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC))) || ((get_attr_memory (insn) == MEMORY_STORE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) || ((get_attr_memory (insn) == MEMORY_BOTH) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode))) || (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 195: case 194: case 193: case 192: case 191: case 190: case 189: case 188: case 187: case 186: if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else if (((ix86_tune) == (CPU_K6))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 350: case 349: case 348: case 346: case 345: case 344: case 343: case 342: case 340: case 336: case 335: case 334: case 333: case 332: case 331: case 330: case 329: case 328: case 327: case 326: case 325: case 324: case 323: case 322: case 320: case 318: case 314: case 313: case 312: case 311: case 310: case 309: case 308: case 307: case 306: case 305: case 304: case 303: case 302: case 301: case 300: case 298: case 296: case 293: case 292: case 291: case 289: case 246: case 245: case 243: case 239: case 238: case 237: case 236: case 235: case 234: case 229: case 228: case 227: case 224: case 223: case 222: case 221: case 220: case 219: case 218: case 216: case 213: case 208: case 207: case 206: case 205: case 204: case 203: case 200: case 199: case 198: case 197: case 185: case 184: case 179: if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 233: case 232: case 231: case 230: case 226: case 183: case 182: case 181: case 180: case 178: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == 
(CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 176: case 175: case 174: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (immediate_operand (operands[1], VOIDmode)))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (which_alternative != 0)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 6; } else { return 0; } case 925: case 924: case 173: case 170: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 6; } else { return 0; } case 172: case 169: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (which_alternative == 
1)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) || ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))))))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 6; } else { return 0; } case 786: case 785: case 167: case 164: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else { return 0; } case 166: case 163: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (which_alternative == 1)) { return 1; } else if 
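/* The branches in this and the surrounding cases all follow one pattern:
   select a constant return value for the active -mtune CPU (CPU_PENTIUM,
   CPU_PENTIUMPRO, CPU_K6, CPU_ATHLON, CPU_K8), refined by the insn's
   memory attribute (MEMORY_NONE/LOAD/STORE/BOTH) and, in the cases that
   first call extract_constrain_insn_cached, by the matched constraint
   alternative in which_alternative; anything unmatched falls through to
   return 0.  */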
((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (!((1 << which_alternative) & 0x3))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0))))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && ((((1 << which_alternative) & 0x6)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 2; } else { return 0; } case 171: case 168: case 165: case 162: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (which_alternative != 0)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && (get_attr_memory 
(insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))))) { return 6; } else { return 0; } case 156: case 155: case 151: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else { return 0; } case 790: case 789: case 150: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else { return 0; } case 159: case 158: case 157: case 154: case 153: case 152: case 149: case 148: case 147: if ((((ix86_tune) == (CPU_PENTIUM))) || (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 1; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 926: case 139: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == 
(CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else { return 0; } case 137: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 1) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((which_alternative == 1) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 3; } else { return 0; } case 136: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 2) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == 
(CPU_K8))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((which_alternative == 2) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))))))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else { return 0; } case 135: case 134: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((1 << which_alternative) & 0xe))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0)) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) || ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0)))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 4) && (get_attr_memory (insn) == MEMORY_LOAD))))))) { return 6; } else if ((which_alternative == 4) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 3; } else { return 0; } case 145: case 142: case 133: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (which_alternative != 0)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == 
MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))))) { return 6; } else { return 0; } case 146: case 144: case 143: case 141: case 138: case 132: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))))) { return 6; } else { return 0; } case 131: case 130: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE)) || ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == 
(CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 6; } else { return 0; } case 1033: case 1032: case 1016: case 1014: case 927: case 140: case 129: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 128: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1)) || ((((1 << which_alternative) & 0x3)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x3))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 3; } else if 
((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x3)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1)))) { return 1; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) && (which_alternative == 1))) || ((((ix86_tune) == (CPU_K8))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) && (which_alternative == 1)))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x3))) || (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD)))))) { return 6; } else if ((which_alternative == 2) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 3; } else { return 0; } case 114: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 1)) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 1))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((which_alternative == 1) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x3)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || 
(((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2)) { return 6; } else { return 0; } case 113: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 1)) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 1))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((which_alternative == 1) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && 
((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((which_alternative == 3) && (((ix86_tune) == (CPU_K8)))) { return 12 /* 0xc */; } else if ((which_alternative == 3) && (((ix86_tune) == (CPU_ATHLON)))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x3))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2)) { return 6; } else { return 0; } case 112: case 111: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((1 << which_alternative) & 0x7))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 4) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 4) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 4) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 4) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && 
((which_alternative == 4) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 4) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 6; } else if ((which_alternative == 4) && (((ix86_tune) == (CPU_K8)))) { return 12 /* 0xc */; } else if ((which_alternative == 4) && (((ix86_tune) == (CPU_ATHLON)))) { return 4; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 109: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) 
== (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 106: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_NONE))) || (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH)))) || ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 101: case 100: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (immediate_operand (operands[1], VOIDmode)))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (!((1 << which_alternative) & 0x7))) { return 1; } 
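/* Editorial note (not part of the generated output): the remaining branches
   of this else-if chain select the latency for the other tunings
   (PentiumPro, K6, Athlon/K8) from get_attr_memory and which_alternative.
   The surrounding code is machine-generated from the i386 machine
   description, so only this comment is hand-written.  */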
else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || (((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 6; } else { return 0; } case 96: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((1 << which_alternative) & 0x18))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << 
which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7)))))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f))) { return 6; } else { return 0; } case 95: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((1 << which_alternative) & 0x18))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == 
MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7)))))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f))) { return 6; } else { return 0; } case 988: case 987: case 986: case 985: case 984: case 983: case 982: case 981: case 980: case 979: case 978: case 977: case 976: case 975: case 974: case 973: case 972: case 971: case 970: case 969: case 968: case 967: case 955: case 954: case 953: case 951: case 950: case 949: case 948: case 947: case 946: case 945: case 944: case 943: case 942: case 941: case 940: case 939: case 938: case 937: case 936: case 935: case 934: case 933: case 932: case 931: case 930: case 102: case 97: case 92: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else { return 0; } case 91: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((1 << which_alternative) & 0x18))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x18)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x18)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x18)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0)))))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0)))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((((1 << which_alternative) & 0x18)) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7)))))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && (((! 
((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) || (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1ff)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1ff))) { return 6; } else { return 0; } case 90: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)) || ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((1 << which_alternative) & 0x18))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x18)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x18)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x18)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7))) || ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_NONE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0)))) || ((get_attr_memory (insn) == MEMORY_LOAD) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! 
((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0)))))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0)))))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0)))))))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((((1 << which_alternative) & 0x18)) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == 
MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) || (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7)))))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) || (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1ff)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1ff))) { return 6; } else { return 0; } case 89: case 88: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (! (memory_operand (operands[1], VOIDmode))))) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((memory_operand (operands[1], VOIDmode)) && (which_alternative == 1))) || (which_alternative != 1)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) || (((ix86_tune) == (CPU_K6)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 84: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) || (which_alternative == 4)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || ((get_attr_memory (insn) == MEMORY_BOTH) || (which_alternative == 4)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))) || ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) || (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x7e0)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 8)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 8)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7e0))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xe0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xe0))) { return 6; } else { return 0; } case 83: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) || (which_alternative == 4)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || ((get_attr_memory (insn) == MEMORY_BOTH) || (which_alternative == 4)))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))) || ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) || (((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x7e0)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 8)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 8)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7e0))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xe0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xe0))) { return 6; } else { return 0; } case 82: extract_constrain_insn_cached (insn); if (((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) || (((1 << which_alternative) & 0x3))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((1 << which_alternative) & 0xc)) && (get_attr_memory (insn) == 
MEMORY_NONE))) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0xc))) || ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0xc)))) || ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0xc))))) || ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0xf)) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0xf)) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0xf)) && ((which_alternative != 5) && (get_attr_memory (insn) == MEMORY_LOAD)))) || (((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0xf)) && ((which_alternative != 5) && (get_attr_memory (insn) == MEMORY_LOAD)))) || ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0xf)) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0xf)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xf)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 5)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 5)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0xf))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xc))) { return 6; } else { return 0; } case 76: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (! (memory_operand (operands[1], VOIDmode))))) { return 4; } else if (((((ix86_tune) == (CPU_PENTIUM))) && ((memory_operand (operands[1], VOIDmode)) && (which_alternative == 0))) || (which_alternative != 0)) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) || (((ix86_tune) == (CPU_K6)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 71: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((q_regs_operand (operands[0], QImode)) && (! 
((TARGET_MOVX) != (0))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((((q_regs_operand (operands[0], QImode)) && (! 
((TARGET_MOVX) != (0)))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 70: case 66: case 65: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_IMOV)) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_IMOV)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_IMOV)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! 
(get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_IMOV))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_IMOV))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_IMOV))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_IMOVX))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_IMOVX))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOVX) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOVX) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((get_attr_type (insn) == TYPE_IMOV) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 125: case 124: case 123: case 122: case 121: case 120: case 119: case 118: case 116: case 115: case 110: case 107: case 104: case 69: case 64: case 63: if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_BOTH) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == 
MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 74: case 73: case 72: case 61: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 59: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! 
((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! 
((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! 
((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 399: case 105: case 56: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (memory_operand (operands[1], VOIDmode)))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (memory_operand (operands[1], VOIDmode))) || ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) || ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 55: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 60: case 54: case 53: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && 
(((((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 52: if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 50: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_IMOV)) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE)) || (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_IMOV))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_IMOV))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_IMOV))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! 
((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((get_attr_type (insn) == TYPE_IMOV) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 87: case 47: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (((((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory 
(insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH)) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 2; } else { return 0; } case 86: case 68: case 46: if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 3; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 85: case 67: case 51: case 45: if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 1; } else if (((ix86_tune) == (CPU_K6))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 44: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && ((((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))) || (get_attr_memory (insn) == MEMORY_NONE)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xfc)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 5)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 5)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xfc))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x1c)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x1c))) { return 6; } else { return 0; } case 43: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode)))))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))))) { return 3; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (((((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))) || (get_attr_memory (insn) == MEMORY_LOAD)) || (get_attr_memory (insn) == MEMORY_STORE)) || (get_attr_memory (insn) == MEMORY_BOTH))) || ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && ((((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))) || (get_attr_memory (insn) == MEMORY_NONE)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((!((1 << which_alternative) & 0xfc)) && ((! 
((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 3; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 9; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 18 /* 0x12 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 1; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xfc)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 6; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 5)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 5)) { return 4; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xfc))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x1c)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 12 /* 0xc */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x1c))) { return 6; } else { return 0; } case 401: case 397: case 396: case 393: case 347: case 341: case 321: case 319: case 299: case 297: case 244: case 108: case 103: case 81: case 80: case 62: case 42: case 41: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (memory_operand (operands[1], VOIDmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (memory_operand (operands[1], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if (((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) || ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 24 /* 0x18 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 6; } else { return 0; } case 79: case 78: case 40: case 39: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) || ((((ix86_tune) == (CPU_K6))) || ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))))) { return 2; } else { return 0; } case 77: case 58: case 57: case 49: case 48: case 38: case 37: case 36: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! 
(memory_operand (operands[1], VOIDmode)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (memory_operand (operands[1], VOIDmode))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) || (((ix86_tune) == (CPU_K6)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 6; } else { return 0; } case 35: case 32: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (GET_MODE (operands[1]) == SFmode))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (GET_MODE (operands[1]) == SFmode))) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else { return 0; } case 34: case 31: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (GET_MODE (operands[1]) == SFmode)))) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (GET_MODE (operands[1]) == SFmode)))) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))))) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 1; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 6; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) || ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else { return 0; } case 33: case 30: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { 
return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if (((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) || (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 2; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 1; } else { return 0; } case 25: case 23: case 21: case 20: case 19: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 1; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 6; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 3; } else { return 0; } case 8: case 7: case 6: if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD)) || ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) || ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else { return 0; } case 285: case 284: case 283: case 17: case 16: case 15: case 14: case 13: case 12: case 11: case 10: case 9: case 5: case 4: case 3: case 2: case 1: case 0: if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 1; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 2; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 3; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 6; } else { return 0; }
case -1: /* Unrecognized insn: only asm patterns are allowed here, anything else is a fatal error; asm insns fall through to the default value.  */ if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn);
default: return 1; } }
#endif
/* Return the insn code used internally by the generated DFA pipeline-hazard automata for INSN, selected by the active ix86_tune model and the insn's attributes (memory type, matched alternative, operand predicates); each case falls back to 273 when no reservation condition matches.  */
static int internal_dfa_insn_code (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) {
case 1015: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 242 /* 0xf2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 243 /* 0xf3 */; } else { return 273 /* 0x111 */; }
case 1009: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) &&
((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 1)) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 1)) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 1)) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 1008: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((which_alternative == 1) && (((ix86_tune) == (CPU_K8)))) { return 214 /* 0xd6 */; } else if ((which_alternative == 1) && (((ix86_tune) == (CPU_ATHLON)))) { return 215 /* 0xd7 */; } else if ((which_alternative == 1) && ((((ix86_tune) == (CPU_ATHLON))) || 
(((ix86_tune) == (CPU_K8))))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 1012: case 1011: case 1010: case 1004: case 1002: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if (((ix86_tune) == (CPU_K8))) { return 214 /* 0xd6 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 215 /* 0xd7 */; } else { return 273 /* 0x111 */; } case 966: case 965: case 964: case 963: case 962: case 961: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 226 /* 0xe2 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 227 /* 0xe3 */; } else { return 273 /* 0x111 */; } case 952: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((optimize_size) != (0))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if (((ix86_tune) == (CPU_K8))) { return 214 /* 0xd6 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 215 /* 0xd7 */; } else { return 273 /* 0x111 */; } case 903: case 902: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == 
MEMORY_LOAD)) { return 232 /* 0xe8 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 233 /* 0xe9 */; } else { return 273 /* 0x111 */; } case 899: case 898: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 228 /* 0xe4 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 229 /* 0xe5 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 230 /* 0xe6 */; } else if (((ix86_tune) == (CPU_K8))) { return 231 /* 0xe7 */; } else { return 273 /* 0x111 */; } case 891: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 265 /* 0x109 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 266 /* 0x10a */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 267 /* 0x10b */; } else { return 273 /* 0x111 */; } case 890: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 268 /* 0x10c */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 269 /* 0x10d */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 270 /* 0x10e */; } else if (((ix86_tune) == (CPU_K8))) { return 271 /* 0x10f */; } else { return 273 /* 0x111 */; } case 889: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) 
== MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 258 /* 0x102 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 259 /* 0x103 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 260 /* 0x104 */; } else { return 273 /* 0x111 */; } case 888: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 261 /* 0x105 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 262 /* 0x106 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 263 /* 0x107 */; } else if (((ix86_tune) == (CPU_K8))) { return 264 /* 0x108 */; } else { return 273 /* 0x111 */; } case 895: case 893: case 887: case 885: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 236 /* 0xec */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 237 /* 0xed */; } else { return 273 /* 0x111 */; } case 1028: case 1026: case 1024: case 894: case 892: case 886: case 884: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 238 /* 0xee */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 239 /* 0xef */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 240 /* 0xf0 */; } else if (((ix86_tune) == (CPU_K8))) { return 241 /* 0xf1 */; } else { return 273 /* 0x111 
*/; } case 881: case 880: if (((ix86_tune) == (CPU_PENTIUM))) { return 27 /* 0x1b */; } else { return 273 /* 0x111 */; } case 879: case 878: case 870: case 869: case 865: case 864: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 219 /* 0xdb */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 1020: case 1019: case 1018: case 852: if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 76 /* 0x4c */; } else { return 273 /* 0x111 */; } case 851: if (((ix86_tune) == (CPU_K6))) { return 135 /* 0x87 */; } else { return 273 /* 0x111 */; } case 850: if (((ix86_tune) == (CPU_PENTIUM))) { return 23 /* 0x17 */; } else if (((ix86_tune) == (CPU_K6))) { return 131 /* 0x83 */; } else { return 273 /* 0x111 */; } case 849: if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 859: case 858: case 857: case 827: case 826: case 825: case 824: case 823: case 822: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 219 /* 0xdb */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 871: case 842: case 841: case 840: case 839: case 838: case 837: case 836: case 835: case 834: case 833: case 832: case 818: case 817: case 816: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 71 /* 0x47 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if 
(((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 219 /* 0xdb */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 883: case 882: case 863: case 813: case 812: if (((ix86_tune) == (CPU_PENTIUM))) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 877: case 862: case 810: case 809: case 808: case 807: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 73 /* 0x49 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 217 /* 0xd9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 218 /* 0xda */; } else { return 273 /* 0x111 */; } case 876: case 875: case 874: case 873: case 872: case 868: case 867: case 866: case 861: case 860: case 856: case 855: case 854: case 831: case 830: case 829: case 828: case 815: case 814: case 811: case 806: case 805: case 804: case 803: case 802: case 801: case 800: case 799: case 798: case 797: case 796: case 795: case 794: case 793: case 792: case 791: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 219 /* 0xdb */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 923: case 921: case 788: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == 
MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 255 /* 0xff */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 256 /* 0x100 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 257 /* 0x101 */; } else { return 273 /* 0x111 */; } case 922: case 920: case 787: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 89 /* 0x59 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 255 /* 0xff */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 256 /* 0x100 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 257 /* 0x101 */; } else { return 273 /* 0x111 */; } case 775: case 774: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 82 /* 0x52 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 83 /* 0x53 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 232 /* 0xe8 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 233 /* 0xe9 */; } else { return 273 /* 0x111 */; } case 771: case 770: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 96 /* 0x60 */; } else if ((((ix86_tune) == 
(CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 97 /* 0x61 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 99 /* 0x63 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 228 /* 0xe4 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 229 /* 0xe5 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 230 /* 0xe6 */; } else if (((ix86_tune) == (CPU_K8))) { return 231 /* 0xe7 */; } else { return 273 /* 0x111 */; } case 769: if (((ix86_tune) == (CPU_PENTIUM))) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 104 /* 0x68 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 223 /* 0xdf */; } else if (((ix86_tune) == (CPU_K8))) { return 224 /* 0xe0 */; } else { return 273 /* 0x111 */; } case 768: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) { return 104 /* 0x68 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) { return 105 /* 0x69 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 223 /* 0xdf */; } else if (((ix86_tune) == (CPU_K8))) { return 224 /* 0xe0 */; } else { return 273 /* 0x111 */; } case 767: case 766: case 765: case 764: case 763: case 762: case 761: case 760: case 759: case 758: case 757: case 756: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 221 /* 0xdd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 222 /* 0xde */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 223 /* 0xdf */; } else if (((ix86_tune) == (CPU_K8))) { return 224 /* 0xe0 */; } else { return 273 /* 0x111 */; } case 755: case 754: case 753: case 752: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 104 /* 0x68 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { 
return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 221 /* 0xdd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 222 /* 0xde */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 223 /* 0xdf */; } else if (((ix86_tune) == (CPU_K8))) { return 224 /* 0xe0 */; } else { return 273 /* 0x111 */; } case 1030: case 1029: case 780: case 778: case 750: case 748: case 746: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 93 /* 0x5d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else { return 273 /* 0x111 */; } case 745: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 86 /* 0x56 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 265 /* 0x109 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 266 /* 0x10a */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 267 /* 0x10b */; } else { return 273 /* 0x111 */; } case 744: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 102 /* 0x66 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 268 /* 0x10c */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 269 /* 0x10d */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 270 /* 0x10e */; } else if (((ix86_tune) == (CPU_K8))) { return 271 /* 0x10f */; } else { return 273 /* 0x111 */; } case 743: if ((((ix86_tune) == (CPU_PENTIUM))) && 
(get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 84 /* 0x54 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 85 /* 0x55 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 258 /* 0x102 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 259 /* 0x103 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 260 /* 0x104 */; } else { return 273 /* 0x111 */; } case 742: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 100 /* 0x64 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 101 /* 0x65 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 261 /* 0x105 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 262 /* 0x106 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 263 /* 0x107 */; } else if (((ix86_tune) == (CPU_K8))) { return 264 /* 0x108 */; } else { return 273 /* 0x111 */; } case 741: case 739: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 78 /* 0x4e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 79 /* 0x4f */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 236 /* 0xec 
*/; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 237 /* 0xed */; } else { return 273 /* 0x111 */; } case 1027: case 1025: case 1023: case 740: case 738: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 94 /* 0x5e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 95 /* 0x5f */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 238 /* 0xee */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 239 /* 0xef */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 240 /* 0xf0 */; } else if (((ix86_tune) == (CPU_K8))) { return 241 /* 0xf1 */; } else { return 273 /* 0x111 */; } case 736: case 735: case 734: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 90 /* 0x5a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 91 /* 0x5b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 92 /* 0x5c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 207 /* 0xcf */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 208 /* 0xd0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 928: case 783: case 782: case 777: case 776: case 737: case 733: case 732: case 731: case 730: case 725: case 724: case 723: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) 
== MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 98 /* 0x62 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else { return 273 /* 0x111 */; } case 721: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((1 << which_alternative) & 0x3)) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0)))))))) { return 215 /* 0xd7 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 720: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((1 << which_alternative) & 0x3)) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0xc)) && ((optimize_size) != (0))) || ((which_alternative == 4) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0xc)) && (! ((optimize_size) != (0)))) || ((which_alternative == 4) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0)))))))) { return 215 /* 0xd7 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 719: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && 
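/* Editor's note (assumption): extract_constrain_insn_cached () above fills in
   which_alternative, so tests such as ((1 << which_alternative) & 0x3) or
   (& 0xc) select groups of constraint alternatives (here 0-1 and 2-3) of the
   matched move pattern; the optimize_size and TARGET_SSE_TYPELESS_STORES
   guards presumably mirror the conditions attached to those alternatives in
   i386.md.  */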
((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! ((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7)))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && ((optimize_size) != (0)))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && (! 
((optimize_size) != (0)))) || (!((1 << which_alternative) & 0x7)))))) { return 215 /* 0xd7 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 707: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! 
((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 215 /* 0xd7 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 706: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! 
((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 215 /* 0xd7 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 705: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! 
((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 215 /* 0xd7 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 848: case 847: case 846: case 845: case 844: case 843: case 821: case 820: case 819: case 727: case 726: case 704: case 703: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 75 /* 0x4b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 219 /* 0xdb */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 729: case 702: case 701: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 700: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == 
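/* Editor's note (assumption): the memory_operand (operands[1], DFmode) tests
   below give Athlon/K8 a distinct reservation code (203/204) for
   double-precision loads feeding these move patterns, checked ahead of the
   generic MEMORY_LOAD/STORE classification later in the chain.  */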
(CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 215 /* 0xd7 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 699: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! 
((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((((1 << which_alternative) & 0x3)) && ((optimize_size) != (0))) || ((which_alternative == 2) && (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))))) || (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! 
((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))))) { return 215 /* 0xd7 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 728: case 722: case 698: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (memory_operand (operands[1], DFmode))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && (memory_operand (operands[1], DFmode))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if (((ix86_tune) == (CPU_K8))) { return 214 /* 0xd6 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 215 /* 0xd7 */; } else { return 273 /* 0x111 */; } case 695: case 694: case 693: case 692: case 691: case 690: case 689: case 688: case 687: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 10 /* 0xa */; } else { return 28 /* 0x1c */; } case 672: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
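/* Editor's note (assumption): in the cases that follow (672/671/670) the
   Pentium rows additionally key on the "imm_disp" attribute, since an insn
   using both an immediate and a displacement cannot pair on the original
   Pentium, and the const0_operand (operands[2], ...) tests single out the
   register-clearing form of the pattern, which several of these CPUs
   schedule more cheaply.  */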
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative == 0))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((which_alternative == 1) && (((ix86_tune) == (CPU_K6)))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((which_alternative == 1) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 671: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (const0_operand (operands[2], DImode)))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (const0_operand (operands[2], DImode))))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (const0_operand (operands[2], DImode))))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 1) && (const0_operand (operands[2], DImode))))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (! (const0_operand (operands[2], DImode)))))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative == 0))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && 
((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (! (const0_operand (operands[2], DImode))))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! (const0_operand (operands[2], DImode))))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 670: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (const0_operand (operands[2], SImode)))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (const0_operand (operands[2], SImode))))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (const0_operand (operands[2], SImode))))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 1) && (const0_operand (operands[2], SImode))))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (! (const0_operand (operands[2], SImode)))))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative == 0))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (! 
(const0_operand (operands[2], SImode))))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! (const0_operand (operands[2], SImode))))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 657: if (((ix86_tune) == (CPU_PENTIUM))) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 55 /* 0x37 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 194 /* 0xc2 */; } else if (((ix86_tune) == (CPU_K8))) { return 196 /* 0xc4 */; } else { return 273 /* 0x111 */; } case 655: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((1 << which_alternative) & 0x3))) { return 27 /* 0x1b */; } else if (!((1 << which_alternative) & 0x3)) { return 28 /* 0x1c */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 55 /* 0x37 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 194 /* 0xc2 */; } else if (((ix86_tune) == (CPU_K8))) { return 196 /* 0xc4 */; } else { return 273 /* 0x111 */; } case 656: case 654: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (((1 << which_alternative) & 0x3))) { return 55 /* 0x37 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (!((1 << which_alternative) & 0x3)))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (!((1 << which_alternative) & 0x3)))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (!((1 << which_alternative) & 0x3)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (!((1 << which_alternative) & 0x3)))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory 
(insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 193 /* 0xc1 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((1 << which_alternative) & 0x3))) { return 194 /* 0xc2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 195 /* 0xc3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((1 << which_alternative) & 0x3))) { return 196 /* 0xc4 */; } else { return 273 /* 0x111 */; } case 653: case 652: case 651: case 649: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 650: case 648: if (((ix86_tune) == (CPU_PENTIUM))) { return 25 /* 0x19 */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 109 /* 0x6d */; } else if (((ix86_tune) == (CPU_K6))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 160 /* 0xa0 */; } else { return 273 /* 0x111 */; } case 647: case 646: case 645: case 644: case 643: case 642: case 
641: case 640: case 639: case 638: case 637: case 636: case 635: case 634: case 633: case 632: case 631: case 630: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else { return 28 /* 0x1c */; } case 629: case 628: case 627: case 626: case 625: case 624: case 623: case 622: case 621: case 620: case 619: case 618: if (((ix86_tune) == (CPU_PENTIUM))) { return 1; } else { return 28 /* 0x1c */; } case 617: if (((ix86_tune) == (CPU_PENTIUM))) { return 3; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 37 /* 0x25 */; } else if (((ix86_tune) == (CPU_K6))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 161 /* 0xa1 */; } else { return 273 /* 0x111 */; } case 616: case 615: case 614: case 613: case 612: case 611: case 610: case 607: case 604: case 600: case 596: case 592: case 591: case 590: case 589: case 588: if (((ix86_tune) == (CPU_PENTIUM))) { return 15 /* 0xf */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 69 /* 0x45 */; } else if (((ix86_tune) == (CPU_K6))) { return 142 /* 0x8e */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 192 /* 0xc0 */; } else { return 273 /* 0x111 */; } case 608: case 605: case 603: case 601: case 599: case 597: case 595: case 593: case 587: case 586: if (((ix86_tune) == (CPU_PENTIUM))) { return 15 /* 0xf */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 67 /* 0x43 */; } else if (((ix86_tune) == (CPU_K6))) { return 142 /* 0x8e */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 192 /* 0xc0 */; } else { return 273 /* 0x111 */; } case 1031: case 1017: case 1013: case 1007: case 1006: case 1005: case 1003: case 1001: case 1000: case 999: case 998: case 997: case 996: case 995: case 994: case 993: case 992: case 991: case 990: case 989: case 960: case 959: case 958: case 957: case 956: case 929: case 919: case 918: case 917: case 916: case 915: case 914: case 913: case 912: case 911: case 910: case 909: case 908: case 907: case 906: case 905: case 904: case 896: case 784: case 669: case 663: case 585: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else { return 273 /* 0x111 */; } case 584: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 0)) { return 15 /* 0xf */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 67 /* 0x43 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 68 /* 0x44 */; } 
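/* The remaining branches of this pattern handle the K6 and Athlon/K8 tunings before falling back to the catch-all default value.  */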
else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 0)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 191 /* 0xbf */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 192 /* 0xc0 */; } else { return 273 /* 0x111 */; } case 609: case 606: case 602: case 598: case 594: case 583: if (((ix86_tune) == (CPU_PENTIUM))) { return 15 /* 0xf */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 65 /* 0x41 */; } else if (((ix86_tune) == (CPU_K6))) { return 142 /* 0x8e */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 192 /* 0xc0 */; } else { return 273 /* 0x111 */; } case 897: case 781: case 779: case 751: case 749: case 747: case 666: case 660: case 582: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 77 /* 0x4d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else { return 273 /* 0x111 */; } case 581: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 0)) { return 15 /* 0xf */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 65 /* 0x41 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 66 /* 0x42 */; } else if ((which_alternative == 1) && (((ix86_tune) == (CPU_PENTIUMPRO)))) { return 77 /* 0x4d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 0)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 191 /* 0xbf */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 192 /* 0xc0 */; } else { return 273 /* 0x111 
*/; } case 580: case 579: case 578: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], XFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 65 /* 0x41 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 66 /* 0x42 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == 
MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else { return 273 /* 0x111 */; } case 577: case 576: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], XFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory 
(insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else { return 273 /* 0x111 */; } case 575: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == 
(CPU_PENTIUM))) && (mult_operator (operands[3], XFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 69 /* 0x45 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 70 /* 0x46 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == 
MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else { return 273 /* 0x111 */; } case 574: case 573: case 572: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], DFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], DFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], DFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 65 /* 0x41 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == 
MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 66 /* 0x42 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], DFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], DFmode))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else { return 273 /* 0x111 */; } case 571: case 570: 
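/* Cases 571/570: DFmode floating-point arithmetic.  As in the neighbouring cases, the value returned is selected by the CPU tuning (Pentium, PentiumPro, K6, Athlon/K8) together with the insn's type, mult_operator, memory and unit attributes; 273 is the catch-all default when nothing more specific matches.  */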
extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], DFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], DFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], DFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], DFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && 
(get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], DFmode))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else { return 273 /* 0x111 */; } case 569: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 236 /* 0xec */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 237 /* 0xed */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 258 /* 0x102 */; } else if 
((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 259 /* 0x103 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 260 /* 0x104 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 265 /* 0x109 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 266 /* 0x10a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 267 /* 0x10b */; } else { return 273 /* 0x111 */; } case 568: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode)))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode))))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode))))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 67 /* 0x43 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 68 /* 0x44 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && 
(get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 2) && (mult_operator (operands[3], DFmode)))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 236 /* 0xec */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 237 /* 0xed */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 258 /* 0x102 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator 
(operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 259 /* 0x103 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 260 /* 0x104 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 261 /* 0x105 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 262 /* 0x106 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 263 /* 0x107 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 264 /* 0x108 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 265 /* 0x109 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 266 /* 0x10a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 267 /* 0x10b */; } else { return 273 /* 0x111 */; } case 567: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], DFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], DFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], DFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 67 /* 0x43 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 68 /* 0x44 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory 
(insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], DFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], DFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], DFmode))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else { return 273 /* 0x111 */; } case 566: case 565: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], SFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) 
== TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], SFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && 
(get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else { return 273 /* 0x111 */; } case 564: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEADD))) { return 78 /* 0x4e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_SSEADD))) { return 79 /* 0x4f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) { return 84 /* 0x54 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode)))) { return 85 /* 0x55 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 86 /* 0x56 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 87 /* 0x57 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == 
MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 236 /* 0xec */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 237 /* 0xed */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 258 /* 0x102 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 259 /* 0x103 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 260 /* 0x104 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 265 /* 0x109 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 266 /* 0x10a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 267 /* 0x10b */; } else { return 273 /* 0x111 */; } case 563: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode)))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode))))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode))))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 65 /* 0x41 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 66 /* 0x42 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == 
MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEADD))) { return 78 /* 0x4e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_SSEADD))) { return 79 /* 0x4f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode))))) { return 84 /* 0x54 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode))))) { return 85 /* 0x55 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 86 /* 0x56 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_SSEDIV))) { return 87 /* 0x57 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && 
(get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 2) && (mult_operator (operands[3], SFmode)))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEADD) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 236 /* 0xec */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEADD)) { return 237 /* 0xed */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 258 /* 0x102 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 259 /* 0x103 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 260 /* 0x104 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 261 /* 0x105 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 262 /* 0x106 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 263 /* 0x107 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (mult_operator (operands[3], SFmode)))) { return 264 /* 0x108 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 265 /* 0x109 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_SSEDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 266 /* 0x10a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_SSEDIV)) { return 267 /* 0x10b */; } else { return 273 /* 0x111 */; } case 562: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FOP)) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], SFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_FDIV)) { return 14 /* 0xe */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == 
(CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FOP))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FOP))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_FOP))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_FOP))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_FDIV))) { return 65 /* 0x41 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_FDIV))) { return 66 /* 0x42 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], SFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_type (insn) == TYPE_FDIV)) { return 142 /* 0x8e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_unit (insn) == UNIT_INTEGER) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory 
(insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FOP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_type (insn) == TYPE_FOP)) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 187 /* 0xbb */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_type (insn) == TYPE_FDIV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 188 /* 0xbc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_type (insn) == TYPE_FDIV)) { return 189 /* 0xbd */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_type (insn) == TYPE_FDIV)) { return 190 /* 0xbe */; } else { return 273 /* 0x111 */; } case 561: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! (mult_operator (operands[3], XFmode)))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], XFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (! (mult_operator (operands[3], XFmode))))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (! (mult_operator (operands[3], XFmode))))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (! (mult_operator (operands[3], XFmode))))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (! (mult_operator (operands[3], XFmode))))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], XFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], XFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((! (mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((! 
(mult_operator (operands[3], XFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((! (mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], XFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], XFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (mult_operator (operands[3], XFmode)))) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], XFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], XFmode))) { return 185 /* 0xb9 */; } else { return 273 /* 0x111 */; } case 560: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 236 /* 0xec */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (mult_operator (operands[3], SFmode)))) { return 237 /* 0xed */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 258 /* 0x102 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 259 /* 0x103 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 260 /* 0x104 */; } else { return 273 /* 0x111 */; } case 559: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (! 
(mult_operator (operands[3], SFmode))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode))))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode))))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (! 
(mult_operator (operands[3], SFmode))))) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 236 /* 0xec */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 237 /* 0xed */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 238 /* 0xee */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 239 /* 0xef */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 240 /* 0xf0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 241 /* 0xf1 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 258 /* 0x102 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 259 /* 0x103 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 260 /* 0x104 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 261 /* 0x105 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 262 /* 0x106 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 263 /* 0x107 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 264 /* 0x108 */; } else { return 273 /* 0x111 */; } case 557: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (! 
(mult_operator (operands[3], SFmode))))) { return 78 /* 0x4e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (! (mult_operator (operands[3], SFmode))))) { return 79 /* 0x4f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) { return 84 /* 0x54 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode)))) { return 85 /* 0x55 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 236 /* 0xec */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (mult_operator (operands[3], SFmode)))) { return 237 /* 0xed */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 258 /* 0x102 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 259 /* 0x103 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 260 /* 0x104 */; } else { return 273 /* 0x111 */; } case 556: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode))))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((which_alternative == 0) && (! 
(mult_operator (operands[3], SFmode)))))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode))))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode))))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))))) { return 78 /* 0x4e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))))) { return 79 /* 0x4f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode))))) { return 84 /* 0x54 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode))))) { return 85 /* 0x55 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (! 
(mult_operator (operands[3], SFmode))))) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 0) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (mult_operator (operands[3], SFmode)))) { return 185 /* 0xb9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 235 /* 0xeb */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 236 /* 0xec */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 237 /* 0xed */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 238 /* 0xee */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (! (mult_operator (operands[3], SFmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 239 /* 0xef */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 240 /* 0xf0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (! (mult_operator (operands[3], SFmode))))) { return 241 /* 0xf1 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 258 /* 0x102 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 259 /* 0x103 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 260 /* 0x104 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 261 /* 0x105 */; } else if ((((ix86_tune) == (CPU_K8))) && (((which_alternative == 1) && (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 262 /* 0x106 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 263 /* 0x107 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (mult_operator (operands[3], SFmode)))) { return 264 /* 0x108 */; } else { return 273 /* 0x111 */; } case 558: case 555: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! 
(mult_operator (operands[3], SFmode)))) { return 12 /* 0xc */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (mult_operator (operands[3], SFmode))) { return 13 /* 0xd */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (! (mult_operator (operands[3], SFmode))))) { return 49 /* 0x31 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (! (mult_operator (operands[3], SFmode))))) { return 50 /* 0x32 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (! (mult_operator (operands[3], SFmode))))) { return 51 /* 0x33 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (! (mult_operator (operands[3], SFmode))))) { return 52 /* 0x34 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (mult_operator (operands[3], SFmode)))) { return 63 /* 0x3f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (mult_operator (operands[3], SFmode)))) { return 64 /* 0x40 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((! (mult_operator (operands[3], SFmode))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 140 /* 0x8c */; } else if ((((ix86_tune) == (CPU_K6))) && ((mult_operator (operands[3], SFmode)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 141 /* 0x8d */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 180 /* 0xb4 */; } else if ((((ix86_tune) == (CPU_K8))) && ((! (mult_operator (operands[3], SFmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 181 /* 0xb5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! 
(mult_operator (operands[3], SFmode)))) { return 182 /* 0xb6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 183 /* 0xb7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((mult_operator (operands[3], SFmode)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 184 /* 0xb8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (mult_operator (operands[3], SFmode))) { return 185 /* 0xb9 */; } else { return 273 /* 0x111 */; } case 554: case 552: if (((ix86_tune) == (CPU_PENTIUM))) { return 20 /* 0x14 */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 110 /* 0x6e */; } else if (((ix86_tune) == (CPU_K6))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 163 /* 0xa3 */; } else { return 273 /* 0x111 */; } case 553: case 551: if (((ix86_tune) == (CPU_PENTIUM))) { return 7; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 30 /* 0x1e */; } else if (((ix86_tune) == (CPU_K6))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 162 /* 0xa2 */; } else { return 273 /* 0x111 */; } case 534: case 533: if (((ix86_tune) == (CPU_PENTIUM))) { return 9; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 40 /* 0x28 */; } else if (((ix86_tune) == (CPU_K6))) { return 131 /* 0x83 */; } else if (((ix86_tune) == (CPU_ATHLON))) { return 148 /* 0x94 */; } else if (((ix86_tune) == (CPU_K8))) { return 149 /* 0x95 */; } else { return 273 /* 0x111 */; } case 522: case 521: case 520: case 519: case 518: case 517: case 516: case 515: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 10 /* 0xa */; } else { return 28 /* 0x1c */; } case 527: case 514: case 513: case 512: case 511: case 510: case 509: case 498: case 497: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 11 /* 0xb */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(memory_operand (operands[0], VOIDmode)))) { return 38 /* 0x26 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[0], VOIDmode))) { return 39 /* 0x27 */; } else if (((ix86_tune) == (CPU_K6))) { return 130 /* 0x82 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 143 /* 0x8f */; } else { return 273 /* 0x111 */; } case 901: case 900: case 496: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 225 /* 0xe1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 226 /* 0xe2 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 227 /* 0xe3 */; } else { return 273 /* 0x111 */; } case 773: case 772: case 495: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 80 /* 0x50 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 81 /* 0x51 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 225 /* 0xe1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 226 /* 0xe2 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 227 /* 0xe3 */; } else { return 273 /* 0x111 */; } case 494: case 493: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! (memory_operand (operands[0], VOIDmode)))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (memory_operand (operands[0], VOIDmode)))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[0], VOIDmode))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_K6))) && (! (memory_operand (operands[0], VOIDmode)))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[0], VOIDmode))) { return 123 /* 0x7b */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (! (memory_operand (operands[0], VOIDmode)))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[0], VOIDmode))) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 492: case 490: case 479: case 477: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 491: case 489: case 488: case 487: case 486: case 485: case 484: case 483: case 482: case 481: case 480: case 478: case 476: case 475: case 474: case 473: case 472: case 471: case 470: case 469: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 466: case 464: case 442: case 440: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[1], VOIDmode))) && (memory_operand (operands[1], VOIDmode)))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[1], VOIDmode)))) && (memory_operand (operands[1], VOIDmode)))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[1], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[1], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 424: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 468: case 467: case 465: case 463: case 462: case 461: case 460: case 459: case 458: case 457: case 456: case 455: case 454: case 453: case 452: case 451: case 448: case 447: case 446: case 445: case 444: case 443: case 441: case 439: case 438: case 437: case 436: case 435: case 434: case 433: case 432: case 431: case 430: case 429: case 428: case 427: case 421: case 420: case 419: case 418: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (const_int_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 426: case 425: case 417: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((which_alternative != 1) || (! (const_int_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((which_alternative != 1) || (! (const_int_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((which_alternative != 1) || (! (const_int_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (! (get_attr_memory (insn) == MEMORY_NONE)))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 414: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 2))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! 
(get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 2)) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2)) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 413: case 412: if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == 
(CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 411: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 1))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! 
(get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 1)) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 1)) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 410: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))))) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && (((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && (((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! 
(const1_operand (operands[2], VOIDmode)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 408: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 1))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))))) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && ((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! 
(const1_operand (operands[2], VOIDmode))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && (((which_alternative == 0) && (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 1)) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 1)) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 406: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 416: case 415: case 409: case 403: if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_ALU)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_ALU)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_ALU)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! 
(get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 407: case 402: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 1))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 35 /* 0x23 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((! (get_attr_memory (insn) == MEMORY_NONE)) && (get_attr_type (insn) == TYPE_ISHIFT))) { return 36 /* 0x24 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_ALU))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_ALU))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_ALU))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (get_attr_type (insn) == TYPE_ALU))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ISHIFT) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_ALU) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 1)) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if 
(((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 1)) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 391: case 390: case 389: case 388: case 387: case 386: case 376: case 375: case 374: case 373: case 372: case 371: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 53 /* 0x35 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 186 /* 0xba */; } else { return 273 /* 0x111 */; } case 400: case 398: case 395: case 394: case 392: case 361: case 360: case 359: case 358: case 357: case 356: case 355: case 354: case 353: case 352: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (memory_operand (operands[1], VOIDmode))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == 
(CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 294: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x3)))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x3)))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x3)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0x3)))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; 
} else { return 273 /* 0x111 */; } case 290: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x3)))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x3)))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x3)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0x3)))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 288: extract_constrain_insn_cached (insn); if ((((ix86_tune) == 
(CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x7))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x7))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x7))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative == 3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_NONE))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7)))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0x7)))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 282: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) 
== MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else { return 273 /* 0x111 */; } case 281: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x5)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else { return 273 /* 0x111 */; } case 280: case 279: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative != 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) 
== MEMORY_LOAD)) { return 163 /* 0xa3 */; } else { return 273 /* 0x111 */; } case 278: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0xa)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else { return 273 /* 0x111 */; } case 277: if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 45 /* 0x2d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 46 /* 0x2e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 119 /* 0x77 */; } else if ((((ix86_tune) == (CPU_K6))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 120 /* 0x78 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 157 /* 0x9d */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 158 /* 0x9e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 276: case 271: if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 47 /* 0x2f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 48 /* 0x30 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 119 /* 0x77 */; } else if ((((ix86_tune) == (CPU_K6))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 120 /* 0x78 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 157 /* 0x9d */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 158 /* 0x9e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 274: case 268: if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 119 /* 0x77 */; } else if ((((ix86_tune) == (CPU_K6))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 120 /* 0x78 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 157 /* 0x9d */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 158 /* 0x9e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 265: case 264: if (((ix86_tune) == (CPU_PENTIUM))) { return 2; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 43 /* 0x2b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 44 /* 0x2c */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 119 /* 0x77 */; } else if ((((ix86_tune) == (CPU_K6))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 120 /* 0x78 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 157 /* 0x9d */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 158 /* 0x9e */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 263: case 262: case 260: case 259: case 257: case 255: if (((ix86_tune) == (CPU_PENTIUM))) { return 0; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 41 /* 0x29 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 42 /* 0x2a */; } else if (((ix86_tune) == (CPU_K6))) { return 116 /* 0x74 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 151 /* 0x97 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 153 /* 0x99 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 154 /* 0x9a */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 156 /* 0x9c */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 261: case 258: case 256: case 254: if (((ix86_tune) == (CPU_PENTIUM))) { return 0; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 41 /* 0x29 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(get_attr_memory (insn) == MEMORY_NONE))) { return 42 /* 0x2a */; } else if (((ix86_tune) == (CPU_K6))) { return 116 /* 0x74 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 151 /* 0x97 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 152 /* 0x98 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 154 /* 0x9a */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 155 /* 0x9b */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 156 /* 0x9c */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 253: case 252: case 251: if (((ix86_tune) == (CPU_PENTIUM))) { return 0; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 41 /* 0x29 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 42 /* 0x2a */; } else if (((ix86_tune) == (CPU_K6))) { return 116 /* 0x74 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 151 /* 0x97 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 153 /* 0x99 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 154 /* 0x9a */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 156 /* 0x9c */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! 
(((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((! (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 250: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 0; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 41 /* 0x29 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 42 /* 0x2a */; } else if (((ix86_tune) == (CPU_K6))) { return 116 /* 0x74 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 151 /* 0x97 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 153 /* 0x99 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 154 /* 0x9a */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 156 /* 0x9c */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! (((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative == 0) && (! 
(((ix86_tune) == (CPU_ATHLON))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 249: case 248: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 0; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 41 /* 0x29 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 42 /* 0x2a */; } else if (((ix86_tune) == (CPU_K6))) { return 116 /* 0x74 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 151 /* 0x97 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 153 /* 0x99 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 154 /* 0x9a */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 156 /* 0x9c */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! 
(memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 247: extract_constrain_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 0; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 41 /* 0x29 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (get_attr_memory (insn) == MEMORY_NONE))) { return 42 /* 0x2a */; } else if (((ix86_tune) == (CPU_K6))) { return 116 /* 0x74 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 151 /* 0x97 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 152 /* 0x98 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 154 /* 0x9a */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 155 /* 0x9b */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 156 /* 0x9c */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! (memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((! (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative != 1) && ((which_alternative != 2) || (! 
(memory_operand (operands[1], VOIDmode)))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 217: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((! (incdec_operand (operands[2], QImode))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((! (incdec_operand (operands[2], QImode))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((! 
(incdec_operand (operands[2], QImode))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 215: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 3))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative != 3))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative != 3))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative != 3))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative != 3))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 3) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 3) && ((get_attr_memory 
(insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 3)) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 3)) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 339: case 338: case 337: case 317: case 316: case 315: case 295: case 242: case 241: case 240: case 214: case 212: case 211: case 210: if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 209: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 2)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x3))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 2))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative != 2))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative != 2))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative != 2))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (which_alternative != 2))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 2) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && (which_alternative == 2)) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2)) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 202: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && 
((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode))))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode)))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode)))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 201: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && 
((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode))))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode)))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode)))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 196: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && 
((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode))))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_INCDEC)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode)))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode)))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) 
{ return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 195: case 194: case 193: case 192: case 191: case 190: case 189: case 188: case 187: case 186: if (((ix86_tune) == (CPU_PENTIUM))) { return 24 /* 0x18 */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 34 /* 0x22 */; } else if (((ix86_tune) == (CPU_K6))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 150 /* 0x96 */; } else { return 273 /* 0x111 */; } case 350: case 349: case 348: case 346: case 345: case 344: case 343: case 342: case 340: case 336: case 335: case 334: case 333: case 332: case 331: case 330: case 329: case 328: case 327: case 326: case 325: case 324: case 323: case 322: case 320: case 318: case 314: case 313: case 312: case 311: case 310: case 309: case 308: case 307: case 306: case 305: case 304: case 303: case 302: case 301: case 300: case 298: case 296: case 293: case 292: case 291: case 289: case 246: case 245: case 243: case 239: case 238: case 237: case 236: case 235: case 234: case 229: case 228: case 227: case 224: case 223: case 222: case 221: case 220: case 219: case 218: case 216: case 213: case 208: case 207: case 206: case 205: case 204: case 203: case 200: case 199: case 198: case 197: case 185: case 184: case 179: if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 233: case 232: case 231: case 230: case 226: case 183: case 182: case 181: case 180: case 178: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory 
(insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 176: case 175: case 174: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (which_alternative != 0) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 60 /* 0x3c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 62 /* 0x3e */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 170 /* 0xaa */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 171 /* 0xab */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 174 /* 0xae */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 175 /* 0xaf */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 177 /* 0xb1 */; } else if ((((ix86_tune) == 
(CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 179 /* 0xb3 */; } else { return 273 /* 0x111 */; } case 925: case 924: case 173: case 170: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 246 /* 0xf6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 247 /* 0xf7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 248 /* 0xf8 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 249 /* 0xf9 */; } else { return 273 /* 0x111 */; } case 172: case 169: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (which_alternative == 1) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && 
((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 179 /* 0xb3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 246 /* 0xf6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 247 /* 0xf7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 248 /* 0xf8 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 249 /* 0xf9 */; } else { return 273 /* 0x111 */; } case 786: case 785: case 167: case 164: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 88 /* 0x58 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 247 /* 0xf7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 248 /* 0xf8 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 249 /* 0xf9 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 250 /* 0xfa */; } else { return 273 /* 0x111 */; } case 166: case 163: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == 
MEMORY_NONE)) { return 27 /* 0x1b */; } else if (which_alternative == 1) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (!((1 << which_alternative) & 0x3))) { return 88 /* 0x58 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 179 /* 0xb3 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 247 /* 0xf7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 248 /* 0xf8 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_NONE))) { return 249 /* 0xf9 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && ((((1 << which_alternative) & 0x6)) && (get_attr_memory (insn) == MEMORY_NONE)))) { return 250 /* 0xfa */; } else { return 273 /* 0x111 */; } case 171: case 168: case 165: case 162: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 
*/; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (which_alternative != 0) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 177 /* 0xb1 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 179 /* 0xb3 */; } else { return 273 /* 0x111 */; } case 156: case 155: case 151: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 247 /* 0xf7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 248 /* 0xf8 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 249 /* 0xf9 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 250 /* 0xfa */; } else { return 273 /* 0x111 */; } case 790: case 789: case 150: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == 
(CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 88 /* 0x58 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 247 /* 0xf7 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 248 /* 0xf8 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 249 /* 0xf9 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 250 /* 0xfa */; } else { return 273 /* 0x111 */; } case 159: case 158: case 157: case 154: case 153: case 152: case 149: case 148: case 147: if (((ix86_tune) == (CPU_PENTIUM))) { return 12 /* 0xc */; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 54 /* 0x36 */; } else if (((ix86_tune) == (CPU_K6))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 178 /* 0xb2 */; } else { return 273 /* 0x111 */; } case 926: case 139: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 251 /* 0xfb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 252 /* 0xfc */; } else { return 273 /* 0x111 */; } case 137: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && 
((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if ((which_alternative == 1) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 179 /* 0xb3 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 242 /* 0xf2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 243 /* 0xf3 */; } else { return 273 /* 0x111 */; } case 136: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 2) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 2) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_STORE))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory 
(insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 2) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 2) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if ((which_alternative == 2) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 179 /* 0xb3 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 251 /* 0xfb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 252 /* 0xfc */; } else { return 273 /* 0x111 */; } case 135: case 134: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((1 << which_alternative) & 0xe)) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } 
else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 179 /* 0xb3 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 4) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 242 /* 0xf2 */; } else if ((which_alternative == 4) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 243 /* 0xf3 */; } else { return 273 /* 0x111 */; } case 145: case 142: case 133: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (which_alternative != 0) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) 
|| (get_attr_memory (insn) == MEMORY_BOTH))) { return 177 /* 0xb1 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 179 /* 0xb3 */; } else { return 273 /* 0x111 */; } case 146: case 144: case 143: case 141: case 138: case 132: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 177 /* 0xb1 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 179 /* 0xb3 */; } else { return 273 /* 0x111 */; } case 131: case 130: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == 
MEMORY_NONE)) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 60 /* 0x3c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 62 /* 0x3e */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 170 /* 0xaa */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 171 /* 0xab */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 174 /* 0xae */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 175 /* 0xaf */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 177 /* 0xb1 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 179 /* 0xb3 */; } else { return 273 /* 0x111 */; } case 1033: case 1032: case 1016: case 1014: case 927: case 140: case 129: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 242 /* 0xf2 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 
243 /* 0xf3 */; } else { return 273 /* 0x111 */; } case 128: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) && (which_alternative == 1))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x3)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x3)))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 60 /* 0x3c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 0))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1))) { return 62 /* 0x3e */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 170 /* 0xaa */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 171 /* 0xab */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) && (which_alternative == 1))) { return 174 /* 0xae */; } else if ((((ix86_tune) == (CPU_K8))) && (((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)) && (which_alternative == 1))) { return 175 /* 0xaf */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x3)) && 
((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x3))) { return 179 /* 0xb3 */; } else if (((((ix86_tune) == (CPU_K8))) || (((ix86_tune) == (CPU_ATHLON)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 242 /* 0xf2 */; } else if ((which_alternative == 2) && ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8))))) { return 243 /* 0xf3 */; } else { return 273 /* 0x111 */; } case 114: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 1)) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 1))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { 
return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x3))) { return 216 /* 0xd8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 219 /* 0xdb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2)) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 113: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (which_alternative == 1)) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (which_alternative == 1)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (which_alternative != 1)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 1))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 1))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (which_alternative == 1))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } 
else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 3) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x3)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 3) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x3)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((which_alternative == 3) && (((ix86_tune) == (CPU_K8)))) { return 214 /* 0xd6 */; } else if ((which_alternative == 3) && (((ix86_tune) == (CPU_ATHLON)))) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x3))) { return 216 /* 0xd8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((which_alternative == 2) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 219 /* 0xdb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 2)) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 112: case 111: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((1 << which_alternative) & 0x7)) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 4) && (memory_operand (operands[1], DFmode)))) 
{ return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 4) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 4) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 4) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 4) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 4) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 213 /* 0xd5 */; } else if ((which_alternative == 4) && (((ix86_tune) == (CPU_K8)))) { return 214 /* 0xd6 */; } else if ((which_alternative == 4) && (((ix86_tune) == (CPU_ATHLON)))) { return 215 /* 0xd7 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 109: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 106: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative == 1) && (! (get_attr_imm_disp (insn) == IMM_DISP_TRUE))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_STORE))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_NONE))) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 114 /* 0x72 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 101: case 100: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 
4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 5; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (!((1 << which_alternative) & 0x7)) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 60 /* 0x3c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 62 /* 0x3e */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 170 /* 0xaa */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 171 /* 0xab */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 174 /* 0xae */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 175 /* 0xaf */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 177 /* 0xb1 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 179 /* 0xb3 */; } else { return 273 /* 0x111 */; } case 96: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((1 
<< which_alternative) & 0x18)) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7)))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7)))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7))) { return 179 /* 0xb3 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 95: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((1 << which_alternative) & 0x18)) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7)))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7)))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << 
which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7))) { return 179 /* 0xb3 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && ((optimize_size) != (0))) || ((which_alternative == 6) && ((optimize_size) != (0)))) || ((((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))))) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f))) { return 216 /* 0xd8 */; } else { return 273 /* 0x111 */; } case 988: case 987: case 986: case 985: case 984: case 983: case 982: case 981: case 980: case 979: case 978: case 977: case 976: case 975: case 974: case 973: case 972: case 971: case 970: case 969: case 968: case 967: case 955: case 954: case 953: case 951: case 950: case 949: case 948: case 947: case 946: case 945: case 944: case 943: case 942: case 941: case 940: case 939: case 938: case 937: case 936: case 935: case 934: case 933: case 932: case 931: case 930: case 102: case 97: case 92: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else { return 273 /* 0x111 */; } case 91: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((1 << which_alternative) & 0x18))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x18)))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x18)))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x18)))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7)))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7)))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) { return 90 /* 0x5a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) { return 91 /* 0x5b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) { return 92 /* 0x5c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == 
MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7))) { return 179 /* 0xb3 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 207 /* 0xcf */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 208 /* 0xd0 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f))) { return 216 /* 0xd8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1ff)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 219 /* 0xdb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1ff))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 90: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_NONE) || (get_attr_memory (insn) == MEMORY_LOAD)))) { return 4; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((((1 << which_alternative) & 0x7)) && ((immediate_operand (operands[1], VOIDmode)) || (get_attr_memory (insn) == MEMORY_STORE)))) { return 6; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((1 << which_alternative) & 0x18))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (!((1 << which_alternative) & 0x18))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x18)))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x18)))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x18)))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((1 << which_alternative) & 0x7)))) { return 58 /* 0x3a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0x7)))) { return 59 /* 0x3b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0x7)))) { return 61 /* 0x3d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) { return 90 /* 0x5a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) { return 91 /* 0x5b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (((1 << which_alternative) & 0x1e0))))) { return 92 /* 0x5c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 106 /* 0x6a */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 107 /* 0x6b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))))) { return 108 /* 0x6c */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_K6))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 139 /* 0x8b */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x18)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 172 /* 0xac */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 173 /* 0xad */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 176 /* 0xb0 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == 
MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 177 /* 0xb1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7))) { return 179 /* 0xb3 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 207 /* 0xcf */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x1e0)) && ((((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 208 /* 0xd0 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0x1f)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && (((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! 
((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0)))))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1f)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((which_alternative == 5) && (((! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))) || (! ((TARGET_SSE2) != (0)))) || (! ((optimize_size) == (0))))) || ((which_alternative == 6) && (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))))) || ((which_alternative == 5) && (((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && (((TARGET_SSE2) != (0)) && ((optimize_size) == (0))))))) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1f))) { return 216 /* 0xd8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x1ff)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 219 /* 0xdb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0x1ff))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 89: case 88: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 1) && (! (memory_operand (operands[1], VOIDmode))))) { return 8; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 1) || (memory_operand (operands[1], VOIDmode))) && ((which_alternative == 1) && (memory_operand (operands[1], VOIDmode))))) { return 19 /* 0x13 */; } else if (which_alternative != 1) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (memory_operand (operands[1], VOIDmode)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 131 /* 0x83 */; } else if (((ix86_tune) == (CPU_K6))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 145 /* 0x91 */; } else { return 273 /* 0x111 */; } case 84: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode)))))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if (which_alternative == 4) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_BOTH) || (which_alternative == 4)))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 207 /* 0xcf */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 208 /* 0xd0 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == 
MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x7e0)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 8)) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 8)) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7e0))) { return 216 /* 0xd8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xe0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 219 /* 0xdb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xe0))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 83: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode)))))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if (which_alternative == 4) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((get_attr_memory (insn) == MEMORY_BOTH) || (which_alternative == 4)))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], DImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 8) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 207 /* 0xcf */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x700)) && ((!((1 << which_alternative) & 0x111)) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 208 /* 0xd0 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0x7e0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 8) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x7e0)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 8)) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 8)) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x7e0))) { return 216 /* 0xd8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xe0)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 219 /* 0xdb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xe0))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 82: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if (((1 << which_alternative) & 0x3)) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) 
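/* Every branch in these cases maps the active -mtune CPU (ix86_tune) plus
   insn attributes -- memory access kind, pairing class, constraint
   alternative -- to a small integer constant.  These look like the automaton
   insn codes consumed by the DFA pipeline-hazard recognizer, with 273 as the
   shared fall-back value; that reading is inferred from the surrounding
   generated code rather than stated in it.  */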
&& ((((1 << which_alternative) & 0xc)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((1 << which_alternative) & 0xc)))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((1 << which_alternative) & 0xc)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_BOTH) && (((1 << which_alternative) & 0xc)))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0xf)) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0xf)) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0xf)) && ((which_alternative != 5) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 207 /* 0xcf */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0xf)) && ((which_alternative != 5) && (get_attr_memory (insn) == MEMORY_LOAD)))) { return 208 /* 0xd0 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((!((1 << which_alternative) & 0xf)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((!((1 << which_alternative) & 0xf)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xf)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 5)) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 5)) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (!((1 << which_alternative) & 0xf))) { return 216 /* 0xd8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 219 /* 0xdb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xc))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 76: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((which_alternative == 0) && (! 
(memory_operand (operands[1], VOIDmode))))) { return 8; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((which_alternative != 0) || (memory_operand (operands[1], VOIDmode))) && ((which_alternative == 0) && (memory_operand (operands[1], VOIDmode))))) { return 19 /* 0x13 */; } else if (which_alternative != 0) { return 28 /* 0x1c */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (memory_operand (operands[1], VOIDmode)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 131 /* 0x83 */; } else if (((ix86_tune) == (CPU_K6))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 145 /* 0x91 */; } else { return 273 /* 0x111 */; } case 71: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && (((! 
(q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && (((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 70: case 66: case 65: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_IMOV)) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_IMOV)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_type (insn) == TYPE_IMOV)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_IMOV))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_IMOV))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_IMOV))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_IMOVX))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_IMOVX))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOVX) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOVX) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOVX) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if 
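/* Athlon and K8 tuning are handled together in these branches; other cases
   in this switch test CPU_ATHLON and CPU_K8 separately where the two cores
   presumably get different reservations (e.g. the 203/204 and 205/206 pairs
   earlier in this function).  */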
(((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 125: case 124: case 123: case 122: case 121: case 120: case 119: case 118: case 116: case 115: case 110: case 107: case 104: case 69: case 64: case 63: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 74: case 73: case 72: case 61: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode)))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 126 /* 0x7e */; } else if 
((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 59: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! 
((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && ((((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && ((! 
((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || ((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))))))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! 
((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! 
((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 399: case 105: case 56: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (memory_operand (operands[1], VOIDmode)))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (memory_operand (operands[1], VOIDmode)))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 55: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode)))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 162 /* 0xa2 
*/; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 60: case 54: case 53: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode)))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 52: if (((ix86_tune) == (CPU_PENTIUM))) { return 7; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 30 /* 0x1e */; } else if (((ix86_tune) == (CPU_K6))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 162 /* 0xa2 */; } else { return 273 /* 0x111 */; } case 50: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_type (insn) == TYPE_IMOV)) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else 
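/* The Pentium branches additionally key on the pent_pair attribute, which
   appears to encode whether the insn can pair in the U/V pipes
   (PENT_PAIR_UV/PENT_PAIR_PU) or must issue unpaired (PENT_PAIR_NP).  */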
if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 17 /* 0x11 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_PU) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (((get_attr_imm_disp (insn) == IMM_DISP_TRUE) || (! (get_attr_type (insn) == TYPE_IMOV))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (get_attr_type (insn) == TYPE_IMOV))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (get_attr_type (insn) == TYPE_IMOV))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && (get_attr_type (insn) == TYPE_IMOV))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))))) { return 32 /* 0x20 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))))) { return 33 /* 0x21 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) && (get_attr_memory (insn) == MEMORY_NONE))) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! 
(aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 122 /* 0x7a */; } else if ((((ix86_tune) == (CPU_K6))) && ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((get_attr_type (insn) == TYPE_IMOV) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 87: case 47: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode)))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode)))) { return 125 /* 0x7d */; } else if 
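/* Expressions of the form (1 << which_alternative) & MASK test groups of
   constraint alternatives at once; they rely on which_alternative having
   been set by the extract_constrain_insn_cached call at the top of the
   case.  */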
((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 161 /* 0xa1 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 164 /* 0xa4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_BOTH)) { return 167 /* 0xa7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 169 /* 0xa9 */; } else { return 273 /* 0x111 */; } case 86: case 68: case 46: if (((ix86_tune) == (CPU_PENTIUM))) { return 7; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 30 /* 0x1e */; } else if (((ix86_tune) == (CPU_K6))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 162 /* 0xa2 */; } else { return 273 /* 0x111 */; } case 85: case 67: case 51: case 45: if (((ix86_tune) == (CPU_PENTIUM))) { return 7; } else if (((ix86_tune) == (CPU_PENTIUMPRO))) { return 31 /* 0x1f */; } else if (((ix86_tune) == (CPU_K6))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 165 /* 0xa5 */; } else { return 273 /* 0x111 */; } case 44: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode)))))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && ((! 
((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xfc)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 5)) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 5)) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xfc))) { return 216 /* 0xd8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x1c)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 219 /* 0xdb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x1c))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 43: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode)))))) { return 7; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_UV) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_pent_pair (insn) == PENT_PAIR_NP) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 29 /* 0x1d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 30 /* 0x1e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_STORE) && ((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))))) { return 31 /* 0x1f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))))) { return 34 /* 0x22 */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (nonimmediate_operand (operands[1], VOIDmode))))) { return 124 /* 0x7c */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((get_attr_memory (insn) == MEMORY_NONE) && (immediate_operand (operands[1], VOIDmode))))) { return 125 /* 0x7d */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 126 /* 0x7e */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 127 /* 0x7f */; } else if ((((ix86_tune) == (CPU_K6))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 128 /* 0x80 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))) { return 134 /* 0x86 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))) { return 150 /* 0x96 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 162 /* 0xa2 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 163 /* 0xa3 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && (get_attr_memory (insn) == MEMORY_STORE))) { return 165 /* 0xa5 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_BOTH))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((!((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_STORE))) { return 168 /* 0xa8 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 203 /* 0xcb */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xe0)) && (memory_operand (operands[1], DFmode)))) { return 204 /* 0xcc */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 205 /* 0xcd */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 5) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 206 /* 0xce */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 209 /* 0xd1 */; } else if ((((ix86_tune) == (CPU_K8))) && ((((1 << which_alternative) & 0xfc)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 210 /* 0xd2 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 211 /* 0xd3 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 5) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { return 212 /* 0xd4 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0xfc)) && ((get_attr_memory (insn) == MEMORY_STORE) || (get_attr_memory (insn) == MEMORY_BOTH)))) { 
return 213 /* 0xd5 */; } else if ((((ix86_tune) == (CPU_K8))) && (which_alternative == 5)) { return 214 /* 0xd6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (which_alternative == 5)) { return 215 /* 0xd7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0xfc))) { return 216 /* 0xd8 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && ((((1 << which_alternative) & 0x1c)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 219 /* 0xdb */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (((1 << which_alternative) & 0x1c))) { return 220 /* 0xdc */; } else { return 273 /* 0x111 */; } case 401: case 397: case 396: case 393: case 347: case 341: case 321: case 319: case 299: case 297: case 244: case 108: case 103: case 81: case 80: case 62: case 42: case 41: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (memory_operand (operands[1], VOIDmode)))) { return 16 /* 0x10 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (memory_operand (operands[1], VOIDmode)))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 113 /* 0x71 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 115 /* 0x73 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 123 /* 0x7b */; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((get_attr_memory (insn) == MEMORY_STORE) || (memory_operand (operands[1], VOIDmode)))) { return 135 /* 0x87 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (memory_operand (operands[1], VOIDmode))) { return 166 /* 0xa6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_STORE)) { return 168 /* 0xa8 */; } else { return 273 /* 0x111 */; } case 79: case 78: case 40: case 39: extract_insn_cached (insn); if (((ix86_tune) == (CPU_PENTIUM))) { return 9; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! 
(memory_operand (operands[0], VOIDmode)))) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[0], VOIDmode))) { return 112 /* 0x70 */; } else if (((ix86_tune) == (CPU_K6))) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 146 /* 0x92 */; } else { return 273 /* 0x111 */; } case 77: case 58: case 57: case 49: case 48: case 38: case 37: case 36: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (! (memory_operand (operands[1], VOIDmode)))) { return 8; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (memory_operand (operands[1], VOIDmode))) { return 19 /* 0x13 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (! (memory_operand (operands[1], VOIDmode)))) { return 111 /* 0x6f */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (memory_operand (operands[1], VOIDmode))) { return 112 /* 0x70 */; } else if ((((ix86_tune) == (CPU_K6))) && (memory_operand (operands[1], VOIDmode))) { return 131 /* 0x83 */; } else if (((ix86_tune) == (CPU_K6))) { return 135 /* 0x87 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 145 /* 0x91 */; } else { return 273 /* 0x111 */; } case 35: case 32: extract_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (GET_MODE (operands[1]) == SFmode))) { return 82 /* 0x52 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (GET_MODE (operands[1]) == SFmode))) { return 83 /* 0x53 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 232 /* 0xe8 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 233 /* 0xe9 */; } else { return 273 /* 0x111 */; } case 34: case 31: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && (which_alternative == 0))) { return 56 /* 0x38 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && (which_alternative == 0))) { return 57 /* 0x39 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_NONE) && ((which_alternative == 1) && (GET_MODE (operands[1]) == SFmode)))) { return 82 /* 0x52 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && ((get_attr_memory (insn) == MEMORY_LOAD) && ((which_alternative == 1) && (GET_MODE (operands[1]) == SFmode)))) { return 83 /* 0x53 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_NONE))) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_ATHLON))) && 
((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 197 /* 0xc5 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 0) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 198 /* 0xc6 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 199 /* 0xc7 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (which_alternative == 0)) { return 202 /* 0xca */; } else if ((((ix86_tune) == (CPU_ATHLON))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 232 /* 0xe8 */; } else if ((((ix86_tune) == (CPU_K8))) && ((which_alternative == 1) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 233 /* 0xe9 */; } else { return 273 /* 0x111 */; } case 33: case 30: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 56 /* 0x38 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 57 /* 0x39 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 197 /* 0xc5 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 198 /* 0xc6 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 199 /* 0xc7 */; } else { return 273 /* 0x111 */; } case 25: case 23: case 21: case 20: case 19: if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 56 /* 0x38 */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 57 /* 0x39 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 131 /* 0x83 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 137 /* 0x89 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 138 /* 0x8a */; } else if ((((ix86_tune) == (CPU_ATHLON))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 200 /* 0xc8 */; } else if ((((ix86_tune) == (CPU_K8))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 201 /* 0xc9 */; } else if ((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) { return 202 /* 0xca */; } else { return 273 /* 0x111 */; } case 8: case 7: case 6: if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 21 /* 0x15 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! 
(get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 25 /* 0x19 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else { return 273 /* 0x111 */; } case 285: case 284: case 283: case 17: case 16: case 15: case 14: case 13: case 12: case 11: case 10: case 9: case 5: case 4: case 3: case 2: case 1: case 0: if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 20 /* 0x14 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_LOAD))) { return 23 /* 0x17 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((! (get_attr_imm_disp (insn) == IMM_DISP_TRUE)) && (get_attr_memory (insn) == MEMORY_NONE))) { return 24 /* 0x18 */; } else if ((((ix86_tune) == (CPU_PENTIUM))) && ((get_attr_imm_disp (insn) == IMM_DISP_TRUE) && (get_attr_memory (insn) == MEMORY_NONE))) { return 27 /* 0x1b */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 109 /* 0x6d */; } else if ((((ix86_tune) == (CPU_PENTIUMPRO))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 110 /* 0x6e */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 121 /* 0x79 */; } else if ((((ix86_tune) == (CPU_K6))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 122 /* 0x7a */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_NONE)) { return 160 /* 0xa0 */; } else if (((((ix86_tune) == (CPU_ATHLON))) || (((ix86_tune) == (CPU_K8)))) && (get_attr_memory (insn) == MEMORY_LOAD)) { return 163 /* 0xa3 */; } else { return 273 /* 0x111 */; } case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 28 /* 0x1c */; } } int result_ready_cost (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 1; } } int function_units_used (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return -1 /* units: none */; } } enum attr_athlon_decode get_attr_athlon_decode (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 584: case 581: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return ATHLON_DECODE_DIRECT; } else { return ATHLON_DECODE_DIRECT; } case 250: 
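/* Descriptive note (added): for the remaining recognized patterns the
   athlon_decode attribute (DIRECT, DOUBLE or VECTOR) is selected per insn
   code, usually from the matched constraint alternative and from whether an
   operand is in memory, as the cases below show.  */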
extract_constrain_insn_cached (insn); if ((which_alternative != 0) || (((ix86_tune) == (CPU_ATHLON)))) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 249: case 248: case 247: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_ATHLON))) || ((which_alternative == 1) || ((which_alternative == 2) && (memory_operand (operands[1], VOIDmode))))) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 176: case 175: case 174: extract_constrain_insn_cached (insn); if ((which_alternative != 0) || ((which_alternative == 0) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 172: case 169: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { if (which_alternative == 1) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } } else if (which_alternative == 2) { return ATHLON_DECODE_DOUBLE; } else { return ATHLON_DECODE_DIRECT; } case 166: case 163: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { if (which_alternative == 1) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } } else if (which_alternative == 2) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DOUBLE; } case 136: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return ATHLON_DECODE_VECTOR; } else if (which_alternative == 1) { return ATHLON_DECODE_DOUBLE; } else { return ATHLON_DECODE_DIRECT; } case 135: case 134: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xe)) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 171: case 168: case 165: case 162: case 145: case 142: case 133: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 131: case 130: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE))) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 128: extract_constrain_insn_cached (insn); if (((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)) && (which_alternative == 1)) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 112: case 111: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 101: case 100: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x7)) || ((((1 << which_alternative) & 0x7)) && ((get_attr_memory (insn) == MEMORY_LOAD) || (get_attr_memory (insn) == MEMORY_STORE)))) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 96: case 95: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x18)) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 89: case 88: extract_constrain_insn_cached (insn); if ((which_alternative != 1) || ((which_alternative == 1) && (memory_operand (operands[1], VOIDmode)))) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 84: case 83: extract_constrain_insn_cached (insn); if (which_alternative == 4) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 721: case 720: case 656: case 654: case 82: extract_constrain_insn_cached (insn); if (((1 << 
which_alternative) & 0x3)) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 76: extract_constrain_insn_cached (insn); if ((which_alternative != 0) || ((which_alternative == 0) && (memory_operand (operands[1], VOIDmode)))) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 77: case 58: case 57: case 49: case 48: case 38: case 37: case 36: extract_insn_cached (insn); if (memory_operand (operands[1], VOIDmode)) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 251: case 252: case 253: if (((ix86_tune) == (CPU_ATHLON))) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DIRECT; } case 254: case 255: case 256: case 257: case 258: case 259: case 260: case 261: case 262: case 263: if (((ix86_tune) == (CPU_ATHLON))) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DOUBLE; } case 150: case 151: case 155: case 156: case 787: case 788: case 789: case 790: case 920: case 921: case 922: case 923: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return ATHLON_DECODE_DOUBLE; } else { return ATHLON_DECODE_VECTOR; } case 170: case 173: case 924: case 925: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return ATHLON_DECODE_DOUBLE; } else { return ATHLON_DECODE_DIRECT; } case 139: case 164: case 167: case 785: case 786: case 926: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return ATHLON_DECODE_VECTOR; } else { return ATHLON_DECODE_DOUBLE; } case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); case 1022: case 1021: case 853: case 718: case 717: case 716: case 715: case 714: case 713: case 712: case 711: case 710: case 709: case 708: case 697: case 696: case 686: case 685: case 684: case 683: case 682: case 681: case 680: case 679: case 678: case 677: case 676: case 675: case 674: case 673: case 668: case 667: case 665: case 664: case 662: case 661: case 659: case 658: case 657: case 655: case 647: case 646: case 645: case 644: case 643: case 642: case 641: case 640: case 639: case 638: case 637: case 636: case 635: case 634: case 633: case 632: case 631: case 630: case 629: case 628: case 627: case 626: case 625: case 624: case 623: case 622: case 621: case 620: case 619: case 618: case 617: case 616: case 615: case 614: case 613: case 612: case 611: case 610: case 609: case 608: case 607: case 606: case 605: case 604: case 603: case 602: case 601: case 600: case 599: case 598: case 597: case 596: case 595: case 594: case 593: case 592: case 591: case 550: case 549: case 548: case 547: case 546: case 545: case 544: case 543: case 542: case 541: case 540: case 539: case 538: case 537: case 536: case 535: case 534: case 533: case 532: case 531: case 530: case 529: case 528: case 526: case 525: case 524: case 523: case 522: case 521: case 520: case 519: case 518: case 517: case 516: case 515: case 508: case 507: case 506: case 505: case 504: case 503: case 502: case 501: case 500: case 499: case 450: case 449: case 423: case 422: case 405: case 404: case 385: case 384: case 383: case 382: case 381: case 380: case 379: case 378: case 377: case 370: case 369: case 368: case 367: case 366: case 365: case 364: case 363: case 362: case 351: case 287: case 286: case 277: case 276: case 275: case 274: case 273: case 272: case 271: case 270: case 269: case 268: case 267: case 266: case 265: case 264: case 225: case 177: case 160: case 127: case 126: case 117: case 99: case 98: case 94: case 
93: case 79: case 78: case 75: case 40: case 39: case 28: case 27: case 26: case 24: case 22: case 18: case 29: case 30: case 31: case 32: case 33: case 34: case 35: case 47: case 87: case 161: case 406: return ATHLON_DECODE_VECTOR; default: return ATHLON_DECODE_DIRECT; } } enum attr_fp_int_src get_attr_fp_int_src (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 162: case 163: case 164: case 165: case 166: case 167: case 168: case 169: case 170: case 171: case 172: case 173: case 174: case 175: case 176: case 565: case 566: case 570: case 571: case 576: case 577: return FP_INT_SRC_TRUE; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return FP_INT_SRC_FALSE; } } enum attr_imm_disp get_attr_imm_disp (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 672: extract_constrain_insn_cached (insn); if ((which_alternative == 0) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 671: extract_constrain_insn_cached (insn); if ((((which_alternative == 1) && (const0_operand (operands[2], DImode))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) || ((which_alternative == 0) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode))))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 670: extract_constrain_insn_cached (insn); if ((((which_alternative == 1) && (const0_operand (operands[2], SImode))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) || ((which_alternative == 0) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode))))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 655: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return IMM_DISP_UNKNOWN; } else { return IMM_DISP_FALSE; } case 426: case 425: case 417: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 410: extract_constrain_insn_cached (insn); if (((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) || ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 408: extract_constrain_insn_cached (insn); if (((which_alternative == 0) && ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) || ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! 
(const1_operand (operands[2], VOIDmode)))))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 416: case 415: case 414: case 413: case 412: case 411: case 409: case 407: case 403: case 402: extract_insn_cached (insn); if (((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_ISHIFT)) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 294: case 290: extract_constrain_insn_cached (insn); if ((((1 << which_alternative) & 0x3)) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 288: extract_constrain_insn_cached (insn); if ((((1 << which_alternative) & 0x7)) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 217: extract_insn_cached (insn); if ((! (incdec_operand (operands[2], QImode))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 223: case 222: case 221: case 219: case 218: case 216: extract_insn_cached (insn); if ((! (incdec_operand (operands[2], QImode))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 215: extract_constrain_insn_cached (insn); if (((which_alternative != 3) && (! (incdec_operand (operands[2], QImode)))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 220: case 214: case 213: case 212: case 211: case 210: extract_insn_cached (insn); if ((! (incdec_operand (operands[2], HImode))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 209: extract_constrain_insn_cached (insn); if (((which_alternative != 2) && (! (incdec_operand (operands[2], HImode)))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 208: case 207: case 206: case 205: case 204: case 203: extract_insn_cached (insn); if ((! (incdec_operand (operands[2], SImode))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 200: case 199: case 198: case 197: extract_insn_cached (insn); if ((! 
(incdec_operand (operands[2], DImode))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 202: case 201: case 196: extract_insn_cached (insn); if ((get_attr_type (insn) == TYPE_ALU) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 491: case 489: case 488: case 487: case 486: case 485: case 484: case 483: case 482: case 481: case 480: case 478: case 476: case 475: case 474: case 473: case 472: case 471: case 470: case 469: case 468: case 467: case 465: case 463: case 462: case 461: case 460: case 459: case 458: case 457: case 456: case 455: case 454: case 453: case 452: case 451: case 448: case 447: case 446: case 445: case 444: case 443: case 441: case 439: case 438: case 437: case 436: case 435: case 434: case 433: case 432: case 431: case 430: case 429: case 428: case 427: case 424: case 421: case 420: case 419: case 418: case 406: case 350: case 349: case 348: case 346: case 345: case 344: case 343: case 342: case 340: case 339: case 338: case 337: case 336: case 335: case 334: case 333: case 332: case 331: case 330: case 329: case 328: case 327: case 326: case 325: case 324: case 323: case 322: case 320: case 318: case 317: case 316: case 315: case 314: case 313: case 312: case 311: case 310: case 309: case 308: case 307: case 306: case 305: case 304: case 303: case 302: case 301: case 300: case 298: case 296: case 295: case 293: case 292: case 291: case 289: case 277: case 276: case 274: case 271: case 268: case 265: case 264: case 263: case 262: case 261: case 260: case 259: case 258: case 257: case 256: case 255: case 254: case 253: case 252: case 251: case 250: case 249: case 248: case 247: case 246: case 245: case 243: case 242: case 241: case 240: case 239: case 238: case 237: case 236: case 235: case 234: case 233: case 232: case 231: case 230: case 229: case 228: case 227: case 226: case 224: case 185: case 184: case 183: case 182: case 181: case 180: case 179: case 178: extract_insn_cached (insn); if ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[2], VOIDmode))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 172: case 169: case 166: case 163: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return IMM_DISP_UNKNOWN; } else { return IMM_DISP_FALSE; } case 135: case 134: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xe)) { return IMM_DISP_UNKNOWN; } else { return IMM_DISP_FALSE; } case 112: case 111: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return IMM_DISP_UNKNOWN; } else { return IMM_DISP_FALSE; } case 114: case 113: case 109: case 106: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 101: case 100: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x7)) { return IMM_DISP_UNKNOWN; } else { return IMM_DISP_FALSE; } case 96: case 95: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x18)) { return IMM_DISP_UNKNOWN; } else { return IMM_DISP_FALSE; } case 91: case 90: extract_constrain_insn_cached (insn); if ((((1 << which_alternative) & 0x18)) && ((memory_displacement_operand 
(operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 89: case 88: extract_constrain_insn_cached (insn); if (which_alternative != 1) { return IMM_DISP_UNKNOWN; } else { return IMM_DISP_FALSE; } case 84: extract_constrain_insn_cached (insn); if (which_alternative == 4) { return IMM_DISP_UNKNOWN; } else if (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 83: extract_constrain_insn_cached (insn); if (which_alternative == 4) { return IMM_DISP_UNKNOWN; } else if (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 721: case 720: case 82: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return IMM_DISP_UNKNOWN; } else { return IMM_DISP_FALSE; } case 176: case 175: case 174: case 171: case 168: case 165: case 162: case 145: case 142: case 133: case 76: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return IMM_DISP_UNKNOWN; } else { return IMM_DISP_FALSE; } case 71: extract_constrain_insn_cached (insn); if (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 59: extract_constrain_insn_cached (insn); if ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 70: case 66: case 65: case 50: extract_insn_cached (insn); if ((get_attr_type (insn) == TYPE_IMOV) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 44: extract_constrain_insn_cached (insn); if (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 43: extract_constrain_insn_cached (insn); if (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode))))) && ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case 492: case 490: case 479: case 477: case 466: case 464: case 442: case 440: case 401: case 399: case 397: case 396: case 393: case 347: case 341: case 321: case 319: case 299: case 297: case 285: case 284: case 283: case 282: case 281: case 280: case 279: case 278: case 244: case 108: case 105: case 103: case 87: case 86: case 85: case 81: case 80: case 74: case 73: case 72: case 68: case 67: case 62: case 61: case 60: case 56: case 55: case 54: case 53: case 52: case 51: case 47: case 46: case 45: case 42: case 41: case 17: case 16: case 15: case 14: case 13: case 12: case 11: case 10: case 9: case 8: case 7: case 6: case 5: case 4: case 3: case 2: case 1: case 0: extract_insn_cached (insn); if ((memory_displacement_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode))) { return IMM_DISP_TRUE; } else { return IMM_DISP_FALSE; } case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); case 1022: case 1021: case 853: case 718: case 717: case 716: case 715: case 714: case 713: case 712: case 711: case 710: case 709: case 708: case 697: case 696: case 686: case 685: case 684: case 683: case 682: case 681: case 680: case 679: case 678: case 677: case 676: case 675: case 674: case 673: case 668: case 667: case 665: case 664: case 662: case 661: case 659: case 658: case 550: case 549: case 548: case 547: case 546: case 545: case 544: case 543: case 542: case 541: case 540: case 539: case 538: case 537: case 536: case 535: case 532: case 531: case 530: case 529: case 528: case 526: case 525: case 524: case 523: case 508: case 507: case 506: case 505: case 504: case 503: case 502: case 501: case 500: case 499: case 450: case 449: case 423: case 422: case 405: case 404: case 385: case 384: case 383: case 382: case 381: case 380: case 379: case 378: case 377: case 370: case 369: case 368: case 367: case 366: case 365: case 364: case 363: case 362: case 351: case 287: case 286: case 275: case 273: case 272: case 270: case 269: case 267: case 266: case 225: case 177: case 161: case 160: case 127: case 126: case 117: case 99: case 98: case 94: case 93: case 75: case 29: case 28: case 27: case 26: case 24: case 22: case 18: return IMM_DISP_UNKNOWN; default: return IMM_DISP_FALSE; } } int get_attr_length_address (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 695: case 694: case 693: case 692: case 691: case 690: case 689: case 688: case 687: extract_constrain_insn_cached (insn); if (constant_call_address_operand (operands[1], VOIDmode)) { return 0; } else { return ix86_attr_length_address_default (insn); } case 655: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return 0; } else { return ix86_attr_length_address_default (insn); } case 522: case 521: case 520: case 519: case 518: case 517: case 516: case 515: extract_constrain_insn_cached (insn); if (constant_call_address_operand (operands[0], VOIDmode)) { return 0; } else { return ix86_attr_length_address_default (insn); } case 172: case 169: case 166: case 163: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return 0; } else { return ix86_attr_length_address_default (insn); } case 135: case 134: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xe)) { return 0; } 
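/* Descriptive note (added): this table gives the length in bytes of the
   address part of the insn encoding.  A few patterns/alternatives are
   hard-coded to 0 (or 8 further below); everything else defers to
   ix86_attr_length_address_default, which presumably derives the length
   from the matched memory operand.  */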
else { return ix86_attr_length_address_default (insn); } case 112: case 111: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return 0; } else { return ix86_attr_length_address_default (insn); } case 101: case 100: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x7)) { return 0; } else { return ix86_attr_length_address_default (insn); } case 96: case 95: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x18)) { return 0; } else { return ix86_attr_length_address_default (insn); } case 89: case 88: extract_constrain_insn_cached (insn); if (which_alternative != 1) { return 0; } else { return ix86_attr_length_address_default (insn); } case 84: case 83: extract_constrain_insn_cached (insn); if (which_alternative == 4) { return 0; } else { return ix86_attr_length_address_default (insn); } case 721: case 720: case 82: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return 0; } else { return ix86_attr_length_address_default (insn); } case 176: case 175: case 174: case 171: case 168: case 165: case 162: case 145: case 142: case 133: case 76: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return 0; } else { return ix86_attr_length_address_default (insn); } case 45: case 46: case 51: case 52: case 67: case 68: case 85: case 86: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 8; } else { return 0; } case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); case 1022: case 1021: case 718: case 717: case 716: case 715: case 714: case 713: case 712: case 711: case 710: case 709: case 708: case 697: case 696: case 686: case 685: case 684: case 683: case 682: case 681: case 680: case 679: case 678: case 677: case 676: case 675: case 674: case 673: case 668: case 667: case 665: case 664: case 662: case 661: case 659: case 658: case 647: case 646: case 645: case 644: case 643: case 642: case 641: case 640: case 639: case 638: case 637: case 636: case 635: case 634: case 633: case 632: case 631: case 630: case 629: case 628: case 627: case 626: case 625: case 624: case 623: case 622: case 621: case 620: case 619: case 618: case 617: case 550: case 549: case 548: case 547: case 546: case 545: case 544: case 543: case 542: case 541: case 540: case 539: case 538: case 537: case 536: case 535: case 532: case 531: case 530: case 529: case 528: case 526: case 525: case 524: case 523: case 508: case 507: case 506: case 505: case 504: case 503: case 502: case 501: case 500: case 499: case 450: case 449: case 423: case 422: case 405: case 404: case 385: case 384: case 383: case 382: case 381: case 380: case 379: case 378: case 377: case 370: case 369: case 368: case 367: case 366: case 365: case 364: case 363: case 362: case 351: case 287: case 286: case 275: case 273: case 272: case 270: case 269: case 267: case 266: case 225: case 177: case 161: case 160: case 127: case 126: case 117: case 102: case 99: case 98: case 97: case 94: case 93: case 92: case 75: case 29: case 28: case 27: case 26: case 24: case 22: case 18: case 853: return 0; default: extract_constrain_insn_cached (insn); return ix86_attr_length_address_default (insn); } } int get_attr_length_immediate (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 695: case 694: case 693: case 692: case 691: case 690: case 689: case 688: case 687: extract_insn_cached (insn); if (constant_call_address_operand (operands[1], VOIDmode)) { 
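/* Descriptive note (added): a constant call target is counted here as a
   4-byte immediate; non-constant (indirect) call operands contribute no
   immediate bytes.  */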
return 4; } else { return 0; } case 671: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && (! (const0_operand (operands[2], DImode)))) { return 0; } else if (which_alternative == 0) { return ix86_attr_length_immediate_default(insn,1); } else { return ix86_attr_length_immediate_default(insn,0); } case 670: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && (! (const0_operand (operands[2], SImode)))) { return 0; } else if (which_alternative == 0) { return ix86_attr_length_immediate_default(insn,1); } else { return ix86_attr_length_immediate_default(insn,0); } case 569: case 564: extract_constrain_insn_cached (insn); if (get_attr_unit (insn) == UNIT_SSE) { return 0; } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 568: case 563: extract_constrain_insn_cached (insn); if ((get_attr_unit (insn) == UNIT_I387) || (get_attr_unit (insn) == UNIT_SSE)) { return 0; } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 580: case 579: case 578: case 577: case 576: case 575: case 574: case 573: case 572: case 571: case 570: case 567: case 566: case 565: case 562: extract_constrain_insn_cached (insn); if (get_attr_unit (insn) == UNIT_I387) { return 0; } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 559: case 556: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return 0; } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 522: case 521: case 520: case 519: case 518: case 517: case 516: case 515: extract_insn_cached (insn); if (constant_call_address_operand (operands[0], VOIDmode)) { return 4; } else { return 0; } case 426: case 425: case 417: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 0; } else { return ix86_attr_length_immediate_default(insn,1); } case 414: extract_constrain_insn_cached (insn); if (which_alternative == 2) { return 0; } else if ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_ISHIFT)) { return ix86_attr_length_immediate_default(insn,1); } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 410: extract_constrain_insn_cached (insn); if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) || ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode))))) { return ix86_attr_length_immediate_default(insn,1); } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 408: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return 0; } else if ((which_alternative == 0) && ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) || ((! ((TARGET_DOUBLE_WITH_ADD) != (0))) || (! (const1_operand (operands[2], VOIDmode)))))) { return ix86_attr_length_immediate_default(insn,1); } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 416: case 415: case 413: case 412: case 409: case 403: extract_constrain_insn_cached (insn); if ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_ISHIFT)) { return ix86_attr_length_immediate_default(insn,1); } else { return /* Update immediate_length and other attributes! 
*/ abort(),1; } case 411: case 407: case 402: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return 0; } else if ((get_attr_type (insn) == TYPE_ALU) || (get_attr_type (insn) == TYPE_ISHIFT)) { return ix86_attr_length_immediate_default(insn,1); } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 294: case 290: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return ix86_attr_length_immediate_default(insn,1); } else { return 0; } case 288: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return ix86_attr_length_immediate_default(insn,1); } else { return 0; } case 223: case 222: case 221: case 219: case 218: case 217: case 216: extract_constrain_insn_cached (insn); if (incdec_operand (operands[2], QImode)) { return 0; } else { return ix86_attr_length_immediate_default(insn,1); } case 215: extract_constrain_insn_cached (insn); if (((which_alternative != 3) && (incdec_operand (operands[2], QImode))) || (which_alternative == 3)) { return 0; } else { return ix86_attr_length_immediate_default(insn,1); } case 220: case 214: case 213: case 212: case 211: case 210: extract_constrain_insn_cached (insn); if (incdec_operand (operands[2], HImode)) { return 0; } else { return ix86_attr_length_immediate_default(insn,1); } case 209: extract_constrain_insn_cached (insn); if (((which_alternative != 2) && (incdec_operand (operands[2], HImode))) || (which_alternative == 2)) { return 0; } else { return ix86_attr_length_immediate_default(insn,1); } case 208: case 207: case 206: case 205: case 204: case 203: extract_constrain_insn_cached (insn); if (incdec_operand (operands[2], SImode)) { return 0; } else { return ix86_attr_length_immediate_default(insn,1); } case 202: extract_constrain_insn_cached (insn); if ((get_attr_type (insn) == TYPE_INCDEC) || ((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode)))) { return 0; } else if (get_attr_type (insn) == TYPE_ALU) { return ix86_attr_length_immediate_default(insn,1); } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 201: extract_constrain_insn_cached (insn); if ((get_attr_type (insn) == TYPE_INCDEC) || ((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode)))) { return 0; } else if (get_attr_type (insn) == TYPE_ALU) { return ix86_attr_length_immediate_default(insn,1); } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 200: case 199: case 198: case 197: extract_constrain_insn_cached (insn); if (incdec_operand (operands[2], DImode)) { return 0; } else { return ix86_attr_length_immediate_default(insn,1); } case 196: extract_constrain_insn_cached (insn); if ((get_attr_type (insn) == TYPE_INCDEC) || ((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode)))) { return 0; } else if (get_attr_type (insn) == TYPE_ALU) { return ix86_attr_length_immediate_default(insn,1); } else { return /* Update immediate_length and other attributes! 
*/ abort(),1; } case 114: case 113: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return 0; } else if (which_alternative == 0) { return ix86_attr_length_immediate_default(insn,1); } else { return ix86_attr_length_immediate_default(insn,0); } case 91: case 90: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x18)) { return 0; } else { return ix86_attr_length_immediate_default(insn,0); } case 89: case 88: extract_constrain_insn_cached (insn); if (which_alternative != 1) { return 0; } else { return ix86_attr_length_immediate_default(insn,1); } case 84: extract_constrain_insn_cached (insn); if (which_alternative == 0) { if (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))) { return 0; } else { return ix86_attr_length_immediate_default(insn,0); } } else if (which_alternative == 1) { return 4; } else if (which_alternative == 2) { return 8; } else { if (((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))) || (((1 << which_alternative) & 0x7f0))) { return 0; } else { return ix86_attr_length_immediate_default(insn,0); } } case 83: extract_constrain_insn_cached (insn); if (which_alternative == 0) { if (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))) { return 0; } else { return ix86_attr_length_immediate_default(insn,0); } } else if (which_alternative == 1) { return 4; } else if (which_alternative == 2) { return 8; } else { if (((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))) || (((1 << which_alternative) & 0x7f0))) { return 0; } else { return ix86_attr_length_immediate_default(insn,0); } } case 672: case 76: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return 0; } else { return ix86_attr_length_immediate_default(insn,1); } case 71: extract_constrain_insn_cached (insn); if ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) { return ix86_attr_length_immediate_default(insn,1); } else { return ix86_attr_length_immediate_default(insn,0); } case 70: case 66: case 65: extract_constrain_insn_cached (insn); if (get_attr_type (insn) == TYPE_IMOVX) { return ix86_attr_length_immediate_default(insn,1); } else if (get_attr_type (insn) == TYPE_IMOV) { return ix86_attr_length_immediate_default(insn,0); } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 59: extract_constrain_insn_cached (insn); if (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) { return ix86_attr_length_immediate_default(insn,1); } else if (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) { return ix86_attr_length_immediate_default(insn,0); } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 50: extract_constrain_insn_cached (insn); if (((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! 
(aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) { return ix86_attr_length_immediate_default(insn,1); } else if (get_attr_type (insn) == TYPE_IMOV) { return ix86_attr_length_immediate_default(insn,0); } else { return /* Update immediate_length and other attributes! */ abort(),1; } case 553: case 551: case 285: case 284: case 283: case 281: case 280: case 279: case 278: case 87: case 74: case 73: case 72: case 61: case 60: case 55: case 54: case 53: case 47: extract_constrain_insn_cached (insn); return ix86_attr_length_immediate_default(insn,0); case 85: case 67: case 51: case 45: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 0; } else { return ix86_attr_length_immediate_default(insn,0); } case 44: extract_constrain_insn_cached (insn); if (((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))) || (((1 << which_alternative) & 0xfc))) { return 0; } else { return ix86_attr_length_immediate_default(insn,0); } case 43: extract_constrain_insn_cached (insn); if (((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))) || (((1 << which_alternative) & 0xfc))) { return 0; } else { return ix86_attr_length_immediate_default(insn,0); } case 554: case 552: case 492: case 491: case 490: case 489: case 488: case 487: case 486: case 485: case 484: case 483: case 482: case 481: case 480: case 479: case 478: case 477: case 476: case 475: case 474: case 473: case 472: case 471: case 470: case 469: case 468: case 467: case 466: case 465: case 464: case 463: case 462: case 461: case 460: case 459: case 458: case 457: case 456: case 455: case 454: case 453: case 452: case 451: case 448: case 447: case 446: case 445: case 444: case 443: case 442: case 441: case 440: case 439: case 438: case 437: case 436: case 435: case 434: case 433: case 432: case 431: case 430: case 429: case 428: case 427: case 424: case 421: case 420: case 419: case 418: case 406: case 401: case 400: case 399: case 398: case 397: case 396: case 395: case 394: case 393: case 392: case 361: case 360: case 359: case 358: case 357: case 356: case 355: case 354: case 353: case 352: case 350: case 349: case 348: case 347: case 346: case 341: case 340: case 339: case 338: case 337: case 336: case 335: case 334: case 333: case 332: case 331: case 330: case 329: case 328: case 327: case 322: case 321: case 320: case 319: case 318: case 317: case 316: case 315: case 314: case 313: case 312: case 311: case 310: case 309: case 308: case 307: case 306: case 305: case 299: case 298: case 297: case 296: case 295: case 293: case 292: case 291: case 289: case 263: case 262: case 261: case 250: case 249: case 248: case 247: case 246: case 245: case 244: case 243: case 242: case 241: case 240: case 239: case 238: case 237: case 236: case 235: case 234: case 233: case 232: case 231: case 230: case 229: case 228: case 227: case 226: case 224: case 185: case 184: case 183: case 182: case 181: case 180: case 179: case 178: case 125: case 124: case 123: case 122: case 121: case 120: case 119: case 118: case 116: case 115: case 110: case 109: case 108: case 107: case 106: case 105: case 104: case 103: case 79: case 78: case 77: case 69: case 64: case 63: case 58: case 57: case 49: case 48: case 40: case 39: case 38: case 37: case 36: case 17: case 16: case 15: case 13: case 12: case 11: case 10: case 8: case 7: case 5: case 4: case 2: case 1: extract_constrain_insn_cached 
(insn); return ix86_attr_length_immediate_default(insn,1); case 0: case 3: case 6: case 9: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 0; } else { return 1; } case 514: case 509: case 498: case 497: case 42: case 81: case 282: case 300: case 301: case 323: case 342: return 1; case 526: return 2; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 0; } } enum attr_memory get_attr_memory (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 695: case 694: case 693: case 692: case 691: case 690: case 689: case 688: case 687: extract_insn_cached (insn); if (constant_call_address_operand (operands[1], VOIDmode)) { return MEMORY_NONE; } else { return MEMORY_LOAD; } case 672: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (memory_operand (operands[2], VOIDmode))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 671: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && (! (const0_operand (operands[2], DImode)))) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (((which_alternative != 1) || (! (const0_operand (operands[2], DImode)))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 670: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && (! (const0_operand (operands[2], SImode)))) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (((which_alternative != 1) || (! 
(const0_operand (operands[2], SImode)))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 655: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return MEMORY_UNKNOWN; } else { return MEMORY_NONE; } case 656: case 654: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((memory_operand (operands[2], VOIDmode)) || (memory_operand (operands[3], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 653: case 652: case 651: case 649: extract_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((memory_operand (operands[2], VOIDmode)) || (memory_operand (operands[3], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 584: case 581: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 522: case 521: case 520: case 519: case 518: case 517: case 516: case 515: extract_insn_cached (insn); if (constant_call_address_operand (operands[0], VOIDmode)) { return MEMORY_NONE; } else { return MEMORY_LOAD; } case 527: case 514: case 513: case 512: case 511: case 510: case 509: case 498: case 497: extract_insn_cached (insn); if (memory_operand (operands[0], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 494: case 493: extract_insn_cached (insn); if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else { return MEMORY_NONE; } case 426: case 425: case 417: extract_constrain_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((which_alternative == 1) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 411: case 408: case 407: case 402: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (memory_operand (operands[2], VOIDmode))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 294: case 290: extract_constrain_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((((1 << which_alternative) & 0x3)) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return 
MEMORY_NONE; } case 288: extract_constrain_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((((1 << which_alternative) & 0x7)) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 217: extract_insn_cached (insn); if (((! (incdec_operand (operands[2], QImode))) && (memory_operand (operands[1], VOIDmode))) || ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode)))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((incdec_operand (operands[2], QImode)) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 215: extract_constrain_insn_cached (insn); if (which_alternative == 3) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (memory_operand (operands[2], VOIDmode))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 414: case 209: extract_constrain_insn_cached (insn); if (which_alternative == 2) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (memory_operand (operands[2], VOIDmode))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 202: extract_constrain_insn_cached (insn); if ((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode))) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (memory_operand (operands[2], VOIDmode))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 201: extract_constrain_insn_cached (insn); if ((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode))) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (memory_operand (operands[2], VOIDmode))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 196: extract_constrain_insn_cached (insn); if ((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode))) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (memory_operand (operands[2], VOIDmode))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 172: case 169: case 166: case 163: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return MEMORY_UNKNOWN; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return 
MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 176: case 175: case 174: case 171: case 168: case 165: case 162: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return MEMORY_UNKNOWN; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 135: case 134: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xe)) { return MEMORY_UNKNOWN; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 145: case 142: case 133: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return MEMORY_UNKNOWN; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 112: case 111: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return MEMORY_UNKNOWN; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 109: case 106: extract_constrain_insn_cached (insn); if (((which_alternative == 1) && (memory_operand (operands[1], VOIDmode))) || ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode)))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 101: case 100: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x7)) { return MEMORY_UNKNOWN; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 96: case 95: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x18)) { return MEMORY_UNKNOWN; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 89: case 88: extract_constrain_insn_cached (insn); if (which_alternative != 1) { return MEMORY_UNKNOWN; } else { if (memory_operand (operands[1], VOIDmode)) { return MEMORY_BOTH; } else { return MEMORY_STORE; } } case 84: extract_constrain_insn_cached (insn); if (which_alternative == 4) { return MEMORY_UNKNOWN; } else if ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) 
&& (symbolic_operand (operands[1], DImode)))) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((((((1 << which_alternative) & 0x7f0)) || (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))) && (!((1 << which_alternative) & 0x7e0))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 83: extract_constrain_insn_cached (insn); if (which_alternative == 4) { return MEMORY_UNKNOWN; } else if ((!((1 << which_alternative) & 0x7f0)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((((((1 << which_alternative) & 0x7f0)) || (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode)))) && (!((1 << which_alternative) & 0x7e0))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 721: case 720: case 82: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return MEMORY_UNKNOWN; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 76: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return MEMORY_UNKNOWN; } else { if (memory_operand (operands[1], VOIDmode)) { return MEMORY_BOTH; } else { return MEMORY_STORE; } } case 71: extract_constrain_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) && ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0))))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 70: case 66: case 65: extract_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (((! (get_attr_type (insn) == TYPE_IMOV)) && (! (get_attr_type (insn) == TYPE_IMOVX))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 59: extract_constrain_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || ((((! ((optimize_size) != (0))) && (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! 
((TARGET_QIMODE_MATH) == (0))))) && (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))))) && ((((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) || ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 50: extract_constrain_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (((! (get_attr_type (insn) == TYPE_IMOV)) && ((((optimize_size) != (0)) || (((which_alternative == 0) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_HIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x6)) && (aligned_operand (operands[1], HImode))))) || ((! ((TARGET_MOVX) != (0))) || (!((1 << which_alternative) & 0x5))))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 1033: case 1032: case 1031: case 1030: case 1029: case 1017: case 1016: case 1015: case 1014: case 1013: case 1012: case 1011: case 1010: case 1009: case 1008: case 1007: case 1006: case 1005: case 1004: case 1003: case 1002: case 1001: case 1000: case 999: case 998: case 997: case 996: case 995: case 994: case 993: case 992: case 991: case 990: case 989: case 960: case 959: case 958: case 957: case 956: case 929: case 928: case 927: case 926: case 925: case 924: case 923: case 922: case 921: case 920: case 919: case 918: case 917: case 916: case 915: case 914: case 913: case 912: case 911: case 910: case 909: case 908: case 907: case 906: case 905: case 904: case 897: case 896: case 879: case 878: case 876: case 875: case 874: case 873: case 872: case 870: case 869: case 865: case 864: case 848: case 847: case 846: case 845: case 844: case 843: case 821: case 820: case 819: case 790: case 789: case 788: case 787: case 786: case 785: case 784: case 783: case 782: case 781: case 780: case 779: case 778: case 777: case 776: case 751: case 750: case 749: case 748: case 747: case 746: case 737: case 736: case 735: case 734: case 733: case 732: case 731: case 730: case 729: case 728: case 727: case 726: case 725: case 724: case 723: case 722: case 719: case 707: case 706: case 705: case 704: case 703: case 702: case 701: case 700: case 699: case 698: case 669: case 666: case 663: case 660: case 585: case 582: case 391: case 390: case 389: case 388: case 387: case 386: case 376: case 375: case 374: case 373: case 372: case 371: case 173: case 170: case 167: case 164: case 156: case 155: case 151: case 150: case 146: case 144: case 143: case 141: case 140: case 139: case 138: case 137: case 136: case 132: case 131: case 130: case 129: case 128: case 125: case 124: case 123: case 122: case 121: case 120: case 119: case 118: case 116: case 115: case 114: case 113: case 110: case 107: case 104: case 91: case 90: case 87: case 74: case 73: case 72: case 69: case 64: case 63: case 61: case 60: case 55: case 54: case 53: case 47: extract_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { 
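/* Classification pattern used throughout get_attr_memory: memory in both
   operand 0 and operand 1 yields MEMORY_BOTH, memory in operand 0 alone is a
   store, memory in a source operand alone is a load, and anything else is
   reported as touching no memory.  */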
return MEMORY_STORE; } else if (memory_operand (operands[1], VOIDmode)) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 44: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (((((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))) && (!((1 << which_alternative) & 0xfc))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 43: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0xfc)) && (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode)))) { return MEMORY_NONE; } else if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (((((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))) && (!((1 << which_alternative) & 0xfc))) && (memory_operand (operands[2], VOIDmode)))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case 466: case 464: case 442: case 440: case 401: case 400: case 399: case 398: case 397: case 396: case 395: case 394: case 393: case 392: case 361: case 360: case 359: case 358: case 357: case 356: case 355: case 354: case 353: case 352: case 347: case 341: case 321: case 319: case 299: case 297: case 244: case 108: case 105: case 103: case 81: case 80: case 62: case 56: case 42: case 41: extract_insn_cached (insn); if (memory_operand (operands[1], VOIDmode)) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else { return MEMORY_NONE; } case 79: case 78: case 40: case 39: extract_insn_cached (insn); if (memory_operand (operands[0], VOIDmode)) { return MEMORY_BOTH; } else { return MEMORY_LOAD; } case 77: case 58: case 57: case 49: case 48: case 38: case 37: case 36: extract_insn_cached (insn); if (memory_operand (operands[1], VOIDmode)) { return MEMORY_BOTH; } else { return MEMORY_STORE; } case 966: case 965: case 964: case 963: case 962: case 961: case 903: case 902: case 901: case 900: case 899: case 898: case 859: case 858: case 857: case 827: case 826: case 825: case 824: case 823: case 822: case 775: case 774: case 773: case 772: case 771: case 770: case 496: case 495: case 285: case 284: case 283: case 282: case 281: case 280: case 279: case 278: case 35: case 34: case 33: case 32: case 31: case 30: case 25: case 23: case 21: case 20: case 19: case 17: case 16: case 15: case 14: case 13: case 12: case 11: case 10: case 9: case 8: case 7: case 6: case 5: case 4: case 3: case 2: case 1: case 0: extract_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) || (memory_operand (operands[1], VOIDmode))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); case 1022: case 1021: case 718: case 717: case 716: case 715: case 714: case 713: case 712: case 711: case 710: case 709: case 708: case 697: case 696: case 686: case 685: case 684: case 683: case 682: case 681: case 680: case 679: case 678: case 677: case 676: case 675: case 674: case 673: 
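/* Unrecognized insns (the case -1 fall-through above, once the inline-asm
   check has been passed) and the pattern numbers listed in this run have no
   memory behaviour that the generated table can determine statically, so they
   are all reported as MEMORY_UNKNOWN below.  */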
case 668: case 667: case 665: case 664: case 662: case 661: case 659: case 658: case 647: case 646: case 645: case 644: case 643: case 642: case 550: case 549: case 548: case 547: case 546: case 545: case 544: case 543: case 542: case 541: case 540: case 539: case 538: case 537: case 536: case 535: case 532: case 531: case 530: case 529: case 528: case 526: case 525: case 524: case 523: case 508: case 507: case 506: case 505: case 504: case 503: case 502: case 501: case 500: case 499: case 450: case 449: case 423: case 422: case 405: case 404: case 385: case 384: case 383: case 382: case 381: case 380: case 379: case 378: case 377: case 370: case 369: case 368: case 367: case 366: case 365: case 364: case 363: case 362: case 351: case 287: case 286: case 275: case 273: case 272: case 270: case 269: case 267: case 266: case 225: case 177: case 161: case 160: case 127: case 126: case 117: case 99: case 98: case 94: case 93: case 75: case 29: case 28: case 27: case 26: case 24: case 22: case 18: case 849: case 852: case 1018: case 1019: case 1020: return MEMORY_UNKNOWN; case 534: case 533: case 159: case 158: case 157: case 154: case 153: case 152: case 149: case 148: case 147: case 618: case 619: case 620: case 621: case 622: case 623: case 624: case 625: case 626: case 627: case 628: case 629: return MEMORY_BOTH; case 45: case 51: case 67: case 85: case 630: case 631: case 632: case 633: case 634: case 635: case 636: case 637: case 638: case 639: case 640: case 641: case 851: case 853: return MEMORY_STORE; case 46: case 52: case 68: case 86: case 551: case 552: case 553: case 554: case 850: return MEMORY_LOAD; case 657: case 617: case 616: case 615: case 614: case 613: case 612: case 611: case 610: case 609: case 608: case 607: case 606: case 605: case 604: case 603: case 602: case 601: case 600: case 599: case 598: case 597: case 596: case 595: case 594: case 593: case 592: case 591: case 590: case 589: case 588: case 587: case 586: case 583: case 195: case 194: case 193: case 192: case 191: case 190: case 189: case 188: case 187: case 186: case 648: case 650: case 768: case 769: case 812: case 813: case 863: case 880: case 881: case 882: case 883: case 952: return MEMORY_NONE; default: extract_insn_cached (insn); if ((memory_operand (operands[0], VOIDmode)) && (memory_operand (operands[1], VOIDmode))) { return MEMORY_BOTH; } else if (memory_operand (operands[0], VOIDmode)) { return MEMORY_STORE; } else if ((memory_operand (operands[1], VOIDmode)) || (memory_operand (operands[2], VOIDmode))) { return MEMORY_LOAD; } else { return MEMORY_NONE; } } } int get_attr_modrm (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 695: case 694: case 693: case 692: case 691: case 690: case 689: case 688: case 687: extract_insn_cached (insn); if (constant_call_address_operand (operands[1], VOIDmode)) { return 0; } else { return 1; } case 671: extract_constrain_insn_cached (insn); if (((which_alternative == 1) && (const0_operand (operands[2], DImode))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 670: extract_constrain_insn_cached (insn); if (((which_alternative == 1) && (const0_operand (operands[2], SImode))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 580: case 579: case 578: case 577: case 576: case 575: case 574: case 573: case 572: case 571: case 570: case 568: case 567: case 566: case 565: case 563: case 
562: if (get_attr_unit (insn) == UNIT_I387) { return 0; } else { return 1; } case 522: case 521: case 520: case 519: case 518: case 517: case 516: case 515: extract_insn_cached (insn); if (constant_call_address_operand (operands[0], VOIDmode)) { return 0; } else { return 1; } case 278: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 0; } else if (which_alternative == 1) { return 1; } else if (which_alternative == 2) { return 0; } else { return 1; } case 223: case 222: case 221: case 219: case 218: case 217: case 216: extract_insn_cached (insn); if ((incdec_operand (operands[2], QImode)) && ((register_operand (operands[1], SImode)) || (register_operand (operands[1], HImode)))) { return 0; } else { return 1; } case 215: extract_constrain_insn_cached (insn); if (((which_alternative != 3) && (incdec_operand (operands[2], QImode))) && ((register_operand (operands[1], SImode)) || (register_operand (operands[1], HImode)))) { return 0; } else { return 1; } case 220: case 214: case 213: case 212: case 211: case 210: extract_insn_cached (insn); if ((incdec_operand (operands[2], HImode)) && ((register_operand (operands[1], SImode)) || (register_operand (operands[1], HImode)))) { return 0; } else { return 1; } case 209: extract_constrain_insn_cached (insn); if (((which_alternative != 2) && (incdec_operand (operands[2], HImode))) && ((register_operand (operands[1], SImode)) || (register_operand (operands[1], HImode)))) { return 0; } else { return 1; } case 208: case 207: case 206: case 205: case 204: case 203: extract_insn_cached (insn); if ((incdec_operand (operands[2], SImode)) && ((register_operand (operands[1], SImode)) || (register_operand (operands[1], HImode)))) { return 0; } else { return 1; } case 200: case 199: case 198: case 197: extract_insn_cached (insn); if ((incdec_operand (operands[2], DImode)) && ((register_operand (operands[1], SImode)) || (register_operand (operands[1], HImode)))) { return 0; } else { return 1; } case 202: case 201: case 196: extract_insn_cached (insn); if ((get_attr_type (insn) == TYPE_INCDEC) && ((register_operand (operands[1], SImode)) || (register_operand (operands[1], HImode)))) { return 0; } else { return 1; } case 137: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return 0; } else { return 1; } case 136: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return 0; } else { return 1; } case 656: case 655: case 654: case 128: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return 0; } else { return 1; } case 123: case 122: case 121: extract_constrain_insn_cached (insn); if ((! (((ix86_tune) == (CPU_K6)))) && (which_alternative == 0)) { return 0; } else { return 1; } case 114: case 113: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 101: case 100: case 96: case 95: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return 0; } else { return 1; } case 91: case 90: extract_constrain_insn_cached (insn); if ((((1 << which_alternative) & 0x7)) || ((((1 << which_alternative) & 0x18)) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode))))) { return 0; } else { return 1; } case 89: case 88: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && (! 
(memory_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 84: extract_constrain_insn_cached (insn); if (which_alternative == 0) { if (((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode)))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } } else if (((1 << which_alternative) & 0x6)) { return 0; } else { if (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } } case 83: extract_constrain_insn_cached (insn); if (which_alternative == 0) { if (((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode)))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } } else if (((1 << which_alternative) & 0x6)) { return 0; } else { if (((!((1 << which_alternative) & 0x7f0)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode))))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } } case 76: extract_constrain_insn_cached (insn); if ((which_alternative == 0) && (! (memory_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 71: extract_constrain_insn_cached (insn); if (((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 59: extract_constrain_insn_cached (insn); if ((((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 74: case 73: case 72: case 61: case 55: extract_insn_cached (insn); if ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode))) { return 0; } else { return 1; } case 70: case 66: case 65: case 50: extract_insn_cached (insn); if ((get_attr_type (insn) == TYPE_IMOV) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 86: case 85: case 68: case 67: case 52: case 51: case 46: case 45: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 0; } else { if ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode))) { return 0; } else { return 1; } } case 44: extract_constrain_insn_cached (insn); if (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode))))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 43: extract_constrain_insn_cached (insn); if (((!((1 << which_alternative) & 0xfc)) && ((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode))))) && ((register_operand (operands[0], VOIDmode)) && (immediate_operand (operands[1], VOIDmode)))) { return 0; } else { return 1; } case 79: case 78: case 40: case 39: extract_insn_cached (insn); if (! (memory_operand (operands[0], VOIDmode))) { return 0; } else { return 1; } case 77: case 58: case 57: case 49: case 48: case 38: case 37: case 36: extract_insn_cached (insn); if (! (memory_operand (operands[1], VOIDmode))) { return 0; } else { return 1; } case 584: case 581: case 559: case 556: case 281: case 280: case 279: case 176: case 175: case 174: case 172: case 171: case 169: case 168: case 166: case 165: case 163: case 162: case 145: case 142: case 135: case 134: case 133: case 34: case 31: case 118: case 417: case 425: case 426: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 0; } else { return 1; } case 657: case 647: case 646: case 645: case 644: case 643: case 642: case 641: case 640: case 639: case 638: case 637: case 636: case 635: case 634: case 633: case 632: case 631: case 630: case 629: case 628: case 627: case 626: case 625: case 624: case 623: case 622: case 621: case 620: case 619: case 618: case 617: case 616: case 615: case 614: case 613: case 612: case 611: case 610: case 609: case 608: case 607: case 606: case 605: case 604: case 603: case 602: case 601: case 600: case 599: case 598: case 597: case 596: case 595: case 594: case 593: case 592: case 591: case 590: case 589: case 588: case 587: case 586: case 583: case 561: case 558: case 555: case 534: case 533: case 391: case 390: case 389: case 388: case 387: case 386: case 376: case 375: case 374: case 373: case 372: case 371: case 161: case 160: case 159: case 158: case 157: case 154: case 153: case 152: case 149: case 148: case 147: case 146: case 144: case 143: case 141: case 138: case 132: case 131: case 130: case 102: case 97: case 92: case 33: case 30: case 28: case 25: case 23: case 21: case 20: case 19: case 47: case 53: case 54: case 60: case 87: case 497: case 498: case 509: case 524: case 525: case 526: case 528: case 551: case 552: case 553: case 554: case 853: return 0; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 1; } } enum attr_mode get_attr_mode (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 1015: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_DF; } else { return MODE_V2DF; } case 654: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return MODE_SF; } else { return MODE_SI; } case 281: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return MODE_QI; } else { return MODE_SI; } case 278: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return MODE_SI; } else { return MODE_DI; } case 340: case 318: case 298: case 296: case 216: case 215: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return MODE_QI; } else { return MODE_SI; } case 294: case 209: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return MODE_HI; } else { return MODE_SI; } case 135: case 134: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xf)) { return MODE_SF; } else { return MODE_DF; } case 114: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_SI; } else if (which_alternative == 1) { return MODE_DI; } else { return MODE_SI; } case 
113: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_SI; } else if (((1 << which_alternative) & 0x6)) { return MODE_DI; } else { return MODE_TI; } case 112: case 111: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return MODE_SI; } else if (which_alternative == 3) { return MODE_DI; } else { return MODE_TI; } case 101: case 100: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return MODE_XF; } else { return MODE_SI; } case 93: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_DF; } else if (((1 << which_alternative) & 0x6)) { return MODE_SI; } else { return MODE_DF; } case 84: case 83: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_SI; } else if (((1 << which_alternative) & 0xe)) { return MODE_DI; } else if (which_alternative == 4) { return MODE_SI; } else if (((1 << which_alternative) & 0xe0)) { return MODE_DI; } else if (which_alternative == 8) { return MODE_TI; } else { return MODE_DI; } case 82: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x1f)) { return MODE_DI; } else if (which_alternative == 5) { return MODE_TI; } else { return MODE_DI; } case 71: extract_constrain_insn_cached (insn); if ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) { return MODE_SI; } else { return MODE_QI; } case 59: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x38)) { return MODE_SI; } else if (which_alternative == 6) { return MODE_QI; } else if (((! ((optimize_size) != (0))) && (((TARGET_MOVX) != (0)) && (which_alternative == 2))) || ((((optimize_size) != (0)) || ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))) && ((((1 << which_alternative) & 0x7)) && (((TARGET_PARTIAL_REG_DEPENDENCY) != (0)) || (((TARGET_PARTIAL_REG_STALL) != (0)) && ((TARGET_QIMODE_MATH) == (0))))))) { return MODE_SI; } else { return MODE_QI; } case 50: extract_constrain_insn_cached (insn); if ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! 
(aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) || (((((1 << which_alternative) & 0x6)) && (aligned_operand (operands[1], HImode))) || ((which_alternative == 0) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_HIMODE_MATH) == (0)))))) { return MODE_SI; } else { return MODE_HI; } case 44: case 43: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return MODE_SI; } else if (which_alternative == 2) { return MODE_DI; } else if (((1 << which_alternative) & 0x18)) { return MODE_SI; } else if (which_alternative == 5) { return MODE_TI; } else { return MODE_SI; } case 18: case 25: case 26: case 30: case 33: extract_insn_cached (insn); if (GET_MODE (operands[1]) == SFmode) { return MODE_SF; } else if (GET_MODE (operands[1]) == DFmode) { return MODE_DF; } else { return MODE_XF; } case 31: case 32: case 34: case 35: extract_insn_cached (insn); if (GET_MODE (operands[1]) == SFmode) { return MODE_SF; } else { return MODE_DF; } case 65: case 66: case 70: if (get_attr_type (insn) == TYPE_IMOVX) { return MODE_SI; } else { return MODE_QI; } case 88: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_SF; } else if (which_alternative == 1) { return MODE_SI; } else { return MODE_SF; } case 89: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_SF; } else if (which_alternative == 1) { return MODE_DI; } else { return MODE_SF; } case 90: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x618)) { return MODE_SI; } else if (which_alternative == 5) { if ((((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && ((TARGET_SSE2) != (0))) && ((optimize_size) == (0))) { return MODE_TI; } else { return MODE_V4SF; } } else if (which_alternative == 6) { if (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))) { return MODE_V4SF; } else { return MODE_SF; } } else if (which_alternative == 11) { return MODE_DI; } else { return MODE_SF; } case 91: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x618)) { return MODE_SI; } else if (which_alternative == 5) { if ((((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && ((TARGET_SSE2) != (0))) && ((optimize_size) == (0))) { return MODE_TI; } else { return MODE_V4SF; } } else if (which_alternative == 6) { if (((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) || ((TARGET_SSE_PARTIAL_REGS) != (0))) { return MODE_V4SF; } else { return MODE_SF; } } else if (which_alternative == 11) { return MODE_DI; } else { return MODE_SF; } case 94: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_DF; } else if (which_alternative == 1) { return MODE_SI; } else { return MODE_DF; } case 95: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x18)) { return MODE_SI; } else if (which_alternative == 5) { if ((optimize_size) != (0)) { return MODE_V4SF; } else if ((TARGET_SSE_LOAD0_BY_PXOR) != (0)) { return MODE_TI; } else { return MODE_V2DF; } } else if (which_alternative == 6) { if ((optimize_size) != (0)) { return MODE_V4SF; } else if ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) { return MODE_V2DF; } else { return MODE_DF; } } else if (which_alternative == 7) { if ((TARGET_SSE_PARTIAL_REGS) != (0)) { return MODE_V2DF; } else { return MODE_DF; } } else { return MODE_DF; } case 96: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x18)) { return MODE_SI; } else if (which_alternative == 5) { if ((optimize_size) != (0)) { return MODE_V4SF; } else if 
((TARGET_SSE_LOAD0_BY_PXOR) != (0)) { return MODE_TI; } else { return MODE_V2DF; } } else if (which_alternative == 6) { if ((optimize_size) != (0)) { return MODE_V4SF; } else if ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)) { return MODE_V2DF; } else { return MODE_DF; } } else if (which_alternative == 7) { if ((TARGET_SSE_PARTIAL_REGS) != (0)) { return MODE_V2DF; } else { return MODE_DF; } } else { return MODE_DF; } case 98: case 99: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_XF; } else { return MODE_SI; } case 289: case 288: case 115: case 116: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_SI; } else { return MODE_DI; } case 128: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_SF; } else if (which_alternative == 1) { return MODE_XF; } else { return MODE_DF; } case 130: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_SF; } else { return MODE_XF; } case 131: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_DF; } else { return MODE_XF; } case 137: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_DF; } else { return MODE_SF; } case 411: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_HI; } else { return MODE_SI; } case 414: case 400: case 415: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return MODE_QI; } else { return MODE_SI; } case 699: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_TI; } } else if (which_alternative == 2) { if (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))) { return MODE_V4SF; } else { return MODE_TI; } } else { return MODE_TI; } case 700: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_TI; } } else if (which_alternative == 2) { if (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))) { return MODE_V4SF; } else { return MODE_TI; } } else { return MODE_TI; } case 705: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_V2DF; } } else if (which_alternative == 2) { if (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))) { return MODE_V4SF; } else { return MODE_V2DF; } } else { return MODE_V2DF; } case 706: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_TI; } } else if (which_alternative == 2) { if (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))) { return MODE_V4SF; } else { return MODE_TI; } } else { return MODE_TI; } case 707: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_TI; } } else if (which_alternative == 2) { if (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))) { return MODE_V4SF; } else { return MODE_TI; } } else { return MODE_TI; } case 719: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_TI; } } else if (which_alternative == 2) { if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_TI; } } else { return MODE_TI; } case 720: 
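/* Cases 720/721 appear to be 64-bit moves with SSE alternatives: the XMM
   alternatives report mode TI, or V4SF when optimizing for size or when
   TARGET_SSE_TYPELESS_STORES prefers the typeless movaps form (believed to
   have a shorter encoding), while the remaining alternatives operate in DI
   mode.  This is an explanatory reading of the generated test, not text from
   the machine description itself.  */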
extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xc)) { if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_TI; } } else if (which_alternative == 4) { if (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))) { return MODE_V4SF; } else { return MODE_TI; } } else { return MODE_DI; } case 721: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xc)) { if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_TI; } } else if (which_alternative == 4) { if (((TARGET_SSE_TYPELESS_STORES) != (0)) || ((optimize_size) != (0))) { return MODE_V4SF; } else { return MODE_TI; } } else { return MODE_DI; } case 768: extract_constrain_insn_cached (insn); if ((((TARGET_SSE_LOAD0_BY_PXOR) != (0)) && ((TARGET_SSE2) != (0))) && ((optimize_size) == (0))) { return MODE_TI; } else { return MODE_V4SF; } case 952: extract_constrain_insn_cached (insn); if ((optimize_size) != (0)) { return MODE_V4SF; } else { return MODE_TI; } case 854: case 855: case 856: case 857: case 858: case 859: case 860: case 861: case 862: case 864: case 865: case 866: case 867: case 868: case 869: case 870: return MODE_V2SF; case 756: case 757: case 758: case 759: case 884: case 886: case 888: case 890: case 892: case 894: case 896: case 898: case 899: case 904: case 905: case 908: case 910: case 911: case 914: case 929: case 989: case 990: case 1002: case 1003: case 1013: case 1017: case 1024: case 1026: case 1028: return MODE_V2DF; case 698: case 722: case 723: case 724: case 725: case 728: case 730: case 731: case 732: case 733: case 737: case 738: case 740: case 742: case 744: case 746: case 748: case 750: case 752: case 753: case 754: case 755: case 769: case 770: case 771: case 776: case 777: case 778: case 780: case 782: case 783: case 928: case 1023: case 1025: case 1027: case 1029: case 1030: return MODE_V4SF; case 760: case 761: case 762: case 763: case 764: case 765: case 766: case 767: case 871: case 872: case 873: case 874: case 875: case 876: case 877: case 878: case 879: case 906: case 907: case 909: case 912: case 913: case 915: case 916: case 917: case 918: case 919: case 930: case 931: case 932: case 933: case 934: case 935: case 936: case 937: case 938: case 939: case 940: case 941: case 942: case 943: case 944: case 945: case 946: case 947: case 948: case 949: case 950: case 951: case 953: case 954: case 955: case 956: case 957: case 958: case 959: case 960: case 961: case 962: case 963: case 964: case 965: case 966: case 967: case 968: case 969: case 970: case 971: case 972: case 973: case 974: case 975: case 976: case 977: case 978: case 979: case 980: case 981: case 982: case 983: case 984: case 985: case 986: case 987: case 988: case 991: case 992: case 993: case 994: case 995: case 996: case 997: case 998: case 999: case 1000: case 1001: case 1004: case 1005: case 1006: case 1007: case 1008: case 1009: case 1010: case 1011: case 1012: case 1031: return MODE_TI; case 23: case 24: case 102: case 174: case 175: case 176: case 374: case 375: case 376: case 390: case 391: case 561: case 575: case 588: case 589: case 590: case 591: case 592: case 596: case 600: case 604: case 607: case 610: case 611: case 612: case 613: case 614: case 615: case 616: case 657: return MODE_XF; case 21: case 22: case 97: case 129: case 140: case 144: case 145: case 146: case 151: case 155: case 156: case 168: case 169: case 170: case 171: case 172: case 173: case 372: case 373: case 387: case 388: case 389: case 496: case 558: case 559: case 560: case 567: case 
568: case 569: case 584: case 585: case 586: case 587: case 593: case 595: case 597: case 599: case 601: case 603: case 605: case 608: case 655: case 656: case 663: case 669: case 885: case 887: case 889: case 891: case 893: case 895: case 900: case 901: case 902: case 903: case 924: case 925: case 927: case 1014: case 1016: case 1032: case 1033: return MODE_DF; case 19: case 20: case 92: case 132: case 133: case 136: case 138: case 139: case 141: case 142: case 143: case 150: case 162: case 163: case 164: case 165: case 166: case 167: case 371: case 386: case 495: case 555: case 556: case 557: case 562: case 563: case 564: case 572: case 573: case 574: case 578: case 579: case 580: case 581: case 582: case 583: case 594: case 598: case 602: case 606: case 609: case 660: case 666: case 734: case 735: case 736: case 739: case 741: case 743: case 745: case 747: case 749: case 751: case 772: case 773: case 774: case 775: case 779: case 781: case 784: case 785: case 786: case 789: case 790: case 897: case 926: return MODE_SF; case 0: case 1: case 2: case 76: case 77: case 78: case 79: case 81: case 85: case 86: case 87: case 118: case 119: case 120: case 147: case 148: case 149: case 178: case 179: case 189: case 196: case 197: case 198: case 199: case 200: case 226: case 227: case 228: case 229: case 239: case 247: case 254: case 256: case 258: case 261: case 268: case 273: case 274: case 305: case 306: case 307: case 327: case 328: case 329: case 352: case 353: case 392: case 393: case 402: case 403: case 417: case 419: case 421: case 446: case 448: case 470: case 482: case 618: case 625: case 630: case 637: case 648: case 649: case 671: case 672: case 701: case 702: case 703: case 704: case 726: case 727: case 729: case 788: case 791: case 792: case 793: case 794: case 795: case 796: case 797: case 798: case 799: case 800: case 801: case 802: case 803: case 804: case 805: case 806: case 807: case 808: case 809: case 810: case 811: case 812: case 813: case 814: case 815: case 816: case 817: case 818: case 819: case 820: case 821: case 822: case 823: case 824: case 825: case 826: case 827: case 828: case 829: case 830: case 831: case 832: case 833: case 834: case 835: case 836: case 837: case 838: case 839: case 840: case 841: case 842: case 843: case 844: case 845: case 846: case 847: case 848: case 853: case 921: case 923: return MODE_DI; case 3: case 4: case 5: case 28: case 29: case 36: case 37: case 38: case 39: case 40: case 41: case 42: case 45: case 46: case 47: case 54: case 63: case 64: case 69: case 80: case 103: case 104: case 108: case 109: case 110: case 121: case 122: case 124: case 125: case 152: case 153: case 154: case 182: case 183: case 184: case 186: case 187: case 188: case 190: case 191: case 192: case 193: case 194: case 195: case 201: case 202: case 203: case 204: case 205: case 206: case 207: case 208: case 213: case 232: case 233: case 234: case 235: case 236: case 237: case 238: case 248: case 249: case 255: case 257: case 259: case 260: case 262: case 263: case 271: case 272: case 275: case 276: case 279: case 290: case 291: case 292: case 293: case 308: case 309: case 310: case 311: case 312: case 313: case 314: case 330: case 331: case 332: case 333: case 334: case 335: case 336: case 354: case 355: case 356: case 357: case 394: case 395: case 396: case 397: case 406: case 407: case 408: case 409: case 410: case 424: case 425: case 426: case 429: case 430: case 433: case 434: case 453: case 454: case 457: case 458: case 473: case 474: case 485: case 486: case 
565: case 566: case 570: case 571: case 576: case 577: case 619: case 620: case 626: case 627: case 628: case 629: case 631: case 632: case 638: case 639: case 650: case 651: case 653: case 670: case 787: case 920: case 922: return MODE_SI; case 6: case 7: case 8: case 48: case 51: case 52: case 53: case 55: case 56: case 57: case 105: case 106: case 107: case 123: case 157: case 158: case 159: case 160: case 161: case 181: case 210: case 211: case 212: case 214: case 231: case 240: case 241: case 242: case 250: case 277: case 280: case 295: case 315: case 316: case 317: case 337: case 338: case 339: case 358: case 359: case 398: case 399: case 412: case 413: case 436: case 438: case 460: case 462: case 476: case 488: case 621: case 622: case 633: case 634: case 652: return MODE_HI; case 9: case 10: case 11: case 12: case 13: case 14: case 15: case 16: case 17: case 49: case 58: case 60: case 61: case 62: case 67: case 68: case 72: case 73: case 74: case 180: case 185: case 217: case 218: case 219: case 220: case 221: case 222: case 223: case 224: case 230: case 243: case 244: case 245: case 246: case 251: case 252: case 253: case 264: case 265: case 282: case 283: case 284: case 285: case 297: case 299: case 300: case 301: case 302: case 303: case 304: case 319: case 320: case 321: case 322: case 323: case 324: case 325: case 326: case 341: case 342: case 343: case 344: case 345: case 346: case 347: case 348: case 349: case 350: case 360: case 361: case 401: case 416: case 441: case 442: case 444: case 465: case 466: case 468: case 479: case 480: case 491: case 492: case 493: case 494: case 623: case 624: case 635: case 636: case 640: case 641: case 642: case 643: case 644: case 645: case 646: case 647: return MODE_QI; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return MODE_UNKNOWN; } } enum attr_pent_pair get_attr_pent_pair (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 695: case 694: case 693: case 692: case 691: case 690: case 689: case 688: case 687: extract_insn_cached (insn); if (constant_call_address_operand (operands[1], VOIDmode)) { return PENT_PAIR_PV; } else { return PENT_PAIR_NP; } case 671: case 670: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else { return PENT_PAIR_UV; } case 522: case 521: case 520: case 519: case 518: case 517: case 516: case 515: extract_insn_cached (insn); if (constant_call_address_operand (operands[0], VOIDmode)) { return PENT_PAIR_PV; } else { return PENT_PAIR_NP; } case 492: case 490: case 479: case 477: extract_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (const1_operand (operands[1], VOIDmode)) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 491: case 489: case 488: case 487: case 486: case 485: case 484: case 483: case 482: case 481: case 480: case 478: case 476: case 475: case 474: case 473: case 472: case 471: case 470: case 469: extract_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (const1_operand (operands[2], VOIDmode)) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 466: case 464: case 442: case 440: extract_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (const_int_operand (operands[1], VOIDmode)) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 468: case 467: case 465: case 463: 
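/* get_attr_pent_pair () evaluates the "pent_pair" attribute, which appears
   to classify insns for Pentium (P5) U/V pipe pairing: PENT_PAIR_UV pairs
   in either pipe, PENT_PAIR_PU only in the U pipe, PENT_PAIR_PV only in
   the V pipe, and PENT_PAIR_NP does not pair.  Most cases first test
   get_attr_imm_disp (insn); when that is IMM_DISP_TRUE (presumably an insn
   that needs both an immediate and a displacement) the insn is treated as
   non-pairable.  */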
case 462: case 461: case 460: case 459: case 458: case 457: case 456: case 455: case 454: case 453: case 452: case 451: case 448: case 447: case 446: case 445: case 444: case 443: case 441: case 439: case 438: case 437: case 436: case 435: case 434: case 433: case 432: case 431: case 430: case 429: case 428: case 427: case 421: case 420: case 419: case 418: extract_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (const_int_operand (operands[2], VOIDmode)) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 426: case 425: case 417: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((which_alternative == 1) && (const_int_operand (operands[2], VOIDmode))) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 414: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((get_attr_type (insn) == TYPE_ALU) || (which_alternative == 2)) { return PENT_PAIR_UV; } else if ((get_attr_type (insn) == TYPE_ISHIFT) && (const_int_operand (operands[2], VOIDmode))) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 413: case 412: extract_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((get_attr_type (insn) == TYPE_ALU) || ((get_attr_type (insn) == TYPE_ISHIFT) && (const_int_operand (operands[2], VOIDmode)))) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 411: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU)) { if (which_alternative == 0) { return PENT_PAIR_PU; } else { return PENT_PAIR_UV; } } else if ((get_attr_type (insn) == TYPE_ISHIFT) && (const_int_operand (operands[2], VOIDmode))) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 410: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) { return PENT_PAIR_UV; } else if (const_int_operand (operands[2], VOIDmode)) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 408: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((which_alternative != 0) || (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode)))) { return PENT_PAIR_UV; } else if (const_int_operand (operands[2], VOIDmode)) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 416: case 415: case 409: case 403: extract_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (get_attr_type (insn) == TYPE_ALU) { return PENT_PAIR_UV; } else if ((get_attr_type (insn) == TYPE_ISHIFT) && (const_int_operand (operands[2], VOIDmode))) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 407: case 402: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((which_alternative != 0) || (get_attr_type (insn) == TYPE_ALU)) { return PENT_PAIR_UV; } else if ((get_attr_type (insn) == TYPE_ISHIFT) && (const_int_operand (operands[2], VOIDmode))) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 294: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (((1 << which_alternative) & 
0x3)) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 290: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (((1 << which_alternative) & 0x3)) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 288: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (((1 << which_alternative) & 0x7)) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 215: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (((which_alternative != 3) && (! (incdec_operand (operands[2], QImode)))) || ((which_alternative == 3) || ((which_alternative != 3) && (incdec_operand (operands[2], QImode))))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 209: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (((which_alternative != 2) && (! (incdec_operand (operands[2], HImode)))) || ((which_alternative == 2) || ((which_alternative != 2) && (incdec_operand (operands[2], HImode))))) { if (((1 << which_alternative) & 0x3)) { return PENT_PAIR_PU; } else { return PENT_PAIR_UV; } } else { return PENT_PAIR_NP; } case 202: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((get_attr_type (insn) == TYPE_ALU) || (((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode))) || (get_attr_type (insn) == TYPE_INCDEC))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 201: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((get_attr_type (insn) == TYPE_ALU) || (((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode))) || (get_attr_type (insn) == TYPE_INCDEC))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 196: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((get_attr_type (insn) == TYPE_ALU) || (((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode))) || (get_attr_type (insn) == TYPE_INCDEC))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 114: case 113: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (which_alternative == 1) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 109: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (which_alternative != 0) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 106: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (which_alternative != 0) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 91: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (((1 << which_alternative) & 0x18)) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 90: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (((1 << which_alternative) & 0x18)) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 89: case 88: extract_constrain_insn_cached (insn); if ((which_alternative == 1) && (! 
(memory_operand (operands[1], VOIDmode)))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 84: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((!((1 << which_alternative) & 0x7f0)) && (((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode)))) || (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 83: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((!((1 << which_alternative) & 0x7f0)) && (((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], DImode)))) || (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 76: extract_constrain_insn_cached (insn); if ((which_alternative == 0) && (! (memory_operand (operands[1], VOIDmode)))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 71: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 70: case 66: case 65: if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (get_attr_type (insn) == TYPE_IMOV) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 59: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (((optimize_size) != (0)) || (((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0)))) || (((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x28)) && ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2)))))) { if (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) { return PENT_PAIR_PU; } else { return PENT_PAIR_UV; } } else { return PENT_PAIR_NP; } case 50: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if (get_attr_type (insn) == TYPE_IMOV) { if ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) || (get_attr_mode (insn) == MODE_HI)) { return PENT_PAIR_PU; } else { return PENT_PAIR_UV; } } else { return PENT_PAIR_NP; } case 57: case 48: extract_insn_cached (insn); if (! (memory_operand (operands[1], VOIDmode))) { return PENT_PAIR_PU; } else { return PENT_PAIR_NP; } case 44: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((!((1 << which_alternative) & 0xfc)) && (((! ((flag_pic) != (0))) || (! 
(symbolic_operand (operands[1], SImode)))) || (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 43: extract_constrain_insn_cached (insn); if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else if ((!((1 << which_alternative) & 0xfc)) && (((! ((flag_pic) != (0))) || (! (symbolic_operand (operands[1], SImode)))) || (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 79: case 78: case 40: case 39: extract_insn_cached (insn); if (! (memory_operand (operands[0], VOIDmode))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 77: case 58: case 49: case 38: case 37: case 36: extract_insn_cached (insn); if (! (memory_operand (operands[1], VOIDmode))) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 399: case 339: case 338: case 337: case 317: case 316: case 315: case 295: case 242: case 241: case 240: case 214: case 212: case 211: case 210: case 105: case 56: case 55: case 52: case 51: case 8: case 7: case 6: if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else { return PENT_PAIR_PU; } case 672: case 401: case 397: case 396: case 393: case 350: case 349: case 348: case 347: case 346: case 345: case 344: case 343: case 342: case 341: case 340: case 336: case 335: case 334: case 333: case 332: case 331: case 330: case 329: case 328: case 327: case 326: case 325: case 324: case 323: case 322: case 321: case 320: case 319: case 318: case 314: case 313: case 312: case 311: case 310: case 309: case 308: case 307: case 306: case 305: case 304: case 303: case 302: case 301: case 300: case 299: case 298: case 297: case 296: case 293: case 292: case 291: case 289: case 285: case 284: case 283: case 246: case 245: case 244: case 243: case 239: case 238: case 237: case 236: case 235: case 234: case 229: case 228: case 227: case 224: case 223: case 222: case 221: case 220: case 219: case 218: case 217: case 216: case 213: case 208: case 207: case 206: case 205: case 204: case 203: case 200: case 199: case 198: case 197: case 185: case 184: case 179: case 108: case 103: case 86: case 85: case 81: case 80: case 74: case 73: case 72: case 68: case 67: case 62: case 61: case 46: case 45: case 42: case 41: case 17: case 16: case 15: case 14: case 13: case 12: case 11: case 10: case 9: case 5: case 4: case 3: case 2: case 1: case 0: if (get_attr_imm_disp (insn) == IMM_DISP_TRUE) { return PENT_PAIR_NP; } else { return PENT_PAIR_UV; } case 278: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return PENT_PAIR_UV; } else if (which_alternative == 1) { return PENT_PAIR_NP; } else if (which_alternative == 2) { return PENT_PAIR_UV; } else if (which_alternative == 3) { return PENT_PAIR_NP; } else { return PENT_PAIR_UV; } case 279: case 280: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return PENT_PAIR_UV; } else if (which_alternative == 1) { return PENT_PAIR_NP; } else { return PENT_PAIR_UV; } case 281: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return PENT_PAIR_UV; } else if (which_alternative == 1) { return PENT_PAIR_NP; } else if (which_alternative == 2) { return PENT_PAIR_UV; } else { return PENT_PAIR_NP; } case 527: case 514: case 513: case 512: case 511: case 510: case 509: case 498: case 497: return PENT_PAIR_PV; case 178: case 180: case 181: case 182: case 183: case 226: case 230: case 231: case 232: case 233: case 648: case 650: 
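/* The remaining cases below return a fixed pairing class.  The case -1 /
   default tail handles insns recog did not match: a pattern that is
   neither an ASM_INPUT nor an asm with operands triggers
   fatal_insn_not_found (), otherwise the conservative answer
   PENT_PAIR_NP is returned.  */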
return PENT_PAIR_PU; case 554: case 553: case 552: case 551: case 195: case 194: case 193: case 192: case 191: case 190: case 189: case 188: case 187: case 186: return PENT_PAIR_UV; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return PENT_PAIR_NP; } } enum attr_pent_prefix get_attr_pent_prefix (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 569: case 568: case 564: case 563: if (get_attr_unit (insn) == UNIT_SSE) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 559: case 556: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 209: case 136: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 135: case 134: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0xf)) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 122: case 121: extract_constrain_insn_cached (insn); if ((((ix86_tune) == (CPU_K6))) || (which_alternative != 0)) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 114: case 113: extract_constrain_insn_cached (insn); if (which_alternative != 1) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 288: case 112: case 111: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x7)) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 411: case 137: case 109: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 96: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x1f)) || (((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0)))))) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 95: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x1f)) || (((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0)))))) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 91: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x1f)) || ((((1 << which_alternative) & 0x1e0)) && (((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))))) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 90: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x1f)) || ((((1 << which_alternative) & 0x1e0)) && (((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! 
((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78))))) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 84: case 83: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7e0)) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 721: case 720: case 656: case 654: case 290: case 249: case 248: case 247: case 172: case 169: case 166: case 163: case 128: case 82: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 71: extract_constrain_insn_cached (insn); if ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 70: case 66: case 65: if (get_attr_type (insn) == TYPE_IMOVX) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 59: extract_constrain_insn_cached (insn); if (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 50: extract_constrain_insn_cached (insn); if ((((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) || (get_attr_mode (insn) == MODE_HI)) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 44: case 43: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xfc)) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 584: case 581: case 34: case 31: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return PENT_PREFIX_TRUE; } else { return PENT_PREFIX_FALSE; } case 1033: case 1032: case 1031: case 1030: case 1029: case 1028: case 1027: case 1026: case 1025: case 1024: case 1023: case 1020: case 1019: case 1018: case 1017: case 1016: case 1015: case 1014: case 1013: case 1012: case 1011: case 1010: case 1009: case 1008: case 1007: case 1006: case 1005: case 1004: case 1003: case 1002: case 1001: case 1000: case 999: case 998: case 997: case 996: case 995: case 994: case 993: case 992: case 991: case 990: case 989: case 988: case 987: case 986: case 985: case 984: case 983: case 982: case 981: case 980: case 979: case 978: case 977: case 976: case 975: case 974: case 973: case 972: case 971: case 970: case 969: case 968: case 967: case 966: case 965: case 964: case 963: case 962: case 961: case 960: case 959: case 958: case 957: case 956: case 955: case 954: case 953: case 952: case 951: case 950: case 949: case 948: case 947: case 946: case 945: case 944: case 943: case 942: case 941: case 940: case 939: case 938: case 937: case 936: case 935: case 934: case 933: case 932: case 931: case 930: case 929: case 928: case 927: case 926: case 925: case 924: case 923: case 922: case 921: case 920: case 919: case 918: case 917: case 916: case 915: case 914: case 913: case 912: case 911: case 910: case 909: case 908: case 907: case 906: case 905: case 904: case 903: case 902: case 901: case 900: case 899: case 898: case 897: case 896: case 895: case 894: case 893: case 892: case 891: case 890: case 889: case 888: case 887: case 886: case 885: case 884: 
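/* This long run of case labels returns PENT_PREFIX_TRUE unconditionally;
   "pent_prefix" presumably marks insns whose encoding carries an opcode
   prefix, which matters for Pentium decode and pairing.  Insn codes not
   listed here (and not handled by the conditional cases above) fall
   through to PENT_PREFIX_FALSE.  */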
case 883: case 882: case 881: case 880: case 879: case 878: case 877: case 876: case 875: case 874: case 873: case 872: case 871: case 870: case 869: case 868: case 867: case 866: case 865: case 864: case 863: case 862: case 861: case 860: case 859: case 858: case 857: case 856: case 855: case 854: case 852: case 851: case 850: case 849: case 848: case 847: case 846: case 845: case 844: case 843: case 842: case 841: case 840: case 839: case 838: case 837: case 836: case 835: case 834: case 833: case 832: case 831: case 830: case 829: case 828: case 827: case 826: case 825: case 824: case 823: case 822: case 821: case 820: case 819: case 818: case 817: case 816: case 815: case 814: case 813: case 812: case 811: case 810: case 809: case 808: case 807: case 806: case 805: case 804: case 803: case 802: case 801: case 800: case 799: case 798: case 797: case 796: case 795: case 794: case 793: case 792: case 791: case 790: case 789: case 788: case 787: case 786: case 785: case 784: case 783: case 782: case 781: case 780: case 779: case 778: case 777: case 776: case 775: case 774: case 773: case 772: case 771: case 770: case 769: case 768: case 767: case 766: case 765: case 764: case 763: case 762: case 761: case 760: case 759: case 758: case 757: case 756: case 755: case 754: case 753: case 752: case 751: case 750: case 749: case 748: case 747: case 746: case 745: case 744: case 743: case 742: case 741: case 740: case 739: case 738: case 737: case 736: case 735: case 734: case 733: case 732: case 731: case 730: case 729: case 728: case 727: case 726: case 725: case 724: case 723: case 722: case 719: case 707: case 706: case 705: case 704: case 703: case 702: case 701: case 700: case 699: case 698: case 669: case 666: case 663: case 660: case 653: case 652: case 651: case 649: case 647: case 646: case 645: case 644: case 643: case 642: case 641: case 640: case 639: case 638: case 637: case 634: case 633: case 629: case 628: case 627: case 626: case 625: case 622: case 621: case 585: case 582: case 560: case 557: case 543: case 542: case 541: case 540: case 539: case 537: case 525: case 496: case 495: case 494: case 493: case 488: case 476: case 462: case 460: case 438: case 436: case 424: case 413: case 412: case 406: case 399: case 398: case 359: case 358: case 339: case 338: case 337: case 317: case 316: case 315: case 295: case 294: case 280: case 277: case 250: case 242: case 241: case 240: case 231: case 214: case 212: case 211: case 210: case 181: case 173: case 170: case 167: case 164: case 161: case 160: case 159: case 158: case 157: case 156: case 155: case 151: case 150: case 140: case 139: case 129: case 125: case 124: case 123: case 120: case 119: case 116: case 115: case 110: case 107: case 106: case 105: case 104: case 69: case 64: case 63: case 57: case 56: case 55: case 53: case 52: case 51: case 48: case 35: case 32: case 8: case 7: case 6: return PENT_PREFIX_TRUE; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return PENT_PREFIX_FALSE; } } int get_attr_prefix_rex (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 721: case 720: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x1c)) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 415: case 414: case 400: extract_constrain_insn_cached (insn); if (((which_alternative == 0) && ((x86_extended_QIreg_mentioned_p (insn)) != (0))) || ((x86_extended_reg_mentioned_p (insn)) != 
(0))) { return 1; } else { return 0; } case 281: extract_constrain_insn_cached (insn); if (((((1 << which_alternative) & 0x7)) && ((x86_extended_QIreg_mentioned_p (insn)) != (0))) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 278: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x3)) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 340: case 318: case 298: case 296: case 216: case 215: extract_constrain_insn_cached (insn); if (((((1 << which_alternative) & 0x3)) && ((x86_extended_QIreg_mentioned_p (insn)) != (0))) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 114: extract_constrain_insn_cached (insn); if ((which_alternative == 1) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 113: extract_constrain_insn_cached (insn); if ((((1 << which_alternative) & 0x6)) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 112: case 111: extract_constrain_insn_cached (insn); if ((which_alternative == 3) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 91: case 90: extract_constrain_insn_cached (insn); if ((which_alternative == 11) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 84: case 83: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x111)) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 82: extract_constrain_insn_cached (insn); if ((which_alternative != 5) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 289: case 288: case 116: case 115: case 76: extract_constrain_insn_cached (insn); if ((which_alternative != 0) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 71: extract_constrain_insn_cached (insn); if ((((q_regs_operand (operands[0], QImode)) && (! ((TARGET_MOVX) != (0)))) && ((x86_extended_QIreg_mentioned_p (insn)) != (0))) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 70: case 66: case 65: extract_constrain_insn_cached (insn); if (((! (get_attr_type (insn) == TYPE_IMOVX)) && ((x86_extended_QIreg_mentioned_p (insn)) != (0))) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 59: extract_constrain_insn_cached (insn); if ((((which_alternative == 6) || ((!((1 << which_alternative) & 0x78)) && ((((optimize_size) != (0)) || ((! ((TARGET_MOVX) != (0))) || (which_alternative != 2))) && ((((! ((optimize_size) != (0))) && (((TARGET_MOVX) != (0)) && (which_alternative == 2))) || ((!((1 << which_alternative) & 0x7)) || (! ((TARGET_PARTIAL_REG_DEPENDENCY) != (0))))) && (((! ((optimize_size) != (0))) && (((TARGET_MOVX) != (0)) && (which_alternative == 2))) || ((!((1 << which_alternative) & 0x7)) || ((! ((TARGET_PARTIAL_REG_STALL) != (0))) || (! 
((TARGET_QIMODE_MATH) == (0)))))))))) && ((x86_extended_QIreg_mentioned_p (insn)) != (0))) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 44: case 43: extract_constrain_insn_cached (insn); if ((which_alternative == 2) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 647: case 646: case 645: case 644: case 643: case 642: case 641: case 640: case 636: case 635: case 624: case 623: case 494: case 493: case 492: case 491: case 480: case 479: case 468: case 466: case 465: case 444: case 442: case 441: case 416: case 401: case 361: case 360: case 350: case 349: case 348: case 347: case 346: case 345: case 344: case 343: case 342: case 341: case 326: case 325: case 324: case 323: case 322: case 321: case 320: case 319: case 304: case 303: case 302: case 301: case 300: case 299: case 297: case 285: case 284: case 283: case 282: case 265: case 264: case 253: case 252: case 251: case 246: case 245: case 244: case 243: case 230: case 224: case 223: case 222: case 221: case 220: case 219: case 218: case 217: case 185: case 180: case 74: case 73: case 72: case 68: case 67: case 62: case 61: case 60: case 58: case 49: case 17: case 16: case 15: case 14: case 13: case 12: case 11: case 10: case 9: extract_constrain_insn_cached (insn); if (((x86_extended_QIreg_mentioned_p (insn)) != (0)) || ((x86_extended_reg_mentioned_p (insn)) != (0))) { return 1; } else { return 0; } case 923: case 921: case 853: case 848: case 847: case 846: case 845: case 844: case 843: case 842: case 841: case 840: case 839: case 838: case 837: case 836: case 835: case 834: case 833: case 832: case 831: case 830: case 829: case 828: case 827: case 826: case 825: case 824: case 823: case 822: case 821: case 820: case 819: case 818: case 817: case 816: case 815: case 814: case 813: case 812: case 811: case 810: case 809: case 808: case 807: case 806: case 805: case 804: case 803: case 802: case 801: case 800: case 799: case 798: case 797: case 796: case 795: case 794: case 793: case 792: case 791: case 788: case 729: case 727: case 726: case 704: case 703: case 702: case 701: case 672: case 671: case 649: case 648: case 637: case 630: case 625: case 618: case 482: case 470: case 448: case 446: case 421: case 419: case 417: case 403: case 402: case 393: case 392: case 353: case 352: case 329: case 328: case 327: case 307: case 306: case 305: case 274: case 273: case 268: case 261: case 258: case 256: case 254: case 247: case 239: case 229: case 228: case 227: case 226: case 200: case 199: case 198: case 197: case 196: case 189: case 179: case 178: case 149: case 148: case 147: case 120: case 119: case 118: case 87: case 86: case 85: case 81: case 2: case 1: case 0: return 1; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: extract_constrain_insn_cached (insn); if ((x86_extended_reg_mentioned_p (insn)) != (0)) { return 1; } else { return 0; } } } int get_attr_prefix_0f (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 569: case 568: case 564: case 563: if (get_attr_unit (insn) == UNIT_SSE) { return 1; } else { return 0; } case 559: case 556: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return 1; } else { return 0; } case 426: case 425: case 417: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 0; } else { return 0; } case 250: case 249: case 248: case 247: extract_constrain_insn_cached (insn); if (((1 << 
which_alternative) & 0x3)) { return 0; } else { return 1; } case 136: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return 1; } else { return 0; } case 135: case 134: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0xf)) { return 1; } else { return 0; } case 114: case 113: extract_constrain_insn_cached (insn); if (which_alternative != 1) { return 1; } else { return 0; } case 288: case 112: case 111: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x7)) { return 1; } else { return 0; } case 137: case 109: case 106: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 1; } else { return 0; } case 96: case 95: case 91: case 90: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x1f)) { return 1; } else { return 0; } case 84: case 83: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7e0)) { return 1; } else { return 0; } case 721: case 720: case 656: case 654: case 294: case 290: case 172: case 169: case 166: case 163: case 128: case 82: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return 1; } else { return 0; } case 71: extract_constrain_insn_cached (insn); if ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) { return 1; } else { return 0; } case 70: case 66: case 65: if (get_attr_type (insn) == TYPE_IMOVX) { return 1; } else { return 0; } case 59: extract_constrain_insn_cached (insn); if (((! ((optimize_size) != (0))) && ((which_alternative != 3) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_QIMODE_MATH) == (0)))))) && ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2)))) { return 1; } else { return 0; } case 50: extract_constrain_insn_cached (insn); if (((! ((optimize_size) != (0))) && (((which_alternative != 0) || ((! ((TARGET_PARTIAL_REG_STALL) == (0))) && (! ((TARGET_HIMODE_MATH) == (0))))) && ((!((1 << which_alternative) & 0x6)) || (! (aligned_operand (operands[1], HImode)))))) && (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5)))) { return 1; } else { return 0; } case 44: case 43: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xfc)) { return 1; } else { return 0; } case 584: case 581: case 34: case 31: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return 1; } else { return 0; } case 121: case 122: case 123: extract_constrain_insn_cached (insn); if ((! 
(((ix86_tune) == (CPU_K6)))) && (which_alternative == 0)) { return 0; } else { return 1; } case 1033: case 1032: case 1031: case 1030: case 1029: case 1028: case 1027: case 1026: case 1025: case 1024: case 1023: case 1020: case 1019: case 1018: case 1017: case 1016: case 1015: case 1014: case 1013: case 1012: case 1011: case 1010: case 1009: case 1008: case 1007: case 1006: case 1005: case 1004: case 1003: case 1002: case 1001: case 1000: case 999: case 998: case 997: case 996: case 995: case 994: case 993: case 992: case 991: case 990: case 989: case 988: case 987: case 986: case 985: case 984: case 983: case 982: case 981: case 980: case 979: case 978: case 977: case 976: case 975: case 974: case 973: case 972: case 971: case 970: case 969: case 968: case 967: case 966: case 965: case 964: case 963: case 962: case 961: case 960: case 959: case 958: case 957: case 956: case 955: case 954: case 953: case 952: case 951: case 950: case 949: case 948: case 947: case 946: case 945: case 944: case 943: case 942: case 941: case 940: case 939: case 938: case 937: case 936: case 935: case 934: case 933: case 932: case 931: case 930: case 929: case 928: case 927: case 926: case 925: case 924: case 923: case 922: case 921: case 920: case 919: case 918: case 917: case 916: case 915: case 914: case 913: case 912: case 911: case 910: case 909: case 908: case 907: case 906: case 905: case 904: case 903: case 902: case 901: case 900: case 899: case 898: case 897: case 896: case 895: case 894: case 893: case 892: case 891: case 890: case 889: case 888: case 887: case 886: case 885: case 884: case 883: case 882: case 881: case 880: case 879: case 878: case 877: case 876: case 875: case 874: case 873: case 872: case 871: case 870: case 869: case 868: case 867: case 866: case 865: case 864: case 863: case 862: case 861: case 860: case 859: case 858: case 857: case 856: case 855: case 854: case 852: case 851: case 850: case 849: case 848: case 847: case 846: case 845: case 844: case 843: case 842: case 841: case 840: case 839: case 838: case 837: case 836: case 835: case 834: case 833: case 832: case 831: case 830: case 829: case 828: case 827: case 826: case 825: case 824: case 823: case 822: case 821: case 820: case 819: case 818: case 817: case 816: case 815: case 814: case 813: case 812: case 811: case 810: case 809: case 808: case 807: case 806: case 805: case 804: case 803: case 802: case 801: case 800: case 799: case 798: case 797: case 796: case 795: case 794: case 793: case 792: case 791: case 790: case 789: case 788: case 787: case 786: case 785: case 784: case 783: case 782: case 781: case 780: case 779: case 778: case 777: case 776: case 775: case 774: case 773: case 772: case 771: case 770: case 769: case 768: case 767: case 766: case 765: case 764: case 763: case 762: case 761: case 760: case 759: case 758: case 757: case 756: case 755: case 754: case 753: case 752: case 751: case 750: case 749: case 748: case 747: case 746: case 745: case 744: case 743: case 742: case 741: case 740: case 739: case 738: case 737: case 736: case 735: case 734: case 733: case 732: case 731: case 730: case 729: case 728: case 727: case 726: case 725: case 724: case 723: case 722: case 719: case 707: case 706: case 705: case 704: case 703: case 702: case 701: case 700: case 699: case 698: case 669: case 666: case 663: case 660: case 653: case 652: case 651: case 649: case 585: case 582: case 560: case 557: case 496: case 495: case 494: case 493: case 173: case 170: case 167: case 164: case 156: case 155: case 151: 
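/* get_attr_prefix_0f () presumably reports whether the insn's encoding
   uses the two-byte 0x0f opcode escape: the insn codes listed here return
   1 unconditionally, the conditional cases above depend on the matched
   alternative, and everything else defaults to 0.  */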
case 150: case 140: case 139: case 129: case 125: case 124: case 120: case 119: case 116: case 115: case 110: case 107: case 104: case 69: case 64: case 63: case 35: case 32: case 406: case 424: case 537: case 539: case 540: case 541: case 542: case 543: return 1; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 0; } } int get_attr_prefix_rep (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 569: case 568: case 564: case 563: if (get_attr_unit (insn) == UNIT_SSE) { return 1; } else { return 0; } case 559: case 556: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return 1; } else { return 0; } case 1015: case 137: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 1; } else { return 0; } case 136: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return 1; } else { return 0; } case 135: case 134: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0xf)) { return 1; } else { return 0; } case 172: case 169: case 166: case 163: case 128: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return 1; } else { return 0; } case 96: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x1f)) && (((which_alternative == 6) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))))) || (((which_alternative == 7) && (! ((TARGET_SSE_PARTIAL_REGS) != (0)))) || (!((1 << which_alternative) & 0xf8))))) { return 1; } else { return 0; } case 95: extract_constrain_insn_cached (insn); if ((!((1 << which_alternative) & 0x1f)) && (((which_alternative == 6) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))))) || (((which_alternative == 7) && (! ((TARGET_SSE_PARTIAL_REGS) != (0)))) || (!((1 << which_alternative) & 0xf8))))) { return 1; } else { return 0; } case 91: extract_constrain_insn_cached (insn); if ((((1 << which_alternative) & 0x1e0)) && (((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! ((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78)))) { return 1; } else { return 0; } case 90: extract_constrain_insn_cached (insn); if ((((1 << which_alternative) & 0x1e0)) && (((which_alternative == 6) && ((! ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0))) && (! 
((TARGET_SSE_PARTIAL_REGS) != (0))))) || (!((1 << which_alternative) & 0xe78)))) { return 1; } else { return 0; } case 584: case 581: case 34: case 31: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return 1; } else { return 0; } case 1033: case 1032: case 1016: case 1014: case 927: case 926: case 925: case 924: case 903: case 902: case 901: case 900: case 897: case 895: case 893: case 891: case 889: case 887: case 885: case 790: case 789: case 786: case 785: case 784: case 781: case 779: case 775: case 774: case 773: case 772: case 751: case 749: case 747: case 745: case 743: case 741: case 739: case 736: case 735: case 734: case 669: case 666: case 663: case 660: case 585: case 582: case 560: case 557: case 496: case 495: case 173: case 170: case 167: case 164: case 156: case 155: case 151: case 150: case 140: case 139: case 129: case 35: case 32: case 525: case 625: case 626: case 627: case 628: case 629: case 637: case 638: case 639: case 640: case 641: case 642: case 643: case 644: case 645: case 646: case 647: return 1; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 0; } } int get_attr_prefix_data16 (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 1015: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return 1; } else { return 0; } case 705: extract_constrain_insn_cached (insn); if (((((1 << which_alternative) & 0x3)) && (! ((optimize_size) != (0)))) || (((which_alternative == 2) && ((! ((TARGET_SSE_TYPELESS_STORES) != (0))) && (! ((optimize_size) != (0))))) || (!((1 << which_alternative) & 0x7)))) { return 1; } else { return 0; } case 411: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return 1; } else { return 0; } case 294: case 209: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return 1; } else { return 0; } case 96: extract_constrain_insn_cached (insn); if (((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! ((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) { return 1; } else { return 0; } case 95: extract_constrain_insn_cached (insn); if (((which_alternative == 5) && ((! ((optimize_size) != (0))) && (! ((TARGET_SSE_LOAD0_BY_PXOR) != (0))))) || (((which_alternative == 6) && ((! 
((optimize_size) != (0))) && ((TARGET_SSE_PARTIAL_REG_DEPENDENCY) != (0)))) || ((which_alternative == 7) && ((TARGET_SSE_PARTIAL_REGS) != (0))))) { return 1; } else { return 0; } case 50: if (get_attr_mode (insn) == MODE_HI) { return 1; } else { return 0; } case 1028: case 1026: case 1024: case 1017: case 1013: case 1003: case 1002: case 990: case 989: case 929: case 914: case 911: case 910: case 908: case 905: case 904: case 899: case 898: case 896: case 894: case 892: case 890: case 888: case 886: case 884: case 759: case 758: case 757: case 756: case 652: case 634: case 633: case 622: case 621: case 488: case 476: case 462: case 460: case 438: case 436: case 413: case 412: case 399: case 398: case 359: case 358: case 339: case 338: case 337: case 317: case 316: case 315: case 295: case 280: case 277: case 250: case 242: case 241: case 240: case 231: case 214: case 212: case 211: case 210: case 181: case 161: case 160: case 159: case 158: case 157: case 123: case 107: case 106: case 105: case 57: case 56: case 55: case 53: case 52: case 51: case 48: case 8: case 7: case 6: return 1; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return 0; } } enum attr_type get_attr_type (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 721: case 720: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return TYPE_OTHER; } else { return TYPE_SSEMOV; } case 655: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return TYPE_FCMOV; } else { return TYPE_MULTI; } case 656: case 654: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return TYPE_FCMOV; } else { return TYPE_ICMOV; } case 294: case 290: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return TYPE_ALU; } else { return TYPE_IMOVX; } case 288: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return TYPE_ALU; } else { return TYPE_IMOVX; } case 202: extract_constrain_insn_cached (insn); if ((which_alternative != 0) || (pic_symbolic_operand (operands[2], SImode))) { return TYPE_LEA; } else if (incdec_operand (operands[2], SImode)) { return TYPE_INCDEC; } else { return TYPE_ALU; } case 201: extract_constrain_insn_cached (insn); if ((which_alternative == 2) || (pic_symbolic_operand (operands[2], SImode))) { return TYPE_LEA; } else if (incdec_operand (operands[2], SImode)) { return TYPE_INCDEC; } else { return TYPE_ALU; } case 196: extract_constrain_insn_cached (insn); if ((which_alternative == 2) || (pic_symbolic_operand (operands[2], DImode))) { return TYPE_LEA; } else if (incdec_operand (operands[2], DImode)) { return TYPE_INCDEC; } else { return TYPE_ALU; } case 172: case 169: case 166: case 163: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_FMOV; } else if (which_alternative == 1) { return TYPE_MULTI; } else { return TYPE_SSEICVT; } case 136: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return TYPE_SSECVT; } else { return TYPE_FMOV; } case 135: case 134: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_FMOV; } else if (((1 << which_alternative) & 0xe)) { return TYPE_MULTI; } else { return TYPE_SSECVT; } case 128: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return TYPE_FMOV; } else { return TYPE_SSECVT; } case 112: case 111: extract_constrain_insn_cached (insn); if (((1 << 
which_alternative) & 0x7)) { return TYPE_MULTI; } else if (which_alternative == 3) { return TYPE_MMXMOV; } else { return TYPE_SSEMOV; } case 101: case 100: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return TYPE_FMOV; } else { return TYPE_MULTI; } case 96: case 95: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return TYPE_FMOV; } else if (((1 << which_alternative) & 0x18)) { return TYPE_MULTI; } else { return TYPE_SSEMOV; } case 91: case 90: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return TYPE_FMOV; } else if (((1 << which_alternative) & 0x18)) { return TYPE_IMOV; } else if (((1 << which_alternative) & 0x1e0)) { return TYPE_SSEMOV; } else { return TYPE_MMXMOV; } case 82: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return TYPE_OTHER; } else if (((1 << which_alternative) & 0xc)) { return TYPE_MMX; } else { return TYPE_SSEMOV; } case 59: extract_constrain_insn_cached (insn); if (((optimize_size) != (0)) || ((which_alternative == 3) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_QIMODE_MATH) == (0))))) { return TYPE_IMOV; } else if ((((1 << which_alternative) & 0x28)) || (((TARGET_MOVX) != (0)) && (which_alternative == 2))) { return TYPE_IMOVX; } else { return TYPE_IMOV; } case 50: extract_constrain_insn_cached (insn); if (((optimize_size) != (0)) || (((which_alternative == 0) && (((TARGET_PARTIAL_REG_STALL) == (0)) || ((TARGET_HIMODE_MATH) == (0)))) || ((((1 << which_alternative) & 0x6)) && (aligned_operand (operands[1], HImode))))) { return TYPE_IMOV; } else if (((TARGET_MOVX) != (0)) && (((1 << which_alternative) & 0x5))) { return TYPE_IMOVX; } else { return TYPE_IMOV; } case 0: case 3: case 6: case 9: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_TEST; } else { return TYPE_ICMP; } case 31: case 34: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_FCMP; } else { return TYPE_SSECOMI; } case 43: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x1c)) { return TYPE_MMXMOV; } else if (((1 << which_alternative) & 0xe0)) { return TYPE_SSEMOV; } else if (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))) { return TYPE_LEA; } else { return TYPE_IMOV; } case 44: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x1c)) { return TYPE_MMXMOV; } else if (((1 << which_alternative) & 0xe0)) { return TYPE_SSEMOV; } else if (((flag_pic) != (0)) && (symbolic_operand (operands[1], SImode))) { return TYPE_LEA; } else { return TYPE_IMOV; } case 65: extract_constrain_insn_cached (insn); if ((register_operand (operands[0], QImode)) && ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) { return TYPE_IMOVX; } else { return TYPE_IMOV; } case 66: extract_constrain_insn_cached (insn); if ((register_operand (operands[0], QImode)) && ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) { return TYPE_IMOVX; } else { return TYPE_IMOV; } case 70: extract_constrain_insn_cached (insn); if ((register_operand (operands[0], QImode)) && ((! (q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0)))) { return TYPE_IMOVX; } else { return TYPE_IMOV; } case 71: extract_constrain_insn_cached (insn); if ((! 
(q_regs_operand (operands[0], QImode))) || ((TARGET_MOVX) != (0))) { return TYPE_IMOVX; } else { return TYPE_IMOV; } case 76: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_PUSH; } else { return TYPE_MULTI; } case 83: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xe0)) { return TYPE_MMXMOV; } else if (((1 << which_alternative) & 0x700)) { return TYPE_SSEMOV; } else if (which_alternative == 4) { return TYPE_MULTI; } else if (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))) { return TYPE_LEA; } else { return TYPE_IMOV; } case 84: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xe0)) { return TYPE_MMXMOV; } else if (((1 << which_alternative) & 0x700)) { return TYPE_SSEMOV; } else if (which_alternative == 4) { return TYPE_MULTI; } else if (((flag_pic) != (0)) && (symbolic_operand (operands[1], DImode))) { return TYPE_LEA; } else { return TYPE_IMOV; } case 88: case 89: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_MULTI; } else if (which_alternative == 1) { return TYPE_PUSH; } else { return TYPE_MULTI; } case 106: case 109: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_IMOVX; } else { return TYPE_ALU1; } case 113: case 114: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_IMOVX; } else if (which_alternative == 1) { return TYPE_IMOV; } else if (which_alternative == 2) { return TYPE_MMXMOV; } else { return TYPE_SSEMOV; } case 137: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_SSECVT; } else { return TYPE_FMOV; } case 145: case 142: case 133: case 162: case 165: case 168: case 171: case 174: case 175: case 176: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_FMOV; } else { return TYPE_MULTI; } case 197: case 198: case 199: case 200: extract_insn_cached (insn); if (incdec_operand (operands[2], DImode)) { return TYPE_INCDEC; } else { return TYPE_ALU; } case 203: case 204: case 205: case 206: case 207: case 208: extract_insn_cached (insn); if (incdec_operand (operands[2], SImode)) { return TYPE_INCDEC; } else { return TYPE_ALU; } case 209: extract_constrain_insn_cached (insn); if (which_alternative == 2) { return TYPE_LEA; } else { if (incdec_operand (operands[2], HImode)) { return TYPE_INCDEC; } else { return TYPE_ALU; } } case 215: extract_constrain_insn_cached (insn); if (which_alternative == 3) { return TYPE_LEA; } else { if (incdec_operand (operands[2], QImode)) { return TYPE_INCDEC; } else { return TYPE_ALU; } } case 217: extract_insn_cached (insn); if (incdec_operand (operands[2], QImode)) { return TYPE_INCDEC; } else { return TYPE_ALU1; } case 210: case 211: case 212: case 213: case 214: case 220: extract_insn_cached (insn); if (incdec_operand (operands[2], HImode)) { return TYPE_INCDEC; } else { return TYPE_ALU; } case 216: case 218: case 219: case 221: case 222: case 223: extract_insn_cached (insn); if (incdec_operand (operands[2], QImode)) { return TYPE_INCDEC; } else { return TYPE_ALU; } case 402: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return TYPE_LEA; } else if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 403: extract_constrain_insn_cached (insn); if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], 
VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 407: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return TYPE_LEA; } else if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 408: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return TYPE_LEA; } else if (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 409: extract_constrain_insn_cached (insn); if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 410: extract_constrain_insn_cached (insn); if (((TARGET_DOUBLE_WITH_ADD) != (0)) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 411: extract_constrain_insn_cached (insn); if (which_alternative == 1) { return TYPE_LEA; } else if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 412: extract_constrain_insn_cached (insn); if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 413: extract_constrain_insn_cached (insn); if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 414: extract_constrain_insn_cached (insn); if (which_alternative == 2) { return TYPE_LEA; } else if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 415: extract_constrain_insn_cached (insn); if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 416: extract_constrain_insn_cached (insn); if ((((TARGET_DOUBLE_WITH_ADD) != (0)) && (register_operand (operands[0], VOIDmode))) && (const1_operand (operands[2], VOIDmode))) { return TYPE_ALU; } else { return TYPE_ISHIFT; } case 417: case 425: case 426: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_IMOVX; } else { return TYPE_ISHIFT; } case 555: case 558: extract_insn_cached (insn); if (mult_operator (operands[3], SFmode)) { return TYPE_FMUL; } else { return TYPE_FOP; } case 556: case 559: extract_constrain_insn_cached (insn); if (which_alternative == 1) { if (mult_operator (operands[3], SFmode)) { return TYPE_SSEMUL; } else { return TYPE_SSEADD; } } else { if (mult_operator (operands[3], SFmode)) { return TYPE_FMUL; } else { return TYPE_FOP; } } case 557: case 560: extract_insn_cached (insn); if (mult_operator (operands[3], SFmode)) { return TYPE_SSEMUL; } else { return TYPE_SSEADD; } case 561: extract_insn_cached (insn); if (mult_operator (operands[3], XFmode)) { return TYPE_FMUL; } else { return TYPE_FOP; } case 563: extract_constrain_insn_cached (insn); if ((which_alternative == 2) && (mult_operator (operands[3], SFmode))) { return TYPE_SSEMUL; } else if ((which_alternative == 2) && (div_operator (operands[3], SFmode))) { return TYPE_SSEDIV; } else 
if (which_alternative == 2) { return TYPE_SSEADD; } else if (mult_operator (operands[3], SFmode)) { return TYPE_FMUL; } else if (div_operator (operands[3], SFmode)) { return TYPE_FDIV; } else { return TYPE_FOP; } case 562: case 565: case 566: extract_insn_cached (insn); if (mult_operator (operands[3], SFmode)) { return TYPE_FMUL; } else if (div_operator (operands[3], SFmode)) { return TYPE_FDIV; } else { return TYPE_FOP; } case 568: extract_constrain_insn_cached (insn); if ((which_alternative == 2) && (mult_operator (operands[3], SFmode))) { return TYPE_SSEMUL; } else if ((which_alternative == 2) && (div_operator (operands[3], SFmode))) { return TYPE_SSEDIV; } else if (which_alternative == 2) { return TYPE_SSEADD; } else if (mult_operator (operands[3], DFmode)) { return TYPE_FMUL; } else if (div_operator (operands[3], DFmode)) { return TYPE_FDIV; } else { return TYPE_FOP; } case 564: case 569: extract_insn_cached (insn); if (mult_operator (operands[3], SFmode)) { return TYPE_SSEMUL; } else if (div_operator (operands[3], SFmode)) { return TYPE_SSEDIV; } else { return TYPE_SSEADD; } case 567: case 570: case 571: case 572: case 573: case 574: extract_insn_cached (insn); if (mult_operator (operands[3], DFmode)) { return TYPE_FMUL; } else if (div_operator (operands[3], DFmode)) { return TYPE_FDIV; } else { return TYPE_FOP; } case 575: case 576: case 577: case 578: case 579: case 580: extract_insn_cached (insn); if (mult_operator (operands[3], XFmode)) { return TYPE_FMUL; } else if (div_operator (operands[3], XFmode)) { return TYPE_FDIV; } else { return TYPE_FOP; } case 581: case 584: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_FPSPC; } else { return TYPE_SSE; } case 670: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_ALU; } else if (const0_operand (operands[2], SImode)) { return TYPE_IMOV; } else { return TYPE_LEA; } case 671: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_ALU; } else if (const0_operand (operands[2], DImode)) { return TYPE_IMOV; } else { return TYPE_LEA; } case 672: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_ALU; } else { return TYPE_LEA; } case 1008: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_SSECVT; } else { return TYPE_SSEMOV; } case 1009: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return TYPE_SSECVT; } else if (which_alternative == 1) { return TYPE_SSEMOV; } else { return TYPE_SSECVT; } case 816: case 817: case 818: case 832: case 833: case 834: case 835: case 836: case 837: case 838: case 839: case 840: case 841: case 842: case 871: return TYPE_MMXSHFT; case 703: case 704: case 726: case 727: case 819: case 820: case 821: case 843: case 844: case 845: case 846: case 847: case 848: case 864: case 865: case 869: case 870: case 878: case 879: return TYPE_MMXCVT; case 822: case 823: case 824: case 825: case 826: case 827: case 857: case 858: case 859: return TYPE_MMXCMP; case 807: case 808: case 809: case 810: case 862: case 877: return TYPE_MMXMUL; case 791: case 792: case 793: case 794: case 795: case 796: case 797: case 798: case 799: case 800: case 801: case 802: case 803: case 804: case 805: case 806: case 811: case 812: case 813: case 814: case 815: case 828: case 829: case 830: case 831: case 854: case 855: case 856: case 860: case 861: case 866: case 867: case 868: return TYPE_MMXADD; case 701: case 702: case 729: return TYPE_MMXMOV; case 849: case 863: case 872: case 
873: case 874: case 875: case 876: case 882: case 883: return TYPE_MMX; case 744: case 745: case 890: case 891: return TYPE_SSEDIV; case 150: case 151: case 155: case 156: case 164: case 167: case 170: case 173: case 785: case 786: case 787: case 788: case 789: case 790: case 920: case 921: case 922: case 923: case 924: case 925: return TYPE_SSEICVT; case 129: case 139: case 140: case 723: case 724: case 725: case 730: case 731: case 732: case 733: case 737: case 776: case 777: case 782: case 783: case 784: case 904: case 905: case 906: case 907: case 908: case 909: case 910: case 911: case 912: case 913: case 914: case 915: case 916: case 917: case 918: case 919: case 926: case 927: case 928: case 929: case 956: case 957: case 958: case 959: case 960: case 989: case 990: case 991: case 992: case 993: case 994: case 995: case 996: case 997: case 998: case 999: case 1000: case 1001: case 1003: case 1005: case 1006: case 1007: case 1013: case 1014: case 1015: case 1016: case 1017: case 1031: case 1032: case 1033: return TYPE_SSECVT; case 32: case 35: case 774: case 775: case 902: case 903: return TYPE_SSECOMI; case 495: case 496: case 770: case 771: case 772: case 773: case 898: case 899: case 900: case 901: case 961: case 962: case 963: case 964: case 965: case 966: return TYPE_SSECMP; case 742: case 743: case 888: case 889: return TYPE_SSEMUL; case 738: case 739: case 740: case 741: case 884: case 885: case 886: case 887: case 892: case 893: case 894: case 895: case 1023: case 1024: case 1025: case 1026: case 1027: case 1028: return TYPE_SSEADD; case 698: case 699: case 700: case 705: case 706: case 707: case 719: case 722: case 728: case 734: case 735: case 736: case 952: case 1002: case 1004: case 1010: case 1011: case 1012: return TYPE_SSEMOV; case 582: case 585: case 660: case 663: case 666: case 669: case 746: case 747: case 748: case 749: case 750: case 751: case 778: case 779: case 780: case 781: case 850: case 851: case 852: case 880: case 881: case 896: case 897: case 1018: case 1019: case 1020: case 1029: case 1030: return TYPE_SSE; case 946: case 947: case 948: case 949: case 950: return TYPE_SSEIMUL; case 971: case 972: case 973: case 974: case 975: case 976: case 977: case 978: case 979: case 980: case 981: case 982: case 983: case 984: case 985: case 986: case 987: case 988: return TYPE_SSEISHFT; case 930: case 931: case 932: case 933: case 934: case 935: case 936: case 937: case 938: case 939: case 940: case 941: case 942: case 943: case 944: case 945: case 951: case 953: case 954: case 955: case 967: case 968: case 969: case 970: return TYPE_SSEIADD; case 752: case 753: case 754: case 755: case 756: case 757: case 758: case 759: case 760: case 761: case 762: case 763: case 764: case 765: case 766: case 767: case 768: case 769: return TYPE_SSELOG; case 147: case 148: case 149: case 152: case 153: case 154: case 157: case 158: case 159: return TYPE_FISTP; case 92: case 97: case 102: return TYPE_FXCH; case 19: case 20: case 21: case 23: case 25: case 30: case 33: return TYPE_FCMP; case 657: return TYPE_FCMOV; case 583: case 586: case 587: case 588: case 589: case 590: case 591: case 592: case 593: case 594: case 595: case 596: case 597: case 598: case 599: case 600: case 601: case 602: case 603: case 604: case 605: case 606: case 607: case 608: case 609: case 610: case 611: case 612: case 613: case 614: case 615: case 616: return TYPE_FPSPC; case 371: case 372: case 373: case 374: case 375: case 376: case 386: case 387: case 388: case 389: case 390: case 391: return TYPE_FSGN; 
case 130: case 131: case 132: case 138: case 141: case 143: case 144: case 146: return TYPE_FMOV; case 617: return TYPE_CLD; case 618: case 619: case 620: case 621: case 622: case 623: case 624: case 625: case 626: case 627: case 628: case 629: case 630: case 631: case 632: case 633: case 634: case 635: case 636: case 637: case 638: case 639: case 640: case 641: case 642: case 643: case 644: case 645: case 646: case 647: return TYPE_STR; case 533: case 534: return TYPE_LEAVE; case 687: case 688: case 689: case 690: case 691: case 692: case 693: case 694: case 695: return TYPE_CALLV; case 515: case 516: case 517: case 518: case 519: case 520: case 521: case 522: return TYPE_CALL; case 39: case 40: case 78: case 79: return TYPE_POP; case 36: case 37: case 38: case 48: case 49: case 57: case 58: case 77: return TYPE_PUSH; case 649: case 651: case 652: case 653: return TYPE_ICMOV; case 493: case 494: return TYPE_SETCC; case 497: case 498: case 509: case 510: case 511: case 512: case 513: case 514: case 527: return TYPE_IBR; case 14: case 278: case 279: case 280: case 281: case 282: case 283: case 284: case 285: return TYPE_TEST; case 1: case 2: case 4: case 5: case 7: case 8: case 10: case 11: case 12: case 13: case 15: case 16: case 17: return TYPE_ICMP; case 264: case 265: case 268: case 271: case 274: case 276: case 277: return TYPE_IDIV; case 247: case 248: case 249: case 250: case 251: case 252: case 253: case 254: case 255: case 256: case 257: case 258: case 259: case 260: case 261: case 262: case 263: return TYPE_IMUL; case 477: case 479: case 490: case 492: return TYPE_ROTATE1; case 469: case 470: case 471: case 472: case 473: case 474: case 475: case 476: case 478: case 480: case 481: case 482: case 483: case 484: case 485: case 486: case 487: case 488: case 489: case 491: return TYPE_ROTATE; case 440: case 442: case 464: case 466: return TYPE_ISHIFT1; case 406: case 418: case 419: case 420: case 421: case 424: case 427: case 428: case 429: case 430: case 431: case 432: case 433: case 434: case 435: case 436: case 437: case 438: case 439: case 441: case 443: case 444: case 445: case 446: case 447: case 448: case 451: case 452: case 453: case 454: case 455: case 456: case 457: case 458: case 459: case 460: case 461: case 462: case 463: case 465: case 467: case 468: return TYPE_ISHIFT; case 186: case 187: case 188: case 189: case 190: case 191: case 192: case 193: case 194: case 195: return TYPE_LEA; case 63: case 64: case 69: case 104: case 107: case 110: case 115: case 116: case 118: case 119: case 120: case 121: case 122: case 123: case 124: case 125: return TYPE_IMOVX; case 45: case 46: case 47: case 51: case 52: case 53: case 54: case 55: case 60: case 61: case 67: case 68: case 72: case 73: case 74: case 85: case 86: case 87: case 551: case 553: return TYPE_IMOV; case 352: case 353: case 354: case 355: case 356: case 357: case 358: case 359: case 360: case 361: case 392: case 394: case 395: case 398: case 400: return TYPE_NEGNOT; case 41: case 42: case 56: case 62: case 80: case 81: case 103: case 105: case 108: case 244: case 297: case 299: case 319: case 321: case 341: case 347: case 393: case 396: case 397: case 399: case 401: return TYPE_ALU1; case 178: case 179: case 180: case 181: case 182: case 183: case 184: case 185: case 224: case 226: case 227: case 228: case 229: case 230: case 231: case 232: case 233: case 234: case 235: case 236: case 237: case 238: case 239: case 240: case 241: case 242: case 243: case 245: case 246: case 289: case 291: case 292: case 293: case 295: 
case 296: case 298: case 300: case 301: case 302: case 303: case 304: case 305: case 306: case 307: case 308: case 309: case 310: case 311: case 312: case 313: case 314: case 315: case 316: case 317: case 318: case 320: case 322: case 323: case 324: case 325: case 326: case 327: case 328: case 329: case 330: case 331: case 332: case 333: case 334: case 335: case 336: case 337: case 338: case 339: case 340: case 342: case 343: case 344: case 345: case 346: case 348: case 349: case 350: case 552: case 554: case 648: case 650: return TYPE_ALU; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); case 18: case 22: case 24: case 26: case 93: case 94: case 98: case 99: case 266: case 267: case 269: case 270: case 272: case 273: case 275: case 404: case 405: case 422: case 423: case 449: case 450: case 530: case 544: case 545: case 546: case 547: case 548: case 549: case 685: case 686: return TYPE_MULTI; default: return TYPE_OTHER; } } enum attr_unit get_attr_unit (rtx insn ATTRIBUTE_UNUSED) { switch (recog_memoized (insn)) { case 721: case 720: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return UNIT_SSE; } else { return UNIT_UNKNOWN; } case 656: case 655: case 654: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return UNIT_I387; } else { return UNIT_INTEGER; } case 580: case 579: case 578: case 577: case 576: case 575: extract_insn_cached (insn); if ((get_attr_type (insn) == TYPE_FOP) || ((mult_operator (operands[3], XFmode)) || (get_attr_type (insn) == TYPE_FDIV))) { return UNIT_I387; } else { return UNIT_INTEGER; } case 568: extract_constrain_insn_cached (insn); if ((get_attr_type (insn) == TYPE_FOP) || (((which_alternative != 2) && (mult_operator (operands[3], DFmode))) || (get_attr_type (insn) == TYPE_FDIV))) { return UNIT_I387; } else if ((get_attr_type (insn) == TYPE_SSEADD) || (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) || (get_attr_type (insn) == TYPE_SSEDIV))) { return UNIT_SSE; } else { return UNIT_INTEGER; } case 574: case 573: case 572: case 571: case 570: case 567: extract_insn_cached (insn); if ((get_attr_type (insn) == TYPE_FOP) || ((mult_operator (operands[3], DFmode)) || (get_attr_type (insn) == TYPE_FDIV))) { return UNIT_I387; } else { return UNIT_INTEGER; } case 569: case 564: extract_insn_cached (insn); if ((get_attr_type (insn) == TYPE_SSEADD) || ((mult_operator (operands[3], SFmode)) || (get_attr_type (insn) == TYPE_SSEDIV))) { return UNIT_SSE; } else { return UNIT_INTEGER; } case 563: extract_constrain_insn_cached (insn); if ((get_attr_type (insn) == TYPE_FOP) || (((which_alternative != 2) && (mult_operator (operands[3], SFmode))) || (get_attr_type (insn) == TYPE_FDIV))) { return UNIT_I387; } else if ((get_attr_type (insn) == TYPE_SSEADD) || (((which_alternative == 2) && (mult_operator (operands[3], SFmode))) || (get_attr_type (insn) == TYPE_SSEDIV))) { return UNIT_SSE; } else { return UNIT_INTEGER; } case 566: case 565: case 562: extract_insn_cached (insn); if ((get_attr_type (insn) == TYPE_FOP) || ((mult_operator (operands[3], SFmode)) || (get_attr_type (insn) == TYPE_FDIV))) { return UNIT_I387; } else { return UNIT_INTEGER; } case 559: case 556: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return UNIT_I387; } else if (which_alternative == 1) { return UNIT_SSE; } else { return UNIT_INTEGER; } case 172: case 169: case 166: case 163: extract_constrain_insn_cached (insn); if (which_alternative == 
0) { return UNIT_I387; } else if (!((1 << which_alternative) & 0x3)) { return UNIT_SSE; } else { return UNIT_INTEGER; } case 137: extract_constrain_insn_cached (insn); if (which_alternative != 0) { return UNIT_I387; } else { return UNIT_SSE; } case 136: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x3)) { return UNIT_I387; } else { return UNIT_SSE; } case 135: case 134: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return UNIT_I387; } else if (!((1 << which_alternative) & 0xf)) { return UNIT_SSE; } else { return UNIT_INTEGER; } case 176: case 175: case 174: case 171: case 168: case 165: case 162: case 145: case 142: case 133: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return UNIT_I387; } else { return UNIT_INTEGER; } case 128: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x3)) { return UNIT_I387; } else { return UNIT_SSE; } case 114: case 113: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0x7)) { return UNIT_SSE; } else if (which_alternative == 2) { return UNIT_MMX; } else { return UNIT_INTEGER; } case 112: case 111: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0xf)) { return UNIT_SSE; } else if (which_alternative == 3) { return UNIT_MMX; } else { return UNIT_INTEGER; } case 101: case 100: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return UNIT_I387; } else { return UNIT_INTEGER; } case 96: case 95: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return UNIT_I387; } else if (!((1 << which_alternative) & 0x1f)) { return UNIT_SSE; } else { return UNIT_INTEGER; } case 91: case 90: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x7)) { return UNIT_I387; } else if (((1 << which_alternative) & 0x1e0)) { return UNIT_SSE; } else if (!((1 << which_alternative) & 0x1ff)) { return UNIT_MMX; } else { return UNIT_INTEGER; } case 84: case 83: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0x700)) { return UNIT_SSE; } else if (((1 << which_alternative) & 0xe0)) { return UNIT_MMX; } else { return UNIT_INTEGER; } case 82: extract_constrain_insn_cached (insn); if (!((1 << which_alternative) & 0xf)) { return UNIT_SSE; } else if (((1 << which_alternative) & 0xc)) { return UNIT_MMX; } else { return UNIT_UNKNOWN; } case 44: case 43: extract_constrain_insn_cached (insn); if (((1 << which_alternative) & 0xe0)) { return UNIT_SSE; } else if (((1 << which_alternative) & 0x1c)) { return UNIT_MMX; } else { return UNIT_INTEGER; } case 584: case 581: case 34: case 31: extract_constrain_insn_cached (insn); if (which_alternative == 0) { return UNIT_I387; } else { return UNIT_SSE; } case 1022: case 1021: case 853: case 718: case 717: case 716: case 715: case 714: case 713: case 712: case 711: case 710: case 709: case 708: case 697: case 696: case 684: case 683: case 682: case 681: case 680: case 679: case 678: case 677: case 676: case 675: case 674: case 673: case 668: case 667: case 665: case 664: case 662: case 661: case 659: case 658: case 550: case 543: case 542: case 541: case 540: case 539: case 538: case 537: case 536: case 535: case 532: case 531: case 529: case 528: case 526: case 525: case 524: case 523: case 508: case 507: case 506: case 505: case 504: case 503: case 502: case 501: case 500: case 499: case 385: case 384: case 383: case 382: case 381: case 380: case 379: case 378: case 377: case 370: case 369: case 368: case 367: case 366: case 365: case 364: 
case 363: case 362: case 351: case 287: case 286: case 225: case 177: case 127: case 126: case 117: case 75: case 29: case 27: return UNIT_UNKNOWN; case 883: case 882: case 879: case 878: case 877: case 876: case 875: case 874: case 873: case 872: case 871: case 870: case 869: case 868: case 867: case 866: case 865: case 864: case 863: case 862: case 861: case 860: case 859: case 858: case 857: case 856: case 855: case 854: case 849: case 848: case 847: case 846: case 845: case 844: case 843: case 842: case 841: case 840: case 839: case 838: case 837: case 836: case 835: case 834: case 833: case 832: case 831: case 830: case 829: case 828: case 827: case 826: case 825: case 824: case 823: case 822: case 821: case 820: case 819: case 818: case 817: case 816: case 815: case 814: case 813: case 812: case 811: case 810: case 809: case 808: case 807: case 806: case 805: case 804: case 803: case 802: case 801: case 800: case 799: case 798: case 797: case 796: case 795: case 794: case 793: case 792: case 791: case 729: case 727: case 726: case 704: case 703: case 702: case 701: return UNIT_MMX; case 1033: case 1032: case 1031: case 1030: case 1029: case 1028: case 1027: case 1026: case 1025: case 1024: case 1023: case 1020: case 1019: case 1018: case 1017: case 1016: case 1015: case 1014: case 1013: case 1012: case 1011: case 1010: case 1009: case 1008: case 1007: case 1006: case 1005: case 1004: case 1003: case 1002: case 1001: case 1000: case 999: case 998: case 997: case 996: case 995: case 994: case 993: case 992: case 991: case 990: case 989: case 988: case 987: case 986: case 985: case 984: case 983: case 982: case 981: case 980: case 979: case 978: case 977: case 976: case 975: case 974: case 973: case 972: case 971: case 970: case 969: case 968: case 967: case 966: case 965: case 964: case 963: case 962: case 961: case 960: case 959: case 958: case 957: case 956: case 955: case 954: case 953: case 952: case 951: case 950: case 949: case 948: case 947: case 946: case 945: case 944: case 943: case 942: case 941: case 940: case 939: case 938: case 937: case 936: case 935: case 934: case 933: case 932: case 931: case 930: case 929: case 928: case 927: case 926: case 925: case 924: case 923: case 922: case 921: case 920: case 919: case 918: case 917: case 916: case 915: case 914: case 913: case 912: case 911: case 910: case 909: case 908: case 907: case 906: case 905: case 904: case 903: case 902: case 901: case 900: case 899: case 898: case 897: case 896: case 895: case 894: case 893: case 892: case 891: case 890: case 889: case 888: case 887: case 886: case 885: case 884: case 881: case 880: case 852: case 851: case 850: case 790: case 789: case 788: case 787: case 786: case 785: case 784: case 783: case 782: case 781: case 780: case 779: case 778: case 777: case 776: case 775: case 774: case 773: case 772: case 771: case 770: case 769: case 768: case 767: case 766: case 765: case 764: case 763: case 762: case 761: case 760: case 759: case 758: case 757: case 756: case 755: case 754: case 753: case 752: case 751: case 750: case 749: case 748: case 747: case 746: case 745: case 744: case 743: case 742: case 741: case 740: case 739: case 738: case 737: case 736: case 735: case 734: case 733: case 732: case 731: case 730: case 728: case 725: case 724: case 723: case 722: case 719: case 707: case 706: case 705: case 700: case 699: case 698: case 669: case 666: case 663: case 660: case 585: case 582: case 560: case 557: case 496: case 495: case 173: case 170: case 167: case 164: case 156: case 
155: case 151: case 150: case 140: case 139: case 129: case 35: case 32: return UNIT_SSE; case 657: case 616: case 615: case 614: case 613: case 612: case 611: case 610: case 609: case 608: case 607: case 606: case 605: case 604: case 603: case 602: case 601: case 600: case 599: case 598: case 597: case 596: case 595: case 594: case 593: case 592: case 591: case 590: case 589: case 588: case 587: case 586: case 583: case 561: case 558: case 555: case 391: case 390: case 389: case 388: case 387: case 386: case 376: case 375: case 374: case 373: case 372: case 371: case 159: case 158: case 157: case 154: case 153: case 152: case 149: case 148: case 147: case 146: case 144: case 143: case 141: case 138: case 132: case 131: case 130: case 102: case 97: case 92: case 33: case 30: case 25: case 23: case 21: case 20: case 19: case 28: case 160: case 161: return UNIT_I387; case -1: if (GET_CODE (PATTERN (insn)) != ASM_INPUT && asm_noperands (PATTERN (insn)) < 0) fatal_insn_not_found (insn); default: return UNIT_INTEGER; } } const struct function_unit_desc function_units[] = { {"dummy", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} /* a dummy element */}; int max_dfa_issue_rate = 6; /* Vector translating external insn codes to internal ones.*/ static const unsigned char pentium_translate[] ATTRIBUTE_UNUSED = { 0, 1, 2, 3, 2, 4, 3, 5, 5, 5, 6, 7, 8, 5, 2, 2, 9, 10, 11, 4, 12, 13, 14, 3, 5, 8, 7, 2, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16}; /* Comb vector for state transitions. */ static const unsigned char pentium_transitions[] ATTRIBUTE_UNUSED = { 19, 18, 2, 3, 4, 15, 17, 16, 15, 14, 14, 13, 5, 5, 1, 0, 0, 3, 6, 3, 20, 4, 20, 4, 3, 20, 3, 5, 2, 2, 6, 2, 4, 4, 20, 4, 3, 3, 3, 15, 0, 4, 13, 3, 3, 2, 0, 1, 2, 4, 6, 4, 3, 2, 2, 4, 3, 20, 4, 14, 3, 3, 6, 20, 16, 0, 6, 6, 7, 6, 7, 8, 17, 7, 8, 9, 9, 10, 10, 11, 11, 12, 12, 4, 18, 19, 19, 6}; /* Check vector for state transitions. */ static const unsigned char pentium_check[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 20, 5, 20, 5, 5, 20, 5, 5, 5, 15, 15, 15, 13, 15, 20, 15, 15, 1, 15, 15, 15, 1, 13, 13, 1, 2, 2, 1, 1, 14, 14, 14, 3, 3, 16, 4, 4, 20, 16, 14, 14, 16, 17, 20, 16, 16, 17, 6, 6, 17, 7, 7, 17, 17, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 18, 18, 19, 19}; /* Base vector for state transitions. */ static const unsigned char pentium_base[] = { 0, 32, 30, 37, 40, 12, 52, 55, 59, 61, 63, 65, 67, 27, 44, 24, 49, 57, 69, 71, }; #if AUTOMATON_STATE_ALTS /* Comb vector for state insn alternatives. 
*/ static const unsigned char pentium_state_alts[] ATTRIBUTE_UNUSED = { 1, 1, 1, 1, 1, 2, 1, 1, 1, 24, 2, 2, 12, 2, 2, 1, 1, 1, 1, 1, 0, 10, 0, 1, 5, 0, 1, 1, 1, 1, 1, 1, 1, 16, 0, 2, 8, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 8, 1, 1, 4, 1, 0, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; /* Check vector for state insn alternatives. */ static const unsigned char pentium_check_state_alts[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 20, 5, 20, 5, 5, 20, 5, 5, 5, 15, 15, 15, 13, 15, 20, 15, 15, 1, 15, 15, 15, 1, 13, 13, 1, 2, 2, 1, 1, 14, 14, 14, 3, 3, 16, 4, 4, 20, 16, 14, 14, 16, 17, 20, 16, 16, 17, 6, 6, 17, 7, 7, 17, 17, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 18, 18, 19, 19}; /* Base vector for state insn alternatives. */ static const unsigned char pentium_base_state_alts[] = { 0, 32, 30, 37, 40, 12, 52, 55, 59, 61, 63, 65, 67, 27, 44, 24, 49, 57, 69, 71, }; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char pentium_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 2, 34, 34, 2, 34, 2, 32, 34, 0, 17, 17, 17, 17, 17, 17, 17, 16, 2, 34, 34, 34, 34, 34, 34, 34, 0, 51, 51, 51, 51, 51, 51, 51, 48, 2, 34, 34, 0, 2, 2, 0, 32, 0, 170, 170, 170, 170, 170, 170, 170, 160, 9, 153, 153, 153, 153, 153, 153, 153, 0, 136, 136, 136, 136, 136, 136, 136, 128, 7, 119, 119, 119, 119, 119, 119, 119, 0, 102, 102, 102, 102, 102, 102, 102, 96, 5, 85, 85, 85, 85, 85, 85, 85, 0, 68, 68, 68, 68, 68, 68, 68, 64, 3, 51, 51, 3, 51, 51, 51, 51, 0, 51, 51, 48, 0, 51, 51, 51, 48, 1, 17, 17, 0, 1, 1, 0, 16, 0, 17, 17, 16, 17, 16, 17, 1, 16, 10, 170, 170, 10, 170, 10, 160, 170, 0, 204, 204, 204, 204, 204, 204, 204, 192, 11, 187, 187, 187, 187, 187, 187, 187, 0, }; /* Vector for locked state flags. */ static const unsigned char pentium_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char pentium_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char pentium_fpu_translate[] ATTRIBUTE_UNUSED = { 0, 0, 1, 0, 1, 2, 3, 0, 0, 0, 0, 0, 1, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7}; /* Comb vector for state transitions. 
*/ static const unsigned char pentium_fpu_transitions[] ATTRIBUTE_UNUSED = { 0, 72, 73, 71, 74, 32, 1, 0, 69, 74, 73, 71, 75, 1, 2, 70, 70, 72, 73, 71, 2, 3, 3, 0, 4, 5, 6, 7, 8, 4, 9, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 11, 16, 12, 13, 14, 15, 16, 17, 17, 18, 19, 20, 21, 22, 18, 23, 19, 20, 21, 22, 23, 24, 24, 25, 26, 27, 28, 29, 25, 30, 26, 27, 28, 29, 30, 31, 31, 32, 33, 34, 35, 36, 32, 37, 33, 34, 35, 36, 37, 38, 38, 39, 40, 41, 42, 43, 39, 44, 40, 41, 42, 43, 44, 45, 45, 46, 47, 48, 49, 50, 46, 51, 47, 48, 49, 50, 51, 52, 52, 53, 54, 55, 56, 57, 53, 58, 54, 55, 56, 57, 58, 59, 59, 60, 61, 62, 63, 64, 60, 65, 61, 62, 63, 64, 65, 66, 66, 67, 68, 71, 72, 73, 67, 74, 68, 69, 72, 0, 71, 75, 70}; /* Check vector for state transitions. */ static const unsigned char pentium_fpu_check[] = { 0, 0, 0, 0, 0, 0, 0, 0, 69, 69, 69, 69, 75, 1, 2, 69, 70, 70, 70, 70, 1, 2, 3, 70, 4, 5, 6, 7, 8, 3, 9, 4, 5, 6, 7, 8, 10, 9, 11, 12, 13, 14, 15, 10, 16, 11, 12, 13, 14, 15, 17, 16, 18, 19, 20, 21, 22, 17, 23, 18, 19, 20, 21, 22, 24, 23, 25, 26, 27, 28, 29, 24, 30, 25, 26, 27, 28, 29, 31, 30, 32, 33, 34, 35, 36, 31, 37, 32, 33, 34, 35, 36, 38, 37, 39, 40, 41, 42, 43, 38, 44, 39, 40, 41, 42, 43, 45, 44, 46, 47, 48, 49, 50, 45, 51, 46, 47, 48, 49, 50, 52, 51, 53, 54, 55, 56, 57, 52, 58, 53, 54, 55, 56, 57, 59, 58, 60, 61, 62, 63, 64, 59, 65, 60, 61, 62, 63, 64, 66, 65, 67, 68, 71, 72, 73, 66, 74, 67, 68, 71, 72, 73, 75, 74}; /* Base vector for state transitions. */ static const unsigned char pentium_fpu_base[] = { 0, 13, 14, 22, 24, 25, 26, 27, 28, 30, 36, 38, 39, 40, 41, 42, 44, 50, 52, 53, 54, 55, 56, 58, 64, 66, 67, 68, 69, 70, 72, 78, 80, 81, 82, 83, 84, 86, 92, 94, 95, 96, 97, 98, 100, 106, 108, 109, 110, 111, 112, 114, 120, 122, 123, 124, 125, 126, 128, 134, 136, 137, 138, 139, 140, 142, 148, 150, 151, 8, 16, 152, 153, 154, 156}; #if AUTOMATON_STATE_ALTS /* Comb vector for state insn alternatives. */ static const unsigned char pentium_fpu_state_alts[] ATTRIBUTE_UNUSED = { 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1}; /* Check vector for state insn alternatives. */ static const unsigned char pentium_fpu_check_state_alts[] = { 0, 0, 0, 0, 0, 0, 0, 0, 69, 69, 69, 69, 75, 1, 2, 69, 70, 70, 70, 70, 1, 2, 3, 70, 4, 5, 6, 7, 8, 3, 9, 4, 5, 6, 7, 8, 10, 9, 11, 12, 13, 14, 15, 10, 16, 11, 12, 13, 14, 15, 17, 16, 18, 19, 20, 21, 22, 17, 23, 18, 19, 20, 21, 22, 24, 23, 25, 26, 27, 28, 29, 24, 30, 25, 26, 27, 28, 29, 31, 30, 32, 33, 34, 35, 36, 31, 37, 32, 33, 34, 35, 36, 38, 37, 39, 40, 41, 42, 43, 38, 44, 39, 40, 41, 42, 43, 45, 44, 46, 47, 48, 49, 50, 45, 51, 46, 47, 48, 49, 50, 52, 51, 53, 54, 55, 56, 57, 52, 58, 53, 54, 55, 56, 57, 59, 58, 60, 61, 62, 63, 64, 59, 65, 60, 61, 62, 63, 64, 66, 65, 67, 68, 71, 72, 73, 66, 74, 67, 68, 71, 72, 73, 75, 74}; /* Base vector for state insn alternatives. 
*/ static const unsigned char pentium_fpu_base_state_alts[] = { 0, 13, 14, 22, 24, 25, 26, 27, 28, 30, 36, 38, 39, 40, 41, 42, 44, 50, 52, 53, 54, 55, 56, 58, 64, 66, 67, 68, 69, 70, 72, 78, 80, 81, 82, 83, 84, 86, 92, 94, 95, 96, 97, 98, 100, 106, 108, 109, 110, 111, 112, 114, 120, 122, 123, 124, 125, 126, 128, 134, 136, 137, 138, 139, 140, 142, 148, 150, 151, 8, 16, 152, 153, 154, 156}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char pentium_fpu_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 68, 68, 68, 70, 70, 70, 0, 0, 67, 67, 67, 69, 69, 69, 0, 0, 66, 66, 66, 68, 68, 68, 0, 0, 65, 65, 65, 67, 67, 67, 0, 0, 64, 64, 64, 66, 66, 66, 0, 0, 63, 63, 63, 65, 65, 65, 0, 0, 62, 62, 62, 64, 64, 64, 0, 0, 61, 61, 61, 63, 63, 63, 0, 0, 60, 60, 60, 62, 62, 62, 0, 0, 59, 59, 59, 61, 61, 61, 0, 0, 58, 58, 58, 60, 60, 60, 0, 0, 57, 57, 57, 59, 59, 59, 0, 0, 56, 56, 56, 58, 58, 58, 0, 0, 55, 55, 55, 57, 57, 57, 0, 0, 54, 54, 54, 56, 56, 56, 0, 0, 53, 53, 53, 55, 55, 55, 0, 0, 52, 52, 52, 54, 54, 54, 0, 0, 51, 51, 51, 53, 53, 53, 0, 0, 50, 50, 50, 52, 52, 52, 0, 0, 49, 49, 49, 51, 51, 51, 0, 0, 48, 48, 48, 50, 50, 50, 0, 0, 47, 47, 47, 49, 49, 49, 0, 0, 46, 46, 46, 48, 48, 48, 0, 0, 45, 45, 45, 47, 47, 47, 0, 0, 44, 44, 44, 46, 46, 46, 0, 0, 43, 43, 43, 45, 45, 45, 0, 0, 42, 42, 42, 44, 44, 44, 0, 0, 41, 41, 41, 43, 43, 43, 0, 0, 40, 40, 40, 42, 42, 42, 0, 0, 39, 39, 39, 41, 41, 41, 0, 0, 38, 38, 38, 40, 40, 40, 0, 0, 37, 37, 37, 39, 39, 39, 0, 0, 36, 36, 36, 38, 38, 38, 0, 0, 35, 35, 35, 37, 37, 37, 0, 0, 34, 34, 34, 36, 36, 36, 0, 0, 33, 33, 33, 35, 35, 35, 0, 0, 32, 32, 32, 34, 34, 34, 0, 0, 31, 31, 31, 33, 33, 33, 0, 0, 30, 30, 30, 32, 32, 32, 0, 0, 29, 29, 29, 31, 31, 31, 0, 0, 28, 28, 28, 30, 30, 30, 0, 0, 27, 27, 27, 29, 29, 29, 0, 0, 26, 26, 26, 28, 28, 28, 0, 0, 25, 25, 25, 27, 27, 27, 0, 0, 24, 24, 24, 26, 26, 26, 0, 0, 23, 23, 23, 25, 25, 25, 0, 0, 22, 22, 22, 24, 24, 24, 0, 0, 21, 21, 21, 23, 23, 23, 0, 0, 20, 20, 20, 22, 22, 22, 0, 0, 19, 19, 19, 21, 21, 21, 0, 0, 18, 18, 18, 20, 20, 20, 0, 0, 17, 17, 17, 19, 19, 19, 0, 0, 16, 16, 16, 18, 18, 18, 0, 0, 15, 15, 15, 17, 17, 17, 0, 0, 14, 14, 14, 16, 16, 16, 0, 0, 13, 13, 13, 15, 15, 15, 0, 0, 12, 12, 12, 14, 14, 14, 0, 0, 11, 11, 11, 13, 13, 13, 0, 0, 10, 10, 10, 12, 12, 12, 0, 0, 9, 9, 9, 11, 11, 11, 0, 0, 8, 8, 8, 10, 10, 10, 0, 0, 7, 7, 7, 9, 9, 9, 0, 0, 6, 6, 6, 8, 8, 8, 0, 0, 5, 5, 5, 7, 7, 7, 0, 0, 4, 4, 4, 6, 6, 6, 0, 0, 3, 3, 3, 5, 5, 5, 0, 0, 2, 2, 2, 4, 4, 4, 0, 0, 1, 1, 1, 3, 3, 3, 0, 0, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 3, 3, 3, 3, 3, 3, 0, 0, 1, 1, 1, 2, 2, 2, 0, }; /* Vector for locked state flags. */ static const unsigned char pentium_fpu_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. 
*/ static const unsigned char pentium_fpu_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char ppro_decoder_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 1, 2, 2, 2, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}; /* Vector for state transitions. */ static const unsigned char ppro_decoder_transitions[] ATTRIBUTE_UNUSED = { 0, 1, 1, 0, 1, 4, 2, 0, 2, 4, 3, 0, 3, 4, 4, 0}; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char ppro_decoder_state_alts[] ATTRIBUTE_UNUSED = { 1, 1, 2, 1, 1, 0, 4, 1, 1, 0, 2, 1, 1, 0, 0, 1}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char ppro_decoder_min_issue_delay[] ATTRIBUTE_UNUSED = { 4, 70}; /* Vector for locked state flags. */ static const unsigned char ppro_decoder_dead_lock[] = { 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char ppro_decoder_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char ppro_core_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 2, 2, 2, 3, 4, 4, 5, 2, 2, 6, 6, 7, 6, 7, 6, 2, 8, 6, 8, 2, 8, 8, 2, 2, 2, 0, 8, 2, 8, 8, 8, 2, 2, 2, 2, 2, 2, 4, 4, 2, 2, 4, 0, 2, 4, 4, 4, 4, 2, 2, 2, 2, 9, 9, 10, 4, 1, 1, 0, 10, 10, 10, 10, 10, 10, 4, 8, 8, 11, 11, 4, 4, 5, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12}; /* Comb vector for state transitions. 
*/ static const unsigned char ppro_core_transitions[] ATTRIBUTE_UNUSED = { 0, 34, 34, 43, 36, 33, 32, 31, 33, 18, 104, 1, 0, 36, 42, 42, 105, 35, 40, 39, 41, 40, 38, 105, 37, 0, 104, 35, 35, 36, 37, 43, 44, 45, 43, 70, 38, 103, 36, 1, 37, 105, 2, 37, 103, 105, 39, 40, 19, 103, 105, 2, 2, 102, 105, 41, 102, 101, 33, 34, 42, 43, 101, 105, 3, 3, 100, 32, 44, 100, 99, 105, 0, 42, 45, 99, 105, 4, 4, 98, 40, 46, 98, 97, 105, 47, 39, 48, 97, 105, 5, 5, 96, 41, 49, 96, 95, 31, 50, 47, 51, 95, 105, 6, 6, 94, 30, 52, 94, 93, 49, 53, 29, 54, 93, 105, 7, 7, 92, 51, 55, 92, 91, 28, 56, 53, 57, 91, 105, 8, 8, 90, 27, 58, 90, 89, 55, 59, 26, 60, 89, 105, 9, 9, 88, 57, 61, 88, 87, 25, 62, 59, 63, 87, 105, 10, 10, 86, 24, 64, 86, 85, 61, 65, 23, 66, 85, 105, 11, 11, 84, 63, 67, 84, 83, 22, 68, 65, 69, 83, 105, 12, 12, 82, 21, 70, 82, 81, 67, 71, 20, 72, 81, 105, 13, 13, 80, 69, 73, 80, 79, 38, 74, 18, 75, 79, 105, 14, 14, 78, 72, 76, 78, 77, 17, 77, 74, 78, 77, 105, 15, 15, 76, 16, 79, 76, 75, 76, 80, 15, 81, 75, 105, 16, 16, 74, 78, 82, 74, 73, 14, 83, 80, 84, 73, 105, 17, 17, 72, 13, 85, 72, 71, 82, 86, 12, 87, 71, 105, 18, 18, 38, 84, 88, 38, 70, 11, 89, 86, 90, 70, 105, 19, 19, 69, 10, 91, 69, 68, 88, 92, 9, 93, 68, 105, 20, 20, 67, 90, 94, 67, 66, 8, 95, 92, 96, 66, 105, 21, 21, 65, 7, 97, 65, 64, 94, 98, 6, 99, 64, 105, 22, 22, 63, 96, 100, 63, 62, 5, 101, 98, 102, 62, 105, 23, 23, 61, 4, 103, 61, 60, 100, 105, 3, 105, 60, 105, 24, 24, 59, 102, 105, 59, 58, 105, 105, 105, 105, 58, 105, 25, 25, 57, 105, 105, 57, 56, 105, 105, 105, 105, 56, 105, 26, 26, 55, 105, 105, 55, 54, 105, 105, 105, 105, 54, 105, 27, 27, 53, 105, 105, 53, 52, 105, 105, 105, 105, 52, 105, 28, 28, 51, 105, 105, 51, 50, 105, 105, 105, 105, 50, 105, 29, 29, 49, 105, 105, 49, 48, 105, 105, 105, 105, 48, 105, 30, 30, 47, 105, 105, 47, 46, 105, 105, 105, 105, 46, 105, 31, 31, 41, 105, 105, 41, 45, 105, 105, 105, 105, 45, 105, 32, 32, 39, 105, 105, 39, 44, 105, 105, 105, 105, 44, 105, 33, 33, 40, 105, 105, 40, 43, 105, 105, 105, 105, 43, 105, 34, 34, 42, 105, 105, 42, 40, 105, 105, 105, 105, 35, 105, 0}; /* Check vector for state transitions. 
*/ static const unsigned char ppro_core_check[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 36, 36, 36, 105, 35, 36, 36, 36, 36, 36, 105, 36, 36, 104, 104, 104, 35, 37, 104, 104, 104, 104, 104, 38, 104, 104, 1, 1, 105, 37, 1, 1, 105, 39, 40, 38, 1, 105, 1, 2, 2, 105, 41, 2, 2, 39, 40, 42, 43, 2, 105, 2, 3, 3, 41, 44, 3, 3, 105, 42, 43, 45, 3, 105, 3, 4, 4, 44, 46, 4, 4, 105, 47, 45, 48, 4, 105, 4, 5, 5, 46, 49, 5, 5, 47, 50, 48, 51, 5, 105, 5, 6, 6, 49, 52, 6, 6, 50, 53, 51, 54, 6, 105, 6, 7, 7, 52, 55, 7, 7, 53, 56, 54, 57, 7, 105, 7, 8, 8, 55, 58, 8, 8, 56, 59, 57, 60, 8, 105, 8, 9, 9, 58, 61, 9, 9, 59, 62, 60, 63, 9, 105, 9, 10, 10, 61, 64, 10, 10, 62, 65, 63, 66, 10, 105, 10, 11, 11, 64, 67, 11, 11, 65, 68, 66, 69, 11, 105, 11, 12, 12, 67, 70, 12, 12, 68, 71, 69, 72, 12, 105, 12, 13, 13, 70, 73, 13, 13, 71, 74, 72, 75, 13, 105, 13, 14, 14, 73, 76, 14, 14, 74, 77, 75, 78, 14, 105, 14, 15, 15, 76, 79, 15, 15, 77, 80, 78, 81, 15, 105, 15, 16, 16, 79, 82, 16, 16, 80, 83, 81, 84, 16, 105, 16, 17, 17, 82, 85, 17, 17, 83, 86, 84, 87, 17, 105, 17, 18, 18, 85, 88, 18, 18, 86, 89, 87, 90, 18, 105, 18, 19, 19, 88, 91, 19, 19, 89, 92, 90, 93, 19, 105, 19, 20, 20, 91, 94, 20, 20, 92, 95, 93, 96, 20, 105, 20, 21, 21, 94, 97, 21, 21, 95, 98, 96, 99, 21, 105, 21, 22, 22, 97, 100, 22, 22, 98, 101, 99, 102, 22, 105, 22, 23, 23, 100, 103, 23, 23, 101, 105, 102, 105, 23, 105, 23, 24, 24, 103, 105, 24, 24, 105, 105, 105, 105, 24, 105, 24, 25, 25, 105, 105, 25, 25, 105, 105, 105, 105, 25, 105, 25, 26, 26, 105, 105, 26, 26, 105, 105, 105, 105, 26, 105, 26, 27, 27, 105, 105, 27, 27, 105, 105, 105, 105, 27, 105, 27, 28, 28, 105, 105, 28, 28, 105, 105, 105, 105, 28, 105, 28, 29, 29, 105, 105, 29, 29, 105, 105, 105, 105, 29, 105, 29, 30, 30, 105, 105, 30, 30, 105, 105, 105, 105, 30, 105, 30, 31, 31, 105, 105, 31, 31, 105, 105, 105, 105, 31, 105, 31, 32, 32, 105, 105, 32, 32, 105, 105, 105, 105, 32, 105, 32, 33, 33, 105, 105, 33, 33, 105, 105, 105, 105, 33, 105, 33, 34, 34, 105, 105, 34, 34, 105, 105, 105, 105, 34, 105, 34}; /* Base vector for state transitions. */ static const unsigned short ppro_core_base[] = { 0, 39, 52, 65, 78, 91, 104, 117, 130, 143, 156, 169, 182, 195, 208, 221, 234, 247, 260, 273, 286, 299, 312, 325, 338, 351, 364, 377, 390, 403, 416, 429, 442, 455, 468, 17, 13, 30, 36, 46, 47, 55, 60, 61, 68, 74, 81, 85, 87, 94, 98, 100, 107, 111, 113, 120, 124, 126, 133, 137, 139, 146, 150, 152, 159, 163, 165, 172, 176, 178, 185, 189, 191, 198, 202, 204, 211, 215, 217, 224, 228, 230, 237, 241, 243, 250, 254, 256, 263, 267, 269, 276, 280, 282, 289, 293, 295, 302, 306, 308, 315, 319, 321, 328, 26}; #if AUTOMATON_STATE_ALTS /* Comb vector for state insn alternatives. 
*/ static const unsigned char ppro_core_state_alts[] ATTRIBUTE_UNUSED = { 1, 6, 3, 1, 3, 4, 2, 2, 1, 1, 1, 1, 1, 1, 3, 3, 0, 1, 2, 2, 2, 1, 1, 0, 1, 1, 1, 3, 3, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 3, 0, 1, 3, 1, 0, 1, 1, 1, 1, 0, 1, 1, 3, 0, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 0, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 0, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 1, 3, 1, 1, 0, 1, 0, 1, 0, 1, 1, 3, 1, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 1, 0, 0, 0, 0, 1, 0, 1, 1, 3, 0, 0, 3, 2, 0, 0, 0, 0, 1, 0, 1}; /* Check vector for state insn alternatives. */ static const unsigned char ppro_core_check_state_alts[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 36, 36, 36, 105, 35, 36, 36, 36, 36, 36, 105, 36, 36, 104, 104, 104, 35, 37, 104, 104, 104, 104, 104, 38, 104, 104, 1, 1, 105, 37, 1, 1, 105, 39, 40, 38, 1, 105, 1, 2, 2, 105, 41, 2, 2, 39, 40, 42, 43, 2, 105, 2, 3, 3, 41, 44, 3, 3, 105, 42, 43, 45, 3, 105, 3, 4, 4, 44, 46, 4, 4, 105, 47, 45, 48, 4, 105, 4, 5, 5, 46, 49, 5, 5, 47, 50, 48, 51, 5, 105, 5, 6, 6, 49, 52, 6, 6, 50, 53, 51, 54, 6, 105, 6, 7, 7, 52, 55, 7, 7, 53, 56, 54, 57, 7, 105, 7, 8, 8, 55, 58, 8, 8, 56, 59, 57, 60, 8, 105, 8, 9, 9, 58, 61, 9, 9, 59, 62, 60, 63, 9, 105, 9, 10, 10, 61, 64, 10, 10, 62, 65, 63, 66, 10, 105, 10, 11, 11, 64, 67, 11, 11, 65, 68, 66, 69, 11, 105, 11, 12, 12, 67, 70, 12, 12, 68, 71, 69, 72, 12, 105, 12, 13, 13, 70, 73, 13, 13, 71, 74, 72, 75, 13, 105, 13, 14, 14, 73, 76, 14, 14, 74, 77, 75, 78, 14, 105, 14, 15, 15, 76, 79, 15, 15, 77, 80, 78, 81, 15, 105, 15, 16, 16, 79, 82, 16, 16, 80, 83, 81, 84, 16, 105, 16, 17, 17, 82, 85, 17, 17, 83, 86, 84, 87, 17, 105, 17, 18, 18, 85, 88, 18, 18, 86, 89, 87, 90, 18, 105, 18, 19, 19, 88, 91, 19, 19, 89, 92, 90, 93, 19, 105, 19, 20, 20, 91, 94, 20, 20, 92, 95, 93, 96, 20, 105, 20, 21, 21, 94, 97, 21, 21, 95, 98, 96, 99, 21, 105, 21, 22, 22, 97, 100, 22, 22, 98, 101, 99, 102, 22, 105, 22, 23, 23, 100, 103, 23, 23, 101, 105, 102, 105, 23, 105, 23, 24, 24, 103, 105, 24, 24, 105, 105, 105, 105, 24, 105, 24, 25, 25, 105, 105, 25, 25, 105, 105, 105, 105, 25, 105, 25, 26, 26, 105, 105, 26, 26, 105, 105, 105, 105, 26, 105, 26, 27, 27, 105, 105, 27, 27, 105, 105, 105, 105, 27, 105, 27, 28, 28, 105, 105, 28, 28, 105, 105, 105, 105, 28, 105, 28, 29, 29, 105, 105, 29, 29, 105, 105, 105, 105, 29, 105, 29, 30, 30, 105, 105, 30, 30, 105, 105, 105, 105, 30, 105, 30, 31, 31, 105, 105, 31, 31, 105, 105, 105, 105, 31, 105, 31, 32, 32, 105, 105, 32, 32, 105, 105, 105, 105, 32, 105, 32, 33, 33, 105, 105, 33, 33, 105, 105, 105, 105, 33, 105, 33, 34, 
34, 105, 105, 34, 34, 105, 105, 105, 105, 34, 105, 34}; /* Base vector for state insn alternatives. */ static const unsigned short ppro_core_base_state_alts[] = { 0, 39, 52, 65, 78, 91, 104, 117, 130, 143, 156, 169, 182, 195, 208, 221, 234, 247, 260, 273, 286, 299, 312, 325, 338, 351, 364, 377, 390, 403, 416, 429, 442, 455, 468, 17, 13, 30, 36, 46, 47, 55, 60, 61, 68, 74, 81, 85, 87, 94, 98, 100, 107, 111, 113, 120, 124, 126, 133, 137, 139, 146, 150, 152, 159, 163, 165, 172, 176, 178, 185, 189, 191, 198, 202, 204, 211, 215, 217, 224, 228, 230, 237, 241, 243, 250, 254, 256, 263, 267, 269, 276, 280, 282, 289, 293, 295, 302, 306, 308, 315, 319, 321, 328, 26}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char ppro_core_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 34, 0, 0, 34, 34, 34, 34, 0, 34, 0, 0, 0, 33, 33, 0, 0, 33, 33, 33, 33, 0, 33, 0, 0, 0, 32, 32, 0, 0, 32, 32, 32, 32, 0, 32, 0, 0, 0, 31, 31, 0, 0, 31, 31, 31, 31, 0, 31, 0, 0, 0, 30, 30, 0, 0, 30, 30, 30, 30, 0, 30, 0, 0, 0, 29, 29, 0, 0, 29, 29, 29, 29, 0, 29, 0, 0, 0, 28, 28, 0, 0, 28, 28, 28, 28, 0, 28, 0, 0, 0, 27, 27, 0, 0, 27, 27, 27, 27, 0, 27, 0, 0, 0, 26, 26, 0, 0, 26, 26, 26, 26, 0, 26, 0, 0, 0, 25, 25, 0, 0, 25, 25, 25, 25, 0, 25, 0, 0, 0, 24, 24, 0, 0, 24, 24, 24, 24, 0, 24, 0, 0, 0, 23, 23, 0, 0, 23, 23, 23, 23, 0, 23, 0, 0, 0, 22, 22, 0, 0, 22, 22, 22, 22, 0, 22, 0, 0, 0, 21, 21, 0, 0, 21, 21, 21, 21, 0, 21, 0, 0, 0, 20, 20, 0, 0, 20, 20, 20, 20, 0, 20, 0, 0, 0, 19, 19, 0, 0, 19, 19, 19, 19, 0, 19, 0, 0, 0, 18, 18, 0, 0, 18, 18, 18, 18, 0, 18, 0, 0, 0, 17, 17, 0, 0, 17, 17, 17, 17, 0, 17, 0, 0, 0, 16, 16, 0, 0, 16, 16, 16, 16, 0, 16, 0, 0, 0, 15, 15, 0, 0, 15, 15, 15, 15, 0, 15, 0, 0, 0, 14, 14, 0, 0, 14, 14, 14, 14, 0, 14, 0, 0, 0, 13, 13, 0, 0, 13, 13, 13, 13, 0, 13, 0, 0, 0, 12, 12, 0, 0, 12, 12, 12, 12, 0, 12, 0, 0, 0, 11, 11, 0, 0, 11, 11, 11, 11, 0, 11, 0, 0, 0, 10, 10, 0, 0, 10, 10, 10, 10, 0, 10, 0, 0, 0, 9, 9, 0, 0, 9, 9, 9, 9, 0, 9, 0, 0, 0, 8, 8, 0, 0, 8, 8, 8, 8, 0, 8, 0, 0, 0, 7, 7, 0, 0, 7, 7, 7, 7, 0, 7, 0, 0, 0, 6, 6, 0, 0, 6, 6, 6, 6, 0, 6, 0, 0, 0, 5, 5, 0, 0, 5, 5, 5, 5, 0, 5, 0, 0, 0, 4, 4, 0, 0, 4, 4, 4, 4, 0, 4, 0, 0, 0, 3, 3, 0, 0, 3, 3, 3, 3, 0, 3, 0, 0, 0, 2, 2, 0, 0, 2, 2, 2, 2, 0, 2, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 2, 2, 1, 1, 1, 1, 1, 2, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 34, 34, 1, 1, 34, 34, 34, 34, 1, 34, 0, 0, 1, 17, 17, 1, 1, 17, 17, 17, 17, 1, 17, 0, 0, 1, 3, 3, 1, 1, 3, 3, 3, 3, 1, 3, 0, 0, 1, 2, 2, 1, 1, 2, 2, 2, 2, 1, 2, 0, 0, 1, 4, 4, 1, 1, 4, 4, 4, 4, 1, 4, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 2, 3, 3, 2, 2, 3, 3, 3, 3, 2, 3, 0, 0, 2, 4, 4, 2, 2, 4, 4, 4, 4, 2, 4, 0, 0, 2, 5, 5, 2, 2, 5, 5, 5, 5, 2, 5, 0, 0, 1, 5, 5, 1, 1, 5, 5, 5, 5, 1, 5, 0, 0, 2, 6, 6, 2, 2, 6, 6, 6, 6, 2, 6, 0, 0, 1, 6, 6, 1, 1, 6, 6, 6, 6, 1, 6, 0, 0, 2, 7, 7, 2, 2, 7, 7, 7, 7, 2, 7, 0, 0, 1, 7, 7, 1, 1, 7, 7, 7, 7, 1, 7, 0, 0, 2, 8, 8, 2, 2, 8, 8, 8, 8, 2, 8, 0, 0, 1, 8, 8, 1, 1, 8, 8, 8, 8, 1, 8, 0, 0, 2, 9, 9, 2, 2, 9, 9, 9, 9, 2, 9, 0, 0, 1, 9, 9, 1, 1, 9, 9, 9, 9, 1, 9, 0, 0, 2, 10, 10, 2, 2, 10, 10, 10, 10, 2, 10, 0, 0, 1, 10, 10, 1, 1, 10, 10, 10, 10, 1, 10, 0, 0, 2, 11, 11, 2, 2, 11, 11, 11, 11, 2, 11, 0, 0, 1, 11, 11, 1, 1, 11, 11, 11, 11, 1, 11, 0, 0, 2, 12, 12, 2, 2, 12, 12, 12, 12, 2, 12, 0, 0, 1, 12, 12, 1, 1, 12, 12, 12, 12, 1, 12, 0, 0, 2, 13, 13, 2, 2, 13, 13, 13, 13, 2, 13, 0, 0, 1, 13, 13, 1, 1, 13, 13, 13, 13, 1, 
13, 0, 0, 2, 14, 14, 2, 2, 14, 14, 14, 14, 2, 14, 0, 0, 1, 14, 14, 1, 1, 14, 14, 14, 14, 1, 14, 0, 0, 2, 15, 15, 2, 2, 15, 15, 15, 15, 2, 15, 0, 0, 1, 15, 15, 1, 1, 15, 15, 15, 15, 1, 15, 0, 0, 2, 16, 16, 2, 2, 16, 16, 16, 16, 2, 16, 0, 0, 1, 16, 16, 1, 1, 16, 16, 16, 16, 1, 16, 0, 0, 2, 17, 17, 2, 2, 17, 17, 17, 17, 2, 17, 0, 0, 2, 18, 18, 2, 2, 18, 18, 18, 18, 2, 18, 0, 0, 1, 18, 18, 1, 1, 18, 18, 18, 18, 1, 18, 0, 0, 2, 19, 19, 2, 2, 19, 19, 19, 19, 2, 19, 0, 0, 1, 19, 19, 1, 1, 19, 19, 19, 19, 1, 19, 0, 0, 2, 20, 20, 2, 2, 20, 20, 20, 20, 2, 20, 0, 0, 1, 20, 20, 1, 1, 20, 20, 20, 20, 1, 20, 0, 0, 2, 21, 21, 2, 2, 21, 21, 21, 21, 2, 21, 0, 0, 1, 21, 21, 1, 1, 21, 21, 21, 21, 1, 21, 0, 0, 2, 22, 22, 2, 2, 22, 22, 22, 22, 2, 22, 0, 0, 1, 22, 22, 1, 1, 22, 22, 22, 22, 1, 22, 0, 0, 2, 23, 23, 2, 2, 23, 23, 23, 23, 2, 23, 0, 0, 1, 23, 23, 1, 1, 23, 23, 23, 23, 1, 23, 0, 0, 2, 24, 24, 2, 2, 24, 24, 24, 24, 2, 24, 0, 0, 1, 24, 24, 1, 1, 24, 24, 24, 24, 1, 24, 0, 0, 2, 25, 25, 2, 2, 25, 25, 25, 25, 2, 25, 0, 0, 1, 25, 25, 1, 1, 25, 25, 25, 25, 1, 25, 0, 0, 2, 26, 26, 2, 2, 26, 26, 26, 26, 2, 26, 0, 0, 1, 26, 26, 1, 1, 26, 26, 26, 26, 1, 26, 0, 0, 2, 27, 27, 2, 2, 27, 27, 27, 27, 2, 27, 0, 0, 1, 27, 27, 1, 1, 27, 27, 27, 27, 1, 27, 0, 0, 2, 28, 28, 2, 2, 28, 28, 28, 28, 2, 28, 0, 0, 1, 28, 28, 1, 1, 28, 28, 28, 28, 1, 28, 0, 0, 2, 29, 29, 2, 2, 29, 29, 29, 29, 2, 29, 0, 0, 1, 29, 29, 1, 1, 29, 29, 29, 29, 1, 29, 0, 0, 2, 30, 30, 2, 2, 30, 30, 30, 30, 2, 30, 0, 0, 1, 30, 30, 1, 1, 30, 30, 30, 30, 1, 30, 0, 0, 2, 31, 31, 2, 2, 31, 31, 31, 31, 2, 31, 0, 0, 1, 31, 31, 1, 1, 31, 31, 31, 31, 1, 31, 0, 0, 2, 32, 32, 2, 2, 32, 32, 32, 32, 2, 32, 0, 0, 1, 32, 32, 1, 1, 32, 32, 32, 32, 1, 32, 0, 0, 2, 33, 33, 2, 2, 33, 33, 33, 33, 2, 33, 0, 0, 1, 33, 33, 1, 1, 33, 33, 33, 33, 1, 33, 0, 0, 2, 34, 34, 2, 2, 34, 34, 34, 34, 2, 34, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 2, 0, 0}; /* Vector for locked state flags. */ static const unsigned char ppro_core_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char ppro_core_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char ppro_idiv_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4}; /* Vector for state transitions. 
*/ static const unsigned char ppro_idiv_transitions[] ATTRIBUTE_UNUSED = { 0, 26, 17, 1, 0, 1, 38, 38, 38, 2, 2, 38, 38, 38, 3, 3, 38, 38, 38, 4, 4, 38, 38, 38, 5, 5, 38, 38, 38, 6, 6, 38, 38, 38, 7, 7, 38, 38, 38, 8, 8, 38, 38, 38, 9, 9, 38, 38, 38, 10, 10, 38, 38, 38, 11, 11, 38, 38, 38, 12, 12, 38, 38, 38, 13, 13, 38, 38, 38, 14, 14, 38, 38, 38, 15, 15, 38, 38, 38, 16, 16, 38, 38, 38, 17, 17, 38, 38, 38, 18, 18, 38, 38, 38, 19, 19, 38, 38, 38, 20, 20, 38, 38, 38, 21, 21, 38, 38, 38, 22, 22, 38, 38, 38, 23, 23, 38, 38, 38, 24, 24, 38, 38, 38, 25, 25, 38, 38, 38, 26, 26, 38, 38, 38, 27, 27, 38, 38, 38, 28, 28, 38, 38, 38, 29, 29, 38, 38, 38, 30, 30, 38, 38, 38, 31, 31, 38, 38, 38, 32, 32, 38, 38, 38, 33, 33, 38, 38, 38, 34, 34, 38, 38, 38, 35, 35, 38, 38, 38, 36, 36, 38, 38, 38, 37, 37, 38, 38, 38, 0, }; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char ppro_idiv_state_alts[] ATTRIBUTE_UNUSED = { 1, 2, 2, 2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, }; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char ppro_idiv_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 37, 37, 37, 0, 0, 36, 36, 36, 0, 0, 35, 35, 35, 0, 0, 34, 34, 34, 0, 0, 33, 33, 33, 0, 0, 32, 32, 32, 0, 0, 31, 31, 31, 0, 0, 30, 30, 30, 0, 0, 29, 29, 29, 0, 0, 28, 28, 28, 0, 0, 27, 27, 27, 0, 0, 26, 26, 26, 0, 0, 25, 25, 25, 0, 0, 24, 24, 24, 0, 0, 23, 23, 23, 0, 0, 22, 22, 22, 0, 0, 21, 21, 21, 0, 0, 20, 20, 20, 0, 0, 19, 19, 19, 0, 0, 18, 18, 18, 0, 0, 17, 17, 17, 0, 0, 16, 16, 16, 0, 0, 15, 15, 15, 0, 0, 14, 14, 14, 0, 0, 13, 13, 13, 0, 0, 12, 12, 12, 0, 0, 11, 11, 11, 0, 0, 10, 10, 10, 0, 0, 9, 9, 9, 0, 0, 8, 8, 8, 0, 0, 7, 7, 7, 0, 0, 6, 6, 6, 0, 0, 5, 5, 5, 0, 0, 4, 4, 4, 0, 0, 3, 3, 3, 0, 0, 2, 2, 2, 0, 0, 1, 1, 1, 0, }; /* Vector for locked state flags. */ static const unsigned char ppro_idiv_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. 
*/ static const unsigned char ppro_idiv_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char ppro_fdiv_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4}; /* Vector for state transitions. */ static const unsigned char ppro_fdiv_transitions[] ATTRIBUTE_UNUSED = { 0, 21, 7, 1, 0, 1, 38, 38, 38, 2, 2, 38, 38, 38, 3, 3, 38, 38, 38, 4, 4, 38, 38, 38, 5, 5, 38, 38, 38, 6, 6, 38, 38, 38, 7, 7, 38, 38, 38, 8, 8, 38, 38, 38, 9, 9, 38, 38, 38, 10, 10, 38, 38, 38, 11, 11, 38, 38, 38, 12, 12, 38, 38, 38, 13, 13, 38, 38, 38, 14, 14, 38, 38, 38, 15, 15, 38, 38, 38, 16, 16, 38, 38, 38, 17, 17, 38, 38, 38, 18, 18, 38, 38, 38, 19, 19, 38, 38, 38, 20, 20, 38, 38, 38, 21, 21, 38, 38, 38, 22, 22, 38, 38, 38, 23, 23, 38, 38, 38, 24, 24, 38, 38, 38, 25, 25, 38, 38, 38, 26, 26, 38, 38, 38, 27, 27, 38, 38, 38, 28, 28, 38, 38, 38, 29, 29, 38, 38, 38, 30, 30, 38, 38, 38, 31, 31, 38, 38, 38, 32, 32, 38, 38, 38, 33, 33, 38, 38, 38, 34, 34, 38, 38, 38, 35, 35, 38, 38, 38, 36, 36, 38, 38, 38, 37, 37, 38, 38, 38, 0, }; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char ppro_fdiv_state_alts[] ATTRIBUTE_UNUSED = { 1, 3, 3, 3, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, }; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char ppro_fdiv_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 37, 37, 37, 0, 0, 36, 36, 36, 0, 0, 35, 35, 35, 0, 0, 34, 34, 34, 0, 0, 33, 33, 33, 0, 0, 32, 32, 32, 0, 0, 31, 31, 31, 0, 0, 30, 30, 30, 0, 0, 29, 29, 29, 0, 0, 28, 28, 28, 0, 0, 27, 27, 27, 0, 0, 26, 26, 26, 0, 0, 25, 25, 25, 0, 0, 24, 24, 24, 0, 0, 23, 23, 23, 0, 0, 22, 22, 22, 0, 0, 21, 21, 21, 0, 0, 20, 20, 20, 0, 0, 19, 19, 19, 0, 0, 18, 18, 18, 0, 0, 17, 17, 17, 0, 0, 16, 16, 16, 0, 0, 15, 15, 15, 0, 0, 14, 14, 14, 0, 0, 13, 13, 13, 0, 0, 12, 12, 12, 0, 0, 11, 11, 11, 0, 0, 10, 10, 10, 0, 0, 9, 9, 9, 0, 0, 8, 8, 8, 0, 0, 7, 7, 7, 0, 0, 6, 6, 6, 0, 0, 5, 5, 5, 0, 0, 4, 4, 4, 0, 0, 3, 3, 3, 0, 0, 2, 2, 2, 0, 0, 1, 1, 1, 0, }; /* Vector for locked state flags. 
*/ static const unsigned char ppro_fdiv_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char ppro_fdiv_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char ppro_load_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 2, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 1, 0, 1, 0, 0, 0, 2, 0, 2, 0, 0, 0, 2, 0, 2, 0, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}; /* Vector for state transitions. */ static const unsigned char ppro_load_transitions[] ATTRIBUTE_UNUSED = { 0, 2, 1, 0, 1, 3, 3, 2, 2, 3, 3, 0}; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char ppro_load_state_alts[] ATTRIBUTE_UNUSED = { 1, 3, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char ppro_load_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 40, 20}; /* Vector for locked state flags. */ static const unsigned char ppro_load_dead_lock[] = { 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char ppro_load_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char ppro_store_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6}; /* Vector for state transitions. */ static const unsigned char ppro_store_transitions[] ATTRIBUTE_UNUSED = { 0, 3, 5, 10, 7, 1, 0, 1, 11, 11, 2, 11, 11, 3, 2, 11, 11, 11, 11, 11, 1, 3, 11, 1, 4, 11, 11, 0, 4, 11, 2, 11, 11, 11, 5, 5, 1, 11, 6, 11, 11, 3, 6, 2, 11, 11, 11, 11, 1, 7, 11, 11, 2, 11, 11, 8, 8, 11, 1, 9, 7, 11, 0, 9, 11, 2, 11, 2, 11, 5, 10, 4, 6, 11, 2, 2, 5}; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. 
*/ static const unsigned char ppro_store_state_alts[] ATTRIBUTE_UNUSED = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char ppro_store_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 146, 131, 159, 4, 20, 17, 240, 18, 128, 159, 9, 24, 16, 16, 68, 192, 16, 0, }; /* Vector for locked state flags. */ static const unsigned char ppro_store_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char ppro_store_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char k6_decoder_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, 2, 1, 1, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 0, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}; /* Vector for state transitions. */ static const unsigned char k6_decoder_transitions[] ATTRIBUTE_UNUSED = { 0, 2, 1, 0, 1, 3, 3, 0, 2, 1, 3, 0}; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char k6_decoder_state_alts[] ATTRIBUTE_UNUSED = { 1, 2, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char k6_decoder_min_issue_delay[] ATTRIBUTE_UNUSED = { 6, 32}; /* Vector for locked state flags. */ static const unsigned char k6_decoder_dead_lock[] = { 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. 
*/ static const unsigned char k6_decoder_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char k6_load_unit_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 2, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}; /* Vector for state transitions. */ static const unsigned char k6_load_unit_transitions[] ATTRIBUTE_UNUSED = { 0, 10, 1, 0, 1, 11, 11, 2, 2, 11, 11, 3, 3, 11, 11, 4, 4, 11, 11, 5, 5, 11, 11, 6, 6, 11, 11, 7, 7, 11, 11, 8, 8, 11, 11, 9, 9, 11, 11, 10, 10, 11, 11, 0}; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char k6_load_unit_state_alts[] ATTRIBUTE_UNUSED = { 1, 2, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char k6_load_unit_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 10, 160, 9, 144, 8, 128, 7, 112, 6, 96, 5, 80, 4, 64, 3, 48, 2, 32, 1, 16}; /* Vector for locked state flags. */ static const unsigned char k6_load_unit_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char k6_load_unit_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char k6_store_unit_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 3, 3, 4, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5}; /* Vector for state transitions. 
*/ static const unsigned char k6_store_unit_transitions[] ATTRIBUTE_UNUSED = { 0, 23, 36, 31, 1, 0, 1, 37, 37, 37, 37, 2, 2, 37, 37, 37, 37, 3, 3, 37, 37, 37, 37, 4, 4, 37, 37, 37, 37, 5, 5, 37, 37, 37, 37, 6, 6, 37, 5, 37, 37, 7, 7, 37, 8, 37, 37, 17, 8, 37, 37, 37, 37, 9, 9, 6, 10, 37, 37, 28, 10, 5, 37, 37, 37, 11, 11, 37, 12, 37, 37, 15, 12, 37, 37, 37, 37, 13, 13, 37, 14, 6, 37, 7, 14, 37, 37, 5, 37, 6, 15, 37, 16, 7, 37, 17, 16, 37, 37, 8, 37, 9, 17, 7, 18, 37, 37, 31, 18, 8, 37, 37, 37, 19, 19, 11, 20, 37, 37, 23, 20, 12, 37, 37, 37, 21, 21, 37, 22, 11, 37, 15, 22, 37, 37, 12, 37, 13, 23, 37, 24, 28, 37, 29, 24, 37, 37, 25, 37, 26, 25, 37, 37, 37, 37, 26, 26, 13, 27, 9, 37, 28, 27, 14, 37, 10, 37, 11, 28, 37, 25, 37, 37, 29, 29, 15, 30, 17, 37, 31, 30, 16, 37, 18, 37, 19, 31, 28, 32, 37, 37, 35, 32, 25, 37, 37, 37, 33, 33, 21, 34, 19, 37, 23, 34, 22, 37, 20, 37, 21, 35, 23, 36, 31, 37, 0, 36, 24, 37, 32, 37, 33}; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char k6_store_unit_state_alts[] ATTRIBUTE_UNUSED = { 1, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 2, 0, 1, 1, 0, 0, 2, 0, 1, 1, 0, 1, 2, 0, 1, 1, 0, 0, 2, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 2, 0, 1, 1, 0, 0, 2, 0, 1, 1, 0, 1, 2, 0, 1, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 2, 0, 1, 1, 1, 0, 2, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 2, 0, 1, 1, 1, 0, 2, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 2, 0, 1, 1, 1, 0, 2, 0, 1, 1, 1, 1, 2, 0, 1, 1, 1, 0, 2, 0, 1}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char k6_store_unit_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 7, 5, 9, 18, 0, 0, 6, 4, 8, 17, 0, 0, 5, 3, 7, 16, 0, 0, 4, 2, 6, 15, 0, 0, 3, 1, 5, 14, 0, 0, 2, 0, 4, 13, 0, 0, 1, 0, 3, 12, 0, 0, 1, 1, 3, 12, 0, 0, 0, 0, 2, 11, 0, 0, 0, 1, 2, 11, 0, 0, 2, 0, 1, 10, 0, 0, 3, 1, 1, 14, 0, 0, 2, 0, 0, 13, 0, 0, 3, 1, 0, 14, 0, 0, 1, 0, 0, 9, 0, 0, 1, 1, 0, 12, 0, 0, 0, 0, 2, 8, 0, 0, 0, 1, 2, 8, 0, 0, 0, 0, 1, 7, 0, 0, 0, 1, 1, 11, 0, 0, 2, 0, 0, 10, 0, 0, 3, 1, 0, 14, 0, 0, 1, 0, 0, 6, 0, 0, 1, 1, 0, 6, 0, 0, 1, 1, 1, 6, 0, 0, 0, 0, 0, 5, 0, 0, 0, 1, 0, 11, 0, 0, 1, 0, 1, 4, 0, 0, 0, 0, 0, 3, 0, 0, 0, 1, 0, 8, 0, 0, 0, 0, 1, 2, 0, 0, 0, 1, 1, 6, 0, 0, 0, 0, 0, 7, 0, 0, 0, 1, 0, 11, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 6, 0}; /* Vector for locked state flags. */ static const unsigned char k6_store_unit_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. 
*/ static const unsigned char k6_store_unit_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char k6_integer_units_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 7, 0, 0, 0, 8, 0, 0, 0, 9, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10}; /* Comb vector for state transitions. */ static const unsigned char k6_integer_units_transitions[] ATTRIBUTE_UNUSED = { 0, 3, 106, 4, 111, 13, 108, 3, 106, 1, 0, 3, 114, 2, 114, 26, 114, 100, 7, 2, 4, 0, 7, 114, 11, 6, 31, 114, 12, 5, 11, 8, 0, 1, 4, 7, 9, 106, 2, 6, 4, 105, 104, 2, 2, 107, 105, 3, 108, 100, 114, 114, 111, 26, 9, 100, 110, 109, 13, 26, 113, 112, 4, 2, 6, 114, 114, 4, 114, 10, 11, 103, 99, 3, 8, 99, 5, 2, 13, 11, 114, 114, 14, 114, 28, 96, 94, 92, 14, 91, 89, 87, 15, 15, 10, 114, 114, 16, 114, 30, 86, 84, 82, 16, 81, 79, 77, 17, 17, 8, 114, 114, 18, 114, 33, 76, 74, 72, 18, 71, 69, 67, 19, 19, 30, 114, 114, 20, 114, 35, 66, 64, 62, 20, 61, 59, 57, 21, 21, 31, 114, 114, 22, 114, 38, 56, 54, 52, 22, 51, 49, 47, 23, 23, 35, 114, 114, 24, 114, 40, 46, 44, 42, 24, 41, 39, 37, 25, 25, 36, 114, 114, 26, 114, 43, 36, 34, 32, 26, 31, 29, 27, 4, 100, 40, 114, 8, 114, 114, 11, 12, 102, 101, 13, 10, 9, 2, 6, 10, 3, 12, 114, 114, 31, 114, 114, 36, 114, 98, 97, 13, 30, 28, 4, 35, 33, 26, 41, 114, 114, 46, 114, 114, 51, 114, 40, 38, 25, 45, 43, 24, 50, 48, 23, 56, 114, 114, 61, 114, 114, 66, 114, 55, 53, 22, 60, 58, 21, 65, 63, 20, 71, 114, 114, 76, 114, 114, 81, 114, 70, 68, 19, 75, 73, 18, 80, 78, 17, 86, 114, 114, 91, 114, 114, 96, 114, 85, 83, 16, 90, 88, 15, 95, 93, 14, 104, 5, 105, 99, 107, 103, 114, 5, 114, 99, 6, 103, 11, 45, 7, 109, 101, 110, 102, 112, 27, 114, 101, 41, 102, 95, 27, 96, 27, 10, 113, 29, 29, 114, 32, 28, 34, 29, 10, 30, 8, 33, 8, 35, 30, 37, 31, 39, 114, 42, 114, 44, 38, 47, 40, 35, 43, 36, 45, 40, 48, 41, 49, 45, 52, 114, 54, 114, 57, 50, 59, 53, 46, 55, 50, 58, 51, 60, 55, 62, 56, 64, 114, 67, 114, 69, 63, 72, 65, 60, 68, 61, 70, 65, 73, 66, 74, 70, 77, 114, 79, 114, 82, 75, 84, 78, 71, 80, 75, 83, 76, 85, 80, 87, 81, 89, 114, 92, 114, 94, 88, 99, 90, 85, 93, 86, 95, 90, 10, 91, 101, 11, 102, 48, 103, 50, 53, 97, 55, 98, 95, 6, 96, 45, 7, 46, 50, 58, 51, 60, 63, 65, 68, 70, 73, 75, 78, 55, 80, 56, 60, 61, 65, 66, 70, 71, 75, 83, 76, 85, 88, 90, 93, 95, 97, 98, 114, 80, 114, 81, 85, 86, 90, 91, 95, 96}; /* Check vector for state transitions. 
*/ static const unsigned char k6_integer_units_check[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 114, 3, 114, 3, 114, 3, 3, 3, 3, 3, 7, 114, 7, 6, 7, 114, 7, 5, 7, 7, 7, 1, 1, 6, 5, 106, 106, 5, 1, 1, 1, 1, 106, 106, 106, 106, 108, 108, 114, 114, 111, 111, 9, 108, 108, 108, 108, 111, 111, 111, 111, 2, 9, 114, 114, 4, 114, 10, 2, 2, 2, 2, 4, 4, 4, 4, 13, 10, 114, 114, 14, 114, 28, 13, 13, 13, 13, 14, 14, 14, 14, 15, 28, 114, 114, 16, 114, 30, 15, 15, 15, 15, 16, 16, 16, 16, 17, 30, 114, 114, 18, 114, 33, 17, 17, 17, 17, 18, 18, 18, 18, 19, 33, 114, 114, 20, 114, 35, 19, 19, 19, 19, 20, 20, 20, 20, 21, 35, 114, 114, 22, 114, 38, 21, 21, 21, 21, 22, 22, 22, 22, 23, 38, 114, 114, 24, 114, 40, 23, 23, 23, 23, 24, 24, 24, 24, 25, 40, 114, 114, 26, 114, 43, 25, 25, 25, 25, 26, 26, 26, 26, 100, 43, 114, 8, 114, 114, 11, 100, 100, 100, 100, 8, 8, 8, 11, 11, 11, 12, 114, 114, 31, 114, 114, 36, 114, 12, 12, 12, 31, 31, 31, 36, 36, 36, 41, 114, 114, 46, 114, 114, 51, 114, 41, 41, 41, 46, 46, 46, 51, 51, 51, 56, 114, 114, 61, 114, 114, 66, 114, 56, 56, 56, 61, 61, 61, 66, 66, 66, 71, 114, 114, 76, 114, 114, 81, 114, 71, 71, 71, 76, 76, 76, 81, 81, 81, 86, 114, 114, 91, 114, 114, 96, 114, 86, 86, 86, 91, 91, 91, 96, 96, 96, 104, 104, 105, 105, 107, 107, 114, 104, 114, 105, 104, 107, 105, 45, 107, 109, 109, 110, 110, 112, 112, 114, 109, 45, 110, 109, 112, 110, 27, 112, 113, 113, 29, 114, 32, 27, 34, 113, 27, 29, 113, 32, 29, 34, 32, 37, 34, 39, 114, 42, 114, 44, 37, 47, 39, 37, 42, 39, 44, 42, 47, 44, 49, 47, 52, 114, 54, 114, 57, 49, 59, 52, 49, 54, 52, 57, 54, 59, 57, 62, 59, 64, 114, 67, 114, 69, 62, 72, 64, 62, 67, 64, 69, 67, 72, 69, 74, 72, 77, 114, 79, 114, 82, 74, 84, 77, 74, 79, 77, 82, 79, 84, 82, 87, 84, 89, 114, 92, 114, 94, 87, 99, 89, 87, 92, 89, 94, 92, 99, 94, 101, 99, 102, 48, 103, 50, 53, 101, 55, 102, 101, 103, 102, 48, 103, 50, 53, 58, 55, 60, 63, 65, 68, 70, 73, 75, 78, 58, 80, 60, 63, 65, 68, 70, 73, 75, 78, 83, 80, 85, 88, 90, 93, 95, 97, 98, 114, 83, 114, 85, 88, 90, 93, 95, 97, 98}; /* Base vector for state transitions. */ static const unsigned short k6_integer_units_base[] = { 0, 33, 63, 11, 67, 29, 25, 22, 186, 54, 69, 189, 200, 78, 82, 93, 97, 108, 112, 123, 127, 138, 142, 153, 157, 168, 172, 313, 84, 317, 99, 203, 319, 114, 321, 129, 206, 330, 144, 332, 159, 217, 334, 174, 336, 298, 220, 338, 418, 347, 420, 223, 349, 421, 351, 423, 234, 353, 432, 355, 434, 237, 364, 435, 366, 436, 240, 368, 437, 370, 438, 251, 372, 439, 381, 440, 254, 383, 441, 385, 443, 257, 387, 452, 389, 454, 268, 398, 455, 400, 456, 271, 402, 457, 404, 458, 274, 459, 460, 406, 183, 415, 417, 419, 285, 287, 37, 289, 48, 300, 302, 52, 304, 315}; #if AUTOMATON_STATE_ALTS /* Comb vector for state insn alternatives. 
*/ static const unsigned char k6_integer_units_state_alts[] ATTRIBUTE_UNUSED = { 1, 2, 2, 1, 1, 1, 1, 4, 4, 4, 1, 1, 0, 2, 0, 1, 0, 1, 2, 4, 4, 1, 1, 0, 2, 1, 1, 0, 1, 1, 4, 4, 1, 1, 2, 1, 2, 1, 2, 1, 4, 2, 1, 1, 4, 2, 2, 1, 1, 2, 0, 0, 1, 2, 1, 4, 2, 1, 1, 4, 2, 1, 1, 1, 1, 0, 0, 1, 0, 1, 2, 2, 2, 1, 2, 2, 1, 1, 1, 1, 0, 0, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 0, 0, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 0, 0, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 0, 0, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 0, 0, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 0, 0, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 0, 0, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 0, 1, 0, 0, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 1, 1, 0, 0, 1, 0, 0, 1, 0, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 2, 1, 2, 1, 2, 0, 4, 0, 4, 1, 4, 1, 1, 1, 1, 2, 1, 2, 1, 2, 0, 4, 1, 4, 1, 4, 1, 1, 1, 1, 2, 1, 0, 1, 2, 1, 4, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1}; /* Check vector for state insn alternatives. */ static const unsigned char k6_integer_units_check_state_alts[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 114, 3, 114, 3, 114, 3, 3, 3, 3, 3, 7, 114, 7, 6, 7, 114, 7, 5, 7, 7, 7, 1, 1, 6, 5, 106, 106, 5, 1, 1, 1, 1, 106, 106, 106, 106, 108, 108, 114, 114, 111, 111, 9, 108, 108, 108, 108, 111, 111, 111, 111, 2, 9, 114, 114, 4, 114, 10, 2, 2, 2, 2, 4, 4, 4, 4, 13, 10, 114, 114, 14, 114, 28, 13, 13, 13, 13, 14, 14, 14, 14, 15, 28, 114, 114, 16, 114, 30, 15, 15, 15, 15, 16, 16, 16, 16, 17, 30, 114, 114, 18, 114, 33, 17, 17, 17, 17, 18, 18, 18, 18, 19, 33, 114, 114, 20, 114, 35, 19, 19, 19, 19, 20, 20, 20, 20, 21, 35, 114, 114, 22, 114, 38, 21, 21, 21, 21, 22, 22, 22, 22, 23, 38, 114, 114, 24, 114, 40, 23, 23, 23, 23, 24, 24, 24, 24, 25, 40, 114, 114, 26, 114, 43, 25, 25, 25, 25, 26, 26, 26, 26, 100, 43, 114, 8, 114, 114, 11, 100, 100, 100, 100, 8, 8, 8, 11, 11, 11, 12, 114, 114, 31, 114, 114, 36, 114, 12, 12, 12, 31, 31, 31, 36, 36, 36, 41, 114, 114, 46, 114, 114, 51, 114, 41, 41, 41, 46, 46, 46, 51, 51, 51, 56, 114, 114, 61, 114, 114, 66, 114, 56, 56, 56, 61, 61, 61, 66, 66, 66, 71, 114, 114, 76, 114, 114, 81, 114, 71, 71, 71, 76, 76, 76, 81, 81, 81, 86, 114, 114, 91, 114, 114, 96, 114, 86, 86, 86, 91, 91, 91, 96, 96, 96, 104, 104, 105, 105, 107, 107, 114, 104, 114, 105, 104, 107, 105, 45, 107, 109, 109, 110, 110, 112, 112, 114, 109, 45, 110, 109, 112, 110, 27, 112, 113, 113, 29, 114, 32, 27, 34, 113, 27, 29, 113, 32, 29, 34, 32, 37, 34, 39, 114, 42, 114, 44, 37, 47, 39, 37, 42, 39, 44, 42, 47, 44, 49, 47, 52, 114, 54, 114, 57, 49, 59, 52, 49, 54, 52, 57, 54, 59, 57, 62, 59, 64, 114, 67, 114, 69, 62, 72, 64, 62, 67, 64, 69, 67, 72, 69, 74, 72, 77, 114, 79, 114, 82, 74, 84, 77, 74, 79, 77, 82, 79, 84, 82, 87, 84, 89, 114, 92, 114, 94, 87, 99, 89, 87, 92, 89, 94, 92, 99, 94, 101, 99, 102, 48, 103, 50, 53, 101, 55, 102, 101, 103, 102, 48, 103, 50, 53, 58, 55, 60, 63, 65, 68, 70, 73, 75, 78, 58, 80, 60, 63, 65, 68, 70, 73, 75, 78, 83, 80, 85, 88, 90, 93, 95, 97, 98, 114, 83, 114, 85, 
88, 90, 93, 95, 97, 98}; /* Base vector for state insn alternatives. */ static const unsigned short k6_integer_units_base_state_alts[] = { 0, 33, 63, 11, 67, 29, 25, 22, 186, 54, 69, 189, 200, 78, 82, 93, 97, 108, 112, 123, 127, 138, 142, 153, 157, 168, 172, 313, 84, 317, 99, 203, 319, 114, 321, 129, 206, 330, 144, 332, 159, 217, 334, 174, 336, 298, 220, 338, 418, 347, 420, 223, 349, 421, 351, 423, 234, 353, 432, 355, 434, 237, 364, 435, 366, 436, 240, 368, 437, 370, 438, 251, 372, 439, 381, 440, 254, 383, 441, 385, 443, 257, 387, 452, 389, 454, 268, 398, 455, 400, 456, 271, 402, 457, 404, 458, 274, 459, 460, 406, 183, 415, 417, 419, 285, 287, 37, 289, 48, 300, 302, 52, 304, 315}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char k6_integer_units_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 2, 3, 2, 0, 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 3, 2, 3, 2, 3, 2, 0, 0, 0, 0, 0, 3, 2, 3, 2, 3, 2, 0, 2, 2, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 3, 2, 3, 2, 3, 2, 1, 0, 0, 0, 0, 3, 2, 3, 2, 3, 2, 3, 2, 2, 0, 0, 3, 2, 3, 2, 3, 2, 2, 1, 1, 0, 0, 2, 1, 2, 1, 2, 1, 1, 0, 0, 0, 0, 18, 17, 18, 17, 18, 17, 1, 0, 0, 0, 0, 17, 16, 17, 16, 17, 16, 0, 0, 0, 0, 0, 16, 15, 16, 15, 16, 15, 0, 0, 0, 0, 0, 15, 14, 15, 14, 15, 14, 0, 0, 0, 0, 0, 14, 13, 14, 13, 14, 13, 0, 0, 0, 0, 0, 13, 12, 13, 12, 13, 12, 0, 0, 0, 0, 0, 12, 11, 12, 11, 12, 11, 0, 0, 0, 0, 0, 11, 10, 11, 10, 11, 10, 0, 0, 0, 0, 0, 10, 9, 10, 9, 10, 9, 0, 0, 0, 0, 0, 9, 8, 9, 8, 9, 8, 0, 0, 0, 0, 0, 8, 7, 8, 7, 8, 7, 0, 0, 0, 0, 0, 7, 6, 7, 6, 7, 6, 0, 0, 0, 0, 0, 6, 5, 6, 5, 6, 5, 0, 0, 0, 0, 0, 5, 4, 5, 4, 5, 4, 0, 0, 0, 0, 0, 4, 3, 4, 3, 4, 3, 0, 0, 0, 0, 0, 4, 3, 4, 3, 4, 3, 0, 2, 2, 0, 0, 4, 3, 4, 3, 4, 3, 3, 2, 2, 0, 0, 4, 3, 4, 3, 4, 3, 0, 1, 1, 0, 0, 4, 3, 4, 3, 4, 3, 2, 1, 1, 0, 0, 4, 3, 4, 3, 4, 3, 1, 0, 0, 0, 0, 5, 4, 5, 4, 5, 4, 0, 2, 2, 0, 0, 5, 4, 5, 4, 5, 4, 3, 2, 2, 0, 0, 5, 4, 5, 4, 5, 4, 0, 1, 1, 0, 0, 5, 4, 5, 4, 5, 4, 2, 1, 1, 0, 0, 5, 4, 5, 4, 5, 4, 1, 0, 0, 0, 0, 6, 5, 6, 5, 6, 5, 0, 2, 2, 0, 0, 6, 5, 6, 5, 6, 5, 3, 2, 2, 0, 0, 6, 5, 6, 5, 6, 5, 0, 1, 1, 0, 0, 6, 5, 6, 5, 6, 5, 2, 1, 1, 0, 0, 6, 5, 6, 5, 6, 5, 1, 0, 0, 0, 0, 7, 6, 7, 6, 7, 6, 0, 2, 2, 0, 0, 7, 6, 7, 6, 7, 6, 3, 2, 2, 0, 0, 7, 6, 7, 6, 7, 6, 0, 1, 1, 0, 0, 7, 6, 7, 6, 7, 6, 2, 1, 1, 0, 0, 7, 6, 7, 6, 7, 6, 1, 0, 0, 0, 0, 8, 7, 8, 7, 8, 7, 0, 2, 2, 0, 0, 8, 7, 8, 7, 8, 7, 3, 2, 2, 0, 0, 8, 7, 8, 7, 8, 7, 0, 1, 1, 0, 0, 8, 7, 8, 7, 8, 7, 2, 1, 1, 0, 0, 8, 7, 8, 7, 8, 7, 1, 0, 0, 0, 0, 9, 8, 9, 8, 9, 8, 0, 2, 2, 0, 0, 9, 8, 9, 8, 9, 8, 3, 2, 2, 0, 0, 9, 8, 9, 8, 9, 8, 0, 1, 1, 0, 0, 9, 8, 9, 8, 9, 8, 2, 1, 1, 0, 0, 9, 8, 9, 8, 9, 8, 1, 0, 0, 0, 0, 10, 9, 10, 9, 10, 9, 0, 2, 2, 0, 0, 10, 9, 10, 9, 10, 9, 3, 2, 2, 0, 0, 10, 9, 10, 9, 10, 9, 0, 1, 1, 0, 0, 10, 9, 10, 9, 10, 9, 2, 1, 1, 0, 0, 10, 9, 10, 9, 10, 9, 1, 0, 0, 0, 0, 11, 10, 11, 10, 11, 10, 0, 2, 2, 0, 0, 11, 10, 11, 10, 11, 10, 3, 2, 2, 0, 0, 11, 10, 11, 10, 11, 10, 0, 1, 1, 0, 0, 11, 10, 11, 10, 11, 10, 2, 1, 1, 0, 0, 11, 10, 11, 10, 11, 10, 1, 0, 0, 0, 0, 12, 11, 12, 11, 12, 11, 0, 2, 2, 0, 0, 12, 11, 12, 11, 12, 11, 3, 2, 2, 0, 0, 12, 11, 12, 11, 12, 11, 0, 1, 1, 0, 0, 12, 11, 12, 11, 12, 11, 2, 1, 1, 0, 0, 12, 11, 12, 11, 12, 11, 1, 0, 0, 0, 0, 13, 12, 13, 12, 13, 12, 0, 2, 2, 0, 0, 13, 12, 13, 12, 13, 12, 3, 2, 2, 0, 0, 13, 12, 13, 12, 13, 12, 0, 1, 1, 0, 0, 13, 12, 13, 12, 13, 12, 2, 1, 1, 0, 0, 13, 12, 13, 12, 13, 12, 1, 0, 0, 0, 0, 
14, 13, 14, 13, 14, 13, 0, 2, 2, 0, 0, 14, 13, 14, 13, 14, 13, 3, 2, 2, 0, 0, 14, 13, 14, 13, 14, 13, 0, 1, 1, 0, 0, 14, 13, 14, 13, 14, 13, 2, 1, 1, 0, 0, 14, 13, 14, 13, 14, 13, 1, 0, 0, 0, 0, 15, 14, 15, 14, 15, 14, 0, 2, 2, 0, 0, 15, 14, 15, 14, 15, 14, 3, 2, 2, 0, 0, 15, 14, 15, 14, 15, 14, 0, 1, 1, 0, 0, 15, 14, 15, 14, 15, 14, 2, 1, 1, 0, 0, 15, 14, 15, 14, 15, 14, 1, 0, 0, 0, 0, 16, 15, 16, 15, 16, 15, 0, 2, 2, 0, 0, 16, 15, 16, 15, 16, 15, 3, 2, 2, 0, 0, 16, 15, 16, 15, 16, 15, 0, 1, 1, 0, 0, 16, 15, 16, 15, 16, 15, 2, 1, 1, 0, 0, 16, 15, 16, 15, 16, 15, 1, 0, 0, 0, 0, 17, 16, 17, 16, 17, 16, 0, 2, 2, 0, 0, 17, 16, 17, 16, 17, 16, 3, 2, 2, 0, 0, 17, 16, 17, 16, 17, 16, 0, 1, 1, 0, 0, 17, 16, 17, 16, 17, 16, 2, 1, 1, 0, 0, 17, 16, 17, 16, 17, 16, 1, 0, 0, 0, 0, 18, 17, 18, 17, 18, 17, 3, 2, 2, 0, 0, 18, 17, 18, 17, 18, 17, 2, 1, 1, 0, 0, 3, 2, 3, 2, 3, 2, 0, 1, 1, 0, 0, 18, 17, 18, 17, 18, 17, 0, 0, 0, 0, 0, 18, 17, 18, 17, 18, 17, 0, 2, 2, 0, 0, 18, 17, 18, 17, 18, 17, 0, 1, 1, 0, 0, 2, 1, 2, 1, 2, 1, 0, 1, 1, 0, 0, 0, 2, 3, 2, 3, 2, 0, 2, 2, 0, 0, 0, 2, 3, 2, 3, 2, 0, 1, 1, 0, 0, 0, 1, 2, 1, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 2, 1, 0, 1, 1, 0, 0, 0, 17, 18, 17, 18, 17, 0, 0, 0, 0, 0, 0, 17, 18, 17, 18, 17, 0, 2, 2, 0, 0, 0, 17, 18, 17, 18, 17, 0, 1, 1, 0, 0, 0, 3, 4, 3, 4, 3, 0, 0, 0, 0, 0, 0, 3, 4, 3, 4, 3, 0, 2, 2, 0, 0, 0, 3, 4, 3, 4, 3, 0, 1, 1, 0}; /* Vector for locked state flags. */ static const unsigned char k6_integer_units_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char k6_integer_units_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char k6_fpu_unit_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4}; /* Vector for state transitions. 
*/ static const unsigned char k6_fpu_unit_transitions[] ATTRIBUTE_UNUSED = { 0, 55, 54, 1, 0, 1, 57, 57, 57, 2, 2, 57, 57, 57, 3, 3, 57, 57, 57, 4, 4, 57, 57, 57, 5, 5, 57, 57, 57, 6, 6, 57, 57, 57, 7, 7, 57, 57, 57, 8, 8, 57, 57, 57, 9, 9, 57, 57, 57, 10, 10, 57, 57, 57, 11, 11, 57, 57, 57, 12, 12, 57, 57, 57, 13, 13, 57, 57, 57, 14, 14, 57, 57, 57, 15, 15, 57, 57, 57, 16, 16, 57, 57, 57, 17, 17, 57, 57, 57, 18, 18, 57, 57, 57, 19, 19, 57, 57, 57, 20, 20, 57, 57, 57, 21, 21, 57, 57, 57, 22, 22, 57, 57, 57, 23, 23, 57, 57, 57, 24, 24, 57, 57, 57, 25, 25, 57, 57, 57, 26, 26, 57, 57, 57, 27, 27, 57, 57, 57, 28, 28, 57, 57, 57, 29, 29, 57, 57, 57, 30, 30, 57, 57, 57, 31, 31, 57, 57, 57, 32, 32, 57, 57, 57, 33, 33, 57, 57, 57, 34, 34, 57, 57, 57, 35, 35, 57, 57, 57, 36, 36, 57, 57, 57, 37, 37, 57, 57, 57, 38, 38, 57, 57, 57, 39, 39, 57, 57, 57, 40, 40, 57, 57, 57, 41, 41, 57, 57, 57, 42, 42, 57, 57, 57, 43, 43, 57, 57, 57, 44, 44, 57, 57, 57, 45, 45, 57, 57, 57, 46, 46, 57, 57, 57, 47, 47, 57, 57, 57, 48, 48, 57, 57, 57, 49, 49, 57, 57, 57, 50, 50, 57, 57, 57, 51, 51, 57, 57, 57, 52, 52, 57, 57, 57, 53, 53, 57, 57, 57, 54, 54, 57, 57, 57, 55, 55, 57, 57, 57, 56, 56, 57, 54, 57, 0}; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char k6_fpu_unit_state_alts[] ATTRIBUTE_UNUSED = { 1, 1, 2, 2, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 2, 0, 1}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char k6_fpu_unit_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 56, 55, 56, 0, 0, 55, 54, 55, 0, 0, 54, 53, 54, 0, 0, 53, 52, 53, 0, 0, 52, 51, 52, 0, 0, 51, 50, 51, 0, 0, 50, 49, 50, 0, 0, 49, 48, 49, 0, 0, 48, 47, 48, 0, 0, 47, 46, 47, 0, 0, 46, 45, 46, 0, 0, 45, 44, 45, 0, 0, 44, 43, 44, 0, 0, 43, 42, 43, 0, 0, 42, 41, 42, 0, 0, 41, 40, 41, 0, 0, 40, 39, 40, 0, 0, 39, 38, 39, 0, 0, 38, 37, 38, 0, 0, 37, 36, 37, 0, 0, 36, 35, 36, 0, 0, 35, 34, 35, 0, 0, 34, 33, 34, 0, 0, 33, 32, 33, 0, 0, 32, 31, 32, 0, 0, 31, 30, 31, 0, 0, 30, 29, 30, 0, 0, 29, 28, 29, 0, 0, 28, 27, 28, 0, 0, 27, 26, 27, 0, 0, 26, 25, 26, 0, 0, 25, 24, 25, 0, 0, 24, 23, 24, 0, 0, 23, 22, 23, 0, 0, 22, 21, 22, 0, 0, 21, 20, 21, 0, 0, 20, 19, 20, 0, 0, 19, 18, 19, 0, 0, 18, 17, 18, 0, 0, 17, 16, 17, 0, 0, 16, 15, 16, 0, 0, 15, 14, 15, 0, 0, 14, 13, 14, 0, 0, 13, 12, 13, 0, 0, 12, 11, 12, 0, 0, 11, 10, 11, 0, 0, 10, 9, 10, 0, 0, 9, 8, 9, 0, 0, 8, 7, 8, 0, 0, 7, 6, 7, 0, 0, 6, 5, 6, 0, 0, 5, 4, 5, 0, 0, 4, 3, 4, 0, 0, 3, 2, 3, 0, 0, 2, 1, 2, 0, 0, 1, 0, 1, 0}; /* Vector for locked state flags. 
*/ static const unsigned char k6_fpu_unit_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char k6_fpu_unit_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char k6_branch_unit_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}; /* Vector for state transitions. */ static const unsigned char k6_branch_unit_transitions[] ATTRIBUTE_UNUSED = { 0, 1, 0, 1, 2, 0}; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char k6_branch_unit_state_alts[] ATTRIBUTE_UNUSED = { 1, 1, 1, 1, 0, 1}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char k6_branch_unit_min_issue_delay[] ATTRIBUTE_UNUSED = { 8}; /* Vector for locked state flags. */ static const unsigned char k6_branch_unit_dead_lock[] = { 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char k6_branch_unit_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char athlon_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 2, 3, 2, 3, 1, 4, 5, 6, 2, 2, 2, 7, 8, 8, 1, 2, 1, 1, 2, 1, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 3, 2, 2, 3, 1, 1, 2, 3, 1, 3, 2, 1, 1, 1, 1, 1, 2, 3, 2, 3, 1, 1, 1, 2, 3, 2, 3, 2, 2, 2, 1, 1, 1, 2, 3, 2, 3, 1, 1, 3, 3, 1, 2, 3, 3, 2, 3, 2, 3, 2, 2, 2, 3, 1, 1, 1, 2, 3, 2, 3, 1, 1, 1, 2, 3, 2, 3, 9}; /* Vector for state transitions. 
*/ static const unsigned char athlon_transitions[] ATTRIBUTE_UNUSED = { 0, 1, 3, 2, 5, 70, 8, 28, 16, 0, 1, 2, 76, 3, 76, 76, 76, 76, 76, 4, 2, 3, 76, 76, 76, 76, 76, 76, 76, 4, 3, 76, 76, 76, 76, 76, 76, 76, 76, 0, 4, 1, 3, 1, 5, 70, 8, 28, 16, 0, 5, 76, 76, 76, 76, 76, 76, 76, 76, 6, 6, 70, 52, 51, 72, 76, 69, 76, 76, 7, 7, 8, 10, 9, 54, 69, 76, 76, 16, 11, 8, 9, 76, 10, 76, 76, 76, 76, 76, 71, 9, 10, 76, 76, 76, 76, 76, 76, 76, 71, 10, 76, 76, 76, 76, 76, 76, 76, 76, 11, 11, 12, 14, 13, 49, 65, 38, 76, 16, 15, 12, 13, 76, 14, 76, 76, 76, 76, 76, 48, 13, 14, 76, 76, 76, 76, 76, 76, 76, 48, 14, 76, 76, 76, 76, 76, 76, 76, 76, 15, 15, 1, 3, 2, 76, 76, 76, 76, 16, 0, 16, 76, 76, 76, 76, 76, 76, 76, 76, 17, 17, 18, 20, 19, 76, 76, 76, 76, 76, 21, 18, 19, 76, 20, 76, 76, 76, 76, 76, 47, 19, 20, 76, 76, 76, 76, 76, 76, 76, 47, 20, 76, 76, 76, 76, 76, 76, 76, 76, 21, 21, 22, 24, 23, 76, 76, 76, 76, 76, 25, 22, 23, 76, 24, 76, 76, 76, 76, 76, 46, 23, 24, 76, 76, 76, 76, 76, 76, 76, 46, 24, 76, 76, 76, 76, 76, 76, 76, 76, 25, 25, 26, 28, 27, 76, 76, 76, 76, 76, 29, 26, 27, 76, 28, 76, 76, 76, 76, 76, 45, 27, 28, 76, 76, 76, 76, 76, 76, 76, 45, 28, 76, 76, 76, 76, 76, 76, 76, 76, 29, 29, 30, 32, 31, 76, 76, 76, 76, 76, 33, 30, 31, 76, 32, 76, 76, 76, 76, 76, 44, 31, 32, 76, 76, 76, 76, 76, 76, 76, 44, 32, 76, 76, 76, 76, 76, 76, 76, 76, 33, 33, 34, 36, 35, 76, 76, 76, 76, 76, 37, 34, 35, 76, 36, 76, 76, 76, 76, 76, 43, 35, 36, 76, 76, 76, 76, 76, 76, 76, 43, 36, 76, 76, 76, 76, 76, 76, 76, 76, 37, 37, 38, 40, 39, 76, 76, 76, 76, 16, 41, 38, 39, 76, 40, 76, 76, 76, 76, 76, 42, 39, 40, 76, 76, 76, 76, 76, 76, 76, 42, 40, 76, 76, 76, 76, 76, 76, 76, 76, 41, 41, 12, 14, 13, 76, 76, 76, 76, 16, 15, 42, 12, 14, 12, 76, 76, 76, 76, 16, 15, 43, 38, 40, 38, 76, 76, 76, 76, 16, 41, 44, 34, 36, 34, 76, 76, 76, 76, 76, 37, 45, 30, 32, 30, 76, 76, 76, 76, 76, 33, 46, 26, 28, 26, 76, 76, 76, 76, 76, 29, 47, 22, 24, 22, 76, 76, 76, 76, 76, 25, 48, 1, 3, 1, 76, 76, 76, 76, 16, 0, 49, 76, 76, 76, 76, 76, 76, 76, 76, 50, 50, 70, 52, 51, 76, 76, 76, 76, 76, 7, 51, 52, 76, 76, 76, 76, 76, 76, 76, 53, 52, 76, 76, 76, 76, 76, 76, 76, 76, 7, 53, 8, 10, 8, 54, 69, 76, 76, 16, 11, 54, 76, 76, 76, 76, 76, 76, 76, 76, 55, 55, 65, 63, 62, 57, 76, 34, 76, 76, 56, 56, 8, 10, 9, 76, 76, 76, 76, 16, 11, 57, 76, 76, 76, 76, 76, 76, 76, 76, 58, 58, 69, 67, 66, 76, 76, 76, 76, 76, 59, 59, 38, 40, 39, 60, 34, 76, 76, 16, 41, 60, 76, 76, 76, 76, 76, 76, 76, 76, 61, 61, 65, 63, 62, 76, 76, 76, 76, 76, 56, 62, 63, 76, 76, 76, 76, 76, 76, 76, 64, 63, 76, 76, 76, 76, 76, 76, 76, 76, 56, 64, 8, 10, 8, 76, 76, 76, 76, 16, 11, 65, 62, 76, 63, 76, 76, 76, 76, 76, 64, 66, 67, 76, 76, 76, 76, 76, 76, 76, 68, 67, 76, 76, 76, 76, 76, 76, 76, 76, 59, 68, 38, 40, 38, 60, 34, 76, 76, 16, 41, 69, 66, 76, 67, 76, 76, 76, 76, 76, 68, 70, 51, 76, 52, 76, 76, 76, 76, 76, 53, 71, 12, 14, 12, 49, 65, 38, 76, 16, 15, 72, 76, 76, 76, 76, 76, 76, 76, 76, 73, 73, 69, 67, 66, 74, 76, 76, 76, 76, 59, 74, 76, 76, 76, 76, 76, 76, 76, 76, 75, 75, 34, 36, 35, 32, 76, 76, 76, 76, 37, }; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. 
*/ static const unsigned char athlon_state_alts[] ATTRIBUTE_UNUSED = { 1, 2, 1, 2, 1, 1, 1, 1, 2, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 4, 1, 1, 1, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 0, 1, 0, 0, 1, 1, 2, 1, 2, 1, 1, 0, 0, 2, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 1, 0, 2, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 0, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 0, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 0, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 0, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 0, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 2, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 2, 1, 1, 2, 1, 4, 0, 0, 0, 0, 2, 1, 1, 2, 1, 4, 0, 0, 0, 0, 2, 1, 1, 2, 1, 4, 0, 0, 0, 0, 0, 1, 1, 2, 1, 4, 0, 0, 0, 0, 0, 1, 1, 2, 1, 4, 0, 0, 0, 0, 0, 1, 1, 2, 1, 4, 0, 0, 0, 0, 0, 1, 1, 2, 1, 4, 0, 0, 0, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 4, 1, 1, 0, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 0, 1, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 1, 0, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 4, 0, 0, 0, 0, 2, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 4, 1, 1, 0, 0, 2, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 2, 0, 2, 0, 0, 0, 0, 0, 1, 1, 2, 1, 4, 1, 1, 1, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 2, 1, 0, 0, 0, 0, 1, }; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. 
*/ static const unsigned char athlon_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 16, 17, 17, 16, 0, 17, 17, 17, 16, 1, 17, 17, 17, 16, 0, 0, 0, 0, 0, 1, 17, 18, 21, 32, 0, 0, 1, 4, 16, 0, 0, 0, 19, 0, 0, 16, 17, 19, 16, 0, 17, 17, 19, 16, 1, 17, 17, 19, 16, 0, 0, 0, 2, 0, 0, 16, 34, 34, 16, 0, 17, 34, 34, 16, 1, 17, 34, 34, 16, 0, 0, 17, 17, 0, 1, 17, 153, 153, 96, 0, 0, 136, 136, 80, 0, 16, 136, 136, 80, 0, 17, 136, 136, 80, 1, 17, 136, 136, 80, 0, 0, 119, 119, 64, 0, 16, 119, 119, 64, 0, 17, 119, 119, 64, 1, 17, 119, 119, 64, 0, 0, 102, 102, 48, 0, 16, 102, 102, 48, 0, 17, 102, 102, 48, 1, 17, 102, 102, 48, 0, 0, 85, 85, 32, 0, 16, 85, 85, 32, 0, 17, 85, 85, 32, 1, 17, 85, 85, 32, 0, 0, 68, 68, 16, 0, 16, 68, 68, 16, 0, 17, 68, 68, 16, 1, 17, 68, 68, 16, 0, 0, 51, 51, 0, 0, 16, 51, 51, 16, 0, 17, 51, 51, 16, 1, 17, 51, 51, 16, 0, 0, 34, 34, 0, 0, 0, 34, 34, 0, 0, 0, 51, 51, 0, 0, 0, 68, 68, 16, 0, 0, 85, 85, 32, 0, 0, 102, 102, 48, 0, 0, 119, 119, 64, 0, 0, 17, 17, 0, 1, 17, 34, 53, 32, 0, 0, 17, 36, 16, 0, 17, 17, 36, 16, 1, 17, 17, 36, 16, 0, 0, 0, 19, 0, 1, 17, 19, 21, 32, 0, 0, 2, 4, 16, 0, 0, 17, 19, 0, 1, 17, 34, 85, 32, 0, 0, 17, 68, 16, 0, 0, 0, 51, 0, 1, 17, 51, 53, 32, 0, 0, 34, 36, 16, 0, 17, 34, 36, 16, 1, 17, 34, 36, 16, 0, 0, 17, 19, 0, 0, 16, 34, 36, 16, 0, 17, 17, 68, 16, 1, 17, 17, 68, 16, 0, 0, 0, 51, 0, 0, 16, 17, 68, 16, 0, 16, 17, 36, 16, 0, 0, 0, 2, 0, 1, 17, 18, 85, 32, 0, 0, 1, 68, 16, 1, 17, 21, 85, 32, 0, 0, 4, 68, 16, }; /* Vector for locked state flags. */ static const unsigned char athlon_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char athlon_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char athlon_load_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 2, 3, 1, 4, 5, 5, 1, 1, 6, 6, 7, 7, 7, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 5, 5, 1, 1, 1, 1, 8, 8, 7, 0, 0, 0, 1, 0, 1, 0, 5, 5, 0, 0, 1, 1, 0, 5, 5, 0, 0, 1, 1, 0, 1, 1, 0, 5, 5, 0, 0, 1, 0, 5, 0, 1, 1, 1, 1, 1, 1, 0, 5, 0, 1, 0, 0, 1, 1, 0, 5, 5, 0, 0, 1, 1, 0, 5, 5, 0, 0, 9}; /* Vector for state transitions. 
*/ static const unsigned char athlon_load_transitions[] ATTRIBUTE_UNUSED = { 0, 12, 151, 159, 11, 4, 60, 59, 1, 0, 1, 13, 162, 146, 133, 5, 162, 162, 162, 2, 2, 7, 53, 38, 162, 16, 162, 162, 162, 3, 3, 162, 162, 162, 162, 162, 16, 19, 33, 4, 4, 162, 162, 162, 20, 162, 41, 10, 5, 0, 5, 162, 162, 162, 6, 162, 162, 162, 162, 2, 6, 162, 162, 162, 33, 162, 162, 162, 162, 7, 7, 16, 54, 8, 162, 162, 162, 162, 162, 3, 8, 162, 162, 162, 162, 162, 162, 162, 162, 9, 9, 162, 162, 162, 162, 162, 18, 52, 162, 10, 10, 162, 162, 162, 21, 162, 162, 41, 162, 11, 11, 22, 125, 156, 31, 20, 61, 69, 133, 12, 12, 4, 143, 149, 22, 162, 30, 29, 13, 0, 13, 5, 162, 85, 14, 162, 162, 162, 162, 2, 14, 6, 162, 75, 15, 162, 162, 162, 162, 7, 15, 33, 162, 17, 162, 162, 162, 162, 162, 16, 16, 162, 162, 162, 162, 162, 162, 162, 162, 3, 17, 162, 162, 162, 162, 162, 162, 162, 162, 18, 18, 162, 162, 162, 162, 162, 162, 162, 162, 19, 19, 162, 162, 162, 162, 162, 162, 16, 162, 20, 20, 162, 162, 162, 3, 162, 42, 21, 6, 12, 21, 162, 162, 162, 19, 162, 162, 42, 162, 22, 22, 20, 78, 141, 25, 162, 24, 23, 14, 12, 23, 21, 76, 107, 26, 162, 162, 24, 162, 22, 24, 42, 74, 108, 7, 162, 162, 162, 162, 25, 25, 3, 67, 140, 162, 162, 7, 26, 15, 4, 26, 19, 56, 27, 162, 162, 162, 7, 162, 20, 27, 162, 162, 162, 162, 162, 162, 8, 162, 28, 28, 162, 162, 162, 9, 162, 51, 50, 162, 29, 29, 10, 87, 135, 23, 162, 162, 30, 162, 11, 30, 41, 81, 120, 24, 162, 162, 162, 162, 31, 31, 25, 111, 57, 162, 3, 2, 36, 32, 4, 32, 15, 162, 34, 162, 33, 162, 162, 162, 16, 33, 162, 162, 162, 162, 162, 162, 162, 162, 16, 34, 17, 162, 35, 162, 162, 162, 162, 162, 18, 35, 162, 162, 162, 162, 162, 162, 162, 162, 33, 36, 26, 49, 37, 162, 19, 162, 2, 162, 20, 37, 27, 48, 45, 162, 162, 162, 38, 162, 28, 38, 8, 44, 39, 162, 162, 162, 162, 162, 9, 39, 162, 162, 162, 162, 162, 162, 162, 162, 40, 40, 162, 162, 162, 162, 162, 33, 43, 162, 41, 41, 162, 162, 162, 42, 162, 162, 162, 162, 31, 42, 162, 162, 162, 16, 162, 162, 162, 162, 25, 43, 162, 162, 162, 162, 162, 162, 33, 162, 42, 44, 162, 162, 162, 162, 162, 162, 162, 162, 43, 45, 162, 162, 162, 162, 162, 162, 39, 162, 46, 46, 162, 162, 162, 40, 162, 6, 47, 162, 30, 47, 162, 162, 162, 43, 162, 162, 6, 162, 24, 48, 162, 162, 162, 162, 162, 162, 44, 162, 47, 49, 56, 55, 48, 162, 162, 162, 53, 162, 50, 50, 162, 162, 162, 52, 162, 162, 51, 162, 23, 51, 162, 162, 162, 18, 162, 162, 162, 162, 26, 52, 162, 162, 162, 162, 162, 162, 18, 162, 21, 53, 54, 35, 44, 162, 162, 162, 162, 162, 52, 54, 162, 162, 162, 162, 162, 162, 162, 162, 52, 55, 162, 162, 162, 162, 162, 162, 35, 162, 6, 56, 162, 162, 162, 162, 162, 162, 54, 162, 50, 57, 140, 112, 138, 162, 162, 38, 37, 34, 58, 58, 162, 162, 162, 28, 162, 86, 68, 162, 59, 59, 29, 136, 123, 69, 10, 162, 60, 162, 11, 60, 30, 121, 109, 61, 41, 162, 162, 162, 31, 61, 24, 90, 62, 2, 42, 162, 162, 162, 25, 62, 108, 91, 106, 38, 162, 162, 162, 162, 63, 63, 9, 102, 67, 162, 162, 65, 64, 162, 10, 64, 52, 66, 56, 162, 162, 162, 65, 162, 21, 65, 18, 17, 54, 162, 162, 162, 162, 162, 19, 66, 162, 162, 162, 162, 162, 162, 17, 162, 51, 67, 162, 162, 162, 162, 162, 54, 56, 162, 68, 68, 162, 162, 162, 50, 162, 162, 86, 162, 69, 69, 23, 95, 70, 36, 21, 162, 61, 162, 22, 70, 107, 96, 103, 37, 162, 162, 62, 162, 71, 71, 28, 101, 78, 63, 162, 73, 72, 162, 29, 72, 50, 77, 76, 64, 162, 162, 73, 162, 23, 73, 51, 75, 74, 65, 162, 162, 162, 162, 26, 74, 162, 162, 162, 54, 162, 162, 162, 162, 64, 75, 162, 162, 162, 17, 162, 162, 162, 162, 65, 76, 162, 162, 162, 56, 162, 162, 74, 162, 72, 77, 162, 162, 162, 66, 162, 
162, 75, 162, 73, 78, 162, 162, 162, 67, 162, 74, 76, 162, 79, 79, 68, 100, 87, 72, 162, 162, 80, 162, 69, 80, 86, 85, 81, 73, 162, 162, 162, 162, 36, 81, 162, 162, 162, 74, 162, 162, 162, 162, 82, 82, 64, 84, 49, 162, 52, 162, 83, 162, 21, 83, 65, 34, 53, 162, 18, 162, 162, 162, 19, 84, 66, 162, 55, 162, 162, 162, 34, 162, 51, 85, 162, 162, 162, 75, 162, 162, 162, 162, 83, 86, 162, 162, 162, 51, 162, 162, 162, 162, 36, 87, 162, 162, 162, 76, 162, 162, 81, 162, 88, 88, 72, 99, 95, 82, 50, 162, 89, 162, 23, 89, 73, 94, 90, 83, 51, 162, 162, 162, 26, 90, 74, 93, 91, 53, 162, 162, 162, 162, 64, 91, 162, 162, 162, 44, 162, 162, 162, 162, 92, 92, 43, 162, 66, 162, 162, 162, 15, 162, 42, 93, 162, 162, 162, 35, 162, 162, 162, 162, 15, 94, 75, 162, 93, 34, 162, 162, 162, 162, 65, 95, 76, 98, 96, 49, 162, 162, 90, 162, 72, 96, 162, 162, 162, 48, 162, 162, 91, 162, 97, 97, 47, 162, 77, 92, 162, 162, 14, 162, 24, 98, 162, 162, 162, 55, 162, 162, 93, 162, 14, 99, 77, 162, 98, 84, 162, 162, 94, 162, 73, 100, 162, 162, 162, 77, 162, 162, 85, 162, 89, 101, 162, 162, 162, 102, 162, 75, 77, 162, 80, 102, 162, 162, 162, 162, 162, 17, 66, 162, 86, 103, 162, 162, 162, 45, 162, 162, 106, 162, 104, 104, 46, 162, 101, 105, 162, 14, 97, 162, 30, 105, 40, 162, 102, 162, 162, 15, 92, 162, 41, 106, 162, 162, 162, 39, 162, 162, 162, 162, 105, 107, 162, 162, 162, 27, 162, 162, 108, 162, 71, 108, 162, 162, 162, 8, 162, 162, 162, 162, 63, 109, 120, 119, 116, 62, 162, 162, 162, 162, 110, 110, 63, 115, 111, 162, 9, 83, 82, 162, 10, 111, 67, 114, 112, 162, 162, 53, 49, 162, 68, 112, 162, 162, 162, 162, 162, 44, 48, 162, 113, 113, 162, 162, 162, 47, 162, 162, 5, 162, 61, 114, 162, 162, 162, 162, 162, 35, 55, 162, 5, 115, 102, 162, 114, 162, 162, 34, 84, 162, 86, 116, 162, 162, 162, 106, 162, 162, 162, 162, 117, 117, 105, 162, 115, 162, 40, 32, 118, 162, 41, 118, 92, 162, 84, 162, 43, 162, 32, 162, 42, 119, 162, 162, 162, 91, 162, 162, 162, 162, 118, 120, 162, 162, 162, 108, 162, 162, 162, 162, 110, 121, 81, 122, 119, 90, 162, 162, 162, 162, 82, 122, 162, 162, 162, 93, 162, 162, 162, 162, 32, 123, 135, 134, 130, 70, 162, 162, 109, 162, 124, 124, 71, 129, 125, 110, 28, 89, 88, 162, 29, 125, 78, 128, 126, 111, 162, 90, 95, 162, 79, 126, 162, 162, 162, 112, 162, 91, 96, 162, 127, 127, 113, 162, 100, 97, 162, 162, 13, 162, 61, 128, 162, 162, 162, 114, 162, 93, 98, 162, 13, 129, 101, 162, 128, 115, 162, 94, 99, 162, 80, 130, 162, 162, 162, 103, 162, 162, 116, 162, 131, 131, 104, 162, 129, 117, 46, 133, 132, 162, 30, 132, 97, 162, 99, 118, 47, 162, 133, 162, 24, 133, 14, 162, 94, 32, 6, 162, 162, 162, 7, 134, 162, 162, 162, 96, 162, 162, 119, 162, 132, 135, 162, 162, 162, 107, 162, 162, 120, 162, 124, 136, 87, 137, 134, 95, 162, 162, 121, 162, 88, 137, 162, 162, 162, 98, 162, 162, 122, 162, 133, 138, 162, 162, 162, 162, 162, 39, 45, 35, 139, 139, 162, 162, 162, 46, 162, 5, 113, 162, 60, 140, 162, 162, 162, 162, 162, 8, 27, 17, 58, 141, 162, 162, 162, 140, 162, 108, 107, 75, 142, 142, 58, 148, 143, 71, 162, 80, 79, 162, 59, 143, 162, 162, 162, 78, 162, 81, 87, 162, 144, 144, 79, 147, 136, 88, 68, 162, 145, 162, 69, 145, 80, 146, 121, 89, 86, 162, 162, 162, 36, 146, 85, 162, 122, 94, 162, 162, 162, 162, 83, 147, 100, 162, 137, 99, 162, 162, 146, 162, 89, 148, 162, 162, 162, 101, 162, 85, 100, 162, 145, 149, 162, 162, 162, 141, 162, 120, 135, 85, 150, 150, 142, 155, 151, 124, 58, 145, 144, 162, 59, 151, 143, 154, 152, 125, 162, 121, 136, 162, 144, 152, 162, 162, 162, 126, 162, 119, 134, 162, 153, 153, 127, 162, 147, 132, 113, 162, 1, 
162, 61, 154, 162, 162, 162, 128, 162, 122, 137, 162, 1, 155, 148, 162, 154, 129, 162, 146, 147, 162, 145, 156, 141, 126, 157, 57, 162, 62, 70, 94, 142, 157, 162, 162, 162, 138, 162, 106, 103, 93, 158, 158, 139, 162, 148, 104, 162, 13, 127, 162, 60, 159, 149, 152, 160, 156, 162, 109, 123, 146, 150, 160, 162, 162, 162, 157, 162, 116, 130, 122, 161, 161, 158, 162, 155, 131, 139, 1, 153, 162, 60, }; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char athlon_load_state_alts[] ATTRIBUTE_UNUSED = { 1, 6, 24, 4, 2, 1, 1, 6, 1, 1, 1, 6, 0, 4, 2, 1, 0, 0, 0, 1, 1, 6, 24, 4, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 6, 1, 1, 1, 0, 0, 0, 2, 0, 1, 6, 1, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 3, 12, 2, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 6, 0, 1, 1, 0, 0, 0, 2, 0, 0, 3, 0, 1, 1, 6, 24, 4, 1, 1, 1, 6, 1, 1, 1, 3, 12, 2, 2, 0, 1, 6, 1, 1, 1, 3, 0, 2, 2, 0, 0, 0, 0, 1, 1, 3, 0, 2, 1, 0, 0, 0, 0, 1, 1, 3, 0, 2, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 1, 6, 1, 1, 1, 0, 0, 0, 1, 0, 0, 3, 0, 1, 1, 3, 12, 2, 1, 0, 1, 6, 1, 1, 1, 3, 12, 2, 1, 0, 0, 3, 0, 1, 1, 3, 12, 2, 1, 0, 0, 0, 0, 1, 1, 3, 12, 2, 0, 0, 1, 6, 1, 1, 1, 3, 12, 2, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 1, 6, 0, 1, 1, 3, 12, 2, 2, 0, 0, 3, 0, 1, 1, 3, 12, 2, 2, 0, 0, 0, 0, 1, 1, 6, 24, 4, 0, 1, 1, 6, 1, 1, 1, 6, 0, 4, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 3, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 6, 24, 4, 0, 1, 0, 3, 0, 1, 1, 3, 6, 1, 0, 0, 0, 3, 0, 1, 1, 3, 6, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 6, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 1, 6, 0, 1, 1, 0, 0, 0, 1, 0, 0, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 3, 3, 1, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 3, 3, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 3, 6, 1, 0, 0, 1, 6, 1, 1, 1, 0, 0, 0, 2, 0, 1, 6, 0, 1, 1, 6, 24, 4, 2, 1, 0, 3, 0, 1, 1, 6, 24, 4, 2, 1, 0, 0, 0, 1, 1, 6, 24, 4, 1, 1, 0, 0, 0, 1, 1, 3, 6, 1, 1, 0, 0, 0, 0, 1, 1, 3, 6, 2, 0, 0, 1, 6, 0, 1, 1, 3, 6, 2, 0, 0, 0, 3, 0, 1, 1, 3, 6, 2, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 0, 0, 1, 6, 0, 1, 1, 0, 0, 0, 2, 0, 0, 3, 0, 1, 1, 6, 24, 4, 1, 1, 0, 3, 0, 1, 1, 3, 6, 1, 1, 0, 0, 3, 0, 1, 1, 3, 6, 2, 1, 0, 1, 6, 0, 1, 1, 3, 6, 2, 1, 0, 0, 3, 0, 1, 1, 3, 6, 2, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 1, 6, 0, 1, 1, 3, 6, 2, 2, 0, 0, 3, 0, 1, 1, 3, 6, 2, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 6, 12, 4, 0, 1, 0, 3, 0, 1, 1, 6, 12, 4, 0, 1, 0, 0, 0, 1, 1, 3, 0, 1, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 2, 0, 0, 3, 0, 1, 1, 6, 12, 4, 1, 1, 0, 3, 0, 1, 1, 6, 12, 4, 1, 1, 0, 0, 0, 1, 1, 3, 3, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 3, 0, 2, 0, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 3, 0, 1, 1, 0, 0, 0, 0, 1, 1, 3, 3, 1, 1, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 0, 3, 0, 1, 1, 3, 0, 2, 1, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 0, 3, 0, 1, 1, 3, 0, 1, 1, 0, 0, 
3, 0, 1, 1, 0, 0, 0, 2, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 1, 6, 0, 1, 1, 0, 0, 0, 0, 0, 1, 6, 0, 1, 1, 0, 0, 0, 1, 0, 0, 3, 0, 1, 1, 3, 0, 2, 1, 0, 1, 6, 0, 1, 1, 3, 0, 2, 0, 0, 1, 6, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 3, 6, 1, 2, 0, 0, 0, 0, 1, 1, 6, 12, 4, 0, 1, 1, 6, 0, 1, 1, 3, 3, 1, 0, 0, 1, 6, 0, 1, 1, 0, 0, 0, 0, 0, 1, 6, 0, 1, 1, 0, 0, 0, 2, 0, 0, 3, 0, 1, 1, 0, 0, 0, 0, 0, 1, 6, 0, 1, 1, 3, 0, 1, 0, 0, 1, 6, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 6, 0, 4, 0, 1, 1, 6, 0, 1, 1, 6, 0, 4, 0, 1, 0, 3, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 3, 3, 1, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 1, 1, 3, 6, 1, 2, 0, 0, 3, 0, 1, 1, 6, 12, 4, 1, 1, 1, 6, 0, 1, 1, 3, 3, 1, 1, 0, 1, 6, 0, 1, 1, 0, 0, 0, 1, 0, 1, 6, 0, 1, 1, 3, 0, 2, 2, 0, 0, 3, 0, 1, 1, 0, 0, 0, 1, 0, 1, 6, 0, 1, 1, 3, 0, 1, 1, 0, 1, 6, 0, 1, 1, 0, 0, 0, 2, 0, 0, 3, 0, 1, 1, 6, 0, 4, 1, 1, 1, 6, 0, 1, 1, 6, 0, 4, 1, 1, 0, 3, 0, 1, 1, 6, 0, 4, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 2, 0, 0, 3, 0, 1, 1, 0, 0, 0, 2, 0, 0, 3, 0, 1, 1, 3, 3, 1, 2, 0, 0, 3, 0, 1, 1, 0, 0, 0, 2, 0, 0, 3, 0, 1, 1, 0, 0, 0, 0, 0, 1, 6, 1, 1, 1, 0, 0, 0, 2, 0, 1, 6, 0, 1, 1, 0, 0, 0, 0, 0, 1, 6, 1, 1, 1, 0, 0, 0, 1, 0, 1, 6, 1, 1, 1, 3, 6, 2, 2, 0, 1, 6, 0, 1, 1, 0, 0, 0, 2, 0, 1, 6, 0, 1, 1, 6, 12, 4, 2, 1, 0, 3, 0, 1, 1, 6, 12, 4, 2, 1, 0, 0, 0, 1, 1, 3, 0, 1, 2, 0, 0, 0, 0, 1, 1, 3, 0, 1, 2, 0, 0, 3, 0, 1, 1, 0, 0, 0, 2, 0, 1, 6, 0, 1, 1, 0, 0, 0, 2, 0, 1, 6, 1, 1, 1, 6, 12, 4, 2, 1, 1, 6, 0, 1, 1, 3, 3, 1, 2, 0, 1, 6, 0, 1, 1, 0, 0, 0, 2, 0, 1, 6, 0, 1, 1, 6, 0, 4, 2, 1, 0, 3, 0, 1, 1, 0, 0, 0, 2, 0, 1, 6, 0, 1, 1, 3, 0, 1, 2, 0, 1, 6, 0, 1, 1, 3, 6, 1, 1, 0, 1, 6, 1, 1, 1, 0, 0, 0, 1, 0, 1, 6, 1, 1, 1, 3, 0, 2, 2, 0, 1, 6, 0, 1, 1, 3, 6, 1, 2, 0, 1, 6, 1, 1, 1, 0, 0, 0, 2, 0, 1, 6, 1, 1, 1, 6, 0, 4, 2, 1, 1, 6, 0, 1, }; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. 
*/ static const unsigned char athlon_load_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 16, 0, 34, 32, 0, 0, 32, 17, 16, 2, 34, 18, 0, 0, 1, 17, 1, 0, 0, 1, 17, 1, 34, 32, 1, 17, 4, 34, 32, 0, 0, 35, 17, 16, 3, 51, 35, 17, 48, 2, 34, 18, 0, 32, 1, 17, 1, 16, 16, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 16, 1, 34, 32, 0, 16, 4, 34, 32, 0, 64, 52, 34, 32, 3, 51, 35, 17, 16, 4, 68, 53, 50, 48, 3, 51, 36, 33, 32, 2, 34, 19, 16, 16, 1, 17, 2, 0, 0, 1, 17, 3, 16, 16, 0, 0, 2, 0, 0, 0, 0, 3, 16, 16, 0, 0, 3, 17, 16, 0, 0, 18, 0, 0, 0, 0, 19, 16, 16, 2, 34, 19, 16, 48, 1, 17, 2, 0, 32, 0, 0, 1, 16, 16, 0, 0, 1, 17, 16, 0, 0, 16, 0, 0, 0, 64, 48, 34, 32, 4, 68, 52, 34, 32, 0, 64, 53, 50, 48, 5, 85, 69, 51, 48, 0, 0, 16, 16, 16, 0, 0, 19, 16, 48, 0, 0, 35, 17, 48, 3, 51, 35, 17, 48, 2, 34, 18, 0, 32, 1, 17, 1, 17, 16, 1, 17, 3, 17, 16, 2, 34, 20, 32, 32, 3, 51, 37, 49, 48, 2, 34, 19, 16, 48, 1, 17, 2, 0, 32, 1, 17, 4, 32, 32, 2, 34, 21, 48, 48, 0, 0, 21, 48, 48, 1, 17, 4, 32, 32, 1, 17, 4, 33, 32, 2, 34, 20, 32, 32, 0, 0, 37, 49, 48, 3, 51, 37, 49, 48, 2, 34, 21, 48, 48, 2, 34, 21, 48, 48, 0, 0, 18, 0, 0, 1, 17, 1, 0, 32, 0, 0, 0, 16, 16, 0, 0, 0, 17, 16, 0, 0, 0, 17, 16, 0, 0, 3, 17, 48, 0, 0, 18, 0, 32, 0, 0, 20, 32, 32, 0, 0, 36, 33, 32, 2, 34, 21, 48, 48, 2, 34, 18, 0, 48, 1, 17, 1, 32, 32, 0, 0, 0, 16, 16, 0, 0, 3, 16, 48, 0, 0, 2, 0, 32, 0, 0, 4, 32, 32, 0, 0, 4, 33, 32, 1, 17, 5, 49, 48, 1, 17, 5, 50, 48, 1, 17, 5, 48, 48, 1, 17, 5, 48, 48, 1, 17, 2, 0, 48, 0, 0, 1, 32, 32, 0, 0, 1, 33, 32, 1, 17, 1, 49, 48, 0, 0, 16, 32, 32, 0, 0, 32, 33, 32, 0, 32, 21, 48, 48, 1, 17, 1, 50, 48, 1, 17, 1, 33, 32, 1, 17, 1, 48, 48, 0, 0, 0, 32, 32, 0, 0, 0, 33, 32, 0, 0, 5, 49, 48, 1, 49, 5, 49, 48, 0, 32, 20, 32, 32, 1, 81, 5, 51, 48, 0, 16, 5, 50, 48, 0, 0, 5, 48, 48, 1, 33, 5, 48, 48, 0, 16, 4, 32, 32, 1, 33, 5, 48, 48, 0, 16, 5, 48, 48, 1, 17, 1, 48, 48, 1, 17, 2, 0, 48, 2, 34, 18, 0, 48, 1, 33, 3, 16, 48, 0, 16, 2, 0, 32, 0, 32, 18, 0, 32, 1, 49, 3, 17, 48, 1, 17, 3, 16, 48, 1, 17, 3, 17, 48, 0, 0, 1, 17, 48, 0, 0, 16, 0, 32, 0, 0, 18, 0, 48, 2, 34, 18, 0, 48, 1, 17, 1, 32, 32, 2, 34, 18, 0, 48, 0, 32, 18, 0, 48, 1, 49, 1, 17, 48, 0, 32, 16, 0, 32, 0, 32, 16, 32, 32, 1, 49, 1, 49, 48, 1, 17, 1, 17, 48, 0, 0, 1, 49, 48, 1, 81, 1, 51, 48, 0, 0, 1, 16, 48, 0, 0, 0, 0, 32, 0, 0, 2, 0, 48, 1, 33, 2, 0, 48, 0, 16, 1, 32, 32, 1, 33, 2, 0, 48, 0, 16, 2, 0, 48, 1, 33, 1, 16, 48, 0, 16, 0, 0, 32, 0, 16, 0, 32, 32, 0, 16, 0, 34, 32, 1, 33, 1, 48, 48, 1, 17, 1, 16, 48, 0, 0, 1, 48, 48, 1, 33, 1, 48, 48, 2, 34, 18, 0, 0, 1, 17, 1, 0, 32, 2, 34, 18, 0, 0, 1, 17, 2, 0, 0, 0, 0, 1, 0, 32, 1, 17, 1, 0, 48, 0, 0, 0, 32, 32, 0, 0, 0, 33, 32, 0, 16, 1, 50, 48, 0, 16, 1, 48, 48, 1, 17, 1, 0, 48, 1, 17, 1, 0, 0, 0, 0, 0, 0, 32, 0, 0, 1, 0, 48, 1, 33, 1, 0, 48, 0, 16, 0, 32, 32, 1, 33, 1, 0, 48, 0, 16, 1, 0, 48, 0, 0, 2, 0, 0, 1, 33, 2, 0, 0, 0, 16, 1, 0, 32, 0, 0, 1, 0, 0, 1, 33, 1, 0, 0, 0, 16, 0, 0, 32, }; /* Vector for locked state flags. 
*/ static const unsigned char athlon_load_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. */ static const unsigned char athlon_load_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char athlon_mult_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}; /* Vector for state transitions. */ static const unsigned char athlon_mult_transitions[] ATTRIBUTE_UNUSED = { 0, 4, 1, 0, 1, 5, 16, 2, 2, 7, 15, 3, 3, 10, 6, 4, 4, 16, 5, 0, 5, 16, 16, 2, 6, 11, 16, 7, 7, 16, 8, 3, 8, 16, 16, 9, 9, 13, 12, 10, 10, 16, 11, 4, 11, 16, 16, 7, 12, 14, 16, 13, 13, 16, 14, 10, 14, 16, 16, 13, 15, 8, 16, 9}; #if AUTOMATON_STATE_ALTS /* Vector for state insn alternatives. */ static const unsigned char athlon_mult_state_alts[] ATTRIBUTE_UNUSED = { 1, 1, 2, 1, 1, 1, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 2, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 2, 1, 1, 0, 0, 1, 1, 1, 2, 1, 1, 0, 2, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 2, 1, 1, 0, 0, 1, 1, 1, 0, 1}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. */ static const unsigned char athlon_mult_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 16, 0, 0, 0, 0, 1, 0, 1, 16, 0, 16, 1, 0, 1, 16, 0, 0, 2, 0, 2, 16, 0, 16, 3, 0, 4, 16, 0, 16}; /* Vector for locked state flags. */ static const unsigned char athlon_mult_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. 
*/ static const unsigned char athlon_mult_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ /* Vector translating external insn codes to internal ones.*/ static const unsigned char athlon_fp_translate[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 3, 4, 5, 6, 4, 4, 4, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 1, 1, 1, 1, 1, 1, 8, 8, 8, 8, 8, 8, 3, 4, 10, 3, 11, 4, 3, 4, 10, 10, 4, 7, 7, 7, 9, 9, 7, 7, 12, 12, 12, 9, 8, 8, 8, 13, 13, 13, 13, 8, 8, 8, 8, 8, 8, 13, 13, 13, 13, 4, 4, 10, 10, 4, 10, 10, 4, 14, 15, 16, 15, 14, 1, 1, 4, 9, 9, 9, 12, 12, 12, 12, 17, 17, 17, 18, 18, 19, 19, 20}; /* Comb vector for state transitions. */ static const unsigned short athlon_fp_transitions[] ATTRIBUTE_UNUSED = { 0, 1, 3, 419, 419, 5, 6, 18, 416, 18, 424, 424, 68, 460, 10, 462, 9, 244, 443, 202, 0, 2, 1, 3, 11, 11, 5, 6, 18, 15, 18, 21, 21, 68, 457, 10, 459, 9, 244, 443, 463, 446, 411, 1, 3, 412, 412, 5, 6, 18, 429, 18, 432, 432, 68, 434, 10, 440, 9, 244, 443, 463, 0, 446, 1, 3, 400, 400, 5, 6, 18, 447, 18, 450, 450, 68, 452, 10, 454, 9, 244, 443, 416, 411, 1, 417, 417, 463, 3, 19, 463, 19, 418, 418, 71, 463, 4, 422, 463, 256, 428, 382, 0, 460, 2, 5, 420, 420, 4, 6, 435, 463, 435, 425, 425, 74, 5, 7, 461, 463, 260, 439, 386, 416, 15, 6, 8, 16, 16, 7, 9, 19, 463, 19, 25, 25, 71, 8, 463, 455, 419, 256, 428, 12, 446, 10, 9, 12, 417, 12, 10, 22, 69, 420, 463, 13, 14, 245, 421, 203, 0, 424, 463, 17, 22, 1, 463, 20, 22, 418, 22, 463, 73, 73, 425, 11, 15, 463, 258, 427, 384, 419, 429, 18, 463, 413, 413, 21, 463, 19, 463, 19, 430, 430, 71, 23, 463, 431, 463, 256, 428, 434, 0, 463, 414, 414, 24, 463, 435, 463, 435, 433, 433, 74, 26, 12, 437, 463, 260, 439, 447, 416, 463, 401, 401, 28, 16, 19, 463, 19, 448, 448, 71, 31, 27, 449, 463, 256, 428, 452, 411, 463, 444, 444, 34, 29, 435, 463, 435, 451, 451, 74, 37, 32, 453, 463, 260, 439, 457, 429, 40, 456, 456, 463, 35, 435, 463, 435, 445, 445, 74, 463, 38, 458, 462, 260, 439, 441, 447, 43, 41, 441, 422, 441, 463, 76, 76, 461, 463, 46, 49, 262, 442, 388, 424, 11, 52, 55, 12, 44, 463, 58, 12, 16, 12, 61, 22, 69, 456, 47, 50, 21, 245, 421, 22, 446, 53, 56, 22, 25, 22, 59, 73, 73, 445, 62, 463, 400, 258, 427, 12, 400, 463, 64, 12, 401, 12, 67, 22, 69, 444, 463, 463, 412, 245, 421, 12, 411, 463, 70, 12, 413, 12, 65, 22, 69, 414, 68, 463, 432, 245, 421, 22, 0, 463, 72, 22, 430, 22, 19, 73, 73, 433, 463, 463, 440, 258, 427, 441, 419, 463, 75, 441, 431, 441, 22, 76, 76, 437, 463, 463, 450, 262, 442, 22, 424, 463, 77, 22, 448, 22, 13, 73, 73, 451, 463, 463, 454, 258, 427, 441, 412, 463, 78, 441, 449, 441, 69, 76, 76, 453, 463, 463, 459, 262, 442, 441, 432, 80, 463, 441, 455, 441, 71, 76, 76, 458, 463, 82, 463, 262, 442, 18, 450, 463, 12, 12, 463, 73, 19, 19, 463, 22, 22, 463, 435, 29, 441, 17, 30, 30, 463, 2, 187, 187, 463, 189, 189, 463, 191, 32, 193, 463, 33, 33, 463, 32, 178, 178, 463, 180, 180, 463, 182, 35, 184, 463, 36, 36, 463, 35, 169, 169, 463, 171, 171, 463, 173, 38, 175, 463, 39, 39, 463, 38, 160, 160, 463, 162, 162, 463, 164, 41, 
166, 463, 42, 42, 463, 41, 151, 151, 463, 153, 153, 463, 155, 44, 157, 463, 45, 45, 463, 44, 142, 142, 463, 144, 144, 463, 146, 47, 148, 463, 48, 48, 463, 47, 133, 133, 463, 135, 135, 463, 137, 50, 139, 463, 51, 51, 463, 50, 124, 124, 463, 126, 126, 463, 128, 53, 130, 463, 54, 54, 463, 53, 115, 115, 463, 117, 117, 463, 119, 56, 121, 463, 57, 57, 463, 56, 106, 106, 463, 108, 108, 463, 110, 59, 112, 463, 60, 60, 463, 59, 97, 97, 463, 99, 99, 463, 101, 62, 103, 463, 63, 63, 463, 62, 88, 88, 463, 90, 90, 463, 92, 65, 94, 463, 66, 66, 463, 65, 79, 79, 463, 81, 81, 463, 83, 68, 85, 463, 69, 69, 463, 68, 71, 71, 463, 73, 73, 463, 74, 199, 76, 463, 200, 200, 463, 18, 391, 391, 463, 393, 393, 463, 395, 202, 397, 463, 203, 203, 463, 202, 382, 382, 463, 384, 384, 463, 386, 205, 388, 463, 206, 206, 463, 205, 373, 373, 463, 375, 375, 463, 377, 208, 379, 463, 209, 209, 463, 208, 364, 364, 463, 366, 366, 463, 368, 211, 370, 463, 212, 212, 463, 211, 355, 355, 463, 357, 357, 463, 359, 214, 361, 463, 215, 215, 463, 214, 346, 346, 463, 348, 348, 463, 350, 217, 352, 463, 218, 218, 463, 217, 337, 337, 463, 339, 339, 463, 341, 220, 343, 463, 221, 221, 463, 220, 328, 328, 463, 330, 330, 463, 332, 223, 334, 463, 224, 224, 463, 223, 319, 319, 463, 321, 321, 463, 323, 226, 325, 463, 227, 227, 463, 226, 310, 310, 463, 312, 312, 463, 314, 229, 316, 463, 230, 230, 463, 229, 301, 301, 463, 303, 303, 463, 305, 232, 307, 463, 233, 233, 463, 232, 292, 292, 463, 294, 294, 463, 296, 235, 298, 463, 236, 236, 463, 235, 283, 283, 463, 285, 285, 463, 287, 238, 289, 463, 239, 239, 463, 238, 274, 274, 463, 276, 276, 463, 278, 241, 280, 463, 242, 242, 463, 241, 265, 265, 463, 267, 267, 463, 269, 244, 271, 463, 245, 245, 463, 244, 256, 256, 463, 258, 258, 463, 260, 247, 262, 463, 27, 27, 463, 247, 248, 248, 463, 250, 250, 84, 252, 403, 254, 86, 197, 197, 463, 29, 404, 404, 87, 406, 406, 463, 408, 417, 410, 89, 1, 23, 91, 199, 1, 66, 1, 463, 13, 17, 463, 93, 79, 95, 246, 402, 204, 0, 418, 81, 96, 13, 67, 98, 100, 13, 463, 13, 463, 23, 23, 77, 102, 63, 104, 26, 196, 380, 419, 420, 88, 105, 14, 90, 64, 107, 14, 463, 14, 463, 24, 70, 86, 109, 60, 111, 255, 415, 381, 416, 422, 97, 113, 20, 463, 99, 114, 20, 463, 20, 463, 72, 72, 61, 116, 95, 118, 257, 423, 383, 424, 425, 57, 120, 24, 463, 106, 122, 24, 463, 24, 123, 10, 10, 108, 463, 58, 463, 259, 426, 385, 417, 443, 104, 125, 421, 421, 54, 463, 428, 428, 115, 427, 427, 463, 439, 461, 442, 127, 436, 463, 129, 403, 436, 117, 436, 463, 75, 75, 463, 131, 132, 134, 261, 438, 387, 418, 16, 55, 136, 1, 113, 463, 138, 1, 463, 1, 140, 13, 17, 51, 124, 126, 25, 246, 402, 13, 446, 463, 52, 13, 463, 13, 122, 23, 23, 463, 48, 463, 401, 26, 196, 1, 400, 463, 141, 1, 463, 1, 143, 13, 17, 463, 463, 463, 413, 246, 402, 1, 411, 463, 145, 1, 463, 1, 133, 13, 17, 463, 135, 463, 414, 246, 402, 14, 0, 463, 147, 14, 463, 14, 49, 24, 70, 463, 463, 463, 430, 255, 415, 13, 416, 463, 149, 13, 463, 13, 131, 23, 23, 463, 463, 463, 431, 26, 196, 20, 419, 463, 150, 20, 463, 20, 45, 72, 72, 463, 463, 463, 433, 257, 423, 24, 424, 463, 152, 24, 463, 24, 142, 10, 10, 463, 463, 463, 437, 259, 426, 436, 417, 463, 154, 436, 463, 436, 144, 75, 75, 463, 463, 463, 444, 261, 438, 14, 418, 463, 156, 14, 463, 14, 46, 24, 70, 463, 463, 463, 445, 255, 415, 24, 429, 463, 158, 24, 463, 24, 140, 10, 10, 463, 463, 463, 448, 259, 426, 13, 401, 463, 159, 13, 463, 13, 42, 23, 23, 463, 463, 463, 449, 26, 196, 20, 412, 463, 161, 20, 463, 20, 151, 72, 72, 463, 463, 463, 451, 257, 423, 24, 432, 463, 163, 24, 463, 24, 153, 10, 10, 463, 463, 
463, 453, 259, 426, 436, 413, 463, 165, 436, 463, 436, 43, 75, 75, 463, 463, 463, 455, 261, 438, 20, 430, 463, 167, 20, 463, 20, 149, 72, 72, 463, 463, 463, 456, 257, 423, 14, 450, 463, 168, 14, 463, 14, 39, 24, 70, 463, 463, 463, 458, 255, 415, 436, 447, 463, 463, 436, 170, 436, 160, 75, 75, 463, 172, 174, 12, 261, 438, 1, 448, 463, 19, 1, 1, 1, 1, 13, 162, 14, 176, 463, 13, 13, 40, 158, 2, 20, 22, 463, 463, 13, 2, 463, 177, 13, 13, 179, 181, 23, 36, 24, 27, 463, 463, 28, 183, 463, 11, 28, 28, 185, 186, 194, 169, 195, 30, 171, 37, 31, 188, 463, 29, 31, 31, 190, 167, 185, 192, 186, 33, 33, 178, 34, 194, 463, 32, 34, 34, 195, 180, 176, 196, 177, 36, 34, 463, 37, 176, 463, 35, 37, 37, 198, 30, 167, 201, 168, 39, 187, 463, 40, 197, 463, 38, 40, 40, 204, 207, 158, 210, 159, 42, 199, 463, 43, 202, 463, 41, 43, 43, 213, 216, 149, 219, 150, 45, 205, 208, 46, 211, 463, 44, 46, 46, 222, 225, 140, 228, 141, 48, 214, 217, 49, 220, 463, 47, 49, 49, 231, 234, 131, 237, 132, 51, 223, 226, 52, 229, 463, 50, 52, 52, 240, 243, 122, 246, 123, 54, 232, 235, 55, 238, 463, 53, 55, 55, 249, 251, 113, 253, 114, 57, 241, 244, 58, 247, 463, 56, 58, 58, 255, 257, 104, 259, 105, 60, 189, 31, 61, 185, 463, 59, 61, 61, 261, 263, 95, 264, 96, 63, 248, 250, 64, 28, 463, 62, 64, 64, 266, 268, 86, 270, 87, 66, 194, 245, 67, 256, 463, 65, 67, 67, 463, 463, 77, 463, 78, 69, 258, 246, 17, 26, 463, 68, 17, 17, 463, 463, 23, 71, 70, 463, 17, 17, 272, 73, 463, 18, 23, 23, 23, 463, 23, 23, 72, 463, 10, 74, 10, 18, 70, 70, 273, 76, 242, 12, 72, 10, 10, 463, 72, 72, 75, 463, 75, 79, 75, 19, 67, 67, 275, 81, 265, 22, 77, 77, 77, 463, 77, 77, 80, 463, 82, 83, 82, 68, 78, 78, 277, 85, 267, 69, 80, 82, 82, 463, 80, 80, 84, 463, 84, 88, 84, 71, 64, 64, 279, 90, 243, 73, 86, 86, 86, 463, 86, 86, 89, 463, 91, 92, 91, 65, 87, 87, 281, 94, 263, 66, 89, 91, 91, 463, 89, 89, 93, 463, 93, 97, 93, 79, 61, 61, 282, 99, 239, 81, 95, 95, 95, 463, 95, 95, 98, 463, 100, 101, 100, 62, 96, 96, 284, 103, 274, 63, 98, 100, 100, 463, 98, 98, 102, 463, 102, 106, 102, 88, 58, 58, 286, 108, 276, 90, 104, 104, 104, 463, 104, 104, 107, 463, 109, 110, 109, 59, 105, 105, 288, 112, 240, 60, 107, 109, 109, 463, 107, 107, 111, 463, 111, 115, 111, 97, 55, 55, 290, 117, 272, 99, 113, 113, 113, 463, 113, 113, 116, 463, 118, 119, 118, 56, 114, 114, 291, 121, 236, 57, 116, 118, 118, 463, 116, 116, 120, 463, 120, 124, 120, 106, 52, 52, 293, 126, 283, 108, 122, 122, 122, 463, 122, 122, 125, 463, 127, 128, 127, 53, 123, 123, 295, 130, 285, 54, 125, 127, 127, 463, 125, 125, 129, 463, 129, 133, 129, 115, 49, 49, 297, 135, 237, 117, 131, 131, 131, 463, 131, 131, 134, 463, 136, 137, 136, 50, 132, 132, 299, 139, 281, 51, 134, 136, 136, 463, 134, 134, 138, 463, 138, 142, 138, 124, 46, 46, 300, 144, 233, 126, 140, 140, 140, 463, 140, 140, 143, 463, 145, 146, 145, 47, 141, 141, 302, 148, 292, 48, 143, 145, 145, 463, 143, 143, 147, 463, 147, 151, 147, 133, 43, 43, 304, 153, 294, 135, 149, 149, 149, 463, 149, 149, 152, 463, 154, 155, 154, 44, 150, 150, 306, 157, 234, 45, 152, 154, 154, 463, 152, 152, 156, 463, 156, 160, 156, 142, 40, 40, 308, 162, 290, 144, 158, 158, 158, 463, 158, 158, 161, 463, 163, 164, 163, 41, 159, 159, 309, 166, 230, 42, 161, 163, 163, 463, 161, 161, 165, 463, 165, 169, 165, 151, 37, 37, 311, 171, 301, 153, 167, 167, 167, 463, 167, 167, 170, 463, 172, 173, 172, 38, 168, 168, 313, 175, 303, 39, 170, 172, 172, 463, 170, 170, 174, 463, 174, 178, 174, 160, 34, 34, 315, 180, 231, 162, 176, 176, 176, 463, 176, 176, 179, 463, 181, 182, 181, 35, 177, 177, 317, 184, 
299, 36, 179, 181, 181, 463, 179, 179, 183, 463, 183, 187, 183, 169, 31, 31, 318, 189, 227, 171, 185, 185, 185, 463, 185, 185, 188, 320, 190, 191, 190, 32, 186, 186, 322, 193, 310, 33, 188, 190, 190, 463, 188, 188, 192, 324, 192, 312, 192, 178, 197, 463, 463, 198, 228, 180, 326, 198, 198, 327, 329, 398, 331, 399, 200, 308, 463, 201, 333, 463, 199, 201, 201, 335, 336, 389, 224, 390, 203, 319, 321, 204, 225, 463, 202, 204, 204, 338, 317, 380, 340, 381, 206, 221, 328, 207, 342, 463, 205, 207, 207, 344, 345, 371, 347, 372, 209, 330, 463, 210, 222, 463, 208, 210, 210, 349, 326, 362, 351, 363, 212, 218, 337, 213, 339, 463, 211, 213, 213, 353, 354, 353, 356, 354, 215, 219, 463, 216, 335, 463, 214, 216, 216, 358, 360, 344, 362, 345, 218, 215, 346, 219, 348, 463, 217, 219, 219, 363, 365, 335, 367, 336, 221, 216, 344, 222, 212, 463, 220, 222, 222, 369, 371, 326, 372, 327, 224, 355, 357, 225, 213, 463, 223, 225, 225, 374, 376, 317, 378, 318, 227, 353, 209, 228, 364, 463, 226, 228, 228, 380, 381, 308, 383, 309, 230, 366, 210, 231, 362, 463, 229, 231, 231, 385, 387, 299, 389, 300, 233, 206, 373, 234, 375, 463, 232, 234, 234, 390, 392, 290, 394, 291, 236, 207, 371, 237, 203, 463, 235, 237, 237, 396, 398, 281, 399, 282, 239, 382, 384, 240, 204, 463, 238, 240, 240, 402, 405, 272, 407, 273, 242, 380, 200, 243, 391, 463, 241, 243, 243, 463, 463, 263, 463, 264, 245, 403, 393, 246, 201, 463, 244, 246, 246, 463, 463, 26, 248, 255, 463, 28, 28, 409, 250, 463, 247, 194, 194, 194, 463, 194, 194, 249, 463, 251, 252, 251, 29, 195, 195, 415, 254, 389, 30, 249, 251, 251, 463, 249, 249, 253, 463, 253, 256, 253, 187, 246, 246, 423, 258, 404, 189, 26, 26, 26, 463, 26, 26, 257, 463, 259, 260, 259, 247, 255, 255, 426, 262, 406, 27, 257, 259, 259, 463, 257, 257, 261, 463, 261, 265, 261, 248, 243, 243, 436, 267, 198, 250, 263, 263, 263, 463, 263, 263, 266, 463, 268, 269, 268, 244, 264, 264, 438, 271, 25, 245, 266, 268, 268, 463, 266, 266, 270, 463, 270, 274, 270, 256, 240, 240, 463, 276, 398, 258, 272, 272, 272, 463, 272, 272, 275, 463, 277, 278, 277, 241, 273, 273, 463, 280, 463, 242, 275, 277, 277, 463, 275, 275, 279, 463, 279, 283, 279, 265, 237, 237, 463, 285, 463, 267, 281, 281, 281, 463, 281, 281, 284, 463, 286, 287, 286, 238, 282, 282, 463, 289, 463, 239, 284, 286, 286, 463, 284, 284, 288, 463, 288, 292, 288, 274, 234, 234, 463, 294, 463, 276, 290, 290, 290, 463, 290, 290, 293, 463, 295, 296, 295, 235, 291, 291, 463, 298, 463, 236, 293, 295, 295, 463, 293, 293, 297, 463, 297, 301, 297, 283, 231, 231, 463, 303, 463, 285, 299, 299, 299, 463, 299, 299, 302, 463, 304, 305, 304, 232, 300, 300, 463, 307, 463, 233, 302, 304, 304, 463, 302, 302, 306, 463, 306, 310, 306, 292, 228, 228, 463, 312, 463, 294, 308, 308, 308, 463, 308, 308, 311, 463, 313, 314, 313, 229, 309, 309, 463, 316, 463, 230, 311, 313, 313, 463, 311, 311, 315, 463, 315, 319, 315, 301, 225, 225, 463, 321, 463, 303, 317, 317, 317, 463, 317, 317, 320, 463, 322, 323, 322, 226, 318, 318, 463, 325, 463, 227, 320, 322, 322, 463, 320, 320, 324, 463, 324, 328, 324, 310, 222, 222, 463, 330, 463, 312, 326, 326, 326, 463, 326, 326, 329, 463, 331, 332, 331, 223, 327, 327, 463, 334, 463, 224, 329, 331, 331, 463, 329, 329, 333, 463, 333, 337, 333, 319, 219, 219, 463, 339, 463, 321, 335, 335, 335, 463, 335, 335, 338, 463, 340, 341, 340, 220, 336, 336, 463, 343, 463, 221, 338, 340, 340, 463, 338, 338, 342, 463, 342, 346, 342, 328, 216, 216, 463, 348, 463, 330, 344, 344, 344, 463, 344, 344, 347, 463, 349, 350, 349, 217, 345, 345, 463, 352, 463, 218, 347, 349, 349, 463, 347, 
347, 351, 463, 351, 355, 351, 337, 213, 213, 463, 357, 463, 339, 353, 353, 353, 463, 353, 353, 356, 463, 358, 359, 358, 214, 354, 354, 463, 361, 463, 215, 356, 358, 358, 463, 356, 356, 360, 463, 360, 364, 360, 346, 210, 210, 463, 366, 463, 348, 362, 362, 362, 463, 362, 362, 365, 463, 367, 368, 367, 211, 363, 363, 463, 370, 463, 212, 365, 367, 367, 463, 365, 365, 369, 463, 369, 373, 369, 355, 207, 207, 463, 375, 463, 357, 371, 371, 371, 463, 371, 371, 374, 463, 376, 377, 376, 208, 372, 372, 463, 379, 463, 209, 374, 376, 376, 463, 374, 374, 378, 463, 378, 382, 378, 364, 204, 204, 463, 384, 463, 366, 380, 380, 380, 463, 380, 380, 383, 463, 385, 386, 385, 205, 381, 381, 463, 388, 463, 206, 383, 385, 385, 463, 383, 383, 387, 463, 387, 391, 387, 373, 201, 201, 463, 393, 463, 375, 389, 389, 389, 463, 389, 389, 392, 463, 394, 395, 394, 202, 390, 390, 463, 397, 463, 203, 392, 394, 394, 463, 392, 392, 396, 463, 396, 404, 396, 382, 198, 198, 463, 406, 463, 384, 398, 398, 398, 463, 398, 398, 405, 463, 407, 408, 407, 199, 399, 399, 463, 410, 463, 200, 405, 407, 407, 463, 405, 405, 409, 463, 409, 463, 409, 391, 421, 463, 463, 402, 463, 393, 463, 402, 402, 463, 463, 196, 463, 415, 427, 463, 463, 196, 463, 463, 403, 196, 196, 463, 463, 426, 428, 426, 435, 402, 402, 14, 14, 463, 197, 463, 196, 196, 24, 24, 463, 423, 463, 436, 463, 463, 403, 439, 15, 463, 415, 415, 441, 463, 463, 20, 463, 426, 426, 20, 20, 463, 438, 72, 463, 436, 442, 404, 463, 423, 463, 463, 21, 423, 423, 463, 463, 438, 463, 438, 463, 463, 463, 463, 463, 463, 406}; /* Check vector for state transitions. */ static const unsigned short athlon_fp_check[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 463, 2, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 463, 411, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 416, 446, 1, 416, 416, 463, 3, 416, 463, 416, 416, 416, 416, 463, 4, 416, 463, 416, 416, 416, 416, 460, 1, 5, 460, 460, 3, 6, 460, 463, 460, 460, 460, 460, 4, 7, 460, 463, 460, 460, 460, 460, 15, 5, 8, 15, 15, 6, 9, 15, 463, 15, 15, 15, 15, 7, 463, 15, 419, 15, 15, 419, 15, 10, 8, 419, 419, 419, 9, 419, 419, 419, 463, 13, 14, 419, 419, 419, 419, 424, 463, 17, 424, 10, 463, 20, 424, 424, 424, 463, 424, 424, 424, 13, 14, 463, 424, 424, 424, 424, 429, 17, 463, 429, 429, 20, 463, 429, 463, 429, 429, 429, 429, 23, 463, 429, 463, 429, 429, 434, 429, 463, 434, 434, 24, 463, 434, 463, 434, 434, 434, 434, 26, 23, 434, 463, 434, 434, 447, 434, 463, 447, 447, 28, 24, 447, 463, 447, 447, 447, 447, 31, 26, 447, 463, 447, 447, 452, 447, 463, 452, 452, 34, 28, 452, 463, 452, 452, 452, 452, 37, 31, 452, 463, 452, 452, 457, 452, 40, 457, 457, 463, 34, 457, 463, 457, 457, 457, 457, 463, 37, 457, 462, 457, 457, 462, 457, 43, 40, 462, 462, 462, 463, 462, 462, 462, 463, 46, 49, 462, 462, 462, 462, 11, 52, 55, 11, 43, 463, 58, 11, 11, 11, 61, 11, 11, 11, 46, 49, 21, 11, 11, 21, 11, 52, 55, 21, 21, 21, 58, 21, 21, 21, 61, 463, 400, 21, 21, 400, 21, 463, 64, 400, 400, 400, 67, 400, 400, 400, 463, 463, 412, 400, 400, 412, 400, 463, 70, 412, 412, 412, 64, 412, 412, 412, 67, 463, 432, 412, 412, 432, 412, 463, 72, 432, 432, 432, 70, 432, 432, 432, 463, 463, 440, 432, 432, 440, 432, 463, 75, 440, 440, 440, 72, 440, 440, 440, 463, 463, 450, 440, 440, 450, 440, 463, 77, 450, 450, 450, 75, 450, 450, 450, 463, 463, 454, 450, 450, 454, 450, 463, 78, 454, 454, 454, 77, 454, 454, 454, 463, 463, 459, 
454, 454, 459, 454, 80, 463, 459, 459, 459, 78, 459, 459, 459, 463, 82, 463, 459, 459, 18, 459, 463, 18, 18, 463, 80, 18, 18, 463, 18, 18, 463, 18, 29, 18, 82, 29, 29, 463, 18, 29, 29, 463, 29, 29, 463, 29, 32, 29, 463, 32, 32, 463, 29, 32, 32, 463, 32, 32, 463, 32, 35, 32, 463, 35, 35, 463, 32, 35, 35, 463, 35, 35, 463, 35, 38, 35, 463, 38, 38, 463, 35, 38, 38, 463, 38, 38, 463, 38, 41, 38, 463, 41, 41, 463, 38, 41, 41, 463, 41, 41, 463, 41, 44, 41, 463, 44, 44, 463, 41, 44, 44, 463, 44, 44, 463, 44, 47, 44, 463, 47, 47, 463, 44, 47, 47, 463, 47, 47, 463, 47, 50, 47, 463, 50, 50, 463, 47, 50, 50, 463, 50, 50, 463, 50, 53, 50, 463, 53, 53, 463, 50, 53, 53, 463, 53, 53, 463, 53, 56, 53, 463, 56, 56, 463, 53, 56, 56, 463, 56, 56, 463, 56, 59, 56, 463, 59, 59, 463, 56, 59, 59, 463, 59, 59, 463, 59, 62, 59, 463, 62, 62, 463, 59, 62, 62, 463, 62, 62, 463, 62, 65, 62, 463, 65, 65, 463, 62, 65, 65, 463, 65, 65, 463, 65, 68, 65, 463, 68, 68, 463, 65, 68, 68, 463, 68, 68, 463, 68, 199, 68, 463, 199, 199, 463, 68, 199, 199, 463, 199, 199, 463, 199, 202, 199, 463, 202, 202, 463, 199, 202, 202, 463, 202, 202, 463, 202, 205, 202, 463, 205, 205, 463, 202, 205, 205, 463, 205, 205, 463, 205, 208, 205, 463, 208, 208, 463, 205, 208, 208, 463, 208, 208, 463, 208, 211, 208, 463, 211, 211, 463, 208, 211, 211, 463, 211, 211, 463, 211, 214, 211, 463, 214, 214, 463, 211, 214, 214, 463, 214, 214, 463, 214, 217, 214, 463, 217, 217, 463, 214, 217, 217, 463, 217, 217, 463, 217, 220, 217, 463, 220, 220, 463, 217, 220, 220, 463, 220, 220, 463, 220, 223, 220, 463, 223, 223, 463, 220, 223, 223, 463, 223, 223, 463, 223, 226, 223, 463, 226, 226, 463, 223, 226, 226, 463, 226, 226, 463, 226, 229, 226, 463, 229, 229, 463, 226, 229, 229, 463, 229, 229, 463, 229, 232, 229, 463, 232, 232, 463, 229, 232, 232, 463, 232, 232, 463, 232, 235, 232, 463, 235, 235, 463, 232, 235, 235, 463, 235, 235, 463, 235, 238, 235, 463, 238, 238, 463, 235, 238, 238, 463, 238, 238, 463, 238, 241, 238, 463, 241, 241, 463, 238, 241, 241, 463, 241, 241, 463, 241, 244, 241, 463, 244, 244, 463, 241, 244, 244, 463, 244, 244, 463, 244, 247, 244, 463, 247, 247, 463, 244, 247, 247, 463, 247, 247, 84, 247, 403, 247, 86, 403, 403, 463, 247, 403, 403, 87, 403, 403, 463, 403, 417, 403, 89, 417, 84, 91, 403, 417, 86, 417, 463, 417, 417, 463, 93, 87, 95, 417, 417, 417, 417, 418, 89, 96, 418, 91, 98, 100, 418, 463, 418, 463, 418, 418, 93, 102, 95, 104, 418, 418, 418, 418, 420, 96, 105, 420, 98, 100, 107, 420, 463, 420, 463, 420, 420, 102, 109, 104, 111, 420, 420, 420, 420, 422, 105, 113, 422, 463, 107, 114, 422, 463, 422, 463, 422, 422, 109, 116, 111, 118, 422, 422, 422, 422, 425, 113, 120, 425, 463, 114, 122, 425, 463, 425, 123, 425, 425, 116, 463, 118, 463, 425, 425, 425, 425, 443, 120, 125, 443, 443, 122, 463, 443, 443, 123, 443, 443, 463, 443, 461, 443, 127, 461, 463, 129, 443, 461, 125, 461, 463, 461, 461, 463, 131, 132, 134, 461, 461, 461, 461, 16, 127, 136, 16, 129, 463, 138, 16, 463, 16, 140, 16, 16, 131, 132, 134, 25, 16, 16, 25, 16, 463, 136, 25, 463, 25, 138, 25, 25, 463, 140, 463, 401, 25, 25, 401, 25, 463, 141, 401, 463, 401, 143, 401, 401, 463, 463, 463, 413, 401, 401, 413, 401, 463, 145, 413, 463, 413, 141, 413, 413, 463, 143, 463, 414, 413, 413, 414, 413, 463, 147, 414, 463, 414, 145, 414, 414, 463, 463, 463, 430, 414, 414, 430, 414, 463, 149, 430, 463, 430, 147, 430, 430, 463, 463, 463, 431, 430, 430, 431, 430, 463, 150, 431, 463, 431, 149, 431, 431, 463, 463, 463, 433, 431, 431, 433, 431, 463, 152, 433, 463, 433, 150, 433, 433, 463, 463, 463, 437, 
433, 433, 437, 433, 463, 154, 437, 463, 437, 152, 437, 437, 463, 463, 463, 444, 437, 437, 444, 437, 463, 156, 444, 463, 444, 154, 444, 444, 463, 463, 463, 445, 444, 444, 445, 444, 463, 158, 445, 463, 445, 156, 445, 445, 463, 463, 463, 448, 445, 445, 448, 445, 463, 159, 448, 463, 448, 158, 448, 448, 463, 463, 463, 449, 448, 448, 449, 448, 463, 161, 449, 463, 449, 159, 449, 449, 463, 463, 463, 451, 449, 449, 451, 449, 463, 163, 451, 463, 451, 161, 451, 451, 463, 463, 463, 453, 451, 451, 453, 451, 463, 165, 453, 463, 453, 163, 453, 453, 463, 463, 463, 455, 453, 453, 455, 453, 463, 167, 455, 463, 455, 165, 455, 455, 463, 463, 463, 456, 455, 455, 456, 455, 463, 168, 456, 463, 456, 167, 456, 456, 463, 463, 463, 458, 456, 456, 458, 456, 463, 463, 458, 170, 458, 168, 458, 458, 463, 172, 174, 12, 458, 458, 12, 458, 463, 19, 12, 12, 19, 19, 12, 170, 12, 176, 463, 19, 19, 172, 174, 12, 19, 22, 463, 463, 22, 19, 463, 177, 22, 22, 179, 181, 22, 176, 22, 27, 463, 463, 27, 183, 463, 22, 27, 27, 185, 186, 27, 177, 27, 30, 179, 181, 30, 188, 463, 27, 30, 30, 190, 183, 30, 192, 30, 33, 185, 186, 33, 194, 463, 30, 33, 33, 195, 188, 33, 196, 33, 36, 190, 463, 36, 192, 463, 33, 36, 36, 198, 194, 36, 201, 36, 39, 195, 463, 39, 196, 463, 36, 39, 39, 204, 207, 39, 210, 39, 42, 198, 463, 42, 201, 463, 39, 42, 42, 213, 216, 42, 219, 42, 45, 204, 207, 45, 210, 463, 42, 45, 45, 222, 225, 45, 228, 45, 48, 213, 216, 48, 219, 463, 45, 48, 48, 231, 234, 48, 237, 48, 51, 222, 225, 51, 228, 463, 48, 51, 51, 240, 243, 51, 246, 51, 54, 231, 234, 54, 237, 463, 51, 54, 54, 249, 251, 54, 253, 54, 57, 240, 243, 57, 246, 463, 54, 57, 57, 255, 257, 57, 259, 57, 60, 249, 251, 60, 253, 463, 57, 60, 60, 261, 263, 60, 264, 60, 63, 255, 257, 63, 259, 463, 60, 63, 63, 266, 268, 63, 270, 63, 66, 261, 263, 66, 264, 463, 63, 66, 66, 463, 463, 66, 463, 66, 69, 266, 268, 69, 270, 463, 66, 69, 69, 463, 463, 69, 71, 69, 463, 71, 71, 272, 73, 463, 69, 73, 71, 71, 463, 73, 73, 71, 463, 73, 74, 73, 71, 74, 74, 273, 76, 272, 73, 76, 74, 74, 463, 76, 76, 74, 463, 76, 79, 76, 74, 79, 79, 275, 81, 273, 76, 81, 79, 79, 463, 81, 81, 79, 463, 81, 83, 81, 79, 83, 83, 277, 85, 275, 81, 85, 83, 83, 463, 85, 85, 83, 463, 85, 88, 85, 83, 88, 88, 279, 90, 277, 85, 90, 88, 88, 463, 90, 90, 88, 463, 90, 92, 90, 88, 92, 92, 281, 94, 279, 90, 94, 92, 92, 463, 94, 94, 92, 463, 94, 97, 94, 92, 97, 97, 282, 99, 281, 94, 99, 97, 97, 463, 99, 99, 97, 463, 99, 101, 99, 97, 101, 101, 284, 103, 282, 99, 103, 101, 101, 463, 103, 103, 101, 463, 103, 106, 103, 101, 106, 106, 286, 108, 284, 103, 108, 106, 106, 463, 108, 108, 106, 463, 108, 110, 108, 106, 110, 110, 288, 112, 286, 108, 112, 110, 110, 463, 112, 112, 110, 463, 112, 115, 112, 110, 115, 115, 290, 117, 288, 112, 117, 115, 115, 463, 117, 117, 115, 463, 117, 119, 117, 115, 119, 119, 291, 121, 290, 117, 121, 119, 119, 463, 121, 121, 119, 463, 121, 124, 121, 119, 124, 124, 293, 126, 291, 121, 126, 124, 124, 463, 126, 126, 124, 463, 126, 128, 126, 124, 128, 128, 295, 130, 293, 126, 130, 128, 128, 463, 130, 130, 128, 463, 130, 133, 130, 128, 133, 133, 297, 135, 295, 130, 135, 133, 133, 463, 135, 135, 133, 463, 135, 137, 135, 133, 137, 137, 299, 139, 297, 135, 139, 137, 137, 463, 139, 139, 137, 463, 139, 142, 139, 137, 142, 142, 300, 144, 299, 139, 144, 142, 142, 463, 144, 144, 142, 463, 144, 146, 144, 142, 146, 146, 302, 148, 300, 144, 148, 146, 146, 463, 148, 148, 146, 463, 148, 151, 148, 146, 151, 151, 304, 153, 302, 148, 153, 151, 151, 463, 153, 153, 151, 463, 153, 155, 153, 151, 155, 155, 306, 157, 304, 153, 157, 
155, 155, 463, 157, 157, 155, 463, 157, 160, 157, 155, 160, 160, 308, 162, 306, 157, 162, 160, 160, 463, 162, 162, 160, 463, 162, 164, 162, 160, 164, 164, 309, 166, 308, 162, 166, 164, 164, 463, 166, 166, 164, 463, 166, 169, 166, 164, 169, 169, 311, 171, 309, 166, 171, 169, 169, 463, 171, 171, 169, 463, 171, 173, 171, 169, 173, 173, 313, 175, 311, 171, 175, 173, 173, 463, 175, 175, 173, 463, 175, 178, 175, 173, 178, 178, 315, 180, 313, 175, 180, 178, 178, 463, 180, 180, 178, 463, 180, 182, 180, 178, 182, 182, 317, 184, 315, 180, 184, 182, 182, 463, 184, 184, 182, 463, 184, 187, 184, 182, 187, 187, 318, 189, 317, 184, 189, 187, 187, 463, 189, 189, 187, 320, 189, 191, 189, 187, 191, 191, 322, 193, 318, 189, 193, 191, 191, 463, 193, 193, 191, 324, 193, 320, 193, 191, 197, 463, 463, 197, 322, 193, 326, 197, 197, 327, 329, 197, 331, 197, 200, 324, 463, 200, 333, 463, 197, 200, 200, 335, 336, 200, 326, 200, 203, 327, 329, 203, 331, 463, 200, 203, 203, 338, 333, 203, 340, 203, 206, 335, 336, 206, 342, 463, 203, 206, 206, 344, 345, 206, 347, 206, 209, 338, 463, 209, 340, 463, 206, 209, 209, 349, 342, 209, 351, 209, 212, 344, 345, 212, 347, 463, 209, 212, 212, 353, 354, 212, 356, 212, 215, 349, 463, 215, 351, 463, 212, 215, 215, 358, 360, 215, 362, 215, 218, 353, 354, 218, 356, 463, 215, 218, 218, 363, 365, 218, 367, 218, 221, 358, 360, 221, 362, 463, 218, 221, 221, 369, 371, 221, 372, 221, 224, 363, 365, 224, 367, 463, 221, 224, 224, 374, 376, 224, 378, 224, 227, 369, 371, 227, 372, 463, 224, 227, 227, 380, 381, 227, 383, 227, 230, 374, 376, 230, 378, 463, 227, 230, 230, 385, 387, 230, 389, 230, 233, 380, 381, 233, 383, 463, 230, 233, 233, 390, 392, 233, 394, 233, 236, 385, 387, 236, 389, 463, 233, 236, 236, 396, 398, 236, 399, 236, 239, 390, 392, 239, 394, 463, 236, 239, 239, 402, 405, 239, 407, 239, 242, 396, 398, 242, 399, 463, 239, 242, 242, 463, 463, 242, 463, 242, 245, 402, 405, 245, 407, 463, 242, 245, 245, 463, 463, 245, 248, 245, 463, 248, 248, 409, 250, 463, 245, 250, 248, 248, 463, 250, 250, 248, 463, 250, 252, 250, 248, 252, 252, 415, 254, 409, 250, 254, 252, 252, 463, 254, 254, 252, 463, 254, 256, 254, 252, 256, 256, 423, 258, 415, 254, 258, 256, 256, 463, 258, 258, 256, 463, 258, 260, 258, 256, 260, 260, 426, 262, 423, 258, 262, 260, 260, 463, 262, 262, 260, 463, 262, 265, 262, 260, 265, 265, 436, 267, 426, 262, 267, 265, 265, 463, 267, 267, 265, 463, 267, 269, 267, 265, 269, 269, 438, 271, 436, 267, 271, 269, 269, 463, 271, 271, 269, 463, 271, 274, 271, 269, 274, 274, 463, 276, 438, 271, 276, 274, 274, 463, 276, 276, 274, 463, 276, 278, 276, 274, 278, 278, 463, 280, 463, 276, 280, 278, 278, 463, 280, 280, 278, 463, 280, 283, 280, 278, 283, 283, 463, 285, 463, 280, 285, 283, 283, 463, 285, 285, 283, 463, 285, 287, 285, 283, 287, 287, 463, 289, 463, 285, 289, 287, 287, 463, 289, 289, 287, 463, 289, 292, 289, 287, 292, 292, 463, 294, 463, 289, 294, 292, 292, 463, 294, 294, 292, 463, 294, 296, 294, 292, 296, 296, 463, 298, 463, 294, 298, 296, 296, 463, 298, 298, 296, 463, 298, 301, 298, 296, 301, 301, 463, 303, 463, 298, 303, 301, 301, 463, 303, 303, 301, 463, 303, 305, 303, 301, 305, 305, 463, 307, 463, 303, 307, 305, 305, 463, 307, 307, 305, 463, 307, 310, 307, 305, 310, 310, 463, 312, 463, 307, 312, 310, 310, 463, 312, 312, 310, 463, 312, 314, 312, 310, 314, 314, 463, 316, 463, 312, 316, 314, 314, 463, 316, 316, 314, 463, 316, 319, 316, 314, 319, 319, 463, 321, 463, 316, 321, 319, 319, 463, 321, 321, 319, 463, 321, 323, 321, 319, 323, 323, 463, 325, 463, 321, 325, 323, 323, 463, 325, 
325, 323, 463, 325, 328, 325, 323, 328, 328, 463, 330, 463, 325, 330, 328, 328, 463, 330, 330, 328, 463, 330, 332, 330, 328, 332, 332, 463, 334, 463, 330, 334, 332, 332, 463, 334, 334, 332, 463, 334, 337, 334, 332, 337, 337, 463, 339, 463, 334, 339, 337, 337, 463, 339, 339, 337, 463, 339, 341, 339, 337, 341, 341, 463, 343, 463, 339, 343, 341, 341, 463, 343, 343, 341, 463, 343, 346, 343, 341, 346, 346, 463, 348, 463, 343, 348, 346, 346, 463, 348, 348, 346, 463, 348, 350, 348, 346, 350, 350, 463, 352, 463, 348, 352, 350, 350, 463, 352, 352, 350, 463, 352, 355, 352, 350, 355, 355, 463, 357, 463, 352, 357, 355, 355, 463, 357, 357, 355, 463, 357, 359, 357, 355, 359, 359, 463, 361, 463, 357, 361, 359, 359, 463, 361, 361, 359, 463, 361, 364, 361, 359, 364, 364, 463, 366, 463, 361, 366, 364, 364, 463, 366, 366, 364, 463, 366, 368, 366, 364, 368, 368, 463, 370, 463, 366, 370, 368, 368, 463, 370, 370, 368, 463, 370, 373, 370, 368, 373, 373, 463, 375, 463, 370, 375, 373, 373, 463, 375, 375, 373, 463, 375, 377, 375, 373, 377, 377, 463, 379, 463, 375, 379, 377, 377, 463, 379, 379, 377, 463, 379, 382, 379, 377, 382, 382, 463, 384, 463, 379, 384, 382, 382, 463, 384, 384, 382, 463, 384, 386, 384, 382, 386, 386, 463, 388, 463, 384, 388, 386, 386, 463, 388, 388, 386, 463, 388, 391, 388, 386, 391, 391, 463, 393, 463, 388, 393, 391, 391, 463, 393, 393, 391, 463, 393, 395, 393, 391, 395, 395, 463, 397, 463, 393, 397, 395, 395, 463, 397, 397, 395, 463, 397, 404, 397, 395, 404, 404, 463, 406, 463, 397, 406, 404, 404, 463, 406, 406, 404, 463, 406, 408, 406, 404, 408, 408, 463, 410, 463, 406, 410, 408, 408, 463, 410, 410, 408, 463, 410, 463, 410, 408, 421, 463, 463, 421, 463, 410, 463, 421, 421, 463, 463, 421, 463, 421, 427, 463, 463, 427, 463, 463, 421, 427, 427, 463, 463, 427, 428, 427, 435, 428, 428, 435, 435, 463, 427, 463, 428, 428, 435, 435, 463, 428, 463, 435, 463, 463, 428, 439, 435, 463, 439, 439, 441, 463, 463, 441, 463, 439, 439, 441, 441, 463, 439, 441, 463, 441, 442, 439, 463, 442, 463, 463, 441, 442, 442, 463, 463, 442, 463, 442, 463, 463, 463, 463, 463, 463, 442}; /* Base vector for state transitions. 
*/ static const unsigned short athlon_fp_base[] = { 0, 84, 21, 88, 96, 105, 109, 117, 126, 130, 145, 295, 1332, 155, 156, 124, 1044, 163, 442, 1338, 167, 311, 1354, 195, 206, 1060, 214, 1368, 225, 456, 1382, 233, 470, 1396, 244, 484, 1410, 252, 498, 1424, 260, 512, 1438, 279, 526, 1452, 289, 540, 1466, 290, 554, 1480, 296, 568, 1494, 297, 582, 1508, 301, 596, 1522, 305, 610, 1536, 333, 624, 1550, 337, 638, 1564, 349, 1576, 365, 1582, 1594, 381, 1600, 397, 413, 1612, 428, 1618, 438, 1630, 888, 1636, 892, 899, 1648, 906, 1654, 909, 1666, 918, 1672, 920, 927, 1684, 930, 1690, 931, 1702, 939, 1708, 941, 948, 1720, 952, 1726, 960, 1738, 962, 1744, 969, 973, 1756, 981, 1762, 983, 1774, 990, 1780, 994, 998, 1792, 1011, 1798, 1025, 1810, 1028, 1816, 1037, 1038, 1828, 1039, 1834, 1046, 1846, 1050, 1852, 1054, 1082, 1864, 1086, 1870, 1098, 1882, 1114, 1888, 1130, 1146, 1900, 1162, 1906, 1178, 1918, 1194, 1924, 1210, 1226, 1936, 1242, 1942, 1258, 1954, 1274, 1960, 1290, 1306, 1972, 1324, 1978, 1330, 1990, 1331, 1996, 1346, 1360, 2008, 1363, 2014, 1364, 2026, 1372, 2032, 1377, 1378, 2044, 1386, 2050, 1391, 2062, 1394, 2068, 1400, 1405, 1408, 2083, 1419, 652, 2097, 1422, 666, 2111, 1433, 680, 2125, 1434, 694, 2139, 1436, 708, 2153, 1447, 722, 2167, 1448, 736, 2181, 1450, 750, 2195, 1461, 764, 2209, 1462, 778, 2223, 1464, 792, 2237, 1475, 806, 2251, 1476, 820, 2265, 1478, 834, 2279, 1489, 848, 2293, 1490, 862, 2307, 1492, 876, 2319, 1503, 2325, 1504, 2337, 1506, 2343, 1517, 2355, 1518, 2361, 1520, 2373, 1531, 2379, 1532, 1534, 2391, 1545, 2397, 1546, 2409, 1548, 2415, 1581, 1599, 2427, 1617, 2433, 1635, 2445, 1653, 2451, 1671, 1689, 2463, 1707, 2469, 1725, 2481, 1743, 2487, 1761, 1779, 2499, 1797, 2505, 1815, 2517, 1833, 2523, 1851, 1869, 2535, 1887, 2541, 1905, 2553, 1923, 2559, 1941, 1959, 2571, 1977, 2577, 1995, 2589, 2013, 2595, 2031, 2049, 2607, 2060, 2613, 2067, 2625, 2078, 2631, 2089, 2092, 2643, 2093, 2649, 2095, 2661, 2101, 2667, 2106, 2107, 2679, 2120, 2685, 2123, 2697, 2129, 2703, 2134, 2135, 2715, 2137, 2721, 2148, 2733, 2151, 2739, 2162, 2163, 2751, 2165, 2757, 2176, 2769, 2177, 2775, 2179, 2190, 2787, 2191, 2793, 2193, 2805, 2204, 2811, 2205, 2207, 2823, 2218, 2829, 2219, 2841, 2221, 2847, 2232, 2233, 2859, 2235, 2865, 2246, 2877, 2247, 2883, 2249, 2260, 2895, 2261, 2901, 2263, 2913, 2274, 2919, 2275, 2277, 327, 1076, 2288, 890, 2931, 2289, 2937, 2291, 2949, 2324, 2955, 42, 343, 1092, 1108, 2342, 82, 904, 925, 140, 946, 2970, 967, 2360, 161, 988, 2378, 2984, 2996, 182, 1124, 1140, 359, 1156, 201, 2998, 2396, 1172, 2414, 3017, 375, 3022, 3036, 1009, 1188, 1204, 63, 220, 1220, 1236, 391, 1252, 239, 1268, 407, 1284, 1300, 258, 1316, 423, 103, 1023, 274}; #if AUTOMATON_STATE_ALTS /* Comb vector for state insn alternatives. 
*/ static const unsigned char athlon_fp_state_alts[] ATTRIBUTE_UNUSED = { 1, 1, 1, 18, 6, 1, 1, 6, 6, 6, 3, 18, 1, 1, 2, 6, 1, 6, 1, 1, 1, 1, 1, 1, 18, 6, 1, 1, 6, 6, 6, 3, 18, 1, 1, 2, 6, 1, 6, 1, 0, 1, 1, 1, 1, 18, 6, 1, 1, 6, 6, 6, 3, 18, 1, 1, 2, 6, 1, 6, 1, 0, 1, 1, 1, 1, 18, 6, 1, 1, 6, 6, 6, 3, 18, 1, 1, 2, 6, 1, 6, 1, 1, 1, 1, 12, 6, 0, 1, 3, 0, 6, 3, 12, 1, 0, 1, 6, 0, 6, 1, 1, 1, 1, 1, 1, 12, 6, 1, 1, 3, 0, 6, 3, 8, 1, 1, 1, 6, 0, 6, 1, 1, 1, 1, 1, 1, 12, 6, 1, 1, 3, 0, 6, 3, 12, 1, 1, 0, 6, 1, 6, 1, 12, 1, 1, 1, 6, 6, 6, 1, 12, 1, 1, 0, 1, 1, 6, 1, 1, 1, 1, 0, 1, 12, 1, 0, 1, 6, 6, 6, 0, 8, 1, 1, 1, 1, 0, 6, 1, 1, 1, 1, 1, 0, 12, 6, 1, 0, 3, 0, 6, 3, 12, 1, 1, 0, 6, 0, 6, 1, 1, 1, 0, 12, 6, 1, 0, 3, 0, 6, 3, 8, 1, 1, 1, 6, 0, 6, 1, 1, 1, 0, 12, 6, 1, 1, 3, 0, 6, 3, 12, 1, 1, 1, 6, 0, 6, 1, 1, 1, 0, 12, 6, 1, 1, 3, 0, 6, 3, 8, 1, 1, 1, 6, 0, 6, 1, 1, 1, 1, 12, 6, 0, 1, 3, 0, 6, 3, 8, 1, 0, 1, 6, 1, 6, 1, 12, 1, 1, 1, 6, 6, 6, 0, 8, 1, 1, 0, 1, 1, 6, 1, 1, 1, 1, 1, 1, 12, 1, 0, 1, 6, 6, 6, 1, 12, 1, 1, 1, 1, 1, 6, 1, 12, 1, 1, 1, 6, 6, 6, 1, 8, 1, 1, 1, 0, 1, 6, 1, 12, 1, 0, 1, 6, 6, 6, 1, 12, 1, 1, 0, 0, 1, 6, 1, 12, 1, 0, 1, 6, 6, 6, 1, 12, 1, 1, 1, 0, 1, 6, 1, 12, 1, 0, 1, 6, 6, 6, 1, 8, 1, 1, 0, 0, 1, 6, 1, 12, 1, 0, 1, 6, 6, 6, 1, 8, 1, 1, 0, 0, 1, 6, 1, 12, 1, 0, 1, 6, 6, 6, 1, 8, 1, 1, 0, 0, 1, 6, 1, 12, 1, 0, 1, 6, 6, 6, 1, 8, 1, 1, 0, 0, 1, 6, 1, 12, 1, 1, 0, 6, 6, 6, 1, 8, 1, 1, 0, 1, 0, 6, 1, 1, 1, 0, 12, 6, 0, 1, 3, 6, 0, 3, 12, 0, 1, 1, 6, 1, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 0, 1, 1, 6, 0, 12, 6, 0, 1, 3, 6, 0, 3, 8, 1, 1, 1, 6, 1, 12, 6, 0, 1, 3, 6, 1, 3, 8, 0, 1, 1, 6, 1, 6, 1, 1, 1, 3, 1, 6, 0, 6, 1, 0, 1, 1, 1, 6, 1, 1, 1, 1, 1, 1, 6, 1, 1, 1, 3, 0, 6, 0, 4, 1, 1, 1, 1, 1, 6, 1, 1, 1, 1, 1, 1, 6, 1, 1, 1, 3, 0, 6, 0, 4, 1, 1, 1, 1, 1, 6, 1, 1, 1, 1, 1, 1, 6, 0, 1, 1, 3, 0, 6, 0, 4, 1, 1, 1, 1, 1, 6, 1, 1, 1, 1, 1, 1, 6, 0, 1, 1, 3, 0, 6, 1, 2, 1, 1, 0, 1, 0, 6, 1, 1, 1, 1, 1, 1, 12, 6, 1, 0, 3, 6, 1, 3, 8, 0, 1, 1, 6, 1, 6, 0, 1, 1, 3, 1, 6, 0, 2, 1, 0, 1, 1, 1, 6, 1, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 0, 6, 1, 6, 1, 1, 1, 1, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 4, 1, 0, 1, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 6, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 6, 1, 0, 1, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 4, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 4, 
1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 4, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 2, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 2, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 4, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 2, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 4, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 4, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 2, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 2, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 4, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 1, 3, 0, 6, 1, 4, 1, 0, 0, 0, 1, 6, 1, 6, 1, 0, 0, 3, 1, 6, 1, 2, 1, 0, 1, 1, 1, 6, 1, 6, 1, 0, 1, 3, 6, 6, 6, 6, 1, 1, 1, 0, 3, 6, 1, 1, 1, 6, 1, 0, 0, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 0, 0, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 0, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 0, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 0, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 0, 0, 4, 0, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 0, 0, 4, 1, 1, 0, 6, 6, 1, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 1, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 1, 2, 1, 1, 1, 1, 0, 0, 6, 1, 1, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 0, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 0, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 0, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 1, 1, 4, 1, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 0, 0, 4, 0, 1, 1, 1, 1, 6, 1, 0, 1, 3, 6, 0, 0, 4, 1, 1, 
0, 6, 6, 1, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 1, 1, 1, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 1, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 4, 0, 3, 6, 6, 0, 2, 1, 1, 1, 6, 6, 0, 1, 0, 1, 6, 3, 2, 0, 3, 6, 6, 0, 2, 0, 1, 1, 1, 0, 0, 6, 0, 1, 0, 3, 6, 0, 0, 4, 0, 1, 1, 0, 0, 6, 0, 0, 1, 3, 6, 0, 0, 2, 1, 1, 1, 6, 6, 6, 6, 0, 1, 0, 3, 4, 3, 4, 0, 6, 0, 6, 0, 0, 1, 1, 1, 0, 6, 6, 1, 0, 0, 6, 0, 3, 2, 3, 6, 0, 6, 4, 0, 1, 1, 1, 0, 6, 0, 0, 1, 3, 6, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 1}; /* Check vector for state insn alternatives. 
*/ static const unsigned short athlon_fp_check_state_alts[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 463, 2, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 411, 463, 411, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 446, 416, 446, 1, 416, 416, 463, 3, 416, 463, 416, 416, 416, 416, 463, 4, 416, 463, 416, 416, 416, 416, 460, 1, 5, 460, 460, 3, 6, 460, 463, 460, 460, 460, 460, 4, 7, 460, 463, 460, 460, 460, 460, 15, 5, 8, 15, 15, 6, 9, 15, 463, 15, 15, 15, 15, 7, 463, 15, 419, 15, 15, 419, 15, 10, 8, 419, 419, 419, 9, 419, 419, 419, 463, 13, 14, 419, 419, 419, 419, 424, 463, 17, 424, 10, 463, 20, 424, 424, 424, 463, 424, 424, 424, 13, 14, 463, 424, 424, 424, 424, 429, 17, 463, 429, 429, 20, 463, 429, 463, 429, 429, 429, 429, 23, 463, 429, 463, 429, 429, 434, 429, 463, 434, 434, 24, 463, 434, 463, 434, 434, 434, 434, 26, 23, 434, 463, 434, 434, 447, 434, 463, 447, 447, 28, 24, 447, 463, 447, 447, 447, 447, 31, 26, 447, 463, 447, 447, 452, 447, 463, 452, 452, 34, 28, 452, 463, 452, 452, 452, 452, 37, 31, 452, 463, 452, 452, 457, 452, 40, 457, 457, 463, 34, 457, 463, 457, 457, 457, 457, 463, 37, 457, 462, 457, 457, 462, 457, 43, 40, 462, 462, 462, 463, 462, 462, 462, 463, 46, 49, 462, 462, 462, 462, 11, 52, 55, 11, 43, 463, 58, 11, 11, 11, 61, 11, 11, 11, 46, 49, 21, 11, 11, 21, 11, 52, 55, 21, 21, 21, 58, 21, 21, 21, 61, 463, 400, 21, 21, 400, 21, 463, 64, 400, 400, 400, 67, 400, 400, 400, 463, 463, 412, 400, 400, 412, 400, 463, 70, 412, 412, 412, 64, 412, 412, 412, 67, 463, 432, 412, 412, 432, 412, 463, 72, 432, 432, 432, 70, 432, 432, 432, 463, 463, 440, 432, 432, 440, 432, 463, 75, 440, 440, 440, 72, 440, 440, 440, 463, 463, 450, 440, 440, 450, 440, 463, 77, 450, 450, 450, 75, 450, 450, 450, 463, 463, 454, 450, 450, 454, 450, 463, 78, 454, 454, 454, 77, 454, 454, 454, 463, 463, 459, 454, 454, 459, 454, 80, 463, 459, 459, 459, 78, 459, 459, 459, 463, 82, 463, 459, 459, 18, 459, 463, 18, 18, 463, 80, 18, 18, 463, 18, 18, 463, 18, 29, 18, 82, 29, 29, 463, 18, 29, 29, 463, 29, 29, 463, 29, 32, 29, 463, 32, 32, 463, 29, 32, 32, 463, 32, 32, 463, 32, 35, 32, 463, 35, 35, 463, 32, 35, 35, 463, 35, 35, 463, 35, 38, 35, 463, 38, 38, 463, 35, 38, 38, 463, 38, 38, 463, 38, 41, 38, 463, 41, 41, 463, 38, 41, 41, 463, 41, 41, 463, 41, 44, 41, 463, 44, 44, 463, 41, 44, 44, 463, 44, 44, 463, 44, 47, 44, 463, 47, 47, 463, 44, 47, 47, 463, 47, 47, 463, 47, 50, 47, 463, 50, 50, 463, 47, 50, 50, 463, 50, 50, 463, 50, 53, 50, 463, 53, 53, 463, 50, 53, 53, 463, 53, 53, 463, 53, 56, 53, 463, 56, 56, 463, 53, 56, 56, 463, 56, 56, 463, 56, 59, 56, 463, 59, 59, 463, 56, 59, 59, 463, 59, 59, 463, 59, 62, 59, 463, 62, 62, 463, 59, 62, 62, 463, 62, 62, 463, 62, 65, 62, 463, 65, 65, 463, 62, 65, 65, 463, 65, 65, 463, 65, 68, 65, 463, 68, 68, 463, 65, 68, 68, 463, 68, 68, 463, 68, 199, 68, 463, 199, 199, 463, 68, 199, 199, 463, 199, 199, 463, 199, 202, 199, 463, 202, 202, 463, 199, 202, 202, 463, 202, 202, 463, 202, 205, 202, 463, 205, 205, 463, 202, 205, 205, 463, 205, 205, 463, 205, 208, 205, 463, 208, 208, 463, 205, 208, 208, 463, 208, 208, 463, 208, 211, 208, 463, 211, 211, 463, 208, 211, 211, 463, 211, 211, 463, 211, 214, 211, 463, 214, 214, 463, 211, 214, 214, 463, 214, 214, 463, 214, 217, 214, 463, 217, 217, 463, 214, 217, 217, 463, 217, 217, 463, 217, 220, 217, 463, 220, 220, 463, 217, 220, 220, 463, 220, 220, 463, 220, 223, 220, 463, 223, 223, 
463, 220, 223, 223, 463, 223, 223, 463, 223, 226, 223, 463, 226, 226, 463, 223, 226, 226, 463, 226, 226, 463, 226, 229, 226, 463, 229, 229, 463, 226, 229, 229, 463, 229, 229, 463, 229, 232, 229, 463, 232, 232, 463, 229, 232, 232, 463, 232, 232, 463, 232, 235, 232, 463, 235, 235, 463, 232, 235, 235, 463, 235, 235, 463, 235, 238, 235, 463, 238, 238, 463, 235, 238, 238, 463, 238, 238, 463, 238, 241, 238, 463, 241, 241, 463, 238, 241, 241, 463, 241, 241, 463, 241, 244, 241, 463, 244, 244, 463, 241, 244, 244, 463, 244, 244, 463, 244, 247, 244, 463, 247, 247, 463, 244, 247, 247, 463, 247, 247, 84, 247, 403, 247, 86, 403, 403, 463, 247, 403, 403, 87, 403, 403, 463, 403, 417, 403, 89, 417, 84, 91, 403, 417, 86, 417, 463, 417, 417, 463, 93, 87, 95, 417, 417, 417, 417, 418, 89, 96, 418, 91, 98, 100, 418, 463, 418, 463, 418, 418, 93, 102, 95, 104, 418, 418, 418, 418, 420, 96, 105, 420, 98, 100, 107, 420, 463, 420, 463, 420, 420, 102, 109, 104, 111, 420, 420, 420, 420, 422, 105, 113, 422, 463, 107, 114, 422, 463, 422, 463, 422, 422, 109, 116, 111, 118, 422, 422, 422, 422, 425, 113, 120, 425, 463, 114, 122, 425, 463, 425, 123, 425, 425, 116, 463, 118, 463, 425, 425, 425, 425, 443, 120, 125, 443, 443, 122, 463, 443, 443, 123, 443, 443, 463, 443, 461, 443, 127, 461, 463, 129, 443, 461, 125, 461, 463, 461, 461, 463, 131, 132, 134, 461, 461, 461, 461, 16, 127, 136, 16, 129, 463, 138, 16, 463, 16, 140, 16, 16, 131, 132, 134, 25, 16, 16, 25, 16, 463, 136, 25, 463, 25, 138, 25, 25, 463, 140, 463, 401, 25, 25, 401, 25, 463, 141, 401, 463, 401, 143, 401, 401, 463, 463, 463, 413, 401, 401, 413, 401, 463, 145, 413, 463, 413, 141, 413, 413, 463, 143, 463, 414, 413, 413, 414, 413, 463, 147, 414, 463, 414, 145, 414, 414, 463, 463, 463, 430, 414, 414, 430, 414, 463, 149, 430, 463, 430, 147, 430, 430, 463, 463, 463, 431, 430, 430, 431, 430, 463, 150, 431, 463, 431, 149, 431, 431, 463, 463, 463, 433, 431, 431, 433, 431, 463, 152, 433, 463, 433, 150, 433, 433, 463, 463, 463, 437, 433, 433, 437, 433, 463, 154, 437, 463, 437, 152, 437, 437, 463, 463, 463, 444, 437, 437, 444, 437, 463, 156, 444, 463, 444, 154, 444, 444, 463, 463, 463, 445, 444, 444, 445, 444, 463, 158, 445, 463, 445, 156, 445, 445, 463, 463, 463, 448, 445, 445, 448, 445, 463, 159, 448, 463, 448, 158, 448, 448, 463, 463, 463, 449, 448, 448, 449, 448, 463, 161, 449, 463, 449, 159, 449, 449, 463, 463, 463, 451, 449, 449, 451, 449, 463, 163, 451, 463, 451, 161, 451, 451, 463, 463, 463, 453, 451, 451, 453, 451, 463, 165, 453, 463, 453, 163, 453, 453, 463, 463, 463, 455, 453, 453, 455, 453, 463, 167, 455, 463, 455, 165, 455, 455, 463, 463, 463, 456, 455, 455, 456, 455, 463, 168, 456, 463, 456, 167, 456, 456, 463, 463, 463, 458, 456, 456, 458, 456, 463, 463, 458, 170, 458, 168, 458, 458, 463, 172, 174, 12, 458, 458, 12, 458, 463, 19, 12, 12, 19, 19, 12, 170, 12, 176, 463, 19, 19, 172, 174, 12, 19, 22, 463, 463, 22, 19, 463, 177, 22, 22, 179, 181, 22, 176, 22, 27, 463, 463, 27, 183, 463, 22, 27, 27, 185, 186, 27, 177, 27, 30, 179, 181, 30, 188, 463, 27, 30, 30, 190, 183, 30, 192, 30, 33, 185, 186, 33, 194, 463, 30, 33, 33, 195, 188, 33, 196, 33, 36, 190, 463, 36, 192, 463, 33, 36, 36, 198, 194, 36, 201, 36, 39, 195, 463, 39, 196, 463, 36, 39, 39, 204, 207, 39, 210, 39, 42, 198, 463, 42, 201, 463, 39, 42, 42, 213, 216, 42, 219, 42, 45, 204, 207, 45, 210, 463, 42, 45, 45, 222, 225, 45, 228, 45, 48, 213, 216, 48, 219, 463, 45, 48, 48, 231, 234, 48, 237, 48, 51, 222, 225, 51, 228, 463, 48, 51, 51, 240, 243, 51, 246, 51, 54, 231, 234, 54, 237, 463, 51, 54, 54, 249, 
251, 54, 253, 54, 57, 240, 243, 57, 246, 463, 54, 57, 57, 255, 257, 57, 259, 57, 60, 249, 251, 60, 253, 463, 57, 60, 60, 261, 263, 60, 264, 60, 63, 255, 257, 63, 259, 463, 60, 63, 63, 266, 268, 63, 270, 63, 66, 261, 263, 66, 264, 463, 63, 66, 66, 463, 463, 66, 463, 66, 69, 266, 268, 69, 270, 463, 66, 69, 69, 463, 463, 69, 71, 69, 463, 71, 71, 272, 73, 463, 69, 73, 71, 71, 463, 73, 73, 71, 463, 73, 74, 73, 71, 74, 74, 273, 76, 272, 73, 76, 74, 74, 463, 76, 76, 74, 463, 76, 79, 76, 74, 79, 79, 275, 81, 273, 76, 81, 79, 79, 463, 81, 81, 79, 463, 81, 83, 81, 79, 83, 83, 277, 85, 275, 81, 85, 83, 83, 463, 85, 85, 83, 463, 85, 88, 85, 83, 88, 88, 279, 90, 277, 85, 90, 88, 88, 463, 90, 90, 88, 463, 90, 92, 90, 88, 92, 92, 281, 94, 279, 90, 94, 92, 92, 463, 94, 94, 92, 463, 94, 97, 94, 92, 97, 97, 282, 99, 281, 94, 99, 97, 97, 463, 99, 99, 97, 463, 99, 101, 99, 97, 101, 101, 284, 103, 282, 99, 103, 101, 101, 463, 103, 103, 101, 463, 103, 106, 103, 101, 106, 106, 286, 108, 284, 103, 108, 106, 106, 463, 108, 108, 106, 463, 108, 110, 108, 106, 110, 110, 288, 112, 286, 108, 112, 110, 110, 463, 112, 112, 110, 463, 112, 115, 112, 110, 115, 115, 290, 117, 288, 112, 117, 115, 115, 463, 117, 117, 115, 463, 117, 119, 117, 115, 119, 119, 291, 121, 290, 117, 121, 119, 119, 463, 121, 121, 119, 463, 121, 124, 121, 119, 124, 124, 293, 126, 291, 121, 126, 124, 124, 463, 126, 126, 124, 463, 126, 128, 126, 124, 128, 128, 295, 130, 293, 126, 130, 128, 128, 463, 130, 130, 128, 463, 130, 133, 130, 128, 133, 133, 297, 135, 295, 130, 135, 133, 133, 463, 135, 135, 133, 463, 135, 137, 135, 133, 137, 137, 299, 139, 297, 135, 139, 137, 137, 463, 139, 139, 137, 463, 139, 142, 139, 137, 142, 142, 300, 144, 299, 139, 144, 142, 142, 463, 144, 144, 142, 463, 144, 146, 144, 142, 146, 146, 302, 148, 300, 144, 148, 146, 146, 463, 148, 148, 146, 463, 148, 151, 148, 146, 151, 151, 304, 153, 302, 148, 153, 151, 151, 463, 153, 153, 151, 463, 153, 155, 153, 151, 155, 155, 306, 157, 304, 153, 157, 155, 155, 463, 157, 157, 155, 463, 157, 160, 157, 155, 160, 160, 308, 162, 306, 157, 162, 160, 160, 463, 162, 162, 160, 463, 162, 164, 162, 160, 164, 164, 309, 166, 308, 162, 166, 164, 164, 463, 166, 166, 164, 463, 166, 169, 166, 164, 169, 169, 311, 171, 309, 166, 171, 169, 169, 463, 171, 171, 169, 463, 171, 173, 171, 169, 173, 173, 313, 175, 311, 171, 175, 173, 173, 463, 175, 175, 173, 463, 175, 178, 175, 173, 178, 178, 315, 180, 313, 175, 180, 178, 178, 463, 180, 180, 178, 463, 180, 182, 180, 178, 182, 182, 317, 184, 315, 180, 184, 182, 182, 463, 184, 184, 182, 463, 184, 187, 184, 182, 187, 187, 318, 189, 317, 184, 189, 187, 187, 463, 189, 189, 187, 320, 189, 191, 189, 187, 191, 191, 322, 193, 318, 189, 193, 191, 191, 463, 193, 193, 191, 324, 193, 320, 193, 191, 197, 463, 463, 197, 322, 193, 326, 197, 197, 327, 329, 197, 331, 197, 200, 324, 463, 200, 333, 463, 197, 200, 200, 335, 336, 200, 326, 200, 203, 327, 329, 203, 331, 463, 200, 203, 203, 338, 333, 203, 340, 203, 206, 335, 336, 206, 342, 463, 203, 206, 206, 344, 345, 206, 347, 206, 209, 338, 463, 209, 340, 463, 206, 209, 209, 349, 342, 209, 351, 209, 212, 344, 345, 212, 347, 463, 209, 212, 212, 353, 354, 212, 356, 212, 215, 349, 463, 215, 351, 463, 212, 215, 215, 358, 360, 215, 362, 215, 218, 353, 354, 218, 356, 463, 215, 218, 218, 363, 365, 218, 367, 218, 221, 358, 360, 221, 362, 463, 218, 221, 221, 369, 371, 221, 372, 221, 224, 363, 365, 224, 367, 463, 221, 224, 224, 374, 376, 224, 378, 224, 227, 369, 371, 227, 372, 463, 224, 227, 227, 380, 381, 227, 383, 227, 230, 374, 376, 230, 378, 
463, 227, 230, 230, 385, 387, 230, 389, 230, 233, 380, 381, 233, 383, 463, 230, 233, 233, 390, 392, 233, 394, 233, 236, 385, 387, 236, 389, 463, 233, 236, 236, 396, 398, 236, 399, 236, 239, 390, 392, 239, 394, 463, 236, 239, 239, 402, 405, 239, 407, 239, 242, 396, 398, 242, 399, 463, 239, 242, 242, 463, 463, 242, 463, 242, 245, 402, 405, 245, 407, 463, 242, 245, 245, 463, 463, 245, 248, 245, 463, 248, 248, 409, 250, 463, 245, 250, 248, 248, 463, 250, 250, 248, 463, 250, 252, 250, 248, 252, 252, 415, 254, 409, 250, 254, 252, 252, 463, 254, 254, 252, 463, 254, 256, 254, 252, 256, 256, 423, 258, 415, 254, 258, 256, 256, 463, 258, 258, 256, 463, 258, 260, 258, 256, 260, 260, 426, 262, 423, 258, 262, 260, 260, 463, 262, 262, 260, 463, 262, 265, 262, 260, 265, 265, 436, 267, 426, 262, 267, 265, 265, 463, 267, 267, 265, 463, 267, 269, 267, 265, 269, 269, 438, 271, 436, 267, 271, 269, 269, 463, 271, 271, 269, 463, 271, 274, 271, 269, 274, 274, 463, 276, 438, 271, 276, 274, 274, 463, 276, 276, 274, 463, 276, 278, 276, 274, 278, 278, 463, 280, 463, 276, 280, 278, 278, 463, 280, 280, 278, 463, 280, 283, 280, 278, 283, 283, 463, 285, 463, 280, 285, 283, 283, 463, 285, 285, 283, 463, 285, 287, 285, 283, 287, 287, 463, 289, 463, 285, 289, 287, 287, 463, 289, 289, 287, 463, 289, 292, 289, 287, 292, 292, 463, 294, 463, 289, 294, 292, 292, 463, 294, 294, 292, 463, 294, 296, 294, 292, 296, 296, 463, 298, 463, 294, 298, 296, 296, 463, 298, 298, 296, 463, 298, 301, 298, 296, 301, 301, 463, 303, 463, 298, 303, 301, 301, 463, 303, 303, 301, 463, 303, 305, 303, 301, 305, 305, 463, 307, 463, 303, 307, 305, 305, 463, 307, 307, 305, 463, 307, 310, 307, 305, 310, 310, 463, 312, 463, 307, 312, 310, 310, 463, 312, 312, 310, 463, 312, 314, 312, 310, 314, 314, 463, 316, 463, 312, 316, 314, 314, 463, 316, 316, 314, 463, 316, 319, 316, 314, 319, 319, 463, 321, 463, 316, 321, 319, 319, 463, 321, 321, 319, 463, 321, 323, 321, 319, 323, 323, 463, 325, 463, 321, 325, 323, 323, 463, 325, 325, 323, 463, 325, 328, 325, 323, 328, 328, 463, 330, 463, 325, 330, 328, 328, 463, 330, 330, 328, 463, 330, 332, 330, 328, 332, 332, 463, 334, 463, 330, 334, 332, 332, 463, 334, 334, 332, 463, 334, 337, 334, 332, 337, 337, 463, 339, 463, 334, 339, 337, 337, 463, 339, 339, 337, 463, 339, 341, 339, 337, 341, 341, 463, 343, 463, 339, 343, 341, 341, 463, 343, 343, 341, 463, 343, 346, 343, 341, 346, 346, 463, 348, 463, 343, 348, 346, 346, 463, 348, 348, 346, 463, 348, 350, 348, 346, 350, 350, 463, 352, 463, 348, 352, 350, 350, 463, 352, 352, 350, 463, 352, 355, 352, 350, 355, 355, 463, 357, 463, 352, 357, 355, 355, 463, 357, 357, 355, 463, 357, 359, 357, 355, 359, 359, 463, 361, 463, 357, 361, 359, 359, 463, 361, 361, 359, 463, 361, 364, 361, 359, 364, 364, 463, 366, 463, 361, 366, 364, 364, 463, 366, 366, 364, 463, 366, 368, 366, 364, 368, 368, 463, 370, 463, 366, 370, 368, 368, 463, 370, 370, 368, 463, 370, 373, 370, 368, 373, 373, 463, 375, 463, 370, 375, 373, 373, 463, 375, 375, 373, 463, 375, 377, 375, 373, 377, 377, 463, 379, 463, 375, 379, 377, 377, 463, 379, 379, 377, 463, 379, 382, 379, 377, 382, 382, 463, 384, 463, 379, 384, 382, 382, 463, 384, 384, 382, 463, 384, 386, 384, 382, 386, 386, 463, 388, 463, 384, 388, 386, 386, 463, 388, 388, 386, 463, 388, 391, 388, 386, 391, 391, 463, 393, 463, 388, 393, 391, 391, 463, 393, 393, 391, 463, 393, 395, 393, 391, 395, 395, 463, 397, 463, 393, 397, 395, 395, 463, 397, 397, 395, 463, 397, 404, 397, 395, 404, 404, 463, 406, 463, 397, 406, 404, 404, 463, 406, 406, 404, 463, 406, 408, 406, 404, 408, 
408, 463, 410, 463, 406, 410, 408, 408, 463, 410, 410, 408, 463, 410, 463, 410, 408, 421, 463, 463, 421, 463, 410, 463, 421, 421, 463, 463, 421, 463, 421, 427, 463, 463, 427, 463, 463, 421, 427, 427, 463, 463, 427, 428, 427, 435, 428, 428, 435, 435, 463, 427, 463, 428, 428, 435, 435, 463, 428, 463, 435, 463, 463, 428, 439, 435, 463, 439, 439, 441, 463, 463, 441, 463, 439, 439, 441, 441, 463, 439, 441, 463, 441, 442, 439, 463, 442, 463, 463, 441, 442, 442, 463, 463, 442, 463, 442, 463, 463, 463, 463, 463, 463, 442}; /* Base vector for state insn alternatives. */ static const unsigned short athlon_fp_base_state_alts[] = { 0, 84, 21, 88, 96, 105, 109, 117, 126, 130, 145, 295, 1332, 155, 156, 124, 1044, 163, 442, 1338, 167, 311, 1354, 195, 206, 1060, 214, 1368, 225, 456, 1382, 233, 470, 1396, 244, 484, 1410, 252, 498, 1424, 260, 512, 1438, 279, 526, 1452, 289, 540, 1466, 290, 554, 1480, 296, 568, 1494, 297, 582, 1508, 301, 596, 1522, 305, 610, 1536, 333, 624, 1550, 337, 638, 1564, 349, 1576, 365, 1582, 1594, 381, 1600, 397, 413, 1612, 428, 1618, 438, 1630, 888, 1636, 892, 899, 1648, 906, 1654, 909, 1666, 918, 1672, 920, 927, 1684, 930, 1690, 931, 1702, 939, 1708, 941, 948, 1720, 952, 1726, 960, 1738, 962, 1744, 969, 973, 1756, 981, 1762, 983, 1774, 990, 1780, 994, 998, 1792, 1011, 1798, 1025, 1810, 1028, 1816, 1037, 1038, 1828, 1039, 1834, 1046, 1846, 1050, 1852, 1054, 1082, 1864, 1086, 1870, 1098, 1882, 1114, 1888, 1130, 1146, 1900, 1162, 1906, 1178, 1918, 1194, 1924, 1210, 1226, 1936, 1242, 1942, 1258, 1954, 1274, 1960, 1290, 1306, 1972, 1324, 1978, 1330, 1990, 1331, 1996, 1346, 1360, 2008, 1363, 2014, 1364, 2026, 1372, 2032, 1377, 1378, 2044, 1386, 2050, 1391, 2062, 1394, 2068, 1400, 1405, 1408, 2083, 1419, 652, 2097, 1422, 666, 2111, 1433, 680, 2125, 1434, 694, 2139, 1436, 708, 2153, 1447, 722, 2167, 1448, 736, 2181, 1450, 750, 2195, 1461, 764, 2209, 1462, 778, 2223, 1464, 792, 2237, 1475, 806, 2251, 1476, 820, 2265, 1478, 834, 2279, 1489, 848, 2293, 1490, 862, 2307, 1492, 876, 2319, 1503, 2325, 1504, 2337, 1506, 2343, 1517, 2355, 1518, 2361, 1520, 2373, 1531, 2379, 1532, 1534, 2391, 1545, 2397, 1546, 2409, 1548, 2415, 1581, 1599, 2427, 1617, 2433, 1635, 2445, 1653, 2451, 1671, 1689, 2463, 1707, 2469, 1725, 2481, 1743, 2487, 1761, 1779, 2499, 1797, 2505, 1815, 2517, 1833, 2523, 1851, 1869, 2535, 1887, 2541, 1905, 2553, 1923, 2559, 1941, 1959, 2571, 1977, 2577, 1995, 2589, 2013, 2595, 2031, 2049, 2607, 2060, 2613, 2067, 2625, 2078, 2631, 2089, 2092, 2643, 2093, 2649, 2095, 2661, 2101, 2667, 2106, 2107, 2679, 2120, 2685, 2123, 2697, 2129, 2703, 2134, 2135, 2715, 2137, 2721, 2148, 2733, 2151, 2739, 2162, 2163, 2751, 2165, 2757, 2176, 2769, 2177, 2775, 2179, 2190, 2787, 2191, 2793, 2193, 2805, 2204, 2811, 2205, 2207, 2823, 2218, 2829, 2219, 2841, 2221, 2847, 2232, 2233, 2859, 2235, 2865, 2246, 2877, 2247, 2883, 2249, 2260, 2895, 2261, 2901, 2263, 2913, 2274, 2919, 2275, 2277, 327, 1076, 2288, 890, 2931, 2289, 2937, 2291, 2949, 2324, 2955, 42, 343, 1092, 1108, 2342, 82, 904, 925, 140, 946, 2970, 967, 2360, 161, 988, 2378, 2984, 2996, 182, 1124, 1140, 359, 1156, 201, 2998, 2396, 1172, 2414, 3017, 375, 3022, 3036, 1009, 1188, 1204, 63, 220, 1220, 1236, 391, 1252, 239, 1268, 407, 1284, 1300, 258, 1316, 423, 103, 1023, 274}; #endif /* #if AUTOMATON_STATE_ALTS */ /* Vector of min issue delay of insns. 
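Each entry gives, for one automaton state and one translated insn reservation class, the minimum number of cycles that must pass before an insn of that class can be issued from that state; internal_min_issue_delay below shows the indexing (state * row width + translated insn code) and, for some of the smaller automata, the sub-byte packing of entries.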
*/ static const unsigned char athlon_fp_min_issue_delay[] ATTRIBUTE_UNUSED = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 15, 0, 0, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 14, 0, 0, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 13, 0, 0, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 12, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 11, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 10, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 9, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 8, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 6, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 6, 0, 0, 2, 2, 1, 2, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 1, 1, 7, 0, 0, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 6, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 5, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 5, 0, 0, 2, 2, 1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 1, 2, 1, 2, 2, 2, 5, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 4, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 4, 0, 0, 3, 3, 1, 3, 3, 3, 1, 1, 1, 3, 1, 1, 1, 3, 3, 3, 1, 1, 4, 0, 0, 2, 2, 0, 2, 2, 2, 0, 0, 0, 2, 0, 0, 0, 2, 2, 2, 0, 0, 3, 0, 0, 2, 2, 0, 2, 2, 2, 0, 0, 1, 2, 0, 1, 0, 2, 2, 2, 1, 1, 6, 0, 0, 2, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 2, 1, 2, 2, 2, 2, 2, 7, 0, 0, 2, 2, 1, 2, 2, 2, 1, 2, 1, 2, 1, 1, 2, 2, 2, 2, 1, 1, 6, 0, 0, 2, 2, 0, 2, 2, 2, 0, 1, 0, 2, 0, 0, 1, 2, 2, 2, 0, 0, 3, 0, 0, 17, 17, 1, 2, 17, 17, 1, 1, 17, 2, 1, 17, 1, 17, 2, 17, 17, 17, 20, 0, 0, 16, 16, 0, 1, 16, 16, 0, 0, 16, 1, 0, 16, 0, 16, 1, 16, 16, 16, 19, 0, 0, 16, 16, 1, 1, 16, 16, 1, 1, 16, 1, 1, 16, 1, 16, 1, 16, 16, 16, 19, 0, 0, 15, 15, 0, 0, 15, 15, 0, 0, 15, 0, 0, 15, 0, 15, 0, 15, 15, 15, 18, 0, 0, 15, 15, 0, 1, 15, 15, 0, 0, 15, 1, 0, 15, 0, 15, 1, 15, 15, 15, 18, 0, 0, 15, 15, 1, 1, 15, 15, 1, 1, 15, 1, 1, 15, 1, 15, 1, 15, 15, 15, 18, 0, 0, 14, 14, 0, 0, 14, 14, 0, 0, 14, 0, 0, 14, 0, 14, 0, 14, 14, 14, 17, 0, 0, 14, 14, 0, 1, 14, 14, 0, 0, 14, 1, 0, 14, 0, 14, 1, 14, 14, 14, 17, 0, 0, 14, 14, 1, 1, 14, 14, 1, 1, 14, 1, 1, 14, 1, 14, 1, 14, 14, 14, 17, 0, 0, 13, 13, 0, 0, 13, 13, 0, 0, 13, 0, 0, 13, 0, 13, 0, 13, 13, 13, 16, 0, 0, 13, 13, 0, 1, 13, 13, 0, 0, 13, 1, 0, 13, 0, 13, 1, 13, 13, 13, 16, 0, 0, 13, 13, 1, 1, 13, 13, 1, 1, 13, 1, 1, 13, 1, 13, 1, 13, 13, 13, 16, 0, 0, 12, 12, 0, 0, 12, 12, 0, 0, 12, 0, 0, 12, 0, 12, 0, 12, 12, 12, 15, 0, 0, 12, 12, 0, 1, 12, 12, 0, 0, 12, 1, 0, 12, 0, 12, 1, 12, 12, 12, 15, 0, 0, 12, 12, 1, 1, 12, 12, 1, 1, 12, 1, 1, 12, 1, 12, 1, 12, 12, 12, 15, 0, 0, 11, 11, 0, 0, 11, 11, 0, 0, 11, 0, 0, 11, 0, 11, 0, 11, 11, 11, 14, 0, 0, 11, 11, 0, 1, 11, 11, 0, 0, 11, 1, 0, 11, 0, 11, 1, 11, 11, 11, 14, 0, 0, 11, 11, 1, 1, 11, 11, 1, 1, 11, 1, 1, 11, 1, 11, 1, 11, 11, 11, 14, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0, 10, 0, 0, 10, 0, 10, 0, 10, 10, 10, 13, 0, 0, 10, 10, 0, 1, 10, 10, 0, 0, 10, 1, 0, 10, 0, 10, 1, 10, 10, 10, 13, 0, 0, 10, 10, 1, 1, 10, 10, 1, 1, 10, 1, 1, 10, 1, 10, 1, 10, 10, 10, 13, 0, 0, 9, 9, 0, 0, 9, 9, 0, 0, 9, 0, 0, 9, 0, 9, 0, 9, 9, 9, 12, 0, 0, 9, 9, 0, 1, 9, 9, 0, 0, 9, 1, 0, 9, 0, 9, 1, 9, 9, 9, 12, 0, 0, 9, 9, 1, 1, 9, 9, 1, 1, 9, 1, 1, 9, 1, 9, 1, 9, 9, 9, 12, 0, 0, 8, 8, 0, 0, 8, 8, 0, 0, 8, 0, 0, 8, 0, 8, 0, 8, 8, 8, 11, 0, 0, 8, 8, 0, 1, 8, 8, 
0, 0, 8, 1, 0, 8, 0, 8, 1, 8, 8, 8, 11, 0, 0, 8, 8, 1, 1, 8, 8, 1, 1, 8, 1, 1, 8, 1, 8, 1, 8, 8, 8, 11, 0, 0, 7, 7, 0, 0, 7, 7, 0, 0, 7, 0, 0, 7, 0, 7, 0, 7, 7, 7, 10, 0, 0, 7, 7, 0, 1, 7, 7, 0, 0, 7, 1, 0, 7, 0, 7, 1, 7, 7, 7, 10, 0, 0, 7, 7, 1, 1, 7, 7, 1, 1, 7, 1, 1, 7, 1, 7, 1, 7, 7, 7, 10, 0, 0, 6, 6, 0, 0, 6, 6, 0, 0, 6, 0, 0, 6, 0, 6, 0, 6, 6, 6, 9, 0, 0, 6, 6, 0, 1, 6, 6, 0, 0, 6, 1, 0, 6, 0, 6, 1, 6, 6, 6, 9, 0, 0, 6, 6, 1, 1, 6, 6, 1, 1, 6, 1, 1, 6, 1, 6, 1, 6, 6, 6, 9, 0, 0, 5, 5, 0, 0, 5, 5, 0, 0, 5, 0, 0, 5, 0, 5, 0, 5, 5, 5, 8, 0, 0, 5, 5, 0, 1, 5, 5, 0, 0, 5, 1, 0, 5, 0, 5, 1, 5, 5, 5, 8, 0, 0, 5, 5, 1, 1, 5, 5, 1, 1, 5, 1, 1, 5, 1, 5, 1, 5, 5, 5, 8, 0, 0, 4, 4, 0, 0, 4, 4, 0, 0, 4, 0, 0, 4, 0, 4, 0, 4, 4, 4, 7, 0, 0, 4, 4, 0, 1, 4, 4, 0, 0, 4, 1, 0, 4, 0, 4, 1, 4, 4, 4, 7, 0, 0, 4, 4, 1, 1, 4, 4, 1, 1, 4, 1, 1, 4, 1, 4, 1, 4, 4, 4, 7, 0, 0, 3, 3, 0, 0, 3, 3, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, 3, 3, 6, 0, 0, 3, 3, 0, 1, 3, 3, 0, 0, 3, 1, 0, 3, 0, 3, 1, 3, 3, 3, 6, 0, 0, 3, 3, 1, 1, 3, 3, 1, 1, 3, 1, 1, 3, 1, 3, 1, 3, 3, 3, 6, 0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 2, 0, 0, 2, 0, 2, 0, 2, 2, 2, 5, 0, 0, 2, 2, 0, 1, 2, 2, 0, 0, 2, 1, 0, 2, 0, 2, 1, 2, 2, 2, 5, 0, 0, 2, 2, 1, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1, 2, 2, 2, 5, 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 2, 0, 0, 2, 1, 2, 0, 2, 2, 2, 5, 0, 0, 3, 3, 1, 3, 3, 3, 1, 1, 2, 3, 1, 2, 1, 3, 3, 3, 2, 2, 7, 0, 0, 2, 2, 0, 2, 2, 2, 0, 0, 2, 2, 0, 2, 0, 2, 2, 2, 2, 2, 7, 0, 0, 2, 2, 0, 0, 2, 2, 2, 2, 2, 0, 0, 2, 2, 2, 0, 2, 2, 2, 5, 0, 0, 3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 2, 2, 2, 3, 3, 3, 2, 2, 8, 0, 0, 3, 3, 0, 3, 3, 3, 0, 0, 2, 3, 0, 2, 0, 3, 3, 3, 2, 2, 7, 0, 0, 3, 3, 1, 2, 3, 3, 1, 1, 3, 2, 1, 3, 1, 3, 2, 3, 3, 3, 6, 0, 0, 3, 3, 1, 1, 3, 3, 2, 2, 3, 1, 1, 3, 2, 3, 1, 3, 3, 3, 6, 0, 0, 3, 3, 0, 0, 3, 3, 1, 1, 3, 0, 0, 3, 1, 3, 0, 3, 3, 3, 6, 0, 0, 3, 3, 1, 3, 3, 3, 1, 1, 3, 3, 1, 3, 1, 3, 3, 3, 3, 3, 8, 0, 0, 3, 3, 0, 2, 3, 3, 0, 0, 3, 2, 0, 3, 0, 3, 2, 3, 3, 3, 6, 0, 0, 3, 3, 2, 2, 3, 3, 2, 2, 3, 2, 2, 3, 2, 3, 2, 3, 3, 3, 6, 0, 0, 3, 3, 0, 0, 3, 3, 2, 2, 3, 0, 0, 3, 2, 3, 0, 3, 3, 3, 6, 0, 0, 3, 3, 2, 3, 3, 3, 2, 2, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 8, 0, 0, 3, 3, 0, 3, 3, 3, 0, 0, 3, 3, 0, 3, 0, 3, 3, 3, 3, 3, 8, 0, 0, 4, 4, 1, 2, 4, 4, 1, 1, 4, 2, 1, 4, 1, 4, 2, 4, 4, 4, 7, 0, 0, 4, 4, 1, 1, 4, 4, 2, 2, 4, 1, 1, 4, 2, 4, 1, 4, 4, 4, 7, 0, 0, 4, 4, 0, 0, 4, 4, 1, 1, 4, 0, 0, 4, 1, 4, 0, 4, 4, 4, 7, 0, 0, 4, 4, 1, 3, 4, 4, 1, 1, 4, 3, 1, 4, 1, 4, 3, 4, 4, 4, 7, 0, 0, 4, 4, 0, 2, 4, 4, 0, 0, 4, 2, 0, 4, 0, 4, 2, 4, 4, 4, 7, 0, 0, 4, 4, 2, 2, 4, 4, 2, 2, 4, 2, 2, 4, 2, 4, 2, 4, 4, 4, 7, 0, 0, 4, 4, 0, 0, 4, 4, 2, 2, 4, 0, 0, 4, 2, 4, 0, 4, 4, 4, 7, 0, 0, 4, 4, 2, 3, 4, 4, 2, 2, 4, 3, 2, 4, 2, 4, 3, 4, 4, 4, 7, 0, 0, 4, 4, 0, 3, 4, 4, 0, 0, 4, 3, 0, 4, 0, 4, 3, 4, 4, 4, 7, 0, 0, 5, 5, 1, 2, 5, 5, 1, 1, 5, 2, 1, 5, 1, 5, 2, 5, 5, 5, 8, 0, 0, 5, 5, 1, 1, 5, 5, 2, 2, 5, 1, 1, 5, 2, 5, 1, 5, 5, 5, 8, 0, 0, 5, 5, 0, 0, 5, 5, 1, 1, 5, 0, 0, 5, 1, 5, 0, 5, 5, 5, 8, 0, 0, 5, 5, 1, 3, 5, 5, 1, 1, 5, 3, 1, 5, 1, 5, 3, 5, 5, 5, 8, 0, 0, 5, 5, 0, 2, 5, 5, 0, 0, 5, 2, 0, 5, 0, 5, 2, 5, 5, 5, 8, 0, 0, 5, 5, 2, 2, 5, 5, 2, 2, 5, 2, 2, 5, 2, 5, 2, 5, 5, 5, 8, 0, 0, 5, 5, 0, 0, 5, 5, 2, 2, 5, 0, 0, 5, 2, 5, 0, 5, 5, 5, 8, 0, 0, 5, 5, 2, 3, 5, 5, 2, 2, 5, 3, 2, 5, 2, 5, 3, 5, 5, 5, 8, 0, 0, 5, 5, 0, 3, 5, 5, 0, 0, 5, 3, 0, 5, 0, 5, 3, 5, 5, 5, 8, 0, 0, 6, 6, 1, 2, 6, 6, 1, 1, 6, 2, 1, 6, 1, 6, 2, 6, 6, 6, 9, 0, 0, 6, 6, 1, 1, 6, 6, 2, 2, 6, 1, 1, 6, 2, 6, 1, 6, 6, 6, 9, 0, 0, 6, 6, 0, 0, 6, 6, 1, 1, 6, 0, 0, 6, 1, 6, 0, 6, 6, 6, 9, 0, 0, 6, 6, 1, 3, 6, 6, 1, 1, 6, 3, 1, 6, 1, 
6, 3, 6, 6, 6, 9, 0, 0, 6, 6, 0, 2, 6, 6, 0, 0, 6, 2, 0, 6, 0, 6, 2, 6, 6, 6, 9, 0, 0, 6, 6, 2, 2, 6, 6, 2, 2, 6, 2, 2, 6, 2, 6, 2, 6, 6, 6, 9, 0, 0, 6, 6, 0, 0, 6, 6, 2, 2, 6, 0, 0, 6, 2, 6, 0, 6, 6, 6, 9, 0, 0, 6, 6, 2, 3, 6, 6, 2, 2, 6, 3, 2, 6, 2, 6, 3, 6, 6, 6, 9, 0, 0, 6, 6, 0, 3, 6, 6, 0, 0, 6, 3, 0, 6, 0, 6, 3, 6, 6, 6, 9, 0, 0, 7, 7, 1, 2, 7, 7, 1, 1, 7, 2, 1, 7, 1, 7, 2, 7, 7, 7, 10, 0, 0, 7, 7, 1, 1, 7, 7, 2, 2, 7, 1, 1, 7, 2, 7, 1, 7, 7, 7, 10, 0, 0, 7, 7, 0, 0, 7, 7, 1, 1, 7, 0, 0, 7, 1, 7, 0, 7, 7, 7, 10, 0, 0, 7, 7, 1, 3, 7, 7, 1, 1, 7, 3, 1, 7, 1, 7, 3, 7, 7, 7, 10, 0, 0, 7, 7, 0, 2, 7, 7, 0, 0, 7, 2, 0, 7, 0, 7, 2, 7, 7, 7, 10, 0, 0, 7, 7, 2, 2, 7, 7, 2, 2, 7, 2, 2, 7, 2, 7, 2, 7, 7, 7, 10, 0, 0, 7, 7, 0, 0, 7, 7, 2, 2, 7, 0, 0, 7, 2, 7, 0, 7, 7, 7, 10, 0, 0, 7, 7, 2, 3, 7, 7, 2, 2, 7, 3, 2, 7, 2, 7, 3, 7, 7, 7, 10, 0, 0, 7, 7, 0, 3, 7, 7, 0, 0, 7, 3, 0, 7, 0, 7, 3, 7, 7, 7, 10, 0, 0, 8, 8, 1, 2, 8, 8, 1, 1, 8, 2, 1, 8, 1, 8, 2, 8, 8, 8, 11, 0, 0, 8, 8, 1, 1, 8, 8, 2, 2, 8, 1, 1, 8, 2, 8, 1, 8, 8, 8, 11, 0, 0, 8, 8, 0, 0, 8, 8, 1, 1, 8, 0, 0, 8, 1, 8, 0, 8, 8, 8, 11, 0, 0, 8, 8, 1, 3, 8, 8, 1, 1, 8, 3, 1, 8, 1, 8, 3, 8, 8, 8, 11, 0, 0, 8, 8, 0, 2, 8, 8, 0, 0, 8, 2, 0, 8, 0, 8, 2, 8, 8, 8, 11, 0, 0, 8, 8, 2, 2, 8, 8, 2, 2, 8, 2, 2, 8, 2, 8, 2, 8, 8, 8, 11, 0, 0, 8, 8, 0, 0, 8, 8, 2, 2, 8, 0, 0, 8, 2, 8, 0, 8, 8, 8, 11, 0, 0, 8, 8, 2, 3, 8, 8, 2, 2, 8, 3, 2, 8, 2, 8, 3, 8, 8, 8, 11, 0, 0, 8, 8, 0, 3, 8, 8, 0, 0, 8, 3, 0, 8, 0, 8, 3, 8, 8, 8, 11, 0, 0, 9, 9, 1, 2, 9, 9, 1, 1, 9, 2, 1, 9, 1, 9, 2, 9, 9, 9, 12, 0, 0, 9, 9, 1, 1, 9, 9, 2, 2, 9, 1, 1, 9, 2, 9, 1, 9, 9, 9, 12, 0, 0, 9, 9, 0, 0, 9, 9, 1, 1, 9, 0, 0, 9, 1, 9, 0, 9, 9, 9, 12, 0, 0, 9, 9, 1, 3, 9, 9, 1, 1, 9, 3, 1, 9, 1, 9, 3, 9, 9, 9, 12, 0, 0, 9, 9, 0, 2, 9, 9, 0, 0, 9, 2, 0, 9, 0, 9, 2, 9, 9, 9, 12, 0, 0, 9, 9, 2, 2, 9, 9, 2, 2, 9, 2, 2, 9, 2, 9, 2, 9, 9, 9, 12, 0, 0, 9, 9, 0, 0, 9, 9, 2, 2, 9, 0, 0, 9, 2, 9, 0, 9, 9, 9, 12, 0, 0, 9, 9, 2, 3, 9, 9, 2, 2, 9, 3, 2, 9, 2, 9, 3, 9, 9, 9, 12, 0, 0, 9, 9, 0, 3, 9, 9, 0, 0, 9, 3, 0, 9, 0, 9, 3, 9, 9, 9, 12, 0, 0, 10, 10, 1, 2, 10, 10, 1, 1, 10, 2, 1, 10, 1, 10, 2, 10, 10, 10, 13, 0, 0, 10, 10, 1, 1, 10, 10, 2, 2, 10, 1, 1, 10, 2, 10, 1, 10, 10, 10, 13, 0, 0, 10, 10, 0, 0, 10, 10, 1, 1, 10, 0, 0, 10, 1, 10, 0, 10, 10, 10, 13, 0, 0, 10, 10, 1, 3, 10, 10, 1, 1, 10, 3, 1, 10, 1, 10, 3, 10, 10, 10, 13, 0, 0, 10, 10, 0, 2, 10, 10, 0, 0, 10, 2, 0, 10, 0, 10, 2, 10, 10, 10, 13, 0, 0, 10, 10, 2, 2, 10, 10, 2, 2, 10, 2, 2, 10, 2, 10, 2, 10, 10, 10, 13, 0, 0, 10, 10, 0, 0, 10, 10, 2, 2, 10, 0, 0, 10, 2, 10, 0, 10, 10, 10, 13, 0, 0, 10, 10, 2, 3, 10, 10, 2, 2, 10, 3, 2, 10, 2, 10, 3, 10, 10, 10, 13, 0, 0, 10, 10, 0, 3, 10, 10, 0, 0, 10, 3, 0, 10, 0, 10, 3, 10, 10, 10, 13, 0, 0, 11, 11, 1, 2, 11, 11, 1, 1, 11, 2, 1, 11, 1, 11, 2, 11, 11, 11, 14, 0, 0, 11, 11, 1, 1, 11, 11, 2, 2, 11, 1, 1, 11, 2, 11, 1, 11, 11, 11, 14, 0, 0, 11, 11, 0, 0, 11, 11, 1, 1, 11, 0, 0, 11, 1, 11, 0, 11, 11, 11, 14, 0, 0, 11, 11, 1, 3, 11, 11, 1, 1, 11, 3, 1, 11, 1, 11, 3, 11, 11, 11, 14, 0, 0, 11, 11, 0, 2, 11, 11, 0, 0, 11, 2, 0, 11, 0, 11, 2, 11, 11, 11, 14, 0, 0, 11, 11, 2, 2, 11, 11, 2, 2, 11, 2, 2, 11, 2, 11, 2, 11, 11, 11, 14, 0, 0, 11, 11, 0, 0, 11, 11, 2, 2, 11, 0, 0, 11, 2, 11, 0, 11, 11, 11, 14, 0, 0, 11, 11, 2, 3, 11, 11, 2, 2, 11, 3, 2, 11, 2, 11, 3, 11, 11, 11, 14, 0, 0, 11, 11, 0, 3, 11, 11, 0, 0, 11, 3, 0, 11, 0, 11, 3, 11, 11, 11, 14, 0, 0, 12, 12, 1, 2, 12, 12, 1, 1, 12, 2, 1, 12, 1, 12, 2, 12, 12, 12, 15, 0, 0, 12, 12, 1, 1, 12, 12, 2, 2, 12, 1, 1, 12, 2, 12, 1, 12, 12, 12, 15, 0, 0, 12, 12, 
0, 0, 12, 12, 1, 1, 12, 0, 0, 12, 1, 12, 0, 12, 12, 12, 15, 0, 0, 12, 12, 1, 3, 12, 12, 1, 1, 12, 3, 1, 12, 1, 12, 3, 12, 12, 12, 15, 0, 0, 12, 12, 0, 2, 12, 12, 0, 0, 12, 2, 0, 12, 0, 12, 2, 12, 12, 12, 15, 0, 0, 12, 12, 2, 2, 12, 12, 2, 2, 12, 2, 2, 12, 2, 12, 2, 12, 12, 12, 15, 0, 0, 12, 12, 0, 0, 12, 12, 2, 2, 12, 0, 0, 12, 2, 12, 0, 12, 12, 12, 15, 0, 0, 12, 12, 2, 3, 12, 12, 2, 2, 12, 3, 2, 12, 2, 12, 3, 12, 12, 12, 15, 0, 0, 12, 12, 0, 3, 12, 12, 0, 0, 12, 3, 0, 12, 0, 12, 3, 12, 12, 12, 15, 0, 0, 13, 13, 1, 2, 13, 13, 1, 1, 13, 2, 1, 13, 1, 13, 2, 13, 13, 13, 16, 0, 0, 13, 13, 1, 1, 13, 13, 2, 2, 13, 1, 1, 13, 2, 13, 1, 13, 13, 13, 16, 0, 0, 13, 13, 0, 0, 13, 13, 1, 1, 13, 0, 0, 13, 1, 13, 0, 13, 13, 13, 16, 0, 0, 13, 13, 1, 3, 13, 13, 1, 1, 13, 3, 1, 13, 1, 13, 3, 13, 13, 13, 16, 0, 0, 13, 13, 0, 2, 13, 13, 0, 0, 13, 2, 0, 13, 0, 13, 2, 13, 13, 13, 16, 0, 0, 13, 13, 2, 2, 13, 13, 2, 2, 13, 2, 2, 13, 2, 13, 2, 13, 13, 13, 16, 0, 0, 13, 13, 0, 0, 13, 13, 2, 2, 13, 0, 0, 13, 2, 13, 0, 13, 13, 13, 16, 0, 0, 13, 13, 2, 3, 13, 13, 2, 2, 13, 3, 2, 13, 2, 13, 3, 13, 13, 13, 16, 0, 0, 13, 13, 0, 3, 13, 13, 0, 0, 13, 3, 0, 13, 0, 13, 3, 13, 13, 13, 16, 0, 0, 14, 14, 1, 2, 14, 14, 1, 1, 14, 2, 1, 14, 1, 14, 2, 14, 14, 14, 17, 0, 0, 14, 14, 1, 1, 14, 14, 2, 2, 14, 1, 1, 14, 2, 14, 1, 14, 14, 14, 17, 0, 0, 14, 14, 0, 0, 14, 14, 1, 1, 14, 0, 0, 14, 1, 14, 0, 14, 14, 14, 17, 0, 0, 14, 14, 1, 3, 14, 14, 1, 1, 14, 3, 1, 14, 1, 14, 3, 14, 14, 14, 17, 0, 0, 14, 14, 0, 2, 14, 14, 0, 0, 14, 2, 0, 14, 0, 14, 2, 14, 14, 14, 17, 0, 0, 14, 14, 2, 2, 14, 14, 2, 2, 14, 2, 2, 14, 2, 14, 2, 14, 14, 14, 17, 0, 0, 14, 14, 0, 0, 14, 14, 2, 2, 14, 0, 0, 14, 2, 14, 0, 14, 14, 14, 17, 0, 0, 14, 14, 2, 3, 14, 14, 2, 2, 14, 3, 2, 14, 2, 14, 3, 14, 14, 14, 17, 0, 0, 14, 14, 0, 3, 14, 14, 0, 0, 14, 3, 0, 14, 0, 14, 3, 14, 14, 14, 17, 0, 0, 15, 15, 1, 2, 15, 15, 1, 1, 15, 2, 1, 15, 1, 15, 2, 15, 15, 15, 18, 0, 0, 15, 15, 1, 1, 15, 15, 2, 2, 15, 1, 1, 15, 2, 15, 1, 15, 15, 15, 18, 0, 0, 15, 15, 0, 0, 15, 15, 1, 1, 15, 0, 0, 15, 1, 15, 0, 15, 15, 15, 18, 0, 0, 15, 15, 1, 3, 15, 15, 1, 1, 15, 3, 1, 15, 1, 15, 3, 15, 15, 15, 18, 0, 0, 15, 15, 0, 2, 15, 15, 0, 0, 15, 2, 0, 15, 0, 15, 2, 15, 15, 15, 18, 0, 0, 15, 15, 2, 2, 15, 15, 2, 2, 15, 2, 2, 15, 2, 15, 2, 15, 15, 15, 18, 0, 0, 15, 15, 0, 0, 15, 15, 2, 2, 15, 0, 0, 15, 2, 15, 0, 15, 15, 15, 18, 0, 0, 15, 15, 2, 3, 15, 15, 2, 2, 15, 3, 2, 15, 2, 15, 3, 15, 15, 15, 18, 0, 0, 15, 15, 0, 3, 15, 15, 0, 0, 15, 3, 0, 15, 0, 15, 3, 15, 15, 15, 18, 0, 0, 16, 16, 1, 2, 16, 16, 1, 1, 16, 2, 1, 16, 1, 16, 2, 16, 16, 16, 19, 0, 0, 16, 16, 1, 1, 16, 16, 2, 2, 16, 1, 1, 16, 2, 16, 1, 16, 16, 16, 19, 0, 0, 34, 34, 1, 2, 34, 34, 1, 1, 34, 2, 1, 34, 1, 34, 2, 34, 34, 34, 37, 0, 0, 33, 33, 0, 1, 33, 33, 0, 0, 33, 1, 0, 33, 0, 33, 1, 33, 33, 33, 36, 0, 0, 33, 33, 1, 1, 33, 33, 1, 1, 33, 1, 1, 33, 1, 33, 1, 33, 33, 33, 36, 0, 0, 32, 32, 0, 0, 32, 32, 0, 0, 32, 0, 0, 32, 0, 32, 0, 32, 32, 32, 35, 0, 0, 32, 32, 0, 1, 32, 32, 0, 0, 32, 1, 0, 32, 0, 32, 1, 32, 32, 32, 35, 0, 0, 32, 32, 1, 1, 32, 32, 1, 1, 32, 1, 1, 32, 1, 32, 1, 32, 32, 32, 35, 0, 0, 31, 31, 0, 0, 31, 31, 0, 0, 31, 0, 0, 31, 0, 31, 0, 31, 31, 31, 34, 0, 0, 31, 31, 0, 1, 31, 31, 0, 0, 31, 1, 0, 31, 0, 31, 1, 31, 31, 31, 34, 0, 0, 31, 31, 1, 1, 31, 31, 1, 1, 31, 1, 1, 31, 1, 31, 1, 31, 31, 31, 34, 0, 0, 30, 30, 0, 0, 30, 30, 0, 0, 30, 0, 0, 30, 0, 30, 0, 30, 30, 30, 33, 0, 0, 30, 30, 0, 1, 30, 30, 0, 0, 30, 1, 0, 30, 0, 30, 1, 30, 30, 30, 33, 0, 0, 30, 30, 1, 1, 30, 30, 1, 1, 30, 1, 1, 30, 1, 30, 1, 30, 30, 30, 33, 0, 0, 29, 29, 0, 
0, 29, 29, 0, 0, 29, 0, 0, 29, 0, 29, 0, 29, 29, 29, 32, 0, 0, 29, 29, 0, 1, 29, 29, 0, 0, 29, 1, 0, 29, 0, 29, 1, 29, 29, 29, 32, 0, 0, 29, 29, 1, 1, 29, 29, 1, 1, 29, 1, 1, 29, 1, 29, 1, 29, 29, 29, 32, 0, 0, 28, 28, 0, 0, 28, 28, 0, 0, 28, 0, 0, 28, 0, 28, 0, 28, 28, 28, 31, 0, 0, 28, 28, 0, 1, 28, 28, 0, 0, 28, 1, 0, 28, 0, 28, 1, 28, 28, 28, 31, 0, 0, 28, 28, 1, 1, 28, 28, 1, 1, 28, 1, 1, 28, 1, 28, 1, 28, 28, 28, 31, 0, 0, 27, 27, 0, 0, 27, 27, 0, 0, 27, 0, 0, 27, 0, 27, 0, 27, 27, 27, 30, 0, 0, 27, 27, 0, 1, 27, 27, 0, 0, 27, 1, 0, 27, 0, 27, 1, 27, 27, 27, 30, 0, 0, 27, 27, 1, 1, 27, 27, 1, 1, 27, 1, 1, 27, 1, 27, 1, 27, 27, 27, 30, 0, 0, 26, 26, 0, 0, 26, 26, 0, 0, 26, 0, 0, 26, 0, 26, 0, 26, 26, 26, 29, 0, 0, 26, 26, 0, 1, 26, 26, 0, 0, 26, 1, 0, 26, 0, 26, 1, 26, 26, 26, 29, 0, 0, 26, 26, 1, 1, 26, 26, 1, 1, 26, 1, 1, 26, 1, 26, 1, 26, 26, 26, 29, 0, 0, 25, 25, 0, 0, 25, 25, 0, 0, 25, 0, 0, 25, 0, 25, 0, 25, 25, 25, 28, 0, 0, 25, 25, 0, 1, 25, 25, 0, 0, 25, 1, 0, 25, 0, 25, 1, 25, 25, 25, 28, 0, 0, 25, 25, 1, 1, 25, 25, 1, 1, 25, 1, 1, 25, 1, 25, 1, 25, 25, 25, 28, 0, 0, 24, 24, 0, 0, 24, 24, 0, 0, 24, 0, 0, 24, 0, 24, 0, 24, 24, 24, 27, 0, 0, 24, 24, 0, 1, 24, 24, 0, 0, 24, 1, 0, 24, 0, 24, 1, 24, 24, 24, 27, 0, 0, 24, 24, 1, 1, 24, 24, 1, 1, 24, 1, 1, 24, 1, 24, 1, 24, 24, 24, 27, 0, 0, 23, 23, 0, 0, 23, 23, 0, 0, 23, 0, 0, 23, 0, 23, 0, 23, 23, 23, 26, 0, 0, 23, 23, 0, 1, 23, 23, 0, 0, 23, 1, 0, 23, 0, 23, 1, 23, 23, 23, 26, 0, 0, 23, 23, 1, 1, 23, 23, 1, 1, 23, 1, 1, 23, 1, 23, 1, 23, 23, 23, 26, 0, 0, 22, 22, 0, 0, 22, 22, 0, 0, 22, 0, 0, 22, 0, 22, 0, 22, 22, 22, 25, 0, 0, 22, 22, 0, 1, 22, 22, 0, 0, 22, 1, 0, 22, 0, 22, 1, 22, 22, 22, 25, 0, 0, 22, 22, 1, 1, 22, 22, 1, 1, 22, 1, 1, 22, 1, 22, 1, 22, 22, 22, 25, 0, 0, 21, 21, 0, 0, 21, 21, 0, 0, 21, 0, 0, 21, 0, 21, 0, 21, 21, 21, 24, 0, 0, 21, 21, 0, 1, 21, 21, 0, 0, 21, 1, 0, 21, 0, 21, 1, 21, 21, 21, 24, 0, 0, 21, 21, 1, 1, 21, 21, 1, 1, 21, 1, 1, 21, 1, 21, 1, 21, 21, 21, 24, 0, 0, 20, 20, 0, 0, 20, 20, 0, 0, 20, 0, 0, 20, 0, 20, 0, 20, 20, 20, 23, 0, 0, 20, 20, 0, 1, 20, 20, 0, 0, 20, 1, 0, 20, 0, 20, 1, 20, 20, 20, 23, 0, 0, 20, 20, 1, 1, 20, 20, 1, 1, 20, 1, 1, 20, 1, 20, 1, 20, 20, 20, 23, 0, 0, 19, 19, 0, 0, 19, 19, 0, 0, 19, 0, 0, 19, 0, 19, 0, 19, 19, 19, 22, 0, 0, 19, 19, 0, 1, 19, 19, 0, 0, 19, 1, 0, 19, 0, 19, 1, 19, 19, 19, 22, 0, 0, 19, 19, 1, 1, 19, 19, 1, 1, 19, 1, 1, 19, 1, 19, 1, 19, 19, 19, 22, 0, 0, 18, 18, 0, 0, 18, 18, 0, 0, 18, 0, 0, 18, 0, 18, 0, 18, 18, 18, 21, 0, 0, 18, 18, 0, 1, 18, 18, 0, 0, 18, 1, 0, 18, 0, 18, 1, 18, 18, 18, 21, 0, 0, 18, 18, 1, 1, 18, 18, 1, 1, 18, 1, 1, 18, 1, 18, 1, 18, 18, 18, 21, 0, 0, 17, 17, 0, 0, 17, 17, 0, 0, 17, 0, 0, 17, 0, 17, 0, 17, 17, 17, 20, 0, 0, 17, 17, 0, 1, 17, 17, 0, 0, 17, 1, 0, 17, 0, 17, 1, 17, 17, 17, 20, 0, 0, 17, 17, 1, 1, 17, 17, 1, 1, 17, 1, 1, 17, 1, 17, 1, 17, 17, 17, 20, 0, 0, 16, 16, 0, 0, 16, 16, 0, 0, 16, 0, 0, 16, 0, 16, 0, 16, 16, 16, 19, 0, 0, 16, 16, 0, 0, 16, 16, 1, 1, 16, 0, 0, 16, 1, 16, 0, 16, 16, 16, 19, 0, 0, 16, 16, 1, 3, 16, 16, 1, 1, 16, 3, 1, 16, 1, 16, 3, 16, 16, 16, 19, 0, 0, 16, 16, 0, 2, 16, 16, 0, 0, 16, 2, 0, 16, 0, 16, 2, 16, 16, 16, 19, 0, 0, 16, 16, 2, 2, 16, 16, 2, 2, 16, 2, 2, 16, 2, 16, 2, 16, 16, 16, 19, 0, 0, 16, 16, 0, 0, 16, 16, 2, 2, 16, 0, 0, 16, 2, 16, 0, 16, 16, 16, 19, 0, 0, 16, 16, 2, 3, 16, 16, 2, 2, 16, 3, 2, 16, 2, 16, 3, 16, 16, 16, 19, 0, 0, 16, 16, 0, 3, 16, 16, 0, 0, 16, 3, 0, 16, 0, 16, 3, 16, 16, 16, 19, 0, 0, 17, 17, 1, 1, 17, 17, 2, 2, 17, 1, 1, 17, 2, 17, 1, 17, 17, 17, 20, 0, 0, 17, 17, 0, 0, 
17, 17, 1, 1, 17, 0, 0, 17, 1, 17, 0, 17, 17, 17, 20, 0, 0, 17, 17, 1, 3, 17, 17, 1, 1, 17, 3, 1, 17, 1, 17, 3, 17, 17, 17, 20, 0, 0, 17, 17, 0, 2, 17, 17, 0, 0, 17, 2, 0, 17, 0, 17, 2, 17, 17, 17, 20, 0, 0, 17, 17, 2, 2, 17, 17, 2, 2, 17, 2, 2, 17, 2, 17, 2, 17, 17, 17, 20, 0, 0, 17, 17, 0, 0, 17, 17, 2, 2, 17, 0, 0, 17, 2, 17, 0, 17, 17, 17, 20, 0, 0, 17, 17, 2, 3, 17, 17, 2, 2, 17, 3, 2, 17, 2, 17, 3, 17, 17, 17, 20, 0, 0, 17, 17, 0, 3, 17, 17, 0, 0, 17, 3, 0, 17, 0, 17, 3, 17, 17, 17, 20, 0, 0, 18, 18, 1, 2, 18, 18, 1, 1, 18, 2, 1, 18, 1, 18, 2, 18, 18, 18, 21, 0, 0, 18, 18, 1, 1, 18, 18, 2, 2, 18, 1, 1, 18, 2, 18, 1, 18, 18, 18, 21, 0, 0, 18, 18, 0, 0, 18, 18, 1, 1, 18, 0, 0, 18, 1, 18, 0, 18, 18, 18, 21, 0, 0, 18, 18, 1, 3, 18, 18, 1, 1, 18, 3, 1, 18, 1, 18, 3, 18, 18, 18, 21, 0, 0, 18, 18, 0, 2, 18, 18, 0, 0, 18, 2, 0, 18, 0, 18, 2, 18, 18, 18, 21, 0, 0, 18, 18, 2, 2, 18, 18, 2, 2, 18, 2, 2, 18, 2, 18, 2, 18, 18, 18, 21, 0, 0, 18, 18, 0, 0, 18, 18, 2, 2, 18, 0, 0, 18, 2, 18, 0, 18, 18, 18, 21, 0, 0, 18, 18, 2, 3, 18, 18, 2, 2, 18, 3, 2, 18, 2, 18, 3, 18, 18, 18, 21, 0, 0, 18, 18, 0, 3, 18, 18, 0, 0, 18, 3, 0, 18, 0, 18, 3, 18, 18, 18, 21, 0, 0, 19, 19, 1, 2, 19, 19, 1, 1, 19, 2, 1, 19, 1, 19, 2, 19, 19, 19, 22, 0, 0, 19, 19, 1, 1, 19, 19, 2, 2, 19, 1, 1, 19, 2, 19, 1, 19, 19, 19, 22, 0, 0, 19, 19, 0, 0, 19, 19, 1, 1, 19, 0, 0, 19, 1, 19, 0, 19, 19, 19, 22, 0, 0, 19, 19, 1, 3, 19, 19, 1, 1, 19, 3, 1, 19, 1, 19, 3, 19, 19, 19, 22, 0, 0, 19, 19, 0, 2, 19, 19, 0, 0, 19, 2, 0, 19, 0, 19, 2, 19, 19, 19, 22, 0, 0, 19, 19, 2, 2, 19, 19, 2, 2, 19, 2, 2, 19, 2, 19, 2, 19, 19, 19, 22, 0, 0, 19, 19, 0, 0, 19, 19, 2, 2, 19, 0, 0, 19, 2, 19, 0, 19, 19, 19, 22, 0, 0, 19, 19, 2, 3, 19, 19, 2, 2, 19, 3, 2, 19, 2, 19, 3, 19, 19, 19, 22, 0, 0, 19, 19, 0, 3, 19, 19, 0, 0, 19, 3, 0, 19, 0, 19, 3, 19, 19, 19, 22, 0, 0, 20, 20, 1, 2, 20, 20, 1, 1, 20, 2, 1, 20, 1, 20, 2, 20, 20, 20, 23, 0, 0, 20, 20, 1, 1, 20, 20, 2, 2, 20, 1, 1, 20, 2, 20, 1, 20, 20, 20, 23, 0, 0, 20, 20, 0, 0, 20, 20, 1, 1, 20, 0, 0, 20, 1, 20, 0, 20, 20, 20, 23, 0, 0, 20, 20, 1, 3, 20, 20, 1, 1, 20, 3, 1, 20, 1, 20, 3, 20, 20, 20, 23, 0, 0, 20, 20, 0, 2, 20, 20, 0, 0, 20, 2, 0, 20, 0, 20, 2, 20, 20, 20, 23, 0, 0, 20, 20, 2, 2, 20, 20, 2, 2, 20, 2, 2, 20, 2, 20, 2, 20, 20, 20, 23, 0, 0, 20, 20, 0, 0, 20, 20, 2, 2, 20, 0, 0, 20, 2, 20, 0, 20, 20, 20, 23, 0, 0, 20, 20, 2, 3, 20, 20, 2, 2, 20, 3, 2, 20, 2, 20, 3, 20, 20, 20, 23, 0, 0, 20, 20, 0, 3, 20, 20, 0, 0, 20, 3, 0, 20, 0, 20, 3, 20, 20, 20, 23, 0, 0, 21, 21, 1, 2, 21, 21, 1, 1, 21, 2, 1, 21, 1, 21, 2, 21, 21, 21, 24, 0, 0, 21, 21, 1, 1, 21, 21, 2, 2, 21, 1, 1, 21, 2, 21, 1, 21, 21, 21, 24, 0, 0, 21, 21, 0, 0, 21, 21, 1, 1, 21, 0, 0, 21, 1, 21, 0, 21, 21, 21, 24, 0, 0, 21, 21, 1, 3, 21, 21, 1, 1, 21, 3, 1, 21, 1, 21, 3, 21, 21, 21, 24, 0, 0, 21, 21, 0, 2, 21, 21, 0, 0, 21, 2, 0, 21, 0, 21, 2, 21, 21, 21, 24, 0, 0, 21, 21, 2, 2, 21, 21, 2, 2, 21, 2, 2, 21, 2, 21, 2, 21, 21, 21, 24, 0, 0, 21, 21, 0, 0, 21, 21, 2, 2, 21, 0, 0, 21, 2, 21, 0, 21, 21, 21, 24, 0, 0, 21, 21, 2, 3, 21, 21, 2, 2, 21, 3, 2, 21, 2, 21, 3, 21, 21, 21, 24, 0, 0, 21, 21, 0, 3, 21, 21, 0, 0, 21, 3, 0, 21, 0, 21, 3, 21, 21, 21, 24, 0, 0, 22, 22, 1, 2, 22, 22, 1, 1, 22, 2, 1, 22, 1, 22, 2, 22, 22, 22, 25, 0, 0, 22, 22, 1, 1, 22, 22, 2, 2, 22, 1, 1, 22, 2, 22, 1, 22, 22, 22, 25, 0, 0, 22, 22, 0, 0, 22, 22, 1, 1, 22, 0, 0, 22, 1, 22, 0, 22, 22, 22, 25, 0, 0, 22, 22, 1, 3, 22, 22, 1, 1, 22, 3, 1, 22, 1, 22, 3, 22, 22, 22, 25, 0, 0, 22, 22, 0, 2, 22, 22, 0, 0, 22, 2, 0, 22, 0, 22, 2, 22, 22, 22, 25, 0, 0, 22, 22, 2, 2, 
22, 22, 2, 2, 22, 2, 2, 22, 2, 22, 2, 22, 22, 22, 25, 0, 0, 22, 22, 0, 0, 22, 22, 2, 2, 22, 0, 0, 22, 2, 22, 0, 22, 22, 22, 25, 0, 0, 22, 22, 2, 3, 22, 22, 2, 2, 22, 3, 2, 22, 2, 22, 3, 22, 22, 22, 25, 0, 0, 22, 22, 0, 3, 22, 22, 0, 0, 22, 3, 0, 22, 0, 22, 3, 22, 22, 22, 25, 0, 0, 23, 23, 1, 2, 23, 23, 1, 1, 23, 2, 1, 23, 1, 23, 2, 23, 23, 23, 26, 0, 0, 23, 23, 1, 1, 23, 23, 2, 2, 23, 1, 1, 23, 2, 23, 1, 23, 23, 23, 26, 0, 0, 23, 23, 0, 0, 23, 23, 1, 1, 23, 0, 0, 23, 1, 23, 0, 23, 23, 23, 26, 0, 0, 23, 23, 1, 3, 23, 23, 1, 1, 23, 3, 1, 23, 1, 23, 3, 23, 23, 23, 26, 0, 0, 23, 23, 0, 2, 23, 23, 0, 0, 23, 2, 0, 23, 0, 23, 2, 23, 23, 23, 26, 0, 0, 23, 23, 2, 2, 23, 23, 2, 2, 23, 2, 2, 23, 2, 23, 2, 23, 23, 23, 26, 0, 0, 23, 23, 0, 0, 23, 23, 2, 2, 23, 0, 0, 23, 2, 23, 0, 23, 23, 23, 26, 0, 0, 23, 23, 2, 3, 23, 23, 2, 2, 23, 3, 2, 23, 2, 23, 3, 23, 23, 23, 26, 0, 0, 23, 23, 0, 3, 23, 23, 0, 0, 23, 3, 0, 23, 0, 23, 3, 23, 23, 23, 26, 0, 0, 24, 24, 1, 2, 24, 24, 1, 1, 24, 2, 1, 24, 1, 24, 2, 24, 24, 24, 27, 0, 0, 24, 24, 1, 1, 24, 24, 2, 2, 24, 1, 1, 24, 2, 24, 1, 24, 24, 24, 27, 0, 0, 24, 24, 0, 0, 24, 24, 1, 1, 24, 0, 0, 24, 1, 24, 0, 24, 24, 24, 27, 0, 0, 24, 24, 1, 3, 24, 24, 1, 1, 24, 3, 1, 24, 1, 24, 3, 24, 24, 24, 27, 0, 0, 24, 24, 0, 2, 24, 24, 0, 0, 24, 2, 0, 24, 0, 24, 2, 24, 24, 24, 27, 0, 0, 24, 24, 2, 2, 24, 24, 2, 2, 24, 2, 2, 24, 2, 24, 2, 24, 24, 24, 27, 0, 0, 24, 24, 0, 0, 24, 24, 2, 2, 24, 0, 0, 24, 2, 24, 0, 24, 24, 24, 27, 0, 0, 24, 24, 2, 3, 24, 24, 2, 2, 24, 3, 2, 24, 2, 24, 3, 24, 24, 24, 27, 0, 0, 24, 24, 0, 3, 24, 24, 0, 0, 24, 3, 0, 24, 0, 24, 3, 24, 24, 24, 27, 0, 0, 25, 25, 1, 2, 25, 25, 1, 1, 25, 2, 1, 25, 1, 25, 2, 25, 25, 25, 28, 0, 0, 25, 25, 1, 1, 25, 25, 2, 2, 25, 1, 1, 25, 2, 25, 1, 25, 25, 25, 28, 0, 0, 25, 25, 0, 0, 25, 25, 1, 1, 25, 0, 0, 25, 1, 25, 0, 25, 25, 25, 28, 0, 0, 25, 25, 1, 3, 25, 25, 1, 1, 25, 3, 1, 25, 1, 25, 3, 25, 25, 25, 28, 0, 0, 25, 25, 0, 2, 25, 25, 0, 0, 25, 2, 0, 25, 0, 25, 2, 25, 25, 25, 28, 0, 0, 25, 25, 2, 2, 25, 25, 2, 2, 25, 2, 2, 25, 2, 25, 2, 25, 25, 25, 28, 0, 0, 25, 25, 0, 0, 25, 25, 2, 2, 25, 0, 0, 25, 2, 25, 0, 25, 25, 25, 28, 0, 0, 25, 25, 2, 3, 25, 25, 2, 2, 25, 3, 2, 25, 2, 25, 3, 25, 25, 25, 28, 0, 0, 25, 25, 0, 3, 25, 25, 0, 0, 25, 3, 0, 25, 0, 25, 3, 25, 25, 25, 28, 0, 0, 26, 26, 1, 2, 26, 26, 1, 1, 26, 2, 1, 26, 1, 26, 2, 26, 26, 26, 29, 0, 0, 26, 26, 1, 1, 26, 26, 2, 2, 26, 1, 1, 26, 2, 26, 1, 26, 26, 26, 29, 0, 0, 26, 26, 0, 0, 26, 26, 1, 1, 26, 0, 0, 26, 1, 26, 0, 26, 26, 26, 29, 0, 0, 26, 26, 1, 3, 26, 26, 1, 1, 26, 3, 1, 26, 1, 26, 3, 26, 26, 26, 29, 0, 0, 26, 26, 0, 2, 26, 26, 0, 0, 26, 2, 0, 26, 0, 26, 2, 26, 26, 26, 29, 0, 0, 26, 26, 2, 2, 26, 26, 2, 2, 26, 2, 2, 26, 2, 26, 2, 26, 26, 26, 29, 0, 0, 26, 26, 0, 0, 26, 26, 2, 2, 26, 0, 0, 26, 2, 26, 0, 26, 26, 26, 29, 0, 0, 26, 26, 2, 3, 26, 26, 2, 2, 26, 3, 2, 26, 2, 26, 3, 26, 26, 26, 29, 0, 0, 26, 26, 0, 3, 26, 26, 0, 0, 26, 3, 0, 26, 0, 26, 3, 26, 26, 26, 29, 0, 0, 27, 27, 1, 2, 27, 27, 1, 1, 27, 2, 1, 27, 1, 27, 2, 27, 27, 27, 30, 0, 0, 27, 27, 1, 1, 27, 27, 2, 2, 27, 1, 1, 27, 2, 27, 1, 27, 27, 27, 30, 0, 0, 27, 27, 0, 0, 27, 27, 1, 1, 27, 0, 0, 27, 1, 27, 0, 27, 27, 27, 30, 0, 0, 27, 27, 1, 3, 27, 27, 1, 1, 27, 3, 1, 27, 1, 27, 3, 27, 27, 27, 30, 0, 0, 27, 27, 0, 2, 27, 27, 0, 0, 27, 2, 0, 27, 0, 27, 2, 27, 27, 27, 30, 0, 0, 27, 27, 2, 2, 27, 27, 2, 2, 27, 2, 2, 27, 2, 27, 2, 27, 27, 27, 30, 0, 0, 27, 27, 0, 0, 27, 27, 2, 2, 27, 0, 0, 27, 2, 27, 0, 27, 27, 27, 30, 0, 0, 27, 27, 2, 3, 27, 27, 2, 2, 27, 3, 2, 27, 2, 27, 3, 27, 27, 27, 30, 0, 0, 27, 27, 0, 3, 
27, 27, 0, 0, 27, 3, 0, 27, 0, 27, 3, 27, 27, 27, 30, 0, 0, 28, 28, 1, 2, 28, 28, 1, 1, 28, 2, 1, 28, 1, 28, 2, 28, 28, 28, 31, 0, 0, 28, 28, 1, 1, 28, 28, 2, 2, 28, 1, 1, 28, 2, 28, 1, 28, 28, 28, 31, 0, 0, 28, 28, 0, 0, 28, 28, 1, 1, 28, 0, 0, 28, 1, 28, 0, 28, 28, 28, 31, 0, 0, 28, 28, 1, 3, 28, 28, 1, 1, 28, 3, 1, 28, 1, 28, 3, 28, 28, 28, 31, 0, 0, 28, 28, 0, 2, 28, 28, 0, 0, 28, 2, 0, 28, 0, 28, 2, 28, 28, 28, 31, 0, 0, 28, 28, 2, 2, 28, 28, 2, 2, 28, 2, 2, 28, 2, 28, 2, 28, 28, 28, 31, 0, 0, 28, 28, 0, 0, 28, 28, 2, 2, 28, 0, 0, 28, 2, 28, 0, 28, 28, 28, 31, 0, 0, 28, 28, 2, 3, 28, 28, 2, 2, 28, 3, 2, 28, 2, 28, 3, 28, 28, 28, 31, 0, 0, 28, 28, 0, 3, 28, 28, 0, 0, 28, 3, 0, 28, 0, 28, 3, 28, 28, 28, 31, 0, 0, 29, 29, 1, 2, 29, 29, 1, 1, 29, 2, 1, 29, 1, 29, 2, 29, 29, 29, 32, 0, 0, 29, 29, 1, 1, 29, 29, 2, 2, 29, 1, 1, 29, 2, 29, 1, 29, 29, 29, 32, 0, 0, 29, 29, 0, 0, 29, 29, 1, 1, 29, 0, 0, 29, 1, 29, 0, 29, 29, 29, 32, 0, 0, 29, 29, 1, 3, 29, 29, 1, 1, 29, 3, 1, 29, 1, 29, 3, 29, 29, 29, 32, 0, 0, 29, 29, 0, 2, 29, 29, 0, 0, 29, 2, 0, 29, 0, 29, 2, 29, 29, 29, 32, 0, 0, 29, 29, 2, 2, 29, 29, 2, 2, 29, 2, 2, 29, 2, 29, 2, 29, 29, 29, 32, 0, 0, 29, 29, 0, 0, 29, 29, 2, 2, 29, 0, 0, 29, 2, 29, 0, 29, 29, 29, 32, 0, 0, 29, 29, 2, 3, 29, 29, 2, 2, 29, 3, 2, 29, 2, 29, 3, 29, 29, 29, 32, 0, 0, 29, 29, 0, 3, 29, 29, 0, 0, 29, 3, 0, 29, 0, 29, 3, 29, 29, 29, 32, 0, 0, 30, 30, 1, 2, 30, 30, 1, 1, 30, 2, 1, 30, 1, 30, 2, 30, 30, 30, 33, 0, 0, 30, 30, 1, 1, 30, 30, 2, 2, 30, 1, 1, 30, 2, 30, 1, 30, 30, 30, 33, 0, 0, 30, 30, 0, 0, 30, 30, 1, 1, 30, 0, 0, 30, 1, 30, 0, 30, 30, 30, 33, 0, 0, 30, 30, 1, 3, 30, 30, 1, 1, 30, 3, 1, 30, 1, 30, 3, 30, 30, 30, 33, 0, 0, 30, 30, 0, 2, 30, 30, 0, 0, 30, 2, 0, 30, 0, 30, 2, 30, 30, 30, 33, 0, 0, 30, 30, 2, 2, 30, 30, 2, 2, 30, 2, 2, 30, 2, 30, 2, 30, 30, 30, 33, 0, 0, 30, 30, 0, 0, 30, 30, 2, 2, 30, 0, 0, 30, 2, 30, 0, 30, 30, 30, 33, 0, 0, 30, 30, 2, 3, 30, 30, 2, 2, 30, 3, 2, 30, 2, 30, 3, 30, 30, 30, 33, 0, 0, 30, 30, 0, 3, 30, 30, 0, 0, 30, 3, 0, 30, 0, 30, 3, 30, 30, 30, 33, 0, 0, 31, 31, 1, 2, 31, 31, 1, 1, 31, 2, 1, 31, 1, 31, 2, 31, 31, 31, 34, 0, 0, 31, 31, 1, 1, 31, 31, 2, 2, 31, 1, 1, 31, 2, 31, 1, 31, 31, 31, 34, 0, 0, 31, 31, 0, 0, 31, 31, 1, 1, 31, 0, 0, 31, 1, 31, 0, 31, 31, 31, 34, 0, 0, 31, 31, 1, 3, 31, 31, 1, 1, 31, 3, 1, 31, 1, 31, 3, 31, 31, 31, 34, 0, 0, 31, 31, 0, 2, 31, 31, 0, 0, 31, 2, 0, 31, 0, 31, 2, 31, 31, 31, 34, 0, 0, 31, 31, 2, 2, 31, 31, 2, 2, 31, 2, 2, 31, 2, 31, 2, 31, 31, 31, 34, 0, 0, 31, 31, 0, 0, 31, 31, 2, 2, 31, 0, 0, 31, 2, 31, 0, 31, 31, 31, 34, 0, 0, 31, 31, 2, 3, 31, 31, 2, 2, 31, 3, 2, 31, 2, 31, 3, 31, 31, 31, 34, 0, 0, 31, 31, 0, 3, 31, 31, 0, 0, 31, 3, 0, 31, 0, 31, 3, 31, 31, 31, 34, 0, 0, 32, 32, 1, 2, 32, 32, 1, 1, 32, 2, 1, 32, 1, 32, 2, 32, 32, 32, 35, 0, 0, 32, 32, 1, 1, 32, 32, 2, 2, 32, 1, 1, 32, 2, 32, 1, 32, 32, 32, 35, 0, 0, 32, 32, 0, 0, 32, 32, 1, 1, 32, 0, 0, 32, 1, 32, 0, 32, 32, 32, 35, 0, 0, 32, 32, 1, 3, 32, 32, 1, 1, 32, 3, 1, 32, 1, 32, 3, 32, 32, 32, 35, 0, 0, 32, 32, 0, 2, 32, 32, 0, 0, 32, 2, 0, 32, 0, 32, 2, 32, 32, 32, 35, 0, 0, 32, 32, 2, 2, 32, 32, 2, 2, 32, 2, 2, 32, 2, 32, 2, 32, 32, 32, 35, 0, 0, 32, 32, 0, 0, 32, 32, 2, 2, 32, 0, 0, 32, 2, 32, 0, 32, 32, 32, 35, 0, 0, 32, 32, 2, 3, 32, 32, 2, 2, 32, 3, 2, 32, 2, 32, 3, 32, 32, 32, 35, 0, 0, 32, 32, 0, 3, 32, 32, 0, 0, 32, 3, 0, 32, 0, 32, 3, 32, 32, 32, 35, 0, 0, 33, 33, 1, 2, 33, 33, 1, 1, 33, 2, 1, 33, 1, 33, 2, 33, 33, 33, 36, 0, 0, 33, 33, 1, 1, 33, 33, 2, 2, 33, 1, 1, 33, 2, 33, 1, 33, 33, 33, 36, 0, 0, 1, 1, 0, 1, 1, 
1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 2, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 2, 0, 0, 34, 34, 1, 1, 34, 34, 1, 1, 34, 1, 1, 34, 1, 34, 1, 34, 34, 34, 37, 0, 0, 33, 33, 0, 0, 33, 33, 0, 0, 33, 0, 0, 33, 0, 33, 0, 33, 33, 33, 36, 0, 0, 33, 33, 0, 0, 33, 33, 1, 1, 33, 0, 0, 33, 1, 33, 0, 33, 33, 33, 36, 0, 0, 33, 33, 1, 3, 33, 33, 1, 1, 33, 3, 1, 33, 1, 33, 3, 33, 33, 33, 36, 0, 0, 33, 33, 0, 2, 33, 33, 0, 0, 33, 2, 0, 33, 0, 33, 2, 33, 33, 33, 36, 0, 0, 33, 33, 2, 2, 33, 33, 2, 2, 33, 2, 2, 33, 2, 33, 2, 33, 33, 33, 36, 0, 0, 33, 33, 0, 0, 33, 33, 2, 2, 33, 0, 0, 33, 2, 33, 0, 33, 33, 33, 36, 0, 0, 33, 33, 2, 3, 33, 33, 2, 2, 33, 3, 2, 33, 2, 33, 3, 33, 33, 33, 36, 0, 0, 33, 33, 0, 3, 33, 33, 0, 0, 33, 3, 0, 33, 0, 33, 3, 33, 33, 33, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 2, 2, 0, 1, 2, 2, 0, 2, 0, 1, 0, 0, 2, 2, 1, 2, 0, 0, 1, 0, 0, 34, 34, 1, 1, 34, 34, 2, 2, 34, 1, 1, 34, 2, 34, 1, 34, 34, 34, 37, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 2, 2, 0, 2, 2, 2, 0, 1, 0, 2, 0, 0, 1, 2, 2, 2, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 2, 2, 0, 1, 2, 2, 0, 2, 0, 1, 0, 0, 2, 2, 1, 2, 0, 0, 0, 0, 0, 34, 34, 0, 1, 34, 34, 0, 0, 34, 1, 0, 34, 0, 34, 1, 34, 34, 34, 37, 0, 0, 3, 3, 0, 3, 3, 3, 0, 1, 0, 3, 0, 0, 1, 3, 3, 3, 0, 0, 0, 0, 0, 34, 34, 1, 3, 34, 34, 1, 1, 34, 3, 1, 34, 1, 34, 3, 34, 34, 34, 37, 0, 0, 2, 2, 0, 2, 2, 2, 0, 0, 0, 2, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 0, 2, 2, 2, 0, 2, 0, 2, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 0, 34, 34, 2, 2, 34, 34, 2, 2, 34, 2, 2, 34, 2, 34, 2, 34, 34, 34, 37, 0, 0, 34, 34, 0, 2, 34, 34, 0, 0, 34, 2, 0, 34, 0, 34, 2, 34, 34, 34, 37, 0, 0, 34, 34, 0, 0, 34, 34, 1, 1, 34, 0, 0, 34, 1, 34, 0, 34, 34, 34, 37, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 2, 2, 0, 2, 2, 2, 0, 1, 0, 2, 0, 0, 1, 2, 2, 2, 0, 0, 1, 0, 0, 3, 3, 0, 3, 3, 3, 0, 1, 0, 3, 0, 0, 1, 3, 3, 3, 0, 0, 1, 0, 0, 2, 2, 0, 2, 2, 2, 0, 0, 0, 2, 0, 0, 0, 2, 2, 2, 0, 0, 1, 0, 0, 2, 2, 0, 2, 2, 2, 0, 2, 0, 2, 0, 0, 2, 2, 2, 2, 0, 0, 1, 0, 0, 2, 2, 0, 0, 2, 2, 0, 2, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 1, 0, 0, 2, 2, 0, 0, 2, 2, 1, 2, 1, 0, 0, 1, 2, 2, 0, 2, 1, 1, 4, 0, 0, 3, 3, 1, 3, 3, 3, 1, 2, 1, 3, 1, 1, 2, 3, 3, 3, 1, 1, 4, 0, 0, 3, 3, 0, 3, 3, 3, 0, 2, 0, 3, 0, 0, 2, 3, 3, 3, 0, 0, 1, 0, 0, 34, 34, 2, 3, 34, 34, 2, 2, 34, 3, 2, 34, 2, 34, 3, 34, 34, 34, 37, 0, 0, 34, 34, 0, 0, 34, 34, 2, 2, 34, 0, 0, 34, 2, 34, 0, 34, 34, 34, 37, 0, 0, 3, 3, 0, 3, 3, 3, 0, 0, 0, 3, 0, 0, 0, 3, 3, 3, 0, 0, 1, 0, 0, 3, 3, 0, 3, 3, 3, 0, 0, 1, 3, 0, 1, 0, 3, 3, 3, 1, 1, 4, 0, 0, 34, 34, 0, 3, 34, 34, 0, 0, 34, 3, 0, 34, 0, 34, 3, 34, 34, 34, 37, 0, 0, 34, 34, 0, 0, 34, 34, 0, 0, 34, 0, 0, 34, 0, 34, 0, 34, 34, 34, 37, 0, 0, 2, 2, 0, 1, 2, 2, 0, 2, 0, 1, 0, 0, 2, 2, 1, 2, 0, 0, 2, 0, 0, 2, 2, 0, 2, 2, 2, 0, 2, 0, 2, 0, 0, 2, 2, 2, 2, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 2, 0, 0, 2, 2, 0, 2, 2, 2, 0, 1, 0, 2, 0, 0, 1, 2, 2, 2, 0, 0, 2, 0, 0, 3, 3, 0, 3, 3, 3, 0, 1, 0, 3, 0, 0, 1, 3, 3, 3, 0, 0, 2, 0, 0, 2, 2, 0, 2, 2, 2, 0, 0, 0, 2, 0, 0, 0, 2, 2, 2, 0, 0, 2, 0, 0, 2, 2, 0, 2, 2, 2, 0, 2, 0, 2, 0, 0, 2, 2, 2, 2, 0, 0, 2, 0, 0, 2, 2, 0, 0, 2, 2, 0, 2, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 2, 0, 0, 3, 3, 0, 3, 3, 3, 0, 
2, 0, 3, 0, 0, 2, 3, 3, 3, 0, 0, 2, 0, 0, 3, 3, 0, 3, 3, 3, 0, 0, 0, 3, 0, 0, 0, 3, 3, 3, 0, 0, 2, 0, 0, 3, 3, 0, 3, 3, 3, 0, 1, 0, 3, 0, 0, 1, 3, 3, 3, 0, 0, 3, 0, 0, 2, 2, 0, 1, 2, 2, 0, 2, 0, 1, 0, 0, 2, 2, 1, 2, 0, 0, 3, 0, 0, 2, 2, 0, 0, 2, 2, 0, 2, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 3, 0, 0, 3, 3, 0, 3, 3, 3, 0, 2, 0, 3, 0, 0, 2, 3, 3, 3, 0, 0, 3, 0, 0, 3, 3, 0, 3, 3, 3, 0, 0, 0, 3, 0, 0, 0, 3, 3, 3, 0, 0, 3, 0, 0, 2, 2, 0, 0, 2, 2, 0, 2, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 0, 0, 0, 3, 3, 0, 3, 3, 3, 0, 2, 0, 3, 0, 0, 2, 3, 3, 3, 0, 0, 0, 0, 0, 3, 3, 0, 3, 3, 3, 0, 0, 0, 3, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0}; /* Vector for locked state flags. */ static const unsigned char athlon_fp_dead_lock[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #if CPU_UNITS_QUERY /* Vector for reserved units of states. 
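Referenced only when CPU_UNITS_QUERY is defined; the athlon_fp automaton has no queryable units here, so only a single placeholder element is emitted below.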
*/ static const unsigned char athlon_fp_reserved_units[] = { 0 /* This is dummy el because the vect is empty */}; #endif /* #if CPU_UNITS_QUERY */ #define DFA__ADVANCE_CYCLE 272 struct DFA_chip { unsigned char pentium_automaton_state; unsigned char pentium_fpu_automaton_state; unsigned char ppro_decoder_automaton_state; unsigned char ppro_core_automaton_state; unsigned char ppro_idiv_automaton_state; unsigned char ppro_fdiv_automaton_state; unsigned char ppro_load_automaton_state; unsigned char ppro_store_automaton_state; unsigned char k6_decoder_automaton_state; unsigned char k6_load_unit_automaton_state; unsigned char k6_store_unit_automaton_state; unsigned char k6_integer_units_automaton_state; unsigned char k6_fpu_unit_automaton_state; unsigned char k6_branch_unit_automaton_state; unsigned char athlon_automaton_state; unsigned char athlon_load_automaton_state; unsigned char athlon_mult_automaton_state; unsigned short athlon_fp_automaton_state; }; int max_insn_queue_index = 127; static int internal_min_issue_delay (int insn_code, struct DFA_chip *chip ATTRIBUTE_UNUSED) { int temp ATTRIBUTE_UNUSED; int res = -1; switch (insn_code) { case 0: /* pent_mul */ case 1: /* pent_str */ case 3: /* pent_cld */ case 7: /* pent_imov */ case 8: /* pent_push */ case 9: /* pent_pop */ case 10: /* pent_call */ case 11: /* pent_branch */ case 16: /* pent_uv_both */ case 17: /* pent_u_both */ case 18: /* pent_v_both */ case 19: /* pent_np_both */ case 20: /* pent_uv_load */ case 21: /* pent_u_load */ case 22: /* pent_v_load */ case 23: /* pent_np_load */ case 24: /* pent_uv */ case 25: /* pent_u */ case 26: /* pent_v */ case 27: /* pent_np */ temp = pentium_min_issue_delay [(pentium_translate [insn_code] + chip->pentium_automaton_state * 17) / 2]; temp = (temp >> (8 - (pentium_translate [insn_code] % 2 + 1) * 4)) & 15; res = temp; break; case 2: /* pent_block */ case 4: /* pent_fmov */ case 5: /* pent_fpmovxf */ case 6: /* pent_fpstore */ case 12: /* pent_fp */ case 13: /* pent_fmul */ case 14: /* pent_fdiv */ case 15: /* pent_fpspc */ temp = pentium_fpu_min_issue_delay [pentium_fpu_translate [insn_code] + chip->pentium_fpu_automaton_state * 8]; res = temp; temp = pentium_min_issue_delay [(pentium_translate [insn_code] + chip->pentium_automaton_state * 17) / 2]; temp = (temp >> (8 - (pentium_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; break; case 28: /* ppro_complex_insn */ temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; res = temp; break; case 29: /* ppro_imov */ case 32: /* ppro_imovx */ case 34: /* ppro_lea */ case 35: /* ppro_shift_rotate */ case 37: /* ppro_cld */ case 38: /* ppro_branch */ case 41: /* ppro_imul */ case 49: /* ppro_fop */ case 53: /* ppro_fsgn */ case 55: /* ppro_fcmov */ case 56: /* ppro_fcmp */ case 58: /* ppro_fmov */ case 61: /* ppro_fmov_store */ case 63: /* ppro_fmul */ case 71: /* ppro_mmx_shft */ case 73: /* ppro_mmx_mul */ case 75: /* ppro_sse_mmxcvt */ case 77: /* ppro_sse_SF */ case 78: /* ppro_sse_add_SF */ case 80: /* ppro_sse_cmp_SF */ case 82: /* ppro_sse_comi_SF */ case 84: /* ppro_sse_mul_SF */ case 86: /* ppro_sse_div_SF */ case 90: /* ppro_sse_mov_SF */ case 93: /* ppro_sse_V4SF */ case 94: /* ppro_sse_add_V4SF */ case 96: /* ppro_sse_cmp_V4SF */ case 98: /* ppro_sse_cvt_V4SF */ case 100: /* ppro_sse_mul_V4SF */ case 102: /* ppro_sse_div_V4SF */ case 104: /* ppro_sse_log_V4SF */ case 106: /* 
ppro_sse_mov_V4SF */ case 109: /* ppro_insn */ temp = ppro_core_min_issue_delay [ppro_core_translate [insn_code] + chip->ppro_core_automaton_state * 13]; res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 30: /* ppro_imov_load */ case 33: /* ppro_imovx_load */ case 59: /* ppro_fmov_load */ case 107: /* ppro_sse_mov_V4SF_load */ temp = ppro_load_min_issue_delay [(ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4) / 4]; temp = (temp >> (8 - (ppro_load_translate [insn_code] % 4 + 1) * 2)) & 3; res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 31: /* ppro_imov_store */ case 76: /* ppro_sse_sfence */ case 92: /* ppro_sse_mov_SF_store */ case 108: /* ppro_sse_mov_V4SF_store */ temp = ppro_store_min_issue_delay [(ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7) / 4]; temp = (temp >> (8 - (ppro_store_translate [insn_code] % 4 + 1) * 2)) & 3; res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 36: /* ppro_shift_rotate_mem */ case 52: /* ppro_fop_both */ case 112: /* ppro_insn_both */ temp = ppro_store_min_issue_delay [(ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7) / 4]; temp = (temp >> (8 - (ppro_store_translate [insn_code] % 4 + 1) * 2)) & 3; res = temp; temp = ppro_load_min_issue_delay [(ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4) / 4]; temp = (temp >> (8 - (ppro_load_translate [insn_code] % 4 + 1) * 2)) & 3; if (temp > res) res = temp; temp = ppro_core_min_issue_delay [ppro_core_translate [insn_code] + chip->ppro_core_automaton_state * 13]; if (temp > res) res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 39: /* ppro_indirect_branch */ case 40: /* ppro_leave */ case 42: /* ppro_imul_mem */ case 50: /* ppro_fop_load */ case 57: /* ppro_fcmp_load */ case 60: /* ppro_fmov_XF_load */ case 64: /* ppro_fmul_load */ case 72: /* ppro_mmx_shft_load */ case 74: /* ppro_mmx_mul_load */ case 79: /* ppro_sse_add_SF_load */ case 81: /* ppro_sse_cmp_SF_load */ case 83: /* ppro_sse_comi_SF_load */ case 85: /* ppro_sse_mul_SF_load */ case 87: /* ppro_sse_div_SF_load */ case 88: /* ppro_sse_icvt_SF */ case 89: /* ppro_sse_icvt_SI */ case 91: /* ppro_sse_mov_SF_load */ case 95: /* ppro_sse_add_V4SF_load */ case 97: /* ppro_sse_cmp_V4SF_load */ case 101: /* ppro_sse_mul_V4SF_load */ case 103: /* ppro_sse_div_V4SF_load */ case 105: /* ppro_sse_log_V4SF_load */ case 110: /* ppro_insn_load */ temp = ppro_load_min_issue_delay [(ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4) / 4]; temp = (temp >> (8 - (ppro_load_translate [insn_code] % 4 + 1) * 2)) & 3; res = temp; temp = ppro_core_min_issue_delay [ppro_core_translate [insn_code] + chip->ppro_core_automaton_state * 13]; if (temp > res) res = temp; temp = 
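/* Min-issue-delay tables whose entries need fewer than 8 bits are packed
   several to a byte: the divisor in the index (/ 8, / 4 or / 2) selects the
   byte holding 1-, 2- or 4-bit fields, and the shift/mask idiom used below,
   (temp >> (8 - (index % K + 1) * B)) & ((1 << B) - 1), extracts the field
   for this insn class.  Tables with full-byte entries (e.g. ppro_core,
   ppro_idiv) are indexed directly with no shift/mask step.  */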
ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 43: /* ppro_idiv_QI */ case 45: /* ppro_idiv_HI */ case 47: /* ppro_idiv_SI */ temp = ppro_idiv_min_issue_delay [ppro_idiv_translate [insn_code] + chip->ppro_idiv_automaton_state * 5]; res = temp; temp = ppro_core_min_issue_delay [ppro_core_translate [insn_code] + chip->ppro_core_automaton_state * 13]; if (temp > res) res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 44: /* ppro_idiv_QI_load */ case 46: /* ppro_idiv_HI_load */ case 48: /* ppro_idiv_SI_load */ temp = ppro_load_min_issue_delay [(ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4) / 4]; temp = (temp >> (8 - (ppro_load_translate [insn_code] % 4 + 1) * 2)) & 3; res = temp; temp = ppro_idiv_min_issue_delay [ppro_idiv_translate [insn_code] + chip->ppro_idiv_automaton_state * 5]; if (temp > res) res = temp; temp = ppro_core_min_issue_delay [ppro_core_translate [insn_code] + chip->ppro_core_automaton_state * 13]; if (temp > res) res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 51: /* ppro_fop_store */ case 54: /* ppro_fistp */ case 62: /* ppro_fmov_XF_store */ case 99: /* ppro_sse_cvt_V4SF_other */ case 111: /* ppro_insn_store */ temp = ppro_store_min_issue_delay [(ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7) / 4]; temp = (temp >> (8 - (ppro_store_translate [insn_code] % 4 + 1) * 2)) & 3; res = temp; temp = ppro_core_min_issue_delay [ppro_core_translate [insn_code] + chip->ppro_core_automaton_state * 13]; if (temp > res) res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 65: /* ppro_fdiv_SF */ case 67: /* ppro_fdiv_DF */ case 69: /* ppro_fdiv_XF */ temp = ppro_fdiv_min_issue_delay [ppro_fdiv_translate [insn_code] + chip->ppro_fdiv_automaton_state * 5]; res = temp; temp = ppro_core_min_issue_delay [ppro_core_translate [insn_code] + chip->ppro_core_automaton_state * 13]; if (temp > res) res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 66: /* ppro_fdiv_SF_load */ case 68: /* ppro_fdiv_DF_load */ case 70: /* ppro_fdiv_XF_load */ temp = ppro_load_min_issue_delay [(ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4) / 4]; temp = (temp >> (8 - (ppro_load_translate [insn_code] % 4 + 1) * 2)) & 3; res = temp; temp = ppro_fdiv_min_issue_delay [ppro_fdiv_translate [insn_code] + chip->ppro_fdiv_automaton_state * 5]; if (temp > res) res = temp; temp = ppro_core_min_issue_delay [ppro_core_translate [insn_code] + chip->ppro_core_automaton_state * 13]; if (temp > res) res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + 
chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 113: /* k6_alux_only */ case 116: /* k6_alu_imul */ case 119: /* k6_alu_idiv */ case 121: /* k6_alu */ case 124: /* k6_alu_imov */ temp = k6_integer_units_min_issue_delay [k6_integer_units_translate [insn_code] + chip->k6_integer_units_automaton_state * 11]; res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 114: /* k6_alux_only_load */ case 117: /* k6_alu_imul_load */ case 120: /* k6_alu_idiv_mem */ case 122: /* k6_alu_load */ case 128: /* k6_alu_imov_both */ case 132: /* k6_load_leave */ temp = k6_integer_units_min_issue_delay [k6_integer_units_translate [insn_code] + chip->k6_integer_units_automaton_state * 11]; res = temp; temp = k6_load_unit_min_issue_delay [(k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4) / 2]; temp = (temp >> (8 - (k6_load_unit_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 115: /* k6_alux_only_store */ case 118: /* k6_alu_imul_store */ case 123: /* k6_alu_store */ temp = k6_integer_units_min_issue_delay [k6_integer_units_translate [insn_code] + chip->k6_integer_units_automaton_state * 11]; res = temp; temp = k6_store_unit_min_issue_delay [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp > res) res = temp; temp = k6_load_unit_min_issue_delay [(k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4) / 2]; temp = (temp >> (8 - (k6_load_unit_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 125: /* k6_alu_imov_imm */ temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; res = temp; break; case 126: /* k6_alu_imov_load */ case 131: /* k6_load_pop */ case 133: /* k6_load_str */ temp = k6_load_unit_min_issue_delay [(k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4) / 2]; temp = (temp >> (8 - (k6_load_unit_translate [insn_code] % 2 + 1) * 4)) & 15; res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 127: /* k6_alu_imov_store */ case 135: /* k6_store_push */ temp = k6_store_unit_min_issue_delay [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 129: /* k6_branch_call */ case 130: /* k6_branch_branch */ temp = 
k6_branch_unit_min_issue_delay [(k6_branch_unit_translate [insn_code] + chip->k6_branch_unit_automaton_state * 3) / 8]; temp = (temp >> (8 - (k6_branch_unit_translate [insn_code] % 8 + 1) * 1)) & 1; res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 134: /* k6_store_lea */ temp = k6_integer_units_min_issue_delay [k6_integer_units_translate [insn_code] + chip->k6_integer_units_automaton_state * 11]; res = temp; temp = k6_store_unit_min_issue_delay [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp > res) res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 136: /* k6_store_str */ temp = k6_store_unit_min_issue_delay [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; res = temp; break; case 137: /* k6_fpu */ case 140: /* k6_fpu_fmul */ case 142: /* k6_fpu_expensive */ temp = k6_fpu_unit_min_issue_delay [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 138: /* k6_fpu_load */ case 141: /* k6_fpu_fmul_load */ temp = k6_fpu_unit_min_issue_delay [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; res = temp; temp = k6_load_unit_min_issue_delay [(k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4) / 2]; temp = (temp >> (8 - (k6_load_unit_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 139: /* k6_fpu_store */ temp = k6_fpu_unit_min_issue_delay [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; res = temp; temp = k6_store_unit_min_issue_delay [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp > res) res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; break; case 143: /* athlon_branch */ case 144: /* athlon_call */ case 150: /* athlon_lea */ case 160: /* athlon_idirect */ case 161: /* athlon_ivector */ temp = athlon_min_issue_delay [(athlon_translate [insn_code] + chip->athlon_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_translate [insn_code] % 2 + 1) * 4)) & 15; res = temp; break; case 145: /* athlon_push */ case 146: /* athlon_pop */ case 147: /* athlon_pop_k8 */ case 148: /* athlon_leave */ case 149: /* athlon_leave_k8 */ case 159: /* athlon_str */ case 162: /* athlon_idirect_loadmov */ case 163: /* athlon_idirect_load */ case 164: /* athlon_ivector_load */ case 165: /* athlon_idirect_movstore */ case 166: /* athlon_idirect_both */ case 167: /* athlon_ivector_both */ case 168: /* athlon_idirect_store */ case 169: /* athlon_ivector_store */ temp = 
athlon_load_min_issue_delay [(athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_load_translate [insn_code] % 2 + 1) * 4)) & 15; res = temp; temp = athlon_min_issue_delay [(athlon_translate [insn_code] + chip->athlon_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; break; case 151: /* athlon_imul */ case 152: /* athlon_imul_k8_DI */ case 153: /* athlon_imul_k8 */ temp = athlon_mult_min_issue_delay [(athlon_mult_translate [insn_code] + chip->athlon_mult_automaton_state * 4) / 2]; temp = (temp >> (8 - (athlon_mult_translate [insn_code] % 2 + 1) * 4)) & 15; res = temp; temp = athlon_min_issue_delay [(athlon_translate [insn_code] + chip->athlon_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; break; case 154: /* athlon_imul_mem */ case 155: /* athlon_imul_mem_k8_DI */ case 156: /* athlon_imul_mem_k8 */ temp = athlon_mult_min_issue_delay [(athlon_mult_translate [insn_code] + chip->athlon_mult_automaton_state * 4) / 2]; temp = (temp >> (8 - (athlon_mult_translate [insn_code] % 2 + 1) * 4)) & 15; res = temp; temp = athlon_load_min_issue_delay [(athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_load_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; temp = athlon_min_issue_delay [(athlon_translate [insn_code] + chip->athlon_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; break; case 157: /* athlon_idiv */ case 179: /* athlon_fmov */ case 182: /* athlon_fadd */ case 185: /* athlon_fmul */ case 186: /* athlon_fsgn */ case 189: /* athlon_fdiv */ case 190: /* athlon_fdiv_k8 */ case 192: /* athlon_fpspc */ case 194: /* athlon_fcmov */ case 196: /* athlon_fcmov_k8 */ case 199: /* athlon_fcomi */ case 202: /* athlon_fcom */ case 214: /* athlon_movaps */ case 215: /* athlon_movaps_k8 */ case 216: /* athlon_mmxssemov */ case 218: /* athlon_mmxmul */ case 220: /* athlon_mmx */ case 223: /* athlon_sselog */ case 224: /* athlon_sselog_k8 */ case 227: /* athlon_ssecmp */ case 230: /* athlon_ssecmpvector */ case 231: /* athlon_ssecmpvector_k8 */ case 234: /* athlon_ssecomi */ case 237: /* athlon_sseadd */ case 240: /* athlon_sseaddvector */ case 241: /* athlon_sseaddvector_k8 */ case 243: /* athlon_ssecvt_cvtss2sd */ case 245: /* athlon_ssecvt_cvtps2pd_k8 */ case 252: /* athlon_ssecvt_cvtsd2ss */ case 254: /* athlon_ssecvt_cvtpd2ps */ case 256: /* athlon_ssecvt_cvtsX2si */ case 257: /* athlon_ssecvt_cvtsX2si_k8 */ case 260: /* athlon_ssemul */ case 263: /* athlon_ssemulvector */ case 264: /* athlon_ssemulvector_k8 */ case 267: /* athlon_ssediv */ case 270: /* athlon_ssedivvector */ case 271: /* athlon_ssedivvector_k8 */ temp = athlon_fp_min_issue_delay [athlon_fp_translate [insn_code] + chip->athlon_fp_automaton_state * 21]; res = temp; temp = athlon_min_issue_delay [(athlon_translate [insn_code] + chip->athlon_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; break; case 158: /* athlon_idiv_mem */ case 170: /* athlon_fldxf */ case 171: /* athlon_fldxf_k8 */ case 172: /* athlon_fld */ case 173: /* athlon_fld_k8 */ case 174: /* athlon_fstxf */ case 175: /* athlon_fstxf_k8 */ case 176: /* athlon_fst */ case 177: /* athlon_fst_k8 */ case 178: /* athlon_fist */ case 
180: /* athlon_fadd_load */ case 181: /* athlon_fadd_load_k8 */ case 183: /* athlon_fmul_load */ case 184: /* athlon_fmul_load_k8 */ case 187: /* athlon_fdiv_load */ case 188: /* athlon_fdiv_load_k8 */ case 191: /* athlon_fpspc_load */ case 193: /* athlon_fcmov_load */ case 195: /* athlon_fcmov_load_k8 */ case 197: /* athlon_fcomi_load */ case 198: /* athlon_fcomi_load_k8 */ case 200: /* athlon_fcom_load */ case 201: /* athlon_fcom_load_k8 */ case 203: /* athlon_movlpd_load */ case 204: /* athlon_movlpd_load_k8 */ case 205: /* athlon_movaps_load_k8 */ case 206: /* athlon_movaps_load */ case 207: /* athlon_movss_load */ case 208: /* athlon_movss_load_k8 */ case 209: /* athlon_mmxsseld */ case 210: /* athlon_mmxsseld_k8 */ case 211: /* athlon_mmxssest */ case 212: /* athlon_mmxssest_k8 */ case 213: /* athlon_mmxssest_short */ case 217: /* athlon_mmxmul_load */ case 219: /* athlon_mmx_load */ case 221: /* athlon_sselog_load */ case 222: /* athlon_sselog_load_k8 */ case 225: /* athlon_ssecmp_load */ case 226: /* athlon_ssecmp_load_k8 */ case 228: /* athlon_ssecmpvector_load */ case 229: /* athlon_ssecmpvector_load_k8 */ case 232: /* athlon_ssecomi_load */ case 233: /* athlon_ssecomi_load_k8 */ case 235: /* athlon_sseadd_load */ case 236: /* athlon_sseadd_load_k8 */ case 238: /* athlon_sseaddvector_load */ case 239: /* athlon_sseaddvector_load_k8 */ case 242: /* athlon_ssecvt_cvtss2sd_load_k8 */ case 244: /* athlon_ssecvt_cvtps2pd_load_k8 */ case 246: /* athlon_sseicvt_cvtsi2sd_load */ case 247: /* athlon_sseicvt_cvtsi2ss_load */ case 248: /* athlon_sseicvt_cvtsi2ss_load_k8 */ case 249: /* athlon_sseicvt_cvtsi2sd_k8 */ case 250: /* athlon_sseicvt_cvtsi2ss */ case 251: /* athlon_ssecvt_cvtsd2ss_load_k8 */ case 253: /* athlon_ssecvt_cvtpd2ps_load_k8 */ case 255: /* athlon_secvt_cvtsX2si_load */ case 258: /* athlon_ssemul_load */ case 259: /* athlon_ssemul_load_k8 */ case 261: /* athlon_ssemulvector_load */ case 262: /* athlon_ssemulvector_load_k8 */ case 265: /* athlon_ssediv_load */ case 266: /* athlon_ssediv_load_k8 */ case 268: /* athlon_ssedivvector_load */ case 269: /* athlon_ssedivvector_load_k8 */ temp = athlon_fp_min_issue_delay [athlon_fp_translate [insn_code] + chip->athlon_fp_automaton_state * 21]; res = temp; temp = athlon_load_min_issue_delay [(athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_load_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; temp = athlon_min_issue_delay [(athlon_translate [insn_code] + chip->athlon_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; break; case 272: /* $advance_cycle */ temp = athlon_fp_min_issue_delay [athlon_fp_translate [insn_code] + chip->athlon_fp_automaton_state * 21]; res = temp; temp = athlon_mult_min_issue_delay [(athlon_mult_translate [insn_code] + chip->athlon_mult_automaton_state * 4) / 2]; temp = (temp >> (8 - (athlon_mult_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; temp = athlon_load_min_issue_delay [(athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_load_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; temp = athlon_min_issue_delay [(athlon_translate [insn_code] + chip->athlon_automaton_state * 10) / 2]; temp = (temp >> (8 - (athlon_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; temp = k6_branch_unit_min_issue_delay [(k6_branch_unit_translate 
[insn_code] + chip->k6_branch_unit_automaton_state * 3) / 8]; temp = (temp >> (8 - (k6_branch_unit_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; temp = k6_fpu_unit_min_issue_delay [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; if (temp > res) res = temp; temp = k6_integer_units_min_issue_delay [k6_integer_units_translate [insn_code] + chip->k6_integer_units_automaton_state * 11]; if (temp > res) res = temp; temp = k6_store_unit_min_issue_delay [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp > res) res = temp; temp = k6_load_unit_min_issue_delay [(k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4) / 2]; temp = (temp >> (8 - (k6_load_unit_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; temp = k6_decoder_min_issue_delay [(k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (k6_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; temp = ppro_store_min_issue_delay [(ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7) / 4]; temp = (temp >> (8 - (ppro_store_translate [insn_code] % 4 + 1) * 2)) & 3; if (temp > res) res = temp; temp = ppro_load_min_issue_delay [(ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4) / 4]; temp = (temp >> (8 - (ppro_load_translate [insn_code] % 4 + 1) * 2)) & 3; if (temp > res) res = temp; temp = ppro_fdiv_min_issue_delay [ppro_fdiv_translate [insn_code] + chip->ppro_fdiv_automaton_state * 5]; if (temp > res) res = temp; temp = ppro_idiv_min_issue_delay [ppro_idiv_translate [insn_code] + chip->ppro_idiv_automaton_state * 5]; if (temp > res) res = temp; temp = ppro_core_min_issue_delay [ppro_core_translate [insn_code] + chip->ppro_core_automaton_state * 13]; if (temp > res) res = temp; temp = ppro_decoder_min_issue_delay [(ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4) / 8]; temp = (temp >> (8 - (ppro_decoder_translate [insn_code] % 8 + 1) * 1)) & 1; if (temp > res) res = temp; temp = pentium_fpu_min_issue_delay [pentium_fpu_translate [insn_code] + chip->pentium_fpu_automaton_state * 8]; if (temp > res) res = temp; temp = pentium_min_issue_delay [(pentium_translate [insn_code] + chip->pentium_automaton_state * 17) / 2]; temp = (temp >> (8 - (pentium_translate [insn_code] % 2 + 1) * 4)) & 15; if (temp > res) res = temp; break; default: res = -1; break; } return res; } static int internal_state_transition (int insn_code, struct DFA_chip *chip ATTRIBUTE_UNUSED) { int temp ATTRIBUTE_UNUSED; switch (insn_code) { case 0: /* pent_mul */ case 1: /* pent_str */ case 3: /* pent_cld */ case 7: /* pent_imov */ case 8: /* pent_push */ case 9: /* pent_pop */ case 10: /* pent_call */ case 11: /* pent_branch */ case 16: /* pent_uv_both */ case 17: /* pent_u_both */ case 18: /* pent_v_both */ case 19: /* pent_np_both */ case 20: /* pent_uv_load */ case 21: /* pent_u_load */ case 22: /* pent_v_load */ case 23: /* pent_np_load */ case 24: /* pent_uv */ case 25: /* pent_u */ case 26: /* pent_v */ case 27: /* pent_np */ { temp = pentium_base [chip->pentium_automaton_state] + pentium_translate [insn_code]; if (pentium_check [temp] != chip->pentium_automaton_state) return internal_min_issue_delay (insn_code, chip); else chip->pentium_automaton_state = pentium_transitions [temp]; return -1; } case 2: /* pent_block */ case 4: /* pent_fmov */ case 5: /* pent_fpmovxf */ case 6: /* pent_fpstore */ case 12: /* 
pent_fp */ case 13: /* pent_fmul */ case 14: /* pent_fdiv */ case 15: /* pent_fpspc */ { unsigned char _pentium_fpu_automaton_state; temp = pentium_fpu_base [chip->pentium_fpu_automaton_state] + pentium_fpu_translate [insn_code]; if (pentium_fpu_check [temp] != chip->pentium_fpu_automaton_state) return internal_min_issue_delay (insn_code, chip); else _pentium_fpu_automaton_state = pentium_fpu_transitions [temp]; temp = pentium_base [chip->pentium_automaton_state] + pentium_translate [insn_code]; if (pentium_check [temp] != chip->pentium_automaton_state) return internal_min_issue_delay (insn_code, chip); else chip->pentium_automaton_state = pentium_transitions [temp]; chip->pentium_fpu_automaton_state = _pentium_fpu_automaton_state; return -1; } case 28: /* ppro_complex_insn */ { temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; return -1; } case 29: /* ppro_imov */ case 32: /* ppro_imovx */ case 34: /* ppro_lea */ case 35: /* ppro_shift_rotate */ case 37: /* ppro_cld */ case 38: /* ppro_branch */ case 41: /* ppro_imul */ case 49: /* ppro_fop */ case 53: /* ppro_fsgn */ case 55: /* ppro_fcmov */ case 56: /* ppro_fcmp */ case 58: /* ppro_fmov */ case 61: /* ppro_fmov_store */ case 63: /* ppro_fmul */ case 71: /* ppro_mmx_shft */ case 73: /* ppro_mmx_mul */ case 75: /* ppro_sse_mmxcvt */ case 77: /* ppro_sse_SF */ case 78: /* ppro_sse_add_SF */ case 80: /* ppro_sse_cmp_SF */ case 82: /* ppro_sse_comi_SF */ case 84: /* ppro_sse_mul_SF */ case 86: /* ppro_sse_div_SF */ case 90: /* ppro_sse_mov_SF */ case 93: /* ppro_sse_V4SF */ case 94: /* ppro_sse_add_V4SF */ case 96: /* ppro_sse_cmp_V4SF */ case 98: /* ppro_sse_cvt_V4SF */ case 100: /* ppro_sse_mul_V4SF */ case 102: /* ppro_sse_div_V4SF */ case 104: /* ppro_sse_log_V4SF */ case 106: /* ppro_sse_mov_V4SF */ case 109: /* ppro_insn */ { unsigned char _ppro_core_automaton_state; temp = ppro_core_base [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check [temp] != chip->ppro_core_automaton_state) return internal_min_issue_delay (insn_code, chip); else _ppro_core_automaton_state = ppro_core_transitions [temp]; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_core_automaton_state = _ppro_core_automaton_state; return -1; } case 30: /* ppro_imov_load */ case 33: /* ppro_imovx_load */ case 59: /* ppro_fmov_load */ case 107: /* ppro_sse_mov_V4SF_load */ { unsigned char _ppro_load_automaton_state; temp = ppro_load_transitions [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else _ppro_load_automaton_state = temp; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_load_automaton_state = _ppro_load_automaton_state; return -1; } case 31: /* ppro_imov_store */ case 76: /* ppro_sse_sfence */ case 92: /* ppro_sse_mov_SF_store */ case 108: /* ppro_sse_mov_V4SF_store */ { unsigned char _ppro_store_automaton_state; temp = ppro_store_transitions [ppro_store_translate [insn_code] + 
chip->ppro_store_automaton_state * 7]; if (temp >= 11) return internal_min_issue_delay (insn_code, chip); else _ppro_store_automaton_state = temp; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_store_automaton_state = _ppro_store_automaton_state; return -1; } case 36: /* ppro_shift_rotate_mem */ case 52: /* ppro_fop_both */ case 112: /* ppro_insn_both */ { unsigned char _ppro_store_automaton_state; unsigned char _ppro_load_automaton_state; unsigned char _ppro_core_automaton_state; temp = ppro_store_transitions [ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7]; if (temp >= 11) return internal_min_issue_delay (insn_code, chip); else _ppro_store_automaton_state = temp; temp = ppro_load_transitions [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else _ppro_load_automaton_state = temp; temp = ppro_core_base [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check [temp] != chip->ppro_core_automaton_state) return internal_min_issue_delay (insn_code, chip); else _ppro_core_automaton_state = ppro_core_transitions [temp]; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_store_automaton_state = _ppro_store_automaton_state; chip->ppro_load_automaton_state = _ppro_load_automaton_state; chip->ppro_core_automaton_state = _ppro_core_automaton_state; return -1; } case 39: /* ppro_indirect_branch */ case 40: /* ppro_leave */ case 42: /* ppro_imul_mem */ case 50: /* ppro_fop_load */ case 57: /* ppro_fcmp_load */ case 60: /* ppro_fmov_XF_load */ case 64: /* ppro_fmul_load */ case 72: /* ppro_mmx_shft_load */ case 74: /* ppro_mmx_mul_load */ case 79: /* ppro_sse_add_SF_load */ case 81: /* ppro_sse_cmp_SF_load */ case 83: /* ppro_sse_comi_SF_load */ case 85: /* ppro_sse_mul_SF_load */ case 87: /* ppro_sse_div_SF_load */ case 88: /* ppro_sse_icvt_SF */ case 89: /* ppro_sse_icvt_SI */ case 91: /* ppro_sse_mov_SF_load */ case 95: /* ppro_sse_add_V4SF_load */ case 97: /* ppro_sse_cmp_V4SF_load */ case 101: /* ppro_sse_mul_V4SF_load */ case 103: /* ppro_sse_div_V4SF_load */ case 105: /* ppro_sse_log_V4SF_load */ case 110: /* ppro_insn_load */ { unsigned char _ppro_load_automaton_state; unsigned char _ppro_core_automaton_state; temp = ppro_load_transitions [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else _ppro_load_automaton_state = temp; temp = ppro_core_base [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check [temp] != chip->ppro_core_automaton_state) return internal_min_issue_delay (insn_code, chip); else _ppro_core_automaton_state = ppro_core_transitions [temp]; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_load_automaton_state = _ppro_load_automaton_state; chip->ppro_core_automaton_state = _ppro_core_automaton_state; return -1; } case 43: /* ppro_idiv_QI */ case 45: /* ppro_idiv_HI 
*/ case 47: /* ppro_idiv_SI */ { unsigned char _ppro_idiv_automaton_state; unsigned char _ppro_core_automaton_state; temp = ppro_idiv_transitions [ppro_idiv_translate [insn_code] + chip->ppro_idiv_automaton_state * 5]; if (temp >= 38) return internal_min_issue_delay (insn_code, chip); else _ppro_idiv_automaton_state = temp; temp = ppro_core_base [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check [temp] != chip->ppro_core_automaton_state) return internal_min_issue_delay (insn_code, chip); else _ppro_core_automaton_state = ppro_core_transitions [temp]; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_idiv_automaton_state = _ppro_idiv_automaton_state; chip->ppro_core_automaton_state = _ppro_core_automaton_state; return -1; } case 44: /* ppro_idiv_QI_load */ case 46: /* ppro_idiv_HI_load */ case 48: /* ppro_idiv_SI_load */ { unsigned char _ppro_load_automaton_state; unsigned char _ppro_idiv_automaton_state; unsigned char _ppro_core_automaton_state; temp = ppro_load_transitions [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else _ppro_load_automaton_state = temp; temp = ppro_idiv_transitions [ppro_idiv_translate [insn_code] + chip->ppro_idiv_automaton_state * 5]; if (temp >= 38) return internal_min_issue_delay (insn_code, chip); else _ppro_idiv_automaton_state = temp; temp = ppro_core_base [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check [temp] != chip->ppro_core_automaton_state) return internal_min_issue_delay (insn_code, chip); else _ppro_core_automaton_state = ppro_core_transitions [temp]; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_load_automaton_state = _ppro_load_automaton_state; chip->ppro_idiv_automaton_state = _ppro_idiv_automaton_state; chip->ppro_core_automaton_state = _ppro_core_automaton_state; return -1; } case 51: /* ppro_fop_store */ case 54: /* ppro_fistp */ case 62: /* ppro_fmov_XF_store */ case 99: /* ppro_sse_cvt_V4SF_other */ case 111: /* ppro_insn_store */ { unsigned char _ppro_store_automaton_state; unsigned char _ppro_core_automaton_state; temp = ppro_store_transitions [ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7]; if (temp >= 11) return internal_min_issue_delay (insn_code, chip); else _ppro_store_automaton_state = temp; temp = ppro_core_base [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check [temp] != chip->ppro_core_automaton_state) return internal_min_issue_delay (insn_code, chip); else _ppro_core_automaton_state = ppro_core_transitions [temp]; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_store_automaton_state = _ppro_store_automaton_state; chip->ppro_core_automaton_state = _ppro_core_automaton_state; return -1; } case 65: /* ppro_fdiv_SF */ case 67: /* ppro_fdiv_DF */ case 69: /* ppro_fdiv_XF */ { unsigned char _ppro_fdiv_automaton_state; unsigned char 
_ppro_core_automaton_state; temp = ppro_fdiv_transitions [ppro_fdiv_translate [insn_code] + chip->ppro_fdiv_automaton_state * 5]; if (temp >= 38) return internal_min_issue_delay (insn_code, chip); else _ppro_fdiv_automaton_state = temp; temp = ppro_core_base [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check [temp] != chip->ppro_core_automaton_state) return internal_min_issue_delay (insn_code, chip); else _ppro_core_automaton_state = ppro_core_transitions [temp]; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_fdiv_automaton_state = _ppro_fdiv_automaton_state; chip->ppro_core_automaton_state = _ppro_core_automaton_state; return -1; } case 66: /* ppro_fdiv_SF_load */ case 68: /* ppro_fdiv_DF_load */ case 70: /* ppro_fdiv_XF_load */ { unsigned char _ppro_load_automaton_state; unsigned char _ppro_fdiv_automaton_state; unsigned char _ppro_core_automaton_state; temp = ppro_load_transitions [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else _ppro_load_automaton_state = temp; temp = ppro_fdiv_transitions [ppro_fdiv_translate [insn_code] + chip->ppro_fdiv_automaton_state * 5]; if (temp >= 38) return internal_min_issue_delay (insn_code, chip); else _ppro_fdiv_automaton_state = temp; temp = ppro_core_base [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check [temp] != chip->ppro_core_automaton_state) return internal_min_issue_delay (insn_code, chip); else _ppro_core_automaton_state = ppro_core_transitions [temp]; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else chip->ppro_decoder_automaton_state = temp; chip->ppro_load_automaton_state = _ppro_load_automaton_state; chip->ppro_fdiv_automaton_state = _ppro_fdiv_automaton_state; chip->ppro_core_automaton_state = _ppro_core_automaton_state; return -1; } case 113: /* k6_alux_only */ case 116: /* k6_alu_imul */ case 119: /* k6_alu_idiv */ case 121: /* k6_alu */ case 124: /* k6_alu_imov */ { unsigned char _k6_integer_units_automaton_state; temp = k6_integer_units_base [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check [temp] != chip->k6_integer_units_automaton_state) return internal_min_issue_delay (insn_code, chip); else _k6_integer_units_automaton_state = k6_integer_units_transitions [temp]; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; chip->k6_integer_units_automaton_state = _k6_integer_units_automaton_state; return -1; } case 114: /* k6_alux_only_load */ case 117: /* k6_alu_imul_load */ case 120: /* k6_alu_idiv_mem */ case 122: /* k6_alu_load */ case 128: /* k6_alu_imov_both */ case 132: /* k6_load_leave */ { unsigned char _k6_integer_units_automaton_state; unsigned char _k6_load_unit_automaton_state; temp = k6_integer_units_base [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check [temp] != chip->k6_integer_units_automaton_state) return internal_min_issue_delay (insn_code, chip); else 
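/* The k6_integer_units automaton uses a compressed base/check transition
   table: the check entry above confirmed that this row belongs to the
   current state, so the transitions entry is a valid successor; a
   mismatch means there is no arc for this insn and the minimum issue
   delay is returned instead.  */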
_k6_integer_units_automaton_state = k6_integer_units_transitions [temp]; temp = k6_load_unit_transitions [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; if (temp >= 11) return internal_min_issue_delay (insn_code, chip); else _k6_load_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; chip->k6_integer_units_automaton_state = _k6_integer_units_automaton_state; chip->k6_load_unit_automaton_state = _k6_load_unit_automaton_state; return -1; } case 115: /* k6_alux_only_store */ case 118: /* k6_alu_imul_store */ case 123: /* k6_alu_store */ { unsigned char _k6_integer_units_automaton_state; unsigned char _k6_store_unit_automaton_state; unsigned char _k6_load_unit_automaton_state; temp = k6_integer_units_base [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check [temp] != chip->k6_integer_units_automaton_state) return internal_min_issue_delay (insn_code, chip); else _k6_integer_units_automaton_state = k6_integer_units_transitions [temp]; temp = k6_store_unit_transitions [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp >= 37) return internal_min_issue_delay (insn_code, chip); else _k6_store_unit_automaton_state = temp; temp = k6_load_unit_transitions [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; if (temp >= 11) return internal_min_issue_delay (insn_code, chip); else _k6_load_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; chip->k6_integer_units_automaton_state = _k6_integer_units_automaton_state; chip->k6_store_unit_automaton_state = _k6_store_unit_automaton_state; chip->k6_load_unit_automaton_state = _k6_load_unit_automaton_state; return -1; } case 125: /* k6_alu_imov_imm */ { temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; return -1; } case 126: /* k6_alu_imov_load */ case 131: /* k6_load_pop */ case 133: /* k6_load_str */ { unsigned char _k6_load_unit_automaton_state; temp = k6_load_unit_transitions [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; if (temp >= 11) return internal_min_issue_delay (insn_code, chip); else _k6_load_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; chip->k6_load_unit_automaton_state = _k6_load_unit_automaton_state; return -1; } case 127: /* k6_alu_imov_store */ case 135: /* k6_store_push */ { unsigned char _k6_store_unit_automaton_state; temp = k6_store_unit_transitions [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp >= 37) return internal_min_issue_delay (insn_code, chip); else _k6_store_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else 
chip->k6_decoder_automaton_state = temp; chip->k6_store_unit_automaton_state = _k6_store_unit_automaton_state; return -1; } case 129: /* k6_branch_call */ case 130: /* k6_branch_branch */ { unsigned char _k6_branch_unit_automaton_state; temp = k6_branch_unit_transitions [k6_branch_unit_translate [insn_code] + chip->k6_branch_unit_automaton_state * 3]; if (temp >= 2) return internal_min_issue_delay (insn_code, chip); else _k6_branch_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; chip->k6_branch_unit_automaton_state = _k6_branch_unit_automaton_state; return -1; } case 134: /* k6_store_lea */ { unsigned char _k6_integer_units_automaton_state; unsigned char _k6_store_unit_automaton_state; temp = k6_integer_units_base [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check [temp] != chip->k6_integer_units_automaton_state) return internal_min_issue_delay (insn_code, chip); else _k6_integer_units_automaton_state = k6_integer_units_transitions [temp]; temp = k6_store_unit_transitions [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp >= 37) return internal_min_issue_delay (insn_code, chip); else _k6_store_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; chip->k6_integer_units_automaton_state = _k6_integer_units_automaton_state; chip->k6_store_unit_automaton_state = _k6_store_unit_automaton_state; return -1; } case 136: /* k6_store_str */ { temp = k6_store_unit_transitions [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp >= 37) return internal_min_issue_delay (insn_code, chip); else chip->k6_store_unit_automaton_state = temp; return -1; } case 137: /* k6_fpu */ case 140: /* k6_fpu_fmul */ case 142: /* k6_fpu_expensive */ { unsigned char _k6_fpu_unit_automaton_state; temp = k6_fpu_unit_transitions [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; if (temp >= 57) return internal_min_issue_delay (insn_code, chip); else _k6_fpu_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; chip->k6_fpu_unit_automaton_state = _k6_fpu_unit_automaton_state; return -1; } case 138: /* k6_fpu_load */ case 141: /* k6_fpu_fmul_load */ { unsigned char _k6_fpu_unit_automaton_state; unsigned char _k6_load_unit_automaton_state; temp = k6_fpu_unit_transitions [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; if (temp >= 57) return internal_min_issue_delay (insn_code, chip); else _k6_fpu_unit_automaton_state = temp; temp = k6_load_unit_transitions [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; if (temp >= 11) return internal_min_issue_delay (insn_code, chip); else _k6_load_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; 
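/* Every component automaton accepted the insn, so the successor states
   staged in the local _*_automaton_state variables are committed to the
   chip only now; any earlier lookup failure returned the minimum issue
   delay before modifying the chip state.  */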
chip->k6_fpu_unit_automaton_state = _k6_fpu_unit_automaton_state; chip->k6_load_unit_automaton_state = _k6_load_unit_automaton_state; return -1; } case 139: /* k6_fpu_store */ { unsigned char _k6_fpu_unit_automaton_state; unsigned char _k6_store_unit_automaton_state; temp = k6_fpu_unit_transitions [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; if (temp >= 57) return internal_min_issue_delay (insn_code, chip); else _k6_fpu_unit_automaton_state = temp; temp = k6_store_unit_transitions [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp >= 37) return internal_min_issue_delay (insn_code, chip); else _k6_store_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else chip->k6_decoder_automaton_state = temp; chip->k6_fpu_unit_automaton_state = _k6_fpu_unit_automaton_state; chip->k6_store_unit_automaton_state = _k6_store_unit_automaton_state; return -1; } case 143: /* athlon_branch */ case 144: /* athlon_call */ case 150: /* athlon_lea */ case 160: /* athlon_idirect */ case 161: /* athlon_ivector */ { temp = athlon_transitions [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; if (temp >= 76) return internal_min_issue_delay (insn_code, chip); else chip->athlon_automaton_state = temp; return -1; } case 145: /* athlon_push */ case 146: /* athlon_pop */ case 147: /* athlon_pop_k8 */ case 148: /* athlon_leave */ case 149: /* athlon_leave_k8 */ case 159: /* athlon_str */ case 162: /* athlon_idirect_loadmov */ case 163: /* athlon_idirect_load */ case 164: /* athlon_ivector_load */ case 165: /* athlon_idirect_movstore */ case 166: /* athlon_idirect_both */ case 167: /* athlon_ivector_both */ case 168: /* athlon_idirect_store */ case 169: /* athlon_ivector_store */ { unsigned char _athlon_load_automaton_state; temp = athlon_load_transitions [athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10]; if (temp >= 162) return internal_min_issue_delay (insn_code, chip); else _athlon_load_automaton_state = temp; temp = athlon_transitions [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; if (temp >= 76) return internal_min_issue_delay (insn_code, chip); else chip->athlon_automaton_state = temp; chip->athlon_load_automaton_state = _athlon_load_automaton_state; return -1; } case 151: /* athlon_imul */ case 152: /* athlon_imul_k8_DI */ case 153: /* athlon_imul_k8 */ { unsigned char _athlon_mult_automaton_state; temp = athlon_mult_transitions [athlon_mult_translate [insn_code] + chip->athlon_mult_automaton_state * 4]; if (temp >= 16) return internal_min_issue_delay (insn_code, chip); else _athlon_mult_automaton_state = temp; temp = athlon_transitions [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; if (temp >= 76) return internal_min_issue_delay (insn_code, chip); else chip->athlon_automaton_state = temp; chip->athlon_mult_automaton_state = _athlon_mult_automaton_state; return -1; } case 154: /* athlon_imul_mem */ case 155: /* athlon_imul_mem_k8_DI */ case 156: /* athlon_imul_mem_k8 */ { unsigned char _athlon_mult_automaton_state; unsigned char _athlon_load_automaton_state; temp = athlon_mult_transitions [athlon_mult_translate [insn_code] + chip->athlon_mult_automaton_state * 4]; if (temp >= 16) return internal_min_issue_delay (insn_code, chip); else _athlon_mult_automaton_state = temp; temp = athlon_load_transitions 
[athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10]; if (temp >= 162) return internal_min_issue_delay (insn_code, chip); else _athlon_load_automaton_state = temp; temp = athlon_transitions [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; if (temp >= 76) return internal_min_issue_delay (insn_code, chip); else chip->athlon_automaton_state = temp; chip->athlon_mult_automaton_state = _athlon_mult_automaton_state; chip->athlon_load_automaton_state = _athlon_load_automaton_state; return -1; } case 157: /* athlon_idiv */ case 179: /* athlon_fmov */ case 182: /* athlon_fadd */ case 185: /* athlon_fmul */ case 186: /* athlon_fsgn */ case 189: /* athlon_fdiv */ case 190: /* athlon_fdiv_k8 */ case 192: /* athlon_fpspc */ case 194: /* athlon_fcmov */ case 196: /* athlon_fcmov_k8 */ case 199: /* athlon_fcomi */ case 202: /* athlon_fcom */ case 214: /* athlon_movaps */ case 215: /* athlon_movaps_k8 */ case 216: /* athlon_mmxssemov */ case 218: /* athlon_mmxmul */ case 220: /* athlon_mmx */ case 223: /* athlon_sselog */ case 224: /* athlon_sselog_k8 */ case 227: /* athlon_ssecmp */ case 230: /* athlon_ssecmpvector */ case 231: /* athlon_ssecmpvector_k8 */ case 234: /* athlon_ssecomi */ case 237: /* athlon_sseadd */ case 240: /* athlon_sseaddvector */ case 241: /* athlon_sseaddvector_k8 */ case 243: /* athlon_ssecvt_cvtss2sd */ case 245: /* athlon_ssecvt_cvtps2pd_k8 */ case 252: /* athlon_ssecvt_cvtsd2ss */ case 254: /* athlon_ssecvt_cvtpd2ps */ case 256: /* athlon_ssecvt_cvtsX2si */ case 257: /* athlon_ssecvt_cvtsX2si_k8 */ case 260: /* athlon_ssemul */ case 263: /* athlon_ssemulvector */ case 264: /* athlon_ssemulvector_k8 */ case 267: /* athlon_ssediv */ case 270: /* athlon_ssedivvector */ case 271: /* athlon_ssedivvector_k8 */ { unsigned short _athlon_fp_automaton_state; temp = athlon_fp_base [chip->athlon_fp_automaton_state] + athlon_fp_translate [insn_code]; if (athlon_fp_check [temp] != chip->athlon_fp_automaton_state) return internal_min_issue_delay (insn_code, chip); else _athlon_fp_automaton_state = athlon_fp_transitions [temp]; temp = athlon_transitions [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; if (temp >= 76) return internal_min_issue_delay (insn_code, chip); else chip->athlon_automaton_state = temp; chip->athlon_fp_automaton_state = _athlon_fp_automaton_state; return -1; } case 158: /* athlon_idiv_mem */ case 170: /* athlon_fldxf */ case 171: /* athlon_fldxf_k8 */ case 172: /* athlon_fld */ case 173: /* athlon_fld_k8 */ case 174: /* athlon_fstxf */ case 175: /* athlon_fstxf_k8 */ case 176: /* athlon_fst */ case 177: /* athlon_fst_k8 */ case 178: /* athlon_fist */ case 180: /* athlon_fadd_load */ case 181: /* athlon_fadd_load_k8 */ case 183: /* athlon_fmul_load */ case 184: /* athlon_fmul_load_k8 */ case 187: /* athlon_fdiv_load */ case 188: /* athlon_fdiv_load_k8 */ case 191: /* athlon_fpspc_load */ case 193: /* athlon_fcmov_load */ case 195: /* athlon_fcmov_load_k8 */ case 197: /* athlon_fcomi_load */ case 198: /* athlon_fcomi_load_k8 */ case 200: /* athlon_fcom_load */ case 201: /* athlon_fcom_load_k8 */ case 203: /* athlon_movlpd_load */ case 204: /* athlon_movlpd_load_k8 */ case 205: /* athlon_movaps_load_k8 */ case 206: /* athlon_movaps_load */ case 207: /* athlon_movss_load */ case 208: /* athlon_movss_load_k8 */ case 209: /* athlon_mmxsseld */ case 210: /* athlon_mmxsseld_k8 */ case 211: /* athlon_mmxssest */ case 212: /* athlon_mmxssest_k8 */ case 213: /* athlon_mmxssest_short */ case 217: /* athlon_mmxmul_load */ 
case 219: /* athlon_mmx_load */ case 221: /* athlon_sselog_load */ case 222: /* athlon_sselog_load_k8 */ case 225: /* athlon_ssecmp_load */ case 226: /* athlon_ssecmp_load_k8 */ case 228: /* athlon_ssecmpvector_load */ case 229: /* athlon_ssecmpvector_load_k8 */ case 232: /* athlon_ssecomi_load */ case 233: /* athlon_ssecomi_load_k8 */ case 235: /* athlon_sseadd_load */ case 236: /* athlon_sseadd_load_k8 */ case 238: /* athlon_sseaddvector_load */ case 239: /* athlon_sseaddvector_load_k8 */ case 242: /* athlon_ssecvt_cvtss2sd_load_k8 */ case 244: /* athlon_ssecvt_cvtps2pd_load_k8 */ case 246: /* athlon_sseicvt_cvtsi2sd_load */ case 247: /* athlon_sseicvt_cvtsi2ss_load */ case 248: /* athlon_sseicvt_cvtsi2ss_load_k8 */ case 249: /* athlon_sseicvt_cvtsi2sd_k8 */ case 250: /* athlon_sseicvt_cvtsi2ss */ case 251: /* athlon_ssecvt_cvtsd2ss_load_k8 */ case 253: /* athlon_ssecvt_cvtpd2ps_load_k8 */ case 255: /* athlon_secvt_cvtsX2si_load */ case 258: /* athlon_ssemul_load */ case 259: /* athlon_ssemul_load_k8 */ case 261: /* athlon_ssemulvector_load */ case 262: /* athlon_ssemulvector_load_k8 */ case 265: /* athlon_ssediv_load */ case 266: /* athlon_ssediv_load_k8 */ case 268: /* athlon_ssedivvector_load */ case 269: /* athlon_ssedivvector_load_k8 */ { unsigned short _athlon_fp_automaton_state; unsigned char _athlon_load_automaton_state; temp = athlon_fp_base [chip->athlon_fp_automaton_state] + athlon_fp_translate [insn_code]; if (athlon_fp_check [temp] != chip->athlon_fp_automaton_state) return internal_min_issue_delay (insn_code, chip); else _athlon_fp_automaton_state = athlon_fp_transitions [temp]; temp = athlon_load_transitions [athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10]; if (temp >= 162) return internal_min_issue_delay (insn_code, chip); else _athlon_load_automaton_state = temp; temp = athlon_transitions [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; if (temp >= 76) return internal_min_issue_delay (insn_code, chip); else chip->athlon_automaton_state = temp; chip->athlon_fp_automaton_state = _athlon_fp_automaton_state; chip->athlon_load_automaton_state = _athlon_load_automaton_state; return -1; } case 272: /* $advance_cycle */ { unsigned short _athlon_fp_automaton_state; unsigned char _athlon_mult_automaton_state; unsigned char _athlon_load_automaton_state; unsigned char _athlon_automaton_state; unsigned char _k6_branch_unit_automaton_state; unsigned char _k6_fpu_unit_automaton_state; unsigned char _k6_integer_units_automaton_state; unsigned char _k6_store_unit_automaton_state; unsigned char _k6_load_unit_automaton_state; unsigned char _k6_decoder_automaton_state; unsigned char _ppro_store_automaton_state; unsigned char _ppro_load_automaton_state; unsigned char _ppro_fdiv_automaton_state; unsigned char _ppro_idiv_automaton_state; unsigned char _ppro_core_automaton_state; unsigned char _ppro_decoder_automaton_state; unsigned char _pentium_fpu_automaton_state; temp = athlon_fp_base [chip->athlon_fp_automaton_state] + athlon_fp_translate [insn_code]; if (athlon_fp_check [temp] != chip->athlon_fp_automaton_state) return internal_min_issue_delay (insn_code, chip); else _athlon_fp_automaton_state = athlon_fp_transitions [temp]; temp = athlon_mult_transitions [athlon_mult_translate [insn_code] + chip->athlon_mult_automaton_state * 4]; if (temp >= 16) return internal_min_issue_delay (insn_code, chip); else _athlon_mult_automaton_state = temp; temp = athlon_load_transitions [athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 
10]; if (temp >= 162) return internal_min_issue_delay (insn_code, chip); else _athlon_load_automaton_state = temp; temp = athlon_transitions [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; if (temp >= 76) return internal_min_issue_delay (insn_code, chip); else _athlon_automaton_state = temp; temp = k6_branch_unit_transitions [k6_branch_unit_translate [insn_code] + chip->k6_branch_unit_automaton_state * 3]; if (temp >= 2) return internal_min_issue_delay (insn_code, chip); else _k6_branch_unit_automaton_state = temp; temp = k6_fpu_unit_transitions [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; if (temp >= 57) return internal_min_issue_delay (insn_code, chip); else _k6_fpu_unit_automaton_state = temp; temp = k6_integer_units_base [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check [temp] != chip->k6_integer_units_automaton_state) return internal_min_issue_delay (insn_code, chip); else _k6_integer_units_automaton_state = k6_integer_units_transitions [temp]; temp = k6_store_unit_transitions [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; if (temp >= 37) return internal_min_issue_delay (insn_code, chip); else _k6_store_unit_automaton_state = temp; temp = k6_load_unit_transitions [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; if (temp >= 11) return internal_min_issue_delay (insn_code, chip); else _k6_load_unit_automaton_state = temp; temp = k6_decoder_transitions [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else _k6_decoder_automaton_state = temp; temp = ppro_store_transitions [ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7]; if (temp >= 11) return internal_min_issue_delay (insn_code, chip); else _ppro_store_automaton_state = temp; temp = ppro_load_transitions [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; if (temp >= 3) return internal_min_issue_delay (insn_code, chip); else _ppro_load_automaton_state = temp; temp = ppro_fdiv_transitions [ppro_fdiv_translate [insn_code] + chip->ppro_fdiv_automaton_state * 5]; if (temp >= 38) return internal_min_issue_delay (insn_code, chip); else _ppro_fdiv_automaton_state = temp; temp = ppro_idiv_transitions [ppro_idiv_translate [insn_code] + chip->ppro_idiv_automaton_state * 5]; if (temp >= 38) return internal_min_issue_delay (insn_code, chip); else _ppro_idiv_automaton_state = temp; temp = ppro_core_base [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check [temp] != chip->ppro_core_automaton_state) return internal_min_issue_delay (insn_code, chip); else _ppro_core_automaton_state = ppro_core_transitions [temp]; temp = ppro_decoder_transitions [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; if (temp >= 4) return internal_min_issue_delay (insn_code, chip); else _ppro_decoder_automaton_state = temp; temp = pentium_fpu_base [chip->pentium_fpu_automaton_state] + pentium_fpu_translate [insn_code]; if (pentium_fpu_check [temp] != chip->pentium_fpu_automaton_state) return internal_min_issue_delay (insn_code, chip); else _pentium_fpu_automaton_state = pentium_fpu_transitions [temp]; temp = pentium_base [chip->pentium_automaton_state] + pentium_translate [insn_code]; if (pentium_check [temp] != chip->pentium_automaton_state) return internal_min_issue_delay (insn_code, chip); else 
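/* $advance_cycle: every component automaton accepted the cycle advance,
   so the pentium automaton is updated in place here and the successor
   states staged for all the other automata are committed immediately
   below.  */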
chip->pentium_automaton_state = pentium_transitions [temp]; chip->athlon_fp_automaton_state = _athlon_fp_automaton_state; chip->athlon_mult_automaton_state = _athlon_mult_automaton_state; chip->athlon_load_automaton_state = _athlon_load_automaton_state; chip->athlon_automaton_state = _athlon_automaton_state; chip->k6_branch_unit_automaton_state = _k6_branch_unit_automaton_state; chip->k6_fpu_unit_automaton_state = _k6_fpu_unit_automaton_state; chip->k6_integer_units_automaton_state = _k6_integer_units_automaton_state; chip->k6_store_unit_automaton_state = _k6_store_unit_automaton_state; chip->k6_load_unit_automaton_state = _k6_load_unit_automaton_state; chip->k6_decoder_automaton_state = _k6_decoder_automaton_state; chip->ppro_store_automaton_state = _ppro_store_automaton_state; chip->ppro_load_automaton_state = _ppro_load_automaton_state; chip->ppro_fdiv_automaton_state = _ppro_fdiv_automaton_state; chip->ppro_idiv_automaton_state = _ppro_idiv_automaton_state; chip->ppro_core_automaton_state = _ppro_core_automaton_state; chip->ppro_decoder_automaton_state = _ppro_decoder_automaton_state; chip->pentium_fpu_automaton_state = _pentium_fpu_automaton_state; return -1; } default: return -1; } } static int *dfa_insn_codes; static int dfa_insn_codes_length; static void dfa_insn_code_enlarge (int uid) { int i = dfa_insn_codes_length; dfa_insn_codes_length = 2 * uid; dfa_insn_codes = xrealloc (dfa_insn_codes, dfa_insn_codes_length * sizeof(int)); for (; i < dfa_insn_codes_length; i++) dfa_insn_codes[i] = -1; } static inline int dfa_insn_code (rtx insn) { int uid = INSN_UID (insn); int insn_code; if (uid >= dfa_insn_codes_length) dfa_insn_code_enlarge (uid); insn_code = dfa_insn_codes[uid]; if (insn_code < 0) { insn_code = internal_dfa_insn_code (insn); dfa_insn_codes[uid] = insn_code; } return insn_code; } int state_transition (state_t state, rtx insn) { int insn_code; if (insn != 0) { insn_code = dfa_insn_code (insn); if (insn_code > DFA__ADVANCE_CYCLE) return -1; } else insn_code = DFA__ADVANCE_CYCLE; return internal_state_transition (insn_code, state); } #if AUTOMATON_STATE_ALTS static int internal_state_alts (int insn_code, struct DFA_chip *chip) { int res; switch (insn_code) { case 0: /* pent_mul */ case 1: /* pent_str */ case 3: /* pent_cld */ case 7: /* pent_imov */ case 8: /* pent_push */ case 9: /* pent_pop */ case 10: /* pent_call */ case 11: /* pent_branch */ case 16: /* pent_uv_both */ case 17: /* pent_u_both */ case 18: /* pent_v_both */ case 19: /* pent_np_both */ case 20: /* pent_uv_load */ case 21: /* pent_u_load */ case 22: /* pent_v_load */ case 23: /* pent_np_load */ case 24: /* pent_uv */ case 25: /* pent_u */ case 26: /* pent_v */ case 27: /* pent_np */ { int temp; temp = pentium_base_state_alts [chip->pentium_automaton_state] + pentium_translate [insn_code]; if (pentium_check_state_alts [temp] != chip->pentium_automaton_state) return 0; else res = pentium_state_alts [temp]; break; } case 2: /* pent_block */ case 4: /* pent_fmov */ case 5: /* pent_fpmovxf */ case 6: /* pent_fpstore */ case 12: /* pent_fp */ case 13: /* pent_fmul */ case 14: /* pent_fdiv */ case 15: /* pent_fpspc */ { int temp; temp = pentium_fpu_base_state_alts [chip->pentium_fpu_automaton_state] + pentium_fpu_translate [insn_code]; if (pentium_fpu_check_state_alts [temp] != chip->pentium_fpu_automaton_state) return 0; else res = pentium_fpu_state_alts [temp]; temp = pentium_base_state_alts [chip->pentium_automaton_state] + pentium_translate [insn_code]; if (pentium_check_state_alts [temp] != 
chip->pentium_automaton_state) return 0; else res += pentium_state_alts [temp]; break; } case 28: /* ppro_complex_insn */ { res = ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 29: /* ppro_imov */ case 32: /* ppro_imovx */ case 34: /* ppro_lea */ case 35: /* ppro_shift_rotate */ case 37: /* ppro_cld */ case 38: /* ppro_branch */ case 41: /* ppro_imul */ case 49: /* ppro_fop */ case 53: /* ppro_fsgn */ case 55: /* ppro_fcmov */ case 56: /* ppro_fcmp */ case 58: /* ppro_fmov */ case 61: /* ppro_fmov_store */ case 63: /* ppro_fmul */ case 71: /* ppro_mmx_shft */ case 73: /* ppro_mmx_mul */ case 75: /* ppro_sse_mmxcvt */ case 77: /* ppro_sse_SF */ case 78: /* ppro_sse_add_SF */ case 80: /* ppro_sse_cmp_SF */ case 82: /* ppro_sse_comi_SF */ case 84: /* ppro_sse_mul_SF */ case 86: /* ppro_sse_div_SF */ case 90: /* ppro_sse_mov_SF */ case 93: /* ppro_sse_V4SF */ case 94: /* ppro_sse_add_V4SF */ case 96: /* ppro_sse_cmp_V4SF */ case 98: /* ppro_sse_cvt_V4SF */ case 100: /* ppro_sse_mul_V4SF */ case 102: /* ppro_sse_div_V4SF */ case 104: /* ppro_sse_log_V4SF */ case 106: /* ppro_sse_mov_V4SF */ case 109: /* ppro_insn */ { int temp; temp = ppro_core_base_state_alts [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check_state_alts [temp] != chip->ppro_core_automaton_state) return 0; else res = ppro_core_state_alts [temp]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 30: /* ppro_imov_load */ case 33: /* ppro_imovx_load */ case 59: /* ppro_fmov_load */ case 107: /* ppro_sse_mov_V4SF_load */ { res = ppro_load_state_alts [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 31: /* ppro_imov_store */ case 76: /* ppro_sse_sfence */ case 92: /* ppro_sse_mov_SF_store */ case 108: /* ppro_sse_mov_V4SF_store */ { res = ppro_store_state_alts [ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 36: /* ppro_shift_rotate_mem */ case 52: /* ppro_fop_both */ case 112: /* ppro_insn_both */ { int temp; res = ppro_store_state_alts [ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7]; res += ppro_load_state_alts [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; temp = ppro_core_base_state_alts [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check_state_alts [temp] != chip->ppro_core_automaton_state) return 0; else res += ppro_core_state_alts [temp]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 39: /* ppro_indirect_branch */ case 40: /* ppro_leave */ case 42: /* ppro_imul_mem */ case 50: /* ppro_fop_load */ case 57: /* ppro_fcmp_load */ case 60: /* ppro_fmov_XF_load */ case 64: /* ppro_fmul_load */ case 72: /* ppro_mmx_shft_load */ case 74: /* ppro_mmx_mul_load */ case 79: /* ppro_sse_add_SF_load */ case 81: /* ppro_sse_cmp_SF_load */ case 83: /* ppro_sse_comi_SF_load */ case 85: /* ppro_sse_mul_SF_load */ case 87: /* ppro_sse_div_SF_load */ case 88: /* ppro_sse_icvt_SF */ case 89: /* ppro_sse_icvt_SI */ case 91: /* ppro_sse_mov_SF_load */ case 95: /* ppro_sse_add_V4SF_load */ case 97: /* 
ppro_sse_cmp_V4SF_load */ case 101: /* ppro_sse_mul_V4SF_load */ case 103: /* ppro_sse_div_V4SF_load */ case 105: /* ppro_sse_log_V4SF_load */ case 110: /* ppro_insn_load */ { int temp; res = ppro_load_state_alts [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; temp = ppro_core_base_state_alts [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check_state_alts [temp] != chip->ppro_core_automaton_state) return 0; else res += ppro_core_state_alts [temp]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 43: /* ppro_idiv_QI */ case 45: /* ppro_idiv_HI */ case 47: /* ppro_idiv_SI */ { int temp; res = ppro_idiv_state_alts [ppro_idiv_translate [insn_code] + chip->ppro_idiv_automaton_state * 5]; temp = ppro_core_base_state_alts [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check_state_alts [temp] != chip->ppro_core_automaton_state) return 0; else res += ppro_core_state_alts [temp]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 44: /* ppro_idiv_QI_load */ case 46: /* ppro_idiv_HI_load */ case 48: /* ppro_idiv_SI_load */ { int temp; res = ppro_load_state_alts [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; res += ppro_idiv_state_alts [ppro_idiv_translate [insn_code] + chip->ppro_idiv_automaton_state * 5]; temp = ppro_core_base_state_alts [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check_state_alts [temp] != chip->ppro_core_automaton_state) return 0; else res += ppro_core_state_alts [temp]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 51: /* ppro_fop_store */ case 54: /* ppro_fistp */ case 62: /* ppro_fmov_XF_store */ case 99: /* ppro_sse_cvt_V4SF_other */ case 111: /* ppro_insn_store */ { int temp; res = ppro_store_state_alts [ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7]; temp = ppro_core_base_state_alts [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check_state_alts [temp] != chip->ppro_core_automaton_state) return 0; else res += ppro_core_state_alts [temp]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 65: /* ppro_fdiv_SF */ case 67: /* ppro_fdiv_DF */ case 69: /* ppro_fdiv_XF */ { int temp; res = ppro_fdiv_state_alts [ppro_fdiv_translate [insn_code] + chip->ppro_fdiv_automaton_state * 5]; temp = ppro_core_base_state_alts [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check_state_alts [temp] != chip->ppro_core_automaton_state) return 0; else res += ppro_core_state_alts [temp]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 66: /* ppro_fdiv_SF_load */ case 68: /* ppro_fdiv_DF_load */ case 70: /* ppro_fdiv_XF_load */ { int temp; res = ppro_load_state_alts [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; res += ppro_fdiv_state_alts [ppro_fdiv_translate [insn_code] + chip->ppro_fdiv_automaton_state * 5]; temp = ppro_core_base_state_alts [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check_state_alts [temp] != chip->ppro_core_automaton_state) return 0; else res += ppro_core_state_alts [temp]; res += 
ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; break; } case 113: /* k6_alux_only */ case 116: /* k6_alu_imul */ case 119: /* k6_alu_idiv */ case 121: /* k6_alu */ case 124: /* k6_alu_imov */ { int temp; temp = k6_integer_units_base_state_alts [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check_state_alts [temp] != chip->k6_integer_units_automaton_state) return 0; else res = k6_integer_units_state_alts [temp]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 114: /* k6_alux_only_load */ case 117: /* k6_alu_imul_load */ case 120: /* k6_alu_idiv_mem */ case 122: /* k6_alu_load */ case 128: /* k6_alu_imov_both */ case 132: /* k6_load_leave */ { int temp; temp = k6_integer_units_base_state_alts [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check_state_alts [temp] != chip->k6_integer_units_automaton_state) return 0; else res = k6_integer_units_state_alts [temp]; res += k6_load_unit_state_alts [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 115: /* k6_alux_only_store */ case 118: /* k6_alu_imul_store */ case 123: /* k6_alu_store */ { int temp; temp = k6_integer_units_base_state_alts [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check_state_alts [temp] != chip->k6_integer_units_automaton_state) return 0; else res = k6_integer_units_state_alts [temp]; res += k6_store_unit_state_alts [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; res += k6_load_unit_state_alts [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 125: /* k6_alu_imov_imm */ { res = k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 126: /* k6_alu_imov_load */ case 131: /* k6_load_pop */ case 133: /* k6_load_str */ { res = k6_load_unit_state_alts [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 127: /* k6_alu_imov_store */ case 135: /* k6_store_push */ { res = k6_store_unit_state_alts [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 129: /* k6_branch_call */ case 130: /* k6_branch_branch */ { res = k6_branch_unit_state_alts [k6_branch_unit_translate [insn_code] + chip->k6_branch_unit_automaton_state * 3]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 134: /* k6_store_lea */ { int temp; temp = k6_integer_units_base_state_alts [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check_state_alts [temp] != chip->k6_integer_units_automaton_state) return 0; else res = k6_integer_units_state_alts [temp]; res += k6_store_unit_state_alts [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; res += k6_decoder_state_alts 
[k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 136: /* k6_store_str */ { res = k6_store_unit_state_alts [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; break; } case 137: /* k6_fpu */ case 140: /* k6_fpu_fmul */ case 142: /* k6_fpu_expensive */ { res = k6_fpu_unit_state_alts [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 138: /* k6_fpu_load */ case 141: /* k6_fpu_fmul_load */ { res = k6_fpu_unit_state_alts [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; res += k6_load_unit_state_alts [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 139: /* k6_fpu_store */ { res = k6_fpu_unit_state_alts [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; res += k6_store_unit_state_alts [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; break; } case 143: /* athlon_branch */ case 144: /* athlon_call */ case 150: /* athlon_lea */ case 160: /* athlon_idirect */ case 161: /* athlon_ivector */ { res = athlon_state_alts [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; break; } case 145: /* athlon_push */ case 146: /* athlon_pop */ case 147: /* athlon_pop_k8 */ case 148: /* athlon_leave */ case 149: /* athlon_leave_k8 */ case 159: /* athlon_str */ case 162: /* athlon_idirect_loadmov */ case 163: /* athlon_idirect_load */ case 164: /* athlon_ivector_load */ case 165: /* athlon_idirect_movstore */ case 166: /* athlon_idirect_both */ case 167: /* athlon_ivector_both */ case 168: /* athlon_idirect_store */ case 169: /* athlon_ivector_store */ { res = athlon_load_state_alts [athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10]; res += athlon_state_alts [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; break; } case 151: /* athlon_imul */ case 152: /* athlon_imul_k8_DI */ case 153: /* athlon_imul_k8 */ { res = athlon_mult_state_alts [athlon_mult_translate [insn_code] + chip->athlon_mult_automaton_state * 4]; res += athlon_state_alts [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; break; } case 154: /* athlon_imul_mem */ case 155: /* athlon_imul_mem_k8_DI */ case 156: /* athlon_imul_mem_k8 */ { res = athlon_mult_state_alts [athlon_mult_translate [insn_code] + chip->athlon_mult_automaton_state * 4]; res += athlon_load_state_alts [athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10]; res += athlon_state_alts [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; break; } case 157: /* athlon_idiv */ case 179: /* athlon_fmov */ case 182: /* athlon_fadd */ case 185: /* athlon_fmul */ case 186: /* athlon_fsgn */ case 189: /* athlon_fdiv */ case 190: /* athlon_fdiv_k8 */ case 192: /* athlon_fpspc */ case 194: /* athlon_fcmov */ case 196: /* athlon_fcmov_k8 */ case 199: /* athlon_fcomi */ case 202: /* athlon_fcom */ case 214: /* athlon_movaps */ case 215: /* athlon_movaps_k8 */ case 216: /* athlon_mmxssemov */ case 218: /* athlon_mmxmul */ case 220: /* athlon_mmx */ case 223: /* athlon_sselog */ case 224: /* athlon_sselog_k8 */ case 227: /* athlon_ssecmp */ case 230: 
/* athlon_ssecmpvector */ case 231: /* athlon_ssecmpvector_k8 */ case 234: /* athlon_ssecomi */ case 237: /* athlon_sseadd */ case 240: /* athlon_sseaddvector */ case 241: /* athlon_sseaddvector_k8 */ case 243: /* athlon_ssecvt_cvtss2sd */ case 245: /* athlon_ssecvt_cvtps2pd_k8 */ case 252: /* athlon_ssecvt_cvtsd2ss */ case 254: /* athlon_ssecvt_cvtpd2ps */ case 256: /* athlon_ssecvt_cvtsX2si */ case 257: /* athlon_ssecvt_cvtsX2si_k8 */ case 260: /* athlon_ssemul */ case 263: /* athlon_ssemulvector */ case 264: /* athlon_ssemulvector_k8 */ case 267: /* athlon_ssediv */ case 270: /* athlon_ssedivvector */ case 271: /* athlon_ssedivvector_k8 */ { int temp; temp = athlon_fp_base_state_alts [chip->athlon_fp_automaton_state] + athlon_fp_translate [insn_code]; if (athlon_fp_check_state_alts [temp] != chip->athlon_fp_automaton_state) return 0; else res = athlon_fp_state_alts [temp]; res += athlon_state_alts [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; break; } case 158: /* athlon_idiv_mem */ case 170: /* athlon_fldxf */ case 171: /* athlon_fldxf_k8 */ case 172: /* athlon_fld */ case 173: /* athlon_fld_k8 */ case 174: /* athlon_fstxf */ case 175: /* athlon_fstxf_k8 */ case 176: /* athlon_fst */ case 177: /* athlon_fst_k8 */ case 178: /* athlon_fist */ case 180: /* athlon_fadd_load */ case 181: /* athlon_fadd_load_k8 */ case 183: /* athlon_fmul_load */ case 184: /* athlon_fmul_load_k8 */ case 187: /* athlon_fdiv_load */ case 188: /* athlon_fdiv_load_k8 */ case 191: /* athlon_fpspc_load */ case 193: /* athlon_fcmov_load */ case 195: /* athlon_fcmov_load_k8 */ case 197: /* athlon_fcomi_load */ case 198: /* athlon_fcomi_load_k8 */ case 200: /* athlon_fcom_load */ case 201: /* athlon_fcom_load_k8 */ case 203: /* athlon_movlpd_load */ case 204: /* athlon_movlpd_load_k8 */ case 205: /* athlon_movaps_load_k8 */ case 206: /* athlon_movaps_load */ case 207: /* athlon_movss_load */ case 208: /* athlon_movss_load_k8 */ case 209: /* athlon_mmxsseld */ case 210: /* athlon_mmxsseld_k8 */ case 211: /* athlon_mmxssest */ case 212: /* athlon_mmxssest_k8 */ case 213: /* athlon_mmxssest_short */ case 217: /* athlon_mmxmul_load */ case 219: /* athlon_mmx_load */ case 221: /* athlon_sselog_load */ case 222: /* athlon_sselog_load_k8 */ case 225: /* athlon_ssecmp_load */ case 226: /* athlon_ssecmp_load_k8 */ case 228: /* athlon_ssecmpvector_load */ case 229: /* athlon_ssecmpvector_load_k8 */ case 232: /* athlon_ssecomi_load */ case 233: /* athlon_ssecomi_load_k8 */ case 235: /* athlon_sseadd_load */ case 236: /* athlon_sseadd_load_k8 */ case 238: /* athlon_sseaddvector_load */ case 239: /* athlon_sseaddvector_load_k8 */ case 242: /* athlon_ssecvt_cvtss2sd_load_k8 */ case 244: /* athlon_ssecvt_cvtps2pd_load_k8 */ case 246: /* athlon_sseicvt_cvtsi2sd_load */ case 247: /* athlon_sseicvt_cvtsi2ss_load */ case 248: /* athlon_sseicvt_cvtsi2ss_load_k8 */ case 249: /* athlon_sseicvt_cvtsi2sd_k8 */ case 250: /* athlon_sseicvt_cvtsi2ss */ case 251: /* athlon_ssecvt_cvtsd2ss_load_k8 */ case 253: /* athlon_ssecvt_cvtpd2ps_load_k8 */ case 255: /* athlon_secvt_cvtsX2si_load */ case 258: /* athlon_ssemul_load */ case 259: /* athlon_ssemul_load_k8 */ case 261: /* athlon_ssemulvector_load */ case 262: /* athlon_ssemulvector_load_k8 */ case 265: /* athlon_ssediv_load */ case 266: /* athlon_ssediv_load_k8 */ case 268: /* athlon_ssedivvector_load */ case 269: /* athlon_ssedivvector_load_k8 */ { int temp; temp = athlon_fp_base_state_alts [chip->athlon_fp_automaton_state] + athlon_fp_translate [insn_code]; if 
(athlon_fp_check_state_alts [temp] != chip->athlon_fp_automaton_state) return 0; else res = athlon_fp_state_alts [temp]; res += athlon_load_state_alts [athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10]; res += athlon_state_alts [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; break; } case 272: /* $advance_cycle */ { int temp; temp = athlon_fp_base_state_alts [chip->athlon_fp_automaton_state] + athlon_fp_translate [insn_code]; if (athlon_fp_check_state_alts [temp] != chip->athlon_fp_automaton_state) return 0; else res = athlon_fp_state_alts [temp]; res += athlon_mult_state_alts [athlon_mult_translate [insn_code] + chip->athlon_mult_automaton_state * 4]; res += athlon_load_state_alts [athlon_load_translate [insn_code] + chip->athlon_load_automaton_state * 10]; res += athlon_state_alts [athlon_translate [insn_code] + chip->athlon_automaton_state * 10]; res += k6_branch_unit_state_alts [k6_branch_unit_translate [insn_code] + chip->k6_branch_unit_automaton_state * 3]; res += k6_fpu_unit_state_alts [k6_fpu_unit_translate [insn_code] + chip->k6_fpu_unit_automaton_state * 5]; temp = k6_integer_units_base_state_alts [chip->k6_integer_units_automaton_state] + k6_integer_units_translate [insn_code]; if (k6_integer_units_check_state_alts [temp] != chip->k6_integer_units_automaton_state) return 0; else res += k6_integer_units_state_alts [temp]; res += k6_store_unit_state_alts [k6_store_unit_translate [insn_code] + chip->k6_store_unit_automaton_state * 6]; res += k6_load_unit_state_alts [k6_load_unit_translate [insn_code] + chip->k6_load_unit_automaton_state * 4]; res += k6_decoder_state_alts [k6_decoder_translate [insn_code] + chip->k6_decoder_automaton_state * 4]; res += ppro_store_state_alts [ppro_store_translate [insn_code] + chip->ppro_store_automaton_state * 7]; res += ppro_load_state_alts [ppro_load_translate [insn_code] + chip->ppro_load_automaton_state * 4]; res += ppro_fdiv_state_alts [ppro_fdiv_translate [insn_code] + chip->ppro_fdiv_automaton_state * 5]; res += ppro_idiv_state_alts [ppro_idiv_translate [insn_code] + chip->ppro_idiv_automaton_state * 5]; temp = ppro_core_base_state_alts [chip->ppro_core_automaton_state] + ppro_core_translate [insn_code]; if (ppro_core_check_state_alts [temp] != chip->ppro_core_automaton_state) return 0; else res += ppro_core_state_alts [temp]; res += ppro_decoder_state_alts [ppro_decoder_translate [insn_code] + chip->ppro_decoder_automaton_state * 4]; temp = pentium_fpu_base_state_alts [chip->pentium_fpu_automaton_state] + pentium_fpu_translate [insn_code]; if (pentium_fpu_check_state_alts [temp] != chip->pentium_fpu_automaton_state) return 0; else res += pentium_fpu_state_alts [temp]; temp = pentium_base_state_alts [chip->pentium_automaton_state] + pentium_translate [insn_code]; if (pentium_check_state_alts [temp] != chip->pentium_automaton_state) return 0; else res += pentium_state_alts [temp]; break; } default: res = 0; break; } return res; } int state_alts (state, insn) state_t state; rtx insn; { int insn_code; if (insn != 0) { insn_code = dfa_insn_code (insn); if (insn_code > DFA__ADVANCE_CYCLE) return 0; } else insn_code = DFA__ADVANCE_CYCLE; return internal_state_alts (insn_code, state); } #endif /* #if AUTOMATON_STATE_ALTS */ int min_issue_delay (state_t state, rtx insn) { int insn_code; if (insn != 0) { insn_code = dfa_insn_code (insn); if (insn_code > DFA__ADVANCE_CYCLE) return 0; } else insn_code = DFA__ADVANCE_CYCLE; return internal_min_issue_delay (insn_code, state); } static int 
internal_state_dead_lock_p (struct DFA_chip *chip) { if (pentium_dead_lock [chip->pentium_automaton_state]) return 1/* TRUE */; if (pentium_fpu_dead_lock [chip->pentium_fpu_automaton_state]) return 1/* TRUE */; if (ppro_decoder_dead_lock [chip->ppro_decoder_automaton_state]) return 1/* TRUE */; if (ppro_core_dead_lock [chip->ppro_core_automaton_state]) return 1/* TRUE */; if (ppro_idiv_dead_lock [chip->ppro_idiv_automaton_state]) return 1/* TRUE */; if (ppro_fdiv_dead_lock [chip->ppro_fdiv_automaton_state]) return 1/* TRUE */; if (ppro_load_dead_lock [chip->ppro_load_automaton_state]) return 1/* TRUE */; if (ppro_store_dead_lock [chip->ppro_store_automaton_state]) return 1/* TRUE */; if (k6_decoder_dead_lock [chip->k6_decoder_automaton_state]) return 1/* TRUE */; if (k6_load_unit_dead_lock [chip->k6_load_unit_automaton_state]) return 1/* TRUE */; if (k6_store_unit_dead_lock [chip->k6_store_unit_automaton_state]) return 1/* TRUE */; if (k6_integer_units_dead_lock [chip->k6_integer_units_automaton_state]) return 1/* TRUE */; if (k6_fpu_unit_dead_lock [chip->k6_fpu_unit_automaton_state]) return 1/* TRUE */; if (k6_branch_unit_dead_lock [chip->k6_branch_unit_automaton_state]) return 1/* TRUE */; if (athlon_dead_lock [chip->athlon_automaton_state]) return 1/* TRUE */; if (athlon_load_dead_lock [chip->athlon_load_automaton_state]) return 1/* TRUE */; if (athlon_mult_dead_lock [chip->athlon_mult_automaton_state]) return 1/* TRUE */; if (athlon_fp_dead_lock [chip->athlon_fp_automaton_state]) return 1/* TRUE */; return 0/* FALSE */; } int state_dead_lock_p (state_t state) { return internal_state_dead_lock_p (state); } int state_size (void) { return sizeof (struct DFA_chip); } static inline void internal_reset (struct DFA_chip *chip) { memset (chip, 0, sizeof (struct DFA_chip)); } void state_reset (state_t state) { internal_reset (state); } int min_insn_conflict_delay (state_t state, rtx insn, rtx insn2) { struct DFA_chip DFA_chip; int insn_code, insn2_code; if (insn != 0) { insn_code = dfa_insn_code (insn); if (insn_code > DFA__ADVANCE_CYCLE) return 0; } else insn_code = DFA__ADVANCE_CYCLE; if (insn2 != 0) { insn2_code = dfa_insn_code (insn2); if (insn2_code > DFA__ADVANCE_CYCLE) return 0; } else insn2_code = DFA__ADVANCE_CYCLE; memcpy (&DFA_chip, state, sizeof (DFA_chip)); internal_reset (&DFA_chip); if (internal_state_transition (insn_code, &DFA_chip) > 0) abort (); return internal_min_issue_delay (insn2_code, &DFA_chip); } static int internal_insn_latency (int insn_code ATTRIBUTE_UNUSED, int insn2_code ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, rtx insn2 ATTRIBUTE_UNUSED) { static const unsigned char default_latencies[] = { 11, 12, 1, 2, 1, 3, 2, 1, 1, 1, 10, 1, 3, 3, 39, 70, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 6, 1, 4, 1, 1, 4, 1, 1, 4, 2, 1, 6, 4, 4, 4, 19, 19, 23, 23, 39, 39, 3, 5, 3, 5, 1, 5, 2, 1, 4, 1, 1, 3, 1, 3, 5, 6, 18, 19, 32, 33, 38, 39, 1, 2, 3, 3, 4, 3, 3, 3, 3, 3, 3, 1, 1, 4, 4, 18, 18, 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 3, 4, 5, 5, 48, 48, 2, 2, 1, 2, 3, 1, 3, 1, 4, 1, 3, 3, 2, 4, 4, 17, 19, 1, 3, 3, 1, 0, 2, 1, 2, 1, 1, 3, 5, 10, 2, 2, 10, 2, 6, 6, 2, 2, 56, 0, 0, 2, 4, 3, 3, 3, 2, 5, 4, 3, 8, 7, 6, 6, 9, 6, 1, 2, 3, 4, 6, 1, 4, 6, 1, 2, 12, 13, 0, 2, 10, 8, 4, 2, 4, 2, 4, 6, 4, 4, 6, 4, 2, 24, 13, 24, 11, 103, 100, 7, 7, 17, 15, 3, 5, 3, 2, 4, 2, 0, 2, 2, 0, 1, 1, 0, 2, 3, 3, 2, 2, 2, 2, 4, 3, 3, 2, 3, 5, 3, 3, 2, 4, 2, 3, 5, 3, 3, 4, 6, 4, 4, 6, 4, 5, 7, 5, 5, 4, 2, 5, 3, 6, 9, 9, 11, 14, 9, 12, 8, 8, 9, 9, 9, 4, 6, 4, 5, 7, 5, 5, 20, 22, 20, 39, 35, 39, 39, }; if (insn_code >= 
DFA__ADVANCE_CYCLE || insn2_code >= DFA__ADVANCE_CYCLE) return 0; switch (insn_code) { case 8: switch (insn2_code) { case 10: return 0; case 9: return 0; case 8: return 0; } break; case 9: switch (insn2_code) { case 10: return 0; case 9: return 0; case 8: return 0; } break; } return default_latencies[insn_code]; } int insn_latency (rtx insn, rtx insn2) { int insn_code, insn2_code; if (insn != 0) { insn_code = dfa_insn_code (insn); if (insn_code > DFA__ADVANCE_CYCLE) return 0; } else insn_code = DFA__ADVANCE_CYCLE; if (insn2 != 0) { insn2_code = dfa_insn_code (insn2); if (insn2_code > DFA__ADVANCE_CYCLE) return 0; } else insn2_code = DFA__ADVANCE_CYCLE; return internal_insn_latency (insn_code, insn2_code, insn, insn2); } void print_reservation (FILE *f, rtx insn ATTRIBUTE_UNUSED) { static const char *const reservation_names[] = { "pentium-np*11", "pentium-np*12", "(pentium-np+pentium-fp)", "pentium-np*2", "(pentium-fp+pentium-np)", "((pentium-fp+pentium-np))*3", "((pentium-fp+pentium-np))*2", "pentium-firstuv", "pentium-firstuv", "pentium-firstuv", "pentium-firstv,pentium-v*9", "pentium-firstv", "(pentium-firstu+pentium-fp),nothing,nothing", "(pentium-firstuv+pentium-fp+pentium-fmul),pentium-fmul,nothing", "(pentium-np+pentium-fp+pentium-fmul),((pentium-fp+pentium-fmul))*36,pentium-fmul*2", "(pentium-np+pentium-fp+pentium-fmul),((pentium-fp+pentium-fmul))*67,pentium-fmul*2", "pentium-firstuvboth,(pentium-uv+pentium-memory),pentium-uv", "pentium-firstuboth,(pentium-u+pentium-memory),pentium-u", "pentium-firstvboth,(pentium-v+pentium-memory),pentium-v", "pentium-np,pentium-np,pentium-np", "pentium-firstuvload,pentium-uv", "pentium-firstuload,pentium-u", "pentium-firstvload,pentium-v", "pentium-np,pentium-np", "pentium-firstuv", "pentium-firstu", "pentium-firstv", "pentium-np", "decoder0", "decodern,p0|p1", "decodern,p2", "decoder0,(p4+p3)", "decodern,p0|p1", "decodern,p2", "decodern,p0", "decodern,p0", "decoder0,(p2+p0),(p4+p3)", "decoder0,((p0+p1))*2", "decodern,p1", "decoder0,(p2+p1)", "decoder0,(p2+(p0|p1)),p0|p1", "decodern,p0", "decoder0,(p2+p0)", "decoder0,((p0+idiv))*2,((p0|p1)+idiv),idiv*9", "decoder0,(p2+p0+idiv),(p0+idiv),((p0|p1)+idiv),idiv*9", "decoder0,((p0+idiv))*3,((p0|p1)+idiv),idiv*17", "decoder0,(p2+p0+idiv),(p0+idiv),((p0|p1)+idiv),idiv*18", "decoder0,((p0+idiv))*3,((p0|p1)+idiv),idiv*33", "decoder0,(p2+p0+idiv),(p0+idiv),((p0|p1)+idiv),idiv*34", "decodern,p0", "decoder0,(p2+p0),p0", "decoder0,p0,p0,(p0+p4+p3)", "decoder0,(p2+p0),(p0+p4+p3)", "decodern,p0", "decoder0,p0*2,(p4+p3)", "decoder0,p0*2", "decodern,p0", "decoder0,(p2+p0)", "decodern,p0", "decodern,p2", "decoder0,((p2+p0))*2", "decodern,p0", "decoder0,(p0+p4),(p0+p3)", "decoder0,p0*2", "decoder0,(p2+p0),p0", "decodern,(p0+fdiv),fdiv*16", "decoder0,(p2+p0+fdiv),fdiv*16", "decodern,(p0+fdiv),fdiv*30", "decoder0,(p2+p0+fdiv),fdiv*30", "decodern,(p0+fdiv),fdiv*36", "decoder0,(p2+p0+fdiv),fdiv*36", "decodern,p1", "decoder0,(p2+p1)", "decodern,p0", "decoder0,(p2+p0)", "decodern,p1", "decoder0,(p4+p3)", "decodern,p0", "decodern,p1", "decoder0,(p2+p1)", "decoder0,p1", "decoder0,(p2+p1)", "decodern,p0", "decoder0,(p2+p0)", "decodern,p0", "decoder0,(p2+p0)", "decoder0,p0*17", "decoder0,(p2+p0),p0*16", "decoder0,((p2+p1))*2", "decoder0,(p2+p1)", "decoder0,p0|p1", "decoder0,(p2+(p0|p1))", "decoder0,(p4+p3)", "decoder0,p1*2", "decoder0,p1*2", "decoder0,((p2+p1))*2", "decoder0,p1*2", "decoder0,((p2+p1))*2", "decoder0,p1*2", "decoder0,p1,(p4+p3)", "decoder0,p0*2", "decoder0,((p2+p0))*2", "decoder0,p0*34", 
"decoder0,((p2+p0))*2,p0*32", "decodern,p1", "decoder0,(p2+p1)", "decoder0,(p0|p1)*2", "decoder0,p2*2", "decoder0,((p4+p3))*2", "decodern,p0|p1", "decoder0,(p2+(p0|p1))", "decoder0,p0|p1,(p4+p3)", "decoder0,(p2+(p0|p1)),(p4+p3)", "k6_decode_short,k6_alux", "k6_decode_short,k6_load,k6_alux", "k6_decode_long,k6_load,k6_alux,k6_store", "k6_decode_vector,k6_alux*3", "k6_decode_vector,k6_load,k6_alux*3", "k6_decode_vector,k6_load,k6_alux*3,k6_store", "k6_decode_vector,k6_alux*17", "k6_decode_vector,k6_load,k6_alux*17", "k6_decode_short,k6_alux|k6_aluy", "k6_decode_short,k6_load,k6_alux|k6_aluy", "k6_decode_long,k6_load,k6_alux|k6_aluy,k6_store", "k6_decode_short,k6_alux|k6_aluy", "k6_decode_short", "k6_decode_short,k6_load", "k6_decode_short,k6_store", "k6_decode_long,k6_load,k6_alux|k6_aluy", "k6_decode_vector,k6_branch", "k6_decode_short,k6_branch", "k6_decode_short,k6_load", "k6_decode_long,k6_load,(k6_alux|k6_aluy)*2", "k6_decode_vector,k6_load*10", "k6_decode_short,k6_store,k6_alux|k6_aluy", "k6_decode_short,k6_store", "k6_store*10", "k6_decode_vector,k6_fpu*2", "k6_decode_short,k6_load,k6_fpu*2", "k6_decode_short,k6_store,k6_fpu*2", "k6_decode_short,k6_fpu*2", "k6_decode_short,k6_load,k6_fpu*2", "k6_decode_short,k6_fpu*56", "athlon-direct,athlon-ieu", "athlon-vector,athlon-ieu", "athlon-direct,athlon-agu,athlon-store", "athlon-vector,athlon-load,athlon-ieu", "athlon-double,(athlon-ieu+athlon-load)", "athlon-vector,(athlon-ieu+athlon-load)", "athlon-double,(athlon-ieu+athlon-load)", "athlon-direct,athlon-agu,nothing", "athlon-vector,athlon-ieu0,athlon-mult,nothing,nothing,athlon-ieu0", "athlon-direct0,athlon-ieu0,athlon-mult,nothing,athlon-ieu0", "athlon-direct0,athlon-ieu0,athlon-mult,athlon-ieu0", "athlon-vector,athlon-load,athlon-ieu,athlon-mult,nothing,nothing,athlon-ieu", "athlon-vector,athlon-load,athlon-ieu,athlon-mult,nothing,athlon-ieu", "athlon-vector,athlon-load,athlon-ieu,athlon-mult,athlon-ieu", "athlon-vector,(athlon-ieu0*6+(athlon-fpsched,athlon-fvector))", "athlon-vector,((athlon-load,athlon-ieu0*6)+(athlon-fpsched,athlon-fvector))", "athlon-vector,athlon-load,athlon-ieu0*6", "athlon-direct,athlon-ieu", "athlon-vector,athlon-ieu,athlon-ieu", "athlon-direct,athlon-load", "athlon-direct,athlon-load,athlon-ieu", "athlon-vector,athlon-load,athlon-ieu,athlon-ieu", "athlon-direct,athlon-agu,athlon-store", "athlon-direct,athlon-load,athlon-ieu,athlon-store,athlon-store", "athlon-vector,athlon-load,athlon-ieu,athlon-ieu,athlon-store", "athlon-direct,(athlon-ieu+athlon-agu),athlon-store", "athlon-vector,(athlon-ieu+athlon-agu),athlon-ieu,athlon-store", "athlon-vector,athlon-fpload2,athlon-fvector*9", "athlon-vector,athlon-fpload2k8,athlon-fvector*9", "athlon-direct,athlon-fpload,athlon-fany", "athlon-direct,athlon-fploadk8,athlon-fstore", "athlon-vector,(athlon-fpsched+athlon-agu),(athlon-store2+athlon-fvector*7)", "athlon-vector,(athlon-fpsched+athlon-agu),(athlon-store2+athlon-fvector*6)", "athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)", "athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)", "athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)", "athlon-direct,athlon-fpsched,athlon-faddmul", "athlon-direct,athlon-fpload,athlon-fadd", "athlon-direct,athlon-fploadk8,athlon-fadd", "athlon-direct,athlon-fpsched,athlon-fadd", "athlon-direct,athlon-fpload,athlon-fmul", "athlon-direct,athlon-fploadk8,athlon-fmul", "athlon-direct,athlon-fpsched,athlon-fmul", "athlon-direct,athlon-fpsched,athlon-fmul", 
"athlon-direct,athlon-fpload,athlon-fmul", "athlon-direct,athlon-fploadk8,athlon-fmul", "athlon-direct,athlon-fpsched,athlon-fmul", "athlon-direct,athlon-fpsched,athlon-fmul", "athlon-vector,athlon-fpload,athlon-fvector", "athlon-vector,athlon-fpsched,athlon-fvector", "athlon-vector,athlon-fpload,athlon-fvector", "athlon-vector,athlon-fpsched,athlon-fvector", "athlon-vector,athlon-fploadk8,athlon-fvector", "athlon-vector,athlon-fpsched,athlon-fvector", "athlon-vector,athlon-fpload,athlon-fadd", "athlon-vector,athlon-fploadk8,athlon-fadd", "athlon-vector,athlon-fpsched,athlon-fadd", "athlon-direct,athlon-fpload,athlon-fadd", "athlon-direct,athlon-fploadk8,athlon-fadd", "athlon-direct,athlon-fpsched,athlon-fadd", "athlon-direct,athlon-fpload,athlon-fany", "athlon-direct,athlon-fploadk8,athlon-fstore", "athlon-double,athlon-fpload2k8,athlon-fstore,athlon-fstore", "athlon-vector,athlon-fpload2,(athlon-fany+athlon-fany)", "athlon-vector,athlon-fpload,athlon-fany*2", "athlon-double,athlon-fploadk8,(athlon-fstore+athlon-fany)", "athlon-direct,athlon-fpload,athlon-fany", "athlon-direct,athlon-fploadk8,athlon-fstore", "athlon-vector,(athlon-fpsched+athlon-agu),((athlon-fstore+athlon-store2))*2", "athlon-double,(athlon-fpsched+athlon-agu),((athlon-fstore+athlon-store2))*2", "athlon-direct,(athlon-fpsched+athlon-agu),(athlon-fstore+athlon-store)", "athlon-double,athlon-fpsched,(athlon-faddmul+athlon-faddmul)", "athlon-vector,athlon-fpsched,(athlon-faddmul+athlon-faddmul)", "athlon-direct,athlon-fpsched,athlon-faddmul", "athlon-direct,athlon-fpload,athlon-fmul", "athlon-direct,athlon-fpsched,athlon-fmul", "athlon-direct,athlon-fpload,athlon-faddmul", "athlon-direct,athlon-fpsched,athlon-faddmul", "athlon-vector,athlon-fpload2,athlon-fmul*2", "athlon-double,athlon-fpload2k8,athlon-fmul*2", "athlon-vector,athlon-fpsched,athlon-fmul*2", "athlon-double,athlon-fpsched,athlon-fmul", "athlon-direct,athlon-fpload,athlon-fadd", "athlon-direct,athlon-fploadk8,athlon-fadd", "athlon-direct,athlon-fpsched,athlon-fadd", "athlon-vector,athlon-fpload2,athlon-fadd*2", "athlon-double,athlon-fpload2k8,athlon-fadd*2", "athlon-vector,athlon-fpsched,athlon-fadd*2", "athlon-double,athlon-fpsched,athlon-fadd*2", "athlon-vector,athlon-fpload,athlon-fadd", "athlon-vector,athlon-fploadk8,athlon-fadd", "athlon-vector,athlon-fpsched,athlon-fadd", "athlon-direct,athlon-fpload,athlon-fadd", "athlon-direct,athlon-fploadk8,athlon-fadd", "athlon-direct,athlon-fpsched,athlon-fadd", "athlon-vector,athlon-fpload2,athlon-fadd*2", "athlon-double,athlon-fpload2k8,athlon-fadd*2", "athlon-vector,athlon-fpsched,athlon-fadd*2", "athlon-double,athlon-fpsched,athlon-fadd*2", "athlon-direct,athlon-fploadk8,athlon-fstore", "athlon-direct,athlon-fpsched,athlon-fstore", "athlon-double,athlon-fpload2k8,athlon-fstore*2", "athlon-double,athlon-fpsched,athlon-fstore,athlon-fstore", "athlon-direct,athlon-fploadk8,athlon-fstore", "athlon-vector,athlon-fpload,athlon-fstore*2", "athlon-double,athlon-fploadk8,athlon-fstore*2", "athlon-double,athlon-fploadk8,athlon-fstore", "athlon-vector,athlon-fploadk8,athlon-fvector*2", "athlon-double,athlon-fploadk8,athlon-fstore*3", "athlon-vector,athlon-fpsched,athlon-fvector*3", "athlon-double,athlon-fpload2k8,athlon-fstore*3", "athlon-vector,athlon-fpsched,athlon-fvector*2", "athlon-vector,athlon-fploadk8,athlon-fvector", "athlon-vector,athlon-fpsched,athlon-fvector", "athlon-double,athlon-fpsched,athlon-fstore", "athlon-direct,athlon-fpload,athlon-fmul", "athlon-direct,athlon-fploadk8,athlon-fmul", 
"athlon-direct,athlon-fpsched,athlon-fmul", "athlon-vector,athlon-fpload2,athlon-fmul*2", "athlon-double,athlon-fpload2k8,athlon-fmul*2", "athlon-vector,athlon-fpsched,athlon-fmul*2", "athlon-double,athlon-fpsched,athlon-fmul*2", "athlon-direct,athlon-fpload,athlon-fmul*17", "athlon-direct,athlon-fploadk8,athlon-fmul*17", "athlon-direct,athlon-fpsched,athlon-fmul*17", "athlon-vector,athlon-fpload2,athlon-fmul*34", "athlon-double,athlon-fpload2k8,athlon-fmul*34", "athlon-vector,athlon-fmul*34", "athlon-double,athlon-fmul*34", "nothing" }; int insn_code; if (insn == 0) insn_code = DFA__ADVANCE_CYCLE; else { insn_code = dfa_insn_code (insn); if (insn_code > DFA__ADVANCE_CYCLE) insn_code = DFA__ADVANCE_CYCLE; } fputs (reservation_names[insn_code], f); } #if CPU_UNITS_QUERY int get_cpu_unit_code (cpu_unit_name) const char *cpu_unit_name; { struct name_code {const char *name; int code;}; int cmp, l, m, h; static struct name_code name_code_table [] = { }; /* The following is binary search: */ l = 0; h = sizeof (name_code_table) / sizeof (struct name_code) - 1; while (l <= h) { m = (l + h) / 2; cmp = strcmp (cpu_unit_name, name_code_table [m].name); if (cmp < 0) h = m - 1; else if (cmp > 0) l = m + 1; else return name_code_table [m].code; } return -1; } int cpu_unit_reservation_p (state, cpu_unit_code) state_t state; int cpu_unit_code; { if (cpu_unit_code < 0 || cpu_unit_code >= 0) abort (); if ((pentium_reserved_units [((struct DFA_chip *) state)->pentium_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((pentium_fpu_reserved_units [((struct DFA_chip *) state)->pentium_fpu_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((ppro_decoder_reserved_units [((struct DFA_chip *) state)->ppro_decoder_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((ppro_core_reserved_units [((struct DFA_chip *) state)->ppro_core_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((ppro_idiv_reserved_units [((struct DFA_chip *) state)->ppro_idiv_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((ppro_fdiv_reserved_units [((struct DFA_chip *) state)->ppro_fdiv_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((ppro_load_reserved_units [((struct DFA_chip *) state)->ppro_load_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((ppro_store_reserved_units [((struct DFA_chip *) state)->ppro_store_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((k6_decoder_reserved_units [((struct DFA_chip *) state)->k6_decoder_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((k6_load_unit_reserved_units [((struct DFA_chip *) state)->k6_load_unit_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((k6_store_unit_reserved_units [((struct DFA_chip *) state)->k6_store_unit_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((k6_integer_units_reserved_units [((struct DFA_chip *) state)->k6_integer_units_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((k6_fpu_unit_reserved_units [((struct DFA_chip *) state)->k6_fpu_unit_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((k6_branch_unit_reserved_units [((struct DFA_chip *) state)->k6_branch_unit_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) 
return 1; if ((athlon_reserved_units [((struct DFA_chip *) state)->athlon_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((athlon_load_reserved_units [((struct DFA_chip *) state)->athlon_load_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((athlon_mult_reserved_units [((struct DFA_chip *) state)->athlon_mult_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; if ((athlon_fp_reserved_units [((struct DFA_chip *) state)->athlon_fp_automaton_state * 0 + cpu_unit_code / 8] >> (cpu_unit_code % 8)) & 1) return 1; return 0; } #endif /* #if CPU_UNITS_QUERY */ void dfa_clean_insn_cache (void) { int i; for (i = 0; i < dfa_insn_codes_length; i++) dfa_insn_codes [i] = -1; } void dfa_start (void) { dfa_insn_codes_length = get_max_uid (); dfa_insn_codes = xmalloc (dfa_insn_codes_length * sizeof (int)); dfa_clean_insn_cache (); } void dfa_finish (void) { free (dfa_insn_codes); } int length_unit_log = 0; #undef operands /* Generated automatically by the program `genemit' from the machine description file `md'. */ /* Definitions for code generation pass of GNU compiler. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_OPTABS_H #define GCC_OPTABS_H /* Generated automatically by the program `gencodes' from the machine description file `md'. 
*/ #ifndef GCC_INSN_CODES_H #define GCC_INSN_CODES_H enum insn_code { CODE_FOR_cmpdi_ccno_1_rex64 = 0, CODE_FOR_cmpdi_1_insn_rex64 = 2, CODE_FOR_cmpqi_ext_3_insn = 15, CODE_FOR_cmpqi_ext_3_insn_rex64 = 16, CODE_FOR_x86_fnstsw_1 = 28, CODE_FOR_x86_sahf_1 = 29, CODE_FOR_popsi1 = 40, CODE_FOR_movsi_insv_1 = 72, CODE_FOR_movdi_insv_1_rex64 = 73, CODE_FOR_pushdi2_rex64 = 76, CODE_FOR_popdi1 = 79, CODE_FOR_swapxf = 102, CODE_FOR_zero_extendhisi2_and = 103, CODE_FOR_zero_extendsidi2_32 = 111, CODE_FOR_zero_extendsidi2_rex64 = 113, CODE_FOR_zero_extendhidi2 = 115, CODE_FOR_zero_extendqidi2 = 116, CODE_FOR_extendsidi2_rex64 = 118, CODE_FOR_extendhidi2 = 119, CODE_FOR_extendqidi2 = 120, CODE_FOR_extendhisi2 = 121, CODE_FOR_extendqihi2 = 123, CODE_FOR_extendqisi2 = 124, CODE_FOR_truncdfsf2_noop = 132, CODE_FOR_truncdfsf2_sse_only = 139, CODE_FOR_truncxfsf2_noop = 141, CODE_FOR_truncxfdf2_noop = 144, CODE_FOR_fix_truncdi_nomemory = 148, CODE_FOR_fix_truncdi_memory = 149, CODE_FOR_fix_truncsfdi_sse = 150, CODE_FOR_fix_truncdfdi_sse = 151, CODE_FOR_fix_truncsi_nomemory = 153, CODE_FOR_fix_truncsi_memory = 154, CODE_FOR_fix_truncsfsi_sse = 155, CODE_FOR_fix_truncdfsi_sse = 156, CODE_FOR_fix_trunchi_nomemory = 158, CODE_FOR_fix_trunchi_memory = 159, CODE_FOR_x86_fnstcw_1 = 160, CODE_FOR_x86_fldcw_1 = 161, CODE_FOR_floathixf2 = 174, CODE_FOR_floatsixf2 = 175, CODE_FOR_floatdixf2 = 176, CODE_FOR_adddi3_carry_rex64 = 178, CODE_FOR_addqi3_carry = 180, CODE_FOR_addhi3_carry = 181, CODE_FOR_addsi3_carry = 182, CODE_FOR_addqi3_cc = 185, CODE_FOR_addsi_1_zext = 202, CODE_FOR_addqi_ext_1 = 222, CODE_FOR_subdi3_carry_rex64 = 226, CODE_FOR_subqi3_carry = 230, CODE_FOR_subhi3_carry = 231, CODE_FOR_subsi3_carry = 232, CODE_FOR_subsi3_carry_zext = 233, CODE_FOR_divqi3 = 264, CODE_FOR_udivqi3 = 265, CODE_FOR_divmodhi4 = 272, CODE_FOR_udivmoddi4 = 273, CODE_FOR_udivmodsi4 = 275, CODE_FOR_testsi_1 = 279, CODE_FOR_andqi_ext_0 = 300, CODE_FOR_iorqi_ext_0 = 323, CODE_FOR_xorqi_ext_0 = 342, CODE_FOR_negsf2_memory = 362, CODE_FOR_negsf2_ifs = 363, CODE_FOR_negdf2_memory = 365, CODE_FOR_negdf2_ifs = 366, CODE_FOR_abssf2_memory = 377, CODE_FOR_abssf2_ifs = 378, CODE_FOR_absdf2_memory = 380, CODE_FOR_absdf2_ifs = 381, CODE_FOR_ashldi3_1 = 404, CODE_FOR_x86_shld_1 = 406, CODE_FOR_ashrdi3_63_rex64 = 417, CODE_FOR_ashrdi3_1 = 422, CODE_FOR_x86_shrd_1 = 424, CODE_FOR_ashrsi3_31 = 425, CODE_FOR_lshrdi3_1 = 449, CODE_FOR_setcc_2 = 494, CODE_FOR_jump = 509, CODE_FOR_doloop_end_internal = 514, CODE_FOR_blockage = 523, CODE_FOR_return_internal = 524, CODE_FOR_return_internal_long = 525, CODE_FOR_return_pop_internal = 526, CODE_FOR_return_indirect_internal = 527, CODE_FOR_nop = 528, CODE_FOR_align = 529, CODE_FOR_set_got = 530, CODE_FOR_eh_return_si = 531, CODE_FOR_eh_return_di = 532, CODE_FOR_leave = 533, CODE_FOR_leave_rex64 = 534, CODE_FOR_ctzsi2 = 540, CODE_FOR_ctzdi2 = 541, CODE_FOR_sqrtsf2_1 = 581, CODE_FOR_sqrtsf2_1_sse_only = 582, CODE_FOR_sqrtsf2_i387 = 583, CODE_FOR_sqrtdf2_1 = 584, CODE_FOR_sqrtdf2_1_sse_only = 585, CODE_FOR_sqrtdf2_i387 = 586, CODE_FOR_sqrtxf2 = 588, CODE_FOR_fpremxf4 = 591, CODE_FOR_fprem1xf4 = 592, CODE_FOR_sincosdf3 = 601, CODE_FOR_sincossf3 = 602, CODE_FOR_sincosxf3 = 604, CODE_FOR_atan2df3_1 = 608, CODE_FOR_atan2sf3_1 = 609, CODE_FOR_atan2xf3_1 = 610, CODE_FOR_fyl2x_xf3 = 611, CODE_FOR_fyl2xp1_xf3 = 612, CODE_FOR_cld = 617, CODE_FOR_x86_movdicc_0_m1_rex64 = 648, CODE_FOR_movdicc_c_rex64 = 649, CODE_FOR_x86_movsicc_0_m1 = 650, CODE_FOR_pro_epilogue_adjust_stack_1 = 670, 
CODE_FOR_pro_epilogue_adjust_stack_rex64 = 671, CODE_FOR_pro_epilogue_adjust_stack_rex64_2 = 672, CODE_FOR_sse_movsfcc = 673, CODE_FOR_sse_movsfcc_eq = 674, CODE_FOR_sse_movdfcc = 675, CODE_FOR_sse_movdfcc_eq = 676, CODE_FOR_allocate_stack_worker_1 = 685, CODE_FOR_allocate_stack_worker_rex64 = 686, CODE_FOR_trap = 696, CODE_FOR_movv4sf_internal = 698, CODE_FOR_movv4si_internal = 699, CODE_FOR_movv2di_internal = 700, CODE_FOR_movv8qi_internal = 701, CODE_FOR_movv4hi_internal = 702, CODE_FOR_movv2si_internal = 703, CODE_FOR_movv2sf_internal = 704, CODE_FOR_movv2df_internal = 705, CODE_FOR_movv8hi_internal = 706, CODE_FOR_movv16qi_internal = 707, CODE_FOR_movti_internal = 719, CODE_FOR_sse_movmskps = 724, CODE_FOR_mmx_pmovmskb = 725, CODE_FOR_mmx_maskmovq = 726, CODE_FOR_mmx_maskmovq_rex = 727, CODE_FOR_sse_movntv4sf = 728, CODE_FOR_sse_movntdi = 729, CODE_FOR_sse_movhlps = 730, CODE_FOR_sse_movlhps = 731, CODE_FOR_sse_movhps = 732, CODE_FOR_sse_movlps = 733, CODE_FOR_sse_loadss_1 = 734, CODE_FOR_sse_movss = 735, CODE_FOR_sse_storess = 736, CODE_FOR_sse_shufps = 737, CODE_FOR_addv4sf3 = 738, CODE_FOR_vmaddv4sf3 = 739, CODE_FOR_subv4sf3 = 740, CODE_FOR_vmsubv4sf3 = 741, CODE_FOR_mulv4sf3 = 742, CODE_FOR_vmmulv4sf3 = 743, CODE_FOR_divv4sf3 = 744, CODE_FOR_vmdivv4sf3 = 745, CODE_FOR_rcpv4sf2 = 746, CODE_FOR_vmrcpv4sf2 = 747, CODE_FOR_rsqrtv4sf2 = 748, CODE_FOR_vmrsqrtv4sf2 = 749, CODE_FOR_sqrtv4sf2 = 750, CODE_FOR_vmsqrtv4sf2 = 751, CODE_FOR_sse2_andv2di3 = 761, CODE_FOR_sse2_nandv2di3 = 763, CODE_FOR_sse2_iorv2di3 = 765, CODE_FOR_sse2_xorv2di3 = 767, CODE_FOR_sse_clrv4sf = 768, CODE_FOR_sse_clrv2df = 769, CODE_FOR_maskcmpv4sf3 = 770, CODE_FOR_maskncmpv4sf3 = 771, CODE_FOR_vmmaskcmpv4sf3 = 772, CODE_FOR_vmmaskncmpv4sf3 = 773, CODE_FOR_sse_comi = 774, CODE_FOR_sse_ucomi = 775, CODE_FOR_sse_unpckhps = 776, CODE_FOR_sse_unpcklps = 777, CODE_FOR_smaxv4sf3 = 778, CODE_FOR_vmsmaxv4sf3 = 779, CODE_FOR_sminv4sf3 = 780, CODE_FOR_vmsminv4sf3 = 781, CODE_FOR_cvtpi2ps = 782, CODE_FOR_cvtps2pi = 783, CODE_FOR_cvttps2pi = 784, CODE_FOR_cvtsi2ss = 785, CODE_FOR_cvtsi2ssq = 786, CODE_FOR_cvtss2si = 787, CODE_FOR_cvtss2siq = 788, CODE_FOR_cvttss2si = 789, CODE_FOR_cvttss2siq = 790, CODE_FOR_addv8qi3 = 791, CODE_FOR_addv4hi3 = 792, CODE_FOR_addv2si3 = 793, CODE_FOR_mmx_adddi3 = 794, CODE_FOR_ssaddv8qi3 = 795, CODE_FOR_ssaddv4hi3 = 796, CODE_FOR_usaddv8qi3 = 797, CODE_FOR_usaddv4hi3 = 798, CODE_FOR_subv8qi3 = 799, CODE_FOR_subv4hi3 = 800, CODE_FOR_subv2si3 = 801, CODE_FOR_mmx_subdi3 = 802, CODE_FOR_sssubv8qi3 = 803, CODE_FOR_sssubv4hi3 = 804, CODE_FOR_ussubv8qi3 = 805, CODE_FOR_ussubv4hi3 = 806, CODE_FOR_mulv4hi3 = 807, CODE_FOR_smulv4hi3_highpart = 808, CODE_FOR_umulv4hi3_highpart = 809, CODE_FOR_mmx_pmaddwd = 810, CODE_FOR_mmx_iordi3 = 811, CODE_FOR_mmx_xordi3 = 812, CODE_FOR_mmx_clrdi = 813, CODE_FOR_mmx_anddi3 = 814, CODE_FOR_mmx_nanddi3 = 815, CODE_FOR_mmx_uavgv8qi3 = 816, CODE_FOR_mmx_uavgv4hi3 = 817, CODE_FOR_mmx_psadbw = 818, CODE_FOR_mmx_pinsrw = 819, CODE_FOR_mmx_pextrw = 820, CODE_FOR_mmx_pshufw = 821, CODE_FOR_eqv8qi3 = 822, CODE_FOR_eqv4hi3 = 823, CODE_FOR_eqv2si3 = 824, CODE_FOR_gtv8qi3 = 825, CODE_FOR_gtv4hi3 = 826, CODE_FOR_gtv2si3 = 827, CODE_FOR_umaxv8qi3 = 828, CODE_FOR_smaxv4hi3 = 829, CODE_FOR_uminv8qi3 = 830, CODE_FOR_sminv4hi3 = 831, CODE_FOR_ashrv4hi3 = 832, CODE_FOR_ashrv2si3 = 833, CODE_FOR_lshrv4hi3 = 834, CODE_FOR_lshrv2si3 = 835, CODE_FOR_mmx_lshrdi3 = 836, CODE_FOR_ashlv4hi3 = 837, CODE_FOR_ashlv2si3 = 838, CODE_FOR_mmx_ashldi3 = 839, CODE_FOR_mmx_packsswb = 840, CODE_FOR_mmx_packssdw 
= 841, CODE_FOR_mmx_packuswb = 842, CODE_FOR_mmx_punpckhbw = 843, CODE_FOR_mmx_punpckhwd = 844, CODE_FOR_mmx_punpckhdq = 845, CODE_FOR_mmx_punpcklbw = 846, CODE_FOR_mmx_punpcklwd = 847, CODE_FOR_mmx_punpckldq = 848, CODE_FOR_emms = 849, CODE_FOR_ldmxcsr = 850, CODE_FOR_stmxcsr = 851, CODE_FOR_addv2sf3 = 854, CODE_FOR_subv2sf3 = 855, CODE_FOR_subrv2sf3 = 856, CODE_FOR_gtv2sf3 = 857, CODE_FOR_gev2sf3 = 858, CODE_FOR_eqv2sf3 = 859, CODE_FOR_pfmaxv2sf3 = 860, CODE_FOR_pfminv2sf3 = 861, CODE_FOR_mulv2sf3 = 862, CODE_FOR_femms = 863, CODE_FOR_pf2id = 864, CODE_FOR_pf2iw = 865, CODE_FOR_pfacc = 866, CODE_FOR_pfnacc = 867, CODE_FOR_pfpnacc = 868, CODE_FOR_pi2fw = 869, CODE_FOR_floatv2si2 = 870, CODE_FOR_pavgusb = 871, CODE_FOR_pfrcpv2sf2 = 872, CODE_FOR_pfrcpit1v2sf3 = 873, CODE_FOR_pfrcpit2v2sf3 = 874, CODE_FOR_pfrsqrtv2sf2 = 875, CODE_FOR_pfrsqit1v2sf3 = 876, CODE_FOR_pmulhrwv4hi3 = 877, CODE_FOR_pswapdv2si2 = 878, CODE_FOR_pswapdv2sf2 = 879, CODE_FOR_addv2df3 = 884, CODE_FOR_vmaddv2df3 = 885, CODE_FOR_subv2df3 = 886, CODE_FOR_vmsubv2df3 = 887, CODE_FOR_mulv2df3 = 888, CODE_FOR_vmmulv2df3 = 889, CODE_FOR_divv2df3 = 890, CODE_FOR_vmdivv2df3 = 891, CODE_FOR_smaxv2df3 = 892, CODE_FOR_vmsmaxv2df3 = 893, CODE_FOR_sminv2df3 = 894, CODE_FOR_vmsminv2df3 = 895, CODE_FOR_sqrtv2df2 = 896, CODE_FOR_vmsqrtv2df2 = 897, CODE_FOR_maskcmpv2df3 = 898, CODE_FOR_maskncmpv2df3 = 899, CODE_FOR_vmmaskcmpv2df3 = 900, CODE_FOR_vmmaskncmpv2df3 = 901, CODE_FOR_sse2_comi = 902, CODE_FOR_sse2_ucomi = 903, CODE_FOR_sse2_movmskpd = 904, CODE_FOR_sse2_pmovmskb = 905, CODE_FOR_sse2_maskmovdqu = 906, CODE_FOR_sse2_maskmovdqu_rex64 = 907, CODE_FOR_sse2_movntv2df = 908, CODE_FOR_sse2_movntv2di = 909, CODE_FOR_sse2_movntsi = 910, CODE_FOR_cvtdq2ps = 911, CODE_FOR_cvtps2dq = 912, CODE_FOR_cvttps2dq = 913, CODE_FOR_cvtdq2pd = 914, CODE_FOR_cvtpd2dq = 915, CODE_FOR_cvttpd2dq = 916, CODE_FOR_cvtpd2pi = 917, CODE_FOR_cvttpd2pi = 918, CODE_FOR_cvtpi2pd = 919, CODE_FOR_cvtsd2si = 920, CODE_FOR_cvtsd2siq = 921, CODE_FOR_cvttsd2si = 922, CODE_FOR_cvttsd2siq = 923, CODE_FOR_cvtsi2sd = 924, CODE_FOR_cvtsi2sdq = 925, CODE_FOR_cvtsd2ss = 926, CODE_FOR_cvtss2sd = 927, CODE_FOR_cvtpd2ps = 928, CODE_FOR_cvtps2pd = 929, CODE_FOR_addv16qi3 = 930, CODE_FOR_addv8hi3 = 931, CODE_FOR_addv4si3 = 932, CODE_FOR_addv2di3 = 933, CODE_FOR_ssaddv16qi3 = 934, CODE_FOR_ssaddv8hi3 = 935, CODE_FOR_usaddv16qi3 = 936, CODE_FOR_usaddv8hi3 = 937, CODE_FOR_subv16qi3 = 938, CODE_FOR_subv8hi3 = 939, CODE_FOR_subv4si3 = 940, CODE_FOR_subv2di3 = 941, CODE_FOR_sssubv16qi3 = 942, CODE_FOR_sssubv8hi3 = 943, CODE_FOR_ussubv16qi3 = 944, CODE_FOR_ussubv8hi3 = 945, CODE_FOR_mulv8hi3 = 946, CODE_FOR_smulv8hi3_highpart = 947, CODE_FOR_umulv8hi3_highpart = 948, CODE_FOR_sse2_umulsidi3 = 949, CODE_FOR_sse2_umulv2siv2di3 = 950, CODE_FOR_sse2_pmaddwd = 951, CODE_FOR_sse2_clrti = 952, CODE_FOR_sse2_uavgv16qi3 = 953, CODE_FOR_sse2_uavgv8hi3 = 954, CODE_FOR_sse2_psadbw = 955, CODE_FOR_sse2_pinsrw = 956, CODE_FOR_sse2_pextrw = 957, CODE_FOR_sse2_pshufd = 958, CODE_FOR_sse2_pshuflw = 959, CODE_FOR_sse2_pshufhw = 960, CODE_FOR_eqv16qi3 = 961, CODE_FOR_eqv8hi3 = 962, CODE_FOR_eqv4si3 = 963, CODE_FOR_gtv16qi3 = 964, CODE_FOR_gtv8hi3 = 965, CODE_FOR_gtv4si3 = 966, CODE_FOR_umaxv16qi3 = 967, CODE_FOR_smaxv8hi3 = 968, CODE_FOR_uminv16qi3 = 969, CODE_FOR_sminv8hi3 = 970, CODE_FOR_ashrv8hi3 = 971, CODE_FOR_ashrv4si3 = 972, CODE_FOR_lshrv8hi3 = 973, CODE_FOR_lshrv4si3 = 974, CODE_FOR_lshrv2di3 = 975, CODE_FOR_ashlv8hi3 = 976, CODE_FOR_ashlv4si3 = 977, CODE_FOR_ashlv2di3 = 978, CODE_FOR_ashrv8hi3_ti 
= 979, CODE_FOR_ashrv4si3_ti = 980, CODE_FOR_lshrv8hi3_ti = 981, CODE_FOR_lshrv4si3_ti = 982, CODE_FOR_lshrv2di3_ti = 983, CODE_FOR_ashlv8hi3_ti = 984, CODE_FOR_ashlv4si3_ti = 985, CODE_FOR_ashlv2di3_ti = 986, CODE_FOR_sse2_ashlti3 = 987, CODE_FOR_sse2_lshrti3 = 988, CODE_FOR_sse2_unpckhpd = 989, CODE_FOR_sse2_unpcklpd = 990, CODE_FOR_sse2_packsswb = 991, CODE_FOR_sse2_packssdw = 992, CODE_FOR_sse2_packuswb = 993, CODE_FOR_sse2_punpckhbw = 994, CODE_FOR_sse2_punpckhwd = 995, CODE_FOR_sse2_punpckhdq = 996, CODE_FOR_sse2_punpcklbw = 997, CODE_FOR_sse2_punpcklwd = 998, CODE_FOR_sse2_punpckldq = 999, CODE_FOR_sse2_punpcklqdq = 1000, CODE_FOR_sse2_punpckhqdq = 1001, CODE_FOR_sse2_movapd = 1002, CODE_FOR_sse2_movupd = 1003, CODE_FOR_sse2_movdqa = 1004, CODE_FOR_sse2_movdqu = 1005, CODE_FOR_sse2_movdq2q = 1006, CODE_FOR_sse2_movdq2q_rex64 = 1007, CODE_FOR_sse2_movq2dq = 1008, CODE_FOR_sse2_movq2dq_rex64 = 1009, CODE_FOR_sse2_movq = 1010, CODE_FOR_sse2_loadd = 1011, CODE_FOR_sse2_stored = 1012, CODE_FOR_sse2_movhpd = 1013, CODE_FOR_sse2_loadsd_1 = 1014, CODE_FOR_sse2_movsd = 1015, CODE_FOR_sse2_storesd = 1016, CODE_FOR_sse2_shufpd = 1017, CODE_FOR_sse2_clflush = 1018, CODE_FOR_mwait = 1021, CODE_FOR_monitor = 1022, CODE_FOR_addsubv4sf3 = 1023, CODE_FOR_addsubv2df3 = 1024, CODE_FOR_haddv4sf3 = 1025, CODE_FOR_haddv2df3 = 1026, CODE_FOR_hsubv4sf3 = 1027, CODE_FOR_hsubv2df3 = 1028, CODE_FOR_movshdup = 1029, CODE_FOR_movsldup = 1030, CODE_FOR_lddqu = 1031, CODE_FOR_loadddup = 1032, CODE_FOR_movddup = 1033, CODE_FOR_cmpdi = 1034, CODE_FOR_cmpsi = 1035, CODE_FOR_cmphi = 1036, CODE_FOR_cmpqi = 1037, CODE_FOR_cmpdi_1_rex64 = 1038, CODE_FOR_cmpsi_1 = 1039, CODE_FOR_cmpqi_ext_3 = 1040, CODE_FOR_cmpxf = 1041, CODE_FOR_cmpdf = 1042, CODE_FOR_cmpsf = 1043, CODE_FOR_movsi = 1045, CODE_FOR_movhi = 1046, CODE_FOR_movstricthi = 1047, CODE_FOR_movqi = 1048, CODE_FOR_reload_outqi = 1049, CODE_FOR_movstrictqi = 1050, CODE_FOR_movdi = 1051, CODE_FOR_movsf = 1060, CODE_FOR_movdf = 1064, CODE_FOR_movxf = 1069, CODE_FOR_zero_extendhisi2 = 1075, CODE_FOR_zero_extendqihi2 = 1077, CODE_FOR_zero_extendqisi2 = 1081, CODE_FOR_zero_extendsidi2 = 1085, CODE_FOR_extendsidi2 = 1089, CODE_FOR_extendsfdf2 = 1099, CODE_FOR_extendsfxf2 = 1100, CODE_FOR_extenddfxf2 = 1101, CODE_FOR_truncdfsf2 = 1102, CODE_FOR_truncxfsf2 = 1107, CODE_FOR_truncxfdf2 = 1110, CODE_FOR_fix_truncxfdi2 = 1113, CODE_FOR_fix_truncdfdi2 = 1114, CODE_FOR_fix_truncsfdi2 = 1115, CODE_FOR_fix_truncxfsi2 = 1121, CODE_FOR_fix_truncdfsi2 = 1122, CODE_FOR_fix_truncsfsi2 = 1123, CODE_FOR_fix_truncxfhi2 = 1129, CODE_FOR_fix_truncdfhi2 = 1130, CODE_FOR_fix_truncsfhi2 = 1131, CODE_FOR_floathisf2 = 1135, CODE_FOR_floatsisf2 = 1136, CODE_FOR_floatdisf2 = 1138, CODE_FOR_floathidf2 = 1140, CODE_FOR_floatsidf2 = 1141, CODE_FOR_floatdidf2 = 1142, CODE_FOR_floatunssisf2 = 1144, CODE_FOR_floatunsdisf2 = 1145, CODE_FOR_floatunsdidf2 = 1146, CODE_FOR_vec_setv2df = 1147, CODE_FOR_vec_extractv2df = 1148, CODE_FOR_vec_initv2df = 1149, CODE_FOR_vec_setv4sf = 1150, CODE_FOR_vec_extractv4sf = 1151, CODE_FOR_vec_initv4sf = 1152, CODE_FOR_adddi3 = 1153, CODE_FOR_addsi3 = 1155, CODE_FOR_addhi3 = 1165, CODE_FOR_addqi3 = 1166, CODE_FOR_addxf3 = 1167, CODE_FOR_adddf3 = 1168, CODE_FOR_addsf3 = 1169, CODE_FOR_subdi3 = 1170, CODE_FOR_subsi3 = 1172, CODE_FOR_subhi3 = 1173, CODE_FOR_subqi3 = 1174, CODE_FOR_subxf3 = 1175, CODE_FOR_subdf3 = 1176, CODE_FOR_subsf3 = 1177, CODE_FOR_muldi3 = 1178, CODE_FOR_mulsi3 = 1179, CODE_FOR_mulhi3 = 1180, CODE_FOR_mulqi3 = 1181, CODE_FOR_umulqihi3 = 1182, 
CODE_FOR_mulqihi3 = 1183, CODE_FOR_umulditi3 = 1184, CODE_FOR_umulsidi3 = 1185, CODE_FOR_mulditi3 = 1186, CODE_FOR_mulsidi3 = 1187, CODE_FOR_umuldi3_highpart = 1188, CODE_FOR_umulsi3_highpart = 1189, CODE_FOR_smuldi3_highpart = 1190, CODE_FOR_smulsi3_highpart = 1191, CODE_FOR_mulxf3 = 1192, CODE_FOR_muldf3 = 1193, CODE_FOR_mulsf3 = 1194, CODE_FOR_divxf3 = 1195, CODE_FOR_divdf3 = 1196, CODE_FOR_divsf3 = 1197, CODE_FOR_divmoddi4 = 1198, CODE_FOR_divmodsi4 = 1200, CODE_FOR_udivmodhi4 = 1204, CODE_FOR_testsi_ccno_1 = 1205, CODE_FOR_testqi_ccz_1 = 1206, CODE_FOR_testqi_ext_ccno_0 = 1207, CODE_FOR_anddi3 = 1211, CODE_FOR_andsi3 = 1212, CODE_FOR_andhi3 = 1216, CODE_FOR_andqi3 = 1217, CODE_FOR_iordi3 = 1220, CODE_FOR_iorsi3 = 1221, CODE_FOR_iorhi3 = 1222, CODE_FOR_iorqi3 = 1223, CODE_FOR_xordi3 = 1226, CODE_FOR_xorsi3 = 1227, CODE_FOR_xorhi3 = 1228, CODE_FOR_xorqi3 = 1229, CODE_FOR_xorqi_cc_ext_1 = 1230, CODE_FOR_negdi2 = 1233, CODE_FOR_negsi2 = 1235, CODE_FOR_neghi2 = 1236, CODE_FOR_negqi2 = 1237, CODE_FOR_negsf2 = 1238, CODE_FOR_negdf2 = 1245, CODE_FOR_negxf2 = 1252, CODE_FOR_abssf2 = 1255, CODE_FOR_absdf2 = 1262, CODE_FOR_absxf2 = 1268, CODE_FOR_one_cmpldi2 = 1271, CODE_FOR_one_cmplsi2 = 1273, CODE_FOR_one_cmplhi2 = 1276, CODE_FOR_one_cmplqi2 = 1278, CODE_FOR_ashldi3 = 1280, CODE_FOR_x86_shift_adj_1 = 1284, CODE_FOR_x86_shift_adj_2 = 1285, CODE_FOR_ashlsi3 = 1286, CODE_FOR_ashlhi3 = 1290, CODE_FOR_ashlqi3 = 1291, CODE_FOR_ashrdi3 = 1292, CODE_FOR_x86_shift_adj_3 = 1295, CODE_FOR_ashrsi3 = 1296, CODE_FOR_ashrhi3 = 1297, CODE_FOR_ashrqi3 = 1298, CODE_FOR_lshrdi3 = 1299, CODE_FOR_lshrsi3 = 1302, CODE_FOR_lshrhi3 = 1303, CODE_FOR_lshrqi3 = 1304, CODE_FOR_rotldi3 = 1305, CODE_FOR_rotlsi3 = 1306, CODE_FOR_rotlhi3 = 1307, CODE_FOR_rotlqi3 = 1308, CODE_FOR_rotrdi3 = 1309, CODE_FOR_rotrsi3 = 1310, CODE_FOR_rotrhi3 = 1311, CODE_FOR_rotrqi3 = 1312, CODE_FOR_extv = 1313, CODE_FOR_extzv = 1314, CODE_FOR_insv = 1315, CODE_FOR_seq = 1316, CODE_FOR_sne = 1317, CODE_FOR_sgt = 1318, CODE_FOR_sgtu = 1319, CODE_FOR_slt = 1320, CODE_FOR_sltu = 1321, CODE_FOR_sge = 1322, CODE_FOR_sgeu = 1323, CODE_FOR_sle = 1324, CODE_FOR_sleu = 1325, CODE_FOR_sunordered = 1326, CODE_FOR_sordered = 1327, CODE_FOR_suneq = 1328, CODE_FOR_sunge = 1329, CODE_FOR_sungt = 1330, CODE_FOR_sunle = 1331, CODE_FOR_sunlt = 1332, CODE_FOR_sltgt = 1333, CODE_FOR_beq = 1338, CODE_FOR_bne = 1339, CODE_FOR_bgt = 1340, CODE_FOR_bgtu = 1341, CODE_FOR_blt = 1342, CODE_FOR_bltu = 1343, CODE_FOR_bge = 1344, CODE_FOR_bgeu = 1345, CODE_FOR_ble = 1346, CODE_FOR_bleu = 1347, CODE_FOR_bunordered = 1348, CODE_FOR_bordered = 1349, CODE_FOR_buneq = 1350, CODE_FOR_bunge = 1351, CODE_FOR_bungt = 1352, CODE_FOR_bunle = 1353, CODE_FOR_bunlt = 1354, CODE_FOR_bltgt = 1355, CODE_FOR_indirect_jump = 1360, CODE_FOR_tablejump = 1361, CODE_FOR_doloop_end = 1362, CODE_FOR_call_pop = 1367, CODE_FOR_call = 1368, CODE_FOR_sibcall = 1369, CODE_FOR_call_value_pop = 1370, CODE_FOR_call_value = 1371, CODE_FOR_sibcall_value = 1372, CODE_FOR_untyped_call = 1373, CODE_FOR_return = 1374, CODE_FOR_prologue = 1375, CODE_FOR_epilogue = 1376, CODE_FOR_sibcall_epilogue = 1377, CODE_FOR_eh_return = 1378, CODE_FOR_ffssi2 = 1381, CODE_FOR_ffsdi2 = 1384, CODE_FOR_clzsi2 = 1386, CODE_FOR_clzdi2 = 1387, CODE_FOR_tls_global_dynamic_32 = 1388, CODE_FOR_tls_global_dynamic_64 = 1389, CODE_FOR_tls_local_dynamic_base_32 = 1390, CODE_FOR_tls_local_dynamic_base_64 = 1391, CODE_FOR_sqrtsf2 = 1395, CODE_FOR_sqrtdf2 = 1396, CODE_FOR_fmodsf3 = 1397, CODE_FOR_fmoddf3 = 1398, CODE_FOR_fmodxf3 = 1399, 
CODE_FOR_dremsf3 = 1400, CODE_FOR_dremdf3 = 1401, CODE_FOR_dremxf3 = 1402, CODE_FOR_tandf2 = 1412, CODE_FOR_tansf2 = 1414, CODE_FOR_tanxf2 = 1416, CODE_FOR_atan2df3 = 1417, CODE_FOR_atandf2 = 1418, CODE_FOR_atan2sf3 = 1419, CODE_FOR_atansf2 = 1420, CODE_FOR_atan2xf3 = 1421, CODE_FOR_atanxf2 = 1422, CODE_FOR_asindf2 = 1423, CODE_FOR_asinsf2 = 1424, CODE_FOR_asinxf2 = 1425, CODE_FOR_acosdf2 = 1426, CODE_FOR_acossf2 = 1427, CODE_FOR_acosxf2 = 1428, CODE_FOR_logsf2 = 1429, CODE_FOR_logdf2 = 1430, CODE_FOR_logxf2 = 1431, CODE_FOR_log10sf2 = 1432, CODE_FOR_log10df2 = 1433, CODE_FOR_log10xf2 = 1434, CODE_FOR_log2sf2 = 1435, CODE_FOR_log2df2 = 1436, CODE_FOR_log2xf2 = 1437, CODE_FOR_log1psf2 = 1438, CODE_FOR_log1pdf2 = 1439, CODE_FOR_log1pxf2 = 1440, CODE_FOR_logbsf2 = 1441, CODE_FOR_logbdf2 = 1442, CODE_FOR_logbxf2 = 1443, CODE_FOR_ilogbsi2 = 1444, CODE_FOR_expsf2 = 1445, CODE_FOR_expdf2 = 1446, CODE_FOR_expxf2 = 1447, CODE_FOR_exp10sf2 = 1448, CODE_FOR_exp10df2 = 1449, CODE_FOR_exp10xf2 = 1450, CODE_FOR_exp2sf2 = 1451, CODE_FOR_exp2df2 = 1452, CODE_FOR_exp2xf2 = 1453, CODE_FOR_expm1df2 = 1454, CODE_FOR_expm1sf2 = 1455, CODE_FOR_expm1xf2 = 1456, CODE_FOR_movstrsi = 1457, CODE_FOR_movstrdi = 1458, CODE_FOR_strmov = 1459, CODE_FOR_strmov_singleop = 1460, CODE_FOR_rep_mov = 1461, CODE_FOR_clrstrsi = 1462, CODE_FOR_clrstrdi = 1463, CODE_FOR_strset = 1464, CODE_FOR_strset_singleop = 1465, CODE_FOR_rep_stos = 1466, CODE_FOR_cmpstrsi = 1467, CODE_FOR_cmpintqi = 1468, CODE_FOR_cmpstrqi_nz_1 = 1469, CODE_FOR_cmpstrqi_1 = 1470, CODE_FOR_strlensi = 1471, CODE_FOR_strlendi = 1472, CODE_FOR_strlenqi_1 = 1473, CODE_FOR_movdicc = 1476, CODE_FOR_movsicc = 1477, CODE_FOR_movhicc = 1478, CODE_FOR_movqicc = 1479, CODE_FOR_movsfcc = 1481, CODE_FOR_movdfcc = 1482, CODE_FOR_movxfcc = 1484, CODE_FOR_minsf3 = 1485, CODE_FOR_addqicc = 1487, CODE_FOR_addhicc = 1488, CODE_FOR_addsicc = 1489, CODE_FOR_adddicc = 1490, CODE_FOR_mindf3 = 1492, CODE_FOR_maxsf3 = 1495, CODE_FOR_maxdf3 = 1498, CODE_FOR_allocate_stack_worker = 1506, CODE_FOR_allocate_stack_worker_postreload = 1507, CODE_FOR_allocate_stack_worker_rex64_postreload = 1508, CODE_FOR_allocate_stack = 1509, CODE_FOR_builtin_setjmp_receiver = 1510, CODE_FOR_conditional_trap = 1579, CODE_FOR_movti = 1582, CODE_FOR_movtf = 1583, CODE_FOR_movv2df = 1584, CODE_FOR_movv8hi = 1585, CODE_FOR_movv16qi = 1586, CODE_FOR_movv4sf = 1587, CODE_FOR_movv4si = 1588, CODE_FOR_movv2di = 1589, CODE_FOR_movv2si = 1590, CODE_FOR_movv4hi = 1591, CODE_FOR_movv8qi = 1592, CODE_FOR_movv2sf = 1593, CODE_FOR_sse_movaps = 1598, CODE_FOR_sse_movups = 1599, CODE_FOR_sse_loadss = 1600, CODE_FOR_negv4sf2 = 1601, CODE_FOR_sse_andv4sf3 = 1602, CODE_FOR_sse_nandv4sf3 = 1603, CODE_FOR_sse_iorv4sf3 = 1604, CODE_FOR_sse_xorv4sf3 = 1605, CODE_FOR_sse2_andv2df3 = 1606, CODE_FOR_sse2_nandv2df3 = 1607, CODE_FOR_sse2_iorv2df3 = 1608, CODE_FOR_sse2_xorv2df3 = 1609, CODE_FOR_sfence = 1610, CODE_FOR_sse_prologue_save = 1611, CODE_FOR_prefetch = 1612, CODE_FOR_sse2_loadsd = 1613, CODE_FOR_sse2_mfence = 1614, CODE_FOR_sse2_lfence = 1615, CODE_FOR_nothing }; #endif /* GCC_INSN_CODES_H */ /* Optabs are tables saying how to generate insn bodies for various machine modes and numbers of operands. Each optab applies to one operation. For example, add_optab applies to addition. The insn_code slot is the enum insn_code that says how to generate an insn for this operation on a particular machine mode. It is CODE_FOR_nothing if there is no such insn on the target machine. 
The `lib_call' slot is the name of the library function that can be used to perform the operation. A few optabs, such as move_optab and cmp_optab, are used by special code. */ struct optab_handlers GTY(()) { enum insn_code insn_code; rtx libfunc; }; struct optab GTY(()) { enum rtx_code code; struct optab_handlers handlers[NUM_MACHINE_MODES]; }; typedef struct optab * optab; /* A convert_optab is for some sort of conversion operation between modes. The first array index is the destination mode, the second is the source mode. */ struct convert_optab GTY(()) { enum rtx_code code; struct optab_handlers handlers[NUM_MACHINE_MODES][NUM_MACHINE_MODES]; }; typedef struct convert_optab *convert_optab; /* Given an enum insn_code, access the function to construct the body of that kind of insn. */ #define GEN_FCN(CODE) (insn_data[CODE].genfun) /* Enumeration of valid indexes into optab_table. */ enum optab_index { OTI_add, OTI_addv, OTI_sub, OTI_subv, /* Signed and fp multiply */ OTI_smul, OTI_smulv, /* Signed multiply, return high word */ OTI_smul_highpart, OTI_umul_highpart, /* Signed multiply with result one machine mode wider than args */ OTI_smul_widen, OTI_umul_widen, /* Signed divide */ OTI_sdiv, OTI_sdivv, /* Signed divide-and-remainder in one */ OTI_sdivmod, OTI_udiv, OTI_udivmod, /* Signed remainder */ OTI_smod, OTI_umod, /* Floating point remainder functions */ OTI_fmod, OTI_drem, /* Convert float to integer in float fmt */ OTI_ftrunc, /* Logical and */ OTI_and, /* Logical or */ OTI_ior, /* Logical xor */ OTI_xor, /* Arithmetic shift left */ OTI_ashl, /* Logical shift right */ OTI_lshr, /* Arithmetic shift right */ OTI_ashr, /* Rotate left */ OTI_rotl, /* Rotate right */ OTI_rotr, /* Signed and floating-point minimum value */ OTI_smin, /* Signed and floating-point maximum value */ OTI_smax, /* Unsigned minimum value */ OTI_umin, /* Unsigned maximum value */ OTI_umax, /* Power */ OTI_pow, /* Arc tangent of y/x */ OTI_atan2, /* Move instruction. */ OTI_mov, /* Move, preserving high part of register. */ OTI_movstrict, /* Unary operations */ /* Negation */ OTI_neg, OTI_negv, /* Abs value */ OTI_abs, OTI_absv, /* Bitwise not */ OTI_one_cmpl, /* Bit scanning and counting */ OTI_ffs, OTI_clz, OTI_ctz, OTI_popcount, OTI_parity, /* Square root */ OTI_sqrt, /* Sine-Cosine */ OTI_sincos, /* Sine */ OTI_sin, /* Inverse sine */ OTI_asin, /* Cosine */ OTI_cos, /* Inverse cosine */ OTI_acos, /* Exponential */ OTI_exp, /* Base-10 Exponential */ OTI_exp10, /* Base-2 Exponential */ OTI_exp2, /* Exponential - 1*/ OTI_expm1, /* Radix-independent exponent */ OTI_logb, OTI_ilogb, /* Natural Logarithm */ OTI_log, /* Base-10 Logarithm */ OTI_log10, /* Base-2 Logarithm */ OTI_log2, /* logarithm of 1 plus argument */ OTI_log1p, /* Rounding functions */ OTI_floor, OTI_ceil, OTI_trunc, OTI_round, OTI_nearbyint, /* Tangent */ OTI_tan, /* Inverse tangent */ OTI_atan, /* Compare insn; two operands. */ OTI_cmp, /* Used only for libcalls for unsigned comparisons. */ OTI_ucmp, /* tst insn; compare one operand against 0 */ OTI_tst, /* Floating point comparison optabs - used primarily for libfuncs */ OTI_eq, OTI_ne, OTI_gt, OTI_ge, OTI_lt, OTI_le, OTI_unord, /* String length */ OTI_strlen, /* Combined compare & jump/store flags/move operations. */ OTI_cbranch, OTI_cmov, OTI_cstore, /* Push instruction. */ OTI_push, /* Conditional add instruction. */ OTI_addcc, /* Set specified field of vector operand. */ OTI_vec_set, /* Extract specified field of vector operand. */ OTI_vec_extract, /* Initialize vector operand. 
*/ OTI_vec_init, OTI_MAX }; extern GTY(()) optab optab_table[OTI_MAX]; #define add_optab (optab_table[OTI_add]) #define sub_optab (optab_table[OTI_sub]) #define smul_optab (optab_table[OTI_smul]) #define addv_optab (optab_table[OTI_addv]) #define subv_optab (optab_table[OTI_subv]) #define smul_highpart_optab (optab_table[OTI_smul_highpart]) #define umul_highpart_optab (optab_table[OTI_umul_highpart]) #define smul_widen_optab (optab_table[OTI_smul_widen]) #define umul_widen_optab (optab_table[OTI_umul_widen]) #define sdiv_optab (optab_table[OTI_sdiv]) #define smulv_optab (optab_table[OTI_smulv]) #define sdivv_optab (optab_table[OTI_sdivv]) #define sdivmod_optab (optab_table[OTI_sdivmod]) #define udiv_optab (optab_table[OTI_udiv]) #define udivmod_optab (optab_table[OTI_udivmod]) #define smod_optab (optab_table[OTI_smod]) #define umod_optab (optab_table[OTI_umod]) #define fmod_optab (optab_table[OTI_fmod]) #define drem_optab (optab_table[OTI_drem]) #define ftrunc_optab (optab_table[OTI_ftrunc]) #define and_optab (optab_table[OTI_and]) #define ior_optab (optab_table[OTI_ior]) #define xor_optab (optab_table[OTI_xor]) #define ashl_optab (optab_table[OTI_ashl]) #define lshr_optab (optab_table[OTI_lshr]) #define ashr_optab (optab_table[OTI_ashr]) #define rotl_optab (optab_table[OTI_rotl]) #define rotr_optab (optab_table[OTI_rotr]) #define smin_optab (optab_table[OTI_smin]) #define smax_optab (optab_table[OTI_smax]) #define umin_optab (optab_table[OTI_umin]) #define umax_optab (optab_table[OTI_umax]) #define pow_optab (optab_table[OTI_pow]) #define atan2_optab (optab_table[OTI_atan2]) #define mov_optab (optab_table[OTI_mov]) #define movstrict_optab (optab_table[OTI_movstrict]) #define neg_optab (optab_table[OTI_neg]) #define negv_optab (optab_table[OTI_negv]) #define abs_optab (optab_table[OTI_abs]) #define absv_optab (optab_table[OTI_absv]) #define one_cmpl_optab (optab_table[OTI_one_cmpl]) #define ffs_optab (optab_table[OTI_ffs]) #define clz_optab (optab_table[OTI_clz]) #define ctz_optab (optab_table[OTI_ctz]) #define popcount_optab (optab_table[OTI_popcount]) #define parity_optab (optab_table[OTI_parity]) #define sqrt_optab (optab_table[OTI_sqrt]) #define sincos_optab (optab_table[OTI_sincos]) #define sin_optab (optab_table[OTI_sin]) #define asin_optab (optab_table[OTI_asin]) #define cos_optab (optab_table[OTI_cos]) #define acos_optab (optab_table[OTI_acos]) #define exp_optab (optab_table[OTI_exp]) #define exp10_optab (optab_table[OTI_exp10]) #define exp2_optab (optab_table[OTI_exp2]) #define expm1_optab (optab_table[OTI_expm1]) #define logb_optab (optab_table[OTI_logb]) #define ilogb_optab (optab_table[OTI_ilogb]) #define log_optab (optab_table[OTI_log]) #define log10_optab (optab_table[OTI_log10]) #define log2_optab (optab_table[OTI_log2]) #define log1p_optab (optab_table[OTI_log1p]) #define floor_optab (optab_table[OTI_floor]) #define ceil_optab (optab_table[OTI_ceil]) #define btrunc_optab (optab_table[OTI_trunc]) #define round_optab (optab_table[OTI_round]) #define nearbyint_optab (optab_table[OTI_nearbyint]) #define tan_optab (optab_table[OTI_tan]) #define atan_optab (optab_table[OTI_atan]) #define cmp_optab (optab_table[OTI_cmp]) #define ucmp_optab (optab_table[OTI_ucmp]) #define tst_optab (optab_table[OTI_tst]) #define eq_optab (optab_table[OTI_eq]) #define ne_optab (optab_table[OTI_ne]) #define gt_optab (optab_table[OTI_gt]) #define ge_optab (optab_table[OTI_ge]) #define lt_optab (optab_table[OTI_lt]) #define le_optab (optab_table[OTI_le]) #define unord_optab (optab_table[OTI_unord]) 
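/* A hedged illustration, not part of the original header: on a target with no
   hardware SFmode comparison, the comparison optabs above are normally given
   library fallbacks during initialization, e.g.

       set_optab_libfunc (eq_optab, SFmode, "__eqsf2");

   after which eq_optab->handlers[(int) SFmode].libfunc holds the rtx used to
   emit the soft-float comparison call when no insn pattern exists.  */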
#define strlen_optab (optab_table[OTI_strlen]) #define cbranch_optab (optab_table[OTI_cbranch]) #define cmov_optab (optab_table[OTI_cmov]) #define cstore_optab (optab_table[OTI_cstore]) #define push_optab (optab_table[OTI_push]) #define addcc_optab (optab_table[OTI_addcc]) #define vec_set_optab (optab_table[OTI_vec_set]) #define vec_extract_optab (optab_table[OTI_vec_extract]) #define vec_init_optab (optab_table[OTI_vec_init]) /* Conversion optabs have their own table and indexes. */ enum convert_optab_index { CTI_sext, CTI_zext, CTI_trunc, CTI_sfix, CTI_ufix, CTI_sfixtrunc, CTI_ufixtrunc, CTI_sfloat, CTI_ufloat, CONVERT_OPTAB_MAX }; extern GTY(()) convert_optab convert_optab_table[CONVERT_OPTAB_MAX]; #define sext_optab (convert_optab_table[CTI_sext]) #define zext_optab (convert_optab_table[CTI_zext]) #define trunc_optab (convert_optab_table[CTI_trunc]) #define sfix_optab (convert_optab_table[CTI_sfix]) #define ufix_optab (convert_optab_table[CTI_ufix]) #define sfixtrunc_optab (convert_optab_table[CTI_sfixtrunc]) #define ufixtrunc_optab (convert_optab_table[CTI_ufixtrunc]) #define sfloat_optab (convert_optab_table[CTI_sfloat]) #define ufloat_optab (convert_optab_table[CTI_ufloat]) /* These arrays record the insn_code of insns that may be needed to perform input and output reloads of special objects. They provide a place to pass a scratch register. */ extern enum insn_code reload_in_optab[NUM_MACHINE_MODES]; extern enum insn_code reload_out_optab[NUM_MACHINE_MODES]; /* Contains the optab used for each rtx code. */ extern GTY(()) optab code_to_optab[NUM_RTX_CODE + 1]; typedef rtx (*rtxfun) (rtx); /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...) gives the gen_function to make a branch to test that condition. */ extern rtxfun bcc_gen_fctn[NUM_RTX_CODE]; /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...) gives the insn code to make a store-condition insn to test that condition. */ extern enum insn_code setcc_gen_code[NUM_RTX_CODE]; #ifdef HAVE_conditional_move /* Indexed by the machine mode, gives the insn code to make a conditional move insn. */ extern enum insn_code movcc_gen_code[NUM_MACHINE_MODES]; #endif /* This array records the insn_code of insns to perform block moves. */ extern enum insn_code movstr_optab[NUM_MACHINE_MODES]; /* This array records the insn_code of insns to perform block clears. */ extern enum insn_code clrstr_optab[NUM_MACHINE_MODES]; /* These arrays record the insn_code of two different kinds of insns to perform block compares. */ extern enum insn_code cmpstr_optab[NUM_MACHINE_MODES]; extern enum insn_code cmpmem_optab[NUM_MACHINE_MODES]; /* Define functions given in optabs.c. */ /* Expand a binary operation given optab and rtx operands. */ extern rtx expand_binop (enum machine_mode, optab, rtx, rtx, rtx, int, enum optab_methods); /* Expand a binary operation with both signed and unsigned forms. */ extern rtx sign_expand_binop (enum machine_mode, optab, optab, rtx, rtx, rtx, int, enum optab_methods); /* Generate code to perform an operation on one operand with two results. */ extern int expand_twoval_unop (optab, rtx, rtx, rtx, int); /* Generate code to perform an operation on two operands with two results. */ extern int expand_twoval_binop (optab, rtx, rtx, rtx, rtx, int); /* Expand a unary arithmetic operation given optab rtx operand. */ extern rtx expand_unop (enum machine_mode, optab, rtx, rtx, int); /* Expand the absolute value operation. 
*/ extern rtx expand_abs_nojump (enum machine_mode, rtx, rtx, int); extern rtx expand_abs (enum machine_mode, rtx, rtx, int, int); /* Expand the complex absolute value operation. */ extern rtx expand_complex_abs (enum machine_mode, rtx, rtx, int); /* Generate an instruction with a given INSN_CODE with an output and an input. */ extern void emit_unop_insn (int, rtx, rtx, enum rtx_code); /* Emit code to perform a series of operations on a multi-word quantity, one word at a time. */ extern rtx emit_no_conflict_block (rtx, rtx, rtx, rtx, rtx); /* Emit one rtl instruction to store zero in specified rtx. */ extern void emit_clr_insn (rtx); /* Emit one rtl insn to store 1 in specified rtx assuming it contains 0. */ extern void emit_0_to_1_insn (rtx); /* Emit one rtl insn to compare two rtx's. */ extern void emit_cmp_insn (rtx, rtx, enum rtx_code, rtx, enum machine_mode, int); /* The various uses that a comparison can have; used by can_compare_p: jumps, conditional moves, store flag operations. */ enum can_compare_purpose { ccp_jump, ccp_cmov, ccp_store_flag }; /* Nonzero if a compare of mode MODE can be done straightforwardly (without splitting it into pieces). */ extern int can_compare_p (enum rtx_code, enum machine_mode, enum can_compare_purpose); extern rtx prepare_operand (int, rtx, int, enum machine_mode, enum machine_mode, int); /* Return the INSN_CODE to use for an extend operation. */ extern enum insn_code can_extend_p (enum machine_mode, enum machine_mode, int); /* Generate the body of an insn to extend Y (with mode MFROM) into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */ extern rtx gen_extend_insn (rtx, rtx, enum machine_mode, enum machine_mode, int); /* Initialize the tables that control conversion between fixed and floating values. */ extern void init_fixtab (void); extern void init_floattab (void); /* Call this to reset the function entry for one optab. */ extern void set_optab_libfunc (optab, enum machine_mode, const char *); extern void set_conv_libfunc (convert_optab, enum machine_mode, enum machine_mode, const char *); /* Generate code for a FLOAT_EXPR. */ extern void expand_float (rtx, rtx, int); /* Generate code for a FIX_EXPR. */ extern void expand_fix (rtx, rtx, int); #endif /* GCC_OPTABS_H */ /* Definitions for computing resource usage of specific insns. Copyright (C) 1999, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_RESOURCE_H #define GCC_RESOURCE_H /* Macro to clear all resources. */ #define CLEAR_RESOURCE(RES) \ do { (RES)->memory = (RES)->unch_memory = (RES)->volatil = (RES)->cc = 0; \ CLEAR_HARD_REG_SET ((RES)->regs); } while (0) /* The resources used by a given insn. */ struct resources { char memory; /* Insn sets or needs a memory location. */ char unch_memory; /* Insn sets or needs an "unchanging" MEM. */ char volatil; /* Insn sets or needs a volatile memory loc.
*/ char cc; /* Insn sets or needs the condition codes. */ HARD_REG_SET regs; /* Which registers are set or needed. */ }; /* The kinds of rtl mark_*_resources will consider */ enum mark_resource_type { MARK_SRC_DEST = 0, MARK_SRC_DEST_CALL = 1 }; extern void mark_target_live_regs (rtx, rtx, struct resources *); extern void mark_set_resources (rtx, struct resources *, int, enum mark_resource_type); extern void mark_referenced_resources (rtx, struct resources *, int); extern void clear_hashed_info_for_insn (rtx); extern void incr_ticks_for_insn (rtx); extern void mark_end_of_function_resources (rtx, int); extern void init_resource_info (rtx); extern void free_resource_info (void); #endif /* GCC_RESOURCE_H */ /* Communication between reload.c and reload1.c. Copyright (C) 1987, 1991, 1992, 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_RELOAD_H #define GCC_RELOAD_H /* If secondary reloads are the same for inputs and outputs, define those macros here. */ #ifdef SECONDARY_RELOAD_CLASS #define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \ SECONDARY_RELOAD_CLASS (CLASS, MODE, X) #define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \ SECONDARY_RELOAD_CLASS (CLASS, MODE, X) #endif /* If either macro is defined, show that we need secondary reloads. */ #if defined(SECONDARY_INPUT_RELOAD_CLASS) || defined(SECONDARY_OUTPUT_RELOAD_CLASS) #define HAVE_SECONDARY_RELOADS #endif /* If MEMORY_MOVE_COST isn't defined, give it a default here. */ #ifndef MEMORY_MOVE_COST #ifdef HAVE_SECONDARY_RELOADS #define MEMORY_MOVE_COST(MODE,CLASS,IN) \ (4 + memory_move_secondary_cost ((MODE), (CLASS), (IN))) #else #define MEMORY_MOVE_COST(MODE,CLASS,IN) 4 #endif #endif extern int memory_move_secondary_cost (enum machine_mode, enum reg_class, int); /* Maximum number of reloads we can need. */ #define MAX_RELOADS (2 * MAX_RECOG_OPERANDS * (MAX_REGS_PER_ADDRESS + 1)) /* Encode the usage of a reload. The following codes are supported: RELOAD_FOR_INPUT reload of an input operand RELOAD_FOR_OUTPUT likewise, for output RELOAD_FOR_INSN a reload that must not conflict with anything used in the insn, but may conflict with something used before or after the insn RELOAD_FOR_INPUT_ADDRESS reload for parts of the address of an object that is an input reload RELOAD_FOR_INPADDR_ADDRESS reload needed for RELOAD_FOR_INPUT_ADDRESS RELOAD_FOR_OUTPUT_ADDRESS like RELOAD_FOR_INPUT_ADDRESS, for output RELOAD_FOR_OUTADDR_ADDRESS reload needed for RELOAD_FOR_OUTPUT_ADDRESS RELOAD_FOR_OPERAND_ADDRESS reload for the address of a non-reloaded operand; these don't conflict with any other addresses.
RELOAD_FOR_OPADDR_ADDR reload needed for RELOAD_FOR_OPERAND_ADDRESS reloads; usually secondary reloads RELOAD_OTHER none of the above, usually multiple uses RELOAD_FOR_OTHER_ADDRESS reload for part of the address of an input that is marked RELOAD_OTHER. This used to be "enum reload_when_needed" but some debuggers have trouble with an enum tag and variable of the same name. */ enum reload_type { RELOAD_FOR_INPUT, RELOAD_FOR_OUTPUT, RELOAD_FOR_INSN, RELOAD_FOR_INPUT_ADDRESS, RELOAD_FOR_INPADDR_ADDRESS, RELOAD_FOR_OUTPUT_ADDRESS, RELOAD_FOR_OUTADDR_ADDRESS, RELOAD_FOR_OPERAND_ADDRESS, RELOAD_FOR_OPADDR_ADDR, RELOAD_OTHER, RELOAD_FOR_OTHER_ADDRESS }; #ifdef ONE_COMPILATION_UNIT #endif #ifdef GCC_INSN_CODES_H /* Each reload is recorded with a structure like this. */ struct reload { /* The value to reload from */ rtx in; /* Where to store reload-reg afterward if necessary (often the same as reload_in) */ rtx out; /* The class of registers to reload into. */ enum reg_class class; /* The mode this operand should have when reloaded, on input. */ enum machine_mode inmode; /* The mode this operand should have when reloaded, on output. */ enum machine_mode outmode; /* The mode of the reload register. */ enum machine_mode mode; /* The largest number of registers this reload will require. */ unsigned int nregs; /* Positive amount to increment or decrement by if reload_in is a PRE_DEC, PRE_INC, POST_DEC, POST_INC. Ignored otherwise (don't assume it is zero). */ int inc; /* A reg for which reload_in is the equivalent. If reload_in is a symbol_ref which came from reg_equiv_constant, then this is the pseudo which has that symbol_ref as equivalent. */ rtx in_reg; rtx out_reg; /* Used in find_reload_regs to record the allocated register. */ int regno; /* This is the register to reload into. If it is zero when `find_reloads' returns, you must find a suitable register in the class specified by reload_reg_class, and store here an rtx for that register with mode from reload_inmode or reload_outmode. */ rtx reg_rtx; /* The operand number being reloaded. This is used to group related reloads and need not always be equal to the actual operand number in the insn, though it currently will be; for in-out operands, it is one of the two operand numbers. */ int opnum; /* Gives the reload number of a secondary input reload, when needed; otherwise -1. */ int secondary_in_reload; /* Gives the reload number of a secondary output reload, when needed; otherwise -1. */ int secondary_out_reload; /* If a secondary input reload is required, gives the INSN_CODE that uses the secondary reload as a scratch register, or CODE_FOR_nothing if the secondary reload register is to be an intermediate register. */ enum insn_code secondary_in_icode; /* Likewise, for a secondary output reload. */ enum insn_code secondary_out_icode; /* Classifies reload as needed either for addressing an input reload, addressing an output, for addressing a non-reloaded mem ref, or for unspecified purposes (i.e., more than one of the above). */ enum reload_type when_needed; /* Nonzero for an optional reload. Optional reloads are ignored unless the value is already sitting in a register. */ unsigned int optional:1; /* Nonzero if this reload shouldn't be combined with another reload. */ unsigned int nocombine:1; /* Nonzero if this is a secondary register for one or more reloads. */ unsigned int secondary_p:1; /* Nonzero if this reload must use a register not already allocated to a group.
*/ unsigned int nongroup:1; }; extern struct reload rld[MAX_RELOADS]; extern int n_reloads; #endif extern GTY (()) struct varray_head_tag *reg_equiv_memory_loc_varray; extern rtx *reg_equiv_constant; extern rtx *reg_equiv_memory_loc; extern rtx *reg_equiv_address; extern rtx *reg_equiv_mem; /* All the "earlyclobber" operands of the current insn are recorded here. */ extern int n_earlyclobbers; extern rtx reload_earlyclobbers[MAX_RECOG_OPERANDS]; /* Save the number of operands. */ extern int reload_n_operands; /* First uid used by insns created by reload in this function. Used in find_equiv_reg. */ extern int reload_first_uid; /* Nonzero if indirect addressing is supported when the innermost MEM is of the form (MEM (SYMBOL_REF sym)). It is assumed that the level to which these are valid is the same as spill_indirect_levels, above. */ extern char indirect_symref_ok; /* Nonzero if an address (plus (reg frame_pointer) (reg ...)) is valid. */ extern char double_reg_address_ok; extern int num_not_at_initial_offset; struct needs { /* [0] is normal, [1] is nongroup. */ short regs[2][N_REG_CLASSES]; short groups[N_REG_CLASSES]; }; #if defined SET_HARD_REG_BIT && defined CLEAR_REG_SET /* This structure describes instructions which are relevant for reload. Apart from all regular insns, this also includes CODE_LABELs, since they must be examined for register elimination. */ struct insn_chain { /* Links to the neighbor instructions. */ struct insn_chain *next, *prev; /* Link through a chains set up by calculate_needs_all_insns, containing all insns that need reloading. */ struct insn_chain *next_need_reload; /* The basic block this insn is in. */ int block; /* The rtx of the insn. */ rtx insn; /* Register life information: record all live hard registers, and all live pseudos that have a hard register. */ regset_head live_throughout; regset_head dead_or_set; /* Copies of the global variables computed by find_reloads. */ struct reload *rld; int n_reloads; /* Indicates which registers have already been used for spills. */ HARD_REG_SET used_spill_regs; /* Describe the needs for reload registers of this insn. */ struct needs need; /* Nonzero if find_reloads said the insn requires reloading. */ unsigned int need_reload:1; /* Nonzero if find_reloads needs to be run during reload_as_needed to perform modifications on any operands. */ unsigned int need_operand_change:1; /* Nonzero if eliminate_regs_in_insn said it requires eliminations. */ unsigned int need_elim:1; /* Nonzero if this insn was inserted by perform_caller_saves. */ unsigned int is_caller_save_insn:1; }; /* A chain of insn_chain structures to describe all non-note insns in a function. */ extern struct insn_chain *reload_insn_chain; /* Allocate a new insn_chain structure. */ extern struct insn_chain *new_insn_chain (void); extern void compute_use_by_pseudos (HARD_REG_SET *, regset); #endif /* Functions from reload.c: */ /* Return a memory location that will be used to copy X in mode MODE. If we haven't already made a location for this mode in this insn, call find_reloads_address on the location being returned. */ extern rtx get_secondary_mem (rtx, enum machine_mode, int, enum reload_type); /* Clear any secondary memory locations we've made. */ extern void clear_secondary_mem (void); /* Transfer all replacements that used to be in reload FROM to be in reload TO. */ extern void transfer_replacements (int, int); /* IN_RTX is the value loaded by a reload that we now decided to inherit, or a subpart of it. 
If we have any replacements registered for IN_RTX, cancel the reloads that were supposed to load them. Return nonzero if we canceled any reloads. */ extern int remove_address_replacements (rtx in_rtx); /* Like rtx_equal_p except that it allows a REG and a SUBREG to match if they are the same hard reg, and has special hacks for autoincrement and autodecrement. */ extern int operands_match_p (rtx, rtx); /* Return 1 if altering OP will not modify the value of CLOBBER. */ extern int safe_from_earlyclobber (rtx, rtx); /* Search the body of INSN for values that need reloading and record them with push_reload. REPLACE nonzero means record also where the values occur so that subst_reloads can be used. */ extern int find_reloads (rtx, int, int, int, short *); /* Compute the sum of X and Y, making canonicalizations assumed in an address, namely: sum constant integers, surround the sum of two constants with a CONST, put the constant as the second operand, and group the constant on the outermost sum. */ extern rtx form_sum (rtx, rtx); /* Substitute into the current INSN the registers into which we have reloaded the things that need reloading. */ extern void subst_reloads (rtx); /* Make a copy of any replacements being done into X and move those copies to locations in Y, a copy of X. We only look at the highest level of the RTL. */ extern void copy_replacements (rtx, rtx); /* Change any replacements being done to *X to be done to *Y */ extern void move_replacements (rtx *x, rtx *y); /* If LOC was scheduled to be replaced by something, return the replacement. Otherwise, return *LOC. */ extern rtx find_replacement (rtx *); /* Return nonzero if register in range [REGNO, ENDREGNO) appears either explicitly or implicitly in X other than being stored into. */ extern int refers_to_regno_for_reload_p (unsigned int, unsigned int, rtx, rtx *); /* Nonzero if modifying X will affect IN. */ extern int reg_overlap_mentioned_for_reload_p (rtx, rtx); /* Return nonzero if anything in X contains a MEM. Look also for pseudo registers. */ extern int refers_to_mem_for_reload_p (rtx); /* Check the insns before INSN to see if there is a suitable register containing the same value as GOAL. */ extern rtx find_equiv_reg (rtx, rtx, enum reg_class, int, short *, int, enum machine_mode); /* Return 1 if register REGNO is the subject of a clobber in insn INSN. */ extern int regno_clobbered_p (unsigned int, rtx, enum machine_mode, int); /* Return 1 if X is an operand of an insn that is being earlyclobbered. */ extern int earlyclobber_operand_p (rtx); /* Record one reload that needs to be performed. */ extern int push_reload (rtx, rtx, rtx *, rtx *, enum reg_class, enum machine_mode, enum machine_mode, int, int, int, enum reload_type); /* Functions in postreload.c: */ extern void reload_cse_regs (rtx); /* Functions in reload1.c: */ extern int reloads_conflict (int, int); /* Initialize the reload pass once per compilation. */ extern void init_reload (void); /* The reload pass itself. */ extern int reload (rtx, int); /* Mark the slots in regs_ever_live for the hard regs used by pseudo-reg number REGNO. */ extern void mark_home_live (int); /* Scan X and replace any eliminable registers (such as fp) with a replacement (such as sp), plus an offset. */ extern rtx eliminate_regs (rtx, enum machine_mode, rtx); /* Emit code to perform a reload from IN (which may be a reload register) to OUT (which may also be a reload register). IN or OUT is from operand OPNUM with reload type TYPE. 
*/ extern rtx gen_reload (rtx, rtx, int, enum reload_type); /* Deallocate the reload register used by reload number R. */ extern void deallocate_reload_reg (int r); /* Functions in caller-save.c: */ /* Initialize for caller-save. */ extern void init_caller_save (void); /* Initialize save areas by showing that we haven't allocated any yet. */ extern void init_save_areas (void); /* Allocate save areas for any hard registers that might need saving. */ extern void setup_save_areas (void); /* Find the places where hard regs are live across calls and save them. */ extern void save_call_clobbered_regs (void); /* Replace (subreg (reg)) with the appropriate (reg) for any operands. */ extern void cleanup_subreg_operands (rtx); /* Debugging support. */ extern void debug_reload_to_stream (FILE *); extern void debug_reload (void); /* Compute the actual register we should reload to, in case we're reloading to/from a register that is wider than a word. */ extern rtx reload_adjust_reg_for_mode (rtx, enum machine_mode); #endif /* GCC_RELOAD_H */ #define FAIL return (end_sequence (), _val) #define DONE return (_val = get_insns (), end_sequence (), _val) /* ../../gcc/gcc/config/i386/i386.md:492 */ rtx gen_cmpdi_ccno_1_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (VOIDmode, 17), gen_rtx_COMPARE (VOIDmode, operand0, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:521 */ rtx gen_cmpdi_1_insn_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (VOIDmode, 17), gen_rtx_COMPARE (VOIDmode, operand0, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:689 */ rtx gen_cmpqi_ext_3_insn (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (VOIDmode, 17), gen_rtx_COMPARE (VOIDmode, gen_rtx_SUBREG (QImode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), 0), operand1)); } /* ../../gcc/gcc/config/i386/i386.md:703 */ rtx gen_cmpqi_ext_3_insn_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (VOIDmode, 17), gen_rtx_COMPARE (VOIDmode, gen_rtx_SUBREG (QImode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), 0), operand1)); } /* ../../gcc/gcc/config/i386/i386.md:960 */ rtx gen_x86_fnstsw_1 (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (HImode, gen_rtvec (1, gen_rtx_REG (CCFPmode, 18)), 24)); } /* ../../gcc/gcc/config/i386/i386.md:972 */ rtx gen_x86_sahf_1 (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_UNSPEC (CCmode, gen_rtvec (1, operand0), 25)); } /* ../../gcc/gcc/config/i386/i386.md:1135 */ rtx gen_popsi1 (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)])))); } /* ../../gcc/gcc/config/i386/i386.md:1779 */ rtx gen_movsi_insv_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand1); } /* ../../gcc/gcc/config/i386/i386.md:1789 
*/ rtx gen_movdi_insv_1_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (DImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand1); } /* ../../gcc/gcc/config/i386/i386.md:1822 */ rtx gen_pushdi2_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:1895 */ rtx gen_popdi1 (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)])))); } /* ../../gcc/gcc/config/i386/i386.md:2869 */ rtx gen_swapxf (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, operand1), gen_rtx_SET (VOIDmode, operand1, operand0))); } /* ../../gcc/gcc/config/i386/i386.md:2899 */ rtx gen_zero_extendhisi2_and (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (SImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:3078 */ rtx gen_zero_extendsidi2_32 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:3106 */ rtx gen_zero_extendsidi2_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3156 */ rtx gen_zero_extendhidi2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3166 */ rtx gen_zero_extendqidi2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3200 */ rtx gen_extendsidi2_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SIGN_EXTEND (DImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3212 */ rtx gen_extendhidi2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SIGN_EXTEND (DImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3220 */ rtx gen_extendqidi2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SIGN_EXTEND (DImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3302 */ rtx gen_extendhisi2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SIGN_EXTEND (SImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3355 */ rtx gen_extendqihi2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SIGN_EXTEND (HImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3381 */ rtx gen_extendqisi2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return 
gen_rtx_SET (VOIDmode, operand0, gen_rtx_SIGN_EXTEND (SImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3619 */ rtx gen_truncdfsf2_noop (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3758 */ rtx gen_truncdfsf2_sse_only (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3866 */ rtx gen_truncxfsf2_noop (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:3949 */ rtx gen_truncxfdf2_noop (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4089 */ rtx gen_fix_truncdi_nomemory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED, rtx operand4 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (5, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3), gen_rtx_CLOBBER (VOIDmode, operand4), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))); } /* ../../gcc/gcc/config/i386/i386.md:4102 */ rtx gen_fix_truncdi_memory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))); } /* ../../gcc/gcc/config/i386/i386.md:4144 */ rtx gen_fix_truncsfdi_sse (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4163 */ rtx gen_fix_truncdfdi_sse (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4254 */ rtx gen_fix_truncsi_nomemory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED, rtx operand4 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3), gen_rtx_CLOBBER (VOIDmode, operand4))); } /* ../../gcc/gcc/config/i386/i386.md:4266 */ rtx gen_fix_truncsi_memory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3))); } /* ../../gcc/gcc/config/i386/i386.md:4278 */ rtx gen_fix_truncsfsi_sse (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4297 */ rtx gen_fix_truncdfsi_sse (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, 
operand0, gen_rtx_FIX (SImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4395 */ rtx gen_fix_trunchi_nomemory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED, rtx operand4 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (HImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3), gen_rtx_CLOBBER (VOIDmode, operand4))); } /* ../../gcc/gcc/config/i386/i386.md:4407 */ rtx gen_fix_trunchi_memory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (HImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3))); } /* ../../gcc/gcc/config/i386/i386.md:4445 */ rtx gen_x86_fnstcw_1 (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (HImode, gen_rtvec (1, gen_rtx_REG (HImode, 18)), 26)); } /* ../../gcc/gcc/config/i386/i386.md:4454 */ rtx gen_x86_fldcw_1 (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (HImode, 18), gen_rtx_UNSPEC (HImode, gen_rtvec (1, operand0), 28)); } /* ../../gcc/gcc/config/i386/i386.md:4691 */ rtx gen_floathixf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (XFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4702 */ rtx gen_floatsixf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (XFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4713 */ rtx gen_floatdixf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (XFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4962 */ rtx gen_adddi3_carry_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (DImode, gen_rtx_PLUS (DImode, operand3, operand1), operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:4986 */ rtx gen_addqi3_carry (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (QImode, gen_rtx_PLUS (QImode, operand3, operand1), operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:4998 */ rtx gen_addhi3_carry (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (HImode, gen_rtx_PLUS (HImode, operand3, operand1), operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:5010 */ rtx gen_addsi3_carry (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (SImode, gen_rtx_PLUS (SImode, operand3, operand1), operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:5047 */ rtx 
gen_addqi3_cc (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_UNSPEC (CCmode, gen_rtvec (2, operand1, operand2), 27)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (QImode, operand1, operand2)))); } /* ../../gcc/gcc/config/i386/i386.md:5612 */ rtx gen_addsi_1_zext (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_PLUS (SImode, operand1, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:6483 */ rtx gen_addqi_ext_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_PLUS (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand1, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:6630 */ rtx gen_subdi3_carry_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (DImode, operand1, gen_rtx_PLUS (DImode, operand3, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:6678 */ rtx gen_subqi3_carry (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (QImode, operand1, gen_rtx_PLUS (QImode, operand3, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:6690 */ rtx gen_subhi3_carry (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (HImode, operand1, gen_rtx_PLUS (HImode, operand3, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:6702 */ rtx gen_subsi3_carry (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (SImode, operand1, gen_rtx_PLUS (SImode, operand3, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:6714 */ rtx gen_subsi3_carry_zext (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_MINUS (SImode, operand1, gen_rtx_PLUS (SImode, operand3, operand2)))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7439 */ rtx gen_divqi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (QImode, operand1, operand2)), 
gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7449 */ rtx gen_udivqi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UDIV (QImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7651 */ rtx gen_divmodhi4 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (HImode, operand1, operand2)), gen_rtx_SET (VOIDmode, operand3, gen_rtx_MOD (HImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7664 */ rtx gen_udivmoddi4 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UDIV (DImode, operand1, operand2)), gen_rtx_SET (VOIDmode, operand3, gen_rtx_UMOD (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7707 */ rtx gen_udivmodsi4 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UDIV (SImode, operand1, operand2)), gen_rtx_SET (VOIDmode, operand3, gen_rtx_UMOD (SImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7818 */ rtx gen_testsi_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (VOIDmode, 17), gen_rtx_COMPARE (VOIDmode, gen_rtx_AND (SImode, operand0, operand1), const0_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:8413 */ rtx gen_andqi_ext_0 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_AND (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand1, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:8819 */ rtx gen_iorqi_ext_0 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_IOR (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand1, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:9157 */ rtx gen_xorqi_ext_0 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_XOR (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand1, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand2)), 
gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:9587 */ rtx gen_negsf2_memory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SFmode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:9594 */ rtx gen_negsf2_ifs (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SFmode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:9730 */ rtx gen_negdf2_memory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (DFmode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:9737 */ rtx gen_negdf2_ifs (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (DFmode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:9988 */ rtx gen_abssf2_memory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (SFmode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:9995 */ rtx gen_abssf2_ifs (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (SFmode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:10131 */ rtx gen_absdf2_memory (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (DFmode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:10138 */ rtx gen_absdf2_ifs (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (DFmode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:10677 */ rtx gen_ashldi3_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (DImode, operand1, operand2)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:10715 */ rtx gen_x86_shld_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (SImode, gen_rtx_ASHIFT (SImode, operand0, operand2), gen_rtx_LSHIFTRT (SImode, operand1, gen_rtx_MINUS (QImode, const_int_rtx[MAX_SAVED_CONST_INT + (32)], operand2)))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:11300 */ rtx gen_ashrdi3_63_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 
ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:11381 */ rtx gen_ashrdi3_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (DImode, operand1, operand2)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:11419 */ rtx gen_x86_shrd_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (SImode, gen_rtx_ASHIFTRT (SImode, operand0, operand2), gen_rtx_ASHIFT (SImode, operand1, gen_rtx_MINUS (QImode, const_int_rtx[MAX_SAVED_CONST_INT + (32)], operand2)))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:11463 */ rtx gen_ashrsi3_31 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (SImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:11876 */ rtx gen_lshrdi3_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (DImode, operand1, operand2)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:12779 */ rtx gen_setcc_2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand0), gen_rtx_fmt_ee (GET_CODE (operand1), QImode, gen_rtx_REG (VOIDmode, 17), const0_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:13325 */ rtx gen_jump (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_LABEL_REF (VOIDmode, operand0)); } /* ../../gcc/gcc/config/i386/i386.md:13435 */ rtx gen_doloop_end_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, gen_rtx_NE (VOIDmode, operand1, const1_rtx), gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx)), gen_rtx_SET (VOIDmode, operand2, gen_rtx_PLUS (SImode, operand1, constm1_rtx)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:13777 */ rtx gen_blockage (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, operand0), 0); } /* ../../gcc/gcc/config/i386/i386.md:13799 */ rtx gen_return_internal (void) { return gen_rtx_RETURN (VOIDmode); } /* ../../gcc/gcc/config/i386/i386.md:13810 */ rtx gen_return_internal_long (void) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_RETURN (VOIDmode), gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), 75))); } /* ../../gcc/gcc/config/i386/i386.md:13820 */ rtx gen_return_pop_internal (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_RETURN (VOIDmode), gen_rtx_USE (VOIDmode, operand0))); } /* 
../../gcc/gcc/config/i386/i386.md:13829 */ rtx gen_return_indirect_internal (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_RETURN (VOIDmode), gen_rtx_USE (VOIDmode, operand0))); } /* ../../gcc/gcc/config/i386/i386.md:13837 */ rtx gen_nop (void) { return const0_rtx; } /* ../../gcc/gcc/config/i386/i386.md:13849 */ rtx gen_align (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, operand0), 68); } /* ../../gcc/gcc/config/i386/i386.md:13870 */ rtx gen_set_got (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (1, const0_rtx), 12)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:13911 */ rtx gen_eh_return_si (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, operand0), 76)); } /* ../../gcc/gcc/config/i386/i386.md:13921 */ rtx gen_eh_return_di (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, operand0), 76)); } /* ../../gcc/gcc/config/i386/i386.md:13931 */ rtx gen_leave (void) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 6), const_int_rtx[MAX_SAVED_CONST_INT + (4)])), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 6), gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 6))), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode))))); } /* ../../gcc/gcc/config/i386/i386.md:13939 */ rtx gen_leave_rex64 (void) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 6), const_int_rtx[MAX_SAVED_CONST_INT + (8)])), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 6), gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 6))), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode))))); } /* ../../gcc/gcc/config/i386/i386.md:14046 */ rtx gen_ctzsi2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_CTZ (SImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:14054 */ rtx gen_ctzdi2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_CTZ (DImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:14840 */ rtx gen_sqrtsf2_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:14852 */ rtx gen_sqrtsf2_1_sse_only (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:14861 */ rtx gen_sqrtsf2_i387 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:14881 */ rtx gen_sqrtdf2_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (DFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:14893 */ rtx gen_sqrtdf2_1_sse_only (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 
ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (DFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:14902 */ rtx gen_sqrtdf2_i387 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (DFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:14923 */ rtx gen_sqrtxf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (XFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:14953 */ rtx gen_fpremxf4 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 88)), gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 89)), gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 18), gen_rtx_UNSPEC (CCFPmode, gen_rtvec (1, const0_rtx), 45)))); } /* ../../gcc/gcc/config/i386/i386.md:15036 */ rtx gen_fprem1xf4 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 90)), gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 91)), gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 18), gen_rtx_UNSPEC (CCFPmode, gen_rtvec (1, const0_rtx), 45)))); } /* ../../gcc/gcc/config/i386/i386.md:15201 */ rtx gen_sincosdf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, operand2), 80)), gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, operand2), 81)))); } /* ../../gcc/gcc/config/i386/i386.md:15235 */ rtx gen_sincossf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SFmode, gen_rtvec (1, operand2), 80)), gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (SFmode, gen_rtvec (1, operand2), 81)))); } /* ../../gcc/gcc/config/i386/i386.md:15311 */ rtx gen_sincosxf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 80)), gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 81)))); } /* ../../gcc/gcc/config/i386/i386.md:15471 */ rtx gen_atan2df3_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DFmode, gen_rtvec (2, operand2, operand1), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))); } /* ../../gcc/gcc/config/i386/i386.md:15509 */ rtx gen_atan2sf3_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SFmode, gen_rtvec (2, operand2, operand1), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SFmode)))); } /* ../../gcc/gcc/config/i386/i386.md:15547 */ rtx 
gen_atan2xf3_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand1), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode)))); } /* ../../gcc/gcc/config/i386/i386.md:15719 */ rtx gen_fyl2x_xf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand1), 66)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode)))); } /* ../../gcc/gcc/config/i386/i386.md:15899 */ rtx gen_fyl2xp1_xf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand1), 67)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode)))); } /* ../../gcc/gcc/config/i386/i386.md:16407 */ rtx gen_cld (void) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 19), const0_rtx); } /* ../../gcc/gcc/config/i386/i386.md:17293 */ rtx gen_x86_movdicc_0_m1_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DImode, operand1, constm1_rtx, const0_rtx)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:17310 */ rtx gen_movdicc_c_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DImode, gen_rtx_fmt_ee (GET_CODE (operand1), VOIDmode, gen_rtx_REG (VOIDmode, 17), const0_rtx), operand2, operand3)); } /* ../../gcc/gcc/config/i386/i386.md:17336 */ rtx gen_x86_movsicc_0_m1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SImode, operand1, constm1_rtx, const0_rtx)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:17891 */ rtx gen_pro_epilogue_adjust_stack_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (SImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode))))); } /* ../../gcc/gcc/config/i386/i386.md:17932 */ rtx gen_pro_epilogue_adjust_stack_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode))))); } /* ../../gcc/gcc/config/i386/i386.md:17975 */ rtx gen_pro_epilogue_adjust_stack_rex64_2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (DImode, operand1, operand3)), gen_rtx_USE (VOIDmode, operand2), gen_hard_reg_clobber (CCmode, 17), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH 
(VOIDmode))))); } /* ../../gcc/gcc/config/i386/i386.md:18008 */ rtx gen_sse_movsfcc (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED, rtx operand4 ATTRIBUTE_UNUSED, rtx operand5 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SFmode, gen_rtx_fmt_ee (GET_CODE (operand1), VOIDmode, operand4, operand5), operand2, operand3)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SFmode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:18029 */ rtx gen_sse_movsfcc_eq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED, rtx operand4 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SFmode, gen_rtx_EQ (VOIDmode, operand3, operand4), operand1, operand2)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SFmode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:18041 */ rtx gen_sse_movdfcc (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED, rtx operand4 ATTRIBUTE_UNUSED, rtx operand5 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DFmode, gen_rtx_fmt_ee (GET_CODE (operand1), VOIDmode, operand4, operand5), operand2, operand3)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:18062 */ rtx gen_sse_movdfcc_eq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED, rtx operand4 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DFmode, gen_rtx_EQ (VOIDmode, operand3, operand4), operand1, operand2)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:18380 */ rtx gen_allocate_stack_worker_1 (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, operand0), 10), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_MINUS (SImode, gen_rtx_REG (SImode, 7), operand0)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:18400 */ rtx gen_allocate_stack_worker_rex64 (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_UNSPEC_VOLATILE (DImode, gen_rtvec (1, operand0), 10), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_MINUS (DImode, gen_rtx_REG (DImode, 7), operand0)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:19532 */ rtx gen_trap (void) { return gen_rtx_TRAP_IF (VOIDmode, const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (5)]); } /* ../../gcc/gcc/config/i386/i386.md:19584 */ rtx gen_movv4sf_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:19609 */ rtx gen_movv4si_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:19649 */ rtx 
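/* Illustrative note, assuming the i386 hard-register layout of this GCC
   version: the small integers handed to gen_rtx_REG and gen_hard_reg_clobber
   are hard register numbers from i386.h -- 7 is the stack pointer, 17 the
   flags (condition-code) register, 18 the FP status word, and 19 the
   direction flag, which is why gen_cld above stores 0 to (reg:SI 19).
   Similarly, const_int_rtx[MAX_SAVED_CONST_INT + (n)] is the pre-allocated
   shared CONST_INT for the small value n; for instance
   const_int_rtx[MAX_SAVED_CONST_INT + (2)] is the same rtx that GEN_INT (2)
   would return.  */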
gen_movv2di_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:19703 */ rtx gen_movv8qi_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:19715 */ rtx gen_movv4hi_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:19727 */ rtx gen_movv2si_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:19739 */ rtx gen_movv2sf_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:19775 */ rtx gen_movv2df_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:19816 */ rtx gen_movv8hi_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:19857 */ rtx gen_movv16qi_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:20077 */ rtx gen_movti_internal (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:20274 */ rtx gen_sse_movmskps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (1, operand1), 33)); } /* ../../gcc/gcc/config/i386/i386.md:20283 */ rtx gen_mmx_pmovmskb (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (1, operand1), 33)); } /* ../../gcc/gcc/config/i386/i386.md:20293 */ rtx gen_mmx_maskmovq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_MEM (V8QImode, operand0), gen_rtx_UNSPEC (V8QImode, gen_rtvec (2, operand1, operand2), 32)); } /* ../../gcc/gcc/config/i386/i386.md:20304 */ rtx gen_mmx_maskmovq_rex (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_MEM (V8QImode, operand0), gen_rtx_UNSPEC (V8QImode, gen_rtvec (2, operand1, operand2), 32)); } /* ../../gcc/gcc/config/i386/i386.md:20315 */ rtx gen_sse_movntv4sf (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (1, operand1), 34)); } /* ../../gcc/gcc/config/i386/i386.md:20324 */ rtx gen_sse_movntdi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, operand1), 34)); } /* ../../gcc/gcc/config/i386/i386.md:20333 */ rtx gen_sse_movhlps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, operand1, gen_rtx_VEC_SELECT (V4SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const_int_rtx[MAX_SAVED_CONST_INT + (2)], 
const_int_rtx[MAX_SAVED_CONST_INT + (3)], const0_rtx, const1_rtx))), const_int_rtx[MAX_SAVED_CONST_INT + (3)])); } /* ../../gcc/gcc/config/i386/i386.md:20348 */ rtx gen_sse_movlhps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, operand1, gen_rtx_VEC_SELECT (V4SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (3)], const0_rtx, const1_rtx))), const_int_rtx[MAX_SAVED_CONST_INT + (12)])); } /* ../../gcc/gcc/config/i386/i386.md:20363 */ rtx gen_sse_movhps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, operand1, operand2, const_int_rtx[MAX_SAVED_CONST_INT + (12)])); } /* ../../gcc/gcc/config/i386/i386.md:20375 */ rtx gen_sse_movlps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, operand1, operand2, const_int_rtx[MAX_SAVED_CONST_INT + (3)])); } /* ../../gcc/gcc/config/i386/i386.md:20397 */ rtx gen_sse_loadss_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_VEC_DUPLICATE (V4SFmode, operand1), operand2, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20408 */ rtx gen_sse_movss (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, operand1, operand2, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20419 */ rtx gen_sse_storess (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:20429 */ rtx gen_sse_shufps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (3, operand1, operand2, operand3), 41)); } /* ../../gcc/gcc/config/i386/i386.md:20444 */ rtx gen_addv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V4SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20453 */ rtx gen_vmaddv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_PLUS (V4SFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20465 */ rtx gen_subv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V4SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20474 */ rtx gen_vmsubv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_MINUS (V4SFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20498 */ rtx gen_mulv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 
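/* Illustrative note: the vm* variants (gen_vmaddv4sf3, gen_vmsubv4sf3,
   gen_vmmulv4sf3, ...) model the scalar SSE instructions.  The arithmetic is
   written on the whole V4SF vector and the result is VEC_MERGEd back into
   operand 1 with mask const1_rtx, so only element 0 of the destination takes
   the newly computed value -- matching the behaviour of addss/subss/mulss,
   which leave the upper three lanes of the destination untouched.  */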
ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (V4SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20507 */ rtx gen_vmmulv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_MULT (V4SFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20519 */ rtx gen_divv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (V4SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20528 */ rtx gen_vmdivv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_DIV (V4SFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20543 */ rtx gen_rcpv4sf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (1, operand1), 42)); } /* ../../gcc/gcc/config/i386/i386.md:20552 */ rtx gen_vmrcpv4sf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (1, operand1), 42), operand2, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20564 */ rtx gen_rsqrtv4sf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (1, operand1), 43)); } /* ../../gcc/gcc/config/i386/i386.md:20573 */ rtx gen_vmrsqrtv4sf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (1, operand1), 43), operand2, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20585 */ rtx gen_sqrtv4sf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (V4SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:20593 */ rtx gen_vmsqrtv4sf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_SQRT (V4SFmode, operand1), operand2, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20784 */ rtx gen_sse2_andv2di3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (V2DImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20803 */ rtx gen_sse2_nandv2di3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (V2DImode, gen_rtx_NOT (V2DImode, operand1), operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20823 */ rtx gen_sse2_iorv2di3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (V2DImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20843 */ rtx gen_sse2_xorv2di3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, 
gen_rtx_XOR (V2DImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20855 */ rtx gen_sse_clrv4sf (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, operand1); } /* ../../gcc/gcc/config/i386/i386.md:20880 */ rtx gen_sse_clrv2df (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DFmode, gen_rtvec (1, const0_rtx), 45)); } /* ../../gcc/gcc/config/i386/i386.md:20891 */ rtx gen_maskcmpv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_fmt_ee (GET_CODE (operand3), V4SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20901 */ rtx gen_maskncmpv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_NOT (V4SImode, gen_rtx_fmt_ee (GET_CODE (operand3), V4SImode, operand1, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:20917 */ rtx gen_vmmaskcmpv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SImode, gen_rtx_fmt_ee (GET_CODE (operand3), V4SImode, operand1, operand2), gen_rtx_SUBREG (V4SImode, operand1, 0), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20930 */ rtx gen_vmmaskncmpv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SImode, gen_rtx_NOT (V4SImode, gen_rtx_fmt_ee (GET_CODE (operand3), V4SImode, operand1, operand2)), gen_rtx_SUBREG (V4SImode, operand1, 0), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:20949 */ rtx gen_sse_comi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 17), gen_rtx_COMPARE (CCFPmode, gen_rtx_VEC_SELECT (SFmode, operand0, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:20962 */ rtx gen_sse_ucomi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPUmode, 17), gen_rtx_COMPARE (CCFPUmode, gen_rtx_VEC_SELECT (SFmode, operand0, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:20978 */ rtx gen_sse_unpckhps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_VEC_SELECT (V4SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)], const1_rtx))), gen_rtx_VEC_SELECT (V4SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), const_int_rtx[MAX_SAVED_CONST_INT + (5)])); } /* ../../gcc/gcc/config/i386/i386.md:20997 */ rtx gen_sse_unpcklps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, 
gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_VEC_SELECT (V4SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), gen_rtx_VEC_SELECT (V4SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)], const1_rtx))), const_int_rtx[MAX_SAVED_CONST_INT + (5)])); } /* ../../gcc/gcc/config/i386/i386.md:21019 */ rtx gen_smaxv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMAX (V4SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21028 */ rtx gen_vmsmaxv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_SMAX (V4SFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:21040 */ rtx gen_sminv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMIN (V4SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21049 */ rtx gen_vmsminv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_SMIN (V4SFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:21063 */ rtx gen_cvtpi2ps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, operand1, gen_rtx_VEC_DUPLICATE (V4SFmode, gen_rtx_FLOAT (V2SFmode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (12)])); } /* ../../gcc/gcc/config/i386/i386.md:21075 */ rtx gen_cvtps2pi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (V2SImode, gen_rtx_FIX (V4SImode, operand1), gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const0_rtx, const1_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:21085 */ rtx gen_cvttps2pi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (V2SImode, gen_rtx_UNSPEC (V4SImode, gen_rtvec (1, operand1), 30), gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const0_rtx, const1_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:21096 */ rtx gen_cvtsi2ss (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, operand1, gen_rtx_VEC_DUPLICATE (V4SFmode, gen_rtx_FLOAT (SFmode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (14)])); } /* ../../gcc/gcc/config/i386/i386.md:21109 */ rtx gen_cvtsi2ssq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, operand1, gen_rtx_VEC_DUPLICATE (V4SFmode, gen_rtx_FLOAT (SFmode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (14)])); } /* ../../gcc/gcc/config/i386/i386.md:21122 */ rtx gen_cvtss2si (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (SImode, gen_rtx_FIX (V4SImode, operand1), gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))); } /* 
../../gcc/gcc/config/i386/i386.md:21133 */ rtx gen_cvtss2siq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (DImode, gen_rtx_FIX (V4DImode, operand1), gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:21144 */ rtx gen_cvttss2si (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (SImode, gen_rtx_UNSPEC (V4SImode, gen_rtvec (1, operand1), 30), gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:21156 */ rtx gen_cvttss2siq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (DImode, gen_rtx_UNSPEC (V4DImode, gen_rtvec (1, operand1), 30), gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:21173 */ rtx gen_addv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21182 */ rtx gen_addv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21191 */ rtx gen_addv2si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21200 */ rtx gen_mmx_adddi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, gen_rtx_PLUS (DImode, operand1, operand2)), 45)); } /* ../../gcc/gcc/config/i386/i386.md:21211 */ rtx gen_ssaddv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SS_PLUS (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21220 */ rtx gen_ssaddv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SS_PLUS (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21229 */ rtx gen_usaddv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_US_PLUS (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21238 */ rtx gen_usaddv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_US_PLUS (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21247 */ rtx gen_subv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21256 */ rtx gen_subv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21265 */ rtx gen_subv2si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx 
operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21274 */ rtx gen_mmx_subdi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, gen_rtx_MINUS (DImode, operand1, operand2)), 45)); } /* ../../gcc/gcc/config/i386/i386.md:21285 */ rtx gen_sssubv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SS_MINUS (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21294 */ rtx gen_sssubv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SS_MINUS (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21303 */ rtx gen_ussubv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_US_MINUS (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21312 */ rtx gen_ussubv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_US_MINUS (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21321 */ rtx gen_mulv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21330 */ rtx gen_smulv4hi3_highpart (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_TRUNCATE (V4HImode, gen_rtx_LSHIFTRT (V4SImode, gen_rtx_MULT (V4SImode, gen_rtx_SIGN_EXTEND (V4SImode, operand1), gen_rtx_SIGN_EXTEND (V4SImode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (16)]))); } /* ../../gcc/gcc/config/i386/i386.md:21344 */ rtx gen_umulv4hi3_highpart (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_TRUNCATE (V4HImode, gen_rtx_LSHIFTRT (V4SImode, gen_rtx_MULT (V4SImode, gen_rtx_ZERO_EXTEND (V4SImode, operand1), gen_rtx_ZERO_EXTEND (V4SImode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (16)]))); } /* ../../gcc/gcc/config/i386/i386.md:21358 */ rtx gen_mmx_pmaddwd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V2SImode, gen_rtx_MULT (V2SImode, gen_rtx_SIGN_EXTEND (V2SImode, gen_rtx_VEC_SELECT (V2HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)])))), gen_rtx_SIGN_EXTEND (V2SImode, gen_rtx_VEC_SELECT (V2HImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)]))))), gen_rtx_MULT (V2SImode, gen_rtx_SIGN_EXTEND (V2SImode, gen_rtx_VEC_SELECT (V2HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)])))), gen_rtx_SIGN_EXTEND (V2SImode, gen_rtx_VEC_SELECT (V2HImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)]))))))); } /* ../../gcc/gcc/config/i386/i386.md:21385 */ 
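/* Illustrative note: the bare integers in the gen_rtx_UNSPEC and
   gen_rtx_UNSPEC_VOLATILE calls (30, 32, 33, 34, 41, 45, ...) are the
   UNSPEC/UNSPECV codes used by the corresponding i386.md patterns; genemit
   copies them through as plain numbers.  The MMX DImode operations here
   (gen_mmx_adddi3, gen_mmx_subdi3, and the logical variants below) wrap the
   ordinary PLUS/MINUS/AND/IOR/XOR body in such an UNSPEC, which -- as far as
   can be told from the patterns -- keeps them distinct from the plain
   integer-unit DImode instructions so they stay on the MMX register set.  */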
rtx gen_mmx_iordi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, gen_rtx_IOR (DImode, operand1, operand2)), 45)); } /* ../../gcc/gcc/config/i386/i386.md:21396 */ rtx gen_mmx_xordi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, gen_rtx_XOR (DImode, operand1, operand2)), 45)); } /* ../../gcc/gcc/config/i386/i386.md:21410 */ rtx gen_mmx_clrdi (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, const0_rtx), 45)); } /* ../../gcc/gcc/config/i386/i386.md:21419 */ rtx gen_mmx_anddi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, gen_rtx_AND (DImode, operand1, operand2)), 45)); } /* ../../gcc/gcc/config/i386/i386.md:21430 */ rtx gen_mmx_nanddi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, gen_rtx_AND (DImode, gen_rtx_NOT (DImode, operand1), operand2)), 45)); } /* ../../gcc/gcc/config/i386/i386.md:21444 */ rtx gen_mmx_uavgv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (V8QImode, gen_rtx_PLUS (V8QImode, gen_rtx_PLUS (V8QImode, operand1, operand2), gen_rtx_CONST_VECTOR (V8QImode, gen_rtvec (8, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx))), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:21464 */ rtx gen_mmx_uavgv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (V4HImode, gen_rtx_PLUS (V4HImode, gen_rtx_PLUS (V4HImode, operand1, operand2), gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec (4, const1_rtx, const1_rtx, const1_rtx, const1_rtx))), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:21480 */ rtx gen_mmx_psadbw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (2, operand1, operand2), 61)); } /* ../../gcc/gcc/config/i386/i386.md:21493 */ rtx gen_mmx_pinsrw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4HImode, operand1, gen_rtx_VEC_DUPLICATE (V4HImode, gen_rtx_TRUNCATE (HImode, operand2)), operand3)); } /* ../../gcc/gcc/config/i386/i386.md:21504 */ rtx gen_mmx_pextrw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (SImode, gen_rtx_VEC_SELECT (HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, operand2))))); } /* ../../gcc/gcc/config/i386/i386.md:21514 */ rtx gen_mmx_pshufw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4HImode, gen_rtvec (2, operand1, operand2), 41)); } /* ../../gcc/gcc/config/i386/i386.md:21527 */ rtx gen_eqv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 
ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_EQ (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21536 */ rtx gen_eqv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_EQ (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21545 */ rtx gen_eqv2si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_EQ (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21554 */ rtx gen_gtv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_GT (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21563 */ rtx gen_gtv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_GT (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21572 */ rtx gen_gtv2si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_GT (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21584 */ rtx gen_umaxv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UMAX (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21593 */ rtx gen_smaxv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMAX (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21602 */ rtx gen_uminv8qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UMIN (V8QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21611 */ rtx gen_sminv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMIN (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21623 */ rtx gen_ashrv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21632 */ rtx gen_ashrv2si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21641 */ rtx gen_lshrv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21650 */ rtx gen_lshrv2si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21660 */ rtx gen_mmx_lshrdi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, 
operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, gen_rtx_LSHIFTRT (DImode, operand1, operand2)), 45)); } /* ../../gcc/gcc/config/i386/i386.md:21671 */ rtx gen_ashlv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (V4HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21680 */ rtx gen_ashlv2si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21690 */ rtx gen_mmx_ashldi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, gen_rtx_ASHIFT (DImode, operand1, operand2)), 45)); } /* ../../gcc/gcc/config/i386/i386.md:21704 */ rtx gen_mmx_packsswb (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V8QImode, gen_rtx_SS_TRUNCATE (V4QImode, operand1), gen_rtx_SS_TRUNCATE (V4QImode, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:21714 */ rtx gen_mmx_packssdw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V4HImode, gen_rtx_SS_TRUNCATE (V2HImode, operand1), gen_rtx_SS_TRUNCATE (V2HImode, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:21724 */ rtx gen_mmx_packuswb (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V8QImode, gen_rtx_US_TRUNCATE (V4QImode, operand1), gen_rtx_US_TRUNCATE (V4QImode, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:21734 */ rtx gen_mmx_punpckhbw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V8QImode, gen_rtx_VEC_SELECT (V8QImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (8, const_int_rtx[MAX_SAVED_CONST_INT + (4)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (5)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (7)], const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), gen_rtx_VEC_SELECT (V8QImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (8, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (4)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (7)]))), GEN_INT (85L))); } /* ../../gcc/gcc/config/i386/i386.md:21761 */ rtx gen_mmx_punpckhwd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4HImode, gen_rtx_VEC_SELECT (V4HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), gen_rtx_VEC_SELECT (V4HImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)], const1_rtx))), const_int_rtx[MAX_SAVED_CONST_INT + (5)])); } /* ../../gcc/gcc/config/i386/i386.md:21780 */ rtx 
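/* Illustrative note: the punpck/unpck emitters express the shuffle directly
   in RTL rather than through an UNSPEC.  The PARALLEL of CONST_INTs under
   each VEC_SELECT lists the source lane order, and the final VEC_MERGE
   constant is a bit mask choosing, per result lane, whether the element comes
   from the first or the second selection.  GEN_INT (85L) is 0x55 =
   0b01010101, the byte-interleave pattern of punpcklbw/punpckhbw, while the
   const_int 5 (0b0101) plays the same role for the four-lane word and
   single-float variants such as gen_mmx_punpckhwd and gen_sse_unpckhps.  */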
gen_mmx_punpckhdq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2SImode, operand1, gen_rtx_VEC_SELECT (V2SImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx))), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:21793 */ rtx gen_mmx_punpcklbw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V8QImode, gen_rtx_VEC_SELECT (V8QImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (8, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (4)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (7)]))), gen_rtx_VEC_SELECT (V8QImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (8, const_int_rtx[MAX_SAVED_CONST_INT + (4)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (5)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (7)], const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), GEN_INT (85L))); } /* ../../gcc/gcc/config/i386/i386.md:21820 */ rtx gen_mmx_punpcklwd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4HImode, gen_rtx_VEC_SELECT (V4HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)], const1_rtx))), gen_rtx_VEC_SELECT (V4HImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), const_int_rtx[MAX_SAVED_CONST_INT + (5)])); } /* ../../gcc/gcc/config/i386/i386.md:21839 */ rtx gen_mmx_punpckldq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2SImode, gen_rtx_VEC_SELECT (V2SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx))), operand2, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:21855 */ rtx gen_emms (void) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (17, gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, const0_rtx), 31), gen_hard_reg_clobber (XFmode, 8), gen_hard_reg_clobber (XFmode, 9), gen_hard_reg_clobber (XFmode, 10), gen_hard_reg_clobber (XFmode, 11), gen_hard_reg_clobber (XFmode, 12), gen_hard_reg_clobber (XFmode, 13), gen_hard_reg_clobber (XFmode, 14), gen_hard_reg_clobber (XFmode, 15), gen_hard_reg_clobber (DImode, 29), gen_hard_reg_clobber (DImode, 30), gen_hard_reg_clobber (DImode, 31), gen_hard_reg_clobber (DImode, 32), gen_hard_reg_clobber (DImode, 33), gen_hard_reg_clobber (DImode, 34), gen_hard_reg_clobber (DImode, 35), gen_hard_reg_clobber (DImode, 36))); } /* ../../gcc/gcc/config/i386/i386.md:21878 */ rtx gen_ldmxcsr (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, operand0), 37); } /* ../../gcc/gcc/config/i386/i386.md:21886 */ rtx gen_stmxcsr (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, const0_rtx), 40)); } /* ../../gcc/gcc/config/i386/i386.md:21974 */ rtx gen_addv2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 
ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V2SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21983 */ rtx gen_subv2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V2SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21992 */ rtx gen_subrv2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V2SFmode, operand2, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:22001 */ rtx gen_gtv2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_GT (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22010 */ rtx gen_gev2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_GE (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22019 */ rtx gen_eqv2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_EQ (V2SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22028 */ rtx gen_pfmaxv2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMAX (V2SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22037 */ rtx gen_pfminv2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMIN (V2SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22046 */ rtx gen_mulv2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (V2SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22055 */ rtx gen_femms (void) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (17, gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, const0_rtx), 46), gen_hard_reg_clobber (XFmode, 8), gen_hard_reg_clobber (XFmode, 9), gen_hard_reg_clobber (XFmode, 10), gen_hard_reg_clobber (XFmode, 11), gen_hard_reg_clobber (XFmode, 12), gen_hard_reg_clobber (XFmode, 13), gen_hard_reg_clobber (XFmode, 14), gen_hard_reg_clobber (XFmode, 15), gen_hard_reg_clobber (DImode, 29), gen_hard_reg_clobber (DImode, 30), gen_hard_reg_clobber (DImode, 31), gen_hard_reg_clobber (DImode, 32), gen_hard_reg_clobber (DImode, 33), gen_hard_reg_clobber (DImode, 34), gen_hard_reg_clobber (DImode, 35), gen_hard_reg_clobber (DImode, 36))); } /* ../../gcc/gcc/config/i386/i386.md:22078 */ rtx gen_pf2id (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (V2SImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:22086 */ rtx gen_pf2iw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SIGN_EXTEND (V2SImode, gen_rtx_SS_TRUNCATE (V2HImode, gen_rtx_FIX (V2SImode, operand1)))); } /* ../../gcc/gcc/config/i386/i386.md:22096 */ rtx gen_pfacc (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V2SFmode, 
gen_rtx_PLUS (SFmode, gen_rtx_VEC_SELECT (SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx)))), gen_rtx_PLUS (SFmode, gen_rtx_VEC_SELECT (SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx)))))); } /* ../../gcc/gcc/config/i386/i386.md:22114 */ rtx gen_pfnacc (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V2SFmode, gen_rtx_MINUS (SFmode, gen_rtx_VEC_SELECT (SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx)))), gen_rtx_MINUS (SFmode, gen_rtx_VEC_SELECT (SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx)))))); } /* ../../gcc/gcc/config/i386/i386.md:22132 */ rtx gen_pfpnacc (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V2SFmode, gen_rtx_MINUS (SFmode, gen_rtx_VEC_SELECT (SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx)))), gen_rtx_PLUS (SFmode, gen_rtx_VEC_SELECT (SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx)))))); } /* ../../gcc/gcc/config/i386/i386.md:22150 */ rtx gen_pi2fw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (V2SFmode, gen_rtx_VEC_CONCAT (V2SImode, gen_rtx_SIGN_EXTEND (SImode, gen_rtx_TRUNCATE (HImode, gen_rtx_VEC_SELECT (SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))))), gen_rtx_SIGN_EXTEND (SImode, gen_rtx_TRUNCATE (HImode, gen_rtx_VEC_SELECT (SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx)))))))); } /* ../../gcc/gcc/config/i386/i386.md:22167 */ rtx gen_floatv2si2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (V2SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:22178 */ rtx gen_pavgusb (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V8QImode, gen_rtvec (2, operand1, operand2), 49)); } /* ../../gcc/gcc/config/i386/i386.md:22191 */ rtx gen_pfrcpv2sf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2SFmode, gen_rtvec (1, operand1), 50)); } /* ../../gcc/gcc/config/i386/i386.md:22200 */ rtx gen_pfrcpit1v2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2SFmode, gen_rtvec (2, operand1, operand2), 51)); } /* ../../gcc/gcc/config/i386/i386.md:22210 */ rtx gen_pfrcpit2v2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2SFmode, gen_rtvec (2, operand1, operand2), 52)); } /* 
../../gcc/gcc/config/i386/i386.md:22220 */ rtx gen_pfrsqrtv2sf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2SFmode, gen_rtvec (1, operand1), 53)); } /* ../../gcc/gcc/config/i386/i386.md:22229 */ rtx gen_pfrsqit1v2sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2SFmode, gen_rtvec (2, operand1, operand2), 54)); } /* ../../gcc/gcc/config/i386/i386.md:22239 */ rtx gen_pmulhrwv4hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_TRUNCATE (V4HImode, gen_rtx_LSHIFTRT (V4SImode, gen_rtx_PLUS (V4SImode, gen_rtx_MULT (V4SImode, gen_rtx_SIGN_EXTEND (V4SImode, operand1), gen_rtx_SIGN_EXTEND (V4SImode, operand2)), gen_rtx_CONST_VECTOR (V4SImode, gen_rtvec (4, GEN_INT (32768L), GEN_INT (32768L), GEN_INT (32768L), GEN_INT (32768L)))), const_int_rtx[MAX_SAVED_CONST_INT + (16)]))); } /* ../../gcc/gcc/config/i386/i386.md:22259 */ rtx gen_pswapdv2si2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (V2SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:22268 */ rtx gen_pswapdv2sf2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (V2SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:22371 */ rtx gen_addv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V2DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22380 */ rtx gen_vmaddv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, gen_rtx_PLUS (V2DFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:22391 */ rtx gen_subv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V2DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22400 */ rtx gen_vmsubv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, gen_rtx_MINUS (V2DFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:22411 */ rtx gen_mulv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (V2DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22420 */ rtx gen_vmmulv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, gen_rtx_MULT (V2DFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:22431 */ rtx gen_divv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (V2DFmode, operand1, operand2)); } /* 
../../gcc/gcc/config/i386/i386.md:22440 */ rtx gen_vmdivv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, gen_rtx_DIV (V2DFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:22453 */ rtx gen_smaxv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMAX (V2DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22462 */ rtx gen_vmsmaxv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, gen_rtx_SMAX (V2DFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:22473 */ rtx gen_sminv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMIN (V2DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22482 */ rtx gen_vmsminv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, gen_rtx_SMIN (V2DFmode, operand1, operand2), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:22495 */ rtx gen_sqrtv2df2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (V2DFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:22503 */ rtx gen_vmsqrtv2df2 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, gen_rtx_SQRT (V2DFmode, operand1), operand2, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:22515 */ rtx gen_maskcmpv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_fmt_ee (GET_CODE (operand3), V2DImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22525 */ rtx gen_maskncmpv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_NOT (V2DImode, gen_rtx_fmt_ee (GET_CODE (operand3), V2DImode, operand1, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:22541 */ rtx gen_vmmaskcmpv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DImode, gen_rtx_fmt_ee (GET_CODE (operand3), V2DImode, operand1, operand2), gen_rtx_SUBREG (V2DImode, operand1, 0), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:22554 */ rtx gen_vmmaskncmpv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DImode, gen_rtx_NOT (V2DImode, gen_rtx_fmt_ee (GET_CODE (operand3), V2DImode, operand1, operand2)), gen_rtx_SUBREG (V2DImode, operand1, 0), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:22573 */ rtx gen_sse2_comi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 17), gen_rtx_COMPARE (CCFPmode, 
gen_rtx_VEC_SELECT (DFmode, operand0, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:22586 */ rtx gen_sse2_ucomi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPUmode, 17), gen_rtx_COMPARE (CCFPUmode, gen_rtx_VEC_SELECT (DFmode, operand0, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:22601 */ rtx gen_sse2_movmskpd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (1, operand1), 33)); } /* ../../gcc/gcc/config/i386/i386.md:22610 */ rtx gen_sse2_pmovmskb (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (1, operand1), 33)); } /* ../../gcc/gcc/config/i386/i386.md:22619 */ rtx gen_sse2_maskmovdqu (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_MEM (V16QImode, operand0), gen_rtx_UNSPEC (V16QImode, gen_rtvec (2, operand1, operand2), 32)); } /* ../../gcc/gcc/config/i386/i386.md:22630 */ rtx gen_sse2_maskmovdqu_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, gen_rtx_MEM (V16QImode, operand0), gen_rtx_UNSPEC (V16QImode, gen_rtvec (2, operand1, operand2), 32)); } /* ../../gcc/gcc/config/i386/i386.md:22641 */ rtx gen_sse2_movntv2df (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DFmode, gen_rtvec (1, operand1), 34)); } /* ../../gcc/gcc/config/i386/i386.md:22650 */ rtx gen_sse2_movntv2di (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DImode, gen_rtvec (1, operand1), 34)); } /* ../../gcc/gcc/config/i386/i386.md:22659 */ rtx gen_sse2_movntsi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (1, operand1), 34)); } /* ../../gcc/gcc/config/i386/i386.md:22672 */ rtx gen_cvtdq2ps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (V4SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:22680 */ rtx gen_cvtps2dq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (V4SImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:22688 */ rtx gen_cvttps2dq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SImode, gen_rtvec (1, operand1), 30)); } /* ../../gcc/gcc/config/i386/i386.md:22699 */ rtx gen_cvtdq2pd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (V2DFmode, gen_rtx_VEC_SELECT (V2SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const0_rtx, const1_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:22711 */ rtx gen_cvtpd2dq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V4SImode, gen_rtx_FIX (V2SImode, operand1), 
gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, const0_rtx, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:22721 */ rtx gen_cvttpd2dq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V4SImode, gen_rtx_UNSPEC (V2SImode, gen_rtvec (1, operand1), 30), gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, const0_rtx, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:22732 */ rtx gen_cvtpd2pi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (V2SImode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:22740 */ rtx gen_cvttpd2pi (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2SImode, gen_rtvec (1, operand1), 30)); } /* ../../gcc/gcc/config/i386/i386.md:22749 */ rtx gen_cvtpi2pd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (V2DFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:22759 */ rtx gen_cvtsd2si (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:22769 */ rtx gen_cvtsd2siq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:22779 */ rtx gen_cvttsd2si (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (1, gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))), 30)); } /* ../../gcc/gcc/config/i386/i386.md:22789 */ rtx gen_cvttsd2siq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (1, gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))), 30)); } /* ../../gcc/gcc/config/i386/i386.md:22799 */ rtx gen_cvtsi2sd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, operand1, gen_rtx_VEC_DUPLICATE (V2DFmode, gen_rtx_FLOAT (DFmode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (2)])); } /* ../../gcc/gcc/config/i386/i386.md:22812 */ rtx gen_cvtsi2sdq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, operand1, gen_rtx_VEC_DUPLICATE (V2DFmode, gen_rtx_FLOAT (DFmode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (2)])); } /* ../../gcc/gcc/config/i386/i386.md:22827 */ rtx gen_cvtsd2ss (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, operand1, gen_rtx_VEC_DUPLICATE (V4SFmode, gen_rtx_FLOAT_TRUNCATE (V2SFmode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (14)])); } /* ../../gcc/gcc/config/i386/i386.md:22840 */ rtx gen_cvtss2sd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, operand1, 
gen_rtx_FLOAT_EXTEND (V2DFmode, gen_rtx_VEC_SELECT (V2SFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const0_rtx, const1_rtx)))), const_int_rtx[MAX_SAVED_CONST_INT + (2)])); } /* ../../gcc/gcc/config/i386/i386.md:22854 */ rtx gen_cvtpd2ps (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SUBREG (V4SFmode, gen_rtx_VEC_CONCAT (V4SImode, gen_rtx_SUBREG (V2SImode, gen_rtx_FLOAT_TRUNCATE (V2SFmode, operand1), 0), gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, const0_rtx, const0_rtx))), 0)); } /* ../../gcc/gcc/config/i386/i386.md:22866 */ rtx gen_cvtps2pd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_EXTEND (V2DFmode, gen_rtx_VEC_SELECT (V2SFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const0_rtx, const1_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:22881 */ rtx gen_addv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22890 */ rtx gen_addv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22899 */ rtx gen_addv4si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V4SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22908 */ rtx gen_addv2di3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V2DImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22917 */ rtx gen_ssaddv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SS_PLUS (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22926 */ rtx gen_ssaddv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SS_PLUS (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22935 */ rtx gen_usaddv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_US_PLUS (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22944 */ rtx gen_usaddv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_US_PLUS (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22953 */ rtx gen_subv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22962 */ rtx gen_subv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22971 */ rtx gen_subv4si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return 
gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V4SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22980 */ rtx gen_subv2di3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (V2DImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22989 */ rtx gen_sssubv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SS_MINUS (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:22998 */ rtx gen_sssubv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SS_MINUS (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23007 */ rtx gen_ussubv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_US_MINUS (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23016 */ rtx gen_ussubv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_US_MINUS (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23025 */ rtx gen_mulv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23034 */ rtx gen_smulv8hi3_highpart (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_TRUNCATE (V8HImode, gen_rtx_LSHIFTRT (V8SImode, gen_rtx_MULT (V8SImode, gen_rtx_SIGN_EXTEND (V8SImode, operand1), gen_rtx_SIGN_EXTEND (V8SImode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (16)]))); } /* ../../gcc/gcc/config/i386/i386.md:23046 */ rtx gen_umulv8hi3_highpart (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_TRUNCATE (V8HImode, gen_rtx_LSHIFTRT (V8SImode, gen_rtx_MULT (V8SImode, gen_rtx_ZERO_EXTEND (V8SImode, operand1), gen_rtx_ZERO_EXTEND (V8SImode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (16)]))); } /* ../../gcc/gcc/config/i386/i386.md:23058 */ rtx gen_sse2_umulsidi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (DImode, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_VEC_SELECT (SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))), gen_rtx_ZERO_EXTEND (DImode, gen_rtx_VEC_SELECT (SImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))))); } /* ../../gcc/gcc/config/i386/i386.md:23071 */ rtx gen_sse2_umulv2siv2di3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (V2DImode, gen_rtx_ZERO_EXTEND (V2DImode, gen_rtx_VEC_SELECT (V2SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)])))), gen_rtx_ZERO_EXTEND (V2DImode, gen_rtx_VEC_SELECT (V2SImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)])))))); } /* ../../gcc/gcc/config/i386/i386.md:23086 */ rtx 
gen_sse2_pmaddwd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (V4SImode, gen_rtx_MULT (V4SImode, gen_rtx_SIGN_EXTEND (V4SImode, gen_rtx_VEC_SELECT (V4HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (4)], const_int_rtx[MAX_SAVED_CONST_INT + (6)])))), gen_rtx_SIGN_EXTEND (V4SImode, gen_rtx_VEC_SELECT (V4HImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (4)], const_int_rtx[MAX_SAVED_CONST_INT + (6)]))))), gen_rtx_MULT (V4SImode, gen_rtx_SIGN_EXTEND (V4SImode, gen_rtx_VEC_SELECT (V4HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (7)])))), gen_rtx_SIGN_EXTEND (V4SImode, gen_rtx_VEC_SELECT (V4HImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (7)]))))))); } /* ../../gcc/gcc/config/i386/i386.md:23118 */ rtx gen_sse2_clrti (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, const0_rtx); } /* ../../gcc/gcc/config/i386/i386.md:23138 */ rtx gen_sse2_uavgv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (V16QImode, gen_rtx_PLUS (V16QImode, gen_rtx_PLUS (V16QImode, operand1, operand2), gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec (16, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx))), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23158 */ rtx gen_sse2_uavgv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (V8HImode, gen_rtx_PLUS (V8HImode, gen_rtx_PLUS (V8HImode, operand1, operand2), gen_rtx_CONST_VECTOR (V8HImode, gen_rtvec (8, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx, const1_rtx))), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23175 */ rtx gen_sse2_psadbw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DImode, gen_rtvec (2, operand1, operand2), 61)); } /* ../../gcc/gcc/config/i386/i386.md:23188 */ rtx gen_sse2_pinsrw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V8HImode, operand1, gen_rtx_VEC_DUPLICATE (V8HImode, gen_rtx_TRUNCATE (HImode, operand2)), operand3)); } /* ../../gcc/gcc/config/i386/i386.md:23200 */ rtx gen_sse2_pextrw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (SImode, gen_rtx_VEC_SELECT (HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, operand2))))); } /* ../../gcc/gcc/config/i386/i386.md:23211 */ rtx gen_sse2_pshufd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { 
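/* pshufd, and pshuflw/pshufhw just below, keep the 8-bit shuffle selector
     (operand2) opaque by passing it through an UNSPEC together with the source
     vector, rather than expanding the permutation into an explicit VEC_SELECT.  */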
return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SImode, gen_rtvec (2, operand1, operand2), 41)); } /* ../../gcc/gcc/config/i386/i386.md:23221 */ rtx gen_sse2_pshuflw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V8HImode, gen_rtvec (2, operand1, operand2), 55)); } /* ../../gcc/gcc/config/i386/i386.md:23231 */ rtx gen_sse2_pshufhw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V8HImode, gen_rtvec (2, operand1, operand2), 56)); } /* ../../gcc/gcc/config/i386/i386.md:23243 */ rtx gen_eqv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_EQ (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23252 */ rtx gen_eqv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_EQ (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23261 */ rtx gen_eqv4si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_EQ (V4SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23270 */ rtx gen_gtv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_GT (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23279 */ rtx gen_gtv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_GT (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23288 */ rtx gen_gtv4si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_GT (V4SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23300 */ rtx gen_umaxv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UMAX (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23309 */ rtx gen_smaxv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMAX (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23318 */ rtx gen_uminv16qi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UMIN (V16QImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23327 */ rtx gen_sminv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_SMIN (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23339 */ rtx gen_ashrv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23348 */ rtx gen_ashrv4si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET 
(VOIDmode, operand0, gen_rtx_ASHIFTRT (V4SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23357 */ rtx gen_lshrv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23366 */ rtx gen_lshrv4si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (V4SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23375 */ rtx gen_lshrv2di3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (V2DImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23384 */ rtx gen_ashlv8hi3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (V8HImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23393 */ rtx gen_ashlv4si3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (V4SImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23402 */ rtx gen_ashlv2di3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (V2DImode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:23411 */ rtx gen_ashrv8hi3_ti (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (V8HImode, operand1, gen_rtx_SUBREG (SImode, operand2, 0))); } /* ../../gcc/gcc/config/i386/i386.md:23420 */ rtx gen_ashrv4si3_ti (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (V4SImode, operand1, gen_rtx_SUBREG (SImode, operand2, 0))); } /* ../../gcc/gcc/config/i386/i386.md:23429 */ rtx gen_lshrv8hi3_ti (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (V8HImode, operand1, gen_rtx_SUBREG (SImode, operand2, 0))); } /* ../../gcc/gcc/config/i386/i386.md:23438 */ rtx gen_lshrv4si3_ti (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (V4SImode, operand1, gen_rtx_SUBREG (SImode, operand2, 0))); } /* ../../gcc/gcc/config/i386/i386.md:23447 */ rtx gen_lshrv2di3_ti (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (V2DImode, operand1, gen_rtx_SUBREG (SImode, operand2, 0))); } /* ../../gcc/gcc/config/i386/i386.md:23456 */ rtx gen_ashlv8hi3_ti (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (V8HImode, operand1, gen_rtx_SUBREG (SImode, operand2, 0))); } /* ../../gcc/gcc/config/i386/i386.md:23465 */ rtx gen_ashlv4si3_ti (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (V4SImode, operand1, gen_rtx_SUBREG (SImode, operand2, 0))); 
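/* The *_ti shift variants (ashrv*3_ti, lshrv*3_ti, ashlv*3_ti here and below)
     differ from the plain versions above only in taking the shift count from a
     vector register: the count is reinterpreted as its low SImode word via
     SUBREG (SImode, operand2, 0).  */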
} /* ../../gcc/gcc/config/i386/i386.md:23474 */ rtx gen_ashlv2di3_ti (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (V2DImode, operand1, gen_rtx_SUBREG (SImode, operand2, 0))); } /* ../../gcc/gcc/config/i386/i386.md:23487 */ rtx gen_sse2_ashlti3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (TImode, gen_rtvec (1, gen_rtx_ASHIFT (TImode, operand1, gen_rtx_MULT (SImode, operand2, const_int_rtx[MAX_SAVED_CONST_INT + (8)]))), 45)); } /* ../../gcc/gcc/config/i386/i386.md:23498 */ rtx gen_sse2_lshrti3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (TImode, gen_rtvec (1, gen_rtx_LSHIFTRT (TImode, operand1, gen_rtx_MULT (SImode, operand2, const_int_rtx[MAX_SAVED_CONST_INT + (8)]))), 45)); } /* ../../gcc/gcc/config/i386/i386.md:23511 */ rtx gen_sse2_unpckhpd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V2DFmode, gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx))), gen_rtx_VEC_SELECT (DFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const1_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:23523 */ rtx gen_sse2_unpcklpd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V2DFmode, gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), gen_rtx_VEC_SELECT (DFmode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:23537 */ rtx gen_sse2_packsswb (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V16QImode, gen_rtx_SS_TRUNCATE (V8QImode, operand1), gen_rtx_SS_TRUNCATE (V8QImode, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:23547 */ rtx gen_sse2_packssdw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V8HImode, gen_rtx_SS_TRUNCATE (V4HImode, operand1), gen_rtx_SS_TRUNCATE (V4HImode, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:23557 */ rtx gen_sse2_packuswb (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V16QImode, gen_rtx_US_TRUNCATE (V8QImode, operand1), gen_rtx_US_TRUNCATE (V8QImode, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:23567 */ rtx gen_sse2_punpckhbw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V16QImode, gen_rtx_VEC_SELECT (V16QImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (16, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (9)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (10)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (11)], const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (12)], const_int_rtx[MAX_SAVED_CONST_INT + (4)], const_int_rtx[MAX_SAVED_CONST_INT + (13)], 
const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (14)], const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (15)], const_int_rtx[MAX_SAVED_CONST_INT + (7)]))), gen_rtx_VEC_SELECT (V16QImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (16, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (9)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (10)], const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (11)], const_int_rtx[MAX_SAVED_CONST_INT + (4)], const_int_rtx[MAX_SAVED_CONST_INT + (12)], const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (13)], const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (14)], const_int_rtx[MAX_SAVED_CONST_INT + (7)], const_int_rtx[MAX_SAVED_CONST_INT + (15)]))), GEN_INT (21845L))); } /* ../../gcc/gcc/config/i386/i386.md:23594 */ rtx gen_sse2_punpckhwd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V8HImode, gen_rtx_VEC_SELECT (V8HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (8, const_int_rtx[MAX_SAVED_CONST_INT + (4)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (5)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (7)], const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), gen_rtx_VEC_SELECT (V8HImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (8, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (4)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (7)]))), GEN_INT (85L))); } /* ../../gcc/gcc/config/i386/i386.md:23613 */ rtx gen_sse2_punpckhdq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SImode, gen_rtx_VEC_SELECT (V4SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)], const1_rtx))), gen_rtx_VEC_SELECT (V4SImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), const_int_rtx[MAX_SAVED_CONST_INT + (5)])); } /* ../../gcc/gcc/config/i386/i386.md:23628 */ rtx gen_sse2_punpcklbw (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V16QImode, gen_rtx_VEC_SELECT (V16QImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (16, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (9)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (10)], const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (11)], const_int_rtx[MAX_SAVED_CONST_INT + (4)], const_int_rtx[MAX_SAVED_CONST_INT + (12)], const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (13)], const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (14)], const_int_rtx[MAX_SAVED_CONST_INT + (7)], const_int_rtx[MAX_SAVED_CONST_INT + (15)]))), gen_rtx_VEC_SELECT (V16QImode, 
operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (16, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (9)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (10)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (11)], const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (12)], const_int_rtx[MAX_SAVED_CONST_INT + (4)], const_int_rtx[MAX_SAVED_CONST_INT + (13)], const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (14)], const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (15)], const_int_rtx[MAX_SAVED_CONST_INT + (7)]))), GEN_INT (21845L))); } /* ../../gcc/gcc/config/i386/i386.md:23655 */ rtx gen_sse2_punpcklwd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V8HImode, gen_rtx_VEC_SELECT (V8HImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (8, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (4)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (5)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (3)], const_int_rtx[MAX_SAVED_CONST_INT + (7)]))), gen_rtx_VEC_SELECT (V8HImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (8, const_int_rtx[MAX_SAVED_CONST_INT + (4)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (5)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (6)], const_int_rtx[MAX_SAVED_CONST_INT + (2)], const_int_rtx[MAX_SAVED_CONST_INT + (7)], const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), GEN_INT (85L))); } /* ../../gcc/gcc/config/i386/i386.md:23674 */ rtx gen_sse2_punpckldq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SImode, gen_rtx_VEC_SELECT (V4SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const1_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)]))), gen_rtx_VEC_SELECT (V4SImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, const_int_rtx[MAX_SAVED_CONST_INT + (2)], const0_rtx, const_int_rtx[MAX_SAVED_CONST_INT + (3)], const1_rtx))), const_int_rtx[MAX_SAVED_CONST_INT + (5)])); } /* ../../gcc/gcc/config/i386/i386.md:23689 */ rtx gen_sse2_punpcklqdq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DImode, gen_rtx_VEC_SELECT (V2DImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx))), operand1, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23702 */ rtx gen_sse2_punpckhqdq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DImode, operand1, gen_rtx_VEC_SELECT (V2DImode, operand2, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx))), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23717 */ rtx gen_sse2_movapd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DFmode, gen_rtvec (1, operand1), 38)); } /* ../../gcc/gcc/config/i386/i386.md:23727 */ rtx gen_sse2_movupd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DFmode, gen_rtvec (1, operand1), 39)); } /* 
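The movapd/movupd expanders above, and movdqa/movdqu below, wrap the source in a
   one-element UNSPEC (38 for the aligned form, 39 for the unaligned form here),
   presumably so the aligned and unaligned access kinds remain distinct instructions
   instead of collapsing into ordinary vector moves during optimization. */
/*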
../../gcc/gcc/config/i386/i386.md:23737 */ rtx gen_sse2_movdqa (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V16QImode, gen_rtvec (1, operand1), 38)); } /* ../../gcc/gcc/config/i386/i386.md:23747 */ rtx gen_sse2_movdqu (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V16QImode, gen_rtvec (1, operand1), 39)); } /* ../../gcc/gcc/config/i386/i386.md:23757 */ rtx gen_sse2_movdq2q (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (DImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:23768 */ rtx gen_sse2_movdq2q_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (DImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:23780 */ rtx gen_sse2_movq2dq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V2DImode, operand1, const0_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23791 */ rtx gen_sse2_movq2dq_rex64 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V2DImode, operand1, const0_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23803 */ rtx gen_sse2_movq (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_CONCAT (V2DImode, gen_rtx_VEC_SELECT (DImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))), const0_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23814 */ rtx gen_sse2_loadd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SImode, gen_rtx_VEC_DUPLICATE (V4SImode, operand1), gen_rtx_CONST_VECTOR (V4SImode, gen_rtvec (4, const0_rtx, const0_rtx, const0_rtx, const0_rtx)), const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23828 */ rtx gen_sse2_stored (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (SImode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))); } /* ../../gcc/gcc/config/i386/i386.md:23838 */ rtx gen_sse2_movhpd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, operand1, operand2, const_int_rtx[MAX_SAVED_CONST_INT + (2)])); } /* ../../gcc/gcc/config/i386/i386.md:23859 */ rtx gen_sse2_loadsd_1 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, gen_rtx_VEC_DUPLICATE (V2DFmode, operand1), operand2, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23870 */ rtx gen_sse2_movsd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, operand1, operand2, const1_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:23883 */ rtx gen_sse2_storesd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx)))); } /* 
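shufpd below carries its immediate selector through an UNSPEC, like pshufd above;
   after it come the SSE3 patterns (addsub*, hadd*, hsub*, movshdup, movsldup, lddqu),
   each likewise modelled as a single UNSPEC with its own number. */
/*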
../../gcc/gcc/config/i386/i386.md:23893 */ rtx gen_sse2_shufpd (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED, rtx operand3 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DFmode, gen_rtvec (3, operand1, operand2, operand3), 41)); } /* ../../gcc/gcc/config/i386/i386.md:23905 */ rtx gen_sse2_clflush (rtx operand0 ATTRIBUTE_UNUSED) { return gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, operand0), 57); } /* ../../gcc/gcc/config/i386/i386.md:23949 */ rtx gen_mwait (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (2, operand0, operand1), 70); } /* ../../gcc/gcc/config/i386/i386.md:23957 */ rtx gen_monitor (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (3, operand0, operand1, operand2), 69); } /* ../../gcc/gcc/config/i386/i386.md:23968 */ rtx gen_addsubv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (2, operand1, operand2), 71)); } /* ../../gcc/gcc/config/i386/i386.md:23978 */ rtx gen_addsubv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DFmode, gen_rtvec (2, operand1, operand2), 71)); } /* ../../gcc/gcc/config/i386/i386.md:23988 */ rtx gen_haddv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (2, operand1, operand2), 72)); } /* ../../gcc/gcc/config/i386/i386.md:23998 */ rtx gen_haddv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DFmode, gen_rtvec (2, operand1, operand2), 72)); } /* ../../gcc/gcc/config/i386/i386.md:24008 */ rtx gen_hsubv4sf3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (2, operand1, operand2), 73)); } /* ../../gcc/gcc/config/i386/i386.md:24018 */ rtx gen_hsubv2df3 (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED, rtx operand2 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V2DFmode, gen_rtvec (2, operand1, operand2), 73)); } /* ../../gcc/gcc/config/i386/i386.md:24028 */ rtx gen_movshdup (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (1, operand1), 74)); } /* ../../gcc/gcc/config/i386/i386.md:24037 */ rtx gen_movsldup (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (1, operand1), 75)); } /* ../../gcc/gcc/config/i386/i386.md:24046 */ rtx gen_lddqu (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V16QImode, gen_rtvec (1, operand1), 76)); } /* ../../gcc/gcc/config/i386/i386.md:24055 */ rtx gen_loadddup (rtx operand0 ATTRIBUTE_UNUSED, rtx operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_DUPLICATE (V2DFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:24063 */ rtx gen_movddup (rtx operand0 ATTRIBUTE_UNUSED, rtx 
operand1 ATTRIBUTE_UNUSED) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_DUPLICATE (V2DFmode, gen_rtx_VEC_SELECT (DFmode, operand1, gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx))))); } /* ../../gcc/gcc/config/i386/i386.md:440 */ rtx gen_cmpdi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) operands[0] = force_reg (DImode, operands[0]); ix86_compare_op0 = operands[0]; ix86_compare_op1 = operands[1]; DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand0, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:453 */ rtx gen_cmpsi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) operands[0] = force_reg (SImode, operands[0]); ix86_compare_op0 = operands[0]; ix86_compare_op1 = operands[1]; DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand0, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:466 */ rtx gen_cmphi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) operands[0] = force_reg (HImode, operands[0]); ix86_compare_op0 = operands[0]; ix86_compare_op1 = operands[1]; DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand0, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:479 */ rtx gen_cmpqi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) operands[0] = force_reg (QImode, operands[0]); ix86_compare_op0 = operands[0]; ix86_compare_op1 = operands[1]; DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand0, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:514 */ rtx gen_cmpdi_1_rex64 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand0, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:553 */ rtx gen_cmpsi_1 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand0, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:677 */ rtx gen_cmpqi_ext_3 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, gen_rtx_SUBREG (QImode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), 0), operand1)); } /* ../../gcc/gcc/config/i386/i386.md:740 */ rtx gen_cmpxf (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_compare_op0 = operands[0]; ix86_compare_op1 = operands[1]; 
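/* Like the cmpdi/cmpsi/cmphi/cmpqi expanders above, the floating-point cmp
     expanders only record their operands in ix86_compare_op0/ix86_compare_op1 for
     the later branch/setcc expansion.  DONE, a macro the generator defines to
     return the sequence built so far, then exits early, so the emit_insn of the
     flags-register compare after the brace is not reached for these patterns.  */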
DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand0, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:751 */ rtx gen_cmpdf (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_compare_op0 = operands[0]; ix86_compare_op1 = operands[1]; DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand0, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:762 */ rtx gen_cmpsf (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_compare_op0 = operands[0]; ix86_compare_op1 = operands[1]; DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand0, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:944 */ extern rtx gen_split_1044 (rtx, rtx *); rtx gen_split_1044 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = gen_rtx_MEM (Pmode, stack_pointer_rtx); operands[2] = gen_rtx_FLOAT (GET_MODE (operands[0]), operands[2]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (SImode, gen_rtx_PRE_DEC (SImode, gen_rtx_REG (SImode, 7))), operand1)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 18), gen_rtx_COMPARE (CCFPmode, operand0, operand2))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand1), gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1083 */ rtx gen_movsi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_move (SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1292 */ rtx gen_movhi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_move (HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1423 */ rtx gen_movstricthi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { /* Don't generate memory->memory moves, go through a register */ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) operands[1] = force_reg (HImode, operands[1]); } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand0), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1453 
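-- movqi below, like movsi/movhi above and movdi/movsf/movdf/movxf further on,
   simply hands its operands to ix86_expand_move and relies on DONE to return the
   expanded sequence; the trailing emit_insn of a plain SET is not reached.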
*/ rtx gen_movqi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_move (QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1550 */ rtx gen_reload_outqi (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx op0, op1, op2; op0 = operands[0]; op1 = operands[1]; op2 = operands[2]; if (reg_overlap_mentioned_p (op2, op0)) abort (); if (! q_regs_operand (op1, QImode)) { emit_insn (gen_movqi (op2, op1)); op1 = op2; } emit_insn (gen_movqi (op0, op1)); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, operand0, operand1, operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1582 */ rtx gen_movstrictqi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { /* Don't generate memory->memory moves, go through a register. */ if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) operands[1] = force_reg (QImode, operands[1]); } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand0), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1810 */ rtx gen_movdi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_move (DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1836 */ extern rtx gen_peephole2_1052 (rtx, rtx *); rtx gen_peephole2_1052 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1848 */ extern rtx gen_peephole2_1053 (rtx, rtx *); rtx gen_peephole2_1053 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); split_di (operands + 1, 1, operands + 2, operands + 3); operands[1] = gen_lowpart (DImode, operands[2]); operands[2] = gen_rtx_MEM (SImode, gen_rtx_PLUS (DImode, stack_pointer_rtx, GEN_INT (4))); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand2, operand3)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1861 */ extern rtx gen_split_1054 (rtx, rtx *); rtx 
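/* gen_split_NNNN and gen_peephole2_NNNN build the replacement sequences for the
     define_split and define_peephole2 patterns; the numbers are the generator's
     sequential indices for anonymous patterns.  The peephole2 variants additionally
     call peep2_find_free_register to claim a scratch register and give up
     (return NULL) when none is available.  */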
gen_split_1054 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); split_di (operands + 1, 1, operands + 2, operands + 3); operands[1] = gen_lowpart (DImode, operands[2]); operands[2] = gen_rtx_MEM (SImode, gen_rtx_PLUS (DImode, stack_pointer_rtx, GEN_INT (4))); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand2, operand3)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1947 */ extern rtx gen_split_1055 (rtx, rtx *); rtx gen_split_1055 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_long_move (operands); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:1956 */ extern rtx gen_split_1056 (rtx, rtx *); rtx gen_split_1056 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_long_move (operands); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2095 */ extern rtx gen_peephole2_1057 (rtx, rtx *); rtx gen_peephole2_1057 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2107 */ extern rtx gen_peephole2_1058 (rtx, rtx *); rtx gen_peephole2_1058 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); split_di (operands, 2, operands + 2, operands + 4); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand3)); emit_insn (gen_rtx_SET (VOIDmode, operand4, operand5)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2116 */ extern rtx gen_split_1059 (rtx, rtx *); rtx gen_split_1059 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx _val = 0; start_sequence (); split_di (operands, 2, operands + 2, operands + 4); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand3)); emit_insn (gen_rtx_SET (VOIDmode, operand4, operand5)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2140 */ rtx gen_movsf (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_move (SFmode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn 
(gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2182 */ extern rtx gen_split_1061 (rtx, rtx *); rtx gen_split_1061 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[1] = get_pool_constant (XEXP (operands[1], 0)); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2195 */ extern rtx gen_split_1062 (rtx, rtx *); rtx gen_split_1062 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (-4)]))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (SFmode, gen_rtx_REG (SImode, 7)), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2202 */ extern rtx gen_split_1063 (rtx, rtx *); rtx gen_split_1063 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (-8)]))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (SFmode, gen_rtx_REG (DImode, 7)), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2400 */ rtx gen_movdf (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_move (DFmode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2434 */ extern rtx gen_split_1065 (rtx, rtx *); rtx gen_split_1065 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (-8)]))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DFmode, gen_rtx_REG (SImode, 7)), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2442 */ extern rtx gen_split_1066 (rtx, rtx *); rtx gen_split_1066 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (-8)]))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DFmode, gen_rtx_REG (DImode, 7)), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2450 */ extern rtx gen_split_1067 (rtx, rtx *); rtx gen_split_1067 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_long_move (operands); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2664 */ extern rtx gen_split_1068 
(rtx, rtx *); rtx gen_split_1068 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_long_move (operands); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2693 */ rtx gen_movxf (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_move (XFmode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2728 */ extern rtx gen_split_1070 (rtx, rtx *); rtx gen_split_1070 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_long_move (operands); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2738 */ extern rtx gen_split_1071 (rtx, rtx *); rtx gen_split_1071 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), operand2))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (XFmode, gen_rtx_REG (SImode, 7)), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2746 */ extern rtx gen_split_1072 (rtx, rtx *); rtx gen_split_1072 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? 
-16 : -12); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), operand2))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (XFmode, gen_rtx_REG (DImode, 7)), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2821 */ extern rtx gen_split_1073 (rtx, rtx *); rtx gen_split_1073 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_long_move (operands); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2836 */ extern rtx gen_split_1074 (rtx, rtx *); rtx gen_split_1074 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); { rtx c = get_pool_constant (XEXP (operands[1], 0)); rtx r = operands[0]; if (GET_CODE (r) == SUBREG) r = SUBREG_REG (r); if (SSE_REG_P (r)) { if (!standard_sse_constant_p (c)) FAIL; } else if (FP_REG_P (r)) { if (!standard_80387_constant_p (c)) FAIL; } else if (MMX_REG_P (r)) FAIL; operands[1] = c; } operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2886 */ rtx gen_zero_extendhisi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (TARGET_ZERO_EXTEND_WITH_AND && !optimize_size) { operands[1] = force_reg (HImode, operands[1]); emit_insn (gen_zero_extendhisi2_and (operands[0], operands[1])); DONE; } } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (SImode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2908 */ extern rtx gen_split_1076 (rtx, rtx *); rtx gen_split_1076 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (SImode, copy_rtx (operand0), GEN_INT (65535L))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2925 */ rtx gen_zero_extendqihi2 (rtx operand0, rtx operand1) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (HImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:2960 */ extern rtx gen_split_1078 (rtx, rtx *); rtx gen_split_1078 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (HImode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2972 */ extern rtx gen_split_1079 (rtx, rtx *); rtx gen_split_1079 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = gen_lowpart (QImode, operands[0]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, const0_rtx)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand2), operand1)); _val = get_insns 
(); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2985 */ extern rtx gen_split_1080 (rtx, rtx *); rtx gen_split_1080 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (HImode, copy_rtx (operand0), GEN_INT (255L))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:2995 */ rtx gen_zero_extendqisi2 (rtx operand0, rtx operand1) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (SImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:3030 */ extern rtx gen_split_1082 (rtx, rtx *); rtx gen_split_1082 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (SImode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3042 */ extern rtx gen_split_1083 (rtx, rtx *); rtx gen_split_1083 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = gen_lowpart (QImode, operands[0]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, const0_rtx)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand2), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3056 */ extern rtx gen_split_1084 (rtx, rtx *); rtx gen_split_1084 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (SImode, copy_rtx (operand0), GEN_INT (255L))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3067 */ rtx gen_zero_extendsidi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; if (!TARGET_64BIT) { emit_insn (gen_zero_extendsidi2_32 (operands[0], operands[1])); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3130 */ extern rtx gen_split_1086 (rtx, rtx *); rtx gen_split_1086 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); split_di (&operands[0], 1, &operands[3], &operands[4]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit_insn (gen_rtx_SET (VOIDmode, operand4, const0_rtx)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3137 */ extern rtx gen_split_1087 (rtx, rtx *); rtx gen_split_1087 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); split_di (&operands[0], 1, &operands[3], &operands[4]); operand0 = operands[0]; operand1 = 
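/* The zero-extension expanders nearby (gen_zero_extendhisi2,
   gen_zero_extendqihi2, gen_zero_extendqisi2, gen_zero_extendsidi2) either
   use a movz*-style insn or, under TARGET_ZERO_EXTEND_WITH_AND, an AND with
   0xff or 0xffff; the AND form clobbers the flags, which is why the emitted
   PARALLELs carry gen_hard_reg_clobber (CCmode, 17), hard register 17 being
   the flags register in this configuration.  A minimal sketch of direct
   use, with hypothetical pseudos:

       rtx wide = gen_reg_rtx (SImode);
       rtx narrow = gen_reg_rtx (QImode);
       emit_insn (gen_zero_extendqisi2 (wide, narrow));
       // becomes movzbl, or and $0xff plus a flags clobber

   In practice these are normally reached through convert_move rather than
   called directly.  */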
operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit_insn (gen_rtx_SET (VOIDmode, operand4, const0_rtx)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3146 */ extern rtx gen_split_1088 (rtx, rtx *); rtx gen_split_1088 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); split_di (&operands[0], 1, &operands[3], &operands[4]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit_insn (gen_rtx_SET (VOIDmode, operand3, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand4, const0_rtx)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3178 */ rtx gen_extendsidi2 (rtx operand0, rtx operand1) { rtx operand2 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; { if (TARGET_64BIT) { emit_insn (gen_extendsidi2_rex64 (operands[0], operands[1])); DONE; } } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_SIGN_EXTEND (DImode, operand1)), gen_hard_reg_clobber (CCmode, 17), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3229 */ extern rtx gen_split_1090 (rtx, rtx *); rtx gen_split_1090 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); split_di (&operands[0], 1, &operands[3], &operands[4]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit_insn (gen_rtx_SET (VOIDmode, operand3, operand1)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand1), gen_rtx_ASHIFTRT (SImode, copy_rtx (operand1), const_int_rtx[MAX_SAVED_CONST_INT + (31)])), gen_hard_reg_clobber (CCmode, 17)))); emit_insn (gen_rtx_SET (VOIDmode, operand4, copy_rtx (operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3244 */ extern rtx gen_split_1091 (rtx, rtx *); rtx gen_split_1091 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { split_di (&operands[0], 1, &operands[3], &operands[4]); emit_move_insn (operands[3], operands[1]); /* Generate a cltd if possible and doing so is profitable. */ if (true_regnum (operands[1]) == 0 && true_regnum (operands[2]) == 1 && (optimize_size || TARGET_USE_CLTD)) { emit_insn (gen_ashrsi3_31 (operands[2], operands[1], GEN_INT (31))); } else { emit_move_insn (operands[2], operands[1]); emit_insn (gen_ashrsi3_31 (operands[2], operands[2], GEN_INT (31))); } emit_move_insn (operands[4], operands[2]); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3274 */ extern rtx gen_split_1092 (rtx, rtx *); rtx gen_split_1092 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { split_di (&operands[0], 1, &operands[3], &operands[4]); if (true_regnum (operands[3]) != true_regnum (operands[1])) emit_move_insn (operands[3], operands[1]); /* Generate a cltd if possible and doing so is profitable.
*/ if (true_regnum (operands[3]) == 0 && (optimize_size || TARGET_USE_CLTD)) { emit_insn (gen_ashrsi3_31 (operands[4], operands[3], GEN_INT (31))); DONE; } if (true_regnum (operands[4]) != true_regnum (operands[1])) emit_move_insn (operands[4], operands[1]); emit_insn (gen_ashrsi3_31 (operands[4], operands[4], GEN_INT (31))); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3410 */ extern rtx gen_split_1093 (rtx, rtx *); rtx gen_split_1093 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (-8)]))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DFmode, gen_rtx_REG (SImode, 7)), gen_rtx_FLOAT_EXTEND (DFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3417 */ extern rtx gen_split_1094 (rtx, rtx *); rtx gen_split_1094 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (-8)]))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DFmode, gen_rtx_REG (DImode, 7)), gen_rtx_FLOAT_EXTEND (DFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3430 */ extern rtx gen_split_1095 (rtx, rtx *); rtx gen_split_1095 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), operand2))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (XFmode, gen_rtx_REG (SImode, 7)), gen_rtx_FLOAT_EXTEND (XFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3438 */ extern rtx gen_split_1096 (rtx, rtx *); rtx gen_split_1096 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), operand2))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DFmode, gen_rtx_REG (DImode, 7)), gen_rtx_FLOAT_EXTEND (XFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3446 */ extern rtx gen_split_1097 (rtx, rtx *); rtx gen_split_1097 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? 
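/* Two families of splitters sit here.  gen_split_1090 .. gen_split_1092
   tear a 32-bit extendsidi2 apart: copy the low word, then produce the high
   word with an arithmetic shift right by 31 (gen_ashrsi3_31), which the
   output templates can print as cltd when the value is already in %eax with
   %edx holding the high half.  gen_split_1093 .. gen_split_1098 turn a push
   of a float_extend into an explicit stack pointer adjustment (register 7
   is the stack pointer here) followed by a store, the XFmode adjustment
   being chosen by TARGET_128BIT_LONG_DOUBLE.  Schematically the push
   splitters emit:

       (set (reg:SI 7) (plus:SI (reg:SI 7) (const_int -12 or -16)))
       (set (mem:XF (reg:SI 7)) (float_extend:XF (operand 1)))

   with DImode forms of the same shape on 64-bit targets.  */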
-16 : -12); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), operand2))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DFmode, gen_rtx_REG (SImode, 7)), gen_rtx_FLOAT_EXTEND (XFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3454 */ extern rtx gen_split_1098 (rtx, rtx *); rtx gen_split_1098 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = GEN_INT (TARGET_128BIT_LONG_DOUBLE ? -16 : -12); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), operand2))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (XFmode, gen_rtx_REG (DImode, 7)), gen_rtx_FLOAT_EXTEND (XFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3462 */ rtx gen_extendsfdf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { /* ??? Needed for compress_float_constant since all fp constants are LEGITIMATE_CONSTANT_P. */ if (GET_CODE (operands[1]) == CONST_DOUBLE) operands[1] = validize_mem (force_const_mem (SFmode, operands[1])); if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) operands[1] = force_reg (SFmode, operands[1]); } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_EXTEND (DFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3511 */ rtx gen_extendsfxf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { /* ??? Needed for compress_float_constant since all fp constants are LEGITIMATE_CONSTANT_P. */ if (GET_CODE (operands[1]) == CONST_DOUBLE) operands[1] = validize_mem (force_const_mem (SFmode, operands[1])); if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) operands[1] = force_reg (SFmode, operands[1]); } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3550 */ rtx gen_extenddfxf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { /* ??? Needed for compress_float_constant since all fp constants are LEGITIMATE_CONSTANT_P. */ if (GET_CODE (operands[1]) == CONST_DOUBLE) operands[1] = validize_mem (force_const_mem (DFmode, operands[1])); if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) operands[1] = force_reg (DFmode, operands[1]); } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3595 */ rtx gen_truncdfsf2 (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; if (!TARGET_80387) { emit_insn (gen_truncdfsf2_sse_only (operands[0], operands[1])); DONE; } else if (flag_unsafe_math_optimizations) { rtx reg = REG_P (operands[0]) ? 
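/* The floating-point extension expanders just above (gen_extendsfdf2,
   gen_extendsfxf2, gen_extenddfxf2) force CONST_DOUBLE sources into the
   constant pool via force_const_mem so compress_float_constant can still
   narrow them, and they avoid mem-to-mem extensions with force_reg.  The
   truncdfsf2 expander being defined here chooses among an SSE-only path, a
   no-op truncation under flag_unsafe_math_optimizations, and an x87 path
   that bounces the value through a stack slot from
   assign_386_stack_local (SFmode, 0); that slot is the clobbered operand 2
   in the PARALLEL it emits:

       (parallel [(set (operand 0) (float_truncate:SF (operand 1)))
                  (clobber (operand 2))])
 */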
operands[0] : gen_reg_rtx (SFmode); emit_insn (gen_truncdfsf2_noop (reg, operands[1])); if (reg != operands[0]) emit_move_insn (operands[0], reg); DONE; } else operands[2] = assign_386_stack_local (SFmode, 0); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand1)), gen_rtx_CLOBBER (VOIDmode, operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3777 */ extern rtx gen_split_1103 (rtx, rtx *); rtx gen_split_1103 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3788 */ extern rtx gen_split_1104 (rtx, rtx *); rtx gen_split_1104 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx src, dest; if (!TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS) emit_insn (gen_truncdfsf2_sse_only (operands[0], operands[1])); else { dest = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0); src = simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0); /* simplify_gen_subreg refuses to widen memory references. */ if (GET_CODE (src) == SUBREG) alter_subreg (&src); if (reg_overlap_mentioned_p (operands[0], operands[1])) abort (); emit_insn (gen_sse_clrv4sf (dest, CONST0_RTX (V4SFmode))); emit_insn (gen_cvtsd2ss (dest, dest, src)); } DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3816 */ extern rtx gen_split_1105 (rtx, rtx *); rtx gen_split_1105 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx src, dest; dest = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0); src = simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0); /* simplify_gen_subreg refuses to widen memory references. */ if (GET_CODE (src) == SUBREG) alter_subreg (&src); if (reg_overlap_mentioned_p (operands[0], operands[1])) abort (); emit_insn (gen_sse_clrv4sf (dest, CONST0_RTX (V4SFmode))); emit_insn (gen_cvtsd2ss (dest, dest, src)); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3837 */ extern rtx gen_split_1106 (rtx, rtx *); rtx gen_split_1106 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_TRUNCATE (SFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3847 */ rtx gen_truncxfsf2 (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; if (flag_unsafe_math_optimizations) { rtx reg = REG_P (operands[0]) ? 
operands[0] : gen_reg_rtx (SFmode); emit_insn (gen_truncxfsf2_noop (reg, operands[1])); if (reg != operands[0]) emit_move_insn (operands[0], reg); DONE; } else operands[2] = assign_386_stack_local (SFmode, 0); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand1)), gen_rtx_CLOBBER (VOIDmode, operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3911 */ extern rtx gen_split_1108 (rtx, rtx *); rtx gen_split_1108 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3920 */ extern rtx gen_split_1109 (rtx, rtx *); rtx gen_split_1109 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_TRUNCATE (SFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3930 */ rtx gen_truncxfdf2 (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; if (flag_unsafe_math_optimizations) { rtx reg = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (DFmode); emit_insn (gen_truncxfdf2_noop (reg, operands[1])); if (reg != operands[0]) emit_move_insn (operands[0], reg); DONE; } else operands[2] = assign_386_stack_local (DFmode, 0); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand1)), gen_rtx_CLOBBER (VOIDmode, operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:3995 */ extern rtx gen_split_1111 (rtx, rtx *); rtx gen_split_1111 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4004 */ extern rtx gen_split_1112 (rtx, rtx *); rtx gen_split_1112 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_TRUNCATE (DFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4019 */ rtx gen_fix_truncxfdi2 (rtx operand0, rtx operand1) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:4026 */ rtx gen_fix_truncdfdi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] 
= operand1; { if (TARGET_64BIT && TARGET_SSE2) { rtx out = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (DImode); emit_insn (gen_fix_truncdfdi_sse (out, operands[1])); if (out != operands[0]) emit_move_insn (operands[0], out); DONE; } } operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4042 */ rtx gen_fix_truncsfdi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (TARGET_SSE && TARGET_64BIT) { rtx out = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (DImode); emit_insn (gen_fix_truncsfdi_sse (out, operands[1])); if (out != operands[0]) emit_move_insn (operands[0], out); DONE; } } operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4060 */ extern rtx gen_split_1116 (rtx, rtx *); rtx gen_split_1116 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { ix86_optimize_mode_switching = 1; operands[2] = assign_386_stack_local (HImode, 1); operands[3] = assign_386_stack_local (HImode, 2); if (memory_operand (operands[0], VOIDmode)) emit_insn (gen_fix_truncdi_memory (operands[0], operands[1], operands[2], operands[3])); else { operands[4] = assign_386_stack_local (DImode, 0); emit_insn (gen_fix_truncdi_nomemory (operands[0], operands[1], operands[2], operands[3], operands[4])); } DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4114 */ extern rtx gen_split_1117 (rtx, rtx *); rtx gen_split_1117 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand4, gen_rtx_FIX (DImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3), gen_rtx_CLOBBER (VOIDmode, operand5)))); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4129 */ extern rtx gen_split_1118 (rtx, rtx *); rtx gen_split_1118 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3), gen_rtx_CLOBBER (VOIDmode, operand5)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4154 */ extern rtx gen_peephole2_1119 (rtx, rtx *); rtx gen_peephole2_1119 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx 
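/* The fix_trunc expanders in this area pick between two strategies: when
   SSE or SSE2 is available (and TARGET_64BIT for the DImode forms) they use
   cvttss2si/cvttsd2si style patterns such as gen_fix_truncdfdi_sse;
   otherwise the splitters gen_split_1116, gen_split_1124 and gen_split_1132
   fall back to the x87 sequence that temporarily switches the FPU control
   word to truncating rounding, allocating HImode stack slots with
   assign_386_stack_local for the saved and modified control words and
   setting ix86_optimize_mode_switching so the mode-switching pass can place
   the reloads.  The nearby gen_peephole2_* routines use
   peep2_find_free_register to grab a free SSE-class register ("x" or "Y")
   so the conversion can go through the SSE path instead; schematically they
   replace a load-then-convert pair with:

       (set (reg xmm) (operand 1))
       (set (operand 0) (fix (reg xmm)))
 */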
_val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "x", SFmode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4173 */ extern rtx gen_peephole2_1120 (rtx, rtx *); rtx gen_peephole2_1120 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "Y", DFmode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (DImode, copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4184 */ rtx gen_fix_truncxfsi2 (rtx operand0, rtx operand1) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:4191 */ rtx gen_fix_truncdfsi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (TARGET_SSE2) { rtx out = REG_P (operands[0]) ? operands[0] : gen_reg_rtx (SImode); emit_insn (gen_fix_truncdfsi_sse (out, operands[1])); if (out != operands[0]) emit_move_insn (operands[0], out); DONE; } } operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4207 */ rtx gen_fix_truncsfsi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (TARGET_SSE) { rtx out = REG_P (operands[0]) ? 
operands[0] : gen_reg_rtx (SImode); emit_insn (gen_fix_truncsfsi_sse (out, operands[1])); if (out != operands[0]) emit_move_insn (operands[0], out); DONE; } } operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4225 */ extern rtx gen_split_1124 (rtx, rtx *); rtx gen_split_1124 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { ix86_optimize_mode_switching = 1; operands[2] = assign_386_stack_local (HImode, 1); operands[3] = assign_386_stack_local (HImode, 2); if (memory_operand (operands[0], VOIDmode)) emit_insn (gen_fix_truncsi_memory (operands[0], operands[1], operands[2], operands[3])); else { operands[4] = assign_386_stack_local (SImode, 0); emit_insn (gen_fix_truncsi_nomemory (operands[0], operands[1], operands[2], operands[3], operands[4])); } DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4288 */ extern rtx gen_peephole2_1125 (rtx, rtx *); rtx gen_peephole2_1125 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "x", SFmode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4307 */ extern rtx gen_peephole2_1126 (rtx, rtx *); rtx gen_peephole2_1126 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "Y", DFmode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4316 */ extern rtx gen_split_1127 (rtx, rtx *); rtx gen_split_1127 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand4, gen_rtx_FIX (SImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3)))); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4329 */ extern rtx gen_split_1128 (rtx, rtx *); rtx gen_split_1128 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit (gen_rtx_PARALLEL 
(VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4343 */ rtx gen_fix_truncxfhi2 (rtx operand0, rtx operand1) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (HImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:4350 */ rtx gen_fix_truncdfhi2 (rtx operand0, rtx operand1) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (HImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:4357 */ rtx gen_fix_truncsfhi2 (rtx operand0, rtx operand1) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (HImode, operand1)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:4366 */ extern rtx gen_split_1132 (rtx, rtx *); rtx gen_split_1132 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { ix86_optimize_mode_switching = 1; operands[2] = assign_386_stack_local (HImode, 1); operands[3] = assign_386_stack_local (HImode, 2); if (memory_operand (operands[0], VOIDmode)) emit_insn (gen_fix_trunchi_memory (operands[0], operands[1], operands[2], operands[3])); else { operands[4] = assign_386_stack_local (HImode, 0); emit_insn (gen_fix_trunchi_nomemory (operands[0], operands[1], operands[2], operands[3], operands[4])); } DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4418 */ extern rtx gen_split_1133 (rtx, rtx *); rtx gen_split_1133 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (HImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4430 */ extern rtx gen_split_1134 (rtx, rtx *); rtx gen_split_1134 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand4, gen_rtx_FIX (HImode, operand1)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3), gen_rtx_CLOBBER (VOIDmode, copy_rtx (operand4))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4469 */ rtx gen_floathisf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (TARGET_SSE && TARGET_SSE_MATH) { emit_insn (gen_floatsisf2 (operands[0], convert_to_mode (SImode, operands[1], 0))); DONE; } } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (SFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* 
../../gcc/gcc/config/i386/i386.md:4493 */ rtx gen_floatsisf2 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4525 */ extern rtx gen_split_1137 (rtx, rtx *); rtx gen_split_1137 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx dest; dest = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0); emit_insn (gen_sse_clrv4sf (dest, CONST0_RTX (V4SFmode))); emit_insn (gen_cvtsi2ss (dest, dest, operands[1])); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4539 */ rtx gen_floatdisf2 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (SFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4582 */ extern rtx gen_split_1139 (rtx, rtx *); rtx gen_split_1139 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx dest; dest = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0); emit_insn (gen_sse_clrv4sf (dest, CONST0_RTX (V4SFmode))); emit_insn (gen_cvtsi2ssq (dest, dest, operands[1])); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4596 */ rtx gen_floathidf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (TARGET_SSE && TARGET_SSE_MATH) { emit_insn (gen_floatsidf2 (operands[0], convert_to_mode (SImode, operands[1], 0))); DONE; } } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (DFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4620 */ rtx gen_floatsidf2 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (DFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4650 */ rtx gen_floatdidf2 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT (DFmode, operand1)); } /* ../../gcc/gcc/config/i386/i386.md:4725 */ extern rtx gen_split_1143 (rtx, rtx *); rtx gen_split_1143 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { operands[2] = ix86_force_to_memory (GET_MODE (operands[1]), operands[1]); operands[2] = gen_rtx_FLOAT (GET_MODE (operands[0]), operands[2]); emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[2])); ix86_free_from_memory (GET_MODE (operands[1])); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4738 */ rtx gen_floatunssisf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; x86_emit_floatuns (operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4744 */ rtx gen_floatunsdisf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; x86_emit_floatuns (operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); _val = get_insns (); end_sequence (); return _val; } /* 
../../gcc/gcc/config/i386/i386.md:4750 */ rtx gen_floatunsdidf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; x86_emit_floatuns (operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4758 */ rtx gen_vec_setv2df (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { switch (INTVAL (operands[2])) { case 0: emit_insn (gen_sse2_movsd (operands[0], operands[0], simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0))); break; case 1: { rtx op1 = simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0); emit_insn (gen_sse2_unpcklpd (operands[0], operands[0], op1)); } break; default: abort (); } DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (operand0); emit (operand1); emit (operand2); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4784 */ rtx gen_vec_extractv2df (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { switch (INTVAL (operands[2])) { case 0: emit_move_insn (operands[0], gen_lowpart (DFmode, operands[1])); break; case 1: { rtx dest = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0); emit_insn (gen_sse2_unpckhpd (dest, operands[1], operands[1])); } break; default: abort (); } DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (operand0); emit (operand1); emit (operand2); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4808 */ rtx gen_vec_initv2df (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_init (operands[0], operands[1]); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit (operand0); emit (operand1); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4817 */ rtx gen_vec_setv4sf (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { switch (INTVAL (operands[2])) { case 0: emit_insn (gen_sse_movss (operands[0], operands[0], simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0))); break; case 1: { rtx op1 = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0); rtx tmp = gen_reg_rtx (V4SFmode); emit_move_insn (tmp, operands[0]); emit_insn (gen_sse_unpcklps (operands[0], operands[0], operands[0])); emit_insn (gen_sse_movss (operands[0], operands[0], op1)); emit_insn (gen_sse_shufps (operands[0], operands[0], tmp, GEN_INT (1 + (0<<2) + (2<<4) + (3<<6)))); } case 2: { rtx op1 = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0); rtx tmp = gen_reg_rtx (V4SFmode); emit_move_insn (tmp, operands[0]); emit_insn (gen_sse_movss (tmp, tmp, op1)); emit_insn (gen_sse_shufps (operands[0], operands[0], tmp, GEN_INT (0 + (1<<2) + (0<<4) + (3<<6)))); } break; case 3: { rtx op1 = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0); rtx tmp = gen_reg_rtx (V4SFmode); emit_move_insn (tmp, operands[0]); emit_insn (gen_sse_movss (tmp, tmp, op1)); emit_insn 
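/* The third argument of gen_sse_shufps below is the shufps immediate, read
   as four 2-bit lane selectors: bits 1:0 pick result element 0, bits 3:2
   element 1, and so on, with the low two results taken from the first
   vector operand and the high two from the second.  The case 1 branch here
   uses

       GEN_INT (1 + (0<<2) + (2<<4) + (3<<6))    // == 0xE1

   so result elements 0 and 1 come from lanes 1 and 0 of the just-rebuilt
   vector and elements 2 and 3 come from lanes 2 and 3 of tmp, the saved
   copy, leaving every lane except lane 1 unchanged.  */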
(gen_sse_shufps (operands[0], operands[0], tmp, GEN_INT (0 + (1<<2) + (2<<4) + (0<<6)))); } break; default: abort (); } DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (operand0); emit (operand1); emit (operand2); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4869 */ rtx gen_vec_extractv4sf (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { switch (INTVAL (operands[2])) { case 0: emit_move_insn (operands[0], gen_lowpart (SFmode, operands[1])); break; case 1: { rtx op0 = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0); rtx tmp = gen_reg_rtx (V4SFmode); emit_move_insn (tmp, operands[1]); emit_insn (gen_sse_shufps (op0, tmp, tmp, const1_rtx)); } case 2: { rtx op0 = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0); rtx tmp = gen_reg_rtx (V4SFmode); emit_move_insn (tmp, operands[1]); emit_insn (gen_sse_unpckhps (op0, tmp, tmp)); } case 3: { rtx op0 = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0); rtx tmp = gen_reg_rtx (V4SFmode); emit_move_insn (tmp, operands[1]); emit_insn (gen_sse_shufps (op0, tmp, tmp, GEN_INT (3))); } default: abort (); } DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (operand0); emit (operand1); emit (operand2); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4912 */ rtx gen_vec_initv4sf (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_init (operands[0], operands[1]); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit (operand0); emit (operand1); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4928 */ rtx gen_adddi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (PLUS, DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (DImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:4944 */ extern rtx gen_split_1154 (rtx, rtx *); rtx gen_split_1154 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx _val = 0; start_sequence (); split_di (operands+0, 1, operands+0, operands+3); split_di (operands+1, 1, operands+1, operands+4); split_di (operands+2, 1, operands+2, operands+5); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_UNSPEC (CCmode, gen_rtvec (2, operand1, operand2), 27)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (SImode, copy_rtx (operand1), copy_rtx (operand2)))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand3, gen_rtx_PLUS (SImode, gen_rtx_PLUS (SImode, gen_rtx_LTU (SImode, gen_rtx_REG (CCmode, 17), const0_rtx), operand4), operand5)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* 
../../gcc/gcc/config/i386/i386.md:5059 */ rtx gen_addsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (PLUS, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (SImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5103 */ extern rtx gen_split_1156 (rtx, rtx *); rtx gen_split_1156 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx pat; operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (Pmode, operands[1]); operands[2] = gen_lowpart (Pmode, operands[2]); operands[3] = gen_lowpart (Pmode, operands[3]); pat = gen_rtx_PLUS (Pmode, gen_rtx_PLUS (Pmode, operands[1], operands[2]), operands[3]); if (Pmode != SImode) pat = gen_rtx_SUBREG (SImode, pat, 0); emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat)); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5134 */ extern rtx gen_split_1157 (rtx, rtx *); rtx gen_split_1157 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); { operands[1] = gen_lowpart (Pmode, operands[1]); operands[2] = gen_lowpart (Pmode, operands[2]); operands[3] = gen_lowpart (Pmode, operands[3]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_SUBREG (SImode, gen_rtx_PLUS (DImode, gen_rtx_PLUS (DImode, operand1, operand2), operand3), 0)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5155 */ extern rtx gen_split_1158 (rtx, rtx *); rtx gen_split_1158 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx pat; operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (Pmode, operands[1]); operands[3] = gen_lowpart (Pmode, operands[3]); pat = gen_rtx_PLUS (Pmode, gen_rtx_MULT (Pmode, operands[1], operands[2]), operands[3]); if (Pmode != SImode) pat = gen_rtx_SUBREG (SImode, pat, 0); emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat)); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5184 */ extern rtx gen_split_1159 (rtx, rtx *); rtx gen_split_1159 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); { operands[1] = gen_lowpart (Pmode, operands[1]); operands[3] = gen_lowpart (Pmode, operands[3]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_SUBREG (SImode, gen_rtx_PLUS (DImode, gen_rtx_MULT (DImode, operand1, operand2), operand3), 0)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5204 */ extern rtx gen_split_1160 (rtx, rtx *); rtx gen_split_1160 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx pat; operands[0] = gen_lowpart 
(SImode, operands[0]); operands[1] = gen_lowpart (Pmode, operands[1]); operands[3] = gen_lowpart (Pmode, operands[3]); operands[4] = gen_lowpart (Pmode, operands[4]); pat = gen_rtx_PLUS (Pmode, gen_rtx_PLUS (Pmode, gen_rtx_MULT (Pmode, operands[1], operands[2]), operands[3]), operands[4]); if (Pmode != SImode) pat = gen_rtx_SUBREG (SImode, pat, 0); emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat)); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5237 */ extern rtx gen_split_1161 (rtx, rtx *); rtx gen_split_1161 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); { operands[1] = gen_lowpart (Pmode, operands[1]); operands[3] = gen_lowpart (Pmode, operands[3]); operands[4] = gen_lowpart (Pmode, operands[4]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_SUBREG (SImode, gen_rtx_PLUS (DImode, gen_rtx_PLUS (DImode, gen_rtx_MULT (DImode, operand1, operand2), operand3), operand4), 0)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5316 */ extern rtx gen_split_1162 (rtx, rtx *); rtx gen_split_1162 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (DImode, operand1, operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5582 */ extern rtx gen_split_1163 (rtx, rtx *); rtx gen_split_1163 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx pat; /* In -fPIC mode the constructs like (const (unspec [symbol_ref])) may confuse gen_lowpart. 
*/ if (GET_MODE (operands[0]) != Pmode) { operands[1] = gen_lowpart (Pmode, operands[1]); operands[2] = gen_lowpart (Pmode, operands[2]); } operands[0] = gen_lowpart (SImode, operands[0]); pat = gen_rtx_PLUS (Pmode, operands[1], operands[2]); if (Pmode != SImode) pat = gen_rtx_SUBREG (SImode, pat, 0); emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat)); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5662 */ extern rtx gen_split_1164 (rtx, rtx *); rtx gen_split_1164 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); { operands[1] = gen_lowpart (Pmode, operands[1]); operands[2] = gen_lowpart (Pmode, operands[2]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_SUBREG (SImode, gen_rtx_PLUS (DImode, operand1, operand2), 0)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:5950 */ rtx gen_addhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (PLUS, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (HImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:6195 */ rtx gen_addqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (PLUS, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (QImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:6572 */ rtx gen_addxf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (XFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:6579 */ rtx gen_adddf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:6586 */ rtx gen_addsf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:6597 */ rtx gen_subdi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (MINUS, DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:6613 */ extern rtx gen_split_1171 (rtx, rtx *); rtx gen_split_1171 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx 
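/* gen_adddi3, gen_subdi3 and the narrower add/sub expanders route
   everything through ix86_expand_binary_operator and attach a flags
   clobber.  On 32-bit targets the DImode forms are later decomposed by
   gen_split_1154 and gen_split_1171 into an operation on the low words that
   sets the flags and a second operation on the high words that consumes the
   carry as (ltu (reg:CC 17) (const_int 0)), i.e. the add/adc and sub/sbb
   pairs.  Schematically, for a hypothetical a = b + c on double-word
   values:

       lo(a) = lo(b) + lo(c)            // sets the carry flag
       hi(a) = hi(b) + hi(c) + carry    // the LTU of the flags register

   which is the pair of PARALLELs built by gen_split_1154.  */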
operand3; rtx operand4; rtx operand5; rtx _val = 0; start_sequence (); split_di (operands+0, 1, operands+0, operands+3); split_di (operands+1, 1, operands+1, operands+4); split_di (operands+2, 1, operands+2, operands+5); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand1, operand2)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (SImode, copy_rtx (operand1), copy_rtx (operand2)))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand3, gen_rtx_MINUS (SImode, operand4, gen_rtx_PLUS (SImode, gen_rtx_LTU (SImode, gen_rtx_REG (CCmode, 17), const0_rtx), operand5))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:6727 */ rtx gen_subsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (MINUS, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (SImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:6812 */ rtx gen_subhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (MINUS, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (HImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:6856 */ rtx gen_subqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (MINUS, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (QImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:6913 */ rtx gen_subxf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (XFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:6920 */ rtx gen_subdf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:6927 */ rtx gen_subsf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:6936 */ rtx gen_muldi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:6968 */ rtx 
gen_mulsi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (SImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7024 */ rtx gen_mulhi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (HImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7052 */ rtx gen_mulqi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (QImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7076 */ rtx gen_umulqihi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (HImode, gen_rtx_ZERO_EXTEND (HImode, operand1), gen_rtx_ZERO_EXTEND (HImode, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7102 */ rtx gen_mulqihi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (HImode, gen_rtx_SIGN_EXTEND (HImode, operand1), gen_rtx_SIGN_EXTEND (HImode, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7126 */ rtx gen_umulditi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (TImode, gen_rtx_ZERO_EXTEND (TImode, operand1), gen_rtx_ZERO_EXTEND (TImode, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7153 */ rtx gen_umulsidi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (DImode, gen_rtx_ZERO_EXTEND (DImode, operand1), gen_rtx_ZERO_EXTEND (DImode, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7179 */ rtx gen_mulditi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (TImode, gen_rtx_SIGN_EXTEND (TImode, operand1), gen_rtx_SIGN_EXTEND (TImode, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7205 */ rtx gen_mulsidi3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (DImode, gen_rtx_SIGN_EXTEND (DImode, operand1), gen_rtx_SIGN_EXTEND (DImode, operand2))), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7231 */ rtx gen_umuldi3_highpart (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_TRUNCATE (DImode, gen_rtx_LSHIFTRT (TImode, gen_rtx_MULT (TImode, gen_rtx_ZERO_EXTEND (TImode, operand1), gen_rtx_ZERO_EXTEND (TImode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (64)]))), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7267 */ rtx gen_umulsi3_highpart (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_TRUNCATE (SImode, gen_rtx_LSHIFTRT (DImode, gen_rtx_MULT (DImode, gen_rtx_ZERO_EXTEND (DImode, 
operand1), gen_rtx_ZERO_EXTEND (DImode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (32)]))), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7324 */ rtx gen_smuldi3_highpart (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_TRUNCATE (DImode, gen_rtx_LSHIFTRT (TImode, gen_rtx_MULT (TImode, gen_rtx_SIGN_EXTEND (TImode, operand1), gen_rtx_SIGN_EXTEND (TImode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (64)]))), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7359 */ rtx gen_smulsi3_highpart (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_TRUNCATE (SImode, gen_rtx_LSHIFTRT (DImode, gen_rtx_MULT (DImode, gen_rtx_SIGN_EXTEND (DImode, operand1), gen_rtx_SIGN_EXTEND (DImode, operand2)), const_int_rtx[MAX_SAVED_CONST_INT + (32)]))), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7416 */ rtx gen_mulxf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (XFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:7423 */ rtx gen_muldf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:7430 */ rtx gen_mulsf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:7461 */ rtx gen_divxf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (XFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:7468 */ rtx gen_divdf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:7475 */ rtx gen_divsf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:7484 */ rtx gen_divmoddi4 (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (DImode, operand1, operand2)), gen_rtx_SET (VOIDmode, operand3, gen_rtx_MOD (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7532 */ extern rtx gen_split_1199 (rtx, rtx *); rtx gen_split_1199 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); { /* Avoid use of cltd in favor of a mov+shift. 
*/ if (!TARGET_USE_CLTD && !optimize_size) { if (true_regnum (operands[1])) emit_move_insn (operands[0], operands[1]); else emit_move_insn (operands[3], operands[1]); operands[4] = operands[3]; } else { if (true_regnum (operands[1])) abort(); operands[4] = operands[1]; } } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand3, gen_rtx_ASHIFTRT (DImode, operand4, const_int_rtx[MAX_SAVED_CONST_INT + (63)])), gen_hard_reg_clobber (CCmode, 17)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (DImode, gen_rtx_REG (DImode, 0), operand2)), gen_rtx_SET (VOIDmode, copy_rtx (operand3), gen_rtx_MOD (DImode, gen_rtx_REG (DImode, 0), copy_rtx (operand2))), gen_rtx_USE (VOIDmode, copy_rtx (operand3)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:7568 */ rtx gen_divmodsi4 (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (SImode, operand1, operand2)), gen_rtx_SET (VOIDmode, operand3, gen_rtx_MOD (SImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:7616 */ extern rtx gen_split_1201 (rtx, rtx *); rtx gen_split_1201 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); { /* Avoid use of cltd in favor of a mov+shift. */ if (!TARGET_USE_CLTD && !optimize_size) { if (true_regnum (operands[1])) emit_move_insn (operands[0], operands[1]); else emit_move_insn (operands[3], operands[1]); operands[4] = operands[3]; } else { if (true_regnum (operands[1])) abort(); operands[4] = operands[1]; } } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand3, gen_rtx_ASHIFTRT (SImode, operand4, const_int_rtx[MAX_SAVED_CONST_INT + (31)])), gen_hard_reg_clobber (CCmode, 17)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_DIV (SImode, gen_rtx_REG (SImode, 0), operand2)), gen_rtx_SET (VOIDmode, copy_rtx (operand3), gen_rtx_MOD (SImode, gen_rtx_REG (SImode, 0), copy_rtx (operand2))), gen_rtx_USE (VOIDmode, copy_rtx (operand3)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:7690 */ extern rtx gen_split_1202 (rtx, rtx *); rtx gen_split_1202 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand3, const0_rtx)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UDIV (DImode, operand1, operand2)), gen_rtx_SET (VOIDmode, copy_rtx (operand3), gen_rtx_UMOD (DImode, copy_rtx (operand1), copy_rtx (operand2))), gen_rtx_USE (VOIDmode, copy_rtx (operand3)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:7733 */ extern rtx gen_split_1203 (rtx, rtx *); rtx gen_split_1203 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { 
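/* Split for the SImode unsigned divide/modulo pattern (the DImode
   counterpart is gen_split_1202 just above): operand 3, the remainder
   register, is first cleared to zero, and then a single PARALLEL is
   emitted that performs the UDIV and UMOD together, marks the zeroed
   register with a USE, and clobbers the flags register (hard reg 17).  */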
rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand3, const0_rtx)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UDIV (SImode, operand1, operand2)), gen_rtx_SET (VOIDmode, copy_rtx (operand3), gen_rtx_UMOD (SImode, copy_rtx (operand1), copy_rtx (operand2))), gen_rtx_USE (VOIDmode, copy_rtx (operand3)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:7750 */ rtx gen_udivmodhi4 (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx operand4; rtx _val = 0; start_sequence (); { rtx operands[5]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; operands[4] = gen_reg_rtx (HImode); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; } emit_insn (gen_rtx_SET (VOIDmode, operand4, const0_rtx)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UDIV (HImode, operand1, operand2)), gen_rtx_SET (VOIDmode, operand3, gen_rtx_UMOD (HImode, operand1, operand2)), gen_rtx_USE (VOIDmode, operand4), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:7832 */ rtx gen_testsi_ccno_1 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_AND (SImode, operand0, operand1), const0_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:7854 */ rtx gen_testqi_ccz_1 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCZmode, 17), gen_rtx_COMPARE (CCZmode, gen_rtx_AND (QImode, operand0, operand1), const0_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:7884 */ rtx gen_testqi_ext_ccno_0 (rtx operand0, rtx operand1) { return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_AND (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand1), const0_rtx)); } /* ../../gcc/gcc/config/i386/i386.md:8001 */ extern rtx gen_split_1208 (rtx, rtx *); rtx gen_split_1208 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); { HOST_WIDE_INT len = INTVAL (operands[1]); HOST_WIDE_INT pos = INTVAL (operands[2]); HOST_WIDE_INT mask; enum machine_mode mode, submode; mode = GET_MODE (operands[0]); if (GET_CODE (operands[0]) == MEM) { /* ??? Combine likes to put non-volatile mem extractions in QImode no matter the size of the test. So find a mode that works. */ if (! MEM_VOLATILE_P (operands[0])) { mode = smallest_mode_for_size (pos + len, MODE_INT); operands[0] = adjust_address (operands[0], mode, 0); } } else if (GET_CODE (operands[0]) == SUBREG && (submode = GET_MODE (SUBREG_REG (operands[0])), GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (submode)) && pos + len <= GET_MODE_BITSIZE (submode)) { /* Narrow a paradoxical subreg to prevent partial register stalls. */ mode = submode; operands[0] = SUBREG_REG (operands[0]); } else if (mode == HImode && pos + len <= 8) { /* Small HImode tests can be converted to QImode. 
*/ mode = QImode; operands[0] = gen_lowpart (QImode, operands[0]); } mask = ((HOST_WIDE_INT)1 << (pos + len)) - 1; mask &= ~(((HOST_WIDE_INT)1 << pos) - 1); operands[3] = gen_rtx_AND (mode, operands[0], gen_int_mode (mask, mode)); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, operand3, const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8054 */ extern rtx gen_split_1209 (rtx, rtx *); rtx gen_split_1209 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_int_mode (INTVAL (operands[1]) >> 8, SImode); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_AND (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand1), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8075 */ extern rtx gen_split_1210 (rtx, rtx *); rtx gen_split_1210 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (QImode, operands[0]); operands[1] = gen_lowpart (QImode, operands[1]); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_AND (QImode, operand0, operand1), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8101 */ rtx gen_anddi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (AND, DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (DImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8167 */ rtx gen_andsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (AND, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (SImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8214 */ extern rtx gen_split_1213 (rtx, rtx *); rtx gen_split_1213 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[1] = gen_lowpart (HImode, operands[0]); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand1), const0_rtx)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8223 */ extern rtx gen_split_1214 (rtx, rtx *); rtx gen_split_1214 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[1] = gen_lowpart (QImode, 
operands[0]); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand1), const0_rtx)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8232 */ extern rtx gen_split_1215 (rtx, rtx *); rtx gen_split_1215 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_XOR (SImode, gen_rtx_ZERO_EXTRACT (SImode, copy_rtx (operand0), const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_ZERO_EXTRACT (SImode, copy_rtx (operand0), const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8290 */ rtx gen_andhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (AND, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (HImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8338 */ rtx gen_andqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (AND, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (QImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8519 */ extern rtx gen_split_1218 (rtx, rtx *); rtx gen_split_1218 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (SImode, operands[1]); operands[2] = gen_int_mode ((INTVAL (operands[2]) >> 8) & 0xff, SImode); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_AND (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand1, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8540 */ extern rtx gen_split_1219 (rtx, rtx *); rtx gen_split_1219 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (QImode, operands[0]); operands[1] = gen_lowpart (QImode, operands[1]); operands[2] = gen_lowpart (QImode, operands[2]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL 
(VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand0), gen_rtx_AND (QImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8564 */ rtx gen_iordi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (IOR, DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (DImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8611 */ rtx gen_iorsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (IOR, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (SImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8704 */ rtx gen_iorhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (IOR, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (HImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8747 */ rtx gen_iorqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (IOR, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (QImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8892 */ extern rtx gen_split_1224 (rtx, rtx *); rtx gen_split_1224 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (SImode, operands[1]); operands[2] = gen_int_mode ((INTVAL (operands[2]) >> 8) & 0xff, SImode); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_IOR (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand1, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8913 */ extern rtx gen_split_1225 (rtx, rtx *); rtx gen_split_1225 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; 
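/* Like the AND split gen_split_1219 above, this split narrows an IOR to a
   byte operation: operands 0-2 are rewritten as their QImode low parts and
   a STRICT_LOW_PART byte IOR is emitted together with the flags clobber.  */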
start_sequence (); operands[0] = gen_lowpart (QImode, operands[0]); operands[1] = gen_lowpart (QImode, operands[1]); operands[2] = gen_lowpart (QImode, operands[2]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand0), gen_rtx_IOR (QImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8937 */ rtx gen_xordi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (XOR, DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (DImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:8987 */ rtx gen_xorsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (XOR, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (SImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9081 */ rtx gen_xorhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (XOR, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (HImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9124 */ rtx gen_xorqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (XOR, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (QImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9313 */ rtx gen_xorqi_cc_ext_1 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_XOR (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand1, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand2), const0_rtx)), gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_XOR (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand1, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand2)))); } /* ../../gcc/gcc/config/i386/i386.md:9333 */ extern rtx gen_split_1231 (rtx, rtx *); rtx gen_split_1231 (rtx curr_insn ATTRIBUTE_UNUSED, rtx 
*operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (SImode, operands[1]); operands[2] = gen_int_mode ((INTVAL (operands[2]) >> 8) & 0xff, SImode); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_XOR (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand1, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9354 */ extern rtx gen_split_1232 (rtx, rtx *); rtx gen_split_1232 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (QImode, operands[0]); operands[1] = gen_lowpart (QImode, operands[1]); operands[2] = gen_lowpart (QImode, operands[2]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand0), gen_rtx_XOR (QImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9375 */ rtx gen_negdi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (NEG, DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (DImode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9390 */ extern rtx gen_split_1234 (rtx, rtx *); rtx gen_split_1234 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); split_di (operands+1, 1, operands+2, operands+3); split_di (operands+0, 1, operands+0, operands+1); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCZmode, 17), gen_rtx_COMPARE (CCZmode, gen_rtx_NEG (SImode, operand2), const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SImode, copy_rtx (operand2)))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand1, gen_rtx_PLUS (SImode, gen_rtx_PLUS (SImode, gen_rtx_LTU (SImode, gen_rtx_REG (CCmode, 17), const0_rtx), operand3), const0_rtx)), gen_hard_reg_clobber (CCmode, 17)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand1), gen_rtx_NEG (SImode, copy_rtx (operand1))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9437 */ rtx gen_negsi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (NEG, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SImode, 
operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9497 */ rtx gen_neghi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (NEG, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (HImode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9524 */ rtx gen_negqi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (NEG, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (QImode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9553 */ rtx gen_negsf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; if (TARGET_SSE) { /* In case operand is in memory, we will not use SSE. */ if (memory_operand (operands[0], VOIDmode) && rtx_equal_p (operands[0], operands[1])) emit_insn (gen_negsf2_memory (operands[0], operands[1])); else { /* Using SSE is tricky, since we need bitwise negation of -0 in register. */ rtx reg = gen_reg_rtx (SFmode); rtx dest = operands[0]; rtx imm = gen_lowpart (SFmode, gen_int_mode (0x80000000, SImode)); operands[1] = force_reg (SFmode, operands[1]); operands[0] = force_reg (SFmode, operands[0]); reg = force_reg (V4SFmode, gen_rtx_CONST_VECTOR (V4SFmode, gen_rtvec (4, imm, CONST0_RTX (SFmode), CONST0_RTX (SFmode), CONST0_RTX (SFmode)))); emit_insn (gen_negsf2_ifs (operands[0], operands[1], reg)); if (dest != operands[0]) emit_move_insn (dest, operands[0]); } DONE; } ix86_expand_unary_operator (NEG, SFmode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9605 */ extern rtx gen_split_1239 (rtx, rtx *); rtx gen_split_1239 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9615 */ extern rtx gen_split_1240 (rtx, rtx *); rtx gen_split_1240 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9625 */ extern rtx gen_split_1241 (rtx, rtx *); rtx gen_split_1241 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 
0; start_sequence (); { operands[0] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0); operands[1] = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0); if (operands_match_p (operands[0], operands[2])) { rtx tmp; tmp = operands[1]; operands[1] = operands[2]; operands[2] = tmp; } } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (V4SFmode, operand1, operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9658 */ extern rtx gen_split_1242 (rtx, rtx *); rtx gen_split_1242 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9667 */ extern rtx gen_split_1243 (rtx, rtx *); rtx gen_split_1243 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[1] = gen_int_mode (0x80000000, SImode); operands[0] = gen_lowpart (SImode, operands[0]); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (SImode, copy_rtx (operand0), operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9677 */ extern rtx gen_split_1244 (rtx, rtx *); rtx gen_split_1244 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); { int size = GET_MODE_SIZE (GET_MODE (operands[1])); if (GET_MODE (operands[1]) == XFmode) size = 10; operands[0] = adjust_address (operands[0], QImode, size - 1); operands[1] = gen_int_mode (0x80, QImode); } operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (QImode, copy_rtx (operand0), operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9693 */ rtx gen_negdf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; if (TARGET_SSE2) { /* In case operand is in memory, we will not use SSE. */ if (memory_operand (operands[0], VOIDmode) && rtx_equal_p (operands[0], operands[1])) emit_insn (gen_negdf2_memory (operands[0], operands[1])); else { /* Using SSE is tricky, since we need bitwise negation of -0 in register. 
*/ rtx reg; #if HOST_BITS_PER_WIDE_INT >= 64 rtx imm = gen_int_mode (((HOST_WIDE_INT)1) << 63, DImode); #else rtx imm = immed_double_const (0, 0x80000000, DImode); #endif rtx dest = operands[0]; operands[1] = force_reg (DFmode, operands[1]); operands[0] = force_reg (DFmode, operands[0]); imm = gen_lowpart (DFmode, imm); reg = force_reg (V2DFmode, gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, imm, CONST0_RTX (DFmode)))); emit_insn (gen_negdf2_ifs (operands[0], operands[1], reg)); if (dest != operands[0]) emit_move_insn (dest, operands[0]); } DONE; } ix86_expand_unary_operator (NEG, DFmode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (DFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9759 */ extern rtx gen_split_1246 (rtx, rtx *); rtx gen_split_1246 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (DFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9769 */ extern rtx gen_split_1247 (rtx, rtx *); rtx gen_split_1247 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (DFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9780 */ extern rtx gen_split_1248 (rtx, rtx *); rtx gen_split_1248 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (DImode, operands[0]); operands[1] = gen_lowpart (DImode, operands[1]); operands[2] = gen_lowpart (DImode, operands[2]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9793 */ extern rtx gen_split_1249 (rtx, rtx *); rtx gen_split_1249 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); { operands[0] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0); operands[1] = simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0); /* Avoid possible reformatting on the operands. 
*/ if (TARGET_SSE_PARTIAL_REGS && !optimize_size) emit_insn (gen_sse2_unpcklpd (operands[0], operands[0], operands[0])); if (operands_match_p (operands[0], operands[2])) { rtx tmp; tmp = operands[1]; operands[1] = operands[2]; operands[2] = tmp; } } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (V2DFmode, operand1, operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9840 */ extern rtx gen_split_1250 (rtx, rtx *); rtx gen_split_1250 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (DFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9849 */ extern rtx gen_split_1251 (rtx, rtx *); rtx gen_split_1251 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); operands[4] = gen_int_mode (0x80000000, SImode); split_di (operands+0, 1, operands+2, operands+3); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand3, gen_rtx_XOR (SImode, copy_rtx (operand3), operand4)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9859 */ rtx gen_negxf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (NEG, XFmode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (XFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9877 */ extern rtx gen_split_1253 (rtx, rtx *); rtx gen_split_1253 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (XFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9886 */ extern rtx gen_split_1254 (rtx, rtx *); rtx gen_split_1254 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[1] = GEN_INT (0x8000); operands[0] = gen_rtx_REG (SImode, true_regnum (operands[0]) + (TARGET_64BIT ? 1 : 2)); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (SImode, copy_rtx (operand0), operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:9953 */ rtx gen_abssf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; if (TARGET_SSE) { /* In case operand is in memory, we will not use SSE. 
*/ if (memory_operand (operands[0], VOIDmode) && rtx_equal_p (operands[0], operands[1])) emit_insn (gen_abssf2_memory (operands[0], operands[1])); else { /* Using SSE is tricky, since we need bitwise negation of -0 in register. */ rtx reg = gen_reg_rtx (V4SFmode); rtx dest = operands[0]; rtx imm; operands[1] = force_reg (SFmode, operands[1]); operands[0] = force_reg (SFmode, operands[0]); imm = gen_lowpart (SFmode, gen_int_mode(~0x80000000, SImode)); reg = force_reg (V4SFmode, gen_rtx_CONST_VECTOR (V4SFmode, gen_rtvec (4, imm, CONST0_RTX (SFmode), CONST0_RTX (SFmode), CONST0_RTX (SFmode)))); emit_insn (gen_abssf2_ifs (operands[0], operands[1], reg)); if (dest != operands[0]) emit_move_insn (dest, operands[0]); } DONE; } ix86_expand_unary_operator (ABS, SFmode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10006 */ extern rtx gen_split_1256 (rtx, rtx *); rtx gen_split_1256 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (SFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10016 */ extern rtx gen_split_1257 (rtx, rtx *); rtx gen_split_1257 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (SFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10026 */ extern rtx gen_split_1258 (rtx, rtx *); rtx gen_split_1258 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); { operands[0] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0); operands[1] = simplify_gen_subreg (V4SFmode, operands[1], SFmode, 0); if (operands_match_p (operands[0], operands[2])) { rtx tmp; tmp = operands[1]; operands[1] = operands[2]; operands[2] = tmp; } } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (V4SFmode, operand1, operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10057 */ extern rtx gen_split_1259 (rtx, rtx *); rtx gen_split_1259 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (SFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10066 */ extern rtx gen_split_1260 (rtx, rtx *); rtx gen_split_1260 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[1] = gen_int_mode (~0x80000000, SImode); operands[0] = gen_lowpart (SImode, operands[0]); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND 
(SImode, copy_rtx (operand0), operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10076 */ extern rtx gen_split_1261 (rtx, rtx *); rtx gen_split_1261 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); { int size = GET_MODE_SIZE (GET_MODE (operands[1])); if (GET_MODE (operands[1]) == XFmode) size = 10; operands[0] = adjust_address (operands[0], QImode, size - 1); operands[1] = gen_int_mode (~0x80, QImode); } operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (QImode, copy_rtx (operand0), operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10092 */ rtx gen_absdf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; if (TARGET_SSE2) { /* In case operand is in memory, we will not use SSE. */ if (memory_operand (operands[0], VOIDmode) && rtx_equal_p (operands[0], operands[1])) emit_insn (gen_absdf2_memory (operands[0], operands[1])); else { /* Using SSE is tricky, since we need bitwise negation of -0 in register. */ rtx reg = gen_reg_rtx (V2DFmode); #if HOST_BITS_PER_WIDE_INT >= 64 rtx imm = gen_int_mode (~(((HOST_WIDE_INT)1) << 63), DImode); #else rtx imm = immed_double_const (~0, ~0x80000000, DImode); #endif rtx dest = operands[0]; operands[1] = force_reg (DFmode, operands[1]); operands[0] = force_reg (DFmode, operands[0]); /* Produce LONG_DOUBLE with the proper immediate argument. */ imm = gen_lowpart (DFmode, imm); reg = force_reg (V2DFmode, gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, imm, CONST0_RTX (DFmode)))); emit_insn (gen_absdf2_ifs (operands[0], operands[1], reg)); if (dest != operands[0]) emit_move_insn (dest, operands[0]); } DONE; } ix86_expand_unary_operator (ABS, DFmode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (DFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10160 */ extern rtx gen_split_1263 (rtx, rtx *); rtx gen_split_1263 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (DFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10170 */ extern rtx gen_split_1264 (rtx, rtx *); rtx gen_split_1264 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (DFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10180 */ extern rtx gen_split_1265 (rtx, rtx *); rtx gen_split_1265 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); { operands[0] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0); operands[1] = 
simplify_gen_subreg (V2DFmode, operands[1], DFmode, 0); /* Avoid possible reformatting on the operands. */ if (TARGET_SSE_PARTIAL_REGS && !optimize_size) emit_insn (gen_sse2_unpcklpd (operands[0], operands[0], operands[0])); if (operands_match_p (operands[0], operands[2])) { rtx tmp; tmp = operands[1]; operands[1] = operands[2]; operands[2] = tmp; } } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (V2DFmode, operand1, operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10228 */ extern rtx gen_split_1266 (rtx, rtx *); rtx gen_split_1266 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (DFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10237 */ extern rtx gen_split_1267 (rtx, rtx *); rtx gen_split_1267 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); operands[4] = gen_int_mode (~0x80000000, SImode); split_di (operands+0, 1, operands+2, operands+3); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand3, gen_rtx_AND (SImode, copy_rtx (operand3), operand4)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10247 */ rtx gen_absxf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (ABS, XFmode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (XFmode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10265 */ extern rtx gen_split_1269 (rtx, rtx *); rtx gen_split_1269 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ABS (XFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10274 */ extern rtx gen_split_1270 (rtx, rtx *); rtx gen_split_1270 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[1] = GEN_INT (~0x8000); operands[0] = gen_rtx_REG (SImode, true_regnum (operands[0]) + (TARGET_64BIT ? 
1 : 2)); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (SImode, copy_rtx (operand0), operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10338 */ rtx gen_one_cmpldi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (NOT, DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_NOT (DImode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10364 */ extern rtx gen_split_1272 (rtx, rtx *); rtx gen_split_1272 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_XOR (DImode, operand1, constm1_rtx), const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (DImode, copy_rtx (operand1), constm1_rtx))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10378 */ rtx gen_one_cmplsi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (NOT, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_NOT (SImode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10413 */ extern rtx gen_split_1274 (rtx, rtx *); rtx gen_split_1274 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_XOR (SImode, operand1, constm1_rtx), const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (SImode, copy_rtx (operand1), constm1_rtx))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10440 */ extern rtx gen_split_1275 (rtx, rtx *); rtx gen_split_1275 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_XOR (SImode, operand1, constm1_rtx), const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_XOR (SImode, copy_rtx (operand1), constm1_rtx)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10454 */ rtx gen_one_cmplhi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (NOT, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_NOT (HImode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10480 */ extern rtx gen_split_1277 (rtx, rtx *); rtx gen_split_1277 (rtx curr_insn ATTRIBUTE_UNUSED, rtx 
*operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_XOR (HImode, operand1, constm1_rtx), const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (HImode, copy_rtx (operand1), constm1_rtx))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10495 */ rtx gen_one_cmplqi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; ix86_expand_unary_operator (NOT, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_NOT (QImode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10523 */ extern rtx gen_split_1279 (rtx, rtx *); rtx gen_split_1279 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_XOR (QImode, operand1, constm1_rtx), const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (QImode, copy_rtx (operand1), constm1_rtx))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10561 */ rtx gen_ashldi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { if (!TARGET_64BIT && TARGET_CMOVE && ! immediate_operand (operands[2], QImode)) { emit_insn (gen_ashldi3_1 (operands[0], operands[1], operands[2])); DONE; } ix86_expand_binary_operator (ASHIFT, DImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10624 */ extern rtx gen_split_1281 (rtx, rtx *); rtx gen_split_1281 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = gen_int_mode (1 << INTVAL (operands[2]), DImode); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (DImode, operand1, operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10696 */ extern rtx gen_split_1282 (rtx, rtx *); rtx gen_split_1282 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_ashldi (operands, operands[3]); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10706 */ extern rtx gen_split_1283 (rtx, rtx *); rtx gen_split_1283 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_ashldi (operands, NULL_RTX); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10732 */ rtx gen_x86_shift_adj_1 (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence 
(); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCZmode, 17), gen_rtx_COMPARE (CCZmode, gen_rtx_AND (QImode, operand2, const_int_rtx[MAX_SAVED_CONST_INT + (32)]), const0_rtx))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SImode, gen_rtx_NE (VOIDmode, gen_rtx_REG (CCZmode, 17), const0_rtx), operand1, operand0))); emit_insn (gen_rtx_SET (VOIDmode, operand1, gen_rtx_IF_THEN_ELSE (SImode, gen_rtx_NE (VOIDmode, gen_rtx_REG (CCZmode, 17), const0_rtx), operand3, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10748 */ rtx gen_x86_shift_adj_2 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx label = gen_label_rtx (); rtx tmp; emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32))); tmp = gen_rtx_REG (CCZmode, FLAGS_REG); tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx); tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp, gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx); tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp)); JUMP_LABEL (tmp) = label; emit_move_insn (operands[0], operands[1]); emit_move_insn (operands[1], const0_rtx); emit_label (label); LABEL_NUSES (label) = 1; DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10776 */ rtx gen_ashlsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ASHIFT, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (SImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10826 */ extern rtx gen_split_1287 (rtx, rtx *); rtx gen_split_1287 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx pat; operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (Pmode, operands[1]); operands[2] = gen_int_mode (1 << INTVAL (operands[2]), Pmode); pat = gen_rtx_MULT (Pmode, operands[1], operands[2]); if (Pmode != SImode) pat = gen_rtx_SUBREG (SImode, pat, 0); emit_insn (gen_rtx_SET (VOIDmode, operands[0], pat)); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10847 */ extern rtx gen_split_1288 (rtx, rtx *); rtx gen_split_1288 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { rtx pat, clob; emit_move_insn (operands[1], operands[0]); pat = gen_rtx_SET (VOIDmode, operands[0], gen_rtx_ASHIFT (GET_MODE (operands[0]), operands[0], operands[2])); clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG)); emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clob))); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10905 */ extern rtx gen_split_1289 (rtx, rtx *); rtx gen_split_1289 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; 
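/* Body of a genemit-generated splitter (i386.md:10905).  The preparation statements below rewrite the constant shift count as 1 << count so the zero-extended shift can be re-emitted as a MULT inside a ZERO_EXTEND, presumably so that it matches an lea-style address-calculation pattern.  */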
start_sequence (); { operands[1] = gen_lowpart (Pmode, operands[1]); operands[2] = gen_int_mode (1 << INTVAL (operands[2]), Pmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_SUBREG (SImode, gen_rtx_MULT (SImode, operand1, operand2), 0)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:10998 */ rtx gen_ashlhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ASHIFT, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (HImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11121 */ rtx gen_ashlqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ASHIFT, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (QImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11284 */ rtx gen_ashrdi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { if (!TARGET_64BIT && TARGET_CMOVE && ! 
immediate_operand (operands[2], QImode)) { emit_insn (gen_ashrdi3_1 (operands[0], operands[1], operands[2])); DONE; } ix86_expand_binary_operator (ASHIFTRT, DImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11400 */ extern rtx gen_split_1293 (rtx, rtx *); rtx gen_split_1293 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_ashrdi (operands, operands[3]); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11410 */ extern rtx gen_split_1294 (rtx, rtx *); rtx gen_split_1294 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_ashrdi (operands, NULL_RTX); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11435 */ rtx gen_x86_shift_adj_3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx label = gen_label_rtx (); rtx tmp; emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32))); tmp = gen_rtx_REG (CCZmode, FLAGS_REG); tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx); tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp, gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx); tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp)); JUMP_LABEL (tmp) = label; emit_move_insn (operands[0], operands[1]); emit_insn (gen_ashrsi3_31 (operands[1], operands[1], GEN_INT (31))); emit_label (label); LABEL_NUSES (label) = 1; DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11496 */ rtx gen_ashrsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ASHIFTRT, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (SImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11620 */ rtx gen_ashrhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ASHIFTRT, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (HImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11692 */ rtx gen_ashrqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator 
(ASHIFTRT, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFTRT (QImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11796 */ rtx gen_lshrdi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { if (!TARGET_64BIT && TARGET_CMOVE && ! immediate_operand (operands[2], QImode)) { emit_insn (gen_lshrdi3_1 (operands[0], operands[1], operands[2])); DONE; } ix86_expand_binary_operator (LSHIFTRT, DImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (DImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11895 */ extern rtx gen_split_1300 (rtx, rtx *); rtx gen_split_1300 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_lshrdi (operands, operands[3]); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11905 */ extern rtx gen_split_1301 (rtx, rtx *); rtx gen_split_1301 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_lshrdi (operands, NULL_RTX); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:11914 */ rtx gen_lshrsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (LSHIFTRT, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (SImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12039 */ rtx gen_lshrhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (LSHIFTRT, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (HImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12111 */ rtx gen_lshrqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (LSHIFTRT, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_LSHIFTRT (QImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12212 */ rtx gen_rotldi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx 
operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ROTATE, DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ROTATE (DImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12246 */ rtx gen_rotlsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ROTATE, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ROTATE (SImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12305 */ rtx gen_rotlhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ROTATE, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ROTATE (HImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12339 */ rtx gen_rotlqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ROTATE, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ROTATE (QImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12400 */ rtx gen_rotrdi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ROTATERT, DImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ROTATERT (DImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12434 */ rtx gen_rotrsi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ROTATERT, SImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ROTATERT (SImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12496 */ rtx gen_rotrhi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ROTATERT, HImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; 
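/* DONE above expands to an early return (genemit defines DONE as a return of the insn sequence collected so far), so once ix86_expand_binary_operator has emitted the rotate, the remaining operand copies and the generic SET/clobber emission below are never reached; they are the pattern's default expansion kept by the generator.  */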
operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ROTATERT (HImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12530 */ rtx gen_rotrqi3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; ix86_expand_binary_operator (ROTATERT, QImode, operands); DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ROTATERT (QImode, operand1, operand2))); emit_insn (gen_hard_reg_clobber (CCmode, 17)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12593 */ rtx gen_extv (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { /* Handle extractions from %ah et al. */ if (INTVAL (operands[2]) != 8 || INTVAL (operands[3]) != 8) FAIL; /* From mips.md: extract_bit_field doesn't verify that our source matches the predicate, so check it again here. */ if (! register_operand (operands[1], VOIDmode)) FAIL; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_SIGN_EXTRACT (SImode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12610 */ rtx gen_extzv (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { /* Handle extractions from %ah et al. */ if (INTVAL (operands[2]) != 8 || INTVAL (operands[3]) != 8) FAIL; /* From mips.md: extract_bit_field doesn't verify that our source matches the predicate, so check it again here. */ if (! register_operand (operands[1], VOIDmode)) FAIL; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTRACT (SImode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12627 */ rtx gen_insv (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { /* Handle extractions from %ah et al. */ if (INTVAL (operands[1]) != 8 || INTVAL (operands[2]) != 8) FAIL; /* From mips.md: insert_bit_field doesn't verify that our source matches the predicate, so check it again here. */ if (! 
register_operand (operands[0], VOIDmode)) FAIL; if (TARGET_64BIT) emit_insn (gen_movdi_insv_1_rex64 (operands[0], operands[3])); else emit_insn (gen_movsi_insv_1 (operands[0], operands[3])); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (VOIDmode, operand0, operand1, operand2), operand3)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12662 */ rtx gen_seq (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (EQ, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_EQ (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12668 */ rtx gen_sne (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (NE, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_NE (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12674 */ rtx gen_sgt (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (GT, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_GT (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12680 */ rtx gen_sgtu (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (GTU, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_GTU (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12686 */ rtx gen_slt (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (LT, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_LT (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12692 */ rtx gen_sltu (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (LTU, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_LTU (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12698 */ rtx gen_sge (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (GE, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_GE (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12704 */ rtx gen_sgeu (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (GEU, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_GEU (QImode, gen_rtx_REG (CCmode, 17), 
const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12710 */ rtx gen_sle (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (LE, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_LE (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12716 */ rtx gen_sleu (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (LEU, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_LEU (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12722 */ rtx gen_sunordered (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (UNORDERED, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNORDERED (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12728 */ rtx gen_sordered (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (ORDERED, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_ORDERED (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12734 */ rtx gen_suneq (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (UNEQ, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNEQ (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12740 */ rtx gen_sunge (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (UNGE, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNGE (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12746 */ rtx gen_sungt (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (UNGT, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNGT (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12752 */ rtx gen_sunle (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (UNLE, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNLE (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12758 */ rtx gen_sunlt (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (UNLT, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, 
operand0, gen_rtx_UNLT (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12764 */ rtx gen_sltgt (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; if (ix86_expand_setcc (LTGT, operands[0])) DONE; else FAIL; operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_LTGT (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12796 */ extern rtx gen_split_1334 (rtx, rtx *); rtx gen_split_1334 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); { PUT_MODE (operands[1], QImode); } operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12807 */ extern rtx gen_split_1335 (rtx, rtx *); rtx gen_split_1335 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); { PUT_MODE (operands[1], QImode); } operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12818 */ extern rtx gen_split_1336 (rtx, rtx *); rtx gen_split_1336 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); { rtx new_op1 = copy_rtx (operands[1]); operands[1] = new_op1; PUT_MODE (new_op1, QImode); PUT_CODE (new_op1, ix86_reverse_condition (GET_CODE (new_op1), GET_MODE (XEXP (new_op1, 0)))); /* Make sure that (a) the CCmode we have for the flags is strong enough for the reversed compare or (b) we have a valid FP compare. */ if (! ix86_comparison_operator (new_op1, VOIDmode)) FAIL; } operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12838 */ extern rtx gen_split_1337 (rtx, rtx *); rtx gen_split_1337 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); { rtx new_op1 = copy_rtx (operands[1]); operands[1] = new_op1; PUT_MODE (new_op1, QImode); PUT_CODE (new_op1, ix86_reverse_condition (GET_CODE (new_op1), GET_MODE (XEXP (new_op1, 0)))); /* Make sure that (a) the CCmode we have for the flags is strong enough for the reversed compare or (b) we have a valid FP compare. */ if (! 
ix86_comparison_operator (new_op1, VOIDmode)) FAIL; } operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12892 */ rtx gen_beq (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (EQ, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12900 */ rtx gen_bne (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (NE, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12908 */ rtx gen_bgt (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (GT, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12916 */ rtx gen_bgtu (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (GTU, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12924 */ rtx gen_blt (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (LT, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12932 */ rtx gen_bltu (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (LTU, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12940 */ rtx gen_bge (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (GE, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12948 */ rtx gen_bgeu (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; 
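/* ix86_expand_branch emits the complete compare-and-branch to the label in operands[0], presumably using the comparison operands recorded by the preceding cmpXX expander, and DONE then returns that sequence; the generic conditional-jump emission below is unreachable.  */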
ix86_expand_branch (GEU, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12956 */ rtx gen_ble (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (LE, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12964 */ rtx gen_bleu (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (LEU, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12972 */ rtx gen_bunordered (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (UNORDERED, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12980 */ rtx gen_bordered (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (ORDERED, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12988 */ rtx gen_buneq (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (UNEQ, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:12996 */ rtx gen_bunge (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (UNGE, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13004 */ rtx gen_bungt (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (UNGT, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13012 */ rtx gen_bunle (rtx 
operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (UNLE, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13020 */ rtx gen_bunlt (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (UNLT, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13028 */ rtx gen_bltgt (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; ix86_expand_branch (LTGT, operands[0]); DONE; operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand1, gen_rtx_LABEL_REF (VOIDmode, operand0), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13080 */ extern rtx gen_split_1356 (rtx, rtx *); rtx gen_split_1356 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); { PUT_MODE (operands[0], VOIDmode); } operand0 = operands[0]; operand1 = operands[1]; emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand0, gen_rtx_LABEL_REF (VOIDmode, operand1), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13096 */ extern rtx gen_split_1357 (rtx, rtx *); rtx gen_split_1357 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); { rtx new_op0 = copy_rtx (operands[0]); operands[0] = new_op0; PUT_MODE (new_op0, VOIDmode); PUT_CODE (new_op0, ix86_reverse_condition (GET_CODE (new_op0), GET_MODE (XEXP (new_op0, 0)))); /* Make sure that (a) the CCmode we have for the flags is strong enough for the reversed compare or (b) we have a valid FP compare. */ if (! 
ix86_comparison_operator (new_op0, VOIDmode)) FAIL; } operand0 = operands[0]; operand1 = operands[1]; emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand0, gen_rtx_LABEL_REF (VOIDmode, operand1), pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13285 */ extern rtx gen_split_1358 (rtx, rtx *); rtx gen_split_1358 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { ix86_split_fp_branch (GET_CODE (operands[0]), operands[1], operands[2], operands[3], operands[4], NULL_RTX); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13302 */ extern rtx gen_split_1359 (rtx, rtx *); rtx gen_split_1359 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx _val = 0; start_sequence (); { ix86_split_fp_branch (GET_CODE (operands[0]), operands[1], operands[2], operands[3], operands[4], operands[5]); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, operand6, operand3, operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13340 */ rtx gen_indirect_jump (rtx operand0) { return gen_rtx_SET (VOIDmode, pc_rtx, operand0); } /* ../../gcc/gcc/config/i386/i386.md:13359 */ rtx gen_tablejump (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { /* In PIC mode, the table entries are stored GOT (32-bit) or PC (64-bit) relative. Convert the relative address to an absolute address. */ if (flag_pic) { rtx op0, op1; enum rtx_code code; if (TARGET_64BIT) { code = PLUS; op0 = operands[0]; op1 = gen_rtx_LABEL_REF (Pmode, operands[1]); } else if (TARGET_MACHO || HAVE_AS_GOTOFF_IN_DATA) { code = PLUS; op0 = operands[0]; op1 = pic_offset_table_rtx; } else { code = MINUS; op0 = pic_offset_table_rtx; op1 = operands[0]; } operands[0] = expand_simple_binop (Pmode, code, op0, op1, NULL_RTX, 0, OPTAB_DIRECT); } } operand0 = operands[0]; operand1 = operands[1]; } emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, pc_rtx, operand0), gen_rtx_USE (VOIDmode, gen_rtx_LABEL_REF (VOIDmode, operand1))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13416 */ rtx gen_doloop_end (rtx operand0, rtx operand1, rtx operand2, rtx operand3, rtx operand4) { rtx _val = 0; start_sequence (); { rtx operands[5]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; operands[4] = operand4; { /* Only use cloop on innermost loops. 
*/ if (INTVAL (operands[3]) > 1) FAIL; if (GET_MODE (operands[0]) != SImode) FAIL; emit_jump_insn (gen_doloop_end_internal (operands[4], operands[0], operands[0])); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); emit_insn (gen_rtx_USE (VOIDmode, operand3)); emit_insn (gen_rtx_USE (VOIDmode, operand4)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13469 */ extern rtx gen_split_1363 (rtx, rtx *); rtx gen_split_1363 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCZmode, 17), gen_rtx_COMPARE (CCZmode, gen_rtx_PLUS (SImode, operand1, constm1_rtx), const0_rtx)), gen_rtx_SET (VOIDmode, copy_rtx (operand1), gen_rtx_PLUS (SImode, copy_rtx (operand1), constm1_rtx))))); emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, gen_rtx_NE (VOIDmode, gen_rtx_REG (CCZmode, 17), const0_rtx), operand0, pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13492 */ extern rtx gen_split_1364 (rtx, rtx *); rtx gen_split_1364 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand3, operand1)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCZmode, 17), gen_rtx_COMPARE (CCZmode, gen_rtx_PLUS (SImode, copy_rtx (operand3), constm1_rtx), const0_rtx)), gen_rtx_SET (VOIDmode, copy_rtx (operand3), gen_rtx_PLUS (SImode, copy_rtx (operand3), constm1_rtx))))); emit_insn (gen_rtx_SET (VOIDmode, operand2, copy_rtx (operand3))); emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, gen_rtx_NE (VOIDmode, gen_rtx_REG (CCZmode, 17), const0_rtx), operand0, pc_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13520 */ extern rtx gen_peephole2_1365 (rtx, rtx *); rtx gen_peephole2_1365 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); { operands[4] = gen_rtx_REG (GET_MODE (operands[0]), 17); operands[5] = gen_lowpart (QImode, operands[3]); ix86_expand_clear (operands[3]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; emit_insn (gen_rtx_SET (VOIDmode, operand4, operand0)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand5), operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13541 */ extern rtx gen_peephole2_1366 (rtx, rtx *); rtx gen_peephole2_1366 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); { operands[4] = gen_rtx_REG (GET_MODE 
(operands[0]), 17); operands[5] = gen_lowpart (QImode, operands[3]); ix86_expand_clear (operands[3]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; emit_insn (gen_rtx_SET (VOIDmode, operand4, operand0)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand5), operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13569 */ rtx gen_call_pop (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { ix86_expand_call (NULL, operands[0], operands[1], operands[2], operands[3], 0); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_call_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_CALL (VOIDmode, operand0, operand1), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), operand3))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13616 */ rtx gen_call (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { ix86_expand_call (NULL, operands[0], operands[1], operands[2], NULL, 0); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_call_insn (gen_rtx_CALL (VOIDmode, operand0, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13626 */ rtx gen_sibcall (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { ix86_expand_call (NULL, operands[0], operands[1], operands[2], NULL, 1); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_call_insn (gen_rtx_CALL (VOIDmode, operand0, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13698 */ rtx gen_call_value_pop (rtx operand0, rtx operand1, rtx operand2, rtx operand3, rtx operand4) { rtx _val = 0; start_sequence (); { rtx operands[5]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; operands[4] = operand4; { ix86_expand_call (operands[0], operands[1], operands[2], operands[3], operands[4], 0); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; } emit_call_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_CALL (VOIDmode, operand1, operand2)), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), operand4))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13712 */ rtx gen_call_value (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { ix86_expand_call (operands[0], operands[1], operands[2], operands[3], NULL, 0); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = 
operands[3]; } emit_call_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_CALL (VOIDmode, operand1, operand2))); emit_insn (gen_rtx_USE (VOIDmode, operand3)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13724 */ rtx gen_sibcall_value (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { ix86_expand_call (operands[0], operands[1], operands[2], operands[3], NULL, 1); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_call_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_CALL (VOIDmode, operand1, operand2))); emit_insn (gen_rtx_USE (VOIDmode, operand3)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13738 */ rtx gen_untyped_call (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { int i; /* In order to give reg-stack an easier job in validating two coprocessor registers as containing a possible return value, simply pretend the untyped call returns a complex long double value. */ ix86_expand_call ((TARGET_FLOAT_RETURNS_IN_80387 ? gen_rtx_REG (XCmode, FIRST_FLOAT_REG) : NULL), operands[0], const0_rtx, GEN_INT (SSE_REGPARM_MAX - 1), NULL, 0); for (i = 0; i < XVECLEN (operands[2], 0); i++) { rtx set = XVECEXP (operands[2], 0, i); emit_move_insn (SET_DEST (set), SET_SRC (set)); } /* The optimizer does not know that the call sets the function value registers we stored in the result block. We avoid problems by claiming that all hard registers are used and clobbered at this point. */ emit_insn (gen_blockage (const0_rtx)); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_call_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_CALL (VOIDmode, operand0, const0_rtx), operand1, operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13787 */ rtx gen_return (void) { rtx _val = 0; start_sequence (); { { if (current_function_pops_args) { rtx popc = GEN_INT (current_function_pops_args); emit_jump_insn (gen_return_pop_internal (popc)); DONE; } } } emit_jump_insn (gen_rtx_RETURN (VOIDmode)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13865 */ rtx gen_prologue (void) { rtx _val = 0; start_sequence (); { ix86_expand_prologue (); DONE; } emit_insn (const1_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13879 */ rtx gen_epilogue (void) { rtx _val = 0; start_sequence (); { ix86_expand_epilogue (1); DONE; } emit_insn (const1_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13884 */ rtx gen_sibcall_epilogue (void) { rtx _val = 0; start_sequence (); { ix86_expand_epilogue (0); DONE; } emit_insn (const1_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13889 */ rtx gen_eh_return (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; { rtx tmp, sa = EH_RETURN_STACKADJ_RTX, ra = operands[0]; /* Tricky bit: we write the address of the handler to which we will be returning into someone else's stack frame, one word below the stack address we wish to restore. 
*/ tmp = gen_rtx_PLUS (Pmode, arg_pointer_rtx, sa); tmp = plus_constant (tmp, -UNITS_PER_WORD); tmp = gen_rtx_MEM (Pmode, tmp); emit_move_insn (tmp, ra); if (Pmode == SImode) emit_jump_insn (gen_eh_return_si (sa)); else emit_jump_insn (gen_eh_return_di (sa)); emit_barrier (); DONE; } operand0 = operands[0]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13911 */ extern rtx gen_split_1379 (rtx, rtx *); rtx gen_split_1379 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_expand_epilogue (2); DONE; emit_insn (const1_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13921 */ extern rtx gen_split_1380 (rtx, rtx *); rtx gen_split_1380 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_expand_epilogue (2); DONE; emit_insn (const1_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13947 */ rtx gen_ffssi2 (rtx operand0, rtx operand1) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FFS (SImode, operand1)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:13956 */ extern rtx gen_split_1382 (rtx, rtx *); rtx gen_split_1382 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, constm1_rtx)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCZmode, 17), gen_rtx_COMPARE (CCZmode, operand1, const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_CTZ (SImode, copy_rtx (operand1)))))); emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_IF_THEN_ELSE (SImode, gen_rtx_EQ (VOIDmode, gen_rtx_REG (CCZmode, 17), const0_rtx), copy_rtx (operand2), copy_rtx (operand0)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_PLUS (SImode, copy_rtx (operand0), const1_rtx)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:13975 */ extern rtx gen_split_1383 (rtx, rtx *); rtx gen_split_1383 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); { operands[3] = gen_lowpart (QImode, operands[2]); ix86_expand_clear (operands[2]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCZmode, 17), gen_rtx_COMPARE (CCZmode, operand1, const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_CTZ (SImode, copy_rtx (operand1)))))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand3), gen_rtx_EQ (QImode, gen_rtx_REG (CCZmode, 17), const0_rtx))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand2, gen_rtx_NEG (SImode, copy_rtx (operand2))), gen_hard_reg_clobber (CCmode, 17)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_IOR (SImode, copy_rtx (operand0), copy_rtx (operand2))), gen_hard_reg_clobber (CCmode, 17)))); emit (gen_rtx_PARALLEL (VOIDmode, 
gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_PLUS (SImode, copy_rtx (operand0), const1_rtx)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14008 */ rtx gen_ffsdi2 (rtx operand0, rtx operand1) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FFS (DImode, operand1)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DImode)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:14017 */ extern rtx gen_split_1385 (rtx, rtx *); rtx gen_split_1385 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, constm1_rtx)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCZmode, 17), gen_rtx_COMPARE (CCZmode, operand1, const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_CTZ (DImode, copy_rtx (operand1)))))); emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_IF_THEN_ELSE (DImode, gen_rtx_EQ (VOIDmode, gen_rtx_REG (CCZmode, 17), const0_rtx), copy_rtx (operand2), copy_rtx (operand0)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_PLUS (DImode, copy_rtx (operand0), const1_rtx)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14062 */ rtx gen_clzsi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (SImode, const_int_rtx[MAX_SAVED_CONST_INT + (31)], gen_rtx_CLZ (SImode, operand1))), gen_hard_reg_clobber (CCmode, 17)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (31)])), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14083 */ rtx gen_clzdi2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (DImode, const_int_rtx[MAX_SAVED_CONST_INT + (63)], gen_rtx_CLZ (DImode, operand1))), gen_hard_reg_clobber (CCmode, 17)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (DImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (63)])), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14138 */ rtx gen_tls_global_dynamic_32 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4 ATTRIBUTE_UNUSED; rtx operand5 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[6]; operands[0] = operand0; operands[1] = operand1; { if (flag_pic) operands[2] = pic_offset_table_rtx; else { operands[2] = gen_reg_rtx (Pmode); emit_insn (gen_set_got (operands[2])); } operands[3] = ix86_tls_get_addr (); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (3, operand2, operand1, operand3), 16)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_rtx_CLOBBER 
(VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14171 */ rtx gen_tls_global_dynamic_64 (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; { operands[2] = ix86_tls_get_addr (); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_call_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_CALL (VOIDmode, gen_rtx_MEM (QImode, operand2), const0_rtx)), gen_rtx_UNSPEC (DImode, gen_rtvec (1, operand1), 16)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14208 */ rtx gen_tls_local_dynamic_base_32 (rtx operand0) { rtx operand1; rtx operand2; rtx operand3 ATTRIBUTE_UNUSED; rtx operand4 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[5]; operands[0] = operand0; { if (flag_pic) operands[1] = pic_offset_table_rtx; else { operands[1] = gen_reg_rtx (Pmode); emit_insn (gen_set_got (operands[1])); } operands[2] = ix86_tls_get_addr (); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (2, operand1, operand2), 17)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14237 */ rtx gen_tls_local_dynamic_base_64 (rtx operand0) { rtx operand1; rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; { operands[1] = ix86_tls_get_addr (); } operand0 = operands[0]; operand1 = operands[1]; } emit_call_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_CALL (VOIDmode, gen_rtx_MEM (QImode, operand1), const0_rtx)), gen_rtx_UNSPEC (DImode, gen_rtvec (1, const0_rtx), 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14249 */ extern rtx gen_split_1392 (rtx, rtx *); rtx gen_split_1392 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (3, operand1, operand3, operand2), 16)), gen_rtx_CLOBBER (VOIDmode, operand4), gen_rtx_CLOBBER (VOIDmode, operand5), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14789 */ extern rtx gen_split_1393 (rtx, rtx *); rtx gen_split_1393 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { operands[4] = ix86_force_to_memory (GET_MODE (operands[1]), operands[1]); operands[4] = gen_rtx_FLOAT (GET_MODE (operands[0]), operands[4]); emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_rtx_fmt_ee (GET_CODE (operands[3]), GET_MODE (operands[3]), operands[4], operands[2]))); ix86_free_from_memory (GET_MODE (operands[1])); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* 
../../gcc/gcc/config/i386/i386.md:14809 */ extern rtx gen_split_1394 (rtx, rtx *); rtx gen_split_1394 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { operands[4] = ix86_force_to_memory (GET_MODE (operands[2]), operands[2]); operands[4] = gen_rtx_FLOAT (GET_MODE (operands[0]), operands[4]); emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_rtx_fmt_ee (GET_CODE (operands[3]), GET_MODE (operands[3]), operands[1], operands[4]))); ix86_free_from_memory (GET_MODE (operands[2])); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14831 */ rtx gen_sqrtsf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (!TARGET_SSE_MATH) operands[1] = force_reg (SFmode, operands[1]); } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (SFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14871 */ rtx gen_sqrtdf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (!TARGET_SSE2 || !TARGET_SSE_MATH) operands[1] = force_reg (DFmode, operands[1]); } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_SQRT (DFmode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14969 */ rtx gen_fmodsf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx label = gen_label_rtx (); rtx op1 = gen_reg_rtx (XFmode); rtx op2 = gen_reg_rtx (XFmode); emit_insn(gen_extendsfxf2 (op1, operands[1])); emit_insn(gen_extendsfxf2 (op2, operands[2])); emit_label (label); emit_insn (gen_fpremxf4 (op1, op2, op1, op2)); ix86_emit_fp_unordered_jump (label); emit_insn (gen_truncxfsf2_noop (operands[0], op1)); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:14993 */ rtx gen_fmoddf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx label = gen_label_rtx (); rtx op1 = gen_reg_rtx (XFmode); rtx op2 = gen_reg_rtx (XFmode); emit_insn (gen_extenddfxf2 (op1, operands[1])); emit_insn (gen_extenddfxf2 (op2, operands[2])); emit_label (label); emit_insn (gen_fpremxf4 (op1, op2, op1, op2)); ix86_emit_fp_unordered_jump (label); emit_insn (gen_truncxfdf2_noop (operands[0], op1)); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15017 */ rtx gen_fmodxf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx label = gen_label_rtx (); emit_label (label); emit_insn (gen_fpremxf4 (operands[1], 
operands[2], operands[1], operands[2])); ix86_emit_fp_unordered_jump (label); emit_move_insn (operands[0], operands[1]); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15052 */ rtx gen_dremsf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx label = gen_label_rtx (); rtx op1 = gen_reg_rtx (XFmode); rtx op2 = gen_reg_rtx (XFmode); emit_insn(gen_extendsfxf2 (op1, operands[1])); emit_insn(gen_extendsfxf2 (op2, operands[2])); emit_label (label); emit_insn (gen_fprem1xf4 (op1, op2, op1, op2)); ix86_emit_fp_unordered_jump (label); emit_insn (gen_truncxfsf2_noop (operands[0], op1)); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15076 */ rtx gen_dremdf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx label = gen_label_rtx (); rtx op1 = gen_reg_rtx (XFmode); rtx op2 = gen_reg_rtx (XFmode); emit_insn (gen_extenddfxf2 (op1, operands[1])); emit_insn (gen_extenddfxf2 (op2, operands[2])); emit_label (label); emit_insn (gen_fprem1xf4 (op1, op2, op1, op2)); ix86_emit_fp_unordered_jump (label); emit_insn (gen_truncxfdf2_noop (operands[0], op1)); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15100 */ rtx gen_dremxf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx label = gen_label_rtx (); emit_label (label); emit_insn (gen_fprem1xf4 (operands[1], operands[2], operands[1], operands[2])); ix86_emit_fp_unordered_jump (label); emit_move_insn (operands[0], operands[1]); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15213 */ extern rtx gen_split_1403 (rtx, rtx *); rtx gen_split_1403 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, operand2), 21))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15224 */ extern rtx gen_split_1404 (rtx, rtx *); rtx gen_split_1404 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; 
operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, operand2), 22))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15247 */ extern rtx gen_split_1405 (rtx, rtx *); rtx gen_split_1405 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (SFmode, gen_rtvec (1, operand2), 21))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15258 */ extern rtx gen_split_1406 (rtx, rtx *); rtx gen_split_1406 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SFmode, gen_rtvec (1, operand2), 22))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15283 */ extern rtx gen_split_1407 (rtx, rtx *); rtx gen_split_1407 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, gen_rtx_FLOAT_EXTEND (DFmode, operand2)), 21))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15297 */ extern rtx gen_split_1408 (rtx, rtx *); rtx gen_split_1408 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, gen_rtx_FLOAT_EXTEND (DFmode, operand2)), 22))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15323 */ extern rtx gen_split_1409 (rtx, rtx *); rtx gen_split_1409 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 21))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15334 */ extern rtx gen_split_1410 (rtx, rtx *); rtx gen_split_1410 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 22))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15362 */ extern rtx gen_peephole2_1411 (rtx, rtx *); rtx gen_peephole2_1411 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, operand2), 82)), gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, 
copy_rtx (operand2)), 83))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15375 */ rtx gen_tandf2 (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (DFmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand2, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, operand1), 82)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DFmode, gen_rtvec (1, operand1), 83))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15404 */ extern rtx gen_peephole2_1413 (rtx, rtx *); rtx gen_peephole2_1413 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SFmode, gen_rtvec (1, operand2), 82)), gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (SFmode, gen_rtvec (1, copy_rtx (operand2)), 83))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15417 */ rtx gen_tansf2 (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (SFmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand2, gen_rtx_UNSPEC (SFmode, gen_rtvec (1, operand1), 82)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SFmode, gen_rtvec (1, operand1), 83))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15446 */ extern rtx gen_peephole2_1415 (rtx, rtx *); rtx gen_peephole2_1415 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 82)), gen_rtx_SET (VOIDmode, operand1, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, copy_rtx (operand2)), 83))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15459 */ rtx gen_tanxf2 (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (XFmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand2, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand1), 82)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand1), 83))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15483 */ rtx gen_atan2df3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx copy = gen_reg_rtx (DFmode); emit_move_insn (copy, operands[1]); emit_insn (gen_atan2df3_1 
(operands[0], copy, operands[2])); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15496 */ rtx gen_atandf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (DFmode); emit_move_insn (operands[2], CONST1_RTX (DFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DFmode, gen_rtvec (2, operand2, operand1), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15521 */ rtx gen_atan2sf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx copy = gen_reg_rtx (SFmode); emit_move_insn (copy, operands[1]); emit_insn (gen_atan2sf3_1 (operands[0], copy, operands[2])); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15534 */ rtx gen_atansf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (SFmode); emit_move_insn (operands[2], CONST1_RTX (SFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SFmode, gen_rtvec (2, operand2, operand1), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SFmode))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15559 */ rtx gen_atan2xf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { rtx copy = gen_reg_rtx (XFmode); emit_move_insn (copy, operands[1]); emit_insn (gen_atan2xf3_1 (operands[0], copy, operands[2])); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15572 */ rtx gen_atanxf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (XFmode); emit_move_insn (operands[2], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand1), 65)), 
gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15585 */ rtx gen_asindf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[9]; operands[0] = operand0; operands[1] = operand1; { int i; for (i=2; i<8; i++) operands[i] = gen_reg_rtx (XFmode); emit_move_insn (operands[4], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_MULT (XFmode, operand2, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_MINUS (XFmode, operand4, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_SQRT (XFmode, operand5))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand6, operand2), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand7))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15608 */ rtx gen_asinsf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[9]; operands[0] = operand0; operands[1] = operand1; { int i; for (i=2; i<8; i++) operands[i] = gen_reg_rtx (XFmode); emit_move_insn (operands[4], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_MULT (XFmode, operand2, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_MINUS (XFmode, operand4, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_SQRT (XFmode, operand5))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand6, operand2), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand7))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15631 */ rtx gen_asinxf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[7]; operands[0] = operand0; operands[1] = operand1; { int i; for (i=2; i<6; i++) operands[i] = gen_reg_rtx (XFmode); emit_move_insn (operands[3], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_MULT (XFmode, operand1, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MINUS (XFmode, operand3, 
operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_SQRT (XFmode, operand4))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand5, operand1), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15652 */ rtx gen_acosdf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[9]; operands[0] = operand0; operands[1] = operand1; { int i; for (i=2; i<8; i++) operands[i] = gen_reg_rtx (XFmode); emit_move_insn (operands[4], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_MULT (XFmode, operand2, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_MINUS (XFmode, operand4, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_SQRT (XFmode, operand5))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand6), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand7))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15675 */ rtx gen_acossf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[9]; operands[0] = operand0; operands[1] = operand1; { int i; for (i=2; i<8; i++) operands[i] = gen_reg_rtx (XFmode); emit_move_insn (operands[4], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_MULT (XFmode, operand2, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_MINUS (XFmode, operand4, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_SQRT (XFmode, operand5))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand6), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand7))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15698 */ rtx gen_acosxf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[7]; operands[0] = operand0; operands[1] = operand1; { int i; for (i=2; i<6; i++) operands[i] = gen_reg_rtx (XFmode); emit_move_insn (operands[3], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = 
operands[4]; operand5 = operands[5]; operand6 = operands[6]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_MULT (XFmode, operand1, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MINUS (XFmode, operand3, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_SQRT (XFmode, operand4))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand1, operand5), 65)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15731 */ rtx gen_logsf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[6]; operands[0] = operand0; operands[1] = operand1; { rtx temp; operands[2] = gen_reg_rtx (XFmode); operands[3] = gen_reg_rtx (XFmode); operands[4] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (4); /* fldln2 */ emit_move_insn (operands[3], temp); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 66)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15753 */ rtx gen_logdf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[6]; operands[0] = operand0; operands[1] = operand1; { rtx temp; operands[2] = gen_reg_rtx (XFmode); operands[3] = gen_reg_rtx (XFmode); operands[4] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (4); /* fldln2 */ emit_move_insn (operands[3], temp); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 66)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15775 */ rtx gen_logxf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; { rtx temp; operands[2] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (4); /* fldln2 */ emit_move_insn (operands[2], temp); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand1, operand2), 66)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15790 */ rtx gen_log10sf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx 
operand5 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[6]; operands[0] = operand0; operands[1] = operand1; { rtx temp; operands[2] = gen_reg_rtx (XFmode); operands[3] = gen_reg_rtx (XFmode); operands[4] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (3); /* fldlg2 */ emit_move_insn (operands[3], temp); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 66)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15812 */ rtx gen_log10df2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[6]; operands[0] = operand0; operands[1] = operand1; { rtx temp; operands[2] = gen_reg_rtx (XFmode); operands[3] = gen_reg_rtx (XFmode); operands[4] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (3); /* fldlg2 */ emit_move_insn (operands[3], temp); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 66)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15834 */ rtx gen_log10xf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; { rtx temp; operands[2] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (3); /* fldlg2 */ emit_move_insn (operands[2], temp); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand1, operand2), 66)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15849 */ rtx gen_log2sf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[6]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (XFmode); operands[3] = gen_reg_rtx (XFmode); operands[4] = gen_reg_rtx (XFmode); emit_move_insn (operands[3], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 66)), gen_rtx_CLOBBER (VOIDmode, 
gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15868 */ rtx gen_log2df2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[6]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (XFmode); operands[3] = gen_reg_rtx (XFmode); operands[4] = gen_reg_rtx (XFmode); emit_move_insn (operands[3], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand2, operand3), 66)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15887 */ rtx gen_log2xf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3 ATTRIBUTE_UNUSED; rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (XFmode); emit_move_insn (operands[2], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand1, operand2), 66)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15911 */ rtx gen_log1psf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { rtx op0 = gen_reg_rtx (XFmode); rtx op1 = gen_reg_rtx (XFmode); emit_insn (gen_extendsfxf2 (op1, operands[1])); ix86_emit_i387_log1p (op0, op1); emit_insn (gen_truncxfsf2_noop (operands[0], op0)); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15926 */ rtx gen_log1pdf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { rtx op0 = gen_reg_rtx (XFmode); rtx op1 = gen_reg_rtx (XFmode); emit_insn (gen_extenddfxf2 (op1, operands[1])); ix86_emit_i387_log1p (op0, op1); emit_insn (gen_truncxfdf2_noop (operands[0], op0)); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15941 */ rtx gen_log1pxf2 (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_emit_i387_log1p (operands[0], operands[1]); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); _val = get_insns (); end_sequence (); return _val; } /* 
../../gcc/gcc/config/i386/i386.md:15963 */ rtx gen_logbsf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); { rtx operands[5]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (XFmode); operands[3] = gen_reg_rtx (XFmode); operands[4] = gen_reg_rtx (XFmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand3, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 84)), gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 85))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15980 */ rtx gen_logbdf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); { rtx operands[5]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (XFmode); operands[3] = gen_reg_rtx (XFmode); operands[4] = gen_reg_rtx (XFmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand3, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 84)), gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 85))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand4))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:15997 */ rtx gen_logbxf2 (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; { operands[2] = gen_reg_rtx (XFmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand2, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand1), 84)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand1), 85))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16009 */ rtx gen_ilogbsi2 (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { operands[2] = gen_reg_rtx (XFmode); operands[3] = gen_reg_rtx (XFmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand2, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand1), 84)), gen_rtx_SET (VOIDmode, operand3, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand1), 85))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_FIX (SImode, operand3)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16059 */ rtx gen_expsf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx operand10; rtx 
operand11; rtx _val = 0; start_sequence (); { rtx operands[12]; operands[0] = operand0; operands[1] = operand1; { rtx temp; int i; for (i=2; i<12; i++) operands[i] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (5); /* fldl2e */ emit_move_insn (operands[3], temp); emit_move_insn (operands[8], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; operand10 = operands[10]; operand11 = operands[11]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MULT (XFmode, operand2, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand4), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_MINUS (XFmode, operand4, operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand6), 69))); emit_insn (gen_rtx_SET (VOIDmode, operand9, gen_rtx_PLUS (XFmode, operand7, operand8))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand10, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand5), 86)), gen_rtx_SET (VOIDmode, operand11, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand5), 87))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand10))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16088 */ rtx gen_expdf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx operand10; rtx operand11; rtx _val = 0; start_sequence (); { rtx operands[12]; operands[0] = operand0; operands[1] = operand1; { rtx temp; int i; for (i=2; i<12; i++) operands[i] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (5); /* fldl2e */ emit_move_insn (operands[3], temp); emit_move_insn (operands[8], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; operand10 = operands[10]; operand11 = operands[11]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MULT (XFmode, operand2, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand4), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_MINUS (XFmode, operand4, operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand6), 69))); emit_insn (gen_rtx_SET (VOIDmode, operand9, gen_rtx_PLUS (XFmode, operand7, operand8))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand10, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand5), 86)), gen_rtx_SET (VOIDmode, operand11, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand5), 87))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand10))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16117 */ rtx gen_expxf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx 
operand7; rtx operand8; rtx operand9; rtx _val = 0; start_sequence (); { rtx operands[10]; operands[0] = operand0; operands[1] = operand1; { rtx temp; int i; for (i=2; i<10; i++) operands[i] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (5); /* fldl2e */ emit_move_insn (operands[2], temp); emit_move_insn (operands[7], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; } emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_MULT (XFmode, operand1, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand3), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_MINUS (XFmode, operand3, operand4))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand5), 69))); emit_insn (gen_rtx_SET (VOIDmode, operand8, gen_rtx_PLUS (XFmode, operand6, operand7))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand8, operand4), 86)), gen_rtx_SET (VOIDmode, operand9, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand8, operand4), 87))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16143 */ rtx gen_exp10sf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx operand10; rtx operand11; rtx _val = 0; start_sequence (); { rtx operands[12]; operands[0] = operand0; operands[1] = operand1; { rtx temp; int i; for (i=2; i<12; i++) operands[i] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (6); /* fldl2t */ emit_move_insn (operands[3], temp); emit_move_insn (operands[8], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; operand10 = operands[10]; operand11 = operands[11]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MULT (XFmode, operand2, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand4), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_MINUS (XFmode, operand4, operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand6), 69))); emit_insn (gen_rtx_SET (VOIDmode, operand9, gen_rtx_PLUS (XFmode, operand7, operand8))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand10, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand5), 86)), gen_rtx_SET (VOIDmode, operand11, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand5), 87))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand10))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16172 */ rtx gen_exp10df2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx operand10; rtx operand11; rtx _val = 0; start_sequence (); { rtx operands[12]; operands[0] = operand0; operands[1] = operand1; { rtx temp; int i; for (i=2; 
i<12; i++) operands[i] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (6); /* fldl2t */ emit_move_insn (operands[3], temp); emit_move_insn (operands[8], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; operand10 = operands[10]; operand11 = operands[11]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MULT (XFmode, operand2, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand4), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_MINUS (XFmode, operand4, operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand6), 69))); emit_insn (gen_rtx_SET (VOIDmode, operand9, gen_rtx_PLUS (XFmode, operand7, operand8))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand10, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand5), 86)), gen_rtx_SET (VOIDmode, operand11, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand5), 87))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand10))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16201 */ rtx gen_exp10xf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx _val = 0; start_sequence (); { rtx operands[10]; operands[0] = operand0; operands[1] = operand1; { rtx temp; int i; for (i=2; i<10; i++) operands[i] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (6); /* fldl2t */ emit_move_insn (operands[2], temp); emit_move_insn (operands[7], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; } emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_MULT (XFmode, operand1, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand3), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_MINUS (XFmode, operand3, operand4))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand5), 69))); emit_insn (gen_rtx_SET (VOIDmode, operand8, gen_rtx_PLUS (XFmode, operand6, operand7))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand8, operand4), 86)), gen_rtx_SET (VOIDmode, operand9, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand8, operand4), 87))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16227 */ rtx gen_exp2sf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx _val = 0; start_sequence (); { rtx operands[10]; operands[0] = operand0; operands[1] = operand1; { int i; for (i=2; i<10; i++) operands[i] = gen_reg_rtx (XFmode); emit_move_insn (operands[6], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = 
operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MINUS (XFmode, operand2, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand4), 69))); emit_insn (gen_rtx_SET (VOIDmode, operand7, gen_rtx_PLUS (XFmode, operand5, operand6))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand8, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand3), 86)), gen_rtx_SET (VOIDmode, operand9, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand3), 87))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand8))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16252 */ rtx gen_exp2df2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx _val = 0; start_sequence (); { rtx operands[10]; operands[0] = operand0; operands[1] = operand1; { int i; for (i=2; i<10; i++) operands[i] = gen_reg_rtx (XFmode); emit_move_insn (operands[6], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MINUS (XFmode, operand2, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand4), 69))); emit_insn (gen_rtx_SET (VOIDmode, operand7, gen_rtx_PLUS (XFmode, operand5, operand6))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand8, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand3), 86)), gen_rtx_SET (VOIDmode, operand9, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand3), 87))))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand8))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16277 */ rtx gen_exp2xf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx _val = 0; start_sequence (); { rtx operands[9]; operands[0] = operand0; operands[1] = operand1; { int i; for (i=2; i<9; i++) operands[i] = gen_reg_rtx (XFmode); emit_move_insn (operands[6], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand2), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MINUS (XFmode, operand2, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand4), 69))); emit_insn (gen_rtx_SET (VOIDmode, operand7, gen_rtx_PLUS (XFmode, 
operand5, operand6))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand3), 86)), gen_rtx_SET (VOIDmode, operand8, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand3), 87))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16299 */ rtx gen_expm1df2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx operand10; rtx operand11; rtx operand12; rtx operand13; rtx operand14; rtx _val = 0; start_sequence (); { rtx operands[15]; operands[0] = operand0; operands[1] = operand1; { rtx temp; int i; for (i=2; i<15; i++) operands[i] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (5); /* fldl2e */ emit_move_insn (operands[3], temp); emit_move_insn (operands[10], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; operand10 = operands[10]; operand11 = operands[11]; operand12 = operands[12]; operand13 = operands[13]; operand14 = operands[14]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MULT (XFmode, operand2, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand4), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_MINUS (XFmode, operand4, operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand6), 69))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand8, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand5), 86)), gen_rtx_SET (VOIDmode, operand9, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand5), 87))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand11, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand10, operand9), 86)), gen_rtx_SET (VOIDmode, operand12, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand10, operand9), 87))))); emit_insn (gen_rtx_SET (VOIDmode, operand13, gen_rtx_MINUS (XFmode, operand11, operand10))); emit_insn (gen_rtx_SET (VOIDmode, operand14, gen_rtx_PLUS (XFmode, operand13, operand8))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (DFmode, operand14))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16335 */ rtx gen_expm1sf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx operand10; rtx operand11; rtx operand12; rtx operand13; rtx operand14; rtx _val = 0; start_sequence (); { rtx operands[15]; operands[0] = operand0; operands[1] = operand1; { rtx temp; int i; for (i=2; i<15; i++) operands[i] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (5); /* fldl2e */ emit_move_insn (operands[3], temp); emit_move_insn (operands[10], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; operand10 = operands[10]; operand11 = operands[11]; operand12 = operands[12]; operand13 = operands[13]; 
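/* The sequence emitted below mirrors gen_expm1df2 above, only with a final
   truncation back to SFmode: x is widened to XFmode and multiplied by the
   fldl2e constant log2(e) in operands[3]; the product is split into a
   rounded part (unspec 68) and the remainder, the remainder goes through
   what is evidently the 2^frac - 1 step (unspec 69), and the pieces are
   recombined with the paired scaling unspecs 86/87 so that the result is
   2^(x*log2(e)) - 1, i.e. expm1(x).  */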
operand14 = operands[14]; } emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_FLOAT_EXTEND (XFmode, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_MULT (XFmode, operand2, operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand4), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_MINUS (XFmode, operand4, operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand6), 69))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand8, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand5), 86)), gen_rtx_SET (VOIDmode, operand9, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand7, operand5), 87))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand11, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand10, operand9), 86)), gen_rtx_SET (VOIDmode, operand12, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand10, operand9), 87))))); emit_insn (gen_rtx_SET (VOIDmode, operand13, gen_rtx_MINUS (XFmode, operand11, operand10))); emit_insn (gen_rtx_SET (VOIDmode, operand14, gen_rtx_PLUS (XFmode, operand13, operand8))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_FLOAT_TRUNCATE (SFmode, operand14))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16371 */ rtx gen_expm1xf2 (rtx operand0, rtx operand1) { rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx operand9; rtx operand10; rtx operand11; rtx operand12; rtx _val = 0; start_sequence (); { rtx operands[13]; operands[0] = operand0; operands[1] = operand1; { rtx temp; int i; for (i=2; i<13; i++) operands[i] = gen_reg_rtx (XFmode); temp = standard_80387_constant_rtx (5); /* fldl2e */ emit_move_insn (operands[2], temp); emit_move_insn (operands[9], CONST1_RTX (XFmode)); /* fld1 */ } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; operand9 = operands[9]; operand10 = operands[10]; operand11 = operands[11]; operand12 = operands[12]; } emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_MULT (XFmode, operand1, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand3), 68))); emit_insn (gen_rtx_SET (VOIDmode, operand5, gen_rtx_MINUS (XFmode, operand3, operand4))); emit_insn (gen_rtx_SET (VOIDmode, operand6, gen_rtx_UNSPEC (XFmode, gen_rtvec (1, operand5), 69))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand7, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand6, operand4), 86)), gen_rtx_SET (VOIDmode, operand8, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand6, operand4), 87))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand10, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand8), 86)), gen_rtx_SET (VOIDmode, operand11, gen_rtx_UNSPEC (XFmode, gen_rtvec (2, operand9, operand8), 87))))); emit_insn (gen_rtx_SET (VOIDmode, operand12, gen_rtx_MINUS (XFmode, operand10, operand9))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (XFmode, operand12, operand7))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16413 */ rtx gen_movstrsi (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; 
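/* This expander and the DImode variant gen_movstrdi below defer entirely
   to ix86_expand_movstr with the block-move operands (destination, source,
   length, alignment).  DONE and FAIL are the genemit macros that return
   from this function, so the trailing USE insns, which come from the
   expander's RTL template, are unreachable in practice.  */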
operands[2] = operand2; operands[3] = operand3; { if (ix86_expand_movstr (operands[0], operands[1], operands[2], operands[3])) DONE; else FAIL; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); emit_insn (gen_rtx_USE (VOIDmode, operand3)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16426 */ rtx gen_movstrdi (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { if (ix86_expand_movstr (operands[0], operands[1], operands[2], operands[3])) DONE; else FAIL; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); emit_insn (gen_rtx_USE (VOIDmode, operand3)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16442 */ rtx gen_strmov (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx operand4; rtx operand5; rtx operand6; rtx _val = 0; start_sequence (); { rtx operands[7]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { rtx adjust = GEN_INT (GET_MODE_SIZE (GET_MODE (operands[1]))); /* If .md ever supports :P for Pmode, these can be directly in the pattern above. */ operands[5] = gen_rtx_PLUS (Pmode, operands[0], adjust); operands[6] = gen_rtx_PLUS (Pmode, operands[2], adjust); if (TARGET_SINGLE_STRINGOP || optimize_size) { emit_insn (gen_strmov_singleop (operands[0], operands[1], operands[2], operands[3], operands[5], operands[6])); DONE; } operands[4] = gen_reg_rtx (GET_MODE (operands[1])); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; } emit_insn (gen_rtx_SET (VOIDmode, operand4, operand3)); emit_insn (gen_rtx_SET (VOIDmode, operand1, operand4)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, operand5), gen_hard_reg_clobber (CCmode, 17)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand2, operand6), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16469 */ rtx gen_strmov_singleop (rtx operand0, rtx operand1, rtx operand2, rtx operand3, rtx operand4, rtx operand5) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand1, operand3), gen_rtx_SET (VOIDmode, operand0, operand4), gen_rtx_SET (VOIDmode, operand2, operand5), gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 19)))); } /* ../../gcc/gcc/config/i386/i386.md:16592 */ rtx gen_rep_mov (rtx operand0, rtx operand1, rtx operand2, rtx operand3, rtx operand4, rtx operand5, rtx operand6) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (6, gen_rtx_SET (VOIDmode, operand4, const0_rtx), gen_rtx_SET (VOIDmode, operand0, operand5), gen_rtx_SET (VOIDmode, operand2, operand6), gen_rtx_SET (VOIDmode, operand1, operand3), gen_rtx_USE (VOIDmode, operand4), gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 19)))); } /* ../../gcc/gcc/config/i386/i386.md:16701 */ rtx gen_clrstrsi (rtx operand0, rtx operand1, rtx operand2) { 
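/* Block clear: like the movstr expanders above, clrstrsi/clrstrdi hand
   their operands (destination, length, alignment) to ix86_expand_clrstr
   and return through DONE or FAIL before the USE insns below would ever
   be reached.  */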
rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { if (ix86_expand_clrstr (operands[0], operands[1], operands[2])) DONE; else FAIL; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16713 */ rtx gen_clrstrdi (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { if (ix86_expand_clrstr (operands[0], operands[1], operands[2])) DONE; else FAIL; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_USE (VOIDmode, operand0)); emit_insn (gen_rtx_USE (VOIDmode, operand1)); emit_insn (gen_rtx_USE (VOIDmode, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16728 */ rtx gen_strset (rtx operand0, rtx operand1, rtx operand2) { rtx operand3; rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { if (GET_MODE (operands[1]) != GET_MODE (operands[2])) operands[1] = adjust_address_nv (operands[1], GET_MODE (operands[2]), 0); /* If .md ever supports :P for Pmode, this can be directly in the pattern above. */ operands[3] = gen_rtx_PLUS (Pmode, operands[0], GEN_INT (GET_MODE_SIZE (GET_MODE (operands[2])))); if (TARGET_SINGLE_STRINGOP || optimize_size) { emit_insn (gen_strset_singleop (operands[0], operands[1], operands[2], operands[3])); DONE; } } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand1, operand2)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, operand3), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:16752 */ rtx gen_strset_singleop (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand1, operand2), gen_rtx_SET (VOIDmode, operand0, operand3), gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 19)))); } /* ../../gcc/gcc/config/i386/i386.md:16852 */ rtx gen_rep_stos (rtx operand0, rtx operand1, rtx operand2, rtx operand3, rtx operand4) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (6, gen_rtx_SET (VOIDmode, operand1, const0_rtx), gen_rtx_SET (VOIDmode, operand0, operand4), gen_rtx_SET (VOIDmode, operand2, const0_rtx), gen_rtx_USE (VOIDmode, operand3), gen_rtx_USE (VOIDmode, operand1), gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 19)))); } /* ../../gcc/gcc/config/i386/i386.md:16951 */ rtx gen_cmpstrsi (rtx operand0, rtx operand1, rtx operand2, rtx operand3, rtx operand4) { rtx _val = 0; start_sequence (); { rtx operands[5]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; operands[4] = operand4; { rtx addr1, addr2, out, outlow, count, countreg, align; /* Can't use this if the user has appropriated esi or edi. 
*/ if (global_regs[4] || global_regs[5]) FAIL; out = operands[0]; if (GET_CODE (out) != REG) out = gen_reg_rtx (SImode); addr1 = copy_to_mode_reg (Pmode, XEXP (operands[1], 0)); addr2 = copy_to_mode_reg (Pmode, XEXP (operands[2], 0)); if (addr1 != XEXP (operands[1], 0)) operands[1] = replace_equiv_address_nv (operands[1], addr1); if (addr2 != XEXP (operands[2], 0)) operands[2] = replace_equiv_address_nv (operands[2], addr2); count = operands[3]; countreg = ix86_zero_extend_to_Pmode (count); /* %%% Iff we are testing strict equality, we can use known alignment to good advantage. This may be possible with combine, particularly once cc0 is dead. */ align = operands[4]; emit_insn (gen_cld ()); if (GET_CODE (count) == CONST_INT) { if (INTVAL (count) == 0) { emit_move_insn (operands[0], const0_rtx); DONE; } emit_insn (gen_cmpstrqi_nz_1 (addr1, addr2, countreg, align, operands[1], operands[2])); } else { if (TARGET_64BIT) emit_insn (gen_cmpdi_1_rex64 (countreg, countreg)); else emit_insn (gen_cmpsi_1 (countreg, countreg)); emit_insn (gen_cmpstrqi_1 (addr1, addr2, countreg, align, operands[1], operands[2])); } outlow = gen_lowpart (QImode, out); emit_insn (gen_cmpintqi (outlow)); emit_move_insn (out, gen_rtx_SIGN_EXTEND (SImode, outlow)); if (operands[0] != out) emit_move_insn (operands[0], out); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_COMPARE (SImode, operand1, operand2))); emit_insn (gen_rtx_USE (VOIDmode, operand3)); emit_insn (gen_rtx_USE (VOIDmode, operand4)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17017 */ rtx gen_cmpintqi (rtx operand0) { rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = gen_reg_rtx (QImode); operands[2] = gen_reg_rtx (QImode); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand1, gen_rtx_GTU (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_LTU (QImode, gen_rtx_REG (CCmode, 17), const0_rtx))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (QImode, operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17033 */ rtx gen_cmpstrqi_nz_1 (rtx operand0, rtx operand1, rtx operand2, rtx operand3, rtx operand4, rtx operand5) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (7, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, operand4, operand5)), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, operand3), gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 19)), gen_rtx_CLOBBER (VOIDmode, operand0), gen_rtx_CLOBBER (VOIDmode, operand1), gen_rtx_CLOBBER (VOIDmode, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:17080 */ rtx gen_cmpstrqi_1 (rtx operand0, rtx operand1, rtx operand2, rtx operand3, rtx operand4, rtx operand5) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (7, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_IF_THEN_ELSE (CCmode, gen_rtx_NE (VOIDmode, operand2, const0_rtx), gen_rtx_COMPARE (CCmode, operand4, operand5), const0_rtx)), gen_rtx_USE (VOIDmode, operand3), gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, 17)), gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 19)), gen_rtx_CLOBBER (VOIDmode, operand0), gen_rtx_CLOBBER 
(VOIDmode, operand1), gen_rtx_CLOBBER (VOIDmode, operand2))); } /* ../../gcc/gcc/config/i386/i386.md:17134 */ rtx gen_strlensi (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { if (ix86_expand_strlen (operands[0], operands[1], operands[2], operands[3])) DONE; else FAIL; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (SImode, gen_rtvec (3, operand1, operand2, operand3), 20))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17147 */ rtx gen_strlendi (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; { if (ix86_expand_strlen (operands[0], operands[1], operands[2], operands[3])) DONE; else FAIL; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (DImode, gen_rtvec (3, operand1, operand2, operand3), 20))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17160 */ rtx gen_strlenqi_1 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, operand2), gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 19)), gen_rtx_CLOBBER (VOIDmode, operand1), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:17212 */ extern rtx gen_peephole2_1474 (rtx, rtx *); rtx gen_peephole2_1474 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (7, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_COMPARE (CCmode, gen_rtx_MEM (BLKmode, operand4), gen_rtx_MEM (BLKmode, operand5))), gen_rtx_USE (VOIDmode, operand6), gen_rtx_USE (VOIDmode, operand3), gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 19)), gen_rtx_CLOBBER (VOIDmode, operand0), gen_rtx_CLOBBER (VOIDmode, operand1), gen_rtx_CLOBBER (VOIDmode, operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17244 */ extern rtx gen_peephole2_1475 (rtx, rtx *); rtx gen_peephole2_1475 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (7, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, 17), gen_rtx_IF_THEN_ELSE (CCmode, gen_rtx_NE (VOIDmode, operand6, const0_rtx), gen_rtx_COMPARE (CCmode, gen_rtx_MEM (BLKmode, operand4), gen_rtx_MEM (BLKmode, operand5)), const0_rtx)), gen_rtx_USE (VOIDmode, operand3), gen_rtx_USE (VOIDmode, 
gen_rtx_REG (CCmode, 17)), gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 19)), gen_rtx_CLOBBER (VOIDmode, operand0), gen_rtx_CLOBBER (VOIDmode, operand1), gen_rtx_CLOBBER (VOIDmode, operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17285 */ rtx gen_movdicc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (!ix86_expand_int_movcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DImode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17324 */ rtx gen_movsicc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (!ix86_expand_int_movcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SImode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17367 */ rtx gen_movhicc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (!ix86_expand_int_movcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (HImode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17389 */ rtx gen_movqicc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (!ix86_expand_int_movcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (QImode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17397 */ extern rtx gen_split_1480 (rtx, rtx *); rtx gen_split_1480 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operands[2] = gen_lowpart (SImode, operands[2]); operands[3] = gen_lowpart (SImode, operands[3]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SImode, gen_rtx_fmt_ee (GET_CODE (operand1), GET_MODE (operand1), operand4, const0_rtx), operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17416 */ rtx gen_movsfcc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = 
operand2; operands[3] = operand3; if (! ix86_expand_fp_movcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SFmode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17440 */ rtx gen_movdfcc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (! ix86_expand_fp_movcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DFmode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17480 */ extern rtx gen_split_1483 (rtx, rtx *); rtx gen_split_1483 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx _val = 0; start_sequence (); split_di (operands+2, 1, operands+5, operands+6); split_di (operands+3, 1, operands+7, operands+8); split_di (operands, 1, operands+2, operands+3); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_IF_THEN_ELSE (SImode, gen_rtx_fmt_ee (GET_CODE (operand1), GET_MODE (operand1), operand4, const0_rtx), operand5, operand7))); emit_insn (gen_rtx_SET (VOIDmode, operand3, gen_rtx_IF_THEN_ELSE (SImode, gen_rtx_fmt_ee (GET_CODE (operand1), GET_MODE (operand1), copy_rtx (operand4), const0_rtx), operand6, operand8))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17499 */ rtx gen_movxfcc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (! 
ix86_expand_fp_movcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (XFmode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17520 */ rtx gen_minsf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SFmode, gen_rtx_LT (VOIDmode, operand1, operand2), operand1, operand2)), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:17552 */ extern rtx gen_split_1486 (rtx, rtx *); rtx gen_split_1486 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SFmode, gen_rtx_LT (VOIDmode, operand1, operand2), copy_rtx (operand1), copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17571 */ rtx gen_addqicc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (!ix86_expand_int_addcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (operand0); emit (operand1); emit (operand2); emit (operand3); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17579 */ rtx gen_addhicc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (!ix86_expand_int_addcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (operand0); emit (operand1); emit (operand2); emit (operand3); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17587 */ rtx gen_addsicc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (!ix86_expand_int_addcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (operand0); emit (operand1); emit (operand2); emit (operand3); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17595 */ rtx gen_adddicc (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { rtx _val = 0; start_sequence (); { rtx operands[4]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; operands[3] = operand3; if (!ix86_expand_int_addcc (operands)) FAIL; DONE; operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; } emit (operand0); emit (operand1); emit (operand2); emit (operand3); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17605 */ extern rtx gen_split_1491 (rtx, rtx *); rtx gen_split_1491 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; 
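/* This splitter turns the SFmode min-style if_then_else into an explicit
   floating-point comparison of operand 2 with operand 1 in the flags
   register (hard reg 17, CCFPmode), followed by a conditional move that
   picks operand 1 when operand 2 >= operand 1, i.e. the smaller of the
   two values.  */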
operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 17), gen_rtx_COMPARE (CCFPmode, operand2, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SFmode, gen_rtx_GE (VOIDmode, gen_rtx_REG (CCFPmode, 17), const0_rtx), copy_rtx (operand1), copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17636 */ rtx gen_mindf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; # operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DFmode, gen_rtx_LT (VOIDmode, operand1, operand2), operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17668 */ extern rtx gen_split_1493 (rtx, rtx *); rtx gen_split_1493 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DFmode, gen_rtx_LT (VOIDmode, operand1, operand2), copy_rtx (operand1), copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17687 */ extern rtx gen_split_1494 (rtx, rtx *); rtx gen_split_1494 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 17), gen_rtx_COMPARE (CCFPmode, operand2, operand1))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DFmode, gen_rtx_GE (VOIDmode, gen_rtx_REG (CCFPmode, 17), const0_rtx), copy_rtx (operand1), copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17718 */ rtx gen_maxsf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; # operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SFmode, gen_rtx_GT (VOIDmode, operand1, operand2), operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17750 */ extern rtx gen_split_1496 (rtx, rtx *); rtx gen_split_1496 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SFmode, gen_rtx_GT (VOIDmode, operand1, operand2), copy_rtx (operand1), copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17768 */ extern rtx gen_split_1497 (rtx, rtx *); rtx gen_split_1497 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET 
(VOIDmode, gen_rtx_REG (CCFPmode, 17), gen_rtx_COMPARE (CCFPmode, operand1, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SFmode, gen_rtx_GT (VOIDmode, gen_rtx_REG (CCFPmode, 17), const0_rtx), copy_rtx (operand1), copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17799 */ rtx gen_maxdf3 (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; # operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DFmode, gen_rtx_GT (VOIDmode, operand1, operand2), operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17831 */ extern rtx gen_split_1499 (rtx, rtx *); rtx gen_split_1499 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DFmode, gen_rtx_GT (VOIDmode, operand1, operand2), copy_rtx (operand1), copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:17849 */ extern rtx gen_split_1500 (rtx, rtx *); rtx gen_split_1500 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 17), gen_rtx_COMPARE (CCFPmode, operand1, operand2))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (DFmode, gen_rtx_GT (VOIDmode, gen_rtx_REG (CCFPmode, 17), const0_rtx), copy_rtx (operand1), copy_rtx (operand2)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18075 */ extern rtx gen_split_1501 (rtx, rtx *); rtx gen_split_1501 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); { ix86_compare_op0 = operands[5]; ix86_compare_op1 = operands[4]; operands[1] = gen_rtx_fmt_ee (swap_condition (GET_CODE (operands[1])), VOIDmode, operands[5], operands[4]); ix86_expand_fp_movcc (operands); DONE; } emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18101 */ extern rtx gen_split_1502 (rtx, rtx *); rtx gen_split_1502 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx _val = 0; start_sequence (); { /* If op2 == op3, op3 would be clobbered before it is used. 
*/ if (operands_match_p (operands[2], operands[3])) { emit_move_insn (operands[0], operands[2]); DONE; } PUT_MODE (operands[1], GET_MODE (operands[0])); if (operands_match_p (operands[0], operands[4])) operands[6] = operands[4], operands[7] = operands[2]; else operands[6] = operands[2], operands[7] = operands[4]; operands[0] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0); operands[2] = simplify_gen_subreg (V4SFmode, operands[2], SFmode, 0); operands[3] = simplify_gen_subreg (V4SFmode, operands[3], SFmode, 0); operands[8] = simplify_gen_subreg (V4SFmode, operands[4], SFmode, 0); operands[6] = simplify_gen_subreg (V4SFmode, operands[6], SFmode, 0); operands[7] = simplify_gen_subreg (V4SFmode, operands[7], SFmode, 0); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_fmt_ee (GET_CODE (operand1), GET_MODE (operand1), copy_rtx (operand4), operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_AND (V4SFmode, copy_rtx (operand2), operand8))); emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operand8), gen_rtx_AND (V4SFmode, gen_rtx_NOT (V4SFmode, copy_rtx (operand8)), operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (V4SFmode, operand6, operand7))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18139 */ extern rtx gen_split_1503 (rtx, rtx *); rtx gen_split_1503 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx _val = 0; start_sequence (); { if (GET_MODE (operands[2]) == DFmode && TARGET_SSE_PARTIAL_REGS && !optimize_size) { rtx op = simplify_gen_subreg (V2DFmode, operands[2], DFmode, 0); emit_insn (gen_sse2_unpcklpd (op, op, op)); op = simplify_gen_subreg (V2DFmode, operands[3], DFmode, 0); emit_insn (gen_sse2_unpcklpd (op, op, op)); } /* If op2 == op3, op3 would be clobbered before it is used. 
*/ if (operands_match_p (operands[2], operands[3])) { emit_move_insn (operands[0], operands[2]); DONE; } PUT_MODE (operands[1], GET_MODE (operands[0])); if (operands_match_p (operands[0], operands[4])) operands[6] = operands[4], operands[7] = operands[2]; else operands[6] = operands[2], operands[7] = operands[4]; operands[0] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0); operands[2] = simplify_gen_subreg (V2DFmode, operands[2], DFmode, 0); operands[3] = simplify_gen_subreg (V2DFmode, operands[3], DFmode, 0); operands[8] = simplify_gen_subreg (V2DFmode, operands[4], DFmode, 0); operands[6] = simplify_gen_subreg (V2DFmode, operands[6], DFmode, 0); operands[7] = simplify_gen_subreg (V2DFmode, operands[7], DFmode, 0); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; emit_insn (gen_rtx_SET (VOIDmode, operand4, gen_rtx_fmt_ee (GET_CODE (operand1), GET_MODE (operand1), copy_rtx (operand4), operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand2, gen_rtx_AND (V2DFmode, copy_rtx (operand2), operand8))); emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operand8), gen_rtx_AND (V2DFmode, gen_rtx_NOT (V2DFmode, copy_rtx (operand8)), operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (V2DFmode, operand6, operand7))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18269 */ extern rtx gen_split_1504 (rtx, rtx *); rtx gen_split_1504 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx _val = 0; start_sequence (); { PUT_MODE (operands[1], GET_MODE (operands[0])); if (!sse_comparison_operator (operands[1], VOIDmode) || !rtx_equal_p (operands[0], operands[4])) { rtx tmp = operands[5]; operands[5] = operands[4]; operands[4] = tmp; PUT_CODE (operands[1], swap_condition (GET_CODE (operands[1]))); } if (!rtx_equal_p (operands[0], operands[4])) abort (); operands[8] = simplify_gen_subreg (V4SFmode, operands[0], SFmode, 0); if (const0_operand (operands[2], GET_MODE (operands[2]))) { operands[7] = operands[3]; operands[6] = gen_rtx_NOT (V4SFmode, operands[8]); } else { operands[7] = operands[2]; operands[6] = operands[8]; } operands[7] = simplify_gen_subreg (V4SFmode, operands[7], SFmode, 0); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_fmt_ee (GET_CODE (operand1), GET_MODE (operand1), copy_rtx (operand0), operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand8, gen_rtx_AND (V4SFmode, operand6, operand7))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18307 */ extern rtx gen_split_1505 (rtx, rtx *); rtx gen_split_1505 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx operand4; rtx operand5; rtx operand6; rtx operand7; rtx operand8; rtx _val = 0; start_sequence (); { if (TARGET_SSE_PARTIAL_REGS && !optimize_size && GET_MODE (operands[2]) == DFmode) { if (REG_P (operands[2])) { rtx op = simplify_gen_subreg (V2DFmode, operands[2], DFmode, 0); emit_insn (gen_sse2_unpcklpd (op, op, op)); } if (REG_P (operands[3])) { rtx op = 
simplify_gen_subreg (V2DFmode, operands[3], DFmode, 0); emit_insn (gen_sse2_unpcklpd (op, op, op)); } } PUT_MODE (operands[1], GET_MODE (operands[0])); if (!sse_comparison_operator (operands[1], VOIDmode) || !rtx_equal_p (operands[0], operands[4])) { rtx tmp = operands[5]; operands[5] = operands[4]; operands[4] = tmp; PUT_CODE (operands[1], swap_condition (GET_CODE (operands[1]))); } if (!rtx_equal_p (operands[0], operands[4])) abort (); operands[8] = simplify_gen_subreg (V2DFmode, operands[0], DFmode, 0); if (const0_operand (operands[2], GET_MODE (operands[2]))) { operands[7] = operands[3]; operands[6] = gen_rtx_NOT (V2DFmode, operands[8]); } else { operands[7] = operands[2]; operands[6] = operands[8]; } operands[7] = simplify_gen_subreg (V2DFmode, operands[7], DFmode, 0); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; operand4 = operands[4]; operand5 = operands[5]; operand6 = operands[6]; operand7 = operands[7]; operand8 = operands[8]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_fmt_ee (GET_CODE (operand1), GET_MODE (operand1), copy_rtx (operand0), operand5))); emit_insn (gen_rtx_SET (VOIDmode, operand8, gen_rtx_AND (V2DFmode, operand6, operand7))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18359 */ rtx gen_allocate_stack_worker (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; { if (reload_completed) { if (TARGET_64BIT) emit_insn (gen_allocate_stack_worker_rex64_postreload (operands[0])); else emit_insn (gen_allocate_stack_worker_postreload (operands[0])); } else { if (TARGET_64BIT) emit_insn (gen_allocate_stack_worker_rex64 (operands[0])); else emit_insn (gen_allocate_stack_worker_1 (operands[0])); } DONE; } operand0 = operands[0]; } emit (operand0); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18391 */ rtx gen_allocate_stack_worker_postreload (rtx operand0) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, operand0), 10), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_MINUS (SImode, gen_rtx_REG (SImode, 7), operand0)), gen_rtx_CLOBBER (VOIDmode, operand0), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:18411 */ rtx gen_allocate_stack_worker_rex64_postreload (rtx operand0) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_UNSPEC_VOLATILE (DImode, gen_rtvec (1, operand0), 10), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_MINUS (DImode, gen_rtx_REG (DImode, 7), operand0)), gen_rtx_CLOBBER (VOIDmode, operand0), gen_hard_reg_clobber (CCmode, 17))); } /* ../../gcc/gcc/config/i386/i386.md:18420 */ rtx gen_allocate_stack (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { #ifdef CHECK_STACK_LIMIT if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) < CHECK_STACK_LIMIT) emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, operands[1])); else #endif emit_insn (gen_allocate_stack_worker (copy_to_mode_reg (SImode, operands[1]))); emit_move_insn (operands[0], virtual_stack_dynamic_rtx); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MINUS (SImode, gen_rtx_REG (SImode, 7), operand1)), gen_hard_reg_clobber (CCmode, 17)))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG 
(SImode, 7), gen_rtx_MINUS (SImode, gen_rtx_REG (SImode, 7), operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18444 */ rtx gen_builtin_setjmp_receiver (rtx operand0) { rtx _val = 0; start_sequence (); { rtx operands[1]; operands[0] = operand0; { emit_insn (gen_set_got (pic_offset_table_rtx)); DONE; } operand0 = operands[0]; } emit_insn (gen_rtx_LABEL_REF (VOIDmode, operand0)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18454 */ extern rtx gen_split_1511 (rtx, rtx *); rtx gen_split_1511 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (SImode, operands[1]); if (GET_CODE (operands[3]) != ASHIFT) operands[2] = gen_lowpart (SImode, operands[2]); PUT_MODE (operands[3], SImode); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_fmt_ee (GET_CODE (operand3), GET_MODE (operand3), operand1, operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18480 */ extern rtx gen_split_1512 (rtx, rtx *); rtx gen_split_1512 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); operands[2] = gen_int_mode (INTVAL (operands[2]) & GET_MODE_MASK (GET_MODE (operands[0])), SImode); operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (SImode, operands[1]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_AND (SImode, operand1, operand2), const0_rtx)), gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (SImode, copy_rtx (operand1), copy_rtx (operand2)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18509 */ extern rtx gen_split_1513 (rtx, rtx *); rtx gen_split_1513 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[1] = gen_int_mode (INTVAL (operands[1]) & GET_MODE_MASK (GET_MODE (operands[0])), SImode); operands[0] = gen_lowpart (SImode, operands[0]); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_AND (SImode, operand0, operand1), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18528 */ extern rtx gen_split_1514 (rtx, rtx *); rtx gen_split_1514 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (SImode, operands[1]); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_NEG (SImode, operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18542 */ extern rtx gen_split_1515 (rtx, rtx *); rtx gen_split_1515 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; 
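/* This splitter (like its neighbours) rewrites a narrow-mode operation as
   a full SImode one: both operands are replaced by their SImode lowparts
   and a plain SImode one's-complement (NOT) is emitted in their place,
   presumably so the work is done on the full register rather than a
   partial one.  */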
start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operands[1] = gen_lowpart (SImode, operands[1]); operand0 = operands[0]; operand1 = operands[1]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_NOT (SImode, operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18554 */ extern rtx gen_split_1516 (rtx, rtx *); rtx gen_split_1516 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); operands[0] = gen_lowpart (SImode, operands[0]); operands[2] = gen_lowpart (SImode, operands[2]); operands[3] = gen_lowpart (SImode, operands[3]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_IF_THEN_ELSE (SImode, operand1, operand2, operand3))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18575 */ extern rtx gen_peephole2_1517 (rtx, rtx *); rtx gen_peephole2_1517 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (1, 1, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18584 */ extern rtx gen_peephole2_1518 (rtx, rtx *); rtx gen_peephole2_1518 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (1, 1, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18595 */ extern rtx gen_peephole2_1519 (rtx, rtx *); rtx gen_peephole2_1519 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (1, 1, "r", SFmode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18604 */ extern rtx gen_peephole2_1520 (rtx, rtx *); rtx gen_peephole2_1520 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (1, 1, "r", HImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = 
get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18613 */ extern rtx gen_peephole2_1521 (rtx, rtx *); rtx gen_peephole2_1521 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (1, 1, "q", QImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18624 */ extern rtx gen_peephole2_1522 (rtx, rtx *); rtx gen_peephole2_1522 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[1] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand1, const0_rtx), gen_hard_reg_clobber (CCmode, 17)))); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand1))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18638 */ extern rtx gen_peephole2_1523 (rtx, rtx *); rtx gen_peephole2_1523 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[1] = peep2_find_free_register (0, 0, "r", HImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operands[2] = gen_lowpart (SImode, operands[1]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand2, const0_rtx), gen_hard_reg_clobber (CCmode, 17)))); emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18652 */ extern rtx gen_peephole2_1524 (rtx, rtx *); rtx gen_peephole2_1524 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[1] = peep2_find_free_register (0, 0, "q", QImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operands[2] = gen_lowpart (SImode, operands[1]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand2, const0_rtx), gen_hard_reg_clobber (CCmode, 17)))); emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18666 */ extern rtx gen_peephole2_1525 (rtx, rtx *); rtx gen_peephole2_1525 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, 
copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18677 */ extern rtx gen_peephole2_1526 (rtx, rtx *); rtx gen_peephole2_1526 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "r", HImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18687 */ extern rtx gen_peephole2_1527 (rtx, rtx *); rtx gen_peephole2_1527 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "q", QImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit_insn (gen_rtx_SET (VOIDmode, operand0, copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18698 */ extern rtx gen_peephole2_1528 (rtx, rtx *); rtx gen_peephole2_1528 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[3] = peep2_find_free_register (1, 1, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand3, operand0)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, copy_rtx (operand3), const0_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18719 */ extern rtx gen_peephole2_1529 (rtx, rtx *); rtx gen_peephole2_1529 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (SImode, operand1, constm1_rtx)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18733 */ extern rtx gen_peephole2_1530 (rtx, rtx *); rtx gen_peephole2_1530 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (HImode, operand1, constm1_rtx)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18747 */ extern rtx gen_peephole2_1531 (rtx, rtx *); rtx gen_peephole2_1531 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); 
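/* Note: the peephole2 generators in this stretch all share one scaffold.
   A scratch register, when one is needed, comes from
   peep2_find_free_register; if none is free the generator returns NULL and
   the peephole is simply not applied.  The replacement insns are collected
   between start_sequence () and end_sequence (), and hard register 17 is the
   i386 flags register, so gen_hard_reg_clobber (CCmode, 17) records that the
   substituted pattern clobbers EFLAGS.  */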
start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (QImode, operand1, constm1_rtx)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18768 */ extern rtx gen_peephole2_1532 (rtx, rtx *); rtx gen_peephole2_1532 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_AND (SImode, operand0, operand1), const0_rtx)), gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_AND (SImode, copy_rtx (operand0), copy_rtx (operand1)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18790 */ extern rtx gen_peephole2_1533 (rtx, rtx *); rtx gen_peephole2_1533 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_AND (QImode, operand0, operand1), const0_rtx)), gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_AND (QImode, copy_rtx (operand0), copy_rtx (operand1)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18808 */ extern rtx gen_peephole2_1534 (rtx, rtx *); rtx gen_peephole2_1534 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCNOmode, 17), gen_rtx_COMPARE (CCNOmode, gen_rtx_AND (SImode, gen_rtx_ZERO_EXTRACT (SImode, operand0, const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), operand1), const0_rtx)), gen_rtx_SET (VOIDmode, gen_rtx_ZERO_EXTRACT (SImode, copy_rtx (operand0), const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), gen_rtx_AND (SImode, gen_rtx_ZERO_EXTRACT (SImode, copy_rtx (operand0), const_int_rtx[MAX_SAVED_CONST_INT + (8)], const_int_rtx[MAX_SAVED_CONST_INT + (8)]), copy_rtx (operand1)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18843 */ extern rtx gen_peephole2_1535 (rtx, rtx *); rtx gen_peephole2_1535 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_fmt_ee (GET_CODE (operand3), GET_MODE (operand3), copy_rtx (operand0), copy_rtx (operand2))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* 
../../gcc/gcc/config/i386/i386.md:18857 */ extern rtx gen_peephole2_1536 (rtx, rtx *); rtx gen_peephole2_1536 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_fmt_ee (GET_CODE (operand3), GET_MODE (operand3), copy_rtx (operand2), copy_rtx (operand0))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18877 */ extern rtx gen_peephole2_1537 (rtx, rtx *); rtx gen_peephole2_1537 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand0)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand2), gen_rtx_fmt_ee (GET_CODE (operand3), GET_MODE (operand3), copy_rtx (operand2), operand1)), gen_hard_reg_clobber (CCmode, 17)))); emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operand0), copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18892 */ extern rtx gen_peephole2_1538 (rtx, rtx *); rtx gen_peephole2_1538 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[2] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand2, operand0)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand2), gen_rtx_fmt_ee (GET_CODE (operand3), GET_MODE (operand3), operand1, copy_rtx (operand2))), gen_hard_reg_clobber (CCmode, 17)))); emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operand0), copy_rtx (operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18908 */ extern rtx gen_peephole2_1539 (rtx, rtx *); rtx gen_peephole2_1539 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operands[0] = gen_lowpart (GET_MODE (operands[0]) == DImode ? 
DImode : SImode, operands[0]); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, const0_rtx), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18922 */ extern rtx gen_peephole2_1540 (rtx, rtx *); rtx gen_peephole2_1540 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, operand0), const0_rtx), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18933 */ extern rtx gen_peephole2_1541 (rtx, rtx *); rtx gen_peephole2_1541 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operands[0] = gen_lowpart (GET_MODE (operands[0]) == DImode ? DImode : SImode, operands[0]); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, constm1_rtx), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18948 */ extern rtx gen_peephole2_1542 (rtx, rtx *); rtx gen_peephole2_1542 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (SImode, copy_rtx (operand0), operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18957 */ extern rtx gen_peephole2_1543 (rtx, rtx *); rtx gen_peephole2_1543 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operands[2] = gen_lowpart (SImode, operands[2]); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (SImode, copy_rtx (operand0), operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18966 */ extern rtx gen_peephole2_1544 (rtx, rtx *); rtx gen_peephole2_1544 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_PLUS (DImode, copy_rtx (operand0), operand1)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18975 */ extern rtx gen_peephole2_1545 (rtx, rtx *); rtx gen_peephole2_1545 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operands[2] = GEN_INT (exact_log2 (INTVAL (operands[1]))); operand0 = operands[0]; operand1 = operands[1]; operand2 
= operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (SImode, copy_rtx (operand0), operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18985 */ extern rtx gen_peephole2_1546 (rtx, rtx *); rtx gen_peephole2_1546 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operands[2] = GEN_INT (exact_log2 (INTVAL (operands[1]))); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (DImode, copy_rtx (operand0), operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:18995 */ extern rtx gen_peephole2_1547 (rtx, rtx *); rtx gen_peephole2_1547 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operands[2] = GEN_INT (exact_log2 (INTVAL (operands[2]))); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ASHIFT (SImode, copy_rtx (operand0), operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19025 */ extern rtx gen_peephole2_1548 (rtx, rtx *); rtx gen_peephole2_1548 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit_insn (gen_rtx_CLOBBER (VOIDmode, operand0)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_MEM (SImode, gen_rtx_PRE_DEC (SImode, gen_rtx_REG (SImode, 7))), copy_rtx (operand0)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19035 */ extern rtx gen_peephole2_1549 (rtx, rtx *); rtx gen_peephole2_1549 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit_insn (gen_rtx_CLOBBER (VOIDmode, operand0)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (SImode, gen_rtx_PRE_DEC (SImode, gen_rtx_REG (SImode, 7))), copy_rtx (operand0))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_MEM (SImode, gen_rtx_PRE_DEC (SImode, gen_rtx_REG (SImode, 7))), copy_rtx (operand0)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19047 */ extern rtx gen_peephole2_1550 (rtx, rtx *); rtx gen_peephole2_1550 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, 
&_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit_insn (gen_rtx_CLOBBER (VOIDmode, operand0)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (SImode, gen_rtx_PRE_DEC (SImode, gen_rtx_REG (SImode, 7))), copy_rtx (operand0))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19055 */ extern rtx gen_peephole2_1551 (rtx, rtx *); rtx gen_peephole2_1551 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit_insn (gen_rtx_CLOBBER (VOIDmode, operand0)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (SImode, gen_rtx_PRE_DEC (SImode, gen_rtx_REG (SImode, 7))), copy_rtx (operand0))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (SImode, gen_rtx_PRE_DEC (SImode, gen_rtx_REG (SImode, 7))), copy_rtx (operand0))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19065 */ extern rtx gen_peephole2_1552 (rtx, rtx *); rtx gen_peephole2_1552 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)])), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19078 */ extern rtx gen_peephole2_1553 (rtx, rtx *); rtx gen_peephole2_1553 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; if ((operands[1] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)])), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand1, gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19092 */ extern rtx gen_peephole2_1554 (rtx, rtx *); rtx gen_peephole2_1554 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; 
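/* Note: in the stack peepholes around i386.md:19025-19129, hard register 7
   is the stack pointer.  A SET to (mem (pre_dec (reg 7))) models a push,
   while a PARALLEL of a load from (mem (reg 7)) with an add of 4 to reg 7
   models a 32-bit pop; the extra (clobber (mem:BLK (scratch))) appears to
   act as a memory barrier for the stack adjustment.  */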
emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)])), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19106 */ extern rtx gen_peephole2_1555 (rtx, rtx *); rtx gen_peephole2_1555 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19117 */ extern rtx gen_peephole2_1556 (rtx, rtx *); rtx gen_peephole2_1556 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; if ((operands[1] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)]))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand1, gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19129 */ extern rtx gen_peephole2_1557 (rtx, rtx *); rtx gen_peephole2_1557 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)]))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_MEM (SImode, gen_rtx_REG (SImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (4)]))))); _val = get_insns 
(); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19142 */ extern rtx gen_peephole2_1558 (rtx, rtx *); rtx gen_peephole2_1558 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCGCmode, 17), gen_rtx_COMPARE (CCGCmode, operand0, operand1)), gen_rtx_CLOBBER (VOIDmode, copy_rtx (operand0))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19154 */ extern rtx gen_peephole2_1559 (rtx, rtx *); rtx gen_peephole2_1559 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCGCmode, 17), gen_rtx_COMPARE (CCGCmode, operand0, operand1)), gen_rtx_CLOBBER (VOIDmode, copy_rtx (operand0))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19166 */ extern rtx gen_peephole2_1560 (rtx, rtx *); rtx gen_peephole2_1560 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCGCmode, 17), gen_rtx_COMPARE (CCGCmode, operand0, operand1)), gen_rtx_CLOBBER (VOIDmode, copy_rtx (operand0))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19179 */ extern rtx gen_peephole2_1561 (rtx, rtx *); rtx gen_peephole2_1561 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCGCmode, 17), gen_rtx_COMPARE (CCGCmode, operand0, GEN_INT (128L))), gen_rtx_CLOBBER (VOIDmode, copy_rtx (operand0))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19191 */ extern rtx gen_peephole2_1562 (rtx, rtx *); rtx gen_peephole2_1562 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_REG (CCGCmode, 17), gen_rtx_COMPARE (CCGCmode, operand0, GEN_INT (128L))), gen_rtx_CLOBBER (VOIDmode, copy_rtx (operand0))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19203 */ extern rtx gen_peephole2_1563 (rtx, rtx *); rtx gen_peephole2_1563 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit_insn (gen_rtx_CLOBBER (VOIDmode, operand0)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_MEM (DImode, gen_rtx_PRE_DEC (DImode, gen_rtx_REG (DImode, 7))), copy_rtx 
(operand0)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19213 */ extern rtx gen_peephole2_1564 (rtx, rtx *); rtx gen_peephole2_1564 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit_insn (gen_rtx_CLOBBER (VOIDmode, operand0)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DImode, gen_rtx_PRE_DEC (DImode, gen_rtx_REG (DImode, 7))), copy_rtx (operand0))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, gen_rtx_MEM (DImode, gen_rtx_PRE_DEC (DImode, gen_rtx_REG (DImode, 7))), copy_rtx (operand0)), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19225 */ extern rtx gen_peephole2_1565 (rtx, rtx *); rtx gen_peephole2_1565 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit_insn (gen_rtx_CLOBBER (VOIDmode, operand0)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DImode, gen_rtx_PRE_DEC (DImode, gen_rtx_REG (DImode, 7))), copy_rtx (operand0))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19233 */ extern rtx gen_peephole2_1566 (rtx, rtx *); rtx gen_peephole2_1566 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit_insn (gen_rtx_CLOBBER (VOIDmode, operand0)); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DImode, gen_rtx_PRE_DEC (DImode, gen_rtx_REG (DImode, 7))), copy_rtx (operand0))); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DImode, gen_rtx_PRE_DEC (DImode, gen_rtx_REG (DImode, 7))), copy_rtx (operand0))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19243 */ extern rtx gen_peephole2_1567 (rtx, rtx *); rtx gen_peephole2_1567 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)])), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19256 */ extern rtx gen_peephole2_1568 (rtx, rtx *); rtx gen_peephole2_1568 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if 
((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; if ((operands[1] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)])), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand1, gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19270 */ extern rtx gen_peephole2_1569 (rtx, rtx *); rtx gen_peephole2_1569 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)])), gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19284 */ extern rtx gen_peephole2_1570 (rtx, rtx *); rtx gen_peephole2_1570 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19295 */ extern rtx gen_peephole2_1571 (rtx, rtx *); rtx gen_peephole2_1571 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; if ((operands[1] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, 
gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)]))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand1, gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19307 */ extern rtx gen_peephole2_1572 (rtx, rtx *); rtx gen_peephole2_1572 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[0] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)]))))); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, copy_rtx (operand0), gen_rtx_MEM (DImode, gen_rtx_REG (DImode, 7))), gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), const_int_rtx[MAX_SAVED_CONST_INT + (8)]))))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19320 */ extern rtx gen_peephole2_1573 (rtx, rtx *); rtx gen_peephole2_1573 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[3] = peep2_find_free_register (0, 0, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand3, operand1)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (DImode, copy_rtx (operand3), operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19334 */ extern rtx gen_peephole2_1574 (rtx, rtx *); rtx gen_peephole2_1574 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[3] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand3, operand1)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (SImode, copy_rtx (operand3), operand2)), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19348 */ extern rtx gen_peephole2_1575 (rtx, rtx *); rtx gen_peephole2_1575 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[3] = peep2_find_free_register (0, 0, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; 
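/* Note: the multiply peepholes from i386.md:19320 onward first copy the
   memory or immediate source into the freshly allocated scratch register and
   then emit the multiply pattern with that scratch as one operand, again
   clobbering the flags register (hard reg 17) -- presumably so the resulting
   imul does not read its source directly from memory.  */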
emit_insn (gen_rtx_SET (VOIDmode, operand3, operand1)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_ZERO_EXTEND (DImode, gen_rtx_MULT (SImode, copy_rtx (operand3), operand2))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19367 */ extern rtx gen_peephole2_1576 (rtx, rtx *); rtx gen_peephole2_1576 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[3] = peep2_find_free_register (1, 1, "r", DImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); { if (!rtx_equal_p (operands[0], operands[1])) emit_move_insn (operands[0], operands[1]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand3, operand2)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (DImode, copy_rtx (operand0), copy_rtx (operand3))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19383 */ extern rtx gen_peephole2_1577 (rtx, rtx *); rtx gen_peephole2_1577 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[3] = peep2_find_free_register (1, 1, "r", SImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); { if (!rtx_equal_p (operands[0], operands[1])) emit_move_insn (operands[0], operands[1]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand3, operand2)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (SImode, copy_rtx (operand0), copy_rtx (operand3))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19399 */ extern rtx gen_peephole2_1578 (rtx, rtx *); rtx gen_peephole2_1578 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; HARD_REG_SET _regs_allocated; CLEAR_HARD_REG_SET (_regs_allocated); if ((operands[3] = peep2_find_free_register (1, 1, "r", HImode, &_regs_allocated)) == NULL_RTX) return NULL; start_sequence (); { if (!rtx_equal_p (operands[0], operands[1])) emit_move_insn (operands[0], operands[1]); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, operand3, operand2)); emit (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, gen_rtx_SET (VOIDmode, operand0, gen_rtx_MULT (HImode, copy_rtx (operand0), copy_rtx (operand3))), gen_hard_reg_clobber (CCmode, 17)))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19554 */ rtx gen_conditional_trap (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; { emit_insn (gen_rtx_TRAP_IF (VOIDmode, ix86_expand_compare (GET_CODE (operands[0]), NULL, NULL), operands[1])); DONE; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_TRAP_IF (VOIDmode, gen_rtx_fmt_ee (GET_CODE (operand0), VOIDmode, operand2, 
const0_rtx), operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19595 */ extern rtx gen_split_1580 (rtx, rtx *); rtx gen_split_1580 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); { operands[1] = simplify_gen_subreg (SFmode, operands[1], V4SFmode, 0); operands[2] = CONST0_RTX (V4SFmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V4SFmode, gen_rtx_VEC_DUPLICATE (V4SFmode, operand1), operand2, const1_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19689 */ extern rtx gen_split_1581 (rtx, rtx *); rtx gen_split_1581 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx _val = 0; start_sequence (); { operands[1] = simplify_gen_subreg (DFmode, operands[1], V2DFmode, 0); operands[2] = CONST0_RTX (V2DFmode); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_VEC_MERGE (V2DFmode, gen_rtx_VEC_DUPLICATE (V2DFmode, operand1), operand2, const1_rtx))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19751 */ rtx gen_movti (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (TARGET_64BIT) ix86_expand_move (TImode, operands); else ix86_expand_vector_move (TImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19763 */ rtx gen_movtf (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (TARGET_64BIT) ix86_expand_move (TFmode, operands); else ix86_expand_vector_move (TFmode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19898 */ rtx gen_movv2df (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V2DFmode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19907 */ rtx gen_movv8hi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V8HImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19916 */ rtx gen_movv16qi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V16QImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19925 */ rtx gen_movv4sf (rtx operand0, rtx operand1) { rtx _val = 0; 
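/* Note: the gen_mov* expanders in this stretch all have the same shape.
   The C body taken from the .md file runs first and normally hands the
   operands to ix86_expand_move or ix86_expand_vector_move and then executes
   DONE, which in this generated file is a macro that returns the
   already-emitted sequence immediately; the trailing emit_insn of a plain
   SET is only reached if that body falls through.  */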
start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V4SFmode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19934 */ rtx gen_movv4si (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V4SImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19943 */ rtx gen_movv2di (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V2DImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19952 */ rtx gen_movv2si (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V2SImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19961 */ rtx gen_movv4hi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V4HImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19970 */ rtx gen_movv8qi (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V8QImode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:19979 */ rtx gen_movv2sf (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { ix86_expand_vector_move (V2SFmode, operands); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:20054 */ extern rtx gen_split_1594 (rtx, rtx *); rtx gen_split_1594 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); operands[2] = change_address (operands[0], GET_MODE (operands[0]), stack_pointer_rtx); operands[3] = GEN_INT (-GET_MODE_SIZE (GET_MODE (operands[0]))); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 7), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 7), operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:20065 */ extern rtx gen_split_1595 (rtx, rtx *); 
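/* Note: gen_split_1594 above and gen_split_1595 below rewrite a push of a
   non-trivial operand as an explicit stack-pointer adjustment followed by a
   store: operand 2 is the original destination re-addressed through
   stack_pointer_rtx, and operand 3 is minus the byte size of the pushed
   mode (the SImode variant adjusts reg 7 as esp, the DImode variant as
   rsp).  */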
rtx gen_split_1595 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands) { rtx operand0; rtx operand1; rtx operand2; rtx operand3; rtx _val = 0; start_sequence (); operands[2] = change_address (operands[0], GET_MODE (operands[0]), stack_pointer_rtx); operands[3] = GEN_INT (-GET_MODE_SIZE (GET_MODE (operands[0]))); operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; operand3 = operands[3]; emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (DImode, 7), gen_rtx_PLUS (DImode, gen_rtx_REG (DImode, 7), operand3))); emit_insn (gen_rtx_SET (VOIDmode, operand2, operand1)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:20204 */ extern rtx gen_split_1596 (rtx, rtx *); rtx gen_split_1596 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_long_move (operands); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:20212 */ extern rtx gen_split_1597 (rtx, rtx *); rtx gen_split_1597 (rtx curr_insn ATTRIBUTE_UNUSED, rtx *operands ATTRIBUTE_UNUSED) { rtx _val = 0; start_sequence (); ix86_split_long_move (operands); DONE; emit_insn (const0_rtx); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:20222 */ rtx gen_sse_movaps (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) { rtx tmp = gen_reg_rtx (V4SFmode); emit_insn (gen_sse_movaps (tmp, operands[1])); emit_move_insn (operands[0], tmp); DONE; } } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (1, operand1), 38))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:20247 */ rtx gen_sse_movups (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM) { rtx tmp = gen_reg_rtx (V4SFmode); emit_insn (gen_sse_movups (tmp, operands[1])); emit_move_insn (operands[0], tmp); DONE; } } operand0 = operands[0]; operand1 = operands[1]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (V4SFmode, gen_rtvec (1, operand1), 39))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:20387 */ rtx gen_sse_loadss (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { emit_insn (gen_sse_loadss_1 (operands[0], operands[1], CONST0_RTX (V4SFmode))); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit (operand0); emit (operand1); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:20487 */ rtx gen_negv4sf2 (rtx operand0, rtx operand1) { rtx operand2; rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; { rtx m0 = gen_lowpart (SFmode, gen_int_mode (0x80000000, SImode)); rtx vm0 = gen_rtx_CONST_VECTOR (V4SFmode, gen_rtvec (4, m0, m0, m0, m0)); operands[2] = force_reg (V4SFmode, vm0); } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (V4SFmode, operand1, operand2))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:20635 */ 
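/* Note: from here on, generators such as gen_sse_andv4sf3 correspond to
   single-SET patterns, so they simply build and return the rtx instead of
   emitting an insn sequence.  A typical caller (hypothetical sketch, not
   part of this file) would wrap them in emit_insn:

     emit_insn (gen_sse_andv4sf3 (dest, op1, op2));

   whereas the define_expand style generators above return a complete insn
   list obtained from get_insns ().  */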
rtx gen_sse_andv4sf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (V4SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20652 */ rtx gen_sse_nandv4sf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (V4SFmode, gen_rtx_NOT (V4SFmode, operand1), operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20668 */ rtx gen_sse_iorv4sf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (V4SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20685 */ rtx gen_sse_xorv4sf3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (V4SFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20704 */ rtx gen_sse2_andv2df3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (V2DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20721 */ rtx gen_sse2_nandv2df3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_AND (V2DFmode, gen_rtx_NOT (V2DFmode, operand1), operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20737 */ rtx gen_sse2_iorv2df3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_IOR (V2DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:20754 */ rtx gen_sse2_xorv2df3 (rtx operand0, rtx operand1, rtx operand2) { return gen_rtx_SET (VOIDmode, operand0, gen_rtx_XOR (V2DFmode, operand1, operand2)); } /* ../../gcc/gcc/config/i386/i386.md:21894 */ rtx gen_sfence (void) { rtx operand0; rtx _val = 0; start_sequence (); { rtx operands[1]; { operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode)); MEM_VOLATILE_P (operands[0]) = 1; } operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (BLKmode, gen_rtvec (1, operand0), 44))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:21911 */ rtx gen_sse_prologue_save (rtx operand0, rtx operand1, rtx operand2, rtx operand3) { return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (BLKmode, gen_rtvec (8, gen_rtx_REG (DImode, 21), gen_rtx_REG (DImode, 22), gen_rtx_REG (DImode, 23), gen_rtx_REG (DImode, 24), gen_rtx_REG (DImode, 25), gen_rtx_REG (DImode, 26), gen_rtx_REG (DImode, 27), gen_rtx_REG (DImode, 28)), 13)), gen_rtx_USE (VOIDmode, operand1), gen_rtx_USE (VOIDmode, operand2), gen_rtx_USE (VOIDmode, gen_rtx_LABEL_REF (DImode, operand3)))); } /* ../../gcc/gcc/config/i386/i386.md:22277 */ rtx gen_prefetch (rtx operand0, rtx operand1, rtx operand2) { rtx _val = 0; start_sequence (); { rtx operands[3]; operands[0] = operand0; operands[1] = operand1; operands[2] = operand2; { int rw = INTVAL (operands[1]); int locality = INTVAL (operands[2]); if (rw != 0 && rw != 1) abort (); if (locality < 0 || locality > 3) abort (); if (GET_MODE (operands[0]) != Pmode && GET_MODE (operands[0]) != VOIDmode) abort (); /* Use 3dNOW prefetch in case we are asking for write prefetch not supported by SSE counterpart or the SSE prefetch is not available (K6 machines). Otherwise use SSE prefetch as it allows specifying of locality. 
*/ if (TARGET_3DNOW && (!TARGET_PREFETCH_SSE || rw)) operands[2] = GEN_INT (3); else operands[1] = const0_rtx; } operand0 = operands[0]; operand1 = operands[1]; operand2 = operands[2]; } emit_insn (gen_rtx_PREFETCH (VOIDmode, operand0, operand1, operand2)); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:23849 */ rtx gen_sse2_loadsd (rtx operand0, rtx operand1) { rtx _val = 0; start_sequence (); { rtx operands[2]; operands[0] = operand0; operands[1] = operand1; { emit_insn (gen_sse2_loadsd_1 (operands[0], operands[1], CONST0_RTX (V2DFmode))); DONE; } operand0 = operands[0]; operand1 = operands[1]; } emit (operand0); emit (operand1); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:23913 */ rtx gen_sse2_mfence (void) { rtx operand0; rtx _val = 0; start_sequence (); { rtx operands[1]; { operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode)); MEM_VOLATILE_P (operands[0]) = 1; } operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (BLKmode, gen_rtvec (1, operand0), 59))); _val = get_insns (); end_sequence (); return _val; } /* ../../gcc/gcc/config/i386/i386.md:23930 */ rtx gen_sse2_lfence (void) { rtx operand0; rtx _val = 0; start_sequence (); { rtx operands[1]; { operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode)); MEM_VOLATILE_P (operands[0]) = 1; } operand0 = operands[0]; } emit_insn (gen_rtx_SET (VOIDmode, operand0, gen_rtx_UNSPEC (BLKmode, gen_rtvec (1, operand0), 60))); _val = get_insns (); end_sequence (); return _val; } void add_clobbers (rtx pattern ATTRIBUTE_UNUSED, int insn_code_number) { switch (insn_code_number) { case 863: case 849: XVECEXP (pattern, 0, 1) = gen_hard_reg_clobber (XFmode, 8); XVECEXP (pattern, 0, 2) = gen_hard_reg_clobber (XFmode, 9); XVECEXP (pattern, 0, 3) = gen_hard_reg_clobber (XFmode, 10); XVECEXP (pattern, 0, 4) = gen_hard_reg_clobber (XFmode, 11); XVECEXP (pattern, 0, 5) = gen_hard_reg_clobber (XFmode, 12); XVECEXP (pattern, 0, 6) = gen_hard_reg_clobber (XFmode, 13); XVECEXP (pattern, 0, 7) = gen_hard_reg_clobber (XFmode, 14); XVECEXP (pattern, 0, 8) = gen_hard_reg_clobber (XFmode, 15); XVECEXP (pattern, 0, 9) = gen_hard_reg_clobber (DImode, 29); XVECEXP (pattern, 0, 10) = gen_hard_reg_clobber (DImode, 30); XVECEXP (pattern, 0, 11) = gen_hard_reg_clobber (DImode, 31); XVECEXP (pattern, 0, 12) = gen_hard_reg_clobber (DImode, 32); XVECEXP (pattern, 0, 13) = gen_hard_reg_clobber (DImode, 33); XVECEXP (pattern, 0, 14) = gen_hard_reg_clobber (DImode, 34); XVECEXP (pattern, 0, 15) = gen_hard_reg_clobber (DImode, 35); XVECEXP (pattern, 0, 16) = gen_hard_reg_clobber (DImode, 36); break; case 686: XVECEXP (pattern, 0, 2) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DImode)); XVECEXP (pattern, 0, 3) = gen_hard_reg_clobber (CCmode, 17); break; case 676: case 675: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)); XVECEXP (pattern, 0, 2) = gen_hard_reg_clobber (CCmode, 17); break; case 674: case 673: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SFmode)); XVECEXP (pattern, 0, 2) = gen_hard_reg_clobber (CCmode, 17); break; case 612: case 611: case 610: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (XFmode)); break; case 609: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SFmode)); break; case 608: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)); break; case 550: case 548: case 547: case 545: case 544: XVECEXP 
(pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)); XVECEXP (pattern, 0, 2) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)); XVECEXP (pattern, 0, 3) = gen_hard_reg_clobber (CCmode, 17); break; case 685: case 514: XVECEXP (pattern, 0, 2) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)); XVECEXP (pattern, 0, 3) = gen_hard_reg_clobber (CCmode, 17); break; case 508: case 507: case 506: case 505: XVECEXP (pattern, 0, 1) = gen_hard_reg_clobber (CCFPmode, 18); XVECEXP (pattern, 0, 2) = gen_hard_reg_clobber (CCFPmode, 17); XVECEXP (pattern, 0, 3) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode)); break; case 504: case 503: case 502: case 501: case 500: case 499: XVECEXP (pattern, 0, 1) = gen_hard_reg_clobber (CCFPmode, 18); XVECEXP (pattern, 0, 2) = gen_hard_reg_clobber (CCFPmode, 17); break; case 647: case 646: case 277: case 276: case 274: case 271: case 268: XVECEXP (pattern, 0, 3) = gen_hard_reg_clobber (CCmode, 17); break; case 382: case 381: case 378: case 367: case 366: case 363: case 275: case 273: case 272: case 270: case 269: case 267: case 266: XVECEXP (pattern, 0, 2) = gen_hard_reg_clobber (CCmode, 17); break; case 536: case 535: case 449: case 422: case 404: case 263: case 262: case 260: case 259: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)); XVECEXP (pattern, 0, 2) = gen_hard_reg_clobber (CCmode, 17); break; case 538: case 261: case 258: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DImode)); XVECEXP (pattern, 0, 2) = gen_hard_reg_clobber (CCmode, 17); break; case 348: case 322: case 221: case 220: case 219: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (QImode)); break; case 339: case 317: case 214: case 213: case 212: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode)); break; case 336: case 314: case 208: case 207: case 205: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)); break; case 329: case 307: case 200: case 199: case 198: XVECEXP (pattern, 0, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DImode)); break; case 149: XVECEXP (pattern, 0, 3) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)); break; case 148: XVECEXP (pattern, 0, 4) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)); break; case 117: XVECEXP (pattern, 0, 1) = gen_hard_reg_clobber (CCmode, 17); XVECEXP (pattern, 0, 2) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)); break; case 668: case 667: case 665: case 664: case 662: case 661: case 659: case 658: case 650: case 648: case 554: case 552: case 543: case 542: case 541: case 540: case 530: case 492: case 491: case 490: case 489: case 488: case 487: case 486: case 485: case 484: case 483: case 482: case 481: case 480: case 479: case 478: case 477: case 476: case 475: case 474: case 473: case 472: case 471: case 470: case 469: case 466: case 465: case 464: case 463: case 460: case 459: case 454: case 453: case 452: case 451: case 450: case 446: case 445: case 442: case 441: case 440: case 439: case 436: case 435: case 430: case 429: case 428: case 427: case 426: case 425: case 424: case 423: case 419: case 418: case 417: case 415: case 414: case 412: case 411: case 408: case 407: case 406: case 405: case 402: case 385: case 384: case 383: case 380: case 379: case 377: case 370: case 369: case 368: case 365: case 364: case 362: case 360: case 358: case 355: case 354: case 352: case 351: case 345: case 344: case 343: case 342: case 341: case 340: case 337: case 332: case 
331: case 330: case 327: case 326: case 325: case 324: case 323: case 319: case 318: case 315: case 310: case 309: case 308: case 305: case 304: case 303: case 302: case 300: case 297: case 296: case 294: case 291: case 290: case 288: case 265: case 264: case 257: case 256: case 255: case 254: case 253: case 252: case 251: case 250: case 249: case 248: case 247: case 244: case 243: case 240: case 235: case 234: case 233: case 232: case 231: case 230: case 227: case 226: case 225: case 224: case 223: case 222: case 217: case 216: case 215: case 210: case 209: case 202: case 201: case 196: case 183: case 182: case 181: case 180: case 178: case 177: case 157: case 152: case 147: case 112: case 111: case 109: case 108: case 106: case 105: case 103: case 81: case 80: case 62: case 56: case 42: case 41: XVECEXP (pattern, 0, 1) = gen_hard_reg_clobber (CCmode, 17); break; default: abort (); } } int added_clobbers_hard_reg_p (int insn_code_number) { switch (insn_code_number) { case 612: case 611: case 610: case 609: case 608: case 348: case 322: case 221: case 220: case 219: case 339: case 317: case 214: case 213: case 212: case 336: case 314: case 208: case 207: case 205: case 329: case 307: case 200: case 199: case 198: case 149: case 148: return 0; case 863: case 849: case 686: case 676: case 675: case 674: case 673: case 550: case 548: case 547: case 545: case 544: case 685: case 514: case 508: case 507: case 506: case 505: case 504: case 503: case 502: case 501: case 500: case 499: case 647: case 646: case 277: case 276: case 274: case 271: case 268: case 382: case 381: case 378: case 367: case 366: case 363: case 275: case 273: case 272: case 270: case 269: case 267: case 266: case 536: case 535: case 449: case 422: case 404: case 263: case 262: case 260: case 259: case 538: case 261: case 258: case 117: case 668: case 667: case 665: case 664: case 662: case 661: case 659: case 658: case 650: case 648: case 554: case 552: case 543: case 542: case 541: case 540: case 530: case 492: case 491: case 490: case 489: case 488: case 487: case 486: case 485: case 484: case 483: case 482: case 481: case 480: case 479: case 478: case 477: case 476: case 475: case 474: case 473: case 472: case 471: case 470: case 469: case 466: case 465: case 464: case 463: case 460: case 459: case 454: case 453: case 452: case 451: case 450: case 446: case 445: case 442: case 441: case 440: case 439: case 436: case 435: case 430: case 429: case 428: case 427: case 426: case 425: case 424: case 423: case 419: case 418: case 417: case 415: case 414: case 412: case 411: case 408: case 407: case 406: case 405: case 402: case 385: case 384: case 383: case 380: case 379: case 377: case 370: case 369: case 368: case 365: case 364: case 362: case 360: case 358: case 355: case 354: case 352: case 351: case 345: case 344: case 343: case 342: case 341: case 340: case 337: case 332: case 331: case 330: case 327: case 326: case 325: case 324: case 323: case 319: case 318: case 315: case 310: case 309: case 308: case 305: case 304: case 303: case 302: case 300: case 297: case 296: case 294: case 291: case 290: case 288: case 265: case 264: case 257: case 256: case 255: case 254: case 253: case 252: case 251: case 250: case 249: case 248: case 247: case 244: case 243: case 240: case 235: case 234: case 233: case 232: case 231: case 230: case 227: case 226: case 225: case 224: case 223: case 222: case 217: case 216: case 215: case 210: case 209: case 202: case 201: case 196: case 183: case 182: case 181: case 180: case 178: case 177: 
case 157: case 152: case 147: case 112: case 111: case 109: case 108: case 106: case 105: case 103: case 81: case 80: case 62: case 56: case 42: case 41: return 1; default: abort (); } } /* Generated automatically from machmode.def and config/i386/i386-modes.def by genmodes. */ const char *const mode_name[NUM_MACHINE_MODES] = { "VOID", "BLK", "CC", "CCGC", "CCGOC", "CCNO", "CCZ", "CCFP", "CCFPU", "BI", "QI", "HI", "SI", "DI", "TI", "SF", "DF", "XF", "TF", "CQI", "CHI", "CSI", "CDI", "CTI", "SC", "DC", "XC", "TC", "V2QI", "V4QI", "V2HI", "V8QI", "V4HI", "V2SI", "V1DI", "V16QI", "V8HI", "V4SI", "V2DI", "V8SI", "V4DI", "V8DI", "V2SF", "V4SF", "V2DF", "V8SF", "V4DF", "V16SF", "V8DF", }; const unsigned char mode_class[NUM_MACHINE_MODES] = { MODE_RANDOM, /* VOID */ MODE_RANDOM, /* BLK */ MODE_CC, /* CC */ MODE_CC, /* CCGC */ MODE_CC, /* CCGOC */ MODE_CC, /* CCNO */ MODE_CC, /* CCZ */ MODE_CC, /* CCFP */ MODE_CC, /* CCFPU */ MODE_INT, /* BI */ MODE_INT, /* QI */ MODE_INT, /* HI */ MODE_INT, /* SI */ MODE_INT, /* DI */ MODE_INT, /* TI */ MODE_FLOAT, /* SF */ MODE_FLOAT, /* DF */ MODE_FLOAT, /* XF */ MODE_FLOAT, /* TF */ MODE_COMPLEX_INT, /* CQI */ MODE_COMPLEX_INT, /* CHI */ MODE_COMPLEX_INT, /* CSI */ MODE_COMPLEX_INT, /* CDI */ MODE_COMPLEX_INT, /* CTI */ MODE_COMPLEX_FLOAT, /* SC */ MODE_COMPLEX_FLOAT, /* DC */ MODE_COMPLEX_FLOAT, /* XC */ MODE_COMPLEX_FLOAT, /* TC */ MODE_VECTOR_INT, /* V2QI */ MODE_VECTOR_INT, /* V4QI */ MODE_VECTOR_INT, /* V2HI */ MODE_VECTOR_INT, /* V8QI */ MODE_VECTOR_INT, /* V4HI */ MODE_VECTOR_INT, /* V2SI */ MODE_VECTOR_INT, /* V1DI */ MODE_VECTOR_INT, /* V16QI */ MODE_VECTOR_INT, /* V8HI */ MODE_VECTOR_INT, /* V4SI */ MODE_VECTOR_INT, /* V2DI */ MODE_VECTOR_INT, /* V8SI */ MODE_VECTOR_INT, /* V4DI */ MODE_VECTOR_INT, /* V8DI */ MODE_VECTOR_FLOAT, /* V2SF */ MODE_VECTOR_FLOAT, /* V4SF */ MODE_VECTOR_FLOAT, /* V2DF */ MODE_VECTOR_FLOAT, /* V8SF */ MODE_VECTOR_FLOAT, /* V4DF */ MODE_VECTOR_FLOAT, /* V16SF */ MODE_VECTOR_FLOAT, /* V8DF */ }; const unsigned short mode_precision[NUM_MACHINE_MODES] = { 0, /* VOID */ 0, /* BLK */ 4*BITS_PER_UNIT, /* CC */ 4*BITS_PER_UNIT, /* CCGC */ 4*BITS_PER_UNIT, /* CCGOC */ 4*BITS_PER_UNIT, /* CCNO */ 4*BITS_PER_UNIT, /* CCZ */ 4*BITS_PER_UNIT, /* CCFP */ 4*BITS_PER_UNIT, /* CCFPU */ 1, /* BI */ 1*BITS_PER_UNIT, /* QI */ 2*BITS_PER_UNIT, /* HI */ 4*BITS_PER_UNIT, /* SI */ 8*BITS_PER_UNIT, /* DI */ 16*BITS_PER_UNIT, /* TI */ 4*BITS_PER_UNIT, /* SF */ 8*BITS_PER_UNIT, /* DF */ 12*BITS_PER_UNIT, /* XF */ 16*BITS_PER_UNIT, /* TF */ 2*BITS_PER_UNIT, /* CQI */ 4*BITS_PER_UNIT, /* CHI */ 8*BITS_PER_UNIT, /* CSI */ 16*BITS_PER_UNIT, /* CDI */ 32*BITS_PER_UNIT, /* CTI */ 8*BITS_PER_UNIT, /* SC */ 16*BITS_PER_UNIT, /* DC */ 24*BITS_PER_UNIT, /* XC */ 32*BITS_PER_UNIT, /* TC */ 2*BITS_PER_UNIT, /* V2QI */ 4*BITS_PER_UNIT, /* V4QI */ 4*BITS_PER_UNIT, /* V2HI */ 8*BITS_PER_UNIT, /* V8QI */ 8*BITS_PER_UNIT, /* V4HI */ 8*BITS_PER_UNIT, /* V2SI */ 8*BITS_PER_UNIT, /* V1DI */ 16*BITS_PER_UNIT, /* V16QI */ 16*BITS_PER_UNIT, /* V8HI */ 16*BITS_PER_UNIT, /* V4SI */ 16*BITS_PER_UNIT, /* V2DI */ 32*BITS_PER_UNIT, /* V8SI */ 32*BITS_PER_UNIT, /* V4DI */ 64*BITS_PER_UNIT, /* V8DI */ 8*BITS_PER_UNIT, /* V2SF */ 16*BITS_PER_UNIT, /* V4SF */ 16*BITS_PER_UNIT, /* V2DF */ 32*BITS_PER_UNIT, /* V8SF */ 32*BITS_PER_UNIT, /* V4DF */ 64*BITS_PER_UNIT, /* V16SF */ 64*BITS_PER_UNIT, /* V8DF */ }; unsigned char mode_size[NUM_MACHINE_MODES] = { 0, /* VOID */ 0, /* BLK */ 4, /* CC */ 4, /* CCGC */ 4, /* CCGOC */ 4, /* CCNO */ 4, /* CCZ */ 4, /* CCFP */ 4, /* CCFPU */ 1, /* BI 
*/ 1, /* QI */ 2, /* HI */ 4, /* SI */ 8, /* DI */ 16, /* TI */ 4, /* SF */ 8, /* DF */ 12, /* XF */ 16, /* TF */ 2, /* CQI */ 4, /* CHI */ 8, /* CSI */ 16, /* CDI */ 32, /* CTI */ 8, /* SC */ 16, /* DC */ 24, /* XC */ 32, /* TC */ 2, /* V2QI */ 4, /* V4QI */ 4, /* V2HI */ 8, /* V8QI */ 8, /* V4HI */ 8, /* V2SI */ 8, /* V1DI */ 16, /* V16QI */ 16, /* V8HI */ 16, /* V4SI */ 16, /* V2DI */ 32, /* V8SI */ 32, /* V4DI */ 64, /* V8DI */ 8, /* V2SF */ 16, /* V4SF */ 16, /* V2DF */ 32, /* V8SF */ 32, /* V4DF */ 64, /* V16SF */ 64, /* V8DF */ }; const unsigned char mode_nunits[NUM_MACHINE_MODES] = { 0, /* VOID */ 0, /* BLK */ 1, /* CC */ 1, /* CCGC */ 1, /* CCGOC */ 1, /* CCNO */ 1, /* CCZ */ 1, /* CCFP */ 1, /* CCFPU */ 1, /* BI */ 1, /* QI */ 1, /* HI */ 1, /* SI */ 1, /* DI */ 1, /* TI */ 1, /* SF */ 1, /* DF */ 1, /* XF */ 1, /* TF */ 2, /* CQI */ 2, /* CHI */ 2, /* CSI */ 2, /* CDI */ 2, /* CTI */ 2, /* SC */ 2, /* DC */ 2, /* XC */ 2, /* TC */ 2, /* V2QI */ 4, /* V4QI */ 2, /* V2HI */ 8, /* V8QI */ 4, /* V4HI */ 2, /* V2SI */ 1, /* V1DI */ 16, /* V16QI */ 8, /* V8HI */ 4, /* V4SI */ 2, /* V2DI */ 8, /* V8SI */ 4, /* V4DI */ 8, /* V8DI */ 2, /* V2SF */ 4, /* V4SF */ 2, /* V2DF */ 8, /* V8SF */ 4, /* V4DF */ 16, /* V16SF */ 8, /* V8DF */ }; const unsigned char mode_wider[NUM_MACHINE_MODES] = { VOIDmode, /* VOID */ VOIDmode, /* BLK */ VOIDmode, /* CC */ VOIDmode, /* CCGC */ VOIDmode, /* CCGOC */ VOIDmode, /* CCNO */ VOIDmode, /* CCZ */ VOIDmode, /* CCFP */ VOIDmode, /* CCFPU */ QImode, /* BI */ HImode, /* QI */ SImode, /* HI */ DImode, /* SI */ TImode, /* DI */ VOIDmode, /* TI */ DFmode, /* SF */ XFmode, /* DF */ TFmode, /* XF */ VOIDmode, /* TF */ CHImode, /* CQI */ CSImode, /* CHI */ CDImode, /* CSI */ CTImode, /* CDI */ VOIDmode, /* CTI */ DCmode, /* SC */ XCmode, /* DC */ TCmode, /* XC */ VOIDmode, /* TC */ V4QImode, /* V2QI */ V2HImode, /* V4QI */ V8QImode, /* V2HI */ V4HImode, /* V8QI */ V2SImode, /* V4HI */ V1DImode, /* V2SI */ V16QImode, /* V1DI */ V8HImode, /* V16QI */ V4SImode, /* V8HI */ V2DImode, /* V4SI */ V8SImode, /* V2DI */ V4DImode, /* V8SI */ V8DImode, /* V4DI */ VOIDmode, /* V8DI */ V4SFmode, /* V2SF */ V2DFmode, /* V4SF */ V8SFmode, /* V2DF */ V4DFmode, /* V8SF */ V16SFmode, /* V4DF */ V8DFmode, /* V16SF */ VOIDmode, /* V8DF */ }; const unsigned HOST_WIDE_INT mode_mask_array[NUM_MACHINE_MODES] = { #define MODE_MASK(m) \ ((m) >= HOST_BITS_PER_WIDE_INT) \ ? 
~(unsigned HOST_WIDE_INT) 0 \ : ((unsigned HOST_WIDE_INT) 1 << (m)) - 1 MODE_MASK (0), /* VOID */ MODE_MASK (0), /* BLK */ MODE_MASK (4*BITS_PER_UNIT), /* CC */ MODE_MASK (4*BITS_PER_UNIT), /* CCGC */ MODE_MASK (4*BITS_PER_UNIT), /* CCGOC */ MODE_MASK (4*BITS_PER_UNIT), /* CCNO */ MODE_MASK (4*BITS_PER_UNIT), /* CCZ */ MODE_MASK (4*BITS_PER_UNIT), /* CCFP */ MODE_MASK (4*BITS_PER_UNIT), /* CCFPU */ MODE_MASK (1), /* BI */ MODE_MASK (1*BITS_PER_UNIT), /* QI */ MODE_MASK (2*BITS_PER_UNIT), /* HI */ MODE_MASK (4*BITS_PER_UNIT), /* SI */ MODE_MASK (8*BITS_PER_UNIT), /* DI */ MODE_MASK (16*BITS_PER_UNIT), /* TI */ MODE_MASK (4*BITS_PER_UNIT), /* SF */ MODE_MASK (8*BITS_PER_UNIT), /* DF */ MODE_MASK (12*BITS_PER_UNIT), /* XF */ MODE_MASK (16*BITS_PER_UNIT), /* TF */ MODE_MASK (2*BITS_PER_UNIT), /* CQI */ MODE_MASK (4*BITS_PER_UNIT), /* CHI */ MODE_MASK (8*BITS_PER_UNIT), /* CSI */ MODE_MASK (16*BITS_PER_UNIT), /* CDI */ MODE_MASK (32*BITS_PER_UNIT), /* CTI */ MODE_MASK (8*BITS_PER_UNIT), /* SC */ MODE_MASK (16*BITS_PER_UNIT), /* DC */ MODE_MASK (24*BITS_PER_UNIT), /* XC */ MODE_MASK (32*BITS_PER_UNIT), /* TC */ MODE_MASK (2*BITS_PER_UNIT), /* V2QI */ MODE_MASK (4*BITS_PER_UNIT), /* V4QI */ MODE_MASK (4*BITS_PER_UNIT), /* V2HI */ MODE_MASK (8*BITS_PER_UNIT), /* V8QI */ MODE_MASK (8*BITS_PER_UNIT), /* V4HI */ MODE_MASK (8*BITS_PER_UNIT), /* V2SI */ MODE_MASK (8*BITS_PER_UNIT), /* V1DI */ MODE_MASK (16*BITS_PER_UNIT), /* V16QI */ MODE_MASK (16*BITS_PER_UNIT), /* V8HI */ MODE_MASK (16*BITS_PER_UNIT), /* V4SI */ MODE_MASK (16*BITS_PER_UNIT), /* V2DI */ MODE_MASK (32*BITS_PER_UNIT), /* V8SI */ MODE_MASK (32*BITS_PER_UNIT), /* V4DI */ MODE_MASK (64*BITS_PER_UNIT), /* V8DI */ MODE_MASK (8*BITS_PER_UNIT), /* V2SF */ MODE_MASK (16*BITS_PER_UNIT), /* V4SF */ MODE_MASK (16*BITS_PER_UNIT), /* V2DF */ MODE_MASK (32*BITS_PER_UNIT), /* V8SF */ MODE_MASK (32*BITS_PER_UNIT), /* V4DF */ MODE_MASK (64*BITS_PER_UNIT), /* V16SF */ MODE_MASK (64*BITS_PER_UNIT), /* V8DF */ #undef MODE_MASK }; const unsigned char mode_inner[NUM_MACHINE_MODES] = { VOIDmode, /* VOID */ VOIDmode, /* BLK */ VOIDmode, /* CC */ VOIDmode, /* CCGC */ VOIDmode, /* CCGOC */ VOIDmode, /* CCNO */ VOIDmode, /* CCZ */ VOIDmode, /* CCFP */ VOIDmode, /* CCFPU */ VOIDmode, /* BI */ VOIDmode, /* QI */ VOIDmode, /* HI */ VOIDmode, /* SI */ VOIDmode, /* DI */ VOIDmode, /* TI */ VOIDmode, /* SF */ VOIDmode, /* DF */ VOIDmode, /* XF */ VOIDmode, /* TF */ QImode, /* CQI */ HImode, /* CHI */ SImode, /* CSI */ DImode, /* CDI */ TImode, /* CTI */ SFmode, /* SC */ DFmode, /* DC */ XFmode, /* XC */ TFmode, /* TC */ QImode, /* V2QI */ QImode, /* V4QI */ HImode, /* V2HI */ QImode, /* V8QI */ HImode, /* V4HI */ SImode, /* V2SI */ DImode, /* V1DI */ QImode, /* V16QI */ HImode, /* V8HI */ SImode, /* V4SI */ DImode, /* V2DI */ SImode, /* V8SI */ DImode, /* V4DI */ DImode, /* V8DI */ SFmode, /* V2SF */ SFmode, /* V4SF */ DFmode, /* V2DF */ SFmode, /* V8SF */ DFmode, /* V4DF */ SFmode, /* V16SF */ DFmode, /* V8DF */ }; unsigned char mode_base_align[NUM_MACHINE_MODES] = { 0, /* VOID */ 0, /* BLK */ 4, /* CC */ 4, /* CCGC */ 4, /* CCGOC */ 4, /* CCNO */ 4, /* CCZ */ 4, /* CCFP */ 4, /* CCFPU */ 1, /* BI */ 1, /* QI */ 2, /* HI */ 4, /* SI */ 8, /* DI */ 16, /* TI */ 4, /* SF */ 8, /* DF */ 4, /* XF */ 16, /* TF */ 1, /* CQI */ 2, /* CHI */ 4, /* CSI */ 8, /* CDI */ 16, /* CTI */ 4, /* SC */ 8, /* DC */ 4, /* XC */ 16, /* TC */ 2, /* V2QI */ 4, /* V4QI */ 4, /* V2HI */ 8, /* V8QI */ 8, /* V4HI */ 8, /* V2SI */ 8, /* V1DI */ 16, /* V16QI */ 16, /* V8HI */ 16, /* V4SI */ 16, 
/* V2DI */ 32, /* V8SI */ 32, /* V4DI */ 64, /* V8DI */ 8, /* V2SF */ 16, /* V4SF */ 16, /* V2DF */ 32, /* V8SF */ 32, /* V4DF */ 64, /* V16SF */ 64, /* V8DF */ }; const unsigned char class_narrowest_mode[MAX_MODE_CLASS] = { MIN_MODE_RANDOM, /* VOID */ MIN_MODE_CC, /* CC */ MIN_MODE_INT, /* QI */ MIN_MODE_PARTIAL_INT, /* VOID */ MIN_MODE_FLOAT, /* SF */ MIN_MODE_COMPLEX_INT, /* CQI */ MIN_MODE_COMPLEX_FLOAT, /* SC */ MIN_MODE_VECTOR_INT, /* V2QI */ MIN_MODE_VECTOR_FLOAT, /* V2SF */ }; const struct real_format * real_format_for_mode[MAX_MODE_FLOAT - MIN_MODE_FLOAT + 1] = { &ieee_single_format, /* SF */ &ieee_double_format, /* DF */ &ieee_extended_intel_96_format, /* XF */ &ieee_quad_format, /* TF */ }; void init_adjust_machine_modes (void) { size_t s ATTRIBUTE_UNUSED; /* config/i386/i386-modes.def:33 */ s = TARGET_128BIT_LONG_DOUBLE ? 16 : 12; mode_size[XFmode] = s; mode_base_align[XFmode] = s & (~s + 1); mode_size[XCmode] = 2*s; mode_base_align[XCmode] = s & (~s + 1); /* config/i386/i386-modes.def:34 */ s = TARGET_128BIT_LONG_DOUBLE ? 16 : 4; mode_base_align[XFmode] = s; mode_base_align[XCmode] = s; /* config/i386/i386-modes.def:30 */ REAL_MODE_FORMAT (XFmode) = (TARGET_128BIT_LONG_DOUBLE ? &ieee_extended_intel_128_format : &ieee_extended_intel_96_format); } /* Generated automatically by the program `genextract' from the machine description file `md'. */ static rtx junk ATTRIBUTE_UNUSED; void insn_extract (rtx insn) { rtx *ro = recog_data.operand; rtx **ro_loc = recog_data.operand_loc; rtx pat = PATTERN (insn); int i ATTRIBUTE_UNUSED; switch (INSN_CODE (insn)) { case -1: fatal_insn_not_found (insn); case 1022: /* monitor */ ro[0] = *(ro_loc[0] = &XVECEXP (pat, 0, 0)); ro[1] = *(ro_loc[1] = &XVECEXP (pat, 0, 1)); ro[2] = *(ro_loc[2] = &XVECEXP (pat, 0, 2)); break; case 1021: /* mwait */ ro[0] = *(ro_loc[0] = &XVECEXP (pat, 0, 0)); ro[1] = *(ro_loc[1] = &XVECEXP (pat, 0, 1)); break; case 1000: /* sse2_punpcklqdq */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); break; case 988: /* sse2_lshrti3 */ case 987: /* sse2_ashlti3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (XEXP (pat, 1), 0, 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (XEXP (pat, 1), 0, 0), 1), 0)); break; case 950: /* sse2_umulv2siv2di3 */ case 949: /* sse2_umulsidi3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 1), 0), 0)); break; case 928: /* cvtpd2ps */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0), 0)); break; case 881: /* *prefetch_sse_rex */ case 880: /* *prefetch_sse */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (pat, 2)); break; case 877: /* pmulhrwv4hi3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0), 1), 0)); break; case 869: /* pi2fw */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0), 0), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 1), 0), 0), 0); recog_data.dup_num[0] = 1; break; case 868: /* pfpnacc */ case 867: /* pfnacc */ case 866: /* pfacc */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = 
&XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 1), 0), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 1), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 1), 1), 0); recog_data.dup_num[1] = 2; break; case 865: /* pf2iw */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); break; case 856: /* subrv2sf3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 0)); break; case 853: /* *sse_prologue_save_insn */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 2), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 3), 0), 0)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0), 1)); break; case 1020: /* *lfence_insn */ case 1019: /* *mfence_insn */ case 852: /* *sfence_insn */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); recog_data.dup_loc[0] = &XVECEXP (XEXP (pat, 1), 0, 0); recog_data.dup_num[0] = 0; break; case 957: /* sse2_pextrw */ case 820: /* mmx_pextrw */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XEXP (XEXP (pat, 1), 0), 1), 0, 0)); break; case 956: /* sse2_pinsrw */ case 819: /* mmx_pinsrw */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 1), 0), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (pat, 1), 2)); break; case 1028: /* hsubv2df3 */ case 1027: /* hsubv4sf3 */ case 1026: /* haddv2df3 */ case 1025: /* haddv4sf3 */ case 1024: /* addsubv2df3 */ case 1023: /* addsubv4sf3 */ case 960: /* sse2_pshufhw */ case 959: /* sse2_pshuflw */ case 958: /* sse2_pshufd */ case 955: /* sse2_psadbw */ case 876: /* pfrsqit1v2sf3 */ case 874: /* pfrcpit2v2sf3 */ case 873: /* pfrcpit1v2sf3 */ case 871: /* pavgusb */ case 821: /* mmx_pshufw */ case 818: /* mmx_psadbw */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (pat, 1), 0, 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (pat, 1), 0, 1)); break; case 954: /* sse2_uavgv8hi3 */ case 953: /* sse2_uavgv16qi3 */ case 817: /* mmx_uavgv4hi3 */ case 816: /* mmx_uavgv8qi3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 1)); break; case 815: /* mmx_nanddi3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (XEXP (pat, 1), 0, 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (XEXP (pat, 1), 0, 0), 1)); break; case 951: /* sse2_pmaddwd */ case 810: /* mmx_pmaddwd */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 1), 0), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 1), 0), 0), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 1), 1), 0), 0); recog_data.dup_num[1] = 2; break; case 948: /* umulv8hi3_highpart */ case 947: /* smulv8hi3_highpart */ case 809: /* umulv4hi3_highpart */ case 808: /* smulv4hi3_highpart */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (pat, 
1), 0), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 1), 0)); break; case 916: /* cvttpd2dq */ case 790: /* cvttss2siq */ case 789: /* cvttss2si */ case 784: /* cvttps2pi */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (XEXP (pat, 1), 0), 0, 0)); break; case 927: /* cvtss2sd */ case 926: /* cvtsd2ss */ case 925: /* cvtsi2sdq */ case 924: /* cvtsi2sd */ case 786: /* cvtsi2ssq */ case 785: /* cvtsi2ss */ case 782: /* cvtpi2ps */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 1), 0), 0)); break; case 999: /* sse2_punpckldq */ case 998: /* sse2_punpcklwd */ case 997: /* sse2_punpcklbw */ case 996: /* sse2_punpckhdq */ case 995: /* sse2_punpckhwd */ case 994: /* sse2_punpckhbw */ case 993: /* sse2_packuswb */ case 992: /* sse2_packssdw */ case 991: /* sse2_packsswb */ case 990: /* sse2_unpcklpd */ case 989: /* sse2_unpckhpd */ case 847: /* mmx_punpcklwd */ case 846: /* mmx_punpcklbw */ case 844: /* mmx_punpckhwd */ case 843: /* mmx_punpckhbw */ case 842: /* mmx_packuswb */ case 841: /* mmx_packssdw */ case 840: /* mmx_packsswb */ case 777: /* sse_unpcklps */ case 776: /* sse_unpckhps */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 1), 0)); break; case 903: /* sse2_ucomi */ case 902: /* sse2_comi */ case 775: /* sse_ucomi */ case 774: /* sse_comi */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 1), 0)); break; case 901: /* vmmaskncmpv2df3 */ case 773: /* vmmaskncmpv4sf3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (pat, 1), 1), 0); recog_data.dup_num[0] = 1; break; case 900: /* vmmaskcmpv2df3 */ case 772: /* vmmaskcmpv4sf3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (pat, 1), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (pat, 1), 1), 0); recog_data.dup_num[0] = 1; break; case 899: /* maskncmpv2df3 */ case 771: /* maskncmpv4sf3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (pat, 1), 0)); break; case 749: /* vmrsqrtv4sf2 */ case 747: /* vmrcpv4sf2 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (XEXP (pat, 1), 0), 0, 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1)); break; case 895: /* vmsminv2df3 */ case 893: /* vmsmaxv2df3 */ case 891: /* vmdivv2df3 */ case 889: /* vmmulv2df3 */ case 887: /* vmsubv2df3 */ case 885: /* vmaddv2df3 */ case 781: /* vmsminv4sf3 */ case 779: /* vmsmaxv4sf3 */ case 745: /* vmdivv4sf3 */ case 743: /* vmmulv4sf3 */ case 741: /* vmsubv4sf3 */ case 739: /* vmaddv4sf3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (pat, 1), 1); recog_data.dup_num[0] = 1; break; case 1017: /* sse2_shufpd */ case 737: /* sse_shufps */ ro[0] = *(ro_loc[0] = 
&XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (pat, 1), 0, 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (pat, 1), 0, 1)); ro[3] = *(ro_loc[3] = &XVECEXP (XEXP (pat, 1), 0, 2)); break; case 1015: /* sse2_movsd */ case 1013: /* sse2_movhpd */ case 978: /* ashlv2di3 */ case 977: /* ashlv4si3 */ case 976: /* ashlv8hi3 */ case 975: /* lshrv2di3 */ case 974: /* lshrv4si3 */ case 973: /* lshrv8hi3 */ case 972: /* ashrv4si3 */ case 971: /* ashrv8hi3 */ case 970: /* sminv8hi3 */ case 969: /* uminv16qi3 */ case 968: /* smaxv8hi3 */ case 967: /* umaxv16qi3 */ case 966: /* gtv4si3 */ case 965: /* gtv8hi3 */ case 964: /* gtv16qi3 */ case 963: /* eqv4si3 */ case 962: /* eqv8hi3 */ case 961: /* eqv16qi3 */ case 946: /* mulv8hi3 */ case 945: /* ussubv8hi3 */ case 944: /* ussubv16qi3 */ case 943: /* sssubv8hi3 */ case 942: /* sssubv16qi3 */ case 941: /* subv2di3 */ case 940: /* subv4si3 */ case 939: /* subv8hi3 */ case 938: /* subv16qi3 */ case 937: /* usaddv8hi3 */ case 936: /* usaddv16qi3 */ case 935: /* ssaddv8hi3 */ case 934: /* ssaddv16qi3 */ case 933: /* addv2di3 */ case 932: /* addv4si3 */ case 931: /* addv8hi3 */ case 930: /* addv16qi3 */ case 894: /* sminv2df3 */ case 892: /* smaxv2df3 */ case 890: /* divv2df3 */ case 888: /* mulv2df3 */ case 886: /* subv2df3 */ case 884: /* addv2df3 */ case 862: /* mulv2sf3 */ case 861: /* pfminv2sf3 */ case 860: /* pfmaxv2sf3 */ case 859: /* eqv2sf3 */ case 858: /* gev2sf3 */ case 857: /* gtv2sf3 */ case 855: /* subv2sf3 */ case 854: /* addv2sf3 */ case 838: /* ashlv2si3 */ case 837: /* ashlv4hi3 */ case 835: /* lshrv2si3 */ case 834: /* lshrv4hi3 */ case 833: /* ashrv2si3 */ case 832: /* ashrv4hi3 */ case 831: /* sminv4hi3 */ case 830: /* uminv8qi3 */ case 829: /* smaxv4hi3 */ case 828: /* umaxv8qi3 */ case 827: /* gtv2si3 */ case 826: /* gtv4hi3 */ case 825: /* gtv8qi3 */ case 824: /* eqv2si3 */ case 823: /* eqv4hi3 */ case 822: /* eqv8qi3 */ case 807: /* mulv4hi3 */ case 806: /* ussubv4hi3 */ case 805: /* ussubv8qi3 */ case 804: /* sssubv4hi3 */ case 803: /* sssubv8qi3 */ case 801: /* subv2si3 */ case 800: /* subv4hi3 */ case 799: /* subv8qi3 */ case 798: /* usaddv4hi3 */ case 797: /* usaddv8qi3 */ case 796: /* ssaddv4hi3 */ case 795: /* ssaddv8qi3 */ case 793: /* addv2si3 */ case 792: /* addv4hi3 */ case 791: /* addv8qi3 */ case 780: /* sminv4sf3 */ case 778: /* smaxv4sf3 */ case 767: /* sse2_xorv2di3 */ case 766: /* *sse2_xorti3 */ case 765: /* sse2_iorv2di3 */ case 764: /* *sse2_iorti3 */ case 761: /* sse2_andv2di3 */ case 760: /* *sse2_andti3 */ case 759: /* *sse2_xorv2df3 */ case 758: /* *sse2_iorv2df3 */ case 756: /* *sse2_andv2df3 */ case 755: /* *sse_xorv4sf3 */ case 754: /* *sse_iorv4sf3 */ case 752: /* *sse_andv4sf3 */ case 744: /* divv4sf3 */ case 742: /* mulv4sf3 */ case 740: /* subv4sf3 */ case 738: /* addv4sf3 */ case 735: /* sse_movss */ case 733: /* sse_movlps */ case 732: /* sse_movhps */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1)); break; case 1001: /* sse2_punpckhqdq */ case 986: /* ashlv2di3_ti */ case 985: /* ashlv4si3_ti */ case 984: /* ashlv8hi3_ti */ case 983: /* lshrv2di3_ti */ case 982: /* lshrv4si3_ti */ case 981: /* lshrv8hi3_ti */ case 980: /* ashrv4si3_ti */ case 979: /* ashrv8hi3_ti */ case 845: /* mmx_punpckhdq */ case 731: /* sse_movlhps */ case 730: /* sse_movhlps */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 1), 0)); break; case 907: /* 
sse2_maskmovdqu_rex64 */ case 906: /* sse2_maskmovdqu */ case 727: /* mmx_maskmovq_rex */ case 726: /* mmx_maskmovq */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 0), 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (pat, 1), 0, 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (pat, 1), 0, 1)); break; case 695: /* *sibcall_value_1_rex64_v */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 1)); break; case 1014: /* sse2_loadsd_1 */ case 897: /* vmsqrtv2df2 */ case 848: /* mmx_punpckldq */ case 763: /* sse2_nandv2di3 */ case 762: /* *sse2_nandti3 */ case 757: /* *sse2_nandv2df3 */ case 753: /* *sse_nandv4sf3 */ case 751: /* vmsqrtv4sf2 */ case 734: /* sse_loadss_1 */ case 694: /* *sibcall_value_1_rex64 */ case 693: /* *call_value_1_rex64 */ case 692: /* *sibcall_value_1 */ case 691: /* *call_value_1 */ case 690: /* *call_value_0_rex64 */ case 689: /* *call_value_0 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1)); break; case 688: /* *call_value_pop_1 */ case 687: /* *call_value_pop_0 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1)); break; case 686: /* allocate_stack_worker_rex64 */ case 685: /* allocate_stack_worker_1 */ ro[0] = *(ro_loc[0] = &XVECEXP (XVECEXP (pat, 0, 0), 0, 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 2), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[0] = 0; break; case 684: /* *sse_movdfcc_const0_4 */ case 683: /* *sse_movdfcc_const0_3 */ case 682: /* *sse_movdfcc_const0_2 */ case 681: /* *sse_movdfcc_const0_1 */ case 680: /* *sse_movsfcc_const0_4 */ case 679: /* *sse_movsfcc_const0_3 */ case 678: /* *sse_movsfcc_const0_2 */ case 677: /* *sse_movsfcc_const0_1 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (pat, 1), 2)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[5] = *(ro_loc[5] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); break; case 676: /* sse_movdfcc_eq */ case 674: /* sse_movsfcc_eq */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 2)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); ro[5] = *(ro_loc[5] = &XEXP (XVECEXP (pat, 0, 1), 0)); break; case 675: /* sse_movdfcc */ case 673: /* sse_movsfcc */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 2)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[5] = *(ro_loc[5] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); ro[6] = *(ro_loc[6] = &XEXP (XVECEXP (pat, 0, 1), 0)); break; case 672: /* pro_epilogue_adjust_stack_rex64_2 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 
0), 1), 1)); break; case 669: /* *maxdf_sse */ case 666: /* *maxsf_sse */ case 663: /* *mindf_sse */ case 660: /* *minsf_sse */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (pat, 1), 1); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (pat, 1), 2); recog_data.dup_num[1] = 2; break; case 668: /* *maxdf_nonieee */ case 667: /* *maxdf */ case 665: /* *maxsf_nonieee */ case 664: /* *maxsf */ case 662: /* *mindf_nonieee */ case 661: /* *mindf */ case 659: /* *minsf_nonieee */ case 658: /* *minsf */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 2); recog_data.dup_num[1] = 2; break; case 653: /* *movqicc_noc */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (pat, 1), 2)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); break; case 657: /* *movxfcc_1 */ case 656: /* *movdfcc_1_rex64 */ case 655: /* *movdfcc_1 */ case 654: /* *movsfcc_1 */ case 652: /* *movhicc_noc */ case 651: /* *movsicc_noc */ case 649: /* movdicc_c_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (pat, 1), 2)); break; case 647: /* *strlenqi_rex_1 */ case 646: /* *strlenqi_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 2), 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 1)); ro[3] = *(ro_loc[3] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 2)); ro[4] = *(ro_loc[4] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 3)); ro[5] = *(ro_loc[5] = &XEXP (XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 0), 0)); break; case 645: /* *cmpstrqi_rex_1 */ case 644: /* *cmpstrqi_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 4), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 5), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 6), 0)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0), 0)); ro[5] = *(ro_loc[5] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 1), 0)); ro[6] = *(ro_loc[6] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); break; case 643: /* *cmpstrqi_nz_rex_1 */ case 642: /* *cmpstrqi_nz_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 4), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 5), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 6), 0)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 2), 0)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[5] = *(ro_loc[5] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0)); ro[6] = *(ro_loc[6] = &XEXP (XVECEXP (pat, 0, 1), 0)); break; case 641: /* *rep_stosqi_rex64 */ case 640: /* *rep_stosqi */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 3), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0)); ro[4] = *(ro_loc[4] = 
&XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1)); recog_data.dup_loc[0] = &XEXP (XVECEXP (pat, 0, 4), 0); recog_data.dup_num[0] = 4; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 2), 0), 0); recog_data.dup_num[1] = 3; break; case 639: /* *rep_stossi_rex64 */ case 638: /* *rep_stossi */ case 637: /* *rep_stosdi_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 3), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 0)); recog_data.dup_loc[0] = &XEXP (XVECEXP (pat, 0, 4), 0); recog_data.dup_num[0] = 4; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 2), 0), 0); recog_data.dup_num[1] = 3; break; case 636: /* *strsetqi_rex_1 */ case 635: /* *strsetqi_1 */ case 634: /* *strsethi_rex_1 */ case 633: /* *strsethi_1 */ case 632: /* *strsetsi_rex_1 */ case 631: /* *strsetsi_1 */ case 630: /* *strsetdi_rex_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[0] = 1; break; case 629: /* *rep_movqi_rex64 */ case 628: /* *rep_movqi */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 2), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XVECEXP (pat, 0, 2), 1), 0)); ro[5] = *(ro_loc[5] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1)); recog_data.dup_loc[0] = &XEXP (XVECEXP (pat, 0, 4), 0); recog_data.dup_num[0] = 5; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 3), 0), 0); recog_data.dup_num[1] = 3; recog_data.dup_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 3), 1), 0); recog_data.dup_num[2] = 4; recog_data.dup_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 2), 1), 1); recog_data.dup_num[3] = 5; break; case 627: /* *rep_movsi_rex64 */ case 626: /* *rep_movsi */ case 625: /* *rep_movdi_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 2), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XVECEXP (pat, 0, 2), 1), 1)); ro[5] = *(ro_loc[5] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 0)); recog_data.dup_loc[0] = &XEXP (XVECEXP (pat, 0, 4), 0); recog_data.dup_num[0] = 5; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 3), 0), 0); recog_data.dup_num[1] = 3; recog_data.dup_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 3), 1), 0); recog_data.dup_num[2] = 4; recog_data.dup_loc[3] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 2), 1), 0), 0); recog_data.dup_num[3] = 5; break; case 624: /* *strmovqi_rex_1 */ case 623: /* *strmovqi_1 */ case 622: /* *strmovhi_rex_1 */ case 621: /* *strmovhi_1 */ case 620: /* *strmovsi_rex_1 */ case 619: /* *strmovsi_1 */ case 618: /* *strmovdi_rex_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 2), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 2), 1), 0); recog_data.dup_num[0] = 3; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); 
recog_data.dup_num[1] = 2; break; case 612: /* fyl2xp1_xf3 */ case 611: /* fyl2x_xf3 */ case 610: /* atan2xf3_1 */ case 609: /* atan2sf3_1 */ case 608: /* atan2df3_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 1)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 0)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0)); break; case 603: /* *sincosextendsfdf3 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 0), 0)); recog_data.dup_loc[0] = &XEXP (XVECEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0, 0), 0); recog_data.dup_num[0] = 2; break; case 613: /* *fxtractxf3 */ case 607: /* *tanxf3_1 */ case 606: /* *tansf3_1 */ case 605: /* *tandf3_1 */ case 604: /* sincosxf3 */ case 602: /* sincossf3 */ case 601: /* sincosdf3 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 0)); recog_data.dup_loc[0] = &XVECEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0, 0); recog_data.dup_num[0] = 2; break; case 923: /* cvttsd2siq */ case 922: /* cvttsd2si */ case 599: /* *cosextendsfdf2 */ case 595: /* *sinextendsfdf2 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (XEXP (pat, 1), 0, 0), 0)); break; case 1031: /* lddqu */ case 1030: /* movsldup */ case 1029: /* movshdup */ case 1005: /* sse2_movdqu */ case 1004: /* sse2_movdqa */ case 1003: /* sse2_movupd */ case 1002: /* sse2_movapd */ case 918: /* cvttpd2pi */ case 913: /* cvttps2dq */ case 910: /* sse2_movntsi */ case 909: /* sse2_movntv2di */ case 908: /* sse2_movntv2df */ case 905: /* sse2_pmovmskb */ case 904: /* sse2_movmskpd */ case 875: /* pfrsqrtv2sf2 */ case 872: /* pfrcpv2sf2 */ case 748: /* rsqrtv4sf2 */ case 746: /* rcpv4sf2 */ case 729: /* sse_movntdi */ case 728: /* sse_movntv4sf */ case 725: /* mmx_pmovmskb */ case 724: /* sse_movmskps */ case 723: /* *sse_movups_1 */ case 722: /* *sse_movaps_1 */ case 615: /* *f2xm1xf2 */ case 614: /* *frndintxf2 */ case 600: /* *cosxf2 */ case 598: /* *cossf2 */ case 597: /* *cosdf2 */ case 596: /* *sinxf2 */ case 594: /* *sinsf2 */ case 593: /* *sindf2 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (pat, 1), 0, 0)); break; case 616: /* *fscalexf4 */ case 592: /* fprem1xf4 */ case 591: /* fpremxf4 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 0)); ro[3] = *(ro_loc[3] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 1)); recog_data.dup_loc[0] = &XVECEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0, 1); recog_data.dup_num[0] = 3; recog_data.dup_loc[1] = &XVECEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0, 0); recog_data.dup_num[1] = 2; break; case 580: /* *fop_xf_6 */ case 574: /* *fop_df_6 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (pat, 1)); break; case 579: /* *fop_xf_5 */ case 577: /* *fop_xf_3 */ case 573: /* *fop_df_5 */ case 571: /* *fop_df_3 */ case 566: /* *fop_sf_3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (pat, 1)); 
break; case 578: /* *fop_xf_4 */ case 576: /* *fop_xf_2 */ case 572: /* *fop_df_4 */ case 570: /* *fop_df_2 */ case 565: /* *fop_sf_2 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (pat, 1)); break; case 898: /* maskcmpv2df3 */ case 770: /* maskcmpv4sf3 */ case 575: /* *fop_xf_1 */ case 569: /* *fop_df_1_sse */ case 568: /* *fop_df_1 */ case 567: /* *fop_df_1_nosse */ case 564: /* *fop_sf_1_sse */ case 563: /* *fop_sf_1 */ case 562: /* *fop_sf_1_nosse */ case 561: /* *fop_xf_comm */ case 560: /* *fop_df_comm_sse */ case 559: /* *fop_df_comm */ case 558: /* *fop_df_comm_nosse */ case 557: /* *fop_sf_comm_sse */ case 556: /* *fop_sf_comm */ case 555: /* *fop_sf_comm_nosse */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (pat, 1)); break; case 554: /* *add_tp_di */ case 552: /* *add_tp_si */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); break; case 550: /* *tls_local_dynamic_32_once */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0, 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0, 1)); ro[3] = *(ro_loc[3] = &XVECEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0), 0, 0)); ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[5] = *(ro_loc[5] = &XEXP (XVECEXP (pat, 0, 2), 0)); break; case 548: /* *tls_local_dynamic_base_32_sun */ case 547: /* *tls_local_dynamic_base_32_gnu */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 1)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 2), 0)); break; case 546: /* *tls_global_dynamic_64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XVECEXP (pat, 0, 1), 0, 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); break; case 545: /* *tls_global_dynamic_32_sun */ case 544: /* *tls_global_dynamic_32_gnu */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 1)); ro[3] = *(ro_loc[3] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 2)); ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[5] = *(ro_loc[5] = &XEXP (XVECEXP (pat, 0, 2), 0)); break; case 543: /* *bsr_rex64 */ case 542: /* *bsr */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0)); break; case 539: /* *ffsdi_1 */ case 537: /* *ffssi_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[0] = 1; break; case 527: /* return_indirect_internal */ case 526: /* return_pop_internal */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); break; case 863: /* femms */ case 849: /* emms */ case 696: /* trap */ case 617: /* cld */ case 534: /* leave_rex64 */ case 533: /* leave */ 
case 528: /* nop */ case 525: /* return_internal_long */ case 524: /* return_internal */ break; case 1018: /* sse2_clflush */ case 850: /* ldmxcsr */ case 529: /* align */ case 523: /* blockage */ ro[0] = *(ro_loc[0] = &XVECEXP (pat, 0, 0)); break; case 516: /* *call_pop_1 */ case 515: /* *call_pop_0 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 0), 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1)); break; case 514: /* doloop_end_internal */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 2), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[0] = 1; break; case 513: /* *tablejump_1_rtx64 */ case 512: /* *tablejump_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 1)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0)); break; case 522: /* *sibcall_1_rex64_v */ case 511: /* *indirect_jump_rtx64 */ case 510: /* *indirect_jump */ ro[0] = *(ro_loc[0] = &XEXP (pat, 1)); break; case 509: /* jump */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 1), 0)); break; case 508: /* *fp_jcc_6 */ case 506: /* *fp_jcc_4 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 2), 0)); ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 3), 0)); break; case 507: /* *fp_jcc_5 */ case 505: /* *fp_jcc_3 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0)); ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 3), 0)); break; case 504: /* *fp_jcc_2_sse_only */ case 503: /* *fp_jcc_2_sse */ case 502: /* *fp_jcc_2 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 2), 0)); break; case 501: /* *fp_jcc_1_sse_only */ case 500: /* *fp_jcc_1_sse */ case 499: /* *fp_jcc_1 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0)); break; case 498: /* *jcc_2 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (pat, 1), 2), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); break; case 497: /* *jcc_1 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (pat, 1), 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); break; case 496: /* *sse_setccdf */ case 495: /* *sse_setccsf */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (pat, 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (pat, 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (pat, 1), 1)); break; case 424: /* x86_shrd_1 */ case 406: /* x86_shld_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP 
(XEXP (XVECEXP (pat, 0, 0), 1), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0); recog_data.dup_num[0] = 0; recog_data.dup_loc[1] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 1), 1); recog_data.dup_num[1] = 2; break; case 449: /* lshrdi3_1 */ case 422: /* ashrdi3_1 */ case 404: /* ashldi3_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0)); break; case 397: /* *one_cmplsi2_2_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 0); recog_data.dup_num[0] = 1; break; case 357: /* *negsi2_cmpz_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0), 0), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 0), 0); recog_data.dup_num[0] = 1; break; case 355: /* *negsi2_1_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0), 0)); break; case 401: /* *one_cmplqi2_2 */ case 399: /* *one_cmplhi2_2 */ case 396: /* *one_cmplsi2_2 */ case 393: /* *one_cmpldi2_2_rex64 */ case 361: /* *negqi2_cmpz */ case 359: /* *neghi2_cmpz */ case 356: /* *negsi2_cmpz */ case 353: /* *negdi2_cmpz_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[0] = 1; break; case 458: /* *lshrsi3_cmp_zext */ case 456: /* *lshrsi3_cmp_one_bit_zext */ case 335: /* *xorsi_2_zext_imm */ case 313: /* *iorsi_2_zext_imm */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[1] = 2; break; case 549: /* *tls_local_dynamic_base_64 */ case 452: /* *lshrsi3_1_one_bit_zext */ case 332: /* *xorsi_1_zext_imm */ case 310: /* *iorsi_1_zext_imm */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); break; case 350: /* *xorqi_cc_ext_1_rex64 */ case 349: /* *xorqi_cc_ext_1 */ case 301: /* *andqi_ext_0_cc */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[1] = 2; break; case 347: /* *xorqi_2_slp */ case 321: /* *iorqi_2_slp */ case 299: /* *andqi_2_slp */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP 
(pat, 0, 0), 1), 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 0), 0); recog_data.dup_num[0] = 0; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[1] = 0; recog_data.dup_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[2] = 1; break; case 287: /* *testqi_ext_3_rex64 */ case 286: /* *testqi_ext_3 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 0), 2)); break; case 285: /* *testqi_ext_2 */ case 284: /* *testqi_ext_1_rex64 */ case 283: /* *testqi_ext_1 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 1), 0)); break; case 282: /* *testqi_ext_0 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); break; case 276: /* *udivmodsi4_noext */ case 274: /* *udivmoddi4_noext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0)); recog_data.dup_loc[0] = &XEXP (XVECEXP (pat, 0, 2), 0); recog_data.dup_num[0] = 3; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[1] = 1; recog_data.dup_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[2] = 2; break; case 275: /* udivmodsi4 */ case 273: /* udivmoddi4 */ case 272: /* divmodhi4 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[1] = 2; break; case 277: /* *udivmodhi_noext */ case 271: /* *divmodsi_noext */ case 268: /* *divmoddi_noext_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 2), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[1] = 2; break; case 270: /* *divmodsi4_cltd */ case 269: /* *divmodsi4_nocltd */ case 267: /* *divmoddi4_cltd_rex64 */ case 266: /* *divmoddi4_nocltd_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[0] = 2; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[1] = 3; break; case 263: /* *smulsi3_highpart_zext */ case 260: /* *umulsi3_highpart_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 
0), 0), 0), 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0)); break; case 262: /* *smulsi3_highpart_insn */ case 261: /* *smuldi3_highpart_rex64 */ case 259: /* *umulsi3_highpart_insn */ case 258: /* *umuldi3_highpart_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0), 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 1), 0)); break; case 257: /* *mulsidi3_insn */ case 256: /* *mulditi3_insn */ case 255: /* *umulsidi3_insn */ case 254: /* *umulditi3_insn */ case 253: /* *mulqihi3_insn */ case 252: /* *umulqihi3_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0)); break; case 239: /* *subsi_3_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 1); recog_data.dup_num[1] = 2; break; case 233: /* subsi3_carry_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1), 0)); break; case 246: /* *subqi_3 */ case 242: /* *subhi_3 */ case 238: /* *subsi_3 */ case 229: /* *subdi_3_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[1] = 2; break; case 232: /* subsi3_carry */ case 231: /* subhi3_carry */ case 230: /* subqi3_carry */ case 226: /* subdi3_carry_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0)); break; case 345: /* *xorqi_ext_2 */ case 344: /* *xorqi_ext_1_rex64 */ case 343: /* *xorqi_ext_1 */ case 326: /* *iorqi_ext_2 */ case 325: /* *iorqi_ext_1_rex64 */ case 324: /* *iorqi_ext_1 */ case 304: /* *andqi_ext_2 */ case 303: /* *andqi_ext_1_rex64 */ case 302: /* *andqi_ext_1 */ case 224: /* *addqi_ext_2 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1), 0)); break; case 342: /* xorqi_ext_0 */ case 323: /* iorqi_ext_0 */ case 300: /* andqi_ext_0 */ case 223: /* *addqi_ext_1_rex64 */ case 222: /* addqi_ext_1 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); break; case 492: /* *rotrqi3_1_slp */ case 490: /* *rotrqi3_1_one_bit_slp
*/ case 479: /* *rotlqi3_1_slp */ case 477: /* *rotlqi3_1_one_bit_slp */ case 466: /* *lshrqi3_1_slp */ case 464: /* *lshrqi3_1_one_bit_slp */ case 442: /* *ashrqi3_1_slp */ case 440: /* *ashrqi3_1_one_bit_slp */ case 341: /* *xorqi_1_slp */ case 319: /* *iorqi_1_slp */ case 297: /* *andqi_1_slp */ case 244: /* *subqi_1_slp */ case 217: /* *addqi_1_slp */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0); recog_data.dup_num[0] = 0; break; case 206: /* *addsi_3_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 1); recog_data.dup_num[1] = 2; break; case 434: /* *ashrsi3_cmp_zext */ case 432: /* *ashrsi3_one_bit_cmp_zext */ case 410: /* *ashlsi3_cmp_zext */ case 334: /* *xorsi_2_zext */ case 312: /* *iorsi_2_zext */ case 293: /* *andsi_2_zext */ case 237: /* *subsi_2_zext */ case 204: /* *addsi_2_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0), 1); recog_data.dup_num[1] = 2; break; case 486: /* *rotrsi3_1_zext */ case 484: /* *rotrsi3_1_one_bit_zext */ case 474: /* *rotlsi3_1_zext */ case 472: /* *rotlsi3_1_one_bit_zext */ case 454: /* *lshrsi3_1_zext */ case 430: /* *ashrsi3_1_zext */ case 428: /* *ashrsi3_1_one_bit_zext */ case 426: /* *ashrsi3_31_zext */ case 408: /* *ashlsi3_1_zext */ case 331: /* *xorsi_1_zext */ case 309: /* *iorsi_1_zext */ case 291: /* *andsi_1_zext */ case 249: /* *mulsi3_1_zext */ case 235: /* *subsi_1_zext */ case 202: /* addsi_1_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); break; case 348: /* *xorqi_cc_2 */ case 339: /* *xorhi_3 */ case 336: /* *xorsi_3 */ case 329: /* *xordi_3_rex64 */ case 322: /* *iorqi_3 */ case 317: /* *iorhi_3 */ case 314: /* *iorsi_3 */ case 307: /* *iordi_3_rex64 */ case 221: /* *addqi_5 */ case 214: /* *addhi_5 */ case 208: /* *addsi_5 */ case 200: /* *adddi_5_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); break; case 220: /* *addqi_4 */ case 213: /* *addhi_4 */ case 207: /* *addsi_4 */ case 199: /* *adddi_4_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); break; case 219: /* *addqi_3 */ case 212: /* *addhi_3 */ case 205: /* *addsi_3 */ case 198: /* *adddi_3_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); break; case 
468: /* *lshrqi2_cmp */ case 467: /* *lshrqi2_one_bit_cmp */ case 462: /* *lshrhi3_cmp */ case 461: /* *lshrhi3_one_bit_cmp */ case 457: /* *lshrsi3_cmp */ case 455: /* *lshrsi3_one_bit_cmp */ case 448: /* *lshrdi3_cmp_rex64 */ case 447: /* *lshrdi3_cmp_one_bit_rex64 */ case 444: /* *ashrqi3_cmp */ case 443: /* *ashrqi3_one_bit_cmp */ case 438: /* *ashrhi3_cmp */ case 437: /* *ashrhi3_one_bit_cmp */ case 433: /* *ashrsi3_cmp */ case 431: /* *ashrsi3_one_bit_cmp */ case 421: /* *ashrdi3_cmp_rex64 */ case 420: /* *ashrdi3_one_bit_cmp_rex64 */ case 416: /* *ashlqi3_cmp */ case 413: /* *ashlhi3_cmp */ case 409: /* *ashlsi3_cmp */ case 403: /* *ashldi3_cmp_rex64 */ case 346: /* *xorqi_cc_1 */ case 338: /* *xorhi_2 */ case 333: /* *xorsi_2 */ case 328: /* *xordi_2_rex64 */ case 320: /* *iorqi_2 */ case 316: /* *iorhi_2 */ case 311: /* *iorsi_2 */ case 306: /* *iordi_2_rex64 */ case 298: /* *andqi_2 */ case 295: /* *andhi_2 */ case 292: /* *andsi_2 */ case 289: /* *anddi_2 */ case 245: /* *subqi_2 */ case 241: /* *subhi_2 */ case 236: /* *subsi_2 */ case 228: /* *subdi_2_rex64 */ case 218: /* *addqi_2 */ case 211: /* *addhi_2 */ case 203: /* *addsi_2 */ case 197: /* *adddi_2_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[1] = 2; break; case 195: /* *lea_general_3_zext */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 1)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); break; case 194: /* *lea_general_3 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); ro[4] = *(ro_loc[4] = &XEXP (XEXP (pat, 1), 1)); break; case 193: /* *lea_general_2_zext */ case 191: /* *lea_general_1_zext */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); break; case 192: /* *lea_general_2 */ case 190: /* *lea_general_1 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (pat, 1), 1)); break; case 183: /* *addsi3_carry_zext */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0), 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0), 0)); break; case 185: /* addqi3_cc */ case 184: /* *addsi3_cc */ case 179: /* *adddi3_cc_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[1] = *(ro_loc[1] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 0)); ro[2] = *(ro_loc[2] = &XVECEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0, 1)); recog_data.dup_loc[0] = &XEXP (XEXP (XVECEXP (pat, 0, 
1), 1), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 1), 1), 1); recog_data.dup_num[1] = 2; break; case 182: /* addsi3_carry */ case 181: /* addhi3_carry */ case 180: /* addqi3_carry */ case 178: /* adddi3_carry_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 1)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); ro[3] = *(ro_loc[3] = &XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0)); break; case 671: /* pro_epilogue_adjust_stack_rex64 */ case 670: /* pro_epilogue_adjust_stack_1 */ case 491: /* *rotrqi3_1 */ case 489: /* *rotrqi3_1_one_bit */ case 488: /* *rotrhi3 */ case 487: /* *rotrhi3_one_bit */ case 485: /* *rotrsi3_1 */ case 483: /* *rotrsi3_1_one_bit */ case 482: /* *rotrdi3_1_rex64 */ case 481: /* *rotrdi3_1_one_bit_rex64 */ case 480: /* *rotlqi3_1 */ case 478: /* *rotlqi3_1_one_bit */ case 476: /* *rotlhi3_1 */ case 475: /* *rotlhi3_1_one_bit */ case 473: /* *rotlsi3_1 */ case 471: /* *rotlsi3_1_one_bit */ case 470: /* *rotldi3_1_rex64 */ case 469: /* *rotlsi3_1_one_bit_rex64 */ case 465: /* *lshrqi3_1 */ case 463: /* *lshrqi3_1_one_bit */ case 460: /* *lshrhi3_1 */ case 459: /* *lshrhi3_1_one_bit */ case 453: /* *lshrsi3_1 */ case 451: /* *lshrsi3_1_one_bit */ case 450: /* *lshrdi3_2 */ case 446: /* *lshrdi3_1_rex64 */ case 445: /* *lshrdi3_1_one_bit_rex64 */ case 441: /* *ashrqi3_1 */ case 439: /* *ashrqi3_1_one_bit */ case 436: /* *ashrhi3_1 */ case 435: /* *ashrhi3_1_one_bit */ case 429: /* *ashrsi3_1 */ case 427: /* *ashrsi3_1_one_bit */ case 425: /* ashrsi3_31 */ case 423: /* *ashrdi3_2 */ case 419: /* *ashrdi3_1_rex64 */ case 418: /* *ashrdi3_1_one_bit_rex64 */ case 417: /* ashrdi3_63_rex64 */ case 415: /* *ashlqi3_1 */ case 414: /* *ashlqi3_1_lea */ case 412: /* *ashlhi3_1 */ case 411: /* *ashlhi3_1_lea */ case 407: /* *ashlsi3_1 */ case 405: /* *ashldi3_2 */ case 402: /* *ashldi3_1_rex64 */ case 340: /* *xorqi_1 */ case 337: /* *xorhi_1 */ case 330: /* *xorsi_1 */ case 327: /* *xordi_1_rex64 */ case 318: /* *iorqi_1 */ case 315: /* *iorhi_1 */ case 308: /* *iorsi_1 */ case 305: /* *iordi_1_rex64 */ case 296: /* *andqi_1 */ case 294: /* *andhi_1 */ case 290: /* *andsi_1 */ case 288: /* *anddi_1_rex64 */ case 265: /* udivqi3 */ case 264: /* divqi3 */ case 251: /* *mulqi3_1 */ case 250: /* *mulhi3_1 */ case 248: /* *mulsi3_1 */ case 247: /* *muldi3_1_rex64 */ case 243: /* *subqi_1 */ case 240: /* *subhi_1 */ case 234: /* *subsi_1 */ case 227: /* *subdi_1_rex64 */ case 225: /* *subdi3_1 */ case 216: /* *addqi_1 */ case 215: /* *addqi_1_lea */ case 210: /* *addhi_1 */ case 209: /* *addhi_1_lea */ case 201: /* *addsi_1 */ case 196: /* *adddi_1_rex64 */ case 177: /* *adddi3_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 1)); break; case 159: /* fix_trunchi_memory */ case 154: /* fix_truncsi_memory */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 2), 0)); break; case 158: /* fix_trunchi_nomemory */ case 153: /* fix_truncsi_nomemory */ case 149: /* fix_truncdi_memory */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 
1), 0)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 2), 0)); ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 3), 0)); break; case 148: /* fix_truncdi_nomemory */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0)); ro[3] = *(ro_loc[3] = &XEXP (XVECEXP (pat, 0, 2), 0)); ro[4] = *(ro_loc[4] = &XEXP (XVECEXP (pat, 0, 3), 0)); ro[5] = *(ro_loc[5] = &XEXP (XVECEXP (pat, 0, 4), 0)); break; case 538: /* *ffs_rex64 */ case 536: /* *ffs_no_cmove */ case 535: /* *ffs_cmove */ case 382: /* *absdf2_ifs_rex64 */ case 381: /* absdf2_ifs */ case 378: /* abssf2_ifs */ case 367: /* *negdf2_ifs_rex64 */ case 366: /* negdf2_ifs */ case 363: /* negsf2_ifs */ case 145: /* *truncxfdf2_1 */ case 142: /* *truncxfsf2_1 */ case 135: /* *truncdfsf2_1_sse_nooverlap */ case 134: /* *truncdfsf2_1_sse */ case 133: /* *truncdfsf2_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 1), 0)); break; case 117: /* *extendsidi2_1 */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (pat, 0, 2), 0)); break; case 650: /* x86_movsicc_0_m1 */ case 648: /* x86_movdicc_0_m1_rex64 */ case 541: /* ctzdi2 */ case 540: /* ctzsi2 */ case 385: /* *absxf2_if */ case 384: /* *absdf2_if_rex64 */ case 383: /* *absdf2_if */ case 380: /* absdf2_memory */ case 379: /* *abssf2_if */ case 377: /* abssf2_memory */ case 370: /* *negxf2_if */ case 369: /* *negdf2_if_rex64 */ case 368: /* *negdf2_if */ case 365: /* negdf2_memory */ case 364: /* *negsf2_if */ case 362: /* negsf2_memory */ case 360: /* *negqi2_1 */ case 358: /* *neghi2_1 */ case 354: /* *negsi2_1 */ case 352: /* *negdi2_1_rex64 */ case 351: /* *negdi2_1 */ case 157: /* *fix_trunchi_1 */ case 152: /* *fix_truncsi_1 */ case 147: /* *fix_truncdi_1 */ case 112: /* *zero_extendsidi2_32_1 */ case 111: /* zero_extendsidi2_32 */ case 109: /* *zero_extendqisi2_movzbw_and */ case 108: /* *zero_extendqisi2_and */ case 106: /* *zero_extendqihi2_movzbw_and */ case 105: /* *zero_extendqihi2_and */ case 103: /* zero_extendhisi2_and */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0)); break; case 74: /* *movqi_insv_2 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); break; case 1033: /* movddup */ case 1011: /* sse2_loadd */ case 1010: /* sse2_movq */ case 929: /* cvtps2pd */ case 921: /* cvtsd2siq */ case 920: /* cvtsd2si */ case 915: /* cvtpd2dq */ case 914: /* cvtdq2pd */ case 788: /* cvtss2siq */ case 787: /* cvtss2si */ case 783: /* cvtps2pi */ case 590: /* *sqrtextendsfxf2 */ case 589: /* *sqrtextenddfxf2 */ case 587: /* *sqrtextendsfdf2 */ case 395: /* *one_cmplsi2_1_zext */ case 391: /* *absextendsfxf2 */ case 390: /* *absextenddfxf2 */ case 388: /* *absextendsfdf2 */ case 376: /* *negextendsfxf2 */ case 375: /* *negextenddfxf2 */ case 373: /* *negextendsfdf2 */ case 188: /* *lea_1_zext */ case 125: /* *extendqisi2_zext */ case 122: /* *extendhisi2_zext */ case 71: /* *movqi_extzv_2_rex64 */ case 70: /* *movqi_extzv_2 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); break; case 62: /* *movstrictqi_xor */ case 56: /* *movstricthi_xor */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XVECEXP 
(pat, 0, 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 0), 1)); break; case 102: /* swapxf */ case 97: /* *swapdf */ case 92: /* *swapsf */ case 87: /* *swapdi_rex64 */ case 60: /* *swapqi */ case 54: /* *swaphi_2 */ case 53: /* *swaphi_1 */ case 47: /* *swapsi */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 0), 1)); recog_data.dup_loc[0] = &XEXP (XVECEXP (pat, 0, 1), 0); recog_data.dup_num[0] = 1; recog_data.dup_loc[1] = &XEXP (XVECEXP (pat, 0, 1), 1); recog_data.dup_num[1] = 0; break; case 1032: /* loadddup */ case 1016: /* sse2_storesd */ case 1012: /* sse2_stored */ case 1009: /* sse2_movq2dq_rex64 */ case 1008: /* sse2_movq2dq */ case 1007: /* sse2_movdq2q_rex64 */ case 1006: /* sse2_movdq2q */ case 919: /* cvtpi2pd */ case 917: /* cvtpd2pi */ case 912: /* cvtps2dq */ case 911: /* cvtdq2ps */ case 896: /* sqrtv2df2 */ case 879: /* pswapdv2sf2 */ case 878: /* pswapdv2si2 */ case 870: /* floatv2si2 */ case 864: /* pf2id */ case 750: /* sqrtv4sf2 */ case 736: /* sse_storess */ case 588: /* sqrtxf2 */ case 586: /* sqrtdf2_i387 */ case 585: /* sqrtdf2_1_sse_only */ case 584: /* sqrtdf2_1 */ case 583: /* sqrtsf2_i387 */ case 582: /* sqrtsf2_1_sse_only */ case 581: /* sqrtsf2_1 */ case 400: /* *one_cmplqi2_1 */ case 398: /* *one_cmplhi2_1 */ case 394: /* *one_cmplsi2_1 */ case 392: /* *one_cmpldi2_1_rex64 */ case 389: /* *absxf2_1 */ case 387: /* *absdf2_1 */ case 386: /* *abssf2_1 */ case 374: /* *negxf2_1 */ case 372: /* *negdf2_1 */ case 371: /* *negsf2_1 */ case 187: /* *lea_1_rex64 */ case 176: /* floatdixf2 */ case 175: /* floatsixf2 */ case 174: /* floathixf2 */ case 173: /* *floatdidf2_sse */ case 172: /* *floatdidf2_i387 */ case 171: /* *floatdidf2_i387_only */ case 170: /* *floatsidf2_sse */ case 169: /* *floatsidf2_i387 */ case 168: /* *floathidf2_1 */ case 167: /* *floatdisf2_sse */ case 166: /* *floatdisf2_i387 */ case 165: /* *floatdisf2_i387_only */ case 164: /* *floatsisf2_sse */ case 163: /* *floatsisf2_i387 */ case 162: /* *floathisf2_1 */ case 156: /* fix_truncdfsi_sse */ case 155: /* fix_truncsfsi_sse */ case 151: /* fix_truncdfdi_sse */ case 150: /* fix_truncsfdi_sse */ case 146: /* *truncxfdf2_2 */ case 144: /* truncxfdf2_noop */ case 143: /* *truncxfsf2_2 */ case 141: /* truncxfsf2_noop */ case 140: /* *truncdfsf2_sse_only_nooverlap */ case 139: /* truncdfsf2_sse_only */ case 138: /* *truncdfsf2_3 */ case 137: /* *truncdfsf2_2_nooverlap */ case 136: /* *truncdfsf2_2 */ case 132: /* truncdfsf2_noop */ case 131: /* *extenddfxf2_1 */ case 130: /* *extendsfxf2_1 */ case 129: /* *extendsfdf2_1_sse_only */ case 128: /* *extendsfdf2_1 */ case 127: /* *dummy_extendsfxf2 */ case 126: /* *dummy_extendsfdf2 */ case 124: /* extendqisi2 */ case 123: /* extendqihi2 */ case 121: /* extendhisi2 */ case 120: /* extendqidi2 */ case 119: /* extendhidi2 */ case 118: /* extendsidi2_rex64 */ case 116: /* zero_extendqidi2 */ case 115: /* zero_extendhidi2 */ case 114: /* *zero_extendsidi2_rex64_1 */ case 113: /* zero_extendsidi2_rex64 */ case 110: /* *zero_extendqisi2_movzbw */ case 107: /* *zero_extendqihi2_movzbw */ case 104: /* *zero_extendhisi2_movzwl */ case 86: /* *movabsdi_2_rex64 */ case 69: /* *movsi_extzv_1 */ case 68: /* *movabsqi_2_rex64 */ case 66: /* *movqi_extv_1_rex64 */ case 65: /* *movqi_extv_1 */ case 64: /* *movhi_extv_1 */ case 63: /* *movsi_extv_1 */ case 52: /* *movabshi_2_rex64 */ case 46: /* *movabssi_2_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 0)); 
break; case 521: /* *sibcall_1_rex64 */ case 520: /* *call_1_rex64 */ case 519: /* *sibcall_1 */ case 518: /* *call_1 */ case 517: /* *call_0 */ case 494: /* setcc_2 */ case 85: /* *movabsdi_1_rex64 */ case 73: /* movdi_insv_1_rex64 */ case 72: /* movsi_insv_1 */ case 67: /* *movabsqi_1_rex64 */ case 61: /* *movstrictqi_1 */ case 55: /* *movstricthi_1 */ case 51: /* *movabshi_1_rex64 */ case 45: /* *movabssi_1_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (pat, 1)); break; case 530: /* set_got */ case 79: /* popdi1 */ case 78: /* *popdi1_epilogue_rex64 */ case 40: /* popsi1 */ case 39: /* *popsi1_epilogue */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); break; case 81: /* *movdi_or_rex64 */ case 80: /* *movdi_xor_rex64 */ case 77: /* *pushdi2_prologue_rex64 */ case 42: /* *movsi_or */ case 41: /* *movsi_xor */ case 38: /* *pushsi2_prologue */ ro[0] = *(ro_loc[0] = &XEXP (XVECEXP (pat, 0, 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (pat, 0, 0), 1)); break; case 883: /* *prefetch_3dnow_rex */ case 882: /* *prefetch_3dnow */ case 768: /* sse_clrv4sf */ case 721: /* *movtf_rex64 */ case 720: /* *movti_rex64 */ case 719: /* movti_internal */ case 718: /* *pushv2sf */ case 717: /* *pushv8qi */ case 716: /* *pushv4hi */ case 715: /* *pushv2si */ case 714: /* *pushv4si */ case 713: /* *pushv4sf */ case 712: /* *pushv16qi */ case 711: /* *pushv8hi */ case 710: /* *pushv2di */ case 709: /* *pushv2df */ case 708: /* *pushti */ case 707: /* movv16qi_internal */ case 706: /* movv8hi_internal */ case 705: /* movv2df_internal */ case 704: /* movv2sf_internal */ case 703: /* movv2si_internal */ case 702: /* movv4hi_internal */ case 701: /* movv8qi_internal */ case 700: /* movv2di_internal */ case 699: /* movv4si_internal */ case 698: /* movv4sf_internal */ case 697: /* *conditional_trap_1 */ case 493: /* *setcc_1 */ case 189: /* *lea_2_rex64 */ case 186: /* *lea_1 */ case 101: /* *movxf_integer */ case 100: /* *movxf_nointeger */ case 99: /* *pushxf_integer */ case 98: /* *pushxf_nointeger */ case 96: /* *movdf_integer */ case 95: /* *movdf_nointeger */ case 94: /* *pushdf_integer */ case 93: /* *pushdf_nointeger */ case 91: /* *movsf_1_nointerunit */ case 90: /* *movsf_1 */ case 89: /* *pushsf_rex64 */ case 88: /* *pushsf */ case 84: /* *movdi_1_rex64_nointerunit */ case 83: /* *movdi_1_rex64 */ case 82: /* *movdi_2 */ case 76: /* pushdi2_rex64 */ case 75: /* *pushdi */ case 59: /* *movqi_1 */ case 58: /* *pushqi2_rex64 */ case 57: /* *pushqi2 */ case 50: /* *movhi_1 */ case 49: /* *pushhi2_rex64 */ case 48: /* *pushhi2 */ case 44: /* *movsi_1_nointernunit */ case 43: /* *movsi_1 */ case 37: /* *pushsi2_rex64 */ case 36: /* *pushsi2 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (pat, 1)); break; case 532: /* eh_return_di */ case 531: /* eh_return_si */ case 161: /* x86_fldcw_1 */ case 29: /* x86_sahf_1 */ ro[0] = *(ro_loc[0] = &XVECEXP (XEXP (pat, 1), 0, 0)); break; case 952: /* sse2_clrti */ case 851: /* stmxcsr */ case 813: /* mmx_clrdi */ case 769: /* sse_clrv2df */ case 553: /* *load_tp_di */ case 551: /* *load_tp_si */ case 160: /* x86_fnstcw_1 */ case 28: /* x86_fnstsw_1 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); break; case 27: /* *ficom_1 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 1), 0)); break; case 839: /* mmx_ashldi3 */ case 836: /* mmx_lshrdi3 */ case 814: /* mmx_anddi3 */ case 812: /* mmx_xordi3 */ case 811: /* mmx_iordi3 */ case 802: /* mmx_subdi3 */ case 
794: /* mmx_adddi3 */ case 26: /* *cmpfp_2u_1 */ case 24: /* *cmpfp_2_xf_1 */ case 22: /* *cmpfp_2_df_1 */ case 20: /* *cmpfp_2_sf_1 */ case 18: /* *cmpfp_0 */ ro[0] = *(ro_loc[0] = &XEXP (pat, 0)); ro[1] = *(ro_loc[1] = &XEXP (XVECEXP (XEXP (pat, 1), 0, 0), 0)); ro[2] = *(ro_loc[2] = &XEXP (XVECEXP (XEXP (pat, 1), 0, 0), 1)); break; case 17: /* *cmpqi_ext_4 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 1), 0), 0)); break; case 16: /* cmpqi_ext_3_insn_rex64 */ case 15: /* cmpqi_ext_3_insn */ case 14: /* *cmpqi_ext_2 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 0), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 1)); break; case 13: /* *cmpqi_ext_1_rex64 */ case 12: /* *cmpqi_ext_1 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (XEXP (pat, 1), 1), 0), 0)); break; case 281: /* *testqi_1 */ case 280: /* *testhi_1 */ case 279: /* testsi_1 */ case 278: /* *testdi_1_rex64 */ case 11: /* *cmpqi_minus_1 */ case 7: /* *cmphi_minus_1 */ case 4: /* *cmpsi_minus_1 */ case 1: /* *cmpdi_minus_1_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (XEXP (pat, 1), 0), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (XEXP (pat, 1), 0), 1)); break; case 35: /* *cmpfp_iu_sse_only */ case 34: /* *cmpfp_iu_sse */ case 33: /* *cmpfp_iu */ case 32: /* *cmpfp_i_sse_only */ case 31: /* *cmpfp_i_sse */ case 30: /* *cmpfp_i */ case 25: /* *cmpfp_2u */ case 23: /* *cmpfp_2_xf */ case 21: /* *cmpfp_2_df */ case 19: /* *cmpfp_2_sf */ case 10: /* *cmpqi_1 */ case 9: /* *cmpqi_ccno_1 */ case 8: /* *cmphi_1 */ case 6: /* *cmphi_ccno_1 */ case 5: /* *cmpsi_1_insn */ case 3: /* *cmpsi_ccno_1 */ case 2: /* cmpdi_1_insn_rex64 */ case 0: /* cmpdi_ccno_1_rex64 */ ro[0] = *(ro_loc[0] = &XEXP (XEXP (pat, 1), 0)); ro[1] = *(ro_loc[1] = &XEXP (XEXP (pat, 1), 1)); break; default: abort (); } } /* Generated automatically by the program `genopinit' from the machine description file `md'. 
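The init_all_optabs function that follows fills in the operation tables (optabs): for every operation and machine mode for which the machine description supplies a named pattern, the corresponding handlers[...].insn_code slot is set to the matching CODE_FOR_ value, guarded by an if (HAVE_...) test whenever the pattern carries a run-time condition. Slots that are never assigned keep their default of CODE_FOR_nothing, which is how later expansion code recognizes that an operation cannot be open-coded in a given mode. Purely as an illustration (this fragment is not part of the generated code), a consumer of the tables might check

      if (rotl_optab->handlers[SImode].insn_code != CODE_FOR_nothing)
        ... emit the rotate through the named pattern ...

before falling back to a shift-based expansion or a library call.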
*/ void init_all_optabs (void) { #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC int i, j; #endif if (HAVE_zero_extendhidi2) zext_optab->handlers[DImode][HImode].insn_code = CODE_FOR_zero_extendhidi2; if (HAVE_zero_extendqidi2) zext_optab->handlers[DImode][QImode].insn_code = CODE_FOR_zero_extendqidi2; if (HAVE_extendhidi2) sext_optab->handlers[DImode][HImode].insn_code = CODE_FOR_extendhidi2; if (HAVE_extendqidi2) sext_optab->handlers[DImode][QImode].insn_code = CODE_FOR_extendqidi2; sext_optab->handlers[SImode][HImode].insn_code = CODE_FOR_extendhisi2; sext_optab->handlers[HImode][QImode].insn_code = CODE_FOR_extendqihi2; sext_optab->handlers[SImode][QImode].insn_code = CODE_FOR_extendqisi2; if (HAVE_floathixf2) sfloat_optab->handlers[XFmode][HImode].insn_code = CODE_FOR_floathixf2; if (HAVE_floatsixf2) sfloat_optab->handlers[XFmode][SImode].insn_code = CODE_FOR_floatsixf2; if (HAVE_floatdixf2) sfloat_optab->handlers[XFmode][DImode].insn_code = CODE_FOR_floatdixf2; if (HAVE_divqi3) sdiv_optab->handlers[QImode].insn_code = CODE_FOR_divqi3; if (HAVE_udivqi3) udiv_optab->handlers[QImode].insn_code = CODE_FOR_udivqi3; if (HAVE_divmodhi4) sdivmod_optab->handlers[HImode].insn_code = CODE_FOR_divmodhi4; if (HAVE_udivmoddi4) udivmod_optab->handlers[DImode].insn_code = CODE_FOR_udivmoddi4; udivmod_optab->handlers[SImode].insn_code = CODE_FOR_udivmodsi4; ctz_optab->handlers[SImode].insn_code = CODE_FOR_ctzsi2; if (HAVE_ctzdi2) ctz_optab->handlers[DImode].insn_code = CODE_FOR_ctzdi2; if (HAVE_sqrtxf2) sqrt_optab->handlers[XFmode].insn_code = CODE_FOR_sqrtxf2; if (HAVE_sincosdf3) sincos_optab->handlers[DFmode].insn_code = CODE_FOR_sincosdf3; if (HAVE_sincossf3) sincos_optab->handlers[SFmode].insn_code = CODE_FOR_sincossf3; if (HAVE_sincosxf3) sincos_optab->handlers[XFmode].insn_code = CODE_FOR_sincosxf3; if (HAVE_addv4sf3) addv_optab->handlers[V4SFmode].insn_code = add_optab->handlers[V4SFmode].insn_code = CODE_FOR_addv4sf3; if (HAVE_subv4sf3) subv_optab->handlers[V4SFmode].insn_code = sub_optab->handlers[V4SFmode].insn_code = CODE_FOR_subv4sf3; if (HAVE_mulv4sf3) smulv_optab->handlers[V4SFmode].insn_code = smul_optab->handlers[V4SFmode].insn_code = CODE_FOR_mulv4sf3; if (HAVE_divv4sf3) sdiv_optab->handlers[V4SFmode].insn_code = CODE_FOR_divv4sf3; if (HAVE_sqrtv4sf2) sqrt_optab->handlers[V4SFmode].insn_code = CODE_FOR_sqrtv4sf2; if (HAVE_addv8qi3) add_optab->handlers[V8QImode].insn_code = CODE_FOR_addv8qi3; if (HAVE_addv4hi3) add_optab->handlers[V4HImode].insn_code = CODE_FOR_addv4hi3; if (HAVE_addv2si3) add_optab->handlers[V2SImode].insn_code = CODE_FOR_addv2si3; if (HAVE_subv8qi3) sub_optab->handlers[V8QImode].insn_code = CODE_FOR_subv8qi3; if (HAVE_subv4hi3) sub_optab->handlers[V4HImode].insn_code = CODE_FOR_subv4hi3; if (HAVE_subv2si3) sub_optab->handlers[V2SImode].insn_code = CODE_FOR_subv2si3; if (HAVE_mulv4hi3) smul_optab->handlers[V4HImode].insn_code = CODE_FOR_mulv4hi3; if (HAVE_smulv4hi3_highpart) smul_highpart_optab->handlers[V4HImode].insn_code = CODE_FOR_smulv4hi3_highpart; if (HAVE_umulv4hi3_highpart) umul_highpart_optab->handlers[V4HImode].insn_code = CODE_FOR_umulv4hi3_highpart; if (HAVE_umaxv8qi3) umax_optab->handlers[V8QImode].insn_code = CODE_FOR_umaxv8qi3; if (HAVE_smaxv4hi3) smax_optab->handlers[V4HImode].insn_code = CODE_FOR_smaxv4hi3; if (HAVE_uminv8qi3) umin_optab->handlers[V8QImode].insn_code = CODE_FOR_uminv8qi3; if (HAVE_sminv4hi3) smin_optab->handlers[V4HImode].insn_code = CODE_FOR_sminv4hi3; if (HAVE_ashrv4hi3) ashr_optab->handlers[V4HImode].insn_code = CODE_FOR_ashrv4hi3; if 
(HAVE_ashrv2si3) ashr_optab->handlers[V2SImode].insn_code = CODE_FOR_ashrv2si3; if (HAVE_lshrv4hi3) lshr_optab->handlers[V4HImode].insn_code = CODE_FOR_lshrv4hi3; if (HAVE_lshrv2si3) lshr_optab->handlers[V2SImode].insn_code = CODE_FOR_lshrv2si3; if (HAVE_ashlv4hi3) ashl_optab->handlers[V4HImode].insn_code = CODE_FOR_ashlv4hi3; if (HAVE_ashlv2si3) ashl_optab->handlers[V2SImode].insn_code = CODE_FOR_ashlv2si3; if (HAVE_addv2sf3) addv_optab->handlers[V2SFmode].insn_code = add_optab->handlers[V2SFmode].insn_code = CODE_FOR_addv2sf3; if (HAVE_subv2sf3) subv_optab->handlers[V2SFmode].insn_code = sub_optab->handlers[V2SFmode].insn_code = CODE_FOR_subv2sf3; if (HAVE_mulv2sf3) smulv_optab->handlers[V2SFmode].insn_code = smul_optab->handlers[V2SFmode].insn_code = CODE_FOR_mulv2sf3; if (HAVE_addv2df3) addv_optab->handlers[V2DFmode].insn_code = add_optab->handlers[V2DFmode].insn_code = CODE_FOR_addv2df3; if (HAVE_subv2df3) subv_optab->handlers[V2DFmode].insn_code = sub_optab->handlers[V2DFmode].insn_code = CODE_FOR_subv2df3; if (HAVE_mulv2df3) smulv_optab->handlers[V2DFmode].insn_code = smul_optab->handlers[V2DFmode].insn_code = CODE_FOR_mulv2df3; if (HAVE_divv2df3) sdiv_optab->handlers[V2DFmode].insn_code = CODE_FOR_divv2df3; if (HAVE_sqrtv2df2) sqrt_optab->handlers[V2DFmode].insn_code = CODE_FOR_sqrtv2df2; if (HAVE_addv16qi3) add_optab->handlers[V16QImode].insn_code = CODE_FOR_addv16qi3; if (HAVE_addv8hi3) add_optab->handlers[V8HImode].insn_code = CODE_FOR_addv8hi3; if (HAVE_addv4si3) add_optab->handlers[V4SImode].insn_code = CODE_FOR_addv4si3; if (HAVE_addv2di3) add_optab->handlers[V2DImode].insn_code = CODE_FOR_addv2di3; if (HAVE_subv16qi3) sub_optab->handlers[V16QImode].insn_code = CODE_FOR_subv16qi3; if (HAVE_subv8hi3) sub_optab->handlers[V8HImode].insn_code = CODE_FOR_subv8hi3; if (HAVE_subv4si3) sub_optab->handlers[V4SImode].insn_code = CODE_FOR_subv4si3; if (HAVE_subv2di3) sub_optab->handlers[V2DImode].insn_code = CODE_FOR_subv2di3; if (HAVE_mulv8hi3) smul_optab->handlers[V8HImode].insn_code = CODE_FOR_mulv8hi3; if (HAVE_smulv8hi3_highpart) smul_highpart_optab->handlers[V8HImode].insn_code = CODE_FOR_smulv8hi3_highpart; if (HAVE_umulv8hi3_highpart) umul_highpart_optab->handlers[V8HImode].insn_code = CODE_FOR_umulv8hi3_highpart; if (HAVE_umaxv16qi3) umax_optab->handlers[V16QImode].insn_code = CODE_FOR_umaxv16qi3; if (HAVE_smaxv8hi3) smax_optab->handlers[V8HImode].insn_code = CODE_FOR_smaxv8hi3; if (HAVE_uminv16qi3) umin_optab->handlers[V16QImode].insn_code = CODE_FOR_uminv16qi3; if (HAVE_sminv8hi3) smin_optab->handlers[V8HImode].insn_code = CODE_FOR_sminv8hi3; if (HAVE_ashrv8hi3) ashr_optab->handlers[V8HImode].insn_code = CODE_FOR_ashrv8hi3; if (HAVE_ashrv4si3) ashr_optab->handlers[V4SImode].insn_code = CODE_FOR_ashrv4si3; if (HAVE_lshrv8hi3) lshr_optab->handlers[V8HImode].insn_code = CODE_FOR_lshrv8hi3; if (HAVE_lshrv4si3) lshr_optab->handlers[V4SImode].insn_code = CODE_FOR_lshrv4si3; if (HAVE_lshrv2di3) lshr_optab->handlers[V2DImode].insn_code = CODE_FOR_lshrv2di3; if (HAVE_ashlv8hi3) ashl_optab->handlers[V8HImode].insn_code = CODE_FOR_ashlv8hi3; if (HAVE_ashlv4si3) ashl_optab->handlers[V4SImode].insn_code = CODE_FOR_ashlv4si3; if (HAVE_ashlv2di3) ashl_optab->handlers[V2DImode].insn_code = CODE_FOR_ashlv2di3; cmp_optab->handlers[DImode].insn_code = CODE_FOR_cmpdi; cmp_optab->handlers[SImode].insn_code = CODE_FOR_cmpsi; cmp_optab->handlers[HImode].insn_code = CODE_FOR_cmphi; if (HAVE_cmpqi) cmp_optab->handlers[QImode].insn_code = CODE_FOR_cmpqi; if (HAVE_cmpxf) 
cmp_optab->handlers[XFmode].insn_code = CODE_FOR_cmpxf; if (HAVE_cmpdf) cmp_optab->handlers[DFmode].insn_code = CODE_FOR_cmpdf; if (HAVE_cmpsf) cmp_optab->handlers[SFmode].insn_code = CODE_FOR_cmpsf; mov_optab->handlers[SImode].insn_code = CODE_FOR_movsi; mov_optab->handlers[HImode].insn_code = CODE_FOR_movhi; if (HAVE_movstricthi) movstrict_optab->handlers[HImode].insn_code = CODE_FOR_movstricthi; mov_optab->handlers[QImode].insn_code = CODE_FOR_movqi; reload_out_optab[QImode] = CODE_FOR_reload_outqi; if (HAVE_movstrictqi) movstrict_optab->handlers[QImode].insn_code = CODE_FOR_movstrictqi; mov_optab->handlers[DImode].insn_code = CODE_FOR_movdi; mov_optab->handlers[SFmode].insn_code = CODE_FOR_movsf; mov_optab->handlers[DFmode].insn_code = CODE_FOR_movdf; mov_optab->handlers[XFmode].insn_code = CODE_FOR_movxf; zext_optab->handlers[SImode][HImode].insn_code = CODE_FOR_zero_extendhisi2; zext_optab->handlers[HImode][QImode].insn_code = CODE_FOR_zero_extendqihi2; zext_optab->handlers[SImode][QImode].insn_code = CODE_FOR_zero_extendqisi2; zext_optab->handlers[DImode][SImode].insn_code = CODE_FOR_zero_extendsidi2; sext_optab->handlers[DImode][SImode].insn_code = CODE_FOR_extendsidi2; if (HAVE_extendsfdf2) sext_optab->handlers[DFmode][SFmode].insn_code = CODE_FOR_extendsfdf2; if (HAVE_extendsfxf2) sext_optab->handlers[XFmode][SFmode].insn_code = CODE_FOR_extendsfxf2; if (HAVE_extenddfxf2) sext_optab->handlers[XFmode][DFmode].insn_code = CODE_FOR_extenddfxf2; if (HAVE_truncdfsf2) trunc_optab->handlers[SFmode][DFmode].insn_code = CODE_FOR_truncdfsf2; if (HAVE_truncxfsf2) trunc_optab->handlers[SFmode][XFmode].insn_code = CODE_FOR_truncxfsf2; if (HAVE_truncxfdf2) trunc_optab->handlers[DFmode][XFmode].insn_code = CODE_FOR_truncxfdf2; if (HAVE_fix_truncxfdi2) sfixtrunc_optab->handlers[DImode][XFmode].insn_code = CODE_FOR_fix_truncxfdi2; if (HAVE_fix_truncdfdi2) sfixtrunc_optab->handlers[DImode][DFmode].insn_code = CODE_FOR_fix_truncdfdi2; if (HAVE_fix_truncsfdi2) sfixtrunc_optab->handlers[DImode][SFmode].insn_code = CODE_FOR_fix_truncsfdi2; if (HAVE_fix_truncxfsi2) sfixtrunc_optab->handlers[SImode][XFmode].insn_code = CODE_FOR_fix_truncxfsi2; if (HAVE_fix_truncdfsi2) sfixtrunc_optab->handlers[SImode][DFmode].insn_code = CODE_FOR_fix_truncdfsi2; if (HAVE_fix_truncsfsi2) sfixtrunc_optab->handlers[SImode][SFmode].insn_code = CODE_FOR_fix_truncsfsi2; if (HAVE_fix_truncxfhi2) sfixtrunc_optab->handlers[HImode][XFmode].insn_code = CODE_FOR_fix_truncxfhi2; if (HAVE_fix_truncdfhi2) sfixtrunc_optab->handlers[HImode][DFmode].insn_code = CODE_FOR_fix_truncdfhi2; if (HAVE_fix_truncsfhi2) sfixtrunc_optab->handlers[HImode][SFmode].insn_code = CODE_FOR_fix_truncsfhi2; if (HAVE_floathisf2) sfloat_optab->handlers[SFmode][HImode].insn_code = CODE_FOR_floathisf2; if (HAVE_floatsisf2) sfloat_optab->handlers[SFmode][SImode].insn_code = CODE_FOR_floatsisf2; if (HAVE_floatdisf2) sfloat_optab->handlers[SFmode][DImode].insn_code = CODE_FOR_floatdisf2; if (HAVE_floathidf2) sfloat_optab->handlers[DFmode][HImode].insn_code = CODE_FOR_floathidf2; if (HAVE_floatsidf2) sfloat_optab->handlers[DFmode][SImode].insn_code = CODE_FOR_floatsidf2; if (HAVE_floatdidf2) sfloat_optab->handlers[DFmode][DImode].insn_code = CODE_FOR_floatdidf2; if (HAVE_floatunssisf2) ufloat_optab->handlers[SFmode][SImode].insn_code = CODE_FOR_floatunssisf2; if (HAVE_floatunsdisf2) ufloat_optab->handlers[SFmode][DImode].insn_code = CODE_FOR_floatunsdisf2; if (HAVE_floatunsdidf2) ufloat_optab->handlers[DFmode][DImode].insn_code = CODE_FOR_floatunsdidf2; if 
(HAVE_vec_setv2df) vec_set_optab->handlers[V2DFmode].insn_code = CODE_FOR_vec_setv2df; if (HAVE_vec_extractv2df) vec_extract_optab->handlers[V2DFmode].insn_code = CODE_FOR_vec_extractv2df; if (HAVE_vec_initv2df) vec_init_optab->handlers[V2DFmode].insn_code = CODE_FOR_vec_initv2df; if (HAVE_vec_setv4sf) vec_set_optab->handlers[V4SFmode].insn_code = CODE_FOR_vec_setv4sf; if (HAVE_vec_extractv4sf) vec_extract_optab->handlers[V4SFmode].insn_code = CODE_FOR_vec_extractv4sf; if (HAVE_vec_initv4sf) vec_init_optab->handlers[V4SFmode].insn_code = CODE_FOR_vec_initv4sf; add_optab->handlers[DImode].insn_code = CODE_FOR_adddi3; add_optab->handlers[SImode].insn_code = CODE_FOR_addsi3; if (HAVE_addhi3) add_optab->handlers[HImode].insn_code = CODE_FOR_addhi3; if (HAVE_addqi3) add_optab->handlers[QImode].insn_code = CODE_FOR_addqi3; if (HAVE_addxf3) addv_optab->handlers[XFmode].insn_code = add_optab->handlers[XFmode].insn_code = CODE_FOR_addxf3; if (HAVE_adddf3) addv_optab->handlers[DFmode].insn_code = add_optab->handlers[DFmode].insn_code = CODE_FOR_adddf3; if (HAVE_addsf3) addv_optab->handlers[SFmode].insn_code = add_optab->handlers[SFmode].insn_code = CODE_FOR_addsf3; sub_optab->handlers[DImode].insn_code = CODE_FOR_subdi3; sub_optab->handlers[SImode].insn_code = CODE_FOR_subsi3; if (HAVE_subhi3) sub_optab->handlers[HImode].insn_code = CODE_FOR_subhi3; if (HAVE_subqi3) sub_optab->handlers[QImode].insn_code = CODE_FOR_subqi3; if (HAVE_subxf3) subv_optab->handlers[XFmode].insn_code = sub_optab->handlers[XFmode].insn_code = CODE_FOR_subxf3; if (HAVE_subdf3) subv_optab->handlers[DFmode].insn_code = sub_optab->handlers[DFmode].insn_code = CODE_FOR_subdf3; if (HAVE_subsf3) subv_optab->handlers[SFmode].insn_code = sub_optab->handlers[SFmode].insn_code = CODE_FOR_subsf3; if (HAVE_muldi3) smul_optab->handlers[DImode].insn_code = CODE_FOR_muldi3; smul_optab->handlers[SImode].insn_code = CODE_FOR_mulsi3; if (HAVE_mulhi3) smul_optab->handlers[HImode].insn_code = CODE_FOR_mulhi3; if (HAVE_mulqi3) smul_optab->handlers[QImode].insn_code = CODE_FOR_mulqi3; if (HAVE_umulqihi3) umul_widen_optab->handlers[HImode].insn_code = CODE_FOR_umulqihi3; if (HAVE_mulqihi3) smul_widen_optab->handlers[HImode].insn_code = CODE_FOR_mulqihi3; if (HAVE_umulditi3) umul_widen_optab->handlers[TImode].insn_code = CODE_FOR_umulditi3; if (HAVE_umulsidi3) umul_widen_optab->handlers[DImode].insn_code = CODE_FOR_umulsidi3; if (HAVE_mulditi3) smul_widen_optab->handlers[TImode].insn_code = CODE_FOR_mulditi3; if (HAVE_mulsidi3) smul_widen_optab->handlers[DImode].insn_code = CODE_FOR_mulsidi3; if (HAVE_umuldi3_highpart) umul_highpart_optab->handlers[DImode].insn_code = CODE_FOR_umuldi3_highpart; umul_highpart_optab->handlers[SImode].insn_code = CODE_FOR_umulsi3_highpart; if (HAVE_smuldi3_highpart) smul_highpart_optab->handlers[DImode].insn_code = CODE_FOR_smuldi3_highpart; smul_highpart_optab->handlers[SImode].insn_code = CODE_FOR_smulsi3_highpart; if (HAVE_mulxf3) smulv_optab->handlers[XFmode].insn_code = smul_optab->handlers[XFmode].insn_code = CODE_FOR_mulxf3; if (HAVE_muldf3) smulv_optab->handlers[DFmode].insn_code = smul_optab->handlers[DFmode].insn_code = CODE_FOR_muldf3; if (HAVE_mulsf3) smulv_optab->handlers[SFmode].insn_code = smul_optab->handlers[SFmode].insn_code = CODE_FOR_mulsf3; if (HAVE_divxf3) sdiv_optab->handlers[XFmode].insn_code = CODE_FOR_divxf3; if (HAVE_divdf3) sdiv_optab->handlers[DFmode].insn_code = CODE_FOR_divdf3; if (HAVE_divsf3) sdiv_optab->handlers[SFmode].insn_code = CODE_FOR_divsf3; if (HAVE_divmoddi4) 
sdivmod_optab->handlers[DImode].insn_code = CODE_FOR_divmoddi4; sdivmod_optab->handlers[SImode].insn_code = CODE_FOR_divmodsi4; if (HAVE_udivmodhi4) udivmod_optab->handlers[HImode].insn_code = CODE_FOR_udivmodhi4; if (HAVE_anddi3) and_optab->handlers[DImode].insn_code = CODE_FOR_anddi3; and_optab->handlers[SImode].insn_code = CODE_FOR_andsi3; if (HAVE_andhi3) and_optab->handlers[HImode].insn_code = CODE_FOR_andhi3; if (HAVE_andqi3) and_optab->handlers[QImode].insn_code = CODE_FOR_andqi3; if (HAVE_iordi3) ior_optab->handlers[DImode].insn_code = CODE_FOR_iordi3; ior_optab->handlers[SImode].insn_code = CODE_FOR_iorsi3; if (HAVE_iorhi3) ior_optab->handlers[HImode].insn_code = CODE_FOR_iorhi3; if (HAVE_iorqi3) ior_optab->handlers[QImode].insn_code = CODE_FOR_iorqi3; if (HAVE_xordi3) xor_optab->handlers[DImode].insn_code = CODE_FOR_xordi3; xor_optab->handlers[SImode].insn_code = CODE_FOR_xorsi3; if (HAVE_xorhi3) xor_optab->handlers[HImode].insn_code = CODE_FOR_xorhi3; if (HAVE_xorqi3) xor_optab->handlers[QImode].insn_code = CODE_FOR_xorqi3; neg_optab->handlers[DImode].insn_code = CODE_FOR_negdi2; neg_optab->handlers[SImode].insn_code = CODE_FOR_negsi2; if (HAVE_neghi2) neg_optab->handlers[HImode].insn_code = CODE_FOR_neghi2; if (HAVE_negqi2) neg_optab->handlers[QImode].insn_code = CODE_FOR_negqi2; if (HAVE_negsf2) negv_optab->handlers[SFmode].insn_code = neg_optab->handlers[SFmode].insn_code = CODE_FOR_negsf2; if (HAVE_negdf2) negv_optab->handlers[DFmode].insn_code = neg_optab->handlers[DFmode].insn_code = CODE_FOR_negdf2; if (HAVE_negxf2) negv_optab->handlers[XFmode].insn_code = neg_optab->handlers[XFmode].insn_code = CODE_FOR_negxf2; if (HAVE_abssf2) absv_optab->handlers[SFmode].insn_code = abs_optab->handlers[SFmode].insn_code = CODE_FOR_abssf2; if (HAVE_absdf2) absv_optab->handlers[DFmode].insn_code = abs_optab->handlers[DFmode].insn_code = CODE_FOR_absdf2; if (HAVE_absxf2) absv_optab->handlers[XFmode].insn_code = abs_optab->handlers[XFmode].insn_code = CODE_FOR_absxf2; if (HAVE_one_cmpldi2) one_cmpl_optab->handlers[DImode].insn_code = CODE_FOR_one_cmpldi2; one_cmpl_optab->handlers[SImode].insn_code = CODE_FOR_one_cmplsi2; if (HAVE_one_cmplhi2) one_cmpl_optab->handlers[HImode].insn_code = CODE_FOR_one_cmplhi2; if (HAVE_one_cmplqi2) one_cmpl_optab->handlers[QImode].insn_code = CODE_FOR_one_cmplqi2; ashl_optab->handlers[DImode].insn_code = CODE_FOR_ashldi3; ashl_optab->handlers[SImode].insn_code = CODE_FOR_ashlsi3; if (HAVE_ashlhi3) ashl_optab->handlers[HImode].insn_code = CODE_FOR_ashlhi3; if (HAVE_ashlqi3) ashl_optab->handlers[QImode].insn_code = CODE_FOR_ashlqi3; ashr_optab->handlers[DImode].insn_code = CODE_FOR_ashrdi3; ashr_optab->handlers[SImode].insn_code = CODE_FOR_ashrsi3; if (HAVE_ashrhi3) ashr_optab->handlers[HImode].insn_code = CODE_FOR_ashrhi3; if (HAVE_ashrqi3) ashr_optab->handlers[QImode].insn_code = CODE_FOR_ashrqi3; lshr_optab->handlers[DImode].insn_code = CODE_FOR_lshrdi3; lshr_optab->handlers[SImode].insn_code = CODE_FOR_lshrsi3; if (HAVE_lshrhi3) lshr_optab->handlers[HImode].insn_code = CODE_FOR_lshrhi3; if (HAVE_lshrqi3) lshr_optab->handlers[QImode].insn_code = CODE_FOR_lshrqi3; if (HAVE_rotldi3) rotl_optab->handlers[DImode].insn_code = CODE_FOR_rotldi3; rotl_optab->handlers[SImode].insn_code = CODE_FOR_rotlsi3; if (HAVE_rotlhi3) rotl_optab->handlers[HImode].insn_code = CODE_FOR_rotlhi3; if (HAVE_rotlqi3) rotl_optab->handlers[QImode].insn_code = CODE_FOR_rotlqi3; if (HAVE_rotrdi3) rotr_optab->handlers[DImode].insn_code = CODE_FOR_rotrdi3; 
rotr_optab->handlers[SImode].insn_code = CODE_FOR_rotrsi3; if (HAVE_rotrhi3) rotr_optab->handlers[HImode].insn_code = CODE_FOR_rotrhi3; if (HAVE_rotrqi3) rotr_optab->handlers[QImode].insn_code = CODE_FOR_rotrqi3; setcc_gen_code[EQ] = CODE_FOR_seq; setcc_gen_code[NE] = CODE_FOR_sne; setcc_gen_code[GT] = CODE_FOR_sgt; setcc_gen_code[GTU] = CODE_FOR_sgtu; setcc_gen_code[LT] = CODE_FOR_slt; setcc_gen_code[LTU] = CODE_FOR_sltu; setcc_gen_code[GE] = CODE_FOR_sge; setcc_gen_code[GEU] = CODE_FOR_sgeu; setcc_gen_code[LE] = CODE_FOR_sle; setcc_gen_code[LEU] = CODE_FOR_sleu; if (HAVE_sunordered) setcc_gen_code[UNORDERED] = CODE_FOR_sunordered; if (HAVE_sordered) setcc_gen_code[ORDERED] = CODE_FOR_sordered; if (HAVE_suneq) setcc_gen_code[UNEQ] = CODE_FOR_suneq; if (HAVE_sunge) setcc_gen_code[UNGE] = CODE_FOR_sunge; if (HAVE_sungt) setcc_gen_code[UNGT] = CODE_FOR_sungt; if (HAVE_sunle) setcc_gen_code[UNLE] = CODE_FOR_sunle; if (HAVE_sunlt) setcc_gen_code[UNLT] = CODE_FOR_sunlt; if (HAVE_sltgt) setcc_gen_code[LTGT] = CODE_FOR_sltgt; bcc_gen_fctn[EQ] = gen_beq; bcc_gen_fctn[NE] = gen_bne; bcc_gen_fctn[GT] = gen_bgt; bcc_gen_fctn[GTU] = gen_bgtu; bcc_gen_fctn[LT] = gen_blt; bcc_gen_fctn[LTU] = gen_bltu; bcc_gen_fctn[GE] = gen_bge; bcc_gen_fctn[GEU] = gen_bgeu; bcc_gen_fctn[LE] = gen_ble; bcc_gen_fctn[LEU] = gen_bleu; if (HAVE_bunordered) bcc_gen_fctn[UNORDERED] = gen_bunordered; if (HAVE_bordered) bcc_gen_fctn[ORDERED] = gen_bordered; if (HAVE_buneq) bcc_gen_fctn[UNEQ] = gen_buneq; if (HAVE_bunge) bcc_gen_fctn[UNGE] = gen_bunge; if (HAVE_bungt) bcc_gen_fctn[UNGT] = gen_bungt; if (HAVE_bunle) bcc_gen_fctn[UNLE] = gen_bunle; if (HAVE_bunlt) bcc_gen_fctn[UNLT] = gen_bunlt; if (HAVE_bltgt) bcc_gen_fctn[LTGT] = gen_bltgt; ffs_optab->handlers[SImode].insn_code = CODE_FOR_ffssi2; if (HAVE_ffsdi2) ffs_optab->handlers[DImode].insn_code = CODE_FOR_ffsdi2; clz_optab->handlers[SImode].insn_code = CODE_FOR_clzsi2; if (HAVE_clzdi2) clz_optab->handlers[DImode].insn_code = CODE_FOR_clzdi2; if (HAVE_sqrtsf2) sqrt_optab->handlers[SFmode].insn_code = CODE_FOR_sqrtsf2; if (HAVE_sqrtdf2) sqrt_optab->handlers[DFmode].insn_code = CODE_FOR_sqrtdf2; if (HAVE_fmodsf3) fmod_optab->handlers[SFmode].insn_code = CODE_FOR_fmodsf3; if (HAVE_fmoddf3) fmod_optab->handlers[DFmode].insn_code = CODE_FOR_fmoddf3; if (HAVE_fmodxf3) fmod_optab->handlers[XFmode].insn_code = CODE_FOR_fmodxf3; if (HAVE_dremsf3) drem_optab->handlers[SFmode].insn_code = CODE_FOR_dremsf3; if (HAVE_dremdf3) drem_optab->handlers[DFmode].insn_code = CODE_FOR_dremdf3; if (HAVE_dremxf3) drem_optab->handlers[XFmode].insn_code = CODE_FOR_dremxf3; if (HAVE_tandf2) tan_optab->handlers[DFmode].insn_code = CODE_FOR_tandf2; if (HAVE_tansf2) tan_optab->handlers[SFmode].insn_code = CODE_FOR_tansf2; if (HAVE_tanxf2) tan_optab->handlers[XFmode].insn_code = CODE_FOR_tanxf2; if (HAVE_atan2df3) atan2_optab->handlers[DFmode].insn_code = CODE_FOR_atan2df3; if (HAVE_atandf2) atan_optab->handlers[DFmode].insn_code = CODE_FOR_atandf2; if (HAVE_atan2sf3) atan2_optab->handlers[SFmode].insn_code = CODE_FOR_atan2sf3; if (HAVE_atansf2) atan_optab->handlers[SFmode].insn_code = CODE_FOR_atansf2; if (HAVE_atan2xf3) atan2_optab->handlers[XFmode].insn_code = CODE_FOR_atan2xf3; if (HAVE_atanxf2) atan_optab->handlers[XFmode].insn_code = CODE_FOR_atanxf2; if (HAVE_asindf2) asin_optab->handlers[DFmode].insn_code = CODE_FOR_asindf2; if (HAVE_asinsf2) asin_optab->handlers[SFmode].insn_code = CODE_FOR_asinsf2; if (HAVE_asinxf2) asin_optab->handlers[XFmode].insn_code = CODE_FOR_asinxf2; if (HAVE_acosdf2) 
acos_optab->handlers[DFmode].insn_code = CODE_FOR_acosdf2; if (HAVE_acossf2) acos_optab->handlers[SFmode].insn_code = CODE_FOR_acossf2; if (HAVE_acosxf2) acos_optab->handlers[XFmode].insn_code = CODE_FOR_acosxf2; if (HAVE_logsf2) log_optab->handlers[SFmode].insn_code = CODE_FOR_logsf2; if (HAVE_logdf2) log_optab->handlers[DFmode].insn_code = CODE_FOR_logdf2; if (HAVE_logxf2) log_optab->handlers[XFmode].insn_code = CODE_FOR_logxf2; if (HAVE_log10sf2) log10_optab->handlers[SFmode].insn_code = CODE_FOR_log10sf2; if (HAVE_log10df2) log10_optab->handlers[DFmode].insn_code = CODE_FOR_log10df2; if (HAVE_log10xf2) log10_optab->handlers[XFmode].insn_code = CODE_FOR_log10xf2; if (HAVE_log2sf2) log2_optab->handlers[SFmode].insn_code = CODE_FOR_log2sf2; if (HAVE_log2df2) log2_optab->handlers[DFmode].insn_code = CODE_FOR_log2df2; if (HAVE_log2xf2) log2_optab->handlers[XFmode].insn_code = CODE_FOR_log2xf2; if (HAVE_log1psf2) log1p_optab->handlers[SFmode].insn_code = CODE_FOR_log1psf2; if (HAVE_log1pdf2) log1p_optab->handlers[DFmode].insn_code = CODE_FOR_log1pdf2; if (HAVE_log1pxf2) log1p_optab->handlers[XFmode].insn_code = CODE_FOR_log1pxf2; if (HAVE_logbsf2) logb_optab->handlers[SFmode].insn_code = CODE_FOR_logbsf2; if (HAVE_logbdf2) logb_optab->handlers[DFmode].insn_code = CODE_FOR_logbdf2; if (HAVE_logbxf2) logb_optab->handlers[XFmode].insn_code = CODE_FOR_logbxf2; if (HAVE_ilogbsi2) ilogb_optab->handlers[SImode].insn_code = CODE_FOR_ilogbsi2; if (HAVE_expsf2) exp_optab->handlers[SFmode].insn_code = CODE_FOR_expsf2; if (HAVE_expdf2) exp_optab->handlers[DFmode].insn_code = CODE_FOR_expdf2; if (HAVE_expxf2) exp_optab->handlers[XFmode].insn_code = CODE_FOR_expxf2; if (HAVE_exp10sf2) exp10_optab->handlers[SFmode].insn_code = CODE_FOR_exp10sf2; if (HAVE_exp10df2) exp10_optab->handlers[DFmode].insn_code = CODE_FOR_exp10df2; if (HAVE_exp10xf2) exp10_optab->handlers[XFmode].insn_code = CODE_FOR_exp10xf2; if (HAVE_exp2sf2) exp2_optab->handlers[SFmode].insn_code = CODE_FOR_exp2sf2; if (HAVE_exp2df2) exp2_optab->handlers[DFmode].insn_code = CODE_FOR_exp2df2; if (HAVE_exp2xf2) exp2_optab->handlers[XFmode].insn_code = CODE_FOR_exp2xf2; if (HAVE_expm1df2) expm1_optab->handlers[DFmode].insn_code = CODE_FOR_expm1df2; if (HAVE_expm1sf2) expm1_optab->handlers[SFmode].insn_code = CODE_FOR_expm1sf2; if (HAVE_expm1xf2) expm1_optab->handlers[XFmode].insn_code = CODE_FOR_expm1xf2; if (HAVE_movstrsi) movstr_optab[SImode] = CODE_FOR_movstrsi; if (HAVE_movstrdi) movstr_optab[DImode] = CODE_FOR_movstrdi; clrstr_optab[SImode] = CODE_FOR_clrstrsi; if (HAVE_clrstrdi) clrstr_optab[DImode] = CODE_FOR_clrstrdi; if (HAVE_cmpstrsi) cmpstr_optab[SImode] = CODE_FOR_cmpstrsi; strlen_optab->handlers[SImode].insn_code = CODE_FOR_strlensi; strlen_optab->handlers[DImode].insn_code = CODE_FOR_strlendi; if (HAVE_movdicc) movcc_gen_code[DImode] = CODE_FOR_movdicc; movcc_gen_code[SImode] = CODE_FOR_movsicc; if (HAVE_movhicc) movcc_gen_code[HImode] = CODE_FOR_movhicc; if (HAVE_movqicc) movcc_gen_code[QImode] = CODE_FOR_movqicc; if (HAVE_movsfcc) movcc_gen_code[SFmode] = CODE_FOR_movsfcc; if (HAVE_movdfcc) movcc_gen_code[DFmode] = CODE_FOR_movdfcc; if (HAVE_movxfcc) movcc_gen_code[XFmode] = CODE_FOR_movxfcc; if (HAVE_minsf3) smin_optab->handlers[SFmode].insn_code = CODE_FOR_minsf3; addcc_optab->handlers[QImode].insn_code = CODE_FOR_addqicc; addcc_optab->handlers[HImode].insn_code = CODE_FOR_addhicc; addcc_optab->handlers[SImode].insn_code = CODE_FOR_addsicc; if (HAVE_adddicc) addcc_optab->handlers[DImode].insn_code = CODE_FOR_adddicc; if 
(HAVE_mindf3) smin_optab->handlers[DFmode].insn_code = CODE_FOR_mindf3; if (HAVE_maxsf3) smax_optab->handlers[SFmode].insn_code = CODE_FOR_maxsf3; if (HAVE_maxdf3) smax_optab->handlers[DFmode].insn_code = CODE_FOR_maxdf3; if (HAVE_movti) mov_optab->handlers[TImode].insn_code = CODE_FOR_movti; if (HAVE_movtf) mov_optab->handlers[TFmode].insn_code = CODE_FOR_movtf; if (HAVE_movv2df) mov_optab->handlers[V2DFmode].insn_code = CODE_FOR_movv2df; if (HAVE_movv8hi) mov_optab->handlers[V8HImode].insn_code = CODE_FOR_movv8hi; if (HAVE_movv16qi) mov_optab->handlers[V16QImode].insn_code = CODE_FOR_movv16qi; if (HAVE_movv4sf) mov_optab->handlers[V4SFmode].insn_code = CODE_FOR_movv4sf; if (HAVE_movv4si) mov_optab->handlers[V4SImode].insn_code = CODE_FOR_movv4si; if (HAVE_movv2di) mov_optab->handlers[V2DImode].insn_code = CODE_FOR_movv2di; if (HAVE_movv2si) mov_optab->handlers[V2SImode].insn_code = CODE_FOR_movv2si; if (HAVE_movv4hi) mov_optab->handlers[V4HImode].insn_code = CODE_FOR_movv4hi; if (HAVE_movv8qi) mov_optab->handlers[V8QImode].insn_code = CODE_FOR_movv8qi; if (HAVE_movv2sf) mov_optab->handlers[V2SFmode].insn_code = CODE_FOR_movv2sf; if (HAVE_negv4sf2) negv_optab->handlers[V4SFmode].insn_code = neg_optab->handlers[V4SFmode].insn_code = CODE_FOR_negv4sf2; #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC /* This flag says the same insns that convert to a signed fixnum also convert validly to an unsigned one. */ for (i = 0; i < NUM_MACHINE_MODES; i++) for (j = 0; j < NUM_MACHINE_MODES; j++) ufixtrunc_optab->handlers[i][j].insn_code = sfixtrunc_optab->handlers[i][j].insn_code; #endif } /* Generated automatically by the program `genoutput' from the machine description file `md'. */ /* Definitions for condition code handling in final.c and output routines. Copyright (C) 1987 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* None of the things in the files exist if we don't use CC0. */ #ifdef HAVE_cc0 /* The variable cc_status says how to interpret the condition code. It is set by output routines for an instruction that sets the cc's and examined by output routines for jump instructions. cc_status contains two components named `value1' and `value2' that record two equivalent expressions for the values that the condition codes were set from. (Either or both may be null if there is no useful expression to record.) These fields are used for eliminating redundant test and compare instructions in the cases where the condition codes were already set by the previous instruction. cc_status.flags contains flags which say that the condition codes were set in a nonstandard manner. The output of jump instructions uses these flags to compensate and produce the standard result with the nonstandard condition codes. Standard flags are defined here. The tm.h file can also define other machine-dependent flags. 
cc_status also contains a machine-dependent component `mdep' whose type, `CC_STATUS_MDEP', may be defined as a macro in the tm.h file. */ #ifndef CC_STATUS_MDEP #define CC_STATUS_MDEP int #endif #ifndef CC_STATUS_MDEP_INIT #define CC_STATUS_MDEP_INIT 0 #endif typedef struct {int flags; rtx value1, value2; CC_STATUS_MDEP mdep;} CC_STATUS; /* While outputting an insn as assembler code, this is the status BEFORE that insn. */ extern CC_STATUS cc_prev_status; /* While outputting an insn as assembler code, this is being altered to the status AFTER that insn. */ extern CC_STATUS cc_status; /* These are the machine-independent flags: */ /* Set if the sign of the cc value is inverted: output a following jump-if-less as a jump-if-greater, etc. */ #define CC_REVERSED 1 /* This bit means that the current setting of the N bit is bogus and conditional jumps should use the Z bit in its place. This state obtains when an extraction of a signed single-bit field or an arithmetic shift right of a byte by 7 bits is turned into a btst, because btst does not set the N bit. */ #define CC_NOT_POSITIVE 2 /* This bit means that the current setting of the N bit is bogus and conditional jumps should pretend that the N bit is clear. Used after extraction of an unsigned bit or logical shift right of a byte by 7 bits is turned into a btst. The btst does not alter the N bit, but the result of that shift or extract is never negative. */ #define CC_NOT_NEGATIVE 4 /* This bit means that the current setting of the overflow flag is bogus and conditional jumps should pretend there is no overflow. */ /* ??? Note that for most targets this macro is misnamed as it applies to the carry flag, not the overflow flag. */ #define CC_NO_OVERFLOW 010 /* This bit means that what ought to be in the Z bit should be tested as the complement of the N bit. */ #define CC_Z_IN_NOT_N 020 /* This bit means that what ought to be in the Z bit should be tested as the N bit. */ #define CC_Z_IN_N 040 /* Nonzero if we must invert the sense of the following branch, i.e. change EQ to NE. This is not safe for IEEE floating point operations! It is intended for use only when a combination of arithmetic or logical insns can leave the condition codes set in a fortuitous (though inverted) state. */ #define CC_INVERTED 0100 /* Nonzero if we must convert signed condition operators to unsigned. This is only used by machine description files. */ #define CC_NOT_SIGNED 0200 /* This is how to initialize the variable cc_status. final does this at appropriate moments. 
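As a purely illustrative sketch (a hypothetical CC0 target, not code taken from this file or from any tm.h), an output routine for a compare instruction would typically record what the flags now reflect, roughly

      CC_STATUS_INIT;
      cc_status.value1 = operands[0];
      cc_status.value2 = operands[1];

so that, when a later test or compare of the same values is output, the recorded value1/value2 show the flags are already correct and the redundant instruction can be omitted, and so that the routine which outputs a conditional jump can first inspect cc_status.flags and compensate when a bit such as CC_REVERSED or CC_Z_IN_NOT_N is set. The CC_STATUS_INIT macro defined just below is the initialization referred to above.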
*/ #define CC_STATUS_INIT \ (cc_status.flags = 0, cc_status.value1 = 0, cc_status.value2 = 0, \ CC_STATUS_MDEP_INIT) #endif static const char * const output_0[] = { "test{q}\t{%0, %0|%0, %0}", "cmp{q}\t{%1, %0|%0, %1}", }; static const char * const output_3[] = { "test{l}\t{%0, %0|%0, %0}", "cmp{l}\t{%1, %0|%0, %1}", }; static const char * const output_6[] = { "test{w}\t{%0, %0|%0, %0}", "cmp{w}\t{%1, %0|%0, %1}", }; static const char * const output_9[] = { "test{b}\t{%0, %0|%0, %0}", "cmp{b}\t{$0, %0|%0, 0}", }; static const char * output_18 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "ftst\n\tfnstsw\t%0\n\tfstp\t%y0"; else return "ftst\n\tfnstsw\t%0"; } } static const char * output_19 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 0, 0); } static const char * output_20 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 2, 0); } static const char * output_21 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 0, 0); } static const char * output_22 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 2, 0); } static const char * output_23 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 0, 0); } static const char * output_24 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 2, 0); } static const char * output_25 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 0, 1); } static const char * output_26 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 2, 1); } static const char * output_30 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 1, 0); } static const char * output_31 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 1, 0); } static const char * output_32 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 1, 0); } static const char * output_33 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 1, 1); } static const char * output_34 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 1, 1); } static const char * output_35 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fp_compare (insn, operands, 1, 1); } static const char * output_42 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { operands[1] = constm1_rtx; return "or{l}\t{%1, %0|%0, %1}"; } } static const char * output_43 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_SSEMOV: if (get_attr_mode (insn) == MODE_TI) return "movdqa\t{%1, %0|%0, %1}"; return "movd\t{%1, %0|%0, %1}"; case TYPE_MMXMOV: if (get_attr_mode (insn) == MODE_DI) return "movq\t{%1, %0|%0, %1}"; return "movd\t{%1, %0|%0, %1}"; case TYPE_LEA: return "lea{l}\t{%1, %0|%0, %1}"; default: if (flag_pic && !LEGITIMATE_PIC_OPERAND_P (operands[1])) abort(); return "mov{l}\t{%1, %0|%0, %1}"; } } } static const char * output_44 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch 
(get_attr_type (insn)) { case TYPE_SSEMOV: if (get_attr_mode (insn) == MODE_TI) return "movdqa\t{%1, %0|%0, %1}"; return "movd\t{%1, %0|%0, %1}"; case TYPE_MMXMOV: if (get_attr_mode (insn) == MODE_DI) return "movq\t{%1, %0|%0, %1}"; return "movd\t{%1, %0|%0, %1}"; case TYPE_LEA: return "lea{l}\t{%1, %0|%0, %1}"; default: if (flag_pic && !LEGITIMATE_PIC_OPERAND_P (operands[1])) abort(); return "mov{l}\t{%1, %0|%0, %1}"; } } } static const char * const output_45[] = { "movabs{l}\t{%1, %P0|%P0, %1}", "mov{l}\t{%1, %a0|%a0, %1}", }; static const char * const output_46[] = { "movabs{l}\t{%P1, %0|%0, %P1}", "mov{l}\t{%a1, %0|%0, %a1}", }; static const char * const output_48[] = { "push{w}\t{|WORD PTR }%1", "push{w}\t%1", }; static const char * output_50 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOVX: /* movzwl is faster than movw on p2 due to partial word stalls, though not as fast as an aligned movl. */ return "movz{wl|x}\t{%1, %k0|%k0, %1}"; default: if (get_attr_mode (insn) == MODE_SI) return "mov{l}\t{%k1, %k0|%k0, %k1}"; else return "mov{w}\t{%1, %0|%0, %1}"; } } } static const char * const output_51[] = { "movabs{w}\t{%1, %P0|%P0, %1}", "mov{w}\t{%1, %a0|%a0, %1}", }; static const char * const output_52[] = { "movabs{w}\t{%P1, %0|%0, %P1}", "mov{w}\t{%a1, %0|%0, %a1}", }; static const char * const output_57[] = { "push{w}\t{|word ptr }%1", "push{w}\t%w1", }; static const char * output_59 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOVX: if (!ANY_QI_REG_P (operands[1]) && GET_CODE (operands[1]) != MEM) abort (); return "movz{bl|x}\t{%1, %k0|%k0, %1}"; default: if (get_attr_mode (insn) == MODE_SI) return "mov{l}\t{%k1, %k0|%k0, %k1}"; else return "mov{b}\t{%1, %0|%0, %1}"; } } } static const char * output_65 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOVX: return "movs{bl|x}\t{%h1, %k0|%k0, %h1}"; default: return "mov{b}\t{%h1, %0|%0, %h1}"; } } } static const char * output_66 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOVX: return "movs{bl|x}\t{%h1, %k0|%k0, %h1}"; default: return "mov{b}\t{%h1, %0|%0, %h1}"; } } } static const char * const output_67[] = { "movabs{b}\t{%1, %P0|%P0, %1}", "mov{b}\t{%1, %a0|%a0, %1}", }; static const char * const output_68[] = { "movabs{b}\t{%P1, %0|%0, %P1}", "mov{b}\t{%a1, %0|%0, %a1}", }; static const char * output_70 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOVX: return "movz{bl|x}\t{%h1, %k0|%k0, %h1}"; default: return "mov{b}\t{%h1, %0|%0, %h1}"; } } } static const char * output_71 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOVX: return "movz{bl|x}\t{%h1, %k0|%k0, %h1}"; default: return "mov{b}\t{%h1, %0|%0, %h1}"; } } } static const char * const output_76[] = { "push{q}\t%1", "#", }; static const char * output_81 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { operands[1] = constm1_rtx; return "or{q}\t{%1, %0|%0, %1}"; } } static const char * const output_82[] = { "#", "#", "movq\t{%1, %0|%0, %1}", "movq\t{%1, %0|%0, %1}", "movq\t{%1, %0|%0, %1}", "movdqa\t{%1, %0|%0, %1}", "movq\t{%1, %0|%0, %1}", }; static const char * output_83 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_SSEMOV: if 
(get_attr_mode (insn) == MODE_TI) return "movdqa\t{%1, %0|%0, %1}"; /* FALLTHRU */ case TYPE_MMXMOV: /* Moves from and into integer registers are done using the movd opcode with a REX prefix. */ if (GENERAL_REG_P (operands[0]) || GENERAL_REG_P (operands[1])) return "movd\t{%1, %0|%0, %1}"; return "movq\t{%1, %0|%0, %1}"; case TYPE_MULTI: return "#"; case TYPE_LEA: return "lea{q}\t{%a1, %0|%0, %a1}"; default: if (flag_pic && !LEGITIMATE_PIC_OPERAND_P (operands[1])) abort (); if (get_attr_mode (insn) == MODE_SI) return "mov{l}\t{%k1, %k0|%k0, %k1}"; else if (which_alternative == 2) return "movabs{q}\t{%1, %0|%0, %1}"; else return "mov{q}\t{%1, %0|%0, %1}"; } } } static const char * output_84 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_SSEMOV: if (get_attr_mode (insn) == MODE_TI) return "movdqa\t{%1, %0|%0, %1}"; /* FALLTHRU */ case TYPE_MMXMOV: return "movq\t{%1, %0|%0, %1}"; case TYPE_MULTI: return "#"; case TYPE_LEA: return "lea{q}\t{%a1, %0|%0, %a1}"; default: if (flag_pic && !LEGITIMATE_PIC_OPERAND_P (operands[1])) abort (); if (get_attr_mode (insn) == MODE_SI) return "mov{l}\t{%k1, %k0|%k0, %k1}"; else if (which_alternative == 2) return "movabs{q}\t{%1, %0|%0, %1}"; else return "mov{q}\t{%1, %0|%0, %1}"; } } } static const char * const output_85[] = { "movabs{q}\t{%1, %P0|%P0, %1}", "mov{q}\t{%1, %a0|%a0, %1}", }; static const char * const output_86[] = { "movabs{q}\t{%P1, %0|%0, %P1}", "mov{q}\t{%a1, %0|%0, %a1}", }; static const char * output_88 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 1: return "push{l}\t%1"; default: /* This insn should be already split before reg-stack. */ abort (); } } } static const char * output_89 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 1: return "push{q}\t%q1"; default: /* This insn should be already split before reg-stack. 
*/ abort (); } } } static const char * output_90 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return output_387_reg_move (insn, operands); case 1: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; case 2: return standard_80387_constant_opcode (operands[1]); case 3: case 4: return "mov{l}\t{%1, %0|%0, %1}"; case 5: if (get_attr_mode (insn) == MODE_TI) return "pxor\t%0, %0"; else return "xorps\t%0, %0"; case 6: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movss\t{%1, %0|%0, %1}"; case 7: case 8: return "movss\t{%1, %0|%0, %1}"; case 9: case 10: return "movd\t{%1, %0|%0, %1}"; case 11: return "movq\t{%1, %0|%0, %1}"; default: abort(); } } } static const char * output_91 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return output_387_reg_move (insn, operands); case 1: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; case 2: return standard_80387_constant_opcode (operands[1]); case 3: case 4: return "mov{l}\t{%1, %0|%0, %1}"; case 5: if (get_attr_mode (insn) == MODE_TI) return "pxor\t%0, %0"; else return "xorps\t%0, %0"; case 6: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movss\t{%1, %0|%0, %1}"; case 7: case 8: return "movss\t{%1, %0|%0, %1}"; case 9: case 10: return "movd\t{%1, %0|%0, %1}"; case 11: return "movq\t{%1, %0|%0, %1}"; default: abort(); } } } static const char * output_92 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (STACK_TOP_P (operands[0])) return "fxch\t%1"; else return "fxch\t%0"; } } static const char * output_93 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { /* This insn should be already split before reg-stack. */ abort (); } } static const char * output_94 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { /* This insn should be already split before reg-stack. 
*/ abort (); } } static const char * output_95 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return output_387_reg_move (insn, operands); case 1: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; case 2: return standard_80387_constant_opcode (operands[1]); case 3: case 4: return "#"; case 5: switch (get_attr_mode (insn)) { case MODE_V4SF: return "xorps\t%0, %0"; case MODE_V2DF: return "xorpd\t%0, %0"; case MODE_TI: return "pxor\t%0, %0"; default: abort (); } case 6: switch (get_attr_mode (insn)) { case MODE_V4SF: return "movaps\t{%1, %0|%0, %1}"; case MODE_V2DF: return "movapd\t{%1, %0|%0, %1}"; case MODE_DF: return "movsd\t{%1, %0|%0, %1}"; default: abort (); } case 7: if (get_attr_mode (insn) == MODE_V2DF) return "movlpd\t{%1, %0|%0, %1}"; else return "movsd\t{%1, %0|%0, %1}"; case 8: return "movsd\t{%1, %0|%0, %1}"; default: abort(); } } } static const char * output_96 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return output_387_reg_move (insn, operands); case 1: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; case 2: return standard_80387_constant_opcode (operands[1]); case 3: case 4: return "#"; case 5: switch (get_attr_mode (insn)) { case MODE_V4SF: return "xorps\t%0, %0"; case MODE_V2DF: return "xorpd\t%0, %0"; case MODE_TI: return "pxor\t%0, %0"; default: abort (); } case 6: switch (get_attr_mode (insn)) { case MODE_V4SF: return "movaps\t{%1, %0|%0, %1}"; case MODE_V2DF: return "movapd\t{%1, %0|%0, %1}"; case MODE_DF: return "movsd\t{%1, %0|%0, %1}"; default: abort (); } case 7: if (get_attr_mode (insn) == MODE_V2DF) return "movlpd\t{%1, %0|%0, %1}"; else return "movsd\t{%1, %0|%0, %1}"; case 8: return "movsd\t{%1, %0|%0, %1}"; default: abort(); } } } static const char * output_97 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (STACK_TOP_P (operands[0])) return "fxch\t%1"; else return "fxch\t%0"; } } static const char * output_98 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { /* This insn should be already split before reg-stack. */ abort (); } } static const char * output_99 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { /* This insn should be already split before reg-stack. */ abort (); } } static const char * output_100 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return output_387_reg_move (insn, operands); case 1: /* There is no non-popping store to memory for XFmode. So if we need one, follow the store with a load. */ if (! find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0\n\tfld%z0\t%y0"; else return "fstp%z0\t%y0"; case 2: return standard_80387_constant_opcode (operands[1]); case 3: case 4: return "#"; } abort(); } } static const char * output_101 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return output_387_reg_move (insn, operands); case 1: /* There is no non-popping store to memory for XFmode. So if we need one, follow the store with a load. */ if (! 
find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0\n\tfld%z0\t%y0"; else return "fstp%z0\t%y0"; case 2: return standard_80387_constant_opcode (operands[1]); case 3: case 4: return "#"; } abort(); } } static const char * output_102 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (STACK_TOP_P (operands[0])) return "fxch\t%1"; else return "fxch\t%0"; } } static const char * const output_111[] = { "#", "#", "#", "movd\t{%1, %0|%0, %1}", "movd\t{%1, %0|%0, %1}", }; static const char * const output_112[] = { "#", "#", "#", "movd\t{%1, %0|%0, %1}", "movd\t{%1, %0|%0, %1}", }; static const char * const output_113[] = { "mov\t{%k1, %k0|%k0, %k1}", "#", "movd\t{%1, %0|%0, %1}", "movd\t{%1, %0|%0, %1}", }; static const char * const output_114[] = { "mov\t{%k1, %k0|%k0, %k1}", "#", "movd\t{%1, %0|%0, %1}", "movd\t{%1, %0|%0, %1}", }; static const char * const output_115[] = { "movz{wl|x}\t{%1, %k0|%k0, %1}", "movz{wq|x}\t{%1, %0|%0, %1}", }; static const char * const output_116[] = { "movz{bl|x}\t{%1, %k0|%k0, %1}", "movz{bq|x}\t{%1, %0|%0, %1}", }; static const char * const output_118[] = { "{cltq|cdqe}", "movs{lq|x}\t{%1,%0|%0, %1}", }; static const char * output_121 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_prefix_0f (insn)) { case 0: return "{cwtl|cwde}"; default: return "movs{wl|x}\t{%1,%0|%0, %1}"; } } } static const char * output_122 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_prefix_0f (insn)) { case 0: return "{cwtl|cwde}"; default: return "movs{wl|x}\t{%1,%k0|%k0, %1}"; } } } static const char * output_123 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_prefix_0f (insn)) { case 0: return "{cbtw|cbw}"; default: return "movs{bw|x}\t{%1,%0|%0, %1}"; } } } static const char * output_128 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return output_387_reg_move (insn, operands); case 1: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; case 2: return "cvtss2sd\t{%1, %0|%0, %1}"; default: abort (); } } } static const char * output_130 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return output_387_reg_move (insn, operands); case 1: /* There is no non-popping store to memory for XFmode. So if we need one, follow the store with a load. */ if (! find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0\n\tfld%z0\t%y0"; else return "fstp%z0\t%y0"; default: abort (); } } } static const char * output_131 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return output_387_reg_move (insn, operands); case 1: /* There is no non-popping store to memory for XFmode. So if we need one, follow the store with a load. */ if (! 
find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0\n\tfld%z0\t%y0"; else return "fstp%z0\t%y0"; default: abort (); } } } static const char * output_132 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { return output_387_reg_move (insn, operands); } } static const char * output_133 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; default: abort (); } } } static const char * output_134 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; case 4: return "#"; default: abort (); } } } static const char * output_135 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; case 4: return "#"; default: abort (); } } } static const char * output_136 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: case 1: return "cvtsd2ss\t{%1, %0|%0, %1}"; case 2: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; default: abort (); } } } static const char * output_137 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: return "#"; case 1: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; default: abort (); } } } static const char * output_138 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; } } static const char * output_141 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { return output_387_reg_move (insn, operands); } } static const char * output_142 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; default: abort(); } } } static const char * output_143 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; } } static const char * output_144 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { return output_387_reg_move (insn, operands); } } static const char * output_145 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; default: abort(); } abort (); } } static const char * output_146 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) return "fstp%z0\t%y0"; else return "fst%z0\t%y0"; } } static const char * output_149 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { operands[5] = operands[4]; return output_fix_trunc (insn, operands); } static const char * output_154 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_fix_trunc (insn, operands); } static const char * output_159 (rtx *operands ATTRIBUTE_UNUSED, rtx 
insn ATTRIBUTE_UNUSED) { return output_fix_trunc (insn, operands); } static const char * const output_162[] = { "fild%z1\t%1", "#", }; static const char * const output_163[] = { "fild%z1\t%1", "#", "cvtsi2ss\t{%1, %0|%0, %1}", "cvtsi2ss\t{%1, %0|%0, %1}", }; static const char * const output_165[] = { "fild%z1\t%1", "#", }; static const char * const output_166[] = { "fild%z1\t%1", "#", "cvtsi2ss{q}\t{%1, %0|%0, %1}", "cvtsi2ss{q}\t{%1, %0|%0, %1}", }; static const char * const output_168[] = { "fild%z1\t%1", "#", }; static const char * const output_169[] = { "fild%z1\t%1", "#", "cvtsi2sd\t{%1, %0|%0, %1}", "cvtsi2sd\t{%1, %0|%0, %1}", }; static const char * const output_171[] = { "fild%z1\t%1", "#", }; static const char * const output_172[] = { "fild%z1\t%1", "#", "cvtsi2sd{q}\t{%1, %0|%0, %1}", "cvtsi2sd{q}\t{%1, %0|%0, %1}", }; static const char * const output_174[] = { "fild%z1\t%1", "#", }; static const char * const output_175[] = { "fild%z1\t%1", "#", }; static const char * const output_176[] = { "fild%z1\t%1", "#", }; static const char * output_196 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_LEA: operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0)); return "lea{q}\t{%a2, %0|%0, %a2}"; case TYPE_INCDEC: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (operands[2] == const1_rtx) return "inc{q}\t%0"; else if (operands[2] == constm1_rtx) return "dec{q}\t%0"; else abort (); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT /* Avoid overflows. */ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1))) && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{q}\t{%2, %0|%0, %2}"; } return "add{q}\t{%2, %0|%0, %2}"; } } } static const char * output_197 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (operands[2] == const1_rtx) return "inc{q}\t%0"; else if (operands[2] == constm1_rtx) return "dec{q}\t%0"; else abort (); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* ???? We ought to handle there the 32bit case too - do we need new constraint? */ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT /* Avoid overflows. */ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1))) && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{q}\t{%2, %0|%0, %2}"; } return "add{q}\t{%2, %0|%0, %2}"; } } } static const char * output_198 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (operands[2] == const1_rtx) return "inc{q}\t%0"; else if (operands[2] == constm1_rtx) return "dec{q}\t%0"; else abort (); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* ???? We ought to handle there the 32bit case too - do we need new constraint? */ /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. 
Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT /* Avoid overflows. */ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1))) && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{q}\t{%2, %0|%0, %2}"; } return "add{q}\t{%2, %0|%0, %2}"; } } } static const char * output_199 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == constm1_rtx) return "inc{q}\t%0"; else if (operands[2] == const1_rtx) return "dec{q}\t%0"; else abort(); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if ((INTVAL (operands[2]) == -128 || (INTVAL (operands[2]) > 0 && INTVAL (operands[2]) != 128)) /* Avoid overflows. */ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1)))) return "sub{q}\t{%2, %0|%0, %2}"; operands[2] = GEN_INT (-INTVAL (operands[2])); return "add{q}\t{%2, %0|%0, %2}"; } } } static const char * output_200 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (operands[2] == const1_rtx) return "inc{q}\t%0"; else if (operands[2] == constm1_rtx) return "dec{q}\t%0"; else abort(); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT /* Avoid overflows. */ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1))) && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{q}\t{%2, %0|%0, %2}"; } return "add{q}\t{%2, %0|%0, %2}"; } } } static const char * output_201 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_LEA: operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0)); return "lea{l}\t{%a2, %0|%0, %a2}"; case TYPE_INCDEC: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (operands[2] == const1_rtx) return "inc{l}\t%0"; else if (operands[2] == constm1_rtx) return "dec{l}\t%0"; else abort(); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{l}\t{%2, %0|%0, %2}"; } return "add{l}\t{%2, %0|%0, %2}"; } } } static const char * output_202 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_LEA: operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0)); return "lea{l}\t{%a2, %k0|%k0, %a2}"; case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{l}\t%k0"; else if (operands[2] == constm1_rtx) return "dec{l}\t%k0"; else abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. 
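(Clarifying note, not from the original sources: x86 sign-extends 8-bit immediates, so the short form covers -128 through 127. `add $128' therefore needs a full-width immediate while the equivalent `sub $-128' fits in one byte; conversely `add $-128' is kept as-is, since rewriting it as `sub $128' would lose the short encoding. That is why 128 and -128 are the two values singled out in the test below.)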
*/ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{l}\t{%2, %k0|%k0, %2}"; } return "add{l}\t{%2, %k0|%k0, %2}"; } } } static const char * output_203 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (operands[2] == const1_rtx) return "inc{l}\t%0"; else if (operands[2] == constm1_rtx) return "dec{l}\t%0"; else abort(); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{l}\t{%2, %0|%0, %2}"; } return "add{l}\t{%2, %0|%0, %2}"; } } } static const char * output_204 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{l}\t%k0"; else if (operands[2] == constm1_rtx) return "dec{l}\t%k0"; else abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{l}\t{%2, %k0|%k0, %2}"; } return "add{l}\t{%2, %k0|%k0, %2}"; } } } static const char * output_205 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (operands[2] == const1_rtx) return "inc{l}\t%0"; else if (operands[2] == constm1_rtx) return "dec{l}\t%0"; else abort(); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{l}\t{%2, %0|%0, %2}"; } return "add{l}\t{%2, %0|%0, %2}"; } } } static const char * output_206 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{l}\t%k0"; else if (operands[2] == constm1_rtx) return "dec{l}\t%k0"; else abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{l}\t{%2, %k0|%k0, %2}"; } return "add{l}\t{%2, %k0|%k0, %2}"; } } } static const char * output_207 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == constm1_rtx) return "inc{l}\t%0"; else if (operands[2] == const1_rtx) return "dec{l}\t%0"; else abort(); default: if (! 
rtx_equal_p (operands[0], operands[1])) abort (); /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if ((INTVAL (operands[2]) == -128 || (INTVAL (operands[2]) > 0 && INTVAL (operands[2]) != 128))) return "sub{l}\t{%2, %0|%0, %2}"; operands[2] = GEN_INT (-INTVAL (operands[2])); return "add{l}\t{%2, %0|%0, %2}"; } } } static const char * output_208 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (operands[2] == const1_rtx) return "inc{l}\t%0"; else if (operands[2] == constm1_rtx) return "dec{l}\t%0"; else abort(); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{l}\t{%2, %0|%0, %2}"; } return "add{l}\t{%2, %0|%0, %2}"; } } } static const char * output_209 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_LEA: return "#"; case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{w}\t%0"; else if (operands[2] == constm1_rtx) return "dec{w}\t%0"; abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{w}\t{%2, %0|%0, %2}"; } return "add{w}\t{%2, %0|%0, %2}"; } } } static const char * output_210 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{w}\t%0"; else if (operands[2] == constm1_rtx) return "dec{w}\t%0"; abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{w}\t{%2, %0|%0, %2}"; } return "add{w}\t{%2, %0|%0, %2}"; } } } static const char * output_211 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{w}\t%0"; else if (operands[2] == constm1_rtx) return "dec{w}\t%0"; abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. 
*/ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{w}\t{%2, %0|%0, %2}"; } return "add{w}\t{%2, %0|%0, %2}"; } } } static const char * output_212 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{w}\t%0"; else if (operands[2] == constm1_rtx) return "dec{w}\t%0"; abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{w}\t{%2, %0|%0, %2}"; } return "add{w}\t{%2, %0|%0, %2}"; } } } static const char * output_213 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == constm1_rtx) return "inc{w}\t%0"; else if (operands[2] == const1_rtx) return "dec{w}\t%0"; else abort(); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if ((INTVAL (operands[2]) == -128 || (INTVAL (operands[2]) > 0 && INTVAL (operands[2]) != 128))) return "sub{w}\t{%2, %0|%0, %2}"; operands[2] = GEN_INT (-INTVAL (operands[2])); return "add{w}\t{%2, %0|%0, %2}"; } } } static const char * output_214 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{w}\t%0"; else if (operands[2] == constm1_rtx) return "dec{w}\t%0"; abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{w}\t{%2, %0|%0, %2}"; } return "add{w}\t{%2, %0|%0, %2}"; } } } static const char * output_215 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { int widen = (which_alternative == 2); switch (get_attr_type (insn)) { case TYPE_LEA: return "#"; case TYPE_INCDEC: if (operands[2] == const1_rtx) return widen ? "inc{l}\t%k0" : "inc{b}\t%0"; else if (operands[2] == constm1_rtx) return widen ? "dec{l}\t%k0" : "dec{b}\t%0"; abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); if (widen) return "sub{l}\t{%2, %k0|%k0, %2}"; else return "sub{b}\t{%2, %0|%0, %2}"; } if (widen) return "add{l}\t{%k2, %k0|%k0, %k2}"; else return "add{b}\t{%2, %0|%0, %2}"; } } } static const char * output_216 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { int widen = (which_alternative == 2); switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return widen ? "inc{l}\t%k0" : "inc{b}\t%0"; else if (operands[2] == constm1_rtx) return widen ? 
"dec{l}\t%k0" : "dec{b}\t%0"; abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. Exceptions: -128 encodes smaller than 128, so swap sign and op. */ if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); if (widen) return "sub{l}\t{%2, %k0|%k0, %2}"; else return "sub{b}\t{%2, %0|%0, %2}"; } if (widen) return "add{l}\t{%k2, %k0|%k0, %k2}"; else return "add{b}\t{%2, %0|%0, %2}"; } } } static const char * output_217 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[1] == const1_rtx) return "inc{b}\t%0"; else if (operands[1] == constm1_rtx) return "dec{b}\t%0"; abort(); default: /* Make things pretty and `subl $4,%eax' rather than `addl $-4, %eax'. */ if (GET_CODE (operands[1]) == CONST_INT && INTVAL (operands[1]) < 0) { operands[1] = GEN_INT (-INTVAL (operands[1])); return "sub{b}\t{%1, %0|%0, %1}"; } return "add{b}\t{%1, %0|%0, %1}"; } } } static const char * output_218 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{b}\t%0"; else if (operands[2] == constm1_rtx || (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 255)) return "dec{b}\t%0"; abort(); default: /* Make things pretty and `subb $4,%al' rather than `addb $-4, %al'. */ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{b}\t{%2, %0|%0, %2}"; } return "add{b}\t{%2, %0|%0, %2}"; } } } static const char * output_219 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{b}\t%0"; else if (operands[2] == constm1_rtx || (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 255)) return "dec{b}\t%0"; abort(); default: /* Make things pretty and `subb $4,%al' rather than `addb $-4, %al'. */ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{b}\t{%2, %0|%0, %2}"; } return "add{b}\t{%2, %0|%0, %2}"; } } } static const char * output_220 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == constm1_rtx || (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 255)) return "inc{b}\t%0"; else if (operands[2] == const1_rtx) return "dec{b}\t%0"; else abort(); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (INTVAL (operands[2]) < 0) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "add{b}\t{%2, %0|%0, %2}"; } return "sub{b}\t{%2, %0|%0, %2}"; } } } static const char * output_221 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{b}\t%0"; else if (operands[2] == constm1_rtx || (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 255)) return "dec{b}\t%0"; abort(); default: /* Make things pretty and `subb $4,%al' rather than `addb $-4, %al'. 
*/ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{b}\t{%2, %0|%0, %2}"; } return "add{b}\t{%2, %0|%0, %2}"; } } } static const char * output_222 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{b}\t%h0"; else if (operands[2] == constm1_rtx || (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 255)) return "dec{b}\t%h0"; abort(); default: return "add{b}\t{%2, %h0|%h0, %2}"; } } } static const char * output_223 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_INCDEC: if (operands[2] == const1_rtx) return "inc{b}\t%h0"; else if (operands[2] == constm1_rtx || (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 255)) return "dec{b}\t%h0"; abort(); default: return "add{b}\t{%2, %h0|%h0, %2}"; } } } static const char * const output_247[] = { "imul{q}\t{%2, %1, %0|%0, %1, %2}", "imul{q}\t{%2, %1, %0|%0, %1, %2}", "imul{q}\t{%2, %0|%0, %2}", }; static const char * const output_248[] = { "imul{l}\t{%2, %1, %0|%0, %1, %2}", "imul{l}\t{%2, %1, %0|%0, %1, %2}", "imul{l}\t{%2, %0|%0, %2}", }; static const char * const output_249[] = { "imul{l}\t{%2, %1, %k0|%k0, %1, %2}", "imul{l}\t{%2, %1, %k0|%k0, %1, %2}", "imul{l}\t{%2, %k0|%k0, %2}", }; static const char * const output_250[] = { "imul{w}\t{%2, %1, %0|%0, %1, %2}", "imul{w}\t{%2, %1, %0|%0, %1, %2}", "imul{w}\t{%2, %0|%0, %2}", }; static const char * const output_278[] = { "test{l}\t{%k1, %k0|%k0, %k1}", "test{l}\t{%k1, %k0|%k0, %k1}", "test{q}\t{%1, %0|%0, %1}", "test{q}\t{%1, %0|%0, %1}", "test{q}\t{%1, %0|%0, %1}", }; static const char * output_281 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (which_alternative == 3) { if (GET_CODE (operands[1]) == CONST_INT && (INTVAL (operands[1]) & 0xffffff00)) operands[1] = GEN_INT (INTVAL (operands[1]) & 0xff); return "test{l}\t{%1, %k0|%k0, %1}"; } return "test{b}\t{%1, %0|%0, %1}"; } } static const char * output_288 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOVX: { enum machine_mode mode; if (GET_CODE (operands[2]) != CONST_INT) abort (); if (INTVAL (operands[2]) == 0xff) mode = QImode; else if (INTVAL (operands[2]) == 0xffff) mode = HImode; else abort (); operands[1] = gen_lowpart (mode, operands[1]); if (mode == QImode) return "movz{bq|x}\t{%1,%0|%0, %1}"; else return "movz{wq|x}\t{%1,%0|%0, %1}"; } default: if (! rtx_equal_p (operands[0], operands[1])) abort (); if (get_attr_mode (insn) == MODE_SI) return "and{l}\t{%k2, %k0|%k0, %k2}"; else return "and{q}\t{%2, %0|%0, %2}"; } } } static const char * const output_289[] = { "and{l}\t{%k2, %k0|%k0, %k2}", "and{q}\t{%2, %0|%0, %2}", "and{q}\t{%2, %0|%0, %2}", }; static const char * output_290 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOVX: { enum machine_mode mode; if (GET_CODE (operands[2]) != CONST_INT) abort (); if (INTVAL (operands[2]) == 0xff) mode = QImode; else if (INTVAL (operands[2]) == 0xffff) mode = HImode; else abort (); operands[1] = gen_lowpart (mode, operands[1]); if (mode == QImode) return "movz{bl|x}\t{%1,%0|%0, %1}"; else return "movz{wl|x}\t{%1,%0|%0, %1}"; } default: if (! 
rtx_equal_p (operands[0], operands[1])) abort (); return "and{l}\t{%2, %0|%0, %2}"; } } } static const char * output_294 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOVX: if (GET_CODE (operands[2]) != CONST_INT) abort (); if (INTVAL (operands[2]) == 0xff) return "movz{bl|x}\t{%b1, %k0|%k0, %b1}"; abort (); default: if (! rtx_equal_p (operands[0], operands[1])) abort (); return "and{w}\t{%2, %0|%0, %2}"; } } } static const char * const output_296[] = { "and{b}\t{%2, %0|%0, %2}", "and{b}\t{%2, %0|%0, %2}", "and{l}\t{%k2, %k0|%k0, %k2}", }; static const char * output_298 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (which_alternative == 2) { if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) & 0xffffff00)) operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff); return "and{l}\t{%2, %k0|%k0, %2}"; } return "and{b}\t{%2, %0|%0, %2}"; } } static const char * const output_318[] = { "or{b}\t{%2, %0|%0, %2}", "or{b}\t{%2, %0|%0, %2}", "or{l}\t{%k2, %k0|%k0, %k2}", }; static const char * const output_327[] = { "xor{q}\t{%2, %0|%0, %2}", "xor{q}\t{%2, %0|%0, %2}", }; static const char * const output_328[] = { "xor{q}\t{%2, %0|%0, %2}", "xor{q}\t{%2, %0|%0, %2}", }; static const char * const output_340[] = { "xor{b}\t{%2, %0|%0, %2}", "xor{b}\t{%2, %0|%0, %2}", "xor{l}\t{%k2, %k0|%k0, %k2}", }; static const char * const output_400[] = { "not{b}\t%0", "not{l}\t%k0", }; static const char * output_402 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); if (!rtx_equal_p (operands[0], operands[1])) abort (); return "add{q}\t{%0, %0|%0, %0}"; case TYPE_LEA: if (GET_CODE (operands[2]) != CONST_INT || (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 3) abort (); operands[1] = gen_rtx_MULT (DImode, operands[1], GEN_INT (1 << INTVAL (operands[2]))); return "lea{q}\t{%a1, %0|%0, %a1}"; default: if (REG_P (operands[2])) return "sal{q}\t{%b2, %0|%0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{q}\t%0"; else return "sal{q}\t{%2, %0|%0, %2}"; } } } static const char * output_403 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); return "add{q}\t{%0, %0|%0, %0}"; default: if (REG_P (operands[2])) return "sal{q}\t{%b2, %0|%0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{q}\t%0"; else return "sal{q}\t{%2, %0|%0, %2}"; } } } static const char * const output_406[] = { "shld{l}\t{%2, %1, %0|%0, %1, %2}", "shld{l}\t{%s2%1, %0|%0, %1, %2}", }; static const char * output_407 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); if (!rtx_equal_p (operands[0], operands[1])) abort (); return "add{l}\t{%0, %0|%0, %0}"; case TYPE_LEA: return "#"; default: if (REG_P (operands[2])) return "sal{l}\t{%b2, %0|%0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{l}\t%0"; else return "sal{l}\t{%2, %0|%0, %2}"; } } } static const char * output_408 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); return "add{l}\t{%k0, %k0|%k0, %k0}"; case TYPE_LEA: return "#"; default: if (REG_P (operands[2])) return "sal{l}\t{%b2, 
%k0|%k0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{l}\t%k0"; else return "sal{l}\t{%2, %k0|%k0, %2}"; } } } static const char * output_409 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); return "add{l}\t{%0, %0|%0, %0}"; default: if (REG_P (operands[2])) return "sal{l}\t{%b2, %0|%0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{l}\t%0"; else return "sal{l}\t{%2, %0|%0, %2}"; } } } static const char * output_410 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); return "add{l}\t{%k0, %k0|%k0, %k0}"; default: if (REG_P (operands[2])) return "sal{l}\t{%b2, %k0|%k0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{l}\t%k0"; else return "sal{l}\t{%2, %k0|%k0, %2}"; } } } static const char * output_411 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_LEA: return "#"; case TYPE_ALU: if (operands[2] != const1_rtx) abort (); return "add{w}\t{%0, %0|%0, %0}"; default: if (REG_P (operands[2])) return "sal{w}\t{%b2, %0|%0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{w}\t%0"; else return "sal{w}\t{%2, %0|%0, %2}"; } } } static const char * output_412 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); return "add{w}\t{%0, %0|%0, %0}"; default: if (REG_P (operands[2])) return "sal{w}\t{%b2, %0|%0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{w}\t%0"; else return "sal{w}\t{%2, %0|%0, %2}"; } } } static const char * output_413 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); return "add{w}\t{%0, %0|%0, %0}"; default: if (REG_P (operands[2])) return "sal{w}\t{%b2, %0|%0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{w}\t%0"; else return "sal{w}\t{%2, %0|%0, %2}"; } } } static const char * output_414 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_LEA: return "#"; case TYPE_ALU: if (operands[2] != const1_rtx) abort (); if (REG_P (operands[1]) && !ANY_QI_REG_P (operands[1])) return "add{l}\t{%k0, %k0|%k0, %k0}"; else return "add{b}\t{%0, %0|%0, %0}"; default: if (REG_P (operands[2])) { if (get_attr_mode (insn) == MODE_SI) return "sal{l}\t{%b2, %k0|%k0, %b2}"; else return "sal{b}\t{%b2, %0|%0, %b2}"; } else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) { if (get_attr_mode (insn) == MODE_SI) return "sal{l}\t%0"; else return "sal{b}\t%0"; } else { if (get_attr_mode (insn) == MODE_SI) return "sal{l}\t{%2, %k0|%k0, %2}"; else return "sal{b}\t{%2, %0|%0, %2}"; } } } } static const char * output_415 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); if (REG_P (operands[1]) && !ANY_QI_REG_P (operands[1])) return "add{l}\t{%k0, %k0|%k0, %k0}"; else return "add{b}\t{%0, %0|%0, %0}"; default: if (REG_P (operands[2])) { if (get_attr_mode (insn) == MODE_SI) return "sal{l}\t{%b2, %k0|%k0, %b2}"; else return 
"sal{b}\t{%b2, %0|%0, %b2}"; } else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) { if (get_attr_mode (insn) == MODE_SI) return "sal{l}\t%0"; else return "sal{b}\t%0"; } else { if (get_attr_mode (insn) == MODE_SI) return "sal{l}\t{%2, %k0|%k0, %2}"; else return "sal{b}\t{%2, %0|%0, %2}"; } } } } static const char * output_416 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: if (operands[2] != const1_rtx) abort (); return "add{b}\t{%0, %0|%0, %0}"; default: if (REG_P (operands[2])) return "sal{b}\t{%b2, %0|%0, %b2}"; else if (operands[2] == const1_rtx && (TARGET_SHIFT1 || optimize_size)) return "sal{b}\t%0"; else return "sal{b}\t{%2, %0|%0, %2}"; } } } static const char * const output_417[] = { "{cqto|cqo}", "sar{q}\t{%2, %0|%0, %2}", }; static const char * const output_419[] = { "sar{q}\t{%2, %0|%0, %2}", "sar{q}\t{%b2, %0|%0, %b2}", }; static const char * const output_424[] = { "shrd{l}\t{%2, %1, %0|%0, %1, %2}", "shrd{l}\t{%s2%1, %0|%0, %1, %2}", }; static const char * const output_425[] = { "{cltd|cdq}", "sar{l}\t{%2, %0|%0, %2}", }; static const char * const output_426[] = { "{cltd|cdq}", "sar{l}\t{%2, %k0|%k0, %2}", }; static const char * const output_429[] = { "sar{l}\t{%2, %0|%0, %2}", "sar{l}\t{%b2, %0|%0, %b2}", }; static const char * const output_430[] = { "sar{l}\t{%2, %k0|%k0, %2}", "sar{l}\t{%b2, %k0|%k0, %b2}", }; static const char * const output_436[] = { "sar{w}\t{%2, %0|%0, %2}", "sar{w}\t{%b2, %0|%0, %b2}", }; static const char * const output_441[] = { "sar{b}\t{%2, %0|%0, %2}", "sar{b}\t{%b2, %0|%0, %b2}", }; static const char * const output_442[] = { "sar{b}\t{%1, %0|%0, %1}", "sar{b}\t{%b1, %0|%0, %b1}", }; static const char * const output_446[] = { "shr{q}\t{%2, %0|%0, %2}", "shr{q}\t{%b2, %0|%0, %b2}", }; static const char * const output_453[] = { "shr{l}\t{%2, %0|%0, %2}", "shr{l}\t{%b2, %0|%0, %b2}", }; static const char * const output_454[] = { "shr{l}\t{%2, %k0|%k0, %2}", "shr{l}\t{%b2, %k0|%k0, %b2}", }; static const char * const output_460[] = { "shr{w}\t{%2, %0|%0, %2}", "shr{w}\t{%b2, %0|%0, %b2}", }; static const char * const output_465[] = { "shr{b}\t{%2, %0|%0, %2}", "shr{b}\t{%b2, %0|%0, %b2}", }; static const char * const output_466[] = { "shr{b}\t{%1, %0|%0, %1}", "shr{b}\t{%b1, %0|%0, %b1}", }; static const char * const output_470[] = { "rol{q}\t{%2, %0|%0, %2}", "rol{q}\t{%b2, %0|%0, %b2}", }; static const char * const output_473[] = { "rol{l}\t{%2, %0|%0, %2}", "rol{l}\t{%b2, %0|%0, %b2}", }; static const char * const output_474[] = { "rol{l}\t{%2, %k0|%k0, %2}", "rol{l}\t{%b2, %k0|%k0, %b2}", }; static const char * const output_476[] = { "rol{w}\t{%2, %0|%0, %2}", "rol{w}\t{%b2, %0|%0, %b2}", }; static const char * const output_479[] = { "rol{b}\t{%1, %0|%0, %1}", "rol{b}\t{%b1, %0|%0, %b1}", }; static const char * const output_480[] = { "rol{b}\t{%2, %0|%0, %2}", "rol{b}\t{%b2, %0|%0, %b2}", }; static const char * const output_482[] = { "ror{q}\t{%2, %0|%0, %2}", "ror{q}\t{%b2, %0|%0, %b2}", }; static const char * const output_485[] = { "ror{l}\t{%2, %0|%0, %2}", "ror{l}\t{%b2, %0|%0, %b2}", }; static const char * const output_486[] = { "ror{l}\t{%2, %k0|%k0, %2}", "ror{l}\t{%b2, %k0|%k0, %b2}", }; static const char * const output_488[] = { "ror{w}\t{%2, %0|%0, %2}", "ror{w}\t{%b2, %0|%0, %b2}", }; static const char * const output_491[] = { "ror{b}\t{%2, %0|%0, %2}", "ror{b}\t{%b2, %0|%0, %b2}", }; static const char * const output_492[] = { "ror{b}\t{%1, %0|%0, %1}", 
"ror{b}\t{%b1, %0|%0, %b1}", }; static const char * output_514 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (which_alternative != 0) return "#"; if (get_attr_length (insn) == 2) return "%+loop\t%l0"; else return "dec{l}\t%1\n\t%+jne\t%l0"; } } static const char * output_515 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (SIBLING_CALL_P (insn)) return "jmp\t%P0"; else return "call\t%P0"; } } static const char * output_516 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (constant_call_address_operand (operands[0], Pmode)) { if (SIBLING_CALL_P (insn)) return "jmp\t%P0"; else return "call\t%P0"; } if (SIBLING_CALL_P (insn)) return "jmp\t%A0"; else return "call\t%A0"; } } static const char * output_517 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (SIBLING_CALL_P (insn)) return "jmp\t%P0"; else return "call\t%P0"; } } static const char * output_518 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (constant_call_address_operand (operands[0], QImode)) return "call\t%P0"; return "call\t%A0"; } } static const char * output_519 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (constant_call_address_operand (operands[0], QImode)) return "jmp\t%P0"; return "jmp\t%A0"; } } static const char * output_520 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (constant_call_address_operand (operands[0], QImode)) return "call\t%P0"; return "call\t%A0"; } } static const char * output_529 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { #ifdef ASM_OUTPUT_MAX_SKIP_ALIGN ASM_OUTPUT_MAX_SKIP_ALIGN (asm_out_file, 4, (int)INTVAL (operands[0])); #else /* It is tempting to use ASM_OUTPUT_ALIGN here, but we don't want to do that. The align insn is used to avoid 3 jump instructions in the row to improve branch prediction and the benefits hardly outweight the cost of extra 8 nops on the average inserted by full alignment pseudo operation. 
*/ #endif return ""; } } static const char * output_530 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { return output_set_got (operands[0]); } } static const char * output_555 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_556 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_557 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_558 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_559 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_560 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_561 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_562 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_563 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_564 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_565 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return which_alternative ? "#" : output_387_binary_op (insn, operands); } static const char * output_566 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return which_alternative ? "#" : output_387_binary_op (insn, operands); } static const char * output_567 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_568 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_569 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_570 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return which_alternative ? "#" : output_387_binary_op (insn, operands); } static const char * output_571 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return which_alternative ? "#" : output_387_binary_op (insn, operands); } static const char * output_572 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_573 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_574 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_575 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_576 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return which_alternative ? "#" : output_387_binary_op (insn, operands); } static const char * output_577 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return which_alternative ? 
"#" : output_387_binary_op (insn, operands); } static const char * output_578 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_579 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * output_580 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return output_387_binary_op (insn, operands); } static const char * const output_581[] = { "fsqrt", "sqrtss\t{%1, %0|%0, %1}", }; static const char * const output_584[] = { "fsqrt", "sqrtsd\t{%1, %0|%0, %1}", }; static const char * const output_649[] = { "cmov%O2%C1\t{%2, %0|%0, %2}", "cmov%O2%c1\t{%3, %0|%0, %3}", }; static const char * const output_651[] = { "cmov%O2%C1\t{%2, %0|%0, %2}", "cmov%O2%c1\t{%3, %0|%0, %3}", }; static const char * const output_652[] = { "cmov%O2%C1\t{%2, %0|%0, %2}", "cmov%O2%c1\t{%3, %0|%0, %3}", }; static const char * const output_654[] = { "fcmov%F1\t{%2, %0|%0, %2}", "fcmov%f1\t{%3, %0|%0, %3}", "cmov%O2%C1\t{%2, %0|%0, %2}", "cmov%O2%c1\t{%3, %0|%0, %3}", }; static const char * const output_655[] = { "fcmov%F1\t{%2, %0|%0, %2}", "fcmov%f1\t{%3, %0|%0, %3}", "#", "#", }; static const char * const output_656[] = { "fcmov%F1\t{%2, %0|%0, %2}", "fcmov%f1\t{%3, %0|%0, %3}", "cmov%O2%C1\t{%2, %0|%0, %2}", "cmov%O2%c1\t{%3, %0|%0, %3}", }; static const char * const output_657[] = { "fcmov%F1\t{%2, %0|%0, %2}", "fcmov%f1\t{%3, %0|%0, %3}", }; static const char * output_670 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOV: return "mov{l}\t{%1, %0|%0, %1}"; case TYPE_ALU: if (GET_CODE (operands[2]) == CONST_INT && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{l}\t{%2, %0|%0, %2}"; } return "add{l}\t{%2, %0|%0, %2}"; case TYPE_LEA: operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0)); return "lea{l}\t{%a2, %0|%0, %a2}"; default: abort (); } } } static const char * output_671 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_IMOV: return "mov{q}\t{%1, %0|%0, %1}"; case TYPE_ALU: if (GET_CODE (operands[2]) == CONST_INT /* Avoid overflows. 
*/ && ((INTVAL (operands[2]) & ((((unsigned int) 1) << 31) - 1))) && (INTVAL (operands[2]) == 128 || (INTVAL (operands[2]) < 0 && INTVAL (operands[2]) != -128))) { operands[2] = GEN_INT (-INTVAL (operands[2])); return "sub{q}\t{%2, %0|%0, %2}"; } return "add{q}\t{%2, %0|%0, %2}"; case TYPE_LEA: operands[2] = SET_SRC (XVECEXP (PATTERN (insn), 0, 0)); return "lea{q}\t{%a2, %0|%0, %a2}"; default: abort (); } } } static const char * output_672 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (get_attr_type (insn)) { case TYPE_ALU: return "add{q}\t{%2, %0|%0, %2}"; case TYPE_LEA: operands[2] = gen_rtx_PLUS (DImode, operands[1], operands[2]); return "lea{q}\t{%a2, %0|%0, %a2}"; default: abort (); } } } static const char * output_687 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (SIBLING_CALL_P (insn)) return "jmp\t%P1"; else return "call\t%P1"; } } static const char * output_688 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (constant_call_address_operand (operands[1], QImode)) { if (SIBLING_CALL_P (insn)) return "jmp\t%P1"; else return "call\t%P1"; } if (SIBLING_CALL_P (insn)) return "jmp\t%A1"; else return "call\t%A1"; } } static const char * output_689 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (SIBLING_CALL_P (insn)) return "jmp\t%P1"; else return "call\t%P1"; } } static const char * output_690 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (SIBLING_CALL_P (insn)) return "jmp\t%P1"; else return "call\t%P1"; } } static const char * output_691 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (constant_call_address_operand (operands[1], QImode)) return "call\t%P1"; return "call\t%*%1"; } } static const char * output_692 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (constant_call_address_operand (operands[1], QImode)) return "jmp\t%P1"; return "jmp\t%*%1"; } } static const char * output_693 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (constant_call_address_operand (operands[1], QImode)) return "call\t%P1"; return "call\t%A1"; } } static const char * output_697 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { operands[2] = gen_label_rtx (); output_asm_insn ("j%c0\t%l2\n\t int\t%1", operands); (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (operands[2])); RET; } } static const char * const output_698[] = { "xorps\t%0, %0", "movaps\t{%1, %0|%0, %1}", "movaps\t{%1, %0|%0, %1}", }; static const char * output_699 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (get_attr_mode (insn) == MODE_V4SF) return "xorps\t%0, %0"; else return "pxor\t%0, %0"; case 1: case 2: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movdqa\t{%1, %0|%0, %1}"; default: abort (); } } } static const char * output_700 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (get_attr_mode (insn) == MODE_V4SF) return "xorps\t%0, %0"; else return "pxor\t%0, %0"; case 1: case 2: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movdqa\t{%1, %0|%0, %1}"; default: abort (); } } } static const char * const output_701[] = { "pxor\t%0, %0", "movq\t{%1, %0|%0, %1}", "movq\t{%1, %0|%0, %1}", }; static const char * const output_702[] = { "pxor\t%0, %0", "movq\t{%1, %0|%0, %1}", "movq\t{%1, %0|%0, %1}", }; static const char * const output_703[] = { "pxor\t%0, 
%0", "movq\t{%1, %0|%0, %1}", "movq\t{%1, %0|%0, %1}", }; static const char * const output_704[] = { "pxor\t%0, %0", "movq\t{%1, %0|%0, %1}", "movq\t{%1, %0|%0, %1}", }; static const char * output_705 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (get_attr_mode (insn) == MODE_V4SF) return "xorps\t%0, %0"; else return "xorpd\t%0, %0"; case 1: case 2: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movapd\t{%1, %0|%0, %1}"; default: abort (); } } } static const char * output_706 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (get_attr_mode (insn) == MODE_V4SF) return "xorps\t%0, %0"; else return "pxor\t%0, %0"; case 1: case 2: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movdqa\t{%1, %0|%0, %1}"; default: abort (); } } } static const char * output_707 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (get_attr_mode (insn) == MODE_V4SF) return "xorps\t%0, %0"; else return "pxor\t%0, %0"; case 1: case 2: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movdqa\t{%1, %0|%0, %1}"; default: abort (); } } } static const char * output_719 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: if (get_attr_mode (insn) == MODE_V4SF) return "xorps\t%0, %0"; else return "pxor\t%0, %0"; case 1: case 2: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movdqa\t{%1, %0|%0, %1}"; default: abort (); } } } static const char * output_720 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: case 1: return "#"; case 2: if (get_attr_mode (insn) == MODE_V4SF) return "xorps\t%0, %0"; else return "pxor\t%0, %0"; case 3: case 4: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movdqa\t{%1, %0|%0, %1}"; default: abort (); } } } static const char * output_721 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { switch (which_alternative) { case 0: case 1: return "#"; case 2: if (get_attr_mode (insn) == MODE_V4SF) return "xorps\t%0, %0"; else return "pxor\t%0, %0"; case 3: case 4: if (get_attr_mode (insn) == MODE_V4SF) return "movaps\t{%1, %0|%0, %1}"; else return "movdqa\t{%1, %0|%0, %1}"; default: abort (); } } } static const char * output_768 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (get_attr_mode (insn) == MODE_TI) return "pxor\t{%0, %0|%0, %0}"; else return "xorps\t{%0, %0|%0, %0}"; } } static const char * output_771 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (GET_CODE (operands[3]) == UNORDERED) return "cmpordps\t{%2, %0|%0, %2}"; else return "cmpn%D3ps\t{%2, %0|%0, %2}"; } } static const char * output_773 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (GET_CODE (operands[3]) == UNORDERED) return "cmpordss\t{%2, %0|%0, %2}"; else return "cmpn%D3ss\t{%2, %0|%0, %2}"; } } static const char * output_853 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { int i; operands[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, operands[0], operands[4])); output_asm_insn ("jmp\t%A1", operands); for (i = SSE_REGPARM_MAX - 1; i >= INTVAL (operands[2]); i--) { operands[4] = adjust_address (operands[0], DImode, i*16); operands[5] = gen_rtx_REG (TImode, SSE_REGNO (i)); PUT_MODE (operands[4], TImode); 
if (GET_CODE (XEXP (operands[0], 0)) != PLUS) output_asm_insn ("rex", operands); output_asm_insn ("movaps\t{%5, %4|%4, %5}", operands); } (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (operands[3])); RET; } } static const char * output_880 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { static const char * const patterns[4] = { "prefetchnta\t%a0", "prefetcht2\t%a0", "prefetcht1\t%a0", "prefetcht0\t%a0" }; int locality = INTVAL (operands[1]); if (locality < 0 || locality > 3) abort (); return patterns[locality]; } } static const char * output_881 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { static const char * const patterns[4] = { "prefetchnta\t%a0", "prefetcht2\t%a0", "prefetcht1\t%a0", "prefetcht0\t%a0" }; int locality = INTVAL (operands[1]); if (locality < 0 || locality > 3) abort (); return patterns[locality]; } } static const char * output_882 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (INTVAL (operands[1]) == 0) return "prefetch\t%a0"; else return "prefetchw\t%a0"; } } static const char * output_883 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (INTVAL (operands[1]) == 0) return "prefetch\t%a0"; else return "prefetchw\t%a0"; } } static const char * output_899 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (GET_CODE (operands[3]) == UNORDERED) return "cmpordps\t{%2, %0|%0, %2}"; else return "cmpn%D3pd\t{%2, %0|%0, %2}"; } } static const char * output_901 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (GET_CODE (operands[3]) == UNORDERED) return "cmpordsd\t{%2, %0|%0, %2}"; else return "cmpn%D3sd\t{%2, %0|%0, %2}"; } } static const char * output_952 (rtx *operands ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { { if (get_attr_mode (insn) == MODE_TI) return "pxor\t%0, %0"; else return "xorps\t%0, %0"; } } static const char * const output_1006[] = { "movq\t{%1, %0|%0, %1}", "movdq2q\t{%1, %0|%0, %1}", }; static const char * const output_1007[] = { "movq\t{%1, %0|%0, %1}", "movdq2q\t{%1, %0|%0, %1}", "movd\t{%1, %0|%0, %1}", }; static const char * const output_1008[] = { "movq\t{%1, %0|%0, %1}", "movq2dq\t{%1, %0|%0, %1}", }; static const char * const output_1009[] = { "movq\t{%1, %0|%0, %1}", "movq2dq\t{%1, %0|%0, %1}", "movd\t{%1, %0|%0, %1}", }; static const char * const output_1015[] = { "movsd\t{%2, %0|%0, %2}", "movlpd\t{%2, %0|%0, %2}", "movlpd\t{%2, %0|%0, %2}", }; static const struct insn_operand_data operand_data[] = { { 0, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "r,?mr", DImode, 0, 1 }, { const0_operand, "n,n", DImode, 0, 1 }, { nonimmediate_operand, "rm,r", DImode, 0, 1 }, { x86_64_general_operand, "re,mr", DImode, 0, 1 }, { nonimmediate_operand, "mr,r", DImode, 0, 1 }, { x86_64_general_operand, "re,mr", DImode, 0, 1 }, { nonimmediate_operand, "r,?mr", SImode, 0, 1 }, { const0_operand, "n,n", SImode, 0, 1 }, { nonimmediate_operand, "rm,r", SImode, 0, 1 }, { general_operand, "ri,mr", SImode, 0, 1 }, { nonimmediate_operand, "r,?mr", HImode, 0, 1 }, { const0_operand, "n,n", HImode, 0, 1 }, { nonimmediate_operand, "rm,r", HImode, 0, 1 }, { general_operand, "ri,mr", HImode, 0, 1 }, { nonimmediate_operand, "q,?mq", QImode, 0, 1 }, { const0_operand, "n,n", QImode, 0, 1 }, { nonimmediate_operand, "qm,q", QImode, 0, 1 }, { general_operand, "qi,mq", QImode, 0, 1 }, { general_operand, "Qm", QImode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { register_operand, "Q", QImode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, 
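/* Annotation (not part of the generated output, added for readability): the initializers above and below belong to the operand_data[] table of struct insn_operand_data records that genoutput emits for each insn operand.  Each entry visibly carries an operand predicate, its constraint string, its machine mode, and two small integer flags; judging from the layout, the first flag marks strict_low_part operands and the last is cleared for scratch and operator entries, but that reading of the flags is an assumption, not something stated in the source. */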
{ const0_operand, "n", QImode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { general_operand, "Qmn", QImode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { nonmemory_operand, "Qn", QImode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { register_operand, "=a", HImode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { const0_operand, "X", VOIDmode, 0, 1 }, { register_operand, "f", SFmode, 0, 1 }, { nonimmediate_operand, "fm", SFmode, 0, 1 }, { register_operand, "=a", HImode, 0, 1 }, { register_operand, "f", SFmode, 0, 1 }, { nonimmediate_operand, "fm", SFmode, 0, 1 }, { register_operand, "f", DFmode, 0, 1 }, { nonimmediate_operand, "fm", DFmode, 0, 1 }, { register_operand, "=a", HImode, 0, 1 }, { register_operand, "f", DFmode, 0, 1 }, { nonimmediate_operand, "fm", DFmode, 0, 1 }, { register_operand, "f", XFmode, 0, 1 }, { register_operand, "f", XFmode, 0, 1 }, { register_operand, "=a", HImode, 0, 1 }, { register_operand, "f", XFmode, 0, 1 }, { register_operand, "f", XFmode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { register_operand, "=a", HImode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { register_operand, "f,f", VOIDmode, 0, 1 }, { nonimmediate_operand, "m,?r", SImode, 0, 1 }, { register_operand, "a", HImode, 0, 1 }, { register_operand, "f#x,x#f", VOIDmode, 0, 1 }, { nonimmediate_operand, "f#x,xm#f", VOIDmode, 0, 1 }, { register_operand, "x", VOIDmode, 0, 1 }, { nonimmediate_operand, "xm", VOIDmode, 0, 1 }, { push_operand, "=<", SImode, 0, 1 }, { general_no_elim_operand, "ri*m", SImode, 0, 1 }, { push_operand, "=X", SImode, 0, 1 }, { nonmemory_no_elim_operand, "ri", SImode, 0, 1 }, { nonimmediate_operand, "=r*m", SImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { const0_operand, "i", SImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { nonimmediate_operand, "=r,m,!*y,!rm,!*y,!*Y,!rm,!*Y", SImode, 0, 1 }, { general_operand, "rinm,rin,*y,*y,rm,*Y,*Y,rm", SImode, 0, 1 }, { nonimmediate_operand, "=r,m,!*y,!m,!*y,!*Y,!m,!*Y", SImode, 0, 1 }, { general_operand, "rinm,rin,*y,*y,m,*Y,*Y,m", SImode, 0, 1 }, { x86_64_movabs_operand, "i,r", DImode, 0, 1 }, { nonmemory_operand, "a,er", SImode, 0, 1 }, { register_operand, "=a,r", SImode, 0, 1 }, { x86_64_movabs_operand, "i,r", DImode, 0, 1 }, { register_operand, "+r", SImode, 0, 1 }, { register_operand, "+r", SImode, 0, 1 }, { push_operand, "=<,<", HImode, 0, 1 }, { general_no_elim_operand, "n,r*m", HImode, 0, 1 }, { push_operand, "=X", HImode, 0, 1 }, { nonmemory_no_elim_operand, "ri", HImode, 0, 1 }, { nonimmediate_operand, "=r,r,r,m", HImode, 0, 1 }, { general_operand, "r,rn,rm,rn", HImode, 0, 1 }, { x86_64_movabs_operand, "i,r", DImode, 0, 1 }, { nonmemory_operand, "a,er", HImode, 0, 1 }, { register_operand, "=a,r", HImode, 0, 1 }, { x86_64_movabs_operand, "i,r", DImode, 0, 1 }, { register_operand, "+r", HImode, 0, 1 }, { register_operand, "+r", HImode, 0, 1 }, { nonimmediate_operand, "+rm,r", HImode, 1, 1 }, { general_operand, "rn,m", HImode, 0, 1 }, { register_operand, "+r", HImode, 1, 1 }, { const0_operand, "i", HImode, 0, 1 }, { push_operand, "=X,X", QImode, 0, 1 }, { nonmemory_no_elim_operand, "n,r", QImode, 0, 1 }, { push_operand, "=X", QImode, 0, 1 }, { nonmemory_no_elim_operand, "qi", QImode, 0, 1 }, { nonimmediate_operand, "=q,q,q,r,r,?r,m", QImode, 0, 1 }, { general_operand, "q,qn,qm,q,rn,qm,qn", 
QImode, 0, 1 }, { register_operand, "+r", QImode, 0, 1 }, { register_operand, "+r", QImode, 0, 1 }, { nonimmediate_operand, "+qm,q", QImode, 1, 1 }, { general_operand, "*qn,m", QImode, 0, 1 }, { q_regs_operand, "+q", QImode, 1, 1 }, { const0_operand, "i", QImode, 0, 1 }, { register_operand, "=R", SImode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { register_operand, "=R", HImode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { nonimmediate_operand, "=Qm,?r", QImode, 0, 1 }, { ext_register_operand, "Q,Q", VOIDmode, 0, 1 }, { register_operand, "=Q,?R", QImode, 0, 1 }, { ext_register_operand, "Q,Q", VOIDmode, 0, 1 }, { x86_64_movabs_operand, "i,r", DImode, 0, 1 }, { nonmemory_operand, "a,er", QImode, 0, 1 }, { register_operand, "=a,r", QImode, 0, 1 }, { x86_64_movabs_operand, "i,r", DImode, 0, 1 }, { nonimmediate_operand, "=Qm,?R", QImode, 0, 1 }, { ext_register_operand, "Q,Q", VOIDmode, 0, 1 }, { ext_register_operand, "+Q", VOIDmode, 0, 1 }, { general_operand, "Qmn", SImode, 0, 1 }, { ext_register_operand, "+Q", VOIDmode, 0, 1 }, { nonmemory_operand, "Qn", DImode, 0, 1 }, { ext_register_operand, "+Q", VOIDmode, 0, 1 }, { register_operand, "Q", SImode, 0, 1 }, { push_operand, "=<", DImode, 0, 1 }, { general_no_elim_operand, "riF*m", DImode, 0, 1 }, { push_operand, "=<,!<", DImode, 0, 1 }, { general_no_elim_operand, "re*m,n", DImode, 0, 1 }, { push_operand, "=<", DImode, 0, 1 }, { general_no_elim_operand, "re*m", DImode, 0, 1 }, { nonimmediate_operand, "=r*m", DImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { const0_operand, "i", DImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { const_int_operand, "i", DImode, 0, 1 }, { nonimmediate_operand, "=r,o,!m*y,!*y,!m,!*Y,!*Y", DImode, 0, 1 }, { general_operand, "riFo,riF,*y,m,*Y,*Y,m", DImode, 0, 1 }, { nonimmediate_operand, "=r,r,r,mr,!mr,!*y,!rm,!*y,!*Y,!rm,!*Y", DImode, 0, 1 }, { general_operand, "Z,rem,i,re,n,*y,*y,rm,*Y,*Y,rm", DImode, 0, 1 }, { nonimmediate_operand, "=r,r,r,mr,!mr,!*y,!m,!*y,!*Y,!m,!*Y", DImode, 0, 1 }, { general_operand, "Z,rem,i,re,n,*y,*y,m,*Y,*Y,m", DImode, 0, 1 }, { x86_64_movabs_operand, "i,r", DImode, 0, 1 }, { nonmemory_operand, "a,er", DImode, 0, 1 }, { register_operand, "=a,r", DImode, 0, 1 }, { x86_64_movabs_operand, "i,r", DImode, 0, 1 }, { register_operand, "+r", DImode, 0, 1 }, { register_operand, "+r", DImode, 0, 1 }, { push_operand, "=<,<,<", SFmode, 0, 1 }, { general_no_elim_operand, "f#rx,rFm#fx,x#rf", SFmode, 0, 1 }, { push_operand, "=X,X,X", SFmode, 0, 1 }, { nonmemory_no_elim_operand, "f#rx,rF#fx,x#rf", SFmode, 0, 1 }, { nonimmediate_operand, "=f#xr,m,f#xr,r#xf,m,x#rf,x#rf,x#rf,m,!*y,!rm,!*y", SFmode, 0, 1 }, { general_operand, "fm#rx,f#rx,G,rmF#fx,Fr#fx,C,x,xm#rf,x#rf,rm,*y,*y", SFmode, 0, 1 }, { nonimmediate_operand, "=f#xr,m,f#xr,r#xf,m,x#rf,x#rf,x#rf,m,!*y,!m,!*y", SFmode, 0, 1 }, { general_operand, "fm#rx,f#rx,G,rmF#fx,Fr#fx,C,x,xm#rf,x#rf,m,*y,*y", SFmode, 0, 1 }, { register_operand, "+f", SFmode, 0, 1 }, { register_operand, "+f", SFmode, 0, 1 }, { push_operand, "=<,<,<,<", DFmode, 0, 1 }, { general_no_elim_operand, "f#Y,Fo#fY,*r#fY,Y#f", DFmode, 0, 1 }, { push_operand, "=<,<,<", DFmode, 0, 1 }, { general_no_elim_operand, "f#rY,rFo#fY,Y#rf", DFmode, 0, 1 }, { nonimmediate_operand, "=f#Y,m,f#Y,*r,o,Y#f,Y#f,Y#f,m", DFmode, 0, 1 }, { general_operand, "fm#Y,f#Y,G,*roF,F*r,C,Y#f,YHm#f,Y#f", DFmode, 0, 1 }, { nonimmediate_operand, "=f#Yr,m,f#Yr,r#Yf,o,Y#rf,Y#rf,Y#rf,m", DFmode, 0, 1 }, { general_operand, "fm#Yr,f#Yr,G,roF#Yf,Fr#Yf,C,Y#rf,Ym#rf,Y#rf", DFmode, 0, 1 }, { 
register_operand, "+f", DFmode, 0, 1 }, { register_operand, "+f", DFmode, 0, 1 }, { push_operand, "=X,X,X", XFmode, 0, 1 }, { general_no_elim_operand, "f,Fo,*r", XFmode, 0, 1 }, { push_operand, "=<,<", XFmode, 0, 1 }, { general_no_elim_operand, "f#r,ro#f", XFmode, 0, 1 }, { nonimmediate_operand, "=f,m,f,*r,o", XFmode, 0, 1 }, { general_operand, "fm,f,G,*roF,F*r", XFmode, 0, 1 }, { nonimmediate_operand, "=f#r,m,f#r,r#f,o", XFmode, 0, 1 }, { general_operand, "fm#r,f#r,G,roF#f,Fr#f", XFmode, 0, 1 }, { register_operand, "+f", XFmode, 0, 1 }, { register_operand, "+f", XFmode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { register_operand, "0", HImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { nonimmediate_operand, "rm", HImode, 0, 1 }, { register_operand, "=r,?&q", HImode, 0, 1 }, { nonimmediate_operand, "0,qm", QImode, 0, 1 }, { register_operand, "=r,r", HImode, 0, 1 }, { nonimmediate_operand, "qm,0", QImode, 0, 1 }, { register_operand, "=r", HImode, 0, 1 }, { nonimmediate_operand, "qm", QImode, 0, 1 }, { register_operand, "=r,?&q", SImode, 0, 1 }, { nonimmediate_operand, "0,qm", QImode, 0, 1 }, { register_operand, "=r,r", SImode, 0, 1 }, { nonimmediate_operand, "qm,0", QImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { nonimmediate_operand, "qm", QImode, 0, 1 }, { nonimmediate_operand, "=r,?r,?*o,!?y,!?Y", DImode, 0, 1 }, { nonimmediate_operand, "0,rm,r,m,m", SImode, 0, 1 }, { nonimmediate_operand, "=r,?r,?*o,!?y,!?Y", DImode, 0, 1 }, { nonimmediate_operand, "0,rm,r,rm,rm", SImode, 0, 1 }, { nonimmediate_operand, "=r,o,!?y,!?Y", DImode, 0, 1 }, { nonimmediate_operand, "rm,0,m,m", SImode, 0, 1 }, { nonimmediate_operand, "=r,o,!?y,!*?", DImode, 0, 1 }, { nonimmediate_operand, "rm,0,rm,rm", SImode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { nonimmediate_operand, "r,m", HImode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { nonimmediate_operand, "Q,m", QImode, 0, 1 }, { nonimmediate_operand, "=*A,r,?r,?*o", DImode, 0, 1 }, { register_operand, "0,0,r,r", SImode, 0, 1 }, { scratch_operand, "=X,X,X,&r", SImode, 0, 0 }, { register_operand, "=*a,r", DImode, 0, 1 }, { nonimmediate_operand, "*0,rm", SImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { nonimmediate_operand, "rm", HImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { nonimmediate_operand, "qm", QImode, 0, 1 }, { register_operand, "=*a,r", SImode, 0, 1 }, { nonimmediate_operand, "*0,rm", HImode, 0, 1 }, { register_operand, "=*a,r", DImode, 0, 1 }, { nonimmediate_operand, "*0,rm", HImode, 0, 1 }, { register_operand, "=*a,r", HImode, 0, 1 }, { nonimmediate_operand, "*0,qm", QImode, 0, 1 }, { push_operand, "=<", DFmode, 0, 1 }, { nonimmediate_operand, "fY", SFmode, 0, 1 }, { push_operand, "=<", XFmode, 0, 1 }, { nonimmediate_operand, "f", SFmode, 0, 1 }, { nonimmediate_operand, "=f#Y,mf#Y,Y#f", DFmode, 0, 1 }, { nonimmediate_operand, "fm#Y,f#Y,mY#f", SFmode, 0, 1 }, { register_operand, "=Y", DFmode, 0, 1 }, { nonimmediate_operand, "mY", SFmode, 0, 1 }, { nonimmediate_operand, "=f,m", XFmode, 0, 1 }, { nonimmediate_operand, "fm,f", SFmode, 0, 1 }, { nonimmediate_operand, "=f,m", XFmode, 0, 1 }, { nonimmediate_operand, "fm,f", DFmode, 0, 1 }, { register_operand, "=f", SFmode, 0, 1 }, { register_operand, "f", DFmode, 0, 1 }, { nonimmediate_operand, "=m,?f#rx,?r#fx,?x#rf", SFmode, 0, 1 }, { register_operand, "f,f,f,f", DFmode, 0, 1 }, { memory_operand, "=X,m,m,m", SFmode, 0, 1 }, { nonimmediate_operand, "=*!m#fxr,?f#xr,?r#fx,?x#fr,Y#fr", SFmode, 0, 1 }, { nonimmediate_operand, 
"f#Y,f#Y,f#Y,f#Y,mY#f", DFmode, 0, 1 }, { memory_operand, "=X,m,m,m,X", SFmode, 0, 1 }, { nonimmediate_operand, "=*!m,?f#rx,?r#fx,?x#rf,&Y", SFmode, 0, 1 }, { nonimmediate_operand, "f#Y,f#Y,f#Y,f#Y,mY#f", DFmode, 0, 1 }, { memory_operand, "=X,m,m,m,X", SFmode, 0, 1 }, { nonimmediate_operand, "=Y,Y,!m", SFmode, 0, 1 }, { nonimmediate_operand, "Y,mY,f#Y", DFmode, 0, 1 }, { nonimmediate_operand, "=&Y,!m", SFmode, 0, 1 }, { nonimmediate_operand, "mY,f", DFmode, 0, 1 }, { memory_operand, "=m", SFmode, 0, 1 }, { register_operand, "f", DFmode, 0, 1 }, { register_operand, "=Y,Y", SFmode, 0, 1 }, { nonimmediate_operand, "Y,mY", DFmode, 0, 1 }, { register_operand, "=&Y", SFmode, 0, 1 }, { nonimmediate_operand, "mY", DFmode, 0, 1 }, { register_operand, "=f", SFmode, 0, 1 }, { register_operand, "f", XFmode, 0, 1 }, { nonimmediate_operand, "=m,?f#rx,?r#fx,?x#rf", SFmode, 0, 1 }, { register_operand, "f,f,f,f", XFmode, 0, 1 }, { memory_operand, "=X,m,m,m", SFmode, 0, 1 }, { memory_operand, "=m", SFmode, 0, 1 }, { register_operand, "f", XFmode, 0, 1 }, { register_operand, "=f", DFmode, 0, 1 }, { register_operand, "f", XFmode, 0, 1 }, { nonimmediate_operand, "=m,?f#rY,?r#fY,?Y#rf", DFmode, 0, 1 }, { register_operand, "f,f,f,f", XFmode, 0, 1 }, { memory_operand, "=X,m,m,m", DFmode, 0, 1 }, { memory_operand, "=m", DFmode, 0, 1 }, { register_operand, "f", XFmode, 0, 1 }, { nonimmediate_operand, "=m,?r", DImode, 0, 1 }, { register_operand, "f,f", VOIDmode, 0, 1 }, { memory_operand, "m,m", HImode, 0, 1 }, { memory_operand, "m,m", HImode, 0, 1 }, { memory_operand, "=m,m", DImode, 0, 1 }, { scratch_operand, "=&1f,&1f", DFmode, 0, 0 }, { memory_operand, "=m", DImode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { memory_operand, "m", HImode, 0, 1 }, { memory_operand, "m", HImode, 0, 1 }, { scratch_operand, "=&1f", DFmode, 0, 0 }, { register_operand, "=r,r", DImode, 0, 1 }, { nonimmediate_operand, "x,xm", SFmode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { nonimmediate_operand, "Y,Ym", DFmode, 0, 1 }, { nonimmediate_operand, "=m,?r", SImode, 0, 1 }, { register_operand, "f,f", VOIDmode, 0, 1 }, { memory_operand, "m,m", HImode, 0, 1 }, { memory_operand, "m,m", HImode, 0, 1 }, { memory_operand, "=m,m", SImode, 0, 1 }, { memory_operand, "=m", SImode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { memory_operand, "m", HImode, 0, 1 }, { memory_operand, "m", HImode, 0, 1 }, { register_operand, "=r,r", SImode, 0, 1 }, { nonimmediate_operand, "x,xm", SFmode, 0, 1 }, { register_operand, "=r,r", SImode, 0, 1 }, { nonimmediate_operand, "Y,Ym", DFmode, 0, 1 }, { nonimmediate_operand, "=m,?r", HImode, 0, 1 }, { register_operand, "f,f", VOIDmode, 0, 1 }, { memory_operand, "m,m", HImode, 0, 1 }, { memory_operand, "m,m", HImode, 0, 1 }, { memory_operand, "=m,m", HImode, 0, 1 }, { memory_operand, "=m", HImode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { memory_operand, "m", HImode, 0, 1 }, { memory_operand, "m", HImode, 0, 1 }, { register_operand, "=f,f", SFmode, 0, 1 }, { nonimmediate_operand, "m,r", HImode, 0, 1 }, { register_operand, "=f#x,?f#x,x#f,x#f", SFmode, 0, 1 }, { nonimmediate_operand, "m,r,r,mr", SImode, 0, 1 }, { register_operand, "=x,x", SFmode, 0, 1 }, { nonimmediate_operand, "r,mr", SImode, 0, 1 }, { register_operand, "=f,?f", SFmode, 0, 1 }, { nonimmediate_operand, "m,r", DImode, 0, 1 }, { register_operand, "=f#x,?f#x,x#f,x#f", SFmode, 0, 1 }, { nonimmediate_operand, "m,r,r,mr", DImode, 0, 1 }, { register_operand, "=x,x", SFmode, 0, 1 }, { nonimmediate_operand, "r,mr", DImode, 0, 1 }, 
{ register_operand, "=f,f", DFmode, 0, 1 }, { nonimmediate_operand, "m,r", HImode, 0, 1 }, { register_operand, "=f#Y,?f#Y,Y#f,Y#f", DFmode, 0, 1 }, { nonimmediate_operand, "m,r,r,mr", SImode, 0, 1 }, { register_operand, "=Y,Y", DFmode, 0, 1 }, { nonimmediate_operand, "r,mr", SImode, 0, 1 }, { register_operand, "=f,?f", DFmode, 0, 1 }, { nonimmediate_operand, "m,r", DImode, 0, 1 }, { register_operand, "=f#Y,?f#Y,Y#f,Y#f", DFmode, 0, 1 }, { nonimmediate_operand, "m,r,r,mr", DImode, 0, 1 }, { register_operand, "=Y,Y", DFmode, 0, 1 }, { nonimmediate_operand, "r,mr", DImode, 0, 1 }, { register_operand, "=f,f", XFmode, 0, 1 }, { nonimmediate_operand, "m,r", HImode, 0, 1 }, { register_operand, "=f,f", XFmode, 0, 1 }, { nonimmediate_operand, "m,r", SImode, 0, 1 }, { register_operand, "=f,f", XFmode, 0, 1 }, { nonimmediate_operand, "m,r", DImode, 0, 1 }, { nonimmediate_operand, "=r,o", DImode, 0, 1 }, { nonimmediate_operand, "%0,0", DImode, 0, 1 }, { general_operand, "roiF,riF", DImode, 0, 1 }, { nonimmediate_operand, "=rm,r", DImode, 0, 1 }, { nonimmediate_operand, "%0,0", DImode, 0, 1 }, { x86_64_general_operand, "re,rm", DImode, 0, 1 }, { ix86_carry_flag_operator, "", DImode, 0, 1 }, { nonimmediate_operand, "=qm,q", QImode, 0, 1 }, { nonimmediate_operand, "%0,0", QImode, 0, 1 }, { general_operand, "qi,qm", QImode, 0, 1 }, { ix86_carry_flag_operator, "", QImode, 0, 1 }, { nonimmediate_operand, "=rm,r", HImode, 0, 1 }, { nonimmediate_operand, "%0,0", HImode, 0, 1 }, { general_operand, "ri,rm", HImode, 0, 1 }, { ix86_carry_flag_operator, "", HImode, 0, 1 }, { nonimmediate_operand, "=rm,r", SImode, 0, 1 }, { nonimmediate_operand, "%0,0", SImode, 0, 1 }, { general_operand, "ri,rm", SImode, 0, 1 }, { ix86_carry_flag_operator, "", SImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { nonimmediate_operand, "%0", SImode, 0, 1 }, { general_operand, "rim", SImode, 0, 1 }, { ix86_carry_flag_operator, "", SImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { no_seg_address_operand, "p", SImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { no_seg_address_operand, "p", DImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { no_seg_address_operand, "p", DImode, 0, 1 }, { register_operand, "=r", VOIDmode, 0, 1 }, { index_register_operand, "r", VOIDmode, 0, 1 }, { register_operand, "r", VOIDmode, 0, 1 }, { immediate_operand, "i", VOIDmode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { index_register_operand, "r", SImode, 0, 1 }, { register_operand, "r", SImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { register_operand, "=r", VOIDmode, 0, 1 }, { index_register_operand, "r", VOIDmode, 0, 1 }, { const248_operand, "i", VOIDmode, 0, 1 }, { nonmemory_operand, "ri", VOIDmode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { index_register_operand, "r", SImode, 0, 1 }, { const248_operand, "n", SImode, 0, 1 }, { nonmemory_operand, "ri", SImode, 0, 1 }, { register_operand, "=r", VOIDmode, 0, 1 }, { index_register_operand, "r", VOIDmode, 0, 1 }, { const248_operand, "i", VOIDmode, 0, 1 }, { register_operand, "r", VOIDmode, 0, 1 }, { immediate_operand, "i", VOIDmode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { index_register_operand, "r", SImode, 0, 1 }, { const248_operand, "n", SImode, 0, 1 }, { register_operand, "r", SImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { nonimmediate_operand, "=r,rm,r", DImode, 0, 1 }, { nonimmediate_operand, "%0,0,r", DImode, 0, 1 }, { x86_64_general_operand, "rme,re,re", DImode, 0, 1 }, { nonimmediate_operand, "=r,rm", DImode, 0, 1 
}, { nonimmediate_operand, "%0,0", DImode, 0, 1 }, { x86_64_general_operand, "rme,re", DImode, 0, 1 }, { scratch_operand, "=r", DImode, 0, 0 }, { x86_64_general_operand, "%0", DImode, 0, 1 }, { x86_64_general_operand, "rme", DImode, 0, 1 }, { scratch_operand, "=rm", DImode, 0, 0 }, { nonimmediate_operand, "0", DImode, 0, 1 }, { x86_64_immediate_operand, "e", DImode, 0, 1 }, { scratch_operand, "=r", DImode, 0, 0 }, { nonimmediate_operand, "%0", DImode, 0, 1 }, { x86_64_general_operand, "rme", DImode, 0, 1 }, { nonimmediate_operand, "=r,rm,r", SImode, 0, 1 }, { nonimmediate_operand, "%0,0,r", SImode, 0, 1 }, { general_operand, "rmni,rni,rni", SImode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { nonimmediate_operand, "%0,r", SImode, 0, 1 }, { general_operand, "rmni,rni", SImode, 0, 1 }, { nonimmediate_operand, "=r,rm", SImode, 0, 1 }, { nonimmediate_operand, "%0,0", SImode, 0, 1 }, { general_operand, "rmni,rni", SImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { nonimmediate_operand, "%0", SImode, 0, 1 }, { general_operand, "rmni", SImode, 0, 1 }, { scratch_operand, "=r", SImode, 0, 0 }, { nonimmediate_operand, "%0", SImode, 0, 1 }, { general_operand, "rmni", SImode, 0, 1 }, { scratch_operand, "=rm", SImode, 0, 0 }, { nonimmediate_operand, "0", SImode, 0, 1 }, { const_int_operand, "n", SImode, 0, 1 }, { nonimmediate_operand, "=rm,r,r", HImode, 0, 1 }, { nonimmediate_operand, "%0,0,r", HImode, 0, 1 }, { general_operand, "ri,rm,rni", HImode, 0, 1 }, { nonimmediate_operand, "=r,rm", HImode, 0, 1 }, { nonimmediate_operand, "%0,0", HImode, 0, 1 }, { general_operand, "rmni,rni", HImode, 0, 1 }, { scratch_operand, "=r", HImode, 0, 0 }, { nonimmediate_operand, "%0", HImode, 0, 1 }, { general_operand, "rmni", HImode, 0, 1 }, { scratch_operand, "=rm", HImode, 0, 0 }, { nonimmediate_operand, "0", HImode, 0, 1 }, { const_int_operand, "n", HImode, 0, 1 }, { nonimmediate_operand, "=qm,q,r,r", QImode, 0, 1 }, { nonimmediate_operand, "%0,0,0,r", QImode, 0, 1 }, { general_operand, "qn,qmn,rn,rn", QImode, 0, 1 }, { nonimmediate_operand, "=qm,q,r", QImode, 0, 1 }, { nonimmediate_operand, "%0,0,0", QImode, 0, 1 }, { general_operand, "qn,qmn,rn", QImode, 0, 1 }, { nonimmediate_operand, "+qm,q", QImode, 1, 1 }, { general_operand, "qn,qnm", QImode, 0, 1 }, { nonimmediate_operand, "=q,qm", QImode, 0, 1 }, { nonimmediate_operand, "%0,0", QImode, 0, 1 }, { general_operand, "qmni,qni", QImode, 0, 1 }, { scratch_operand, "=q", QImode, 0, 0 }, { nonimmediate_operand, "%0", QImode, 0, 1 }, { general_operand, "qmni", QImode, 0, 1 }, { scratch_operand, "=qm", QImode, 0, 0 }, { nonimmediate_operand, "0", QImode, 0, 1 }, { const_int_operand, "n", QImode, 0, 1 }, { ext_register_operand, "=Q", VOIDmode, 0, 1 }, { ext_register_operand, "0", VOIDmode, 0, 1 }, { general_operand, "Qmn", QImode, 0, 1 }, { ext_register_operand, "=Q", VOIDmode, 0, 1 }, { ext_register_operand, "0", VOIDmode, 0, 1 }, { nonmemory_operand, "Qn", QImode, 0, 1 }, { ext_register_operand, "=Q", VOIDmode, 0, 1 }, { ext_register_operand, "%0", VOIDmode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { nonimmediate_operand, "=r,o", DImode, 0, 1 }, { nonimmediate_operand, "0,0", DImode, 0, 1 }, { general_operand, "roiF,riF", DImode, 0, 1 }, { nonimmediate_operand, "=rm,r", DImode, 0, 1 }, { nonimmediate_operand, "0,0", DImode, 0, 1 }, { x86_64_general_operand, "re,rm", DImode, 0, 1 }, { ix86_carry_flag_operator, "", DImode, 0, 1 }, { nonimmediate_operand, "=qm,q", QImode, 0, 1 }, { nonimmediate_operand, "0,0", QImode, 0, 1 }, { 
general_operand, "qi,qm", QImode, 0, 1 }, { ix86_carry_flag_operator, "", QImode, 0, 1 }, { nonimmediate_operand, "=rm,r", HImode, 0, 1 }, { nonimmediate_operand, "0,0", HImode, 0, 1 }, { general_operand, "ri,rm", HImode, 0, 1 }, { ix86_carry_flag_operator, "", HImode, 0, 1 }, { nonimmediate_operand, "=rm,r", SImode, 0, 1 }, { nonimmediate_operand, "0,0", SImode, 0, 1 }, { general_operand, "ri,rm", SImode, 0, 1 }, { ix86_carry_flag_operator, "", SImode, 0, 1 }, { register_operand, "=rm,r", DImode, 0, 1 }, { register_operand, "0,0", SImode, 0, 1 }, { general_operand, "ri,rm", SImode, 0, 1 }, { ix86_carry_flag_operator, "", SImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { general_operand, "rim", SImode, 0, 1 }, { nonimmediate_operand, "=qm,q", QImode, 0, 1 }, { nonimmediate_operand, "0,0", QImode, 0, 1 }, { general_operand, "qn,qmn", QImode, 0, 1 }, { nonimmediate_operand, "+qm,q", QImode, 1, 1 }, { general_operand, "qn,qmn", QImode, 0, 1 }, { nonimmediate_operand, "=qm,q", HImode, 0, 1 }, { nonimmediate_operand, "0,0", QImode, 0, 1 }, { general_operand, "qi,qm", QImode, 0, 1 }, { register_operand, "=r,r,r", DImode, 0, 1 }, { nonimmediate_operand, "%rm,rm,0", DImode, 0, 1 }, { x86_64_general_operand, "K,e,mr", DImode, 0, 1 }, { register_operand, "=r,r,r", SImode, 0, 1 }, { nonimmediate_operand, "%rm,rm,0", SImode, 0, 1 }, { general_operand, "K,i,mr", SImode, 0, 1 }, { register_operand, "=r,r,r", DImode, 0, 1 }, { nonimmediate_operand, "%rm,rm,0", SImode, 0, 1 }, { general_operand, "K,i,mr", SImode, 0, 1 }, { register_operand, "=r,r,r", HImode, 0, 1 }, { nonimmediate_operand, "%rm,rm,0", HImode, 0, 1 }, { general_operand, "K,i,mr", HImode, 0, 1 }, { register_operand, "=a", QImode, 0, 1 }, { nonimmediate_operand, "%0", QImode, 0, 1 }, { nonimmediate_operand, "qm", QImode, 0, 1 }, { register_operand, "=a", HImode, 0, 1 }, { nonimmediate_operand, "%0", QImode, 0, 1 }, { nonimmediate_operand, "qm", QImode, 0, 1 }, { register_operand, "=A", TImode, 0, 1 }, { nonimmediate_operand, "%0", DImode, 0, 1 }, { nonimmediate_operand, "rm", DImode, 0, 1 }, { register_operand, "=A", DImode, 0, 1 }, { nonimmediate_operand, "%0", SImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { register_operand, "=d", DImode, 0, 1 }, { nonimmediate_operand, "%a", DImode, 0, 1 }, { nonimmediate_operand, "rm", DImode, 0, 1 }, { scratch_operand, "=1", DImode, 0, 0 }, { register_operand, "=d", SImode, 0, 1 }, { nonimmediate_operand, "%a", SImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { scratch_operand, "=1", SImode, 0, 0 }, { register_operand, "=d", DImode, 0, 1 }, { nonimmediate_operand, "%a", SImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { scratch_operand, "=1", SImode, 0, 0 }, { register_operand, "=a", QImode, 0, 1 }, { register_operand, "0", HImode, 0, 1 }, { nonimmediate_operand, "qm", QImode, 0, 1 }, { register_operand, "=&a,?a", DImode, 0, 1 }, { register_operand, "=&d,&d", DImode, 0, 1 }, { register_operand, "1,0", DImode, 0, 1 }, { nonimmediate_operand, "rm,rm", DImode, 0, 1 }, { register_operand, "=a", DImode, 0, 1 }, { register_operand, "=&d", DImode, 0, 1 }, { register_operand, "a", DImode, 0, 1 }, { nonimmediate_operand, "rm", DImode, 0, 1 }, { register_operand, "=a", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { nonimmediate_operand, "rm", DImode, 0, 1 }, { register_operand, "=d", DImode, 0, 1 }, { register_operand, "3", DImode, 0, 1 }, { register_operand, "=&a,?a", SImode, 0, 1 }, { register_operand, 
"=&d,&d", SImode, 0, 1 }, { register_operand, "1,0", SImode, 0, 1 }, { nonimmediate_operand, "rm,rm", SImode, 0, 1 }, { register_operand, "=a", SImode, 0, 1 }, { register_operand, "=&d", SImode, 0, 1 }, { register_operand, "a", SImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { register_operand, "=a", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { register_operand, "=d", SImode, 0, 1 }, { register_operand, "3", SImode, 0, 1 }, { register_operand, "=a", HImode, 0, 1 }, { register_operand, "0", HImode, 0, 1 }, { nonimmediate_operand, "rm", HImode, 0, 1 }, { register_operand, "=&d", HImode, 0, 1 }, { register_operand, "=a", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { nonimmediate_operand, "rm", DImode, 0, 1 }, { register_operand, "=&d", DImode, 0, 1 }, { register_operand, "=a", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { register_operand, "=&d", SImode, 0, 1 }, { register_operand, "=a", HImode, 0, 1 }, { register_operand, "0", HImode, 0, 1 }, { nonimmediate_operand, "rm", HImode, 0, 1 }, { register_operand, "=d", HImode, 0, 1 }, { register_operand, "3", HImode, 0, 1 }, { nonimmediate_operand, "%!*a,r,!*a,r,rm", DImode, 0, 1 }, { x86_64_szext_general_operand, "Z,Z,e,e,re", DImode, 0, 1 }, { nonimmediate_operand, "%!*a,r,rm", SImode, 0, 1 }, { general_operand, "in,in,rin", SImode, 0, 1 }, { nonimmediate_operand, "%!*a,r,rm", HImode, 0, 1 }, { general_operand, "n,n,rn", HImode, 0, 1 }, { nonimmediate_operand, "%!*a,q,qm,r", QImode, 0, 1 }, { general_operand, "n,n,qn,n", QImode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { const_int_operand, "n", VOIDmode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { general_operand, "Qm", QImode, 0, 1 }, { nonimmediate_operand, "rm", VOIDmode, 0, 1 }, { const_int_operand, "", SImode, 0, 1 }, { const_int_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "rm", VOIDmode, 0, 1 }, { const_int_operand, "", DImode, 0, 1 }, { const_int_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "=r,rm,r,r", DImode, 0, 1 }, { nonimmediate_operand, "%0,0,0,qm", DImode, 0, 1 }, { x86_64_szext_general_operand, "Z,re,rm,L", DImode, 0, 1 }, { nonimmediate_operand, "=r,r,rm", DImode, 0, 1 }, { nonimmediate_operand, "%0,0,0", DImode, 0, 1 }, { x86_64_szext_general_operand, "Z,rem,re", DImode, 0, 1 }, { nonimmediate_operand, "=rm,r,r", SImode, 0, 1 }, { nonimmediate_operand, "%0,0,qm", SImode, 0, 1 }, { general_operand, "ri,rm,L", SImode, 0, 1 }, { nonimmediate_operand, "=r,rm", SImode, 0, 1 }, { nonimmediate_operand, "%0,0", SImode, 0, 1 }, { general_operand, "rim,ri", SImode, 0, 1 }, { nonimmediate_operand, "=rm,r,r", HImode, 0, 1 }, { nonimmediate_operand, "%0,0,qm", HImode, 0, 1 }, { general_operand, "ri,rm,L", HImode, 0, 1 }, { nonimmediate_operand, "=r,rm", HImode, 0, 1 }, { nonimmediate_operand, "%0,0", HImode, 0, 1 }, { general_operand, "rim,ri", HImode, 0, 1 }, { nonimmediate_operand, "=qm,q,r", QImode, 0, 1 }, { nonimmediate_operand, "%0,0,0", QImode, 0, 1 }, { general_operand, "qi,qmi,ri", QImode, 0, 1 }, { nonimmediate_operand, "+qm,q", QImode, 1, 1 }, { general_operand, "qi,qmi", QImode, 0, 1 }, { nonimmediate_operand, "=q,qm,*r", QImode, 0, 1 }, { nonimmediate_operand, "%0,0,0", QImode, 0, 1 }, { general_operand, "qim,qi,i", QImode, 0, 1 }, { nonimmediate_operand, "+q,qm", QImode, 0, 1 }, { nonimmediate_operand, "qmi,qi", QImode, 0, 1 }, { ext_register_operand, "=Q", VOIDmode, 0, 1 }, { 
ext_register_operand, "0", VOIDmode, 0, 1 }, { const_int_operand, "n", VOIDmode, 0, 1 }, { ext_register_operand, "=Q", VOIDmode, 0, 1 }, { ext_register_operand, "0", VOIDmode, 0, 1 }, { general_operand, "Qm", QImode, 0, 1 }, { ext_register_operand, "=Q", VOIDmode, 0, 1 }, { ext_register_operand, "0", VOIDmode, 0, 1 }, { ext_register_operand, "Q", VOIDmode, 0, 1 }, { nonimmediate_operand, "=rm,r", DImode, 0, 1 }, { nonimmediate_operand, "%0,0", DImode, 0, 1 }, { x86_64_general_operand, "re,rme", DImode, 0, 1 }, { nonimmediate_operand, "=r,rm", DImode, 0, 1 }, { nonimmediate_operand, "%0,0", DImode, 0, 1 }, { x86_64_general_operand, "rem,re", DImode, 0, 1 }, { scratch_operand, "=r", DImode, 0, 0 }, { nonimmediate_operand, "%0", DImode, 0, 1 }, { x86_64_general_operand, "rem", DImode, 0, 1 }, { nonimmediate_operand, "=rm,r", SImode, 0, 1 }, { nonimmediate_operand, "%0,0", SImode, 0, 1 }, { general_operand, "ri,rmi", SImode, 0, 1 }, { register_operand, "=rm", DImode, 0, 1 }, { nonimmediate_operand, "%0", SImode, 0, 1 }, { general_operand, "rim", SImode, 0, 1 }, { register_operand, "=rm", DImode, 0, 1 }, { register_operand, "%0", SImode, 0, 1 }, { x86_64_zext_immediate_operand, "Z", DImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { nonimmediate_operand, "%0", SImode, 0, 1 }, { x86_64_zext_immediate_operand, "Z", VOIDmode, 0, 1 }, { scratch_operand, "=r", SImode, 0, 0 }, { nonimmediate_operand, "%0", SImode, 0, 1 }, { general_operand, "rim", SImode, 0, 1 }, { nonimmediate_operand, "=r,m", HImode, 0, 1 }, { nonimmediate_operand, "%0,0", HImode, 0, 1 }, { general_operand, "rmi,ri", HImode, 0, 1 }, { scratch_operand, "=r", HImode, 0, 0 }, { nonimmediate_operand, "%0", HImode, 0, 1 }, { general_operand, "rim", HImode, 0, 1 }, { nonimmediate_operand, "=q,m,r", QImode, 0, 1 }, { nonimmediate_operand, "%0,0,0", QImode, 0, 1 }, { general_operand, "qmi,qi,ri", QImode, 0, 1 }, { nonimmediate_operand, "+q,m", QImode, 1, 1 }, { general_operand, "qmi,qi", QImode, 0, 1 }, { nonimmediate_operand, "=q,qm", QImode, 0, 1 }, { nonimmediate_operand, "%0,0", QImode, 0, 1 }, { general_operand, "qim,qi", QImode, 0, 1 }, { nonimmediate_operand, "+q,qm", QImode, 0, 1 }, { general_operand, "qim,qi", QImode, 0, 1 }, { scratch_operand, "=q", QImode, 0, 0 }, { nonimmediate_operand, "%0", QImode, 0, 1 }, { general_operand, "qim", QImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { register_operand, "%0", SImode, 0, 1 }, { x86_64_zext_immediate_operand, "Z", DImode, 0, 1 }, { ext_register_operand, "=q", VOIDmode, 0, 1 }, { ext_register_operand, "0", VOIDmode, 0, 1 }, { general_operand, "qmn", QImode, 0, 1 }, { nonimmediate_operand, "=ro", DImode, 0, 1 }, { general_operand, "0", DImode, 0, 1 }, { nonimmediate_operand, "=rm", DImode, 0, 1 }, { nonimmediate_operand, "0", DImode, 0, 1 }, { nonimmediate_operand, "=rm", SImode, 0, 1 }, { nonimmediate_operand, "0", SImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { nonimmediate_operand, "=rm", HImode, 0, 1 }, { nonimmediate_operand, "0", HImode, 0, 1 }, { nonimmediate_operand, "=qm", QImode, 0, 1 }, { nonimmediate_operand, "0", QImode, 0, 1 }, { memory_operand, "=m", SFmode, 0, 1 }, { memory_operand, "0", SFmode, 0, 1 }, { nonimmediate_operand, "=x#fr,x#fr,f#xr,rm#xf", SFmode, 0, 1 }, { nonimmediate_operand, "0,x#fr,0,0", SFmode, 0, 1 }, { nonimmediate_operand, "xm,0,xm*r,xm*r", V4SFmode, 0, 1 }, { nonimmediate_operand, "=f#r,rm#f", SFmode, 0, 1 }, { nonimmediate_operand, "0,0", SFmode, 0, 1 }, { memory_operand, 
"=m", DFmode, 0, 1 }, { memory_operand, "0", DFmode, 0, 1 }, { nonimmediate_operand, "=Y#fr,Y#fr,f#Yr,rm#Yf", DFmode, 0, 1 }, { nonimmediate_operand, "0,Y#fr,0,0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym,0,Ym*r,Ym*r", V2DFmode, 0, 1 }, { nonimmediate_operand, "=Y#f,Y#f,fm#Y", DFmode, 0, 1 }, { nonimmediate_operand, "0,Y#fr,0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym,0,Ym*r", V2DFmode, 0, 1 }, { nonimmediate_operand, "=f#r,rm#f", DFmode, 0, 1 }, { nonimmediate_operand, "0,0", DFmode, 0, 1 }, { nonimmediate_operand, "=f,mf", DFmode, 0, 1 }, { nonimmediate_operand, "0,0", DFmode, 0, 1 }, { nonimmediate_operand, "=f#r,rm#f", XFmode, 0, 1 }, { nonimmediate_operand, "0,0", XFmode, 0, 1 }, { register_operand, "=f", SFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { register_operand, "=f", DFmode, 0, 1 }, { register_operand, "0", DFmode, 0, 1 }, { register_operand, "=f", DFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { register_operand, "=f", XFmode, 0, 1 }, { register_operand, "0", XFmode, 0, 1 }, { register_operand, "=f", XFmode, 0, 1 }, { register_operand, "0", DFmode, 0, 1 }, { register_operand, "=f", XFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { nonimmediate_operand, "=Y#fr,Y#fr,mf#Yr,mr#Yf", DFmode, 0, 1 }, { nonimmediate_operand, "0,Y#fr,0,0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym,0,Ym*r,Ym*r", V2DFmode, 0, 1 }, { nonimmediate_operand, "=Y#fr,Y#fr,mf#Yr", DFmode, 0, 1 }, { nonimmediate_operand, "0,Y#fr,0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym,0,Ym*r", V2DFmode, 0, 1 }, { nonimmediate_operand, "=qm,r", QImode, 0, 1 }, { nonimmediate_operand, "0,0", QImode, 0, 1 }, { nonimmediate_operand, "=rm,r", DImode, 0, 1 }, { nonimmediate_operand, "0,r", DImode, 0, 1 }, { nonmemory_operand, "cJ,M", QImode, 0, 1 }, { nonimmediate_operand, "=rm", DImode, 0, 1 }, { nonimmediate_operand, "0", DImode, 0, 1 }, { immediate_operand, "e", QImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { nonmemory_operand, "Jc", QImode, 0, 1 }, { scratch_operand, "=&r", SImode, 0, 0 }, { nonimmediate_operand, "+r*m,r*m", SImode, 0, 1 }, { register_operand, "r,r", SImode, 0, 1 }, { nonmemory_operand, "I,c", QImode, 0, 1 }, { nonimmediate_operand, "=rm,r", SImode, 0, 1 }, { nonimmediate_operand, "0,r", SImode, 0, 1 }, { nonmemory_operand, "cI,M", QImode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { register_operand, "0,r", SImode, 0, 1 }, { nonmemory_operand, "cI,M", QImode, 0, 1 }, { nonimmediate_operand, "=rm", SImode, 0, 1 }, { nonimmediate_operand, "0", SImode, 0, 1 }, { const_int_1_31_operand, "I", QImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { const_int_1_31_operand, "I", QImode, 0, 1 }, { nonimmediate_operand, "=rm,r", HImode, 0, 1 }, { nonimmediate_operand, "0,r", HImode, 0, 1 }, { nonmemory_operand, "cI,M", QImode, 0, 1 }, { nonimmediate_operand, "=rm", HImode, 0, 1 }, { nonimmediate_operand, "0", HImode, 0, 1 }, { nonmemory_operand, "cI", QImode, 0, 1 }, { nonimmediate_operand, "=rm", HImode, 0, 1 }, { nonimmediate_operand, "0", HImode, 0, 1 }, { const_int_1_31_operand, "I", QImode, 0, 1 }, { nonimmediate_operand, "=qm,r,r", QImode, 0, 1 }, { nonimmediate_operand, "0,0,r", QImode, 0, 1 }, { nonmemory_operand, "cI,cI,M", QImode, 0, 1 }, { nonimmediate_operand, "=qm,r", QImode, 0, 1 }, { nonimmediate_operand, "0,0", QImode, 0, 1 }, { nonmemory_operand, "cI,cI", QImode, 0, 1 }, { nonimmediate_operand, "=qm", QImode, 0, 1 }, { nonimmediate_operand, "0", QImode, 
0, 1 }, { const_int_1_31_operand, "I", QImode, 0, 1 }, { nonimmediate_operand, "=*d,rm", DImode, 0, 1 }, { nonimmediate_operand, "*a,0", DImode, 0, 1 }, { const_int_operand, "i,i", DImode, 0, 1 }, { nonimmediate_operand, "=rm", DImode, 0, 1 }, { nonimmediate_operand, "0", DImode, 0, 1 }, { const1_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "=rm,rm", DImode, 0, 1 }, { nonimmediate_operand, "0,0", DImode, 0, 1 }, { nonmemory_operand, "J,c", QImode, 0, 1 }, { nonimmediate_operand, "=rm", DImode, 0, 1 }, { nonimmediate_operand, "0", DImode, 0, 1 }, { const_int_operand, "n", QImode, 0, 1 }, { nonimmediate_operand, "=*d,rm", SImode, 0, 1 }, { nonimmediate_operand, "*a,0", SImode, 0, 1 }, { const_int_operand, "i,i", SImode, 0, 1 }, { register_operand, "=*d,r", DImode, 0, 1 }, { register_operand, "*a,0", SImode, 0, 1 }, { const_int_operand, "i,i", SImode, 0, 1 }, { nonimmediate_operand, "=rm", SImode, 0, 1 }, { nonimmediate_operand, "0", SImode, 0, 1 }, { const1_operand, "", QImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { const1_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "=rm,rm", SImode, 0, 1 }, { nonimmediate_operand, "0,0", SImode, 0, 1 }, { nonmemory_operand, "I,c", QImode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { register_operand, "0,0", SImode, 0, 1 }, { nonmemory_operand, "I,c", QImode, 0, 1 }, { nonimmediate_operand, "=rm", HImode, 0, 1 }, { nonimmediate_operand, "0", HImode, 0, 1 }, { const1_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "=rm,rm", HImode, 0, 1 }, { nonimmediate_operand, "0,0", HImode, 0, 1 }, { nonmemory_operand, "I,c", QImode, 0, 1 }, { nonimmediate_operand, "=qm", QImode, 0, 1 }, { nonimmediate_operand, "0", QImode, 0, 1 }, { const1_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "+qm", QImode, 1, 1 }, { const1_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "=qm,qm", QImode, 0, 1 }, { nonimmediate_operand, "0,0", QImode, 0, 1 }, { nonmemory_operand, "I,c", QImode, 0, 1 }, { nonimmediate_operand, "+qm,qm", QImode, 1, 1 }, { nonmemory_operand, "I,c", QImode, 0, 1 }, { nonimmediate_operand, "=qm", QImode, 0, 1 }, { nonimmediate_operand, "0", QImode, 0, 1 }, { const1_operand, "I", QImode, 0, 1 }, { nonimmediate_operand, "=rm", DImode, 0, 1 }, { nonimmediate_operand, "0", DImode, 0, 1 }, { const_int_operand, "e", QImode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { nonimmediate_operand, "0,0", SImode, 0, 1 }, { nonmemory_operand, "I,c", QImode, 0, 1 }, { nonimmediate_operand, "=rm,rm", DImode, 0, 1 }, { nonimmediate_operand, "0,0", DImode, 0, 1 }, { nonmemory_operand, "e,c", QImode, 0, 1 }, { nonimmediate_operand, "=qm", QImode, 0, 1 }, { ix86_comparison_operator, "", QImode, 0, 0 }, { nonimmediate_operand, "+qm", QImode, 1, 1 }, { ix86_comparison_operator, "", QImode, 0, 0 }, { register_operand, "=x", SFmode, 0, 1 }, { sse_comparison_operator, "", SFmode, 0, 0 }, { register_operand, "0", SFmode, 0, 1 }, { nonimmediate_operand, "xm", SFmode, 0, 1 }, { register_operand, "=Y", DFmode, 0, 1 }, { sse_comparison_operator, "", DFmode, 0, 0 }, { register_operand, "0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym", DFmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { ix86_comparison_operator, "", VOIDmode, 0, 0 }, { comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "f", VOIDmode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "f#x,x#f", VOIDmode, 0, 1 }, { 
nonimmediate_operand, "f#x,xm#f", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "x", VOIDmode, 0, 1 }, { nonimmediate_operand, "xm", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "f", VOIDmode, 0, 1 }, { nonimmediate_operand, "fm", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { scratch_operand, "=a", HImode, 0, 0 }, { comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "f", VOIDmode, 0, 1 }, { register_operand, "f", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { scratch_operand, "=a", HImode, 0, 0 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "rm", DImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "c,?*r,?*r", SImode, 0, 1 }, { nonimmediate_operand, "=1,1,*m*r", SImode, 0, 1 }, { scratch_operand, "=X,X,r", SImode, 0, 0 }, { constant_call_address_operand, "", SImode, 0, 1 }, { 0, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { call_insn_operand, "rsm", SImode, 0, 1 }, { 0, "", SImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { constant_call_address_operand, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { call_insn_operand, "rsm", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { sibcall_insn_operand, "s,c,d,a", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { call_insn_operand, "rsm", DImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { constant_call_address_operand, "", DImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "c", SImode, 0, 1 }, { register_operand, "c", DImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { scratch_operand, "=&r", SImode, 0, 0 }, { nonimmediate_operand, "=r", SImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { scratch_operand, "=&q", SImode, 0, 0 }, { register_operand, "=r", DImode, 0, 1 }, { nonimmediate_operand, "rm", DImode, 0, 1 }, { scratch_operand, "=&r", DImode, 0, 0 }, { register_operand, "=a", SImode, 0, 1 }, { register_operand, "b", SImode, 0, 1 }, { tls_symbolic_operand, "", SImode, 0, 1 }, { call_insn_operand, "", SImode, 0, 1 }, { scratch_operand, "=d", SImode, 0, 0 }, { scratch_operand, "=c", SImode, 0, 0 }, { register_operand, "=a", DImode, 0, 1 }, { tls_symbolic_operand, "", DImode, 0, 1 }, { call_insn_operand, "", DImode, 0, 1 }, { 0, "", DImode, 0, 1 }, { register_operand, "=a", SImode, 0, 1 }, { register_operand, "b", SImode, 0, 1 }, { call_insn_operand, "", SImode, 0, 1 }, { scratch_operand, "=d", SImode, 0, 0 }, { scratch_operand, "=c", SImode, 0, 0 }, { register_operand, "=a", DImode, 0, 1 }, { call_insn_operand, "", DImode, 0, 1 }, { 0, "", DImode, 0, 1 }, { register_operand, "=a", SImode, 0, 1 }, { register_operand, "b", SImode, 0, 1 }, { call_insn_operand, "", SImode, 0, 1 }, { tls_symbolic_operand, "", SImode, 0, 1 }, { scratch_operand, "=d", SImode, 0, 0 }, { scratch_operand, "=c", SImode, 0, 0 }, { register_operand, "=r", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "=f", SFmode, 0, 1 }, { nonimmediate_operand, "%0", SFmode, 0, 1 }, { nonimmediate_operand, "fm", SFmode, 0, 1 }, { binary_fp_operator, "", SFmode, 0, 0 }, { register_operand, "=f#x,x#f", SFmode, 0, 1 }, { nonimmediate_operand, "%0,0", SFmode, 0, 1 }, { nonimmediate_operand, "fm#x,xm#f", SFmode, 0, 1 }, { binary_fp_operator, "", SFmode, 0, 0 }, { register_operand, "=x", SFmode, 0, 1 }, { nonimmediate_operand, "%0", SFmode, 0, 1 }, { nonimmediate_operand, 
"xm", SFmode, 0, 1 }, { binary_fp_operator, "", SFmode, 0, 0 }, { register_operand, "=f", DFmode, 0, 1 }, { nonimmediate_operand, "%0", DFmode, 0, 1 }, { nonimmediate_operand, "fm", DFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=f#Y,Y#f", DFmode, 0, 1 }, { nonimmediate_operand, "%0,0", DFmode, 0, 1 }, { nonimmediate_operand, "fm#Y,Ym#f", DFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=Y", DFmode, 0, 1 }, { nonimmediate_operand, "%0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym", DFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=f", XFmode, 0, 1 }, { register_operand, "%0", XFmode, 0, 1 }, { register_operand, "f", XFmode, 0, 1 }, { binary_fp_operator, "", XFmode, 0, 0 }, { register_operand, "=f,f", SFmode, 0, 1 }, { nonimmediate_operand, "0,fm", SFmode, 0, 1 }, { nonimmediate_operand, "fm,0", SFmode, 0, 1 }, { binary_fp_operator, "", SFmode, 0, 0 }, { register_operand, "=f,f,x", SFmode, 0, 1 }, { nonimmediate_operand, "0,fm,0", SFmode, 0, 1 }, { nonimmediate_operand, "fm,0,xm#f", SFmode, 0, 1 }, { binary_fp_operator, "", SFmode, 0, 0 }, { register_operand, "=x", SFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { nonimmediate_operand, "xm", SFmode, 0, 1 }, { binary_fp_operator, "", SFmode, 0, 0 }, { register_operand, "=f,f", SFmode, 0, 1 }, { nonimmediate_operand, "m,?r", SImode, 0, 1 }, { register_operand, "0,0", SFmode, 0, 1 }, { binary_fp_operator, "", SFmode, 0, 0 }, { register_operand, "=f,f", SFmode, 0, 1 }, { register_operand, "0,0", SFmode, 0, 1 }, { nonimmediate_operand, "m,?r", SImode, 0, 1 }, { binary_fp_operator, "", SFmode, 0, 0 }, { register_operand, "=f,f", DFmode, 0, 1 }, { nonimmediate_operand, "0,fm", DFmode, 0, 1 }, { nonimmediate_operand, "fm,0", DFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=f#Y,f#Y,Y#f", DFmode, 0, 1 }, { nonimmediate_operand, "0,fm,0", DFmode, 0, 1 }, { nonimmediate_operand, "fm,0,Ym#f", DFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=Y", DFmode, 0, 1 }, { register_operand, "0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym", DFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=f,f", DFmode, 0, 1 }, { nonimmediate_operand, "m,?r", SImode, 0, 1 }, { register_operand, "0,0", DFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=f,f", DFmode, 0, 1 }, { register_operand, "0,0", DFmode, 0, 1 }, { nonimmediate_operand, "m,?r", SImode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=f,f", DFmode, 0, 1 }, { nonimmediate_operand, "fm,0", SFmode, 0, 1 }, { register_operand, "0,f", DFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=f,f", DFmode, 0, 1 }, { register_operand, "0,f", DFmode, 0, 1 }, { nonimmediate_operand, "fm,0", SFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=f,f", DFmode, 0, 1 }, { register_operand, "0,f", SFmode, 0, 1 }, { nonimmediate_operand, "fm,0", SFmode, 0, 1 }, { binary_fp_operator, "", DFmode, 0, 0 }, { register_operand, "=f,f", XFmode, 0, 1 }, { register_operand, "0,f", XFmode, 0, 1 }, { register_operand, "f,0", XFmode, 0, 1 }, { binary_fp_operator, "", XFmode, 0, 0 }, { register_operand, "=f,f", XFmode, 0, 1 }, { nonimmediate_operand, "m,?r", SImode, 0, 1 }, { register_operand, "0,0", XFmode, 0, 1 }, { binary_fp_operator, "", XFmode, 0, 0 }, { register_operand, "=f,f", XFmode, 0, 1 }, { register_operand, "0,0", XFmode, 0, 1 }, { 
nonimmediate_operand, "m,?r", SImode, 0, 1 }, { binary_fp_operator, "", XFmode, 0, 0 }, { register_operand, "=f,f", XFmode, 0, 1 }, { nonimmediate_operand, "fm,0", VOIDmode, 0, 1 }, { register_operand, "0,f", XFmode, 0, 1 }, { binary_fp_operator, "", XFmode, 0, 0 }, { register_operand, "=f,f", XFmode, 0, 1 }, { register_operand, "0,f", XFmode, 0, 1 }, { nonimmediate_operand, "fm,0", VOIDmode, 0, 1 }, { binary_fp_operator, "", XFmode, 0, 0 }, { register_operand, "=f,f", XFmode, 0, 1 }, { register_operand, "0,f", VOIDmode, 0, 1 }, { nonimmediate_operand, "fm,0", VOIDmode, 0, 1 }, { binary_fp_operator, "", XFmode, 0, 0 }, { register_operand, "=f#x,x#f", SFmode, 0, 1 }, { nonimmediate_operand, "0#x,xm#f", SFmode, 0, 1 }, { register_operand, "=x", SFmode, 0, 1 }, { nonimmediate_operand, "xm", SFmode, 0, 1 }, { register_operand, "=f#Y,Y#f", DFmode, 0, 1 }, { nonimmediate_operand, "0#Y,Ym#f", DFmode, 0, 1 }, { register_operand, "=Y", DFmode, 0, 1 }, { nonimmediate_operand, "Ym", DFmode, 0, 1 }, { register_operand, "=f", XFmode, 0, 1 }, { register_operand, "=u", XFmode, 0, 1 }, { register_operand, "0", XFmode, 0, 1 }, { register_operand, "1", XFmode, 0, 1 }, { register_operand, "=f", DFmode, 0, 1 }, { register_operand, "=u", DFmode, 0, 1 }, { register_operand, "0", DFmode, 0, 1 }, { register_operand, "=f", SFmode, 0, 1 }, { register_operand, "=u", SFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { register_operand, "=f", DFmode, 0, 1 }, { register_operand, "=u", DFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { register_operand, "=f", DFmode, 0, 1 }, { register_operand, "u", DFmode, 0, 1 }, { register_operand, "0", DFmode, 0, 1 }, { scratch_operand, "=1", DFmode, 0, 0 }, { register_operand, "=f", SFmode, 0, 1 }, { register_operand, "u", SFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { scratch_operand, "=1", SFmode, 0, 0 }, { register_operand, "=f", XFmode, 0, 1 }, { register_operand, "u", XFmode, 0, 1 }, { register_operand, "0", XFmode, 0, 1 }, { scratch_operand, "=1", XFmode, 0, 0 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, "=S", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { register_operand, "1", DImode, 0, 1 }, { register_operand, "=D", SImode, 0, 1 }, { register_operand, "=S", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "1", SImode, 0, 1 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, "=S", DImode, 0, 1 }, { register_operand, "=c", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { register_operand, "1", DImode, 0, 1 }, { register_operand, "2", DImode, 0, 1 }, { register_operand, "=D", SImode, 0, 1 }, { register_operand, "=S", SImode, 0, 1 }, { register_operand, "=c", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "1", SImode, 0, 1 }, { register_operand, "2", SImode, 0, 1 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { register_operand, "a", SImode, 0, 1 }, { register_operand, "=D", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "a", SImode, 0, 1 }, { register_operand, "=D", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "a", HImode, 0, 1 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { register_operand, "a", HImode, 0, 1 }, { register_operand, "=D", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "a", QImode, 0, 1 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, 
"0", DImode, 0, 1 }, { register_operand, "a", QImode, 0, 1 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, "=c", DImode, 0, 1 }, { register_operand, "a", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { register_operand, "1", DImode, 0, 1 }, { register_operand, "=D", SImode, 0, 1 }, { register_operand, "=c", SImode, 0, 1 }, { register_operand, "a", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "1", SImode, 0, 1 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, "=c", DImode, 0, 1 }, { register_operand, "a", SImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { register_operand, "1", DImode, 0, 1 }, { register_operand, "=D", SImode, 0, 1 }, { register_operand, "=c", SImode, 0, 1 }, { register_operand, "a", QImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "1", SImode, 0, 1 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, "=c", DImode, 0, 1 }, { register_operand, "a", QImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { register_operand, "1", DImode, 0, 1 }, { register_operand, "=S", SImode, 0, 1 }, { register_operand, "=D", SImode, 0, 1 }, { register_operand, "=c", SImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "1", SImode, 0, 1 }, { register_operand, "2", SImode, 0, 1 }, { register_operand, "=S", DImode, 0, 1 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, "=c", DImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { register_operand, "1", DImode, 0, 1 }, { register_operand, "2", DImode, 0, 1 }, { register_operand, "=&c", SImode, 0, 1 }, { register_operand, "=D", SImode, 0, 1 }, { register_operand, "a", QImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { register_operand, "0", SImode, 0, 1 }, { register_operand, "1", SImode, 0, 1 }, { register_operand, "=&c", DImode, 0, 1 }, { register_operand, "=D", DImode, 0, 1 }, { register_operand, "a", QImode, 0, 1 }, { immediate_operand, "i", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { register_operand, "1", DImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { ix86_carry_flag_operator, "", VOIDmode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { ix86_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "rm,0", DImode, 0, 1 }, { nonimmediate_operand, "0,rm", DImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { ix86_carry_flag_operator, "", VOIDmode, 0, 1 }, { register_operand, "=r,r", SImode, 0, 1 }, { ix86_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "rm,0", SImode, 0, 1 }, { nonimmediate_operand, "0,rm", SImode, 0, 1 }, { register_operand, "=r,r", HImode, 0, 1 }, { ix86_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "rm,0", HImode, 0, 1 }, { nonimmediate_operand, "0,rm", HImode, 0, 1 }, { register_operand, "=r,r", QImode, 0, 1 }, { ix86_comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "r,0", QImode, 0, 1 }, { register_operand, "0,r", QImode, 0, 1 }, { flags_reg_operand, "", VOIDmode, 0, 1 }, { register_operand, "=f#r,f#r,r#f,r#f", SFmode, 0, 1 }, { fcmov_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "f#r,0,rm#f,0", SFmode, 0, 1 }, { nonimmediate_operand, "0,f#r,0,rm#f", SFmode, 0, 1 }, { register_operand, "=f#r,f#r,&r#f,&r#f", DFmode, 0, 1 }, { fcmov_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "f#r,0,rm#f,0", DFmode, 0, 1 }, { 
nonimmediate_operand, "0,f#r,0,rm#f", DFmode, 0, 1 }, { register_operand, "=f#r,f#r,r#f,r#f", DFmode, 0, 1 }, { fcmov_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "f#r,0#r,rm#f,0#f", DFmode, 0, 1 }, { nonimmediate_operand, "0#r,f#r,0#f,rm#f", DFmode, 0, 1 }, { register_operand, "=f,f", XFmode, 0, 1 }, { fcmov_comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "f,0", XFmode, 0, 1 }, { register_operand, "0,f", XFmode, 0, 1 }, { register_operand, "=x#f,f#x,f#x", SFmode, 0, 1 }, { register_operand, "0,0,f#x", SFmode, 0, 1 }, { nonimmediate_operand, "xm#f,f#x,0", SFmode, 0, 1 }, { register_operand, "=x#f,f#x", SFmode, 0, 1 }, { nonimmediate_operand, "%0,0", SFmode, 0, 1 }, { nonimmediate_operand, "xm#f,f#x", SFmode, 0, 1 }, { register_operand, "=Y#f,f#Y,f#Y", DFmode, 0, 1 }, { register_operand, "0,0,f#Y", DFmode, 0, 1 }, { nonimmediate_operand, "Ym#f,f#Y,0", DFmode, 0, 1 }, { register_operand, "=Y#f,f#Y", DFmode, 0, 1 }, { nonimmediate_operand, "%0,0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym#f,f#Y", DFmode, 0, 1 }, { register_operand, "=r,r", SImode, 0, 1 }, { register_operand, "0,r", SImode, 0, 1 }, { immediate_operand, "i,i", SImode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { register_operand, "0,r", DImode, 0, 1 }, { x86_64_immediate_operand, "e,e", DImode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { register_operand, "0,r", DImode, 0, 1 }, { register_operand, "r,r", DImode, 0, 1 }, { immediate_operand, "i,i", DImode, 0, 1 }, { register_operand, "=&x#rf,x#rf,?f#xr,?f#xr,?f#xr,?f#xr,?r#xf,?r#xf,?r#xf,?r#xf", SFmode, 0, 1 }, { sse_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "x#fr,0#fr,f#fx,0#fx,f#fx,0#fx,rm#rx,0#rx,rm#rx,0#rx", SFmode, 0, 1 }, { nonimmediate_operand, "x#fr,x#fr,0#fx,f#fx,0#fx,f#fx,0#fx,rm#rx,0#rx,rm#rx", SFmode, 0, 1 }, { nonimmediate_operand, "0#fx,x#fx,f#x,f#x,xm#f,xm#f,f#x,f#x,xm#f,xm#f", SFmode, 0, 1 }, { nonimmediate_operand, "xm#f,xm#f,f#x,f#x,x#f,x#f,f#x,f#x,x#f,x#f", SFmode, 0, 1 }, { scratch_operand, "=2,&4,X,X,X,X,X,X,X,X", SFmode, 0, 0 }, { register_operand, "=&x#rf,x#rf,?f#xr,?f#xr,?r#xf,?r#xf", SFmode, 0, 1 }, { nonimmediate_operand, "x#fr,0#fr,0#fx,0#fx,0#rx,0#rx", SFmode, 0, 1 }, { nonimmediate_operand, "x#fr,x#fr,f#fx,f#fx,rm#rx,rm#rx", SFmode, 0, 1 }, { nonimmediate_operand, "%0#fx,x#fx,f#x,xm#f,f#x,xm#f", SFmode, 0, 1 }, { nonimmediate_operand, "xm#f,xm#f,f#x,x#f,f#x,x#f", SFmode, 0, 1 }, { scratch_operand, "=1,&3,X,X,X,X", SFmode, 0, 0 }, { register_operand, "=&Y#rf,Y#rf,?f#Yr,?f#Yr,?f#Yr,?f#Yr,?r#Yf,?r#Yf,?r#Yf,?r#Yf", DFmode, 0, 1 }, { sse_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "Y#fr,0#fr,f#fY,0#fY,f#fY,0#fY,rm#rY,0#rY,rm#rY,0#rY", DFmode, 0, 1 }, { nonimmediate_operand, "Y#fr,Y#fr,0#fY,f#fY,0#fY,f#fY,0#fY,rm#rY,0#rY,rm#rY", DFmode, 0, 1 }, { nonimmediate_operand, "0#fY,Y#fY,f#Y,f#Y,Ym#f,Ym#f,f#Y,f#Y,Ym#f,Ym#f", DFmode, 0, 1 }, { nonimmediate_operand, "Ym#f,Ym#f,f#Y,f#Y,Y#f,Y#f,f#Y,f#Y,Y#f,Y#f", DFmode, 0, 1 }, { scratch_operand, "=2,&4,X,X,X,X,X,X,X,X", DFmode, 0, 0 }, { register_operand, "=&Y#rf,Y#rf,?f#Yr,?f#Yr,?r#Yf,?r#Yf", DFmode, 0, 1 }, { nonimmediate_operand, "Y#fr,0#fr,0#fY,0#fY,0#rY,0#rY", DFmode, 0, 1 }, { nonimmediate_operand, "Y#fr,Y#fr,f#fY,f#fY,rm#rY,rm#rY", DFmode, 0, 1 }, { nonimmediate_operand, "%0#fY,Y#fY,f#Y,Ym#f,f#Y,Ym#f", DFmode, 0, 1 }, { nonimmediate_operand, "Ym#f,Ym#f,f#Y,Y#f,f#Y,Y#f", DFmode, 0, 1 }, { scratch_operand, "=1,&3,X,X,X,X", DFmode, 0, 0 }, { register_operand, "=&x", SFmode, 0, 1 }, { sse_comparison_operator, 
"", VOIDmode, 0, 0 }, { register_operand, "x", SFmode, 0, 1 }, { const0_operand, "X", SFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { nonimmediate_operand, "xm", SFmode, 0, 1 }, { register_operand, "=&x", SFmode, 0, 1 }, { sse_comparison_operator, "", VOIDmode, 0, 0 }, { const0_operand, "X", SFmode, 0, 1 }, { register_operand, "x", SFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { nonimmediate_operand, "xm", SFmode, 0, 1 }, { register_operand, "=&x", SFmode, 0, 1 }, { fcmov_comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "x", SFmode, 0, 1 }, { const0_operand, "X", SFmode, 0, 1 }, { nonimmediate_operand, "xm", SFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { register_operand, "=&x", SFmode, 0, 1 }, { fcmov_comparison_operator, "", VOIDmode, 0, 0 }, { const0_operand, "X", SFmode, 0, 1 }, { register_operand, "x", SFmode, 0, 1 }, { nonimmediate_operand, "xm", SFmode, 0, 1 }, { register_operand, "0", SFmode, 0, 1 }, { register_operand, "=&Y", DFmode, 0, 1 }, { sse_comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "Y", DFmode, 0, 1 }, { const0_operand, "X", DFmode, 0, 1 }, { register_operand, "0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym", DFmode, 0, 1 }, { register_operand, "=&Y", DFmode, 0, 1 }, { sse_comparison_operator, "", VOIDmode, 0, 0 }, { const0_operand, "X", DFmode, 0, 1 }, { register_operand, "Y", DFmode, 0, 1 }, { register_operand, "0", DFmode, 0, 1 }, { nonimmediate_operand, "Ym", DFmode, 0, 1 }, { register_operand, "=&Y", DFmode, 0, 1 }, { fcmov_comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "Y", DFmode, 0, 1 }, { const0_operand, "X", DFmode, 0, 1 }, { nonimmediate_operand, "Ym", DFmode, 0, 1 }, { register_operand, "0", DFmode, 0, 1 }, { register_operand, "=&Y", DFmode, 0, 1 }, { fcmov_comparison_operator, "", VOIDmode, 0, 0 }, { const0_operand, "X", DFmode, 0, 1 }, { register_operand, "Y", DFmode, 0, 1 }, { nonimmediate_operand, "Ym", DFmode, 0, 1 }, { register_operand, "0", DFmode, 0, 1 }, { register_operand, "a", SImode, 0, 1 }, { scratch_operand, "=0", SImode, 0, 0 }, { register_operand, "a", DImode, 0, 1 }, { scratch_operand, "=0", DImode, 0, 0 }, { 0, "", VOIDmode, 0, 1 }, { constant_call_address_operand, "", SImode, 0, 1 }, { 0, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { call_insn_operand, "rsm", SImode, 0, 1 }, { 0, "", SImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { constant_call_address_operand, "", DImode, 0, 1 }, { const_int_operand, "", DImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { sibcall_insn_operand, "s,c,d,a", SImode, 0, 1 }, { 0, "", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { call_insn_operand, "rsm", DImode, 0, 1 }, { 0, "", DImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { constant_call_address_operand, "", DImode, 0, 1 }, { 0, "", DImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", DImode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 0 }, { const_int_operand, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "=x,x,m", V4SFmode, 0, 1 }, { vector_move_operand, "C,xm,x", V4SFmode, 0, 1 }, { nonimmediate_operand, "=x,x,m", V4SImode, 0, 1 }, { vector_move_operand, "C,xm,x", V4SImode, 0, 1 }, { nonimmediate_operand, "=x,x,m", V2DImode, 0, 1 }, { vector_move_operand, "C,xm,x", V2DImode, 0, 1 }, { nonimmediate_operand, "=y,y,m", V8QImode, 0, 1 }, { vector_move_operand, "C,ym,y", V8QImode, 0, 1 }, { nonimmediate_operand, "=y,y,m", V4HImode, 0, 1 }, { vector_move_operand, "C,ym,y", V4HImode, 0, 1 }, { 
nonimmediate_operand, "=y,y,m", V2SImode, 0, 1 }, { vector_move_operand, "C,ym,y", V2SImode, 0, 1 }, { nonimmediate_operand, "=y,y,m", V2SFmode, 0, 1 }, { vector_move_operand, "C,ym,y", V2SFmode, 0, 1 }, { nonimmediate_operand, "=x,x,m", V2DFmode, 0, 1 }, { vector_move_operand, "C,xm,x", V2DFmode, 0, 1 }, { nonimmediate_operand, "=x,x,m", V8HImode, 0, 1 }, { vector_move_operand, "C,xm,x", V8HImode, 0, 1 }, { nonimmediate_operand, "=x,x,m", V16QImode, 0, 1 }, { nonimmediate_operand, "C,xm,x", V16QImode, 0, 1 }, { push_operand, "=<", TImode, 0, 1 }, { register_operand, "x", TImode, 0, 1 }, { push_operand, "=<", V2DFmode, 0, 1 }, { register_operand, "x", V2DFmode, 0, 1 }, { push_operand, "=<", V2DImode, 0, 1 }, { register_operand, "x", V2DImode, 0, 1 }, { push_operand, "=<", V8HImode, 0, 1 }, { register_operand, "x", V8HImode, 0, 1 }, { push_operand, "=<", V16QImode, 0, 1 }, { register_operand, "x", V16QImode, 0, 1 }, { push_operand, "=<", V4SFmode, 0, 1 }, { register_operand, "x", V4SFmode, 0, 1 }, { push_operand, "=<", V4SImode, 0, 1 }, { register_operand, "x", V4SImode, 0, 1 }, { push_operand, "=<", V2SImode, 0, 1 }, { register_operand, "y", V2SImode, 0, 1 }, { push_operand, "=<", V4HImode, 0, 1 }, { register_operand, "y", V4HImode, 0, 1 }, { push_operand, "=<", V8QImode, 0, 1 }, { register_operand, "y", V8QImode, 0, 1 }, { push_operand, "=<", V2SFmode, 0, 1 }, { register_operand, "y", V2SFmode, 0, 1 }, { nonimmediate_operand, "=x,x,m", TImode, 0, 1 }, { vector_move_operand, "C,xm,x", TImode, 0, 1 }, { nonimmediate_operand, "=r,o,x,x,xm", TImode, 0, 1 }, { general_operand, "riFo,riF,C,xm,x", TImode, 0, 1 }, { nonimmediate_operand, "=r,o,x,x,xm", TFmode, 0, 1 }, { general_operand, "riFo,riF,C,xm,x", TFmode, 0, 1 }, { nonimmediate_operand, "=x,m", V4SFmode, 0, 1 }, { nonimmediate_operand, "xm,x", V4SFmode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { register_operand, "x", V4SFmode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { register_operand, "y", V8QImode, 0, 1 }, { register_operand, "D", SImode, 0, 1 }, { register_operand, "y", V8QImode, 0, 1 }, { register_operand, "y", V8QImode, 0, 1 }, { register_operand, "D", DImode, 0, 1 }, { register_operand, "y", V8QImode, 0, 1 }, { register_operand, "y", V8QImode, 0, 1 }, { memory_operand, "=m", V4SFmode, 0, 1 }, { register_operand, "x", V4SFmode, 0, 1 }, { memory_operand, "=m", DImode, 0, 1 }, { register_operand, "y", DImode, 0, 1 }, { register_operand, "=x", V4SFmode, 0, 1 }, { register_operand, "0", V4SFmode, 0, 1 }, { register_operand, "x", V4SFmode, 0, 1 }, { nonimmediate_operand, "=x,m", V4SFmode, 0, 1 }, { nonimmediate_operand, "0,0", V4SFmode, 0, 1 }, { nonimmediate_operand, "m,x", V4SFmode, 0, 1 }, { register_operand, "=x", V4SFmode, 0, 1 }, { memory_operand, "m", SFmode, 0, 1 }, { const0_operand, "X", V4SFmode, 0, 1 }, { memory_operand, "=m", SFmode, 0, 1 }, { register_operand, "x", V4SFmode, 0, 1 }, { register_operand, "=x", V4SFmode, 0, 1 }, { register_operand, "0", V4SFmode, 0, 1 }, { nonimmediate_operand, "xm", V4SFmode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { register_operand, "=x", V4SFmode, 0, 1 }, { nonimmediate_operand, "xm", V4SFmode, 0, 1 }, { register_operand, "0", V4SFmode, 0, 1 }, { register_operand, "=x", V4SFmode, 0, 1 }, { nonimmediate_operand, "%0", V4SFmode, 0, 1 }, { nonimmediate_operand, "xm", V4SFmode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { nonimmediate_operand, "%0", V2DFmode, 0, 1 }, { nonimmediate_operand, "xm", V2DFmode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, 
{ register_operand, "0", V2DFmode, 0, 1 }, { nonimmediate_operand, "xm", V2DFmode, 0, 1 }, { register_operand, "=x", TImode, 0, 1 }, { nonimmediate_operand, "%0", TImode, 0, 1 }, { nonimmediate_operand, "xm", TImode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { nonimmediate_operand, "%0", V2DImode, 0, 1 }, { nonimmediate_operand, "xm", V2DImode, 0, 1 }, { register_operand, "=x", TImode, 0, 1 }, { register_operand, "0", TImode, 0, 1 }, { nonimmediate_operand, "xm", TImode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { register_operand, "0", V2DImode, 0, 1 }, { nonimmediate_operand, "xm", V2DImode, 0, 1 }, { register_operand, "=x", V4SFmode, 0, 1 }, { const0_operand, "X", V4SFmode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { register_operand, "0", V4SFmode, 0, 1 }, { register_operand, "x", V4SFmode, 0, 1 }, { sse_comparison_operator, "", V4SImode, 0, 0 }, { register_operand, "x", V4SFmode, 0, 1 }, { register_operand, "x", V4SFmode, 0, 1 }, { register_operand, "=x", V4SFmode, 0, 1 }, { register_operand, "0", V4SFmode, 0, 1 }, { nonimmediate_operand, "ym", V2SImode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { nonimmediate_operand, "xm", V4SFmode, 0, 1 }, { register_operand, "=x,x", V4SFmode, 0, 1 }, { register_operand, "0,0", V4SFmode, 0, 1 }, { nonimmediate_operand, "r,rm", SImode, 0, 1 }, { register_operand, "=x,x", V4SFmode, 0, 1 }, { register_operand, "0,0", V4SFmode, 0, 1 }, { nonimmediate_operand, "r,rm", DImode, 0, 1 }, { register_operand, "=r,r", SImode, 0, 1 }, { nonimmediate_operand, "x,m", V4SFmode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { nonimmediate_operand, "x,m", V4SFmode, 0, 1 }, { register_operand, "=r,r", SImode, 0, 1 }, { nonimmediate_operand, "x,xm", V4SFmode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { nonimmediate_operand, "x,xm", V4SFmode, 0, 1 }, { register_operand, "=y", V8QImode, 0, 1 }, { register_operand, "%0", V8QImode, 0, 1 }, { nonimmediate_operand, "ym", V8QImode, 0, 1 }, { register_operand, "=y", V4HImode, 0, 1 }, { register_operand, "%0", V4HImode, 0, 1 }, { nonimmediate_operand, "ym", V4HImode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { register_operand, "%0", V2SImode, 0, 1 }, { nonimmediate_operand, "ym", V2SImode, 0, 1 }, { register_operand, "=y", DImode, 0, 1 }, { register_operand, "%0", DImode, 0, 1 }, { nonimmediate_operand, "ym", DImode, 0, 1 }, { register_operand, "=y", V8QImode, 0, 1 }, { register_operand, "0", V8QImode, 0, 1 }, { nonimmediate_operand, "ym", V8QImode, 0, 1 }, { register_operand, "=y", V4HImode, 0, 1 }, { register_operand, "0", V4HImode, 0, 1 }, { nonimmediate_operand, "ym", V4HImode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { register_operand, "0", V2SImode, 0, 1 }, { nonimmediate_operand, "ym", V2SImode, 0, 1 }, { register_operand, "=y", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { nonimmediate_operand, "ym", DImode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { register_operand, "0", V4HImode, 0, 1 }, { nonimmediate_operand, "ym", V4HImode, 0, 1 }, { register_operand, "=y", DImode, 0, 1 }, { register_operand, "0", V8QImode, 0, 1 }, { nonimmediate_operand, "ym", V8QImode, 0, 1 }, { register_operand, "=y", V4HImode, 0, 1 }, { register_operand, "0", V4HImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { const_0_to_15_operand, "N", SImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { register_operand, "y", V4HImode, 0, 1 }, { const_0_to_3_operand, "N", SImode, 0, 1 }, { register_operand, "=y", V4HImode, 0, 1 }, { 
nonimmediate_operand, "ym", V4HImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { register_operand, "=y", V4HImode, 0, 1 }, { register_operand, "0", V4HImode, 0, 1 }, { nonmemory_operand, "yi", DImode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { register_operand, "0", V2SImode, 0, 1 }, { nonmemory_operand, "yi", DImode, 0, 1 }, { register_operand, "=y", DImode, 0, 1 }, { register_operand, "0", DImode, 0, 1 }, { nonmemory_operand, "yi", DImode, 0, 1 }, { register_operand, "=y", V8QImode, 0, 1 }, { register_operand, "0", V4HImode, 0, 1 }, { register_operand, "y", V4HImode, 0, 1 }, { register_operand, "=y", V4HImode, 0, 1 }, { register_operand, "0", V2SImode, 0, 1 }, { register_operand, "y", V2SImode, 0, 1 }, { register_operand, "=y", V8QImode, 0, 1 }, { register_operand, "0", V8QImode, 0, 1 }, { register_operand, "y", V8QImode, 0, 1 }, { register_operand, "=y", V4HImode, 0, 1 }, { register_operand, "0", V4HImode, 0, 1 }, { register_operand, "y", V4HImode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { register_operand, "0", V2SImode, 0, 1 }, { register_operand, "y", V2SImode, 0, 1 }, { memory_operand, "m", SImode, 0, 1 }, { 0, "", BLKmode, 0, 1 }, { register_operand, "R", DImode, 0, 1 }, { register_operand, "r", DImode, 0, 1 }, { const_int_operand, "i", DImode, 0, 1 }, { 0, "X", VOIDmode, 0, 1 }, { const_int_operand, "n", DImode, 0, 1 }, { register_operand, "=y", V2SFmode, 0, 1 }, { register_operand, "0", V2SFmode, 0, 1 }, { nonimmediate_operand, "ym", V2SFmode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { register_operand, "0", V2SFmode, 0, 1 }, { nonimmediate_operand, "ym", V2SFmode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { nonimmediate_operand, "ym", V2SFmode, 0, 1 }, { register_operand, "=y", V2SFmode, 0, 1 }, { register_operand, "0", V2SFmode, 0, 1 }, { nonimmediate_operand, "y", V2SFmode, 0, 1 }, { register_operand, "=y", V2SFmode, 0, 1 }, { nonimmediate_operand, "ym", V2SImode, 0, 1 }, { register_operand, "=y", V2SFmode, 0, 1 }, { nonimmediate_operand, "ym", V2SFmode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { nonimmediate_operand, "ym", V2SImode, 0, 1 }, { address_operand, "p", SImode, 0, 1 }, { const_int_operand, "", SImode, 0, 1 }, { address_operand, "p", DImode, 0, 1 }, { const_int_operand, "", SImode, 0, 1 }, { address_operand, "p", SImode, 0, 1 }, { const_int_operand, "n", SImode, 0, 1 }, { address_operand, "p", DImode, 0, 1 }, { const_int_operand, "n", SImode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { register_operand, "xm", V2DFmode, 0, 1 }, { register_operand, "0", V2DFmode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { register_operand, "0", V2DFmode, 0, 1 }, { nonimmediate_operand, "x", V2DFmode, 0, 1 }, { sse_comparison_operator, "", V2DImode, 0, 0 }, { register_operand, "x", V2DFmode, 0, 1 }, { register_operand, "x", V2DFmode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { register_operand, "x", V2DFmode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { register_operand, "x", V16QImode, 0, 1 }, { register_operand, "D", SImode, 0, 1 }, { register_operand, "x", V16QImode, 0, 1 }, { register_operand, "x", V16QImode, 0, 1 }, { register_operand, "D", DImode, 0, 1 }, { register_operand, "x", V16QImode, 0, 1 }, { register_operand, "x", V16QImode, 0, 1 }, { memory_operand, "=m", V2DFmode, 0, 1 }, { register_operand, "x", V2DFmode, 0, 1 }, { memory_operand, "=m", V2DImode, 0, 1 }, { register_operand, "x", V2DImode, 0, 1 }, { memory_operand, "=m", SImode, 0, 1 }, { register_operand, "r", SImode, 
0, 1 }, { register_operand, "=x", V4SFmode, 0, 1 }, { nonimmediate_operand, "xm", V4SImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { nonimmediate_operand, "xm", V4SFmode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { nonimmediate_operand, "xm", V4SImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { nonimmediate_operand, "xm", V2DFmode, 0, 1 }, { register_operand, "=y", V2SImode, 0, 1 }, { nonimmediate_operand, "xm", V2DFmode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { nonimmediate_operand, "ym", V2SImode, 0, 1 }, { register_operand, "=r,r", SImode, 0, 1 }, { register_operand, "x,m", V2DFmode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { register_operand, "x,m", V2DFmode, 0, 1 }, { register_operand, "=r,r", SImode, 0, 1 }, { register_operand, "x,xm", V2DFmode, 0, 1 }, { register_operand, "=r,r", DImode, 0, 1 }, { register_operand, "x,xm", V2DFmode, 0, 1 }, { register_operand, "=x,x", V2DFmode, 0, 1 }, { register_operand, "0,0", V2DFmode, 0, 1 }, { nonimmediate_operand, "r,rm", SImode, 0, 1 }, { register_operand, "=x,x", V2DFmode, 0, 1 }, { register_operand, "0,0", V2DFmode, 0, 1 }, { nonimmediate_operand, "r,rm", DImode, 0, 1 }, { register_operand, "=x,x", V4SFmode, 0, 1 }, { register_operand, "0,0", V4SFmode, 0, 1 }, { nonimmediate_operand, "x,xm", V2DFmode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { register_operand, "0", V2DFmode, 0, 1 }, { nonimmediate_operand, "xm", V4SFmode, 0, 1 }, { register_operand, "=x", V4SFmode, 0, 1 }, { nonimmediate_operand, "xm", V2DFmode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { nonimmediate_operand, "xm", V4SFmode, 0, 1 }, { register_operand, "=x", V16QImode, 0, 1 }, { register_operand, "%0", V16QImode, 0, 1 }, { nonimmediate_operand, "xm", V16QImode, 0, 1 }, { register_operand, "=x", V8HImode, 0, 1 }, { register_operand, "%0", V8HImode, 0, 1 }, { nonimmediate_operand, "xm", V8HImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { register_operand, "%0", V4SImode, 0, 1 }, { nonimmediate_operand, "xm", V4SImode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { register_operand, "%0", V2DImode, 0, 1 }, { nonimmediate_operand, "xm", V2DImode, 0, 1 }, { register_operand, "=x", V16QImode, 0, 1 }, { register_operand, "0", V16QImode, 0, 1 }, { nonimmediate_operand, "xm", V16QImode, 0, 1 }, { register_operand, "=x", V8HImode, 0, 1 }, { register_operand, "0", V8HImode, 0, 1 }, { nonimmediate_operand, "xm", V8HImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { register_operand, "0", V4SImode, 0, 1 }, { nonimmediate_operand, "xm", V4SImode, 0, 1 }, { register_operand, "=y", DImode, 0, 1 }, { register_operand, "0", V2SImode, 0, 1 }, { nonimmediate_operand, "ym", V2SImode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { register_operand, "0", V4SImode, 0, 1 }, { nonimmediate_operand, "xm", V4SImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { register_operand, "0", V8HImode, 0, 1 }, { nonimmediate_operand, "xm", V8HImode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { register_operand, "0", V16QImode, 0, 1 }, { nonimmediate_operand, "xm", V16QImode, 0, 1 }, { register_operand, "=x", V8HImode, 0, 1 }, { register_operand, "0", V8HImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { const_0_to_255_operand, "N", SImode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { register_operand, "x", V8HImode, 0, 1 }, { const_0_to_7_operand, "N", SImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { nonimmediate_operand, "xm", V4SImode, 0, 1 }, { 
immediate_operand, "i", SImode, 0, 1 }, { register_operand, "=x", V8HImode, 0, 1 }, { nonimmediate_operand, "xm", V8HImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { register_operand, "=x", V8HImode, 0, 1 }, { register_operand, "0", V8HImode, 0, 1 }, { nonmemory_operand, "xi", SImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { register_operand, "0", V4SImode, 0, 1 }, { nonmemory_operand, "xi", SImode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { register_operand, "0", V2DImode, 0, 1 }, { nonmemory_operand, "xi", SImode, 0, 1 }, { register_operand, "=x", V8HImode, 0, 1 }, { register_operand, "0", V8HImode, 0, 1 }, { nonmemory_operand, "xi", V2DImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { register_operand, "0", V4SImode, 0, 1 }, { nonmemory_operand, "xi", V2DImode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { register_operand, "0", V2DImode, 0, 1 }, { nonmemory_operand, "xi", V2DImode, 0, 1 }, { register_operand, "=x", TImode, 0, 1 }, { register_operand, "0", TImode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { register_operand, "0", V2DFmode, 0, 1 }, { register_operand, "x", V2DFmode, 0, 1 }, { register_operand, "=x", V16QImode, 0, 1 }, { register_operand, "0", V8HImode, 0, 1 }, { register_operand, "x", V8HImode, 0, 1 }, { register_operand, "=x", V8HImode, 0, 1 }, { register_operand, "0", V4SImode, 0, 1 }, { register_operand, "x", V4SImode, 0, 1 }, { register_operand, "=x", V16QImode, 0, 1 }, { register_operand, "0", V16QImode, 0, 1 }, { register_operand, "x", V16QImode, 0, 1 }, { register_operand, "=x", V8HImode, 0, 1 }, { register_operand, "0", V8HImode, 0, 1 }, { register_operand, "x", V8HImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { register_operand, "0", V4SImode, 0, 1 }, { register_operand, "x", V4SImode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { register_operand, "0", V2DImode, 0, 1 }, { register_operand, "x", V2DImode, 0, 1 }, { nonimmediate_operand, "=x,m", V2DFmode, 0, 1 }, { nonimmediate_operand, "xm,x", V2DFmode, 0, 1 }, { nonimmediate_operand, "=x,m", V16QImode, 0, 1 }, { nonimmediate_operand, "xm,x", V16QImode, 0, 1 }, { nonimmediate_operand, "=m,y", DImode, 0, 1 }, { register_operand, "x,x", V2DImode, 0, 1 }, { nonimmediate_operand, "=m,y,r", DImode, 0, 1 }, { register_operand, "x,x,x", V2DImode, 0, 1 }, { register_operand, "=x,?x", V2DImode, 0, 1 }, { nonimmediate_operand, "m,y", DImode, 0, 1 }, { register_operand, "=x,?x,?x", V2DImode, 0, 1 }, { nonimmediate_operand, "m,y,r", DImode, 0, 1 }, { register_operand, "=x", V2DImode, 0, 1 }, { nonimmediate_operand, "xm", V2DImode, 0, 1 }, { register_operand, "=x", V4SImode, 0, 1 }, { nonimmediate_operand, "mr", SImode, 0, 1 }, { nonimmediate_operand, "=mr", SImode, 0, 1 }, { register_operand, "x", V4SImode, 0, 1 }, { nonimmediate_operand, "=x,m", V2DFmode, 0, 1 }, { nonimmediate_operand, "0,0", V2DFmode, 0, 1 }, { nonimmediate_operand, "m,x", V2DFmode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { memory_operand, "m", DFmode, 0, 1 }, { const0_operand, "X", V2DFmode, 0, 1 }, { nonimmediate_operand, "=x,x,m", V2DFmode, 0, 1 }, { nonimmediate_operand, "0,0,0", V2DFmode, 0, 1 }, { nonimmediate_operand, "x,m,x", V2DFmode, 0, 1 }, { memory_operand, "=m", DFmode, 0, 1 }, { register_operand, "x", V2DFmode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { register_operand, "0", V2DFmode, 0, 1 }, { nonimmediate_operand, "xm", V2DFmode, 0, 1 }, { immediate_operand, "i", SImode, 0, 1 }, { 
address_operand, "p", VOIDmode, 0, 1 }, { register_operand, "a", SImode, 0, 1 }, { register_operand, "c", SImode, 0, 1 }, { register_operand, "d", SImode, 0, 1 }, { register_operand, "=x", V16QImode, 0, 1 }, { memory_operand, "m", V16QImode, 0, 1 }, { register_operand, "=x", V2DFmode, 0, 1 }, { register_operand, "x", V2DFmode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { x86_64_general_operand, "", DImode, 0, 1 }, { cmpsi_operand, "", SImode, 0, 1 }, { general_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { general_operand, "", HImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { general_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { general_operand, "", DImode, 0, 1 }, { ext_register_operand, "", VOIDmode, 0, 1 }, { general_operand, "", QImode, 0, 1 }, { cmp_fp_expander_operand, "", XFmode, 0, 1 }, { cmp_fp_expander_operand, "", XFmode, 0, 1 }, { cmp_fp_expander_operand, "", DFmode, 0, 1 }, { cmp_fp_expander_operand, "", DFmode, 0, 1 }, { cmp_fp_expander_operand, "", SFmode, 0, 1 }, { cmp_fp_expander_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { general_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", HImode, 1, 1 }, { general_operand, "", HImode, 0, 1 }, { 0, "=m", QImode, 0, 1 }, { register_operand, "r", QImode, 0, 1 }, { register_operand, "=&q", QImode, 0, 1 }, { nonimmediate_operand, "", QImode, 1, 1 }, { general_operand, "", QImode, 0, 1 }, { push_operand, "", DImode, 0, 1 }, { immediate_operand, "", DImode, 0, 1 }, { scratch_operand, "r", DImode, 0, 0 }, { push_operand, "", DImode, 0, 1 }, { general_operand, "", DImode, 0, 1 }, { memory_operand, "", DImode, 0, 1 }, { immediate_operand, "", DImode, 0, 1 }, { scratch_operand, "r", DImode, 0, 0 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { general_operand, "", SFmode, 0, 1 }, { push_operand, "", SFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { push_operand, "", SFmode, 0, 1 }, { any_fp_register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { general_operand, "", DFmode, 0, 1 }, { push_operand, "", DFmode, 0, 1 }, { any_fp_register_operand, "", DFmode, 0, 1 }, { push_operand, "", DFmode, 0, 1 }, { general_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", XFmode, 0, 1 }, { general_operand, "", XFmode, 0, 1 }, { push_operand, "", VOIDmode, 0, 1 }, { general_operand, "", VOIDmode, 0, 1 }, { push_operand, "", XFmode, 0, 1 }, { any_fp_register_operand, "", XFmode, 0, 1 }, { nonimmediate_operand, "", VOIDmode, 0, 1 }, { general_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { register_operand, "=r", DImode, 0, 1 }, { nonimmediate_operand, "rm", SImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { general_operand, "", SImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { 
scratch_operand, "", SImode, 0, 0 }, { memory_operand, "", DImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { push_operand, "", DFmode, 0, 1 }, { fp_register_operand, "", SFmode, 0, 1 }, { push_operand, "", XFmode, 0, 1 }, { fp_register_operand, "", SFmode, 0, 1 }, { push_operand, "", XFmode, 0, 1 }, { fp_register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { general_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", XFmode, 0, 1 }, { general_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", XFmode, 0, 1 }, { general_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { fp_register_operand, "", DFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { memory_operand, "", DFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { memory_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { memory_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", DImode, 0, 1 }, { scratch_operand, "", VOIDmode, 0, 0 }, { memory_operand, "", DImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", DImode, 0, 1 }, { scratch_operand, "", VOIDmode, 0, 0 }, { register_operand, "", DImode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { scratch_operand, "x", SFmode, 0, 0 }, { register_operand, "", DImode, 0, 1 }, { memory_operand, "", DFmode, 0, 1 }, { scratch_operand, "Y", DFmode, 0, 0 }, { nonimmediate_operand, "", SImode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { scratch_operand, "x", SFmode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { memory_operand, "", DFmode, 0, 1 }, { scratch_operand, "Y", DFmode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", SImode, 0, 1 }, { register_operand, "", 
VOIDmode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { fp_register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", V2DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", V2DFmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { register_operand, "", V2DFmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", V4SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", V4SFmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { register_operand, "", V4SFmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { x86_64_general_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { general_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { general_operand, "", SImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { index_register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { immediate_operand, "", VOIDmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { index_register_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { index_register_operand, "", VOIDmode, 0, 1 }, { const248_operand, "", VOIDmode, 0, 1 }, { nonmemory_operand, "", VOIDmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { index_register_operand, "", SImode, 0, 1 }, { const248_operand, "", SImode, 0, 1 }, { nonmemory_operand, "", SImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { index_register_operand, "", VOIDmode, 0, 1 }, { const248_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { immediate_operand, "", VOIDmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { index_register_operand, "", SImode, 0, 1 }, { const248_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { x86_64_nonmemory_operand, "", DImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { nonmemory_operand, "", VOIDmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { 
register_operand, "", SImode, 0, 1 }, { nonmemory_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { general_operand, "", HImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { general_operand, "", QImode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { x86_64_general_operand, "", DImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { general_operand, "", SImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { general_operand, "", HImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { register_operand, "", TImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { scratch_operand, "", DImode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { scratch_operand, "", SImode, 0, 0 }, { register_operand, "=d", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { scratch_operand, "", DImode, 0, 0 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { nonmemory_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { nonmemory_operand, "", QImode, 0, 1 }, { ext_register_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { x86_64_szext_general_operand, "", DImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { general_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { ext_register_operand, "", VOIDmode, 0, 1 }, { ext_register_operand, "", VOIDmode, 0, 1 }, { general_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { 
nonimmediate_operand, "", SFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { 0, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { 0, "", V4SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", V4SFmode, 0, 1 }, { fp_register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_and_not_fp_reg_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { memory_operand, "", DFmode, 0, 1 }, { memory_operand, "", DFmode, 0, 1 }, { 0, "", V2DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { 0, "", V2DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", V2DFmode, 0, 1 }, { fp_register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_and_not_fp_reg_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", XFmode, 0, 1 }, { nonimmediate_operand, "", XFmode, 0, 1 }, { fp_register_operand, "", XFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { register_and_not_fp_reg_operand, "", XFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { 0, "", V4SFmode, 0, 1 }, { shiftdi_operand, "", DImode, 0, 1 }, { shiftdi_operand, "", DImode, 0, 1 }, { nonmemory_operand, "", QImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { immediate_operand, "", QImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { nonmemory_operand, "", QImode, 0, 1 }, { scratch_operand, "", SImode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { register_operand, "r", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { nonmemory_operand, "", QImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { index_register_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", QImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", QImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { nonmemory_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { nonmemory_operand, "", QImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { nonmemory_operand, "", QImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { ext_register_operand, "", VOIDmode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { ext_register_operand, "", VOIDmode, 0, 1 }, { immediate_operand, "", VOIDmode, 0, 1 }, { immediate_operand, "", VOIDmode, 0, 1 }, { register_operand, "", 
VOIDmode, 0, 1 }, { nonimmediate_operand, "", QImode, 0, 1 }, { ix86_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "", QImode, 1, 1 }, { ix86_comparison_operator, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { scratch_operand, "=a", HImode, 0, 0 }, { nonimmediate_operand, "rm", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { scratch_operand, "", SImode, 0, 0 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { scratch_operand, "", SImode, 0, 0 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { ix86_comparison_operator, "", QImode, 0, 0 }, { q_regs_operand, "", VOIDmode, 0, 1 }, { 0, "", QImode, 0, 1 }, { 0, "", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", SImode, 0, 1 }, { 0, "", QImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", QImode, 0, 1 }, { 0, "", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", QImode, 0, 1 }, { 0, "", SImode, 0, 1 }, { 0, "", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { scratch_operand, "", SImode, 0, 0 }, { register_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { scratch_operand, "", DImode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { tls_symbolic_operand, "", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", SImode, 0, 0 }, { scratch_operand, "", SImode, 0, 0 }, { register_operand, "", DImode, 0, 1 }, { tls_symbolic_operand, "", DImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", SImode, 0, 0 }, { scratch_operand, "", SImode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { call_insn_operand, "", SImode, 0, 1 }, { tls_symbolic_operand, "", SImode, 0, 1 }, { scratch_operand, "", SImode, 0, 0 }, { scratch_operand, "", SImode, 0, 0 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { binary_fp_operator, "", VOIDmode, 0, 0 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { binary_fp_operator, "", VOIDmode, 0, 0 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { immediate_operand, "", DFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { immediate_operand, "", SFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { immediate_operand, "", XFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 
}, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", DFmode, 0, 0 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", SFmode, 0, 0 }, { register_operand, "", XFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", XFmode, 0, 0 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", XFmode, 0, 0 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", XFmode, 0, 0 }, { register_operand, "", XFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", XFmode, 0, 0 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", XFmode, 0, 0 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "", XFmode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { register_operand, "", XFmode, 0, 1 }, { memory_operand, "", BLKmode, 0, 1 }, { memory_operand, "", BLKmode, 0, 1 }, { nonmemory_operand, "", SImode, 0, 1 }, { const_int_operand, "", SImode, 0, 1 }, { memory_operand, "", BLKmode, 0, 1 }, { memory_operand, "", BLKmode, 0, 1 }, { nonmemory_operand, "", DImode, 0, 1 }, { const_int_operand, "", DImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { memory_operand, "", BLKmode, 0, 1 }, { nonmemory_operand, "", SImode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", BLKmode, 0, 1 }, { nonmemory_operand, "", DImode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { general_operand, "", BLKmode, 0, 1 }, { general_operand, "", BLKmode, 0, 1 }, { general_operand, "", VOIDmode, 0, 1 }, { immediate_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { memory_operand, "", VOIDmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { general_operand, "", BLKmode, 0, 1 }, { immediate_operand, "", QImode, 0, 1 }, { immediate_operand, "", VOIDmode, 0, 1 }, { 
register_operand, "", DImode, 0, 1 }, { general_operand, "", BLKmode, 0, 1 }, { immediate_operand, "", QImode, 0, 1 }, { immediate_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { general_operand, "", DImode, 0, 1 }, { general_operand, "", DImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { general_operand, "", SImode, 0, 1 }, { general_operand, "", SImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { general_operand, "", HImode, 0, 1 }, { general_operand, "", HImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { general_operand, "", QImode, 0, 1 }, { general_operand, "", QImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { ix86_comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "", QImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { flags_reg_operand, "", VOIDmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_and_not_any_fp_reg_operand, "", DFmode, 0, 1 }, { fcmov_comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { flags_reg_operand, "", VOIDmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { register_operand, "", XFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { const_int_operand, "", QImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { const_int_operand, "", HImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { const_int_operand, "", SImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { const_int_operand, "", DImode, 0, 1 }, { fp_register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { fp_register_operand, 
"", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 0 }, { nonimmediate_operand, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { sse_comparison_operator, "", SFmode, 0, 0 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { sse_comparison_operator, "", DFmode, 0, 0 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { register_operand, "", SFmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 0 }, { nonmemory_operand, "", SFmode, 0, 1 }, { nonmemory_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { nonimmediate_operand, "", SFmode, 0, 1 }, { register_operand, "", DFmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 0 }, { nonmemory_operand, "", DFmode, 0, 1 }, { nonmemory_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { nonimmediate_operand, "", DFmode, 0, 1 }, { register_operand, "=r", SImode, 0, 1 }, { general_operand, "", SImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { aligned_operand, "", VOIDmode, 0, 1 }, { promotable_binary_operator, "", VOIDmode, 0, 0 }, { register_operand, "", VOIDmode, 0, 1 }, { aligned_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", VOIDmode, 0, 1 }, { aligned_operand, "", HImode, 0, 1 }, { const_int_operand, "", HImode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { comparison_operator, "", VOIDmode, 0, 0 }, { register_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { push_operand, "", SImode, 0, 1 }, { memory_operand, "", SImode, 0, 1 }, { scratch_operand, "r", SImode, 0, 0 }, { push_operand, "", DImode, 0, 1 }, { memory_operand, "", DImode, 0, 1 }, { scratch_operand, "r", DImode, 0, 0 }, { push_operand, "", SFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { scratch_operand, "r", SFmode, 0, 0 }, { push_operand, "", HImode, 0, 1 }, { memory_operand, "", HImode, 0, 1 }, { scratch_operand, "r", HImode, 0, 0 }, { push_operand, "", QImode, 0, 1 }, { memory_operand, "", QImode, 0, 1 }, { scratch_operand, "q", QImode, 0, 0 }, { memory_operand, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { scratch_operand, "r", SImode, 0, 0 }, { memory_operand, "", HImode, 0, 1 }, { immediate_operand, "", HImode, 0, 1 }, { scratch_operand, "r", HImode, 0, 0 }, { memory_operand, "", QImode, 0, 1 }, { immediate_operand, "", QImode, 0, 1 }, { scratch_operand, "q", QImode, 0, 0 }, { memory_operand, "", SImode, 0, 1 }, { 0, "", VOIDmode, 0, 0 }, { 0, "", VOIDmode, 0, 0 }, { scratch_operand, "r", SImode, 0, 0 }, { register_operand, "", QImode, 0, 1 }, { immediate_operand, "", QImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { memory_operand, "", SImode, 0, 1 }, { scratch_operand, "r", SImode, 0, 0 }, { arith_or_logical_operator, "", SImode, 0, 0 }, { memory_operand, "", SImode, 0, 1 }, { nonmemory_operand, "", SImode, 
0, 1 }, { scratch_operand, "r", SImode, 0, 0 }, { arith_or_logical_operator, "", SImode, 0, 0 }, { register_operand, "", VOIDmode, 1, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { nonmemory_operand, "", DImode, 0, 1 }, { register_operand, "", SImode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { const_int_operand, "", DImode, 0, 1 }, { scratch_operand, "r", SImode, 0, 0 }, { scratch_operand, "r", SImode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { incdec_operand, "", SImode, 0, 1 }, { register_operand, "", HImode, 0, 1 }, { incdec_operand, "", HImode, 0, 1 }, { register_operand, "", QImode, 0, 1 }, { incdec_operand, "", QImode, 0, 1 }, { scratch_operand, "r", DImode, 0, 0 }, { scratch_operand, "r", DImode, 0, 0 }, { register_operand, "", DImode, 0, 1 }, { memory_operand, "", DImode, 0, 1 }, { immediate_operand, "", DImode, 0, 1 }, { scratch_operand, "r", DImode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { memory_operand, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { scratch_operand, "r", SImode, 0, 0 }, { register_operand, "", DImode, 0, 1 }, { memory_operand, "", SImode, 0, 1 }, { immediate_operand, "", SImode, 0, 1 }, { scratch_operand, "r", SImode, 0, 0 }, { register_operand, "", DImode, 0, 1 }, { nonimmediate_operand, "", DImode, 0, 1 }, { const_int_operand, "", DImode, 0, 1 }, { scratch_operand, "r", DImode, 0, 0 }, { register_operand, "", SImode, 0, 1 }, { nonimmediate_operand, "", SImode, 0, 1 }, { const_int_operand, "", SImode, 0, 1 }, { scratch_operand, "r", SImode, 0, 0 }, { register_operand, "", HImode, 0, 1 }, { nonimmediate_operand, "", HImode, 0, 1 }, { immediate_operand, "", HImode, 0, 1 }, { scratch_operand, "r", HImode, 0, 0 }, { register_operand, "", V4SFmode, 0, 1 }, { zero_extended_scalar_load_operand, "", V4SFmode, 0, 1 }, { register_operand, "", V2DFmode, 0, 1 }, { zero_extended_scalar_load_operand, "", V2DFmode, 0, 1 }, { nonimmediate_operand, "", TImode, 0, 1 }, { nonimmediate_operand, "", TImode, 0, 1 }, { nonimmediate_operand, "", TFmode, 0, 1 }, { nonimmediate_operand, "", TFmode, 0, 1 }, { nonimmediate_operand, "", V2DFmode, 0, 1 }, { nonimmediate_operand, "", V2DFmode, 0, 1 }, { nonimmediate_operand, "", V8HImode, 0, 1 }, { nonimmediate_operand, "", V8HImode, 0, 1 }, { nonimmediate_operand, "", V16QImode, 0, 1 }, { nonimmediate_operand, "", V16QImode, 0, 1 }, { nonimmediate_operand, "", V4SFmode, 0, 1 }, { nonimmediate_operand, "", V4SFmode, 0, 1 }, { nonimmediate_operand, "", V4SImode, 0, 1 }, { nonimmediate_operand, "", V4SImode, 0, 1 }, { nonimmediate_operand, "", V2DImode, 0, 1 }, { nonimmediate_operand, "", V2DImode, 0, 1 }, { nonimmediate_operand, "", V2SImode, 0, 1 }, { nonimmediate_operand, "", V2SImode, 0, 1 }, { nonimmediate_operand, "", V4HImode, 0, 1 }, { nonimmediate_operand, "", V4HImode, 0, 1 }, { nonimmediate_operand, "", V8QImode, 0, 1 }, { nonimmediate_operand, "", V8QImode, 0, 1 }, { nonimmediate_operand, "", V2SFmode, 0, 1 }, { nonimmediate_operand, "", V2SFmode, 0, 1 }, { push_operand, "", VOIDmode, 0, 1 }, { register_operand, "", VOIDmode, 0, 1 }, { nonimmediate_operand, "", TImode, 0, 1 }, { general_operand, "", TImode, 0, 1 }, { nonimmediate_operand, "", TFmode, 0, 1 }, { general_operand, "", TFmode, 0, 1 }, { register_operand, "", V4SFmode, 0, 1 }, { memory_operand, "", SFmode, 0, 1 }, { register_operand, "", V4SFmode, 0, 1 }, { nonimmediate_operand, "", V4SFmode, 0, 1 }, { register_operand, "", V4SFmode, 0, 1 }, { register_operand, "", V4SFmode, 0, 1 }, { 
nonimmediate_operand, "", V4SFmode, 0, 1 }, { register_operand, "", V2DFmode, 0, 1 }, { register_operand, "", V2DFmode, 0, 1 }, { nonimmediate_operand, "", V2DFmode, 0, 1 }, { nonimmediate_operand, "", V2DFmode, 0, 1 }, { 0, "", BLKmode, 0, 1 }, { register_operand, "", DImode, 0, 1 }, { immediate_operand, "", DImode, 0, 1 }, { 0, "", VOIDmode, 0, 1 }, { address_operand, "", VOIDmode, 0, 1 }, { const_int_operand, "", SImode, 0, 1 }, { const_int_operand, "", SImode, 0, 1 }, { register_operand, "", V2DFmode, 0, 1 }, { memory_operand, "", DFmode, 0, 1 }, }; #if GCC_VERSION >= 2007 __extension__ #endif const struct insn_data insn_data[] = { { "cmpdi_ccno_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_0 }, #else { 0, output_0, 0 }, #endif (insn_gen_fn) gen_cmpdi_ccno_1_rex64, &operand_data[1], 2, 0, 2, 2 }, { "*cmpdi_minus_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{q}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[3], 2, 0, 2, 1 }, { "cmpdi_1_insn_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{q}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cmpdi_1_insn_rex64, &operand_data[5], 2, 0, 2, 1 }, { "*cmpsi_ccno_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_3 }, #else { 0, output_3, 0 }, #endif 0, &operand_data[7], 2, 0, 2, 2 }, { "*cmpsi_minus_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{l}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[9], 2, 0, 2, 1 }, { "*cmpsi_1_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{l}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[9], 2, 0, 2, 1 }, { "*cmphi_ccno_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_6 }, #else { 0, output_6, 0 }, #endif 0, &operand_data[11], 2, 0, 2, 2 }, { "*cmphi_minus_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{w}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[13], 2, 0, 2, 1 }, { "*cmphi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{w}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[13], 2, 0, 2, 1 }, { "*cmpqi_ccno_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_9 }, #else { 0, output_9, 0 }, #endif 0, &operand_data[15], 2, 0, 2, 2 }, { "*cmpqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[17], 2, 0, 2, 1 }, { "*cmpqi_minus_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[17], 2, 0, 2, 1 }, { "*cmpqi_ext_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{b}\t{%h1, %0|%0, %h1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[19], 2, 0, 1, 1 }, { "*cmpqi_ext_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{b}\t{%h1, %0|%0, %h1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[21], 2, 0, 1, 1 }, { "*cmpqi_ext_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "test{b}\t%h0, %h0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[22], 2, 0, 1, 1 }, { "cmpqi_ext_3_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = 
#else { #endif "cmp{b}\t{%1, %h0|%h0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cmpqi_ext_3_insn, &operand_data[24], 2, 0, 1, 1 }, { "cmpqi_ext_3_insn_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{b}\t{%1, %h0|%h0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cmpqi_ext_3_insn_rex64, &operand_data[26], 2, 0, 1, 1 }, { "*cmpqi_ext_4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp{b}\t{%h1, %h0|%h0, %h1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[28], 2, 0, 1, 1 }, { "*cmpfp_0", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_18 }, #else { 0, 0, output_18 }, #endif 0, &operand_data[30], 3, 0, 1, 3 }, { "*cmpfp_2_sf", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_19 }, #else { 0, 0, output_19 }, #endif 0, &operand_data[33], 2, 0, 1, 3 }, { "*cmpfp_2_sf_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_20 }, #else { 0, 0, output_20 }, #endif 0, &operand_data[35], 3, 0, 1, 3 }, { "*cmpfp_2_df", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_21 }, #else { 0, 0, output_21 }, #endif 0, &operand_data[38], 2, 0, 1, 3 }, { "*cmpfp_2_df_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_22 }, #else { 0, 0, output_22 }, #endif 0, &operand_data[40], 3, 0, 1, 3 }, { "*cmpfp_2_xf", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_23 }, #else { 0, 0, output_23 }, #endif 0, &operand_data[43], 2, 0, 1, 3 }, { "*cmpfp_2_xf_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_24 }, #else { 0, 0, output_24 }, #endif 0, &operand_data[45], 3, 0, 1, 3 }, { "*cmpfp_2u", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_25 }, #else { 0, 0, output_25 }, #endif 0, &operand_data[48], 2, 0, 1, 3 }, { "*cmpfp_2u_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_26 }, #else { 0, 0, output_26 }, #endif 0, &operand_data[50], 3, 0, 1, 3 }, { "*ficom_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[53], 2, 0, 2, 1 }, { "x86_fnstsw_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fnstsw\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_x86_fnstsw_1, &operand_data[30], 1, 0, 1, 1 }, { "x86_sahf_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sahf", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_x86_sahf_1, &operand_data[55], 1, 0, 1, 1 }, { "*cmpfp_i", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_30 }, #else { 0, 0, output_30 }, #endif 0, &operand_data[48], 2, 0, 1, 3 }, { "*cmpfp_i_sse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_31 }, #else { 0, 0, output_31 }, #endif 0, &operand_data[56], 2, 0, 2, 3 }, { "*cmpfp_i_sse_only", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_32 }, #else { 0, 0, output_32 }, #endif 0, &operand_data[58], 2, 0, 1, 3 }, { "*cmpfp_iu", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_33 }, #else { 0, 0, output_33 }, #endif 0, &operand_data[48], 2, 0, 1, 3 }, { "*cmpfp_iu_sse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_34 }, #else { 0, 0, output_34 }, #endif 0, &operand_data[56], 2, 0, 2, 3 }, { "*cmpfp_iu_sse_only", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_35 }, #else { 0, 0, output_35 }, #endif 0, &operand_data[58], 2, 0, 1, 3 }, { "*pushsi2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "push{l}\t%1", #if HAVE_DESIGNATED_INITIALIZERS }, 
#else 0, 0 }, #endif 0, &operand_data[60], 2, 0, 1, 1 }, { "*pushsi2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "push{q}\t%q1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[62], 2, 0, 1, 1 }, { "*pushsi2_prologue", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "push{l}\t%1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[60], 2, 0, 1, 1 }, { "*popsi1_epilogue", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pop{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[64], 1, 0, 1, 1 }, { "popsi1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pop{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_popsi1, &operand_data[64], 1, 0, 1, 1 }, { "*movsi_xor", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t{%0, %0|%0, %0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[65], 2, 0, 1, 1 }, { "*movsi_or", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_42 }, #else { 0, 0, output_42 }, #endif 0, &operand_data[67], 2, 0, 1, 3 }, { "*movsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_43 }, #else { 0, 0, output_43 }, #endif 0, &operand_data[69], 2, 0, 8, 3 }, { "*movsi_1_nointernunit", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_44 }, #else { 0, 0, output_44 }, #endif 0, &operand_data[71], 2, 0, 8, 3 }, { "*movabssi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_45 }, #else { 0, output_45, 0 }, #endif 0, &operand_data[73], 2, 0, 2, 2 }, { "*movabssi_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_46 }, #else { 0, output_46, 0 }, #endif 0, &operand_data[75], 2, 0, 2, 2 }, { "*swapsi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xchg{l}\t%1, %0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[77], 2, 2, 1, 1 }, { "*pushhi2", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_48 }, #else { 0, output_48, 0 }, #endif 0, &operand_data[79], 2, 0, 2, 2 }, { "*pushhi2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "push{q}\t%q1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[81], 2, 0, 1, 1 }, { "*movhi_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_50 }, #else { 0, 0, output_50 }, #endif 0, &operand_data[83], 2, 0, 4, 3 }, { "*movabshi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_51 }, #else { 0, output_51, 0 }, #endif 0, &operand_data[85], 2, 0, 2, 2 }, { "*movabshi_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_52 }, #else { 0, output_52, 0 }, #endif 0, &operand_data[87], 2, 0, 2, 2 }, { "*swaphi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xchg{w}\t%1, %0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[89], 2, 2, 1, 1 }, { "*swaphi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xchg{l}\t%k1, %k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[89], 2, 2, 1, 1 }, { "*movstricthi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mov{w}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[91], 2, 0, 2, 1 }, { "*movstricthi_xor", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{w}\t{%0, %0|%0, %0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[93], 2, 0, 1, 1 }, { "*pushqi2", #if HAVE_DESIGNATED_INITIALIZERS 
{ .multi = output_57 }, #else { 0, output_57, 0 }, #endif 0, &operand_data[95], 2, 0, 2, 2 }, { "*pushqi2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "push{q}\t%q1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[97], 2, 0, 1, 1 }, { "*movqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_59 }, #else { 0, 0, output_59 }, #endif 0, &operand_data[99], 2, 0, 7, 3 }, { "*swapqi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xchg{b}\t%1, %0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[101], 2, 2, 1, 1 }, { "*movstrictqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mov{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[103], 2, 0, 2, 1 }, { "*movstrictqi_xor", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%0, %0|%0, %0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[105], 2, 0, 1, 1 }, { "*movsi_extv_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movs{bl|x}\t{%h1, %0|%0, %h1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[107], 2, 0, 1, 1 }, { "*movhi_extv_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movs{bl|x}\t{%h1, %k0|%k0, %h1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[109], 2, 0, 1, 1 }, { "*movqi_extv_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_65 }, #else { 0, 0, output_65 }, #endif 0, &operand_data[111], 2, 0, 2, 3 }, { "*movqi_extv_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_66 }, #else { 0, 0, output_66 }, #endif 0, &operand_data[113], 2, 0, 2, 3 }, { "*movabsqi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_67 }, #else { 0, output_67, 0 }, #endif 0, &operand_data[115], 2, 0, 2, 2 }, { "*movabsqi_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_68 }, #else { 0, output_68, 0 }, #endif 0, &operand_data[117], 2, 0, 2, 2 }, { "*movsi_extzv_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movz{bl|x}\t{%h1, %0|%0, %h1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[107], 2, 0, 1, 1 }, { "*movqi_extzv_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_70 }, #else { 0, 0, output_70 }, #endif 0, &operand_data[119], 2, 0, 2, 3 }, { "*movqi_extzv_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_71 }, #else { 0, 0, output_71 }, #endif 0, &operand_data[113], 2, 0, 2, 3 }, { "movsi_insv_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mov{b}\t{%b1, %h0|%h0, %b1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_movsi_insv_1, &operand_data[121], 2, 0, 1, 1 }, { "movdi_insv_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mov{b}\t{%b1, %h0|%h0, %b1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_movdi_insv_1_rex64, &operand_data[123], 2, 0, 1, 1 }, { "*movqi_insv_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mov{b}\t{%h1, %h0|%h0, %h1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[125], 2, 0, 1, 1 }, { "*pushdi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[127], 2, 0, 1, 1 }, { "pushdi2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_76 }, #else { 0, output_76, 0 }, #endif (insn_gen_fn) gen_pushdi2_rex64, 
&operand_data[129], 2, 0, 2, 2 }, { "*pushdi2_prologue_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "push{q}\t%1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[131], 2, 0, 1, 1 }, { "*popdi1_epilogue_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pop{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[133], 1, 0, 1, 1 }, { "popdi1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pop{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_popdi1, &operand_data[133], 1, 0, 1, 1 }, { "*movdi_xor_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t{%k0, %k0|%k0, %k0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[134], 2, 0, 1, 1 }, { "*movdi_or_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_81 }, #else { 0, 0, output_81 }, #endif 0, &operand_data[136], 2, 0, 1, 3 }, { "*movdi_2", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_82 }, #else { 0, output_82, 0 }, #endif 0, &operand_data[138], 2, 0, 7, 2 }, { "*movdi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_83 }, #else { 0, 0, output_83 }, #endif 0, &operand_data[140], 2, 0, 11, 3 }, { "*movdi_1_rex64_nointerunit", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_84 }, #else { 0, 0, output_84 }, #endif 0, &operand_data[142], 2, 0, 11, 3 }, { "*movabsdi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_85 }, #else { 0, output_85, 0 }, #endif 0, &operand_data[144], 2, 0, 2, 2 }, { "*movabsdi_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_86 }, #else { 0, output_86, 0 }, #endif 0, &operand_data[146], 2, 0, 2, 2 }, { "*swapdi_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xchg{q}\t%1, %0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[148], 2, 2, 1, 1 }, { "*pushsf", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_88 }, #else { 0, 0, output_88 }, #endif 0, &operand_data[150], 2, 0, 3, 3 }, { "*pushsf_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_89 }, #else { 0, 0, output_89 }, #endif 0, &operand_data[152], 2, 0, 3, 3 }, { "*movsf_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_90 }, #else { 0, 0, output_90 }, #endif 0, &operand_data[154], 2, 0, 12, 3 }, { "*movsf_1_nointerunit", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_91 }, #else { 0, 0, output_91 }, #endif 0, &operand_data[156], 2, 0, 12, 3 }, { "*swapsf", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_92 }, #else { 0, 0, output_92 }, #endif 0, &operand_data[158], 2, 2, 1, 3 }, { "*pushdf_nointeger", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_93 }, #else { 0, 0, output_93 }, #endif 0, &operand_data[160], 2, 0, 4, 3 }, { "*pushdf_integer", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_94 }, #else { 0, 0, output_94 }, #endif 0, &operand_data[162], 2, 0, 3, 3 }, { "*movdf_nointeger", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_95 }, #else { 0, 0, output_95 }, #endif 0, &operand_data[164], 2, 0, 9, 3 }, { "*movdf_integer", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_96 }, #else { 0, 0, output_96 }, #endif 0, &operand_data[166], 2, 0, 9, 3 }, { "*swapdf", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_97 }, #else { 0, 0, output_97 }, #endif 0, &operand_data[168], 2, 2, 1, 3 }, { "*pushxf_nointeger", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_98 }, #else { 0, 0, output_98 }, #endif 
0, &operand_data[170], 2, 0, 3, 3 }, { "*pushxf_integer", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_99 }, #else { 0, 0, output_99 }, #endif 0, &operand_data[172], 2, 0, 2, 3 }, { "*movxf_nointeger", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_100 }, #else { 0, 0, output_100 }, #endif 0, &operand_data[174], 2, 0, 5, 3 }, { "*movxf_integer", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_101 }, #else { 0, 0, output_101 }, #endif 0, &operand_data[176], 2, 0, 5, 3 }, { "swapxf", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_102 }, #else { 0, 0, output_102 }, #endif (insn_gen_fn) gen_swapxf, &operand_data[178], 2, 2, 1, 3 }, { "zero_extendhisi2_and", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_zero_extendhisi2_and, &operand_data[180], 2, 0, 1, 1 }, { "*zero_extendhisi2_movzwl", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movz{wl|x}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[182], 2, 0, 1, 1 }, { "*zero_extendqihi2_and", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[184], 2, 0, 2, 1 }, { "*zero_extendqihi2_movzbw_and", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[186], 2, 0, 2, 1 }, { "*zero_extendqihi2_movzbw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movz{bw|x}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[188], 2, 0, 1, 1 }, { "*zero_extendqisi2_and", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[190], 2, 0, 2, 1 }, { "*zero_extendqisi2_movzbw_and", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[192], 2, 0, 2, 1 }, { "*zero_extendqisi2_movzbw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movz{bl|x}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[194], 2, 0, 1, 1 }, { "zero_extendsidi2_32", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_111 }, #else { 0, output_111, 0 }, #endif (insn_gen_fn) gen_zero_extendsidi2_32, &operand_data[196], 2, 0, 5, 2 }, { "*zero_extendsidi2_32_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_112 }, #else { 0, output_112, 0 }, #endif 0, &operand_data[198], 2, 0, 5, 2 }, { "zero_extendsidi2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_113 }, #else { 0, output_113, 0 }, #endif (insn_gen_fn) gen_zero_extendsidi2_rex64, &operand_data[200], 2, 0, 4, 2 }, { "*zero_extendsidi2_rex64_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_114 }, #else { 0, output_114, 0 }, #endif 0, &operand_data[202], 2, 0, 4, 2 }, { "zero_extendhidi2", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_115 }, #else { 0, output_115, 0 }, #endif (insn_gen_fn) gen_zero_extendhidi2, &operand_data[204], 2, 0, 2, 2 }, { "zero_extendqidi2", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_116 }, #else { 0, output_116, 0 }, #endif (insn_gen_fn) gen_zero_extendqidi2, &operand_data[206], 2, 0, 2, 2 }, { "*extendsidi2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[208], 3, 0, 4, 1 }, { 
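/* Reader's sketch of the insn_data[] entry layout (field names paraphrased
   from recog.h and genoutput.c): each entry pairs a pattern name with its
   output template(s), generator, and operand information.  In the first
   entry of this table,

       { "cmpdi_ccno_1_rex64", ..., (insn_gen_fn) gen_cmpdi_ccno_1_rex64,
         &operand_data[1], 2, 0, 2, 2 }

   the trailing four integers are n_operands, n_dups, n_alternatives and
   output_format; the format code selects how the output member is used
   (1 = a single template string, 2 = a "multi" array with one template per
   constraint alternative such as output_0, 3 = an output function such as
   output_18).  The HAVE_DESIGNATED_INITIALIZERS conditionals emit either a
   C99-style designated initializer for the output union ({ .single = ... },
   { .multi = ... }, { .function = ... }) or the positional
   { single, multi, function } form for compilers without that extension.  */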
"extendsidi2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_118 }, #else { 0, output_118, 0 }, #endif (insn_gen_fn) gen_extendsidi2_rex64, &operand_data[211], 2, 0, 2, 2 }, { "extendhidi2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movs{wq|x}\t{%1,%0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_extendhidi2, &operand_data[213], 2, 0, 1, 1 }, { "extendqidi2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movs{bq|x}\t{%1,%0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_extendqidi2, &operand_data[215], 2, 0, 1, 1 }, { "extendhisi2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_121 }, #else { 0, 0, output_121 }, #endif (insn_gen_fn) gen_extendhisi2, &operand_data[217], 2, 0, 2, 3 }, { "*extendhisi2_zext", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_122 }, #else { 0, 0, output_122 }, #endif 0, &operand_data[219], 2, 0, 2, 3 }, { "extendqihi2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_123 }, #else { 0, 0, output_123 }, #endif (insn_gen_fn) gen_extendqihi2, &operand_data[221], 2, 0, 2, 3 }, { "extendqisi2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movs{bl|x}\t{%1,%0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_extendqisi2, &operand_data[194], 2, 0, 1, 1 }, { "*extendqisi2_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movs{bl|x}\t{%1,%k0|%k0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[215], 2, 0, 1, 1 }, { "*dummy_extendsfdf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[223], 2, 0, 1, 1 }, { "*dummy_extendsfxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[225], 2, 0, 1, 1 }, { "*extendsfdf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_128 }, #else { 0, 0, output_128 }, #endif 0, &operand_data[227], 2, 0, 3, 3 }, { "*extendsfdf2_1_sse_only", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtss2sd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[229], 2, 0, 1, 1 }, { "*extendsfxf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_130 }, #else { 0, 0, output_130 }, #endif 0, &operand_data[231], 2, 0, 2, 3 }, { "*extenddfxf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_131 }, #else { 0, 0, output_131 }, #endif 0, &operand_data[233], 2, 0, 2, 3 }, { "truncdfsf2_noop", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_132 }, #else { 0, 0, output_132 }, #endif (insn_gen_fn) gen_truncdfsf2_noop, &operand_data[235], 2, 0, 1, 3 }, { "*truncdfsf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_133 }, #else { 0, 0, output_133 }, #endif 0, &operand_data[237], 3, 0, 4, 3 }, { "*truncdfsf2_1_sse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_134 }, #else { 0, 0, output_134 }, #endif 0, &operand_data[240], 3, 0, 5, 3 }, { "*truncdfsf2_1_sse_nooverlap", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_135 }, #else { 0, 0, output_135 }, #endif 0, &operand_data[243], 3, 0, 5, 3 }, { "*truncdfsf2_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_136 }, #else { 0, 0, output_136 }, #endif 0, &operand_data[246], 2, 0, 3, 3 }, { "*truncdfsf2_2_nooverlap", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_137 }, #else { 0, 0, 
output_137 }, #endif 0, &operand_data[248], 2, 0, 2, 3 }, { "*truncdfsf2_3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_138 }, #else { 0, 0, output_138 }, #endif 0, &operand_data[250], 2, 0, 1, 3 }, { "truncdfsf2_sse_only", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsd2ss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_truncdfsf2_sse_only, &operand_data[252], 2, 0, 2, 1 }, { "*truncdfsf2_sse_only_nooverlap", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[254], 2, 0, 1, 1 }, { "truncxfsf2_noop", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_141 }, #else { 0, 0, output_141 }, #endif (insn_gen_fn) gen_truncxfsf2_noop, &operand_data[256], 2, 0, 1, 3 }, { "*truncxfsf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_142 }, #else { 0, 0, output_142 }, #endif 0, &operand_data[258], 3, 0, 4, 3 }, { "*truncxfsf2_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_143 }, #else { 0, 0, output_143 }, #endif 0, &operand_data[261], 2, 0, 1, 3 }, { "truncxfdf2_noop", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_144 }, #else { 0, 0, output_144 }, #endif (insn_gen_fn) gen_truncxfdf2_noop, &operand_data[263], 2, 0, 1, 3 }, { "*truncxfdf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_145 }, #else { 0, 0, output_145 }, #endif 0, &operand_data[265], 3, 0, 4, 3 }, { "*truncxfdf2_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_146 }, #else { 0, 0, output_146 }, #endif 0, &operand_data[268], 2, 0, 1, 3 }, { "*fix_truncdi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[270], 2, 0, 2, 1 }, { "fix_truncdi_nomemory", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fix_truncdi_nomemory, &operand_data[270], 6, 0, 2, 1 }, { "fix_truncdi_memory", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_149 }, #else { 0, 0, output_149 }, #endif (insn_gen_fn) gen_fix_truncdi_memory, &operand_data[276], 5, 0, 1, 3 }, { "fix_truncsfdi_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttss2si{q}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fix_truncsfdi_sse, &operand_data[281], 2, 0, 2, 1 }, { "fix_truncdfdi_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttsd2si{q}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fix_truncdfdi_sse, &operand_data[283], 2, 0, 2, 1 }, { "*fix_truncsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[285], 2, 0, 2, 1 }, { "fix_truncsi_nomemory", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fix_truncsi_nomemory, &operand_data[285], 5, 0, 2, 1 }, { "fix_truncsi_memory", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_154 }, #else { 0, 0, output_154 }, #endif (insn_gen_fn) gen_fix_truncsi_memory, &operand_data[290], 4, 0, 1, 3 }, { "fix_truncsfsi_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttss2si\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fix_truncsfsi_sse, &operand_data[294], 2, 0, 2, 1 }, { 
"fix_truncdfsi_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttsd2si\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fix_truncdfsi_sse, &operand_data[296], 2, 0, 2, 1 }, { "*fix_trunchi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[298], 2, 0, 2, 1 }, { "fix_trunchi_nomemory", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fix_trunchi_nomemory, &operand_data[298], 5, 0, 2, 1 }, { "fix_trunchi_memory", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_159 }, #else { 0, 0, output_159 }, #endif (insn_gen_fn) gen_fix_trunchi_memory, &operand_data[303], 4, 0, 1, 3 }, { "x86_fnstcw_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fnstcw\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_x86_fnstcw_1, &operand_data[303], 1, 0, 1, 1 }, { "x86_fldcw_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fldcw\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_x86_fldcw_1, &operand_data[278], 1, 0, 1, 1 }, { "*floathisf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_162 }, #else { 0, output_162, 0 }, #endif 0, &operand_data[307], 2, 0, 2, 2 }, { "*floatsisf2_i387", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_163 }, #else { 0, output_163, 0 }, #endif 0, &operand_data[309], 2, 0, 4, 2 }, { "*floatsisf2_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsi2ss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[311], 2, 0, 2, 1 }, { "*floatdisf2_i387_only", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_165 }, #else { 0, output_165, 0 }, #endif 0, &operand_data[313], 2, 0, 2, 2 }, { "*floatdisf2_i387", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_166 }, #else { 0, output_166, 0 }, #endif 0, &operand_data[315], 2, 0, 4, 2 }, { "*floatdisf2_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsi2ss{q}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[317], 2, 0, 2, 1 }, { "*floathidf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_168 }, #else { 0, output_168, 0 }, #endif 0, &operand_data[319], 2, 0, 2, 2 }, { "*floatsidf2_i387", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_169 }, #else { 0, output_169, 0 }, #endif 0, &operand_data[321], 2, 0, 4, 2 }, { "*floatsidf2_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsi2sd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[323], 2, 0, 2, 1 }, { "*floatdidf2_i387_only", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_171 }, #else { 0, output_171, 0 }, #endif 0, &operand_data[325], 2, 0, 2, 2 }, { "*floatdidf2_i387", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_172 }, #else { 0, output_172, 0 }, #endif 0, &operand_data[327], 2, 0, 4, 2 }, { "*floatdidf2_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsi2sd{q}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[329], 2, 0, 2, 1 }, { "floathixf2", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_174 }, #else { 0, output_174, 0 }, #endif (insn_gen_fn) gen_floathixf2, &operand_data[331], 2, 0, 2, 2 }, { "floatsixf2", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_175 }, #else 
{ 0, output_175, 0 }, #endif (insn_gen_fn) gen_floatsixf2, &operand_data[333], 2, 0, 2, 2 }, { "floatdixf2", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_176 }, #else { 0, output_176, 0 }, #endif (insn_gen_fn) gen_floatdixf2, &operand_data[335], 2, 0, 2, 2 }, { "*adddi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[337], 3, 0, 2, 1 }, { "adddi3_carry_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "adc{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_adddi3_carry_rex64, &operand_data[340], 4, 0, 2, 1 }, { "*adddi3_cc_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "add{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[340], 3, 2, 2, 1 }, { "addqi3_carry", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "adc{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addqi3_carry, &operand_data[344], 4, 0, 2, 1 }, { "addhi3_carry", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "adc{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addhi3_carry, &operand_data[348], 4, 0, 2, 1 }, { "addsi3_carry", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "adc{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addsi3_carry, &operand_data[352], 4, 0, 2, 1 }, { "*addsi3_carry_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "adc{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[356], 4, 0, 1, 1 }, { "*addsi3_cc", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "add{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[352], 3, 2, 2, 1 }, { "addqi3_cc", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "add{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addqi3_cc, &operand_data[344], 3, 2, 2, 1 }, { "*lea_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lea{l}\t{%a1, %0|%0, %a1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[360], 2, 0, 1, 1 }, { "*lea_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lea{l}\t{%a1, %0|%0, %a1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[362], 2, 0, 1, 1 }, { "*lea_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lea{l}\t{%a1, %k0|%k0, %a1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[364], 2, 0, 1, 1 }, { "*lea_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lea{q}\t{%a1, %0|%0, %a1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[364], 2, 0, 1, 1 }, { "*lea_general_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[366], 4, 0, 1, 1 }, { "*lea_general_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[370], 4, 0, 1, 1 }, { "*lea_general_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[374], 4, 0, 1, 1 }, { 
"*lea_general_2_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[378], 4, 0, 1, 1 }, { "*lea_general_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[382], 5, 0, 1, 1 }, { "*lea_general_3_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[387], 5, 0, 1, 1 }, { "*adddi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_196 }, #else { 0, 0, output_196 }, #endif 0, &operand_data[392], 3, 0, 3, 3 }, { "*adddi_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_197 }, #else { 0, 0, output_197 }, #endif 0, &operand_data[395], 3, 2, 2, 3 }, { "*adddi_3_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_198 }, #else { 0, 0, output_198 }, #endif 0, &operand_data[398], 3, 0, 1, 3 }, { "*adddi_4_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_199 }, #else { 0, 0, output_199 }, #endif 0, &operand_data[401], 3, 0, 1, 3 }, { "*adddi_5_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_200 }, #else { 0, 0, output_200 }, #endif 0, &operand_data[404], 3, 0, 1, 3 }, { "*addsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_201 }, #else { 0, 0, output_201 }, #endif 0, &operand_data[407], 3, 0, 3, 3 }, { "addsi_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_202 }, #else { 0, 0, output_202 }, #endif (insn_gen_fn) gen_addsi_1_zext, &operand_data[410], 3, 0, 2, 3 }, { "*addsi_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_203 }, #else { 0, 0, output_203 }, #endif 0, &operand_data[413], 3, 2, 2, 3 }, { "*addsi_2_zext", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_204 }, #else { 0, 0, output_204 }, #endif 0, &operand_data[416], 3, 2, 1, 3 }, { "*addsi_3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_205 }, #else { 0, 0, output_205 }, #endif 0, &operand_data[419], 3, 0, 1, 3 }, { "*addsi_3_zext", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_206 }, #else { 0, 0, output_206 }, #endif 0, &operand_data[416], 3, 2, 1, 3 }, { "*addsi_4", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_207 }, #else { 0, 0, output_207 }, #endif 0, &operand_data[422], 3, 0, 1, 3 }, { "*addsi_5", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_208 }, #else { 0, 0, output_208 }, #endif 0, &operand_data[419], 3, 0, 1, 3 }, { "*addhi_1_lea", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_209 }, #else { 0, 0, output_209 }, #endif 0, &operand_data[425], 3, 0, 3, 3 }, { "*addhi_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_210 }, #else { 0, 0, output_210 }, #endif 0, &operand_data[348], 3, 0, 2, 3 }, { "*addhi_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_211 }, #else { 0, 0, output_211 }, #endif 0, &operand_data[428], 3, 2, 2, 3 }, { "*addhi_3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_212 }, #else { 0, 0, output_212 }, #endif 0, &operand_data[431], 3, 0, 1, 3 }, { "*addhi_4", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_213 }, #else { 0, 0, output_213 }, #endif 0, &operand_data[434], 3, 0, 1, 3 }, { "*addhi_5", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_214 }, #else { 0, 0, output_214 }, #endif 0, &operand_data[431], 3, 0, 1, 3 }, { "*addqi_1_lea", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_215 }, #else { 0, 0, output_215 }, #endif 0, &operand_data[437], 3, 
0, 4, 3 }, { "*addqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_216 }, #else { 0, 0, output_216 }, #endif 0, &operand_data[440], 3, 0, 3, 3 }, { "*addqi_1_slp", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_217 }, #else { 0, 0, output_217 }, #endif 0, &operand_data[443], 2, 1, 2, 3 }, { "*addqi_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_218 }, #else { 0, 0, output_218 }, #endif 0, &operand_data[445], 3, 2, 2, 3 }, { "*addqi_3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_219 }, #else { 0, 0, output_219 }, #endif 0, &operand_data[448], 3, 0, 1, 3 }, { "*addqi_4", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_220 }, #else { 0, 0, output_220 }, #endif 0, &operand_data[451], 3, 0, 1, 3 }, { "*addqi_5", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_221 }, #else { 0, 0, output_221 }, #endif 0, &operand_data[448], 3, 0, 1, 3 }, { "addqi_ext_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_222 }, #else { 0, 0, output_222 }, #endif (insn_gen_fn) gen_addqi_ext_1, &operand_data[454], 3, 0, 1, 3 }, { "*addqi_ext_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_223 }, #else { 0, 0, output_223 }, #endif 0, &operand_data[457], 3, 0, 1, 3 }, { "*addqi_ext_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "add{b}\t{%h2, %h0|%h0, %h2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[460], 3, 0, 1, 1 }, { "*subdi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[463], 3, 0, 2, 1 }, { "subdi3_carry_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sbb{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subdi3_carry_rex64, &operand_data[466], 4, 0, 2, 1 }, { "*subdi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[466], 3, 0, 2, 1 }, { "*subdi_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[466], 3, 2, 2, 1 }, { "*subdi_3_rex63", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[466], 3, 2, 2, 1 }, { "subqi3_carry", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sbb{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subqi3_carry, &operand_data[470], 4, 0, 2, 1 }, { "subhi3_carry", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sbb{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subhi3_carry, &operand_data[474], 4, 0, 2, 1 }, { "subsi3_carry", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sbb{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subsi3_carry, &operand_data[478], 4, 0, 2, 1 }, { "subsi3_carry_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sbb{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subsi3_carry_zext, &operand_data[482], 4, 0, 2, 1 }, { "*subsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, 
&operand_data[478], 3, 0, 2, 1 }, { "*subsi_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[486], 3, 0, 1, 1 }, { "*subsi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[478], 3, 2, 2, 1 }, { "*subsi_2_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[486], 3, 2, 1, 1 }, { "*subsi_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[478], 3, 2, 2, 1 }, { "*subsi_3_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[486], 3, 2, 1, 1 }, { "*subhi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[474], 3, 0, 2, 1 }, { "*subhi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[474], 3, 2, 2, 1 }, { "*subhi_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[474], 3, 2, 2, 1 }, { "*subqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[489], 3, 0, 2, 1 }, { "*subqi_1_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[492], 2, 1, 2, 1 }, { "*subqi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[494], 3, 2, 2, 1 }, { "*subqi_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sub{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[494], 3, 2, 2, 1 }, { "*muldi3_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_247 }, #else { 0, output_247, 0 }, #endif 0, &operand_data[497], 3, 0, 3, 2 }, { "*mulsi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_248 }, #else { 0, output_248, 0 }, #endif 0, &operand_data[500], 3, 0, 3, 2 }, { "*mulsi3_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_249 }, #else { 0, output_249, 0 }, #endif 0, &operand_data[503], 3, 0, 3, 2 }, { "*mulhi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_250 }, #else { 0, output_250, 0 }, #endif 0, &operand_data[506], 3, 0, 3, 2 }, { "*mulqi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mul{b}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[509], 3, 0, 1, 1 }, { "*umulqihi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mul{b}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[512], 3, 0, 1, 1 }, { "*mulqihi3_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "imul{b}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[512], 3, 0, 1, 1 }, { 
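/* Naming convention worth noting while reading these entries: patterns
   whose names begin with '*' exist only for matching and output and are
   never generated by name, so genoutput leaves their generator slot 0,
   whereas named patterns carry a pointer such as (insn_gen_fn) gen_divqi3
   that callers reach through GEN_FCN.  The &operand_data[N] pointer gives
   the start of that insn's operands in the table above.  */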
"*umulditi3_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mul{q}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[515], 3, 0, 1, 1 }, { "*umulsidi3_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mul{l}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[518], 3, 0, 1, 1 }, { "*mulditi3_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "imul{q}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[515], 3, 0, 1, 1 }, { "*mulsidi3_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "imul{l}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[518], 3, 0, 1, 1 }, { "*umuldi3_highpart_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mul{q}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[521], 4, 0, 1, 1 }, { "*umulsi3_highpart_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mul{l}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[525], 4, 0, 1, 1 }, { "*umulsi3_highpart_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mul{l}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[529], 4, 0, 1, 1 }, { "*smuldi3_highpart_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "imul{q}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[521], 4, 0, 1, 1 }, { "*smulsi3_highpart_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "imul{l}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[525], 4, 0, 1, 1 }, { "*smulsi3_highpart_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "imul{l}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[529], 4, 0, 1, 1 }, { "divqi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "idiv{b}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_divqi3, &operand_data[533], 3, 0, 1, 1 }, { "udivqi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "div{b}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_udivqi3, &operand_data[533], 3, 0, 1, 1 }, { "*divmoddi4_nocltd_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[536], 4, 2, 2, 1 }, { "*divmoddi4_cltd_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[540], 4, 2, 1, 1 }, { "*divmoddi_noext_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "idiv{q}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[544], 5, 2, 1, 1 }, { "*divmodsi4_nocltd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[549], 4, 2, 2, 1 }, { "*divmodsi4_cltd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[553], 4, 2, 1, 1 }, { "*divmodsi_noext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "idiv{l}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[557], 5, 2, 1, 1 }, { "divmodhi4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif 
"cwtd\n\tidiv{w}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_divmodhi4, &operand_data[562], 4, 2, 1, 1 }, { "udivmoddi4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{q}\t%3, %3\n\tdiv{q}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_udivmoddi4, &operand_data[566], 4, 2, 1, 1 }, { "*udivmoddi4_noext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "div{q}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[544], 4, 3, 1, 1 }, { "udivmodsi4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t%3, %3\n\tdiv{l}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_udivmodsi4, &operand_data[570], 4, 2, 1, 1 }, { "*udivmodsi4_noext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "div{l}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[557], 4, 3, 1, 1 }, { "*udivmodhi_noext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "div{w}\t%2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[574], 5, 2, 1, 1 }, { "*testdi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_278 }, #else { 0, output_278, 0 }, #endif 0, &operand_data[579], 2, 0, 5, 2 }, { "testsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "test{l}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_testsi_1, &operand_data[581], 2, 0, 3, 1 }, { "*testhi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "test{w}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[583], 2, 0, 3, 1 }, { "*testqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_281 }, #else { 0, 0, output_281 }, #endif 0, &operand_data[585], 2, 0, 4, 3 }, { "*testqi_ext_0", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "test{b}\t{%1, %h0|%h0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[587], 2, 0, 1, 1 }, { "*testqi_ext_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "test{b}\t{%1, %h0|%h0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[589], 2, 0, 1, 1 }, { "*testqi_ext_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "test{b}\t{%1, %h0|%h0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[20], 2, 0, 1, 1 }, { "*testqi_ext_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "test{b}\t{%h1, %h0|%h0, %h1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[28], 2, 0, 1, 1 }, { "*testqi_ext_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[591], 3, 0, 1, 1 }, { "*testqi_ext_3_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[594], 3, 0, 1, 1 }, { "*anddi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_288 }, #else { 0, 0, output_288 }, #endif 0, &operand_data[597], 3, 0, 4, 3 }, { "*anddi_2", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_289 }, #else { 0, output_289, 0 }, #endif 0, &operand_data[600], 3, 2, 3, 2 }, { "*andsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_290 }, #else { 0, 0, output_290 }, #endif 0, &operand_data[603], 3, 0, 3, 3 }, { "*andsi_1_zext", #if 
HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[356], 3, 0, 1, 1 }, { "*andsi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[606], 3, 2, 2, 1 }, { "*andsi_2_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[356], 3, 2, 1, 1 }, { "*andhi_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_294 }, #else { 0, 0, output_294 }, #endif 0, &operand_data[609], 3, 0, 3, 3 }, { "*andhi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[612], 3, 2, 2, 1 }, { "*andqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_296 }, #else { 0, output_296, 0 }, #endif 0, &operand_data[615], 3, 0, 3, 2 }, { "*andqi_1_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[618], 2, 1, 2, 1 }, { "*andqi_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_298 }, #else { 0, 0, output_298 }, #endif 0, &operand_data[620], 3, 2, 3, 3 }, { "*andqi_2_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[623], 2, 3, 2, 1 }, { "andqi_ext_0", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_andqi_ext_0, &operand_data[625], 3, 0, 1, 1 }, { "*andqi_ext_0_cc", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[625], 3, 2, 1, 1 }, { "*andqi_ext_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[628], 3, 0, 1, 1 }, { "*andqi_ext_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[631], 3, 0, 1, 1 }, { "*andqi_ext_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "and{b}\t{%h2, %h0|%h0, %h2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[460], 3, 0, 1, 1 }, { "*iordi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[634], 3, 0, 2, 1 }, { "*iordi_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[637], 3, 2, 2, 1 }, { "*iordi_3_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[640], 3, 0, 1, 1 }, { "*iorsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[643], 3, 0, 2, 1 }, { "*iorsi_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{l}\t{%2, %k0|%k0, %2}", #if 
HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[646], 3, 0, 1, 1 }, { "*iorsi_1_zext_imm", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[649], 3, 0, 1, 1 }, { "*iorsi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[606], 3, 2, 2, 1 }, { "*iorsi_2_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[356], 3, 2, 1, 1 }, { "*iorsi_2_zext_imm", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[652], 3, 2, 1, 1 }, { "*iorsi_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[655], 3, 0, 1, 1 }, { "*iorhi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[658], 3, 0, 2, 1 }, { "*iorhi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[612], 3, 2, 2, 1 }, { "*iorhi_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[661], 3, 0, 1, 1 }, { "*iorqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_318 }, #else { 0, output_318, 0 }, #endif 0, &operand_data[664], 3, 0, 3, 2 }, { "*iorqi_1_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[667], 2, 1, 2, 1 }, { "*iorqi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[669], 3, 2, 2, 1 }, { "*iorqi_2_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[672], 2, 3, 2, 1 }, { "*iorqi_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[674], 3, 0, 1, 1 }, { "iorqi_ext_0", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_iorqi_ext_0, &operand_data[625], 3, 0, 1, 1 }, { "*iorqi_ext_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[628], 3, 0, 1, 1 }, { "*iorqi_ext_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "or{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[631], 3, 0, 1, 1 }, { "*iorqi_ext_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ior{b}\t{%h2, %h0|%h0, %h2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[631], 3, 0, 1, 1 }, { "*xordi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_327 }, #else { 0, output_327, 0 }, #endif 0, 
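/* Bitwise XOR patterns begin here.  As in every record of this table, the
   operand_data pointer that follows indexes the shared operand descriptor
   array, and the trailing four integers give the operand count, match_dup
   count, number of constraint alternatives, and output format code
   (1 = single template, 2 = per-alternative array, 3 = output function).  */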
&operand_data[340], 3, 0, 2, 2 }, { "*xordi_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_328 }, #else { 0, output_328, 0 }, #endif 0, &operand_data[637], 3, 2, 2, 2 }, { "*xordi_3_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[640], 3, 0, 1, 1 }, { "*xorsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[352], 3, 0, 2, 1 }, { "*xorsi_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[356], 3, 0, 1, 1 }, { "*xorsi_1_zext_imm", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[677], 3, 0, 1, 1 }, { "*xorsi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[606], 3, 2, 2, 1 }, { "*xorsi_2_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[356], 3, 2, 1, 1 }, { "*xorsi_2_zext_imm", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[652], 3, 2, 1, 1 }, { "*xorsi_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[655], 3, 0, 1, 1 }, { "*xorhi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[658], 3, 0, 2, 1 }, { "*xorhi_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[612], 3, 2, 2, 1 }, { "*xorhi_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[661], 3, 0, 1, 1 }, { "*xorqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_340 }, #else { 0, output_340, 0 }, #endif 0, &operand_data[664], 3, 0, 3, 2 }, { "*xorqi_1_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[618], 2, 1, 2, 1 }, { "xorqi_ext_0", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_xorqi_ext_0, &operand_data[625], 3, 0, 1, 1 }, { "*xorqi_ext_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[628], 3, 0, 1, 1 }, { "*xorqi_ext_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[631], 3, 0, 1, 1 }, { "*xorqi_ext_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%h2, %h0|%h0, %h2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[631], 3, 0, 1, 1 }, { 
"*xorqi_cc_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[669], 3, 2, 2, 1 }, { "*xorqi_2_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[672], 2, 3, 2, 1 }, { "*xorqi_cc_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[674], 3, 0, 1, 1 }, { "*xorqi_cc_ext_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[680], 3, 2, 1, 1 }, { "*xorqi_cc_ext_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xor{b}\t{%2, %h0|%h0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[457], 3, 2, 1, 1 }, { "*negdi2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[683], 2, 0, 1, 1 }, { "*negdi2_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[685], 2, 0, 1, 1 }, { "*negdi2_cmpz_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[685], 2, 1, 1, 1 }, { "*negsi2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[687], 2, 0, 1, 1 }, { "*negsi2_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{l}\t%k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[689], 2, 0, 1, 1 }, { "*negsi2_cmpz", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[687], 2, 1, 1, 1 }, { "*negsi2_cmpz_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{l}\t%k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[689], 2, 1, 1, 1 }, { "*neghi2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{w}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[691], 2, 0, 1, 1 }, { "*neghi2_cmpz", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{w}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[691], 2, 1, 1, 1 }, { "*negqi2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[693], 2, 0, 1, 1 }, { "*negqi2_cmpz", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "neg{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[693], 2, 1, 1, 1 }, { "negsf2_memory", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_negsf2_memory, &operand_data[695], 2, 0, 1, 1 }, { "negsf2_ifs", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_negsf2_ifs, &operand_data[697], 3, 0, 4, 1 }, { "*negsf2_if", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if 
HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[700], 2, 0, 2, 1 }, { "negdf2_memory", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_negdf2_memory, &operand_data[702], 2, 0, 1, 1 }, { "negdf2_ifs", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_negdf2_ifs, &operand_data[704], 3, 0, 4, 1 }, { "*negdf2_ifs_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[707], 3, 0, 3, 1 }, { "*negdf2_if", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[710], 2, 0, 2, 1 }, { "*negdf2_if_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[712], 2, 0, 2, 1 }, { "*negxf2_if", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[714], 2, 0, 2, 1 }, { "*negsf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fchs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[716], 2, 0, 1, 1 }, { "*negdf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fchs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[718], 2, 0, 1, 1 }, { "*negextendsfdf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fchs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[720], 2, 0, 1, 1 }, { "*negxf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fchs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[722], 2, 0, 1, 1 }, { "*negextenddfxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fchs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[724], 2, 0, 1, 1 }, { "*negextendsfxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fchs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[726], 2, 0, 1, 1 }, { "abssf2_memory", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_abssf2_memory, &operand_data[695], 2, 0, 1, 1 }, { "abssf2_ifs", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_abssf2_ifs, &operand_data[697], 3, 0, 4, 1 }, { "*abssf2_if", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[700], 2, 0, 2, 1 }, { "absdf2_memory", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_absdf2_memory, &operand_data[702], 2, 0, 1, 1 }, { "absdf2_ifs", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_absdf2_ifs, &operand_data[728], 3, 0, 4, 1 }, { "*absdf2_ifs_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[731], 3, 0, 3, 1 }, { "*absdf2_if", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if 
HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[710], 2, 0, 2, 1 }, { "*absdf2_if_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[712], 2, 0, 2, 1 }, { "*absxf2_if", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[714], 2, 0, 2, 1 }, { "*abssf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fabs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[716], 2, 0, 1, 1 }, { "*absdf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fabs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[718], 2, 0, 1, 1 }, { "*absextendsfdf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fabs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[720], 2, 0, 1, 1 }, { "*absxf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fabs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[722], 2, 0, 1, 1 }, { "*absextenddfxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fabs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[724], 2, 0, 1, 1 }, { "*absextendsfxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fabs", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[726], 2, 0, 1, 1 }, { "*one_cmpldi2_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "not{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[685], 2, 0, 1, 1 }, { "*one_cmpldi2_2_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[685], 2, 1, 1, 1 }, { "*one_cmplsi2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "not{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[687], 2, 0, 1, 1 }, { "*one_cmplsi2_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "not{l}\t%k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[486], 2, 0, 1, 1 }, { "*one_cmplsi2_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[687], 2, 1, 1, 1 }, { "*one_cmplsi2_2_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[486], 2, 1, 1, 1 }, { "*one_cmplhi2_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "not{w}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[691], 2, 0, 1, 1 }, { "*one_cmplhi2_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[691], 2, 1, 1, 1 }, { "*one_cmplqi2_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_400 }, #else { 0, output_400, 0 }, #endif 0, &operand_data[734], 2, 0, 2, 2 }, { "*one_cmplqi2_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[693], 2, 1, 1, 1 }, { "*ashldi3_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_402 }, #else { 0, 0, output_402 }, #endif 0, &operand_data[736], 3, 0, 2, 3 }, { "*ashldi3_cmp_rex64", #if 
HAVE_DESIGNATED_INITIALIZERS { .function = output_403 }, #else { 0, 0, output_403 }, #endif 0, &operand_data[739], 3, 2, 1, 3 }, { "ashldi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashldi3_1, &operand_data[742], 4, 0, 1, 1 }, { "*ashldi3_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[742], 3, 0, 1, 1 }, { "x86_shld_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_406 }, #else { 0, output_406, 0 }, #endif (insn_gen_fn) gen_x86_shld_1, &operand_data[746], 3, 2, 2, 2 }, { "*ashlsi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_407 }, #else { 0, 0, output_407 }, #endif 0, &operand_data[749], 3, 0, 2, 3 }, { "*ashlsi3_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_408 }, #else { 0, 0, output_408 }, #endif 0, &operand_data[752], 3, 0, 2, 3 }, { "*ashlsi3_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_409 }, #else { 0, 0, output_409 }, #endif 0, &operand_data[755], 3, 2, 1, 3 }, { "*ashlsi3_cmp_zext", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_410 }, #else { 0, 0, output_410 }, #endif 0, &operand_data[758], 3, 2, 1, 3 }, { "*ashlhi3_1_lea", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_411 }, #else { 0, 0, output_411 }, #endif 0, &operand_data[761], 3, 0, 2, 3 }, { "*ashlhi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_412 }, #else { 0, 0, output_412 }, #endif 0, &operand_data[764], 3, 0, 1, 3 }, { "*ashlhi3_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_413 }, #else { 0, 0, output_413 }, #endif 0, &operand_data[767], 3, 2, 1, 3 }, { "*ashlqi3_1_lea", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_414 }, #else { 0, 0, output_414 }, #endif 0, &operand_data[770], 3, 0, 3, 3 }, { "*ashlqi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_415 }, #else { 0, 0, output_415 }, #endif 0, &operand_data[773], 3, 0, 2, 3 }, { "*ashlqi3_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_416 }, #else { 0, 0, output_416 }, #endif 0, &operand_data[776], 3, 2, 1, 3 }, { "ashrdi3_63_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_417 }, #else { 0, output_417, 0 }, #endif (insn_gen_fn) gen_ashrdi3_63_rex64, &operand_data[779], 3, 0, 2, 2 }, { "*ashrdi3_1_one_bit_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[782], 3, 0, 1, 1 }, { "*ashrdi3_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_419 }, #else { 0, output_419, 0 }, #endif 0, &operand_data[785], 3, 0, 2, 2 }, { "*ashrdi3_one_bit_cmp_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[782], 3, 2, 1, 1 }, { "*ashrdi3_cmp_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[788], 3, 2, 1, 1 }, { "ashrdi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashrdi3_1, &operand_data[742], 4, 0, 1, 1 }, { "*ashrdi3_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[742], 3, 0, 1, 1 }, { "x86_shrd_1", #if 
HAVE_DESIGNATED_INITIALIZERS { .multi = output_424 }, #else { 0, output_424, 0 }, #endif (insn_gen_fn) gen_x86_shrd_1, &operand_data[746], 3, 2, 2, 2 }, { "ashrsi3_31", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_425 }, #else { 0, output_425, 0 }, #endif (insn_gen_fn) gen_ashrsi3_31, &operand_data[791], 3, 0, 2, 2 }, { "*ashrsi3_31_zext", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_426 }, #else { 0, output_426, 0 }, #endif 0, &operand_data[794], 3, 0, 2, 2 }, { "*ashrsi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[797], 3, 0, 1, 1 }, { "*ashrsi3_1_one_bit_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{l}\t%k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[800], 3, 0, 1, 1 }, { "*ashrsi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_429 }, #else { 0, output_429, 0 }, #endif 0, &operand_data[803], 3, 0, 2, 2 }, { "*ashrsi3_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_430 }, #else { 0, output_430, 0 }, #endif 0, &operand_data[806], 3, 0, 2, 2 }, { "*ashrsi3_one_bit_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[797], 3, 2, 1, 1 }, { "*ashrsi3_one_bit_cmp_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{l}\t%k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[800], 3, 2, 1, 1 }, { "*ashrsi3_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[755], 3, 2, 1, 1 }, { "*ashrsi3_cmp_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[758], 3, 2, 1, 1 }, { "*ashrhi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{w}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[809], 3, 0, 1, 1 }, { "*ashrhi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_436 }, #else { 0, output_436, 0 }, #endif 0, &operand_data[812], 3, 0, 2, 2 }, { "*ashrhi3_one_bit_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{w}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[809], 3, 2, 1, 1 }, { "*ashrhi3_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[767], 3, 2, 1, 1 }, { "*ashrqi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[815], 3, 0, 1, 1 }, { "*ashrqi3_1_one_bit_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[818], 2, 1, 1, 1 }, { "*ashrqi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_441 }, #else { 0, output_441, 0 }, #endif 0, &operand_data[820], 3, 0, 2, 2 }, { "*ashrqi3_1_slp", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_442 }, #else { 0, output_442, 0 }, #endif 0, &operand_data[823], 2, 1, 2, 2 }, { "*ashrqi3_one_bit_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, 
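/* Arithmetic right shift (sar) patterns continue here; the logical right
   shifts (shr) and the rotate patterns (rol/ror) follow.  Entries whose
   template is a single string, like this one, are emitted verbatim after
   operand substitution, while the output_NNN entries pick a template per
   constraint alternative or compute it in C.  */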
&operand_data[825], 3, 2, 1, 1 }, { "*ashrqi3_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sar{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[776], 3, 2, 1, 1 }, { "*lshrdi3_1_one_bit_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[782], 3, 0, 1, 1 }, { "*lshrdi3_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_446 }, #else { 0, output_446, 0 }, #endif 0, &operand_data[785], 3, 0, 2, 2 }, { "*lshrdi3_cmp_one_bit_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[782], 3, 2, 1, 1 }, { "*lshrdi3_cmp_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{q}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[828], 3, 2, 1, 1 }, { "lshrdi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lshrdi3_1, &operand_data[742], 4, 0, 1, 1 }, { "*lshrdi3_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[742], 3, 0, 1, 1 }, { "*lshrsi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[797], 3, 0, 1, 1 }, { "*lshrsi3_1_one_bit_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{l}\t%k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[800], 3, 0, 1, 1 }, { "*lshrsi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_453 }, #else { 0, output_453, 0 }, #endif 0, &operand_data[803], 3, 0, 2, 2 }, { "*lshrsi3_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_454 }, #else { 0, output_454, 0 }, #endif 0, &operand_data[831], 3, 0, 2, 2 }, { "*lshrsi3_one_bit_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[797], 3, 2, 1, 1 }, { "*lshrsi3_cmp_one_bit_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{l}\t%k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[800], 3, 2, 1, 1 }, { "*lshrsi3_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{l}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[755], 3, 2, 1, 1 }, { "*lshrsi3_cmp_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{l}\t{%2, %k0|%k0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[758], 3, 2, 1, 1 }, { "*lshrhi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{w}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[809], 3, 0, 1, 1 }, { "*lshrhi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_460 }, #else { 0, output_460, 0 }, #endif 0, &operand_data[812], 3, 0, 2, 2 }, { "*lshrhi3_one_bit_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{w}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[809], 3, 2, 1, 1 }, { "*lshrhi3_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{w}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, 
&operand_data[767], 3, 2, 1, 1 }, { "*lshrqi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[815], 3, 0, 1, 1 }, { "*lshrqi3_1_one_bit_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[818], 2, 1, 1, 1 }, { "*lshrqi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_465 }, #else { 0, output_465, 0 }, #endif 0, &operand_data[820], 3, 0, 2, 2 }, { "*lshrqi3_1_slp", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_466 }, #else { 0, output_466, 0 }, #endif 0, &operand_data[823], 2, 1, 2, 2 }, { "*lshrqi2_one_bit_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[815], 3, 2, 1, 1 }, { "*lshrqi2_cmp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shr{b}\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[776], 3, 2, 1, 1 }, { "*rotlsi3_1_one_bit_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rol{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[782], 3, 0, 1, 1 }, { "*rotldi3_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_470 }, #else { 0, output_470, 0 }, #endif 0, &operand_data[834], 3, 0, 2, 2 }, { "*rotlsi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rol{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[797], 3, 0, 1, 1 }, { "*rotlsi3_1_one_bit_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rol{l}\t%k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[800], 3, 0, 1, 1 }, { "*rotlsi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_473 }, #else { 0, output_473, 0 }, #endif 0, &operand_data[803], 3, 0, 2, 2 }, { "*rotlsi3_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_474 }, #else { 0, output_474, 0 }, #endif 0, &operand_data[806], 3, 0, 2, 2 }, { "*rotlhi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rol{w}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[809], 3, 0, 1, 1 }, { "*rotlhi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_476 }, #else { 0, output_476, 0 }, #endif 0, &operand_data[812], 3, 0, 2, 2 }, { "*rotlqi3_1_one_bit_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rol{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[818], 2, 1, 1, 1 }, { "*rotlqi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rol{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[815], 3, 0, 1, 1 }, { "*rotlqi3_1_slp", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_479 }, #else { 0, output_479, 0 }, #endif 0, &operand_data[823], 2, 1, 2, 2 }, { "*rotlqi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_480 }, #else { 0, output_480, 0 }, #endif 0, &operand_data[820], 3, 0, 2, 2 }, { "*rotrdi3_1_one_bit_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ror{q}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[782], 3, 0, 1, 1 }, { "*rotrdi3_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_482 }, #else { 0, output_482, 0 }, #endif 0, &operand_data[785], 3, 0, 2, 2 }, { "*rotrsi3_1_one_bit", #if 
HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ror{l}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[797], 3, 0, 1, 1 }, { "*rotrsi3_1_one_bit_zext", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ror{l}\t%k0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[800], 3, 0, 1, 1 }, { "*rotrsi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_485 }, #else { 0, output_485, 0 }, #endif 0, &operand_data[803], 3, 0, 2, 2 }, { "*rotrsi3_1_zext", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_486 }, #else { 0, output_486, 0 }, #endif 0, &operand_data[806], 3, 0, 2, 2 }, { "*rotrhi3_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ror{w}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[809], 3, 0, 1, 1 }, { "*rotrhi3", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_488 }, #else { 0, output_488, 0 }, #endif 0, &operand_data[812], 3, 0, 2, 2 }, { "*rotrqi3_1_one_bit", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ror{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[815], 3, 0, 1, 1 }, { "*rotrqi3_1_one_bit_slp", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ror{b}\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[818], 2, 1, 1, 1 }, { "*rotrqi3_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_491 }, #else { 0, output_491, 0 }, #endif 0, &operand_data[820], 3, 0, 2, 2 }, { "*rotrqi3_1_slp", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_492 }, #else { 0, output_492, 0 }, #endif 0, &operand_data[823], 2, 1, 2, 2 }, { "*setcc_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "set%C1\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[837], 2, 0, 1, 1 }, { "setcc_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "set%C1\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_setcc_2, &operand_data[839], 2, 0, 1, 1 }, { "*sse_setccsf", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp%D1ss\t{%3, %0|%0, %3}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[841], 4, 0, 1, 1 }, { "*sse_setccdf", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp%D1sd\t{%3, %0|%0, %3}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[845], 4, 0, 1, 1 }, { "*jcc_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "%+j%C1\t%l0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[849], 2, 0, 0, 1 }, { "*jcc_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "%+j%c1\t%l0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[849], 2, 0, 0, 1 }, { "*fp_jcc_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[851], 4, 0, 1, 1 }, { "*fp_jcc_1_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[855], 4, 0, 2, 1 }, { "*fp_jcc_1_sse_only", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[859], 4, 0, 1, 1 }, { "*fp_jcc_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[851], 4, 0, 1, 1 }, { 
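/* Layout note: every brace group in this table fills one `struct insn_data'
   record as declared in recog.h -- pattern name; output template (a single
   string, a per-alternative string array such as output_247, or a C output
   function, selected by the HAVE_DESIGNATED_INITIALIZERS conditionals);
   the gen_* expander or 0 for unnamed "*" patterns; a pointer into the
   shared operand_data[] array; then operand count, match_dup count, number
   of constraint alternatives, and output format code (1 = single, 2 =
   multi, 3 = function).  As a rough sketch -- a paraphrase of final.c's
   get_insn_template(), not the upstream code -- the template is chosen
   like this:

       switch (insn_data[code].output_format)
         {
         case INSN_OUTPUT_FORMAT_SINGLE:
           return insn_data[code].output.single;
         case INSN_OUTPUT_FORMAT_MULTI:
           return insn_data[code].output.multi[which_alternative];
         case INSN_OUTPUT_FORMAT_FUNCTION:
           return (*insn_data[code].output.function) (recog_data.operand, insn);
         default:
           abort ();
         }

   The remaining compare-and-branch placeholders, then the jump, call,
   sibcall and return patterns, follow.  */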
"*fp_jcc_2_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[855], 4, 0, 2, 1 }, { "*fp_jcc_2_sse_only", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[859], 4, 0, 1, 1 }, { "*fp_jcc_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[863], 5, 0, 1, 1 }, { "*fp_jcc_4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[863], 5, 0, 1, 1 }, { "*fp_jcc_5", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[868], 5, 0, 1, 1 }, { "*fp_jcc_6", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[868], 5, 0, 1, 1 }, { "jump", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t%l0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_jump, &operand_data[849], 1, 0, 0, 1 }, { "*indirect_jump", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t%A0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[520], 1, 0, 1, 1 }, { "*indirect_jump_rtx64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t%A0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[517], 1, 0, 1, 1 }, { "*tablejump_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t%A0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[873], 2, 0, 1, 1 }, { "*tablejump_1_rtx64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t%A0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[875], 2, 0, 1, 1 }, { "doloop_end_internal", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_514 }, #else { 0, 0, output_514 }, #endif (insn_gen_fn) gen_doloop_end_internal, &operand_data[876], 4, 1, 3, 3 }, { "*call_pop_0", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_515 }, #else { 0, 0, output_515 }, #endif 0, &operand_data[880], 3, 0, 0, 3 }, { "*call_pop_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_516 }, #else { 0, 0, output_516 }, #endif 0, &operand_data[883], 3, 0, 1, 3 }, { "*call_0", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_517 }, #else { 0, 0, output_517 }, #endif 0, &operand_data[886], 2, 0, 0, 3 }, { "*call_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_518 }, #else { 0, 0, output_518 }, #endif 0, &operand_data[888], 2, 0, 1, 3 }, { "*sibcall_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_519 }, #else { 0, 0, output_519 }, #endif 0, &operand_data[890], 2, 0, 4, 3 }, { "*call_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_520 }, #else { 0, 0, output_520 }, #endif 0, &operand_data[892], 2, 0, 1, 3 }, { "*sibcall_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t%P0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[894], 2, 0, 0, 1 }, { "*sibcall_1_rex64_v", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t*%%r11", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[849], 1, 0, 0, 1 }, { "blockage", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "", #if 
HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_blockage, &operand_data[849], 1, 0, 0, 1 }, { "return_internal", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ret", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_return_internal, &operand_data[0], 0, 0, 0, 1 }, { "return_internal_long", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rep {;} ret", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_return_internal_long, &operand_data[0], 0, 0, 0, 1 }, { "return_pop_internal", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ret\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_return_pop_internal, &operand_data[592], 1, 0, 0, 1 }, { "return_indirect_internal", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t%A0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_return_indirect_internal, &operand_data[372], 1, 0, 1, 1 }, { "nop", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "nop", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_nop, &operand_data[0], 0, 0, 0, 1 }, { "align", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_529 }, #else { 0, 0, output_529 }, #endif (insn_gen_fn) gen_align, &operand_data[849], 1, 0, 0, 3 }, { "set_got", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_530 }, #else { 0, 0, output_530 }, #endif (insn_gen_fn) gen_set_got, &operand_data[65], 1, 0, 1, 3 }, { "eh_return_si", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_eh_return_si, &operand_data[896], 1, 0, 1, 1 }, { "eh_return_di", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_eh_return_di, &operand_data[897], 1, 0, 1, 1 }, { "leave", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "leave", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_leave, &operand_data[0], 0, 0, 0, 1 }, { "leave_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "leave", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_leave_rex64, &operand_data[0], 0, 0, 0, 1 }, { "*ffs_cmove", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[898], 3, 0, 1, 1 }, { "*ffs_no_cmove", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[901], 3, 0, 1, 1 }, { "*ffssi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "bsf{l}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[898], 2, 1, 1, 1 }, { "*ffs_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[904], 3, 0, 1, 1 }, { "*ffsdi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "bsf{q}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[904], 2, 1, 1, 1 }, { "ctzsi2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "bsf{l}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ctzsi2, &operand_data[898], 2, 0, 1, 1 }, { "ctzdi2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { 
#endif "bsf{q}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ctzdi2, &operand_data[904], 2, 0, 1, 1 }, { "*bsr", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "bsr{l}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[898], 2, 0, 1, 1 }, { "*bsr_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "bsr{q}\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[904], 2, 0, 1, 1 }, { "*tls_global_dynamic_32_gnu", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lea{l}\t{%a2@TLSGD(,%1,1), %0|%0, %a2@TLSGD[%1*1]}\n\tcall\t%P3", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[907], 6, 0, 1, 1 }, { "*tls_global_dynamic_32_sun", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lea{l}\t{%a2@DTLNDX(%1), %4|%4, %a2@DTLNDX[%1]}\n\ push{l}\t%4\n\tcall\t%a2@TLSPLT\n\tpop{l}\t%4\n\tnop", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[907], 6, 0, 1, 1 }, { "*tls_global_dynamic_64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif ".byte\t0x66\n\tlea{q}\t{%a1@TLSGD(%%rip), %%rdi|%%rdi, %a1@TLSGD[%%rip]}\n\t.word\t0x6666\n\trex64\n\tcall\t%P2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[913], 4, 0, 1, 1 }, { "*tls_local_dynamic_base_32_gnu", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lea{l}\t{%&@TLSLDM(%1), %0|%0, %&@TLSLDM[%1]}\n\tcall\t%P2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[917], 5, 0, 1, 1 }, { "*tls_local_dynamic_base_32_sun", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lea{l}\t{%&@TMDNX(%1), %3|%3, %&@TMDNX[%1]}\n\ push{l}\t%3\n\tcall\t%&@TLSPLT\n\tpop{l}\t%3", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[917], 5, 0, 1, 1 }, { "*tls_local_dynamic_base_64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lea{q}\t{%&@TLSLD(%%rip), %%rdi|%%rdi, %&@TLSLD[%%rip]}\n\tcall\t%P1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[922], 3, 0, 1, 1 }, { "*tls_local_dynamic_32_once", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[925], 6, 0, 1, 1 }, { "*load_tp_si", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mov{l}\t{%%gs:0, %0|%0, DWORD PTR %%gs:0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[65], 1, 0, 1, 1 }, { "*add_tp_si", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "add{l}\t{%%gs:0, %0|%0, DWORD PTR %%gs:0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[931], 2, 0, 1, 1 }, { "*load_tp_di", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mov{q}\t{%%fs:0, %0|%0, QWORD PTR %%fs:0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[134], 1, 0, 1, 1 }, { "*add_tp_di", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "add{q}\t{%%fs:0, %0|%0, QWORD PTR %%fs:0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[689], 2, 0, 1, 1 }, { "*fop_sf_comm_nosse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_555 }, #else { 0, 0, output_555 }, #endif 0, &operand_data[933], 4, 0, 1, 3 }, { "*fop_sf_comm", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_556 }, #else { 0, 0, output_556 }, #endif 0, &operand_data[937], 4, 0, 2, 3 }, { 
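/* The surrounding *fop_* records describe the two-operand x87 and SSE
   floating point arithmetic patterns; their templates come from output
   functions (output_555 and up) that choose the add/sub/mul/div mnemonic
   from the RTL code at output time.  After them come the square-root,
   fprem/fprem1 and the other x87 transcendental patterns: fsin, fcos,
   fsincos, fptan, fpatan, fyl2x, fyl2xp1, fxtract, frndint, f2xm1 and
   fscale.  */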
"*fop_sf_comm_sse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_557 }, #else { 0, 0, output_557 }, #endif 0, &operand_data[941], 4, 0, 1, 3 }, { "*fop_df_comm_nosse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_558 }, #else { 0, 0, output_558 }, #endif 0, &operand_data[945], 4, 0, 1, 3 }, { "*fop_df_comm", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_559 }, #else { 0, 0, output_559 }, #endif 0, &operand_data[949], 4, 0, 2, 3 }, { "*fop_df_comm_sse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_560 }, #else { 0, 0, output_560 }, #endif 0, &operand_data[953], 4, 0, 1, 3 }, { "*fop_xf_comm", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_561 }, #else { 0, 0, output_561 }, #endif 0, &operand_data[957], 4, 0, 1, 3 }, { "*fop_sf_1_nosse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_562 }, #else { 0, 0, output_562 }, #endif 0, &operand_data[961], 4, 0, 2, 3 }, { "*fop_sf_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_563 }, #else { 0, 0, output_563 }, #endif 0, &operand_data[965], 4, 0, 3, 3 }, { "*fop_sf_1_sse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_564 }, #else { 0, 0, output_564 }, #endif 0, &operand_data[969], 4, 0, 1, 3 }, { "*fop_sf_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_565 }, #else { 0, 0, output_565 }, #endif 0, &operand_data[973], 4, 0, 2, 3 }, { "*fop_sf_3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_566 }, #else { 0, 0, output_566 }, #endif 0, &operand_data[977], 4, 0, 2, 3 }, { "*fop_df_1_nosse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_567 }, #else { 0, 0, output_567 }, #endif 0, &operand_data[981], 4, 0, 2, 3 }, { "*fop_df_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_568 }, #else { 0, 0, output_568 }, #endif 0, &operand_data[985], 4, 0, 3, 3 }, { "*fop_df_1_sse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_569 }, #else { 0, 0, output_569 }, #endif 0, &operand_data[989], 4, 0, 1, 3 }, { "*fop_df_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_570 }, #else { 0, 0, output_570 }, #endif 0, &operand_data[993], 4, 0, 2, 3 }, { "*fop_df_3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_571 }, #else { 0, 0, output_571 }, #endif 0, &operand_data[997], 4, 0, 2, 3 }, { "*fop_df_4", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_572 }, #else { 0, 0, output_572 }, #endif 0, &operand_data[1001], 4, 0, 2, 3 }, { "*fop_df_5", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_573 }, #else { 0, 0, output_573 }, #endif 0, &operand_data[1005], 4, 0, 2, 3 }, { "*fop_df_6", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_574 }, #else { 0, 0, output_574 }, #endif 0, &operand_data[1009], 4, 0, 2, 3 }, { "*fop_xf_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_575 }, #else { 0, 0, output_575 }, #endif 0, &operand_data[1013], 4, 0, 2, 3 }, { "*fop_xf_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_576 }, #else { 0, 0, output_576 }, #endif 0, &operand_data[1017], 4, 0, 2, 3 }, { "*fop_xf_3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_577 }, #else { 0, 0, output_577 }, #endif 0, &operand_data[1021], 4, 0, 2, 3 }, { "*fop_xf_4", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_578 }, #else { 0, 0, output_578 }, #endif 0, &operand_data[1025], 4, 0, 2, 3 }, { "*fop_xf_5", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_579 }, #else { 0, 0, output_579 }, #endif 0, &operand_data[1029], 4, 0, 2, 3 }, { "*fop_xf_6", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_580 }, #else { 0, 0, 
output_580 }, #endif 0, &operand_data[1033], 4, 0, 2, 3 }, { "sqrtsf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_581 }, #else { 0, output_581, 0 }, #endif (insn_gen_fn) gen_sqrtsf2_1, &operand_data[1037], 2, 0, 2, 2 }, { "sqrtsf2_1_sse_only", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sqrtss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sqrtsf2_1_sse_only, &operand_data[1039], 2, 0, 1, 1 }, { "sqrtsf2_i387", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsqrt", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sqrtsf2_i387, &operand_data[716], 2, 0, 1, 1 }, { "sqrtdf2_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_584 }, #else { 0, output_584, 0 }, #endif (insn_gen_fn) gen_sqrtdf2_1, &operand_data[1041], 2, 0, 2, 2 }, { "sqrtdf2_1_sse_only", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sqrtsd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sqrtdf2_1_sse_only, &operand_data[1043], 2, 0, 1, 1 }, { "sqrtdf2_i387", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsqrt", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sqrtdf2_i387, &operand_data[718], 2, 0, 1, 1 }, { "*sqrtextendsfdf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsqrt", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[720], 2, 0, 1, 1 }, { "sqrtxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsqrt", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sqrtxf2, &operand_data[722], 2, 0, 1, 1 }, { "*sqrtextenddfxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsqrt", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[724], 2, 0, 1, 1 }, { "*sqrtextendsfxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsqrt", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[726], 2, 0, 1, 1 }, { "fpremxf4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fprem", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fpremxf4, &operand_data[1045], 4, 2, 1, 1 }, { "fprem1xf4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fprem1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fprem1xf4, &operand_data[1045], 4, 2, 1, 1 }, { "*sindf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsin", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[718], 2, 0, 1, 1 }, { "*sinsf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsin", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[716], 2, 0, 1, 1 }, { "*sinextendsfdf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsin", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[720], 2, 0, 1, 1 }, { "*sinxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsin", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[722], 2, 0, 1, 1 }, { "*cosdf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fcos", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[718], 2, 0, 1, 1 }, { "*cossf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fcos", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[716], 2, 0, 1, 1 }, { "*cosextendsfdf2", #if 
HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fcos", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[720], 2, 0, 1, 1 }, { "*cosxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fcos", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[722], 2, 0, 1, 1 }, { "sincosdf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsincos", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sincosdf3, &operand_data[1049], 3, 1, 1, 1 }, { "sincossf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsincos", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sincossf3, &operand_data[1052], 3, 1, 1, 1 }, { "*sincosextendsfdf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsincos", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1055], 3, 1, 1, 1 }, { "sincosxf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fsincos", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sincosxf3, &operand_data[1045], 3, 1, 1, 1 }, { "*tandf3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fptan", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1049], 3, 1, 1, 1 }, { "*tansf3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fptan", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1052], 3, 1, 1, 1 }, { "*tanxf3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fptan", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1045], 3, 1, 1, 1 }, { "atan2df3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fpatan", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_atan2df3_1, &operand_data[1058], 4, 0, 1, 1 }, { "atan2sf3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fpatan", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_atan2sf3_1, &operand_data[1062], 4, 0, 1, 1 }, { "atan2xf3_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fpatan", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_atan2xf3_1, &operand_data[1066], 4, 0, 1, 1 }, { "fyl2x_xf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fyl2x", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fyl2x_xf3, &operand_data[1066], 4, 0, 1, 1 }, { "fyl2xp1_xf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fyl2xp1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_fyl2xp1_xf3, &operand_data[1066], 4, 0, 1, 1 }, { "*fxtractxf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fxtract", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1045], 3, 1, 1, 1 }, { "*frndintxf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "frndint", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[722], 2, 0, 1, 1 }, { "*f2xm1xf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "f2xm1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[722], 2, 0, 1, 1 }, { "*fscalexf4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "fscale", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1045], 4, 2, 1, 1 }, { "cld", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cld", #if HAVE_DESIGNATED_INITIALIZERS 
}, #else 0, 0 }, #endif (insn_gen_fn) gen_cld, &operand_data[0], 0, 0, 0, 1 }, { "*strmovdi_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movsq", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1070], 4, 2, 1, 1 }, { "*strmovsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{movsl|movsd}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1074], 4, 2, 1, 1 }, { "*strmovsi_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{movsl|movsd}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1070], 4, 2, 1, 1 }, { "*strmovhi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movsw", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1074], 4, 2, 1, 1 }, { "*strmovhi_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movsw", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1070], 4, 2, 1, 1 }, { "*strmovqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movsb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1074], 4, 2, 1, 1 }, { "*strmovqi_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movsb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1070], 4, 2, 1, 1 }, { "*rep_movdi_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tmovsq|rep movsq}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1078], 6, 4, 1, 1 }, { "*rep_movsi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tmovsl|rep movsd}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1084], 6, 4, 1, 1 }, { "*rep_movsi_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tmovsl|rep movsd}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1078], 6, 4, 1, 1 }, { "*rep_movqi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tmovsb|rep movsb}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1084], 6, 4, 1, 1 }, { "*rep_movqi_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tmovsb|rep movsb}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1078], 6, 4, 1, 1 }, { "*strsetdi_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "stosq", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1090], 3, 1, 1, 1 }, { "*strsetsi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{stosl|stosd}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1093], 3, 1, 1, 1 }, { "*strsetsi_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{stosl|stosd}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1090], 3, 1, 1, 1 }, { "*strsethi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "stosw", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1096], 3, 1, 1, 1 }, { "*strsethi_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "stosw", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1099], 3, 1, 1, 1 }, { "*strsetqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "stosb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1102], 3, 1, 1, 1 }, { "*strsetqi_rex_1", #if 
HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "stosb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1105], 3, 1, 1, 1 }, { "*rep_stosdi_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tstosq|rep stosq}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1108], 5, 2, 1, 1 }, { "*rep_stossi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tstosl|rep stosd}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1113], 5, 2, 1, 1 }, { "*rep_stossi_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tstosl|rep stosd}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1118], 5, 2, 1, 1 }, { "*rep_stosqi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tstosb|rep stosb}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1123], 5, 2, 1, 1 }, { "*rep_stosqi_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "{rep\n\tstosb|rep stosb}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1128], 5, 2, 1, 1 }, { "*cmpstrqi_nz_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "repz{\n\t| }cmpsb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1133], 7, 0, 1, 1 }, { "*cmpstrqi_nz_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "repz{\n\t| }cmpsb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1140], 7, 0, 1, 1 }, { "*cmpstrqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "repz{\n\t| }cmpsb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1133], 7, 0, 1, 1 }, { "*cmpstrqi_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "repz{\n\t| }cmpsb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1140], 7, 0, 1, 1 }, { "*strlenqi_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "repnz{\n\t| }scasb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1147], 6, 0, 1, 1 }, { "*strlenqi_rex_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "repnz{\n\t| }scasb", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1153], 6, 0, 1, 1 }, { "x86_movdicc_0_m1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sbb{q}\t%0, %0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_x86_movdicc_0_m1_rex64, &operand_data[1159], 2, 0, 1, 1 }, { "movdicc_c_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_649 }, #else { 0, output_649, 0 }, #endif (insn_gen_fn) gen_movdicc_c_rex64, &operand_data[1161], 4, 0, 2, 2 }, { "x86_movsicc_0_m1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sbb{l}\t%0, %0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_x86_movsicc_0_m1, &operand_data[1165], 2, 0, 1, 1 }, { "*movsicc_noc", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_651 }, #else { 0, output_651, 0 }, #endif 0, &operand_data[1167], 4, 0, 2, 2 }, { "*movhicc_noc", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_652 }, #else { 0, output_652, 0 }, #endif 0, &operand_data[1171], 4, 0, 2, 2 }, { "*movqicc_noc", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1175], 5, 0, 2, 1 }, { "*movsfcc_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_654 
}, #else { 0, output_654, 0 }, #endif 0, &operand_data[1180], 4, 0, 4, 2 }, { "*movdfcc_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_655 }, #else { 0, output_655, 0 }, #endif 0, &operand_data[1184], 4, 0, 4, 2 }, { "*movdfcc_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_656 }, #else { 0, output_656, 0 }, #endif 0, &operand_data[1188], 4, 0, 4, 2 }, { "*movxfcc_1", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_657 }, #else { 0, output_657, 0 }, #endif 0, &operand_data[1192], 4, 0, 2, 2 }, { "*minsf", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1196], 3, 2, 3, 1 }, { "*minsf_nonieee", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1199], 3, 2, 2, 1 }, { "*minsf_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "minss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[969], 3, 2, 1, 1 }, { "*mindf", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1202], 3, 2, 3, 1 }, { "*mindf_nonieee", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1205], 3, 2, 2, 1 }, { "*mindf_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "minsd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[989], 3, 2, 1, 1 }, { "*maxsf", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1196], 3, 2, 3, 1 }, { "*maxsf_nonieee", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1199], 3, 2, 2, 1 }, { "*maxsf_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maxss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[969], 3, 2, 1, 1 }, { "*maxdf", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1202], 3, 2, 3, 1 }, { "*maxdf_nonieee", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1205], 3, 2, 2, 1 }, { "*maxdf_sse", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maxsd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[989], 3, 2, 1, 1 }, { "pro_epilogue_adjust_stack_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_670 }, #else { 0, 0, output_670 }, #endif (insn_gen_fn) gen_pro_epilogue_adjust_stack_1, &operand_data[1208], 3, 0, 2, 3 }, { "pro_epilogue_adjust_stack_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_671 }, #else { 0, 0, output_671 }, #endif (insn_gen_fn) gen_pro_epilogue_adjust_stack_rex64, &operand_data[1211], 3, 0, 2, 3 }, { "pro_epilogue_adjust_stack_rex64_2", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_672 }, #else { 0, 0, output_672 }, #endif (insn_gen_fn) gen_pro_epilogue_adjust_stack_rex64_2, &operand_data[1214], 4, 0, 2, 3 }, { "sse_movsfcc", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movsfcc, 
&operand_data[1218], 7, 0, 10, 1 }, { "sse_movsfcc_eq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movsfcc_eq, &operand_data[1225], 6, 0, 6, 1 }, { "sse_movdfcc", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movdfcc, &operand_data[1231], 7, 0, 10, 1 }, { "sse_movdfcc_eq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movdfcc_eq, &operand_data[1238], 6, 0, 6, 1 }, { "*sse_movsfcc_const0_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1244], 6, 0, 1, 1 }, { "*sse_movsfcc_const0_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1250], 6, 0, 1, 1 }, { "*sse_movsfcc_const0_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1256], 6, 0, 1, 1 }, { "*sse_movsfcc_const0_4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1262], 6, 0, 1, 1 }, { "*sse_movdfcc_const0_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1268], 6, 0, 1, 1 }, { "*sse_movdfcc_const0_2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1274], 6, 0, 1, 1 }, { "*sse_movdfcc_const0_3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1280], 6, 0, 1, 1 }, { "*sse_movdfcc_const0_4", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1286], 6, 0, 1, 1 }, { "allocate_stack_worker_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "call\t__alloca", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_allocate_stack_worker_1, &operand_data[1292], 2, 1, 1, 1 }, { "allocate_stack_worker_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "call\t__alloca", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_allocate_stack_worker_rex64, &operand_data[1294], 2, 1, 1, 1 }, { "*call_value_pop_0", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_687 }, #else { 0, 0, output_687 }, #endif 0, &operand_data[1296], 4, 0, 0, 3 }, { "*call_value_pop_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_688 }, #else { 0, 0, output_688 }, #endif 0, &operand_data[1300], 4, 0, 1, 3 }, { "*call_value_0", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_689 }, #else { 0, 0, output_689 }, #endif 0, &operand_data[1296], 3, 0, 0, 3 }, { "*call_value_0_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_690 }, #else { 0, 0, output_690 }, #endif 0, &operand_data[1304], 3, 0, 0, 3 }, { "*call_value_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_691 }, #else { 0, 0, output_691 }, #endif 0, &operand_data[1300], 3, 0, 1, 3 }, { "*sibcall_value_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_692 }, #else { 0, 0, output_692 }, #endif 0, 
&operand_data[1307], 3, 0, 4, 3 }, { "*call_value_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_693 }, #else { 0, 0, output_693 }, #endif 0, &operand_data[1310], 3, 0, 1, 3 }, { "*sibcall_value_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t%P1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1313], 3, 0, 0, 1 }, { "*sibcall_value_1_rex64_v", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "jmp\t*%%r11", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1316], 2, 0, 0, 1 }, { "trap", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "int\t$5", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_trap, &operand_data[0], 0, 0, 0, 1 }, { "*conditional_trap_1", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_697 }, #else { 0, 0, output_697 }, #endif 0, &operand_data[1318], 2, 0, 0, 3 }, { "movv4sf_internal", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_698 }, #else { 0, output_698, 0 }, #endif (insn_gen_fn) gen_movv4sf_internal, &operand_data[1320], 2, 0, 3, 2 }, { "movv4si_internal", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_699 }, #else { 0, 0, output_699 }, #endif (insn_gen_fn) gen_movv4si_internal, &operand_data[1322], 2, 0, 3, 3 }, { "movv2di_internal", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_700 }, #else { 0, 0, output_700 }, #endif (insn_gen_fn) gen_movv2di_internal, &operand_data[1324], 2, 0, 3, 3 }, { "movv8qi_internal", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_701 }, #else { 0, output_701, 0 }, #endif (insn_gen_fn) gen_movv8qi_internal, &operand_data[1326], 2, 0, 3, 2 }, { "movv4hi_internal", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_702 }, #else { 0, output_702, 0 }, #endif (insn_gen_fn) gen_movv4hi_internal, &operand_data[1328], 2, 0, 3, 2 }, { "movv2si_internal", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_703 }, #else { 0, output_703, 0 }, #endif (insn_gen_fn) gen_movv2si_internal, &operand_data[1330], 2, 0, 3, 2 }, { "movv2sf_internal", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_704 }, #else { 0, output_704, 0 }, #endif (insn_gen_fn) gen_movv2sf_internal, &operand_data[1332], 2, 0, 3, 2 }, { "movv2df_internal", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_705 }, #else { 0, 0, output_705 }, #endif (insn_gen_fn) gen_movv2df_internal, &operand_data[1334], 2, 0, 3, 3 }, { "movv8hi_internal", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_706 }, #else { 0, 0, output_706 }, #endif (insn_gen_fn) gen_movv8hi_internal, &operand_data[1336], 2, 0, 3, 3 }, { "movv16qi_internal", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_707 }, #else { 0, 0, output_707 }, #endif (insn_gen_fn) gen_movv16qi_internal, &operand_data[1338], 2, 0, 3, 3 }, { "*pushti", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1340], 2, 0, 1, 1 }, { "*pushv2df", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1342], 2, 0, 1, 1 }, { "*pushv2di", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1344], 2, 0, 1, 1 }, { "*pushv8hi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1346], 2, 0, 1, 1 }, { "*pushv16qi", #if 
HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1348], 2, 0, 1, 1 }, { "*pushv4sf", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1350], 2, 0, 1, 1 }, { "*pushv4si", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1352], 2, 0, 1, 1 }, { "*pushv2si", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1354], 2, 0, 1, 1 }, { "*pushv4hi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1356], 2, 0, 1, 1 }, { "*pushv8qi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1358], 2, 0, 1, 1 }, { "*pushv2sf", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "#", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1360], 2, 0, 1, 1 }, { "movti_internal", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_719 }, #else { 0, 0, output_719 }, #endif (insn_gen_fn) gen_movti_internal, &operand_data[1362], 2, 0, 3, 3 }, { "*movti_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_720 }, #else { 0, 0, output_720 }, #endif 0, &operand_data[1364], 2, 0, 5, 3 }, { "*movtf_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_721 }, #else { 0, 0, output_721 }, #endif 0, &operand_data[1366], 2, 0, 5, 3 }, { "*sse_movaps_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movaps\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1368], 2, 0, 2, 1 }, { "*sse_movups_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movups\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1368], 2, 0, 2, 1 }, { "sse_movmskps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movmskps\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movmskps, &operand_data[1370], 2, 0, 1, 1 }, { "mmx_pmovmskb", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmovmskb\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_pmovmskb, &operand_data[1372], 2, 0, 1, 1 }, { "mmx_maskmovq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maskmovq\t{%2, %1|%1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_maskmovq, &operand_data[1374], 3, 0, 1, 1 }, { "mmx_maskmovq_rex", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maskmovq\t{%2, %1|%1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_maskmovq_rex, &operand_data[1377], 3, 0, 1, 1 }, { "sse_movntv4sf", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movntps\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movntv4sf, &operand_data[1380], 2, 0, 1, 1 }, { "sse_movntdi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movntq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movntdi, &operand_data[1382], 2, 0, 1, 1 }, { "sse_movhlps", #if HAVE_DESIGNATED_INITIALIZERS { .single = 
#else { #endif "movhlps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movhlps, &operand_data[1384], 3, 0, 1, 1 }, { "sse_movlhps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movlhps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movlhps, &operand_data[1384], 3, 0, 1, 1 }, { "sse_movhps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movhps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movhps, &operand_data[1387], 3, 0, 2, 1 }, { "sse_movlps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movlps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movlps, &operand_data[1387], 3, 0, 2, 1 }, { "sse_loadss_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_loadss_1, &operand_data[1390], 3, 0, 1, 1 }, { "sse_movss", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_movss, &operand_data[1384], 3, 0, 1, 1 }, { "sse_storess", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_storess, &operand_data[1393], 2, 0, 1, 1 }, { "sse_shufps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shufps\t{%3, %2, %0|%0, %2, %3}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_shufps, &operand_data[1395], 4, 0, 1, 1 }, { "addv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "addps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv4sf3, &operand_data[1395], 3, 0, 1, 1 }, { "vmaddv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "addss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmaddv4sf3, &operand_data[1395], 3, 1, 1, 1 }, { "subv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "subps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv4sf3, &operand_data[1395], 3, 0, 1, 1 }, { "vmsubv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "subss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmsubv4sf3, &operand_data[1395], 3, 1, 1, 1 }, { "mulv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mulps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mulv4sf3, &operand_data[1395], 3, 0, 1, 1 }, { "vmmulv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mulss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmmulv4sf3, &operand_data[1395], 3, 1, 1, 1 }, { "divv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "divps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_divv4sf3, &operand_data[1395], 3, 0, 1, 1 }, { "vmdivv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "divss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmdivv4sf3, &operand_data[1395], 3, 1, 1, 1 }, { "rcpv4sf2", #if 
HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rcpps\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_rcpv4sf2, &operand_data[1399], 2, 0, 1, 1 }, { "vmrcpv4sf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rcpss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmrcpv4sf2, &operand_data[1399], 3, 0, 1, 1 }, { "rsqrtv4sf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rsqrtps\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_rsqrtv4sf2, &operand_data[1399], 2, 0, 1, 1 }, { "vmrsqrtv4sf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "rsqrtss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmrsqrtv4sf2, &operand_data[1399], 3, 0, 1, 1 }, { "sqrtv4sf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sqrtps\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sqrtv4sf2, &operand_data[1399], 2, 0, 1, 1 }, { "vmsqrtv4sf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sqrtss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmsqrtv4sf2, &operand_data[1399], 3, 0, 1, 1 }, { "*sse_andv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "andps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1402], 3, 0, 1, 1 }, { "*sse_nandv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "andnps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1395], 3, 0, 1, 1 }, { "*sse_iorv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "orps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1402], 3, 0, 1, 1 }, { "*sse_xorv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xorps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1402], 3, 0, 1, 1 }, { "*sse2_andv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "andpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1405], 3, 0, 1, 1 }, { "*sse2_nandv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "andnpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1408], 3, 0, 1, 1 }, { "*sse2_iorv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "orpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1405], 3, 0, 1, 1 }, { "*sse2_xorv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xorpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1405], 3, 0, 1, 1 }, { "*sse2_andti3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pand\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1411], 3, 0, 1, 1 }, { "sse2_andv2di3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pand\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_andv2di3, &operand_data[1414], 3, 0, 1, 1 }, { "*sse2_nandti3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pandn\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1417], 3, 
0, 1, 1 }, { "sse2_nandv2di3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pandn\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_nandv2di3, &operand_data[1420], 3, 0, 1, 1 }, { "*sse2_iorti3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "por\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1411], 3, 0, 1, 1 }, { "sse2_iorv2di3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "por\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_iorv2di3, &operand_data[1414], 3, 0, 1, 1 }, { "*sse2_xorti3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pxor\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1411], 3, 0, 1, 1 }, { "sse2_xorv2di3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pxor\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_xorv2di3, &operand_data[1414], 3, 0, 1, 1 }, { "sse_clrv4sf", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_768 }, #else { 0, 0, output_768 }, #endif (insn_gen_fn) gen_sse_clrv4sf, &operand_data[1423], 2, 0, 1, 3 }, { "sse_clrv2df", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "xorpd\t{%0, %0|%0, %0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_clrv2df, &operand_data[1405], 1, 0, 1, 1 }, { "maskcmpv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp%D3ps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_maskcmpv4sf3, &operand_data[1425], 4, 0, 1, 1 }, { "maskncmpv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_771 }, #else { 0, 0, output_771 }, #endif (insn_gen_fn) gen_maskncmpv4sf3, &operand_data[1425], 4, 0, 1, 3 }, { "vmmaskcmpv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp%D3ss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmmaskcmpv4sf3, &operand_data[1425], 4, 1, 1, 1 }, { "vmmaskncmpv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_773 }, #else { 0, 0, output_773 }, #endif (insn_gen_fn) gen_vmmaskncmpv4sf3, &operand_data[1425], 4, 1, 1, 3 }, { "sse_comi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "comiss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_comi, &operand_data[1429], 2, 0, 1, 1 }, { "sse_ucomi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ucomiss\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_ucomi, &operand_data[1429], 2, 0, 1, 1 }, { "sse_unpckhps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "unpckhps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_unpckhps, &operand_data[1384], 3, 0, 1, 1 }, { "sse_unpcklps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "unpcklps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse_unpcklps, &operand_data[1384], 3, 0, 1, 1 }, { "smaxv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maxps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_smaxv4sf3, &operand_data[1395], 3, 0, 1, 1 }, { "vmsmaxv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maxss\t{%2, 
%0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmsmaxv4sf3, &operand_data[1395], 3, 1, 1, 1 }, { "sminv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "minps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sminv4sf3, &operand_data[1395], 3, 0, 1, 1 }, { "vmsminv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "minss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmsminv4sf3, &operand_data[1395], 3, 1, 1, 1 }, { "cvtpi2ps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtpi2ps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtpi2ps, &operand_data[1431], 3, 0, 1, 1 }, { "cvtps2pi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtps2pi\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtps2pi, &operand_data[1434], 2, 0, 1, 1 }, { "cvttps2pi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttps2pi\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvttps2pi, &operand_data[1434], 2, 0, 1, 1 }, { "cvtsi2ss", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsi2ss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtsi2ss, &operand_data[1436], 3, 0, 2, 1 }, { "cvtsi2ssq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsi2ssq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtsi2ssq, &operand_data[1439], 3, 0, 2, 1 }, { "cvtss2si", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtss2si\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtss2si, &operand_data[1442], 2, 0, 2, 1 }, { "cvtss2siq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtss2siq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtss2siq, &operand_data[1444], 2, 0, 2, 1 }, { "cvttss2si", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttss2si\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvttss2si, &operand_data[1446], 2, 0, 2, 1 }, { "cvttss2siq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttss2siq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvttss2siq, &operand_data[1448], 2, 0, 2, 1 }, { "addv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv8qi3, &operand_data[1450], 3, 0, 1, 1 }, { "addv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv4hi3, &operand_data[1453], 3, 0, 1, 1 }, { "addv2si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv2si3, &operand_data[1456], 3, 0, 1, 1 }, { "mmx_adddi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_adddi3, &operand_data[1459], 3, 0, 1, 1 }, { "ssaddv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = 
#else { #endif "paddsb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ssaddv8qi3, &operand_data[1450], 3, 0, 1, 1 }, { "ssaddv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddsw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ssaddv4hi3, &operand_data[1453], 3, 0, 1, 1 }, { "usaddv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddusb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_usaddv8qi3, &operand_data[1450], 3, 0, 1, 1 }, { "usaddv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddusw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_usaddv4hi3, &operand_data[1453], 3, 0, 1, 1 }, { "subv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv8qi3, &operand_data[1462], 3, 0, 1, 1 }, { "subv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "subv2si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv2si3, &operand_data[1468], 3, 0, 1, 1 }, { "mmx_subdi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_subdi3, &operand_data[1471], 3, 0, 1, 1 }, { "sssubv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubsb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sssubv8qi3, &operand_data[1462], 3, 0, 1, 1 }, { "sssubv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubsw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sssubv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "ussubv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubusb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ussubv8qi3, &operand_data[1462], 3, 0, 1, 1 }, { "ussubv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubusw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ussubv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "mulv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmullw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mulv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "smulv4hi3_highpart", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmulhw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_smulv4hi3_highpart, &operand_data[1465], 3, 0, 1, 1 }, { "umulv4hi3_highpart", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmulhuw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_umulv4hi3_highpart, &operand_data[1465], 3, 0, 1, 1 }, { "mmx_pmaddwd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmaddwd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_pmaddwd, &operand_data[1474], 3, 2, 1, 
1 }, { "mmx_iordi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "por\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_iordi3, &operand_data[1459], 3, 0, 1, 1 }, { "mmx_xordi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pxor\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_xordi3, &operand_data[1459], 3, 0, 1, 1 }, { "mmx_clrdi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pxor\t{%0, %0|%0, %0}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_clrdi, &operand_data[1459], 1, 0, 1, 1 }, { "mmx_anddi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pand\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_anddi3, &operand_data[1459], 3, 0, 1, 1 }, { "mmx_nanddi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pandn\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_nanddi3, &operand_data[1471], 3, 0, 1, 1 }, { "mmx_uavgv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pavgb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_uavgv8qi3, &operand_data[1462], 3, 0, 1, 1 }, { "mmx_uavgv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pavgw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_uavgv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "mmx_psadbw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psadbw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_psadbw, &operand_data[1477], 3, 0, 1, 1 }, { "mmx_pinsrw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pinsrw\t{%3, %2, %0|%0, %2, %3}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_pinsrw, &operand_data[1480], 4, 0, 1, 1 }, { "mmx_pextrw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pextrw\t{%2, %1, %0|%0, %1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_pextrw, &operand_data[1484], 3, 0, 1, 1 }, { "mmx_pshufw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pshufw\t{%2, %1, %0|%0, %1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_pshufw, &operand_data[1487], 3, 0, 1, 1 }, { "eqv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpeqb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_eqv8qi3, &operand_data[1462], 3, 0, 1, 1 }, { "eqv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpeqw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_eqv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "eqv2si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpeqd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_eqv2si3, &operand_data[1468], 3, 0, 1, 1 }, { "gtv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpgtb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_gtv8qi3, &operand_data[1462], 3, 0, 1, 1 }, { "gtv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpgtw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 
(insn_gen_fn) gen_gtv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "gtv2si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpgtd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_gtv2si3, &operand_data[1468], 3, 0, 1, 1 }, { "umaxv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmaxub\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_umaxv8qi3, &operand_data[1462], 3, 0, 1, 1 }, { "smaxv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmaxsw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_smaxv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "uminv8qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pminub\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_uminv8qi3, &operand_data[1462], 3, 0, 1, 1 }, { "sminv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pminsw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sminv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "ashrv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psraw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashrv4hi3, &operand_data[1490], 3, 0, 1, 1 }, { "ashrv2si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrad\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashrv2si3, &operand_data[1493], 3, 0, 1, 1 }, { "lshrv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrlw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lshrv4hi3, &operand_data[1490], 3, 0, 1, 1 }, { "lshrv2si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrld\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lshrv2si3, &operand_data[1493], 3, 0, 1, 1 }, { "mmx_lshrdi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrlq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_lshrdi3, &operand_data[1496], 3, 0, 1, 1 }, { "ashlv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psllw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashlv4hi3, &operand_data[1490], 3, 0, 1, 1 }, { "ashlv2si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pslld\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashlv2si3, &operand_data[1493], 3, 0, 1, 1 }, { "mmx_ashldi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psllq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_ashldi3, &operand_data[1496], 3, 0, 1, 1 }, { "mmx_packsswb", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "packsswb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_packsswb, &operand_data[1499], 3, 0, 1, 1 }, { "mmx_packssdw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "packssdw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_packssdw, &operand_data[1502], 3, 0, 1, 1 }, { "mmx_packuswb", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "packuswb\t{%2, %0|%0, %2}", #if 
HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_packuswb, &operand_data[1499], 3, 0, 1, 1 }, { "mmx_punpckhbw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpckhbw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_punpckhbw, &operand_data[1505], 3, 0, 1, 1 }, { "mmx_punpckhwd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpckhwd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_punpckhwd, &operand_data[1508], 3, 0, 1, 1 }, { "mmx_punpckhdq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpckhdq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_punpckhdq, &operand_data[1511], 3, 0, 1, 1 }, { "mmx_punpcklbw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpcklbw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_punpcklbw, &operand_data[1505], 3, 0, 1, 1 }, { "mmx_punpcklwd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpcklwd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_punpcklwd, &operand_data[1508], 3, 0, 1, 1 }, { "mmx_punpckldq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpckldq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mmx_punpckldq, &operand_data[1511], 3, 0, 1, 1 }, { "emms", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "emms", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_emms, &operand_data[0], 0, 0, 0, 1 }, { "ldmxcsr", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ldmxcsr\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ldmxcsr, &operand_data[1514], 1, 0, 1, 1 }, { "stmxcsr", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "stmxcsr\t%0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_stmxcsr, &operand_data[290], 1, 0, 1, 1 }, { "*sfence_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sfence", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1515], 1, 1, 0, 1 }, { "*sse_prologue_save_insn", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_853 }, #else { 0, 0, output_853 }, #endif 0, &operand_data[1516], 5, 0, 1, 3 }, { "addv2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfadd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv2sf3, &operand_data[1521], 3, 0, 1, 1 }, { "subv2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfsub\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv2sf3, &operand_data[1521], 3, 0, 1, 1 }, { "subrv2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfsubr\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subrv2sf3, &operand_data[1521], 3, 0, 1, 1 }, { "gtv2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfcmpgt\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_gtv2sf3, &operand_data[1524], 3, 0, 1, 1 }, { "gev2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfcmpge\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_gev2sf3, 
&operand_data[1524], 3, 0, 1, 1 }, { "eqv2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfcmpeq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_eqv2sf3, &operand_data[1524], 3, 0, 1, 1 }, { "pfmaxv2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfmax\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfmaxv2sf3, &operand_data[1521], 3, 0, 1, 1 }, { "pfminv2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfmin\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfminv2sf3, &operand_data[1521], 3, 0, 1, 1 }, { "mulv2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfmul\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mulv2sf3, &operand_data[1521], 3, 0, 1, 1 }, { "femms", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "femms", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_femms, &operand_data[0], 0, 0, 0, 1 }, { "pf2id", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pf2id\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pf2id, &operand_data[1527], 2, 0, 1, 1 }, { "pf2iw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pf2iw\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pf2iw, &operand_data[1527], 2, 0, 1, 1 }, { "pfacc", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfacc\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfacc, &operand_data[1529], 3, 2, 1, 1 }, { "pfnacc", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfnacc\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfnacc, &operand_data[1529], 3, 2, 1, 1 }, { "pfpnacc", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfpnacc\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfpnacc, &operand_data[1529], 3, 2, 1, 1 }, { "pi2fw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pi2fw\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pi2fw, &operand_data[1532], 2, 1, 1, 1 }, { "floatv2si2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pi2fd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_floatv2si2, &operand_data[1532], 2, 0, 1, 1 }, { "pavgusb", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pavgusb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pavgusb, &operand_data[1462], 3, 0, 1, 1 }, { "pfrcpv2sf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfrcp\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfrcpv2sf2, &operand_data[1534], 2, 0, 1, 1 }, { "pfrcpit1v2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfrcpit1\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfrcpit1v2sf3, &operand_data[1521], 3, 0, 1, 1 }, { "pfrcpit2v2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfrcpit2\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfrcpit2v2sf3, &operand_data[1521], 3, 0, 1, 1 }, { 
"pfrsqrtv2sf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfrsqrt\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfrsqrtv2sf2, &operand_data[1534], 2, 0, 1, 1 }, { "pfrsqit1v2sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pfrsqit1\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pfrsqit1v2sf3, &operand_data[1521], 3, 0, 1, 1 }, { "pmulhrwv4hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmulhrw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pmulhrwv4hi3, &operand_data[1465], 3, 0, 1, 1 }, { "pswapdv2si2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pswapd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pswapdv2si2, &operand_data[1536], 2, 0, 1, 1 }, { "pswapdv2sf2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pswapd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_pswapdv2sf2, &operand_data[1534], 2, 0, 1, 1 }, { "*prefetch_sse", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_880 }, #else { 0, 0, output_880 }, #endif 0, &operand_data[1538], 2, 0, 1, 3 }, { "*prefetch_sse_rex", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_881 }, #else { 0, 0, output_881 }, #endif 0, &operand_data[1540], 2, 0, 1, 3 }, { "*prefetch_3dnow", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_882 }, #else { 0, 0, output_882 }, #endif 0, &operand_data[1542], 2, 0, 1, 3 }, { "*prefetch_3dnow_rex", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_883 }, #else { 0, 0, output_883 }, #endif 0, &operand_data[1544], 2, 0, 1, 3 }, { "addv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "addpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv2df3, &operand_data[1408], 3, 0, 1, 1 }, { "vmaddv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "addsd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmaddv2df3, &operand_data[1408], 3, 1, 1, 1 }, { "subv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "subpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv2df3, &operand_data[1408], 3, 0, 1, 1 }, { "vmsubv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "subsd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmsubv2df3, &operand_data[1408], 3, 1, 1, 1 }, { "mulv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mulpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mulv2df3, &operand_data[1408], 3, 0, 1, 1 }, { "vmmulv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mulsd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmmulv2df3, &operand_data[1408], 3, 1, 1, 1 }, { "divv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "divpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_divv2df3, &operand_data[1408], 3, 0, 1, 1 }, { "vmdivv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "divsd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmdivv2df3, &operand_data[1408], 3, 1, 1, 1 }, { 
"smaxv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maxpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_smaxv2df3, &operand_data[1408], 3, 0, 1, 1 }, { "vmsmaxv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maxsd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmsmaxv2df3, &operand_data[1408], 3, 1, 1, 1 }, { "sminv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "minpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sminv2df3, &operand_data[1408], 3, 0, 1, 1 }, { "vmsminv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "minsd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmsminv2df3, &operand_data[1408], 3, 1, 1, 1 }, { "sqrtv2df2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sqrtpd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sqrtv2df2, &operand_data[1546], 2, 0, 1, 1 }, { "vmsqrtv2df2", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "sqrtsd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmsqrtv2df2, &operand_data[1546], 3, 0, 1, 1 }, { "maskcmpv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp%D3pd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_maskcmpv2df3, &operand_data[1549], 4, 0, 1, 1 }, { "maskncmpv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_899 }, #else { 0, 0, output_899 }, #endif (insn_gen_fn) gen_maskncmpv2df3, &operand_data[1549], 4, 0, 1, 3 }, { "vmmaskcmpv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cmp%D3sd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_vmmaskcmpv2df3, &operand_data[1549], 4, 1, 1, 1 }, { "vmmaskncmpv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_901 }, #else { 0, 0, output_901 }, #endif (insn_gen_fn) gen_vmmaskncmpv2df3, &operand_data[1549], 4, 1, 1, 3 }, { "sse2_comi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "comisd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_comi, &operand_data[1553], 2, 0, 1, 1 }, { "sse2_ucomi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "ucomisd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_ucomi, &operand_data[1553], 2, 0, 1, 1 }, { "sse2_movmskpd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movmskpd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movmskpd, &operand_data[1555], 2, 0, 1, 1 }, { "sse2_pmovmskb", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmovmskb\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_pmovmskb, &operand_data[1557], 2, 0, 1, 1 }, { "sse2_maskmovdqu", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maskmovdqu\t{%2, %1|%1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_maskmovdqu, &operand_data[1559], 3, 0, 1, 1 }, { "sse2_maskmovdqu_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "maskmovdqu\t{%2, %1|%1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_maskmovdqu_rex64, 
&operand_data[1562], 3, 0, 1, 1 }, { "sse2_movntv2df", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movntpd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movntv2df, &operand_data[1565], 2, 0, 1, 1 }, { "sse2_movntv2di", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movntdq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movntv2di, &operand_data[1567], 2, 0, 1, 1 }, { "sse2_movntsi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movnti\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movntsi, &operand_data[1569], 2, 0, 1, 1 }, { "cvtdq2ps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtdq2ps\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtdq2ps, &operand_data[1571], 2, 0, 1, 1 }, { "cvtps2dq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtps2dq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtps2dq, &operand_data[1573], 2, 0, 1, 1 }, { "cvttps2dq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttps2dq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvttps2dq, &operand_data[1573], 2, 0, 1, 1 }, { "cvtdq2pd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtdq2pd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtdq2pd, &operand_data[1575], 2, 0, 1, 1 }, { "cvtpd2dq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtpd2dq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtpd2dq, &operand_data[1577], 2, 0, 1, 1 }, { "cvttpd2dq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttpd2dq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvttpd2dq, &operand_data[1577], 2, 0, 1, 1 }, { "cvtpd2pi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtpd2pi\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtpd2pi, &operand_data[1579], 2, 0, 1, 1 }, { "cvttpd2pi", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttpd2pi\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvttpd2pi, &operand_data[1579], 2, 0, 1, 1 }, { "cvtpi2pd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtpi2pd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtpi2pd, &operand_data[1581], 2, 0, 1, 1 }, { "cvtsd2si", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsd2si\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtsd2si, &operand_data[1583], 2, 0, 2, 1 }, { "cvtsd2siq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsd2siq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtsd2siq, &operand_data[1585], 2, 0, 2, 1 }, { "cvttsd2si", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttsd2si\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvttsd2si, &operand_data[1587], 2, 0, 2, 1 }, { "cvttsd2siq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvttsd2siq\t{%1, %0|%0, %1}", #if 
HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvttsd2siq, &operand_data[1589], 2, 0, 2, 1 }, { "cvtsi2sd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsi2sd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtsi2sd, &operand_data[1591], 3, 0, 2, 1 }, { "cvtsi2sdq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsi2sdq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtsi2sdq, &operand_data[1594], 3, 0, 2, 1 }, { "cvtsd2ss", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtsd2ss\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtsd2ss, &operand_data[1597], 3, 0, 2, 1 }, { "cvtss2sd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtss2sd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtss2sd, &operand_data[1600], 3, 0, 1, 1 }, { "cvtpd2ps", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtpd2ps\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtpd2ps, &operand_data[1603], 2, 0, 1, 1 }, { "cvtps2pd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "cvtps2pd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_cvtps2pd, &operand_data[1605], 2, 0, 1, 1 }, { "addv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv16qi3, &operand_data[1607], 3, 0, 1, 1 }, { "addv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv8hi3, &operand_data[1610], 3, 0, 1, 1 }, { "addv4si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv4si3, &operand_data[1613], 3, 0, 1, 1 }, { "addv2di3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addv2di3, &operand_data[1616], 3, 0, 1, 1 }, { "ssaddv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddsb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ssaddv16qi3, &operand_data[1607], 3, 0, 1, 1 }, { "ssaddv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddsw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ssaddv8hi3, &operand_data[1610], 3, 0, 1, 1 }, { "usaddv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddusb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_usaddv16qi3, &operand_data[1607], 3, 0, 1, 1 }, { "usaddv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "paddusw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_usaddv8hi3, &operand_data[1610], 3, 0, 1, 1 }, { "subv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv16qi3, &operand_data[1619], 3, 0, 1, 1 }, { "subv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif 
"psubw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv8hi3, &operand_data[1622], 3, 0, 1, 1 }, { "subv4si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv4si3, &operand_data[1625], 3, 0, 1, 1 }, { "subv2di3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_subv2di3, &operand_data[1420], 3, 0, 1, 1 }, { "sssubv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubsb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sssubv16qi3, &operand_data[1619], 3, 0, 1, 1 }, { "sssubv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubsw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sssubv8hi3, &operand_data[1622], 3, 0, 1, 1 }, { "ussubv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubusb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ussubv16qi3, &operand_data[1619], 3, 0, 1, 1 }, { "ussubv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psubusw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ussubv8hi3, &operand_data[1622], 3, 0, 1, 1 }, { "mulv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmullw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mulv8hi3, &operand_data[1622], 3, 0, 1, 1 }, { "smulv8hi3_highpart", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmulhw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_smulv8hi3_highpart, &operand_data[1622], 3, 0, 1, 1 }, { "umulv8hi3_highpart", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmulhuw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_umulv8hi3_highpart, &operand_data[1622], 3, 0, 1, 1 }, { "sse2_umulsidi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmuludq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_umulsidi3, &operand_data[1628], 3, 0, 1, 1 }, { "sse2_umulv2siv2di3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmuludq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_umulv2siv2di3, &operand_data[1631], 3, 0, 1, 1 }, { "sse2_pmaddwd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmaddwd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_pmaddwd, &operand_data[1634], 3, 2, 1, 1 }, { "sse2_clrti", #if HAVE_DESIGNATED_INITIALIZERS { .function = output_952 }, #else { 0, 0, output_952 }, #endif (insn_gen_fn) gen_sse2_clrti, &operand_data[1411], 1, 0, 1, 3 }, { "sse2_uavgv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pavgb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_uavgv16qi3, &operand_data[1619], 3, 0, 1, 1 }, { "sse2_uavgv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pavgw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_uavgv8hi3, &operand_data[1622], 3, 0, 1, 1 }, { 
"sse2_psadbw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psadbw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_psadbw, &operand_data[1637], 3, 0, 1, 1 }, { "sse2_pinsrw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pinsrw\t{%3, %2, %0|%0, %2, %3}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_pinsrw, &operand_data[1640], 4, 0, 1, 1 }, { "sse2_pextrw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pextrw\t{%2, %1, %0|%0, %1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_pextrw, &operand_data[1644], 3, 0, 1, 1 }, { "sse2_pshufd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pshufd\t{%2, %1, %0|%0, %1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_pshufd, &operand_data[1647], 3, 0, 1, 1 }, { "sse2_pshuflw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pshuflw\t{%2, %1, %0|%0, %1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_pshuflw, &operand_data[1650], 3, 0, 1, 1 }, { "sse2_pshufhw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pshufhw\t{%2, %1, %0|%0, %1, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_pshufhw, &operand_data[1650], 3, 0, 1, 1 }, { "eqv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpeqb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_eqv16qi3, &operand_data[1619], 3, 0, 1, 1 }, { "eqv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpeqw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_eqv8hi3, &operand_data[1622], 3, 0, 1, 1 }, { "eqv4si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpeqd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_eqv4si3, &operand_data[1625], 3, 0, 1, 1 }, { "gtv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpgtb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_gtv16qi3, &operand_data[1619], 3, 0, 1, 1 }, { "gtv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpgtw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_gtv8hi3, &operand_data[1622], 3, 0, 1, 1 }, { "gtv4si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pcmpgtd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_gtv4si3, &operand_data[1625], 3, 0, 1, 1 }, { "umaxv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmaxub\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_umaxv16qi3, &operand_data[1619], 3, 0, 1, 1 }, { "smaxv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pmaxsw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_smaxv8hi3, &operand_data[1622], 3, 0, 1, 1 }, { "uminv16qi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pminub\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_uminv16qi3, &operand_data[1619], 3, 0, 1, 1 }, { "sminv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pminsw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, 
#endif (insn_gen_fn) gen_sminv8hi3, &operand_data[1622], 3, 0, 1, 1 }, { "ashrv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psraw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashrv8hi3, &operand_data[1653], 3, 0, 1, 1 }, { "ashrv4si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrad\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashrv4si3, &operand_data[1656], 3, 0, 1, 1 }, { "lshrv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrlw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lshrv8hi3, &operand_data[1653], 3, 0, 1, 1 }, { "lshrv4si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrld\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lshrv4si3, &operand_data[1656], 3, 0, 1, 1 }, { "lshrv2di3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrlq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lshrv2di3, &operand_data[1659], 3, 0, 1, 1 }, { "ashlv8hi3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psllw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashlv8hi3, &operand_data[1653], 3, 0, 1, 1 }, { "ashlv4si3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pslld\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashlv4si3, &operand_data[1656], 3, 0, 1, 1 }, { "ashlv2di3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psllq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashlv2di3, &operand_data[1659], 3, 0, 1, 1 }, { "ashrv8hi3_ti", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psraw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashrv8hi3_ti, &operand_data[1662], 3, 0, 1, 1 }, { "ashrv4si3_ti", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrad\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashrv4si3_ti, &operand_data[1665], 3, 0, 1, 1 }, { "lshrv8hi3_ti", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrlw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lshrv8hi3_ti, &operand_data[1662], 3, 0, 1, 1 }, { "lshrv4si3_ti", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrld\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lshrv4si3_ti, &operand_data[1665], 3, 0, 1, 1 }, { "lshrv2di3_ti", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrlq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lshrv2di3_ti, &operand_data[1668], 3, 0, 1, 1 }, { "ashlv8hi3_ti", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psllw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashlv8hi3_ti, &operand_data[1662], 3, 0, 1, 1 }, { "ashlv4si3_ti", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pslld\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashlv4si3_ti, &operand_data[1665], 3, 0, 1, 1 }, { "ashlv2di3_ti", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psllq\t{%2, %0|%0, %2}", #if 
HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_ashlv2di3_ti, &operand_data[1668], 3, 0, 1, 1 }, { "sse2_ashlti3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "pslldq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_ashlti3, &operand_data[1671], 3, 0, 1, 1 }, { "sse2_lshrti3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "psrldq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_lshrti3, &operand_data[1671], 3, 0, 1, 1 }, { "sse2_unpckhpd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "unpckhpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_unpckhpd, &operand_data[1674], 3, 0, 1, 1 }, { "sse2_unpcklpd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "unpcklpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_unpcklpd, &operand_data[1674], 3, 0, 1, 1 }, { "sse2_packsswb", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "packsswb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_packsswb, &operand_data[1677], 3, 0, 1, 1 }, { "sse2_packssdw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "packssdw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_packssdw, &operand_data[1680], 3, 0, 1, 1 }, { "sse2_packuswb", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "packuswb\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_packuswb, &operand_data[1677], 3, 0, 1, 1 }, { "sse2_punpckhbw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpckhbw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_punpckhbw, &operand_data[1683], 3, 0, 1, 1 }, { "sse2_punpckhwd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpckhwd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_punpckhwd, &operand_data[1686], 3, 0, 1, 1 }, { "sse2_punpckhdq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpckhdq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_punpckhdq, &operand_data[1689], 3, 0, 1, 1 }, { "sse2_punpcklbw", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpcklbw\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_punpcklbw, &operand_data[1683], 3, 0, 1, 1 }, { "sse2_punpcklwd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpcklwd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_punpcklwd, &operand_data[1686], 3, 0, 1, 1 }, { "sse2_punpckldq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpckldq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_punpckldq, &operand_data[1689], 3, 0, 1, 1 }, { "sse2_punpcklqdq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpcklqdq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_punpcklqdq, &operand_data[1692], 3, 0, 1, 1 }, { "sse2_punpckhqdq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "punpckhqdq\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 
0 }, #endif (insn_gen_fn) gen_sse2_punpckhqdq, &operand_data[1692], 3, 0, 1, 1 }, { "sse2_movapd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movapd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movapd, &operand_data[1695], 2, 0, 2, 1 }, { "sse2_movupd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movupd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movupd, &operand_data[1695], 2, 0, 2, 1 }, { "sse2_movdqa", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movdqa\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movdqa, &operand_data[1697], 2, 0, 2, 1 }, { "sse2_movdqu", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movdqu\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movdqu, &operand_data[1697], 2, 0, 2, 1 }, { "sse2_movdq2q", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_1006 }, #else { 0, output_1006, 0 }, #endif (insn_gen_fn) gen_sse2_movdq2q, &operand_data[1699], 2, 0, 2, 2 }, { "sse2_movdq2q_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_1007 }, #else { 0, output_1007, 0 }, #endif (insn_gen_fn) gen_sse2_movdq2q_rex64, &operand_data[1701], 2, 0, 3, 2 }, { "sse2_movq2dq", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_1008 }, #else { 0, output_1008, 0 }, #endif (insn_gen_fn) gen_sse2_movq2dq, &operand_data[1703], 2, 0, 2, 2 }, { "sse2_movq2dq_rex64", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_1009 }, #else { 0, output_1009, 0 }, #endif (insn_gen_fn) gen_sse2_movq2dq_rex64, &operand_data[1705], 2, 0, 3, 2 }, { "sse2_movq", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movq\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movq, &operand_data[1707], 2, 0, 1, 1 }, { "sse2_loadd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_loadd, &operand_data[1709], 2, 0, 1, 1 }, { "sse2_stored", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_stored, &operand_data[1711], 2, 0, 1, 1 }, { "sse2_movhpd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movhpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_movhpd, &operand_data[1713], 3, 0, 2, 1 }, { "sse2_loadsd_1", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movsd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_loadsd_1, &operand_data[1716], 3, 0, 1, 1 }, { "sse2_movsd", #if HAVE_DESIGNATED_INITIALIZERS { .multi = output_1015 }, #else { 0, output_1015, 0 }, #endif (insn_gen_fn) gen_sse2_movsd, &operand_data[1719], 3, 0, 3, 2 }, { "sse2_storesd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movsd\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_storesd, &operand_data[1722], 2, 0, 1, 1 }, { "sse2_shufpd", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "shufpd\t{%3, %2, %0|%0, %2, %3}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_shufpd, &operand_data[1724], 4, 0, 1, 1 }, { "sse2_clflush", #if HAVE_DESIGNATED_INITIALIZERS { 
.single = #else { #endif "clflush %0", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_sse2_clflush, &operand_data[1728], 1, 0, 1, 1 }, { "*mfence_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mfence", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1515], 1, 1, 0, 1 }, { "*lfence_insn", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lfence", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif 0, &operand_data[1515], 1, 1, 0, 1 }, { "mwait", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "mwait\t%0, %1", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_mwait, &operand_data[1729], 2, 0, 1, 1 }, { "monitor", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "monitor\t%0, %1, %2", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_monitor, &operand_data[1729], 3, 0, 1, 1 }, { "addsubv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "addsubps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addsubv4sf3, &operand_data[1395], 3, 0, 1, 1 }, { "addsubv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "addsubpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_addsubv2df3, &operand_data[1408], 3, 0, 1, 1 }, { "haddv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "haddps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_haddv4sf3, &operand_data[1395], 3, 0, 1, 1 }, { "haddv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "haddpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_haddv2df3, &operand_data[1408], 3, 0, 1, 1 }, { "hsubv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "hsubps\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_hsubv4sf3, &operand_data[1395], 3, 0, 1, 1 }, { "hsubv2df3", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "hsubpd\t{%2, %0|%0, %2}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_hsubv2df3, &operand_data[1408], 3, 0, 1, 1 }, { "movshdup", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movshdup\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_movshdup, &operand_data[1399], 2, 0, 1, 1 }, { "movsldup", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movsldup\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_movsldup, &operand_data[1399], 2, 0, 1, 1 }, { "lddqu", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "lddqu\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_lddqu, &operand_data[1732], 2, 0, 1, 1 }, { "loadddup", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movddup\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_loadddup, &operand_data[1716], 2, 0, 1, 1 }, { "movddup", #if HAVE_DESIGNATED_INITIALIZERS { .single = #else { #endif "movddup\t{%1, %0|%0, %1}", #if HAVE_DESIGNATED_INITIALIZERS }, #else 0, 0 }, #endif (insn_gen_fn) gen_movddup, &operand_data[1734], 2, 0, 1, 1 }, { "cmpdi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpdi, &operand_data[1736], 2, 0, 0, 0 }, { "cmpsi", #if 
HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpsi, &operand_data[1738], 2, 0, 0, 0 }, { "cmphi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmphi, &operand_data[1740], 2, 0, 0, 0 }, { "cmpqi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpqi, &operand_data[1742], 2, 0, 0, 0 }, { "cmpdi_1_rex64", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpdi_1_rex64, &operand_data[1744], 2, 0, 0, 0 }, { "cmpsi_1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpsi_1, &operand_data[9], 2, 0, 2, 0 }, { "cmpqi_ext_3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpqi_ext_3, &operand_data[1746], 2, 0, 0, 0 }, { "cmpxf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpxf, &operand_data[1748], 2, 0, 0, 0 }, { "cmpdf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpdf, &operand_data[1750], 2, 0, 0, 0 }, { "cmpsf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpsf, &operand_data[1752], 2, 0, 0, 0 }, { "cmpsf+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1754], 2, 0, 0, 0 }, { "movsi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movsi, &operand_data[1756], 2, 0, 0, 0 }, { "movhi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movhi, &operand_data[1740], 2, 0, 0, 0 }, { "movstricthi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movstricthi, &operand_data[1758], 2, 0, 0, 0 }, { "movqi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movqi, &operand_data[1742], 2, 0, 0, 0 }, { "reload_outqi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_reload_outqi, &operand_data[1760], 3, 0, 1, 0 }, { "movstrictqi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movstrictqi, &operand_data[1763], 2, 0, 0, 0 }, { "movdi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movdi, &operand_data[1744], 2, 0, 0, 0 }, { "movdi+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1765], 3, 0, 0, 0 }, { "movdi+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1765], 2, 0, 0, 0 }, { "movdi+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1765], 2, 0, 0, 0 }, { "movdi+4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1768], 2, 0, 0, 0 }, { "movsf-4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1744], 2, 0, 0, 0 }, { "movsf-3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1770], 3, 0, 0, 0 }, { "movsf-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1770], 2, 0, 0, 0 }, { "movsf-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1770], 2, 0, 0, 0 }, { "movsf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movsf, &operand_data[1773], 2, 0, 0, 0 }, { "movsf+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1775], 2, 0, 0, 0 }, { 
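/* From "cmpdi" above onward, the records carry no assembler template:
   note the { 0 } output initializer and the trailing output-format code
   of 0.  These appear to be the entries for define_expand patterns
   (cmpdi, movsi, addsi3, ...) and for nameless patterns, which are keyed
   by their offset from the nearest named one ("cmpsf+1", "movsf-4", and
   so on).  Only the gen_* hook, where one exists, and the operand
   metadata are meaningful for them.  */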
"movsf+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1777], 2, 0, 0, 0 }, { "movdf-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1777], 2, 0, 0, 0 }, { "movdf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movdf, &operand_data[1779], 2, 0, 0, 0 }, { "movdf+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1781], 2, 0, 0, 0 }, { "movdf+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1781], 2, 0, 0, 0 }, { "movxf-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1783], 2, 0, 0, 0 }, { "movxf-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1779], 2, 0, 0, 0 }, { "movxf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movxf, &operand_data[1785], 2, 0, 0, 0 }, { "movxf+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1787], 2, 0, 0, 0 }, { "movxf+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1789], 2, 0, 0, 0 }, { "movxf+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1789], 2, 0, 0, 0 }, { "zero_extendhisi2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1791], 2, 0, 0, 0 }, { "zero_extendhisi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1793], 2, 0, 0, 0 }, { "zero_extendhisi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_zero_extendhisi2, &operand_data[1795], 2, 0, 0, 0 }, { "zero_extendhisi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1797], 2, 0, 0, 0 }, { "zero_extendqihi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_zero_extendqihi2, &operand_data[1798], 2, 0, 0, 0 }, { "zero_extendqihi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1798], 2, 0, 0, 0 }, { "zero_extendqihi2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1798], 2, 0, 0, 0 }, { "zero_extendqisi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1800], 2, 0, 0, 0 }, { "zero_extendqisi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_zero_extendqisi2, &operand_data[1802], 2, 0, 0, 0 }, { "zero_extendqisi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1802], 2, 0, 0, 0 }, { "zero_extendqisi2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1802], 2, 0, 0, 0 }, { "zero_extendsidi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1804], 2, 0, 0, 0 }, { "zero_extendsidi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_zero_extendsidi2, &operand_data[1806], 2, 0, 1, 0 }, { "zero_extendsidi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1770], 1, 0, 0, 0 }, { "zero_extendsidi2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1808], 2, 0, 0, 0 }, { "extendsidi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1810], 2, 0, 0, 0 }, { "extendsidi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else 
{ 0, 0, 0 }, #endif (insn_gen_fn) gen_extendsidi2, &operand_data[1812], 3, 0, 0, 0 }, { "extendsidi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1815], 3, 0, 0, 0 }, { "extendsidi2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1815], 3, 0, 0, 0 }, { "extendsidi2+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1812], 3, 0, 0, 0 }, { "extendsidi2+4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1818], 2, 0, 0, 0 }, { "extendsidi2+5", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1818], 2, 0, 0, 0 }, { "extendsfdf2-4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1820], 2, 0, 0, 0 }, { "extendsfdf2-3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1820], 2, 0, 0, 0 }, { "extendsfdf2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1822], 2, 0, 0, 0 }, { "extendsfdf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1822], 2, 0, 0, 0 }, { "extendsfdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_extendsfdf2, &operand_data[1824], 2, 0, 0, 0 }, { "extendsfxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_extendsfxf2, &operand_data[1826], 2, 0, 0, 0 }, { "extenddfxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_extenddfxf2, &operand_data[1828], 2, 0, 0, 0 }, { "truncdfsf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_truncdfsf2, &operand_data[1830], 2, 1, 0, 0 }, { "truncdfsf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1832], 3, 0, 0, 0 }, { "truncdfsf2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1835], 3, 0, 0, 0 }, { "truncxfsf2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1835], 2, 0, 0, 0 }, { "truncxfsf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1838], 3, 0, 0, 0 }, { "truncxfsf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_truncxfsf2, &operand_data[1841], 2, 1, 0, 0 }, { "truncxfsf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1843], 3, 0, 0, 0 }, { "truncxfdf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1846], 3, 0, 0, 0 }, { "truncxfdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_truncxfdf2, &operand_data[1849], 2, 1, 0, 0 }, { "truncxfdf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1851], 3, 0, 0, 0 }, { "fix_truncxfdi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1854], 3, 0, 0, 0 }, { "fix_truncxfdi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fix_truncxfdi2, &operand_data[1857], 2, 0, 0, 0 }, { "fix_truncdfdi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fix_truncdfdi2, &operand_data[1859], 2, 0, 0, 0 }, { "fix_truncsfdi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fix_truncsfdi2, &operand_data[1861], 2, 0, 0, 0 }, { "fix_truncsfdi2+1", #if 
HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1863], 2, 0, 0, 0 }, { "fix_truncsfdi2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1865], 6, 0, 0, 0 }, { "fix_truncsfdi2+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1871], 6, 0, 0, 0 }, { "fix_truncxfsi2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1877], 3, 0, 0, 0 }, { "fix_truncxfsi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1880], 3, 0, 0, 0 }, { "fix_truncxfsi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fix_truncxfsi2, &operand_data[1883], 2, 0, 0, 0 }, { "fix_truncdfsi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fix_truncdfsi2, &operand_data[1885], 2, 0, 0, 0 }, { "fix_truncsfsi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fix_truncsfsi2, &operand_data[1887], 2, 0, 0, 0 }, { "fix_truncsfsi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1889], 2, 0, 0, 0 }, { "fix_truncsfsi2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1891], 3, 0, 0, 0 }, { "fix_truncsfsi2+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1894], 3, 0, 0, 0 }, { "fix_truncxfhi2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1897], 5, 0, 0, 0 }, { "fix_truncxfhi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1901], 5, 0, 0, 0 }, { "fix_truncxfhi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fix_truncxfhi2, &operand_data[1906], 2, 0, 0, 0 }, { "fix_truncdfhi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fix_truncdfhi2, &operand_data[1908], 2, 0, 0, 0 }, { "fix_truncsfhi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fix_truncsfhi2, &operand_data[1910], 2, 0, 0, 0 }, { "fix_truncsfhi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1912], 2, 0, 0, 0 }, { "fix_truncsfhi2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1914], 5, 0, 0, 0 }, { "floathisf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1919], 5, 0, 0, 0 }, { "floathisf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_floathisf2, &operand_data[1911], 2, 0, 0, 0 }, { "floatsisf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_floatsisf2, &operand_data[1888], 2, 0, 0, 0 }, { "floatsisf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1888], 2, 0, 0, 0 }, { "floatdisf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_floatdisf2, &operand_data[1862], 2, 0, 0, 0 }, { "floatdisf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1862], 2, 0, 0, 0 }, { "floathidf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_floathidf2, &operand_data[1909], 2, 0, 0, 0 }, { "floatsidf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_floatsidf2, &operand_data[1886], 2, 0, 0, 0 }, { "floatdidf2", #if 
HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_floatdidf2, &operand_data[1860], 2, 0, 0, 0 }, { "floatdidf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1924], 2, 0, 0, 0 }, { "floatunssisf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_floatunssisf2, &operand_data[1754], 2, 0, 0, 0 }, { "floatunsdisf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_floatunsdisf2, &operand_data[1926], 2, 0, 0, 0 }, { "floatunsdidf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_floatunsdidf2, &operand_data[1928], 2, 0, 0, 0 }, { "vec_setv2df", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_vec_setv2df, &operand_data[1930], 3, 0, 0, 0 }, { "vec_extractv2df", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_vec_extractv2df, &operand_data[1933], 3, 0, 0, 0 }, { "vec_initv2df", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_vec_initv2df, &operand_data[1936], 2, 0, 0, 0 }, { "vec_setv4sf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_vec_setv4sf, &operand_data[1938], 3, 0, 0, 0 }, { "vec_extractv4sf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_vec_extractv4sf, &operand_data[1941], 3, 0, 0, 0 }, { "vec_initv4sf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_vec_initv4sf, &operand_data[1944], 2, 0, 0, 0 }, { "adddi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_adddi3, &operand_data[1946], 3, 0, 0, 0 }, { "adddi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1949], 3, 0, 0, 0 }, { "addsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_addsi3, &operand_data[1952], 3, 0, 0, 0 }, { "addsi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1955], 4, 0, 0, 0 }, { "addsi3+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1959], 4, 0, 0, 0 }, { "addsi3+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1963], 4, 0, 0, 0 }, { "addsi3+4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1967], 4, 0, 0, 0 }, { "addsi3+5", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1971], 5, 0, 0, 0 }, { "addhi3-4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1976], 5, 0, 0, 0 }, { "addhi3-3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1981], 3, 0, 0, 0 }, { "addhi3-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1984], 3, 0, 0, 0 }, { "addhi3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1987], 3, 0, 0, 0 }, { "addhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_addhi3, &operand_data[1990], 3, 0, 0, 0 }, { "addqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_addqi3, &operand_data[1993], 3, 0, 0, 0 }, { "addxf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_addxf3, &operand_data[1996], 3, 0, 0, 0 }, { "adddf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else 
{ 0, 0, 0 }, #endif (insn_gen_fn) gen_adddf3, &operand_data[1999], 3, 0, 0, 0 }, { "addsf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_addsf3, &operand_data[2002], 3, 0, 0, 0 }, { "subdi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_subdi3, &operand_data[1946], 3, 0, 0, 0 }, { "subdi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1949], 3, 0, 0, 0 }, { "subsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_subsi3, &operand_data[1952], 3, 0, 0, 0 }, { "subhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_subhi3, &operand_data[1990], 3, 0, 0, 0 }, { "subqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_subqi3, &operand_data[1993], 3, 0, 0, 0 }, { "subxf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_subxf3, &operand_data[1996], 3, 0, 0, 0 }, { "subdf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_subdf3, &operand_data[1999], 3, 0, 0, 0 }, { "subsf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_subsf3, &operand_data[2002], 3, 0, 0, 0 }, { "muldi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_muldi3, &operand_data[2005], 3, 0, 0, 0 }, { "mulsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_mulsi3, &operand_data[2008], 3, 0, 0, 0 }, { "mulhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_mulhi3, &operand_data[2011], 3, 0, 0, 0 }, { "mulqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_mulqi3, &operand_data[2014], 3, 0, 0, 0 }, { "umulqihi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_umulqihi3, &operand_data[2017], 3, 0, 0, 0 }, { "mulqihi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_mulqihi3, &operand_data[2017], 3, 0, 0, 0 }, { "umulditi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_umulditi3, &operand_data[2020], 3, 0, 0, 0 }, { "umulsidi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_umulsidi3, &operand_data[2022], 3, 0, 0, 0 }, { "mulditi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_mulditi3, &operand_data[2020], 3, 0, 0, 0 }, { "mulsidi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_mulsidi3, &operand_data[2022], 3, 0, 0, 0 }, { "umuldi3_highpart", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_umuldi3_highpart, &operand_data[2025], 4, 0, 0, 0 }, { "umulsi3_highpart", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_umulsi3_highpart, &operand_data[2029], 4, 0, 0, 0 }, { "smuldi3_highpart", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_smuldi3_highpart, &operand_data[2033], 4, 0, 1, 0 }, { "smulsi3_highpart", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_smulsi3_highpart, &operand_data[2029], 4, 0, 0, 0 }, { "mulxf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_mulxf3, &operand_data[1996], 3, 0, 0, 0 }, { "muldf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 
}, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_muldf3, &operand_data[1999], 3, 0, 0, 0 }, { "mulsf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_mulsf3, &operand_data[2002], 3, 0, 0, 0 }, { "divxf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_divxf3, &operand_data[1996], 3, 0, 0, 0 }, { "divdf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_divdf3, &operand_data[1999], 3, 0, 0, 0 }, { "divsf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_divsf3, &operand_data[2002], 3, 0, 0, 0 }, { "divmoddi4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_divmoddi4, &operand_data[2037], 4, 2, 0, 0 }, { "divmoddi4+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2037], 4, 0, 0, 0 }, { "divmodsi4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_divmodsi4, &operand_data[2041], 4, 2, 0, 0 }, { "divmodsi4+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2041], 4, 0, 0, 0 }, { "divmodsi4+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2037], 4, 0, 0, 0 }, { "udivmodhi4-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2041], 4, 0, 0, 0 }, { "udivmodhi4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_udivmodhi4, &operand_data[2045], 4, 4, 0, 0 }, { "testsi_ccno_1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_testsi_ccno_1, &operand_data[2049], 2, 0, 0, 0 }, { "testqi_ccz_1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_testqi_ccz_1, &operand_data[2051], 2, 0, 0, 0 }, { "testqi_ext_ccno_0", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_testqi_ext_ccno_0, &operand_data[2053], 2, 0, 0, 0 }, { "testqi_ext_ccno_0+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2055], 3, 0, 0, 0 }, { "testqi_ext_ccno_0+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2058], 2, 0, 0, 0 }, { "anddi3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2055], 2, 0, 0, 0 }, { "anddi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_anddi3, &operand_data[2060], 3, 0, 0, 0 }, { "andsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_andsi3, &operand_data[1952], 3, 0, 0, 0 }, { "andsi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1793], 1, 0, 0, 0 }, { "andsi3+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1746], 1, 0, 0, 0 }, { "andhi3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1746], 1, 0, 0, 0 }, { "andhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_andhi3, &operand_data[1990], 3, 0, 0, 0 }, { "andqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_andqi3, &operand_data[1993], 3, 0, 0, 0 }, { "andqi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2063], 3, 0, 0, 0 }, { "iordi3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2066], 3, 0, 0, 0 }, 
{ "iordi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_iordi3, &operand_data[1946], 3, 0, 0, 0 }, { "iorsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_iorsi3, &operand_data[1952], 3, 0, 0, 0 }, { "iorhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_iorhi3, &operand_data[1990], 3, 0, 0, 0 }, { "iorqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_iorqi3, &operand_data[1993], 3, 0, 0, 0 }, { "iorqi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2063], 3, 0, 0, 0 }, { "xordi3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2066], 3, 0, 0, 0 }, { "xordi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_xordi3, &operand_data[1946], 3, 0, 0, 0 }, { "xorsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_xorsi3, &operand_data[1952], 3, 0, 0, 0 }, { "xorhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_xorhi3, &operand_data[1990], 3, 0, 0, 0 }, { "xorqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_xorqi3, &operand_data[1993], 3, 0, 0, 0 }, { "xorqi_cc_ext_1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_xorqi_cc_ext_1, &operand_data[2069], 3, 2, 0, 0 }, { "xorqi_cc_ext_1+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2063], 3, 0, 0, 0 }, { "negdi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2066], 3, 0, 0, 0 }, { "negdi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_negdi2, &operand_data[1946], 2, 0, 0, 0 }, { "negdi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1744], 2, 0, 0, 0 }, { "negsi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_negsi2, &operand_data[1952], 2, 0, 0, 0 }, { "neghi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_neghi2, &operand_data[1990], 2, 0, 0, 0 }, { "negqi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_negqi2, &operand_data[1993], 2, 0, 0, 0 }, { "negsf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_negsf2, &operand_data[2072], 2, 0, 0, 0 }, { "negsf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2074], 3, 0, 0, 0 }, { "negsf2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2077], 3, 0, 0, 0 }, { "negsf2+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2080], 3, 0, 0, 0 }, { "negdf2-3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2083], 2, 0, 0, 0 }, { "negdf2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2085], 2, 0, 0, 0 }, { "negdf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2087], 2, 0, 0, 0 }, { "negdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_negdf2, &operand_data[2089], 2, 0, 0, 0 }, { "negdf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2091], 3, 0, 0, 0 }, { "negdf2+2", #if 
HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2094], 3, 0, 0, 0 }, { "negdf2+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2094], 3, 0, 0, 0 }, { "negxf2-3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2097], 3, 0, 0, 0 }, { "negxf2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2100], 2, 0, 0, 0 }, { "negxf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2102], 2, 0, 0, 0 }, { "negxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_negxf2, &operand_data[2104], 2, 0, 0, 0 }, { "negxf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2106], 2, 0, 0, 0 }, { "abssf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2108], 2, 0, 0, 0 }, { "abssf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_abssf2, &operand_data[2072], 2, 0, 0, 0 }, { "abssf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2110], 3, 0, 0, 0 }, { "abssf2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2077], 3, 0, 0, 0 }, { "abssf2+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2080], 3, 0, 0, 0 }, { "absdf2-3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2083], 2, 0, 0, 0 }, { "absdf2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2085], 2, 0, 0, 0 }, { "absdf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2087], 2, 0, 0, 0 }, { "absdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_absdf2, &operand_data[2089], 2, 0, 0, 0 }, { "absdf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2091], 3, 0, 0, 0 }, { "absdf2+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2094], 3, 0, 0, 0 }, { "absdf2+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2097], 3, 0, 0, 0 }, { "absxf2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2100], 2, 0, 0, 0 }, { "absxf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2102], 2, 0, 0, 0 }, { "absxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_absxf2, &operand_data[2104], 2, 0, 0, 0 }, { "absxf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2106], 2, 0, 0, 0 }, { "one_cmpldi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2108], 2, 0, 0, 0 }, { "one_cmpldi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_one_cmpldi2, &operand_data[1946], 2, 0, 0, 0 }, { "one_cmpldi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1946], 2, 0, 0, 0 }, { "one_cmplsi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_one_cmplsi2, &operand_data[1952], 2, 0, 0, 0 }, { "one_cmplsi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1952], 2, 0, 0, 0 }, { "one_cmplhi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, 
&operand_data[1808], 2, 0, 0, 0 }, { "one_cmplhi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_one_cmplhi2, &operand_data[1990], 2, 0, 0, 0 }, { "one_cmplhi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1990], 2, 0, 0, 0 }, { "one_cmplqi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_one_cmplqi2, &operand_data[1993], 2, 0, 0, 0 }, { "one_cmplqi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1993], 2, 0, 0, 0 }, { "ashldi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ashldi3, &operand_data[2113], 3, 0, 0, 0 }, { "ashldi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2116], 3, 0, 0, 0 }, { "ashldi3+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2119], 4, 0, 0, 0 }, { "x86_shift_adj_1-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2119], 3, 0, 0, 0 }, { "x86_shift_adj_1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_x86_shift_adj_1, &operand_data[2123], 4, 3, 1, 0 }, { "x86_shift_adj_2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_x86_shift_adj_2, &operand_data[2123], 3, 0, 0, 0 }, { "ashlsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ashlsi3, &operand_data[2127], 3, 0, 0, 0 }, { "ashlsi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2130], 3, 0, 0, 0 }, { "ashlsi3+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2133], 3, 0, 0, 0 }, { "ashlhi3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2136], 3, 0, 0, 0 }, { "ashlhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ashlhi3, &operand_data[2139], 3, 0, 0, 0 }, { "ashlqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ashlqi3, &operand_data[2142], 3, 0, 0, 0 }, { "ashrdi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ashrdi3, &operand_data[2113], 3, 0, 0, 0 }, { "ashrdi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2119], 4, 0, 0, 0 }, { "x86_shift_adj_3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2119], 3, 0, 0, 0 }, { "x86_shift_adj_3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_x86_shift_adj_3, &operand_data[2123], 3, 0, 0, 0 }, { "ashrsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ashrsi3, &operand_data[2127], 3, 0, 0, 0 }, { "ashrhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ashrhi3, &operand_data[2139], 3, 0, 0, 0 }, { "ashrqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ashrqi3, &operand_data[2142], 3, 0, 0, 0 }, { "lshrdi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_lshrdi3, &operand_data[2113], 3, 0, 0, 0 }, { "lshrdi3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2119], 4, 0, 0, 0 }, { "lshrsi3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2119], 3, 0, 0, 0 }, { "lshrsi3", #if 
HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_lshrsi3, &operand_data[2127], 3, 0, 0, 0 }, { "lshrhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_lshrhi3, &operand_data[2139], 3, 0, 0, 0 }, { "lshrqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_lshrqi3, &operand_data[2142], 3, 0, 0, 0 }, { "rotldi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rotldi3, &operand_data[2145], 3, 0, 0, 0 }, { "rotlsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rotlsi3, &operand_data[2127], 3, 0, 0, 0 }, { "rotlhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rotlhi3, &operand_data[2139], 3, 0, 0, 0 }, { "rotlqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rotlqi3, &operand_data[2142], 3, 0, 0, 0 }, { "rotrdi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rotrdi3, &operand_data[2145], 3, 0, 0, 0 }, { "rotrsi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rotrsi3, &operand_data[2127], 3, 0, 0, 0 }, { "rotrhi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rotrhi3, &operand_data[2139], 3, 0, 0, 0 }, { "rotrqi3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rotrqi3, &operand_data[2142], 3, 0, 0, 0 }, { "extv", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_extv, &operand_data[2148], 4, 0, 0, 0 }, { "extzv", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_extzv, &operand_data[2152], 4, 0, 0, 0 }, { "insv", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_insv, &operand_data[2156], 4, 0, 0, 0 }, { "seq", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_seq, &operand_data[1801], 1, 0, 0, 0 }, { "sne", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sne, &operand_data[1801], 1, 0, 0, 0 }, { "sgt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sgt, &operand_data[1801], 1, 0, 0, 0 }, { "sgtu", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sgtu, &operand_data[1801], 1, 0, 0, 0 }, { "slt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_slt, &operand_data[1801], 1, 0, 0, 0 }, { "sltu", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sltu, &operand_data[1801], 1, 0, 0, 0 }, { "sge", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sge, &operand_data[1801], 1, 0, 0, 0 }, { "sgeu", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sgeu, &operand_data[1801], 1, 0, 0, 0 }, { "sle", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sle, &operand_data[1801], 1, 0, 0, 0 }, { "sleu", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sleu, &operand_data[1801], 1, 0, 0, 0 }, { "sunordered", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sunordered, &operand_data[1801], 1, 0, 0, 0 }, { "sordered", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sordered, &operand_data[1801], 
1, 0, 0, 0 }, { "suneq", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_suneq, &operand_data[1801], 1, 0, 0, 0 }, { "sunge", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sunge, &operand_data[1801], 1, 0, 0, 0 }, { "sungt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sungt, &operand_data[1801], 1, 0, 0, 0 }, { "sunle", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sunle, &operand_data[1801], 1, 0, 0, 0 }, { "sunlt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sunlt, &operand_data[1801], 1, 0, 0, 0 }, { "sltgt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sltgt, &operand_data[1801], 1, 0, 0, 0 }, { "sltgt+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2160], 2, 0, 0, 0 }, { "sltgt+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2162], 2, 0, 0, 0 }, { "beq-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2160], 2, 0, 0, 0 }, { "beq-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2162], 2, 0, 0, 0 }, { "beq", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_beq, &operand_data[849], 1, 1, 0, 0 }, { "bne", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bne, &operand_data[849], 1, 1, 0, 0 }, { "bgt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bgt, &operand_data[849], 1, 1, 0, 0 }, { "bgtu", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bgtu, &operand_data[849], 1, 1, 0, 0 }, { "blt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_blt, &operand_data[849], 1, 1, 0, 0 }, { "bltu", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bltu, &operand_data[849], 1, 1, 0, 0 }, { "bge", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bge, &operand_data[849], 1, 1, 0, 0 }, { "bgeu", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bgeu, &operand_data[849], 1, 1, 0, 0 }, { "ble", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ble, &operand_data[849], 1, 1, 0, 0 }, { "bleu", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bleu, &operand_data[849], 1, 1, 0, 0 }, { "bunordered", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bunordered, &operand_data[849], 1, 1, 0, 0 }, { "bordered", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bordered, &operand_data[849], 1, 1, 0, 0 }, { "buneq", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_buneq, &operand_data[849], 1, 1, 0, 0 }, { "bunge", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bunge, &operand_data[849], 1, 1, 0, 0 }, { "bungt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bungt, &operand_data[849], 1, 1, 0, 0 }, { "bunle", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bunle, &operand_data[849], 1, 1, 0, 0 }, { "bunlt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) 
gen_bunlt, &operand_data[849], 1, 1, 0, 0 }, { "bltgt", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_bltgt, &operand_data[849], 1, 1, 0, 0 }, { "bltgt+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2163], 2, 0, 0, 0 }, { "bltgt+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2163], 2, 0, 0, 0 }, { "indirect_jump-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2165], 5, 0, 0, 0 }, { "indirect_jump-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2165], 6, 0, 0, 0 }, { "indirect_jump", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_indirect_jump, &operand_data[591], 1, 0, 1, 0 }, { "tablejump", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_tablejump, &operand_data[2171], 2, 0, 1, 0 }, { "doloop_end", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_doloop_end, &operand_data[2172], 5, 0, 0, 0 }, { "doloop_end+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2176], 3, 0, 0, 0 }, { "doloop_end+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2179], 4, 0, 0, 0 }, { "call_pop-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2183], 4, 0, 0, 0 }, { "call_pop-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2183], 4, 0, 0, 0 }, { "call_pop", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_call_pop, &operand_data[2187], 4, 0, 0, 0 }, { "call", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_call, &operand_data[2191], 3, 0, 0, 0 }, { "sibcall", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sibcall, &operand_data[2191], 3, 0, 0, 0 }, { "call_value_pop", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_call_value_pop, &operand_data[2193], 5, 0, 0, 0 }, { "call_value", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_call_value, &operand_data[2198], 4, 0, 0, 0 }, { "sibcall_value", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sibcall_value, &operand_data[2198], 4, 0, 0, 0 }, { "untyped_call", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_untyped_call, &operand_data[2172], 3, 0, 0, 0 }, { "return", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_return, &operand_data[0], 0, 0, 0, 0 }, { "prologue", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_prologue, &operand_data[0], 0, 0, 0, 0 }, { "epilogue", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_epilogue, &operand_data[0], 0, 0, 0, 0 }, { "sibcall_epilogue", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sibcall_epilogue, &operand_data[0], 0, 0, 0, 0 }, { "eh_return", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_eh_return, &operand_data[1793], 1, 0, 0, 0 }, { "eh_return+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1755], 1, 0, 0, 0 }, { "ffssi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, 
&operand_data[1808], 1, 0, 0, 0 }, { "ffssi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ffssi2, &operand_data[2180], 3, 0, 0, 0 }, { "ffssi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2180], 3, 0, 0, 0 }, { "ffsdi2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2202], 3, 0, 0, 0 }, { "ffsdi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ffsdi2, &operand_data[2205], 3, 0, 0, 0 }, { "ffsdi2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2205], 3, 0, 0, 0 }, { "clzsi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_clzsi2, &operand_data[1755], 2, 2, 0, 0 }, { "clzdi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_clzdi2, &operand_data[2025], 2, 2, 0, 0 }, { "tls_global_dynamic_32", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_tls_global_dynamic_32, &operand_data[2208], 6, 2, 0, 0 }, { "tls_global_dynamic_64", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_tls_global_dynamic_64, &operand_data[2214], 2, 1, 0, 0 }, { "tls_local_dynamic_base_32", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_tls_local_dynamic_base_32, &operand_data[2216], 5, 2, 0, 0 }, { "tls_local_dynamic_base_64", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_tls_local_dynamic_base_64, &operand_data[1808], 1, 1, 0, 0 }, { "tls_local_dynamic_base_64+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2221], 6, 0, 0, 0 }, { "tls_local_dynamic_base_64+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2227], 4, 0, 0, 0 }, { "sqrtsf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2231], 4, 0, 0, 0 }, { "sqrtsf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sqrtsf2, &operand_data[2003], 2, 0, 0, 0 }, { "sqrtdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sqrtdf2, &operand_data[2000], 2, 0, 0, 0 }, { "fmodsf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fmodsf3, &operand_data[2235], 3, 0, 0, 0 }, { "fmoddf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fmoddf3, &operand_data[2238], 3, 0, 0, 0 }, { "fmodxf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_fmodxf3, &operand_data[1996], 3, 0, 0, 0 }, { "dremsf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_dremsf3, &operand_data[2235], 3, 0, 0, 0 }, { "dremdf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_dremdf3, &operand_data[2238], 3, 0, 0, 0 }, { "dremxf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_dremxf3, &operand_data[1996], 3, 0, 0, 0 }, { "dremxf3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2238], 3, 0, 0, 0 }, { "dremxf3+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2238], 3, 0, 0, 0 }, { "dremxf3+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2235], 3, 0, 0, 0 }, { "dremxf3+4", #if 
HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2235], 3, 0, 0, 0 }, { "dremxf3+5", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2239], 3, 0, 0, 0 }, { "tandf2-4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2239], 3, 0, 0, 0 }, { "tandf2-3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1996], 3, 0, 0, 0 }, { "tandf2-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1996], 3, 0, 0, 0 }, { "tandf2-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2242], 4, 0, 0, 0 }, { "tandf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_tandf2, &operand_data[1999], 2, 2, 0, 0 }, { "tandf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2246], 4, 0, 0, 0 }, { "tansf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_tansf2, &operand_data[2002], 2, 2, 0, 0 }, { "tansf2+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2250], 4, 0, 0, 0 }, { "tanxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_tanxf2, &operand_data[1996], 2, 2, 0, 0 }, { "atan2df3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_atan2df3, &operand_data[1058], 3, 0, 1, 0 }, { "atandf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_atandf2, &operand_data[2254], 4, 1, 0, 0 }, { "atan2sf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_atan2sf3, &operand_data[1062], 3, 0, 1, 0 }, { "atansf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_atansf2, &operand_data[2258], 4, 1, 0, 0 }, { "atan2xf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_atan2xf3, &operand_data[1066], 3, 0, 1, 0 }, { "atanxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_atanxf2, &operand_data[2262], 4, 1, 0, 0 }, { "asindf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_asindf2, &operand_data[2266], 9, 13, 0, 0 }, { "asinsf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_asinsf2, &operand_data[2275], 9, 13, 0, 0 }, { "asinxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_asinxf2, &operand_data[2284], 7, 9, 0, 0 }, { "acosdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_acosdf2, &operand_data[2266], 9, 13, 0, 0 }, { "acossf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_acossf2, &operand_data[2275], 9, 13, 0, 0 }, { "acosxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_acosxf2, &operand_data[2284], 7, 9, 0, 0 }, { "logsf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_logsf2, &operand_data[2291], 6, 5, 0, 0 }, { "logdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_logdf2, &operand_data[2297], 6, 5, 0, 0 }, { "logxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_logxf2, &operand_data[2262], 4, 1, 0, 0 }, { "log10sf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 
(insn_gen_fn) gen_log10sf2, &operand_data[2291], 6, 5, 0, 0 }, { "log10df2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_log10df2, &operand_data[2297], 6, 5, 0, 0 }, { "log10xf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_log10xf2, &operand_data[2262], 4, 1, 0, 0 }, { "log2sf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_log2sf2, &operand_data[2291], 6, 5, 0, 0 }, { "log2df2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_log2df2, &operand_data[2297], 6, 5, 0, 0 }, { "log2xf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_log2xf2, &operand_data[2262], 4, 1, 0, 0 }, { "log1psf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_log1psf2, &operand_data[1996], 2, 0, 0, 0 }, { "log1pdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_log1pdf2, &operand_data[1996], 2, 0, 0, 0 }, { "log1pxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_log1pxf2, &operand_data[1996], 2, 0, 0, 0 }, { "logbsf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_logbsf2, &operand_data[2002], 2, 6, 0, 0 }, { "logbdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_logbdf2, &operand_data[1999], 2, 6, 0, 0 }, { "logbxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_logbxf2, &operand_data[1996], 2, 2, 0, 0 }, { "ilogbsi2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_ilogbsi2, &operand_data[2303], 4, 3, 0, 0 }, { "expsf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_expsf2, &operand_data[2002], 2, 21, 0, 0 }, { "expdf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_expdf2, &operand_data[1999], 2, 21, 0, 0 }, { "expxf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_expxf2, &operand_data[1996], 2, 17, 0, 0 }, { "exp10sf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_exp10sf2, &operand_data[2002], 2, 21, 0, 0 }, { "exp10df2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_exp10df2, &operand_data[1999], 2, 21, 0, 0 }, { "exp10xf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_exp10xf2, &operand_data[1996], 2, 17, 0, 0 }, { "exp2sf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_exp2sf2, &operand_data[2002], 2, 18, 0, 0 }, { "exp2df2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_exp2df2, &operand_data[1999], 2, 18, 0, 0 }, { "exp2xf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_exp2xf2, &operand_data[1996], 2, 16, 0, 0 }, { "expm1df2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_expm1df2, &operand_data[1999], 2, 30, 0, 0 }, { "expm1sf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_expm1sf2, &operand_data[2002], 2, 30, 0, 0 }, { "expm1xf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_expm1xf2, &operand_data[1996], 2, 26, 0, 0 }, { "movstrsi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, 
#endif (insn_gen_fn) gen_movstrsi, &operand_data[2307], 4, 0, 0, 0 }, { "movstrdi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movstrdi, &operand_data[2311], 4, 0, 0, 0 }, { "strmov", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_strmov, &operand_data[2315], 4, 4, 0, 0 }, { "strmov_singleop", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_strmov_singleop, &operand_data[2315], 6, 0, 0, 0 }, { "rep_mov", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rep_mov, &operand_data[2321], 7, 1, 0, 0 }, { "clrstrsi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_clrstrsi, &operand_data[2328], 3, 0, 0, 0 }, { "clrstrdi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_clrstrdi, &operand_data[2331], 3, 0, 0, 0 }, { "strset", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_strset, &operand_data[2315], 3, 1, 0, 0 }, { "strset_singleop", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_strset_singleop, &operand_data[2323], 4, 0, 0, 0 }, { "rep_stos", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_rep_stos, &operand_data[2334], 5, 1, 0, 0 }, { "cmpstrsi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpstrsi, &operand_data[2339], 5, 0, 0, 0 }, { "cmpintqi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpintqi, &operand_data[1801], 1, 4, 0, 0 }, { "cmpstrqi_nz_1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpstrqi_nz_1, &operand_data[2344], 6, 1, 0, 0 }, { "cmpstrqi_1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_cmpstrqi_1, &operand_data[2344], 6, 1, 0, 0 }, { "strlensi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_strlensi, &operand_data[2350], 4, 0, 0, 0 }, { "strlendi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_strlendi, &operand_data[2354], 4, 0, 0, 0 }, { "strlenqi_1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_strlenqi_1, &operand_data[2358], 3, 0, 0, 0 }, { "strlenqi_1+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2361], 9, 0, 0, 0 }, { "movdicc-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2361], 9, 0, 0, 0 }, { "movdicc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movdicc, &operand_data[2370], 4, 0, 0, 0 }, { "movsicc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movsicc, &operand_data[2374], 4, 0, 0, 0 }, { "movhicc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movhicc, &operand_data[2378], 4, 0, 0, 0 }, { "movqicc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movqicc, &operand_data[2382], 4, 0, 0, 0 }, { "movqicc+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2386], 5, 0, 0, 0 }, { "movsfcc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movsfcc, &operand_data[2391], 4, 0, 0, 0 }, { "movdfcc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) 
gen_movdfcc, &operand_data[2395], 4, 0, 0, 0 }, { "movdfcc+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2399], 5, 0, 0, 0 }, { "movxfcc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movxfcc, &operand_data[2404], 4, 0, 0, 0 }, { "minsf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_minsf3, &operand_data[2002], 3, 2, 0, 0 }, { "minsf3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2408], 5, 0, 0, 0 }, { "addqicc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_addqicc, &operand_data[2413], 4, 0, 0, 0 }, { "addhicc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_addhicc, &operand_data[2417], 4, 0, 0, 0 }, { "addsicc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_addsicc, &operand_data[2421], 4, 0, 0, 0 }, { "adddicc", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_adddicc, &operand_data[2425], 4, 0, 0, 0 }, { "adddicc+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2429], 5, 0, 0, 0 }, { "mindf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_mindf3, &operand_data[1999], 3, 2, 0, 0 }, { "mindf3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2434], 5, 0, 0, 0 }, { "maxsf3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2439], 5, 0, 0, 0 }, { "maxsf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_maxsf3, &operand_data[2002], 3, 2, 0, 0 }, { "maxsf3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2408], 5, 0, 0, 0 }, { "maxdf3-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2429], 5, 0, 0, 0 }, { "maxdf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_maxdf3, &operand_data[1999], 3, 2, 0, 0 }, { "maxdf3+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2434], 5, 0, 0, 0 }, { "maxdf3+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2439], 5, 0, 0, 0 }, { "maxdf3+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2444], 7, 0, 0, 0 }, { "maxdf3+4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2451], 7, 0, 0, 0 }, { "allocate_stack_worker-3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2458], 7, 0, 0, 0 }, { "allocate_stack_worker-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2465], 6, 0, 0, 0 }, { "allocate_stack_worker-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2471], 6, 0, 0, 0 }, { "allocate_stack_worker", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_allocate_stack_worker, &operand_data[1755], 1, 0, 0, 0 }, { "allocate_stack_worker_postreload", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_allocate_stack_worker_postreload, &operand_data[555], 1, 2, 1, 0 }, { "allocate_stack_worker_rex64_postreload", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_allocate_stack_worker_rex64_postreload, &operand_data[542], 
1, 2, 1, 0 }, { "allocate_stack", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_allocate_stack, &operand_data[2477], 2, 1, 1, 0 }, { "builtin_setjmp_receiver", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_builtin_setjmp_receiver, &operand_data[849], 1, 0, 0, 0 }, { "builtin_setjmp_receiver+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2479], 4, 0, 0, 0 }, { "builtin_setjmp_receiver+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2483], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2486], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1984], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+5", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1984], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+6", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2488], 4, 0, 0, 0 }, { "builtin_setjmp_receiver+7", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2492], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+8", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2495], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+9", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2498], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+10", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2501], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+11", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2504], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+12", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2493], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+13", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2502], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+14", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2505], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+15", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2507], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+16", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2510], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+17", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2513], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+18", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2516], 4, 0, 0, 0 }, { "builtin_setjmp_receiver+19", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1952], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+20", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1990], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+21", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1993], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+22", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1961], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+23", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2520], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+24", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, 
#endif 0, &operand_data[2053], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+25", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2522], 4, 0, 0, 0 }, { "builtin_setjmp_receiver+26", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2522], 4, 0, 0, 0 }, { "builtin_setjmp_receiver+27", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2526], 4, 0, 0, 0 }, { "builtin_setjmp_receiver+28", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2526], 4, 0, 0, 0 }, { "builtin_setjmp_receiver+29", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1793], 1, 0, 0, 0 }, { "builtin_setjmp_receiver+30", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2530], 1, 0, 0, 0 }, { "builtin_setjmp_receiver+31", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1793], 1, 0, 0, 0 }, { "builtin_setjmp_receiver+32", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1988], 2, 0, 0, 0 }, { "builtin_setjmp_receiver+33", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2531], 3, 0, 0, 0 }, { "builtin_setjmp_receiver+34", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2006], 2, 0, 0, 0 }, { "conditional_trap-34", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2423], 2, 0, 0, 0 }, { "conditional_trap-33", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2427], 2, 0, 0, 0 }, { "conditional_trap-32", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2534], 3, 0, 0, 0 }, { "conditional_trap-31", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2494], 1, 0, 0, 0 }, { "conditional_trap-30", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2494], 1, 0, 0, 0 }, { "conditional_trap-29", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2494], 1, 0, 0, 0 }, { "conditional_trap-28", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2494], 1, 0, 0, 0 }, { "conditional_trap-27", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2494], 1, 0, 0, 0 }, { "conditional_trap-26", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2537], 2, 0, 0, 0 }, { "conditional_trap-25", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2494], 1, 0, 0, 0 }, { "conditional_trap-24", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2494], 1, 0, 0, 0 }, { "conditional_trap-23", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2537], 2, 0, 0, 0 }, { "conditional_trap-22", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2494], 1, 0, 0, 0 }, { "conditional_trap-21", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2539], 2, 0, 0, 0 }, { "conditional_trap-20", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2541], 2, 0, 0, 0 }, { "conditional_trap-19", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2543], 2, 0, 0, 0 }, { "conditional_trap-18", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, 
#endif 0, &operand_data[1755], 1, 0, 0, 0 }, { "conditional_trap-17", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1798], 1, 0, 0, 0 }, { "conditional_trap-16", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1767], 1, 0, 0, 0 }, { "conditional_trap-15", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1767], 1, 0, 0, 0 }, { "conditional_trap-14", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1767], 1, 0, 0, 0 }, { "conditional_trap-13", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1767], 1, 0, 0, 0 }, { "conditional_trap-12", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1767], 1, 0, 0, 0 }, { "conditional_trap-11", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2545], 2, 0, 0, 0 }, { "conditional_trap-10", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1767], 1, 0, 0, 0 }, { "conditional_trap-9", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1767], 1, 0, 0, 0 }, { "conditional_trap-8", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2545], 2, 0, 0, 0 }, { "conditional_trap-7", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[1767], 1, 0, 0, 0 }, { "conditional_trap-6", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2547], 4, 0, 0, 0 }, { "conditional_trap-5", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2551], 4, 0, 0, 0 }, { "conditional_trap-4", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2555], 4, 0, 0, 0 }, { "conditional_trap-3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2559], 4, 0, 0, 0 }, { "conditional_trap-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2563], 4, 0, 0, 0 }, { "conditional_trap-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2567], 4, 0, 0, 0 }, { "conditional_trap", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_conditional_trap, &operand_data[1318], 2, 1, 0, 0 }, { "conditional_trap+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2571], 2, 0, 0, 0 }, { "movti-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2573], 2, 0, 0, 0 }, { "movti", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movti, &operand_data[2575], 2, 0, 0, 0 }, { "movtf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movtf, &operand_data[2577], 2, 0, 0, 0 }, { "movv2df", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movv2df, &operand_data[2579], 2, 0, 0, 0 }, { "movv8hi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movv8hi, &operand_data[2581], 2, 0, 0, 0 }, { "movv16qi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movv16qi, &operand_data[2583], 2, 0, 0, 0 }, { "movv4sf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movv4sf, &operand_data[2585], 2, 0, 0, 0 }, { "movv4si", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 
(insn_gen_fn) gen_movv4si, &operand_data[2587], 2, 0, 0, 0 }, { "movv2di", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movv2di, &operand_data[2589], 2, 0, 0, 0 }, { "movv2si", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movv2si, &operand_data[2591], 2, 0, 0, 0 }, { "movv4hi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movv4hi, &operand_data[2593], 2, 0, 0, 0 }, { "movv8qi", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movv8qi, &operand_data[2595], 2, 0, 0, 0 }, { "movv2sf", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_movv2sf, &operand_data[2597], 2, 0, 0, 0 }, { "movv2sf+1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2599], 2, 0, 0, 0 }, { "movv2sf+2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2599], 2, 0, 0, 0 }, { "sse_movaps-2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2601], 2, 0, 0, 0 }, { "sse_movaps-1", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif 0, &operand_data[2603], 2, 0, 0, 0 }, { "sse_movaps", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse_movaps, &operand_data[2585], 2, 0, 0, 0 }, { "sse_movups", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse_movups, &operand_data[2585], 2, 0, 0, 0 }, { "sse_loadss", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse_loadss, &operand_data[2605], 2, 0, 0, 0 }, { "negv4sf2", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_negv4sf2, &operand_data[2607], 2, 1, 0, 0 }, { "sse_andv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse_andv4sf3, &operand_data[2609], 3, 0, 0, 0 }, { "sse_nandv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse_nandv4sf3, &operand_data[2609], 3, 0, 0, 0 }, { "sse_iorv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse_iorv4sf3, &operand_data[2609], 3, 0, 0, 0 }, { "sse_xorv4sf3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse_xorv4sf3, &operand_data[2609], 3, 0, 0, 0 }, { "sse2_andv2df3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse2_andv2df3, &operand_data[2612], 3, 0, 0, 0 }, { "sse2_nandv2df3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse2_nandv2df3, &operand_data[2612], 3, 0, 0, 0 }, { "sse2_iorv2df3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse2_iorv2df3, &operand_data[2612], 3, 0, 0, 0 }, { "sse2_xorv2df3", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse2_xorv2df3, &operand_data[2613], 3, 0, 0, 0 }, { "sfence", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sfence, &operand_data[0], 0, 2, 0, 0 }, { "sse_prologue_save", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse_prologue_save, &operand_data[2616], 4, 0, 0, 0 }, { "prefetch", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_prefetch, &operand_data[2620], 3, 0, 0, 0 }, { "sse2_loadsd", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, 
#else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse2_loadsd, &operand_data[2623], 2, 0, 0, 0 }, { "sse2_mfence", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse2_mfence, &operand_data[0], 0, 2, 0, 0 }, { "sse2_lfence", #if HAVE_DESIGNATED_INITIALIZERS { 0 }, #else { 0, 0, 0 }, #endif (insn_gen_fn) gen_sse2_lfence, &operand_data[0], 0, 2, 0, 0 },
};

const char *
get_insn_name (int code)
{
  if (code == NOOP_MOVE_INSN_CODE)
    return "NOOP_MOVE";
  else
    return insn_data[code].name;
}

/* Generated automatically by the program `genpeep'
   from the machine description file `md'.  */

#ifdef HAVE_peephole
extern rtx peep_operand[];

#define operands peep_operand

rtx
peephole (rtx ins1)
{
  rtx insn ATTRIBUTE_UNUSED, x ATTRIBUTE_UNUSED, pat ATTRIBUTE_UNUSED;

  if (NEXT_INSN (ins1)
      && GET_CODE (NEXT_INSN (ins1)) == BARRIER)
    return 0;

  return 0;
}

rtx peep_operand[2];
#endif

#undef operands

/* Generated automatically by the program `genrecog' from the target
   machine description file.  */

/* `recog' contains a decision tree that recognizes whether the rtx
   X0 is a valid instruction.

   recog returns -1 if the rtx is not valid.  If the rtx is valid, recog
   returns a nonnegative number which is the insn code number for the
   pattern that matched.  This is the same as the order in the machine
   description of the entry that matched.  This number can be used as an
   index into `insn_data' and other tables.

   The third argument to recog is an optional pointer to an int.  If
   present, recog will accept a pattern if it matches except for missing
   CLOBBER expressions at the end.  In that case, the value pointed to by
   the optional pointer will be set to the number of CLOBBERs that need
   to be added (it should be initialized to zero by the caller).  If it
   is set nonzero, the caller should allocate a PARALLEL of the
   appropriate size, copy the initial entries, and call add_clobbers
   (found in insn-emit.c) to fill in the CLOBBERs.

   The function split_insns returns 0 if the rtl could not be split or
   the split rtl as an INSN list if it can be.

   The function peephole2_insns returns 0 if the rtl could not be matched.
   If there was a match, the new rtl is returned in an INSN list, and
   LAST_INSN will point to the last recognized insn in the old sequence.
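
   As a rough caller-side sketch of the CLOBBER protocol described above
   (illustrative only, not part of the generated output; `insn' stands
   for whatever INSN rtx the caller is trying to recognize), a caller
   prepared to add missing CLOBBERs might do something like:

	int num_clobbers = 0;
	int icode = recog (PATTERN (insn), insn, &num_clobbers);

	if (icode >= 0 && num_clobbers > 0)
	  {
	    rtx newpat = gen_rtx_PARALLEL (VOIDmode,
					   rtvec_alloc (num_clobbers + 1));
	    XVECEXP (newpat, 0, 0) = PATTERN (insn);
	    add_clobbers (newpat, icode);
	    PATTERN (insn) = newpat;
	  }

   After this, a nonnegative icode indexes `insn_data' (or can be passed
   to get_insn_name, defined earlier in this file).  Real passes do more,
   e.g. caching the result in INSN_CODE (insn); this sketch omits that.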
*/ extern rtx gen_split_1044 (rtx, rtx *); extern rtx gen_peephole2_1052 (rtx, rtx *); extern rtx gen_peephole2_1053 (rtx, rtx *); extern rtx gen_split_1054 (rtx, rtx *); extern rtx gen_split_1055 (rtx, rtx *); extern rtx gen_split_1056 (rtx, rtx *); extern rtx gen_peephole2_1057 (rtx, rtx *); extern rtx gen_peephole2_1058 (rtx, rtx *); extern rtx gen_split_1059 (rtx, rtx *); extern rtx gen_split_1061 (rtx, rtx *); extern rtx gen_split_1062 (rtx, rtx *); extern rtx gen_split_1063 (rtx, rtx *); extern rtx gen_split_1065 (rtx, rtx *); extern rtx gen_split_1066 (rtx, rtx *); extern rtx gen_split_1067 (rtx, rtx *); extern rtx gen_split_1068 (rtx, rtx *); extern rtx gen_split_1070 (rtx, rtx *); extern rtx gen_split_1071 (rtx, rtx *); extern rtx gen_split_1072 (rtx, rtx *); extern rtx gen_split_1073 (rtx, rtx *); extern rtx gen_split_1074 (rtx, rtx *); extern rtx gen_split_1076 (rtx, rtx *); extern rtx gen_split_1078 (rtx, rtx *); extern rtx gen_split_1079 (rtx, rtx *); extern rtx gen_split_1080 (rtx, rtx *); extern rtx gen_split_1082 (rtx, rtx *); extern rtx gen_split_1083 (rtx, rtx *); extern rtx gen_split_1084 (rtx, rtx *); extern rtx gen_split_1086 (rtx, rtx *); extern rtx gen_split_1087 (rtx, rtx *); extern rtx gen_split_1088 (rtx, rtx *); extern rtx gen_split_1090 (rtx, rtx *); extern rtx gen_split_1091 (rtx, rtx *); extern rtx gen_split_1092 (rtx, rtx *); extern rtx gen_split_1093 (rtx, rtx *); extern rtx gen_split_1094 (rtx, rtx *); extern rtx gen_split_1095 (rtx, rtx *); extern rtx gen_split_1096 (rtx, rtx *); extern rtx gen_split_1097 (rtx, rtx *); extern rtx gen_split_1098 (rtx, rtx *); extern rtx gen_split_1103 (rtx, rtx *); extern rtx gen_split_1104 (rtx, rtx *); extern rtx gen_split_1105 (rtx, rtx *); extern rtx gen_split_1106 (rtx, rtx *); extern rtx gen_split_1108 (rtx, rtx *); extern rtx gen_split_1109 (rtx, rtx *); extern rtx gen_split_1111 (rtx, rtx *); extern rtx gen_split_1112 (rtx, rtx *); extern rtx gen_split_1116 (rtx, rtx *); extern rtx gen_split_1117 (rtx, rtx *); extern rtx gen_split_1118 (rtx, rtx *); extern rtx gen_peephole2_1119 (rtx, rtx *); extern rtx gen_peephole2_1120 (rtx, rtx *); extern rtx gen_split_1124 (rtx, rtx *); extern rtx gen_peephole2_1125 (rtx, rtx *); extern rtx gen_peephole2_1126 (rtx, rtx *); extern rtx gen_split_1127 (rtx, rtx *); extern rtx gen_split_1128 (rtx, rtx *); extern rtx gen_split_1132 (rtx, rtx *); extern rtx gen_split_1133 (rtx, rtx *); extern rtx gen_split_1134 (rtx, rtx *); extern rtx gen_split_1137 (rtx, rtx *); extern rtx gen_split_1139 (rtx, rtx *); extern rtx gen_split_1143 (rtx, rtx *); extern rtx gen_split_1154 (rtx, rtx *); extern rtx gen_split_1156 (rtx, rtx *); extern rtx gen_split_1157 (rtx, rtx *); extern rtx gen_split_1158 (rtx, rtx *); extern rtx gen_split_1159 (rtx, rtx *); extern rtx gen_split_1160 (rtx, rtx *); extern rtx gen_split_1161 (rtx, rtx *); extern rtx gen_split_1162 (rtx, rtx *); extern rtx gen_split_1163 (rtx, rtx *); extern rtx gen_split_1164 (rtx, rtx *); extern rtx gen_split_1171 (rtx, rtx *); extern rtx gen_split_1199 (rtx, rtx *); extern rtx gen_split_1201 (rtx, rtx *); extern rtx gen_split_1202 (rtx, rtx *); extern rtx gen_split_1203 (rtx, rtx *); extern rtx gen_split_1208 (rtx, rtx *); extern rtx gen_split_1209 (rtx, rtx *); extern rtx gen_split_1210 (rtx, rtx *); extern rtx gen_split_1213 (rtx, rtx *); extern rtx gen_split_1214 (rtx, rtx *); extern rtx gen_split_1215 (rtx, rtx *); extern rtx gen_split_1218 (rtx, rtx *); extern rtx gen_split_1219 (rtx, rtx *); extern rtx gen_split_1224 (rtx, rtx *); 
extern rtx gen_split_1225 (rtx, rtx *); extern rtx gen_split_1231 (rtx, rtx *); extern rtx gen_split_1232 (rtx, rtx *); extern rtx gen_split_1234 (rtx, rtx *); extern rtx gen_split_1239 (rtx, rtx *); extern rtx gen_split_1240 (rtx, rtx *); extern rtx gen_split_1241 (rtx, rtx *); extern rtx gen_split_1242 (rtx, rtx *); extern rtx gen_split_1243 (rtx, rtx *); extern rtx gen_split_1244 (rtx, rtx *); extern rtx gen_split_1246 (rtx, rtx *); extern rtx gen_split_1247 (rtx, rtx *); extern rtx gen_split_1248 (rtx, rtx *); extern rtx gen_split_1249 (rtx, rtx *); extern rtx gen_split_1250 (rtx, rtx *); extern rtx gen_split_1251 (rtx, rtx *); extern rtx gen_split_1253 (rtx, rtx *); extern rtx gen_split_1254 (rtx, rtx *); extern rtx gen_split_1256 (rtx, rtx *); extern rtx gen_split_1257 (rtx, rtx *); extern rtx gen_split_1258 (rtx, rtx *); extern rtx gen_split_1259 (rtx, rtx *); extern rtx gen_split_1260 (rtx, rtx *); extern rtx gen_split_1261 (rtx, rtx *); extern rtx gen_split_1263 (rtx, rtx *); extern rtx gen_split_1264 (rtx, rtx *); extern rtx gen_split_1265 (rtx, rtx *); extern rtx gen_split_1266 (rtx, rtx *); extern rtx gen_split_1267 (rtx, rtx *); extern rtx gen_split_1269 (rtx, rtx *); extern rtx gen_split_1270 (rtx, rtx *); extern rtx gen_split_1272 (rtx, rtx *); extern rtx gen_split_1274 (rtx, rtx *); extern rtx gen_split_1275 (rtx, rtx *); extern rtx gen_split_1277 (rtx, rtx *); extern rtx gen_split_1279 (rtx, rtx *); extern rtx gen_split_1281 (rtx, rtx *); extern rtx gen_split_1282 (rtx, rtx *); extern rtx gen_split_1283 (rtx, rtx *); extern rtx gen_split_1287 (rtx, rtx *); extern rtx gen_split_1288 (rtx, rtx *); extern rtx gen_split_1289 (rtx, rtx *); extern rtx gen_split_1293 (rtx, rtx *); extern rtx gen_split_1294 (rtx, rtx *); extern rtx gen_split_1300 (rtx, rtx *); extern rtx gen_split_1301 (rtx, rtx *); extern rtx gen_split_1334 (rtx, rtx *); extern rtx gen_split_1335 (rtx, rtx *); extern rtx gen_split_1336 (rtx, rtx *); extern rtx gen_split_1337 (rtx, rtx *); extern rtx gen_split_1356 (rtx, rtx *); extern rtx gen_split_1357 (rtx, rtx *); extern rtx gen_split_1358 (rtx, rtx *); extern rtx gen_split_1359 (rtx, rtx *); extern rtx gen_split_1363 (rtx, rtx *); extern rtx gen_split_1364 (rtx, rtx *); extern rtx gen_peephole2_1365 (rtx, rtx *); extern rtx gen_peephole2_1366 (rtx, rtx *); extern rtx gen_split_1379 (rtx, rtx *); extern rtx gen_split_1380 (rtx, rtx *); extern rtx gen_split_1382 (rtx, rtx *); extern rtx gen_split_1383 (rtx, rtx *); extern rtx gen_split_1385 (rtx, rtx *); extern rtx gen_split_1392 (rtx, rtx *); extern rtx gen_split_1393 (rtx, rtx *); extern rtx gen_split_1394 (rtx, rtx *); extern rtx gen_split_1403 (rtx, rtx *); extern rtx gen_split_1404 (rtx, rtx *); extern rtx gen_split_1405 (rtx, rtx *); extern rtx gen_split_1406 (rtx, rtx *); extern rtx gen_split_1407 (rtx, rtx *); extern rtx gen_split_1408 (rtx, rtx *); extern rtx gen_split_1409 (rtx, rtx *); extern rtx gen_split_1410 (rtx, rtx *); extern rtx gen_peephole2_1411 (rtx, rtx *); extern rtx gen_peephole2_1413 (rtx, rtx *); extern rtx gen_peephole2_1415 (rtx, rtx *); extern rtx gen_peephole2_1474 (rtx, rtx *); extern rtx gen_peephole2_1475 (rtx, rtx *); extern rtx gen_split_1480 (rtx, rtx *); extern rtx gen_split_1483 (rtx, rtx *); extern rtx gen_split_1486 (rtx, rtx *); extern rtx gen_split_1491 (rtx, rtx *); extern rtx gen_split_1493 (rtx, rtx *); extern rtx gen_split_1494 (rtx, rtx *); extern rtx gen_split_1496 (rtx, rtx *); extern rtx gen_split_1497 (rtx, rtx *); extern rtx gen_split_1499 (rtx, rtx *); extern 
rtx gen_split_1500 (rtx, rtx *); extern rtx gen_split_1501 (rtx, rtx *); extern rtx gen_split_1502 (rtx, rtx *); extern rtx gen_split_1503 (rtx, rtx *); extern rtx gen_split_1504 (rtx, rtx *); extern rtx gen_split_1505 (rtx, rtx *); extern rtx gen_split_1511 (rtx, rtx *); extern rtx gen_split_1512 (rtx, rtx *); extern rtx gen_split_1513 (rtx, rtx *); extern rtx gen_split_1514 (rtx, rtx *); extern rtx gen_split_1515 (rtx, rtx *); extern rtx gen_split_1516 (rtx, rtx *); extern rtx gen_peephole2_1517 (rtx, rtx *); extern rtx gen_peephole2_1518 (rtx, rtx *); extern rtx gen_peephole2_1519 (rtx, rtx *); extern rtx gen_peephole2_1520 (rtx, rtx *); extern rtx gen_peephole2_1521 (rtx, rtx *); extern rtx gen_peephole2_1522 (rtx, rtx *); extern rtx gen_peephole2_1523 (rtx, rtx *); extern rtx gen_peephole2_1524 (rtx, rtx *); extern rtx gen_peephole2_1525 (rtx, rtx *); extern rtx gen_peephole2_1526 (rtx, rtx *); extern rtx gen_peephole2_1527 (rtx, rtx *); extern rtx gen_peephole2_1528 (rtx, rtx *); extern rtx gen_peephole2_1529 (rtx, rtx *); extern rtx gen_peephole2_1530 (rtx, rtx *); extern rtx gen_peephole2_1531 (rtx, rtx *); extern rtx gen_peephole2_1532 (rtx, rtx *); extern rtx gen_peephole2_1533 (rtx, rtx *); extern rtx gen_peephole2_1534 (rtx, rtx *); extern rtx gen_peephole2_1535 (rtx, rtx *); extern rtx gen_peephole2_1536 (rtx, rtx *); extern rtx gen_peephole2_1537 (rtx, rtx *); extern rtx gen_peephole2_1538 (rtx, rtx *); extern rtx gen_peephole2_1539 (rtx, rtx *); extern rtx gen_peephole2_1540 (rtx, rtx *); extern rtx gen_peephole2_1541 (rtx, rtx *); extern rtx gen_peephole2_1542 (rtx, rtx *); extern rtx gen_peephole2_1543 (rtx, rtx *); extern rtx gen_peephole2_1544 (rtx, rtx *); extern rtx gen_peephole2_1545 (rtx, rtx *); extern rtx gen_peephole2_1546 (rtx, rtx *); extern rtx gen_peephole2_1547 (rtx, rtx *); extern rtx gen_peephole2_1548 (rtx, rtx *); extern rtx gen_peephole2_1549 (rtx, rtx *); extern rtx gen_peephole2_1550 (rtx, rtx *); extern rtx gen_peephole2_1551 (rtx, rtx *); extern rtx gen_peephole2_1552 (rtx, rtx *); extern rtx gen_peephole2_1553 (rtx, rtx *); extern rtx gen_peephole2_1554 (rtx, rtx *); extern rtx gen_peephole2_1555 (rtx, rtx *); extern rtx gen_peephole2_1556 (rtx, rtx *); extern rtx gen_peephole2_1557 (rtx, rtx *); extern rtx gen_peephole2_1558 (rtx, rtx *); extern rtx gen_peephole2_1559 (rtx, rtx *); extern rtx gen_peephole2_1560 (rtx, rtx *); extern rtx gen_peephole2_1561 (rtx, rtx *); extern rtx gen_peephole2_1562 (rtx, rtx *); extern rtx gen_peephole2_1563 (rtx, rtx *); extern rtx gen_peephole2_1564 (rtx, rtx *); extern rtx gen_peephole2_1565 (rtx, rtx *); extern rtx gen_peephole2_1566 (rtx, rtx *); extern rtx gen_peephole2_1567 (rtx, rtx *); extern rtx gen_peephole2_1568 (rtx, rtx *); extern rtx gen_peephole2_1569 (rtx, rtx *); extern rtx gen_peephole2_1570 (rtx, rtx *); extern rtx gen_peephole2_1571 (rtx, rtx *); extern rtx gen_peephole2_1572 (rtx, rtx *); extern rtx gen_peephole2_1573 (rtx, rtx *); extern rtx gen_peephole2_1574 (rtx, rtx *); extern rtx gen_peephole2_1575 (rtx, rtx *); extern rtx gen_peephole2_1576 (rtx, rtx *); extern rtx gen_peephole2_1577 (rtx, rtx *); extern rtx gen_peephole2_1578 (rtx, rtx *); extern rtx gen_split_1580 (rtx, rtx *); extern rtx gen_split_1581 (rtx, rtx *); extern rtx gen_split_1594 (rtx, rtx *); extern rtx gen_split_1595 (rtx, rtx *); extern rtx gen_split_1596 (rtx, rtx *); extern rtx gen_split_1597 (rtx, rtx *); static int recog_1 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { 
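/* The recog_N helpers that follow are machine-generated pattern
   recognizers (the insn-recog.c portion of this compilation unit) and are
   not meant to be edited by hand.  Each one walks the RTL of a candidate
   insn as a hard-coded decision tree: GET_CODE / GET_MODE switches
   dispatch on the shape of each sub-expression, predicates such as
   register_operand and nonimmediate_operand both test and capture
   sub-expressions into recog_data.operand[], and a successful leaf
   returns the insn code number of the matching pattern from the machine
   description, while ret0 returns -1 for "no match".  recog_1 covers
   HImode (set ...) forms.  As a reading aid taken directly from the code
   below: a (set (push_operand:HI ...) (general_no_elim_operand:HI ...))
   insn is routed through L13230 and L333 and returns insn code 48 when
   !TARGET_64BIT.  */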
rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 0); switch (GET_CODE (x1)) { case MEM: goto L13230; case REG: goto L13231; default: break; } L13109: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, HImode)) { operands[0] = x1; goto L139; } L13119: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, HImode)) { operands[0] = x1; goto L1068; } L13120: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == MEM) goto L345; if (register_operand (x1, HImode)) { operands[0] = x1; goto L350; } L13148: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, HImode)) { operands[0] = x1; goto L1095; } goto ret0; L13230: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, HImode)) { operands[0] = x1; goto L333; } goto L13119; L333: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_no_elim_operand (x1, HImode)) { operands[1] = x1; goto L334; } L337: ATTRIBUTE_UNUSED_LABEL if (nonmemory_no_elim_operand (x1, HImode)) { operands[1] = x1; goto L338; } x1 = XEXP (x0, 0); goto L13119; L334: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 48; } x1 = XEXP (x0, 1); goto L337; L338: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 49; } x1 = XEXP (x0, 0); goto L13119; L13231: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 0) == 18) goto L1099; goto L13109; L1099: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == HImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 28) goto L1100; x1 = XEXP (x0, 0); goto L13109; L1100: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (memory_operand (x2, HImode)) { operands[0] = x2; goto L1101; } x1 = XEXP (x0, 0); goto L13109; L1101: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 161; } x1 = XEXP (x0, 0); goto L13109; L139: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == HImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 24) goto L140; x1 = XEXP (x0, 0); goto L13119; L140: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); switch (GET_MODE (x2)) { case CCFPmode: goto L13232; case CCFPUmode: goto L13233; default: break; } x1 = XEXP (x0, 0); goto L13119; L13232: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case COMPARE: goto L154; case REG: goto L13235; default: break; } x1 = XEXP (x0, 0); goto L13119; L154: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case SFmode: goto L13236; case DFmode: goto L13237; case XFmode: goto L13238; default: break; } L141: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L142; } x1 = XEXP (x0, 0); goto L13119; L13236: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[1] = x3; goto L155; } goto L141; L155: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L156; } x3 = XEXP (x2, 0); goto L141; L156: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 20; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); x3 = XEXP (x2, 0); goto L141; L13237: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto L168; } goto L141; L168: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[2] = x3; goto L169; } x3 = XEXP (x2, 0); goto L141; L169: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 22; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); x3 = XEXP (x2, 0); goto L141; L13238: 
ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, XFmode)) { operands[1] = x3; goto L181; } goto L141; L181: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L182; } x3 = XEXP (x2, 0); goto L141; L182: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 24; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); x3 = XEXP (x2, 0); goto L141; L142: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const0_operand (x3, VOIDmode)) { operands[2] = x3; goto L143; } x1 = XEXP (x0, 0); goto L13119; L143: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]))) { return 18; } x1 = XEXP (x0, 0); goto L13119; L13235: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 18 && (TARGET_80387)) { return 28; } x1 = XEXP (x0, 0); goto L13119; L13233: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == COMPARE) goto L193; x1 = XEXP (x0, 0); goto L13119; L193: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L194; } x1 = XEXP (x0, 0); goto L13119; L194: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, VOIDmode)) { operands[2] = x3; goto L195; } x1 = XEXP (x0, 0); goto L13119; L195: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]))) { return 26; } x1 = XEXP (x0, 0); goto L13119; L1068: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == HImode) goto L13239; L341: ATTRIBUTE_UNUSED_LABEL if (general_operand (x1, HImode)) { operands[1] = x1; goto L342; } x1 = XEXP (x0, 0); goto L13120; L13239: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FIX: goto L1069; case PLUS: goto L1251; case MINUS: goto L1969; case AND: goto L3041; case IOR: goto L3410; case XOR: goto L3793; case NEG: goto L4165; case NOT: goto L4566; case ASHIFT: goto L4745; case ASHIFTRT: goto L5094; case LSHIFTRT: goto L5432; case ROTATE: goto L5660; case ROTATERT: goto L5836; default: break; } goto L341; L1069: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L1070; } goto L341; L1070: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !reload_completed && !reload_in_progress && !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 157; } x1 = XEXP (x0, 1); goto L341; L1251: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == HImode) goto L13252; goto L341; L13252: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == PLUS) goto L1252; if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L1599; } goto L341; L1252: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, HImode)) { operands[3] = x3; goto L1253; } goto L341; L1253: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L1254; } goto L341; L1254: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, HImode)) { operands[2] = x2; goto L1255; } goto L341; L1255: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (PLUS, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 181; } x1 = XEXP (x0, 1); goto L341; L1599: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, HImode)) { operands[2] = x2; goto L1600; } goto L341; L1600: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (PLUS, HImode, operands)) && pnum_clobbers != NULL) { 
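/* The "*pnum_clobbers = N; return CODE;" leaves used throughout these
   recognizers implement recog's clobber protocol: the full pattern in the
   machine description ends in N (clobber ...) expressions that the
   candidate insn does not yet have.  A caller that passes a non-null
   pnum_clobbers is stating that it can append those clobbers itself, so
   the match is accepted and the required count is reported back.  A
   hedged sketch of typical use elsewhere in the compiler (illustrative
   only, not part of the generated file):

     int num_clobbers = 0;
     int icode = recog (PATTERN (insn), insn, &num_clobbers);
     if (icode >= 0 && num_clobbers > 0)
       ...rebuild the pattern as a PARALLEL with num_clobbers extra
          (clobber (scratch)) expressions appended before using it...  */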
*pnum_clobbers = 1; return 209; } L1614: ATTRIBUTE_UNUSED_LABEL if ((TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (PLUS, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 210; } x1 = XEXP (x0, 1); goto L341; L1969: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L1970; } goto L341; L1970: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == PLUS) goto L1971; if (general_operand (x2, HImode)) { operands[2] = x2; goto L2105; } goto L341; L1971: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, HImode)) { operands[3] = x3; goto L1972; } goto L341; L1972: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L1973; } goto L341; L1973: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (MINUS, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 231; } x1 = XEXP (x0, 1); goto L341; L2105: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (MINUS, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 240; } x1 = XEXP (x0, 1); goto L341; L3041: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L3042; } goto L341; L3042: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, HImode)) { operands[2] = x2; goto L3043; } goto L341; L3043: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (AND, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 294; } x1 = XEXP (x0, 1); goto L341; L3410: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L3411; } goto L341; L3411: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, HImode)) { operands[2] = x2; goto L3412; } goto L341; L3412: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (IOR, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 315; } x1 = XEXP (x0, 1); goto L341; L3793: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L3794; } goto L341; L3794: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, HImode)) { operands[2] = x2; goto L3795; } goto L341; L3795: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (XOR, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 337; } x1 = XEXP (x0, 1); goto L341; L4165: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L4166; } goto L341; L4166: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (NEG, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 358; } x1 = XEXP (x0, 1); goto L341; L4566: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L4567; } goto L341; L4567: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (NOT, HImode, operands))) { return 398; } x1 = XEXP (x0, 1); goto L341; L4745: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L4746; } goto L341; L4746: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L4747; } goto L341; L4747: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (ASHIFT, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 411; } L4761: ATTRIBUTE_UNUSED_LABEL if 
((TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (ASHIFT, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 412; } x1 = XEXP (x0, 1); goto L341; L5094: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L5095; } goto L341; L5095: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5096; } L5109: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5110; } goto L341; L5096: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ASHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 435; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5109; L5110: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ASHIFTRT, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 436; } x1 = XEXP (x0, 1); goto L341; L5432: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L5433; } goto L341; L5433: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5434; } L5447: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5448; } goto L341; L5434: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (LSHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 459; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5447; L5448: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (LSHIFTRT, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 460; } x1 = XEXP (x0, 1); goto L341; L5660: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L5661; } goto L341; L5661: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5662; } L5675: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5676; } goto L341; L5662: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATE, HImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 475; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5675; L5676: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATE, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 476; } x1 = XEXP (x0, 1); goto L341; L5836: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L5837; } goto L341; L5837: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5838; } L5851: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5852; } goto L341; L5838: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATERT, HImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 487; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5851; L5852: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATERT, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 488; } x1 = XEXP (x0, 1); goto L341; L342: ATTRIBUTE_UNUSED_LABEL if ((GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) { return 50; } x1 = XEXP (x0, 0); goto L13120; L345: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x86_64_movabs_operand (x2, DImode)) { operands[0] = x2; goto L346; } goto 
L13148; L346: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (nonmemory_operand (x1, HImode)) { operands[1] = x1; goto L347; } x1 = XEXP (x0, 0); goto L13148; L347: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_check_movabs (insn, 0))) { return 51; } x1 = XEXP (x0, 0); goto L13148; L350: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == HImode) goto L13254; x1 = XEXP (x0, 0); goto L13148; L13254: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case MEM: goto L351; case SIGN_EXTRACT: goto L429; case ZERO_EXTEND: goto L679; case SIGN_EXTEND: goto L814; case MULT: goto L2239; case IF_THEN_ELSE: goto L7769; default: break; } x1 = XEXP (x0, 0); goto L13148; L351: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x86_64_movabs_operand (x2, DImode)) { operands[1] = x2; goto L352; } x1 = XEXP (x0, 0); goto L13148; L352: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_check_movabs (insn, 1))) { return 52; } x1 = XEXP (x0, 0); goto L13148; L429: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ext_register_operand (x2, VOIDmode)) { operands[1] = x2; goto L430; } x1 = XEXP (x0, 0); goto L13148; L430: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L431; x1 = XEXP (x0, 0); goto L13148; L431: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) { return 64; } x1 = XEXP (x0, 0); goto L13148; L679: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L680; } x1 = XEXP (x0, 0); goto L13148; L680: ATTRIBUTE_UNUSED_LABEL if ((TARGET_ZERO_EXTEND_WITH_AND && !optimize_size) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 105; } L692: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_ZERO_EXTEND_WITH_AND || optimize_size) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 106; } L697: ATTRIBUTE_UNUSED_LABEL if (((!TARGET_ZERO_EXTEND_WITH_AND || optimize_size) && reload_completed)) { return 107; } x1 = XEXP (x0, 0); goto L13148; L814: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; return 123; } x1 = XEXP (x0, 0); goto L13148; L2239: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == HImode) goto L13261; x1 = XEXP (x0, 0); goto L13148; L13261: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ZERO_EXTEND: goto L2270; case SIGN_EXTEND: goto L2288; case SUBREG: case REG: case MEM: goto L13260; default: x1 = XEXP (x0, 0); goto L13148; } L13260: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L2240; } x1 = XEXP (x0, 0); goto L13148; L2270: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L2271; } x1 = XEXP (x0, 0); goto L13148; L2271: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == ZERO_EXTEND) goto L2272; x1 = XEXP (x0, 0); goto L13148; L2272: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[2] = x3; goto L2273; } x1 = XEXP (x0, 0); goto L13148; L2273: ATTRIBUTE_UNUSED_LABEL if ((TARGET_QIMODE_MATH && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 252; } x1 = XEXP (x0, 0); goto L13148; L2288: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L2289; } x1 = XEXP (x0, 0); goto L13148; L2289: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == SIGN_EXTEND) goto 
L2290; x1 = XEXP (x0, 0); goto L13148; L2290: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[2] = x3; goto L2291; } x1 = XEXP (x0, 0); goto L13148; L2291: ATTRIBUTE_UNUSED_LABEL if ((TARGET_QIMODE_MATH && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 253; } x1 = XEXP (x0, 0); goto L13148; L2240: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, HImode)) { operands[2] = x2; goto L2241; } x1 = XEXP (x0, 0); goto L13148; L2241: ATTRIBUTE_UNUSED_LABEL if ((GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 250; } x1 = XEXP (x0, 0); goto L13148; L7769: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L7770; } x1 = XEXP (x0, 0); goto L13148; L7770: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L7771; x1 = XEXP (x0, 0); goto L13148; L7771: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7772; x1 = XEXP (x0, 0); goto L13148; L7772: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, HImode)) { operands[2] = x2; goto L7773; } x1 = XEXP (x0, 0); goto L13148; L7773: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, HImode)) { operands[3] = x2; goto L7774; } x1 = XEXP (x0, 0); goto L13148; L7774: ATTRIBUTE_UNUSED_LABEL if ((TARGET_CMOVE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM))) { return 652; } x1 = XEXP (x0, 0); goto L13148; L1095: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == HImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 26) goto L1096; goto ret0; L1096: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (GET_MODE (x2) == HImode && GET_CODE (x2) == REG && XINT (x2, 0) == 18 && (TARGET_80387)) { return 160; } goto ret0; ret0: return -1; } static int recog_2 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode) goto L13280; L1318: ATTRIBUTE_UNUSED_LABEL if (no_seg_address_operand (x1, SImode)) { operands[1] = x1; goto L1319; } L1322: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x1) == SImode) goto L13295; goto ret0; L13280: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case MEM: goto L322; case SIGN_EXTRACT: goto L423; case ZERO_EXTRACT: goto L457; case ZERO_EXTEND: goto L662; case SIGN_EXTEND: goto L804; case FIX: goto L1052; case TRUNCATE: goto L2407; case UNSPEC: goto L13298; case FFS: goto L6393; case CTZ: goto L6453; case MINUS: goto L6479; case IF_THEN_ELSE: goto L7754; default: break; } goto L1318; L322: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x86_64_movabs_operand (x2, DImode)) { operands[1] = x2; goto L323; } goto L1318; L323: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_check_movabs (insn, 1))) { return 46; } x1 = XEXP (x0, 1); goto L1318; L423: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ext_register_operand (x2, VOIDmode)) { operands[1] = x2; goto L424; } goto L1318; L424: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + 
(8)]) goto L425; goto L1318; L425: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) { return 63; } goto L1318; L457: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ext_register_operand (x2, VOIDmode)) { operands[1] = x2; goto L458; } goto L1318; L458: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L459; goto L1318; L459: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) { return 69; } goto L1318; L662: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case HImode: goto L13302; case QImode: goto L13304; default: break; } goto L1318; L13302: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, HImode)) { operands[1] = x2; goto L663; } L13303: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L668; } goto L1318; L663: ATTRIBUTE_UNUSED_LABEL if ((TARGET_ZERO_EXTEND_WITH_AND && !optimize_size) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 103; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L13303; L668: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_ZERO_EXTEND_WITH_AND || optimize_size)) { return 104; } x1 = XEXP (x0, 1); goto L1318; L13304: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L709; } goto L1318; L709: ATTRIBUTE_UNUSED_LABEL if ((TARGET_ZERO_EXTEND_WITH_AND && !optimize_size) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 108; } L721: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_ZERO_EXTEND_WITH_AND || optimize_size) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 109; } L726: ATTRIBUTE_UNUSED_LABEL if (((!TARGET_ZERO_EXTEND_WITH_AND || optimize_size) && reload_completed)) { return 110; } x1 = XEXP (x0, 1); goto L1318; L804: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case HImode: goto L13305; case QImode: goto L13306; default: break; } goto L1318; L13305: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; return 121; } goto L1318; L13306: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; return 124; } goto L1318; L1052: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SFmode: goto L13307; case DFmode: goto L13308; default: break; } goto L1318; L13307: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L1053; } goto L1318; L1053: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 155; } x1 = XEXP (x0, 1); goto L1318; L13308: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L1058; } goto L1318; L1058: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 156; } x1 = XEXP (x0, 1); goto L1318; L2407: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == LSHIFTRT) goto L2408; goto L1318; L2408: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == MULT) goto L2409; goto L1318; L2409: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DImode) goto L13309; goto L1318; L13309: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x4)) { case ZERO_EXTEND: goto L2410; case SIGN_EXTEND: goto L2487; default: break; } goto L1318; L2410: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, SImode)) { operands[1] = x5; goto L2411; } goto L1318; L2411: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == DImode && GET_CODE (x4) == ZERO_EXTEND) goto L2412; goto L1318; L2412: ATTRIBUTE_UNUSED_LABEL 
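/* The TRUNCATE subtree entered at L2407 recognizes the high half of a
   widening 32x32->64 multiply:
     (truncate:SI
       (lshiftrt:DI (mult:DI (zero_extend:DI operand1)
                             (zero_extend:DI operand2))
                    (const_int 32)))
   plus the sign_extend variant; on success it reports two clobbers and
   returns insn code 259 (unsigned) or 262 (signed).  */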
x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, SImode)) { operands[2] = x5; goto L2413; } goto L1318; L2413: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (32)] && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 259; } goto L1318; L2487: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, SImode)) { operands[1] = x5; goto L2488; } goto L1318; L2488: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == DImode && GET_CODE (x4) == SIGN_EXTEND) goto L2489; goto L1318; L2489: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, SImode)) { operands[2] = x5; goto L2490; } goto L1318; L2490: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (32)] && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 262; } goto L1318; L13298: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x1, 0)) { case 1: goto L13311; case 3: goto L13312; case 2: goto L13313; default: break; } goto L1318; L13311: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 12L: goto L6344; case 15L: goto L6623; default: break; } goto L1318; L6344: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (!TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 530; } goto L1318; L6623: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (!TARGET_64BIT)) { return 551; } goto L1318; L13312: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 16) goto L6515; goto L1318; L6515: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L6516; } goto L1318; L6516: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (tls_symbolic_operand (x2, SImode)) { operands[2] = x2; goto L6517; } goto L1318; L6517: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 2); if (call_insn_operand (x2, SImode)) { operands[3] = x2; goto L6518; } goto L1318; L6518: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_GNU_TLS) && pnum_clobbers != NULL) { *pnum_clobbers = 3; return 544; } L6538: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_SUN_TLS) && pnum_clobbers != NULL) { *pnum_clobbers = 3; return 545; } x1 = XEXP (x0, 1); goto L1318; L13313: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 17) goto L6564; goto L1318; L6564: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L6565; } goto L1318; L6565: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (call_insn_operand (x2, SImode)) { operands[2] = x2; goto L6566; } goto L1318; L6566: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_GNU_TLS) && pnum_clobbers != NULL) { *pnum_clobbers = 3; return 547; } L6584: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_SUN_TLS) && pnum_clobbers != NULL) { *pnum_clobbers = 3; return 548; } x1 = XEXP (x0, 1); goto L1318; L6393: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L6394; } goto L1318; L6394: ATTRIBUTE_UNUSED_LABEL if ((TARGET_CMOVE) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 535; } x1 = XEXP (x0, 1); goto L1318; L6453: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L6454; } goto L1318; L6454: ATTRIBUTE_UNUSED_LABEL if (pnum_clobbers != NULL) { *pnum_clobbers = 1; return 540; } 
x1 = XEXP (x0, 1); goto L1318; L6479: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (31)]) goto L6480; goto L1318; L6480: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == CLZ) goto L6481; goto L1318; L6481: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L6482; } goto L1318; L6482: ATTRIBUTE_UNUSED_LABEL if (pnum_clobbers != NULL) { *pnum_clobbers = 1; return 542; } x1 = XEXP (x0, 1); goto L1318; L7754: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_carry_flag_operator (x2, VOIDmode)) { operands[1] = x2; goto L7755; } L7760: ATTRIBUTE_UNUSED_LABEL if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L7761; } goto L1318; L7755: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (-1)]) goto L7756; x2 = XEXP (x1, 0); goto L7760; L7756: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 650; } x2 = XEXP (x1, 0); goto L7760; L7761: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L7762; goto L1318; L7762: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7763; goto L1318; L7763: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, SImode)) { operands[2] = x2; goto L7764; } goto L1318; L7764: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, SImode)) { operands[3] = x2; goto L7765; } goto L1318; L7765: ATTRIBUTE_UNUSED_LABEL if ((TARGET_CMOVE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM))) { return 651; } x1 = XEXP (x0, 1); goto L1318; L1319: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 186; } x1 = XEXP (x0, 1); goto L1322; L13295: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case SUBREG: goto L13315; case MULT: goto L2209; case PLUS: goto L6613; default: break; } goto ret0; L13315: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 0) goto L1323; goto ret0; L1323: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (no_seg_address_operand (x2, DImode)) { operands[1] = x2; goto L1324; } goto ret0; L1324: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 187; } goto ret0; L2209: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L2210; } goto ret0; L2210: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, SImode)) { operands[2] = x2; goto L2211; } goto ret0; L2211: ATTRIBUTE_UNUSED_LABEL if ((GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 248; } goto ret0; L6613: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode) goto L13316; goto ret0; L13316: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == UNSPEC) goto L13318; goto ret0; L13318: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x2, 0)) { case 2: goto L13320; case 1: goto L13321; default: break; } goto ret0; L13320: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 1) == 17) goto L6614; goto ret0; L6614: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L6615; } goto ret0; L6615: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (call_insn_operand (x3, SImode)) { operands[2] = x3; goto L6616; } goto ret0; L6616: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == CONST) 
goto L6617; goto ret0; L6617: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == UNSPEC && XVECLEN (x3, 0) == 1 && XINT (x3, 1) == 6) goto L6618; goto ret0; L6618: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (tls_symbolic_operand (x4, SImode)) { operands[3] = x4; goto L6619; } goto ret0; L6619: ATTRIBUTE_UNUSED_LABEL if (pnum_clobbers != NULL) { *pnum_clobbers = 3; return 550; } goto ret0; L13321: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 1) == 15) goto L6637; goto ret0; L6637: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L6638; goto ret0; L6638: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, SImode)) { operands[1] = x2; goto L6639; } goto ret0; L6639: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 552; } goto ret0; ret0: return -1; } static int recog_3 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 0); switch (GET_CODE (x1)) { case MEM: goto L13266; case ZERO_EXTRACT: goto L476; case SUBREG: case REG: goto L13114; default: goto L13115; } L13114: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, SImode)) { operands[0] = x1; goto L294; } L13115: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, SImode)) { operands[0] = x1; goto L1024; } L13116: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == MEM) goto L316; if (register_operand (x1, SImode)) { operands[0] = x1; goto L321; } L13159: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 19) goto L7127; L13152: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, SImode)) { operands[0] = x1; goto L1477; } goto ret0; L13266: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, SImode)) { operands[0] = x1; goto L250; } goto L13115; L250: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_no_elim_operand (x1, SImode)) { operands[1] = x1; goto L251; } L254: ATTRIBUTE_UNUSED_LABEL if (nonmemory_no_elim_operand (x1, SImode)) { operands[1] = x1; goto L255; } x1 = XEXP (x0, 0); goto L13115; L251: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 36; } x1 = XEXP (x0, 1); goto L254; L255: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 37; } x1 = XEXP (x0, 0); goto L13115; L476: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ext_register_operand (x2, VOIDmode)) { operands[0] = x2; goto L477; } goto ret0; L477: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L478; goto ret0; L478: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L493; goto ret0; L493: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode) goto L13267; L479: ATTRIBUTE_UNUSED_LABEL if (general_operand (x1, SImode)) { operands[1] = x1; goto L480; } goto ret0; L13267: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case LSHIFTRT: goto L494; case PLUS: goto L1805; case AND: goto L3134; case IOR: goto L3539; case XOR: goto L3877; default: break; } goto L479; L494: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L495; } goto L479; L495: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + 
(8)]) { return 74; } goto L479; L1805: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L1806; goto L479; L1806: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1807; } goto L479; L1807: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L1808; goto L479; L1808: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L1864; goto L479; L1864: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L1865; if (general_operand (x2, QImode)) { operands[2] = x2; goto L1810; } L1835: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L1836; } goto L479; L1865: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[2] = x3; goto L1866; } goto L479; L1866: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L1867; goto L479; L1867: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 224; } goto L479; L1810: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 222; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L1835; L1836: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 223; } x1 = XEXP (x0, 1); goto L479; L3134: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L3135; goto L479; L3135: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L3136; } goto L479; L3136: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3137; goto L479; L3137: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3187; goto L479; L3187: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L13272; L3138: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x2, VOIDmode)) { operands[2] = x2; goto L3139; } goto L479; L13272: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ZERO_EXTEND: goto L3188; case ZERO_EXTRACT: goto L3246; default: break; } goto L3138; L3188: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (general_operand (x3, QImode)) { operands[2] = x3; goto L3189; } L3216: ATTRIBUTE_UNUSED_LABEL if (ext_register_operand (x3, VOIDmode)) { operands[2] = x3; goto L3217; } goto L3138; L3189: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 302; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L3216; L3217: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 303; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L3138; L3246: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[2] = x3; goto L3247; } goto L3138; L3247: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3248; goto L3138; L3248: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 304; } goto L3138; L3139: ATTRIBUTE_UNUSED_LABEL if (pnum_clobbers != NULL) { *pnum_clobbers = 1; return 
300; } x1 = XEXP (x0, 1); goto L479; L3539: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L3540; goto L479; L3540: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L3541; } goto L479; L3541: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3542; goto L479; L3542: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3570; goto L479; L3570: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L13274; L3543: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x2, VOIDmode)) { operands[2] = x2; goto L3544; } goto L479; L13274: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ZERO_EXTEND: goto L3571; case ZERO_EXTRACT: goto L3629; default: break; } goto L3543; L3571: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (general_operand (x3, QImode)) { operands[2] = x3; goto L3572; } L3599: ATTRIBUTE_UNUSED_LABEL if (ext_register_operand (x3, VOIDmode)) { operands[2] = x3; goto L3600; } goto L3543; L3572: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && (!TARGET_PARTIAL_REG_STALL || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 324; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L3599; L3600: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (!TARGET_PARTIAL_REG_STALL || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 325; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L3543; L3629: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[2] = x3; goto L3630; } goto L3543; L3630: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3631; goto L3543; L3631: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && ((!TARGET_PARTIAL_REG_STALL || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 326; } goto L3543; L3544: ATTRIBUTE_UNUSED_LABEL if (((!TARGET_PARTIAL_REG_STALL || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 323; } x1 = XEXP (x0, 1); goto L479; L3877: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L3878; goto L479; L3878: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L3879; } goto L479; L3879: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3880; goto L479; L3880: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3908; goto L479; L3908: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L13276; L3881: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x2, VOIDmode)) { operands[2] = x2; goto L3882; } goto L479; L13276: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ZERO_EXTEND: goto L3909; case ZERO_EXTRACT: goto L3967; default: break; } goto L3881; L3909: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (general_operand (x3, QImode)) { operands[2] = x3; goto L3910; } L3937: ATTRIBUTE_UNUSED_LABEL if (ext_register_operand (x3, VOIDmode)) { operands[2] = x3; goto L3938; } goto L3881; L3910: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && (!TARGET_PARTIAL_REG_STALL || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 343; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); 
goto L3937; L3938: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (!TARGET_PARTIAL_REG_STALL || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 344; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L3881; L3967: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[2] = x3; goto L3968; } goto L3881; L3968: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3969; goto L3881; L3969: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && ((!TARGET_PARTIAL_REG_STALL || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 345; } goto L3881; L3882: ATTRIBUTE_UNUSED_LABEL if (((!TARGET_PARTIAL_REG_STALL || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 342; } x1 = XEXP (x0, 1); goto L479; L480: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 72; } goto ret0; L294: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (const0_operand (x1, SImode)) { operands[1] = x1; goto L295; } L304: ATTRIBUTE_UNUSED_LABEL if (immediate_operand (x1, SImode)) { operands[1] = x1; goto L305; } x1 = XEXP (x0, 0); goto L13115; L295: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && (!TARGET_USE_MOV0 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 41; } x1 = XEXP (x0, 1); goto L304; L305: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && operands[1] == constm1_rtx && (TARGET_PENTIUM || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 42; } x1 = XEXP (x0, 0); goto L13115; L1024: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode) goto L13278; L308: ATTRIBUTE_UNUSED_LABEL if (general_operand (x1, SImode)) { operands[1] = x1; goto L309; } x1 = XEXP (x0, 0); goto L13116; L13278: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FIX: goto L1025; case PLUS: goto L1269; default: break; } goto L308; L1025: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L1026; } goto L308; L1026: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !reload_completed && !reload_in_progress && !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 152; } x1 = XEXP (x0, 1); goto L308; L1269: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L1270; goto L308; L1270: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, SImode)) { operands[3] = x3; goto L1271; } goto L308; L1271: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L1272; } goto L308; L1272: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, SImode)) { operands[2] = x2; goto L1273; } goto L308; L1273: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (PLUS, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 182; } x1 = XEXP (x0, 1); goto L308; L309: ATTRIBUTE_UNUSED_LABEL if (((TARGET_INTER_UNIT_MOVES || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 43; } L313: ATTRIBUTE_UNUSED_LABEL if (((!TARGET_INTER_UNIT_MOVES && !optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 44; } x1 = XEXP (x0, 0); goto L13116; L316: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x86_64_movabs_operand (x2, DImode)) { operands[0] = x2; goto L317; } goto L13152; L317: 
ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (nonmemory_operand (x1, SImode)) { operands[1] = x1; goto L318; } x1 = XEXP (x0, 0); goto L13152; L318: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_check_movabs (insn, 0))) { return 45; } x1 = XEXP (x0, 0); goto L13152; L321: ATTRIBUTE_UNUSED_LABEL tem = recog_2 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L13159; L7127: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) { return 617; } x1 = XEXP (x0, 0); goto L13152; L1477: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode) goto L13322; goto ret0; L13322: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case PLUS: goto L1478; case MINUS: goto L1987; case AND: goto L2984; case IOR: goto L3305; case XOR: goto L3688; case NEG: goto L4104; case NOT: goto L4532; case ASHIFT: goto L4688; case ASHIFTRT: goto L4950; case LSHIFTRT: goto L5318; case ROTATE: goto L5600; case ROTATERT: goto L5776; case FFS: goto L6407; default: break; } goto ret0; L1478: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L1479; } goto ret0; L1479: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, SImode)) { operands[2] = x2; goto L1480; } goto ret0; L1480: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (PLUS, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 201; } goto ret0; L1987: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L1988; } goto ret0; L1988: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L1989; if (general_operand (x2, SImode)) { operands[2] = x2; goto L2025; } goto ret0; L1989: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, SImode)) { operands[3] = x3; goto L1990; } goto ret0; L1990: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L1991; } goto ret0; L1991: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (MINUS, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 232; } goto ret0; L2025: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (MINUS, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 234; } goto ret0; L2984: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L2985; } goto ret0; L2985: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, SImode)) { operands[2] = x2; goto L2986; } goto ret0; L2986: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (AND, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 290; } goto ret0; L3305: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode) goto L13336; goto ret0; L13336: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ASHIFT: goto L4670; case ASHIFTRT: goto L4932; case SUBREG: case REG: case MEM: goto L13335; default: goto ret0; } L13335: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L3306; } goto ret0; L4670: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L4671; goto ret0; L4671: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4672; } goto ret0; L4672: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == LSHIFTRT) goto L4673; goto ret0; 
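/* Failure handling in these matchers is explicit backtracking: when a
   code or predicate test fails, the generated code re-derives its cursor
   rtxs from the top of the insn (x1 = XEXP (x0, 1); x2 = XEXP (x1, 1);
   and so on) and jumps to the label of the next alternative to try,
   falling through to ret0 (return -1) once every alternative for the
   current shape is exhausted.  */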
L4673: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L4674; } goto ret0; L4674: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == QImode && GET_CODE (x3) == MINUS) goto L4675; goto ret0; L4675: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4676; goto ret0; L4676: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 406; } goto ret0; L4932: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L4933; goto ret0; L4933: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4934; } goto ret0; L4934: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ASHIFT) goto L4935; goto ret0; L4935: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L4936; } goto ret0; L4936: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == QImode && GET_CODE (x3) == MINUS) goto L4937; goto ret0; L4937: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4938; goto ret0; L4938: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 424; } goto ret0; L3306: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, SImode)) { operands[2] = x2; goto L3307; } goto ret0; L3307: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (IOR, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 308; } goto ret0; L3688: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L3689; } goto ret0; L3689: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, SImode)) { operands[2] = x2; goto L3690; } goto ret0; L3690: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (XOR, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 330; } goto ret0; L4104: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L4105; } goto ret0; L4105: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (NEG, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 354; } goto ret0; L4532: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L4533; } goto ret0; L4533: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (NOT, SImode, operands))) { return 394; } goto ret0; L4688: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L4689; } goto ret0; L4689: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L4690; } goto ret0; L4690: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ASHIFT, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 407; } goto ret0; L4950: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L4951; } goto ret0; L4951: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == CONST_INT) goto L13338; L5011: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5012; } goto ret0; L13338: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x2, SImode)) { operands[2] = x2; goto L4952; } L13339: 
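/* The shift and rotate alternatives below try a const1_operand count
   first: the shift-by-one forms are accepted only under TARGET_SHIFT1 ||
   optimize_size (presumably when the one-operand, no-immediate shift
   encoding is worth using), and on failure matching falls back to the
   general nonmemory_operand count.  */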
ATTRIBUTE_UNUSED_LABEL if (const1_operand (x2, QImode)) { operands[2] = x2; goto L4982; } goto L5011; L4952: ATTRIBUTE_UNUSED_LABEL if ((INTVAL (operands[2]) == 31 && (TARGET_USE_CLTD || optimize_size) && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 425; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L13339; L4982: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ASHIFTRT, SImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 427; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5011; L5012: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ASHIFTRT, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 429; } goto ret0; L5318: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L5319; } goto ret0; L5319: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5320; } L5349: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5350; } goto ret0; L5320: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (LSHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 451; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5349; L5350: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (LSHIFTRT, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 453; } goto ret0; L5600: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L5601; } goto ret0; L5601: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5602; } L5631: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5632; } goto ret0; L5602: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATE, SImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 471; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5631; L5632: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATE, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 473; } goto ret0; L5776: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L5777; } goto ret0; L5777: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5778; } L5807: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5808; } goto ret0; L5778: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATERT, SImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 483; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5807; L5808: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATERT, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 485; } goto ret0; L6407: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L6408; } goto ret0; L6408: ATTRIBUTE_UNUSED_LABEL if (pnum_clobbers != NULL) { *pnum_clobbers = 2; return 536; } goto ret0; ret0: return -1; } static int recog_4 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx 
x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 0); if (push_operand (x1, QImode)) { operands[0] = x1; goto L386; } L13123: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, QImode)) { operands[0] = x1; goto L434; } L13125: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == MEM) goto L446; if (register_operand (x1, QImode)) { operands[0] = x1; goto L440; } L13126: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, QImode)) { operands[0] = x1; goto L462; } L13127: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, QImode)) { operands[0] = x1; goto L469; } goto ret0; L386: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (nonmemory_no_elim_operand (x1, QImode)) { operands[1] = x1; goto L387; } x1 = XEXP (x0, 0); goto L13123; L387: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 57; } L391: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 58; } x1 = XEXP (x0, 0); goto L13123; L434: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == QImode && GET_CODE (x1) == SIGN_EXTRACT) goto L435; if (general_operand (x1, QImode)) { operands[1] = x1; goto L395; } x1 = XEXP (x0, 0); goto L13125; L435: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ext_register_operand (x2, VOIDmode)) { operands[1] = x2; goto L436; } x1 = XEXP (x0, 0); goto L13125; L436: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L437; x1 = XEXP (x0, 0); goto L13125; L437: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (!TARGET_64BIT)) { return 65; } x1 = XEXP (x0, 0); goto L13125; L395: ATTRIBUTE_UNUSED_LABEL if ((GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) { return 59; } x1 = XEXP (x0, 0); goto L13125; L446: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x86_64_movabs_operand (x2, DImode)) { operands[0] = x2; goto L447; } goto L13126; L447: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (nonmemory_operand (x1, QImode)) { operands[1] = x1; goto L448; } x1 = XEXP (x0, 0); goto L13126; L448: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_check_movabs (insn, 0))) { return 67; } x1 = XEXP (x0, 0); goto L13126; L440: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == QImode) goto L13340; x1 = XEXP (x0, 0); goto L13126; L13340: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case SIGN_EXTRACT: goto L441; case MEM: goto L452; default: break; } x1 = XEXP (x0, 0); goto L13126; L441: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ext_register_operand (x2, VOIDmode)) { operands[1] = x2; goto L442; } x1 = XEXP (x0, 0); goto L13126; L442: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L443; x1 = XEXP (x0, 0); goto L13126; L443: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (TARGET_64BIT)) { return 66; } x1 = XEXP (x0, 0); goto L13126; L452: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x86_64_movabs_operand (x2, DImode)) { operands[1] = x2; goto L453; } x1 = XEXP (x0, 0); goto L13126; L453: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_check_movabs (insn, 1))) { return 68; } x1 = XEXP (x0, 0); goto L13126; L462: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == QImode) goto L13342; x1 = XEXP (x0, 0); goto L13127; L13342: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case SUBREG: goto L13356; case PLUS: goto L1233; case MINUS: goto L1951; case AND: goto L3068; case IOR: goto L3455; case XOR: goto L3838; 
case NEG: goto L4188; case NOT: goto L4582; case ASHIFT: goto L4786; case ASHIFTRT: goto L5148; case LSHIFTRT: goto L5486; case ROTATE: goto L5704; case ROTATERT: goto L5864; case EQ: case NE: case LE: case LT: case GE: case GT: case LEU: case LTU: case GEU: case GTU: case UNORDERED: case ORDERED: case UNLE: case UNLT: case UNGE: case UNGT: case LTGT: case UNEQ: goto L13355; default: x1 = XEXP (x0, 0); goto L13127; } L13355: ATTRIBUTE_UNUSED_LABEL if (ix86_comparison_operator (x1, QImode)) { operands[1] = x1; goto L5916; } x1 = XEXP (x0, 0); goto L13127; L13356: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 0) goto L463; x1 = XEXP (x0, 0); goto L13127; L463: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L464; x1 = XEXP (x0, 0); goto L13127; L464: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L465; } x1 = XEXP (x0, 0); goto L13127; L465: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L466; x1 = XEXP (x0, 0); goto L13127; L466: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (!TARGET_64BIT)) { return 70; } x1 = XEXP (x0, 0); goto L13127; L1233: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == QImode) goto L13357; x1 = XEXP (x0, 0); goto L13127; L13357: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == PLUS) goto L1234; if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L1690; } x1 = XEXP (x0, 0); goto L13127; L1234: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, QImode)) { operands[3] = x3; goto L1235; } x1 = XEXP (x0, 0); goto L13127; L1235: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L1236; } x1 = XEXP (x0, 0); goto L13127; L1236: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[2] = x2; goto L1237; } x1 = XEXP (x0, 0); goto L13127; L1237: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (PLUS, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 180; } x1 = XEXP (x0, 0); goto L13127; L1690: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[2] = x2; goto L1691; } x1 = XEXP (x0, 0); goto L13127; L1691: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (PLUS, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 215; } L1705: ATTRIBUTE_UNUSED_LABEL if ((TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (PLUS, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 216; } x1 = XEXP (x0, 0); goto L13127; L1951: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L1952; } x1 = XEXP (x0, 0); goto L13127; L1952: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == PLUS) goto L1953; if (general_operand (x2, QImode)) { operands[2] = x2; goto L2143; } x1 = XEXP (x0, 0); goto L13127; L1953: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, QImode)) { operands[3] = x3; goto L1954; } x1 = XEXP (x0, 0); goto L13127; L1954: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L1955; } x1 = XEXP (x0, 0); goto L13127; L1955: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (MINUS, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; 
return 230; } x1 = XEXP (x0, 0); goto L13127; L2143: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (MINUS, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 243; } x1 = XEXP (x0, 0); goto L13127; L3068: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L3069; } x1 = XEXP (x0, 0); goto L13127; L3069: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[2] = x2; goto L3070; } x1 = XEXP (x0, 0); goto L13127; L3070: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (AND, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 296; } x1 = XEXP (x0, 0); goto L13127; L3455: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L3456; } x1 = XEXP (x0, 0); goto L13127; L3456: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[2] = x2; goto L3457; } x1 = XEXP (x0, 0); goto L13127; L3457: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (IOR, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 318; } x1 = XEXP (x0, 0); goto L13127; L3838: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L3839; } x1 = XEXP (x0, 0); goto L13127; L3839: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[2] = x2; goto L3840; } x1 = XEXP (x0, 0); goto L13127; L3840: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (XOR, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 340; } x1 = XEXP (x0, 0); goto L13127; L4188: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L4189; } x1 = XEXP (x0, 0); goto L13127; L4189: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (NEG, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 360; } x1 = XEXP (x0, 0); goto L13127; L4582: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L4583; } x1 = XEXP (x0, 0); goto L13127; L4583: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (NOT, QImode, operands))) { return 400; } x1 = XEXP (x0, 0); goto L13127; L4786: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L4787; } x1 = XEXP (x0, 0); goto L13127; L4787: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L4788; } x1 = XEXP (x0, 0); goto L13127; L4788: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (ASHIFT, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 414; } L4802: ATTRIBUTE_UNUSED_LABEL if ((TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (ASHIFT, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 415; } x1 = XEXP (x0, 0); goto L13127; L5148: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L5149; } x1 = XEXP (x0, 0); goto L13127; L5149: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5150; } L5179: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5180; } x1 = XEXP (x0, 0); goto L13127; L5150: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ASHIFTRT, QImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; 
return 439; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5179; L5180: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ASHIFTRT, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 441; } x1 = XEXP (x0, 0); goto L13127; L5486: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L5487; } x1 = XEXP (x0, 0); goto L13127; L5487: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5488; } L5517: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5518; } x1 = XEXP (x0, 0); goto L13127; L5488: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (LSHIFTRT, QImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 463; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5517; L5518: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (LSHIFTRT, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 465; } x1 = XEXP (x0, 0); goto L13127; L5704: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L5705; } x1 = XEXP (x0, 0); goto L13127; L5705: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5706; } L5735: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5736; } x1 = XEXP (x0, 0); goto L13127; L5706: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATE, QImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 478; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5735; L5736: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATE, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 480; } x1 = XEXP (x0, 0); goto L13127; L5864: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L5865; } x1 = XEXP (x0, 0); goto L13127; L5865: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5866; } L5895: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5896; } x1 = XEXP (x0, 0); goto L13127; L5866: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATERT, QImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 489; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5895; L5896: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ROTATERT, QImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 491; } x1 = XEXP (x0, 0); goto L13127; L5916: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L5917; x1 = XEXP (x0, 0); goto L13127; L5917: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) { return 493; } x1 = XEXP (x0, 0); goto L13127; L469: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == QImode) goto L13359; goto ret0; L13359: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case SUBREG: goto L13364; case MULT: goto L2253; case DIV: goto L2529; case UDIV: goto L2543; case IF_THEN_ELSE: goto L7778; default: break; } goto ret0; L13364: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 0) goto L470; goto ret0; L470: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L471; goto ret0; L471: ATTRIBUTE_UNUSED_LABEL x3 = 
XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L472; } goto ret0; L472: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L473; goto ret0; L473: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (TARGET_64BIT)) { return 71; } goto ret0; L2253: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L2254; } goto ret0; L2254: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, QImode)) { operands[2] = x2; goto L2255; } goto ret0; L2255: ATTRIBUTE_UNUSED_LABEL if ((TARGET_QIMODE_MATH && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 251; } goto ret0; L2529: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, HImode)) { operands[1] = x2; goto L2530; } goto ret0; L2530: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, QImode)) { operands[2] = x2; goto L2531; } goto ret0; L2531: ATTRIBUTE_UNUSED_LABEL if ((TARGET_QIMODE_MATH) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 264; } goto ret0; L2543: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, HImode)) { operands[1] = x2; goto L2544; } goto ret0; L2544: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, QImode)) { operands[2] = x2; goto L2545; } goto ret0; L2545: ATTRIBUTE_UNUSED_LABEL if ((TARGET_QIMODE_MATH) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 265; } goto ret0; L7778: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L7779; } goto ret0; L7779: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (flags_reg_operand (x3, VOIDmode)) { operands[4] = x3; goto L7780; } goto ret0; L7780: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7781; goto ret0; L7781: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, QImode)) { operands[2] = x2; goto L7782; } goto ret0; L7782: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (register_operand (x2, QImode)) { operands[3] = x2; goto L7783; } goto ret0; L7783: ATTRIBUTE_UNUSED_LABEL if ((TARGET_CMOVE && !TARGET_PARTIAL_REG_STALL)) { return 653; } goto ret0; ret0: return -1; } static int recog_5 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case MEM: goto L574; case ZERO_EXTEND: goto L764; case SIGN_EXTEND: goto L789; case FIX: goto L1008; case TRUNCATE: goto L2382; case IOR: goto L3336; case XOR: goto L3719; case LSHIFTRT: goto L4120; default: break; } goto ret0; L574: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x86_64_movabs_operand (x2, DImode)) { operands[1] = x2; goto L575; } goto ret0; L575: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_check_movabs (insn, 1))) { return 86; } goto ret0; L764: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case HImode: goto L13380; case QImode: goto L13381; case SImode: goto L13382; default: break; } goto ret0; L13380: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L765; } 
goto ret0; L765: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 115; } goto ret0; L13381: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L770; } goto ret0; L770: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 116; } goto ret0; L13382: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case SIGN_EXTEND: goto L809; case PLUS: goto L1289; case SUBREG: goto L13392; case MINUS: goto L2007; case MULT: goto L2225; case TRUNCATE: goto L2434; case AND: goto L3000; case IOR: goto L3321; case XOR: goto L3704; case NOT: goto L4538; default: break; } goto ret0; L809: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case HImode: goto L13393; case QImode: goto L13394; default: break; } goto ret0; L13393: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L810; } goto ret0; L810: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 122; } goto ret0; L13394: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L824; } goto ret0; L824: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 125; } goto ret0; L1289: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L13395; goto ret0; L13395: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case PLUS: goto L1290; case MULT: goto L1365; case SUBREG: case REG: case MEM: goto L13397; default: goto ret0; } L13397: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L1495; } goto ret0; L1290: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L13400; goto ret0; L13400: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x4) == MULT) goto L1385; if (ix86_carry_flag_operator (x4, SImode)) { operands[3] = x4; goto L1291; } if (index_register_operand (x4, SImode)) { operands[1] = x4; goto L1349; } goto ret0; L1385: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (index_register_operand (x5, SImode)) { operands[1] = x5; goto L1386; } goto ret0; L1386: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (const248_operand (x5, SImode)) { operands[2] = x5; goto L1387; } goto ret0; L1387: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, SImode)) { operands[3] = x4; goto L1388; } goto ret0; L1388: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[4] = x3; goto L1389; } goto ret0; L1389: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 195; } goto ret0; L1291: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L1292; } goto ret0; L1292: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L1293; } goto ret0; L1293: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (PLUS, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 183; } goto ret0; L1349: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, SImode)) { operands[2] = x4; goto L1350; } goto ret0; L1350: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[3] = x3; goto L1351; } goto ret0; L1351: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 191; } goto ret0; L1365: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (index_register_operand (x4, SImode)) { operands[1] = x4; goto L1366; } goto ret0; L1366: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const248_operand (x4, SImode)) { operands[2] = x4; goto L1367; } goto ret0; L1367: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if 
(nonmemory_operand (x3, SImode)) { operands[3] = x3; goto L1368; } goto ret0; L1368: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 193; } goto ret0; L1495: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L1496; } goto ret0; L1496: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (PLUS, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 202; } goto ret0; L13392: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 1) == 0) goto L1329; goto ret0; L1329: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (no_seg_address_operand (x3, DImode)) { operands[1] = x3; goto L1330; } goto ret0; L1330: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 188; } goto ret0; L2007: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L2008; } goto ret0; L2008: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == PLUS) goto L2009; if (general_operand (x3, SImode)) { operands[2] = x3; goto L2041; } goto ret0; L2009: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ix86_carry_flag_operator (x4, SImode)) { operands[3] = x4; goto L2010; } goto ret0; L2010: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L2011; } goto ret0; L2011: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (MINUS, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 233; } goto ret0; L2041: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (MINUS, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 235; } goto ret0; L2225: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L2226; } goto ret0; L2226: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L2227; } goto ret0; L2227: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 249; } goto ret0; L2434: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == LSHIFTRT) goto L2435; goto ret0; L2435: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DImode && GET_CODE (x4) == MULT) goto L2436; goto ret0; L2436: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (GET_MODE (x5) == DImode) goto L13401; goto ret0; L13401: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x5)) { case ZERO_EXTEND: goto L2437; case SIGN_EXTEND: goto L2514; default: break; } goto ret0; L2437: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, SImode)) { operands[1] = x6; goto L2438; } goto ret0; L2438: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_MODE (x5) == DImode && GET_CODE (x5) == ZERO_EXTEND) goto L2439; goto ret0; L2439: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, SImode)) { operands[2] = x6; goto L2440; } goto ret0; L2440: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (32)] && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 260; } goto ret0; L2514: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, SImode)) { operands[1] = x6; goto L2515; } goto ret0; L2515: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_MODE (x5) == DImode && GET_CODE (x5) == SIGN_EXTEND) goto L2516; 
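/* Note on the pnum_clobbers protocol used throughout these generated
   subroutines: patterns guarded by "pnum_clobbers != NULL" (such as the
   widening-multiply high-part cases at L2517 below) are accepted only when
   the caller passes a place to record clobbers; the matcher stores the
   pattern's extra CLOBBER count there (2 in that case) and returns the
   pattern's insn code number.  */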
goto ret0; L2516: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, SImode)) { operands[2] = x6; goto L2517; } goto ret0; L2517: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (32)] && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 263; } goto ret0; L3000: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L3001; } goto ret0; L3001: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L3002; } goto ret0; L3002: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (AND, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 291; } goto ret0; L3321: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L3322; } goto ret0; L3322: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L3323; } goto ret0; L3323: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (IOR, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 309; } goto ret0; L3704: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L3705; } goto ret0; L3705: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L3706; } goto ret0; L3706: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (XOR, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 331; } goto ret0; L4538: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L4539; } goto ret0; L4539: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_unary_operator_ok (NOT, SImode, operands))) { return 395; } goto ret0; L789: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L13403; case HImode: goto L13404; case QImode: goto L13405; default: break; } goto ret0; L13403: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L790; } goto ret0; L790: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 118; } goto ret0; L13404: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L795; } goto ret0; L795: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 119; } goto ret0; L13405: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L800; } goto ret0; L800: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 120; } goto ret0; L1008: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SFmode: goto L13406; case DFmode: goto L13407; default: break; } goto ret0; L13406: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L1009; } goto ret0; L1009: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_SSE)) { return 150; } goto ret0; L13407: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L1014; } goto ret0; L1014: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_SSE2)) { return 151; } goto ret0; L2382: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == TImode && GET_CODE (x2) == LSHIFTRT) goto L2383; goto ret0; L2383: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == TImode && GET_CODE (x3) == MULT) goto L2384; goto ret0; L2384: 
ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == TImode) goto L13408; goto ret0; L13408: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x4)) { case ZERO_EXTEND: goto L2385; case SIGN_EXTEND: goto L2462; default: break; } goto ret0; L2385: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, DImode)) { operands[1] = x5; goto L2386; } goto ret0; L2386: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == TImode && GET_CODE (x4) == ZERO_EXTEND) goto L2387; goto ret0; L2387: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, DImode)) { operands[2] = x5; goto L2388; } goto ret0; L2388: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (64)] && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 258; } goto ret0; L2462: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, DImode)) { operands[1] = x5; goto L2463; } goto ret0; L2463: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == TImode && GET_CODE (x4) == SIGN_EXTEND) goto L2464; goto ret0; L2464: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, DImode)) { operands[2] = x5; goto L2465; } goto ret0; L2465: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (64)] && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 261; } goto ret0; L3336: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L3337; goto ret0; L3337: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L3338; } goto ret0; L3338: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_zext_immediate_operand (x2, DImode)) { operands[2] = x2; goto L3339; } goto ret0; L3339: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 310; } goto ret0; L3719: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L3720; goto ret0; L3720: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L3721; } goto ret0; L3721: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_zext_immediate_operand (x2, DImode)) { operands[2] = x2; goto L3722; } goto ret0; L3722: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (XOR, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 332; } goto ret0; L4120: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == NEG) goto L4121; goto ret0; L4121: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ASHIFT) goto L4122; goto ret0; L4122: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DImode)) { operands[1] = x4; goto L4123; } goto ret0; L4123: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4124; goto ret0; L4124: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (32)] && (TARGET_64BIT && ix86_unary_operator_ok (NEG, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 355; } goto ret0; ret0: return -1; } static int recog_6 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const 
operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 0); switch (GET_CODE (x1)) { case ZERO_EXTRACT: goto L483; case MEM: goto L13365; case SUBREG: case REG: goto L13131; default: goto L13132; } L13131: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DImode)) { operands[0] = x1; goto L542; } L13132: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, DImode)) { operands[0] = x1; goto L736; } L13133: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == MEM) goto L568; if (register_operand (x1, DImode)) { operands[0] = x1; goto L573; } L13151: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, DImode)) { operands[0] = x1; goto L1400; } L13158: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DImode)) { operands[0] = x1; goto L4634; } goto ret0; L483: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ext_register_operand (x2, VOIDmode)) { operands[0] = x2; goto L484; } goto ret0; L484: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L485; goto ret0; L485: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L486; goto ret0; L486: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (nonmemory_operand (x1, DImode)) { operands[1] = x1; goto L487; } goto ret0; L487: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 73; } goto ret0; L13365: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, DImode)) { operands[0] = x1; goto L498; } goto L13132; L498: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_no_elim_operand (x1, DImode)) { operands[1] = x1; goto L499; } x1 = XEXP (x0, 0); goto L13132; L499: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 75; } L503: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 76; } x1 = XEXP (x0, 0); goto L13132; L542: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (const0_operand (x1, DImode)) { operands[1] = x1; goto L543; } L552: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x1, DImode)) { operands[1] = x1; goto L553; } x1 = XEXP (x0, 0); goto L13132; L543: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (!TARGET_USE_MOV0 || optimize_size) && reload_completed) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 80; } x1 = XEXP (x0, 1); goto L552; L553: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (TARGET_PENTIUM || optimize_size) && reload_completed && operands[1] == constm1_rtx) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 81; } x1 = XEXP (x0, 0); goto L13132; L736: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode) goto L13366; L556: ATTRIBUTE_UNUSED_LABEL if (general_operand (x1, DImode)) { operands[1] = x1; goto L557; } x1 = XEXP (x0, 0); goto L13133; L13366: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case ZERO_EXTEND: goto L737; case SIGN_EXTEND: goto L784; case FIX: goto L955; case PLUS: goto L1188; default: break; } goto L556; L737: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L738; } goto L556; L738: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && !TARGET_INTER_UNIT_MOVES) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 111; } L750: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_INTER_UNIT_MOVES) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 112; } L755: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && !TARGET_INTER_UNIT_MOVES)) { return 113; } L760: 
ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_INTER_UNIT_MOVES)) { return 114; } x1 = XEXP (x0, 1); goto L556; L784: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L785; } goto L556; L785: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 117; } x1 = XEXP (x0, 1); goto L556; L955: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L956; } goto L556; L956: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !reload_completed && !reload_in_progress && (!SSE_FLOAT_MODE_P (GET_MODE (operands[1])) || !TARGET_64BIT)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 147; } x1 = XEXP (x0, 1); goto L556; L1188: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode) goto L13371; goto L556; L13371: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == PLUS) goto L1205; if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L1189; } goto L556; L1205: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, DImode)) { operands[3] = x3; goto L1206; } goto L556; L1206: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L1207; } goto L556; L1207: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_general_operand (x2, DImode)) { operands[2] = x2; goto L1208; } goto L556; L1208: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 178; } x1 = XEXP (x0, 1); goto L556; L1189: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, DImode)) { operands[2] = x2; goto L1190; } goto L556; L1190: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 177; } x1 = XEXP (x0, 1); goto L556; L557: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 82; } L561: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (TARGET_INTER_UNIT_MOVES || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 83; } L565: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (!TARGET_INTER_UNIT_MOVES && !optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 84; } x1 = XEXP (x0, 0); goto L13133; L568: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x86_64_movabs_operand (x2, DImode)) { operands[0] = x2; goto L569; } goto L13151; L569: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (nonmemory_operand (x1, DImode)) { operands[1] = x1; goto L570; } x1 = XEXP (x0, 0); goto L13151; L570: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_check_movabs (insn, 0))) { return 85; } x1 = XEXP (x0, 0); goto L13151; L573: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode) goto L13372; L1333: ATTRIBUTE_UNUSED_LABEL if (no_seg_address_operand (x1, DImode)) { operands[1] = x1; goto L1334; } L2194: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x1) == DImode && GET_CODE (x1) == MULT) goto L2195; x1 = XEXP (x0, 0); goto L13151; L13372: ATTRIBUTE_UNUSED_LABEL tem = recog_5 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L1333; L1334: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 189; } x1 = XEXP (x0, 1); goto L2194; L2195: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode) goto L13411; x1 = XEXP (x0, 0); 
goto L13151; L13411: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ZERO_EXTEND: goto L2324; case SIGN_EXTEND: goto L2360; case SUBREG: case REG: case MEM: goto L13410; default: x1 = XEXP (x0, 0); goto L13151; } L13410: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L2196; } x1 = XEXP (x0, 0); goto L13151; L2324: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L2325; } x1 = XEXP (x0, 0); goto L13151; L2325: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L2326; x1 = XEXP (x0, 0); goto L13151; L2326: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L2327; } x1 = XEXP (x0, 0); goto L13151; L2327: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 255; } x1 = XEXP (x0, 0); goto L13151; L2360: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L2361; } x1 = XEXP (x0, 0); goto L13151; L2361: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == SIGN_EXTEND) goto L2362; x1 = XEXP (x0, 0); goto L13151; L2362: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L2363; } x1 = XEXP (x0, 0); goto L13151; L2363: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 257; } x1 = XEXP (x0, 0); goto L13151; L2196: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_general_operand (x2, DImode)) { operands[2] = x2; goto L2197; } x1 = XEXP (x0, 0); goto L13151; L2197: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 247; } x1 = XEXP (x0, 0); goto L13151; L1400: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode) goto L13413; x1 = XEXP (x0, 0); goto L13158; L13413: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case PLUS: goto L1401; case MINUS: goto L1879; case AND: goto L2957; case IOR: goto L3260; case XOR: goto L3643; case NEG: goto L4069; case NOT: goto L4516; case ASHIFT: goto L4606; case ASHIFTRT: goto L4827; case LSHIFTRT: goto L5234; case ROTATE: goto L5572; case ROTATERT: goto L5748; default: break; } x1 = XEXP (x0, 0); goto L13158; L1401: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L1402; } x1 = XEXP (x0, 0); goto L13158; L1402: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_general_operand (x2, DImode)) { operands[2] = x2; goto L1403; } x1 = XEXP (x0, 0); goto L13158; L1403: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 196; } x1 = XEXP (x0, 0); goto L13158; L1879: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L1896; } x1 = XEXP (x0, 0); goto L13158; L1896: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L1897; if (general_operand (x2, DImode)) { operands[2] = x2; goto L1881; } L1912: ATTRIBUTE_UNUSED_LABEL if (x86_64_general_operand (x2, DImode)) { operands[2] = x2; goto L1913; } x1 = XEXP (x0, 0); 
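/* The preceding "x1 = XEXP (x0, 0);" backtracks to the SET destination;
   the goto that follows re-enters the decision tree at the next label so the
   destination can be retried against the remaining operand predicates.  */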
goto L13158; L1897: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, DImode)) { operands[3] = x3; goto L1898; } x1 = XEXP (x0, 0); goto L13158; L1898: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L1899; } x1 = XEXP (x0, 0); goto L13158; L1899: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 226; } x1 = XEXP (x0, 0); goto L13158; L1881: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 225; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L1912; L1913: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 227; } x1 = XEXP (x0, 0); goto L13158; L2957: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L2958; } x1 = XEXP (x0, 0); goto L13158; L2958: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_szext_general_operand (x2, DImode)) { operands[2] = x2; goto L2959; } x1 = XEXP (x0, 0); goto L13158; L2959: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (AND, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 288; } x1 = XEXP (x0, 0); goto L13158; L3260: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L3261; } x1 = XEXP (x0, 0); goto L13158; L3261: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_general_operand (x2, DImode)) { operands[2] = x2; goto L3262; } x1 = XEXP (x0, 0); goto L13158; L3262: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (IOR, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 305; } x1 = XEXP (x0, 0); goto L13158; L3643: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L3644; } x1 = XEXP (x0, 0); goto L13158; L3644: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_general_operand (x2, DImode)) { operands[2] = x2; goto L3645; } x1 = XEXP (x0, 0); goto L13158; L3645: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (XOR, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 327; } x1 = XEXP (x0, 0); goto L13158; L4069: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (general_operand (x2, DImode)) { operands[1] = x2; goto L4070; } L4081: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L4082; } x1 = XEXP (x0, 0); goto L13158; L4070: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && ix86_unary_operator_ok (NEG, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 351; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L4081; L4082: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_unary_operator_ok (NEG, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 352; } x1 = XEXP (x0, 0); goto L13158; L4516: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L4517; } x1 = XEXP (x0, 0); goto L13158; L4517: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_unary_operator_ok (NOT, DImode, operands))) { return 392; } x1 = XEXP (x0, 0); goto L13158; L4606: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L4607; } x1 = XEXP (x0, 0); 
goto L13158; L4607: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L4608; } x1 = XEXP (x0, 0); goto L13158; L4608: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ASHIFT, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 402; } x1 = XEXP (x0, 0); goto L13158; L4827: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L4828; } x1 = XEXP (x0, 0); goto L13158; L4828: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == CONST_INT) goto L13425; L4856: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L4857; } x1 = XEXP (x0, 0); goto L13158; L13425: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x2, DImode)) { operands[2] = x2; goto L4829; } L13426: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x2, QImode)) { operands[2] = x2; goto L4843; } goto L4856; L4829: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && INTVAL (operands[2]) == 63 && (TARGET_USE_CLTD || optimize_size) && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 417; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L13426; L4843: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, DImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 418; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L4856; L4857: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 419; } x1 = XEXP (x0, 0); goto L13158; L5234: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L5235; } x1 = XEXP (x0, 0); goto L13158; L5235: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5236; } L5249: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5250; } x1 = XEXP (x0, 0); goto L13158; L5236: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 445; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5249; L5250: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 446; } x1 = XEXP (x0, 0); goto L13158; L5572: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L5573; } x1 = XEXP (x0, 0); goto L13158; L5573: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5574; } L5587: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5588; } x1 = XEXP (x0, 0); goto L13158; L5574: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ROTATE, DImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 469; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5587; L5588: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ROTATE, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 470; } x1 = XEXP (x0, 0); goto L13158; L5748: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L5749; } x1 = XEXP (x0, 0); goto L13158; L5749: 
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5750; } L5763: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5764; } x1 = XEXP (x0, 0); goto L13158; L5750: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, DImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 481; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5763; L5764: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 482; } x1 = XEXP (x0, 0); goto L13158; L4634: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode) goto L13427; goto ret0; L13427: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case ASHIFT: goto L4635; case ZERO_EXTEND: goto L4703; case ASHIFTRT: goto L4897; case LSHIFTRT: goto L5290; case FFS: goto L6431; case CTZ: goto L6465; case MINUS: goto L6495; case UNSPEC: goto L13437; case PLUS: goto L6656; case IF_THEN_ELSE: goto L7730; default: break; } goto ret0; L4635: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L4636; } goto ret0; L4636: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L4637; } goto ret0; L4637: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_CMOVE) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 404; } L4651: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 405; } goto ret0; L4703: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode) goto L13438; goto ret0; L13438: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ASHIFT: goto L4704; case ASHIFTRT: goto L4966; case LSHIFTRT: goto L5364; case ROTATE: goto L5616; case ROTATERT: goto L5792; default: break; } goto ret0; L4704: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L4705; } goto ret0; L4705: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4706; } goto ret0; L4706: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ASHIFT, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 408; } goto ret0; L4966: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L4967; } goto ret0; L4967: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT) goto L13443; L5027: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5028; } goto ret0; L13443: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x3, SImode)) { operands[2] = x3; goto L4968; } L13444: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x3, QImode)) { operands[2] = x3; goto L4998; } goto L5027; L4968: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (TARGET_USE_CLTD || optimize_size) && INTVAL (operands[2]) == 31 && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 426; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L13444; L4998: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, SImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 428; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L5027; L5028: ATTRIBUTE_UNUSED_LABEL if 
((TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 430; } goto ret0; L5364: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L5365; } goto ret0; L5365: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5366; } goto ret0; L5366: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 454; } goto ret0; L5616: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L5617; } goto ret0; L5617: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5618; } L5647: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5648; } goto ret0; L5618: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ROTATE, SImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 472; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L5647; L5648: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ROTATE, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 474; } goto ret0; L5792: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L5793; } goto ret0; L5793: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5794; } L5823: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5824; } goto ret0; L5794: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, SImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 484; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L5823; L5824: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, SImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 486; } goto ret0; L4897: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L4898; } goto ret0; L4898: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L4899; } goto ret0; L4899: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_CMOVE) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 422; } L4913: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 423; } goto ret0; L5290: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode) goto L13446; goto ret0; L13446: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == ZERO_EXTEND) goto L5334; if (register_operand (x2, DImode)) { operands[1] = x2; goto L5291; } goto ret0; L5334: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L5335; } goto ret0; L5335: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[2] = x2; goto L5336; } goto ret0; L5336: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 452; } goto ret0; L5291: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, QImode)) { operands[2] = x2; goto L5292; } 
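/* A bare "goto ret0;" after a failed test abandons the search in this
   subroutine; ret0 returns -1, i.e. no insn pattern was recognized here.  */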
goto ret0; L5292: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_CMOVE) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 449; } L5306: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 450; } goto ret0; L6431: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L6432; } goto ret0; L6432: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_CMOVE) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 538; } goto ret0; L6465: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L6466; } goto ret0; L6466: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 541; } goto ret0; L6495: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (63)]) goto L6496; goto ret0; L6496: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == CLZ) goto L6497; goto ret0; L6497: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L6498; } goto ret0; L6498: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 543; } goto ret0; L13437: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 15) goto L6643; goto ret0; L6643: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT)) { return 553; } goto ret0; L6656: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 15) goto L6657; goto ret0; L6657: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L6658; goto ret0; L6658: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, DImode)) { operands[1] = x2; goto L6659; } goto ret0; L6659: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 554; } goto ret0; L7730: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_carry_flag_operator (x2, VOIDmode)) { operands[1] = x2; goto L7731; } L7736: ATTRIBUTE_UNUSED_LABEL if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L7737; } goto ret0; L7731: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (-1)]) goto L7732; x2 = XEXP (x1, 0); goto L7736; L7732: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 648; } x2 = XEXP (x1, 0); goto L7736; L7737: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L7738; goto ret0; L7738: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7739; goto ret0; L7739: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, DImode)) { operands[2] = x2; goto L7740; } goto ret0; L7740: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, DImode)) { operands[3] = x2; goto L7741; } goto ret0; L7741: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_CMOVE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM))) { return 649; } goto ret0; ret0: return -1; } static int recog_7 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = 
&recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case NEG: goto L4327; case ABS: goto L4483; case SQRT: goto L6833; case UNSPEC: goto L13468; case IF_THEN_ELSE: goto L7834; case EQ: case LT: case LE: case UNORDERED: case NE: case UNGE: case UNGT: case ORDERED: case UNEQ: case UNLT: case UNLE: case LTGT: case GE: case GT: goto L13461; case PLUS: case MINUS: case MULT: case DIV: goto L13462; default: goto ret0; } L13461: ATTRIBUTE_UNUSED_LABEL if (sse_comparison_operator (x1, SFmode)) { operands[1] = x1; goto L5927; } L13462: ATTRIBUTE_UNUSED_LABEL if (binary_fp_operator (x1, SFmode)) { operands[3] = x1; goto L6663; } goto ret0; L4327: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SFmode)) { operands[1] = x2; goto L4328; } goto ret0; L4328: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed)) { return 371; } goto ret0; L4483: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SFmode)) { operands[1] = x2; goto L4484; } goto ret0; L4484: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed)) { return 386; } goto ret0; L6833: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SFmode) goto L13471; goto ret0; L13471: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L6834; } L13472: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SFmode)) { operands[1] = x2; goto L6844; } goto ret0; L6834: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && (TARGET_SSE_MATH && TARGET_MIX_SSE_I387))) { return 581; } L6839: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE_MATH && (!TARGET_80387 || !TARGET_MIX_SSE_I387))) { return 582; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L13472; L6844: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && !TARGET_SSE_MATH)) { return 583; } goto ret0; L13468: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x1, 0)) { case 1: goto L13473; case 2: goto L13475; default: break; } goto ret0; L13473: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 21L: goto L6921; case 22L: goto L6942; default: break; } goto ret0; L6921: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, SFmode)) { operands[1] = x2; goto L6922; } goto ret0; L6922: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 594; } goto ret0; L6942: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, SFmode)) { operands[1] = x2; goto L6943; } goto ret0; L6943: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 598; } goto ret0; L13475: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 65) goto L7047; goto ret0; L7047: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, SFmode)) { operands[2] = x2; goto L7048; } goto ret0; L7048: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (register_operand (x2, SFmode)) { operands[1] = x2; goto L7049; } goto ret0; L7049: ATTRIBUTE_UNUSED_LABEL if ((! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 609; } goto ret0; L7834: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_CODE (x2)) { case LT: goto L7835; case GT: goto L7927; default: break; } L7787: ATTRIBUTE_UNUSED_LABEL if (fcmov_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L7788; } L8055: ATTRIBUTE_UNUSED_LABEL if (sse_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L8056; } L8077: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == EQ) goto L8078; L8148: ATTRIBUTE_UNUSED_LABEL if (fcmov_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L8149; } goto ret0; L7835: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SFmode) goto L13476; goto L7787; L13476: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[1] = x3; goto L7836; } L13477: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, SFmode)) { operands[1] = x3; goto L7855; } L13478: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[1] = x3; goto L7863; } goto L7787; L7836: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L7837; } x3 = XEXP (x2, 0); goto L13477; L7837: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7838; x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13477; L7838: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE && TARGET_IEEE_FP) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 658; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13477; L7855: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L7856; } x3 = XEXP (x2, 0); goto L13478; L7856: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7857; x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13478; L7857: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE && !TARGET_IEEE_FP && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 659; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13478; L7863: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L7864; } goto L7787; L7864: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7865; x2 = XEXP (x1, 0); goto L7787; L7865: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE && reload_completed)) { return 660; } x2 = XEXP (x1, 0); goto L7787; L7927: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SFmode) goto L13479; goto L7787; L13479: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[1] = x3; goto L7928; } L13480: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, SFmode)) { operands[1] = x3; goto L7947; } L13481: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[1] = x3; goto L7955; } goto L7787; L7928: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L7929; } x3 = XEXP (x2, 0); goto L13480; L7929: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7930; x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13480; L7930: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE && TARGET_IEEE_FP) && pnum_clobbers != NULL) { *pnum_clobbers = 1; 
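/* With the extra clobber recorded, insn code 664 is the SFmode GT
   conditional move whose arms repeat the compared operands; it is only
   accepted for TARGET_SSE && TARGET_IEEE_FP.  */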
return 664; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13480; L7947: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L7948; } x3 = XEXP (x2, 0); goto L13481; L7948: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7949; x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13481; L7949: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE && !TARGET_IEEE_FP && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 665; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13481; L7955: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L7956; } goto L7787; L7956: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7957; x2 = XEXP (x1, 0); goto L7787; L7957: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE && reload_completed)) { return 666; } x2 = XEXP (x1, 0); goto L7787; L7788: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L7789; goto L8055; L7789: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7790; goto L8055; L7790: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, SFmode)) { operands[2] = x2; goto L7791; } x2 = XEXP (x1, 0); goto L8055; L7791: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, SFmode)) { operands[3] = x2; goto L7792; } x2 = XEXP (x1, 0); goto L8055; L7792: ATTRIBUTE_UNUSED_LABEL if ((TARGET_CMOVE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM))) { return 654; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L8055; L8056: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SFmode) goto L13482; goto L8077; L13482: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, SFmode)) { operands[4] = x3; goto L8057; } L13483: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[4] = x3; goto L8132; } goto L8077; L8057: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[5] = x3; goto L8058; } x3 = XEXP (x2, 0); goto L13483; L8058: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, SFmode)) { operands[2] = x2; goto L8059; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13483; L8059: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, SFmode)) { operands[3] = x2; goto L8060; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13483; L8060: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM) /* Avoid combine from being smart and converting min/max instruction patterns into conditional moves. 
*/ && ((GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != GT && GET_CODE (operands[1]) != UNLE && GET_CODE (operands[1]) != UNGE) || !rtx_equal_p (operands[4], operands[2]) || !rtx_equal_p (operands[5], operands[3])) && (!TARGET_IEEE_FP || (GET_CODE (operands[1]) != EQ && GET_CODE (operands[1]) != NE))) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 673; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13483; L8132: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[5] = x3; goto L8133; } goto L8077; L8133: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, SFmode)) { operands[2] = x2; goto L8134; } if (const0_operand (x2, SFmode)) { operands[2] = x2; goto L8143; } x2 = XEXP (x1, 0); goto L8077; L8134: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (const0_operand (x2, SFmode)) { operands[3] = x2; goto L8135; } x2 = XEXP (x1, 0); goto L8077; L8135: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 677; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L8077; L8143: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (register_operand (x2, SFmode)) { operands[3] = x2; goto L8144; } x2 = XEXP (x1, 0); goto L8077; L8144: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 678; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L8077; L8078: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[3] = x3; goto L8079; } goto L8148; L8079: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[4] = x3; goto L8080; } goto L8148; L8080: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L8081; } x2 = XEXP (x1, 0); goto L8148; L8081: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, SFmode)) { operands[2] = x2; goto L8082; } x2 = XEXP (x1, 0); goto L8148; L8082: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 674; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L8148; L8149: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[4] = x3; goto L8150; } goto ret0; L8150: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SFmode)) { operands[5] = x3; goto L8151; } goto ret0; L8151: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, SFmode)) { operands[2] = x2; goto L8152; } if (const0_operand (x2, SFmode)) { operands[2] = x2; goto L8161; } goto ret0; L8152: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (const0_operand (x2, SFmode)) { operands[3] = x2; goto L8153; } goto ret0; L8153: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 679; } goto ret0; L8161: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (register_operand (x2, SFmode)) { operands[3] = x2; goto L8162; } goto ret0; L8162: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 680; } goto ret0; L5927: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SFmode)) { operands[2] = x2; goto L5928; } goto ret0; L5928: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, SFmode)) { operands[3] = x2; goto L5929; } goto ret0; L5929: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && reload_completed)) { return 495; } goto ret0; L6663: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SFmode) goto L13486; goto ret0; L13486: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT) goto L6724; if (nonimmediate_operand (x2, SFmode)) { 
operands[1] = x2; goto L6664; } L13485: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SFmode)) { operands[1] = x2; goto L6718; } goto ret0; L6724: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L6725; } goto ret0; L6725: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, SFmode)) { operands[2] = x2; goto L6726; } goto ret0; L6726: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_USE_FIOP && !TARGET_SSE_MATH)) { return 565; } goto ret0; L6664: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, SFmode)) { operands[2] = x2; goto L6665; } x2 = XEXP (x1, 0); goto L13485; L6665: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !TARGET_SSE_MATH && COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 555; } L6671: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_SSE_MATH && TARGET_MIX_SSE_I387 && COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 556; } L6677: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE_MATH && COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 557; } L6707: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !TARGET_SSE_MATH && !COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 562; } L6713: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_SSE_MATH && TARGET_MIX_SSE_I387 && !COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 563; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L13485; L6718: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L13488; goto ret0; L13488: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT) goto L6732; if (nonimmediate_operand (x2, SFmode)) { operands[2] = x2; goto L6719; } goto ret0; L6732: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L6733; } goto ret0; L6733: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_USE_FIOP && !TARGET_SSE_MATH)) { return 566; } goto ret0; L6719: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE_MATH && !COMMUTATIVE_ARITH_P (operands[3]))) { return 564; } goto ret0; ret0: return -1; } static int recog_8 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case NEG: goto L4332; case ABS: goto L4488; case SQRT: goto L6848; case UNSPEC: goto L13509; case IF_THEN_ELSE: goto L7880; case EQ: case LT: case LE: case UNORDERED: case NE: case UNGE: case UNGT: case ORDERED: case UNEQ: case UNLT: case UNLE: case LTGT: case GE: case GT: goto L13502; case PLUS: case MINUS: case MULT: case DIV: goto L13503; default: goto ret0; } L13502: ATTRIBUTE_UNUSED_LABEL if (sse_comparison_operator (x1, DFmode)) { operands[1] = x1; goto L5933; } L13503: ATTRIBUTE_UNUSED_LABEL if (binary_fp_operator (x1, DFmode)) { operands[3] = x1; goto L6681; } goto ret0; L4332: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode) goto L13513; goto ret0; L13513: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT_EXTEND) goto L4338; if 
(register_operand (x2, DFmode)) { operands[1] = x2; goto L4333; } goto ret0; L4338: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L4339; } goto ret0; L4339: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 373; } goto ret0; L4333: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed)) { return 372; } goto ret0; L4488: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode) goto L13515; goto ret0; L13515: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT_EXTEND) goto L4494; if (register_operand (x2, DFmode)) { operands[1] = x2; goto L4489; } goto ret0; L4494: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L4495; } goto ret0; L4495: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 388; } goto ret0; L4489: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed)) { return 387; } goto ret0; L6848: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode) goto L13518; goto ret0; L13518: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT_EXTEND) goto L6864; if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L6849; } L13517: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DFmode)) { operands[1] = x2; goto L6859; } goto ret0; L6864: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L6865; } goto ret0; L6865: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && !(TARGET_SSE2 && TARGET_SSE_MATH))) { return 587; } goto ret0; L6849: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && (TARGET_SSE2 && TARGET_SSE_MATH && TARGET_MIX_SSE_I387))) { return 584; } L6854: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && TARGET_SSE_MATH && (!TARGET_80387 || !TARGET_MIX_SSE_I387))) { return 585; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L13517; L6859: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && (!TARGET_SSE2 || !TARGET_SSE_MATH))) { return 586; } goto ret0; L13509: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x1, 0)) { case 1: goto L13519; case 2: goto L13521; default: break; } goto ret0; L13519: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 21L: goto L6916; case 22L: goto L6937; default: break; } goto ret0; L6916: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (GET_MODE (x2) == DFmode) goto L13523; goto ret0; L13523: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT_EXTEND) goto L6927; if (register_operand (x2, DFmode)) { operands[1] = x2; goto L6917; } goto ret0; L6927: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L6928; } goto ret0; L6928: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 595; } goto ret0; L6917: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 593; } goto ret0; L6937: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (GET_MODE (x2) == DFmode) goto L13525; goto ret0; L13525: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT_EXTEND) goto L6948; if (register_operand (x2, DFmode)) { operands[1] = x2; goto L6938; } goto ret0; L6948: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L6949; } goto ret0; L6949: ATTRIBUTE_UNUSED_LABEL if ((! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 599; } goto ret0; L6938: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 597; } goto ret0; L13521: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 65) goto L7032; goto ret0; L7032: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, DFmode)) { operands[2] = x2; goto L7033; } goto ret0; L7033: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (register_operand (x2, DFmode)) { operands[1] = x2; goto L7034; } goto ret0; L7034: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 608; } goto ret0; L7880: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_CODE (x2)) { case LT: goto L7881; case GT: goto L7973; default: break; } L7796: ATTRIBUTE_UNUSED_LABEL if (fcmov_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L7797; } L8099: ATTRIBUTE_UNUSED_LABEL if (sse_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L8100; } L8121: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == EQ) goto L8122; L8184: ATTRIBUTE_UNUSED_LABEL if (fcmov_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L8185; } goto ret0; L7881: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DFmode) goto L13526; goto L7796; L13526: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto L7882; } L13527: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L7901; } L13528: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto L7909; } goto L7796; L7882: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[2] = x3; goto L7883; } x3 = XEXP (x2, 0); goto L13527; L7883: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7884; x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13527; L7884: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE2 && TARGET_IEEE_FP && TARGET_SSE_MATH) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 661; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13527; L7901: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[2] = x3; goto L7902; } x3 = XEXP (x2, 0); goto L13528; L7902: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7903; x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13528; L7903: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE2 && TARGET_SSE_MATH && !TARGET_IEEE_FP && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 662; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13528; L7909: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[2] = x3; goto L7910; } goto L7796; L7910: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7911; x2 = XEXP (x1, 0); goto L7796; L7911: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE2 && TARGET_SSE_MATH && reload_completed)) { return 663; } x2 = XEXP (x1, 0); goto L7796; L7973: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DFmode) goto L13529; goto L7796; L13529: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto 
L7974; } L13530: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L7993; } L13531: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto L8001; } goto L7796; L7974: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[2] = x3; goto L7975; } x3 = XEXP (x2, 0); goto L13530; L7975: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7976; x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13530; L7976: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE2 && TARGET_SSE_MATH && TARGET_IEEE_FP) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 667; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13530; L7993: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[2] = x3; goto L7994; } x3 = XEXP (x2, 0); goto L13531; L7994: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L7995; x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13531; L7995: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE2 && TARGET_SSE_MATH && !TARGET_IEEE_FP && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 668; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13531; L8001: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[2] = x3; goto L8002; } goto L7796; L8002: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L8003; x2 = XEXP (x1, 0); goto L7796; L8003: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (rtx_equal_p (x2, operands[2]) && (TARGET_SSE2 && TARGET_SSE_MATH && reload_completed)) { return 669; } x2 = XEXP (x1, 0); goto L7796; L7797: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L7798; goto L8099; L7798: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7799; goto L8099; L7799: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, DFmode)) { operands[2] = x2; goto L7800; } x2 = XEXP (x1, 0); goto L8099; L7800: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, DFmode)) { operands[3] = x2; goto L7801; } x2 = XEXP (x1, 0); goto L8099; L7801: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_CMOVE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM))) { return 655; } L7810: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_CMOVE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM))) { return 656; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L8099; L8100: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DFmode) goto L13532; goto L8121; L13532: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DFmode)) { operands[4] = x3; goto L8101; } L13533: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[4] = x3; goto L8168; } goto L8121; L8101: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[5] = x3; goto L8102; } x3 = XEXP (x2, 0); goto L13533; L8102: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, DFmode)) { operands[2] = x2; goto L8103; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13533; L8103: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, DFmode)) { operands[3] = x2; goto L8104; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); 
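/* This alternative failed, so restore the cursor and retry at L13533,
   which requires a register first operand and arms that pair a register
   with constant zero (insn codes 681/682 under TARGET_SSE2), before
   falling back to the EQ form at L8121.  */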
goto L13533; L8104: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM) /* Avoid combine from being smart and converting min/max instruction patterns into conditional moves. */ && ((GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != GT && GET_CODE (operands[1]) != UNLE && GET_CODE (operands[1]) != UNGE) || !rtx_equal_p (operands[4], operands[2]) || !rtx_equal_p (operands[5], operands[3])) && (!TARGET_IEEE_FP || (GET_CODE (operands[1]) != EQ && GET_CODE (operands[1]) != NE))) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 675; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13533; L8168: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[5] = x3; goto L8169; } goto L8121; L8169: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, DFmode)) { operands[2] = x2; goto L8170; } if (const0_operand (x2, DFmode)) { operands[2] = x2; goto L8179; } x2 = XEXP (x1, 0); goto L8121; L8170: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (const0_operand (x2, DFmode)) { operands[3] = x2; goto L8171; } x2 = XEXP (x1, 0); goto L8121; L8171: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 681; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L8121; L8179: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (register_operand (x2, DFmode)) { operands[3] = x2; goto L8180; } x2 = XEXP (x1, 0); goto L8121; L8180: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 682; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L8121; L8122: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DFmode)) { operands[3] = x3; goto L8123; } goto L8184; L8123: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[4] = x3; goto L8124; } goto L8184; L8124: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L8125; } x2 = XEXP (x1, 0); goto L8184; L8125: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, DFmode)) { operands[2] = x2; goto L8126; } x2 = XEXP (x1, 0); goto L8184; L8126: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 676; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L8184; L8185: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DFmode)) { operands[4] = x3; goto L8186; } goto ret0; L8186: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DFmode)) { operands[5] = x3; goto L8187; } goto ret0; L8187: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, DFmode)) { operands[2] = x2; goto L8188; } if (const0_operand (x2, DFmode)) { operands[2] = x2; goto L8197; } goto ret0; L8188: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (const0_operand (x2, DFmode)) { operands[3] = x2; goto L8189; } goto ret0; L8189: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 683; } goto ret0; L8197: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (register_operand (x2, DFmode)) { operands[3] = x2; goto L8198; } goto ret0; L8198: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 684; } goto ret0; L5933: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[2] = x2; goto L5934; } goto ret0; L5934: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, DFmode)) { operands[3] = x2; goto L5935; } goto ret0; L5935: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && reload_completed)) { 
return 496; } goto ret0; L6681: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode) goto L13536; goto ret0; L13536: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FLOAT: goto L6756; case FLOAT_EXTEND: goto L6770; case SUBREG: case REG: case MEM: goto L13534; default: goto L13535; } L13534: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L6682; } L13535: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DFmode)) { operands[1] = x2; goto L6750; } goto ret0; L6756: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L6757; } goto ret0; L6757: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, DFmode)) { operands[2] = x2; goto L6758; } goto ret0; L6758: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_USE_FIOP && !(TARGET_SSE2 && TARGET_SSE_MATH))) { return 570; } goto ret0; L6770: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SFmode) goto L13538; goto ret0; L13538: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, SFmode)) { operands[1] = x3; goto L6771; } L13539: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[1] = x3; goto L6785; } goto ret0; L6771: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, DFmode)) { operands[2] = x2; goto L6772; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13539; L6772: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (!TARGET_SSE2 || !TARGET_SSE_MATH) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 572; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13539; L6785: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == FLOAT_EXTEND) goto L6786; goto ret0; L6786: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L6787; } goto ret0; L6787: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !(TARGET_SSE2 && TARGET_SSE_MATH))) { return 574; } goto ret0; L6682: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, DFmode)) { operands[2] = x2; goto L6683; } x2 = XEXP (x1, 0); goto L13535; L6683: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (!TARGET_SSE2 || !TARGET_SSE_MATH) && COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 558; } L6689: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_SSE_MATH && TARGET_SSE2 && TARGET_MIX_SSE_I387 && COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 559; } L6695: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && TARGET_SSE_MATH && COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 560; } L6739: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (!TARGET_SSE2 || !TARGET_SSE_MATH) && !COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 567; } L6745: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_SSE2 && TARGET_SSE_MATH && TARGET_MIX_SSE_I387 && !COMMUTATIVE_ARITH_P (operands[3]) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 568; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L13535; L6750: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L13541; goto ret0; L13541: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FLOAT: goto L6764; case FLOAT_EXTEND: goto L6778; case SUBREG: case REG: case MEM: goto 
L13540; default: goto ret0; } L13540: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DFmode)) { operands[2] = x2; goto L6751; } goto ret0; L6764: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L6765; } goto ret0; L6765: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_USE_FIOP && !(TARGET_SSE2 && TARGET_SSE_MATH))) { return 571; } goto ret0; L6778: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L6779; } goto ret0; L6779: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !(TARGET_SSE2 && TARGET_SSE_MATH))) { return 573; } goto ret0; L6751: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && TARGET_SSE_MATH && !COMMUTATIVE_ARITH_P (operands[3]))) { return 569; } goto ret0; ret0: return -1; } static int recog_9 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case FLOAT: goto L1165; case NEG: goto L4343; case ABS: goto L4499; case SQRT: goto L6869; case UNSPEC: goto L13561; case IF_THEN_ELSE: goto L7814; case PLUS: case MINUS: case MULT: case DIV: goto L13551; default: goto ret0; } L13551: ATTRIBUTE_UNUSED_LABEL if (binary_fp_operator (x1, XFmode)) { operands[3] = x1; goto L6699; } goto ret0; L1165: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case HImode: goto L13568; case SImode: goto L13569; case DImode: goto L13570; default: break; } goto ret0; L13568: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L1166; } goto ret0; L1166: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 174; } goto ret0; L13569: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L1171; } goto ret0; L1171: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 175; } goto ret0; L13570: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L1176; } goto ret0; L1176: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 176; } goto ret0; L4343: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode) goto L13572; goto ret0; L13572: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT_EXTEND) goto L4349; if (register_operand (x2, XFmode)) { operands[1] = x2; goto L4344; } goto ret0; L4349: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case DFmode: goto L13573; case SFmode: goto L13574; default: break; } goto ret0; L13573: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto L4350; } goto ret0; L4350: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 375; } goto ret0; L13574: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[1] = x3; goto L4356; } goto ret0; L4356: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 376; } goto ret0; L4344: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed)) { return 374; } goto ret0; L4499: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode) goto L13576; goto ret0; L13576: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT_EXTEND) goto L4505; if (register_operand (x2, XFmode)) { operands[1] = x2; goto L4500; } goto ret0; L4505: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case 
DFmode: goto L13577; case SFmode: goto L13578; default: break; } goto ret0; L13577: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto L4506; } goto ret0; L4506: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 390; } goto ret0; L13578: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[1] = x3; goto L4512; } goto ret0; L4512: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 391; } goto ret0; L4500: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed)) { return 389; } goto ret0; L6869: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode) goto L13580; goto ret0; L13580: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == FLOAT_EXTEND) goto L6875; if (register_operand (x2, XFmode)) { operands[1] = x2; goto L6870; } goto ret0; L6875: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case DFmode: goto L13581; case SFmode: goto L13582; default: break; } goto ret0; L13581: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto L6876; } goto ret0; L6876: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !TARGET_NO_FANCY_MATH_387)) { return 589; } goto ret0; L13582: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SFmode)) { operands[1] = x3; goto L6882; } goto ret0; L6882: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !TARGET_NO_FANCY_MATH_387)) { return 590; } goto ret0; L6870: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !TARGET_NO_FANCY_MATH_387 && (TARGET_IEEE_FP || flag_unsafe_math_optimizations) )) { return 588; } goto ret0; L13561: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x1, 0)) { case 1: goto L13583; case 2: goto L13585; default: break; } goto ret0; L13583: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 21L: goto L6932; case 22L: goto L6953; case 68L: goto L7107; case 69L: goto L7112; default: break; } goto ret0; L6932: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L6933; } goto ret0; L6933: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !TARGET_NO_FANCY_MATH_387 && flag_unsafe_math_optimizations)) { return 596; } goto ret0; L6953: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L6954; } goto ret0; L6954: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 600; } goto ret0; L7107: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L7108; } goto ret0; L7108: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 614; } goto ret0; L7112: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L7113; } goto ret0; L7113: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 615; } goto ret0; L13585: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 65L: goto L7062; case 66L: goto L7077; case 67L: goto L7092; default: break; } goto ret0; L7062: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, XFmode)) { operands[2] = x2; goto L7063; } goto ret0; L7063: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L7064; } goto ret0; L7064: ATTRIBUTE_UNUSED_LABEL if ((! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 610; } goto ret0; L7077: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, XFmode)) { operands[2] = x2; goto L7078; } goto ret0; L7078: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L7079; } goto ret0; L7079: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 611; } goto ret0; L7092: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, XFmode)) { operands[2] = x2; goto L7093; } goto ret0; L7093: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L7094; } goto ret0; L7094: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 612; } goto ret0; L7814: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (fcmov_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L7815; } goto ret0; L7815: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L7816; goto ret0; L7816: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7817; goto ret0; L7817: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, XFmode)) { operands[2] = x2; goto L7818; } goto ret0; L7818: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (register_operand (x2, XFmode)) { operands[3] = x2; goto L7819; } goto ret0; L7819: ATTRIBUTE_UNUSED_LABEL if ((TARGET_CMOVE)) { return 657; } goto ret0; L6699: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode) goto L13591; goto ret0; L13591: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FLOAT: goto L6798; case FLOAT_EXTEND: goto L6812; case SUBREG: case REG: goto L13590; default: goto ret0; } L13590: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, XFmode)) { operands[1] = x2; goto L6700; } goto ret0; L6798: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L6799; } goto ret0; L6799: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, XFmode)) { operands[2] = x2; goto L6800; } goto ret0; L6800: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_USE_FIOP)) { return 576; } goto ret0; L6812: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, VOIDmode)) { operands[1] = x3; goto L6813; } L6826: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L6827; } goto ret0; L6813: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, XFmode)) { operands[2] = x2; goto L6814; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L6826; L6814: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 578; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L6826; L6827: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == FLOAT_EXTEND) goto L6828; goto ret0; L6828: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, VOIDmode)) { operands[2] = x3; goto L6829; } goto ret0; L6829: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 580; } goto ret0; L6700: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode) goto L13594; goto ret0; L13594: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE 
(x2)) { case FLOAT: goto L6806; case FLOAT_EXTEND: goto L6820; case SUBREG: case REG: goto L13593; default: goto ret0; } L13593: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, XFmode)) { operands[2] = x2; goto L6701; } goto ret0; L6806: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L6807; } goto ret0; L6807: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_USE_FIOP)) { return 577; } goto ret0; L6820: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, VOIDmode)) { operands[2] = x3; goto L6821; } goto ret0; L6821: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 579; } goto ret0; L6701: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && COMMUTATIVE_ARITH_P (operands[3]))) { return 561; } L6793: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !COMMUTATIVE_ARITH_P (operands[3]))) { return 575; } goto ret0; ret0: return -1; } static int recog_10 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case DImode: goto L13625; case SImode: goto L13626; case HImode: goto L13629; case QImode: goto L13632; default: break; } goto ret0; L13625: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case PLUS: goto L1464; case AND: goto L2861; case ZERO_EXTRACT: goto L2942; case IOR: goto L3291; case XOR: goto L3674; case SUBREG: case REG: case MEM: goto L13624; default: goto ret0; } L13624: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L1447; } goto ret0; L1464: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L1465; } goto ret0; L1465: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L1466; } goto ret0; L1466: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! 
pic_symbolic_operand (operands[2], VOIDmode)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 200; } goto ret0; L2861: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[0] = x3; goto L2862; } goto ret0; L2862: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_szext_general_operand (x3, DImode)) { operands[1] = x3; goto L2863; } goto ret0; L2863: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 278; } goto ret0; L2942: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, VOIDmode)) { operands[0] = x3; goto L2943; } goto ret0; L2943: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, DImode)) { operands[1] = x3; goto L2944; } goto ret0; L2944: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (const_int_operand (x3, DImode)) { operands[2] = x3; goto L2945; } goto ret0; L2945: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) /* The code below cannot deal with constants outside HOST_WIDE_INT. */ && INTVAL (operands[1]) + INTVAL (operands[2]) < HOST_BITS_PER_WIDE_INT /* Ensure that resulting mask is zero or sign extended operand. */ && (INTVAL (operands[1]) + INTVAL (operands[2]) <= 32 || (INTVAL (operands[1]) + INTVAL (operands[2]) == 64 && INTVAL (operands[1]) > 32)) && (GET_MODE (operands[0]) == SImode || GET_MODE (operands[0]) == DImode || GET_MODE (operands[0]) == HImode || GET_MODE (operands[0]) == QImode))) { return 287; } goto ret0; L3291: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L3292; } goto ret0; L3292: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L3293; } goto ret0; L3293: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (IOR, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 307; } goto ret0; L3674: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L3675; } goto ret0; L3675: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L3676; } goto ret0; L3676: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (XOR, DImode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 329; } goto ret0; L1447: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_immediate_operand (x2, DImode)) { operands[2] = x2; goto L1448; } goto ret0; L1448: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCGCmode)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 199; } goto ret0; L13626: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L1538; case PLUS: goto L1584; case AND: goto L2868; case ZERO_EXTRACT: goto L2934; case IOR: goto L3396; case XOR: goto L3779; case SUBREG: case REG: case MEM: goto L13627; default: goto ret0; } L13627: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L1567; } goto ret0; L1538: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if 
(general_operand (x3, SImode)) { operands[2] = x3; goto L1539; } goto ret0; L1539: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L1540; } goto ret0; L1540: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCZmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! pic_symbolic_operand (operands[2], VOIDmode)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 205; } goto ret0; L1584: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L1585; } goto ret0; L1585: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L1586; } goto ret0; L1586: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCGOCmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! pic_symbolic_operand (operands[2], VOIDmode)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 208; } goto ret0; L2868: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L13650; goto ret0; L13650: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == ZERO_EXTRACT) goto L2890; if (nonimmediate_operand (x3, SImode)) { operands[0] = x3; goto L2869; } goto ret0; L2890: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[0] = x4; goto L2891; } goto ret0; L2891: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L2892; goto ret0; L2892: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L2903; goto ret0; L2903: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode) goto L13651; L2893: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x3, VOIDmode)) { operands[1] = x3; goto L2894; } goto ret0; L13651: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case ZERO_EXTEND: goto L2904; case ZERO_EXTRACT: goto L2926; default: break; } goto L2893; L2904: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (general_operand (x4, QImode)) { operands[1] = x4; goto L2905; } L2915: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, QImode)) { operands[1] = x4; goto L2916; } goto L2893; L2905: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (!TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 283; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); x4 = XEXP (x3, 0); goto L2915; L2916: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode))) { return 284; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L2893; L2926: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L2927; } goto L2893; L2927: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L2928; goto L2893; L2928: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L2929; goto L2893; L2929: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode))) { return 285; } x2 = 
XEXP (x1, 0); x3 = XEXP (x2, 1); goto L2893; L2894: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode))) { return 282; } goto ret0; L2869: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[1] = x3; goto L2870; } goto ret0; L2870: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 279; } goto ret0; L2934: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, VOIDmode)) { operands[0] = x3; goto L2935; } goto ret0; L2935: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, SImode)) { operands[1] = x3; goto L2936; } goto ret0; L2936: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (const_int_operand (x3, SImode)) { operands[2] = x3; goto L2937; } goto ret0; L2937: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_MODE (operands[0]) == SImode || (TARGET_64BIT && GET_MODE (operands[0]) == DImode) || GET_MODE (operands[0]) == HImode || GET_MODE (operands[0]) == QImode))) { return 286; } goto ret0; L3396: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L3397; } goto ret0; L3397: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L3398; } goto ret0; L3398: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 314; } goto ret0; L3779: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L3780; } goto ret0; L3780: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L3781; } goto ret0; L3781: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 336; } goto ret0; L1567: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const_int_operand (x2, SImode)) { operands[2] = x2; goto L1568; } goto ret0; L1568: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCGCmode) && (INTVAL (operands[2]) & 0xffffffff) != 0x80000000) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 207; } goto ret0; L13629: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L1642; case PLUS: goto L1675; case AND: goto L2875; case IOR: goto L3441; case XOR: goto L3824; case SUBREG: case REG: case MEM: goto L13630; default: goto ret0; } L13630: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L1658; } goto ret0; L1642: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (general_operand (x3, HImode)) { operands[2] = x3; goto L1643; } goto ret0; L1643: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L1644; } goto ret0; L1644: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCZmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 212; } goto ret0; L1675: 
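/* L1675 handles the HImode (compare (plus ...) (const_int 0)) form: both
   addends are captured, CCGOCmode is required, one clobber is added, and
   insn code 214 is returned; this is the HImode analogue of the SImode
   plus/compare case at L1584 above.  */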
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L1676; } goto ret0; L1676: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L1677; } goto ret0; L1677: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCGOCmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 214; } goto ret0; L2875: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[0] = x3; goto L2876; } goto ret0; L2876: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[1] = x3; goto L2877; } goto ret0; L2877: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 280; } goto ret0; L3441: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L3442; } goto ret0; L3442: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L3443; } goto ret0; L3443: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 317; } goto ret0; L3824: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L3825; } goto ret0; L3825: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L3826; } goto ret0; L3826: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 339; } goto ret0; L1658: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const_int_operand (x2, HImode)) { operands[2] = x2; goto L1659; } goto ret0; L1659: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCGCmode) && (INTVAL (operands[2]) & 0xffff) != 0x8000) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 213; } goto ret0; L13632: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L1749; case PLUS: goto L1782; case AND: goto L2882; case IOR: goto L3516; case XOR: goto L4012; case SUBREG: case REG: case MEM: goto L13633; default: goto ret0; } L13633: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L1765; } goto ret0; L1749: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (general_operand (x3, QImode)) { operands[2] = x3; goto L1750; } goto ret0; L1750: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L1751; } goto ret0; L1751: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCZmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 219; } goto ret0; L1782: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L1783; } goto ret0; L1783: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L1784; } goto ret0; L1784: ATTRIBUTE_UNUSED_LABEL x2 = 
XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCGOCmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 221; } goto ret0; L2882: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[0] = x3; goto L2883; } goto ret0; L2883: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[1] = x3; goto L2884; } goto ret0; L2884: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 281; } goto ret0; L3516: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L3517; } goto ret0; L3517: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L3518; } goto ret0; L3518: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 322; } goto ret0; L4012: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L4013; } goto ret0; L4013: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L4014; } goto ret0; L4014: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 348; } goto ret0; L1765: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const_int_operand (x2, QImode)) { operands[2] = x2; goto L1766; } goto ret0; L1766: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCGCmode) && (INTVAL (operands[2]) & 0xff) != 0x80) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 220; } goto ret0; ret0: return -1; } static int recog_11 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); switch (GET_CODE (x2)) { case VEC_DUPLICATE: goto L8502; case PLUS: goto L8534; case MINUS: goto L8548; case MULT: goto L8562; case DIV: goto L8576; case UNSPEC: goto L13709; case SQRT: goto L8613; case VEC_SELECT: goto L8782; case SMAX: goto L8824; case SMIN: goto L8838; case SUBREG: case REG: goto L13697; default: goto ret0; } L13697: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8466; } goto ret0; L8502: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, SFmode)) { operands[1] = x3; goto L8503; } goto ret0; L8503: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const0_operand (x2, V4SFmode)) { operands[2] = x2; goto L8504; } goto ret0; L8504: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 734; } goto ret0; L8534: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8535; } goto ret0; 
L8535: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V4SFmode)) { operands[2] = x3; goto L8536; } goto ret0; L8536: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L8537; goto ret0; L8537: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 739; } goto ret0; L8548: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8549; } goto ret0; L8549: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V4SFmode)) { operands[2] = x3; goto L8550; } goto ret0; L8550: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L8551; goto ret0; L8551: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 741; } goto ret0; L8562: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8563; } goto ret0; L8563: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V4SFmode)) { operands[2] = x3; goto L8564; } goto ret0; L8564: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L8565; goto ret0; L8565: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 743; } goto ret0; L8576: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8577; } goto ret0; L8577: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V4SFmode)) { operands[2] = x3; goto L8578; } goto ret0; L8578: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L8579; goto ret0; L8579: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 745; } goto ret0; L13709: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 1) goto L13711; goto ret0; L13711: ATTRIBUTE_UNUSED_LABEL switch (XINT (x2, 1)) { case 42L: goto L8589; case 43L: goto L8601; default: break; } goto ret0; L8589: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L8590; } goto ret0; L8590: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, V4SFmode)) { operands[2] = x2; goto L8591; } goto ret0; L8591: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 747; } goto ret0; L8601: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L8602; } goto ret0; L8602: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, V4SFmode)) { operands[2] = x2; goto L8603; } goto ret0; L8603: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 749; } goto ret0; L8613: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L8614; } goto ret0; L8614: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, V4SFmode)) { operands[2] = x2; goto L8615; } goto ret0; L8615: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 751; } goto ret0; L8782: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8783; } goto ret0; L8783: ATTRIBUTE_UNUSED_LABEL x3 = 
XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L8784; goto ret0; L8784: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (GET_CODE (x4) == CONST_INT) goto L13713; goto ret0; L13713: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x4, 0) == XWINT (x4, 0)) switch ((int) XWINT (x4, 0)) { case 2L: goto L8785; case 0L: goto L8803; default: break; } goto ret0; L8785: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L8786; goto ret0; L8786: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L8787; goto ret0; L8787: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L8788; goto ret0; L8788: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SFmode && GET_CODE (x2) == VEC_SELECT) goto L8789; goto ret0; L8789: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[2] = x3; goto L8790; } goto ret0; L8790: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L8791; goto ret0; L8791: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L8792; goto ret0; L8792: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L8793; goto ret0; L8793: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L8794; goto ret0; L8794: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L8795; goto ret0; L8795: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (5)] && (TARGET_SSE)) { return 776; } goto ret0; L8803: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L8804; goto ret0; L8804: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L8805; goto ret0; L8805: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L8806; goto ret0; L8806: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SFmode && GET_CODE (x2) == VEC_SELECT) goto L8807; goto ret0; L8807: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[2] = x3; goto L8808; } goto ret0; L8808: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L8809; goto ret0; L8809: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L8810; goto ret0; L8810: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L8811; goto ret0; L8811: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L8812; goto ret0; L8812: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L8813; goto ret0; L8813: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (5)] && (TARGET_SSE)) { return 777; } goto ret0; L8824: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8825; } goto ret0; L8825: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V4SFmode)) { operands[2] = x3; goto L8826; } goto ret0; L8826: 
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L8827; goto ret0; L8827: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 779; } goto ret0; L8838: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8839; } goto ret0; L8839: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V4SFmode)) { operands[2] = x3; goto L8840; } goto ret0; L8840: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L8841; goto ret0; L8841: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 781; } goto ret0; L8466: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SFmode) goto L13715; goto ret0; L13715: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case VEC_SELECT: goto L8467; case VEC_DUPLICATE: goto L8847; case SUBREG: case REG: goto L13716; default: goto ret0; } L13716: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, V4SFmode)) { operands[2] = x2; goto L8510; } goto ret0; L8467: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[2] = x3; goto L8468; } goto ret0; L8468: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L8469; goto ret0; L8469: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L8470; goto ret0; L8470: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L8471; goto ret0; L8471: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L8472; goto ret0; L8472: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L8473; goto ret0; L8473: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == CONST_INT) goto L13718; goto ret0; L13718: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x2, 0) == XWINT (x2, 0)) switch ((int) XWINT (x2, 0)) { case 3L: goto L13720; case 12L: goto L13721; default: break; } goto ret0; L13720: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 730; } goto ret0; L13721: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 731; } goto ret0; L8847: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case V2SFmode: goto L13722; case SFmode: goto L13723; default: break; } goto ret0; L13722: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == FLOAT) goto L8848; goto ret0; L8848: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, V2SImode)) { operands[2] = x4; goto L8849; } goto ret0; L8849: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (12)] && (TARGET_SSE)) { return 782; } goto ret0; L13723: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == FLOAT) goto L8872; goto ret0; L8872: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); switch (GET_MODE (x4)) { case SImode: goto L13724; case DImode: goto L13725; default: break; } goto ret0; L13724: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SImode)) { operands[2] = x4; goto L8873; } goto ret0; L8873: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (14)] && (TARGET_SSE)) { return 785; } goto ret0; L13725: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, DImode)) { operands[2] = x4; goto L8881; } goto ret0; L8881: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 
== const_int_rtx[MAX_SAVED_CONST_INT + (14)] && (TARGET_SSE && TARGET_64BIT)) { return 786; } goto ret0; L8510: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 735; } goto ret0; ret0: return -1; } static int recog_12 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case PLUS: goto L8913; case SS_PLUS: goto L8938; case US_PLUS: goto L8950; case MINUS: goto L8962; case SS_MINUS: goto L8987; case US_MINUS: goto L8999; case ASHIFTRT: goto L9099; case EQ: goto L9158; case GT: goto L9176; case UMAX: goto L9194; case UMIN: goto L9206; case VEC_CONCAT: goto L9268; case VEC_MERGE: goto L9292; default: break; } goto ret0; L8913: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L8914; } goto ret0; L8914: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L8915; } goto ret0; L8915: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 791; } goto ret0; L8938: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L8939; } goto ret0; L8939: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L8940; } goto ret0; L8940: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 795; } goto ret0; L8950: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L8951; } goto ret0; L8951: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L8952; } goto ret0; L8952: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 797; } goto ret0; L8962: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L8963; } goto ret0; L8963: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L8964; } goto ret0; L8964: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 799; } goto ret0; L8987: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L8988; } goto ret0; L8988: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L8989; } goto ret0; L8989: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 803; } goto ret0; L8999: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L9000; } goto ret0; L9000: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L9001; } goto ret0; L9001: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 805; } goto ret0; L9099: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V8QImode && GET_CODE (x2) == PLUS) goto L9100; goto ret0; L9100: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V8QImode && GET_CODE (x3) == PLUS) goto L9101; goto ret0; L9101: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V8QImode)) { operands[1] = x4; goto L9102; } goto ret0; L9102: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, 
V8QImode)) { operands[2] = x4; goto L9103; } goto ret0; L9103: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == V8QImode && GET_CODE (x3) == CONST_VECTOR && XVECLEN (x3, 0) == 8) goto L9104; goto ret0; L9104: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9105; goto ret0; L9105: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9106; goto ret0; L9106: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9107; goto ret0; L9107: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9108; goto ret0; L9108: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9109; goto ret0; L9109: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9110; goto ret0; L9110: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9111; goto ret0; L9111: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9112; goto ret0; L9112: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE || TARGET_3DNOW_A)) { return 816; } goto ret0; L9158: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L9159; } goto ret0; L9159: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L9160; } goto ret0; L9160: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 822; } goto ret0; L9176: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L9177; } goto ret0; L9177: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L9178; } goto ret0; L9178: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 825; } goto ret0; L9194: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L9195; } goto ret0; L9195: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L9196; } goto ret0; L9196: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 828; } goto ret0; L9206: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L9207; } goto ret0; L9207: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L9208; } goto ret0; L9208: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 830; } goto ret0; L9268: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4QImode) goto L13756; goto ret0; L13756: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case SS_TRUNCATE: goto L9269; case US_TRUNCATE: goto L9285; default: break; } goto ret0; L9269: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4HImode)) { operands[1] = x3; goto L9270; } goto ret0; L9270: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4QImode && GET_CODE (x2) == SS_TRUNCATE) goto L9271; goto ret0; L9271: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4HImode)) { operands[2] = x3; goto L9272; } goto ret0; L9272: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 840; } goto ret0; L9285: 
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4HImode)) { operands[1] = x3; goto L9286; } goto ret0; L9286: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4QImode && GET_CODE (x2) == US_TRUNCATE) goto L9287; goto ret0; L9287: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4HImode)) { operands[2] = x3; goto L9288; } goto ret0; L9288: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 842; } goto ret0; L9292: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V8QImode && GET_CODE (x2) == VEC_SELECT) goto L9293; goto ret0; L9293: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8QImode)) { operands[1] = x3; goto L9294; } goto ret0; L9294: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 8) goto L9295; goto ret0; L9295: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (GET_CODE (x4) == CONST_INT) goto L13758; goto ret0; L13758: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x4, 0) == XWINT (x4, 0)) switch ((int) XWINT (x4, 0)) { case 4L: goto L9296; case 0L: goto L9350; default: break; } goto ret0; L9296: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9297; goto ret0; L9297: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L9298; goto ret0; L9298: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9299; goto ret0; L9299: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L9300; goto ret0; L9300: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L9301; goto ret0; L9301: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L9302; goto ret0; L9302: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L9303; goto ret0; L9303: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V8QImode && GET_CODE (x2) == VEC_SELECT) goto L9304; goto ret0; L9304: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8QImode)) { operands[2] = x3; goto L9305; } goto ret0; L9305: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 8) goto L9306; goto ret0; L9306: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9307; goto ret0; L9307: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L9308; goto ret0; L9308: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9309; goto ret0; L9309: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L9310; goto ret0; L9310: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L9311; goto ret0; L9311: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L9312; goto ret0; L9312: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L9313; goto ret0; L9313: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L9314; goto ret0; L9314: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == 
CONST_INT && XWINT (x2, 0) == 85L && (TARGET_MMX)) { return 843; } goto ret0; L9350: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L9351; goto ret0; L9351: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9352; goto ret0; L9352: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L9353; goto ret0; L9353: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L9354; goto ret0; L9354: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L9355; goto ret0; L9355: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L9356; goto ret0; L9356: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L9357; goto ret0; L9357: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V8QImode && GET_CODE (x2) == VEC_SELECT) goto L9358; goto ret0; L9358: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8QImode)) { operands[2] = x3; goto L9359; } goto ret0; L9359: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 8) goto L9360; goto ret0; L9360: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L9361; goto ret0; L9361: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9362; goto ret0; L9362: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L9363; goto ret0; L9363: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9364; goto ret0; L9364: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L9365; goto ret0; L9365: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L9366; goto ret0; L9366: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L9367; goto ret0; L9367: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L9368; goto ret0; L9368: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 85L && (TARGET_MMX)) { return 846; } goto ret0; ret0: return -1; } static int recog_13 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case PLUS: goto L8919; case SS_PLUS: goto L8944; case US_PLUS: goto L8956; case MINUS: goto L8968; case SS_MINUS: goto L8993; case US_MINUS: goto L9005; case MULT: goto L9011; case TRUNCATE: goto L9017; case ASHIFTRT: goto L9116; case VEC_MERGE: goto L9135; case UNSPEC: goto L13778; case EQ: goto L9164; case GT: goto L9182; case SMAX: goto L9200; case SMIN: goto L9212; case LSHIFTRT: goto L9230; case ASHIFT: goto L9249; case VEC_CONCAT: goto L9276; default: break; } goto ret0; L8919: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if 
(register_operand (x2, V4HImode)) { operands[1] = x2; goto L8920; } goto ret0; L8920: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L8921; } goto ret0; L8921: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 792; } goto ret0; L8944: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L8945; } goto ret0; L8945: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L8946; } goto ret0; L8946: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 796; } goto ret0; L8956: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L8957; } goto ret0; L8957: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L8958; } goto ret0; L8958: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 798; } goto ret0; L8968: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L8969; } goto ret0; L8969: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L8970; } goto ret0; L8970: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 800; } goto ret0; L8993: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L8994; } goto ret0; L8994: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L8995; } goto ret0; L8995: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 804; } goto ret0; L9005: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9006; } goto ret0; L9006: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L9007; } goto ret0; L9007: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 806; } goto ret0; L9011: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9012; } goto ret0; L9012: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L9013; } goto ret0; L9013: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 807; } goto ret0; L9017: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SImode && GET_CODE (x2) == LSHIFTRT) goto L9018; goto ret0; L9018: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V4SImode && GET_CODE (x3) == MULT) goto L9019; goto ret0; L9019: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V4SImode) goto L13779; goto ret0; L13779: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x4)) { case SIGN_EXTEND: goto L9020; case ZERO_EXTEND: goto L9030; default: break; } goto ret0; L9020: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, V4HImode)) { operands[1] = x5; goto L9021; } goto ret0; L9021: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == V4SImode && GET_CODE (x4) == SIGN_EXTEND) goto L9022; goto ret0; L9022: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, V4HImode)) { operands[2] = x5; goto L9023; } goto ret0; L9023: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (16)] && (TARGET_MMX)) { return 808; } goto ret0; L9030: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, V4HImode)) { operands[1] = x5; goto L9031; } goto ret0; L9031: ATTRIBUTE_UNUSED_LABEL x4 
= XEXP (x3, 1); if (GET_MODE (x4) == V4SImode && GET_CODE (x4) == ZERO_EXTEND) goto L9032; goto ret0; L9032: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, V4HImode)) { operands[2] = x5; goto L9033; } goto ret0; L9033: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (16)] && (TARGET_SSE || TARGET_3DNOW_A)) { return 809; } goto ret0; L9116: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4HImode) goto L13781; goto ret0; L13781: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == PLUS) goto L9117; if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9219; } goto ret0; L9117: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V4HImode && GET_CODE (x3) == PLUS) goto L9118; goto ret0; L9118: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V4HImode)) { operands[1] = x4; goto L9119; } goto ret0; L9119: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, V4HImode)) { operands[2] = x4; goto L9120; } goto ret0; L9120: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == V4HImode && GET_CODE (x3) == CONST_VECTOR && XVECLEN (x3, 0) == 4) goto L9121; goto ret0; L9121: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9122; goto ret0; L9122: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9123; goto ret0; L9123: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9124; goto ret0; L9124: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9125; goto ret0; L9125: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE || TARGET_3DNOW_A)) { return 817; } goto ret0; L9219: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, DImode)) { operands[2] = x2; goto L9220; } goto ret0; L9220: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 832; } goto ret0; L9135: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4HImode) goto L13784; goto ret0; L13784: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == VEC_SELECT) goto L9319; if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9136; } goto ret0; L9319: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4HImode)) { operands[1] = x3; goto L9320; } goto ret0; L9320: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L9321; goto ret0; L9321: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (GET_CODE (x4) == CONST_INT) goto L13785; goto ret0; L13785: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x4, 0) == XWINT (x4, 0)) switch ((int) XWINT (x4, 0)) { case 0L: goto L9322; case 2L: goto L9376; default: break; } goto ret0; L9322: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L9323; goto ret0; L9323: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9324; goto ret0; L9324: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L9325; goto ret0; L9325: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4HImode && GET_CODE (x2) == VEC_SELECT) goto L9326; goto ret0; L9326: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4HImode)) { operands[2] = x3; goto L9327; } goto 
ret0; L9327: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L9328; goto ret0; L9328: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L9329; goto ret0; L9329: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9330; goto ret0; L9330: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L9331; goto ret0; L9331: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9332; goto ret0; L9332: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (5)] && (TARGET_MMX)) { return 844; } goto ret0; L9376: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9377; goto ret0; L9377: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L9378; goto ret0; L9378: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9379; goto ret0; L9379: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4HImode && GET_CODE (x2) == VEC_SELECT) goto L9380; goto ret0; L9380: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4HImode)) { operands[2] = x3; goto L9381; } goto ret0; L9381: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L9382; goto ret0; L9382: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9383; goto ret0; L9383: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L9384; goto ret0; L9384: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9385; goto ret0; L9385: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L9386; goto ret0; L9386: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (5)] && (TARGET_MMX)) { return 847; } goto ret0; L9136: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4HImode && GET_CODE (x2) == VEC_DUPLICATE) goto L9137; goto ret0; L9137: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == HImode && GET_CODE (x3) == TRUNCATE) goto L9138; goto ret0; L9138: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[2] = x4; goto L9139; } goto ret0; L9139: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (const_0_to_15_operand (x2, SImode)) { operands[3] = x2; goto L9140; } goto ret0; L9140: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 819; } goto ret0; L13778: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 2 && XINT (x1, 1) == 41) goto L9152; goto ret0; L9152: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V4HImode)) { operands[1] = x2; goto L9153; } goto ret0; L9153: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (immediate_operand (x2, SImode)) { operands[2] = x2; goto L9154; } goto ret0; L9154: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 821; } goto ret0; L9164: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9165; } goto ret0; L9165: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, 
V4HImode)) { operands[2] = x2; goto L9166; } goto ret0; L9166: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 823; } goto ret0; L9182: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9183; } goto ret0; L9183: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L9184; } goto ret0; L9184: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 826; } goto ret0; L9200: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9201; } goto ret0; L9201: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L9202; } goto ret0; L9202: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 829; } goto ret0; L9212: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9213; } goto ret0; L9213: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4HImode)) { operands[2] = x2; goto L9214; } goto ret0; L9214: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 831; } goto ret0; L9230: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9231; } goto ret0; L9231: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, DImode)) { operands[2] = x2; goto L9232; } goto ret0; L9232: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 834; } goto ret0; L9249: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4HImode)) { operands[1] = x2; goto L9250; } goto ret0; L9250: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, DImode)) { operands[2] = x2; goto L9251; } goto ret0; L9251: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 837; } goto ret0; L9276: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2HImode && GET_CODE (x2) == SS_TRUNCATE) goto L9277; goto ret0; L9277: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2SImode)) { operands[1] = x3; goto L9278; } goto ret0; L9278: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2HImode && GET_CODE (x2) == SS_TRUNCATE) goto L9279; goto ret0; L9279: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2SImode)) { operands[2] = x3; goto L9280; } goto ret0; L9280: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 841; } goto ret0; ret0: return -1; } static int recog_14 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case PLUS: goto L9471; case MINUS: goto L9477; case SMAX: goto L9507; case SMIN: goto L9513; case MULT: goto L9519; case VEC_CONCAT: goto L9574; case FLOAT: goto L9637; case UNSPEC: goto L13856; case VEC_SELECT: goto L9715; default: break; } goto ret0; L9471: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9472; } goto ret0; L9472: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9473; } goto ret0; L9473: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 854; } goto ret0; L9477: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); 
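/* For the V2SF MINUS case the matcher tries two operand bindings in turn:
   first XEXP (x1, 0) as a register (operand 1) with a nonimmediate second
   arm (code 855), then, if that fails, XEXP (x1, 0) as the nonimmediate
   (operand 2) with a register second arm (code 856).  The explicit reloads
   of x1/x2 before the gotos back to L13862 implement the backtracking.  */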
if (GET_MODE (x2) == V2SFmode) goto L13861; goto ret0; L13861: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9478; } L13862: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9484; } goto ret0; L9478: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9479; } x2 = XEXP (x1, 0); goto L13862; L9479: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 855; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L13862; L9484: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9485; } goto ret0; L9485: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 856; } goto ret0; L9507: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9508; } goto ret0; L9508: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9509; } goto ret0; L9509: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 860; } goto ret0; L9513: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9514; } goto ret0; L9514: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9515; } goto ret0; L9515: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 861; } goto ret0; L9519: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9520; } goto ret0; L9520: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9521; } goto ret0; L9521: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 862; } goto ret0; L9574: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SFmode) goto L13863; goto ret0; L13863: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case PLUS: goto L9575; case MINUS: goto L9596; default: break; } goto ret0; L9575: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SFmode && GET_CODE (x3) == VEC_SELECT) goto L9576; goto ret0; L9576: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V2SFmode)) { operands[1] = x4; goto L9577; } goto ret0; L9577: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9578; goto ret0; L9578: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9579; goto ret0; L9579: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SFmode && GET_CODE (x3) == VEC_SELECT) goto L9580; goto ret0; L9580: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L9581; goto ret0; L9581: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9582; goto ret0; L9582: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9583; goto ret0; L9583: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == PLUS) goto L9584; goto ret0; L9584: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SFmode && GET_CODE (x3) == VEC_SELECT) goto L9585; goto ret0; L9585: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, V2SFmode)) { operands[2] = x4; goto L9586; } goto ret0; L9586: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9587; goto ret0; 
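/* Still inside the VEC_CONCAT case that starts at L9574: it matches a
   V2SF concatenation of two scalar sums or differences, each built from
   vec_select of lane 0 and lane 1 of one V2SF operand.  Under TARGET_3DNOW
   / TARGET_3DNOW_A these return codes 866-868, presumably the 3DNow!
   horizontal accumulate patterns.  */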
L9587: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9588; goto ret0; L9588: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SFmode && GET_CODE (x3) == VEC_SELECT) goto L9589; goto ret0; L9589: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[2])) goto L9590; goto ret0; L9590: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9591; goto ret0; L9591: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_3DNOW)) { return 866; } goto ret0; L9596: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SFmode && GET_CODE (x3) == VEC_SELECT) goto L9597; goto ret0; L9597: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V2SFmode)) { operands[1] = x4; goto L9598; } goto ret0; L9598: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9599; goto ret0; L9599: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9600; goto ret0; L9600: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SFmode && GET_CODE (x3) == VEC_SELECT) goto L9601; goto ret0; L9601: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L9602; goto ret0; L9602: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9603; goto ret0; L9603: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9604; goto ret0; L9604: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L13865; goto ret0; L13865: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case MINUS: goto L9605; case PLUS: goto L9626; default: break; } goto ret0; L9605: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SFmode && GET_CODE (x3) == VEC_SELECT) goto L9606; goto ret0; L9606: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, V2SFmode)) { operands[2] = x4; goto L9607; } goto ret0; L9607: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9608; goto ret0; L9608: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9609; goto ret0; L9609: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SFmode && GET_CODE (x3) == VEC_SELECT) goto L9610; goto ret0; L9610: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[2])) goto L9611; goto ret0; L9611: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9612; goto ret0; L9612: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_3DNOW_A)) { return 867; } goto ret0; L9626: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SFmode && GET_CODE (x3) == VEC_SELECT) goto L9627; goto ret0; L9627: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, V2SFmode)) { operands[2] = x4; goto L9628; } goto ret0; L9628: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9629; goto ret0; L9629: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9630; goto ret0; L9630: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SFmode && GET_CODE 
(x3) == VEC_SELECT) goto L9631; goto ret0; L9631: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[2])) goto L9632; goto ret0; L9632: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L9633; goto ret0; L9633: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_3DNOW_A)) { return 868; } goto ret0; L9637: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2SImode) goto L13867; goto ret0; L13867: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == VEC_CONCAT) goto L9638; if (nonimmediate_operand (x2, V2SImode)) { operands[1] = x2; goto L9654; } goto ret0; L9638: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == SIGN_EXTEND) goto L9639; goto ret0; L9639: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == HImode && GET_CODE (x4) == TRUNCATE) goto L9640; goto ret0; L9640: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (GET_MODE (x5) == SImode && GET_CODE (x5) == VEC_SELECT) goto L9641; goto ret0; L9641: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, V2SImode)) { operands[1] = x6; goto L9642; } goto ret0; L9642: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 1); if (GET_CODE (x6) == PARALLEL && XVECLEN (x6, 0) == 1) goto L9643; goto ret0; L9643: ATTRIBUTE_UNUSED_LABEL x7 = XVECEXP (x6, 0, 0); if (x7 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9644; goto ret0; L9644: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == SIGN_EXTEND) goto L9645; goto ret0; L9645: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == HImode && GET_CODE (x4) == TRUNCATE) goto L9646; goto ret0; L9646: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (GET_MODE (x5) == SImode && GET_CODE (x5) == VEC_SELECT) goto L9647; goto ret0; L9647: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (rtx_equal_p (x6, operands[1])) goto L9648; goto ret0; L9648: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 1); if (GET_CODE (x6) == PARALLEL && XVECLEN (x6, 0) == 1) goto L9649; goto ret0; L9649: ATTRIBUTE_UNUSED_LABEL x7 = XVECEXP (x6, 0, 0); if (x7 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_3DNOW_A)) { return 869; } goto ret0; L9654: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 870; } goto ret0; L13856: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x1, 0)) { case 1: goto L13869; case 2: goto L13870; default: break; } goto ret0; L13869: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 50L: goto L9664; case 53L: goto L9681; default: break; } goto ret0; L9664: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V2SFmode)) { operands[1] = x2; goto L9665; } goto ret0; L9665: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 872; } goto ret0; L9681: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V2SFmode)) { operands[1] = x2; goto L9682; } goto ret0; L9682: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 875; } goto ret0; L13870: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 51L: goto L9669; case 52L: goto L9675; case 54L: goto L9686; default: break; } goto ret0; L9669: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9670; } goto ret0; L9670: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9671; } goto ret0; L9671: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 873; } goto ret0; L9675: 
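/* The UNSPEC dispatch above distinguishes the vector length and unspec
   number (50-54); the single-operand forms take one nonimmediate V2SF
   source (codes 872/875) and the two-operand forms a register plus a
   nonimmediate V2SF (codes 873/874/876), all under TARGET_3DNOW.  The
   unspec semantics themselves are not visible at this level.  */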
ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9676; } goto ret0; L9676: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9677; } goto ret0; L9677: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 874; } goto ret0; L9686: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9687; } goto ret0; L9687: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9688; } goto ret0; L9688: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 876; } goto ret0; L9715: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2SFmode)) { operands[1] = x2; goto L9716; } goto ret0; L9716: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 2) goto L9717; goto ret0; L9717: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9718; goto ret0; L9718: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_3DNOW_A)) { return 879; } goto ret0; ret0: return -1; } static int recog_15 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case PLUS: goto L9740; case VEC_MERGE: goto L9746; case MINUS: goto L9754; case MULT: goto L9768; case DIV: goto L9782; case SMAX: goto L9796; case SMIN: goto L9810; case SQRT: goto L9824; case FLOAT: goto L9944; case FLOAT_EXTEND: goto L10056; case VEC_CONCAT: goto L10517; case UNSPEC: goto L13896; default: break; } goto ret0; L9740: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9741; } goto ret0; L9741: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L9742; } goto ret0; L9742: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 884; } goto ret0; L9746: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2DFmode) goto L13897; goto ret0; L13897: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case PLUS: goto L9747; case MINUS: goto L9761; case MULT: goto L9775; case DIV: goto L9789; case SMAX: goto L9803; case SMIN: goto L9817; case SQRT: goto L9830; case VEC_DUPLICATE: goto L10829; case SUBREG: case REG: goto L13904; default: goto ret0; } L13904: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L10012; } goto ret0; L9747: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9748; } goto ret0; L9748: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V2DFmode)) { operands[2] = x3; goto L9749; } goto ret0; L9749: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L9750; goto ret0; L9750: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 885; } goto ret0; L9761: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9762; } goto ret0; L9762: 
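/* recog_15 repeats for V2DF under TARGET_SSE2 the structure recog_11 used
   for V4SF: each arithmetic code is matched inside a VEC_MERGE whose
   second arm must equal operand 1 (a plain register for SQRT) and whose
   mask is (const_int 1), returning codes 885-897; a VEC_DUPLICATE of a
   DFmode memory operand merged with a zero vector returns 1014.  */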
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V2DFmode)) { operands[2] = x3; goto L9763; } goto ret0; L9763: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L9764; goto ret0; L9764: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 887; } goto ret0; L9775: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9776; } goto ret0; L9776: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V2DFmode)) { operands[2] = x3; goto L9777; } goto ret0; L9777: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L9778; goto ret0; L9778: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 889; } goto ret0; L9789: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9790; } goto ret0; L9790: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V2DFmode)) { operands[2] = x3; goto L9791; } goto ret0; L9791: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L9792; goto ret0; L9792: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 891; } goto ret0; L9803: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9804; } goto ret0; L9804: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V2DFmode)) { operands[2] = x3; goto L9805; } goto ret0; L9805: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L9806; goto ret0; L9806: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 893; } goto ret0; L9817: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9818; } goto ret0; L9818: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V2DFmode)) { operands[2] = x3; goto L9819; } goto ret0; L9819: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[1])) goto L9820; goto ret0; L9820: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 895; } goto ret0; L9830: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9831; } goto ret0; L9831: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, V2DFmode)) { operands[2] = x2; goto L9832; } goto ret0; L9832: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 897; } goto ret0; L10829: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, DFmode)) { operands[1] = x3; goto L10830; } goto ret0; L10830: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const0_operand (x2, V2DFmode)) { operands[2] = x2; goto L10831; } goto ret0; L10831: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 1014; } goto ret0; L10012: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2DFmode) goto L13906; goto ret0; L13906: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case VEC_DUPLICATE: goto L10013; case FLOAT_EXTEND: goto L10037; default: break; } goto ret0; L10013: ATTRIBUTE_UNUSED_LABEL x3 
= XEXP (x2, 0); if (GET_MODE (x3) == DFmode && GET_CODE (x3) == FLOAT) goto L10014; goto ret0; L10014: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); switch (GET_MODE (x4)) { case SImode: goto L13908; case DImode: goto L13909; default: break; } goto ret0; L13908: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SImode)) { operands[2] = x4; goto L10015; } goto ret0; L10015: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (2)] && (TARGET_SSE2)) { return 924; } goto ret0; L13909: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, DImode)) { operands[2] = x4; goto L10023; } goto ret0; L10023: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (2)] && (TARGET_SSE2 && TARGET_64BIT)) { return 925; } goto ret0; L10037: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V2SFmode && GET_CODE (x3) == VEC_SELECT) goto L10038; goto ret0; L10038: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, V4SFmode)) { operands[2] = x4; goto L10039; } goto ret0; L10039: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 2) goto L10040; goto ret0; L10040: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10041; goto ret0; L10041: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 1); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10042; goto ret0; L10042: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (2)] && (TARGET_SSE2)) { return 927; } goto ret0; L9754: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9755; } goto ret0; L9755: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L9756; } goto ret0; L9756: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 886; } goto ret0; L9768: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9769; } goto ret0; L9769: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L9770; } goto ret0; L9770: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 888; } goto ret0; L9782: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9783; } goto ret0; L9783: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L9784; } goto ret0; L9784: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 890; } goto ret0; L9796: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9797; } goto ret0; L9797: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L9798; } goto ret0; L9798: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 892; } goto ret0; L9810: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9811; } goto ret0; L9811: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L9812; } goto ret0; L9812: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 894; } goto ret0; L9824: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9825; } goto ret0; L9825: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 896; } goto ret0; L9944: ATTRIBUTE_UNUSED_LABEL x2 = 
XEXP (x1, 0); if (GET_MODE (x2) == V2SImode) goto L13910; goto ret0; L13910: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == VEC_SELECT) goto L9945; if (nonimmediate_operand (x2, V2SImode)) { operands[1] = x2; goto L9979; } goto ret0; L9945: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, V4SImode)) { operands[1] = x3; goto L9946; } goto ret0; L9946: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 2) goto L9947; goto ret0; L9947: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9948; goto ret0; L9948: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 914; } goto ret0; L9979: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 919; } goto ret0; L10056: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2SFmode && GET_CODE (x2) == VEC_SELECT) goto L10057; goto ret0; L10057: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L10058; } goto ret0; L10058: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 2) goto L10059; goto ret0; L10059: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10060; goto ret0; L10060: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 929; } goto ret0; L10517: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L10518; goto ret0; L10518: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L10519; } goto ret0; L10519: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L10520; goto ret0; L10520: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (GET_CODE (x4) == CONST_INT) goto L13912; goto ret0; L13912: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x4, 0) == XWINT (x4, 0)) switch ((int) XWINT (x4, 0)) { case 1L: goto L10521; case 0L: goto L10532; default: break; } goto ret0; L10521: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L10522; goto ret0; L10522: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[2] = x3; goto L10523; } goto ret0; L10523: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L10524; goto ret0; L10524: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 989; } goto ret0; L10532: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L10533; goto ret0; L10533: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[2] = x3; goto L10534; } goto ret0; L10534: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L10535; goto ret0; L10535: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 990; } goto ret0; L13896: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 3 && XINT (x1, 1) == 41) goto L10847; goto ret0; L10847: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L10848; } goto 
ret0; L10848: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L10849; } goto ret0; L10849: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 2); if (immediate_operand (x2, SImode)) { operands[3] = x2; goto L10850; } goto ret0; L10850: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 1017; } goto ret0; ret0: return -1; } static int recog_16 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case PLUS: goto L10064; case SS_PLUS: goto L10088; case US_PLUS: goto L10100; case MINUS: goto L10112; case SS_MINUS: goto L10136; case US_MINUS: goto L10148; case ASHIFTRT: goto L10254; case EQ: goto L10337; case GT: goto L10355; case UMAX: goto L10373; case UMIN: goto L10385; case VEC_CONCAT: goto L10539; case VEC_MERGE: goto L10563; default: break; } goto ret0; L10064: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10065; } goto ret0; L10065: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10066; } goto ret0; L10066: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 930; } goto ret0; L10088: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10089; } goto ret0; L10089: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10090; } goto ret0; L10090: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 934; } goto ret0; L10100: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10101; } goto ret0; L10101: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10102; } goto ret0; L10102: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 936; } goto ret0; L10112: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10113; } goto ret0; L10113: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10114; } goto ret0; L10114: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 938; } goto ret0; L10136: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10137; } goto ret0; L10137: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10138; } goto ret0; L10138: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 942; } goto ret0; L10148: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10149; } goto ret0; L10149: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10150; } goto ret0; L10150: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 944; } goto ret0; L10254: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V16QImode && GET_CODE (x2) == PLUS) goto L10255; goto ret0; L10255: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V16QImode && GET_CODE (x3) == PLUS) goto L10256; goto ret0; L10256: 
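/* This chain requires (ashiftrt (plus (plus op1 op2) {1,1,...,1}) 1) in
   V16QImode: an element-wise average with rounding, presumably the SSE2
   pavgb pattern.  Every element of the CONST_VECTOR must be the shared
   (const_int 1), hence the sixteen identical tests that follow.  */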
ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V16QImode)) { operands[1] = x4; goto L10257; } goto ret0; L10257: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, V16QImode)) { operands[2] = x4; goto L10258; } goto ret0; L10258: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == V16QImode && GET_CODE (x3) == CONST_VECTOR && XVECLEN (x3, 0) == 16) goto L10259; goto ret0; L10259: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10260; goto ret0; L10260: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10261; goto ret0; L10261: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10262; goto ret0; L10262: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10263; goto ret0; L10263: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10264; goto ret0; L10264: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10265; goto ret0; L10265: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10266; goto ret0; L10266: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10267; goto ret0; L10267: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 8); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10268; goto ret0; L10268: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 9); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10269; goto ret0; L10269: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 10); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10270; goto ret0; L10270: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 11); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10271; goto ret0; L10271: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 12); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10272; goto ret0; L10272: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 13); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10273; goto ret0; L10273: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 14); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10274; goto ret0; L10274: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 15); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10275; goto ret0; L10275: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 953; } goto ret0; L10337: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10338; } goto ret0; L10338: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10339; } goto ret0; L10339: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 961; } goto ret0; L10355: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10356; } goto ret0; L10356: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10357; } goto ret0; L10357: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 964; } goto ret0; L10373: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10374; } goto ret0; L10374: ATTRIBUTE_UNUSED_LABEL 
x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10375; } goto ret0; L10375: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 967; } goto ret0; L10385: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L10386; } goto ret0; L10386: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10387; } goto ret0; L10387: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 969; } goto ret0; L10539: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V8QImode) goto L13971; goto ret0; L13971: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case SS_TRUNCATE: goto L10540; case US_TRUNCATE: goto L10556; default: break; } goto ret0; L10540: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8HImode)) { operands[1] = x3; goto L10541; } goto ret0; L10541: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V8QImode && GET_CODE (x2) == SS_TRUNCATE) goto L10542; goto ret0; L10542: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8HImode)) { operands[2] = x3; goto L10543; } goto ret0; L10543: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 991; } goto ret0; L10556: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8HImode)) { operands[1] = x3; goto L10557; } goto ret0; L10557: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V8QImode && GET_CODE (x2) == US_TRUNCATE) goto L10558; goto ret0; L10558: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8HImode)) { operands[2] = x3; goto L10559; } goto ret0; L10559: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 993; } goto ret0; L10563: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V16QImode && GET_CODE (x2) == VEC_SELECT) goto L10564; goto ret0; L10564: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V16QImode)) { operands[1] = x3; goto L10565; } goto ret0; L10565: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 16) goto L10566; goto ret0; L10566: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (GET_CODE (x4) == CONST_INT) goto L13973; goto ret0; L13973: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x4, 0) == XWINT (x4, 0)) switch ((int) XWINT (x4, 0)) { case 8L: goto L10567; case 0L: goto L10653; default: break; } goto ret0; L10567: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10568; goto ret0; L10568: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (9)]) goto L10569; goto ret0; L10569: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10570; goto ret0; L10570: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (10)]) goto L10571; goto ret0; L10571: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10572; goto ret0; L10572: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (11)]) goto L10573; goto ret0; L10573: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10574; goto ret0; L10574: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 8); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (12)]) goto L10575; goto ret0; L10575: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 9); if 
(x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L10576; goto ret0; L10576: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 10); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (13)]) goto L10577; goto ret0; L10577: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 11); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10578; goto ret0; L10578: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 12); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (14)]) goto L10579; goto ret0; L10579: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 13); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10580; goto ret0; L10580: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 14); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (15)]) goto L10581; goto ret0; L10581: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 15); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L10582; goto ret0; L10582: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V16QImode && GET_CODE (x2) == VEC_SELECT) goto L10583; goto ret0; L10583: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V16QImode)) { operands[2] = x3; goto L10584; } goto ret0; L10584: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 16) goto L10585; goto ret0; L10585: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10586; goto ret0; L10586: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L10587; goto ret0; L10587: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10588; goto ret0; L10588: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (9)]) goto L10589; goto ret0; L10589: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10590; goto ret0; L10590: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (10)]) goto L10591; goto ret0; L10591: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10592; goto ret0; L10592: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (11)]) goto L10593; goto ret0; L10593: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 8); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L10594; goto ret0; L10594: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 9); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (12)]) goto L10595; goto ret0; L10595: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 10); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10596; goto ret0; L10596: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 11); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (13)]) goto L10597; goto ret0; L10597: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 12); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10598; goto ret0; L10598: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 13); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (14)]) goto L10599; goto ret0; L10599: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 14); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L10600; goto ret0; L10600: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 15); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (15)]) goto L10601; goto ret0; L10601: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 21845L && (TARGET_SSE2)) { return 994; } goto ret0; 
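/* The VEC_MERGE of two byte VEC_SELECTs above, with interleaving index lists
   and merge mask 21845 (0x5555), describes a byte-interleave shuffle --
   presumably the punpckhbw/punpcklbw-style patterns.  The chain starting at
   L10653 matches the companion shape with the two index lists swapped
   between the operands.  */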
L10653: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L10654; goto ret0; L10654: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10655; goto ret0; L10655: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (9)]) goto L10656; goto ret0; L10656: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10657; goto ret0; L10657: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (10)]) goto L10658; goto ret0; L10658: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10659; goto ret0; L10659: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (11)]) goto L10660; goto ret0; L10660: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 8); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L10661; goto ret0; L10661: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 9); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (12)]) goto L10662; goto ret0; L10662: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 10); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10663; goto ret0; L10663: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 11); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (13)]) goto L10664; goto ret0; L10664: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 12); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10665; goto ret0; L10665: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 13); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (14)]) goto L10666; goto ret0; L10666: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 14); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L10667; goto ret0; L10667: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 15); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (15)]) goto L10668; goto ret0; L10668: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V16QImode && GET_CODE (x2) == VEC_SELECT) goto L10669; goto ret0; L10669: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V16QImode)) { operands[2] = x3; goto L10670; } goto ret0; L10670: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 16) goto L10671; goto ret0; L10671: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L10672; goto ret0; L10672: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10673; goto ret0; L10673: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (9)]) goto L10674; goto ret0; L10674: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10675; goto ret0; L10675: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (10)]) goto L10676; goto ret0; L10676: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10677; goto ret0; L10677: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (11)]) goto L10678; goto ret0; L10678: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10679; goto ret0; L10679: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 8); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + 
(12)]) goto L10680; goto ret0; L10680: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 9); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L10681; goto ret0; L10681: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 10); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (13)]) goto L10682; goto ret0; L10682: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 11); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10683; goto ret0; L10683: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 12); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (14)]) goto L10684; goto ret0; L10684: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 13); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10685; goto ret0; L10685: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 14); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (15)]) goto L10686; goto ret0; L10686: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 15); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L10687; goto ret0; L10687: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 21845L && (TARGET_SSE2)) { return 997; } goto ret0; ret0: return -1; } static int recog_17 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case FIX: goto L9934; case UNSPEC: goto L13997; case VEC_CONCAT: goto L9952; case PLUS: goto L10076; case MINUS: goto L10124; case EQ: goto L10349; case GT: goto L10367; case ASHIFTRT: goto L10403; case LSHIFTRT: goto L10415; case ASHIFT: goto L10433; case VEC_MERGE: goto L10631; default: break; } goto ret0; L9934: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L9935; } goto ret0; L9935: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 912; } goto ret0; L13997: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x1, 0)) { case 1: goto L13999; case 2: goto L14000; default: break; } goto ret0; L13999: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 30) goto L9939; goto ret0; L9939: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L9940; } goto ret0; L9940: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 913; } goto ret0; L14000: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 41) goto L10319; goto ret0; L10319: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V4SImode)) { operands[1] = x2; goto L10320; } goto ret0; L10320: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (immediate_operand (x2, SImode)) { operands[2] = x2; goto L10321; } goto ret0; L10321: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 958; } goto ret0; L9952: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2SImode) goto L14001; goto ret0; L14001: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FIX: goto L9953; case UNSPEC: goto L14003; default: break; } goto ret0; L9953: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, V2DFmode)) { operands[1] = x3; goto L9954; } goto ret0; L9954: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2SImode && GET_CODE (x2) == CONST_VECTOR && XVECLEN (x2, 0) == 2) goto L9955; goto ret0; L9955: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == 
const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9956; goto ret0; L9956: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 915; } goto ret0; L14003: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 30) goto L9961; goto ret0; L9961: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (nonimmediate_operand (x3, V2DFmode)) { operands[1] = x3; goto L9962; } goto ret0; L9962: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2SImode && GET_CODE (x2) == CONST_VECTOR && XVECLEN (x2, 0) == 2) goto L9963; goto ret0; L9963: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9964; goto ret0; L9964: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 916; } goto ret0; L10076: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SImode) goto L14005; goto ret0; L14005: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MULT) goto L10215; if (register_operand (x2, V4SImode)) { operands[1] = x2; goto L10077; } goto ret0; L10215: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V4SImode && GET_CODE (x3) == SIGN_EXTEND) goto L10216; goto ret0; L10216: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V4HImode && GET_CODE (x4) == VEC_SELECT) goto L10217; goto ret0; L10217: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, V8HImode)) { operands[1] = x5; goto L10218; } goto ret0; L10218: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_CODE (x5) == PARALLEL && XVECLEN (x5, 0) == 4) goto L10219; goto ret0; L10219: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 0); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10220; goto ret0; L10220: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 1); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10221; goto ret0; L10221: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 2); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L10222; goto ret0; L10222: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 3); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10223; goto ret0; L10223: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == V4SImode && GET_CODE (x3) == SIGN_EXTEND) goto L10224; goto ret0; L10224: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V4HImode && GET_CODE (x4) == VEC_SELECT) goto L10225; goto ret0; L10225: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, V8HImode)) { operands[2] = x5; goto L10226; } goto ret0; L10226: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_CODE (x5) == PARALLEL && XVECLEN (x5, 0) == 4) goto L10227; goto ret0; L10227: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 0); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10228; goto ret0; L10228: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 1); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10229; goto ret0; L10229: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 2); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L10230; goto ret0; L10230: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 3); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10231; goto ret0; L10231: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SImode && GET_CODE (x2) == MULT) goto L10232; goto ret0; L10232: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V4SImode && GET_CODE (x3) == SIGN_EXTEND) goto L10233; goto 
ret0; L10233: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V4HImode && GET_CODE (x4) == VEC_SELECT) goto L10234; goto ret0; L10234: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (rtx_equal_p (x5, operands[1])) goto L10235; goto ret0; L10235: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_CODE (x5) == PARALLEL && XVECLEN (x5, 0) == 4) goto L10236; goto ret0; L10236: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 0); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10237; goto ret0; L10237: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 1); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10238; goto ret0; L10238: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 2); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10239; goto ret0; L10239: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 3); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L10240; goto ret0; L10240: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == V4SImode && GET_CODE (x3) == SIGN_EXTEND) goto L10241; goto ret0; L10241: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V4HImode && GET_CODE (x4) == VEC_SELECT) goto L10242; goto ret0; L10242: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (rtx_equal_p (x5, operands[2])) goto L10243; goto ret0; L10243: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_CODE (x5) == PARALLEL && XVECLEN (x5, 0) == 4) goto L10244; goto ret0; L10244: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 0); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10245; goto ret0; L10245: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 1); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10246; goto ret0; L10246: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 2); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10247; goto ret0; L10247: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 3); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (7)] && (TARGET_SSE2)) { return 951; } goto ret0; L10077: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SImode)) { operands[2] = x2; goto L10078; } goto ret0; L10078: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 932; } goto ret0; L10124: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SImode)) { operands[1] = x2; goto L10125; } goto ret0; L10125: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SImode)) { operands[2] = x2; goto L10126; } goto ret0; L10126: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 940; } goto ret0; L10349: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SImode)) { operands[1] = x2; goto L10350; } goto ret0; L10350: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SImode)) { operands[2] = x2; goto L10351; } goto ret0; L10351: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 963; } goto ret0; L10367: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SImode)) { operands[1] = x2; goto L10368; } goto ret0; L10368: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SImode)) { operands[2] = x2; goto L10369; } goto ret0; L10369: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 966; } goto ret0; L10403: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SImode)) { operands[1] = x2; goto L10404; } goto ret0; L10404: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SImode)) { operands[2] = x2; goto L10405; } L10453: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == SImode && GET_CODE (x2) == 
SUBREG && XINT (x2, 1) == 0) goto L10454; goto ret0; L10405: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 972; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L10453; L10454: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonmemory_operand (x3, V2DImode)) { operands[2] = x3; goto L10455; } goto ret0; L10455: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 980; } goto ret0; L10415: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SImode)) { operands[1] = x2; goto L10416; } goto ret0; L10416: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SImode)) { operands[2] = x2; goto L10417; } L10467: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == SImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L10468; goto ret0; L10417: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 974; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L10467; L10468: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonmemory_operand (x3, V2DImode)) { operands[2] = x3; goto L10469; } goto ret0; L10469: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 982; } goto ret0; L10433: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SImode)) { operands[1] = x2; goto L10434; } goto ret0; L10434: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SImode)) { operands[2] = x2; goto L10435; } L10488: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == SImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L10489; goto ret0; L10435: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 977; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L10488; L10489: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonmemory_operand (x3, V2DImode)) { operands[2] = x3; goto L10490; } goto ret0; L10490: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 985; } goto ret0; L10631: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SImode) goto L14006; goto ret0; L14006: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case VEC_SELECT: goto L10632; case VEC_DUPLICATE: goto L10806; default: break; } goto ret0; L10632: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SImode)) { operands[1] = x3; goto L10633; } goto ret0; L10633: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L10634; goto ret0; L10634: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (GET_CODE (x4) == CONST_INT) goto L14008; goto ret0; L14008: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x4, 0) == XWINT (x4, 0)) switch ((int) XWINT (x4, 0)) { case 2L: goto L10635; case 0L: goto L10721; default: break; } goto ret0; L10635: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10636; goto ret0; L10636: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10637; goto ret0; L10637: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10638; goto ret0; L10638: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SImode && GET_CODE (x2) == VEC_SELECT) goto L10639; goto ret0; L10639: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SImode)) { operands[2] = x3; goto L10640; } goto ret0; L10640: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L10641; goto ret0; L10641: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10642; goto ret0; L10642: 
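/* Same idiom for V4SImode: a VEC_MERGE of two dword VEC_SELECTs with index
   lists (2,0,3,1) / (0,2,1,3) and merge mask 5 (binary 0101), presumably the
   punpckhdq/punpckldq interleave patterns; the branch at L10721 handles the
   ordering that starts at element 0.  */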
ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10643; goto ret0; L10643: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10644; goto ret0; L10644: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10645; goto ret0; L10645: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (5)] && (TARGET_SSE2)) { return 996; } goto ret0; L10721: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10722; goto ret0; L10722: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10723; goto ret0; L10723: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10724; goto ret0; L10724: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SImode && GET_CODE (x2) == VEC_SELECT) goto L10725; goto ret0; L10725: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SImode)) { operands[2] = x3; goto L10726; } goto ret0; L10726: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 4) goto L10727; goto ret0; L10727: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10728; goto ret0; L10728: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10729; goto ret0; L10729: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10730; goto ret0; L10730: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10731; goto ret0; L10731: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (5)] && (TARGET_SSE2)) { return 999; } goto ret0; L10806: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L10807; } goto ret0; L10807: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SImode && GET_CODE (x2) == CONST_VECTOR && XVECLEN (x2, 0) == 4) goto L10808; goto ret0; L10808: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10809; goto ret0; L10809: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10810; goto ret0; L10810: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10811; goto ret0; L10811: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 3); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10812; goto ret0; L10812: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 1011; } goto ret0; ret0: return -1; } static int recog_18 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case PLUS: goto L10070; case SS_PLUS: goto L10094; case US_PLUS: goto L10106; case MINUS: goto L10118; case SS_MINUS: goto L10142; case 
US_MINUS: goto L10154; case MULT: goto L10160; case TRUNCATE: goto L10166; case ASHIFTRT: goto L10279; case VEC_MERGE: goto L10302; case UNSPEC: goto L14037; case EQ: goto L10343; case GT: goto L10361; case SMAX: goto L10379; case SMIN: goto L10391; case LSHIFTRT: goto L10409; case ASHIFT: goto L10427; case VEC_CONCAT: goto L10547; default: break; } goto ret0; L10070: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10071; } goto ret0; L10071: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10072; } goto ret0; L10072: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 931; } goto ret0; L10094: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10095; } goto ret0; L10095: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10096; } goto ret0; L10096: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 935; } goto ret0; L10106: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10107; } goto ret0; L10107: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10108; } goto ret0; L10108: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 937; } goto ret0; L10118: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10119; } goto ret0; L10119: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10120; } goto ret0; L10120: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 939; } goto ret0; L10142: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10143; } goto ret0; L10143: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10144; } goto ret0; L10144: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 943; } goto ret0; L10154: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10155; } goto ret0; L10155: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10156; } goto ret0; L10156: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 945; } goto ret0; L10160: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10161; } goto ret0; L10161: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10162; } goto ret0; L10162: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 946; } goto ret0; L10166: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V8SImode && GET_CODE (x2) == LSHIFTRT) goto L10167; goto ret0; L10167: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V8SImode && GET_CODE (x3) == MULT) goto L10168; goto ret0; L10168: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V8SImode) goto L14039; goto ret0; L14039: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x4)) { case SIGN_EXTEND: goto L10169; case ZERO_EXTEND: goto L10179; default: break; } goto ret0; L10169: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, V8HImode)) { operands[1] = x5; goto L10170; } goto ret0; L10170: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == V8SImode && GET_CODE (x4) == SIGN_EXTEND) 
goto L10171; goto ret0; L10171: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, V8HImode)) { operands[2] = x5; goto L10172; } goto ret0; L10172: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (16)] && (TARGET_SSE2)) { return 947; } goto ret0; L10179: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, V8HImode)) { operands[1] = x5; goto L10180; } goto ret0; L10180: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == V8SImode && GET_CODE (x4) == ZERO_EXTEND) goto L10181; goto ret0; L10181: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, V8HImode)) { operands[2] = x5; goto L10182; } goto ret0; L10182: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (16)] && (TARGET_SSE2)) { return 948; } goto ret0; L10279: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V8HImode) goto L14041; goto ret0; L14041: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == PLUS) goto L10280; if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10398; } goto ret0; L10280: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V8HImode && GET_CODE (x3) == PLUS) goto L10281; goto ret0; L10281: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V8HImode)) { operands[1] = x4; goto L10282; } goto ret0; L10282: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, V8HImode)) { operands[2] = x4; goto L10283; } goto ret0; L10283: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == V8HImode && GET_CODE (x3) == CONST_VECTOR && XVECLEN (x3, 0) == 8) goto L10284; goto ret0; L10284: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10285; goto ret0; L10285: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10286; goto ret0; L10286: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10287; goto ret0; L10287: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10288; goto ret0; L10288: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10289; goto ret0; L10289: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10290; goto ret0; L10290: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10291; goto ret0; L10291: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10292; goto ret0; L10292: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 954; } goto ret0; L10398: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SImode)) { operands[2] = x2; goto L10399; } L10446: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == SImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L10447; goto ret0; L10399: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 971; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L10446; L10447: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonmemory_operand (x3, V2DImode)) { operands[2] = x3; goto L10448; } goto ret0; L10448: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 979; } goto ret0; L10302: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V8HImode) goto 
L14044; goto ret0; L14044: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == VEC_SELECT) goto L10606; if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10303; } goto ret0; L10606: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8HImode)) { operands[1] = x3; goto L10607; } goto ret0; L10607: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 8) goto L10608; goto ret0; L10608: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (GET_CODE (x4) == CONST_INT) goto L14045; goto ret0; L14045: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x4, 0) == XWINT (x4, 0)) switch ((int) XWINT (x4, 0)) { case 4L: goto L10609; case 0L: goto L10695; default: break; } goto ret0; L10609: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10610; goto ret0; L10610: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10611; goto ret0; L10611: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10612; goto ret0; L10612: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10613; goto ret0; L10613: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10614; goto ret0; L10614: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L10615; goto ret0; L10615: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10616; goto ret0; L10616: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V8HImode && GET_CODE (x2) == VEC_SELECT) goto L10617; goto ret0; L10617: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8HImode)) { operands[2] = x3; goto L10618; } goto ret0; L10618: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 8) goto L10619; goto ret0; L10619: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10620; goto ret0; L10620: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L10621; goto ret0; L10621: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10622; goto ret0; L10622: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10623; goto ret0; L10623: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10624; goto ret0; L10624: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10625; goto ret0; L10625: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10626; goto ret0; L10626: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L10627; goto ret0; L10627: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 85L && (TARGET_SSE2)) { return 995; } goto ret0; L10695: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L10696; goto ret0; L10696: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10697; goto ret0; L10697: ATTRIBUTE_UNUSED_LABEL 
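/* Still verifying the (0,4,1,5,2,6,3,7) index list element by element; as
   with the byte and dword cases, a V8HImode VEC_MERGE of two such
   VEC_SELECTs under merge mask 85 (0x55) is presumably a word-interleave
   (punpcklwd/punpckhwd-style) pattern.  */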
x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10698; goto ret0; L10698: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10699; goto ret0; L10699: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10700; goto ret0; L10700: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10701; goto ret0; L10701: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L10702; goto ret0; L10702: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V8HImode && GET_CODE (x2) == VEC_SELECT) goto L10703; goto ret0; L10703: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8HImode)) { operands[2] = x3; goto L10704; } goto ret0; L10704: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 8) goto L10705; goto ret0; L10705: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L10706; goto ret0; L10706: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10707; goto ret0; L10707: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) goto L10708; goto ret0; L10708: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 3); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10709; goto ret0; L10709: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 4); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (6)]) goto L10710; goto ret0; L10710: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 5); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10711; goto ret0; L10711: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 6); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (7)]) goto L10712; goto ret0; L10712: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 7); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L10713; goto ret0; L10713: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == CONST_INT && XWINT (x2, 0) == 85L && (TARGET_SSE2)) { return 998; } goto ret0; L10303: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V8HImode && GET_CODE (x2) == VEC_DUPLICATE) goto L10304; goto ret0; L10304: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == HImode && GET_CODE (x3) == TRUNCATE) goto L10305; goto ret0; L10305: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[2] = x4; goto L10306; } goto ret0; L10306: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (const_0_to_255_operand (x2, SImode)) { operands[3] = x2; goto L10307; } goto ret0; L10307: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 956; } goto ret0; L14037: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 2) goto L14047; goto ret0; L14047: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 55L: goto L10325; case 56L: goto L10331; default: break; } goto ret0; L10325: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V8HImode)) { operands[1] = x2; goto L10326; } goto ret0; L10326: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (immediate_operand (x2, SImode)) { operands[2] = x2; goto L10327; } goto ret0; L10327: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 959; } goto ret0; L10331: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V8HImode)) { operands[1] = x2; goto L10332; } 
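/* If the V8HImode vector operand of this two-element UNSPEC failed the
   nonimmediate_operand predicate we give up; otherwise L10332 goes on to
   require that the second element is an SImode immediate, presumably a
   builtin taking a shuffle or extract control constant.  */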
goto ret0; L10332: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (immediate_operand (x2, SImode)) { operands[2] = x2; goto L10333; } goto ret0; L10333: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 960; } goto ret0; L10343: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10344; } goto ret0; L10344: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10345; } goto ret0; L10345: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 962; } goto ret0; L10361: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10362; } goto ret0; L10362: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10363; } goto ret0; L10363: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 965; } goto ret0; L10379: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10380; } goto ret0; L10380: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10381; } goto ret0; L10381: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 968; } goto ret0; L10391: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10392; } goto ret0; L10392: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V8HImode)) { operands[2] = x2; goto L10393; } goto ret0; L10393: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 970; } goto ret0; L10409: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10410; } goto ret0; L10410: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SImode)) { operands[2] = x2; goto L10411; } L10460: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == SImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L10461; goto ret0; L10411: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 973; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L10460; L10461: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonmemory_operand (x3, V2DImode)) { operands[2] = x3; goto L10462; } goto ret0; L10462: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 981; } goto ret0; L10427: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V8HImode)) { operands[1] = x2; goto L10428; } goto ret0; L10428: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SImode)) { operands[2] = x2; goto L10429; } L10481: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == SImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L10482; goto ret0; L10429: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 976; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L10481; L10482: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonmemory_operand (x3, V2DImode)) { operands[2] = x3; goto L10483; } goto ret0; L10483: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 984; } goto ret0; L10547: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4HImode && GET_CODE (x2) == SS_TRUNCATE) goto L10548; goto ret0; L10548: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SImode)) { operands[1] = x3; goto L10549; } goto ret0; L10549: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4HImode && GET_CODE (x2) == SS_TRUNCATE) goto L10550; goto ret0; L10550: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SImode)) { operands[2] = 
x3; goto L10551; } goto ret0; L10551: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 992; } goto ret0; ret0: return -1; } static int recog_19 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 0); switch (GET_MODE (x1)) { case HImode: goto L13118; case CCFPmode: goto L13110; case CCFPUmode: goto L13111; case CCmode: goto L13112; case SImode: goto L13113; case QImode: goto L13122; case DImode: goto L13129; case SFmode: goto L13135; case DFmode: goto L13137; case XFmode: goto L13139; case TImode: goto L13153; default: break; } L1: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 17) goto L2; L213: ATTRIBUTE_UNUSED_LABEL switch (GET_MODE (x1)) { case CCFPmode: goto L13160; case CCFPUmode: goto L13161; default: break; } L368: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case STRICT_LOW_PART: goto L369; case REG: goto L13162; case PC: goto L6207; default: break; } L1336: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, VOIDmode)) { operands[0] = x1; goto L1337; } L8266: ATTRIBUTE_UNUSED_LABEL operands[0] = x1; goto L8267; L8323: ATTRIBUTE_UNUSED_LABEL switch (GET_MODE (x1)) { case V4SFmode: goto L13163; case V4SImode: goto L13164; case V2DImode: goto L13165; case V8QImode: goto L13187; case V4HImode: goto L13167; case V2SImode: goto L13168; case V2SFmode: goto L13169; case V2DFmode: goto L13170; case V8HImode: goto L13171; case V16QImode: goto L13172; case TImode: goto L13173; case TFmode: goto L13185; case SImode: goto L13186; case DImode: goto L13189; case SFmode: goto L13191; case CCFPmode: goto L13196; case CCFPUmode: goto L13197; default: break; } L9442: ATTRIBUTE_UNUSED_LABEL operands[0] = x1; goto L9443; L9469: ATTRIBUTE_UNUSED_LABEL switch (GET_MODE (x1)) { case V2SFmode: goto L13203; case V2SImode: goto L13204; case V8QImode: goto L13205; case V4HImode: goto L13206; case V2DFmode: goto L13207; case V2DImode: goto L13208; case CCFPmode: goto L13209; case CCFPUmode: goto L13210; case SImode: goto L13211; case V16QImode: goto L13212; case V4SFmode: goto L13216; case V4SImode: goto L13217; case DImode: goto L13218; case V8HImode: goto L13220; case TImode: goto L13221; case DFmode: goto L13226; default: break; } L10855: ATTRIBUTE_UNUSED_LABEL operands[0] = x1; goto L10856; L10872: ATTRIBUTE_UNUSED_LABEL switch (GET_MODE (x1)) { case V4SFmode: goto L13227; case V2DFmode: goto L13228; case V16QImode: goto L13229; default: break; } goto ret0; L13118: ATTRIBUTE_UNUSED_LABEL tem = recog_1 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L1; L13110: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 18) goto L146; goto L1; L146: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCFPmode && GET_CODE (x1) == COMPARE) goto L147; x1 = XEXP (x0, 0); goto L1; L147: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SFmode: goto L13263; case DFmode: goto L13264; case XFmode: goto L13265; default: break; } L199: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L200; } x1 = XEXP (x0, 0); goto L1; L13263: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SFmode)) { operands[0] = x2; goto L148; } goto L199; L148: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if 
(nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L149; } x2 = XEXP (x1, 0); goto L199; L149: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 19; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L199; L13264: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DFmode)) { operands[0] = x2; goto L161; } goto L199; L161: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L162; } x2 = XEXP (x1, 0); goto L199; L162: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 21; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L199; L13265: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, XFmode)) { operands[0] = x2; goto L174; } goto L199; L174: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L175; } x2 = XEXP (x1, 0); goto L199; L175: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 23; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L199; L200: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == FLOAT) goto L201; x1 = XEXP (x0, 0); goto L1; L201: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L202; } x1 = XEXP (x0, 0); goto L1; L202: ATTRIBUTE_UNUSED_LABEL if ((0 && TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[0])) && GET_MODE (XEXP (SET_SRC (PATTERN (insn)), 1)) == GET_MODE (operands[0]))) { return 27; } x1 = XEXP (x0, 0); goto L1; L13111: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 18) goto L185; goto L1; L185: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCFPUmode && GET_CODE (x1) == COMPARE) goto L186; x1 = XEXP (x0, 0); goto L1; L186: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L187; } x1 = XEXP (x0, 0); goto L1; L187: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L188; } x1 = XEXP (x0, 0); goto L1; L188: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[0])) && GET_MODE (operands[0]) == GET_MODE (operands[1]))) { return 25; } x1 = XEXP (x0, 0); goto L1; L13112: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 17) goto L209; goto L1; L209: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCmode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 25) goto L210; x1 = XEXP (x0, 0); goto L1; L210: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, HImode)) { operands[0] = x2; goto L211; } x1 = XEXP (x0, 0); goto L1; L211: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 29; } x1 = XEXP (x0, 0); goto L1; L13113: ATTRIBUTE_UNUSED_LABEL tem = recog_3 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L1; L13122: ATTRIBUTE_UNUSED_LABEL tem = recog_4 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L1; L13129: ATTRIBUTE_UNUSED_LABEL tem = recog_6 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L1; L13135: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, SFmode)) { operands[0] = x1; goto L585; } L13136: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, SFmode)) { operands[0] = x1; goto L593; } L13142: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, SFmode)) { operands[0] = x1; goto L857; } L13143: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, SFmode)) { operands[0] = x1; goto L886; } L13144: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, SFmode)) { operands[0] = x1; goto L896; } L13145: ATTRIBUTE_UNUSED_LABEL if (register_operand 
(x1, SFmode)) { operands[0] = x1; goto L901; } L13146: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, SFmode)) { operands[0] = x1; goto L924; } L13154: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, SFmode)) { operands[0] = x1; goto L4239; } L13156: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, SFmode)) { operands[0] = x1; goto L4326; } goto L1; L585: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_no_elim_operand (x1, SFmode)) { operands[1] = x1; goto L586; } L589: ATTRIBUTE_UNUSED_LABEL if (nonmemory_no_elim_operand (x1, SFmode)) { operands[1] = x1; goto L590; } x1 = XEXP (x0, 0); goto L13136; L586: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 88; } x1 = XEXP (x0, 1); goto L589; L590: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 89; } x1 = XEXP (x0, 0); goto L13136; L593: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_operand (x1, SFmode)) { operands[1] = x1; goto L594; } x1 = XEXP (x0, 0); goto L13142; L594: ATTRIBUTE_UNUSED_LABEL if (((TARGET_INTER_UNIT_MOVES || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && (reload_in_progress || reload_completed || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE) || GET_CODE (operands[1]) != CONST_DOUBLE || memory_operand (operands[0], SFmode)))) { return 90; } L598: ATTRIBUTE_UNUSED_LABEL if (((!TARGET_INTER_UNIT_MOVES && !optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && (reload_in_progress || reload_completed || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE) || GET_CODE (operands[1]) != CONST_DOUBLE || memory_operand (operands[0], SFmode)))) { return 91; } x1 = XEXP (x0, 0); goto L13142; L857: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode && GET_CODE (x1) == FLOAT_TRUNCATE) goto L858; x1 = XEXP (x0, 0); goto L13143; L858: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[1] = x2; goto L859; } x1 = XEXP (x0, 0); goto L13143; L859: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && flag_unsafe_math_optimizations)) { return 132; } x1 = XEXP (x0, 0); goto L13143; L886: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode && GET_CODE (x1) == FLOAT_TRUNCATE) goto L887; x1 = XEXP (x0, 0); goto L13144; L887: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L888; } x1 = XEXP (x0, 0); goto L13144; L888: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_SSE2 && !TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 136; } L893: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_SSE2 && TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 137; } x1 = XEXP (x0, 0); goto L13144; L896: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode && GET_CODE (x1) == FLOAT_TRUNCATE) goto L897; x1 = XEXP (x0, 0); goto L13145; L897: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[1] = x2; goto L898; } x1 = XEXP (x0, 0); goto L13145; L898: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 138; } x1 = XEXP (x0, 0); goto L13145; L901: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode) goto L13447; x1 = XEXP (x0, 0); goto L13146; L13447: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FLOAT_TRUNCATE: goto L902; case FLOAT: goto L1105; default: break; } x1 = XEXP (x0, 0); goto L13146; L902: ATTRIBUTE_UNUSED_LABEL x2 = XEXP 
(x1, 0); switch (GET_MODE (x2)) { case DFmode: goto L13449; case XFmode: goto L13450; default: break; } x1 = XEXP (x0, 0); goto L13146; L13449: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L903; } x1 = XEXP (x0, 0); goto L13146; L903: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_80387 && TARGET_SSE2 && !TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS)) { return 139; } L908: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_80387 && TARGET_SSE2 && TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS)) { return 140; } x1 = XEXP (x0, 0); goto L13146; L13450: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, XFmode)) { operands[1] = x2; goto L913; } x1 = XEXP (x0, 0); goto L13146; L913: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && flag_unsafe_math_optimizations)) { return 141; } x1 = XEXP (x0, 0); goto L13146; L1105: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case HImode: goto L13451; case SImode: goto L13452; case DImode: goto L13453; default: break; } x1 = XEXP (x0, 0); goto L13146; L13451: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L1106; } x1 = XEXP (x0, 0); goto L13146; L1106: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (!TARGET_SSE || !TARGET_SSE_MATH))) { return 162; } x1 = XEXP (x0, 0); goto L13146; L13452: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L1111; } x1 = XEXP (x0, 0); goto L13146; L1111: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (!TARGET_SSE || TARGET_MIX_SSE_I387))) { return 163; } L1116: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 164; } x1 = XEXP (x0, 0); goto L13146; L13453: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L1121; } x1 = XEXP (x0, 0); goto L13146; L1121: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (!TARGET_SSE || !TARGET_64BIT || TARGET_MIX_SSE_I387))) { return 165; } L1126: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_80387 && (!TARGET_SSE || TARGET_MIX_SSE_I387))) { return 166; } L1131: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_SSE)) { return 167; } x1 = XEXP (x0, 0); goto L13146; L924: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode) goto L13454; x1 = XEXP (x0, 0); goto L13154; L13454: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FLOAT_TRUNCATE: goto L925; case NEG: goto L4211; case ABS: goto L4367; default: break; } x1 = XEXP (x0, 0); goto L13154; L925: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L926; } x1 = XEXP (x0, 0); goto L13154; L926: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 143; } x1 = XEXP (x0, 0); goto L13154; L4211: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SFmode)) { operands[1] = x2; goto L4212; } x1 = XEXP (x0, 0); goto L13154; L4212: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (NEG, SFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 362; } x1 = XEXP (x0, 0); goto L13154; L4367: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SFmode)) { operands[1] = x2; goto L4368; } x1 = XEXP (x0, 0); goto L13154; L4368: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (ABS, SFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 377; } x1 = XEXP (x0, 0); goto L13154; L4239: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode) goto L13457; x1 = XEXP (x0, 0); goto L13156; L13457: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case NEG: goto L4240; case ABS: goto L4396; default: break; } x1 
= XEXP (x0, 0); goto L13156; L4240: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L4241; } x1 = XEXP (x0, 0); goto L13156; L4241: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !TARGET_SSE && ix86_unary_operator_ok (NEG, SFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 364; } x1 = XEXP (x0, 0); goto L13156; L4396: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L4397; } x1 = XEXP (x0, 0); goto L13156; L4397: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && ix86_unary_operator_ok (ABS, SFmode, operands) && !TARGET_SSE) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 379; } x1 = XEXP (x0, 0); goto L13156; L4326: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode) goto L13459; x1 = XEXP (x0, 0); goto L1; L13459: ATTRIBUTE_UNUSED_LABEL tem = recog_7 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L1; L13137: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, DFmode)) { operands[0] = x1; goto L827; } L13138: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, DFmode)) { operands[0] = x1; goto L837; } L13141: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DFmode)) { operands[0] = x1; goto L842; } L13147: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, DFmode)) { operands[0] = x1; goto L942; } L13155: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, DFmode)) { operands[0] = x1; goto L4297; } L13157: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DFmode)) { operands[0] = x1; goto L4331; } goto L1; L827: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DFmode && GET_CODE (x1) == FLOAT_EXTEND) goto L828; if (general_no_elim_operand (x1, DFmode)) { operands[1] = x1; goto L609; } x1 = XEXP (x0, 0); goto L13138; L828: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L829; } x1 = XEXP (x0, 0); goto L13138; L829: ATTRIBUTE_UNUSED_LABEL if ((0)) { return 126; } x1 = XEXP (x0, 0); goto L13138; L609: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && !TARGET_INTEGER_DFMODE_MOVES)) { return 93; } L613: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT || TARGET_INTEGER_DFMODE_MOVES)) { return 94; } x1 = XEXP (x0, 0); goto L13138; L837: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DFmode && GET_CODE (x1) == FLOAT_EXTEND) goto L838; if (general_operand (x1, DFmode)) { operands[1] = x1; goto L617; } x1 = XEXP (x0, 0); goto L13141; L838: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L839; } x1 = XEXP (x0, 0); goto L13141; L839: ATTRIBUTE_UNUSED_LABEL if (((TARGET_80387 || TARGET_SSE2) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 128; } x1 = XEXP (x0, 0); goto L13141; L617: ATTRIBUTE_UNUSED_LABEL if (((GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && ((optimize_size || !TARGET_INTEGER_DFMODE_MOVES) && !TARGET_64BIT) && (reload_in_progress || reload_completed || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE) || GET_CODE (operands[1]) != CONST_DOUBLE || memory_operand (operands[0], DFmode)))) { return 95; } L621: ATTRIBUTE_UNUSED_LABEL if (((GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && ((!optimize_size && TARGET_INTEGER_DFMODE_MOVES) || TARGET_64BIT) && (reload_in_progress || reload_completed || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE) || GET_CODE (operands[1]) != CONST_DOUBLE || memory_operand 
(operands[0], DFmode)))) { return 96; } x1 = XEXP (x0, 0); goto L13141; L842: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DFmode) goto L13489; x1 = XEXP (x0, 0); goto L13147; L13489: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FLOAT_EXTEND: goto L843; case FLOAT_TRUNCATE: goto L930; case FLOAT: goto L1135; default: break; } x1 = XEXP (x0, 0); goto L13147; L843: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L844; } x1 = XEXP (x0, 0); goto L13147; L844: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_80387 && TARGET_SSE2 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 129; } x1 = XEXP (x0, 0); goto L13147; L930: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L931; } x1 = XEXP (x0, 0); goto L13147; L931: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && flag_unsafe_math_optimizations)) { return 144; } x1 = XEXP (x0, 0); goto L13147; L1135: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case HImode: goto L13492; case SImode: goto L13493; case DImode: goto L13494; default: break; } x1 = XEXP (x0, 0); goto L13147; L13492: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L1136; } x1 = XEXP (x0, 0); goto L13147; L1136: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (!TARGET_SSE2 || !TARGET_SSE_MATH))) { return 168; } x1 = XEXP (x0, 0); goto L13147; L13493: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L1141; } x1 = XEXP (x0, 0); goto L13147; L1141: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (!TARGET_SSE2 || TARGET_MIX_SSE_I387))) { return 169; } L1146: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 170; } x1 = XEXP (x0, 0); goto L13147; L13494: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L1151; } x1 = XEXP (x0, 0); goto L13147; L1151: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (!TARGET_SSE2 || !TARGET_64BIT))) { return 171; } L1156: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_80387 && (!TARGET_SSE2 || TARGET_MIX_SSE_I387))) { return 172; } L1161: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 173; } x1 = XEXP (x0, 0); goto L13147; L942: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DFmode) goto L13495; x1 = XEXP (x0, 0); goto L13155; L13495: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FLOAT_TRUNCATE: goto L943; case NEG: goto L4252; case ABS: goto L4408; default: break; } x1 = XEXP (x0, 0); goto L13155; L943: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L944; } x1 = XEXP (x0, 0); goto L13155; L944: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 146; } x1 = XEXP (x0, 0); goto L13155; L4252: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, DFmode)) { operands[1] = x2; goto L4253; } x1 = XEXP (x0, 0); goto L13155; L4253: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (NEG, DFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 365; } x1 = XEXP (x0, 0); goto L13155; L4408: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, DFmode)) { operands[1] = x2; goto L4409; } x1 = XEXP (x0, 0); goto L13155; L4409: ATTRIBUTE_UNUSED_LABEL if ((ix86_unary_operator_ok (ABS, DFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 380; } x1 = XEXP (x0, 0); goto L13155; L4297: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DFmode) goto 
L13498; x1 = XEXP (x0, 0); goto L13157; L13498: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case NEG: goto L4298; case ABS: goto L4454; default: break; } x1 = XEXP (x0, 0); goto L13157; L4298: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L4299; } x1 = XEXP (x0, 0); goto L13157; L4299: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_80387 && ix86_unary_operator_ok (NEG, DFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 368; } L4311: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_80387 && ix86_unary_operator_ok (NEG, DFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 369; } x1 = XEXP (x0, 0); goto L13157; L4454: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L4455; } x1 = XEXP (x0, 0); goto L13157; L4455: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_80387 && ix86_unary_operator_ok (ABS, DFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 383; } L4467: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_80387 && ix86_unary_operator_ok (ABS, DFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 384; } x1 = XEXP (x0, 0); goto L13157; L4331: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DFmode) goto L13500; x1 = XEXP (x0, 0); goto L1; L13500: ATTRIBUTE_UNUSED_LABEL tem = recog_8 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L1; L13139: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, XFmode)) { operands[0] = x1; goto L832; } L13140: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, XFmode)) { operands[0] = x1; goto L847; } L13150: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, XFmode)) { operands[0] = x1; goto L1164; } goto L1; L832: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == XFmode && GET_CODE (x1) == FLOAT_EXTEND) goto L833; if (general_no_elim_operand (x1, XFmode)) { operands[1] = x1; goto L632; } x1 = XEXP (x0, 0); goto L13140; L833: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L834; } x1 = XEXP (x0, 0); goto L13140; L834: ATTRIBUTE_UNUSED_LABEL if ((0)) { return 127; } x1 = XEXP (x0, 0); goto L13140; L632: ATTRIBUTE_UNUSED_LABEL if ((optimize_size)) { return 98; } L636: ATTRIBUTE_UNUSED_LABEL if ((!optimize_size)) { return 99; } x1 = XEXP (x0, 0); goto L13140; L847: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == XFmode) goto L13543; L639: ATTRIBUTE_UNUSED_LABEL if (general_operand (x1, XFmode)) { operands[1] = x1; goto L640; } x1 = XEXP (x0, 0); goto L13150; L13543: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FLOAT_EXTEND: goto L848; case NEG: goto L4322; case ABS: goto L4478; default: break; } goto L639; L848: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SFmode: goto L13546; case DFmode: goto L13547; default: break; } goto L639; L13546: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SFmode)) { operands[1] = x2; goto L849; } goto L639; L849: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 130; } x1 = XEXP (x0, 1); goto L639; L13547: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L854; } goto L639; L854: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 131; } x1 = XEXP (x0, 1); goto L639; L4322: 
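/* The XFmode NEG and ABS cases below (L4322, L4478) match insns whose
   complete pattern also carries a CLOBBER; such a match is only reported
   when the caller passed a non-null pnum_clobbers, and *pnum_clobbers
   tells it how many clobbers must be appended to the insn.  */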
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, XFmode)) { operands[1] = x2; goto L4323; } goto L639; L4323: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && ix86_unary_operator_ok (NEG, XFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 370; } x1 = XEXP (x0, 1); goto L639; L4478: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, XFmode)) { operands[1] = x2; goto L4479; } goto L639; L4479: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && ix86_unary_operator_ok (ABS, XFmode, operands)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 385; } x1 = XEXP (x0, 1); goto L639; L640: ATTRIBUTE_UNUSED_LABEL if ((optimize_size && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && (reload_in_progress || reload_completed || GET_CODE (operands[1]) != CONST_DOUBLE || memory_operand (operands[0], XFmode)))) { return 100; } L644: ATTRIBUTE_UNUSED_LABEL if ((!optimize_size && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && (reload_in_progress || reload_completed || GET_CODE (operands[1]) != CONST_DOUBLE || memory_operand (operands[0], XFmode)))) { return 101; } x1 = XEXP (x0, 0); goto L13150; L1164: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == XFmode) goto L13548; x1 = XEXP (x0, 0); goto L1; L13548: ATTRIBUTE_UNUSED_LABEL tem = recog_9 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L1; L13153: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, TImode)) { operands[0] = x1; goto L2304; } goto L1; L2304: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == TImode && GET_CODE (x1) == MULT) goto L2305; x1 = XEXP (x0, 0); goto L1; L2305: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == TImode) goto L13596; x1 = XEXP (x0, 0); goto L1; L13596: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ZERO_EXTEND: goto L2306; case SIGN_EXTEND: goto L2342; default: break; } x1 = XEXP (x0, 0); goto L1; L2306: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L2307; } x1 = XEXP (x0, 0); goto L1; L2307: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == TImode && GET_CODE (x2) == ZERO_EXTEND) goto L2308; x1 = XEXP (x0, 0); goto L1; L2308: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L2309; } x1 = XEXP (x0, 0); goto L1; L2309: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 254; } x1 = XEXP (x0, 0); goto L1; L2342: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L2343; } x1 = XEXP (x0, 0); goto L1; L2343: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == TImode && GET_CODE (x2) == SIGN_EXTEND) goto L2344; x1 = XEXP (x0, 0); goto L1; L2344: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L2345; } x1 = XEXP (x0, 0); goto L1; L2345: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 256; } x1 = XEXP (x0, 0); goto L1; L2: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_CODE (x1) == COMPARE) goto L3; x1 = XEXP (x0, 0); goto L213; L3: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case DImode: goto L13599; case SImode: goto L13601; case HImode: goto L13603; case 
QImode: goto L13605; default: break; } L79: ATTRIBUTE_UNUSED_LABEL if (general_operand (x2, QImode)) { operands[0] = x2; goto L80; } L88: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == QImode) goto L13608; x1 = XEXP (x0, 0); goto L213; L13599: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case MINUS: goto L10; case NEG: goto L1431; case SUBREG: case REG: case MEM: goto L13598; default: x1 = XEXP (x0, 0); goto L213; } L13598: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L4; } x1 = XEXP (x0, 0); goto L213; L10: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[0] = x3; goto L11; } x1 = XEXP (x0, 0); goto L213; L11: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[1] = x3; goto L12; } x1 = XEXP (x0, 0); goto L213; L12: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode))) { return 1; } x1 = XEXP (x0, 0); goto L213; L1431: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L1432; } x1 = XEXP (x0, 0); goto L213; L1432: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_general_operand (x2, DImode)) { operands[1] = x2; goto L1433; } x1 = XEXP (x0, 0); goto L213; L1433: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCZmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! pic_symbolic_operand (operands[2], VOIDmode)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 198; } x1 = XEXP (x0, 0); goto L213; L4: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const0_operand (x2, DImode)) { operands[1] = x2; goto L5; } L17: ATTRIBUTE_UNUSED_LABEL if (x86_64_general_operand (x2, DImode)) { operands[1] = x2; goto L18; } x1 = XEXP (x0, 0); goto L213; L5: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode))) { return 0; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L17; L18: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCmode))) { return 2; } x1 = XEXP (x0, 0); goto L213; L13601: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MINUS) goto L29; if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L23; } x1 = XEXP (x0, 0); goto L213; L29: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[0] = x3; goto L30; } x1 = XEXP (x0, 0); goto L213; L30: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[1] = x3; goto L31; } x1 = XEXP (x0, 0); goto L213; L31: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCGOCmode))) { return 4; } x1 = XEXP (x0, 0); goto L213; L23: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const0_operand (x2, SImode)) { operands[1] = x2; goto L24; } L36: ATTRIBUTE_UNUSED_LABEL if (general_operand (x2, SImode)) { operands[1] = x2; goto L37; } x1 = XEXP (x0, 0); goto L213; L24: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode))) { return 3; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L36; L37: ATTRIBUTE_UNUSED_LABEL if (((GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && ix86_match_ccmode (insn, CCmode))) { return 5; } x1 = XEXP (x0, 0); goto L213; L13603: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MINUS) goto L48; if (nonimmediate_operand (x2, 
HImode)) { operands[0] = x2; goto L42; } x1 = XEXP (x0, 0); goto L213; L48: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[0] = x3; goto L49; } x1 = XEXP (x0, 0); goto L213; L49: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[1] = x3; goto L50; } x1 = XEXP (x0, 0); goto L213; L50: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCGOCmode))) { return 7; } x1 = XEXP (x0, 0); goto L213; L42: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const0_operand (x2, HImode)) { operands[1] = x2; goto L43; } L55: ATTRIBUTE_UNUSED_LABEL if (general_operand (x2, HImode)) { operands[1] = x2; goto L56; } x1 = XEXP (x0, 0); goto L213; L43: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode))) { return 6; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L55; L56: ATTRIBUTE_UNUSED_LABEL if (((GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && ix86_match_ccmode (insn, CCmode))) { return 8; } x1 = XEXP (x0, 0); goto L213; L13605: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MINUS) goto L73; if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L61; } L13606: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L98; goto L79; L73: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[0] = x3; goto L74; } goto L79; L74: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[1] = x3; goto L75; } goto L79; L75: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCGOCmode))) { return 11; } x2 = XEXP (x1, 0); goto L79; L61: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const0_operand (x2, QImode)) { operands[1] = x2; goto L62; } L67: ATTRIBUTE_UNUSED_LABEL if (general_operand (x2, QImode)) { operands[1] = x2; goto L68; } x2 = XEXP (x1, 0); goto L13606; L62: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode))) { return 9; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L67; L68: ATTRIBUTE_UNUSED_LABEL if (((GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && ix86_match_ccmode (insn, CCmode))) { return 10; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L13606; L98: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L99; goto L79; L99: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[0] = x4; goto L100; } goto L79; L100: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L101; goto L79; L101: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L102; goto L79; L102: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const0_operand (x2, QImode)) { operands[1] = x2; goto L103; } x2 = XEXP (x1, 0); goto L79; L103: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode))) { return 14; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); goto L79; L80: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L81; x2 = XEXP (x1, 0); goto L88; L81: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L82; x2 = XEXP (x1, 0); goto L88; L82: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L83; } x2 
= XEXP (x1, 0); goto L88; L83: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L84; x2 = XEXP (x1, 0); goto L88; L84: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (!TARGET_64BIT && ix86_match_ccmode (insn, CCmode))) { return 12; } x2 = XEXP (x1, 0); goto L88; L13608: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, QImode)) { operands[0] = x2; goto L89; } L13609: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L108; x1 = XEXP (x0, 0); goto L213; L89: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L90; x2 = XEXP (x1, 0); goto L13609; L90: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L91; x2 = XEXP (x1, 0); goto L13609; L91: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L92; } x2 = XEXP (x1, 0); goto L13609; L92: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L93; x2 = XEXP (x1, 0); goto L13609; L93: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (TARGET_64BIT && ix86_match_ccmode (insn, CCmode))) { return 13; } x2 = XEXP (x1, 0); goto L13609; L108: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L109; x1 = XEXP (x0, 0); goto L213; L109: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[0] = x4; goto L110; } x1 = XEXP (x0, 0); goto L213; L110: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L111; x1 = XEXP (x0, 0); goto L213; L111: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L112; x1 = XEXP (x0, 0); goto L213; L112: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[1] = x2; goto L113; } L122: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[1] = x2; goto L123; } L132: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == QImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L133; x1 = XEXP (x0, 0); goto L213; L113: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && ix86_match_ccmode (insn, CCmode))) { return 15; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L122; L123: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCmode))) { return 16; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L132; L133: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L134; x1 = XEXP (x0, 0); goto L213; L134: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L135; } x1 = XEXP (x0, 0); goto L213; L135: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L136; x1 = XEXP (x0, 0); goto L213; L136: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (ix86_match_ccmode (insn, CCmode))) { return 17; } x1 = XEXP (x0, 0); goto L213; L13160: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 17) goto L214; goto L368; L214: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCFPmode && GET_CODE (x1) == COMPARE) goto L215; x1 = XEXP (x0, 0); goto L368; L215: ATTRIBUTE_UNUSED_LABEL x2 = XEXP 
(x1, 0); if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L216; } x1 = XEXP (x0, 0); goto L368; L216: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L217; } L222: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, VOIDmode)) { operands[1] = x2; goto L223; } x1 = XEXP (x0, 0); goto L368; L217: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_CMOVE && !SSE_FLOAT_MODE_P (GET_MODE (operands[0])) && FLOAT_MODE_P (GET_MODE (operands[0])) && GET_MODE (operands[0]) == GET_MODE (operands[1]))) { return 30; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L222; L223: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && SSE_FLOAT_MODE_P (GET_MODE (operands[0])) && GET_MODE (operands[0]) == GET_MODE (operands[1]))) { return 31; } L229: ATTRIBUTE_UNUSED_LABEL if ((SSE_FLOAT_MODE_P (GET_MODE (operands[0])) && GET_MODE (operands[0]) == GET_MODE (operands[1]))) { return 32; } x1 = XEXP (x0, 0); goto L368; L13161: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 17) goto L232; goto L368; L232: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCFPUmode && GET_CODE (x1) == COMPARE) goto L233; x1 = XEXP (x0, 0); goto L368; L233: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L234; } x1 = XEXP (x0, 0); goto L368; L234: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L235; } L240: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, VOIDmode)) { operands[1] = x2; goto L241; } x1 = XEXP (x0, 0); goto L368; L235: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_CMOVE && !SSE_FLOAT_MODE_P (GET_MODE (operands[0])) && FLOAT_MODE_P (GET_MODE (operands[0])) && GET_MODE (operands[0]) == GET_MODE (operands[1]))) { return 33; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L240; L241: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && SSE_FLOAT_MODE_P (GET_MODE (operands[0])) && GET_MODE (operands[0]) == GET_MODE (operands[1]))) { return 34; } L247: ATTRIBUTE_UNUSED_LABEL if ((SSE_FLOAT_MODE_P (GET_MODE (operands[0])) && GET_MODE (operands[0]) == GET_MODE (operands[1]))) { return 35; } x1 = XEXP (x0, 0); goto L368; L369: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case HImode: goto L13610; case QImode: goto L13612; default: break; } goto L8266; L13610: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L370; } L13611: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, HImode)) { operands[0] = x2; goto L382; } goto L8266; L370: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_operand (x1, HImode)) { operands[1] = x1; goto L371; } x1 = XEXP (x0, 0); x2 = XEXP (x1, 0); goto L13611; L371: ATTRIBUTE_UNUSED_LABEL if (((!
TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 55; } x1 = XEXP (x0, 0); x2 = XEXP (x1, 0); goto L13611; L382: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (const0_operand (x1, HImode)) { operands[1] = x1; goto L383; } x1 = XEXP (x0, 0); goto L8266; L383: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && ((!TARGET_USE_MOV0 && !TARGET_PARTIAL_REG_STALL) || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 56; } x1 = XEXP (x0, 0); goto L8266; L13612: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L1718; } L13613: ATTRIBUTE_UNUSED_LABEL if (q_regs_operand (x2, QImode)) { operands[0] = x2; goto L418; } goto L8266; L1718: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == QImode) goto L13614; L406: ATTRIBUTE_UNUSED_LABEL if (general_operand (x1, QImode)) { operands[1] = x1; goto L407; } x1 = XEXP (x0, 0); x2 = XEXP (x1, 0); goto L13613; L13614: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case PLUS: goto L1719; case MINUS: goto L2157; case AND: goto L3084; case IOR: goto L3471; case XOR: goto L3854; case ASHIFTRT: goto L5164; case LSHIFTRT: goto L5502; case ROTATE: goto L5690; case ROTATERT: goto L5880; case EQ: case NE: case LE: case LT: case GE: case GT: case LEU: case LTU: case GEU: case GTU: case UNORDERED: case ORDERED: case UNLE: case UNLT: case UNGE: case UNGT: case LTGT: case UNEQ: goto L13623; default: goto L406; } L13623: ATTRIBUTE_UNUSED_LABEL if (ix86_comparison_operator (x1, QImode)) { operands[1] = x1; goto L5922; } goto L406; L1719: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L1720; goto L406; L1720: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[1] = x2; goto L1721; } goto L406; L1721: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 217; } x1 = XEXP (x0, 1); goto L406; L2157: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L2158; goto L406; L2158: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[1] = x2; goto L2159; } goto L406; L2159: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 244; } x1 = XEXP (x0, 1); goto L406; L3084: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L3085; goto L406; L3085: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[1] = x2; goto L3086; } goto L406; L3086: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 297; } x1 = XEXP (x0, 1); goto L406; L3471: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L3472; goto L406; L3472: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[1] = x2; goto L3473; } goto L406; L3473: ATTRIBUTE_UNUSED_LABEL if (((! 
TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 319; } x1 = XEXP (x0, 1); goto L406; L3854: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L3855; goto L406; L3855: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_operand (x2, QImode)) { operands[1] = x2; goto L3856; } goto L406; L3856: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 341; } x1 = XEXP (x0, 1); goto L406; L5164: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L5165; goto L406; L5165: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[1] = x2; goto L5166; } L5195: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[1] = x2; goto L5196; } goto L406; L5166: ATTRIBUTE_UNUSED_LABEL if ((ix86_binary_operator_ok (ASHIFTRT, QImode, operands) && (! TARGET_PARTIAL_REG_STALL || optimize_size) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 440; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5195; L5196: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 442; } x1 = XEXP (x0, 1); goto L406; L5502: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L5503; goto L406; L5503: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[1] = x2; goto L5504; } L5533: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[1] = x2; goto L5534; } goto L406; L5504: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 464; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5533; L5534: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 466; } x1 = XEXP (x0, 1); goto L406; L5690: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L5691; goto L406; L5691: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[1] = x2; goto L5692; } L5721: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[1] = x2; goto L5722; } goto L406; L5692: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 477; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5721; L5722: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 479; } x1 = XEXP (x0, 1); goto L406; L5880: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L5881; goto L406; L5881: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const1_operand (x2, QImode)) { operands[1] = x2; goto L5882; } L5911: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x2, QImode)) { operands[1] = x2; goto L5912; } goto L406; L5882: ATTRIBUTE_UNUSED_LABEL if (((! 
TARGET_PARTIAL_REG_STALL || optimize_size) && (TARGET_SHIFT1 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 490; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L5911; L5912: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 492; } x1 = XEXP (x0, 1); goto L406; L5922: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L5923; goto L406; L5923: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) { return 494; } goto L406; L407: ATTRIBUTE_UNUSED_LABEL if (((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 61; } x1 = XEXP (x0, 0); x2 = XEXP (x1, 0); goto L13613; L418: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (const0_operand (x1, QImode)) { operands[1] = x1; goto L419; } x1 = XEXP (x0, 0); goto L8266; L419: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && (!TARGET_USE_MOV0 || optimize_size)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 62; } x1 = XEXP (x0, 0); goto L8266; L13162: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 0) == 17) goto L1445; goto L1336; L1445: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_CODE (x1) == COMPARE) goto L1446; x1 = XEXP (x0, 0); goto L1336; L1446: ATTRIBUTE_UNUSED_LABEL tem = recog_10 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L1336; L6207: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); switch (GET_MODE (x1)) { case SImode: goto L13653; case DImode: goto L13654; default: break; } L5938: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case IF_THEN_ELSE: goto L5939; case LABEL_REF: goto L6204; case UNSPEC: goto L13655; default: break; } x1 = XEXP (x0, 0); goto L8266; L13653: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, SImode)) { operands[0] = x1; goto L6208; } goto L5938; L6208: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 510; } x1 = XEXP (x0, 1); goto L5938; L13654: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, DImode)) { operands[0] = x1; goto L6212; } goto L5938; L6212: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 511; } x1 = XEXP (x0, 1); goto L5938; L5939: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L5940; } L5971: ATTRIBUTE_UNUSED_LABEL if (comparison_operator (x2, VOIDmode)) { operands[0] = x2; goto L5972; } x1 = XEXP (x0, 0); goto L8266; L5940: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L5941; goto L5971; L5941: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5942; goto L5971; L5942: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case LABEL_REF: goto L5943; case PC: goto L5952; default: break; } x2 = XEXP (x1, 0); goto L5971; L5943: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[0] = x3; goto L5944; L5944: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == PC) { return 497; } x2 = XEXP (x1, 0); goto L5971; L5952: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == LABEL_REF) goto L5953; x2 = XEXP (x1, 0); goto L5971; L5953: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[0] = x3; return 498; L5972: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L5973; } x1 = XEXP (x0, 0); goto L8266; 
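/* From here the matcher is inside (set (pc) (if_then_else (cmp ...) ...))
   with a general comparison_operator in operands[0]; the floating point
   branch patterns recognized below (codes 499 through 508) are only valid
   together with two or three scratch clobbers, hence the *pnum_clobbers
   settings before each return.  */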
L5973: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, VOIDmode)) { operands[2] = x3; goto L5974; } L5996: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, VOIDmode)) { operands[2] = x3; goto L5997; } L6170: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, VOIDmode)) { operands[2] = x3; goto L6171; } x1 = XEXP (x0, 0); goto L8266; L5974: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case LABEL_REF: goto L5975; case PC: goto L6044; default: break; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L5996; L5975: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[3] = x3; goto L5976; L5976: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == PC && (TARGET_CMOVE && TARGET_80387 && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 499; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L5996; L6044: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == LABEL_REF) goto L6045; x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L5996; L6045: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[3] = x3; goto L6046; L6046: ATTRIBUTE_UNUSED_LABEL if ((TARGET_CMOVE && TARGET_80387 && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 502; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L5996; L5997: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case LABEL_REF: goto L5998; case PC: goto L6068; default: break; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L6170; L5998: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[3] = x3; goto L5999; L5999: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == PC) goto L13656; x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L6170; L13656: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 500; } L13657: ATTRIBUTE_UNUSED_LABEL if ((SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 501; } L13658: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (GET_MODE (operands[1]) == SFmode || GET_MODE (operands[1]) == DFmode) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && !ix86_use_fcomi_compare (GET_CODE (operands[0])) && SELECT_CC_MODE (GET_CODE (operands[0]), operands[1], operands[2]) == CCFPmode && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 3; return 505; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L6170; L6068: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == LABEL_REF) goto L6069; x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L6170; L6069: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[3] = x3; goto L6070; L6070: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 503; } L6094: 
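/* Consecutive labels such as L6070, L6094 and L6147 test alternative
   enabling conditions for the same operand shape; control falls through
   from one test to the next until a condition holds or the walk
   backtracks to an earlier subexpression.  */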
ATTRIBUTE_UNUSED_LABEL if ((SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 504; } L6147: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (GET_MODE (operands[1]) == SFmode || GET_MODE (operands[1]) == DFmode) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && !ix86_use_fcomi_compare (GET_CODE (operands[0])) && SELECT_CC_MODE (GET_CODE (operands[0]), operands[1], operands[2]) == CCFPmode && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 3; return 506; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 1); goto L6170; L6171: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case LABEL_REF: goto L6172; case PC: goto L6198; default: break; } x1 = XEXP (x0, 0); goto L8266; L6172: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[3] = x3; goto L6173; L6173: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == PC && (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 3; return 507; } x1 = XEXP (x0, 0); goto L8266; L6198: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == LABEL_REF) goto L6199; x1 = XEXP (x0, 0); goto L8266; L6199: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[3] = x3; goto L6200; L6200: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0]))) && pnum_clobbers != NULL) { *pnum_clobbers = 3; return 508; } x1 = XEXP (x0, 0); goto L8266; L6204: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[0] = x2; return 509; L13655: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 76) goto L6348; x1 = XEXP (x0, 0); goto L8266; L6348: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); switch (GET_MODE (x2)) { case SImode: goto L13659; case DImode: goto L13660; default: break; } x1 = XEXP (x0, 0); goto L8266; L13659: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L6349; } x1 = XEXP (x0, 0); goto L8266; L6349: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 531; } x1 = XEXP (x0, 0); goto L8266; L13660: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L6354; } x1 = XEXP (x0, 0); goto L8266; L6354: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 532; } x1 = XEXP (x0, 0); goto L8266; L1337: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_CODE (x1) == PLUS) goto L1338; x1 = XEXP (x0, 0); goto L8266; L1338: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_CODE (x2)) { case PLUS: goto L1373; case MULT: goto L1356; default: break; } x1 = XEXP (x0, 0); goto L8266; L1373: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == MULT) goto L1374; if (index_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1340; } x1 = XEXP (x0, 0); goto L8266; L1374: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (index_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L1375; } x1 = XEXP (x0, 0); goto L8266; L1375: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const248_operand (x4, VOIDmode)) { operands[2] = x4; goto L1376; } x1 = XEXP (x0, 0); goto L8266; L1376: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, VOIDmode)) { operands[3] = x3; goto L1377; 
} x1 = XEXP (x0, 0); goto L8266; L1377: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (immediate_operand (x2, VOIDmode)) { operands[4] = x2; goto L1378; } x1 = XEXP (x0, 0); goto L8266; L1378: ATTRIBUTE_UNUSED_LABEL if (((GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode || (TARGET_64BIT && GET_MODE (operands[0]) == SImode)) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && GET_MODE (operands[0]) == GET_MODE (operands[1]) && GET_MODE (operands[0]) == GET_MODE (operands[3]))) { return 194; } x1 = XEXP (x0, 0); goto L8266; L1340: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, VOIDmode)) { operands[2] = x3; goto L1341; } x1 = XEXP (x0, 0); goto L8266; L1341: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (immediate_operand (x2, VOIDmode)) { operands[3] = x2; goto L1342; } x1 = XEXP (x0, 0); goto L8266; L1342: ATTRIBUTE_UNUSED_LABEL if (((GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode || (TARGET_64BIT && GET_MODE (operands[0]) == SImode)) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && GET_MODE (operands[0]) == GET_MODE (operands[1]) && GET_MODE (operands[0]) == GET_MODE (operands[2]) && (GET_MODE (operands[0]) == GET_MODE (operands[3]) || GET_MODE (operands[3]) == VOIDmode))) { return 190; } x1 = XEXP (x0, 0); goto L8266; L1356: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (index_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1357; } x1 = XEXP (x0, 0); goto L8266; L1357: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const248_operand (x3, VOIDmode)) { operands[2] = x3; goto L1358; } x1 = XEXP (x0, 0); goto L8266; L1358: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, VOIDmode)) { operands[3] = x2; goto L1359; } x1 = XEXP (x0, 0); goto L8266; L1359: ATTRIBUTE_UNUSED_LABEL if (((GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode || (TARGET_64BIT && GET_MODE (operands[0]) == SImode)) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && GET_MODE (operands[0]) == GET_MODE (operands[1]) && (GET_MODE (operands[0]) == GET_MODE (operands[3]) || GET_MODE (operands[3]) == VOIDmode))) { return 192; } x1 = XEXP (x0, 0); goto L8266; L8267: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_CODE (x1) == CALL) goto L8268; x1 = XEXP (x0, 0); goto L8323; L8268: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == QImode && GET_CODE (x2) == MEM) goto L8269; x1 = XEXP (x0, 0); goto L8323; L8269: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case SImode: goto L13661; case DImode: goto L13662; default: break; } x1 = XEXP (x0, 0); goto L8323; L13661: ATTRIBUTE_UNUSED_LABEL if (constant_call_address_operand (x3, SImode)) { operands[1] = x3; goto L8270; } L13663: ATTRIBUTE_UNUSED_LABEL if (call_insn_operand (x3, SImode)) { operands[1] = x3; goto L8284; } L13664: ATTRIBUTE_UNUSED_LABEL if (sibcall_insn_operand (x3, SImode)) { operands[1] = x3; goto L8291; } x1 = XEXP (x0, 0); goto L8323; L8270: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); operands[2] = x2; goto L8271; L8271: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 689; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13663; L8284: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); operands[2] = x2; goto L8285; L8285: ATTRIBUTE_UNUSED_LABEL if ((!SIBLING_CALL_P (insn) && !TARGET_64BIT)) { return 691; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13664; L8291: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); operands[2] = x2; goto L8292; L8292: ATTRIBUTE_UNUSED_LABEL if ((SIBLING_CALL_P (insn) && 
!TARGET_64BIT)) { return 692; } x1 = XEXP (x0, 0); goto L8323; L13662: ATTRIBUTE_UNUSED_LABEL if (constant_call_address_operand (x3, DImode)) { operands[1] = x3; goto L8277; } L13665: ATTRIBUTE_UNUSED_LABEL if (call_insn_operand (x3, DImode)) { operands[1] = x3; goto L8298; } L13667: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == REG && XINT (x3, 0) == 40) goto L8312; if (constant_call_address_operand (x3, DImode)) { operands[1] = x3; goto L8305; } x1 = XEXP (x0, 0); goto L8323; L8277: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const_int_operand (x2, DImode)) { operands[2] = x2; goto L8278; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13665; L8278: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 690; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13665; L8298: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); operands[2] = x2; goto L8299; L8299: ATTRIBUTE_UNUSED_LABEL if ((!SIBLING_CALL_P (insn) && TARGET_64BIT)) { return 693; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L13667; L8312: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); operands[1] = x2; goto L8313; L8313: ATTRIBUTE_UNUSED_LABEL if ((SIBLING_CALL_P (insn) && TARGET_64BIT)) { return 695; } x1 = XEXP (x0, 0); goto L8323; L8305: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); operands[2] = x2; goto L8306; L8306: ATTRIBUTE_UNUSED_LABEL if ((SIBLING_CALL_P (insn) && TARGET_64BIT)) { return 694; } x1 = XEXP (x0, 0); goto L8323; L13163: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V4SFmode)) { operands[0] = x1; goto L8324; } L13178: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, V4SFmode)) { operands[0] = x1; goto L8384; } L13188: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, V4SFmode)) { operands[0] = x1; goto L8454; } L13190: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V4SFmode)) { operands[0] = x1; goto L8464; } goto L9442; L8324: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V4SFmode) goto L13669; x1 = XEXP (x0, 0); goto L13178; L13669: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case UNSPEC: goto L13672; case VEC_MERGE: goto L8489; case CONST_VECTOR: case SUBREG: case REG: case MEM: goto L13668; default: x1 = XEXP (x0, 0); goto L13178; } L13668: ATTRIBUTE_UNUSED_LABEL if (vector_move_operand (x1, V4SFmode)) { operands[1] = x1; goto L8325; } x1 = XEXP (x0, 0); goto L13178; L13672: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1) goto L13674; x1 = XEXP (x0, 0); goto L13178; L13674: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 38L: goto L8421; case 39L: goto L8426; default: break; } x1 = XEXP (x0, 0); goto L13178; L8421: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L8422; } x1 = XEXP (x0, 0); goto L13178; L8422: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 722; } x1 = XEXP (x0, 0); goto L13178; L8426: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L8427; } x1 = XEXP (x0, 0); goto L13178; L8427: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 723; } x1 = XEXP (x0, 0); goto L13178; L8489: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L8490; } x1 = XEXP (x0, 0); goto L13178; L8490: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8491; } x1 = XEXP (x0, 0); goto L13178; L8491: 
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == CONST_INT) goto L13676; x1 = XEXP (x0, 0); goto L13178; L13676: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x2, 0) == XWINT (x2, 0)) switch ((int) XWINT (x2, 0)) { case 12L: goto L13678; case 3L: goto L13679; default: break; } x1 = XEXP (x0, 0); goto L13178; L13678: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[1]) == MEM || GET_CODE (operands[2]) == MEM))) { return 732; } x1 = XEXP (x0, 0); goto L13178; L13679: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[1]) == MEM || GET_CODE (operands[2]) == MEM))) { return 733; } x1 = XEXP (x0, 0); goto L13178; L8325: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 698; } x1 = XEXP (x0, 0); goto L13178; L8384: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V4SFmode)) { operands[1] = x1; goto L8385; } x1 = XEXP (x0, 0); goto L13188; L8385: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 713; } x1 = XEXP (x0, 0); goto L13188; L8454: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V4SFmode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 34) goto L8455; x1 = XEXP (x0, 0); goto L13190; L8455: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8456; } x1 = XEXP (x0, 0); goto L13190; L8456: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 728; } x1 = XEXP (x0, 0); goto L13190; L8464: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V4SFmode) goto L13680; L8718: ATTRIBUTE_UNUSED_LABEL if (const0_operand (x1, V4SFmode)) { operands[1] = x1; goto L8719; } x1 = XEXP (x0, 0); goto L9442; L13680: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case VEC_MERGE: goto L8465; case UNSPEC: goto L13694; case PLUS: goto L8527; case MINUS: goto L8541; case MULT: goto L8555; case DIV: goto L8569; case SQRT: goto L8607; case AND: goto L8619; case IOR: goto L8632; case XOR: goto L8638; case SMAX: goto L8817; case SMIN: goto L8831; default: break; } goto L8718; L8465: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SFmode) goto L13698; goto L8718; L13698: ATTRIBUTE_UNUSED_LABEL tem = recog_11 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L8718; L13694: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x1, 0)) { case 3: goto L13726; case 1: goto L13727; default: break; } goto L8718; L13726: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 41) goto L8520; goto L8718; L8520: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8521; } goto L8718; L8521: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8522; } goto L8718; L8522: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 2); if (immediate_operand (x2, SImode)) { operands[3] = x2; goto L8523; } goto L8718; L8523: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 737; } x1 = XEXP (x0, 1); goto L8718; L13727: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 42L: goto L8583; case 43L: goto L8595; default: break; } goto L8718; L8583: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L8584; } goto L8718; L8584: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 746; } x1 = XEXP (x0, 1); goto L8718; L8595: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L8596; } goto L8718; L8596: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 748; } x1 = XEXP 
(x0, 1); goto L8718; L8527: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8528; } goto L8718; L8528: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8529; } goto L8718; L8529: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 738; } x1 = XEXP (x0, 1); goto L8718; L8541: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8542; } goto L8718; L8542: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8543; } goto L8718; L8543: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 740; } x1 = XEXP (x0, 1); goto L8718; L8555: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8556; } goto L8718; L8556: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8557; } goto L8718; L8557: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 742; } x1 = XEXP (x0, 1); goto L8718; L8569: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8570; } goto L8718; L8570: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8571; } goto L8718; L8571: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 744; } x1 = XEXP (x0, 1); goto L8718; L8607: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L8608; } goto L8718; L8608: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 750; } x1 = XEXP (x0, 1); goto L8718; L8619: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SFmode) goto L13730; goto L8718; L13730: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == NOT) goto L8626; if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L8620; } goto L8718; L8626: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8627; } goto L8718; L8627: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8628; } goto L8718; L8628: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 753; } x1 = XEXP (x0, 1); goto L8718; L8620: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8621; } goto L8718; L8621: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 752; } x1 = XEXP (x0, 1); goto L8718; L8632: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L8633; } goto L8718; L8633: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8634; } goto L8718; L8634: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 754; } x1 = XEXP (x0, 1); goto L8718; L8638: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L8639; } goto L8718; L8639: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8640; } goto L8718; L8640: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 755; } x1 = XEXP (x0, 1); goto L8718; L8817: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if 
(register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8818; } goto L8718; L8818: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8819; } goto L8718; L8819: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 778; } x1 = XEXP (x0, 1); goto L8718; L8831: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8832; } goto L8718; L8832: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L8833; } goto L8718; L8833: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 780; } x1 = XEXP (x0, 1); goto L8718; L8719: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 768; } x1 = XEXP (x0, 0); goto L9442; L13164: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V4SImode)) { operands[0] = x1; goto L8328; } L13179: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, V4SImode)) { operands[0] = x1; goto L8388; } if (register_operand (x1, V4SImode)) { operands[0] = x1; goto L8726; } goto L9442; L8328: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (vector_move_operand (x1, V4SImode)) { operands[1] = x1; goto L8329; } x1 = XEXP (x0, 0); goto L13179; L8329: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 699; } x1 = XEXP (x0, 0); goto L13179; L8388: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V4SImode)) { operands[1] = x1; goto L8389; } x1 = XEXP (x0, 0); goto L9442; L8389: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 714; } x1 = XEXP (x0, 0); goto L9442; L8726: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V4SImode) goto L13732; x1 = XEXP (x0, 0); goto L9442; L13732: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case NOT: goto L8733; case VEC_MERGE: goto L8740; case EQ: case LT: case LE: case UNORDERED: case NE: case UNGE: case UNGT: case ORDERED: case UNEQ: case UNLT: case UNLE: case LTGT: case GE: case GT: goto L13731; default: x1 = XEXP (x0, 0); goto L9442; } L13731: ATTRIBUTE_UNUSED_LABEL if (sse_comparison_operator (x1, V4SImode)) { operands[3] = x1; goto L8727; } x1 = XEXP (x0, 0); goto L9442; L8733: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (sse_comparison_operator (x2, V4SImode)) { operands[3] = x2; goto L8734; } x1 = XEXP (x0, 0); goto L9442; L8734: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8735; } x1 = XEXP (x0, 0); goto L9442; L8735: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, V4SFmode)) { operands[2] = x3; goto L8736; } x1 = XEXP (x0, 0); goto L9442; L8736: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 771; } x1 = XEXP (x0, 0); goto L9442; L8740: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SImode) goto L13735; x1 = XEXP (x0, 0); goto L9442; L13735: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == NOT) goto L8750; if (sse_comparison_operator (x2, V4SImode)) { operands[3] = x2; goto L8741; } x1 = XEXP (x0, 0); goto L9442; L8750: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (sse_comparison_operator (x3, V4SImode)) { operands[3] = x3; goto L8751; } x1 = XEXP (x0, 0); goto L9442; L8751: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V4SFmode)) { operands[1] = x4; goto L8752; } x1 = XEXP (x0, 0); goto L9442; L8752: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, V4SFmode)) { operands[2] = x4; goto L8753; } x1 = XEXP (x0, 0); goto L9442; L8753: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SImode && GET_CODE (x2) == SUBREG && 
XINT (x2, 1) == 0) goto L8754; x1 = XEXP (x0, 0); goto L9442; L8754: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L8755; x1 = XEXP (x0, 0); goto L9442; L8755: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 773; } x1 = XEXP (x0, 0); goto L9442; L8741: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8742; } x1 = XEXP (x0, 0); goto L9442; L8742: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, V4SFmode)) { operands[2] = x3; goto L8743; } x1 = XEXP (x0, 0); goto L9442; L8743: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L8744; x1 = XEXP (x0, 0); goto L9442; L8744: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L8745; x1 = XEXP (x0, 0); goto L9442; L8745: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 772; } x1 = XEXP (x0, 0); goto L9442; L8727: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8728; } x1 = XEXP (x0, 0); goto L9442; L8728: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, V4SFmode)) { operands[2] = x2; goto L8729; } x1 = XEXP (x0, 0); goto L9442; L8729: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 770; } x1 = XEXP (x0, 0); goto L9442; L13165: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V2DImode)) { operands[0] = x1; goto L8332; } L13175: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, V2DImode)) { operands[0] = x1; goto L8372; } if (register_operand (x1, V2DImode)) { operands[0] = x1; goto L8674; } goto L9442; L8332: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (vector_move_operand (x1, V2DImode)) { operands[1] = x1; goto L8333; } x1 = XEXP (x0, 0); goto L13175; L8333: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 700; } x1 = XEXP (x0, 0); goto L13175; L8372: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V2DImode)) { operands[1] = x1; goto L8373; } x1 = XEXP (x0, 0); goto L9442; L8373: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 710; } x1 = XEXP (x0, 0); goto L9442; L8674: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2DImode) goto L13736; x1 = XEXP (x0, 0); goto L9442; L13736: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case AND: goto L8675; case IOR: goto L8701; case XOR: goto L8713; default: break; } x1 = XEXP (x0, 0); goto L9442; L8675: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2DImode) goto L13740; x1 = XEXP (x0, 0); goto L9442; L13740: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == NOT) goto L8689; if (nonimmediate_operand (x2, V2DImode)) { operands[1] = x2; goto L8676; } x1 = XEXP (x0, 0); goto L9442; L8689: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DImode)) { operands[1] = x3; goto L8690; } x1 = XEXP (x0, 0); goto L9442; L8690: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DImode)) { operands[2] = x2; goto L8691; } x1 = XEXP (x0, 0); goto L9442; L8691: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 763; } x1 = XEXP (x0, 0); goto L9442; L8676: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DImode)) { operands[2] = x2; goto L8677; } x1 = XEXP (x0, 0); goto L9442; L8677: ATTRIBUTE_UNUSED_LABEL if 
((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 761; } x1 = XEXP (x0, 0); goto L9442; L8701: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2DImode)) { operands[1] = x2; goto L8702; } x1 = XEXP (x0, 0); goto L9442; L8702: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DImode)) { operands[2] = x2; goto L8703; } x1 = XEXP (x0, 0); goto L9442; L8703: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 765; } x1 = XEXP (x0, 0); goto L9442; L8713: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2DImode)) { operands[1] = x2; goto L8714; } x1 = XEXP (x0, 0); goto L9442; L8714: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DImode)) { operands[2] = x2; goto L8715; } x1 = XEXP (x0, 0); goto L9442; L8715: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 767; } x1 = XEXP (x0, 0); goto L9442; L13187: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == MEM) goto L8440; L13166: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V8QImode)) { operands[0] = x1; goto L8336; } L13182: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, V8QImode)) { operands[0] = x1; goto L8400; } if (register_operand (x1, V8QImode)) { operands[0] = x1; goto L8912; } goto L9442; L8440: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L13741; case DImode: goto L13742; default: break; } goto L13166; L13741: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L8441; } goto L13166; L8441: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V8QImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 2 && XINT (x1, 1) == 32) goto L8442; x1 = XEXP (x0, 0); goto L13166; L8442: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L8443; } x1 = XEXP (x0, 0); goto L13166; L8443: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (register_operand (x2, V8QImode)) { operands[2] = x2; goto L8444; } x1 = XEXP (x0, 0); goto L13166; L8444: ATTRIBUTE_UNUSED_LABEL if (((TARGET_SSE || TARGET_3DNOW_A) && !TARGET_64BIT)) { return 726; } x1 = XEXP (x0, 0); goto L13166; L13742: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L8448; } goto L13166; L8448: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V8QImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 2 && XINT (x1, 1) == 32) goto L8449; x1 = XEXP (x0, 0); goto L13166; L8449: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L8450; } x1 = XEXP (x0, 0); goto L13166; L8450: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (register_operand (x2, V8QImode)) { operands[2] = x2; goto L8451; } x1 = XEXP (x0, 0); goto L13166; L8451: ATTRIBUTE_UNUSED_LABEL if (((TARGET_SSE || TARGET_3DNOW_A) && TARGET_64BIT)) { return 727; } x1 = XEXP (x0, 0); goto L13166; L8336: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (vector_move_operand (x1, V8QImode)) { operands[1] = x1; goto L8337; } x1 = XEXP (x0, 0); goto L13182; L8337: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 701; } x1 = XEXP (x0, 0); goto L13182; L8400: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V8QImode)) { operands[1] = x1; goto L8401; } x1 = 
XEXP (x0, 0); goto L9442; L8401: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 717; } x1 = XEXP (x0, 0); goto L9442; L8912: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V8QImode) goto L13743; x1 = XEXP (x0, 0); goto L9442; L13743: ATTRIBUTE_UNUSED_LABEL tem = recog_12 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L9442; L13167: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V4HImode)) { operands[0] = x1; goto L8340; } L13181: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, V4HImode)) { operands[0] = x1; goto L8396; } if (register_operand (x1, V4HImode)) { operands[0] = x1; goto L8918; } goto L9442; L8340: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (vector_move_operand (x1, V4HImode)) { operands[1] = x1; goto L8341; } x1 = XEXP (x0, 0); goto L13181; L8341: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 702; } x1 = XEXP (x0, 0); goto L13181; L8396: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V4HImode)) { operands[1] = x1; goto L8397; } x1 = XEXP (x0, 0); goto L9442; L8397: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 716; } x1 = XEXP (x0, 0); goto L9442; L8918: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V4HImode) goto L13760; x1 = XEXP (x0, 0); goto L9442; L13760: ATTRIBUTE_UNUSED_LABEL tem = recog_13 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L9442; L13168: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V2SImode)) { operands[0] = x1; goto L8344; } L13180: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, V2SImode)) { operands[0] = x1; goto L8392; } if (register_operand (x1, V2SImode)) { operands[0] = x1; goto L8852; } goto L9442; L8344: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (vector_move_operand (x1, V2SImode)) { operands[1] = x1; goto L8345; } x1 = XEXP (x0, 0); goto L13180; L8345: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 703; } x1 = XEXP (x0, 0); goto L13180; L8392: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V2SImode)) { operands[1] = x1; goto L8393; } x1 = XEXP (x0, 0); goto L9442; L8393: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 715; } x1 = XEXP (x0, 0); goto L9442; L8852: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2SImode) goto L13787; x1 = XEXP (x0, 0); goto L9442; L13787: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case VEC_SELECT: goto L8853; case PLUS: goto L8925; case MINUS: goto L8974; case EQ: goto L9170; case GT: goto L9188; case ASHIFTRT: goto L9224; case LSHIFTRT: goto L9236; case ASHIFT: goto L9255; case VEC_MERGE: goto L9336; default: break; } x1 = XEXP (x0, 0); goto L9442; L8853: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SImode) goto L13796; x1 = XEXP (x0, 0); goto L9442; L13796: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FIX: goto L8854; case UNSPEC: goto L13798; default: break; } x1 = XEXP (x0, 0); goto L9442; L8854: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L8855; } x1 = XEXP (x0, 0); goto L9442; L8855: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 2) goto L8856; x1 = XEXP (x0, 0); goto L9442; L8856: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L8857; x1 = XEXP (x0, 0); goto L9442; L8857: ATTRIBUTE_UNUSED_LABEL x3 = 
XVECEXP (x2, 0, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 783; } x1 = XEXP (x0, 0); goto L9442; L13798: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 30) goto L8862; x1 = XEXP (x0, 0); goto L9442; L8862: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L8863; } x1 = XEXP (x0, 0); goto L9442; L8863: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 2) goto L8864; x1 = XEXP (x0, 0); goto L9442; L8864: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L8865; x1 = XEXP (x0, 0); goto L9442; L8865: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE)) { return 784; } x1 = XEXP (x0, 0); goto L9442; L8925: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2SImode) goto L13800; x1 = XEXP (x0, 0); goto L9442; L13800: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MULT) goto L9038; if (register_operand (x2, V2SImode)) { operands[1] = x2; goto L8926; } x1 = XEXP (x0, 0); goto L9442; L9038: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V2SImode && GET_CODE (x3) == SIGN_EXTEND) goto L9039; x1 = XEXP (x0, 0); goto L9442; L9039: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V2HImode && GET_CODE (x4) == VEC_SELECT) goto L9040; x1 = XEXP (x0, 0); goto L9442; L9040: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, V4HImode)) { operands[1] = x5; goto L9041; } x1 = XEXP (x0, 0); goto L9442; L9041: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_CODE (x5) == PARALLEL && XVECLEN (x5, 0) == 2) goto L9042; x1 = XEXP (x0, 0); goto L9442; L9042: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 0); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9043; x1 = XEXP (x0, 0); goto L9442; L9043: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 1); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L9044; x1 = XEXP (x0, 0); goto L9442; L9044: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == V2SImode && GET_CODE (x3) == SIGN_EXTEND) goto L9045; x1 = XEXP (x0, 0); goto L9442; L9045: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V2HImode && GET_CODE (x4) == VEC_SELECT) goto L9046; x1 = XEXP (x0, 0); goto L9442; L9046: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, V4HImode)) { operands[2] = x5; goto L9047; } x1 = XEXP (x0, 0); goto L9442; L9047: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_CODE (x5) == PARALLEL && XVECLEN (x5, 0) == 2) goto L9048; x1 = XEXP (x0, 0); goto L9442; L9048: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 0); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9049; x1 = XEXP (x0, 0); goto L9442; L9049: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 1); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L9050; x1 = XEXP (x0, 0); goto L9442; L9050: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2SImode && GET_CODE (x2) == MULT) goto L9051; x1 = XEXP (x0, 0); goto L9442; L9051: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V2SImode && GET_CODE (x3) == SIGN_EXTEND) goto L9052; x1 = XEXP (x0, 0); goto L9442; L9052: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V2HImode && GET_CODE (x4) == VEC_SELECT) goto L9053; x1 = XEXP (x0, 0); goto L9442; L9053: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (rtx_equal_p (x5, operands[1])) goto L9054; x1 
= XEXP (x0, 0); goto L9442; L9054: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_CODE (x5) == PARALLEL && XVECLEN (x5, 0) == 2) goto L9055; x1 = XEXP (x0, 0); goto L9442; L9055: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 0); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9056; x1 = XEXP (x0, 0); goto L9442; L9056: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 1); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L9057; x1 = XEXP (x0, 0); goto L9442; L9057: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == V2SImode && GET_CODE (x3) == SIGN_EXTEND) goto L9058; x1 = XEXP (x0, 0); goto L9442; L9058: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V2HImode && GET_CODE (x4) == VEC_SELECT) goto L9059; x1 = XEXP (x0, 0); goto L9442; L9059: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (rtx_equal_p (x5, operands[2])) goto L9060; x1 = XEXP (x0, 0); goto L9442; L9060: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_CODE (x5) == PARALLEL && XVECLEN (x5, 0) == 2) goto L9061; x1 = XEXP (x0, 0); goto L9442; L9061: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 0); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9062; x1 = XEXP (x0, 0); goto L9442; L9062: ATTRIBUTE_UNUSED_LABEL x6 = XVECEXP (x5, 0, 1); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (3)] && (TARGET_MMX)) { return 810; } x1 = XEXP (x0, 0); goto L9442; L8926: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SImode)) { operands[2] = x2; goto L8927; } x1 = XEXP (x0, 0); goto L9442; L8927: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 793; } x1 = XEXP (x0, 0); goto L9442; L8974: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SImode)) { operands[1] = x2; goto L8975; } x1 = XEXP (x0, 0); goto L9442; L8975: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SImode)) { operands[2] = x2; goto L8976; } x1 = XEXP (x0, 0); goto L9442; L8976: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 801; } x1 = XEXP (x0, 0); goto L9442; L9170: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SImode)) { operands[1] = x2; goto L9171; } x1 = XEXP (x0, 0); goto L9442; L9171: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SImode)) { operands[2] = x2; goto L9172; } x1 = XEXP (x0, 0); goto L9442; L9172: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 824; } x1 = XEXP (x0, 0); goto L9442; L9188: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SImode)) { operands[1] = x2; goto L9189; } x1 = XEXP (x0, 0); goto L9442; L9189: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SImode)) { operands[2] = x2; goto L9190; } x1 = XEXP (x0, 0); goto L9442; L9190: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 827; } x1 = XEXP (x0, 0); goto L9442; L9224: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SImode)) { operands[1] = x2; goto L9225; } x1 = XEXP (x0, 0); goto L9442; L9225: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, DImode)) { operands[2] = x2; goto L9226; } x1 = XEXP (x0, 0); goto L9442; L9226: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 833; } x1 = XEXP (x0, 0); goto L9442; L9236: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SImode)) { operands[1] = x2; goto L9237; } x1 = XEXP (x0, 0); goto L9442; L9237: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, DImode)) { operands[2] = x2; goto L9238; } x1 = XEXP (x0, 0); goto L9442; L9238: 
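/* The V2SImode cases in this region appear to match MMX arithmetic,
   comparison, shift and element-shuffle patterns; each successful match is
   guarded by TARGET_MMX before its insn code is returned.  */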
ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 835; } x1 = XEXP (x0, 0); goto L9442; L9255: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SImode)) { operands[1] = x2; goto L9256; } x1 = XEXP (x0, 0); goto L9442; L9256: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, DImode)) { operands[2] = x2; goto L9257; } x1 = XEXP (x0, 0); goto L9442; L9257: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 838; } x1 = XEXP (x0, 0); goto L9442; L9336: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2SImode) goto L13802; x1 = XEXP (x0, 0); goto L9442; L13802: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == VEC_SELECT) goto L9391; if (register_operand (x2, V2SImode)) { operands[1] = x2; goto L9337; } x1 = XEXP (x0, 0); goto L9442; L9391: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2SImode)) { operands[1] = x3; goto L9392; } x1 = XEXP (x0, 0); goto L9442; L9392: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 2) goto L9393; x1 = XEXP (x0, 0); goto L9442; L9393: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9394; x1 = XEXP (x0, 0); goto L9442; L9394: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9395; x1 = XEXP (x0, 0); goto L9442; L9395: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, V2SImode)) { operands[2] = x2; goto L9396; } x1 = XEXP (x0, 0); goto L9442; L9396: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_MMX)) { return 848; } x1 = XEXP (x0, 0); goto L9442; L9337: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2SImode && GET_CODE (x2) == VEC_SELECT) goto L9338; x1 = XEXP (x0, 0); goto L9442; L9338: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2SImode)) { operands[2] = x3; goto L9339; } x1 = XEXP (x0, 0); goto L9442; L9339: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 2) goto L9340; x1 = XEXP (x0, 0); goto L9442; L9340: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9341; x1 = XEXP (x0, 0); goto L9442; L9341: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9342; x1 = XEXP (x0, 0); goto L9442; L9342: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_MMX)) { return 845; } x1 = XEXP (x0, 0); goto L9442; L13169: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V2SFmode)) { operands[0] = x1; goto L8348; } L13183: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, V2SFmode)) { operands[0] = x1; goto L8404; } goto L9442; L8348: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (vector_move_operand (x1, V2SFmode)) { operands[1] = x1; goto L8349; } x1 = XEXP (x0, 0); goto L13183; L8349: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 704; } x1 = XEXP (x0, 0); goto L13183; L8404: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V2SFmode)) { operands[1] = x1; goto L8405; } x1 = XEXP (x0, 0); goto L9442; L8405: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 718; } x1 = XEXP (x0, 0); goto L9442; L13170: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V2DFmode)) { operands[0] = x1; goto L8352; } L13174: ATTRIBUTE_UNUSED_LABEL if (push_operand 
(x1, V2DFmode)) { operands[0] = x1; goto L8368; } if (register_operand (x1, V2DFmode)) { operands[0] = x1; goto L8643; } goto L9442; L8352: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (vector_move_operand (x1, V2DFmode)) { operands[1] = x1; goto L8353; } x1 = XEXP (x0, 0); goto L13174; L8353: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 705; } x1 = XEXP (x0, 0); goto L13174; L8368: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V2DFmode)) { operands[1] = x1; goto L8369; } x1 = XEXP (x0, 0); goto L9442; L8369: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 709; } x1 = XEXP (x0, 0); goto L9442; L8643: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2DFmode) goto L13803; x1 = XEXP (x0, 0); goto L9442; L13803: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case AND: goto L8644; case IOR: goto L8657; case XOR: goto L8663; case UNSPEC: goto L13807; default: break; } x1 = XEXP (x0, 0); goto L9442; L8644: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2DFmode) goto L13809; x1 = XEXP (x0, 0); goto L9442; L13809: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == NOT) goto L8651; if (nonimmediate_operand (x2, V2DFmode)) { operands[1] = x2; goto L8645; } x1 = XEXP (x0, 0); goto L9442; L8651: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L8652; } x1 = XEXP (x0, 0); goto L9442; L8652: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L8653; } x1 = XEXP (x0, 0); goto L9442; L8653: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 757; } x1 = XEXP (x0, 0); goto L9442; L8645: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L8646; } x1 = XEXP (x0, 0); goto L9442; L8646: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 756; } x1 = XEXP (x0, 0); goto L9442; L8657: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[1] = x2; goto L8658; } x1 = XEXP (x0, 0); goto L9442; L8658: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L8659; } x1 = XEXP (x0, 0); goto L9442; L8659: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 758; } x1 = XEXP (x0, 0); goto L9442; L8663: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[1] = x2; goto L8664; } x1 = XEXP (x0, 0); goto L9442; L8664: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L8665; } x1 = XEXP (x0, 0); goto L9442; L8665: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 759; } x1 = XEXP (x0, 0); goto L9442; L13807: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 45) goto L8723; x1 = XEXP (x0, 0); goto L9442; L8723: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 769; } x1 = XEXP (x0, 0); goto L9442; L13171: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V8HImode)) { operands[0] = x1; goto L8356; } L13176: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, V8HImode)) { operands[0] = x1; goto L8376; } goto L9442; L8356: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if 
(vector_move_operand (x1, V8HImode)) { operands[1] = x1; goto L8357; } x1 = XEXP (x0, 0); goto L13176; L8357: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 706; } x1 = XEXP (x0, 0); goto L13176; L8376: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V8HImode)) { operands[1] = x1; goto L8377; } x1 = XEXP (x0, 0); goto L9442; L8377: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 711; } x1 = XEXP (x0, 0); goto L9442; L13172: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V16QImode)) { operands[0] = x1; goto L8360; } L13177: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, V16QImode)) { operands[0] = x1; goto L8380; } goto L9442; L8360: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (nonimmediate_operand (x1, V16QImode)) { operands[1] = x1; goto L8361; } x1 = XEXP (x0, 0); goto L13177; L8361: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 707; } x1 = XEXP (x0, 0); goto L13177; L8380: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, V16QImode)) { operands[1] = x1; goto L8381; } x1 = XEXP (x0, 0); goto L9442; L8381: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 712; } x1 = XEXP (x0, 0); goto L9442; L13173: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, TImode)) { operands[0] = x1; goto L8364; } L13184: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, TImode)) { operands[0] = x1; goto L8408; } L13193: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, TImode)) { operands[0] = x1; goto L8668; } goto L9442; L8364: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, TImode)) { operands[1] = x1; goto L8365; } x1 = XEXP (x0, 0); goto L13184; L8365: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 708; } x1 = XEXP (x0, 0); goto L13184; L8408: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (vector_move_operand (x1, TImode)) { operands[1] = x1; goto L8409; } L8412: ATTRIBUTE_UNUSED_LABEL if (general_operand (x1, TImode)) { operands[1] = x1; goto L8413; } x1 = XEXP (x0, 0); goto L13193; L8409: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && !TARGET_64BIT && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 719; } x1 = XEXP (x0, 1); goto L8412; L8413: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 720; } x1 = XEXP (x0, 0); goto L13193; L8668: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == TImode) goto L13810; x1 = XEXP (x0, 0); goto L9442; L13810: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case AND: goto L8669; case IOR: goto L8695; case XOR: goto L8707; default: break; } x1 = XEXP (x0, 0); goto L9442; L8669: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == TImode) goto L13814; x1 = XEXP (x0, 0); goto L9442; L13814: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == NOT) goto L8682; if (nonimmediate_operand (x2, TImode)) { operands[1] = x2; goto L8670; } x1 = XEXP (x0, 0); goto L9442; L8682: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, TImode)) { operands[1] = x3; goto L8683; } x1 = XEXP (x0, 0); goto L9442; L8683: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, TImode)) { operands[2] = x2; goto L8684; } x1 = XEXP (x0, 0); goto L9442; L8684: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 762; } x1 = XEXP (x0, 0); goto L9442; L8670: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, TImode)) { operands[2] = x2; 
goto L8671; } x1 = XEXP (x0, 0); goto L9442; L8671: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 760; } x1 = XEXP (x0, 0); goto L9442; L8695: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, TImode)) { operands[1] = x2; goto L8696; } x1 = XEXP (x0, 0); goto L9442; L8696: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, TImode)) { operands[2] = x2; goto L8697; } x1 = XEXP (x0, 0); goto L9442; L8697: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 764; } x1 = XEXP (x0, 0); goto L9442; L8707: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, TImode)) { operands[1] = x2; goto L8708; } x1 = XEXP (x0, 0); goto L9442; L8708: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, TImode)) { operands[2] = x2; goto L8709; } x1 = XEXP (x0, 0); goto L9442; L8709: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 766; } x1 = XEXP (x0, 0); goto L9442; L13185: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, TFmode)) { operands[0] = x1; goto L8416; } goto L9442; L8416: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_operand (x1, TFmode)) { operands[1] = x1; goto L8417; } x1 = XEXP (x0, 0); goto L9442; L8417: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 721; } x1 = XEXP (x0, 0); goto L9442; L13186: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, SImode)) { operands[0] = x1; goto L8430; } L13202: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, SImode)) { operands[0] = x1; goto L9439; } goto L9442; L8430: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode) goto L13815; x1 = XEXP (x0, 0); goto L13202; L13815: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case UNSPEC: goto L13818; case VEC_SELECT: goto L8885; case ZERO_EXTEND: goto L9144; default: break; } x1 = XEXP (x0, 0); goto L13202; L13818: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 33) goto L8431; x1 = XEXP (x0, 0); goto L13202; L8431: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); switch (GET_MODE (x2)) { case V4SFmode: goto L13819; case V8QImode: goto L13820; default: break; } x1 = XEXP (x0, 0); goto L13202; L13819: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8432; } x1 = XEXP (x0, 0); goto L13202; L8432: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 724; } x1 = XEXP (x0, 0); goto L13202; L13820: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L8437; } x1 = XEXP (x0, 0); goto L13202; L8437: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 725; } x1 = XEXP (x0, 0); goto L13202; L8885: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SImode) goto L13821; x1 = XEXP (x0, 0); goto L13202; L13821: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FIX: goto L8886; case UNSPEC: goto L13823; default: break; } x1 = XEXP (x0, 0); goto L13202; L8886: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L8887; } x1 = XEXP (x0, 0); goto L13202; L8887: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 1) goto L8888; x1 = XEXP (x0, 0); goto L13202; L8888: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == 
const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE)) { return 787; } x1 = XEXP (x0, 0); goto L13202; L13823: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 30) goto L8900; x1 = XEXP (x0, 0); goto L13202; L8900: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L8901; } x1 = XEXP (x0, 0); goto L13202; L8901: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 1) goto L8902; x1 = XEXP (x0, 0); goto L13202; L8902: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE)) { return 789; } x1 = XEXP (x0, 0); goto L13202; L9144: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == HImode && GET_CODE (x2) == VEC_SELECT) goto L9145; x1 = XEXP (x0, 0); goto L13202; L9145: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4HImode)) { operands[1] = x3; goto L9146; } x1 = XEXP (x0, 0); goto L13202; L9146: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L9147; x1 = XEXP (x0, 0); goto L13202; L9147: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (const_0_to_3_operand (x4, SImode)) { operands[2] = x4; goto L9148; } x1 = XEXP (x0, 0); goto L13202; L9148: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 820; } x1 = XEXP (x0, 0); goto L13202; L9439: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode && GET_CODE (x1) == UNSPEC_VOLATILE && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 40) goto L9440; x1 = XEXP (x0, 0); goto L9442; L9440: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE)) { return 851; } x1 = XEXP (x0, 0); goto L9442; L13189: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, DImode)) { operands[0] = x1; goto L8459; } L13199: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DImode)) { operands[0] = x1; goto L8891; } goto L9442; L8459: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 34) goto L8460; x1 = XEXP (x0, 0); goto L13199; L8460: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L8461; } x1 = XEXP (x0, 0); goto L13199; L8461: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 729; } x1 = XEXP (x0, 0); goto L13199; L8891: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode) goto L13824; x1 = XEXP (x0, 0); goto L9442; L13824: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case VEC_SELECT: goto L8892; case UNSPEC: goto L13827; default: break; } x1 = XEXP (x0, 0); goto L9442; L8892: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4DImode) goto L13829; x1 = XEXP (x0, 0); goto L9442; L13829: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FIX: goto L8893; case UNSPEC: goto L13831; default: break; } x1 = XEXP (x0, 0); goto L9442; L8893: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L8894; } x1 = XEXP (x0, 0); goto L9442; L8894: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 1) goto L8895; x1 = XEXP (x0, 0); goto L9442; L8895: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE)) { return 788; } x1 = XEXP (x0, 0); goto L9442; L13831: ATTRIBUTE_UNUSED_LABEL if 
(XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 30) goto L8907; x1 = XEXP (x0, 0); goto L9442; L8907: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (nonimmediate_operand (x3, V4SFmode)) { operands[1] = x3; goto L8908; } x1 = XEXP (x0, 0); goto L9442; L8908: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 1) goto L8909; x1 = XEXP (x0, 0); goto L9442; L8909: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE && TARGET_64BIT)) { return 790; } x1 = XEXP (x0, 0); goto L9442; L13827: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x1, 0)) { case 1: goto L13832; case 2: goto L13833; default: break; } x1 = XEXP (x0, 0); goto L9442; L13832: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 45) goto L8931; x1 = XEXP (x0, 0); goto L9442; L8931: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (GET_MODE (x2) == DImode) goto L13834; L9080: ATTRIBUTE_UNUSED_LABEL if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_MMX)) { return 813; } x1 = XEXP (x0, 0); goto L9442; L13834: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case PLUS: goto L8932; case MINUS: goto L8981; case IOR: goto L9067; case XOR: goto L9074; case AND: goto L9085; case LSHIFTRT: goto L9243; case ASHIFT: goto L9262; default: break; } goto L9080; L8932: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L8933; } goto L9080; L8933: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L8934; } goto L9080; L8934: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 794; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); goto L9080; L8981: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L8982; } goto L9080; L8982: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L8983; } goto L9080; L8983: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 802; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); goto L9080; L9067: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L9068; } goto L9080; L9068: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L9069; } goto L9080; L9069: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 811; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); goto L9080; L9074: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L9075; } goto L9080; L9075: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L9076; } goto L9080; L9076: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 812; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); goto L9080; L9085: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode) goto L13842; goto L9080; L13842: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == NOT) goto L9093; if (register_operand (x3, DImode)) { operands[1] = x3; goto L9086; } goto L9080; L9093: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DImode)) { operands[1] = x4; goto L9094; } goto L9080; L9094: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L9095; } goto L9080; L9095: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 815; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); goto L9080; L9086: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if 
(nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L9087; } goto L9080; L9087: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 814; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); goto L9080; L9243: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L9244; } goto L9080; L9244: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, DImode)) { operands[2] = x3; goto L9245; } goto L9080; L9245: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 836; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); goto L9080; L9262: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L9263; } goto L9080; L9263: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, DImode)) { operands[2] = x3; goto L9264; } goto L9080; L9264: ATTRIBUTE_UNUSED_LABEL if ((TARGET_MMX)) { return 839; } x1 = XEXP (x0, 1); x2 = XVECEXP (x1, 0, 0); goto L9080; L13833: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 61) goto L9129; x1 = XEXP (x0, 0); goto L9442; L9129: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L9130; } x1 = XEXP (x0, 0); goto L9442; L9130: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L9131; } x1 = XEXP (x0, 0); goto L9442; L9131: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE || TARGET_3DNOW_A)) { return 818; } x1 = XEXP (x0, 0); goto L9442; L13191: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, SFmode)) { operands[0] = x1; goto L8513; } goto L9442; L8513: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode && GET_CODE (x1) == VEC_SELECT) goto L8514; x1 = XEXP (x0, 0); goto L9442; L8514: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L8515; } x1 = XEXP (x0, 0); goto L9442; L8515: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 1) goto L8516; x1 = XEXP (x0, 0); goto L9442; L8516: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE)) { return 736; } x1 = XEXP (x0, 0); goto L9442; L13196: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 17) goto L8758; goto L9442; L8758: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCFPmode && GET_CODE (x1) == COMPARE) goto L8759; x1 = XEXP (x0, 0); goto L9442; L8759: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == VEC_SELECT) goto L8760; x1 = XEXP (x0, 0); goto L9442; L8760: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[0] = x3; goto L8761; } x1 = XEXP (x0, 0); goto L9442; L8761: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L8762; x1 = XEXP (x0, 0); goto L9442; L8762: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L8763; x1 = XEXP (x0, 0); goto L9442; L8763: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == VEC_SELECT) goto L8764; x1 = XEXP (x0, 0); goto L9442; L8764: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8765; } x1 = XEXP (x0, 0); goto L9442; L8765: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L8766; x1 = XEXP (x0, 0); goto L9442; L8766: 
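/* Hard register 17 in the REG checks here is presumably the i386
   FLAGS_REG; the CCFPmode/CCFPUmode COMPAREs of the low V4SF elements
   matched in this region appear to correspond to the SSE
   comiss/ucomiss style comparison patterns.  */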
ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE)) { return 774; } x1 = XEXP (x0, 0); goto L9442; L13197: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 17) goto L8769; goto L9442; L8769: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCFPUmode && GET_CODE (x1) == COMPARE) goto L8770; x1 = XEXP (x0, 0); goto L9442; L8770: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == VEC_SELECT) goto L8771; x1 = XEXP (x0, 0); goto L9442; L8771: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[0] = x3; goto L8772; } x1 = XEXP (x0, 0); goto L9442; L8772: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L8773; x1 = XEXP (x0, 0); goto L9442; L8773: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L8774; x1 = XEXP (x0, 0); goto L9442; L8774: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == VEC_SELECT) goto L8775; x1 = XEXP (x0, 0); goto L9442; L8775: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V4SFmode)) { operands[1] = x3; goto L8776; } x1 = XEXP (x0, 0); goto L9442; L8776: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L8777; x1 = XEXP (x0, 0); goto L9442; L8777: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE)) { return 775; } x1 = XEXP (x0, 0); goto L9442; L9443: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == BLKmode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 44) goto L9444; x1 = XEXP (x0, 0); goto L9469; L9444: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (rtx_equal_p (x2, operands[0]) && (TARGET_SSE || TARGET_3DNOW_A)) { return 852; } x1 = XEXP (x0, 0); goto L9469; L13203: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V2SFmode)) { operands[0] = x1; goto L9470; } goto L10855; L9470: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2SFmode) goto L13843; x1 = XEXP (x0, 0); goto L10855; L13843: ATTRIBUTE_UNUSED_LABEL tem = recog_14 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L10855; L13204: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V2SImode)) { operands[0] = x1; goto L9488; } goto L10855; L9488: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2SImode) goto L13874; x1 = XEXP (x0, 0); goto L10855; L13874: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case GT: goto L9489; case GE: goto L9495; case EQ: goto L9501; case FIX: goto L9562; case SIGN_EXTEND: goto L9567; case VEC_SELECT: goto L9708; case UNSPEC: goto L13881; default: break; } x1 = XEXP (x0, 0); goto L10855; L9489: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9490; } x1 = XEXP (x0, 0); goto L10855; L9490: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9491; } x1 = XEXP (x0, 0); goto L10855; L9491: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 857; } x1 = XEXP (x0, 0); goto L10855; L9495: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9496; } x1 = XEXP (x0, 0); goto L10855; L9496: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = 
x2; goto L9497; } x1 = XEXP (x0, 0); goto L10855; L9497: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 858; } x1 = XEXP (x0, 0); goto L10855; L9501: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2SFmode)) { operands[1] = x2; goto L9502; } x1 = XEXP (x0, 0); goto L10855; L9502: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2SFmode)) { operands[2] = x2; goto L9503; } x1 = XEXP (x0, 0); goto L10855; L9503: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 859; } x1 = XEXP (x0, 0); goto L10855; L9562: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case V2SFmode: goto L13882; case V2DFmode: goto L13883; default: break; } x1 = XEXP (x0, 0); goto L10855; L13882: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, V2SFmode)) { operands[1] = x2; goto L9563; } x1 = XEXP (x0, 0); goto L10855; L9563: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 864; } x1 = XEXP (x0, 0); goto L10855; L13883: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, V2DFmode)) { operands[1] = x2; goto L9969; } x1 = XEXP (x0, 0); goto L10855; L9969: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 917; } x1 = XEXP (x0, 0); goto L10855; L9567: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2HImode && GET_CODE (x2) == SS_TRUNCATE) goto L9568; x1 = XEXP (x0, 0); goto L10855; L9568: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V2SImode && GET_CODE (x3) == FIX) goto L9569; x1 = XEXP (x0, 0); goto L10855; L9569: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, V2SFmode)) { operands[1] = x4; goto L9570; } x1 = XEXP (x0, 0); goto L10855; L9570: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW_A)) { return 865; } x1 = XEXP (x0, 0); goto L10855; L9708: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2SImode)) { operands[1] = x2; goto L9709; } x1 = XEXP (x0, 0); goto L10855; L9709: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 2) goto L9710; x1 = XEXP (x0, 0); goto L10855; L9710: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L9711; x1 = XEXP (x0, 0); goto L10855; L9711: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_3DNOW_A)) { return 878; } x1 = XEXP (x0, 0); goto L10855; L13881: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 30) goto L9973; x1 = XEXP (x0, 0); goto L10855; L9973: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[1] = x2; goto L9974; } x1 = XEXP (x0, 0); goto L10855; L9974: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 918; } x1 = XEXP (x0, 0); goto L10855; L13205: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V8QImode)) { operands[0] = x1; goto L9657; } goto L10855; L9657: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V8QImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 2 && XINT (x1, 1) == 49) goto L9658; x1 = XEXP (x0, 0); goto L10855; L9658: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V8QImode)) { operands[1] = x2; goto L9659; } x1 = XEXP (x0, 0); goto L10855; L9659: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V8QImode)) { operands[2] = x2; goto L9660; } x1 = XEXP (x0, 0); goto L10855; L9660: ATTRIBUTE_UNUSED_LABEL if ((TARGET_3DNOW)) { return 871; } x1 = XEXP (x0, 0); goto L10855; L13206: ATTRIBUTE_UNUSED_LABEL if 
(register_operand (x1, V4HImode)) { operands[0] = x1; goto L9691; } goto L10855; L9691: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V4HImode && GET_CODE (x1) == TRUNCATE) goto L9692; x1 = XEXP (x0, 0); goto L10855; L9692: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SImode && GET_CODE (x2) == LSHIFTRT) goto L9693; x1 = XEXP (x0, 0); goto L10855; L9693: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V4SImode && GET_CODE (x3) == PLUS) goto L9694; x1 = XEXP (x0, 0); goto L10855; L9694: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V4SImode && GET_CODE (x4) == MULT) goto L9695; x1 = XEXP (x0, 0); goto L10855; L9695: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (GET_MODE (x5) == V4SImode && GET_CODE (x5) == SIGN_EXTEND) goto L9696; x1 = XEXP (x0, 0); goto L10855; L9696: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (register_operand (x6, V4HImode)) { operands[1] = x6; goto L9697; } x1 = XEXP (x0, 0); goto L10855; L9697: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_MODE (x5) == V4SImode && GET_CODE (x5) == SIGN_EXTEND) goto L9698; x1 = XEXP (x0, 0); goto L10855; L9698: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, V4HImode)) { operands[2] = x6; goto L9699; } x1 = XEXP (x0, 0); goto L10855; L9699: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == V4SImode && GET_CODE (x4) == CONST_VECTOR && XVECLEN (x4, 0) == 4) goto L9700; x1 = XEXP (x0, 0); goto L10855; L9700: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (GET_CODE (x5) == CONST_INT && XWINT (x5, 0) == 32768L) goto L9701; x1 = XEXP (x0, 0); goto L10855; L9701: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 1); if (GET_CODE (x5) == CONST_INT && XWINT (x5, 0) == 32768L) goto L9702; x1 = XEXP (x0, 0); goto L10855; L9702: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 2); if (GET_CODE (x5) == CONST_INT && XWINT (x5, 0) == 32768L) goto L9703; x1 = XEXP (x0, 0); goto L10855; L9703: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 3); if (GET_CODE (x5) == CONST_INT && XWINT (x5, 0) == 32768L) goto L9704; x1 = XEXP (x0, 0); goto L10855; L9704: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (16)] && (TARGET_3DNOW)) { return 877; } x1 = XEXP (x0, 0); goto L10855; L13207: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V2DFmode)) { operands[0] = x1; goto L9739; } L13213: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, V2DFmode)) { operands[0] = x1; goto L9913; } L13222: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V2DFmode)) { operands[0] = x1; goto L10754; } goto L10855; L9739: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2DFmode) goto L13884; x1 = XEXP (x0, 0); goto L13213; L13884: ATTRIBUTE_UNUSED_LABEL tem = recog_15 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L13213; L9913: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2DFmode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 34) goto L9914; x1 = XEXP (x0, 0); goto L13222; L9914: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9915; } x1 = XEXP (x0, 0); goto L13222; L9915: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 908; } x1 = XEXP (x0, 0); goto L13222; L10754: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2DFmode) goto L13914; x1 = XEXP (x0, 0); goto L10855; L13914: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case UNSPEC: goto L13917; case VEC_MERGE: goto L10822; 
default: break; } x1 = XEXP (x0, 0); goto L10855; L13917: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1) goto L13919; x1 = XEXP (x0, 0); goto L10855; L13919: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 38L: goto L10755; case 39L: goto L10760; default: break; } x1 = XEXP (x0, 0); goto L10855; L10755: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[1] = x2; goto L10756; } x1 = XEXP (x0, 0); goto L10855; L10756: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 1002; } x1 = XEXP (x0, 0); goto L10855; L10760: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[1] = x2; goto L10761; } x1 = XEXP (x0, 0); goto L10855; L10761: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 1003; } x1 = XEXP (x0, 0); goto L10855; L10822: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[1] = x2; goto L10823; } x1 = XEXP (x0, 0); goto L10855; L10823: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L10824; } x1 = XEXP (x0, 0); goto L10855; L10824: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == CONST_INT) goto L13921; x1 = XEXP (x0, 0); goto L10855; L13921: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x2, 0) == XWINT (x2, 0)) switch ((int) XWINT (x2, 0)) { case 2L: goto L13923; case 1L: goto L13924; default: break; } x1 = XEXP (x0, 0); goto L10855; L13923: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[1]) == MEM || GET_CODE (operands[2]) == MEM))) { return 1013; } x1 = XEXP (x0, 0); goto L10855; L13924: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && ix86_binary_operator_ok (UNKNOWN, V2DFmode, operands))) { return 1015; } x1 = XEXP (x0, 0); goto L10855; L13208: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V2DImode)) { operands[0] = x1; goto L9835; } L13214: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, V2DImode)) { operands[0] = x1; goto L9918; } goto L10855; L9835: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2DImode) goto L13926; x1 = XEXP (x0, 0); goto L13214; L13926: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case NOT: goto L9842; case VEC_MERGE: goto L9849; case PLUS: goto L10082; case MINUS: goto L10130; case MULT: goto L10199; case UNSPEC: goto L13935; case LSHIFTRT: goto L10421; case ASHIFT: goto L10439; case VEC_CONCAT: goto L10787; case EQ: case LT: case LE: case UNORDERED: case NE: case UNGE: case UNGT: case ORDERED: case UNEQ: case UNLT: case UNLE: case LTGT: case GE: case GT: goto L13925; default: x1 = XEXP (x0, 0); goto L13214; } L13925: ATTRIBUTE_UNUSED_LABEL if (sse_comparison_operator (x1, V2DImode)) { operands[3] = x1; goto L9836; } x1 = XEXP (x0, 0); goto L13214; L9842: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (sse_comparison_operator (x2, V2DImode)) { operands[3] = x2; goto L9843; } x1 = XEXP (x0, 0); goto L13214; L9843: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9844; } x1 = XEXP (x0, 0); goto L13214; L9844: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V2DFmode)) { operands[2] = x3; goto L9845; } x1 = XEXP (x0, 0); goto L13214; L9845: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 899; } x1 = XEXP (x0, 0); goto L13214; L9849: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2DImode) goto 
L13937; x1 = XEXP (x0, 0); goto L13214; L13937: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NOT: goto L9859; case VEC_SELECT: goto L10736; case EQ: case LT: case LE: case UNORDERED: case NE: case UNGE: case UNGT: case ORDERED: case UNEQ: case UNLT: case UNLE: case LTGT: case GE: case GT: goto L13936; case SUBREG: case REG: goto L13939; default: x1 = XEXP (x0, 0); goto L13214; } L13936: ATTRIBUTE_UNUSED_LABEL if (sse_comparison_operator (x2, V2DImode)) { operands[3] = x2; goto L9850; } L13939: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, V2DImode)) { operands[1] = x2; goto L10746; } x1 = XEXP (x0, 0); goto L13214; L9859: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (sse_comparison_operator (x3, V2DImode)) { operands[3] = x3; goto L9860; } x1 = XEXP (x0, 0); goto L13214; L9860: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V2DFmode)) { operands[1] = x4; goto L9861; } x1 = XEXP (x0, 0); goto L13214; L9861: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, V2DFmode)) { operands[2] = x4; goto L9862; } x1 = XEXP (x0, 0); goto L13214; L9862: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2DImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L9863; x1 = XEXP (x0, 0); goto L13214; L9863: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L9864; x1 = XEXP (x0, 0); goto L13214; L9864: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 901; } x1 = XEXP (x0, 0); goto L13214; L10736: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DImode)) { operands[2] = x3; goto L10737; } x1 = XEXP (x0, 0); goto L13214; L10737: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 2) goto L10738; x1 = XEXP (x0, 0); goto L13214; L10738: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10739; x1 = XEXP (x0, 0); goto L13214; L10739: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10740; x1 = XEXP (x0, 0); goto L13214; L10740: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, V2DImode)) { operands[1] = x2; goto L10741; } x1 = XEXP (x0, 0); goto L13214; L10741: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 1000; } x1 = XEXP (x0, 0); goto L13214; L9850: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9851; } x1 = XEXP (x0, 0); goto L13214; L9851: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, V2DFmode)) { operands[2] = x3; goto L9852; } x1 = XEXP (x0, 0); goto L13214; L9852: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2DImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L9853; x1 = XEXP (x0, 0); goto L13214; L9853: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L9854; x1 = XEXP (x0, 0); goto L13214; L9854: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 900; } x1 = XEXP (x0, 0); goto L13214; L10746: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2DImode && GET_CODE (x2) == VEC_SELECT) goto L10747; x1 = XEXP (x0, 0); goto L13214; L10747: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DImode)) { operands[2] = x3; goto L10748; } x1 
= XEXP (x0, 0); goto L13214; L10748: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 2) goto L10749; x1 = XEXP (x0, 0); goto L13214; L10749: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L10750; x1 = XEXP (x0, 0); goto L13214; L10750: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10751; x1 = XEXP (x0, 0); goto L13214; L10751: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (1)] && (TARGET_SSE2)) { return 1001; } x1 = XEXP (x0, 0); goto L13214; L10082: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DImode)) { operands[1] = x2; goto L10083; } x1 = XEXP (x0, 0); goto L13214; L10083: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DImode)) { operands[2] = x2; goto L10084; } x1 = XEXP (x0, 0); goto L13214; L10084: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 933; } x1 = XEXP (x0, 0); goto L13214; L10130: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DImode)) { operands[1] = x2; goto L10131; } x1 = XEXP (x0, 0); goto L13214; L10131: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DImode)) { operands[2] = x2; goto L10132; } x1 = XEXP (x0, 0); goto L13214; L10132: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 941; } x1 = XEXP (x0, 0); goto L13214; L10199: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V2DImode && GET_CODE (x2) == ZERO_EXTEND) goto L10200; x1 = XEXP (x0, 0); goto L13214; L10200: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V2SImode && GET_CODE (x3) == VEC_SELECT) goto L10201; x1 = XEXP (x0, 0); goto L13214; L10201: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V4SImode)) { operands[1] = x4; goto L10202; } x1 = XEXP (x0, 0); goto L13214; L10202: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 2) goto L10203; x1 = XEXP (x0, 0); goto L13214; L10203: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10204; x1 = XEXP (x0, 0); goto L13214; L10204: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 1); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L10205; x1 = XEXP (x0, 0); goto L13214; L10205: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V2DImode && GET_CODE (x2) == ZERO_EXTEND) goto L10206; x1 = XEXP (x0, 0); goto L13214; L10206: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V2SImode && GET_CODE (x3) == VEC_SELECT) goto L10207; x1 = XEXP (x0, 0); goto L13214; L10207: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, V4SImode)) { operands[2] = x4; goto L10208; } x1 = XEXP (x0, 0); goto L13214; L10208: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 2) goto L10209; x1 = XEXP (x0, 0); goto L13214; L10209: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10210; x1 = XEXP (x0, 0); goto L13214; L10210: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 1); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (2)] && (TARGET_SSE2)) { return 950; } x1 = XEXP (x0, 0); goto L13214; L13935: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 2 && XINT (x1, 1) == 61) goto L10296; x1 = XEXP (x0, 0); goto L13214; L10296: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, 
V16QImode)) { operands[1] = x2; goto L10297; } x1 = XEXP (x0, 0); goto L13214; L10297: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V16QImode)) { operands[2] = x2; goto L10298; } x1 = XEXP (x0, 0); goto L13214; L10298: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 955; } x1 = XEXP (x0, 0); goto L13214; L10421: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DImode)) { operands[1] = x2; goto L10422; } x1 = XEXP (x0, 0); goto L13214; L10422: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SImode)) { operands[2] = x2; goto L10423; } L10474: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == SImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L10475; x1 = XEXP (x0, 0); goto L13214; L10423: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 975; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L10474; L10475: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonmemory_operand (x3, V2DImode)) { operands[2] = x3; goto L10476; } x1 = XEXP (x0, 0); goto L13214; L10476: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 983; } x1 = XEXP (x0, 0); goto L13214; L10439: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DImode)) { operands[1] = x2; goto L10440; } x1 = XEXP (x0, 0); goto L13214; L10440: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SImode)) { operands[2] = x2; goto L10441; } L10495: ATTRIBUTE_UNUSED_LABEL if (GET_MODE (x2) == SImode && GET_CODE (x2) == SUBREG && XINT (x2, 1) == 0) goto L10496; x1 = XEXP (x0, 0); goto L13214; L10441: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 978; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L10495; L10496: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonmemory_operand (x3, V2DImode)) { operands[2] = x3; goto L10497; } x1 = XEXP (x0, 0); goto L13214; L10497: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 986; } x1 = XEXP (x0, 0); goto L13214; L10787: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode) goto L13941; x1 = XEXP (x0, 0); goto L13214; L13941: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == VEC_SELECT) goto L10798; if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L10788; } x1 = XEXP (x0, 0); goto L13214; L10798: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, V2DImode)) { operands[1] = x3; goto L10799; } x1 = XEXP (x0, 0); goto L13214; L10799: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L10800; x1 = XEXP (x0, 0); goto L13214; L10800: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10801; x1 = XEXP (x0, 0); goto L13214; L10801: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 1010; } x1 = XEXP (x0, 0); goto L13214; L10788: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == CONST_INT) goto L13942; x1 = XEXP (x0, 0); goto L13214; L13942: ATTRIBUTE_UNUSED_LABEL if (XWINT (x2, 0) == 0L) goto L13944; x1 = XEXP (x0, 0); goto L13214; L13944: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && !TARGET_64BIT)) { return 1008; } L13945: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && TARGET_64BIT)) { return 1009; } x1 = XEXP (x0, 0); goto L13214; L9836: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9837; } x1 = XEXP (x0, 0); goto L13214; L9837: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, V2DFmode)) { 
operands[2] = x2; goto L9838; } x1 = XEXP (x0, 0); goto L13214; L9838: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 898; } x1 = XEXP (x0, 0); goto L13214; L9918: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2DImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 34) goto L9919; x1 = XEXP (x0, 0); goto L10855; L9919: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V2DImode)) { operands[1] = x2; goto L9920; } x1 = XEXP (x0, 0); goto L10855; L9920: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 909; } x1 = XEXP (x0, 0); goto L10855; L13209: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 17) goto L9867; goto L10855; L9867: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCFPmode && GET_CODE (x1) == COMPARE) goto L9868; x1 = XEXP (x0, 0); goto L10855; L9868: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L9869; x1 = XEXP (x0, 0); goto L10855; L9869: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[0] = x3; goto L9870; } x1 = XEXP (x0, 0); goto L10855; L9870: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L9871; x1 = XEXP (x0, 0); goto L10855; L9871: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9872; x1 = XEXP (x0, 0); goto L10855; L9872: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L9873; x1 = XEXP (x0, 0); goto L10855; L9873: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9874; } x1 = XEXP (x0, 0); goto L10855; L9874: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L9875; x1 = XEXP (x0, 0); goto L10855; L9875: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 902; } x1 = XEXP (x0, 0); goto L10855; L13210: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 17) goto L9878; goto L10855; L9878: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCFPUmode && GET_CODE (x1) == COMPARE) goto L9879; x1 = XEXP (x0, 0); goto L10855; L9879: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L9880; x1 = XEXP (x0, 0); goto L10855; L9880: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[0] = x3; goto L9881; } x1 = XEXP (x0, 0); goto L10855; L9881: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L9882; x1 = XEXP (x0, 0); goto L10855; L9882: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9883; x1 = XEXP (x0, 0); goto L10855; L9883: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L9884; x1 = XEXP (x0, 0); goto L10855; L9884: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9885; } x1 = XEXP (x0, 0); goto L10855; L9885: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L9886; x1 = XEXP (x0, 0); goto L10855; L9886: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 903; } x1 
= XEXP (x0, 0); goto L10855; L13211: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, SImode)) { operands[0] = x1; goto L9889; } L13215: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, SImode)) { operands[0] = x1; goto L9923; } L13225: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, SImode)) { operands[0] = x1; goto L10815; } goto L10855; L9889: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode) goto L13946; x1 = XEXP (x0, 0); goto L13215; L13946: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case UNSPEC: goto L13950; case FIX: goto L9983; case ZERO_EXTEND: goto L10311; default: break; } x1 = XEXP (x0, 0); goto L13215; L13950: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1) goto L13952; x1 = XEXP (x0, 0); goto L13215; L13952: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 33L: goto L9890; case 30L: goto L9997; default: break; } x1 = XEXP (x0, 0); goto L13215; L9890: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); switch (GET_MODE (x2)) { case V2DFmode: goto L13954; case V16QImode: goto L13955; default: break; } x1 = XEXP (x0, 0); goto L13215; L13954: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L9891; } x1 = XEXP (x0, 0); goto L13215; L9891: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 904; } x1 = XEXP (x0, 0); goto L13215; L13955: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L9896; } x1 = XEXP (x0, 0); goto L13215; L9896: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 905; } x1 = XEXP (x0, 0); goto L13215; L9997: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L9998; x1 = XEXP (x0, 0); goto L13215; L9998: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9999; } x1 = XEXP (x0, 0); goto L13215; L9999: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L10000; x1 = XEXP (x0, 0); goto L13215; L10000: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 922; } x1 = XEXP (x0, 0); goto L13215; L9983: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L9984; x1 = XEXP (x0, 0); goto L13215; L9984: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9985; } x1 = XEXP (x0, 0); goto L13215; L9985: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L9986; x1 = XEXP (x0, 0); goto L13215; L9986: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 920; } x1 = XEXP (x0, 0); goto L13215; L10311: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == HImode && GET_CODE (x2) == VEC_SELECT) goto L10312; x1 = XEXP (x0, 0); goto L13215; L10312: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V8HImode)) { operands[1] = x3; goto L10313; } x1 = XEXP (x0, 0); goto L13215; L10313: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L10314; x1 = XEXP (x0, 0); goto L13215; L10314: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (const_0_to_7_operand (x4, SImode)) { operands[2] = x4; goto L10315; } x1 = XEXP (x0, 0); goto L13215; L10315: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 957; } x1 = XEXP (x0, 0); goto L13215; L9923: 
ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 34) goto L9924; x1 = XEXP (x0, 0); goto L13225; L9924: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L9925; } x1 = XEXP (x0, 0); goto L13225; L9925: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 910; } x1 = XEXP (x0, 0); goto L13225; L10815: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode && GET_CODE (x1) == VEC_SELECT) goto L10816; x1 = XEXP (x0, 0); goto L10855; L10816: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SImode)) { operands[1] = x2; goto L10817; } x1 = XEXP (x0, 0); goto L10855; L10817: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 1) goto L10818; x1 = XEXP (x0, 0); goto L10855; L10818: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 1012; } x1 = XEXP (x0, 0); goto L10855; L13212: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == MEM) goto L9899; if (register_operand (x1, V16QImode)) { operands[0] = x1; goto L10063; } L13223: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, V16QImode)) { operands[0] = x1; goto L10764; } goto L10855; L9899: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L13956; case DImode: goto L13957; default: break; } goto L13223; L13956: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L9900; } goto L13223; L9900: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V16QImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 2 && XINT (x1, 1) == 32) goto L9901; x1 = XEXP (x0, 0); goto L13223; L9901: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L9902; } x1 = XEXP (x0, 0); goto L13223; L9902: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (register_operand (x2, V16QImode)) { operands[2] = x2; goto L9903; } x1 = XEXP (x0, 0); goto L13223; L9903: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 906; } x1 = XEXP (x0, 0); goto L13223; L13957: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L9907; } goto L13223; L9907: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V16QImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 2 && XINT (x1, 1) == 32) goto L9908; x1 = XEXP (x0, 0); goto L13223; L9908: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V16QImode)) { operands[1] = x2; goto L9909; } x1 = XEXP (x0, 0); goto L13223; L9909: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (register_operand (x2, V16QImode)) { operands[2] = x2; goto L9910; } x1 = XEXP (x0, 0); goto L13223; L9910: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 907; } x1 = XEXP (x0, 0); goto L13223; L10063: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V16QImode) goto L13958; x1 = XEXP (x0, 0); goto L13223; L13958: ATTRIBUTE_UNUSED_LABEL tem = recog_16 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L13223; L10764: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V16QImode) goto L13975; x1 = XEXP (x0, 0); goto L10855; L13975: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == UNSPEC) goto L13977; x1 = XEXP (x0, 0); goto L10855; L13977: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1) goto L13979; x1 = XEXP (x0, 0); goto L10855; L13979: 
ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 38L: goto L10765; case 39L: goto L10770; default: break; } x1 = XEXP (x0, 0); goto L10855; L10765: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V16QImode)) { operands[1] = x2; goto L10766; } x1 = XEXP (x0, 0); goto L10855; L10766: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 1004; } x1 = XEXP (x0, 0); goto L10855; L10770: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V16QImode)) { operands[1] = x2; goto L10771; } x1 = XEXP (x0, 0); goto L10855; L10771: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 1005; } x1 = XEXP (x0, 0); goto L10855; L13216: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V4SFmode)) { operands[0] = x1; goto L9928; } goto L10855; L9928: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V4SFmode) goto L13981; x1 = XEXP (x0, 0); goto L10855; L13981: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FLOAT: goto L9929; case VEC_MERGE: goto L10027; case SUBREG: goto L13984; default: break; } x1 = XEXP (x0, 0); goto L10855; L9929: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SImode)) { operands[1] = x2; goto L9930; } x1 = XEXP (x0, 0); goto L10855; L9930: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 911; } x1 = XEXP (x0, 0); goto L10855; L10027: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L10028; } x1 = XEXP (x0, 0); goto L10855; L10028: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == V4SFmode && GET_CODE (x2) == VEC_DUPLICATE) goto L10029; x1 = XEXP (x0, 0); goto L10855; L10029: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V2SFmode && GET_CODE (x3) == FLOAT_TRUNCATE) goto L10030; x1 = XEXP (x0, 0); goto L10855; L10030: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, V2DFmode)) { operands[2] = x4; goto L10031; } x1 = XEXP (x0, 0); goto L10855; L10031: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (14)] && (TARGET_SSE2)) { return 926; } x1 = XEXP (x0, 0); goto L10855; L13984: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 0) goto L10046; x1 = XEXP (x0, 0); goto L10855; L10046: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == V4SImode && GET_CODE (x2) == VEC_CONCAT) goto L10047; x1 = XEXP (x0, 0); goto L10855; L10047: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == V2SImode && GET_CODE (x3) == SUBREG && XINT (x3, 1) == 0) goto L10048; x1 = XEXP (x0, 0); goto L10855; L10048: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == V2SFmode && GET_CODE (x4) == FLOAT_TRUNCATE) goto L10049; x1 = XEXP (x0, 0); goto L10855; L10049: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (nonimmediate_operand (x5, V2DFmode)) { operands[1] = x5; goto L10050; } x1 = XEXP (x0, 0); goto L10855; L10050: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == V2SImode && GET_CODE (x3) == CONST_VECTOR && XVECLEN (x3, 0) == 2) goto L10051; x1 = XEXP (x0, 0); goto L10855; L10051: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10052; x1 = XEXP (x0, 0); goto L10855; L10052: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 928; } x1 = XEXP (x0, 0); goto L10855; L13217: 
ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V4SImode)) { operands[0] = x1; goto L9933; } goto L10855; L9933: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V4SImode) goto L13985; x1 = XEXP (x0, 0); goto L10855; L13985: ATTRIBUTE_UNUSED_LABEL tem = recog_17 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L10855; L13218: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DImode)) { operands[0] = x1; goto L9989; } L13224: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, DImode)) { operands[0] = x1; goto L10774; } goto L10855; L9989: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode) goto L14010; x1 = XEXP (x0, 0); goto L13224; L14010: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FIX: goto L9990; case UNSPEC: goto L14013; case MULT: goto L10186; default: break; } x1 = XEXP (x0, 0); goto L13224; L9990: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L9991; x1 = XEXP (x0, 0); goto L13224; L9991: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L9992; } x1 = XEXP (x0, 0); goto L13224; L9992: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L9993; x1 = XEXP (x0, 0); goto L13224; L9993: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2 && TARGET_64BIT)) { return 921; } x1 = XEXP (x0, 0); goto L13224; L14013: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 30) goto L10004; x1 = XEXP (x0, 0); goto L13224; L10004: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == VEC_SELECT) goto L10005; x1 = XEXP (x0, 0); goto L13224; L10005: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L10006; } x1 = XEXP (x0, 0); goto L13224; L10006: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L10007; x1 = XEXP (x0, 0); goto L13224; L10007: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2 && TARGET_64BIT)) { return 923; } x1 = XEXP (x0, 0); goto L13224; L10186: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L10187; x1 = XEXP (x0, 0); goto L13224; L10187: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == VEC_SELECT) goto L10188; x1 = XEXP (x0, 0); goto L13224; L10188: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, V2SImode)) { operands[1] = x4; goto L10189; } x1 = XEXP (x0, 0); goto L13224; L10189: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L10190; x1 = XEXP (x0, 0); goto L13224; L10190: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L10191; x1 = XEXP (x0, 0); goto L13224; L10191: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L10192; x1 = XEXP (x0, 0); goto L13224; L10192: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == VEC_SELECT) goto L10193; x1 = XEXP (x0, 0); goto L13224; L10193: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, V2SImode)) { operands[2] = x4; goto L10194; } x1 = XEXP (x0, 0); goto L13224; L10194: 
ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == PARALLEL && XVECLEN (x4, 0) == 1) goto L10195; x1 = XEXP (x0, 0); goto L13224; L10195: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 949; } x1 = XEXP (x0, 0); goto L13224; L10774: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode && GET_CODE (x1) == VEC_SELECT) goto L10775; x1 = XEXP (x0, 0); goto L10855; L10775: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DImode)) { operands[1] = x2; goto L10776; } x1 = XEXP (x0, 0); goto L10855; L10776: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 1) goto L10777; x1 = XEXP (x0, 0); goto L10855; L10777: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_CODE (x3) == CONST_INT) goto L14014; x1 = XEXP (x0, 0); goto L10855; L14014: ATTRIBUTE_UNUSED_LABEL if (XWINT (x3, 0) == 0L) goto L14016; x1 = XEXP (x0, 0); goto L10855; L14016: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && !TARGET_64BIT)) { return 1006; } L14017: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2 && TARGET_64BIT)) { return 1007; } x1 = XEXP (x0, 0); goto L10855; L13220: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V8HImode)) { operands[0] = x1; goto L10069; } goto L10855; L10069: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V8HImode) goto L14018; x1 = XEXP (x0, 0); goto L10855; L14018: ATTRIBUTE_UNUSED_LABEL tem = recog_18 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x1 = XEXP (x0, 0); goto L10855; L13221: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, TImode)) { operands[0] = x1; goto L10500; } goto L10855; L10500: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == TImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 45) goto L10501; if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 952; } x1 = XEXP (x0, 0); goto L10855; L10501: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (GET_MODE (x2) == TImode) goto L14049; x1 = XEXP (x0, 0); goto L10855; L14049: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ASHIFT: goto L10502; case LSHIFTRT: goto L10510; default: break; } x1 = XEXP (x0, 0); goto L10855; L10502: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, TImode)) { operands[1] = x3; goto L10503; } x1 = XEXP (x0, 0); goto L10855; L10503: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == MULT) goto L10504; x1 = XEXP (x0, 0); goto L10855; L10504: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (immediate_operand (x4, SImode)) { operands[2] = x4; goto L10505; } x1 = XEXP (x0, 0); goto L10855; L10505: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (TARGET_SSE2)) { return 987; } x1 = XEXP (x0, 0); goto L10855; L10510: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, TImode)) { operands[1] = x3; goto L10511; } x1 = XEXP (x0, 0); goto L10855; L10511: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == MULT) goto L10512; x1 = XEXP (x0, 0); goto L10855; L10512: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (immediate_operand (x4, SImode)) { operands[2] = x4; goto L10513; } x1 = XEXP (x0, 0); goto L10855; L10513: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (TARGET_SSE2)) { return 988; } x1 = XEXP (x0, 0); goto L10855; L13226: ATTRIBUTE_UNUSED_LABEL if 
(memory_operand (x1, DFmode)) { operands[0] = x1; goto L10840; } goto L10855; L10840: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DFmode && GET_CODE (x1) == VEC_SELECT) goto L10841; x1 = XEXP (x0, 0); goto L10855; L10841: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L10842; } x1 = XEXP (x0, 0); goto L10855; L10842: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == PARALLEL && XVECLEN (x2, 0) == 1) goto L10843; x1 = XEXP (x0, 0); goto L10855; L10843: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE2)) { return 1016; } x1 = XEXP (x0, 0); goto L10855; L10856: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == BLKmode) goto L14051; x1 = XEXP (x0, 0); goto L10872; L14051: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == UNSPEC) goto L14053; x1 = XEXP (x0, 0); goto L10872; L14053: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1) goto L14055; x1 = XEXP (x0, 0); goto L10872; L14055: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 59L: goto L10857; case 60L: goto L10861; default: break; } x1 = XEXP (x0, 0); goto L10872; L10857: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (rtx_equal_p (x2, operands[0]) && (TARGET_SSE2)) { return 1019; } x1 = XEXP (x0, 0); goto L10872; L10861: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (rtx_equal_p (x2, operands[0]) && (TARGET_SSE2)) { return 1020; } x1 = XEXP (x0, 0); goto L10872; L13227: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V4SFmode)) { operands[0] = x1; goto L10873; } goto ret0; L10873: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V4SFmode) goto L14057; goto ret0; L14057: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == UNSPEC) goto L14062; goto ret0; L14062: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x1, 0)) { case 2: goto L14067; case 1: goto L14070; default: break; } goto ret0; L14067: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 71L: goto L10874; case 72L: goto L10886; case 73L: goto L10898; default: break; } goto ret0; L10874: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L10875; } goto ret0; L10875: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L10876; } goto ret0; L10876: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1023; } goto ret0; L10886: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L10887; } goto ret0; L10887: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L10888; } goto ret0; L10888: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1025; } goto ret0; L10898: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V4SFmode)) { operands[1] = x2; goto L10899; } goto ret0; L10899: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L10900; } goto ret0; L10900: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1027; } goto ret0; L14070: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 74L: goto L10910; case 75L: goto L10915; default: break; } goto ret0; L10910: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L10911; } goto ret0; L10911: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1029; } goto ret0; L10915: ATTRIBUTE_UNUSED_LABEL 
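/* The recog_* routines in this file appear to be generated by genrecog
   from the i386 machine description: x0 is the insn pattern being
   recognized, x1..x7 are subexpressions reached through XEXP/XVECEXP,
   matched operands are recorded in recog_data.operand[] via operands[],
   and each routine returns the insn code number of the matching pattern
   or -1 (at ret0) when nothing matches.  Parenthesized conditions such
   as (TARGET_SSE2) come from the C condition of the corresponding
   pattern.  */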
x2 = XVECEXP (x1, 0, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[1] = x2; goto L10916; } goto ret0; L10916: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1030; } goto ret0; L13228: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V2DFmode)) { operands[0] = x1; goto L10879; } goto ret0; L10879: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V2DFmode) goto L14072; goto ret0; L14072: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case UNSPEC: goto L14076; case VEC_DUPLICATE: goto L10925; default: break; } goto ret0; L14076: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 2) goto L14079; goto ret0; L14079: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 71L: goto L10880; case 72L: goto L10892; case 73L: goto L10904; default: break; } goto ret0; L10880: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L10881; } goto ret0; L10881: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L10882; } goto ret0; L10882: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1024; } goto ret0; L10892: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L10893; } goto ret0; L10893: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L10894; } goto ret0; L10894: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1026; } goto ret0; L10904: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, V2DFmode)) { operands[1] = x2; goto L10905; } goto ret0; L10905: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L10906; } goto ret0; L10906: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1028; } goto ret0; L10925: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DFmode) goto L14083; goto ret0; L14083: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == VEC_SELECT) goto L10931; if (memory_operand (x2, DFmode)) { operands[1] = x2; goto L10926; } goto ret0; L10931: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, V2DFmode)) { operands[1] = x3; goto L10932; } goto ret0; L10932: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == PARALLEL && XVECLEN (x3, 0) == 1) goto L10933; goto ret0; L10933: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_SSE3)) { return 1033; } goto ret0; L10926: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1032; } goto ret0; L13229: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V16QImode)) { operands[0] = x1; goto L10919; } goto ret0; L10919: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == V16QImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 76) goto L10920; goto ret0; L10920: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (memory_operand (x2, V16QImode)) { operands[1] = x2; goto L10921; } goto ret0; L10921: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1031; } goto ret0; ret0: return -1; } static int recog_20 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = 
XEXP (x1, 1); switch (GET_CODE (x2)) { case MEM: goto L280; case FIX: goto L1019; case PLUS: goto L1260; case MINUS: goto L1978; case AND: goto L2977; case IOR: goto L3298; case XOR: goto L3681; case NEG: goto L4098; case ASHIFT: goto L4681; case ASHIFTRT: goto L4943; case LSHIFTRT: goto L5311; case ROTATE: goto L5593; case ROTATERT: goto L5769; default: break; } goto ret0; L280: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L281; goto ret0; L281: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L282; goto ret0; L282: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L283; goto ret0; L283: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L284; goto ret0; L284: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L285; goto ret0; L285: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (4)] && (!TARGET_64BIT)) { return 40; } goto ret0; L1019: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1020; } goto ret0; L1020: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1021; goto ret0; L1021: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !reload_completed && !reload_in_progress && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])))) { return 152; } goto ret0; L1260: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L14135; goto ret0; L14135: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == PLUS) goto L1261; if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L1472; } goto ret0; L1261: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ix86_carry_flag_operator (x4, SImode)) { operands[3] = x4; goto L1262; } goto ret0; L1262: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L1263; } goto ret0; L1263: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L1264; } goto ret0; L1264: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1265; goto ret0; L1265: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (PLUS, SImode, operands))) { return 182; } goto ret0; L1472: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L1473; } goto ret0; L1473: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1474; goto ret0; L1474: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (PLUS, SImode, operands))) { return 201; } goto ret0; L1978: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L1979; } goto ret0; L1979: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == PLUS) goto L1980; if (general_operand (x3, SImode)) { operands[2] = x3; goto L2018; } goto ret0; L1980: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ix86_carry_flag_operator (x4, SImode)) { 
operands[3] = x4; goto L1981; } goto ret0; L1981: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L1982; } goto ret0; L1982: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1983; goto ret0; L1983: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (MINUS, SImode, operands))) { return 232; } goto ret0; L2018: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2019; goto ret0; L2019: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (MINUS, SImode, operands))) { return 234; } goto ret0; L2977: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L2978; } goto ret0; L2978: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L2979; } goto ret0; L2979: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2980; goto ret0; L2980: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (AND, SImode, operands))) { return 290; } goto ret0; L3298: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L14138; goto ret0; L14138: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case ASHIFT: goto L4657; case ASHIFTRT: goto L4919; case SUBREG: case REG: case MEM: goto L14137; default: goto ret0; } L14137: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L3299; } goto ret0; L4657: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[0])) goto L4658; goto ret0; L4658: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonmemory_operand (x4, QImode)) { operands[2] = x4; goto L4659; } goto ret0; L4659: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == LSHIFTRT) goto L4660; goto ret0; L4660: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L4661; } goto ret0; L4661: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == QImode && GET_CODE (x4) == MINUS) goto L4662; goto ret0; L4662: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4663; goto ret0; L4663: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (rtx_equal_p (x5, operands[2])) goto L4664; goto ret0; L4664: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4665; goto ret0; L4665: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 406; } goto ret0; L4919: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[0])) goto L4920; goto ret0; L4920: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonmemory_operand (x4, QImode)) { operands[2] = x4; goto L4921; } goto ret0; L4921: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ASHIFT) goto L4922; goto ret0; L4922: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L4923; } goto ret0; L4923: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == QImode && GET_CODE (x4) == MINUS) goto L4924; goto ret0; L4924: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if 
(x5 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4925; goto ret0; L4925: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (rtx_equal_p (x5, operands[2])) goto L4926; goto ret0; L4926: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4927; goto ret0; L4927: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 424; } goto ret0; L3299: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L3300; } goto ret0; L3300: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3301; goto ret0; L3301: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (IOR, SImode, operands))) { return 308; } goto ret0; L3681: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L3682; } goto ret0; L3682: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L3683; } goto ret0; L3683: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3684; goto ret0; L3684: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (XOR, SImode, operands))) { return 330; } goto ret0; L4098: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L4099; } goto ret0; L4099: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4100; goto ret0; L4100: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_unary_operator_ok (NEG, SImode, operands))) { return 354; } goto ret0; L4681: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L4682; } goto ret0; L4682: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4683; } goto ret0; L4683: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4684; goto ret0; L4684: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ASHIFT, SImode, operands))) { return 407; } goto ret0; L4943: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L4944; } goto ret0; L4944: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT) goto L14140; L5004: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5005; } goto ret0; L14140: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x3, SImode)) { operands[2] = x3; goto L4945; } L14141: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x3, QImode)) { operands[2] = x3; goto L4975; } goto L5004; L4945: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4946; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L14141; L4946: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (INTVAL (operands[2]) == 31 && (TARGET_USE_CLTD || optimize_size) && ix86_binary_operator_ok (ASHIFTRT, SImode, operands))) { return 425; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L14141; L4975: 
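/* The const1_operand branch entered here only succeeds when TARGET_SHIFT1 or
   optimize_size holds; otherwise matching is rewound and the general
   nonmemory_operand shift-by-count alternative at L5004 is tried instead.  */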
ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4976; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5004; L4976: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ASHIFTRT, SImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 427; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5004; L5005: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5006; goto ret0; L5006: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ASHIFTRT, SImode, operands))) { return 429; } goto ret0; L5311: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L5312; } goto ret0; L5312: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5313; } L5342: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5343; } goto ret0; L5313: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5314; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5342; L5314: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (LSHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 451; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5342; L5343: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5344; goto ret0; L5344: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 453; } goto ret0; L5593: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L5594; } goto ret0; L5594: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5595; } L5624: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5625; } goto ret0; L5595: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5596; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5624; L5596: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATE, SImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 471; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5624; L5625: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5626; goto ret0; L5626: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATE, SImode, operands))) { return 473; } goto ret0; L5769: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L5770; } goto ret0; L5770: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5771; } L5800: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5801; } goto ret0; L5771: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto 
L5772; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5800; L5772: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATERT, SImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 483; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5800; L5801: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5802; goto ret0; L5802: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATERT, SImode, operands))) { return 485; } goto ret0; ret0: return -1; } static int recog_21 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); switch (GET_CODE (x2)) { case MEM: goto L14111; case ZERO_EXTRACT: goto L1788; default: break; } L14087: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L279; } L14088: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L327; } goto ret0; L14111: ATTRIBUTE_UNUSED_LABEL if (push_operand (x2, SImode)) { operands[0] = x2; goto L259; } goto L14087; L259: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_no_elim_operand (x2, SImode)) { operands[1] = x2; goto L260; } x2 = XEXP (x1, 0); goto L14087; L260: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L261; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14087; L261: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L262; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14087; L262: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (!TARGET_64BIT)) { return 38; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14087; L1788: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[0] = x3; goto L1789; } goto ret0; L1789: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L1790; goto ret0; L1790: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L1791; goto ret0; L1791: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L14112; goto ret0; L14112: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case PLUS: goto L1792; case AND: goto L3121; case IOR: goto L3526; case XOR: goto L3864; default: break; } goto ret0; L1792: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L1793; goto ret0; L1793: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L1794; } goto ret0; L1794: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L1795; goto ret0; L1795: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L1848; goto ret0; L1848: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L1849; if (general_operand (x3, QImode)) { 
operands[2] = x3; goto L1797; } L1822: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L1823; } goto ret0; L1849: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[2] = x4; goto L1850; } goto ret0; L1850: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L1851; goto ret0; L1851: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L1852; goto ret0; L1852: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1853; goto ret0; L1853: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 224; } goto ret0; L1797: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1798; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L1822; L1798: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT)) { return 222; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L1822; L1823: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1824; goto ret0; L1824: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 223; } goto ret0; L3121: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L3122; goto ret0; L3122: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L3123; } goto ret0; L3123: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3124; goto ret0; L3124: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3173; goto ret0; L3173: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode) goto L14116; L3125: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L3126; } goto ret0; L14116: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case ZERO_EXTEND: goto L3174; case ZERO_EXTRACT: goto L3230; default: break; } goto L3125; L3174: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (general_operand (x4, QImode)) { operands[2] = x4; goto L3175; } L3202: ATTRIBUTE_UNUSED_LABEL if (ext_register_operand (x4, VOIDmode)) { operands[2] = x4; goto L3203; } goto L3125; L3175: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3176; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); x4 = XEXP (x3, 0); goto L3202; L3176: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT)) { return 302; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); x4 = XEXP (x3, 0); goto L3202; L3203: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3204; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3125; L3204: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 303; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3125; L3230: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[2] = x4; goto L3231; } goto L3125; 
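/* The (zero_extract ... (const_int 8) (const_int 8)) tests here and above select
   an eight-bit field starting at bit 8 of a general register, i.e. the %ah-style
   high byte; these branches match what appear to be the *_ext variants of the
   byte-wise logical patterns.  */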
L3231: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3232; goto L3125; L3232: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3233; goto L3125; L3233: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3234; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3125; L3234: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 304; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3125; L3126: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3127; goto ret0; L3127: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 300; } goto ret0; L3526: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L3527; goto ret0; L3527: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L3528; } goto ret0; L3528: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3529; goto ret0; L3529: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3556; goto ret0; L3556: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode) goto L14118; L3530: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L3531; } goto ret0; L14118: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case ZERO_EXTEND: goto L3557; case ZERO_EXTRACT: goto L3613; default: break; } goto L3530; L3557: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (general_operand (x4, QImode)) { operands[2] = x4; goto L3558; } L3585: ATTRIBUTE_UNUSED_LABEL if (ext_register_operand (x4, VOIDmode)) { operands[2] = x4; goto L3586; } goto L3530; L3558: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3559; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); x4 = XEXP (x3, 0); goto L3585; L3559: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && (!TARGET_PARTIAL_REG_STALL || optimize_size))) { return 324; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); x4 = XEXP (x3, 0); goto L3585; L3586: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3587; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3530; L3587: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (!TARGET_PARTIAL_REG_STALL || optimize_size))) { return 325; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3530; L3613: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[2] = x4; goto L3614; } goto L3530; L3614: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3615; goto L3530; L3615: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3616; goto L3530; L3616: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3617; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3530; L3617: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if 
(GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((!TARGET_PARTIAL_REG_STALL || optimize_size))) { return 326; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3530; L3531: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3532; goto ret0; L3532: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((!TARGET_PARTIAL_REG_STALL || optimize_size))) { return 323; } goto ret0; L3864: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L3865; goto ret0; L3865: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L3866; } goto ret0; L3866: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3867; goto ret0; L3867: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3894; goto ret0; L3894: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode) goto L14120; L3868: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L3869; } goto ret0; L14120: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case ZERO_EXTEND: goto L3895; case ZERO_EXTRACT: goto L3951; default: break; } goto L3868; L3895: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (general_operand (x4, QImode)) { operands[2] = x4; goto L3896; } L3923: ATTRIBUTE_UNUSED_LABEL if (ext_register_operand (x4, VOIDmode)) { operands[2] = x4; goto L3924; } goto L3868; L3896: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3897; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); x4 = XEXP (x3, 0); goto L3923; L3897: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && (!TARGET_PARTIAL_REG_STALL || optimize_size))) { return 343; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); x4 = XEXP (x3, 0); goto L3923; L3924: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3925; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3868; L3925: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (!TARGET_PARTIAL_REG_STALL || optimize_size))) { return 344; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3868; L3951: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[2] = x4; goto L3952; } goto L3868; L3952: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3953; goto L3868; L3953: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3954; goto L3868; L3954: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3955; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3868; L3955: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((!TARGET_PARTIAL_REG_STALL || optimize_size))) { return 345; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L3868; L3869: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3870; goto ret0; L3870: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if 
(GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((!TARGET_PARTIAL_REG_STALL || optimize_size))) { return 342; } goto ret0; L279: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L14122; x2 = XEXP (x1, 0); goto L14088; L14122: ATTRIBUTE_UNUSED_LABEL tem = recog_20 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x2 = XEXP (x1, 0); goto L14088; L327: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L14143; L289: ATTRIBUTE_UNUSED_LABEL if (const0_operand (x2, SImode)) { operands[1] = x2; goto L290; } L299: ATTRIBUTE_UNUSED_LABEL if (immediate_operand (x2, SImode)) { operands[1] = x2; goto L300; } goto ret0; L14143: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ZERO_EXTEND: goto L656; case MULT: goto L2202; case DIV: goto L2640; case UDIV: goto L2793; case UNSPEC: goto L14152; case CTZ: goto L6447; case MINUS: goto L6471; case PLUS: goto L6628; case IF_THEN_ELSE: goto L7746; case SUBREG: case REG: goto L14142; default: goto L289; } L14142: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[1] = x2; goto L328; } goto L289; L656: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case HImode: goto L14153; case QImode: goto L14154; default: break; } goto L289; L14153: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, HImode)) { operands[1] = x3; goto L657; } goto L289; L657: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L658; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L658: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_ZERO_EXTEND_WITH_AND && !optimize_size)) { return 103; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L14154: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L703; } goto L289; L703: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L704; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L704: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14155; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L14155: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14157; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L14157: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14159; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L14159: ATTRIBUTE_UNUSED_LABEL if ((TARGET_ZERO_EXTEND_WITH_AND && !optimize_size)) { return 108; } L14160: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_ZERO_EXTEND_WITH_AND || optimize_size)) { return 109; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2202: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L2203; } goto L289; L2203: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L2204; } goto L289; L2204: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2205; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2205: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) { return 248; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2640: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[2] = x3; goto L2641; } goto L289; L2641: ATTRIBUTE_UNUSED_LABEL x3 = 
XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[3] = x3; goto L2642; } goto L289; L2642: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2643; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2643: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L2644; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2644: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MOD) goto L2645; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2645: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L2646; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2646: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[3]) && (!optimize_size && !TARGET_USE_CLTD) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 269; } L2670: ATTRIBUTE_UNUSED_LABEL if (rtx_equal_p (x3, operands[3]) && (optimize_size || TARGET_USE_CLTD) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 270; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2793: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L2794; } goto L289; L2794: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L2795; } goto L289; L2795: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2796; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2796: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[3] = x2; goto L2797; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2797: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == UMOD) goto L2798; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2798: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2799; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L2799: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 275; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L14152: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 12) goto L6338; goto L289; L6338: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L6339; goto L289; L6339: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6340; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L6340: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT)) { return 530; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L6447: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L6448; } goto L289; L6448: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6449; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L6449: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 540; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L6471: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (31)]) goto L6472; goto L289; L6472: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == CLZ) 
goto L6473; goto L289; L6473: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L6474; } goto L289; L6474: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6475; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L6475: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 542; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L6628: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == UNSPEC && XVECLEN (x3, 0) == 1 && XINT (x3, 1) == 15) goto L6629; goto L289; L6629: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L6630; goto L289; L6630: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SImode)) { operands[1] = x3; goto L6631; } goto L289; L6631: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6632; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L6632: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT)) { return 552; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L7746: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, VOIDmode)) { operands[1] = x3; goto L7747; } goto L289; L7747: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (-1)]) goto L7748; goto L289; L7748: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7749; goto L289; L7749: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7750; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L7750: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 650; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L328: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L329; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L329: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[1])) goto L330; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L330: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[0])) { return 47; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L289; L290: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L291; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L299; L291: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && (!TARGET_USE_MOV0 || optimize_size))) { return 41; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L299; L300: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L301; goto ret0; L301: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && operands[1] == constm1_rtx && (TARGET_PENTIUM || optimize_size))) { return 42; } goto ret0; ret0: return -1; } static int recog_22 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; 
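/* Like the other recog_NN helpers in this generated recognizer, recog_22 walks
   the RTL of X0 (the pattern of INSN), records matched operands through the
   OPERANDS alias into recog_data.operand[], and returns the insn code number of
   the matching pattern, or -1.  When PNUM_CLOBBERS is non-null, a pattern that
   needs extra clobbers appended may still match, and the required clobber count
   is stored through the pointer.  */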
rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); if (register_operand (x2, HImode)) { operands[0] = x2; goto L356; } L14099: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L1062; } goto ret0; L356: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode) goto L14162; x2 = XEXP (x1, 0); goto L14099; L14162: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ZERO_EXTEND: goto L673; case MULT: goto L2232; case DIV: goto L2717; case SUBREG: case REG: goto L14161; default: x2 = XEXP (x1, 0); goto L14099; } L14161: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, HImode)) { operands[1] = x2; goto L357; } x2 = XEXP (x1, 0); goto L14099; L673: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L674; } x2 = XEXP (x1, 0); goto L14099; L674: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L675; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L675: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14165; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L14165: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14167; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L14167: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14169; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L14169: ATTRIBUTE_UNUSED_LABEL if ((TARGET_ZERO_EXTEND_WITH_AND && !optimize_size)) { return 105; } L14170: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_ZERO_EXTEND_WITH_AND || optimize_size)) { return 106; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2232: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == HImode) goto L14172; x2 = XEXP (x1, 0); goto L14099; L14172: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case ZERO_EXTEND: goto L2261; case SIGN_EXTEND: goto L2279; case SUBREG: case REG: case MEM: goto L14171; default: x2 = XEXP (x1, 0); goto L14099; } L14171: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L2233; } x2 = XEXP (x1, 0); goto L14099; L2261: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L2262; } x2 = XEXP (x1, 0); goto L14099; L2262: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == HImode && GET_CODE (x3) == ZERO_EXTEND) goto L2263; x2 = XEXP (x1, 0); goto L14099; L2263: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[2] = x4; goto L2264; } x2 = XEXP (x1, 0); goto L14099; L2264: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2265; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2265: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_QIMODE_MATH && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 252; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2279: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L2280; } x2 = XEXP (x1, 0); goto L14099; L2280: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == HImode && GET_CODE (x3) == SIGN_EXTEND) goto L2281; x2 = XEXP (x1, 0); goto L14099; L2281: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[2] = x4; goto L2282; } x2 = XEXP (x1, 0); goto 
L14099; L2282: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2283; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2283: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_QIMODE_MATH && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 253; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2233: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L2234; } x2 = XEXP (x1, 0); goto L14099; L2234: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2235; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2235: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) { return 250; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2717: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, HImode)) { operands[1] = x3; goto L2718; } x2 = XEXP (x1, 0); goto L14099; L2718: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, HImode)) { operands[2] = x3; goto L2719; } x2 = XEXP (x1, 0); goto L14099; L2719: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2720; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2720: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, HImode)) { operands[3] = x2; goto L2721; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2721: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == MOD) goto L2722; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2722: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2723; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L2723: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_HIMODE_MATH) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 272; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L357: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L358; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L358: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[1])) goto L359; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L359: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[0]) && (TARGET_PARTIAL_REG_STALL)) { return 53; } L366: ATTRIBUTE_UNUSED_LABEL if (rtx_equal_p (x2, operands[0]) && (! 
TARGET_PARTIAL_REG_STALL)) { return 54; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14099; L1062: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode) goto L14174; goto ret0; L14174: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FIX: goto L1063; case PLUS: goto L1242; case MINUS: goto L1960; case AND: goto L3034; case IOR: goto L3403; case XOR: goto L3786; case NEG: goto L4159; case ASHIFT: goto L4738; case ASHIFTRT: goto L5087; case LSHIFTRT: goto L5425; case ROTATE: goto L5653; case ROTATERT: goto L5829; default: break; } goto ret0; L1063: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1064; } goto ret0; L1064: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1065; goto ret0; L1065: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !reload_completed && !reload_in_progress && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])))) { return 157; } goto ret0; L1242: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == HImode) goto L14186; goto ret0; L14186: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == PLUS) goto L1243; if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L1592; } goto ret0; L1243: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ix86_carry_flag_operator (x4, HImode)) { operands[3] = x4; goto L1244; } goto ret0; L1244: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L1245; } goto ret0; L1245: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L1246; } goto ret0; L1246: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1247; goto ret0; L1247: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (PLUS, HImode, operands))) { return 181; } goto ret0; L1592: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L1593; } goto ret0; L1593: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1594; goto ret0; L1594: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14188; goto ret0; L14188: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14190; goto ret0; L14190: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14192; goto ret0; L14192: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (PLUS, HImode, operands))) { return 209; } L14193: ATTRIBUTE_UNUSED_LABEL if ((TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (PLUS, HImode, operands))) { return 210; } goto ret0; L1960: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L1961; } goto ret0; L1961: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == HImode && GET_CODE (x3) == PLUS) goto L1962; if (general_operand (x3, HImode)) { operands[2] = x3; goto L2098; } goto ret0; L1962: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ix86_carry_flag_operator (x4, HImode)) { operands[3] = x4; goto L1963; } goto ret0; L1963: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, HImode)) { operands[2] = x4; goto L1964; } goto ret0; L1964: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1965; goto ret0; 
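/* (reg:CC 17) in the tests below is the i386 flags register (FLAGS_REG); each
   arithmetic SET in these parallels is matched together with a CLOBBER of it.  */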
L1965: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (MINUS, HImode, operands))) { return 231; } goto ret0; L2098: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2099; goto ret0; L2099: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (MINUS, HImode, operands))) { return 240; } goto ret0; L3034: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L3035; } goto ret0; L3035: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L3036; } goto ret0; L3036: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3037; goto ret0; L3037: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (AND, HImode, operands))) { return 294; } goto ret0; L3403: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L3404; } goto ret0; L3404: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L3405; } goto ret0; L3405: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3406; goto ret0; L3406: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (IOR, HImode, operands))) { return 315; } goto ret0; L3786: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L3787; } goto ret0; L3787: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, HImode)) { operands[2] = x3; goto L3788; } goto ret0; L3788: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3789; goto ret0; L3789: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (XOR, HImode, operands))) { return 337; } goto ret0; L4159: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L4160; } goto ret0; L4160: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4161; goto ret0; L4161: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_unary_operator_ok (NEG, HImode, operands))) { return 358; } goto ret0; L4738: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L4739; } goto ret0; L4739: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4740; } goto ret0; L4740: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4741; goto ret0; L4741: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14194; goto ret0; L14194: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14196; goto ret0; L14196: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14198; goto ret0; L14198: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (ASHIFT, HImode, operands))) { return 411; } L14199: ATTRIBUTE_UNUSED_LABEL if ((TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (ASHIFT, HImode, 
operands))) { return 412; } goto ret0; L5087: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L5088; } goto ret0; L5088: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5089; } L5102: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5103; } goto ret0; L5089: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5090; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5102; L5090: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ASHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 435; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5102; L5103: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5104; goto ret0; L5104: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ASHIFTRT, HImode, operands))) { return 436; } goto ret0; L5425: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L5426; } goto ret0; L5426: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5427; } L5440: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5441; } goto ret0; L5427: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5428; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5440; L5428: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (LSHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 459; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5440; L5441: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5442; goto ret0; L5442: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 460; } goto ret0; L5653: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L5654; } goto ret0; L5654: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5655; } L5668: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5669; } goto ret0; L5655: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5656; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5668; L5656: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATE, HImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 475; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5668; L5669: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5670; goto ret0; L5670: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATE, HImode, operands))) { return 476; } goto ret0; L5829: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 
0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L5830; } goto ret0; L5830: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5831; } L5844: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5845; } goto ret0; L5831: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5832; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5844; L5832: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATERT, HImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 487; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5844; L5845: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5846; goto ret0; L5846: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATERT, HImode, operands))) { return 488; } goto ret0; ret0: return -1; } static int recog_23 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); if (register_operand (x2, QImode)) { operands[0] = x2; goto L399; } L14101: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L1223; } goto ret0; L399: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode) goto L14201; x2 = XEXP (x1, 0); goto L14101; L14201: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case MULT: goto L2246; case DIV: goto L2522; case UDIV: goto L2536; case SUBREG: case REG: goto L14200; default: x2 = XEXP (x1, 0); goto L14101; } L14200: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, QImode)) { operands[1] = x2; goto L400; } x2 = XEXP (x1, 0); goto L14101; L2246: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L2247; } x2 = XEXP (x1, 0); goto L14101; L2247: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, QImode)) { operands[2] = x3; goto L2248; } x2 = XEXP (x1, 0); goto L14101; L2248: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2249; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14101; L2249: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_QIMODE_MATH && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 251; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14101; L2522: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, HImode)) { operands[1] = x3; goto L2523; } x2 = XEXP (x1, 0); goto L14101; L2523: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, QImode)) { operands[2] = x3; goto L2524; } x2 = XEXP (x1, 0); goto L14101; L2524: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2525; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14101; L2525: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_QIMODE_MATH)) { 
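/* The integer returned on a successful match (here 264) is the insn code number
   of the recognized pattern, i.e. an index into the generated insn_data[] table.  */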
return 264; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14101; L2536: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, HImode)) { operands[1] = x3; goto L2537; } x2 = XEXP (x1, 0); goto L14101; L2537: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, QImode)) { operands[2] = x3; goto L2538; } x2 = XEXP (x1, 0); goto L14101; L2538: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2539; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14101; L2539: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_QIMODE_MATH)) { return 265; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14101; L400: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L401; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14101; L401: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[1])) goto L402; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14101; L402: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[0])) { return 60; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14101; L1223: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode) goto L14204; goto ret0; L14204: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case PLUS: goto L1224; case MINUS: goto L1942; case AND: goto L3061; case IOR: goto L3448; case XOR: goto L3831; case NEG: goto L4182; case ASHIFT: goto L4779; case ASHIFTRT: goto L5141; case LSHIFTRT: goto L5479; case ROTATE: goto L5697; case ROTATERT: goto L5857; default: break; } goto ret0; L1224: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == QImode) goto L14215; goto ret0; L14215: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == PLUS) goto L1225; if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L1683; } goto ret0; L1225: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ix86_carry_flag_operator (x4, QImode)) { operands[3] = x4; goto L1226; } goto ret0; L1226: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L1227; } goto ret0; L1227: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L1228; } goto ret0; L1228: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1229; goto ret0; L1229: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (PLUS, QImode, operands))) { return 180; } goto ret0; L1683: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L1684; } goto ret0; L1684: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1685; goto ret0; L1685: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14217; goto ret0; L14217: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14219; goto ret0; L14219: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14221; goto ret0; L14221: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (PLUS, QImode, operands))) { return 215; } L14222: ATTRIBUTE_UNUSED_LABEL if ((TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (PLUS, QImode, operands))) { return 216; } goto ret0; L1942: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L1943; } goto ret0; L1943: 
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == QImode && GET_CODE (x3) == PLUS) goto L1944; if (general_operand (x3, QImode)) { operands[2] = x3; goto L2136; } goto ret0; L1944: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ix86_carry_flag_operator (x4, QImode)) { operands[3] = x4; goto L1945; } goto ret0; L1945: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, QImode)) { operands[2] = x4; goto L1946; } goto ret0; L1946: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1947; goto ret0; L1947: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (MINUS, QImode, operands))) { return 230; } goto ret0; L2136: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2137; goto ret0; L2137: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (MINUS, QImode, operands))) { return 243; } goto ret0; L3061: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L3062; } goto ret0; L3062: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L3063; } goto ret0; L3063: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3064; goto ret0; L3064: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (AND, QImode, operands))) { return 296; } goto ret0; L3448: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L3449; } goto ret0; L3449: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L3450; } goto ret0; L3450: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3451; goto ret0; L3451: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (IOR, QImode, operands))) { return 318; } goto ret0; L3831: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L3832; } goto ret0; L3832: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L3833; } goto ret0; L3833: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3834; goto ret0; L3834: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (XOR, QImode, operands))) { return 340; } goto ret0; L4182: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L4183; } goto ret0; L4183: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4184; goto ret0; L4184: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_unary_operator_ok (NEG, QImode, operands))) { return 360; } goto ret0; L4779: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L4780; } goto ret0; L4780: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4781; } goto ret0; L4781: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 
0, 1); if (GET_CODE (x1) == CLOBBER) goto L4782; goto ret0; L4782: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14223; goto ret0; L14223: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14225; goto ret0; L14225: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14227; goto ret0; L14227: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (ASHIFT, QImode, operands))) { return 414; } L14228: ATTRIBUTE_UNUSED_LABEL if ((TARGET_PARTIAL_REG_STALL && ix86_binary_operator_ok (ASHIFT, QImode, operands))) { return 415; } goto ret0; L5141: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L5142; } goto ret0; L5142: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5143; } L5172: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5173; } goto ret0; L5143: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5144; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5172; L5144: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ASHIFTRT, QImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 439; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5172; L5173: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5174; goto ret0; L5174: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ASHIFTRT, QImode, operands))) { return 441; } goto ret0; L5479: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L5480; } goto ret0; L5480: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5481; } L5510: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5511; } goto ret0; L5481: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5482; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5510; L5482: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (LSHIFTRT, QImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 463; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5510; L5511: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5512; goto ret0; L5512: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (LSHIFTRT, QImode, operands))) { return 465; } goto ret0; L5697: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L5698; } goto ret0; L5698: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5699; } L5728: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5729; } goto ret0; L5699: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5700; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5728; L5700: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && 
XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATE, QImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 478; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5728; L5729: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5730; goto ret0; L5730: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATE, QImode, operands))) { return 480; } goto ret0; L5857: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L5858; } goto ret0; L5858: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5859; } L5888: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5889; } goto ret0; L5859: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5860; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5888; L5860: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATERT, QImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 489; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5888; L5889: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5890; goto ret0; L5890: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ROTATERT, QImode, operands))) { return 491; } goto ret0; ret0: return -1; } static int recog_24 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case MEM: goto L528; case ZERO_EXTEND: goto L731; case FIX: goto L949; case PLUS: goto L1181; case MINUS: goto L1872; case AND: goto L2950; case IOR: goto L3253; case XOR: goto L3636; case NEG: goto L4063; case ASHIFT: goto L4599; case ASHIFTRT: goto L4820; case LSHIFTRT: goto L5227; case ROTATE: goto L5565; case ROTATERT: goto L5741; default: break; } goto ret0; L528: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L529; goto ret0; L529: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L530; goto ret0; L530: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L531; goto ret0; L531: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L532; goto ret0; L532: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L533; goto ret0; L533: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)] && (TARGET_64BIT)) { return 79; } goto ret0; L731: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L732; } goto ret0; L732: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L733; goto ret0; L733: 
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14243; goto ret0; L14243: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14245; goto ret0; L14245: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14247; goto ret0; L14247: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && !TARGET_INTER_UNIT_MOVES)) { return 111; } L14248: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_INTER_UNIT_MOVES)) { return 112; } goto ret0; L949: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L950; } goto ret0; L950: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L951; goto ret0; L951: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !reload_completed && !reload_in_progress && (!SSE_FLOAT_MODE_P (GET_MODE (operands[1])) || !TARGET_64BIT))) { return 147; } goto ret0; L1181: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode) goto L14250; goto ret0; L14250: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == PLUS) goto L1196; if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L1182; } goto ret0; L1196: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ix86_carry_flag_operator (x4, DImode)) { operands[3] = x4; goto L1197; } goto ret0; L1197: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L1198; } goto ret0; L1198: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L1199; } goto ret0; L1199: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1200; goto ret0; L1200: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands))) { return 178; } goto ret0; L1182: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, DImode)) { operands[2] = x3; goto L1183; } L1395: ATTRIBUTE_UNUSED_LABEL if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L1396; } goto ret0; L1183: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1184; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L1395; L1184: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands))) { return 177; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L1395; L1396: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1397; goto ret0; L1397: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (PLUS, DImode, operands))) { return 196; } goto ret0; L1872: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L1887; } goto ret0; L1887: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == DImode && GET_CODE (x3) == PLUS) goto L1888; if (general_operand (x3, DImode)) { operands[2] = x3; goto L1874; } L1905: ATTRIBUTE_UNUSED_LABEL if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L1906; } goto ret0; L1888: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ix86_carry_flag_operator (x4, DImode)) { 
operands[3] = x4; goto L1889; } goto ret0; L1889: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x86_64_general_operand (x4, DImode)) { operands[2] = x4; goto L1890; } goto ret0; L1890: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1891; goto ret0; L1891: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands))) { return 226; } goto ret0; L1874: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1875; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L1905; L1875: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands))) { return 225; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L1905; L1906: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1907; goto ret0; L1907: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (MINUS, DImode, operands))) { return 227; } goto ret0; L2950: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L2951; } goto ret0; L2951: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_szext_general_operand (x3, DImode)) { operands[2] = x3; goto L2952; } goto ret0; L2952: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2953; goto ret0; L2953: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (AND, DImode, operands))) { return 288; } goto ret0; L3253: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L3254; } goto ret0; L3254: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L3255; } goto ret0; L3255: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3256; goto ret0; L3256: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (IOR, DImode, operands))) { return 305; } goto ret0; L3636: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L3637; } goto ret0; L3637: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L3638; } goto ret0; L3638: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3639; goto ret0; L3639: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (XOR, DImode, operands))) { return 327; } goto ret0; L4063: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (general_operand (x3, DImode)) { operands[1] = x3; goto L4064; } L4075: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L4076; } goto ret0; L4064: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4065; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L4075; L4065: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if 
(GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && ix86_unary_operator_ok (NEG, DImode, operands))) { return 351; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L4075; L4076: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4077; goto ret0; L4077: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_unary_operator_ok (NEG, DImode, operands))) { return 352; } goto ret0; L4599: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L4600; } goto ret0; L4600: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4601; } goto ret0; L4601: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4602; goto ret0; L4602: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ASHIFT, DImode, operands))) { return 402; } goto ret0; L4820: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L4821; } goto ret0; L4821: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT) goto L14251; L4849: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4850; } goto ret0; L14251: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x3, DImode)) { operands[2] = x3; goto L4822; } L14252: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x3, QImode)) { operands[2] = x3; goto L4836; } goto L4849; L4822: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4823; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L14252; L4823: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && INTVAL (operands[2]) == 63 && (TARGET_USE_CLTD || optimize_size) && ix86_binary_operator_ok (ASHIFTRT, DImode, operands))) { return 417; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L14252; L4836: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4837; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L4849; L4837: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, DImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 418; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L4849; L4850: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4851; goto ret0; L4851: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, DImode, operands))) { return 419; } goto ret0; L5227: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L5228; } goto ret0; L5228: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5229; } L5242: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5243; } goto ret0; L5229: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5230; x1 = XVECEXP (x0, 0, 0); x2 = XEXP 
(x1, 1); x3 = XEXP (x2, 1); goto L5242; L5230: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 445; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5242; L5243: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5244; goto ret0; L5244: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 446; } goto ret0; L5565: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L5566; } goto ret0; L5566: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5567; } L5580: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5581; } goto ret0; L5567: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5568; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5580; L5568: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ROTATE, DImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 469; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5580; L5581: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5582; goto ret0; L5582: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ROTATE, DImode, operands))) { return 470; } goto ret0; L5741: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L5742; } goto ret0; L5742: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5743; } L5756: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5757; } goto ret0; L5743: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5744; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5756; L5744: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, DImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 481; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5756; L5757: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5758; goto ret0; L5758: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, DImode, operands))) { return 482; } goto ret0; ret0: return -1; } static int recog_25 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case 
ZERO_EXTEND: goto L1278; case MULT: goto L2188; case DIV: goto L2563; case UDIV: goto L2741; case IOR: goto L3328; case XOR: goto L3711; case LSHIFTRT: goto L4110; case ASHIFT: goto L4642; case ASHIFTRT: goto L4904; case CTZ: goto L6459; case MINUS: goto L6487; case PLUS: goto L6648; case IF_THEN_ELSE: goto L7722; case SUBREG: case REG: goto L14253; default: goto ret0; } L14253: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[1] = x2; goto L580; } goto ret0; L1278: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L14267; goto ret0; L14267: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case PLUS: goto L1279; case MINUS: goto L1997; case MULT: goto L2217; case AND: goto L2992; case IOR: goto L3313; case XOR: goto L3696; case ASHIFT: goto L4696; case ASHIFTRT: goto L4958; case LSHIFTRT: goto L5356; case ROTATE: goto L5608; case ROTATERT: goto L5784; default: break; } goto ret0; L1279: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14278; goto ret0; L14278: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x4) == PLUS) goto L1280; if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L1487; } goto ret0; L1280: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (ix86_carry_flag_operator (x5, SImode)) { operands[3] = x5; goto L1281; } goto ret0; L1281: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (nonimmediate_operand (x5, SImode)) { operands[1] = x5; goto L1282; } goto ret0; L1282: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L1283; } goto ret0; L1283: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1284; goto ret0; L1284: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (PLUS, SImode, operands))) { return 183; } goto ret0; L1487: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L1488; } goto ret0; L1488: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1489; goto ret0; L1489: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (PLUS, SImode, operands))) { return 202; } goto ret0; L1997: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L1998; } goto ret0; L1998: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == SImode && GET_CODE (x4) == PLUS) goto L1999; if (general_operand (x4, SImode)) { operands[2] = x4; goto L2033; } goto ret0; L1999: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (ix86_carry_flag_operator (x5, SImode)) { operands[3] = x5; goto L2000; } goto ret0; L2000: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (general_operand (x5, SImode)) { operands[2] = x5; goto L2001; } goto ret0; L2001: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2002; goto ret0; L2002: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (MINUS, SImode, operands))) { return 233; } goto ret0; L2033: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2034; goto ret0; L2034: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && 
ix86_binary_operator_ok (MINUS, SImode, operands))) { return 235; } goto ret0; L2217: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L2218; } goto ret0; L2218: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L2219; } goto ret0; L2219: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2220; goto ret0; L2220: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 249; } goto ret0; L2992: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L2993; } goto ret0; L2993: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L2994; } goto ret0; L2994: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2995; goto ret0; L2995: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (AND, SImode, operands))) { return 291; } goto ret0; L3313: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L3314; } goto ret0; L3314: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L3315; } goto ret0; L3315: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3316; goto ret0; L3316: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (IOR, SImode, operands))) { return 309; } goto ret0; L3696: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L3697; } goto ret0; L3697: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L3698; } goto ret0; L3698: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3699; goto ret0; L3699: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (XOR, SImode, operands))) { return 331; } goto ret0; L4696: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L4697; } goto ret0; L4697: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonmemory_operand (x4, QImode)) { operands[2] = x4; goto L4698; } goto ret0; L4698: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4699; goto ret0; L4699: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ASHIFT, SImode, operands))) { return 408; } goto ret0; L4958: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L4959; } goto ret0; L4959: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14280; L5019: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x4, QImode)) { operands[2] = x4; goto L5020; } goto ret0; L14280: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x4, SImode)) { operands[2] = x4; goto L4960; } L14281: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] 
= x4; goto L4990; } goto L5019; L4960: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4961; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14281; L4961: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (TARGET_USE_CLTD || optimize_size) && INTVAL (operands[2]) == 31 && ix86_binary_operator_ok (ASHIFTRT, SImode, operands))) { return 426; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14281; L4990: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4991; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5019; L4991: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, SImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 428; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5019; L5020: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5021; goto ret0; L5021: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ASHIFTRT, SImode, operands))) { return 430; } goto ret0; L5356: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L5357; } goto ret0; L5357: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonmemory_operand (x4, QImode)) { operands[2] = x4; goto L5358; } goto ret0; L5358: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5359; goto ret0; L5359: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 454; } goto ret0; L5608: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L5609; } goto ret0; L5609: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5610; } L5639: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x4, QImode)) { operands[2] = x4; goto L5640; } goto ret0; L5610: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5611; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5639; L5611: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ROTATE, SImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 472; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5639; L5640: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5641; goto ret0; L5641: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ROTATE, SImode, operands))) { return 474; } goto ret0; L5784: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L5785; } goto ret0; L5785: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5786; } L5815: 
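/* The rotate/shift cases above illustrate the backtracking scheme: the
   matcher first tries the more specific alternative in which the count
   is const1_operand (the "by one" form, additionally guarded by
   TARGET_SHIFT1 || optimize_size); if that alternative fails partway
   through, x1..x4 are recomputed from x0 and control continues at the
   next label (here L5815) to retry the general nonmemory_operand count.
   The recomputation is needed because the temporaries were overwritten
   while examining the second element of the PARALLEL.  */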
ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x4, QImode)) { operands[2] = x4; goto L5816; } goto ret0; L5786: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5787; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5815; L5787: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, SImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 484; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5815; L5816: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5817; goto ret0; L5817: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (ROTATERT, SImode, operands))) { return 486; } goto ret0; L2188: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode) goto L14283; goto ret0; L14283: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case ZERO_EXTEND: goto L2315; case SIGN_EXTEND: goto L2351; case SUBREG: case REG: case MEM: goto L14282; default: goto ret0; } L14282: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L2189; } goto ret0; L2315: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L2316; } goto ret0; L2316: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ZERO_EXTEND) goto L2317; goto ret0; L2317: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[2] = x4; goto L2318; } goto ret0; L2318: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2319; goto ret0; L2319: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 255; } goto ret0; L2351: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L2352; } goto ret0; L2352: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == DImode && GET_CODE (x3) == SIGN_EXTEND) goto L2353; goto ret0; L2353: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[2] = x4; goto L2354; } goto ret0; L2354: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2355; goto ret0; L2355: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 257; } goto ret0; L2189: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L2190; } goto ret0; L2190: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2191; goto ret0; L2191: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 247; } goto ret0; L2563: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[2] = x3; goto L2564; } goto ret0; L2564: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, 
DImode)) { operands[3] = x3; goto L2565; } goto ret0; L2565: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2566; goto ret0; L2566: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L2567; } goto ret0; L2567: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MOD) goto L2568; goto ret0; L2568: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L2569; goto ret0; L2569: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[3]) && (TARGET_64BIT && !optimize_size && !TARGET_USE_CLTD) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 266; } L2593: ATTRIBUTE_UNUSED_LABEL if (rtx_equal_p (x3, operands[3]) && (TARGET_64BIT && (optimize_size || TARGET_USE_CLTD)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 267; } goto ret0; L2741: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L2742; } goto ret0; L2742: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L2743; } goto ret0; L2743: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2744; goto ret0; L2744: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[3] = x2; goto L2745; } goto ret0; L2745: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == UMOD) goto L2746; goto ret0; L2746: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2747; goto ret0; L2747: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 273; } goto ret0; L3328: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ZERO_EXTEND) goto L3329; goto ret0; L3329: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L3330; } goto ret0; L3330: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_zext_immediate_operand (x3, DImode)) { operands[2] = x3; goto L3331; } goto ret0; L3331: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3332; goto ret0; L3332: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 310; } goto ret0; L3711: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ZERO_EXTEND) goto L3712; goto ret0; L3712: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L3713; } goto ret0; L3713: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_zext_immediate_operand (x3, DImode)) { operands[2] = x3; goto L3714; } goto ret0; L3714: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3715; goto ret0; L3715: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (XOR, SImode, operands))) { return 332; } goto ret0; L4110: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode) goto L14285; goto ret0; L14285: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case NEG: goto L4111; case ZERO_EXTEND: goto L5326; case SUBREG: case REG: goto L14286; default: goto ret0; } L14286: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, 
DImode)) { operands[1] = x3; goto L5298; } goto ret0; L4111: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DImode && GET_CODE (x4) == ASHIFT) goto L4112; goto ret0; L4112: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, DImode)) { operands[1] = x5; goto L4113; } goto ret0; L4113: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4114; goto ret0; L4114: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4115; goto ret0; L4115: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4116; goto ret0; L4116: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_unary_operator_ok (NEG, SImode, operands))) { return 355; } goto ret0; L5326: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L5327; } goto ret0; L5327: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[2] = x3; goto L5328; } goto ret0; L5328: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5329; goto ret0; L5329: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && ix86_binary_operator_ok (LSHIFTRT, HImode, operands) && (TARGET_SHIFT1 || optimize_size))) { return 452; } goto ret0; L5298: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5299; } goto ret0; L5299: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5300; goto ret0; L5300: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT)) { return 450; } goto ret0; L4642: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L4643; } goto ret0; L4643: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4644; } goto ret0; L4644: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4645; goto ret0; L4645: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT)) { return 405; } goto ret0; L4904: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L4905; } goto ret0; L4905: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4906; } goto ret0; L4906: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4907; goto ret0; L4907: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT)) { return 423; } goto ret0; L6459: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L6460; } goto ret0; L6460: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6461; goto ret0; L6461: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 541; } goto ret0; L6487: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (63)]) goto L6488; goto ret0; L6488: 
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == DImode && GET_CODE (x3) == CLZ) goto L6489; goto ret0; L6489: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L6490; } goto ret0; L6490: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6491; goto ret0; L6491: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 543; } goto ret0; L6648: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == UNSPEC && XVECLEN (x3, 0) == 1 && XINT (x3, 1) == 15) goto L6649; goto ret0; L6649: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L6650; goto ret0; L6650: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DImode)) { operands[1] = x3; goto L6651; } goto ret0; L6651: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6652; goto ret0; L6652: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 554; } goto ret0; L7722: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_carry_flag_operator (x3, VOIDmode)) { operands[1] = x3; goto L7723; } goto ret0; L7723: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (-1)]) goto L7724; goto ret0; L7724: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7725; goto ret0; L7725: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7726; goto ret0; L7726: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 648; } goto ret0; L580: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L581; goto ret0; L581: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[1])) goto L582; goto ret0; L582: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[0]) && (TARGET_64BIT)) { return 87; } goto ret0; ret0: return -1; } static int recog_26 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); if (register_operand (x2, SFmode)) { operands[0] = x2; goto L602; } L14097: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SFmode)) { operands[0] = x2; goto L863; } L14105: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, SFmode)) { operands[0] = x2; goto L4204; } L14106: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SFmode)) { operands[0] = x2; goto L4233; } goto ret0; L602: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14291; x2 = XEXP (x1, 0); goto L14097; L14291: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case UNSPEC: goto L14295; case IF_THEN_ELSE: goto L7824; case SUBREG: case REG: goto L14290; default: x2 = XEXP (x1, 0); goto L14097; } L14290: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SFmode)) { operands[1] = x2; goto L603; } x2 = XEXP (x1, 0); goto L14097; L14295: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN 
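/* The DImode div/mod and udiv/umod arms above (insn codes 266, 267 and
   273) only match when the caller passed a non-null PNUM_CLOBBERS: the
   machine-description pattern carries an extra clobber that is missing
   from the candidate insn, so instead of failing the matcher reports
   via *pnum_clobbers how many clobbers the caller must append.  A
   minimal sketch of that calling convention, assuming the usual
   recog () interface from recog.h (illustrative only, not part of the
   generated code):

       rtx pat = PATTERN (insn);
       int num_clobbers = 0;
       int icode = recog (pat, insn, &num_clobbers);
       if (icode >= 0 && num_clobbers > 0)
         ...wrap PAT in a PARALLEL and append NUM_CLOBBERS
            (clobber ...) expressions before emitting the insn...
*/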
(x2, 0)) { case 1: goto L14298; case 2: goto L14300; default: break; } x2 = XEXP (x1, 0); goto L14097; L14298: ATTRIBUTE_UNUSED_LABEL switch (XINT (x2, 1)) { case 80L: goto L6968; case 82L: goto L7006; default: break; } x2 = XEXP (x1, 0); goto L14097; L6968: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, SFmode)) { operands[2] = x3; goto L6969; } x2 = XEXP (x1, 0); goto L14097; L6969: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6970; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L6970: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SFmode)) { operands[1] = x2; goto L6971; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L6971: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 81) goto L6972; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L6972: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 602; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7006: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, SFmode)) { operands[2] = x3; goto L7007; } x2 = XEXP (x1, 0); goto L14097; L7007: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7008; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7008: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SFmode)) { operands[1] = x2; goto L7009; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7009: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 83) goto L7010; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7010: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 606; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L14300: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 1) == 65) goto L7039; x2 = XEXP (x1, 0); goto L14097; L7039: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, SFmode)) { operands[2] = x3; goto L7040; } x2 = XEXP (x1, 0); goto L14097; L7040: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L7041; } x2 = XEXP (x1, 0); goto L14097; L7041: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7042; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7042: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SFmode)) { operands[3] = x2; goto L7043; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7043: ATTRIBUTE_UNUSED_LABEL if ((! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 609; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7824: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case LT: goto L7825; case GT: goto L7917; default: break; } x2 = XEXP (x1, 0); goto L14097; L7825: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SFmode) goto L14301; x2 = XEXP (x1, 0); goto L14097; L14301: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SFmode)) { operands[1] = x4; goto L7826; } L14302: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SFmode)) { operands[1] = x4; goto L7845; } x2 = XEXP (x1, 0); goto L14097; L7826: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SFmode)) { operands[2] = x4; goto L7827; } x4 = XEXP (x3, 0); goto L14302; L7827: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1])) goto L7828; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14302; L7828: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (rtx_equal_p (x3, operands[2])) goto L7829; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14302; L7829: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7830; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14302; L7830: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE && TARGET_IEEE_FP)) { return 658; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14302; L7845: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SFmode)) { operands[2] = x4; goto L7846; } x2 = XEXP (x1, 0); goto L14097; L7846: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1])) goto L7847; x2 = XEXP (x1, 0); goto L14097; L7847: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (rtx_equal_p (x3, operands[2])) goto L7848; x2 = XEXP (x1, 0); goto L14097; L7848: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7849; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7849: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE && !TARGET_IEEE_FP && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 659; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7917: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SFmode) goto L14303; x2 = XEXP (x1, 0); goto L14097; L14303: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SFmode)) { operands[1] = x4; goto L7918; } L14304: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SFmode)) { operands[1] = x4; goto L7937; } x2 = XEXP (x1, 0); goto L14097; L7918: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SFmode)) { operands[2] = x4; goto L7919; } x4 = XEXP (x3, 0); goto L14304; L7919: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1])) goto L7920; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14304; L7920: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (rtx_equal_p (x3, operands[2])) goto L7921; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14304; L7921: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7922; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14304; L7922: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && 
XINT (x2, 0) == 17 && (TARGET_SSE && TARGET_IEEE_FP)) { return 664; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14304; L7937: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SFmode)) { operands[2] = x4; goto L7938; } x2 = XEXP (x1, 0); goto L14097; L7938: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1])) goto L7939; x2 = XEXP (x1, 0); goto L14097; L7939: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (rtx_equal_p (x3, operands[2])) goto L7940; x2 = XEXP (x1, 0); goto L14097; L7940: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7941; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L7941: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE && !TARGET_IEEE_FP && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 665; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L603: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L604; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L604: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[1])) goto L605; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L605: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[0]) && (reload_completed || !TARGET_SSE)) { return 92; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14097; L863: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14305; x2 = XEXP (x1, 0); goto L14105; L14305: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FLOAT_TRUNCATE: goto L864; case NEG: goto L4226; default: break; } x2 = XEXP (x1, 0); goto L14105; L864: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case DFmode: goto L14307; case XFmode: goto L14309; default: break; } x2 = XEXP (x1, 0); goto L14105; L14307: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto L865; } L14308: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L873; } x2 = XEXP (x1, 0); goto L14105; L865: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L866; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14308; L866: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SFmode)) { operands[2] = x2; goto L867; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14308; L867: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && !TARGET_SSE2)) { return 133; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14308; L873: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L874; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14105; L874: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SFmode)) { operands[2] = x2; goto L875; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14105; L875: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_SSE2 && !TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS)) { return 134; } L883: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && TARGET_SSE2 && TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS)) { return 135; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14105; L14309: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, XFmode)) { operands[1] = x3; goto L919; } x2 = XEXP (x1, 0); goto L14105; L919: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) 
== CLOBBER) goto L920; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14105; L920: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SFmode)) { operands[2] = x2; goto L921; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14105; L921: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 142; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14105; L4226: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[1] = x3; goto L4227; } x2 = XEXP (x1, 0); goto L14105; L4227: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L4228; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14105; L4228: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L4229; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14105; L4229: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode)))) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 363; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14105; L4204: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14310; x2 = XEXP (x1, 0); goto L14106; L14310: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L4205; case ABS: goto L4361; default: break; } x2 = XEXP (x1, 0); goto L14106; L4205: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, SFmode)) { operands[1] = x3; goto L4206; } x2 = XEXP (x1, 0); goto L14106; L4206: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4207; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14106; L4207: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_unary_operator_ok (NEG, SFmode, operands))) { return 362; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14106; L4361: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, SFmode)) { operands[1] = x3; goto L4362; } x2 = XEXP (x1, 0); goto L14106; L4362: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4363; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14106; L4363: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_unary_operator_ok (ABS, SFmode, operands))) { return 377; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14106; L4233: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14312; goto ret0; L14312: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L4234; case ABS: goto L4382; default: break; } goto ret0; L4234: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[1] = x3; goto L4235; } goto ret0; L4235: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4236; goto ret0; L4236: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && !TARGET_SSE && ix86_unary_operator_ok (NEG, SFmode, operands))) { return 364; } goto ret0; L4382: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[1] = x3; goto L4383; } goto ret0; L4383: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case USE: goto L4384; case CLOBBER: goto L4392; default: break; } goto ret0; L4384: 
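/* USE arm of the SFmode ABS alternative: operand 2 is a V4SFmode
   nonimmediate operand (presumably the sign-mask consumed by the SSE
   form of abs).  The full pattern also needs a clobber, so this path
   matches only when the caller supplied PNUM_CLOBBERS; the single
   missing clobber is reported through it and code 378 is returned.  */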
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L4385; } goto ret0; L4385: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode)))) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 378; } goto ret0; L4392: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && ix86_unary_operator_ok (ABS, SFmode, operands) && !TARGET_SSE)) { return 379; } goto ret0; ret0: return -1; } static int recog_27 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[0] = x2; goto L625; } L14098: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DFmode)) { operands[0] = x2; goto L935; } L14107: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, DFmode)) { operands[0] = x2; goto L4245; } L14108: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DFmode)) { operands[0] = x2; goto L4291; } goto ret0; L625: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14315; x2 = XEXP (x1, 0); goto L14098; L14315: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case UNSPEC: goto L14319; case IF_THEN_ELSE: goto L7870; case SUBREG: case REG: goto L14314; default: x2 = XEXP (x1, 0); goto L14098; } L14314: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DFmode)) { operands[1] = x2; goto L626; } x2 = XEXP (x1, 0); goto L14098; L14319: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x2, 0)) { case 1: goto L14322; case 2: goto L14324; default: break; } x2 = XEXP (x1, 0); goto L14098; L14322: ATTRIBUTE_UNUSED_LABEL switch (XINT (x2, 1)) { case 80L: goto L6959; case 82L: goto L6997; default: break; } x2 = XEXP (x1, 0); goto L14098; L6959: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_MODE (x3) == DFmode) goto L14326; x2 = XEXP (x1, 0); goto L14098; L14326: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == FLOAT_EXTEND) goto L6978; if (register_operand (x3, DFmode)) { operands[2] = x3; goto L6960; } x2 = XEXP (x1, 0); goto L14098; L6978: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SFmode)) { operands[2] = x4; goto L6979; } x2 = XEXP (x1, 0); goto L14098; L6979: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6980; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6980: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[1] = x2; goto L6981; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6981: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 81) goto L6982; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6982: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_MODE (x3) == DFmode && GET_CODE (x3) == FLOAT_EXTEND) goto L6983; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6983: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[2]) && (! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 603; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6960: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6961; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6961: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[1] = x2; goto L6962; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6962: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 81) goto L6963; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6963: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 601; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6997: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, DFmode)) { operands[2] = x3; goto L6998; } x2 = XEXP (x1, 0); goto L14098; L6998: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6999; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L6999: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[1] = x2; goto L7000; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L7000: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 83) goto L7001; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L7001: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 605; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L14324: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 1) == 65) goto L7024; x2 = XEXP (x1, 0); goto L14098; L7024: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, DFmode)) { operands[2] = x3; goto L7025; } x2 = XEXP (x1, 0); goto L14098; L7025: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, DFmode)) { operands[1] = x3; goto L7026; } x2 = XEXP (x1, 0); goto L14098; L7026: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7027; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L7027: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DFmode)) { operands[3] = x2; goto L7028; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L7028: ATTRIBUTE_UNUSED_LABEL if ((! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 608; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L7870: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case LT: goto L7871; case GT: goto L7963; default: break; } x2 = XEXP (x1, 0); goto L14098; L7871: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DFmode) goto L14327; x2 = XEXP (x1, 0); goto L14098; L14327: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, DFmode)) { operands[1] = x4; goto L7872; } L14328: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, DFmode)) { operands[1] = x4; goto L7891; } x2 = XEXP (x1, 0); goto L14098; L7872: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DFmode)) { operands[2] = x4; goto L7873; } x4 = XEXP (x3, 0); goto L14328; L7873: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1])) goto L7874; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14328; L7874: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (rtx_equal_p (x3, operands[2])) goto L7875; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14328; L7875: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7876; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14328; L7876: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE2 && TARGET_IEEE_FP && TARGET_SSE_MATH)) { return 661; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14328; L7891: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DFmode)) { operands[2] = x4; goto L7892; } x2 = XEXP (x1, 0); goto L14098; L7892: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1])) goto L7893; x2 = XEXP (x1, 0); goto L14098; L7893: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (rtx_equal_p (x3, operands[2])) goto L7894; x2 = XEXP (x1, 0); goto L14098; L7894: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7895; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L7895: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE2 && TARGET_SSE_MATH && !TARGET_IEEE_FP && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 662; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L7963: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DFmode) goto L14329; x2 = XEXP (x1, 0); goto L14098; L14329: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, DFmode)) { operands[1] = x4; goto L7964; } L14330: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, DFmode)) { operands[1] = x4; goto L7983; } x2 = XEXP (x1, 0); goto L14098; L7964: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DFmode)) { operands[2] = x4; goto L7965; } x4 = XEXP (x3, 0); goto L14330; L7965: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1])) goto L7966; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14330; L7966: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (rtx_equal_p (x3, operands[2])) goto L7967; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14330; L7967: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7968; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14330; L7968: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) 
== CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE2 && TARGET_SSE_MATH && TARGET_IEEE_FP)) { return 667; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14330; L7983: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DFmode)) { operands[2] = x4; goto L7984; } x2 = XEXP (x1, 0); goto L14098; L7984: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1])) goto L7985; x2 = XEXP (x1, 0); goto L14098; L7985: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (rtx_equal_p (x3, operands[2])) goto L7986; x2 = XEXP (x1, 0); goto L14098; L7986: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7987; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L7987: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE2 && TARGET_SSE_MATH && !TARGET_IEEE_FP && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 668; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L626: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L627; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L627: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[1])) goto L628; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L628: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[0]) && (reload_completed || !TARGET_SSE2)) { return 97; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14098; L935: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14331; x2 = XEXP (x1, 0); goto L14107; L14331: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FLOAT_TRUNCATE: goto L936; case NEG: goto L4267; default: break; } x2 = XEXP (x1, 0); goto L14107; L936: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L937; } x2 = XEXP (x1, 0); goto L14107; L937: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L938; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14107; L938: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, DFmode)) { operands[2] = x2; goto L939; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14107; L939: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return 145; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14107; L4267: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L4268; } x2 = XEXP (x1, 0); goto L14107; L4268: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L4269; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14107; L4269: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L4270; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14107; L4270: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_SSE2 && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode)))) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 366; } L4287: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_SSE2 && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode)))) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 367; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14107; L4245: 
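/* L4245 onward: DFmode NEG/ABS with a memory destination (operand 0
   was matched above with memory_operand).  Each alternative requires
   the PARALLEL's second element to be a clobber of hard register 17,
   the x86 flags register, in CCmode, and returns insn code 365 (NEG)
   or 380 (ABS).  */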
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14333; x2 = XEXP (x1, 0); goto L14108; L14333: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L4246; case ABS: goto L4402; default: break; } x2 = XEXP (x1, 0); goto L14108; L4246: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, DFmode)) { operands[1] = x3; goto L4247; } x2 = XEXP (x1, 0); goto L14108; L4247: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4248; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14108; L4248: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_unary_operator_ok (NEG, DFmode, operands))) { return 365; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14108; L4402: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, DFmode)) { operands[1] = x3; goto L4403; } x2 = XEXP (x1, 0); goto L14108; L4403: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4404; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14108; L4404: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_unary_operator_ok (ABS, DFmode, operands))) { return 380; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14108; L4291: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14335; goto ret0; L14335: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L4292; case ABS: goto L4423; default: break; } goto ret0; L4292: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L4293; } goto ret0; L4293: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4294; goto ret0; L4294: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14337; goto ret0; L14337: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14339; goto ret0; L14339: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14341; goto ret0; L14341: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_80387 && ix86_unary_operator_ok (NEG, DFmode, operands))) { return 368; } L14342: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_80387 && ix86_unary_operator_ok (NEG, DFmode, operands))) { return 369; } goto ret0; L4423: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L4424; } goto ret0; L4424: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case USE: goto L4425; case CLOBBER: goto L4450; default: break; } goto ret0; L4425: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L4426; } goto ret0; L4426: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_SSE2 && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode)))) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 381; } L4443: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_SSE2 && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode)))) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 382; } goto ret0; L4450: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14343; goto ret0; L14343: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14345; goto ret0; L14345: ATTRIBUTE_UNUSED_LABEL if 
(XINT (x2, 0) == 17) goto L14347; goto ret0; L14347: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_80387 && ix86_unary_operator_ok (ABS, DFmode, operands))) { return 383; } L14348: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_80387 && ix86_unary_operator_ok (ABS, DFmode, operands))) { return 384; } goto ret0; ret0: return -1; } static int recog_28 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case PLUS: goto L1409; case NEG: goto L1422; case MINUS: goto L1919; case AND: goto L2965; case IOR: goto L3268; case XOR: goto L3651; case NOT: goto L4523; case ASHIFT: goto L4614; case ASHIFTRT: goto L4863; case LSHIFTRT: goto L5256; case SUBREG: case REG: case MEM: goto L14399; default: goto ret0; } L14399: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L1439; } goto ret0; L1409: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L1410; } goto ret0; L1410: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x86_64_general_operand (x4, DImode)) { operands[2] = x4; goto L1411; } goto ret0; L1411: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L1412; goto ret0; L1412: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L1413; case CLOBBER: goto L1458; default: break; } goto ret0; L1413: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L1414; } goto ret0; L1414: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L1415; goto ret0; L1415: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L1416; goto ret0; L1416: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (PLUS, DImode, operands) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! pic_symbolic_operand (operands[2], VOIDmode))) { return 197; } goto ret0; L1458: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[0] = x2; goto L1459; } goto ret0; L1459: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! 
pic_symbolic_operand (operands[2], VOIDmode))) { return 200; } goto ret0; L1422: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (x86_64_general_operand (x4, DImode)) { operands[2] = x4; goto L1423; } goto ret0; L1423: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_general_operand (x3, DImode)) { operands[1] = x3; goto L1424; } goto ret0; L1424: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1425; goto ret0; L1425: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[0] = x2; goto L1426; } goto ret0; L1426: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCZmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! pic_symbolic_operand (operands[2], VOIDmode))) { return 198; } goto ret0; L1919: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L1920; } goto ret0; L1920: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x86_64_general_operand (x4, DImode)) { operands[2] = x4; goto L1921; } goto ret0; L1921: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L1922; goto ret0; L1922: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L1923; goto ret0; L1923: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L1924; } goto ret0; L1924: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MINUS) goto L1925; goto ret0; L1925: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L1926; goto ret0; L1926: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (MINUS, DImode, operands))) { return 228; } goto ret0; L2965: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L2966; } goto ret0; L2966: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x86_64_szext_general_operand (x4, DImode)) { operands[2] = x4; goto L2967; } goto ret0; L2967: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L2968; goto ret0; L2968: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2969; goto ret0; L2969: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L2970; } goto ret0; L2970: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == AND) goto L2971; goto ret0; L2971: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2972; goto ret0; L2972: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (AND, DImode, operands))) { return 289; } goto ret0; L3268: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L3269; } goto ret0; L3269: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x86_64_general_operand (x4, DImode)) { operands[2] = x4; goto L3270; } goto ret0; L3270: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3271; goto ret0; L3271: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: 
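/* Second element of the IOR/compare PARALLEL: either a SET that also
   stores the IOR result (code 306) or a CLOBBER of a scratch that
   discards it (code 307).  */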
goto L3272; case CLOBBER: goto L3285; default: break; } goto ret0; L3272: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L3273; } goto ret0; L3273: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == IOR) goto L3274; goto ret0; L3274: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3275; goto ret0; L3275: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (IOR, DImode, operands))) { return 306; } goto ret0; L3285: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[0] = x2; goto L3286; } goto ret0; L3286: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (IOR, DImode, operands))) { return 307; } goto ret0; L3651: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L3652; } goto ret0; L3652: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x86_64_general_operand (x4, DImode)) { operands[2] = x4; goto L3653; } goto ret0; L3653: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3654; goto ret0; L3654: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L3655; case CLOBBER: goto L3668; default: break; } goto ret0; L3655: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L3656; } goto ret0; L3656: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == XOR) goto L3657; goto ret0; L3657: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3658; goto ret0; L3658: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (XOR, DImode, operands))) { return 328; } goto ret0; L3668: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[0] = x2; goto L3669; } goto ret0; L3669: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (XOR, DImode, operands))) { return 329; } goto ret0; L4523: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L4524; } goto ret0; L4524: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4525; goto ret0; L4525: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4526; goto ret0; L4526: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L4527; } goto ret0; L4527: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == NOT) goto L4528; goto ret0; L4528: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_unary_operator_ok (NOT, DImode, operands))) { return 393; } goto ret0; L4614: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L4615; } goto ret0; L4615: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (immediate_operand (x4, QImode)) { operands[2] = x4; goto L4616; } goto ret0; L4616: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == 
const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4617; goto ret0; L4617: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4618; goto ret0; L4618: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L4619; } goto ret0; L4619: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ASHIFT) goto L4620; goto ret0; L4620: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L4621; goto ret0; L4621: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFT, DImode, operands))) { return 403; } goto ret0; L4863: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L4864; } goto ret0; L4864: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14442; goto ret0; L14442: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L4865; } L14443: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x4, QImode)) { operands[2] = x4; goto L4878; } goto ret0; L4865: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4866; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14443; L4866: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4867; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14443; L4867: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L4868; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14443; L4868: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ASHIFTRT) goto L4869; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14443; L4869: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L4870; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14443; L4870: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (ASHIFTRT, DImode, operands))) { return 420; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14443; L4878: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4879; goto ret0; L4879: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4880; goto ret0; L4880: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L4881; } goto ret0; L4881: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ASHIFTRT) goto L4882; goto ret0; L4882: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L4883; goto ret0; L4883: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFTRT, DImode, operands))) { return 421; } goto ret0; L5256: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L5257; } goto ret0; L5257: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == 
CONST_INT) goto L14444; goto ret0; L14444: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5258; } L14445: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x4, QImode)) { operands[2] = x4; goto L5271; } goto ret0; L5258: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5259; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14445; L5259: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5260; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14445; L5260: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L5261; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14445; L5261: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == LSHIFTRT) goto L5262; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14445; L5262: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5263; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14445; L5263: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 447; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14445; L5271: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5272; goto ret0; L5272: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5273; goto ret0; L5273: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L5274; } goto ret0; L5274: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == LSHIFTRT) goto L5275; goto ret0; L5275: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5276; goto ret0; L5276: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 448; } goto ret0; L1439: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_immediate_operand (x3, DImode)) { operands[2] = x3; goto L1440; } L1932: ATTRIBUTE_UNUSED_LABEL if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L1933; } goto ret0; L1440: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1441; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L1932; L1441: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[0] = x2; goto L1442; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L1932; L1442: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && ix86_match_ccmode (insn, CCGCmode))) { return 199; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L1932; L1933: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L1934; goto ret0; L1934: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L1935; } goto ret0; L1935: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MINUS) goto L1936; goto ret0; L1936: 
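/* Final checks for the DImode "subtract and set flags" PARALLEL: the
   MINUS in the second SET must reuse exactly the operands already
   captured from the COMPARE above; on success insn code 229 is
   returned.  */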
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L1937; goto ret0; L1937: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCmode) && ix86_binary_operator_ok (MINUS, SImode, operands))) { return 229; } goto ret0; ret0: return -1; } static int recog_29 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case PLUS: goto L1502; case NEG: goto L1529; case MINUS: goto L2047; case AND: goto L3008; case IOR: goto L3345; case XOR: goto L3728; case NOT: goto L4545; case ASHIFT: goto L4712; case ASHIFTRT: goto L5034; case LSHIFTRT: goto L5372; case SUBREG: case REG: case MEM: goto L14402; default: goto L14411; } L14402: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L1559; } L14411: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SImode)) { operands[1] = x3; goto L2085; } goto ret0; L1502: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L1503; } goto ret0; L1503: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L1504; } goto ret0; L1504: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L1505; goto ret0; L1505: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L1506; case CLOBBER: goto L1578; default: break; } goto ret0; L1506: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14446; case DImode: goto L14447; default: break; } goto ret0; L14446: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L1507; } goto ret0; L1507: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L1508; goto ret0; L1508: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L1509; goto ret0; L1509: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (PLUS, SImode, operands) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! pic_symbolic_operand (operands[2], VOIDmode))) { return 203; } goto ret0; L14447: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L1520; } goto ret0; L1520: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L1521; goto ret0; L1521: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == PLUS) goto L1522; goto ret0; L1522: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L1523; goto ret0; L1523: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (PLUS, SImode, operands) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! 
pic_symbolic_operand (operands[2], VOIDmode))) { return 204; } goto ret0; L1578: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[0] = x2; goto L1579; } goto ret0; L1579: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCGOCmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! pic_symbolic_operand (operands[2], VOIDmode))) { return 208; } goto ret0; L1529: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (general_operand (x4, SImode)) { operands[2] = x4; goto L1530; } goto ret0; L1530: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L1531; } goto ret0; L1531: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case CLOBBER: goto L1532; case SET: goto L1549; default: break; } goto ret0; L1532: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[0] = x2; goto L1533; } goto ret0; L1533: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCZmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! pic_symbolic_operand (operands[2], VOIDmode))) { return 205; } goto ret0; L1549: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L1550; } goto ret0; L1550: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L1551; goto ret0; L1551: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == PLUS) goto L1552; goto ret0; L1552: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L1553; goto ret0; L1553: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCZmode) && ix86_binary_operator_ok (PLUS, SImode, operands) /* Current assemblers are broken and do not allow @GOTOFF in ought but a memory context. */ && ! 
pic_symbolic_operand (operands[2], VOIDmode))) { return 206; } goto ret0; L2047: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14448; goto ret0; L14448: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L2048; } L14449: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SImode)) { operands[1] = x4; goto L2061; } goto ret0; L2048: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L2049; } x4 = XEXP (x3, 0); goto L14449; L2049: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L2050; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14449; L2050: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2051; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14449; L2051: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L2052; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14449; L2052: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MINUS) goto L2053; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14449; L2053: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2054; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14449; L2054: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (MINUS, SImode, operands))) { return 236; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14449; L2061: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L2062; } goto ret0; L2062: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L2063; goto ret0; L2063: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2064; goto ret0; L2064: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L2065; } goto ret0; L2065: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L2066; goto ret0; L2066: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == MINUS) goto L2067; goto ret0; L2067: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L2068; goto ret0; L2068: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (MINUS, SImode, operands))) { return 237; } goto ret0; L3008: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14451; goto ret0; L14451: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x4) == ZERO_EXTRACT) goto L3146; if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L3009; } goto ret0; L3146: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (ext_register_operand (x5, VOIDmode)) { operands[1] = x5; goto L3147; } goto ret0; L3147: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3148; goto ret0; L3148: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 2); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3149; goto ret0; L3149: ATTRIBUTE_UNUSED_LABEL x4 = 
XEXP (x3, 1); if (const_int_operand (x4, VOIDmode)) { operands[2] = x4; goto L3150; } goto ret0; L3150: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3151; goto ret0; L3151: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L3152; goto ret0; L3152: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L3153; goto ret0; L3153: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[0] = x3; goto L3154; } goto ret0; L3154: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3155; goto ret0; L3155: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3156; goto ret0; L3156: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == AND) goto L3157; goto ret0; L3157: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L3158; goto ret0; L3158: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L3159; goto ret0; L3159: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3160; goto ret0; L3160: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L3161; goto ret0; L3161: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCNOmode))) { return 301; } goto ret0; L3009: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L3010; } goto ret0; L3010: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3011; goto ret0; L3011: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L3012; goto ret0; L3012: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14452; case DImode: goto L14453; default: break; } goto ret0; L14452: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L3013; } goto ret0; L3013: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == AND) goto L3014; goto ret0; L3014: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3015; goto ret0; L3015: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (AND, SImode, operands))) { return 292; } goto ret0; L14453: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L3026; } goto ret0; L3026: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L3027; goto ret0; L3027: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == AND) goto L3028; goto ret0; L3028: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L3029; goto ret0; L3029: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (AND, SImode, operands))) { return 293; } goto ret0; L3345: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L3346; } goto ret0; L3346: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if 
(general_operand (x4, SImode)) { operands[2] = x4; goto L3347; } L3373: ATTRIBUTE_UNUSED_LABEL if (x86_64_zext_immediate_operand (x4, VOIDmode)) { operands[2] = x4; goto L3374; } goto ret0; L3347: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3348; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3348: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L3349; case CLOBBER: goto L3390; default: break; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3349: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14454; case DImode: goto L14455; default: break; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L14454: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L3350; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3350: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == IOR) goto L3351; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3351: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3352; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3352: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (IOR, SImode, operands))) { return 311; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L14455: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L3363; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3363: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L3364; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3364: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == IOR) goto L3365; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3365: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L3366; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3366: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (IOR, SImode, operands))) { return 312; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3390: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[0] = x2; goto L3391; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3391: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 314; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3373; L3374: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3375; goto ret0; L3375: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L3376; goto ret0; L3376: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 
0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L3377; } goto ret0; L3377: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == IOR) goto L3378; goto ret0; L3378: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ZERO_EXTEND) goto L3379; goto ret0; L3379: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L3380; goto ret0; L3380: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (IOR, SImode, operands))) { return 313; } goto ret0; L3728: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14457; goto ret0; L14457: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x4) == ZERO_EXTRACT) goto L4021; if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L3729; } goto ret0; L4021: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (ext_register_operand (x5, VOIDmode)) { operands[1] = x5; goto L4022; } goto ret0; L4022: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4023; goto ret0; L4023: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 2); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4024; goto ret0; L4024: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, QImode)) { operands[2] = x4; goto L4025; } L4046: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x4, QImode)) { operands[2] = x4; goto L4047; } goto ret0; L4025: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4026; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4026: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4027; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4027: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L4028; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4028: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[0] = x3; goto L4029; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4029: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4030; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4030: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4031; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4031: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == XOR) goto L4032; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4032: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L4033; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4033: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L4034; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4034: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4035; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); 
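/* The matcher's usual backtracking idiom: when an alternative fails
   partway through, the cursor variables x1..x4 are recomputed from x0
   so they again point at the subexpressions the fallback label
   expects, then control jumps to the next alternative (here L4046,
   which retries operand 2 with nonmemory_operand).  */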
goto L4046; L4035: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4036; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4036: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (!TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode))) { return 349; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L4046; L4047: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4048; goto ret0; L4048: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4049; goto ret0; L4049: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTRACT) goto L4050; goto ret0; L4050: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ext_register_operand (x3, VOIDmode)) { operands[0] = x3; goto L4051; } goto ret0; L4051: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4052; goto ret0; L4052: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4053; goto ret0; L4053: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == XOR) goto L4054; goto ret0; L4054: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ZERO_EXTRACT) goto L4055; goto ret0; L4055: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L4056; goto ret0; L4056: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4057; goto ret0; L4057: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L4058; goto ret0; L4058: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode))) { return 350; } goto ret0; L3729: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, SImode)) { operands[2] = x4; goto L3730; } L3756: ATTRIBUTE_UNUSED_LABEL if (x86_64_zext_immediate_operand (x4, VOIDmode)) { operands[2] = x4; goto L3757; } goto ret0; L3730: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3731; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3731: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L3732; case CLOBBER: goto L3773; default: break; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3732: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14458; case DImode: goto L14459; default: break; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L14458: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L3733; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3733: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == XOR) goto L3734; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3734: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3735; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3735: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, 
operands[2]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (XOR, SImode, operands))) { return 333; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L14459: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L3746; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3746: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L3747; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3747: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == XOR) goto L3748; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3748: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L3749; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3749: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (XOR, SImode, operands))) { return 334; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3773: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[0] = x2; goto L3774; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3774: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 336; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L3756; L3757: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3758; goto ret0; L3758: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L3759; goto ret0; L3759: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L3760; } goto ret0; L3760: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == XOR) goto L3761; goto ret0; L3761: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ZERO_EXTEND) goto L3762; goto ret0; L3762: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L3763; goto ret0; L3763: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (XOR, SImode, operands))) { return 335; } goto ret0; L4545: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14460; goto ret0; L14460: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L4546; } L14461: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SImode)) { operands[1] = x4; goto L4557; } goto ret0; L4546: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4547; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14461; L4547: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4548; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14461; L4548: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L4549; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP 
(x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14461; L4549: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == NOT) goto L4550; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14461; L4550: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_unary_operator_ok (NOT, SImode, operands))) { return 396; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14461; L4557: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4558; goto ret0; L4558: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4559; goto ret0; L4559: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L4560; } goto ret0; L4560: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L4561; goto ret0; L4561: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == NOT) goto L4562; goto ret0; L4562: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode) && ix86_unary_operator_ok (NOT, SImode, operands))) { return 397; } goto ret0; L4712: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14462; goto ret0; L14462: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L4713; } L14463: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SImode)) { operands[1] = x4; goto L4726; } goto ret0; L4713: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L4714; } x4 = XEXP (x3, 0); goto L14463; L4714: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4715; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14463; L4715: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4716; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14463; L4716: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L4717; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14463; L4717: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ASHIFT) goto L4718; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14463; L4718: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L4719; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14463; L4719: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFT, SImode, operands))) { return 409; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14463; L4726: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L4727; } goto ret0; L4727: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4728; goto ret0; L4728: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4729; goto ret0; L4729: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand 
(x2, DImode)) { operands[0] = x2; goto L4730; } goto ret0; L4730: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L4731; goto ret0; L4731: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ASHIFT) goto L4732; goto ret0; L4732: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L4733; goto ret0; L4733: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFT, SImode, operands))) { return 410; } goto ret0; L5034: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14464; goto ret0; L14464: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L5035; } L14465: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SImode)) { operands[1] = x4; goto L5048; } goto ret0; L5035: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14466; x4 = XEXP (x3, 0); goto L14465; L14466: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5036; } L14467: ATTRIBUTE_UNUSED_LABEL if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L5063; } x4 = XEXP (x3, 0); goto L14465; L5036: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5037; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14467; L5037: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5038; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14467; L5038: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L5039; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14467; L5039: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ASHIFTRT) goto L5040; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14467; L5040: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5041; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14467; L5041: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (ASHIFTRT, SImode, operands))) { return 431; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14467; L5063: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5064; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14465; L5064: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5065; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14465; L5065: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L5066; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14465; L5066: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ASHIFTRT) goto L5067; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14465; L5067: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5068; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); 
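/* Annotation: for the flag-setting shifts in this region the matcher tries
   const1_operand first (the shift-by-one forms, whose conditions also
   require TARGET_SHIFT1 || optimize_size, presumably to favor the short
   one-operand shift encoding) and only then falls back to
   const_int_1_31_operand for a general immediate shift count.  */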
x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14465; L5068: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFTRT, SImode, operands))) { return 433; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14465; L5048: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14468; goto ret0; L14468: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5049; } L14469: ATTRIBUTE_UNUSED_LABEL if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L5076; } goto ret0; L5049: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5050; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14469; L5050: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5051; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14469; L5051: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L5052; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14469; L5052: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L5053; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14469; L5053: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ASHIFTRT) goto L5054; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14469; L5054: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L5055; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14469; L5055: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (ASHIFTRT, SImode, operands))) { return 432; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14469; L5076: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5077; goto ret0; L5077: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5078; goto ret0; L5078: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L5079; } goto ret0; L5079: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L5080; goto ret0; L5080: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ASHIFTRT) goto L5081; goto ret0; L5081: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L5082; goto ret0; L5082: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFTRT, SImode, operands))) { return 434; } goto ret0; L5372: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14470; goto ret0; L14470: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L5373; } L14471: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SImode)) { operands[1] = x4; goto L5386; } goto ret0; L5373: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14472; 
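/* Annotation (a reading of the surrounding code, not generator output): the
   SImode flag-setting shifts here come in pairs; the companion whose second
   SET writes a DImode register through (zero_extend:DI ...) is additionally
   guarded by TARGET_64BIT, consistent with 32-bit results being implicitly
   zero-extended on x86-64.  */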
x4 = XEXP (x3, 0); goto L14471; L14472: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5374; } L14473: ATTRIBUTE_UNUSED_LABEL if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L5401; } x4 = XEXP (x3, 0); goto L14471; L5374: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5375; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14473; L5375: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5376; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14473; L5376: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L5377; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14473; L5377: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == LSHIFTRT) goto L5378; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14473; L5378: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5379; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14473; L5379: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 455; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14473; L5401: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5402; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14471; L5402: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5403; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14471; L5403: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L5404; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14471; L5404: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == LSHIFTRT) goto L5405; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14471; L5405: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5406; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14471; L5406: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 457; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14471; L5386: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14474; goto ret0; L14474: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5387; } L14475: ATTRIBUTE_UNUSED_LABEL if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L5414; } goto ret0; L5387: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5388; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14475; L5388: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5389; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14475; L5389: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 
0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L5390; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14475; L5390: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == LSHIFTRT) goto L5391; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14475; L5391: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ZERO_EXTEND) goto L5392; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14475; L5392: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L5393; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14475; L5393: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 456; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14475; L5414: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5415; goto ret0; L5415: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5416; goto ret0; L5416: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L5417; } goto ret0; L5417: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == LSHIFTRT) goto L5418; goto ret0; L5418: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ZERO_EXTEND) goto L5419; goto ret0; L5419: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L5420; goto ret0; L5420: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 458; } goto ret0; L1559: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, SImode)) { operands[2] = x3; goto L1560; } L2074: ATTRIBUTE_UNUSED_LABEL if (general_operand (x3, SImode)) { operands[2] = x3; goto L2075; } x3 = XEXP (x2, 0); goto L14411; L1560: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1561; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L2074; L1561: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[0] = x2; goto L1562; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L2074; L1562: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCGCmode) && (INTVAL (operands[2]) & 0xffffffff) != 0x80000000)) { return 207; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L2074; L2075: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2076; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14411; L2076: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L2077; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14411; L2077: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MINUS) goto L2078; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14411; L2078: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, 
operands[1])) goto L2079; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14411; L2079: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCmode) && ix86_binary_operator_ok (MINUS, SImode, operands))) { return 238; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14411; L2085: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L2086; } goto ret0; L2086: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2087; goto ret0; L2087: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L2088; } goto ret0; L2088: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L2089; goto ret0; L2089: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == MINUS) goto L2090; goto ret0; L2090: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L2091; goto ret0; L2091: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (rtx_equal_p (x4, operands[2]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCmode) && ix86_binary_operator_ok (MINUS, SImode, operands))) { return 239; } goto ret0; ret0: return -1; } static int recog_30 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case PLUS: goto L1620; case NEG: goto L1633; case MINUS: goto L2111; case AND: goto L3049; case IOR: goto L3418; case XOR: goto L3801; case NOT: goto L4573; case ASHIFT: goto L4767; case ASHIFTRT: goto L5116; case LSHIFTRT: goto L5454; case SUBREG: case REG: case MEM: goto L14405; default: goto ret0; } L14405: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L1650; } goto ret0; L1620: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L1621; } goto ret0; L1621: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, HImode)) { operands[2] = x4; goto L1622; } goto ret0; L1622: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L1623; goto ret0; L1623: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L1624; case CLOBBER: goto L1669; default: break; } goto ret0; L1624: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L1625; } goto ret0; L1625: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == PLUS) goto L1626; goto ret0; L1626: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L1627; goto ret0; L1627: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (PLUS, HImode, operands))) { return 211; } goto ret0; L1669: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[0] = x2; goto L1670; } goto ret0; L1670: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCGOCmode) && (GET_CODE 
(operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 214; } goto ret0; L1633: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (general_operand (x4, HImode)) { operands[2] = x4; goto L1634; } goto ret0; L1634: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L1635; } goto ret0; L1635: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1636; goto ret0; L1636: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[0] = x2; goto L1637; } goto ret0; L1637: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCZmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 212; } goto ret0; L2111: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L2112; } goto ret0; L2112: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, HImode)) { operands[2] = x4; goto L2113; } goto ret0; L2113: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L2114; goto ret0; L2114: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2115; goto ret0; L2115: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L2116; } goto ret0; L2116: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == MINUS) goto L2117; goto ret0; L2117: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2118; goto ret0; L2118: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (MINUS, HImode, operands))) { return 241; } goto ret0; L3049: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L3050; } goto ret0; L3050: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, HImode)) { operands[2] = x4; goto L3051; } goto ret0; L3051: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3052; goto ret0; L3052: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L3053; goto ret0; L3053: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L3054; } goto ret0; L3054: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == AND) goto L3055; goto ret0; L3055: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3056; goto ret0; L3056: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (AND, HImode, operands))) { return 295; } goto ret0; L3418: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L3419; } goto ret0; L3419: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, HImode)) { operands[2] = x4; goto L3420; } goto ret0; L3420: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3421; goto ret0; L3421: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L3422; case CLOBBER: goto L3435; default: break; } goto ret0; L3422: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L3423; } goto ret0; L3423: 
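/* Annotation: this arm checks the second element of the PARALLEL for the
   IOR:HI case.  A SET here must recompute (ior:HI operands[1] operands[2])
   into operand 0 (verified with rtx_equal_p against the operands captured
   from the compare), while the CLOBBER alternative at label L3435 accepts a
   scratch destination for the compare-only form.  */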
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == IOR) goto L3424; goto ret0; L3424: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3425; goto ret0; L3425: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (IOR, HImode, operands))) { return 316; } goto ret0; L3435: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[0] = x2; goto L3436; } goto ret0; L3436: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 317; } goto ret0; L3801: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L3802; } goto ret0; L3802: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, HImode)) { operands[2] = x4; goto L3803; } goto ret0; L3803: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3804; goto ret0; L3804: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L3805; case CLOBBER: goto L3818; default: break; } goto ret0; L3805: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L3806; } goto ret0; L3806: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == XOR) goto L3807; goto ret0; L3807: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3808; goto ret0; L3808: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (XOR, HImode, operands))) { return 338; } goto ret0; L3818: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[0] = x2; goto L3819; } goto ret0; L3819: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 339; } goto ret0; L4573: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L4574; } goto ret0; L4574: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4575; goto ret0; L4575: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4576; goto ret0; L4576: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L4577; } goto ret0; L4577: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == NOT) goto L4578; goto ret0; L4578: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_unary_operator_ok (NEG, HImode, operands))) { return 399; } goto ret0; L4767: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L4768; } goto ret0; L4768: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L4769; } goto ret0; L4769: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4770; goto ret0; L4770: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4771; goto ret0; L4771: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto 
L4772; } goto ret0; L4772: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == ASHIFT) goto L4773; goto ret0; L4773: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L4774; goto ret0; L4774: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFT, HImode, operands))) { return 413; } goto ret0; L5116: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L5117; } goto ret0; L5117: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14476; goto ret0; L14476: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5118; } L14477: ATTRIBUTE_UNUSED_LABEL if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L5131; } goto ret0; L5118: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5119; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14477; L5119: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5120; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14477; L5120: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L5121; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14477; L5121: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == ASHIFTRT) goto L5122; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14477; L5122: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5123; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14477; L5123: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (ASHIFTRT, HImode, operands))) { return 437; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14477; L5131: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5132; goto ret0; L5132: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5133; goto ret0; L5133: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L5134; } goto ret0; L5134: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == ASHIFTRT) goto L5135; goto ret0; L5135: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5136; goto ret0; L5136: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFTRT, HImode, operands))) { return 438; } goto ret0; L5454: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L5455; } goto ret0; L5455: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14478; goto ret0; L14478: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5456; } L14479: ATTRIBUTE_UNUSED_LABEL if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L5469; } goto ret0; L5456: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == 
const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5457; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14479; L5457: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5458; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14479; L5458: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L5459; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14479; L5459: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == LSHIFTRT) goto L5460; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14479; L5460: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5461; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14479; L5461: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 461; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14479; L5469: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5470; goto ret0; L5470: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5471; goto ret0; L5471: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L5472; } goto ret0; L5472: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == LSHIFTRT) goto L5473; goto ret0; L5473: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5474; goto ret0; L5474: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (LSHIFTRT, HImode, operands))) { return 462; } goto ret0; L1650: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, HImode)) { operands[2] = x3; goto L1651; } L2124: ATTRIBUTE_UNUSED_LABEL if (general_operand (x3, HImode)) { operands[2] = x3; goto L2125; } goto ret0; L1651: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1652; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L2124; L1652: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[0] = x2; goto L1653; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L2124; L1653: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCGCmode) && (INTVAL (operands[2]) & 0xffff) != 0x8000)) { return 213; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L2124; L2125: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2126; goto ret0; L2126: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L2127; } goto ret0; L2127: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == MINUS) goto L2128; goto ret0; L2128: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2129; goto ret0; L2129: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCmode) && ix86_binary_operator_ok (MINUS, HImode, operands))) { return 242; } goto ret0; ret0: return -1; } static int 
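/* Annotation with a sketched, approximate template (not generator output):
   like the HImode subroutine above, the function below walks a two-element
   PARALLEL of roughly the shape

     (parallel [(set (flags) (compare (op:QI ...) (const_int 0)))
                (set (...) (op:QI ...))])      ;; or (clobber (scratch))

   for op in {plus, neg, minus, and, ior, xor, not, ashift, ashiftrt,
   lshiftrt}, plus a bare-operand compare case for SUBREG/REG/MEM.  It
   returns the insn code on a match and -1 otherwise; the destination of
   element 0 is not examined here, so it is presumably validated by the
   dispatching caller.  */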
recog_31 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case PLUS: goto L1727; case NEG: goto L1740; case MINUS: goto L2165; case AND: goto L3092; case IOR: goto L3479; case XOR: goto L3975; case NOT: goto L4589; case ASHIFT: goto L4808; case ASHIFTRT: goto L5202; case LSHIFTRT: goto L5540; case SUBREG: case REG: case MEM: goto L14408; default: goto ret0; } L14408: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L1757; } goto ret0; L1727: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L1728; } goto ret0; L1728: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, QImode)) { operands[2] = x4; goto L1729; } goto ret0; L1729: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L1730; goto ret0; L1730: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L1731; case CLOBBER: goto L1776; default: break; } goto ret0; L1731: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L1732; } goto ret0; L1732: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == PLUS) goto L1733; goto ret0; L1733: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L1734; goto ret0; L1734: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (PLUS, QImode, operands))) { return 218; } goto ret0; L1776: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, QImode)) { operands[0] = x2; goto L1777; } goto ret0; L1777: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCGOCmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 221; } goto ret0; L1740: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (general_operand (x4, QImode)) { operands[2] = x4; goto L1741; } goto ret0; L1741: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L1742; } goto ret0; L1742: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1743; goto ret0; L1743: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, QImode)) { operands[0] = x2; goto L1744; } goto ret0; L1744: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCZmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 219; } goto ret0; L2165: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L2166; } goto ret0; L2166: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, QImode)) { operands[2] = x4; goto L2167; } goto ret0; L2167: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L2168; goto ret0; L2168: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2169; goto ret0; L2169: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L2170; } 
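/* Annotation: the recovery sequences used throughout these routines
   (x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); ...) rewind the cursor
   variables from the pattern root x0 before jumping to the label of the
   next untried alternative; when no alternative remains, control reaches
   ret0 and the routine returns -1 (no match).  */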
goto ret0; L2170: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == MINUS) goto L2171; goto ret0; L2171: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2172; goto ret0; L2172: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (MINUS, QImode, operands))) { return 245; } goto ret0; L3092: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == QImode) goto L14480; goto ret0; L14480: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L3093; } L14481: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, QImode)) { operands[0] = x4; goto L3106; } goto ret0; L3093: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, QImode)) { operands[2] = x4; goto L3094; } x4 = XEXP (x3, 0); goto L14481; L3094: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3095; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14481; L3095: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L3096; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14481; L3096: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L3097; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14481; L3097: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == AND) goto L3098; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14481; L3098: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3099; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14481; L3099: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (AND, QImode, operands))) { return 298; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14481; L3106: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L3107; } goto ret0; L3107: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3108; goto ret0; L3108: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L3109; goto ret0; L3109: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == STRICT_LOW_PART) goto L3110; goto ret0; L3110: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L3111; goto ret0; L3111: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == AND) goto L3112; goto ret0; L3112: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L3113; goto ret0; L3113: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1]) && ((! 
TARGET_PARTIAL_REG_STALL || optimize_size) && ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 299; } goto ret0; L3479: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == QImode) goto L14482; goto ret0; L14482: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L3480; } L14483: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, QImode)) { operands[0] = x4; goto L3493; } goto ret0; L3480: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, QImode)) { operands[2] = x4; goto L3481; } x4 = XEXP (x3, 0); goto L14483; L3481: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3482; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14483; L3482: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L3483; case CLOBBER: goto L3510; default: break; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14483; L3483: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L3484; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14483; L3484: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == IOR) goto L3485; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14483; L3485: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3486; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14483; L3486: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (IOR, QImode, operands))) { return 320; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14483; L3510: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, QImode)) { operands[0] = x2; goto L3511; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14483; L3511: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 322; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14483; L3493: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, QImode)) { operands[1] = x4; goto L3494; } goto ret0; L3494: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3495; goto ret0; L3495: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L3496; goto ret0; L3496: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == STRICT_LOW_PART) goto L3497; goto ret0; L3497: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L3498; goto ret0; L3498: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == IOR) goto L3499; goto ret0; L3499: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L3500; goto ret0; L3500: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1]) && ((! 
TARGET_PARTIAL_REG_STALL || optimize_size) && ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 321; } goto ret0; L3975: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == QImode) goto L14484; goto ret0; L14484: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L3976; } L14485: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, QImode)) { operands[0] = x4; goto L3989; } goto ret0; L3976: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, QImode)) { operands[2] = x4; goto L3977; } x4 = XEXP (x3, 0); goto L14485; L3977: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3978; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14485; L3978: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case SET: goto L3979; case CLOBBER: goto L4006; default: break; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14485; L3979: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L3980; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14485; L3980: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == XOR) goto L3981; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14485; L3981: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L3982; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14485; L3982: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_binary_operator_ok (XOR, QImode, operands))) { return 346; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14485; L4006: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, QImode)) { operands[0] = x2; goto L4007; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14485; L4007: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 348; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14485; L3989: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (general_operand (x4, QImode)) { operands[1] = x4; goto L3990; } goto ret0; L3990: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L3991; goto ret0; L3991: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L3992; goto ret0; L3992: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == STRICT_LOW_PART) goto L3993; goto ret0; L3993: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L3994; goto ret0; L3994: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == XOR) goto L3995; goto ret0; L3995: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L3996; goto ret0; L3996: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[1]) && ((! 
TARGET_PARTIAL_REG_STALL || optimize_size) && ix86_match_ccmode (insn, CCNOmode) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 347; } goto ret0; L4589: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L4590; } goto ret0; L4590: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4591; goto ret0; L4591: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4592; goto ret0; L4592: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L4593; } goto ret0; L4593: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == NOT) goto L4594; goto ret0; L4594: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (ix86_match_ccmode (insn, CCNOmode) && ix86_unary_operator_ok (NOT, QImode, operands))) { return 401; } goto ret0; L4808: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L4809; } goto ret0; L4809: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L4810; } goto ret0; L4810: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4811; goto ret0; L4811: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4812; goto ret0; L4812: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L4813; } goto ret0; L4813: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == ASHIFT) goto L4814; goto ret0; L4814: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L4815; goto ret0; L4815: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFT, QImode, operands))) { return 416; } goto ret0; L5202: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L5203; } goto ret0; L5203: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14486; goto ret0; L14486: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5204; } L14487: ATTRIBUTE_UNUSED_LABEL if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L5217; } goto ret0; L5204: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5205; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14487; L5205: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5206; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14487; L5206: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L5207; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14487; L5207: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == ASHIFTRT) goto L5208; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14487; L5208: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5209; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14487; L5209: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if 
(rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (ASHIFTRT, QImode, operands))) { return 443; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14487; L5217: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5218; goto ret0; L5218: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5219; goto ret0; L5219: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L5220; } goto ret0; L5220: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == ASHIFTRT) goto L5221; goto ret0; L5221: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5222; goto ret0; L5222: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && ix86_binary_operator_ok (ASHIFTRT, QImode, operands))) { return 444; } goto ret0; L5540: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L5541; } goto ret0; L5541: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14488; goto ret0; L14488: ATTRIBUTE_UNUSED_LABEL if (const1_operand (x4, QImode)) { operands[2] = x4; goto L5542; } L14489: ATTRIBUTE_UNUSED_LABEL if (const_int_1_31_operand (x4, QImode)) { operands[2] = x4; goto L5555; } goto ret0; L5542: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5543; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14489; L5543: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5544; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14489; L5544: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L5545; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14489; L5545: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == LSHIFTRT) goto L5546; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14489; L5546: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5547; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14489; L5547: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCGOCmode) && (TARGET_SHIFT1 || optimize_size) && ix86_binary_operator_ok (LSHIFTRT, QImode, operands))) { return 467; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L14489; L5555: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L5556; goto ret0; L5556: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L5557; goto ret0; L5557: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L5558; } goto ret0; L5558: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == LSHIFTRT) goto L5559; goto ret0; L5559: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L5560; goto ret0; L5560: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, 
CCGOCmode) && ix86_binary_operator_ok (LSHIFTRT, QImode, operands))) { return 468; } goto ret0; L1757: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, QImode)) { operands[2] = x3; goto L1758; } L2178: ATTRIBUTE_UNUSED_LABEL if (general_operand (x3, QImode)) { operands[2] = x3; goto L2179; } goto ret0; L1758: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1759; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L2178; L1759: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, QImode)) { operands[0] = x2; goto L1760; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L2178; L1760: ATTRIBUTE_UNUSED_LABEL if ((ix86_match_ccmode (insn, CCGCmode) && (INTVAL (operands[2]) & 0xff) != 0x80)) { return 220; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L2178; L2179: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2180; goto ret0; L2180: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L2181; } goto ret0; L2181: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == MINUS) goto L2182; goto ret0; L2182: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2183; goto ret0; L2183: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_match_ccmode (insn, CCmode) && ix86_binary_operator_ok (MINUS, QImode, operands))) { return 246; } goto ret0; ret0: return -1; } static int recog_32 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14086; case HImode: goto L14089; case QImode: goto L14090; case DImode: goto L14091; case SFmode: goto L14094; case DFmode: goto L14095; case XFmode: goto L14096; case CCmode: goto L14100; case TImode: goto L14103; case CCZmode: goto L14104; default: break; } L374: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case STRICT_LOW_PART: goto L375; case REG: goto L14110; case PC: goto L6216; default: break; } L8241: ATTRIBUTE_UNUSED_LABEL operands[0] = x2; goto L8242; L14086: ATTRIBUTE_UNUSED_LABEL tem = recog_21 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L374; L14089: ATTRIBUTE_UNUSED_LABEL tem = recog_22 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L374; L14090: ATTRIBUTE_UNUSED_LABEL tem = recog_23 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L374; L14091: ATTRIBUTE_UNUSED_LABEL if (push_operand (x2, DImode)) { operands[0] = x2; goto L507; } L14092: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L527; } L14093: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L579; } goto L374; L507: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (general_no_elim_operand (x2, DImode)) { operands[1] = x2; goto L508; } x2 = XEXP (x1, 0); goto L14092; L508: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L509; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14092; L509: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE 
(x2) == MEM) goto L510; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14092; L510: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (TARGET_64BIT)) { return 77; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14092; L527: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode) goto L14229; x2 = XEXP (x1, 0); goto L14093; L14229: ATTRIBUTE_UNUSED_LABEL tem = recog_24 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x2 = XEXP (x1, 0); goto L14093; L579: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode) goto L14254; L6542: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == CALL) goto L6543; if (const0_operand (x2, DImode)) { operands[1] = x2; goto L538; } L547: ATTRIBUTE_UNUSED_LABEL if (const_int_operand (x2, DImode)) { operands[1] = x2; goto L548; } x2 = XEXP (x1, 0); goto L374; L14254: ATTRIBUTE_UNUSED_LABEL tem = recog_25 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L6542; L6543: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == QImode && GET_CODE (x3) == MEM) goto L6544; x2 = XEXP (x1, 0); goto L374; L6544: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DImode) goto L14288; x2 = XEXP (x1, 0); goto L374; L14288: ATTRIBUTE_UNUSED_LABEL if (call_insn_operand (x4, DImode)) { operands[2] = x4; goto L6545; } L14289: ATTRIBUTE_UNUSED_LABEL if (call_insn_operand (x4, DImode)) { operands[1] = x4; goto L6591; } x2 = XEXP (x1, 0); goto L374; L6545: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); operands[3] = x3; goto L6546; L6546: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_MODE (x1) == DImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 16) goto L6547; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14289; L6547: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (tls_symbolic_operand (x2, DImode)) { operands[1] = x2; goto L6548; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14289; L6548: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 546; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14289; L6591: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); operands[2] = x3; goto L6592; L6592: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_MODE (x1) == DImode && GET_CODE (x1) == UNSPEC && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 17) goto L6593; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L6593: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_64BIT)) { return 549; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L538: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L539; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L547; L539: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (!TARGET_USE_MOV0 || optimize_size) && reload_completed)) { return 80; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L547; L548: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L549; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L549: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (TARGET_PENTIUM || optimize_size) && reload_completed && operands[1] == constm1_rtx)) { return 81; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto 
L374; L14094: ATTRIBUTE_UNUSED_LABEL tem = recog_26 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L374; L14095: ATTRIBUTE_UNUSED_LABEL tem = recog_27 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L374; L14096: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, XFmode)) { operands[0] = x2; goto L648; } L14109: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, XFmode)) { operands[0] = x2; goto L4315; } goto L374; L648: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode) goto L14350; x2 = XEXP (x1, 0); goto L14109; L14350: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == UNSPEC) goto L14357; if (register_operand (x2, XFmode)) { operands[1] = x2; goto L649; } x2 = XEXP (x1, 0); goto L14109; L14357: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x2, 0)) { case 1: goto L14364; case 2: goto L14366; default: break; } x2 = XEXP (x1, 0); goto L14109; L14364: ATTRIBUTE_UNUSED_LABEL switch (XINT (x2, 1)) { case 80L: goto L6988; case 82L: goto L7015; case 84L: goto L7099; default: break; } x2 = XEXP (x1, 0); goto L14109; L6988: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L6989; } x2 = XEXP (x1, 0); goto L14109; L6989: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6990; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L6990: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L6991; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L6991: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 81) goto L6992; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L6992: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 604; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7015: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L7016; } x2 = XEXP (x1, 0); goto L14109; L7016: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7017; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7017: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L7018; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7018: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 83) goto L7019; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7019: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 607; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7099: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L7100; } x2 = XEXP (x1, 0); goto L14109; L7100: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7101; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7101: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L7102; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7102: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 85) goto L7103; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7103: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 613; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L14366: ATTRIBUTE_UNUSED_LABEL switch (XINT (x2, 1)) { case 65L: goto L7054; case 66L: goto L7069; case 67L: goto L7084; case 86L: goto L7118; default: break; } x2 = XEXP (x1, 0); goto L14109; L7054: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L7055; } x2 = XEXP (x1, 0); goto L14109; L7055: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L7056; } x2 = XEXP (x1, 0); goto L14109; L7056: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7057; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7057: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, XFmode)) { operands[3] = x2; goto L7058; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7058: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 610; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7069: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L7070; } x2 = XEXP (x1, 0); goto L14109; L7070: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L7071; } x2 = XEXP (x1, 0); goto L14109; L7071: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7072; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7072: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, XFmode)) { operands[3] = x2; goto L7073; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7073: ATTRIBUTE_UNUSED_LABEL if ((! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 611; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7084: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L7085; } x2 = XEXP (x1, 0); goto L14109; L7085: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L7086; } x2 = XEXP (x1, 0); goto L14109; L7086: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L7087; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7087: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, XFmode)) { operands[3] = x2; goto L7088; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7088: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 612; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7118: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L7119; } x2 = XEXP (x1, 0); goto L14109; L7119: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, XFmode)) { operands[3] = x3; goto L7120; } x2 = XEXP (x1, 0); goto L14109; L7120: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7121; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7121: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L7122; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7122: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 2 && XINT (x2, 1) == 87) goto L7123; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7123: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2])) goto L7124; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L7124: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (rtx_equal_p (x3, operands[3]) && (! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 616; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L649: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L650; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L650: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[1])) goto L651; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L651: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (rtx_equal_p (x2, operands[0])) { return 102; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14109; L4315: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode) goto L14371; x2 = XEXP (x1, 0); goto L374; L14371: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L4316; case ABS: goto L4472; default: break; } x2 = XEXP (x1, 0); goto L374; L4316: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, XFmode)) { operands[1] = x3; goto L4317; } x2 = XEXP (x1, 0); goto L374; L4317: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4318; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4318: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && ix86_unary_operator_ok (NEG, XFmode, operands))) { return 370; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4472: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, XFmode)) { operands[1] = x3; goto L4473; } x2 = XEXP (x1, 0); goto L374; L4473: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4474; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4474: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && ix86_unary_operator_ok (ABS, XFmode, operands))) { return 385; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L14100: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L1212; goto L374; L1212: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 2 && XINT (x2, 1) == 27) goto L1213; x2 = XEXP (x1, 0); goto L374; L1213: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); switch (GET_MODE (x3)) { case DImode: goto L14373; case SImode: goto L14374; case QImode: goto L14375; default: break; } x2 = XEXP (x1, 0); goto L374; L14373: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L1214; } x2 = XEXP (x1, 0); goto L374; L1214: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (x86_64_general_operand (x3, DImode)) { operands[2] = x3; goto L1215; } x2 = XEXP (x1, 0); goto L374; L1215: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L1216; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1216: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L1217; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1217: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L1218; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1218: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L1219; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1219: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (TARGET_64BIT && 
ix86_binary_operator_ok (PLUS, DImode, operands))) { return 179; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L14374: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L1299; } x2 = XEXP (x1, 0); goto L374; L1299: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (general_operand (x3, SImode)) { operands[2] = x3; goto L1300; } x2 = XEXP (x1, 0); goto L374; L1300: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L1301; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1301: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L1302; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1302: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L1303; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1303: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L1304; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1304: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_binary_operator_ok (PLUS, SImode, operands))) { return 184; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L14375: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L1310; } x2 = XEXP (x1, 0); goto L374; L1310: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (general_operand (x3, QImode)) { operands[2] = x3; goto L1311; } x2 = XEXP (x1, 0); goto L374; L1311: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L1312; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1312: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L1313; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1313: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == PLUS) goto L1314; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1314: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L1315; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L1315: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (ix86_binary_operator_ok (PLUS, QImode, operands))) { return 185; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L14103: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, TImode)) { operands[0] = x2; goto L2295; } goto L374; L2295: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == TImode && GET_CODE (x2) == MULT) goto L2296; x2 = XEXP (x1, 0); goto L374; L2296: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == TImode) goto L14376; x2 = XEXP (x1, 0); goto L374; L14376: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case ZERO_EXTEND: goto L2297; case SIGN_EXTEND: goto L2333; default: break; } x2 = XEXP (x1, 0); goto L374; L2297: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L2298; } x2 = XEXP (x1, 0); goto L374; L2298: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == TImode && GET_CODE (x3) == ZERO_EXTEND) goto L2299; x2 = XEXP (x1, 0); goto L374; L2299: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[2] = x4; goto L2300; } x2 = XEXP (x1, 0); goto L374; L2300: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2301; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; 
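/* Descriptive note (assumed context, not part of the generated output):
   the recog_* functions in this file appear to be emitted by genrecog
   from the i386 machine description.  Each function walks one branch of
   a decision tree over an insn's RTL: the Lnnnn labels are tree states,
   XEXP/XVECEXP step through the pattern, predicate calls such as
   nonimmediate_operand record matched sub-expressions into operands[],
   and a successful leaf returns the insn code number assigned in the .md
   file, while ret0 returns -1 for "no match".  The hard register numbers
   tested with XINT (x, 0) seem to follow the i386 ordering used here:
   6 = bp, 7 = sp, 17 = the condition-code (flags) register, and 19 = the
   direction-flag register referenced by the string instructions.  */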
L2301: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 254; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L2333: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L2334; } x2 = XEXP (x1, 0); goto L374; L2334: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == TImode && GET_CODE (x3) == SIGN_EXTEND) goto L2335; x2 = XEXP (x1, 0); goto L374; L2335: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[2] = x4; goto L2336; } x2 = XEXP (x1, 0); goto L374; L2336: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2337; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L2337: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 256; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L14104: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L4086; goto L374; L4086: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == CCZmode && GET_CODE (x2) == COMPARE) goto L4087; x2 = XEXP (x1, 0); goto L374; L4087: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case DImode: goto L14378; case SImode: goto L14379; case HImode: goto L14381; case QImode: goto L14382; default: break; } x2 = XEXP (x1, 0); goto L374; L14378: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case NEG: goto L4088; case LSHIFTRT: goto L4141; case SUBREG: case REG: case MEM: goto L14384; default: x2 = XEXP (x1, 0); goto L374; } L14384: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L6438; } x2 = XEXP (x1, 0); goto L374; L4088: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L4089; } x2 = XEXP (x1, 0); goto L374; L4089: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4090; x2 = XEXP (x1, 0); goto L374; L4090: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4091; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4091: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L4092; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4092: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == NEG) goto L4093; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4093: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (TARGET_64BIT && ix86_unary_operator_ok (NEG, DImode, operands))) { return 353; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4141: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DImode && GET_CODE (x4) == NEG) goto L4142; x2 = XEXP (x1, 0); goto L374; L4142: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (GET_MODE (x5) == DImode && GET_CODE (x5) == ASHIFT) goto L4143; x2 = XEXP (x1, 0); goto L374; L4143: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (register_operand (x6, DImode)) { operands[1] = x6; goto L4144; } x2 = XEXP (x1, 0); goto L374; L4144: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 1); if (x6 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4145; x2 = XEXP (x1, 
0); goto L374; L4145: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4146; x2 = XEXP (x1, 0); goto L374; L4146: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4147; x2 = XEXP (x1, 0); goto L374; L4147: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4148; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4148: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L4149; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4149: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == LSHIFTRT) goto L4150; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4150: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == NEG) goto L4151; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4151: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DImode && GET_CODE (x4) == ASHIFT) goto L4152; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4152: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (rtx_equal_p (x5, operands[1])) goto L4153; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4153: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L4154; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4154: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (32)] && (TARGET_64BIT && ix86_unary_operator_ok (NEG, SImode, operands))) { return 357; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L6438: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L6439; x2 = XEXP (x1, 0); goto L374; L6439: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6440; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L6440: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L6441; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L6441: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == CTZ) goto L6442; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L6442: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (TARGET_64BIT)) { return 539; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L14379: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == NEG) goto L4130; if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L6414; } x2 = XEXP (x1, 0); goto L374; L4130: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L4131; } x2 = XEXP (x1, 0); goto L374; L4131: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4132; x2 = XEXP (x1, 0); goto L374; L4132: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4133; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4133: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L4134; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4134: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == NEG) goto L4135; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4135: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && 
(ix86_unary_operator_ok (NEG, SImode, operands))) { return 356; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L6414: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L6415; x2 = XEXP (x1, 0); goto L374; L6415: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6416; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L6416: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L6417; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L6417: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == CTZ) goto L6418; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L6418: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) { return 537; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L14381: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == NEG) goto L4172; x2 = XEXP (x1, 0); goto L374; L4172: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L4173; } x2 = XEXP (x1, 0); goto L374; L4173: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4174; x2 = XEXP (x1, 0); goto L374; L4174: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4175; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4175: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L4176; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4176: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == NEG) goto L4177; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4177: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (ix86_unary_operator_ok (NEG, HImode, operands))) { return 359; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L14382: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == NEG) goto L4195; x2 = XEXP (x1, 0); goto L374; L4195: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L4196; } x2 = XEXP (x1, 0); goto L374; L4196: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L4197; x2 = XEXP (x1, 0); goto L374; L4197: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L4198; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4198: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L4199; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4199: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == NEG) goto L4200; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L4200: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (ix86_unary_operator_ok (NEG, QImode, operands))) { return 361; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L374; L375: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case HImode: goto L14385; case QImode: goto L14386; default: break; } goto L8241; L14385: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, HImode)) { operands[0] = x3; goto L376; } goto L8241; L376: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const0_operand (x2, HImode)) { operands[1] = x2; goto L377; } x2 = XEXP (x1, 0); goto L8241; L377: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP 
(x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L378; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L378: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && ((!TARGET_USE_MOV0 && !TARGET_PARTIAL_REG_STALL) || optimize_size))) { return 56; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L14386: ATTRIBUTE_UNUSED_LABEL if (q_regs_operand (x3, QImode)) { operands[0] = x3; goto L412; } L14387: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, QImode)) { operands[0] = x3; goto L1710; } goto L8241; L412: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const0_operand (x2, QImode)) { operands[1] = x2; goto L413; } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L14387; L413: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L414; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L14387; L414: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && (!TARGET_USE_MOV0 || optimize_size))) { return 62; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L14387; L1710: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode) goto L14388; x2 = XEXP (x1, 0); goto L8241; L14388: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case PLUS: goto L1711; case MINUS: goto L2149; case AND: goto L3076; case IOR: goto L3463; case XOR: goto L3846; case ASHIFTRT: goto L5156; case LSHIFTRT: goto L5494; case ROTATE: goto L5682; case ROTATERT: goto L5872; default: break; } x2 = XEXP (x1, 0); goto L8241; L1711: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L1712; x2 = XEXP (x1, 0); goto L8241; L1712: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[1] = x3; goto L1713; } x2 = XEXP (x1, 0); goto L8241; L1713: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L1714; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L1714: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 217; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L2149: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L2150; x2 = XEXP (x1, 0); goto L8241; L2150: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[1] = x3; goto L2151; } x2 = XEXP (x1, 0); goto L8241; L2151: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2152; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L2152: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! 
TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 244; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L3076: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L3077; x2 = XEXP (x1, 0); goto L8241; L3077: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[1] = x3; goto L3078; } x2 = XEXP (x1, 0); goto L8241; L3078: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3079; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L3079: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 297; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L3463: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L3464; x2 = XEXP (x1, 0); goto L8241; L3464: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[1] = x3; goto L3465; } x2 = XEXP (x1, 0); goto L8241; L3465: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3466; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L3466: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 319; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L3846: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L3847; x2 = XEXP (x1, 0); goto L8241; L3847: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, QImode)) { operands[1] = x3; goto L3848; } x2 = XEXP (x1, 0); goto L8241; L3848: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L3849; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L3849: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 341; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L5156: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L5157; x2 = XEXP (x1, 0); goto L8241; L5157: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[1] = x3; goto L5158; } L5187: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[1] = x3; goto L5188; } x2 = XEXP (x1, 0); goto L8241; L5158: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5159; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5187; L5159: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (ix86_binary_operator_ok (ASHIFTRT, QImode, operands) && (! 
TARGET_PARTIAL_REG_STALL || optimize_size) && (TARGET_SHIFT1 || optimize_size))) { return 440; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5187; L5188: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5189; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L5189: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 442; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L5494: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L5495; x2 = XEXP (x1, 0); goto L8241; L5495: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[1] = x3; goto L5496; } L5525: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[1] = x3; goto L5526; } x2 = XEXP (x1, 0); goto L8241; L5496: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5497; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5525; L5497: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (TARGET_SHIFT1 || optimize_size))) { return 464; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5525; L5526: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5527; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L5527: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 466; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L5682: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L5683; x2 = XEXP (x1, 0); goto L8241; L5683: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[1] = x3; goto L5684; } L5713: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[1] = x3; goto L5714; } x2 = XEXP (x1, 0); goto L8241; L5684: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5685; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5713; L5685: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (TARGET_SHIFT1 || optimize_size))) { return 477; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5713; L5714: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5715; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L5715: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! 
TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 479; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L5872: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L5873; x2 = XEXP (x1, 0); goto L8241; L5873: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const1_operand (x3, QImode)) { operands[1] = x3; goto L5874; } L5903: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, QImode)) { operands[1] = x3; goto L5904; } x2 = XEXP (x1, 0); goto L8241; L5874: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5875; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5903; L5875: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (TARGET_SHIFT1 || optimize_size))) { return 490; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L5903; L5904: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5905; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L5905: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((! TARGET_PARTIAL_REG_STALL || optimize_size) && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM))) { return 492; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L14110: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L1407; goto L8241; L1407: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == COMPARE) goto L1408; x2 = XEXP (x1, 0); goto L8241; L1408: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case DImode: goto L14397; case SImode: goto L14400; case HImode: goto L14403; case QImode: goto L14406; default: break; } x2 = XEXP (x1, 0); goto L8241; L14397: ATTRIBUTE_UNUSED_LABEL tem = recog_28 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x2 = XEXP (x1, 0); goto L8241; L14400: ATTRIBUTE_UNUSED_LABEL tem = recog_29 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x2 = XEXP (x1, 0); goto L8241; L14403: ATTRIBUTE_UNUSED_LABEL tem = recog_30 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x2 = XEXP (x1, 0); goto L8241; L14406: ATTRIBUTE_UNUSED_LABEL tem = recog_31 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x2 = XEXP (x1, 0); goto L8241; L6216: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); switch (GET_MODE (x2)) { case SImode: goto L14490; case DImode: goto L14491; default: break; } L6251: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == IF_THEN_ELSE) goto L6252; x2 = XEXP (x1, 0); goto L8241; L14490: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L6217; } goto L6251; L6217: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L6218; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L6251; L6218: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == LABEL_REF) goto L6219; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L6251; L6219: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[1] = x3; goto L6220; L6220: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 512; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L6251; L14491: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L6225; } goto L6251; L6225: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L6226; x1 = XVECEXP (x0, 0, 0); x2 
= XEXP (x1, 1); goto L6251; L6226: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == LABEL_REF) goto L6227; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L6251; L6227: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[1] = x3; goto L6228; L6228: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 513; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L6251; L6252: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == NE) goto L6253; x2 = XEXP (x1, 0); goto L8241; L6253: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L6254; } x2 = XEXP (x1, 0); goto L8241; L6254: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L6255; x2 = XEXP (x1, 0); goto L8241; L6255: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == LABEL_REF) goto L6256; x2 = XEXP (x1, 0); goto L8241; L6256: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[0] = x4; goto L6257; L6257: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == PC) goto L6258; x2 = XEXP (x1, 0); goto L8241; L6258: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6259; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L6259: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[2] = x2; goto L6260; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L6260: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L6261; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L6261: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L6262; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L6262: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (-1)] && (!TARGET_64BIT && TARGET_USE_LOOP && (reload_in_progress || reload_completed || register_operand (operands[2], VOIDmode))) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 514; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L8241; L8242: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == CALL) goto L8243; goto ret0; L8243: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == QImode && GET_CODE (x3) == MEM) goto L8244; goto ret0; L8244: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14492; goto ret0; L14492: ATTRIBUTE_UNUSED_LABEL if (constant_call_address_operand (x4, SImode)) { operands[1] = x4; goto L8245; } L14493: ATTRIBUTE_UNUSED_LABEL if (call_insn_operand (x4, SImode)) { operands[1] = x4; goto L8258; } goto ret0; L8245: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); operands[2] = x3; goto L8246; L8246: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L8247; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14493; L8247: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L8248; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14493; L8248: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L8249; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14493; L8249: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L8250; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 
0); x4 = XEXP (x3, 0); goto L14493; L8250: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[3] = x3; goto L8251; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14493; L8251: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 687; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14493; L8258: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); operands[2] = x3; goto L8259; L8259: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L8260; goto ret0; L8260: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L8261; goto ret0; L8261: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L8262; goto ret0; L8262: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L8263; goto ret0; L8263: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[3] = x3; goto L8264; } goto ret0; L8264: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 688; } goto ret0; ret0: return -1; } static int recog_33 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); switch (GET_CODE (x2)) { case REG: goto L14516; case MEM: goto L7384; default: break; } L14497: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L266; } L14500: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, SImode)) { operands[0] = x2; goto L1042; } L14503: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L2392; } L14509: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L6398; } goto ret0; L14516: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 7) goto L6358; goto L14497; L6358: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L6359; x2 = XEXP (x1, 0); goto L14497; L6359: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 6) goto L6360; x2 = XEXP (x1, 0); goto L14497; L6360: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L6361; x2 = XEXP (x1, 0); goto L14497; L6361: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6362; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L6362: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 6) goto L6363; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L6363: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MEM) goto L6364; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L6364: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 6) goto L6365; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L6365: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6366; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto 
L14497; L6366: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L6367; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L6367: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (!TARGET_64BIT)) { return 533; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7384: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case DImode: goto L14517; case SImode: goto L14518; default: break; } goto L14497; L14517: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DImode)) { operands[1] = x3; goto L7385; } goto L14497; L7385: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, SImode)) { operands[2] = x2; goto L7386; } x2 = XEXP (x1, 0); goto L14497; L7386: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7387; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7387: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7388; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7388: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7389; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7389: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L7390; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7390: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT) goto L14519; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L14519: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x3, 0) == XWINT (x3, 0)) switch ((int) XWINT (x3, 0)) { case 8L: goto L7391; case 4L: goto L7415; default: break; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7391: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7392; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7392: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 630; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7415: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7416; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7416: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 632; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L14518: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SImode)) { operands[1] = x3; goto L7397; } goto L14497; L7397: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, SImode)) { operands[2] = x2; goto L7398; } x2 = XEXP (x1, 0); goto L14497; L7398: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7399; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7399: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7400; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7400: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7401; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7401: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L7402; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7402: 
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L7403; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7403: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7404; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L7404: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 631; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14497; L266: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MEM) goto L267; x2 = XEXP (x1, 0); goto L14500; L267: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L268; x2 = XEXP (x1, 0); goto L14500; L268: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L269; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14500; L269: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L270; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14500; L270: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L271; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14500; L271: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L272; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14500; L272: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L273; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14500; L273: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L274; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14500; L274: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L275; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14500; L275: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (!TARGET_64BIT)) { return 39; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14500; L1042: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == FIX) goto L1043; x2 = XEXP (x1, 0); goto L14503; L1043: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1044; } x2 = XEXP (x1, 0); goto L14503; L1044: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L1045; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14503; L1045: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L1046; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14503; L1046: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L1047; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14503; L1047: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L1048; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14503; L1048: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])))) { return 154; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14503; L2392: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L14521; x2 = XEXP (x1, 0); goto L14509; L14521: ATTRIBUTE_UNUSED_LABEL switch 
(GET_CODE (x2)) { case TRUNCATE: goto L2393; case DIV: goto L2627; case UDIV: goto L2780; case FFS: goto L6385; case UNSPEC: goto L14527; case PLUS: goto L8008; default: break; } x2 = XEXP (x1, 0); goto L14509; L2393: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == LSHIFTRT) goto L2394; x2 = XEXP (x1, 0); goto L14509; L2394: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DImode && GET_CODE (x4) == MULT) goto L2395; x2 = XEXP (x1, 0); goto L14509; L2395: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (GET_MODE (x5) == DImode) goto L14528; x2 = XEXP (x1, 0); goto L14509; L14528: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x5)) { case ZERO_EXTEND: goto L2396; case SIGN_EXTEND: goto L2473; default: break; } x2 = XEXP (x1, 0); goto L14509; L2396: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, SImode)) { operands[1] = x6; goto L2397; } x2 = XEXP (x1, 0); goto L14509; L2397: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_MODE (x5) == DImode && GET_CODE (x5) == ZERO_EXTEND) goto L2398; x2 = XEXP (x1, 0); goto L14509; L2398: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, SImode)) { operands[2] = x6; goto L2399; } x2 = XEXP (x1, 0); goto L14509; L2399: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L2400; x2 = XEXP (x1, 0); goto L14509; L2400: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2401; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2401: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L2402; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2402: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L2403; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2403: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) { return 259; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2473: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, SImode)) { operands[1] = x6; goto L2474; } x2 = XEXP (x1, 0); goto L14509; L2474: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_MODE (x5) == DImode && GET_CODE (x5) == SIGN_EXTEND) goto L2475; x2 = XEXP (x1, 0); goto L14509; L2475: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, SImode)) { operands[2] = x6; goto L2476; } x2 = XEXP (x1, 0); goto L14509; L2476: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L2477; x2 = XEXP (x1, 0); goto L14509; L2477: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2478; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2478: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L2479; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2479: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L2480; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2480: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)) { return 262; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2627: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 
0); if (GET_MODE (x3) == SImode) goto L14530; x2 = XEXP (x1, 0); goto L14509; L14530: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SImode)) { operands[2] = x3; goto L2628; } L14531: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SImode)) { operands[1] = x3; goto L2691; } x2 = XEXP (x1, 0); goto L14509; L2628: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[3] = x3; goto L2629; } x3 = XEXP (x2, 0); goto L14531; L2629: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2630; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L2630: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L2631; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L2631: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MOD) goto L2632; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L2632: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L2633; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L2633: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[3])) goto L2634; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L2634: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L2635; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L2635: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14532; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L14532: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14534; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L14534: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14536; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L14536: ATTRIBUTE_UNUSED_LABEL if ((!optimize_size && !TARGET_USE_CLTD)) { return 269; } L14537: ATTRIBUTE_UNUSED_LABEL if ((optimize_size || TARGET_USE_CLTD)) { return 270; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14531; L2691: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L2692; } x2 = XEXP (x1, 0); goto L14509; L2692: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2693; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2693: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[3] = x2; goto L2694; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2694: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MOD) goto L2695; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2695: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2696; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2696: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2697; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2697: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L2698; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2698: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[4] = x2; goto L2699; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2699: ATTRIBUTE_UNUSED_LABEL 
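/* All explicit elements of the insn matched, but this pattern also
   requires a trailing clobber that the insn does not carry; if the
   caller permits clobbers to be added (pnum_clobbers is non-null),
   report one missing clobber and accept the pattern.  */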
if (pnum_clobbers != NULL) { *pnum_clobbers = 1; return 271; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2780: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L2781; } x2 = XEXP (x1, 0); goto L14509; L2781: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L2782; } x2 = XEXP (x1, 0); goto L14509; L2782: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2783; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2783: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[3] = x2; goto L2784; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2784: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == UMOD) goto L2785; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2785: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2786; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2786: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2787; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2787: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); switch (GET_CODE (x1)) { case CLOBBER: goto L2788; case USE: goto L2827; default: break; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2788: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 275; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L2827: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[3]) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 276; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L6385: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L6386; } x2 = XEXP (x1, 0); goto L14509; L6386: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6387; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L6387: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[2] = x2; goto L6388; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L6388: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6389; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L6389: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_CMOVE)) { return 535; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L14527: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 4 && XINT (x2, 1) == 20) goto L7679; x2 = XEXP (x1, 0); goto L14509; L7679: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM) goto L7680; x2 = XEXP (x1, 0); goto L14509; L7680: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[5] = x4; goto L7681; } x2 = XEXP (x1, 0); goto L14509; L7681: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, QImode)) { operands[2] = x3; goto L7682; } x2 = XEXP (x1, 0); goto L14509; L7682: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 2); if (immediate_operand (x3, SImode)) { operands[3] = x3; goto L7683; } x2 = XEXP (x1, 0); goto L14509; L7683: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 3); if (register_operand (x3, SImode)) { operands[4] = x3; goto L7684; } x2 = XEXP 
(x1, 0); goto L14509; L7684: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L7685; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L7685: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19) goto L7686; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L7686: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L7687; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L7687: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L7688; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L7688: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 646; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L8008: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L8009; } x2 = XEXP (x1, 0); goto L14509; L8009: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[2] = x3; goto L8010; } x2 = XEXP (x1, 0); goto L14509; L8010: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L8011; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L8011: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L8012; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L8012: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L8013; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L8013: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L8014; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L8014: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (!TARGET_64BIT)) { return 670; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14509; L6398: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == FFS) goto L6399; goto ret0; L6399: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L6400; } goto ret0; L6400: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6401; goto ret0; L6401: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[2] = x2; goto L6402; } goto ret0; L6402: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6403; goto ret0; L6403: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 536; } goto ret0; ret0: return -1; } static int recog_34 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case TRUNCATE: goto L2368; case ZERO_EXTEND: goto L2418; case DIV: goto L2550; case UDIV: goto L2728; case ASHIFT: goto L4626; case ASHIFTRT: goto L4888; case LSHIFTRT: goto L5281; case FFS: goto L6423; case UNSPEC: goto L14550; case PLUS: goto L8019; default: break; } goto ret0; L2368: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 
0); if (GET_MODE (x3) == TImode && GET_CODE (x3) == LSHIFTRT) goto L2369; goto ret0; L2369: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == TImode && GET_CODE (x4) == MULT) goto L2370; goto ret0; L2370: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (GET_MODE (x5) == TImode) goto L14551; goto ret0; L14551: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x5)) { case ZERO_EXTEND: goto L2371; case SIGN_EXTEND: goto L2448; default: break; } goto ret0; L2371: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, DImode)) { operands[1] = x6; goto L2372; } goto ret0; L2372: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_MODE (x5) == TImode && GET_CODE (x5) == ZERO_EXTEND) goto L2373; goto ret0; L2373: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, DImode)) { operands[2] = x6; goto L2374; } goto ret0; L2374: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (64)]) goto L2375; goto ret0; L2375: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2376; goto ret0; L2376: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[3] = x2; goto L2377; } goto ret0; L2377: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L2378; goto ret0; L2378: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 258; } goto ret0; L2448: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, DImode)) { operands[1] = x6; goto L2449; } goto ret0; L2449: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (GET_MODE (x5) == TImode && GET_CODE (x5) == SIGN_EXTEND) goto L2450; goto ret0; L2450: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (nonimmediate_operand (x6, DImode)) { operands[2] = x6; goto L2451; } goto ret0; L2451: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (64)]) goto L2452; goto ret0; L2452: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2453; goto ret0; L2453: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[3] = x2; goto L2454; } goto ret0; L2454: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L2455; goto ret0; L2455: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 261; } goto ret0; L2418: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == TRUNCATE) goto L2419; goto ret0; L2419: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == DImode && GET_CODE (x4) == LSHIFTRT) goto L2420; goto ret0; L2420: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (GET_MODE (x5) == DImode && GET_CODE (x5) == MULT) goto L2421; goto ret0; L2421: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 0); if (GET_MODE (x6) == DImode) goto L14553; goto ret0; L14553: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x6)) { case ZERO_EXTEND: goto L2422; case SIGN_EXTEND: goto L2499; default: break; } goto ret0; L2422: ATTRIBUTE_UNUSED_LABEL x7 = XEXP (x6, 0); if (nonimmediate_operand (x7, SImode)) { operands[1] = x7; goto L2423; } goto ret0; L2423: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 1); if (GET_MODE (x6) == DImode && GET_CODE (x6) == ZERO_EXTEND) goto 
L2424; goto ret0; L2424: ATTRIBUTE_UNUSED_LABEL x7 = XEXP (x6, 0); if (nonimmediate_operand (x7, SImode)) { operands[2] = x7; goto L2425; } goto ret0; L2425: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L2426; goto ret0; L2426: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2427; goto ret0; L2427: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L2428; } goto ret0; L2428: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L2429; goto ret0; L2429: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 260; } goto ret0; L2499: ATTRIBUTE_UNUSED_LABEL x7 = XEXP (x6, 0); if (nonimmediate_operand (x7, SImode)) { operands[1] = x7; goto L2500; } goto ret0; L2500: ATTRIBUTE_UNUSED_LABEL x6 = XEXP (x5, 1); if (GET_MODE (x6) == DImode && GET_CODE (x6) == SIGN_EXTEND) goto L2501; goto ret0; L2501: ATTRIBUTE_UNUSED_LABEL x7 = XEXP (x6, 0); if (nonimmediate_operand (x7, SImode)) { operands[2] = x7; goto L2502; } goto ret0; L2502: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (x5 == const_int_rtx[MAX_SAVED_CONST_INT + (32)]) goto L2503; goto ret0; L2503: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L2504; goto ret0; L2504: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L2505; } goto ret0; L2505: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L2506; goto ret0; L2506: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM))) { return 263; } goto ret0; L2550: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode) goto L14555; goto ret0; L14555: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DImode)) { operands[2] = x3; goto L2551; } L14556: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DImode)) { operands[1] = x3; goto L2614; } goto ret0; L2551: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[3] = x3; goto L2552; } x3 = XEXP (x2, 0); goto L14556; L2552: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2553; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L2553: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L2554; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L2554: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MOD) goto L2555; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L2555: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L2556; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L2556: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[3])) goto L2557; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L2557: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L2558; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L2558: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if 
(GET_MODE (x2) == CCmode) goto L14557; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L14557: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14559; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L14559: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14561; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L14561: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && !optimize_size && !TARGET_USE_CLTD)) { return 266; } L14562: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (optimize_size || TARGET_USE_CLTD))) { return 267; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14556; L2614: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L2615; } goto ret0; L2615: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2616; goto ret0; L2616: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[3] = x2; goto L2617; } goto ret0; L2617: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MOD) goto L2618; goto ret0; L2618: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2619; goto ret0; L2619: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2620; goto ret0; L2620: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L2621; goto ret0; L2621: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[4] = x2; goto L2622; } goto ret0; L2622: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 268; } goto ret0; L2728: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L2729; } goto ret0; L2729: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L2730; } goto ret0; L2730: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2731; goto ret0; L2731: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[3] = x2; goto L2732; } goto ret0; L2732: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == UMOD) goto L2733; goto ret0; L2733: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2734; goto ret0; L2734: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2735; goto ret0; L2735: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); switch (GET_CODE (x1)) { case CLOBBER: goto L2736; case USE: goto L2775; default: break; } goto ret0; L2736: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 273; } goto ret0; L2775: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[3]) && (TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 274; } goto ret0; L4626: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L4627; } goto ret0; L4627: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4628; } goto ret0; L4628: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4629; goto ret0; L4629: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = 
x2; goto L4630; } goto ret0; L4630: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L4631; goto ret0; L4631: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_CMOVE)) { return 404; } goto ret0; L4888: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L4889; } goto ret0; L4889: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L4890; } goto ret0; L4890: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L4891; goto ret0; L4891: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L4892; } goto ret0; L4892: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L4893; goto ret0; L4893: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_CMOVE)) { return 422; } goto ret0; L5281: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L5282; } goto ret0; L5282: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L5283; } goto ret0; L5283: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5284; goto ret0; L5284: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L5285; } goto ret0; L5285: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L5286; goto ret0; L5286: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_CMOVE)) { return 449; } goto ret0; L6423: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L6424; } goto ret0; L6424: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6425; goto ret0; L6425: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[2] = x2; goto L6426; } goto ret0; L6426: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6427; goto ret0; L6427: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && TARGET_CMOVE)) { return 538; } goto ret0; L14550: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 4 && XINT (x2, 1) == 20) goto L7708; goto ret0; L7708: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM) goto L7709; goto ret0; L7709: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DImode)) { operands[5] = x4; goto L7710; } goto ret0; L7710: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, QImode)) { operands[2] = x3; goto L7711; } goto ret0; L7711: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 2); if (immediate_operand (x3, DImode)) { operands[3] = x3; goto L7712; } goto ret0; L7712: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 3); if (register_operand (x3, DImode)) { operands[4] = x3; goto L7713; } goto ret0; L7713: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L7714; goto ret0; L7714: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) 
== 19) goto L7715; goto ret0; L7715: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L7716; goto ret0; L7716: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7717; } goto ret0; L7717: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 647; } goto ret0; L8019: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L8020; } goto ret0; L8020: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_immediate_operand (x3, DImode)) { operands[2] = x3; goto L8021; } goto ret0; L8021: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L8022; goto ret0; L8022: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L8023; goto ret0; L8023: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L8024; goto ret0; L8024: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L8025; goto ret0; L8025: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (TARGET_64BIT)) { return 671; } goto ret0; ret0: return -1; } static int recog_35 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14507; case DImode: goto L14508; case HImode: goto L14512; case SFmode: goto L14505; case DFmode: goto L14506; case XFmode: goto L14510; case QImode: goto L14513; default: break; } L5956: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == PC) goto L5957; goto ret0; L14507: ATTRIBUTE_UNUSED_LABEL tem = recog_33 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L5956; L14508: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L6371; L14498: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L514; } L14499: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, DImode)) { operands[0] = x2; goto L998; } L14502: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L2367; } goto L5956; L6371: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L6372; x2 = XEXP (x1, 0); goto L14498; L6372: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 6) goto L6373; x2 = XEXP (x1, 0); goto L14498; L6373: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L6374; x2 = XEXP (x1, 0); goto L14498; L6374: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6375; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14498; L6375: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 6) goto L6376; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14498; L6376: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MEM) goto L6377; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14498; L6377: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if 
(GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 6) goto L6378; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14498; L6378: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6379; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14498; L6379: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L6380; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14498; L6380: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (TARGET_64BIT)) { return 534; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14498; L514: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode) goto L14538; x2 = XEXP (x1, 0); goto L14499; L14538: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case MEM: goto L515; case SIGN_EXTEND: goto L775; default: break; } x2 = XEXP (x1, 0); goto L14499; L515: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L516; x2 = XEXP (x1, 0); goto L14499; L516: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L517; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L517: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L518; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L518: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L519; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L519: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L520; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L520: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L521; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L521: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L522; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L522: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L523; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L523: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (TARGET_64BIT)) { return 78; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L775: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L776; } x2 = XEXP (x1, 0); goto L14499; L776: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L777; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L777: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L778; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L778: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L779; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L779: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[2] = x2; goto L780; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L780: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 117; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14499; L998: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == FIX) goto L999; x2 = XEXP (x1, 0); goto L14502; L999: ATTRIBUTE_UNUSED_LABEL x3 = 
XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1000; } x2 = XEXP (x1, 0); goto L14502; L1000: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L1001; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14502; L1001: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L1002; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14502; L1002: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L1003; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14502; L1003: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L1004; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14502; L1004: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && (!SSE_FLOAT_MODE_P (GET_MODE (operands[1])) || !TARGET_64BIT)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 149; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14502; L2367: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode) goto L14540; x2 = XEXP (x1, 0); goto L5956; L14540: ATTRIBUTE_UNUSED_LABEL tem = recog_34 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; x2 = XEXP (x1, 0); goto L5956; L14512: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MEM) goto L7420; L14501: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, HImode)) { operands[0] = x2; goto L1086; } L14504: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, HImode)) { operands[0] = x2; goto L2703; } goto L5956; L7420: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case SImode: goto L14563; case DImode: goto L14564; default: break; } goto L14501; L14563: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SImode)) { operands[1] = x3; goto L7421; } goto L14501; L7421: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, HImode)) { operands[2] = x2; goto L7422; } x2 = XEXP (x1, 0); goto L14501; L7422: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7423; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7423: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7424; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7424: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7425; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7425: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L7426; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7426: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7427; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7427: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7428; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7428: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 633; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L14564: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DImode)) { operands[1] = x3; goto L7433; } goto L14501; L7433: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, HImode)) { operands[2] = x2; goto L7434; } x2 = XEXP (x1, 0); goto L14501; L7434: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) 
== SET) goto L7435; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7435: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7436; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7436: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7437; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7437: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L7438; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7438: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7439; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7439: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7440; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L7440: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 634; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14501; L1086: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == FIX) goto L1087; x2 = XEXP (x1, 0); goto L14504; L1087: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1088; } x2 = XEXP (x1, 0); goto L14504; L1088: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L1089; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14504; L1089: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L1090; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14504; L1090: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L1091; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14504; L1091: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L1092; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14504; L1092: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])))) { return 159; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14504; L2703: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode) goto L14565; x2 = XEXP (x1, 0); goto L5956; L14565: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case DIV: goto L2704; case UDIV: goto L2847; default: break; } x2 = XEXP (x1, 0); goto L5956; L2704: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, HImode)) { operands[1] = x3; goto L2705; } x2 = XEXP (x1, 0); goto L5956; L2705: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, HImode)) { operands[2] = x3; goto L2706; } x2 = XEXP (x1, 0); goto L5956; L2706: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2707; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2707: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, HImode)) { operands[3] = x2; goto L2708; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2708: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == MOD) goto L2709; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2709: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2710; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2710: 
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2711; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2711: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L2712; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2712: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_HIMODE_MATH)) { return 272; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2847: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, HImode)) { operands[1] = x3; goto L2848; } x2 = XEXP (x1, 0); goto L5956; L2848: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, HImode)) { operands[2] = x3; goto L2849; } x2 = XEXP (x1, 0); goto L5956; L2849: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2850; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2850: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, HImode)) { operands[3] = x2; goto L2851; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2851: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == UMOD) goto L2852; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2852: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2853; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2853: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2854; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2854: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L2855; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2855: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, HImode)) { operands[4] = x2; goto L2856; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L2856: ATTRIBUTE_UNUSED_LABEL if (pnum_clobbers != NULL) { *pnum_clobbers = 1; return 277; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L14505: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SFmode)) { operands[0] = x2; goto L4216; } L14514: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SFmode)) { operands[0] = x2; goto L8042; } goto L5956; L4216: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14567; x2 = XEXP (x1, 0); goto L14514; L14567: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L4217; case ABS: goto L4373; default: break; } x2 = XEXP (x1, 0); goto L14514; L4217: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[1] = x3; goto L4218; } x2 = XEXP (x1, 0); goto L14514; L4218: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L4219; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14514; L4219: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L4220; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14514; L4220: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L4221; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14514; L4221: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode))))) { return 363; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); 
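/* This alternative failed; the cursors have just been reset to the
   first element of the PARALLEL, so fall back and try the next
   SFmode alternative for operand 0.  */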
goto L14514; L4373: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[1] = x3; goto L4374; } x2 = XEXP (x1, 0); goto L14514; L4374: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L4375; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14514; L4375: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L4376; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14514; L4376: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L4377; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14514; L4377: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode))))) { return 378; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14514; L8042: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == IF_THEN_ELSE) goto L8043; x2 = XEXP (x1, 0); goto L5956; L8043: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (sse_comparison_operator (x3, VOIDmode)) { operands[1] = x3; goto L8044; } L8065: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == EQ) goto L8066; x2 = XEXP (x1, 0); goto L5956; L8044: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SFmode)) { operands[4] = x4; goto L8045; } goto L8065; L8045: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SFmode)) { operands[5] = x4; goto L8046; } goto L8065; L8046: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L8047; } x3 = XEXP (x2, 0); goto L8065; L8047: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (nonimmediate_operand (x3, SFmode)) { operands[3] = x3; goto L8048; } x3 = XEXP (x2, 0); goto L8065; L8048: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L8049; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L8065; L8049: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SFmode)) { operands[6] = x2; goto L8050; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L8065; L8050: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L8051; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L8065; L8051: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM) /* Avoid combine from being smart and converting min/max instruction patterns into conditional moves. 
*/ && ((GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != GT && GET_CODE (operands[1]) != UNLE && GET_CODE (operands[1]) != UNGE) || !rtx_equal_p (operands[4], operands[2]) || !rtx_equal_p (operands[5], operands[3])) && (!TARGET_IEEE_FP || (GET_CODE (operands[1]) != EQ && GET_CODE (operands[1]) != NE)))) { return 673; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L8065; L8066: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, SFmode)) { operands[3] = x4; goto L8067; } x2 = XEXP (x1, 0); goto L5956; L8067: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SFmode)) { operands[4] = x4; goto L8068; } x2 = XEXP (x1, 0); goto L5956; L8068: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[1] = x3; goto L8069; } x2 = XEXP (x1, 0); goto L5956; L8069: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (nonimmediate_operand (x3, SFmode)) { operands[2] = x3; goto L8070; } x2 = XEXP (x1, 0); goto L5956; L8070: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L8071; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L8071: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SFmode)) { operands[5] = x2; goto L8072; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L8072: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L8073; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L8073: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM))) { return 674; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L14506: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DFmode)) { operands[0] = x2; goto L4257; } L14515: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DFmode)) { operands[0] = x2; goto L8086; } goto L5956; L4257: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14569; x2 = XEXP (x1, 0); goto L14515; L14569: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L4258; case ABS: goto L4414; default: break; } x2 = XEXP (x1, 0); goto L14515; L4258: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L4259; } x2 = XEXP (x1, 0); goto L14515; L4259: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L4260; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L4260: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L4261; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L4261: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L4262; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L4262: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14571; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L14571: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14573; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L14573: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14575; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L14575: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_SSE2 && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode))))) { return 366; } L14576: ATTRIBUTE_UNUSED_LABEL if 
((TARGET_64BIT && TARGET_SSE2 && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode))))) { return 367; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L4414: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L4415; } x2 = XEXP (x1, 0); goto L14515; L4415: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L4416; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L4416: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L4417; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L4417: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L4418; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L4418: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14577; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L14577: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14579; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L14579: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14581; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L14581: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_SSE2 && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode))))) { return 381; } L14582: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && TARGET_SSE2 && (reload_in_progress || reload_completed || (register_operand (operands[0], VOIDmode) && register_operand (operands[1], VOIDmode))))) { return 382; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14515; L8086: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == IF_THEN_ELSE) goto L8087; x2 = XEXP (x1, 0); goto L5956; L8087: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (sse_comparison_operator (x3, VOIDmode)) { operands[1] = x3; goto L8088; } L8109: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == EQ) goto L8110; x2 = XEXP (x1, 0); goto L5956; L8088: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DFmode)) { operands[4] = x4; goto L8089; } goto L8109; L8089: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DFmode)) { operands[5] = x4; goto L8090; } goto L8109; L8090: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[2] = x3; goto L8091; } x3 = XEXP (x2, 0); goto L8109; L8091: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (nonimmediate_operand (x3, DFmode)) { operands[3] = x3; goto L8092; } x3 = XEXP (x2, 0); goto L8109; L8092: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L8093; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L8109; L8093: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DFmode)) { operands[6] = x2; goto L8094; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L8109; L8094: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L8095; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L8109; L8095: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE2 && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM) /* Avoid combine from being smart and converting min/max instruction patterns into 
conditional moves. */ && ((GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != GT && GET_CODE (operands[1]) != UNLE && GET_CODE (operands[1]) != UNGE) || !rtx_equal_p (operands[4], operands[2]) || !rtx_equal_p (operands[5], operands[3])) && (!TARGET_IEEE_FP || (GET_CODE (operands[1]) != EQ && GET_CODE (operands[1]) != NE)))) { return 675; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L8109; L8110: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DFmode)) { operands[3] = x4; goto L8111; } x2 = XEXP (x1, 0); goto L5956; L8111: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DFmode)) { operands[4] = x4; goto L8112; } x2 = XEXP (x1, 0); goto L5956; L8112: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L8113; } x2 = XEXP (x1, 0); goto L5956; L8113: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (nonimmediate_operand (x3, DFmode)) { operands[2] = x3; goto L8114; } x2 = XEXP (x1, 0); goto L5956; L8114: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L8115; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L8115: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DFmode)) { operands[5] = x2; goto L8116; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L8116: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L8117; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L8117: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_SSE && (GET_CODE (operands[2]) != MEM || GET_CODE (operands[3]) != MEM))) { return 676; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L14510: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, XFmode)) { operands[0] = x2; goto L6886; } goto L5956; L6886: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode) goto L14583; x2 = XEXP (x1, 0); goto L5956; L14583: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == UNSPEC) goto L14585; x2 = XEXP (x1, 0); goto L5956; L14585: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 2) goto L14587; x2 = XEXP (x1, 0); goto L5956; L14587: ATTRIBUTE_UNUSED_LABEL switch (XINT (x2, 1)) { case 88L: goto L6887; case 90L: goto L6902; default: break; } x2 = XEXP (x1, 0); goto L5956; L6887: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L6888; } x2 = XEXP (x1, 0); goto L5956; L6888: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, XFmode)) { operands[3] = x3; goto L6889; } x2 = XEXP (x1, 0); goto L5956; L6889: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6890; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6890: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L6891; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6891: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 2 && XINT (x2, 1) == 89) goto L6892; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6892: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2])) goto L6893; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6893: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (rtx_equal_p (x3, operands[3])) goto L6894; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; 
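/* Descriptive note (added; interpretation hedged): the labels that follow,
   L6894 onward, finish matching three-set XFmode parallels in which two
   XFmode results come from paired UNSPECs (88/89 and 90/91) and a third
   set writes the CCFPmode register 18 from UNSPEC 45.  They are accepted
   only when !TARGET_NO_FANCY_MATH_387, TARGET_80387 and
   flag_unsafe_math_optimizations all hold (insn codes 591 and 592), which
   appears to correspond to the x87 sincos/tan-style patterns of the i386
   machine description.  */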
L6894: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L6895; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6895: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L6896; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6896: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 45) goto L6897; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6897: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 591; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6902: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L6903; } x2 = XEXP (x1, 0); goto L5956; L6903: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, XFmode)) { operands[3] = x3; goto L6904; } x2 = XEXP (x1, 0); goto L5956; L6904: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6905; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6905: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L6906; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6906: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 2 && XINT (x2, 1) == 91) goto L6907; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6907: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2])) goto L6908; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6908: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (rtx_equal_p (x3, operands[3])) goto L6909; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6909: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L6910; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6910: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L6911; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6911: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 45) goto L6912; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L6912: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (! 
TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_unsafe_math_optimizations)) { return 592; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L14513: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MEM) goto L7444; goto L5956; L7444: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case SImode: goto L14589; case DImode: goto L14590; default: break; } goto L5956; L14589: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SImode)) { operands[1] = x3; goto L7445; } goto L5956; L7445: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, QImode)) { operands[2] = x2; goto L7446; } x2 = XEXP (x1, 0); goto L5956; L7446: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7447; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7447: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7448; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7448: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7449; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7449: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L7450; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7450: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L7451; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7451: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7452; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7452: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 635; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L14590: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DImode)) { operands[1] = x3; goto L7457; } goto L5956; L7457: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, QImode)) { operands[2] = x2; goto L7458; } x2 = XEXP (x1, 0); goto L5956; L7458: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7459; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7459: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7460; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7460: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7461; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7461: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L7462; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7462: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L7463; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7463: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7464; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L7464: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 636; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L5956; L5957: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == IF_THEN_ELSE) goto L5958; goto ret0; L5958: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (comparison_operator (x3, VOIDmode)) { 
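/* Descriptive note (added): from L5958 the pattern being matched is a
   conditional branch, (set (pc) (if_then_else (comparison op1 op2) ...)),
   followed by clobbers of the CCFPmode hard registers 18 and 17.  These
   are the floating-point conditional-jump forms that need a nontrivial
   jump sequence (ix86_fp_jump_nontrivial_p), returning insn codes 499-504
   depending on x87 versus SSE comparisons and on which arm holds the
   label.  */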
operands[0] = x3; goto L5959; } goto ret0; L5959: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, VOIDmode)) { operands[1] = x4; goto L5960; } goto ret0; L5960: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, VOIDmode)) { operands[2] = x4; goto L5961; } L5983: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, VOIDmode)) { operands[2] = x4; goto L5984; } goto ret0; L5961: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); switch (GET_CODE (x3)) { case LABEL_REF: goto L5962; case PC: goto L6031; default: break; } x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L5962: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[3] = x4; goto L5963; L5963: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == PC) goto L5964; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L5964: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5965; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L5965: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L5966; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L5966: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L5967; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L5967: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_CMOVE && TARGET_80387 && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { return 499; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L6031: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == LABEL_REF) goto L6032; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L6032: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[3] = x4; goto L6033; L6033: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6034; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L6034: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L6035; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L6035: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6036; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L6036: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_CMOVE && TARGET_80387 && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { return 502; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L5983; L5984: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); switch (GET_CODE (x3)) { case LABEL_REF: goto L5985; case PC: goto L6055; default: break; } goto ret0; L5985: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[3] = x4; goto L5986; L5986: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == PC) goto L5987; goto ret0; L5987: ATTRIBUTE_UNUSED_LABEL x1 = 
XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L5988; goto ret0; L5988: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L5989; goto ret0; L5989: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L5990; goto ret0; L5990: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode) goto L14591; goto ret0; L14591: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14593; goto ret0; L14593: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14595; goto ret0; L14595: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { return 500; } L14596: ATTRIBUTE_UNUSED_LABEL if ((SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { return 501; } goto ret0; L6055: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == LABEL_REF) goto L6056; goto ret0; L6056: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[3] = x4; goto L6057; L6057: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6058; goto ret0; L6058: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L6059; goto ret0; L6059: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6060; goto ret0; L6060: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode) goto L14597; goto ret0; L14597: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14599; goto ret0; L14599: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14601; goto ret0; L14601: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { return 503; } L14602: ATTRIBUTE_UNUSED_LABEL if ((SSE_FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { return 504; } goto ret0; ret0: return -1; } static int recog_36 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); if (GET_CODE (x2) == MEM) goto L7149; L14607: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L1030; } L14610: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L2674; } goto ret0; L7149: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case SImode: goto L14622; case DImode: goto L14623; default: break; } goto L14607; L14622: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SImode)) { operands[2] = x3; goto L7150; } goto L14607; L7150: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MEM) goto L7151; x2 = XEXP (x1, 0); goto L14607; L7151: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[3] = x3; goto L7152; } x2 = XEXP (x1, 0); goto L14607; L7152: 
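/* Descriptive note (added): the remaining elements of this parallel are
   two SETs that advance the source and destination pointers by 4 and a
   USE of hard register 19 (which this vintage of the i386 port uses for
   the direction flag).  Together with the MEM-to-MEM SImode set already
   matched, this is the single string-move pattern, enabled when
   TARGET_SINGLE_STRINGOP or optimize_size is set (insn 619 here; the
   DImode-pointer variant matched at L14623 returns 620).  */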
ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7153; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7153: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7154; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7154: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7155; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7155: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L7156; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7156: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L7157; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7157: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7158; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7158: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L7159; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7159: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7160; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7160: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7161; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7161: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L7162; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7162: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7163; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7163: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 619; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L14623: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DImode)) { operands[2] = x3; goto L7168; } goto L14607; L7168: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MEM) goto L7169; x2 = XEXP (x1, 0); goto L14607; L7169: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[3] = x3; goto L7170; } x2 = XEXP (x1, 0); goto L14607; L7170: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7171; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7171: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7172; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7172: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7173; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7173: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L7174; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7174: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L7175; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7175: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7176; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7176: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7177; } x1 = XVECEXP 
(x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7177: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7178; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7178: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7179; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7179: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (4)]) goto L7180; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7180: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7181; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L7181: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 620; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14607; L1030: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == FIX) goto L1031; x2 = XEXP (x1, 0); goto L14610; L1031: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1032; } x2 = XEXP (x1, 0); goto L14610; L1032: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L1033; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14610; L1033: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L1034; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14610; L1034: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L1035; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14610; L1035: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L1036; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14610; L1036: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L1037; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14610; L1037: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SImode)) { operands[4] = x2; goto L1038; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14610; L1038: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])))) { return 153; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14610; L2674: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L14624; goto ret0; L14624: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case DIV: goto L2675; case UDIV: goto L2804; case UNSPEC: goto L14630; case PLUS: goto L6598; default: break; } goto ret0; L2675: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L2676; } goto ret0; L2676: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L2677; } goto ret0; L2677: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2678; goto ret0; L2678: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[3] = x2; goto L2679; } goto ret0; L2679: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MOD) goto L2680; goto ret0; L2680: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2681; goto ret0; L2681: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2682; goto ret0; L2682: 
ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L2683; goto ret0; L2683: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[4] = x2; goto L2684; } goto ret0; L2684: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L2685; goto ret0; L2685: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 271; } goto ret0; L2804: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L2805; } goto ret0; L2805: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L2806; } goto ret0; L2806: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2807; goto ret0; L2807: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[3] = x2; goto L2808; } goto ret0; L2808: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == UMOD) goto L2809; goto ret0; L2809: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2810; goto ret0; L2810: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2811; goto ret0; L2811: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L2812; goto ret0; L2812: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[3])) goto L2813; goto ret0; L2813: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L2814; goto ret0; L2814: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 276; } goto ret0; L14630: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x2, 0)) { case 3: goto L14633; case 2: goto L14634; case 4: goto L14635; default: break; } goto ret0; L14633: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 1) == 16) goto L6503; goto ret0; L6503: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L6504; } goto ret0; L6504: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (tls_symbolic_operand (x3, SImode)) { operands[2] = x3; goto L6505; } goto ret0; L6505: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 2); if (call_insn_operand (x3, SImode)) { operands[3] = x3; goto L6506; } goto ret0; L6506: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6507; goto ret0; L6507: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[4] = x2; goto L6508; } goto ret0; L6508: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6509; goto ret0; L6509: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[5] = x2; goto L6510; } goto ret0; L6510: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L6511; goto ret0; L6511: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14636; goto ret0; L14636: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14638; goto ret0; L14638: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14640; goto ret0; L14640: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_GNU_TLS)) { return 544; } L14641: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_SUN_TLS)) { return 545; } goto ret0; L14634: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 1) == 17) goto L6553; goto ret0; L6553: 
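/* Descriptive note (added; interpretation hedged): UNSPEC 17 with two
   operands (a register and a call operand), followed by clobbers of two
   SImode scratch registers and the CCmode flags register.  This looks
   like the 32-bit local-dynamic TLS base pattern, with separate insn
   codes for GNU TLS (547) and Sun TLS (548); the three-operand UNSPEC 16
   case handled just above is the corresponding global-dynamic form
   (544/545).  */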
ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L6554; } goto ret0; L6554: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (call_insn_operand (x3, SImode)) { operands[2] = x3; goto L6555; } goto ret0; L6555: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6556; goto ret0; L6556: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L6557; } goto ret0; L6557: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6558; goto ret0; L6558: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[4] = x2; goto L6559; } goto ret0; L6559: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L6560; goto ret0; L6560: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14642; goto ret0; L14642: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14644; goto ret0; L14644: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14646; goto ret0; L14646: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_GNU_TLS)) { return 547; } L14647: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && TARGET_SUN_TLS)) { return 548; } goto ret0; L14635: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 1) == 20) goto L7664; goto ret0; L7664: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM) goto L7665; goto ret0; L7665: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[5] = x4; goto L7666; } goto ret0; L7666: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, QImode)) { operands[2] = x3; goto L7667; } goto ret0; L7667: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 2); if (immediate_operand (x3, SImode)) { operands[3] = x3; goto L7668; } goto ret0; L7668: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 3); if (register_operand (x3, SImode)) { operands[4] = x3; goto L7669; } goto ret0; L7669: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L7670; goto ret0; L7670: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19) goto L7671; goto ret0; L7671: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L7672; goto ret0; L7672: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L7673; } goto ret0; L7673: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L7674; goto ret0; L7674: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT)) { return 646; } goto ret0; L6598: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == UNSPEC && XVECLEN (x3, 0) == 2 && XINT (x3, 1) == 17) goto L6599; goto ret0; L6599: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L6600; } goto ret0; L6600: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (call_insn_operand (x4, SImode)) { operands[2] = x4; goto L6601; } goto ret0; L6601: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == CONST) goto L6602; goto ret0; L6602: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode && GET_CODE (x4) == UNSPEC && XVECLEN (x4, 0) == 1 && XINT (x4, 1) == 6) goto L6603; goto ret0; L6603: 
ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (tls_symbolic_operand (x5, SImode)) { operands[3] = x5; goto L6604; } goto ret0; L6604: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6605; goto ret0; L6605: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[4] = x2; goto L6606; } goto ret0; L6606: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6607; goto ret0; L6607: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[5] = x2; goto L6608; } goto ret0; L6608: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L6609; goto ret0; L6609: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 550; } goto ret0; ret0: return -1; } static int recog_37 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case DImode: goto L14612; case SImode: goto L14613; case HImode: goto L14614; case QImode: goto L14615; case BLKmode: goto L14616; default: break; } L6097: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == PC) goto L6098; goto ret0; L14612: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MEM) goto L7131; L14605: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L974; } L14606: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, DImode)) { operands[0] = x2; goto L986; } L14609: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L2597; } goto L6097; L7131: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[2] = x3; goto L7132; } goto L14605; L7132: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MEM) goto L7133; x2 = XEXP (x1, 0); goto L14605; L7133: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[3] = x3; goto L7134; } x2 = XEXP (x1, 0); goto L14605; L7134: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7135; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7135: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7136; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7136: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7137; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7137: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L7138; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7138: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L7139; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7139: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7140; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7140: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7141; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7141: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 
1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7142; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7142: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7143; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7143: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L7144; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7144: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7145; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L7145: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 618; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14605; L974: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == FIX) goto L975; x2 = XEXP (x1, 0); goto L14606; L975: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L976; } x2 = XEXP (x1, 0); goto L14606; L976: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L977; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14606; L977: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L978; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14606; L978: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L979; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14606; L979: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L980; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14606; L980: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L981; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14606; L981: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, DImode)) { operands[4] = x2; goto L982; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14606; L982: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && (!SSE_FLOAT_MODE_P (GET_MODE (operands[1])) || !TARGET_64BIT)) && pnum_clobbers != NULL) { *pnum_clobbers = 1; return 148; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14606; L986: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == FIX) goto L987; x2 = XEXP (x1, 0); goto L14609; L987: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L988; } x2 = XEXP (x1, 0); goto L14609; L988: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L989; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14609; L989: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L990; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14609; L990: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L991; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14609; L991: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L992; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14609; L992: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L993; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14609; L993: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DFmode)) 
{ operands[4] = x2; goto L994; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14609; L994: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && (!SSE_FLOAT_MODE_P (GET_MODE (operands[1])) || !TARGET_64BIT))) { return 149; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14609; L2597: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode) goto L14617; x2 = XEXP (x1, 0); goto L6097; L14617: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case DIV: goto L2598; case UDIV: goto L2752; case UNSPEC: goto L14621; case PLUS: goto L8030; default: break; } x2 = XEXP (x1, 0); goto L6097; L2598: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L2599; } x2 = XEXP (x1, 0); goto L6097; L2599: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L2600; } x2 = XEXP (x1, 0); goto L6097; L2600: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2601; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2601: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[3] = x2; goto L2602; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2602: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MOD) goto L2603; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2603: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2604; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2604: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2605; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2605: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L2606; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2606: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[4] = x2; goto L2607; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2607: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L2608; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2608: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 268; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2752: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L2753; } x2 = XEXP (x1, 0); goto L6097; L2753: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L2754; } x2 = XEXP (x1, 0); goto L6097; L2754: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2755; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2755: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[3] = x2; goto L2756; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2756: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == UMOD) goto L2757; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2757: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2758; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2758: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2759; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2759: 
ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L2760; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2760: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[3])) goto L2761; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2761: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L2762; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2762: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 274; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L14621: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 4 && XINT (x2, 1) == 20) goto L7693; x2 = XEXP (x1, 0); goto L6097; L7693: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM) goto L7694; x2 = XEXP (x1, 0); goto L6097; L7694: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DImode)) { operands[5] = x4; goto L7695; } x2 = XEXP (x1, 0); goto L6097; L7695: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (register_operand (x3, QImode)) { operands[2] = x3; goto L7696; } x2 = XEXP (x1, 0); goto L6097; L7696: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 2); if (immediate_operand (x3, DImode)) { operands[3] = x3; goto L7697; } x2 = XEXP (x1, 0); goto L6097; L7697: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 3); if (register_operand (x3, DImode)) { operands[4] = x3; goto L7698; } x2 = XEXP (x1, 0); goto L6097; L7698: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L7699; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7699: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19) goto L7700; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7700: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L7701; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7701: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7702; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7702: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L7703; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7703: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT)) { return 647; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L8030: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L8031; } x2 = XEXP (x1, 0); goto L6097; L8031: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, DImode)) { operands[3] = x3; goto L8032; } x2 = XEXP (x1, 0); goto L6097; L8032: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L8033; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L8033: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[2] = x2; goto L8034; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L8034: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L8035; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L8035: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L8036; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; 
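/* Descriptive note (added; interpretation hedged): L8036 checks the final
   element of the plus-immediate parallel above, a clobber of a BLKmode
   memory whose address is a SCRATCH.  Under TARGET_64BIT this accepts
   insn 672, which appears to be the 64-bit stack-pointer adjustment
   pattern that also tells the optimizers memory may have changed.  */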
L8036: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L8037; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L8037: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L8038; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L8038: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (TARGET_64BIT)) { return 672; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L14613: ATTRIBUTE_UNUSED_LABEL tem = recog_36 (x0, insn, pnum_clobbers); if (tem >= 0) return tem; goto L6097; L14614: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MEM) goto L7185; L14608: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L1074; } L14611: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, HImode)) { operands[0] = x2; goto L2831; } goto L6097; L7185: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case SImode: goto L14648; case DImode: goto L14649; default: break; } goto L14608; L14648: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SImode)) { operands[2] = x3; goto L7186; } goto L14608; L7186: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == MEM) goto L7187; x2 = XEXP (x1, 0); goto L14608; L7187: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[3] = x3; goto L7188; } x2 = XEXP (x1, 0); goto L14608; L7188: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7189; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7189: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7190; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7190: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7191; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7191: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L7192; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7192: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7193; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7193: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7194; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7194: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L7195; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7195: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7196; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7196: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7197; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7197: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7198; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7198: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7199; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7199: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 621; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L14649: ATTRIBUTE_UNUSED_LABEL if 
(register_operand (x3, DImode)) { operands[2] = x3; goto L7204; } goto L14608; L7204: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == MEM) goto L7205; x2 = XEXP (x1, 0); goto L14608; L7205: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[3] = x3; goto L7206; } x2 = XEXP (x1, 0); goto L14608; L7206: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7207; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7207: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7208; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7208: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7209; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7209: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L7210; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7210: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7211; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7211: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7212; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7212: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7213; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7213: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7214; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7214: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7215; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7215: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7216; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7216: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7217; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L7217: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 622; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14608; L1074: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == FIX) goto L1075; x2 = XEXP (x1, 0); goto L14611; L1075: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L1076; } x2 = XEXP (x1, 0); goto L14611; L1076: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L1077; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14611; L1077: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L1078; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14611; L1078: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L1079; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14611; L1079: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L1080; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14611; L1080: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L1081; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14611; L1081: ATTRIBUTE_UNUSED_LABEL 
x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[4] = x2; goto L1082; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14611; L1082: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !SSE_FLOAT_MODE_P (GET_MODE (operands[1])))) { return 158; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14611; L2831: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == UDIV) goto L2832; x2 = XEXP (x1, 0); goto L6097; L2832: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, HImode)) { operands[1] = x3; goto L2833; } x2 = XEXP (x1, 0); goto L6097; L2833: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, HImode)) { operands[2] = x3; goto L2834; } x2 = XEXP (x1, 0); goto L6097; L2834: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L2835; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2835: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, HImode)) { operands[3] = x2; goto L2836; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2836: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == UMOD) goto L2837; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2837: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L2838; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2838: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L2839; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2839: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L2840; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2840: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, HImode)) { operands[4] = x2; goto L2841; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2841: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L2842; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L2842: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return 277; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L14615: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MEM) goto L7221; goto L6097; L7221: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case SImode: goto L14650; case DImode: goto L14651; default: break; } goto L6097; L14650: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, SImode)) { operands[2] = x3; goto L7222; } goto L6097; L7222: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == MEM) goto L7223; x2 = XEXP (x1, 0); goto L6097; L7223: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[3] = x3; goto L7224; } x2 = XEXP (x1, 0); goto L6097; L7224: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7225; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7225: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7226; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7226: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7227; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7227: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L7228; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto 
L6097; L7228: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L7229; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7229: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7230; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7230: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L7231; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7231: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7232; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7232: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7233; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7233: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L7234; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7234: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7235; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7235: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 623; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L14651: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DImode)) { operands[2] = x3; goto L7240; } goto L6097; L7240: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == MEM) goto L7241; x2 = XEXP (x1, 0); goto L6097; L7241: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[3] = x3; goto L7242; } x2 = XEXP (x1, 0); goto L6097; L7242: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7243; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7243: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7244; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7244: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7245; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7245: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[2])) goto L7246; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7246: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L7247; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7247: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7248; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7248: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7249; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7249: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7250; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7250: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7251; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7251: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L7252; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L7252: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7253; x1 = XVECEXP (x0, 0, 0); 
x2 = XEXP (x1, 0); goto L6097; L7253: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT && (TARGET_SINGLE_STRINGOP || optimize_size))) { return 624; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L14616: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == MEM) goto L9448; goto L6097; L9448: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == PLUS) goto L9449; goto L6097; L9449: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DImode)) { operands[0] = x4; goto L9450; } goto L6097; L9450: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const_int_operand (x4, DImode)) { operands[4] = x4; goto L9451; } goto L6097; L9451: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 8 && XINT (x2, 1) == 13) goto L9452; x2 = XEXP (x1, 0); goto L6097; L9452: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 21) goto L9453; x2 = XEXP (x1, 0); goto L6097; L9453: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 1); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 22) goto L9454; x2 = XEXP (x1, 0); goto L6097; L9454: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 2); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 23) goto L9455; x2 = XEXP (x1, 0); goto L6097; L9455: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 3); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 24) goto L9456; x2 = XEXP (x1, 0); goto L6097; L9456: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 4); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 25) goto L9457; x2 = XEXP (x1, 0); goto L6097; L9457: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 5); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 26) goto L9458; x2 = XEXP (x1, 0); goto L6097; L9458: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 6); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 27) goto L9459; x2 = XEXP (x1, 0); goto L6097; L9459: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 7); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 28) goto L9460; x2 = XEXP (x1, 0); goto L6097; L9460: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L9461; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L9461: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L9462; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L9462: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L9463; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L9463: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (const_int_operand (x2, DImode)) { operands[2] = x2; goto L9464; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L9464: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L9465; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L9465: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == LABEL_REF) goto L9466; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L9466: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[3] = x3; goto L9467; L9467: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && INTVAL (operands[4]) + SSE_REGPARM_MAX * 16 - 16 < 128 && INTVAL (operands[4]) + INTVAL (operands[2]) * 16 >= 
-128)) { return 853; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L6097; L6098: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == IF_THEN_ELSE) goto L6233; goto ret0; L6233: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == NE) goto L6234; L6099: ATTRIBUTE_UNUSED_LABEL if (comparison_operator (x3, VOIDmode)) { operands[0] = x3; goto L6100; } goto ret0; L6234: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L6235; } goto L6099; L6235: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L6236; goto L6099; L6236: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == LABEL_REF) goto L6237; x3 = XEXP (x2, 0); goto L6099; L6237: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[0] = x4; goto L6238; L6238: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == PC) goto L6239; x3 = XEXP (x2, 0); goto L6099; L6239: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6240; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L6099; L6240: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[2] = x2; goto L6241; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L6099; L6241: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L6242; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L6099; L6242: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L6243; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L6099; L6243: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (-1)]) goto L6244; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L6099; L6244: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6245; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L6099; L6245: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L6246; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L6099; L6246: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L6247; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L6099; L6247: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_USE_LOOP && (reload_in_progress || reload_completed || register_operand (operands[2], VOIDmode)))) { return 514; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L6099; L6100: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, VOIDmode)) { operands[1] = x4; goto L6101; } goto ret0; L6101: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, VOIDmode)) { operands[2] = x4; goto L6102; } L6154: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, VOIDmode)) { operands[2] = x4; goto L6155; } goto ret0; L6102: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); switch (GET_CODE (x3)) { case LABEL_REF: goto L6103; case PC: goto L6129; default: break; } x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6103: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[3] = x4; goto L6104; L6104: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == PC) goto L6105; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6105: 
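/* The subtree at L6099/L6105 matches the x87 compare-and-branch forms: an
   IF_THEN_ELSE on a comparison operator with LABEL_REF/PC arms, CLOBBERs of
   the two CCFPmode condition registers (hard regs 18 and 17 in this port's
   numbering), and an HImode scratch, presumably for the fnstsw-based split
   of the compare.  */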
ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6106; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6106: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L6107; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6107: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6108; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6108: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L6109; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6109: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L6110; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6110: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[4] = x2; goto L6111; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6111: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (GET_MODE (operands[1]) == SFmode || GET_MODE (operands[1]) == DFmode) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && !ix86_use_fcomi_compare (GET_CODE (operands[0])) && SELECT_CC_MODE (GET_CODE (operands[0]), operands[1], operands[2]) == CCFPmode && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { return 505; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6129: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == LABEL_REF) goto L6130; x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6130: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[3] = x4; goto L6131; L6131: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6132; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6132: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L6133; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6133: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6134; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6134: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L6135; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6135: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L6136; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6136: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[4] = x2; goto L6137; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6137: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && (GET_MODE (operands[1]) == SFmode || GET_MODE (operands[1]) == DFmode) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && !ix86_use_fcomi_compare (GET_CODE (operands[0])) && SELECT_CC_MODE (GET_CODE (operands[0]), operands[1], operands[2]) == CCFPmode && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { 
return 506; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 1); goto L6154; L6155: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); switch (GET_CODE (x3)) { case LABEL_REF: goto L6156; case PC: goto L6182; default: break; } goto ret0; L6156: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[3] = x4; goto L6157; L6157: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == PC) goto L6158; goto ret0; L6158: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6159; goto ret0; L6159: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L6160; goto ret0; L6160: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6161; goto ret0; L6161: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L6162; goto ret0; L6162: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L6163; goto ret0; L6163: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[4] = x2; goto L6164; } goto ret0; L6164: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { return 507; } goto ret0; L6182: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == LABEL_REF) goto L6183; goto ret0; L6183: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); operands[3] = x4; goto L6184; L6184: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L6185; goto ret0; L6185: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L6186; goto ret0; L6186: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L6187; goto ret0; L6187: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L6188; goto ret0; L6188: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L6189; goto ret0; L6189: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[4] = x2; goto L6190; } goto ret0; L6190: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && GET_MODE (operands[1]) == GET_MODE (operands[2]) && ix86_fp_jump_nontrivial_p (GET_CODE (operands[0])))) { return 508; } goto ret0; ret0: return -1; } static int recog_38 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[2] = x2; goto L7257; } L14654: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[1] = x2; goto L7468; } goto ret0; L7257: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7258; x2 = XEXP (x1, 0); goto L14654; L7258: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7259; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7259: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand 
(x2, DImode)) { operands[0] = x2; goto L7260; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7260: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7261; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7261: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode) goto L14656; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L14656: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == ASHIFT) goto L7262; if (register_operand (x3, DImode)) { operands[3] = x3; goto L7366; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7262: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DImode)) { operands[5] = x4; goto L7263; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7263: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14658; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L14658: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x4, 0) == XWINT (x4, 0)) switch ((int) XWINT (x4, 0)) { case 3L: goto L7264; case 2L: goto L7318; default: break; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7264: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DImode)) { operands[3] = x3; goto L7265; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7265: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7266; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7266: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7267; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7267: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7268; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7268: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ASHIFT) goto L7269; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7269: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[5])) goto L7270; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7270: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (3)]) goto L7271; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7271: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DImode)) { operands[4] = x3; goto L7272; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7272: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == SET) goto L7273; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7273: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7274; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7274: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7275; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7275: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7276; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7276: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[4])) goto L7277; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7277: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7278; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7278: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[5])) goto 
L7279; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7279: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7280; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7280: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT)) { return 625; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7318: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DImode)) { operands[3] = x3; goto L7319; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7319: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7320; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7320: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7321; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7321: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7322; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7322: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == ASHIFT) goto L7323; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7323: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[5])) goto L7324; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7324: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7325; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7325: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DImode)) { operands[4] = x3; goto L7326; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7326: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == SET) goto L7327; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7327: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7328; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7328: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7329; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7329: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7330; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7330: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[4])) goto L7331; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7331: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7332; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7332: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[5])) goto L7333; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7333: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7334; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7334: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT)) { return 627; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7366: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DImode)) { operands[5] = x3; goto L7367; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7367: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7368; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); 
goto L14654; L7368: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7369; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7369: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7370; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7370: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[4] = x3; goto L7371; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7371: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[5])) goto L7372; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7372: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == SET) goto L7373; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7373: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7374; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7374: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7375; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7375: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7376; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7376: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[4])) goto L7377; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7377: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7378; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7378: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[5])) goto L7379; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7379: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7380; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7380: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT)) { return 629; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14654; L7468: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7469; goto ret0; L7469: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7470; goto ret0; L7470: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7471; } goto ret0; L7471: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L7472; goto ret0; L7472: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode) goto L14660; goto ret0; L14660: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == ASHIFT) goto L7473; if (register_operand (x3, DImode)) { operands[3] = x3; goto L7555; } goto ret0; L7473: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DImode)) { operands[4] = x4; goto L7474; } goto ret0; L7474: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_CODE (x4) == CONST_INT) goto L14662; goto ret0; L14662: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x4, 0) == XWINT (x4, 0)) switch ((int) XWINT (x4, 0)) { case 3L: goto L7475; case 2L: goto L7517; default: break; } goto ret0; L7475: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DImode)) { operands[3] = x3; goto L7476; } goto ret0; L7476: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7477; goto ret0; L7477: 
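/* recog_38 matches the TARGET_64BIT 6-element PARALLELs whose SETs advance
   DImode source/destination pointers and copy or zero a BLKmode MEM, with
   USEs of the count operand and of hard register 19 (apparently the
   direction-flag register in this port's numbering); these look like the
   rep-prefixed string move/store patterns.  The L7468 subtree entered here
   handles the store forms, returning codes 637-641.  */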
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7478; goto ret0; L7478: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7479; goto ret0; L7479: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7480; goto ret0; L7480: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7481; goto ret0; L7481: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[2] = x2; goto L7482; } goto ret0; L7482: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7483; goto ret0; L7483: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[4])) goto L7484; goto ret0; L7484: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7485; goto ret0; L7485: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT)) { return 637; } goto ret0; L7517: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DImode)) { operands[3] = x3; goto L7518; } goto ret0; L7518: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7519; goto ret0; L7519: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7520; goto ret0; L7520: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7521; goto ret0; L7521: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7522; goto ret0; L7522: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7523; goto ret0; L7523: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[2] = x2; goto L7524; } goto ret0; L7524: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7525; goto ret0; L7525: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[4])) goto L7526; goto ret0; L7526: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7527; goto ret0; L7527: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT)) { return 639; } goto ret0; L7555: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DImode)) { operands[4] = x3; goto L7556; } goto ret0; L7556: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7557; goto ret0; L7557: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7558; goto ret0; L7558: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7559; goto ret0; L7559: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7560; goto ret0; L7560: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7561; goto ret0; L7561: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, QImode)) { operands[2] = x2; goto L7562; } goto ret0; L7562: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7563; goto ret0; L7563: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[4])) goto L7564; goto ret0; L7564: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7565; goto ret0; L7565: ATTRIBUTE_UNUSED_LABEL x2 = XEXP 
(x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (TARGET_64BIT)) { return 641; } goto ret0; ret0: return -1; } static int recog_39 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; switch (XVECLEN (x0, 0)) { case 2: goto L8212; case 3: goto L264; case 5: goto L958; case 4: goto L8200; case 6: goto L7255; case 7: goto L7567; case 17: goto L9398; default: break; } goto ret0; L8212: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); switch (GET_MODE (x1)) { case SImode: goto L14084; case DImode: goto L14085; default: break; } L257: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case SET: goto L258; case CALL: goto L6265; case RETURN: goto L6319; default: break; } goto ret0; L14084: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == UNSPEC_VOLATILE && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 10) goto L8213; goto L257; L8213: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L8214; } goto L257; L8214: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L8215; x1 = XVECEXP (x0, 0, 0); goto L257; L8215: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L8216; x1 = XVECEXP (x0, 0, 0); goto L257; L8216: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MINUS) goto L8217; x1 = XVECEXP (x0, 0, 0); goto L257; L8217: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L8218; x1 = XVECEXP (x0, 0, 0); goto L257; L8218: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[0]) && (!TARGET_64BIT && TARGET_STACK_PROBE) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 685; } x1 = XVECEXP (x0, 0, 0); goto L257; L14085: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == UNSPEC_VOLATILE && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 10) goto L8233; goto L257; L8233: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L8234; } goto L257; L8234: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L8235; x1 = XVECEXP (x0, 0, 0); goto L257; L8235: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L8236; x1 = XVECEXP (x0, 0, 0); goto L257; L8236: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MINUS) goto L8237; x1 = XVECEXP (x0, 0, 0); goto L257; L8237: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L8238; x1 = XVECEXP (x0, 0, 0); goto L257; L8238: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[0]) && (TARGET_64BIT && TARGET_STACK_PROBE) && pnum_clobbers != NULL) { *pnum_clobbers = 2; return 686; } x1 = XVECEXP (x0, 0, 0); goto L257; L258: ATTRIBUTE_UNUSED_LABEL return recog_32 (x0, insn, pnum_clobbers); L6265: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == QImode && GET_CODE (x2) == MEM) goto L6266; goto ret0; L6266: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L14494; goto 
ret0; L14494: ATTRIBUTE_UNUSED_LABEL if (constant_call_address_operand (x3, SImode)) { operands[0] = x3; goto L6267; } L14495: ATTRIBUTE_UNUSED_LABEL if (call_insn_operand (x3, SImode)) { operands[0] = x3; goto L6278; } goto ret0; L6267: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); operands[1] = x2; goto L6268; L6268: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6269; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L14495; L6269: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L6270; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L14495; L6270: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L6271; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L14495; L6271: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L6272; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L14495; L6272: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[2] = x3; goto L6273; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L14495; L6273: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 515; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L14495; L6278: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); operands[1] = x2; goto L6279; L6279: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L6280; goto ret0; L6280: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L6281; goto ret0; L6281: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L6282; goto ret0; L6282: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L6283; goto ret0; L6283: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[2] = x3; goto L6284; } goto ret0; L6284: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 516; } goto ret0; L6319: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); switch (GET_CODE (x1)) { case UNSPEC: goto L14496; case USE: goto L6329; default: break; } goto ret0; L14496: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 75) goto L6320; goto ret0; L6320: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (reload_completed)) { return 525; } goto ret0; L6329: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L6330; } if (const_int_operand (x2, SImode)) { operands[0] = x2; goto L6325; } goto ret0; L6330: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return 527; } goto ret0; L6325: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return 526; } goto ret0; L264: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L265; goto ret0; L265: ATTRIBUTE_UNUSED_LABEL return recog_35 (x0, insn, pnum_clobbers); L958: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L959; goto ret0; L959: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L960; } goto ret0; L960: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == FIX) goto L961; goto ret0; L961: 
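/* L961 and the labels that follow match the 5-element (XVECLEN == 5) DImode
   fix-truncation pattern: a FIX of an FP-register operand, two HImode memory
   USEs (presumably the stored and loaded FPU control words), a DImode memory
   CLOBBER and a DFmode scratch, guarded by TARGET_80387.  */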
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L962; } goto ret0; L962: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L963; goto ret0; L963: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L964; } goto ret0; L964: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L965; goto ret0; L965: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L966; } goto ret0; L966: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L967; goto ret0; L967: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, DImode)) { operands[4] = x2; goto L968; } goto ret0; L968: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L969; goto ret0; L969: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DFmode)) { operands[5] = x2; goto L970; } goto ret0; L970: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && (!SSE_FLOAT_MODE_P (GET_MODE (operands[1])) || !TARGET_64BIT))) { return 148; } goto ret0; L8200: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); switch (GET_MODE (x1)) { case SImode: goto L14603; case DImode: goto L14604; default: break; } L972: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == SET) goto L973; goto ret0; L14603: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == UNSPEC_VOLATILE && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 10) goto L8201; goto L972; L8201: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L8202; } goto L972; L8202: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L8203; x1 = XVECEXP (x0, 0, 0); goto L972; L8203: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L8204; x1 = XVECEXP (x0, 0, 0); goto L972; L8204: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MINUS) goto L8205; x1 = XVECEXP (x0, 0, 0); goto L972; L8205: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L8206; x1 = XVECEXP (x0, 0, 0); goto L972; L8206: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[0])) goto L8207; x1 = XVECEXP (x0, 0, 0); goto L972; L8207: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L8208; x1 = XVECEXP (x0, 0, 0); goto L972; L8208: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[1] = x2; goto L8209; } x1 = XVECEXP (x0, 0, 0); goto L972; L8209: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L8210; x1 = XVECEXP (x0, 0, 0); goto L972; L8210: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_STACK_PROBE)) { return 685; } x1 = XVECEXP (x0, 0, 0); goto L972; L14604: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == UNSPEC_VOLATILE && XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 10) goto L8221; goto L972; L8221: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L8222; } goto L972; L8222: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L8223; x1 = XVECEXP (x0, 0, 0); goto L972; L8223: ATTRIBUTE_UNUSED_LABEL x2 = XEXP 
(x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L8224; x1 = XVECEXP (x0, 0, 0); goto L972; L8224: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MINUS) goto L8225; x1 = XVECEXP (x0, 0, 0); goto L972; L8225: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L8226; x1 = XVECEXP (x0, 0, 0); goto L972; L8226: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[0])) goto L8227; x1 = XVECEXP (x0, 0, 0); goto L972; L8227: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L8228; x1 = XVECEXP (x0, 0, 0); goto L972; L8228: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[1] = x2; goto L8229; } x1 = XVECEXP (x0, 0, 0); goto L972; L8229: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L8230; x1 = XVECEXP (x0, 0, 0); goto L972; L8230: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && TARGET_STACK_PROBE)) { return 686; } x1 = XVECEXP (x0, 0, 0); goto L972; L973: ATTRIBUTE_UNUSED_LABEL return recog_37 (x0, insn, pnum_clobbers); L7255: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L7256; goto ret0; L7256: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case DImode: goto L14652; case SImode: goto L14653; default: break; } goto ret0; L14652: ATTRIBUTE_UNUSED_LABEL return recog_38 (x0, insn, pnum_clobbers); L14653: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[2] = x2; goto L7284; } L14655: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[1] = x2; goto L7489; } goto ret0; L7284: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7285; x2 = XEXP (x1, 0); goto L14655; L7285: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7286; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7286: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7287; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7287: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7288; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7288: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L14664; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L14664: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == ASHIFT) goto L7289; if (register_operand (x3, SImode)) { operands[3] = x3; goto L7343; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7289: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[5] = x4; goto L7290; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7290: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7291; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7291: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SImode)) { operands[3] = x3; goto L7292; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7292: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7293; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7293: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if 
(register_operand (x2, SImode)) { operands[1] = x2; goto L7294; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7294: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7295; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7295: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == ASHIFT) goto L7296; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7296: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[5])) goto L7297; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7297: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7298; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7298: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SImode)) { operands[4] = x3; goto L7299; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7299: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == SET) goto L7300; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7300: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7301; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7301: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7302; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7302: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7303; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7303: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[4])) goto L7304; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7304: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7305; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7305: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[5])) goto L7306; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7306: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7307; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7307: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT)) { return 626; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7343: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SImode)) { operands[5] = x3; goto L7344; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7344: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7345; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7345: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L7346; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7346: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7347; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7347: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[4] = x3; goto L7348; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7348: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[5])) goto L7349; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7349: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == SET) goto L7350; x1 = XVECEXP (x0, 0, 0); 
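/* This SImode subtree of recog_39 mirrors the DImode cases in recog_38 for
   !TARGET_64BIT: the same string move/store shapes with SImode pointers,
   returning codes 626, 628, 638 and 640.  */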
x2 = XEXP (x1, 0); goto L14655; L7350: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7351; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7351: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7352; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7352: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7353; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7353: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[4])) goto L7354; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7354: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7355; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7355: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[5])) goto L7356; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7356: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7357; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7357: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT)) { return 628; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14655; L7489: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7490; goto ret0; L7490: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L7491; goto ret0; L7491: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7492; } goto ret0; L7492: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L7493; goto ret0; L7493: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L14666; goto ret0; L14666: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == ASHIFT) goto L7494; if (register_operand (x3, SImode)) { operands[3] = x3; goto L7536; } goto ret0; L7494: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[4] = x4; goto L7495; } goto ret0; L7495: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (2)]) goto L7496; goto ret0; L7496: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SImode)) { operands[3] = x3; goto L7497; } goto ret0; L7497: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7498; goto ret0; L7498: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7499; goto ret0; L7499: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7500; goto ret0; L7500: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7501; goto ret0; L7501: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7502; goto ret0; L7502: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[2] = x2; goto L7503; } goto ret0; L7503: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7504; goto ret0; L7504: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[4])) goto L7505; goto ret0; L7505: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7506; goto ret0; L7506: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == 
SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT)) { return 638; } goto ret0; L7536: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SImode)) { operands[4] = x3; goto L7537; } goto ret0; L7537: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == SET) goto L7538; goto ret0; L7538: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L7539; goto ret0; L7539: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[3])) goto L7540; goto ret0; L7540: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7541; goto ret0; L7541: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7542; goto ret0; L7542: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, QImode)) { operands[2] = x2; goto L7543; } goto ret0; L7543: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == USE) goto L7544; goto ret0; L7544: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[4])) goto L7545; goto ret0; L7545: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == USE) goto L7546; goto ret0; L7546: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19 && (!TARGET_64BIT)) { return 640; } goto ret0; L7567: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L7568; goto ret0; L7568: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L7569; goto ret0; L7569: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == CCmode) goto L14668; goto ret0; L14668: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case COMPARE: goto L7570; case IF_THEN_ELSE: goto L7612; default: break; } goto ret0; L7570: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM) goto L7571; goto ret0; L7571: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); switch (GET_MODE (x4)) { case SImode: goto L14670; case DImode: goto L14671; default: break; } goto ret0; L14670: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SImode)) { operands[4] = x4; goto L7572; } goto ret0; L7572: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM) goto L7573; goto ret0; L7573: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[5] = x4; goto L7574; } goto ret0; L7574: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L7575; goto ret0; L7575: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[6] = x2; goto L7576; } goto ret0; L7576: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7577; goto ret0; L7577: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (immediate_operand (x2, SImode)) { operands[3] = x2; goto L7578; } goto ret0; L7578: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7579; goto ret0; L7579: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19) goto L7580; goto ret0; L7580: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L7581; goto ret0; L7581: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7582; } goto ret0; L7582: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 
0, 5); if (GET_CODE (x1) == CLOBBER) goto L7583; goto ret0; L7583: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L7584; } goto ret0; L7584: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 6); if (GET_CODE (x1) == CLOBBER) goto L7585; goto ret0; L7585: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[2] = x2; goto L7586; } goto ret0; L7586: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 642; } goto ret0; L14671: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, DImode)) { operands[4] = x4; goto L7593; } goto ret0; L7593: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM) goto L7594; goto ret0; L7594: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DImode)) { operands[5] = x4; goto L7595; } goto ret0; L7595: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L7596; goto ret0; L7596: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[6] = x2; goto L7597; } goto ret0; L7597: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7598; goto ret0; L7598: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (immediate_operand (x2, SImode)) { operands[3] = x2; goto L7599; } goto ret0; L7599: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7600; goto ret0; L7600: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19) goto L7601; goto ret0; L7601: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L7602; goto ret0; L7602: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7603; } goto ret0; L7603: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == CLOBBER) goto L7604; goto ret0; L7604: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7605; } goto ret0; L7605: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 6); if (GET_CODE (x1) == CLOBBER) goto L7606; goto ret0; L7606: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[2] = x2; goto L7607; } goto ret0; L7607: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 643; } goto ret0; L7612: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == NE) goto L7613; goto ret0; L7613: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); switch (GET_MODE (x4)) { case SImode: goto L14672; case DImode: goto L14673; default: break; } goto ret0; L14672: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SImode)) { operands[6] = x4; goto L7614; } goto ret0; L7614: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7615; goto ret0; L7615: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == CCmode && GET_CODE (x3) == COMPARE) goto L7616; goto ret0; L7616: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == BLKmode && GET_CODE (x4) == MEM) goto L7617; goto ret0; L7617: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, SImode)) { operands[4] = x5; goto L7618; } goto ret0; L7618: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == BLKmode && GET_CODE (x4) == MEM) goto L7619; goto ret0; L7619: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, SImode)) { operands[5] = x5; goto L7620; } goto ret0; L7620: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if 
(x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7621; goto ret0; L7621: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L7622; goto ret0; L7622: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (immediate_operand (x2, SImode)) { operands[3] = x2; goto L7623; } goto ret0; L7623: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7624; goto ret0; L7624: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L7625; goto ret0; L7625: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7626; goto ret0; L7626: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19) goto L7627; goto ret0; L7627: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L7628; goto ret0; L7628: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[0] = x2; goto L7629; } goto ret0; L7629: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == CLOBBER) goto L7630; goto ret0; L7630: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[1] = x2; goto L7631; } goto ret0; L7631: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 6); if (GET_CODE (x1) == CLOBBER) goto L7632; goto ret0; L7632: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[2] = x2; goto L7633; } goto ret0; L7633: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return 644; } goto ret0; L14673: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, DImode)) { operands[6] = x4; goto L7640; } goto ret0; L7640: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7641; goto ret0; L7641: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == CCmode && GET_CODE (x3) == COMPARE) goto L7642; goto ret0; L7642: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == BLKmode && GET_CODE (x4) == MEM) goto L7643; goto ret0; L7643: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, DImode)) { operands[4] = x5; goto L7644; } goto ret0; L7644: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == BLKmode && GET_CODE (x4) == MEM) goto L7645; goto ret0; L7645: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, DImode)) { operands[5] = x5; goto L7646; } goto ret0; L7646: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L7647; goto ret0; L7647: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L7648; goto ret0; L7648: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (immediate_operand (x2, SImode)) { operands[3] = x2; goto L7649; } goto ret0; L7649: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L7650; goto ret0; L7650: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L7651; goto ret0; L7651: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L7652; goto ret0; L7652: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19) goto L7653; goto ret0; L7653: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L7654; goto ret0; L7654: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L7655; 
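/* The case-7 (XVECLEN == 7) subtree around here matches the BLKmode
   memory-compare patterns (codes 642-645): a COMPARE or IF_THEN_ELSE
   feeding the CCmode flags register (hard reg 17), USEs of two
   count/alignment operands and of hard register 19, and CLOBBERs of three
   register operands.  */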
} goto ret0; L7655: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == CLOBBER) goto L7656; goto ret0; L7656: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[1] = x2; goto L7657; } goto ret0; L7657: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 6); if (GET_CODE (x1) == CLOBBER) goto L7658; goto ret0; L7658: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[2] = x2; goto L7659; } goto ret0; L7659: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return 645; } goto ret0; L9398: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == UNSPEC_VOLATILE) goto L14674; goto ret0; L14674: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1) goto L14676; goto ret0; L14676: ATTRIBUTE_UNUSED_LABEL switch (XINT (x1, 1)) { case 31L: goto L9399; case 46L: goto L9524; default: break; } goto ret0; L9399: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9400; goto ret0; L9400: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L9401; goto ret0; L9401: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 8) goto L9402; goto ret0; L9402: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L9403; goto ret0; L9403: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 9) goto L9404; goto ret0; L9404: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L9405; goto ret0; L9405: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 10) goto L9406; goto ret0; L9406: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L9407; goto ret0; L9407: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 11) goto L9408; goto ret0; L9408: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == CLOBBER) goto L9409; goto ret0; L9409: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 12) goto L9410; goto ret0; L9410: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 6); if (GET_CODE (x1) == CLOBBER) goto L9411; goto ret0; L9411: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 13) goto L9412; goto ret0; L9412: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 7); if (GET_CODE (x1) == CLOBBER) goto L9413; goto ret0; L9413: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 14) goto L9414; goto ret0; L9414: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 8); if (GET_CODE (x1) == CLOBBER) goto L9415; goto ret0; L9415: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 15) goto L9416; goto ret0; L9416: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 9); if (GET_CODE (x1) == CLOBBER) goto L9417; goto ret0; L9417: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 29) goto L9418; goto ret0; L9418: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 10); if (GET_CODE (x1) == CLOBBER) goto L9419; goto ret0; L9419: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 30) goto 
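/* From label L9398 onward the pattern is an UNSPEC_VOLATILE of vector length
   1 (codes 31 and 46) followed by sixteen CLOBBERs: the eight XFmode x87
   stack registers (hard regs 8 through 15) and eight DImode registers 29
   through 36, presumably the MMX register file.  Guarded by TARGET_MMX and
   TARGET_3DNOW respectively, these look like the emms/femms patterns that
   invalidate the shared x87/MMX state.  */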
L9420; goto ret0; L9420: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 11); if (GET_CODE (x1) == CLOBBER) goto L9421; goto ret0; L9421: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 31) goto L9422; goto ret0; L9422: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 12); if (GET_CODE (x1) == CLOBBER) goto L9423; goto ret0; L9423: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 32) goto L9424; goto ret0; L9424: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 13); if (GET_CODE (x1) == CLOBBER) goto L9425; goto ret0; L9425: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 33) goto L9426; goto ret0; L9426: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 14); if (GET_CODE (x1) == CLOBBER) goto L9427; goto ret0; L9427: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 34) goto L9428; goto ret0; L9428: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 15); if (GET_CODE (x1) == CLOBBER) goto L9429; goto ret0; L9429: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 35) goto L9430; goto ret0; L9430: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 16); if (GET_CODE (x1) == CLOBBER) goto L9431; goto ret0; L9431: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 36 && (TARGET_MMX)) { return 849; } goto ret0; L9524: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9525; goto ret0; L9525: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L9526; goto ret0; L9526: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 8) goto L9527; goto ret0; L9527: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L9528; goto ret0; L9528: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 9) goto L9529; goto ret0; L9529: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L9530; goto ret0; L9530: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 10) goto L9531; goto ret0; L9531: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L9532; goto ret0; L9532: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 11) goto L9533; goto ret0; L9533: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == CLOBBER) goto L9534; goto ret0; L9534: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 12) goto L9535; goto ret0; L9535: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 6); if (GET_CODE (x1) == CLOBBER) goto L9536; goto ret0; L9536: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 13) goto L9537; goto ret0; L9537: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 7); if (GET_CODE (x1) == CLOBBER) goto L9538; goto ret0; L9538: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 14) goto L9539; goto ret0; L9539: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 8); if (GET_CODE 
(x1) == CLOBBER) goto L9540; goto ret0; L9540: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == REG && XINT (x2, 0) == 15) goto L9541; goto ret0; L9541: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 9); if (GET_CODE (x1) == CLOBBER) goto L9542; goto ret0; L9542: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 29) goto L9543; goto ret0; L9543: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 10); if (GET_CODE (x1) == CLOBBER) goto L9544; goto ret0; L9544: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 30) goto L9545; goto ret0; L9545: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 11); if (GET_CODE (x1) == CLOBBER) goto L9546; goto ret0; L9546: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 31) goto L9547; goto ret0; L9547: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 12); if (GET_CODE (x1) == CLOBBER) goto L9548; goto ret0; L9548: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 32) goto L9549; goto ret0; L9549: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 13); if (GET_CODE (x1) == CLOBBER) goto L9550; goto ret0; L9550: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 33) goto L9551; goto ret0; L9551: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 14); if (GET_CODE (x1) == CLOBBER) goto L9552; goto ret0; L9552: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 34) goto L9553; goto ret0; L9553: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 15); if (GET_CODE (x1) == CLOBBER) goto L9554; goto ret0; L9554: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 35) goto L9555; goto ret0; L9555: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 16); if (GET_CODE (x1) == CLOBBER) goto L9556; goto ret0; L9556: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode && GET_CODE (x2) == REG && XINT (x2, 0) == 36 && (TARGET_3DNOW)) { return 863; } goto ret0; ret0: return -1; } int recog (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *pnum_clobbers ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; int tem ATTRIBUTE_UNUSED; recog_data.insn = NULL_RTX; switch (GET_CODE (x0)) { case SET: goto L138; case PARALLEL: goto L13092; case CALL: goto L6286; case UNSPEC_VOLATILE: goto L13096; case RETURN: goto L13097; case CONST_INT: goto L13098; case TRAP_IF: goto L8315; case PREFETCH: goto L9720; default: break; } goto ret0; L138: ATTRIBUTE_UNUSED_LABEL return recog_19 (x0, insn, pnum_clobbers); L13092: ATTRIBUTE_UNUSED_LABEL return recog_39 (x0, insn, pnum_clobbers); L6286: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 0); if (GET_MODE (x1) == QImode && GET_CODE (x1) == MEM) goto L6287; goto ret0; L6287: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (constant_call_address_operand (x2, VOIDmode)) { operands[0] = x2; goto L6288; } L6291: ATTRIBUTE_UNUSED_LABEL switch (GET_MODE (x2)) { case SImode: goto L14678; case DImode: goto L14680; default: break; } goto ret0; L6288: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); operands[1] = x1; return 
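/* recog () is the toplevel recognizer generated by genrecog from the machine
   description.  It dispatches on GET_CODE (x0), delegates SET and PARALLEL
   bodies to the recog_NN subroutines defined earlier, and returns the insn
   code number of the matching define_insn, or -1 when nothing matches.  The
   operands array aliases recog_data.operand[], which is filled in as a side
   effect of a successful match.  */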
517; L14678: ATTRIBUTE_UNUSED_LABEL if (call_insn_operand (x2, SImode)) { operands[0] = x2; goto L6292; } L14679: ATTRIBUTE_UNUSED_LABEL if (sibcall_insn_operand (x2, SImode)) { operands[0] = x2; goto L6297; } goto ret0; L6292: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); operands[1] = x1; goto L6293; L6293: ATTRIBUTE_UNUSED_LABEL if ((!SIBLING_CALL_P (insn) && !TARGET_64BIT)) { return 518; } x1 = XEXP (x0, 0); x2 = XEXP (x1, 0); goto L14679; L6297: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); operands[1] = x1; goto L6298; L6298: ATTRIBUTE_UNUSED_LABEL if ((SIBLING_CALL_P (insn) && !TARGET_64BIT)) { return 519; } goto ret0; L14680: ATTRIBUTE_UNUSED_LABEL if (call_insn_operand (x2, DImode)) { operands[0] = x2; goto L6302; } L14682: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG && XINT (x2, 0) == 40) goto L6312; if (constant_call_address_operand (x2, DImode)) { operands[0] = x2; goto L6307; } goto ret0; L6302: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); operands[1] = x1; goto L6303; L6303: ATTRIBUTE_UNUSED_LABEL if ((!SIBLING_CALL_P (insn) && TARGET_64BIT)) { return 520; } x1 = XEXP (x0, 0); x2 = XEXP (x1, 0); goto L14682; L6312: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); operands[0] = x1; goto L6313; L6313: ATTRIBUTE_UNUSED_LABEL if ((SIBLING_CALL_P (insn) && TARGET_64BIT)) { return 522; } goto ret0; L6307: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); operands[1] = x1; goto L6308; L6308: ATTRIBUTE_UNUSED_LABEL if ((SIBLING_CALL_P (insn) && TARGET_64BIT)) { return 521; } goto ret0; L13096: ATTRIBUTE_UNUSED_LABEL switch (XVECLEN (x0, 0)) { case 1: goto L14683; case 2: goto L14689; case 3: goto L14690; default: break; } goto ret0; L14683: ATTRIBUTE_UNUSED_LABEL switch (XINT (x0, 1)) { case 0L: goto L6315; case 68L: goto L6333; case 31L: goto L9433; case 37L: goto L9435; case 46L: goto L9558; case 57L: goto L10852; default: break; } goto ret0; L6315: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); operands[0] = x1; return 523; L6333: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); operands[0] = x1; return 529; L9433: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_MMX) && pnum_clobbers != NULL) { *pnum_clobbers = 16; return 849; } goto ret0; L9435: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (memory_operand (x1, SImode)) { operands[0] = x1; goto L9436; } goto ret0; L9436: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return 850; } goto ret0; L9558: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (TARGET_3DNOW) && pnum_clobbers != NULL) { *pnum_clobbers = 16; return 863; } goto ret0; L10852: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (address_operand (x1, VOIDmode)) { operands[0] = x1; goto L10853; } goto ret0; L10853: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return 1018; } goto ret0; L14689: ATTRIBUTE_UNUSED_LABEL if (XINT (x0, 1) == 70) goto L10863; goto ret0; L10863: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (register_operand (x1, SImode)) { operands[0] = x1; goto L10864; } goto ret0; L10864: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (register_operand (x1, SImode)) { operands[1] = x1; goto L10865; } goto ret0; L10865: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1021; } goto ret0; L14690: ATTRIBUTE_UNUSED_LABEL if (XINT (x0, 1) == 69) goto L10867; goto ret0; L10867: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (register_operand (x1, SImode)) { operands[0] = x1; goto L10868; } goto ret0; L10868: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if 
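/* When the caller passes a non-null pnum_clobbers, recog may also accept a
   pattern whose trailing CLOBBERs are absent from x0; the cases at L9433 and
   L9558 do this, setting *pnum_clobbers = 16 to report that sixteen clobbers
   must be added before insn codes 849 and 863 (the emms/femms-style patterns
   matched earlier with their explicit CLOBBERs) are valid.  A caller-side
   sketch, not part of this file:

       int num_clobbers = 0;
       int icode = recog (PATTERN (insn), insn, &num_clobbers);
       if (icode >= 0 && num_clobbers > 0)
         {
           // caller must append num_clobbers CLOBBERs to the pattern
         }
*/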
(register_operand (x1, SImode)) { operands[1] = x1; goto L10869; } goto ret0; L10869: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (register_operand (x1, SImode)) { operands[2] = x1; goto L10870; } goto ret0; L10870: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE3)) { return 1022; } goto ret0; L13097: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return 524; } goto ret0; L13098: ATTRIBUTE_UNUSED_LABEL if (XWINT (x0, 0) == 0L) { return 528; } goto ret0; L8315: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 0); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L8316; if (comparison_operator (x1, VOIDmode)) { operands[0] = x1; goto L8319; } goto ret0; L8316: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (5)]) { return 696; } goto ret0; L8319: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L8320; goto ret0; L8320: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L8321; goto ret0; L8321: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (const_int_operand (x1, VOIDmode)) { operands[1] = x1; return 697; } goto ret0; L9720: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 0); if (address_operand (x1, SImode)) { operands[0] = x1; goto L9721; } L9725: ATTRIBUTE_UNUSED_LABEL if (address_operand (x1, DImode)) { operands[0] = x1; goto L9726; } L9730: ATTRIBUTE_UNUSED_LABEL if (address_operand (x1, SImode)) { operands[0] = x1; goto L9731; } L9734: ATTRIBUTE_UNUSED_LABEL if (address_operand (x1, DImode)) { operands[0] = x1; goto L9735; } goto ret0; L9721: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9722; x1 = XEXP (x0, 0); goto L9725; L9722: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 2); if (const_int_operand (x1, SImode)) { operands[1] = x1; goto L9723; } x1 = XEXP (x0, 0); goto L9725; L9723: ATTRIBUTE_UNUSED_LABEL if ((TARGET_PREFETCH_SSE && !TARGET_64BIT)) { return 880; } x1 = XEXP (x0, 0); goto L9725; L9726: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L9727; x1 = XEXP (x0, 0); goto L9730; L9727: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 2); if (const_int_operand (x1, SImode)) { operands[1] = x1; goto L9728; } x1 = XEXP (x0, 0); goto L9730; L9728: ATTRIBUTE_UNUSED_LABEL if ((TARGET_PREFETCH_SSE && TARGET_64BIT)) { return 881; } x1 = XEXP (x0, 0); goto L9730; L9731: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (const_int_operand (x1, SImode)) { operands[1] = x1; goto L9732; } x1 = XEXP (x0, 0); goto L9734; L9732: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 2); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (3)] && (TARGET_3DNOW && !TARGET_64BIT)) { return 882; } x1 = XEXP (x0, 0); goto L9734; L9735: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (const_int_operand (x1, SImode)) { operands[1] = x1; goto L9736; } goto ret0; L9736: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 2); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (3)] && (TARGET_3DNOW && TARGET_64BIT)) { return 883; } goto ret0; ret0: return -1; } static rtx split_1 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; rtx tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 0); switch (GET_MODE (x1)) { case CCFPmode: goto L14695; case DImode: goto L14696; case SFmode: goto L14699; case DFmode: goto L14700; case 
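/* split_1 () is reached for a body that is a single SET: it matches the
   SET_DEST at XEXP (x0, 0) and the SET_SRC at XEXP (x0, 1).  Each
   gen_split_NNN call builds the replacement insn sequence for one
   define_split and is handed back to the caller (presumably the generated
   split_insns dispatcher); falling through to ret0 returns 0, meaning no
   split applies.  Most splits here are gated on reload_completed, i.e. they
   run only after register allocation.  */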
QImode: goto L14704; default: break; } L11006: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, VOIDmode)) { operands[0] = x1; goto L11007; } L11010: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, XFmode)) { operands[0] = x1; goto L11011; } switch (GET_CODE (x1)) { case REG: goto L14709; case STRICT_LOW_PART: goto L11963; case PC: goto L11986; default: break; } L11018: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, VOIDmode)) { operands[0] = x1; goto L11019; } L13077: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, VOIDmode)) { operands[0] = x1; goto L13078; } if (register_operand (x1, VOIDmode)) { operands[0] = x1; goto L11357; } L13069: ATTRIBUTE_UNUSED_LABEL switch (GET_MODE (x1)) { case V4SFmode: goto L14710; case V2DFmode: goto L14711; case TImode: goto L14712; case TFmode: goto L14713; default: break; } goto ret0; L14695: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 18) goto L10936; goto L11006; L10936: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == CCFPmode && GET_CODE (x1) == COMPARE) goto L10937; x1 = XEXP (x0, 0); goto L11006; L10937: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SFmode)) { operands[0] = x2; goto L10938; } x1 = XEXP (x0, 0); goto L11006; L10938: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == FLOAT) goto L10939; x1 = XEXP (x0, 0); goto L11006; L10939: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L10940; } x1 = XEXP (x0, 0); goto L11006; L10940: ATTRIBUTE_UNUSED_LABEL if ((0 && TARGET_80387 && reload_completed)) { return gen_split_1044 (insn, operands); } x1 = XEXP (x0, 0); goto L11006; L14696: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, DImode)) { operands[0] = x1; goto L10953; } L14697: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, DImode)) { operands[0] = x1; goto L10961; } L14698: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, DImode)) { operands[0] = x1; goto L11076; } L14703: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DImode)) { operands[0] = x1; goto L11365; } goto L11006; L10953: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (immediate_operand (x1, DImode)) { operands[1] = x1; goto L10954; } L10957: ATTRIBUTE_UNUSED_LABEL if (general_operand (x1, DImode)) { operands[1] = x1; goto L10958; } x1 = XEXP (x0, 0); goto L14697; L10954: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (flow2_completed || (reload_completed && !flag_peephole2)) && !symbolic_operand (operands[1], DImode) && !x86_64_immediate_operand (operands[1], DImode))) { return gen_split_1054 (insn, operands); } x1 = XEXP (x0, 1); goto L10957; L10958: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && reload_completed && (! 
MMX_REG_P (operands[1]) && !SSE_REG_P (operands[1])))) { return gen_split_1055 (insn, operands); } x1 = XEXP (x0, 0); goto L14697; L10961: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_operand (x1, DImode)) { operands[1] = x1; goto L10962; } x1 = XEXP (x0, 0); goto L14698; L10962: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && reload_completed && (!MMX_REG_P (operands[0]) && !SSE_REG_P (operands[0])) && (!MMX_REG_P (operands[1]) && !SSE_REG_P (operands[1])))) { return gen_split_1056 (insn, operands); } x1 = XEXP (x0, 0); goto L14698; L11076: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode && GET_CODE (x1) == ZERO_EXTEND) goto L11077; if (immediate_operand (x1, DImode)) { operands[1] = x1; goto L10976; } x1 = XEXP (x0, 0); goto L14703; L11077: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0]) && (TARGET_64BIT)) { return gen_split_1086 (insn, operands); } x1 = XEXP (x0, 0); goto L14703; L10976: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && (flow2_completed || (reload_completed && !flag_peephole2)) && !symbolic_operand (operands[1], DImode) && !x86_64_immediate_operand (operands[1], DImode))) { return gen_split_1059 (insn, operands); } x1 = XEXP (x0, 0); goto L14703; L11365: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode && GET_CODE (x1) == ZERO_EXTEND) goto L11366; x1 = XEXP (x0, 0); goto L11006; L11366: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L11367; x1 = XEXP (x0, 0); goto L11006; L11367: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L14714; x1 = XEXP (x0, 0); goto L11006; L14714: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x3)) { case PLUS: goto L11368; case MULT: goto L11385; default: break; } x1 = XEXP (x0, 0); goto L11006; L11368: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14717; x1 = XEXP (x0, 0); goto L11006; L14717: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x4) == MULT) goto L11405; if (index_register_operand (x4, SImode)) { operands[1] = x4; goto L11369; } x1 = XEXP (x0, 0); goto L11006; L11405: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (index_register_operand (x5, SImode)) { operands[1] = x5; goto L11406; } x1 = XEXP (x0, 0); goto L11006; L11406: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 1); if (const248_operand (x5, SImode)) { operands[2] = x5; goto L11407; } x1 = XEXP (x0, 0); goto L11006; L11407: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, SImode)) { operands[3] = x4; goto L11408; } x1 = XEXP (x0, 0); goto L11006; L11408: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[4] = x3; goto L11409; } x1 = XEXP (x0, 0); goto L11006; L11409: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT&& reload_completed)) { return gen_split_1161 (insn, operands); } x1 = XEXP (x0, 0); goto L11006; L11369: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, SImode)) { operands[2] = x4; goto L11370; } x1 = XEXP (x0, 0); goto L11006; L11370: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[3] = x3; goto L11371; } x1 = XEXP (x0, 0); goto L11006; L11371: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT&& reload_completed)) { return gen_split_1157 (insn, operands); } x1 = XEXP (x0, 0); goto L11006; L11385: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (index_register_operand (x4, SImode)) { operands[1] = x4; goto L11386; } x1 = XEXP (x0, 0); goto L11006; L11386: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if 
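/* const248_operand accepts the CONST_INT scale factors 2, 4 and 8, so the
   (plus (plus (mult ...) ...) ...) shapes matched below are lea-style address
   computations.  The gen_split_1156 through gen_split_1161 splits rebuild
   such zero-extended SImode address arithmetic after reload on
   TARGET_64BIT.  */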
(const248_operand (x4, SImode)) { operands[2] = x4; goto L11387; } x1 = XEXP (x0, 0); goto L11006; L11387: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, SImode)) { operands[3] = x3; goto L11388; } x1 = XEXP (x0, 0); goto L11006; L11388: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT&& reload_completed)) { return gen_split_1159 (insn, operands); } x1 = XEXP (x0, 0); goto L11006; L14699: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, SFmode)) { operands[0] = x1; goto L10979; } if (register_operand (x1, SFmode)) { operands[0] = x1; goto L11168; } goto L11006; L10979: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode) goto L14719; x1 = XEXP (x0, 0); goto L11006; L14719: ATTRIBUTE_UNUSED_LABEL if (any_fp_register_operand (x1, SFmode)) { operands[1] = x1; goto L10984; } if (memory_operand (x1, SFmode)) { operands[1] = x1; goto L10980; } x1 = XEXP (x0, 0); goto L11006; L10984: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return gen_split_1062 (insn, operands); } L10988: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return gen_split_1063 (insn, operands); } x1 = XEXP (x0, 0); goto L11006; L10980: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && GET_CODE (operands[1]) == MEM && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))) { return gen_split_1061 (insn, operands); } x1 = XEXP (x0, 0); goto L11006; L11168: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SFmode) goto L14720; L12509: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == IF_THEN_ELSE) goto L12510; x1 = XEXP (x0, 0); goto L11006; L14720: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case FLOAT_TRUNCATE: goto L11169; case FLOAT: goto L11335; default: break; } goto L12509; L11169: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DFmode)) { operands[1] = x2; goto L11170; } goto L12509; L11170: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed && SSE_REG_P (operands[0]) && TARGET_SSE_PARTIAL_REGS_FOR_CVTSD2SS)) { return gen_split_1105 (insn, operands); } x1 = XEXP (x0, 1); goto L12509; L11335: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14722; case DImode: goto L14723; default: break; } goto L12509; L14722: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L11336; } goto L12509; L11336: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed && TARGET_SSE_PARTIAL_REGS && SSE_REG_P (operands[0]))) { return gen_split_1137 (insn, operands); } x1 = XEXP (x0, 1); goto L12509; L14723: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[1] = x2; goto L11341; } goto L12509; L11341: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed && TARGET_SSE_PARTIAL_REGS && SSE_REG_P (operands[0]))) { return gen_split_1139 (insn, operands); } x1 = XEXP (x0, 1); goto L12509; L12510: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L12511; } x1 = XEXP (x0, 0); goto L11006; L12511: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SFmode)) { operands[4] = x3; goto L12512; } x1 = XEXP (x0, 0); goto L11006; L12512: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SFmode)) { operands[5] = x3; goto L12513; } x1 = XEXP (x0, 0); goto L11006; L12513: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SFmode)) { operands[2] = x2; goto L12514; } x1 = XEXP (x0, 0); goto L11006; L12514: ATTRIBUTE_UNUSED_LABEL x2 = 
XEXP (x1, 2); if (nonmemory_operand (x2, SFmode)) { operands[3] = x2; goto L12515; } x1 = XEXP (x0, 0); goto L11006; L12515: ATTRIBUTE_UNUSED_LABEL if ((SSE_REG_P (operands[0]) && reload_completed && (const0_operand (operands[2], GET_MODE (operands[0])) || const0_operand (operands[3], GET_MODE (operands[0]))))) { return gen_split_1504 (insn, operands); } x1 = XEXP (x0, 0); goto L11006; L14700: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case MEM: goto L14724; case REG: goto L14725; default: break; } L14701: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, DFmode)) { operands[0] = x1; goto L11003; } L14707: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DFmode)) { operands[0] = x1; goto L12518; } goto L11006; L14724: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, DFmode)) { operands[0] = x1; goto L10991; } goto L14701; L10991: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DFmode) goto L14726; L10999: ATTRIBUTE_UNUSED_LABEL if (general_operand (x1, DFmode)) { operands[1] = x1; goto L11000; } x1 = XEXP (x0, 0); goto L14701; L14726: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case REG: goto L14728; case FLOAT_EXTEND: goto L11125; default: break; } goto L10999; L14728: ATTRIBUTE_UNUSED_LABEL if (any_fp_register_operand (x1, DFmode)) { operands[1] = x1; goto L10992; } goto L10999; L10992: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && reload_completed)) { return gen_split_1065 (insn, operands); } L10996: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && reload_completed)) { return gen_split_1066 (insn, operands); } x1 = XEXP (x0, 1); goto L10999; L11125: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (fp_register_operand (x2, SFmode)) { operands[1] = x2; goto L11126; } goto L10999; L11126: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return gen_split_1093 (insn, operands); } L11131: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return gen_split_1094 (insn, operands); } x1 = XEXP (x0, 1); goto L10999; L11000: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1067 (insn, operands); } x1 = XEXP (x0, 0); goto L14701; L14725: ATTRIBUTE_UNUSED_LABEL if (register_and_not_any_fp_reg_operand (x1, DFmode)) { operands[0] = x1; goto L12373; } goto L14701; L12373: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DFmode && GET_CODE (x1) == IF_THEN_ELSE) goto L12374; x1 = XEXP (x0, 0); goto L14701; L12374: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (fcmov_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L12375; } x1 = XEXP (x0, 0); goto L14701; L12375: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (flags_reg_operand (x3, VOIDmode)) { operands[4] = x3; goto L12376; } x1 = XEXP (x0, 0); goto L14701; L12376: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12377; x1 = XEXP (x0, 0); goto L14701; L12377: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonimmediate_operand (x2, DFmode)) { operands[2] = x2; goto L12378; } x1 = XEXP (x0, 0); goto L14701; L12378: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonimmediate_operand (x2, DFmode)) { operands[3] = x2; goto L12379; } x1 = XEXP (x0, 0); goto L14701; L12379: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && reload_completed)) { return gen_split_1483 (insn, operands); } x1 = XEXP (x0, 0); goto L14701; L11003: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_operand (x1, DFmode)) { operands[1] = x1; goto L11004; } x1 = XEXP (x0, 0); goto L14707; L11004: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && (GET_CODE (operands[0]) != MEM || GET_CODE 
(operands[1]) != MEM) && ! (ANY_FP_REG_P (operands[0]) || (GET_CODE (operands[0]) == SUBREG && ANY_FP_REG_P (SUBREG_REG (operands[0])))) && ! (ANY_FP_REG_P (operands[1]) || (GET_CODE (operands[1]) == SUBREG && ANY_FP_REG_P (SUBREG_REG (operands[1])))))) { return gen_split_1068 (insn, operands); } x1 = XEXP (x0, 0); goto L14707; L12518: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_CODE (x1) == IF_THEN_ELSE) goto L12519; x1 = XEXP (x0, 0); goto L11006; L12519: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L12520; } x1 = XEXP (x0, 0); goto L11006; L12520: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DFmode)) { operands[4] = x3; goto L12521; } x1 = XEXP (x0, 0); goto L11006; L12521: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DFmode)) { operands[5] = x3; goto L12522; } x1 = XEXP (x0, 0); goto L11006; L12522: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, DFmode)) { operands[2] = x2; goto L12523; } x1 = XEXP (x0, 0); goto L11006; L12523: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (nonmemory_operand (x2, DFmode)) { operands[3] = x2; goto L12524; } x1 = XEXP (x0, 0); goto L11006; L12524: ATTRIBUTE_UNUSED_LABEL if ((SSE_REG_P (operands[0]) && reload_completed && (const0_operand (operands[2], GET_MODE (operands[0])) || const0_operand (operands[3], GET_MODE (operands[0]))))) { return gen_split_1505 (insn, operands); } x1 = XEXP (x0, 0); goto L11006; L14704: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, QImode)) { operands[0] = x1; goto L11956; } L14705: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, QImode)) { operands[0] = x1; goto L12364; } goto L11006; L11956: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == QImode) goto L14729; x1 = XEXP (x0, 0); goto L14705; L14729: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case NE: goto L11957; case EQ: goto L11972; default: break; } x1 = XEXP (x0, 0); goto L14705; L11957: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L11958; } x1 = XEXP (x0, 0); goto L14705; L11958: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L11959; x1 = XEXP (x0, 0); goto L14705; L11959: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11960; x1 = XEXP (x0, 0); goto L14705; L11960: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) { return gen_split_1334 (insn, operands); } x1 = XEXP (x0, 0); goto L14705; L11972: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L11973; } x1 = XEXP (x0, 0); goto L14705; L11973: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L11974; x1 = XEXP (x0, 0); goto L14705; L11974: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11975; x1 = XEXP (x0, 0); goto L14705; L11975: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) { return gen_split_1336 (insn, operands); } x1 = XEXP (x0, 0); goto L14705; L12364: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == QImode && GET_CODE (x1) == IF_THEN_ELSE) goto L12365; x1 = XEXP (x0, 0); goto L11006; L12365: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L12366; } x1 = 
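/* Two QImode cases sit in this region: a source of the form
   (ne|eq (flags comparison) (const_int 0)) is simplified by gen_split_1334
   and gen_split_1336 (an NE against zero collapses to the inner comparison,
   EQ presumably to its reverse), and a QImode IF_THEN_ELSE on the flags
   register is split by gen_split_1480 when TARGET_CMOVE is set and partial
   register stalls are not a concern.  */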
XEXP (x0, 0); goto L11006; L12366: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (flags_reg_operand (x3, VOIDmode)) { operands[4] = x3; goto L12367; } x1 = XEXP (x0, 0); goto L11006; L12367: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12368; x1 = XEXP (x0, 0); goto L11006; L12368: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, QImode)) { operands[2] = x2; goto L12369; } x1 = XEXP (x0, 0); goto L11006; L12369: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (register_operand (x2, QImode)) { operands[3] = x2; goto L12370; } x1 = XEXP (x0, 0); goto L11006; L12370: ATTRIBUTE_UNUSED_LABEL if ((TARGET_CMOVE && !TARGET_PARTIAL_REG_STALL&& reload_completed)) { return gen_split_1480 (insn, operands); } x1 = XEXP (x0, 0); goto L11006; L11007: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_operand (x1, VOIDmode)) { operands[1] = x1; goto L11008; } x1 = XEXP (x0, 0); goto L11010; L11008: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && (GET_MODE (operands[0]) == XFmode || GET_MODE (operands[0]) == DFmode) && !ANY_FP_REG_P (operands[1]))) { return gen_split_1070 (insn, operands); } x1 = XEXP (x0, 0); goto L11010; L11011: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == XFmode) goto L14731; x1 = XEXP (x0, 0); goto L11018; L14731: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case REG: goto L14733; case FLOAT_EXTEND: goto L11135; default: break; } x1 = XEXP (x0, 0); goto L11018; L14733: ATTRIBUTE_UNUSED_LABEL if (any_fp_register_operand (x1, XFmode)) { operands[1] = x1; goto L11012; } x1 = XEXP (x0, 0); goto L11018; L11012: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT)) { return gen_split_1071 (insn, operands); } L11016: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return gen_split_1072 (insn, operands); } x1 = XEXP (x0, 0); goto L11018; L11135: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SFmode: goto L14734; case DFmode: goto L14736; default: break; } x1 = XEXP (x0, 0); goto L11018; L14734: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14738; x1 = XEXP (x0, 0); goto L11018; L14738: ATTRIBUTE_UNUSED_LABEL if (fp_register_operand (x2, SFmode)) { operands[1] = x2; return gen_split_1095 (insn, operands); } L14739: ATTRIBUTE_UNUSED_LABEL if (fp_register_operand (x2, SFmode)) { operands[1] = x2; goto L11140; } x1 = XEXP (x0, 0); goto L11018; L11140: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return gen_split_1096 (insn, operands); } x1 = XEXP (x0, 0); goto L11018; L14736: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14740; x1 = XEXP (x0, 0); goto L11018; L14740: ATTRIBUTE_UNUSED_LABEL if (fp_register_operand (x2, DFmode)) { operands[1] = x2; return gen_split_1097 (insn, operands); } L14741: ATTRIBUTE_UNUSED_LABEL if (fp_register_operand (x2, DFmode)) { operands[1] = x2; goto L11149; } x1 = XEXP (x0, 0); goto L11018; L11149: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT)) { return gen_split_1098 (insn, operands); } x1 = XEXP (x0, 0); goto L11018; L14709: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 0) == 17) goto L11497; L14708: ATTRIBUTE_UNUSED_LABEL if (fp_register_operand (x1, VOIDmode)) { operands[0] = x1; goto L11344; } goto L11018; L11497: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_CODE (x1) == COMPARE) goto L11498; x1 = XEXP (x0, 0); goto L14708; L11498: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_CODE (x2)) { case ZERO_EXTRACT: goto L11499; case AND: goto L11507; default: break; } x1 = XEXP (x0, 0); goto L14708; L11499: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 
0); if (nonimmediate_operand (x3, VOIDmode)) { operands[0] = x3; goto L11500; } x1 = XEXP (x0, 0); goto L14708; L11500: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[1] = x3; goto L11501; } x1 = XEXP (x0, 0); goto L14708; L11501: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L11502; } x1 = XEXP (x0, 0); goto L14708; L11502: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode))) { return gen_split_1208 (insn, operands); } x1 = XEXP (x0, 0); goto L14708; L11507: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[0] = x3; goto L11508; } L11514: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, VOIDmode)) { operands[0] = x3; goto L11515; } L12550: ATTRIBUTE_UNUSED_LABEL if (aligned_operand (x3, HImode)) { operands[0] = x3; goto L12551; } x1 = XEXP (x0, 0); goto L14708; L11508: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[1] = x3; goto L11509; } x3 = XEXP (x2, 0); goto L11514; L11509: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (reload_completed && QI_REG_P (operands[0]) && ((ix86_match_ccmode (insn, CCZmode) && !(INTVAL (operands[1]) & ~(255 << 8))) || (ix86_match_ccmode (insn, CCNOmode) && !(INTVAL (operands[1]) & ~(127 << 8)))) && GET_MODE (operands[0]) != QImode)) { return gen_split_1209 (insn, operands); } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L11514; L11515: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[1] = x3; goto L11516; } x3 = XEXP (x2, 0); goto L12550; L11516: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (reload_completed && (!REG_P (operands[0]) || ANY_QI_REG_P (operands[0])) && ((ix86_match_ccmode (insn, CCZmode) && !(INTVAL (operands[1]) & ~255)) || (ix86_match_ccmode (insn, CCNOmode) && !(INTVAL (operands[1]) & ~127))) && GET_MODE (operands[0]) != QImode)) { return gen_split_1210 (insn, operands); } x2 = XEXP (x1, 0); x3 = XEXP (x2, 0); goto L12550; L12551: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, HImode)) { operands[1] = x3; goto L12552; } x1 = XEXP (x0, 0); goto L14708; L12552: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (! TARGET_PARTIAL_REG_STALL && reload_completed /* Ensure that the operand will remain sign-extended immediate. */ && ix86_match_ccmode (insn, INTVAL (operands[1]) >= 0 ? CCNOmode : CCZmode) && ! TARGET_FAST_PREFIX && ! 
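/* The compare-with-zero shapes above (gen_split_1208, 1209, 1210 and 1513)
   narrow or widen flag-setting tests: a ZERO_EXTRACT or AND whose constant
   mask fits in the low eight bits, or in bits 8 through 15 of a QI_REG, is
   rewritten as a byte-sized test, while an aligned HImode test is widened,
   presumably to SImode to avoid the 16-bit operand-size prefix when
   !TARGET_FAST_PREFIX.  The ix86_match_ccmode checks keep the required flags
   semantics intact.  */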
optimize_size)) { return gen_split_1513 (insn, operands); } x1 = XEXP (x0, 0); goto L14708; L11344: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_CODE (x1) == FLOAT) goto L11345; x1 = XEXP (x0, 0); goto L11018; L11345: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L11346; } x1 = XEXP (x0, 0); goto L11018; L11346: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && FLOAT_MODE_P (GET_MODE (operands[0])))) { return gen_split_1143 (insn, operands); } x1 = XEXP (x0, 0); goto L11018; L11963: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L11964; } goto L13069; L11964: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == QImode) goto L14742; x1 = XEXP (x0, 0); goto L13069; L14742: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case NE: goto L11965; case EQ: goto L11980; default: break; } x1 = XEXP (x0, 0); goto L13069; L11965: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L11966; } x1 = XEXP (x0, 0); goto L13069; L11966: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L11967; x1 = XEXP (x0, 0); goto L13069; L11967: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11968; x1 = XEXP (x0, 0); goto L13069; L11968: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) { return gen_split_1335 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L11980: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (ix86_comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L11981; } x1 = XEXP (x0, 0); goto L13069; L11981: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L11982; x1 = XEXP (x0, 0); goto L13069; L11982: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11983; x1 = XEXP (x0, 0); goto L13069; L11983: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) { return gen_split_1337 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L11986: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case IF_THEN_ELSE: goto L11987; case UNSPEC: goto L14744; default: break; } x1 = XEXP (x0, 0); goto L13069; L11987: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_CODE (x2)) { case NE: goto L11988; case EQ: goto L11999; default: break; } x1 = XEXP (x0, 0); goto L13069; L11988: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (ix86_comparison_operator (x3, VOIDmode)) { operands[0] = x3; goto L11989; } x1 = XEXP (x0, 0); goto L13069; L11989: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_CODE (x4) == REG && XINT (x4, 0) == 17) goto L11990; x1 = XEXP (x0, 0); goto L13069; L11990: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11991; x1 = XEXP (x0, 0); goto L13069; L11991: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11992; x1 = XEXP (x0, 0); goto L13069; L11992: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == LABEL_REF) goto L11993; x1 = XEXP (x0, 0); goto L13069; L11993: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[1] = x3; goto L11994; L11994: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == PC) { return gen_split_1356 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L11999: ATTRIBUTE_UNUSED_LABEL 
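/* Conditional-jump cleanup: a (set (pc) (if_then_else (ne|eq (flags
   comparison) 0) (label_ref ...) (pc))) is collapsed by gen_split_1356 and
   gen_split_1357 into a direct jump on the inner comparison; the EQ form
   handled at L11999 below presumably uses the reversed condition.  */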
x3 = XEXP (x2, 0); if (ix86_comparison_operator (x3, VOIDmode)) { operands[0] = x3; goto L12000; } x1 = XEXP (x0, 0); goto L13069; L12000: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_CODE (x4) == REG && XINT (x4, 0) == 17) goto L12001; x1 = XEXP (x0, 0); goto L13069; L12001: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12002; x1 = XEXP (x0, 0); goto L13069; L12002: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12003; x1 = XEXP (x0, 0); goto L13069; L12003: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == LABEL_REF) goto L12004; x1 = XEXP (x0, 0); goto L13069; L12004: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); operands[1] = x3; goto L12005; L12005: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (GET_CODE (x2) == PC) { return gen_split_1357 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L14744: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 1 && XINT (x1, 1) == 76) goto L12103; x1 = XEXP (x0, 0); goto L13069; L12103: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); switch (GET_MODE (x2)) { case SImode: goto L14745; case DImode: goto L14746; default: break; } x1 = XEXP (x0, 0); goto L13069; L14745: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L12104; } x1 = XEXP (x0, 0); goto L13069; L12104: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1379 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L14746: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L12109; } x1 = XEXP (x0, 0); goto L13069; L12109: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1380 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L11019: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_operand (x1, VOIDmode)) { operands[1] = x1; goto L11020; } x1 = XEXP (x0, 0); goto L13077; L11020: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM) && GET_MODE (operands[0]) == XFmode && ! (ANY_FP_REG_P (operands[0]) || (GET_CODE (operands[0]) == SUBREG && ANY_FP_REG_P (SUBREG_REG (operands[0])))) && ! 
(ANY_FP_REG_P (operands[1]) || (GET_CODE (operands[1]) == SUBREG && ANY_FP_REG_P (SUBREG_REG (operands[1])))))) { return gen_split_1073 (insn, operands); } x1 = XEXP (x0, 0); goto L13077; L13078: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (register_operand (x1, VOIDmode)) { operands[1] = x1; goto L13079; } x1 = XEXP (x0, 0); goto L13069; L13079: ATTRIBUTE_UNUSED_LABEL if ((!TARGET_64BIT && reload_completed && (SSE_REG_P (operands[1]) || MMX_REG_P (operands[1])))) { return gen_split_1594 (insn, operands); } L13083: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && reload_completed && (SSE_REG_P (operands[1]) || MMX_REG_P (operands[1])))) { return gen_split_1595 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L11357: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); switch (GET_CODE (x1)) { case PLUS: goto L11358; case NOT: goto L12563; case IF_THEN_ELSE: goto L12568; case SUBREG: case MEM: goto L11023; default: goto L12155; } L11023: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, VOIDmode)) { operands[1] = x1; goto L11024; } L12155: ATTRIBUTE_UNUSED_LABEL if (binary_fp_operator (x1, VOIDmode)) { operands[3] = x1; goto L12156; } x1 = XEXP (x0, 0); goto L13069; L11358: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_CODE (x2)) { case PLUS: goto L11393; case MULT: goto L11376; default: break; } goto L12155; L11393: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == MULT) goto L11394; if (index_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11360; } goto L12155; L11394: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (index_register_operand (x4, VOIDmode)) { operands[1] = x4; goto L11395; } goto L12155; L11395: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const248_operand (x4, VOIDmode)) { operands[2] = x4; goto L11396; } goto L12155; L11396: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, VOIDmode)) { operands[3] = x3; goto L11397; } goto L12155; L11397: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (immediate_operand (x2, VOIDmode)) { operands[4] = x2; goto L11398; } goto L12155; L11398: ATTRIBUTE_UNUSED_LABEL if (((GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode || (TARGET_64BIT && GET_MODE (operands[0]) == SImode)) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && GET_MODE (operands[0]) == GET_MODE (operands[1]) && GET_MODE (operands[0]) == GET_MODE (operands[3])&& reload_completed)) { return gen_split_1160 (insn, operands); } x1 = XEXP (x0, 1); goto L12155; L11360: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, VOIDmode)) { operands[2] = x3; goto L11361; } goto L12155; L11361: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (immediate_operand (x2, VOIDmode)) { operands[3] = x2; goto L11362; } goto L12155; L11362: ATTRIBUTE_UNUSED_LABEL if (((GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode || (TARGET_64BIT && GET_MODE (operands[0]) == SImode)) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && GET_MODE (operands[0]) == GET_MODE (operands[1]) && GET_MODE (operands[0]) == GET_MODE (operands[2]) && (GET_MODE (operands[0]) == GET_MODE (operands[3]) || GET_MODE (operands[3]) == VOIDmode)&& reload_completed)) { return gen_split_1156 (insn, operands); } x1 = XEXP (x0, 1); goto L12155; L11376: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (index_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11377; } goto L12155; L11377: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const248_operand (x3, VOIDmode)) { operands[2] = x3; goto L11378; } goto L12155; L11378: ATTRIBUTE_UNUSED_LABEL x2 = XEXP 
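/* gen_split_1156, 1158 and 1160 rewrite QImode/HImode (and, on TARGET_64BIT,
   SImode) additions whose operands all share one mode into full-width
   address arithmetic after reload, presumably so the result can be emitted
   as a single lea; hence the (!TARGET_PARTIAL_REG_STALL || optimize_size)
   guard in their conditions.  */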
(x1, 1); if (nonmemory_operand (x2, VOIDmode)) { operands[3] = x2; goto L11379; } goto L12155; L11379: ATTRIBUTE_UNUSED_LABEL if (((GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode || (TARGET_64BIT && GET_MODE (operands[0]) == SImode)) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && GET_MODE (operands[0]) == GET_MODE (operands[1]) && (GET_MODE (operands[0]) == GET_MODE (operands[3]) || GET_MODE (operands[3]) == VOIDmode)&& reload_completed)) { return gen_split_1158 (insn, operands); } x1 = XEXP (x0, 1); goto L12155; L12563: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L12564; } x1 = XEXP (x0, 0); goto L13069; L12564: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_PARTIAL_REG_STALL && reload_completed && (GET_MODE (operands[0]) == HImode || (GET_MODE (operands[0]) == QImode && (TARGET_PROMOTE_QImode || optimize_size))))) { return gen_split_1515 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L12568: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (comparison_operator (x2, VOIDmode)) { operands[1] = x2; goto L12569; } x1 = XEXP (x0, 0); goto L13069; L12569: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L12570; x1 = XEXP (x0, 0); goto L13069; L12570: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12571; x1 = XEXP (x0, 0); goto L13069; L12571: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, VOIDmode)) { operands[2] = x2; goto L12572; } x1 = XEXP (x0, 0); goto L13069; L12572: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 2); if (register_operand (x2, VOIDmode)) { operands[3] = x2; goto L12573; } x1 = XEXP (x0, 0); goto L13069; L12573: ATTRIBUTE_UNUSED_LABEL if ((! TARGET_PARTIAL_REG_STALL && TARGET_CMOVE && (GET_MODE (operands[0]) == HImode || (GET_MODE (operands[0]) == QImode && (TARGET_PROMOTE_QImode || optimize_size))))) { return gen_split_1516 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L11024: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && GET_CODE (operands[1]) == MEM && (GET_MODE (operands[0]) == XFmode || GET_MODE (operands[0]) == SFmode || GET_MODE (operands[0]) == DFmode) && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))) { return gen_split_1074 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L12156: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == FLOAT) goto L12157; if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L12164; } x1 = XEXP (x0, 0); goto L13069; L12157: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L12158; } x1 = XEXP (x0, 0); goto L13069; L12158: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (register_operand (x2, VOIDmode)) { operands[2] = x2; goto L12159; } x1 = XEXP (x0, 0); goto L13069; L12159: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed && FLOAT_MODE_P (GET_MODE (operands[0])))) { return gen_split_1393 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L12164: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == FLOAT) goto L12165; x1 = XEXP (x0, 0); goto L13069; L12165: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[2] = x3; goto L12166; } x1 = XEXP (x0, 0); goto L13069; L12166: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed && FLOAT_MODE_P (GET_MODE (operands[0])))) { return gen_split_1394 (insn, operands); } x1 = XEXP (x0, 0); goto L13069; L14710: 
ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V4SFmode)) { operands[0] = x1; goto L13070; } goto ret0; L13070: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (zero_extended_scalar_load_operand (x1, V4SFmode)) { operands[1] = x1; goto L13071; } goto ret0; L13071: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE)) { return gen_split_1580 (insn, operands); } goto ret0; L14711: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, V2DFmode)) { operands[0] = x1; goto L13074; } goto ret0; L13074: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (zero_extended_scalar_load_operand (x1, V2DFmode)) { operands[1] = x1; goto L13075; } goto ret0; L13075: ATTRIBUTE_UNUSED_LABEL if ((TARGET_SSE2)) { return gen_split_1581 (insn, operands); } goto ret0; L14712: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, TImode)) { operands[0] = x1; goto L13086; } goto ret0; L13086: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_operand (x1, TImode)) { operands[1] = x1; goto L13087; } goto ret0; L13087: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && !SSE_REG_P (operands[0]) && !SSE_REG_P (operands[1]))) { return gen_split_1596 (insn, operands); } goto ret0; L14713: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, TFmode)) { operands[0] = x1; goto L13090; } goto ret0; L13090: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (general_operand (x1, TFmode)) { operands[1] = x1; goto L13091; } goto ret0; L13091: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && !SSE_REG_P (operands[0]) && !SSE_REG_P (operands[1]))) { return gen_split_1597 (insn, operands); } goto ret0; ret0: return 0; } static rtx split_2 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; rtx tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14747; case HImode: goto L14748; case DImode: goto L14749; case SFmode: goto L14751; case DFmode: goto L14753; case XFmode: goto L14762; default: break; } L11813: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L11814; L11420: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L11421; } L11527: ATTRIBUTE_UNUSED_LABEL if (ext_register_operand (x2, VOIDmode)) { operands[0] = x2; goto L11528; } L11543: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L11544; } L11639: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, VOIDmode)) { operands[0] = x2; goto L11640; } L12555: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L12556; } goto ret0; L14747: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L11028; } L14755: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L11261; } goto L11813; L11028: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == ZERO_EXTEND) goto L11029; x2 = XEXP (x1, 0); goto L14755; L11029: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case HImode: goto L14765; case QImode: goto L14766; default: break; } x2 = XEXP (x1, 0); goto L14755; L14765: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, HImode)) { operands[1] = x3; goto L11030; } x2 = XEXP (x1, 0); goto L14755; L11030: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto 
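/* split_2 () takes over for PARALLEL bodies, typically a SET paired with a
   CLOBBER of the CCmode hard register 17 (the flags register).  The cases in
   this neighbourhood split zero-extensions, performed with an AND when
   TARGET_ZERO_EXTEND_WITH_AND and otherwise presumably re-emitted without
   the flags clobber, as well as FIX conversions of floating values to
   integers (gen_split_1124, gen_split_1132).  */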
L11031; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14755; L11031: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && TARGET_ZERO_EXTEND_WITH_AND && !optimize_size)) { return gen_split_1076 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14755; L14766: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L11058; } L14767: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, QImode)) { operands[1] = x3; goto L11072; } x2 = XEXP (x1, 0); goto L14755; L11058: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11059; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14767; L11059: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14768; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14767; L14768: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14770; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14767; L14770: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14772; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14767; L14772: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && (!TARGET_ZERO_EXTEND_WITH_AND || optimize_size) && (!REG_P (operands[1]) || ANY_QI_REG_P (operands[1])))) { return gen_split_1082 (insn, operands); } L14773: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && ANY_QI_REG_P (operands[0]) && (ANY_QI_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM) && (TARGET_ZERO_EXTEND_WITH_AND && !optimize_size) && !reg_overlap_mentioned_p (operands[0], operands[1]))) { return gen_split_1083 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14767; L11072: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11073; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14755; L11073: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && true_regnum (operands[0]) == true_regnum (operands[1]))) { return gen_split_1084 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14755; L11261: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == FIX) goto L11262; x2 = XEXP (x1, 0); goto L11813; L11262: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11263; } x2 = XEXP (x1, 0); goto L11813; L11263: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11264; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11264: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !reload_completed && !reload_in_progress && !SSE_FLOAT_MODE_P (GET_MODE (operands[1]))&& 1)) { return gen_split_1124 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L14748: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, HImode)) { operands[0] = x2; goto L11035; } L14756: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L11304; } goto L11813; L11035: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == ZERO_EXTEND) goto L11036; x2 = XEXP (x1, 0); goto L14756; L11036: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE 
(x3) == QImode) goto L14774; x2 = XEXP (x1, 0); goto L14756; L14774: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, QImode)) { operands[1] = x3; goto L11037; } L14775: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, QImode)) { operands[1] = x3; goto L11051; } x2 = XEXP (x1, 0); goto L14756; L11037: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11038; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14775; L11038: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14776; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14775; L14776: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14778; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14775; L14778: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14780; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14775; L14780: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && (!TARGET_ZERO_EXTEND_WITH_AND || optimize_size) && (!REG_P (operands[1]) || ANY_QI_REG_P (operands[1])))) { return gen_split_1078 (insn, operands); } L14781: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && ANY_QI_REG_P (operands[0]) && (TARGET_ZERO_EXTEND_WITH_AND && !optimize_size) && !reg_overlap_mentioned_p (operands[0], operands[1]))) { return gen_split_1079 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14775; L11051: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11052; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14756; L11052: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && true_regnum (operands[0]) == true_regnum (operands[1]))) { return gen_split_1080 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14756; L11304: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == FIX) goto L11305; x2 = XEXP (x1, 0); goto L11813; L11305: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11306; } x2 = XEXP (x1, 0); goto L11813; L11306: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11307; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11307: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return gen_split_1132 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L14749: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L11081; } L14750: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L11088; } L14757: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L11413; } goto L11813; L11081: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L11082; x2 = XEXP (x1, 0); goto L14750; L11082: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L11083; } x2 = XEXP (x1, 0); goto L14750; L11083: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11084; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14750; L11084: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && reload_completed && 
true_regnum (operands[0]) == true_regnum (operands[1]))) { return gen_split_1087 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14750; L11088: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode) goto L14782; x2 = XEXP (x1, 0); goto L14757; L14782: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case ZERO_EXTEND: goto L11089; case FIX: goto L11215; case PLUS: goto L11351; case MINUS: goto L11439; case NEG: goto L11593; default: break; } x2 = XEXP (x1, 0); goto L14757; L11089: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (general_operand (x3, SImode)) { operands[1] = x3; goto L11090; } x2 = XEXP (x1, 0); goto L14757; L11090: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11091; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11091: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && reload_completed && !SSE_REG_P (operands[0]) && !MMX_REG_P (operands[0]))) { return gen_split_1088 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11215: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11216; } x2 = XEXP (x1, 0); goto L14757; L11216: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11217; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11217: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && FLOAT_MODE_P (GET_MODE (operands[1])) && !reload_completed && !reload_in_progress && (!SSE_FLOAT_MODE_P (GET_MODE (operands[1])) || !TARGET_64BIT)&& 1)) { return gen_split_1116 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11351: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L11352; } x2 = XEXP (x1, 0); goto L14757; L11352: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, DImode)) { operands[2] = x3; goto L11353; } x2 = XEXP (x1, 0); goto L14757; L11353: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11354; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11354: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && reload_completed)) { return gen_split_1154 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11439: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L11440; } x2 = XEXP (x1, 0); goto L14757; L11440: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (general_operand (x3, DImode)) { operands[2] = x3; goto L11441; } x2 = XEXP (x1, 0); goto L14757; L11441: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11442; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11442: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && reload_completed)) { return gen_split_1171 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11593: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (general_operand (x3, DImode)) { operands[1] = x3; goto L11594; } x2 = XEXP (x1, 0); goto L14757; L11594: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto 
L11595; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11595: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && reload_completed)) { return gen_split_1234 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14757; L11413: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode) goto L14787; x2 = XEXP (x1, 0); goto L11813; L14787: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case PLUS: goto L11414; case ZERO_EXTEND: goto L11430; case ASHIFT: goto L11871; case ASHIFTRT: goto L11932; case LSHIFTRT: goto L11950; default: break; } x2 = XEXP (x1, 0); goto L11813; L11414: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L11415; } x2 = XEXP (x1, 0); goto L11813; L11415: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x86_64_nonmemory_operand (x3, DImode)) { operands[2] = x3; goto L11416; } x2 = XEXP (x1, 0); goto L11813; L11416: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11417; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11417: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && reload_completed && true_regnum (operands[0]) != true_regnum (operands[1]))) { return gen_split_1162 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11430: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == PLUS) goto L11431; if (GET_CODE (x3) == ASHIFT) goto L11914; x2 = XEXP (x1, 0); goto L11813; L11431: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L11432; } x2 = XEXP (x1, 0); goto L11813; L11432: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonmemory_operand (x4, SImode)) { operands[2] = x4; goto L11433; } x2 = XEXP (x1, 0); goto L11813; L11433: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11434; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11434: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && reload_completed && true_regnum (operands[0]) != true_regnum (operands[1]))) { return gen_split_1164 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11914: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, VOIDmode)) { operands[1] = x4; goto L11915; } x2 = XEXP (x1, 0); goto L11813; L11915: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const_int_operand (x4, QImode)) { operands[2] = x4; goto L11916; } x2 = XEXP (x1, 0); goto L11813; L11916: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11917; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11917: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && reload_completed && true_regnum (operands[0]) != true_regnum (operands[1]))) { return gen_split_1289 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11871: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L11872; } x2 = XEXP (x1, 0); goto L11813; L11872: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, QImode)) { operands[2] = x3; goto L11873; } L11890: ATTRIBUTE_UNUSED_LABEL if 
(nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L11891; } x2 = XEXP (x1, 0); goto L11813; L11873: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11874; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L11890; L11874: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && reload_completed && true_regnum (operands[0]) != true_regnum (operands[1]))) { return gen_split_1281 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 1); goto L11890; L11891: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11892; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11892: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && reload_completed)) { return gen_split_1283 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11932: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L11933; } x2 = XEXP (x1, 0); goto L11813; L11933: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L11934; } x2 = XEXP (x1, 0); goto L11813; L11934: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11935; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11935: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && reload_completed)) { return gen_split_1294 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11950: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L11951; } x2 = XEXP (x1, 0); goto L11813; L11951: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L11952; } x2 = XEXP (x1, 0); goto L11813; L11952: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11953; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11953: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && reload_completed)) { return gen_split_1301 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L14751: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, SFmode)) { operands[0] = x2; goto L11153; } L14752: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SFmode)) { operands[0] = x2; goto L11161; } L14758: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14792; goto L11813; L11153: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == FLOAT_TRUNCATE) goto L11154; x2 = XEXP (x1, 0); goto L14752; L11154: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case DFmode: goto L14794; case XFmode: goto L14795; default: break; } x2 = XEXP (x1, 0); goto L14752; L14794: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, DFmode)) { operands[1] = x3; goto L11155; } x2 = XEXP (x1, 0); goto L14752; L11155: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11156; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14752; L11156: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SFmode)) { operands[2] = x2; goto L11157; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 
0); goto L14752; L11157: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return gen_split_1103 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14752; L14795: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, XFmode)) { operands[1] = x3; goto L11184; } x2 = XEXP (x1, 0); goto L14752; L11184: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11185; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14752; L11185: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SFmode)) { operands[2] = x2; goto L11186; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14752; L11186: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return gen_split_1108 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14752; L11161: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14796; x2 = XEXP (x1, 0); goto L14758; L14796: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FLOAT_TRUNCATE: goto L11162; case UNSPEC: goto L14799; case IF_THEN_ELSE: goto L12384; default: break; } x2 = XEXP (x1, 0); goto L14758; L11162: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case DFmode: goto L14800; case XFmode: goto L14802; default: break; } x2 = XEXP (x1, 0); goto L14758; L14800: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DFmode)) { operands[1] = x3; goto L11163; } L14801: ATTRIBUTE_UNUSED_LABEL if (fp_register_operand (x3, DFmode)) { operands[1] = x3; goto L11176; } x2 = XEXP (x1, 0); goto L14758; L11163: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11164; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14801; L11164: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[2] = x2; goto L11165; L11165: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed && SSE_REG_P (operands[0]) && !STACK_REG_P (operands[1]))) { return gen_split_1104 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14801; L11176: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11177; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L11177: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SFmode)) { operands[2] = x2; goto L11178; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L11178: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed)) { return gen_split_1106 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L14802: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, XFmode)) { operands[1] = x3; goto L11192; } x2 = XEXP (x1, 0); goto L14758; L11192: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11193; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L11193: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SFmode)) { operands[2] = x2; goto L11194; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L11194: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed)) { return gen_split_1109 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L14799: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 80) goto L12189; x2 = XEXP (x1, 0); goto L14758; L12189: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, SFmode)) { operands[2] = x3; goto L12190; } x2 = XEXP (x1, 0); goto L14758; L12190: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L12191; x1 = 
XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L12191: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SFmode)) { operands[1] = x2; goto L12192; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L12192: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 81) goto L12193; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L12193: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (find_regno_note (insn, REG_UNUSED, REGNO (operands[0])) && !reload_completed && !reload_in_progress)) { return gen_split_1405 (insn, operands); } L12202: ATTRIBUTE_UNUSED_LABEL if (rtx_equal_p (x3, operands[2]) && (find_regno_note (insn, REG_UNUSED, REGNO (operands[1])) && !reload_completed && !reload_in_progress)) { return gen_split_1406 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L12384: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case LT: goto L12385; case GT: goto L12429; default: break; } x2 = XEXP (x1, 0); goto L14758; L12385: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SFmode)) { operands[1] = x4; goto L12386; } x2 = XEXP (x1, 0); goto L14758; L12386: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SFmode)) { operands[2] = x4; goto L12387; } x2 = XEXP (x1, 0); goto L14758; L12387: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SFmode)) { operands[3] = x3; goto L12388; } x2 = XEXP (x1, 0); goto L14758; L12388: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (nonimmediate_operand (x3, SFmode)) { operands[4] = x3; goto L12389; } x2 = XEXP (x1, 0); goto L14758; L12389: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12390; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L12390: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (SSE_REG_P (operands[0]) && reload_completed && ((operands_match_p (operands[1], operands[3]) && operands_match_p (operands[2], operands[4])) || (operands_match_p (operands[1], operands[4]) && operands_match_p (operands[2], operands[3]))))) { return gen_split_1486 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L12429: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SFmode)) { operands[1] = x4; goto L12430; } x2 = XEXP (x1, 0); goto L14758; L12430: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SFmode)) { operands[2] = x4; goto L12431; } x2 = XEXP (x1, 0); goto L14758; L12431: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SFmode)) { operands[3] = x3; goto L12432; } x2 = XEXP (x1, 0); goto L14758; L12432: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (nonimmediate_operand (x3, SFmode)) { operands[4] = x3; goto L12433; } x2 = XEXP (x1, 0); goto L14758; L12433: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12434; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L12434: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (SSE_REG_P (operands[0]) && reload_completed && ((operands_match_p (operands[1], operands[3]) && operands_match_p (operands[2], operands[4])) || (operands_match_p (operands[1], operands[4]) && operands_match_p (operands[2], operands[3]))))) { return gen_split_1496 (insn, operands); 
} x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14758; L14792: ATTRIBUTE_UNUSED_LABEL if (fp_register_operand (x2, SFmode)) { operands[0] = x2; goto L11626; } L14793: ATTRIBUTE_UNUSED_LABEL if (register_and_not_fp_reg_operand (x2, SFmode)) { operands[0] = x2; goto L11633; } goto L11813; L11626: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14803; x2 = XEXP (x1, 0); goto L14793; L14803: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11627; case ABS: goto L11739; case IF_THEN_ELSE: goto L12395; default: break; } x2 = XEXP (x1, 0); goto L14793; L11627: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L11628; } x2 = XEXP (x1, 0); goto L14793; L11628: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11629; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14793; L11629: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1242 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14793; L11739: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L11740; } x2 = XEXP (x1, 0); goto L14793; L11740: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11741; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14793; L11741: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1259 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14793; L12395: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case LT: goto L12396; case GT: goto L12440; default: break; } x2 = XEXP (x1, 0); goto L14793; L12396: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SFmode)) { operands[1] = x4; goto L12397; } x2 = XEXP (x1, 0); goto L14793; L12397: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, SFmode)) { operands[2] = x4; goto L12398; } x2 = XEXP (x1, 0); goto L14793; L12398: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SFmode)) { operands[3] = x3; goto L12399; } x2 = XEXP (x1, 0); goto L14793; L12399: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (register_operand (x3, SFmode)) { operands[4] = x3; goto L12400; } x2 = XEXP (x1, 0); goto L14793; L12400: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12401; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14793; L12401: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && ((operands_match_p (operands[1], operands[3]) && operands_match_p (operands[2], operands[4])) || (operands_match_p (operands[1], operands[4]) && operands_match_p (operands[2], operands[3]))))) { return gen_split_1491 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14793; L12440: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SFmode)) { operands[1] = x4; goto L12441; } x2 = XEXP (x1, 0); goto L14793; L12441: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, SFmode)) { operands[2] = x4; goto L12442; } x2 = XEXP (x1, 0); goto L14793; L12442: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SFmode)) { operands[3] = x3; goto L12443; } x2 = XEXP 
(x1, 0); goto L14793; L12443: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (register_operand (x3, SFmode)) { operands[4] = x3; goto L12444; } x2 = XEXP (x1, 0); goto L14793; L12444: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12445; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14793; L12445: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && ((operands_match_p (operands[1], operands[3]) && operands_match_p (operands[2], operands[4])) || (operands_match_p (operands[1], operands[4]) && operands_match_p (operands[2], operands[3]))))) { return gen_split_1497 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14793; L11633: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14806; x2 = XEXP (x1, 0); goto L11813; L14806: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11634; case ABS: goto L11746; default: break; } x2 = XEXP (x1, 0); goto L11813; L11634: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L11635; } x2 = XEXP (x1, 0); goto L11813; L11635: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11636; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11636: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1243 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11746: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L11747; } x2 = XEXP (x1, 0); goto L11813; L11747: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11748; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11748: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1260 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L14753: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, DFmode)) { operands[0] = x2; goto L11198; } L14754: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DFmode)) { operands[0] = x2; goto L11206; } L14760: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14808; goto L11813; L11198: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == FLOAT_TRUNCATE) goto L11199; x2 = XEXP (x1, 0); goto L14754; L11199: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L11200; } x2 = XEXP (x1, 0); goto L14754; L11200: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11201; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14754; L11201: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, DFmode)) { operands[2] = x2; goto L11202; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14754; L11202: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387)) { return gen_split_1111 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14754; L11206: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14810; x2 = XEXP (x1, 0); goto L14760; L14810: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FLOAT_TRUNCATE: goto L11207; case UNSPEC: goto L14813; case IF_THEN_ELSE: goto L12406; default: 
break; } x2 = XEXP (x1, 0); goto L14760; L11207: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L11208; } x2 = XEXP (x1, 0); goto L14760; L11208: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11209; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L11209: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, DFmode)) { operands[2] = x2; goto L11210; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L11210: ATTRIBUTE_UNUSED_LABEL if ((TARGET_80387 && reload_completed)) { return gen_split_1112 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L14813: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 80) goto L12171; x2 = XEXP (x1, 0); goto L14760; L12171: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_MODE (x3) == DFmode) goto L14815; x2 = XEXP (x1, 0); goto L14760; L14815: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == FLOAT_EXTEND) goto L12208; if (register_operand (x3, DFmode)) { operands[2] = x3; goto L12172; } x2 = XEXP (x1, 0); goto L14760; L12208: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SFmode)) { operands[2] = x4; goto L12209; } x2 = XEXP (x1, 0); goto L14760; L12209: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L12210; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12210: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[1] = x2; goto L12211; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12211: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 81) goto L12212; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12212: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (GET_MODE (x3) == DFmode && GET_CODE (x3) == FLOAT_EXTEND) goto L12213; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12213: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[2]) && (find_regno_note (insn, REG_UNUSED, REGNO (operands[0])) && !reload_completed && !reload_in_progress)) { return gen_split_1407 (insn, operands); } L12224: ATTRIBUTE_UNUSED_LABEL if (rtx_equal_p (x4, operands[2]) && (find_regno_note (insn, REG_UNUSED, REGNO (operands[1])) && !reload_completed && !reload_in_progress)) { return gen_split_1408 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12172: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L12173; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12173: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[1] = x2; goto L12174; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12174: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 81) goto L12175; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12175: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (find_regno_note (insn, REG_UNUSED, REGNO (operands[0])) && !reload_completed && !reload_in_progress)) { return gen_split_1403 (insn, operands); } L12184: ATTRIBUTE_UNUSED_LABEL if (rtx_equal_p (x3, operands[2]) && (find_regno_note (insn, REG_UNUSED, REGNO (operands[1])) && !reload_completed && !reload_in_progress)) { return gen_split_1404 (insn, operands); } x1 = XVECEXP (x0, 0, 
0); x2 = XEXP (x1, 0); goto L14760; L12406: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case LT: goto L12407; case GT: goto L12451; default: break; } x2 = XEXP (x1, 0); goto L14760; L12407: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DFmode)) { operands[1] = x4; goto L12408; } x2 = XEXP (x1, 0); goto L14760; L12408: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DFmode)) { operands[2] = x4; goto L12409; } x2 = XEXP (x1, 0); goto L14760; L12409: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DFmode)) { operands[3] = x3; goto L12410; } x2 = XEXP (x1, 0); goto L14760; L12410: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (nonimmediate_operand (x3, DFmode)) { operands[4] = x3; goto L12411; } x2 = XEXP (x1, 0); goto L14760; L12411: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12412; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12412: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (SSE_REG_P (operands[0]) && reload_completed && ((operands_match_p (operands[1], operands[3]) && operands_match_p (operands[2], operands[4])) || (operands_match_p (operands[1], operands[4]) && operands_match_p (operands[2], operands[3]))))) { return gen_split_1493 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12451: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DFmode)) { operands[1] = x4; goto L12452; } x2 = XEXP (x1, 0); goto L14760; L12452: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DFmode)) { operands[2] = x4; goto L12453; } x2 = XEXP (x1, 0); goto L14760; L12453: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DFmode)) { operands[3] = x3; goto L12454; } x2 = XEXP (x1, 0); goto L14760; L12454: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (nonimmediate_operand (x3, DFmode)) { operands[4] = x3; goto L12455; } x2 = XEXP (x1, 0); goto L14760; L12455: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12456; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L12456: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (SSE_REG_P (operands[0]) && reload_completed && ((operands_match_p (operands[1], operands[3]) && operands_match_p (operands[2], operands[4])) || (operands_match_p (operands[1], operands[4]) && operands_match_p (operands[2], operands[3]))))) { return gen_split_1499 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14760; L14808: ATTRIBUTE_UNUSED_LABEL if (fp_register_operand (x2, DFmode)) { operands[0] = x2; goto L11683; } L14809: ATTRIBUTE_UNUSED_LABEL if (register_and_not_fp_reg_operand (x2, DFmode)) { operands[0] = x2; goto L11690; } goto L11813; L11683: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14816; x2 = XEXP (x1, 0); goto L14809; L14816: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11684; case ABS: goto L11787; case IF_THEN_ELSE: goto L12417; default: break; } x2 = XEXP (x1, 0); goto L14809; L11684: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DFmode)) { operands[1] = x3; goto L11685; } x2 = XEXP (x1, 0); goto L14809; L11685: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11686; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14809; L11686: 
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1250 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14809; L11787: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DFmode)) { operands[1] = x3; goto L11788; } x2 = XEXP (x1, 0); goto L14809; L11788: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11789; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14809; L11789: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1266 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14809; L12417: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_CODE (x3)) { case LT: goto L12418; case GT: goto L12462; default: break; } x2 = XEXP (x1, 0); goto L14809; L12418: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DFmode)) { operands[1] = x4; goto L12419; } x2 = XEXP (x1, 0); goto L14809; L12419: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, DFmode)) { operands[2] = x4; goto L12420; } x2 = XEXP (x1, 0); goto L14809; L12420: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DFmode)) { operands[3] = x3; goto L12421; } x2 = XEXP (x1, 0); goto L14809; L12421: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (register_operand (x3, DFmode)) { operands[4] = x3; goto L12422; } x2 = XEXP (x1, 0); goto L14809; L12422: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12423; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14809; L12423: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && ((operands_match_p (operands[1], operands[3]) && operands_match_p (operands[2], operands[4])) || (operands_match_p (operands[1], operands[4]) && operands_match_p (operands[2], operands[3]))))) { return gen_split_1494 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14809; L12462: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DFmode)) { operands[1] = x4; goto L12463; } x2 = XEXP (x1, 0); goto L14809; L12463: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, DFmode)) { operands[2] = x4; goto L12464; } x2 = XEXP (x1, 0); goto L14809; L12464: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DFmode)) { operands[3] = x3; goto L12465; } x2 = XEXP (x1, 0); goto L14809; L12465: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (register_operand (x3, DFmode)) { operands[4] = x3; goto L12466; } x2 = XEXP (x1, 0); goto L14809; L12466: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12467; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14809; L12467: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && ((operands_match_p (operands[1], operands[3]) && operands_match_p (operands[2], operands[4])) || (operands_match_p (operands[1], operands[4]) && operands_match_p (operands[2], operands[3]))))) { return gen_split_1500 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14809; L11690: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14819; x2 = XEXP (x1, 0); goto L11813; L14819: 
ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11691; case ABS: goto L11794; default: break; } x2 = XEXP (x1, 0); goto L11813; L11691: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DFmode)) { operands[1] = x3; goto L11692; } x2 = XEXP (x1, 0); goto L11813; L11692: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11693; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11693: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_80387 && reload_completed)) { return gen_split_1251 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11794: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DFmode)) { operands[1] = x3; goto L11795; } x2 = XEXP (x1, 0); goto L11813; L11795: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11796; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11796: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_80387 && reload_completed)) { return gen_split_1267 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L14762: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14821; L14764: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, XFmode)) { operands[0] = x2; goto L12228; } goto L11813; L14821: ATTRIBUTE_UNUSED_LABEL if (fp_register_operand (x2, XFmode)) { operands[0] = x2; goto L11697; } L14822: ATTRIBUTE_UNUSED_LABEL if (register_and_not_fp_reg_operand (x2, XFmode)) { operands[0] = x2; goto L11704; } goto L14764; L11697: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode) goto L14823; x2 = XEXP (x1, 0); goto L14822; L14823: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11698; case ABS: goto L11801; default: break; } x2 = XEXP (x1, 0); goto L14822; L11698: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L11699; } x2 = XEXP (x1, 0); goto L14822; L11699: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11700; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14822; L11700: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1253 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14822; L11801: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L11802; } x2 = XEXP (x1, 0); goto L14822; L11802: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11803; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14822; L11803: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1269 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14822; L11704: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode) goto L14825; x2 = XEXP (x1, 0); goto L14764; L14825: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11705; case ABS: goto L11808; default: break; } x2 = XEXP (x1, 0); goto L14764; L11705: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L11706; } 
x2 = XEXP (x1, 0); goto L14764; L11706: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11707; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14764; L11707: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1254 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14764; L11808: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, XFmode)) { operands[1] = x3; goto L11809; } x2 = XEXP (x1, 0); goto L14764; L11809: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11810; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14764; L11810: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed)) { return gen_split_1270 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14764; L12228: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 80) goto L12229; x2 = XEXP (x1, 0); goto L11813; L12229: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L12230; } x2 = XEXP (x1, 0); goto L11813; L12230: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L12231; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L12231: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L12232; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L12232: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 81) goto L12233; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L12233: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2]) && (find_regno_note (insn, REG_UNUSED, REGNO (operands[0])) && !reload_completed && !reload_in_progress)) { return gen_split_1409 (insn, operands); } L12242: ATTRIBUTE_UNUSED_LABEL if (rtx_equal_p (x3, operands[2]) && (find_regno_note (insn, REG_UNUSED, REGNO (operands[1])) && !reload_completed && !reload_in_progress)) { return gen_split_1410 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11813; L11814: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == COMPARE) goto L11815; x2 = XEXP (x1, 0); goto L11420; L11815: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); switch (GET_MODE (x3)) { case DImode: goto L14827; case SImode: goto L14828; case HImode: goto L14829; case QImode: goto L14830; default: break; } L12537: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == AND) goto L12538; x2 = XEXP (x1, 0); goto L11420; L14827: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == NOT) goto L11816; goto L12537; L11816: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, DImode)) { operands[1] = x4; goto L11817; } goto L12537; L11817: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11818; x3 = XEXP (x2, 0); goto L12537; L11818: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L11819; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11819: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, DImode)) { operands[0] = x2; goto L11820; } x1 = XVECEXP (x0, 0, 0); 
x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11820: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == NOT) goto L11821; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11821: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (TARGET_64BIT && ix86_match_ccmode (insn, CCNOmode))) { return gen_split_1272 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L14828: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == NOT) goto L11827; goto L12537; L11827: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode) goto L14831; goto L12537; L14831: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x4, SImode)) { operands[1] = x4; goto L11828; } L14832: ATTRIBUTE_UNUSED_LABEL if (register_operand (x4, SImode)) { operands[1] = x4; goto L11839; } goto L12537; L11828: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11829; x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14832; L11829: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L11830; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14832; L11830: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L11831; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14832; L11831: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == NOT) goto L11832; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14832; L11832: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (ix86_match_ccmode (insn, CCNOmode))) { return gen_split_1274 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); x4 = XEXP (x3, 0); goto L14832; L11839: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11840; x3 = XEXP (x2, 0); goto L12537; L11840: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L11841; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11841: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[0] = x2; goto L11842; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11842: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == ZERO_EXTEND) goto L11843; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11843: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == NOT) goto L11844; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11844: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1]) && (ix86_match_ccmode (insn, CCNOmode))) { return gen_split_1275 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L14829: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == NOT) goto L11850; goto L12537; L11850: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, HImode)) { operands[1] = x4; goto L11851; } goto L12537; L11851: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11852; x3 = XEXP (x2, 0); goto L12537; L11852: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) 
== SET) goto L11853; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11853: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[0] = x2; goto L11854; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11854: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == NOT) goto L11855; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11855: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (ix86_match_ccmode (insn, CCNOmode))) { return gen_split_1277 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L14830: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == NOT) goto L11861; goto L12537; L11861: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, QImode)) { operands[1] = x4; goto L11862; } goto L12537; L11862: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L11863; x3 = XEXP (x2, 0); goto L12537; L11863: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L11864; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11864: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[0] = x2; goto L11865; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11865: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == NOT) goto L11866; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L11866: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && (ix86_match_ccmode (insn, CCNOmode))) { return gen_split_1279 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12537; L12538: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (aligned_operand (x4, VOIDmode)) { operands[1] = x4; goto L12539; } x2 = XEXP (x1, 0); goto L11420; L12539: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (const_int_operand (x4, VOIDmode)) { operands[2] = x4; goto L12540; } x2 = XEXP (x1, 0); goto L11420; L12540: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12541; x2 = XEXP (x1, 0); goto L11420; L12541: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L12542; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11420; L12542: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L12543; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11420; L12543: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == AND) goto L12544; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11420; L12544: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L12545; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11420; L12545: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2]) && (! TARGET_PARTIAL_REG_STALL && reload_completed /* Ensure that the operand will remain sign-extended immediate. */ && ix86_match_ccmode (insn, INTVAL (operands[2]) >= 0 ? CCNOmode : CCZmode) && ! optimize_size && ((GET_MODE (operands[0]) == HImode && ! 
TARGET_FAST_PREFIX) || (GET_MODE (operands[0]) == QImode && TARGET_PROMOTE_QImode)))) { return gen_split_1512 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11420; L11421: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case PLUS: goto L11422; case AND: goto L11521; default: break; } x2 = XEXP (x1, 0); goto L11527; L11422: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11423; } x2 = XEXP (x1, 0); goto L11527; L11423: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, VOIDmode)) { operands[2] = x3; goto L11424; } x2 = XEXP (x1, 0); goto L11527; L11424: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11425; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11527; L11425: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && true_regnum (operands[0]) != true_regnum (operands[1]))) { return gen_split_1163 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11527; L11521: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L11522; x2 = XEXP (x1, 0); goto L11527; L11522: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT && XWINT (x3, 0) == -65536L) goto L11523; x2 = XEXP (x1, 0); goto L11527; L11523: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11524; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11527; L11524: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (optimize_size || (TARGET_FAST_PREFIX && !TARGET_PARTIAL_REG_STALL))) { return gen_split_1213 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11527; L11528: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == AND) goto L11529; x2 = XEXP (x1, 0); goto L11543; L11529: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L11530; x2 = XEXP (x1, 0); goto L11543; L11530: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT) goto L14833; x2 = XEXP (x1, 0); goto L11543; L14833: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x3, 0) == XWINT (x3, 0)) switch ((int) XWINT (x3, 0)) { case -256L: goto L11531; case -65281L: goto L11539; default: break; } x2 = XEXP (x1, 0); goto L11543; L11531: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11532; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11543; L11532: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((optimize_size || !TARGET_PARTIAL_REG_STALL) && reload_completed)) { return gen_split_1214 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11543; L11539: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11540; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11543; L11540: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && ((optimize_size || !TARGET_PARTIAL_REG_STALL) && reload_completed)) { return gen_split_1215 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11543; L11544: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case AND: goto L11545; case IOR: goto L11561; case XOR: goto L11577; case ASHIFT: goto L11897; default: break; } L12528: 
ATTRIBUTE_UNUSED_LABEL if (promotable_binary_operator (x2, VOIDmode)) { operands[3] = x2; goto L12529; } x2 = XEXP (x1, 0); goto L11639; L11545: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11546; } L11553: ATTRIBUTE_UNUSED_LABEL if (general_operand (x3, VOIDmode)) { operands[1] = x3; goto L11554; } goto L12528; L11546: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L11547; } x3 = XEXP (x2, 0); goto L11553; L11547: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11548; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L11553; L11548: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && QI_REG_P (operands[0]) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && !(~INTVAL (operands[2]) & ~(255 << 8)) && GET_MODE (operands[0]) != QImode)) { return gen_split_1218 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L11553; L11554: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L11555; } goto L12528; L11555: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11556; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L12528; L11556: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && ANY_QI_REG_P (operands[0]) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && !(~INTVAL (operands[2]) & ~255) && !(INTVAL (operands[2]) & 128) && GET_MODE (operands[0]) != QImode)) { return gen_split_1219 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L12528; L11561: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11562; } L11569: ATTRIBUTE_UNUSED_LABEL if (general_operand (x3, VOIDmode)) { operands[1] = x3; goto L11570; } goto L12528; L11562: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L11563; } x3 = XEXP (x2, 0); goto L11569; L11563: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11564; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L11569; L11564: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && QI_REG_P (operands[0]) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && !(INTVAL (operands[2]) & ~(255 << 8)) && GET_MODE (operands[0]) != QImode)) { return gen_split_1224 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L11569; L11570: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L11571; } goto L12528; L11571: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11572; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L12528; L11572: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && ANY_QI_REG_P (operands[0]) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && !(INTVAL (operands[2]) & ~255) && (INTVAL (operands[2]) & 128) && GET_MODE (operands[0]) != QImode)) { return gen_split_1225 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L12528; 
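/* The XOR arm below mirrors the AND and IOR arms above: it matches
   (set (reg) (xor (reg) (const_int))) together with a clobber of the flags
   register and, after reload, splits it into a byte operation when the
   constant only touches bits 8-15 of a QImode-addressable register
   (gen_split_1231) or only the low byte (gen_split_1232).  */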
L11577: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11578; } L11585: ATTRIBUTE_UNUSED_LABEL if (general_operand (x3, VOIDmode)) { operands[1] = x3; goto L11586; } goto L12528; L11578: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L11579; } x3 = XEXP (x2, 0); goto L11585; L11579: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11580; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L11585; L11580: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && QI_REG_P (operands[0]) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && !(INTVAL (operands[2]) & ~(255 << 8)) && GET_MODE (operands[0]) != QImode)) { return gen_split_1231 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L11585; L11586: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[2] = x3; goto L11587; } goto L12528; L11587: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11588; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L12528; L11588: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && ANY_QI_REG_P (operands[0]) && (!TARGET_PARTIAL_REG_STALL || optimize_size) && !(INTVAL (operands[2]) & ~255) && (INTVAL (operands[2]) & 128) && GET_MODE (operands[0]) != QImode)) { return gen_split_1232 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L12528; L11897: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (index_register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11898; } L11905: ATTRIBUTE_UNUSED_LABEL if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11906; } goto L12528; L11898: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, QImode)) { operands[2] = x3; goto L11899; } x3 = XEXP (x2, 0); goto L11905; L11899: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11900; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L11905; L11900: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && true_regnum (operands[0]) != true_regnum (operands[1]))) { return gen_split_1287 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L11905; L11906: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, QImode)) { operands[2] = x3; goto L11907; } goto L12528; L11907: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11908; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L12528; L11908: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && true_regnum (operands[0]) != true_regnum (operands[1]))) { return gen_split_1288 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); goto L12528; L12529: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L12530; } x2 = XEXP (x1, 0); goto L11639; L12530: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (aligned_operand (x3, VOIDmode)) { operands[2] = x3; goto L12531; } x2 = XEXP (x1, 0); goto L11639; L12531: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 
0, 1); if (GET_CODE (x1) == CLOBBER) goto L12532; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11639; L12532: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (! TARGET_PARTIAL_REG_STALL && reload_completed && ((GET_MODE (operands[0]) == HImode && ((!optimize_size && !TARGET_FAST_PREFIX) || GET_CODE (operands[2]) != CONST_INT || CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K'))) || (GET_MODE (operands[0]) == QImode && (TARGET_PROMOTE_QImode || optimize_size))))) { return gen_split_1511 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L11639; L11640: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); switch (GET_CODE (x2)) { case NEG: goto L11641; case ABS: goto L11753; default: break; } x2 = XEXP (x1, 0); goto L12555; L11641: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, VOIDmode)) { operands[1] = x3; goto L11642; } x2 = XEXP (x1, 0); goto L12555; L11642: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11643; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12555; L11643: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed && FLOAT_MODE_P (GET_MODE (operands[0])))) { return gen_split_1244 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12555; L11753: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, VOIDmode)) { operands[1] = x3; goto L11754; } x2 = XEXP (x1, 0); goto L12555; L11754: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11755; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12555; L11755: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_80387 && reload_completed && FLOAT_MODE_P (GET_MODE (operands[0])))) { return gen_split_1261 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12555; L12556: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == NEG) goto L12557; goto ret0; L12557: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L12558; } goto ret0; L12558: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12559; goto ret0; L12559: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (! 
TARGET_PARTIAL_REG_STALL && reload_completed && (GET_MODE (operands[0]) == HImode || (GET_MODE (operands[0]) == QImode && (TARGET_PROMOTE_QImode || optimize_size))))) { return gen_split_1514 (insn, operands); } goto ret0; ret0: return 0; } static rtx split_3 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; rtx tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case DImode: goto L14835; case SImode: goto L14837; case SFmode: goto L14838; case DFmode: goto L14840; default: break; } L12008: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == PC) goto L12009; if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L12471; } L12483: ATTRIBUTE_UNUSED_LABEL switch (GET_MODE (x2)) { case SFmode: goto L14843; case DFmode: goto L14844; default: break; } goto ret0; L14835: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, DImode)) { operands[0] = x2; goto L11095; } L14836: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L11115; } goto L12008; L11095: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == SIGN_EXTEND) goto L11096; x2 = XEXP (x1, 0); goto L14836; L11096: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L11097; } x2 = XEXP (x1, 0); goto L14836; L11097: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11098; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14836; L11098: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L11099; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14836; L11099: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11100; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14836; L11100: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[2] = x2; goto L11101; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14836; L11101: ATTRIBUTE_UNUSED_LABEL if (((reload_completed && dead_or_set_p (insn, operands[1]) && !reg_mentioned_p (operands[1], operands[0])))) { return gen_split_1090 (insn, operands); } L11111: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1091 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14836; L11115: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode) goto L14845; x2 = XEXP (x1, 0); goto L12008; L14845: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case SIGN_EXTEND: goto L11116; case DIV: goto L11447; case UDIV: goto L11473; case ASHIFT: goto L11879; case ASHIFTRT: goto L11922; case LSHIFTRT: goto L11940; case FFS: goto L12132; default: break; } x2 = XEXP (x1, 0); goto L12008; L11116: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L11117; } x2 = XEXP (x1, 0); goto L12008; L11117: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11118; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11118: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L11119; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11119: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP 
(x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11120; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11120: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[2] = x2; goto L11121; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11121: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1092 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11447: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L11448; } x2 = XEXP (x1, 0); goto L12008; L11448: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L11449; } x2 = XEXP (x1, 0); goto L12008; L11449: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L11450; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11450: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[3] = x2; goto L11451; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11451: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == MOD) goto L11452; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11452: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L11453; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11453: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L11454; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11454: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11455; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11455: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && reload_completed)) { return gen_split_1199 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11473: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L11474; } x2 = XEXP (x1, 0); goto L12008; L11474: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, DImode)) { operands[2] = x3; goto L11475; } x2 = XEXP (x1, 0); goto L12008; L11475: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L11476; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11476: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DImode)) { operands[3] = x2; goto L11477; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11477: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == UMOD) goto L11478; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11478: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L11479; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11479: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L11480; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11480: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11481; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11481: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && reload_completed)) { return gen_split_1202 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11879: 
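/* L11879, L11922 and L11940 are the DImode ASHIFT, ASHIFTRT and LSHIFTRT
   alternatives dispatched from the GET_CODE switch above.  Each matches a
   64-bit shift of a register by a nonmemory QImode count plus clobbers of an
   SImode scratch and the flags register, and is only split after reload on
   !TARGET_64BIT targets that have CMOV (gen_split_1282 / 1293 / 1300).  */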
ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L11880; } x2 = XEXP (x1, 0); goto L12008; L11880: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L11881; } x2 = XEXP (x1, 0); goto L12008; L11881: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11882; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11882: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L11883; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11883: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11884; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11884: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_CMOVE && reload_completed)) { return gen_split_1282 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11922: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L11923; } x2 = XEXP (x1, 0); goto L12008; L11923: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L11924; } x2 = XEXP (x1, 0); goto L12008; L11924: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11925; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11925: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L11926; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11926: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11927; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11927: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_CMOVE && reload_completed)) { return gen_split_1293 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11940: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L11941; } x2 = XEXP (x1, 0); goto L12008; L11941: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, QImode)) { operands[2] = x3; goto L11942; } x2 = XEXP (x1, 0); goto L12008; L11942: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L11943; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11943: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L11944; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11944: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11945; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11945: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_CMOVE && reload_completed)) { return gen_split_1300 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L12132: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L12133; } x2 = XEXP (x1, 0); goto L12008; L12133: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12134; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L12134: 
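/* Tail of the DImode FFS alternative: the remaining PARALLEL elements are a
   clobber of a DImode scratch and of the flags register, and the split
   additionally requires TARGET_64BIT, TARGET_CMOVE and reload_completed
   (gen_split_1385).  */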
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, DImode)) { operands[2] = x2; goto L12135; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L12135: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12136; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L12136: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_64BIT && TARGET_CMOVE&& reload_completed)) { return gen_split_1385 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L14837: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L11459; } L14842: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[0] = x2; goto L12122; } goto L12008; L11459: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L14852; x2 = XEXP (x1, 0); goto L14842; L14852: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case DIV: goto L11460; case UDIV: goto L11486; case FFS: goto L12114; default: break; } x2 = XEXP (x1, 0); goto L14842; L11460: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L11461; } x2 = XEXP (x1, 0); goto L14842; L11461: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L11462; } x2 = XEXP (x1, 0); goto L14842; L11462: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L11463; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11463: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[3] = x2; goto L11464; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11464: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MOD) goto L11465; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11465: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L11466; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11466: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L11467; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11467: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11468; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11468: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed)) { return gen_split_1201 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11486: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SImode)) { operands[1] = x3; goto L11487; } x2 = XEXP (x1, 0); goto L14842; L11487: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, SImode)) { operands[2] = x3; goto L11488; } x2 = XEXP (x1, 0); goto L14842; L11488: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L11489; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11489: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SImode)) { operands[3] = x2; goto L11490; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11490: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == UMOD) goto L11491; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11491: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L11492; 
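/* The UMOD half did not mirror the UDIV operands, so the combined
   divide/modulo split cannot apply; rewind to the first element of the
   PARALLEL and fall back to the remaining SImode alternatives at L14842.  */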
x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11492: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[2])) goto L11493; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11493: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11494; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L11494: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed)) { return gen_split_1203 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L12114: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L12115; } x2 = XEXP (x1, 0); goto L14842; L12115: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12116; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L12116: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[2] = x2; goto L12117; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L12117: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12118; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L12118: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_CMOVE&& reload_completed)) { return gen_split_1382 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14842; L12122: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == FFS) goto L12123; x2 = XEXP (x1, 0); goto L12008; L12123: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L12124; } x2 = XEXP (x1, 0); goto L12008; L12124: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12125; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L12125: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[2] = x2; goto L12126; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L12126: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12127; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L12127: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed)) { return gen_split_1383 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L14838: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, SFmode)) { operands[0] = x2; goto L11599; } L14839: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SFmode)) { operands[0] = x2; goto L11608; } goto L12008; L11599: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14855; x2 = XEXP (x1, 0); goto L14839; L14855: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11600; case ABS: goto L11712; default: break; } x2 = XEXP (x1, 0); goto L14839; L11600: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, SFmode)) { operands[1] = x3; goto L11601; } x2 = XEXP (x1, 0); goto L14839; L11601: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11602; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14839; L11602: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[2] = x2; goto L11603; L11603: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11604; x1 = XVECEXP (x0, 0, 
0); x2 = XEXP (x1, 0); goto L14839; L11604: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return gen_split_1239 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14839; L11712: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, SFmode)) { operands[1] = x3; goto L11713; } x2 = XEXP (x1, 0); goto L14839; L11713: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11714; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14839; L11714: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[2] = x2; goto L11715; L11715: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11716; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14839; L11716: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return gen_split_1256 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14839; L11608: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode) goto L14857; x2 = XEXP (x1, 0); goto L12008; L14857: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11609; case ABS: goto L11721; default: break; } x2 = XEXP (x1, 0); goto L12008; L11609: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L11610; } x2 = XEXP (x1, 0); goto L12008; L11610: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11611; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11611: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[2] = x2; goto L11612; L11620: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L11621; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11612: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11613; x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11620; L11613: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && !SSE_REG_P (operands[0]))) { return gen_split_1240 (insn, operands); } x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11620; L11621: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11622; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11622: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && SSE_REG_P (operands[0]))) { return gen_split_1241 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11721: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, SFmode)) { operands[1] = x3; goto L11722; } x2 = XEXP (x1, 0); goto L12008; L11722: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11723; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11723: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[2] = x2; goto L11724; L11732: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, V4SFmode)) { operands[2] = x2; goto L11733; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11724: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11725; x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11732; L11725: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && 
(reload_completed && !SSE_REG_P (operands[0]))) { return gen_split_1257 (insn, operands); } x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11732; L11733: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11734; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11734: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && SSE_REG_P (operands[0]))) { return gen_split_1258 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L14840: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, DFmode)) { operands[0] = x2; goto L11647; } L14841: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DFmode)) { operands[0] = x2; goto L11656; } goto L12008; L11647: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14859; x2 = XEXP (x1, 0); goto L14841; L14859: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11648; case ABS: goto L11760; default: break; } x2 = XEXP (x1, 0); goto L14841; L11648: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, DFmode)) { operands[1] = x3; goto L11649; } x2 = XEXP (x1, 0); goto L14841; L11649: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11650; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14841; L11650: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[2] = x2; goto L11651; L11651: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11652; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14841; L11652: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return gen_split_1246 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14841; L11760: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (memory_operand (x3, DFmode)) { operands[1] = x3; goto L11761; } x2 = XEXP (x1, 0); goto L14841; L11761: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11762; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14841; L11762: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[2] = x2; goto L11763; L11763: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11764; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14841; L11764: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return gen_split_1263 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14841; L11656: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode) goto L14861; x2 = XEXP (x1, 0); goto L12008; L14861: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case NEG: goto L11657; case ABS: goto L11769; default: break; } x2 = XEXP (x1, 0); goto L12008; L11657: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DFmode)) { operands[1] = x3; goto L11658; } x2 = XEXP (x1, 0); goto L12008; L11658: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11659; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11659: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[2] = x2; goto L11660; L11677: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L11678; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11660: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11661; x1 = XVECEXP (x0, 0, 1); x2 = 
XEXP (x1, 0); goto L11677; L11661: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14863; x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11677; L14863: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14865; x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11677; L14865: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14867; x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11677; L14867: ATTRIBUTE_UNUSED_LABEL if ((reload_completed && !SSE_REG_P (operands[0]) && (!TARGET_64BIT || FP_REG_P (operands[0])))) { return gen_split_1247 (insn, operands); } L14868: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && reload_completed && GENERAL_REG_P (operands[0]))) { return gen_split_1248 (insn, operands); } x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11677; L11678: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11679; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11679: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && SSE_REG_P (operands[0]))) { return gen_split_1249 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11769: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DFmode)) { operands[1] = x3; goto L11770; } x2 = XEXP (x1, 0); goto L12008; L11770: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11771; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11771: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[2] = x2; goto L11772; L11780: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, V2DFmode)) { operands[2] = x2; goto L11781; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11772: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11773; x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11780; L11773: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && !SSE_REG_P (operands[0]))) { return gen_split_1264 (insn, operands); } x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L11780; L11781: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L11782; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L11782: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed && SSE_REG_P (operands[0]))) { return gen_split_1265 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12008; L12009: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == IF_THEN_ELSE) goto L12010; x2 = XEXP (x1, 0); goto L12483; L12010: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (comparison_operator (x3, VOIDmode)) { operands[0] = x3; goto L12011; } x2 = XEXP (x1, 0); goto L12483; L12011: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, VOIDmode)) { operands[1] = x4; goto L12012; } x2 = XEXP (x1, 0); goto L12483; L12012: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, VOIDmode)) { operands[2] = x4; goto L12013; } x2 = XEXP (x1, 0); goto L12483; L12013: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); operands[3] = x3; goto L12014; L12014: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); operands[4] = x3; goto L12015; L12015: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12016; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto 
L12483; L12016: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L12017; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12483; L12017: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12018; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12483; L12018: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (reload_completed)) { return gen_split_1358 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12483; L12471: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == IF_THEN_ELSE) goto L12472; x2 = XEXP (x1, 0); goto L12483; L12472: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (comparison_operator (x3, VOIDmode)) { operands[1] = x3; goto L12473; } x2 = XEXP (x1, 0); goto L12483; L12473: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (nonimmediate_operand (x4, VOIDmode)) { operands[4] = x4; goto L12474; } x2 = XEXP (x1, 0); goto L12483; L12474: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (register_operand (x4, VOIDmode)) { operands[5] = x4; goto L12475; } x2 = XEXP (x1, 0); goto L12483; L12475: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonimmediate_operand (x3, VOIDmode)) { operands[2] = x3; goto L12476; } x2 = XEXP (x1, 0); goto L12483; L12476: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (nonimmediate_operand (x3, VOIDmode)) { operands[3] = x3; goto L12477; } x2 = XEXP (x1, 0); goto L12483; L12477: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12478; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12483; L12478: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[6] = x2; goto L12479; L12479: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12480; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12483; L12480: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!SSE_REG_P (operands[0]) && reload_completed && VALID_SSE_REG_MODE (GET_MODE (operands[0])))) { return gen_split_1501 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12483; L14843: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SFmode)) { operands[0] = x2; goto L12484; } goto ret0; L12484: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == IF_THEN_ELSE) goto L12485; goto ret0; L12485: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (sse_comparison_operator (x3, SFmode)) { operands[1] = x3; goto L12486; } goto ret0; L12486: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SFmode)) { operands[4] = x4; goto L12487; } goto ret0; L12487: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, SFmode)) { operands[5] = x4; goto L12488; } goto ret0; L12488: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, SFmode)) { operands[2] = x3; goto L12489; } goto ret0; L12489: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (register_operand (x3, SFmode)) { operands[3] = x3; goto L12490; } goto ret0; L12490: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12491; goto ret0; L12491: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[6] = x2; goto L12492; L12492: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12493; goto ret0; L12493: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 
&& (SSE_REG_P (operands[0]) && reload_completed)) { return gen_split_1502 (insn, operands); } goto ret0; L14844: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DFmode)) { operands[0] = x2; goto L12497; } goto ret0; L12497: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == IF_THEN_ELSE) goto L12498; goto ret0; L12498: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (sse_comparison_operator (x3, DFmode)) { operands[1] = x3; goto L12499; } goto ret0; L12499: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, DFmode)) { operands[4] = x4; goto L12500; } goto ret0; L12500: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, DFmode)) { operands[5] = x4; goto L12501; } goto ret0; L12501: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (register_operand (x3, DFmode)) { operands[2] = x3; goto L12502; } goto ret0; L12502: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (register_operand (x3, DFmode)) { operands[3] = x3; goto L12503; } goto ret0; L12503: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12504; goto ret0; L12504: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); operands[6] = x2; goto L12505; L12505: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12506; goto ret0; L12506: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (SSE_REG_P (operands[0]) && reload_completed)) { return gen_split_1503 (insn, operands); } goto ret0; ret0: return 0; } static rtx split_4 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; rtx tem ATTRIBUTE_UNUSED; switch (XVECLEN (x0, 0)) { case 2: goto L11026; case 3: goto L11093; case 5: goto L11219; case 4: goto L11278; default: break; } goto ret0; L11026: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L11027; goto ret0; L11027: ATTRIBUTE_UNUSED_LABEL return split_2 (x0, insn); L11093: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L11094; goto ret0; L11094: ATTRIBUTE_UNUSED_LABEL return split_3 (x0, insn); L11219: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L11220; goto ret0; L11220: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode) goto L14869; goto ret0; L14869: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L11221; } L14870: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, DImode)) { operands[0] = x2; goto L11235; } goto ret0; L11221: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == FIX) goto L11222; x2 = XEXP (x1, 0); goto L14870; L11222: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11223; } x2 = XEXP (x1, 0); goto L14870; L11223: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11224; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14870; L11224: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L11225; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14870; L11225: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L11226; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14870; 
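/* Continuation of the five-element DImode fix_trunc PARALLEL: after the two
   USEs of HImode memory (the FP control-word slots used by the x87
   truncation sequence) come a clobber of a DImode memory temporary and of a
   scratch.  Both the register-destination and the memory-destination forms
   split only after reload (gen_split_1117 / gen_split_1118).  */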
L11226: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L11227; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14870; L11227: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L11228; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14870; L11228: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, DImode)) { operands[4] = x2; goto L11229; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14870; L11229: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L11230; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14870; L11230: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, VOIDmode)) { operands[5] = x2; goto L11231; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14870; L11231: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1117 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14870; L11235: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == FIX) goto L11236; goto ret0; L11236: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11237; } goto ret0; L11237: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11238; goto ret0; L11238: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L11239; } goto ret0; L11239: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L11240; goto ret0; L11240: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L11241; } goto ret0; L11241: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L11242; goto ret0; L11242: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, DImode)) { operands[4] = x2; goto L11243; } goto ret0; L11243: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L11244; goto ret0; L11244: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, VOIDmode)) { operands[5] = x2; goto L11245; } goto ret0; L11245: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1118 (insn, operands); } goto ret0; L11278: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L11279; goto ret0; L11279: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14871; case HImode: goto L14873; default: break; } L12021: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == PC) goto L12022; goto ret0; L14871: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L11280; } L14872: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, SImode)) { operands[0] = x2; goto L11292; } goto L12021; L11280: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode) goto L14875; x2 = XEXP (x1, 0); goto L14872; L14875: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case FIX: goto L11281; case PLUS: goto L12141; default: break; } x2 = XEXP (x1, 0); goto L14872; L11281: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11282; } x2 = XEXP (x1, 0); goto L14872; L11282: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11283; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L11283: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto 
L11284; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L11284: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L11285; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L11285: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L11286; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L11286: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L11287; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L11287: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SImode)) { operands[4] = x2; goto L11288; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L11288: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1127 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L12141: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == UNSPEC && XVECLEN (x3, 0) == 2 && XINT (x3, 1) == 17) goto L12142; x2 = XEXP (x1, 0); goto L14872; L12142: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L12143; } x2 = XEXP (x1, 0); goto L14872; L12143: ATTRIBUTE_UNUSED_LABEL x4 = XVECEXP (x3, 0, 1); if (call_insn_operand (x4, SImode)) { operands[2] = x4; goto L12144; } x2 = XEXP (x1, 0); goto L14872; L12144: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == SImode && GET_CODE (x3) == CONST) goto L12145; x2 = XEXP (x1, 0); goto L14872; L12145: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == SImode && GET_CODE (x4) == UNSPEC && XVECLEN (x4, 0) == 1 && XINT (x4, 1) == 6) goto L12146; x2 = XEXP (x1, 0); goto L14872; L12146: ATTRIBUTE_UNUSED_LABEL x5 = XVECEXP (x4, 0, 0); if (tls_symbolic_operand (x5, SImode)) { operands[3] = x5; goto L12147; } x2 = XEXP (x1, 0); goto L14872; L12147: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12148; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L12148: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[4] = x2; goto L12149; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L12149: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12150; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L12150: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[5] = x2; goto L12151; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L12151: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L12152; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L12152: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { return gen_split_1392 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14872; L11292: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == FIX) goto L11293; x2 = XEXP (x1, 0); goto L12021; L11293: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11294; } x2 = XEXP (x1, 0); goto L12021; L11294: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11295; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11295: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L11296; } x1 = XVECEXP (x0, 0, 0); x2 = 
XEXP (x1, 0); goto L12021; L11296: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L11297; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11297: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L11298; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11298: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L11299; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11299: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, SImode)) { operands[4] = x2; goto L11300; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11300: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1128 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L14873: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, HImode)) { operands[0] = x2; goto L11311; } L14874: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, HImode)) { operands[0] = x2; goto L11323; } goto L12021; L11311: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == FIX) goto L11312; x2 = XEXP (x1, 0); goto L14874; L11312: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11313; } x2 = XEXP (x1, 0); goto L14874; L11313: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11314; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14874; L11314: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L11315; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14874; L11315: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L11316; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14874; L11316: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L11317; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14874; L11317: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L11318; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14874; L11318: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[4] = x2; goto L11319; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14874; L11319: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1133 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14874; L11323: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == FIX) goto L11324; x2 = XEXP (x1, 0); goto L12021; L11324: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, VOIDmode)) { operands[1] = x3; goto L11325; } x2 = XEXP (x1, 0); goto L12021; L11325: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L11326; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11326: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[2] = x2; goto L11327; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11327: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L11328; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11328: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[3] = x2; goto L11329; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11329: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L11330; x1 
= XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11330: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (memory_operand (x2, HImode)) { operands[4] = x2; goto L11331; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L11331: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1134 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L12021; L12022: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == IF_THEN_ELSE) goto L12039; goto ret0; L12039: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == NE) goto L12040; L12023: ATTRIBUTE_UNUSED_LABEL if (comparison_operator (x3, VOIDmode)) { operands[0] = x3; goto L12024; } goto ret0; L12040: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, SImode)) { operands[1] = x4; goto L12041; } goto L12023; L12041: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (1)]) goto L12042; goto L12023; L12042: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); operands[0] = x3; goto L12043; L12043: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (GET_CODE (x3) == PC) goto L12044; x3 = XEXP (x2, 0); goto L12023; L12044: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L12045; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12023; L12045: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[1])) goto L12046; L12063: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x2, SImode)) { operands[2] = x2; goto L12064; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12023; L12046: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L12047; x2 = XEXP (x1, 0); goto L12063; L12047: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L12048; x2 = XEXP (x1, 0); goto L12063; L12048: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (-1)]) goto L12049; x2 = XEXP (x1, 0); goto L12063; L12049: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12050; x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L12063; L12050: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[2] = x2; goto L12051; } x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L12063; L12051: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L12052; x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L12063; L12052: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_USE_LOOP && reload_completed && REGNO (operands[1]) != 2)) { return gen_split_1363 (insn, operands); } x1 = XVECEXP (x0, 0, 1); x2 = XEXP (x1, 0); goto L12063; L12064: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L12065; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12023; L12065: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1])) goto L12066; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12023; L12066: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (-1)]) goto L12067; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12023; L12067: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12068; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 
0); goto L12023; L12068: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, SImode)) { operands[3] = x2; goto L12069; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12023; L12069: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L12070; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12023; L12070: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (!TARGET_64BIT && TARGET_USE_LOOP && reload_completed && (! REG_P (operands[2]) || ! rtx_equal_p (operands[1], operands[2])))) { return gen_split_1364 (insn, operands); } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12023; L12024: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, VOIDmode)) { operands[1] = x4; goto L12025; } goto ret0; L12025: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (nonimmediate_operand (x4, VOIDmode)) { operands[2] = x4; goto L12026; } goto ret0; L12026: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); operands[3] = x3; goto L12027; L12027: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); operands[4] = x3; goto L12028; L12028: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12029; goto ret0; L12029: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 18) goto L12030; goto ret0; L12030: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12031; goto ret0; L12031: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCFPmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12032; goto ret0; L12032: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == CLOBBER) goto L12033; goto ret0; L12033: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (scratch_operand (x2, HImode)) { operands[5] = x2; goto L12034; } goto ret0; L12034: ATTRIBUTE_UNUSED_LABEL if ((reload_completed)) { return gen_split_1359 (insn, operands); } goto ret0; ret0: return 0; } rtx split_insns (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; rtx tem ATTRIBUTE_UNUSED; recog_data.insn = NULL_RTX; switch (GET_CODE (x0)) { case SET: goto L10935; case PARALLEL: goto L14691; default: break; } goto ret0; L10935: ATTRIBUTE_UNUSED_LABEL return split_1 (x0, insn); L14691: ATTRIBUTE_UNUSED_LABEL return split_4 (x0, insn); ret0: return 0; } static rtx peephole2_1 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *_pmatch_len ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; rtx tem ATTRIBUTE_UNUSED; x1 = XEXP (x0, 0); switch (GET_MODE (x1)) { case DImode: goto L14880; case SImode: goto L14884; case SFmode: goto L14885; case HImode: goto L14886; case QImode: goto L14887; default: break; } L12073: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == REG && XINT (x1, 0) == 17) goto L12074; L12634: ATTRIBUTE_UNUSED_LABEL switch (GET_MODE (x1)) { case SImode: goto L14891; case HImode: goto L14892; case QImode: goto L14893; case DImode: goto L14895; default: break; } L12719: 
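/* The peephole2_* matchers return the replacement insn sequence (or 0)
   and record in *_pmatch_len how many following insns the match
   consumed; the peephole2 pass in recog.c is what walks the insn
   stream and applies the result, roughly (sketch of the call site,
   not code in this file):

       int match_len;
       rtx seq = peephole2_insns (PATTERN (insn), insn, &match_len);

   What follows is the fallback arm for a SET whose destination is a
   STRICT_LOW_PART or a bare register: the "load 0 / load -1"
   peepholes (gen_peephole2_1539..1541), which only fire when the
   flags register is dead in the window, since the replacement
   presumably clobbers the flags.  */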
ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == STRICT_LOW_PART) goto L12720; if (register_operand (x1, VOIDmode)) { operands[0] = x1; goto L12716; } goto ret0; L14880: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, DImode)) { operands[0] = x1; goto L12582; } L14881: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, DImode)) { operands[0] = x1; goto L10966; } L14882: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DImode)) { operands[0] = x1; goto L11249; } goto L12073; L12582: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (memory_operand (x1, DImode)) { operands[1] = x1; goto L12583; } if (immediate_operand (x1, DImode)) { operands[1] = x1; goto L10945; } x1 = XEXP (x0, 0); goto L14881; L12583: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14881; x1 = PATTERN (tem); if ((! optimize_size && ! TARGET_PUSH_MEMORY)) { *_pmatch_len = 0; tem = gen_peephole2_1518 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L14881; L10945: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14881; x1 = PATTERN (tem); if ((TARGET_64BIT && !symbolic_operand (operands[1], DImode) && !x86_64_immediate_operand (operands[1], DImode))) { *_pmatch_len = 0; tem = gen_peephole2_1052 (insn, operands); if (tem != 0) return tem; } L10950: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && !symbolic_operand (operands[1], DImode) && !x86_64_immediate_operand (operands[1], DImode) && 1)) { *_pmatch_len = 0; tem = gen_peephole2_1053 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L14881; L10966: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (immediate_operand (x1, DImode)) { operands[1] = x1; goto L10967; } x1 = XEXP (x0, 0); goto L14882; L10967: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14882; x1 = PATTERN (tem); if ((TARGET_64BIT && !symbolic_operand (operands[1], DImode) && !x86_64_immediate_operand (operands[1], DImode))) { *_pmatch_len = 0; tem = gen_peephole2_1057 (insn, operands); if (tem != 0) return tem; } L10972: ATTRIBUTE_UNUSED_LABEL if ((TARGET_64BIT && !symbolic_operand (operands[1], DImode) && !x86_64_immediate_operand (operands[1], DImode) && 1)) { *_pmatch_len = 0; tem = gen_peephole2_1058 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L14882; L11249: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode && GET_CODE (x1) == FIX) goto L11250; x1 = XEXP (x0, 0); goto L12073; L11250: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SFmode: goto L14896; case DFmode: goto L14897; default: break; } x1 = XEXP (x0, 0); goto L12073; L14896: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, SFmode)) { operands[1] = x2; goto L11251; } x1 = XEXP (x0, 0); goto L12073; L11251: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12073; x1 = PATTERN (tem); if ((TARGET_K8 && !optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1119 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12073; L14897: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, DFmode)) { operands[1] = x2; goto L11257; } x1 = XEXP (x0, 0); goto L12073; L11257: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12073; x1 = PATTERN (tem); if ((TARGET_K8 && !optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1120 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12073; L14884: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, SImode)) { operands[0] = x1; goto L12577; } if (register_operand (x1, 
SImode)) { operands[0] = x1; goto L11268; } L14888: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, SImode)) { operands[0] = x1; goto L12602; } goto L12073; L12577: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (memory_operand (x1, SImode)) { operands[1] = x1; goto L12578; } x1 = XEXP (x0, 0); goto L14888; L12578: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14888; x1 = PATTERN (tem); if ((! optimize_size && ! TARGET_PUSH_MEMORY)) { *_pmatch_len = 0; tem = gen_peephole2_1517 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L14888; L11268: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode && GET_CODE (x1) == FIX) goto L11269; x1 = XEXP (x0, 0); goto L14888; L11269: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SFmode: goto L14898; case DFmode: goto L14899; default: break; } x1 = XEXP (x0, 0); goto L14888; L14898: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, SFmode)) { operands[1] = x2; goto L11270; } x1 = XEXP (x0, 0); goto L14888; L11270: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14888; x1 = PATTERN (tem); if ((TARGET_K8 && !optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1125 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L14888; L14899: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, DFmode)) { operands[1] = x2; goto L11276; } x1 = XEXP (x0, 0); goto L14888; L11276: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14888; x1 = PATTERN (tem); if ((TARGET_K8 && !optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1126 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L14888; L12602: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (! optimize_size && ! TARGET_USE_MOV0 && TARGET_SPLIT_LONG_MOVES && get_attr_length (insn) >= ix86_cost->large_insn && peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1522 (insn, operands); if (tem != 0) return tem; } L12614: ATTRIBUTE_UNUSED_LABEL if (immediate_operand (x1, SImode)) { operands[1] = x1; goto L12615; } x1 = XEXP (x0, 0); goto L12073; L12615: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12073; x1 = PATTERN (tem); if ((! optimize_size && get_attr_length (insn) >= ix86_cost->large_insn && TARGET_SPLIT_LONG_MOVES)) { *_pmatch_len = 0; tem = gen_peephole2_1525 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12073; L14885: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, SFmode)) { operands[0] = x1; goto L12587; } goto L12073; L12587: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (memory_operand (x1, SFmode)) { operands[1] = x1; goto L12588; } x1 = XEXP (x0, 0); goto L12073; L12588: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12073; x1 = PATTERN (tem); if ((! optimize_size && ! TARGET_PUSH_MEMORY)) { *_pmatch_len = 0; tem = gen_peephole2_1519 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12073; L14886: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, HImode)) { operands[0] = x1; goto L12592; } L14889: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, HImode)) { operands[0] = x1; goto L12606; } goto L12073; L12592: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (memory_operand (x1, HImode)) { operands[1] = x1; goto L12593; } x1 = XEXP (x0, 0); goto L14889; L12593: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14889; x1 = PATTERN (tem); if ((! 
optimize_size && ! TARGET_PUSH_MEMORY)) { *_pmatch_len = 0; tem = gen_peephole2_1520 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L14889; L12606: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (! optimize_size && ! TARGET_USE_MOV0 && TARGET_SPLIT_LONG_MOVES && get_attr_length (insn) >= ix86_cost->large_insn && peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1523 (insn, operands); if (tem != 0) return tem; } L12619: ATTRIBUTE_UNUSED_LABEL if (immediate_operand (x1, HImode)) { operands[1] = x1; goto L12620; } x1 = XEXP (x0, 0); goto L12073; L12620: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12073; x1 = PATTERN (tem); if ((! optimize_size && get_attr_length (insn) >= ix86_cost->large_insn && TARGET_SPLIT_LONG_MOVES)) { *_pmatch_len = 0; tem = gen_peephole2_1526 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12073; L14887: ATTRIBUTE_UNUSED_LABEL if (push_operand (x1, QImode)) { operands[0] = x1; goto L12597; } L14890: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x1, QImode)) { operands[0] = x1; goto L12610; } goto L12073; L12597: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (memory_operand (x1, QImode)) { operands[1] = x1; goto L12598; } x1 = XEXP (x0, 0); goto L14890; L12598: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14890; x1 = PATTERN (tem); if ((! optimize_size && ! TARGET_PUSH_MEMORY)) { *_pmatch_len = 0; tem = gen_peephole2_1521 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L14890; L12610: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (! optimize_size && ! TARGET_USE_MOV0 && TARGET_SPLIT_LONG_MOVES && get_attr_length (insn) >= ix86_cost->large_insn && peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1524 (insn, operands); if (tem != 0) return tem; } L12624: ATTRIBUTE_UNUSED_LABEL if (immediate_operand (x1, QImode)) { operands[1] = x1; goto L12625; } x1 = XEXP (x0, 0); goto L12073; L12625: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12073; x1 = PATTERN (tem); if ((! 
optimize_size && get_attr_length (insn) >= ix86_cost->large_insn && TARGET_SPLIT_LONG_MOVES)) { *_pmatch_len = 0; tem = gen_peephole2_1527 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12073; L12074: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); operands[0] = x1; goto L12075; L12629: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x1) == COMPARE) goto L12630; x1 = XEXP (x0, 0); goto L12634; L12075: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (1); if (tem == NULL_RTX) goto L12629; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12076; x1 = XEXP (x0, 1); goto L12629; L12076: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, QImode)) { operands[1] = x2; goto L12077; } x1 = XEXP (x0, 1); goto L12629; L12077: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (ix86_comparison_operator (x2, QImode)) { operands[2] = x2; goto L12078; } x1 = XEXP (x0, 1); goto L12629; L12078: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L12079; x1 = XEXP (x0, 1); goto L12629; L12079: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12080; x1 = XEXP (x0, 1); goto L12629; L12080: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (2); if (tem == NULL_RTX) goto L12629; x1 = PATTERN (tem); switch (GET_CODE (x1)) { case SET: goto L12081; case PARALLEL: goto L14900; default: break; } x1 = XEXP (x0, 1); goto L12629; L12081: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (q_regs_operand (x2, VOIDmode)) { operands[3] = x2; goto L12082; } x1 = XEXP (x0, 1); goto L12629; L12082: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == ZERO_EXTEND) goto L12083; x1 = XEXP (x0, 1); goto L12629; L12083: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[1]) && ((peep2_reg_dead_p (3, operands[1]) || operands_match_p (operands[1], operands[3])) && ! reg_overlap_mentioned_p (operands[3], operands[0]))) { *_pmatch_len = 2; tem = gen_peephole2_1365 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 1); goto L12629; L14900: ATTRIBUTE_UNUSED_LABEL if (XVECLEN (x1, 0) == 2) goto L12094; x1 = XEXP (x0, 1); goto L12629; L12094: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 0); if (GET_CODE (x2) == SET) goto L12095; x1 = XEXP (x0, 1); goto L12629; L12095: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (q_regs_operand (x3, VOIDmode)) { operands[3] = x3; goto L12096; } x1 = XEXP (x0, 1); goto L12629; L12096: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == ZERO_EXTEND) goto L12097; x1 = XEXP (x0, 1); goto L12629; L12097: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (rtx_equal_p (x4, operands[1])) goto L12098; x1 = XEXP (x0, 1); goto L12629; L12098: ATTRIBUTE_UNUSED_LABEL x2 = XVECEXP (x1, 0, 1); if (GET_CODE (x2) == CLOBBER) goto L12099; x1 = XEXP (x0, 1); goto L12629; L12099: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == CCmode && GET_CODE (x3) == REG && XINT (x3, 0) == 17 && ((peep2_reg_dead_p (3, operands[1]) || operands_match_p (operands[1], operands[3])) && ! 
reg_overlap_mentioned_p (operands[3], operands[0]))) { *_pmatch_len = 2; tem = gen_peephole2_1366 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 1); goto L12629; L12630: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14902; case QImode: goto L14903; case HImode: goto L14905; default: break; } x1 = XEXP (x0, 0); goto L12634; L14902: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == AND) goto L12655; if (memory_operand (x2, SImode)) { operands[0] = x2; goto L12631; } L14904: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L12880; } x1 = XEXP (x0, 0); goto L12634; L12655: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L14908; x1 = XEXP (x0, 0); goto L12634; L14908: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x3) == ZERO_EXTRACT) goto L12672; if (register_operand (x3, SImode)) { operands[0] = x3; goto L12656; } x1 = XEXP (x0, 0); goto L12634; L12672: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (ext_register_operand (x4, VOIDmode)) { operands[0] = x4; goto L12673; } x1 = XEXP (x0, 0); goto L12634; L12673: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L12674; x1 = XEXP (x0, 0); goto L12634; L12674: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 2); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (8)]) goto L12675; x1 = XEXP (x0, 0); goto L12634; L12675: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, VOIDmode)) { operands[1] = x3; goto L12676; } x1 = XEXP (x0, 0); goto L12634; L12676: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (! TARGET_PARTIAL_REG_STALL && ix86_match_ccmode (insn, CCNOmode) && true_regnum (operands[0]) != 0 && find_regno_note (insn, REG_DEAD, true_regnum (operands[0])))) { *_pmatch_len = 0; tem = gen_peephole2_1534 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12634; L12656: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[1] = x3; goto L12657; } x1 = XEXP (x0, 0); goto L12634; L12657: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && (true_regnum (operands[0]) != 0 || (GET_CODE (operands[1]) == CONST_INT && CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'K'))) && find_regno_note (insn, REG_DEAD, true_regnum (operands[0])))) { *_pmatch_len = 0; tem = gen_peephole2_1532 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12634; L12631: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (ix86_match_ccmode (insn, CCNOmode) && ! 
optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1528 (insn, operands); if (tem != 0) return tem; } x2 = XEXP (x1, 0); goto L14904; L12880: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == CONST_INT) goto L14909; x1 = XEXP (x0, 0); goto L12634; L14909: ATTRIBUTE_UNUSED_LABEL if (incdec_operand (x2, SImode)) { operands[1] = x2; goto L12881; } L14910: ATTRIBUTE_UNUSED_LABEL if (XWINT (x2, 0) == 128L && (ix86_match_ccmode (insn, CCGCmode) && find_regno_note (insn, REG_DEAD, true_regnum (operands[0])))) { *_pmatch_len = 0; tem = gen_peephole2_1561 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12634; L12881: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14910; x1 = PATTERN (tem); if ((ix86_match_ccmode (insn, CCGCmode) && find_regno_note (insn, REG_DEAD, true_regnum (operands[0])))) { *_pmatch_len = 0; tem = gen_peephole2_1558 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L14910; L14903: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == AND) goto L12663; if (register_operand (x2, QImode)) { operands[0] = x2; goto L12894; } x1 = XEXP (x0, 0); goto L12634; L12663: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, QImode)) { operands[0] = x3; goto L12664; } x1 = XEXP (x0, 0); goto L12634; L12664: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, QImode)) { operands[1] = x3; goto L12665; } x1 = XEXP (x0, 0); goto L12634; L12665: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x2 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && (! TARGET_PARTIAL_REG_STALL && ix86_match_ccmode (insn, CCNOmode) && true_regnum (operands[0]) != 0 && find_regno_note (insn, REG_DEAD, true_regnum (operands[0])))) { *_pmatch_len = 0; tem = gen_peephole2_1533 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12634; L12894: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (incdec_operand (x2, QImode)) { operands[1] = x2; goto L12895; } x1 = XEXP (x0, 0); goto L12634; L12895: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12634; x1 = PATTERN (tem); if ((ix86_match_ccmode (insn, CCGCmode) && find_regno_note (insn, REG_DEAD, true_regnum (operands[0])))) { *_pmatch_len = 0; tem = gen_peephole2_1560 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12634; L14905: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, HImode)) { operands[0] = x2; goto L12887; } x1 = XEXP (x0, 0); goto L12634; L12887: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == CONST_INT) goto L14911; x1 = XEXP (x0, 0); goto L12634; L14911: ATTRIBUTE_UNUSED_LABEL if (incdec_operand (x2, HImode)) { operands[1] = x2; goto L12888; } L14912: ATTRIBUTE_UNUSED_LABEL if (XWINT (x2, 0) == 128L && (ix86_match_ccmode (insn, CCGCmode) && find_regno_note (insn, REG_DEAD, true_regnum (operands[0])))) { *_pmatch_len = 0; tem = gen_peephole2_1562 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12634; L12888: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14912; x1 = PATTERN (tem); if ((ix86_match_ccmode (insn, CCGCmode) && find_regno_note (insn, REG_DEAD, true_regnum (operands[0])))) { *_pmatch_len = 0; tem = gen_peephole2_1559 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 1); x2 = XEXP (x1, 1); goto L14912; L14891: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, SImode)) { operands[0] = x1; goto L12635; } L14894: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, SImode)) { operands[0] 
= x1; goto L12729; } goto L12719; L12635: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode && GET_CODE (x1) == NOT) goto L12636; x1 = XEXP (x0, 0); goto L14894; L12636: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, SImode)) { operands[1] = x2; goto L12637; } x1 = XEXP (x0, 0); goto L14894; L12637: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L14894; x1 = PATTERN (tem); if ((!optimize_size && peep2_regno_dead_p (0, FLAGS_REG) && ((TARGET_PENTIUM && (GET_CODE (operands[0]) != MEM || !memory_displacement_operand (operands[0], SImode))) || (TARGET_K6 && long_memory_operand (operands[0], SImode))))) { *_pmatch_len = 0; tem = gen_peephole2_1529 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L14894; L12729: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == SImode) goto L14913; x1 = XEXP (x0, 0); goto L12719; L14913: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case PLUS: goto L12730; case SUBREG: goto L14916; case MULT: goto L12752; default: break; } x1 = XEXP (x0, 0); goto L12719; L12730: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L12731; x1 = XEXP (x0, 0); goto L12719; L12731: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (nonmemory_operand (x2, SImode)) { operands[1] = x2; goto L12732; } x1 = XEXP (x0, 0); goto L12719; L12732: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12719; x1 = PATTERN (tem); if ((peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1542 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12719; L14916: ATTRIBUTE_UNUSED_LABEL if (XINT (x1, 1) == 0) goto L12737; x1 = XEXP (x0, 0); goto L12719; L12737: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == DImode) goto L14917; x1 = XEXP (x0, 0); goto L12719; L14917: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case PLUS: goto L12738; case MULT: goto L12767; default: break; } x1 = XEXP (x0, 0); goto L12719; L12738: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L12739; } x1 = XEXP (x0, 0); goto L12719; L12739: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, DImode)) { operands[2] = x3; goto L12740; } x1 = XEXP (x0, 0); goto L12719; L12740: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12719; x1 = PATTERN (tem); if ((peep2_regno_dead_p (0, FLAGS_REG) && REGNO (operands[0]) == REGNO (operands[1]))) { *_pmatch_len = 0; tem = gen_peephole2_1543 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12719; L12767: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (register_operand (x3, DImode)) { operands[1] = x3; goto L12768; } x1 = XEXP (x0, 0); goto L12719; L12768: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, DImode)) { operands[2] = x3; goto L12769; } x1 = XEXP (x0, 0); goto L12719; L12769: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12719; x1 = PATTERN (tem); if ((exact_log2 (INTVAL (operands[2])) >= 0 && REGNO (operands[0]) == REGNO (operands[1]) && peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1547 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12719; L12752: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L12753; x1 = XEXP (x0, 0); goto L12719; L12753: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const_int_operand (x2, SImode)) 
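/* operands[1] is the constant multiplier of the reg *= const form
   matched at L12752/L12753.  The guard at L12754 (exact_log2
   (INTVAL (operands[1])) >= 0 and a dead flags register) restricts
   the peephole to power-of-two constants, presumably so the multiply
   can be rewritten as a cheaper shift.  */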
{ operands[1] = x2; goto L12754; } x1 = XEXP (x0, 0); goto L12719; L12754: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12719; x1 = PATTERN (tem); if ((exact_log2 (INTVAL (operands[1])) >= 0 && peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1545 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12719; L14892: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, HImode)) { operands[0] = x1; goto L12641; } goto L12719; L12641: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == HImode && GET_CODE (x1) == NOT) goto L12642; x1 = XEXP (x0, 0); goto L12719; L12642: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, HImode)) { operands[1] = x2; goto L12643; } x1 = XEXP (x0, 0); goto L12719; L12643: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12719; x1 = PATTERN (tem); if ((!optimize_size && peep2_regno_dead_p (0, FLAGS_REG) && ((TARGET_PENTIUM && (GET_CODE (operands[0]) != MEM || !memory_displacement_operand (operands[0], HImode))) || (TARGET_K6 && long_memory_operand (operands[0], HImode))))) { *_pmatch_len = 0; tem = gen_peephole2_1530 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12719; L14893: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x1, QImode)) { operands[0] = x1; goto L12647; } goto L12719; L12647: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == QImode && GET_CODE (x1) == NOT) goto L12648; x1 = XEXP (x0, 0); goto L12719; L12648: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (nonimmediate_operand (x2, QImode)) { operands[1] = x2; goto L12649; } x1 = XEXP (x0, 0); goto L12719; L12649: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12719; x1 = PATTERN (tem); if ((!optimize_size && peep2_regno_dead_p (0, FLAGS_REG) && ((TARGET_PENTIUM && (GET_CODE (operands[0]) != MEM || !memory_displacement_operand (operands[0], QImode))) || (TARGET_K6 && long_memory_operand (operands[0], QImode))))) { *_pmatch_len = 0; tem = gen_peephole2_1531 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12719; L14895: ATTRIBUTE_UNUSED_LABEL if (register_operand (x1, DImode)) { operands[0] = x1; goto L12744; } goto L12719; L12744: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_MODE (x1) == DImode) goto L14919; x1 = XEXP (x0, 0); goto L12719; L14919: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x1)) { case PLUS: goto L12745; case MULT: goto L12759; default: break; } x1 = XEXP (x0, 0); goto L12719; L12745: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L12746; x1 = XEXP (x0, 0); goto L12719; L12746: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (x86_64_general_operand (x2, DImode)) { operands[1] = x2; goto L12747; } x1 = XEXP (x0, 0); goto L12719; L12747: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12719; x1 = PATTERN (tem); if ((peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1544 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12719; L12759: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L12760; x1 = XEXP (x0, 0); goto L12719; L12760: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (const_int_operand (x2, DImode)) { operands[1] = x2; goto L12761; } x1 = XEXP (x0, 0); goto L12719; L12761: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (0); if (tem == NULL_RTX) goto L12719; x1 = PATTERN (tem); if ((exact_log2 (INTVAL (operands[1])) >= 0 
&& peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1546 (insn, operands); if (tem != 0) return tem; } x1 = XEXP (x0, 0); goto L12719; L12720: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L12721; } goto ret0; L12721: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (x1 == const_int_rtx[MAX_SAVED_CONST_INT + (0)] && ((GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode) && (! TARGET_USE_MOV0 || optimize_size) && peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1540 (insn, operands); if (tem != 0) return tem; } goto ret0; L12716: ATTRIBUTE_UNUSED_LABEL x1 = XEXP (x0, 1); if (GET_CODE (x1) == CONST_INT) goto L14921; goto ret0; L14921: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x1, 0) == XWINT (x1, 0)) switch ((int) XWINT (x1, 0)) { case 0L: goto L14923; case -1L: goto L14924; default: break; } goto ret0; L14923: ATTRIBUTE_UNUSED_LABEL if (((GET_MODE (operands[0]) == QImode || GET_MODE (operands[0]) == HImode || GET_MODE (operands[0]) == SImode || (GET_MODE (operands[0]) == DImode && TARGET_64BIT)) && (! TARGET_USE_MOV0 || optimize_size) && peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1539 (insn, operands); if (tem != 0) return tem; } goto ret0; L14924: ATTRIBUTE_UNUSED_LABEL if (((GET_MODE (operands[0]) == HImode || GET_MODE (operands[0]) == SImode || (GET_MODE (operands[0]) == DImode && TARGET_64BIT)) && (optimize_size || TARGET_PENTIUM) && peep2_regno_dead_p (0, FLAGS_REG))) { *_pmatch_len = 0; tem = gen_peephole2_1541 (insn, operands); if (tem != 0) return tem; } goto ret0; ret0: return 0; } static rtx peephole2_2 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *_pmatch_len ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; rtx tem ATTRIBUTE_UNUSED; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case DFmode: goto L14925; case SFmode: goto L14926; case XFmode: goto L14927; case SImode: goto L14930; case DImode: goto L14931; case HImode: goto L14934; default: break; } goto ret0; L14925: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DFmode)) { operands[0] = x2; goto L12247; } goto ret0; L12247: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 82) goto L12248; goto ret0; L12248: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, DFmode)) { operands[2] = x3; goto L12249; } goto ret0; L12249: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L12250; goto ret0; L12250: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, DFmode)) { operands[1] = x2; goto L12251; } goto ret0; L12251: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 83) goto L12252; goto ret0; L12252: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2])) goto L12253; goto ret0; L12253: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (1); if (tem == NULL_RTX) goto ret0; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12254; goto ret0; L12254: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L12255; goto ret0; L12255: 
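/* Second insn of the DFmode window: the current insn is a two-SET
   PARALLEL whose results come from UNSPECs 82 and 83 applied to the
   same input operands[2], and the following insn immediately
   overwrites operands[0] with an immediate.  The guard at L12256 only
   fires when standard_80387_constant_p classifies that constant as 2,
   presumably because the 387 produces that value for free and the
   explicit load can then be simplified away.  */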
ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (immediate_operand (x2, DFmode)) { operands[3] = x2; goto L12256; } goto ret0; L12256: ATTRIBUTE_UNUSED_LABEL if ((standard_80387_constant_p (operands[3]) == 2)) { *_pmatch_len = 1; tem = gen_peephole2_1411 (insn, operands); if (tem != 0) return tem; } goto ret0; L14926: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SFmode)) { operands[0] = x2; goto L12261; } goto ret0; L12261: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 82) goto L12262; goto ret0; L12262: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, SFmode)) { operands[2] = x3; goto L12263; } goto ret0; L12263: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L12264; goto ret0; L12264: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, SFmode)) { operands[1] = x2; goto L12265; } goto ret0; L12265: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 83) goto L12266; goto ret0; L12266: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2])) goto L12267; goto ret0; L12267: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (1); if (tem == NULL_RTX) goto ret0; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12268; goto ret0; L12268: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L12269; goto ret0; L12269: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (immediate_operand (x2, SFmode)) { operands[3] = x2; goto L12270; } goto ret0; L12270: ATTRIBUTE_UNUSED_LABEL if ((standard_80387_constant_p (operands[3]) == 2)) { *_pmatch_len = 1; tem = gen_peephole2_1413 (insn, operands); if (tem != 0) return tem; } goto ret0; L14927: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, XFmode)) { operands[0] = x2; goto L12275; } goto ret0; L12275: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 82) goto L12276; goto ret0; L12276: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (register_operand (x3, XFmode)) { operands[2] = x3; goto L12277; } goto ret0; L12277: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == SET) goto L12278; goto ret0; L12278: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, XFmode)) { operands[1] = x2; goto L12279; } goto ret0; L12279: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == XFmode && GET_CODE (x2) == UNSPEC && XVECLEN (x2, 0) == 1 && XINT (x2, 1) == 83) goto L12280; goto ret0; L12280: ATTRIBUTE_UNUSED_LABEL x3 = XVECEXP (x2, 0, 0); if (rtx_equal_p (x3, operands[2])) goto L12281; goto ret0; L12281: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (1); if (tem == NULL_RTX) goto ret0; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12282; goto ret0; L12282: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (rtx_equal_p (x2, operands[0])) goto L12283; goto ret0; L12283: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (immediate_operand (x2, XFmode)) { operands[3] = x2; goto L12284; } goto ret0; L12284: ATTRIBUTE_UNUSED_LABEL if ((standard_80387_constant_p (operands[3]) == 2)) { *_pmatch_len = 1; tem = gen_peephole2_1415 (insn, operands); if (tem != 0) return tem; } goto ret0; L14930: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L12798; L14928: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { 
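/* Register destination of the two-element PARALLEL form
   (set (reg) (arith_or_logical_op ...)) plus a flags clobber.
   This feeds the load-and-operate peepholes below, which are gated
   on ! optimize_size and ! TARGET_READ_MODIFY.  */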
operands[0] = x2; goto L12681; } L14929: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x2, SImode)) { operands[0] = x2; goto L12699; } L14933: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, SImode)) { operands[0] = x2; goto L13026; } goto ret0; L12798: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L12799; x2 = XEXP (x1, 0); goto L14928; L12799: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L12800; x2 = XEXP (x1, 0); goto L14928; L12800: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT) goto L14935; x2 = XEXP (x1, 0); goto L14928; L14935: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x3, 0) == XWINT (x3, 0)) switch ((int) XWINT (x3, 0)) { case -4L: goto L12801; case -8L: goto L12810; case 4L: goto L12855; case 8L: goto L12864; default: break; } x2 = XEXP (x1, 0); goto L14928; L12801: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12802; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L12802: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (optimize_size || !TARGET_SUB_ESP_4)) { *_pmatch_len = 0; tem = gen_peephole2_1550 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L12810: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12811; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L12811: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (optimize_size || !TARGET_SUB_ESP_8)) { *_pmatch_len = 0; tem = gen_peephole2_1551 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L12855: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12856; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L12856: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { *_pmatch_len = 0; tem = gen_peephole2_1555 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L12864: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12865; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L12865: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14939; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L14939: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14941; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L14941: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14943; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L14943: ATTRIBUTE_UNUSED_LABEL *_pmatch_len = 0; tem = gen_peephole2_1556 (insn, operands); if (tem != 0) return tem; L14944: ATTRIBUTE_UNUSED_LABEL if ((optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1557 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14928; L12681: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (arith_or_logical_operator (x2, SImode)) { operands[3] = x2; goto L12682; } x2 = XEXP (x1, 0); goto L14929; L12682: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L12683; L12691: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x3, SImode)) { operands[1] = x3; goto L12692; } x2 = XEXP (x1, 0); 
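/* No match on this operand order: fall back to the memory-destination
   arm at L14929.  L12683 below handles the (reg OP mem) ordering; the
   peephole it reaches (gen_peephole2_1535) is gated on
   ! TARGET_READ_MODIFY, presumably splitting the memory load out of
   the arithmetic insn.  */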
goto L14929; L12683: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (memory_operand (x3, SImode)) { operands[1] = x3; goto L12684; } x3 = XEXP (x2, 0); goto L12691; L12684: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12685; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12691; L12685: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (! optimize_size && ! TARGET_READ_MODIFY)) { *_pmatch_len = 0; tem = gen_peephole2_1535 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12691; L12692: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[0])) goto L12693; x2 = XEXP (x1, 0); goto L14929; L12693: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12694; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14929; L12694: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (! optimize_size && ! TARGET_READ_MODIFY)) { *_pmatch_len = 0; tem = gen_peephole2_1536 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14929; L12699: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (arith_or_logical_operator (x2, SImode)) { operands[3] = x2; goto L12700; } x2 = XEXP (x1, 0); goto L14933; L12700: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[0])) goto L12701; L12709: ATTRIBUTE_UNUSED_LABEL if (nonmemory_operand (x3, SImode)) { operands[1] = x3; goto L12710; } x2 = XEXP (x1, 0); goto L14933; L12701: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (nonmemory_operand (x3, SImode)) { operands[1] = x3; goto L12702; } x3 = XEXP (x2, 0); goto L12709; L12702: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12703; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12709; L12703: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (! optimize_size && ! TARGET_READ_MODIFY_WRITE)) { *_pmatch_len = 0; tem = gen_peephole2_1537 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L12709; L12710: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[0])) goto L12711; x2 = XEXP (x1, 0); goto L14933; L12711: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12712; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14933; L12712: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (! optimize_size && ! 
TARGET_READ_MODIFY_WRITE)) { *_pmatch_len = 0; tem = gen_peephole2_1538 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14933; L13026: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == MULT) goto L13027; goto ret0; L13027: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode) goto L14945; goto ret0; L14945: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x3, SImode)) { operands[1] = x3; goto L13028; } L14946: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, SImode)) { operands[1] = x3; goto L13056; } goto ret0; L13028: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, SImode)) { operands[2] = x3; goto L13029; } x3 = XEXP (x2, 0); goto L14946; L13029: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L13030; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14946; L13030: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_K8 && !optimize_size && (GET_CODE (operands[2]) != CONST_INT || !CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K')))) { *_pmatch_len = 0; tem = gen_peephole2_1574 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14946; L13056: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, SImode)) { operands[2] = x3; goto L13057; } goto ret0; L13057: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L13058; goto ret0; L13058: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_K8 && !optimize_size && CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K'))) { *_pmatch_len = 0; tem = gen_peephole2_1577 (insn, operands); if (tem != 0) return tem; } goto ret0; L14931: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L12936; L14932: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, DImode)) { operands[0] = x2; goto L13017; } goto ret0; L12936: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L12937; x2 = XEXP (x1, 0); goto L14932; L12937: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L12938; x2 = XEXP (x1, 0); goto L14932; L12938: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT) goto L14947; x2 = XEXP (x1, 0); goto L14932; L14947: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x3, 0) == XWINT (x3, 0)) switch ((int) XWINT (x3, 0)) { case -8L: goto L12939; case -16L: goto L12948; case 8L: goto L12993; case 16L: goto L13002; default: break; } x2 = XEXP (x1, 0); goto L14932; L12939: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12940; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L12940: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (optimize_size || !TARGET_SUB_ESP_4)) { *_pmatch_len = 0; tem = gen_peephole2_1565 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L12948: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12949; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L12949: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT 
(x2, 0) == 17 && (optimize_size || !TARGET_SUB_ESP_8)) { *_pmatch_len = 0; tem = gen_peephole2_1566 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L12993: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12994; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L12994: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) { *_pmatch_len = 0; tem = gen_peephole2_1570 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L13002: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L13003; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L13003: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode) goto L14951; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L14951: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG) goto L14953; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L14953: ATTRIBUTE_UNUSED_LABEL if (XINT (x2, 0) == 17) goto L14955; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L14955: ATTRIBUTE_UNUSED_LABEL *_pmatch_len = 0; tem = gen_peephole2_1571 (insn, operands); if (tem != 0) return tem; L14956: ATTRIBUTE_UNUSED_LABEL if ((optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1572 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 0); goto L14932; L13017: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode) goto L14957; goto ret0; L14957: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case MULT: goto L13018; case ZERO_EXTEND: goto L13036; default: break; } goto ret0; L13018: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode) goto L14959; goto ret0; L14959: ATTRIBUTE_UNUSED_LABEL if (memory_operand (x3, DImode)) { operands[1] = x3; goto L13019; } L14960: ATTRIBUTE_UNUSED_LABEL if (nonimmediate_operand (x3, DImode)) { operands[1] = x3; goto L13047; } goto ret0; L13019: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, DImode)) { operands[2] = x3; goto L13020; } x3 = XEXP (x2, 0); goto L14960; L13020: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L13021; x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14960; L13021: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_K8 && !optimize_size && (GET_CODE (operands[2]) != CONST_INT || !CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K')))) { *_pmatch_len = 0; tem = gen_peephole2_1573 (insn, operands); if (tem != 0) return tem; } x1 = XVECEXP (x0, 0, 0); x2 = XEXP (x1, 1); x3 = XEXP (x2, 0); goto L14960; L13047: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (const_int_operand (x3, DImode)) { operands[2] = x3; goto L13048; } goto ret0; L13048: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L13049; goto ret0; L13049: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_K8 && !optimize_size && CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K'))) { *_pmatch_len = 0; tem = gen_peephole2_1576 (insn, operands); if (tem != 0) return tem; } goto ret0; L13036: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == MULT) goto L13037; goto ret0; L13037: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if 
(memory_operand (x4, SImode)) { operands[1] = x4; goto L13038; } goto ret0; L13038: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (immediate_operand (x4, SImode)) { operands[2] = x4; goto L13039; } goto ret0; L13039: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L13040; goto ret0; L13040: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_K8 && !optimize_size && (GET_CODE (operands[2]) != CONST_INT || !CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'K')))) { *_pmatch_len = 0; tem = gen_peephole2_1575 (insn, operands); if (tem != 0) return tem; } goto ret0; L14934: ATTRIBUTE_UNUSED_LABEL if (register_operand (x2, HImode)) { operands[0] = x2; goto L13063; } goto ret0; L13063: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == HImode && GET_CODE (x2) == MULT) goto L13064; goto ret0; L13064: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (nonimmediate_operand (x3, HImode)) { operands[1] = x3; goto L13065; } goto ret0; L13065: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (immediate_operand (x3, HImode)) { operands[2] = x3; goto L13066; } goto ret0; L13066: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L13067; goto ret0; L13067: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17 && (TARGET_K8 && !optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1578 (insn, operands); if (tem != 0) return tem; } goto ret0; ret0: return 0; } static rtx peephole2_3 (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *_pmatch_len ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; rtx tem ATTRIBUTE_UNUSED; switch (XVECLEN (x0, 0)) { case 2: goto L12245; case 7: goto L12287; case 3: goto L12772; default: break; } goto ret0; L12245: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L12246; goto ret0; L12246: ATTRIBUTE_UNUSED_LABEL return peephole2_2 (x0, insn, _pmatch_len); L12287: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L12288; goto ret0; L12288: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12289; goto ret0; L12289: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == CCmode) goto L14961; goto ret0; L14961: ATTRIBUTE_UNUSED_LABEL switch (GET_CODE (x2)) { case COMPARE: goto L12290; case IF_THEN_ELSE: goto L12326; default: break; } goto ret0; L12290: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM) goto L12291; goto ret0; L12291: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, VOIDmode)) { operands[4] = x4; goto L12292; } goto ret0; L12292: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == BLKmode && GET_CODE (x3) == MEM) goto L12293; goto ret0; L12293: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, VOIDmode)) { operands[5] = x4; goto L12294; } goto ret0; L12294: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L12295; goto ret0; L12295: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[6] = x2; goto L12296; } goto ret0; L12296: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP 
(x0, 0, 2); if (GET_CODE (x1) == USE) goto L12297; goto ret0; L12297: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (immediate_operand (x2, SImode)) { operands[3] = x2; goto L12298; } goto ret0; L12298: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L12299; goto ret0; L12299: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19) goto L12300; goto ret0; L12300: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L12301; goto ret0; L12301: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L12302; } goto ret0; L12302: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == CLOBBER) goto L12303; goto ret0; L12303: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L12304; } goto ret0; L12304: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 6); if (GET_CODE (x1) == CLOBBER) goto L12305; goto ret0; L12305: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[2] = x2; goto L12306; } goto ret0; L12306: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (1); if (tem == NULL_RTX) goto ret0; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12307; goto ret0; L12307: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, QImode)) { operands[7] = x2; goto L12308; } goto ret0; L12308: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == GTU) goto L12309; goto ret0; L12309: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == CCmode && GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L12310; goto ret0; L12310: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12311; goto ret0; L12311: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (2); if (tem == NULL_RTX) goto ret0; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12312; goto ret0; L12312: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, QImode)) { operands[8] = x2; goto L12313; } goto ret0; L12313: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == LTU) goto L12314; goto ret0; L12314: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == CCmode && GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L12315; goto ret0; L12315: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12316; goto ret0; L12316: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (3); if (tem == NULL_RTX) goto ret0; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12317; goto ret0; L12317: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12318; goto ret0; L12318: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == COMPARE) goto L12319; goto ret0; L12319: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[7])) goto L12320; goto ret0; L12320: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[8]) && (peep2_reg_dead_p (4, operands[7]) && peep2_reg_dead_p (4, operands[8]))) { *_pmatch_len = 3; tem = gen_peephole2_1474 (insn, operands); if (tem != 0) return tem; } goto ret0; L12326: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == NE) goto L12327; goto ret0; L12327: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (register_operand (x4, VOIDmode)) { operands[6] = x4; goto L12328; } goto ret0; 
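/* Guarded variant of the block-compare match above: here the CC
   register is set from an IF_THEN_ELSE whose condition is
   (ne operands[6] 0), apparently skipping the compare for a zero
   count.  The code below then matches the same setcc-GTU, setcc-LTU
   and final compare tail as the unconditional form and fuses the
   window via gen_peephole2_1475 once both setcc temporaries
   (operands[7] and operands[8]) are dead.  */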
L12328: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (x4 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12329; goto ret0; L12329: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_MODE (x3) == CCmode && GET_CODE (x3) == COMPARE) goto L12330; goto ret0; L12330: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 0); if (GET_MODE (x4) == BLKmode && GET_CODE (x4) == MEM) goto L12331; goto ret0; L12331: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, VOIDmode)) { operands[4] = x5; goto L12332; } goto ret0; L12332: ATTRIBUTE_UNUSED_LABEL x4 = XEXP (x3, 1); if (GET_MODE (x4) == BLKmode && GET_CODE (x4) == MEM) goto L12333; goto ret0; L12333: ATTRIBUTE_UNUSED_LABEL x5 = XEXP (x4, 0); if (register_operand (x5, VOIDmode)) { operands[5] = x5; goto L12334; } goto ret0; L12334: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 2); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12335; goto ret0; L12335: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == USE) goto L12336; goto ret0; L12336: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (immediate_operand (x2, SImode)) { operands[3] = x2; goto L12337; } goto ret0; L12337: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == USE) goto L12338; goto ret0; L12338: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12339; goto ret0; L12339: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 3); if (GET_CODE (x1) == USE) goto L12340; goto ret0; L12340: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == SImode && GET_CODE (x2) == REG && XINT (x2, 0) == 19) goto L12341; goto ret0; L12341: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 4); if (GET_CODE (x1) == CLOBBER) goto L12342; goto ret0; L12342: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[0] = x2; goto L12343; } goto ret0; L12343: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 5); if (GET_CODE (x1) == CLOBBER) goto L12344; goto ret0; L12344: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[1] = x2; goto L12345; } goto ret0; L12345: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 6); if (GET_CODE (x1) == CLOBBER) goto L12346; goto ret0; L12346: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, VOIDmode)) { operands[2] = x2; goto L12347; } goto ret0; L12347: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (1); if (tem == NULL_RTX) goto ret0; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12348; goto ret0; L12348: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, QImode)) { operands[7] = x2; goto L12349; } goto ret0; L12349: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == GTU) goto L12350; goto ret0; L12350: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == CCmode && GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L12351; goto ret0; L12351: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12352; goto ret0; L12352: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (2); if (tem == NULL_RTX) goto ret0; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12353; goto ret0; L12353: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (register_operand (x2, QImode)) { operands[8] = x2; goto L12354; } goto ret0; L12354: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == QImode && GET_CODE (x2) == LTU) goto L12355; goto ret0; L12355: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if 
(GET_MODE (x3) == CCmode && GET_CODE (x3) == REG && XINT (x3, 0) == 17) goto L12356; goto ret0; L12356: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (x3 == const_int_rtx[MAX_SAVED_CONST_INT + (0)]) goto L12357; goto ret0; L12357: ATTRIBUTE_UNUSED_LABEL tem = peep2_next_insn (3); if (tem == NULL_RTX) goto ret0; x1 = PATTERN (tem); if (GET_CODE (x1) == SET) goto L12358; goto ret0; L12358: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12359; goto ret0; L12359: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_CODE (x2) == COMPARE) goto L12360; goto ret0; L12360: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (rtx_equal_p (x3, operands[7])) goto L12361; goto ret0; L12361: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (rtx_equal_p (x3, operands[8]) && (peep2_reg_dead_p (4, operands[7]) && peep2_reg_dead_p (4, operands[8]))) { *_pmatch_len = 3; tem = gen_peephole2_1475 (insn, operands); if (tem != 0) return tem; } goto ret0; L12772: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 0); if (GET_CODE (x1) == SET) goto L12773; goto ret0; L12773: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); switch (GET_MODE (x2)) { case SImode: goto L14963; case DImode: goto L14964; default: break; } goto ret0; L14963: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L12774; goto ret0; L12774: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == SImode && GET_CODE (x2) == PLUS) goto L12775; goto ret0; L12775: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == SImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L12776; goto ret0; L12776: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT) goto L14965; goto ret0; L14965: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x3, 0) == XWINT (x3, 0)) switch ((int) XWINT (x3, 0)) { case -4L: goto L12777; case -8L: goto L12789; case 4L: goto L12819; case 8L: goto L12831; default: break; } goto ret0; L12777: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12778; goto ret0; L12778: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12779; goto ret0; L12779: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12780; goto ret0; L12780: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L12781; goto ret0; L12781: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (optimize_size || !TARGET_SUB_ESP_4)) { *_pmatch_len = 0; tem = gen_peephole2_1548 (insn, operands); if (tem != 0) return tem; } goto ret0; L12789: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12790; goto ret0; L12790: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12791; goto ret0; L12791: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12792; goto ret0; L12792: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L12793; goto ret0; L12793: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (optimize_size || !TARGET_SUB_ESP_8)) { *_pmatch_len = 0; tem = gen_peephole2_1549 (insn, operands); if (tem != 0) return tem; } goto ret0; L12819: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12820; goto ret0; L12820: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if 
(GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12821; goto ret0; L12821: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12822; goto ret0; L12822: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L12823; goto ret0; L12823: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (optimize_size || !TARGET_ADD_ESP_4)) { *_pmatch_len = 0; tem = gen_peephole2_1552 (insn, operands); if (tem != 0) return tem; } goto ret0; L12831: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12832; goto ret0; L12832: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12833; goto ret0; L12833: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12834; goto ret0; L12834: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L12835; goto ret0; L12835: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH) goto L14969; goto ret0; L14969: ATTRIBUTE_UNUSED_LABEL if ((optimize_size || !TARGET_ADD_ESP_8)) { *_pmatch_len = 0; tem = gen_peephole2_1553 (insn, operands); if (tem != 0) return tem; } L14970: ATTRIBUTE_UNUSED_LABEL if ((optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1554 (insn, operands); if (tem != 0) return tem; } goto ret0; L14964: ATTRIBUTE_UNUSED_LABEL if (GET_CODE (x2) == REG && XINT (x2, 0) == 7) goto L12912; goto ret0; L12912: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 1); if (GET_MODE (x2) == DImode && GET_CODE (x2) == PLUS) goto L12913; goto ret0; L12913: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_MODE (x3) == DImode && GET_CODE (x3) == REG && XINT (x3, 0) == 7) goto L12914; goto ret0; L12914: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 1); if (GET_CODE (x3) == CONST_INT) goto L14971; goto ret0; L14971: ATTRIBUTE_UNUSED_LABEL if ((int) XWINT (x3, 0) == XWINT (x3, 0)) switch ((int) XWINT (x3, 0)) { case -8L: goto L12915; case -16L: goto L12927; case 8L: goto L12957; case 16L: goto L12969; default: break; } goto ret0; L12915: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12916; goto ret0; L12916: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12917; goto ret0; L12917: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12918; goto ret0; L12918: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L12919; goto ret0; L12919: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (optimize_size || !TARGET_SUB_ESP_4)) { *_pmatch_len = 0; tem = gen_peephole2_1563 (insn, operands); if (tem != 0) return tem; } goto ret0; L12927: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12928; goto ret0; L12928: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12929; goto ret0; L12929: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12930; goto ret0; L12930: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L12931; goto ret0; L12931: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (optimize_size || !TARGET_SUB_ESP_8)) { *_pmatch_len = 0; tem = 
gen_peephole2_1564 (insn, operands); if (tem != 0) return tem; } goto ret0; L12957: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12958; goto ret0; L12958: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12959; goto ret0; L12959: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12960; goto ret0; L12960: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L12961; goto ret0; L12961: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH && (optimize_size || !TARGET_ADD_ESP_4)) { *_pmatch_len = 0; tem = gen_peephole2_1567 (insn, operands); if (tem != 0) return tem; } goto ret0; L12969: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 1); if (GET_CODE (x1) == CLOBBER) goto L12970; goto ret0; L12970: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == CCmode && GET_CODE (x2) == REG && XINT (x2, 0) == 17) goto L12971; goto ret0; L12971: ATTRIBUTE_UNUSED_LABEL x1 = XVECEXP (x0, 0, 2); if (GET_CODE (x1) == CLOBBER) goto L12972; goto ret0; L12972: ATTRIBUTE_UNUSED_LABEL x2 = XEXP (x1, 0); if (GET_MODE (x2) == BLKmode && GET_CODE (x2) == MEM) goto L12973; goto ret0; L12973: ATTRIBUTE_UNUSED_LABEL x3 = XEXP (x2, 0); if (GET_CODE (x3) == SCRATCH) goto L14975; goto ret0; L14975: ATTRIBUTE_UNUSED_LABEL if ((optimize_size || !TARGET_ADD_ESP_8)) { *_pmatch_len = 0; tem = gen_peephole2_1568 (insn, operands); if (tem != 0) return tem; } L14976: ATTRIBUTE_UNUSED_LABEL if ((optimize_size)) { *_pmatch_len = 0; tem = gen_peephole2_1569 (insn, operands); if (tem != 0) return tem; } goto ret0; ret0: return 0; } rtx peephole2_insns (rtx x0 ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, int *_pmatch_len ATTRIBUTE_UNUSED) { rtx * const operands ATTRIBUTE_UNUSED = &recog_data.operand[0]; rtx x1 ATTRIBUTE_UNUSED; rtx x2 ATTRIBUTE_UNUSED; rtx x3 ATTRIBUTE_UNUSED; rtx x4 ATTRIBUTE_UNUSED; rtx x5 ATTRIBUTE_UNUSED; rtx x6 ATTRIBUTE_UNUSED; rtx x7 ATTRIBUTE_UNUSED; rtx tem ATTRIBUTE_UNUSED; recog_data.insn = NULL_RTX; switch (GET_CODE (x0)) { case SET: goto L10943; case PARALLEL: goto L14877; default: break; } goto ret0; L10943: ATTRIBUTE_UNUSED_LABEL return peephole2_1 (x0, insn, _pmatch_len); L14877: ATTRIBUTE_UNUSED_LABEL return peephole2_3 (x0, insn, _pmatch_len); ret0: return 0; } /* Generated automatically by gengenrtl from rtl.def. 
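   Each gen_rtx_fmt_XXX routine below corresponds to one operand-format
   string from rtl.def: 'e' and 'u' slots hold rtx operands (XEXP), 's'
   a string (XSTR), 'i' an int (XINT), 'w' a HOST_WIDE_INT (XWINT), 'E'
   an rtvec (XVEC), 't' a tree (XTREE), 'B' a basic block (XBBDEF), 'b'
   a bitmap (XBITMAP), and '0' an unused slot that is simply cleared to
   NULL_RTX.  Every routine allocates a node from the GC heap, stamps
   its code and mode, and fills the slots in order.  As a rough
   illustration only (the per-code convenience macros built on top of
   these, such as a gen_rtx_PLUS macro, are generated elsewhere),
   building (plus:SI a b) amounts to:

       rtx sum = gen_rtx_fmt_ee (PLUS, SImode, a, b);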
*/ rtx gen_rtx_fmt_s (RTX_CODE code, enum machine_mode mode, const char *arg0) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XSTR (rt, 0) = arg0; return rt; } rtx gen_rtx_fmt_ee (RTX_CODE code, enum machine_mode mode, rtx arg0, rtx arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; XEXP (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_ue (RTX_CODE code, enum machine_mode mode, rtx arg0, rtx arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; XEXP (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_iss (RTX_CODE code, enum machine_mode mode, int arg0, const char *arg1, const char *arg2) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XSTR (rt, 1) = arg1; XSTR (rt, 2) = arg2; return rt; } rtx gen_rtx_fmt_is (RTX_CODE code, enum machine_mode mode, int arg0, const char *arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XSTR (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_i (RTX_CODE code, enum machine_mode mode, int arg0) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; return rt; } rtx gen_rtx_fmt_isE (RTX_CODE code, enum machine_mode mode, int arg0, const char *arg1, rtvec arg2) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XSTR (rt, 1) = arg1; XVEC (rt, 2) = arg2; return rt; } rtx gen_rtx_fmt_iE (RTX_CODE code, enum machine_mode mode, int arg0, rtvec arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XVEC (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_sEss (RTX_CODE code, enum machine_mode mode, const char *arg0, rtvec arg1, const char *arg2, const char *arg3) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XSTR (rt, 0) = arg0; XVEC (rt, 1) = arg1; XSTR (rt, 2) = arg2; XSTR (rt, 3) = arg3; return rt; } rtx gen_rtx_fmt_eE (RTX_CODE code, enum machine_mode mode, rtx arg0, rtvec arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; XVEC (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_Ess (RTX_CODE code, enum machine_mode mode, rtvec arg0, const char *arg1, const char *arg2) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XVEC (rt, 0) = arg0; XSTR (rt, 1) = arg1; XSTR (rt, 2) = arg2; return rt; } rtx gen_rtx_fmt_E (RTX_CODE code, enum machine_mode mode, rtvec arg0) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XVEC (rt, 0) = arg0; return rt; } rtx gen_rtx_fmt_e (RTX_CODE code, enum machine_mode mode, rtx arg0) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; return rt; } rtx gen_rtx_fmt_ss (RTX_CODE code, enum machine_mode mode, const char *arg0, const char *arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XSTR (rt, 0) = arg0; XSTR (rt, 1) = arg1; return rt; } rtx 
gen_rtx_fmt_sies (RTX_CODE code, enum machine_mode mode, const char *arg0, int arg1, rtx arg2, const char *arg3) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XSTR (rt, 0) = arg0; XINT (rt, 1) = arg1; XEXP (rt, 2) = arg2; XSTR (rt, 3) = arg3; return rt; } rtx gen_rtx_fmt_sse (RTX_CODE code, enum machine_mode mode, const char *arg0, const char *arg1, rtx arg2) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XSTR (rt, 0) = arg0; XSTR (rt, 1) = arg1; XEXP (rt, 2) = arg2; return rt; } rtx gen_rtx_fmt_sE (RTX_CODE code, enum machine_mode mode, const char *arg0, rtvec arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XSTR (rt, 0) = arg0; XVEC (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_ii (RTX_CODE code, enum machine_mode mode, int arg0, int arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XINT (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_iuuBieiee (RTX_CODE code, enum machine_mode mode, int arg0, rtx arg1, rtx arg2, struct basic_block_def *arg3, int arg4, rtx arg5, int arg6, rtx arg7, rtx arg8) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XEXP (rt, 1) = arg1; XEXP (rt, 2) = arg2; XBBDEF (rt, 3) = arg3; XINT (rt, 4) = arg4; XEXP (rt, 5) = arg5; XINT (rt, 6) = arg6; XEXP (rt, 7) = arg7; XEXP (rt, 8) = arg8; return rt; } rtx gen_rtx_fmt_iuuBieiee0 (RTX_CODE code, enum machine_mode mode, int arg0, rtx arg1, rtx arg2, struct basic_block_def *arg3, int arg4, rtx arg5, int arg6, rtx arg7, rtx arg8) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XEXP (rt, 1) = arg1; XEXP (rt, 2) = arg2; XBBDEF (rt, 3) = arg3; XINT (rt, 4) = arg4; XEXP (rt, 5) = arg5; XINT (rt, 6) = arg6; XEXP (rt, 7) = arg7; XEXP (rt, 8) = arg8; X0EXP (rt, 9) = NULL_RTX; return rt; } rtx gen_rtx_fmt_iuuBieieee (RTX_CODE code, enum machine_mode mode, int arg0, rtx arg1, rtx arg2, struct basic_block_def *arg3, int arg4, rtx arg5, int arg6, rtx arg7, rtx arg8, rtx arg9) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XEXP (rt, 1) = arg1; XEXP (rt, 2) = arg2; XBBDEF (rt, 3) = arg3; XINT (rt, 4) = arg4; XEXP (rt, 5) = arg5; XINT (rt, 6) = arg6; XEXP (rt, 7) = arg7; XEXP (rt, 8) = arg8; XEXP (rt, 9) = arg9; return rt; } rtx gen_rtx_fmt_iuu000000 (RTX_CODE code, enum machine_mode mode, int arg0, rtx arg1, rtx arg2) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XEXP (rt, 1) = arg1; XEXP (rt, 2) = arg2; X0EXP (rt, 3) = NULL_RTX; X0EXP (rt, 4) = NULL_RTX; X0EXP (rt, 5) = NULL_RTX; X0EXP (rt, 6) = NULL_RTX; X0EXP (rt, 7) = NULL_RTX; X0EXP (rt, 8) = NULL_RTX; return rt; } rtx gen_rtx_fmt_iuuB00is (RTX_CODE code, enum machine_mode mode, int arg0, rtx arg1, rtx arg2, struct basic_block_def *arg3, int arg4, const char *arg5) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XEXP (rt, 1) = arg1; XEXP (rt, 2) = arg2; XBBDEF (rt, 3) = arg3; X0EXP (rt, 4) = NULL_RTX; X0EXP (rt, 5) = NULL_RTX; XINT (rt, 6) = arg4; XSTR (rt, 7) = arg5; return rt; } rtx gen_rtx_fmt_ssiEEsi 
(RTX_CODE code, enum machine_mode mode, const char *arg0, const char *arg1, int arg2, rtvec arg3, rtvec arg4, const char *arg5, int arg6) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XSTR (rt, 0) = arg0; XSTR (rt, 1) = arg1; XINT (rt, 2) = arg2; XVEC (rt, 3) = arg3; XVEC (rt, 4) = arg4; XSTR (rt, 5) = arg5; XINT (rt, 6) = arg6; return rt; } rtx gen_rtx_fmt_Ei (RTX_CODE code, enum machine_mode mode, rtvec arg0, int arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XVEC (rt, 0) = arg0; XINT (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_eEee0 (RTX_CODE code, enum machine_mode mode, rtx arg0, rtvec arg1, rtx arg2, rtx arg3) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; XVEC (rt, 1) = arg1; XEXP (rt, 2) = arg2; XEXP (rt, 3) = arg3; X0EXP (rt, 4) = NULL_RTX; return rt; } rtx gen_rtx_fmt_eee (RTX_CODE code, enum machine_mode mode, rtx arg0, rtx arg1, rtx arg2) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; XEXP (rt, 1) = arg1; XEXP (rt, 2) = arg2; return rt; } rtx gen_rtx_fmt_ (RTX_CODE code, enum machine_mode mode) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); return rt; } rtx gen_rtx_fmt_w (RTX_CODE code, enum machine_mode mode, HOST_WIDE_INT arg0) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XWINT (rt, 0) = arg0; return rt; } rtx gen_rtx_fmt_0 (RTX_CODE code, enum machine_mode mode) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); X0EXP (rt, 0) = NULL_RTX; return rt; } rtx gen_rtx_fmt_i00 (RTX_CODE code, enum machine_mode mode, int arg0) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; X0EXP (rt, 1) = NULL_RTX; X0EXP (rt, 2) = NULL_RTX; return rt; } rtx gen_rtx_fmt_ei (RTX_CODE code, enum machine_mode mode, rtx arg0, int arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; XINT (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_e0 (RTX_CODE code, enum machine_mode mode, rtx arg0) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; X0EXP (rt, 1) = NULL_RTX; return rt; } rtx gen_rtx_fmt_u00 (RTX_CODE code, enum machine_mode mode, rtx arg0) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; X0EXP (rt, 1) = NULL_RTX; X0EXP (rt, 2) = NULL_RTX; return rt; } rtx gen_rtx_fmt_s00 (RTX_CODE code, enum machine_mode mode, const char *arg0) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XSTR (rt, 0) = arg0; X0EXP (rt, 1) = NULL_RTX; X0EXP (rt, 2) = NULL_RTX; return rt; } rtx gen_rtx_fmt_eeeee (RTX_CODE code, enum machine_mode mode, rtx arg0, rtx arg1, rtx arg2, rtx arg3, rtx arg4) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; XEXP (rt, 1) = arg1; XEXP (rt, 2) = arg2; XEXP (rt, 3) = arg3; XEXP (rt, 4) = arg4; return rt; } rtx gen_rtx_fmt_Ee (RTX_CODE code, enum machine_mode mode, 
rtvec arg0, rtx arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XVEC (rt, 0) = arg0; XEXP (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_uuEiiiiiibbii (RTX_CODE code, enum machine_mode mode, rtx arg0, rtx arg1, rtvec arg2, int arg3, int arg4, int arg5, int arg6, int arg7, int arg8, struct bitmap_head_def *arg9, struct bitmap_head_def *arg10, int arg11, int arg12) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; XEXP (rt, 1) = arg1; XVEC (rt, 2) = arg2; XINT (rt, 3) = arg3; XINT (rt, 4) = arg4; XINT (rt, 5) = arg5; XINT (rt, 6) = arg6; XINT (rt, 7) = arg7; XINT (rt, 8) = arg8; XBITMAP (rt, 9) = arg9; XBITMAP (rt, 10) = arg10; XINT (rt, 11) = arg11; XINT (rt, 12) = arg12; return rt; } rtx gen_rtx_fmt_iiiiiiiitt (RTX_CODE code, enum machine_mode mode, int arg0, int arg1, int arg2, int arg3, int arg4, int arg5, int arg6, int arg7, union tree_node *arg8, union tree_node *arg9) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XINT (rt, 0) = arg0; XINT (rt, 1) = arg1; XINT (rt, 2) = arg2; XINT (rt, 3) = arg3; XINT (rt, 4) = arg4; XINT (rt, 5) = arg5; XINT (rt, 6) = arg6; XINT (rt, 7) = arg7; XTREE (rt, 8) = arg8; XTREE (rt, 9) = arg9; return rt; } rtx gen_rtx_fmt_eti (RTX_CODE code, enum machine_mode mode, rtx arg0, union tree_node *arg1, int arg2) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XEXP (rt, 0) = arg0; XTREE (rt, 1) = arg1; XINT (rt, 2) = arg2; return rt; } rtx gen_rtx_fmt_bi (RTX_CODE code, enum machine_mode mode, struct bitmap_head_def *arg0, int arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XBITMAP (rt, 0) = arg0; XINT (rt, 1) = arg1; return rt; } rtx gen_rtx_fmt_te (RTX_CODE code, enum machine_mode mode, union tree_node *arg0, rtx arg1) { rtx rt; rt = ggc_alloc_rtx (code); memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); PUT_MODE (rt, mode); XTREE (rt, 0) = arg0; XEXP (rt, 1) = arg1; return rt; } /* Type information for GCC. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* Common subexpression elimination for GNU compiler. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_CSELIB_H #define GCC_CSELIB_H /* Describe a value. */ typedef struct cselib_val_struct GTY(()) { /* The hash value. */ unsigned int value; union cselib_val_u { /* A VALUE rtx that points back to this structure. */ rtx GTY ((tag ("1"))) val_rtx; /* Used to keep a list of free cselib_val structures. */ struct cselib_val_struct * GTY ((skip)) next_free; } GTY ((desc ("1"))) u; /* All rtl expressions that hold this value at the current time during a scan. */ struct elt_loc_list *locs; /* If this value is used as an address, points to a list of values that use it as an address in a MEM. */ struct elt_list *addr_list; struct cselib_val_struct *next_containing_mem; } cselib_val; /* A list of rtl expressions that hold the same value. */ struct elt_loc_list GTY(()) { /* Next element in the list. */ struct elt_loc_list *next; /* An rtl expression that holds the value. */ rtx loc; /* The insn that made the equivalence. */ rtx setting_insn; /* True when setting insn is inside libcall. */ bool in_libcall; }; /* A list of cselib_val structures. */ struct elt_list GTY(()) { struct elt_list *next; cselib_val *elt; }; extern cselib_val *cselib_lookup (rtx, enum machine_mode, int); extern void cselib_init (bool record_memory); extern void cselib_finish (void); extern void cselib_process_insn (rtx); extern enum machine_mode cselib_reg_set_mode (rtx); extern int rtx_equal_for_cselib_p (rtx, rtx); extern int references_value_p (rtx, int); extern rtx cselib_subst_to_values (rtx); #endif /* GCC_CSELIB_H */ /* Definitions for code generation pass of GNU compiler. Copyright (C) 2001 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_LIBFUNCS_H #define GCC_LIBFUNCS_H /* Enumeration of indexes into libfunc_table. */ enum libfunc_index { LTI_abort, LTI_memcpy, LTI_memmove, LTI_memcmp, LTI_memset, LTI_setbits, LTI_unwind_resume, LTI_eh_personality, LTI_setjmp, LTI_longjmp, LTI_unwind_sjlj_register, LTI_unwind_sjlj_unregister, LTI_profile_function_entry, LTI_profile_function_exit, LTI_gcov_flush, LTI_MAX }; /* SYMBOL_REF rtx's for the library functions that are called implicitly and not via optabs. */ extern GTY(()) rtx libfunc_table[LTI_MAX]; /* Accessor macros for libfunc_table. 
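   Each accessor below simply indexes libfunc_table by the matching LTI_
   value; memcpy_libfunc, for instance, expands to
   (libfunc_table[LTI_memcpy]) and yields the SYMBOL_REF for the routine
   the middle end falls back on when it emits the call directly rather
   than through an optab (typically handed to call-emission helpers such
   as emit_library_call).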
*/ #define abort_libfunc (libfunc_table[LTI_abort]) #define memcpy_libfunc (libfunc_table[LTI_memcpy]) #define memmove_libfunc (libfunc_table[LTI_memmove]) #define memcmp_libfunc (libfunc_table[LTI_memcmp]) #define memset_libfunc (libfunc_table[LTI_memset]) #define setbits_libfunc (libfunc_table[LTI_setbits]) #define unwind_resume_libfunc (libfunc_table[LTI_unwind_resume]) #define eh_personality_libfunc (libfunc_table[LTI_eh_personality]) #define setjmp_libfunc (libfunc_table[LTI_setjmp]) #define longjmp_libfunc (libfunc_table[LTI_longjmp]) #define unwind_sjlj_register_libfunc (libfunc_table[LTI_unwind_sjlj_register]) #define unwind_sjlj_unregister_libfunc \ (libfunc_table[LTI_unwind_sjlj_unregister]) #define profile_function_entry_libfunc (libfunc_table[LTI_profile_function_entry]) #define profile_function_exit_libfunc (libfunc_table[LTI_profile_function_exit]) #define gcov_flush_libfunc (libfunc_table[LTI_gcov_flush]) #endif /* GCC_LIBFUNCS_H */ /* Debug hooks for GCC. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_DEBUG_H #define GCC_DEBUG_H /* This structure contains hooks for the debug information output functions, accessed through the global instance debug_hooks set in toplev.c according to command line options. */ struct gcc_debug_hooks { /* Initialize debug output. MAIN_FILENAME is the name of the main input file. */ void (* init) (const char *main_filename); /* Output debug symbols. */ void (* finish) (const char *main_filename); /* Macro defined on line LINE with name and expansion TEXT. */ void (* define) (unsigned int line, const char *text); /* MACRO undefined on line LINE. */ void (* undef) (unsigned int line, const char *macro); /* Record the beginning of a new source file FILE from LINE number in the previous one. */ void (* start_source_file) (unsigned int line, const char *file); /* Record the resumption of a source file. LINE is the line number in the source file we are returning to. */ void (* end_source_file) (unsigned int line); /* Record the beginning of block N, counting from 1 and not including the function-scope block, at LINE. */ void (* begin_block) (unsigned int line, unsigned int n); /* Record the end of a block. Arguments as for begin_block. */ void (* end_block) (unsigned int line, unsigned int n); /* Returns nonzero if it is appropriate not to emit any debugging information for BLOCK, because it doesn't contain any instructions. This may not be the case for blocks containing nested functions, since we may actually call such a function even though the BLOCK information is messed up. Defaults to true. */ bool (* ignore_block) (tree); /* Record a source file location at (FILE, LINE). */ void (* source_line) (unsigned int line, const char *file); /* Called at start of prologue code. LINE is the first line in the function. 
This has been given the same prototype as source_line, so that the source_line hook can be substituted if appropriate. */ void (* begin_prologue) (unsigned int line, const char *file); /* Called at end of prologue code. LINE is the first line in the function. */ void (* end_prologue) (unsigned int line, const char *file); /* Record end of epilogue code. */ void (* end_epilogue) (unsigned int line, const char *file); /* Called at start of function DECL, before it is declared. */ void (* begin_function) (tree decl); /* Record end of function. LINE is highest line number in function. */ void (* end_function) (unsigned int line); /* Debug information for a function DECL. This might include the function name (a symbol), its parameters, and the block that makes up the function's body, and the local variables of the function. */ void (* function_decl) (tree decl); /* Debug information for a global DECL. Called from toplev.c after compilation proper has finished. */ void (* global_decl) (tree decl); /* Debug information for a type DECL. Called from toplev.c after compilation proper, also from various language front ends to record built-in types. The second argument is properly a boolean, which indicates whether or not the type is a "local" type as determined by the language. (It's not a boolean for legacy reasons.) */ void (* type_decl) (tree decl, int local); /* Debug information for imported modules and declarations. */ void (* imported_module_or_decl) (tree decl, tree context); /* DECL is an inline function, whose body is present, but which is not being output at this point. */ void (* deferred_inline_function) (tree decl); /* DECL is an inline function which is about to be emitted out of line. The hook is useful to, e.g., emit abstract debug info for the inline before it gets mangled by optimization. */ void (* outlining_inline_function) (tree decl); /* Called from final_scan_insn for any CODE_LABEL insn whose LABEL_NAME is non-null. */ void (* label) (rtx); /* Called after the start and before the end of writing a PCH file. The parameter is 0 if after the start, 1 if before the end. */ void (* handle_pch) (unsigned int); /* Called from final_scan_insn for any NOTE_INSN_VAR_LOCATION note. */ void (* var_location) (rtx); }; extern const struct gcc_debug_hooks *debug_hooks; /* The do-nothing hooks. */ extern void debug_nothing_void (void); extern void debug_nothing_charstar (const char *); extern void debug_nothing_int_charstar (unsigned int, const char *); extern void debug_nothing_int (unsigned int); extern void debug_nothing_int_int (unsigned int, unsigned int); extern void debug_nothing_tree (tree); extern void debug_nothing_tree_int (tree, int); extern void debug_nothing_tree_tree (tree, tree); extern bool debug_true_tree (tree); extern void debug_nothing_rtx (rtx); /* Hooks for various debug formats. */ extern const struct gcc_debug_hooks do_nothing_debug_hooks; extern const struct gcc_debug_hooks dbx_debug_hooks; extern const struct gcc_debug_hooks sdb_debug_hooks; extern const struct gcc_debug_hooks xcoff_debug_hooks; extern const struct gcc_debug_hooks dwarf2_debug_hooks; extern const struct gcc_debug_hooks vmsdbg_debug_hooks; /* Dwarf2 frame information. */ extern void dwarf2out_begin_prologue (unsigned int, const char *); extern void dwarf2out_end_epilogue (unsigned int, const char *); extern void dwarf2out_frame_init (void); extern void dwarf2out_frame_finish (void); /* Decide whether we want to emit frame unwind information for the current translation unit. 
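   (Typically nonzero when DWARF 2 debug info, -funwind-tables, or
   table-driven exception handling requires call frame information to be
   emitted.)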
*/ extern int dwarf2out_do_frame (void); extern void debug_flush_symbol_queue (void); extern void debug_queue_symbol (tree); extern void debug_free_queue (void); extern int debug_nesting; extern int symbol_queue_index; #endif /* !GCC_DEBUG_H */ void gt_ggc_mx_edge_prediction (void *x_p) { struct edge_prediction * x = (struct edge_prediction *)x_p; struct edge_prediction * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).next); while (x != xlimit) { gt_ggc_m_15edge_prediction ((*x).next); gt_ggc_m_8edge_def ((*x).edge); x = ((*x).next); } } void gt_ggc_mx_v_must_def_optype_d (void *x_p) { struct v_must_def_optype_d * const x = (struct v_must_def_optype_d *)x_p; if (ggc_test_and_set_mark (x)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_v_must_defs); i0++) { gt_ggc_m_9tree_node ((*x).v_must_defs[i0]); } } } } void gt_ggc_mx_vuse_optype_d (void *x_p) { struct vuse_optype_d * const x = (struct vuse_optype_d *)x_p; if (ggc_test_and_set_mark (x)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_vuses); i0++) { gt_ggc_m_9tree_node ((*x).vuses[i0]); } } } } void gt_ggc_mx_v_may_def_optype_d (void *x_p) { struct v_may_def_optype_d * const x = (struct v_may_def_optype_d *)x_p; if (ggc_test_and_set_mark (x)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_v_may_defs * 2); i0++) { gt_ggc_m_9tree_node ((*x).v_may_defs[i0]); } } } } void gt_ggc_mx_use_optype_d (void *x_p) { struct use_optype_d * const x = (struct use_optype_d *)x_p; if (ggc_test_and_set_mark (x)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_uses); i0++) { } } } } void gt_ggc_mx_def_optype_d (void *x_p) { struct def_optype_d * const x = (struct def_optype_d *)x_p; if (ggc_test_and_set_mark (x)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_defs); i0++) { } } } } void gt_ggc_mx_dataflow_d (void *x_p) { struct dataflow_d * const x = (struct dataflow_d *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_15varray_head_tag ((*x).immediate_uses); { size_t i0; for (i0 = 0; i0 < (size_t)(2); i0++) { gt_ggc_m_9tree_node ((*x).uses[i0]); } } } } void gt_ggc_mx_cgraph_varpool_node (void *x_p) { struct cgraph_varpool_node * const x = (struct cgraph_varpool_node *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).decl); gt_ggc_m_19cgraph_varpool_node ((*x).next_needed); } } void gt_ggc_mx_cgraph_edge (void *x_p) { struct cgraph_edge * x = (struct cgraph_edge *)x_p; struct cgraph_edge * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).next_caller); while (x != xlimit) { gt_ggc_m_11cgraph_node ((*x).caller); gt_ggc_m_11cgraph_node ((*x).callee); gt_ggc_m_11cgraph_edge ((*x).next_caller); gt_ggc_m_11cgraph_edge ((*x).next_callee); gt_ggc_m_9tree_node ((*x).call_expr); x = ((*x).next_caller); } } void gt_ggc_mx_cgraph_node (void *x_p) { struct cgraph_node * x = (struct cgraph_node *)x_p; struct cgraph_node * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).next); if (x != xlimit) for (;;) { struct cgraph_node * const xprev = ((*x).previous); if (xprev == NULL) break; x = xprev; (void) ggc_test_and_set_mark (xprev); } while (x != xlimit) { gt_ggc_m_9tree_node ((*x).decl); gt_ggc_m_11cgraph_edge ((*x).callees); gt_ggc_m_11cgraph_edge ((*x).callers); gt_ggc_m_11cgraph_node ((*x).next); gt_ggc_m_11cgraph_node ((*x).previous); gt_ggc_m_11cgraph_node ((*x).origin); gt_ggc_m_11cgraph_node ((*x).nested); gt_ggc_m_11cgraph_node ((*x).next_nested); gt_ggc_m_11cgraph_node ((*x).next_needed); gt_ggc_m_11cgraph_node ((*x).next_clone); gt_ggc_m_11cgraph_node 
((*x).global.inlined_to); x = ((*x).next); } } void gt_ggc_mx_bb_ann_d (void *x_p) { struct bb_ann_d * const x = (struct bb_ann_d *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).phi_nodes); gt_ggc_m_15edge_prediction ((*x).predictions); } } void gt_ggc_mx_elt_loc_list (void *x_p) { struct elt_loc_list * const x = (struct elt_loc_list *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_12elt_loc_list ((*x).next); gt_ggc_m_7rtx_def ((*x).loc); gt_ggc_m_7rtx_def ((*x).setting_insn); } } void gt_ggc_mx_cselib_val_struct (void *x_p) { struct cselib_val_struct * const x = (struct cselib_val_struct *)x_p; if (ggc_test_and_set_mark (x)) { switch (1) { case 1: gt_ggc_m_7rtx_def ((*x).u.val_rtx); break; default: break; } gt_ggc_m_12elt_loc_list ((*x).locs); gt_ggc_m_8elt_list ((*x).addr_list); gt_ggc_m_17cselib_val_struct ((*x).next_containing_mem); } } void gt_ggc_mx_elt_list (void *x_p) { struct elt_list * const x = (struct elt_list *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_8elt_list ((*x).next); gt_ggc_m_17cselib_val_struct ((*x).elt); } } void gt_ggc_mx_tree_statement_list_node (void *x_p) { struct tree_statement_list_node * x = (struct tree_statement_list_node *)x_p; struct tree_statement_list_node * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).next); if (x != xlimit) for (;;) { struct tree_statement_list_node * const xprev = ((*x).prev); if (xprev == NULL) break; x = xprev; (void) ggc_test_and_set_mark (xprev); } while (x != xlimit) { gt_ggc_m_24tree_statement_list_node ((*x).prev); gt_ggc_m_24tree_statement_list_node ((*x).next); gt_ggc_m_9tree_node ((*x).stmt); x = ((*x).next); } } void gt_ggc_mx_alias_var_def (void *x_p) { union alias_var_def * const x = (union alias_var_def *)x_p; if (ggc_test_and_set_mark (x)) { switch ((*x).common.kind) { case -1: gt_ggc_m_9tree_node ((*x).common.decl); break; case ATERM_AVAR: gt_ggc_m_9tree_node ((*x).aterm.common.decl); break; default: break; } } } void gt_ggc_mx_edge_def (void *x_p) { struct edge_def * x = (struct edge_def *)x_p; struct edge_def * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).pred_next); while (x != xlimit) { gt_ggc_m_8edge_def ((*x).pred_next); gt_ggc_m_8edge_def ((*x).succ_next); gt_ggc_m_15basic_block_def ((*x).src); gt_ggc_m_15basic_block_def ((*x).dest); switch (ir_type ()) { case 0: gt_ggc_m_7rtx_def ((*x).insns.r); break; case 1: gt_ggc_m_9tree_node ((*x).insns.t); break; default: break; } gt_ggc_m_10location_s ((*x).goto_locus); x = ((*x).pred_next); } } void gt_ggc_mx_ptr_info_def (void *x_p) { struct ptr_info_def * const x = (struct ptr_info_def *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_15bitmap_head_def ((*x).pt_vars); gt_ggc_m_9tree_node ((*x).name_mem_tag); } } void gt_ggc_mx_real_value (void *x_p) { struct real_value * const x = (struct real_value *)x_p; if (ggc_test_and_set_mark (x)) { } } void gt_ggc_mx_tree_ann_d (void *x_p) { union tree_ann_d * const x = (union tree_ann_d *)x_p; if (ggc_test_and_set_mark (x)) { switch (ann_type ((tree_ann_t)&((*x)))) { case TREE_ANN_COMMON: break; case VAR_ANN: gt_ggc_m_9tree_node ((*x).decl.type_mem_tag); gt_ggc_m_15varray_head_tag ((*x).decl.may_aliases); gt_ggc_m_9tree_node ((*x).decl.default_def); gt_ggc_m_9tree_node ((*x).decl.current_def); break; case STMT_ANN: gt_ggc_m_12def_optype_d ((*x).stmt.def_ops); gt_ggc_m_12use_optype_d ((*x).stmt.use_ops); gt_ggc_m_18v_may_def_optype_d ((*x).stmt.v_may_def_ops); gt_ggc_m_13vuse_optype_d ((*x).stmt.vuse_ops); gt_ggc_m_19v_must_def_optype_d 
((*x).stmt.v_must_def_ops); gt_ggc_m_10dataflow_d ((*x).stmt.df); gt_ggc_m_15bitmap_head_def ((*x).stmt.addresses_taken); break; default: break; } } } void gt_ggc_mx_convert_optab (void *x_p) { struct convert_optab * const x = (struct convert_optab *)x_p; if (ggc_test_and_set_mark (x)) { { size_t i0; for (i0 = 0; i0 < (size_t)(NUM_MACHINE_MODES); i0++) { { size_t i1; for (i1 = 0; i1 < (size_t)(NUM_MACHINE_MODES); i1++) { gt_ggc_m_7rtx_def ((*x).handlers[i0][i1].libfunc); } } } } } } void gt_ggc_mx_optab (void *x_p) { struct optab * const x = (struct optab *)x_p; if (ggc_test_and_set_mark (x)) { { size_t i0; for (i0 = 0; i0 < (size_t)(NUM_MACHINE_MODES); i0++) { gt_ggc_m_7rtx_def ((*x).handlers[i0].libfunc); } } } } void gt_ggc_mx_basic_block_def (void *x_p) { struct basic_block_def * x = (struct basic_block_def *)x_p; struct basic_block_def * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).next_bb); if (x != xlimit) for (;;) { struct basic_block_def * const xprev = ((*x).prev_bb); if (xprev == NULL) break; x = xprev; (void) ggc_test_and_set_mark (xprev); } while (x != xlimit) { gt_ggc_m_7rtx_def ((*x).head_); gt_ggc_m_7rtx_def ((*x).end_); gt_ggc_m_9tree_node ((*x).stmt_list); gt_ggc_m_8edge_def ((*x).pred); gt_ggc_m_8edge_def ((*x).succ); gt_ggc_m_15basic_block_def ((*x).prev_bb); gt_ggc_m_15basic_block_def ((*x).next_bb); gt_ggc_m_8bb_ann_d ((*x).tree_annotations); x = ((*x).next_bb); } } void gt_ggc_mx_reg_attrs (void *x_p) { struct reg_attrs * const x = (struct reg_attrs *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).decl); } } void gt_ggc_mx_mem_attrs (void *x_p) { struct mem_attrs * const x = (struct mem_attrs *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).expr); gt_ggc_m_7rtx_def ((*x).offset); gt_ggc_m_7rtx_def ((*x).size); } } void gt_ggc_mx_varray_head_tag (void *x_p) { struct varray_head_tag * const x = (struct varray_head_tag *)x_p; if (ggc_test_and_set_mark (x)) { switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: abort(); break; case VARRAY_DATA_CPTR: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { } } break; case VARRAY_DATA_RTX: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { gt_ggc_m_7rtx_def ((*x).data.rtx[i11]); } } break; case VARRAY_DATA_RTVEC: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { gt_ggc_m_9rtvec_def ((*x).data.rtvec[i12]); } } break; case VARRAY_DATA_TREE: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { gt_ggc_m_9tree_node ((*x).data.tree[i13]); } } break; case VARRAY_DATA_BITMAP: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { gt_ggc_m_15bitmap_head_def ((*x).data.bitmap[i14]); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { gt_ggc_m_7rtx_def ((*x).data.const_equiv[i15].rtx); } } break; case VARRAY_DATA_TE: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { gt_ggc_m_8elt_list ((*x).data.te[i16]); } } break; case VARRAY_DATA_EDGE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { gt_ggc_m_8edge_def ((*x).data.e[i17]); } } break; default: break; } } } void gt_ggc_mx_function (void *x_p) { struct function 
* const x = (struct function *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9eh_status ((*x).eh); gt_ggc_m_11stmt_status ((*x).stmt); gt_ggc_m_11expr_status ((*x).expr); gt_ggc_m_11emit_status ((*x).emit); gt_ggc_m_13varasm_status ((*x).varasm); gt_ggc_m_9tree_node ((*x).saved_tree); gt_ggc_m_9tree_node ((*x).saved_args); gt_ggc_m_9tree_node ((*x).decl); gt_ggc_m_8function ((*x).outer); gt_ggc_m_7rtx_def ((*x).arg_offset_rtx); gt_ggc_m_7rtx_def ((*x).return_rtx); gt_ggc_m_7rtx_def ((*x).internal_arg_pointer); gt_ggc_m_20initial_value_struct ((*x).hard_reg_initial_vals); gt_ggc_m_7rtx_def ((*x).x_nonlocal_goto_handler_labels); gt_ggc_m_7rtx_def ((*x).x_return_label); gt_ggc_m_7rtx_def ((*x).x_naked_return_label); gt_ggc_m_7rtx_def ((*x).x_stack_slot_list); gt_ggc_m_7rtx_def ((*x).x_tail_recursion_reentry); gt_ggc_m_7rtx_def ((*x).x_arg_pointer_save_area); gt_ggc_m_9tree_node ((*x).static_chain_decl); gt_ggc_m_9tree_node ((*x).nonlocal_goto_save_area); gt_ggc_m_7rtx_def ((*x).x_parm_birth_insn); gt_ggc_m_P9temp_slot15varray_head_tag ((*x).x_used_temp_slots); gt_ggc_m_9temp_slot ((*x).x_avail_temp_slots); gt_ggc_m_14var_refs_queue ((*x).fixup_var_refs_queue); gt_ggc_m_9rtvec_def ((*x).original_arg_vector); gt_ggc_m_9tree_node ((*x).original_decl_initial); gt_ggc_m_16machine_function ((*x).machine); gt_ggc_m_17language_function ((*x).language); gt_ggc_m_7rtx_def ((*x).epilogue_delay_list); gt_ggc_m_15varray_head_tag ((*x).ib_boundaries_block); gt_ggc_m_9tree_node ((*x).unexpanded_var_list); } } void gt_ggc_mx_expr_status (void *x_p) { struct expr_status * const x = (struct expr_status *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_7rtx_def ((*x).x_saveregs_value); gt_ggc_m_7rtx_def ((*x).x_apply_args_value); gt_ggc_m_7rtx_def ((*x).x_forced_labels); gt_ggc_m_7rtx_def ((*x).x_pending_chain); } } void gt_ggc_mx_emit_status (void *x_p) { struct emit_status * const x = (struct emit_status *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_7rtx_def ((*x).x_first_insn); gt_ggc_m_7rtx_def ((*x).x_last_insn); gt_ggc_m_14sequence_stack ((*x).sequence_stack); if ((*x).regno_pointer_align != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).x_reg_rtx_no); i0++) { } ggc_mark ((*x).regno_pointer_align); } if ((*x).x_regno_reg_rtx != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(((*x)).x_reg_rtx_no); i1++) { gt_ggc_m_7rtx_def ((*x).x_regno_reg_rtx[i1]); } ggc_mark ((*x).x_regno_reg_rtx); } } } void gt_ggc_mx_sequence_stack (void *x_p) { struct sequence_stack * const x = (struct sequence_stack *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_7rtx_def ((*x).first); gt_ggc_m_7rtx_def ((*x).last); gt_ggc_m_14sequence_stack ((*x).next); } } void gt_ggc_mx_var_refs_queue (void *x_p) { struct var_refs_queue * const x = (struct var_refs_queue *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_7rtx_def ((*x).modified); gt_ggc_m_14var_refs_queue ((*x).next); } } void gt_ggc_mx_bitmap_head_def (void *x_p) { struct bitmap_head_def * const x = (struct bitmap_head_def *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_18bitmap_element_def ((*x).first); gt_ggc_m_18bitmap_element_def ((*x).current); } } void gt_ggc_mx_bitmap_element_def (void *x_p) { struct bitmap_element_def * const x = (struct bitmap_element_def *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_18bitmap_element_def ((*x).next); gt_ggc_m_18bitmap_element_def ((*x).prev); } } void gt_ggc_mx_machine_function (void *x_p) { struct machine_function * const x = (struct machine_function *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_17stack_local_entry 
((*x).stack_locals); } } void gt_ggc_mx_answer (void *x_p) { struct answer * const x = (struct answer *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_6answer ((*x).next); { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).count); i0++) { switch (cpp_token_val_index (&((*x).first[i0]))) { case CPP_TOKEN_FLD_NODE: { union tree_node * const x1 = ((*x).first[i0].val.node) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).first[i0].val.node))) : NULL; gt_ggc_m_9tree_node (x1); } break; case CPP_TOKEN_FLD_SOURCE: gt_ggc_m_9cpp_token ((*x).first[i0].val.source); break; case CPP_TOKEN_FLD_STR: break; case CPP_TOKEN_FLD_ARG_NO: break; default: break; } } } } } void gt_ggc_mx_cpp_macro (void *x_p) { struct cpp_macro * const x = (struct cpp_macro *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).params != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).paramc); i0++) { { union tree_node * const x1 = ((*x).params[i0]) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).params[i0]))) : NULL; gt_ggc_m_9tree_node (x1); } } ggc_mark ((*x).params); } switch (((*x)).traditional) { case 0: if ((*x).exp.tokens != NULL) { size_t i2; for (i2 = 0; i2 < (size_t)((*x).count); i2++) { switch (cpp_token_val_index (&((*x).exp.tokens[i2]))) { case CPP_TOKEN_FLD_NODE: { union tree_node * const x3 = ((*x).exp.tokens[i2].val.node) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).exp.tokens[i2].val.node))) : NULL; gt_ggc_m_9tree_node (x3); } break; case CPP_TOKEN_FLD_SOURCE: gt_ggc_m_9cpp_token ((*x).exp.tokens[i2].val.source); break; case CPP_TOKEN_FLD_STR: break; case CPP_TOKEN_FLD_ARG_NO: break; default: break; } } ggc_mark ((*x).exp.tokens); } break; case 1: break; default: break; } } } void gt_ggc_mx_cpp_token (void *x_p) { struct cpp_token * const x = (struct cpp_token *)x_p; if (ggc_test_and_set_mark (x)) { switch (cpp_token_val_index (&((*x)))) { case CPP_TOKEN_FLD_NODE: { union tree_node * const x0 = ((*x).val.node) ? 
HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).val.node))) : NULL; gt_ggc_m_9tree_node (x0); } break; case CPP_TOKEN_FLD_SOURCE: gt_ggc_m_9cpp_token ((*x).val.source); break; case CPP_TOKEN_FLD_STR: break; case CPP_TOKEN_FLD_ARG_NO: break; default: break; } } } void gt_ggc_mx_rtvec_def (void *x_p) { struct rtvec_def * const x = (struct rtvec_def *)x_p; if (ggc_test_and_set_mark (x)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_elem); i0++) { gt_ggc_m_7rtx_def ((*x).elem[i0]); } } } } void gt_ggc_mx_rtx_def (void *x_p) { struct rtx_def * x = (struct rtx_def *)x_p; struct rtx_def * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = (RTX_NEXT (&(*xlimit))); if (x != xlimit) for (;;) { struct rtx_def * const xprev = (RTX_PREV (&(*x))); if (xprev == NULL) break; x = xprev; (void) ggc_test_and_set_mark (xprev); } while (x != xlimit) { switch (GET_CODE (&(*x))) { case VAR_LOCATION: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_9tree_node ((*x).u.fld[0].rttree); break; case US_TRUNCATE: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SS_TRUNCATE: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case US_MINUS: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SS_MINUS: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case US_PLUS: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SS_PLUS: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case VEC_DUPLICATE: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case VEC_CONCAT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case VEC_SELECT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case VEC_MERGE: gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case RANGE_LIVE: gt_ggc_m_18bitmap_element_def ((*x).u.fld[0].rtbit); break; case RANGE_VAR: gt_ggc_m_9tree_node ((*x).u.fld[1].rttree); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case RANGE_REG: gt_ggc_m_9tree_node ((*x).u.fld[9].rttree); gt_ggc_m_9tree_node ((*x).u.fld[8].rttree); break; case RANGE_INFO: gt_ggc_m_18bitmap_element_def ((*x).u.fld[10].rtbit); gt_ggc_m_18bitmap_element_def ((*x).u.fld[9].rtbit); gt_ggc_m_9rtvec_def ((*x).u.fld[2].rtvec); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case LO_SUM: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case HIGH: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ZERO_EXTRACT: gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SIGN_EXTRACT: gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case PARITY: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case POPCOUNT: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case CTZ: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case CLZ: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case FFS: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SQRT: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ABS: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UNSIGNED_FIX: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UNSIGNED_FLOAT: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case FIX: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case FLOAT: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); 
break; case FLOAT_TRUNCATE: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case FLOAT_EXTEND: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case TRUNCATE: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ZERO_EXTEND: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SIGN_EXTEND: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case LTGT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UNLT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UNLE: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UNGT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UNGE: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UNEQ: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ORDERED: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UNORDERED: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case LTU: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case LEU: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case GTU: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case GEU: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case LT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case LE: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case GT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case GE: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case EQ: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case NE: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case POST_MODIFY: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case PRE_MODIFY: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case POST_INC: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case POST_DEC: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case PRE_INC: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case PRE_DEC: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UMAX: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UMIN: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SMAX: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SMIN: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ROTATERT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case LSHIFTRT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ASHIFTRT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ROTATE: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ASHIFT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case NOT: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case XOR: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case IOR: gt_ggc_m_7rtx_def 
((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case AND: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UMOD: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case UDIV: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case MOD: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case DIV: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case MULT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case NEG: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case MINUS: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case PLUS: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case COMPARE: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case COND: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case IF_THEN_ELSE: gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case QUEUED: gt_ggc_m_7rtx_def ((*x).u.fld[4].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[3].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case CC0: break; case SYMBOL_REF: gt_ggc_m_9tree_node ((*x).u.fld[2].rttree); break; case LABEL_REF: gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case MEM: gt_ggc_m_9mem_attrs ((*x).u.fld[1].rtmem); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case CONCAT: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case STRICT_LOW_PART: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SUBREG: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SCRATCH: break; case REG: gt_ggc_m_9reg_attrs ((*x).u.fld[2].rtreg); break; case VALUE: break; case PC: break; case CONST: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case CONST_STRING: break; case CONST_VECTOR: gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case CONST_DOUBLE: break; case CONST_INT: break; case RESX: break; case TRAP_IF: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case RETURN: break; case CALL: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case CLOBBER: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case USE: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SET: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case PREFETCH: gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ADDR_DIFF_VEC: gt_ggc_m_7rtx_def ((*x).u.fld[3].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_9rtvec_def ((*x).u.fld[1].rtvec); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case ADDR_VEC: gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case UNSPEC_VOLATILE: gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case UNSPEC: gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case ASM_OPERANDS: gt_ggc_m_9rtvec_def ((*x).u.fld[4].rtvec); gt_ggc_m_9rtvec_def ((*x).u.fld[3].rtvec); break; case ASM_INPUT: break; case PARALLEL: gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case COND_EXEC: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); 
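/* For NOTE rtxes the walker below dispatches on NOTE_LINE_NUMBER:
   the negative case values are the NOTE_INSN_ kinds, and they decide
   whether operand 4 holds an rtx, a tree, or nothing to mark.  */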
gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case NOTE: switch (NOTE_LINE_NUMBER (&(*x))) { default: break; case -79: gt_ggc_m_7rtx_def ((*x).u.fld[4].rtx); break; case -80: break; case -81: break; case -82: gt_ggc_m_7rtx_def ((*x).u.fld[4].rtx); break; case -83: break; case -84: break; case -85: break; case -86: break; case -87: break; case -88: break; case -89: break; case -90: break; case -91: break; case -92: break; case -93: break; case -94: break; case -95: break; case -96: break; case -97: gt_ggc_m_9tree_node ((*x).u.fld[4].rttree); break; case -98: gt_ggc_m_9tree_node ((*x).u.fld[4].rttree); break; case -99: break; case -100: break; } gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); break; case CODE_LABEL: gt_ggc_m_7rtx_def ((*x).u.fld[5].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); break; case BARRIER: gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); break; case CALL_INSN: gt_ggc_m_7rtx_def ((*x).u.fld[9].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[8].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[7].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[5].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); break; case JUMP_INSN: gt_ggc_m_7rtx_def ((*x).u.fld[9].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[8].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[7].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[5].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); break; case INSN: gt_ggc_m_7rtx_def ((*x).u.fld[8].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[7].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[5].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); break; case ATTR_FLAG: break; case EQ_ATTR_ALT: break; case EQ_ATTR: break; case SET_ATTR_ALTERNATIVE: gt_ggc_m_9rtvec_def ((*x).u.fld[1].rtvec); break; case SET_ATTR: break; case ATTR: break; case DEFINE_ATTR: gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); break; case DEFINE_INSN_RESERVATION: gt_ggc_m_7rtx_def ((*x).u.fld[2].rtx); break; case DEFINE_RESERVATION: break; case AUTOMATA_OPTION: break; case DEFINE_AUTOMATON: break; case DEFINE_BYPASS: break; case FINAL_ABSENCE_SET: break; case ABSENCE_SET: break; case FINAL_PRESENCE_SET: break; case PRESENCE_SET: break; case EXCLUSION_SET: break; case DEFINE_QUERY_CPU_UNIT: break; case DEFINE_CPU_UNIT: break; case ADDRESS: gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case SEQUENCE: gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_COND_EXEC: gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_ASM_ATTRIBUTES: gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_FUNCTION_UNIT: gt_ggc_m_9rtvec_def ((*x).u.fld[6].rtvec); gt_ggc_m_7rtx_def ((*x).u.fld[3].rtx); break; case DEFINE_DELAY: gt_ggc_m_9rtvec_def ((*x).u.fld[1].rtvec); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case DEFINE_EXPAND: gt_ggc_m_9rtvec_def ((*x).u.fld[1].rtvec); break; case DEFINE_PEEPHOLE2: gt_ggc_m_9rtvec_def ((*x).u.fld[2].rtvec); gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_INSN_AND_SPLIT: gt_ggc_m_9rtvec_def ((*x).u.fld[7].rtvec); gt_ggc_m_9rtvec_def ((*x).u.fld[5].rtvec); gt_ggc_m_9rtvec_def ((*x).u.fld[1].rtvec); break; case DEFINE_SPLIT: gt_ggc_m_9rtvec_def ((*x).u.fld[2].rtvec); gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_PEEPHOLE: gt_ggc_m_9rtvec_def ((*x).u.fld[3].rtvec); gt_ggc_m_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_INSN: gt_ggc_m_9rtvec_def ((*x).u.fld[4].rtvec); gt_ggc_m_9rtvec_def ((*x).u.fld[1].rtvec); break; case 
MATCH_PAR_DUP: gt_ggc_m_9rtvec_def ((*x).u.fld[1].rtvec); break; case MATCH_OP_DUP: gt_ggc_m_9rtvec_def ((*x).u.fld[1].rtvec); break; case MATCH_PARALLEL: gt_ggc_m_9rtvec_def ((*x).u.fld[2].rtvec); break; case MATCH_OPERATOR: gt_ggc_m_9rtvec_def ((*x).u.fld[2].rtvec); break; case MATCH_DUP: break; case MATCH_SCRATCH: break; case MATCH_OPERAND: break; case INSN_LIST: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case EXPR_LIST: gt_ggc_m_7rtx_def ((*x).u.fld[1].rtx); gt_ggc_m_7rtx_def ((*x).u.fld[0].rtx); break; case INCLUDE: break; case NIL: break; case UNKNOWN: break; default: break; } x = (RTX_NEXT (&(*x))); } } void gt_ggc_mx_location_s (void *x_p) { struct location_s * const x = (struct location_s *)x_p; if (ggc_test_and_set_mark (x)) { } } void gt_ggc_m_II17splay_tree_node_s (void *x_p) { struct splay_tree_node_s * const x = (struct splay_tree_node_s *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_II17splay_tree_node_s ((*x).left); gt_ggc_m_II17splay_tree_node_s ((*x).right); } } void gt_ggc_m_SP9tree_node17splay_tree_node_s (void *x_p) { struct splay_tree_node_s * const x = (struct splay_tree_node_s *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((void *)(*x).value); gt_ggc_m_SP9tree_node17splay_tree_node_s ((*x).left); gt_ggc_m_SP9tree_node17splay_tree_node_s ((*x).right); } } void gt_ggc_m_P13alias_var_def15varray_head_tag (void *x_p) { struct varray_head_tag * const x = (struct varray_head_tag *)x_p; if (ggc_test_and_set_mark (x)) { switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { gt_ggc_m_13alias_var_def ((*x).data.generic[i10]); } } break; case VARRAY_DATA_CPTR: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { } } break; case VARRAY_DATA_RTX: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { gt_ggc_m_7rtx_def ((*x).data.rtx[i12]); } } break; case VARRAY_DATA_RTVEC: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { gt_ggc_m_9rtvec_def ((*x).data.rtvec[i13]); } } break; case VARRAY_DATA_TREE: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { gt_ggc_m_9tree_node ((*x).data.tree[i14]); } } break; case VARRAY_DATA_BITMAP: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { gt_ggc_m_15bitmap_head_def ((*x).data.bitmap[i15]); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { gt_ggc_m_7rtx_def ((*x).data.const_equiv[i16].rtx); } } break; case VARRAY_DATA_TE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { gt_ggc_m_8elt_list ((*x).data.te[i17]); } } break; case VARRAY_DATA_EDGE: { size_t i18; for (i18 = 0; i18 < (size_t)((*x).num_elements); i18++) { gt_ggc_m_8edge_def ((*x).data.e[i18]); } } break; default: break; } } } void gt_ggc_m_P9tree_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_9tree_node ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_P9reg_attrs4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if 
(ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_9reg_attrs ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_P9mem_attrs4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_9mem_attrs ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_P7rtx_def4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_7rtx_def ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_SP9tree_node12splay_tree_s (void *x_p) { struct splay_tree_s * const x = (struct splay_tree_s *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_SP9tree_node17splay_tree_node_s ((*x).root); } } void gt_ggc_m_P19cgraph_varpool_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_19cgraph_varpool_node ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_P11cgraph_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_11cgraph_node ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_II12splay_tree_s (void *x_p) { struct splay_tree_s * const x = (struct splay_tree_s *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_II17splay_tree_node_s ((*x).root); } } void gt_pch_nx_edge_prediction (void *x_p) { struct edge_prediction * x = (struct edge_prediction *)x_p; struct edge_prediction * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_15edge_prediction)) xlimit = ((*xlimit).next); while (x != xlimit) { gt_pch_n_15edge_prediction ((*x).next); gt_pch_n_8edge_def ((*x).edge); x = ((*x).next); } } void gt_pch_nx_v_must_def_optype_d (void *x_p) { struct v_must_def_optype_d * const x = (struct v_must_def_optype_d *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_19v_must_def_optype_d)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_v_must_defs); i0++) { gt_pch_n_9tree_node ((*x).v_must_defs[i0]); } } } } void gt_pch_nx_vuse_optype_d (void *x_p) { struct vuse_optype_d * const x = (struct vuse_optype_d *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_13vuse_optype_d)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_vuses); i0++) { gt_pch_n_9tree_node ((*x).vuses[i0]); } } } } void gt_pch_nx_v_may_def_optype_d (void *x_p) { struct v_may_def_optype_d * const x = (struct v_may_def_optype_d *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_18v_may_def_optype_d)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_v_may_defs * 2); i0++) { gt_pch_n_9tree_node ((*x).v_may_defs[i0]); } } } } void gt_pch_nx_use_optype_d (void *x_p) { struct use_optype_d * const x = (struct use_optype_d *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_12use_optype_d)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_uses); i0++) { } } } } void gt_pch_nx_def_optype_d (void *x_p) { struct def_optype_d * const x = (struct def_optype_d *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_12def_optype_d)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_defs); i0++) { } } } } void gt_pch_nx_dataflow_d (void *x_p) { struct dataflow_d * const x = (struct dataflow_d 
*)x_p; if (gt_pch_note_object (x, x, gt_pch_p_10dataflow_d)) { gt_pch_n_15varray_head_tag ((*x).immediate_uses); { size_t i0; for (i0 = 0; i0 < (size_t)(2); i0++) { gt_pch_n_9tree_node ((*x).uses[i0]); } } } } void gt_pch_nx_cgraph_varpool_node (void *x_p) { struct cgraph_varpool_node * const x = (struct cgraph_varpool_node *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_19cgraph_varpool_node)) { gt_pch_n_9tree_node ((*x).decl); gt_pch_n_19cgraph_varpool_node ((*x).next_needed); } } void gt_pch_nx_cgraph_edge (void *x_p) { struct cgraph_edge * x = (struct cgraph_edge *)x_p; struct cgraph_edge * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_11cgraph_edge)) xlimit = ((*xlimit).next_caller); while (x != xlimit) { gt_pch_n_11cgraph_node ((*x).caller); gt_pch_n_11cgraph_node ((*x).callee); gt_pch_n_11cgraph_edge ((*x).next_caller); gt_pch_n_11cgraph_edge ((*x).next_callee); gt_pch_n_9tree_node ((*x).call_expr); gt_pch_n_S ((*x).inline_failed); x = ((*x).next_caller); } } void gt_pch_nx_cgraph_node (void *x_p) { struct cgraph_node * x = (struct cgraph_node *)x_p; struct cgraph_node * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_11cgraph_node)) xlimit = ((*xlimit).next); if (x != xlimit) for (;;) { struct cgraph_node * const xprev = ((*x).previous); if (xprev == NULL) break; x = xprev; (void) gt_pch_note_object (xprev, xprev, gt_pch_p_11cgraph_node); } while (x != xlimit) { gt_pch_n_9tree_node ((*x).decl); gt_pch_n_11cgraph_edge ((*x).callees); gt_pch_n_11cgraph_edge ((*x).callers); gt_pch_n_11cgraph_node ((*x).next); gt_pch_n_11cgraph_node ((*x).previous); gt_pch_n_11cgraph_node ((*x).origin); gt_pch_n_11cgraph_node ((*x).nested); gt_pch_n_11cgraph_node ((*x).next_nested); gt_pch_n_11cgraph_node ((*x).next_needed); gt_pch_n_11cgraph_node ((*x).next_clone); gt_pch_n_11cgraph_node ((*x).global.inlined_to); x = ((*x).next); } } void gt_pch_nx_bb_ann_d (void *x_p) { struct bb_ann_d * const x = (struct bb_ann_d *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_8bb_ann_d)) { gt_pch_n_9tree_node ((*x).phi_nodes); gt_pch_n_15edge_prediction ((*x).predictions); } } void gt_pch_nx_elt_loc_list (void *x_p) { struct elt_loc_list * const x = (struct elt_loc_list *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_12elt_loc_list)) { gt_pch_n_12elt_loc_list ((*x).next); gt_pch_n_7rtx_def ((*x).loc); gt_pch_n_7rtx_def ((*x).setting_insn); } } void gt_pch_nx_cselib_val_struct (void *x_p) { struct cselib_val_struct * const x = (struct cselib_val_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_17cselib_val_struct)) { switch (1) { case 1: gt_pch_n_7rtx_def ((*x).u.val_rtx); break; default: break; } gt_pch_n_12elt_loc_list ((*x).locs); gt_pch_n_8elt_list ((*x).addr_list); gt_pch_n_17cselib_val_struct ((*x).next_containing_mem); } } void gt_pch_nx_elt_list (void *x_p) { struct elt_list * const x = (struct elt_list *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_8elt_list)) { gt_pch_n_8elt_list ((*x).next); gt_pch_n_17cselib_val_struct ((*x).elt); } } void gt_pch_nx_tree_statement_list_node (void *x_p) { struct tree_statement_list_node * x = (struct tree_statement_list_node *)x_p; struct tree_statement_list_node * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_24tree_statement_list_node)) xlimit = ((*xlimit).next); if (x != xlimit) for (;;) { struct tree_statement_list_node * const xprev = ((*x).prev); if (xprev == NULL) break; x = xprev; (void) gt_pch_note_object (xprev, xprev, gt_pch_p_24tree_statement_list_node); } while (x != xlimit) { 
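/* Chained structures (statement lists here, and likewise rtx insn
   chains, cgraph nodes and basic blocks) are walked iteratively:
   first backwards to the earliest unnoted element, then forwards
   over each element's fields, so long chains never recurse deeply.  */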
gt_pch_n_24tree_statement_list_node ((*x).prev); gt_pch_n_24tree_statement_list_node ((*x).next); gt_pch_n_9tree_node ((*x).stmt); x = ((*x).next); } } void gt_pch_nx_alias_var_def (void *x_p) { union alias_var_def * const x = (union alias_var_def *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_13alias_var_def)) { switch ((*x).common.kind) { case -1: gt_pch_n_9tree_node ((*x).common.decl); break; case ATERM_AVAR: gt_pch_n_9tree_node ((*x).aterm.common.decl); break; default: break; } } } void gt_pch_nx_edge_def (void *x_p) { struct edge_def * x = (struct edge_def *)x_p; struct edge_def * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_8edge_def)) xlimit = ((*xlimit).pred_next); while (x != xlimit) { gt_pch_n_8edge_def ((*x).pred_next); gt_pch_n_8edge_def ((*x).succ_next); gt_pch_n_15basic_block_def ((*x).src); gt_pch_n_15basic_block_def ((*x).dest); switch (ir_type ()) { case 0: gt_pch_n_7rtx_def ((*x).insns.r); break; case 1: gt_pch_n_9tree_node ((*x).insns.t); break; default: break; } gt_pch_n_10location_s ((*x).goto_locus); x = ((*x).pred_next); } } void gt_pch_nx_ptr_info_def (void *x_p) { struct ptr_info_def * const x = (struct ptr_info_def *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_12ptr_info_def)) { gt_pch_n_15bitmap_head_def ((*x).pt_vars); gt_pch_n_9tree_node ((*x).name_mem_tag); } } void gt_pch_nx_real_value (void *x_p) { struct real_value * const x = (struct real_value *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_10real_value)) { } } void gt_pch_nx_tree_ann_d (void *x_p) { union tree_ann_d * const x = (union tree_ann_d *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_10tree_ann_d)) { switch (ann_type ((tree_ann_t)&((*x)))) { case TREE_ANN_COMMON: break; case VAR_ANN: gt_pch_n_9tree_node ((*x).decl.type_mem_tag); gt_pch_n_15varray_head_tag ((*x).decl.may_aliases); gt_pch_n_9tree_node ((*x).decl.default_def); gt_pch_n_9tree_node ((*x).decl.current_def); break; case STMT_ANN: gt_pch_n_12def_optype_d ((*x).stmt.def_ops); gt_pch_n_12use_optype_d ((*x).stmt.use_ops); gt_pch_n_18v_may_def_optype_d ((*x).stmt.v_may_def_ops); gt_pch_n_13vuse_optype_d ((*x).stmt.vuse_ops); gt_pch_n_19v_must_def_optype_d ((*x).stmt.v_must_def_ops); gt_pch_n_10dataflow_d ((*x).stmt.df); gt_pch_n_15bitmap_head_def ((*x).stmt.addresses_taken); break; default: break; } } } void gt_pch_nx_convert_optab (void *x_p) { struct convert_optab * const x = (struct convert_optab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_13convert_optab)) { { size_t i0; for (i0 = 0; i0 < (size_t)(NUM_MACHINE_MODES); i0++) { { size_t i1; for (i1 = 0; i1 < (size_t)(NUM_MACHINE_MODES); i1++) { gt_pch_n_7rtx_def ((*x).handlers[i0][i1].libfunc); } } } } } } void gt_pch_nx_optab (void *x_p) { struct optab * const x = (struct optab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_5optab)) { { size_t i0; for (i0 = 0; i0 < (size_t)(NUM_MACHINE_MODES); i0++) { gt_pch_n_7rtx_def ((*x).handlers[i0].libfunc); } } } } void gt_pch_nx_basic_block_def (void *x_p) { struct basic_block_def * x = (struct basic_block_def *)x_p; struct basic_block_def * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_15basic_block_def)) xlimit = ((*xlimit).next_bb); if (x != xlimit) for (;;) { struct basic_block_def * const xprev = ((*x).prev_bb); if (xprev == NULL) break; x = xprev; (void) gt_pch_note_object (xprev, xprev, gt_pch_p_15basic_block_def); } while (x != xlimit) { gt_pch_n_7rtx_def ((*x).head_); gt_pch_n_7rtx_def ((*x).end_); gt_pch_n_9tree_node ((*x).stmt_list); gt_pch_n_8edge_def ((*x).pred); gt_pch_n_8edge_def ((*x).succ); 
gt_pch_n_15basic_block_def ((*x).prev_bb); gt_pch_n_15basic_block_def ((*x).next_bb); gt_pch_n_8bb_ann_d ((*x).tree_annotations); x = ((*x).next_bb); } } void gt_pch_nx_reg_attrs (void *x_p) { struct reg_attrs * const x = (struct reg_attrs *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9reg_attrs)) { gt_pch_n_9tree_node ((*x).decl); } } void gt_pch_nx_mem_attrs (void *x_p) { struct mem_attrs * const x = (struct mem_attrs *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9mem_attrs)) { gt_pch_n_9tree_node ((*x).expr); gt_pch_n_7rtx_def ((*x).offset); gt_pch_n_7rtx_def ((*x).size); } } void gt_pch_nx_varray_head_tag (void *x_p) { struct varray_head_tag * const x = (struct varray_head_tag *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_15varray_head_tag)) { gt_pch_n_S ((*x).name); switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: abort(); break; case VARRAY_DATA_CPTR: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { gt_pch_n_S ((*x).data.cptr[i10]); } } break; case VARRAY_DATA_RTX: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { gt_pch_n_7rtx_def ((*x).data.rtx[i11]); } } break; case VARRAY_DATA_RTVEC: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { gt_pch_n_9rtvec_def ((*x).data.rtvec[i12]); } } break; case VARRAY_DATA_TREE: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { gt_pch_n_9tree_node ((*x).data.tree[i13]); } } break; case VARRAY_DATA_BITMAP: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { gt_pch_n_15bitmap_head_def ((*x).data.bitmap[i14]); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { gt_pch_n_7rtx_def ((*x).data.const_equiv[i15].rtx); } } break; case VARRAY_DATA_TE: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { gt_pch_n_8elt_list ((*x).data.te[i16]); } } break; case VARRAY_DATA_EDGE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { gt_pch_n_8edge_def ((*x).data.e[i17]); } } break; default: break; } } } void gt_pch_nx_function (void *x_p) { struct function * const x = (struct function *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_8function)) { gt_pch_n_9eh_status ((*x).eh); gt_pch_n_11stmt_status ((*x).stmt); gt_pch_n_11expr_status ((*x).expr); gt_pch_n_11emit_status ((*x).emit); gt_pch_n_13varasm_status ((*x).varasm); gt_pch_n_9tree_node ((*x).saved_tree); gt_pch_n_9tree_node ((*x).saved_args); gt_pch_n_9tree_node ((*x).decl); gt_pch_n_8function ((*x).outer); gt_pch_n_7rtx_def ((*x).arg_offset_rtx); gt_pch_n_7rtx_def ((*x).return_rtx); gt_pch_n_7rtx_def ((*x).internal_arg_pointer); gt_pch_n_20initial_value_struct ((*x).hard_reg_initial_vals); gt_pch_n_7rtx_def ((*x).x_nonlocal_goto_handler_labels); gt_pch_n_7rtx_def ((*x).x_return_label); gt_pch_n_7rtx_def ((*x).x_naked_return_label); gt_pch_n_7rtx_def ((*x).x_stack_slot_list); gt_pch_n_7rtx_def ((*x).x_tail_recursion_reentry); gt_pch_n_7rtx_def ((*x).x_arg_pointer_save_area); gt_pch_n_9tree_node ((*x).static_chain_decl); gt_pch_n_9tree_node ((*x).nonlocal_goto_save_area); gt_pch_n_7rtx_def ((*x).x_parm_birth_insn); gt_pch_n_P9temp_slot15varray_head_tag ((*x).x_used_temp_slots); gt_pch_n_9temp_slot ((*x).x_avail_temp_slots); 
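/* The gt_pch_nx_ and gt_pch_n_ routines in this region exist for
   precompiled-header support: gt_pch_note_object records each
   reachable object together with its matching gt_pch_p_ callback,
   which is invoked later to rewrite every pointer field (through
   the supplied gt_pointer_operator) when the PCH image is mapped
   at a different address on reload.  */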
gt_pch_n_14var_refs_queue ((*x).fixup_var_refs_queue); gt_pch_n_9rtvec_def ((*x).original_arg_vector); gt_pch_n_9tree_node ((*x).original_decl_initial); gt_pch_n_16machine_function ((*x).machine); gt_pch_n_17language_function ((*x).language); gt_pch_n_7rtx_def ((*x).epilogue_delay_list); gt_pch_n_S ((*x).function_end_locus.file); gt_pch_n_15varray_head_tag ((*x).ib_boundaries_block); gt_pch_n_9tree_node ((*x).unexpanded_var_list); } } void gt_pch_nx_expr_status (void *x_p) { struct expr_status * const x = (struct expr_status *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_11expr_status)) { gt_pch_n_7rtx_def ((*x).x_saveregs_value); gt_pch_n_7rtx_def ((*x).x_apply_args_value); gt_pch_n_7rtx_def ((*x).x_forced_labels); gt_pch_n_7rtx_def ((*x).x_pending_chain); } } void gt_pch_nx_emit_status (void *x_p) { struct emit_status * const x = (struct emit_status *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_11emit_status)) { gt_pch_n_7rtx_def ((*x).x_first_insn); gt_pch_n_7rtx_def ((*x).x_last_insn); gt_pch_n_14sequence_stack ((*x).sequence_stack); gt_pch_n_S ((*x).x_last_location.file); if ((*x).regno_pointer_align != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).x_reg_rtx_no); i0++) { } gt_pch_note_object ((*x).regno_pointer_align, x, gt_pch_p_11emit_status); } if ((*x).x_regno_reg_rtx != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(((*x)).x_reg_rtx_no); i1++) { gt_pch_n_7rtx_def ((*x).x_regno_reg_rtx[i1]); } gt_pch_note_object ((*x).x_regno_reg_rtx, x, gt_pch_p_11emit_status); } } } void gt_pch_nx_sequence_stack (void *x_p) { struct sequence_stack * const x = (struct sequence_stack *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_14sequence_stack)) { gt_pch_n_7rtx_def ((*x).first); gt_pch_n_7rtx_def ((*x).last); gt_pch_n_14sequence_stack ((*x).next); } } void gt_pch_nx_var_refs_queue (void *x_p) { struct var_refs_queue * const x = (struct var_refs_queue *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_14var_refs_queue)) { gt_pch_n_7rtx_def ((*x).modified); gt_pch_n_14var_refs_queue ((*x).next); } } void gt_pch_nx_bitmap_head_def (void *x_p) { struct bitmap_head_def * const x = (struct bitmap_head_def *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_15bitmap_head_def)) { gt_pch_n_18bitmap_element_def ((*x).first); gt_pch_n_18bitmap_element_def ((*x).current); } } void gt_pch_nx_bitmap_element_def (void *x_p) { struct bitmap_element_def * const x = (struct bitmap_element_def *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_18bitmap_element_def)) { gt_pch_n_18bitmap_element_def ((*x).next); gt_pch_n_18bitmap_element_def ((*x).prev); } } void gt_pch_nx_machine_function (void *x_p) { struct machine_function * const x = (struct machine_function *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_16machine_function)) { gt_pch_n_17stack_local_entry ((*x).stack_locals); gt_pch_n_S ((*x).some_ld_name); } } void gt_pch_nx_answer (void *x_p) { struct answer * const x = (struct answer *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_6answer)) { gt_pch_n_6answer ((*x).next); { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).count); i0++) { switch (cpp_token_val_index (&((*x).first[i0]))) { case CPP_TOKEN_FLD_NODE: { union tree_node * const x1 = ((*x).first[i0].val.node) ? 
HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).first[i0].val.node))) : NULL; gt_pch_n_9tree_node (x1); } break; case CPP_TOKEN_FLD_SOURCE: gt_pch_n_9cpp_token ((*x).first[i0].val.source); break; case CPP_TOKEN_FLD_STR: gt_pch_n_S ((*x).first[i0].val.str.text); break; case CPP_TOKEN_FLD_ARG_NO: break; default: break; } } } } } void gt_pch_nx_cpp_macro (void *x_p) { struct cpp_macro * const x = (struct cpp_macro *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9cpp_macro)) { if ((*x).params != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).paramc); i0++) { { union tree_node * const x1 = ((*x).params[i0]) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).params[i0]))) : NULL; gt_pch_n_9tree_node (x1); } } gt_pch_note_object ((*x).params, x, gt_pch_p_9cpp_macro); } switch (((*x)).traditional) { case 0: if ((*x).exp.tokens != NULL) { size_t i2; for (i2 = 0; i2 < (size_t)((*x).count); i2++) { switch (cpp_token_val_index (&((*x).exp.tokens[i2]))) { case CPP_TOKEN_FLD_NODE: { union tree_node * const x3 = ((*x).exp.tokens[i2].val.node) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).exp.tokens[i2].val.node))) : NULL; gt_pch_n_9tree_node (x3); } break; case CPP_TOKEN_FLD_SOURCE: gt_pch_n_9cpp_token ((*x).exp.tokens[i2].val.source); break; case CPP_TOKEN_FLD_STR: gt_pch_n_S ((*x).exp.tokens[i2].val.str.text); break; case CPP_TOKEN_FLD_ARG_NO: break; default: break; } } gt_pch_note_object ((*x).exp.tokens, x, gt_pch_p_9cpp_macro); } break; case 1: gt_pch_n_S ((*x).exp.text); break; default: break; } } } void gt_pch_nx_cpp_token (void *x_p) { struct cpp_token * const x = (struct cpp_token *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9cpp_token)) { switch (cpp_token_val_index (&((*x)))) { case CPP_TOKEN_FLD_NODE: { union tree_node * const x0 = ((*x).val.node) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).val.node))) : NULL; gt_pch_n_9tree_node (x0); } break; case CPP_TOKEN_FLD_SOURCE: gt_pch_n_9cpp_token ((*x).val.source); break; case CPP_TOKEN_FLD_STR: gt_pch_n_S ((*x).val.str.text); break; case CPP_TOKEN_FLD_ARG_NO: break; default: break; } } } void gt_pch_nx_rtvec_def (void *x_p) { struct rtvec_def * const x = (struct rtvec_def *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9rtvec_def)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_elem); i0++) { gt_pch_n_7rtx_def ((*x).elem[i0]); } } } } void gt_pch_nx_rtx_def (void *x_p) { struct rtx_def * x = (struct rtx_def *)x_p; struct rtx_def * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_7rtx_def)) xlimit = (RTX_NEXT (&(*xlimit))); if (x != xlimit) for (;;) { struct rtx_def * const xprev = (RTX_PREV (&(*x))); if (xprev == NULL) break; x = xprev; (void) gt_pch_note_object (xprev, xprev, gt_pch_p_7rtx_def); } while (x != xlimit) { switch (GET_CODE (&(*x))) { case VAR_LOCATION: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_9tree_node ((*x).u.fld[0].rttree); break; case US_TRUNCATE: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SS_TRUNCATE: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case US_MINUS: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SS_MINUS: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case US_PLUS: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SS_PLUS: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case VEC_DUPLICATE: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case VEC_CONCAT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case VEC_SELECT: 
gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case VEC_MERGE: gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case RANGE_LIVE: gt_pch_n_18bitmap_element_def ((*x).u.fld[0].rtbit); break; case RANGE_VAR: gt_pch_n_9tree_node ((*x).u.fld[1].rttree); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case RANGE_REG: gt_pch_n_9tree_node ((*x).u.fld[9].rttree); gt_pch_n_9tree_node ((*x).u.fld[8].rttree); break; case RANGE_INFO: gt_pch_n_18bitmap_element_def ((*x).u.fld[10].rtbit); gt_pch_n_18bitmap_element_def ((*x).u.fld[9].rtbit); gt_pch_n_9rtvec_def ((*x).u.fld[2].rtvec); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case LO_SUM: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case HIGH: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ZERO_EXTRACT: gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SIGN_EXTRACT: gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case PARITY: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case POPCOUNT: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case CTZ: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case CLZ: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case FFS: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SQRT: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ABS: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UNSIGNED_FIX: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UNSIGNED_FLOAT: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case FIX: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case FLOAT: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case FLOAT_TRUNCATE: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case FLOAT_EXTEND: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case TRUNCATE: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ZERO_EXTEND: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SIGN_EXTEND: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case LTGT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UNLT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UNLE: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UNGT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UNGE: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UNEQ: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ORDERED: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UNORDERED: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case LTU: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case LEU: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case GTU: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case GEU: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case LT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case LE: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case GT: gt_pch_n_7rtx_def 
((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case GE: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case EQ: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case NE: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case POST_MODIFY: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case PRE_MODIFY: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case POST_INC: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case POST_DEC: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case PRE_INC: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case PRE_DEC: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UMAX: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UMIN: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SMAX: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SMIN: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ROTATERT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case LSHIFTRT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ASHIFTRT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ROTATE: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ASHIFT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case NOT: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case XOR: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case IOR: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case AND: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UMOD: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case UDIV: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case MOD: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case DIV: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case MULT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case NEG: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case MINUS: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case PLUS: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case COMPARE: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case COND: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case IF_THEN_ELSE: gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case QUEUED: gt_pch_n_7rtx_def ((*x).u.fld[4].rtx); gt_pch_n_7rtx_def ((*x).u.fld[3].rtx); gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case CC0: break; case SYMBOL_REF: gt_pch_n_9tree_node ((*x).u.fld[2].rttree); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case LABEL_REF: gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); 
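/* Unlike the GGC marker earlier, the PCH walker also notes string
   operands with gt_pch_n_S (SYMBOL_REF names, CONST_STRING bodies,
   ASM text and note strings), since those must be serialized too.  */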
break; case MEM: gt_pch_n_9mem_attrs ((*x).u.fld[1].rtmem); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case CONCAT: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case STRICT_LOW_PART: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SUBREG: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SCRATCH: break; case REG: gt_pch_n_9reg_attrs ((*x).u.fld[2].rtreg); break; case VALUE: break; case PC: break; case CONST: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case CONST_STRING: gt_pch_n_S ((*x).u.fld[0].rtstr); break; case CONST_VECTOR: gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case CONST_DOUBLE: break; case CONST_INT: break; case RESX: break; case TRAP_IF: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case RETURN: break; case CALL: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case CLOBBER: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case USE: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SET: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case PREFETCH: gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ADDR_DIFF_VEC: gt_pch_n_7rtx_def ((*x).u.fld[3].rtx); gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_9rtvec_def ((*x).u.fld[1].rtvec); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case ADDR_VEC: gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case UNSPEC_VOLATILE: gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case UNSPEC: gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case ASM_OPERANDS: gt_pch_n_S ((*x).u.fld[5].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[4].rtvec); gt_pch_n_9rtvec_def ((*x).u.fld[3].rtvec); gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case ASM_INPUT: gt_pch_n_S ((*x).u.fld[0].rtstr); break; case PARALLEL: gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case COND_EXEC: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case NOTE: switch (NOTE_LINE_NUMBER (&(*x))) { default: gt_pch_n_S ((*x).u.fld[4].rtstr); break; case -79: gt_pch_n_7rtx_def ((*x).u.fld[4].rtx); break; case -80: break; case -81: break; case -82: gt_pch_n_7rtx_def ((*x).u.fld[4].rtx); break; case -83: break; case -84: break; case -85: break; case -86: break; case -87: break; case -88: break; case -89: break; case -90: break; case -91: break; case -92: break; case -93: break; case -94: break; case -95: break; case -96: break; case -97: gt_pch_n_9tree_node ((*x).u.fld[4].rttree); break; case -98: gt_pch_n_9tree_node ((*x).u.fld[4].rttree); break; case -99: break; case -100: break; } gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); break; case CODE_LABEL: gt_pch_n_S ((*x).u.fld[7].rtstr); gt_pch_n_7rtx_def ((*x).u.fld[5].rtx); gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); break; case BARRIER: gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); break; case CALL_INSN: gt_pch_n_7rtx_def ((*x).u.fld[9].rtx); gt_pch_n_7rtx_def ((*x).u.fld[8].rtx); gt_pch_n_7rtx_def ((*x).u.fld[7].rtx); gt_pch_n_7rtx_def ((*x).u.fld[5].rtx); gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); break; case JUMP_INSN: gt_pch_n_7rtx_def ((*x).u.fld[9].rtx); gt_pch_n_7rtx_def ((*x).u.fld[8].rtx); gt_pch_n_7rtx_def ((*x).u.fld[7].rtx); gt_pch_n_7rtx_def ((*x).u.fld[5].rtx); gt_pch_n_7rtx_def 
((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); break; case INSN: gt_pch_n_7rtx_def ((*x).u.fld[8].rtx); gt_pch_n_7rtx_def ((*x).u.fld[7].rtx); gt_pch_n_7rtx_def ((*x).u.fld[5].rtx); gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); break; case ATTR_FLAG: gt_pch_n_S ((*x).u.fld[0].rtstr); break; case EQ_ATTR_ALT: break; case EQ_ATTR: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case SET_ATTR_ALTERNATIVE: gt_pch_n_9rtvec_def ((*x).u.fld[1].rtvec); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case SET_ATTR: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case ATTR: gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_ATTR: gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_INSN_RESERVATION: gt_pch_n_S ((*x).u.fld[3].rtstr); gt_pch_n_7rtx_def ((*x).u.fld[2].rtx); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_RESERVATION: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case AUTOMATA_OPTION: gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_AUTOMATON: gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_BYPASS: gt_pch_n_S ((*x).u.fld[3].rtstr); gt_pch_n_S ((*x).u.fld[2].rtstr); gt_pch_n_S ((*x).u.fld[1].rtstr); break; case FINAL_ABSENCE_SET: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case ABSENCE_SET: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case FINAL_PRESENCE_SET: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case PRESENCE_SET: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case EXCLUSION_SET: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_QUERY_CPU_UNIT: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_CPU_UNIT: gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case ADDRESS: gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case SEQUENCE: gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_COND_EXEC: gt_pch_n_S ((*x).u.fld[2].rtstr); gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_ASM_ATTRIBUTES: gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_FUNCTION_UNIT: gt_pch_n_9rtvec_def ((*x).u.fld[6].rtvec); gt_pch_n_7rtx_def ((*x).u.fld[3].rtx); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_DELAY: gt_pch_n_9rtvec_def ((*x).u.fld[1].rtvec); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case DEFINE_EXPAND: gt_pch_n_S ((*x).u.fld[3].rtstr); gt_pch_n_S ((*x).u.fld[2].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[1].rtvec); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_PEEPHOLE2: gt_pch_n_S ((*x).u.fld[3].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[2].rtvec); gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_INSN_AND_SPLIT: gt_pch_n_9rtvec_def ((*x).u.fld[7].rtvec); gt_pch_n_S ((*x).u.fld[6].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[5].rtvec); gt_pch_n_S ((*x).u.fld[4].rtstr); gt_pch_n_S ((*x).u.fld[3].rtstr); gt_pch_n_S ((*x).u.fld[2].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[1].rtvec); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case DEFINE_SPLIT: gt_pch_n_S ((*x).u.fld[3].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[2].rtvec); gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_PEEPHOLE: gt_pch_n_9rtvec_def ((*x).u.fld[3].rtvec); 
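/* The DEFINE_ and MATCH_ cases cover machine-description rtx codes;
   a case is emitted for every code in rtl.def, although these codes
   normally never appear in a compiled function's RTL at run time.  */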
gt_pch_n_S ((*x).u.fld[2].rtstr); gt_pch_n_S ((*x).u.fld[1].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[0].rtvec); break; case DEFINE_INSN: gt_pch_n_9rtvec_def ((*x).u.fld[4].rtvec); gt_pch_n_S ((*x).u.fld[3].rtstr); gt_pch_n_S ((*x).u.fld[2].rtstr); gt_pch_n_9rtvec_def ((*x).u.fld[1].rtvec); gt_pch_n_S ((*x).u.fld[0].rtstr); break; case MATCH_PAR_DUP: gt_pch_n_9rtvec_def ((*x).u.fld[1].rtvec); break; case MATCH_OP_DUP: gt_pch_n_9rtvec_def ((*x).u.fld[1].rtvec); break; case MATCH_PARALLEL: gt_pch_n_9rtvec_def ((*x).u.fld[2].rtvec); gt_pch_n_S ((*x).u.fld[1].rtstr); break; case MATCH_OPERATOR: gt_pch_n_9rtvec_def ((*x).u.fld[2].rtvec); gt_pch_n_S ((*x).u.fld[1].rtstr); break; case MATCH_DUP: break; case MATCH_SCRATCH: gt_pch_n_S ((*x).u.fld[1].rtstr); break; case MATCH_OPERAND: gt_pch_n_S ((*x).u.fld[2].rtstr); gt_pch_n_S ((*x).u.fld[1].rtstr); break; case INSN_LIST: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case EXPR_LIST: gt_pch_n_7rtx_def ((*x).u.fld[1].rtx); gt_pch_n_7rtx_def ((*x).u.fld[0].rtx); break; case INCLUDE: gt_pch_n_S ((*x).u.fld[0].rtstr); break; case NIL: break; case UNKNOWN: break; default: break; } x = (RTX_NEXT (&(*x))); } } void gt_pch_nx_location_s (void *x_p) { struct location_s * const x = (struct location_s *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_10location_s)) { gt_pch_n_S ((*x).file); } } void gt_pch_n_II17splay_tree_node_s (void *x_p) { struct splay_tree_node_s * const x = (struct splay_tree_node_s *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_II17splay_tree_node_s)) { gt_pch_n_II17splay_tree_node_s ((*x).left); gt_pch_n_II17splay_tree_node_s ((*x).right); } } void gt_pch_n_SP9tree_node17splay_tree_node_s (void *x_p) { struct splay_tree_node_s * const x = (struct splay_tree_node_s *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_SP9tree_node17splay_tree_node_s)) { gt_pch_n_S ((void *)(*x).key); gt_pch_n_9tree_node ((void *)(*x).value); gt_pch_n_SP9tree_node17splay_tree_node_s ((*x).left); gt_pch_n_SP9tree_node17splay_tree_node_s ((*x).right); } } void gt_pch_n_P13alias_var_def15varray_head_tag (void *x_p) { struct varray_head_tag * const x = (struct varray_head_tag *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P13alias_var_def15varray_head_tag)) { gt_pch_n_S ((*x).name); switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { gt_pch_n_13alias_var_def ((*x).data.generic[i10]); } } break; case VARRAY_DATA_CPTR: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { gt_pch_n_S ((*x).data.cptr[i11]); } } break; case VARRAY_DATA_RTX: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { gt_pch_n_7rtx_def ((*x).data.rtx[i12]); } } break; case VARRAY_DATA_RTVEC: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { gt_pch_n_9rtvec_def ((*x).data.rtvec[i13]); } } break; case VARRAY_DATA_TREE: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { gt_pch_n_9tree_node ((*x).data.tree[i14]); } } break; case VARRAY_DATA_BITMAP: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { gt_pch_n_15bitmap_head_def ((*x).data.bitmap[i15]); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i16; for (i16 = 0; i16 < 
(size_t)((*x).num_elements); i16++) { gt_pch_n_7rtx_def ((*x).data.const_equiv[i16].rtx); } } break; case VARRAY_DATA_TE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { gt_pch_n_8elt_list ((*x).data.te[i17]); } } break; case VARRAY_DATA_EDGE: { size_t i18; for (i18 = 0; i18 < (size_t)((*x).num_elements); i18++) { gt_pch_n_8edge_def ((*x).data.e[i18]); } } break; default: break; } } } void gt_pch_n_P9tree_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P9tree_node4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_9tree_node ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P9tree_node4htab); } } } void gt_pch_n_P9reg_attrs4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P9reg_attrs4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_9reg_attrs ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P9reg_attrs4htab); } } } void gt_pch_n_P9mem_attrs4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P9mem_attrs4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_9mem_attrs ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P9mem_attrs4htab); } } } void gt_pch_n_P7rtx_def4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P7rtx_def4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_7rtx_def ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P7rtx_def4htab); } } } void gt_pch_n_SP9tree_node12splay_tree_s (void *x_p) { struct splay_tree_s * const x = (struct splay_tree_s *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_SP9tree_node12splay_tree_s)) { gt_pch_n_SP9tree_node17splay_tree_node_s ((*x).root); } } void gt_pch_n_P19cgraph_varpool_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P19cgraph_varpool_node4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_19cgraph_varpool_node ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P19cgraph_varpool_node4htab); } } } void gt_pch_n_P11cgraph_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P11cgraph_node4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_11cgraph_node ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P11cgraph_node4htab); } } } void gt_pch_n_II12splay_tree_s (void *x_p) { struct splay_tree_s * const x = (struct splay_tree_s *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_II12splay_tree_s)) { gt_pch_n_II17splay_tree_node_s ((*x).root); } } void gt_pch_p_15edge_prediction (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct edge_prediction * const x ATTRIBUTE_UNUSED = (struct edge_prediction *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).edge), cookie); } void gt_pch_p_19v_must_def_optype_d (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct v_must_def_optype_d * const 
x ATTRIBUTE_UNUSED = (struct v_must_def_optype_d *)x_p; { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_v_must_defs); i0++) { if ((void *)(x) == this_obj) op (&((*x).v_must_defs[i0]), cookie); } } } void gt_pch_p_13vuse_optype_d (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct vuse_optype_d * const x ATTRIBUTE_UNUSED = (struct vuse_optype_d *)x_p; { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_vuses); i0++) { if ((void *)(x) == this_obj) op (&((*x).vuses[i0]), cookie); } } } void gt_pch_p_18v_may_def_optype_d (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct v_may_def_optype_d * const x ATTRIBUTE_UNUSED = (struct v_may_def_optype_d *)x_p; { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_v_may_defs * 2); i0++) { if ((void *)(x) == this_obj) op (&((*x).v_may_defs[i0]), cookie); } } } void gt_pch_p_12use_optype_d (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct use_optype_d * const x ATTRIBUTE_UNUSED = (struct use_optype_d *)x_p; { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_uses); i0++) { } } } void gt_pch_p_12def_optype_d (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct def_optype_d * const x ATTRIBUTE_UNUSED = (struct def_optype_d *)x_p; { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_defs); i0++) { } } } void gt_pch_p_10dataflow_d (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct dataflow_d * const x ATTRIBUTE_UNUSED = (struct dataflow_d *)x_p; if ((void *)(x) == this_obj) op (&((*x).immediate_uses), cookie); { size_t i0; for (i0 = 0; i0 < (size_t)(2); i0++) { if ((void *)(x) == this_obj) op (&((*x).uses[i0]), cookie); } } } void gt_pch_p_19cgraph_varpool_node (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct cgraph_varpool_node * const x ATTRIBUTE_UNUSED = (struct cgraph_varpool_node *)x_p; if ((void *)(x) == this_obj) op (&((*x).decl), cookie); if ((void *)(x) == this_obj) op (&((*x).next_needed), cookie); } void gt_pch_p_11cgraph_edge (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct cgraph_edge * const x ATTRIBUTE_UNUSED = (struct cgraph_edge *)x_p; if ((void *)(x) == this_obj) op (&((*x).caller), cookie); if ((void *)(x) == this_obj) op (&((*x).callee), cookie); if ((void *)(x) == this_obj) op (&((*x).next_caller), cookie); if ((void *)(x) == this_obj) op (&((*x).next_callee), cookie); if ((void *)(x) == this_obj) op (&((*x).call_expr), cookie); if ((void *)(x) == this_obj) op (&((*x).inline_failed), cookie); } void gt_pch_p_11cgraph_node (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct cgraph_node * const x ATTRIBUTE_UNUSED = (struct cgraph_node *)x_p; if ((void *)(x) == this_obj) op (&((*x).decl), cookie); if ((void *)(x) == this_obj) op (&((*x).callees), cookie); if ((void *)(x) == this_obj) op (&((*x).callers), cookie); if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).previous), cookie); if ((void *)(x) == this_obj) op (&((*x).origin), cookie); if ((void *)(x) == this_obj) op (&((*x).nested), cookie); if ((void *)(x) 
== this_obj) op (&((*x).next_nested), cookie); if ((void *)(x) == this_obj) op (&((*x).next_needed), cookie); if ((void *)(x) == this_obj) op (&((*x).next_clone), cookie); if ((void *)(x) == this_obj) op (&((*x).global.inlined_to), cookie); } void gt_pch_p_8bb_ann_d (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct bb_ann_d * const x ATTRIBUTE_UNUSED = (struct bb_ann_d *)x_p; if ((void *)(x) == this_obj) op (&((*x).phi_nodes), cookie); if ((void *)(x) == this_obj) op (&((*x).predictions), cookie); } void gt_pch_p_12elt_loc_list (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct elt_loc_list * const x ATTRIBUTE_UNUSED = (struct elt_loc_list *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).loc), cookie); if ((void *)(x) == this_obj) op (&((*x).setting_insn), cookie); } void gt_pch_p_17cselib_val_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct cselib_val_struct * const x ATTRIBUTE_UNUSED = (struct cselib_val_struct *)x_p; switch (1) { case 1: if ((void *)(x) == this_obj) op (&((*x).u.val_rtx), cookie); break; default: break; } if ((void *)(x) == this_obj) op (&((*x).locs), cookie); if ((void *)(x) == this_obj) op (&((*x).addr_list), cookie); if ((void *)(x) == this_obj) op (&((*x).next_containing_mem), cookie); } void gt_pch_p_8elt_list (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct elt_list * const x ATTRIBUTE_UNUSED = (struct elt_list *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).elt), cookie); } void gt_pch_p_24tree_statement_list_node (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct tree_statement_list_node * const x ATTRIBUTE_UNUSED = (struct tree_statement_list_node *)x_p; if ((void *)(x) == this_obj) op (&((*x).prev), cookie); if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).stmt), cookie); } void gt_pch_p_13alias_var_def (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { union alias_var_def * const x ATTRIBUTE_UNUSED = (union alias_var_def *)x_p; switch ((*x).common.kind) { case -1: if ((void *)(x) == this_obj) op (&((*x).common.decl), cookie); break; case ATERM_AVAR: if ((void *)(x) == this_obj) op (&((*x).aterm.common.decl), cookie); break; default: break; } } void gt_pch_p_8edge_def (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct edge_def * const x ATTRIBUTE_UNUSED = (struct edge_def *)x_p; if ((void *)(x) == this_obj) op (&((*x).pred_next), cookie); if ((void *)(x) == this_obj) op (&((*x).succ_next), cookie); if ((void *)(x) == this_obj) op (&((*x).src), cookie); if ((void *)(x) == this_obj) op (&((*x).dest), cookie); switch (ir_type ()) { case 0: if ((void *)(x) == this_obj) op (&((*x).insns.r), cookie); break; case 1: if ((void *)(x) == this_obj) op (&((*x).insns.t), cookie); break; default: break; } if ((void *)(x) == this_obj) op (&((*x).goto_locus), cookie); } void gt_pch_p_12ptr_info_def (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie 
ATTRIBUTE_UNUSED) { struct ptr_info_def * const x ATTRIBUTE_UNUSED = (struct ptr_info_def *)x_p; if ((void *)(x) == this_obj) op (&((*x).pt_vars), cookie); if ((void *)(x) == this_obj) op (&((*x).name_mem_tag), cookie); } void gt_pch_p_10real_value (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct real_value * const x ATTRIBUTE_UNUSED = (struct real_value *)x_p; } void gt_pch_p_10tree_ann_d (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { union tree_ann_d * const x ATTRIBUTE_UNUSED = (union tree_ann_d *)x_p; switch (ann_type ((tree_ann_t)&((*x)))) { case TREE_ANN_COMMON: break; case VAR_ANN: if ((void *)(x) == this_obj) op (&((*x).decl.type_mem_tag), cookie); if ((void *)(x) == this_obj) op (&((*x).decl.may_aliases), cookie); if ((void *)(x) == this_obj) op (&((*x).decl.default_def), cookie); if ((void *)(x) == this_obj) op (&((*x).decl.current_def), cookie); break; case STMT_ANN: if ((void *)(x) == this_obj) op (&((*x).stmt.def_ops), cookie); if ((void *)(x) == this_obj) op (&((*x).stmt.use_ops), cookie); if ((void *)(x) == this_obj) op (&((*x).stmt.v_may_def_ops), cookie); if ((void *)(x) == this_obj) op (&((*x).stmt.vuse_ops), cookie); if ((void *)(x) == this_obj) op (&((*x).stmt.v_must_def_ops), cookie); if ((void *)(x) == this_obj) op (&((*x).stmt.df), cookie); if ((void *)(x) == this_obj) op (&((*x).stmt.addresses_taken), cookie); break; default: break; } } void gt_pch_p_13convert_optab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct convert_optab * const x ATTRIBUTE_UNUSED = (struct convert_optab *)x_p; { size_t i0; for (i0 = 0; i0 < (size_t)(NUM_MACHINE_MODES); i0++) { { size_t i1; for (i1 = 0; i1 < (size_t)(NUM_MACHINE_MODES); i1++) { if ((void *)(x) == this_obj) op (&((*x).handlers[i0][i1].libfunc), cookie); } } } } } void gt_pch_p_5optab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct optab * const x ATTRIBUTE_UNUSED = (struct optab *)x_p; { size_t i0; for (i0 = 0; i0 < (size_t)(NUM_MACHINE_MODES); i0++) { if ((void *)(x) == this_obj) op (&((*x).handlers[i0].libfunc), cookie); } } } void gt_pch_p_15basic_block_def (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct basic_block_def * const x ATTRIBUTE_UNUSED = (struct basic_block_def *)x_p; if ((void *)(x) == this_obj) op (&((*x).head_), cookie); if ((void *)(x) == this_obj) op (&((*x).end_), cookie); if ((void *)(x) == this_obj) op (&((*x).stmt_list), cookie); if ((void *)(x) == this_obj) op (&((*x).pred), cookie); if ((void *)(x) == this_obj) op (&((*x).succ), cookie); if ((void *)(x) == this_obj) op (&((*x).prev_bb), cookie); if ((void *)(x) == this_obj) op (&((*x).next_bb), cookie); if ((void *)(x) == this_obj) op (&((*x).tree_annotations), cookie); } void gt_pch_p_9reg_attrs (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct reg_attrs * const x ATTRIBUTE_UNUSED = (struct reg_attrs *)x_p; if ((void *)(x) == this_obj) op (&((*x).decl), cookie); } void gt_pch_p_9mem_attrs (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct mem_attrs * const x ATTRIBUTE_UNUSED = (struct mem_attrs *)x_p; if ((void *)(x) == 
this_obj) op (&((*x).expr), cookie); if ((void *)(x) == this_obj) op (&((*x).offset), cookie); if ((void *)(x) == this_obj) op (&((*x).size), cookie); } void gt_pch_p_15varray_head_tag (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct varray_head_tag * const x ATTRIBUTE_UNUSED = (struct varray_head_tag *)x_p; if ((void *)(x) == this_obj) op (&((*x).name), cookie); switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: abort(); break; case VARRAY_DATA_CPTR: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { if ((void *)(x) == this_obj) op (&((*x).data.cptr[i10]), cookie); } } break; case VARRAY_DATA_RTX: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { if ((void *)(x) == this_obj) op (&((*x).data.rtx[i11]), cookie); } } break; case VARRAY_DATA_RTVEC: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { if ((void *)(x) == this_obj) op (&((*x).data.rtvec[i12]), cookie); } } break; case VARRAY_DATA_TREE: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { if ((void *)(x) == this_obj) op (&((*x).data.tree[i13]), cookie); } } break; case VARRAY_DATA_BITMAP: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { if ((void *)(x) == this_obj) op (&((*x).data.bitmap[i14]), cookie); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { if ((void *)(x) == this_obj) op (&((*x).data.const_equiv[i15].rtx), cookie); } } break; case VARRAY_DATA_TE: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { if ((void *)(x) == this_obj) op (&((*x).data.te[i16]), cookie); } } break; case VARRAY_DATA_EDGE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { if ((void *)(x) == this_obj) op (&((*x).data.e[i17]), cookie); } } break; default: break; } } void gt_pch_p_8function (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct function * const x ATTRIBUTE_UNUSED = (struct function *)x_p; if ((void *)(x) == this_obj) op (&((*x).eh), cookie); if ((void *)(x) == this_obj) op (&((*x).stmt), cookie); if ((void *)(x) == this_obj) op (&((*x).expr), cookie); if ((void *)(x) == this_obj) op (&((*x).emit), cookie); if ((void *)(x) == this_obj) op (&((*x).varasm), cookie); if ((void *)(x) == this_obj) op (&((*x).saved_tree), cookie); if ((void *)(x) == this_obj) op (&((*x).saved_args), cookie); if ((void *)(x) == this_obj) op (&((*x).decl), cookie); if ((void *)(x) == this_obj) op (&((*x).outer), cookie); if ((void *)(x) == this_obj) op (&((*x).arg_offset_rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).return_rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).internal_arg_pointer), cookie); if ((void *)(x) == this_obj) op (&((*x).hard_reg_initial_vals), cookie); if ((void *)(x) == this_obj) op (&((*x).x_nonlocal_goto_handler_labels), cookie); if ((void *)(x) == this_obj) op (&((*x).x_return_label), cookie); if ((void *)(x) == this_obj) op (&((*x).x_naked_return_label), cookie); if ((void *)(x) == this_obj) op (&((*x).x_stack_slot_list), cookie); if ((void *)(x) == this_obj) op (&((*x).x_tail_recursion_reentry), 
cookie); if ((void *)(x) == this_obj) op (&((*x).x_arg_pointer_save_area), cookie); if ((void *)(x) == this_obj) op (&((*x).static_chain_decl), cookie); if ((void *)(x) == this_obj) op (&((*x).nonlocal_goto_save_area), cookie); if ((void *)(x) == this_obj) op (&((*x).x_parm_birth_insn), cookie); if ((void *)(x) == this_obj) op (&((*x).x_used_temp_slots), cookie); if ((void *)(x) == this_obj) op (&((*x).x_avail_temp_slots), cookie); if ((void *)(x) == this_obj) op (&((*x).fixup_var_refs_queue), cookie); if ((void *)(x) == this_obj) op (&((*x).original_arg_vector), cookie); if ((void *)(x) == this_obj) op (&((*x).original_decl_initial), cookie); if ((void *)(x) == this_obj) op (&((*x).machine), cookie); if ((void *)(x) == this_obj) op (&((*x).language), cookie); if ((void *)(x) == this_obj) op (&((*x).epilogue_delay_list), cookie); if ((void *)(x) == this_obj) op (&((*x).function_end_locus.file), cookie); if ((void *)(x) == this_obj) op (&((*x).ib_boundaries_block), cookie); if ((void *)(x) == this_obj) op (&((*x).unexpanded_var_list), cookie); } void gt_pch_p_11expr_status (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct expr_status * const x ATTRIBUTE_UNUSED = (struct expr_status *)x_p; if ((void *)(x) == this_obj) op (&((*x).x_saveregs_value), cookie); if ((void *)(x) == this_obj) op (&((*x).x_apply_args_value), cookie); if ((void *)(x) == this_obj) op (&((*x).x_forced_labels), cookie); if ((void *)(x) == this_obj) op (&((*x).x_pending_chain), cookie); } void gt_pch_p_11emit_status (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct emit_status * const x ATTRIBUTE_UNUSED = (struct emit_status *)x_p; if ((void *)(x) == this_obj) op (&((*x).x_first_insn), cookie); if ((void *)(x) == this_obj) op (&((*x).x_last_insn), cookie); if ((void *)(x) == this_obj) op (&((*x).sequence_stack), cookie); if ((void *)(x) == this_obj) op (&((*x).x_last_location.file), cookie); if ((*x).regno_pointer_align != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).x_reg_rtx_no); i0++) { } if ((void *)(x) == this_obj) op (&((*x).regno_pointer_align), cookie); } if ((*x).x_regno_reg_rtx != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(((*x)).x_reg_rtx_no); i1++) { if ((void *)((*x).x_regno_reg_rtx) == this_obj) op (&((*x).x_regno_reg_rtx[i1]), cookie); } if ((void *)(x) == this_obj) op (&((*x).x_regno_reg_rtx), cookie); } } void gt_pch_p_14sequence_stack (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct sequence_stack * const x ATTRIBUTE_UNUSED = (struct sequence_stack *)x_p; if ((void *)(x) == this_obj) op (&((*x).first), cookie); if ((void *)(x) == this_obj) op (&((*x).last), cookie); if ((void *)(x) == this_obj) op (&((*x).next), cookie); } void gt_pch_p_14var_refs_queue (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct var_refs_queue * const x ATTRIBUTE_UNUSED = (struct var_refs_queue *)x_p; if ((void *)(x) == this_obj) op (&((*x).modified), cookie); if ((void *)(x) == this_obj) op (&((*x).next), cookie); } void gt_pch_p_15bitmap_head_def (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct bitmap_head_def * const x ATTRIBUTE_UNUSED = (struct bitmap_head_def *)x_p; if ((void *)(x) == this_obj) op (&((*x).first), cookie); if ((void *)(x) == 
this_obj) op (&((*x).current), cookie); } void gt_pch_p_18bitmap_element_def (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct bitmap_element_def * const x ATTRIBUTE_UNUSED = (struct bitmap_element_def *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).prev), cookie); } void gt_pch_p_16machine_function (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct machine_function * const x ATTRIBUTE_UNUSED = (struct machine_function *)x_p; if ((void *)(x) == this_obj) op (&((*x).stack_locals), cookie); if ((void *)(x) == this_obj) op (&((*x).some_ld_name), cookie); } void gt_pch_p_6answer (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct answer * const x ATTRIBUTE_UNUSED = (struct answer *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).count); i0++) { switch (cpp_token_val_index (&((*x).first[i0]))) { case CPP_TOKEN_FLD_NODE: { union tree_node * x1 = ((*x).first[i0].val.node) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).first[i0].val.node))) : NULL; if ((void *)(x) == this_obj) op (&(x1), cookie); (*x).first[i0].val.node = (x1) ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT ((x1))) : NULL; } break; case CPP_TOKEN_FLD_SOURCE: if ((void *)(x) == this_obj) op (&((*x).first[i0].val.source), cookie); break; case CPP_TOKEN_FLD_STR: if ((void *)(x) == this_obj) op (&((*x).first[i0].val.str.text), cookie); break; case CPP_TOKEN_FLD_ARG_NO: break; default: break; } } } } void gt_pch_p_9cpp_macro (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct cpp_macro * const x ATTRIBUTE_UNUSED = (struct cpp_macro *)x_p; if ((*x).params != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).paramc); i0++) { { union tree_node * x1 = ((*x).params[i0]) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).params[i0]))) : NULL; if ((void *)((*x).params) == this_obj) op (&(x1), cookie); (*x).params[i0] = (x1) ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT ((x1))) : NULL; } } if ((void *)(x) == this_obj) op (&((*x).params), cookie); } switch (((*x)).traditional) { case 0: if ((*x).exp.tokens != NULL) { size_t i2; for (i2 = 0; i2 < (size_t)((*x).count); i2++) { switch (cpp_token_val_index (&((*x).exp.tokens[i2]))) { case CPP_TOKEN_FLD_NODE: { union tree_node * x3 = ((*x).exp.tokens[i2].val.node) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).exp.tokens[i2].val.node))) : NULL; if ((void *)((*x).exp.tokens) == this_obj) op (&(x3), cookie); (*x).exp.tokens[i2].val.node = (x3) ? 
CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT ((x3))) : NULL; } break; case CPP_TOKEN_FLD_SOURCE: if ((void *)((*x).exp.tokens) == this_obj) op (&((*x).exp.tokens[i2].val.source), cookie); break; case CPP_TOKEN_FLD_STR: if ((void *)((*x).exp.tokens) == this_obj) op (&((*x).exp.tokens[i2].val.str.text), cookie); break; case CPP_TOKEN_FLD_ARG_NO: break; default: break; } } if ((void *)(x) == this_obj) op (&((*x).exp.tokens), cookie); } break; case 1: if ((void *)(x) == this_obj) op (&((*x).exp.text), cookie); break; default: break; } } void gt_pch_p_9cpp_token (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct cpp_token * const x ATTRIBUTE_UNUSED = (struct cpp_token *)x_p; switch (cpp_token_val_index (&((*x)))) { case CPP_TOKEN_FLD_NODE: { union tree_node * x0 = ((*x).val.node) ? HT_IDENT_TO_GCC_IDENT (HT_NODE (((*x).val.node))) : NULL; if ((void *)(x) == this_obj) op (&(x0), cookie); (*x).val.node = (x0) ? CPP_HASHNODE (GCC_IDENT_TO_HT_IDENT ((x0))) : NULL; } break; case CPP_TOKEN_FLD_SOURCE: if ((void *)(x) == this_obj) op (&((*x).val.source), cookie); break; case CPP_TOKEN_FLD_STR: if ((void *)(x) == this_obj) op (&((*x).val.str.text), cookie); break; case CPP_TOKEN_FLD_ARG_NO: break; default: break; } } void gt_pch_p_9rtvec_def (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct rtvec_def * const x ATTRIBUTE_UNUSED = (struct rtvec_def *)x_p; { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_elem); i0++) { if ((void *)(x) == this_obj) op (&((*x).elem[i0]), cookie); } } } void gt_pch_p_7rtx_def (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct rtx_def * const x ATTRIBUTE_UNUSED = (struct rtx_def *)x_p; switch (GET_CODE (&(*x))) { case VAR_LOCATION: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rttree), cookie); break; case US_TRUNCATE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SS_TRUNCATE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case US_MINUS: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SS_MINUS: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case US_PLUS: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SS_PLUS: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case VEC_DUPLICATE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case VEC_CONCAT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case VEC_SELECT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case VEC_MERGE: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case RANGE_LIVE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtbit), cookie); break; case RANGE_VAR: if ((void *)(x) == this_obj) op 
(&((*x).u.fld[1].rttree), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case RANGE_REG: if ((void *)(x) == this_obj) op (&((*x).u.fld[9].rttree), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[8].rttree), cookie); break; case RANGE_INFO: if ((void *)(x) == this_obj) op (&((*x).u.fld[10].rtbit), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[9].rtbit), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case LO_SUM: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case HIGH: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case ZERO_EXTRACT: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SIGN_EXTRACT: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case PARITY: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case POPCOUNT: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case CTZ: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case CLZ: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case FFS: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SQRT: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case ABS: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UNSIGNED_FIX: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UNSIGNED_FLOAT: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case FIX: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case FLOAT: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case FLOAT_TRUNCATE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case FLOAT_EXTEND: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case TRUNCATE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case ZERO_EXTEND: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SIGN_EXTEND: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case LTGT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UNLT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UNLE: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UNGT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UNGE: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UNEQ: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case ORDERED: if ((void *)(x) 
== this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UNORDERED: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case LTU: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case LEU: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case GTU: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case GEU: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case LT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case LE: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case GT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case GE: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case EQ: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case NE: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case POST_MODIFY: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case PRE_MODIFY: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case POST_INC: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case POST_DEC: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case PRE_INC: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case PRE_DEC: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UMAX: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UMIN: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SMAX: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SMIN: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case ROTATERT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case LSHIFTRT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case ASHIFTRT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case ROTATE: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case 
ASHIFT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case NOT: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case XOR: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case IOR: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case AND: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UMOD: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case UDIV: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case MOD: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case DIV: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case MULT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case NEG: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case MINUS: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case PLUS: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case COMPARE: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case COND: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case IF_THEN_ELSE: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case QUEUED: if ((void *)(x) == this_obj) op (&((*x).u.fld[4].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case CC0: break; case SYMBOL_REF: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rttree), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case LABEL_REF: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case MEM: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtmem), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case CONCAT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case STRICT_LOW_PART: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SUBREG: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SCRATCH: break; case REG: if ((void *)(x) == this_obj) op 
(&((*x).u.fld[2].rtreg), cookie); break; case VALUE: break; case PC: break; case CONST: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case CONST_STRING: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case CONST_VECTOR: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case CONST_DOUBLE: break; case CONST_INT: break; case RESX: break; case TRAP_IF: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case RETURN: break; case CALL: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case CLOBBER: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case USE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SET: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case PREFETCH: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case ADDR_DIFF_VEC: if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case ADDR_VEC: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case UNSPEC_VOLATILE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case UNSPEC: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case ASM_OPERANDS: if ((void *)(x) == this_obj) op (&((*x).u.fld[5].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[4].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case ASM_INPUT: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case PARALLEL: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case COND_EXEC: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case NOTE: switch (NOTE_LINE_NUMBER (&(*x))) { default: if ((void *)(x) == this_obj) op (&((*x).u.fld[4].rtstr), cookie); break; case -79: if ((void *)(x) == this_obj) op (&((*x).u.fld[4].rtx), cookie); break; case -80: break; case -81: break; case -82: if ((void *)(x) == this_obj) op (&((*x).u.fld[4].rtx), cookie); break; case -83: break; case -84: break; case -85: break; case -86: break; case -87: break; case -88: break; case -89: break; case -90: break; case -91: break; case -92: break; case -93: break; case -94: break; case -95: break; case -96: break; case -97: if ((void *)(x) == this_obj) op (&((*x).u.fld[4].rttree), cookie); break; case -98: if ((void *)(x) == this_obj) op (&((*x).u.fld[4].rttree), cookie); break; case -99: break; case -100: break; } if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); break; case CODE_LABEL: if ((void *)(x) == this_obj) op (&((*x).u.fld[7].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[5].rtx), cookie); if ((void *)(x) 
== this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); break; case BARRIER: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); break; case CALL_INSN: if ((void *)(x) == this_obj) op (&((*x).u.fld[9].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[8].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[7].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[5].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); break; case JUMP_INSN: if ((void *)(x) == this_obj) op (&((*x).u.fld[9].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[8].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[7].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[5].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); break; case INSN: if ((void *)(x) == this_obj) op (&((*x).u.fld[8].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[7].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[5].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); break; case ATTR_FLAG: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case EQ_ATTR_ALT: break; case EQ_ATTR: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case SET_ATTR_ALTERNATIVE: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case SET_ATTR: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case ATTR: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_ATTR: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_INSN_RESERVATION: if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_RESERVATION: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case AUTOMATA_OPTION: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_AUTOMATON: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_BYPASS: if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); break; case FINAL_ABSENCE_SET: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case ABSENCE_SET: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case FINAL_PRESENCE_SET: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == 
this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case PRESENCE_SET: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case EXCLUSION_SET: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_QUERY_CPU_UNIT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_CPU_UNIT: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case ADDRESS: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case SEQUENCE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case DEFINE_COND_EXEC: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case DEFINE_ASM_ATTRIBUTES: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case DEFINE_FUNCTION_UNIT: if ((void *)(x) == this_obj) op (&((*x).u.fld[6].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_DELAY: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case DEFINE_EXPAND: if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_PEEPHOLE2: if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case DEFINE_INSN_AND_SPLIT: if ((void *)(x) == this_obj) op (&((*x).u.fld[7].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[6].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[5].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[4].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case DEFINE_SPLIT: if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case DEFINE_PEEPHOLE: if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtvec), cookie); break; case DEFINE_INSN: if ((void *)(x) == this_obj) op (&((*x).u.fld[4].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[3].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtstr), cookie); if ((void *)(x) == 
this_obj) op (&((*x).u.fld[1].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case MATCH_PAR_DUP: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtvec), cookie); break; case MATCH_OP_DUP: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtvec), cookie); break; case MATCH_PARALLEL: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); break; case MATCH_OPERATOR: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtvec), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); break; case MATCH_DUP: break; case MATCH_SCRATCH: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); break; case MATCH_OPERAND: if ((void *)(x) == this_obj) op (&((*x).u.fld[2].rtstr), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtstr), cookie); break; case INSN_LIST: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case EXPR_LIST: if ((void *)(x) == this_obj) op (&((*x).u.fld[1].rtx), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtx), cookie); break; case INCLUDE: if ((void *)(x) == this_obj) op (&((*x).u.fld[0].rtstr), cookie); break; case NIL: break; case UNKNOWN: break; default: break; } } void gt_pch_p_10location_s (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct location_s * const x ATTRIBUTE_UNUSED = (struct location_s *)x_p; if ((void *)(x) == this_obj) op (&((*x).file), cookie); } void gt_pch_p_II17splay_tree_node_s (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct splay_tree_node_s * const x ATTRIBUTE_UNUSED = (struct splay_tree_node_s *)x_p; if ((void *)(x) == this_obj) op (&((*x).left), cookie); if ((void *)(x) == this_obj) op (&((*x).right), cookie); } void gt_pch_p_SP9tree_node17splay_tree_node_s (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct splay_tree_node_s * const x ATTRIBUTE_UNUSED = (struct splay_tree_node_s *)x_p; if ((void *)(x) == this_obj) op (&((*x).key), cookie); if ((void *)(x) == this_obj) op (&((*x).value), cookie); if ((void *)(x) == this_obj) op (&((*x).left), cookie); if ((void *)(x) == this_obj) op (&((*x).right), cookie); } void gt_pch_p_P13alias_var_def15varray_head_tag (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct varray_head_tag * const x ATTRIBUTE_UNUSED = (struct varray_head_tag *)x_p; if ((void *)(x) == this_obj) op (&((*x).name), cookie); switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { if ((void *)(x) == this_obj) op (&((*x).data.generic[i10]), cookie); } } break; case VARRAY_DATA_CPTR: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { if ((void *)(x) == this_obj) op (&((*x).data.cptr[i11]), cookie); } } break; case VARRAY_DATA_RTX: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { if ((void *)(x) == this_obj) op 
(&((*x).data.rtx[i12]), cookie); } } break; case VARRAY_DATA_RTVEC: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { if ((void *)(x) == this_obj) op (&((*x).data.rtvec[i13]), cookie); } } break; case VARRAY_DATA_TREE: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { if ((void *)(x) == this_obj) op (&((*x).data.tree[i14]), cookie); } } break; case VARRAY_DATA_BITMAP: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { if ((void *)(x) == this_obj) op (&((*x).data.bitmap[i15]), cookie); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { if ((void *)(x) == this_obj) op (&((*x).data.const_equiv[i16].rtx), cookie); } } break; case VARRAY_DATA_TE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { if ((void *)(x) == this_obj) op (&((*x).data.te[i17]), cookie); } } break; case VARRAY_DATA_EDGE: { size_t i18; for (i18 = 0; i18 < (size_t)((*x).num_elements); i18++) { if ((void *)(x) == this_obj) op (&((*x).data.e[i18]), cookie); } } break; default: break; } } void gt_pch_p_P9tree_node4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } void gt_pch_p_P9reg_attrs4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } void gt_pch_p_P9mem_attrs4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } void gt_pch_p_P7rtx_def4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } void gt_pch_p_SP9tree_node12splay_tree_s (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct splay_tree_s * const x ATTRIBUTE_UNUSED = (struct splay_tree_s *)x_p; if ((void *)(x) == this_obj) op (&((*x).root), cookie); } void gt_pch_p_P19cgraph_varpool_node4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void 
*)(x) == this_obj) op (&((*x).entries), cookie); } }
void gt_pch_p_P11cgraph_node4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } }
void gt_pch_p_II12splay_tree_s (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct splay_tree_s * const x ATTRIBUTE_UNUSED = (struct splay_tree_s *)x_p; if ((void *)(x) == this_obj) op (&((*x).root), cookie); }
/* GC roots. */
const struct ggc_root_tab gt_ggc_r_gtype_desc_c[] = {
{ &chrec_known, 1, sizeof (chrec_known), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &chrec_dont_know, 1, sizeof (chrec_dont_know), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &global_var, 1, sizeof (global_var), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &ssa_names, 1, sizeof (ssa_names), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag },
{ &referenced_vars, 1, sizeof (referenced_vars), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag },
{ &reg_equiv_memory_loc_varray, 1, sizeof (reg_equiv_memory_loc_varray), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag },
{ &cgraph_varpool_nodes_queue, 1, sizeof (cgraph_varpool_nodes_queue), &gt_ggc_mx_cgraph_varpool_node, &gt_pch_nx_cgraph_varpool_node },
{ &cgraph_nodes_queue, 1, sizeof (cgraph_nodes_queue), &gt_ggc_mx_cgraph_node, &gt_pch_nx_cgraph_node },
{ &cgraph_nodes, 1, sizeof (cgraph_nodes), &gt_ggc_mx_cgraph_node, &gt_pch_nx_cgraph_node },
{ &EXIT_BLOCK_PTR, 1, sizeof (EXIT_BLOCK_PTR), &gt_ggc_mx_basic_block_def, &gt_pch_nx_basic_block_def },
{ &ENTRY_BLOCK_PTR, 1, sizeof (ENTRY_BLOCK_PTR), &gt_ggc_mx_basic_block_def, &gt_pch_nx_basic_block_def },
{ &label_value_list, 1, sizeof (label_value_list), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def },
{ &basic_block_info, 1, sizeof (basic_block_info), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag },
{ &insn_addresses_, 1, sizeof (insn_addresses_), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag },
{ &libfunc_table[0], 1 * (LTI_MAX), sizeof (libfunc_table[0]), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def },
{ &current_file_decl, 1, sizeof (current_file_decl), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &current_function_func_begin_label, 1, sizeof (current_function_func_begin_label), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &current_function_decl, 1, sizeof (current_function_decl), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &sizetype_tab[0], 1 * ((int) TYPE_KIND_LAST), sizeof (sizetype_tab[0]), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &frame_base_decl, 1, sizeof (frame_base_decl), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &integer_types[0], 1 * (itk_none), sizeof (integer_types[0]), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &global_trees[0], 1 * (TI_MAX), sizeof (global_trees[0]), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &implicit_built_in_decls[0], 1 * ((int) END_BUILTINS), sizeof (implicit_built_in_decls[0]), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &built_in_decls[0], 1 * ((int) END_BUILTINS), sizeof (built_in_decls[0]), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
{ &code_to_optab[0], 1 * (NUM_RTX_CODE + 1), sizeof (code_to_optab[0]), &gt_ggc_mx_optab, &gt_pch_nx_optab },
{ &convert_optab_table[0], 1 * (CONVERT_OPTAB_MAX), sizeof (convert_optab_table[0]), &gt_ggc_mx_convert_optab,
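/* Each entry in these ggc_root_tab arrays registers one global variable as a
   garbage-collection root: the global's address, how many contiguous objects
   live at that address, the size of each object, and the gengtype-generated
   callbacks that mark it for GGC (gt_ggc_mx_*) and note its pointers for PCH
   output (gt_pch_nx_*).  Entries in the gt_pch_rs_* table further on carry
   NULL callbacks; those roots are plain scalars that are simply saved into
   and restored from the precompiled header.  */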
>_pch_nx_convert_optab }, { &optab_table[0], 1 * (OTI_MAX), sizeof (optab_table[0]), >_ggc_mx_optab, >_pch_nx_optab }, { &stack_limit_rtx, 1, sizeof (stack_limit_rtx), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &return_address_pointer_rtx, 1, sizeof (return_address_pointer_rtx), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &static_chain_incoming_rtx, 1, sizeof (static_chain_incoming_rtx), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &static_chain_rtx, 1, sizeof (static_chain_rtx), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &pic_offset_table_rtx, 1, sizeof (pic_offset_table_rtx), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &global_rtl[0], 1 * (GR_MAX), sizeof (global_rtl[0]), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &const_tiny_rtx[0][0], 1 * (3) * ((int) MAX_MACHINE_MODE), sizeof (const_tiny_rtx[0][0]), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &const_true_rtx, 1, sizeof (const_true_rtx), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &const_int_rtx[0], 1 * (MAX_SAVED_CONST_INT * 2 + 1), sizeof (const_int_rtx[0]), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &outer_function_chain, 1, sizeof (outer_function_chain), >_ggc_mx_function, >_pch_nx_function }, { &cfun, 1, sizeof (cfun), >_ggc_mx_function, >_pch_nx_function }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gtype_desc_c[] = { { &cgraph_varpool_n_nodes, 1, sizeof (cgraph_varpool_n_nodes), NULL, NULL }, { &cgraph_max_uid, 1, sizeof (cgraph_max_uid), NULL, NULL }, { &cgraph_n_nodes, 1, sizeof (cgraph_n_nodes), NULL, NULL }, { &binfo_lang_slots, 1, sizeof (binfo_lang_slots), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Used to implement the RTX_NEXT macro. */ const unsigned char rtx_next[NUM_RTX_CODE] = { 0, 0, 0, RTX_HDR_SIZE + 1 * sizeof (rtunion), RTX_HDR_SIZE + 1 * sizeof (rtunion), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 3 * sizeof (rtunion), 0, 0, 0, RTX_HDR_SIZE + 0 * sizeof (rtunion), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, RTX_HDR_SIZE + 2 * sizeof (rtunion), RTX_HDR_SIZE + 2 * sizeof (rtunion), 0, 0, 0, 0, 0, 0, RTX_HDR_SIZE + 2 * sizeof (rtunion), RTX_HDR_SIZE + 2 * sizeof (rtunion), RTX_HDR_SIZE + 2 * sizeof (rtunion), RTX_HDR_SIZE + 2 * sizeof (rtunion), RTX_HDR_SIZE + 2 * sizeof (rtunion), RTX_HDR_SIZE + 2 * sizeof (rtunion), RTX_HDR_SIZE + 1 * sizeof (rtunion), 0, 0, 0, 0, 0, 0, RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 1 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), 0, RTX_HDR_SIZE + 0 * sizeof (rtunion), 0, 0, 0, 0, 0, RTX_HDR_SIZE + 0 * sizeof (rtunion), 0, 0, 0, 0, RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), 0, 0, RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 1 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof 
(rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), 0, RTX_HDR_SIZE + 0 * sizeof (rtunion), 0, RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 0 * sizeof (rtunion), RTX_HDR_SIZE + 1 * sizeof (rtunion), }; /* This file is auto-generated by opts.sh. */ /* intl.h - internationalization Copyright 1998, 2001, 2003, 2004 Free Software Foundation, Inc. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_INTL_H #define GCC_INTL_H #ifdef HAVE_LOCALE_H # include #endif #ifndef HAVE_SETLOCALE # define setlocale(category, locale) (locale) #endif #ifdef ENABLE_NLS #include extern void gcc_init_libintl (void); extern size_t gcc_gettext_width (const char *); #else /* Stubs. 
*/ # undef textdomain # define textdomain(domain) (domain) # undef bindtextdomain # define bindtextdomain(domain, directory) (domain) # undef gettext # define gettext(msgid) (msgid) # define gcc_init_libintl() /* nothing */ # define gcc_gettext_width(s) strlen(s) #endif #ifndef _ # define _(msgid) gettext (msgid) #endif #ifndef N_ # define N_(msgid) msgid #endif extern const char *open_quote; extern const char *close_quote; #endif /* intl.h */ /* Command line option handling. Copyright (C) 2002, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_OPTS_H #define GCC_OPTS_H extern void decode_options (unsigned int argc, const char **argv); extern void add_input_filename (const char *filename); struct cl_option { const char *opt_text; const char *help; unsigned short back_chain; unsigned char opt_len; unsigned int flags; int *flag_var; int has_set_value; int set_value; }; extern const struct cl_option cl_options[]; extern const unsigned int cl_options_count; extern const char *const lang_names[]; #define CL_REPORT (1 << 23) /* Report argument with -fverbose-asm */ #define CL_JOINED (1 << 24) /* If takes joined argument. */ #define CL_SEPARATE (1 << 25) /* If takes a separate argument. */ #define CL_REJECT_NEGATIVE (1 << 26) /* Reject no- form. */ #define CL_MISSING_OK (1 << 27) /* Missing argument OK (joined). */ #define CL_UINTEGER (1 << 28) /* Argument is an integer >=0. */ #define CL_COMMON (1 << 29) /* Language-independent. */ #define CL_UNDOCUMENTED (1 << 30) /* Do not output with --help. */ /* Input file names. */ extern const char **in_fnames; /* The count of input filenames. */ extern unsigned num_in_fnames; /* Current input filename index. */ extern unsigned cur_in_fname; #endif /* Set by -Wabi. Warn about things that will change when compiling with an ABI-compliant compiler */ int warn_abi; /* Set by -Waggregate-return. Warn about returning structures, unions or arrays */ int warn_aggregate_return; /* Set by -Wbad-function-cast. Warn about casting functions to incompatible types */ int warn_bad_function_cast; /* Set by -Wcast-align. Warn about pointer casts which increase alignment */ int warn_cast_align; /* Set by -Wcast-qual. Warn about casts which discard qualifiers */ int warn_cast_qual; /* Set by -Wchar-subscripts. Warn about subscripts whose type is \"char\" */ int warn_char_subscripts; /* Set by -Wconversion. Warn about possibly confusing type conversions */ int warn_conversion; /* Set by -Wctor-dtor-privacy. Warn when all constructors and destructors are private */ int warn_ctor_dtor_privacy; /* Set by -Wdeclaration-after-statement. Warn when a declaration is found after a statement */ int warn_declaration_after_statement; /* Set by -Wdeprecated. Warn about deprecated compiler features */ int warn_deprecated = 1; /* Set by -Wdeprecated-declarations. 
Warn about uses of __attribute__((deprecated)) declarations */ int warn_deprecated_decl = 1; /* Set by -Wdisabled-optimization. Warn when an optimization pass is disabled */ int warn_disabled_optimization; /* Set by -Wdiv-by-zero. Warn about compile-time integer division by zero */ int warn_div_by_zero = 1; /* Set by -Weffc++. Warn about violations of Effective C++ style rules */ int warn_ecpp; /* Set by -Werror. Treat all warnings as errors */ int warnings_are_errors; /* Set by -Wfatal-errors. Exit on the first error occurred */ int flag_fatal_errors; /* Set by -Wfloat-equal. Warn if testing floating point numbers for equality */ int warn_float_equal; /* Set by -Wformat-extra-args. Warn if passing too many arguments to a function for its format string */ int warn_format_extra_args; /* Set by -Wformat-nonliteral. Warn about format strings that are not literals */ int warn_format_nonliteral; /* Set by -Wformat-security. Warn about possible security problems with format functions */ int warn_format_security; /* Set by -Wformat-y2k. Warn about strftime formats yielding 2-digit years */ int warn_format_y2k; /* Set by -Wformat-zero-length. Warn about zero-length formats */ int warn_format_zero_length; /* Set by -Wimplicit-function-declaration. Warn about implicit function declarations */ int mesg_implicit_function_declaration = -1; /* Set by -Wimplicit-int. Warn when a declaration does not specify a type */ int warn_implicit_int; /* Set by -Winit-self. Warn about variables which are initialized to themselves. */ int warn_init_self; /* Set by -Winline. Warn when an inlined function cannot be inlined */ int warn_inline; /* Set by -Winvalid-offsetof. Warn about invalid uses of the \"offsetof\" macro */ int warn_invalid_offsetof = 1; /* Set by -Wlong-long. Do not warn about using \"long long\" when -pedantic */ int warn_long_long = 1; /* Set by -Wmissing-braces. Warn about possibly missing braces around initializers */ int warn_missing_braces; /* Set by -Wmissing-declarations. Warn about global functions without previous declarations */ int warn_missing_declarations; /* Set by -Wmissing-format-attribute. Warn about functions which might be candidates for format attributes */ int warn_missing_format_attribute; /* Set by -Wmissing-noreturn. Warn about functions which might be candidates for __attribute__((noreturn)) */ int warn_missing_noreturn; /* Set by -Wmissing-prototypes. Warn about global functions without prototypes */ int warn_missing_prototypes; /* Set by -Wnested-externs. Warn about \"extern\" declarations not at file scope */ int warn_nested_externs; /* Set by -Wnon-template-friend. Warn when non-templatized friend functions are declared within a template */ int warn_nontemplate_friend = 1; /* Set by -Wnon-virtual-dtor. Warn about non-virtual destructors */ int warn_nonvdtor; /* Set by -Wnonnull. Warn about NULL being passed to argument slots marked as requiring non-NULL */ int warn_nonnull; /* Set by -Wold-style-cast. Warn if a C-style cast is used in a program */ int warn_old_style_cast; /* Set by -Wold-style-definition. Warn if an old-style parameter definition is used */ int warn_old_style_definition; /* Set by -Woverloaded-virtual. Warn about overloaded virtual function names */ int warn_overloaded_virtual; /* Set by -Wpacked. Warn when the packed attribute has no effect on struct layout */ int warn_packed; /* Set by -Wpadded. Warn when padding is required to align structure members */ int warn_padded; /* Set by -Wparentheses. 
Warn about possibly missing parentheses */ int warn_parentheses; /* Set by -Wpmf-conversions. Warn when converting the type of pointers to member functions */ int warn_pmf2ptr = 1; /* Set by -Wpointer-arith. Warn about function pointer arithmetic */ int warn_pointer_arith; /* Set by -Wprotocol. Warn if inherited methods are unimplemented */ int warn_protocol = 1; /* Set by -Wredundant-decls. Warn about multiple declarations of the same object */ int warn_redundant_decls; /* Set by -Wreorder. Warn when the compiler reorders code */ int warn_reorder; /* Set by -Wselector. Warn if a selector has multiple methods */ int warn_selector; /* Set by -Wsequence-point. Warn about possible violations of sequence point rules */ int warn_sequence_point; /* Set by -Wshadow. Warn when one local variable shadows another */ int warn_shadow; /* Set by -Wsign-compare. Warn about signed-unsigned comparisons */ int warn_sign_compare = -1; /* Set by -Wsign-promo. Warn when overload promotes from unsigned to signed */ int warn_sign_promo; /* Set by -Wstrict-prototypes. Warn about unprototyped function declarations */ int warn_strict_prototypes; /* Set by -Wswitch. Warn about enumerated switches, with no default, missing a case */ int warn_switch; /* Set by -Wswitch-default. Warn about enumerated switches missing a \"default:\" statement */ int warn_switch_default; /* Set by -Wswitch-enum. Warn about all enumerated switches missing a specific case */ int warn_switch_enum; /* Set by -Wsynth. Warn when synthesis behavior differs from Cfront */ int warn_synth; /* Set by -Wsystem-headers. Do not suppress warnings from system headers */ int warn_system_headers; /* Set by -Wtraditional. Warn about features not present in traditional C */ int warn_traditional; /* Set by -Wundeclared-selector. Warn about @selector()s without previously declared methods */ int warn_undeclared_selector; /* Set by -Wuninitialized. Warn about uninitialized automatic variables */ int warn_uninitialized; /* Set by -Wunreachable-code. Warn about code that will never be executed */ int warn_notreached; /* Set by -Wunused-function. Warn when a function is unused */ int warn_unused_function; /* Set by -Wunused-label. Warn when a label is unused */ int warn_unused_label; /* Set by -Wunused-parameter. Warn when a function parameter is unused */ int warn_unused_parameter; /* Set by -Wunused-value. Warn when an expression value is unused */ int warn_unused_value; /* Set by -Wunused-variable. Warn when a variable is unused */ int warn_unused_variable; /* Set by -fPIC. */ int flag_pic; /* Set by -fPIE. */ int flag_pie; /* Set by -fabi-version=. */ int flag_abi_version = 2; /* Set by -falign-functions. Align the start of functions */ int align_functions; /* Set by -falign-jumps. Align labels which are only reached by jumping */ int align_jumps; /* Set by -falign-labels. Align all labels */ int align_labels; /* Set by -falign-loops. Align the start of loops */ int align_loops; /* Set by -fargument-alias. Specify that arguments may alias each other and globals */ int flag_argument_noalias; /* Set by -fasynchronous-unwind-tables. Generate unwind tables that are exact at each instruction boundary */ int flag_asynchronous_unwind_tables; /* Set by -fbounds-check. Generate code to check bounds before indexing arrays */ int flag_bounds_check; /* Set by -fbranch-count-reg. Replace add, compare, branch with branch on count register */ int flag_branch_on_count_reg = 1; /* Set by -fbranch-probabilities. 
Use profiling information for branch probabilities */ int flag_branch_probabilities; /* Set by -fbranch-target-load-optimize. Perform branch target load optimization before prologue / epilogue threading */ int flag_branch_target_load_optimize; /* Set by -fbranch-target-load-optimize2. Perform branch target load optimization after prologue / epilogue threading */ int flag_branch_target_load_optimize2; /* Set by -fbtr-bb-exclusive. Restrict target load migration not to re-use registers in any basic block */ int flag_btr_bb_exclusive; /* Set by -fcaller-saves. Save registers around function calls */ int flag_caller_saves; /* Set by -fcommon. Do not put uninitialized globals in the common section */ int flag_no_common; /* Set by -fcprop-registers. Perform a register copy-propagation optimization pass */ int flag_cprop_registers; /* Set by -fcrossjumping. Perform cross-jumping optimization */ int flag_crossjumping; /* Set by -fcse-follow-jumps. When running CSE, follow jumps to their targets */ int flag_cse_follow_jumps; /* Set by -fcse-skip-blocks. When running CSE, follow conditional jumps */ int flag_cse_skip_blocks; /* Set by -fdata-sections. Place data items into their own section */ int flag_data_sections; /* Set by -fdefer-pop. Defer popping functions args from stack until later */ int flag_defer_pop; /* Set by -fdelayed-branch. Attempt to fill delay slots of branch instructions */ int flag_delayed_branch; /* Set by -fdelete-null-pointer-checks. Delete useless null pointer checks */ int flag_delete_null_pointer_checks; /* Set by -feliminate-dwarf2-dups. Perform DWARF2 duplicate elimination */ int flag_eliminate_dwarf2_dups; /* Set by -feliminate-unused-debug-symbols. Perform unused type elimination in debug info */ int flag_debug_only_used_symbols; /* Set by -feliminate-unused-debug-types. Perform unused type elimination in debug info */ int flag_eliminate_unused_debug_types = 1; /* Set by -fexceptions. Enable exception handling */ int flag_exceptions; /* Set by -fexpensive-optimizations. Perform a number of minor, expensive optimizations */ int flag_expensive_optimizations; /* Set by -ffinite-math-only. Assume no NaNs or infinities are generated */ int flag_finite_math_only; /* Set by -ffloat-store. Do not store floats in registers */ int flag_float_store; /* Set by -fforce-addr. Copy memory address constants into registers before use */ int flag_force_addr; /* Set by -fforce-mem. Copy memory operands into registers before use */ int flag_force_mem; /* Set by -ffunction-cse. Allow function addresses to be held in registers */ int flag_no_function_cse; /* Set by -ffunction-sections. Place each function into its own section */ int flag_function_sections; /* Set by -fgcse. Perform global common subexpression elimination */ int flag_gcse; /* Set by -fgcse-after-reload. Perform global common subexpression elimination after register allocation */ int flag_gcse_after_reload; /* Set by -fgcse-las. Perform redundant load after store elimination in global common subexpression */ int flag_gcse_las = 1; /* Set by -fgcse-lm. Perform enhanced load motion during global common subexpression elimination */ int flag_gcse_lm = 1; /* Set by -fgcse-sm. Perform store motion after global common subexpression elimination */ int flag_gcse_sm = 1; /* Set by -fguess-branch-probability. Enable guessing of branch probabilities */ int flag_guess_branch_prob; /* Set by -fident. Process #ident directives */ int flag_no_ident; /* Set by -fif-conversion. 
Perform conversion of conditional jumps to branchless equivalents */ int flag_if_conversion; /* Set by -fif-conversion2. Perform conversion of conditional jumps to conditional execution */ int flag_if_conversion2; /* Set by -finhibit-size-directive. Do not generate .size directives */ int flag_inhibit_size_directive; /* Set by -finline. Pay attention to the \"inline\" keyword */ int flag_no_inline = 2; /* Set by -finline-functions. Integrate simple functions into their callers */ int flag_inline_functions; /* Set by -finstrument-functions. Instrument function entry and exit with profiling calls */ int flag_instrument_function_entry_exit; /* Set by -fkeep-inline-functions. Generate code for functions even if they are fully inlined */ int flag_keep_inline_functions; /* Set by -fkeep-static-consts. Emit static const variables even if they are not used */ int flag_keep_static_consts = 1; /* Set by -fleading-underscore. Give external symbols a leading underscore */ int flag_leading_underscore = -1; /* Set by -floop-optimize. Perform loop optimizations */ int flag_loop_optimize; /* Set by -floop-optimize2. Perform loop optimizations using the new loop optimizer */ int flag_loop_optimize2; /* Set by -fmath-errno. Set errno after built-in math functions */ int flag_errno_math = 1; /* Set by -fmem-report. Report on permanent memory allocation */ int mem_report; /* Set by -fmerge-all-constants. Attempt to merge identical constants and constant variables */ int flag_merge_constants = 1; /* Set by -fmodulo-sched. Perform SMS based modulo scheduling before the first scheduling pass */ int flag_modulo_sched; /* Set by -fmove-all-movables. Force all loop invariant computations out of loops */ int flag_move_all_movables; /* Set by -fmove-loop-invariants. Move loop invariant computations out of loops */ int flag_move_loop_invariants; /* Set by -fmudflap. Add mudflap bounds-checking instrumentation for single-threaded program. */ int flag_mudflap; /* Set by -fmudflapir. Ignore read operations when inserting mudflap instrumentation. */ int flag_mudflap_ignore_reads; /* Set by -fmudflapth. Add mudflap bounds-checking instrumentation for multi-threaded program. */ int flag_mudflap_threads; /* Set by -fnew-ra. Use graph-coloring register allocation */ int flag_new_regalloc; /* Set by -fnon-call-exceptions. Support synchronous non-call exceptions */ int flag_non_call_exceptions; /* Set by -fold-unroll-all-loops. Perform loop unrolling for all loops */ int flag_old_unroll_all_loops; /* Set by -fold-unroll-loops. Perform loop unrolling when iteration count is known */ int flag_old_unroll_loops; /* Set by -fomit-frame-pointer. When possible do not generate stack frames */ int flag_omit_frame_pointer; /* Set by -foptimize-register-move. Do the full register move optimization pass */ int flag_regmove; /* Set by -foptimize-sibling-calls. Optimize sibling and tail recursive calls */ int flag_optimize_sibling_calls; /* Set by -fpack-struct. Pack structure members together without holes */ int flag_pack_struct; /* Set by -fpeel-loops. Perform loop peeling */ int flag_peel_loops; /* Set by -fpeephole. Enable machine specific peephole optimizations */ int flag_no_peephole; /* Set by -fpeephole2. Enable an RTL peephole pass before sched2 */ int flag_peephole2; /* Set by -fprefetch-loop-arrays. Generate prefetch instructions, if available, for arrays in loops */ int flag_prefetch_loop_arrays; /* Set by -fprofile. Enable basic program profiling code */ int profile_flag; /* Set by -fprofile-arcs. 
Insert arc-based program profiling code */ int profile_arc_flag; /* Set by -fprofile-values. Insert code to profile values of expressions */ int flag_profile_values; /* Set by -freduce-all-givs. Strength reduce all loop general induction variables */ int flag_reduce_all_givs; /* Set by -fregmove. Enables a register move optimization */ int flag_regmove; /* Set by -frename-registers. Perform a register renaming optimization pass */ int flag_rename_registers; /* Set by -freorder-blocks. Reorder basic blocks to improve code placement */ int flag_reorder_blocks; /* Set by -freorder-blocks-and-partition. Reorder basic blocks and partition into hot and cold sections */ int flag_reorder_blocks_and_partition; /* Set by -freorder-functions. Reorder functions to improve code placement */ int flag_reorder_functions; /* Set by -frerun-cse-after-loop. Add a common subexpression elimination pass after loop optimizations */ int flag_rerun_cse_after_loop; /* Set by -frerun-loop-opt. Run the loop optimizer twice */ int flag_rerun_loop_opt; /* Set by -frounding-math. Disable optimizations that assume default FP rounding behavior */ int flag_rounding_math; /* Set by -fsched-interblock. Enable scheduling across basic blocks */ int flag_schedule_interblock = 1; /* Set by -fsched-spec. Allow speculative motion of non-loads */ int flag_schedule_speculative = 1; /* Set by -fsched-spec-load. Allow speculative motion of some loads */ int flag_schedule_speculative_load; /* Set by -fsched-spec-load-dangerous. Allow speculative motion of more loads */ int flag_schedule_speculative_load_dangerous; /* Set by -fsched-stalled-insns. Allow premature scheduling of queued insns */ int flag_sched_stalled_insns; /* Set by -fsched-stalled-insns-dep. Set dependence distance checking in premature scheduling of queued insns */ int flag_sched_stalled_insns_dep = 1; /* Set by -fsched2-use-superblocks. If scheduling post reload, do superblock scheduling */ int flag_sched2_use_superblocks; /* Set by -fsched2-use-traces. If scheduling post reload, do trace scheduling */ int flag_sched2_use_traces; /* Set by -fschedule-insns. Reschedule instructions before register allocation */ int flag_schedule_insns; /* Set by -fschedule-insns2. Reschedule instructions after register allocation */ int flag_schedule_insns_after_reload; /* Set by -fshared-data. Mark data as shared rather than private */ int flag_shared_data; /* Set by -fsignaling-nans. Disable optimizations observable by IEEE signaling NaNs */ int flag_signaling_nans; /* Set by -fsingle-precision-constant. Convert floating point constants to single precision constants */ int flag_single_precision_constant; /* Set by -fstack-check. Insert stack checking code into the program */ int flag_stack_check; /* Set by -fstrength-reduce. Perform strength reduction optimizations */ int flag_strength_reduce; /* Set by -fstrict-aliasing. Assume strict aliasing rules apply */ int flag_strict_aliasing; /* Set by -fsyntax-only. Check for syntax errors, then stop */ int flag_syntax_only; /* Set by -ftest-coverage. Create data files needed by \"gcov\" */ int flag_test_coverage; /* Set by -fthread-jumps. Perform jump threading optimizations */ int flag_thread_jumps; /* Set by -ftime-report. Report the time taken by each compiler pass */ int time_report; /* Set by -ftracer. Perform superblock formation via tail duplication */ int flag_tracer; /* Set by -ftrapping-math. Assume floating-point operations can trap */ int flag_trapping_math; /* Set by -ftrapv. 
Trap for signed overflow in addition, subtraction and multiplication */ int flag_trapv; /* Set by -ftree-based-profiling. Use tree-ssa based implementation of profiling */ int flag_tree_based_profiling; /* Set by -ftree-ccp. Enable SSA-CCP optimization on trees */ int flag_tree_ccp; /* Set by -ftree-ch. Enable loop header copying on trees */ int flag_tree_ch; /* Set by -ftree-combine-temps. Coalesce memory temporaries in the SSA->normal pass */ int flag_tree_combine_temps; /* Set by -ftree-copyrename. Replace SSA temporaries with better names in copies. */ int flag_tree_copyrename; /* Set by -ftree-dce. Enable SSA dead code elimination optimization on trees */ int flag_tree_dce; /* Set by -ftree-dominator-opts. Enable dominator optimizations */ int flag_tree_dom; /* Set by -ftree-dse. Enable dead store elimination */ int flag_tree_dse; /* Set by -ftree-fre. Enable Full Redundancy Elimination (FRE) on trees */ int flag_tree_fre; /* Set by -ftree-loop-optimize. Enable loop optimizations on tree level */ int flag_tree_loop_optimize = 1; /* Set by -ftree-lrs. Perform live range splitting during the SSA->normal pass. */ int flag_tree_live_range_split; /* Set by -ftree-pre. Enable SSA-PRE optimization on trees */ int flag_tree_pre; /* Set by -ftree-sra. Perform scalar replacement of aggregates */ int flag_tree_sra; /* Set by -ftree-ter. Replace temporary expressions in the SSA->normal pass */ int flag_tree_ter; /* Set by -funit-at-a-time. Compile whole compilation unit at a time */ int flag_unit_at_a_time; /* Set by -funroll-all-loops. Perform loop unrolling for all loops */ int flag_unroll_all_loops; /* Set by -funroll-loops. Perform loop unrolling when iteration count is known */ int flag_unroll_loops; /* Set by -funsafe-math-optimizations. Allow math optimizations that may violate IEEE or ISO standards */ int flag_unsafe_math_optimizations; /* Set by -funswitch-loops. Perform loop unswitching */ int flag_unswitch_loops; /* Set by -funwind-tables. Just generate unwind tables for exception handling */ int flag_unwind_tables; /* Set by -fverbose-asm. Add extra commentary to assembler output */ int flag_verbose_asm; /* Set by -fvpt. Use expression value profiles in optimizations */ int flag_value_profile_transformations; /* Set by -fwrapv. Assume signed arithmetic overflow wraps around */ int flag_wrapv; /* Set by -fzero-initialized-in-bss. Put zero initialized data in the bss section */ int flag_zero_initialized_in_bss = 1; /* Set by -p. Enable function profiling */ int profile_flag; /* Set by -pedantic. Issue warnings needed for strict compliance to the standard */ int pedantic; /* Set by -quiet. Do not display functions compiled or elapsed time */ int quiet_flag; /* Set by -version. Display the compiler's version */ int version_flag; /* Set by -w. Suppress warnings */ int inhibit_warnings; const char * const lang_names[] = { "C", "C++", "ObjC", "ObjC++", 0 }; const unsigned int cl_options_count = N_OPTS; const struct cl_option cl_options[] = { { "--help", N_("Display this information"), N_OPTS, 5, CL_COMMON, 0, 0, 0 }, { "--output-pch=", 0, N_OPTS, 12, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "--param", N_("--param <param>=<value> Set parameter <param> to value. See below for a complete list of parameters"), N_OPTS, 6, CL_COMMON | CL_SEPARATE, 0, 0, 0 }, { "--target-help", 0, N_OPTS, 12, CL_COMMON, 0, 0, 0 }, { "--version", 0, N_OPTS, 8, CL_COMMON, 0, 0, 0 }, { "-A", N_("-A<question>=<answer> Assert the <answer> to <question>.
Putting '-' before <question> disables the <answer> to <question>"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-C", N_("Do not discard comments"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-CC", N_("Do not discard comments in macro expansions"), N_OPTS, 2, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-D", N_("-D<macro>[=<val>] Define a <macro> with <val> as its value. If just <macro> is given, <val> is taken to be 1"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-E", 0, N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_UNDOCUMENTED, 0, 0, 0 }, { "-F", N_("-F <dir> Add <dir> to the end of the main framework include path "), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-G", N_("-G<number> Put global and static data smaller than <number> bytes into a special section (on some targets)"), N_OPTS, 1, CL_COMMON | CL_JOINED | CL_SEPARATE | CL_UINTEGER, 0, 0, 0 }, { "-H", N_("Print the name of header files as they are used"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-I", N_("-I <dir> Add <dir> to the end of the main include path"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-M", N_("Generate make dependencies"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-MD", N_("Generate make dependencies and compile"), N_OPTS, 2, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_SEPARATE, 0, 0, 0 }, { "-MF", N_("-MF <file> Write dependency output to the given file"), N_OPTS, 2, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-MG", N_("Treat missing header files as generated files"), N_OPTS, 2, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-MM", N_("Like -M but ignore system header files"), N_OPTS, 2, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-MMD", N_("Like -MD but ignore system header files"), N_OPTS, 3, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_SEPARATE, 0, 0, 0 }, { "-MP", N_("Generate phony targets for all headers"), N_OPTS, 2, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-MQ", N_("-MQ <target> Add a MAKE-quoted target"), N_OPTS, 2, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-MT", N_("-MT <target> Add an unquoted target"), N_OPTS, 2, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-O", N_("-O<number> Set optimization level to <number>"), N_OPTS, 1, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-Os", N_("Optimize for space rather than speed"), OPT_O, 2, CL_COMMON, 0, 0, 0 }, { "-P", N_("Do not generate #line directives"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-U", N_("-U<macro> Undefine <macro>"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-W", N_("This switch is deprecated; use -Wextra instead"), N_OPTS, 1, CL_COMMON | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-Wabi", N_("Warn about things that will change when compiling with an ABI-compliant compiler"), N_OPTS, 4, CL_CXX | CL_ObjCXX, &warn_abi, 0, 0 }, { "-Waggregate-return", N_("Warn about returning structures, unions or arrays"), N_OPTS, 17, CL_COMMON, &warn_aggregate_return, 0, 0 }, { "-Wall", N_("Enable most warning messages"), N_OPTS, 4, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wbad-function-cast", N_("Warn about casting functions to incompatible types"), N_OPTS, 18, CL_C | CL_ObjC, &warn_bad_function_cast, 0, 0 }, { "-Wcast-align", N_("Warn about pointer casts which increase alignment"), N_OPTS, 11, CL_COMMON, &warn_cast_align, 0, 0 }, { "-Wcast-qual", N_("Warn about casts which discard
qualifiers"), N_OPTS, 10, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_cast_qual, 0, 0 }, { "-Wchar-subscripts", N_("Warn about subscripts whose type is \"char\""), N_OPTS, 16, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_char_subscripts, 0, 0 }, { "-Wcomment", N_("Warn about possibly nested block comments, and C++ comments spanning more than one physical line"), N_OPTS, 8, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wcomments", N_("Synonym for -Wcomment"), N_OPTS, 9, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wconversion", N_("Warn about possibly confusing type conversions"), N_OPTS, 11, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_conversion, 0, 0 }, { "-Wctor-dtor-privacy", N_("Warn when all constructors and destructors are private"), N_OPTS, 18, CL_CXX | CL_ObjCXX, &warn_ctor_dtor_privacy, 0, 0 }, { "-Wdeclaration-after-statement", N_("Warn when a declaration is found after a statement"), N_OPTS, 28, CL_C | CL_ObjC, &warn_declaration_after_statement, 0, 0 }, { "-Wdeprecated", N_("Warn about deprecated compiler features"), N_OPTS, 11, CL_CXX | CL_ObjCXX, &warn_deprecated, 0, 0 }, { "-Wdeprecated-declarations", N_("Warn about uses of __attribute__((deprecated)) declarations"), N_OPTS, 24, CL_COMMON, &warn_deprecated_decl, 0, 0 }, { "-Wdisabled-optimization", N_("Warn when an optimization pass is disabled"), N_OPTS, 22, CL_COMMON, &warn_disabled_optimization, 0, 0 }, { "-Wdiv-by-zero", N_("Warn about compile-time integer division by zero"), N_OPTS, 12, CL_C | CL_ObjC, &warn_div_by_zero, 0, 0 }, { "-Weffc++", N_("Warn about violations of Effective C++ style rules"), N_OPTS, 7, CL_CXX | CL_ObjCXX, &warn_ecpp, 0, 0 }, { "-Wendif-labels", N_("Warn about stray tokens after #elif and #endif"), N_OPTS, 13, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Werror", N_("Treat all warnings as errors"), N_OPTS, 6, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_COMMON, &warnings_are_errors, 0, 0 }, { "-Werror-implicit-function-declaration", N_("Make implicit function declarations an error"), N_OPTS, 36, CL_C | CL_ObjC | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-Wextra", N_("Print extra (possibly unwanted) warnings"), N_OPTS, 6, CL_COMMON, 0, 0, 0 }, { "-Wfatal-errors", N_("Exit on the first error occurred"), N_OPTS, 13, CL_COMMON, &flag_fatal_errors, 0, 0 }, { "-Wfloat-equal", N_("Warn if testing floating point numbers for equality"), N_OPTS, 12, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_float_equal, 0, 0 }, { "-Wformat", N_("Warn about printf/scanf/strftime/strfmon format string anomalies"), N_OPTS, 7, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wformat-extra-args", N_("Warn if passing too many arguments to a function for its format string"), N_OPTS, 18, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_format_extra_args, 0, 0 }, { "-Wformat-nonliteral", N_("Warn about format strings that are not literals"), N_OPTS, 18, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_format_nonliteral, 0, 0 }, { "-Wformat-security", N_("Warn about possible security problems with format functions"), N_OPTS, 16, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_format_security, 0, 0 }, { "-Wformat-y2k", N_("Warn about strftime formats yielding 2-digit years"), N_OPTS, 11, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_format_y2k, 0, 0 }, { "-Wformat-zero-length", N_("Warn about zero-length formats"), N_OPTS, 19, CL_C | CL_ObjC, &warn_format_zero_length, 0, 0 }, { "-Wformat=", 0, N_OPTS, 8, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED, 0, 0, 0 }, { "-Wimplicit", 0, N_OPTS, 9, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { 
"-Wimplicit-function-declaration", N_("Warn about implicit function declarations"), N_OPTS, 30, CL_C | CL_ObjC, &mesg_implicit_function_declaration, 0, 0 }, { "-Wimplicit-int", N_("Warn when a declaration does not specify a type"), N_OPTS, 13, CL_C | CL_ObjC, &warn_implicit_int, 0, 0 }, { "-Wimport", N_("Deprecated. This switch has no effect."), N_OPTS, 7, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Winit-self", N_("Warn about variables which are initialized to themselves."), N_OPTS, 10, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_init_self, 0, 0 }, { "-Winline", N_("Warn when an inlined function cannot be inlined"), N_OPTS, 7, CL_COMMON, &warn_inline, 0, 0 }, { "-Winvalid-offsetof", N_("Warn about invalid uses of the \"offsetof\" macro"), N_OPTS, 17, CL_CXX | CL_ObjCXX, &warn_invalid_offsetof, 0, 0 }, { "-Winvalid-pch", N_("Warn about PCH files that are found but not used"), N_OPTS, 12, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wlarger-than-", N_("-Wlarger-than- Warn if an object is larger than bytes"), N_OPTS, 13, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-Wlong-long", N_("Do not warn about using \"long long\" when -pedantic"), N_OPTS, 10, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_long_long, 0, 0 }, { "-Wmain", N_("Warn about suspicious declarations of \"main\""), N_OPTS, 5, CL_C | CL_ObjC, 0, 0, 0 }, { "-Wmissing-braces", N_("Warn about possibly missing braces around initializers"), N_OPTS, 15, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_missing_braces, 0, 0 }, { "-Wmissing-declarations", N_("Warn about global functions without previous declarations"), N_OPTS, 21, CL_C | CL_ObjC, &warn_missing_declarations, 0, 0 }, { "-Wmissing-format-attribute", N_("Warn about functions which might be candidates for format attributes"), N_OPTS, 25, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_missing_format_attribute, 0, 0 }, { "-Wmissing-include-dirs", N_("Warn about user-specified include directories that do not exist"), N_OPTS, 21, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wmissing-noreturn", N_("Warn about functions which might be candidates for __attribute__((noreturn))"), N_OPTS, 17, CL_COMMON, &warn_missing_noreturn, 0, 0 }, { "-Wmissing-prototypes", N_("Warn about global functions without prototypes"), N_OPTS, 19, CL_C | CL_ObjC, &warn_missing_prototypes, 0, 0 }, { "-Wmultichar", N_("Warn about use of multi-character character constants"), N_OPTS, 10, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wnested-externs", N_("Warn about \"extern\" declarations not at file scope"), N_OPTS, 15, CL_C | CL_ObjC, &warn_nested_externs, 0, 0 }, { "-Wnon-template-friend", N_("Warn when non-templatized friend functions are declared within a template"), N_OPTS, 20, CL_CXX | CL_ObjCXX, &warn_nontemplate_friend, 0, 0 }, { "-Wnon-virtual-dtor", N_("Warn about non-virtual destructors"), N_OPTS, 17, CL_CXX | CL_ObjCXX, &warn_nonvdtor, 0, 0 }, { "-Wnonnull", N_("Warn about NULL being passed to argument slots marked as requiring non-NULL"), N_OPTS, 8, CL_C | CL_ObjC, &warn_nonnull, 0, 0 }, { "-Wold-style-cast", N_("Warn if a C-style cast is used in a program"), N_OPTS, 15, CL_CXX | CL_ObjCXX, &warn_old_style_cast, 0, 0 }, { "-Wold-style-definition", N_("Warn if an old-style parameter definition is used"), N_OPTS, 21, CL_C | CL_ObjC, &warn_old_style_definition, 0, 0 }, { "-Woverloaded-virtual", N_("Warn about overloaded virtual function names"), N_OPTS, 19, CL_CXX | CL_ObjCXX, &warn_overloaded_virtual, 0, 0 }, { "-Wpacked", N_("Warn when the packed attribute 
has no effect on struct layout"), N_OPTS, 7, CL_COMMON, &warn_packed, 0, 0 }, { "-Wpadded", N_("Warn when padding is required to align structure members"), N_OPTS, 7, CL_COMMON, &warn_padded, 0, 0 }, { "-Wparentheses", N_("Warn about possibly missing parentheses"), N_OPTS, 12, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_parentheses, 0, 0 }, { "-Wpmf-conversions", N_("Warn when converting the type of pointers to member functions"), N_OPTS, 16, CL_CXX | CL_ObjCXX, &warn_pmf2ptr, 0, 0 }, { "-Wpointer-arith", N_("Warn about function pointer arithmetic"), N_OPTS, 14, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_pointer_arith, 0, 0 }, { "-Wprotocol", N_("Warn if inherited methods are unimplemented"), N_OPTS, 9, CL_ObjC | CL_ObjCXX, &warn_protocol, 0, 0 }, { "-Wredundant-decls", N_("Warn about multiple declarations of the same object"), N_OPTS, 16, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_redundant_decls, 0, 0 }, { "-Wreorder", N_("Warn when the compiler reorders code"), N_OPTS, 8, CL_CXX | CL_ObjCXX, &warn_reorder, 0, 0 }, { "-Wreturn-type", N_("Warn whenever a function's return type defaults to \"int\" (C), or about inconsistent return types (C++)"), N_OPTS, 12, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wselector", N_("Warn if a selector has multiple methods"), N_OPTS, 9, CL_ObjC | CL_ObjCXX, &warn_selector, 0, 0 }, { "-Wsequence-point", N_("Warn about possible violations of sequence point rules"), N_OPTS, 15, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_sequence_point, 0, 0 }, { "-Wshadow", N_("Warn when one local variable shadows another"), N_OPTS, 7, CL_COMMON, &warn_shadow, 0, 0 }, { "-Wsign-compare", N_("Warn about signed-unsigned comparisons"), N_OPTS, 13, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, &warn_sign_compare, 0, 0 }, { "-Wsign-promo", N_("Warn when overload promotes from unsigned to signed"), N_OPTS, 11, CL_CXX | CL_ObjCXX, &warn_sign_promo, 0, 0 }, { "-Wstrict-aliasing", N_("Warn about code which might break strict aliasing rules"), N_OPTS, 16, CL_COMMON, 0, 0, 0 }, { "-Wstrict-aliasing=", N_("Warn about code which might break strict aliasing rules"), N_OPTS, 17, CL_COMMON | CL_JOINED | CL_UINTEGER, 0, 0, 0 }, { "-Wstrict-prototypes", N_("Warn about unprototyped function declarations"), N_OPTS, 18, CL_C | CL_ObjC, &warn_strict_prototypes, 0, 0 }, { "-Wswitch", N_("Warn about enumerated switches, with no default, missing a case"), N_OPTS, 7, CL_COMMON, &warn_switch, 0, 0 }, { "-Wswitch-default", N_("Warn about enumerated switches missing a \"default:\" statement"), N_OPTS, 15, CL_COMMON, &warn_switch_default, 0, 0 }, { "-Wswitch-enum", N_("Warn about all enumerated switches missing a specific case"), N_OPTS, 12, CL_COMMON, &warn_switch_enum, 0, 0 }, { "-Wsynth", N_("Warn when synthesis behavior differs from Cfront"), N_OPTS, 6, CL_CXX | CL_ObjCXX, &warn_synth, 0, 0 }, { "-Wsystem-headers", N_("Do not suppress warnings from system headers"), N_OPTS, 15, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_COMMON, &warn_system_headers, 0, 0 }, { "-Wtraditional", N_("Warn about features not present in traditional C"), N_OPTS, 12, CL_C | CL_ObjC, &warn_traditional, 0, 0 }, { "-Wtrigraphs", N_("Warn if trigraphs are encountered that might affect the meaning of the program"), N_OPTS, 10, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wundeclared-selector", N_("Warn about @selector()s without previously declared methods"), N_OPTS, 20, CL_ObjC | CL_ObjCXX, &warn_undeclared_selector, 0, 0 }, { "-Wundef", N_("Warn if an undefined macro is used in an #if directive"), N_OPTS, 6, CL_C | CL_CXX | 
CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wuninitialized", N_("Warn about uninitialized automatic variables"), N_OPTS, 14, CL_COMMON, &warn_uninitialized, 0, 0 }, { "-Wunknown-pragmas", N_("Warn about unrecognized pragmas"), N_OPTS, 16, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wunreachable-code", N_("Warn about code that will never be executed"), N_OPTS, 17, CL_COMMON, &warn_notreached, 0, 0 }, { "-Wunused", N_("Enable all -Wunused- warnings"), N_OPTS, 7, CL_COMMON, 0, 0, 0 }, { "-Wunused-function", N_("Warn when a function is unused"), N_OPTS, 16, CL_COMMON, &warn_unused_function, 0, 0 }, { "-Wunused-label", N_("Warn when a label is unused"), N_OPTS, 13, CL_COMMON, &warn_unused_label, 0, 0 }, { "-Wunused-macros", N_("Warn about macros defined in the main file that are not used"), N_OPTS, 14, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wunused-parameter", N_("Warn when a function parameter is unused"), N_OPTS, 17, CL_COMMON, &warn_unused_parameter, 0, 0 }, { "-Wunused-value", N_("Warn when an expression value is unused"), N_OPTS, 13, CL_COMMON, &warn_unused_value, 0, 0 }, { "-Wunused-variable", N_("Warn when a variable is unused"), N_OPTS, 16, CL_COMMON, &warn_unused_variable, 0, 0 }, { "-Wvariadic-macros", N_("Do not warn about using variadic macros when -pedantic"), N_OPTS, 16, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-Wwrite-strings", N_("Give strings the type \"array of char\""), N_OPTS, 14, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-ansi", N_("A synonym for -std=c89. In a future version of GCC it will become synonymous with -std=c99 instead"), N_OPTS, 4, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-aux-info", N_("-aux-info Emit declaration information into "), N_OPTS, 8, CL_COMMON | CL_SEPARATE, 0, 0, 0 }, { "-aux-info=", 0, N_OPTS, 9, CL_COMMON | CL_JOINED, 0, 0, 0 }, { "-auxbase", 0, N_OPTS, 7, CL_COMMON | CL_SEPARATE, 0, 0, 0 }, { "-auxbase-strip", 0, N_OPTS, 13, CL_COMMON | CL_SEPARATE, 0, 0, 0 }, { "-d", N_("-d Enable dumps from specific passes of the compiler"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_COMMON | CL_JOINED, 0, 0, 0 }, { "-dumpbase", N_("-dumpbase Set the file basename to be used for dumps"), OPT_d, 8, CL_COMMON | CL_SEPARATE, 0, 0, 0 }, { "-fPIC", 0, N_OPTS, 4, CL_COMMON | CL_REPORT, &flag_pic, 1, 2 }, { "-fPIE", 0, N_OPTS, 4, CL_COMMON | CL_REPORT, &flag_pie, 1, 2 }, { "-fabi-version=", 0, N_OPTS, 13, CL_COMMON | CL_JOINED | CL_UINTEGER, &flag_abi_version, 0, 0 }, { "-faccess-control", N_("Enforce class member access control semantics"), N_OPTS, 15, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-falign-functions", N_("Align the start of functions"), N_OPTS, 16, CL_COMMON | CL_REPORT, &align_functions, 1, 0 }, { "-falign-functions=", 0, N_OPTS, 17, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-falign-jumps", N_("Align labels which are only reached by jumping"), N_OPTS, 12, CL_COMMON | CL_REPORT, &align_jumps, 1, 0 }, { "-falign-jumps=", 0, N_OPTS, 13, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-falign-labels", N_("Align all labels"), N_OPTS, 13, CL_COMMON | CL_REPORT, &align_labels, 1, 0 }, { "-falign-labels=", 0, N_OPTS, 14, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-falign-loops", N_("Align the start of loops"), N_OPTS, 12, CL_COMMON | CL_REPORT, &align_loops, 0, 0 }, { "-falign-loops=", 0, N_OPTS, 13, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-fall-virtual", 0, N_OPTS, 12, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { 
"-falt-external-templates", N_("Change when template instances are emitted"), N_OPTS, 23, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fargument-alias", N_("Specify that arguments may alias each other and globals"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_argument_noalias, 1, 0 }, { "-fargument-noalias", N_("Assume arguments may alias globals but not each other"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_argument_noalias, 1, 1 }, { "-fargument-noalias-global", N_("Assume arguments alias neither each other nor globals"), N_OPTS, 24, CL_COMMON | CL_REPORT, &flag_argument_noalias, 1, 2 }, { "-fasm", N_("Recognize the \"asm\" keyword"), N_OPTS, 4, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fasynchronous-unwind-tables", N_("Generate unwind tables that are exact at each instruction boundary"), N_OPTS, 27, CL_COMMON | CL_REPORT, &flag_asynchronous_unwind_tables, 0, 0 }, { "-fbounds-check", N_("Generate code to check bounds before indexing arrays"), N_OPTS, 13, CL_COMMON | CL_REPORT, &flag_bounds_check, 0, 0 }, { "-fbranch-count-reg", N_("Replace add, compare, branch with branch on count register"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_branch_on_count_reg, 0, 0 }, { "-fbranch-probabilities", N_("Use profiling information for branch probabilities"), N_OPTS, 21, CL_COMMON | CL_REPORT, &flag_branch_probabilities, 0, 0 }, { "-fbranch-target-load-optimize", N_("Perform branch target load optimization before prologue / epilogue threading"), N_OPTS, 28, CL_COMMON | CL_REPORT, &flag_branch_target_load_optimize, 0, 0 }, { "-fbranch-target-load-optimize2", N_("Perform branch target load optimization after prologue / epilogue threading"), N_OPTS, 29, CL_COMMON | CL_REPORT, &flag_branch_target_load_optimize2, 0, 0 }, { "-fbtr-bb-exclusive", N_("Restrict target load migration not to re-use registers in any basic block"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_btr_bb_exclusive, 0, 0 }, { "-fbuiltin", N_("Recognize built-in functions"), N_OPTS, 8, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fbuiltin-", 0, N_OPTS, 9, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED, 0, 0, 0 }, { "-fcall-saved-", N_("-fcall-saved- Mark as being preserved across functions"), N_OPTS, 12, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-fcall-used-", N_("-fcall-used- Mark as being corrupted by function calls"), N_OPTS, 11, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-fcaller-saves", N_("Save registers around function calls"), N_OPTS, 13, CL_COMMON | CL_REPORT, &flag_caller_saves, 0, 0 }, { "-fcheck-new", N_("Check the return value of new"), N_OPTS, 10, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fcommon", N_("Do not put uninitialized globals in the common section"), N_OPTS, 7, CL_COMMON | CL_REPORT, &flag_no_common, 1, 0 }, { "-fcond-mismatch", N_("Allow the arguments of the '?' 
operator to have different types"), N_OPTS, 14, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fconserve-space", N_("Reduce the size of object files"), N_OPTS, 15, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fconst-strings", N_("Make string literals \"const char[]\" not \"char[]\""), N_OPTS, 14, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fconstant-string-class=", N_("-fconst-string-class= Use class for constant strings"), N_OPTS, 23, CL_ObjC | CL_ObjCXX | CL_JOINED, 0, 0, 0 }, { "-fcprop-registers", N_("Perform a register copy-propagation optimization pass"), N_OPTS, 16, CL_COMMON | CL_REPORT, &flag_cprop_registers, 0, 0 }, { "-fcrossjumping", N_("Perform cross-jumping optimization"), N_OPTS, 13, CL_COMMON | CL_REPORT, &flag_crossjumping, 0, 0 }, { "-fcse-follow-jumps", N_("When running CSE, follow jumps to their targets"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_cse_follow_jumps, 0, 0 }, { "-fcse-skip-blocks", N_("When running CSE, follow conditional jumps"), N_OPTS, 16, CL_COMMON | CL_REPORT, &flag_cse_skip_blocks, 0, 0 }, { "-fdata-sections", N_("Place data items into their own section"), N_OPTS, 14, CL_COMMON | CL_REPORT, &flag_data_sections, 0, 0 }, { "-fdefault-inline", N_("Inline member functions by default"), N_OPTS, 15, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fdefer-pop", N_("Defer popping functions args from stack until later"), N_OPTS, 10, CL_COMMON | CL_REPORT, &flag_defer_pop, 0, 0 }, { "-fdelayed-branch", N_("Attempt to fill delay slots of branch instructions"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_delayed_branch, 0, 0 }, { "-fdelete-null-pointer-checks", N_("Delete useless null pointer checks"), N_OPTS, 27, CL_COMMON | CL_REPORT, &flag_delete_null_pointer_checks, 0, 0 }, { "-fdiagnostics-show-location=", N_("-fdiagnostics-show-location=[once|every-line] How often to emit source location at the beginning of line-wrapped diagnostics"), N_OPTS, 27, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-fdollars-in-identifiers", N_("Permit '$' as an identifier character"), N_OPTS, 23, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fdump-", N_("-fdump- Dump various compiler internals to a file"), N_OPTS, 6, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-fdump-unnumbered", N_("Suppress output of instruction numbers and line number notes in debugging dumps"), OPT_fdump_, 16, CL_COMMON | CL_REPORT, &flag_dump_unnumbered, 0, 0 }, { "-felide-constructors", 0, N_OPTS, 19, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-feliminate-dwarf2-dups", N_("Perform DWARF2 duplicate elimination"), N_OPTS, 22, CL_COMMON | CL_REPORT, &flag_eliminate_dwarf2_dups, 0, 0 }, { "-feliminate-unused-debug-symbols", N_("Perform unused type elimination in debug info"), N_OPTS, 31, CL_COMMON | CL_REPORT, &flag_debug_only_used_symbols, 0, 0 }, { "-feliminate-unused-debug-types", N_("Perform unused type elimination in debug info"), N_OPTS, 29, CL_COMMON | CL_REPORT, &flag_eliminate_unused_debug_types, 0, 0 }, { "-fenforce-eh-specs", N_("Generate code to check exception specifications"), N_OPTS, 17, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fenum-int-equiv", 0, N_OPTS, 15, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fexceptions", N_("Enable exception handling"), N_OPTS, 11, CL_COMMON | CL_REPORT, &flag_exceptions, 0, 0 }, { "-fexec-charset=", N_("-fexec-charset= Convert all strings and character constants to character set "), N_OPTS, 14, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-fexpensive-optimizations", N_("Perform a number of minor, expensive optimizations"), N_OPTS, 24, CL_COMMON | 
CL_REPORT, &flag_expensive_optimizations, 0, 0 }, { "-fexternal-templates", 0, N_OPTS, 19, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-ffast-math", 0, N_OPTS, 10, CL_COMMON, 0, 0, 0 }, { "-ffinite-math-only", N_("Assume no NaNs or infinities are generated"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_finite_math_only, 0, 0 }, { "-ffixed-", N_("-ffixed- Mark as being unavailable to the compiler"), N_OPTS, 7, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-ffixed-form", 0, OPT_ffixed_, 11, CL_C | CL_ObjC, 0, 0, 0 }, { "-ffixed-line-length-", 0, OPT_ffixed_, 19, CL_C | CL_ObjC | CL_JOINED, 0, 0, 0 }, { "-ffloat-store", N_("Do not store floats in registers"), N_OPTS, 12, CL_COMMON | CL_REPORT, &flag_float_store, 0, 0 }, { "-ffor-scope", N_("Scope of for-init-statement variables is local to the loop"), N_OPTS, 10, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fforce-addr", N_("Copy memory address constants into registers before use"), N_OPTS, 11, CL_COMMON | CL_REPORT, &flag_force_addr, 0, 0 }, { "-fforce-mem", N_("Copy memory operands into registers before use"), N_OPTS, 10, CL_COMMON | CL_REPORT, &flag_force_mem, 0, 0 }, { "-ffreestanding", N_("Do not assume that standard C libraries and \"main\" exist"), N_OPTS, 13, CL_C | CL_ObjC, 0, 0, 0 }, { "-ffunction-cse", N_("Allow function addresses to be held in registers"), N_OPTS, 13, CL_COMMON | CL_REPORT, &flag_no_function_cse, 1, 0 }, { "-ffunction-sections", N_("Place each function into its own section"), N_OPTS, 18, CL_COMMON | CL_REPORT, &flag_function_sections, 0, 0 }, { "-fgcse", N_("Perform global common subexpression elimination"), N_OPTS, 5, CL_COMMON | CL_REPORT, &flag_gcse, 0, 0 }, { "-fgcse-after-reload", N_("Perform global common subexpression elimination after register allocation"), N_OPTS, 18, CL_COMMON | CL_REPORT, &flag_gcse_after_reload, 0, 0 }, { "-fgcse-las", N_("Perform redundant load after store elimination in global common subexpression"), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_gcse_las, 0, 0 }, { "-fgcse-lm", N_("Perform enhanced load motion during global common subexpression elimination"), N_OPTS, 8, CL_COMMON | CL_REPORT, &flag_gcse_lm, 0, 0 }, { "-fgcse-sm", N_("Perform store motion after global common subexpression elimination"), N_OPTS, 8, CL_COMMON | CL_REPORT, &flag_gcse_sm, 0, 0 }, { "-fgnu-keywords", N_("Recognize GNU-defined keywords"), N_OPTS, 13, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fgnu-runtime", N_("Generate code for GNU runtime environment"), N_OPTS, 12, CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fguess-branch-probability", N_("Enable guessing of branch probabilities"), N_OPTS, 25, CL_COMMON | CL_REPORT, &flag_guess_branch_prob, 0, 0 }, { "-fguiding-decls", 0, N_OPTS, 14, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fhandle-exceptions", 0, N_OPTS, 18, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fhonor-std", 0, N_OPTS, 10, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fhosted", N_("Assume normal C execution environment"), N_OPTS, 7, CL_C | CL_ObjC, 0, 0, 0 }, { "-fhuge-objects", N_("Enable support for huge objects"), N_OPTS, 13, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fident", N_("Process #ident directives"), N_OPTS, 6, CL_COMMON | CL_REPORT, &flag_no_ident, 1, 0 }, { "-fif-conversion", N_("Perform conversion of conditional jumps to branchless equivalents"), N_OPTS, 14, CL_COMMON | CL_REPORT, &flag_if_conversion, 0, 0 }, { "-fif-conversion2", N_("Perform conversion of conditional jumps to conditional execution"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_if_conversion2, 0, 0 }, { "-fimplement-inlines", N_("Export functions even if they can be inlined"), N_OPTS, 
18, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fimplicit-inline-templates", N_("Emit implicit instantiations of inline templates"), N_OPTS, 26, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fimplicit-templates", N_("Emit implicit instantiations of templates"), N_OPTS, 19, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-finhibit-size-directive", N_("Do not generate .size directives"), N_OPTS, 23, CL_COMMON | CL_REPORT, &flag_inhibit_size_directive, 0, 0 }, { "-finline", N_("Pay attention to the \"inline\" keyword"), N_OPTS, 7, CL_COMMON | CL_REPORT, &flag_no_inline, 1, 0 }, { "-finline-functions", N_("Integrate simple functions into their callers"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_inline_functions, 0, 0 }, { "-finline-limit-", 0, N_OPTS, 14, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-finline-limit=", N_("-finline-limit=<number> Limit the size of inlined functions to <number>"), N_OPTS, 14, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-finput-charset=", N_("-finput-charset=<cset> Specify the default character set for source files."), N_OPTS, 15, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-finstrument-functions", N_("Instrument function entry and exit with profiling calls"), N_OPTS, 21, CL_COMMON | CL_REPORT, &flag_instrument_function_entry_exit, 0, 0 }, { "-fkeep-inline-functions", N_("Generate code for functions even if they are fully inlined"), N_OPTS, 22, CL_COMMON | CL_REPORT, &flag_keep_inline_functions, 0, 0 }, { "-fkeep-static-consts", N_("Emit static const variables even if they are not used"), N_OPTS, 19, CL_COMMON | CL_REPORT, &flag_keep_static_consts, 0, 0 }, { "-flabels-ok", 0, N_OPTS, 10, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fleading-underscore", N_("Give external symbols a leading underscore"), N_OPTS, 19, CL_COMMON | CL_REPORT, &flag_leading_underscore, 0, 0 }, { "-floop-optimize", N_("Perform loop optimizations"), N_OPTS, 14, CL_COMMON | CL_REPORT, &flag_loop_optimize, 0, 0 }, { "-floop-optimize2", N_("Perform loop optimizations using the new loop optimizer"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_loop_optimize2, 0, 0 }, { "-fmath-errno", N_("Set errno after built-in math functions"), N_OPTS, 11, CL_COMMON | CL_REPORT, &flag_errno_math, 0, 0 }, { "-fmem-report", N_("Report on permanent memory allocation"), N_OPTS, 11, CL_COMMON | CL_REPORT, &mem_report, 0, 0 }, { "-fmerge-all-constants", N_("Attempt to merge identical constants and constant variables"), N_OPTS, 20, CL_COMMON | CL_REPORT, &flag_merge_constants, 1, 2 }, { "-fmerge-constants", N_("Attempt to merge identical constants across compilation units"), N_OPTS, 16, CL_COMMON | CL_REPORT, &flag_merge_constants, 1, 1 }, { "-fmessage-length=", N_("-fmessage-length=<number> Limit diagnostics to <number> characters per line.
0 suppresses line-wrapping"), N_OPTS, 16, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-fmodulo-sched", N_("Perform SMS based modulo scheduling before the first scheduling pass"), N_OPTS, 13, CL_COMMON | CL_REPORT, &flag_modulo_sched, 0, 0 }, { "-fmove-all-movables", N_("Force all loop invariant computations out of loops"), N_OPTS, 18, CL_COMMON | CL_REPORT, &flag_move_all_movables, 0, 0 }, { "-fmove-loop-invariants", N_("Move loop invariant computations out of loops"), N_OPTS, 21, CL_COMMON | CL_REPORT, &flag_move_loop_invariants, 0, 0 }, { "-fms-extensions", N_("Don't warn about uses of Microsoft extensions"), N_OPTS, 14, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fmudflap", N_("Add mudflap bounds-checking instrumentation for single-threaded program."), N_OPTS, 8, CL_COMMON | CL_REJECT_NEGATIVE | CL_REPORT, &flag_mudflap, 0, 0 }, { "-fmudflapir", N_("Ignore read operations when inserting mudflap instrumentation."), N_OPTS, 10, CL_COMMON | CL_REJECT_NEGATIVE | CL_REPORT, &flag_mudflap_ignore_reads, 0, 0 }, { "-fmudflapth", N_("Add mudflap bounds-checking instrumentation for multi-threaded program."), N_OPTS, 10, CL_COMMON | CL_REJECT_NEGATIVE | CL_REPORT, &flag_mudflap_threads, 0, 0 }, { "-fname-mangling-version-", 0, N_OPTS, 23, CL_CXX | CL_ObjCXX | CL_JOINED, 0, 0, 0 }, { "-fnew-abi", 0, N_OPTS, 8, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fnew-ra", N_("Use graph-coloring register allocation"), N_OPTS, 7, CL_COMMON | CL_REPORT, &flag_new_regalloc, 0, 0 }, { "-fnext-runtime", N_("Generate code for NeXT (Apple Mac OS X) runtime environment"), N_OPTS, 13, CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fnil-receivers", N_("Assume that receivers of Objective-C messages may be nil"), N_OPTS, 14, CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fnon-call-exceptions", N_("Support synchronous non-call exceptions"), N_OPTS, 20, CL_COMMON | CL_REPORT, &flag_non_call_exceptions, 0, 0 }, { "-fnonansi-builtins", 0, N_OPTS, 17, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fnonnull-objects", 0, N_OPTS, 16, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fobjc-exceptions", N_("Enable Objective-C exception and synchronization syntax"), N_OPTS, 16, CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fobjc-sjlj-exceptions", N_("Enable Objective-C setjmp exception handling runtime"), N_OPTS, 21, CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fold-unroll-all-loops", N_("Perform loop unrolling for all loops"), N_OPTS, 21, CL_COMMON | CL_REPORT, &flag_old_unroll_all_loops, 0, 0 }, { "-fold-unroll-loops", N_("Perform loop unrolling when iteration count is known"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_old_unroll_loops, 0, 0 }, { "-fomit-frame-pointer", N_("When possible do not generate stack frames"), N_OPTS, 19, CL_COMMON | CL_REPORT, &flag_omit_frame_pointer, 0, 0 }, { "-foperator-names", N_("Recognize C++ kewords like \"compl\" and \"xor\""), N_OPTS, 15, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-foptimize-register-move", N_("Do the full register move optimization pass"), N_OPTS, 23, CL_COMMON | CL_REPORT, &flag_regmove, 0, 0 }, { "-foptimize-sibling-calls", N_("Optimize sibling and tail recursive calls"), N_OPTS, 23, CL_COMMON | CL_REPORT, &flag_optimize_sibling_calls, 0, 0 }, { "-foptional-diags", N_("Enable optional diagnostics"), N_OPTS, 15, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fpack-struct", N_("Pack structure members together without holes"), N_OPTS, 12, CL_COMMON | CL_REPORT, &flag_pack_struct, 0, 0 }, { "-fpcc-struct-return", N_("Return small aggregates in memory, not registers"), N_OPTS, 18, CL_COMMON | CL_REPORT, &flag_pcc_struct_return, 1, 1 }, { 
"-fpch-deps", 0, N_OPTS, 9, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fpch-preprocess", N_("Look for and use PCH files even when preprocessing"), N_OPTS, 15, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fpeel-loops", N_("Perform loop peeling"), N_OPTS, 11, CL_COMMON | CL_REPORT, &flag_peel_loops, 0, 0 }, { "-fpeephole", N_("Enable machine specific peephole optimizations"), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_no_peephole, 1, 0 }, { "-fpeephole2", N_("Enable an RTL peephole pass before sched2"), N_OPTS, 10, CL_COMMON | CL_REPORT, &flag_peephole2, 0, 0 }, { "-fpermissive", N_("Downgrade conformance errors to warnings"), N_OPTS, 11, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fpic", N_("Generate position-independent code if possible"), N_OPTS, 4, CL_COMMON | CL_REPORT, &flag_pic, 1, 1 }, { "-fpie", N_("Generate position-independent code for executables if possible"), N_OPTS, 4, CL_COMMON | CL_REPORT, &flag_pie, 1, 1 }, { "-fprefetch-loop-arrays", N_("Generate prefetch instructions, if available, for arrays in loops"), N_OPTS, 21, CL_COMMON | CL_REPORT, &flag_prefetch_loop_arrays, 0, 0 }, { "-fpreprocessed", N_("Treat the input file as already preprocessed"), N_OPTS, 13, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fprofile", N_("Enable basic program profiling code"), N_OPTS, 8, CL_COMMON | CL_REPORT, &profile_flag, 0, 0 }, { "-fprofile-arcs", N_("Insert arc-based program profiling code"), N_OPTS, 13, CL_COMMON | CL_REPORT, &profile_arc_flag, 0, 0 }, { "-fprofile-generate", N_("Enable common options for generating profile info for profile feedback directed optimizations"), N_OPTS, 17, CL_COMMON, 0, 0, 0 }, { "-fprofile-use", N_("Enable common options for performing profile feedback directed optimizations"), N_OPTS, 12, CL_COMMON, 0, 0, 0 }, { "-fprofile-values", N_("Insert code to profile values of expressions"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_profile_values, 0, 0 }, { "-frandom-seed", 0, N_OPTS, 12, CL_COMMON, 0, 0, 0 }, { "-frandom-seed=", N_("-frandom-seed= Make compile reproducible using "), N_OPTS, 13, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-freduce-all-givs", N_("Strength reduce all loop general induction variables"), N_OPTS, 16, CL_COMMON | CL_REPORT, &flag_reduce_all_givs, 0, 0 }, { "-freg-struct-return", N_("Return small aggregates in registers"), N_OPTS, 18, CL_COMMON | CL_REPORT, &flag_pcc_struct_return, 1, 0 }, { "-fregmove", N_("Enables a register move optimization"), N_OPTS, 8, CL_COMMON | CL_REPORT, &flag_regmove, 0, 0 }, { "-frename-registers", N_("Perform a register renaming optimization pass"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_rename_registers, 0, 0 }, { "-freorder-blocks", N_("Reorder basic blocks to improve code placement"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_reorder_blocks, 0, 0 }, { "-freorder-blocks-and-partition", N_("Reorder basic blocks and partition into hot and cold sections"), N_OPTS, 29, CL_COMMON | CL_REPORT, &flag_reorder_blocks_and_partition, 0, 0 }, { "-freorder-functions", N_("Reorder functions to improve code placement"), N_OPTS, 18, CL_COMMON | CL_REPORT, &flag_reorder_functions, 0, 0 }, { "-freplace-objc-classes", N_("Used in Fix-and-Continue mode to indicate that object files may be swapped in at runtime"), N_OPTS, 21, CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-frepo", N_("Enable automatic template instantiation"), N_OPTS, 5, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-frerun-cse-after-loop", N_("Add a common subexpression elimination pass after loop optimizations"), N_OPTS, 21, CL_COMMON | CL_REPORT, 
&flag_rerun_cse_after_loop, 0, 0 }, { "-frerun-loop-opt", N_("Run the loop optimizer twice"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_rerun_loop_opt, 0, 0 }, { "-frounding-math", N_("Disable optimizations that assume default FP rounding behavior"), N_OPTS, 14, CL_COMMON | CL_REPORT, &flag_rounding_math, 0, 0 }, { "-frtti", N_("Generate run time type descriptor information"), N_OPTS, 5, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fsched-interblock", N_("Enable scheduling across basic blocks"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_schedule_interblock, 0, 0 }, { "-fsched-spec", N_("Allow speculative motion of non-loads"), N_OPTS, 11, CL_COMMON | CL_REPORT, &flag_schedule_speculative, 0, 0 }, { "-fsched-spec-load", N_("Allow speculative motion of some loads"), N_OPTS, 16, CL_COMMON | CL_REPORT, &flag_schedule_speculative_load, 0, 0 }, { "-fsched-spec-load-dangerous", N_("Allow speculative motion of more loads"), N_OPTS, 26, CL_COMMON | CL_REPORT, &flag_schedule_speculative_load_dangerous, 0, 0 }, { "-fsched-stalled-insns", N_("Allow premature scheduling of queued insns"), N_OPTS, 20, CL_COMMON | CL_REPORT, &flag_sched_stalled_insns, 0, 0 }, { "-fsched-stalled-insns-dep", N_("Set dependence distance checking in premature scheduling of queued insns"), N_OPTS, 24, CL_COMMON | CL_REPORT, &flag_sched_stalled_insns_dep, 1, 1 }, { "-fsched-stalled-insns-dep=", N_("-fsched-stalled-insns-dep= Set dependence distance checking in premature scheduling of queued insns"), N_OPTS, 25, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-fsched-stalled-insns=", N_("-fsched-stalled-insns= Set number of queued insns that can be prematurely scheduled"), N_OPTS, 21, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-fsched-verbose=", N_("-fsched-verbose= Set the verbosity level of the scheduler"), N_OPTS, 15, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-fsched2-use-superblocks", N_("If scheduling post reload, do superblock scheduling"), N_OPTS, 23, CL_COMMON | CL_REPORT, &flag_sched2_use_superblocks, 0, 0 }, { "-fsched2-use-traces", N_("If scheduling post reload, do trace scheduling"), N_OPTS, 18, CL_COMMON | CL_REPORT, &flag_sched2_use_traces, 0, 0 }, { "-fschedule-insns", N_("Reschedule instructions before register allocation"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_schedule_insns, 0, 0 }, { "-fschedule-insns2", N_("Reschedule instructions after register allocation"), N_OPTS, 16, CL_COMMON | CL_REPORT, &flag_schedule_insns_after_reload, 0, 0 }, { "-fshared-data", N_("Mark data as shared rather than private"), N_OPTS, 12, CL_COMMON | CL_REPORT, &flag_shared_data, 0, 0 }, { "-fshort-double", N_("Use the same size for double as for float"), N_OPTS, 13, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fshort-enums", N_("Use the narrowest integer type possible for enumeration types"), N_OPTS, 12, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fshort-wchar", N_("Force the underlying type for \"wchar_t\" to be \"unsigned short\""), N_OPTS, 12, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fshow-column", 0, N_OPTS, 12, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fsignaling-nans", N_("Disable optimizations observable by IEEE signaling NaNs"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_signaling_nans, 0, 0 }, { "-fsigned-bitfields", N_("When \"signed\" or \"unsigned\" is not given make the bitfield signed"), N_OPTS, 17, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fsigned-char", N_("Make \"char\" signed by default"), N_OPTS, 12, CL_C | 
CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fsingle-precision-constant", N_("Convert floating point constants to single precision constants"), N_OPTS, 26, CL_COMMON | CL_REPORT, &flag_single_precision_constant, 0, 0 }, { "-fsquangle", 0, N_OPTS, 9, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fstack-check", N_("Insert stack checking code into the program"), N_OPTS, 12, CL_COMMON | CL_REPORT, &flag_stack_check, 0, 0 }, { "-fstack-limit", 0, N_OPTS, 12, CL_COMMON, 0, 0, 0 }, { "-fstack-limit-register=", N_("-fstack-limit-register= Trap if the stack goes past "), N_OPTS, 22, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-fstack-limit-symbol=", N_("-fstack-limit-symbol= Trap if the stack goes past symbol "), N_OPTS, 20, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-fstats", N_("Display statistics accumulated during compilation"), N_OPTS, 6, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fstrength-reduce", N_("Perform strength reduction optimizations"), N_OPTS, 16, CL_COMMON | CL_REPORT, &flag_strength_reduce, 0, 0 }, { "-fstrict-aliasing", N_("Assume strict aliasing rules apply"), N_OPTS, 16, CL_COMMON | CL_REPORT, &flag_strict_aliasing, 0, 0 }, { "-fstrict-prototype", 0, N_OPTS, 17, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fsyntax-only", N_("Check for syntax errors, then stop"), N_OPTS, 12, CL_COMMON | CL_REPORT, &flag_syntax_only, 0, 0 }, { "-ftabstop=", N_("-ftabstop= Distance between tab stops for column reporting"), N_OPTS, 9, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-ftemplate-depth-", N_("-ftemplate-depth- Specify maximum template instantiation depth"), N_OPTS, 16, CL_CXX | CL_ObjCXX | CL_JOINED | CL_REJECT_NEGATIVE | CL_UINTEGER, 0, 0, 0 }, { "-ftest-coverage", N_("Create data files needed by \"gcov\""), N_OPTS, 14, CL_COMMON | CL_REPORT, &flag_test_coverage, 0, 0 }, { "-fthis-is-variable", 0, N_OPTS, 17, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fthread-jumps", N_("Perform jump threading optimizations"), N_OPTS, 13, CL_COMMON | CL_REPORT, &flag_thread_jumps, 0, 0 }, { "-ftime-report", N_("Report the time taken by each compiler pass"), N_OPTS, 12, CL_COMMON | CL_REPORT, &time_report, 0, 0 }, { "-ftls-model=", N_("-ftls-model=[global-dynamic|local-dynamic|initial-exec|local-exec] Set the default thread-local storage code generation model"), N_OPTS, 11, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-ftracer", N_("Perform superblock formation via tail duplication"), N_OPTS, 7, CL_COMMON | CL_REPORT, &flag_tracer, 0, 0 }, { "-ftrapping-math", N_("Assume floating-point operations can trap"), N_OPTS, 14, CL_COMMON | CL_REPORT, &flag_trapping_math, 0, 0 }, { "-ftrapv", N_("Trap for signed overflow in addition, subtraction and multiplication"), N_OPTS, 6, CL_COMMON | CL_REPORT, &flag_trapv, 0, 0 }, { "-ftree-based-profiling", N_("Use tree-ssa based implementation of profiling"), N_OPTS, 21, CL_COMMON | CL_REPORT, &flag_tree_based_profiling, 0, 0 }, { "-ftree-ccp", N_("Enable SSA-CCP optimization on trees"), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_tree_ccp, 0, 0 }, { "-ftree-ch", N_("Enable loop header copying on trees"), N_OPTS, 8, CL_COMMON | CL_REPORT, &flag_tree_ch, 0, 0 }, { "-ftree-combine-temps", N_("Coalesce memory temporaries in the SSA->normal pass"), N_OPTS, 19, CL_COMMON | CL_REPORT, &flag_tree_combine_temps, 0, 0 }, { "-ftree-copyrename", N_("Replace SSA temporaries with better names in copies."), N_OPTS, 16, CL_COMMON | CL_REPORT, &flag_tree_copyrename, 0, 0 }, { "-ftree-dce", N_("Enable SSA dead code elimination 
optimization on trees"), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_tree_dce, 0, 0 }, { "-ftree-dominator-opts", N_("Enable dominator optimizations"), N_OPTS, 20, CL_COMMON | CL_REPORT, &flag_tree_dom, 0, 0 }, { "-ftree-dse", N_("Enable dead store elimination"), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_tree_dse, 0, 0 }, { "-ftree-fre", N_("Enable Full Redundancy Elimination (FRE) on trees"), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_tree_fre, 0, 0 }, { "-ftree-loop-optimize", N_("Enable loop optimizations on tree level"), N_OPTS, 19, CL_COMMON | CL_REPORT, &flag_tree_loop_optimize, 0, 0 }, { "-ftree-lrs", N_("Perform live range splitting during the SSA->normal pass."), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_tree_live_range_split, 0, 0 }, { "-ftree-points-to=", 0, N_OPTS, 16, CL_COMMON | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-ftree-pre", N_("Enable SSA-PRE optimization on trees"), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_tree_pre, 0, 0 }, { "-ftree-sra", N_("Perform scalar replacement of aggregates"), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_tree_sra, 0, 0 }, { "-ftree-ter", N_("Replace temporary expressions in the SSA->normal pass"), N_OPTS, 9, CL_COMMON | CL_REPORT, &flag_tree_ter, 0, 0 }, { "-funit-at-a-time", N_("Compile whole compilation unit at a time"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_unit_at_a_time, 0, 0 }, { "-funroll-all-loops", N_("Perform loop unrolling for all loops"), N_OPTS, 17, CL_COMMON | CL_REPORT, &flag_unroll_all_loops, 0, 0 }, { "-funroll-loops", N_("Perform loop unrolling when iteration count is known"), N_OPTS, 13, CL_COMMON | CL_REPORT, &flag_unroll_loops, 0, 0 }, { "-funsafe-math-optimizations", N_("Allow math optimizations that may violate IEEE or ISO standards"), N_OPTS, 26, CL_COMMON | CL_REPORT, &flag_unsafe_math_optimizations, 0, 0 }, { "-funsigned-bitfields", N_("When \"signed\" or \"unsigned\" is not given make the bitfield unsigned"), N_OPTS, 19, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-funsigned-char", N_("Make \"char\" unsigned by default"), N_OPTS, 14, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-funswitch-loops", N_("Perform loop unswitching"), N_OPTS, 15, CL_COMMON | CL_REPORT, &flag_unswitch_loops, 0, 0 }, { "-funwind-tables", N_("Just generate unwind tables for exception handling"), N_OPTS, 14, CL_COMMON | CL_REPORT, &flag_unwind_tables, 0, 0 }, { "-fuse-cxa-atexit", N_("Use __cxa_atexit to register destructors"), N_OPTS, 15, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fvar-tracking", N_("Perform variable tracking"), N_OPTS, 13, CL_COMMON | CL_REPORT, &flag_var_tracking, 0, 0 }, { "-fverbose-asm", N_("Add extra commentary to assembler output"), N_OPTS, 12, CL_COMMON | CL_REPORT, &flag_verbose_asm, 0, 0 }, { "-fvpt", N_("Use expression value profiles in optimizations"), N_OPTS, 4, CL_COMMON | CL_REPORT, &flag_value_profile_transformations, 0, 0 }, { "-fvtable-gc", N_("Discard unused virtual functions"), N_OPTS, 10, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fvtable-thunks", N_("Implement vtables using thunks"), N_OPTS, 14, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fweak", N_("Emit common-like symbols as weak symbols"), N_OPTS, 5, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fweb", N_("Construct webs and split unrelated uses of single variable"), N_OPTS, 4, CL_COMMON | CL_REPORT, &flag_web, 0, 0 }, { "-fwide-exec-charset=", N_("-fwide-exec-charset= Convert all wide strings and character constants to character set "), N_OPTS, 19, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_REJECT_NEGATIVE, 0, 0, 0 }, { "-fworking-directory", N_("Generate a #line 
directive pointing at the current working directory"), N_OPTS, 18, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-fwrapv", N_("Assume signed arithmetic overflow wraps around"), N_OPTS, 6, CL_COMMON | CL_REPORT, &flag_wrapv, 0, 0 }, { "-fxref", N_("Emit cross referencing information"), N_OPTS, 5, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-fzero-initialized-in-bss", N_("Put zero initialized data in the bss section"), N_OPTS, 24, CL_COMMON | CL_REPORT, &flag_zero_initialized_in_bss, 0, 0 }, { "-fzero-link", N_("Generate lazy class lookup (via objc_getClass()) for use in Zero-Link mode"), N_OPTS, 10, CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-g", N_("Generate debug information in default format"), N_OPTS, 1, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-gcoff", N_("Generate debug information in COFF format"), OPT_g, 5, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-gdwarf-2", N_("Generate debug information in DWARF v2 format"), OPT_g, 8, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-gen-decls", N_("Dump declarations to a .decl file"), OPT_g, 9, CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-ggdb", N_("Generate debug information in default extended format"), OPT_g, 4, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-gstabs", N_("Generate debug information in STABS format"), OPT_g, 6, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-gstabs+", N_("Generate debug information in extended STABS format"), OPT_gstabs, 7, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-gvms", N_("Generate debug information in VMS format"), OPT_g, 4, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-gxcoff", N_("Generate debug information in XCOFF format"), OPT_g, 6, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-gxcoff+", N_("Generate debug information in extended XCOFF format"), OPT_gxcoff, 7, CL_COMMON | CL_JOINED | CL_MISSING_OK, 0, 0, 0 }, { "-idirafter", N_("-idirafter Add to the end of the system include path"), N_OPTS, 9, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-imacros", N_("-imacros Accept definition of macros in "), N_OPTS, 7, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-include", N_("-include Include the contents of before other files"), N_OPTS, 7, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-iprefix", N_("-iprefix Specify as a prefix for next two options"), N_OPTS, 7, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-iquote", N_("-iquote Add to the end of the quote include path"), N_OPTS, 6, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-isysroot", N_("-isysroot Set to be the system root directory"), N_OPTS, 8, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-isystem", N_("-isystem Add to the start of the system include path"), N_OPTS, 7, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-iwithprefix", N_("-iwithprefix Add to the end of the system include path"), N_OPTS, 11, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-iwithprefixbefore", N_("-iwithprefixbefore Add to the end of the main include path"), OPT_iwithprefix, 17, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-lang-asm", 0, N_OPTS, 8, CL_C | CL_UNDOCUMENTED, 0, 0, 0 }, { "-lang-objc", 0, N_OPTS, 9, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_UNDOCUMENTED, 0, 0, 0 }, { "-m", 0, N_OPTS, 1, CL_COMMON | CL_JOINED, 0, 0, 0 }, { "-nostdinc", N_("Do 
not search standard system include directories (those specified with -isystem will still be used)"), N_OPTS, 8, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-nostdinc++", N_("Do not search standard system include directories for C++"), N_OPTS, 10, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-o", N_("-o Place output into "), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_COMMON | CL_JOINED | CL_SEPARATE, 0, 0, 0 }, { "-p", N_("Enable function profiling"), N_OPTS, 1, CL_COMMON, &profile_flag, 0, 0 }, { "-pedantic", N_("Issue warnings needed for strict compliance to the standard"), N_OPTS, 8, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_COMMON, &pedantic, 0, 0 }, { "-pedantic-errors", N_("Like -pedantic but issue them as errors"), N_OPTS, 15, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_COMMON, 0, 0, 0 }, { "-print-objc-runtime-info", N_("Generate C header of platform-specific features"), N_OPTS, 23, CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-quiet", N_("Do not display functions compiled or elapsed time"), N_OPTS, 5, CL_COMMON, &quiet_flag, 0, 0 }, { "-remap", N_("Remap file names when including files"), N_OPTS, 5, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-std=c++98", N_("Conform to the ISO 1998 C++ standard"), N_OPTS, 9, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-std=c89", N_("Conform to the ISO 1990 C standard"), N_OPTS, 7, CL_C | CL_ObjC, 0, 0, 0 }, { "-std=c99", N_("Conform to the ISO 1999 C standard"), N_OPTS, 7, CL_C | CL_ObjC, 0, 0, 0 }, { "-std=c9x", N_("Deprecated in favor of -std=c99"), N_OPTS, 7, CL_C | CL_ObjC, 0, 0, 0 }, { "-std=gnu++98", N_("Conform to the ISO 1998 C++ standard with GNU extensions"), N_OPTS, 11, CL_CXX | CL_ObjCXX, 0, 0, 0 }, { "-std=gnu89", N_("Conform to the ISO 1990 C standard with GNU extensions"), N_OPTS, 9, CL_C | CL_ObjC, 0, 0, 0 }, { "-std=gnu99", N_("Conform to the ISO 1999 C standard with GNU extensions"), N_OPTS, 9, CL_C | CL_ObjC, 0, 0, 0 }, { "-std=gnu9x", N_("Deprecated in favor of -std=gnu99"), N_OPTS, 9, CL_C | CL_ObjC, 0, 0, 0 }, { "-std=iso9899:1990", N_("Deprecated in favor of -std=c89"), N_OPTS, 16, CL_C | CL_ObjC, 0, 0, 0 }, { "-std=iso9899:199409", N_("Conform to the ISO 1990 C standard as amended in 1994"), N_OPTS, 18, CL_C | CL_ObjC, 0, 0, 0 }, { "-std=iso9899:1999", N_("Deprecated in favor of -std=c99"), N_OPTS, 16, CL_C | CL_ObjC, 0, 0, 0 }, { "-std=iso9899:199x", N_("Deprecated in favor of -std=c99"), N_OPTS, 16, CL_C | CL_ObjC, 0, 0, 0 }, { "-traditional-cpp", N_("Enable traditional preprocessing"), N_OPTS, 15, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-trigraphs", N_("-trigraphs Support ISO C trigraphs"), N_OPTS, 9, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-undef", N_("Do not predefine system-specific and GCC-specific macros"), N_OPTS, 5, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-v", N_("Enable verbose output"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX, 0, 0, 0 }, { "-version", N_("Display the compiler's version"), N_OPTS, 7, CL_COMMON, &version_flag, 0, 0 }, { "-w", N_("Suppress warnings"), N_OPTS, 1, CL_C | CL_CXX | CL_ObjC | CL_ObjCXX | CL_COMMON, &inhibit_warnings, 0, 0 } }; /* Alias analysis for GNU C Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by John Carr (jfc@mit.edu). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The alias sets assigned to MEMs assist the back-end in determining which MEMs can alias which other MEMs. In general, two MEMs in different alias sets cannot alias each other, with one important exception. Consider something like: struct S { int i; double d; }; a store to an `S' can alias something of either type `int' or type `double'. (However, a store to an `int' cannot alias a `double' and vice versa.) We indicate this via a tree structure that looks like: struct S / \ / \ |/_ _\| int double (The arrows are directed and point downwards.) In this situation we say the alias set for `struct S' is the `superset' and that those for `int' and `double' are `subsets'. To see whether two alias sets can point to the same memory, we must see if either alias set is a subset of the other. We need not trace past immediate descendants, however, since we propagate all grandchildren up one level. Alias set zero is implicitly a superset of all other alias sets. However, this is no actual entry for alias set zero. It is an error to attempt to explicitly construct a subset of zero. */ struct alias_set_entry GTY(()) { /* The alias set number, as stored in MEM_ALIAS_SET. */ HOST_WIDE_INT alias_set; /* The children of the alias set. These are not just the immediate children, but, in fact, all descendants. So, if we have: struct T { struct S s; float f; } continuing our example above, the children here will be all of `int', `double', `float', and `struct S'. */ splay_tree GTY((param1_is (int), param2_is (int))) children; /* Nonzero if would have a child of zero: this effectively makes this alias set the same as alias set zero. */ int has_zero_child; }; typedef struct alias_set_entry *alias_set_entry; static int rtx_equal_for_memref_p (rtx, rtx); static rtx find_symbolic_term (rtx); static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT); static void record_set (rtx, rtx, void *); static int base_alias_check (rtx, rtx, enum machine_mode, enum machine_mode); static rtx find_base_value (rtx); static int mems_in_disjoint_alias_sets_p (rtx, rtx); static int insert_subset_children (splay_tree_node, void*); static tree find_base_decl (tree); static alias_set_entry get_alias_set_entry (HOST_WIDE_INT); static rtx fixed_scalar_and_varying_struct_p (rtx, rtx, rtx, rtx, int (*) (rtx, int)); static int aliases_everything_p (rtx); static bool nonoverlapping_component_refs_p (tree, tree); static tree decl_for_component_ref (tree); static rtx adjust_offset_for_component_ref (tree, rtx); static int nonoverlapping_memrefs_p (rtx, rtx); static int write_dependence_p (rtx, rtx, int, int); static int nonlocal_mentioned_p_1 (rtx *, void *); static int nonlocal_mentioned_p (rtx); static int nonlocal_referenced_p_1 (rtx *, void *); static int nonlocal_referenced_p (rtx); static int nonlocal_set_p_1 (rtx *, void *); static int nonlocal_set_p (rtx); static void memory_modified_1 (rtx, rtx, void *); /* Set up all info needed to perform alias analysis on memory references. */ /* Returns the size in bytes of the mode of X. 
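For illustration, on typical targets a MEM in SImode has size 4 and a MEM in DImode has size 8, so those are the values this macro yields for such references (purely an illustrative note; the value always comes from GET_MODE_SIZE).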
*/ #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X))) /* Returns nonzero if MEM1 and MEM2 do not alias because they are in different alias sets. We ignore alias sets in functions making use of variable arguments because the va_arg macros on some systems are not legal ANSI C. */ #define DIFFERENT_ALIAS_SETS_P(MEM1, MEM2) \ mems_in_disjoint_alias_sets_p (MEM1, MEM2) /* Cap the number of passes we make over the insns propagating alias information through set chains. 10 is a completely arbitrary choice. */ #define MAX_ALIAS_LOOP_PASSES 10 /* reg_base_value[N] gives an address to which register N is related. If all sets after the first add or subtract to the current value or otherwise modify it so it does not point to a different top level object, reg_base_value[N] is equal to the address part of the source of the first set. A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF. ADDRESS expressions represent certain special values: function arguments and the stack, frame, and argument pointers. The contents of an ADDRESS is not normally used, the mode of the ADDRESS determines whether the ADDRESS is a function argument or some other special value. Pointer equality, not rtx_equal_p, determines whether two ADDRESS expressions refer to the same base address. The only use of the contents of an ADDRESS is for determining if the current function performs nonlocal memory memory references for the purposes of marking the function as a constant function. */ static GTY(()) varray_type reg_base_value; static rtx *new_reg_base_value; /* We preserve the copy of old array around to avoid amount of garbage produced. About 8% of garbage produced were attributed to this array. */ static GTY((deletable)) varray_type old_reg_base_value; /* Static hunks of RTL used by the aliasing code; these are initialized once per function to avoid unnecessary RTL allocations. */ static GTY (()) rtx static_reg_base_value[FIRST_PSEUDO_REGISTER]; #define REG_BASE_VALUE(X) \ (reg_base_value && REGNO (X) < VARRAY_SIZE (reg_base_value) \ ? VARRAY_RTX (reg_base_value, REGNO (X)) : 0) /* Vector of known invariant relationships between registers. Set in loop unrolling. Indexed by register number, if nonzero the value is an expression describing this register in terms of another. The length of this array is REG_BASE_VALUE_SIZE. Because this array contains only pseudo registers it has no effect after reload. */ static GTY((length("alias_invariant_size"))) rtx *alias_invariant; static GTY(()) unsigned int alias_invariant_size; /* Vector indexed by N giving the initial (unchanging) value known for pseudo-register N. This array is initialized in init_alias_analysis, and does not change until end_alias_analysis is called. */ static GTY((length("reg_known_value_size"))) rtx *reg_known_value; /* Indicates number of valid entries in reg_known_value. */ static GTY(()) unsigned int reg_known_value_size; /* Vector recording for each reg_known_value whether it is due to a REG_EQUIV note. Future passes (viz., reload) may replace the pseudo with the equivalent expression and so we account for the dependences that would be introduced if that happens. The REG_EQUIV notes created in assign_parms may mention the arg pointer, and there are explicit insns in the RTL that modify the arg pointer. Thus we must ensure that such insns don't get scheduled across each other because that would invalidate the REG_EQUIV notes. 
One could argue that the REG_EQUIV notes are wrong, but solving the problem in the scheduler will likely give better code, so we do it here. */ static bool *reg_known_equiv_p; /* True when scanning insns from the start of the rtl to the NOTE_INSN_FUNCTION_BEG note. */ static bool copying_arguments; /* The splay-tree used to store the various alias set entries. */ static GTY ((param_is (struct alias_set_entry))) varray_type alias_sets; /* Returns a pointer to the alias set entry for ALIAS_SET, if there is such an entry, or NULL otherwise. */ static inline alias_set_entry get_alias_set_entry (HOST_WIDE_INT alias_set) { return (alias_set_entry)VARRAY_GENERIC_PTR (alias_sets, alias_set); } /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that the two MEMs cannot alias each other. */ static inline int mems_in_disjoint_alias_sets_p (rtx mem1, rtx mem2) {
#ifdef ENABLE_CHECKING
/* Perform a basic sanity check. Namely, that there are no alias sets if we're not using strict aliasing. This helps to catch bugs whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or where a MEM is allocated in some way other than by the use of gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared. If we begin to use alias sets to indicate that spilled registers cannot alias each other, we might need to remove this check. */ if (! flag_strict_aliasing && (MEM_ALIAS_SET (mem1) != 0 || MEM_ALIAS_SET (mem2) != 0)) abort ();
#endif
return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2)); } /* Insert the NODE into the splay tree given by DATA. Used by record_alias_subset via splay_tree_foreach. */ static int insert_subset_children (splay_tree_node node, void *data) { splay_tree_insert ((splay_tree) data, node->key, node->value); return 0; } /* Return 1 if the two specified alias sets may conflict. */ int alias_sets_conflict_p (HOST_WIDE_INT set1, HOST_WIDE_INT set2) { alias_set_entry ase; /* If we have no alias set information for one of the operands, we have to assume it can alias anything. */ if (set1 == 0 || set2 == 0 /* If the two alias sets are the same, they may alias. */ || set1 == set2) return 1; /* See if the first alias set is a subset of the second. */ ase = get_alias_set_entry (set1); if (ase != 0 && (ase->has_zero_child || splay_tree_lookup (ase->children, (splay_tree_key) set2))) return 1; /* Now do the same, but with the alias sets reversed. */ ase = get_alias_set_entry (set2); if (ase != 0 && (ase->has_zero_child || splay_tree_lookup (ase->children, (splay_tree_key) set1))) return 1; /* The two alias sets are distinct and neither one is the child of the other. Therefore, they cannot alias. */ return 0; } /* Return 1 if the two specified alias sets might conflict, or if any subtype of these alias sets might conflict. */ int alias_sets_might_conflict_p (HOST_WIDE_INT set1, HOST_WIDE_INT set2) { if (set1 == 0 || set2 == 0 || set1 == set2) return 1; return 0; } /* Return 1 if TYPE is a RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE and has any readonly fields. If any of the fields have types that contain readonly fields, return true as well.
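For example (a purely illustrative type, not one used here): a struct declared as `struct k { const int id; char name[8]; };' has a readonly field, so this predicate returns 1 for it, and likewise for any aggregate that merely contains such a struct as a field.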
*/ int readonly_fields_p (tree type) { tree field; if (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE && TREE_CODE (type) != QUAL_UNION_TYPE) return 0; for (field = TYPE_FIELDS (type); field != 0; field = TREE_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL && (TREE_READONLY (field) || readonly_fields_p (TREE_TYPE (field)))) return 1; return 0; } /* Return 1 if any MEM object of type T1 will always conflict (using the dependency routines in this file) with any MEM object of type T2. This is used when allocating temporary storage. If T1 and/or T2 are NULL_TREE, it means we know nothing about the storage. */ int objects_must_conflict_p (tree t1, tree t2) { HOST_WIDE_INT set1, set2; /* If neither has a type specified, we don't know if they'll conflict because we may be using them to store objects of various types, for example the argument and local variables areas of inlined functions. */ if (t1 == 0 && t2 == 0) return 0; /* If one or the other has readonly fields or is readonly, then they may not conflict. */ if ((t1 != 0 && readonly_fields_p (t1)) || (t2 != 0 && readonly_fields_p (t2)) || (t1 != 0 && lang_hooks.honor_readonly && TYPE_READONLY (t1)) || (t2 != 0 && lang_hooks.honor_readonly && TYPE_READONLY (t2))) return 0; /* If they are the same type, they must conflict. */ if (t1 == t2 /* Likewise if both are volatile. */ || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2))) return 1; set1 = t1 ? get_alias_set (t1) : 0; set2 = t2 ? get_alias_set (t2) : 0; /* Otherwise they conflict if they have no alias set or the same. We can't simply use alias_sets_conflict_p here, because we must make sure that every subtype of t1 will conflict with every subtype of t2 for which a pair of subobjects of these respective subtypes overlaps on the stack. */ return set1 == 0 || set2 == 0 || set1 == set2; } /* T is an expression with pointer type. Find the DECL on which this expression is based. (For example, in `a[i]' this would be `a'.) If there is no such DECL, or a unique decl cannot be determined, NULL_TREE is returned. */ static tree find_base_decl (tree t) { tree d0, d1, d2; if (t == 0 || t == error_mark_node || ! POINTER_TYPE_P (TREE_TYPE (t))) return 0; /* If this is a declaration, return it. */ if (TREE_CODE_CLASS (TREE_CODE (t)) == 'd') return t; /* Handle general expressions. It would be nice to deal with COMPONENT_REFs here. If we could tell that `a' and `b' were the same, then `a->f' and `b->f' are also the same. */ switch (TREE_CODE_CLASS (TREE_CODE (t))) { case '1': return find_base_decl (TREE_OPERAND (t, 0)); case '2': /* Return 0 if found in neither or both are the same. */ d0 = find_base_decl (TREE_OPERAND (t, 0)); d1 = find_base_decl (TREE_OPERAND (t, 1)); if (d0 == d1) return d0; else if (d0 == 0) return d1; else if (d1 == 0) return d0; else return 0; case '3': d0 = find_base_decl (TREE_OPERAND (t, 0)); d1 = find_base_decl (TREE_OPERAND (t, 1)); d2 = find_base_decl (TREE_OPERAND (t, 2)); /* Set any nonzero values from the last, then from the first. */ if (d1 == 0) d1 = d2; if (d0 == 0) d0 = d1; if (d1 == 0) d1 = d0; if (d2 == 0) d2 = d1; /* At this point all are nonzero or all are zero. If all three are the same, return it. Otherwise, return zero. */ return (d0 == d1 && d1 == d2) ? d0 : 0; default: return 0; } } /* Return 1 if all the nested component references handled by get_inner_reference in T are such that we can address the object in T. */ int can_address_p (tree t) { /* If we're at the end, it is vacuously addressable. */ if (! 
handled_component_p (t)) return 1; /* Bitfields are never addressable. */ else if (TREE_CODE (t) == BIT_FIELD_REF) return 0; /* Fields are addressable unless they are marked as nonaddressable or the containing type has alias set 0. */ else if (TREE_CODE (t) == COMPONENT_REF && ! DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)) && get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) != 0 && can_address_p (TREE_OPERAND (t, 0))) return 1; /* Likewise for arrays. */ else if ((TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) && ! TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))) && get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) != 0 && can_address_p (TREE_OPERAND (t, 0))) return 1; return 0; } /* Return the alias set for T, which may be either a type or an expression. Call language-specific routine for help, if needed. */ HOST_WIDE_INT get_alias_set (tree t) { HOST_WIDE_INT set; /* If we're not doing any alias analysis, just assume everything aliases everything else. Also return 0 if this or its type is an error. */ if (! flag_strict_aliasing || t == error_mark_node || (! TYPE_P (t) && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node))) return 0; /* We can be passed either an expression or a type. This and the language-specific routine may make mutually-recursive calls to each other to figure out what to do. At each juncture, we see if this is a tree that the language may need to handle specially. First handle things that aren't types. */ if (! TYPE_P (t)) { tree inner = t; /* Remove any nops, then give the language a chance to do something with this tree before we look at it. */ STRIP_NOPS (t); set = lang_hooks.get_alias_set (t); if (set != -1) return set; /* First see if the actual object referenced is an INDIRECT_REF from a restrict-qualified pointer or a "void *". */ while (handled_component_p (inner)) { inner = TREE_OPERAND (inner, 0); STRIP_NOPS (inner); } /* Check for accesses through restrict-qualified pointers. */ if (TREE_CODE (inner) == INDIRECT_REF) { tree decl = find_base_decl (TREE_OPERAND (inner, 0)); if (decl && DECL_POINTER_ALIAS_SET_KNOWN_P (decl)) { /* If we haven't computed the actual alias set, do it now. */ if (DECL_POINTER_ALIAS_SET (decl) == -2) { tree pointed_to_type = TREE_TYPE (TREE_TYPE (decl)); /* No two restricted pointers can point at the same thing. However, a restricted pointer can point at the same thing as an unrestricted pointer, if that unrestricted pointer is based on the restricted pointer. So, we make the alias set for the restricted pointer a subset of the alias set for the type pointed to by the type of the decl. */ HOST_WIDE_INT pointed_to_alias_set = get_alias_set (pointed_to_type); if (pointed_to_alias_set == 0) /* It's not legal to make a subset of alias set zero. */ DECL_POINTER_ALIAS_SET (decl) = 0; else if (AGGREGATE_TYPE_P (pointed_to_type)) /* For an aggregate, we must treat the restricted pointer the same as an ordinary pointer. If we were to make the type pointed to by the restricted pointer a subset of the pointed-to type, then we would believe that other subsets of the pointed-to type (such as fields of that type) do not conflict with the type pointed to by the restricted pointer. */ DECL_POINTER_ALIAS_SET (decl) = pointed_to_alias_set; else { DECL_POINTER_ALIAS_SET (decl) = new_alias_set (); record_alias_subset (pointed_to_alias_set, DECL_POINTER_ALIAS_SET (decl)); } } /* We use the alias set indicated in the declaration. 
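As a purely illustrative case (not a declaration from this file): for a parameter declared `int *restrict p', the set returned here is the one created above with new_alias_set and recorded as a subset of the alias set of `int', so dereferences of `p' still conflict with ordinary `int' accesses but not with dereferences of a different restricted pointer.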
*/ return DECL_POINTER_ALIAS_SET (decl); } /* If we have an INDIRECT_REF via a void pointer, we don't know anything about what that might alias. Likewise if the pointer is marked that way. */ else if (TREE_CODE (TREE_TYPE (inner)) == VOID_TYPE || (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (inner, 0))))) return 0; } /* Otherwise, pick up the outermost object that we could have a pointer to, processing conversions as above. */ while (handled_component_p (t) && ! can_address_p (t)) { t = TREE_OPERAND (t, 0); STRIP_NOPS (t); } /* If we've already determined the alias set for a decl, just return it. This is necessary for C++ anonymous unions, whose component variables don't look like union members (boo!). */ if (TREE_CODE (t) == VAR_DECL && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t))) return MEM_ALIAS_SET (DECL_RTL (t)); /* Now all we care about is the type. */ t = TREE_TYPE (t); } /* Variant qualifiers don't affect the alias set, so get the main variant. If this is a type with a known alias set, return it. */ t = TYPE_MAIN_VARIANT (t); if (TYPE_ALIAS_SET_KNOWN_P (t)) return TYPE_ALIAS_SET (t); /* See if the language has special handling for this type. */ set = lang_hooks.get_alias_set (t); if (set != -1) return set; /* There are no objects of FUNCTION_TYPE, so there's no point in using up an alias set for them. (There are, of course, pointers and references to functions, but that's different.) */ else if (TREE_CODE (t) == FUNCTION_TYPE) set = 0; /* Unless the language specifies otherwise, let vector types alias their components. This avoids some nasty type punning issues in normal usage. And indeed lets vectors be treated more like an array slice. */ else if (TREE_CODE (t) == VECTOR_TYPE) set = get_alias_set (TREE_TYPE (t)); else /* Otherwise make a new alias set for this type. */ set = new_alias_set (); TYPE_ALIAS_SET (t) = set; /* If this is an aggregate type, we must record any component aliasing information. */ if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE) record_component_aliases (t); return set; } /* Return a brand-new alias set. */ static GTY(()) HOST_WIDE_INT last_alias_set; HOST_WIDE_INT new_alias_set (void) { if (flag_strict_aliasing) { if (!alias_sets) VARRAY_GENERIC_PTR_INIT (alias_sets, 10, "alias sets"); else VARRAY_GROW (alias_sets, last_alias_set + 2); return ++last_alias_set; } else return 0; } /* Indicate that things in SUBSET can alias things in SUPERSET, but that not everything that aliases SUPERSET also aliases SUBSET. For example, in C, a store to an `int' can alias a load of a structure containing an `int', and vice versa. But it can't alias a load of a 'double' member of the same structure. Here, the structure would be the SUPERSET and `int' the SUBSET. This relationship is also described in the comment at the beginning of this file. This function should be called only once per SUPERSET/SUBSET pair. It is illegal for SUPERSET to be zero; everything is implicitly a subset of alias set zero. */ void record_alias_subset (HOST_WIDE_INT superset, HOST_WIDE_INT subset) { alias_set_entry superset_entry; alias_set_entry subset_entry; /* It is possible in complex type situations for both sets to be the same, in which case we can ignore this operation. */ if (superset == subset) return; if (superset == 0) abort (); superset_entry = get_alias_set_entry (superset); if (superset_entry == 0) { /* Create an entry for the SUPERSET, so that we have a place to attach the SUBSET. 
*/ superset_entry = ggc_alloc (sizeof (struct alias_set_entry)); superset_entry->alias_set = superset; superset_entry->children = splay_tree_new_ggc (splay_tree_compare_ints); superset_entry->has_zero_child = 0; VARRAY_GENERIC_PTR (alias_sets, superset) = superset_entry; } if (subset == 0) superset_entry->has_zero_child = 1; else { subset_entry = get_alias_set_entry (subset); /* If there is an entry for the subset, enter all of its children (if they are not already present) as children of the SUPERSET. */ if (subset_entry) { if (subset_entry->has_zero_child) superset_entry->has_zero_child = 1; splay_tree_foreach (subset_entry->children, insert_subset_children, superset_entry->children); } /* Enter the SUBSET itself as a child of the SUPERSET. */ splay_tree_insert (superset_entry->children, (splay_tree_key) subset, 0); } } /* Record that component types of TYPE, if any, are part of that type for aliasing purposes. For record types, we only record component types for fields that are marked addressable. For array types, we always record the component types, so the front end should not call this function if the individual component aren't addressable. */ void record_component_aliases (tree type) { HOST_WIDE_INT superset = get_alias_set (type); tree field; if (superset == 0) return; switch (TREE_CODE (type)) { case ARRAY_TYPE: if (! TYPE_NONALIASED_COMPONENT (type)) record_alias_subset (superset, get_alias_set (TREE_TYPE (type))); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: /* Recursively record aliases for the base classes, if there are any. */ if (TYPE_BINFO (type) != NULL && TYPE_BINFO_BASETYPES (type) != NULL) { int i; for (i = 0; i < TREE_VEC_LENGTH (TYPE_BINFO_BASETYPES (type)); i++) { tree binfo = TREE_VEC_ELT (TYPE_BINFO_BASETYPES (type), i); record_alias_subset (superset, get_alias_set (BINFO_TYPE (binfo))); } } for (field = TYPE_FIELDS (type); field != 0; field = TREE_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL && ! DECL_NONADDRESSABLE_P (field)) record_alias_subset (superset, get_alias_set (TREE_TYPE (field))); break; case COMPLEX_TYPE: record_alias_subset (superset, get_alias_set (TREE_TYPE (type))); break; default: break; } } /* Allocate an alias set for use in storing and reading from the varargs spill area. */ static GTY(()) HOST_WIDE_INT varargs_set = -1; HOST_WIDE_INT get_varargs_alias_set (void) { #if 1 /* We now lower VA_ARG_EXPR, and there's currently no way to attach the varargs alias set to an INDIRECT_REF (FIXME!), so we can't consistently use the varargs alias set for loads from the varargs area. So don't use it anywhere. */ return 0; #else if (varargs_set == -1) varargs_set = new_alias_set (); return varargs_set; #endif } /* Likewise, but used for the fixed portions of the frame, e.g., register save areas. */ static GTY(()) HOST_WIDE_INT frame_set = -1; HOST_WIDE_INT get_frame_alias_set (void) { if (frame_set == -1) frame_set = new_alias_set (); return frame_set; } /* Inside SRC, the source of a SET, find a base address. */ static rtx find_base_value (rtx src) { unsigned int regno; switch (GET_CODE (src)) { case SYMBOL_REF: case LABEL_REF: return src; case REG: regno = REGNO (src); /* At the start of a function, argument registers have known base values which may be lost later. Returning an ADDRESS expression here allows optimization based on argument values even when the argument registers are used for other purposes. 
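For illustration: while copying_arguments is set, the entry for an incoming argument register is such an ADDRESS, and a pseudo that is simply copied from that register picks up the same ADDRESS as its base value via record_set, so later references through the pseudo can still be told apart from, e.g., stack references.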
*/ if (regno < FIRST_PSEUDO_REGISTER && copying_arguments) return new_reg_base_value[regno]; /* If a pseudo has a known base value, return it. Do not do this for non-fixed hard regs since it can result in a circular dependency chain for registers which have values at function entry. The test above is not sufficient because the scheduler may move a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */ if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno]) && regno < VARRAY_SIZE (reg_base_value)) { /* If we're inside init_alias_analysis, use new_reg_base_value to reduce the number of relaxation iterations. */ if (new_reg_base_value && new_reg_base_value[regno] && REG_N_SETS (regno) == 1) return new_reg_base_value[regno]; if (VARRAY_RTX (reg_base_value, regno)) return VARRAY_RTX (reg_base_value, regno); } return 0; case MEM: /* Check for an argument passed in memory. Only record in the copying-arguments block; it is too hard to track changes otherwise. */ if (copying_arguments && (XEXP (src, 0) == arg_pointer_rtx || (GET_CODE (XEXP (src, 0)) == PLUS && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx))) return gen_rtx_ADDRESS (VOIDmode, src); return 0; case CONST: src = XEXP (src, 0); if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS) break; /* ... fall through ... */ case PLUS: case MINUS: { rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1); /* If either operand is a REG that is a known pointer, then it is the base. */ if (REG_P (src_0) && REG_POINTER (src_0)) return find_base_value (src_0); if (REG_P (src_1) && REG_POINTER (src_1)) return find_base_value (src_1); /* If either operand is a REG, then see if we already have a known value for it. */ if (REG_P (src_0)) { temp = find_base_value (src_0); if (temp != 0) src_0 = temp; } if (REG_P (src_1)) { temp = find_base_value (src_1); if (temp!= 0) src_1 = temp; } /* If either base is named object or a special address (like an argument or stack reference), then use it for the base term. */ if (src_0 != 0 && (GET_CODE (src_0) == SYMBOL_REF || GET_CODE (src_0) == LABEL_REF || (GET_CODE (src_0) == ADDRESS && GET_MODE (src_0) != VOIDmode))) return src_0; if (src_1 != 0 && (GET_CODE (src_1) == SYMBOL_REF || GET_CODE (src_1) == LABEL_REF || (GET_CODE (src_1) == ADDRESS && GET_MODE (src_1) != VOIDmode))) return src_1; /* Guess which operand is the base address: If either operand is a symbol, then it is the base. If either operand is a CONST_INT, then the other is the base. */ if (GET_CODE (src_1) == CONST_INT || CONSTANT_P (src_0)) return find_base_value (src_0); else if (GET_CODE (src_0) == CONST_INT || CONSTANT_P (src_1)) return find_base_value (src_1); return 0; } case LO_SUM: /* The standard form is (lo_sum reg sym) so look only at the second operand. */ return find_base_value (XEXP (src, 1)); case AND: /* If the second operand is constant set the base address to the first operand. */ if (GET_CODE (XEXP (src, 1)) == CONST_INT && INTVAL (XEXP (src, 1)) != 0) return find_base_value (XEXP (src, 0)); return 0; case TRUNCATE: if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode)) break; /* Fall through. 
*/ case HIGH: case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC: case PRE_MODIFY: case POST_MODIFY: return find_base_value (XEXP (src, 0)); case ZERO_EXTEND: case SIGN_EXTEND: /* used for NT/Alpha pointers */ { rtx temp = find_base_value (XEXP (src, 0)); if (temp != 0 && CONSTANT_P (temp)) temp = convert_memory_address (Pmode, temp); return temp; } default: break; } return 0; } /* Called from init_alias_analysis indirectly through note_stores. */ /* While scanning insns to find base values, reg_seen[N] is nonzero if register N has been set in this function. */ static char *reg_seen; /* Addresses which are known not to alias anything else are identified by a unique integer. */ static int unique_id; static void record_set (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED) { unsigned regno; rtx src; int n; if (!REG_P (dest)) return; regno = REGNO (dest); if (regno >= VARRAY_SIZE (reg_base_value)) abort (); /* If this spans multiple hard registers, then we must indicate that every register has an unusable value. */ if (regno < FIRST_PSEUDO_REGISTER) n = hard_regno_nregs[regno][GET_MODE (dest)]; else n = 1; if (n != 1) { while (--n >= 0) { reg_seen[regno + n] = 1; new_reg_base_value[regno + n] = 0; } return; } if (set) { /* A CLOBBER wipes out any old value but does not prevent a previously unset register from acquiring a base address (i.e. reg_seen is not set). */ if (GET_CODE (set) == CLOBBER) { new_reg_base_value[regno] = 0; return; } src = SET_SRC (set); } else { if (reg_seen[regno]) { new_reg_base_value[regno] = 0; return; } reg_seen[regno] = 1; new_reg_base_value[regno] = gen_rtx_ADDRESS (Pmode, GEN_INT (unique_id++)); return; } /* If this is not the first set of REGNO, see whether the new value is related to the old one. There are two cases of interest: (1) The register might be assigned an entirely new value that has the same base term as the original set. (2) The set might be a simple self-modification that cannot change REGNO's base value. If neither case holds, reject the original base value as invalid. Note that the following situation is not detected: extern int x, y; int *p = &x; p += (&y-&x); ANSI C does not allow computing the difference of addresses of distinct top level objects. */ if (new_reg_base_value[regno] != 0 && find_base_value (src) != new_reg_base_value[regno]) switch (GET_CODE (src)) { case LO_SUM: case MINUS: if (XEXP (src, 0) != dest && XEXP (src, 1) != dest) new_reg_base_value[regno] = 0; break; case PLUS: /* If the value we add in the PLUS is also a valid base value, this might be the actual base value, and the original value an index. */ { rtx other = NULL_RTX; if (XEXP (src, 0) == dest) other = XEXP (src, 1); else if (XEXP (src, 1) == dest) other = XEXP (src, 0); if (! other || find_base_value (other)) new_reg_base_value[regno] = 0; break; } case AND: if (XEXP (src, 0) != dest || GET_CODE (XEXP (src, 1)) != CONST_INT) new_reg_base_value[regno] = 0; break; default: new_reg_base_value[regno] = 0; break; } /* If this is the first set of a register, record the value. */ else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno]) && ! reg_seen[regno] && new_reg_base_value[regno] == 0) new_reg_base_value[regno] = find_base_value (src); reg_seen[regno] = 1; } /* Called from loop optimization when a new pseudo-register is created. It indicates that REGNO is being set to VAL. f INVARIANT is true then this value also describes an invariant relationship which can be used to deduce that two registers with unknown values are different. 
*/ void record_base_value (unsigned int regno, rtx val, int invariant) { if (invariant && alias_invariant && regno < alias_invariant_size) alias_invariant[regno] = val; if (regno >= VARRAY_SIZE (reg_base_value)) VARRAY_GROW (reg_base_value, max_reg_num ()); if (REG_P (val)) { VARRAY_RTX (reg_base_value, regno) = REG_BASE_VALUE (val); return; } VARRAY_RTX (reg_base_value, regno) = find_base_value (val); } /* Clear alias info for a register. This is used if an RTL transformation changes the value of a register. This is used in flow by AUTO_INC_DEC optimizations. We don't need to clear reg_base_value, since flow only changes the offset. */ void clear_reg_alias_info (rtx reg) { unsigned int regno = REGNO (reg); if (regno >= FIRST_PSEUDO_REGISTER) { regno -= FIRST_PSEUDO_REGISTER; if (regno < reg_known_value_size) { reg_known_value[regno] = reg; reg_known_equiv_p[regno] = false; } } } /* If a value is known for REGNO, return it. */ rtx get_reg_known_value (unsigned int regno) { if (regno >= FIRST_PSEUDO_REGISTER) { regno -= FIRST_PSEUDO_REGISTER; if (regno < reg_known_value_size) return reg_known_value[regno]; } return NULL; } /* Set it. */ static void set_reg_known_value (unsigned int regno, rtx val) { if (regno >= FIRST_PSEUDO_REGISTER) { regno -= FIRST_PSEUDO_REGISTER; if (regno < reg_known_value_size) reg_known_value[regno] = val; } } /* Similarly for reg_known_equiv_p. */ bool get_reg_known_equiv_p (unsigned int regno) { if (regno >= FIRST_PSEUDO_REGISTER) { regno -= FIRST_PSEUDO_REGISTER; if (regno < reg_known_value_size) return reg_known_equiv_p[regno]; } return false; } static void set_reg_known_equiv_p (unsigned int regno, bool val) { if (regno >= FIRST_PSEUDO_REGISTER) { regno -= FIRST_PSEUDO_REGISTER; if (regno < reg_known_value_size) reg_known_equiv_p[regno] = val; } } /* Returns a canonical version of X, from the point of view alias analysis. (For example, if X is a MEM whose address is a register, and the register has a known value (say a SYMBOL_REF), then a MEM whose address is the SYMBOL_REF is returned.) */ rtx canon_rtx (rtx x) { /* Recursively look for equivalences. */ if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER) { rtx t = get_reg_known_value (REGNO (x)); if (t == x) return x; if (t) return canon_rtx (t); } if (GET_CODE (x) == PLUS) { rtx x0 = canon_rtx (XEXP (x, 0)); rtx x1 = canon_rtx (XEXP (x, 1)); if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1)) { if (GET_CODE (x0) == CONST_INT) return plus_constant (x1, INTVAL (x0)); else if (GET_CODE (x1) == CONST_INT) return plus_constant (x0, INTVAL (x1)); return gen_rtx_PLUS (GET_MODE (x), x0, x1); } } /* This gives us much better alias analysis when called from the loop optimizer. Note we want to leave the original MEM alone, but need to return the canonicalized MEM with all the flags with their original values. */ else if (MEM_P (x)) x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0))); return x; } /* Return 1 if X and Y are identical-looking rtx's. Expect that X and Y has been already canonicalized. We use the data in reg_known_value above to see if two registers with different numbers are, in fact, equivalent. */ static int rtx_equal_for_memref_p (rtx x, rtx y) { int i; int j; enum rtx_code code; const char *fmt; if (x == 0 && y == 0) return 1; if (x == 0 || y == 0) return 0; if (x == y) return 1; code = GET_CODE (x); /* Rtx's of different codes cannot be equal. */ if (code != GET_CODE (y)) return 0; /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. (REG:SI x) and (REG:HI x) are NOT equivalent. 
*/ if (GET_MODE (x) != GET_MODE (y)) return 0; /* Some RTL can be compared without a recursive examination. */ switch (code) { case REG: return REGNO (x) == REGNO (y); case LABEL_REF: return XEXP (x, 0) == XEXP (y, 0); case SYMBOL_REF: return XSTR (x, 0) == XSTR (y, 0); case VALUE: case CONST_INT: case CONST_DOUBLE: /* There's no need to compare the contents of CONST_DOUBLEs or CONST_INTs because pointer equality is a good enough comparison for these nodes. */ return 0; default: break; } /* canon_rtx knows how to handle plus. No need to canonicalize. */ if (code == PLUS) return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0)) && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1))) || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1)) && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0)))); /* For commutative operations, the RTX match if the operand match in any order. Also handle the simple binary and unary cases without a loop. */ if (COMMUTATIVE_P (x)) { rtx xop0 = canon_rtx (XEXP (x, 0)); rtx yop0 = canon_rtx (XEXP (y, 0)); rtx yop1 = canon_rtx (XEXP (y, 1)); return ((rtx_equal_for_memref_p (xop0, yop0) && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1)) || (rtx_equal_for_memref_p (xop0, yop1) && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0))); } else if (NON_COMMUTATIVE_P (x)) { return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)), canon_rtx (XEXP (y, 0))) && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), canon_rtx (XEXP (y, 1)))); } else if (UNARY_P (x)) return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)), canon_rtx (XEXP (y, 0))); /* Compare the elements. If any pair of corresponding elements fail to match, return 0 for the whole things. Limit cases to types which actually appear in addresses. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { switch (fmt[i]) { case 'i': if (XINT (x, i) != XINT (y, i)) return 0; break; case 'E': /* Two vectors must have the same length. */ if (XVECLEN (x, i) != XVECLEN (y, i)) return 0; /* And the corresponding elements must match. */ for (j = 0; j < XVECLEN (x, i); j++) if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)), canon_rtx (XVECEXP (y, i, j))) == 0) return 0; break; case 'e': if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)), canon_rtx (XEXP (y, i))) == 0) return 0; break; /* This can happen for asm operands. */ case 's': if (strcmp (XSTR (x, i), XSTR (y, i))) return 0; break; /* This can happen for an asm which clobbers memory. */ case '0': break; /* It is believed that rtx's at this level will never contain anything but integers and other rtx's, except for within LABEL_REFs and SYMBOL_REFs. */ default: abort (); } } return 1; } /* Given an rtx X, find a SYMBOL_REF or LABEL_REF within X and return it, or return 0 if none found. */ static rtx find_symbolic_term (rtx x) { int i; enum rtx_code code; const char *fmt; code = GET_CODE (x); if (code == SYMBOL_REF || code == LABEL_REF) return x; if (OBJECT_P (x)) return 0; fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { rtx t; if (fmt[i] == 'e') { t = find_symbolic_term (XEXP (x, i)); if (t != 0) return t; } else if (fmt[i] == 'E') break; } return 0; } rtx find_base_term (rtx x) { cselib_val *val; struct elt_loc_list *l; #if defined (FIND_BASE_TERM) /* Try machine-dependent ways to find the base term. */ x = FIND_BASE_TERM (x); #endif switch (GET_CODE (x)) { case REG: return REG_BASE_VALUE (x); case TRUNCATE: if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode)) return 0; /* Fall through. 
*/ case HIGH: case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC: case PRE_MODIFY: case POST_MODIFY: return find_base_term (XEXP (x, 0)); case ZERO_EXTEND: case SIGN_EXTEND: /* Used for Alpha/NT pointers */ { rtx temp = find_base_term (XEXP (x, 0)); if (temp != 0 && CONSTANT_P (temp)) temp = convert_memory_address (Pmode, temp); return temp; } case VALUE: val = CSELIB_VAL_PTR (x); if (!val) return 0; for (l = val->locs; l; l = l->next) if ((x = find_base_term (l->loc)) != 0) return x; return 0; case CONST: x = XEXP (x, 0); if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS) return 0; /* Fall through. */ case LO_SUM: case PLUS: case MINUS: { rtx tmp1 = XEXP (x, 0); rtx tmp2 = XEXP (x, 1); /* This is a little bit tricky since we have to determine which of the two operands represents the real base address. Otherwise this routine may return the index register instead of the base register. That may cause us to believe no aliasing was possible, when in fact aliasing is possible. We use a few simple tests to guess the base register. Additional tests can certainly be added. For example, if one of the operands is a shift or multiply, then it must be the index register and the other operand is the base register. */ if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2)) return find_base_term (tmp2); /* If either operand is known to be a pointer, then use it to determine the base term. */ if (REG_P (tmp1) && REG_POINTER (tmp1)) return find_base_term (tmp1); if (REG_P (tmp2) && REG_POINTER (tmp2)) return find_base_term (tmp2); /* Neither operand was known to be a pointer. Go ahead and find the base term for both operands. */ tmp1 = find_base_term (tmp1); tmp2 = find_base_term (tmp2); /* If either base term is a named object or a special address (like an argument or stack reference), then use it for the base term. */ if (tmp1 != 0 && (GET_CODE (tmp1) == SYMBOL_REF || GET_CODE (tmp1) == LABEL_REF || (GET_CODE (tmp1) == ADDRESS && GET_MODE (tmp1) != VOIDmode))) return tmp1; if (tmp2 != 0 && (GET_CODE (tmp2) == SYMBOL_REF || GET_CODE (tmp2) == LABEL_REF || (GET_CODE (tmp2) == ADDRESS && GET_MODE (tmp2) != VOIDmode))) return tmp2; /* We could not determine which of the two operands was the base register and which was the index. So we can determine nothing from the base alias check. */ return 0; } case AND: if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) != 0) return find_base_term (XEXP (x, 0)); return 0; case SYMBOL_REF: case LABEL_REF: return x; default: return 0; } } /* Return 0 if the addresses X and Y are known to point to different objects, 1 if they might be pointers to the same object. */ static int base_alias_check (rtx x, rtx y, enum machine_mode x_mode, enum machine_mode y_mode) { rtx x_base = find_base_term (x); rtx y_base = find_base_term (y); /* If the address itself has no known base see if a known equivalent value has one. If either address still has no known base, nothing is known about aliasing. */ if (x_base == 0) { rtx x_c; if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x) return 1; x_base = find_base_term (x_c); if (x_base == 0) return 1; } if (y_base == 0) { rtx y_c; if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y) return 1; y_base = find_base_term (y_c); if (y_base == 0) return 1; } /* If the base addresses are equal nothing is known about aliasing. */ if (rtx_equal_p (x_base, y_base)) return 1; /* The base addresses of the read and write are different expressions.
If they are both symbols and they are not accessed via AND, there is no conflict. We can bring knowledge of object alignment into play here. For example, on alpha, "char a, b;" can alias one another, though "char a; long b;" cannot. */ if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS) { if (GET_CODE (x) == AND && GET_CODE (y) == AND) return 1; if (GET_CODE (x) == AND && (GET_CODE (XEXP (x, 1)) != CONST_INT || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1)))) return 1; if (GET_CODE (y) == AND && (GET_CODE (XEXP (y, 1)) != CONST_INT || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1)))) return 1; /* Differing symbols never alias. */ return 0; } /* If one address is a stack reference there can be no alias: stack references using different base registers do not alias, a stack reference can not alias a parameter, and a stack reference can not alias a global. */ if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode) || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode)) return 0; if (! flag_argument_noalias) return 1; if (flag_argument_noalias > 1) return 0; /* Weak noalias assertion (arguments are distinct, but may match globals). */ return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode); } /* Convert the address X into something we can use. This is done by returning it unchanged unless it is a value; in the latter case we call cselib to get a more useful rtx. */ rtx get_addr (rtx x) { cselib_val *v; struct elt_loc_list *l; if (GET_CODE (x) != VALUE) return x; v = CSELIB_VAL_PTR (x); if (v) { for (l = v->locs; l; l = l->next) if (CONSTANT_P (l->loc)) return l->loc; for (l = v->locs; l; l = l->next) if (!REG_P (l->loc) && !MEM_P (l->loc)) return l->loc; if (v->locs) return v->locs->loc; } return x; } /* Return the address of the (N_REFS + 1)th memory reference to ADDR where SIZE is the size in bytes of the memory reference. If ADDR is not modified by the memory reference then ADDR is returned. */ rtx addr_side_effect_eval (rtx addr, int size, int n_refs) { int offset = 0; switch (GET_CODE (addr)) { case PRE_INC: offset = (n_refs + 1) * size; break; case PRE_DEC: offset = -(n_refs + 1) * size; break; case POST_INC: offset = n_refs * size; break; case POST_DEC: offset = -n_refs * size; break; default: return addr; } if (offset) addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0), GEN_INT (offset)); else addr = XEXP (addr, 0); addr = canon_rtx (addr); return addr; } /* Return nonzero if X and Y (memory addresses) could reference the same location in memory. C is an offset accumulator. When C is nonzero, we are testing aliases between X and Y + C. XSIZE is the size in bytes of the X reference, similarly YSIZE is the size in bytes for Y. Expect that canon_rtx has been already called for X and Y. If XSIZE or YSIZE is zero, we do not know the amount of memory being referenced (the reference was BLKmode), so make the most pessimistic assumptions. If XSIZE or YSIZE is negative, we may access memory outside the object being referenced as a side effect. This can happen when using AND to align memory references, as is done on the Alpha. Nice to notice that varying addresses cannot conflict with fp if no local variables had their addresses taken, but that's too hard now. 
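As an illustration of the offset accumulator (just the overlap test below spelled out): when X and Y compare equal, X covers bytes [0, XSIZE) and Y + C covers [C, C + YSIZE), so with XSIZE == YSIZE == 4 an offset of C == 2 conflicts (bytes [2, 4) are shared) while C == 4 does not.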
*/ static int memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c) { if (GET_CODE (x) == VALUE) x = get_addr (x); if (GET_CODE (y) == VALUE) y = get_addr (y); if (GET_CODE (x) == HIGH) x = XEXP (x, 0); else if (GET_CODE (x) == LO_SUM) x = XEXP (x, 1); else x = addr_side_effect_eval (x, xsize, 0); if (GET_CODE (y) == HIGH) y = XEXP (y, 0); else if (GET_CODE (y) == LO_SUM) y = XEXP (y, 1); else y = addr_side_effect_eval (y, ysize, 0); if (rtx_equal_for_memref_p (x, y)) { if (xsize <= 0 || ysize <= 0) return 1; if (c >= 0 && xsize > c) return 1; if (c < 0 && ysize+c > 0) return 1; return 0; } /* This code used to check for conflicts involving stack references and globals but the base address alias code now handles these cases. */ if (GET_CODE (x) == PLUS) { /* The fact that X is canonicalized means that this PLUS rtx is canonicalized. */ rtx x0 = XEXP (x, 0); rtx x1 = XEXP (x, 1); if (GET_CODE (y) == PLUS) { /* The fact that Y is canonicalized means that this PLUS rtx is canonicalized. */ rtx y0 = XEXP (y, 0); rtx y1 = XEXP (y, 1); if (rtx_equal_for_memref_p (x1, y1)) return memrefs_conflict_p (xsize, x0, ysize, y0, c); if (rtx_equal_for_memref_p (x0, y0)) return memrefs_conflict_p (xsize, x1, ysize, y1, c); if (GET_CODE (x1) == CONST_INT) { if (GET_CODE (y1) == CONST_INT) return memrefs_conflict_p (xsize, x0, ysize, y0, c - INTVAL (x1) + INTVAL (y1)); else return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1)); } else if (GET_CODE (y1) == CONST_INT) return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1)); return 1; } else if (GET_CODE (x1) == CONST_INT) return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1)); } else if (GET_CODE (y) == PLUS) { /* The fact that Y is canonicalized means that this PLUS rtx is canonicalized. */ rtx y0 = XEXP (y, 0); rtx y1 = XEXP (y, 1); if (GET_CODE (y1) == CONST_INT) return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1)); else return 1; } if (GET_CODE (x) == GET_CODE (y)) switch (GET_CODE (x)) { case MULT: { /* Handle cases where we expect the second operands to be the same, and check only whether the first operand would conflict or not. */ rtx x0, y0; rtx x1 = canon_rtx (XEXP (x, 1)); rtx y1 = canon_rtx (XEXP (y, 1)); if (! rtx_equal_for_memref_p (x1, y1)) return 1; x0 = canon_rtx (XEXP (x, 0)); y0 = canon_rtx (XEXP (y, 0)); if (rtx_equal_for_memref_p (x0, y0)) return (xsize == 0 || ysize == 0 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)); /* Can't properly adjust our sizes. */ if (GET_CODE (x1) != CONST_INT) return 1; xsize /= INTVAL (x1); ysize /= INTVAL (x1); c /= INTVAL (x1); return memrefs_conflict_p (xsize, x0, ysize, y0, c); } case REG: /* Are these registers known not to be equal? */ if (alias_invariant) { unsigned int r_x = REGNO (x), r_y = REGNO (y); rtx i_x, i_y; /* invariant relationships of X and Y */ i_x = r_x >= alias_invariant_size ? 0 : alias_invariant[r_x]; i_y = r_y >= alias_invariant_size ? 0 : alias_invariant[r_y]; if (i_x == 0 && i_y == 0) break; if (! memrefs_conflict_p (xsize, i_x ? i_x : x, ysize, i_y ? i_y : y, c)) return 0; } break; default: break; } /* Treat an access through an AND (e.g. a subword access on an Alpha) as an access with indeterminate size. Assume that references besides AND are aligned, so if the size of the other reference is at least as large as the alignment, assume no other overlap. 
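For instance, with an address of the form (and A (const_int -8)) the access stays within an aligned 8-byte block, so when the other reference spans at least those 8 bytes the AND access keeps its stated size; when it is smaller, the size is forced to the indeterminate (negative) value below.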
*/ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT) { if (GET_CODE (y) == AND || ysize < -INTVAL (XEXP (x, 1))) xsize = -1; return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, y, c); } if (GET_CODE (y) == AND && GET_CODE (XEXP (y, 1)) == CONST_INT) { /* ??? If we are indexing far enough into the array/structure, we may yet be able to determine that we can not overlap. But we also need to know that we are far enough from the end not to overlap a following reference, so we do nothing with that for now. */ if (GET_CODE (x) == AND || xsize < -INTVAL (XEXP (y, 1))) ysize = -1; return memrefs_conflict_p (xsize, x, ysize, canon_rtx (XEXP (y, 0)), c); } if (CONSTANT_P (x)) { if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT) { c += (INTVAL (y) - INTVAL (x)); return (xsize <= 0 || ysize <= 0 || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)); } if (GET_CODE (x) == CONST) { if (GET_CODE (y) == CONST) return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, canon_rtx (XEXP (y, 0)), c); else return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, y, c); } if (GET_CODE (y) == CONST) return memrefs_conflict_p (xsize, x, ysize, canon_rtx (XEXP (y, 0)), c); if (CONSTANT_P (y)) return (xsize <= 0 || ysize <= 0 || (rtx_equal_for_memref_p (x, y) && ((c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)))); return 1; } return 1; } /* Functions to compute memory dependencies. Since we process the insns in execution order, we can build tables to keep track of what registers are fixed (and not aliased), what registers are varying in known ways, and what registers are varying in unknown ways. If both memory references are volatile, then there must always be a dependence between the two references, since their order can not be changed. A volatile and non-volatile reference can be interchanged though. A MEM_IN_STRUCT reference at a non-AND varying address can never conflict with a non-MEM_IN_STRUCT reference at a fixed address. We also must allow AND addresses, because they may generate accesses outside the object being referenced. This is used to generate aligned addresses from unaligned addresses, for instance, the alpha storeqi_unaligned pattern. */ /* Read dependence: X is read after read in MEM takes place. There can only be a dependence here if both reads are volatile. */ int read_dependence (rtx mem, rtx x) { return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem); } /* Returns MEM1 if and only if MEM1 is a scalar at a fixed address and MEM2 is a reference to a structure at a varying address, or returns MEM2 if vice versa. Otherwise, returns NULL_RTX. If a non-NULL value is returned MEM1 and MEM2 can never alias. VARIES_P is used to decide whether or not an address may vary; it should return nonzero whenever variation is possible. MEM1_ADDR and MEM2_ADDR are the addresses of MEM1 and MEM2. */ static rtx fixed_scalar_and_varying_struct_p (rtx mem1, rtx mem2, rtx mem1_addr, rtx mem2_addr, int (*varies_p) (rtx, int)) { if (! flag_strict_aliasing) return NULL_RTX; if (MEM_SCALAR_P (mem1) && MEM_IN_STRUCT_P (mem2) && !varies_p (mem1_addr, 1) && varies_p (mem2_addr, 1)) /* MEM1 is a scalar at a fixed address; MEM2 is a struct at a varying address. */ return mem1; if (MEM_IN_STRUCT_P (mem1) && MEM_SCALAR_P (mem2) && varies_p (mem1_addr, 1) && !varies_p (mem2_addr, 1)) /* MEM2 is a scalar at a fixed address; MEM1 is a struct at a varying address.
*/ return mem2; return NULL_RTX; } /* Returns nonzero if something about the mode or address format of MEM indicates that it might well alias *anything*. */ static int aliases_everything_p (rtx mem) { if (GET_CODE (XEXP (mem, 0)) == AND) /* If the address is an AND, it's very hard to know what it is actually pointing at. */ return 1; return 0; } /* Return true if we can determine that the fields referenced cannot overlap for any pair of objects. */ static bool nonoverlapping_component_refs_p (tree x, tree y) { tree fieldx, fieldy, typex, typey, orig_y; do { /* The comparison has to be done at a common type, since we don't know how the inheritance hierarchy works. */ orig_y = y; do { fieldx = TREE_OPERAND (x, 1); typex = DECL_FIELD_CONTEXT (fieldx); y = orig_y; do { fieldy = TREE_OPERAND (y, 1); typey = DECL_FIELD_CONTEXT (fieldy); if (typex == typey) goto found; y = TREE_OPERAND (y, 0); } while (y && TREE_CODE (y) == COMPONENT_REF); x = TREE_OPERAND (x, 0); } while (x && TREE_CODE (x) == COMPONENT_REF); /* Never found a common type. */ return false; found: /* If we're left with accessing different fields of a structure, then no overlap. */ if (TREE_CODE (typex) == RECORD_TYPE && fieldx != fieldy) return true; /* The comparison on the current field failed. If we're accessing a very nested structure, look at the next outer level. */ x = TREE_OPERAND (x, 0); y = TREE_OPERAND (y, 0); } while (x && y && TREE_CODE (x) == COMPONENT_REF && TREE_CODE (y) == COMPONENT_REF); return false; } /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */ static tree decl_for_component_ref (tree x) { do { x = TREE_OPERAND (x, 0); } while (x && TREE_CODE (x) == COMPONENT_REF); return x && DECL_P (x) ? x : NULL_TREE; } /* Walk up the COMPONENT_REF list and adjust OFFSET to compensate for the offset of the field reference. */ static rtx adjust_offset_for_component_ref (tree x, rtx offset) { HOST_WIDE_INT ioffset; if (! offset) return NULL_RTX; ioffset = INTVAL (offset); do { tree offset = component_ref_field_offset (x); tree field = TREE_OPERAND (x, 1); if (! host_integerp (offset, 1)) return NULL_RTX; ioffset += (tree_low_cst (offset, 1) + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) / BITS_PER_UNIT)); x = TREE_OPERAND (x, 0); } while (x && TREE_CODE (x) == COMPONENT_REF); return GEN_INT (ioffset); } /* Return nonzero if we can determine the exprs corresponding to memrefs X and Y and they do not overlap. */ static int nonoverlapping_memrefs_p (rtx x, rtx y) { tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y); rtx rtlx, rtly; rtx basex, basey; rtx moffsetx, moffsety; HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem; /* Unless both have exprs, we can't tell anything. */ if (exprx == 0 || expry == 0) return 0; /* If both are field references, we may be able to determine something. */ if (TREE_CODE (exprx) == COMPONENT_REF && TREE_CODE (expry) == COMPONENT_REF && nonoverlapping_component_refs_p (exprx, expry)) return 1; /* If the field reference test failed, look at the DECLs involved. */ moffsetx = MEM_OFFSET (x); if (TREE_CODE (exprx) == COMPONENT_REF) { tree t = decl_for_component_ref (exprx); if (! t) return 0; moffsetx = adjust_offset_for_component_ref (exprx, moffsetx); exprx = t; } else if (TREE_CODE (exprx) == INDIRECT_REF) { exprx = TREE_OPERAND (exprx, 0); if (flag_argument_noalias < 2 || TREE_CODE (exprx) != PARM_DECL) return 0; } moffsety = MEM_OFFSET (y); if (TREE_CODE (expry) == COMPONENT_REF) { tree t = decl_for_component_ref (expry); if (!
t) return 0; moffsety = adjust_offset_for_component_ref (expry, moffsety); expry = t; } else if (TREE_CODE (expry) == INDIRECT_REF) { expry = TREE_OPERAND (expry, 0); if (flag_argument_noalias < 2 || TREE_CODE (expry) != PARM_DECL) return 0; } if (! DECL_P (exprx) || ! DECL_P (expry)) return 0; rtlx = DECL_RTL (exprx); rtly = DECL_RTL (expry); /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they can't overlap unless they are the same because we never reuse that part of the stack frame used for locals for spilled pseudos. */ if ((!MEM_P (rtlx) || !MEM_P (rtly)) && ! rtx_equal_p (rtlx, rtly)) return 1; /* Get the base and offsets of both decls. If either is a register, we know both are and are the same, so use that as the base. The only way we can avoid overlap is if we can deduce that they are nonoverlapping pieces of that decl, which is very rare. */ basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx; if (GET_CODE (basex) == PLUS && GET_CODE (XEXP (basex, 1)) == CONST_INT) offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0); basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly; if (GET_CODE (basey) == PLUS && GET_CODE (XEXP (basey, 1)) == CONST_INT) offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0); /* If the bases are different, we know they do not overlap if both are constants or if one is a constant and the other a pointer into the stack frame. Otherwise a different base means we can't tell if they overlap or not. */ if (! rtx_equal_p (basex, basey)) return ((CONSTANT_P (basex) && CONSTANT_P (basey)) || (CONSTANT_P (basex) && REG_P (basey) && REGNO_PTR_FRAME_P (REGNO (basey))) || (CONSTANT_P (basey) && REG_P (basex) && REGNO_PTR_FRAME_P (REGNO (basex)))); sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx)) : MEM_SIZE (rtlx) ? INTVAL (MEM_SIZE (rtlx)) : -1); sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly)) : MEM_SIZE (rtly) ? INTVAL (MEM_SIZE (rtly)) : -1); /* If we have an offset for either memref, it can update the values computed above. */ if (moffsetx) offsetx += INTVAL (moffsetx), sizex -= INTVAL (moffsetx); if (moffsety) offsety += INTVAL (moffsety), sizey -= INTVAL (moffsety); /* If a memref has both a size and an offset, we can use the smaller size. We can't do this if the offset isn't known because we must view this memref as being anywhere inside the DECL's MEM. */ if (MEM_SIZE (x) && moffsetx) sizex = INTVAL (MEM_SIZE (x)); if (MEM_SIZE (y) && moffsety) sizey = INTVAL (MEM_SIZE (y)); /* Put the values of the memref with the lower offset in X's values. */ if (offsetx > offsety) { tem = offsetx, offsetx = offsety, offsety = tem; tem = sizex, sizex = sizey, sizey = tem; } /* If we don't know the size of the lower-offset value, we can't tell if they conflict. Otherwise, we do the test. */ return sizex >= 0 && offsety >= offsetx + sizex; } /* True dependence: X is read after store in MEM takes place. */ int true_dependence (rtx mem, enum machine_mode mem_mode, rtx x, int (*varies) (rtx, int)) { rtx x_addr, mem_addr; rtx base; if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) return 1; /* (mem:BLK (scratch)) is a special mechanism to conflict with everything. This is used in epilogue deallocation functions. */ if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH) return 1; if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH) return 1; if (DIFFERENT_ALIAS_SETS_P (x, mem)) return 0; /* Unchanging memory can't conflict with non-unchanging memory. A non-unchanging read can conflict with a non-unchanging write.
An unchanging read can conflict with an unchanging write since there may be a single store to this address to initialize it. Note that an unchanging store can conflict with a non-unchanging read since we have to make conservative assumptions when we have a record with readonly fields and we are copying the whole thing. Just fall through to the code below to resolve potential conflicts. This won't handle all cases optimally, but the possible performance loss should be negligible. */ if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem)) return 0; if (nonoverlapping_memrefs_p (mem, x)) return 0; if (mem_mode == VOIDmode) mem_mode = GET_MODE (mem); x_addr = get_addr (XEXP (x, 0)); mem_addr = get_addr (XEXP (mem, 0)); base = find_base_term (x_addr); if (base && (GET_CODE (base) == LABEL_REF || (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base)))) return 0; if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode)) return 0; x_addr = canon_rtx (x_addr); mem_addr = canon_rtx (mem_addr); if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr, SIZE_FOR_MODE (x), x_addr, 0)) return 0; if (aliases_everything_p (x)) return 1; /* We cannot use aliases_everything_p to test MEM, since we must look at MEM_MODE, rather than GET_MODE (MEM). */ if (mem_mode == QImode || GET_CODE (mem_addr) == AND) return 1; /* In true_dependence we also allow BLKmode to alias anything. Why don't we do this in anti_dependence and output_dependence? */ if (mem_mode == BLKmode || GET_MODE (x) == BLKmode) return 1; return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies); } /* Canonical true dependence: X is read after store in MEM takes place. Variant of true_dependence which assumes MEM has already been canonicalized (hence we no longer do that here). The mem_addr argument has been added, since true_dependence computed this value prior to canonicalizing. */ int canon_true_dependence (rtx mem, enum machine_mode mem_mode, rtx mem_addr, rtx x, int (*varies) (rtx, int)) { rtx x_addr; if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) return 1; /* (mem:BLK (scratch)) is a special mechanism to conflict with everything. This is used in epilogue deallocation functions. */ if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH) return 1; if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH) return 1; if (DIFFERENT_ALIAS_SETS_P (x, mem)) return 0; /* If X is an unchanging read, then it can't possibly conflict with any non-unchanging store. It may conflict with an unchanging write though, because there may be a single store to this address to initialize it. Just fall through to the code below to resolve the case where we have both an unchanging read and an unchanging write. This won't handle all cases optimally, but the possible performance loss should be negligible. */ if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem)) return 0; if (nonoverlapping_memrefs_p (x, mem)) return 0; x_addr = get_addr (XEXP (x, 0)); if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode)) return 0; x_addr = canon_rtx (x_addr); if (! memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr, SIZE_FOR_MODE (x), x_addr, 0)) return 0; if (aliases_everything_p (x)) return 1; /* We cannot use aliases_everything_p to test MEM, since we must look at MEM_MODE, rather than GET_MODE (MEM). */ if (mem_mode == QImode || GET_CODE (mem_addr) == AND) return 1; /* In true_dependence we also allow BLKmode to alias anything. Why don't we do this in anti_dependence and output_dependence? 
*/ if (mem_mode == BLKmode || GET_MODE (x) == BLKmode) return 1; return ! fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies); } /* Returns nonzero if a write to X might alias a previous read from (or, if WRITEP is nonzero, a write to) MEM. If CONSTP is nonzero, honor the RTX_UNCHANGING_P flags on X and MEM. */ static int write_dependence_p (rtx mem, rtx x, int writep, int constp) { rtx x_addr, mem_addr; rtx fixed_scalar; rtx base; if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) return 1; /* (mem:BLK (scratch)) is a special mechanism to conflict with everything. This is used in epilogue deallocation functions. */ if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH) return 1; if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH) return 1; if (DIFFERENT_ALIAS_SETS_P (x, mem)) return 0; if (constp) { /* Unchanging memory can't conflict with non-unchanging memory. */ if (RTX_UNCHANGING_P (x) != RTX_UNCHANGING_P (mem)) return 0; /* If MEM is an unchanging read, then it can't possibly conflict with the store to X, because there is at most one store to MEM, and it must have occurred somewhere before MEM. */ if (! writep && RTX_UNCHANGING_P (mem)) return 0; } if (nonoverlapping_memrefs_p (x, mem)) return 0; x_addr = get_addr (XEXP (x, 0)); mem_addr = get_addr (XEXP (mem, 0)); if (! writep) { base = find_base_term (mem_addr); if (base && (GET_CODE (base) == LABEL_REF || (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base)))) return 0; } if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), GET_MODE (mem))) return 0; x_addr = canon_rtx (x_addr); mem_addr = canon_rtx (mem_addr); if (!memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr, SIZE_FOR_MODE (x), x_addr, 0)) return 0; fixed_scalar = fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, rtx_addr_varies_p); return (!(fixed_scalar == mem && !aliases_everything_p (x)) && !(fixed_scalar == x && !aliases_everything_p (mem))); } /* Anti dependence: X is written after read in MEM takes place. */ int anti_dependence (rtx mem, rtx x) { return write_dependence_p (mem, x, /*writep=*/0, /*constp*/1); } /* Output dependence: X is written after store in MEM takes place. */ int output_dependence (rtx mem, rtx x) { return write_dependence_p (mem, x, /*writep=*/1, /*constp*/1); } /* Unchanging anti dependence: Like anti_dependence but ignores the UNCHANGING_RTX_P property on const variable references. */ int unchanging_anti_dependence (rtx mem, rtx x) { return write_dependence_p (mem, x, /*writep=*/0, /*constp*/0); } /* A subroutine of nonlocal_mentioned_p, returns 1 if *LOC mentions something which is not local to the function and is not constant. */ static int nonlocal_mentioned_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED) { rtx x = *loc; rtx base; int regno; if (! x) return 0; switch (GET_CODE (x)) { case SUBREG: if (REG_P (SUBREG_REG (x))) { /* Global registers are not local. */ if (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER && global_regs[subreg_regno (x)]) return 1; return 0; } break; case REG: regno = REGNO (x); /* Global registers are not local. */ if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) return 1; return 0; case SCRATCH: case PC: case CC0: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST: case LABEL_REF: return 0; case SYMBOL_REF: /* Constants in the function's constants pool are constant. */ if (CONSTANT_POOL_ADDRESS_P (x)) return 0; return 1; case CALL: /* Non-constant calls and recursion are not local. 
*/ return 1; case MEM: /* Be overly conservative and consider any volatile memory reference as not local. */ if (MEM_VOLATILE_P (x)) return 1; base = find_base_term (XEXP (x, 0)); if (base) { /* A Pmode ADDRESS could be a reference via the structure value address or static chain. Such memory references are nonlocal. Thus, we have to examine the contents of the ADDRESS to find out if this is a local reference or not. */ if (GET_CODE (base) == ADDRESS && GET_MODE (base) == Pmode && (XEXP (base, 0) == stack_pointer_rtx || XEXP (base, 0) == arg_pointer_rtx #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM || XEXP (base, 0) == hard_frame_pointer_rtx #endif || XEXP (base, 0) == frame_pointer_rtx)) return 0; /* Constants in the function's constant pool are constant. */ if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base)) return 0; } return 1; case UNSPEC_VOLATILE: case ASM_INPUT: return 1; case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 1; /* Fall through. */ default: break; } return 0; } /* Returns nonzero if X might mention something which is not local to the function and is not constant. */ static int nonlocal_mentioned_p (rtx x) { if (INSN_P (x)) { if (GET_CODE (x) == CALL_INSN) { if (! CONST_OR_PURE_CALL_P (x)) return 1; x = CALL_INSN_FUNCTION_USAGE (x); if (x == 0) return 0; } else x = PATTERN (x); } return for_each_rtx (&x, nonlocal_mentioned_p_1, NULL); } /* A subroutine of nonlocal_referenced_p, returns 1 if *LOC references something which is not local to the function and is not constant. */ static int nonlocal_referenced_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED) { rtx x = *loc; if (! x) return 0; switch (GET_CODE (x)) { case MEM: case REG: case SYMBOL_REF: case SUBREG: return nonlocal_mentioned_p (x); case CALL: /* Non-constant calls and recursion are not local. */ return 1; case SET: if (nonlocal_mentioned_p (SET_SRC (x))) return 1; if (MEM_P (SET_DEST (x))) return nonlocal_mentioned_p (XEXP (SET_DEST (x), 0)); /* If the destination is anything other than a CC0, PC, MEM, REG, or a SUBREG of a REG that occupies all of the REG, then X references nonlocal memory if it is mentioned in the destination. */ if (GET_CODE (SET_DEST (x)) != CC0 && GET_CODE (SET_DEST (x)) != PC && !REG_P (SET_DEST (x)) && ! (GET_CODE (SET_DEST (x)) == SUBREG && REG_P (SUBREG_REG (SET_DEST (x))) && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x)))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))) return nonlocal_mentioned_p (SET_DEST (x)); return 0; case CLOBBER: if (MEM_P (XEXP (x, 0))) return nonlocal_mentioned_p (XEXP (XEXP (x, 0), 0)); return 0; case USE: return nonlocal_mentioned_p (XEXP (x, 0)); case ASM_INPUT: case UNSPEC_VOLATILE: return 1; case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 1; /* Fall through. */ default: break; } return 0; } /* Returns nonzero if X might reference something which is not local to the function and is not constant. */ static int nonlocal_referenced_p (rtx x) { if (INSN_P (x)) { if (GET_CODE (x) == CALL_INSN) { if (! CONST_OR_PURE_CALL_P (x)) return 1; x = CALL_INSN_FUNCTION_USAGE (x); if (x == 0) return 0; } else x = PATTERN (x); } return for_each_rtx (&x, nonlocal_referenced_p_1, NULL); } /* A subroutine of nonlocal_set_p, returns 1 if *LOC sets something which is not local to the function and is not constant. */ static int nonlocal_set_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED) { rtx x = *loc; if (! 
x) return 0; switch (GET_CODE (x)) { case CALL: /* Non-constant calls and recursion are not local. */ return 1; case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC: case PRE_MODIFY: case POST_MODIFY: return nonlocal_mentioned_p (XEXP (x, 0)); case SET: if (nonlocal_mentioned_p (SET_DEST (x))) return 1; return nonlocal_set_p (SET_SRC (x)); case CLOBBER: return nonlocal_mentioned_p (XEXP (x, 0)); case USE: return 0; case ASM_INPUT: case UNSPEC_VOLATILE: return 1; case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 1; /* Fall through. */ default: break; } return 0; } /* Returns nonzero if X might set something which is not local to the function and is not constant. */ static int nonlocal_set_p (rtx x) { if (INSN_P (x)) { if (GET_CODE (x) == CALL_INSN) { if (! CONST_OR_PURE_CALL_P (x)) return 1; x = CALL_INSN_FUNCTION_USAGE (x); if (x == 0) return 0; } else x = PATTERN (x); } return for_each_rtx (&x, nonlocal_set_p_1, NULL); } /* Mark the function if it is pure or constant. */ void mark_constant_function (void) { rtx insn; int nonlocal_memory_referenced; if (TREE_READONLY (current_function_decl) || DECL_IS_PURE (current_function_decl) || TREE_THIS_VOLATILE (current_function_decl) || current_function_has_nonlocal_goto || !targetm.binds_local_p (current_function_decl)) return; /* A loop might not return, which counts as a side effect. */ if (mark_dfs_back_edges ()) return; nonlocal_memory_referenced = 0; init_alias_analysis (); /* Determine if this is a constant or pure function. */ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { if (! INSN_P (insn)) continue; if (nonlocal_set_p (insn) || global_reg_mentioned_p (insn) || volatile_refs_p (PATTERN (insn))) break; if (! nonlocal_memory_referenced) nonlocal_memory_referenced = nonlocal_referenced_p (insn); } end_alias_analysis (); /* Mark the function. */ if (insn) ; else if (nonlocal_memory_referenced) { cgraph_rtl_info (current_function_decl)->pure_function = 1; DECL_IS_PURE (current_function_decl) = 1; } else { cgraph_rtl_info (current_function_decl)->const_function = 1; TREE_READONLY (current_function_decl) = 1; } } void init_alias_once (void) { int i; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) /* Check whether this register can hold an incoming pointer argument. FUNCTION_ARG_REGNO_P tests outgoing register numbers, so translate if necessary due to register windows. */ if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i)) && HARD_REGNO_MODE_OK (i, Pmode)) static_reg_base_value[i] = gen_rtx_ADDRESS (VOIDmode, gen_rtx_REG (Pmode, i)); static_reg_base_value[STACK_POINTER_REGNUM] = gen_rtx_ADDRESS (Pmode, stack_pointer_rtx); static_reg_base_value[ARG_POINTER_REGNUM] = gen_rtx_ADDRESS (Pmode, arg_pointer_rtx); static_reg_base_value[FRAME_POINTER_REGNUM] = gen_rtx_ADDRESS (Pmode, frame_pointer_rtx); #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM static_reg_base_value[HARD_FRAME_POINTER_REGNUM] = gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx); #endif } /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed to be a memory reference). */ static bool memory_modified; static void memory_modified_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data) { if (MEM_P (x)) { if (anti_dependence (x, (rtx)data) || output_dependence (x, (rtx)data)) memory_modified = true; } } /* Return true when INSN possibly modifies the memory contents of MEM (i.e. the address can be modified).
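The answer is conservative: any store noted in INSN for which anti_dependence or output_dependence cannot rule out a conflict with MEM counts as a modification.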
*/ bool memory_modified_in_insn_p (rtx mem, rtx insn) { if (!INSN_P (insn)) return false; memory_modified = false; note_stores (PATTERN (insn), memory_modified_1, mem); return memory_modified; } /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE array. */ void init_alias_analysis (void) { unsigned int maxreg = max_reg_num (); int changed, pass; int i; unsigned int ui; rtx insn; timevar_push (TV_ALIAS_ANALYSIS); reg_known_value_size = maxreg - FIRST_PSEUDO_REGISTER; reg_known_value = ggc_calloc (reg_known_value_size, sizeof (rtx)); reg_known_equiv_p = xcalloc (reg_known_value_size, sizeof (bool)); /* Overallocate reg_base_value to allow some growth during loop optimization. Loop unrolling can create a large number of registers. */ if (old_reg_base_value) { reg_base_value = old_reg_base_value; /* If varray gets large zeroing cost may get important. */ if (VARRAY_SIZE (reg_base_value) > 256 && VARRAY_SIZE (reg_base_value) > 4 * maxreg) VARRAY_GROW (reg_base_value, maxreg); VARRAY_CLEAR (reg_base_value); if (VARRAY_SIZE (reg_base_value) < maxreg) VARRAY_GROW (reg_base_value, maxreg); } else { VARRAY_RTX_INIT (reg_base_value, maxreg, "reg_base_value"); } new_reg_base_value = xmalloc (maxreg * sizeof (rtx)); reg_seen = xmalloc (maxreg); if (! reload_completed && flag_old_unroll_loops) { alias_invariant = ggc_calloc (maxreg, sizeof (rtx)); alias_invariant_size = maxreg; } /* The basic idea is that each pass through this loop will use the "constant" information from the previous pass to propagate alias information through another level of assignments. This could get expensive if the assignment chains are long. Maybe we should throttle the number of iterations, possibly based on the optimization level or flag_expensive_optimizations. We could propagate more information in the first pass by making use of REG_N_SETS to determine immediately that the alias information for a pseudo is "constant". A program with an uninitialized variable can cause an infinite loop here. Instead of doing a full dataflow analysis to detect such problems we just cap the number of iterations for the loop. The state of the arrays for the set chain in question does not matter since the program has undefined behavior. */ pass = 0; do { /* Assume nothing will change this iteration of the loop. */ changed = 0; /* We want to assign the same IDs each iteration of this loop, so start counting from zero each iteration of the loop. */ unique_id = 0; /* We're at the start of the function each iteration through the loop, so we're copying arguments. */ copying_arguments = true; /* Wipe the potential alias information clean for this pass. */ memset (new_reg_base_value, 0, maxreg * sizeof (rtx)); /* Wipe the reg_seen array clean. */ memset (reg_seen, 0, maxreg); /* Mark all hard registers which may contain an address. The stack, frame and argument pointers may contain an address. An argument register which can hold a Pmode value may contain an address even if it is not in BASE_REGS. The address expression is VOIDmode for an argument and Pmode for other registers. */ memcpy (new_reg_base_value, static_reg_base_value, FIRST_PSEUDO_REGISTER * sizeof (rtx)); /* Walk the insns adding values to the new_reg_base_value array. */ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { rtx note, set; #if defined (HAVE_prologue) || defined (HAVE_epilogue) /* The prologue/epilogue insns are not threaded onto the insn chain until after reload has completed. 
Thus, there is no sense wasting time checking if INSN is in the prologue/epilogue until after reload has completed. */ if (reload_completed && prologue_epilogue_contains (insn)) continue; #endif /* If this insn has a noalias note, process it. Otherwise, scan for sets. A simple set will have no side effects which could change the base value of any other register. */ if (GET_CODE (PATTERN (insn)) == SET && REG_NOTES (insn) != 0 && find_reg_note (insn, REG_NOALIAS, NULL_RTX)) record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL); else note_stores (PATTERN (insn), record_set, NULL); set = single_set (insn); if (set != 0 && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER) { unsigned int regno = REGNO (SET_DEST (set)); rtx src = SET_SRC (set); rtx t; if (REG_NOTES (insn) != 0 && (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0 && REG_N_SETS (regno) == 1) || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0) && GET_CODE (XEXP (note, 0)) != EXPR_LIST && ! rtx_varies_p (XEXP (note, 0), 1) && ! reg_overlap_mentioned_p (SET_DEST (set), XEXP (note, 0))) { set_reg_known_value (regno, XEXP (note, 0)); set_reg_known_equiv_p (regno, REG_NOTE_KIND (note) == REG_EQUIV); } else if (REG_N_SETS (regno) == 1 && GET_CODE (src) == PLUS && REG_P (XEXP (src, 0)) && (t = get_reg_known_value (REGNO (XEXP (src, 0)))) && GET_CODE (XEXP (src, 1)) == CONST_INT) { t = plus_constant (t, INTVAL (XEXP (src, 1))); set_reg_known_value (regno, t); set_reg_known_equiv_p (regno, 0); } else if (REG_N_SETS (regno) == 1 && ! rtx_varies_p (src, 1)) { set_reg_known_value (regno, src); set_reg_known_equiv_p (regno, 0); } } } else if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG) copying_arguments = false; } /* Now propagate values from new_reg_base_value to reg_base_value. */ if (maxreg != (unsigned int) max_reg_num()) abort (); for (ui = 0; ui < maxreg; ui++) { if (new_reg_base_value[ui] && new_reg_base_value[ui] != VARRAY_RTX (reg_base_value, ui) && ! rtx_equal_p (new_reg_base_value[ui], VARRAY_RTX (reg_base_value, ui))) { VARRAY_RTX (reg_base_value, ui) = new_reg_base_value[ui]; changed = 1; } } } while (changed && ++pass < MAX_ALIAS_LOOP_PASSES); /* Fill in the remaining entries. */ for (i = 0; i < (int)reg_known_value_size; i++) if (reg_known_value[i] == 0) reg_known_value[i] = regno_reg_rtx[i + FIRST_PSEUDO_REGISTER]; /* Simplify the reg_base_value array so that no register refers to another register, except to special registers indirectly through ADDRESS expressions. In theory this loop can take as long as O(registers^2), but unless there are very long dependency chains it will run in close to linear time. This loop may not be needed any longer now that the main loop does a better job at propagating alias information. */ pass = 0; do { changed = 0; pass++; for (ui = 0; ui < maxreg; ui++) { rtx base = VARRAY_RTX (reg_base_value, ui); if (base && REG_P (base)) { unsigned int base_regno = REGNO (base); if (base_regno == ui) /* register set from itself */ VARRAY_RTX (reg_base_value, ui) = 0; else VARRAY_RTX (reg_base_value, ui) = VARRAY_RTX (reg_base_value, base_regno); changed = 1; } } } while (changed && pass < MAX_ALIAS_LOOP_PASSES); /* Clean up.
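Only the per-pass scratch arrays (new_reg_base_value and reg_seen) are released here; reg_base_value, reg_known_value and alias_invariant stay live until end_alias_analysis.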
*/ free (new_reg_base_value); new_reg_base_value = 0; free (reg_seen); reg_seen = 0; timevar_pop (TV_ALIAS_ANALYSIS); } void end_alias_analysis (void) { old_reg_base_value = reg_base_value; ggc_free (reg_known_value); reg_known_value = 0; reg_known_value_size = 0; free (reg_known_equiv_p); reg_known_equiv_p = 0; if (alias_invariant) { ggc_free (alias_invariant); alias_invariant = 0; alias_invariant_size = 0; } } /* Type information for alias.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_alias_set_entry (void *x_p) { struct alias_set_entry * const x = (struct alias_set_entry *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_II12splay_tree_s ((*x).children); } } void gt_ggc_m_P15alias_set_entry15varray_head_tag (void *x_p) { struct varray_head_tag * const x = (struct varray_head_tag *)x_p; if (ggc_test_and_set_mark (x)) { switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { gt_ggc_m_15alias_set_entry ((*x).data.generic[i10]); } } break; case VARRAY_DATA_CPTR: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { } } break; case VARRAY_DATA_RTX: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { gt_ggc_m_7rtx_def ((*x).data.rtx[i12]); } } break; case VARRAY_DATA_RTVEC: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { gt_ggc_m_9rtvec_def ((*x).data.rtvec[i13]); } } break; case VARRAY_DATA_TREE: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { gt_ggc_m_9tree_node ((*x).data.tree[i14]); } } break; case VARRAY_DATA_BITMAP: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { gt_ggc_m_15bitmap_head_def ((*x).data.bitmap[i15]); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { gt_ggc_m_7rtx_def ((*x).data.const_equiv[i16].rtx); } } break; case VARRAY_DATA_TE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { gt_ggc_m_8elt_list ((*x).data.te[i17]); } } break; case VARRAY_DATA_EDGE: { size_t i18; for (i18 = 0; i18 < (size_t)((*x).num_elements); i18++) { gt_ggc_m_8edge_def ((*x).data.e[i18]); } } break; default: break; } } } void gt_pch_nx_alias_set_entry (void *x_p) { struct alias_set_entry * const x = (struct alias_set_entry *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_15alias_set_entry)) { gt_pch_n_II12splay_tree_s ((*x).children); } } void gt_pch_n_P15alias_set_entry15varray_head_tag (void *x_p) { struct varray_head_tag * const x = 
(struct varray_head_tag *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P15alias_set_entry15varray_head_tag)) { gt_pch_n_S ((*x).name); switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { gt_pch_n_15alias_set_entry ((*x).data.generic[i10]); } } break; case VARRAY_DATA_CPTR: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { gt_pch_n_S ((*x).data.cptr[i11]); } } break; case VARRAY_DATA_RTX: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { gt_pch_n_7rtx_def ((*x).data.rtx[i12]); } } break; case VARRAY_DATA_RTVEC: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { gt_pch_n_9rtvec_def ((*x).data.rtvec[i13]); } } break; case VARRAY_DATA_TREE: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { gt_pch_n_9tree_node ((*x).data.tree[i14]); } } break; case VARRAY_DATA_BITMAP: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { gt_pch_n_15bitmap_head_def ((*x).data.bitmap[i15]); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { gt_pch_n_7rtx_def ((*x).data.const_equiv[i16].rtx); } } break; case VARRAY_DATA_TE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { gt_pch_n_8elt_list ((*x).data.te[i17]); } } break; case VARRAY_DATA_EDGE: { size_t i18; for (i18 = 0; i18 < (size_t)((*x).num_elements); i18++) { gt_pch_n_8edge_def ((*x).data.e[i18]); } } break; default: break; } } } void gt_pch_p_15alias_set_entry (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct alias_set_entry * const x ATTRIBUTE_UNUSED = (struct alias_set_entry *)x_p; if ((void *)(x) == this_obj) op (&((*x).children), cookie); } void gt_pch_p_P15alias_set_entry15varray_head_tag (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct varray_head_tag * const x ATTRIBUTE_UNUSED = (struct varray_head_tag *)x_p; if ((void *)(x) == this_obj) op (&((*x).name), cookie); switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { if ((void *)(x) == this_obj) op (&((*x).data.generic[i10]), cookie); } } break; case VARRAY_DATA_CPTR: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { if ((void *)(x) == this_obj) op (&((*x).data.cptr[i11]), cookie); } } break; case VARRAY_DATA_RTX: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { if ((void *)(x) == this_obj) op (&((*x).data.rtx[i12]), cookie); } } break; case VARRAY_DATA_RTVEC: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { if ((void *)(x) == this_obj) op (&((*x).data.rtvec[i13]), cookie); } } break; case VARRAY_DATA_TREE: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { if ((void *)(x) == this_obj) op 
(&((*x).data.tree[i14]), cookie); } } break; case VARRAY_DATA_BITMAP: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { if ((void *)(x) == this_obj) op (&((*x).data.bitmap[i15]), cookie); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { if ((void *)(x) == this_obj) op (&((*x).data.const_equiv[i16].rtx), cookie); } } break; case VARRAY_DATA_TE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { if ((void *)(x) == this_obj) op (&((*x).data.te[i17]), cookie); } } break; case VARRAY_DATA_EDGE: { size_t i18; for (i18 = 0; i18 < (size_t)((*x).num_elements); i18++) { if ((void *)(x) == this_obj) op (&((*x).data.e[i18]), cookie); } } break; default: break; } } /* GC roots. */ static void gt_ggc_ma_reg_known_value (void *); static void gt_ggc_ma_reg_known_value (void *x_p ATTRIBUTE_UNUSED) { if (reg_known_value != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(reg_known_value_size); i0++) { gt_ggc_m_7rtx_def (reg_known_value[i0]); } ggc_mark (reg_known_value); } } static void gt_pch_pa_reg_known_value (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_reg_known_value (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (reg_known_value != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(reg_known_value_size); i0++) { if ((void *)(reg_known_value) == this_obj) op (&(reg_known_value[i0]), cookie); } if ((void *)(&reg_known_value) == this_obj) op (&(reg_known_value), cookie); } } static void gt_pch_na_reg_known_value (void *); static void gt_pch_na_reg_known_value (void *x_p ATTRIBUTE_UNUSED) { if (reg_known_value != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(reg_known_value_size); i1++) { gt_pch_n_7rtx_def (reg_known_value[i1]); } gt_pch_note_object (reg_known_value, &reg_known_value, gt_pch_pa_reg_known_value); } } static void gt_ggc_ma_alias_invariant (void *); static void gt_ggc_ma_alias_invariant (void *x_p ATTRIBUTE_UNUSED) { if (alias_invariant != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(alias_invariant_size); i0++) { gt_ggc_m_7rtx_def (alias_invariant[i0]); } ggc_mark (alias_invariant); } } static void gt_pch_pa_alias_invariant (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_alias_invariant (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (alias_invariant != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(alias_invariant_size); i0++) { if ((void *)(alias_invariant) == this_obj) op (&(alias_invariant[i0]), cookie); } if ((void *)(&alias_invariant) == this_obj) op (&(alias_invariant), cookie); } } static void gt_pch_na_alias_invariant (void *); static void gt_pch_na_alias_invariant (void *x_p ATTRIBUTE_UNUSED) { if (alias_invariant != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(alias_invariant_size); i1++) { gt_pch_n_7rtx_def (alias_invariant[i1]); } gt_pch_note_object (alias_invariant, &alias_invariant, gt_pch_pa_alias_invariant); } } const struct ggc_root_tab gt_ggc_r_gt_alias_h[] = { { &alias_sets, 1, sizeof (alias_sets), &gt_ggc_m_P15alias_set_entry15varray_head_tag, &gt_pch_n_P15alias_set_entry15varray_head_tag }, { &reg_known_value, 1, sizeof (reg_known_value), &gt_ggc_ma_reg_known_value, &gt_pch_na_reg_known_value }, { &alias_invariant, 1, sizeof (alias_invariant), &gt_ggc_ma_alias_invariant, &gt_pch_na_alias_invariant }, { &static_reg_base_value[0], 1 * (FIRST_PSEUDO_REGISTER), sizeof
(static_reg_base_value[0]), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, { &reg_base_value, 1, sizeof (reg_base_value), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_ggc_rd_gt_alias_h[] = { { &old_reg_base_value, 1, sizeof (old_reg_base_value), NULL, NULL }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_alias_h[] = { { &frame_set, 1, sizeof (frame_set), NULL, NULL }, { &varargs_set, 1, sizeof (varargs_set), NULL, NULL }, { &last_alias_set, 1, sizeof (last_alias_set), NULL, NULL }, { &reg_known_value_size, 1, sizeof (reg_known_value_size), NULL, NULL }, { &alias_invariant_size, 1, sizeof (alias_invariant_size), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Basic block reordering routines for the GNU compiler. Copyright (C) 2000, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This (greedy) algorithm constructs traces in several rounds. The construction starts from "seeds". The seed for the first round is the entry point of the function. When there is more than one seed, the one with the lowest key in the heap is selected first (see function bb_to_key). Then the algorithm repeatedly adds the most probable successor to the end of a trace. Finally it connects the traces. There are two parameters: Branch Threshold and Exec Threshold. If the probability of the edge to a successor of the current basic block is lower than Branch Threshold, or the frequency of the successor is lower than Exec Threshold, the successor will be a seed in one of the next rounds. Each round has these parameters lower than the previous one. The last round has to have these parameters set to zero so that the remaining blocks are picked up. The algorithm selects the most probable successor from all unvisited successors and successors that have been added to this trace. The other successors (those that have not been "sent" to the next round) will be other seeds for this round, and the secondary traces will start from them. If the successor has not been visited in this trace it is added to the trace (however, there is some heuristic for simple branches). If the successor has been visited in this trace, a loop has been found. If the loop has many iterations the loop is rotated so that the source block of the most probable edge going out from the loop is the last block of the trace. If the loop has few iterations and there is no edge from the last block of the loop going out from the loop, the loop header is duplicated. Finally, the construction of the trace is terminated. When connecting traces, the algorithm first checks whether there is an edge from the last block of one trace to the first block of another trace. When there are still some unconnected traces, it checks whether there exists a basic block BB such that BB is a successor of the last bb of one trace and BB is a predecessor of the first block of another trace.
In this case, BB is duplicated and the traces are connected through this duplicate. The remaining traces are simply connected, so there will be a jump to the beginning of the rest of the trace. References: "Software Trace Cache" A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999 http://citeseer.nj.nec.com/15361.html */ /* Basic block reordering routines for the GNU compiler. Copyright (C) 2000, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_CFGLAYOUT_H #define GCC_CFGLAYOUT_H extern rtx cfg_layout_function_footer; extern void cfg_layout_initialize (void); extern void cfg_layout_finalize (void); extern void insn_locators_initialize (void); extern void reemit_insn_block_notes (void); extern bool can_copy_bbs_p (basic_block *, unsigned); extern void copy_bbs (basic_block *, unsigned, basic_block *, edge *, unsigned, edge *, struct loop *); extern bool scan_ahead_for_unlikely_executed_note (rtx); extern rtx duplicate_insn_chain (rtx, rtx); #endif /* GCC_CFGLAYOUT_H */ /* The number of rounds. In most cases there will only be 4 rounds, but when partitioning hot and cold basic blocks into separate sections of the .o file there will be an extra round. */ #define N_ROUNDS 5 /* Stubs in case we don't have a return insn. We have to check at runtime too, not only at compile time. */ #ifndef HAVE_return #define HAVE_return 0 #define gen_return() NULL_RTX #endif /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */ static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0}; /* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */ static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0}; /* If the edge frequency is lower than DUPLICATION_THRESHOLD per mille of the entry block, the edge destination is not duplicated while connecting traces. */ #define DUPLICATION_THRESHOLD 100 /* Length of unconditional jump instruction. */ static int uncond_jump_length; /* Structure to hold needed information for each basic block. */ typedef struct bbro_basic_block_data_def { /* Which trace is the bb start of (-1 means it is not a start of a trace). */ int start_of_trace; /* Which trace is the bb end of (-1 means it is not an end of a trace). */ int end_of_trace; /* Which heap is BB in (if any)? */ fibheap_t heap; /* Which heap node is BB in (if any)? */ fibnode_t node; } bbro_basic_block_data; /* The current size of the following dynamic array. */ static int array_size; /* The array which holds needed information for basic blocks. */ static bbro_basic_block_data *bbd; /* To avoid frequent reallocation the size of the arrays is greater than needed; the number of elements is (not less than) 1.25 * size_wanted. */ #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5) /* Free the memory and set the pointer to NULL.
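Aborts if the pointer is already NULL, so a double free through this macro is caught at once.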
*/ #define FREE(P) \ do { if (P) { free (P); P = 0; } else { abort (); } } while (0) /* Structure for holding information about a trace. */ struct trace { /* First and last basic block of the trace. */ basic_block first, last; /* The round of the STC creation which this trace was found in. */ int round; /* The length (i.e. the number of basic blocks) of the trace. */ int length; }; /* Maximum frequency and count of one of the entry blocks. */ int max_entry_frequency; gcov_type max_entry_count; /* Local function prototypes. */ static void find_traces (int *, struct trace *); static basic_block rotate_loop (edge, struct trace *, int); static void mark_bb_visited (basic_block, int); static void find_traces_1_round (int, int, gcov_type, struct trace *, int *, int, fibheap_t *, int); static basic_block copy_bb (basic_block, edge, basic_block, int); static fibheapkey_t bb_to_key (basic_block); static bool better_edge_p (basic_block, edge, int, int, int, int, edge); static void connect_traces (int, struct trace *); static bool copy_bb_p (basic_block, int); static int get_uncond_jump_length (void); static bool push_to_next_round_p (basic_block, int, int, int, gcov_type); static void add_unlikely_executed_notes (void); static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *, int *, int *); static void mark_bb_for_unlikely_executed_section (basic_block); static void add_labels_and_missing_jumps (edge *, int); static void add_reg_crossing_jump_notes (void); static void fix_up_fall_thru_edges (void); static void fix_edges_for_rarely_executed_code (edge *, int); static void fix_crossing_conditional_branches (void); static void fix_crossing_unconditional_branches (void); /* Check to see if bb should be pushed into the next round of trace collections or not. Reasons for pushing the block forward are 1). If the block is cold, we are doing partitioning, and there will be another round (cold partition blocks are not supposed to be collected into traces until the very last round); or 2). There will be another round, and the basic block is not "hot enough" for the current round of trace collection. */ static bool push_to_next_round_p (basic_block bb, int round, int number_of_rounds, int exec_th, gcov_type count_th) { bool there_exists_another_round; bool cold_block; bool block_not_hot_enough; there_exists_another_round = round < number_of_rounds - 1; cold_block = (flag_reorder_blocks_and_partition && bb->partition == COLD_PARTITION); block_not_hot_enough = (bb->frequency < exec_th || bb->count < count_th || probably_never_executed_bb_p (bb)); if (there_exists_another_round && (cold_block || block_not_hot_enough)) return true; else return false; } /* Find the traces for Software Trace Cache. Chain each trace through RBI()->next. Store the number of traces to N_TRACES and description of traces to TRACES. */ static void find_traces (int *n_traces, struct trace *traces) { int i; int number_of_rounds; edge e; fibheap_t heap; /* Add one extra round of trace collection when partitioning hot/cold basic blocks into separate sections. The last round is for all the cold blocks (and ONLY the cold blocks). */ number_of_rounds = N_ROUNDS - 1; if (flag_reorder_blocks_and_partition) number_of_rounds = N_ROUNDS; /* Insert entry points of function into heap. 
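As an illustrative special case (a reading of the loop below, not additional code): when the function has a single entry edge ENTRY -> bb0, the loop degenerates to

    bbd[bb0->index].heap = heap;
    bbd[bb0->index].node = fibheap_insert (heap, bb_to_key (bb0), bb0);

with max_entry_frequency = bb0->frequency and max_entry_count = bb0->count, so round 1 normally begins its first trace at bb0 (unless push_to_next_round_p defers it).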
*/ heap = fibheap_new (); max_entry_frequency = 0; max_entry_count = 0; for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) { bbd[e->dest->index].heap = heap; bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest), e->dest); if (e->dest->frequency > max_entry_frequency) max_entry_frequency = e->dest->frequency; if (e->dest->count > max_entry_count) max_entry_count = e->dest->count; } /* Find the traces. */ for (i = 0; i < number_of_rounds; i++) { gcov_type count_threshold; if (dump_file) fprintf (dump_file, "STC - round %d\n", i + 1); if (max_entry_count < INT_MAX / 1000) count_threshold = max_entry_count * exec_threshold[i] / 1000; else count_threshold = max_entry_count / 1000 * exec_threshold[i]; find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000, max_entry_frequency * exec_threshold[i] / 1000, count_threshold, traces, n_traces, i, &heap, number_of_rounds); } fibheap_delete (heap); if (dump_file) { for (i = 0; i < *n_traces; i++) { basic_block bb; fprintf (dump_file, "Trace %d (round %d): ", i + 1, traces[i].round + 1); for (bb = traces[i].first; bb != traces[i].last; bb = bb->rbi->next) fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency); fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency); } fflush (dump_file); } } /* Rotate loop whose back edge is BACK_EDGE in the tail of trace TRACE (with sequential number TRACE_N). */ static basic_block rotate_loop (edge back_edge, struct trace *trace, int trace_n) { basic_block bb; /* Information about the best end (end after rotation) of the loop. */ basic_block best_bb = NULL; edge best_edge = NULL; int best_freq = -1; gcov_type best_count = -1; /* The best edge is preferred when its destination is not visited yet or is a start block of some trace. */ bool is_preferred = false; /* Find the most frequent edge that goes out from current trace. */ bb = back_edge->dest; do { edge e; for (e = bb->succ; e; e = e->succ_next) if (e->dest != EXIT_BLOCK_PTR && e->dest->rbi->visited != trace_n && (e->flags & EDGE_CAN_FALLTHRU) && !(e->flags & EDGE_COMPLEX)) { if (is_preferred) { /* The best edge is preferred. */ if (!e->dest->rbi->visited || bbd[e->dest->index].start_of_trace >= 0) { /* The current edge E is also preferred. */ int freq = EDGE_FREQUENCY (e); if (freq > best_freq || e->count > best_count) { best_freq = freq; best_count = e->count; best_edge = e; best_bb = bb; } } } else { if (!e->dest->rbi->visited || bbd[e->dest->index].start_of_trace >= 0) { /* The current edge E is preferred. */ is_preferred = true; best_freq = EDGE_FREQUENCY (e); best_count = e->count; best_edge = e; best_bb = bb; } else { int freq = EDGE_FREQUENCY (e); if (!best_edge || freq > best_freq || e->count > best_count) { best_freq = freq; best_count = e->count; best_edge = e; best_bb = bb; } } } } bb = bb->rbi->next; } while (bb != back_edge->dest); if (best_bb) { /* Rotate the loop so that the BEST_EDGE goes out from the last block of the trace. */ if (back_edge->dest == trace->first) { trace->first = best_bb->rbi->next; } else { basic_block prev_bb; for (prev_bb = trace->first; prev_bb->rbi->next != back_edge->dest; prev_bb = prev_bb->rbi->next) ; prev_bb->rbi->next = best_bb->rbi->next; /* Try to get rid of uncond jump to cond jump. */ if (prev_bb->succ && !prev_bb->succ->succ_next) { basic_block header = prev_bb->succ->dest; /* Duplicate HEADER if it is a small block containing cond jump in the end. 
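For example (an illustrative scenario, not extra code): if HEADER is tiny enough that its insn lengths fit the unconditional-jump-length budget checked by copy_bb_p (header, 0), copy_bb splices a copy of HEADER right after PREV_BB. PREV_BB then falls through into the copy instead of jumping to the original header, trading the unconditional jump for the conditional jump the header already ends with.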
*/ if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)) { copy_bb (header, prev_bb->succ, prev_bb, trace_n); } } } } else { /* We have not found suitable loop tail so do no rotation. */ best_bb = back_edge->src; } best_bb->rbi->next = NULL; return best_bb; } /* This function marks BB that it was visited in trace number TRACE. */ static void mark_bb_visited (basic_block bb, int trace) { bb->rbi->visited = trace; if (bbd[bb->index].heap) { fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node); bbd[bb->index].heap = NULL; bbd[bb->index].node = NULL; } } /* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH i.e. do not include basic blocks their probability is lower than BRANCH_TH or their frequency is lower than EXEC_TH into traces (or count is lower than COUNT_TH). It stores the new traces into TRACES and modifies the number of traces *N_TRACES. Sets the round (which the trace belongs to) to ROUND. It expects that starting basic blocks are in *HEAP and at the end it deletes *HEAP and stores starting points for the next round into new *HEAP. */ static void find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, struct trace *traces, int *n_traces, int round, fibheap_t *heap, int number_of_rounds) { /* The following variable refers to the last round in which non-"cold" blocks may be collected into a trace. */ int last_round = N_ROUNDS - 1; /* Heap for discarded basic blocks which are possible starting points for the next round. */ fibheap_t new_heap = fibheap_new (); while (!fibheap_empty (*heap)) { basic_block bb; struct trace *trace; edge best_edge, e; fibheapkey_t key; bb = fibheap_extract_min (*heap); bbd[bb->index].heap = NULL; bbd[bb->index].node = NULL; if (dump_file) fprintf (dump_file, "Getting bb %d\n", bb->index); /* If the BB's frequency is too low send BB to the next round. When partitioning hot/cold blocks into separate sections, make sure all the cold blocks (and ONLY the cold blocks) go into the (extra) final round. */ if (push_to_next_round_p (bb, round, number_of_rounds, exec_th, count_th)) { int key = bb_to_key (bb); bbd[bb->index].heap = new_heap; bbd[bb->index].node = fibheap_insert (new_heap, key, bb); if (dump_file) fprintf (dump_file, " Possible start point of next round: %d (key: %d)\n", bb->index, key); continue; } trace = traces + *n_traces; trace->first = bb; trace->round = round; trace->length = 0; (*n_traces)++; do { int prob, freq; /* The probability and frequency of the best edge. */ int best_prob = INT_MIN / 2; int best_freq = INT_MIN / 2; best_edge = NULL; mark_bb_visited (bb, *n_traces); trace->length++; if (dump_file) fprintf (dump_file, "Basic block %d was visited in trace %d\n", bb->index, *n_traces - 1); /* Select the successor that will be placed after BB. */ for (e = bb->succ; e; e = e->succ_next) { #ifdef ENABLE_CHECKING if (e->flags & EDGE_FAKE) abort (); #endif if (e->dest == EXIT_BLOCK_PTR) continue; if (e->dest->rbi->visited && e->dest->rbi->visited != *n_traces) continue; if (e->dest->partition == COLD_PARTITION && round < last_round) continue; prob = e->probability; freq = EDGE_FREQUENCY (e); /* Edge that cannot be fallthru or improbable or infrequent successor (ie. it is unsuitable successor). */ if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX) || prob < branch_th || freq < exec_th || e->count < count_th) continue; /* If partitioning hot/cold basic blocks, don't consider edges that cross section boundaries. 
*/ if (better_edge_p (bb, e, prob, freq, best_prob, best_freq, best_edge)) { best_edge = e; best_prob = prob; best_freq = freq; } } /* If the best destination has multiple predecessors, and can be duplicated cheaper than a jump, don't allow it to be added to a trace. We'll duplicate it when connecting traces. */ if (best_edge && best_edge->dest->pred->pred_next && copy_bb_p (best_edge->dest, 0)) best_edge = NULL; /* Add all non-selected successors to the heaps. */ for (e = bb->succ; e; e = e->succ_next) { if (e == best_edge || e->dest == EXIT_BLOCK_PTR || e->dest->rbi->visited) continue; key = bb_to_key (e->dest); if (bbd[e->dest->index].heap) { /* E->DEST is already in some heap. */ if (key != bbd[e->dest->index].node->key) { if (dump_file) { fprintf (dump_file, "Changing key for bb %d from %ld to %ld.\n", e->dest->index, (long) bbd[e->dest->index].node->key, key); } fibheap_replace_key (bbd[e->dest->index].heap, bbd[e->dest->index].node, key); } } else { fibheap_t which_heap = *heap; prob = e->probability; freq = EDGE_FREQUENCY (e); if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX) || prob < branch_th || freq < exec_th || e->count < count_th) { /* When partitioning hot/cold basic blocks, make sure the cold blocks (and only the cold blocks) all get pushed to the last round of trace collection. */ if (push_to_next_round_p (e->dest, round, number_of_rounds, exec_th, count_th)) which_heap = new_heap; } bbd[e->dest->index].heap = which_heap; bbd[e->dest->index].node = fibheap_insert (which_heap, key, e->dest); if (dump_file) { fprintf (dump_file, " Possible start of %s round: %d (key: %ld)\n", (which_heap == new_heap) ? "next" : "this", e->dest->index, (long) key); } } } if (best_edge) /* Suitable successor was found. */ { if (best_edge->dest->rbi->visited == *n_traces) { /* We do nothing with one basic block loops. */ if (best_edge->dest != bb) { if (EDGE_FREQUENCY (best_edge) > 4 * best_edge->dest->frequency / 5) { /* The loop has at least 4 iterations. If the loop header is not the first block of the function we can rotate the loop. */ if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb) { if (dump_file) { fprintf (dump_file, "Rotating loop %d - %d\n", best_edge->dest->index, bb->index); } bb->rbi->next = best_edge->dest; bb = rotate_loop (best_edge, trace, *n_traces); } } else { /* The loop has less than 4 iterations. */ /* Check whether there is another edge from BB. */ edge another_edge; for (another_edge = bb->succ; another_edge; another_edge = another_edge->succ_next) if (another_edge != best_edge) break; if (!another_edge && copy_bb_p (best_edge->dest, !optimize_size)) { bb = copy_bb (best_edge->dest, best_edge, bb, *n_traces); } } } /* Terminate the trace. */ break; } else { /* Check for a situation A /| B | \| C where EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC) >= EDGE_FREQUENCY (AC). (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) ) Best ordering is then A B C. 
This situation is created for example by: if (A) B; C; */ for (e = bb->succ; e; e = e->succ_next) if (e != best_edge && (e->flags & EDGE_CAN_FALLTHRU) && !(e->flags & EDGE_COMPLEX) && !e->dest->rbi->visited && !e->dest->pred->pred_next && !e->crossing_edge && e->dest->succ && (e->dest->succ->flags & EDGE_CAN_FALLTHRU) && !(e->dest->succ->flags & EDGE_COMPLEX) && !e->dest->succ->succ_next && e->dest->succ->dest == best_edge->dest && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)) { best_edge = e; if (dump_file) fprintf (dump_file, "Selecting BB %d\n", best_edge->dest->index); break; } bb->rbi->next = best_edge->dest; bb = best_edge->dest; } } } while (best_edge); trace->last = bb; bbd[trace->first->index].start_of_trace = *n_traces - 1; bbd[trace->last->index].end_of_trace = *n_traces - 1; /* The trace is terminated so we have to recount the keys in heap (some block can have a lower key because now one of its predecessors is an end of the trace). */ for (e = bb->succ; e; e = e->succ_next) { if (e->dest == EXIT_BLOCK_PTR || e->dest->rbi->visited) continue; if (bbd[e->dest->index].heap) { key = bb_to_key (e->dest); if (key != bbd[e->dest->index].node->key) { if (dump_file) { fprintf (dump_file, "Changing key for bb %d from %ld to %ld.\n", e->dest->index, (long) bbd[e->dest->index].node->key, key); } fibheap_replace_key (bbd[e->dest->index].heap, bbd[e->dest->index].node, key); } } } } fibheap_delete (*heap); /* "Return" the new heap. */ *heap = new_heap; } /* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add it to trace after BB, mark OLD_BB visited and update pass' data structures (TRACE is a number of trace which OLD_BB is duplicated to). */ static basic_block copy_bb (basic_block old_bb, edge e, basic_block bb, int trace) { basic_block new_bb; new_bb = duplicate_block (old_bb, e); if (e->dest != new_bb) abort (); if (e->dest->rbi->visited) abort (); if (dump_file) fprintf (dump_file, "Duplicated bb %d (created bb %d)\n", old_bb->index, new_bb->index); new_bb->rbi->visited = trace; new_bb->rbi->next = bb->rbi->next; bb->rbi->next = new_bb; if (new_bb->index >= array_size || last_basic_block > array_size) { int i; int new_size; new_size = MAX (last_basic_block, new_bb->index + 1); new_size = GET_ARRAY_SIZE (new_size); bbd = xrealloc (bbd, new_size * sizeof (bbro_basic_block_data)); for (i = array_size; i < new_size; i++) { bbd[i].start_of_trace = -1; bbd[i].end_of_trace = -1; bbd[i].heap = NULL; bbd[i].node = NULL; } array_size = new_size; if (dump_file) { fprintf (dump_file, "Growing the dynamic array to %d elements.\n", array_size); } } return new_bb; } /* Compute and return the key (for the heap) of the basic block BB. */ static fibheapkey_t bb_to_key (basic_block bb) { edge e; int priority = 0; /* Do not start in probably never executed blocks. */ if (bb->partition == COLD_PARTITION || probably_never_executed_bb_p (bb)) return BB_FREQ_MAX; /* Prefer blocks whose predecessor is an end of some trace or whose predecessor edge is EDGE_DFS_BACK. */ for (e = bb->pred; e; e = e->pred_next) { if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0) || (e->flags & EDGE_DFS_BACK)) { int edge_freq = EDGE_FREQUENCY (e); if (edge_freq > priority) priority = edge_freq; } } if (priority) /* The block with priority should have significantly lower key. */ return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency); return -bb->frequency; } /* Return true when the edge E from basic block BB is better than the temporary best edge (details are in function). 
The probability of edge E is PROB. The frequency of the successor is FREQ. The current best probability is BEST_PROB, the best frequency is BEST_FREQ. The edge is considered to be equivalent when PROB does not differ much from BEST_PROB; similarly for frequency. */ static bool better_edge_p (basic_block bb, edge e, int prob, int freq, int best_prob, int best_freq, edge cur_best_edge) { bool is_better_edge; /* The BEST_* values do not have to be best, but can be a bit smaller than maximum values. */ int diff_prob = best_prob / 10; int diff_freq = best_freq / 10; if (prob > best_prob + diff_prob) /* The edge has higher probability than the temporary best edge. */ is_better_edge = true; else if (prob < best_prob - diff_prob) /* The edge has lower probability than the temporary best edge. */ is_better_edge = false; else if (freq < best_freq - diff_freq) /* The edge and the temporary best edge have almost equivalent probabilities. The higher frequency of a successor now means that there is another edge going into that successor. This successor has lower frequency so it is better. */ is_better_edge = true; else if (freq > best_freq + diff_freq) /* This successor has higher frequency so it is worse. */ is_better_edge = false; else if (e->dest->prev_bb == bb) /* The edges have equivalent probabilities and the successors have equivalent frequencies. Select the previous successor. */ is_better_edge = true; else is_better_edge = false; /* If we are doing hot/cold partitioning, make sure that we always favor non-crossing edges over crossing edges. */ if (!is_better_edge && flag_reorder_blocks_and_partition && cur_best_edge && cur_best_edge->crossing_edge && !e->crossing_edge) is_better_edge = true; return is_better_edge; } /* Connect traces in array TRACES, N_TRACES is the count of traces. */ static void connect_traces (int n_traces, struct trace *traces) { int i; int unconnected_hot_trace_count = 0; bool cold_connected = true; bool *connected; bool *cold_traces; int last_trace; int freq_threshold; gcov_type count_threshold; freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000; if (max_entry_count < INT_MAX / 1000) count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000; else count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD; connected = xcalloc (n_traces, sizeof (bool)); last_trace = -1; /* If we are partitioning hot/cold basic blocks, mark the cold traces as already connected, to remove them from consideration for connection to the hot traces. After the hot traces have all been connected (determined by "unconnected_hot_trace_count"), we will go back and connect the cold traces. */ cold_traces = xcalloc (n_traces, sizeof (bool)); if (flag_reorder_blocks_and_partition) for (i = 0; i < n_traces; i++) { if (traces[i].first->partition == COLD_PARTITION) { connected[i] = true; cold_traces[i] = true; cold_connected = false; } else unconnected_hot_trace_count++; } for (i = 0; i < n_traces || !cold_connected ; i++) { int t = i; int t2; edge e, best; int best_len; /* If we are partitioning hot/cold basic blocks, check to see if all the hot traces have been connected. If so, go back and mark the cold traces as unconnected so we can connect them up too. Re-set "i" to the first (unconnected) cold trace. Use flag "cold_connected" to make sure we don't do this step more than once. 
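A small worked example (illustrative only): with three traces T0 (hot), T1 (cold), T2 (hot), the pre-pass above leaves connected = { false, true, false }, cold_traces = { false, true, false }, cold_connected = false and unconnected_hot_trace_count = 2. Iterations i = 0 and i = 2 connect the hot traces; once unconnected_hot_trace_count drops to 0 (or i reaches n_traces), the test below clears connected[1], resets i = t = 1, sets cold_connected = true, and the loop runs once more to append the cold trace after the hot ones.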
*/ if (flag_reorder_blocks_and_partition && (i >= n_traces || unconnected_hot_trace_count <= 0) && !cold_connected) { int j; int first_cold_trace = -1; for (j = 0; j < n_traces; j++) if (cold_traces[j]) { connected[j] = false; if (first_cold_trace == -1) first_cold_trace = j; } i = t = first_cold_trace; cold_connected = true; } if (connected[t]) continue; connected[t] = true; if (unconnected_hot_trace_count > 0) unconnected_hot_trace_count--; /* Find the predecessor traces. */ for (t2 = t; t2 > 0;) { best = NULL; best_len = 0; for (e = traces[t2].first->pred; e; e = e->pred_next) { int si = e->src->index; if (e->src != ENTRY_BLOCK_PTR && (e->flags & EDGE_CAN_FALLTHRU) && !(e->flags & EDGE_COMPLEX) && bbd[si].end_of_trace >= 0 && !connected[bbd[si].end_of_trace] && (!best || e->probability > best->probability || (e->probability == best->probability && traces[bbd[si].end_of_trace].length > best_len))) { best = e; best_len = traces[bbd[si].end_of_trace].length; } } if (best) { best->src->rbi->next = best->dest; t2 = bbd[best->src->index].end_of_trace; connected[t2] = true; if (unconnected_hot_trace_count > 0) unconnected_hot_trace_count--; if (dump_file) { fprintf (dump_file, "Connection: %d %d\n", best->src->index, best->dest->index); } } else break; } if (last_trace >= 0) traces[last_trace].last->rbi->next = traces[t2].first; last_trace = t; /* Find the successor traces. */ while (1) { /* Find the continuation of the chain. */ best = NULL; best_len = 0; for (e = traces[t].last->succ; e; e = e->succ_next) { int di = e->dest->index; if (e->dest != EXIT_BLOCK_PTR && (e->flags & EDGE_CAN_FALLTHRU) && !(e->flags & EDGE_COMPLEX) && bbd[di].start_of_trace >= 0 && !connected[bbd[di].start_of_trace] && (!best || e->probability > best->probability || (e->probability == best->probability && traces[bbd[di].start_of_trace].length > best_len))) { best = e; best_len = traces[bbd[di].start_of_trace].length; } } if (best) { if (dump_file) { fprintf (dump_file, "Connection: %d %d\n", best->src->index, best->dest->index); } t = bbd[best->dest->index].start_of_trace; traces[last_trace].last->rbi->next = traces[t].first; connected[t] = true; if (unconnected_hot_trace_count > 0) unconnected_hot_trace_count--; last_trace = t; } else { /* Try to connect the traces by duplication of 1 block. */ edge e2; basic_block next_bb = NULL; bool try_copy = false; for (e = traces[t].last->succ; e; e = e->succ_next) if (e->dest != EXIT_BLOCK_PTR && (e->flags & EDGE_CAN_FALLTHRU) && !(e->flags & EDGE_COMPLEX) && (!best || e->probability > best->probability)) { edge best2 = NULL; int best2_len = 0; /* If the destination is a start of a trace which is only one block long, then no need to search the successor blocks of the trace. Accept it. 
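For instance (an illustrative case), a trace consisting of a single return block: any predecessor trace can end by duplicating or falling into that one block, so E is accepted as BEST immediately and the inner search over e->dest's successors below is skipped.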
*/ if (bbd[e->dest->index].start_of_trace >= 0 && traces[bbd[e->dest->index].start_of_trace].length == 1) { best = e; try_copy = true; continue; } for (e2 = e->dest->succ; e2; e2 = e2->succ_next) { int di = e2->dest->index; if (e2->dest == EXIT_BLOCK_PTR || ((e2->flags & EDGE_CAN_FALLTHRU) && !(e2->flags & EDGE_COMPLEX) && bbd[di].start_of_trace >= 0 && !connected[bbd[di].start_of_trace] && (EDGE_FREQUENCY (e2) >= freq_threshold) && (e2->count >= count_threshold) && (!best2 || e2->probability > best2->probability || (e2->probability == best2->probability && traces[bbd[di].start_of_trace].length > best2_len)))) { best = e; best2 = e2; if (e2->dest != EXIT_BLOCK_PTR) best2_len = traces[bbd[di].start_of_trace].length; else best2_len = INT_MAX; next_bb = e2->dest; try_copy = true; } } } if (flag_reorder_blocks_and_partition) try_copy = false; /* Copy tiny blocks always; copy larger blocks only when the edge is traversed frequently enough. */ if (try_copy && copy_bb_p (best->dest, !optimize_size && EDGE_FREQUENCY (best) >= freq_threshold && best->count >= count_threshold)) { basic_block new_bb; if (dump_file) { fprintf (dump_file, "Connection: %d %d ", traces[t].last->index, best->dest->index); if (!next_bb) fputc ('\n', dump_file); else if (next_bb == EXIT_BLOCK_PTR) fprintf (dump_file, "exit\n"); else fprintf (dump_file, "%d\n", next_bb->index); } new_bb = copy_bb (best->dest, best, traces[t].last, t); traces[t].last = new_bb; if (next_bb && next_bb != EXIT_BLOCK_PTR) { t = bbd[next_bb->index].start_of_trace; traces[last_trace].last->rbi->next = traces[t].first; connected[t] = true; if (unconnected_hot_trace_count > 0) unconnected_hot_trace_count--; last_trace = t; } else break; /* Stop finding the successor traces. */ } else break; /* Stop finding the successor traces. */ } } } if (dump_file) { basic_block bb; fprintf (dump_file, "Final order:\n"); for (bb = traces[0].first; bb; bb = bb->rbi->next) fprintf (dump_file, "%d ", bb->index); fprintf (dump_file, "\n"); fflush (dump_file); } FREE (connected); FREE (cold_traces); } /* Return true when BB can and should be copied. CODE_MAY_GROW is true when code size is allowed to grow by duplication. */ static bool copy_bb_p (basic_block bb, int code_may_grow) { int size = 0; int max_size = uncond_jump_length; rtx insn; int n_succ; edge e; if (!bb->frequency) return false; if (!bb->pred || !bb->pred->pred_next) return false; if (!can_duplicate_block_p (bb)) return false; /* Avoid duplicating blocks which have many successors (PR/13430). */ n_succ = 0; for (e = bb->succ; e; e = e->succ_next) { n_succ++; if (n_succ > 8) return false; } if (code_may_grow && maybe_hot_bb_p (bb)) max_size *= 8; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (INSN_P (insn)) size += get_attr_length (insn); } if (size <= max_size) return true; if (dump_file) { fprintf (dump_file, "Block %d can't be copied because its size = %d.\n", bb->index, size); } return false; } /* Return the length of unconditional jump instruction. 
*/ static int get_uncond_jump_length (void) { rtx label, jump; int length; label = emit_label_before (gen_label_rtx (), get_insns ()); jump = emit_jump_insn (gen_jump (label)); length = get_attr_length (jump); delete_insn (jump); delete_insn (label); return length; } static void add_unlikely_executed_notes (void) { basic_block bb; FOR_EACH_BB (bb) if (bb->partition == COLD_PARTITION) mark_bb_for_unlikely_executed_section (bb); } /* Find the basic blocks that are rarely executed and need to be moved to a separate section of the .o file (to cut down on paging and improve cache locality). */ static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges, int *n_crossing_edges, int *max_idx) { basic_block bb; edge e; int i; /* Mark which partition (hot/cold) each basic block belongs in. */ FOR_EACH_BB (bb) { if (probably_never_executed_bb_p (bb)) bb->partition = COLD_PARTITION; else bb->partition = HOT_PARTITION; } /* Mark every edge that crosses between sections. */ i = 0; FOR_EACH_BB (bb) for (e = bb->succ; e; e = e->succ_next) { if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR && e->src->partition != e->dest->partition) { e->crossing_edge = true; if (i == *max_idx) { *max_idx *= 2; crossing_edges = xrealloc (crossing_edges, (*max_idx) * sizeof (edge)); } crossing_edges[i++] = e; } else e->crossing_edge = false; } *n_crossing_edges = i; } /* Add NOTE_INSN_UNLIKELY_EXECUTED_CODE to top of basic block. This note is later used to mark the basic block to be put in the unlikely-to-be-executed section of the .o file. */ static void mark_bb_for_unlikely_executed_section (basic_block bb) { rtx cur_insn; rtx insert_insn = NULL; rtx new_note; /* Find first non-note instruction and insert new NOTE before it (as long as new NOTE is not first instruction in basic block). */ for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb)); cur_insn = NEXT_INSN (cur_insn)) if (GET_CODE (cur_insn) != NOTE && GET_CODE (cur_insn) != CODE_LABEL) { insert_insn = cur_insn; break; } /* Insert note and assign basic block number to it. */ if (insert_insn) { new_note = emit_note_before (NOTE_INSN_UNLIKELY_EXECUTED_CODE, insert_insn); NOTE_BASIC_BLOCK (new_note) = bb; } else { new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE, BB_END (bb)); NOTE_BASIC_BLOCK (new_note) = bb; } } /* If any destination of a crossing edge does not have a label, add label; Convert any fall-through crossing edges (for blocks that do not contain a jump) to unconditional jumps. */ static void add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges) { int i; basic_block src; basic_block dest; rtx label; rtx barrier; rtx new_jump; for (i=0; i < n_crossing_edges; i++) { if (crossing_edges[i]) { src = crossing_edges[i]->src; dest = crossing_edges[i]->dest; /* Make sure dest has a label. */ if (dest && (dest != EXIT_BLOCK_PTR)) { label = block_label (dest); /* Make sure source block ends with a jump. */ if (src && (src != ENTRY_BLOCK_PTR)) { if (GET_CODE (BB_END (src)) != JUMP_INSN) /* bb just falls through. */ { /* make sure there's only one successor */ if (src->succ && (src->succ->succ_next == NULL)) { /* Find label in dest block. */ label = block_label (dest); new_jump = emit_jump_insn_after (gen_jump (label), BB_END (src)); barrier = emit_barrier_after (new_jump); JUMP_LABEL (new_jump) = label; LABEL_NUSES (label) += 1; src->rbi->footer = unlink_insn_chain (barrier, barrier); /* Mark edge as non-fallthru. 
*/ crossing_edges[i]->flags &= ~EDGE_FALLTHRU; } else { /* Basic block has two successors, but doesn't end in a jump; something is wrong here! */ abort(); } } /* end: 'if (GET_CODE ... ' */ } /* end: 'if (src && src->index...' */ } /* end: 'if (dest && dest->index...' */ } /* end: 'if (crossing_edges[i]...' */ } /* end for loop */ } /* Find any bb's where the fall-through edge is a crossing edge (note that these bb's must also contain a conditional jump; we've already dealt with fall-through edges for blocks that didn't have a conditional jump in the call to add_labels_and_missing_jumps). Convert the fall-through edge to non-crossing edge by inserting a new bb to fall-through into. The new bb will contain an unconditional jump (crossing edge) to the original fall through destination. */ static void fix_up_fall_thru_edges (void) { basic_block cur_bb; basic_block new_bb; edge succ1; edge succ2; edge fall_thru; edge cond_jump = NULL; edge e; bool cond_jump_crosses; int invert_worked; rtx old_jump; rtx fall_thru_label; rtx barrier; FOR_EACH_BB (cur_bb) { fall_thru = NULL; succ1 = cur_bb->succ; if (succ1) succ2 = succ1->succ_next; else succ2 = NULL; /* Find the fall-through edge. */ if (succ1 && (succ1->flags & EDGE_FALLTHRU)) { fall_thru = succ1; cond_jump = succ2; } else if (succ2 && (succ2->flags & EDGE_FALLTHRU)) { fall_thru = succ2; cond_jump = succ1; } if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR)) { /* Check to see if the fall-thru edge is a crossing edge. */ if (fall_thru->crossing_edge) { /* The fall_thru edge crosses; now check the cond jump edge, if it exists. */ cond_jump_crosses = true; invert_worked = 0; old_jump = BB_END (cur_bb); /* Find the jump instruction, if there is one. */ if (cond_jump) { if (!cond_jump->crossing_edge) cond_jump_crosses = false; /* We know the fall-thru edge crosses; if the cond jump edge does NOT cross, and its destination is the next block in the bb order, invert the jump (i.e. fix it so the fall thru does not cross and the cond jump does). */ if (!cond_jump_crosses && cur_bb->rbi->next == cond_jump->dest) { /* Find label in fall_thru block. We've already added any missing labels, so there must be one. */ fall_thru_label = block_label (fall_thru->dest); if (old_jump && fall_thru_label) invert_worked = invert_jump (old_jump, fall_thru_label,0); if (invert_worked) { fall_thru->flags &= ~EDGE_FALLTHRU; cond_jump->flags |= EDGE_FALLTHRU; update_br_prob_note (cur_bb); e = fall_thru; fall_thru = cond_jump; cond_jump = e; cond_jump->crossing_edge = true; fall_thru->crossing_edge = false; } } } if (cond_jump_crosses || !invert_worked) { /* This is the case where both edges out of the basic block are crossing edges. Here we will fix up the fall through edge. The jump edge will be taken care of later. */ new_bb = force_nonfallthru (fall_thru); if (new_bb) { new_bb->rbi->next = cur_bb->rbi->next; cur_bb->rbi->next = new_bb; /* Make sure new fall-through bb is in same partition as bb it's falling through from. */ new_bb->partition = cur_bb->partition; new_bb->succ->crossing_edge = true; } /* Add barrier after new jump */ if (new_bb) { barrier = emit_barrier_after (BB_END (new_bb)); new_bb->rbi->footer = unlink_insn_chain (barrier, barrier); } else { barrier = emit_barrier_after (BB_END (cur_bb)); cur_bb->rbi->footer = unlink_insn_chain (barrier, barrier); } } } } } } /* This function checks the destination blockof a "crossing jump" to see if it has any crossing predecessors that begin with a code label and end with an unconditional jump. 
If so, it returns that predecessor block. (This is to avoid creating lots of new basic blocks that all contain unconditional jumps to the same destination). */ static basic_block find_jump_block (basic_block jump_dest) { basic_block source_bb = NULL; edge e; rtx insn; for (e = jump_dest->pred; e; e = e->pred_next) if (e->crossing_edge) { basic_block src = e->src; /* Check each predecessor to see if it has a label, and contains only one executable instruction, which is an unconditional jump. If so, we can use it. */ if (GET_CODE (BB_HEAD (src)) == CODE_LABEL) for (insn = BB_HEAD (src); !INSN_P (insn) && insn != NEXT_INSN (BB_END (src)); insn = NEXT_INSN (insn)) { if (INSN_P (insn) && insn == BB_END (src) && GET_CODE (insn) == JUMP_INSN && !any_condjump_p (insn)) { source_bb = src; break; } } if (source_bb) break; } return source_bb; } /* Find all BB's with conditional jumps that are crossing edges; insert a new bb and make the conditional jump branch to the new bb instead (make the new bb same color so conditional branch won't be a 'crossing' edge). Insert an unconditional jump from the new bb to the original destination of the conditional jump. */ static void fix_crossing_conditional_branches (void) { basic_block cur_bb; basic_block new_bb; basic_block last_bb; basic_block dest; basic_block prev_bb; edge succ1; edge succ2; edge crossing_edge; edge new_edge; rtx old_jump; rtx set_src; rtx old_label = NULL_RTX; rtx new_label; rtx new_jump; rtx barrier; last_bb = EXIT_BLOCK_PTR->prev_bb; FOR_EACH_BB (cur_bb) { crossing_edge = NULL; succ1 = cur_bb->succ; if (succ1) succ2 = succ1->succ_next; else succ2 = NULL; /* We already took care of fall-through edges, so only one successor can be a crossing edge. */ if (succ1 && succ1->crossing_edge) crossing_edge = succ1; else if (succ2 && succ2->crossing_edge) crossing_edge = succ2; if (crossing_edge) { old_jump = BB_END (cur_bb); /* Check to make sure the jump instruction is a conditional jump. */ set_src = NULL_RTX; if (any_condjump_p (old_jump)) { if (GET_CODE (PATTERN (old_jump)) == SET) set_src = SET_SRC (PATTERN (old_jump)); else if (GET_CODE (PATTERN (old_jump)) == PARALLEL) { set_src = XVECEXP (PATTERN (old_jump), 0,0); if (GET_CODE (set_src) == SET) set_src = SET_SRC (set_src); else set_src = NULL_RTX; } } if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE)) { if (GET_CODE (XEXP (set_src, 1)) == PC) old_label = XEXP (set_src, 2); else if (GET_CODE (XEXP (set_src, 2)) == PC) old_label = XEXP (set_src, 1); /* Check to see if new bb for jumping to that dest has already been created; if so, use it; if not, create a new one. */ new_bb = find_jump_block (crossing_edge->dest); if (new_bb) new_label = block_label (new_bb); else { /* Create new basic block to be dest for conditional jump. */ new_bb = create_basic_block (NULL, NULL, last_bb); new_bb->rbi->next = last_bb->rbi->next; last_bb->rbi->next = new_bb; prev_bb = last_bb; last_bb = new_bb; /* Update register liveness information. */ new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); COPY_REG_SET (new_bb->global_live_at_end, prev_bb->global_live_at_end); COPY_REG_SET (new_bb->global_live_at_start, prev_bb->global_live_at_end); /* Put appropriate instructions in new bb. 
*/ new_label = gen_label_rtx (); emit_label_before (new_label, BB_HEAD (new_bb)); BB_HEAD (new_bb) = new_label; if (GET_CODE (old_label) == LABEL_REF) { old_label = JUMP_LABEL (old_jump); new_jump = emit_jump_insn_after (gen_jump (old_label), BB_END (new_bb)); } else if (HAVE_return && GET_CODE (old_label) == RETURN) new_jump = emit_jump_insn_after (gen_return (), BB_END (new_bb)); else abort (); barrier = emit_barrier_after (new_jump); JUMP_LABEL (new_jump) = old_label; new_bb->rbi->footer = unlink_insn_chain (barrier, barrier); /* Make sure new bb is in same partition as source of conditional branch. */ new_bb->partition = cur_bb->partition; } /* Make old jump branch to new bb. */ redirect_jump (old_jump, new_label, 0); /* Remove crossing_edge as predecessor of 'dest'. */ dest = crossing_edge->dest; redirect_edge_succ (crossing_edge, new_bb); /* Make a new edge from new_bb to old dest; new edge will be a successor for new_bb and a predecessor for 'dest'. */ if (!new_bb->succ) new_edge = make_edge (new_bb, dest, 0); else new_edge = new_bb->succ; crossing_edge->crossing_edge = false; new_edge->crossing_edge = true; } } } } /* Find any unconditional branches that cross between hot and cold sections. Convert them into indirect jumps instead. */ static void fix_crossing_unconditional_branches (void) { basic_block cur_bb; rtx last_insn; rtx label; rtx label_addr; rtx indirect_jump_sequence; rtx jump_insn = NULL_RTX; rtx new_reg; rtx cur_insn; edge succ; FOR_EACH_BB (cur_bb) { last_insn = BB_END (cur_bb); succ = cur_bb->succ; /* Check to see if bb ends in a crossing (unconditional) jump. At this point, no crossing jumps should be conditional. */ if (GET_CODE (last_insn) == JUMP_INSN && succ->crossing_edge) { rtx label2, table; if (any_condjump_p (last_insn)) abort (); /* Make sure the jump is not already an indirect or table jump. */ else if (!computed_jump_p (last_insn) && !tablejump_p (last_insn, &label2, &table)) { /* We have found a "crossing" unconditional branch. Now we must convert it to an indirect jump. First create reference of label, as target for jump. */ label = JUMP_LABEL (last_insn); label_addr = gen_rtx_LABEL_REF (Pmode, label); LABEL_NUSES (label) += 1; /* Get a register to use for the indirect jump. */ new_reg = gen_reg_rtx (Pmode); /* Generate indirect the jump sequence. */ start_sequence (); emit_move_insn (new_reg, label_addr); emit_indirect_jump (new_reg); indirect_jump_sequence = get_insns (); end_sequence (); /* Make sure every instruction in the new jump sequence has its basic block set to be cur_bb. */ for (cur_insn = indirect_jump_sequence; cur_insn; cur_insn = NEXT_INSN (cur_insn)) { BLOCK_FOR_INSN (cur_insn) = cur_bb; if (GET_CODE (cur_insn) == JUMP_INSN) jump_insn = cur_insn; } /* Insert the new (indirect) jump sequence immediately before the unconditional jump, then delete the unconditional jump. */ emit_insn_before (indirect_jump_sequence, last_insn); delete_insn (last_insn); /* Make BB_END for cur_bb be the jump instruction (NOT the barrier instruction at the end of the sequence...). */ BB_END (cur_bb) = jump_insn; } } } } /* Add REG_CROSSING_JUMP note to all crossing jump insns. 
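(After this runs, every jump insn at the source of a crossing edge carries a REG_CROSSING_JUMP note with a null operand; a consumer would typically test for it with the usual idiom, e.g. find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX) != NULL_RTX -- a sketch of the consumer side rather than code from this file.)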
*/ static void add_reg_crossing_jump_notes (void) { basic_block bb; edge e; FOR_EACH_BB (bb) for (e = bb->succ; e; e = e->succ_next) if (e->crossing_edge && GET_CODE (BB_END (e->src)) == JUMP_INSN) REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (e->src))); } /* Basic blocks containing NOTE_INSN_UNLIKELY_EXECUTED_CODE will be put in a separate section of the .o file, to reduce paging and improve cache performance (hopefully). This can result in bits of code from the same function being widely separated in the .o file. However this is not obvious to the current bb structure. Therefore we must take care to ensure that: 1). There are no fall_thru edges that cross between sections; 2). For those architectures which have "short" conditional branches, all conditional branches that attempt to cross between sections are converted to unconditional branches; and, 3). For those architectures which have "short" unconditional branches, all unconditional branches that attempt to cross between sections are converted to indirect jumps. The code for fixing up fall_thru edges that cross between hot and cold basic blocks does so by creating new basic blocks containing unconditional branches to the appropriate label in the "other" section. The new basic block is then put in the same (hot or cold) section as the original conditional branch, and the fall_thru edge is modified to fall into the new basic block instead. By adding this level of indirection we end up with only unconditional branches crossing between hot and cold sections. Conditional branches are dealt with by adding a level of indirection. A new basic block is added in the same (hot/cold) section as the conditional branch, and the conditional branch is retargeted to the new basic block. The new basic block contains an unconditional branch to the original target of the conditional branch (in the other section). Unconditional branches are dealt with by converting them into indirect jumps. */ static void fix_edges_for_rarely_executed_code (edge *crossing_edges, int n_crossing_edges) { /* Make sure the source of any crossing edge ends in a jump and the destination of any crossing edge has a label. */ add_labels_and_missing_jumps (crossing_edges, n_crossing_edges); /* Convert all crossing fall_thru edges to non-crossing fall thrus to unconditional jumps (that jump to the original fall thru dest). */ fix_up_fall_thru_edges (); /* If the architecture does not have conditional branches that can span all of memory, convert crossing conditional branches into crossing unconditional branches. */ if (!HAS_LONG_COND_BRANCH) fix_crossing_conditional_branches (); /* If the architecture does not have unconditional branches that can span all of memory, convert crossing unconditional branches into indirect jumps. Since adding an indirect jump also adds a new register usage, update the register usage information as well. */ if (!HAS_LONG_UNCOND_BRANCH) { fix_crossing_unconditional_branches (); reg_scan (get_insns(), max_reg_num (), 1); } add_reg_crossing_jump_notes (); } /* Reorder basic blocks. The main entry point to this file. 
*/ void reorder_basic_blocks (void) { int n_traces; int i; struct trace *traces; if (n_basic_blocks <= 1) return; if (targetm.cannot_modify_jumps_p ()) return; timevar_push (TV_REORDER_BLOCKS); cfg_layout_initialize (); set_edge_can_fallthru_flag (); mark_dfs_back_edges (); /* We are estimating the length of uncond jump insn only once since the code for getting the insn length always returns the minimal length now. */ if (uncond_jump_length == 0) uncond_jump_length = get_uncond_jump_length (); /* We need to know some information for each basic block. */ array_size = GET_ARRAY_SIZE (last_basic_block); bbd = xmalloc (array_size * sizeof (bbro_basic_block_data)); for (i = 0; i < array_size; i++) { bbd[i].start_of_trace = -1; bbd[i].end_of_trace = -1; bbd[i].heap = NULL; bbd[i].node = NULL; } traces = xmalloc (n_basic_blocks * sizeof (struct trace)); n_traces = 0; find_traces (&n_traces, traces); connect_traces (n_traces, traces); FREE (traces); FREE (bbd); if (dump_file) dump_flow_info (dump_file); if (flag_reorder_blocks_and_partition) add_unlikely_executed_notes (); cfg_layout_finalize (); timevar_pop (TV_REORDER_BLOCKS); } /* This function is the main 'entrance' for the optimization that partitions hot and cold basic blocks into separate sections of the .o file (to improve performance and cache locality). Ideally it would be called after all optimizations that rearrange the CFG have been called. However part of this optimization may introduce new register usage, so it must be called before register allocation has occurred. This means that this optimization is actually called well before the optimization that reorders basic blocks (see function above). This optimization checks the feedback information to determine which basic blocks are hot/cold and adds NOTE_INSN_UNLIKELY_EXECUTED_CODE to non-hot basic blocks. The presence or absence of this note is later used for writing out sections in the .o file. This optimization must also modify the CFG to make sure there are no fallthru edges between hot & cold blocks, as those blocks will not necessarily be contiguous in the .o (or assembly) file; and in those cases where the architecture requires it, conditional and unconditional branches that cross between sections are converted into unconditional or indirect jumps, depending on what is appropriate. */ void partition_hot_cold_basic_blocks (void) { basic_block cur_bb; edge *crossing_edges; int n_crossing_edges; int max_edges = 2 * last_basic_block; if (n_basic_blocks <= 1) return; crossing_edges = xcalloc (max_edges, sizeof (edge)); cfg_layout_initialize (); FOR_EACH_BB (cur_bb) if (cur_bb->index >= 0 && cur_bb->next_bb->index >= 0) cur_bb->rbi->next = cur_bb->next_bb; find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges, &n_crossing_edges, &max_edges); if (n_crossing_edges > 0) fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges); free (crossing_edges); cfg_layout_finalize(); } /* Functions to support general ended bitmaps. Copyright (C) 1997, 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Obstack to allocate bitmap elements from. */ static struct obstack bitmap_obstack; static int bitmap_obstack_init = FALSE; #ifndef INLINE #ifndef __GNUC__ #define INLINE #else #define INLINE __inline__ #endif #endif /* Global data */ bitmap_element bitmap_zero_bits; /* An element of all zero bits. */ static bitmap_element *bitmap_free; /* Freelist of bitmap elements. */ static GTY((deletable)) bitmap_element *bitmap_ggc_free; static void bitmap_elem_to_freelist (bitmap, bitmap_element *); static void bitmap_element_free (bitmap, bitmap_element *); static bitmap_element *bitmap_element_allocate (bitmap); static int bitmap_element_zerop (bitmap_element *); static void bitmap_element_link (bitmap, bitmap_element *); static bitmap_element *bitmap_find_bit (bitmap, unsigned int); /* Add ELEM to the appropriate freelist. */ static INLINE void bitmap_elem_to_freelist (bitmap head, bitmap_element *elt) { if (head->using_obstack) { elt->next = bitmap_free; bitmap_free = elt; } else { elt->next = bitmap_ggc_free; bitmap_ggc_free = elt; } } /* Free a bitmap element. Since these are allocated off the bitmap_obstack, "free" actually means "put onto the freelist". */ static INLINE void bitmap_element_free (bitmap head, bitmap_element *elt) { bitmap_element *next = elt->next; bitmap_element *prev = elt->prev; if (prev) prev->next = next; if (next) next->prev = prev; if (head->first == elt) head->first = next; /* Since the first thing we try is to insert before current, make current the next entry in preference to the previous. */ if (head->current == elt) { head->current = next != 0 ? next : prev; if (head->current) head->indx = head->current->indx; } bitmap_elem_to_freelist (head, elt); } /* Allocate a bitmap element. The bits are cleared, but nothing else is. */ static INLINE bitmap_element * bitmap_element_allocate (bitmap head) { bitmap_element *element; if (head->using_obstack) { if (bitmap_free != 0) { element = bitmap_free; bitmap_free = element->next; } else { /* We can't use gcc_obstack_init to initialize the obstack since print-rtl.c now calls bitmap functions, and bitmap is linked into the gen* functions. */ if (!bitmap_obstack_init) { bitmap_obstack_init = TRUE; #if !defined(__GNUC__) || (__GNUC__ < 2) #define __alignof__(type) 0 #endif obstack_specify_allocation (&bitmap_obstack, OBSTACK_CHUNK_SIZE, __alignof__ (bitmap_element), obstack_chunk_alloc, obstack_chunk_free); } element = obstack_alloc (&bitmap_obstack, sizeof (bitmap_element)); } } else { if (bitmap_ggc_free != NULL) { element = bitmap_ggc_free; bitmap_ggc_free = element->next; } else element = ggc_alloc (sizeof (bitmap_element)); } memset (element->bits, 0, sizeof (element->bits)); return element; } /* Release any memory allocated by bitmaps. */ void bitmap_release_memory (void) { bitmap_free = 0; if (bitmap_obstack_init) { bitmap_obstack_init = FALSE; obstack_free (&bitmap_obstack, NULL); } } /* Return nonzero if all bits in an element are zero. */ static INLINE int bitmap_element_zerop (bitmap_element *element) { #if BITMAP_ELEMENT_WORDS == 2 return (element->bits[0] | element->bits[1]) == 0; #else int i; for (i = 0; i < BITMAP_ELEMENT_WORDS; i++) if (element->bits[i] != 0) return 0; return 1; #endif } /* Link the bitmap element into the current bitmap linked list. 
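A worked example (illustrative only): suppose the list already holds elements with indx 1 and 5 and head->current points at indx 5. Linking a new element with indx 3 takes the indx < head->indx branch, walks backward from the current element until ptr->prev->indx is no longer greater than 3, and splices the element in, giving 1 -> 3 -> 5; head->current then points at the new element and head->indx becomes 3.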
*/ static INLINE void bitmap_element_link (bitmap head, bitmap_element *element) { unsigned int indx = element->indx; bitmap_element *ptr; /* If this is the first and only element, set it in. */ if (head->first == 0) { element->next = element->prev = 0; head->first = element; } /* If this index is less than that of the current element, it goes someplace before the current element. */ else if (indx < head->indx) { for (ptr = head->current; ptr->prev != 0 && ptr->prev->indx > indx; ptr = ptr->prev) ; if (ptr->prev) ptr->prev->next = element; else head->first = element; element->prev = ptr->prev; element->next = ptr; ptr->prev = element; } /* Otherwise, it must go someplace after the current element. */ else { for (ptr = head->current; ptr->next != 0 && ptr->next->indx < indx; ptr = ptr->next) ; if (ptr->next) ptr->next->prev = element; element->next = ptr->next; element->prev = ptr; ptr->next = element; } /* Set up so this is the first element searched. */ head->current = element; head->indx = indx; } /* Clear a bitmap by freeing the linked list. */ void bitmap_clear (bitmap head) { bitmap_element *element, *next; for (element = head->first; element != 0; element = next) { next = element->next; bitmap_elem_to_freelist (head, element); } head->first = head->current = 0; } /* Copy a bitmap to another bitmap. */ void bitmap_copy (bitmap to, bitmap from) { bitmap_element *from_ptr, *to_ptr = 0; #if BITMAP_ELEMENT_WORDS != 2 int i; #endif bitmap_clear (to); /* Copy elements in forward direction one at a time. */ for (from_ptr = from->first; from_ptr; from_ptr = from_ptr->next) { bitmap_element *to_elt = bitmap_element_allocate (to); to_elt->indx = from_ptr->indx; #if BITMAP_ELEMENT_WORDS == 2 to_elt->bits[0] = from_ptr->bits[0]; to_elt->bits[1] = from_ptr->bits[1]; #else for (i = 0; i < BITMAP_ELEMENT_WORDS; i++) to_elt->bits[i] = from_ptr->bits[i]; #endif /* Here we have a special case of bitmap_element_link, for the case where we know the links are being entered in sequence. */ if (to_ptr == 0) { to->first = to->current = to_elt; to->indx = from_ptr->indx; to_elt->next = to_elt->prev = 0; } else { to_elt->prev = to_ptr; to_elt->next = 0; to_ptr->next = to_elt; } to_ptr = to_elt; } } /* Find a bitmap element that would hold a bitmap's bit. Update the `current' field even if we can't find an element that would hold the bitmap's bit to make eventual allocation faster. */ static INLINE bitmap_element * bitmap_find_bit (bitmap head, unsigned int bit) { bitmap_element *element; unsigned int indx = bit / BITMAP_ELEMENT_ALL_BITS; if (head->current == 0 || head->indx == indx) return head->current; if (head->indx > indx) for (element = head->current; element->prev != 0 && element->indx > indx; element = element->prev) ; else for (element = head->current; element->next != 0 && element->indx < indx; element = element->next) ; /* `element' is the nearest to the one we want. If it's not the one we want, the one we want doesn't exist. */ head->current = element; head->indx = element->indx; if (element != 0 && element->indx != indx) element = 0; return element; } /* Clear a single bit in a bitmap. */ void bitmap_clear_bit (bitmap head, int bit) { bitmap_element *ptr = bitmap_find_bit (head, bit); if (ptr != 0) { unsigned bit_num = bit % BITMAP_WORD_BITS; unsigned word_num = bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS; ptr->bits[word_num] &= ~ (((BITMAP_WORD) 1) << bit_num); /* If we cleared the entire word, free up the element. 
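For example (a worked case, assuming 64-bit BITMAP_WORDs and BITMAP_ELEMENT_WORDS == 2, so each element covers 128 bits): clearing bit 131 addresses element indx 1 (131 / 128), word_num 0 ((131 / 64) % 2) and bit_num 3 (131 % 64); if that was the only bit set in the element, bitmap_element_zerop now sees all-zero words and the element goes back to the freelist.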
*/ if (bitmap_element_zerop (ptr)) bitmap_element_free (head, ptr); } } /* Set a single bit in a bitmap. */ void bitmap_set_bit (bitmap head, int bit) { bitmap_element *ptr = bitmap_find_bit (head, bit); unsigned word_num = bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS; unsigned bit_num = bit % BITMAP_WORD_BITS; BITMAP_WORD bit_val = ((BITMAP_WORD) 1) << bit_num; if (ptr == 0) { ptr = bitmap_element_allocate (head); ptr->indx = bit / BITMAP_ELEMENT_ALL_BITS; ptr->bits[word_num] = bit_val; bitmap_element_link (head, ptr); } else ptr->bits[word_num] |= bit_val; } /* Return whether a bit is set within a bitmap. */ int bitmap_bit_p (bitmap head, int bit) { bitmap_element *ptr; unsigned bit_num; unsigned word_num; ptr = bitmap_find_bit (head, bit); if (ptr == 0) return 0; bit_num = bit % BITMAP_WORD_BITS; word_num = bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS; return (ptr->bits[word_num] >> bit_num) & 1; } /* Return the bit number of the first set bit in the bitmap, or -1 if the bitmap is empty. */ int bitmap_first_set_bit (bitmap a) { bitmap_element *ptr = a->first; BITMAP_WORD word; unsigned word_num, bit_num; if (ptr == NULL) return -1; #if BITMAP_ELEMENT_WORDS == 2 word_num = 0, word = ptr->bits[0]; if (word == 0) word_num = 1, word = ptr->bits[1]; #else for (word_num = 0; word_num < BITMAP_ELEMENT_WORDS; ++word_num) if ((word = ptr->bits[word_num]) != 0) goto word_found; abort (); word_found: #endif /* Binary search for the first set bit. */ /* ??? It'd be nice to know if ffs or ffsl was available. */ bit_num = 0; word = word & -word; #if nBITMAP_WORD_BITS > 64 #error "Fill out the table." #endif #if nBITMAP_WORD_BITS > 32 if ((word & 0xffffffff) == 0) word >>= 32, bit_num += 32; #endif if ((word & 0xffff) == 0) word >>= 16, bit_num += 16; if ((word & 0xff) == 0) word >>= 8, bit_num += 8; if (word & 0xf0) bit_num += 4; if (word & 0xcc) bit_num += 2; if (word & 0xaa) bit_num += 1; return (ptr->indx * BITMAP_ELEMENT_ALL_BITS + word_num * BITMAP_WORD_BITS + bit_num); } /* Return the bit number of the last set bit in the bitmap, or -1 if the bitmap is empty. */ int bitmap_last_set_bit (bitmap a) { bitmap_element *ptr = a->first; BITMAP_WORD word; unsigned word_num, bit_num; if (ptr == NULL) return -1; while (ptr->next != NULL) ptr = ptr->next; #if BITMAP_ELEMENT_WORDS == 2 word_num = 1, word = ptr->bits[1]; if (word == 0) word_num = 0, word = ptr->bits[0]; #else for (word_num = BITMAP_ELEMENT_WORDS; word_num-- > 0; ) if ((word = ptr->bits[word_num]) != 0) goto word_found; abort (); word_found: #endif /* Binary search for the last set bit. */ bit_num = 0; #if nBITMAP_WORD_BITS > 64 #error "Fill out the table." #endif #if nBITMAP_WORD_BITS > 32 if (word & ~(BITMAP_WORD)0xffffffff) word >>= 32, bit_num += 32; #endif if (word & 0xffff0000) word >>= 16, bit_num += 16; if (word & 0xff00) word >>= 8, bit_num += 8; if (word & 0xf0) word >>= 4, bit_num += 4; if (word & 0xc) word >>= 2, bit_num += 2; if (word & 0x2) bit_num += 1; return (ptr->indx * BITMAP_ELEMENT_ALL_BITS + word_num * BITMAP_WORD_BITS + bit_num); } /* Store in bitmap TO the result of combining bitmap FROM1 and FROM2 using a specific bit manipulation. Return true if TO changes. */ int bitmap_operation (bitmap to, bitmap from1, bitmap from2, enum bitmap_bits operation) { #define HIGHEST_INDEX (unsigned int) ~0 bitmap_element *from1_ptr = from1->first; bitmap_element *from2_ptr = from2->first; unsigned int indx1 = (from1_ptr) ? from1_ptr->indx : HIGHEST_INDEX; unsigned int indx2 = (from2_ptr) ? 
from2_ptr->indx : HIGHEST_INDEX; bitmap_element *to_ptr = to->first; bitmap_element *from1_tmp; bitmap_element *from2_tmp; bitmap_element *to_tmp; unsigned int indx; int changed = 0; #if BITMAP_ELEMENT_WORDS == 2 #define DOIT(OP) \ do { \ BITMAP_WORD t0, t1, f10, f11, f20, f21; \ f10 = from1_tmp->bits[0]; \ f20 = from2_tmp->bits[0]; \ t0 = f10 OP f20; \ changed |= (t0 != to_tmp->bits[0]); \ f11 = from1_tmp->bits[1]; \ f21 = from2_tmp->bits[1]; \ t1 = f11 OP f21; \ changed |= (t1 != to_tmp->bits[1]); \ to_tmp->bits[0] = t0; \ to_tmp->bits[1] = t1; \ } while (0) #else #define DOIT(OP) \ do { \ BITMAP_WORD t, f1, f2; \ int i; \ for (i = 0; i < BITMAP_ELEMENT_WORDS; ++i) \ { \ f1 = from1_tmp->bits[i]; \ f2 = from2_tmp->bits[i]; \ t = f1 OP f2; \ changed |= (t != to_tmp->bits[i]); \ to_tmp->bits[i] = t; \ } \ } while (0) #endif to->first = to->current = 0; while (from1_ptr != 0 || from2_ptr != 0) { /* Figure out whether we need to substitute zero elements for missing links. */ if (indx1 == indx2) { indx = indx1; from1_tmp = from1_ptr; from2_tmp = from2_ptr; from1_ptr = from1_ptr->next; indx1 = (from1_ptr) ? from1_ptr->indx : HIGHEST_INDEX; from2_ptr = from2_ptr->next; indx2 = (from2_ptr) ? from2_ptr->indx : HIGHEST_INDEX; } else if (indx1 < indx2) { indx = indx1; from1_tmp = from1_ptr; from2_tmp = &bitmap_zero_bits; from1_ptr = from1_ptr->next; indx1 = (from1_ptr) ? from1_ptr->indx : HIGHEST_INDEX; } else { indx = indx2; from1_tmp = &bitmap_zero_bits; from2_tmp = from2_ptr; from2_ptr = from2_ptr->next; indx2 = (from2_ptr) ? from2_ptr->indx : HIGHEST_INDEX; } /* Find the appropriate element from TO. Begin by discarding elements that we've skipped. */ while (to_ptr && to_ptr->indx < indx) { changed = 1; to_tmp = to_ptr; to_ptr = to_ptr->next; bitmap_elem_to_freelist (to, to_tmp); } if (to_ptr && to_ptr->indx == indx) { to_tmp = to_ptr; to_ptr = to_ptr->next; } else to_tmp = bitmap_element_allocate (to); /* Do the operation, and if any bits are set, link it into the linked list. */ switch (operation) { default: abort (); case BITMAP_AND: DOIT (&); break; case BITMAP_AND_COMPL: DOIT (&~); break; case BITMAP_IOR: DOIT (|); break; case BITMAP_IOR_COMPL: DOIT (|~); break; case BITMAP_XOR: DOIT (^); break; } if (! bitmap_element_zerop (to_tmp)) { to_tmp->indx = indx; bitmap_element_link (to, to_tmp); } else { bitmap_elem_to_freelist (to, to_tmp); } } /* If we have elements of TO left over, free the lot. */ if (to_ptr) { changed = 1; for (to_tmp = to_ptr; to_tmp->next ; to_tmp = to_tmp->next) continue; if (to->using_obstack) { to_tmp->next = bitmap_free; bitmap_free = to_ptr; } else { to_tmp->next = bitmap_ggc_free; bitmap_ggc_free = to_ptr; } } #undef DOIT return changed; } /* Return true if two bitmaps are identical. */ int bitmap_equal_p (bitmap a, bitmap b) { bitmap_head c; int ret; memset (&c, 0, sizeof (c)); ret = ! bitmap_operation (&c, a, b, BITMAP_XOR); bitmap_clear (&c); return ret; } /* Or into bitmap TO bitmap FROM1 and'ed with the complement of bitmap FROM2. 
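In set notation this computes TO = TO | (FROM1 & ~FROM2), built below from two bitmap_operation calls through a temporary head: first BITMAP_AND_COMPL into TMP, then BITMAP_IOR of TO with TMP. The companion bitmap_union_of_diff that follows is the three-operand variant DST = A | (B & ~C) and additionally reports whether DST changed.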
*/ void bitmap_ior_and_compl (bitmap to, bitmap from1, bitmap from2) { bitmap_head tmp; tmp.first = tmp.current = 0; tmp.using_obstack = 0; bitmap_operation (&tmp, from1, from2, BITMAP_AND_COMPL); bitmap_operation (to, to, &tmp, BITMAP_IOR); bitmap_clear (&tmp); } int bitmap_union_of_diff (bitmap dst, bitmap a, bitmap b, bitmap c) { bitmap_head tmp; int changed; tmp.first = tmp.current = 0; tmp.using_obstack = 0; bitmap_operation (&tmp, b, c, BITMAP_AND_COMPL); changed = bitmap_operation (dst, &tmp, a, BITMAP_IOR); bitmap_clear (&tmp); return changed; } /* Initialize a bitmap header. */ bitmap bitmap_initialize (bitmap head, int using_obstack) { if (head == NULL && ! using_obstack) head = ggc_alloc (sizeof (*head)); head->first = head->current = 0; head->using_obstack = using_obstack; return head; } /* Debugging function to print out the contents of a bitmap. */ void debug_bitmap_file (FILE *file, bitmap head) { bitmap_element *ptr; fprintf (file, "\nfirst = " HOST_PTR_PRINTF " current = " HOST_PTR_PRINTF " indx = %u\n", (void *) head->first, (void *) head->current, head->indx); for (ptr = head->first; ptr; ptr = ptr->next) { unsigned int i, j, col = 26; fprintf (file, "\t" HOST_PTR_PRINTF " next = " HOST_PTR_PRINTF " prev = " HOST_PTR_PRINTF " indx = %u\n\t\tbits = {", (void*) ptr, (void*) ptr->next, (void*) ptr->prev, ptr->indx); for (i = 0; i < BITMAP_ELEMENT_WORDS; i++) for (j = 0; j < BITMAP_WORD_BITS; j++) if ((ptr->bits[i] >> j) & 1) { if (col > 70) { fprintf (file, "\n\t\t\t"); col = 24; } fprintf (file, " %u", (ptr->indx * BITMAP_ELEMENT_ALL_BITS + i * BITMAP_WORD_BITS + j)); col += 4; } fprintf (file, " }\n"); } } /* Function to be called from the debugger to print the contents of a bitmap. */ void debug_bitmap (bitmap head) { debug_bitmap_file (stdout, head); } /* Function to print out the contents of a bitmap. Unlike debug_bitmap_file, it does not print anything but the bits. */ void bitmap_print (FILE *file, bitmap head, const char *prefix, const char *suffix) { const char *comma = ""; int i; fputs (prefix, file); EXECUTE_IF_SET_IN_BITMAP (head, 0, i, { fprintf (file, "%s%d", comma, i); comma = ", "; }); fputs (suffix, file); } /* Type information for bitmap.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_rd_gt_bitmap_h[] = { { &bitmap_ggc_free, 1, sizeof (bitmap_ggc_free), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Expand builtin functions. Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Type class enum Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* As a special exception, if you link this library with other files, some of which are compiled with GCC, to produce an executable, this library does not by itself cause the resulting executable to be covered by the GNU General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU General Public License. */ #ifndef GCC_TYPECLASS_H #define GCC_TYPECLASS_H /* Values returned by __builtin_classify_type. */ enum type_class { no_type_class = -1, void_type_class, integer_type_class, char_type_class, enumeral_type_class, boolean_type_class, pointer_type_class, reference_type_class, offset_type_class, real_type_class, complex_type_class, function_type_class, method_type_class, record_type_class, union_type_class, array_type_class, string_type_class, set_type_class, file_type_class, lang_type_class }; #endif /* GCC_TYPECLASS_H */ #define CALLED_AS_BUILT_IN(NODE) \ (!strncmp (IDENTIFIER_POINTER (DECL_NAME (NODE)), "__builtin_", 10)) #ifndef PAD_VARARGS_DOWN #define PAD_VARARGS_DOWN BYTES_BIG_ENDIAN #endif /* Define the names of the builtin function types and codes. */ const char *const built_in_class_names[4] = {"NOT_BUILT_IN", "BUILT_IN_FRONTEND", "BUILT_IN_MD", "BUILT_IN_NORMAL"}; #define DEF_BUILTIN(X, N, C, T, LT, B, F, NA, AT, IM) #X, const char *const built_in_names[(int) END_BUILTINS] = { /* This file contains the definitions and documentation for the builtins used in the GNU compiler. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/

/* Before including this file, you should define a macro:

     DEF_BUILTIN (ENUM, NAME, CLASS, TYPE, LIBTYPE, BOTH_P,
                  FALLBACK_P, NONANSI_P, ATTRS, IMPLICIT)

   This macro will be called once for each builtin function.  The ENUM
   will be of type `enum built_in_function', and will indicate which
   builtin function is being processed.  The NAME of the builtin
   function (which will always start with `__builtin_') is a string
   literal.  The CLASS is of type `enum built_in_class' and indicates
   what kind of builtin is being processed.

   Some builtins are actually two separate functions.  For example,
   for `strcmp' there are two builtin functions; `__builtin_strcmp'
   and `strcmp' itself.  Both behave identically.  Other builtins
   define only the `__builtin' variant.  If BOTH_P is TRUE, then this
   builtin has both variants; otherwise, it has only the first
   variant.

   TYPE indicates the type of the function.  The symbols correspond to
   enumerals from builtin-types.def.  If BOTH_P is true, then LIBTYPE
   is the type of the non-`__builtin_' variant.  Otherwise, LIBTYPE
   should be ignored.

   If FALLBACK_P is true then, if for some reason, the compiler cannot
   expand the builtin function directly, it will call the
   corresponding library function (which does not have the
   `__builtin_' prefix).

   If NONANSI_P is true, then the non-`__builtin_' variant is not an
   ANSI/ISO library function, and so we should pretend it does not
   exist when compiling in ANSI conformant mode.

   ATTRS is an attribute list as defined in builtin-attrs.def that
   describes the attributes of this builtin function.

   IMPLICIT specifies the condition under which the builtin can be
   produced by the compiler.  For instance C90 reserves the floorf
   function, but does not define its meaning.  When the user uses
   floorf we may assume that floorf has the meaning we expect, but we
   can't produce floorf by simplifying floor((double)float) since the
   runtime need not implement it.  */

/* A GCC builtin (like __builtin_saveregs) is provided by the
   compiler, but does not correspond to a function in the standard
   library.  */
#undef DEF_GCC_BUILTIN
#define DEF_GCC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
  DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \
               false, false, false, ATTRS, true)

/* A library builtin (like __builtin_strchr) is a builtin equivalent
   of an ANSI/ISO standard library function.  In addition to the
   `__builtin' version, we will create an ordinary version (e.g.,
   `strchr') as well.  If we cannot compute the answer using the
   builtin function, we will fall back to the standard library
   version.  */
#undef DEF_LIB_BUILTIN
#define DEF_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
  DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
               true, true, false, ATTRS, true)

/* Like DEF_LIB_BUILTIN, except that the function is not one that is
   specified by ANSI/ISO C.  So, when we're being fully conformant we
   ignore the version of these builtins that does not begin with
   __builtin.  */
#undef DEF_EXT_LIB_BUILTIN
#define DEF_EXT_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
  DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
               true, true, true, ATTRS, false)

/* Like DEF_LIB_BUILTIN, except that the function is only a part of
   the standard in C94 or above.  */
#undef DEF_C94_BUILTIN
#define DEF_C94_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
  DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
               true, true, !flag_isoc94, ATTRS, TARGET_C99_FUNCTIONS)
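/* Illustrative sketch (not part of the original sources): clients of
   this builtin list consume it by defining DEF_BUILTIN and then
   textually including the list, once per table they want to generate.
   The built_in_names table earlier in this unit is produced that way,
   with DEF_BUILTIN expanding to the stringified enumerator; the
   built_in_function enumeration is conventionally generated with the
   same idiom, roughly:

       #define DEF_BUILTIN(ENUM, N, C, T, LT, B, F, NA, AT, IM) ENUM,
       enum built_in_function
       {
       #include "builtins.def"
         END_BUILTINS
       };
       #undef DEF_BUILTIN

   The DEF_GCC_BUILTIN, DEF_LIB_BUILTIN, DEF_EXT_LIB_BUILTIN and
   DEF_C9x_BUILTIN wrappers defined here merely forward to DEF_BUILTIN
   with the BOTH_P, FALLBACK_P, NONANSI_P and IMPLICIT arguments fixed
   for each class of builtin.  */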
/* Like DEF_LIB_BUILTIN, except that the function is only a part of
   the standard in C99 or above.  */
#undef DEF_C99_BUILTIN
#define DEF_C99_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
  DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
               true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS)

/* Builtins that are specified by C99, but for which C90 reserves the
   name for future use.  We can still recognize the builtin in C90
   mode but we can't produce it implicitly.  */
#undef DEF_C99_C90RES_BUILTIN
#define DEF_C99_C90RES_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
  DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
               true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS)

/* Define an attribute list for math functions that are normally
   "impure" because some of them may write into global memory for
   `errno'.  If !flag_errno_math they are instead "const".  */
#undef ATTR_MATHFN_ERRNO
#define ATTR_MATHFN_ERRNO (flag_errno_math ? \
        ATTR_NOTHROW_LIST : ATTR_CONST_NOTHROW_LIST)

/* Define an attribute list for math functions that are normally
   "pure" but if flag_unsafe_math_optimizations is set they are
   instead "const".  This distinction accounts for the fact that some
   math functions check the rounding mode which is akin to examining
   global memory.  In "unsafe" mode we can be less careful.  */
#undef ATTR_MATHFN_FPROUNDING
#define ATTR_MATHFN_FPROUNDING (flag_unsafe_math_optimizations ? \
        ATTR_CONST_NOTHROW_LIST : ATTR_PURE_NOTHROW_LIST)

/* Define an attribute list for math functions that are normally
   "impure" because some of them may write into global memory for
   `errno'.  If !flag_errno_math, we can possibly use "pure" or
   "const" depending on whether we care about FP rounding.  */
#undef ATTR_MATHFN_FPROUNDING_ERRNO
#define ATTR_MATHFN_FPROUNDING_ERRNO (flag_errno_math ? \
        ATTR_NOTHROW_LIST : ATTR_MATHFN_FPROUNDING)

/* Define an attribute list for math functions that need to mind FP
   rounding, but because they store into memory they are never
   "const" or "pure".  Use of this macro is mainly for documentation
   and maintenance purposes.  */
#undef ATTR_MATHFN_FPROUNDING_STORE
#define ATTR_MATHFN_FPROUNDING_STORE ATTR_NOTHROW_LIST

/* Category: math builtins.
*/ DEF_LIB_BUILTIN (BUILT_IN_ACOS, "acos", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSF, "acosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ACOSH, "acosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ACOSHF, "acoshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ACOSHL, "acoshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSL, "acosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_ASIN, "asin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINF, "asinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ASINH, "asinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ASINHF, "asinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ASINHL, "asinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINL, "asinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_ATAN, "atan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_ATAN2, "atan2", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2F, "atan2f", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2L, "atan2l", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANF, "atanf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ATANH, "atanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ATANHF, "atanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ATANHL, "atanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANL, "atanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CBRT, "cbrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CBRTF, "cbrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CBRTL, "cbrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_CEIL, "ceil", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILF, "ceilf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILL, "ceill", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_COPYSIGN, "copysign", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_COPYSIGNF, "copysignf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_COPYSIGNL, "copysignl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_COS, "cos", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSF, "cosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_COSH, "cosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHF, "coshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHL, "coshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSL, "cosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_EXT_LIB_BUILTIN (BUILT_IN_DREM, "drem", 
BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_DREMF, "dremf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_DREML, "dreml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERF, "erf", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ERFC, "erfc", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERFCF, "erfcf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERFCL, "erfcl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERFF, "erff", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ERFL, "erfl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_EXP, "exp", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXP10, "exp10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXP10F, "exp10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXP10L, "exp10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXP2, "exp2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXP2F, "exp2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXP2L, "exp2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPF, "expf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPL, "expl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXPM1, "expm1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXPM1F, "expm1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXPM1L, "expm1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_FABS, "fabs", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSF, "fabsf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSL, "fabsl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FDIM, "fdim", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_FDIMF, "fdimf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_FDIML, "fdiml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_FLOOR, "floor", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORF, "floorf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORL, "floorl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMA, "fma", BT_FN_DOUBLE_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_FMAF, "fmaf", BT_FN_FLOAT_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_FMAL, "fmal", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_FMAX, "fmax", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMAXF, "fmaxf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMAXL, "fmaxl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMIN, "fmin", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) 
DEF_C99_BUILTIN (BUILT_IN_FMINF, "fminf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMINL, "fminl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_FMOD, "fmod", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODF, "fmodf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODL, "fmodl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_FREXP, "frexp", BT_FN_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPF, "frexpf", BT_FN_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPL, "frexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMA, "gamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAF, "gammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAL, "gammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_GCC_BUILTIN (BUILT_IN_HUGE_VAL, "huge_val", BT_FN_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALF, "huge_valf", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALL, "huge_vall", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_HYPOT, "hypot", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_HYPOTF, "hypotf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_HYPOTL, "hypotl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ILOGB, "ilogb", BT_FN_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ILOGBF, "ilogbf", BT_FN_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ILOGBL, "ilogbl", BT_FN_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_GCC_BUILTIN (BUILT_IN_INF, "inf", BT_FN_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INFF, "inff", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INFL, "infl", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_J0, "j0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J0F, "j0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J0L, "j0l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J1, "j1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J1F, "j1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J1L, "j1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_JN, "jn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_JNF, "jnf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_JNL, "jnl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_LDEXP, "ldexp", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPF, "ldexpf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPL, "ldexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LGAMMA, "lgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LGAMMAF, 
"lgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LGAMMAL, "lgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLRINT, "llrint", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLRINTF, "llrintf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLRINTL, "llrintl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLROUND, "llround", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLROUNDF, "llroundf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLROUNDL, "llroundl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_MATHFN_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_LOG, "log", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_LOG10, "log10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10F, "log10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10L, "log10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG1P, "log1p", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG1PF, "log1pf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG1PL, "log1pl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG2, "log2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG2F, "log2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG2L, "log2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOGB, "logb", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOGBF, "logbf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOGBL, "logbl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGF, "logf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGL, "logl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LRINT, "lrint", BT_FN_LONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LRINTF, "lrintf", BT_FN_LONG_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LRINTL, "lrintl", BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LROUND, "lround", BT_FN_LONG_DOUBLE, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LROUNDF, "lroundf", BT_FN_LONG_FLOAT, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LROUNDL, "lroundl", BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_MODF, "modf", BT_FN_DOUBLE_DOUBLE_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFF, "modff", BT_FN_FLOAT_FLOAT_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFL, "modfl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_GCC_BUILTIN (BUILT_IN_NAN, "nan", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANF, "nanf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANL, "nanl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANS, "nans", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANSF, "nansf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN 
(BUILT_IN_NANSL, "nansl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_C99_BUILTIN (BUILT_IN_NEARBYINT, "nearbyint", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_NEARBYINTF, "nearbyintf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_NEARBYINTL, "nearbyintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_NEXTAFTER, "nextafter", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERF, "nextafterf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERL, "nextafterl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARD, "nexttoward", BT_FN_DOUBLE_DOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDF, "nexttowardf", BT_FN_FLOAT_FLOAT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDL, "nexttowardl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_POW, "pow", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10, "pow10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10F, "pow10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10L, "pow10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_POWF, "powf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_POWL, "powl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMAINDER, "remainder", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMAINDERF, "remainderf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMAINDERL, "remainderl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMQUO, "remquo", BT_FN_DOUBLE_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_BUILTIN (BUILT_IN_REMQUOF, "remquof", BT_FN_FLOAT_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_BUILTIN (BUILT_IN_REMQUOL, "remquol", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_BUILTIN (BUILT_IN_RINT, "rint", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_RINTF, "rintf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_RINTL, "rintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ROUND, "round", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ROUNDF, "roundf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ROUNDL, "roundl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALB, "scalb", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBF, "scalbf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBL, "scalbl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBLN, "scalbln", BT_FN_DOUBLE_DOUBLE_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBLNF, "scalblnf", BT_FN_FLOAT_FLOAT_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBLNL, "scalblnl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONG, 
ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBN, "scalbn", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBNF, "scalbnf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBNL, "scalbnl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBIT, "signbit", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITF, "signbitf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITL, "signbitl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICAND, "significand", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDF, "significandf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDL, "significandl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_SIN, "sin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOS, "sincos", BT_FN_VOID_DOUBLE_DOUBLEPTR_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSF, "sincosf", BT_FN_VOID_FLOAT_FLOATPTR_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSL, "sincosl", BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINF, "sinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_SINH, "sinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHF, "sinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHL, "sinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINL, "sinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_SQRT, "sqrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTF, "sqrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTL, "sqrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_TAN, "tan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANF, "tanf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_TANH, "tanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHF, "tanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHL, "tanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANL, "tanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_TGAMMA, "tgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_TGAMMAF, "tgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_TGAMMAL, "tgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_TRUNC, "trunc", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_TRUNCF, "truncf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_TRUNCL, "truncl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0, "y0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0F, "y0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0L, "y0l", 
BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1, "y1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1F, "y1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1L, "y1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_YN, "yn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_YNF, "ynf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_YNL, "ynl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) /* Category: _Complex math builtins. */ /* The C99 clog function conflicts with C++ iostreams clog, see http://gcc.gnu.org/ml/gcc-patches/2003-09/msg00510.html */ DEF_C99_BUILTIN (BUILT_IN_CABS, "cabs", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CABSF, "cabsf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CABSL, "cabsl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOS, "cacos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSF, "cacosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSH, "cacosh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSHF, "cacoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSHL, "cacoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSL, "cacosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CARG, "carg", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CARGF, "cargf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CARGL, "cargl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASIN, "casin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINF, "casinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINH, "casinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINHF, "casinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINHL, "casinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINL, "casinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATAN, "catan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANF, "catanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANH, "catanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANHF, "catanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANHL, "catanhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANL, "catanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOS, "ccos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSF, "ccosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSH, "ccosh", 
BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSHF, "ccoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSHL, "ccoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSL, "ccosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CEXP, "cexp", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CEXPF, "cexpf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CEXPL, "cexpl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CIMAG, "cimag", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CIMAGF, "cimagf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CIMAGL, "cimagl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) /*DEF_C99_BUILTIN (BUILT_IN_CLOG, "clog", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)*/ /*DEF_C99_BUILTIN (BUILT_IN_CLOGF, "clogf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)*/ /*DEF_C99_BUILTIN (BUILT_IN_CLOGL, "clogl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)*/ DEF_C99_BUILTIN (BUILT_IN_CONJ, "conj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CONJF, "conjf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CONJL, "conjl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CPOW, "cpow", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPOWF, "cpowf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPOWL, "cpowl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPROJ, "cproj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPROJF, "cprojf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPROJL, "cprojl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CREAL, "creal", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CREALF, "crealf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CREALL, "creall", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CSIN, "csin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINF, "csinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINH, "csinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINHF, "csinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINHL, "csinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINL, "csinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSQRT, "csqrt", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSQRTF, "csqrtf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSQRTL, "csqrtl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, 
ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTAN, "ctan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANF, "ctanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANH, "ctanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANHF, "ctanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANHL, "ctanhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANL, "ctanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) /* Category: string/memory builtins. */ /* bcmp, bcopy and bzero have traditionally accepted NULL pointers when the length parameter is zero, so don't apply attribute "nonnull". */ DEF_EXT_LIB_BUILTIN (BUILT_IN_BCMP, "bcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_BCOPY, "bcopy", BT_FN_VOID_CONST_PTR_PTR_SIZE, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_BZERO, "bzero", BT_FN_VOID_PTR_SIZE, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FFS, "ffs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSL, "ffsl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_INDEX, "index", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_MEMCMP, "memcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_MEMCPY, "memcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_MEMMOVE, "memmove", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMPCPY, "mempcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_MEMSET, "memset", BT_FN_PTR_PTR_INT_SIZE, ATTR_NOTHROW_NONNULL_1) DEF_EXT_LIB_BUILTIN (BUILT_IN_RINDEX, "rindex", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_EXT_LIB_BUILTIN (BUILT_IN_STPCPY, "stpcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCAT, "strcat", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCHR, "strchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRCMP, "strcmp", BT_FN_INT_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCPY, "strcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCSPN, "strcspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_STRDUP, "strdup", BT_FN_STRING_CONST_STRING, ATTR_MALLOC_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRLEN, "strlen", BT_FN_SIZE_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRNCAT, "strncat", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRNCMP, "strncmp", BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRNCPY, "strncpy", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRPBRK, "strpbrk", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRRCHR, "strrchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRSPN, 
"strspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRSTR, "strstr", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) /* Category: stdio builtins. */ DEF_LIB_BUILTIN (BUILT_IN_FPRINTF, "fprintf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) DEF_EXT_LIB_BUILTIN (BUILT_IN_FPRINTF_UNLOCKED, "fprintf_unlocked", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) DEF_LIB_BUILTIN (BUILT_IN_FPUTC, "fputc", BT_FN_INT_INT_FILEPTR, ATTR_NOTHROW_NONNULL_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTC_UNLOCKED, "fputc_unlocked", BT_FN_INT_INT_FILEPTR, ATTR_NOTHROW_NONNULL_2) DEF_LIB_BUILTIN (BUILT_IN_FPUTS, "fputs", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NOTHROW_NONNULL_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTS_UNLOCKED, "fputs_unlocked", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_FSCANF, "fscanf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_SCANF_2_3) DEF_LIB_BUILTIN (BUILT_IN_FWRITE, "fwrite", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NOTHROW_NONNULL_1_4) DEF_EXT_LIB_BUILTIN (BUILT_IN_FWRITE_UNLOCKED, "fwrite_unlocked", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NOTHROW_NONNULL_1_4) DEF_LIB_BUILTIN (BUILT_IN_PRINTF, "printf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_PRINTF_UNLOCKED, "printf_unlocked", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_1_2) DEF_LIB_BUILTIN (BUILT_IN_PUTCHAR, "putchar", BT_FN_INT_INT, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTCHAR_UNLOCKED, "putchar_unlocked", BT_FN_INT_INT, ATTR_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_PUTS, "puts", BT_FN_INT_CONST_STRING, ATTR_NOTHROW_NONNULL_1) DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTS_UNLOCKED, "puts_unlocked", BT_FN_INT_CONST_STRING, ATTR_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_SCANF, "scanf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_SCANF_1_2) DEF_C99_BUILTIN (BUILT_IN_SNPRINTF, "snprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_3_4) DEF_LIB_BUILTIN (BUILT_IN_SPRINTF, "sprintf", BT_FN_INT_STRING_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) DEF_LIB_BUILTIN (BUILT_IN_SSCANF, "sscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_FORMAT_SCANF_2_3) DEF_LIB_BUILTIN (BUILT_IN_VFPRINTF, "vfprintf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_2_0) DEF_C99_BUILTIN (BUILT_IN_VFSCANF, "vfscanf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_2_0) DEF_LIB_BUILTIN (BUILT_IN_VPRINTF, "vprintf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_1_0) DEF_C99_BUILTIN (BUILT_IN_VSCANF, "vscanf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_1_0) DEF_C99_BUILTIN (BUILT_IN_VSNPRINTF, "vsnprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_3_0) DEF_LIB_BUILTIN (BUILT_IN_VSPRINTF, "vsprintf", BT_FN_INT_STRING_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_2_0) DEF_C99_BUILTIN (BUILT_IN_VSSCANF, "vsscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_2_0) /* Category: ctype builtins. 
*/ DEF_LIB_BUILTIN (BUILT_IN_ISALNUM, "isalnum", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISALPHA, "isalpha", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISASCII, "isascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ISBLANK, "isblank", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISCNTRL, "iscntrl", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISDIGIT, "isdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISGRAPH, "isgraph", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISLOWER, "islower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISPRINT, "isprint", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISPUNCT, "ispunct", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISSPACE, "isspace", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISUPPER, "isupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISXDIGIT, "isxdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_TOASCII, "toascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_TOLOWER, "tolower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_TOUPPER, "toupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) /* Category: wctype builtins. */ DEF_C94_BUILTIN (BUILT_IN_ISWALNUM, "iswalnum", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWALPHA, "iswalpha", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ISWBLANK, "iswblank", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWCNTRL, "iswcntrl", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWDIGIT, "iswdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWGRAPH, "iswgraph", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWLOWER, "iswlower", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWPRINT, "iswprint", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWPUNCT, "iswpunct", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWSPACE, "iswspace", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWUPPER, "iswupper", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWXDIGIT, "iswxdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_TOWLOWER, "towlower", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_TOWUPPER, "towupper", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LIST) /* Category: miscellaneous builtins. 
*/ DEF_LIB_BUILTIN (BUILT_IN_ABORT, "abort", BT_FN_VOID, ATTR_NORETURN_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ABS, "abs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_AGGREGATE_INCOMING_ADDRESS, "aggregate_incoming_address", BT_FN_PTR_VAR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_ALLOCA, "alloca", BT_FN_PTR_SIZE, ATTR_MALLOC_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_APPLY, "apply", BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_APPLY_ARGS, "apply_args", BT_FN_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_ARGS_INFO, "args_info", BT_FN_INT_INT, ATTR_NULL) DEF_LIB_BUILTIN (BUILT_IN_CALLOC, "calloc", BT_FN_PTR_SIZE_SIZE, ATTR_MALLOC_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CLASSIFY_TYPE, "classify_type", BT_FN_INT_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_CLZ, "clz", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CLZL, "clzl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CLZLL, "clzll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CONSTANT_P, "constant_p", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CTZ, "ctz", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CTZL, "ctzl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CTZLL, "ctzll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_DCGETTEXT, "dcgettext", BT_FN_STRING_CONST_STRING_CONST_STRING_INT, ATTR_FORMAT_ARG_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_DGETTEXT, "dgettext", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_FORMAT_ARG_2) DEF_GCC_BUILTIN (BUILT_IN_DWARF_CFA, "dwarf_cfa", BT_FN_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_DWARF_SP_COLUMN, "dwarf_sp_column", BT_FN_UNSIGNED, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN, "eh_return", BT_FN_VOID_PTRMODE_PTR, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN_DATA_REGNO, "eh_return_data_regno", BT_FN_INT_INT, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECL, "execl", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLP, "execlp", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLE, "execle", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECV, "execv", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVP, "execvp", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVE, "execve", BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_EXIT, "exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EXPECT, "expect", BT_FN_LONG_LONG_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EXTEND_POINTER, "extend_pointer", BT_FN_WORD_PTR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EXTRACT_RETURN_ADDR, "extract_return_addr", BT_FN_PTR_PTR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_FRAME_ADDRESS, "frame_address", BT_FN_PTR_UNSIGNED, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_GETTEXT, "gettext", BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1) DEF_C99_BUILTIN (BUILT_IN_IMAXABS, "imaxabs", BT_FN_INTMAX_INTMAX, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INIT_DWARF_REG_SIZES, "init_dwarf_reg_size_table", BT_FN_VOID_PTR, ATTR_NULL) DEF_EXT_LIB_BUILTIN 
(BUILT_IN_FINITE, "finite", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEF, "finitef", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEL, "finitel", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_ISINF, "isinf", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFF, "isinff", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFL, "isinfl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_ISNAN, "isnan", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANF, "isnanf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANL, "isnanl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISGREATER, "isgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISGREATEREQUAL, "isgreaterequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISLESS, "isless", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISLESSEQUAL, "islessequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISLESSGREATER, "islessgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISUNORDERED, "isunordered", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_LABS, "labs", BT_FN_LONG_LONG, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_LLABS, "llabs", BT_FN_LONGLONG_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_LONGJMP, "longjmp", BT_FN_VOID_PTR_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_MALLOC, "malloc", BT_FN_PTR_SIZE, ATTR_MALLOC_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_NEXT_ARG, "next_arg", BT_FN_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_PARITY, "parity", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_PARITYL, "parityl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_PARITYLL, "parityll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_POPCOUNT, "popcount", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTL, "popcountl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTLL, "popcountll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_PREFETCH, "prefetch", BT_FN_VOID_CONST_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_RETURN, "return", BT_FN_VOID_PTR, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_RETURN_ADDRESS, "return_address", BT_FN_PTR_UNSIGNED, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_SAVEREGS, "saveregs", BT_FN_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_SETJMP, "setjmp", BT_FN_INT_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STACK_ALLOC, "stack_alloc", BT_FN_VOID_PTR_SIZE, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STACK_SAVE, "stack_save", BT_FN_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STACK_RESTORE, "stack_restore", BT_FN_VOID_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STDARG_START, "stdarg_start", BT_FN_VOID_VALIST_REF_VAR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_STRFMON, "strfmon", BT_FN_SSIZE_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_STRFMON_3_4) DEF_LIB_BUILTIN (BUILT_IN_STRFTIME, "strftime", BT_FN_SIZE_STRING_SIZE_CONST_STRING_CONST_PTR, ATTR_FORMAT_STRFTIME_3_0) DEF_GCC_BUILTIN (BUILT_IN_TRAP, "trap", BT_FN_VOID, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_UNWIND_INIT, "unwind_init", BT_FN_VOID, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_UPDATE_SETJMP_BUF, "update_setjmp_buf", 
BT_FN_VOID_PTR_INT, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_VA_COPY, "va_copy", BT_FN_VOID_VALIST_REF_VALIST_ARG, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_VA_END, "va_end", BT_FN_VOID_VALIST_REF, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_VA_START, "va_start", BT_FN_VOID_VALIST_REF_VAR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN__EXIT, "_exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN__EXIT2, "_Exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INIT_TRAMPOLINE, "init_trampoline", BT_FN_VOID_PTR_PTR_PTR, ATTR_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ADJUST_TRAMPOLINE, "adjust_trampoline", BT_FN_PTR_PTR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_NONLOCAL_GOTO, "nonlocal_goto", BT_FN_PTR_PTR, ATTR_NORETURN_NOTHROW_LIST) /* Profiling hooks. */ DEF_GCC_BUILTIN (BUILT_IN_PROFILE_FUNC_ENTER, "profile_func_enter", BT_FN_VOID, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_PROFILE_FUNC_EXIT, "profile_func_exit", BT_FN_VOID, ATTR_NULL) }; #undef DEF_BUILTIN /* Setup an array of _DECL trees, make sure each element is initialized to NULL_TREE. */ tree built_in_decls[(int) END_BUILTINS]; /* Declarations used when constructing the builtin implicitly in the compiler. It may be NULL_TREE when this is invalid (for instance runtime is not required to implement the function call in all cases). */ tree implicit_built_in_decls[(int) END_BUILTINS]; static int get_pointer_alignment (tree, unsigned int); static const char *c_getstr (tree); static rtx c_readstr (const char *, enum machine_mode); static int target_char_cast (tree, char *); static rtx get_memory_rtx (tree); static tree build_string_literal (int, const char *); static int apply_args_size (void); static int apply_result_size (void); #if defined (HAVE_untyped_call) || defined (HAVE_untyped_return) static rtx result_vector (int, rtx); #endif static rtx expand_builtin_setjmp (tree, rtx); static void expand_builtin_update_setjmp_buf (rtx); static void expand_builtin_prefetch (tree); static rtx expand_builtin_apply_args (void); static rtx expand_builtin_apply_args_1 (void); static rtx expand_builtin_apply (rtx, rtx, rtx); static void expand_builtin_return (rtx); static enum type_class type_to_class (tree); static rtx expand_builtin_classify_type (tree); static void expand_errno_check (tree, rtx); static rtx expand_builtin_mathfn (tree, rtx, rtx); static rtx expand_builtin_mathfn_2 (tree, rtx, rtx); static rtx expand_builtin_mathfn_3 (tree, rtx, rtx); static rtx expand_builtin_args_info (tree); static rtx expand_builtin_next_arg (tree); static rtx expand_builtin_va_start (tree); static rtx expand_builtin_va_end (tree); static rtx expand_builtin_va_copy (tree); static rtx expand_builtin_memcmp (tree, tree, rtx, enum machine_mode); static rtx expand_builtin_strcmp (tree, rtx, enum machine_mode); static rtx expand_builtin_strncmp (tree, rtx, enum machine_mode); static rtx builtin_memcpy_read_str (void *, HOST_WIDE_INT, enum machine_mode); static rtx expand_builtin_strcat (tree, rtx, enum machine_mode); static rtx expand_builtin_strncat (tree, rtx, enum machine_mode); static rtx expand_builtin_strspn (tree, rtx, enum machine_mode); static rtx expand_builtin_strcspn (tree, rtx, enum machine_mode); static rtx expand_builtin_memcpy (tree, rtx, enum machine_mode); static rtx expand_builtin_mempcpy (tree, rtx, enum machine_mode, int); static rtx expand_builtin_memmove (tree, rtx, enum machine_mode); static rtx expand_builtin_bcopy (tree); static rtx expand_builtin_strcpy (tree, rtx, enum machine_mode); static rtx 
expand_builtin_stpcpy (tree, rtx, enum machine_mode); static rtx builtin_strncpy_read_str (void *, HOST_WIDE_INT, enum machine_mode); static rtx expand_builtin_strncpy (tree, rtx, enum machine_mode); static rtx builtin_memset_read_str (void *, HOST_WIDE_INT, enum machine_mode); static rtx builtin_memset_gen_str (void *, HOST_WIDE_INT, enum machine_mode); static rtx expand_builtin_memset (tree, rtx, enum machine_mode); static rtx expand_builtin_bzero (tree); static rtx expand_builtin_strlen (tree, rtx, enum machine_mode); static rtx expand_builtin_strstr (tree, rtx, enum machine_mode); static rtx expand_builtin_strpbrk (tree, rtx, enum machine_mode); static rtx expand_builtin_strchr (tree, rtx, enum machine_mode); static rtx expand_builtin_strrchr (tree, rtx, enum machine_mode); static rtx expand_builtin_alloca (tree, rtx); static rtx expand_builtin_unop (enum machine_mode, tree, rtx, rtx, optab); static rtx expand_builtin_frame_address (tree, tree); static rtx expand_builtin_fputs (tree, rtx, bool); static rtx expand_builtin_printf (tree, rtx, enum machine_mode, bool); static rtx expand_builtin_fprintf (tree, rtx, enum machine_mode, bool); static rtx expand_builtin_sprintf (tree, rtx, enum machine_mode); static tree stabilize_va_list (tree, int); static rtx expand_builtin_expect (tree, rtx); static tree fold_builtin_constant_p (tree); static tree fold_builtin_classify_type (tree); static tree fold_builtin_inf (tree, int); static tree fold_builtin_nan (tree, tree, int); static int validate_arglist (tree, ...); static bool integer_valued_real_p (tree); static tree fold_trunc_transparent_mathfn (tree); static bool readonly_data_expr (tree); static rtx expand_builtin_fabs (tree, rtx, rtx); static rtx expand_builtin_cabs (tree, rtx); static rtx expand_builtin_signbit (tree, rtx); static tree fold_builtin_cabs (tree, tree); static tree fold_builtin_trunc (tree); static tree fold_builtin_floor (tree); static tree fold_builtin_ceil (tree); static tree fold_builtin_round (tree); static tree fold_builtin_bitop (tree); static tree fold_builtin_memcpy (tree); static tree fold_builtin_mempcpy (tree); static tree fold_builtin_memmove (tree); static tree fold_builtin_strcpy (tree); static tree fold_builtin_strncpy (tree); static tree fold_builtin_strchr (tree, bool); static tree fold_builtin_memcmp (tree); static tree fold_builtin_strcmp (tree); static tree fold_builtin_strncmp (tree); static tree fold_builtin_signbit (tree); static tree fold_builtin_copysign (tree, tree); static tree fold_builtin_isascii (tree); static tree fold_builtin_toascii (tree); static tree fold_builtin_isdigit (tree); static tree fold_builtin_fabs (tree, tree); static tree fold_builtin_abs (tree, tree); static tree fold_builtin_unordered_cmp (tree, enum tree_code, enum tree_code); static tree simplify_builtin_memcmp (tree); static tree simplify_builtin_strcmp (tree); static tree simplify_builtin_strncmp (tree); static tree simplify_builtin_strpbrk (tree); static tree simplify_builtin_strstr (tree); static tree simplify_builtin_strchr (tree); static tree simplify_builtin_strrchr (tree); static tree simplify_builtin_strcat (tree); static tree simplify_builtin_strncat (tree); static tree simplify_builtin_strspn (tree); static tree simplify_builtin_strcspn (tree); static void simplify_builtin_next_arg (tree); static void simplify_builtin_va_start (tree); static tree simplify_builtin_sprintf (tree, int); /* Return the alignment in bits of EXP, a pointer valued expression. But don't return more than MAX_ALIGN no matter what. 
The alignment returned is, by default, the alignment of the thing that EXP points to. If it is not a POINTER_TYPE, 0 is returned. Otherwise, look at the expression to see if we can do better, i.e., if the expression is actually pointing at an object whose alignment is tighter. */ static int get_pointer_alignment (tree exp, unsigned int max_align) { unsigned int align, inner; if (TREE_CODE (TREE_TYPE (exp)) != POINTER_TYPE) return 0; align = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (exp))); align = MIN (align, max_align); while (1) { switch (TREE_CODE (exp)) { case NOP_EXPR: case CONVERT_EXPR: case NON_LVALUE_EXPR: exp = TREE_OPERAND (exp, 0); if (TREE_CODE (TREE_TYPE (exp)) != POINTER_TYPE) return align; inner = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (exp))); align = MIN (inner, max_align); break; case PLUS_EXPR: /* If sum of pointer + int, restrict our maximum alignment to that imposed by the integer. If not, we can't do any better than ALIGN. */ if (! host_integerp (TREE_OPERAND (exp, 1), 1)) return align; while (((tree_low_cst (TREE_OPERAND (exp, 1), 1)) & (max_align / BITS_PER_UNIT - 1)) != 0) max_align >>= 1; exp = TREE_OPERAND (exp, 0); break; case ADDR_EXPR: /* See what we are pointing at and look at its alignment. */ exp = TREE_OPERAND (exp, 0); if (TREE_CODE (exp) == FUNCTION_DECL) align = FUNCTION_BOUNDARY; else if (DECL_P (exp)) align = DECL_ALIGN (exp); #ifdef CONSTANT_ALIGNMENT else if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c') align = CONSTANT_ALIGNMENT (exp, align); #endif return MIN (align, max_align); default: return align; } } } /* Compute the length of a C string. TREE_STRING_LENGTH is not the right way, because it could contain a zero byte in the middle. TREE_STRING_LENGTH is the size of the character array, not the string. ONLY_VALUE should be nonzero if the result is not going to be emitted into the instruction stream and zero if it is going to be expanded. E.g. with i++ ? "foo" : "bar", if ONLY_VALUE is nonzero, constant 3 is returned, otherwise NULL, since len = c_strlen (src, 1); if (len) expand_expr (len, ...); would not evaluate the side-effects. The value returned is of type `ssizetype'. Unfortunately, string_constant can't access the values of const char arrays with initializers, so neither can we do so here. */ tree c_strlen (tree src, int only_value) { tree offset_node; HOST_WIDE_INT offset; int max; const char *ptr; STRIP_NOPS (src); if (TREE_CODE (src) == COND_EXPR && (only_value || !TREE_SIDE_EFFECTS (TREE_OPERAND (src, 0)))) { tree len1, len2; len1 = c_strlen (TREE_OPERAND (src, 1), only_value); len2 = c_strlen (TREE_OPERAND (src, 2), only_value); if (tree_int_cst_equal (len1, len2)) return len1; } if (TREE_CODE (src) == COMPOUND_EXPR && (only_value || !TREE_SIDE_EFFECTS (TREE_OPERAND (src, 0)))) return c_strlen (TREE_OPERAND (src, 1), only_value); src = string_constant (src, &offset_node); if (src == 0) return 0; max = TREE_STRING_LENGTH (src) - 1; ptr = TREE_STRING_POINTER (src); if (offset_node && TREE_CODE (offset_node) != INTEGER_CST) { /* If the string has an internal zero byte (e.g., "foo\0bar"), we can't compute the offset to the following null if we don't know where to start searching for it. */ int i; for (i = 0; i < max; i++) if (ptr[i] == 0) return 0; /* We don't know the starting offset, but we do know that the string has no internal zero bytes. We can assume that the offset falls within the bounds of the string; otherwise, the programmer deserves what he gets. Subtract the offset from the length of the string, and return that. 
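(As an illustrative example, not from the original comment: for c_strlen ("hello" + i, ...) with a variable I, MAX is 5 and the value returned is the tree `5 - i', which is the correct length for any in-bounds I because the literal contains no embedded nul.)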
This would perhaps not be valid if we were dealing with named arrays in addition to literal string constants. */ return size_diffop (size_int (max), offset_node); } /* We have a known offset into the string. Start searching there for a null character if we can represent it as a single HOST_WIDE_INT. */ if (offset_node == 0) offset = 0; else if (! host_integerp (offset_node, 0)) offset = -1; else offset = tree_low_cst (offset_node, 0); /* If the offset is known to be out of bounds, warn, and call strlen at runtime. */ if (offset < 0 || offset > max) { warning ("offset outside bounds of constant string"); return 0; } /* Use strlen to search for the first zero byte. Since any strings constructed with build_string will have nulls appended, we win even if we get handed something like (char[4])"abcd". Since OFFSET is our starting index into the string, no further calculation is needed. */ return ssize_int (strlen (ptr + offset)); } /* Return a char pointer for a C string if it is a string constant or sum of string constant and integer constant. */ static const char * c_getstr (tree src) { tree offset_node; src = string_constant (src, &offset_node); if (src == 0) return 0; if (offset_node == 0) return TREE_STRING_POINTER (src); else if (!host_integerp (offset_node, 1) || compare_tree_int (offset_node, TREE_STRING_LENGTH (src) - 1) > 0) return 0; return TREE_STRING_POINTER (src) + tree_low_cst (offset_node, 1); } /* Return a CONST_INT or CONST_DOUBLE corresponding to target reading GET_MODE_BITSIZE (MODE) bits from string constant STR. */ static rtx c_readstr (const char *str, enum machine_mode mode) { HOST_WIDE_INT c[2]; HOST_WIDE_INT ch; unsigned int i, j; if (GET_MODE_CLASS (mode) != MODE_INT) abort (); c[0] = 0; c[1] = 0; ch = 1; for (i = 0; i < GET_MODE_SIZE (mode); i++) { j = i; if (WORDS_BIG_ENDIAN) j = GET_MODE_SIZE (mode) - i - 1; if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN && GET_MODE_SIZE (mode) > UNITS_PER_WORD) j = j + UNITS_PER_WORD - 2 * (j % UNITS_PER_WORD) - 1; j *= BITS_PER_UNIT; if (j > 2 * HOST_BITS_PER_WIDE_INT) abort (); if (ch) ch = (unsigned char) str[i]; c[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT); } return immed_double_const (c[0], c[1], mode); } /* Cast a target constant CST to target CHAR and if that value fits into host char type, return zero and put that value into variable pointed by P. */ static int target_char_cast (tree cst, char *p) { unsigned HOST_WIDE_INT val, hostval; if (!host_integerp (cst, 1) || CHAR_TYPE_SIZE > HOST_BITS_PER_WIDE_INT) return 1; val = tree_low_cst (cst, 1); if (CHAR_TYPE_SIZE < HOST_BITS_PER_WIDE_INT) val &= (((unsigned HOST_WIDE_INT) 1) << CHAR_TYPE_SIZE) - 1; hostval = val; if (HOST_BITS_PER_CHAR < HOST_BITS_PER_WIDE_INT) hostval &= (((unsigned HOST_WIDE_INT) 1) << HOST_BITS_PER_CHAR) - 1; if (val != hostval) return 1; *p = hostval; return 0; } /* Similar to save_expr, but assumes that arbitrary code is not executed in between the multiple evaluations. In particular, we assume that a non-addressable local variable will not be modified. */ static tree builtin_save_expr (tree exp) { if (TREE_ADDRESSABLE (exp) == 0 && (TREE_CODE (exp) == PARM_DECL || (TREE_CODE (exp) == VAR_DECL && !TREE_STATIC (exp)))) return exp; return save_expr (exp); } /* Given TEM, a pointer to a stack frame, follow the dynamic chain COUNT times to get the address of either a higher stack frame, or a return address located within it (depending on FNDECL_CODE). 
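As an illustrative example (not part of the original comment), source-level uses such as __builtin_return_address (0) and __builtin_frame_address (1) both funnel through this routine; COUNT selects how many frames to walk back, and FNDECL_CODE selects whether the frame address itself or the return address saved within that frame is produced.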
*/ rtx expand_builtin_return_addr (enum built_in_function fndecl_code, int count, rtx tem) { int i; /* Some machines need special handling before we can access arbitrary frames. For example, on the sparc, we must first flush all register windows to the stack. */ #ifdef SETUP_FRAME_ADDRESSES if (count > 0) SETUP_FRAME_ADDRESSES (); #endif /* On the sparc, the return address is not in the frame, it is in a register. There is no way to access it off of the current frame pointer, but it can be accessed off the previous frame pointer by reading the value from the register window save area. */ #ifdef RETURN_ADDR_IN_PREVIOUS_FRAME if (fndecl_code == BUILT_IN_RETURN_ADDRESS) count--; #endif /* Scan back COUNT frames to the specified frame. */ for (i = 0; i < count; i++) { /* Assume the dynamic chain pointer is in the word that the frame address points to, unless otherwise specified. */ #ifdef DYNAMIC_CHAIN_ADDRESS tem = DYNAMIC_CHAIN_ADDRESS (tem); #endif tem = memory_address (Pmode, tem); tem = gen_rtx_MEM (Pmode, tem); set_mem_alias_set (tem, get_frame_alias_set ()); tem = copy_to_reg (tem); } /* For __builtin_frame_address, return what we've got. */ if (fndecl_code == BUILT_IN_FRAME_ADDRESS) return tem; /* For __builtin_return_address, Get the return address from that frame. */ #ifdef RETURN_ADDR_RTX tem = RETURN_ADDR_RTX (count, tem); #else tem = memory_address (Pmode, plus_constant (tem, GET_MODE_SIZE (Pmode))); tem = gen_rtx_MEM (Pmode, tem); set_mem_alias_set (tem, get_frame_alias_set ()); #endif return tem; } /* Alias set used for setjmp buffer. */ static HOST_WIDE_INT setjmp_alias_set = -1; /* Construct the leading half of a __builtin_setjmp call. Control will return to RECEIVER_LABEL. This is used directly by sjlj exception handling code. */ void expand_builtin_setjmp_setup (rtx buf_addr, rtx receiver_label) { enum machine_mode sa_mode = STACK_SAVEAREA_MODE (SAVE_NONLOCAL); rtx stack_save; rtx mem; if (setjmp_alias_set == -1) setjmp_alias_set = new_alias_set (); buf_addr = convert_memory_address (Pmode, buf_addr); buf_addr = force_reg (Pmode, force_operand (buf_addr, NULL_RTX)); emit_queue (); /* We store the frame pointer and the address of receiver_label in the buffer and use the rest of it for the stack save area, which is machine-dependent. */ mem = gen_rtx_MEM (Pmode, buf_addr); set_mem_alias_set (mem, setjmp_alias_set); emit_move_insn (mem, targetm.builtin_setjmp_frame_value ()); mem = gen_rtx_MEM (Pmode, plus_constant (buf_addr, GET_MODE_SIZE (Pmode))), set_mem_alias_set (mem, setjmp_alias_set); emit_move_insn (validize_mem (mem), force_reg (Pmode, gen_rtx_LABEL_REF (Pmode, receiver_label))); stack_save = gen_rtx_MEM (sa_mode, plus_constant (buf_addr, 2 * GET_MODE_SIZE (Pmode))); set_mem_alias_set (stack_save, setjmp_alias_set); emit_stack_save (SAVE_NONLOCAL, &stack_save, NULL_RTX); /* If there is further processing to do, do it. */ #ifdef HAVE_builtin_setjmp_setup if (HAVE_builtin_setjmp_setup) emit_insn (gen_builtin_setjmp_setup (buf_addr)); #endif /* Tell optimize_save_area_alloca that extra work is going to need to go on during alloca. */ current_function_calls_setjmp = 1; /* Set this so all the registers get saved in our frame; we need to be able to copy the saved values for any registers from frames we unwind. */ current_function_has_nonlocal_label = 1; } /* Construct the trailing part of a __builtin_setjmp call. This is used directly by sjlj exception handling code. 
*/ void expand_builtin_setjmp_receiver (rtx receiver_label ATTRIBUTE_UNUSED) { /* Clobber the FP when we get here, so we have to make sure it's marked as used by this function. */ emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx)); /* Mark the static chain as clobbered here so life information doesn't get messed up for it. */ emit_insn (gen_rtx_CLOBBER (VOIDmode, static_chain_rtx)); /* Now put in the code to restore the frame pointer, and argument pointer, if needed. The code below is from expand_end_bindings in stmt.c; see detailed documentation there. */ #ifdef HAVE_nonlocal_goto if (! HAVE_nonlocal_goto) #endif emit_move_insn (virtual_stack_vars_rtx, hard_frame_pointer_rtx); #if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM if (fixed_regs[ARG_POINTER_REGNUM]) { #ifdef ELIMINABLE_REGS size_t i; static const struct elims {const int from, to;} elim_regs[] = ELIMINABLE_REGS; for (i = 0; i < ARRAY_SIZE (elim_regs); i++) if (elim_regs[i].from == ARG_POINTER_REGNUM && elim_regs[i].to == HARD_FRAME_POINTER_REGNUM) break; if (i == ARRAY_SIZE (elim_regs)) #endif { /* Now restore our arg pointer from the address at which it was saved in our stack frame. */ emit_move_insn (virtual_incoming_args_rtx, copy_to_reg (get_arg_pointer_save_area (cfun))); } } #endif #ifdef HAVE_builtin_setjmp_receiver if (HAVE_builtin_setjmp_receiver) emit_insn (gen_builtin_setjmp_receiver (receiver_label)); else #endif #ifdef HAVE_nonlocal_goto_receiver if (HAVE_nonlocal_goto_receiver) emit_insn (gen_nonlocal_goto_receiver ()); else #endif { /* Nothing */ } /* @@@ This is a kludge. Not all machine descriptions define a blockage insn, but we must not allow the code we just generated to be reordered by scheduling. Specifically, the update of the frame pointer must happen immediately, not later. So emit an ASM_INPUT to act as blockage insn. */ emit_insn (gen_rtx_ASM_INPUT (VOIDmode, "")); } /* __builtin_setjmp is passed a pointer to an array of five words (not all will be used on all machines). It operates similarly to the C library function of the same name, but is more efficient. Much of the code below (and for longjmp) is copied from the handling of non-local gotos. NOTE: This is intended for use by GNAT and the exception handling scheme in the compiler and will only work in the method used by them. */ static rtx expand_builtin_setjmp (tree arglist, rtx target) { rtx buf_addr, next_lab, cont_lab; if (!validate_arglist (arglist, POINTER_TYPE, VOID_TYPE)) return NULL_RTX; if (target == 0 || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER) target = gen_reg_rtx (TYPE_MODE (integer_type_node)); buf_addr = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0); next_lab = gen_label_rtx (); cont_lab = gen_label_rtx (); expand_builtin_setjmp_setup (buf_addr, next_lab); /* Set TARGET to zero and branch to the continue label. Use emit_jump to ensure that pending stack adjustments are flushed. */ emit_move_insn (target, const0_rtx); emit_jump (cont_lab); emit_label (next_lab); expand_builtin_setjmp_receiver (next_lab); /* Set TARGET to one. */ emit_move_insn (target, const1_rtx); emit_label (cont_lab); /* Tell flow about the strange goings on. Putting `next_lab' on `nonlocal_goto_handler_labels' to indicates that function calls may traverse the arc back to this label. 
*/ current_function_has_nonlocal_label = 1; nonlocal_goto_handler_labels = gen_rtx_EXPR_LIST (VOIDmode, next_lab, nonlocal_goto_handler_labels); return target; } /* __builtin_longjmp is passed a pointer to an array of five words (not all will be used on all machines). It operates similarly to the C library function of the same name, but is more efficient. Much of the code below is copied from the handling of non-local gotos. NOTE: This is intended for use by GNAT and the exception handling scheme in the compiler and will only work in the method used by them. */ void expand_builtin_longjmp (rtx buf_addr, rtx value) { rtx fp, lab, stack, insn, last; enum machine_mode sa_mode = STACK_SAVEAREA_MODE (SAVE_NONLOCAL); if (setjmp_alias_set == -1) setjmp_alias_set = new_alias_set (); buf_addr = convert_memory_address (Pmode, buf_addr); buf_addr = force_reg (Pmode, buf_addr); /* We used to store value in static_chain_rtx, but that fails if pointers are smaller than integers. We instead require that the user must pass a second argument of 1, because that is what builtin_setjmp will return. This also makes EH slightly more efficient, since we are no longer copying around a value that we don't care about. */ if (value != const1_rtx) abort (); current_function_calls_longjmp = 1; last = get_last_insn (); #ifdef HAVE_builtin_longjmp if (HAVE_builtin_longjmp) emit_insn (gen_builtin_longjmp (buf_addr)); else #endif { fp = gen_rtx_MEM (Pmode, buf_addr); lab = gen_rtx_MEM (Pmode, plus_constant (buf_addr, GET_MODE_SIZE (Pmode))); stack = gen_rtx_MEM (sa_mode, plus_constant (buf_addr, 2 * GET_MODE_SIZE (Pmode))); set_mem_alias_set (fp, setjmp_alias_set); set_mem_alias_set (lab, setjmp_alias_set); set_mem_alias_set (stack, setjmp_alias_set); /* Pick up FP, label, and SP from the block and jump. This code is from expand_goto in stmt.c; see there for detailed comments. */ #if HAVE_nonlocal_goto if (HAVE_nonlocal_goto) /* We have to pass a value to the nonlocal_goto pattern that will get copied into the static_chain pointer, but it does not matter what that value is, because builtin_setjmp does not use it. */ emit_insn (gen_nonlocal_goto (value, lab, stack, fp)); else #endif { lab = copy_to_reg (lab); emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))); emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx))); emit_move_insn (hard_frame_pointer_rtx, fp); emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX); emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx)); emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx)); emit_indirect_jump (lab); } } /* Search backwards and mark the jump insn as a non-local goto. Note that this precludes the use of __builtin_longjmp to a __builtin_setjmp target in the same function. However, we've already cautioned the user that these functions are for internal exception handling use only. */ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) { if (insn == last) abort (); if (GET_CODE (insn) == JUMP_INSN) { REG_NOTES (insn) = alloc_EXPR_LIST (REG_NON_LOCAL_GOTO, const0_rtx, REG_NOTES (insn)); break; } else if (GET_CODE (insn) == CALL_INSN) break; } } /* Expand a call to __builtin_nonlocal_goto. We're passed the target label and the address of the save area. 
*/ static rtx expand_builtin_nonlocal_goto (tree arglist) { tree t_label, t_save_area; rtx r_label, r_save_area, r_fp, r_sp, insn; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return NULL_RTX; t_label = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); t_save_area = TREE_VALUE (arglist); r_label = expand_expr (t_label, NULL_RTX, VOIDmode, 0); r_save_area = expand_expr (t_save_area, NULL_RTX, VOIDmode, 0); r_fp = gen_rtx_MEM (Pmode, r_save_area); r_sp = gen_rtx_MEM (STACK_SAVEAREA_MODE (SAVE_NONLOCAL), plus_constant (r_save_area, GET_MODE_SIZE (Pmode))); current_function_has_nonlocal_goto = 1; #if HAVE_nonlocal_goto /* ??? We no longer need to pass the static chain value, afaik. */ if (HAVE_nonlocal_goto) emit_insn (gen_nonlocal_goto (const0_rtx, r_label, r_sp, r_fp)); else #endif { r_label = copy_to_reg (r_label); emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))); emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx))); /* Restore frame pointer for containing function. This sets the actual hard register used for the frame pointer to the location of the function's incoming static chain info. The non-local goto handler will then adjust it to contain the proper value and reload the argument pointer, if needed. */ emit_move_insn (hard_frame_pointer_rtx, r_fp); emit_stack_restore (SAVE_NONLOCAL, r_sp, NULL_RTX); /* USE of hard_frame_pointer_rtx added for consistency; not clear if really needed. */ emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx)); emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx)); emit_indirect_jump (r_label); } /* Search backwards to the jump insn and mark it as a non-local goto. */ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) { if (GET_CODE (insn) == JUMP_INSN) { REG_NOTES (insn) = alloc_EXPR_LIST (REG_NON_LOCAL_GOTO, const0_rtx, REG_NOTES (insn)); break; } else if (GET_CODE (insn) == CALL_INSN) break; } return const0_rtx; } /* __builtin_update_setjmp_buf is passed a pointer to an array of five words (not all will be used on all machines) that was passed to __builtin_setjmp. It updates the stack pointer in that block to correspond to the current stack pointer. */ static void expand_builtin_update_setjmp_buf (rtx buf_addr) { enum machine_mode sa_mode = Pmode; rtx stack_save; #ifdef HAVE_save_stack_nonlocal if (HAVE_save_stack_nonlocal) sa_mode = insn_data[(int) CODE_FOR_save_stack_nonlocal].operand[0].mode; #endif #ifdef STACK_SAVEAREA_MODE sa_mode = STACK_SAVEAREA_MODE (SAVE_NONLOCAL); #endif stack_save = gen_rtx_MEM (sa_mode, memory_address (sa_mode, plus_constant (buf_addr, 2 * GET_MODE_SIZE (Pmode)))); #ifdef HAVE_setjmp if (HAVE_setjmp) emit_insn (gen_setjmp ()); #endif emit_stack_save (SAVE_NONLOCAL, &stack_save, NULL_RTX); } /* Expand a call to __builtin_prefetch. For a target that does not support data prefetch, evaluate the memory address argument in case it has side effects. */ static void expand_builtin_prefetch (tree arglist) { tree arg0, arg1, arg2; rtx op0, op1, op2; if (!validate_arglist (arglist, POINTER_TYPE, 0)) return; arg0 = TREE_VALUE (arglist); /* Arguments 1 and 2 are optional; argument 1 (read/write) defaults to zero (read) and argument 2 (locality) defaults to 3 (high degree of locality). 
*/ if (TREE_CHAIN (arglist)) { arg1 = TREE_VALUE (TREE_CHAIN (arglist)); if (TREE_CHAIN (TREE_CHAIN (arglist))) arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); else arg2 = build_int_2 (3, 0); } else { arg1 = integer_zero_node; arg2 = build_int_2 (3, 0); } /* Argument 0 is an address. */ op0 = expand_expr (arg0, NULL_RTX, Pmode, EXPAND_NORMAL); /* Argument 1 (read/write flag) must be a compile-time constant int. */ if (TREE_CODE (arg1) != INTEGER_CST) { error ("second arg to `__builtin_prefetch' must be a constant"); arg1 = integer_zero_node; } op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); /* Argument 1 must be either zero or one. */ if (INTVAL (op1) != 0 && INTVAL (op1) != 1) { warning ("invalid second arg to __builtin_prefetch; using zero"); op1 = const0_rtx; } /* Argument 2 (locality) must be a compile-time constant int. */ if (TREE_CODE (arg2) != INTEGER_CST) { error ("third arg to `__builtin_prefetch' must be a constant"); arg2 = integer_zero_node; } op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); /* Argument 2 must be 0, 1, 2, or 3. */ if (INTVAL (op2) < 0 || INTVAL (op2) > 3) { warning ("invalid third arg to __builtin_prefetch; using zero"); op2 = const0_rtx; } #ifdef HAVE_prefetch if (HAVE_prefetch) { if ((! (*insn_data[(int) CODE_FOR_prefetch].operand[0].predicate) (op0, insn_data[(int) CODE_FOR_prefetch].operand[0].mode)) || (GET_MODE (op0) != Pmode)) { op0 = convert_memory_address (Pmode, op0); op0 = force_reg (Pmode, op0); } emit_insn (gen_prefetch (op0, op1, op2)); } else #endif op0 = protect_from_queue (op0, 0); /* Don't do anything with direct references to volatile memory, but generate code to handle other side effects. */ if (!MEM_P (op0) && side_effects_p (op0)) emit_insn (op0); } /* Get a MEM rtx for expression EXP which is the address of an operand to be used to be used in a string instruction (cmpstrsi, movstrsi, ..). */ static rtx get_memory_rtx (tree exp) { rtx addr = expand_expr (exp, NULL_RTX, ptr_mode, EXPAND_SUM); rtx mem; addr = convert_memory_address (Pmode, addr); mem = gen_rtx_MEM (BLKmode, memory_address (BLKmode, addr)); /* Get an expression we can use to find the attributes to assign to MEM. If it is an ADDR_EXPR, use the operand. Otherwise, dereference it if we can. First remove any nops. */ while ((TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR || TREE_CODE (exp) == NON_LVALUE_EXPR) && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (exp, 0)))) exp = TREE_OPERAND (exp, 0); if (TREE_CODE (exp) == ADDR_EXPR) { exp = TREE_OPERAND (exp, 0); set_mem_attributes (mem, exp, 0); } else if (POINTER_TYPE_P (TREE_TYPE (exp))) { exp = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (exp)), exp); /* memcpy, memset and other builtin stringops can alias with anything. */ set_mem_alias_set (mem, 0); } return mem; } /* Built-in functions to perform an untyped call and return. */ /* For each register that may be used for calling a function, this gives a mode used to copy the register's value. VOIDmode indicates the register is not used for calling a function. If the machine has register windows, this gives only the outbound registers. INCOMING_REGNO gives the corresponding inbound register. */ static enum machine_mode apply_args_mode[FIRST_PSEUDO_REGISTER]; /* For each register that may be used for returning values, this gives a mode used to copy the register's value. VOIDmode indicates the register is not used for returning values. If the machine has register windows, this gives only the outbound registers. 
INCOMING_REGNO gives the corresponding inbound register. */ static enum machine_mode apply_result_mode[FIRST_PSEUDO_REGISTER]; /* For each register that may be used for calling a function, this gives the offset of that register into the block returned by __builtin_apply_args. 0 indicates that the register is not used for calling a function. */ static int apply_args_reg_offset[FIRST_PSEUDO_REGISTER]; /* Return the size required for the block returned by __builtin_apply_args, and initialize apply_args_mode. */ static int apply_args_size (void) { static int size = -1; int align; unsigned int regno; enum machine_mode mode; /* The values computed by this function never change. */ if (size < 0) { /* The first value is the incoming arg-pointer. */ size = GET_MODE_SIZE (Pmode); /* The second value is the structure value address unless this is passed as an "invisible" first argument. */ if (targetm.calls.struct_value_rtx (cfun ? TREE_TYPE (cfun->decl) : 0, 0)) size += GET_MODE_SIZE (Pmode); for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (FUNCTION_ARG_REGNO_P (regno)) { mode = reg_raw_mode[regno]; if (mode == VOIDmode) abort (); align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT; if (size % align != 0) size = CEIL (size, align) * align; apply_args_reg_offset[regno] = size; size += GET_MODE_SIZE (mode); apply_args_mode[regno] = mode; } else { apply_args_mode[regno] = VOIDmode; apply_args_reg_offset[regno] = 0; } } return size; } /* Return the size required for the block returned by __builtin_apply, and initialize apply_result_mode. */ static int apply_result_size (void) { static int size = -1; int align, regno; enum machine_mode mode; /* The values computed by this function never change. */ if (size < 0) { size = 0; for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (FUNCTION_VALUE_REGNO_P (regno)) { mode = reg_raw_mode[regno]; if (mode == VOIDmode) abort (); align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT; if (size % align != 0) size = CEIL (size, align) * align; size += GET_MODE_SIZE (mode); apply_result_mode[regno] = mode; } else apply_result_mode[regno] = VOIDmode; /* Allow targets that use untyped_call and untyped_return to override the size so that machine-specific information can be stored here. */ #ifdef APPLY_RESULT_SIZE size = APPLY_RESULT_SIZE; #endif } return size; } #if defined (HAVE_untyped_call) || defined (HAVE_untyped_return) /* Create a vector describing the result block RESULT. If SAVEP is true, the result block is used to save the values; otherwise it is used to restore the values. */ static rtx result_vector (int savep, rtx result) { int regno, size, align, nelts; enum machine_mode mode; rtx reg, mem; rtx *savevec = alloca (FIRST_PSEUDO_REGISTER * sizeof (rtx)); size = nelts = 0; for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if ((mode = apply_result_mode[regno]) != VOIDmode) { align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT; if (size % align != 0) size = CEIL (size, align) * align; reg = gen_rtx_REG (mode, savep ? regno : INCOMING_REGNO (regno)); mem = adjust_address (result, mode, size); savevec[nelts++] = (savep ? gen_rtx_SET (VOIDmode, mem, reg) : gen_rtx_SET (VOIDmode, reg, mem)); size += GET_MODE_SIZE (mode); } return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelts, savevec)); } #endif /* HAVE_untyped_call or HAVE_untyped_return */ /* Save the state required to perform an untyped call with the same arguments as were passed to the current function. 
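The usual client is a forwarding wrapper along these lines (an illustrative sketch; the function pointer name and the byte count, an upper bound on the size of the pushed arguments, are made up):

      void *args = __builtin_apply_args ();
      void *ret  = __builtin_apply (real_fn, args, 64);
      __builtin_return (ret);

The block assembled below is what __builtin_apply later reloads into the argument registers before issuing the call.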
*/ static rtx expand_builtin_apply_args_1 (void) { rtx registers, tem; int size, align, regno; enum machine_mode mode; rtx struct_incoming_value = targetm.calls.struct_value_rtx (cfun ? TREE_TYPE (cfun->decl) : 0, 1); /* Create a block where the arg-pointer, structure value address, and argument registers can be saved. */ registers = assign_stack_local (BLKmode, apply_args_size (), -1); /* Walk past the arg-pointer and structure value address. */ size = GET_MODE_SIZE (Pmode); if (targetm.calls.struct_value_rtx (cfun ? TREE_TYPE (cfun->decl) : 0, 0)) size += GET_MODE_SIZE (Pmode); /* Save each register used in calling a function to the block. */ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if ((mode = apply_args_mode[regno]) != VOIDmode) { align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT; if (size % align != 0) size = CEIL (size, align) * align; tem = gen_rtx_REG (mode, INCOMING_REGNO (regno)); emit_move_insn (adjust_address (registers, mode, size), tem); size += GET_MODE_SIZE (mode); } /* Save the arg pointer to the block. */ tem = copy_to_reg (virtual_incoming_args_rtx); #ifdef STACK_GROWS_DOWNWARD /* We need the pointer as the caller actually passed them to us, not as we might have pretended they were passed. Make sure it's a valid operand, as emit_move_insn isn't expected to handle a PLUS. */ tem = force_operand (plus_constant (tem, current_function_pretend_args_size), NULL_RTX); #endif emit_move_insn (adjust_address (registers, Pmode, 0), tem); size = GET_MODE_SIZE (Pmode); /* Save the structure value address unless this is passed as an "invisible" first argument. */ if (struct_incoming_value) { emit_move_insn (adjust_address (registers, Pmode, size), copy_to_reg (struct_incoming_value)); size += GET_MODE_SIZE (Pmode); } /* Return the address of the block. */ return copy_addr_to_reg (XEXP (registers, 0)); } /* __builtin_apply_args returns block of memory allocated on the stack into which is stored the arg pointer, structure value address, static chain, and all the registers that might possibly be used in performing a function call. The code is moved to the start of the function so the incoming values are saved. */ static rtx expand_builtin_apply_args (void) { /* Don't do __builtin_apply_args more than once in a function. Save the result of the first call and reuse it. */ if (apply_args_value != 0) return apply_args_value; { /* When this function is called, it means that registers must be saved on entry to this function. So we migrate the call to the first insn of this function. */ rtx temp; rtx seq; start_sequence (); temp = expand_builtin_apply_args_1 (); seq = get_insns (); end_sequence (); apply_args_value = temp; /* Put the insns after the NOTE that starts the function. If this is inside a start_sequence, make the outer-level insn chain current, so the code is placed at the start of the function. */ push_topmost_sequence (); emit_insn_before (seq, NEXT_INSN (entry_of_function ())); pop_topmost_sequence (); return temp; } } /* Perform an untyped call and save the state required to perform an untyped return of whatever value was returned by the given function. */ static rtx expand_builtin_apply (rtx function, rtx arguments, rtx argsize) { int size, align, regno; enum machine_mode mode; rtx incoming_args, result, reg, dest, src, call_insn; rtx old_stack_level = 0; rtx call_fusage = 0; rtx struct_value = targetm.calls.struct_value_rtx (cfun ? 
TREE_TYPE (cfun->decl) : 0, 0); arguments = convert_memory_address (Pmode, arguments); /* Create a block where the return registers can be saved. */ result = assign_stack_local (BLKmode, apply_result_size (), -1); /* Fetch the arg pointer from the ARGUMENTS block. */ incoming_args = gen_reg_rtx (Pmode); emit_move_insn (incoming_args, gen_rtx_MEM (Pmode, arguments)); #ifndef STACK_GROWS_DOWNWARD incoming_args = expand_simple_binop (Pmode, MINUS, incoming_args, argsize, incoming_args, 0, OPTAB_LIB_WIDEN); #endif /* Perform postincrements before actually calling the function. */ emit_queue (); /* Push a new argument block and copy the arguments. Do not allow the (potential) memcpy call below to interfere with our stack manipulations. */ do_pending_stack_adjust (); NO_DEFER_POP; /* Save the stack with nonlocal if available. */ #ifdef HAVE_save_stack_nonlocal if (HAVE_save_stack_nonlocal) emit_stack_save (SAVE_NONLOCAL, &old_stack_level, NULL_RTX); else #endif emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX); /* Allocate a block of memory onto the stack and copy the memory arguments to the outgoing arguments address. */ allocate_dynamic_stack_space (argsize, 0, BITS_PER_UNIT); dest = virtual_outgoing_args_rtx; #ifndef STACK_GROWS_DOWNWARD if (GET_CODE (argsize) == CONST_INT) dest = plus_constant (dest, -INTVAL (argsize)); else dest = gen_rtx_PLUS (Pmode, dest, negate_rtx (Pmode, argsize)); #endif dest = gen_rtx_MEM (BLKmode, dest); set_mem_align (dest, PARM_BOUNDARY); src = gen_rtx_MEM (BLKmode, incoming_args); set_mem_align (src, PARM_BOUNDARY); emit_block_move (dest, src, argsize, BLOCK_OP_NORMAL); /* Refer to the argument block. */ apply_args_size (); arguments = gen_rtx_MEM (BLKmode, arguments); set_mem_align (arguments, PARM_BOUNDARY); /* Walk past the arg-pointer and structure value address. */ size = GET_MODE_SIZE (Pmode); if (struct_value) size += GET_MODE_SIZE (Pmode); /* Restore each of the registers previously saved. Make USE insns for each of these registers for use in making the call. */ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if ((mode = apply_args_mode[regno]) != VOIDmode) { align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT; if (size % align != 0) size = CEIL (size, align) * align; reg = gen_rtx_REG (mode, regno); emit_move_insn (reg, adjust_address (arguments, mode, size)); use_reg (&call_fusage, reg); size += GET_MODE_SIZE (mode); } /* Restore the structure value address unless this is passed as an "invisible" first argument. */ size = GET_MODE_SIZE (Pmode); if (struct_value) { rtx value = gen_reg_rtx (Pmode); emit_move_insn (value, adjust_address (arguments, Pmode, size)); emit_move_insn (struct_value, value); if (REG_P (struct_value)) use_reg (&call_fusage, struct_value); size += GET_MODE_SIZE (Pmode); } /* All arguments and registers used for the call are set up by now! */ function = prepare_call_address (function, NULL, &call_fusage, 0, 0); /* Ensure address is valid. SYMBOL_REF is already valid, so no need, and we don't want to load it into a register as an optimization, because prepare_call_address already did it if it should be done. */ if (GET_CODE (function) != SYMBOL_REF) function = memory_address (FUNCTION_MODE, function); /* Generate the actual call instruction and save the return value. 
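Two strategies follow: if the target provides an untyped_call pattern it is used directly, with result_vector describing every register that might carry a return value; otherwise we fall back to the ordinary call_value pattern, which can only describe a single return register.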
*/ #ifdef HAVE_untyped_call if (HAVE_untyped_call) emit_call_insn (gen_untyped_call (gen_rtx_MEM (FUNCTION_MODE, function), result, result_vector (1, result))); else #endif #ifdef HAVE_call_value if (HAVE_call_value) { rtx valreg = 0; /* Locate the unique return register. It is not possible to express a call that sets more than one return register using call_value; use untyped_call for that. In fact, untyped_call only needs to save the return registers in the given block. */ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if ((mode = apply_result_mode[regno]) != VOIDmode) { if (valreg) abort (); /* HAVE_untyped_call required. */ valreg = gen_rtx_REG (mode, regno); } emit_call_insn (GEN_CALL_VALUE (valreg, gen_rtx_MEM (FUNCTION_MODE, function), const0_rtx, NULL_RTX, const0_rtx)); emit_move_insn (adjust_address (result, GET_MODE (valreg), 0), valreg); } else #endif abort (); /* Find the CALL insn we just emitted, and attach the register usage information. */ call_insn = last_call_insn (); add_function_usage_to (call_insn, call_fusage); /* Restore the stack. */ #ifdef HAVE_save_stack_nonlocal if (HAVE_save_stack_nonlocal) emit_stack_restore (SAVE_NONLOCAL, old_stack_level, NULL_RTX); else #endif emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX); OK_DEFER_POP; /* Return the address of the result block. */ result = copy_addr_to_reg (XEXP (result, 0)); return convert_memory_address (ptr_mode, result); } /* Perform an untyped return. */ static void expand_builtin_return (rtx result) { int size, align, regno; enum machine_mode mode; rtx reg; rtx call_fusage = 0; result = convert_memory_address (Pmode, result); apply_result_size (); result = gen_rtx_MEM (BLKmode, result); #ifdef HAVE_untyped_return if (HAVE_untyped_return) { emit_jump_insn (gen_untyped_return (result, result_vector (0, result))); emit_barrier (); return; } #endif /* Restore the return value and note that each value is used. */ size = 0; for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if ((mode = apply_result_mode[regno]) != VOIDmode) { align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT; if (size % align != 0) size = CEIL (size, align) * align; reg = gen_rtx_REG (mode, INCOMING_REGNO (regno)); emit_move_insn (reg, adjust_address (result, mode, size)); push_to_sequence (call_fusage); emit_insn (gen_rtx_USE (VOIDmode, reg)); call_fusage = get_insns (); end_sequence (); size += GET_MODE_SIZE (mode); } /* Put the USE insns before the return. */ emit_insn (call_fusage); /* Return whatever values was restored by jumping directly to the end of the function. */ expand_naked_return (); } /* Used by expand_builtin_classify_type and fold_builtin_classify_type. */ static enum type_class type_to_class (tree type) { switch (TREE_CODE (type)) { case VOID_TYPE: return void_type_class; case INTEGER_TYPE: return integer_type_class; case CHAR_TYPE: return char_type_class; case ENUMERAL_TYPE: return enumeral_type_class; case BOOLEAN_TYPE: return boolean_type_class; case POINTER_TYPE: return pointer_type_class; case REFERENCE_TYPE: return reference_type_class; case OFFSET_TYPE: return offset_type_class; case REAL_TYPE: return real_type_class; case COMPLEX_TYPE: return complex_type_class; case FUNCTION_TYPE: return function_type_class; case METHOD_TYPE: return method_type_class; case RECORD_TYPE: return record_type_class; case UNION_TYPE: case QUAL_UNION_TYPE: return union_type_class; case ARRAY_TYPE: return (TYPE_STRING_FLAG (type) ? 
string_type_class : array_type_class); case SET_TYPE: return set_type_class; case FILE_TYPE: return file_type_class; case LANG_TYPE: return lang_type_class; default: return no_type_class; } } /* Expand a call to __builtin_classify_type with arguments found in ARGLIST. */ static rtx expand_builtin_classify_type (tree arglist) { if (arglist != 0) return GEN_INT (type_to_class (TREE_TYPE (TREE_VALUE (arglist)))); return GEN_INT (no_type_class); } /* This helper macro, meant to be used in mathfn_built_in below, determines which among a set of three builtin math functions is appropriate for a given type mode. The `F' and `L' cases are automatically generated from the `double' case. */ #define CASE_MATHFN(BUILT_IN_MATHFN) \ case BUILT_IN_MATHFN: case BUILT_IN_MATHFN##F: case BUILT_IN_MATHFN##L: \ fcode = BUILT_IN_MATHFN; fcodef = BUILT_IN_MATHFN##F ; \ fcodel = BUILT_IN_MATHFN##L ; break; /* Return mathematic function equivalent to FN but operating directly on TYPE, if available. If we can't do the conversion, return zero. */ tree mathfn_built_in (tree type, enum built_in_function fn) { enum built_in_function fcode, fcodef, fcodel; switch (fn) { CASE_MATHFN (BUILT_IN_ACOS) CASE_MATHFN (BUILT_IN_ACOSH) CASE_MATHFN (BUILT_IN_ASIN) CASE_MATHFN (BUILT_IN_ASINH) CASE_MATHFN (BUILT_IN_ATAN) CASE_MATHFN (BUILT_IN_ATAN2) CASE_MATHFN (BUILT_IN_ATANH) CASE_MATHFN (BUILT_IN_CBRT) CASE_MATHFN (BUILT_IN_CEIL) CASE_MATHFN (BUILT_IN_COPYSIGN) CASE_MATHFN (BUILT_IN_COS) CASE_MATHFN (BUILT_IN_COSH) CASE_MATHFN (BUILT_IN_DREM) CASE_MATHFN (BUILT_IN_ERF) CASE_MATHFN (BUILT_IN_ERFC) CASE_MATHFN (BUILT_IN_EXP) CASE_MATHFN (BUILT_IN_EXP10) CASE_MATHFN (BUILT_IN_EXP2) CASE_MATHFN (BUILT_IN_EXPM1) CASE_MATHFN (BUILT_IN_FABS) CASE_MATHFN (BUILT_IN_FDIM) CASE_MATHFN (BUILT_IN_FLOOR) CASE_MATHFN (BUILT_IN_FMA) CASE_MATHFN (BUILT_IN_FMAX) CASE_MATHFN (BUILT_IN_FMIN) CASE_MATHFN (BUILT_IN_FMOD) CASE_MATHFN (BUILT_IN_FREXP) CASE_MATHFN (BUILT_IN_GAMMA) CASE_MATHFN (BUILT_IN_HUGE_VAL) CASE_MATHFN (BUILT_IN_HYPOT) CASE_MATHFN (BUILT_IN_ILOGB) CASE_MATHFN (BUILT_IN_INF) CASE_MATHFN (BUILT_IN_J0) CASE_MATHFN (BUILT_IN_J1) CASE_MATHFN (BUILT_IN_JN) CASE_MATHFN (BUILT_IN_LDEXP) CASE_MATHFN (BUILT_IN_LGAMMA) CASE_MATHFN (BUILT_IN_LLRINT) CASE_MATHFN (BUILT_IN_LLROUND) CASE_MATHFN (BUILT_IN_LOG) CASE_MATHFN (BUILT_IN_LOG10) CASE_MATHFN (BUILT_IN_LOG1P) CASE_MATHFN (BUILT_IN_LOG2) CASE_MATHFN (BUILT_IN_LOGB) CASE_MATHFN (BUILT_IN_LRINT) CASE_MATHFN (BUILT_IN_LROUND) CASE_MATHFN (BUILT_IN_MODF) CASE_MATHFN (BUILT_IN_NAN) CASE_MATHFN (BUILT_IN_NANS) CASE_MATHFN (BUILT_IN_NEARBYINT) CASE_MATHFN (BUILT_IN_NEXTAFTER) CASE_MATHFN (BUILT_IN_NEXTTOWARD) CASE_MATHFN (BUILT_IN_POW) CASE_MATHFN (BUILT_IN_POW10) CASE_MATHFN (BUILT_IN_REMAINDER) CASE_MATHFN (BUILT_IN_REMQUO) CASE_MATHFN (BUILT_IN_RINT) CASE_MATHFN (BUILT_IN_ROUND) CASE_MATHFN (BUILT_IN_SCALB) CASE_MATHFN (BUILT_IN_SCALBLN) CASE_MATHFN (BUILT_IN_SCALBN) CASE_MATHFN (BUILT_IN_SIGNIFICAND) CASE_MATHFN (BUILT_IN_SIN) CASE_MATHFN (BUILT_IN_SINCOS) CASE_MATHFN (BUILT_IN_SINH) CASE_MATHFN (BUILT_IN_SQRT) CASE_MATHFN (BUILT_IN_TAN) CASE_MATHFN (BUILT_IN_TANH) CASE_MATHFN (BUILT_IN_TGAMMA) CASE_MATHFN (BUILT_IN_TRUNC) CASE_MATHFN (BUILT_IN_Y0) CASE_MATHFN (BUILT_IN_Y1) CASE_MATHFN (BUILT_IN_YN) #undef CASE_MATHFN default: return 0; } if (TYPE_MAIN_VARIANT (type) == double_type_node) return implicit_built_in_decls[fcode]; else if (TYPE_MAIN_VARIANT (type) == float_type_node) return implicit_built_in_decls[fcodef]; else if (TYPE_MAIN_VARIANT (type) == long_double_type_node) return 
implicit_built_in_decls[fcodel]; else return 0; } /* If errno must be maintained, expand the RTL to check if the result, TARGET, of a built-in function call, EXP, is NaN, and if so set errno to EDOM. */ static void expand_errno_check (tree exp, rtx target) { rtx lab = gen_label_rtx (); /* Test the result; if it is NaN, set errno=EDOM because the argument was not in the domain. */ emit_cmp_and_jump_insns (target, target, EQ, 0, GET_MODE (target), 0, lab); #ifdef TARGET_EDOM /* If this built-in doesn't throw an exception, set errno directly. */ if (TREE_NOTHROW (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))) { #ifdef GEN_ERRNO_RTX rtx errno_rtx = GEN_ERRNO_RTX; #else rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno")); #endif emit_move_insn (errno_rtx, GEN_INT (TARGET_EDOM)); emit_label (lab); return; } #endif /* We can't set errno=EDOM directly; let the library call do it. Pop the arguments right away in case the call gets deleted. */ NO_DEFER_POP; expand_call (exp, target, 0); OK_DEFER_POP; emit_label (lab); } /* Expand a call to one of the builtin math functions (sqrt, exp, or log). Return 0 if a normal call should be emitted rather than expanding the function in-line. EXP is the expression that is a call to the builtin function; if convenient, the result should be placed in TARGET. SUBTARGET may be used as the target for computing one of EXP's operands. */ static rtx expand_builtin_mathfn (tree exp, rtx target, rtx subtarget) { optab builtin_optab; rtx op0, insns, before_call; tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); enum machine_mode mode; bool errno_set = false; tree arg, narg; if (!validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; arg = TREE_VALUE (arglist); switch (DECL_FUNCTION_CODE (fndecl)) { case BUILT_IN_SQRT: case BUILT_IN_SQRTF: case BUILT_IN_SQRTL: errno_set = ! 
tree_expr_nonnegative_p (arg); builtin_optab = sqrt_optab; break; case BUILT_IN_EXP: case BUILT_IN_EXPF: case BUILT_IN_EXPL: errno_set = true; builtin_optab = exp_optab; break; case BUILT_IN_EXP10: case BUILT_IN_EXP10F: case BUILT_IN_EXP10L: case BUILT_IN_POW10: case BUILT_IN_POW10F: case BUILT_IN_POW10L: errno_set = true; builtin_optab = exp10_optab; break; case BUILT_IN_EXP2: case BUILT_IN_EXP2F: case BUILT_IN_EXP2L: errno_set = true; builtin_optab = exp2_optab; break; case BUILT_IN_EXPM1: case BUILT_IN_EXPM1F: case BUILT_IN_EXPM1L: errno_set = true; builtin_optab = expm1_optab; break; case BUILT_IN_LOGB: case BUILT_IN_LOGBF: case BUILT_IN_LOGBL: errno_set = true; builtin_optab = logb_optab; break; case BUILT_IN_ILOGB: case BUILT_IN_ILOGBF: case BUILT_IN_ILOGBL: errno_set = true; builtin_optab = ilogb_optab; break; case BUILT_IN_LOG: case BUILT_IN_LOGF: case BUILT_IN_LOGL: errno_set = true; builtin_optab = log_optab; break; case BUILT_IN_LOG10: case BUILT_IN_LOG10F: case BUILT_IN_LOG10L: errno_set = true; builtin_optab = log10_optab; break; case BUILT_IN_LOG2: case BUILT_IN_LOG2F: case BUILT_IN_LOG2L: errno_set = true; builtin_optab = log2_optab; break; case BUILT_IN_LOG1P: case BUILT_IN_LOG1PF: case BUILT_IN_LOG1PL: errno_set = true; builtin_optab = log1p_optab; break; case BUILT_IN_ASIN: case BUILT_IN_ASINF: case BUILT_IN_ASINL: builtin_optab = asin_optab; break; case BUILT_IN_ACOS: case BUILT_IN_ACOSF: case BUILT_IN_ACOSL: builtin_optab = acos_optab; break; case BUILT_IN_TAN: case BUILT_IN_TANF: case BUILT_IN_TANL: builtin_optab = tan_optab; break; case BUILT_IN_ATAN: case BUILT_IN_ATANF: case BUILT_IN_ATANL: builtin_optab = atan_optab; break; case BUILT_IN_FLOOR: case BUILT_IN_FLOORF: case BUILT_IN_FLOORL: builtin_optab = floor_optab; break; case BUILT_IN_CEIL: case BUILT_IN_CEILF: case BUILT_IN_CEILL: builtin_optab = ceil_optab; break; case BUILT_IN_TRUNC: case BUILT_IN_TRUNCF: case BUILT_IN_TRUNCL: builtin_optab = btrunc_optab; break; case BUILT_IN_ROUND: case BUILT_IN_ROUNDF: case BUILT_IN_ROUNDL: builtin_optab = round_optab; break; case BUILT_IN_NEARBYINT: case BUILT_IN_NEARBYINTF: case BUILT_IN_NEARBYINTL: builtin_optab = nearbyint_optab; break; default: abort (); } /* Make a suitable register to place result in. */ mode = TYPE_MODE (TREE_TYPE (exp)); if (! flag_errno_math || ! HONOR_NANS (mode)) errno_set = false; /* Before working hard, check whether the instruction is available. */ if (builtin_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { target = gen_reg_rtx (mode); /* Wrap the computation of the argument in a SAVE_EXPR, as we may need to expand the argument again. This way, we will not perform side-effects more the once. */ narg = builtin_save_expr (arg); if (narg != arg) { arglist = build_tree_list (NULL_TREE, arg); exp = build_function_call_expr (fndecl, arglist); } op0 = expand_expr (arg, subtarget, VOIDmode, 0); emit_queue (); start_sequence (); /* Compute into TARGET. Set TARGET to wherever the result comes back. */ target = expand_unop (mode, builtin_optab, op0, target, 0); if (target != 0) { if (errno_set) expand_errno_check (exp, target); /* Output the entire sequence. */ insns = get_insns (); end_sequence (); emit_insn (insns); return target; } /* If we were unable to expand via the builtin, stop the sequence (without outputting the insns) and call to the library function with the stabilized argument list. 
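(Wrapping the argument in a SAVE_EXPR above is what allows the argument list to be reused by this library-call fallback without re-running the argument's side effects.)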
*/ end_sequence (); } before_call = get_last_insn (); target = expand_call (exp, target, target == const0_rtx); /* If this is a sqrt operation and we don't care about errno, try to attach a REG_EQUAL note with a SQRT rtx to the emitted libcall. This allows the semantics of the libcall to be visible to the RTL optimizers. */ if (builtin_optab == sqrt_optab && !errno_set) { /* Search backwards through the insns emitted by expand_call looking for the instruction with the REG_RETVAL note. */ rtx last = get_last_insn (); while (last != before_call) { if (find_reg_note (last, REG_RETVAL, NULL)) { rtx note = find_reg_note (last, REG_EQUAL, NULL); /* Check that the REQ_EQUAL note is an EXPR_LIST with two elements, i.e. symbol_ref(sqrt) and the operand. */ if (note && GET_CODE (note) == EXPR_LIST && GET_CODE (XEXP (note, 0)) == EXPR_LIST && XEXP (XEXP (note, 0), 1) != NULL_RTX && XEXP (XEXP (XEXP (note, 0), 1), 1) == NULL_RTX) { rtx operand = XEXP (XEXP (XEXP (note, 0), 1), 0); /* Check operand is a register with expected mode. */ if (operand && REG_P (operand) && GET_MODE (operand) == mode) { /* Replace the REG_EQUAL note with a SQRT rtx. */ rtx equiv = gen_rtx_SQRT (mode, operand); set_unique_reg_note (last, REG_EQUAL, equiv); } } break; } last = PREV_INSN (last); } } return target; } /* Expand a call to the builtin binary math functions (pow and atan2). Return 0 if a normal call should be emitted rather than expanding the function in-line. EXP is the expression that is a call to the builtin function; if convenient, the result should be placed in TARGET. SUBTARGET may be used as the target for computing one of EXP's operands. */ static rtx expand_builtin_mathfn_2 (tree exp, rtx target, rtx subtarget) { optab builtin_optab; rtx op0, op1, insns; tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); tree arg0, arg1, temp, narg; enum machine_mode mode; bool errno_set = true; bool stable = true; if (!validate_arglist (arglist, REAL_TYPE, REAL_TYPE, VOID_TYPE)) return 0; arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); switch (DECL_FUNCTION_CODE (fndecl)) { case BUILT_IN_POW: case BUILT_IN_POWF: case BUILT_IN_POWL: builtin_optab = pow_optab; break; case BUILT_IN_ATAN2: case BUILT_IN_ATAN2F: case BUILT_IN_ATAN2L: builtin_optab = atan2_optab; break; case BUILT_IN_FMOD: case BUILT_IN_FMODF: case BUILT_IN_FMODL: builtin_optab = fmod_optab; break; case BUILT_IN_DREM: case BUILT_IN_DREMF: case BUILT_IN_DREML: builtin_optab = drem_optab; break; default: abort (); } /* Make a suitable register to place result in. */ mode = TYPE_MODE (TREE_TYPE (exp)); /* Before working hard, check whether the instruction is available. */ if (builtin_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing) return 0; target = gen_reg_rtx (mode); if (! flag_errno_math || ! HONOR_NANS (mode)) errno_set = false; /* Always stabilize the argument list. */ narg = builtin_save_expr (arg1); if (narg != arg1) { temp = build_tree_list (NULL_TREE, narg); stable = false; } else temp = TREE_CHAIN (arglist); narg = builtin_save_expr (arg0); if (narg != arg0) { arglist = tree_cons (NULL_TREE, narg, temp); stable = false; } else if (! stable) arglist = tree_cons (NULL_TREE, arg0, temp); if (! stable) exp = build_function_call_expr (fndecl, arglist); op0 = expand_expr (arg0, subtarget, VOIDmode, 0); op1 = expand_expr (arg1, 0, VOIDmode, 0); emit_queue (); start_sequence (); /* Compute into TARGET. Set TARGET to wherever the result comes back. 
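(expand_binop is free to return a register other than TARGET, or 0 when the instruction turns out to be unusable after all; the code below handles both outcomes.)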
*/ target = expand_binop (mode, builtin_optab, op0, op1, target, 0, OPTAB_DIRECT); /* If we were unable to expand via the builtin, stop the sequence (without outputting the insns) and call to the library function with the stabilized argument list. */ if (target == 0) { end_sequence (); return expand_call (exp, target, target == const0_rtx); } if (errno_set) expand_errno_check (exp, target); /* Output the entire sequence. */ insns = get_insns (); end_sequence (); emit_insn (insns); return target; } /* Expand a call to the builtin sin and cos math functions. Return 0 if a normal call should be emitted rather than expanding the function in-line. EXP is the expression that is a call to the builtin function; if convenient, the result should be placed in TARGET. SUBTARGET may be used as the target for computing one of EXP's operands. */ static rtx expand_builtin_mathfn_3 (tree exp, rtx target, rtx subtarget) { optab builtin_optab; rtx op0, insns, before_call; tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); enum machine_mode mode; bool errno_set = false; tree arg, narg; if (!validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; arg = TREE_VALUE (arglist); switch (DECL_FUNCTION_CODE (fndecl)) { case BUILT_IN_SIN: case BUILT_IN_SINF: case BUILT_IN_SINL: case BUILT_IN_COS: case BUILT_IN_COSF: case BUILT_IN_COSL: builtin_optab = sincos_optab; break; default: abort (); } /* Make a suitable register to place result in. */ mode = TYPE_MODE (TREE_TYPE (exp)); if (! flag_errno_math || ! HONOR_NANS (mode)) errno_set = false; /* Check if sincos insn is available, otherwise fallback to sin or cos insn. */ if (builtin_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing) { switch (DECL_FUNCTION_CODE (fndecl)) { case BUILT_IN_SIN: case BUILT_IN_SINF: case BUILT_IN_SINL: builtin_optab = sin_optab; break; case BUILT_IN_COS: case BUILT_IN_COSF: case BUILT_IN_COSL: builtin_optab = cos_optab; break; default: abort(); } } /* Before working hard, check whether the instruction is available. */ if (builtin_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { target = gen_reg_rtx (mode); /* Wrap the computation of the argument in a SAVE_EXPR, as we may need to expand the argument again. This way, we will not perform side-effects more the once. */ narg = save_expr (arg); if (narg != arg) { arglist = build_tree_list (NULL_TREE, arg); exp = build_function_call_expr (fndecl, arglist); } op0 = expand_expr (arg, subtarget, VOIDmode, 0); emit_queue (); start_sequence (); /* Compute into TARGET. Set TARGET to wherever the result comes back. */ if (builtin_optab == sincos_optab) { switch (DECL_FUNCTION_CODE (fndecl)) { case BUILT_IN_SIN: case BUILT_IN_SINF: case BUILT_IN_SINL: if (!expand_twoval_unop (builtin_optab, op0, 0, target, 0)) abort(); break; case BUILT_IN_COS: case BUILT_IN_COSF: case BUILT_IN_COSL: if (!expand_twoval_unop (builtin_optab, op0, target, 0, 0)) abort(); break; default: abort(); } } else { target = expand_unop (mode, builtin_optab, op0, target, 0); } if (target != 0) { if (errno_set) expand_errno_check (exp, target); /* Output the entire sequence. */ insns = get_insns (); end_sequence (); emit_insn (insns); return target; } /* If we were unable to expand via the builtin, stop the sequence (without outputting the insns) and call to the library function with the stabilized argument list. 
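(errno_set is never set to true in this function, so no errno check is emitted on the in-line path either.)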
*/ end_sequence (); } before_call = get_last_insn (); target = expand_call (exp, target, target == const0_rtx); return target; } /* To evaluate powi(x,n), the floating point value x raised to the constant integer exponent n, we use a hybrid algorithm that combines the "window method" with look-up tables. For an introduction to exponentiation algorithms and "addition chains", see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth, "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming", 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */ /* Provide a default value for POWI_MAX_MULTS, the maximum number of multiplications to inline before calling the system library's pow function. powi(x,n) requires at worst 2*bits(n)-2 multiplications, so this default never requires calling pow, powf or powl. */ #ifndef POWI_MAX_MULTS #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2) #endif /* The size of the "optimal power tree" lookup table. All exponents less than this value are simply looked up in the powi_table below. This threshold is also used to size the cache of pseudo registers that hold intermediate results. */ #define POWI_TABLE_SIZE 256 /* The size, in bits of the window, used in the "window method" exponentiation algorithm. This is equivalent to a radix of (1<= POWI_TABLE_SIZE) { if (val & 1) { digit = val & ((1 << POWI_WINDOW_SIZE) - 1); result += powi_lookup_cost (digit, cache) + POWI_WINDOW_SIZE + 1; val >>= POWI_WINDOW_SIZE; } else { val >>= 1; result++; } } return result + powi_lookup_cost (val, cache); } /* Recursive subroutine of expand_powi. This function takes the array, CACHE, of already calculated exponents and an exponent N and returns an RTX that corresponds to CACHE[1]**N, as calculated in mode MODE. */ static rtx expand_powi_1 (enum machine_mode mode, unsigned HOST_WIDE_INT n, rtx *cache) { unsigned HOST_WIDE_INT digit; rtx target, result; rtx op0, op1; if (n < POWI_TABLE_SIZE) { if (cache[n]) return cache[n]; target = gen_reg_rtx (mode); cache[n] = target; op0 = expand_powi_1 (mode, n - powi_table[n], cache); op1 = expand_powi_1 (mode, powi_table[n], cache); } else if (n & 1) { target = gen_reg_rtx (mode); digit = n & ((1 << POWI_WINDOW_SIZE) - 1); op0 = expand_powi_1 (mode, n - digit, cache); op1 = expand_powi_1 (mode, digit, cache); } else { target = gen_reg_rtx (mode); op0 = expand_powi_1 (mode, n >> 1, cache); op1 = op0; } result = expand_mult (mode, op0, op1, target, 0); if (result != target) emit_move_insn (target, result); return target; } /* Expand the RTL to evaluate powi(x,n) in mode MODE. X is the floating point operand in mode MODE, and N is the exponent. This function needs to be kept in sync with powi_cost above. */ static rtx expand_powi (rtx x, enum machine_mode mode, HOST_WIDE_INT n) { unsigned HOST_WIDE_INT val; rtx cache[POWI_TABLE_SIZE]; rtx result; if (n == 0) return CONST1_RTX (mode); val = (n < 0) ? -n : n; memset (cache, 0, sizeof (cache)); cache[1] = x; result = expand_powi_1 (mode, (n < 0) ? -n : n, cache); /* If the original exponent was negative, reciprocate the result. */ if (n < 0) result = expand_binop (mode, sdiv_optab, CONST1_RTX (mode), result, NULL_RTX, 0, OPTAB_LIB_WIDEN); return result; } /* Expand a call to the pow built-in mathematical function. Return 0 if a normal call should be emitted rather than expanding the function in-line. 
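For example, pow (x, 2.0) is always expanded in-line as a single multiplication, while pow (x, 3.0) is expanded (as two multiplications) only when -funsafe-math-optimizations is in effect, we are not optimizing for size, and the powi_cost check below succeeds.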
EXP is the expression that is a call to the builtin function; if convenient, the result should be placed in TARGET. */ static rtx expand_builtin_pow (tree exp, rtx target, rtx subtarget) { tree arglist = TREE_OPERAND (exp, 1); tree arg0, arg1; if (! validate_arglist (arglist, REAL_TYPE, REAL_TYPE, VOID_TYPE)) return 0; arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); if (TREE_CODE (arg1) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg1)) { REAL_VALUE_TYPE cint; REAL_VALUE_TYPE c; HOST_WIDE_INT n; c = TREE_REAL_CST (arg1); n = real_to_integer (&c); real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); if (real_identical (&c, &cint)) { /* If the exponent is -1, 0, 1 or 2, then expand_powi is exact. Otherwise, check the number of multiplications required. Note that pow never sets errno for an integer exponent. */ if ((n >= -1 && n <= 2) || (flag_unsafe_math_optimizations && ! optimize_size && powi_cost (n) <= POWI_MAX_MULTS)) { enum machine_mode mode = TYPE_MODE (TREE_TYPE (exp)); rtx op = expand_expr (arg0, subtarget, VOIDmode, 0); op = force_reg (mode, op); return expand_powi (op, mode, n); } } } if (! flag_unsafe_math_optimizations) return NULL_RTX; return expand_builtin_mathfn_2 (exp, target, subtarget); } /* Expand expression EXP which is a call to the strlen builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient. */ static rtx expand_builtin_strlen (tree arglist, rtx target, enum machine_mode target_mode) { if (!validate_arglist (arglist, POINTER_TYPE, VOID_TYPE)) return 0; else { rtx pat; tree len, src = TREE_VALUE (arglist); rtx result, src_reg, char_rtx, before_strlen; enum machine_mode insn_mode = target_mode, char_mode; enum insn_code icode = CODE_FOR_nothing; int align; /* If the length can be computed at compile-time, return it. */ len = c_strlen (src, 0); if (len) return expand_expr (len, target, target_mode, EXPAND_NORMAL); /* If the length can be computed at compile-time and is constant integer, but there are side-effects in src, evaluate src for side-effects, then return len. E.g. x = strlen (i++ ? "xfoo" + 1 : "bar"); can be optimized into: i++; x = 3; */ len = c_strlen (src, 1); if (len && TREE_CODE (len) == INTEGER_CST) { expand_expr (src, const0_rtx, VOIDmode, EXPAND_NORMAL); return expand_expr (len, target, target_mode, EXPAND_NORMAL); } align = get_pointer_alignment (src, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; /* If SRC is not a pointer type, don't do this operation inline. */ if (align == 0) return 0; /* Bail out if we can't compute strlen in the right mode. */ while (insn_mode != VOIDmode) { icode = strlen_optab->handlers[(int) insn_mode].insn_code; if (icode != CODE_FOR_nothing) break; insn_mode = GET_MODE_WIDER_MODE (insn_mode); } if (insn_mode == VOIDmode) return 0; /* Make a place to write the result of the instruction. */ result = target; if (! (result != 0 && REG_P (result) && GET_MODE (result) == insn_mode && REGNO (result) >= FIRST_PSEUDO_REGISTER)) result = gen_reg_rtx (insn_mode); /* Make a place to hold the source address. We will not expand the actual source until we are sure that the expansion will not fail -- there are trees that cannot be expanded twice. */ src_reg = gen_reg_rtx (Pmode); /* Mark the beginning of the strlen sequence so we can emit the source operand later. */ before_strlen = get_last_insn (); char_rtx = const0_rtx; char_mode = insn_data[(int) icode].operand[2].mode; if (! 
(*insn_data[(int) icode].operand[2].predicate) (char_rtx, char_mode)) char_rtx = copy_to_mode_reg (char_mode, char_rtx); pat = GEN_FCN (icode) (result, gen_rtx_MEM (BLKmode, src_reg), char_rtx, GEN_INT (align)); if (! pat) return 0; emit_insn (pat); /* Now that we are assured of success, expand the source. */ start_sequence (); pat = memory_address (BLKmode, expand_expr (src, src_reg, ptr_mode, EXPAND_SUM)); if (pat != src_reg) emit_move_insn (src_reg, pat); pat = get_insns (); end_sequence (); if (before_strlen) emit_insn_after (pat, before_strlen); else emit_insn_before (pat, get_insns ()); /* Return the value in the proper mode for this function. */ if (GET_MODE (result) == target_mode) target = result; else if (target != 0) convert_move (target, result, 0); else target = convert_to_mode (target_mode, result, 0); return target; } } /* Expand a call to the strstr builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). */ static rtx expand_builtin_strstr (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); tree fn, tmp; const char *p1, *p2; p2 = c_getstr (s2); if (p2 == NULL) return 0; p1 = c_getstr (s1); if (p1 != NULL) { const char *r = strstr (p1, p2); if (r == NULL) return const0_rtx; /* Return an offset into the constant string argument. */ tmp = fold (build2 (PLUS_EXPR, TREE_TYPE (s1), s1, fold_convert (TREE_TYPE (s1), ssize_int (r - p1)))); return expand_expr (tmp, target, mode, EXPAND_NORMAL); } if (p2[0] == '\0') return expand_expr (s1, target, mode, EXPAND_NORMAL); if (p2[1] != '\0') return 0; fn = implicit_built_in_decls[BUILT_IN_STRCHR]; if (!fn) return 0; /* New argument list transforming strstr(s1, s2) to strchr(s1, s2[0]). */ arglist = build_tree_list (NULL_TREE, build_int_2 (p2[0], 0)); arglist = tree_cons (NULL_TREE, s1, arglist); return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } } /* Expand a call to the strchr builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). */ static rtx expand_builtin_strchr (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); const char *p1; if (TREE_CODE (s2) != INTEGER_CST) return 0; p1 = c_getstr (s1); if (p1 != NULL) { char c; const char *r; tree tmp; if (target_char_cast (s2, &c)) return 0; r = strchr (p1, c); if (r == NULL) return const0_rtx; /* Return an offset into the constant string argument. */ tmp = fold (build2 (PLUS_EXPR, TREE_TYPE (s1), s1, fold_convert (TREE_TYPE (s1), ssize_int (r - p1)))); return expand_expr (tmp, target, mode, EXPAND_NORMAL); } /* FIXME: Should use here strchrM optab so that ports can optimize this. */ return 0; } } /* Expand a call to the strrchr builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). 
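For example, strrchr ("abcabc", 'b') is evaluated at compile time to an offset into the string constant, and strrchr (s, '\0') is rewritten as a call to strchr.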
*/ static rtx expand_builtin_strrchr (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); tree fn, tmp; const char *p1; if (TREE_CODE (s2) != INTEGER_CST) return 0; p1 = c_getstr (s1); if (p1 != NULL) { char c; const char *r; if (target_char_cast (s2, &c)) return 0; r = strrchr (p1, c); if (r == NULL) return const0_rtx; /* Return an offset into the constant string argument. */ tmp = fold (build2 (PLUS_EXPR, TREE_TYPE (s1), s1, fold_convert (TREE_TYPE (s1), ssize_int (r - p1)))); return expand_expr (tmp, target, mode, EXPAND_NORMAL); } if (! integer_zerop (s2)) return 0; fn = implicit_built_in_decls[BUILT_IN_STRCHR]; if (!fn) return 0; /* Transform strrchr(s1, '\0') to strchr(s1, '\0'). */ return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } } /* Expand a call to the strpbrk builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). */ static rtx expand_builtin_strpbrk (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); tree fn, tmp; const char *p1, *p2; p2 = c_getstr (s2); if (p2 == NULL) return 0; p1 = c_getstr (s1); if (p1 != NULL) { const char *r = strpbrk (p1, p2); if (r == NULL) return const0_rtx; /* Return an offset into the constant string argument. */ tmp = fold (build2 (PLUS_EXPR, TREE_TYPE (s1), s1, fold_convert (TREE_TYPE (s1), ssize_int (r - p1)))); return expand_expr (tmp, target, mode, EXPAND_NORMAL); } if (p2[0] == '\0') { /* strpbrk(x, "") == NULL. Evaluate and ignore the arguments in case they had side-effects. */ expand_expr (s1, const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } if (p2[1] != '\0') return 0; /* Really call strpbrk. */ fn = implicit_built_in_decls[BUILT_IN_STRCHR]; if (!fn) return 0; /* New argument list transforming strpbrk(s1, s2) to strchr(s1, s2[0]). */ arglist = build_tree_list (NULL_TREE, build_int_2 (p2[0], 0)); arglist = tree_cons (NULL_TREE, s1, arglist); return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } } /* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) bytes from constant string DATA + OFFSET and return it as target constant. */ static rtx builtin_memcpy_read_str (void *data, HOST_WIDE_INT offset, enum machine_mode mode) { const char *str = (const char *) data; if (offset < 0 || ((unsigned HOST_WIDE_INT) offset + GET_MODE_SIZE (mode) > strlen (str) + 1)) abort (); /* Attempt to read past the end of constant string. */ return c_readstr (str + offset, mode); } /* Expand a call to the memcpy builtin, with arguments in ARGLIST. Return 0 if we failed, the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). 
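For example, when SRC is the string constant "abc" and LEN is the constant 4, a sufficiently aligned destination lets store_by_pieces emit a few constant stores without ever loading the string from memory.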
*/ static rtx expand_builtin_memcpy (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree dest = TREE_VALUE (arglist); tree src = TREE_VALUE (TREE_CHAIN (arglist)); tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); const char *src_str; unsigned int src_align = get_pointer_alignment (src, BIGGEST_ALIGNMENT); unsigned int dest_align = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); rtx dest_mem, src_mem, dest_addr, len_rtx; /* If DEST is not a pointer type, call the normal function. */ if (dest_align == 0) return 0; /* If the LEN parameter is zero, return DEST. */ if (integer_zerop (len)) { /* Evaluate and ignore SRC in case it has side-effects. */ expand_expr (src, const0_rtx, VOIDmode, EXPAND_NORMAL); return expand_expr (dest, target, mode, EXPAND_NORMAL); } /* If SRC and DEST are the same (and not volatile), return DEST. */ if (operand_equal_p (src, dest, 0)) { /* Evaluate and ignore LEN in case it has side-effects. */ expand_expr (len, const0_rtx, VOIDmode, EXPAND_NORMAL); return expand_expr (dest, target, mode, EXPAND_NORMAL); } /* If either SRC is not a pointer type, don't do this operation in-line. */ if (src_align == 0) return 0; dest_mem = get_memory_rtx (dest); set_mem_align (dest_mem, dest_align); len_rtx = expand_expr (len, NULL_RTX, VOIDmode, 0); src_str = c_getstr (src); /* If SRC is a string constant and block move would be done by pieces, we can avoid loading the string from memory and only stored the computed constants. */ if (src_str && GET_CODE (len_rtx) == CONST_INT && (unsigned HOST_WIDE_INT) INTVAL (len_rtx) <= strlen (src_str) + 1 && can_store_by_pieces (INTVAL (len_rtx), builtin_memcpy_read_str, (void *) src_str, dest_align)) { dest_mem = store_by_pieces (dest_mem, INTVAL (len_rtx), builtin_memcpy_read_str, (void *) src_str, dest_align, 0); dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); dest_mem = convert_memory_address (ptr_mode, dest_mem); return dest_mem; } src_mem = get_memory_rtx (src); set_mem_align (src_mem, src_align); /* Copy word part most expediently. */ dest_addr = emit_block_move (dest_mem, src_mem, len_rtx, BLOCK_OP_NORMAL); if (dest_addr == 0) { dest_addr = force_operand (XEXP (dest_mem, 0), NULL_RTX); dest_addr = convert_memory_address (ptr_mode, dest_addr); } return dest_addr; } } /* Expand a call to the mempcpy builtin, with arguments in ARGLIST. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). If ENDP is 0 return the destination pointer, if ENDP is 1 return the end pointer ala mempcpy, and if ENDP is 2 return the end pointer minus one ala stpcpy. */ static rtx expand_builtin_mempcpy (tree arglist, rtx target, enum machine_mode mode, int endp) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; /* If return value is ignored, transform mempcpy into memcpy. 
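(A TARGET of const0_rtx is how the caller indicates that the return value is unused; plain memcpy is then just as good and is more likely to be expanded in-line.)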
*/ else if (target == const0_rtx) { tree fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; if (!fn) return 0; return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } else { tree dest = TREE_VALUE (arglist); tree src = TREE_VALUE (TREE_CHAIN (arglist)); tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); const char *src_str; unsigned int src_align = get_pointer_alignment (src, BIGGEST_ALIGNMENT); unsigned int dest_align = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); rtx dest_mem, src_mem, len_rtx; /* If DEST is not a pointer type, call the normal function. */ if (dest_align == 0) return 0; /* If SRC and DEST are the same (and not volatile), do nothing. */ if (operand_equal_p (src, dest, 0)) { tree expr; if (endp == 0) { /* Evaluate and ignore LEN in case it has side-effects. */ expand_expr (len, const0_rtx, VOIDmode, EXPAND_NORMAL); return expand_expr (dest, target, mode, EXPAND_NORMAL); } if (endp == 2) len = fold (build2 (MINUS_EXPR, TREE_TYPE (len), len, integer_one_node)); len = fold_convert (TREE_TYPE (dest), len); expr = fold (build2 (PLUS_EXPR, TREE_TYPE (dest), dest, len)); return expand_expr (expr, target, mode, EXPAND_NORMAL); } /* If LEN is not constant, call the normal function. */ if (! host_integerp (len, 1)) return 0; /* If the LEN parameter is zero, return DEST. */ if (tree_low_cst (len, 1) == 0) { /* Evaluate and ignore SRC in case it has side-effects. */ expand_expr (src, const0_rtx, VOIDmode, EXPAND_NORMAL); return expand_expr (dest, target, mode, EXPAND_NORMAL); } /* If either SRC is not a pointer type, don't do this operation in-line. */ if (src_align == 0) return 0; len_rtx = expand_expr (len, NULL_RTX, VOIDmode, 0); src_str = c_getstr (src); /* If SRC is a string constant and block move would be done by pieces, we can avoid loading the string from memory and only stored the computed constants. */ if (src_str && GET_CODE (len_rtx) == CONST_INT && (unsigned HOST_WIDE_INT) INTVAL (len_rtx) <= strlen (src_str) + 1 && can_store_by_pieces (INTVAL (len_rtx), builtin_memcpy_read_str, (void *) src_str, dest_align)) { dest_mem = get_memory_rtx (dest); set_mem_align (dest_mem, dest_align); dest_mem = store_by_pieces (dest_mem, INTVAL (len_rtx), builtin_memcpy_read_str, (void *) src_str, dest_align, endp); dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); dest_mem = convert_memory_address (ptr_mode, dest_mem); return dest_mem; } if (GET_CODE (len_rtx) == CONST_INT && can_move_by_pieces (INTVAL (len_rtx), MIN (dest_align, src_align))) { dest_mem = get_memory_rtx (dest); set_mem_align (dest_mem, dest_align); src_mem = get_memory_rtx (src); set_mem_align (src_mem, src_align); dest_mem = move_by_pieces (dest_mem, src_mem, INTVAL (len_rtx), MIN (dest_align, src_align), endp); dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); dest_mem = convert_memory_address (ptr_mode, dest_mem); return dest_mem; } return 0; } } /* Expand expression EXP, which is a call to the memmove builtin. Return 0 if we failed the caller should emit a normal call. 
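For example, memmove (dst, "abc", 4) is safely rewritten as memcpy below, since a source placed in a read-only section cannot overlap a writable destination.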
*/ static rtx expand_builtin_memmove (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree dest = TREE_VALUE (arglist); tree src = TREE_VALUE (TREE_CHAIN (arglist)); tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); unsigned int src_align = get_pointer_alignment (src, BIGGEST_ALIGNMENT); unsigned int dest_align = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); /* If DEST is not a pointer type, call the normal function. */ if (dest_align == 0) return 0; /* If the LEN parameter is zero, return DEST. */ if (integer_zerop (len)) { /* Evaluate and ignore SRC in case it has side-effects. */ expand_expr (src, const0_rtx, VOIDmode, EXPAND_NORMAL); return expand_expr (dest, target, mode, EXPAND_NORMAL); } /* If SRC and DEST are the same (and not volatile), return DEST. */ if (operand_equal_p (src, dest, 0)) { /* Evaluate and ignore LEN in case it has side-effects. */ expand_expr (len, const0_rtx, VOIDmode, EXPAND_NORMAL); return expand_expr (dest, target, mode, EXPAND_NORMAL); } /* If either SRC is not a pointer type, don't do this operation in-line. */ if (src_align == 0) return 0; /* If src is categorized for a readonly section we can use normal memcpy. */ if (readonly_data_expr (src)) { tree const fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; if (!fn) return 0; return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } /* Otherwise, call the normal function. */ return 0; } } /* Expand expression EXP, which is a call to the bcopy builtin. Return 0 if we failed the caller should emit a normal call. */ static rtx expand_builtin_bcopy (tree arglist) { tree src, dest, size, newarglist; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return NULL_RTX; src = TREE_VALUE (arglist); dest = TREE_VALUE (TREE_CHAIN (arglist)); size = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* New argument list transforming bcopy(ptr x, ptr y, int z) to memmove(ptr y, ptr x, size_t z). This is done this way so that if it isn't expanded inline, we fallback to calling bcopy instead of memmove. */ newarglist = build_tree_list (NULL_TREE, fold_convert (sizetype, size)); newarglist = tree_cons (NULL_TREE, src, newarglist); newarglist = tree_cons (NULL_TREE, dest, newarglist); return expand_builtin_memmove (newarglist, const0_rtx, VOIDmode); } /* Expand expression EXP, which is a call to the strcpy builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). */ static rtx expand_builtin_strcpy (tree arglist, rtx target, enum machine_mode mode) { tree fn, len, src, dst; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; src = TREE_VALUE (TREE_CHAIN (arglist)); dst = TREE_VALUE (arglist); /* If SRC and DST are equal (and not volatile), return DST. 
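Otherwise, when the length of SRC is known at compile time, strcpy (dst, "abc") is expanded below as memcpy (dst, "abc", 4).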
*/ if (operand_equal_p (src, dst, 0)) return expand_expr (dst, target, mode, EXPAND_NORMAL); fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; if (!fn) return 0; len = c_strlen (src, 1); if (len == 0 || TREE_SIDE_EFFECTS (len)) return 0; len = size_binop (PLUS_EXPR, len, ssize_int (1)); arglist = build_tree_list (NULL_TREE, len); arglist = tree_cons (NULL_TREE, src, arglist); arglist = tree_cons (NULL_TREE, dst, arglist); return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } /* Expand a call to the stpcpy builtin, with arguments in ARGLIST. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). */ static rtx expand_builtin_stpcpy (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree dst, src, len; /* If return value is ignored, transform stpcpy into strcpy. */ if (target == const0_rtx) { tree fn = implicit_built_in_decls[BUILT_IN_STRCPY]; if (!fn) return 0; return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } /* Ensure we get an actual string whose length can be evaluated at compile-time, not an expression containing a string. This is because the latter will potentially produce pessimized code when used to produce the return value. */ src = TREE_VALUE (TREE_CHAIN (arglist)); if (! c_getstr (src) || ! (len = c_strlen (src, 0))) return 0; dst = TREE_VALUE (arglist); len = fold (size_binop (PLUS_EXPR, len, ssize_int (1))); arglist = build_tree_list (NULL_TREE, len); arglist = tree_cons (NULL_TREE, src, arglist); arglist = tree_cons (NULL_TREE, dst, arglist); return expand_builtin_mempcpy (arglist, target, mode, /*endp=*/2); } } /* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) bytes from constant string DATA + OFFSET and return it as target constant. */ static rtx builtin_strncpy_read_str (void *data, HOST_WIDE_INT offset, enum machine_mode mode) { const char *str = (const char *) data; if ((unsigned HOST_WIDE_INT) offset > strlen (str)) return const0_rtx; return c_readstr (str + offset, mode); } /* Expand expression EXP, which is a call to the strncpy builtin. Return 0 if we failed the caller should emit a normal call. */ static rtx expand_builtin_strncpy (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree slen = c_strlen (TREE_VALUE (TREE_CHAIN (arglist)), 1); tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); tree fn; /* We must be passed a constant len parameter. */ if (TREE_CODE (len) != INTEGER_CST) return 0; /* If the len parameter is zero, return the dst parameter. */ if (integer_zerop (len)) { /* Evaluate and ignore the src argument in case it has side-effects. */ expand_expr (TREE_VALUE (TREE_CHAIN (arglist)), const0_rtx, VOIDmode, EXPAND_NORMAL); /* Return the dst parameter. */ return expand_expr (TREE_VALUE (arglist), target, mode, EXPAND_NORMAL); } /* Now, we must be passed a constant src ptr parameter. */ if (slen == 0 || TREE_CODE (slen) != INTEGER_CST) return 0; slen = size_binop (PLUS_EXPR, slen, ssize_int (1)); /* We're required to pad with trailing zeros if the requested len is greater than strlen(s2)+1. In that case try to use store_by_pieces, if it fails, punt. 
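For example, strncpy (buf, "ab", 8) must store 'a', 'b' and six zero bytes; builtin_strncpy_read_str supplies zero for any offset past the end of the source string, so store_by_pieces produces exactly that.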
*/ if (tree_int_cst_lt (slen, len)) { tree dest = TREE_VALUE (arglist); unsigned int dest_align = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); const char *p = c_getstr (TREE_VALUE (TREE_CHAIN (arglist))); rtx dest_mem; if (!p || dest_align == 0 || !host_integerp (len, 1) || !can_store_by_pieces (tree_low_cst (len, 1), builtin_strncpy_read_str, (void *) p, dest_align)) return 0; dest_mem = get_memory_rtx (dest); store_by_pieces (dest_mem, tree_low_cst (len, 1), builtin_strncpy_read_str, (void *) p, dest_align, 0); dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); dest_mem = convert_memory_address (ptr_mode, dest_mem); return dest_mem; } /* OK transform into builtin memcpy. */ fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; if (!fn) return 0; return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } } /* Callback routine for store_by_pieces. Read GET_MODE_BITSIZE (MODE) bytes from constant string DATA + OFFSET and return it as target constant. */ static rtx builtin_memset_read_str (void *data, HOST_WIDE_INT offset ATTRIBUTE_UNUSED, enum machine_mode mode) { const char *c = (const char *) data; char *p = alloca (GET_MODE_SIZE (mode)); memset (p, *c, GET_MODE_SIZE (mode)); return c_readstr (p, mode); } /* Callback routine for store_by_pieces. Return the RTL of a register containing GET_MODE_SIZE (MODE) consecutive copies of the unsigned char value given in the RTL register data. For example, if mode is 4 bytes wide, return the RTL for 0x01010101*data. */ static rtx builtin_memset_gen_str (void *data, HOST_WIDE_INT offset ATTRIBUTE_UNUSED, enum machine_mode mode) { rtx target, coeff; size_t size; char *p; size = GET_MODE_SIZE (mode); if (size == 1) return (rtx) data; p = alloca (size); memset (p, 1, size); coeff = c_readstr (p, mode); target = convert_to_mode (mode, (rtx) data, 1); target = expand_mult (mode, target, coeff, NULL_RTX, 1); return force_reg (mode, target); } /* Expand expression EXP, which is a call to the memset builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE if that's convenient). */ static rtx expand_builtin_memset (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree dest = TREE_VALUE (arglist); tree val = TREE_VALUE (TREE_CHAIN (arglist)); tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); char c; unsigned int dest_align = get_pointer_alignment (dest, BIGGEST_ALIGNMENT); rtx dest_mem, dest_addr, len_rtx; /* If DEST is not a pointer type, don't do this operation in-line. */ if (dest_align == 0) return 0; /* If the LEN parameter is zero, return DEST. */ if (integer_zerop (len)) { /* Evaluate and ignore VAL in case it has side-effects. */ expand_expr (val, const0_rtx, VOIDmode, EXPAND_NORMAL); return expand_expr (dest, target, mode, EXPAND_NORMAL); } if (TREE_CODE (val) != INTEGER_CST) { rtx val_rtx; if (!host_integerp (len, 1)) return 0; if (optimize_size && tree_low_cst (len, 1) > 1) return 0; /* Assume that we can memset by pieces if we can store the * the coefficients by pieces (in the required modes). * We can't pass builtin_memset_gen_str as that emits RTL. 
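(For example, storing a non-constant byte VAL in 4-byte pieces uses builtin_memset_gen_str to compute VAL * 0x01010101 in a register; the probe with C == 1 here only asks whether stores of the required widths are possible at this alignment.)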
*/ c = 1; if (!can_store_by_pieces (tree_low_cst (len, 1), builtin_memset_read_str, &c, dest_align)) return 0; val = fold (build1 (CONVERT_EXPR, unsigned_char_type_node, val)); val_rtx = expand_expr (val, NULL_RTX, VOIDmode, 0); val_rtx = force_reg (TYPE_MODE (unsigned_char_type_node), val_rtx); dest_mem = get_memory_rtx (dest); store_by_pieces (dest_mem, tree_low_cst (len, 1), builtin_memset_gen_str, val_rtx, dest_align, 0); dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); dest_mem = convert_memory_address (ptr_mode, dest_mem); return dest_mem; } if (target_char_cast (val, &c)) return 0; if (c) { if (!host_integerp (len, 1)) return 0; if (!can_store_by_pieces (tree_low_cst (len, 1), builtin_memset_read_str, &c, dest_align)) return 0; dest_mem = get_memory_rtx (dest); store_by_pieces (dest_mem, tree_low_cst (len, 1), builtin_memset_read_str, &c, dest_align, 0); dest_mem = force_operand (XEXP (dest_mem, 0), NULL_RTX); dest_mem = convert_memory_address (ptr_mode, dest_mem); return dest_mem; } len_rtx = expand_expr (len, NULL_RTX, VOIDmode, 0); dest_mem = get_memory_rtx (dest); set_mem_align (dest_mem, dest_align); dest_addr = clear_storage (dest_mem, len_rtx); if (dest_addr == 0) { dest_addr = force_operand (XEXP (dest_mem, 0), NULL_RTX); dest_addr = convert_memory_address (ptr_mode, dest_addr); } return dest_addr; } } /* Expand expression EXP, which is a call to the bzero builtin. Return 0 if we failed the caller should emit a normal call. */ static rtx expand_builtin_bzero (tree arglist) { tree dest, size, newarglist; if (!validate_arglist (arglist, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return NULL_RTX; dest = TREE_VALUE (arglist); size = TREE_VALUE (TREE_CHAIN (arglist)); /* New argument list transforming bzero(ptr x, int y) to memset(ptr x, int 0, size_t y). This is done this way so that if it isn't expanded inline, we fallback to calling bzero instead of memset. */ newarglist = build_tree_list (NULL_TREE, fold_convert (sizetype, size)); newarglist = tree_cons (NULL_TREE, integer_zero_node, newarglist); newarglist = tree_cons (NULL_TREE, dest, newarglist); return expand_builtin_memset (newarglist, const0_rtx, VOIDmode); } /* Expand expression EXP, which is a call to the memcmp built-in function. ARGLIST is the argument list for this call. Return 0 if we failed and the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient (and in mode MODE, if that's convenient). */ static rtx expand_builtin_memcmp (tree exp ATTRIBUTE_UNUSED, tree arglist, rtx target, enum machine_mode mode) { tree arg1, arg2, len; const char *p1, *p2; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the len parameter is zero, return zero. */ if (integer_zerop (len)) { /* Evaluate and ignore arg1 and arg2 in case they have side-effects. */ expand_expr (arg1, const0_rtx, VOIDmode, EXPAND_NORMAL); expand_expr (arg2, const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } /* If both arguments are equal (and not volatile), return zero. */ if (operand_equal_p (arg1, arg2, 0)) { /* Evaluate and ignore len in case it has side-effects. */ expand_expr (len, const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } p1 = c_getstr (arg1); p2 = c_getstr (arg2); /* If all arguments are constant, and the value of len is not greater than the lengths of arg1 and arg2, evaluate at compile-time. 
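E.g. memcmp ("abc", "abd", 3) is evaluated here at compile time; the host memcmp result is normalized to -1, 0 or 1 below.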
*/ if (host_integerp (len, 1) && p1 && p2 && compare_tree_int (len, strlen (p1) + 1) <= 0 && compare_tree_int (len, strlen (p2) + 1) <= 0) { const int r = memcmp (p1, p2, tree_low_cst (len, 1)); return (r < 0 ? constm1_rtx : (r > 0 ? const1_rtx : const0_rtx)); } /* If len parameter is one, return an expression corresponding to (*(const unsigned char*)arg1 - (const unsigned char*)arg2). */ if (integer_onep (len)) { tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0); tree cst_uchar_ptr_node = build_pointer_type (cst_uchar_node); tree ind1 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, fold_convert (cst_uchar_ptr_node, arg1)))); tree ind2 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, fold_convert (cst_uchar_ptr_node, arg2)))); tree result = fold (build2 (MINUS_EXPR, integer_type_node, ind1, ind2)); return expand_expr (result, target, mode, EXPAND_NORMAL); } #if defined HAVE_cmpmemsi || defined HAVE_cmpstrsi { rtx arg1_rtx, arg2_rtx, arg3_rtx; rtx result; rtx insn; int arg1_align = get_pointer_alignment (arg1, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; int arg2_align = get_pointer_alignment (arg2, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; enum machine_mode insn_mode; #ifdef HAVE_cmpmemsi if (HAVE_cmpmemsi) insn_mode = insn_data[(int) CODE_FOR_cmpmemsi].operand[0].mode; else #endif #ifdef HAVE_cmpstrsi if (HAVE_cmpstrsi) insn_mode = insn_data[(int) CODE_FOR_cmpstrsi].operand[0].mode; else #endif return 0; /* If we don't have POINTER_TYPE, call the function. */ if (arg1_align == 0 || arg2_align == 0) return 0; /* Make a place to write the result of the instruction. */ result = target; if (! (result != 0 && REG_P (result) && GET_MODE (result) == insn_mode && REGNO (result) >= FIRST_PSEUDO_REGISTER)) result = gen_reg_rtx (insn_mode); arg1_rtx = get_memory_rtx (arg1); arg2_rtx = get_memory_rtx (arg2); arg3_rtx = expand_expr (len, NULL_RTX, VOIDmode, 0); #ifdef HAVE_cmpmemsi if (HAVE_cmpmemsi) insn = gen_cmpmemsi (result, arg1_rtx, arg2_rtx, arg3_rtx, GEN_INT (MIN (arg1_align, arg2_align))); else #endif #ifdef HAVE_cmpstrsi if (HAVE_cmpstrsi) insn = gen_cmpstrsi (result, arg1_rtx, arg2_rtx, arg3_rtx, GEN_INT (MIN (arg1_align, arg2_align))); else #endif abort (); if (insn) emit_insn (insn); else emit_library_call_value (memcmp_libfunc, result, LCT_PURE_MAKE_BLOCK, TYPE_MODE (integer_type_node), 3, XEXP (arg1_rtx, 0), Pmode, XEXP (arg2_rtx, 0), Pmode, convert_to_mode (TYPE_MODE (sizetype), arg3_rtx, TYPE_UNSIGNED (sizetype)), TYPE_MODE (sizetype)); /* Return the value in the proper mode for this function. */ mode = TYPE_MODE (TREE_TYPE (exp)); if (GET_MODE (result) == mode) return result; else if (target != 0) { convert_move (target, result, 0); return target; } else return convert_to_mode (mode, result, 0); } #endif return 0; } /* Expand expression EXP, which is a call to the strcmp builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient. */ static rtx expand_builtin_strcmp (tree exp, rtx target, enum machine_mode mode) { tree arglist = TREE_OPERAND (exp, 1); tree arg1, arg2; const char *p1, *p2; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); /* If both arguments are equal (and not volatile), return zero. 
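E.g. strcmp (p, p) folds to zero; operand_equal_p never matches operands with side effects, so nothing is lost by not evaluating them here.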
*/ if (operand_equal_p (arg1, arg2, 0)) return const0_rtx; p1 = c_getstr (arg1); p2 = c_getstr (arg2); if (p1 && p2) { const int i = strcmp (p1, p2); return (i < 0 ? constm1_rtx : (i > 0 ? const1_rtx : const0_rtx)); } /* If either arg is "", return an expression corresponding to (*(const unsigned char*)arg1 - (const unsigned char*)arg2). */ if ((p1 && *p1 == '\0') || (p2 && *p2 == '\0')) { tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0); tree cst_uchar_ptr_node = build_pointer_type (cst_uchar_node); tree ind1 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, fold_convert (cst_uchar_ptr_node, arg1)))); tree ind2 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, fold_convert (cst_uchar_ptr_node, arg2)))); tree result = fold (build2 (MINUS_EXPR, integer_type_node, ind1, ind2)); return expand_expr (result, target, mode, EXPAND_NORMAL); } #ifdef HAVE_cmpstrsi if (HAVE_cmpstrsi) { tree len, len1, len2; rtx arg1_rtx, arg2_rtx, arg3_rtx; rtx result, insn; tree fndecl; int arg1_align = get_pointer_alignment (arg1, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; int arg2_align = get_pointer_alignment (arg2, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; enum machine_mode insn_mode = insn_data[(int) CODE_FOR_cmpstrsi].operand[0].mode; len1 = c_strlen (arg1, 1); len2 = c_strlen (arg2, 1); if (len1) len1 = size_binop (PLUS_EXPR, ssize_int (1), len1); if (len2) len2 = size_binop (PLUS_EXPR, ssize_int (1), len2); /* If we don't have a constant length for the first, use the length of the second, if we know it. We don't require a constant for this case; some cost analysis could be done if both are available but neither is constant. For now, assume they're equally cheap, unless one has side effects. If both strings have constant lengths, use the smaller. */ if (!len1) len = len2; else if (!len2) len = len1; else if (TREE_SIDE_EFFECTS (len1)) len = len2; else if (TREE_SIDE_EFFECTS (len2)) len = len1; else if (TREE_CODE (len1) != INTEGER_CST) len = len2; else if (TREE_CODE (len2) != INTEGER_CST) len = len1; else if (tree_int_cst_lt (len1, len2)) len = len1; else len = len2; /* If both arguments have side effects, we cannot optimize. */ if (!len || TREE_SIDE_EFFECTS (len)) return 0; /* If we don't have POINTER_TYPE, call the function. */ if (arg1_align == 0 || arg2_align == 0) return 0; /* Make a place to write the result of the instruction. */ result = target; if (! (result != 0 && REG_P (result) && GET_MODE (result) == insn_mode && REGNO (result) >= FIRST_PSEUDO_REGISTER)) result = gen_reg_rtx (insn_mode); /* Stabilize the arguments in case gen_cmpstrsi fails. */ arg1 = builtin_save_expr (arg1); arg2 = builtin_save_expr (arg2); arg1_rtx = get_memory_rtx (arg1); arg2_rtx = get_memory_rtx (arg2); arg3_rtx = expand_expr (len, NULL_RTX, VOIDmode, 0); insn = gen_cmpstrsi (result, arg1_rtx, arg2_rtx, arg3_rtx, GEN_INT (MIN (arg1_align, arg2_align))); if (insn) { emit_insn (insn); /* Return the value in the proper mode for this function. */ mode = TYPE_MODE (TREE_TYPE (exp)); if (GET_MODE (result) == mode) return result; if (target == 0) return convert_to_mode (mode, result, 0); convert_move (target, result, 0); return target; } /* Expand the library call ourselves using a stabilized argument list to avoid re-evaluating the function's arguments twice. 
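(ARG1 and ARG2 were wrapped in SAVE_EXPRs above, so building a new CALL_EXPR from them re-uses the values already computed for the failed cmpstrsi attempt instead of expanding the original arguments again.)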
*/ arglist = build_tree_list (NULL_TREE, arg2); arglist = tree_cons (NULL_TREE, arg1, arglist); fndecl = get_callee_fndecl (exp); exp = build_function_call_expr (fndecl, arglist); return expand_call (exp, target, target == const0_rtx); } #endif return 0; } /* Expand expression EXP, which is a call to the strncmp builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient. */ static rtx expand_builtin_strncmp (tree exp, rtx target, enum machine_mode mode) { tree arglist = TREE_OPERAND (exp, 1); tree arg1, arg2, arg3; const char *p1, *p2; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the len parameter is zero, return zero. */ if (integer_zerop (arg3)) { /* Evaluate and ignore arg1 and arg2 in case they have side-effects. */ expand_expr (arg1, const0_rtx, VOIDmode, EXPAND_NORMAL); expand_expr (arg2, const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } /* If arg1 and arg2 are equal (and not volatile), return zero. */ if (operand_equal_p (arg1, arg2, 0)) { /* Evaluate and ignore arg3 in case it has side-effects. */ expand_expr (arg3, const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } p1 = c_getstr (arg1); p2 = c_getstr (arg2); /* If all arguments are constant, evaluate at compile-time. */ if (host_integerp (arg3, 1) && p1 && p2) { const int r = strncmp (p1, p2, tree_low_cst (arg3, 1)); return (r < 0 ? constm1_rtx : (r > 0 ? const1_rtx : const0_rtx)); } /* If len == 1 or (either string parameter is "" and (len >= 1)), return (*(const u_char*)arg1 - *(const u_char*)arg2). */ if (host_integerp (arg3, 1) && (tree_low_cst (arg3, 1) == 1 || (tree_low_cst (arg3, 1) > 1 && ((p1 && *p1 == '\0') || (p2 && *p2 == '\0'))))) { tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0); tree cst_uchar_ptr_node = build_pointer_type (cst_uchar_node); tree ind1 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, fold_convert (cst_uchar_ptr_node, arg1)))); tree ind2 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, fold_convert (cst_uchar_ptr_node, arg2)))); tree result = fold (build2 (MINUS_EXPR, integer_type_node, ind1, ind2)); return expand_expr (result, target, mode, EXPAND_NORMAL); } /* If c_strlen can determine an expression for one of the string lengths, and it doesn't have side effects, then emit cmpstrsi using length MIN(strlen(string)+1, arg3). */ #ifdef HAVE_cmpstrsi if (HAVE_cmpstrsi) { tree len, len1, len2; rtx arg1_rtx, arg2_rtx, arg3_rtx; rtx result, insn; tree fndecl; int arg1_align = get_pointer_alignment (arg1, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; int arg2_align = get_pointer_alignment (arg2, BIGGEST_ALIGNMENT) / BITS_PER_UNIT; enum machine_mode insn_mode = insn_data[(int) CODE_FOR_cmpstrsi].operand[0].mode; len1 = c_strlen (arg1, 1); len2 = c_strlen (arg2, 1); if (len1) len1 = size_binop (PLUS_EXPR, ssize_int (1), len1); if (len2) len2 = size_binop (PLUS_EXPR, ssize_int (1), len2); /* If we don't have a constant length for the first, use the length of the second, if we know it. We don't require a constant for this case; some cost analysis could be done if both are available but neither is constant. For now, assume they're equally cheap, unless one has side effects. If both strings have constant lengths, use the smaller. 
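E.g. for strncmp (s, "abc", n) only len2 == 4 is known, so len is 4 and the length actually passed to the cmpstrsi pattern is MIN (4, n), computed just below.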
*/ if (!len1) len = len2; else if (!len2) len = len1; else if (TREE_SIDE_EFFECTS (len1)) len = len2; else if (TREE_SIDE_EFFECTS (len2)) len = len1; else if (TREE_CODE (len1) != INTEGER_CST) len = len2; else if (TREE_CODE (len2) != INTEGER_CST) len = len1; else if (tree_int_cst_lt (len1, len2)) len = len1; else len = len2; /* If both arguments have side effects, we cannot optimize. */ if (!len || TREE_SIDE_EFFECTS (len)) return 0; /* The actual new length parameter is MIN(len,arg3). */ len = fold (build2 (MIN_EXPR, TREE_TYPE (len), len, arg3)); /* If we don't have POINTER_TYPE, call the function. */ if (arg1_align == 0 || arg2_align == 0) return 0; /* Make a place to write the result of the instruction. */ result = target; if (! (result != 0 && REG_P (result) && GET_MODE (result) == insn_mode && REGNO (result) >= FIRST_PSEUDO_REGISTER)) result = gen_reg_rtx (insn_mode); /* Stabilize the arguments in case gen_cmpstrsi fails. */ arg1 = builtin_save_expr (arg1); arg2 = builtin_save_expr (arg2); len = builtin_save_expr (len); arg1_rtx = get_memory_rtx (arg1); arg2_rtx = get_memory_rtx (arg2); arg3_rtx = expand_expr (len, NULL_RTX, VOIDmode, 0); insn = gen_cmpstrsi (result, arg1_rtx, arg2_rtx, arg3_rtx, GEN_INT (MIN (arg1_align, arg2_align))); if (insn) { emit_insn (insn); /* Return the value in the proper mode for this function. */ mode = TYPE_MODE (TREE_TYPE (exp)); if (GET_MODE (result) == mode) return result; if (target == 0) return convert_to_mode (mode, result, 0); convert_move (target, result, 0); return target; } /* Expand the library call ourselves using a stabilized argument list to avoid re-evaluating the function's arguments twice. */ arglist = build_tree_list (NULL_TREE, len); arglist = tree_cons (NULL_TREE, arg2, arglist); arglist = tree_cons (NULL_TREE, arg1, arglist); fndecl = get_callee_fndecl (exp); exp = build_function_call_expr (fndecl, arglist); return expand_call (exp, target, target == const0_rtx); } #endif return 0; } /* Expand expression EXP, which is a call to the strcat builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient. */ static rtx expand_builtin_strcat (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree dst = TREE_VALUE (arglist), src = TREE_VALUE (TREE_CHAIN (arglist)); const char *p = c_getstr (src); if (p) { /* If the string length is zero, return the dst parameter. */ if (*p == '\0') return expand_expr (dst, target, mode, EXPAND_NORMAL); else if (!optimize_size) { /* Otherwise if !optimize_size, see if we can store by pieces into (dst + strlen(dst)). */ tree newdst, arglist, strlen_fn = implicit_built_in_decls[BUILT_IN_STRLEN]; /* This is the length argument. */ arglist = build_tree_list (NULL_TREE, fold (size_binop (PLUS_EXPR, c_strlen (src, 0), ssize_int (1)))); /* Prepend src argument. */ arglist = tree_cons (NULL_TREE, src, arglist); /* We're going to use dst more than once. */ dst = builtin_save_expr (dst); /* Create strlen (dst). */ newdst = fold (build_function_call_expr (strlen_fn, build_tree_list (NULL_TREE, dst))); /* Create (dst + strlen (dst)). */ newdst = fold (build2 (PLUS_EXPR, TREE_TYPE (dst), dst, newdst)); /* Prepend the new dst argument. */ arglist = tree_cons (NULL_TREE, newdst, arglist); /* We don't want to get turned into a memcpy if the target is const0_rtx, i.e. when the return value isn't used. 
That would produce pessimized code so pass in a target of zero, it should never actually be used. If this was successful return the original dst, not the result of mempcpy. */ if (expand_builtin_mempcpy (arglist, /*target=*/0, mode, /*endp=*/0)) return expand_expr (dst, target, mode, EXPAND_NORMAL); else return 0; } } return 0; } } /* Expand expression EXP, which is a call to the strncat builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient. */ static rtx expand_builtin_strncat (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree dst = TREE_VALUE (arglist), src = TREE_VALUE (TREE_CHAIN (arglist)), len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); const char *p = c_getstr (src); /* If the requested length is zero, or the src parameter string length is zero, return the dst parameter. */ if (integer_zerop (len) || (p && *p == '\0')) { /* Evaluate and ignore the src and len parameters in case they have side-effects. */ expand_expr (src, const0_rtx, VOIDmode, EXPAND_NORMAL); expand_expr (len, const0_rtx, VOIDmode, EXPAND_NORMAL); return expand_expr (dst, target, mode, EXPAND_NORMAL); } /* If the requested len is greater than or equal to the string length, call strcat. */ if (TREE_CODE (len) == INTEGER_CST && p && compare_tree_int (len, strlen (p)) >= 0) { tree newarglist = tree_cons (NULL_TREE, dst, build_tree_list (NULL_TREE, src)); tree fn = implicit_built_in_decls[BUILT_IN_STRCAT]; /* If the replacement _DECL isn't initialized, don't do the transformation. */ if (!fn) return 0; return expand_expr (build_function_call_expr (fn, newarglist), target, mode, EXPAND_NORMAL); } return 0; } } /* Expand expression EXP, which is a call to the strspn builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient. */ static rtx expand_builtin_strspn (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); const char *p1 = c_getstr (s1), *p2 = c_getstr (s2); /* If both arguments are constants, evaluate at compile-time. */ if (p1 && p2) { const size_t r = strspn (p1, p2); return expand_expr (size_int (r), target, mode, EXPAND_NORMAL); } /* If either argument is "", return 0. */ if ((p1 && *p1 == '\0') || (p2 && *p2 == '\0')) { /* Evaluate and ignore both arguments in case either one has side-effects. */ expand_expr (s1, const0_rtx, VOIDmode, EXPAND_NORMAL); expand_expr (s2, const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } return 0; } } /* Expand expression EXP, which is a call to the strcspn builtin. Return 0 if we failed the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient. */ static rtx expand_builtin_strcspn (tree arglist, rtx target, enum machine_mode mode) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); const char *p1 = c_getstr (s1), *p2 = c_getstr (s2); /* If both arguments are constants, evaluate at compile-time. */ if (p1 && p2) { const size_t r = strcspn (p1, p2); return expand_expr (size_int (r), target, mode, EXPAND_NORMAL); } /* If the first argument is "", return 0. 
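(strcspn ("", s2) is 0 for any s2, so only the second argument needs to be evaluated for its side effects.)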
*/ if (p1 && *p1 == '\0') { /* Evaluate and ignore argument s2 in case it has side-effects. */ expand_expr (s2, const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } /* If the second argument is "", return __builtin_strlen(s1). */ if (p2 && *p2 == '\0') { tree newarglist = build_tree_list (NULL_TREE, s1), fn = implicit_built_in_decls[BUILT_IN_STRLEN]; /* If the replacement _DECL isn't initialized, don't do the transformation. */ if (!fn) return 0; return expand_expr (build_function_call_expr (fn, newarglist), target, mode, EXPAND_NORMAL); } return 0; } } /* Expand a call to __builtin_saveregs, generating the result in TARGET, if that's convenient. */ rtx expand_builtin_saveregs (void) { rtx val, seq; /* Don't do __builtin_saveregs more than once in a function. Save the result of the first call and reuse it. */ if (saveregs_value != 0) return saveregs_value; /* When this function is called, it means that registers must be saved on entry to this function. So we migrate the call to the first insn of this function. */ start_sequence (); /* Do whatever the machine needs done in this case. */ val = targetm.calls.expand_builtin_saveregs (); seq = get_insns (); end_sequence (); saveregs_value = val; /* Put the insns after the NOTE that starts the function. If this is inside a start_sequence, make the outer-level insn chain current, so the code is placed at the start of the function. */ push_topmost_sequence (); emit_insn_after (seq, entry_of_function ()); pop_topmost_sequence (); return val; } /* __builtin_args_info (N) returns word N of the arg space info for the current function. The number and meanings of words is controlled by the definition of CUMULATIVE_ARGS. */ static rtx expand_builtin_args_info (tree arglist) { int nwords = sizeof (CUMULATIVE_ARGS) / sizeof (int); int *word_ptr = (int *) ¤t_function_args_info; if (sizeof (CUMULATIVE_ARGS) % sizeof (int) != 0) abort (); if (arglist != 0) { if (!host_integerp (TREE_VALUE (arglist), 0)) error ("argument of `__builtin_args_info' must be constant"); else { HOST_WIDE_INT wordnum = tree_low_cst (TREE_VALUE (arglist), 0); if (wordnum < 0 || wordnum >= nwords) error ("argument of `__builtin_args_info' out of range"); else return GEN_INT (word_ptr[wordnum]); } } else error ("missing argument in `__builtin_args_info'"); return const0_rtx; } /* Expand ARGLIST, from a call to __builtin_next_arg. */ static rtx expand_builtin_next_arg (tree arglist) { tree fntype = TREE_TYPE (current_function_decl); if (TYPE_ARG_TYPES (fntype) == 0 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype))) == void_type_node)) { error ("`va_start' used in function with fixed args"); return const0_rtx; } if (arglist) { tree last_parm = tree_last (DECL_ARGUMENTS (current_function_decl)); tree arg = TREE_VALUE (arglist); /* Strip off all nops for the sake of the comparison. This is not quite the same as STRIP_NOPS. It does more. We must also strip off INDIRECT_EXPR for C++ reference parameters. */ while (TREE_CODE (arg) == NOP_EXPR || TREE_CODE (arg) == CONVERT_EXPR || TREE_CODE (arg) == NON_LVALUE_EXPR || TREE_CODE (arg) == INDIRECT_REF) arg = TREE_OPERAND (arg, 0); if (arg != last_parm) warning ("second parameter of `va_start' not last named argument"); } else /* Evidently an out of date version of ; can't validate va_start's second argument, but can still work as intended. 
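(A current <stdarg.h> passes the last named parameter as the second argument so it can be checked against DECL_ARGUMENTS above; presumably without it we only lose that sanity check.)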
*/ warning ("`__builtin_next_arg' called without an argument"); return expand_binop (Pmode, add_optab, current_function_internal_arg_pointer, current_function_arg_offset_rtx, NULL_RTX, 0, OPTAB_LIB_WIDEN); } /* Make it easier for the backends by protecting the valist argument from multiple evaluations. */ static tree stabilize_va_list (tree valist, int needs_lvalue) { if (TREE_CODE (va_list_type_node) == ARRAY_TYPE) { if (TREE_SIDE_EFFECTS (valist)) valist = save_expr (valist); /* For this case, the backends will be expecting a pointer to TREE_TYPE (va_list_type_node), but it's possible we've actually been given an array (an actual va_list_type_node). So fix it. */ if (TREE_CODE (TREE_TYPE (valist)) == ARRAY_TYPE) { tree p1 = build_pointer_type (TREE_TYPE (va_list_type_node)); valist = build_fold_addr_expr_with_type (valist, p1); } } else { tree pt; if (! needs_lvalue) { if (! TREE_SIDE_EFFECTS (valist)) return valist; pt = build_pointer_type (va_list_type_node); valist = fold (build1 (ADDR_EXPR, pt, valist)); TREE_SIDE_EFFECTS (valist) = 1; } if (TREE_SIDE_EFFECTS (valist)) valist = save_expr (valist); valist = build_fold_indirect_ref (valist); } return valist; } /* The "standard" definition of va_list is void*. */ tree std_build_builtin_va_list (void) { return ptr_type_node; } /* The "standard" implementation of va_start: just assign `nextarg' to the variable. */ void std_expand_builtin_va_start (tree valist, rtx nextarg) { tree t; t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, make_tree (ptr_type_node, nextarg)); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); } /* Expand ARGLIST, from a call to __builtin_va_start. */ static rtx expand_builtin_va_start (tree arglist) { rtx nextarg; tree chain, valist; chain = TREE_CHAIN (arglist); if (TREE_CHAIN (chain)) error ("too many arguments to function `va_start'"); nextarg = expand_builtin_next_arg (chain); valist = stabilize_va_list (TREE_VALUE (arglist), 1); #ifdef EXPAND_BUILTIN_VA_START EXPAND_BUILTIN_VA_START (valist, nextarg); #else std_expand_builtin_va_start (valist, nextarg); #endif return const0_rtx; } /* The "standard" implementation of va_arg: read the value from the current (padded) address and increment by the (padded) size. */ rtx std_expand_builtin_va_arg (tree valist, tree type) { tree addr_tree, t, type_size = NULL; tree align, alignm1; tree rounded_size; rtx addr; HOST_WIDE_INT boundary; /* Compute the rounded size of the type. */ align = size_int (PARM_BOUNDARY / BITS_PER_UNIT); alignm1 = size_int (PARM_BOUNDARY / BITS_PER_UNIT - 1); boundary = FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), type); /* va_list pointer is aligned to PARM_BOUNDARY. If argument actually requires greater alignment, we must perform dynamic alignment. 
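For example, on a hypothetical target with PARM_BOUNDARY == 32, an 8-byte type that requires 64-bit alignment first has the va_list pointer aligned to an 8-byte boundary here, and rounded_size below becomes ((8 + 3) / 4) * 4 == 8.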
*/ if (boundary > PARM_BOUNDARY) { if (!PAD_VARARGS_DOWN) { t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, build2 (PLUS_EXPR, TREE_TYPE (valist), valist, build_int_2 (boundary / BITS_PER_UNIT - 1, 0))); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); } t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, build2 (BIT_AND_EXPR, TREE_TYPE (valist), valist, build_int_2 (~(boundary / BITS_PER_UNIT - 1), -1))); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); } if (type == error_mark_node || (type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type))) == NULL || TREE_OVERFLOW (type_size)) rounded_size = size_zero_node; else { rounded_size = fold (build2 (PLUS_EXPR, sizetype, type_size, alignm1)); rounded_size = fold (build2 (TRUNC_DIV_EXPR, sizetype, rounded_size, align)); rounded_size = fold (build2 (MULT_EXPR, sizetype, rounded_size, align)); } /* Get AP. */ addr_tree = valist; if (PAD_VARARGS_DOWN && ! integer_zerop (rounded_size)) { /* Small args are padded downward. */ addr_tree = fold (build2 (PLUS_EXPR, TREE_TYPE (addr_tree), addr_tree, fold (build3 (COND_EXPR, sizetype, fold (build2 (GT_EXPR, sizetype, rounded_size, align)), size_zero_node, fold (build2 (MINUS_EXPR, sizetype, rounded_size, type_size)))))); } addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL); addr = copy_to_reg (addr); /* Compute new value for AP. */ if (! integer_zerop (rounded_size)) { t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, build2 (PLUS_EXPR, TREE_TYPE (valist), valist, rounded_size)); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); } return addr; } /* Expand __builtin_va_arg, which is not really a builtin function, but a very special sort of operator. */ rtx expand_builtin_va_arg (tree valist, tree type) { rtx addr, result; tree promoted_type, want_va_type, have_va_type; /* Verify that valist is of the proper type. */ want_va_type = va_list_type_node; have_va_type = TREE_TYPE (valist); if (TREE_CODE (want_va_type) == ARRAY_TYPE) { /* If va_list is an array type, the argument may have decayed to a pointer type, e.g. by being passed to another function. In that case, unwrap both types so that we can compare the underlying records. */ if (TREE_CODE (have_va_type) == ARRAY_TYPE || TREE_CODE (have_va_type) == POINTER_TYPE) { want_va_type = TREE_TYPE (want_va_type); have_va_type = TREE_TYPE (have_va_type); } } if (TYPE_MAIN_VARIANT (want_va_type) != TYPE_MAIN_VARIANT (have_va_type)) { error ("first argument to `va_arg' not of type `va_list'"); addr = const0_rtx; } /* Generate a diagnostic for requesting data of a type that cannot be passed through `...' due to type promotion at the call site. 
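For instance (editor's illustration): va_arg (ap, float) is diagnosed here, because a float argument undergoes the default argument promotions and arrives as a double, so the callee can never legitimately read a float from the argument area; char and short are likewise promoted to int. 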
*/ else if ((promoted_type = lang_hooks.types.type_promotes_to (type)) != type) { const char *name = "", *pname = 0; static bool gave_help; if (TYPE_NAME (type)) { if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE) name = IDENTIFIER_POINTER (TYPE_NAME (type)); else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && DECL_NAME (TYPE_NAME (type))) name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))); } if (TYPE_NAME (promoted_type)) { if (TREE_CODE (TYPE_NAME (promoted_type)) == IDENTIFIER_NODE) pname = IDENTIFIER_POINTER (TYPE_NAME (promoted_type)); else if (TREE_CODE (TYPE_NAME (promoted_type)) == TYPE_DECL && DECL_NAME (TYPE_NAME (promoted_type))) pname = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (promoted_type))); } /* Unfortunately, this is merely undefined, rather than a constraint violation, so we cannot make this an error. If this call is never executed, the program is still strictly conforming. */ warning ("`%s' is promoted to `%s' when passed through `...'", name, pname); if (! gave_help) { gave_help = true; warning ("(so you should pass `%s' not `%s' to `va_arg')", pname, name); } /* We can, however, treat "undefined" any way we please. Call abort to encourage the user to fix the program. */ inform ("if this code is reached, the program will abort"); expand_builtin_trap (); /* This is dead code, but go ahead and finish so that the mode of the result comes out right. */ addr = const0_rtx; } else { /* Make it easier for the backends by protecting the valist argument from multiple evaluations. */ valist = stabilize_va_list (valist, 0); #ifdef EXPAND_BUILTIN_VA_ARG addr = EXPAND_BUILTIN_VA_ARG (valist, type); #else addr = std_expand_builtin_va_arg (valist, type); #endif } addr = convert_memory_address (Pmode, addr); result = gen_rtx_MEM (TYPE_MODE (type), addr); set_mem_alias_set (result, get_varargs_alias_set ()); return result; } /* Like std_expand_builtin_va_arg, but gimplify instead of expanding. */ tree std_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p) { tree addr, t, type_size = NULL; tree align, alignm1; tree rounded_size; HOST_WIDE_INT boundary; /* Compute the rounded size of the type. */ align = size_int (PARM_BOUNDARY / BITS_PER_UNIT); alignm1 = size_int (PARM_BOUNDARY / BITS_PER_UNIT - 1); boundary = FUNCTION_ARG_BOUNDARY (TYPE_MODE (type), type); /* va_list pointer is aligned to PARM_BOUNDARY. If argument actually requires greater alignment, we must perform dynamic alignment. */ if (boundary > PARM_BOUNDARY) { if (!PAD_VARARGS_DOWN) { t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, build2 (PLUS_EXPR, TREE_TYPE (valist), valist, build_int_2 (boundary / BITS_PER_UNIT - 1, 0))); gimplify_stmt (&t); append_to_statement_list (t, pre_p); } t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, build2 (BIT_AND_EXPR, TREE_TYPE (valist), valist, build_int_2 (~(boundary / BITS_PER_UNIT - 1), -1))); gimplify_stmt (&t); append_to_statement_list (t, pre_p); } if (type == error_mark_node || (type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type))) == NULL || TREE_OVERFLOW (type_size)) rounded_size = size_zero_node; else { rounded_size = fold (build2 (PLUS_EXPR, sizetype, type_size, alignm1)); rounded_size = fold (build2 (TRUNC_DIV_EXPR, sizetype, rounded_size, align)); rounded_size = fold (build2 (MULT_EXPR, sizetype, rounded_size, align)); } /* Reduce rounded_size so it's sharable with the postqueue. */ gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue); /* Get AP. */ addr = valist; if (PAD_VARARGS_DOWN && ! 
integer_zerop (rounded_size)) { /* Small args are padded downward. */ addr = fold (build2 (PLUS_EXPR, TREE_TYPE (addr), addr, fold (build3 (COND_EXPR, sizetype, fold (build2 (GT_EXPR, sizetype, rounded_size, align)), size_zero_node, fold (build2 (MINUS_EXPR, sizetype, rounded_size, type_size)))))); } /* Compute new value for AP. */ if (! integer_zerop (rounded_size)) { t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, build2 (PLUS_EXPR, TREE_TYPE (valist), valist, rounded_size)); gimplify_stmt (&t); append_to_statement_list (t, post_p); } addr = fold_convert (build_pointer_type (type), addr); return build_fold_indirect_ref (addr); } /* Return a dummy expression of type TYPE in order to keep going after an error. */ static tree dummy_object (tree type) { tree t = convert (build_pointer_type (type), null_pointer_node); return build1 (INDIRECT_REF, type, t); } /* Like expand_builtin_va_arg, but gimplify instead of expanding. */ enum gimplify_status gimplify_va_arg_expr (tree *expr_p, tree *pre_p, tree *post_p) { tree promoted_type, want_va_type, have_va_type; tree valist = TREE_OPERAND (*expr_p, 0); tree type = TREE_TYPE (*expr_p); tree t; /* Verify that valist is of the proper type. */ want_va_type = va_list_type_node; have_va_type = TREE_TYPE (valist); if (have_va_type == error_mark_node) return GS_ERROR; if (TREE_CODE (want_va_type) == ARRAY_TYPE) { /* If va_list is an array type, the argument may have decayed to a pointer type, e.g. by being passed to another function. In that case, unwrap both types so that we can compare the underlying records. */ if (TREE_CODE (have_va_type) == ARRAY_TYPE || TREE_CODE (have_va_type) == POINTER_TYPE) { want_va_type = TREE_TYPE (want_va_type); have_va_type = TREE_TYPE (have_va_type); } } if (TYPE_MAIN_VARIANT (want_va_type) != TYPE_MAIN_VARIANT (have_va_type)) { error ("first argument to `va_arg' not of type `va_list'"); return GS_ERROR; } /* Generate a diagnostic for requesting data of a type that cannot be passed through `...' due to type promotion at the call site. */ else if ((promoted_type = lang_hooks.types.type_promotes_to (type)) != type) { static bool gave_help; /* Unfortunately, this is merely undefined, rather than a constraint violation, so we cannot make this an error. If this call is never executed, the program is still strictly conforming. */ warning ("`%T' is promoted to `%T' when passed through `...'", type, promoted_type); if (! gave_help) { gave_help = true; warning ("(so you should pass `%T' not `%T' to `va_arg')", promoted_type, type); } /* We can, however, treat "undefined" any way we please. Call abort to encourage the user to fix the program. */ inform ("if this code is reached, the program will abort"); t = build_function_call_expr (implicit_built_in_decls[BUILT_IN_TRAP], NULL); append_to_statement_list (t, pre_p); /* This is dead code, but go ahead and finish so that the mode of the result comes out right. */ *expr_p = dummy_object (type); return GS_ALL_DONE; } else { /* Make it easier for the backends by protecting the valist argument from multiple evaluations. */ if (TREE_CODE (va_list_type_node) == ARRAY_TYPE) { /* For this case, the backends will be expecting a pointer to TREE_TYPE (va_list_type_node), but it's possible we've actually been given an array (an actual va_list_type_node). So fix it. 
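A concrete case (editor's note): on targets that declare va_list along the lines of typedef struct tag va_list[1], a local va_list object still has array type here even though a va_list parameter would have decayed to a pointer; taking its address with the pointer type built below makes both forms look identical to the backend. 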
*/ if (TREE_CODE (TREE_TYPE (valist)) == ARRAY_TYPE) { tree p1 = build_pointer_type (TREE_TYPE (va_list_type_node)); valist = build_fold_addr_expr_with_type (valist, p1); } gimplify_expr (&valist, pre_p, post_p, is_gimple_val, fb_rvalue); } else gimplify_expr (&valist, pre_p, post_p, is_gimple_min_lval, fb_lvalue); if (!targetm.calls.gimplify_va_arg_expr) /* Once most targets are converted this should abort. */ return GS_ALL_DONE; *expr_p = targetm.calls.gimplify_va_arg_expr (valist, type, pre_p, post_p); return GS_OK; } } /* Expand ARGLIST, from a call to __builtin_va_end. */ static rtx expand_builtin_va_end (tree arglist) { tree valist = TREE_VALUE (arglist); /* Evaluate for side effects, if needed. I hate macros that don't do that. */ if (TREE_SIDE_EFFECTS (valist)) expand_expr (valist, const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } /* Expand ARGLIST, from a call to __builtin_va_copy. We do this as a builtin rather than just as an assignment in stdarg.h because of the nastiness of array-type va_list types. */ static rtx expand_builtin_va_copy (tree arglist) { tree dst, src, t; dst = TREE_VALUE (arglist); src = TREE_VALUE (TREE_CHAIN (arglist)); dst = stabilize_va_list (dst, 1); src = stabilize_va_list (src, 0); if (TREE_CODE (va_list_type_node) != ARRAY_TYPE) { t = build2 (MODIFY_EXPR, va_list_type_node, dst, src); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); } else { rtx dstb, srcb, size; /* Evaluate to pointers. */ dstb = expand_expr (dst, NULL_RTX, Pmode, EXPAND_NORMAL); srcb = expand_expr (src, NULL_RTX, Pmode, EXPAND_NORMAL); size = expand_expr (TYPE_SIZE_UNIT (va_list_type_node), NULL_RTX, VOIDmode, EXPAND_NORMAL); dstb = convert_memory_address (Pmode, dstb); srcb = convert_memory_address (Pmode, srcb); /* "Dereference" to BLKmode memories. */ dstb = gen_rtx_MEM (BLKmode, dstb); set_mem_alias_set (dstb, get_alias_set (TREE_TYPE (TREE_TYPE (dst)))); set_mem_align (dstb, TYPE_ALIGN (va_list_type_node)); srcb = gen_rtx_MEM (BLKmode, srcb); set_mem_alias_set (srcb, get_alias_set (TREE_TYPE (TREE_TYPE (src)))); set_mem_align (srcb, TYPE_ALIGN (va_list_type_node)); /* Copy. */ emit_block_move (dstb, srcb, size, BLOCK_OP_NORMAL); } return const0_rtx; } /* Expand a call to one of the builtin functions __builtin_frame_address or __builtin_return_address. */ static rtx expand_builtin_frame_address (tree fndecl, tree arglist) { /* The argument must be a nonnegative integer constant. It counts the number of frames to scan up the stack. The value is the return address saved in that frame. */ if (arglist == 0) /* Warning about missing arg was already issued. */ return const0_rtx; else if (! host_integerp (TREE_VALUE (arglist), 1)) { if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS) error ("invalid arg to `__builtin_frame_address'"); else error ("invalid arg to `__builtin_return_address'"); return const0_rtx; } else { rtx tem = expand_builtin_return_addr (DECL_FUNCTION_CODE (fndecl), tree_low_cst (TREE_VALUE (arglist), 1), hard_frame_pointer_rtx); /* Some ports cannot access arbitrary stack frames. */ if (tem == NULL) { if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS) warning ("unsupported arg to `__builtin_frame_address'"); else warning ("unsupported arg to `__builtin_return_address'"); return const0_rtx; } /* For __builtin_frame_address, return what we've got. */ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS) return tem; if (!REG_P (tem) && ! 
CONSTANT_P (tem)) tem = copy_to_mode_reg (Pmode, tem); return tem; } } /* Expand a call to the alloca builtin, with arguments ARGLIST. Return 0 if we failed and the caller should emit a normal call, otherwise try to get the result in TARGET, if convenient. */ static rtx expand_builtin_alloca (tree arglist, rtx target) { rtx op0; rtx result; /* In -fmudflap-instrumented code, alloca() and __builtin_alloca() should always expand to function calls. These can be intercepted in libmudflap. */ if (flag_mudflap) return 0; if (!validate_arglist (arglist, INTEGER_TYPE, VOID_TYPE)) return 0; /* Compute the argument. */ op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0); /* Allocate the desired space. */ result = allocate_dynamic_stack_space (op0, target, BITS_PER_UNIT); result = convert_memory_address (ptr_mode, result); return result; } /* Expand a call to a unary builtin. The arguments are in ARGLIST. Return 0 if a normal call should be emitted rather than expanding the function in-line. If convenient, the result should be placed in TARGET. SUBTARGET may be used as the target for computing one of EXP's operands. */ static rtx expand_builtin_unop (enum machine_mode target_mode, tree arglist, rtx target, rtx subtarget, optab op_optab) { rtx op0; if (!validate_arglist (arglist, INTEGER_TYPE, VOID_TYPE)) return 0; /* Compute the argument. */ op0 = expand_expr (TREE_VALUE (arglist), subtarget, VOIDmode, 0); /* Compute op, into TARGET if possible. Set TARGET to wherever the result comes back. */ target = expand_unop (TYPE_MODE (TREE_TYPE (TREE_VALUE (arglist))), op_optab, op0, target, 1); if (target == 0) abort (); return convert_to_mode (target_mode, target, 0); } /* If the string passed to fputs is a constant and is one character long, we attempt to transform this call into __builtin_fputc(). */ static rtx expand_builtin_fputs (tree arglist, rtx target, bool unlocked) { tree len, fn; tree fn_fputc = unlocked ? implicit_built_in_decls[BUILT_IN_FPUTC_UNLOCKED] : implicit_built_in_decls[BUILT_IN_FPUTC]; tree fn_fwrite = unlocked ? implicit_built_in_decls[BUILT_IN_FWRITE_UNLOCKED] : implicit_built_in_decls[BUILT_IN_FWRITE]; /* If the return value is used, or the replacement _DECL isn't initialized, don't do the transformation. */ if (target != const0_rtx || !fn_fputc || !fn_fwrite) return 0; /* Verify the arguments in the original call. */ if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; /* Get the length of the string passed to fputs. If the length can't be determined, punt. */ if (!(len = c_strlen (TREE_VALUE (arglist), 1)) || TREE_CODE (len) != INTEGER_CST) return 0; switch (compare_tree_int (len, 1)) { case -1: /* length is 0, delete the call entirely . */ { /* Evaluate and ignore the argument in case it has side-effects. */ expand_expr (TREE_VALUE (TREE_CHAIN (arglist)), const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } case 0: /* length is 1, call fputc. */ { const char *p = c_getstr (TREE_VALUE (arglist)); if (p != NULL) { /* New argument list transforming fputs(string, stream) to fputc(string[0], stream). */ arglist = build_tree_list (NULL_TREE, TREE_VALUE (TREE_CHAIN (arglist))); arglist = tree_cons (NULL_TREE, build_int_2 (p[0], 0), arglist); fn = fn_fputc; break; } } /* Fall through. */ case 1: /* length is greater than 1, call fwrite. */ { tree string_arg; /* If optimizing for size keep fputs. 
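Editor's illustration: without -Os, fputs ("abc", f) becomes fwrite ("abc", 1, 3, f), just as the earlier cases turn fputs ("", f) into a bare evaluation of the stream argument and fputs ("x", f) into fputc ('x', f); the fwrite form has to pass the extra size and count arguments, so when optimizing for size the original call is kept. 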
*/ if (optimize_size) return 0; string_arg = TREE_VALUE (arglist); /* New argument list transforming fputs(string, stream) to fwrite(string, 1, len, stream). */ arglist = build_tree_list (NULL_TREE, TREE_VALUE (TREE_CHAIN (arglist))); arglist = tree_cons (NULL_TREE, len, arglist); arglist = tree_cons (NULL_TREE, size_one_node, arglist); arglist = tree_cons (NULL_TREE, string_arg, arglist); fn = fn_fwrite; break; } default: abort (); } return expand_expr (build_function_call_expr (fn, arglist), const0_rtx, VOIDmode, EXPAND_NORMAL); } /* Expand a call to __builtin_expect. We return our argument and emit a NOTE_INSN_EXPECTED_VALUE note. This is the expansion of __builtin_expect in a non-jump context. */ static rtx expand_builtin_expect (tree arglist, rtx target) { tree exp, c; rtx note, rtx_c; if (arglist == NULL_TREE || TREE_CHAIN (arglist) == NULL_TREE) return const0_rtx; exp = TREE_VALUE (arglist); c = TREE_VALUE (TREE_CHAIN (arglist)); if (TREE_CODE (c) != INTEGER_CST) { error ("second arg to `__builtin_expect' must be a constant"); c = integer_zero_node; } target = expand_expr (exp, target, VOIDmode, EXPAND_NORMAL); /* Don't bother with expected value notes for integral constants. */ if (flag_guess_branch_prob && GET_CODE (target) != CONST_INT) { /* We do need to force this into a register so that we can be moderately sure to be able to correctly interpret the branch condition later. */ target = force_reg (GET_MODE (target), target); rtx_c = expand_expr (c, NULL_RTX, GET_MODE (target), EXPAND_NORMAL); note = emit_note (NOTE_INSN_EXPECTED_VALUE); NOTE_EXPECTED_VALUE (note) = gen_rtx_EQ (VOIDmode, target, rtx_c); } return target; } /* Like expand_builtin_expect, except do this in a jump context. This is called from do_jump if the conditional is a __builtin_expect. Return either a list of insns to emit the jump or NULL if we cannot optimize __builtin_expect. We need to optimize this at jump time so that machines like the PowerPC don't turn the test into a SCC operation, and then jump based on the test being 0/1. */ rtx expand_builtin_expect_jump (tree exp, rtx if_false_label, rtx if_true_label) { tree arglist = TREE_OPERAND (exp, 1); tree arg0 = TREE_VALUE (arglist); tree arg1 = TREE_VALUE (TREE_CHAIN (arglist)); rtx ret = NULL_RTX; /* Only handle __builtin_expect (test, 0) and __builtin_expect (test, 1). */ if (TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE && (integer_zerop (arg1) || integer_onep (arg1))) { rtx insn, drop_through_label, temp; /* Expand the jump insns. */ start_sequence (); do_jump (arg0, if_false_label, if_true_label); ret = get_insns (); drop_through_label = get_last_insn (); if (drop_through_label && GET_CODE (drop_through_label) == NOTE) drop_through_label = prev_nonnote_insn (drop_through_label); if (drop_through_label && GET_CODE (drop_through_label) != CODE_LABEL) drop_through_label = NULL_RTX; end_sequence (); if (! if_true_label) if_true_label = drop_through_label; if (! if_false_label) if_false_label = drop_through_label; /* Go through and add the expect's to each of the conditional jumps. */ insn = ret; while (insn != NULL_RTX) { rtx next = NEXT_INSN (insn); if (GET_CODE (insn) == JUMP_INSN && any_condjump_p (insn)) { rtx ifelse = SET_SRC (pc_set (insn)); rtx then_dest = XEXP (ifelse, 1); rtx else_dest = XEXP (ifelse, 2); int taken = -1; /* First check if we recognize any of the labels. 
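(Editor's illustration: a conditional jump whose THEN target is IF_TRUE_LABEL is taken exactly when the tested condition holds, so TAKEN starts out as 1; if the second argument of __builtin_expect was 0, the value is flipped further down before predict_insn_def records the prediction.) 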
*/ if (GET_CODE (then_dest) == LABEL_REF && XEXP (then_dest, 0) == if_true_label) taken = 1; else if (GET_CODE (then_dest) == LABEL_REF && XEXP (then_dest, 0) == if_false_label) taken = 0; else if (GET_CODE (else_dest) == LABEL_REF && XEXP (else_dest, 0) == if_false_label) taken = 1; else if (GET_CODE (else_dest) == LABEL_REF && XEXP (else_dest, 0) == if_true_label) taken = 0; /* Otherwise check where we drop through. */ else if (else_dest == pc_rtx) { if (next && GET_CODE (next) == NOTE) next = next_nonnote_insn (next); if (next && GET_CODE (next) == JUMP_INSN && any_uncondjump_p (next)) temp = XEXP (SET_SRC (pc_set (next)), 0); else temp = next; /* TEMP is either a CODE_LABEL, NULL_RTX or something else that can't possibly match either target label. */ if (temp == if_false_label) taken = 1; else if (temp == if_true_label) taken = 0; } else if (then_dest == pc_rtx) { if (next && GET_CODE (next) == NOTE) next = next_nonnote_insn (next); if (next && GET_CODE (next) == JUMP_INSN && any_uncondjump_p (next)) temp = XEXP (SET_SRC (pc_set (next)), 0); else temp = next; if (temp == if_false_label) taken = 0; else if (temp == if_true_label) taken = 1; } if (taken != -1) { /* If the test is expected to fail, reverse the probabilities. */ if (integer_zerop (arg1)) taken = 1 - taken; predict_insn_def (insn, PRED_BUILTIN_EXPECT, taken); } } insn = next; } } return ret; } void expand_builtin_trap (void) { #ifdef HAVE_trap if (HAVE_trap) emit_insn (gen_trap ()); else #endif emit_library_call (abort_libfunc, LCT_NORETURN, VOIDmode, 0); emit_barrier (); } /* Expand a call to fabs, fabsf or fabsl with arguments ARGLIST. Return 0 if a normal call should be emitted rather than expanding the function inline. If convenient, the result should be placed in TARGET. SUBTARGET may be used as the target for computing the operand. */ static rtx expand_builtin_fabs (tree arglist, rtx target, rtx subtarget) { enum machine_mode mode; tree arg; rtx op0; if (!validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; arg = TREE_VALUE (arglist); mode = TYPE_MODE (TREE_TYPE (arg)); op0 = expand_expr (arg, subtarget, VOIDmode, 0); return expand_abs (mode, op0, target, 0, safe_from_p (target, arg, 1)); } /* Expand a call to cabs, cabsf or cabsl with arguments ARGLIST. Return 0 if a normal call should be emitted rather than expanding the function inline. If convenient, the result should be placed in target. */ static rtx expand_builtin_cabs (tree arglist, rtx target) { enum machine_mode mode; tree arg; rtx op0; if (arglist == 0 || TREE_CHAIN (arglist)) return 0; arg = TREE_VALUE (arglist); if (TREE_CODE (TREE_TYPE (arg)) != COMPLEX_TYPE || TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) != REAL_TYPE) return 0; mode = TYPE_MODE (TREE_TYPE (arg)); op0 = expand_expr (arg, NULL_RTX, VOIDmode, 0); return expand_complex_abs (mode, op0, target, 0); } /* Create a new constant string literal and return a char* pointer to it. The STRING_CST value is the LEN characters at STR. 
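Illustrative use (editor's addition): the printf expansion below calls build_string_literal (6, "hello") when rewriting printf ("hello\n") as puts ("hello"); LEN counts the terminating NUL, so the array type constructed here is char[6]. 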
*/ static tree build_string_literal (int len, const char *str) { tree t, elem, index, type; t = build_string (len, str); elem = build_type_variant (char_type_node, 1, 0); index = build_index_type (build_int_2 (len - 1, 0)); type = build_array_type (elem, index); TREE_TYPE (t) = type; TREE_CONSTANT (t) = 1; TREE_INVARIANT (t) = 1; TREE_READONLY (t) = 1; TREE_STATIC (t) = 1; type = build_pointer_type (type); t = build1 (ADDR_EXPR, type, t); type = build_pointer_type (elem); t = build1 (NOP_EXPR, type, t); return t; } /* Expand a call to printf or printf_unlocked with argument list ARGLIST. Return 0 if a normal call should be emitted rather than transforming the function inline. If convenient, the result should be placed in TARGET with mode MODE. UNLOCKED indicates this is a printf_unlocked call. */ static rtx expand_builtin_printf (tree arglist, rtx target, enum machine_mode mode, bool unlocked) { tree fn_putchar = unlocked ? implicit_built_in_decls[BUILT_IN_PUTCHAR_UNLOCKED] : implicit_built_in_decls[BUILT_IN_PUTCHAR]; tree fn_puts = unlocked ? implicit_built_in_decls[BUILT_IN_PUTS_UNLOCKED] : implicit_built_in_decls[BUILT_IN_PUTS]; const char *fmt_str; tree fn, fmt, arg; /* If the return value is used, don't do the transformation. */ if (target != const0_rtx) return 0; /* Verify the required arguments in the original call. */ if (! arglist) return 0; fmt = TREE_VALUE (arglist); if (TREE_CODE (TREE_TYPE (fmt)) != POINTER_TYPE) return 0; arglist = TREE_CHAIN (arglist); /* Check whether the format is a literal string constant. */ fmt_str = c_getstr (fmt); if (fmt_str == NULL) return 0; /* If the format specifier was "%s\n", call __builtin_puts(arg). */ if (strcmp (fmt_str, "%s\n") == 0) { if (! arglist || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE || TREE_CHAIN (arglist)) return 0; fn = fn_puts; } /* If the format specifier was "%c", call __builtin_putchar(arg). */ else if (strcmp (fmt_str, "%c") == 0) { if (! arglist || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != INTEGER_TYPE || TREE_CHAIN (arglist)) return 0; fn = fn_putchar; } else { /* We can't handle anything else with % args or %% ... yet. */ if (strchr (fmt_str, '%')) return 0; if (arglist) return 0; /* If the format specifier was "", printf does nothing. */ if (fmt_str[0] == '\0') return const0_rtx; /* If the format specifier has length of 1, call putchar. */ if (fmt_str[1] == '\0') { /* Given printf("c"), (where c is any one character,) convert "c"[0] to an int and pass that to the replacement function. */ arg = build_int_2 (fmt_str[0], 0); arglist = build_tree_list (NULL_TREE, arg); fn = fn_putchar; } else { /* If the format specifier was "string\n", call puts("string"). */ size_t len = strlen (fmt_str); if (fmt_str[len - 1] == '\n') { /* Create a NUL-terminated string that's one char shorter than the original, stripping off the trailing '\n'. */ char *newstr = alloca (len); memcpy (newstr, fmt_str, len - 1); newstr[len - 1] = 0; arg = build_string_literal (len, newstr); arglist = build_tree_list (NULL_TREE, arg); fn = fn_puts; } else /* We'd like to arrange to call fputs(string,stdout) here, but we need stdout and don't have a way to get it yet. */ return 0; } } if (!fn) return 0; return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } /* Expand a call to fprintf or fprintf_unlocked with argument list ARGLIST. Return 0 if a normal call should be emitted rather than transforming the function inline. If convenient, the result should be placed in TARGET with mode MODE. 
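Illustrative transformations (editor's sketch): fprintf (f, "%s", s) becomes fputs (s, f); fprintf (f, "%c", c) becomes fputc (c, f); and a literal format with no conversion specifiers, e.g. fprintf (f, "done"), becomes fputs ("done", f), while an empty format only evaluates the stream argument. 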
UNLOCKED indicates this is a fprintf_unlocked call. */ static rtx expand_builtin_fprintf (tree arglist, rtx target, enum machine_mode mode, bool unlocked) { tree fn_fputc = unlocked ? implicit_built_in_decls[BUILT_IN_FPUTC_UNLOCKED] : implicit_built_in_decls[BUILT_IN_FPUTC]; tree fn_fputs = unlocked ? implicit_built_in_decls[BUILT_IN_FPUTS_UNLOCKED] : implicit_built_in_decls[BUILT_IN_FPUTS]; const char *fmt_str; tree fn, fmt, fp, arg; /* If the return value is used, don't do the transformation. */ if (target != const0_rtx) return 0; /* Verify the required arguments in the original call. */ if (! arglist) return 0; fp = TREE_VALUE (arglist); if (TREE_CODE (TREE_TYPE (fp)) != POINTER_TYPE) return 0; arglist = TREE_CHAIN (arglist); if (! arglist) return 0; fmt = TREE_VALUE (arglist); if (TREE_CODE (TREE_TYPE (fmt)) != POINTER_TYPE) return 0; arglist = TREE_CHAIN (arglist); /* Check whether the format is a literal string constant. */ fmt_str = c_getstr (fmt); if (fmt_str == NULL) return 0; /* If the format specifier was "%s", call __builtin_fputs(arg,fp). */ if (strcmp (fmt_str, "%s") == 0) { if (! arglist || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != POINTER_TYPE || TREE_CHAIN (arglist)) return 0; arg = TREE_VALUE (arglist); arglist = build_tree_list (NULL_TREE, fp); arglist = tree_cons (NULL_TREE, arg, arglist); fn = fn_fputs; } /* If the format specifier was "%c", call __builtin_fputc(arg,fp). */ else if (strcmp (fmt_str, "%c") == 0) { if (! arglist || TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) != INTEGER_TYPE || TREE_CHAIN (arglist)) return 0; arg = TREE_VALUE (arglist); arglist = build_tree_list (NULL_TREE, fp); arglist = tree_cons (NULL_TREE, arg, arglist); fn = fn_fputc; } else { /* We can't handle anything else with % args or %% ... yet. */ if (strchr (fmt_str, '%')) return 0; if (arglist) return 0; /* If the format specifier was "", fprintf does nothing. */ if (fmt_str[0] == '\0') { /* Evaluate and ignore FILE* argument for side-effects. */ expand_expr (fp, const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } /* When "string" doesn't contain %, replace all cases of fprintf(stream,string) with fputs(string,stream). The fputs builtin will take care of special cases like length == 1. */ arglist = build_tree_list (NULL_TREE, fp); arglist = tree_cons (NULL_TREE, fmt, arglist); fn = fn_fputs; } if (!fn) return 0; return expand_expr (build_function_call_expr (fn, arglist), target, mode, EXPAND_NORMAL); } /* Expand a call to sprintf with argument list ARGLIST. Return 0 if a normal call should be emitted rather than expanding the function inline. If convenient, the result should be placed in TARGET with mode MODE. */ static rtx expand_builtin_sprintf (tree arglist, rtx target, enum machine_mode mode) { tree orig_arglist, dest, fmt; const char *fmt_str; orig_arglist = arglist; /* Verify the required arguments in the original call. */ if (! arglist) return 0; dest = TREE_VALUE (arglist); if (TREE_CODE (TREE_TYPE (dest)) != POINTER_TYPE) return 0; arglist = TREE_CHAIN (arglist); if (! arglist) return 0; fmt = TREE_VALUE (arglist); if (TREE_CODE (TREE_TYPE (fmt)) != POINTER_TYPE) return 0; arglist = TREE_CHAIN (arglist); /* Check whether the format is a literal string constant. */ fmt_str = c_getstr (fmt); if (fmt_str == NULL) return 0; /* If the format doesn't contain % args or %%, use strcpy. */ if (strchr (fmt_str, '%') == 0) { tree fn = implicit_built_in_decls[BUILT_IN_STRCPY]; tree exp; if (arglist || ! 
fn) return 0; expand_expr (build_function_call_expr (fn, orig_arglist), const0_rtx, VOIDmode, EXPAND_NORMAL); if (target == const0_rtx) return const0_rtx; exp = build_int_2 (strlen (fmt_str), 0); exp = fold_convert (integer_type_node, exp); return expand_expr (exp, target, mode, EXPAND_NORMAL); } /* If the format is "%s", use strcpy if the result isn't used. */ else if (strcmp (fmt_str, "%s") == 0) { tree fn, arg, len; fn = implicit_built_in_decls[BUILT_IN_STRCPY]; if (! fn) return 0; if (! arglist || TREE_CHAIN (arglist)) return 0; arg = TREE_VALUE (arglist); if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE) return 0; if (target != const0_rtx) { len = c_strlen (arg, 1); if (! len || TREE_CODE (len) != INTEGER_CST) return 0; } else len = NULL_TREE; arglist = build_tree_list (NULL_TREE, arg); arglist = tree_cons (NULL_TREE, dest, arglist); expand_expr (build_function_call_expr (fn, arglist), const0_rtx, VOIDmode, EXPAND_NORMAL); if (target == const0_rtx) return const0_rtx; return expand_expr (len, target, mode, EXPAND_NORMAL); } return 0; } /* Expand a call to either the entry or exit function profiler. */ static rtx expand_builtin_profile_func (bool exitp) { rtx this, which; this = DECL_RTL (current_function_decl); if (MEM_P (this)) this = XEXP (this, 0); else abort (); if (exitp) which = profile_function_exit_libfunc; else which = profile_function_entry_libfunc; emit_library_call (which, LCT_NORMAL, VOIDmode, 2, this, Pmode, expand_builtin_return_addr (BUILT_IN_RETURN_ADDRESS, 0, hard_frame_pointer_rtx), Pmode); return const0_rtx; } /* Given a trampoline address, make sure it satisfies TRAMPOLINE_ALIGNMENT. */ static rtx round_trampoline_addr (rtx tramp) { rtx temp, addend, mask; /* If we don't need too much alignment, we'll have been guaranteed proper alignment by get_trampoline_type. */ if (TRAMPOLINE_ALIGNMENT <= STACK_BOUNDARY) return tramp; /* Round address up to desired boundary. */ temp = gen_reg_rtx (Pmode); addend = GEN_INT (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT - 1); mask = GEN_INT (-TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT); temp = expand_simple_binop (Pmode, PLUS, tramp, addend, temp, 0, OPTAB_LIB_WIDEN); tramp = expand_simple_binop (Pmode, AND, temp, mask, temp, 0, OPTAB_LIB_WIDEN); return tramp; } static rtx expand_builtin_init_trampoline (tree arglist) { tree t_tramp, t_func, t_chain; rtx r_tramp, r_func, r_chain; #ifdef TRAMPOLINE_TEMPLATE rtx blktramp; #endif if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return NULL_RTX; t_tramp = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); t_func = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); t_chain = TREE_VALUE (arglist); r_tramp = expand_expr (t_tramp, NULL_RTX, VOIDmode, 0); r_func = expand_expr (t_func, NULL_RTX, VOIDmode, 0); r_chain = expand_expr (t_chain, NULL_RTX, VOIDmode, 0); /* Generate insns to initialize the trampoline. 
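Editor's worked example: when TRAMPOLINE_ALIGNMENT is 64 bits and exceeds STACK_BOUNDARY, round_trampoline_addr below emits r_tramp = (r_tramp + 7) & -8; the template (when TRAMPOLINE_TEMPLATE is defined) is then block-copied to that address before INITIALIZE_TRAMPOLINE patches in the function address and static chain. 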
*/ r_tramp = round_trampoline_addr (r_tramp); #ifdef TRAMPOLINE_TEMPLATE blktramp = gen_rtx_MEM (BLKmode, r_tramp); set_mem_align (blktramp, TRAMPOLINE_ALIGNMENT); emit_block_move (blktramp, assemble_trampoline_template (), GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL); #endif trampolines_created = 1; INITIALIZE_TRAMPOLINE (r_tramp, r_func, r_chain); return const0_rtx; } static rtx expand_builtin_adjust_trampoline (tree arglist) { rtx tramp; if (!validate_arglist (arglist, POINTER_TYPE, VOID_TYPE)) return NULL_RTX; tramp = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0); tramp = round_trampoline_addr (tramp); #ifdef TRAMPOLINE_ADJUST_ADDRESS TRAMPOLINE_ADJUST_ADDRESS (tramp); #endif return tramp; } /* Expand a call to the built-in signbit, signbitf or signbitl function. Return NULL_RTX if a normal call should be emitted rather than expanding the function in-line. EXP is the expression that is a call to the builtin function; if convenient, the result should be placed in TARGET. */ static rtx expand_builtin_signbit (tree exp, rtx target) { const struct real_format *fmt; enum machine_mode fmode, imode, rmode; HOST_WIDE_INT hi, lo; tree arg, arglist; int bitpos; rtx temp; arglist = TREE_OPERAND (exp, 1); if (!validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; arg = TREE_VALUE (arglist); fmode = TYPE_MODE (TREE_TYPE (arg)); rmode = TYPE_MODE (TREE_TYPE (exp)); fmt = REAL_MODE_FORMAT (fmode); /* For floating point formats without a sign bit, implement signbit as "ARG < 0.0". */ if (fmt->signbit < 0) { /* But we can't do this if the format supports signed zero. */ if (fmt->has_signed_zero && HONOR_SIGNED_ZEROS (fmode)) return 0; arg = fold (build2 (LT_EXPR, TREE_TYPE (exp), arg, build_real (TREE_TYPE (arg), dconst0))); return expand_expr (arg, target, VOIDmode, EXPAND_NORMAL); } imode = int_mode_for_mode (fmode); if (imode == BLKmode) return 0; bitpos = fmt->signbit; /* Handle targets with different FP word orders. */ if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN) { int nwords = GET_MODE_BITSIZE (fmode) / BITS_PER_WORD; int word = nwords - (bitpos / BITS_PER_WORD) - 1; bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD; } /* If the sign bit is not in the lowpart and the floating point format is wider than an integer, check that is twice the size of an integer so that we can use gen_highpart below. */ if (bitpos >= GET_MODE_BITSIZE (rmode) && GET_MODE_BITSIZE (imode) != 2 * GET_MODE_BITSIZE (rmode)) return 0; temp = expand_expr (arg, NULL_RTX, VOIDmode, 0); temp = gen_lowpart (imode, temp); if (GET_MODE_BITSIZE (imode) > GET_MODE_BITSIZE (rmode)) { if (BYTES_BIG_ENDIAN) bitpos = GET_MODE_BITSIZE (imode) - 1 - bitpos; temp = copy_to_mode_reg (imode, temp); temp = extract_bit_field (temp, 1, bitpos, 1, NULL_RTX, rmode, rmode, GET_MODE_SIZE (imode)); } else { if (GET_MODE_BITSIZE (imode) < GET_MODE_BITSIZE (rmode)) temp = gen_lowpart (rmode, temp); if (bitpos < HOST_BITS_PER_WIDE_INT) { hi = 0; lo = (HOST_WIDE_INT) 1 << bitpos; } else { hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT); lo = 0; } temp = force_reg (rmode, temp); temp = expand_binop (rmode, and_optab, temp, immed_double_const (lo, hi, rmode), target, 1, OPTAB_LIB_WIDEN); } return temp; } /* Expand fork or exec calls. TARGET is the desired target of the call. ARGLIST is the list of arguments of the call. FN is the identificator of the actual function. IGNORE is nonzero if the value is to be ignored. 
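Illustrative behaviour (editor's note): when compiling with -fprofile-arcs, a call such as fork () is re-routed to the libgcov wrapper __gcov_fork so that the profile counters stay consistent across the fork; without profiling the builtin simply expands as an ordinary call. 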
*/ static rtx expand_builtin_fork_or_exec (tree fn, tree arglist, rtx target, int ignore) { tree id, decl; tree call; /* If we are not profiling, just call the function. */ if (!profile_arc_flag) return NULL_RTX; /* Otherwise call the wrapper. This should be equivalent for the rest of compiler, so the code does not diverge, and the wrapper may run the code necessary for keeping the profiling sane. */ switch (DECL_FUNCTION_CODE (fn)) { case BUILT_IN_FORK: id = get_identifier ("__gcov_fork"); break; case BUILT_IN_EXECL: id = get_identifier ("__gcov_execl"); break; case BUILT_IN_EXECV: id = get_identifier ("__gcov_execv"); break; case BUILT_IN_EXECLP: id = get_identifier ("__gcov_execlp"); break; case BUILT_IN_EXECLE: id = get_identifier ("__gcov_execle"); break; case BUILT_IN_EXECVP: id = get_identifier ("__gcov_execvp"); break; case BUILT_IN_EXECVE: id = get_identifier ("__gcov_execve"); break; default: abort (); } decl = build_decl (FUNCTION_DECL, id, TREE_TYPE (fn)); DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; DECL_ARTIFICIAL (decl) = 1; TREE_NOTHROW (decl) = 1; call = build_function_call_expr (decl, arglist); return expand_call (call, target, ignore); } /* Expand an expression EXP that calls a built-in function, with result going to TARGET if that's convenient (and in mode MODE if that's convenient). SUBTARGET may be used as the target for computing one of EXP's operands. IGNORE is nonzero if the value is to be ignored. */ rtx expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode, int ignore) { tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp)); /* Perform postincrements before expanding builtin functions. */ emit_queue (); if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD) return targetm.expand_builtin (exp, target, subtarget, mode, ignore); /* When not optimizing, generate calls to library functions for a certain set of builtins. */ if (!optimize && !CALLED_AS_BUILT_IN (fndecl) && DECL_ASSEMBLER_NAME_SET_P (fndecl) && fcode != BUILT_IN_ALLOCA) return expand_call (exp, target, ignore); /* The built-in function expanders test for target == const0_rtx to determine whether the function's result will be ignored. */ if (ignore) target = const0_rtx; /* If the result of a pure or const built-in function is ignored, and none of its arguments are volatile, we can avoid expanding the built-in call and just evaluate the arguments for side-effects. */ if (target == const0_rtx && (DECL_IS_PURE (fndecl) || TREE_READONLY (fndecl))) { bool volatilep = false; tree arg; for (arg = arglist; arg; arg = TREE_CHAIN (arg)) if (TREE_THIS_VOLATILE (TREE_VALUE (arg))) { volatilep = true; break; } if (! 
volatilep) { for (arg = arglist; arg; arg = TREE_CHAIN (arg)) expand_expr (TREE_VALUE (arg), const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } } switch (fcode) { case BUILT_IN_FABS: case BUILT_IN_FABSF: case BUILT_IN_FABSL: target = expand_builtin_fabs (arglist, target, subtarget); if (target) return target; break; case BUILT_IN_CABS: case BUILT_IN_CABSF: case BUILT_IN_CABSL: if (flag_unsafe_math_optimizations) { target = expand_builtin_cabs (arglist, target); if (target) return target; } break; case BUILT_IN_EXP: case BUILT_IN_EXPF: case BUILT_IN_EXPL: case BUILT_IN_EXP10: case BUILT_IN_EXP10F: case BUILT_IN_EXP10L: case BUILT_IN_POW10: case BUILT_IN_POW10F: case BUILT_IN_POW10L: case BUILT_IN_EXP2: case BUILT_IN_EXP2F: case BUILT_IN_EXP2L: case BUILT_IN_EXPM1: case BUILT_IN_EXPM1F: case BUILT_IN_EXPM1L: case BUILT_IN_LOGB: case BUILT_IN_LOGBF: case BUILT_IN_LOGBL: case BUILT_IN_ILOGB: case BUILT_IN_ILOGBF: case BUILT_IN_ILOGBL: case BUILT_IN_LOG: case BUILT_IN_LOGF: case BUILT_IN_LOGL: case BUILT_IN_LOG10: case BUILT_IN_LOG10F: case BUILT_IN_LOG10L: case BUILT_IN_LOG2: case BUILT_IN_LOG2F: case BUILT_IN_LOG2L: case BUILT_IN_LOG1P: case BUILT_IN_LOG1PF: case BUILT_IN_LOG1PL: case BUILT_IN_TAN: case BUILT_IN_TANF: case BUILT_IN_TANL: case BUILT_IN_ASIN: case BUILT_IN_ASINF: case BUILT_IN_ASINL: case BUILT_IN_ACOS: case BUILT_IN_ACOSF: case BUILT_IN_ACOSL: case BUILT_IN_ATAN: case BUILT_IN_ATANF: case BUILT_IN_ATANL: /* Treat these like sqrt only if unsafe math optimizations are allowed, because of possible accuracy problems. */ if (! flag_unsafe_math_optimizations) break; case BUILT_IN_SQRT: case BUILT_IN_SQRTF: case BUILT_IN_SQRTL: case BUILT_IN_FLOOR: case BUILT_IN_FLOORF: case BUILT_IN_FLOORL: case BUILT_IN_CEIL: case BUILT_IN_CEILF: case BUILT_IN_CEILL: case BUILT_IN_TRUNC: case BUILT_IN_TRUNCF: case BUILT_IN_TRUNCL: case BUILT_IN_ROUND: case BUILT_IN_ROUNDF: case BUILT_IN_ROUNDL: case BUILT_IN_NEARBYINT: case BUILT_IN_NEARBYINTF: case BUILT_IN_NEARBYINTL: target = expand_builtin_mathfn (exp, target, subtarget); if (target) return target; break; case BUILT_IN_POW: case BUILT_IN_POWF: case BUILT_IN_POWL: target = expand_builtin_pow (exp, target, subtarget); if (target) return target; break; case BUILT_IN_ATAN2: case BUILT_IN_ATAN2F: case BUILT_IN_ATAN2L: case BUILT_IN_FMOD: case BUILT_IN_FMODF: case BUILT_IN_FMODL: case BUILT_IN_DREM: case BUILT_IN_DREMF: case BUILT_IN_DREML: if (! flag_unsafe_math_optimizations) break; target = expand_builtin_mathfn_2 (exp, target, subtarget); if (target) return target; break; case BUILT_IN_SIN: case BUILT_IN_SINF: case BUILT_IN_SINL: case BUILT_IN_COS: case BUILT_IN_COSF: case BUILT_IN_COSL: if (! flag_unsafe_math_optimizations) break; target = expand_builtin_mathfn_3 (exp, target, subtarget); if (target) return target; break; case BUILT_IN_APPLY_ARGS: return expand_builtin_apply_args (); /* __builtin_apply (FUNCTION, ARGUMENTS, ARGSIZE) invokes FUNCTION with a copy of the parameters described by ARGUMENTS, and ARGSIZE. It returns a block of memory allocated on the stack into which is stored all the registers that might possibly be used for returning the result of a function. ARGUMENTS is the value returned by __builtin_apply_args. ARGSIZE is the number of bytes of arguments that must be copied. ??? How should this value be computed? We'll also need a safe worst case value for varargs functions. 
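Editor's illustrative usage: __builtin_return (__builtin_apply ((void (*) ()) fn, __builtin_apply_args (), 64)) forwards the current function's incoming arguments to fn and returns whatever fn returned; the literal 64 is the caller's guess at ARGSIZE, which is exactly the open question raised above. 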
*/ case BUILT_IN_APPLY: if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE) && !validate_arglist (arglist, REFERENCE_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return const0_rtx; else { int i; tree t; rtx ops[3]; for (t = arglist, i = 0; t; t = TREE_CHAIN (t), i++) ops[i] = expand_expr (TREE_VALUE (t), NULL_RTX, VOIDmode, 0); return expand_builtin_apply (ops[0], ops[1], ops[2]); } /* __builtin_return (RESULT) causes the function to return the value described by RESULT. RESULT is address of the block of memory returned by __builtin_apply. */ case BUILT_IN_RETURN: if (validate_arglist (arglist, POINTER_TYPE, VOID_TYPE)) expand_builtin_return (expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0)); return const0_rtx; case BUILT_IN_SAVEREGS: return expand_builtin_saveregs (); case BUILT_IN_ARGS_INFO: return expand_builtin_args_info (arglist); /* Return the address of the first anonymous stack arg. */ case BUILT_IN_NEXT_ARG: simplify_builtin_next_arg (arglist); return expand_builtin_next_arg (arglist); case BUILT_IN_CLASSIFY_TYPE: return expand_builtin_classify_type (arglist); case BUILT_IN_CONSTANT_P: return const0_rtx; case BUILT_IN_FRAME_ADDRESS: case BUILT_IN_RETURN_ADDRESS: return expand_builtin_frame_address (fndecl, arglist); /* Returns the address of the area where the structure is returned. 0 otherwise. */ case BUILT_IN_AGGREGATE_INCOMING_ADDRESS: if (arglist != 0 || ! AGGREGATE_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))) || !MEM_P (DECL_RTL (DECL_RESULT (current_function_decl)))) return const0_rtx; else return XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0); case BUILT_IN_ALLOCA: target = expand_builtin_alloca (arglist, target); if (target) return target; break; case BUILT_IN_STACK_ALLOC: expand_stack_alloc (TREE_VALUE (arglist), TREE_VALUE (TREE_CHAIN (arglist))); return const0_rtx; case BUILT_IN_STACK_SAVE: return expand_stack_save (); case BUILT_IN_STACK_RESTORE: expand_stack_restore (TREE_VALUE (arglist)); return const0_rtx; case BUILT_IN_FFS: case BUILT_IN_FFSL: case BUILT_IN_FFSLL: target = expand_builtin_unop (target_mode, arglist, target, subtarget, ffs_optab); if (target) return target; break; case BUILT_IN_CLZ: case BUILT_IN_CLZL: case BUILT_IN_CLZLL: target = expand_builtin_unop (target_mode, arglist, target, subtarget, clz_optab); if (target) return target; break; case BUILT_IN_CTZ: case BUILT_IN_CTZL: case BUILT_IN_CTZLL: target = expand_builtin_unop (target_mode, arglist, target, subtarget, ctz_optab); if (target) return target; break; case BUILT_IN_POPCOUNT: case BUILT_IN_POPCOUNTL: case BUILT_IN_POPCOUNTLL: target = expand_builtin_unop (target_mode, arglist, target, subtarget, popcount_optab); if (target) return target; break; case BUILT_IN_PARITY: case BUILT_IN_PARITYL: case BUILT_IN_PARITYLL: target = expand_builtin_unop (target_mode, arglist, target, subtarget, parity_optab); if (target) return target; break; case BUILT_IN_STRLEN: target = expand_builtin_strlen (arglist, target, target_mode); if (target) return target; break; case BUILT_IN_STRCPY: target = expand_builtin_strcpy (arglist, target, mode); if (target) return target; break; case BUILT_IN_STRNCPY: target = expand_builtin_strncpy (arglist, target, mode); if (target) return target; break; case BUILT_IN_STPCPY: target = expand_builtin_stpcpy (arglist, target, mode); if (target) return target; break; case BUILT_IN_STRCAT: target = expand_builtin_strcat (arglist, target, mode); if (target) return target; break; case BUILT_IN_STRNCAT: target = 
expand_builtin_strncat (arglist, target, mode); if (target) return target; break; case BUILT_IN_STRSPN: target = expand_builtin_strspn (arglist, target, mode); if (target) return target; break; case BUILT_IN_STRCSPN: target = expand_builtin_strcspn (arglist, target, mode); if (target) return target; break; case BUILT_IN_STRSTR: target = expand_builtin_strstr (arglist, target, mode); if (target) return target; break; case BUILT_IN_STRPBRK: target = expand_builtin_strpbrk (arglist, target, mode); if (target) return target; break; case BUILT_IN_INDEX: case BUILT_IN_STRCHR: target = expand_builtin_strchr (arglist, target, mode); if (target) return target; break; case BUILT_IN_RINDEX: case BUILT_IN_STRRCHR: target = expand_builtin_strrchr (arglist, target, mode); if (target) return target; break; case BUILT_IN_MEMCPY: target = expand_builtin_memcpy (arglist, target, mode); if (target) return target; break; case BUILT_IN_MEMPCPY: target = expand_builtin_mempcpy (arglist, target, mode, /*endp=*/ 1); if (target) return target; break; case BUILT_IN_MEMMOVE: target = expand_builtin_memmove (arglist, target, mode); if (target) return target; break; case BUILT_IN_BCOPY: target = expand_builtin_bcopy (arglist); if (target) return target; break; case BUILT_IN_MEMSET: target = expand_builtin_memset (arglist, target, mode); if (target) return target; break; case BUILT_IN_BZERO: target = expand_builtin_bzero (arglist); if (target) return target; break; case BUILT_IN_STRCMP: target = expand_builtin_strcmp (exp, target, mode); if (target) return target; break; case BUILT_IN_STRNCMP: target = expand_builtin_strncmp (exp, target, mode); if (target) return target; break; case BUILT_IN_BCMP: case BUILT_IN_MEMCMP: target = expand_builtin_memcmp (exp, arglist, target, mode); if (target) return target; break; case BUILT_IN_SETJMP: target = expand_builtin_setjmp (arglist, target); if (target) return target; break; /* __builtin_longjmp is passed a pointer to an array of five words. It's similar to the C library longjmp function but works with __builtin_setjmp above. */ case BUILT_IN_LONGJMP: if (!validate_arglist (arglist, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) break; else { rtx buf_addr = expand_expr (TREE_VALUE (arglist), subtarget, VOIDmode, 0); rtx value = expand_expr (TREE_VALUE (TREE_CHAIN (arglist)), NULL_RTX, VOIDmode, 0); if (value != const1_rtx) { error ("__builtin_longjmp second argument must be 1"); return const0_rtx; } expand_builtin_longjmp (buf_addr, value); return const0_rtx; } case BUILT_IN_NONLOCAL_GOTO: target = expand_builtin_nonlocal_goto (arglist); if (target) return target; break; /* This updates the setjmp buffer that is its argument with the value of the current stack pointer. 
*/ case BUILT_IN_UPDATE_SETJMP_BUF: if (validate_arglist (arglist, POINTER_TYPE, VOID_TYPE)) { rtx buf_addr = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0); expand_builtin_update_setjmp_buf (buf_addr); return const0_rtx; } break; case BUILT_IN_TRAP: expand_builtin_trap (); return const0_rtx; case BUILT_IN_PRINTF: target = expand_builtin_printf (arglist, target, mode, false); if (target) return target; break; case BUILT_IN_PRINTF_UNLOCKED: target = expand_builtin_printf (arglist, target, mode, true); if (target) return target; break; case BUILT_IN_FPUTS: target = expand_builtin_fputs (arglist, target, false); if (target) return target; break; case BUILT_IN_FPUTS_UNLOCKED: target = expand_builtin_fputs (arglist, target, true); if (target) return target; break; case BUILT_IN_FPRINTF: target = expand_builtin_fprintf (arglist, target, mode, false); if (target) return target; break; case BUILT_IN_FPRINTF_UNLOCKED: target = expand_builtin_fprintf (arglist, target, mode, true); if (target) return target; break; case BUILT_IN_SPRINTF: target = expand_builtin_sprintf (arglist, target, mode); if (target) return target; break; case BUILT_IN_SIGNBIT: case BUILT_IN_SIGNBITF: case BUILT_IN_SIGNBITL: target = expand_builtin_signbit (exp, target); if (target) return target; break; /* Various hooks for the DWARF 2 __throw routine. */ case BUILT_IN_UNWIND_INIT: expand_builtin_unwind_init (); return const0_rtx; case BUILT_IN_DWARF_CFA: return virtual_cfa_rtx; #ifdef DWARF2_UNWIND_INFO case BUILT_IN_DWARF_SP_COLUMN: return expand_builtin_dwarf_sp_column (); case BUILT_IN_INIT_DWARF_REG_SIZES: expand_builtin_init_dwarf_reg_sizes (TREE_VALUE (arglist)); return const0_rtx; #endif case BUILT_IN_FROB_RETURN_ADDR: return expand_builtin_frob_return_addr (TREE_VALUE (arglist)); case BUILT_IN_EXTRACT_RETURN_ADDR: return expand_builtin_extract_return_addr (TREE_VALUE (arglist)); case BUILT_IN_EH_RETURN: expand_builtin_eh_return (TREE_VALUE (arglist), TREE_VALUE (TREE_CHAIN (arglist))); return const0_rtx; #ifdef EH_RETURN_DATA_REGNO case BUILT_IN_EH_RETURN_DATA_REGNO: return expand_builtin_eh_return_data_regno (arglist); #endif case BUILT_IN_EXTEND_POINTER: return expand_builtin_extend_pointer (TREE_VALUE (arglist)); case BUILT_IN_VA_START: case BUILT_IN_STDARG_START: return expand_builtin_va_start (arglist); case BUILT_IN_VA_END: return expand_builtin_va_end (arglist); case BUILT_IN_VA_COPY: return expand_builtin_va_copy (arglist); case BUILT_IN_EXPECT: return expand_builtin_expect (arglist, target); case BUILT_IN_PREFETCH: expand_builtin_prefetch (arglist); return const0_rtx; case BUILT_IN_PROFILE_FUNC_ENTER: return expand_builtin_profile_func (false); case BUILT_IN_PROFILE_FUNC_EXIT: return expand_builtin_profile_func (true); case BUILT_IN_INIT_TRAMPOLINE: return expand_builtin_init_trampoline (arglist); case BUILT_IN_ADJUST_TRAMPOLINE: return expand_builtin_adjust_trampoline (arglist); case BUILT_IN_FORK: case BUILT_IN_EXECL: case BUILT_IN_EXECV: case BUILT_IN_EXECLP: case BUILT_IN_EXECLE: case BUILT_IN_EXECVP: case BUILT_IN_EXECVE: target = expand_builtin_fork_or_exec (fndecl, arglist, target, ignore); if (target) return target; break; default: /* just do library call, if unknown builtin */ break; } /* The switch statement above can drop through to cause the function to be called normally. */ return expand_call (exp, target, ignore); } /* Determine whether a tree node represents a call to a built-in function. 
If the tree T is a call to a built-in function with the right number of arguments of the appropriate types, return the DECL_FUNCTION_CODE of the call, e.g. BUILT_IN_SQRT. Otherwise the return value is END_BUILTINS. */ enum built_in_function builtin_mathfn_code (tree t) { tree fndecl, arglist, parmlist; tree argtype, parmtype; if (TREE_CODE (t) != CALL_EXPR || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR) return END_BUILTINS; fndecl = get_callee_fndecl (t); if (fndecl == NULL_TREE || TREE_CODE (fndecl) != FUNCTION_DECL || ! DECL_BUILT_IN (fndecl) || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD) return END_BUILTINS; arglist = TREE_OPERAND (t, 1); parmlist = TYPE_ARG_TYPES (TREE_TYPE (fndecl)); for (; parmlist; parmlist = TREE_CHAIN (parmlist)) { /* If a function doesn't take a variable number of arguments, the last element in the list will have type `void'. */ parmtype = TREE_VALUE (parmlist); if (VOID_TYPE_P (parmtype)) { if (arglist) return END_BUILTINS; return DECL_FUNCTION_CODE (fndecl); } if (! arglist) return END_BUILTINS; argtype = TREE_TYPE (TREE_VALUE (arglist)); if (SCALAR_FLOAT_TYPE_P (parmtype)) { if (! SCALAR_FLOAT_TYPE_P (argtype)) return END_BUILTINS; } else if (COMPLEX_FLOAT_TYPE_P (parmtype)) { if (! COMPLEX_FLOAT_TYPE_P (argtype)) return END_BUILTINS; } else if (POINTER_TYPE_P (parmtype)) { if (! POINTER_TYPE_P (argtype)) return END_BUILTINS; } else if (INTEGRAL_TYPE_P (parmtype)) { if (! INTEGRAL_TYPE_P (argtype)) return END_BUILTINS; } else return END_BUILTINS; arglist = TREE_CHAIN (arglist); } /* Variable-length argument list. */ return DECL_FUNCTION_CODE (fndecl); } /* Fold a call to __builtin_constant_p, if we know it will evaluate to a constant. ARGLIST is the argument list of the call. */ static tree fold_builtin_constant_p (tree arglist) { if (arglist == 0) return 0; arglist = TREE_VALUE (arglist); /* We return 1 for a numeric type that's known to be a constant value at compile-time or for an aggregate type that's a literal constant. */ STRIP_NOPS (arglist); /* If we know this is a constant, emit the constant of one. */ if (TREE_CODE_CLASS (TREE_CODE (arglist)) == 'c' || (TREE_CODE (arglist) == CONSTRUCTOR && TREE_CONSTANT (arglist)) || (TREE_CODE (arglist) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (arglist, 0)) == STRING_CST)) return integer_one_node; /* If this expression has side effects, show we don't know it to be a constant. Likewise if it's a pointer or aggregate type since in those case we only want literals, since those are only optimized when generating RTL, not later. And finally, if we are compiling an initializer, not code, we need to return a definite result now; there's not going to be any more optimization done. */ if (TREE_SIDE_EFFECTS (arglist) || AGGREGATE_TYPE_P (TREE_TYPE (arglist)) || POINTER_TYPE_P (TREE_TYPE (arglist)) || cfun == 0) return integer_zero_node; return 0; } /* Fold a call to __builtin_expect, if we expect that a comparison against the argument will fold to a constant. In practice, this means a true constant or the address of a non-weak symbol. ARGLIST is the argument list of the call. */ static tree fold_builtin_expect (tree arglist) { tree arg, inner; if (arglist == 0) return 0; arg = TREE_VALUE (arglist); /* If the argument isn't invariant, then there's nothing we can do. */ if (!TREE_INVARIANT (arg)) return 0; /* If we're looking at an address of a weak decl, then do not fold. 
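(Editor's example: for __builtin_expect ((long) &weak_fn, 0) the stripped argument is the address of a weak declaration, so the call is left alone; a weak symbol may still resolve to address zero at link time, so a comparison against its address cannot be treated as a compile-time constant.) 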
*/ inner = arg; STRIP_NOPS (inner); if (TREE_CODE (inner) == ADDR_EXPR) { do { inner = TREE_OPERAND (inner, 0); } while (TREE_CODE (inner) == COMPONENT_REF || TREE_CODE (inner) == ARRAY_REF); if (DECL_P (inner) && DECL_WEAK (inner)) return 0; } /* Otherwise, ARG already has the proper type for the return value. */ return arg; } /* Fold a call to __builtin_classify_type. */ static tree fold_builtin_classify_type (tree arglist) { if (arglist == 0) return build_int_2 (no_type_class, 0); return build_int_2 (type_to_class (TREE_TYPE (TREE_VALUE (arglist))), 0); } /* Fold a call to __builtin_inf or __builtin_huge_val. */ static tree fold_builtin_inf (tree type, int warn) { REAL_VALUE_TYPE real; if (!MODE_HAS_INFINITIES (TYPE_MODE (type)) && warn) warning ("target format does not support infinity"); real_inf (&real); return build_real (type, real); } /* Fold a call to __builtin_nan or __builtin_nans. */ static tree fold_builtin_nan (tree arglist, tree type, int quiet) { REAL_VALUE_TYPE real; const char *str; if (!validate_arglist (arglist, POINTER_TYPE, VOID_TYPE)) return 0; str = c_getstr (TREE_VALUE (arglist)); if (!str) return 0; if (!real_nan (&real, str, quiet, TYPE_MODE (type))) return 0; return build_real (type, real); } /* Return true if the floating point expression T has an integer value. We also allow +Inf, -Inf and NaN to be considered integer values. */ static bool integer_valued_real_p (tree t) { switch (TREE_CODE (t)) { case FLOAT_EXPR: return true; case ABS_EXPR: case SAVE_EXPR: case NON_LVALUE_EXPR: return integer_valued_real_p (TREE_OPERAND (t, 0)); case COMPOUND_EXPR: case MODIFY_EXPR: case BIND_EXPR: return integer_valued_real_p (TREE_OPERAND (t, 1)); case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case MIN_EXPR: case MAX_EXPR: return integer_valued_real_p (TREE_OPERAND (t, 0)) && integer_valued_real_p (TREE_OPERAND (t, 1)); case COND_EXPR: return integer_valued_real_p (TREE_OPERAND (t, 1)) && integer_valued_real_p (TREE_OPERAND (t, 2)); case REAL_CST: if (! TREE_CONSTANT_OVERFLOW (t)) { REAL_VALUE_TYPE c, cint; c = TREE_REAL_CST (t); real_trunc (&cint, TYPE_MODE (TREE_TYPE (t)), &c); return real_identical (&c, &cint); } case NOP_EXPR: { tree type = TREE_TYPE (TREE_OPERAND (t, 0)); if (TREE_CODE (type) == INTEGER_TYPE) return true; if (TREE_CODE (type) == REAL_TYPE) return integer_valued_real_p (TREE_OPERAND (t, 0)); break; } case CALL_EXPR: switch (builtin_mathfn_code (t)) { case BUILT_IN_CEIL: case BUILT_IN_CEILF: case BUILT_IN_CEILL: case BUILT_IN_FLOOR: case BUILT_IN_FLOORF: case BUILT_IN_FLOORL: case BUILT_IN_NEARBYINT: case BUILT_IN_NEARBYINTF: case BUILT_IN_NEARBYINTL: case BUILT_IN_RINT: case BUILT_IN_RINTF: case BUILT_IN_RINTL: case BUILT_IN_ROUND: case BUILT_IN_ROUNDF: case BUILT_IN_ROUNDL: case BUILT_IN_TRUNC: case BUILT_IN_TRUNCF: case BUILT_IN_TRUNCL: return true; default: break; } break; default: break; } return false; } /* EXP is assumed to be builtin call where truncation can be propagated across (for instance floor((double)f) == (double)floorf (f). Do the transformation. */ static tree fold_trunc_transparent_mathfn (tree exp) { tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); tree arg; if (! validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; arg = TREE_VALUE (arglist); /* Integer rounding functions are idempotent. 
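For example (editor's note): floor (floor (x)) simplifies to floor (x) here, and with optimization enabled the narrowing transformation further down turns floor ((double) f) into (double) floorf (f) for a float operand f. 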
*/ if (fcode == builtin_mathfn_code (arg)) return arg; /* If argument is already integer valued, and we don't need to worry about setting errno, there's no need to perform rounding. */ if (! flag_errno_math && integer_valued_real_p (arg)) return arg; if (optimize) { tree arg0 = strip_float_extensions (arg); tree ftype = TREE_TYPE (exp); tree newtype = TREE_TYPE (arg0); tree decl; if (TYPE_PRECISION (newtype) < TYPE_PRECISION (ftype) && (decl = mathfn_built_in (newtype, fcode))) { arglist = build_tree_list (NULL_TREE, fold_convert (newtype, arg0)); return fold_convert (ftype, build_function_call_expr (decl, arglist)); } } return 0; } /* EXP is assumed to be builtin call which can narrow the FP type of the argument, for instance lround((double)f) -> lroundf (f). */ static tree fold_fixed_mathfn (tree exp) { tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); tree arg; if (! validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; arg = TREE_VALUE (arglist); /* If argument is already integer valued, and we don't need to worry about setting errno, there's no need to perform rounding. */ if (! flag_errno_math && integer_valued_real_p (arg)) return fold (build1 (FIX_TRUNC_EXPR, TREE_TYPE (exp), arg)); if (optimize) { tree ftype = TREE_TYPE (arg); tree arg0 = strip_float_extensions (arg); tree newtype = TREE_TYPE (arg0); tree decl; if (TYPE_PRECISION (newtype) < TYPE_PRECISION (ftype) && (decl = mathfn_built_in (newtype, fcode))) { arglist = build_tree_list (NULL_TREE, fold_convert (newtype, arg0)); return build_function_call_expr (decl, arglist); } } return 0; } /* Fold function call to builtin cabs, cabsf or cabsl. ARGLIST is the argument list and TYPE is the return type. Return NULL_TREE if no if no simplification can be made. */ static tree fold_builtin_cabs (tree arglist, tree type) { tree arg; if (!arglist || TREE_CHAIN (arglist)) return NULL_TREE; arg = TREE_VALUE (arglist); if (TREE_CODE (TREE_TYPE (arg)) != COMPLEX_TYPE || TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) != REAL_TYPE) return NULL_TREE; /* Evaluate cabs of a constant at compile-time. */ if (flag_unsafe_math_optimizations && TREE_CODE (arg) == COMPLEX_CST && TREE_CODE (TREE_REALPART (arg)) == REAL_CST && TREE_CODE (TREE_IMAGPART (arg)) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (TREE_REALPART (arg)) && ! TREE_CONSTANT_OVERFLOW (TREE_IMAGPART (arg))) { REAL_VALUE_TYPE r, i; r = TREE_REAL_CST (TREE_REALPART (arg)); i = TREE_REAL_CST (TREE_IMAGPART (arg)); real_arithmetic (&r, MULT_EXPR, &r, &r); real_arithmetic (&i, MULT_EXPR, &i, &i); real_arithmetic (&r, PLUS_EXPR, &r, &i); if (real_sqrt (&r, TYPE_MODE (type), &r) || ! flag_trapping_math) return build_real (type, r); } /* If either part is zero, cabs is fabs of the other. 
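That is, cabs (0.0 + y*I) becomes fabs (y) and cabs (x + 0.0*I) becomes fabs (x); both directions are handled below by building an ABS_EXPR on the nonzero part.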
*/ if (TREE_CODE (arg) == COMPLEX_EXPR && real_zerop (TREE_OPERAND (arg, 0))) return fold (build1 (ABS_EXPR, type, TREE_OPERAND (arg, 1))); if (TREE_CODE (arg) == COMPLEX_EXPR && real_zerop (TREE_OPERAND (arg, 1))) return fold (build1 (ABS_EXPR, type, TREE_OPERAND (arg, 0))); if (flag_unsafe_math_optimizations) { tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT); if (sqrtfn != NULL_TREE) { tree rpart, ipart, result, arglist; arg = builtin_save_expr (arg); rpart = fold (build1 (REALPART_EXPR, type, arg)); ipart = fold (build1 (IMAGPART_EXPR, type, arg)); rpart = builtin_save_expr (rpart); ipart = builtin_save_expr (ipart); result = fold (build2 (PLUS_EXPR, type, fold (build2 (MULT_EXPR, type, rpart, rpart)), fold (build2 (MULT_EXPR, type, ipart, ipart)))); arglist = build_tree_list (NULL_TREE, result); return build_function_call_expr (sqrtfn, arglist); } } return NULL_TREE; } /* Fold function call to builtin trunc, truncf or truncl. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_trunc (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree arg; if (! validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; /* Optimize trunc of constant value. */ arg = TREE_VALUE (arglist); if (TREE_CODE (arg) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg)) { REAL_VALUE_TYPE r, x; tree type = TREE_TYPE (exp); x = TREE_REAL_CST (arg); real_trunc (&r, TYPE_MODE (type), &x); return build_real (type, r); } return fold_trunc_transparent_mathfn (exp); } /* Fold function call to builtin floor, floorf or floorl. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_floor (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree arg; if (! validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; /* Optimize floor of constant value. */ arg = TREE_VALUE (arglist); if (TREE_CODE (arg) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg)) { REAL_VALUE_TYPE x; x = TREE_REAL_CST (arg); if (! REAL_VALUE_ISNAN (x) || ! flag_errno_math) { tree type = TREE_TYPE (exp); REAL_VALUE_TYPE r; real_floor (&r, TYPE_MODE (type), &x); return build_real (type, r); } } return fold_trunc_transparent_mathfn (exp); } /* Fold function call to builtin ceil, ceilf or ceill. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_ceil (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree arg; if (! validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; /* Optimize ceil of constant value. */ arg = TREE_VALUE (arglist); if (TREE_CODE (arg) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg)) { REAL_VALUE_TYPE x; x = TREE_REAL_CST (arg); if (! REAL_VALUE_ISNAN (x) || ! flag_errno_math) { tree type = TREE_TYPE (exp); REAL_VALUE_TYPE r; real_ceil (&r, TYPE_MODE (type), &x); return build_real (type, r); } } return fold_trunc_transparent_mathfn (exp); } /* Fold function call to builtin round, roundf or roundl. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_round (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree arg; if (! validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; /* Optimize round of constant value. */ arg = TREE_VALUE (arglist); if (TREE_CODE (arg) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg)) { REAL_VALUE_TYPE x; x = TREE_REAL_CST (arg); if (! REAL_VALUE_ISNAN (x) || ! 
flag_errno_math) { tree type = TREE_TYPE (exp); REAL_VALUE_TYPE r; real_round (&r, TYPE_MODE (type), &x); return build_real (type, r); } } return fold_trunc_transparent_mathfn (exp); } /* Fold function call to builtin lround, lroundf or lroundl (or the corresponding long long versions). Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_lround (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree arg; if (! validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; /* Optimize lround of constant value. */ arg = TREE_VALUE (arglist); if (TREE_CODE (arg) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg)) { const REAL_VALUE_TYPE x = TREE_REAL_CST (arg); if (! REAL_VALUE_ISNAN (x) && ! REAL_VALUE_ISINF (x)) { tree itype = TREE_TYPE (exp), ftype = TREE_TYPE (arg), result; HOST_WIDE_INT hi, lo; REAL_VALUE_TYPE r; real_round (&r, TYPE_MODE (ftype), &x); REAL_VALUE_TO_INT (&lo, &hi, r); result = build_int_2 (lo, hi); if (int_fits_type_p (result, itype)) return fold_convert (itype, result); } } return fold_fixed_mathfn (exp); } /* Fold function call to builtin ffs, clz, ctz, popcount and parity and their long and long long variants (i.e. ffsl and ffsll). Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_bitop (tree exp) { tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); tree arg; if (! validate_arglist (arglist, INTEGER_TYPE, VOID_TYPE)) return NULL_TREE; /* Optimize for constant argument. */ arg = TREE_VALUE (arglist); if (TREE_CODE (arg) == INTEGER_CST && ! TREE_CONSTANT_OVERFLOW (arg)) { HOST_WIDE_INT hi, width, result; unsigned HOST_WIDE_INT lo; tree type, t; type = TREE_TYPE (arg); width = TYPE_PRECISION (type); lo = TREE_INT_CST_LOW (arg); /* Clear all the bits that are beyond the type's precision. */ if (width > HOST_BITS_PER_WIDE_INT) { hi = TREE_INT_CST_HIGH (arg); if (width < 2 * HOST_BITS_PER_WIDE_INT) hi &= ~((HOST_WIDE_INT) (-1) >> (width - HOST_BITS_PER_WIDE_INT)); } else { hi = 0; if (width < HOST_BITS_PER_WIDE_INT) lo &= ~((unsigned HOST_WIDE_INT) (-1) << width); } switch (DECL_FUNCTION_CODE (fndecl)) { case BUILT_IN_FFS: case BUILT_IN_FFSL: case BUILT_IN_FFSLL: if (lo != 0) result = exact_log2 (lo & -lo) + 1; else if (hi != 0) result = HOST_BITS_PER_WIDE_INT + exact_log2 (hi & -hi) + 1; else result = 0; break; case BUILT_IN_CLZ: case BUILT_IN_CLZL: case BUILT_IN_CLZLL: if (hi != 0) result = width - floor_log2 (hi) - 1 - HOST_BITS_PER_WIDE_INT; else if (lo != 0) result = width - floor_log2 (lo) - 1; else if (! CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (type), result)) result = width; break; case BUILT_IN_CTZ: case BUILT_IN_CTZL: case BUILT_IN_CTZLL: if (lo != 0) result = exact_log2 (lo & -lo); else if (hi != 0) result = HOST_BITS_PER_WIDE_INT + exact_log2 (hi & -hi); else if (! CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (type), result)) result = width; break; case BUILT_IN_POPCOUNT: case BUILT_IN_POPCOUNTL: case BUILT_IN_POPCOUNTLL: result = 0; while (lo) result++, lo &= lo - 1; while (hi) result++, hi &= hi - 1; break; case BUILT_IN_PARITY: case BUILT_IN_PARITYL: case BUILT_IN_PARITYLL: result = 0; while (lo) result++, lo &= lo - 1; while (hi) result++, hi &= hi - 1; result &= 1; break; default: abort(); } t = build_int_2 (result, 0); TREE_TYPE (t) = TREE_TYPE (exp); return t; } return NULL_TREE; } /* Return true if EXPR is the real constant contained in VALUE. */ static bool real_dconstp (tree expr, const REAL_VALUE_TYPE *value) { STRIP_NOPS (expr); return ((TREE_CODE (expr) == REAL_CST && ! 
TREE_CONSTANT_OVERFLOW (expr) && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), *value)) || (TREE_CODE (expr) == COMPLEX_CST && real_dconstp (TREE_REALPART (expr), value) && real_zerop (TREE_IMAGPART (expr)))); } /* A subroutine of fold_builtin to fold the various logarithmic functions. EXP is the CALL_EXPR of a call to a builtin logN function. VALUE is the base of the logN function. */ static tree fold_builtin_logarithm (tree exp, const REAL_VALUE_TYPE *value) { tree arglist = TREE_OPERAND (exp, 1); if (validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) { tree fndecl = get_callee_fndecl (exp); tree type = TREE_TYPE (TREE_TYPE (fndecl)); tree arg = TREE_VALUE (arglist); const enum built_in_function fcode = builtin_mathfn_code (arg); /* Optimize logN(1.0) = 0.0. */ if (real_onep (arg)) return build_real (type, dconst0); /* Optimize logN(N) = 1.0. If N can't be truncated to MODE exactly, then only do this if flag_unsafe_math_optimizations. */ if (exact_real_truncate (TYPE_MODE (type), value) || flag_unsafe_math_optimizations) { const REAL_VALUE_TYPE value_truncate = real_value_truncate (TYPE_MODE (type), *value); if (real_dconstp (arg, &value_truncate)) return build_real (type, dconst1); } /* Special case, optimize logN(expN(x)) = x. */ if (flag_unsafe_math_optimizations && ((value == &dconste && (fcode == BUILT_IN_EXP || fcode == BUILT_IN_EXPF || fcode == BUILT_IN_EXPL)) || (value == &dconst2 && (fcode == BUILT_IN_EXP2 || fcode == BUILT_IN_EXP2F || fcode == BUILT_IN_EXP2L)) || (value == &dconst10 && (BUILTIN_EXP10_P (fcode))))) return fold_convert (type, TREE_VALUE (TREE_OPERAND (arg, 1))); /* Optimize logN(func()) for various exponential functions. We want to determine the value "x" and the power "exponent" in order to transform logN(x**exponent) into exponent*logN(x). */ if (flag_unsafe_math_optimizations) { tree exponent = 0, x = 0; switch (fcode) { case BUILT_IN_EXP: case BUILT_IN_EXPF: case BUILT_IN_EXPL: /* Prepare to do logN(exp(exponent) -> exponent*logN(e). */ x = build_real (type, real_value_truncate (TYPE_MODE (type), dconste)); exponent = TREE_VALUE (TREE_OPERAND (arg, 1)); break; case BUILT_IN_EXP2: case BUILT_IN_EXP2F: case BUILT_IN_EXP2L: /* Prepare to do logN(exp2(exponent) -> exponent*logN(2). */ x = build_real (type, dconst2); exponent = TREE_VALUE (TREE_OPERAND (arg, 1)); break; case BUILT_IN_EXP10: case BUILT_IN_EXP10F: case BUILT_IN_EXP10L: case BUILT_IN_POW10: case BUILT_IN_POW10F: case BUILT_IN_POW10L: /* Prepare to do logN(exp10(exponent) -> exponent*logN(10). */ x = build_real (type, dconst10); exponent = TREE_VALUE (TREE_OPERAND (arg, 1)); break; case BUILT_IN_SQRT: case BUILT_IN_SQRTF: case BUILT_IN_SQRTL: /* Prepare to do logN(sqrt(x) -> 0.5*logN(x). */ x = TREE_VALUE (TREE_OPERAND (arg, 1)); exponent = build_real (type, dconsthalf); break; case BUILT_IN_CBRT: case BUILT_IN_CBRTF: case BUILT_IN_CBRTL: /* Prepare to do logN(cbrt(x) -> (1/3)*logN(x). */ x = TREE_VALUE (TREE_OPERAND (arg, 1)); exponent = build_real (type, real_value_truncate (TYPE_MODE (type), dconstthird)); break; case BUILT_IN_POW: case BUILT_IN_POWF: case BUILT_IN_POWL: /* Prepare to do logN(pow(x,exponent) -> exponent*logN(x). */ x = TREE_VALUE (TREE_OPERAND (arg, 1)); exponent = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg, 1))); break; default: break; } /* Now perform the optimization. 
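With X and EXPONENT determined, the call is rebuilt as EXPONENT * logN (X), e.g. log (pow (x, 3.0)) -> 3.0 * log (x) and log10 (sqrt (x)) -> 0.5 * log10 (x).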
*/ if (x && exponent) { tree logfn; arglist = build_tree_list (NULL_TREE, x); logfn = build_function_call_expr (fndecl, arglist); return fold (build2 (MULT_EXPR, type, exponent, logfn)); } } } return 0; } /* A subroutine of fold_builtin to fold the various exponent functions. EXP is the CALL_EXPR of a call to a builtin function. VALUE is the value which will be raised to a power. */ static tree fold_builtin_exponent (tree exp, const REAL_VALUE_TYPE *value) { tree arglist = TREE_OPERAND (exp, 1); if (validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) { tree fndecl = get_callee_fndecl (exp); tree type = TREE_TYPE (TREE_TYPE (fndecl)); tree arg = TREE_VALUE (arglist); /* Optimize exp*(0.0) = 1.0. */ if (real_zerop (arg)) return build_real (type, dconst1); /* Optimize expN(1.0) = N. */ if (real_onep (arg)) { REAL_VALUE_TYPE cst; real_convert (&cst, TYPE_MODE (type), value); return build_real (type, cst); } /* Attempt to evaluate expN(integer) at compile-time. */ if (flag_unsafe_math_optimizations && TREE_CODE (arg) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg)) { REAL_VALUE_TYPE cint; REAL_VALUE_TYPE c; HOST_WIDE_INT n; c = TREE_REAL_CST (arg); n = real_to_integer (&c); real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); if (real_identical (&c, &cint)) { REAL_VALUE_TYPE x; real_powi (&x, TYPE_MODE (type), value, n); return build_real (type, x); } } /* Optimize expN(logN(x)) = x. */ if (flag_unsafe_math_optimizations) { const enum built_in_function fcode = builtin_mathfn_code (arg); if ((value == &dconste && (fcode == BUILT_IN_LOG || fcode == BUILT_IN_LOGF || fcode == BUILT_IN_LOGL)) || (value == &dconst2 && (fcode == BUILT_IN_LOG2 || fcode == BUILT_IN_LOG2F || fcode == BUILT_IN_LOG2L)) || (value == &dconst10 && (fcode == BUILT_IN_LOG10 || fcode == BUILT_IN_LOG10F || fcode == BUILT_IN_LOG10L))) return fold_convert (type, TREE_VALUE (TREE_OPERAND (arg, 1))); } } return 0; } /* Fold function call to builtin memcpy. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_memcpy (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree dest, src, len; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; dest = TREE_VALUE (arglist); src = TREE_VALUE (TREE_CHAIN (arglist)); len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the LEN parameter is zero, return DEST. */ if (integer_zerop (len)) return omit_one_operand (TREE_TYPE (exp), dest, src); /* If SRC and DEST are the same (and not volatile), return DEST. */ if (operand_equal_p (src, dest, 0)) return omit_one_operand (TREE_TYPE (exp), dest, len); return 0; } /* Fold function call to builtin mempcpy. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_mempcpy (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree dest, src, len; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; dest = TREE_VALUE (arglist); src = TREE_VALUE (TREE_CHAIN (arglist)); len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the LEN parameter is zero, return DEST. */ if (integer_zerop (len)) return omit_one_operand (TREE_TYPE (exp), dest, src); /* If SRC and DEST are the same (and not volatile), return DEST+LEN. */ if (operand_equal_p (src, dest, 0)) { tree temp = fold_convert (TREE_TYPE (dest), len); temp = fold (build2 (PLUS_EXPR, TREE_TYPE (dest), dest, temp)); return fold_convert (TREE_TYPE (exp), temp); } return 0; } /* Fold function call to builtin memmove. Return NULL_TREE if no simplification can be made. 
*/ static tree fold_builtin_memmove (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree dest, src, len; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; dest = TREE_VALUE (arglist); src = TREE_VALUE (TREE_CHAIN (arglist)); len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the LEN parameter is zero, return DEST. */ if (integer_zerop (len)) return omit_one_operand (TREE_TYPE (exp), dest, src); /* If SRC and DEST are the same (and not volatile), return DEST. */ if (operand_equal_p (src, dest, 0)) return omit_one_operand (TREE_TYPE (exp), dest, len); return 0; } /* Fold function call to builtin strcpy. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_strcpy (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree dest, src; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; dest = TREE_VALUE (arglist); src = TREE_VALUE (TREE_CHAIN (arglist)); /* If SRC and DEST are the same (and not volatile), return DEST. */ if (operand_equal_p (src, dest, 0)) return fold_convert (TREE_TYPE (exp), dest); return 0; } /* Fold function call to builtin strncpy. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_strncpy (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree dest, src, len; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; dest = TREE_VALUE (arglist); src = TREE_VALUE (TREE_CHAIN (arglist)); len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the LEN parameter is zero, return DEST. */ if (integer_zerop (len)) return omit_one_operand (TREE_TYPE (exp), dest, src); return 0; } /* Fold function call to builtin strchr and strrchr. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_strchr (tree exp, bool actually_strrchr) { tree arglist = TREE_OPERAND (exp, 1); if (!validate_arglist (arglist, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); const char *p1; if (TREE_CODE (s2) != INTEGER_CST) return 0; p1 = c_getstr (s1); if (p1 != NULL) { char c; const char *r; if (target_char_cast (s2, &c)) return 0; r = actually_strrchr ? strrchr (p1, c) : strchr (p1, c); if (r == NULL) return fold_convert (TREE_TYPE (s1), integer_zero_node); /* Return an offset into the constant string argument. */ return fold (build2 (PLUS_EXPR, TREE_TYPE (s1), s1, fold_convert (TREE_TYPE (s1), ssize_int (r - p1)))); } if (actually_strrchr) { tree fn; if (!integer_zerop (s2)) return 0; fn = implicit_built_in_decls[BUILT_IN_STRCHR]; if (!fn) return 0; /* Transform strrchr(s1, '\0') to strchr(s1, '\0'). */ return build_function_call_expr (fn, arglist); } return 0; } } /* Fold function call to builtin memcmp. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_memcmp (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree arg1, arg2, len; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the LEN parameter is zero, return zero. */ if (integer_zerop (len)) { tree temp = omit_one_operand (TREE_TYPE (exp), integer_zero_node, arg2); return omit_one_operand (TREE_TYPE (exp), temp, arg1); } /* If ARG1 and ARG2 are the same (and not volatile), return zero. 
*/ if (operand_equal_p (arg1, arg2, 0)) return omit_one_operand (TREE_TYPE (exp), integer_zero_node, len); return 0; } /* Fold function call to builtin strcmp. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_strcmp (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree arg1, arg2; const char *p1, *p2; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); /* If ARG1 and ARG2 are the same (and not volatile), return zero. */ if (operand_equal_p (arg1, arg2, 0)) return fold_convert (TREE_TYPE (exp), integer_zero_node); p1 = c_getstr (arg1); p2 = c_getstr (arg2); if (p1 && p2) { tree temp; const int i = strcmp (p1, p2); if (i < 0) temp = integer_minus_one_node; else if (i > 0) temp = integer_one_node; else temp = integer_zero_node; return fold_convert (TREE_TYPE (exp), temp); } return 0; } /* Fold function call to builtin strncmp. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_strncmp (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree arg1, arg2, len; const char *p1, *p2; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the LEN parameter is zero, return zero. */ if (integer_zerop (len)) { tree temp = omit_one_operand (TREE_TYPE (exp), integer_zero_node, arg2); return omit_one_operand (TREE_TYPE (exp), temp, arg1); } /* If ARG1 and ARG2 are the same (and not volatile), return zero. */ if (operand_equal_p (arg1, arg2, 0)) return omit_one_operand (TREE_TYPE (exp), integer_zero_node, len); p1 = c_getstr (arg1); p2 = c_getstr (arg2); if (host_integerp (len, 1) && p1 && p2) { tree temp; const int i = strncmp (p1, p2, tree_low_cst (len, 1)); if (i < 0) temp = integer_minus_one_node; else if (i > 0) temp = integer_one_node; else temp = integer_zero_node; return fold_convert (TREE_TYPE (exp), temp); } return 0; } /* Fold function call to builtin signbit, signbitf or signbitl. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_signbit (tree exp) { tree arglist = TREE_OPERAND (exp, 1); tree arg, temp; if (!validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return NULL_TREE; arg = TREE_VALUE (arglist); /* If ARG is a compile-time constant, determine the result. */ if (TREE_CODE (arg) == REAL_CST && !TREE_CONSTANT_OVERFLOW (arg)) { REAL_VALUE_TYPE c; c = TREE_REAL_CST (arg); temp = REAL_VALUE_NEGATIVE (c) ? integer_one_node : integer_zero_node; return fold_convert (TREE_TYPE (exp), temp); } /* If ARG is non-negative, the result is always zero. */ if (tree_expr_nonnegative_p (arg)) return omit_one_operand (TREE_TYPE (exp), integer_zero_node, arg); /* If ARG's format doesn't have signed zeros, return "arg < 0.0". */ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg)))) return fold (build2 (LT_EXPR, TREE_TYPE (exp), arg, build_real (TREE_TYPE (arg), dconst0))); return NULL_TREE; } /* Fold function call to builtin copysign, copysignf or copysignl. Return NULL_TREE if no simplification can be made. */ static tree fold_builtin_copysign (tree arglist, tree type) { tree arg1, arg2; if (!validate_arglist (arglist, REAL_TYPE, REAL_TYPE, VOID_TYPE)) return NULL_TREE; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); /* copysign(X,X) is X. 
*/ if (operand_equal_p (arg1, arg2, 0)) return fold_convert (type, arg1); /* If ARG1 and ARG2 are compile-time constants, determine the result. */ if (TREE_CODE (arg1) == REAL_CST && TREE_CODE (arg2) == REAL_CST && !TREE_CONSTANT_OVERFLOW (arg1) && !TREE_CONSTANT_OVERFLOW (arg2)) { REAL_VALUE_TYPE c1, c2; c1 = TREE_REAL_CST (arg1); c2 = TREE_REAL_CST (arg2); real_copysign (&c1, &c2); return build_real (type, c1); c1.sign = c2.sign; } /* copysign(X, Y) is fabs(X) when Y is always non-negative. Remember to evaluate Y for side-effects. */ if (tree_expr_nonnegative_p (arg2)) return omit_one_operand (type, fold (build1 (ABS_EXPR, type, arg1)), arg2); return NULL_TREE; } /* Fold a call to builtin isascii. */ static tree fold_builtin_isascii (tree arglist) { if (! validate_arglist (arglist, INTEGER_TYPE, VOID_TYPE)) return 0; else { /* Transform isascii(c) -> ((c & ~0x7f) == 0). */ tree arg = TREE_VALUE (arglist); arg = fold (build2 (EQ_EXPR, integer_type_node, build2 (BIT_AND_EXPR, integer_type_node, arg, build_int_2 (~ (unsigned HOST_WIDE_INT) 0x7f, ~ (HOST_WIDE_INT) 0)), integer_zero_node)); if (in_gimple_form && !TREE_CONSTANT (arg)) return NULL_TREE; else return arg; } } /* Fold a call to builtin toascii. */ static tree fold_builtin_toascii (tree arglist) { if (! validate_arglist (arglist, INTEGER_TYPE, VOID_TYPE)) return 0; else { /* Transform toascii(c) -> (c & 0x7f). */ tree arg = TREE_VALUE (arglist); return fold (build2 (BIT_AND_EXPR, integer_type_node, arg, build_int_2 (0x7f, 0))); } } /* Fold a call to builtin isdigit. */ static tree fold_builtin_isdigit (tree arglist) { if (! validate_arglist (arglist, INTEGER_TYPE, VOID_TYPE)) return 0; else { /* Transform isdigit(c) -> (unsigned)(c) - '0' <= 9. */ /* According to the C standard, isdigit is unaffected by locale. */ tree arg = TREE_VALUE (arglist); arg = fold_convert (unsigned_type_node, arg); arg = build2 (MINUS_EXPR, unsigned_type_node, arg, fold_convert (unsigned_type_node, build_int_2 (TARGET_DIGIT0, 0))); arg = build2 (LE_EXPR, integer_type_node, arg, fold_convert (unsigned_type_node, build_int_2 (9, 0))); arg = fold (arg); if (in_gimple_form && !TREE_CONSTANT (arg)) return NULL_TREE; else return arg; } } /* Fold a call to fabs, fabsf or fabsl. */ static tree fold_builtin_fabs (tree arglist, tree type) { tree arg; if (!validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) return 0; arg = TREE_VALUE (arglist); if (TREE_CODE (arg) == REAL_CST) return fold_abs_const (arg, type); return fold (build1 (ABS_EXPR, type, arg)); } /* Fold a call to abs, labs, llabs or imaxabs. */ static tree fold_builtin_abs (tree arglist, tree type) { tree arg; if (!validate_arglist (arglist, INTEGER_TYPE, VOID_TYPE)) return 0; arg = TREE_VALUE (arglist); if (TREE_CODE (arg) == INTEGER_CST) return fold_abs_const (arg, type); return fold (build1 (ABS_EXPR, type, arg)); } /* Fold a call to __builtin_isnan(), __builtin_isinf, __builtin_finite. EXP is the CALL_EXPR for the call. */ static tree fold_builtin_classify (tree exp, int builtin_index) { tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); tree type = TREE_TYPE (TREE_TYPE (fndecl)); tree arg; REAL_VALUE_TYPE r; if (!validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) { /* Check that we have exactly one argument. 
*/ if (arglist == 0) { error ("too few arguments to function `%s'", IDENTIFIER_POINTER (DECL_NAME (fndecl))); return error_mark_node; } else if (TREE_CHAIN (arglist) != 0) { error ("too many arguments to function `%s'", IDENTIFIER_POINTER (DECL_NAME (fndecl))); return error_mark_node; } else { error ("non-floating-point argument to function `%s'", IDENTIFIER_POINTER (DECL_NAME (fndecl))); return error_mark_node; } } arg = TREE_VALUE (arglist); switch (builtin_index) { case BUILT_IN_ISINF: if (!MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (arg)))) return omit_one_operand (type, integer_zero_node, arg); if (TREE_CODE (arg) == REAL_CST) { r = TREE_REAL_CST (arg); if (real_isinf (&r)) return real_compare (GT_EXPR, &r, &dconst0) ? integer_one_node : integer_minus_one_node; else return integer_zero_node; } return NULL_TREE; case BUILT_IN_FINITE: if (!MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg))) && !MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (arg)))) return omit_one_operand (type, integer_zero_node, arg); if (TREE_CODE (arg) == REAL_CST) { r = TREE_REAL_CST (arg); return real_isinf (&r) || real_isnan (&r) ? integer_zero_node : integer_one_node; } return NULL_TREE; case BUILT_IN_ISNAN: if (!MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg)))) return omit_one_operand (type, integer_zero_node, arg); if (TREE_CODE (arg) == REAL_CST) { r = TREE_REAL_CST (arg); return real_isnan (&r) ? integer_one_node : integer_zero_node; } arg = builtin_save_expr (arg); return fold (build2 (UNORDERED_EXPR, type, arg, arg)); default: abort (); } } /* Fold a call to an unordered comparison function such as __builtin_isgreater(). EXP is the CALL_EXPR for the call. UNORDERED_CODE and ORDERED_CODE are comparison codes that give the opposite of the desired result. UNORDERED_CODE is used for modes that can hold NaNs and ORDERED_CODE is used for the rest. */ static tree fold_builtin_unordered_cmp (tree exp, enum tree_code unordered_code, enum tree_code ordered_code) { tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); tree type = TREE_TYPE (TREE_TYPE (fndecl)); enum tree_code code; tree arg0, arg1; if (!validate_arglist (arglist, REAL_TYPE, REAL_TYPE, VOID_TYPE)) { enum tree_code code0, code1; tree type0, type1; tree cmp_type = 0; /* Check that we have exactly two arguments. */ if (arglist == 0 || TREE_CHAIN (arglist) == 0) { error ("too few arguments to function `%s'", IDENTIFIER_POINTER (DECL_NAME (fndecl))); return error_mark_node; } else if (TREE_CHAIN (TREE_CHAIN (arglist)) != 0) { error ("too many arguments to function `%s'", IDENTIFIER_POINTER (DECL_NAME (fndecl))); return error_mark_node; } arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); type0 = TREE_TYPE (arg0); type1 = TREE_TYPE (arg1); code0 = TREE_CODE (type0); code1 = TREE_CODE (type1); if (code0 == REAL_TYPE && code1 == REAL_TYPE) /* Choose the wider of two real types. */ cmp_type = TYPE_PRECISION (type0) >= TYPE_PRECISION (type1) ? 
type0 : type1; else if (code0 == REAL_TYPE && code1 == INTEGER_TYPE) cmp_type = type0; else if (code0 == INTEGER_TYPE && code1 == REAL_TYPE) cmp_type = type1; else { error ("non-floating-point argument to function `%s'", IDENTIFIER_POINTER (DECL_NAME (fndecl))); return error_mark_node; } arg0 = fold_convert (cmp_type, arg0); arg1 = fold_convert (cmp_type, arg1); } else { arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); } if (unordered_code == UNORDERED_EXPR) { if (!MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg0)))) return omit_two_operands (type, integer_zero_node, arg0, arg1); return fold (build2 (UNORDERED_EXPR, type, arg0, arg1)); } code = MODE_HAS_NANS (TYPE_MODE (TREE_TYPE (arg0))) ? unordered_code : ordered_code; return fold (build1 (TRUTH_NOT_EXPR, type, fold (build2 (code, type, arg0, arg1)))); } /* Used by constant folding to eliminate some builtin calls early. EXP is the CALL_EXPR of a call to a builtin function. */ static tree fold_builtin_1 (tree exp) { tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); tree type = TREE_TYPE (TREE_TYPE (fndecl)); if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD) return 0; switch (DECL_FUNCTION_CODE (fndecl)) { case BUILT_IN_CONSTANT_P: return fold_builtin_constant_p (arglist); case BUILT_IN_EXPECT: return fold_builtin_expect (arglist); case BUILT_IN_CLASSIFY_TYPE: return fold_builtin_classify_type (arglist); case BUILT_IN_STRLEN: if (validate_arglist (arglist, POINTER_TYPE, VOID_TYPE)) { tree len = c_strlen (TREE_VALUE (arglist), 0); if (len) { /* Convert from the internal "sizetype" type to "size_t". */ if (size_type_node) len = fold_convert (size_type_node, len); return len; } } break; case BUILT_IN_FABS: case BUILT_IN_FABSF: case BUILT_IN_FABSL: return fold_builtin_fabs (arglist, type); case BUILT_IN_ABS: case BUILT_IN_LABS: case BUILT_IN_LLABS: case BUILT_IN_IMAXABS: return fold_builtin_abs (arglist, type); case BUILT_IN_CONJ: case BUILT_IN_CONJF: case BUILT_IN_CONJL: if (validate_arglist (arglist, COMPLEX_TYPE, VOID_TYPE)) return fold (build1 (CONJ_EXPR, type, TREE_VALUE (arglist))); break; case BUILT_IN_CREAL: case BUILT_IN_CREALF: case BUILT_IN_CREALL: if (validate_arglist (arglist, COMPLEX_TYPE, VOID_TYPE)) return non_lvalue (fold (build1 (REALPART_EXPR, type, TREE_VALUE (arglist)))); break; case BUILT_IN_CIMAG: case BUILT_IN_CIMAGF: case BUILT_IN_CIMAGL: if (validate_arglist (arglist, COMPLEX_TYPE, VOID_TYPE)) return non_lvalue (fold (build1 (IMAGPART_EXPR, type, TREE_VALUE (arglist)))); break; case BUILT_IN_CABS: case BUILT_IN_CABSF: case BUILT_IN_CABSL: return fold_builtin_cabs (arglist, type); case BUILT_IN_SQRT: case BUILT_IN_SQRTF: case BUILT_IN_SQRTL: if (validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) { enum built_in_function fcode; tree arg = TREE_VALUE (arglist); /* Optimize sqrt of constant value. */ if (TREE_CODE (arg) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg)) { REAL_VALUE_TYPE r, x; x = TREE_REAL_CST (arg); if (real_sqrt (&r, TYPE_MODE (type), &x) || (!flag_trapping_math && !flag_errno_math)) return build_real (type, r); } /* Optimize sqrt(expN(x)) = expN(x*0.5). */ fcode = builtin_mathfn_code (arg); if (flag_unsafe_math_optimizations && BUILTIN_EXPONENT_P (fcode)) { tree expfn = TREE_OPERAND (TREE_OPERAND (arg, 0), 0); arg = fold (build2 (MULT_EXPR, type, TREE_VALUE (TREE_OPERAND (arg, 1)), build_real (type, dconsthalf))); arglist = build_tree_list (NULL_TREE, arg); return build_function_call_expr (expfn, arglist); } /* Optimize sqrt(Nroot(x)) -> pow(x,1/(2*N)). 
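The inner call is either sqrt or cbrt, so the combined exponent is 1/4 or 1/6; the code below halves the inner root's exponent by decrementing its REAL_EXP and emits a single pow call.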
*/ if (flag_unsafe_math_optimizations && BUILTIN_ROOT_P (fcode)) { tree powfn = mathfn_built_in (type, BUILT_IN_POW); if (powfn) { tree arg0 = TREE_VALUE (TREE_OPERAND (arg, 1)); tree tree_root; /* The inner root was either sqrt or cbrt. */ REAL_VALUE_TYPE dconstroot = BUILTIN_SQRT_P (fcode) ? dconsthalf : dconstthird; /* Adjust for the outer root. */ SET_REAL_EXP (&dconstroot, REAL_EXP (&dconstroot) - 1); dconstroot = real_value_truncate (TYPE_MODE (type), dconstroot); tree_root = build_real (type, dconstroot); arglist = tree_cons (NULL_TREE, arg0, build_tree_list (NULL_TREE, tree_root)); return build_function_call_expr (powfn, arglist); } } /* Optimize sqrt(pow(x,y)) = pow(x,y*0.5). */ if (flag_unsafe_math_optimizations && (fcode == BUILT_IN_POW || fcode == BUILT_IN_POWF || fcode == BUILT_IN_POWL)) { tree powfn = TREE_OPERAND (TREE_OPERAND (arg, 0), 0); tree arg0 = TREE_VALUE (TREE_OPERAND (arg, 1)); tree arg1 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg, 1))); tree narg1 = fold (build2 (MULT_EXPR, type, arg1, build_real (type, dconsthalf))); arglist = tree_cons (NULL_TREE, arg0, build_tree_list (NULL_TREE, narg1)); return build_function_call_expr (powfn, arglist); } } break; case BUILT_IN_CBRT: case BUILT_IN_CBRTF: case BUILT_IN_CBRTL: if (validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) { tree arg = TREE_VALUE (arglist); const enum built_in_function fcode = builtin_mathfn_code (arg); /* Optimize cbrt of constant value. */ if (real_zerop (arg) || real_onep (arg) || real_minus_onep (arg)) return arg; /* Optimize cbrt(expN(x)) -> expN(x/3). */ if (flag_unsafe_math_optimizations && BUILTIN_EXPONENT_P (fcode)) { tree expfn = TREE_OPERAND (TREE_OPERAND (arg, 0), 0); const REAL_VALUE_TYPE third_trunc = real_value_truncate (TYPE_MODE (type), dconstthird); arg = fold (build2 (MULT_EXPR, type, TREE_VALUE (TREE_OPERAND (arg, 1)), build_real (type, third_trunc))); arglist = build_tree_list (NULL_TREE, arg); return build_function_call_expr (expfn, arglist); } /* Optimize cbrt(sqrt(x)) -> pow(x,1/6). */ /* We don't optimize cbrt(cbrt(x)) -> pow(x,1/9) because if x is negative pow will error but cbrt won't. */ if (flag_unsafe_math_optimizations && BUILTIN_SQRT_P (fcode)) { tree powfn = mathfn_built_in (type, BUILT_IN_POW); if (powfn) { tree arg0 = TREE_VALUE (TREE_OPERAND (arg, 1)); tree tree_root; REAL_VALUE_TYPE dconstroot = dconstthird; SET_REAL_EXP (&dconstroot, REAL_EXP (&dconstroot) - 1); dconstroot = real_value_truncate (TYPE_MODE (type), dconstroot); tree_root = build_real (type, dconstroot); arglist = tree_cons (NULL_TREE, arg0, build_tree_list (NULL_TREE, tree_root)); return build_function_call_expr (powfn, arglist); } } } break; case BUILT_IN_SIN: case BUILT_IN_SINF: case BUILT_IN_SINL: if (validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) { tree arg = TREE_VALUE (arglist); /* Optimize sin(0.0) = 0.0. */ if (real_zerop (arg)) return arg; } break; case BUILT_IN_COS: case BUILT_IN_COSF: case BUILT_IN_COSL: if (validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) { tree arg = TREE_VALUE (arglist); /* Optimize cos(0.0) = 1.0. */ if (real_zerop (arg)) return build_real (type, dconst1); /* Optimize cos(-x) into cos(x). 
*/ if (TREE_CODE (arg) == NEGATE_EXPR) { tree arglist = build_tree_list (NULL_TREE, TREE_OPERAND (arg, 0)); return build_function_call_expr (fndecl, arglist); } } break; case BUILT_IN_EXP: case BUILT_IN_EXPF: case BUILT_IN_EXPL: return fold_builtin_exponent (exp, &dconste); case BUILT_IN_EXP2: case BUILT_IN_EXP2F: case BUILT_IN_EXP2L: return fold_builtin_exponent (exp, &dconst2); case BUILT_IN_EXP10: case BUILT_IN_EXP10F: case BUILT_IN_EXP10L: case BUILT_IN_POW10: case BUILT_IN_POW10F: case BUILT_IN_POW10L: return fold_builtin_exponent (exp, &dconst10); case BUILT_IN_LOG: case BUILT_IN_LOGF: case BUILT_IN_LOGL: return fold_builtin_logarithm (exp, &dconste); case BUILT_IN_LOG2: case BUILT_IN_LOG2F: case BUILT_IN_LOG2L: return fold_builtin_logarithm (exp, &dconst2); case BUILT_IN_LOG10: case BUILT_IN_LOG10F: case BUILT_IN_LOG10L: return fold_builtin_logarithm (exp, &dconst10); case BUILT_IN_TAN: case BUILT_IN_TANF: case BUILT_IN_TANL: if (validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) { enum built_in_function fcode; tree arg = TREE_VALUE (arglist); /* Optimize tan(0.0) = 0.0. */ if (real_zerop (arg)) return arg; /* Optimize tan(atan(x)) = x. */ fcode = builtin_mathfn_code (arg); if (flag_unsafe_math_optimizations && (fcode == BUILT_IN_ATAN || fcode == BUILT_IN_ATANF || fcode == BUILT_IN_ATANL)) return TREE_VALUE (TREE_OPERAND (arg, 1)); } break; case BUILT_IN_ATAN: case BUILT_IN_ATANF: case BUILT_IN_ATANL: if (validate_arglist (arglist, REAL_TYPE, VOID_TYPE)) { tree arg = TREE_VALUE (arglist); /* Optimize atan(0.0) = 0.0. */ if (real_zerop (arg)) return arg; /* Optimize atan(1.0) = pi/4. */ if (real_onep (arg)) { REAL_VALUE_TYPE cst; real_convert (&cst, TYPE_MODE (type), &dconstpi); SET_REAL_EXP (&cst, REAL_EXP (&cst) - 2); return build_real (type, cst); } } break; case BUILT_IN_POW: case BUILT_IN_POWF: case BUILT_IN_POWL: if (validate_arglist (arglist, REAL_TYPE, REAL_TYPE, VOID_TYPE)) { enum built_in_function fcode; tree arg0 = TREE_VALUE (arglist); tree arg1 = TREE_VALUE (TREE_CHAIN (arglist)); /* Optimize pow(1.0,y) = 1.0. */ if (real_onep (arg0)) return omit_one_operand (type, build_real (type, dconst1), arg1); if (TREE_CODE (arg1) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg1)) { REAL_VALUE_TYPE c; c = TREE_REAL_CST (arg1); /* Optimize pow(x,0.0) = 1.0. */ if (REAL_VALUES_EQUAL (c, dconst0)) return omit_one_operand (type, build_real (type, dconst1), arg0); /* Optimize pow(x,1.0) = x. */ if (REAL_VALUES_EQUAL (c, dconst1)) return arg0; /* Optimize pow(x,-1.0) = 1.0/x. */ if (REAL_VALUES_EQUAL (c, dconstm1)) return fold (build2 (RDIV_EXPR, type, build_real (type, dconst1), arg0)); /* Optimize pow(x,0.5) = sqrt(x). */ if (flag_unsafe_math_optimizations && REAL_VALUES_EQUAL (c, dconsthalf)) { tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT); if (sqrtfn != NULL_TREE) { tree arglist = build_tree_list (NULL_TREE, arg0); return build_function_call_expr (sqrtfn, arglist); } } /* Attempt to evaluate pow at compile-time. */ if (TREE_CODE (arg0) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg0)) { REAL_VALUE_TYPE cint; HOST_WIDE_INT n; n = real_to_integer (&c); real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); if (real_identical (&c, &cint)) { REAL_VALUE_TYPE x; bool inexact; x = TREE_REAL_CST (arg0); inexact = real_powi (&x, TYPE_MODE (type), &x, n); if (flag_unsafe_math_optimizations || !inexact) return build_real (type, x); } } } /* Optimize pow(expN(x),y) = expN(x*y). 
*/ fcode = builtin_mathfn_code (arg0); if (flag_unsafe_math_optimizations && BUILTIN_EXPONENT_P (fcode)) { tree expfn = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0); tree arg = TREE_VALUE (TREE_OPERAND (arg0, 1)); arg = fold (build2 (MULT_EXPR, type, arg, arg1)); arglist = build_tree_list (NULL_TREE, arg); return build_function_call_expr (expfn, arglist); } /* Optimize pow(sqrt(x),y) = pow(x,y*0.5). */ if (flag_unsafe_math_optimizations && BUILTIN_SQRT_P (fcode)) { tree narg0 = TREE_VALUE (TREE_OPERAND (arg0, 1)); tree narg1 = fold (build2 (MULT_EXPR, type, arg1, build_real (type, dconsthalf))); arglist = tree_cons (NULL_TREE, narg0, build_tree_list (NULL_TREE, narg1)); return build_function_call_expr (fndecl, arglist); } /* Optimize pow(pow(x,y),z) = pow(x,y*z). */ if (flag_unsafe_math_optimizations && (fcode == BUILT_IN_POW || fcode == BUILT_IN_POWF || fcode == BUILT_IN_POWL)) { tree arg00 = TREE_VALUE (TREE_OPERAND (arg0, 1)); tree arg01 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg0, 1))); tree narg1 = fold (build2 (MULT_EXPR, type, arg01, arg1)); arglist = tree_cons (NULL_TREE, arg00, build_tree_list (NULL_TREE, narg1)); return build_function_call_expr (fndecl, arglist); } } break; case BUILT_IN_INF: case BUILT_IN_INFF: case BUILT_IN_INFL: return fold_builtin_inf (type, true); case BUILT_IN_HUGE_VAL: case BUILT_IN_HUGE_VALF: case BUILT_IN_HUGE_VALL: return fold_builtin_inf (type, false); case BUILT_IN_NAN: case BUILT_IN_NANF: case BUILT_IN_NANL: return fold_builtin_nan (arglist, type, true); case BUILT_IN_NANS: case BUILT_IN_NANSF: case BUILT_IN_NANSL: return fold_builtin_nan (arglist, type, false); case BUILT_IN_FLOOR: case BUILT_IN_FLOORF: case BUILT_IN_FLOORL: return fold_builtin_floor (exp); case BUILT_IN_CEIL: case BUILT_IN_CEILF: case BUILT_IN_CEILL: return fold_builtin_ceil (exp); case BUILT_IN_TRUNC: case BUILT_IN_TRUNCF: case BUILT_IN_TRUNCL: return fold_builtin_trunc (exp); case BUILT_IN_ROUND: case BUILT_IN_ROUNDF: case BUILT_IN_ROUNDL: return fold_builtin_round (exp); case BUILT_IN_NEARBYINT: case BUILT_IN_NEARBYINTF: case BUILT_IN_NEARBYINTL: case BUILT_IN_RINT: case BUILT_IN_RINTF: case BUILT_IN_RINTL: return fold_trunc_transparent_mathfn (exp); case BUILT_IN_LROUND: case BUILT_IN_LROUNDF: case BUILT_IN_LROUNDL: case BUILT_IN_LLROUND: case BUILT_IN_LLROUNDF: case BUILT_IN_LLROUNDL: return fold_builtin_lround (exp); case BUILT_IN_LRINT: case BUILT_IN_LRINTF: case BUILT_IN_LRINTL: case BUILT_IN_LLRINT: case BUILT_IN_LLRINTF: case BUILT_IN_LLRINTL: return fold_fixed_mathfn (exp); case BUILT_IN_FFS: case BUILT_IN_FFSL: case BUILT_IN_FFSLL: case BUILT_IN_CLZ: case BUILT_IN_CLZL: case BUILT_IN_CLZLL: case BUILT_IN_CTZ: case BUILT_IN_CTZL: case BUILT_IN_CTZLL: case BUILT_IN_POPCOUNT: case BUILT_IN_POPCOUNTL: case BUILT_IN_POPCOUNTLL: case BUILT_IN_PARITY: case BUILT_IN_PARITYL: case BUILT_IN_PARITYLL: return fold_builtin_bitop (exp); case BUILT_IN_MEMCPY: return fold_builtin_memcpy (exp); case BUILT_IN_MEMPCPY: return fold_builtin_mempcpy (exp); case BUILT_IN_MEMMOVE: return fold_builtin_memmove (exp); case BUILT_IN_STRCPY: return fold_builtin_strcpy (exp); case BUILT_IN_STRNCPY: return fold_builtin_strncpy (exp); case BUILT_IN_INDEX: case BUILT_IN_STRCHR: return fold_builtin_strchr (exp, false); case BUILT_IN_RINDEX: case BUILT_IN_STRRCHR: return fold_builtin_strchr (exp, true); case BUILT_IN_MEMCMP: return fold_builtin_memcmp (exp); case BUILT_IN_STRCMP: return fold_builtin_strcmp (exp); case BUILT_IN_STRNCMP: return fold_builtin_strncmp (exp); case BUILT_IN_SIGNBIT: case 
BUILT_IN_SIGNBITF: case BUILT_IN_SIGNBITL: return fold_builtin_signbit (exp); case BUILT_IN_ISASCII: return fold_builtin_isascii (arglist); case BUILT_IN_TOASCII: return fold_builtin_toascii (arglist); case BUILT_IN_ISDIGIT: return fold_builtin_isdigit (arglist); case BUILT_IN_COPYSIGN: case BUILT_IN_COPYSIGNF: case BUILT_IN_COPYSIGNL: return fold_builtin_copysign (arglist, type); case BUILT_IN_FINITE: case BUILT_IN_FINITEF: case BUILT_IN_FINITEL: return fold_builtin_classify (exp, BUILT_IN_FINITE); case BUILT_IN_ISINF: case BUILT_IN_ISINFF: case BUILT_IN_ISINFL: return fold_builtin_classify (exp, BUILT_IN_ISINF); case BUILT_IN_ISNAN: case BUILT_IN_ISNANF: case BUILT_IN_ISNANL: return fold_builtin_classify (exp, BUILT_IN_ISNAN); case BUILT_IN_ISGREATER: return fold_builtin_unordered_cmp (exp, UNLE_EXPR, LE_EXPR); case BUILT_IN_ISGREATEREQUAL: return fold_builtin_unordered_cmp (exp, UNLT_EXPR, LT_EXPR); case BUILT_IN_ISLESS: return fold_builtin_unordered_cmp (exp, UNGE_EXPR, GE_EXPR); case BUILT_IN_ISLESSEQUAL: return fold_builtin_unordered_cmp (exp, UNGT_EXPR, GT_EXPR); case BUILT_IN_ISLESSGREATER: return fold_builtin_unordered_cmp (exp, UNEQ_EXPR, EQ_EXPR); case BUILT_IN_ISUNORDERED: return fold_builtin_unordered_cmp (exp, UNORDERED_EXPR, NOP_EXPR); default: break; } return 0; } /* A wrapper function for builtin folding that prevents warnings for "statement without effect" and the like, caused by removing the call node earlier than the warning is generated. */ tree fold_builtin (tree exp) { exp = fold_builtin_1 (exp); if (exp) { /* ??? Don't clobber shared nodes such as integer_zero_node. */ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c') exp = build1 (NOP_EXPR, TREE_TYPE (exp), exp); TREE_NO_WARNING (exp) = 1; } return exp; } /* Conveniently construct a function call expression. */ tree build_function_call_expr (tree fn, tree arglist) { tree call_expr; call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn); call_expr = build3 (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)), call_expr, arglist, NULL_TREE); return fold (call_expr); } /* This function validates the types of a function call argument list represented as a tree chain of parameters against a specified list of tree_codes. If the last specifier is a 0, that represents an ellipses, otherwise the last specifier must be a VOID_TYPE. */ static int validate_arglist (tree arglist, ...) { enum tree_code code; int res = 0; va_list ap; va_start (ap, arglist); do { code = va_arg (ap, enum tree_code); switch (code) { case 0: /* This signifies an ellipses, any further arguments are all ok. */ res = 1; goto end; case VOID_TYPE: /* This signifies an endlink, if no arguments remain, return true, otherwise return false. */ res = arglist == 0; goto end; default: /* If no parameters remain or the parameter's code does not match the specified code, return false. Otherwise continue checking any remaining arguments. */ if (arglist == 0 || code != TREE_CODE (TREE_TYPE (TREE_VALUE (arglist)))) goto end; break; } arglist = TREE_CHAIN (arglist); } while (1); /* We need gotos here since we can only have one VA_CLOSE in a function. */ end: ; va_end (ap); return res; } /* Default target-specific builtin expander that does nothing. */ rtx default_expand_builtin (tree exp ATTRIBUTE_UNUSED, rtx target ATTRIBUTE_UNUSED, rtx subtarget ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED, int ignore ATTRIBUTE_UNUSED) { return NULL_RTX; } /* Returns true is EXP represents data that would potentially reside in a readonly section. 
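Typical examples are the address of a string literal or of a static variable for which decl_readonly_section succeeds; anything the function cannot analyze is conservatively treated as writable.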
*/ static bool readonly_data_expr (tree exp) { STRIP_NOPS (exp); if (TREE_CODE (exp) != ADDR_EXPR) return false; exp = get_base_address (TREE_OPERAND (exp, 0)); if (!exp) return false; /* Make sure we call decl_readonly_section only for trees it can handle (since it returns true for everything it doesn't understand). */ if (TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR || (TREE_CODE (exp) == VAR_DECL && TREE_STATIC (exp))) return decl_readonly_section (exp, 0); else return false; } /* Front-end to the simplify_builtin_XXX routines. EXP is a call to a builtin function. If possible try to simplify that into a constant, expression or call to a more efficient builtin function. If IGNORE is nonzero, then the result of this builtin function call is ignored. If simplification is possible, return the simplified tree, otherwise return NULL_TREE. */ tree simplify_builtin (tree exp, int ignore) { tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); tree arglist = TREE_OPERAND (exp, 1); enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl); tree val; switch (fcode) { case BUILT_IN_FPUTS: val = simplify_builtin_fputs (arglist, ignore, 0, NULL_TREE); break; case BUILT_IN_FPUTS_UNLOCKED: val = simplify_builtin_fputs (arglist, ignore, 1, NULL_TREE); break; case BUILT_IN_STRSTR: val = simplify_builtin_strstr (arglist); break; case BUILT_IN_STRCAT: val = simplify_builtin_strcat (arglist); break; case BUILT_IN_STRNCAT: val = simplify_builtin_strncat (arglist); break; case BUILT_IN_STRSPN: val = simplify_builtin_strspn (arglist); break; case BUILT_IN_STRCSPN: val = simplify_builtin_strcspn (arglist); break; case BUILT_IN_STRCHR: case BUILT_IN_INDEX: val = simplify_builtin_strchr (arglist); break; case BUILT_IN_STRRCHR: case BUILT_IN_RINDEX: val = simplify_builtin_strrchr (arglist); break; case BUILT_IN_STRCPY: val = simplify_builtin_strcpy (arglist, NULL_TREE); break; case BUILT_IN_STRNCPY: val = simplify_builtin_strncpy (arglist, NULL_TREE); break; case BUILT_IN_STRCMP: val = simplify_builtin_strcmp (arglist); break; case BUILT_IN_STRNCMP: val = simplify_builtin_strncmp (arglist); break; case BUILT_IN_STRPBRK: val = simplify_builtin_strpbrk (arglist); break; case BUILT_IN_BCMP: case BUILT_IN_MEMCMP: val = simplify_builtin_memcmp (arglist); break; case BUILT_IN_VA_START: simplify_builtin_va_start (arglist); val = NULL_TREE; break; case BUILT_IN_SPRINTF: val = simplify_builtin_sprintf (arglist, ignore); break; case BUILT_IN_CONSTANT_P: val = fold_builtin_constant_p (arglist); /* Gimplification will pull the CALL_EXPR for the builtin out of an if condition. When not optimizing, we'll not CSE it back. To avoid link error types of regressions, return false now. */ if (!val && !optimize) val = integer_zero_node; break; default: val = NULL_TREE; break; } if (val) val = fold_convert (TREE_TYPE (exp), val); return val; } /* Simplify a call to the strstr builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. 
The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. */ static tree simplify_builtin_strstr (tree arglist) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); tree fn; const char *p1, *p2; p2 = c_getstr (s2); if (p2 == NULL) return 0; p1 = c_getstr (s1); if (p1 != NULL) { const char *r = strstr (p1, p2); if (r == NULL) return fold_convert (TREE_TYPE (s1), integer_zero_node); /* Return an offset into the constant string argument. */ return fold (build2 (PLUS_EXPR, TREE_TYPE (s1), s1, fold_convert (TREE_TYPE (s1), ssize_int (r - p1)))); } if (p2[0] == '\0') return s1; if (p2[1] != '\0') return 0; fn = implicit_built_in_decls[BUILT_IN_STRCHR]; if (!fn) return 0; /* New argument list transforming strstr(s1, s2) to strchr(s1, s2[0]). */ arglist = build_tree_list (NULL_TREE, build_int_2 (p2[0], 0)); arglist = tree_cons (NULL_TREE, s1, arglist); return build_function_call_expr (fn, arglist); } } /* Simplify a call to the strstr builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. */ static tree simplify_builtin_strchr (tree arglist) { if (!validate_arglist (arglist, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); const char *p1; if (TREE_CODE (s2) != INTEGER_CST) return 0; p1 = c_getstr (s1); if (p1 != NULL) { char c; const char *r; if (target_char_cast (s2, &c)) return 0; r = strchr (p1, c); if (r == NULL) return fold_convert (TREE_TYPE (s1), integer_zero_node); /* Return an offset into the constant string argument. */ return fold (build2 (PLUS_EXPR, TREE_TYPE (s1), s1, fold_convert (TREE_TYPE (s1), ssize_int (r - p1)))); } /* FIXME: Should use here strchrM optab so that ports can optimize this. */ return 0; } } /* Simplify a call to the strrchr builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. 
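Besides evaluating strrchr of a constant string at compile time, the case where the search character is '\0' is rewritten as a call to strchr, since both functions then return a pointer to the terminating NUL.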
*/ static tree simplify_builtin_strrchr (tree arglist) { if (!validate_arglist (arglist, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); tree fn; const char *p1; if (TREE_CODE (s2) != INTEGER_CST) return 0; p1 = c_getstr (s1); if (p1 != NULL) { char c; const char *r; if (target_char_cast (s2, &c)) return 0; r = strrchr (p1, c); if (r == NULL) return fold_convert (TREE_TYPE (s1), integer_zero_node); /* Return an offset into the constant string argument. */ return fold (build2 (PLUS_EXPR, TREE_TYPE (s1), s1, fold_convert (TREE_TYPE (s1), ssize_int (r - p1)))); } if (! integer_zerop (s2)) return 0; fn = implicit_built_in_decls[BUILT_IN_STRCHR]; if (!fn) return 0; /* Transform strrchr(s1, '\0') to strchr(s1, '\0'). */ return build_function_call_expr (fn, arglist); } } /* Simplify a call to the strpbrk builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. */ static tree simplify_builtin_strpbrk (tree arglist) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); tree fn; const char *p1, *p2; p2 = c_getstr (s2); if (p2 == NULL) return 0; p1 = c_getstr (s1); if (p1 != NULL) { const char *r = strpbrk (p1, p2); if (r == NULL) return fold_convert (TREE_TYPE (s1), integer_zero_node); /* Return an offset into the constant string argument. */ return fold (build2 (PLUS_EXPR, TREE_TYPE (s1), s1, fold_convert (TREE_TYPE (s1), ssize_int (r - p1)))); } if (p2[0] == '\0') /* strpbrk(x, "") == NULL. Evaluate and ignore s1 in case it had side-effects. */ return omit_one_operand (TREE_TYPE (s1), integer_zero_node, s1); if (p2[1] != '\0') return 0; /* Really call strpbrk. */ fn = implicit_built_in_decls[BUILT_IN_STRCHR]; if (!fn) return 0; /* New argument list transforming strpbrk(s1, s2) to strchr(s1, s2[0]). */ arglist = build_tree_list (NULL_TREE, build_int_2 (p2[0], 0)); arglist = tree_cons (NULL_TREE, s1, arglist); return build_function_call_expr (fn, arglist); } } /* Simplify a call to the strcpy builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. 
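Here strcpy (dst, src) is rewritten as memcpy (dst, src, len + 1) when the length of SRC is known, either passed in by the caller or computed by c_strlen without side effects.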
*/ tree simplify_builtin_strcpy (tree arglist, tree len) { tree fn, src, dst; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; if (!fn) return 0; src = TREE_VALUE (TREE_CHAIN (arglist)); dst = TREE_VALUE (arglist); if (!len) { len = c_strlen (src, 1); if (!len || TREE_SIDE_EFFECTS (len)) return 0; } len = size_binop (PLUS_EXPR, len, ssize_int (1)); arglist = build_tree_list (NULL_TREE, len); arglist = tree_cons (NULL_TREE, src, arglist); arglist = tree_cons (NULL_TREE, dst, arglist); return build_function_call_expr (fn, arglist); } /* Simplify a call to the strncpy builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. */ tree simplify_builtin_strncpy (tree arglist, tree slen) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); tree fn; /* We must be passed a constant len parameter. */ if (TREE_CODE (len) != INTEGER_CST) return 0; /* If the len parameter is zero, return the dst parameter. */ if (integer_zerop (len)) /* Evaluate and ignore the src argument in case it has side-effects and return the dst parameter. */ return omit_one_operand (TREE_TYPE (TREE_VALUE (arglist)), TREE_VALUE (arglist), TREE_VALUE (TREE_CHAIN (arglist))); if (!slen) slen = c_strlen (TREE_VALUE (TREE_CHAIN (arglist)), 0); /* Now, we must be passed a constant src ptr parameter. */ if (slen == 0 || TREE_CODE (slen) != INTEGER_CST) return 0; slen = size_binop (PLUS_EXPR, slen, ssize_int (1)); /* We do not support simplification of this case, though we do support it when expanding trees into RTL. */ /* FIXME: generate a call to __builtin_memset. */ if (tree_int_cst_lt (slen, len)) return 0; /* OK transform into builtin memcpy. */ fn = implicit_built_in_decls[BUILT_IN_MEMCPY]; if (!fn) return 0; return build_function_call_expr (fn, arglist); } } /* Simplify a call to the memcmp builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. 
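*/

/* Editorial illustration, not taken from the GCC sources: the strcpy and
   strncpy rewrites above from the caller's side.  The helper name is
   invented, and dst is assumed to be large enough.  */
#include <string.h>

static void
example_strcpy_strncpy_rewrites (char *dst)
{
  /* The source length is known (5), so the copy becomes a memcpy of
     length + 1, picking up the terminating NUL as well.  */
  strcpy (dst, "hello");          /* becomes memcpy (dst, "hello", 6) */

  /* A zero count drops the call (dst is the result, src is only kept for
     its side effects); a constant count not exceeding strlen (src) + 1
     becomes a memcpy of exactly that count.  */
  strncpy (dst, "hi", 3);         /* becomes memcpy (dst, "hi", 3) */
}

/*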
*/ static tree simplify_builtin_memcmp (tree arglist) { tree arg1, arg2, len; const char *p1, *p2; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the len parameter is zero, return zero. */ if (host_integerp (len, 1) && tree_low_cst (len, 1) == 0) /* Evaluate and ignore arg1 and arg2 in case they have side-effects. */ return omit_two_operands (integer_type_node, integer_zero_node, arg1, arg2); p1 = c_getstr (arg1); p2 = c_getstr (arg2); /* If all arguments are constant, and the value of len is not greater than the lengths of arg1 and arg2, evaluate at compile-time. */ if (host_integerp (len, 1) && p1 && p2 && compare_tree_int (len, strlen (p1) + 1) <= 0 && compare_tree_int (len, strlen (p2) + 1) <= 0) { const int r = memcmp (p1, p2, tree_low_cst (len, 1)); return (r < 0 ? integer_minus_one_node : (r > 0 ? integer_one_node : integer_zero_node)); } /* If len parameter is one, return an expression corresponding to (*(const unsigned char*)arg1 - (const unsigned char*)arg2). */ if (host_integerp (len, 1) && tree_low_cst (len, 1) == 1) { tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0); tree cst_uchar_ptr_node = build_pointer_type (cst_uchar_node); tree ind1 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, build1 (NOP_EXPR, cst_uchar_ptr_node, arg1)))); tree ind2 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, build1 (NOP_EXPR, cst_uchar_ptr_node, arg2)))); return fold (build2 (MINUS_EXPR, integer_type_node, ind1, ind2)); } return 0; } /* Simplify a call to the strcmp builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. */ static tree simplify_builtin_strcmp (tree arglist) { tree arg1, arg2; const char *p1, *p2; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); /* If both arguments are equal (and not volatile), return zero. */ if (operand_equal_p (arg1, arg2, 0)) return integer_zero_node; p1 = c_getstr (arg1); p2 = c_getstr (arg2); if (p1 && p2) { const int i = strcmp (p1, p2); return (i < 0 ? integer_minus_one_node : (i > 0 ? integer_one_node : integer_zero_node)); } /* If either arg is "", return an expression corresponding to (*(const unsigned char*)arg1 - (const unsigned char*)arg2). 
*/ if ((p1 && *p1 == '\0') || (p2 && *p2 == '\0')) { tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0); tree cst_uchar_ptr_node = build_pointer_type (cst_uchar_node); tree ind1 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, build1 (NOP_EXPR, cst_uchar_ptr_node, arg1)))); tree ind2 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, build1 (NOP_EXPR, cst_uchar_ptr_node, arg2)))); return fold (build2 (MINUS_EXPR, integer_type_node, ind1, ind2)); } return 0; } /* Simplify a call to the strncmp builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. */ static tree simplify_builtin_strncmp (tree arglist) { tree arg1, arg2, arg3; const char *p1, *p2; if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); /* If the len parameter is zero, return zero. */ if (integer_zerop (arg3)) /* Evaluate and ignore arg1 and arg2 in case they have side-effects. */ return omit_two_operands (integer_type_node, integer_zero_node, arg1, arg2); /* If arg1 and arg2 are equal (and not volatile), return zero. */ if (operand_equal_p (arg1, arg2, 0)) /* Evaluate and ignore arg3 in case it has side-effects. */ return omit_one_operand (integer_type_node, integer_zero_node, arg3); p1 = c_getstr (arg1); p2 = c_getstr (arg2); /* If all arguments are constant, evaluate at compile-time. */ if (host_integerp (arg3, 1) && p1 && p2) { const int r = strncmp (p1, p2, tree_low_cst (arg3, 1)); return (r < 0 ? integer_minus_one_node : (r > 0 ? integer_one_node : integer_zero_node)); } /* If len == 1 or (either string parameter is "" and (len >= 1)), return (*(const u_char*)arg1 - *(const u_char*)arg2). */ if (host_integerp (arg3, 1) && (tree_low_cst (arg3, 1) == 1 || (tree_low_cst (arg3, 1) > 1 && ((p1 && *p1 == '\0') || (p2 && *p2 == '\0'))))) { tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0); tree cst_uchar_ptr_node = build_pointer_type (cst_uchar_node); tree ind1 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, build1 (NOP_EXPR, cst_uchar_ptr_node, arg1)))); tree ind2 = fold (build1 (CONVERT_EXPR, integer_type_node, build1 (INDIRECT_REF, cst_uchar_node, build1 (NOP_EXPR, cst_uchar_ptr_node, arg2)))); return fold (build2 (MINUS_EXPR, integer_type_node, ind1, ind2)); } return 0; } /* Simplify a call to the strcat builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). 
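*/

/* Editorial illustration, not taken from the GCC sources: the memcmp,
   strcmp and strncmp folds above at the C source level.  The helper name
   is invented.  */
#include <string.h>

static int
example_compare_folds (const char *s1, const char *s2)
{
  /* Fully constant comparisons fold to -1, 0 or 1 at compile time.  */
  int a = strcmp ("abc", "abd");            /* folds to -1 */

  /* Comparing an expression with itself, or using a zero length, folds
     to 0 (the arguments are still evaluated).  */
  int b = strncmp (s1, s1, 5) + memcmp (s1, s2, 0);

  /* A length of one, or one known-empty string, reduces to a single byte
     difference: *(const unsigned char *) s1 - *(const unsigned char *) s2.  */
  int c = memcmp (s1, s2, 1);

  return a + b + c;
}

/*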
The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. */ static tree simplify_builtin_strcat (tree arglist) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree dst = TREE_VALUE (arglist), src = TREE_VALUE (TREE_CHAIN (arglist)); const char *p = c_getstr (src); /* If the string length is zero, return the dst parameter. */ if (p && *p == '\0') return dst; return 0; } } /* Simplify a call to the strncat builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. */ static tree simplify_builtin_strncat (tree arglist) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) return 0; else { tree dst = TREE_VALUE (arglist); tree src = TREE_VALUE (TREE_CHAIN (arglist)); tree len = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); const char *p = c_getstr (src); /* If the requested length is zero, or the src parameter string length is zero, return the dst parameter. */ if (integer_zerop (len) || (p && *p == '\0')) return omit_two_operands (TREE_TYPE (dst), dst, src, len); /* If the requested len is greater than or equal to the string length, call strcat. */ if (TREE_CODE (len) == INTEGER_CST && p && compare_tree_int (len, strlen (p)) >= 0) { tree newarglist = tree_cons (NULL_TREE, dst, build_tree_list (NULL_TREE, src)); tree fn = implicit_built_in_decls[BUILT_IN_STRCAT]; /* If the replacement _DECL isn't initialized, don't do the transformation. */ if (!fn) return 0; return build_function_call_expr (fn, newarglist); } return 0; } } /* Simplify a call to the strspn builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. 
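*/

/* Editorial illustration, not taken from the GCC sources: the strcat and
   strncat simplifications above.  The helper name is invented and dst is
   assumed to have room for the appended text.  */
#include <string.h>

static void
example_strcat_strncat_rewrites (char *dst, const char *src)
{
  /* Appending an empty string changes nothing, so the call reduces to its
     dst argument.  */
  strcat (dst, "");

  /* A zero count (or an empty source) likewise reduces to dst; src and
     the count are still evaluated for their side effects.  */
  strncat (dst, src, 0);

  /* A constant count that is at least strlen (src) copies the whole
     source, so the call is rewritten as plain strcat (dst, "abc").  */
  strncat (dst, "abc", 8);
}

/*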
*/ static tree simplify_builtin_strspn (tree arglist) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); const char *p1 = c_getstr (s1), *p2 = c_getstr (s2); /* If both arguments are constants, evaluate at compile-time. */ if (p1 && p2) { const size_t r = strspn (p1, p2); return size_int (r); } /* If either argument is "", return 0. */ if ((p1 && *p1 == '\0') || (p2 && *p2 == '\0')) /* Evaluate and ignore both arguments in case either one has side-effects. */ return omit_two_operands (integer_type_node, integer_zero_node, s1, s2); return 0; } } /* Simplify a call to the strcspn builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. */ static tree simplify_builtin_strcspn (tree arglist) { if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; else { tree s1 = TREE_VALUE (arglist), s2 = TREE_VALUE (TREE_CHAIN (arglist)); const char *p1 = c_getstr (s1), *p2 = c_getstr (s2); /* If both arguments are constants, evaluate at compile-time. */ if (p1 && p2) { const size_t r = strcspn (p1, p2); return size_int (r); } /* If the first argument is "", return 0. */ if (p1 && *p1 == '\0') { /* Evaluate and ignore argument s2 in case it has side-effects. */ return omit_one_operand (integer_type_node, integer_zero_node, s2); } /* If the second argument is "", return __builtin_strlen(s1). */ if (p2 && *p2 == '\0') { tree newarglist = build_tree_list (NULL_TREE, s1), fn = implicit_built_in_decls[BUILT_IN_STRLEN]; /* If the replacement _DECL isn't initialized, don't do the transformation. */ if (!fn) return 0; return build_function_call_expr (fn, newarglist); } return 0; } } /* Simplify a call to the fputs builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. The simplified form may be a constant or other expression which computes the same value, but in a more efficient manner (including calls to other builtin functions). The call may contain arguments which need to be evaluated, but which are not useful to determine the result of the call. In this case we return a chain of COMPOUND_EXPRs. The LHS of each COMPOUND_EXPR will be an argument which must be evaluated. COMPOUND_EXPRs are chained through their RHS. The RHS of the last COMPOUND_EXPR in the chain will contain the tree for the simplified form of the builtin function call. If KNOWN_LEN is non-NULL, it represents the known length of the string. This is determined by SSA-CCP in cases where the string itself is not known to be constant but its length is always the same constant. */ tree simplify_builtin_fputs (tree arglist, int ignore, int unlocked, tree known_len) { tree len, fn; tree fn_fputc = unlocked ? implicit_built_in_decls[BUILT_IN_FPUTC_UNLOCKED] : implicit_built_in_decls[BUILT_IN_FPUTC]; tree fn_fwrite = unlocked ? 
implicit_built_in_decls[BUILT_IN_FWRITE_UNLOCKED] : implicit_built_in_decls[BUILT_IN_FWRITE]; /* If the return value is used, or the replacement _DECL isn't initialized, don't do the transformation. */ if (!ignore || !fn_fputc || !fn_fwrite) return 0; /* Verify the arguments in the original call. */ if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return 0; len = (known_len) ? known_len : c_strlen (TREE_VALUE (arglist), 0); /* Get the length of the string passed to fputs. If the length can't be determined, punt. */ if (!len || TREE_CODE (len) != INTEGER_CST) return 0; switch (compare_tree_int (len, 1)) { case -1: /* length is 0, delete the call entirely . */ return omit_one_operand (integer_type_node, integer_zero_node, TREE_VALUE (TREE_CHAIN (arglist))); case 0: /* length is 1, call fputc. */ { const char *p = c_getstr (TREE_VALUE (arglist)); if (p != NULL) { /* New argument list transforming fputs(string, stream) to fputc(string[0], stream). */ arglist = build_tree_list (NULL_TREE, TREE_VALUE (TREE_CHAIN (arglist))); arglist = tree_cons (NULL_TREE, build_int_2 (p[0], 0), arglist); fn = fn_fputc; break; } } /* FALLTHROUGH */ case 1: /* length is greater than 1, call fwrite. */ { tree string_arg; /* If optimizing for size keep fputs. */ if (optimize_size) return 0; string_arg = TREE_VALUE (arglist); /* New argument list transforming fputs(string, stream) to fwrite(string, 1, len, stream). */ arglist = build_tree_list (NULL_TREE, TREE_VALUE (TREE_CHAIN (arglist))); arglist = tree_cons (NULL_TREE, len, arglist); arglist = tree_cons (NULL_TREE, size_one_node, arglist); arglist = tree_cons (NULL_TREE, string_arg, arglist); fn = fn_fwrite; break; } default: abort (); } return build_function_call_expr (fn, arglist); } static void simplify_builtin_va_start (tree arglist) { tree chain = TREE_CHAIN (arglist); if (TREE_CHAIN (chain)) error ("too many arguments to function `va_start'"); simplify_builtin_next_arg (chain); } static void simplify_builtin_next_arg (tree arglist) { tree fntype = TREE_TYPE (current_function_decl); if (TYPE_ARG_TYPES (fntype) == 0 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype))) == void_type_node)) error ("`va_start' used in function with fixed args"); else if (arglist) { tree last_parm = tree_last (DECL_ARGUMENTS (current_function_decl)); tree arg = TREE_VALUE (arglist); /* Strip off all nops for the sake of the comparison. This is not quite the same as STRIP_NOPS. It does more. We must also strip off INDIRECT_EXPR for C++ reference parameters. */ while (TREE_CODE (arg) == NOP_EXPR || TREE_CODE (arg) == CONVERT_EXPR || TREE_CODE (arg) == NON_LVALUE_EXPR || TREE_CODE (arg) == INDIRECT_REF) arg = TREE_OPERAND (arg, 0); if (arg != last_parm) warning ("second parameter of `va_start' not last named argument"); TREE_VALUE (arglist) = arg; } else /* Evidently an out of date version of ; can't validate va_start's second argument, but can still work as intended. */ warning ("`__builtin_next_arg' called without an argument"); } /* Simplify a call to the sprintf builtin. Return 0 if no simplification was possible, otherwise return the simplified form of the call as a tree. If IGNORED is true, it means that the caller does not use the returned value of the function. */ static tree simplify_builtin_sprintf (tree arglist, int ignored) { tree call, retval, dest, fmt; const char *fmt_str = NULL; /* Verify the required arguments in the original call. We deal with two types of sprintf() calls: 'sprintf (str, fmt)' and 'sprintf (dest, "%s", orig)'. 
*/ if (!validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, VOID_TYPE) && !validate_arglist (arglist, POINTER_TYPE, POINTER_TYPE, POINTER_TYPE, VOID_TYPE)) return NULL_TREE; /* Get the destination string and the format specifier. */ dest = TREE_VALUE (arglist); fmt = TREE_VALUE (TREE_CHAIN (arglist)); /* Check whether the format is a literal string constant. */ fmt_str = c_getstr (fmt); if (fmt_str == NULL) return NULL_TREE; call = NULL_TREE; retval = NULL_TREE; /* If the format doesn't contain % args or %%, use strcpy. */ if (strchr (fmt_str, '%') == NULL) { tree fn = implicit_built_in_decls[BUILT_IN_STRCPY]; if (!fn) return NULL_TREE; /* Convert sprintf (str, fmt) into strcpy (str, fmt) when 'format' is known to contain no % formats. */ arglist = build_tree_list (NULL_TREE, fmt); arglist = tree_cons (NULL_TREE, dest, arglist); call = build_function_call_expr (fn, arglist); if (!ignored) retval = build_int_2 (strlen (fmt_str), 0); } /* If the format is "%s", use strcpy if the result isn't used. */ else if (fmt_str && strcmp (fmt_str, "%s") == 0) { tree fn, orig; fn = implicit_built_in_decls[BUILT_IN_STRCPY]; if (!fn) return NULL_TREE; /* Convert sprintf (str1, "%s", str2) into strcpy (str1, str2). */ orig = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); arglist = build_tree_list (NULL_TREE, orig); arglist = tree_cons (NULL_TREE, dest, arglist); if (!ignored) { retval = c_strlen (orig, 1); if (!retval || TREE_CODE (retval) != INTEGER_CST) return NULL_TREE; } call = build_function_call_expr (fn, arglist); } if (call && retval) { retval = convert (TREE_TYPE (TREE_TYPE (implicit_built_in_decls[BUILT_IN_SPRINTF])), retval); return build2 (COMPOUND_EXPR, TREE_TYPE (retval), call, retval); } else return call; } /* Save and restore call-clobbered registers which are live across a call. Copyright (C) 1989, 1992, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef MAX_MOVE_MAX #define MAX_MOVE_MAX MOVE_MAX #endif #ifndef MIN_UNITS_PER_WORD #define MIN_UNITS_PER_WORD UNITS_PER_WORD #endif #define MOVE_MAX_WORDS (MOVE_MAX / UNITS_PER_WORD) /* Modes for each hard register that we can save. The smallest mode is wide enough to save the entire contents of the register. When saving the register because it is live we first try to save in multi-register modes. If that is not possible the save is done one register at a time. */ static enum machine_mode regno_save_mode[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MIN_UNITS_PER_WORD + 1]; /* For each hard register, a place on the stack where it can be saved, if needed. */ static rtx regno_save_mem[FIRST_PSEUDO_REGISTER][MAX_MOVE_MAX / MIN_UNITS_PER_WORD + 1]; /* We will only make a register eligible for caller-save if it can be saved in its widest mode with a simple SET insn as long as the memory address is valid. 
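*/

/* Editorial illustration, not taken from the GCC sources: the strcspn,
   fputs and sprintf simplifications defined above, at the C source level.
   The helper name is invented; the fputs rewrites only apply when the
   return value is ignored, as it is in this sketch.  */
#include <stdio.h>
#include <string.h>

static void
example_stdio_rewrites (FILE *fp, char *buf, const char *name)
{
  /* strcspn against an empty reject set is just the string length.  */
  size_t n = strcspn (name, "");            /* becomes strlen (name) */

  /* fputs of "" is deleted (the stream is still evaluated), of a single
     character becomes fputc, and of a longer literal becomes fwrite
     unless we are optimizing for size.  */
  fputs ("", fp);                           /* deleted */
  fputs ("\n", fp);                         /* becomes fputc ('\n', fp) */
  fputs ("hello", fp);                      /* becomes fwrite ("hello", 1, 5, fp) */

  /* A format with no '%' directives, or a plain "%s", is just a copy.  */
  sprintf (buf, "done");                    /* becomes strcpy (buf, "done") */
  sprintf (buf, "%s", name);                /* becomes strcpy (buf, name) */

  (void) n;
}

/*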
We record the INSN_CODE is those insns here since when we emit them, the addresses might not be valid, so they might not be recognized. */ static int reg_save_code[FIRST_PSEUDO_REGISTER][MAX_MACHINE_MODE]; static int reg_restore_code[FIRST_PSEUDO_REGISTER][MAX_MACHINE_MODE]; /* Set of hard regs currently residing in save area (during insn scan). */ static HARD_REG_SET hard_regs_saved; /* Number of registers currently in hard_regs_saved. */ static int n_regs_saved; /* Computed by mark_referenced_regs, all regs referenced in a given insn. */ static HARD_REG_SET referenced_regs; /* Computed in mark_set_regs_cs, holds all registers set by the current instruction. */ static HARD_REG_SET this_insn_sets; static void mark_set_regs_cs (rtx, rtx, void *); static void mark_referenced_regs (rtx); static int insert_save (struct insn_chain *, int, int, HARD_REG_SET *, enum machine_mode *); static int insert_restore (struct insn_chain *, int, int, int, enum machine_mode *); static struct insn_chain *insert_one_insn (struct insn_chain *, int, int, rtx); static void add_stored_regs (rtx, rtx, void *); /* Initialize for caller-save. Look at all the hard registers that are used by a call and for which regclass.c has not already excluded from being used across a call. Ensure that we can find a mode to save the register and that there is a simple insn to save and restore the register. This latter check avoids problems that would occur if we tried to save the MQ register of some machines directly into memory. */ void init_caller_save (void) { rtx addr_reg; int offset; rtx address; int i, j; enum machine_mode mode; rtx savepat, restpat; rtx test_reg, test_mem; rtx saveinsn, restinsn; /* First find all the registers that we need to deal with and all the modes that they can have. If we can't find a mode to use, we can't have the register live over calls. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { if (call_used_regs[i] && ! call_fixed_regs[i]) { for (j = 1; j <= MOVE_MAX_WORDS; j++) { regno_save_mode[i][j] = HARD_REGNO_CALLER_SAVE_MODE (i, j, VOIDmode); if (regno_save_mode[i][j] == VOIDmode && j == 1) { call_fixed_regs[i] = 1; SET_HARD_REG_BIT (call_fixed_reg_set, i); } } } else regno_save_mode[i][1] = VOIDmode; } /* The following code tries to approximate the conditions under which we can easily save and restore a register without scratch registers or other complexities. It will usually work, except under conditions where the validity of an insn operand is dependent on the address offset. No such cases are currently known. We first find a typical offset from some BASE_REG_CLASS register. This address is chosen by finding the first register in the class and by finding the smallest power of two that is a valid offset from that register in every mode we will use to save registers. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (reg_class_contents [(int) MODE_BASE_REG_CLASS (regno_save_mode [i][1])], i)) break; if (i == FIRST_PSEUDO_REGISTER) abort (); addr_reg = gen_rtx_REG (Pmode, i); for (offset = 1 << (HOST_BITS_PER_INT / 2); offset; offset >>= 1) { address = gen_rtx_PLUS (Pmode, addr_reg, GEN_INT (offset)); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (regno_save_mode[i][1] != VOIDmode && ! strict_memory_address_p (regno_save_mode[i][1], address)) break; if (i == FIRST_PSEUDO_REGISTER) break; } /* If we didn't find a valid address, we must use register indirect. */ if (offset == 0) address = addr_reg; /* Next we try to form an insn to save and restore the register. 
We see if such an insn is recognized and meets its constraints. To avoid lots of unnecessary RTL allocation, we construct all the RTL once, then modify the memory and register operands in-place. */ test_reg = gen_rtx_REG (VOIDmode, 0); test_mem = gen_rtx_MEM (VOIDmode, address); savepat = gen_rtx_SET (VOIDmode, test_mem, test_reg); restpat = gen_rtx_SET (VOIDmode, test_reg, test_mem); saveinsn = gen_rtx_INSN (VOIDmode, 0, 0, 0, 0, 0, savepat, -1, 0, 0); restinsn = gen_rtx_INSN (VOIDmode, 0, 0, 0, 0, 0, restpat, -1, 0, 0); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) for (mode = 0 ; mode < MAX_MACHINE_MODE; mode++) if (HARD_REGNO_MODE_OK (i, mode)) { int ok; /* Update the register number and modes of the register and memory operand. */ REGNO (test_reg) = i; PUT_MODE (test_reg, mode); PUT_MODE (test_mem, mode); /* Force re-recognition of the modified insns. */ INSN_CODE (saveinsn) = -1; INSN_CODE (restinsn) = -1; reg_save_code[i][mode] = recog_memoized (saveinsn); reg_restore_code[i][mode] = recog_memoized (restinsn); /* Now extract both insns and see if we can meet their constraints. */ ok = (reg_save_code[i][mode] != -1 && reg_restore_code[i][mode] != -1); if (ok) { extract_insn (saveinsn); ok = constrain_operands (1); extract_insn (restinsn); ok &= constrain_operands (1); } if (! ok) { reg_save_code[i][mode] = -1; reg_restore_code[i][mode] = -1; } } else { reg_save_code[i][mode] = -1; reg_restore_code[i][mode] = -1; } for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) for (j = 1; j <= MOVE_MAX_WORDS; j++) if (reg_save_code [i][regno_save_mode[i][j]] == -1) { regno_save_mode[i][j] = VOIDmode; if (j == 1) { call_fixed_regs[i] = 1; SET_HARD_REG_BIT (call_fixed_reg_set, i); } } } /* Initialize save areas by showing that we haven't allocated any yet. */ void init_save_areas (void) { int i, j; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) for (j = 1; j <= MOVE_MAX_WORDS; j++) regno_save_mem[i][j] = 0; } /* Allocate save areas for any hard registers that might need saving. We take a conservative approach here and look for call-clobbered hard registers that are assigned to pseudos that cross calls. This may overestimate slightly (especially if some of these registers are later used as spill registers), but it should not be significant. Future work: In the fallback case we should iterate backwards across all possible modes for the save, choosing the largest available one instead of falling back to the smallest mode immediately. (eg TF -> DF -> SF). We do not try to use "move multiple" instructions that exist on some machines (such as the 68k moveml). It could be a win to try and use them when possible. The hard part is doing it in a way that is machine independent since they might be saving non-consecutive registers. (imagine caller-saving d0,d1,a0,a1 on the 68k) */ void setup_save_areas (void) { int i, j, k; unsigned int r; HARD_REG_SET hard_regs_used; /* Allocate space in the save area for the largest multi-register pseudos first, then work backwards to single register pseudos. */ /* Find and record all call-used hard-registers in this function. 
*/ CLEAR_HARD_REG_SET (hard_regs_used); for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) if (reg_renumber[i] >= 0 && REG_N_CALLS_CROSSED (i) > 0) { unsigned int regno = reg_renumber[i]; unsigned int endregno = regno + hard_regno_nregs[regno][GET_MODE (regno_reg_rtx[i])]; for (r = regno; r < endregno; r++) if (call_used_regs[r]) SET_HARD_REG_BIT (hard_regs_used, r); } /* Now run through all the call-used hard-registers and allocate space for them in the caller-save area. Try to allocate space in a manner which allows multi-register saves/restores to be done. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) for (j = MOVE_MAX_WORDS; j > 0; j--) { int do_save = 1; /* If no mode exists for this size, try another. Also break out if we have already saved this hard register. */ if (regno_save_mode[i][j] == VOIDmode || regno_save_mem[i][1] != 0) continue; /* See if any register in this group has been saved. */ for (k = 0; k < j; k++) if (regno_save_mem[i + k][1]) { do_save = 0; break; } if (! do_save) continue; for (k = 0; k < j; k++) if (! TEST_HARD_REG_BIT (hard_regs_used, i + k)) { do_save = 0; break; } if (! do_save) continue; /* We have found an acceptable mode to store in. */ regno_save_mem[i][j] = assign_stack_local (regno_save_mode[i][j], GET_MODE_SIZE (regno_save_mode[i][j]), 0); /* Setup single word save area just in case... */ for (k = 0; k < j; k++) /* This should not depend on WORDS_BIG_ENDIAN. The order of words in regs is the same as in memory. */ regno_save_mem[i + k][1] = adjust_address_nv (regno_save_mem[i][j], regno_save_mode[i + k][1], k * UNITS_PER_WORD); } /* Now loop again and set the alias set of any save areas we made to the alias set used to represent frame objects. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) for (j = MOVE_MAX_WORDS; j > 0; j--) if (regno_save_mem[i][j] != 0) set_mem_alias_set (regno_save_mem[i][j], get_frame_alias_set ()); } /* Find the places where hard regs are live across calls and save them. */ void save_call_clobbered_regs (void) { struct insn_chain *chain, *next; enum machine_mode save_mode [FIRST_PSEUDO_REGISTER]; CLEAR_HARD_REG_SET (hard_regs_saved); n_regs_saved = 0; for (chain = reload_insn_chain; chain != 0; chain = next) { rtx insn = chain->insn; enum rtx_code code = GET_CODE (insn); next = chain->next; if (chain->is_caller_save_insn) abort (); if (INSN_P (insn)) { /* If some registers have been saved, see if INSN references any of them. We must restore them before the insn if so. */ if (n_regs_saved) { int regno; if (code == JUMP_INSN) /* Restore all registers if this is a JUMP_INSN. */ COPY_HARD_REG_SET (referenced_regs, hard_regs_saved); else { CLEAR_HARD_REG_SET (referenced_regs); mark_referenced_regs (PATTERN (insn)); AND_HARD_REG_SET (referenced_regs, hard_regs_saved); } for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (TEST_HARD_REG_BIT (referenced_regs, regno)) regno += insert_restore (chain, 1, regno, MOVE_MAX_WORDS, save_mode); } if (code == CALL_INSN && ! find_reg_note (insn, REG_NORETURN, NULL)) { int regno; HARD_REG_SET hard_regs_to_save; /* Use the register life information in CHAIN to compute which regs are live during the call. */ REG_SET_TO_HARD_REG_SET (hard_regs_to_save, &chain->live_throughout); /* Save hard registers always in the widest mode available. 
*/ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (TEST_HARD_REG_BIT (hard_regs_to_save, regno)) save_mode [regno] = regno_save_mode [regno][1]; else save_mode [regno] = VOIDmode; /* Look through all live pseudos, mark their hard registers and choose proper mode for saving. */ EXECUTE_IF_SET_IN_REG_SET (&chain->live_throughout, FIRST_PSEUDO_REGISTER, regno, { int r = reg_renumber[regno]; int nregs; if (r >= 0) { enum machine_mode mode; nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (regno)]; mode = HARD_REGNO_CALLER_SAVE_MODE (r, nregs, PSEUDO_REGNO_MODE (regno)); if (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (save_mode[r])) save_mode[r] = mode; while (nregs-- > 0) SET_HARD_REG_BIT (hard_regs_to_save, r + nregs); } else abort (); }); /* Record all registers set in this call insn. These don't need to be saved. N.B. the call insn might set a subreg of a multi-hard-reg pseudo; then the pseudo is considered live during the call, but the subreg that is set isn't. */ CLEAR_HARD_REG_SET (this_insn_sets); note_stores (PATTERN (insn), mark_set_regs_cs, NULL); /* Compute which hard regs must be saved before this call. */ AND_COMPL_HARD_REG_SET (hard_regs_to_save, call_fixed_reg_set); AND_COMPL_HARD_REG_SET (hard_regs_to_save, this_insn_sets); AND_COMPL_HARD_REG_SET (hard_regs_to_save, hard_regs_saved); AND_HARD_REG_SET (hard_regs_to_save, call_used_reg_set); for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (TEST_HARD_REG_BIT (hard_regs_to_save, regno)) regno += insert_save (chain, 1, regno, &hard_regs_to_save, save_mode); /* Must recompute n_regs_saved. */ n_regs_saved = 0; for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (TEST_HARD_REG_BIT (hard_regs_saved, regno)) n_regs_saved++; } } if (chain->next == 0 || chain->next->block > chain->block) { int regno; /* At the end of the basic block, we must restore any registers that remain saved. If the last insn in the block is a JUMP_INSN, put the restore before the insn, otherwise, put it after the insn. */ if (n_regs_saved) for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (TEST_HARD_REG_BIT (hard_regs_saved, regno)) regno += insert_restore (chain, GET_CODE (insn) == JUMP_INSN, regno, MOVE_MAX_WORDS, save_mode); } } } /* Here from note_stores when an insn stores a value in a register. Set the proper bit or bits in this_insn_sets. All pseudos that have been assigned hard regs have had their register number changed already, so we can ignore pseudos. */ static void mark_set_regs_cs (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { int regno, endregno, i; enum machine_mode mode = GET_MODE (reg); if (GET_CODE (reg) == SUBREG) { rtx inner = SUBREG_REG (reg); if (!REG_P (inner) || REGNO (inner) >= FIRST_PSEUDO_REGISTER) return; regno = subreg_hard_regno (reg, 1); } else if (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER) regno = REGNO (reg); else return; endregno = regno + hard_regno_nregs[regno][mode]; for (i = regno; i < endregno; i++) SET_HARD_REG_BIT (this_insn_sets, i); } /* Here from note_stores when an insn stores a value in a register. Set the proper bit or bits in the passed regset. All pseudos that have been assigned hard regs have had their register number changed already, so we can ignore pseudos. 
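*/

/* Editorial sketch, not part of the GCC sources: a deliberately tiny model
   of the placement policy implemented by save_call_clobbered_regs above.
   The real code walks the reload insn chain and uses HARD_REG_SETs; here
   registers are a plain bitmask, "insns" are a toy struct, and every name
   below is invented.  Saves are emitted before a call for the call-clobbered
   registers live across it; restores are emitted as soon as a later insn
   references a saved register, or at the end of the block.  */
#include <stdio.h>

enum toy_insn_kind { TOY_USE, TOY_CALL, TOY_END_BLOCK };

struct toy_insn
{
  enum toy_insn_kind kind;
  unsigned regs;        /* regs referenced, or live across the call */
};

static void
toy_caller_save (const struct toy_insn *insns, int n_insns,
                 unsigned call_clobbered)
{
  unsigned saved = 0;   /* regs currently sitting in the save area */
  int i;

  for (i = 0; i < n_insns; i++)
    {
      /* Restore any saved register this insn references (or everything,
         at the end of the block), before the insn itself.  */
      unsigned need = (insns[i].kind == TOY_END_BLOCK
                       ? saved : (insns[i].regs & saved));

      if (need)
        {
          printf ("insn %d: restore regs %#x\n", i, need);
          saved &= ~need;
        }

      /* Before a call, save the call-clobbered registers that are live
         across it and not already saved.  */
      if (insns[i].kind == TOY_CALL)
        {
          unsigned to_save = insns[i].regs & call_clobbered & ~saved;

          if (to_save)
            {
              printf ("insn %d: save regs %#x\n", i, to_save);
              saved |= to_save;
            }
        }
    }
}

/*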
*/ static void add_stored_regs (rtx reg, rtx setter, void *data) { int regno, endregno, i; enum machine_mode mode = GET_MODE (reg); int offset = 0; if (GET_CODE (setter) == CLOBBER) return; if (GET_CODE (reg) == SUBREG && REG_P (SUBREG_REG (reg))) { offset = subreg_regno_offset (REGNO (SUBREG_REG (reg)), GET_MODE (SUBREG_REG (reg)), SUBREG_BYTE (reg), GET_MODE (reg)); reg = SUBREG_REG (reg); } if (!REG_P (reg) || REGNO (reg) >= FIRST_PSEUDO_REGISTER) return; regno = REGNO (reg) + offset; endregno = regno + hard_regno_nregs[regno][mode]; for (i = regno; i < endregno; i++) SET_REGNO_REG_SET ((regset) data, i); } /* Walk X and record all referenced registers in REFERENCED_REGS. */ static void mark_referenced_regs (rtx x) { enum rtx_code code = GET_CODE (x); const char *fmt; int i, j; if (code == SET) mark_referenced_regs (SET_SRC (x)); if (code == SET || code == CLOBBER) { x = SET_DEST (x); code = GET_CODE (x); if ((code == REG && REGNO (x) < FIRST_PSEUDO_REGISTER) || code == PC || code == CC0 || (code == SUBREG && REG_P (SUBREG_REG (x)) && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER /* If we're setting only part of a multi-word register, we shall mark it as referenced, because the words that are not being set should be restored. */ && ((GET_MODE_SIZE (GET_MODE (x)) >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) || (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) <= UNITS_PER_WORD)))) return; } if (code == MEM || code == SUBREG) { x = XEXP (x, 0); code = GET_CODE (x); } if (code == REG) { int regno = REGNO (x); int hardregno = (regno < FIRST_PSEUDO_REGISTER ? regno : reg_renumber[regno]); if (hardregno >= 0) { int nregs = hard_regno_nregs[hardregno][GET_MODE (x)]; while (nregs-- > 0) SET_HARD_REG_BIT (referenced_regs, hardregno + nregs); } /* If this is a pseudo that did not get a hard register, scan its memory location, since it might involve the use of another register, which might be saved. */ else if (reg_equiv_mem[regno] != 0) mark_referenced_regs (XEXP (reg_equiv_mem[regno], 0)); else if (reg_equiv_address[regno] != 0) mark_referenced_regs (reg_equiv_address[regno]); return; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') mark_referenced_regs (XEXP (x, i)); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) mark_referenced_regs (XVECEXP (x, i, j)); } } /* Insert a sequence of insns to restore. Place these insns in front of CHAIN if BEFORE_P is nonzero, behind the insn otherwise. MAXRESTORE is the maximum number of registers which should be restored during this call. It should never be less than 1 since we only work with entire registers. Note that we have verified in init_caller_save that we can do this with a simple SET, so use it. Set INSN_CODE to what we save there since the address might not be valid so the insn might not be recognized. These insns will be reloaded and have register elimination done by find_reload, so we need not worry about that here. Return the extra number of registers saved. */ static int insert_restore (struct insn_chain *chain, int before_p, int regno, int maxrestore, enum machine_mode *save_mode) { int i, k; rtx pat = NULL_RTX; int code; unsigned int numregs = 0; struct insn_chain *new; rtx mem; /* A common failure mode if register status is not correct in the RTL is for this routine to be called with a REGNO we didn't expect to save. That will cause us to write an insn with a (nil) SET_DEST or SET_SRC. 
Instead of doing so and causing a crash later, check for this common case and abort here instead. This will remove one step in debugging such problems. */ if (regno_save_mem[regno][1] == 0) abort (); /* Get the pattern to emit and update our status. See if we can restore `maxrestore' registers at once. Work backwards to the single register case. */ for (i = maxrestore; i > 0; i--) { int j; int ok = 1; if (regno_save_mem[regno][i] == 0) continue; for (j = 0; j < i; j++) if (! TEST_HARD_REG_BIT (hard_regs_saved, regno + j)) { ok = 0; break; } /* Must do this one restore at a time. */ if (! ok) continue; numregs = i; break; } mem = regno_save_mem [regno][numregs]; if (save_mode [regno] != VOIDmode && save_mode [regno] != GET_MODE (mem) && numregs == (unsigned int) hard_regno_nregs[regno][save_mode [regno]]) mem = adjust_address (mem, save_mode[regno], 0); else mem = copy_rtx (mem); pat = gen_rtx_SET (VOIDmode, gen_rtx_REG (GET_MODE (mem), regno), mem); code = reg_restore_code[regno][GET_MODE (mem)]; new = insert_one_insn (chain, before_p, code, pat); /* Clear status for all registers we restored. */ for (k = 0; k < i; k++) { CLEAR_HARD_REG_BIT (hard_regs_saved, regno + k); SET_REGNO_REG_SET (&new->dead_or_set, regno + k); n_regs_saved--; } /* Tell our callers how many extra registers we saved/restored. */ return numregs - 1; } /* Like insert_restore above, but save registers instead. */ static int insert_save (struct insn_chain *chain, int before_p, int regno, HARD_REG_SET (*to_save), enum machine_mode *save_mode) { int i; unsigned int k; rtx pat = NULL_RTX; int code; unsigned int numregs = 0; struct insn_chain *new; rtx mem; /* A common failure mode if register status is not correct in the RTL is for this routine to be called with a REGNO we didn't expect to save. That will cause us to write an insn with a (nil) SET_DEST or SET_SRC. Instead of doing so and causing a crash later, check for this common case and abort here instead. This will remove one step in debugging such problems. */ if (regno_save_mem[regno][1] == 0) abort (); /* Get the pattern to emit and update our status. See if we can save several registers with a single instruction. Work backwards to the single register case. */ for (i = MOVE_MAX_WORDS; i > 0; i--) { int j; int ok = 1; if (regno_save_mem[regno][i] == 0) continue; for (j = 0; j < i; j++) if (! TEST_HARD_REG_BIT (*to_save, regno + j)) { ok = 0; break; } /* Must do this one save at a time. */ if (! ok) continue; numregs = i; break; } mem = regno_save_mem [regno][numregs]; if (save_mode [regno] != VOIDmode && save_mode [regno] != GET_MODE (mem) && numregs == (unsigned int) hard_regno_nregs[regno][save_mode [regno]]) mem = adjust_address (mem, save_mode[regno], 0); else mem = copy_rtx (mem); pat = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (GET_MODE (mem), regno)); code = reg_save_code[regno][GET_MODE (mem)]; new = insert_one_insn (chain, before_p, code, pat); /* Set hard_regs_saved and dead_or_set for all the registers we saved. */ for (k = 0; k < numregs; k++) { SET_HARD_REG_BIT (hard_regs_saved, regno + k); SET_REGNO_REG_SET (&new->dead_or_set, regno + k); n_regs_saved++; } /* Tell our callers how many extra registers we saved/restored. */ return numregs - 1; } /* Emit a new caller-save insn and set the code. */ static struct insn_chain * insert_one_insn (struct insn_chain *chain, int before_p, int code, rtx pat) { rtx insn = chain->insn; struct insn_chain *new; #ifdef HAVE_cc0 /* If INSN references CC0, put our insns in front of the insn that sets CC0. 
This is always safe, since the only way we could be passed an insn that references CC0 is for a restore, and doing a restore earlier isn't a problem. We do, however, assume here that CALL_INSNs don't reference CC0. Guard against non-INSN's like CODE_LABEL. */ if ((GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN) && before_p && reg_referenced_p (cc0_rtx, PATTERN (insn))) chain = chain->prev, insn = chain->insn; #endif new = new_insn_chain (); if (before_p) { rtx link; new->prev = chain->prev; if (new->prev != 0) new->prev->next = new; else reload_insn_chain = new; chain->prev = new; new->next = chain; new->insn = emit_insn_before (pat, insn); /* ??? It would be nice if we could exclude the already / still saved registers from the live sets. */ COPY_REG_SET (&new->live_throughout, &chain->live_throughout); /* Registers that die in CHAIN->INSN still live in the new insn. */ for (link = REG_NOTES (chain->insn); link; link = XEXP (link, 1)) { if (REG_NOTE_KIND (link) == REG_DEAD) { rtx reg = XEXP (link, 0); int regno, i; if (!REG_P (reg)) abort (); regno = REGNO (reg); if (regno >= FIRST_PSEUDO_REGISTER) regno = reg_renumber[regno]; if (regno < 0) continue; for (i = hard_regno_nregs[regno][GET_MODE (reg)] - 1; i >= 0; i--) SET_REGNO_REG_SET (&new->live_throughout, regno + i); } } CLEAR_REG_SET (&new->dead_or_set); if (chain->insn == BB_HEAD (BASIC_BLOCK (chain->block))) BB_HEAD (BASIC_BLOCK (chain->block)) = new->insn; } else { new->next = chain->next; if (new->next != 0) new->next->prev = new; chain->next = new; new->prev = chain; new->insn = emit_insn_after (pat, insn); /* ??? It would be nice if we could exclude the already / still saved registers from the live sets, and observe REG_UNUSED notes. */ COPY_REG_SET (&new->live_throughout, &chain->live_throughout); /* Registers that are set in CHAIN->INSN live in the new insn. (Unless there is a REG_UNUSED note for them, but we don't look for them here.) */ note_stores (PATTERN (chain->insn), add_stored_regs, &new->live_throughout); CLEAR_REG_SET (&new->dead_or_set); if (chain->insn == BB_END (BASIC_BLOCK (chain->block))) BB_END (BASIC_BLOCK (chain->block)) = new->insn; } new->block = chain->block; new->is_caller_save_insn = 1; INSN_CODE (new->insn) = code; return new; } /* Convert function calls to rtl insns, for GNU C compiler. Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Like PREFERRED_STACK_BOUNDARY but in units of bytes, not bits. */ #define PREF_STACK_BYTES (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT) /* Data structure and subroutines used within expand_call. */ struct arg_data { /* Tree node for this argument. */ tree tree_value; /* Mode for value; TYPE_MODE unless promoted. */ enum machine_mode mode; /* Current RTL value for argument, or 0 if it isn't precomputed. 
*/ rtx value; /* Initially-compute RTL value for argument; only for const functions. */ rtx initial_value; /* Register to pass this argument in, 0 if passed on stack, or an PARALLEL if the arg is to be copied into multiple non-contiguous registers. */ rtx reg; /* Register to pass this argument in when generating tail call sequence. This is not the same register as for normal calls on machines with register windows. */ rtx tail_call_reg; /* If REG was promoted from the actual mode of the argument expression, indicates whether the promotion is sign- or zero-extended. */ int unsignedp; /* Number of registers to use. 0 means put the whole arg in registers. Also 0 if not passed in registers. */ int partial; /* Nonzero if argument must be passed on stack. Note that some arguments may be passed on the stack even though pass_on_stack is zero, just because FUNCTION_ARG says so. pass_on_stack identifies arguments that *cannot* go in registers. */ int pass_on_stack; /* Some fields packaged up for locate_and_pad_parm. */ struct locate_and_pad_arg_data locate; /* Location on the stack at which parameter should be stored. The store has already been done if STACK == VALUE. */ rtx stack; /* Location on the stack of the start of this argument slot. This can differ from STACK if this arg pads downward. This location is known to be aligned to FUNCTION_ARG_BOUNDARY. */ rtx stack_slot; /* Place that this stack area has been saved, if needed. */ rtx save_area; /* If an argument's alignment does not permit direct copying into registers, copy in smaller-sized pieces into pseudos. These are stored in a block pointed to by this field. The next field says how many word-sized pseudos we made. */ rtx *aligned_regs; int n_aligned_regs; }; /* A vector of one char per byte of stack space. A byte if nonzero if the corresponding stack location has been used. This vector is used to prevent a function call within an argument from clobbering any stack already set up. */ static char *stack_usage_map; /* Size of STACK_USAGE_MAP. */ static int highest_outgoing_arg_in_use; /* A bitmap of virtual-incoming stack space. Bit is set if the corresponding stack location's tail call argument has been already stored into the stack. This bitmap is used to prevent sibling call optimization if function tries to use parent's incoming argument slots when they have been already overwritten with tail call arguments. */ static sbitmap stored_args_map; /* stack_arg_under_construction is nonzero when an argument may be initialized with a constructor call (including a C function that returns a BLKmode struct) and expand_call must take special action to make sure the object being constructed does not overlap the argument list for the constructor call. 
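*/

/* Editorial sketch, not part of the GCC sources: the idea behind
   stack_usage_map above, reduced to a toy.  One char per byte of
   outgoing-argument space is marked once an argument has been stored
   there, so a nested call evaluated while the argument list is being
   built can be detected before it clobbers that slot.  The map size and
   the names below are invented; offset and size are assumed to stay
   inside the toy map.  */
static char toy_usage_map[64];

static int
toy_mark_arg_bytes (int offset, int size)
{
  int i;

  for (i = offset; i < offset + size; i++)
    if (toy_usage_map[i])
      return 0;                 /* would clobber an argument already stored */

  for (i = offset; i < offset + size; i++)
    toy_usage_map[i] = 1;       /* claim these bytes for this argument */

  return 1;
}

/*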
*/ int stack_arg_under_construction; static void emit_call_1 (rtx, tree, tree, tree, HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, rtx, rtx, int, rtx, int, CUMULATIVE_ARGS *); static void precompute_register_parameters (int, struct arg_data *, int *); static int store_one_arg (struct arg_data *, rtx, int, int, int); static void store_unaligned_arguments_into_pseudos (struct arg_data *, int); static int finalize_must_preallocate (int, int, struct arg_data *, struct args_size *); static void precompute_arguments (int, int, struct arg_data *); static int compute_argument_block_size (int, struct args_size *, int); static void initialize_argument_information (int, struct arg_data *, struct args_size *, int, tree, tree, CUMULATIVE_ARGS *, int, rtx *, int *, int *, int *, bool *, bool); static void compute_argument_addresses (struct arg_data *, rtx, int); static rtx rtx_for_function_call (tree, tree); static void load_register_parameters (struct arg_data *, int, rtx *, int, int, int *); static rtx emit_library_call_value_1 (int, rtx, rtx, enum libcall_type, enum machine_mode, int, va_list); static int special_function_p (tree, int); static int check_sibcall_argument_overlap_1 (rtx); static int check_sibcall_argument_overlap (rtx, struct arg_data *, int); static int combine_pending_stack_adjustment_and_call (int, struct args_size *, int); static tree fix_unsafe_tree (tree); static bool shift_returned_value (tree, rtx *); #ifdef REG_PARM_STACK_SPACE static rtx save_fixed_argument_area (int, rtx, int *, int *); static void restore_fixed_argument_area (rtx, rtx, int, int); #endif /* Force FUNEXP into a form suitable for the address of a CALL, and return that as an rtx. Also load the static chain register if FNDECL is a nested function. CALL_FUSAGE points to a variable holding the prospective CALL_INSN_FUNCTION_USAGE information. */ rtx prepare_call_address (rtx funexp, rtx static_chain_value, rtx *call_fusage, int reg_parm_seen, int sibcallp) { funexp = protect_from_queue (funexp, 0); /* Make a valid memory address and copy constants through pseudo-regs, but not for a constant address if -fno-function-cse. */ if (GET_CODE (funexp) != SYMBOL_REF) /* If we are using registers for parameters, force the function address into a register now. */ funexp = ((SMALL_REGISTER_CLASSES && reg_parm_seen) ? force_not_mem (memory_address (FUNCTION_MODE, funexp)) : memory_address (FUNCTION_MODE, funexp)); else if (! sibcallp) { #ifndef NO_FUNCTION_CSE if (optimize && ! flag_no_function_cse) funexp = force_reg (Pmode, funexp); #endif } if (static_chain_value != 0) { emit_move_insn (static_chain_rtx, static_chain_value); if (REG_P (static_chain_rtx)) use_reg (call_fusage, static_chain_rtx); } return funexp; } /* Generate instructions to call function FUNEXP, and optionally pop the results. The CALL_INSN is the first insn generated. FNDECL is the declaration node of the function. This is given to the macro RETURN_POPS_ARGS to determine whether this function pops its own args. FUNTYPE is the data type of the function. This is given to the macro RETURN_POPS_ARGS to determine whether this function pops its own args. We used to allow an identifier for library functions, but that doesn't work when the return type is an aggregate type and the calling convention says that the pointer to this aggregate is to be popped by the callee. STACK_SIZE is the number of bytes of arguments on the stack, ROUNDED_STACK_SIZE is that number rounded up to PREFERRED_STACK_BOUNDARY; zero if the size is variable. 
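*/

/* Editorial sketch, not part of the GCC sources: the rounding that
   produces the ROUNDED_STACK_SIZE parameter described above, written as
   a tiny standalone helper.  The real code rounds to PREF_STACK_BYTES
   (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT, defined earlier in this
   file); the helper name here is invented and the boundary is assumed,
   as on common targets, to be a power of two.  */
static int
toy_round_up (int bytes, int boundary_bytes)
{
  /* e.g. toy_round_up (20, 16) == 32.  */
  return (bytes + boundary_bytes - 1) & -boundary_bytes;
}

/*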
This is both to put into the call insn and to generate explicit popping code if necessary. STRUCT_VALUE_SIZE is the number of bytes wanted in a structure value. It is zero if this call doesn't want a structure value. NEXT_ARG_REG is the rtx that results from executing FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1) just after all the args have had their registers assigned. This could be whatever you like, but normally it is the first arg-register beyond those used for args in this call, or 0 if all the arg-registers are used in this call. It is passed on to `gen_call' so you can put this info in the call insn. VALREG is a hard register in which a value is returned, or 0 if the call does not return a value. OLD_INHIBIT_DEFER_POP is the value that `inhibit_defer_pop' had before the args to this call were processed. We restore `inhibit_defer_pop' to that value. CALL_FUSAGE is either empty or an EXPR_LIST of USE expressions that denote registers used by the called function. */ static void emit_call_1 (rtx funexp, tree fntree, tree fndecl ATTRIBUTE_UNUSED, tree funtype ATTRIBUTE_UNUSED, HOST_WIDE_INT stack_size ATTRIBUTE_UNUSED, HOST_WIDE_INT rounded_stack_size, HOST_WIDE_INT struct_value_size ATTRIBUTE_UNUSED, rtx next_arg_reg ATTRIBUTE_UNUSED, rtx valreg, int old_inhibit_defer_pop, rtx call_fusage, int ecf_flags, CUMULATIVE_ARGS *args_so_far ATTRIBUTE_UNUSED) { rtx rounded_stack_size_rtx = GEN_INT (rounded_stack_size); rtx call_insn; int already_popped = 0; HOST_WIDE_INT n_popped = RETURN_POPS_ARGS (fndecl, funtype, stack_size); #if defined (HAVE_call) && defined (HAVE_call_value) rtx struct_value_size_rtx; struct_value_size_rtx = GEN_INT (struct_value_size); #endif #ifdef CALL_POPS_ARGS n_popped += CALL_POPS_ARGS (* args_so_far); #endif /* Ensure address is valid. SYMBOL_REF is already valid, so no need, and we don't want to load it into a register as an optimization, because prepare_call_address already did it if it should be done. */ if (GET_CODE (funexp) != SYMBOL_REF) funexp = memory_address (FUNCTION_MODE, funexp); #if defined (HAVE_sibcall_pop) && defined (HAVE_sibcall_value_pop) if ((ecf_flags & ECF_SIBCALL) && HAVE_sibcall_pop && HAVE_sibcall_value_pop && (n_popped > 0 || stack_size == 0)) { rtx n_pop = GEN_INT (n_popped); rtx pat; /* If this subroutine pops its own args, record that in the call insn if possible, for the sake of frame pointer elimination. */ if (valreg) pat = GEN_SIBCALL_VALUE_POP (valreg, gen_rtx_MEM (FUNCTION_MODE, funexp), rounded_stack_size_rtx, next_arg_reg, n_pop); else pat = GEN_SIBCALL_POP (gen_rtx_MEM (FUNCTION_MODE, funexp), rounded_stack_size_rtx, next_arg_reg, n_pop); emit_call_insn (pat); already_popped = 1; } else #endif #if defined (HAVE_call_pop) && defined (HAVE_call_value_pop) /* If the target has "call" or "call_value" insns, then prefer them if no arguments are actually popped. If the target does not have "call" or "call_value" insns, then we must use the popping versions even if the call has no arguments to pop. */ #if defined (HAVE_call) && defined (HAVE_call_value) if (HAVE_call && HAVE_call_value && HAVE_call_pop && HAVE_call_value_pop && n_popped > 0 && ! (ecf_flags & ECF_SP_DEPRESSED)) #else if (HAVE_call_pop && HAVE_call_value_pop) #endif { rtx n_pop = GEN_INT (n_popped); rtx pat; /* If this subroutine pops its own args, record that in the call insn if possible, for the sake of frame pointer elimination. 
*/ if (valreg) pat = GEN_CALL_VALUE_POP (valreg, gen_rtx_MEM (FUNCTION_MODE, funexp), rounded_stack_size_rtx, next_arg_reg, n_pop); else pat = GEN_CALL_POP (gen_rtx_MEM (FUNCTION_MODE, funexp), rounded_stack_size_rtx, next_arg_reg, n_pop); emit_call_insn (pat); already_popped = 1; } else #endif #if defined (HAVE_sibcall) && defined (HAVE_sibcall_value) if ((ecf_flags & ECF_SIBCALL) && HAVE_sibcall && HAVE_sibcall_value) { if (valreg) emit_call_insn (GEN_SIBCALL_VALUE (valreg, gen_rtx_MEM (FUNCTION_MODE, funexp), rounded_stack_size_rtx, next_arg_reg, NULL_RTX)); else emit_call_insn (GEN_SIBCALL (gen_rtx_MEM (FUNCTION_MODE, funexp), rounded_stack_size_rtx, next_arg_reg, struct_value_size_rtx)); } else #endif #if defined (HAVE_call) && defined (HAVE_call_value) if (HAVE_call && HAVE_call_value) { if (valreg) emit_call_insn (GEN_CALL_VALUE (valreg, gen_rtx_MEM (FUNCTION_MODE, funexp), rounded_stack_size_rtx, next_arg_reg, NULL_RTX)); else emit_call_insn (GEN_CALL (gen_rtx_MEM (FUNCTION_MODE, funexp), rounded_stack_size_rtx, next_arg_reg, struct_value_size_rtx)); } else #endif abort (); /* Find the call we just emitted. */ call_insn = last_call_insn (); /* Mark memory as used for "pure" function call. */ if (ecf_flags & ECF_PURE) call_fusage = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode))), call_fusage); /* Put the register usage information there. */ add_function_usage_to (call_insn, call_fusage); /* If this is a const call, then set the insn's unchanging bit. */ if (ecf_flags & (ECF_CONST | ECF_PURE)) CONST_OR_PURE_CALL_P (call_insn) = 1; /* If this call can't throw, attach a REG_EH_REGION reg note to that effect. */ if (ecf_flags & ECF_NOTHROW) REG_NOTES (call_insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, const0_rtx, REG_NOTES (call_insn)); else { int rn = lookup_stmt_eh_region (fntree); /* If rn < 0, then either (1) tree-ssa not used or (2) doesn't throw, which we already took care of. */ if (rn > 0) REG_NOTES (call_insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, GEN_INT (rn), REG_NOTES (call_insn)); note_current_region_may_contain_throw (); } if (ecf_flags & ECF_NORETURN) REG_NOTES (call_insn) = gen_rtx_EXPR_LIST (REG_NORETURN, const0_rtx, REG_NOTES (call_insn)); if (ecf_flags & ECF_ALWAYS_RETURN) REG_NOTES (call_insn) = gen_rtx_EXPR_LIST (REG_ALWAYS_RETURN, const0_rtx, REG_NOTES (call_insn)); if (ecf_flags & ECF_RETURNS_TWICE) { REG_NOTES (call_insn) = gen_rtx_EXPR_LIST (REG_SETJMP, const0_rtx, REG_NOTES (call_insn)); current_function_calls_setjmp = 1; } SIBLING_CALL_P (call_insn) = ((ecf_flags & ECF_SIBCALL) != 0); /* Restore this now, so that we do defer pops for this call's args if the context of the call as a whole permits. */ inhibit_defer_pop = old_inhibit_defer_pop; if (n_popped > 0) { if (!already_popped) CALL_INSN_FUNCTION_USAGE (call_insn) = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_CLOBBER (VOIDmode, stack_pointer_rtx), CALL_INSN_FUNCTION_USAGE (call_insn)); rounded_stack_size -= n_popped; rounded_stack_size_rtx = GEN_INT (rounded_stack_size); stack_pointer_delta -= n_popped; } if (!ACCUMULATE_OUTGOING_ARGS) { /* If returning from the subroutine does not automatically pop the args, we need an instruction to pop them sooner or later. Perhaps do it now; perhaps just record how much space to pop later. If returning from the subroutine does pop the args, indicate that the stack pointer will be changed. */ if (rounded_stack_size != 0) { if (ecf_flags & (ECF_SP_DEPRESSED | ECF_NORETURN | ECF_LONGJMP)) /* Just pretend we did the pop. 
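No pop insn is emitted for such a callee; control either never returns or returns with the stack pointer deliberately depressed, so only the stack_pointer_delta bookkeeping below is updated.  As a small illustration with assumed numbers, if ROUNDED_STACK_SIZE is 16 here, stack_pointer_delta is simply reduced by 16 and neither pending_stack_adjust nor adjust_stack is involved.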
*/
	    stack_pointer_delta -= rounded_stack_size;
	  else if (flag_defer_pop && inhibit_defer_pop == 0
		   && ! (ecf_flags & (ECF_CONST | ECF_PURE)))
	    pending_stack_adjust += rounded_stack_size;
	  else
	    adjust_stack (rounded_stack_size_rtx);
	}
    }
  /* When we accumulate outgoing args, we must avoid any stack manipulations.
     Restore the stack pointer to its original value now.  Usually
     ACCUMULATE_OUTGOING_ARGS targets don't get here, but there are
     exceptions.  On i386 ACCUMULATE_OUTGOING_ARGS can be enabled on demand,
     and popping variants of functions exist as well.

     ??? We may optimize similarly to defer_pop above, but it is probably
     not worthwhile.

     ??? It will be worthwhile to enable combine_stack_adjustments even for
     such machines.  */
  else if (n_popped)
    anti_adjust_stack (GEN_INT (n_popped));
}

/* Determine if the function identified by FNDECL is one with special
   properties we wish to know about.

   For example, if the function might return more than one time (setjmp),
   then set RETURNS_TWICE to a nonzero value.

   Similarly, set LONGJMP if the function is in the longjmp family.

   Set MAY_BE_ALLOCA for any memory allocation function that might allocate
   space from the stack such as alloca.  */

static int
special_function_p (tree fndecl, int flags)
{
  if (fndecl && DECL_NAME (fndecl)
      && IDENTIFIER_LENGTH (DECL_NAME (fndecl)) <= 17
      /* Exclude functions not at the file scope, or not `extern', since
	 they are not the magic functions we would otherwise think they are.
	 FIXME: this should be handled with attributes, not with this hacky
	 imitation of DECL_ASSEMBLER_NAME.  It's (also) wrong because you can
	 declare fork() inside a function if you wish.  */
      && (DECL_CONTEXT (fndecl) == NULL_TREE
	  || TREE_CODE (DECL_CONTEXT (fndecl)) == TRANSLATION_UNIT_DECL)
      && TREE_PUBLIC (fndecl))
    {
      const char *name = IDENTIFIER_POINTER (DECL_NAME (fndecl));
      const char *tname = name;

      /* We assume that alloca will always be called by name.  It
	 makes no sense to pass it as a pointer-to-function to
	 anything that does not understand its behavior.  */
      if (((IDENTIFIER_LENGTH (DECL_NAME (fndecl)) == 6
	    && name[0] == 'a'
	    && ! strcmp (name, "alloca"))
	   || (IDENTIFIER_LENGTH (DECL_NAME (fndecl)) == 16
	       && name[0] == '_'
	       && ! strcmp (name, "__builtin_alloca"))))
	flags |= ECF_MAY_BE_ALLOCA;

      /* Disregard prefix _, __ or __x.  */
      if (name[0] == '_')
	{
	  if (name[1] == '_' && name[2] == 'x')
	    tname += 3;
	  else if (name[1] == '_')
	    tname += 2;
	  else
	    tname += 1;
	}

      if (tname[0] == 's')
	{
	  if ((tname[1] == 'e'
	       && (! strcmp (tname, "setjmp")
		   || ! strcmp (tname, "setjmp_syscall")))
	      || (tname[1] == 'i'
		  && ! strcmp (tname, "sigsetjmp"))
	      || (tname[1] == 'a'
		  && ! strcmp (tname, "savectx")))
	    flags |= ECF_RETURNS_TWICE;

	  if (tname[1] == 'i'
	      && ! strcmp (tname, "siglongjmp"))
	    flags |= ECF_LONGJMP;
	}
      else if ((tname[0] == 'q' && tname[1] == 's'
		&& ! strcmp (tname, "qsetjmp"))
	       || (tname[0] == 'v' && tname[1] == 'f'
		   && ! strcmp (tname, "vfork")))
	flags |= ECF_RETURNS_TWICE;
      else if (tname[0] == 'l' && tname[1] == 'o'
	       && ! strcmp (tname, "longjmp"))
	flags |= ECF_LONGJMP;
    }

  return flags;
}

/* Return nonzero when FNDECL represents a call to setjmp or to another
   function that may return more than once.  */

int
setjmp_call_p (tree fndecl)
{
  return special_function_p (fndecl, 0) & ECF_RETURNS_TWICE;
}

/* Return true when EXP contains an alloca call.
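A minimal usage sketch (hedged; the exact call sites vary): the flag bits computed by special_function_p above feed back into call expansion roughly as

     flags = special_function_p (fndecl, flags);
     if (flags & ECF_MAY_BE_ALLOCA)
       current_function_calls_alloca = 1;

   expand_call below performs the ECF_MAY_BE_ALLOCA step, and emit_call_1 sets current_function_calls_setjmp when ECF_RETURNS_TWICE (the condition setjmp_call_p tests) is present.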
*/ bool alloca_call_p (tree exp) { if (TREE_CODE (exp) == CALL_EXPR && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) == FUNCTION_DECL) && (special_function_p (TREE_OPERAND (TREE_OPERAND (exp, 0), 0), 0) & ECF_MAY_BE_ALLOCA)) return true; return false; } /* Detect flags (function attributes) from the function decl or type node. */ int flags_from_decl_or_type (tree exp) { int flags = 0; tree type = exp; if (DECL_P (exp)) { struct cgraph_rtl_info *i = cgraph_rtl_info (exp); type = TREE_TYPE (exp); if (i) { if (i->pure_function) flags |= ECF_PURE | ECF_LIBCALL_BLOCK; if (i->const_function) flags |= ECF_CONST | ECF_LIBCALL_BLOCK; } /* The function exp may have the `malloc' attribute. */ if (DECL_IS_MALLOC (exp)) flags |= ECF_MALLOC; /* The function exp may have the `pure' attribute. */ if (DECL_IS_PURE (exp)) flags |= ECF_PURE | ECF_LIBCALL_BLOCK; if (TREE_NOTHROW (exp)) flags |= ECF_NOTHROW; if (TREE_READONLY (exp) && ! TREE_THIS_VOLATILE (exp)) flags |= ECF_LIBCALL_BLOCK | ECF_CONST; flags = special_function_p (exp, flags); } else if (TYPE_P (exp) && TYPE_READONLY (exp) && ! TREE_THIS_VOLATILE (exp)) flags |= ECF_CONST; if (TREE_THIS_VOLATILE (exp)) flags |= ECF_NORETURN; /* Mark if the function returns with the stack pointer depressed. We cannot consider it pure or constant in that case. */ if (TREE_CODE (type) == FUNCTION_TYPE && TYPE_RETURNS_STACK_DEPRESSED (type)) { flags |= ECF_SP_DEPRESSED; flags &= ~(ECF_PURE | ECF_CONST | ECF_LIBCALL_BLOCK); } return flags; } /* Detect flags from a CALL_EXPR. */ int call_expr_flags (tree t) { int flags; tree decl = get_callee_fndecl (t); if (decl) flags = flags_from_decl_or_type (decl); else { t = TREE_TYPE (TREE_OPERAND (t, 0)); if (t && TREE_CODE (t) == POINTER_TYPE) flags = flags_from_decl_or_type (TREE_TYPE (t)); else flags = 0; } return flags; } /* Precompute all register parameters as described by ARGS, storing values into fields within the ARGS array. NUM_ACTUALS indicates the total number elements in the ARGS array. Set REG_PARM_SEEN if we encounter a register parameter. */ static void precompute_register_parameters (int num_actuals, struct arg_data *args, int *reg_parm_seen) { int i; *reg_parm_seen = 0; for (i = 0; i < num_actuals; i++) if (args[i].reg != 0 && ! args[i].pass_on_stack) { *reg_parm_seen = 1; if (args[i].value == 0) { push_temp_slots (); args[i].value = expand_expr (args[i].tree_value, NULL_RTX, VOIDmode, 0); preserve_temp_slots (args[i].value); pop_temp_slots (); /* ANSI doesn't require a sequence point here, but PCC has one, so this will avoid some problems. */ emit_queue (); } /* If the value is a non-legitimate constant, force it into a pseudo now. TLS symbols sometimes need a call to resolve. */ if (CONSTANT_P (args[i].value) && !LEGITIMATE_CONSTANT_P (args[i].value)) args[i].value = force_reg (args[i].mode, args[i].value); /* If we are to promote the function arg to a wider mode, do it now. */ if (args[i].mode != TYPE_MODE (TREE_TYPE (args[i].tree_value))) args[i].value = convert_modes (args[i].mode, TYPE_MODE (TREE_TYPE (args[i].tree_value)), args[i].value, args[i].unsignedp); /* If the value is expensive, and we are inside an appropriately short loop, put the value into a pseudo and then put the pseudo into the hard reg. For small register classes, also do this if this call uses register parameters. This is to avoid reload conflicts while loading the parameters registers. */ if ((! 
(REG_P (args[i].value) || (GET_CODE (args[i].value) == SUBREG && REG_P (SUBREG_REG (args[i].value))))) && args[i].mode != BLKmode && rtx_cost (args[i].value, SET) > COSTS_N_INSNS (1) && ((SMALL_REGISTER_CLASSES && *reg_parm_seen) || preserve_subexpressions_p ())) args[i].value = copy_to_mode_reg (args[i].mode, args[i].value); } } #ifdef REG_PARM_STACK_SPACE /* The argument list is the property of the called routine and it may clobber it. If the fixed area has been used for previous parameters, we must save and restore it. */ static rtx save_fixed_argument_area (int reg_parm_stack_space, rtx argblock, int *low_to_save, int *high_to_save) { int low; int high; /* Compute the boundary of the area that needs to be saved, if any. */ high = reg_parm_stack_space; #ifdef ARGS_GROW_DOWNWARD high += 1; #endif if (high > highest_outgoing_arg_in_use) high = highest_outgoing_arg_in_use; for (low = 0; low < high; low++) if (stack_usage_map[low] != 0) { int num_to_save; enum machine_mode save_mode; int delta; rtx stack_area; rtx save_area; while (stack_usage_map[--high] == 0) ; *low_to_save = low; *high_to_save = high; num_to_save = high - low + 1; save_mode = mode_for_size (num_to_save * BITS_PER_UNIT, MODE_INT, 1); /* If we don't have the required alignment, must do this in BLKmode. */ if ((low & (MIN (GET_MODE_SIZE (save_mode), BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1))) save_mode = BLKmode; #ifdef ARGS_GROW_DOWNWARD delta = -high; #else delta = low; #endif stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, plus_constant (argblock, delta))); set_mem_align (stack_area, PARM_BOUNDARY); if (save_mode == BLKmode) { save_area = assign_stack_temp (BLKmode, num_to_save, 0); emit_block_move (validize_mem (save_area), stack_area, GEN_INT (num_to_save), BLOCK_OP_CALL_PARM); } else { save_area = gen_reg_rtx (save_mode); emit_move_insn (save_area, stack_area); } return save_area; } return NULL_RTX; } static void restore_fixed_argument_area (rtx save_area, rtx argblock, int high_to_save, int low_to_save) { enum machine_mode save_mode = GET_MODE (save_area); int delta; rtx stack_area; #ifdef ARGS_GROW_DOWNWARD delta = -high_to_save; #else delta = low_to_save; #endif stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, plus_constant (argblock, delta))); set_mem_align (stack_area, PARM_BOUNDARY); if (save_mode != BLKmode) emit_move_insn (stack_area, save_area); else emit_block_move (stack_area, validize_mem (save_area), GEN_INT (high_to_save - low_to_save + 1), BLOCK_OP_CALL_PARM); } #endif /* REG_PARM_STACK_SPACE */ /* If any elements in ARGS refer to parameters that are to be passed in registers, but not in memory, and whose alignment does not permit a direct copy into registers. Copy the values into a group of pseudos which we will later copy into the appropriate hard registers. Pseudos for each unaligned argument will be stored into the array args[argnum].aligned_regs. The caller is responsible for deallocating the aligned_regs array if it is nonzero. */ static void store_unaligned_arguments_into_pseudos (struct arg_data *args, int num_actuals) { int i, j; for (i = 0; i < num_actuals; i++) if (args[i].reg != 0 && ! args[i].pass_on_stack && args[i].mode == BLKmode && (TYPE_ALIGN (TREE_TYPE (args[i].tree_value)) < (unsigned int) MIN (BIGGEST_ALIGNMENT, BITS_PER_WORD))) { int bytes = int_size_in_bytes (TREE_TYPE (args[i].tree_value)); int nregs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD; int endian_correction = 0; args[i].n_aligned_regs = args[i].partial ? 
args[i].partial : nregs;
	args[i].aligned_regs = xmalloc (sizeof (rtx) * args[i].n_aligned_regs);

	/* Structures smaller than a word are normally aligned to the
	   least significant byte.  On a BYTES_BIG_ENDIAN machine,
	   this means we must skip the empty high order bytes when
	   calculating the bit offset.  */
	if (bytes < UNITS_PER_WORD
#ifdef BLOCK_REG_PADDING
	    && (BLOCK_REG_PADDING (args[i].mode,
				   TREE_TYPE (args[i].tree_value), 1)
		== downward)
#else
	    && BYTES_BIG_ENDIAN
#endif
	    )
	  endian_correction = BITS_PER_WORD - bytes * BITS_PER_UNIT;

	for (j = 0; j < args[i].n_aligned_regs; j++)
	  {
	    rtx reg = gen_reg_rtx (word_mode);
	    rtx word = operand_subword_force (args[i].value, j, BLKmode);
	    int bitsize = MIN (bytes * BITS_PER_UNIT, BITS_PER_WORD);

	    args[i].aligned_regs[j] = reg;
	    word = extract_bit_field (word, bitsize, 0, 1, NULL_RTX,
				      word_mode, word_mode, BITS_PER_WORD);

	    /* There is no need to restrict this code to loading items
	       in TYPE_ALIGN sized hunks.  The bitfield instructions can
	       load up entire word sized registers efficiently.

	       ??? This may not be needed anymore.
	       We used to emit a clobber here but that doesn't let later
	       passes optimize the instructions we emit.  By storing 0 into
	       the register, later passes know that the first AND to zero out
	       the bitfield being set in the register is unnecessary.  The
	       store of 0 will be deleted as will at least the first AND.  */

	    emit_move_insn (reg, const0_rtx);

	    bytes -= bitsize / BITS_PER_UNIT;
	    store_bit_field (reg, bitsize, endian_correction, word_mode,
			     word, BITS_PER_WORD);
	  }
      }
}

/* Fill in ARGS_SIZE and ARGS array based on the parameters found in
   ACTPARMS.

   NUM_ACTUALS is the total number of parameters.

   N_NAMED_ARGS is the total number of named arguments.

   FNDECL is the tree code for the target of this call (if known).

   ARGS_SO_FAR holds state needed by the target to know where to place
   the next argument.

   REG_PARM_STACK_SPACE is the number of bytes of stack space reserved
   for arguments which are passed in registers.

   OLD_STACK_LEVEL is a pointer to an rtx which holds the old stack level
   and may be modified by this routine.

   OLD_PENDING_ADJ, MUST_PREALLOCATE and FLAGS are pointers to integer
   flags which may be modified by this routine.

   MAY_TAILCALL is cleared if we encounter an invisible pass-by-reference
   that requires allocation of stack space.

   CALL_FROM_THUNK_P is true if this call is the jump from a thunk to the
   thunked-to function.  */

static void
initialize_argument_information (int num_actuals ATTRIBUTE_UNUSED,
				 struct arg_data *args,
				 struct args_size *args_size,
				 int n_named_args ATTRIBUTE_UNUSED,
				 tree actparms, tree fndecl,
				 CUMULATIVE_ARGS *args_so_far,
				 int reg_parm_stack_space,
				 rtx *old_stack_level, int *old_pending_adj,
				 int *must_preallocate, int *ecf_flags,
				 bool *may_tailcall, bool call_from_thunk_p)
{
  /* 1 if scanning parms front to back, -1 if scanning back to front.  */
  int inc;

  /* Count arg position in order args appear.  */
  int argpos;

  int i;
  tree p;

  args_size->constant = 0;
  args_size->var = 0;

  /* In this loop, we consider args in the order they are written.
     We fill up ARGS from the front or from the back if necessary
     so that in any case the first arg to be pushed ends up at the front.  */

  if (PUSH_ARGS_REVERSED)
    {
      i = num_actuals - 1, inc = -1;
      /* In this case, must reverse order of args
	 so that we compute and push the last arg first.  */
    }
  else
    {
      i = 0, inc = 1;
    }

  /* I counts args in order (to be) pushed; ARGPOS counts in order written.
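As a concrete illustration, for a call f (a, b, c) with PUSH_ARGS_REVERSED nonzero: ARGPOS runs 0, 1, 2 over a, b, c while I runs 2, 1, 0, so ARGS[0] receives c, ARGS[1] receives b and ARGS[2] receives a; c is thus both the first element of ARGS and the first argument pushed.  When PUSH_ARGS_REVERSED is zero, I and ARGPOS coincide.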
*/ for (p = actparms, argpos = 0; p; p = TREE_CHAIN (p), i += inc, argpos++) { tree type = TREE_TYPE (TREE_VALUE (p)); int unsignedp; enum machine_mode mode; args[i].tree_value = TREE_VALUE (p); /* Replace erroneous argument with constant zero. */ if (type == error_mark_node || !COMPLETE_TYPE_P (type)) args[i].tree_value = integer_zero_node, type = integer_type_node; /* If TYPE is a transparent union, pass things the way we would pass the first field of the union. We have already verified that the modes are the same. */ if (TREE_CODE (type) == UNION_TYPE && TYPE_TRANSPARENT_UNION (type)) type = TREE_TYPE (TYPE_FIELDS (type)); /* Decide where to pass this arg. args[i].reg is nonzero if all or part is passed in registers. args[i].partial is nonzero if part but not all is passed in registers, and the exact value says how many words are passed in registers. args[i].pass_on_stack is nonzero if the argument must at least be computed on the stack. It may then be loaded back into registers if args[i].reg is nonzero. These decisions are driven by the FUNCTION_... macros and must agree with those made by function.c. */ /* See if this argument should be passed by invisible reference. */ if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE (type)) || TREE_ADDRESSABLE (type) #ifdef FUNCTION_ARG_PASS_BY_REFERENCE || FUNCTION_ARG_PASS_BY_REFERENCE (*args_so_far, TYPE_MODE (type), type, argpos < n_named_args) #endif ) { /* If we're compiling a thunk, pass through invisible references instead of making a copy. */ if (call_from_thunk_p #ifdef FUNCTION_ARG_CALLEE_COPIES || (FUNCTION_ARG_CALLEE_COPIES (*args_so_far, TYPE_MODE (type), type, argpos < n_named_args) /* If it's in a register, we must make a copy of it too. */ /* ??? Is this a sufficient test? Is there a better one? */ && !(TREE_CODE (args[i].tree_value) == VAR_DECL && REG_P (DECL_RTL (args[i].tree_value))) && ! TREE_ADDRESSABLE (type)) #endif ) { /* C++ uses a TARGET_EXPR to indicate that we want to make a new object from the argument. If we are passing by invisible reference, the callee will do that for us, so we can strip off the TARGET_EXPR. This is not always safe, but it is safe in the only case where this is a useful optimization; namely, when the argument is a plain object. In that case, the frontend is just asking the backend to make a bitwise copy of the argument. */ if (TREE_CODE (args[i].tree_value) == TARGET_EXPR && (DECL_P (TREE_OPERAND (args[i].tree_value, 1))) && ! REG_P (DECL_RTL (TREE_OPERAND (args[i].tree_value, 1)))) args[i].tree_value = TREE_OPERAND (args[i].tree_value, 1); /* We can't use sibcalls if a callee-copied argument is stored in the current function's frame. */ if (!call_from_thunk_p && (!DECL_P (args[i].tree_value) || !TREE_STATIC (args[i].tree_value))) *may_tailcall = false; args[i].tree_value = build1 (ADDR_EXPR, build_pointer_type (type), args[i].tree_value); type = build_pointer_type (type); } else if (TREE_CODE (args[i].tree_value) == TARGET_EXPR) { /* In the V3 C++ ABI, parameters are destroyed in the caller. We implement this by passing the address of the temporary rather than expanding it into another allocated slot. */ args[i].tree_value = build1 (ADDR_EXPR, build_pointer_type (type), args[i].tree_value); type = build_pointer_type (type); *may_tailcall = false; } else { /* We make a copy of the object and pass the address to the function being called. */ rtx copy; if (!COMPLETE_TYPE_P (type) || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST || (flag_stack_check && ! 
STACK_CHECK_BUILTIN && (0 < compare_tree_int (TYPE_SIZE_UNIT (type), STACK_CHECK_MAX_VAR_SIZE)))) { /* This is a variable-sized object. Make space on the stack for it. */ rtx size_rtx = expr_size (TREE_VALUE (p)); if (*old_stack_level == 0) { emit_stack_save (SAVE_BLOCK, old_stack_level, NULL_RTX); *old_pending_adj = pending_stack_adjust; pending_stack_adjust = 0; } copy = gen_rtx_MEM (BLKmode, allocate_dynamic_stack_space (size_rtx, NULL_RTX, TYPE_ALIGN (type))); set_mem_attributes (copy, type, 1); } else copy = assign_temp (type, 0, 1, 0); store_expr (args[i].tree_value, copy, 0); *ecf_flags &= ~(ECF_CONST | ECF_PURE | ECF_LIBCALL_BLOCK); args[i].tree_value = build1 (ADDR_EXPR, build_pointer_type (type), make_tree (type, copy)); type = build_pointer_type (type); *may_tailcall = false; } } mode = TYPE_MODE (type); unsignedp = TYPE_UNSIGNED (type); if (targetm.calls.promote_function_args (fndecl ? TREE_TYPE (fndecl) : 0)) mode = promote_mode (type, mode, &unsignedp, 1); args[i].unsignedp = unsignedp; args[i].mode = mode; args[i].reg = FUNCTION_ARG (*args_so_far, mode, type, argpos < n_named_args); #ifdef FUNCTION_INCOMING_ARG /* If this is a sibling call and the machine has register windows, the register window has to be unwinded before calling the routine, so arguments have to go into the incoming registers. */ args[i].tail_call_reg = FUNCTION_INCOMING_ARG (*args_so_far, mode, type, argpos < n_named_args); #else args[i].tail_call_reg = args[i].reg; #endif #ifdef FUNCTION_ARG_PARTIAL_NREGS if (args[i].reg) args[i].partial = FUNCTION_ARG_PARTIAL_NREGS (*args_so_far, mode, type, argpos < n_named_args); #endif args[i].pass_on_stack = MUST_PASS_IN_STACK (mode, type); /* If FUNCTION_ARG returned a (parallel [(expr_list (nil) ...) ...]), it means that we are to pass this arg in the register(s) designated by the PARALLEL, but also to pass it in the stack. */ if (args[i].reg && GET_CODE (args[i].reg) == PARALLEL && XEXP (XVECEXP (args[i].reg, 0, 0), 0) == 0) args[i].pass_on_stack = 1; /* If this is an addressable type, we must preallocate the stack since we must evaluate the object into its final location. If this is to be passed in both registers and the stack, it is simpler to preallocate. */ if (TREE_ADDRESSABLE (type) || (args[i].pass_on_stack && args[i].reg != 0)) *must_preallocate = 1; /* If this is an addressable type, we cannot pre-evaluate it. Thus, we cannot consider this function call constant. */ if (TREE_ADDRESSABLE (type)) *ecf_flags &= ~ECF_LIBCALL_BLOCK; /* Compute the stack-size of this argument. */ if (args[i].reg == 0 || args[i].partial != 0 || reg_parm_stack_space > 0 || args[i].pass_on_stack) locate_and_pad_parm (mode, type, #ifdef STACK_PARMS_IN_REG_PARM_AREA 1, #else args[i].reg != 0, #endif args[i].pass_on_stack ? 0 : args[i].partial, fndecl, args_size, &args[i].locate); #ifdef BLOCK_REG_PADDING else /* The argument is passed entirely in registers. See at which end it should be padded. */ args[i].locate.where_pad = BLOCK_REG_PADDING (mode, type, int_size_in_bytes (type) <= UNITS_PER_WORD); #endif /* Update ARGS_SIZE, the total stack space for args so far. */ args_size->constant += args[i].locate.size.constant; if (args[i].locate.size.var) ADD_PARM_SIZE (*args_size, args[i].locate.size.var); /* Increment ARGS_SO_FAR, which has info about which arg-registers have been used, etc. */ FUNCTION_ARG_ADVANCE (*args_so_far, TYPE_MODE (type), type, argpos < n_named_args); } } /* Update ARGS_SIZE to contain the total size for the argument block. 
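(Illustrative arithmetic for the constant-size case handled below, using assumed numbers: an ARGS_SIZE->CONSTANT of 20, a STACK_POINTER_DELTA of 4 and a preferred boundary of 16 bytes give ((20 + 4 + 15) / 16) * 16 - 4 = 28, so that once 28 bytes of arguments have been pushed the stack pointer is again 16-byte aligned; a positive REG_PARM_STACK_SPACE then imposes a minimum on that value and, unless OUTGOING_REG_PARM_STACK_SPACE is defined, is subtracted back out.)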
Return the original constant component of the argument block's size. REG_PARM_STACK_SPACE holds the number of bytes of stack space reserved for arguments passed in registers. */ static int compute_argument_block_size (int reg_parm_stack_space, struct args_size *args_size, int preferred_stack_boundary ATTRIBUTE_UNUSED) { int unadjusted_args_size = args_size->constant; /* For accumulate outgoing args mode we don't need to align, since the frame will be already aligned. Align to STACK_BOUNDARY in order to prevent backends from generating misaligned frame sizes. */ if (ACCUMULATE_OUTGOING_ARGS && preferred_stack_boundary > STACK_BOUNDARY) preferred_stack_boundary = STACK_BOUNDARY; /* Compute the actual size of the argument block required. The variable and constant sizes must be combined, the size may have to be rounded, and there may be a minimum required size. */ if (args_size->var) { args_size->var = ARGS_SIZE_TREE (*args_size); args_size->constant = 0; preferred_stack_boundary /= BITS_PER_UNIT; if (preferred_stack_boundary > 1) { /* We don't handle this case yet. To handle it correctly we have to add the delta, round and subtract the delta. Currently no machine description requires this support. */ if (stack_pointer_delta & (preferred_stack_boundary - 1)) abort (); args_size->var = round_up (args_size->var, preferred_stack_boundary); } if (reg_parm_stack_space > 0) { args_size->var = size_binop (MAX_EXPR, args_size->var, ssize_int (reg_parm_stack_space)); #ifndef OUTGOING_REG_PARM_STACK_SPACE /* The area corresponding to register parameters is not to count in the size of the block we need. So make the adjustment. */ args_size->var = size_binop (MINUS_EXPR, args_size->var, ssize_int (reg_parm_stack_space)); #endif } } else { preferred_stack_boundary /= BITS_PER_UNIT; if (preferred_stack_boundary < 1) preferred_stack_boundary = 1; args_size->constant = (((args_size->constant + stack_pointer_delta + preferred_stack_boundary - 1) / preferred_stack_boundary * preferred_stack_boundary) - stack_pointer_delta); args_size->constant = MAX (args_size->constant, reg_parm_stack_space); #ifndef OUTGOING_REG_PARM_STACK_SPACE args_size->constant -= reg_parm_stack_space; #endif } return unadjusted_args_size; } /* Precompute parameters as needed for a function call. FLAGS is mask of ECF_* constants. NUM_ACTUALS is the number of arguments. ARGS is an array containing information for each argument; this routine fills in the INITIAL_VALUE and VALUE fields for each precomputed argument. */ static void precompute_arguments (int flags, int num_actuals, struct arg_data *args) { int i; /* If this is a libcall, then precompute all arguments so that we do not get extraneous instructions emitted as part of the libcall sequence. */ if ((flags & ECF_LIBCALL_BLOCK) == 0) return; for (i = 0; i < num_actuals; i++) { enum machine_mode mode; /* If this is an addressable type, we cannot pre-evaluate it. */ if (TREE_ADDRESSABLE (TREE_TYPE (args[i].tree_value))) abort (); args[i].value = expand_expr (args[i].tree_value, NULL_RTX, VOIDmode, 0); /* ANSI doesn't require a sequence point here, but PCC has one, so this will avoid some problems. 
*/ emit_queue (); args[i].initial_value = args[i].value = protect_from_queue (args[i].value, 0); mode = TYPE_MODE (TREE_TYPE (args[i].tree_value)); if (mode != args[i].mode) { args[i].value = convert_modes (args[i].mode, mode, args[i].value, args[i].unsignedp); #if defined(PROMOTE_FUNCTION_MODE) && !defined(PROMOTE_MODE) /* CSE will replace this only if it contains args[i].value pseudo, so convert it down to the declared mode using a SUBREG. */ if (REG_P (args[i].value) && GET_MODE_CLASS (args[i].mode) == MODE_INT) { args[i].initial_value = gen_lowpart_SUBREG (mode, args[i].value); SUBREG_PROMOTED_VAR_P (args[i].initial_value) = 1; SUBREG_PROMOTED_UNSIGNED_SET (args[i].initial_value, args[i].unsignedp); } #endif } } } /* Given the current state of MUST_PREALLOCATE and information about arguments to a function call in NUM_ACTUALS, ARGS and ARGS_SIZE, compute and return the final value for MUST_PREALLOCATE. */ static int finalize_must_preallocate (int must_preallocate, int num_actuals, struct arg_data *args, struct args_size *args_size) { /* See if we have or want to preallocate stack space. If we would have to push a partially-in-regs parm before other stack parms, preallocate stack space instead. If the size of some parm is not a multiple of the required stack alignment, we must preallocate. If the total size of arguments that would otherwise create a copy in a temporary (such as a CALL) is more than half the total argument list size, preallocation is faster. Another reason to preallocate is if we have a machine (like the m88k) where stack alignment is required to be maintained between every pair of insns, not just when the call is made. However, we assume here that such machines either do not have push insns (and hence preallocation would occur anyway) or the problem is taken care of with PUSH_ROUNDING. */ if (! must_preallocate) { int partial_seen = 0; int copy_to_evaluate_size = 0; int i; for (i = 0; i < num_actuals && ! must_preallocate; i++) { if (args[i].partial > 0 && ! args[i].pass_on_stack) partial_seen = 1; else if (partial_seen && args[i].reg == 0) must_preallocate = 1; if (TYPE_MODE (TREE_TYPE (args[i].tree_value)) == BLKmode && (TREE_CODE (args[i].tree_value) == CALL_EXPR || TREE_CODE (args[i].tree_value) == TARGET_EXPR || TREE_CODE (args[i].tree_value) == COND_EXPR || TREE_ADDRESSABLE (TREE_TYPE (args[i].tree_value)))) copy_to_evaluate_size += int_size_in_bytes (TREE_TYPE (args[i].tree_value)); } if (copy_to_evaluate_size * 2 >= args_size->constant && args_size->constant > 0) must_preallocate = 1; } return must_preallocate; } /* If we preallocated stack space, compute the address of each argument and store it into the ARGS array. We need not ensure it is a valid memory address here; it will be validized when it is used. ARGBLOCK is an rtx for the address of the outgoing arguments. */ static void compute_argument_addresses (struct arg_data *args, rtx argblock, int num_actuals) { if (argblock) { rtx arg_reg = argblock; int i, arg_offset = 0; if (GET_CODE (argblock) == PLUS) arg_reg = XEXP (argblock, 0), arg_offset = INTVAL (XEXP (argblock, 1)); for (i = 0; i < num_actuals; i++) { rtx offset = ARGS_SIZE_RTX (args[i].locate.offset); rtx slot_offset = ARGS_SIZE_RTX (args[i].locate.slot_offset); rtx addr; /* Skip this parm if it will not be passed on the stack. */ if (! 
args[i].pass_on_stack && args[i].reg != 0) continue; if (GET_CODE (offset) == CONST_INT) addr = plus_constant (arg_reg, INTVAL (offset)); else addr = gen_rtx_PLUS (Pmode, arg_reg, offset); addr = plus_constant (addr, arg_offset); args[i].stack = gen_rtx_MEM (args[i].mode, addr); set_mem_align (args[i].stack, PARM_BOUNDARY); set_mem_attributes (args[i].stack, TREE_TYPE (args[i].tree_value), 1); if (GET_CODE (slot_offset) == CONST_INT) addr = plus_constant (arg_reg, INTVAL (slot_offset)); else addr = gen_rtx_PLUS (Pmode, arg_reg, slot_offset); addr = plus_constant (addr, arg_offset); args[i].stack_slot = gen_rtx_MEM (args[i].mode, addr); set_mem_align (args[i].stack_slot, PARM_BOUNDARY); set_mem_attributes (args[i].stack_slot, TREE_TYPE (args[i].tree_value), 1); /* Function incoming arguments may overlap with sibling call outgoing arguments and we cannot allow reordering of reads from function arguments with stores to outgoing arguments of sibling calls. */ set_mem_alias_set (args[i].stack, 0); set_mem_alias_set (args[i].stack_slot, 0); } } } /* Given a FNDECL and EXP, return an rtx suitable for use as a target address in a call instruction. FNDECL is the tree node for the target function. For an indirect call FNDECL will be NULL_TREE. ADDR is the operand 0 of CALL_EXPR for this call. */ static rtx rtx_for_function_call (tree fndecl, tree addr) { rtx funexp; /* Get the function to call, in the form of RTL. */ if (fndecl) { /* If this is the first use of the function, see if we need to make an external definition for it. */ if (! TREE_USED (fndecl)) { assemble_external (fndecl); TREE_USED (fndecl) = 1; } /* Get a SYMBOL_REF rtx for the function address. */ funexp = XEXP (DECL_RTL (fndecl), 0); } else /* Generate an rtx (probably a pseudo-register) for the address. */ { push_temp_slots (); funexp = expand_expr (addr, NULL_RTX, VOIDmode, 0); pop_temp_slots (); /* FUNEXP can't be BLKmode. */ emit_queue (); } return funexp; } /* Do the register loads required for any wholly-register parms or any parms which are passed both on the stack and in a register. Their expressions were already evaluated. Mark all register-parms as living through the call, putting these USE insns in the CALL_INSN_FUNCTION_USAGE field. When IS_SIBCALL, perform the check_sibcall_overlap_argument_overlap checking, setting *SIBCALL_FAILURE if appropriate. */ static void load_register_parameters (struct arg_data *args, int num_actuals, rtx *call_fusage, int flags, int is_sibcall, int *sibcall_failure) { int i, j; for (i = 0; i < num_actuals; i++) { rtx reg = ((flags & ECF_SIBCALL) ? args[i].tail_call_reg : args[i].reg); if (reg) { int partial = args[i].partial; int nregs; int size = 0; rtx before_arg = get_last_insn (); /* Set to non-negative if must move a word at a time, even if just one word (e.g, partial == 1 && mode == DFmode). Set to -1 if we just use a normal move insn. This value can be zero if the argument is a zero size structure with no fields. */ nregs = -1; if (partial) nregs = partial; else if (TYPE_MODE (TREE_TYPE (args[i].tree_value)) == BLKmode) { size = int_size_in_bytes (TREE_TYPE (args[i].tree_value)); nregs = (size + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD; } else size = GET_MODE_SIZE (args[i].mode); /* Handle calls that pass values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ if (GET_CODE (reg) == PARALLEL) { tree type = TREE_TYPE (args[i].tree_value); emit_group_load (reg, args[i].value, type, int_size_in_bytes (type)); } /* If simple case, just do move. 
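(This is the NREGS == -1 branch below: the value is already in the argument's natural mode, so a single emit_move_insn into REG suffices, possibly followed by the BLOCK_REG_PADDING shift for a small value that must sit at the most significant end of the register.)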
If normal partial, store_one_arg has already loaded the register for us. In all other cases, load the register(s) from memory. */ else if (nregs == -1) { emit_move_insn (reg, args[i].value); #ifdef BLOCK_REG_PADDING /* Handle case where we have a value that needs shifting up to the msb. eg. a QImode value and we're padding upward on a BYTES_BIG_ENDIAN machine. */ if (size < UNITS_PER_WORD && (args[i].locate.where_pad == (BYTES_BIG_ENDIAN ? upward : downward))) { rtx x; int shift = (UNITS_PER_WORD - size) * BITS_PER_UNIT; /* Assigning REG here rather than a temp makes CALL_FUSAGE report the whole reg as used. Strictly speaking, the call only uses SIZE bytes at the msb end, but it doesn't seem worth generating rtl to say that. */ reg = gen_rtx_REG (word_mode, REGNO (reg)); x = expand_shift (LSHIFT_EXPR, word_mode, reg, build_int_2 (shift, 0), reg, 1); if (x != reg) emit_move_insn (reg, x); } #endif } /* If we have pre-computed the values to put in the registers in the case of non-aligned structures, copy them in now. */ else if (args[i].n_aligned_regs != 0) for (j = 0; j < args[i].n_aligned_regs; j++) emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg) + j), args[i].aligned_regs[j]); else if (partial == 0 || args[i].pass_on_stack) { rtx mem = validize_mem (args[i].value); /* Handle a BLKmode that needs shifting. */ if (nregs == 1 && size < UNITS_PER_WORD #ifdef BLOCK_REG_PADDING && args[i].locate.where_pad == downward #else && BYTES_BIG_ENDIAN #endif ) { rtx tem = operand_subword_force (mem, 0, args[i].mode); rtx ri = gen_rtx_REG (word_mode, REGNO (reg)); rtx x = gen_reg_rtx (word_mode); int shift = (UNITS_PER_WORD - size) * BITS_PER_UNIT; enum tree_code dir = BYTES_BIG_ENDIAN ? RSHIFT_EXPR : LSHIFT_EXPR; emit_move_insn (x, tem); x = expand_shift (dir, word_mode, x, build_int_2 (shift, 0), ri, 1); if (x != ri) emit_move_insn (ri, x); } else move_block_to_reg (REGNO (reg), mem, nregs, args[i].mode); } /* When a parameter is a block, and perhaps in other cases, it is possible that it did a load from an argument slot that was already clobbered. */ if (is_sibcall && check_sibcall_argument_overlap (before_arg, &args[i], 0)) *sibcall_failure = 1; /* Handle calls that pass values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ if (GET_CODE (reg) == PARALLEL) use_group_regs (call_fusage, reg); else if (nregs == -1) use_reg (call_fusage, reg); else use_regs (call_fusage, REGNO (reg), nregs == 0 ? 1 : nregs); } } } /* We need to pop PENDING_STACK_ADJUST bytes. But, if the arguments wouldn't fill up an even multiple of PREFERRED_UNIT_STACK_BOUNDARY bytes, then we would need to push some additional bytes to pad the arguments. So, we compute an adjust to the stack pointer for an amount that will leave the stack under-aligned by UNADJUSTED_ARGS_SIZE bytes. Then, when the arguments are pushed the stack will be perfectly aligned. ARGS_SIZE->CONSTANT is set to the number of bytes that should be popped after the call. Returns the adjustment. */ static int combine_pending_stack_adjustment_and_call (int unadjusted_args_size, struct args_size *args_size, int preferred_unit_stack_boundary) { /* The number of bytes to pop so that the stack will be under-aligned by UNADJUSTED_ARGS_SIZE bytes. */ HOST_WIDE_INT adjustment; /* The alignment of the stack after the arguments are pushed, if we just pushed the arguments without adjust the stack here. 
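For example, with assumed numbers: a STACK_POINTER_DELTA of 4, an UNADJUSTED_ARGS_SIZE of 8 and a 16-byte PREFERRED_UNIT_STACK_BOUNDARY give an alignment of (4 + 8) % 16 = 12; with a PENDING_STACK_ADJUST of 20 the code below trims the adjustment to 12, so 4 - 12 + 8 = 0 is a multiple of 16 once the arguments are pushed, and ARGS_SIZE->CONSTANT becomes 20 - 12 + 8 = 16 bytes to pop after the call.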
*/ HOST_WIDE_INT unadjusted_alignment; unadjusted_alignment = ((stack_pointer_delta + unadjusted_args_size) % preferred_unit_stack_boundary); /* We want to get rid of as many of the PENDING_STACK_ADJUST bytes as possible -- leaving just enough left to cancel out the UNADJUSTED_ALIGNMENT. In other words, we want to ensure that the PENDING_STACK_ADJUST is non-negative, and congruent to -UNADJUSTED_ALIGNMENT modulo the PREFERRED_UNIT_STACK_BOUNDARY. */ /* Begin by trying to pop all the bytes. */ unadjusted_alignment = (unadjusted_alignment - (pending_stack_adjust % preferred_unit_stack_boundary)); adjustment = pending_stack_adjust; /* Push enough additional bytes that the stack will be aligned after the arguments are pushed. */ if (preferred_unit_stack_boundary > 1) { if (unadjusted_alignment > 0) adjustment -= preferred_unit_stack_boundary - unadjusted_alignment; else adjustment += unadjusted_alignment; } /* Now, sets ARGS_SIZE->CONSTANT so that we pop the right number of bytes after the call. The right number is the entire PENDING_STACK_ADJUST less our ADJUSTMENT plus the amount required by the arguments in the first place. */ args_size->constant = pending_stack_adjust - adjustment + unadjusted_args_size; return adjustment; } /* Scan X expression if it does not dereference any argument slots we already clobbered by tail call arguments (as noted in stored_args_map bitmap). Return nonzero if X expression dereferences such argument slots, zero otherwise. */ static int check_sibcall_argument_overlap_1 (rtx x) { RTX_CODE code; int i, j; unsigned int k; const char *fmt; if (x == NULL_RTX) return 0; code = GET_CODE (x); if (code == MEM) { if (XEXP (x, 0) == current_function_internal_arg_pointer) i = 0; else if (GET_CODE (XEXP (x, 0)) == PLUS && XEXP (XEXP (x, 0), 0) == current_function_internal_arg_pointer && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) i = INTVAL (XEXP (XEXP (x, 0), 1)); else return 0; #ifdef ARGS_GROW_DOWNWARD i = -i - GET_MODE_SIZE (GET_MODE (x)); #endif for (k = 0; k < GET_MODE_SIZE (GET_MODE (x)); k++) if (i + k < stored_args_map->n_bits && TEST_BIT (stored_args_map, i + k)) return 1; return 0; } /* Scan all subexpressions. */ fmt = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++) { if (*fmt == 'e') { if (check_sibcall_argument_overlap_1 (XEXP (x, i))) return 1; } else if (*fmt == 'E') { for (j = 0; j < XVECLEN (x, i); j++) if (check_sibcall_argument_overlap_1 (XVECEXP (x, i, j))) return 1; } } return 0; } /* Scan sequence after INSN if it does not dereference any argument slots we already clobbered by tail call arguments (as noted in stored_args_map bitmap). If MARK_STORED_ARGS_MAP, add stack slots for ARG to stored_args_map bitmap afterwards (when ARG is a register MARK_STORED_ARGS_MAP should be 0). Return nonzero if sequence after INSN dereferences such argument slots, zero otherwise. 
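As an illustration of the bitmap bookkeeping (assumed offsets, ignoring ARGS_GROW_DOWNWARD targets): once an outgoing argument occupying bytes 8 through 11 of the argument block has been stored, bits 8..11 of STORED_ARGS_MAP are set; if the expansion of a later argument then reads a MEM at (plus current_function_internal_arg_pointer 10), check_sibcall_argument_overlap_1 reports the overlap and the caller records a sibcall failure.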
*/ static int check_sibcall_argument_overlap (rtx insn, struct arg_data *arg, int mark_stored_args_map) { int low, high; if (insn == NULL_RTX) insn = get_insns (); else insn = NEXT_INSN (insn); for (; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && check_sibcall_argument_overlap_1 (PATTERN (insn))) break; if (mark_stored_args_map) { #ifdef ARGS_GROW_DOWNWARD low = -arg->locate.slot_offset.constant - arg->locate.size.constant; #else low = arg->locate.slot_offset.constant; #endif for (high = low + arg->locate.size.constant; low < high; low++) SET_BIT (stored_args_map, low); } return insn != NULL_RTX; } static tree fix_unsafe_tree (tree t) { switch (unsafe_for_reeval (t)) { case 0: /* Safe. */ break; case 1: /* Mildly unsafe. */ t = unsave_expr (t); break; case 2: /* Wildly unsafe. */ { tree var = build_decl (VAR_DECL, NULL_TREE, TREE_TYPE (t)); SET_DECL_RTL (var, expand_expr (t, NULL_RTX, VOIDmode, EXPAND_NORMAL)); t = var; } break; default: abort (); } return t; } /* If function value *VALUE was returned at the most significant end of a register, shift it towards the least significant end and convert it to TYPE's mode. Return true and update *VALUE if some action was needed. TYPE is the type of the function's return value, which is known not to have mode BLKmode. */ static bool shift_returned_value (tree type, rtx *value) { if (targetm.calls.return_in_msb (type)) { HOST_WIDE_INT shift; shift = (GET_MODE_BITSIZE (GET_MODE (*value)) - BITS_PER_UNIT * int_size_in_bytes (type)); if (shift > 0) { /* Shift the value into the low part of the register. */ *value = expand_binop (GET_MODE (*value), lshr_optab, *value, GEN_INT (shift), 0, 1, OPTAB_WIDEN); /* Truncate it to the type's mode, or its integer equivalent. This is subject to TRULY_NOOP_TRUNCATION. */ *value = convert_to_mode (int_mode_for_mode (TYPE_MODE (type)), *value, 0); /* Now convert it to the final form. */ *value = gen_lowpart (TYPE_MODE (type), *value); return true; } } return false; } /* Remove all REG_EQUIV notes found in the insn chain. */ static void purge_reg_equiv_notes (void) { rtx insn; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { while (1) { rtx note = find_reg_note (insn, REG_EQUIV, 0); if (note) { /* Remove the note and keep looking at the notes for this insn. */ remove_note (insn, note); continue; } break; } } } /* Clear RTX_UNCHANGING_P flag of incoming argument MEMs. */ static void purge_mem_unchanging_flag (rtx x) { RTX_CODE code; int i, j; const char *fmt; if (x == NULL_RTX) return; code = GET_CODE (x); if (code == MEM) { if (RTX_UNCHANGING_P (x) && (XEXP (x, 0) == current_function_internal_arg_pointer || (GET_CODE (XEXP (x, 0)) == PLUS && XEXP (XEXP (x, 0), 0) == current_function_internal_arg_pointer && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))) RTX_UNCHANGING_P (x) = 0; return; } /* Scan all subexpressions. */ fmt = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++) { if (*fmt == 'e') purge_mem_unchanging_flag (XEXP (x, i)); else if (*fmt == 'E') for (j = 0; j < XVECLEN (x, i); j++) purge_mem_unchanging_flag (XVECEXP (x, i, j)); } } /* Generate all the code for a function call and return an rtx for its value. Store the value in TARGET (specified as an rtx) if convenient. If the value is stored in TARGET then TARGET is returned. If IGNORE is nonzero, then we ignore the value of the function call. */ rtx expand_call (tree exp, rtx target, int ignore) { /* Nonzero if we are currently expanding a call. 
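It acts as a re-entrancy guard: the tail-call logic below tests currently_expanding_call++ != 0 and gives up on sibling-call optimization when one call is being expanded as an argument of another.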
*/ static int currently_expanding_call = 0; /* List of actual parameters. */ tree actparms = TREE_OPERAND (exp, 1); /* RTX for the function to be called. */ rtx funexp; /* Sequence of insns to perform a normal "call". */ rtx normal_call_insns = NULL_RTX; /* Sequence of insns to perform a tail "call". */ rtx tail_call_insns = NULL_RTX; /* Data type of the function. */ tree funtype; tree type_arg_types; /* Declaration of the function being called, or 0 if the function is computed (not known by name). */ tree fndecl = 0; /* The type of the function being called. */ tree fntype; bool try_tail_call = CALL_EXPR_TAILCALL (exp); int pass; /* Register in which non-BLKmode value will be returned, or 0 if no value or if value is BLKmode. */ rtx valreg; /* Address where we should return a BLKmode value; 0 if value not BLKmode. */ rtx structure_value_addr = 0; /* Nonzero if that address is being passed by treating it as an extra, implicit first parameter. Otherwise, it is passed by being copied directly into struct_value_rtx. */ int structure_value_addr_parm = 0; /* Size of aggregate value wanted, or zero if none wanted or if we are using the non-reentrant PCC calling convention or expecting the value in registers. */ HOST_WIDE_INT struct_value_size = 0; /* Nonzero if called function returns an aggregate in memory PCC style, by returning the address of where to find it. */ int pcc_struct_value = 0; rtx struct_value = 0; /* Number of actual parameters in this call, including struct value addr. */ int num_actuals; /* Number of named args. Args after this are anonymous ones and they must all go on the stack. */ int n_named_args; /* Vector of information about each argument. Arguments are numbered in the order they will be pushed, not the order they are written. */ struct arg_data *args; /* Total size in bytes of all the stack-parms scanned so far. */ struct args_size args_size; struct args_size adjusted_args_size; /* Size of arguments before any adjustments (such as rounding). */ int unadjusted_args_size; /* Data on reg parms scanned so far. */ CUMULATIVE_ARGS args_so_far; /* Nonzero if a reg parm has been scanned. */ int reg_parm_seen; /* Nonzero if this is an indirect function call. */ /* Nonzero if we must avoid push-insns in the args for this call. If stack space is allocated for register parameters, but not by the caller, then it is preallocated in the fixed part of the stack frame. So the entire argument block must then be preallocated (i.e., we ignore PUSH_ROUNDING in that case). */ int must_preallocate = !PUSH_ARGS; /* Size of the stack reserved for parameter registers. */ int reg_parm_stack_space = 0; /* Address of space preallocated for stack parms (on machines that lack push insns), or 0 if space not preallocated. */ rtx argblock = 0; /* Mask of ECF_ flags. */ int flags = 0; #ifdef REG_PARM_STACK_SPACE /* Define the boundary of the register parm stack space that needs to be saved, if any. */ int low_to_save, high_to_save; rtx save_area = 0; /* Place that it is saved */ #endif int initial_highest_arg_in_use = highest_outgoing_arg_in_use; char *initial_stack_usage_map = stack_usage_map; int old_stack_allocated; /* State variables to track stack modifications. */ rtx old_stack_level = 0; int old_stack_arg_under_construction = 0; int old_pending_adj = 0; int old_inhibit_defer_pop = inhibit_defer_pop; /* Some stack pointer alterations we make are performed via allocate_dynamic_stack_space. This modifies the stack_pointer_delta, which we then also need to save/restore along the way. 
*/ int old_stack_pointer_delta = 0; rtx call_fusage; tree p = TREE_OPERAND (exp, 0); tree addr = TREE_OPERAND (exp, 0); int i; /* The alignment of the stack, in bits. */ HOST_WIDE_INT preferred_stack_boundary; /* The alignment of the stack, in bytes. */ HOST_WIDE_INT preferred_unit_stack_boundary; /* The static chain value to use for this call. */ rtx static_chain_value; /* See if this is "nothrow" function call. */ if (TREE_NOTHROW (exp)) flags |= ECF_NOTHROW; /* See if we can find a DECL-node for the actual function, and get the function attributes (flags) from the function decl or type node. */ fndecl = get_callee_fndecl (exp); if (fndecl) { fntype = TREE_TYPE (fndecl); flags |= flags_from_decl_or_type (fndecl); } else { fntype = TREE_TYPE (TREE_TYPE (p)); flags |= flags_from_decl_or_type (fntype); } struct_value = targetm.calls.struct_value_rtx (fntype, 0); /* Warn if this value is an aggregate type, regardless of which calling convention we are using for it. */ if (warn_aggregate_return && AGGREGATE_TYPE_P (TREE_TYPE (exp))) warning ("function call has aggregate value"); /* If the result of a pure or const function call is ignored (or void), and none of its arguments are volatile, we can avoid expanding the call and just evaluate the arguments for side-effects. */ if ((flags & (ECF_CONST | ECF_PURE)) && (ignore || target == const0_rtx || TYPE_MODE (TREE_TYPE (exp)) == VOIDmode)) { bool volatilep = false; tree arg; for (arg = actparms; arg; arg = TREE_CHAIN (arg)) if (TREE_THIS_VOLATILE (TREE_VALUE (arg))) { volatilep = true; break; } if (! volatilep) { for (arg = actparms; arg; arg = TREE_CHAIN (arg)) expand_expr (TREE_VALUE (arg), const0_rtx, VOIDmode, EXPAND_NORMAL); return const0_rtx; } } #ifdef REG_PARM_STACK_SPACE reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl); #endif #ifndef OUTGOING_REG_PARM_STACK_SPACE if (reg_parm_stack_space > 0 && PUSH_ARGS) must_preallocate = 1; #endif /* Set up a place to return a structure. */ /* Cater to broken compilers. */ if (aggregate_value_p (exp, fndecl)) { /* This call returns a big structure. */ flags &= ~(ECF_CONST | ECF_PURE | ECF_LIBCALL_BLOCK); #ifdef PCC_STATIC_STRUCT_RETURN { pcc_struct_value = 1; } #else /* not PCC_STATIC_STRUCT_RETURN */ { struct_value_size = int_size_in_bytes (TREE_TYPE (exp)); if (CALL_EXPR_HAS_RETURN_SLOT_ADDR (exp)) { /* The structure value address arg is already in actparms. Pull it out. It might be nice to just leave it there, but we need to set structure_value_addr. */ tree return_arg = TREE_VALUE (actparms); actparms = TREE_CHAIN (actparms); structure_value_addr = expand_expr (return_arg, NULL_RTX, VOIDmode, EXPAND_NORMAL); } else if (target && MEM_P (target)) structure_value_addr = XEXP (target, 0); else { /* For variable-sized objects, we must be called with a target specified. If we were to allocate space on the stack here, we would have no way of knowing when to free it. */ rtx d = assign_temp (TREE_TYPE (exp), 1, 1, 1); mark_temp_addr_taken (d); structure_value_addr = XEXP (d, 0); target = 0; } } #endif /* not PCC_STATIC_STRUCT_RETURN */ } /* Figure out the amount to which the stack should be aligned. */ preferred_stack_boundary = PREFERRED_STACK_BOUNDARY; if (fndecl) { struct cgraph_rtl_info *i = cgraph_rtl_info (fndecl); if (i && i->preferred_incoming_stack_boundary) preferred_stack_boundary = i->preferred_incoming_stack_boundary; } /* Operand 0 is a pointer-to-function; get the type of the function. */ funtype = TREE_TYPE (addr); if (! 
POINTER_TYPE_P (funtype)) abort (); funtype = TREE_TYPE (funtype); /* Munge the tree to split complex arguments into their imaginary and real parts. */ if (targetm.calls.split_complex_arg) { type_arg_types = split_complex_types (TYPE_ARG_TYPES (funtype)); actparms = split_complex_values (actparms); } else type_arg_types = TYPE_ARG_TYPES (funtype); if (flags & ECF_MAY_BE_ALLOCA) current_function_calls_alloca = 1; /* If struct_value_rtx is 0, it means pass the address as if it were an extra parameter. */ if (structure_value_addr && struct_value == 0) { /* If structure_value_addr is a REG other than virtual_outgoing_args_rtx, we can use always use it. If it is not a REG, we must always copy it into a register. If it is virtual_outgoing_args_rtx, we must copy it to another register in some cases. */ rtx temp = (!REG_P (structure_value_addr) || (ACCUMULATE_OUTGOING_ARGS && stack_arg_under_construction && structure_value_addr == virtual_outgoing_args_rtx) ? copy_addr_to_reg (convert_memory_address (Pmode, structure_value_addr)) : structure_value_addr); actparms = tree_cons (error_mark_node, make_tree (build_pointer_type (TREE_TYPE (funtype)), temp), actparms); structure_value_addr_parm = 1; } /* Count the arguments and set NUM_ACTUALS. */ for (p = actparms, num_actuals = 0; p; p = TREE_CHAIN (p)) num_actuals++; /* Compute number of named args. First, do a raw count of the args for INIT_CUMULATIVE_ARGS. */ if (type_arg_types != 0) n_named_args = (list_length (type_arg_types) /* Count the struct value address, if it is passed as a parm. */ + structure_value_addr_parm); else /* If we know nothing, treat all args as named. */ n_named_args = num_actuals; /* Start updating where the next arg would go. On some machines (such as the PA) indirect calls have a different calling convention than normal calls. The fourth argument in INIT_CUMULATIVE_ARGS tells the backend if this is an indirect call or not. */ INIT_CUMULATIVE_ARGS (args_so_far, funtype, NULL_RTX, fndecl, n_named_args); /* Now possibly adjust the number of named args. Normally, don't include the last named arg if anonymous args follow. We do include the last named arg if targetm.calls.strict_argument_naming() returns nonzero. (If no anonymous args follow, the result of list_length is actually one too large. This is harmless.) If targetm.calls.pretend_outgoing_varargs_named() returns nonzero, and targetm.calls.strict_argument_naming() returns zero, this machine will be able to place unnamed args that were passed in registers into the stack. So treat all args as named. This allows the insns emitting for a specific argument list to be independent of the function declaration. If targetm.calls.pretend_outgoing_varargs_named() returns zero, we do not have any reliable way to pass unnamed args in registers, so we must force them into memory. */ if (type_arg_types != 0 && targetm.calls.strict_argument_naming (&args_so_far)) ; else if (type_arg_types != 0 && ! targetm.calls.pretend_outgoing_varargs_named (&args_so_far)) /* Don't include the last named arg. */ --n_named_args; else /* Treat all args as named. */ n_named_args = num_actuals; /* Make a vector to hold all the information about each arg. */ args = alloca (num_actuals * sizeof (struct arg_data)); memset (args, 0, num_actuals * sizeof (struct arg_data)); /* Build up entries in the ARGS array, compute the size of the arguments into ARGS_SIZE, etc. 
*/ initialize_argument_information (num_actuals, args, &args_size, n_named_args, actparms, fndecl, &args_so_far, reg_parm_stack_space, &old_stack_level, &old_pending_adj, &must_preallocate, &flags, &try_tail_call, CALL_FROM_THUNK_P (exp)); if (args_size.var) { /* If this function requires a variable-sized argument list, don't try to make a cse'able block for this call. We may be able to do this eventually, but it is too complicated to keep track of what insns go in the cse'able block and which don't. */ flags &= ~ECF_LIBCALL_BLOCK; must_preallocate = 1; } /* Now make final decision about preallocating stack space. */ must_preallocate = finalize_must_preallocate (must_preallocate, num_actuals, args, &args_size); /* If the structure value address will reference the stack pointer, we must stabilize it. We don't need to do this if we know that we are not going to adjust the stack pointer in processing this call. */ if (structure_value_addr && (reg_mentioned_p (virtual_stack_dynamic_rtx, structure_value_addr) || reg_mentioned_p (virtual_outgoing_args_rtx, structure_value_addr)) && (args_size.var || (!ACCUMULATE_OUTGOING_ARGS && args_size.constant))) structure_value_addr = copy_to_reg (structure_value_addr); /* Tail calls can make things harder to debug, and we're traditionally pushed these optimizations into -O2. Don't try if we're already expanding a call, as that means we're an argument. Don't try if there's cleanups, as we know there's code to follow the call. If rtx_equal_function_value_matters is false, that means we've finished with regular parsing. Which means that some of the machinery we use to generate tail-calls is no longer in place. This is most often true of sjlj-exceptions, which we couldn't tail-call to anyway. If current_nesting_level () == 0, we're being called after the function body has been expanded. This can happen when setting up trampolines in expand_function_end. */ if (currently_expanding_call++ != 0 || !flag_optimize_sibling_calls || !rtx_equal_function_value_matters || current_nesting_level () == 0 || any_pending_cleanups () || args_size.var || lookup_stmt_eh_region (exp) >= 0) try_tail_call = 0; /* Rest of purposes for tail call optimizations to fail. */ if ( #ifdef HAVE_sibcall_epilogue !HAVE_sibcall_epilogue #else 1 #endif || !try_tail_call /* Doing sibling call optimization needs some work, since structure_value_addr can be allocated on the stack. It does not seem worth the effort since few optimizable sibling calls will return a structure. */ || structure_value_addr != NULL_RTX /* Check whether the target is able to optimize the call into a sibcall. */ || !targetm.function_ok_for_sibcall (fndecl, exp) /* Functions that do not return exactly once may not be sibcall optimized. */ || (flags & (ECF_RETURNS_TWICE | ECF_LONGJMP | ECF_NORETURN)) || TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (addr))) /* If the called function is nested in the current one, it might access some of the caller's arguments, but could clobber them beforehand if the argument areas are shared. */ || (fndecl && decl_function_context (fndecl) == current_function_decl) /* If this function requires more stack slots than the current function, we cannot change it into a sibling call. */ || args_size.constant > current_function_args_size /* If the callee pops its own arguments, then it must pop exactly the same number of arguments as the current function. 
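For instance, on a target where RETURN_POPS_ARGS is nonzero for some calling conventions, a callee that pops 12 bytes of arguments cannot become a sibling call in a caller that pops none: after the jump the callee's return would leave the stack unbalanced from the point of view of the caller's own caller.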
*/ || (RETURN_POPS_ARGS (fndecl, funtype, args_size.constant) != RETURN_POPS_ARGS (current_function_decl, TREE_TYPE (current_function_decl), current_function_args_size)) || !lang_hooks.decls.ok_for_sibcall (fndecl)) try_tail_call = 0; if (try_tail_call) { int end, inc; actparms = NULL_TREE; /* Ok, we're going to give the tail call the old college try. This means we're going to evaluate the function arguments up to three times. There are two degrees of badness we can encounter, those that can be unsaved and those that can't. (See unsafe_for_reeval commentary for details.) Generate a new argument list. Pass safe arguments through unchanged. For the easy badness wrap them in UNSAVE_EXPRs. For hard badness, evaluate them now and put their resulting rtx in a temporary VAR_DECL. initialize_argument_information has ordered the array for the order to be pushed, and we must remember this when reconstructing the original argument order. */ if (PUSH_ARGS_REVERSED) { inc = 1; i = 0; end = num_actuals; } else { inc = -1; i = num_actuals - 1; end = -1; } for (; i != end; i += inc) { args[i].tree_value = fix_unsafe_tree (args[i].tree_value); } /* Do the same for the function address if it is an expression. */ if (!fndecl) addr = fix_unsafe_tree (addr); /* Expanding one of those dangerous arguments could have added cleanups, but otherwise give it a whirl. */ if (any_pending_cleanups ()) try_tail_call = 0; } /* Ensure current function's preferred stack boundary is at least what we need. We don't have to increase alignment for recursive functions. */ if (cfun->preferred_stack_boundary < preferred_stack_boundary && fndecl != current_function_decl) cfun->preferred_stack_boundary = preferred_stack_boundary; if (fndecl == current_function_decl) cfun->recursive_call_emit = true; preferred_unit_stack_boundary = preferred_stack_boundary / BITS_PER_UNIT; /* We want to make two insn chains; one for a sibling call, the other for a normal call. We will select one of the two chains after initial RTL generation is complete. */ for (pass = try_tail_call ? 0 : 1; pass < 2; pass++) { int sibcall_failure = 0; /* We want to emit any pending stack adjustments before the tail recursion "call". That way we know any adjustment after the tail recursion call can be ignored if we indeed use the tail call expansion. */ int save_pending_stack_adjust = 0; int save_stack_pointer_delta = 0; rtx insns; rtx before_call, next_arg_reg; if (pass == 0) { /* Emit any queued insns now; otherwise they would end up in only one of the alternates. */ emit_queue (); /* State variables we need to save and restore between iterations. */ save_pending_stack_adjust = pending_stack_adjust; save_stack_pointer_delta = stack_pointer_delta; } if (pass) flags &= ~ECF_SIBCALL; else flags |= ECF_SIBCALL; /* Other state variables that we must reinitialize each time through the loop (that are not initialized by the loop itself). */ argblock = 0; call_fusage = 0; /* Start a new sequence for the normal call case. From this point on, if the sibling call fails, we want to set sibcall_failure instead of continuing the loop. */ start_sequence (); if (pass == 0) { /* We know at this point that there are not currently any pending cleanups. If, however, in the process of evaluating the arguments we were to create some, we'll need to be able to get rid of them. */ expand_start_target_temps (); } /* Don't let pending stack adjusts add up to too much. 
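(The 32-byte threshold tested below is just a heuristic cut-off.)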
Also, do all pending adjustments now if there is any chance this might be a call to alloca or if we are expanding a sibling call sequence or if we are calling a function that is to return with stack pointer depressed. */ if (pending_stack_adjust >= 32 || (pending_stack_adjust > 0 && (flags & (ECF_MAY_BE_ALLOCA | ECF_SP_DEPRESSED))) || pass == 0) do_pending_stack_adjust (); /* When calling a const function, we must pop the stack args right away, so that the pop is deleted or moved with the call. */ if (pass && (flags & ECF_LIBCALL_BLOCK)) NO_DEFER_POP; /* Precompute any arguments as needed. */ if (pass) precompute_arguments (flags, num_actuals, args); /* Now we are about to start emitting insns that can be deleted if a libcall is deleted. */ if (pass && (flags & (ECF_LIBCALL_BLOCK | ECF_MALLOC))) start_sequence (); adjusted_args_size = args_size; /* Compute the actual size of the argument block required. The variable and constant sizes must be combined, the size may have to be rounded, and there may be a minimum required size. When generating a sibcall pattern, do not round up, since we'll be re-using whatever space our caller provided. */ unadjusted_args_size = compute_argument_block_size (reg_parm_stack_space, &adjusted_args_size, (pass == 0 ? 0 : preferred_stack_boundary)); old_stack_allocated = stack_pointer_delta - pending_stack_adjust; /* The argument block when performing a sibling call is the incoming argument block. */ if (pass == 0) { argblock = virtual_incoming_args_rtx; argblock #ifdef STACK_GROWS_DOWNWARD = plus_constant (argblock, current_function_pretend_args_size); #else = plus_constant (argblock, -current_function_pretend_args_size); #endif stored_args_map = sbitmap_alloc (args_size.constant); sbitmap_zero (stored_args_map); } /* If we have no actual push instructions, or shouldn't use them, make space for all args right now. */ else if (adjusted_args_size.var != 0) { if (old_stack_level == 0) { emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX); old_stack_pointer_delta = stack_pointer_delta; old_pending_adj = pending_stack_adjust; pending_stack_adjust = 0; /* stack_arg_under_construction says whether a stack arg is being constructed at the old stack level. Pushing the stack gets a clean outgoing argument block. */ old_stack_arg_under_construction = stack_arg_under_construction; stack_arg_under_construction = 0; } argblock = push_block (ARGS_SIZE_RTX (adjusted_args_size), 0, 0); } else { /* Note that we must go through the motions of allocating an argument block even if the size is zero because we may be storing args in the area reserved for register arguments, which may be part of the stack frame. */ int needed = adjusted_args_size.constant; /* Store the maximum argument space used. It will be pushed by the prologue (if ACCUMULATE_OUTGOING_ARGS, or stack overflow checking). */ if (needed > current_function_outgoing_args_size) current_function_outgoing_args_size = needed; if (must_preallocate) { if (ACCUMULATE_OUTGOING_ARGS) { /* Since the stack pointer will never be pushed, it is possible for the evaluation of a parm to clobber something we have already written to the stack. Since most function calls on RISC machines do not use the stack, this is uncommon, but must work correctly. Therefore, we save any area of the stack that was already written and that we are using. Here we set up to do this by making a new stack usage map from the old one. The actual save will be done by store_one_arg. 
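Each byte of the outgoing argument area has one entry in stack_usage_map; a nonzero entry means that byte has already been written and must be saved before it can be reused.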
Another approach might be to try to reorder the argument evaluations to avoid this conflicting stack usage. */ #ifndef OUTGOING_REG_PARM_STACK_SPACE /* Since we will be writing into the entire argument area, the map must be allocated for its entire size, not just the part that is the responsibility of the caller. */ needed += reg_parm_stack_space; #endif #ifdef ARGS_GROW_DOWNWARD highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use, needed + 1); #else highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use, needed); #endif stack_usage_map = alloca (highest_outgoing_arg_in_use); if (initial_highest_arg_in_use) memcpy (stack_usage_map, initial_stack_usage_map, initial_highest_arg_in_use); if (initial_highest_arg_in_use != highest_outgoing_arg_in_use) memset (&stack_usage_map[initial_highest_arg_in_use], 0, (highest_outgoing_arg_in_use - initial_highest_arg_in_use)); needed = 0; /* The address of the outgoing argument list must not be copied to a register here, because argblock would be left pointing to the wrong place after the call to allocate_dynamic_stack_space below. */ argblock = virtual_outgoing_args_rtx; } else { if (inhibit_defer_pop == 0) { /* Try to reuse some or all of the pending_stack_adjust to get this space. */ needed = (combine_pending_stack_adjustment_and_call (unadjusted_args_size, &adjusted_args_size, preferred_unit_stack_boundary)); /* combine_pending_stack_adjustment_and_call computes an adjustment before the arguments are allocated. Account for them and see whether or not the stack needs to go up or down. */ needed = unadjusted_args_size - needed; if (needed < 0) { /* We're releasing stack space. */ /* ??? We can avoid any adjustment at all if we're already aligned. FIXME. */ pending_stack_adjust = -needed; do_pending_stack_adjust (); needed = 0; } else /* We need to allocate space. We'll do that in push_block below. */ pending_stack_adjust = 0; } /* Special case this because overhead of `push_block' in this case is non-trivial. */ if (needed == 0) argblock = virtual_outgoing_args_rtx; else { argblock = push_block (GEN_INT (needed), 0, 0); #ifdef ARGS_GROW_DOWNWARD argblock = plus_constant (argblock, needed); #endif } /* We only really need to call `copy_to_reg' in the case where push insns are going to be used to pass ARGBLOCK to a function call in ARGS. In that case, the stack pointer changes value from the allocation point to the call point, and hence the value of VIRTUAL_OUTGOING_ARGS_RTX changes as well. But might as well always do it. */ argblock = copy_to_reg (argblock); } } } if (ACCUMULATE_OUTGOING_ARGS) { /* The save/restore code in store_one_arg handles all cases except one: a constructor call (including a C function returning a BLKmode struct) to initialize an argument. */ if (stack_arg_under_construction) { #ifndef OUTGOING_REG_PARM_STACK_SPACE rtx push_size = GEN_INT (reg_parm_stack_space + adjusted_args_size.constant); #else rtx push_size = GEN_INT (adjusted_args_size.constant); #endif if (old_stack_level == 0) { emit_stack_save (SAVE_BLOCK, &old_stack_level, NULL_RTX); old_stack_pointer_delta = stack_pointer_delta; old_pending_adj = pending_stack_adjust; pending_stack_adjust = 0; /* stack_arg_under_construction says whether a stack arg is being constructed at the old stack level. Pushing the stack gets a clean outgoing argument block. */ old_stack_arg_under_construction = stack_arg_under_construction; stack_arg_under_construction = 0; /* Make a new map for the new argument list. 
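It can start out empty, since nothing has been stored at the new stack level yet.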
*/ stack_usage_map = alloca (highest_outgoing_arg_in_use); memset (stack_usage_map, 0, highest_outgoing_arg_in_use); highest_outgoing_arg_in_use = 0; } allocate_dynamic_stack_space (push_size, NULL_RTX, BITS_PER_UNIT); } /* If argument evaluation might modify the stack pointer, copy the address of the argument list to a register. */ for (i = 0; i < num_actuals; i++) if (args[i].pass_on_stack) { argblock = copy_addr_to_reg (argblock); break; } } compute_argument_addresses (args, argblock, num_actuals); /* If we push args individually in reverse order, perform stack alignment before the first push (the last arg). */ if (PUSH_ARGS_REVERSED && argblock == 0 && adjusted_args_size.constant != unadjusted_args_size) { /* When the stack adjustment is pending, we get better code by combining the adjustments. */ if (pending_stack_adjust && ! (flags & ECF_LIBCALL_BLOCK) && ! inhibit_defer_pop) { pending_stack_adjust = (combine_pending_stack_adjustment_and_call (unadjusted_args_size, &adjusted_args_size, preferred_unit_stack_boundary)); do_pending_stack_adjust (); } else if (argblock == 0) anti_adjust_stack (GEN_INT (adjusted_args_size.constant - unadjusted_args_size)); } /* Now that the stack is properly aligned, pops can't safely be deferred during the evaluation of the arguments. */ NO_DEFER_POP; funexp = rtx_for_function_call (fndecl, addr); /* Figure out the register where the value, if any, will come back. */ valreg = 0; if (TYPE_MODE (TREE_TYPE (exp)) != VOIDmode && ! structure_value_addr) { if (pcc_struct_value) valreg = hard_function_value (build_pointer_type (TREE_TYPE (exp)), fndecl, (pass == 0)); else valreg = hard_function_value (TREE_TYPE (exp), fndecl, (pass == 0)); } /* Precompute all register parameters. It isn't safe to compute anything once we have started filling any specific hard regs. */ precompute_register_parameters (num_actuals, args, &reg_parm_seen); if (TREE_OPERAND (exp, 2)) static_chain_value = expand_expr (TREE_OPERAND (exp, 2), NULL_RTX, VOIDmode, 0); else static_chain_value = 0; #ifdef REG_PARM_STACK_SPACE /* Save the fixed argument area if it's part of the caller's frame and is clobbered by argument setup for this call. */ if (ACCUMULATE_OUTGOING_ARGS && pass) save_area = save_fixed_argument_area (reg_parm_stack_space, argblock, &low_to_save, &high_to_save); #endif /* Now store (and compute if necessary) all non-register parms. These come before register parms, since they can require block-moves, which could clobber the registers used for register parms. Parms which have partial registers are not stored here, but we do preallocate space here if they want that. */ for (i = 0; i < num_actuals; i++) if (args[i].reg == 0 || args[i].pass_on_stack) { rtx before_arg = get_last_insn (); if (store_one_arg (&args[i], argblock, flags, adjusted_args_size.var != 0, reg_parm_stack_space) || (pass == 0 && check_sibcall_argument_overlap (before_arg, &args[i], 1))) sibcall_failure = 1; if (flags & ECF_CONST && args[i].stack && args[i].value == args[i].stack) call_fusage = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, args[i].value), call_fusage); } /* If we have a parm that is passed in registers but not in memory and whose alignment does not permit a direct copy into registers, make a group of pseudos that correspond to each register that we will later fill. */ if (STRICT_ALIGNMENT) store_unaligned_arguments_into_pseudos (args, num_actuals); /* Now store any partially-in-registers parm. This is the last place a block-move can happen.
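From here on the argument registers themselves are loaded, and a stray block-move could clobber them.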
*/ if (reg_parm_seen) for (i = 0; i < num_actuals; i++) if (args[i].partial != 0 && ! args[i].pass_on_stack) { rtx before_arg = get_last_insn (); if (store_one_arg (&args[i], argblock, flags, adjusted_args_size.var != 0, reg_parm_stack_space) || (pass == 0 && check_sibcall_argument_overlap (before_arg, &args[i], 1))) sibcall_failure = 1; } /* If we pushed args in forward order, perform stack alignment after pushing the last arg. */ if (!PUSH_ARGS_REVERSED && argblock == 0) anti_adjust_stack (GEN_INT (adjusted_args_size.constant - unadjusted_args_size)); /* If register arguments require space on the stack and stack space was not preallocated, allocate stack space here for arguments passed in registers. */ #ifdef OUTGOING_REG_PARM_STACK_SPACE if (!ACCUMULATE_OUTGOING_ARGS && must_preallocate == 0 && reg_parm_stack_space > 0) anti_adjust_stack (GEN_INT (reg_parm_stack_space)); #endif /* Pass the function the address in which to return a structure value. */ if (pass != 0 && structure_value_addr && ! structure_value_addr_parm) { structure_value_addr = convert_memory_address (Pmode, structure_value_addr); emit_move_insn (struct_value, force_reg (Pmode, force_operand (structure_value_addr, NULL_RTX))); if (REG_P (struct_value)) use_reg (&call_fusage, struct_value); } funexp = prepare_call_address (funexp, static_chain_value, &call_fusage, reg_parm_seen, pass == 0); load_register_parameters (args, num_actuals, &call_fusage, flags, pass == 0, &sibcall_failure); /* Perform postincrements before actually calling the function. */ emit_queue (); /* Save a pointer to the last insn before the call, so that we can later safely search backwards to find the CALL_INSN. */ before_call = get_last_insn (); /* Set up next argument register. For sibling calls on machines with register windows this should be the incoming register. */ #ifdef FUNCTION_INCOMING_ARG if (pass == 0) next_arg_reg = FUNCTION_INCOMING_ARG (args_so_far, VOIDmode, void_type_node, 1); else #endif next_arg_reg = FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1); /* All arguments and registers used for the call must be set up by now! */ /* Stack must be properly aligned now. */ if (pass && stack_pointer_delta % preferred_unit_stack_boundary) abort (); /* Generate the actual call instruction. */ emit_call_1 (funexp, exp, fndecl, funtype, unadjusted_args_size, adjusted_args_size.constant, struct_value_size, next_arg_reg, valreg, old_inhibit_defer_pop, call_fusage, flags, & args_so_far); /* If call is cse'able, make appropriate pair of reg-notes around it. Test valreg so we don't crash; may safely ignore `const' if return type is void. Disable for PARALLEL return values, because we have no way to move such values into a pseudo register. */ if (pass && (flags & ECF_LIBCALL_BLOCK)) { rtx insns; rtx insn; bool failed = valreg == 0 || GET_CODE (valreg) == PARALLEL; insns = get_insns (); /* Expansion of block moves possibly introduced a loop that may not appear inside libcall block. */ for (insn = insns; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == JUMP_INSN) failed = true; if (failed) { end_sequence (); emit_insn (insns); } else { rtx note = 0; rtx temp = gen_reg_rtx (GET_MODE (valreg)); /* Mark the return value as a pointer if needed. 
*/ if (TREE_CODE (TREE_TYPE (exp)) == POINTER_TYPE) mark_reg_pointer (temp, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (exp)))); end_sequence (); if (flag_unsafe_math_optimizations && fndecl && DECL_BUILT_IN (fndecl) && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SQRT || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SQRTF || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SQRTL)) note = gen_rtx_fmt_e (SQRT, GET_MODE (temp), args[0].initial_value); else { /* Construct an "equal form" for the value which mentions all the arguments in order as well as the function name. */ for (i = 0; i < num_actuals; i++) note = gen_rtx_EXPR_LIST (VOIDmode, args[i].initial_value, note); note = gen_rtx_EXPR_LIST (VOIDmode, funexp, note); if (flags & ECF_PURE) note = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode))), note); } emit_libcall_block (insns, temp, valreg, note); valreg = temp; } } else if (pass && (flags & ECF_MALLOC)) { rtx temp = gen_reg_rtx (GET_MODE (valreg)); rtx last, insns; /* The return value from a malloc-like function is a pointer. */ if (TREE_CODE (TREE_TYPE (exp)) == POINTER_TYPE) mark_reg_pointer (temp, BIGGEST_ALIGNMENT); emit_move_insn (temp, valreg); /* The return value from a malloc-like function can not alias anything else. */ last = get_last_insn (); REG_NOTES (last) = gen_rtx_EXPR_LIST (REG_NOALIAS, temp, REG_NOTES (last)); /* Write out the sequence. */ insns = get_insns (); end_sequence (); emit_insn (insns); valreg = temp; } /* For calls to `setjmp', etc., inform flow.c it should complain if nonvolatile values are live. For functions that cannot return, inform flow that control does not fall through. */ if ((flags & (ECF_NORETURN | ECF_LONGJMP)) || pass == 0) { /* The barrier must be emitted immediately after the CALL_INSN. Some ports emit more than just a CALL_INSN above, so we must search for it here. */ rtx last = get_last_insn (); while (GET_CODE (last) != CALL_INSN) { last = PREV_INSN (last); /* There was no CALL_INSN? */ if (last == before_call) abort (); } emit_barrier_after (last); /* Stack adjustments after a noreturn call are dead code. However when NO_DEFER_POP is in effect, we must preserve stack_pointer_delta. */ if (inhibit_defer_pop == 0) { stack_pointer_delta = old_stack_allocated; pending_stack_adjust = 0; } } if (flags & ECF_LONGJMP) current_function_calls_longjmp = 1; /* If value type not void, return an rtx for the value. */ /* If there are cleanups to be called, don't use a hard reg as target. We need to double check this and see if it matters anymore. */ if (any_pending_cleanups ()) { if (target && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER) target = 0; sibcall_failure = 1; } if (TYPE_MODE (TREE_TYPE (exp)) == VOIDmode || ignore) target = const0_rtx; else if (structure_value_addr) { if (target == 0 || !MEM_P (target)) { target = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (exp)), memory_address (TYPE_MODE (TREE_TYPE (exp)), structure_value_addr)); set_mem_attributes (target, exp, 1); } } else if (pcc_struct_value) { /* This is the special C++ case where we need to know what the true target was. We take care to never use this value more than once in one expression. */ target = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (exp)), copy_to_reg (valreg)); set_mem_attributes (target, exp, 1); } /* Handle calls that return values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ else if (GET_CODE (valreg) == PARALLEL) { if (target == 0) { /* This will only be assigned once, so it can be readonly. 
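Giving the temporary a const-qualified type lets the slot be treated as read-only.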
*/ tree nt = build_qualified_type (TREE_TYPE (exp), (TYPE_QUALS (TREE_TYPE (exp)) | TYPE_QUAL_CONST)); target = assign_temp (nt, 0, 1, 1); preserve_temp_slots (target); } if (! rtx_equal_p (target, valreg)) emit_group_store (target, valreg, TREE_TYPE (exp), int_size_in_bytes (TREE_TYPE (exp))); /* We can not support sibling calls for this case. */ sibcall_failure = 1; } else if (target && GET_MODE (target) == TYPE_MODE (TREE_TYPE (exp)) && GET_MODE (target) == GET_MODE (valreg)) { /* TARGET and VALREG cannot be equal at this point because the latter would not have REG_FUNCTION_VALUE_P true, while the former would if it were referring to the same register. If they refer to the same register, this move will be a no-op, except when function inlining is being done. */ emit_move_insn (target, valreg); /* If we are setting a MEM, this code must be executed. Since it is emitted after the call insn, sibcall optimization cannot be performed in that case. */ if (MEM_P (target)) sibcall_failure = 1; } else if (TYPE_MODE (TREE_TYPE (exp)) == BLKmode) { target = copy_blkmode_from_reg (target, valreg, TREE_TYPE (exp)); /* We can not support sibling calls for this case. */ sibcall_failure = 1; } else { if (shift_returned_value (TREE_TYPE (exp), &valreg)) sibcall_failure = 1; target = copy_to_reg (valreg); } if (targetm.calls.promote_function_return(funtype)) { /* If we promoted this return value, make the proper SUBREG. TARGET might be const0_rtx here, so be careful. */ if (REG_P (target) && TYPE_MODE (TREE_TYPE (exp)) != BLKmode && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp))) { tree type = TREE_TYPE (exp); int unsignedp = TYPE_UNSIGNED (type); int offset = 0; /* If we don't promote as expected, something is wrong. */ if (GET_MODE (target) != promote_mode (type, TYPE_MODE (type), &unsignedp, 1)) abort (); if ((WORDS_BIG_ENDIAN || BYTES_BIG_ENDIAN) && GET_MODE_SIZE (GET_MODE (target)) > GET_MODE_SIZE (TYPE_MODE (type))) { offset = GET_MODE_SIZE (GET_MODE (target)) - GET_MODE_SIZE (TYPE_MODE (type)); if (! BYTES_BIG_ENDIAN) offset = (offset / UNITS_PER_WORD) * UNITS_PER_WORD; else if (! WORDS_BIG_ENDIAN) offset %= UNITS_PER_WORD; } target = gen_rtx_SUBREG (TYPE_MODE (type), target, offset); SUBREG_PROMOTED_VAR_P (target) = 1; SUBREG_PROMOTED_UNSIGNED_SET (target, unsignedp); } } /* If size of args is variable or this was a constructor call for a stack argument, restore saved stack-pointer value. */ if (old_stack_level && ! (flags & ECF_SP_DEPRESSED)) { emit_stack_restore (SAVE_BLOCK, old_stack_level, NULL_RTX); stack_pointer_delta = old_stack_pointer_delta; pending_stack_adjust = old_pending_adj; stack_arg_under_construction = old_stack_arg_under_construction; highest_outgoing_arg_in_use = initial_highest_arg_in_use; stack_usage_map = initial_stack_usage_map; sibcall_failure = 1; } else if (ACCUMULATE_OUTGOING_ARGS && pass) { #ifdef REG_PARM_STACK_SPACE if (save_area) restore_fixed_argument_area (save_area, argblock, high_to_save, low_to_save); #endif /* If we saved any argument areas, restore them. 
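Each args[i].save_area made by store_one_arg goes back into its original stack slot, using a block move for BLKmode saves.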
*/ for (i = 0; i < num_actuals; i++) if (args[i].save_area) { enum machine_mode save_mode = GET_MODE (args[i].save_area); rtx stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, XEXP (args[i].stack_slot, 0))); if (save_mode != BLKmode) emit_move_insn (stack_area, args[i].save_area); else emit_block_move (stack_area, args[i].save_area, GEN_INT (args[i].locate.size.constant), BLOCK_OP_CALL_PARM); } highest_outgoing_arg_in_use = initial_highest_arg_in_use; stack_usage_map = initial_stack_usage_map; } /* If this was alloca, record the new stack level for nonlocal gotos. Check for the handler slots since we might not have a save area for non-local gotos. */ if ((flags & ECF_MAY_BE_ALLOCA) && cfun->nonlocal_goto_save_area != 0) update_nonlocal_goto_save_area (); /* Free up storage we no longer need. */ for (i = 0; i < num_actuals; ++i) if (args[i].aligned_regs) free (args[i].aligned_regs); if (pass == 0) { /* Undo the fake expand_start_target_temps we did earlier. If there had been any cleanups created, we've already set sibcall_failure. */ expand_end_target_temps (); } /* If this function is returning into a memory location marked as readonly, it means it is initializing that location. We normally treat functions as not clobbering such locations, so we need to specify that this one does. We do this by adding the appropriate CLOBBER to the CALL_INSN function usage list. This cannot be done by emitting a standalone CLOBBER after the call because the latter would be ignored by at least the delay slot scheduling pass. We do this now instead of adding to call_fusage before the call to emit_call_1 because TARGET may be modified in the meantime. */ if (structure_value_addr != 0 && target != 0 && MEM_P (target) && RTX_UNCHANGING_P (target)) add_function_usage_to (last_call_insn (), gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_CLOBBER (VOIDmode, target), NULL_RTX)); insns = get_insns (); end_sequence (); if (pass == 0) { tail_call_insns = insns; /* Restore the pending stack adjustment now that we have finished generating the sibling call sequence. */ pending_stack_adjust = save_pending_stack_adjust; stack_pointer_delta = save_stack_pointer_delta; /* Prepare arg structure for next iteration. */ for (i = 0; i < num_actuals; i++) { args[i].value = 0; args[i].aligned_regs = 0; args[i].stack = 0; } sbitmap_free (stored_args_map); } else { normal_call_insns = insns; /* Verify that we've deallocated all the stack we used. */ if (! (flags & (ECF_NORETURN | ECF_LONGJMP)) && old_stack_allocated != stack_pointer_delta - pending_stack_adjust) abort (); } /* If something prevents making this a sibling call, zero out the sequence. */ if (sibcall_failure) tail_call_insns = NULL_RTX; else break; } /* If tail call production succeeded, we need to remove REG_EQUIV notes on arguments too, as argument area is now clobbered by the call. */ if (tail_call_insns) { emit_insn (tail_call_insns); cfun->tail_call_emit = true; } else emit_insn (normal_call_insns); currently_expanding_call--; /* If this function returns with the stack pointer depressed, ensure this block saves and restores the stack pointer, show it was changed, and adjust for any outgoing arg space. */ if (flags & ECF_SP_DEPRESSED) { clear_pending_stack_adjust (); emit_insn (gen_rtx_CLOBBER (VOIDmode, stack_pointer_rtx)); emit_move_insn (virtual_stack_dynamic_rtx, stack_pointer_rtx); save_stack_pointer (); } return target; } /* A sibling call sequence invalidates any REG_EQUIV notes made for this function's incoming arguments. 
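Such a note says an incoming argument can be recovered from its stack slot, but the sibcall sequence may have stored outgoing arguments on top of that slot.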
At the start of RTL generation we know the only REG_EQUIV notes in the rtl chain are those for incoming arguments, so we can safely flush any REG_EQUIV note. This is (slight) overkill. We could keep track of the highest argument we clobber and be more selective in removing notes, but it does not seem to be worth the effort. */ void fixup_tail_calls (void) { rtx insn; tree arg; purge_reg_equiv_notes (); /* A sibling call sequence also may invalidate RTX_UNCHANGING_P flag of some incoming arguments MEM RTLs, because it can write into those slots. We clear all those bits now. This is (slight) overkill, we could keep track of which arguments we actually write into. */ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) purge_mem_unchanging_flag (PATTERN (insn)); } /* Similarly, invalidate RTX_UNCHANGING_P for any incoming arguments passed in registers. */ for (arg = DECL_ARGUMENTS (current_function_decl); arg; arg = TREE_CHAIN (arg)) { if (REG_P (DECL_RTL (arg))) RTX_UNCHANGING_P (DECL_RTL (arg)) = false; } } /* Traverse an argument list in VALUES and expand all complex arguments into their components. */ tree split_complex_values (tree values) { tree p; /* Before allocating memory, check for the common case of no complex. */ for (p = values; p; p = TREE_CHAIN (p)) { tree type = TREE_TYPE (TREE_VALUE (p)); if (type && TREE_CODE (type) == COMPLEX_TYPE && targetm.calls.split_complex_arg (type)) goto found; } return values; found: values = copy_list (values); for (p = values; p; p = TREE_CHAIN (p)) { tree complex_value = TREE_VALUE (p); tree complex_type; complex_type = TREE_TYPE (complex_value); if (!complex_type) continue; if (TREE_CODE (complex_type) == COMPLEX_TYPE && targetm.calls.split_complex_arg (complex_type)) { tree subtype; tree real, imag, next; subtype = TREE_TYPE (complex_type); complex_value = save_expr (complex_value); real = build1 (REALPART_EXPR, subtype, complex_value); imag = build1 (IMAGPART_EXPR, subtype, complex_value); TREE_VALUE (p) = real; next = TREE_CHAIN (p); imag = build_tree_list (NULL_TREE, imag); TREE_CHAIN (p) = imag; TREE_CHAIN (imag) = next; /* Skip the newly created node. */ p = TREE_CHAIN (p); } } return values; } /* Traverse a list of TYPES and expand all complex types into their components. */ tree split_complex_types (tree types) { tree p; /* Before allocating memory, check for the common case of no complex. */ for (p = types; p; p = TREE_CHAIN (p)) { tree type = TREE_VALUE (p); if (TREE_CODE (type) == COMPLEX_TYPE && targetm.calls.split_complex_arg (type)) goto found; } return types; found: types = copy_list (types); for (p = types; p; p = TREE_CHAIN (p)) { tree complex_type = TREE_VALUE (p); if (TREE_CODE (complex_type) == COMPLEX_TYPE && targetm.calls.split_complex_arg (complex_type)) { tree next, imag; /* Rewrite complex type with component type. */ TREE_VALUE (p) = TREE_TYPE (complex_type); next = TREE_CHAIN (p); /* Add another component type for the imaginary part. */ imag = build_tree_list (NULL_TREE, TREE_VALUE (p)); TREE_CHAIN (p) = imag; TREE_CHAIN (imag) = next; /* Skip the newly created node. */ p = TREE_CHAIN (p); } } return types; } /* Output a library call to function FUN (a SYMBOL_REF rtx). The RETVAL parameter specifies whether return value needs to be saved, other parameters are documented in the emit_library_call function below. 
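When RETVAL is zero the call is emitted only for its side effects and nothing is copied back into VALUE.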
*/ static rtx emit_library_call_value_1 (int retval, rtx orgfun, rtx value, enum libcall_type fn_type, enum machine_mode outmode, int nargs, va_list p) { /* Total size in bytes of all the stack-parms scanned so far. */ struct args_size args_size; /* Size of arguments before any adjustments (such as rounding). */ struct args_size original_args_size; int argnum; rtx fun; int inc; int count; rtx argblock = 0; CUMULATIVE_ARGS args_so_far; struct arg { rtx value; enum machine_mode mode; rtx reg; int partial; struct locate_and_pad_arg_data locate; rtx save_area; }; struct arg *argvec; int old_inhibit_defer_pop = inhibit_defer_pop; rtx call_fusage = 0; rtx mem_value = 0; rtx valreg; int pcc_struct_value = 0; int struct_value_size = 0; int flags; int reg_parm_stack_space = 0; int needed; rtx before_call; tree tfom; /* type_for_mode (outmode, 0) */ #ifdef REG_PARM_STACK_SPACE /* Define the boundary of the register parm stack space that needs to be save, if any. */ int low_to_save, high_to_save; rtx save_area = 0; /* Place that it is saved. */ #endif /* Size of the stack reserved for parameter registers. */ int initial_highest_arg_in_use = highest_outgoing_arg_in_use; char *initial_stack_usage_map = stack_usage_map; rtx struct_value = targetm.calls.struct_value_rtx (0, 0); #ifdef REG_PARM_STACK_SPACE reg_parm_stack_space = REG_PARM_STACK_SPACE ((tree) 0); #endif /* By default, library functions can not throw. */ flags = ECF_NOTHROW; switch (fn_type) { case LCT_NORMAL: break; case LCT_CONST: flags |= ECF_CONST; break; case LCT_PURE: flags |= ECF_PURE; break; case LCT_CONST_MAKE_BLOCK: flags |= ECF_CONST | ECF_LIBCALL_BLOCK; break; case LCT_PURE_MAKE_BLOCK: flags |= ECF_PURE | ECF_LIBCALL_BLOCK; break; case LCT_NORETURN: flags |= ECF_NORETURN; break; case LCT_THROW: flags = ECF_NORETURN; break; case LCT_ALWAYS_RETURN: flags = ECF_ALWAYS_RETURN; break; case LCT_RETURNS_TWICE: flags = ECF_RETURNS_TWICE; break; } fun = orgfun; /* Ensure current function's preferred stack boundary is at least what we need. */ if (cfun->preferred_stack_boundary < PREFERRED_STACK_BOUNDARY) cfun->preferred_stack_boundary = PREFERRED_STACK_BOUNDARY; /* If this kind of value comes back in memory, decide where in memory it should come back. */ if (outmode != VOIDmode) { tfom = lang_hooks.types.type_for_mode (outmode, 0); if (aggregate_value_p (tfom, 0)) { #ifdef PCC_STATIC_STRUCT_RETURN rtx pointer_reg = hard_function_value (build_pointer_type (tfom), 0, 0); mem_value = gen_rtx_MEM (outmode, pointer_reg); pcc_struct_value = 1; if (value == 0) value = gen_reg_rtx (outmode); #else /* not PCC_STATIC_STRUCT_RETURN */ struct_value_size = GET_MODE_SIZE (outmode); if (value != 0 && MEM_P (value)) mem_value = value; else mem_value = assign_temp (tfom, 0, 1, 1); #endif /* This call returns a big structure. */ flags &= ~(ECF_CONST | ECF_PURE | ECF_LIBCALL_BLOCK); } } else tfom = void_type_node; /* ??? Unfinished: must pass the memory address as an argument. */ /* Copy all the libcall-arguments out of the varargs data and into a vector ARGVEC. Compute how to pass each argument. We only support a very small subset of the full argument passing conventions to limit complexity here since library functions shouldn't have many args. 
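Each argument arrives as a bare rtx/mode pair with no tree type, so any needed mode conversion must already have been done by the caller, which knows the signedness of the value.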
*/ argvec = alloca ((nargs + 1) * sizeof (struct arg)); memset (argvec, 0, (nargs + 1) * sizeof (struct arg)); #ifdef INIT_CUMULATIVE_LIBCALL_ARGS INIT_CUMULATIVE_LIBCALL_ARGS (args_so_far, outmode, fun); #else INIT_CUMULATIVE_ARGS (args_so_far, NULL_TREE, fun, 0, nargs); #endif args_size.constant = 0; args_size.var = 0; count = 0; /* Now we are about to start emitting insns that can be deleted if a libcall is deleted. */ if (flags & ECF_LIBCALL_BLOCK) start_sequence (); push_temp_slots (); /* If there's a structure value address to be passed, either pass it in the special place, or pass it as an extra argument. */ if (mem_value && struct_value == 0 && ! pcc_struct_value) { rtx addr = XEXP (mem_value, 0); nargs++; /* Make sure it is a reasonable operand for a move or push insn. */ if (!REG_P (addr) && !MEM_P (addr) && ! (CONSTANT_P (addr) && LEGITIMATE_CONSTANT_P (addr))) addr = force_operand (addr, NULL_RTX); argvec[count].value = addr; argvec[count].mode = Pmode; argvec[count].partial = 0; argvec[count].reg = FUNCTION_ARG (args_so_far, Pmode, NULL_TREE, 1); #ifdef FUNCTION_ARG_PARTIAL_NREGS if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, Pmode, NULL_TREE, 1)) abort (); #endif locate_and_pad_parm (Pmode, NULL_TREE, #ifdef STACK_PARMS_IN_REG_PARM_AREA 1, #else argvec[count].reg != 0, #endif 0, NULL_TREE, &args_size, &argvec[count].locate); if (argvec[count].reg == 0 || argvec[count].partial != 0 || reg_parm_stack_space > 0) args_size.constant += argvec[count].locate.size.constant; FUNCTION_ARG_ADVANCE (args_so_far, Pmode, (tree) 0, 1); count++; } for (; count < nargs; count++) { rtx val = va_arg (p, rtx); enum machine_mode mode = va_arg (p, enum machine_mode); /* We cannot convert the arg value to the mode the library wants here; must do it earlier where we know the signedness of the arg. */ if (mode == BLKmode || (GET_MODE (val) != mode && GET_MODE (val) != VOIDmode)) abort (); /* There's no need to call protect_from_queue, because either emit_move_insn or emit_push_insn will do that. */ /* Make sure it is a reasonable operand for a move or push insn. */ if (!REG_P (val) && !MEM_P (val) && ! (CONSTANT_P (val) && LEGITIMATE_CONSTANT_P (val))) val = force_operand (val, NULL_RTX); #ifdef FUNCTION_ARG_PASS_BY_REFERENCE if (FUNCTION_ARG_PASS_BY_REFERENCE (args_so_far, mode, NULL_TREE, 1)) { rtx slot; int must_copy = 1 #ifdef FUNCTION_ARG_CALLEE_COPIES && ! FUNCTION_ARG_CALLEE_COPIES (args_so_far, mode, NULL_TREE, 1) #endif ; /* loop.c won't look at CALL_INSN_FUNCTION_USAGE of const/pure functions, so we have to pretend this isn't such a function. */ if (flags & ECF_LIBCALL_BLOCK) { rtx insns = get_insns (); end_sequence (); emit_insn (insns); } flags &= ~(ECF_CONST | ECF_PURE | ECF_LIBCALL_BLOCK); /* If this was a CONST function, it is now PURE since it now reads memory. */ if (flags & ECF_CONST) { flags &= ~ECF_CONST; flags |= ECF_PURE; } if (GET_MODE (val) == MEM && ! 
must_copy) slot = val; else if (must_copy) { slot = assign_temp (lang_hooks.types.type_for_mode (mode, 0), 0, 1, 1); emit_move_insn (slot, val); } else { tree type = lang_hooks.types.type_for_mode (mode, 0); slot = gen_rtx_MEM (mode, expand_expr (build1 (ADDR_EXPR, build_pointer_type (type), make_tree (type, val)), NULL_RTX, VOIDmode, 0)); } call_fusage = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, slot), call_fusage); if (must_copy) call_fusage = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_CLOBBER (VOIDmode, slot), call_fusage); mode = Pmode; val = force_operand (XEXP (slot, 0), NULL_RTX); } #endif argvec[count].value = val; argvec[count].mode = mode; argvec[count].reg = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1); #ifdef FUNCTION_ARG_PARTIAL_NREGS argvec[count].partial = FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode, NULL_TREE, 1); #else argvec[count].partial = 0; #endif locate_and_pad_parm (mode, NULL_TREE, #ifdef STACK_PARMS_IN_REG_PARM_AREA 1, #else argvec[count].reg != 0, #endif argvec[count].partial, NULL_TREE, &args_size, &argvec[count].locate); if (argvec[count].locate.size.var) abort (); if (argvec[count].reg == 0 || argvec[count].partial != 0 || reg_parm_stack_space > 0) args_size.constant += argvec[count].locate.size.constant; FUNCTION_ARG_ADVANCE (args_so_far, mode, (tree) 0, 1); } /* If this machine requires an external definition for library functions, write one out. */ assemble_external_libcall (fun); original_args_size = args_size; args_size.constant = (((args_size.constant + stack_pointer_delta + PREF_STACK_BYTES - 1) / PREF_STACK_BYTES * PREF_STACK_BYTES) - stack_pointer_delta); args_size.constant = MAX (args_size.constant, reg_parm_stack_space); #ifndef OUTGOING_REG_PARM_STACK_SPACE args_size.constant -= reg_parm_stack_space; #endif if (args_size.constant > current_function_outgoing_args_size) current_function_outgoing_args_size = args_size.constant; if (ACCUMULATE_OUTGOING_ARGS) { /* Since the stack pointer will never be pushed, it is possible for the evaluation of a parm to clobber something we have already written to the stack. Since most function calls on RISC machines do not use the stack, this is uncommon, but must work correctly. Therefore, we save any area of the stack that was already written and that we are using. Here we set up to do this by making a new stack usage map from the old one. Another approach might be to try to reorder the argument evaluations to avoid this conflicting stack usage. */ needed = args_size.constant; #ifndef OUTGOING_REG_PARM_STACK_SPACE /* Since we will be writing into the entire argument area, the map must be allocated for its entire size, not just the part that is the responsibility of the caller. */ needed += reg_parm_stack_space; #endif #ifdef ARGS_GROW_DOWNWARD highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use, needed + 1); #else highest_outgoing_arg_in_use = MAX (initial_highest_arg_in_use, needed); #endif stack_usage_map = alloca (highest_outgoing_arg_in_use); if (initial_highest_arg_in_use) memcpy (stack_usage_map, initial_stack_usage_map, initial_highest_arg_in_use); if (initial_highest_arg_in_use != highest_outgoing_arg_in_use) memset (&stack_usage_map[initial_highest_arg_in_use], 0, highest_outgoing_arg_in_use - initial_highest_arg_in_use); needed = 0; /* We must be careful to use virtual regs before they're instantiated, and real regs afterwards. 
Loop optimization, for example, can create new libcalls after we've instantiated the virtual regs, and if we use virtuals anyway, they won't match the rtl patterns. */ if (virtuals_instantiated) argblock = plus_constant (stack_pointer_rtx, STACK_POINTER_OFFSET); else argblock = virtual_outgoing_args_rtx; } else { if (!PUSH_ARGS) argblock = push_block (GEN_INT (args_size.constant), 0, 0); } /* If we push args individually in reverse order, perform stack alignment before the first push (the last arg). */ if (argblock == 0 && PUSH_ARGS_REVERSED) anti_adjust_stack (GEN_INT (args_size.constant - original_args_size.constant)); if (PUSH_ARGS_REVERSED) { inc = -1; argnum = nargs - 1; } else { inc = 1; argnum = 0; } #ifdef REG_PARM_STACK_SPACE if (ACCUMULATE_OUTGOING_ARGS) { /* The argument list is the property of the called routine and it may clobber it. If the fixed area has been used for previous parameters, we must save and restore it. */ save_area = save_fixed_argument_area (reg_parm_stack_space, argblock, &low_to_save, &high_to_save); } #endif /* Push the args that need to be pushed. */ /* ARGNUM indexes the ARGVEC array in the order in which the arguments are to be pushed. */ for (count = 0; count < nargs; count++, argnum += inc) { enum machine_mode mode = argvec[argnum].mode; rtx val = argvec[argnum].value; rtx reg = argvec[argnum].reg; int partial = argvec[argnum].partial; int lower_bound = 0, upper_bound = 0, i; if (! (reg != 0 && partial == 0)) { if (ACCUMULATE_OUTGOING_ARGS) { /* If this is being stored into a pre-allocated, fixed-size, stack area, save any previous data at that location. */ #ifdef ARGS_GROW_DOWNWARD /* stack_slot is negative, but we want to index stack_usage_map with positive values. */ upper_bound = -argvec[argnum].locate.offset.constant + 1; lower_bound = upper_bound - argvec[argnum].locate.size.constant; #else lower_bound = argvec[argnum].locate.offset.constant; upper_bound = lower_bound + argvec[argnum].locate.size.constant; #endif i = lower_bound; /* Don't worry about things in the fixed argument area; it has already been saved. */ if (i < reg_parm_stack_space) i = reg_parm_stack_space; while (i < upper_bound && stack_usage_map[i] == 0) i++; if (i < upper_bound) { /* We need to make a save area. */ unsigned int size = argvec[argnum].locate.size.constant * BITS_PER_UNIT; enum machine_mode save_mode = mode_for_size (size, MODE_INT, 1); rtx adr = plus_constant (argblock, argvec[argnum].locate.offset.constant); rtx stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, adr)); if (save_mode == BLKmode) { argvec[argnum].save_area = assign_stack_temp (BLKmode, argvec[argnum].locate.size.constant, 0); emit_block_move (validize_mem (argvec[argnum].save_area), stack_area, GEN_INT (argvec[argnum].locate.size.constant), BLOCK_OP_CALL_PARM); } else { argvec[argnum].save_area = gen_reg_rtx (save_mode); emit_move_insn (argvec[argnum].save_area, stack_area); } } } emit_push_insn (val, mode, NULL_TREE, NULL_RTX, PARM_BOUNDARY, partial, reg, 0, argblock, GEN_INT (argvec[argnum].locate.offset.constant), reg_parm_stack_space, ARGS_SIZE_RTX (argvec[argnum].locate.alignment_pad)); /* Now mark the segment we just used. */ if (ACCUMULATE_OUTGOING_ARGS) for (i = lower_bound; i < upper_bound; i++) stack_usage_map[i] = 1; NO_DEFER_POP; } } /* If we pushed args in forward order, perform stack alignment after pushing the last arg. 
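The adjustment needed is just the difference between the rounded and unrounded argument block sizes.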
*/ if (argblock == 0 && !PUSH_ARGS_REVERSED) anti_adjust_stack (GEN_INT (args_size.constant - original_args_size.constant)); if (PUSH_ARGS_REVERSED) argnum = nargs - 1; else argnum = 0; fun = prepare_call_address (fun, NULL, &call_fusage, 0, 0); /* Now load any reg parms into their regs. */ /* ARGNUM indexes the ARGVEC array in the order in which the arguments are to be pushed. */ for (count = 0; count < nargs; count++, argnum += inc) { rtx val = argvec[argnum].value; rtx reg = argvec[argnum].reg; int partial = argvec[argnum].partial; /* Handle calls that pass values in multiple non-contiguous locations. The PA64 has examples of this for library calls. */ if (reg != 0 && GET_CODE (reg) == PARALLEL) emit_group_load (reg, val, NULL_TREE, GET_MODE_SIZE (GET_MODE (val))); else if (reg != 0 && partial == 0) emit_move_insn (reg, val); NO_DEFER_POP; } /* Any regs containing parms remain in use through the call. */ for (count = 0; count < nargs; count++) { rtx reg = argvec[count].reg; if (reg != 0 && GET_CODE (reg) == PARALLEL) use_group_regs (&call_fusage, reg); else if (reg != 0) use_reg (&call_fusage, reg); } /* Pass the function the address in which to return a structure value. */ if (mem_value != 0 && struct_value != 0 && ! pcc_struct_value) { emit_move_insn (struct_value, force_reg (Pmode, force_operand (XEXP (mem_value, 0), NULL_RTX))); if (REG_P (struct_value)) use_reg (&call_fusage, struct_value); } /* Don't allow popping to be deferred, since then cse'ing of library calls could delete a call and leave the pop. */ NO_DEFER_POP; valreg = (mem_value == 0 && outmode != VOIDmode ? hard_libcall_value (outmode) : NULL_RTX); /* Stack must be properly aligned now. */ if (stack_pointer_delta & (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)) abort (); before_call = get_last_insn (); /* We pass the old value of inhibit_defer_pop + 1 to emit_call_1, which will set inhibit_defer_pop to that value. */ /* The return type is needed to decide how many bytes the function pops. Signedness plays no role in that, so for simplicity, we pretend it's always signed. We also assume that the list of arguments passed has no impact, so we pretend it is unknown. */ emit_call_1 (fun, NULL, get_identifier (XSTR (orgfun, 0)), build_function_type (tfom, NULL_TREE), original_args_size.constant, args_size.constant, struct_value_size, FUNCTION_ARG (args_so_far, VOIDmode, void_type_node, 1), valreg, old_inhibit_defer_pop + 1, call_fusage, flags, & args_so_far); /* For calls to `setjmp', etc., inform flow.c it should complain if nonvolatile values are live. For functions that cannot return, inform flow that control does not fall through. */ if (flags & (ECF_NORETURN | ECF_LONGJMP)) { /* The barrier note must be emitted immediately after the CALL_INSN. Some ports emit more than just a CALL_INSN above, so we must search for it here. */ rtx last = get_last_insn (); while (GET_CODE (last) != CALL_INSN) { last = PREV_INSN (last); /* There was no CALL_INSN? */ if (last == before_call) abort (); } emit_barrier_after (last); } /* Now restore inhibit_defer_pop to its actual original value. */ OK_DEFER_POP; /* If call is cse'able, make appropriate pair of reg-notes around it. Test valreg so we don't crash; may safely ignore `const' if return type is void. Disable for PARALLEL return values, because we have no way to move such values into a pseudo register. 
*/ if (flags & ECF_LIBCALL_BLOCK) { rtx insns; if (valreg == 0) { insns = get_insns (); end_sequence (); emit_insn (insns); } else { rtx note = 0; rtx temp; int i; if (GET_CODE (valreg) == PARALLEL) { temp = gen_reg_rtx (outmode); emit_group_store (temp, valreg, NULL_TREE, GET_MODE_SIZE (outmode)); valreg = temp; } temp = gen_reg_rtx (GET_MODE (valreg)); /* Construct an "equal form" for the value which mentions all the arguments in order as well as the function name. */ for (i = 0; i < nargs; i++) note = gen_rtx_EXPR_LIST (VOIDmode, argvec[i].value, note); note = gen_rtx_EXPR_LIST (VOIDmode, fun, note); insns = get_insns (); end_sequence (); if (flags & ECF_PURE) note = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode))), note); emit_libcall_block (insns, temp, valreg, note); valreg = temp; } } pop_temp_slots (); /* Copy the value to the right place. */ if (outmode != VOIDmode && retval) { if (mem_value) { if (value == 0) value = mem_value; if (value != mem_value) emit_move_insn (value, mem_value); } else if (GET_CODE (valreg) == PARALLEL) { if (value == 0) value = gen_reg_rtx (outmode); emit_group_store (value, valreg, NULL_TREE, GET_MODE_SIZE (outmode)); } else if (value != 0) emit_move_insn (value, valreg); else value = valreg; } if (ACCUMULATE_OUTGOING_ARGS) { #ifdef REG_PARM_STACK_SPACE if (save_area) restore_fixed_argument_area (save_area, argblock, high_to_save, low_to_save); #endif /* If we saved any argument areas, restore them. */ for (count = 0; count < nargs; count++) if (argvec[count].save_area) { enum machine_mode save_mode = GET_MODE (argvec[count].save_area); rtx adr = plus_constant (argblock, argvec[count].locate.offset.constant); rtx stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, adr)); if (save_mode == BLKmode) emit_block_move (stack_area, validize_mem (argvec[count].save_area), GEN_INT (argvec[count].locate.size.constant), BLOCK_OP_CALL_PARM); else emit_move_insn (stack_area, argvec[count].save_area); } highest_outgoing_arg_in_use = initial_highest_arg_in_use; stack_usage_map = initial_stack_usage_map; } return value; } /* Output a library call to function FUN (a SYMBOL_REF rtx) (emitting the queue unless NO_QUEUE is nonzero), for a value of mode OUTMODE, with NARGS different arguments, passed as alternating rtx values and machine_modes to convert them to. The rtx values should have been passed through protect_from_queue already. FN_TYPE should be LCT_NORMAL for `normal' calls, LCT_CONST for `const' calls, LCT_PURE for `pure' calls, LCT_CONST_MAKE_BLOCK for `const' calls which should be enclosed in REG_LIBCALL/REG_RETVAL notes, LCT_PURE_MAKE_BLOCK for `purep' calls which should be enclosed in REG_LIBCALL/REG_RETVAL notes with extra (use (memory (scratch)), or other LCT_ value for other types of library calls. */ void emit_library_call (rtx orgfun, enum libcall_type fn_type, enum machine_mode outmode, int nargs, ...) { va_list p; va_start (p, nargs); emit_library_call_value_1 (0, orgfun, NULL_RTX, fn_type, outmode, nargs, p); va_end (p); } /* Like emit_library_call except that an extra argument, VALUE, comes second and says where to store the result. (If VALUE is zero, this function chooses a convenient way to return the value. This function returns an rtx for where the value is to be found. If VALUE is nonzero, VALUE is returned. */ rtx emit_library_call_value (rtx orgfun, rtx value, enum libcall_type fn_type, enum machine_mode outmode, int nargs, ...) 
{ rtx result; va_list p; va_start (p, nargs); result = emit_library_call_value_1 (1, orgfun, value, fn_type, outmode, nargs, p); va_end (p); return result; } /* Store a single argument for a function call into the register or memory area where it must be passed. *ARG describes the argument value and where to pass it. ARGBLOCK is the address of the stack-block for all the arguments, or 0 on a machine where arguments are pushed individually. MAY_BE_ALLOCA nonzero says this could be a call to `alloca' so must be careful about how the stack is used. VARIABLE_SIZE nonzero says that this was a variable-sized outgoing argument stack. This is used if ACCUMULATE_OUTGOING_ARGS to indicate that we need not worry about saving and restoring the stack. FNDECL is the declaration of the function we are calling. Return nonzero if this arg should cause sibcall failure, zero otherwise. */ static int store_one_arg (struct arg_data *arg, rtx argblock, int flags, int variable_size ATTRIBUTE_UNUSED, int reg_parm_stack_space) { tree pval = arg->tree_value; rtx reg = 0; int partial = 0; int used = 0; int i, lower_bound = 0, upper_bound = 0; int sibcall_failure = 0; if (TREE_CODE (pval) == ERROR_MARK) return 1; /* Push a new temporary level for any temporaries we make for this argument. */ push_temp_slots (); if (ACCUMULATE_OUTGOING_ARGS && !(flags & ECF_SIBCALL)) { /* If this is being stored into a pre-allocated, fixed-size, stack area, save any previous data at that location. */ if (argblock && ! variable_size && arg->stack) { #ifdef ARGS_GROW_DOWNWARD /* stack_slot is negative, but we want to index stack_usage_map with positive values. */ if (GET_CODE (XEXP (arg->stack_slot, 0)) == PLUS) upper_bound = -INTVAL (XEXP (XEXP (arg->stack_slot, 0), 1)) + 1; else upper_bound = 0; lower_bound = upper_bound - arg->locate.size.constant; #else if (GET_CODE (XEXP (arg->stack_slot, 0)) == PLUS) lower_bound = INTVAL (XEXP (XEXP (arg->stack_slot, 0), 1)); else lower_bound = 0; upper_bound = lower_bound + arg->locate.size.constant; #endif i = lower_bound; /* Don't worry about things in the fixed argument area; it has already been saved. */ if (i < reg_parm_stack_space) i = reg_parm_stack_space; while (i < upper_bound && stack_usage_map[i] == 0) i++; if (i < upper_bound) { /* We need to make a save area. */ unsigned int size = arg->locate.size.constant * BITS_PER_UNIT; enum machine_mode save_mode = mode_for_size (size, MODE_INT, 1); rtx adr = memory_address (save_mode, XEXP (arg->stack_slot, 0)); rtx stack_area = gen_rtx_MEM (save_mode, adr); if (save_mode == BLKmode) { tree ot = TREE_TYPE (arg->tree_value); tree nt = build_qualified_type (ot, (TYPE_QUALS (ot) | TYPE_QUAL_CONST)); arg->save_area = assign_temp (nt, 0, 1, 1); preserve_temp_slots (arg->save_area); emit_block_move (validize_mem (arg->save_area), stack_area, expr_size (arg->tree_value), BLOCK_OP_CALL_PARM); } else { arg->save_area = gen_reg_rtx (save_mode); emit_move_insn (arg->save_area, stack_area); } } } } /* If this isn't going to be placed on both the stack and in registers, set up the register and number of words. */ if (! arg->pass_on_stack) { if (flags & ECF_SIBCALL) reg = arg->tail_call_reg; else reg = arg->reg; partial = arg->partial; } if (reg != 0 && partial == 0) /* Being passed entirely in a register. We shouldn't be called in this case. */ abort (); /* If this arg needs special alignment, don't load the registers here. 
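The value has already been copied into arg->aligned_regs by store_unaligned_arguments_into_pseudos, and those pseudos are moved into the hard registers later, just before the call.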
*/ if (arg->n_aligned_regs != 0) reg = 0; /* If this is being passed partially in a register, we can't evaluate it directly into its stack slot. Otherwise, we can. */ if (arg->value == 0) { /* stack_arg_under_construction is nonzero if a function argument is being evaluated directly into the outgoing argument list and expand_call must take special action to preserve the argument list if it is called recursively. For scalar function arguments stack_usage_map is sufficient to determine which stack slots must be saved and restored. Scalar arguments in general have pass_on_stack == 0. If this argument is initialized by a function which takes the address of the argument (a C++ constructor or a C function returning a BLKmode structure), then stack_usage_map is insufficient and expand_call must push the stack around the function call. Such arguments have pass_on_stack == 1. Note that it is always safe to set stack_arg_under_construction, but this generates suboptimal code if set when not needed. */ if (arg->pass_on_stack) stack_arg_under_construction++; arg->value = expand_expr (pval, (partial || TYPE_MODE (TREE_TYPE (pval)) != arg->mode) ? NULL_RTX : arg->stack, VOIDmode, EXPAND_STACK_PARM); /* If we are promoting object (or for any other reason) the mode doesn't agree, convert the mode. */ if (arg->mode != TYPE_MODE (TREE_TYPE (pval))) arg->value = convert_modes (arg->mode, TYPE_MODE (TREE_TYPE (pval)), arg->value, arg->unsignedp); if (arg->pass_on_stack) stack_arg_under_construction--; } /* Don't allow anything left on stack from computation of argument to alloca. */ if (flags & ECF_MAY_BE_ALLOCA) do_pending_stack_adjust (); if (arg->value == arg->stack) /* If the value is already in the stack slot, we are done. */ ; else if (arg->mode != BLKmode) { int size; /* Argument is a scalar, not entirely passed in registers. (If part is passed in registers, arg->partial says how much and emit_push_insn will take care of putting it there.) Push it, and if its size is less than the amount of space allocated to it, also bump stack pointer by the additional space. Note that in C the default argument promotions will prevent such mismatches. */ size = GET_MODE_SIZE (arg->mode); /* Compute how much space the push instruction will push. On many machines, pushing a byte will advance the stack pointer by a halfword. */ #ifdef PUSH_ROUNDING size = PUSH_ROUNDING (size); #endif used = size; /* Compute how much space the argument should get: round up to a multiple of the alignment for arguments. */ if (none != FUNCTION_ARG_PADDING (arg->mode, TREE_TYPE (pval))) used = (((size + PARM_BOUNDARY / BITS_PER_UNIT - 1) / (PARM_BOUNDARY / BITS_PER_UNIT)) * (PARM_BOUNDARY / BITS_PER_UNIT)); /* This isn't already where we want it on the stack, so put it there. This can either be done with push or copy insns. */ emit_push_insn (arg->value, arg->mode, TREE_TYPE (pval), NULL_RTX, PARM_BOUNDARY, partial, reg, used - size, argblock, ARGS_SIZE_RTX (arg->locate.offset), reg_parm_stack_space, ARGS_SIZE_RTX (arg->locate.alignment_pad)); /* Unless this is a partially-in-register argument, the argument is now in the stack. */ if (partial == 0) arg->value = arg->stack; } else { /* BLKmode, at least partly to be pushed. */ unsigned int parm_align; int excess; rtx size_rtx; /* Pushing a nonscalar. If part is passed in registers, PARTIAL says how much and emit_push_insn will take care of putting it there. */ /* Round its size up to a multiple of the allocation unit for arguments. 
*/ if (arg->locate.size.var != 0) { excess = 0; size_rtx = ARGS_SIZE_RTX (arg->locate.size); } else { /* PUSH_ROUNDING has no effect on us, because emit_push_insn for BLKmode is careful to avoid it. */ if (reg && GET_CODE (reg) == PARALLEL) { /* Use the size of the elt to compute excess. */ rtx elt = XEXP (XVECEXP (reg, 0, 0), 0); excess = (arg->locate.size.constant - int_size_in_bytes (TREE_TYPE (pval)) + partial * GET_MODE_SIZE (GET_MODE (elt))); } else excess = (arg->locate.size.constant - int_size_in_bytes (TREE_TYPE (pval)) + partial * UNITS_PER_WORD); size_rtx = expand_expr (size_in_bytes (TREE_TYPE (pval)), NULL_RTX, TYPE_MODE (sizetype), 0); } /* Some types will require stricter alignment, which will be provided for elsewhere in argument layout. */ parm_align = MAX (PARM_BOUNDARY, TYPE_ALIGN (TREE_TYPE (pval))); /* When an argument is padded down, the block is aligned to PARM_BOUNDARY, but the actual argument isn't. */ if (FUNCTION_ARG_PADDING (arg->mode, TREE_TYPE (pval)) == downward) { if (arg->locate.size.var) parm_align = BITS_PER_UNIT; else if (excess) { unsigned int excess_align = (excess & -excess) * BITS_PER_UNIT; parm_align = MIN (parm_align, excess_align); } } if ((flags & ECF_SIBCALL) && MEM_P (arg->value)) { /* emit_push_insn might not work properly if arg->value and argblock + arg->locate.offset areas overlap. */ rtx x = arg->value; int i = 0; if (XEXP (x, 0) == current_function_internal_arg_pointer || (GET_CODE (XEXP (x, 0)) == PLUS && XEXP (XEXP (x, 0), 0) == current_function_internal_arg_pointer && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)) { if (XEXP (x, 0) != current_function_internal_arg_pointer) i = INTVAL (XEXP (XEXP (x, 0), 1)); /* expand_call should ensure this. */ if (arg->locate.offset.var || GET_CODE (size_rtx) != CONST_INT) abort (); if (arg->locate.offset.constant > i) { if (arg->locate.offset.constant < i + INTVAL (size_rtx)) sibcall_failure = 1; } else if (arg->locate.offset.constant < i) { if (i < arg->locate.offset.constant + INTVAL (size_rtx)) sibcall_failure = 1; } } } emit_push_insn (arg->value, arg->mode, TREE_TYPE (pval), size_rtx, parm_align, partial, reg, excess, argblock, ARGS_SIZE_RTX (arg->locate.offset), reg_parm_stack_space, ARGS_SIZE_RTX (arg->locate.alignment_pad)); /* Unless this is a partially-in-register argument, the argument is now in the stack. ??? Unlike the case above, in which we want the actual address of the data, so that we can load it directly into a register, here we want the address of the stack slot, so that it's properly aligned for word-by-word copying or something like that. It's not clear that this is always correct. */ if (partial == 0) arg->value = arg->stack_slot; } /* Mark all slots this store used. */ if (ACCUMULATE_OUTGOING_ARGS && !(flags & ECF_SIBCALL) && argblock && ! variable_size && arg->stack) for (i = lower_bound; i < upper_bound; i++) stack_usage_map[i] = 1; /* Once we have pushed something, pops can't safely be deferred during the rest of the arguments. */ NO_DEFER_POP; /* ANSI doesn't require a sequence point here, but PCC has one, so this will avoid some problems. */ emit_queue (); /* Free any temporary slots made in processing this argument. Show that we might have taken the address of something and pushed that as an operand. */ preserve_temp_slots (NULL_RTX); free_temp_slots (); pop_temp_slots (); return sibcall_failure; } /* Nonzero if we do not know how to pass TYPE solely in registers. 
We cannot do so in the following cases: - if the type has variable size - if the type is marked as addressable (it is required to be constructed into the stack) - if the padding and mode of the type is such that a copy into a register would put it into the wrong part of the register. Which padding can't be supported depends on the byte endianness. A value in a register is implicitly padded at the most significant end. On a big-endian machine, that is the lower end in memory. So a value padded in memory at the upper end can't go in a register. For a little-endian machine, the reverse is true. */ bool default_must_pass_in_stack (enum machine_mode mode, tree type) { if (!type) return false; /* If the type has variable size... */ if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) return true; /* If the type is marked as addressable (it is required to be constructed into the stack)... */ if (TREE_ADDRESSABLE (type)) return true; /* If the padding and mode of the type is such that a copy into a register would put it into the wrong part of the register. */ if (mode == BLKmode && int_size_in_bytes (type) % (PARM_BOUNDARY / BITS_PER_UNIT) && (FUNCTION_ARG_PADDING (mode, type) == (BYTES_BIG_ENDIAN ? upward : downward))) return true; return false; } /* Control flow graph manipulation code for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains low level functions to manipulate the CFG and analyze it. All other modules should not transform the data structure directly and use abstraction instead. The file is supposed to be ordered bottom-up and should not contain any code dependent on a particular intermediate language (RTL or trees). Available functionality: - Initialization/deallocation init_flow, clear_edges - Low level basic block manipulation alloc_block, expunge_block - Edge manipulation make_edge, make_single_succ_edge, cached_make_edge, remove_edge - Low level edge redirection (without updating instruction chain) redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred - Dumping and debugging dump_flow_info, debug_flow_info, dump_edge_info - Allocation of AUX fields for basic blocks alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block - clear_bb_flags - Consistency checking verify_flow_info - Dumping and debugging print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n */ /* The obstack on which the flow graph components are allocated. */ struct obstack flow_obstack; static char *flow_firstobj; /* Number of basic blocks in the current function. */ int n_basic_blocks; /* First free basic block number. */ int last_basic_block; /* Number of edges in the current function. */ int n_edges; /* The basic block array. */ varray_type basic_block_info; /* The special entry and exit blocks. 
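They live outside the basic_block_info array and the n_basic_blocks count; init_flow below allocates them separately and gives them the fixed index values ENTRY_BLOCK and EXIT_BLOCK.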
*/ basic_block ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR; /* Memory alloc pool for bb member rbi. */ alloc_pool rbi_pool; void debug_flow_info (void); static void free_edge (edge); /* Called once at initialization time. */ void init_flow (void) { static int initialized; n_edges = 0; if (!initialized) { gcc_obstack_init (&flow_obstack); flow_firstobj = obstack_alloc (&flow_obstack, 0); initialized = 1; } else { obstack_free (&flow_obstack, flow_firstobj); flow_firstobj = obstack_alloc (&flow_obstack, 0); } ENTRY_BLOCK_PTR = ggc_alloc_cleared (sizeof (*ENTRY_BLOCK_PTR)); ENTRY_BLOCK_PTR->index = ENTRY_BLOCK; EXIT_BLOCK_PTR = ggc_alloc_cleared (sizeof (*EXIT_BLOCK_PTR)); EXIT_BLOCK_PTR->index = EXIT_BLOCK; ENTRY_BLOCK_PTR->next_bb = EXIT_BLOCK_PTR; EXIT_BLOCK_PTR->prev_bb = ENTRY_BLOCK_PTR; } /* Helper function for remove_edge and clear_edges. Frees edge structure without actually unlinking it from the pred/succ lists. */ static void free_edge (edge e ATTRIBUTE_UNUSED) { n_edges--; /* ggc_free (e); */ } /* Free the memory associated with the edge structures. */ void clear_edges (void) { basic_block bb; edge e; FOR_EACH_BB (bb) { edge e = bb->succ; while (e) { edge next = e->succ_next; free_edge (e); e = next; } bb->succ = NULL; bb->pred = NULL; } e = ENTRY_BLOCK_PTR->succ; while (e) { edge next = e->succ_next; free_edge (e); e = next; } EXIT_BLOCK_PTR->pred = NULL; ENTRY_BLOCK_PTR->succ = NULL; if (n_edges) abort (); } /* Allocate memory for basic_block. */ basic_block alloc_block (void) { basic_block bb; bb = ggc_alloc_cleared (sizeof (*bb)); return bb; } /* Create memory pool for rbi_pool. */ void alloc_rbi_pool (void) { rbi_pool = create_alloc_pool ("rbi pool", sizeof (struct reorder_block_def), n_basic_blocks + 2); } /* Free rbi_pool. */ void free_rbi_pool (void) { free_alloc_pool (rbi_pool); } /* Initialize rbi (the structure containing data used by basic block duplication and reordering) for the given basic block. */ void initialize_bb_rbi (basic_block bb) { if (bb->rbi) abort (); bb->rbi = pool_alloc (rbi_pool); memset (bb->rbi, 0, sizeof (struct reorder_block_def)); } /* Link block B to chain after AFTER. */ void link_block (basic_block b, basic_block after) { b->next_bb = after->next_bb; b->prev_bb = after; after->next_bb = b; b->next_bb->prev_bb = b; } /* Unlink block B from chain. */ void unlink_block (basic_block b) { b->next_bb->prev_bb = b->prev_bb; b->prev_bb->next_bb = b->next_bb; b->prev_bb = NULL; b->next_bb = NULL; } /* Sequentially order blocks and compact the arrays. */ void compact_blocks (void) { int i; basic_block bb; i = 0; FOR_EACH_BB (bb) { BASIC_BLOCK (i) = bb; bb->index = i; i++; } if (i != n_basic_blocks) abort (); for (; i < last_basic_block; i++) BASIC_BLOCK (i) = NULL; last_basic_block = n_basic_blocks; } /* Remove block B from the basic block array. */ void expunge_block (basic_block b) { unlink_block (b); BASIC_BLOCK (b->index) = NULL; n_basic_blocks--; /* ggc_free (b); */ } /* Create an edge connecting SRC and DEST with flags FLAGS. Return newly created edge. Use this only if you are sure that this edge can't possibly already exist. */ edge unchecked_make_edge (basic_block src, basic_block dst, int flags) { edge e; e = ggc_alloc_cleared (sizeof (*e)); n_edges++; e->succ_next = src->succ; e->pred_next = dst->pred; e->src = src; e->dest = dst; e->flags = flags; src->succ = e; dst->pred = e; return e; } /* Create an edge connecting SRC and DST with FLAGS optionally using edge cache CACHE. Return the new edge, NULL if already exist. 
*/ edge cached_make_edge (sbitmap *edge_cache, basic_block src, basic_block dst, int flags) { int use_edge_cache; edge e; /* Don't bother with edge cache for ENTRY or EXIT, if there aren't that many edges to them, or we didn't allocate memory for it. */ use_edge_cache = (edge_cache && src != ENTRY_BLOCK_PTR && dst != EXIT_BLOCK_PTR); /* Make sure we don't add duplicate edges. */ switch (use_edge_cache) { default: /* Quick test for non-existence of the edge. */ if (! TEST_BIT (edge_cache[src->index], dst->index)) break; /* The edge exists; early exit if no work to do. */ if (flags == 0) return NULL; /* Fall through. */ case 0: for (e = src->succ; e; e = e->succ_next) if (e->dest == dst) { e->flags |= flags; return NULL; } break; } e = unchecked_make_edge (src, dst, flags); if (use_edge_cache) SET_BIT (edge_cache[src->index], dst->index); return e; } /* Create an edge connecting SRC and DEST with flags FLAGS. Return newly created edge or NULL if already exist. */ edge make_edge (basic_block src, basic_block dest, int flags) { return cached_make_edge (NULL, src, dest, flags); } /* Create an edge connecting SRC to DEST and set probability by knowing that it is the single edge leaving SRC. */ edge make_single_succ_edge (basic_block src, basic_block dest, int flags) { edge e = make_edge (src, dest, flags); e->probability = REG_BR_PROB_BASE; e->count = src->count; return e; } /* This function will remove an edge from the flow graph. */ void remove_edge (edge e) { edge last_pred = NULL; edge last_succ = NULL; edge tmp; basic_block src, dest; src = e->src; dest = e->dest; for (tmp = src->succ; tmp && tmp != e; tmp = tmp->succ_next) last_succ = tmp; if (!tmp) abort (); if (last_succ) last_succ->succ_next = e->succ_next; else src->succ = e->succ_next; for (tmp = dest->pred; tmp && tmp != e; tmp = tmp->pred_next) last_pred = tmp; if (!tmp) abort (); if (last_pred) last_pred->pred_next = e->pred_next; else dest->pred = e->pred_next; free_edge (e); } /* Redirect an edge's successor from one block to another. */ void redirect_edge_succ (edge e, basic_block new_succ) { edge *pe; /* Disconnect the edge from the old successor block. */ for (pe = &e->dest->pred; *pe != e; pe = &(*pe)->pred_next) continue; *pe = (*pe)->pred_next; /* Reconnect the edge to the new successor block. */ e->pred_next = new_succ->pred; new_succ->pred = e; e->dest = new_succ; } /* Like previous but avoid possible duplicate edge. */ edge redirect_edge_succ_nodup (edge e, basic_block new_succ) { edge s; /* Check whether the edge is already present. */ for (s = e->src->succ; s; s = s->succ_next) if (s->dest == new_succ && s != e) break; if (s) { s->flags |= e->flags; s->probability += e->probability; if (s->probability > REG_BR_PROB_BASE) s->probability = REG_BR_PROB_BASE; s->count += e->count; remove_edge (e); e = s; } else redirect_edge_succ (e, new_succ); return e; } /* Redirect an edge's predecessor from one block to another. */ void redirect_edge_pred (edge e, basic_block new_pred) { edge *pe; /* Disconnect the edge from the old predecessor block. */ for (pe = &e->src->succ; *pe != e; pe = &(*pe)->succ_next) continue; *pe = (*pe)->succ_next; /* Reconnect the edge to the new predecessor block. 
*/ e->succ_next = new_pred->succ; new_pred->succ = e; e->src = new_pred; } void clear_bb_flags (void) { basic_block bb; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) bb->flags = 0; } void dump_flow_info (FILE *file) { int i; basic_block bb; static const char * const reg_class_names[] = REG_CLASS_NAMES; if (reg_n_info) { int max_regno = max_reg_num (); fprintf (file, "%d registers.\n", max_regno); for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) if (REG_N_REFS (i)) { enum reg_class class, altclass; fprintf (file, "\nRegister %d used %d times across %d insns", i, REG_N_REFS (i), REG_LIVE_LENGTH (i)); if (REG_BASIC_BLOCK (i) >= 0) fprintf (file, " in block %d", REG_BASIC_BLOCK (i)); if (REG_N_SETS (i)) fprintf (file, "; set %d time%s", REG_N_SETS (i), (REG_N_SETS (i) == 1) ? "" : "s"); if (regno_reg_rtx[i] != NULL && REG_USERVAR_P (regno_reg_rtx[i])) fprintf (file, "; user var"); if (REG_N_DEATHS (i) != 1) fprintf (file, "; dies in %d places", REG_N_DEATHS (i)); if (REG_N_CALLS_CROSSED (i) == 1) fprintf (file, "; crosses 1 call"); else if (REG_N_CALLS_CROSSED (i)) fprintf (file, "; crosses %d calls", REG_N_CALLS_CROSSED (i)); if (regno_reg_rtx[i] != NULL && PSEUDO_REGNO_BYTES (i) != UNITS_PER_WORD) fprintf (file, "; %d bytes", PSEUDO_REGNO_BYTES (i)); class = reg_preferred_class (i); altclass = reg_alternate_class (i); if (class != GENERAL_REGS || altclass != ALL_REGS) { if (altclass == ALL_REGS || class == ALL_REGS) fprintf (file, "; pref %s", reg_class_names[(int) class]); else if (altclass == NO_REGS) fprintf (file, "; %s or none", reg_class_names[(int) class]); else fprintf (file, "; pref %s, else %s", reg_class_names[(int) class], reg_class_names[(int) altclass]); } if (regno_reg_rtx[i] != NULL && REG_POINTER (regno_reg_rtx[i])) fprintf (file, "; pointer"); fprintf (file, ".\n"); } } fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks, n_edges); FOR_EACH_BB (bb) { edge e; int sum; gcov_type lsum; fprintf (file, "\nBasic block %d ", bb->index); fprintf (file, "prev %d, next %d, ", bb->prev_bb->index, bb->next_bb->index); fprintf (file, "loop_depth %d, count ", bb->loop_depth); fprintf (file, HOST_WIDEST_INT_PRINT_DEC, bb->count); fprintf (file, ", freq %i", bb->frequency); if (maybe_hot_bb_p (bb)) fprintf (file, ", maybe hot"); if (probably_never_executed_bb_p (bb)) fprintf (file, ", probably never executed"); fprintf (file, ".\n"); fprintf (file, "Predecessors: "); for (e = bb->pred; e; e = e->pred_next) dump_edge_info (file, e, 0); fprintf (file, "\nSuccessors: "); for (e = bb->succ; e; e = e->succ_next) dump_edge_info (file, e, 1); fprintf (file, "\nRegisters live at start:"); dump_regset (bb->global_live_at_start, file); fprintf (file, "\nRegisters live at end:"); dump_regset (bb->global_live_at_end, file); putc ('\n', file); /* Check the consistency of profile information. We can't do that in verify_flow_info, as the counts may get invalid for incompletely solved graphs, later eliminating of conditionals or roundoff errors. It is still practical to have them reported for debugging of simple testcases. 
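The checks below therefore allow a slack of 100, both on the probability sum (out of REG_BR_PROB_BASE) and on the frequency and count sums, to absorb roundoff.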
*/ sum = 0; for (e = bb->succ; e; e = e->succ_next) sum += e->probability; if (bb->succ && abs (sum - REG_BR_PROB_BASE) > 100) fprintf (file, "Invalid sum of outgoing probabilities %.1f%%\n", sum * 100.0 / REG_BR_PROB_BASE); sum = 0; for (e = bb->pred; e; e = e->pred_next) sum += EDGE_FREQUENCY (e); if (abs (sum - bb->frequency) > 100) fprintf (file, "Invalid sum of incomming frequencies %i, should be %i\n", sum, bb->frequency); lsum = 0; for (e = bb->pred; e; e = e->pred_next) lsum += e->count; if (lsum - bb->count > 100 || lsum - bb->count < -100) fprintf (file, "Invalid sum of incomming counts %i, should be %i\n", (int)lsum, (int)bb->count); lsum = 0; for (e = bb->succ; e; e = e->succ_next) lsum += e->count; if (bb->succ && (lsum - bb->count > 100 || lsum - bb->count < -100)) fprintf (file, "Invalid sum of incomming counts %i, should be %i\n", (int)lsum, (int)bb->count); } putc ('\n', file); } void debug_flow_info (void) { dump_flow_info (stderr); } void dump_edge_info (FILE *file, edge e, int do_succ) { basic_block side = (do_succ ? e->dest : e->src); if (side == ENTRY_BLOCK_PTR) fputs (" ENTRY", file); else if (side == EXIT_BLOCK_PTR) fputs (" EXIT", file); else fprintf (file, " %d", side->index); if (e->probability) fprintf (file, " [%.1f%%] ", e->probability * 100.0 / REG_BR_PROB_BASE); if (e->count) { fprintf (file, " count:"); fprintf (file, HOST_WIDEST_INT_PRINT_DEC, e->count); } if (e->flags) { static const char * const bitnames[] = { "fallthru", "ab", "abcall", "eh", "fake", "dfs_back", "can_fallthru", "irreducible", "sibcall", "loop_exit", "true", "false", "exec" }; int comma = 0; int i, flags = e->flags; fputs (" (", file); for (i = 0; flags; i++) if (flags & (1 << i)) { flags &= ~(1 << i); if (comma) fputc (',', file); if (i < (int) ARRAY_SIZE (bitnames)) fputs (bitnames[i], file); else fprintf (file, "%d", i); comma = 1; } fputc (')', file); } } /* Simple routines to easily allocate AUX fields of basic blocks. */ static struct obstack block_aux_obstack; static void *first_block_aux_obj = 0; static struct obstack edge_aux_obstack; static void *first_edge_aux_obj = 0; /* Allocate a memory block of SIZE as BB->aux. The obstack must be first initialized by alloc_aux_for_blocks. */ inline void alloc_aux_for_block (basic_block bb, int size) { /* Verify that aux field is clear. */ if (bb->aux || !first_block_aux_obj) abort (); bb->aux = obstack_alloc (&block_aux_obstack, size); memset (bb->aux, 0, size); } /* Initialize the block_aux_obstack and if SIZE is nonzero, call alloc_aux_for_block for each basic block. */ void alloc_aux_for_blocks (int size) { static int initialized; if (!initialized) { gcc_obstack_init (&block_aux_obstack); initialized = 1; } /* Check whether AUX data are still allocated. */ else if (first_block_aux_obj) abort (); first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0); if (size) { basic_block bb; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) alloc_aux_for_block (bb, size); } } /* Clear AUX pointers of all blocks. */ void clear_aux_for_blocks (void) { basic_block bb; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) bb->aux = NULL; } /* Free data allocated in block_aux_obstack and clear AUX pointers of all blocks. */ void free_aux_for_blocks (void) { if (!first_block_aux_obj) abort (); obstack_free (&block_aux_obstack, first_block_aux_obj); first_block_aux_obj = NULL; clear_aux_for_blocks (); } /* Allocate a memory edge of SIZE as BB->aux. The obstack must be first initialized by alloc_aux_for_edges. 
*/ inline void alloc_aux_for_edge (edge e, int size) { /* Verify that aux field is clear. */ if (e->aux || !first_edge_aux_obj) abort (); e->aux = obstack_alloc (&edge_aux_obstack, size); memset (e->aux, 0, size); } /* Initialize the edge_aux_obstack and if SIZE is nonzero, call alloc_aux_for_edge for each basic edge. */ void alloc_aux_for_edges (int size) { static int initialized; if (!initialized) { gcc_obstack_init (&edge_aux_obstack); initialized = 1; } /* Check whether AUX data are still allocated. */ else if (first_edge_aux_obj) abort (); first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0); if (size) { basic_block bb; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; for (e = bb->succ; e; e = e->succ_next) alloc_aux_for_edge (e, size); } } } /* Clear AUX pointers of all edges. */ void clear_aux_for_edges (void) { basic_block bb; edge e; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { for (e = bb->succ; e; e = e->succ_next) e->aux = NULL; } } /* Free data allocated in edge_aux_obstack and clear AUX pointers of all edges. */ void free_aux_for_edges (void) { if (!first_edge_aux_obj) abort (); obstack_free (&edge_aux_obstack, first_edge_aux_obj); first_edge_aux_obj = NULL; clear_aux_for_edges (); } void debug_bb (basic_block bb) { dump_bb (bb, stderr, 0); } basic_block debug_bb_n (int n) { basic_block bb = BASIC_BLOCK (n); dump_bb (bb, stderr, 0); return bb; } /* Dumps cfg related information about basic block BB to FILE. */ static void dump_cfg_bb_info (FILE *file, basic_block bb) { unsigned i; bool first = true; static const char * const bb_bitnames[] = { "dirty", "new", "reachable", "visited", "irreducible_loop", "superblock" }; const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *); edge e; fprintf (file, "Basic block %d", bb->index); for (i = 0; i < n_bitnames; i++) if (bb->flags & (1 << i)) { if (first) fprintf (file, " ("); else fprintf (file, ", "); first = false; fprintf (file, bb_bitnames[i]); } if (!first) fprintf (file, ")"); fprintf (file, "\n"); fprintf (file, "Predecessors: "); for (e = bb->pred; e; e = e->pred_next) dump_edge_info (file, e, 0); fprintf (file, "\nSuccessors: "); for (e = bb->succ; e; e = e->succ_next) dump_edge_info (file, e, 1); fprintf (file, "\n\n"); } /* Dumps a brief description of cfg to FILE. */ void brief_dump_cfg (FILE *file) { basic_block bb; FOR_EACH_BB (bb) { dump_cfg_bb_info (file, bb); } } /* Control flow graph analysis code for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains various simple utilities to analyze the CFG. */ /* Store the data structures necessary for depth-first search. 
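The search runs on the reverse CFG, following predecessor edges; the structure is used by the flow_dfs_compute_reverse_* routines below and, through them, by connect_infinite_loops_to_exit.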
*/ struct depth_first_search_dsS { /* stack for backtracking during the algorithm */ basic_block *stack; /* number of edges in the stack. That is, positions 0, ..., sp-1 have edges. */ unsigned int sp; /* record of basic blocks already seen by depth-first search */ sbitmap visited_blocks; }; typedef struct depth_first_search_dsS *depth_first_search_ds; static void flow_dfs_compute_reverse_init (depth_first_search_ds); static void flow_dfs_compute_reverse_add_bb (depth_first_search_ds, basic_block); static basic_block flow_dfs_compute_reverse_execute (depth_first_search_ds); static void flow_dfs_compute_reverse_finish (depth_first_search_ds); static void remove_fake_successors (basic_block); static bool flow_active_insn_p (rtx); /* Like active_insn_p, except keep the return value clobber around even after reload. */ static bool flow_active_insn_p (rtx insn) { if (active_insn_p (insn)) return true; /* A clobber of the function return value exists for buggy programs that fail to return a value. Its effect is to keep the return value from being live across the entire function. If we allow it to be skipped, we introduce the possibility for register livetime aborts. */ if (GET_CODE (PATTERN (insn)) == CLOBBER && REG_P (XEXP (PATTERN (insn), 0)) && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0))) return true; return false; } /* Return true if the block has no effect and only forwards control flow to its single destination. */ bool forwarder_block_p (basic_block bb) { rtx insn; if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR || !bb->succ || bb->succ->succ_next) return false; for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn)) if (INSN_P (insn) && flow_active_insn_p (insn)) return false; return (!INSN_P (insn) || (GET_CODE (insn) == JUMP_INSN && simplejump_p (insn)) || !flow_active_insn_p (insn)); } /* Return nonzero if we can reach target from src by falling through. */ bool can_fallthru (basic_block src, basic_block target) { rtx insn = BB_END (src); rtx insn2; edge e; if (target == EXIT_BLOCK_PTR) return true; if (src->next_bb != target) return 0; for (e = src->succ; e; e = e->succ_next) if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) return 0; insn2 = BB_HEAD (target); if (insn2 && !active_insn_p (insn2)) insn2 = next_active_insn (insn2); /* ??? Later we may add code to move jump tables offline. */ return next_active_insn (insn) == insn2; } /* Return nonzero if we could reach target from src by falling through, if the target was made adjacent. If we already have a fall-through edge to the exit block, we can't do that. */ bool could_fall_through (basic_block src, basic_block target) { edge e; if (target == EXIT_BLOCK_PTR) return true; for (e = src->succ; e; e = e->succ_next) if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) return 0; return true; } /* Mark the back edges in DFS traversal. Return nonzero if a loop (natural or otherwise) is present. Inspired by Depth_First_Search_PP described in: Advanced Compiler Design and Implementation Steven Muchnick Morgan Kaufmann, 1997 and heavily borrowed from flow_depth_first_order_compute. */ bool mark_dfs_back_edges (void) { edge *stack; int *pre; int *post; int sp; int prenum = 1; int postnum = 1; sbitmap visited; bool found = false; /* Allocate the preorder and postorder number arrays. */ pre = xcalloc (last_basic_block, sizeof (int)); post = xcalloc (last_basic_block, sizeof (int)); /* Allocate stack for back-tracking up CFG. 
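At most one outgoing edge per block is pending at any time, plus the initial edge out of the entry block, so n_basic_blocks + 1 slots suffice.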
*/ stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge)); sp = 0; /* Allocate bitmap to track nodes that have been visited. */ visited = sbitmap_alloc (last_basic_block); /* None of the nodes in the CFG have been visited yet. */ sbitmap_zero (visited); /* Push the first edge on to the stack. */ stack[sp++] = ENTRY_BLOCK_PTR->succ; while (sp) { edge e; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ e = stack[sp - 1]; src = e->src; dest = e->dest; e->flags &= ~EDGE_DFS_BACK; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) { /* Mark that we have visited the destination. */ SET_BIT (visited, dest->index); pre[dest->index] = prenum++; if (dest->succ) { /* Since the DEST node has been visited for the first time, check its successors. */ stack[sp++] = dest->succ; } else post[dest->index] = postnum++; } else { if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR && pre[src->index] >= pre[dest->index] && post[dest->index] == 0) e->flags |= EDGE_DFS_BACK, found = true; if (! e->succ_next && src != ENTRY_BLOCK_PTR) post[src->index] = postnum++; if (e->succ_next) stack[sp - 1] = e->succ_next; else sp--; } } free (pre); free (post); free (stack); sbitmap_free (visited); return found; } /* Set the flag EDGE_CAN_FALLTHRU for edges that can be fallthru. */ void set_edge_can_fallthru_flag (void) { basic_block bb; FOR_EACH_BB (bb) { edge e; for (e = bb->succ; e; e = e->succ_next) { e->flags &= ~EDGE_CAN_FALLTHRU; /* The FALLTHRU edge is also CAN_FALLTHRU edge. */ if (e->flags & EDGE_FALLTHRU) e->flags |= EDGE_CAN_FALLTHRU; } /* If the BB ends with an invertible condjump all (2) edges are CAN_FALLTHRU edges. */ if (!bb->succ || !bb->succ->succ_next || bb->succ->succ_next->succ_next) continue; if (!any_condjump_p (BB_END (bb))) continue; if (!invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0)) continue; invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0); bb->succ->flags |= EDGE_CAN_FALLTHRU; bb->succ->succ_next->flags |= EDGE_CAN_FALLTHRU; } } /* Find unreachable blocks. An unreachable block will have 0 in the reachable bit in block->flags. A nonzero value indicates the block is reachable. */ void find_unreachable_blocks (void) { edge e; basic_block *tos, *worklist, bb; tos = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks); /* Clear all the reachability flags. */ FOR_EACH_BB (bb) bb->flags &= ~BB_REACHABLE; /* Add our starting points to the worklist. Almost always there will be only one. It isn't inconceivable that we might one day directly support Fortran alternate entry points. */ for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) { *tos++ = e->dest; /* Mark the block reachable. */ e->dest->flags |= BB_REACHABLE; } /* Iterate: find everything reachable from what we've already seen. */ while (tos != worklist) { basic_block b = *--tos; for (e = b->succ; e; e = e->succ_next) if (!(e->dest->flags & BB_REACHABLE)) { *tos++ = e->dest; e->dest->flags |= BB_REACHABLE; } } free (worklist); } /* Functions to access an edge list with a vector representation. Enough data is kept such that given an index number, the pred and succ that edge represents can be determined, or given a pred and a succ, its index number can be returned. This allows algorithms which consume a lot of memory to represent the normally full matrix of edge (pred,succ) with a single indexed vector, edge (EDGE_INDEX (pred, succ)), with no wasted space in the client code due to sparse flow graphs. 
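*/

/* A minimal usage sketch, for illustration only (not part of GCC): walk the
   compressed edge list built by create_edge_list and dump every edge by its
   index, using only the interfaces defined in this section
   (create_edge_list, NUM_EDGES, INDEX_EDGE_PRED_BB, INDEX_EDGE_SUCC_BB,
   free_edge_list).  The entry and exit blocks appear with their special
   ENTRY_BLOCK / EXIT_BLOCK index values.  Kept under #if 0 so it is not
   compiled into the unit.  */
#if 0
static void
edge_list_usage_sketch (FILE *f)
{
  struct edge_list *elist = create_edge_list ();
  int ix;

  /* Every (pred, succ) pair in the flow graph has exactly one slot.  */
  for (ix = 0; ix < NUM_EDGES (elist); ix++)
    fprintf (f, "edge %d: %d -> %d\n", ix,
             INDEX_EDGE_PRED_BB (elist, ix)->index,
             INDEX_EDGE_SUCC_BB (elist, ix)->index);

  free_edge_list (elist);
}
#endif

/*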
*/ /* This functions initializes the edge list. Basically the entire flowgraph is processed, and all edges are assigned a number, and the data structure is filled in. */ struct edge_list * create_edge_list (void) { struct edge_list *elist; edge e; int num_edges; int block_count; basic_block bb; block_count = n_basic_blocks + 2; /* Include the entry and exit blocks. */ num_edges = 0; /* Determine the number of edges in the flow graph by counting successor edges on each basic block. */ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { for (e = bb->succ; e; e = e->succ_next) num_edges++; } elist = xmalloc (sizeof (struct edge_list)); elist->num_blocks = block_count; elist->num_edges = num_edges; elist->index_to_edge = xmalloc (sizeof (edge) * num_edges); num_edges = 0; /* Follow successors of blocks, and register these edges. */ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) for (e = bb->succ; e; e = e->succ_next) elist->index_to_edge[num_edges++] = e; return elist; } /* This function free's memory associated with an edge list. */ void free_edge_list (struct edge_list *elist) { if (elist) { free (elist->index_to_edge); free (elist); } } /* This function provides debug output showing an edge list. */ void print_edge_list (FILE *f, struct edge_list *elist) { int x; fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n", elist->num_blocks - 2, elist->num_edges); for (x = 0; x < elist->num_edges; x++) { fprintf (f, " %-4d - edge(", x); if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR) fprintf (f, "entry,"); else fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index); if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR) fprintf (f, "exit)\n"); else fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index); } } /* This function provides an internal consistency check of an edge list, verifying that all edges are present, and that there are no extra edges. */ void verify_edge_list (FILE *f, struct edge_list *elist) { int pred, succ, index; edge e; basic_block bb, p, s; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { for (e = bb->succ; e; e = e->succ_next) { pred = e->src->index; succ = e->dest->index; index = EDGE_INDEX (elist, e->src, e->dest); if (index == EDGE_INDEX_NO_EDGE) { fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ); continue; } if (INDEX_EDGE_PRED_BB (elist, index)->index != pred) fprintf (f, "*p* Pred for index %d should be %d not %d\n", index, pred, INDEX_EDGE_PRED_BB (elist, index)->index); if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ) fprintf (f, "*p* Succ for index %d should be %d not %d\n", index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index); } } /* We've verified that all the edges are in the list, now lets make sure there are no spurious edges in the list. */ FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb) { int found_edge = 0; for (e = p->succ; e; e = e->succ_next) if (e->dest == s) { found_edge = 1; break; } for (e = s->pred; e; e = e->pred_next) if (e->src == p) { found_edge = 1; break; } if (EDGE_INDEX (elist, p, s) == EDGE_INDEX_NO_EDGE && found_edge != 0) fprintf (f, "*** Edge (%d, %d) appears to not have an index\n", p->index, s->index); if (EDGE_INDEX (elist, p, s) != EDGE_INDEX_NO_EDGE && found_edge == 0) fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n", p->index, s->index, EDGE_INDEX (elist, p, s)); } } /* Given PRED and SUCC blocks, return the edge which connects the blocks. 
If no such edge exists, return NULL. */ edge find_edge (basic_block pred, basic_block succ) { edge e; for (e = pred->succ; e; e = e->succ_next) if (e->dest == succ) return e; return NULL; } /* This routine will determine what, if any, edge there is between a specified predecessor and successor. */ int find_edge_index (struct edge_list *edge_list, basic_block pred, basic_block succ) { int x; for (x = 0; x < NUM_EDGES (edge_list); x++) if (INDEX_EDGE_PRED_BB (edge_list, x) == pred && INDEX_EDGE_SUCC_BB (edge_list, x) == succ) return x; return (EDGE_INDEX_NO_EDGE); } /* Dump the list of basic blocks in the bitmap NODES. */ void flow_nodes_print (const char *str, const sbitmap nodes, FILE *file) { int node; if (! nodes) return; fprintf (file, "%s { ", str); EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, {fprintf (file, "%d ", node);}); fputs ("}\n", file); } /* Dump the list of edges in the array EDGE_LIST. */ void flow_edge_list_print (const char *str, const edge *edge_list, int num_edges, FILE *file) { int i; if (! edge_list) return; fprintf (file, "%s { ", str); for (i = 0; i < num_edges; i++) fprintf (file, "%d->%d ", edge_list[i]->src->index, edge_list[i]->dest->index); fputs ("}\n", file); } /* This routine will remove any fake successor edges for a basic block. When the edge is removed, it is also removed from whatever predecessor list it is in. */ static void remove_fake_successors (basic_block bb) { edge e; for (e = bb->succ; e;) { edge tmp = e; e = e->succ_next; if ((tmp->flags & EDGE_FAKE) == EDGE_FAKE) remove_edge (tmp); } } /* This routine will remove all fake edges from the flow graph. If we remove all fake successors, it will automatically remove all fake predecessors. */ void remove_fake_edges (void) { basic_block bb; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) remove_fake_successors (bb); } /* This function will add a fake edge between any block which has no successors, and the exit block. Some data flow equations require these edges to exist. */ void add_noreturn_fake_exit_edges (void) { basic_block bb; FOR_EACH_BB (bb) if (bb->succ == NULL) make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); } /* This function adds a fake edge between any infinite loops to the exit block. Some optimizations require a path from each node to the exit node. See also Morgan, Figure 3.10, pp. 82-83. The current implementation is ugly, not attempting to minimize the number of inserted fake edges. To reduce the number of fake edges to insert, add fake edges from _innermost_ loops containing only nodes not reachable from the exit block. */ void connect_infinite_loops_to_exit (void) { basic_block unvisited_block; struct depth_first_search_dsS dfs_ds; /* Perform depth-first search in the reverse graph to find nodes reachable from the exit block. */ flow_dfs_compute_reverse_init (&dfs_ds); flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR); /* Repeatedly add fake edges, updating the unreachable nodes. */ while (1) { unvisited_block = flow_dfs_compute_reverse_execute (&dfs_ds); if (!unvisited_block) break; make_edge (unvisited_block, EXIT_BLOCK_PTR, EDGE_FAKE); flow_dfs_compute_reverse_add_bb (&dfs_ds, unvisited_block); } flow_dfs_compute_reverse_finish (&dfs_ds); return; } /* Compute reverse top sort order. */ void flow_reverse_top_sort_order_compute (int *rts_order) { edge *stack; int sp; int postnum = 0; sbitmap visited; /* Allocate stack for back-tracking up CFG. 
*/ stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge)); sp = 0; /* Allocate bitmap to track nodes that have been visited. */ visited = sbitmap_alloc (last_basic_block); /* None of the nodes in the CFG have been visited yet. */ sbitmap_zero (visited); /* Push the first edge on to the stack. */ stack[sp++] = ENTRY_BLOCK_PTR->succ; while (sp) { edge e; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ e = stack[sp - 1]; src = e->src; dest = e->dest; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) { /* Mark that we have visited the destination. */ SET_BIT (visited, dest->index); if (dest->succ) /* Since the DEST node has been visited for the first time, check its successors. */ stack[sp++] = dest->succ; else rts_order[postnum++] = dest->index; } else { if (! e->succ_next && src != ENTRY_BLOCK_PTR) rts_order[postnum++] = src->index; if (e->succ_next) stack[sp - 1] = e->succ_next; else sp--; } } free (stack); sbitmap_free (visited); } /* Compute the depth first search order and store in the array DFS_ORDER if nonzero, marking the nodes visited in VISITED. If RC_ORDER is nonzero, return the reverse completion number for each node. Returns the number of nodes visited. A depth first search tries to get as far away from the starting point as quickly as possible. */ int flow_depth_first_order_compute (int *dfs_order, int *rc_order) { edge *stack; int sp; int dfsnum = 0; int rcnum = n_basic_blocks - 1; sbitmap visited; /* Allocate stack for back-tracking up CFG. */ stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge)); sp = 0; /* Allocate bitmap to track nodes that have been visited. */ visited = sbitmap_alloc (last_basic_block); /* None of the nodes in the CFG have been visited yet. */ sbitmap_zero (visited); /* Push the first edge on to the stack. */ stack[sp++] = ENTRY_BLOCK_PTR->succ; while (sp) { edge e; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ e = stack[sp - 1]; src = e->src; dest = e->dest; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) { /* Mark that we have visited the destination. */ SET_BIT (visited, dest->index); if (dfs_order) dfs_order[dfsnum] = dest->index; dfsnum++; if (dest->succ) /* Since the DEST node has been visited for the first time, check its successors. */ stack[sp++] = dest->succ; else if (rc_order) /* There are no successors for the DEST node so assign its reverse completion number. */ rc_order[rcnum--] = dest->index; } else { if (! e->succ_next && src != ENTRY_BLOCK_PTR && rc_order) /* There are no more successors for the SRC node so assign its reverse completion number. */ rc_order[rcnum--] = src->index; if (e->succ_next) stack[sp - 1] = e->succ_next; else sp--; } } free (stack); sbitmap_free (visited); /* The number of nodes visited should not be greater than n_basic_blocks. */ if (dfsnum > n_basic_blocks) abort (); /* There are some nodes left in the CFG that are unreachable. */ if (dfsnum < n_basic_blocks) abort (); return dfsnum; } struct dfst_node { unsigned nnodes; struct dfst_node **node; struct dfst_node *up; }; /* Compute a preorder transversal ordering such that a sub-tree which is the source of a cross edge appears before the sub-tree which is the destination of the cross edge. This allows for easy detection of all the entry blocks for a loop. The ordering is compute by: 1) Generating a depth first spanning tree. 
2) Walking the resulting tree from right to left. */ void flow_preorder_transversal_compute (int *pot_order) { edge e; edge *stack; int i; int max_successors; int sp; sbitmap visited; struct dfst_node *node; struct dfst_node *dfst; basic_block bb; /* Allocate stack for back-tracking up CFG. */ stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge)); sp = 0; /* Allocate the tree. */ dfst = xcalloc (last_basic_block, sizeof (struct dfst_node)); FOR_EACH_BB (bb) { max_successors = 0; for (e = bb->succ; e; e = e->succ_next) max_successors++; dfst[bb->index].node = (max_successors ? xcalloc (max_successors, sizeof (struct dfst_node *)) : NULL); } /* Allocate bitmap to track nodes that have been visited. */ visited = sbitmap_alloc (last_basic_block); /* None of the nodes in the CFG have been visited yet. */ sbitmap_zero (visited); /* Push the first edge on to the stack. */ stack[sp++] = ENTRY_BLOCK_PTR->succ; while (sp) { basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ e = stack[sp - 1]; src = e->src; dest = e->dest; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) { /* Mark that we have visited the destination. */ SET_BIT (visited, dest->index); /* Add the destination to the preorder tree. */ if (src != ENTRY_BLOCK_PTR) { dfst[src->index].node[dfst[src->index].nnodes++] = &dfst[dest->index]; dfst[dest->index].up = &dfst[src->index]; } if (dest->succ) /* Since the DEST node has been visited for the first time, check its successors. */ stack[sp++] = dest->succ; } else if (e->succ_next) stack[sp - 1] = e->succ_next; else sp--; } free (stack); sbitmap_free (visited); /* Record the preorder transversal order by walking the tree from right to left. */ i = 0; node = &dfst[ENTRY_BLOCK_PTR->next_bb->index]; pot_order[i++] = 0; while (node) { if (node->nnodes) { node = node->node[--node->nnodes]; pot_order[i++] = node - dfst; } else node = node->up; } /* Free the tree. */ for (i = 0; i < last_basic_block; i++) if (dfst[i].node) free (dfst[i].node); free (dfst); } /* Compute the depth first search order on the _reverse_ graph and store in the array DFS_ORDER, marking the nodes visited in VISITED. Returns the number of nodes visited. The computation is split into three pieces: flow_dfs_compute_reverse_init () creates the necessary data structures. flow_dfs_compute_reverse_add_bb () adds a basic block to the data structures. The block will start the search. flow_dfs_compute_reverse_execute () continues (or starts) the search using the block on the top of the stack, stopping when the stack is empty. flow_dfs_compute_reverse_finish () destroys the necessary data structures. Thus, the user will probably call ..._init(), call ..._add_bb() to add a beginning basic block to the stack, call ..._execute(), possibly add another bb to the stack and again call ..._execute(), ..., and finally call _finish(). */ /* Initialize the data structures used for depth-first search on the reverse graph. If INITIALIZE_STACK is nonzero, the exit block is added to the basic block stack. DATA is the current depth-first search context. If INITIALIZE_STACK is nonzero, there is an element on the stack. */ static void flow_dfs_compute_reverse_init (depth_first_search_ds data) { /* Allocate stack for back-tracking up CFG. */ data->stack = xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1)) * sizeof (basic_block)); data->sp = 0; /* Allocate bitmap to track nodes that have been visited. 
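Indices into it are biased by - (INVALID_BLOCK + 1), here and in the routines below, so that the special indices of the entry and exit blocks also map to valid bit positions.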
*/ data->visited_blocks = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1)); /* None of the nodes in the CFG have been visited yet. */ sbitmap_zero (data->visited_blocks); return; } /* Add the specified basic block to the top of the dfs data structures. When the search continues, it will start at the block. */ static void flow_dfs_compute_reverse_add_bb (depth_first_search_ds data, basic_block bb) { data->stack[data->sp++] = bb; SET_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1)); } /* Continue the depth-first search through the reverse graph starting with the block at the stack's top and ending when the stack is empty. Visited nodes are marked. Returns an unvisited basic block, or NULL if there is none available. */ static basic_block flow_dfs_compute_reverse_execute (depth_first_search_ds data) { basic_block bb; edge e; while (data->sp > 0) { bb = data->stack[--data->sp]; /* Perform depth-first search on adjacent vertices. */ for (e = bb->pred; e; e = e->pred_next) if (!TEST_BIT (data->visited_blocks, e->src->index - (INVALID_BLOCK + 1))) flow_dfs_compute_reverse_add_bb (data, e->src); } /* Determine if there are unvisited basic blocks. */ FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR, NULL, prev_bb) if (!TEST_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1))) return bb; return NULL; } /* Destroy the data structures needed for depth-first search on the reverse graph. */ static void flow_dfs_compute_reverse_finish (depth_first_search_ds data) { free (data->stack); sbitmap_free (data->visited_blocks); } /* Performs dfs search from BB over vertices satisfying PREDICATE; if REVERSE, go against direction of edges. Returns number of blocks found and their list in RSLT. RSLT can contain at most RSLT_MAX items. */ int dfs_enumerate_from (basic_block bb, int reverse, bool (*predicate) (basic_block, void *), basic_block *rslt, int rslt_max, void *data) { basic_block *st, lbb; int sp = 0, tv = 0; st = xcalloc (rslt_max, sizeof (basic_block)); rslt[tv++] = st[sp++] = bb; bb->flags |= BB_VISITED; while (sp) { edge e; lbb = st[--sp]; if (reverse) { for (e = lbb->pred; e; e = e->pred_next) if (!(e->src->flags & BB_VISITED) && predicate (e->src, data)) { if (tv == rslt_max) abort (); rslt[tv++] = st[sp++] = e->src; e->src->flags |= BB_VISITED; } } else { for (e = lbb->succ; e; e = e->succ_next) if (!(e->dest->flags & BB_VISITED) && predicate (e->dest, data)) { if (tv == rslt_max) abort (); rslt[tv++] = st[sp++] = e->dest; e->dest->flags |= BB_VISITED; } } } free (st); for (sp = 0; sp < tv; sp++) rslt[sp]->flags &= ~BB_VISITED; return tv; } /* Control flow graph building code for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* find_basic_blocks divides the current function's rtl into basic blocks and constructs the CFG. 
The blocks are recorded in the basic_block_info array; the CFG exists in the edge structures referenced by the blocks. find_basic_blocks also finds any unreachable loops and deletes them. Available functionality: - CFG construction find_basic_blocks - Local CFG construction find_sub_basic_blocks */ static int count_basic_blocks (rtx); static void find_basic_blocks_1 (rtx); static void make_edges (basic_block, basic_block, int); static void make_label_edge (sbitmap *, basic_block, rtx, int); static void find_bb_boundaries (basic_block); static void compute_outgoing_frequencies (basic_block); /* Return true if insn is something that should be contained inside basic block. */ bool inside_basic_block_p (rtx insn) { switch (GET_CODE (insn)) { case CODE_LABEL: /* Avoid creating of basic block for jumptables. */ return (NEXT_INSN (insn) == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN || (GET_CODE (PATTERN (NEXT_INSN (insn))) != ADDR_VEC && GET_CODE (PATTERN (NEXT_INSN (insn))) != ADDR_DIFF_VEC)); case JUMP_INSN: return (GET_CODE (PATTERN (insn)) != ADDR_VEC && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC); case CALL_INSN: case INSN: return true; case BARRIER: case NOTE: return false; default: abort (); } } /* Return true if INSN may cause control flow transfer, so it should be last in the basic block. */ bool control_flow_insn_p (rtx insn) { rtx note; switch (GET_CODE (insn)) { case NOTE: case CODE_LABEL: return false; case JUMP_INSN: /* Jump insn always causes control transfer except for tablejumps. */ return (GET_CODE (PATTERN (insn)) != ADDR_VEC && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC); case CALL_INSN: /* Noreturn and sibling call instructions terminate the basic blocks (but only if they happen unconditionally). */ if ((SIBLING_CALL_P (insn) || find_reg_note (insn, REG_NORETURN, 0)) && GET_CODE (PATTERN (insn)) != COND_EXEC) return true; /* Call insn may return to the nonlocal goto handler. */ return ((nonlocal_goto_handler_labels && (0 == (note = find_reg_note (insn, REG_EH_REGION, NULL_RTX)) || INTVAL (XEXP (note, 0)) >= 0)) /* Or may trap. */ || can_throw_internal (insn)); case INSN: return (flag_non_call_exceptions && can_throw_internal (insn)); case BARRIER: /* It is nonsense to reach barrier when looking for the end of basic block, but before dead code is eliminated this may happen. */ return false; default: abort (); } } /* Count the basic blocks of the function. */ static int count_basic_blocks (rtx f) { int count = 0; bool saw_insn = false; rtx insn; for (insn = f; insn; insn = NEXT_INSN (insn)) { /* Code labels and barriers causes current basic block to be terminated at previous real insn. */ if ((GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == BARRIER) && saw_insn) count++, saw_insn = false; /* Start basic block if needed. */ if (!saw_insn && inside_basic_block_p (insn)) saw_insn = true; /* Control flow insn causes current basic block to be terminated. */ if (saw_insn && control_flow_insn_p (insn)) count++, saw_insn = false; } if (saw_insn) count++; /* The rest of the compiler works a bit smoother when we don't have to check for the edge case of do-nothing functions with no basic blocks. */ if (count == 0) { emit_insn (gen_rtx_USE (VOIDmode, const0_rtx)); count = 1; } return count; } /* Create an edge between two basic blocks. FLAGS are auxiliary information about the edge that is accumulated between calls. */ /* Create an edge from a basic block to a label. 
*/ static void make_label_edge (sbitmap *edge_cache, basic_block src, rtx label, int flags) { if (GET_CODE (label) != CODE_LABEL) abort (); /* If the label was never emitted, this insn is junk, but avoid a crash trying to refer to BLOCK_FOR_INSN (label). This can happen as a result of a syntax error and a diagnostic has already been printed. */ if (INSN_UID (label) == 0) return; cached_make_edge (edge_cache, src, BLOCK_FOR_INSN (label), flags); } /* Create the edges generated by INSN in REGION. */ void rtl_make_eh_edge (sbitmap *edge_cache, basic_block src, rtx insn) { int is_call = GET_CODE (insn) == CALL_INSN ? EDGE_ABNORMAL_CALL : 0; rtx handlers, i; handlers = reachable_handlers (insn); for (i = handlers; i; i = XEXP (i, 1)) make_label_edge (edge_cache, src, XEXP (i, 0), EDGE_ABNORMAL | EDGE_EH | is_call); free_INSN_LIST_list (&handlers); } /* Identify the edges between basic blocks MIN to MAX. NONLOCAL_LABEL_LIST is a list of non-local labels in the function. Blocks that are otherwise unreachable may be reachable with a non-local goto. BB_EH_END is an array indexed by basic block number in which we record the list of exception regions active at the end of the basic block. */ static void make_edges (basic_block min, basic_block max, int update_p) { basic_block bb; sbitmap *edge_cache = NULL; /* Assume no computed jump; revise as we create edges. */ current_function_has_computed_jump = 0; /* If we are partitioning hot and cold basic blocks into separate sections, we cannot assume there is no computed jump. */ if (flag_reorder_blocks_and_partition) current_function_has_computed_jump = 1; /* Heavy use of computed goto in machine-generated code can lead to nearly fully-connected CFGs. In that case we spend a significant amount of time searching the edge lists for duplicates. */ if (forced_labels || cfun->max_jumptable_ents > 100) { edge_cache = sbitmap_vector_alloc (last_basic_block, last_basic_block); sbitmap_vector_zero (edge_cache, last_basic_block); if (update_p) FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb) { edge e; for (e = bb->succ; e ; e = e->succ_next) if (e->dest != EXIT_BLOCK_PTR) SET_BIT (edge_cache[bb->index], e->dest->index); } } /* By nature of the way these get numbered, ENTRY_BLOCK_PTR->next_bb block is always the entry. */ if (min == ENTRY_BLOCK_PTR->next_bb) cached_make_edge (edge_cache, ENTRY_BLOCK_PTR, min, EDGE_FALLTHRU); FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb) { rtx insn, x; enum rtx_code code; int force_fallthru = 0; edge e; if (GET_CODE (BB_HEAD (bb)) == CODE_LABEL && LABEL_ALT_ENTRY_P (BB_HEAD (bb))) cached_make_edge (NULL, ENTRY_BLOCK_PTR, bb, 0); /* Examine the last instruction of the block, and discover the ways we can leave the block. */ insn = BB_END (bb); code = GET_CODE (insn); /* A branch. */ if (code == JUMP_INSN) { rtx tmp; /* Recognize exception handling placeholders. */ if (GET_CODE (PATTERN (insn)) == RESX) rtl_make_eh_edge (edge_cache, bb, insn); /* Recognize a non-local goto as a branch outside the current function. */ else if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX)) ; /* Recognize a tablejump and do the right thing. */ else if (tablejump_p (insn, NULL, &tmp)) { rtvec vec; int j; if (GET_CODE (PATTERN (tmp)) == ADDR_VEC) vec = XVEC (PATTERN (tmp), 0); else vec = XVEC (PATTERN (tmp), 1); for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j) make_label_edge (edge_cache, bb, XEXP (RTVEC_ELT (vec, j), 0), 0); /* Some targets (eg, ARM) emit a conditional jump that also contains the out-of-range target. 
Scan for these and add an edge if necessary. */ if ((tmp = single_set (insn)) != NULL && SET_DEST (tmp) == pc_rtx && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF) make_label_edge (edge_cache, bb, XEXP (XEXP (SET_SRC (tmp), 2), 0), 0); #ifdef CASE_DROPS_THROUGH /* Silly VAXen. The ADDR_VEC is going to be in the way of us naturally detecting fallthru into the next block. */ force_fallthru = 1; #endif } /* If this is a computed jump, then mark it as reaching everything on the forced_labels list. */ else if (computed_jump_p (insn)) { current_function_has_computed_jump = 1; for (x = forced_labels; x; x = XEXP (x, 1)) make_label_edge (edge_cache, bb, XEXP (x, 0), EDGE_ABNORMAL); } /* Returns create an exit out. */ else if (returnjump_p (insn)) cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR, 0); /* Otherwise, we have a plain conditional or unconditional jump. */ else { if (! JUMP_LABEL (insn)) abort (); make_label_edge (edge_cache, bb, JUMP_LABEL (insn), 0); } } /* If this is a sibling call insn, then this is in effect a combined call and return, and so we need an edge to the exit block. No need to worry about EH edges, since we wouldn't have created the sibling call in the first place. */ if (code == CALL_INSN && SIBLING_CALL_P (insn)) cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR, EDGE_SIBCALL | EDGE_ABNORMAL); /* If this is a CALL_INSN, then mark it as reaching the active EH handler for this CALL_INSN. If we're handling non-call exceptions then any insn can reach any of the active handlers. Also mark the CALL_INSN as reaching any nonlocal goto handler. */ else if (code == CALL_INSN || flag_non_call_exceptions) { /* Add any appropriate EH edges. */ rtl_make_eh_edge (edge_cache, bb, insn); if (code == CALL_INSN && nonlocal_goto_handler_labels) { /* ??? This could be made smarter: in some cases it's possible to tell that certain calls will not do a nonlocal goto. For example, if the nested functions that do the nonlocal gotos do not have their addresses taken, then only calls to those functions or to other nested functions that use them could possibly do nonlocal gotos. */ /* We do know that a REG_EH_REGION note with a value less than 0 is guaranteed not to perform a non-local goto. */ rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (!note || INTVAL (XEXP (note, 0)) >= 0) for (x = nonlocal_goto_handler_labels; x; x = XEXP (x, 1)) make_label_edge (edge_cache, bb, XEXP (x, 0), EDGE_ABNORMAL | EDGE_ABNORMAL_CALL); } } /* Find out if we can drop through to the next block. */ insn = NEXT_INSN (insn); for (e = bb->succ; e; e = e->succ_next) if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) { insn = 0; break; } while (insn && GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK) insn = NEXT_INSN (insn); if (!insn || (bb->next_bb == EXIT_BLOCK_PTR && force_fallthru)) cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR, EDGE_FALLTHRU); else if (bb->next_bb != EXIT_BLOCK_PTR) { if (force_fallthru || insn == BB_HEAD (bb->next_bb)) cached_make_edge (edge_cache, bb, bb->next_bb, EDGE_FALLTHRU); } } if (edge_cache) sbitmap_vector_free (edge_cache); } /* Find all basic blocks of the function whose first insn is F. Collect and return a list of labels whose addresses are taken. This will be used in make_edges for use with computed gotos. 
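(In this version no label list is actually returned; make_edges walks the global forced_labels list directly when it sees a computed jump.)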
*/ static void find_basic_blocks_1 (rtx f) { rtx insn, next; rtx bb_note = NULL_RTX; rtx head = NULL_RTX; rtx end = NULL_RTX; basic_block prev = ENTRY_BLOCK_PTR; /* We process the instructions in a slightly different way than we did previously. This is so that we see a NOTE_BASIC_BLOCK after we have closed out the previous block, so that it gets attached at the proper place. Since this form should be equivalent to the previous, count_basic_blocks continues to use the old form as a check. */ for (insn = f; insn; insn = next) { enum rtx_code code = GET_CODE (insn); next = NEXT_INSN (insn); if ((GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == BARRIER) && head) { prev = create_basic_block_structure (head, end, bb_note, prev); head = end = NULL_RTX; bb_note = NULL_RTX; } if (inside_basic_block_p (insn)) { if (head == NULL_RTX) head = insn; end = insn; } if (head && control_flow_insn_p (insn)) { prev = create_basic_block_structure (head, end, bb_note, prev); head = end = NULL_RTX; bb_note = NULL_RTX; } switch (code) { case NOTE: { int kind = NOTE_LINE_NUMBER (insn); /* Look for basic block notes with which to keep the basic_block_info pointers stable. Unthread the note now; we'll put it back at the right place in create_basic_block. Or not at all if we've already found a note in this block. */ if (kind == NOTE_INSN_BASIC_BLOCK) { if (bb_note == NULL_RTX) bb_note = insn; else next = delete_insn (insn); } break; } case CODE_LABEL: case JUMP_INSN: case CALL_INSN: case INSN: case BARRIER: break; default: abort (); } } if (head != NULL_RTX) create_basic_block_structure (head, end, bb_note, prev); else if (bb_note) delete_insn (bb_note); if (last_basic_block != n_basic_blocks) abort (); clear_aux_for_blocks (); } /* Find basic blocks of the current function. F is the first insn of the function and NREGS the number of register numbers in use. */ void find_basic_blocks (rtx f, int nregs ATTRIBUTE_UNUSED, FILE *file ATTRIBUTE_UNUSED) { basic_block bb; timevar_push (TV_CFG); /* Flush out existing data. */ if (basic_block_info != NULL) { clear_edges (); /* Clear bb->aux on all extant basic blocks. We'll use this as a tag for reuse during create_basic_block, just in case some pass copies around basic block notes improperly. */ FOR_EACH_BB (bb) bb->aux = NULL; basic_block_info = NULL; } n_basic_blocks = count_basic_blocks (f); last_basic_block = 0; ENTRY_BLOCK_PTR->next_bb = EXIT_BLOCK_PTR; EXIT_BLOCK_PTR->prev_bb = ENTRY_BLOCK_PTR; /* Size the basic block table. The actual structures will be allocated by find_basic_blocks_1, since we want to keep the structure pointers stable across calls to find_basic_blocks. */ /* ??? This whole issue would be much simpler if we called find_basic_blocks exactly once, and thereafter we don't have a single long chain of instructions at all until close to the end of compilation when we actually lay them out. */ VARRAY_BB_INIT (basic_block_info, n_basic_blocks, "basic_block_info"); find_basic_blocks_1 (f); /* Discover the edges of our cfg. */ make_edges (ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR->prev_bb, 0); /* Do very simple cleanup now, for the benefit of code that runs between here and cleanup_cfg, e.g. thread_prologue_and_epilogue_insns. */ tidy_fallthru_edges (); #ifdef ENABLE_CHECKING verify_flow_info (); #endif timevar_pop (TV_CFG); } /* State of basic block as seen by find_sub_basic_blocks. 
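BLOCK_ORIGINAL marks a block that existed before splitting, BLOCK_TO_SPLIT marks a block whose insns still need to be scanned for internal boundaries, and BLOCK_NEW (the default, zero) marks a block created by the splitting itself. The state is stashed in bb->aux through the STATE and SET_STATE accessors below.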
*/ enum state {BLOCK_NEW = 0, BLOCK_ORIGINAL, BLOCK_TO_SPLIT}; #define STATE(BB) (enum state) ((size_t) (BB)->aux) #define SET_STATE(BB, STATE) ((BB)->aux = (void *) (size_t) (STATE)) /* Scan basic block BB for possible BB boundaries inside the block and create new basic blocks in the progress. */ static void find_bb_boundaries (basic_block bb) { rtx insn = BB_HEAD (bb); rtx end = BB_END (bb); rtx flow_transfer_insn = NULL_RTX; edge fallthru = NULL; if (insn == BB_END (bb)) return; if (GET_CODE (insn) == CODE_LABEL) insn = NEXT_INSN (insn); /* Scan insn chain and try to find new basic block boundaries. */ while (1) { enum rtx_code code = GET_CODE (insn); /* On code label, split current basic block. */ if (code == CODE_LABEL) { fallthru = split_block (bb, PREV_INSN (insn)); if (flow_transfer_insn) BB_END (bb) = flow_transfer_insn; bb = fallthru->dest; remove_edge (fallthru); flow_transfer_insn = NULL_RTX; if (LABEL_ALT_ENTRY_P (insn)) make_edge (ENTRY_BLOCK_PTR, bb, 0); } /* In case we've previously seen an insn that effects a control flow transfer, split the block. */ if (flow_transfer_insn && inside_basic_block_p (insn)) { fallthru = split_block (bb, PREV_INSN (insn)); BB_END (bb) = flow_transfer_insn; bb = fallthru->dest; remove_edge (fallthru); flow_transfer_insn = NULL_RTX; } if (control_flow_insn_p (insn)) flow_transfer_insn = insn; if (insn == end) break; insn = NEXT_INSN (insn); } /* In case expander replaced normal insn by sequence terminating by return and barrier, or possibly other sequence not behaving like ordinary jump, we need to take care and move basic block boundary. */ if (flow_transfer_insn) BB_END (bb) = flow_transfer_insn; /* We've possibly replaced the conditional jump by conditional jump followed by cleanup at fallthru edge, so the outgoing edges may be dead. */ purge_dead_edges (bb); } /* Assume that frequency of basic block B is known. Compute frequencies and probabilities of outgoing edges. */ static void compute_outgoing_frequencies (basic_block b) { edge e, f; if (b->succ && b->succ->succ_next && !b->succ->succ_next->succ_next) { rtx note = find_reg_note (BB_END (b), REG_BR_PROB, NULL); int probability; if (!note) return; probability = INTVAL (XEXP (note, 0)); e = BRANCH_EDGE (b); e->probability = probability; e->count = ((b->count * probability + REG_BR_PROB_BASE / 2) / REG_BR_PROB_BASE); f = FALLTHRU_EDGE (b); f->probability = REG_BR_PROB_BASE - probability; f->count = b->count - e->count; } if (b->succ && !b->succ->succ_next) { e = b->succ; e->probability = REG_BR_PROB_BASE; e->count = b->count; } } /* Assume that someone emitted code with control flow instructions to the basic block. Update the data structure. */ void find_many_sub_basic_blocks (sbitmap blocks) { basic_block bb, min, max; FOR_EACH_BB (bb) SET_STATE (bb, TEST_BIT (blocks, bb->index) ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL); FOR_EACH_BB (bb) if (STATE (bb) == BLOCK_TO_SPLIT) find_bb_boundaries (bb); FOR_EACH_BB (bb) if (STATE (bb) != BLOCK_ORIGINAL) break; min = max = bb; for (; bb != EXIT_BLOCK_PTR; bb = bb->next_bb) if (STATE (bb) != BLOCK_ORIGINAL) max = bb; /* Now re-scan and wire in all edges. This expect simple (conditional) jumps at the end of each new basic blocks. */ make_edges (min, max, 1); /* Update branch probabilities. Expect only (un)conditional jumps to be created with only the forward edges. 
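As an illustration of the scaling compute_outgoing_frequencies applies (taking REG_BR_PROB_BASE to be 10000): a REG_BR_PROB note of 2500 on a two-successor block with count 400 gives the branch edge probability 2500 and count (400 * 2500 + 5000) / 10000 = 100, while the fallthru edge gets probability 10000 - 2500 = 7500 and the remaining count of 300.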
*/ FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb) { edge e; if (STATE (bb) == BLOCK_ORIGINAL) continue; if (STATE (bb) == BLOCK_NEW) { bb->count = 0; bb->frequency = 0; for (e = bb->pred; e; e = e->pred_next) { bb->count += e->count; bb->frequency += EDGE_FREQUENCY (e); } } compute_outgoing_frequencies (bb); } FOR_EACH_BB (bb) SET_STATE (bb, 0); } /* Like above but for single basic block only. */ void find_sub_basic_blocks (basic_block bb) { basic_block min, max, b; basic_block next = bb->next_bb; min = bb; find_bb_boundaries (bb); max = next->prev_bb; /* Now re-scan and wire in all edges. This expect simple (conditional) jumps at the end of each new basic blocks. */ make_edges (min, max, 1); /* Update branch probabilities. Expect only (un)conditional jumps to be created with only the forward edges. */ FOR_BB_BETWEEN (b, min, max->next_bb, next_bb) { edge e; if (b != min) { b->count = 0; b->frequency = 0; for (e = b->pred; e; e = e->pred_next) { b->count += e->count; b->frequency += EDGE_FREQUENCY (e); } } compute_outgoing_frequencies (b); } } /* Control flow optimization code for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains optimizer of the control flow. The main entry point is cleanup_cfg. Following optimizations are performed: - Unreachable blocks removal - Edge forwarding (edge to the forwarder block is forwarded to its successor. Simplification of the branch instruction is performed by underlying infrastructure so branch can be converted to simplejump or eliminated). - Cross jumping (tail merging) - Conditional jump-around-simplejump simplification - Basic block merging. */ /* cleanup_cfg maintains following flags for each basic block. */ enum bb_flags { /* Set if BB is the forwarder block to avoid too many forwarder_block_p calls. */ BB_FORWARDER_BLOCK = 1, BB_NONTHREADABLE_BLOCK = 2 }; #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux #define BB_SET_FLAG(BB, FLAG) \ (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG)) #define BB_CLEAR_FLAG(BB, FLAG) \ (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG)) #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK) /* Set to true when we are running first pass of try_optimize_cfg loop. 
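The flag works together with the BB_DIRTY bit: on the first pass everything is examined, while on later passes jump threading and cross-jump attempts are skipped unless one of the blocks involved has changed since the previous iteration.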
*/ static bool first_pass; static bool try_crossjump_to_edge (int, edge, edge); static bool try_crossjump_bb (int, basic_block); static bool outgoing_edges_match (int, basic_block, basic_block); static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *); static bool insns_match_p (int, rtx, rtx); static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block); static void merge_blocks_move_successor_nojumps (basic_block, basic_block); static bool try_optimize_cfg (int); static bool try_simplify_condjump (basic_block); static bool try_forward_edges (int, basic_block); static edge thread_jump (int, edge, basic_block); static bool mark_effect (rtx, bitmap); static void notice_new_block (basic_block); static void update_forwarder_flag (basic_block); static int mentions_nonequal_regs (rtx *, void *); static void merge_memattrs (rtx, rtx); /* Set flags for newly created block. */ static void notice_new_block (basic_block bb) { if (!bb) return; if (forwarder_block_p (bb)) BB_SET_FLAG (bb, BB_FORWARDER_BLOCK); } /* Recompute forwarder flag after block has been modified. */ static void update_forwarder_flag (basic_block bb) { if (forwarder_block_p (bb)) BB_SET_FLAG (bb, BB_FORWARDER_BLOCK); else BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK); } /* Simplify a conditional jump around an unconditional jump. Return true if something changed. */ static bool try_simplify_condjump (basic_block cbranch_block) { basic_block jump_block, jump_dest_block, cbranch_dest_block; edge cbranch_jump_edge, cbranch_fallthru_edge; rtx cbranch_insn; rtx insn, next; rtx end; /* Verify that there are exactly two successors. */ if (!cbranch_block->succ || !cbranch_block->succ->succ_next || cbranch_block->succ->succ_next->succ_next) return false; /* Verify that we've got a normal conditional branch at the end of the block. */ cbranch_insn = BB_END (cbranch_block); if (!any_condjump_p (cbranch_insn)) return false; cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block); cbranch_jump_edge = BRANCH_EDGE (cbranch_block); /* The next block must not have multiple predecessors, must not be the last block in the function, and must contain just the unconditional jump. */ jump_block = cbranch_fallthru_edge->dest; if (jump_block->pred->pred_next || jump_block->next_bb == EXIT_BLOCK_PTR || !FORWARDER_BLOCK_P (jump_block)) return false; jump_dest_block = jump_block->succ->dest; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. */ if (flag_reorder_blocks_and_partition && (jump_block->partition != jump_dest_block->partition || cbranch_jump_edge->crossing_edge)) return false; /* The conditional branch must target the block after the unconditional branch. */ cbranch_dest_block = cbranch_jump_edge->dest; if (cbranch_dest_block == EXIT_BLOCK_PTR || !can_fallthru (jump_block, cbranch_dest_block)) return false; /* Invert the conditional branch. */ if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0)) return false; if (dump_file) fprintf (dump_file, "Simplifying condjump %i around jump %i\n", INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block))); /* Success. Update the CFG to match. Note that after this point the edge variable names appear backwards; the redirection is done this way to preserve edge profile data. 
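Concretely, invert_jump makes the condition that used to take the branch fall through instead, so the edge that carried the taken-branch profile still leads to CBRANCH_DEST_BLOCK (now as the fallthru edge), while the old fallthru edge is retargeted at JUMP_DEST_BLOCK and becomes the branch edge; counts and probabilities therefore stay attached to the paths on which they were measured.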
*/ cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge, cbranch_dest_block); cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge, jump_dest_block); cbranch_jump_edge->flags |= EDGE_FALLTHRU; cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU; update_br_prob_note (cbranch_block); end = BB_END (jump_block); /* Deleting a block may produce unreachable code warning even when we are not deleting anything live. Suppress it by moving all the line number notes out of the block. */ for (insn = BB_HEAD (jump_block); insn != NEXT_INSN (BB_END (jump_block)); insn = next) { next = NEXT_INSN (insn); if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0) { if (insn == BB_END (jump_block)) { BB_END (jump_block) = PREV_INSN (insn); if (insn == end) break; } reorder_insns_nobb (insn, insn, end); end = insn; } } /* Delete the block with the unconditional jump, and clean up the mess. */ delete_basic_block (jump_block); tidy_fallthru_edge (cbranch_jump_edge); return true; } /* Attempt to prove that operation is NOOP using CSElib or mark the effect on register. Used by jump threading. */ static bool mark_effect (rtx exp, regset nonequal) { int regno; rtx dest; switch (GET_CODE (exp)) { /* In case we do clobber the register, mark it as equal, as we know the value is dead so it don't have to match. */ case CLOBBER: if (REG_P (XEXP (exp, 0))) { dest = XEXP (exp, 0); regno = REGNO (dest); CLEAR_REGNO_REG_SET (nonequal, regno); if (regno < FIRST_PSEUDO_REGISTER) { int n = hard_regno_nregs[regno][GET_MODE (dest)]; while (--n > 0) CLEAR_REGNO_REG_SET (nonequal, regno + n); } } return false; case SET: if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp))) return false; dest = SET_DEST (exp); if (dest == pc_rtx) return false; if (!REG_P (dest)) return true; regno = REGNO (dest); SET_REGNO_REG_SET (nonequal, regno); if (regno < FIRST_PSEUDO_REGISTER) { int n = hard_regno_nregs[regno][GET_MODE (dest)]; while (--n > 0) SET_REGNO_REG_SET (nonequal, regno + n); } return false; default: return false; } } /* Return nonzero if X is a register set in regset DATA. Called via for_each_rtx. */ static int mentions_nonequal_regs (rtx *x, void *data) { regset nonequal = (regset) data; if (REG_P (*x)) { int regno; regno = REGNO (*x); if (REGNO_REG_SET_P (nonequal, regno)) return 1; if (regno < FIRST_PSEUDO_REGISTER) { int n = hard_regno_nregs[regno][GET_MODE (*x)]; while (--n > 0) if (REGNO_REG_SET_P (nonequal, regno + n)) return 1; } } return 0; } /* Attempt to prove that the basic block B will have no side effects and always continues in the same edge if reached via E. Return the edge if exist, NULL otherwise. */ static edge thread_jump (int mode, edge e, basic_block b) { rtx set1, set2, cond1, cond2, insn; enum rtx_code code1, code2, reversed_code2; bool reverse1 = false; int i; regset nonequal; bool failed = false; if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK) return NULL; /* At the moment, we do handle only conditional jumps, but later we may want to extend this code to tablejumps and others. */ if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next) return NULL; if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next) { BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK); return NULL; } /* Second branch must end with onlyjump, as we will eliminate the jump. 
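The jump ending E->SRC only needs to be a conditional jump; it is left in place and, if threading succeeds, merely redirected by the caller.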
*/ if (!any_condjump_p (BB_END (e->src))) return NULL; if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b))) { BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK); return NULL; } set1 = pc_set (BB_END (e->src)); set2 = pc_set (BB_END (b)); if (((e->flags & EDGE_FALLTHRU) != 0) != (XEXP (SET_SRC (set1), 1) == pc_rtx)) reverse1 = true; cond1 = XEXP (SET_SRC (set1), 0); cond2 = XEXP (SET_SRC (set2), 0); if (reverse1) code1 = reversed_comparison_code (cond1, BB_END (e->src)); else code1 = GET_CODE (cond1); code2 = GET_CODE (cond2); reversed_code2 = reversed_comparison_code (cond2, BB_END (b)); if (!comparison_dominates_p (code1, code2) && !comparison_dominates_p (code1, reversed_code2)) return NULL; /* Ensure that the comparison operators are equivalent. ??? This is far too pessimistic. We should allow swapped operands, different CCmodes, or for example comparisons for interval, that dominate even when operands are not equivalent. */ if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) return NULL; /* Short circuit cases where block B contains some side effects, as we can't safely bypass it. */ for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)); insn = NEXT_INSN (insn)) if (INSN_P (insn) && side_effects_p (PATTERN (insn))) { BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK); return NULL; } cselib_init (false); /* First process all values computed in the source basic block. */ for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src)); insn = NEXT_INSN (insn)) if (INSN_P (insn)) cselib_process_insn (insn); nonequal = BITMAP_XMALLOC(); CLEAR_REG_SET (nonequal); /* Now assume that we've continued by the edge E to B and continue processing as if it were same basic block. Our goal is to prove that whole block is an NOOP. */ for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { rtx pat = PATTERN (insn); if (GET_CODE (pat) == PARALLEL) { for (i = 0; i < XVECLEN (pat, 0); i++) failed |= mark_effect (XVECEXP (pat, 0, i), nonequal); } else failed |= mark_effect (pat, nonequal); } cselib_process_insn (insn); } /* Later we should clear nonequal of dead registers. So far we don't have life information in cfg_cleanup. */ if (failed) { BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK); goto failed_exit; } /* cond2 must not mention any register that is not equal to the former block. */ if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal)) goto failed_exit; /* In case liveness information is available, we need to prove equivalence only of the live values. */ if (mode & CLEANUP_UPDATE_LIFE) AND_REG_SET (nonequal, b->global_live_at_end); EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;); BITMAP_XFREE (nonequal); cselib_finish (); if ((comparison_dominates_p (code1, code2) != 0) != (XEXP (SET_SRC (set2), 1) == pc_rtx)) return BRANCH_EDGE (b); else return FALLTHRU_EDGE (b); failed_exit: BITMAP_XFREE (nonequal); cselib_finish (); return NULL; } /* Attempt to forward edges leaving basic block B. Return true if successful. */ static bool try_forward_edges (int mode, basic_block b) { bool changed = false; edge e, next, *threaded_edges = NULL; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. 
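A REG_CROSSING_JUMP note on the jump at the end of B identifies such a section-crossing jump, in which case we leave all of B's outgoing edges alone.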
*/ if (flag_reorder_blocks_and_partition && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)) return false; for (e = b->succ; e; e = next) { basic_block target, first; int counter; bool threaded = false; int nthreaded_edges = 0; bool may_thread = first_pass | (b->flags & BB_DIRTY); next = e->succ_next; /* Skip complex edges because we don't know how to update them. Still handle fallthru edges, as we can succeed to forward fallthru edge to the same place as the branch edge of conditional branch and turn conditional branch to an unconditional branch. */ if (e->flags & EDGE_COMPLEX) continue; target = first = e->dest; counter = 0; while (counter < n_basic_blocks) { basic_block new_target = NULL; bool new_target_threaded = false; may_thread |= target->flags & BB_DIRTY; if (FORWARDER_BLOCK_P (target) && target->succ->dest != EXIT_BLOCK_PTR) { /* Bypass trivial infinite loops. */ if (target == target->succ->dest) counter = n_basic_blocks; new_target = target->succ->dest; } /* Allow to thread only over one edge at time to simplify updating of probabilities. */ else if ((mode & CLEANUP_THREADING) && may_thread) { edge t = thread_jump (mode, e, target); if (t) { if (!threaded_edges) threaded_edges = xmalloc (sizeof (*threaded_edges) * n_basic_blocks); else { int i; /* Detect an infinite loop across blocks not including the start block. */ for (i = 0; i < nthreaded_edges; ++i) if (threaded_edges[i] == t) break; if (i < nthreaded_edges) { counter = n_basic_blocks; break; } } /* Detect an infinite loop across the start block. */ if (t->dest == b) break; if (nthreaded_edges >= n_basic_blocks) abort (); threaded_edges[nthreaded_edges++] = t; new_target = t->dest; new_target_threaded = true; } } if (!new_target) break; /* Avoid killing of loop pre-headers, as it is the place loop optimizer wants to hoist code to. For fallthru forwarders, the LOOP_BEG note must appear between the header of block and CODE_LABEL of the loop, for non forwarders it must appear before the JUMP_INSN. */ if ((mode & CLEANUP_PRE_LOOP) && optimize) { rtx insn = (target->succ->flags & EDGE_FALLTHRU ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target))); if (GET_CODE (insn) != NOTE) insn = NEXT_INSN (insn); for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn); insn = NEXT_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) break; if (GET_CODE (insn) == NOTE) break; /* Do not clean up branches to just past the end of a loop at this time; it can mess up the loop optimizer's recognition of some patterns. */ insn = PREV_INSN (BB_HEAD (target)); if (insn && GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END) break; } counter++; target = new_target; threaded |= new_target_threaded; } if (counter >= n_basic_blocks) { if (dump_file) fprintf (dump_file, "Infinite loop in BB %i.\n", target->index); } else if (target == first) ; /* We didn't do anything. */ else { /* Save the values now, as the edge may get removed. */ gcov_type edge_count = e->count; int edge_probability = e->probability; int edge_frequency; int n = 0; /* Don't force if target is exit block. */ if (threaded && target != EXIT_BLOCK_PTR) { notice_new_block (redirect_edge_and_branch_force (e, target)); if (dump_file) fprintf (dump_file, "Conditionals threaded.\n"); } else if (!redirect_edge_and_branch (e, target)) { if (dump_file) fprintf (dump_file, "Forwarding edge %i->%i to %i failed.\n", b->index, e->dest->index, target->index); continue; } /* We successfully forwarded the edge. 
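The forwarder blocks between the old destination and TARGET are now bypassed by E, yet their counts and frequencies still include the executions that have just been rerouted.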
Now update profile data: for each edge we traversed in the chain, remove the original edge's execution count. */ edge_frequency = ((edge_probability * b->frequency + REG_BR_PROB_BASE / 2) / REG_BR_PROB_BASE); if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b)) BB_SET_FLAG (b, BB_FORWARDER_BLOCK); do { edge t; first->count -= edge_count; if (first->count < 0) first->count = 0; first->frequency -= edge_frequency; if (first->frequency < 0) first->frequency = 0; if (first->succ->succ_next) { edge e; int prob; if (n >= nthreaded_edges) abort (); t = threaded_edges [n++]; if (t->src != first) abort (); if (first->frequency) prob = edge_frequency * REG_BR_PROB_BASE / first->frequency; else prob = 0; if (prob > t->probability) prob = t->probability; t->probability -= prob; prob = REG_BR_PROB_BASE - prob; if (prob <= 0) { first->succ->probability = REG_BR_PROB_BASE; first->succ->succ_next->probability = 0; } else for (e = first->succ; e; e = e->succ_next) e->probability = ((e->probability * REG_BR_PROB_BASE) / (double) prob); update_br_prob_note (first); } else { /* It is possible that as the result of threading we've removed edge as it is threaded to the fallthru edge. Avoid getting out of sync. */ if (n < nthreaded_edges && first == threaded_edges [n]->src) n++; t = first->succ; } t->count -= edge_count; if (t->count < 0) t->count = 0; first = t->dest; } while (first != target); changed = true; } } if (threaded_edges) free (threaded_edges); return changed; } /* Blocks A and B are to be merged into a single block. A has no incoming fallthru edge, so it can be moved before B without adding or modifying any jumps (aside from the jump from A to B). */ static void merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b) { rtx barrier; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. */ if (flag_reorder_blocks_and_partition && (a->partition != b->partition || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX))) return; barrier = next_nonnote_insn (BB_END (a)); if (GET_CODE (barrier) != BARRIER) abort (); delete_insn (barrier); /* Move block and loop notes out of the chain so that we do not disturb their order. ??? A better solution would be to squeeze out all the non-nested notes and adjust the block trees appropriately. Even better would be to have a tighter connection between block trees and rtl so that this is not necessary. */ if (squeeze_notes (&BB_HEAD (a), &BB_END (a))) abort (); /* Scramble the insn chain. */ if (BB_END (a) != PREV_INSN (BB_HEAD (b))) reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b))); a->flags |= BB_DIRTY; if (dump_file) fprintf (dump_file, "Moved block %d before %d and merged.\n", a->index, b->index); /* Swap the records for the two blocks around. */ unlink_block (a); link_block (a, b->prev_bb); /* Now blocks A and B are contiguous. Merge them. */ merge_blocks (a, b); } /* Blocks A and B are to be merged into a single block. B has no outgoing fallthru edge, so it can be moved after A without adding or modifying any jumps (aside from the jump from A to B). */ static void merge_blocks_move_successor_nojumps (basic_block a, basic_block b) { rtx barrier, real_b_end; rtx label, table; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. 
*/ if (flag_reorder_blocks_and_partition && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX) || a->partition != b->partition)) return; real_b_end = BB_END (b); /* If there is a jump table following block B temporarily add the jump table to block B so that it will also be moved to the correct location. */ if (tablejump_p (BB_END (b), &label, &table) && prev_active_insn (label) == BB_END (b)) { BB_END (b) = table; } /* There had better have been a barrier there. Delete it. */ barrier = NEXT_INSN (BB_END (b)); if (barrier && GET_CODE (barrier) == BARRIER) delete_insn (barrier); /* Move block and loop notes out of the chain so that we do not disturb their order. ??? A better solution would be to squeeze out all the non-nested notes and adjust the block trees appropriately. Even better would be to have a tighter connection between block trees and rtl so that this is not necessary. */ if (squeeze_notes (&BB_HEAD (b), &BB_END (b))) abort (); /* Scramble the insn chain. */ reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a)); /* Restore the real end of b. */ BB_END (b) = real_b_end; if (dump_file) fprintf (dump_file, "Moved block %d after %d and merged.\n", b->index, a->index); /* Now blocks A and B are contiguous. Merge them. */ merge_blocks (a, b); } /* Attempt to merge basic blocks that are potentially non-adjacent. Return NULL iff the attempt failed, otherwise return basic block where cleanup_cfg should continue. Because the merging commonly moves basic block away or introduces another optimization possibility, return basic block just before B so cleanup_cfg don't need to iterate. It may be good idea to return basic block before C in the case C has been moved after B and originally appeared earlier in the insn sequence, but we have no information available about the relative ordering of these two. Hopefully it is not too common. */ static basic_block merge_blocks_move (edge e, basic_block b, basic_block c, int mode) { basic_block next; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. */ if (flag_reorder_blocks_and_partition && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX) || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX) || b->partition != c->partition)) return NULL; /* If B has a fallthru edge to C, no need to move anything. */ if (e->flags & EDGE_FALLTHRU) { int b_index = b->index, c_index = c->index; merge_blocks (b, c); update_forwarder_flag (b); if (dump_file) fprintf (dump_file, "Merged %d and %d without moving.\n", b_index, c_index); return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb; } /* Otherwise we will need to move code around. Do that only if expensive transformations are allowed. */ else if (mode & CLEANUP_EXPENSIVE) { edge tmp_edge, b_fallthru_edge; bool c_has_outgoing_fallthru; bool b_has_incoming_fallthru; /* Avoid overactive code motion, as the forwarder blocks should be eliminated by edge redirection instead. One exception might have been if B is a forwarder block and C has no fallthru edge, but that should be cleaned up by bb-reorder instead. */ if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c)) return NULL; /* We must make sure to not munge nesting of lexical blocks, and loop notes. This is done by squeezing out all the notes and leaving them there to lie. Not ideal, but functional. 
*/ for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next) if (tmp_edge->flags & EDGE_FALLTHRU) break; c_has_outgoing_fallthru = (tmp_edge != NULL); for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next) if (tmp_edge->flags & EDGE_FALLTHRU) break; b_has_incoming_fallthru = (tmp_edge != NULL); b_fallthru_edge = tmp_edge; next = b->prev_bb; if (next == c) next = next->prev_bb; /* Otherwise, we're going to try to move C after B. If C does not have an outgoing fallthru, then it can be moved immediately after B without introducing or modifying jumps. */ if (! c_has_outgoing_fallthru) { merge_blocks_move_successor_nojumps (b, c); return next == ENTRY_BLOCK_PTR ? next->next_bb : next; } /* If B does not have an incoming fallthru, then it can be moved immediately before C without introducing or modifying jumps. C cannot be the first block, so we do not have to worry about accessing a non-existent block. */ if (b_has_incoming_fallthru) { basic_block bb; if (b_fallthru_edge->src == ENTRY_BLOCK_PTR) return NULL; bb = force_nonfallthru (b_fallthru_edge); if (bb) notice_new_block (bb); } merge_blocks_move_predecessor_nojumps (b, c); return next == ENTRY_BLOCK_PTR ? next->next_bb : next; } return NULL; } /* Removes the memory attributes of MEM expression if they are not equal. */ void merge_memattrs (rtx x, rtx y) { int i; int j; enum rtx_code code; const char *fmt; if (x == y) return; if (x == 0 || y == 0) return; code = GET_CODE (x); if (code != GET_CODE (y)) return; if (GET_MODE (x) != GET_MODE (y)) return; if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y)) { if (! MEM_ATTRS (x)) MEM_ATTRS (y) = 0; else if (! MEM_ATTRS (y)) MEM_ATTRS (x) = 0; else { if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y)) { set_mem_alias_set (x, 0); set_mem_alias_set (y, 0); } if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y))) { set_mem_expr (x, 0); set_mem_expr (y, 0); set_mem_offset (x, 0); set_mem_offset (y, 0); } else if (MEM_OFFSET (x) != MEM_OFFSET (y)) { set_mem_offset (x, 0); set_mem_offset (y, 0); } set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y))); set_mem_size (y, MEM_SIZE (x)); set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y))); set_mem_align (y, MEM_ALIGN (x)); } } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { switch (fmt[i]) { case 'E': /* Two vectors must have the same length. */ if (XVECLEN (x, i) != XVECLEN (y, i)) return; for (j = 0; j < XVECLEN (x, i); j++) merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j)); break; case 'e': merge_memattrs (XEXP (x, i), XEXP (y, i)); } } return; } /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */ static bool insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2) { rtx p1, p2; /* Verify that I1 and I2 are equivalent. */ if (GET_CODE (i1) != GET_CODE (i2)) return false; p1 = PATTERN (i1); p2 = PATTERN (i2); if (GET_CODE (p1) != GET_CODE (p2)) return false; /* If this is a CALL_INSN, compare register usage information. If we don't check this on stack register machines, the two CALL_INSNs might be merged leaving reg-stack.c with mismatching numbers of stack registers in the same basic block. If we don't check this on machines with delay slots, a delay slot may be filled that clobbers a parameter expected by the subroutine. ??? We take the simple route for now and assume that if they're equal, they were constructed identically. 
*/ if (GET_CODE (i1) == CALL_INSN && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1), CALL_INSN_FUNCTION_USAGE (i2)) || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))) return false; #ifdef STACK_REGS /* If cross_jump_death_matters is not 0, the insn's mode indicates whether or not the insn contains any stack-like regs. */ if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1)) { /* If register stack conversion has already been done, then death notes must also be compared before it is certain that the two instruction streams match. */ rtx note; HARD_REG_SET i1_regset, i2_regset; CLEAR_HARD_REG_SET (i1_regset); CLEAR_HARD_REG_SET (i2_regset); for (note = REG_NOTES (i1); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0))) SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0))); for (note = REG_NOTES (i2); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0))) SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0))); GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done); return false; done: ; } #endif if (reload_completed ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2)) return true; /* Do not do EQUIV substitution after reload. First, we're undoing the work of reload_cse. Second, we may be undoing the work of the post- reload splitting pass. */ /* ??? Possibly add a new phase switch variable that can be used by targets to disallow the troublesome insns after splitting. */ if (!reload_completed) { /* The following code helps take care of G++ cleanups. */ rtx equiv1 = find_reg_equal_equiv_note (i1); rtx equiv2 = find_reg_equal_equiv_note (i2); if (equiv1 && equiv2 /* If the equivalences are not to a constant, they may reference pseudos that no longer exist, so we can't use them. */ && (! reload_completed || (CONSTANT_P (XEXP (equiv1, 0)) && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0))))) { rtx s1 = single_set (i1); rtx s2 = single_set (i2); if (s1 != 0 && s2 != 0 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2))) { validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1); validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1); if (! rtx_renumbered_equal_p (p1, p2)) cancel_changes (0); else if (apply_change_group ()) return true; } } } return false; } /* Look through the insns at the end of BB1 and BB2 and find the longest sequence that are equivalent. Store the first insns for that sequence in *F1 and *F2 and return the sequence length. To simplify callers of this function, if the blocks match exactly, store the head of the blocks in *F1 and *F2. */ static int flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1, basic_block bb2, rtx *f1, rtx *f2) { rtx i1, i2, last1, last2, afterlast1, afterlast2; int ninsns = 0; /* Skip simple jumps at the end of the blocks. Complex jumps still need to be compared for equivalence, which we'll do below. */ i1 = BB_END (bb1); last1 = afterlast1 = last2 = afterlast2 = NULL_RTX; if (onlyjump_p (i1) || (returnjump_p (i1) && !side_effects_p (PATTERN (i1)))) { last1 = i1; i1 = PREV_INSN (i1); } i2 = BB_END (bb2); if (onlyjump_p (i2) || (returnjump_p (i2) && !side_effects_p (PATTERN (i2)))) { last2 = i2; /* Count everything except for unconditional jump as insn. */ if (!simplejump_p (i2) && !returnjump_p (i2) && last1) ninsns++; i2 = PREV_INSN (i2); } while (true) { /* Ignore notes. 
*/ while (!INSN_P (i1) && i1 != BB_HEAD (bb1)) i1 = PREV_INSN (i1); while (!INSN_P (i2) && i2 != BB_HEAD (bb2)) i2 = PREV_INSN (i2); if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2)) break; if (!insns_match_p (mode, i1, i2)) break; merge_memattrs (i1, i2); /* Don't begin a cross-jump with a NOTE insn. */ if (INSN_P (i1)) { /* If the merged insns have different REG_EQUAL notes, then remove them. */ rtx equiv1 = find_reg_equal_equiv_note (i1); rtx equiv2 = find_reg_equal_equiv_note (i2); if (equiv1 && !equiv2) remove_note (i1, equiv1); else if (!equiv1 && equiv2) remove_note (i2, equiv2); else if (equiv1 && equiv2 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0))) { remove_note (i1, equiv1); remove_note (i2, equiv2); } afterlast1 = last1, afterlast2 = last2; last1 = i1, last2 = i2; ninsns++; } i1 = PREV_INSN (i1); i2 = PREV_INSN (i2); } #ifdef HAVE_cc0 /* Don't allow the insn after a compare to be shared by cross-jumping unless the compare is also shared. */ if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1)) last1 = afterlast1, last2 = afterlast2, ninsns--; #endif /* Include preceding notes and labels in the cross-jump. One, this may bring us to the head of the blocks as requested above. Two, it keeps line number notes as matched as may be. */ if (ninsns) { while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1))) last1 = PREV_INSN (last1); if (last1 != BB_HEAD (bb1) && GET_CODE (PREV_INSN (last1)) == CODE_LABEL) last1 = PREV_INSN (last1); while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2))) last2 = PREV_INSN (last2); if (last2 != BB_HEAD (bb2) && GET_CODE (PREV_INSN (last2)) == CODE_LABEL) last2 = PREV_INSN (last2); *f1 = last1; *f2 = last2; } return ninsns; } /* Return true iff outgoing edges of BB1 and BB2 match, together with the branch instruction. This means that if we commonize the control flow before end of the basic block, the semantic remains unchanged. We may assume that there exists one edge with a common destination. */ static bool outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) { int nehedges1 = 0, nehedges2 = 0; edge fallthru1 = 0, fallthru2 = 0; edge e1, e2; /* If BB1 has only one successor, we may be looking at either an unconditional jump, or a fake edge to exit. */ if (bb1->succ && !bb1->succ->succ_next && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 && (GET_CODE (BB_END (bb1)) != JUMP_INSN || simplejump_p (BB_END (bb1)))) return (bb2->succ && !bb2->succ->succ_next && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 && (GET_CODE (BB_END (bb2)) != JUMP_INSN || simplejump_p (BB_END (bb2)))); /* Match conditional jumps - this may get tricky when fallthru and branch edges are crossed. */ if (bb1->succ && bb1->succ->succ_next && !bb1->succ->succ_next->succ_next && any_condjump_p (BB_END (bb1)) && onlyjump_p (BB_END (bb1))) { edge b1, f1, b2, f2; bool reverse, match; rtx set1, set2, cond1, cond2; enum rtx_code code1, code2; if (!bb2->succ || !bb2->succ->succ_next || bb2->succ->succ_next->succ_next || !any_condjump_p (BB_END (bb2)) || !onlyjump_p (BB_END (bb2))) return false; b1 = BRANCH_EDGE (bb1); b2 = BRANCH_EDGE (bb2); f1 = FALLTHRU_EDGE (bb1); f2 = FALLTHRU_EDGE (bb2); /* Get around possible forwarders on fallthru edges. Other cases should be optimized out already. */ if (FORWARDER_BLOCK_P (f1->dest)) f1 = f1->dest->succ; if (FORWARDER_BLOCK_P (f2->dest)) f2 = f2->dest->succ; /* To simplify use of this function, return false if there are unneeded forwarder blocks. 
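(Here that means a forwarder chained behind the one already skipped on a fallthru edge, or any forwarder at all on a branch edge.)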
These will get eliminated later during cleanup_cfg. */ if (FORWARDER_BLOCK_P (f1->dest) || FORWARDER_BLOCK_P (f2->dest) || FORWARDER_BLOCK_P (b1->dest) || FORWARDER_BLOCK_P (b2->dest)) return false; if (f1->dest == f2->dest && b1->dest == b2->dest) reverse = false; else if (f1->dest == b2->dest && b1->dest == f2->dest) reverse = true; else return false; set1 = pc_set (BB_END (bb1)); set2 = pc_set (BB_END (bb2)); if ((XEXP (SET_SRC (set1), 1) == pc_rtx) != (XEXP (SET_SRC (set2), 1) == pc_rtx)) reverse = !reverse; cond1 = XEXP (SET_SRC (set1), 0); cond2 = XEXP (SET_SRC (set2), 0); code1 = GET_CODE (cond1); if (reverse) code2 = reversed_comparison_code (cond2, BB_END (bb2)); else code2 = GET_CODE (cond2); if (code2 == UNKNOWN) return false; /* Verify codes and operands match. */ match = ((code1 == code2 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) || (code1 == swap_condition (code2) && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 0)) && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 1)))); /* If we return true, we will join the blocks. Which means that we will only have one branch prediction bit to work with. Thus we require the existing branches to have probabilities that are roughly similar. */ if (match && !optimize_size && maybe_hot_bb_p (bb1) && maybe_hot_bb_p (bb2)) { int prob2; if (b1->dest == b2->dest) prob2 = b2->probability; else /* Do not use f2 probability as f2 may be forwarded. */ prob2 = REG_BR_PROB_BASE - b2->probability; /* Fail if the difference in probabilities is greater than 50%. This rules out two well-predicted branches with opposite outcomes. */ if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2) { if (dump_file) fprintf (dump_file, "Outcomes of branch in bb %i and %i differs to much (%i %i)\n", bb1->index, bb2->index, b1->probability, prob2); return false; } } if (dump_file && match) fprintf (dump_file, "Conditionals in bb %i and %i match.\n", bb1->index, bb2->index); return match; } /* Generic case - we are seeing a computed jump, table jump or trapping instruction. */ #ifndef CASE_DROPS_THROUGH /* Check whether there are tablejumps in the end of BB1 and BB2. Return true if they are identical. */ { rtx label1, label2; rtx table1, table2; if (tablejump_p (BB_END (bb1), &label1, &table1) && tablejump_p (BB_END (bb2), &label2, &table2) && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2))) { /* The labels should never be the same rtx. If they really are same the jump tables are same too. So disable crossjumping of blocks BB1 and BB2 because when deleting the common insns in the end of BB1 by delete_basic_block () the jump table would be deleted too. */ /* If LABEL2 is referenced in BB1->END do not do anything because we would loose information when replacing LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */ if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1))) { /* Set IDENTICAL to true when the tables are identical. 
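An ADDR_VEC lists the case labels directly and can be compared with a single rtx_equal_p, while an ADDR_DIFF_VEC expresses each entry as a difference from a base label, so its vector elements and auxiliary operands are compared one at a time.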
*/ bool identical = false; rtx p1, p2; p1 = PATTERN (table1); p2 = PATTERN (table2); if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2)) { identical = true; } else if (GET_CODE (p1) == ADDR_DIFF_VEC && (XVECLEN (p1, 1) == XVECLEN (p2, 1)) && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2)) && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3))) { int i; identical = true; for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--) if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i))) identical = false; } if (identical) { replace_label_data rr; bool match; /* Temporarily replace references to LABEL1 with LABEL2 in BB1->END so that we could compare the instructions. */ rr.r1 = label1; rr.r2 = label2; rr.update_label_nuses = false; for_each_rtx (&BB_END (bb1), replace_label, &rr); match = insns_match_p (mode, BB_END (bb1), BB_END (bb2)); if (dump_file && match) fprintf (dump_file, "Tablejumps in bb %i and %i match.\n", bb1->index, bb2->index); /* Set the original label in BB1->END because when deleting a block whose end is a tablejump, the tablejump referenced from the instruction is deleted too. */ rr.r1 = label2; rr.r2 = label1; for_each_rtx (&BB_END (bb1), replace_label, &rr); return match; } } return false; } } #endif /* First ensure that the instructions match. There may be many outgoing edges so this test is generally cheaper. */ if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2))) return false; /* Search the outgoing edges, ensure that the counts do match, find possible fallthru and exception handling edges since these needs more validation. */ for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2; e1 = e1->succ_next, e2 = e2->succ_next) { if (e1->flags & EDGE_EH) nehedges1++; if (e2->flags & EDGE_EH) nehedges2++; if (e1->flags & EDGE_FALLTHRU) fallthru1 = e1; if (e2->flags & EDGE_FALLTHRU) fallthru2 = e2; } /* If number of edges of various types does not match, fail. */ if (e1 || e2 || nehedges1 != nehedges2 || (fallthru1 != 0) != (fallthru2 != 0)) return false; /* fallthru edges must be forwarded to the same destination. */ if (fallthru1) { basic_block d1 = (forwarder_block_p (fallthru1->dest) ? fallthru1->dest->succ->dest: fallthru1->dest); basic_block d2 = (forwarder_block_p (fallthru2->dest) ? fallthru2->dest->succ->dest: fallthru2->dest); if (d1 != d2) return false; } /* Ensure the same EH region. */ { rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0); rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0); if (!n1 && n2) return false; if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0))) return false; } /* We don't need to match the rest of edges as above checks should be enough to ensure that they are equivalent. */ return true; } /* E1 and E2 are edges with the same destination block. Search their predecessors for common code. If found, redirect control flow from (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */ static bool try_crossjump_to_edge (int mode, edge e1, edge e2) { int nmatch; basic_block src1 = e1->src, src2 = e2->src; basic_block redirect_to, redirect_from, to_remove; rtx newpos1, newpos2; edge s; newpos1 = newpos2 = NULL_RTX; /* If we have partitioned hot/cold basic blocks, it is a bad idea to try this optimization. */ if (flag_reorder_blocks_and_partition && no_new_pseudos) return false; /* Search backward through forwarder blocks. We don't need to worry about multiple entry or chained forwarders, as they will be optimized away. We do this to look past the unconditional jump following a conditional jump that is required due to the current CFG shape. 
*/ if (src1->pred && !src1->pred->pred_next && FORWARDER_BLOCK_P (src1)) e1 = src1->pred, src1 = e1->src; if (src2->pred && !src2->pred->pred_next && FORWARDER_BLOCK_P (src2)) e2 = src2->pred, src2 = e2->src; /* Nothing to do if we reach ENTRY, or a common source block. */ if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR) return false; if (src1 == src2) return false; /* Seeing more than 1 forwarder blocks would confuse us later... */ if (FORWARDER_BLOCK_P (e1->dest) && FORWARDER_BLOCK_P (e1->dest->succ->dest)) return false; if (FORWARDER_BLOCK_P (e2->dest) && FORWARDER_BLOCK_P (e2->dest->succ->dest)) return false; /* Likewise with dead code (possibly newly created by the other optimizations of cfg_cleanup). */ if (!src1->pred || !src2->pred) return false; /* Look for the common insn sequence, part the first ... */ if (!outgoing_edges_match (mode, src1, src2)) return false; /* ... and part the second. */ nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2); if (!nmatch) return false; #ifndef CASE_DROPS_THROUGH /* Here we know that the insns in the end of SRC1 which are common with SRC2 will be deleted. If we have tablejumps in the end of SRC1 and SRC2 they have been already compared for equivalence in outgoing_edges_match () so replace the references to TABLE1 by references to TABLE2. */ { rtx label1, label2; rtx table1, table2; if (tablejump_p (BB_END (src1), &label1, &table1) && tablejump_p (BB_END (src2), &label2, &table2) && label1 != label2) { replace_label_data rr; rtx insn; /* Replace references to LABEL1 with LABEL2. */ rr.r1 = label1; rr.r2 = label2; rr.update_label_nuses = true; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { /* Do not replace the label in SRC1->END because when deleting a block whose end is a tablejump, the tablejump referenced from the instruction is deleted too. */ if (insn != BB_END (src1)) for_each_rtx (&insn, replace_label, &rr); } } } #endif /* Avoid splitting if possible. */ if (newpos2 == BB_HEAD (src2)) redirect_to = src2; else { if (dump_file) fprintf (dump_file, "Splitting bb %i before %i insns\n", src2->index, nmatch); redirect_to = split_block (src2, PREV_INSN (newpos2))->dest; } if (dump_file) fprintf (dump_file, "Cross jumping from bb %i to bb %i; %i common insns\n", src1->index, src2->index, nmatch); redirect_to->count += src1->count; redirect_to->frequency += src1->frequency; /* We may have some registers visible trought the block. */ redirect_to->flags |= BB_DIRTY; /* Recompute the frequencies and counts of outgoing edges. */ for (s = redirect_to->succ; s; s = s->succ_next) { edge s2; basic_block d = s->dest; if (FORWARDER_BLOCK_P (d)) d = d->succ->dest; for (s2 = src1->succ; ; s2 = s2->succ_next) { basic_block d2 = s2->dest; if (FORWARDER_BLOCK_P (d2)) d2 = d2->succ->dest; if (d == d2) break; } s->count += s2->count; /* Take care to update possible forwarder blocks. We verified that there is no more than one in the chain, so we can't run into infinite loop. 
*/ if (FORWARDER_BLOCK_P (s->dest)) { s->dest->succ->count += s2->count; s->dest->count += s2->count; s->dest->frequency += EDGE_FREQUENCY (s); } if (FORWARDER_BLOCK_P (s2->dest)) { s2->dest->succ->count -= s2->count; if (s2->dest->succ->count < 0) s2->dest->succ->count = 0; s2->dest->count -= s2->count; s2->dest->frequency -= EDGE_FREQUENCY (s); if (s2->dest->frequency < 0) s2->dest->frequency = 0; if (s2->dest->count < 0) s2->dest->count = 0; } if (!redirect_to->frequency && !src1->frequency) s->probability = (s->probability + s2->probability) / 2; else s->probability = ((s->probability * redirect_to->frequency + s2->probability * src1->frequency) / (redirect_to->frequency + src1->frequency)); } update_br_prob_note (redirect_to); /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */ /* Skip possible basic block header. */ if (GET_CODE (newpos1) == CODE_LABEL) newpos1 = NEXT_INSN (newpos1); if (GET_CODE (newpos1) == NOTE) newpos1 = NEXT_INSN (newpos1); redirect_from = split_block (src1, PREV_INSN (newpos1))->src; to_remove = redirect_from->succ->dest; redirect_edge_and_branch_force (redirect_from->succ, redirect_to); delete_basic_block (to_remove); update_forwarder_flag (redirect_from); return true; } /* Search the predecessors of BB for common insn sequences. When found, share code between them by redirecting control flow. Return true if any changes made. */ static bool try_crossjump_bb (int mode, basic_block bb) { edge e, e2, nexte2, nexte, fallthru; bool changed; int n = 0, max; /* Nothing to do if there is not at least two incoming edges. */ if (!bb->pred || !bb->pred->pred_next) return false; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. */ if (flag_reorder_blocks_and_partition && (bb->pred->src->partition != bb->pred->pred_next->src->partition || bb->pred->crossing_edge)) return false; /* It is always cheapest to redirect a block that ends in a branch to a block that falls through into BB, as that adds no branches to the program. We'll try that combination first. */ fallthru = NULL; max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES); for (e = bb->pred; e ; e = e->pred_next, n++) { if (e->flags & EDGE_FALLTHRU) fallthru = e; if (n > max) return false; } changed = false; for (e = bb->pred; e; e = nexte) { nexte = e->pred_next; /* As noted above, first try with the fallthru predecessor. */ if (fallthru) { /* Don't combine the fallthru edge into anything else. If there is a match, we'll do it the other way around. */ if (e == fallthru) continue; /* If nothing changed since the last attempt, there is nothing we can do. */ if (!first_pass && (!(e->src->flags & BB_DIRTY) && !(fallthru->src->flags & BB_DIRTY))) continue; if (try_crossjump_to_edge (mode, e, fallthru)) { changed = true; nexte = bb->pred; continue; } } /* Non-obvious work limiting check: Recognize that we're going to call try_crossjump_bb on every basic block. So if we have two blocks with lots of outgoing edges (a switch) and they share lots of common destinations, then we would do the cross-jump check once for each common destination. Now, if the blocks actually are cross-jump candidates, then all of their destinations will be shared. Which means that we only need check them for cross-jump candidacy once. We can eliminate redundant checks of crossjump(A,B) by arbitrarily choosing to do the check from the block for which the edge in question is the first successor of A. 
*/ if (e->src->succ != e) continue; for (e2 = bb->pred; e2; e2 = nexte2) { nexte2 = e2->pred_next; if (e2 == e) continue; /* We've already checked the fallthru edge above. */ if (e2 == fallthru) continue; /* The "first successor" check above only prevents multiple checks of crossjump(A,B). In order to prevent redundant checks of crossjump(B,A), require that A be the block with the lowest index. */ if (e->src->index > e2->src->index) continue; /* If nothing changed since the last attempt, there is nothing we can do. */ if (!first_pass && (!(e->src->flags & BB_DIRTY) && !(e2->src->flags & BB_DIRTY))) continue; if (try_crossjump_to_edge (mode, e, e2)) { changed = true; nexte = bb->pred; break; } } } return changed; } /* Do simple CFG optimizations - basic block merging, simplifying of jump instructions etc. Return nonzero if changes were made. */ static bool try_optimize_cfg (int mode) { bool changed_overall = false; bool changed; int iterations = 0; basic_block bb, b, next; if (mode & CLEANUP_CROSSJUMP) add_noreturn_fake_exit_edges (); FOR_EACH_BB (bb) update_forwarder_flag (bb); if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING)) clear_bb_flags (); if (! targetm.cannot_modify_jumps_p ()) { first_pass = true; /* Attempt to merge blocks as made possible by edge removal. If a block has only one successor, and the successor has only one predecessor, they may be combined. */ do { changed = false; iterations++; if (dump_file) fprintf (dump_file, "\n\ntry_optimize_cfg iteration %i\n\n", iterations); for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;) { basic_block c; edge s; bool changed_here = false; /* Delete trivially dead basic blocks. */ while (b->pred == NULL) { c = b->prev_bb; if (dump_file) fprintf (dump_file, "Deleting block %i.\n", b->index); delete_basic_block (b); if (!(mode & CLEANUP_CFGLAYOUT)) changed = true; b = c; } /* Remove code labels no longer used. */ if (b->pred->pred_next == NULL && (b->pred->flags & EDGE_FALLTHRU) && !(b->pred->flags & EDGE_COMPLEX) && GET_CODE (BB_HEAD (b)) == CODE_LABEL /* If the previous block ends with a branch to this block, we can't delete the label. Normally this is a condjump that is yet to be simplified, but if CASE_DROPS_THRU, this can be a tablejump with some element going to the same place as the default (fallthru). */ && (b->pred->src == ENTRY_BLOCK_PTR || GET_CODE (BB_END (b->pred->src)) != JUMP_INSN || ! label_is_jump_target_p (BB_HEAD (b), BB_END (b->pred->src)))) { rtx label = BB_HEAD (b); delete_insn_chain (label, label); /* In the case label is undeletable, move it after the BASIC_BLOCK note. */ if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL) { rtx bb_note = NEXT_INSN (BB_HEAD (b)); reorder_insns_nobb (label, label, bb_note); BB_HEAD (b) = bb_note; } if (dump_file) fprintf (dump_file, "Deleted label in block %i.\n", b->index); } /* If we fall through an empty block, we can remove it. */ if (!(mode & CLEANUP_CFGLAYOUT) && b->pred->pred_next == NULL && (b->pred->flags & EDGE_FALLTHRU) && GET_CODE (BB_HEAD (b)) != CODE_LABEL && FORWARDER_BLOCK_P (b) /* Note that forwarder_block_p true ensures that there is a successor for this block. */ && (b->succ->flags & EDGE_FALLTHRU) && n_basic_blocks > 1) { if (dump_file) fprintf (dump_file, "Deleting fallthru block %i.\n", b->index); c = b->prev_bb == ENTRY_BLOCK_PTR ? 
b->next_bb : b->prev_bb; redirect_edge_succ_nodup (b->pred, b->succ->dest); delete_basic_block (b); changed = true; b = c; } if ((s = b->succ) != NULL && s->succ_next == NULL && !(s->flags & EDGE_COMPLEX) && (c = s->dest) != EXIT_BLOCK_PTR && c->pred->pred_next == NULL && b != c) { /* When not in cfg_layout mode use code aware of reordering INSN. This code possibly creates new basic blocks so it does not fit merge_blocks interface and is kept here in hope that it will become useless once more of compiler is transformed to use cfg_layout mode. */ if ((mode & CLEANUP_CFGLAYOUT) && can_merge_blocks_p (b, c)) { merge_blocks (b, c); update_forwarder_flag (b); changed_here = true; } else if (!(mode & CLEANUP_CFGLAYOUT) /* If the jump insn has side effects, we can't kill the edge. */ && (GET_CODE (BB_END (b)) != JUMP_INSN || (reload_completed ? simplejump_p (BB_END (b)) : (onlyjump_p (BB_END (b)) && !tablejump_p (BB_END (b), NULL, NULL)))) && (next = merge_blocks_move (s, b, c, mode))) { b = next; changed_here = true; } } /* Simplify branch over branch. */ if ((mode & CLEANUP_EXPENSIVE) && !(mode & CLEANUP_CFGLAYOUT) && try_simplify_condjump (b)) changed_here = true; /* If B has a single outgoing edge, but uses a non-trivial jump instruction without side-effects, we can either delete the jump entirely, or replace it with a simple unconditional jump. */ if (b->succ && ! b->succ->succ_next && b->succ->dest != EXIT_BLOCK_PTR && onlyjump_p (BB_END (b)) && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX) && try_redirect_by_replacing_jump (b->succ, b->succ->dest, (mode & CLEANUP_CFGLAYOUT) != 0)) { update_forwarder_flag (b); changed_here = true; } /* Simplify branch to branch. */ if (try_forward_edges (mode, b)) changed_here = true; /* Look for shared code between blocks. */ if ((mode & CLEANUP_CROSSJUMP) && try_crossjump_bb (mode, b)) changed_here = true; /* Don't get confused by the index shift caused by deleting blocks. */ if (!changed_here) b = b->next_bb; else changed = true; } if ((mode & CLEANUP_CROSSJUMP) && try_crossjump_bb (mode, EXIT_BLOCK_PTR)) changed = true; #ifdef ENABLE_CHECKING if (changed) verify_flow_info (); #endif changed_overall |= changed; first_pass = false; } while (changed); } if (mode & CLEANUP_CROSSJUMP) remove_fake_edges (); clear_aux_for_blocks (); return changed_overall; } /* Delete all unreachable basic blocks. */ bool delete_unreachable_blocks (void) { bool changed = false; basic_block b, next_bb; find_unreachable_blocks (); /* Delete all unreachable basic blocks. */ for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb) { next_bb = b->next_bb; if (!(b->flags & BB_REACHABLE)) { delete_basic_block (b); changed = true; } } if (changed) tidy_fallthru_edges (); return changed; } /* Merges sequential blocks if possible. */ bool merge_seq_blocks (void) { basic_block bb; bool changed = false; for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; ) { if (bb->succ && !bb->succ->succ_next && can_merge_blocks_p (bb, bb->succ->dest)) { /* Merge the blocks and retry. */ merge_blocks (bb, bb->succ->dest); changed = true; continue; } bb = bb->next_bb; } return changed; } /* Tidy the CFG by deleting unreachable code and whatnot. */ bool cleanup_cfg (int mode) { bool changed = false; timevar_push (TV_CLEANUP_CFG); if (delete_unreachable_blocks ()) { changed = true; /* We've possibly created trivially dead code. Cleanup it right now to introduce more opportunities for try_optimize_cfg. 
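(This immediate pass is skipped when the caller forbids insn deletion, when life information is being maintained, or after reload.)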
*/ if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE)) && !reload_completed) delete_trivially_dead_insns (get_insns(), max_reg_num ()); } compact_blocks (); while (try_optimize_cfg (mode)) { delete_unreachable_blocks (), changed = true; if (mode & CLEANUP_UPDATE_LIFE) { /* Cleaning up CFG introduces more opportunities for dead code removal that in turn may introduce more opportunities for cleaning up the CFG. */ if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE | PROP_KILL_DEAD_CODE | ((mode & CLEANUP_LOG_LINKS) ? PROP_LOG_LINKS : 0))) break; } else if (!(mode & CLEANUP_NO_INSN_DEL) && (mode & CLEANUP_EXPENSIVE) && !reload_completed) { if (!delete_trivially_dead_insns (get_insns(), max_reg_num ())) break; } else break; delete_dead_jumptables (); } /* Kill the data we won't maintain. */ free_EXPR_LIST_list (&label_value_list); timevar_pop (TV_CLEANUP_CFG); return changed; } /* Basic block reordering routines for the GNU compiler. Copyright (C) 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The contents of the current function definition are allocated in this obstack, and all are freed at the end of the function. */ extern struct obstack flow_obstack; /* Holds the interesting trailing notes for the function. */ rtx cfg_layout_function_footer, cfg_layout_function_header; static rtx skip_insns_after_block (basic_block); static void record_effective_endpoints (void); static rtx get_label_for_bb (basic_block); static void fixup_reorder_chain (void); static void set_block_levels (tree, int); static void change_scope (rtx, tree, tree); void verify_insn_chain (void); static void fixup_fallthru_exit_predecessor (void); static tree insn_scope (rtx); static void update_unlikely_executed_notes (basic_block); rtx unlink_insn_chain (rtx first, rtx last) { rtx prevfirst = PREV_INSN (first); rtx nextlast = NEXT_INSN (last); PREV_INSN (first) = NULL; NEXT_INSN (last) = NULL; if (prevfirst) NEXT_INSN (prevfirst) = nextlast; if (nextlast) PREV_INSN (nextlast) = prevfirst; else set_last_insn (prevfirst); if (!prevfirst) set_first_insn (nextlast); return first; } /* Skip over inter-block insns occurring after BB which are typically associated with BB (e.g., barriers). If there are any such insns, we return the last one. Otherwise, we return the end of BB. 
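As an illustrative sketch (a hypothetical insn stream, not taken from this file), given

       (jump_insn 10 ...)                 ;; BB_END (bb)
       (barrier 11 ...)
       (note 12 ... NOTE_INSN_LOOP_END)
       (code_label 13 ...)                ;; BB_HEAD (bb->next_bb)

   we return note 12: the barrier and the loop note trail BB, while the label already
   belongs to the following block.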
*/ static rtx skip_insns_after_block (basic_block bb) { rtx insn, last_insn, next_head, prev; next_head = NULL_RTX; if (bb->next_bb != EXIT_BLOCK_PTR) next_head = BB_HEAD (bb->next_bb); for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; ) { if (insn == next_head) break; switch (GET_CODE (insn)) { case BARRIER: last_insn = insn; continue; case NOTE: switch (NOTE_LINE_NUMBER (insn)) { case NOTE_INSN_LOOP_END: case NOTE_INSN_BLOCK_END: last_insn = insn; continue; case NOTE_INSN_DELETED: case NOTE_INSN_DELETED_LABEL: continue; default: continue; break; } break; case CODE_LABEL: if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN && (GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_VEC || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC)) { insn = NEXT_INSN (insn); last_insn = insn; continue; } break; default: break; } break; } /* It is possible to hit contradictory sequence. For instance: jump_insn NOTE_INSN_LOOP_BEG barrier Where barrier belongs to jump_insn, but the note does not. This can be created by removing the basic block originally following NOTE_INSN_LOOP_BEG. In such case reorder the notes. */ for (insn = last_insn; insn != BB_END (bb); insn = prev) { prev = PREV_INSN (insn); if (GET_CODE (insn) == NOTE) switch (NOTE_LINE_NUMBER (insn)) { case NOTE_INSN_LOOP_END: case NOTE_INSN_BLOCK_END: case NOTE_INSN_DELETED: case NOTE_INSN_DELETED_LABEL: continue; default: reorder_insns (insn, insn, last_insn); } } return last_insn; } /* Locate or create a label for a given basic block. */ static rtx get_label_for_bb (basic_block bb) { rtx label = BB_HEAD (bb); if (GET_CODE (label) != CODE_LABEL) { if (dump_file) fprintf (dump_file, "Emitting label for block %d\n", bb->index); label = block_label (bb); } return label; } /* Locate the effective beginning and end of the insn chain for each block, as defined by skip_insns_after_block above. */ static void record_effective_endpoints (void) { rtx next_insn; basic_block bb; rtx insn; for (insn = get_insns (); insn && GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK; insn = NEXT_INSN (insn)) continue; if (!insn) abort (); /* No basic blocks at all? */ if (PREV_INSN (insn)) cfg_layout_function_header = unlink_insn_chain (get_insns (), PREV_INSN (insn)); else cfg_layout_function_header = NULL_RTX; next_insn = get_insns (); FOR_EACH_BB (bb) { rtx end; if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb)) bb->rbi->header = unlink_insn_chain (next_insn, PREV_INSN (BB_HEAD (bb))); end = skip_insns_after_block (bb); if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end) bb->rbi->footer = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end); next_insn = NEXT_INSN (BB_END (bb)); } cfg_layout_function_footer = next_insn; if (cfg_layout_function_footer) cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ()); } /* Data structures representing mapping of INSN_LOCATOR into scope blocks, line numbers and files. In order to be GGC friendly we need to use separate varrays. This also slightly improve the memory locality in binary search. The _locs array contains locators where the given property change. The block_locators_blocks contains the scope block that is used for all insn locator greater than corresponding block_locators_locs value and smaller than the following one. Similarly for the other properties. 
*/ static GTY(()) varray_type block_locators_locs; static GTY(()) varray_type block_locators_blocks; static GTY(()) varray_type line_locators_locs; static GTY(()) varray_type line_locators_lines; static GTY(()) varray_type file_locators_locs; static GTY(()) varray_type file_locators_files; int prologue_locator; int epilogue_locator; /* During the RTL expansion the lexical blocks and line numbers are represented via INSN_NOTEs. Replace them by representation using INSN_LOCATORs. */ void insn_locators_initialize (void) { tree block = NULL; tree last_block = NULL; rtx insn, next; int loc = 0; int line_number = 0, last_line_number = 0; const char *file_name = NULL, *last_file_name = NULL; prologue_locator = epilogue_locator = 0; VARRAY_INT_INIT (block_locators_locs, 32, "block_locators_locs"); VARRAY_TREE_INIT (block_locators_blocks, 32, "block_locators_blocks"); VARRAY_INT_INIT (line_locators_locs, 32, "line_locators_locs"); VARRAY_INT_INIT (line_locators_lines, 32, "line_locators_lines"); VARRAY_INT_INIT (file_locators_locs, 32, "file_locators_locs"); VARRAY_CHAR_PTR_INIT (file_locators_files, 32, "file_locators_files"); for (insn = get_insns (); insn; insn = next) { next = NEXT_INSN (insn); if ((active_insn_p (insn) && GET_CODE (PATTERN (insn)) != ADDR_VEC && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC) || !NEXT_INSN (insn) || (!prologue_locator && file_name)) { if (last_block != block) { loc++; VARRAY_PUSH_INT (block_locators_locs, loc); VARRAY_PUSH_TREE (block_locators_blocks, block); last_block = block; } if (last_line_number != line_number) { loc++; VARRAY_PUSH_INT (line_locators_locs, loc); VARRAY_PUSH_INT (line_locators_lines, line_number); last_line_number = line_number; } if (last_file_name != file_name) { loc++; VARRAY_PUSH_INT (file_locators_locs, loc); VARRAY_PUSH_CHAR_PTR (file_locators_files, (char *) file_name); last_file_name = file_name; } } if (!prologue_locator && file_name) prologue_locator = loc; if (!NEXT_INSN (insn)) epilogue_locator = loc; if (active_insn_p (insn)) INSN_LOCATOR (insn) = loc; else if (GET_CODE (insn) == NOTE) { switch (NOTE_LINE_NUMBER (insn)) { case NOTE_INSN_BLOCK_BEG: case NOTE_INSN_BLOCK_END: abort (); default: if (NOTE_LINE_NUMBER (insn) > 0) { expanded_location xloc; NOTE_EXPANDED_LOCATION (xloc, insn); line_number = xloc.line; file_name = xloc.file; } break; } } check_block_change (insn, &block); } /* Tag the blocks with a depth number so that change_scope can find the common parent easily. */ set_block_levels (DECL_INITIAL (cfun->decl), 0); free_block_changes (); } /* For each lexical block, set BLOCK_NUMBER to the depth at which it is found in the block tree. */ static void set_block_levels (tree block, int level) { while (block) { BLOCK_NUMBER (block) = level; set_block_levels (BLOCK_SUBBLOCKS (block), level + 1); block = BLOCK_CHAIN (block); } } /* Return sope resulting from combination of S1 and S2. */ tree choose_inner_scope (tree s1, tree s2) { if (!s1) return s2; if (!s2) return s1; if (BLOCK_NUMBER (s1) > BLOCK_NUMBER (s2)) return s1; return s2; } /* Emit lexical block notes needed to change scope from S1 to S2. 
*/ static void change_scope (rtx orig_insn, tree s1, tree s2) { rtx insn = orig_insn; tree com = NULL_TREE; tree ts1 = s1, ts2 = s2; tree s; while (ts1 != ts2) { if (ts1 == NULL || ts2 == NULL) abort (); if (BLOCK_NUMBER (ts1) > BLOCK_NUMBER (ts2)) ts1 = BLOCK_SUPERCONTEXT (ts1); else if (BLOCK_NUMBER (ts1) < BLOCK_NUMBER (ts2)) ts2 = BLOCK_SUPERCONTEXT (ts2); else { ts1 = BLOCK_SUPERCONTEXT (ts1); ts2 = BLOCK_SUPERCONTEXT (ts2); } } com = ts1; /* Close scopes. */ s = s1; while (s != com) { rtx note = emit_note_before (NOTE_INSN_BLOCK_END, insn); NOTE_BLOCK (note) = s; s = BLOCK_SUPERCONTEXT (s); } /* Open scopes. */ s = s2; while (s != com) { insn = emit_note_before (NOTE_INSN_BLOCK_BEG, insn); NOTE_BLOCK (insn) = s; s = BLOCK_SUPERCONTEXT (s); } } /* Return lexical scope block insn belong to. */ static tree insn_scope (rtx insn) { int max = VARRAY_ACTIVE_SIZE (block_locators_locs); int min = 0; int loc = INSN_LOCATOR (insn); /* When block_locators_locs was initialized, the pro- and epilogue insns didn't exist yet and can therefore not be found this way. But we know that they belong to the outer most block of the current function. Without this test, the prologue would be put inside the block of the first valid instruction in the function and when that first insn is part of an inlined function then the low_pc of that inlined function is messed up. Likewise for the epilogue and the last valid instruction. */ if (loc == prologue_locator || loc == epilogue_locator) return DECL_INITIAL (cfun->decl); if (!max || !loc) return NULL; while (1) { int pos = (min + max) / 2; int tmp = VARRAY_INT (block_locators_locs, pos); if (tmp <= loc && min != pos) min = pos; else if (tmp > loc && max != pos) max = pos; else { min = pos; break; } } return VARRAY_TREE (block_locators_blocks, min); } /* Return line number of the statement specified by the locator. */ int locator_line (int loc) { int max = VARRAY_ACTIVE_SIZE (line_locators_locs); int min = 0; if (!max || !loc) return 0; while (1) { int pos = (min + max) / 2; int tmp = VARRAY_INT (line_locators_locs, pos); if (tmp <= loc && min != pos) min = pos; else if (tmp > loc && max != pos) max = pos; else { min = pos; break; } } return VARRAY_INT (line_locators_lines, min); } /* Return line number of the statement that produced this insn. */ int insn_line (rtx insn) { return locator_line (INSN_LOCATOR (insn)); } /* Return source file of the statement specified by LOC. */ const char * locator_file (int loc) { int max = VARRAY_ACTIVE_SIZE (file_locators_locs); int min = 0; if (!max || !loc) return NULL; while (1) { int pos = (min + max) / 2; int tmp = VARRAY_INT (file_locators_locs, pos); if (tmp <= loc && min != pos) min = pos; else if (tmp > loc && max != pos) max = pos; else { min = pos; break; } } return VARRAY_CHAR_PTR (file_locators_files, min); } /* Return source file of the statement that produced this insn. */ const char * insn_file (rtx insn) { return locator_file (INSN_LOCATOR (insn)); } /* Rebuild all the NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes based on the scope tree and the newly reordered instructions. */ void reemit_insn_block_notes (void) { tree cur_block = DECL_INITIAL (cfun->decl); rtx insn, note; insn = get_insns (); if (!active_insn_p (insn)) insn = next_active_insn (insn); for (; insn; insn = next_active_insn (insn)) { tree this_block; this_block = insn_scope (insn); /* For sequences compute scope resulting from merging all scopes of instructions nested inside. 
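For instance (an illustrative case, not from this file), a delay-slot SEQUENCE whose
   branch carries scope B1 and whose slot insn carries the deeper scope B1.B2 is given
   scope B1.B2, because choose_inner_scope prefers the block with the larger BLOCK_NUMBER,
   i.e. the more deeply nested one.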
*/ if (GET_CODE (PATTERN (insn)) == SEQUENCE) { int i; rtx body = PATTERN (insn); this_block = NULL; for (i = 0; i < XVECLEN (body, 0); i++) this_block = choose_inner_scope (this_block, insn_scope (XVECEXP (body, 0, i))); } if (! this_block) continue; if (this_block != cur_block) { change_scope (insn, cur_block, this_block); cur_block = this_block; } } /* change_scope emits before the insn, not after. */ note = emit_note (NOTE_INSN_DELETED); change_scope (note, cur_block, DECL_INITIAL (cfun->decl)); delete_insn (note); reorder_blocks (); } /* Given a reorder chain, rearrange the code to match. */ static void fixup_reorder_chain (void) { basic_block bb, prev_bb; int index; rtx insn = NULL; if (cfg_layout_function_header) { set_first_insn (cfg_layout_function_header); insn = cfg_layout_function_header; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); } /* First do the bulk reordering -- rechain the blocks without regard to the needed changes to jumps and labels. */ for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; bb != 0; bb = bb->rbi->next, index++) { if (bb->rbi->header) { if (insn) NEXT_INSN (insn) = bb->rbi->header; else set_first_insn (bb->rbi->header); PREV_INSN (bb->rbi->header) = insn; insn = bb->rbi->header; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); } if (insn) NEXT_INSN (insn) = BB_HEAD (bb); else set_first_insn (BB_HEAD (bb)); PREV_INSN (BB_HEAD (bb)) = insn; insn = BB_END (bb); if (bb->rbi->footer) { NEXT_INSN (insn) = bb->rbi->footer; PREV_INSN (bb->rbi->footer) = insn; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); } } if (index != n_basic_blocks) abort (); NEXT_INSN (insn) = cfg_layout_function_footer; if (cfg_layout_function_footer) PREV_INSN (cfg_layout_function_footer) = insn; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); set_last_insn (insn); #ifdef ENABLE_CHECKING verify_insn_chain (); #endif delete_dead_jumptables (); /* Now add jumps and labels as needed to match the blocks new outgoing edges. */ for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = bb->rbi->next) { edge e_fall, e_taken, e; rtx bb_end_insn; basic_block nb; basic_block old_bb; if (bb->succ == NULL) continue; /* Find the old fallthru edge, and another non-EH edge for a taken jump. */ e_taken = e_fall = NULL; for (e = bb->succ; e ; e = e->succ_next) if (e->flags & EDGE_FALLTHRU) e_fall = e; else if (! (e->flags & EDGE_EH)) e_taken = e; bb_end_insn = BB_END (bb); if (GET_CODE (bb_end_insn) == JUMP_INSN) { if (any_condjump_p (bb_end_insn)) { /* If the old fallthru is still next, nothing to do. */ if (bb->rbi->next == e_fall->dest || e_fall->dest == EXIT_BLOCK_PTR) continue; /* The degenerated case of conditional jump jumping to the next instruction can happen on target having jumps with side effects. Create temporarily the duplicated edge representing branch. It will get unidentified by force_nonfallthru_and_redirect that would otherwise get confused by fallthru edge not pointing to the next basic block. 
*/ if (!e_taken) { rtx note; edge e_fake; e_fake = unchecked_make_edge (bb, e_fall->dest, 0); if (!redirect_jump (BB_END (bb), block_label (bb), 0)) abort (); note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX); if (note) { int prob = INTVAL (XEXP (note, 0)); e_fake->probability = prob; e_fake->count = e_fall->count * prob / REG_BR_PROB_BASE; e_fall->probability -= e_fall->probability; e_fall->count -= e_fake->count; if (e_fall->probability < 0) e_fall->probability = 0; if (e_fall->count < 0) e_fall->count = 0; } } /* There is one special case: if *neither* block is next, such as happens at the very end of a function, then we'll need to add a new unconditional jump. Choose the taken edge based on known or assumed probability. */ else if (bb->rbi->next != e_taken->dest) { rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0); if (note && INTVAL (XEXP (note, 0)) < REG_BR_PROB_BASE / 2 && invert_jump (bb_end_insn, (e_fall->dest == EXIT_BLOCK_PTR ? NULL_RTX : get_label_for_bb (e_fall->dest)), 0)) { e_fall->flags &= ~EDGE_FALLTHRU; #ifdef ENABLE_CHECKING if (!could_fall_through (e_taken->src, e_taken->dest)) abort (); #endif e_taken->flags |= EDGE_FALLTHRU; update_br_prob_note (bb); e = e_fall, e_fall = e_taken, e_taken = e; } } /* If the "jumping" edge is a crossing edge, and the fall through edge is non-crossing, leave things as they are. */ else if (e_taken->crossing_edge && !e_fall->crossing_edge) continue; /* Otherwise we can try to invert the jump. This will basically never fail, however, keep up the pretense. */ else if (invert_jump (bb_end_insn, (e_fall->dest == EXIT_BLOCK_PTR ? NULL_RTX : get_label_for_bb (e_fall->dest)), 0)) { e_fall->flags &= ~EDGE_FALLTHRU; #ifdef ENABLE_CHECKING if (!could_fall_through (e_taken->src, e_taken->dest)) abort (); #endif e_taken->flags |= EDGE_FALLTHRU; update_br_prob_note (bb); continue; } } else if (returnjump_p (bb_end_insn)) continue; else { /* Otherwise we have some switch or computed jump. In the 99% case, there should not have been a fallthru edge. */ if (! e_fall) continue; #ifdef CASE_DROPS_THROUGH /* Except for VAX. Since we didn't have predication for the tablejump, the fallthru block should not have moved. */ if (bb->rbi->next == e_fall->dest) continue; bb_end_insn = skip_insns_after_block (bb); #else abort (); #endif } } else { /* No fallthru implies a noreturn function with EH edges, or something similarly bizarre. In any case, we don't need to do anything. */ if (! e_fall) continue; /* If the fallthru block is still next, nothing to do. */ if (bb->rbi->next == e_fall->dest) continue; /* A fallthru to exit block. */ if (e_fall->dest == EXIT_BLOCK_PTR) continue; } /* We got here if we need to add a new jump insn. */ nb = force_nonfallthru (e_fall); if (nb) { initialize_bb_rbi (nb); nb->rbi->visited = 1; nb->rbi->next = bb->rbi->next; bb->rbi->next = nb; /* Don't process this new block. */ old_bb = bb; bb = nb; /* Make sure new bb is tagged for correct section (same as fall-thru source). 
*/ e_fall->src->partition = bb->pred->src->partition; if (flag_reorder_blocks_and_partition) { if (bb->pred->src->partition == COLD_PARTITION) { rtx new_note; rtx note = BB_HEAD (e_fall->src); while (!INSN_P (note) && note != BB_END (e_fall->src)) note = NEXT_INSN (note); new_note = emit_note_before (NOTE_INSN_UNLIKELY_EXECUTED_CODE, note); NOTE_BASIC_BLOCK (new_note) = bb; } if (GET_CODE (BB_END (bb)) == JUMP_INSN && !any_condjump_p (BB_END (bb)) && bb->succ->crossing_edge ) REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb))); } } } /* Put basic_block_info in the new order. */ if (dump_file) { fprintf (dump_file, "Reordered sequence:\n"); for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; bb; bb = bb->rbi->next, index++) { fprintf (dump_file, " %i ", index); if (bb->rbi->original) fprintf (dump_file, "duplicate of %i ", bb->rbi->original->index); else if (forwarder_block_p (bb) && GET_CODE (BB_HEAD (bb)) != CODE_LABEL) fprintf (dump_file, "compensation "); else fprintf (dump_file, "bb %i ", bb->index); fprintf (dump_file, " [%i]\n", bb->frequency); } } prev_bb = ENTRY_BLOCK_PTR; bb = ENTRY_BLOCK_PTR->next_bb; index = 0; for (; bb; prev_bb = bb, bb = bb->rbi->next, index ++) { bb->index = index; BASIC_BLOCK (index) = bb; update_unlikely_executed_notes (bb); bb->prev_bb = prev_bb; prev_bb->next_bb = bb; } prev_bb->next_bb = EXIT_BLOCK_PTR; EXIT_BLOCK_PTR->prev_bb = prev_bb; /* Annoying special case - jump around dead jumptables left in the code. */ FOR_EACH_BB (bb) { edge e; for (e = bb->succ; e && !(e->flags & EDGE_FALLTHRU); e = e->succ_next) continue; if (e && !can_fallthru (e->src, e->dest)) force_nonfallthru (e); } } /* Update the basic block number information in any NOTE_INSN_UNLIKELY_EXECUTED_CODE notes within the basic block. */ static void update_unlikely_executed_notes (basic_block bb) { rtx cur_insn; for (cur_insn = BB_HEAD (bb); cur_insn != BB_END (bb); cur_insn = NEXT_INSN (cur_insn)) if (GET_CODE (cur_insn) == NOTE && NOTE_LINE_NUMBER (cur_insn) == NOTE_INSN_UNLIKELY_EXECUTED_CODE) NOTE_BASIC_BLOCK (cur_insn) = bb; } /* Perform sanity checks on the insn chain. 1. Check that next/prev pointers are consistent in both the forward and reverse direction. 2. Count insns in chain, going both directions, and check if equal. 3. Check that get_last_insn () returns the actual end of chain. */ void verify_insn_chain (void) { rtx x, prevx, nextx; int insn_cnt1, insn_cnt2; for (prevx = NULL, insn_cnt1 = 1, x = get_insns (); x != 0; prevx = x, insn_cnt1++, x = NEXT_INSN (x)) if (PREV_INSN (x) != prevx) abort (); if (prevx != get_last_insn ()) abort (); for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn (); x != 0; nextx = x, insn_cnt2++, x = PREV_INSN (x)) if (NEXT_INSN (x) != nextx) abort (); if (insn_cnt1 != insn_cnt2) abort (); } /* If we have assembler epilogues, the block falling through to exit must be the last one in the reordered chain when we reach final. Ensure that this condition is met. */ static void fixup_fallthru_exit_predecessor (void) { edge e; basic_block bb = NULL; /* This transformation is not valid before reload, because we might separate a call from the instruction that copies the return value. */ if (! reload_completed) abort (); for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) if (e->flags & EDGE_FALLTHRU) bb = e->src; if (bb && bb->rbi->next) { basic_block c = ENTRY_BLOCK_PTR->next_bb; /* If the very first block is the one with the fall-through exit edge, we have to split that block. 
*/ if (c == bb) { bb = split_block (bb, NULL)->dest; initialize_bb_rbi (bb); bb->rbi->next = c->rbi->next; c->rbi->next = bb; bb->rbi->footer = c->rbi->footer; c->rbi->footer = NULL; } while (c->rbi->next != bb) c = c->rbi->next; c->rbi->next = bb->rbi->next; while (c->rbi->next) c = c->rbi->next; c->rbi->next = bb; bb->rbi->next = NULL; } } /* Return true in case it is possible to duplicate the basic block BB. */ /* We do not want to declare the function in a header file, since it should only be used through the cfghooks interface, and we do not want to move it to cfgrtl.c since it would require also moving quite a lot of related code. */ extern bool cfg_layout_can_duplicate_bb_p (basic_block); bool cfg_layout_can_duplicate_bb_p (basic_block bb) { /* Do not attempt to duplicate tablejumps, as we need to unshare the dispatch table. This is difficult to do, as the instructions computing jump destination may be hoisted outside the basic block. */ if (tablejump_p (BB_END (bb), NULL, NULL)) return false; /* Do not duplicate blocks containing insns that can't be copied. */ if (targetm.cannot_copy_insn_p) { rtx insn = BB_HEAD (bb); while (1) { if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn)) return false; if (insn == BB_END (bb)) break; insn = NEXT_INSN (insn); } } return true; } rtx duplicate_insn_chain (rtx from, rtx to) { rtx insn, last; /* Avoid updating of boundaries of previous basic block. The note will get removed from insn stream in fixup. */ last = emit_note (NOTE_INSN_DELETED); /* Create copy at the end of INSN chain. The chain will be reordered later. */ for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn)) { switch (GET_CODE (insn)) { case INSN: case CALL_INSN: case JUMP_INSN: /* Avoid copying of dispatch tables. We never duplicate tablejumps, so this can hit only in case the table got moved far from original jump. */ if (GET_CODE (PATTERN (insn)) == ADDR_VEC || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC) break; emit_copy_of_insn_after (insn, get_last_insn ()); break; case CODE_LABEL: break; case BARRIER: emit_barrier (); break; case NOTE: switch (NOTE_LINE_NUMBER (insn)) { /* In case prologue is empty and function contain label in first BB, we may want to copy the block. */ case NOTE_INSN_PROLOGUE_END: case NOTE_INSN_LOOP_VTOP: case NOTE_INSN_LOOP_CONT: case NOTE_INSN_LOOP_BEG: case NOTE_INSN_LOOP_END: /* Strip down the loop notes - we don't really want to keep them consistent in loop copies. */ case NOTE_INSN_DELETED: case NOTE_INSN_DELETED_LABEL: /* No problem to strip these. */ case NOTE_INSN_EPILOGUE_BEG: case NOTE_INSN_FUNCTION_END: /* Debug code expect these notes to exist just once. Keep them in the master copy. ??? It probably makes more sense to duplicate them for each epilogue copy. */ case NOTE_INSN_FUNCTION_BEG: /* There is always just single entry to function. */ case NOTE_INSN_BASIC_BLOCK: break; /* There is no purpose to duplicate prologue. */ case NOTE_INSN_BLOCK_BEG: case NOTE_INSN_BLOCK_END: /* The BLOCK_BEG/BLOCK_END notes should be eliminated when BB reordering is in the progress. */ case NOTE_INSN_EH_REGION_BEG: case NOTE_INSN_EH_REGION_END: /* Should never exist at BB duplication time. */ abort (); break; case NOTE_INSN_REPEATED_LINE_NUMBER: case NOTE_INSN_UNLIKELY_EXECUTED_CODE: emit_note_copy (insn); break; default: if (NOTE_LINE_NUMBER (insn) < 0) abort (); /* It is possible that no_line_number is set and the note won't be emitted. 
*/ emit_note_copy (insn); } break; default: abort (); } } insn = NEXT_INSN (last); delete_insn (last); return insn; } /* Create a duplicate of the basic block BB. */ /* We do not want to declare the function in a header file, since it should only be used through the cfghooks interface, and we do not want to move it to cfgrtl.c since it would require also moving quite a lot of related code. */ extern basic_block cfg_layout_duplicate_bb (basic_block); basic_block cfg_layout_duplicate_bb (basic_block bb) { rtx insn; basic_block new_bb; insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb)); new_bb = create_basic_block (insn, insn ? get_last_insn () : NULL, EXIT_BLOCK_PTR->prev_bb); if (bb->rbi->header) { insn = bb->rbi->header; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); insn = duplicate_insn_chain (bb->rbi->header, insn); if (insn) new_bb->rbi->header = unlink_insn_chain (insn, get_last_insn ()); } if (bb->rbi->footer) { insn = bb->rbi->footer; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); insn = duplicate_insn_chain (bb->rbi->footer, insn); if (insn) new_bb->rbi->footer = unlink_insn_chain (insn, get_last_insn ()); } if (bb->global_live_at_start) { new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); COPY_REG_SET (new_bb->global_live_at_start, bb->global_live_at_start); COPY_REG_SET (new_bb->global_live_at_end, bb->global_live_at_end); } return new_bb; } /* Main entry point to this module - initialize the data structures for CFG layout changes. It keeps LOOPS up-to-date if not null. */ void cfg_layout_initialize (void) { basic_block bb; /* Our algorithm depends on fact that there are no dead jumptables around the code. */ alloc_rbi_pool (); FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) initialize_bb_rbi (bb); cfg_layout_rtl_register_cfg_hooks (); record_effective_endpoints (); cleanup_cfg (CLEANUP_CFGLAYOUT); } /* Splits superblocks. */ void break_superblocks (void) { sbitmap superblocks; bool need = false; basic_block bb; superblocks = sbitmap_alloc (last_basic_block); sbitmap_zero (superblocks); FOR_EACH_BB (bb) if (bb->flags & BB_SUPERBLOCK) { bb->flags &= ~BB_SUPERBLOCK; SET_BIT (superblocks, bb->index); need = true; } if (need) { rebuild_jump_labels (get_insns ()); find_many_sub_basic_blocks (superblocks); } free (superblocks); } /* Finalize the changes: reorder insn list according to the sequence, enter compensation code, rebuild scope forest. */ void cfg_layout_finalize (void) { basic_block bb; #ifdef ENABLE_CHECKING verify_flow_info (); #endif rtl_register_cfg_hooks (); if (reload_completed #ifdef HAVE_epilogue && !HAVE_epilogue #endif ) fixup_fallthru_exit_predecessor (); fixup_reorder_chain (); #ifdef ENABLE_CHECKING verify_insn_chain (); #endif free_rbi_pool (); FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) bb->rbi = NULL; break_superblocks (); #ifdef ENABLE_CHECKING verify_flow_info (); #endif } /* Checks whether all N blocks in BBS array can be copied. */ bool can_copy_bbs_p (basic_block *bbs, unsigned n) { unsigned i; edge e; int ret = true; for (i = 0; i < n; i++) bbs[i]->rbi->duplicated = 1; for (i = 0; i < n; i++) { /* In case we should redirect abnormal edge during duplication, fail. 
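Abnormal edges (created, for example, by nonlocal gotos or computed jumps) cannot be
     redirected to a block copy, so if any block in BBS has an abnormal edge leading to
     another block in BBS, the whole duplication is refused up front.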
*/ for (e = bbs[i]->succ; e; e = e->succ_next) if ((e->flags & EDGE_ABNORMAL) && e->dest->rbi->duplicated) { ret = false; goto end; } if (!can_duplicate_block_p (bbs[i])) { ret = false; break; } } end: for (i = 0; i < n; i++) bbs[i]->rbi->duplicated = 0; return ret; } /* Duplicates N basic blocks stored in array BBS. Newly created basic blocks are placed into array NEW_BBS in the same order. Edges from basic blocks in BBS are also duplicated and copies of those of them that lead into BBS are redirected to appropriate newly created block. The function assigns bbs into loops (copy of basic block bb is assigned to bb->loop_father->copy loop, so this must be set up correctly in advance) and updates dominators locally (LOOPS structure that contains the information about dominators is passed to enable this). BASE is the superloop to that basic block belongs; if its header or latch is copied, we do not set the new blocks as header or latch. Created copies of N_EDGES edges in array EDGES are stored in array NEW_EDGES, also in the same order. */ void copy_bbs (basic_block *bbs, unsigned n, basic_block *new_bbs, edge *edges, unsigned n_edges, edge *new_edges, struct loop *base) { unsigned i, j; basic_block bb, new_bb, dom_bb; edge e; /* Duplicate bbs, update dominators, assign bbs to loops. */ for (i = 0; i < n; i++) { /* Duplicate. */ bb = bbs[i]; new_bb = new_bbs[i] = duplicate_block (bb, NULL); bb->rbi->duplicated = 1; /* Add to loop. */ add_bb_to_loop (new_bb, bb->loop_father->copy); /* Possibly set header. */ if (bb->loop_father->header == bb && bb->loop_father != base) new_bb->loop_father->header = new_bb; /* Or latch. */ if (bb->loop_father->latch == bb && bb->loop_father != base) new_bb->loop_father->latch = new_bb; } /* Set dominators. */ for (i = 0; i < n; i++) { bb = bbs[i]; new_bb = new_bbs[i]; dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb); if (dom_bb->rbi->duplicated) { dom_bb = dom_bb->rbi->copy; set_immediate_dominator (CDI_DOMINATORS, new_bb, dom_bb); } } /* Redirect edges. */ for (j = 0; j < n_edges; j++) new_edges[j] = NULL; for (i = 0; i < n; i++) { new_bb = new_bbs[i]; bb = bbs[i]; for (e = new_bb->succ; e; e = e->succ_next) { for (j = 0; j < n_edges; j++) if (edges[j] && edges[j]->src == bb && edges[j]->dest == e->dest) new_edges[j] = e; if (!e->dest->rbi->duplicated) continue; redirect_edge_and_branch_force (e, e->dest->rbi->copy); } } /* Clear information about duplicates. */ for (i = 0; i < n; i++) bbs[i]->rbi->duplicated = 0; } /* Type information for cfglayout.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. 
*/ const struct ggc_root_tab gt_ggc_r_gt_cfglayout_h[] = { { &file_locators_files, 1, sizeof (file_locators_files), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &file_locators_locs, 1, sizeof (file_locators_locs), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &line_locators_lines, 1, sizeof (line_locators_lines), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &line_locators_locs, 1, sizeof (line_locators_locs), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &block_locators_blocks, 1, sizeof (block_locators_blocks), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &block_locators_locs, 1, sizeof (block_locators_locs), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, LAST_GGC_ROOT_TAB }; /* Natural loop discovery code for GNU compiler. Copyright (C) 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Ratio of frequencies of edges so that one of more latch edges is considered to belong to inner loop with same header. */ #define HEAVY_EDGE_RATIO 8 #define HEADER_BLOCK(B) (* (int *) (B)->aux) #define LATCH_EDGE(E) (*(int *) (E)->aux) static void flow_loops_cfg_dump (const struct loops *, FILE *); static void flow_loop_entry_edges_find (struct loop *); static void flow_loop_exit_edges_find (struct loop *); static int flow_loop_nodes_find (basic_block, struct loop *); static void flow_loop_pre_header_scan (struct loop *); static basic_block flow_loop_pre_header_find (basic_block); static int flow_loop_level_compute (struct loop *); static int flow_loops_level_compute (struct loops *); static void establish_preds (struct loop *); static void canonicalize_loop_headers (void); static bool glb_enum_p (basic_block, void *); /* Dump loop related CFG information. */ static void flow_loops_cfg_dump (const struct loops *loops, FILE *file) { int i; basic_block bb; if (! loops->num || ! file) return; FOR_EACH_BB (bb) { edge succ; fprintf (file, ";; %d succs { ", bb->index); for (succ = bb->succ; succ; succ = succ->succ_next) fprintf (file, "%d ", succ->dest->index); fprintf (file, "}\n"); } /* Dump the DFS node order. */ if (loops->cfg.dfs_order) { fputs (";; DFS order: ", file); for (i = 0; i < n_basic_blocks; i++) fprintf (file, "%d ", loops->cfg.dfs_order[i]); fputs ("\n", file); } /* Dump the reverse completion node order. */ if (loops->cfg.rc_order) { fputs (";; RC order: ", file); for (i = 0; i < n_basic_blocks; i++) fprintf (file, "%d ", loops->cfg.rc_order[i]); fputs ("\n", file); } } /* Return nonzero if the nodes of LOOP are a subset of OUTER. */ bool flow_loop_nested_p (const struct loop *outer, const struct loop *loop) { return loop->depth > outer->depth && loop->pred[outer->depth] == outer; } /* Dump the loop information specified by LOOP to the stream FILE using auxiliary dump callback function LOOP_DUMP_AUX if non null. 
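A typical debugging call (an illustrative sketch, not one made in this file) is

       flow_loop_dump (loop, stderr, NULL, 0);

   which prints the header, latch, pre-header, depth, entry and exit edges and member
   blocks of LOOP without invoking any per-loop callback.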
*/ void flow_loop_dump (const struct loop *loop, FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose) { basic_block *bbs; unsigned i; if (! loop || ! loop->header) return; fprintf (file, ";;\n;; Loop %d:%s\n", loop->num, loop->invalid ? " invalid" : ""); fprintf (file, ";; header %d, latch %d, pre-header %d\n", loop->header->index, loop->latch->index, loop->pre_header ? loop->pre_header->index : -1); fprintf (file, ";; depth %d, level %d, outer %ld\n", loop->depth, loop->level, (long) (loop->outer ? loop->outer->num : -1)); if (loop->pre_header_edges) flow_edge_list_print (";; pre-header edges", loop->pre_header_edges, loop->num_pre_header_edges, file); flow_edge_list_print (";; entry edges", loop->entry_edges, loop->num_entries, file); fprintf (file, ";; nodes:"); bbs = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) fprintf (file, " %d", bbs[i]->index); free (bbs); fprintf (file, "\n"); flow_edge_list_print (";; exit edges", loop->exit_edges, loop->num_exits, file); if (loop_dump_aux) loop_dump_aux (loop, file, verbose); } /* Dump the loop information specified by LOOPS to the stream FILE, using auxiliary dump callback function LOOP_DUMP_AUX if non null. */ void flow_loops_dump (const struct loops *loops, FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose) { int i; int num_loops; num_loops = loops->num; if (! num_loops || ! file) return; fprintf (file, ";; %d loops found, %d levels\n", num_loops, loops->levels); for (i = 0; i < num_loops; i++) { struct loop *loop = loops->parray[i]; if (!loop) continue; flow_loop_dump (loop, file, loop_dump_aux, verbose); } if (verbose) flow_loops_cfg_dump (loops, file); } /* Free data allocated for LOOP. */ void flow_loop_free (struct loop *loop) { if (loop->pre_header_edges) free (loop->pre_header_edges); if (loop->entry_edges) free (loop->entry_edges); if (loop->exit_edges) free (loop->exit_edges); if (loop->pred) free (loop->pred); free (loop); } /* Free all the memory allocated for LOOPS. */ void flow_loops_free (struct loops *loops) { if (loops->parray) { unsigned i; if (! loops->num) abort (); /* Free the loop descriptors. */ for (i = 0; i < loops->num; i++) { struct loop *loop = loops->parray[i]; if (!loop) continue; flow_loop_free (loop); } free (loops->parray); loops->parray = NULL; if (loops->cfg.dfs_order) free (loops->cfg.dfs_order); if (loops->cfg.rc_order) free (loops->cfg.rc_order); } } /* Find the entry edges into the LOOP. */ static void flow_loop_entry_edges_find (struct loop *loop) { edge e; int num_entries; num_entries = 0; for (e = loop->header->pred; e; e = e->pred_next) { if (flow_loop_outside_edge_p (loop, e)) num_entries++; } if (! num_entries) abort (); loop->entry_edges = xmalloc (num_entries * sizeof (edge *)); num_entries = 0; for (e = loop->header->pred; e; e = e->pred_next) { if (flow_loop_outside_edge_p (loop, e)) loop->entry_edges[num_entries++] = e; } loop->num_entries = num_entries; } /* Find the exit edges from the LOOP. */ static void flow_loop_exit_edges_find (struct loop *loop) { edge e; basic_block node, *bbs; unsigned num_exits, i; loop->exit_edges = NULL; loop->num_exits = 0; /* Check all nodes within the loop to see if there are any successors not in the loop. Note that a node may have multiple exiting edges. 
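For example (hypothetical, not from this file), a block ending in a conditional jump
     whose taken and fallthru destinations both lie outside the loop contributes two exit
     edges.  The code below therefore makes two passes over the loop body: the first counts
     the exiting edges so the array can be sized exactly, the second fills it in.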
*/ num_exits = 0; bbs = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { node = bbs[i]; for (e = node->succ; e; e = e->succ_next) { basic_block dest = e->dest; if (!flow_bb_inside_loop_p (loop, dest)) num_exits++; } } if (! num_exits) { free (bbs); return; } loop->exit_edges = xmalloc (num_exits * sizeof (edge *)); /* Store all exiting edges into an array. */ num_exits = 0; for (i = 0; i < loop->num_nodes; i++) { node = bbs[i]; for (e = node->succ; e; e = e->succ_next) { basic_block dest = e->dest; if (!flow_bb_inside_loop_p (loop, dest)) loop->exit_edges[num_exits++] = e; } } free (bbs); loop->num_exits = num_exits; } /* Find the nodes contained within the LOOP with header HEADER. Return the number of nodes within the loop. */ static int flow_loop_nodes_find (basic_block header, struct loop *loop) { basic_block *stack; int sp; int num_nodes = 1; header->loop_father = loop; header->loop_depth = loop->depth; if (loop->latch->loop_father != loop) { stack = xmalloc (n_basic_blocks * sizeof (basic_block)); sp = 0; num_nodes++; stack[sp++] = loop->latch; loop->latch->loop_father = loop; loop->latch->loop_depth = loop->depth; while (sp) { basic_block node; edge e; node = stack[--sp]; for (e = node->pred; e; e = e->pred_next) { basic_block ancestor = e->src; if (ancestor != ENTRY_BLOCK_PTR && ancestor->loop_father != loop) { ancestor->loop_father = loop; ancestor->loop_depth = loop->depth; num_nodes++; stack[sp++] = ancestor; } } } free (stack); } return num_nodes; } /* Find the root node of the loop pre-header extended basic block and the edges along the trace from the root node to the loop header. */ static void flow_loop_pre_header_scan (struct loop *loop) { int num; basic_block ebb; edge e; loop->num_pre_header_edges = 0; if (loop->num_entries != 1) return; ebb = loop->entry_edges[0]->src; if (ebb == ENTRY_BLOCK_PTR) return; /* Count number of edges along trace from loop header to root of pre-header extended basic block. Usually this is only one or two edges. */ for (num = 1; ebb->pred->src != ENTRY_BLOCK_PTR && ! ebb->pred->pred_next; num++) ebb = ebb->pred->src; loop->pre_header_edges = xmalloc (num * sizeof (edge)); loop->num_pre_header_edges = num; /* Store edges in order that they are followed. The source of the first edge is the root node of the pre-header extended basic block and the destination of the last last edge is the loop header. */ for (e = loop->entry_edges[0]; num; e = e->src->pred) loop->pre_header_edges[--num] = e; } /* Return the block for the pre-header of the loop with header HEADER. Return NULL if there is no pre-header. */ static basic_block flow_loop_pre_header_find (basic_block header) { basic_block pre_header; edge e; /* If block p is a predecessor of the header and is the only block that the header does not dominate, then it is the pre-header. */ pre_header = NULL; for (e = header->pred; e; e = e->pred_next) { basic_block node = e->src; if (node != ENTRY_BLOCK_PTR && ! dominated_by_p (CDI_DOMINATORS, node, header)) { if (pre_header == NULL) pre_header = node; else { /* There are multiple edges into the header from outside the loop so there is no pre-header block. 
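For example (a hypothetical CFG, not from this file), if blocks A and B both branch to
	     the loop header from outside the loop, there is no single block through which every
	     entry passes, hence no pre-header; flow_loop_scan then records a NULL
	     loop->pre_header.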
*/ pre_header = NULL; break; } } } return pre_header; } static void establish_preds (struct loop *loop) { struct loop *ploop, *father = loop->outer; loop->depth = father->depth + 1; if (loop->pred) free (loop->pred); loop->pred = xmalloc (sizeof (struct loop *) * loop->depth); memcpy (loop->pred, father->pred, sizeof (struct loop *) * father->depth); loop->pred[father->depth] = father; for (ploop = loop->inner; ploop; ploop = ploop->next) establish_preds (ploop); } /* Add LOOP to the loop hierarchy tree where FATHER is father of the added loop. If LOOP has some children, take care of that their pred field will be initialized correctly. */ void flow_loop_tree_node_add (struct loop *father, struct loop *loop) { loop->next = father->inner; father->inner = loop; loop->outer = father; establish_preds (loop); } /* Remove LOOP from the loop hierarchy tree. */ void flow_loop_tree_node_remove (struct loop *loop) { struct loop *prev, *father; father = loop->outer; loop->outer = NULL; /* Remove loop from the list of sons. */ if (father->inner == loop) father->inner = loop->next; else { for (prev = father->inner; prev->next != loop; prev = prev->next); prev->next = loop->next; } loop->depth = -1; free (loop->pred); loop->pred = NULL; } /* Helper function to compute loop nesting depth and enclosed loop level for the natural loop specified by LOOP. Returns the loop level. */ static int flow_loop_level_compute (struct loop *loop) { struct loop *inner; int level = 1; if (! loop) return 0; /* Traverse loop tree assigning depth and computing level as the maximum level of all the inner loops of this loop. The loop level is equivalent to the height of the loop in the loop tree and corresponds to the number of enclosed loop levels (including itself). */ for (inner = loop->inner; inner; inner = inner->next) { int ilevel = flow_loop_level_compute (inner) + 1; if (ilevel > level) level = ilevel; } loop->level = level; return level; } /* Compute the loop nesting depth and enclosed loop level for the loop hierarchy tree specified by LOOPS. Return the maximum enclosed loop level. */ static int flow_loops_level_compute (struct loops *loops) { return flow_loop_level_compute (loops->tree_root); } /* Scan a single natural loop specified by LOOP collecting information about it specified by FLAGS. */ int flow_loop_scan (struct loop *loop, int flags) { if (flags & LOOP_ENTRY_EDGES) { /* Find edges which enter the loop header. Note that the entry edges should only enter the header of a natural loop. */ flow_loop_entry_edges_find (loop); } if (flags & LOOP_EXIT_EDGES) { /* Find edges which exit the loop. */ flow_loop_exit_edges_find (loop); } if (flags & LOOP_PRE_HEADER) { /* Look to see if the loop has a pre-header node. */ loop->pre_header = flow_loop_pre_header_find (loop->header); /* Find the blocks within the extended basic block of the loop pre-header. */ flow_loop_pre_header_scan (loop); } return 1; } /* A callback to update latch and header info for basic block JUMP created by redirecting an edge. */ static void update_latch_info (basic_block jump) { alloc_aux_for_block (jump, sizeof (int)); HEADER_BLOCK (jump) = 0; alloc_aux_for_edge (jump->pred, sizeof (int)); LATCH_EDGE (jump->pred) = 0; } /* A callback for make_forwarder block, to redirect all edges except for MFB_KJ_EDGE to the entry part. E is the edge for that we should decide whether to redirect it. 
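It is used together with the static variable MFB_KJ_EDGE declared below;
   canonicalize_loop_headers, for instance, does essentially

       mfb_kj_edge = heavy;
       tmp_edge = make_forwarder_block (header, mfb_keep_just, update_latch_info);

   so that every predecessor edge of HEADER except the heavy latch edge is redirected to
   the newly created entry part (a condensed restatement of the call made later in this
   file, not additional code).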
*/ static edge mfb_kj_edge; static bool mfb_keep_just (edge e) { return e != mfb_kj_edge; } /* A callback for make_forwarder block, to redirect the latch edges into an entry part. E is the edge for that we should decide whether to redirect it. */ static bool mfb_keep_nonlatch (edge e) { return LATCH_EDGE (e); } /* Takes care of merging natural loops with shared headers. */ static void canonicalize_loop_headers (void) { basic_block header; edge e; /* Compute the dominators. */ calculate_dominance_info (CDI_DOMINATORS); alloc_aux_for_blocks (sizeof (int)); alloc_aux_for_edges (sizeof (int)); /* Split blocks so that each loop has only single latch. */ FOR_EACH_BB (header) { int num_latches = 0; int have_abnormal_edge = 0; for (e = header->pred; e; e = e->pred_next) { basic_block latch = e->src; if (e->flags & EDGE_ABNORMAL) have_abnormal_edge = 1; if (latch != ENTRY_BLOCK_PTR && dominated_by_p (CDI_DOMINATORS, latch, header)) { num_latches++; LATCH_EDGE (e) = 1; } } if (have_abnormal_edge) HEADER_BLOCK (header) = 0; else HEADER_BLOCK (header) = num_latches; } free_dominance_info (CDI_DOMINATORS); if (HEADER_BLOCK (ENTRY_BLOCK_PTR->succ->dest)) { basic_block bb; /* We could not redirect edges freely here. On the other hand, we can simply split the edge from entry block. */ bb = split_edge (ENTRY_BLOCK_PTR->succ); alloc_aux_for_edge (bb->succ, sizeof (int)); LATCH_EDGE (bb->succ) = 0; alloc_aux_for_block (bb, sizeof (int)); HEADER_BLOCK (bb) = 0; } FOR_EACH_BB (header) { int max_freq, is_heavy; edge heavy, tmp_edge; if (HEADER_BLOCK (header) <= 1) continue; /* Find a heavy edge. */ is_heavy = 1; heavy = NULL; max_freq = 0; for (e = header->pred; e; e = e->pred_next) if (LATCH_EDGE (e) && EDGE_FREQUENCY (e) > max_freq) max_freq = EDGE_FREQUENCY (e); for (e = header->pred; e; e = e->pred_next) if (LATCH_EDGE (e) && EDGE_FREQUENCY (e) >= max_freq / HEAVY_EDGE_RATIO) { if (heavy) { is_heavy = 0; break; } else heavy = e; } if (is_heavy) { /* Split out the heavy edge, and create inner loop for it. */ mfb_kj_edge = heavy; tmp_edge = make_forwarder_block (header, mfb_keep_just, update_latch_info); alloc_aux_for_block (tmp_edge->dest, sizeof (int)); HEADER_BLOCK (tmp_edge->dest) = 1; alloc_aux_for_edge (tmp_edge, sizeof (int)); LATCH_EDGE (tmp_edge) = 0; HEADER_BLOCK (header)--; } if (HEADER_BLOCK (header) > 1) { /* Create a new latch block. */ tmp_edge = make_forwarder_block (header, mfb_keep_nonlatch, update_latch_info); alloc_aux_for_block (tmp_edge->dest, sizeof (int)); HEADER_BLOCK (tmp_edge->src) = 0; HEADER_BLOCK (tmp_edge->dest) = 1; alloc_aux_for_edge (tmp_edge, sizeof (int)); LATCH_EDGE (tmp_edge) = 1; } } free_aux_for_blocks (); free_aux_for_edges (); } /* Find all the natural loops in the function and save in LOOPS structure and recalculate loop_depth information in basic block structures. FLAGS controls which loop information is collected. Return the number of natural loops found. */ int flow_loops_find (struct loops *loops, int flags) { int i; int b; int num_loops; edge e; sbitmap headers; int *dfs_order; int *rc_order; basic_block header; basic_block bb; /* This function cannot be repeatedly called with different flags to build up the loop information. The loop tree must always be built if this function is called. */ if (! (flags & LOOP_TREE)) abort (); memset (loops, 0, sizeof *loops); /* Taking care of this degenerate case makes the rest of this code simpler. */ if (n_basic_blocks == 0) return 0; dfs_order = NULL; rc_order = NULL; /* Join loops with shared headers. 
*/ canonicalize_loop_headers (); /* Compute the dominators. */ calculate_dominance_info (CDI_DOMINATORS); /* Count the number of loop headers. This should be the same as the number of natural loops. */ headers = sbitmap_alloc (last_basic_block); sbitmap_zero (headers); num_loops = 0; FOR_EACH_BB (header) { int more_latches = 0; header->loop_depth = 0; /* If we have an abnormal predecessor, do not consider the loop (not worth the problems). */ for (e = header->pred; e; e = e->pred_next) if (e->flags & EDGE_ABNORMAL) break; if (e) continue; for (e = header->pred; e; e = e->pred_next) { basic_block latch = e->src; if (e->flags & EDGE_ABNORMAL) abort (); /* Look for back edges where a predecessor is dominated by this block. A natural loop has a single entry node (header) that dominates all the nodes in the loop. It also has single back edge to the header from a latch node. */ if (latch != ENTRY_BLOCK_PTR && dominated_by_p (CDI_DOMINATORS, latch, header)) { /* Shared headers should be eliminated by now. */ if (more_latches) abort (); more_latches = 1; SET_BIT (headers, header->index); num_loops++; } } } /* Allocate loop structures. */ loops->parray = xcalloc (num_loops + 1, sizeof (struct loop *)); /* Dummy loop containing whole function. */ loops->parray[0] = xcalloc (1, sizeof (struct loop)); loops->parray[0]->next = NULL; loops->parray[0]->inner = NULL; loops->parray[0]->outer = NULL; loops->parray[0]->depth = 0; loops->parray[0]->pred = NULL; loops->parray[0]->num_nodes = n_basic_blocks + 2; loops->parray[0]->latch = EXIT_BLOCK_PTR; loops->parray[0]->header = ENTRY_BLOCK_PTR; ENTRY_BLOCK_PTR->loop_father = loops->parray[0]; EXIT_BLOCK_PTR->loop_father = loops->parray[0]; loops->tree_root = loops->parray[0]; /* Find and record information about all the natural loops in the CFG. */ loops->num = 1; FOR_EACH_BB (bb) bb->loop_father = loops->tree_root; if (num_loops) { /* Compute depth first search order of the CFG so that outer natural loops will be found before inner natural loops. */ dfs_order = xmalloc (n_basic_blocks * sizeof (int)); rc_order = xmalloc (n_basic_blocks * sizeof (int)); flow_depth_first_order_compute (dfs_order, rc_order); /* Save CFG derived information to avoid recomputing it. */ loops->cfg.dfs_order = dfs_order; loops->cfg.rc_order = rc_order; num_loops = 1; for (b = 0; b < n_basic_blocks; b++) { struct loop *loop; /* Search the nodes of the CFG in reverse completion order so that we can find outer loops first. */ if (!TEST_BIT (headers, rc_order[b])) continue; header = BASIC_BLOCK (rc_order[b]); loop = loops->parray[num_loops] = xcalloc (1, sizeof (struct loop)); loop->header = header; loop->num = num_loops; num_loops++; /* Look for the latch for this header block. */ for (e = header->pred; e; e = e->pred_next) { basic_block latch = e->src; if (latch != ENTRY_BLOCK_PTR && dominated_by_p (CDI_DOMINATORS, latch, header)) { loop->latch = latch; break; } } flow_loop_tree_node_add (header->loop_father, loop); loop->num_nodes = flow_loop_nodes_find (loop->header, loop); } /* Assign the loop nesting depth and enclosed loop level for each loop. */ loops->levels = flow_loops_level_compute (loops); /* Scan the loops. */ for (i = 1; i < num_loops; i++) flow_loop_scan (loops->parray[i], flags); loops->num = num_loops; } else { free_dominance_info (CDI_DOMINATORS); } sbitmap_free (headers); loops->state = 0; #ifdef ENABLE_CHECKING verify_flow_info (); verify_loop_structure (loops); #endif return loops->num; } /* Update the information regarding the loops in the CFG specified by LOOPS. 
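A caller whose loop tree has become stale can simply do (an illustrative sketch, not a
   call made in this file)

       flow_loops_update (&loops, LOOP_TREE | LOOP_PRE_HEADER);

   any existing structures are released through flow_loops_free and flow_loops_find
   rebuilds everything from scratch; as with flow_loops_find, FLAGS must include LOOP_TREE.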
*/ int flow_loops_update (struct loops *loops, int flags) { /* One day we may want to update the current loop data. For now throw away the old stuff and rebuild what we need. */ if (loops->parray) flow_loops_free (loops); return flow_loops_find (loops, flags); } /* Return nonzero if basic block BB belongs to LOOP. */ bool flow_bb_inside_loop_p (const struct loop *loop, const basic_block bb) { struct loop *source_loop; if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR) return 0; source_loop = bb->loop_father; return loop == source_loop || flow_loop_nested_p (loop, source_loop); } /* Return nonzero if edge E enters header of LOOP from outside of LOOP. */ bool flow_loop_outside_edge_p (const struct loop *loop, edge e) { if (e->dest != loop->header) abort (); return !flow_bb_inside_loop_p (loop, e->src); } /* Enumeration predicate for get_loop_body. */ static bool glb_enum_p (basic_block bb, void *glb_header) { return bb != (basic_block) glb_header; } /* Gets basic blocks of a LOOP. Header is the 0-th block, rest is in dfs order against direction of edges from latch. Specially, if header != latch, latch is the 1-st block. */ basic_block * get_loop_body (const struct loop *loop) { basic_block *tovisit, bb; unsigned tv = 0; if (!loop->num_nodes) abort (); tovisit = xcalloc (loop->num_nodes, sizeof (basic_block)); tovisit[tv++] = loop->header; if (loop->latch == EXIT_BLOCK_PTR) { /* There may be blocks unreachable from EXIT_BLOCK. */ if (loop->num_nodes != (unsigned) n_basic_blocks + 2) abort (); FOR_EACH_BB (bb) tovisit[tv++] = bb; tovisit[tv++] = EXIT_BLOCK_PTR; } else if (loop->latch != loop->header) { tv = dfs_enumerate_from (loop->latch, 1, glb_enum_p, tovisit + 1, loop->num_nodes - 1, loop->header) + 1; } if (tv != loop->num_nodes) abort (); return tovisit; } /* Fills dominance descendants inside LOOP of the basic block BB into array TOVISIT from index *TV. */ static void fill_sons_in_loop (const struct loop *loop, basic_block bb, basic_block *tovisit, int *tv) { basic_block son, postpone = NULL; tovisit[(*tv)++] = bb; for (son = first_dom_son (CDI_DOMINATORS, bb); son; son = next_dom_son (CDI_DOMINATORS, son)) { if (!flow_bb_inside_loop_p (loop, son)) continue; if (dominated_by_p (CDI_DOMINATORS, loop->latch, son)) { postpone = son; continue; } fill_sons_in_loop (loop, son, tovisit, tv); } if (postpone) fill_sons_in_loop (loop, postpone, tovisit, tv); } /* Gets body of a LOOP (that must be different from the outermost loop) sorted by dominance relation. Additionally, if a basic block s dominates the latch, then only blocks dominated by s are be after it. */ basic_block * get_loop_body_in_dom_order (const struct loop *loop) { basic_block *tovisit; int tv; if (!loop->num_nodes) abort (); tovisit = xcalloc (loop->num_nodes, sizeof (basic_block)); if (loop->latch == EXIT_BLOCK_PTR) abort (); tv = 0; fill_sons_in_loop (loop, loop->header, tovisit, &tv); if (tv != (int) loop->num_nodes) abort (); return tovisit; } /* Gets exit edges of a LOOP, returning their number in N_EDGES. 
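The returned array is allocated with xmalloc, so a typical caller (an illustrative
   sketch, not from this file) looks like

       unsigned n_exits;
       edge *exits = get_loop_exit_edges (loop, &n_exits);
       ... inspect exits[0] through exits[n_exits - 1] ...
       free (exits);

   Note that the function aborts when the loop latch is the exit block, so it cannot be
   applied to the dummy outermost loop.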
*/ edge * get_loop_exit_edges (const struct loop *loop, unsigned int *n_edges) { edge *edges, e; unsigned i, n; basic_block * body; if (loop->latch == EXIT_BLOCK_PTR) abort (); body = get_loop_body (loop); n = 0; for (i = 0; i < loop->num_nodes; i++) for (e = body[i]->succ; e; e = e->succ_next) if (!flow_bb_inside_loop_p (loop, e->dest)) n++; edges = xmalloc (n * sizeof (edge)); *n_edges = n; n = 0; for (i = 0; i < loop->num_nodes; i++) for (e = body[i]->succ; e; e = e->succ_next) if (!flow_bb_inside_loop_p (loop, e->dest)) edges[n++] = e; free (body); return edges; } /* Counts the number of conditional branches inside LOOP. */ unsigned num_loop_branches (const struct loop *loop) { unsigned i, n; basic_block * body; if (loop->latch == EXIT_BLOCK_PTR) abort (); body = get_loop_body (loop); n = 0; for (i = 0; i < loop->num_nodes; i++) if (body[i]->succ && body[i]->succ->succ_next) n++; free (body); return n; } /* Adds basic block BB to LOOP. */ void add_bb_to_loop (basic_block bb, struct loop *loop) { int i; bb->loop_father = loop; bb->loop_depth = loop->depth; loop->num_nodes++; for (i = 0; i < loop->depth; i++) loop->pred[i]->num_nodes++; } /* Remove basic block BB from loops. */ void remove_bb_from_loops (basic_block bb) { int i; struct loop *loop = bb->loop_father; loop->num_nodes--; for (i = 0; i < loop->depth; i++) loop->pred[i]->num_nodes--; bb->loop_father = NULL; bb->loop_depth = 0; } /* Finds nearest common ancestor in loop tree for given loops. */ struct loop * find_common_loop (struct loop *loop_s, struct loop *loop_d) { if (!loop_s) return loop_d; if (!loop_d) return loop_s; if (loop_s->depth < loop_d->depth) loop_d = loop_d->pred[loop_s->depth]; else if (loop_s->depth > loop_d->depth) loop_s = loop_s->pred[loop_d->depth]; while (loop_s != loop_d) { loop_s = loop_s->outer; loop_d = loop_d->outer; } return loop_s; } /* Cancels the LOOP; it must be innermost one. */ void cancel_loop (struct loops *loops, struct loop *loop) { basic_block *bbs; unsigned i; if (loop->inner) abort (); /* Move blocks up one level (they should be removed as soon as possible). */ bbs = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) bbs[i]->loop_father = loop->outer; /* Remove the loop from structure. */ flow_loop_tree_node_remove (loop); /* Remove loop from loops array. */ loops->parray[loop->num] = NULL; /* Free loop data. */ flow_loop_free (loop); } /* Cancels LOOP and all its subloops. */ void cancel_loop_tree (struct loops *loops, struct loop *loop) { while (loop->inner) cancel_loop_tree (loops, loop->inner); cancel_loop (loops, loop); } /* Checks that LOOPS are all right: -- sizes of loops are all right -- results of get_loop_body really belong to the loop -- loop header have just single entry edge and single latch edge -- loop latches have only single successor that is header of their loop -- irreducible loops are correctly marked */ void verify_loop_structure (struct loops *loops) { unsigned *sizes, i, j; sbitmap irreds; basic_block *bbs, bb; struct loop *loop; int err = 0; edge e; /* Check sizes. */ sizes = xcalloc (loops->num, sizeof (int)); sizes[0] = 2; FOR_EACH_BB (bb) for (loop = bb->loop_father; loop; loop = loop->outer) sizes[loop->num]++; for (i = 0; i < loops->num; i++) { if (!loops->parray[i]) continue; if (loops->parray[i]->num_nodes != sizes[i]) { error ("Size of loop %d should be %d, not %d.", i, sizes[i], loops->parray[i]->num_nodes); err = 1; } } free (sizes); /* Check get_loop_body. 
*/ for (i = 1; i < loops->num; i++) { loop = loops->parray[i]; if (!loop) continue; bbs = get_loop_body (loop); for (j = 0; j < loop->num_nodes; j++) if (!flow_bb_inside_loop_p (loop, bbs[j])) { error ("Bb %d do not belong to loop %d.", bbs[j]->index, i); err = 1; } free (bbs); } /* Check headers and latches. */ for (i = 1; i < loops->num; i++) { loop = loops->parray[i]; if (!loop) continue; if ((loops->state & LOOPS_HAVE_PREHEADERS) && (!loop->header->pred->pred_next || loop->header->pred->pred_next->pred_next)) { error ("Loop %d's header does not have exactly 2 entries.", i); err = 1; } if (loops->state & LOOPS_HAVE_SIMPLE_LATCHES) { if (!loop->latch->succ || loop->latch->succ->succ_next) { error ("Loop %d's latch does not have exactly 1 successor.", i); err = 1; } if (loop->latch->succ->dest != loop->header) { error ("Loop %d's latch does not have header as successor.", i); err = 1; } if (loop->latch->loop_father != loop) { error ("Loop %d's latch does not belong directly to it.", i); err = 1; } } if (loop->header->loop_father != loop) { error ("Loop %d's header does not belong directly to it.", i); err = 1; } if ((loops->state & LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS) && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP)) { error ("Loop %d's latch is marked as part of irreducible region.", i); err = 1; } } /* Check irreducible loops. */ if (loops->state & LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS) { /* Record old info. */ irreds = sbitmap_alloc (last_basic_block); FOR_EACH_BB (bb) { if (bb->flags & BB_IRREDUCIBLE_LOOP) SET_BIT (irreds, bb->index); else RESET_BIT (irreds, bb->index); for (e = bb->succ; e; e = e->succ_next) if (e->flags & EDGE_IRREDUCIBLE_LOOP) e->flags |= EDGE_ALL_FLAGS + 1; } /* Recount it. */ mark_irreducible_loops (loops); /* Compare. */ FOR_EACH_BB (bb) { if ((bb->flags & BB_IRREDUCIBLE_LOOP) && !TEST_BIT (irreds, bb->index)) { error ("Basic block %d should be marked irreducible.", bb->index); err = 1; } else if (!(bb->flags & BB_IRREDUCIBLE_LOOP) && TEST_BIT (irreds, bb->index)) { error ("Basic block %d should not be marked irreducible.", bb->index); err = 1; } for (e = bb->succ; e; e = e->succ_next) { if ((e->flags & EDGE_IRREDUCIBLE_LOOP) && !(e->flags & (EDGE_ALL_FLAGS + 1))) { error ("Edge from %d to %d should be marked irreducible.", e->src->index, e->dest->index); err = 1; } else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP) && (e->flags & (EDGE_ALL_FLAGS + 1))) { error ("Edge from %d to %d should not be marked irreducible.", e->src->index, e->dest->index); err = 1; } e->flags &= ~(EDGE_ALL_FLAGS + 1); } } free (irreds); } if (err) abort (); } /* Returns latch edge of LOOP. */ edge loop_latch_edge (const struct loop *loop) { edge e; for (e = loop->header->pred; e->src != loop->latch; e = e->pred_next) continue; return e; } /* Returns preheader edge of LOOP. */ edge loop_preheader_edge (const struct loop *loop) { edge e; for (e = loop->header->pred; e->src == loop->latch; e = e->pred_next) continue; return e; } /* Natural loop analysis code for GNU compiler. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Checks whether BB is executed exactly once in each LOOP iteration. */ bool just_once_each_iteration_p (struct loop *loop, basic_block bb) { /* It must be executed at least once each iteration. */ if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb)) return false; /* And just once. */ if (bb->loop_father != loop) return false; /* But this was not enough. We might have some irreducible loop here. */ if (bb->flags & BB_IRREDUCIBLE_LOOP) return false; return true; } /* Structure representing edge of a graph. */ struct edge { int src, dest; /* Source and destination. */ struct edge *pred_next, *succ_next; /* Next edge in predecessor and successor lists. */ void *data; /* Data attached to the edge. */ }; /* Structure representing vertex of a graph. */ struct vertex { struct edge *pred, *succ; /* Lists of predecessors and successors. */ int component; /* Number of dfs restarts before reaching the vertex. */ int post; /* Postorder number. */ }; /* Structure representing a graph. */ struct graph { int n_vertices; /* Number of vertices. */ struct vertex *vertices; /* The vertices. */ }; /* Dumps graph G into F. */ extern void dump_graph (FILE *, struct graph *); void dump_graph (FILE *f, struct graph *g) { int i; struct edge *e; for (i = 0; i < g->n_vertices; i++) { if (!g->vertices[i].pred && !g->vertices[i].succ) continue; fprintf (f, "%d (%d)\t<-", i, g->vertices[i].component); for (e = g->vertices[i].pred; e; e = e->pred_next) fprintf (f, " %d", e->src); fprintf (f, "\n"); fprintf (f, "\t->"); for (e = g->vertices[i].succ; e; e = e->succ_next) fprintf (f, " %d", e->dest); fprintf (f, "\n"); } } /* Creates a new graph with N_VERTICES vertices. */ static struct graph * new_graph (int n_vertices) { struct graph *g = xmalloc (sizeof (struct graph)); g->n_vertices = n_vertices; g->vertices = xcalloc (n_vertices, sizeof (struct vertex)); return g; } /* Adds an edge from F to T to graph G, with DATA attached. */ static void add_edge (struct graph *g, int f, int t, void *data) { struct edge *e = xmalloc (sizeof (struct edge)); e->src = f; e->dest = t; e->data = data; e->pred_next = g->vertices[t].pred; g->vertices[t].pred = e; e->succ_next = g->vertices[f].succ; g->vertices[f].succ = e; } /* Runs dfs search over vertices of G, from NQ vertices in queue QS. The vertices in postorder are stored into QT. If FORWARD is false, backward dfs is run. */ static void dfs (struct graph *g, int *qs, int nq, int *qt, bool forward) { int i, tick = 0, v, comp = 0, top; struct edge *e; struct edge **stack = xmalloc (sizeof (struct edge *) * g->n_vertices); for (i = 0; i < g->n_vertices; i++) { g->vertices[i].component = -1; g->vertices[i].post = -1; } #define FST_EDGE(V) (forward ? g->vertices[(V)].succ : g->vertices[(V)].pred) #define NEXT_EDGE(E) (forward ? (E)->succ_next : (E)->pred_next) #define EDGE_SRC(E) (forward ? (E)->src : (E)->dest) #define EDGE_DEST(E) (forward ? 
(E)->dest : (E)->src) for (i = 0; i < nq; i++) { v = qs[i]; if (g->vertices[v].post != -1) continue; g->vertices[v].component = comp++; e = FST_EDGE (v); top = 0; while (1) { while (e && g->vertices[EDGE_DEST (e)].component != -1) e = NEXT_EDGE (e); if (!e) { if (qt) qt[tick] = v; g->vertices[v].post = tick++; if (!top) break; e = stack[--top]; v = EDGE_SRC (e); e = NEXT_EDGE (e); continue; } stack[top++] = e; v = EDGE_DEST (e); e = FST_EDGE (v); g->vertices[v].component = comp - 1; } } free (stack); } /* Marks the edge E in graph G irreducible if it connects two vertices in the same scc. */ static void check_irred (struct graph *g, struct edge *e) { edge real = e->data; /* All edges should lead from a component with higher number to the one with lower one. */ if (g->vertices[e->src].component < g->vertices[e->dest].component) abort (); if (g->vertices[e->src].component != g->vertices[e->dest].component) return; real->flags |= EDGE_IRREDUCIBLE_LOOP; if (flow_bb_inside_loop_p (real->src->loop_father, real->dest)) real->src->flags |= BB_IRREDUCIBLE_LOOP; } /* Runs CALLBACK for all edges in G. */ static void for_each_edge (struct graph *g, void (callback) (struct graph *, struct edge *)) { struct edge *e; int i; for (i = 0; i < g->n_vertices; i++) for (e = g->vertices[i].succ; e; e = e->succ_next) callback (g, e); } /* Releases the memory occupied by G. */ static void free_graph (struct graph *g) { struct edge *e, *n; int i; for (i = 0; i < g->n_vertices; i++) for (e = g->vertices[i].succ; e; e = n) { n = e->succ_next; free (e); } free (g->vertices); free (g); } /* Marks blocks and edges that are part of non-recognized loops; i.e. we throw away all latch edges and mark blocks inside any remaining cycle. Everything is a bit complicated due to fact we do not want to do this for parts of cycles that only "pass" through some loop -- i.e. for each cycle, we want to mark blocks that belong directly to innermost loop containing the whole cycle. LOOPS is the loop tree. */ #define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block) #define BB_REPR(BB) ((BB)->index + 1) void mark_irreducible_loops (struct loops *loops) { basic_block act; edge e; int i, src, dest; struct graph *g; int *queue1 = xmalloc ((last_basic_block + loops->num) * sizeof (int)); int *queue2 = xmalloc ((last_basic_block + loops->num) * sizeof (int)); int nq, depth; struct loop *cloop; /* Reset the flags. */ FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { act->flags &= ~BB_IRREDUCIBLE_LOOP; for (e = act->succ; e; e = e->succ_next) e->flags &= ~EDGE_IRREDUCIBLE_LOOP; } /* Create the edge lists. */ g = new_graph (last_basic_block + loops->num); FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) for (e = act->succ; e; e = e->succ_next) { /* Ignore edges to exit. */ if (e->dest == EXIT_BLOCK_PTR) continue; /* And latch edges. */ if (e->dest->loop_father->header == e->dest && e->dest->loop_father->latch == act) continue; /* Edges inside a single loop should be left where they are. Edges to subloop headers should lead to representative of the subloop, but from the same place. Edges exiting loops should lead from representative of the son of nearest common ancestor of the loops in that act lays. 
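   As a concrete (made-up) example: let ACT lie in loop L3, whose superloops
   are L1 (depth 1) and L2 (depth 2), and let an edge from ACT target an
   ordinary block of L1.  The common loop of the two endpoints is L1, so
   DEPTH below becomes 2 and CLOOP is L2 -- the son of the common ancestor
   on ACT's side, i.e. the outermost loop this edge exits -- and the edge is
   therefore added from LOOP_REPR (L2) to BB_REPR of the target block (had
   the target been a loop header, LOOP_REPR of its loop would be used
   instead).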
*/ src = BB_REPR (act); dest = BB_REPR (e->dest); if (e->dest->loop_father->header == e->dest) dest = LOOP_REPR (e->dest->loop_father); if (!flow_bb_inside_loop_p (act->loop_father, e->dest)) { depth = find_common_loop (act->loop_father, e->dest->loop_father)->depth + 1; if (depth == act->loop_father->depth) cloop = act->loop_father; else cloop = act->loop_father->pred[depth]; src = LOOP_REPR (cloop); } add_edge (g, src, dest, e); } /* Find the strongly connected components. Use the algorithm of Tarjan -- first determine the postorder dfs numbering in reversed graph, then run the dfs on the original graph in the order given by decreasing numbers assigned by the previous pass. */ nq = 0; FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { queue1[nq++] = BB_REPR (act); } for (i = 1; i < (int) loops->num; i++) if (loops->parray[i]) queue1[nq++] = LOOP_REPR (loops->parray[i]); dfs (g, queue1, nq, queue2, false); for (i = 0; i < nq; i++) queue1[i] = queue2[nq - i - 1]; dfs (g, queue1, nq, NULL, true); /* Mark the irreducible loops. */ for_each_edge (g, check_irred); free_graph (g); free (queue1); free (queue2); loops->state |= LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS; } /* Counts number of insns inside LOOP. */ int num_loop_insns (struct loop *loop) { basic_block *bbs, bb; unsigned i, ninsns = 0; rtx insn; bbs = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { bb = bbs[i]; ninsns++; for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn)) if (INSN_P (insn)) ninsns++; } free(bbs); return ninsns; } /* Counts number of insns executed on average per iteration LOOP. */ int average_num_loop_insns (struct loop *loop) { basic_block *bbs, bb; unsigned i, binsns, ninsns, ratio; rtx insn; ninsns = 0; bbs = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { bb = bbs[i]; binsns = 1; for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn)) if (INSN_P (insn)) binsns++; ratio = loop->header->frequency == 0 ? BB_FREQ_MAX : (bb->frequency * BB_FREQ_MAX) / loop->header->frequency; ninsns += binsns * ratio; } free(bbs); ninsns /= BB_FREQ_MAX; if (!ninsns) ninsns = 1; /* To avoid division by zero. */ return ninsns; } /* Returns expected number of LOOP iterations. Compute upper bound on number of iterations in case they do not fit integer to help loop peeling heuristics. Use exact counts if at all possible. */ unsigned expected_loop_iterations (const struct loop *loop) { edge e; if (loop->header->count) { gcov_type count_in, count_latch, expected; count_in = 0; count_latch = 0; for (e = loop->header->pred; e; e = e->pred_next) if (e->src == loop->latch) count_latch = e->count; else count_in += e->count; if (count_in == 0) expected = count_latch * 2; else expected = (count_latch + count_in - 1) / count_in; /* Avoid overflows. */ return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected); } else { int freq_in, freq_latch; freq_in = 0; freq_latch = 0; for (e = loop->header->pred; e; e = e->pred_next) if (e->src == loop->latch) freq_latch = EDGE_FREQUENCY (e); else freq_in += EDGE_FREQUENCY (e); if (freq_in == 0) return freq_latch * 2; return (freq_latch + freq_in - 1) / freq_in; } } /* Returns the maximum level of nesting of subloops of LOOP. */ unsigned get_loop_level (const struct loop *loop) { const struct loop *ploop; unsigned mx = 0, l; for (ploop = loop->inner; ploop; ploop = ploop->next) { l = get_loop_level (ploop); if (l >= mx) mx = l + 1; } return mx; } /* Returns estimate on cost of computing SEQ. 
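   (Before it, a brief illustrative aside on expected_loop_iterations above;
   the helper below is hypothetical and never called, it merely restates the
   frequency-based branch of that function with plain integers.)  */

/* Sketch: expected trip count from entry and latch frequencies.  E.g. with
   freq_in = 10 and freq_latch = 90 the result is (90 + 10 - 1) / 10 = 9,
   the average number of iterations per entry, rounded up.  */
static unsigned
example_expected_iterations (unsigned freq_in, unsigned freq_latch)
{
  if (freq_in == 0)
    return freq_latch * 2;  /* Never entered from outside; just guess.  */
  return (freq_latch + freq_in - 1) / freq_in;
}

/* seq_cost: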
*/ static unsigned seq_cost (rtx seq) { unsigned cost = 0; rtx set; for (; seq; seq = NEXT_INSN (seq)) { set = single_set (seq); if (set) cost += rtx_cost (set, SET); else cost++; } return cost; } /* The properties of the target. */ static unsigned avail_regs; /* Number of available registers. */ static unsigned res_regs; /* Number of reserved registers. */ static unsigned small_cost; /* The cost for register when there is a free one. */ static unsigned pres_cost; /* The cost for register when there are not too many free ones. */ static unsigned spill_cost_cfg; /* The cost for register when we need to spill. */ /* Initialize the constants for computing set costs. */ void init_set_costs (void) { rtx seq; rtx reg1 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER); rtx reg2 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER + 1); rtx addr = gen_raw_REG (Pmode, FIRST_PSEUDO_REGISTER + 2); rtx mem = validize_mem (gen_rtx_MEM (SImode, addr)); unsigned i; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i) && !fixed_regs[i]) avail_regs++; res_regs = 3; /* These are really just heuristic values. */ start_sequence (); emit_move_insn (reg1, reg2); seq = get_insns (); end_sequence (); small_cost = seq_cost (seq); pres_cost = 2 * small_cost; start_sequence (); emit_move_insn (mem, reg1); emit_move_insn (reg2, mem); seq = get_insns (); end_sequence (); spill_cost_cfg = seq_cost (seq); } /* Calculates cost for having SIZE new loop global variables. REGS_USED is the number of global registers used in loop. N_USES is the number of relevant variable uses. */ unsigned global_cost_for_size (unsigned size, unsigned regs_used, unsigned n_uses) { unsigned regs_needed = regs_used + size; unsigned cost = 0; if (regs_needed + res_regs <= avail_regs) cost += small_cost * size; else if (regs_needed <= avail_regs) cost += pres_cost * size; else { cost += pres_cost * size; cost += spill_cost_cfg * n_uses * (regs_needed - avail_regs) / regs_needed; } return cost; } /* Loop manipulation code for GNU compiler. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
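*/

/* An illustrative aside on global_cost_for_size above, with made-up
   numbers: suppose avail_regs = 6, res_regs = 3, small_cost = 2,
   pres_cost = 4 and spill_cost_cfg = 8.  Asking for SIZE = 2 new loop
   global registers with REGS_USED = 1 and N_USES = 10 gives
   regs_needed = 3; since 3 + 3 <= 6 the cheap tier applies and the cost is
   small_cost * SIZE = 4.  With REGS_USED = 5 instead, regs_needed = 7
   exceeds avail_regs, so the spill tier is charged:
   pres_cost * SIZE + spill_cost_cfg * N_USES * (7 - 6) / 7 = 8 + 11 = 19.  */

/* The loop manipulation code proper starts below; forward declarations
   first: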
*/ static struct loop * duplicate_loop (struct loops *, struct loop *, struct loop *); static void duplicate_subloops (struct loops *, struct loop *, struct loop *); static void copy_loops_to (struct loops *, struct loop **, int, struct loop *); static void loop_redirect_edge (edge, basic_block); static bool loop_delete_branch_edge (edge, int); static void remove_bbs (basic_block *, int); static bool rpe_enum_p (basic_block, void *); static int find_path (edge, basic_block **); static bool alp_enum_p (basic_block, void *); static void add_loop (struct loops *, struct loop *); static void fix_loop_placements (struct loops *, struct loop *); static bool fix_bb_placement (struct loops *, basic_block); static void fix_bb_placements (struct loops *, basic_block); static void place_new_loop (struct loops *, struct loop *); static void scale_loop_frequencies (struct loop *, int, int); static void scale_bbs_frequencies (basic_block *, int, int, int); static basic_block create_preheader (struct loop *, int); static void fix_irreducible_loops (basic_block); #define RDIV(X,Y) (((X) + (Y) / 2) / (Y)) /* Splits basic block BB after INSN, returns created edge. Updates loops and dominators. */ edge split_loop_bb (basic_block bb, rtx insn) { edge e; /* Split the block. */ e = split_block (bb, insn); /* Add dest to loop. */ add_bb_to_loop (e->dest, e->src->loop_father); return e; } /* Checks whether basic block BB is dominated by DATA. */ static bool rpe_enum_p (basic_block bb, void *data) { return dominated_by_p (CDI_DOMINATORS, bb, data); } /* Remove basic blocks BBS from loop structure and dominance info, and delete them afterwards. */ static void remove_bbs (basic_block *bbs, int nbbs) { int i; for (i = 0; i < nbbs; i++) { remove_bb_from_loops (bbs[i]); delete_basic_block (bbs[i]); } } /* Find path -- i.e. the basic blocks dominated by edge E and put them into array BBS, that will be allocated large enough to contain them. E->dest must have exactly one predecessor for this to work (it is easy to achieve and we do not put it here because we do not want to alter anything by this function). The number of basic blocks in the path is returned. */ static int find_path (edge e, basic_block **bbs) { if (e->dest->pred->pred_next) abort (); /* Find bbs in the path. */ *bbs = xcalloc (n_basic_blocks, sizeof (basic_block)); return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs, n_basic_blocks, e->dest); } /* Fix placement of basic block BB inside loop hierarchy stored in LOOPS -- Let L be a loop to that BB belongs. Then every successor of BB must either 1) belong to some superloop of loop L, or 2) be a header of loop K such that K->outer is superloop of L Returns true if we had to move BB into other loop to enforce this condition, false if the placement of BB was already correct (provided that placements of its successors are correct). */ static bool fix_bb_placement (struct loops *loops, basic_block bb) { edge e; struct loop *loop = loops->tree_root, *act; for (e = bb->succ; e; e = e->succ_next) { if (e->dest == EXIT_BLOCK_PTR) continue; act = e->dest->loop_father; if (act->header == e->dest) act = act->outer; if (flow_loop_nested_p (loop, act)) loop = act; } if (loop == bb->loop_father) return false; remove_bb_from_loops (bb); add_bb_to_loop (bb, loop); return true; } /* Fix placements of basic blocks inside loop hierarchy stored in loops; i.e. enforce condition condition stated in description of fix_bb_placement. 
We start from basic block FROM that had some of its successors removed, so that his placement no longer has to be correct, and iteratively fix placement of its predecessors that may change if placement of FROM changed. Also fix placement of subloops of FROM->loop_father, that might also be altered due to this change; the condition for them is similar, except that instead of successors we consider edges coming out of the loops. */ static void fix_bb_placements (struct loops *loops, basic_block from) { sbitmap in_queue; basic_block *queue, *qtop, *qbeg, *qend; struct loop *base_loop; edge e; /* We pass through blocks back-reachable from FROM, testing whether some of their successors moved to outer loop. It may be necessary to iterate several times, but it is finite, as we stop unless we move the basic block up the loop structure. The whole story is a bit more complicated due to presence of subloops, those are moved using fix_loop_placement. */ base_loop = from->loop_father; if (base_loop == loops->tree_root) return; in_queue = sbitmap_alloc (last_basic_block); sbitmap_zero (in_queue); SET_BIT (in_queue, from->index); /* Prevent us from going out of the base_loop. */ SET_BIT (in_queue, base_loop->header->index); queue = xmalloc ((base_loop->num_nodes + 1) * sizeof (basic_block)); qtop = queue + base_loop->num_nodes + 1; qbeg = queue; qend = queue + 1; *qbeg = from; while (qbeg != qend) { from = *qbeg; qbeg++; if (qbeg == qtop) qbeg = queue; RESET_BIT (in_queue, from->index); if (from->loop_father->header == from) { /* Subloop header, maybe move the loop upward. */ if (!fix_loop_placement (from->loop_father)) continue; } else { /* Ordinary basic block. */ if (!fix_bb_placement (loops, from)) continue; } /* Something has changed, insert predecessors into queue. */ for (e = from->pred; e; e = e->pred_next) { basic_block pred = e->src; struct loop *nca; if (TEST_BIT (in_queue, pred->index)) continue; /* If it is subloop, then it either was not moved, or the path up the loop tree from base_loop do not contain it. */ nca = find_common_loop (pred->loop_father, base_loop); if (pred->loop_father != base_loop && (nca == base_loop || nca != pred->loop_father)) pred = pred->loop_father->header; else if (!flow_loop_nested_p (from->loop_father, pred->loop_father)) { /* No point in processing it. */ continue; } if (TEST_BIT (in_queue, pred->index)) continue; /* Schedule the basic block. */ *qend = pred; qend++; if (qend == qtop) qend = queue; SET_BIT (in_queue, pred->index); } } free (in_queue); free (queue); } /* Basic block from has lost one or more of its predecessors, so it might mo longer be part irreducible loop. Fix it and proceed recursively for its successors if needed. 
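   More precisely, a work list (an explicit stack seeded with FROM) is
   drained as follows: a popped block keeps BB_IRREDUCIBLE_LOOP as long as
   at least one of its incoming edges is still marked EDGE_IRREDUCIBLE_LOOP;
   otherwise its flag is dropped, the irreducible marks on its outgoing
   edges (or, if it is a loop header, on the exit edges of its loop) whose
   destinations lie inside FROM's loop are dropped as well, and those
   destinations are queued for the same treatment.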
*/ static void fix_irreducible_loops (basic_block from) { basic_block bb; basic_block *stack; int stack_top; sbitmap on_stack; edge *edges, e; unsigned n_edges, i; if (!(from->flags & BB_IRREDUCIBLE_LOOP)) return; on_stack = sbitmap_alloc (last_basic_block); sbitmap_zero (on_stack); SET_BIT (on_stack, from->index); stack = xmalloc (from->loop_father->num_nodes * sizeof (basic_block)); stack[0] = from; stack_top = 1; while (stack_top) { bb = stack[--stack_top]; RESET_BIT (on_stack, bb->index); for (e = bb->pred; e; e = e->pred_next) if (e->flags & EDGE_IRREDUCIBLE_LOOP) break; if (e) continue; bb->flags &= ~BB_IRREDUCIBLE_LOOP; if (bb->loop_father->header == bb) edges = get_loop_exit_edges (bb->loop_father, &n_edges); else { n_edges = 0; for (e = bb->succ; e; e = e->succ_next) n_edges++; edges = xmalloc (n_edges * sizeof (edge)); n_edges = 0; for (e = bb->succ; e; e = e->succ_next) edges[n_edges++] = e; } for (i = 0; i < n_edges; i++) { e = edges[i]; if (e->flags & EDGE_IRREDUCIBLE_LOOP) { if (!flow_bb_inside_loop_p (from->loop_father, e->dest)) continue; e->flags &= ~EDGE_IRREDUCIBLE_LOOP; if (TEST_BIT (on_stack, e->dest->index)) continue; SET_BIT (on_stack, e->dest->index); stack[stack_top++] = e->dest; } } free (edges); } free (on_stack); free (stack); } /* Removes path beginning at edge E, i.e. remove basic blocks dominated by E and update loop structure stored in LOOPS and dominators. Return true if we were able to remove the path, false otherwise (and nothing is affected then). */ bool remove_path (struct loops *loops, edge e) { edge ae; basic_block *rem_bbs, *bord_bbs, *dom_bbs, from, bb; int i, nrem, n_bord_bbs, n_dom_bbs; sbitmap seen; if (!loop_delete_branch_edge (e, 0)) return false; /* We need to check whether basic blocks are dominated by the edge e, but we only have basic block dominators. This is easy to fix -- when e->dest has exactly one predecessor, this corresponds to blocks dominated by e->dest, if not, split the edge. */ if (e->dest->pred->pred_next) e = loop_split_edge_with (e, NULL_RTX)->pred; /* It may happen that by removing path we remove one or more loops we belong to. In this case first unloop the loops, then proceed normally. We may assume that e->dest is not a header of any loop, as it now has exactly one predecessor. */ while (e->src->loop_father->outer && dominated_by_p (CDI_DOMINATORS, e->src->loop_father->latch, e->dest)) unloop (loops, e->src->loop_father); /* Identify the path. */ nrem = find_path (e, &rem_bbs); n_bord_bbs = 0; bord_bbs = xcalloc (n_basic_blocks, sizeof (basic_block)); seen = sbitmap_alloc (last_basic_block); sbitmap_zero (seen); /* Find "border" hexes -- i.e. those with predecessor in removed path. */ for (i = 0; i < nrem; i++) SET_BIT (seen, rem_bbs[i]->index); for (i = 0; i < nrem; i++) { bb = rem_bbs[i]; for (ae = rem_bbs[i]->succ; ae; ae = ae->succ_next) if (ae->dest != EXIT_BLOCK_PTR && !TEST_BIT (seen, ae->dest->index)) { SET_BIT (seen, ae->dest->index); bord_bbs[n_bord_bbs++] = ae->dest; } } /* Remove the path. */ from = e->src; if (!loop_delete_branch_edge (e, 1)) abort (); dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block)); /* Cancel loops contained in the path. */ for (i = 0; i < nrem; i++) if (rem_bbs[i]->loop_father->header == rem_bbs[i]) cancel_loop_tree (loops, rem_bbs[i]->loop_father); remove_bbs (rem_bbs, nrem); free (rem_bbs); /* Find blocks whose dominators may be affected. 
*/ n_dom_bbs = 0; sbitmap_zero (seen); for (i = 0; i < n_bord_bbs; i++) { basic_block ldom; bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]); if (TEST_BIT (seen, bb->index)) continue; SET_BIT (seen, bb->index); for (ldom = first_dom_son (CDI_DOMINATORS, bb); ldom; ldom = next_dom_son (CDI_DOMINATORS, ldom)) if (!dominated_by_p (CDI_DOMINATORS, from, ldom)) dom_bbs[n_dom_bbs++] = ldom; } free (seen); /* Recount dominators. */ iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, n_dom_bbs); free (dom_bbs); /* These blocks have lost some predecessor(s), thus their irreducible status could be changed. */ for (i = 0; i < n_bord_bbs; i++) fix_irreducible_loops (bord_bbs[i]); free (bord_bbs); /* Fix placements of basic blocks inside loops and the placement of loops in the loop tree. */ fix_bb_placements (loops, from); fix_loop_placements (loops, from->loop_father); return true; } /* Predicate for enumeration in add_loop. */ static bool alp_enum_p (basic_block bb, void *alp_header) { return bb != (basic_block) alp_header; } /* Given LOOP structure with filled header and latch, find the body of the corresponding loop and add it to LOOPS tree. */ static void add_loop (struct loops *loops, struct loop *loop) { basic_block *bbs; int i, n; /* Add it to loop structure. */ place_new_loop (loops, loop); loop->level = 1; /* Find its nodes. */ bbs = xcalloc (n_basic_blocks, sizeof (basic_block)); n = dfs_enumerate_from (loop->latch, 1, alp_enum_p, bbs, n_basic_blocks, loop->header); for (i = 0; i < n; i++) add_bb_to_loop (bbs[i], loop); add_bb_to_loop (loop->header, loop); free (bbs); } /* Multiply all frequencies of basic blocks in array BBS of length NBBS by NUM/DEN. */ static void scale_bbs_frequencies (basic_block *bbs, int nbbs, int num, int den) { int i; edge e; for (i = 0; i < nbbs; i++) { bbs[i]->frequency = (bbs[i]->frequency * num) / den; bbs[i]->count = RDIV (bbs[i]->count * num, den); for (e = bbs[i]->succ; e; e = e->succ_next) e->count = (e->count * num) /den; } } /* Multiply all frequencies in LOOP by NUM/DEN. */ static void scale_loop_frequencies (struct loop *loop, int num, int den) { basic_block *bbs; bbs = get_loop_body (loop); scale_bbs_frequencies (bbs, loop->num_nodes, num, den); free (bbs); } /* Make area between HEADER_EDGE and LATCH_EDGE a loop by connecting latch to header and update loop tree stored in LOOPS and dominators accordingly. Everything between them plus LATCH_EDGE destination must be dominated by HEADER_EDGE destination, and back-reachable from LATCH_EDGE source. HEADER_EDGE is redirected to basic block SWITCH_BB, FALLTHRU_EDGE (SWITCH_BB) to original destination of HEADER_EDGE and BRANCH_EDGE (SWITCH_BB) to original destination of LATCH_EDGE. Returns newly created loop. */ struct loop * loopify (struct loops *loops, edge latch_edge, edge header_edge, basic_block switch_bb) { basic_block succ_bb = latch_edge->dest; basic_block pred_bb = header_edge->src; basic_block *dom_bbs, *body; unsigned n_dom_bbs, i; sbitmap seen; struct loop *loop = xcalloc (1, sizeof (struct loop)); struct loop *outer = succ_bb->loop_father->outer; int freq, prob, tot_prob; gcov_type cnt; edge e; loop->header = header_edge->dest; loop->latch = latch_edge->src; freq = EDGE_FREQUENCY (header_edge); cnt = header_edge->count; prob = switch_bb->succ->probability; tot_prob = prob + switch_bb->succ->succ_next->probability; if (tot_prob == 0) tot_prob = 1; /* Redirect edges. 
*/ loop_redirect_edge (latch_edge, loop->header); loop_redirect_edge (BRANCH_EDGE (switch_bb), succ_bb); loop_redirect_edge (header_edge, switch_bb); loop_redirect_edge (FALLTHRU_EDGE (switch_bb), loop->header); /* Update dominators. */ set_immediate_dominator (CDI_DOMINATORS, switch_bb, pred_bb); set_immediate_dominator (CDI_DOMINATORS, loop->header, switch_bb); set_immediate_dominator (CDI_DOMINATORS, succ_bb, switch_bb); /* Compute new loop. */ add_loop (loops, loop); flow_loop_tree_node_add (outer, loop); /* Add switch_bb to appropriate loop. */ add_bb_to_loop (switch_bb, outer); /* Fix frequencies. */ switch_bb->frequency = freq; switch_bb->count = cnt; for (e = switch_bb->succ; e; e = e->succ_next) e->count = (switch_bb->count * e->probability) / REG_BR_PROB_BASE; scale_loop_frequencies (loop, prob, tot_prob); scale_loop_frequencies (succ_bb->loop_father, tot_prob - prob, tot_prob); /* Update dominators of blocks outside of LOOP. */ dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block)); n_dom_bbs = 0; seen = sbitmap_alloc (last_basic_block); sbitmap_zero (seen); body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) SET_BIT (seen, body[i]->index); for (i = 0; i < loop->num_nodes; i++) { basic_block ldom; for (ldom = first_dom_son (CDI_DOMINATORS, body[i]); ldom; ldom = next_dom_son (CDI_DOMINATORS, ldom)) if (!TEST_BIT (seen, ldom->index)) { SET_BIT (seen, ldom->index); dom_bbs[n_dom_bbs++] = ldom; } } iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, n_dom_bbs); free (body); free (seen); free (dom_bbs); return loop; } /* Remove the latch edge of a LOOP and update LOOPS tree to indicate that the LOOP was removed. After this function, original loop latch will have no successor, which caller is expected to fix somehow. */ void unloop (struct loops *loops, struct loop *loop) { basic_block *body; struct loop *ploop; unsigned i, n; basic_block latch = loop->latch; edge *edges; unsigned n_edges; /* This is relatively straightforward. The dominators are unchanged, as loop header dominates loop latch, so the only thing we have to care of is the placement of loops and basic blocks inside the loop tree. We move them all to the loop->outer, and then let fix_bb_placements do its work. */ body = get_loop_body (loop); edges = get_loop_exit_edges (loop, &n_edges); n = loop->num_nodes; for (i = 0; i < n; i++) if (body[i]->loop_father == loop) { remove_bb_from_loops (body[i]); add_bb_to_loop (body[i], loop->outer); } free(body); while (loop->inner) { ploop = loop->inner; flow_loop_tree_node_remove (ploop); flow_loop_tree_node_add (loop->outer, ploop); } /* Remove the loop and free its data. */ flow_loop_tree_node_remove (loop); loops->parray[loop->num] = NULL; flow_loop_free (loop); remove_edge (latch->succ); fix_bb_placements (loops, latch); /* If the loop was inside an irreducible region, we would have to somehow update the irreducible marks inside its body. While it is certainly possible to do, it is a bit complicated and this situation should be very rare, so we just remark all loops in this case. */ for (i = 0; i < n_edges; i++) if (edges[i]->flags & EDGE_IRREDUCIBLE_LOOP) break; if (i != n_edges) mark_irreducible_loops (loops); free (edges); } /* Fix placement of LOOP inside loop tree, i.e. find the innermost superloop FATHER of LOOP such that all of the edges coming out of LOOP belong to FATHER, and set it as outer loop of LOOP. Return 1 if placement of LOOP changed. 
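   For instance, if LOOP is currently a child of loop L2, which is in turn a
   child of L1, and after some CFG surgery every edge leaving LOOP targets
   blocks of L1 only, then the innermost loop containing all those exit
   destinations is L1: LOOP is unlinked from L2 (whose num_nodes shrinks by
   LOOP's size), re-attached as a child of L1, and 1 is returned.  If every
   exit still stays within L2, nothing changes and 0 is returned.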
*/ int fix_loop_placement (struct loop *loop) { basic_block *body; unsigned i; edge e; struct loop *father = loop->pred[0], *act; body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) for (e = body[i]->succ; e; e = e->succ_next) if (!flow_bb_inside_loop_p (loop, e->dest)) { act = find_common_loop (loop, e->dest->loop_father); if (flow_loop_nested_p (father, act)) father = act; } free (body); if (father != loop->outer) { for (act = loop->outer; act != father; act = act->outer) act->num_nodes -= loop->num_nodes; flow_loop_tree_node_remove (loop); flow_loop_tree_node_add (father, loop); return 1; } return 0; } /* Fix placement of superloops of LOOP inside loop tree, i.e. ensure that condition stated in description of fix_loop_placement holds for them. It is used in case when we removed some edges coming out of LOOP, which may cause the right placement of LOOP inside loop tree to change. */ static void fix_loop_placements (struct loops *loops, struct loop *loop) { struct loop *outer; while (loop->outer) { outer = loop->outer; if (!fix_loop_placement (loop)) break; /* Changing the placement of a loop in the loop tree may alter the validity of condition 2) of the description of fix_bb_placement for its preheader, because the successor is the header and belongs to the loop. So call fix_bb_placements to fix up the placement of the preheader and (possibly) of its predecessors. */ fix_bb_placements (loops, loop_preheader_edge (loop)->src); loop = outer; } } /* Creates place for a new LOOP in LOOPS structure. */ static void place_new_loop (struct loops *loops, struct loop *loop) { loops->parray = xrealloc (loops->parray, (loops->num + 1) * sizeof (struct loop *)); loops->parray[loops->num] = loop; loop->num = loops->num++; } /* Copies copy of LOOP as subloop of TARGET loop, placing newly created loop into LOOPS structure. */ static struct loop * duplicate_loop (struct loops *loops, struct loop *loop, struct loop *target) { struct loop *cloop; cloop = xcalloc (1, sizeof (struct loop)); place_new_loop (loops, cloop); /* Initialize copied loop. */ cloop->level = loop->level; /* Set it as copy of loop. */ loop->copy = cloop; /* Add it to target. */ flow_loop_tree_node_add (target, cloop); return cloop; } /* Copies structure of subloops of LOOP into TARGET loop, placing newly created loops into loop tree stored in LOOPS. */ static void duplicate_subloops (struct loops *loops, struct loop *loop, struct loop *target) { struct loop *aloop, *cloop; for (aloop = loop->inner; aloop; aloop = aloop->next) { cloop = duplicate_loop (loops, aloop, target); duplicate_subloops (loops, aloop, cloop); } } /* Copies structure of subloops of N loops, stored in array COPIED_LOOPS, into TARGET loop, placing newly created loops into loop tree LOOPS. */ static void copy_loops_to (struct loops *loops, struct loop **copied_loops, int n, struct loop *target) { struct loop *aloop; int i; for (i = 0; i < n; i++) { aloop = duplicate_loop (loops, copied_loops[i], target); duplicate_subloops (loops, copied_loops[i], aloop); } } /* Redirects edge E to basic block DEST. */ static void loop_redirect_edge (edge e, basic_block dest) { if (e->dest == dest) return; redirect_edge_and_branch_force (e, dest); } /* Deletes edge E from a branch if possible. Unless REALLY_DELETE is set, just test whether it is possible to remove the edge. 
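   Callers therefore use it as a probe-then-commit pair, as remove_path
   above does: a first call with REALLY_DELETE == 0 only answers whether the
   edge could be removed at all, and only after that succeeds is the edge
   actually removed with REALLY_DELETE == 1, which is then expected to
   succeed (hence the abort on failure in remove_path).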
*/ static bool loop_delete_branch_edge (edge e, int really_delete) { basic_block src = e->src; int irr; edge snd; if (src->succ->succ_next) { basic_block newdest; /* Cannot handle more than two exit edges. */ if (src->succ->succ_next->succ_next) return false; /* And it must be just a simple branch. */ if (!any_condjump_p (BB_END (src))) return false; snd = e == src->succ ? src->succ->succ_next : src->succ; newdest = snd->dest; if (newdest == EXIT_BLOCK_PTR) return false; /* Hopefully the above conditions should suffice. */ if (!really_delete) return true; /* Redirecting behaves wrongly wrto this flag. */ irr = snd->flags & EDGE_IRREDUCIBLE_LOOP; if (!redirect_edge_and_branch (e, newdest)) return false; src->succ->flags &= ~EDGE_IRREDUCIBLE_LOOP; src->succ->flags |= irr; return true; } else { /* Cannot happen -- we are using this only to remove an edge from branch. */ abort (); } return false; /* To avoid warning, cannot get here. */ } /* Check whether LOOP's body can be duplicated. */ bool can_duplicate_loop_p (struct loop *loop) { int ret; basic_block *bbs = get_loop_body (loop); ret = can_copy_bbs_p (bbs, loop->num_nodes); free (bbs); return ret; } /* Duplicates body of LOOP to given edge E NDUPL times. Takes care of updating LOOPS structure and dominators. E's destination must be LOOP header for this to work, i.e. it must be entry or latch edge of this loop; these are unique, as the loops must have preheaders for this function to work correctly (in case E is latch, the function unrolls the loop, if E is entry edge, it peels the loop). Store edges created by copying ORIG edge from copies corresponding to set bits in WONT_EXIT bitmap (bit 0 corresponds to original LOOP body, the other copies are numbered in order given by control flow through them) into TO_REMOVE array. Returns false if duplication is impossible. */ int duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops, unsigned int ndupl, sbitmap wont_exit, edge orig, edge *to_remove, unsigned int *n_to_remove, int flags) { struct loop *target, *aloop; struct loop **orig_loops; unsigned n_orig_loops; basic_block header = loop->header, latch = loop->latch; basic_block *new_bbs, *bbs, *first_active; basic_block new_bb, bb, first_active_latch = NULL; edge ae, latch_edge; edge spec_edges[2], new_spec_edges[2]; #define SE_LATCH 0 #define SE_ORIG 1 unsigned i, j, n; int is_latch = (latch == e->src); int scale_act = 0, *scale_step = NULL, scale_main = 0; int p, freq_in, freq_le, freq_out_orig; int prob_pass_thru, prob_pass_wont_exit, prob_pass_main; int add_irreducible_flag; if (e->dest != loop->header) abort (); if (ndupl <= 0) abort (); if (orig) { /* Orig must be edge out of the loop. */ if (!flow_bb_inside_loop_p (loop, orig->src)) abort (); if (flow_bb_inside_loop_p (loop, orig->dest)) abort (); } bbs = get_loop_body (loop); /* Check whether duplication is possible. */ if (!can_copy_bbs_p (bbs, loop->num_nodes)) { free (bbs); return false; } new_bbs = xmalloc (sizeof (basic_block) * loop->num_nodes); /* In case we are doing loop peeling and the loop is in the middle of irreducible region, the peeled copies will be inside it too. */ add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP; if (is_latch && add_irreducible_flag) abort (); /* Find edge from latch. */ latch_edge = loop_latch_edge (loop); if (flags & DLTHE_FLAG_UPDATE_FREQ) { /* Calculate coefficients by that we have to scale frequencies of duplicated loop bodies. 
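   For instance (made-up numbers): with a header frequency of 100 and a
   latch edge frequency of 80, prob_pass_thru below is
   RDIV (REG_BR_PROB_BASE * 80, 100), i.e. 80% of REG_BR_PROB_BASE; each
   further copy of the body is then expected to execute roughly 0.8 times as
   often as the one before it, and scale_act accumulates exactly that
   product as the copies are emitted.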
*/ freq_in = header->frequency; freq_le = EDGE_FREQUENCY (latch_edge); if (freq_in == 0) freq_in = 1; if (freq_in < freq_le) freq_in = freq_le; freq_out_orig = orig ? EDGE_FREQUENCY (orig) : freq_in - freq_le; if (freq_out_orig > freq_in - freq_le) freq_out_orig = freq_in - freq_le; prob_pass_thru = RDIV (REG_BR_PROB_BASE * freq_le, freq_in); prob_pass_wont_exit = RDIV (REG_BR_PROB_BASE * (freq_le + freq_out_orig), freq_in); scale_step = xmalloc (ndupl * sizeof (int)); for (i = 1; i <= ndupl; i++) scale_step[i - 1] = TEST_BIT (wont_exit, i) ? prob_pass_wont_exit : prob_pass_thru; if (is_latch) { prob_pass_main = TEST_BIT (wont_exit, 0) ? prob_pass_wont_exit : prob_pass_thru; p = prob_pass_main; scale_main = REG_BR_PROB_BASE; for (i = 0; i < ndupl; i++) { scale_main += p; p = RDIV (p * scale_step[i], REG_BR_PROB_BASE); } scale_main = RDIV (REG_BR_PROB_BASE * REG_BR_PROB_BASE, scale_main); scale_act = RDIV (scale_main * prob_pass_main, REG_BR_PROB_BASE); } else { scale_main = REG_BR_PROB_BASE; for (i = 0; i < ndupl; i++) scale_main = RDIV (scale_main * scale_step[i], REG_BR_PROB_BASE); scale_act = REG_BR_PROB_BASE - prob_pass_thru; } for (i = 0; i < ndupl; i++) if (scale_step[i] < 0 || scale_step[i] > REG_BR_PROB_BASE) abort (); if (scale_main < 0 || scale_main > REG_BR_PROB_BASE || scale_act < 0 || scale_act > REG_BR_PROB_BASE) abort (); } /* Loop the new bbs will belong to. */ target = e->src->loop_father; /* Original loops. */ n_orig_loops = 0; for (aloop = loop->inner; aloop; aloop = aloop->next) n_orig_loops++; orig_loops = xcalloc (n_orig_loops, sizeof (struct loop *)); for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++) orig_loops[i] = aloop; loop->copy = target; n = loop->num_nodes; first_active = xmalloc (n * sizeof (basic_block)); if (is_latch) { memcpy (first_active, bbs, n * sizeof (basic_block)); first_active_latch = latch; } /* Record exit edge in original loop body. */ if (orig && TEST_BIT (wont_exit, 0)) to_remove[(*n_to_remove)++] = orig; spec_edges[SE_ORIG] = orig; spec_edges[SE_LATCH] = latch_edge; for (j = 0; j < ndupl; j++) { /* Copy loops. */ copy_loops_to (loops, orig_loops, n_orig_loops, target); /* Copy bbs. */ copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop); /* Note whether the blocks and edges belong to an irreducible loop. */ if (add_irreducible_flag) { for (i = 0; i < n; i++) new_bbs[i]->rbi->duplicated = 1; for (i = 0; i < n; i++) { new_bb = new_bbs[i]; if (new_bb->loop_father == target) new_bb->flags |= BB_IRREDUCIBLE_LOOP; for (ae = new_bb->succ; ae; ae = ae->succ_next) if (ae->dest->rbi->duplicated && (ae->src->loop_father == target || ae->dest->loop_father == target)) ae->flags |= EDGE_IRREDUCIBLE_LOOP; } for (i = 0; i < n; i++) new_bbs[i]->rbi->duplicated = 0; } /* Redirect the special edges. */ if (is_latch) { redirect_edge_and_branch_force (latch_edge, new_bbs[0]); redirect_edge_and_branch_force (new_spec_edges[SE_LATCH], loop->header); set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch); latch = loop->latch = new_bbs[1]; e = latch_edge = new_spec_edges[SE_LATCH]; } else { redirect_edge_and_branch_force (new_spec_edges[SE_LATCH], loop->header); redirect_edge_and_branch_force (e, new_bbs[0]); set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src); e = new_spec_edges[SE_LATCH]; } /* Record exit edge in this copy. */ if (orig && TEST_BIT (wont_exit, j + 1)) to_remove[(*n_to_remove)++] = new_spec_edges[SE_ORIG]; /* Record the first copy in the control flow order if it is not the original loop (i.e. 
in case of peeling). */ if (!first_active_latch) { memcpy (first_active, new_bbs, n * sizeof (basic_block)); first_active_latch = new_bbs[1]; } /* Set counts and frequencies. */ if (flags & DLTHE_FLAG_UPDATE_FREQ) { scale_bbs_frequencies (new_bbs, n, scale_act, REG_BR_PROB_BASE); scale_act = RDIV (scale_act * scale_step[j], REG_BR_PROB_BASE); } } free (new_bbs); free (orig_loops); /* Update the original loop. */ if (!is_latch) set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src); if (flags & DLTHE_FLAG_UPDATE_FREQ) { scale_bbs_frequencies (bbs, n, scale_main, REG_BR_PROB_BASE); free (scale_step); } /* Update dominators of outer blocks if affected. */ for (i = 0; i < n; i++) { basic_block dominated, dom_bb, *dom_bbs; int n_dom_bbs,j; bb = bbs[i]; n_dom_bbs = get_dominated_by (CDI_DOMINATORS, bb, &dom_bbs); for (j = 0; j < n_dom_bbs; j++) { dominated = dom_bbs[j]; if (flow_bb_inside_loop_p (loop, dominated)) continue; dom_bb = nearest_common_dominator ( CDI_DOMINATORS, first_active[i], first_active_latch); set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb); } free (dom_bbs); } free (first_active); free (bbs); return true; } /* A callback for make_forwarder block, to redirect all edges except for MFB_KJ_EDGE to the entry part. E is the edge for that we should decide whether to redirect it. */ /* XXX duplicates mfb_keep_just in cfgloop.c */ static edge mfb_kj_edge2; static bool mfb_keep_just2 (edge e) { return e != mfb_kj_edge2; } /* A callback for make_forwarder block, to update data structures for a basic block JUMP created by redirecting an edge (only the latch edge is being redirected). */ static void mfb_update_loops (basic_block jump) { struct loop *loop = jump->succ->dest->loop_father; if (dom_computed[CDI_DOMINATORS]) set_immediate_dominator (CDI_DOMINATORS, jump, jump->pred->src); add_bb_to_loop (jump, loop); loop->latch = jump; } /* Creates a pre-header for a LOOP. Returns newly created block. Unless CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have single entry; otherwise we also force preheader block to have only one successor. The function also updates dominators. */ static basic_block create_preheader (struct loop *loop, int flags) { edge e, fallthru; basic_block dummy; struct loop *cloop, *ploop; int nentry = 0; bool irred = false; cloop = loop->outer; for (e = loop->header->pred; e; e = e->pred_next) { if (e->src == loop->latch) continue; irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0; nentry++; } if (!nentry) abort (); if (nentry == 1) { for (e = loop->header->pred; e->src == loop->latch; e = e->pred_next); if (!(flags & CP_SIMPLE_PREHEADERS) || !e->src->succ->succ_next) return NULL; } mfb_kj_edge2 = loop_latch_edge (loop); fallthru = make_forwarder_block (loop->header, mfb_keep_just2, mfb_update_loops); dummy = fallthru->src; loop->header = fallthru->dest; /* The header could be a latch of some superloop(s); due to design of split_block, it would now move to fallthru->dest. */ for (ploop = loop; ploop; ploop = ploop->outer) if (ploop->latch == dummy) ploop->latch = fallthru->dest; /* Reorganize blocks so that the preheader is not stuck in the middle of the loop. 
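   This is done by picking a non-latch predecessor of the new preheader
   block DUMMY and moving DUMMY right after that predecessor in the basic
   block chain, instead of leaving it where the original header used to
   sit, in the middle of the loop's blocks.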
*/ for (e = dummy->pred; e; e = e->pred_next) if (e->src != loop->latch) break; move_block_after (dummy, e->src); loop->header->loop_father = loop; add_bb_to_loop (dummy, cloop); if (irred) { dummy->flags |= BB_IRREDUCIBLE_LOOP; dummy->succ->flags |= EDGE_IRREDUCIBLE_LOOP; } if (dump_file) fprintf (dump_file, "Created preheader block for loop %i\n", loop->num); return dummy; } /* Create preheaders for each loop from loop tree stored in LOOPS; for meaning of FLAGS see create_preheader. */ void create_preheaders (struct loops *loops, int flags) { unsigned i; for (i = 1; i < loops->num; i++) create_preheader (loops->parray[i], flags); loops->state |= LOOPS_HAVE_PREHEADERS; } /* Forces all loop latches of loops from loop tree LOOPS to have only single successor. */ void force_single_succ_latches (struct loops *loops) { unsigned i; struct loop *loop; edge e; for (i = 1; i < loops->num; i++) { loop = loops->parray[i]; if (loop->latch != loop->header && !loop->latch->succ->succ_next) continue; for (e = loop->header->pred; e->src != loop->latch; e = e->pred_next) continue; loop_split_edge_with (e, NULL_RTX); } loops->state |= LOOPS_HAVE_SIMPLE_LATCHES; } /* A quite stupid function to put INSNS on edge E. They are supposed to form just one basic block. Jumps in INSNS are not handled, so cfg do not have to be ok after this function. The created block is placed on correct place in LOOPS structure and its dominator is set. */ basic_block loop_split_edge_with (edge e, rtx insns) { basic_block src, dest, new_bb; struct loop *loop_c; edge new_e; src = e->src; dest = e->dest; loop_c = find_common_loop (src->loop_father, dest->loop_father); /* Create basic block for it. */ new_bb = split_edge (e); add_bb_to_loop (new_bb, loop_c); new_bb->flags = insns ? BB_SUPERBLOCK : 0; new_e = new_bb->succ; if (e->flags & EDGE_IRREDUCIBLE_LOOP) { new_bb->flags |= BB_IRREDUCIBLE_LOOP; new_e->flags |= EDGE_IRREDUCIBLE_LOOP; } if (insns) emit_insn_after (insns, BB_END (new_bb)); if (dest->loop_father->latch == src) dest->loop_father->latch = new_bb; return new_bb; } /* Uses the natural loop discovery to recreate loop notes. */ void create_loop_notes (void) { rtx insn, head, end; struct loops loops; struct loop *loop; basic_block *first, *last, bb, pbb; struct loop **stack, **top; #ifdef ENABLE_CHECKING /* Verify that there really are no loop notes. */ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) abort (); #endif flow_loops_find (&loops, LOOP_TREE); free_dominance_info (CDI_DOMINATORS); if (loops.num > 1) { last = xcalloc (loops.num, sizeof (basic_block)); FOR_EACH_BB (bb) { for (loop = bb->loop_father; loop->outer; loop = loop->outer) last[loop->num] = bb; } first = xcalloc (loops.num, sizeof (basic_block)); stack = xcalloc (loops.num, sizeof (struct loop *)); top = stack; FOR_EACH_BB (bb) { for (loop = bb->loop_father; loop->outer; loop = loop->outer) { if (!first[loop->num]) { *top++ = loop; first[loop->num] = bb; } if (bb == last[loop->num]) { /* Prevent loops from overlapping. */ while (*--top != loop) last[(*top)->num] = EXIT_BLOCK_PTR; /* If loop starts with jump into it, place the note in front of the jump. 
*/ insn = PREV_INSN (BB_HEAD (first[loop->num])); if (insn && GET_CODE (insn) == BARRIER) insn = PREV_INSN (insn); if (insn && GET_CODE (insn) == JUMP_INSN && any_uncondjump_p (insn) && onlyjump_p (insn)) { pbb = BLOCK_FOR_INSN (insn); if (!pbb || !pbb->succ || pbb->succ->succ_next) abort (); if (!flow_bb_inside_loop_p (loop, pbb->succ->dest)) insn = BB_HEAD (first[loop->num]); } else insn = BB_HEAD (first[loop->num]); head = BB_HEAD (first[loop->num]); emit_note_before (NOTE_INSN_LOOP_BEG, insn); BB_HEAD (first[loop->num]) = head; /* Position the note correctly wrto barrier. */ insn = BB_END (last[loop->num]); if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == BARRIER) insn = NEXT_INSN (insn); end = BB_END (last[loop->num]); emit_note_after (NOTE_INSN_LOOP_END, insn); BB_END (last[loop->num]) = end; } } } free (first); free (last); free (stack); } flow_loops_free (&loops); } /* Loop optimizer initialization routines. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Initialize loop optimizer. */ struct loops * loop_optimizer_init (FILE *dumpfile) { struct loops *loops = xcalloc (1, sizeof (struct loops)); edge e; static bool first_time = true; if (first_time) { first_time = false; init_set_costs (); } /* Avoid annoying special cases of edges going to exit block. */ for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) if ((e->flags & EDGE_FALLTHRU) && e->src->succ->succ_next) split_edge (e); /* Find the loops. */ if (flow_loops_find (loops, LOOP_TREE) <= 1) { /* No loops. */ flow_loops_free (loops); free_dominance_info (CDI_DOMINATORS); free (loops); return NULL; } /* Not going to update these. */ free (loops->cfg.rc_order); loops->cfg.rc_order = NULL; free (loops->cfg.dfs_order); loops->cfg.dfs_order = NULL; /* Create pre-headers. */ create_preheaders (loops, CP_SIMPLE_PREHEADERS); /* Force all latches to have only single successor. */ force_single_succ_latches (loops); /* Mark irreducible loops. */ mark_irreducible_loops (loops); /* Dump loops. */ flow_loops_dump (loops, dumpfile, NULL, 1); #ifdef ENABLE_CHECKING verify_dominators (CDI_DOMINATORS); verify_loop_structure (loops); #endif return loops; } /* Finalize loop optimizer. */ void loop_optimizer_finalize (struct loops *loops, FILE *dumpfile) { unsigned i; if (!loops) return; for (i = 1; i < loops->num; i++) if (loops->parray[i]) free_simple_loop_desc (loops->parray[i]); /* Another dump. */ flow_loops_dump (loops, dumpfile, NULL, 1); /* Clean up. */ flow_loops_free (loops); free_dominance_info (CDI_DOMINATORS); free (loops); /* Checking. */ #ifdef ENABLE_CHECKING verify_flow_info (); #endif } /* Loop unswitching for GNU compiler. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This pass moves constant conditions out of loops, duplicating the loop in progress, i.e. this code: while (loop_cond) { A; if (cond) branch1; else branch2; B; if (cond) branch3; C; } where nothing inside the loop alters cond is transformed into if (cond) { while (loop_cond) { A; branch1; B; branch3; C; } } else { while (loop_cond) { A; branch2; B; C; } } Duplicating the loop might lead to code growth exponential in number of branches inside loop, so we limit the number of unswitchings performed in a single loop to PARAM_MAX_UNSWITCH_LEVEL. We only perform the transformation on innermost loops, as the benefit of doing it on loops containing subloops would not be very large compared to complications with handling this case. */ static struct loop *unswitch_loop (struct loops *, struct loop *, basic_block, rtx, rtx); static void unswitch_single_loop (struct loops *, struct loop *, rtx, int); static rtx may_unswitch_on (basic_block, struct loop *, rtx *); /* Prepare a sequence comparing OP0 with OP1 using COMP and jumping to LABEL if true, with probability PROB. If CINSN is not NULL, it is the insn to copy in order to create a jump. */ rtx compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp, rtx label, int prob, rtx cinsn) { rtx seq, jump, cond; enum machine_mode mode; mode = GET_MODE (op0); if (mode == VOIDmode) mode = GET_MODE (op1); start_sequence (); if (GET_MODE_CLASS (mode) == MODE_CC) { /* A hack -- there seems to be no easy generic way how to make a conditional jump from a ccmode comparison. */ if (!cinsn) abort (); cond = XEXP (SET_SRC (pc_set (cinsn)), 0); if (GET_CODE (cond) != comp || !rtx_equal_p (op0, XEXP (cond, 0)) || !rtx_equal_p (op1, XEXP (cond, 1))) abort (); emit_jump_insn (copy_insn (PATTERN (cinsn))); jump = get_last_insn (); JUMP_LABEL (jump) = JUMP_LABEL (cinsn); LABEL_NUSES (JUMP_LABEL (jump))++; redirect_jump (jump, label, 0); } else { if (cinsn) abort (); op0 = force_operand (op0, NULL_RTX); op1 = force_operand (op1, NULL_RTX); do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL_RTX, label); jump = get_last_insn (); JUMP_LABEL (jump) = label; LABEL_NUSES (label)++; } REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob), REG_NOTES (jump)); seq = get_insns (); end_sequence (); return seq; } /* Main entry point. Perform loop unswitching on all suitable LOOPS. */ void unswitch_loops (struct loops *loops) { int i, num; struct loop *loop; /* Go through inner loops (only original ones). */ num = loops->num; for (i = 1; i < num; i++) { /* Removed loop? 
*/ loop = loops->parray[i]; if (!loop) continue; if (loop->inner) continue; unswitch_single_loop (loops, loop, NULL_RTX, 0); #ifdef ENABLE_CHECKING verify_dominators (CDI_DOMINATORS); verify_loop_structure (loops); #endif } iv_analysis_done (); } /* Checks whether we can unswitch LOOP on condition at end of BB -- one of its basic blocks (for what it means see comments below). In case condition compares loop invariant cc mode register, return the jump in CINSN. */ static rtx may_unswitch_on (basic_block bb, struct loop *loop, rtx *cinsn) { rtx test, at, insn, op[2], stest; struct rtx_iv iv; unsigned i; enum machine_mode mode; /* BB must end in a simple conditional jump. */ if (!bb->succ || !bb->succ->succ_next || bb->succ->succ_next->succ_next) return NULL_RTX; if (!any_condjump_p (BB_END (bb))) return NULL_RTX; /* With branches inside loop. */ if (!flow_bb_inside_loop_p (loop, bb->succ->dest) || !flow_bb_inside_loop_p (loop, bb->succ->succ_next->dest)) return NULL_RTX; /* It must be executed just once each iteration (because otherwise we are unable to update dominator/irreducible loop information correctly). */ if (!just_once_each_iteration_p (loop, bb)) return NULL_RTX; /* Condition must be invariant. */ test = get_condition (BB_END (bb), &at, true); if (!test) return NULL_RTX; for (i = 0; i < 2; i++) { op[i] = XEXP (test, i); if (CONSTANT_P (op[i])) continue; insn = iv_get_reaching_def (at, op[i]); if (!iv_analyze (insn, op[i], &iv)) return NULL_RTX; if (iv.step != const0_rtx || iv.first_special) return NULL_RTX; op[i] = get_iv_value (&iv, const0_rtx); } mode = GET_MODE (op[0]); if (mode == VOIDmode) mode = GET_MODE (op[1]); if (GET_MODE_CLASS (mode) == MODE_CC) { if (at != BB_END (bb)) return NULL_RTX; *cinsn = BB_END (bb); if (!rtx_equal_p (op[0], XEXP (test, 0)) || !rtx_equal_p (op[1], XEXP (test, 1))) return NULL_RTX; return test; } stest = simplify_gen_relational (GET_CODE (test), SImode, mode, op[0], op[1]); if (stest == const0_rtx || stest == const_true_rtx) return stest; return canon_condition (gen_rtx_fmt_ee (GET_CODE (test), SImode, op[0], op[1])); } /* Reverses CONDition; returns NULL if we cannot. */ rtx reversed_condition (rtx cond) { enum rtx_code reversed; reversed = reversed_comparison_code (cond, NULL); if (reversed == UNKNOWN) return NULL_RTX; else return gen_rtx_fmt_ee (reversed, GET_MODE (cond), XEXP (cond, 0), XEXP (cond, 1)); } /* Unswitch single LOOP. COND_CHECKED holds list of conditions we already unswitched on and are therefore known to be true in this LOOP. NUM is number of unswitchings done; do not allow it to grow too much, it is too easy to create example on that the code would grow exponentially. */ static void unswitch_single_loop (struct loops *loops, struct loop *loop, rtx cond_checked, int num) { basic_block *bbs; struct loop *nloop; unsigned i; rtx cond, rcond = NULL_RTX, conds, rconds, acond, cinsn = NULL_RTX; int repeat; edge e; /* Do not unswitch too much. */ if (num > PARAM_VALUE (PARAM_MAX_UNSWITCH_LEVEL)) { if (dump_file) fprintf (dump_file, ";; Not unswitching anymore, hit max level\n"); return; } /* Only unswitch innermost loops. */ if (loop->inner) { if (dump_file) fprintf (dump_file, ";; Not unswitching, not innermost loop\n"); return; } /* We must be able to duplicate loop body. */ if (!can_duplicate_loop_p (loop)) { if (dump_file) fprintf (dump_file, ";; Not unswitching, can't duplicate loop\n"); return; } /* The loop should not be too large, to limit code growth. 
*/ if (num_loop_insns (loop) > PARAM_VALUE (PARAM_MAX_UNSWITCH_INSNS)) { if (dump_file) fprintf (dump_file, ";; Not unswitching, loop too big\n"); return; } /* Do not unswitch in cold areas. */ if (!maybe_hot_bb_p (loop->header)) { if (dump_file) fprintf (dump_file, ";; Not unswitching, not hot area\n"); return; } /* Nor if the loop usually does not roll. */ if (expected_loop_iterations (loop) < 1) { if (dump_file) fprintf (dump_file, ";; Not unswitching, loop iterations < 1\n"); return; } do { repeat = 0; /* Find a bb to unswitch on. */ bbs = get_loop_body (loop); iv_analysis_loop_init (loop); for (i = 0; i < loop->num_nodes; i++) if ((cond = may_unswitch_on (bbs[i], loop, &cinsn))) break; if (i == loop->num_nodes) { free (bbs); return; } if (cond != const0_rtx && cond != const_true_rtx) { rcond = reversed_condition (cond); if (rcond) rcond = canon_condition (rcond); /* Check whether the result can be predicted. */ for (acond = cond_checked; acond; acond = XEXP (acond, 1)) simplify_using_condition (XEXP (acond, 0), &cond, NULL); } if (cond == const_true_rtx) { /* Remove false path. */ e = FALLTHRU_EDGE (bbs[i]); remove_path (loops, e); free (bbs); repeat = 1; } else if (cond == const0_rtx) { /* Remove true path. */ e = BRANCH_EDGE (bbs[i]); remove_path (loops, e); free (bbs); repeat = 1; } } while (repeat); /* We found the condition we can unswitch on. */ conds = alloc_EXPR_LIST (0, cond, cond_checked); if (rcond) rconds = alloc_EXPR_LIST (0, rcond, cond_checked); else rconds = cond_checked; if (dump_file) fprintf (dump_file, ";; Unswitching loop\n"); /* Unswitch the loop on this condition. */ nloop = unswitch_loop (loops, loop, bbs[i], cond, cinsn); if (!nloop) abort (); /* Invoke itself on modified loops. */ unswitch_single_loop (loops, nloop, rconds, num + 1); unswitch_single_loop (loops, loop, conds, num + 1); free_EXPR_LIST_node (conds); if (rcond) free_EXPR_LIST_node (rconds); free (bbs); } /* Unswitch a LOOP w.r. to given basic block UNSWITCH_ON. We only support unswitching of innermost loops. UNSWITCH_ON must be executed in every iteration, i.e. it must dominate LOOP latch. COND is the condition determining which loop is entered. Returns NULL if impossible, new loop otherwise. The new loop is entered if COND is true. If CINSN is not NULL, it is the insn in that COND is compared. */ static struct loop * unswitch_loop (struct loops *loops, struct loop *loop, basic_block unswitch_on, rtx cond, rtx cinsn) { edge entry, latch_edge, true_edge, false_edge, e; basic_block switch_bb, unswitch_on_alt, src; struct loop *nloop; sbitmap zero_bitmap; int irred_flag, prob; rtx seq; /* Some sanity checking. */ if (!flow_bb_inside_loop_p (loop, unswitch_on)) abort (); if (!unswitch_on->succ || !unswitch_on->succ->succ_next || unswitch_on->succ->succ_next->succ_next) abort (); if (!just_once_each_iteration_p (loop, unswitch_on)) abort (); if (loop->inner) abort (); if (!flow_bb_inside_loop_p (loop, unswitch_on->succ->dest)) abort (); if (!flow_bb_inside_loop_p (loop, unswitch_on->succ->succ_next->dest)) abort (); entry = loop_preheader_edge (loop); /* Make a copy. */ src = entry->src; irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP; entry->flags &= ~EDGE_IRREDUCIBLE_LOOP; zero_bitmap = sbitmap_alloc (2); sbitmap_zero (zero_bitmap); if (!duplicate_loop_to_header_edge (loop, entry, loops, 1, zero_bitmap, NULL, NULL, NULL, 0)) return NULL; free (zero_bitmap); entry->flags |= irred_flag; /* Record the block with condition we unswitch on. 
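UNSWITCH_ON is the test block inside the original body, and rbi->copy points at its duplicate in the copy of the body created above. Below we build SWITCH_BB, which evaluates COND once before either version of the loop is entered, wire its two successor edges to the destinations of the branch and fallthru edges of the test blocks, and finally remove the paths that became unreachable, so that each resulting loop keeps only one arm of the condition.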
*/ unswitch_on_alt = unswitch_on->rbi->copy; true_edge = BRANCH_EDGE (unswitch_on_alt); false_edge = FALLTHRU_EDGE (unswitch_on); latch_edge = loop->latch->rbi->copy->succ; /* Create a block with the condition. */ prob = true_edge->probability; switch_bb = create_empty_bb (EXIT_BLOCK_PTR->prev_bb); seq = compare_and_jump_seq (XEXP (cond, 0), XEXP (cond, 1), GET_CODE (cond), block_label (true_edge->dest), prob, cinsn); emit_insn_after (seq, BB_END (switch_bb)); e = make_edge (switch_bb, true_edge->dest, 0); e->probability = prob; e->count = latch_edge->count * prob / REG_BR_PROB_BASE; e = make_edge (switch_bb, FALLTHRU_EDGE (unswitch_on)->dest, EDGE_FALLTHRU); e->probability = false_edge->probability; e->count = latch_edge->count * (false_edge->probability) / REG_BR_PROB_BASE; if (irred_flag) { switch_bb->flags |= BB_IRREDUCIBLE_LOOP; switch_bb->succ->flags |= EDGE_IRREDUCIBLE_LOOP; switch_bb->succ->succ_next->flags |= EDGE_IRREDUCIBLE_LOOP; } else { switch_bb->flags &= ~BB_IRREDUCIBLE_LOOP; switch_bb->succ->flags &= ~EDGE_IRREDUCIBLE_LOOP; switch_bb->succ->succ_next->flags &= ~EDGE_IRREDUCIBLE_LOOP; } /* Loopify from the copy of LOOP body, constructing the new loop. */ nloop = loopify (loops, latch_edge, loop->header->rbi->copy->pred, switch_bb); /* Remove branches that are now unreachable in new loops. */ remove_path (loops, true_edge); remove_path (loops, false_edge); /* One of created loops do not have to be subloop of the outer loop now, so fix its placement in loop data structure. */ fix_loop_placement (loop); fix_loop_placement (nloop); /* Preserve the simple loop preheaders. */ loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); loop_split_edge_with (loop_preheader_edge (nloop), NULL_RTX); return nloop; } /* Loop unrolling and peeling. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This pass performs loop unrolling and peeling. We only perform these optimizations on innermost loops (with single exception) because the impact on performance is greatest here, and we want to avoid unnecessary code size growth. The gain is caused by greater sequentiality of code, better code to optimize for further passes and in some cases by fewer testings of exit conditions. The main problem is code growth, that impacts performance negatively due to effect of caches. What we do: -- complete peeling of once-rolling loops; this is the above mentioned exception, as this causes loop to be cancelled completely and does not cause code growth -- complete peeling of loops that roll (small) constant times. -- simple peeling of first iterations of loops that do not roll much (according to profile feedback) -- unrolling of loops that roll constant times; this is almost always win, as we get rid of exit condition tests. 
-- unrolling of loops that roll number of times that we can compute in runtime; we also get rid of exit condition tests here, but there is the extra expense for calculating the number of iterations -- simple unrolling of remaining loops; this is performed only if we are asked to, as the gain is questionable in this case and often it may even slow down the code For more detailed descriptions of each of those, see comments at appropriate function below. There is a lot of parameters (defined and described in params.def) that control how much we unroll/peel. ??? A great problem is that we don't have a good way how to determine how many times we should unroll the loop; the experiments I have made showed that this choice may affect performance in order of several %. */ static void decide_unrolling_and_peeling (struct loops *, int); static void peel_loops_completely (struct loops *, int); static void decide_peel_simple (struct loop *, int); static void decide_peel_once_rolling (struct loop *, int); static void decide_peel_completely (struct loop *, int); static void decide_unroll_stupid (struct loop *, int); static void decide_unroll_constant_iterations (struct loop *, int); static void decide_unroll_runtime_iterations (struct loop *, int); static void peel_loop_simple (struct loops *, struct loop *); static void peel_loop_completely (struct loops *, struct loop *); static void unroll_loop_stupid (struct loops *, struct loop *); static void unroll_loop_constant_iterations (struct loops *, struct loop *); static void unroll_loop_runtime_iterations (struct loops *, struct loop *); /* Unroll and/or peel (depending on FLAGS) LOOPS. */ void unroll_and_peel_loops (struct loops *loops, int flags) { struct loop *loop, *next; bool check; /* First perform complete loop peeling (it is almost surely a win, and affects parameters for further decision a lot). */ peel_loops_completely (loops, flags); /* Now decide rest of unrolling and peeling. */ decide_unrolling_and_peeling (loops, flags); loop = loops->tree_root; while (loop->inner) loop = loop->inner; /* Scan the loops, inner ones first. */ while (loop != loops->tree_root) { if (loop->next) { next = loop->next; while (next->inner) next = next->inner; } else next = loop->outer; check = true; /* And perform the appropriate transformations. */ switch (loop->lpt_decision.decision) { case LPT_PEEL_COMPLETELY: /* Already done. */ abort (); case LPT_PEEL_SIMPLE: peel_loop_simple (loops, loop); break; case LPT_UNROLL_CONSTANT: unroll_loop_constant_iterations (loops, loop); break; case LPT_UNROLL_RUNTIME: unroll_loop_runtime_iterations (loops, loop); break; case LPT_UNROLL_STUPID: unroll_loop_stupid (loops, loop); break; case LPT_NONE: check = false; break; default: abort (); } if (check) { #ifdef ENABLE_CHECKING verify_dominators (CDI_DOMINATORS); verify_loop_structure (loops); #endif } loop = next; } iv_analysis_done (); } /* Check whether exit of the LOOP is at the end of loop body. */ static bool loop_exit_at_end_p (struct loop *loop) { struct niter_desc *desc = get_simple_loop_desc (loop); rtx insn; if (desc->in_edge->dest != loop->latch) return false; /* Check that the latch is empty. */ FOR_BB_INSNS (loop->latch, insn) { if (INSN_P (insn)) return false; } return true; } /* Check whether to peel LOOPS (depending on FLAGS) completely and do so. 
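The walk below visits loops innermost first: it starts at the deepest descendant of the tree root and, after handling a loop, moves to the innermost loop of the next sibling, or to the parent when there is no sibling. Each loop is first tested by decide_peel_once_rolling and, failing that, by decide_peel_completely; successful candidates are peeled immediately, because cancelling a loop changes the insn counts and iteration estimates that the remaining unrolling and peeling decisions depend on.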
*/ static void peel_loops_completely (struct loops *loops, int flags) { struct loop *loop, *next; loop = loops->tree_root; while (loop->inner) loop = loop->inner; while (loop != loops->tree_root) { if (loop->next) { next = loop->next; while (next->inner) next = next->inner; } else next = loop->outer; loop->lpt_decision.decision = LPT_NONE; if (dump_file) fprintf (dump_file, "\n;; *** Considering loop %d for complete peeling ***\n", loop->num); loop->ninsns = num_loop_insns (loop); decide_peel_once_rolling (loop, flags); if (loop->lpt_decision.decision == LPT_NONE) decide_peel_completely (loop, flags); if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY) { peel_loop_completely (loops, loop); #ifdef ENABLE_CHECKING verify_dominators (CDI_DOMINATORS); verify_loop_structure (loops); #endif } loop = next; } } /* Decide whether unroll or peel LOOPS (depending on FLAGS) and how much. */ static void decide_unrolling_and_peeling (struct loops *loops, int flags) { struct loop *loop = loops->tree_root, *next; while (loop->inner) loop = loop->inner; /* Scan the loops, inner ones first. */ while (loop != loops->tree_root) { if (loop->next) { next = loop->next; while (next->inner) next = next->inner; } else next = loop->outer; loop->lpt_decision.decision = LPT_NONE; if (dump_file) fprintf (dump_file, "\n;; *** Considering loop %d ***\n", loop->num); /* Do not peel cold areas. */ if (!maybe_hot_bb_p (loop->header)) { if (dump_file) fprintf (dump_file, ";; Not considering loop, cold area\n"); loop = next; continue; } /* Can the loop be manipulated? */ if (!can_duplicate_loop_p (loop)) { if (dump_file) fprintf (dump_file, ";; Not considering loop, cannot duplicate\n"); loop = next; continue; } /* Skip non-innermost loops. */ if (loop->inner) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is not innermost\n"); loop = next; continue; } loop->ninsns = num_loop_insns (loop); loop->av_ninsns = average_num_loop_insns (loop); /* Try transformations one by one in decreasing order of priority. */ decide_unroll_constant_iterations (loop, flags); if (loop->lpt_decision.decision == LPT_NONE) decide_unroll_runtime_iterations (loop, flags); if (loop->lpt_decision.decision == LPT_NONE) decide_unroll_stupid (loop, flags); if (loop->lpt_decision.decision == LPT_NONE) decide_peel_simple (loop, flags); loop = next; } } /* Decide whether the LOOP is once rolling and suitable for complete peeling. */ static void decide_peel_once_rolling (struct loop *loop, int flags ATTRIBUTE_UNUSED) { struct niter_desc *desc; if (dump_file) fprintf (dump_file, "\n;; Considering peeling once rolling loop\n"); /* Is the loop small enough? */ if ((unsigned) PARAM_VALUE (PARAM_MAX_ONCE_PEELED_INSNS) < loop->ninsns) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is too big\n"); return; } /* Check for simple loops. */ desc = get_simple_loop_desc (loop); /* Check number of iterations. */ if (!desc->simple_p || desc->assumptions || !desc->const_iter || desc->niter != 0) { if (dump_file) fprintf (dump_file, ";; Unable to prove that the loop rolls exactly once\n"); return; } /* Success. */ if (dump_file) fprintf (dump_file, ";; Decided to peel exactly once rolling loop\n"); loop->lpt_decision.decision = LPT_PEEL_COMPLETELY; } /* Decide whether the LOOP is suitable for complete peeling. */ static void decide_peel_completely (struct loop *loop, int flags ATTRIBUTE_UNUSED) { unsigned npeel; struct niter_desc *desc; if (dump_file) fprintf (dump_file, "\n;; Considering peeling completely\n"); /* Skip non-innermost loops. 
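Peeling a loop that still contains subloops would replicate each subloop once per peeled iteration, so only innermost loops are considered.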
*/ if (loop->inner) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is not innermost\n"); return; } /* Do not peel cold areas. */ if (!maybe_hot_bb_p (loop->header)) { if (dump_file) fprintf (dump_file, ";; Not considering loop, cold area\n"); return; } /* Can the loop be manipulated? */ if (!can_duplicate_loop_p (loop)) { if (dump_file) fprintf (dump_file, ";; Not considering loop, cannot duplicate\n"); return; } /* npeel = number of iterations to peel. */ npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS) / loop->ninsns; if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES)) npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES); /* Is the loop small enough? */ if (!npeel) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is too big\n"); return; } /* Check for simple loops. */ desc = get_simple_loop_desc (loop); /* Check number of iterations. */ if (!desc->simple_p || desc->assumptions || !desc->const_iter) { if (dump_file) fprintf (dump_file, ";; Unable to prove that the loop iterates constant times\n"); return; } if (desc->niter > npeel - 1) { if (dump_file) { fprintf (dump_file, ";; Not peeling loop completely, rolls too much ("); fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter); fprintf (dump_file, " iterations > %d [maximum peelings])\n", npeel); } return; } /* Success. */ if (dump_file) fprintf (dump_file, ";; Decided to peel loop completely\n"); loop->lpt_decision.decision = LPT_PEEL_COMPLETELY; } /* Peel all iterations of LOOP, remove exit edges and cancel the loop completely. The transformation done: for (i = 0; i < 4; i++) body; ==> i = 0; body; i++; body; i++; body; i++; body; i++; */ static void peel_loop_completely (struct loops *loops, struct loop *loop) { sbitmap wont_exit; unsigned HOST_WIDE_INT npeel; unsigned n_remove_edges, i; edge *remove_edges, ei; struct niter_desc *desc = get_simple_loop_desc (loop); npeel = desc->niter; if (npeel) { wont_exit = sbitmap_alloc (npeel + 1); sbitmap_ones (wont_exit); RESET_BIT (wont_exit, 0); if (desc->noloop_assumptions) RESET_BIT (wont_exit, 1); remove_edges = xcalloc (npeel, sizeof (edge)); n_remove_edges = 0; if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), loops, npeel, wont_exit, desc->out_edge, remove_edges, &n_remove_edges, DLTHE_FLAG_UPDATE_FREQ)) abort (); free (wont_exit); /* Remove the exit edges. */ for (i = 0; i < n_remove_edges; i++) remove_path (loops, remove_edges[i]); free (remove_edges); } ei = desc->in_edge; free_simple_loop_desc (loop); /* Now remove the unreachable part of the last iteration and cancel the loop. */ remove_path (loops, ei); if (dump_file) fprintf (dump_file, ";; Peeled loop completely, %d times\n", (int) npeel); } /* Decide whether to unroll LOOP iterating constant number of times and how much. */ static void decide_unroll_constant_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i; struct niter_desc *desc; if (!(flags & UAP_UNROLL)) { /* We were not asked to, just return back silently. */ return; } if (dump_file) fprintf (dump_file, "\n;; Considering unrolling loop with constant " "number of iterations\n"); /* nunroll = total number of copies of the original loop body in unrolled loop (i.e. if it is 2, we have to duplicate loop body once. 
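For example, a 25-insn body with a 200-insn PARAM_MAX_UNROLLED_INSNS budget gives an initial bound of 8 copies; the bound is then lowered if the average-size budget or PARAM_MAX_UNROLL_TIMES is more restrictive, as computed below. These numbers are only illustrative, the real bounds come from the corresponding --param values.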
*/ nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns; nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns; if (nunroll > nunroll_by_av) nunroll = nunroll_by_av; if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES)) nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES); /* Skip big loops. */ if (nunroll <= 1) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is too big\n"); return; } /* Check for simple loops. */ desc = get_simple_loop_desc (loop); /* Check number of iterations. */ if (!desc->simple_p || !desc->const_iter || desc->assumptions) { if (dump_file) fprintf (dump_file, ";; Unable to prove that the loop iterates constant times\n"); return; } /* Check whether the loop rolls enough to consider. */ if (desc->niter < 2 * nunroll) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); return; } /* Success; now compute number of iterations to unroll. We alter nunroll so that as few as possible copies of loop body are necessary, while still not decreasing the number of unrollings too much (at most by 1). */ best_copies = 2 * nunroll + 10; i = 2 * nunroll + 2; if (i - 1 >= desc->niter) i = desc->niter - 2; for (; i >= nunroll - 1; i--) { unsigned exit_mod = desc->niter % (i + 1); if (!loop_exit_at_end_p (loop)) n_copies = exit_mod + i + 1; else if (exit_mod != (unsigned) i || desc->noloop_assumptions != NULL_RTX) n_copies = exit_mod + i + 2; else n_copies = i + 1; if (n_copies < best_copies) { best_copies = n_copies; best_unroll = i; } } if (dump_file) fprintf (dump_file, ";; max_unroll %d (%d copies, initial %d).\n", best_unroll + 1, best_copies, nunroll); loop->lpt_decision.decision = LPT_UNROLL_CONSTANT; loop->lpt_decision.times = best_unroll; if (dump_file) fprintf (dump_file, ";; Decided to unroll the constant times rolling loop, %d times.\n", loop->lpt_decision.times); } /* Unroll LOOP with constant number of iterations LOOP->LPT_DECISION.TIMES + 1 times. The transformation does this: for (i = 0; i < 102; i++) body; ==> i = 0; body; i++; body; i++; while (i < 102) { body; i++; body; i++; body; i++; body; i++; } */ static void unroll_loop_constant_iterations (struct loops *loops, struct loop *loop) { unsigned HOST_WIDE_INT niter; unsigned exit_mod; sbitmap wont_exit; unsigned n_remove_edges, i; edge *remove_edges; unsigned max_unroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); bool exit_at_end = loop_exit_at_end_p (loop); niter = desc->niter; if (niter <= max_unroll + 1) abort (); /* Should not get here (such loop should be peeled instead). */ exit_mod = niter % (max_unroll + 1); wont_exit = sbitmap_alloc (max_unroll + 1); sbitmap_ones (wont_exit); remove_edges = xcalloc (max_unroll + exit_mod + 1, sizeof (edge)); n_remove_edges = 0; if (!exit_at_end) { /* The exit is not at the end of the loop; leave exit test in the first copy, so that the loops that start with test of exit condition have continuous body after unrolling. */ if (dump_file) fprintf (dump_file, ";; Condition on beginning of loop.\n"); /* Peel exit_mod iterations. 
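With the example from the comment above, niter is 102 and the loop is unrolled into max_unroll + 1 = 4 copies, so exit_mod = 102 % 4 = 2: two iterations are peeled onto the preheader edge here and the remaining 100 iterations are executed by the unrolled body.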
*/ RESET_BIT (wont_exit, 0); if (desc->noloop_assumptions) RESET_BIT (wont_exit, 1); if (exit_mod) { if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), loops, exit_mod, wont_exit, desc->out_edge, remove_edges, &n_remove_edges, DLTHE_FLAG_UPDATE_FREQ)) abort (); desc->noloop_assumptions = NULL_RTX; desc->niter -= exit_mod; desc->niter_max -= exit_mod; } SET_BIT (wont_exit, 1); } else { /* Leave exit test in last copy, for the same reason as above if the loop tests the condition at the end of loop body. */ if (dump_file) fprintf (dump_file, ";; Condition on end of loop.\n"); /* We know that niter >= max_unroll + 2; so we do not need to care of case when we would exit before reaching the loop. So just peel exit_mod + 1 iterations. */ if (exit_mod != max_unroll || desc->noloop_assumptions) { RESET_BIT (wont_exit, 0); if (desc->noloop_assumptions) RESET_BIT (wont_exit, 1); if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), loops, exit_mod + 1, wont_exit, desc->out_edge, remove_edges, &n_remove_edges, DLTHE_FLAG_UPDATE_FREQ)) abort (); desc->niter -= exit_mod + 1; desc->niter_max -= exit_mod + 1; desc->noloop_assumptions = NULL_RTX; SET_BIT (wont_exit, 0); SET_BIT (wont_exit, 1); } RESET_BIT (wont_exit, max_unroll); } /* Now unroll the loop. */ if (!duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), loops, max_unroll, wont_exit, desc->out_edge, remove_edges, &n_remove_edges, DLTHE_FLAG_UPDATE_FREQ)) abort (); free (wont_exit); if (exit_at_end) { basic_block exit_block = desc->in_edge->src->rbi->copy; /* Find a new in and out edge; they are in the last copy we have made. */ if (exit_block->succ->dest == desc->out_edge->dest) { desc->out_edge = exit_block->succ; desc->in_edge = exit_block->succ->succ_next; } else { desc->out_edge = exit_block->succ->succ_next; desc->in_edge = exit_block->succ; } } desc->niter /= max_unroll + 1; desc->niter_max /= max_unroll + 1; desc->niter_expr = GEN_INT (desc->niter); /* Remove the edges. */ for (i = 0; i < n_remove_edges; i++) remove_path (loops, remove_edges[i]); free (remove_edges); if (dump_file) fprintf (dump_file, ";; Unrolled loop %d times, constant # of iterations %i insns\n", max_unroll, num_loop_insns (loop)); } /* Decide whether to unroll LOOP iterating runtime computable number of times and how much. */ static void decide_unroll_runtime_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; if (!(flags & UAP_UNROLL)) { /* We were not asked to, just return back silently. */ return; } if (dump_file) fprintf (dump_file, "\n;; Considering unrolling loop with runtime " "computable number of iterations\n"); /* nunroll = total number of copies of the original loop body in unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */ nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns; nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns; if (nunroll > nunroll_by_av) nunroll = nunroll_by_av; if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES)) nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES); /* Skip big loops. */ if (nunroll <= 1) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is too big\n"); return; } /* Check for simple loops. */ desc = get_simple_loop_desc (loop); /* Check simpleness. 
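Simple here means that get_simple_loop_desc was able to describe the number of iterations by an expression that can be evaluated at runtime without unverified assumptions; loops whose iteration count is a compile-time constant are rejected just below, since decide_unroll_constant_iterations handles those.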
*/ if (!desc->simple_p || desc->assumptions) { if (dump_file) fprintf (dump_file, ";; Unable to prove that the number of iterations " "can be counted in runtime\n"); return; } if (desc->const_iter) { if (dump_file) fprintf (dump_file, ";; Loop iterates constant times\n"); return; } /* If we have profile feedback, check whether the loop rolls. */ if (loop->header->count && expected_loop_iterations (loop) < 2 * nunroll) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); return; } /* Success; now force nunroll to be power of 2, as we are unable to cope with overflows in computation of number of iterations. */ for (i = 1; 2 * i <= nunroll; i *= 2) continue; loop->lpt_decision.decision = LPT_UNROLL_RUNTIME; loop->lpt_decision.times = i - 1; if (dump_file) fprintf (dump_file, ";; Decided to unroll the runtime computable " "times rolling loop, %d times.\n", loop->lpt_decision.times); } /* Unroll LOOP for that we are able to count number of iterations in runtime LOOP->LPT_DECISION.TIMES + 1 times. The transformation does this (with some extra care for case n < 0): for (i = 0; i < n; i++) body; ==> i = 0; mod = n % 4; switch (mod) { case 3: body; i++; case 2: body; i++; case 1: body; i++; case 0: ; } while (i < n) { body; i++; body; i++; body; i++; body; i++; } */ static void unroll_loop_runtime_iterations (struct loops *loops, struct loop *loop) { rtx old_niter, niter, init_code, branch_code, tmp; unsigned i, j, p; basic_block preheader, *body, *dom_bbs, swtch, ezc_swtch; unsigned n_dom_bbs; sbitmap wont_exit; int may_exit_copy; unsigned n_peel, n_remove_edges; edge *remove_edges, e; bool extra_zero_check, last_may_exit; unsigned max_unroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); bool exit_at_end = loop_exit_at_end_p (loop); /* Remember blocks whose dominators will have to be updated. */ dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block)); n_dom_bbs = 0; body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { unsigned nldom; basic_block *ldom; nldom = get_dominated_by (CDI_DOMINATORS, body[i], &ldom); for (j = 0; j < nldom; j++) if (!flow_bb_inside_loop_p (loop, ldom[j])) dom_bbs[n_dom_bbs++] = ldom[j]; free (ldom); } free (body); if (!exit_at_end) { /* Leave exit in first copy (for explanation why see comment in unroll_loop_constant_iterations). */ may_exit_copy = 0; n_peel = max_unroll - 1; extra_zero_check = true; last_may_exit = false; } else { /* Leave exit in last copy (for explanation why see comment in unroll_loop_constant_iterations). */ may_exit_copy = max_unroll; n_peel = max_unroll; extra_zero_check = false; last_may_exit = true; } /* Get expression for number of iterations. */ start_sequence (); old_niter = niter = gen_reg_rtx (desc->mode); tmp = force_operand (copy_rtx (desc->niter_expr), niter); if (tmp != niter) emit_move_insn (niter, tmp); /* Count modulo by ANDing it with max_unroll; we use the fact that the number of unrollings is a power of two, and thus this is correct even if there is overflow in the computation. */ niter = expand_simple_binop (desc->mode, AND, niter, GEN_INT (max_unroll), NULL_RTX, 0, OPTAB_LIB_WIDEN); init_code = get_insns (); end_sequence (); /* Precondition the loop. 
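Preconditioning executes the leftover niter mod (max_unroll + 1) iterations before entering the unrolled body, so that the body always runs a whole multiple of max_unroll + 1 iterations. The init code computed above is emitted on the preheader edge, the loop below peels the extra copies, and compare_and_jump_seq builds the chain of comparisons that plays the role of the switch (mod) statement from the example in the function comment.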
*/ loop_split_edge_with (loop_preheader_edge (loop), init_code); remove_edges = xcalloc (max_unroll + n_peel + 1, sizeof (edge)); n_remove_edges = 0; wont_exit = sbitmap_alloc (max_unroll + 2); /* Peel the first copy of loop body (almost always we must leave exit test here; the only exception is when we have extra zero check and the number of iterations is reliable. Also record the place of (possible) extra zero check. */ sbitmap_zero (wont_exit); if (extra_zero_check && !desc->noloop_assumptions) SET_BIT (wont_exit, 1); ezc_swtch = loop_preheader_edge (loop)->src; if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), loops, 1, wont_exit, desc->out_edge, remove_edges, &n_remove_edges, DLTHE_FLAG_UPDATE_FREQ)) abort (); /* Record the place where switch will be built for preconditioning. */ swtch = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); for (i = 0; i < n_peel; i++) { /* Peel the copy. */ sbitmap_zero (wont_exit); if (i != n_peel - 1 || !last_may_exit) SET_BIT (wont_exit, 1); if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), loops, 1, wont_exit, desc->out_edge, remove_edges, &n_remove_edges, DLTHE_FLAG_UPDATE_FREQ)) abort (); /* Create item for switch. */ j = n_peel - i - (extra_zero_check ? 0 : 1); p = REG_BR_PROB_BASE / (i + 2); preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ, block_label (preheader), p, NULL_RTX); swtch = loop_split_edge_with (swtch->pred, branch_code); set_immediate_dominator (CDI_DOMINATORS, preheader, swtch); swtch->succ->probability = REG_BR_PROB_BASE - p; e = make_edge (swtch, preheader, swtch->succ->flags & EDGE_IRREDUCIBLE_LOOP); e->probability = p; } if (extra_zero_check) { /* Add branch for zero iterations. */ p = REG_BR_PROB_BASE / (max_unroll + 1); swtch = ezc_swtch; preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ, block_label (preheader), p, NULL_RTX); swtch = loop_split_edge_with (swtch->succ, branch_code); set_immediate_dominator (CDI_DOMINATORS, preheader, swtch); swtch->succ->probability = REG_BR_PROB_BASE - p; e = make_edge (swtch, preheader, swtch->succ->flags & EDGE_IRREDUCIBLE_LOOP); e->probability = p; } /* Recount dominators for outer blocks. */ iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, n_dom_bbs); /* And unroll loop. */ sbitmap_ones (wont_exit); RESET_BIT (wont_exit, may_exit_copy); if (!duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), loops, max_unroll, wont_exit, desc->out_edge, remove_edges, &n_remove_edges, DLTHE_FLAG_UPDATE_FREQ)) abort (); free (wont_exit); if (exit_at_end) { basic_block exit_block = desc->in_edge->src->rbi->copy; /* Find a new in and out edge; they are in the last copy we have made. */ if (exit_block->succ->dest == desc->out_edge->dest) { desc->out_edge = exit_block->succ; desc->in_edge = exit_block->succ->succ_next; } else { desc->out_edge = exit_block->succ->succ_next; desc->in_edge = exit_block->succ; } } /* Remove the edges. */ for (i = 0; i < n_remove_edges; i++) remove_path (loops, remove_edges[i]); free (remove_edges); /* We must be careful when updating the number of iterations due to preconditioning and the fact that the value must be valid at entry of the loop. 
After passing through the above code, we see that the correct new number of iterations is this: */ if (desc->const_iter) abort (); desc->niter_expr = simplify_gen_binary (UDIV, desc->mode, old_niter, GEN_INT (max_unroll + 1)); desc->niter_max /= max_unroll + 1; if (exit_at_end) { desc->niter_expr = simplify_gen_binary (MINUS, desc->mode, desc->niter_expr, const1_rtx); desc->noloop_assumptions = NULL_RTX; desc->niter_max--; } if (dump_file) fprintf (dump_file, ";; Unrolled loop %d times, counting # of iterations " "in runtime, %i insns\n", max_unroll, num_loop_insns (loop)); } /* Decide whether to simply peel LOOP and how much. */ static void decide_peel_simple (struct loop *loop, int flags) { unsigned npeel; struct niter_desc *desc; if (!(flags & UAP_PEEL)) { /* We were not asked to, just return back silently. */ return; } if (dump_file) fprintf (dump_file, "\n;; Considering simply peeling loop\n"); /* npeel = number of iterations to peel. */ npeel = PARAM_VALUE (PARAM_MAX_PEELED_INSNS) / loop->ninsns; if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_PEEL_TIMES)) npeel = PARAM_VALUE (PARAM_MAX_PEEL_TIMES); /* Skip big loops. */ if (!npeel) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is too big\n"); return; } /* Check for simple loops. */ desc = get_simple_loop_desc (loop); /* Check number of iterations. */ if (desc->simple_p && !desc->assumptions && desc->const_iter) { if (dump_file) fprintf (dump_file, ";; Loop iterates constant times\n"); return; } /* Do not simply peel loops with branches inside -- it increases number of mispredicts. */ if (num_loop_branches (loop) > 1) { if (dump_file) fprintf (dump_file, ";; Not peeling, contains branches\n"); return; } if (loop->header->count) { unsigned niter = expected_loop_iterations (loop); if (niter + 1 > npeel) { if (dump_file) { fprintf (dump_file, ";; Not peeling loop, rolls too much ("); fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) (niter + 1)); fprintf (dump_file, " iterations > %d [maximum peelings])\n", npeel); } return; } npeel = niter + 1; } else { /* For now we have no good heuristics to decide whether loop peeling will be effective, so disable it. */ if (dump_file) fprintf (dump_file, ";; Not peeling loop, no evidence it will be profitable\n"); return; } /* Success. */ loop->lpt_decision.decision = LPT_PEEL_SIMPLE; loop->lpt_decision.times = npeel; if (dump_file) fprintf (dump_file, ";; Decided to simply peel the loop, %d times.\n", loop->lpt_decision.times); } /* Peel a LOOP LOOP->LPT_DECISION.TIMES times. The transformation: while (cond) body; ==> if (!cond) goto end; body; if (!cond) goto end; body; while (cond) body; end: ; */ static void peel_loop_simple (struct loops *loops, struct loop *loop) { sbitmap wont_exit; unsigned npeel = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); wont_exit = sbitmap_alloc (npeel + 1); sbitmap_zero (wont_exit); if (!duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop), loops, npeel, wont_exit, NULL, NULL, NULL, DLTHE_FLAG_UPDATE_FREQ)) abort (); free (wont_exit); if (desc->simple_p) { if (desc->const_iter) { desc->niter -= npeel; desc->niter_expr = GEN_INT (desc->niter); desc->noloop_assumptions = NULL_RTX; } else { /* We cannot just update niter_expr, as its value might be clobbered inside loop. We could handle this by counting the number into temporary just like we do in runtime unrolling, but it does not seem worthwhile. 
*/ free_simple_loop_desc (loop); } } if (dump_file) fprintf (dump_file, ";; Peeling loop %d times\n", npeel); } /* Decide whether to unroll LOOP stupidly and how much. */ static void decide_unroll_stupid (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; if (!(flags & UAP_UNROLL_ALL)) { /* We were not asked to, just return back silently. */ return; } if (dump_file) fprintf (dump_file, "\n;; Considering unrolling loop stupidly\n"); /* nunroll = total number of copies of the original loop body in unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */ nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns; nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns; if (nunroll > nunroll_by_av) nunroll = nunroll_by_av; if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES)) nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES); /* Skip big loops. */ if (nunroll <= 1) { if (dump_file) fprintf (dump_file, ";; Not considering loop, is too big\n"); return; } /* Check for simple loops. */ desc = get_simple_loop_desc (loop); /* Check simpleness. */ if (desc->simple_p && !desc->assumptions) { if (dump_file) fprintf (dump_file, ";; The loop is simple\n"); return; } /* Do not unroll loops with branches inside -- it increases number of mispredicts. */ if (num_loop_branches (loop) > 1) { if (dump_file) fprintf (dump_file, ";; Not unrolling, contains branches\n"); return; } /* If we have profile feedback, check whether the loop rolls. */ if (loop->header->count && expected_loop_iterations (loop) < 2 * nunroll) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); return; } /* Success. Now force nunroll to be power of 2, as it seems that this improves results (partially because of better alignments, partially because of some dark magic). */ for (i = 1; 2 * i <= nunroll; i *= 2) continue; loop->lpt_decision.decision = LPT_UNROLL_STUPID; loop->lpt_decision.times = i - 1; if (dump_file) fprintf (dump_file, ";; Decided to unroll the loop stupidly, %d times.\n", loop->lpt_decision.times); } /* Unroll a LOOP LOOP->LPT_DECISION.TIMES times. The transformation: while (cond) body; ==> while (cond) { body; if (!cond) break; body; if (!cond) break; body; if (!cond) break; body; } */ static void unroll_loop_stupid (struct loops *loops, struct loop *loop) { sbitmap wont_exit; unsigned nunroll = loop->lpt_decision.times; struct niter_desc *desc = get_simple_loop_desc (loop); wont_exit = sbitmap_alloc (nunroll + 1); sbitmap_zero (wont_exit); if (!duplicate_loop_to_header_edge (loop, loop_latch_edge (loop), loops, nunroll, wont_exit, NULL, NULL, NULL, DLTHE_FLAG_UPDATE_FREQ)) abort (); free (wont_exit); if (desc->simple_p) { /* We indeed may get here provided that there are nontrivial assumptions for a loop to be really simple. We could update the counts, but the problem is that we are unable to decide which exit will be taken (not really true in case the number of iterations is constant, but noone will do anything with this information, so we do not worry about it). */ desc->simple_p = false; } if (dump_file) fprintf (dump_file, ";; Unrolled loop %d times, %i insns\n", nunroll, num_loop_insns (loop)); } /* Control flow graph manipulation code for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains low level functions to manipulate the CFG and analyze it that are aware of the RTL intermediate language. Available functionality: - Basic CFG/RTL manipulation API documented in cfghooks.h - CFG-aware instruction chain manipulation delete_insn, delete_insn_chain - Edge splitting and committing to edges insert_insn_on_edge, commit_edge_insertions - CFG updating after insn simplification purge_dead_edges, purge_all_dead_edges Functions not supposed for generic use: - Infrastructure to determine quickly basic block for insn compute_bb_for_insn, update_bb_for_insn, set_block_for_insn, - Edge redirection with updating and optimizing of insn chain block_label, tidy_fallthru_edge, force_nonfallthru */ /* The labels mentioned in non-jump rtl. Valid during find_basic_blocks. */ /* ??? Should probably be using LABEL_NUSES instead. It would take a bit of surgery to be able to use or co-opt the routines in jump. */ rtx label_value_list; static int can_delete_note_p (rtx); static int can_delete_label_p (rtx); static void commit_one_edge_insertion (edge, int); static rtx last_loop_beg_note (rtx); static bool back_edge_of_syntactic_loop_p (basic_block, basic_block); basic_block force_nonfallthru_and_redirect (edge, basic_block); static basic_block rtl_split_edge (edge); static bool rtl_move_block_after (basic_block, basic_block); static int rtl_verify_flow_info (void); static basic_block cfg_layout_split_block (basic_block, void *); static edge cfg_layout_redirect_edge_and_branch (edge, basic_block); static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block); static void cfg_layout_delete_block (basic_block); static void rtl_delete_block (basic_block); static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block); static edge rtl_redirect_edge_and_branch (edge, basic_block); static basic_block rtl_split_block (basic_block, void *); static void rtl_dump_bb (basic_block, FILE *, int); static int rtl_verify_flow_info_1 (void); static void mark_killed_regs (rtx, rtx, void *); static void rtl_make_forwarder_block (edge); /* Return true if NOTE is not one of the ones that must be kept paired, so that we may simply delete it. */ static int can_delete_note_p (rtx note) { return (NOTE_LINE_NUMBER (note) == NOTE_INSN_DELETED || NOTE_LINE_NUMBER (note) == NOTE_INSN_BASIC_BLOCK || NOTE_LINE_NUMBER (note) == NOTE_INSN_UNLIKELY_EXECUTED_CODE || NOTE_LINE_NUMBER (note) == NOTE_INSN_PREDICTION); } /* True if a given label can be deleted. */ static int can_delete_label_p (rtx label) { return (!LABEL_PRESERVE_P (label) /* User declared labels must be preserved. */ && LABEL_NAME (label) == 0 && !in_expr_list_p (forced_labels, label) && !in_expr_list_p (label_value_list, label)); } /* Delete INSN by patching it out. Return the next insn. 
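Labels that cannot simply be unlinked (they may be referenced from variables, the constant pool etc.) are kept in the chain, converted to NOTE_INSN_DELETED_LABEL notes. Deleting a jump, an insn carrying REG_LABEL notes, or an ADDR_VEC or ADDR_DIFF_VEC dispatch table also decrements LABEL_NUSES of the labels it referenced.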
*/ rtx delete_insn (rtx insn) { rtx next = NEXT_INSN (insn); rtx note; bool really_delete = true; if (GET_CODE (insn) == CODE_LABEL) { /* Some labels can't be directly removed from the INSN chain, as they might be references via variables, constant pool etc. Convert them to the special NOTE_INSN_DELETED_LABEL note. */ if (! can_delete_label_p (insn)) { const char *name = LABEL_NAME (insn); really_delete = false; PUT_CODE (insn, NOTE); NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL; NOTE_DELETED_LABEL_NAME (insn) = name; } remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels); } if (really_delete) { /* If this insn has already been deleted, something is very wrong. */ if (INSN_DELETED_P (insn)) abort (); remove_insn (insn); INSN_DELETED_P (insn) = 1; } /* If deleting a jump, decrement the use count of the label. Deleting the label itself should happen in the normal course of block merging. */ if (GET_CODE (insn) == JUMP_INSN && JUMP_LABEL (insn) && GET_CODE (JUMP_LABEL (insn)) == CODE_LABEL) LABEL_NUSES (JUMP_LABEL (insn))--; /* Also if deleting an insn that references a label. */ else { while ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)) != NULL_RTX && GET_CODE (XEXP (note, 0)) == CODE_LABEL) { LABEL_NUSES (XEXP (note, 0))--; remove_note (insn, note); } } if (GET_CODE (insn) == JUMP_INSN && (GET_CODE (PATTERN (insn)) == ADDR_VEC || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) { rtx pat = PATTERN (insn); int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC; int len = XVECLEN (pat, diff_vec_p); int i; for (i = 0; i < len; i++) { rtx label = XEXP (XVECEXP (pat, diff_vec_p, i), 0); /* When deleting code in bulk (e.g. removing many unreachable blocks) we can delete a label that's a target of the vector before deleting the vector itself. */ if (GET_CODE (label) != NOTE) LABEL_NUSES (label)--; } } return next; } /* Like delete_insn but also purge dead edges from BB. */ rtx delete_insn_and_edges (rtx insn) { rtx x; bool purge = false; if (INSN_P (insn) && BLOCK_FOR_INSN (insn) && BB_END (BLOCK_FOR_INSN (insn)) == insn) purge = true; x = delete_insn (insn); if (purge) purge_dead_edges (BLOCK_FOR_INSN (insn)); return x; } /* Unlink a chain of insns between START and FINISH, leaving notes that must be paired. */ void delete_insn_chain (rtx start, rtx finish) { rtx next; /* Unchain the insns one by one. It would be quicker to delete all of these with a single unchaining, rather than one at a time, but we need to keep the NOTE's. */ while (1) { next = NEXT_INSN (start); if (GET_CODE (start) == NOTE && !can_delete_note_p (start)) ; else next = delete_insn (start); if (start == finish) break; start = next; } } /* Like delete_insn but also purge dead edges from BB. */ void delete_insn_chain_and_edges (rtx first, rtx last) { bool purge = false; if (INSN_P (last) && BLOCK_FOR_INSN (last) && BB_END (BLOCK_FOR_INSN (last)) == last) purge = true; delete_insn_chain (first, last); if (purge) purge_dead_edges (BLOCK_FOR_INSN (last)); } /* Create a new basic block consisting of the instructions between HEAD and END inclusive. This function is designed to allow fast BB construction - reuses the note and basic block struct in BB_NOTE, if any and do not grow BASIC_BLOCK chain and should be used directly only by CFG construction code. END can be NULL in to create new empty basic block before HEAD. Both END and HEAD can be NULL to create basic block at the end of INSN chain. AFTER is the basic block we should be put after. 
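The usual entry point is rtl_create_basic_block below, which grows the basic_block_info array as needed and calls create_basic_block_structure (head, end, NULL, after), so that a fresh NOTE_INSN_BASIC_BLOCK is emitted; CFG construction code can instead pass an existing note in BB_NOTE to have it reused.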
*/ basic_block create_basic_block_structure (rtx head, rtx end, rtx bb_note, basic_block after) { basic_block bb; if (bb_note && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL && bb->aux == NULL) { /* If we found an existing note, thread it back onto the chain. */ rtx after; if (GET_CODE (head) == CODE_LABEL) after = head; else { after = PREV_INSN (head); head = bb_note; } if (after != bb_note && NEXT_INSN (after) != bb_note) reorder_insns_nobb (bb_note, bb_note, after); } else { /* Otherwise we must create a note and a basic block structure. */ bb = alloc_block (); if (!head && !end) head = end = bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ()); else if (GET_CODE (head) == CODE_LABEL && end) { bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head); if (head == end) end = bb_note; } else { bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head); head = bb_note; if (!end) end = head; } NOTE_BASIC_BLOCK (bb_note) = bb; } /* Always include the bb note in the block. */ if (NEXT_INSN (end) == bb_note) end = bb_note; BB_HEAD (bb) = head; BB_END (bb) = end; bb->index = last_basic_block++; bb->flags = BB_NEW; link_block (bb, after); BASIC_BLOCK (bb->index) = bb; update_bb_for_insn (bb); bb->partition = UNPARTITIONED; /* Tag the block so that we know it has been used when considering other basic block notes. */ bb->aux = bb; return bb; } /* Create new basic block consisting of instructions in between HEAD and END and place it to the BB chain after block AFTER. END can be NULL in to create new empty basic block before HEAD. Both END and HEAD can be NULL to create basic block at the end of INSN chain. */ static basic_block rtl_create_basic_block (void *headp, void *endp, basic_block after) { rtx head = headp, end = endp; basic_block bb; /* Grow the basic block array if needed. */ if ((size_t) last_basic_block >= VARRAY_SIZE (basic_block_info)) { size_t new_size = last_basic_block + (last_basic_block + 3) / 4; VARRAY_GROW (basic_block_info, new_size); } n_basic_blocks++; bb = create_basic_block_structure (head, end, NULL, after); bb->aux = NULL; return bb; } static basic_block cfg_layout_create_basic_block (void *head, void *end, basic_block after) { basic_block newbb = rtl_create_basic_block (head, end, after); initialize_bb_rbi (newbb); return newbb; } /* Delete the insns in a (non-live) block. We physically delete every non-deleted-note insn, and update the flow graph appropriately. Return nonzero if we deleted an exception handler. */ /* ??? Preserving all such notes strikes me as wrong. It would be nice to post-process the stream to remove empty blocks, loops, ranges, etc. */ static void rtl_delete_block (basic_block b) { rtx insn, end, tmp; /* If the head of this block is a CODE_LABEL, then it might be the label for an exception handler which can't be reached. We need to remove the label from the exception_handler_label list and remove the associated NOTE_INSN_EH_REGION_BEG and NOTE_INSN_EH_REGION_END notes. */ /* Get rid of all NOTE_INSN_PREDICTIONs and NOTE_INSN_LOOP_CONTs hanging before the block. */ for (insn = PREV_INSN (BB_HEAD (b)); insn; insn = PREV_INSN (insn)) { if (GET_CODE (insn) != NOTE) break; if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PREDICTION || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT) NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED; } insn = BB_HEAD (b); if (GET_CODE (insn) == CODE_LABEL) maybe_remove_eh_handler (insn); /* Include any jump table following the basic block. 
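If the block ends in a tablejump, the ADDR_VEC or ADDR_DIFF_VEC insn holding its cases sits just after the jump; tablejump_p returns it in TMP so END can be extended past the table, and past a trailing BARRIER, before the whole chain is deleted in one call.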
*/ end = BB_END (b); if (tablejump_p (end, NULL, &tmp)) end = tmp; /* Include any barrier that may follow the basic block. */ tmp = next_nonnote_insn (end); if (tmp && GET_CODE (tmp) == BARRIER) end = tmp; /* Selectively delete the entire chain. */ BB_HEAD (b) = NULL; delete_insn_chain (insn, end); } /* Records the basic block struct in BLOCK_FOR_INSN for every insn. */ void compute_bb_for_insn (void) { basic_block bb; FOR_EACH_BB (bb) { rtx end = BB_END (bb); rtx insn; for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) { BLOCK_FOR_INSN (insn) = bb; if (insn == end) break; } } } /* Release the basic_block_for_insn array. */ void free_bb_for_insn (void) { rtx insn; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) != BARRIER) BLOCK_FOR_INSN (insn) = NULL; } /* Return RTX to emit after when we want to emit code on the entry of function. */ rtx entry_of_function (void) { return (n_basic_blocks ? BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ()); } /* Update insns block within BB. */ void update_bb_for_insn (basic_block bb) { rtx insn; for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) != BARRIER) set_block_for_insn (insn, bb); if (insn == BB_END (bb)) break; } } /* Creates a new basic block just after basic block B by splitting everything after specified instruction I. */ static basic_block rtl_split_block (basic_block bb, void *insnp) { basic_block new_bb; rtx insn = insnp; edge e; if (!insn) { insn = first_insn_after_basic_block_note (bb); if (insn) insn = PREV_INSN (insn); else insn = get_last_insn (); } /* We probably should check type of the insn so that we do not create inconsistent cfg. It is checked in verify_flow_info anyway, so do not bother. */ if (insn == BB_END (bb)) emit_note_after (NOTE_INSN_DELETED, insn); /* Create the new basic block. */ new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb); BB_END (bb) = insn; /* Redirect the outgoing edges. */ new_bb->succ = bb->succ; bb->succ = NULL; for (e = new_bb->succ; e; e = e->succ_next) e->src = new_bb; if (bb->global_live_at_start) { new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); COPY_REG_SET (new_bb->global_live_at_end, bb->global_live_at_end); /* We now have to calculate which registers are live at the end of the split basic block and at the start of the new basic block. Start with those registers that are known to be live at the end of the original basic block and get propagate_block to determine which registers are live. */ COPY_REG_SET (new_bb->global_live_at_start, bb->global_live_at_end); propagate_block (new_bb, new_bb->global_live_at_start, NULL, NULL, 0); COPY_REG_SET (bb->global_live_at_end, new_bb->global_live_at_start); #ifdef HAVE_conditional_execution /* In the presence of conditional execution we are not able to update liveness precisely. */ if (reload_completed) { bb->flags |= BB_DIRTY; new_bb->flags |= BB_DIRTY; } #endif } return new_bb; } /* Blocks A and B are to be merged into a single block A. The insns are already contiguous. */ static void rtl_merge_blocks (basic_block a, basic_block b) { rtx b_head = BB_HEAD (b), b_end = BB_END (b), a_end = BB_END (a); rtx del_first = NULL_RTX, del_last = NULL_RTX; int b_empty = 0; /* If there was a CODE_LABEL beginning B, delete it. */ if (GET_CODE (b_head) == CODE_LABEL) { /* Detect basic blocks with nothing but a label. This can happen in particular at the end of a function. 
*/ if (b_head == b_end) b_empty = 1; del_first = del_last = b_head; b_head = NEXT_INSN (b_head); } /* Delete the basic block note and handle blocks containing just that note. */ if (NOTE_INSN_BASIC_BLOCK_P (b_head)) { if (b_head == b_end) b_empty = 1; if (! del_last) del_first = b_head; del_last = b_head; b_head = NEXT_INSN (b_head); } /* If there was a jump out of A, delete it. */ if (GET_CODE (a_end) == JUMP_INSN) { rtx prev; for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev)) if (GET_CODE (prev) != NOTE || NOTE_LINE_NUMBER (prev) == NOTE_INSN_BASIC_BLOCK || prev == BB_HEAD (a)) break; del_first = a_end; #ifdef HAVE_cc0 /* If this was a conditional jump, we need to also delete the insn that set cc0. */ if (only_sets_cc0_p (prev)) { rtx tmp = prev; prev = prev_nonnote_insn (prev); if (!prev) prev = BB_HEAD (a); del_first = tmp; } #endif a_end = PREV_INSN (del_first); } else if (GET_CODE (NEXT_INSN (a_end)) == BARRIER) del_first = NEXT_INSN (a_end); /* Delete everything marked above as well as crap that might be hanging out between the two blocks. */ BB_HEAD (b) = NULL; delete_insn_chain (del_first, del_last); /* Reassociate the insns of B with A. */ if (!b_empty) { rtx x; for (x = a_end; x != b_end; x = NEXT_INSN (x)) set_block_for_insn (x, a); set_block_for_insn (b_end, a); a_end = b_end; } BB_END (a) = a_end; } /* Return true when block A and B can be merged. */ static bool rtl_can_merge_blocks (basic_block a,basic_block b) { bool partitions_ok = true; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. */ if (flag_reorder_blocks_and_partition && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX) || find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX) || a->partition != b->partition)) partitions_ok = false; /* There must be exactly one edge in between the blocks. */ return (a->succ && !a->succ->succ_next && a->succ->dest == b && !b->pred->pred_next && a != b /* Must be simple edge. */ && !(a->succ->flags & EDGE_COMPLEX) && partitions_ok && a->next_bb == b && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR /* If the jump insn has side effects, we can't kill the edge. */ && (GET_CODE (BB_END (a)) != JUMP_INSN || (reload_completed ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a))))); } /* Return the label in the head of basic block BLOCK. Create one if it doesn't exist. */ rtx block_label (basic_block block) { if (block == EXIT_BLOCK_PTR) return NULL_RTX; if (GET_CODE (BB_HEAD (block)) != CODE_LABEL) { BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block)); } return BB_HEAD (block); } /* Attempt to perform edge redirection by replacing possibly complex jump instruction by unconditional jump or removing jump completely. This can apply only if all edges now point to the same block. The parameters and return values are equivalent to redirect_edge_and_branch. */ edge try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) { basic_block src = e->src; rtx insn = BB_END (src), kill_from; edge tmp; rtx set; int fallthru = 0; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. */ if (flag_reorder_blocks_and_partition && find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX)) return NULL; /* Verify that all targets will be TARGET. 
*/ for (tmp = src->succ; tmp; tmp = tmp->succ_next) if (tmp->dest != target && tmp != e) break; if (tmp || !onlyjump_p (insn)) return NULL; if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL)) return NULL; /* Avoid removing branch with side effects. */ set = single_set (insn); if (!set || side_effects_p (set)) return NULL; /* In case we zap a conditional jump, we'll need to kill the cc0 setter too. */ kill_from = insn; #ifdef HAVE_cc0 if (reg_mentioned_p (cc0_rtx, PATTERN (insn))) kill_from = PREV_INSN (insn); #endif /* See if we can create the fallthru edge. */ if (in_cfglayout || can_fallthru (src, target)) { if (dump_file) fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn)); fallthru = 1; /* Selectively unlink whole insn chain. */ if (in_cfglayout) { rtx insn = src->rbi->footer; delete_insn_chain (kill_from, BB_END (src)); /* Remove barriers but keep jumptables. */ while (insn) { if (GET_CODE (insn) == BARRIER) { if (PREV_INSN (insn)) NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn); else src->rbi->footer = NEXT_INSN (insn); if (NEXT_INSN (insn)) PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn); } if (GET_CODE (insn) == CODE_LABEL) break; insn = NEXT_INSN (insn); } } else delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target))); } /* If this already is simplejump, redirect it. */ else if (simplejump_p (insn)) { if (e->dest == target) return NULL; if (dump_file) fprintf (dump_file, "Redirecting jump %i from %i to %i.\n", INSN_UID (insn), e->dest->index, target->index); if (!redirect_jump (insn, block_label (target), 0)) { if (target == EXIT_BLOCK_PTR) return NULL; abort (); } } /* Cannot do anything for target exit block. */ else if (target == EXIT_BLOCK_PTR) return NULL; /* Or replace possibly complicated jump insn by simple jump insn. */ else { rtx target_label = block_label (target); rtx barrier, label, table; emit_jump_insn_after (gen_jump (target_label), insn); JUMP_LABEL (BB_END (src)) = target_label; LABEL_NUSES (target_label)++; if (dump_file) fprintf (dump_file, "Replacing insn %i by jump %i\n", INSN_UID (insn), INSN_UID (BB_END (src))); delete_insn_chain (kill_from, insn); /* Recognize a tablejump that we are converting to a simple jump and remove its associated CODE_LABEL and ADDR_VEC or ADDR_DIFF_VEC. */ if (tablejump_p (insn, &label, &table)) delete_insn_chain (label, table); barrier = next_nonnote_insn (BB_END (src)); if (!barrier || GET_CODE (barrier) != BARRIER) emit_barrier_after (BB_END (src)); else { if (barrier != NEXT_INSN (BB_END (src))) { /* Move the jump before barrier so that the notes which originally were or were created before jump table are inside the basic block. */ rtx new_insn = BB_END (src); rtx tmp; for (tmp = NEXT_INSN (BB_END (src)); tmp != barrier; tmp = NEXT_INSN (tmp)) set_block_for_insn (tmp, src); NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn); PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn); NEXT_INSN (new_insn) = barrier; NEXT_INSN (PREV_INSN (barrier)) = new_insn; PREV_INSN (new_insn) = PREV_INSN (barrier); PREV_INSN (barrier) = new_insn; } } } /* Keep only one edge out and set proper flags. */ while (src->succ->succ_next) remove_edge (src->succ); e = src->succ; if (fallthru) e->flags = EDGE_FALLTHRU; else e->flags = 0; e->probability = REG_BR_PROB_BASE; e->count = src->count; /* We don't want a block to end on a line-number note since that has the potential of changing the code between -g and not -g. 
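Line-number notes are those whose NOTE_LINE_NUMBER is non-negative (an actual source line), as opposed to the negative NOTE_INSN_* codes, which is what the test below checks while trimming them from the end of the block.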
*/ while (GET_CODE (BB_END (e->src)) == NOTE && NOTE_LINE_NUMBER (BB_END (e->src)) >= 0) delete_insn (BB_END (e->src)); if (e->dest != target) redirect_edge_succ (e, target); return e; } /* Return last loop_beg note appearing after INSN, before start of next basic block. Return INSN if there are no such notes. When emitting a jump to redirect a fallthru edge, it should always appear after the LOOP_BEG notes, as the loop optimizer expects the loop either to start with the fallthru edge or with a jump following the LOOP_BEG note that jumps to the loop exit test. */ static rtx last_loop_beg_note (rtx insn) { rtx last = insn; for (insn = NEXT_INSN (insn); insn && GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK; insn = NEXT_INSN (insn)) if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) last = insn; return last; } /* Redirect the edge representing a branch of an (un)conditional jump or tablejump; return NULL on failure. */ static edge redirect_branch_edge (edge e, basic_block target) { rtx tmp; rtx old_label = BB_HEAD (e->dest); basic_block src = e->src; rtx insn = BB_END (src); /* We can only redirect non-fallthru edges of a jump insn. */ if (e->flags & EDGE_FALLTHRU) return NULL; else if (GET_CODE (insn) != JUMP_INSN) return NULL; /* Recognize a tablejump and adjust all matching cases. */ if (tablejump_p (insn, NULL, &tmp)) { rtvec vec; int j; rtx new_label = block_label (target); if (target == EXIT_BLOCK_PTR) return NULL; if (GET_CODE (PATTERN (tmp)) == ADDR_VEC) vec = XVEC (PATTERN (tmp), 0); else vec = XVEC (PATTERN (tmp), 1); for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j) if (XEXP (RTVEC_ELT (vec, j), 0) == old_label) { RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label); --LABEL_NUSES (old_label); ++LABEL_NUSES (new_label); } /* Handle casesi dispatch insns. */ if ((tmp = single_set (insn)) != NULL && SET_DEST (tmp) == pc_rtx && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF && XEXP (XEXP (SET_SRC (tmp), 2), 0) == old_label) { XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (VOIDmode, new_label); --LABEL_NUSES (old_label); ++LABEL_NUSES (new_label); } } else { /* ?? We may play the games with moving the named labels from one basic block to the other in case only one computed_jump is available. */ if (computed_jump_p (insn) /* A return instruction can't be redirected. */ || returnjump_p (insn)) return NULL; /* If the insn doesn't go where we think, we're confused. */ if (JUMP_LABEL (insn) != old_label) abort (); /* If the substitution doesn't succeed, die. This can happen if the back end emitted unrecognizable instructions or if target is exit block on some arches. */ if (!redirect_jump (insn, block_label (target), 0)) { if (target == EXIT_BLOCK_PTR) return NULL; abort (); } } if (dump_file) fprintf (dump_file, "Edge %i->%i redirected to %i\n", e->src->index, e->dest->index, target->index); if (e->dest != target) e = redirect_edge_succ_nodup (e, target); return e; } /* Attempt to change code to redirect edge E to TARGET. Don't do it at the expense of adding new instructions or reordering basic blocks. The function can also be called with the edge destination already equal to TARGET. Then it should try the simplifications and do nothing if none is possible. Return edge representing the branch if transformation succeeded. Return NULL on failure. We still return NULL in case E already pointed to TARGET and we didn't manage to simplify the instruction stream.
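A NULL return therefore does not by itself mean the edge still points elsewhere; callers that care, such as rtl_redirect_edge_and_branch_force below, also compare e->dest against TARGET.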
*/ static edge rtl_redirect_edge_and_branch (edge e, basic_block target) { edge ret; basic_block src = e->src; if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) return NULL; if (e->dest == target) return e; if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL) { src->flags |= BB_DIRTY; return ret; } ret = redirect_branch_edge (e, target); if (!ret) return NULL; src->flags |= BB_DIRTY; return ret; } /* Like force_nonfallthru below, but additionally performs redirection. Used by redirect_edge_and_branch_force. */ basic_block force_nonfallthru_and_redirect (edge e, basic_block target) { basic_block jump_block, new_bb = NULL, src = e->src; rtx note; edge new_edge; int abnormal_edge_flags = 0; /* In the case the last instruction is a conditional jump to the next instruction, first redirect the jump itself and then continue by creating a basic block afterwards to redirect the fallthru edge. */ if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR && any_condjump_p (BB_END (e->src)) /* When called from cfglayout, fallthru edges do not necessarily go to the next block. */ && e->src->next_bb == e->dest && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest)) { rtx note; edge b = unchecked_make_edge (e->src, target, 0); if (!redirect_jump (BB_END (e->src), block_label (target), 0)) abort (); note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX); if (note) { int prob = INTVAL (XEXP (note, 0)); b->probability = prob; b->count = e->count * prob / REG_BR_PROB_BASE; e->probability -= e->probability; e->count -= b->count; if (e->probability < 0) e->probability = 0; if (e->count < 0) e->count = 0; } } if (e->flags & EDGE_ABNORMAL) { /* Irritating special case - fallthru edge to the same block as abnormal edge. We can't redirect abnormal edge, but we still can split the fallthru one and create separate abnormal edge to original destination. This allows bb-reorder to make such edge non-fallthru. */ if (e->dest != target) abort (); abnormal_edge_flags = e->flags & ~(EDGE_FALLTHRU | EDGE_CAN_FALLTHRU); e->flags &= EDGE_FALLTHRU | EDGE_CAN_FALLTHRU; } else if (!(e->flags & EDGE_FALLTHRU)) abort (); else if (e->src == ENTRY_BLOCK_PTR) { /* We can't redirect the entry block. Create an empty block at the start of the function which we use to add the new jump. */ edge *pe1; basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR); /* Change the existing edge's source to be the new block, and add a new edge from the entry block to the new block. */ e->src = bb; for (pe1 = &ENTRY_BLOCK_PTR->succ; *pe1; pe1 = &(*pe1)->succ_next) if (*pe1 == e) { *pe1 = e->succ_next; break; } e->succ_next = 0; bb->succ = e; make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU); } if (e->src->succ->succ_next || abnormal_edge_flags) { /* Create the new structures. */ /* If the old block ended with a tablejump, skip its table by searching forward from there. Otherwise start searching forward from the last instruction of the old block. */ if (!tablejump_p (BB_END (e->src), NULL, &note)) note = BB_END (e->src); /* Position the new block correctly relative to loop notes.
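last_loop_beg_note skips forward over any LOOP_BEG notes following NOTE, so the new jump block created below starts after them.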
*/ note = last_loop_beg_note (note); note = NEXT_INSN (note); jump_block = create_basic_block (note, NULL, e->src); jump_block->count = e->count; jump_block->frequency = EDGE_FREQUENCY (e); jump_block->loop_depth = target->loop_depth; if (target->global_live_at_start) { jump_block->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); jump_block->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); COPY_REG_SET (jump_block->global_live_at_start, target->global_live_at_start); COPY_REG_SET (jump_block->global_live_at_end, target->global_live_at_start); } /* Make sure new block ends up in correct hot/cold section. */ jump_block->partition = e->src->partition; if (flag_reorder_blocks_and_partition) { if (e->src->partition == COLD_PARTITION) { rtx bb_note, new_note; for (bb_note = BB_HEAD (jump_block); bb_note && bb_note != NEXT_INSN (BB_END (jump_block)); bb_note = NEXT_INSN (bb_note)) if (GET_CODE (bb_note) == NOTE && NOTE_LINE_NUMBER (bb_note) == NOTE_INSN_BASIC_BLOCK) break; new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE, bb_note); NOTE_BASIC_BLOCK (new_note) = jump_block; jump_block->partition = COLD_PARTITION; } if (GET_CODE (BB_END (jump_block)) == JUMP_INSN && !any_condjump_p (BB_END (jump_block)) && jump_block->succ->crossing_edge ) REG_NOTES (BB_END (jump_block)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (jump_block))); } /* Wire edge in. */ new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU); new_edge->probability = e->probability; new_edge->count = e->count; /* Redirect old edge. */ redirect_edge_pred (e, jump_block); e->probability = REG_BR_PROB_BASE; new_bb = jump_block; } else jump_block = e->src; e->flags &= ~EDGE_FALLTHRU; if (target == EXIT_BLOCK_PTR) { #ifdef HAVE_return emit_jump_insn_after (gen_return (), BB_END (jump_block)); #else abort (); #endif } else { rtx label = block_label (target); emit_jump_insn_after (gen_jump (label), BB_END (jump_block)); JUMP_LABEL (BB_END (jump_block)) = label; LABEL_NUSES (label)++; } emit_barrier_after (BB_END (jump_block)); redirect_edge_succ_nodup (e, target); if (abnormal_edge_flags) make_edge (src, target, abnormal_edge_flags); return new_bb; } /* Edge E is assumed to be fallthru edge. Emit needed jump instruction (and possibly create new basic block) to make edge non-fallthru. Return newly created BB or NULL if none. */ basic_block force_nonfallthru (edge e) { return force_nonfallthru_and_redirect (e, e->dest); } /* Redirect edge even at the expense of creating new jump insn or basic block. Return new basic block if created, NULL otherwise. Abort if conversion is impossible. */ static basic_block rtl_redirect_edge_and_branch_force (edge e, basic_block target) { if (redirect_edge_and_branch (e, target) || e->dest == target) return NULL; /* In case the edge redirection failed, try to force it to be non-fallthru and redirect newly created simplejump. */ return force_nonfallthru_and_redirect (e, target); } /* The given edge should potentially be a fallthru edge. If that is in fact true, delete the jump and barriers that are in the way. */ static void rtl_tidy_fallthru_edge (edge e) { rtx q; basic_block b = e->src, c = b->next_bb; /* ??? In a late-running flow pass, other folks may have deleted basic blocks by nopping out blocks, leaving multiple BARRIERs between here and the target label. They ought to be chastized and fixed. We can also wind up with a sequence of undeletable labels between one block and the next. 
So search through a sequence of barriers, labels, and notes for the head of block C and assert that we really do fall through. */ for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q)) if (INSN_P (q)) return; /* Remove what will soon cease being the jump insn from the source block. If block B consisted only of this single jump, turn it into a deleted note. */ q = BB_END (b); if (GET_CODE (q) == JUMP_INSN && onlyjump_p (q) && (any_uncondjump_p (q) || (b->succ == e && e->succ_next == NULL))) { #ifdef HAVE_cc0 /* If this was a conditional jump, we need to also delete the insn that set cc0. */ if (any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q))) q = PREV_INSN (q); #endif q = PREV_INSN (q); /* We don't want a block to end on a line-number note since that has the potential of changing the code between -g and not -g. */ while (GET_CODE (q) == NOTE && NOTE_LINE_NUMBER (q) >= 0) q = PREV_INSN (q); } /* Selectively unlink the sequence. */ if (q != PREV_INSN (BB_HEAD (c))) delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c))); e->flags |= EDGE_FALLTHRU; } /* Helper function for split_edge. Return true in case edge BB2 to BB1 is back edge of syntactic loop. */ static bool back_edge_of_syntactic_loop_p (basic_block bb1, basic_block bb2) { rtx insn; int count = 0; basic_block bb; if (bb1 == bb2) return true; /* ??? Could we guarantee that bb indices are monotone, so that we could just compare them? */ for (bb = bb1; bb && bb != bb2; bb = bb->next_bb) continue; if (!bb) return false; for (insn = BB_END (bb1); insn != BB_HEAD (bb2) && count >= 0; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == NOTE) { if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) count++; else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END) count--; } return count >= 0; } /* Should move basic block BB after basic block AFTER. NIY. */ static bool rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED, basic_block after ATTRIBUTE_UNUSED) { return false; } /* Split a (typically critical) edge. Return the new block. Abort on abnormal edges. ??? The code generally expects to be called on critical edges. The case of a block ending in an unconditional jump to a block with multiple predecessors is not handled optimally. */ static basic_block rtl_split_edge (edge edge_in) { basic_block bb; rtx before; /* Abnormal edges cannot be split. */ if ((edge_in->flags & EDGE_ABNORMAL) != 0) abort (); /* We are going to place the new block in front of edge destination. Avoid existence of fallthru predecessors. */ if ((edge_in->flags & EDGE_FALLTHRU) == 0) { edge e; for (e = edge_in->dest->pred; e; e = e->pred_next) if (e->flags & EDGE_FALLTHRU) break; if (e) force_nonfallthru (e); } /* Create the basic block note. Where we place the note can have a noticeable impact on the generated code. Consider this cfg: E | 0 / \ +->1-->2--->E | | +--+ If we need to insert an insn on the edge from block 0 to block 1, we want to ensure the instructions we insert are outside of any loop notes that physically sit between block 0 and block 1. Otherwise we confuse the loop optimizer into thinking the loop is a phony. 
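So if a NOTE_INSN_LOOP_BEG immediately precedes the destination's head, and the edge is not the syntactic loop's own back edge, the new block is placed before that note.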
*/ if (edge_in->dest != EXIT_BLOCK_PTR && PREV_INSN (BB_HEAD (edge_in->dest)) && GET_CODE (PREV_INSN (BB_HEAD (edge_in->dest))) == NOTE && (NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (edge_in->dest))) == NOTE_INSN_LOOP_BEG) && !back_edge_of_syntactic_loop_p (edge_in->dest, edge_in->src)) before = PREV_INSN (BB_HEAD (edge_in->dest)); else if (edge_in->dest != EXIT_BLOCK_PTR) before = BB_HEAD (edge_in->dest); else before = NULL_RTX; /* If this is a fall through edge to the exit block, the blocks might be not adjacent, and the right place is the after the source. */ if (edge_in->flags & EDGE_FALLTHRU && edge_in->dest == EXIT_BLOCK_PTR) { before = NEXT_INSN (BB_END (edge_in->src)); if (before && GET_CODE (before) == NOTE && NOTE_LINE_NUMBER (before) == NOTE_INSN_LOOP_END) before = NEXT_INSN (before); bb = create_basic_block (before, NULL, edge_in->src); } else bb = create_basic_block (before, NULL, edge_in->dest->prev_bb); /* ??? This info is likely going to be out of date very soon. */ if (edge_in->dest->global_live_at_start) { bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); COPY_REG_SET (bb->global_live_at_start, edge_in->dest->global_live_at_start); COPY_REG_SET (bb->global_live_at_end, edge_in->dest->global_live_at_start); } make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU); /* For non-fallthru edges, we must adjust the predecessor's jump instruction to target our new block. */ if ((edge_in->flags & EDGE_FALLTHRU) == 0) { if (!redirect_edge_and_branch (edge_in, bb)) abort (); } else redirect_edge_succ (edge_in, bb); return bb; } /* Queue instructions for insertion on an edge between two basic blocks. The new instructions and basic blocks (if any) will not appear in the CFG until commit_edge_insertions is called. */ void insert_insn_on_edge (rtx pattern, edge e) { /* We cannot insert instructions on an abnormal critical edge. It will be easier to find the culprit if we die now. */ if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)) abort (); if (e->insns.r == NULL_RTX) start_sequence (); else push_to_sequence (e->insns.r); emit_insn (pattern); e->insns.r = get_insns (); end_sequence (); } /* Called from safe_insert_insn_on_edge through note_stores, marks live registers that are killed by the store. */ static void mark_killed_regs (rtx reg, rtx set ATTRIBUTE_UNUSED, void *data) { regset killed = data; int regno, i; if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); if (!REG_P (reg)) return; regno = REGNO (reg); if (regno >= FIRST_PSEUDO_REGISTER) SET_REGNO_REG_SET (killed, regno); else { for (i = 0; i < (int) hard_regno_nregs[regno][GET_MODE (reg)]; i++) SET_REGNO_REG_SET (killed, regno + i); } } /* Similar to insert_insn_on_edge, tries to put INSN to edge E. Additionally it checks whether this will not clobber the registers that are live on the edge (i.e. it requires liveness information to be up-to-date) and if there are some, then it tries to save and restore them. Returns true if successful. 
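The registers that would be clobbered are copied into fresh pseudos before INSN and restored afterwards; if a clobbered register has no usable mode for the copy (VOIDmode, or CCmode when AVOID_CCMODE_COPIES is defined), we give up and return false.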
*/ bool safe_insert_insn_on_edge (rtx insn, edge e) { rtx x; regset_head killed_head; regset killed = INITIALIZE_REG_SET (killed_head); rtx save_regs = NULL_RTX; int regno, noccmode; enum machine_mode mode; #ifdef AVOID_CCMODE_COPIES noccmode = true; #else noccmode = false; #endif for (x = insn; x; x = NEXT_INSN (x)) if (INSN_P (x)) note_stores (PATTERN (x), mark_killed_regs, killed); bitmap_operation (killed, killed, e->dest->global_live_at_start, BITMAP_AND); EXECUTE_IF_SET_IN_REG_SET (killed, 0, regno, { mode = regno < FIRST_PSEUDO_REGISTER ? reg_raw_mode[regno] : GET_MODE (regno_reg_rtx[regno]); if (mode == VOIDmode) return false; if (noccmode && mode == CCmode) return false; save_regs = alloc_EXPR_LIST (0, alloc_EXPR_LIST (0, gen_reg_rtx (mode), gen_raw_REG (mode, regno)), save_regs); }); if (save_regs) { rtx from, to; start_sequence (); for (x = save_regs; x; x = XEXP (x, 1)) { from = XEXP (XEXP (x, 0), 1); to = XEXP (XEXP (x, 0), 0); emit_move_insn (to, from); } emit_insn (insn); for (x = save_regs; x; x = XEXP (x, 1)) { from = XEXP (XEXP (x, 0), 0); to = XEXP (XEXP (x, 0), 1); emit_move_insn (to, from); } insn = get_insns (); end_sequence (); free_EXPR_LIST_list (&save_regs); } insert_insn_on_edge (insn, e); FREE_REG_SET (killed); return true; } /* Update the CFG for the instructions queued on edge E. */ static void commit_one_edge_insertion (edge e, int watch_calls) { rtx before = NULL_RTX, after = NULL_RTX, insns, tmp, last; basic_block bb = NULL; /* Pull the insns off the edge now since the edge might go away. */ insns = e->insns.r; e->insns.r = NULL_RTX; /* Special case -- avoid inserting code between call and storing its return value. */ if (watch_calls && (e->flags & EDGE_FALLTHRU) && !e->dest->pred->pred_next && e->src != ENTRY_BLOCK_PTR && GET_CODE (BB_END (e->src)) == CALL_INSN) { rtx next = next_nonnote_insn (BB_END (e->src)); after = BB_HEAD (e->dest); /* The first insn after the call may be a stack pop, skip it. */ while (next && keep_with_call_p (next)) { after = next; next = next_nonnote_insn (next); } bb = e->dest; } if (!before && !after) { /* Figure out where to put these things. If the destination has one predecessor, insert there. Except for the exit block. */ if (e->dest->pred->pred_next == NULL && e->dest != EXIT_BLOCK_PTR) { bb = e->dest; /* Get the location correct wrt a code label, and "nice" wrt a basic block note, and before everything else. */ tmp = BB_HEAD (bb); if (GET_CODE (tmp) == CODE_LABEL) tmp = NEXT_INSN (tmp); if (NOTE_INSN_BASIC_BLOCK_P (tmp)) tmp = NEXT_INSN (tmp); if (tmp && GET_CODE (tmp) == NOTE && NOTE_LINE_NUMBER (tmp) == NOTE_INSN_UNLIKELY_EXECUTED_CODE) tmp = NEXT_INSN (tmp); if (tmp == BB_HEAD (bb)) before = tmp; else if (tmp) after = PREV_INSN (tmp); else after = get_last_insn (); } /* If the source has one successor and the edge is not abnormal, insert there. Except for the entry block. */ else if ((e->flags & EDGE_ABNORMAL) == 0 && e->src->succ->succ_next == NULL && e->src != ENTRY_BLOCK_PTR) { bb = e->src; /* It is possible to have a non-simple jump here. Consider a target where some forms of unconditional jumps clobber a register. This happens on the fr30 for example. We know this block has a single successor, so we can just emit the queued insns before the jump. 
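We also back up over any NOTE_INSN_LOOP_BEG notes immediately preceding the jump, so the inserted code stays outside the loop notes.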
*/ if (GET_CODE (BB_END (bb)) == JUMP_INSN) for (before = BB_END (bb); GET_CODE (PREV_INSN (before)) == NOTE && NOTE_LINE_NUMBER (PREV_INSN (before)) == NOTE_INSN_LOOP_BEG; before = PREV_INSN (before)) ; else { /* We'd better be fallthru, or we've lost track of what's what. */ if ((e->flags & EDGE_FALLTHRU) == 0) abort (); after = BB_END (bb); } } /* Otherwise we must split the edge. */ else { bb = split_edge (e); after = BB_END (bb); /* If we are partitioning hot/cold basic blocks, we must make sure that the new basic block ends up in the correct section. */ bb->partition = e->src->partition; if (flag_reorder_blocks_and_partition && e->src != ENTRY_BLOCK_PTR && e->src->partition == COLD_PARTITION) { rtx bb_note, new_note, cur_insn; bb_note = NULL_RTX; for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb)); cur_insn = NEXT_INSN (cur_insn)) if (GET_CODE (cur_insn) == NOTE && NOTE_LINE_NUMBER (cur_insn) == NOTE_INSN_BASIC_BLOCK) { bb_note = cur_insn; break; } new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE, bb_note); NOTE_BASIC_BLOCK (new_note) = bb; if (GET_CODE (BB_END (bb)) == JUMP_INSN && !any_condjump_p (BB_END (bb)) && bb->succ->crossing_edge ) REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb))); if (after == bb_note) after = new_note; } } } /* Now that we've found the spot, do the insertion. */ if (before) { emit_insn_before (insns, before); last = prev_nonnote_insn (before); } else last = emit_insn_after (insns, after); if (returnjump_p (last)) { /* ??? Remove all outgoing edges from BB and add one for EXIT. This is not currently a problem because this only happens for the (single) epilogue, which already has a fallthru edge to EXIT. */ e = bb->succ; if (e->dest != EXIT_BLOCK_PTR || e->succ_next != NULL || (e->flags & EDGE_FALLTHRU) == 0) abort (); e->flags &= ~EDGE_FALLTHRU; emit_barrier_after (last); if (before) delete_insn (before); } else if (GET_CODE (last) == JUMP_INSN) abort (); /* Mark the basic block for find_sub_basic_blocks. */ bb->aux = &bb->aux; } /* Update the CFG for all queued instructions. */ void commit_edge_insertions (void) { basic_block bb; sbitmap blocks; bool changed = false; #ifdef ENABLE_CHECKING verify_flow_info (); #endif FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e, next; for (e = bb->succ; e; e = next) { next = e->succ_next; if (e->insns.r) { changed = true; commit_one_edge_insertion (e, false); } } } if (!changed) return; blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (blocks); FOR_EACH_BB (bb) if (bb->aux) { SET_BIT (blocks, bb->index); /* Check for forgotten bb->aux values before commit_edge_insertions call. */ if (bb->aux != &bb->aux) abort (); bb->aux = NULL; } find_many_sub_basic_blocks (blocks); sbitmap_free (blocks); } /* Update the CFG for all queued instructions, taking special care of inserting code on edges between call and storing its return value. */ void commit_edge_insertions_watch_calls (void) { basic_block bb; sbitmap blocks; bool changed = false; #ifdef ENABLE_CHECKING verify_flow_info (); #endif FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e, next; for (e = bb->succ; e; e = next) { next = e->succ_next; if (e->insns.r) { changed = true; commit_one_edge_insertion (e, true); } } } if (!changed) return; blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (blocks); FOR_EACH_BB (bb) if (bb->aux) { SET_BIT (blocks, bb->index); /* Check for forgotten bb->aux values before commit_edge_insertions call. 
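commit_one_edge_insertion marks every block it touches by pointing bb->aux at the field itself; anything else found here means some earlier pass left a stale value behind.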
*/ if (bb->aux != &bb->aux) abort (); bb->aux = NULL; } find_many_sub_basic_blocks (blocks); sbitmap_free (blocks); } /* Print out RTL-specific basic block information (live information at start and end). */ static void rtl_dump_bb (basic_block bb, FILE *outf, int indent) { rtx insn; rtx last; char *s_indent; s_indent = alloca ((size_t) indent + 1); memset (s_indent, ' ', (size_t) indent); s_indent[indent] = '\0'; fprintf (outf, ";;%s Registers live at start: ", s_indent); dump_regset (bb->global_live_at_start, outf); putc ('\n', outf); for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last; insn = NEXT_INSN (insn)) print_rtl_single (outf, insn); fprintf (outf, ";;%s Registers live at end: ", s_indent); dump_regset (bb->global_live_at_end, outf); putc ('\n', outf); } /* Like print_rtl, but also print out live information for the start of each basic block. */ void print_rtl_with_bb (FILE *outf, rtx rtx_first) { rtx tmp_rtx; if (rtx_first == 0) fprintf (outf, "(nil)\n"); else { enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB }; int max_uid = get_max_uid (); basic_block *start = xcalloc (max_uid, sizeof (basic_block)); basic_block *end = xcalloc (max_uid, sizeof (basic_block)); enum bb_state *in_bb_p = xcalloc (max_uid, sizeof (enum bb_state)); basic_block bb; FOR_EACH_BB_REVERSE (bb) { rtx x; start[INSN_UID (BB_HEAD (bb))] = bb; end[INSN_UID (BB_END (bb))] = bb; for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x)) { enum bb_state state = IN_MULTIPLE_BB; if (in_bb_p[INSN_UID (x)] == NOT_IN_BB) state = IN_ONE_BB; in_bb_p[INSN_UID (x)] = state; if (x == BB_END (bb)) break; } } for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx)) { int did_output; if ((bb = start[INSN_UID (tmp_rtx)]) != NULL) { fprintf (outf, ";; Start of basic block %d, registers live:", bb->index); dump_regset (bb->global_live_at_start, outf); putc ('\n', outf); } if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB && GET_CODE (tmp_rtx) != NOTE && GET_CODE (tmp_rtx) != BARRIER) fprintf (outf, ";; Insn is not within a basic block\n"); else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB) fprintf (outf, ";; Insn is in multiple basic blocks\n"); did_output = print_rtl_single (outf, tmp_rtx); if ((bb = end[INSN_UID (tmp_rtx)]) != NULL) { fprintf (outf, ";; End of basic block %d, registers live:\n", bb->index); dump_regset (bb->global_live_at_end, outf); putc ('\n', outf); } if (did_output) putc ('\n', outf); } free (start); free (end); free (in_bb_p); } if (current_function_epilogue_delay_list != 0) { fprintf (outf, "\n;; Insns in epilogue delay list:\n\n"); for (tmp_rtx = current_function_epilogue_delay_list; tmp_rtx != 0; tmp_rtx = XEXP (tmp_rtx, 1)) print_rtl_single (outf, XEXP (tmp_rtx, 0)); } } void update_br_prob_note (basic_block bb) { rtx note; if (GET_CODE (BB_END (bb)) != JUMP_INSN) return; note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX); if (!note || INTVAL (XEXP (note, 0)) == BRANCH_EDGE (bb)->probability) return; XEXP (note, 0) = GEN_INT (BRANCH_EDGE (bb)->probability); } /* Verify the CFG and RTL consistency common for both underlying RTL and cfglayout RTL. 
Currently it does the following checks: - test head/end pointers - overlapping of basic blocks - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note) - tails of basic blocks (ensure that boundary is necessary) - scans body of the basic block for JUMP_INSN, CODE_LABEL and NOTE_INSN_BASIC_BLOCK - verify that no fall_thru edge crosses hot/cold partition boundaries In the future it can be extended to check a lot of other stuff as well (reachability of basic blocks, life information, etc. etc.). */ static int rtl_verify_flow_info_1 (void) { const int max_uid = get_max_uid (); rtx last_head = get_last_insn (); basic_block *bb_info; rtx x; int err = 0; basic_block bb, last_bb_seen; bb_info = xcalloc (max_uid, sizeof (basic_block)); /* Check bb chain & numbers. */ last_bb_seen = ENTRY_BLOCK_PTR; FOR_EACH_BB_REVERSE (bb) { rtx head = BB_HEAD (bb); rtx end = BB_END (bb); /* Verify the end of the basic block is in the INSN chain. */ for (x = last_head; x != NULL_RTX; x = PREV_INSN (x)) if (x == end) break; if (!x) { error ("end insn %d for block %d not found in the insn stream", INSN_UID (end), bb->index); err = 1; } /* Work backwards from the end to the head of the basic block to verify the head is in the RTL chain. */ for (; x != NULL_RTX; x = PREV_INSN (x)) { /* While walking over the insn chain, verify insns appear in only one basic block and initialize the BB_INFO array used by other passes. */ if (bb_info[INSN_UID (x)] != NULL) { error ("insn %d is in multiple basic blocks (%d and %d)", INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index); err = 1; } bb_info[INSN_UID (x)] = bb; if (x == head) break; } if (!x) { error ("head insn %d for block %d not found in the insn stream", INSN_UID (head), bb->index); err = 1; } last_head = x; } /* Now check the basic blocks (boundaries etc.)
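For each block this verifies the REG_BR_PROB note against the CFG, counts the kinds of outgoing edges (fallthru, branch, call, EH, abnormal) and checks them against the block's final insn, and then walks the insns between BB_HEAD and BB_END to make sure they belong to the block and contain no stray control flow.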
*/ FOR_EACH_BB_REVERSE (bb) { int n_fallthru = 0, n_eh = 0, n_call = 0, n_abnormal = 0, n_branch = 0; edge e, fallthru = NULL; rtx note; if (INSN_P (BB_END (bb)) && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX)) && bb->succ && bb->succ->succ_next && any_condjump_p (BB_END (bb))) { if (INTVAL (XEXP (note, 0)) != BRANCH_EDGE (bb)->probability) { error ("verify_flow_info: REG_BR_PROB does not match cfg %wi %i", INTVAL (XEXP (note, 0)), BRANCH_EDGE (bb)->probability); err = 1; } } for (e = bb->succ; e; e = e->succ_next) { if (e->flags & EDGE_FALLTHRU) { n_fallthru++, fallthru = e; if (e->crossing_edge) { error ("Fallthru edge crosses section boundary (bb %i)", e->src->index); err = 1; } } if ((e->flags & ~(EDGE_DFS_BACK | EDGE_CAN_FALLTHRU | EDGE_IRREDUCIBLE_LOOP | EDGE_LOOP_EXIT)) == 0) n_branch++; if (e->flags & EDGE_ABNORMAL_CALL) n_call++; if (e->flags & EDGE_EH) n_eh++; else if (e->flags & EDGE_ABNORMAL) n_abnormal++; } if (n_eh && GET_CODE (PATTERN (BB_END (bb))) != RESX && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX)) { error ("Missing REG_EH_REGION note in the end of bb %i", bb->index); err = 1; } if (n_branch && (GET_CODE (BB_END (bb)) != JUMP_INSN || (n_branch > 1 && (any_uncondjump_p (BB_END (bb)) || any_condjump_p (BB_END (bb)))))) { error ("Too many outgoing branch edges from bb %i", bb->index); err = 1; } if (n_fallthru && any_uncondjump_p (BB_END (bb))) { error ("Fallthru edge after unconditional jump %i", bb->index); err = 1; } if (n_branch != 1 && any_uncondjump_p (BB_END (bb))) { error ("Wrong amount of branch edges after unconditional jump %i", bb->index); err = 1; } if (n_branch != 1 && any_condjump_p (BB_END (bb)) && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest)) { error ("Wrong amount of branch edges after conditional jump %i", bb->index); err = 1; } if (n_call && GET_CODE (BB_END (bb)) != CALL_INSN) { error ("Call edges for non-call insn in bb %i", bb->index); err = 1; } if (n_abnormal && (GET_CODE (BB_END (bb)) != CALL_INSN && n_call != n_abnormal) && (GET_CODE (BB_END (bb)) != JUMP_INSN || any_condjump_p (BB_END (bb)) || any_uncondjump_p (BB_END (bb)))) { error ("Abnormal edges for no purpose in bb %i", bb->index); err = 1; } for (x = BB_HEAD (bb); x != NEXT_INSN (BB_END (bb)); x = NEXT_INSN (x)) if (BLOCK_FOR_INSN (x) != bb) { debug_rtx (x); if (! BLOCK_FOR_INSN (x)) error ("insn %d inside basic block %d but block_for_insn is NULL", INSN_UID (x), bb->index); else error ("insn %d inside basic block %d but block_for_insn is %i", INSN_UID (x), bb->index, BLOCK_FOR_INSN (x)->index); err = 1; } /* OK, pointers are correct. Now check the header of the basic block. It ought to contain an optional CODE_LABEL followed by NOTE_BASIC_BLOCK. */ x = BB_HEAD (bb); if (GET_CODE (x) == CODE_LABEL) { if (BB_END (bb) == x) { error ("NOTE_INSN_BASIC_BLOCK is missing for block %d", bb->index); err = 1; } x = NEXT_INSN (x); } if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb) { error ("NOTE_INSN_BASIC_BLOCK is missing for block %d", bb->index); err = 1; } if (BB_END (bb) == x) /* Do checks for empty blocks here. */ ; else for (x = NEXT_INSN (x); x; x = NEXT_INSN (x)) { if (NOTE_INSN_BASIC_BLOCK_P (x)) { error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d", INSN_UID (x), bb->index); err = 1; } if (x == BB_END (bb)) break; if (control_flow_insn_p (x)) { error ("in basic block %d:", bb->index); fatal_insn ("flow control insn inside a basic block", x); } } } /* Clean up.
*/ free (bb_info); return err; } /* Verify the CFG and RTL consistency common for both underlying RTL and cfglayout RTL. Currently it does following checks: - all checks of rtl_verify_flow_info_1 - check that all insns are in the basic blocks (except the switch handling code, barriers and notes) - check that all returns are followed by barriers - check that all fallthru edge points to the adjacent blocks. */ static int rtl_verify_flow_info (void) { basic_block bb; int err = rtl_verify_flow_info_1 (); rtx x; int num_bb_notes; const rtx rtx_first = get_insns (); basic_block last_bb_seen = ENTRY_BLOCK_PTR, curr_bb = NULL; FOR_EACH_BB_REVERSE (bb) { edge e; for (e = bb->succ; e; e = e->succ_next) if (e->flags & EDGE_FALLTHRU) break; if (!e) { rtx insn; /* Ensure existence of barrier in BB with no fallthru edges. */ for (insn = BB_END (bb); !insn || GET_CODE (insn) != BARRIER; insn = NEXT_INSN (insn)) if (!insn || (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)) { error ("missing barrier after block %i", bb->index); err = 1; break; } } else if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR) { rtx insn; if (e->src->next_bb != e->dest) { error ("verify_flow_info: Incorrect blocks for fallthru %i->%i", e->src->index, e->dest->index); err = 1; } else for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest); insn = NEXT_INSN (insn)) if (GET_CODE (insn) == BARRIER #ifndef CASE_DROPS_THROUGH || INSN_P (insn) #else || (INSN_P (insn) && ! JUMP_TABLE_DATA_P (insn)) #endif ) { error ("verify_flow_info: Incorrect fallthru %i->%i", e->src->index, e->dest->index); fatal_insn ("wrong insn in the fallthru edge", insn); err = 1; } } } num_bb_notes = 0; last_bb_seen = ENTRY_BLOCK_PTR; for (x = rtx_first; x; x = NEXT_INSN (x)) { if (NOTE_INSN_BASIC_BLOCK_P (x)) { bb = NOTE_BASIC_BLOCK (x); num_bb_notes++; if (bb != last_bb_seen->next_bb) internal_error ("basic blocks not laid down consecutively"); curr_bb = last_bb_seen = bb; } if (!curr_bb) { switch (GET_CODE (x)) { case BARRIER: case NOTE: break; case CODE_LABEL: /* An addr_vec is placed outside any basic block. */ if (NEXT_INSN (x) && GET_CODE (NEXT_INSN (x)) == JUMP_INSN && (GET_CODE (PATTERN (NEXT_INSN (x))) == ADDR_DIFF_VEC || GET_CODE (PATTERN (NEXT_INSN (x))) == ADDR_VEC)) x = NEXT_INSN (x); /* But in any case, non-deletable labels can appear anywhere. */ break; default: fatal_insn ("insn outside basic block", x); } } if (INSN_P (x) && GET_CODE (x) == JUMP_INSN && returnjump_p (x) && ! condjump_p (x) && ! (NEXT_INSN (x) && GET_CODE (NEXT_INSN (x)) == BARRIER)) fatal_insn ("return not followed by barrier", x); if (curr_bb && x == BB_END (curr_bb)) curr_bb = NULL; } if (num_bb_notes != n_basic_blocks) internal_error ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)", num_bb_notes, n_basic_blocks); return err; } /* Assume that the preceding pass has possibly eliminated jump instructions or converted the unconditional jumps. Eliminate the edges from CFG. Return true if any edges are eliminated. */ bool purge_dead_edges (basic_block bb) { edge e, next; rtx insn = BB_END (bb), note; bool purged = false; /* If this instruction cannot trap, remove REG_EH_REGION notes. */ if (GET_CODE (insn) == INSN && (note = find_reg_note (insn, REG_EH_REGION, NULL))) { rtx eqnote; if (! may_trap_p (PATTERN (insn)) || ((eqnote = find_reg_equal_equiv_note (insn)) && ! may_trap_p (XEXP (eqnote, 0)))) remove_note (insn, note); } /* Cleanup abnormal edges caused by exceptions or non-local gotos. 
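An EH edge is kept only if the block's last insn can still throw internally; an abnormal call edge is kept only while the block still ends in a CALL_INSN whose REG_EH_REGION note, if present, is non-negative.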
*/ for (e = bb->succ; e; e = next) { next = e->succ_next; if (e->flags & EDGE_EH) { if (can_throw_internal (BB_END (bb))) continue; } else if (e->flags & EDGE_ABNORMAL_CALL) { if (GET_CODE (BB_END (bb)) == CALL_INSN && (! (note = find_reg_note (insn, REG_EH_REGION, NULL)) || INTVAL (XEXP (note, 0)) >= 0)) continue; } else continue; remove_edge (e); bb->flags |= BB_DIRTY; purged = true; } if (GET_CODE (insn) == JUMP_INSN) { rtx note; edge b,f; /* We do care only about conditional jumps and simplejumps. */ if (!any_condjump_p (insn) && !returnjump_p (insn) && !simplejump_p (insn)) return purged; /* Branch probability/prediction notes are defined only for condjumps. We've possibly turned condjump into simplejump. */ if (simplejump_p (insn)) { note = find_reg_note (insn, REG_BR_PROB, NULL); if (note) remove_note (insn, note); while ((note = find_reg_note (insn, REG_BR_PRED, NULL))) remove_note (insn, note); } for (e = bb->succ; e; e = next) { next = e->succ_next; /* Avoid abnormal flags to leak from computed jumps turned into simplejumps. */ e->flags &= ~EDGE_ABNORMAL; /* See if this edge is one we should keep. */ if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn)) /* A conditional jump can fall through into the next block, so we should keep the edge. */ continue; else if (e->dest != EXIT_BLOCK_PTR && BB_HEAD (e->dest) == JUMP_LABEL (insn)) /* If the destination block is the target of the jump, keep the edge. */ continue; else if (e->dest == EXIT_BLOCK_PTR && returnjump_p (insn)) /* If the destination block is the exit block, and this instruction is a return, then keep the edge. */ continue; else if ((e->flags & EDGE_EH) && can_throw_internal (insn)) /* Keep the edges that correspond to exceptions thrown by this instruction and rematerialize the EDGE_ABNORMAL flag we just cleared above. */ { e->flags |= EDGE_ABNORMAL; continue; } /* We do not need this edge. */ bb->flags |= BB_DIRTY; purged = true; remove_edge (e); } if (!bb->succ || !purged) return purged; if (dump_file) fprintf (dump_file, "Purged edges from bb %i\n", bb->index); if (!optimize) return purged; /* Redistribute probabilities. */ if (!bb->succ->succ_next) { bb->succ->probability = REG_BR_PROB_BASE; bb->succ->count = bb->count; } else { note = find_reg_note (insn, REG_BR_PROB, NULL); if (!note) return purged; b = BRANCH_EDGE (bb); f = FALLTHRU_EDGE (bb); b->probability = INTVAL (XEXP (note, 0)); f->probability = REG_BR_PROB_BASE - b->probability; b->count = bb->count * b->probability / REG_BR_PROB_BASE; f->count = bb->count * f->probability / REG_BR_PROB_BASE; } return purged; } else if (GET_CODE (insn) == CALL_INSN && SIBLING_CALL_P (insn)) { /* First, there should not be any EH or ABCALL edges resulting from non-local gotos and the like. If there were, we shouldn't have created the sibcall in the first place. Second, there should of course never have been a fallthru edge. */ if (!bb->succ || bb->succ->succ_next) abort (); if (bb->succ->flags != (EDGE_SIBCALL | EDGE_ABNORMAL)) abort (); return 0; } /* If we don't see a jump insn, we don't know exactly why the block would have been broken at this point. Look for a simple, non-fallthru edge, as these are only created by conditional branches. If we find such an edge we know that there used to be a jump here and can then safely remove all non-fallthru edges. 
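After the removal exactly one fallthru edge must remain; it inherits the block's full probability and count.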
*/ for (e = bb->succ; e && (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)); e = e->succ_next) ; if (!e) return purged; for (e = bb->succ; e; e = next) { next = e->succ_next; if (!(e->flags & EDGE_FALLTHRU)) { bb->flags |= BB_DIRTY; remove_edge (e); purged = true; } } if (!bb->succ || bb->succ->succ_next) abort (); bb->succ->probability = REG_BR_PROB_BASE; bb->succ->count = bb->count; if (dump_file) fprintf (dump_file, "Purged non-fallthru edges from bb %i\n", bb->index); return purged; } /* Search all basic blocks for potentially dead edges and purge them. Return true if some edge has been eliminated. */ bool purge_all_dead_edges (int update_life_p) { int purged = false; sbitmap blocks = 0; basic_block bb; if (update_life_p) { blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (blocks); } FOR_EACH_BB (bb) { bool purged_here = purge_dead_edges (bb); purged |= purged_here; if (purged_here && update_life_p) SET_BIT (blocks, bb->index); } if (update_life_p && purged) update_life_info (blocks, UPDATE_LIFE_GLOBAL, PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE | PROP_KILL_DEAD_CODE); if (update_life_p) sbitmap_free (blocks); return purged; } /* Same as split_block but update cfg_layout structures. */ static basic_block cfg_layout_split_block (basic_block bb, void *insnp) { rtx insn = insnp; basic_block new_bb = rtl_split_block (bb, insn); new_bb->rbi->footer = bb->rbi->footer; bb->rbi->footer = NULL; return new_bb; } /* Redirect Edge to DEST. */ static edge cfg_layout_redirect_edge_and_branch (edge e, basic_block dest) { basic_block src = e->src; edge ret; if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) return NULL; if (e->dest == dest) return e; if (e->src != ENTRY_BLOCK_PTR && (ret = try_redirect_by_replacing_jump (e, dest, true))) { src->flags |= BB_DIRTY; return ret; } if (e->src == ENTRY_BLOCK_PTR && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX)) { if (dump_file) fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n", e->src->index, dest->index); e->src->flags |= BB_DIRTY; redirect_edge_succ (e, dest); return e; } /* Redirect_edge_and_branch may decide to turn branch into fallthru edge in the case the basic block appears to be in sequence. Avoid this transformation. */ if (e->flags & EDGE_FALLTHRU) { /* Redirect any branch edges unified with the fallthru one. */ if (GET_CODE (BB_END (src)) == JUMP_INSN && label_is_jump_target_p (BB_HEAD (e->dest), BB_END (src))) { if (dump_file) fprintf (dump_file, "Fallthru edge unified with branch " "%i->%i redirected to %i\n", e->src->index, e->dest->index, dest->index); e->flags &= ~EDGE_FALLTHRU; if (!redirect_branch_edge (e, dest)) abort (); e->flags |= EDGE_FALLTHRU; e->src->flags |= BB_DIRTY; return e; } /* In case we are redirecting fallthru edge to the branch edge of conditional jump, remove it. */ if (src->succ->succ_next && !src->succ->succ_next->succ_next) { edge s = e->succ_next ? e->succ_next : src->succ; if (s->dest == dest && any_condjump_p (BB_END (src)) && onlyjump_p (BB_END (src))) delete_insn (BB_END (src)); } ret = redirect_edge_succ_nodup (e, dest); if (dump_file) fprintf (dump_file, "Fallthru edge %i->%i redirected to %i\n", e->src->index, e->dest->index, dest->index); } else ret = redirect_branch_edge (e, dest); /* We don't want simplejumps in the insn stream during cfglayout. */ if (simplejump_p (BB_END (src))) abort (); src->flags |= BB_DIRTY; return ret; } /* Simple wrapper as we always can redirect fallthru edges. 
*/ static basic_block cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest) { if (!cfg_layout_redirect_edge_and_branch (e, dest)) abort (); return NULL; } /* Same as delete_basic_block but update cfg_layout structures. */ static void cfg_layout_delete_block (basic_block bb) { rtx insn, next, prev = PREV_INSN (BB_HEAD (bb)), *to, remaints; if (bb->rbi->header) { next = BB_HEAD (bb); if (prev) NEXT_INSN (prev) = bb->rbi->header; else set_first_insn (bb->rbi->header); PREV_INSN (bb->rbi->header) = prev; insn = bb->rbi->header; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); NEXT_INSN (insn) = next; PREV_INSN (next) = insn; } next = NEXT_INSN (BB_END (bb)); if (bb->rbi->footer) { insn = bb->rbi->footer; while (insn) { if (GET_CODE (insn) == BARRIER) { if (PREV_INSN (insn)) NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn); else bb->rbi->footer = NEXT_INSN (insn); if (NEXT_INSN (insn)) PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn); } if (GET_CODE (insn) == CODE_LABEL) break; insn = NEXT_INSN (insn); } if (bb->rbi->footer) { insn = BB_END (bb); NEXT_INSN (insn) = bb->rbi->footer; PREV_INSN (bb->rbi->footer) = insn; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); NEXT_INSN (insn) = next; if (next) PREV_INSN (next) = insn; else set_last_insn (insn); } } if (bb->next_bb != EXIT_BLOCK_PTR) to = &bb->next_bb->rbi->header; else to = &cfg_layout_function_footer; rtl_delete_block (bb); if (prev) prev = NEXT_INSN (prev); else prev = get_insns (); if (next) next = PREV_INSN (next); else next = get_last_insn (); if (next && NEXT_INSN (next) != prev) { remaints = unlink_insn_chain (prev, next); insn = remaints; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); NEXT_INSN (insn) = *to; if (*to) PREV_INSN (*to) = insn; *to = remaints; } } /* Return true when blocks A and B can be safely merged. */ static bool cfg_layout_can_merge_blocks_p (basic_block a, basic_block b) { bool partitions_ok = true; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. */ if (flag_reorder_blocks_and_partition && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX) || find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX) || a->partition != b->partition)) partitions_ok = false; /* There must be exactly one edge in between the blocks. */ return (a->succ && !a->succ->succ_next && a->succ->dest == b && !b->pred->pred_next && a != b /* Must be simple edge. */ && !(a->succ->flags & EDGE_COMPLEX) && partitions_ok && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR /* If the jump insn has side effects, we can't kill the edge. */ && (GET_CODE (BB_END (a)) != JUMP_INSN || (reload_completed ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a))))); } /* Merge block A and B, abort when it is not possible. */ static void cfg_layout_merge_blocks (basic_block a, basic_block b) { #ifdef ENABLE_CHECKING if (!cfg_layout_can_merge_blocks_p (a, b)) abort (); #endif /* If there was a CODE_LABEL beginning B, delete it. */ if (GET_CODE (BB_HEAD (b)) == CODE_LABEL) delete_insn (BB_HEAD (b)); /* We should have fallthru edge in a, or we can do dummy redirection to get it cleaned up. */ if (GET_CODE (BB_END (a)) == JUMP_INSN) try_redirect_by_replacing_jump (a->succ, b, true); if (GET_CODE (BB_END (a)) == JUMP_INSN) abort (); /* Possible line number notes should appear in between. 
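Any insns queued in B's rbi->header (typically such notes) are emitted after the end of A and then immediately deleted again by the code below.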
*/ if (b->rbi->header) { rtx first = BB_END (a), last; last = emit_insn_after (b->rbi->header, BB_END (a)); delete_insn_chain (NEXT_INSN (first), last); b->rbi->header = NULL; } /* In the case basic blocks are not adjacent, move them around. */ if (NEXT_INSN (BB_END (a)) != BB_HEAD (b)) { rtx first = unlink_insn_chain (BB_HEAD (b), BB_END (b)); emit_insn_after (first, BB_END (a)); /* Skip possible DELETED_LABEL insn. */ if (!NOTE_INSN_BASIC_BLOCK_P (first)) first = NEXT_INSN (first); if (!NOTE_INSN_BASIC_BLOCK_P (first)) abort (); BB_HEAD (b) = NULL; delete_insn (first); } /* Otherwise just re-associate the instructions. */ else { rtx insn; for (insn = BB_HEAD (b); insn != NEXT_INSN (BB_END (b)); insn = NEXT_INSN (insn)) set_block_for_insn (insn, a); insn = BB_HEAD (b); /* Skip possible DELETED_LABEL insn. */ if (!NOTE_INSN_BASIC_BLOCK_P (insn)) insn = NEXT_INSN (insn); if (!NOTE_INSN_BASIC_BLOCK_P (insn)) abort (); BB_HEAD (b) = NULL; BB_END (a) = BB_END (b); delete_insn (insn); } /* Possible tablejumps and barriers should appear after the block. */ if (b->rbi->footer) { if (!a->rbi->footer) a->rbi->footer = b->rbi->footer; else { rtx last = a->rbi->footer; while (NEXT_INSN (last)) last = NEXT_INSN (last); NEXT_INSN (last) = b->rbi->footer; PREV_INSN (b->rbi->footer) = last; } b->rbi->footer = NULL; } if (dump_file) fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index); } /* Split edge E. */ static basic_block cfg_layout_split_edge (edge e) { edge new_e; basic_block new_bb = create_basic_block (e->src != ENTRY_BLOCK_PTR ? NEXT_INSN (BB_END (e->src)) : get_insns (), NULL_RTX, e->src); new_e = make_edge (new_bb, e->dest, EDGE_FALLTHRU); redirect_edge_and_branch_force (e, new_bb); return new_bb; } /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */ static void rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED) { } /* Return 1 if BB ends with a call, possibly followed by some instructions that must stay with the call, 0 otherwise. */ static bool rtl_block_ends_with_call_p (basic_block bb) { rtx insn = BB_END (bb); while (GET_CODE (insn) != CALL_INSN && insn != BB_HEAD (bb) && keep_with_call_p (insn)) insn = PREV_INSN (insn); return (GET_CODE (insn) == CALL_INSN); } /* Return 1 if BB ends with a conditional branch, 0 otherwise. */ static bool rtl_block_ends_with_condjump_p (basic_block bb) { return any_condjump_p (BB_END (bb)); } /* Return true if we need to add fake edge to exit. Helper function for rtl_flow_call_edges_add. */ static bool need_fake_rtl_edge_p (rtx insn) { if (!INSN_P (insn)) return false; if ((GET_CODE (insn) == CALL_INSN && !SIBLING_CALL_P (insn) && !find_reg_note (insn, REG_NORETURN, NULL) && !find_reg_note (insn, REG_ALWAYS_RETURN, NULL) && !CONST_OR_PURE_CALL_P (insn))) return true; return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS && MEM_VOLATILE_P (PATTERN (insn))) || (GET_CODE (PATTERN (insn)) == PARALLEL && asm_noperands (insn) != -1 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0))) || GET_CODE (PATTERN (insn)) == ASM_INPUT); } /* Add fake edges to the function exit for any non constant and non noreturn calls, volatile inline assembly in the bitmap of blocks specified by BLOCKS or to the whole CFG if BLOCKS is zero. Return the number of blocks that were split. The goal is to expose cases in which entering a basic block does not imply that all subsequent instructions must be executed. 
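Blocks are split after each such call when necessary, and a fake edge to EXIT_BLOCK_PTR is added so that the possibility of leaving the block early is visible in the CFG.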
*/ static int rtl_flow_call_edges_add (sbitmap blocks) { int i; int blocks_split = 0; int last_bb = last_basic_block; bool check_last_block = false; if (n_basic_blocks == 0) return 0; if (! blocks) check_last_block = true; else check_last_block = TEST_BIT (blocks, EXIT_BLOCK_PTR->prev_bb->index); /* In the last basic block, before epilogue generation, there will be a fallthru edge to EXIT. Special care is required if the last insn of the last basic block is a call because make_edge folds duplicate edges, which would result in the fallthru edge also being marked fake, which would result in the fallthru edge being removed by remove_fake_edges, which would result in an invalid CFG. Moreover, we can't elide the outgoing fake edge, since the block profiler needs to take this into account in order to solve the minimal spanning tree in the case that the call doesn't return. Handle this by adding a dummy instruction in a new last basic block. */ if (check_last_block) { basic_block bb = EXIT_BLOCK_PTR->prev_bb; rtx insn = BB_END (bb); /* Back up past insns that must be kept in the same block as a call. */ while (insn != BB_HEAD (bb) && keep_with_call_p (insn)) insn = PREV_INSN (insn); if (need_fake_rtl_edge_p (insn)) { edge e; for (e = bb->succ; e; e = e->succ_next) if (e->dest == EXIT_BLOCK_PTR) { insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e); commit_edge_insertions (); break; } } } /* Now add fake edges to the function exit for any non constant calls since there is no way that we can determine if they will return or not... */ for (i = 0; i < last_bb; i++) { basic_block bb = BASIC_BLOCK (i); rtx insn; rtx prev_insn; if (!bb) continue; if (blocks && !TEST_BIT (blocks, i)) continue; for (insn = BB_END (bb); ; insn = prev_insn) { prev_insn = PREV_INSN (insn); if (need_fake_rtl_edge_p (insn)) { edge e; rtx split_at_insn = insn; /* Don't split the block between a call and an insn that should remain in the same block as the call. */ if (GET_CODE (insn) == CALL_INSN) while (split_at_insn != BB_END (bb) && keep_with_call_p (NEXT_INSN (split_at_insn))) split_at_insn = NEXT_INSN (split_at_insn); /* The handling above of the final block before the epilogue should be enough to verify that there is no edge to the exit block in CFG already. Calling make_edge in such case would cause us to mark that edge as fake and remove it later. */ #ifdef ENABLE_CHECKING if (split_at_insn == BB_END (bb)) for (e = bb->succ; e; e = e->succ_next) if (e->dest == EXIT_BLOCK_PTR) abort (); #endif /* Note that the following may create a new basic block and renumber the existing basic blocks. */ if (split_at_insn != BB_END (bb)) { e = split_block (bb, split_at_insn); if (e) blocks_split++; } make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); } if (insn == BB_HEAD (bb)) break; } } if (blocks_split) verify_flow_info (); return blocks_split; } /* Implementation of CFG manipulation for linearized RTL. 
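The hook table below wires the rtl_* routines defined in this file into the generic cfghooks interface; cfg_layout_rtl_cfg_hooks further down does the same for cfglayout mode.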
*/ struct cfg_hooks rtl_cfg_hooks = { "rtl", rtl_verify_flow_info, rtl_dump_bb, rtl_create_basic_block, rtl_redirect_edge_and_branch, rtl_redirect_edge_and_branch_force, rtl_delete_block, rtl_split_block, rtl_move_block_after, rtl_can_merge_blocks, /* can_merge_blocks_p */ rtl_merge_blocks, rtl_predict_edge, rtl_predicted_by_p, NULL, /* can_duplicate_block_p */ NULL, /* duplicate_block */ rtl_split_edge, rtl_make_forwarder_block, rtl_tidy_fallthru_edge, rtl_block_ends_with_call_p, rtl_block_ends_with_condjump_p, rtl_flow_call_edges_add }; /* Implementation of CFG manipulation for cfg layout RTL, where basic block connected via fallthru edges does not have to be adjacent. This representation will hopefully become the default one in future version of the compiler. */ /* We do not want to declare these functions in a header file, since they should only be used through the cfghooks interface, and we do not want to move them here since it would require also moving quite a lot of related code. */ extern bool cfg_layout_can_duplicate_bb_p (basic_block); extern basic_block cfg_layout_duplicate_bb (basic_block); struct cfg_hooks cfg_layout_rtl_cfg_hooks = { "cfglayout mode", rtl_verify_flow_info_1, rtl_dump_bb, cfg_layout_create_basic_block, cfg_layout_redirect_edge_and_branch, cfg_layout_redirect_edge_and_branch_force, cfg_layout_delete_block, cfg_layout_split_block, rtl_move_block_after, cfg_layout_can_merge_blocks_p, cfg_layout_merge_blocks, rtl_predict_edge, rtl_predicted_by_p, cfg_layout_can_duplicate_bb_p, cfg_layout_duplicate_bb, cfg_layout_split_edge, rtl_make_forwarder_block, NULL, rtl_block_ends_with_call_p, rtl_block_ends_with_condjump_p, rtl_flow_call_edges_add }; /* Optimize by combining instructions for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This module is essentially the "combiner" phase of the U. of Arizona Portable Optimizer, but redone to work on our list-structured representation for RTL instead of their string representation. The LOG_LINKS of each insn identify the most recent assignment to each REG used in the insn. It is a list of previous insns, each of which contains a SET for a REG that is used in this insn and not used or set in between. LOG_LINKs never cross basic blocks. They were set up by the preceding pass (lifetime analysis). We try to combine each pair of insns joined by a logical link. We also try to combine triples of insns A, B and C when C has a link back to B and B has a link back to A. LOG_LINKS does not have links for use of the CC0. They don't need to, because the insn that sets the CC0 is always immediately before the insn that tests it. So we always regard a branch insn as having a logical link to the preceding insn. The same is true for an insn explicitly using CC0. 
We check (with use_crosses_set_p) to avoid combining in such a way as to move a computation to a place where its value would be different. Combination is done by mathematically substituting the previous insn(s) values for the regs they set into the expressions in the later insns that refer to these regs. If the result is a valid insn for our target machine, according to the machine description, we install it, delete the earlier insns, and update the data flow information (LOG_LINKS and REG_NOTES) for what we did. There are a few exceptions where the dataflow information created by flow.c aren't completely updated: - reg_live_length is not updated - a LOG_LINKS entry that refers to an insn with multiple SETs may be removed because there is no way to know which register it was linking To simplify substitution, we combine only when the earlier insn(s) consist of only a single assignment. To simplify updating afterward, we never combine when a subroutine call appears in the middle. Since we do not represent assignments to CC0 explicitly except when that is all an insn does, there is no LOG_LINKS entry in an insn that uses the condition code for the insn that set the condition code. Fortunately, these two insns must be consecutive. Therefore, every JUMP_INSN is taken to have an implicit logical link to the preceding insn. This is not quite right, since non-jumps can also use the condition code; but in practice such insns would not combine anyway. */ /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */ /* Default macros to initialize an rtl_hooks data structure. Copyright 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_RTL_HOOKS_DEF_H #define GCC_RTL_HOOKS_DEF_H #define RTL_HOOKS_GEN_LOWPART gen_lowpart_general #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_general #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_general /* The structure is defined in rtl.h. */ #define RTL_HOOKS_INITIALIZER { \ RTL_HOOKS_GEN_LOWPART, \ RTL_HOOKS_REG_NONZERO_REG_BITS, \ RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES, \ } extern rtx gen_lowpart_general (enum machine_mode, rtx); extern rtx reg_nonzero_bits_general (rtx, enum machine_mode, rtx, enum machine_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT *); extern rtx reg_num_sign_bit_copies_general (rtx, enum machine_mode, rtx, enum machine_mode, unsigned int, unsigned int *); #endif /* GCC_RTL_HOOKS_DEF_H */ /* Include output.h for dump_file. */ /* Number of attempts to combine instructions in this function. */ static int combine_attempts; /* Number of attempts that got as far as substitution in this function. */ static int combine_merges; /* Number of instructions combined with added SETs in this function. */ static int combine_extras; /* Number of instructions combined in this function. */ static int combine_successes; /* Totals over entire compilation. 
*/ static int total_attempts, total_merges, total_extras, total_successes; /* Vector mapping INSN_UIDs to cuids. The cuids are like uids but increase monotonically always. Combine always uses cuids so that it can compare them. But actually renumbering the uids, which we used to do, proves to be a bad idea because it makes it hard to compare the dumps produced by earlier passes with those from later passes. */ static int *uid_cuid; static int max_uid_cuid; /* Get the cuid of an insn. */ #undef INSN_CUID #define INSN_CUID(INSN) \ (INSN_UID (INSN) > max_uid_cuid ? insn_cuid (INSN) : uid_cuid[INSN_UID (INSN)]) /* In case BITS_PER_WORD == HOST_BITS_PER_WIDE_INT, shifting by BITS_PER_WORD would invoke undefined behavior. Work around it. */ #define UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD(val) \ (((unsigned HOST_WIDE_INT) (val) << (BITS_PER_WORD - 1)) << 1) /* Maximum register number, which is the size of the tables below. */ static unsigned int combine_max_regno; struct reg_stat { /* Record last point of death of (hard or pseudo) register n. */ rtx last_death; /* Record last point of modification of (hard or pseudo) register n. */ rtx last_set; /* The next group of fields allows the recording of the last value assigned to (hard or pseudo) register n. We use this information to see if an operation being processed is redundant given a prior operation performed on the register. For example, an `and' with a constant is redundant if all the zero bits are already known to be turned off. We use an approach similar to that used by cse, but change it in the following ways: (1) We do not want to reinitialize at each label. (2) It is useful, but not critical, to know the actual value assigned to a register. Often just its form is helpful. Therefore, we maintain the following fields: last_set_value the last value assigned last_set_label records the value of label_tick when the register was assigned last_set_table_tick records the value of label_tick when a value using the register is assigned last_set_invalid set to nonzero when it is not valid to use the value of this register in some register's value To understand the usage of these tables, it is important to understand the distinction between the value in last_set_value being valid and the register being validly contained in some other expression in the table. (The next two parameters are out of date). reg_stat[i].last_set_value is valid if it is nonzero, and either reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick. Register I may validly appear in any expression returned for the value of another register if reg_n_sets[i] is 1. It may also appear in the value for register J if reg_stat[j].last_set_invalid is zero, or reg_stat[i].last_set_label < reg_stat[j].last_set_label. If an expression is found in the table containing a register which may not validly appear in an expression, the register is replaced by something that won't match, (clobber (const_int 0)). */ /* Record last value assigned to (hard or pseudo) register n. */ rtx last_set_value; /* Record the value of label_tick when an expression involving register n is placed in last_set_value. */ int last_set_table_tick; /* Record the value of label_tick when the value for register n is placed in last_set_value. */ int last_set_label; /* These fields are maintained in parallel with last_set_value and are used to store the mode in which the register was last set, the bits that were known to be zero when it was last set, and the number of sign bit copies it was known to have when it was last set.
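For instance, after (set (reg:SI 70) (const_int 5)) on a 32-bit target, an entry would record last_set_nonzero_bits == 5 and last_set_sign_bit_copies == 29, since the top 29 bits of the value are known to equal its (zero) sign bit.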
*/ unsigned HOST_WIDE_INT last_set_nonzero_bits; char last_set_sign_bit_copies; ENUM_BITFIELD(machine_mode) last_set_mode : 8; /* Set nonzero if references to register n in expressions should not be used. last_set_invalid is set nonzero when this register is being assigned to and last_set_table_tick == label_tick. */ char last_set_invalid; /* Some registers that are set more than once and used in more than one basic block are nevertheless always set in similar ways. For example, a QImode register may be loaded from memory in two places on a machine where byte loads zero extend. We record in the following fields if a register has some leading bits that are always equal to the sign bit, and what we know about the nonzero bits of a register, specifically which bits are known to be zero. If an entry is zero, it means that we don't know anything special. */ unsigned char sign_bit_copies; unsigned HOST_WIDE_INT nonzero_bits; }; static struct reg_stat *reg_stat; /* Record the cuid of the last insn that invalidated memory (anything that writes memory, and subroutine calls, but not pushes). */ static int mem_last_set; /* Record the cuid of the last CALL_INSN so we can tell whether a potential combination crosses any calls. */ static int last_call_cuid; /* When `subst' is called, this is the insn that is being modified (by combining in a previous insn). The PATTERN of this insn is still the old pattern partially modified and it should not be looked at, but this may be used to examine the successors of the insn to judge whether a simplification is valid. */ static rtx subst_insn; /* This is the lowest CUID that `subst' is currently dealing with. get_last_value will not return a value if the register was set at or after this CUID. If not for this mechanism, we could get confused if I2 or I1 in try_combine were an insn that used the old value of a register to obtain a new value. In that case, we might erroneously get the new value of the register when we wanted the old one. */ static int subst_low_cuid; /* This contains any hard registers that are used in newpat; reg_dead_at_p must consider all these registers to be always live. */ static HARD_REG_SET newpat_used_regs; /* This is an insn to which a LOG_LINKS entry has been added. If this insn is the earlier than I2 or I3, combine should rescan starting at that location. */ static rtx added_links_insn; /* Basic block in which we are performing combines. */ static basic_block this_basic_block; /* A bitmap indicating which blocks had registers go dead at entry. After combine, we'll need to re-do global life analysis with those blocks as starting points. */ static sbitmap refresh_blocks; /* The following array records the combine_insn_cost for every insn in the instruction stream. */ static int *uid_insn_cost; /* Length of the currently allocated uid_insn_cost array. */ static int last_insn_cost; /* Incremented for each label. */ static int label_tick; /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */ static enum machine_mode nonzero_bits_mode; /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can be safely used. It is zero while computing them and after combine has completed. This former test prevents propagating values based on previously set values, which can be incorrect if a variable is modified in a loop. */ static int nonzero_sign_valid; /* Record one modification to rtl structure to be undone by storing old_contents into *where. 
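(The entries are chained into a LIFO list rooted at undobuf.undos; undo_all restores each old_contents through its where pointer, while undo_commit simply releases the entries to undobuf.frees without restoring anything.)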
is_int is 1 if the contents are an int. */ struct undo { struct undo *next; int is_int; union {rtx r; int i;} old_contents; union {rtx *r; int *i;} where; }; /* Record a bunch of changes to be undone, up to MAX_UNDO of them. num_undo says how many are currently recorded. other_insn is nonzero if we have modified some other insn in the process of working on subst_insn. It must be verified too. */ struct undobuf { struct undo *undos; struct undo *frees; rtx other_insn; }; static struct undobuf undobuf; /* Number of times the pseudo being substituted for was found and replaced. */ static int n_pseudo_occurrences; static rtx reg_nonzero_bits_for_combine (rtx, enum machine_mode, rtx, enum machine_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT *); static rtx reg_num_sign_bit_copies_for_combine (rtx, enum machine_mode, rtx, enum machine_mode, unsigned int, unsigned int *); static void do_SUBST (rtx *, rtx); static void do_SUBST_INT (int *, int); static void init_reg_last (void); static void setup_incoming_promotions (void); static void set_nonzero_bits_and_sign_copies (rtx, rtx, void *); static int cant_combine_insn_p (rtx); static int can_combine_p (rtx, rtx, rtx, rtx, rtx *, rtx *); static int combinable_i3pat (rtx, rtx *, rtx, rtx, int, rtx *); static int contains_muldiv (rtx); static rtx try_combine (rtx, rtx, rtx, int *); static void undo_all (void); static void undo_commit (void); static rtx *find_split_point (rtx *, rtx); static rtx subst (rtx, rtx, rtx, int, int); static rtx combine_simplify_rtx (rtx, enum machine_mode, int); static rtx simplify_if_then_else (rtx); static rtx simplify_set (rtx); static rtx simplify_logical (rtx); static rtx expand_compound_operation (rtx); static rtx expand_field_assignment (rtx); static rtx make_extraction (enum machine_mode, rtx, HOST_WIDE_INT, rtx, unsigned HOST_WIDE_INT, int, int, int); static rtx extract_left_shift (rtx, int); static rtx make_compound_operation (rtx, enum rtx_code); static int get_pos_from_mask (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT *); static rtx force_to_mode (rtx, enum machine_mode, unsigned HOST_WIDE_INT, rtx, int); static rtx if_then_else_cond (rtx, rtx *, rtx *); static rtx known_cond (rtx, enum rtx_code, rtx, rtx); static int rtx_equal_for_field_assignment_p (rtx, rtx); static rtx make_field_assignment (rtx); static rtx apply_distributive_law (rtx); static rtx simplify_and_const_int (rtx, enum machine_mode, rtx, unsigned HOST_WIDE_INT); static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code, HOST_WIDE_INT, enum machine_mode, int *); static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx, int); static int recog_for_combine (rtx *, rtx, rtx *); static rtx gen_lowpart_for_combine (enum machine_mode, rtx); static rtx gen_binary (enum rtx_code, enum machine_mode, rtx, rtx); static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *); static void update_table_tick (rtx); static void record_value_for_reg (rtx, rtx, rtx); static void check_promoted_subreg (rtx, rtx); static void record_dead_and_set_regs_1 (rtx, rtx, void *); static void record_dead_and_set_regs (rtx); static int get_last_value_validate (rtx *, rtx, int, int); static rtx get_last_value (rtx); static int use_crosses_set_p (rtx, int); static void reg_dead_at_p_1 (rtx, rtx, void *); static int reg_dead_at_p (rtx, rtx); static void move_deaths (rtx, rtx, int, rtx, rtx *); static int reg_bitfield_target_p (rtx, rtx); static void distribute_notes (rtx, rtx, rtx, rtx); static void distribute_links (rtx); 
static void mark_used_regs_combine (rtx); static int insn_cuid (rtx); static void record_promoted_value (rtx, rtx); static rtx reversed_comparison (rtx, enum machine_mode, rtx, rtx); static enum rtx_code combine_reversed_comparison_code (rtx); static int unmentioned_reg_p_1 (rtx *, void *); static bool unmentioned_reg_p (rtx, rtx); /* It is not safe to use ordinary gen_lowpart in combine. See comments in gen_lowpart_for_combine. */ #undef RTL_HOOKS_GEN_LOWPART #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine #undef RTL_HOOKS_REG_NONZERO_REG_BITS #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER; /* Substitute NEWVAL, an rtx expression, into INTO, a place in some insn. The substitution can be undone by undo_all. If INTO is already set to NEWVAL, do not record this change. Because computing NEWVAL might also call SUBST, we have to compute it before we put anything into the undo table. */ static void do_SUBST (rtx *into, rtx newval) { struct undo *buf; rtx oldval = *into; if (oldval == newval) return; /* We'd like to catch as many invalid transformations here as possible. Unfortunately, there are way too many mode changes that are perfectly valid, so we'd waste too much effort for little gain doing the checks here. Focus on catching invalid transformations involving integer constants. */ if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT && GET_CODE (newval) == CONST_INT) { /* Sanity check that we're replacing oldval with a CONST_INT that is a valid sign-extension for the original mode. */ if (INTVAL (newval) != trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval))) abort (); /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a CONST_INT is not valid, because after the replacement, the original mode would be gone. Unfortunately, we can't tell when do_SUBST is called to replace the operand thereof, so we perform this test on oldval instead, checking whether an invalid replacement took place before we got here. */ if ((GET_CODE (oldval) == SUBREG && GET_CODE (SUBREG_REG (oldval)) == CONST_INT) || (GET_CODE (oldval) == ZERO_EXTEND && GET_CODE (XEXP (oldval, 0)) == CONST_INT)) abort (); } if (undobuf.frees) buf = undobuf.frees, undobuf.frees = buf->next; else buf = xmalloc (sizeof (struct undo)); buf->is_int = 0; buf->where.r = into; buf->old_contents.r = oldval; *into = newval; buf->next = undobuf.undos, undobuf.undos = buf; } #define SUBST(INTO, NEWVAL) do_SUBST(&(INTO), (NEWVAL)) /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution for the value of a HOST_WIDE_INT value (including CONST_INT) is not safe. */ static void do_SUBST_INT (int *into, int newval) { struct undo *buf; int oldval = *into; if (oldval == newval) return; if (undobuf.frees) buf = undobuf.frees, undobuf.frees = buf->next; else buf = xmalloc (sizeof (struct undo)); buf->is_int = 1; buf->where.i = into; buf->old_contents.i = oldval; *into = newval; buf->next = undobuf.undos, undobuf.undos = buf; } #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT(&(INTO), (NEWVAL)) /* Calculate the rtx_cost of a single instruction. A return value of zero indicates an instruction without a known cost. */ static int combine_insn_cost (rtx pat) { int i, cost; rtx set; /* Extract the single set rtx from the instruction pattern. We can't use single_set since we only have the pattern. 
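For example, a pattern such as (parallel [(set (reg 60) (plus (reg 61) (reg 62))) (clobber (reg:CC 17))]) still yields its single SET here; a pattern with two SETs, or with none, is given the unknown cost of zero.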
*/ if (GET_CODE (pat) == SET) set = pat; else if (GET_CODE (pat) == PARALLEL) { set = NULL_RTX; for (i = 0; i < XVECLEN (pat, 0); i++) { rtx x = XVECEXP (pat, 0, i); if (GET_CODE (x) == SET) { if (set) return 0; set = x; } } if (!set) return 0; } else return 0; cost = rtx_cost (SET_SRC (set), SET); return cost > 0 ? cost : COSTS_N_INSNS (1); } /* Subroutine of try_combine. Determine whether the combine replacement patterns NEWPAT and NEWI2PAT are cheaper according to combine_insn_cost that the original instruction sequence I1, I2 and I3. Note that I1 and/or NEWI2PAT may be NULL_RTX. This function returns false, if the costs of all instructions can be estimated, and the replacements are more expensive than the original sequence. */ static bool combine_validate_cost (rtx i1, rtx i2, rtx i3, rtx newpat, rtx newi2pat) { int i1_cost, i2_cost, i3_cost; int new_i2_cost, new_i3_cost; int old_cost, new_cost; /* Lookup the original combine_insn_costs. */ i2_cost = INSN_UID (i2) <= last_insn_cost ? uid_insn_cost[INSN_UID (i2)] : 0; i3_cost = INSN_UID (i3) <= last_insn_cost ? uid_insn_cost[INSN_UID (i3)] : 0; if (i1) { i1_cost = INSN_UID (i1) <= last_insn_cost ? uid_insn_cost[INSN_UID (i1)] : 0; old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0) ? i1_cost + i2_cost + i3_cost : 0; } else { old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0; i1_cost = 0; } /* Calculate the replacement combine_insn_costs. */ new_i3_cost = combine_insn_cost (newpat); if (newi2pat) { new_i2_cost = combine_insn_cost (newi2pat); new_cost = (new_i2_cost > 0 && new_i3_cost > 0) ? new_i2_cost + new_i3_cost : 0; } else { new_cost = new_i3_cost; new_i2_cost = 0; } /* Disallow this recombination if both new_cost and old_cost are greater than zero, and new_cost is greater than old cost. */ if (!undobuf.other_insn && old_cost > 0 && new_cost > old_cost) { if (dump_file) { if (i1) { fprintf (dump_file, "rejecting combination of insns %d, %d and %d\n", INSN_UID (i1), INSN_UID (i2), INSN_UID (i3)); fprintf (dump_file, "original costs %d + %d + %d = %d\n", i1_cost, i2_cost, i3_cost, old_cost); } else { fprintf (dump_file, "rejecting combination of insns %d and %d\n", INSN_UID (i2), INSN_UID (i3)); fprintf (dump_file, "original costs %d + %d = %d\n", i2_cost, i3_cost, old_cost); } if (newi2pat) { fprintf (dump_file, "replacement costs %d + %d = %d\n", new_i2_cost, new_i3_cost, new_cost); } else fprintf (dump_file, "replacement cost %d\n", new_cost); } return false; } /* Update the uid_insn_cost array with the replacement costs. */ uid_insn_cost[INSN_UID (i2)] = new_i2_cost; uid_insn_cost[INSN_UID (i3)] = new_i3_cost; if (i1) uid_insn_cost[INSN_UID (i1)] = 0; return true; } /* Main entry point for combiner. F is the first insn of the function. NREGS is the first unused pseudo-reg number. Return nonzero if the combiner has turned an indirect jump instruction into a direct jump. */ int combine_instructions (rtx f, unsigned int nregs) { rtx insn, next; #ifdef HAVE_cc0 rtx prev; #endif int i; rtx links, nextlinks; int new_direct_jump_p = 0; combine_attempts = 0; combine_merges = 0; combine_extras = 0; combine_successes = 0; combine_max_regno = nregs; rtl_hooks = combine_rtl_hooks; reg_stat = xcalloc (nregs, sizeof (struct reg_stat)); init_recog_no_volatile (); /* Compute maximum uid value so uid_cuid can be allocated. 
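Cuids themselves are assigned in the scan that follows the allocation; they are dense and increase in instruction-stream order, so INSN_CUID comparisons reflect program order even though INSN_UIDs need not be monotonic.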
*/ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) if (INSN_UID (insn) > i) i = INSN_UID (insn); uid_cuid = xmalloc ((i + 1) * sizeof (int)); max_uid_cuid = i; nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0); /* Don't use reg_stat[].nonzero_bits when computing it. This can cause problems when, for example, we have j <<= 1 in a loop. */ nonzero_sign_valid = 0; /* Compute the mapping from uids to cuids. Cuids are numbers assigned to insns, like uids, except that cuids increase monotonically through the code. Scan all SETs and see if we can deduce anything about what bits are known to be zero for some registers and how many copies of the sign bit are known to exist for those registers. Also set any known values so that we can use it while searching for what bits are known to be set. */ label_tick = 1; setup_incoming_promotions (); refresh_blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (refresh_blocks); /* Allocate array of current combine_insn_costs. */ uid_insn_cost = xcalloc (max_uid_cuid + 1, sizeof (int)); last_insn_cost = max_uid_cuid; for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) { uid_cuid[INSN_UID (insn)] = ++i; subst_low_cuid = i; subst_insn = insn; if (INSN_P (insn)) { note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies, NULL); record_dead_and_set_regs (insn); #ifdef AUTO_INC_DEC for (links = REG_NOTES (insn); links; links = XEXP (links, 1)) if (REG_NOTE_KIND (links) == REG_INC) set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX, NULL); #endif /* Record the current combine_insn_cost of this instruction. */ uid_insn_cost[INSN_UID (insn)] = combine_insn_cost (PATTERN (insn)); if (dump_file) fprintf(dump_file, "insn_cost %d: %d\n", INSN_UID (insn), uid_insn_cost[INSN_UID (insn)]); } if (GET_CODE (insn) == CODE_LABEL) label_tick++; } nonzero_sign_valid = 1; /* Now scan all the insns in forward order. */ label_tick = 1; last_call_cuid = 0; mem_last_set = 0; init_reg_last (); setup_incoming_promotions (); FOR_EACH_BB (this_basic_block) { for (insn = BB_HEAD (this_basic_block); insn != NEXT_INSN (BB_END (this_basic_block)); insn = next ? next : NEXT_INSN (insn)) { next = 0; if (GET_CODE (insn) == CODE_LABEL) label_tick++; else if (INSN_P (insn)) { /* See if we know about function return values before this insn based upon SUBREG flags. */ check_promoted_subreg (insn, PATTERN (insn)); /* Try this insn with each insn it links back to. */ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1)) if ((next = try_combine (insn, XEXP (links, 0), NULL_RTX, &new_direct_jump_p)) != 0) goto retry; /* Try each sequence of three linked insns ending with this one. */ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1)) { rtx link = XEXP (links, 0); /* If the linked insn has been replaced by a note, then there is no point in pursuing this chain any further. */ if (GET_CODE (link) == NOTE) continue; for (nextlinks = LOG_LINKS (link); nextlinks; nextlinks = XEXP (nextlinks, 1)) if ((next = try_combine (insn, link, XEXP (nextlinks, 0), &new_direct_jump_p)) != 0) goto retry; } #ifdef HAVE_cc0 /* Try to combine a jump insn that uses CC0 with a preceding insn that sets CC0, and maybe with its logical predecessor as well. This is how we make decrement-and-branch insns. We need this special code because data flow connections via CC0 do not get entered in LOG_LINKS. 
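An illustrative target sequence is a decrement of a register, a cc0-setting compare of the result against zero, and a conditional jump on cc0; combining the compare with the jump (and, through the compare's LOG_LINKS, with the decrement) can then match a single decrement-and-branch pattern where the machine description provides one.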
*/ if (GET_CODE (insn) == JUMP_INSN && (prev = prev_nonnote_insn (insn)) != 0 && GET_CODE (prev) == INSN && sets_cc0_p (PATTERN (prev))) { if ((next = try_combine (insn, prev, NULL_RTX, &new_direct_jump_p)) != 0) goto retry; for (nextlinks = LOG_LINKS (prev); nextlinks; nextlinks = XEXP (nextlinks, 1)) if ((next = try_combine (insn, prev, XEXP (nextlinks, 0), &new_direct_jump_p)) != 0) goto retry; } /* Do the same for an insn that explicitly references CC0. */ if (GET_CODE (insn) == INSN && (prev = prev_nonnote_insn (insn)) != 0 && GET_CODE (prev) == INSN && sets_cc0_p (PATTERN (prev)) && GET_CODE (PATTERN (insn)) == SET && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn)))) { if ((next = try_combine (insn, prev, NULL_RTX, &new_direct_jump_p)) != 0) goto retry; for (nextlinks = LOG_LINKS (prev); nextlinks; nextlinks = XEXP (nextlinks, 1)) if ((next = try_combine (insn, prev, XEXP (nextlinks, 0), &new_direct_jump_p)) != 0) goto retry; } /* Finally, see if any of the insns that this insn links to explicitly references CC0. If so, try this insn, that insn, and its predecessor if it sets CC0. */ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1)) if (GET_CODE (XEXP (links, 0)) == INSN && GET_CODE (PATTERN (XEXP (links, 0))) == SET && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0)))) && (prev = prev_nonnote_insn (XEXP (links, 0))) != 0 && GET_CODE (prev) == INSN && sets_cc0_p (PATTERN (prev)) && (next = try_combine (insn, XEXP (links, 0), prev, &new_direct_jump_p)) != 0) goto retry; #endif /* Try combining an insn with two different insns whose results it uses. */ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1)) for (nextlinks = XEXP (links, 1); nextlinks; nextlinks = XEXP (nextlinks, 1)) if ((next = try_combine (insn, XEXP (links, 0), XEXP (nextlinks, 0), &new_direct_jump_p)) != 0) goto retry; /* Try this insn with each REG_EQUAL note it links back to. */ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1)) { rtx set, note; rtx temp = XEXP (links, 0); if ((set = single_set (temp)) != 0 && (note = find_reg_equal_equiv_note (temp)) != 0 && GET_CODE (XEXP (note, 0)) != EXPR_LIST /* Avoid using a register that may already been marked dead by an earlier instruction. */ && ! unmentioned_reg_p (XEXP (note, 0), SET_SRC (set))) { /* Temporarily replace the set's source with the contents of the REG_EQUAL note. The insn will be deleted or recognized by try_combine. */ rtx orig = SET_SRC (set); SET_SRC (set) = XEXP (note, 0); next = try_combine (insn, temp, NULL_RTX, &new_direct_jump_p); if (next) goto retry; SET_SRC (set) = orig; } } if (GET_CODE (insn) != NOTE) record_dead_and_set_regs (insn); retry: ; } } } clear_bb_flags (); EXECUTE_IF_SET_IN_SBITMAP (refresh_blocks, 0, i, BASIC_BLOCK (i)->flags |= BB_DIRTY); new_direct_jump_p |= purge_all_dead_edges (0); delete_noop_moves (); update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE | PROP_KILL_DEAD_CODE); /* Clean up. */ sbitmap_free (refresh_blocks); free (uid_insn_cost); free (reg_stat); free (uid_cuid); { struct undo *undo, *next; for (undo = undobuf.frees; undo; undo = next) { next = undo->next; free (undo); } undobuf.frees = 0; } total_attempts += combine_attempts; total_merges += combine_merges; total_extras += combine_extras; total_successes += combine_successes; nonzero_sign_valid = 0; rtl_hooks = general_rtl_hooks; /* Make recognizer allow volatile MEMs again. 
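(init_recog_no_volatile was installed at the start of combine_instructions so that recog would reject patterns containing volatile memory references while we were combining; restoring the normal recognizer here leaves later passes unaffected.)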
*/ init_recog (); return new_direct_jump_p; } /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */ static void init_reg_last (void) { unsigned int i; for (i = 0; i < combine_max_regno; i++) memset (reg_stat + i, 0, offsetof (struct reg_stat, sign_bit_copies)); } /* Set up any promoted values for incoming argument registers. */ static void setup_incoming_promotions (void) { unsigned int regno; rtx reg; enum machine_mode mode; int unsignedp; rtx first = get_insns (); if (targetm.calls.promote_function_args (TREE_TYPE (cfun->decl))) { for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) /* Check whether this register can hold an incoming pointer argument. FUNCTION_ARG_REGNO_P tests outgoing register numbers, so translate if necessary due to register windows. */ if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (regno)) && (reg = promoted_input_arg (regno, &mode, &unsignedp)) != 0) { record_value_for_reg (reg, first, gen_rtx_fmt_e ((unsignedp ? ZERO_EXTEND : SIGN_EXTEND), GET_MODE (reg), gen_rtx_CLOBBER (mode, const0_rtx))); } } } /* Called via note_stores. If X is a pseudo that is narrower than HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero. If we are setting only a portion of X and we can't figure out what portion, assume all bits will be used since we don't know what will be happening. Similarly, set how many bits of X are known to be copies of the sign bit at all locations in the function. This is the smallest number implied by any set of X. */ static void set_nonzero_bits_and_sign_copies (rtx x, rtx set, void *data ATTRIBUTE_UNUSED) { unsigned int num; if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER /* If this register is undefined at the start of the file, we can't say what its contents were. */ && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, REGNO (x)) && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT) { if (set == 0 || GET_CODE (set) == CLOBBER) { reg_stat[REGNO (x)].nonzero_bits = GET_MODE_MASK (GET_MODE (x)); reg_stat[REGNO (x)].sign_bit_copies = 1; return; } /* If this is a complex assignment, see if we can convert it into a simple assignment. */ set = expand_field_assignment (set); /* If this is a simple assignment, or we have a paradoxical SUBREG, set what we know about X. */ if (SET_DEST (set) == x || (GET_CODE (SET_DEST (set)) == SUBREG && (GET_MODE_SIZE (GET_MODE (SET_DEST (set))) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set))))) && SUBREG_REG (SET_DEST (set)) == x)) { rtx src = SET_SRC (set); #ifdef SHORT_IMMEDIATES_SIGN_EXTEND /* If X is narrower than a word and SRC is a non-negative constant that would appear negative in the mode of X, sign-extend it for use in reg_stat[].nonzero_bits because some machines (maybe most) will actually do the sign-extension and this is the conservative approach. ??? For 2.5, try to tighten up the MD files in this regard instead of this kludge. */ if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD && GET_CODE (src) == CONST_INT && INTVAL (src) > 0 && 0 != (INTVAL (src) & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1)))) src = GEN_INT (INTVAL (src) | ((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (GET_MODE (x)))); #endif /* Don't call nonzero_bits if it cannot change anything. 
*/ if (reg_stat[REGNO (x)].nonzero_bits != ~(unsigned HOST_WIDE_INT) 0) reg_stat[REGNO (x)].nonzero_bits |= nonzero_bits (src, nonzero_bits_mode); num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x)); if (reg_stat[REGNO (x)].sign_bit_copies == 0 || reg_stat[REGNO (x)].sign_bit_copies > num) reg_stat[REGNO (x)].sign_bit_copies = num; } else { reg_stat[REGNO (x)].nonzero_bits = GET_MODE_MASK (GET_MODE (x)); reg_stat[REGNO (x)].sign_bit_copies = 1; } } } /* See if INSN can be combined into I3. PRED and SUCC are optionally insns that were previously combined into I3 or that will be combined into the merger of INSN and I3. Return 0 if the combination is not allowed for any reason. If the combination is allowed, *PDEST will be set to the single destination of INSN and *PSRC to the single source, and this function will return 1. */ static int can_combine_p (rtx insn, rtx i3, rtx pred ATTRIBUTE_UNUSED, rtx succ, rtx *pdest, rtx *psrc) { int i; rtx set = 0, src, dest; rtx p; #ifdef AUTO_INC_DEC rtx link; #endif int all_adjacent = (succ ? (next_active_insn (insn) == succ && next_active_insn (succ) == i3) : next_active_insn (insn) == i3); /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0. or a PARALLEL consisting of such a SET and CLOBBERs. If INSN has CLOBBER parallel parts, ignore them for our processing. By definition, these happen during the execution of the insn. When it is merged with another insn, all bets are off. If they are, in fact, needed and aren't also supplied in I3, they may be added by recog_for_combine. Otherwise, it won't match. We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED note. Get the source and destination of INSN. If more than one, can't combine. */ if (GET_CODE (PATTERN (insn)) == SET) set = PATTERN (insn); else if (GET_CODE (PATTERN (insn)) == PARALLEL && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET) { for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) { rtx elt = XVECEXP (PATTERN (insn), 0, i); rtx note; switch (GET_CODE (elt)) { /* This is important to combine floating point insns for the SH4 port. */ case USE: /* Combining an isolated USE doesn't make sense. We depend here on combinable_i3pat to reject them. */ /* The code below this loop only verifies that the inputs of the SET in INSN do not change. We call reg_set_between_p to verify that the REG in the USE does not change between I3 and INSN. If the USE in INSN was for a pseudo register, the matching insn pattern will likely match any register; combining this with any other USE would only be safe if we knew that the used registers have identical values, or if there was something to tell them apart, e.g. different modes. For now, we forgo such complicated tests and simply disallow combining of USES of pseudo registers with any other USE. */ if (REG_P (XEXP (elt, 0)) && GET_CODE (PATTERN (i3)) == PARALLEL) { rtx i3pat = PATTERN (i3); int i = XVECLEN (i3pat, 0) - 1; unsigned int regno = REGNO (XEXP (elt, 0)); do { rtx i3elt = XVECEXP (i3pat, 0, i); if (GET_CODE (i3elt) == USE && REG_P (XEXP (i3elt, 0)) && (REGNO (XEXP (i3elt, 0)) == regno ? reg_set_between_p (XEXP (elt, 0), PREV_INSN (insn), i3) : regno >= FIRST_PSEUDO_REGISTER)) return 0; } while (--i >= 0); } break; /* We can ignore CLOBBERs. */ case CLOBBER: break; case SET: /* Ignore SETs whose result isn't used but not those that have side-effects. */ if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt)) && (!(note = find_reg_note (insn, REG_EH_REGION, NULL_RTX)) || INTVAL (XEXP (note, 0)) <= 0) && ! 
side_effects_p (elt)) break; /* If we have already found a SET, this is a second one and so we cannot combine with this insn. */ if (set) return 0; set = elt; break; default: /* Anything else means we can't combine. */ return 0; } } if (set == 0 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs, so don't do anything with it. */ || GET_CODE (SET_SRC (set)) == ASM_OPERANDS) return 0; } else return 0; if (set == 0) return 0; set = expand_field_assignment (set); src = SET_SRC (set), dest = SET_DEST (set); /* Don't eliminate a store in the stack pointer. */ if (dest == stack_pointer_rtx /* Don't combine with an insn that sets a register to itself if it has a REG_EQUAL note. This may be part of a REG_NO_CONFLICT sequence. */ || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX)) /* Can't merge an ASM_OPERANDS. */ || GET_CODE (src) == ASM_OPERANDS /* Can't merge a function call. */ || GET_CODE (src) == CALL /* Don't eliminate a function call argument. */ || (GET_CODE (i3) == CALL_INSN && (find_reg_fusage (i3, USE, dest) || (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER && global_regs[REGNO (dest)]))) /* Don't substitute into an incremented register. */ || FIND_REG_INC_NOTE (i3, dest) || (succ && FIND_REG_INC_NOTE (succ, dest)) #if 0 /* Don't combine the end of a libcall into anything. */ /* ??? This gives worse code, and appears to be unnecessary, since no pass after flow uses REG_LIBCALL/REG_RETVAL notes. Local-alloc does use REG_RETVAL notes for noconflict blocks, but other code here makes sure that those insns don't disappear. */ || find_reg_note (insn, REG_RETVAL, NULL_RTX) #endif /* Make sure that DEST is not used after SUCC but before I3. */ || (succ && ! all_adjacent && reg_used_between_p (dest, succ, i3)) /* Make sure that the value that is to be substituted for the register does not use any registers whose values alter in between. However, If the insns are adjacent, a use can't cross a set even though we think it might (this can happen for a sequence of insns each setting the same destination; last_set of that register might point to a NOTE). If INSN has a REG_EQUIV note, the register is always equivalent to the memory so the substitution is valid even if there are intervening stores. Also, don't move a volatile asm or UNSPEC_VOLATILE across any other insns. */ || (! all_adjacent && (((!MEM_P (src) || ! find_reg_note (insn, REG_EQUIV, src)) && use_crosses_set_p (src, INSN_CUID (insn))) || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src)) || GET_CODE (src) == UNSPEC_VOLATILE)) /* If there is a REG_NO_CONFLICT note for DEST in I3 or SUCC, we get better register allocation by not doing the combine. */ || find_reg_note (i3, REG_NO_CONFLICT, dest) || (succ && find_reg_note (succ, REG_NO_CONFLICT, dest)) /* Don't combine across a CALL_INSN, because that would possibly change whether the life span of some REGs crosses calls or not, and it is a pain to update that information. Exception: if source is a constant, moving it later can't hurt. Accept that special case, because it helps -fforce-addr a lot. */ || (INSN_CUID (insn) < last_call_cuid && ! CONSTANT_P (src))) return 0; /* DEST must either be a REG or CC0. */ if (REG_P (dest)) { /* If register alignment is being enforced for multi-word items in all cases except for parameters, it is possible to have a register copy insn referencing a hard register that is not allowed to contain the mode being copied and which would not be valid as an operand of most insns. 
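(An illustrative case is a DImode argument arriving in an odd-numbered hard register on a target that otherwise requires even-register alignment for DImode values; the copy out of that hard register is only valid because parameters are exempt from the alignment rule.)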
Eliminate this problem by not combining with such an insn. Also, on some machines we don't want to extend the life of a hard register. */ if (REG_P (src) && ((REGNO (dest) < FIRST_PSEUDO_REGISTER && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest))) /* Don't extend the life of a hard register unless it is user variable (if we have few registers) or it can't fit into the desired register (meaning something special is going on). Also avoid substituting a return register into I3, because reload can't handle a conflict with constraints of other inputs. */ || (REGNO (src) < FIRST_PSEUDO_REGISTER && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src))))) return 0; } else if (GET_CODE (dest) != CC0) return 0; if (GET_CODE (PATTERN (i3)) == PARALLEL) for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--) if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER) { /* Don't substitute for a register intended as a clobberable operand. */ rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0); if (rtx_equal_p (reg, dest)) return 0; /* If the clobber represents an earlyclobber operand, we must not substitute an expression containing the clobbered register. As we do not analyse the constraint strings here, we have to make the conservative assumption. However, if the register is a fixed hard reg, the clobber cannot represent any operand; we leave it up to the machine description to either accept or reject use-and-clobber patterns. */ if (!REG_P (reg) || REGNO (reg) >= FIRST_PSEUDO_REGISTER || !fixed_regs[REGNO (reg)]) if (reg_overlap_mentioned_p (reg, src)) return 0; } /* If INSN contains anything volatile, or is an `asm' (whether volatile or not), reject, unless nothing volatile comes between it and I3 */ if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src)) { /* Make sure succ doesn't contain a volatile reference. */ if (succ != 0 && volatile_refs_p (PATTERN (succ))) return 0; for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p)) if (INSN_P (p) && p != succ && volatile_refs_p (PATTERN (p))) return 0; } /* If INSN is an asm, and DEST is a hard register, reject, since it has to be an explicit register variable, and was chosen for a reason. */ if (GET_CODE (src) == ASM_OPERANDS && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER) return 0; /* If there are any volatile insns between INSN and I3, reject, because they might affect machine state. */ for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p)) if (INSN_P (p) && p != succ && volatile_insn_p (PATTERN (p))) return 0; /* If INSN or I2 contains an autoincrement or autodecrement, make sure that register is not used between there and I3, and not already used in I3 either. Also insist that I3 not be a jump; if it were one and the incremented register were spilled, we would lose. */ #ifdef AUTO_INC_DEC for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_INC && (GET_CODE (i3) == JUMP_INSN || reg_used_between_p (XEXP (link, 0), insn, i3) || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3)))) return 0; #endif #ifdef HAVE_cc0 /* Don't combine an insn that follows a CC0-setting insn. An insn that uses CC0 must not be separated from the one that sets it. We do, however, allow I2 to follow a CC0-setting insn if that insn is passed as I1; in that case it will be deleted also. We also allow combining in this case if all the insns are adjacent because that would leave the two CC0 insns adjacent as well. 
It would be more logical to test whether CC0 occurs inside I1 or I2, but that would be much slower, and this ought to be equivalent. */ p = prev_nonnote_insn (insn); if (p && p != pred && GET_CODE (p) == INSN && sets_cc0_p (PATTERN (p)) && ! all_adjacent) return 0; #endif /* If we get here, we have passed all the tests and the combination is to be allowed. */ *pdest = dest; *psrc = src; return 1; } /* LOC is the location within I3 that contains its pattern or the component of a PARALLEL of the pattern. We validate that it is valid for combining. One problem is if I3 modifies its output, as opposed to replacing it entirely, we can't allow the output to contain I2DEST or I1DEST as doing so would produce an insn that is not equivalent to the original insns. Consider: (set (reg:DI 101) (reg:DI 100)) (set (subreg:SI (reg:DI 101) 0) ) This is NOT equivalent to: (parallel [(set (subreg:SI (reg:DI 100) 0) ) (set (reg:DI 101) (reg:DI 100))]) Not only does this modify 100 (in which case it might still be valid if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100. We can also run into a problem if I2 sets a register that I1 uses and I1 gets directly substituted into I3 (not via I2). In that case, we would be getting the wrong value of I2DEST into I3, so we must reject the combination. This case occurs when I2 and I1 both feed into I3, rather than when I1 feeds into I2, which feeds into I3. If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source of a SET must prevent combination from occurring. Before doing the above check, we first try to expand a field assignment into a set of logical operations. If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which we place a register that is both set and used within I3. If more than one such register is detected, we fail. Return 1 if the combination is valid, zero otherwise. */ static int combinable_i3pat (rtx i3, rtx *loc, rtx i2dest, rtx i1dest, int i1_not_in_src, rtx *pi3dest_killed) { rtx x = *loc; if (GET_CODE (x) == SET) { rtx set = x ; rtx dest = SET_DEST (set); rtx src = SET_SRC (set); rtx inner_dest = dest; while (GET_CODE (inner_dest) == STRICT_LOW_PART || GET_CODE (inner_dest) == SUBREG || GET_CODE (inner_dest) == ZERO_EXTRACT) inner_dest = XEXP (inner_dest, 0); /* Check for the case where I3 modifies its output, as discussed above. We don't want to prevent pseudos from being combined into the address of a MEM, so only prevent the combination if i1 or i2 set the same MEM. */ if ((inner_dest != dest && (!MEM_P (inner_dest) || rtx_equal_p (i2dest, inner_dest) || (i1dest && rtx_equal_p (i1dest, inner_dest))) && (reg_overlap_mentioned_p (i2dest, inner_dest) || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest)))) /* This is the same test done in can_combine_p except we can't test all_adjacent; we don't have to, since this instruction will stay in place, thus we are not considering increasing the lifetime of INNER_DEST. Also, if this insn sets a function argument, combining it with something that might need a spill could clobber a previous function argument; the all_adjacent test in can_combine_p also checks this; here, we do a more specific test for this case. */ || (REG_P (inner_dest) && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER && (! HARD_REGNO_MODE_OK (REGNO (inner_dest), GET_MODE (inner_dest)))) || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))) return 0; /* If DEST is used in I3, it is being killed in this insn, so record that for later. 
Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the STACK_POINTER_REGNUM, since these are always considered to be live. Similarly for ARG_POINTER_REGNUM if it is fixed. */ if (pi3dest_killed && REG_P (dest) && reg_referenced_p (dest, PATTERN (i3)) && REGNO (dest) != FRAME_POINTER_REGNUM #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM && REGNO (dest) != HARD_FRAME_POINTER_REGNUM #endif #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM && (REGNO (dest) != ARG_POINTER_REGNUM || ! fixed_regs [REGNO (dest)]) #endif && REGNO (dest) != STACK_POINTER_REGNUM) { if (*pi3dest_killed) return 0; *pi3dest_killed = dest; } } else if (GET_CODE (x) == PARALLEL) { int i; for (i = 0; i < XVECLEN (x, 0); i++) if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i1_not_in_src, pi3dest_killed)) return 0; } return 1; } /* Return 1 if X is an arithmetic expression that contains a multiplication or division. We don't count multiplications by powers of two here. */ static int contains_muldiv (rtx x) { switch (GET_CODE (x)) { case MOD: case DIV: case UMOD: case UDIV: return 1; case MULT: return ! (GET_CODE (XEXP (x, 1)) == CONST_INT && exact_log2 (INTVAL (XEXP (x, 1))) >= 0); default: if (BINARY_P (x)) return contains_muldiv (XEXP (x, 0)) || contains_muldiv (XEXP (x, 1)); if (UNARY_P (x)) return contains_muldiv (XEXP (x, 0)); return 0; } } /* Determine whether INSN can be used in a combination. Return nonzero if not. This is used in try_combine to detect early some cases where we can't perform combinations. */ static int cant_combine_insn_p (rtx insn) { rtx set; rtx src, dest; /* If this isn't really an insn, we can't do anything. This can occur when flow deletes an insn that it has merged into an auto-increment address. */ if (! INSN_P (insn)) return 1; /* Never combine loads and stores involving hard regs that are likely to be spilled. The register allocator can usually handle such reg-reg moves by tying. If we allow the combiner to make substitutions of likely-spilled regs, we may abort in reload. As an exception, we allow combinations involving fixed regs; these are not available to the register allocator so there's no risk involved. */ set = single_set (insn); if (! set) return 0; src = SET_SRC (set); dest = SET_DEST (set); if (GET_CODE (src) == SUBREG) src = SUBREG_REG (src); if (GET_CODE (dest) == SUBREG) dest = SUBREG_REG (dest); if (REG_P (src) && REG_P (dest) && ((REGNO (src) < FIRST_PSEUDO_REGISTER && ! fixed_regs[REGNO (src)] && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (src)))) || (REGNO (dest) < FIRST_PSEUDO_REGISTER && ! fixed_regs[REGNO (dest)] && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (dest)))))) return 1; return 0; } /* Adjust INSN after we made a change to its destination. Changing the destination can invalidate notes that say something about the results of the insn and a LOG_LINK pointing to the insn. */ static void adjust_for_new_dest (rtx insn) { rtx *loc; /* For notes, be conservative and simply remove them. */ loc = &REG_NOTES (insn); while (*loc) { enum reg_note kind = REG_NOTE_KIND (*loc); if (kind == REG_EQUAL || kind == REG_EQUIV) *loc = XEXP (*loc, 1); else loc = &XEXP (*loc, 1); } /* The new insn will have a destination that was previously the destination of an insn just above it. Call distribute_links to make a LOG_LINK from the next use of that destination. */ distribute_links (gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX)); } /* Try to combine the insns I1 and I2 into I3. Here I1 and I2 appear earlier than I3.
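(The candidate I2 and I1 insns are found through the LOG_LINKS of I3, as set up by combine_instructions.)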
I1 can be zero; then we combine just I2 into I3. If we are combining three insns and the resulting insn is not recognized, try splitting it into two insns. If that happens, I2 and I3 are retained and I1 is pseudo-deleted by turning it into a NOTE. Otherwise, I1 and I2 are pseudo-deleted. Return 0 if the combination does not work. Then nothing is changed. If we did the combination, return the insn at which combine should resume scanning. Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a new direct jump instruction. */ static rtx try_combine (rtx i3, rtx i2, rtx i1, int *new_direct_jump_p) { /* New patterns for I3 and I2, respectively. */ rtx newpat, newi2pat = 0; int substed_i2 = 0, substed_i1 = 0; /* Indicates need to preserve SET in I1 or I2 in I3 if it is not dead. */ int added_sets_1, added_sets_2; /* Total number of SETs to put into I3. */ int total_sets; /* Nonzero if I2's body now appears in I3. */ int i2_is_used; /* INSN_CODEs for new I3, new I2, and user of condition code. */ int insn_code_number, i2_code_number = 0, other_code_number = 0; /* Contains I3 if the destination of I3 is used in its source, which means that the old life of I3 is being killed. If that usage is placed into I2 and not in I3, a REG_DEAD note must be made. */ rtx i3dest_killed = 0; /* SET_DEST and SET_SRC of I2 and I1. */ rtx i2dest, i2src, i1dest = 0, i1src = 0; /* PATTERN (I2), or a copy of it in certain cases. */ rtx i2pat; /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */ int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0; int i1_feeds_i3 = 0; /* Notes that must be added to REG_NOTES in I3 and I2. */ rtx new_i3_notes, new_i2_notes; /* Notes that we substituted I3 into I2 instead of the normal case. */ int i3_subst_into_i2 = 0; /* Notes that I1, I2 or I3 is a MULT operation. */ int have_mult = 0; int maxreg; rtx temp; rtx link; int i; /* Exit early if one of the insns involved can't be used for combinations. */ if (cant_combine_insn_p (i3) || cant_combine_insn_p (i2) || (i1 && cant_combine_insn_p (i1)) /* We also can't do anything if I3 has a REG_LIBCALL note since we don't want to disrupt the contiguity of a libcall. */ #if 0 /* ??? This gives worse code, and appears to be unnecessary, since no pass after flow uses REG_LIBCALL/REG_RETVAL notes. */ || find_reg_note (i3, REG_LIBCALL, NULL_RTX) #endif ) return 0; combine_attempts++; undobuf.other_insn = 0; /* Reset the hard register usage information. */ CLEAR_HARD_REG_SET (newpat_used_regs); /* If I1 and I2 both feed I3, they can be in any order. To simplify the code below, set I1 to be the earlier of the two insns. */ if (i1 && INSN_CUID (i1) > INSN_CUID (i2)) temp = i1, i1 = i2, i2 = temp; added_links_insn = 0; /* First check for one important special-case that the code below will not handle. Namely, the case where I1 is zero, I2 is a PARALLEL and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case, we may be able to replace that destination with the destination of I3. This occurs in the common code where we compute both a quotient and remainder into a structure, in which case we want to do the computation directly into the structure to avoid register-register copies. Note that this case handles both multiple sets in I2 and also cases where I2 has a number of CLOBBER or PARALLELs. We make very conservative checks below and only try to handle the most common cases of this. For example, we only handle the case where I2 and I3 are adjacent to avoid making difficult register usage tests. 
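An illustrative instance: I2 is (parallel [(set (reg 70) (div ...)) (set (reg 71) (mod ...))]) and I3 is (set (mem:SI (reg 100)) (reg 71)) with reg 71 dying in I3; the PARALLEL is then rewritten to compute the remainder directly into the memory location.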
*/ if (i1 == 0 && GET_CODE (i3) == INSN && GET_CODE (PATTERN (i3)) == SET && REG_P (SET_SRC (PATTERN (i3))) && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3))) && GET_CODE (PATTERN (i2)) == PARALLEL && ! side_effects_p (SET_DEST (PATTERN (i3))) /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code below would need to check what is inside (and reg_overlap_mentioned_p doesn't support those codes anyway). Don't allow those destinations; the resulting insn isn't likely to be recognized anyway. */ && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)), SET_DEST (PATTERN (i3))) && next_real_insn (i2) == i3) { rtx p2 = PATTERN (i2); /* Make sure that the destination of I3, which we are going to substitute into one output of I2, is not used within another output of I2. We must avoid making this: (parallel [(set (mem (reg 69)) ...) (set (reg 69) ...)]) which is not well-defined as to order of actions. (Besides, reload can't handle output reloads for this.) The problem can also happen if the dest of I3 is a memory ref, if another dest in I2 is an indirect memory ref. */ for (i = 0; i < XVECLEN (p2, 0); i++) if ((GET_CODE (XVECEXP (p2, 0, i)) == SET || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER) && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)), SET_DEST (XVECEXP (p2, 0, i)))) break; if (i == XVECLEN (p2, 0)) for (i = 0; i < XVECLEN (p2, 0); i++) if ((GET_CODE (XVECEXP (p2, 0, i)) == SET || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER) && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3))) { combine_merges++; subst_insn = i3; subst_low_cuid = INSN_CUID (i2); added_sets_2 = added_sets_1 = 0; i2dest = SET_SRC (PATTERN (i3)); /* Replace the dest in I2 with our dest and make the resulting insn the new pattern for I3. Then skip to where we validate the pattern. Everything was set up above. */ SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3))); newpat = p2; i3_subst_into_i2 = 1; goto validate_replacement; } } /* If I2 is setting a double-word pseudo to a constant and I3 is setting one of those words to another constant, merge them by making a new constant. */ if (i1 == 0 && (temp = single_set (i2)) != 0 && (GET_CODE (SET_SRC (temp)) == CONST_INT || GET_CODE (SET_SRC (temp)) == CONST_DOUBLE) && REG_P (SET_DEST (temp)) && GET_MODE_CLASS (GET_MODE (SET_DEST (temp))) == MODE_INT && GET_MODE_SIZE (GET_MODE (SET_DEST (temp))) == 2 * UNITS_PER_WORD && GET_CODE (PATTERN (i3)) == SET && GET_CODE (SET_DEST (PATTERN (i3))) == SUBREG && SUBREG_REG (SET_DEST (PATTERN (i3))) == SET_DEST (temp) && GET_MODE_CLASS (GET_MODE (SET_DEST (PATTERN (i3)))) == MODE_INT && GET_MODE_SIZE (GET_MODE (SET_DEST (PATTERN (i3)))) == UNITS_PER_WORD && GET_CODE (SET_SRC (PATTERN (i3))) == CONST_INT) { HOST_WIDE_INT lo, hi; if (GET_CODE (SET_SRC (temp)) == CONST_INT) lo = INTVAL (SET_SRC (temp)), hi = lo < 0 ? -1 : 0; else { lo = CONST_DOUBLE_LOW (SET_SRC (temp)); hi = CONST_DOUBLE_HIGH (SET_SRC (temp)); } if (subreg_lowpart_p (SET_DEST (PATTERN (i3)))) { /* We don't handle the case of the target word being wider than a host wide int. 
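(When the word is not wider, the merge itself is straightforward: for instance, a DImode pseudo set to (const_int 0) by I2 whose low word is then set to (const_int 5) by I3 is rewritten below to use the single merged constant 5.)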
*/ if (HOST_BITS_PER_WIDE_INT < BITS_PER_WORD) abort (); lo &= ~(UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1); lo |= (INTVAL (SET_SRC (PATTERN (i3))) & (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1)); } else if (HOST_BITS_PER_WIDE_INT == BITS_PER_WORD) hi = INTVAL (SET_SRC (PATTERN (i3))); else if (HOST_BITS_PER_WIDE_INT >= 2 * BITS_PER_WORD) { int sign = -(int) ((unsigned HOST_WIDE_INT) lo >> (HOST_BITS_PER_WIDE_INT - 1)); lo &= ~ (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1)); lo |= (UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (INTVAL (SET_SRC (PATTERN (i3))))); if (hi == sign) hi = lo < 0 ? -1 : 0; } else /* We don't handle the case of the higher word not fitting entirely in either hi or lo. */ abort (); combine_merges++; subst_insn = i3; subst_low_cuid = INSN_CUID (i2); added_sets_2 = added_sets_1 = 0; i2dest = SET_DEST (temp); SUBST (SET_SRC (temp), immed_double_const (lo, hi, GET_MODE (SET_DEST (temp)))); newpat = PATTERN (i2); goto validate_replacement; } #ifndef HAVE_cc0 /* If we have no I1 and I2 looks like: (parallel [(set (reg:CC X) (compare:CC OP (const_int 0))) (set Y OP)]) make up a dummy I1 that is (set Y OP) and change I2 to be (set (reg:CC X) (compare:CC Y (const_int 0))) (We can ignore any trailing CLOBBERs.) This undoes a previous combination and allows us to match a branch-and- decrement insn. */ if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL && XVECLEN (PATTERN (i2), 0) >= 2 && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)))) == MODE_CC) && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, 1))) && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0), SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))) { for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--) if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER) break; if (i == 1) { /* We make I1 with the same INSN_UID as I2. This gives it the same INSN_CUID for value tracking. Our fake I1 will never appear in the insn stream so giving it the same INSN_UID as I2 will not cause a problem. */ i1 = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2, BLOCK_FOR_INSN (i2), INSN_LOCATOR (i2), XVECEXP (PATTERN (i2), 0, 1), -1, NULL_RTX, NULL_RTX); SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0)); SUBST (XEXP (SET_SRC (PATTERN (i2)), 0), SET_DEST (PATTERN (i1))); } } #endif /* Verify that I2 and I1 are valid for combining. */ if (! can_combine_p (i2, i3, i1, NULL_RTX, &i2dest, &i2src) || (i1 && ! can_combine_p (i1, i3, NULL_RTX, i2, &i1dest, &i1src))) { undo_all (); return 0; } /* Record whether I2DEST is used in I2SRC and similarly for the other cases. Knowing this will help in register status updating below. */ i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src); i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src); i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src); /* See if I1 directly feeds into I3. It does if I1DEST is not used in I2SRC. */ i1_feeds_i3 = i1 && ! reg_overlap_mentioned_p (i1dest, i2src); /* Ensure that I3's pattern can be the destination of combines. */ if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i1 && i2dest_in_i1src && i1_feeds_i3, &i3dest_killed)) { undo_all (); return 0; } /* See if any of the insns is a MULT operation. Unless one is, we will reject a combination that is, since it must be slower. 
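In other words, we refuse to synthesize a MULT that was not present in any of the original insns (for example by rewriting a shift-and-add sequence as a multiplication).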
Be conservative here. */ if (GET_CODE (i2src) == MULT || (i1 != 0 && GET_CODE (i1src) == MULT) || (GET_CODE (PATTERN (i3)) == SET && GET_CODE (SET_SRC (PATTERN (i3))) == MULT)) have_mult = 1; /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd. We used to do this EXCEPT in one case: I3 has a post-inc in an output operand. However, that exception can give rise to insns like mov r3,(r3)+ which is a famous insn on the PDP-11 where the value of r3 used as the source was model-dependent. Avoid this sort of thing. */ #if 0 if (!(GET_CODE (PATTERN (i3)) == SET && REG_P (SET_SRC (PATTERN (i3))) && MEM_P (SET_DEST (PATTERN (i3))) && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC))) /* It's not the exception. */ #endif #ifdef AUTO_INC_DEC for (link = REG_NOTES (i3); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_INC && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2)) || (i1 != 0 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1))))) { undo_all (); return 0; } #endif /* See if the SETs in I1 or I2 need to be kept around in the merged instruction: whenever the value set there is still needed past I3. For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3. For the SET in I1, we have two cases: If I1 and I2 independently feed into I3, the set in I1 needs to be kept around if I1DEST dies or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set in I1 needs to be kept around unless I1DEST dies or is set in either I2 or I3. We can distinguish these cases by seeing if I2SRC mentions I1DEST. If so, we know I1 feeds into I2. */ added_sets_2 = ! dead_or_set_p (i3, i2dest); added_sets_1 = i1 && ! (i1_feeds_i3 ? dead_or_set_p (i3, i1dest) : (dead_or_set_p (i3, i1dest) || dead_or_set_p (i2, i1dest))); /* If the set in I2 needs to be kept around, we must make a copy of PATTERN (I2), so that when we substitute I1SRC for I1DEST in PATTERN (I2), we are only substituting for the original I1DEST, not into an already-substituted copy. This also prevents making self-referential rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to I2DEST. */ i2pat = (GET_CODE (PATTERN (i2)) == PARALLEL ? gen_rtx_SET (VOIDmode, i2dest, i2src) : PATTERN (i2)); if (added_sets_2) i2pat = copy_rtx (i2pat); combine_merges++; /* Substitute in the latest insn for the regs set by the earlier ones. */ maxreg = max_reg_num (); subst_insn = i3; /* It is possible that the source of I2 or I1 may be performing an unneeded operation, such as a ZERO_EXTEND of something that is known to have the high part zero. Handle that case by letting subst look at the innermost one of them. Another way to do this would be to have a function that tries to simplify a single insn instead of merging two or more insns. We don't do this because of the potential of infinite loops and because of the potential extra memory required. However, doing it the way we are is a bit of a kludge and doesn't catch all cases. But only do this if -fexpensive-optimizations since it slows things down and doesn't usually win. */ if (flag_expensive_optimizations) { /* Pass pc_rtx so no substitutions are done, just simplifications. 
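Using pc_rtx as both the expression to replace and its replacement is safe because pc_rtx cannot occur inside I1SRC or I2SRC, so subst finds nothing to substitute and merely re-simplifies the expression.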
*/ if (i1) { subst_low_cuid = INSN_CUID (i1); i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0); } else { subst_low_cuid = INSN_CUID (i2); i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0); } } #ifndef HAVE_cc0 /* Many machines that don't use CC0 have insns that can both perform an arithmetic operation and set the condition code. These operations will be represented as a PARALLEL with the first element of the vector being a COMPARE of an arithmetic operation with the constant zero. The second element of the vector will set some pseudo to the result of the same arithmetic operation. If we simplify the COMPARE, we won't match such a pattern and so will generate an extra insn. Here we test for this case, where both the comparison and the operation result are needed, and make the PARALLEL by just replacing I2DEST in I3SRC with I2SRC. Later we will make the PARALLEL that contains I2. */ if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE && XEXP (SET_SRC (PATTERN (i3)), 1) == const0_rtx && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest)) { #ifdef SELECT_CC_MODE rtx *cc_use; enum machine_mode compare_mode; #endif newpat = PATTERN (i3); SUBST (XEXP (SET_SRC (newpat), 0), i2src); i2_is_used = 1; #ifdef SELECT_CC_MODE /* See if a COMPARE with the operand we substituted in should be done with the mode that is currently being used. If not, do the same processing we do in `subst' for a SET; namely, if the destination is used only once, try to replace it with a register of the proper mode and also replace the COMPARE. */ if (undobuf.other_insn == 0 && (cc_use = find_single_use (SET_DEST (newpat), i3, &undobuf.other_insn)) && ((compare_mode = SELECT_CC_MODE (GET_CODE (*cc_use), i2src, const0_rtx)) != GET_MODE (SET_DEST (newpat)))) { unsigned int regno = REGNO (SET_DEST (newpat)); rtx new_dest = gen_rtx_REG (compare_mode, regno); if (regno < FIRST_PSEUDO_REGISTER || (REG_N_SETS (regno) == 1 && ! added_sets_2 && ! REG_USERVAR_P (SET_DEST (newpat)))) { if (regno >= FIRST_PSEUDO_REGISTER) SUBST (regno_reg_rtx[regno], new_dest); SUBST (SET_DEST (newpat), new_dest); SUBST (XEXP (*cc_use, 0), new_dest); SUBST (SET_SRC (newpat), gen_rtx_COMPARE (compare_mode, i2src, const0_rtx)); } else undobuf.other_insn = 0; } #endif } else #endif { n_pseudo_occurrences = 0; /* `subst' counts here */ /* If I1 feeds into I2 (not into I3) and I1DEST is in I1SRC, we need to make a unique copy of I2SRC each time we substitute it to avoid self-referential rtl. */ subst_low_cuid = INSN_CUID (i2); newpat = subst (PATTERN (i3), i2dest, i2src, 0, ! i1_feeds_i3 && i1dest_in_i1src); substed_i2 = 1; /* Record whether i2's body now appears within i3's body. */ i2_is_used = n_pseudo_occurrences; } /* If we already got a failure, don't try to do more. Otherwise, try to substitute in I1 if we have it. */ if (i1 && GET_CODE (newpat) != CLOBBER) { /* Before we can do this substitution, we must redo the test done above (see detailed comments there) that ensures that I1DEST isn't mentioned in any SETs in NEWPAT that are field assignments. */ if (! combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX, 0, (rtx*) 0)) { undo_all (); return 0; } n_pseudo_occurrences = 0; subst_low_cuid = INSN_CUID (i1); newpat = subst (newpat, i1dest, i1src, 0, 0); substed_i1 = 1; } /* Fail if an autoincrement side-effect has been duplicated. Be careful to count all the ways that I2SRC and I1SRC can be used. 
*/ if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0 && i2_is_used + added_sets_2 > 1) || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0 && (n_pseudo_occurrences + added_sets_1 + (added_sets_2 && ! i1_feeds_i3) > 1)) /* Fail if we tried to make a new register (we used to abort, but there's really no reason to). */ || max_reg_num () != maxreg /* Fail if we couldn't do something and have a CLOBBER. */ || GET_CODE (newpat) == CLOBBER /* Fail if this new pattern is a MULT and we didn't have one before at the outer level. */ || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT && ! have_mult)) { undo_all (); return 0; } /* If the actions of the earlier insns must be kept in addition to substituting them into the latest one, we must make a new PARALLEL for the latest insn to hold additional the SETs. */ if (added_sets_1 || added_sets_2) { combine_extras++; if (GET_CODE (newpat) == PARALLEL) { rtvec old = XVEC (newpat, 0); total_sets = XVECLEN (newpat, 0) + added_sets_1 + added_sets_2; newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets)); memcpy (XVEC (newpat, 0)->elem, &old->elem[0], sizeof (old->elem[0]) * old->num_elem); } else { rtx old = newpat; total_sets = 1 + added_sets_1 + added_sets_2; newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets)); XVECEXP (newpat, 0, 0) = old; } if (added_sets_1) XVECEXP (newpat, 0, --total_sets) = (GET_CODE (PATTERN (i1)) == PARALLEL ? gen_rtx_SET (VOIDmode, i1dest, i1src) : PATTERN (i1)); if (added_sets_2) { /* If there is no I1, use I2's body as is. We used to also not do the subst call below if I2 was substituted into I3, but that could lose a simplification. */ if (i1 == 0) XVECEXP (newpat, 0, --total_sets) = i2pat; else /* See comment where i2pat is assigned. */ XVECEXP (newpat, 0, --total_sets) = subst (i2pat, i1dest, i1src, 0, 0); } } /* We come here when we are replacing a destination in I2 with the destination of I3. */ validate_replacement: /* Note which hard regs this insn has as inputs. */ mark_used_regs_combine (newpat); /* Is the result of combination a valid instruction? */ insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); /* If the result isn't valid, see if it is a PARALLEL of two SETs where the second SET's destination is a register that is unused and isn't marked as an instruction that might trap in an EH region. In that case, we just need the first SET. This can occur when simplifying a divmod insn. We *must* test for this case here because the code below that splits two independent SETs doesn't handle this case correctly when it updates the register status. It's pointless doing this if we originally had two sets, one from i3, and one from i2. Combining then splitting the parallel results in the original i2 again plus an invalid insn (which we delete). The net effect is only to move instructions around, which makes debug info less accurate. Also check the case where the first SET's destination is unused. That would not cause incorrect code, but does cause an unneeded insn to remain. 
*/ if (insn_code_number < 0 && !(added_sets_2 && i1 == 0) && GET_CODE (newpat) == PARALLEL && XVECLEN (newpat, 0) == 2 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET && GET_CODE (XVECEXP (newpat, 0, 1)) == SET && asm_noperands (newpat) < 0) { rtx set0 = XVECEXP (newpat, 0, 0); rtx set1 = XVECEXP (newpat, 0, 1); rtx note; if (((REG_P (SET_DEST (set1)) && find_reg_note (i3, REG_UNUSED, SET_DEST (set1))) || (GET_CODE (SET_DEST (set1)) == SUBREG && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1))))) && (!(note = find_reg_note (i3, REG_EH_REGION, NULL_RTX)) || INTVAL (XEXP (note, 0)) <= 0) && ! side_effects_p (SET_SRC (set1))) { newpat = set0; insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); } else if (((REG_P (SET_DEST (set0)) && find_reg_note (i3, REG_UNUSED, SET_DEST (set0))) || (GET_CODE (SET_DEST (set0)) == SUBREG && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set0))))) && (!(note = find_reg_note (i3, REG_EH_REGION, NULL_RTX)) || INTVAL (XEXP (note, 0)) <= 0) && ! side_effects_p (SET_SRC (set0))) { newpat = set1; insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); if (insn_code_number >= 0) { /* If we will be able to accept this, we have made a change to the destination of I3. This requires us to do a few adjustments. */ PATTERN (i3) = newpat; adjust_for_new_dest (i3); } } } /* If we were combining three insns and the result is a simple SET with no ASM_OPERANDS that wasn't recognized, try to split it into two insns. There are two ways to do this. It can be split using a machine-specific method (like when you have an addition of a large constant) or by combine in the function find_split_point. */ if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET && asm_noperands (newpat) < 0) { rtx m_split, *split; rtx ni2dest = i2dest; /* See if the MD file can split NEWPAT. If it can't, see if letting it use I2DEST as a scratch register will help. In the latter case, convert I2DEST to the mode of the source of NEWPAT if we can. */ m_split = split_insns (newpat, i3); /* We can only use I2DEST as a scratch reg if it doesn't overlap any inputs of NEWPAT. */ /* ??? If I2DEST is not safe, and I1DEST exists, then it would be possible to try that as a scratch reg. This would require adding more code to make it work though. */ if (m_split == 0 && ! reg_overlap_mentioned_p (ni2dest, newpat)) { /* If I2DEST is a hard register or the only use of a pseudo, we can change its mode. */ if (GET_MODE (SET_DEST (newpat)) != GET_MODE (i2dest) && GET_MODE (SET_DEST (newpat)) != VOIDmode && REG_P (i2dest) && (REGNO (i2dest) < FIRST_PSEUDO_REGISTER || (REG_N_SETS (REGNO (i2dest)) == 1 && ! added_sets_2 && ! REG_USERVAR_P (i2dest)))) ni2dest = gen_rtx_REG (GET_MODE (SET_DEST (newpat)), REGNO (i2dest)); m_split = split_insns (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, newpat, gen_rtx_CLOBBER (VOIDmode, ni2dest))), i3); /* If the split with the mode-changed register didn't work, try the original register. */ if (! m_split && ni2dest != i2dest) { ni2dest = i2dest; m_split = split_insns (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, newpat, gen_rtx_CLOBBER (VOIDmode, i2dest))), i3); } } if (m_split && NEXT_INSN (m_split) == NULL_RTX) { m_split = PATTERN (m_split); insn_code_number = recog_for_combine (&m_split, i3, &new_i3_notes); if (insn_code_number >= 0) newpat = m_split; } else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX && (next_real_insn (i2) == i3 || ! 
use_crosses_set_p (PATTERN (m_split), INSN_CUID (i2)))) { rtx i2set, i3set; rtx newi3pat = PATTERN (NEXT_INSN (m_split)); newi2pat = PATTERN (m_split); i3set = single_set (NEXT_INSN (m_split)); i2set = single_set (m_split); /* In case we changed the mode of I2DEST, replace it in the pseudo-register table here. We can't do it above in case this code doesn't get executed and we do a split the other way. */ if (REGNO (i2dest) >= FIRST_PSEUDO_REGISTER) SUBST (regno_reg_rtx[REGNO (i2dest)], ni2dest); i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); /* If I2 or I3 has multiple SETs, we won't know how to track register status, so don't use these insns. If I2's destination is used between I2 and I3, we also can't use these insns. */ if (i2_code_number >= 0 && i2set && i3set && (next_real_insn (i2) == i3 || ! reg_used_between_p (SET_DEST (i2set), i2, i3))) insn_code_number = recog_for_combine (&newi3pat, i3, &new_i3_notes); if (insn_code_number >= 0) newpat = newi3pat; /* It is possible that both insns now set the destination of I3. If so, we must show an extra use of it. */ if (insn_code_number >= 0) { rtx new_i3_dest = SET_DEST (i3set); rtx new_i2_dest = SET_DEST (i2set); while (GET_CODE (new_i3_dest) == ZERO_EXTRACT || GET_CODE (new_i3_dest) == STRICT_LOW_PART || GET_CODE (new_i3_dest) == SUBREG) new_i3_dest = XEXP (new_i3_dest, 0); while (GET_CODE (new_i2_dest) == ZERO_EXTRACT || GET_CODE (new_i2_dest) == STRICT_LOW_PART || GET_CODE (new_i2_dest) == SUBREG) new_i2_dest = XEXP (new_i2_dest, 0); if (REG_P (new_i3_dest) && REG_P (new_i2_dest) && REGNO (new_i3_dest) == REGNO (new_i2_dest)) REG_N_SETS (REGNO (new_i2_dest))++; } } /* If we can split it and use I2DEST, go ahead and see if that helps things be recognized. Verify that none of the registers are set between I2 and I3. */ if (insn_code_number < 0 && (split = find_split_point (&newpat, i3)) != 0 #ifdef HAVE_cc0 && REG_P (i2dest) #endif /* We need I2DEST in the proper mode. If it is a hard register or the only use of a pseudo, we can change its mode. */ && (GET_MODE (*split) == GET_MODE (i2dest) || GET_MODE (*split) == VOIDmode || REGNO (i2dest) < FIRST_PSEUDO_REGISTER || (REG_N_SETS (REGNO (i2dest)) == 1 && ! added_sets_2 && ! REG_USERVAR_P (i2dest))) && (next_real_insn (i2) == i3 || ! use_crosses_set_p (*split, INSN_CUID (i2))) /* We can't overwrite I2DEST if its value is still used by NEWPAT. */ && ! reg_referenced_p (i2dest, newpat)) { rtx newdest = i2dest; enum rtx_code split_code = GET_CODE (*split); enum machine_mode split_mode = GET_MODE (*split); /* Get NEWDEST as a register in the proper mode. We have already validated that we can do this. */ if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode) { newdest = gen_rtx_REG (split_mode, REGNO (i2dest)); if (REGNO (i2dest) >= FIRST_PSEUDO_REGISTER) SUBST (regno_reg_rtx[REGNO (i2dest)], newdest); } /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to an ASHIFT. This can occur if it was inside a PLUS and hence appeared to be a memory address. This is a kludge. */ if (split_code == MULT && GET_CODE (XEXP (*split, 1)) == CONST_INT && INTVAL (XEXP (*split, 1)) > 0 && (i = exact_log2 (INTVAL (XEXP (*split, 1)))) >= 0) { SUBST (*split, gen_rtx_ASHIFT (split_mode, XEXP (*split, 0), GEN_INT (i))); /* Update split_code because we may not have a multiply anymore. */ split_code = GET_CODE (*split); } #ifdef INSN_SCHEDULING /* If *SPLIT is a paradoxical SUBREG, when we split it, it should be written as a ZERO_EXTEND. 
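For example, (subreg:SI (mem:QI ADDR) 0) used as a full SImode value is rewritten as (zero_extend:SI (mem:QI ADDR)), or as a SIGN_EXTEND when LOAD_EXTEND_OP says the target's loads sign-extend, so that the split-out load can be recognized as an extending load.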
*/ if (split_code == SUBREG && MEM_P (SUBREG_REG (*split))) { #ifdef LOAD_EXTEND_OP /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's what it really is. */ if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split))) == SIGN_EXTEND) SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode, SUBREG_REG (*split))); else #endif SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode, SUBREG_REG (*split))); } #endif newi2pat = gen_rtx_SET (VOIDmode, newdest, *split); SUBST (*split, newdest); i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); /* If the split point was a MULT and we didn't have one before, don't use one now. */ if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult)) insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); } } /* Check for a case where we loaded from memory in a narrow mode and then sign extended it, but we need both registers. In that case, we have a PARALLEL with both loads from the same memory location. We can split this into a load from memory followed by a register-register copy. This saves at least one insn, more if register allocation can eliminate the copy. We cannot do this if the destination of the first assignment is a condition code register or cc0. We eliminate this case by making sure the SET_DEST and SET_SRC have the same mode. We cannot do this if the destination of the second assignment is a register that we have already assumed is zero-extended. Similarly for a SUBREG of such a register. */ else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0 && GET_CODE (newpat) == PARALLEL && XVECLEN (newpat, 0) == 2 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0))) == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0)))) && GET_CODE (XVECEXP (newpat, 0, 1)) == SET && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)), XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0)) && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)), INSN_CUID (i2)) && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)), (REG_P (temp) && reg_stat[REGNO (temp)].nonzero_bits != 0 && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT && (reg_stat[REGNO (temp)].nonzero_bits != GET_MODE_MASK (word_mode)))) && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))), (REG_P (temp) && reg_stat[REGNO (temp)].nonzero_bits != 0 && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT && (reg_stat[REGNO (temp)].nonzero_bits != GET_MODE_MASK (word_mode))))) && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)), SET_SRC (XVECEXP (newpat, 0, 1))) && ! find_reg_note (i3, REG_UNUSED, SET_DEST (XVECEXP (newpat, 0, 0)))) { rtx ni2dest; newi2pat = XVECEXP (newpat, 0, 0); ni2dest = SET_DEST (XVECEXP (newpat, 0, 0)); newpat = XVECEXP (newpat, 0, 1); SUBST (SET_SRC (newpat), gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest)); i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); if (i2_code_number >= 0) insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); if (insn_code_number >= 0) { rtx insn; rtx link; /* If we will be able to accept this, we have made a change to the destination of I3. This requires us to do a few adjustments. 
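At this point NEWI2PAT is the narrow extending load and NEWPAT copies the low part of the register it loads into what was the second destination, so I3 has become a register-register copy.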
*/ PATTERN (i3) = newpat; adjust_for_new_dest (i3); /* I3 now uses what used to be its destination and which is now I2's destination. That means we need a LOG_LINK from I3 to I2. But we used to have one, so we still will. However, some later insn might be using I2's dest and have a LOG_LINK pointing at I3. We must remove this link. The simplest way to remove the link is to point it at I1, which we know will be a NOTE. */ for (insn = NEXT_INSN (i3); insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR || insn != BB_HEAD (this_basic_block->next_bb)); insn = NEXT_INSN (insn)) { if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn))) { for (link = LOG_LINKS (insn); link; link = XEXP (link, 1)) if (XEXP (link, 0) == i3) XEXP (link, 0) = i1; break; } } } } /* Similarly, check for a case where we have a PARALLEL of two independent SETs but we started with three insns. In this case, we can do the sets as two separate insns. This case occurs when some SET allows two other insns to combine, but the destination of that SET is still live. */ else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0 && GET_CODE (newpat) == PARALLEL && XVECLEN (newpat, 0) == 2 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART && GET_CODE (XVECEXP (newpat, 0, 1)) == SET && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)), INSN_CUID (i2)) /* Don't pass sets with (USE (MEM ...)) dests to the following. */ && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != USE && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != USE && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)), XVECEXP (newpat, 0, 0)) && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)), XVECEXP (newpat, 0, 1)) && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0))) && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1))))) { /* Normally, it doesn't matter which of the two is done first, but it does if one references cc0. In that case, it has to be first. */ #ifdef HAVE_cc0 if (reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))) { newi2pat = XVECEXP (newpat, 0, 0); newpat = XVECEXP (newpat, 0, 1); } else #endif { newi2pat = XVECEXP (newpat, 0, 1); newpat = XVECEXP (newpat, 0, 0); } i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); if (i2_code_number >= 0) insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); } /* If it still isn't recognized, fail and change things back the way they were. */ if ((insn_code_number < 0 /* Is the result a reasonable ASM_OPERANDS? */ && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2))) { undo_all (); return 0; } /* If we had to change another insn, make sure it is valid also. */ if (undobuf.other_insn) { rtx other_pat = PATTERN (undobuf.other_insn); rtx new_other_notes; rtx note, next; CLEAR_HARD_REG_SET (newpat_used_regs); other_code_number = recog_for_combine (&other_pat, undobuf.other_insn, &new_other_notes); if (other_code_number < 0 && ! check_asm_operands (other_pat)) { undo_all (); return 0; } PATTERN (undobuf.other_insn) = other_pat; /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they are still valid. Then add any non-duplicate notes added by recog_for_combine. 
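A REG_UNUSED note only makes sense while the insn still sets the register it names; if the rewritten pattern no longer sets that register, the note is removed and the register's death count is decremented to match.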
*/ for (note = REG_NOTES (undobuf.other_insn); note; note = next) { next = XEXP (note, 1); if (REG_NOTE_KIND (note) == REG_UNUSED && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn))) { if (REG_P (XEXP (note, 0))) REG_N_DEATHS (REGNO (XEXP (note, 0)))--; remove_note (undobuf.other_insn, note); } } for (note = new_other_notes; note; note = XEXP (note, 1)) if (REG_P (XEXP (note, 0))) REG_N_DEATHS (REGNO (XEXP (note, 0)))++; distribute_notes (new_other_notes, undobuf.other_insn, undobuf.other_insn, NULL_RTX); } #ifdef HAVE_cc0 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether they are adjacent to each other or not. */ { rtx p = prev_nonnote_insn (i3); if (p && p != i2 && GET_CODE (p) == INSN && newi2pat && sets_cc0_p (newi2pat)) { undo_all (); return 0; } } #endif /* Only allow this combination if combine_insn_costs reports that the replacement instructions are cheaper than the originals. */ if (!combine_validate_cost (i1, i2, i3, newpat, newi2pat)) { undo_all (); return 0; } /* We now know that we can do this combination. Merge the insns and update the status of registers and LOG_LINKS. */ { rtx i3notes, i2notes, i1notes = 0; rtx i3links, i2links, i1links = 0; rtx midnotes = 0; unsigned int regno; /* Get the old REG_NOTES and LOG_LINKS from all our insns and clear them. */ i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3); i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2); if (i1) i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1); /* Ensure that we do not have something that should not be shared but occurs multiple times in the new insns. Check this by first resetting all the `used' flags and then copying anything is shared. */ reset_used_flags (i3notes); reset_used_flags (i2notes); reset_used_flags (i1notes); reset_used_flags (newpat); reset_used_flags (newi2pat); if (undobuf.other_insn) reset_used_flags (PATTERN (undobuf.other_insn)); i3notes = copy_rtx_if_shared (i3notes); i2notes = copy_rtx_if_shared (i2notes); i1notes = copy_rtx_if_shared (i1notes); newpat = copy_rtx_if_shared (newpat); newi2pat = copy_rtx_if_shared (newi2pat); if (undobuf.other_insn) reset_used_flags (PATTERN (undobuf.other_insn)); INSN_CODE (i3) = insn_code_number; PATTERN (i3) = newpat; if (GET_CODE (i3) == CALL_INSN && CALL_INSN_FUNCTION_USAGE (i3)) { rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3); reset_used_flags (call_usage); call_usage = copy_rtx (call_usage); if (substed_i2) replace_rtx (call_usage, i2dest, i2src); if (substed_i1) replace_rtx (call_usage, i1dest, i1src); CALL_INSN_FUNCTION_USAGE (i3) = call_usage; } if (undobuf.other_insn) INSN_CODE (undobuf.other_insn) = other_code_number; /* We had one special case above where I2 had more than one set and we replaced a destination of one of those sets with the destination of I3. In that case, we have to update LOG_LINKS of insns later in this basic block. Note that this (expensive) case is rare. Also, in this case, we must pretend that all REG_NOTEs for I2 actually came from I3, so that REG_UNUSED notes from I2 will be properly handled. */ if (i3_subst_into_i2) { for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++) if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != USE && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i))) && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest && ! 
find_reg_note (i2, REG_UNUSED, SET_DEST (XVECEXP (PATTERN (i2), 0, i)))) for (temp = NEXT_INSN (i2); temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR || BB_HEAD (this_basic_block) != temp); temp = NEXT_INSN (temp)) if (temp != i3 && INSN_P (temp)) for (link = LOG_LINKS (temp); link; link = XEXP (link, 1)) if (XEXP (link, 0) == i2) XEXP (link, 0) = i3; if (i3notes) { rtx link = i3notes; while (XEXP (link, 1)) link = XEXP (link, 1); XEXP (link, 1) = i2notes; } else i3notes = i2notes; i2notes = 0; } LOG_LINKS (i3) = 0; REG_NOTES (i3) = 0; LOG_LINKS (i2) = 0; REG_NOTES (i2) = 0; if (newi2pat) { INSN_CODE (i2) = i2_code_number; PATTERN (i2) = newi2pat; } else SET_INSN_DELETED (i2); if (i1) { LOG_LINKS (i1) = 0; REG_NOTES (i1) = 0; SET_INSN_DELETED (i1); } /* Get death notes for everything that is now used in either I3 or I2 and used to die in a previous insn. If we built two new patterns, move from I1 to I2 then I2 to I3 so that we get the proper movement on registers that I2 modifies. */ if (newi2pat) { move_deaths (newi2pat, NULL_RTX, INSN_CUID (i1), i2, &midnotes); move_deaths (newpat, newi2pat, INSN_CUID (i1), i3, &midnotes); } else move_deaths (newpat, NULL_RTX, i1 ? INSN_CUID (i1) : INSN_CUID (i2), i3, &midnotes); /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */ if (i3notes) distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX); if (i2notes) distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX); if (i1notes) distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX); if (midnotes) distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX); /* Distribute any notes added to I2 or I3 by recog_for_combine. We know these are REG_UNUSED and want them to go to the desired insn, so we always pass it as i3. We have not counted the notes in reg_n_deaths yet, so we need to do so now. */ if (newi2pat && new_i2_notes) { for (temp = new_i2_notes; temp; temp = XEXP (temp, 1)) if (REG_P (XEXP (temp, 0))) REG_N_DEATHS (REGNO (XEXP (temp, 0)))++; distribute_notes (new_i2_notes, i2, i2, NULL_RTX); } if (new_i3_notes) { for (temp = new_i3_notes; temp; temp = XEXP (temp, 1)) if (REG_P (XEXP (temp, 0))) REG_N_DEATHS (REGNO (XEXP (temp, 0)))++; distribute_notes (new_i3_notes, i3, i3, NULL_RTX); } /* If I3DEST was used in I3SRC, it really died in I3. We may need to put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets I3DEST, the death must be somewhere before I2, not I3. If we passed I3 in that case, it might delete I2. Similarly for I2 and I1. Show an additional death due to the REG_DEAD note we make here. If we discard it in distribute_notes, we will decrement it again. */ if (i3dest_killed) { if (REG_P (i3dest_killed)) REG_N_DEATHS (REGNO (i3dest_killed))++; if (newi2pat && reg_set_p (i3dest_killed, newi2pat)) distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed, NULL_RTX), NULL_RTX, i2, NULL_RTX); else distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed, NULL_RTX), NULL_RTX, i3, newi2pat ? i2 : NULL_RTX); } if (i2dest_in_i2src) { if (REG_P (i2dest)) REG_N_DEATHS (REGNO (i2dest))++; if (newi2pat && reg_set_p (i2dest, newi2pat)) distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX), NULL_RTX, i2, NULL_RTX); else distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX), NULL_RTX, i3, newi2pat ? 
i2 : NULL_RTX); } if (i1dest_in_i1src) { if (REG_P (i1dest)) REG_N_DEATHS (REGNO (i1dest))++; if (newi2pat && reg_set_p (i1dest, newi2pat)) distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX), NULL_RTX, i2, NULL_RTX); else distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX), NULL_RTX, i3, newi2pat ? i2 : NULL_RTX); } distribute_links (i3links); distribute_links (i2links); distribute_links (i1links); if (REG_P (i2dest)) { rtx link; rtx i2_insn = 0, i2_val = 0, set; /* The insn that used to set this register doesn't exist, and this life of the register may not exist either. See if one of I3's links points to an insn that sets I2DEST. If it does, that is now the last known value for I2DEST. If we don't update this and I2 set the register to a value that depended on its old contents, we will get confused. If this insn is used, thing will be set correctly in combine_instructions. */ for (link = LOG_LINKS (i3); link; link = XEXP (link, 1)) if ((set = single_set (XEXP (link, 0))) != 0 && rtx_equal_p (i2dest, SET_DEST (set))) i2_insn = XEXP (link, 0), i2_val = SET_SRC (set); record_value_for_reg (i2dest, i2_insn, i2_val); /* If the reg formerly set in I2 died only once and that was in I3, zero its use count so it won't make `reload' do any work. */ if (! added_sets_2 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat)) && ! i2dest_in_i2src) { regno = REGNO (i2dest); REG_N_SETS (regno)--; } } if (i1 && REG_P (i1dest)) { rtx link; rtx i1_insn = 0, i1_val = 0, set; for (link = LOG_LINKS (i3); link; link = XEXP (link, 1)) if ((set = single_set (XEXP (link, 0))) != 0 && rtx_equal_p (i1dest, SET_DEST (set))) i1_insn = XEXP (link, 0), i1_val = SET_SRC (set); record_value_for_reg (i1dest, i1_insn, i1_val); regno = REGNO (i1dest); if (! added_sets_1 && ! i1dest_in_i1src) REG_N_SETS (regno)--; } /* Update reg_stat[].nonzero_bits et al for any changes that may have been made to this insn. The order of set_nonzero_bits_and_sign_copies() is important. Because newi2pat can affect nonzero_bits of newpat */ if (newi2pat) note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL); note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL); /* Set new_direct_jump_p if a new return or simple jump instruction has been created. If I3 is now an unconditional jump, ensure that it has a BARRIER following it since it may have initially been a conditional jump. It may also be the last nonnote insn. */ if (returnjump_p (i3) || any_uncondjump_p (i3)) { *new_direct_jump_p = 1; mark_jump_label (PATTERN (i3), i3, 0); if ((temp = next_nonnote_insn (i3)) == NULL_RTX || GET_CODE (temp) != BARRIER) emit_barrier_after (i3); } if (undobuf.other_insn != NULL_RTX && (returnjump_p (undobuf.other_insn) || any_uncondjump_p (undobuf.other_insn))) { *new_direct_jump_p = 1; if ((temp = next_nonnote_insn (undobuf.other_insn)) == NULL_RTX || GET_CODE (temp) != BARRIER) emit_barrier_after (undobuf.other_insn); } /* An NOOP jump does not need barrier, but it does need cleaning up of CFG. */ if (GET_CODE (newpat) == SET && SET_SRC (newpat) == pc_rtx && SET_DEST (newpat) == pc_rtx) *new_direct_jump_p = 1; } combine_successes++; undo_commit (); if (added_links_insn && (newi2pat == 0 || INSN_CUID (added_links_insn) < INSN_CUID (i2)) && INSN_CUID (added_links_insn) < INSN_CUID (i3)) return added_links_insn; else return newi2pat ? i2 : i3; } /* Undo all the modifications recorded in undobuf. 
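Each undo record holds a pointer to the field that was changed (an int or an rtx, as indicated by is_int) together with its old contents, so undoing simply stores the saved value back through that pointer and moves the record to the free list.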
*/ static void undo_all (void) { struct undo *undo, *next; for (undo = undobuf.undos; undo; undo = next) { next = undo->next; if (undo->is_int) *undo->where.i = undo->old_contents.i; else *undo->where.r = undo->old_contents.r; undo->next = undobuf.frees; undobuf.frees = undo; } undobuf.undos = 0; } /* We've committed to accepting the changes we made. Move all of the undos to the free list. */ static void undo_commit (void) { struct undo *undo, *next; for (undo = undobuf.undos; undo; undo = next) { next = undo->next; undo->next = undobuf.frees; undobuf.frees = undo; } undobuf.undos = 0; } /* Find the innermost point within the rtx at LOC, possibly LOC itself, where we have an arithmetic expression and return that point. LOC will be inside INSN. try_combine will call this function to see if an insn can be split into two insns. */ static rtx * find_split_point (rtx *loc, rtx insn) { rtx x = *loc; enum rtx_code code = GET_CODE (x); rtx *split; unsigned HOST_WIDE_INT len = 0; HOST_WIDE_INT pos = 0; int unsignedp = 0; rtx inner = NULL_RTX; /* First special-case some codes. */ switch (code) { case SUBREG: #ifdef INSN_SCHEDULING /* If we are making a paradoxical SUBREG invalid, it becomes a split point. */ if (MEM_P (SUBREG_REG (x))) return loc; #endif return find_split_point (&SUBREG_REG (x), insn); case MEM: #ifdef HAVE_lo_sum /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it using LO_SUM and HIGH. */ if (GET_CODE (XEXP (x, 0)) == CONST || GET_CODE (XEXP (x, 0)) == SYMBOL_REF) { SUBST (XEXP (x, 0), gen_rtx_LO_SUM (Pmode, gen_rtx_HIGH (Pmode, XEXP (x, 0)), XEXP (x, 0))); return &XEXP (XEXP (x, 0), 0); } #endif /* If we have a PLUS whose second operand is a constant and the address is not valid, perhaps will can split it up using the machine-specific way to split large constants. We use the first pseudo-reg (one of the virtual regs) as a placeholder; it will not remain in the result. */ if (GET_CODE (XEXP (x, 0)) == PLUS && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT && ! memory_address_p (GET_MODE (x), XEXP (x, 0))) { rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER]; rtx seq = split_insns (gen_rtx_SET (VOIDmode, reg, XEXP (x, 0)), subst_insn); /* This should have produced two insns, each of which sets our placeholder. If the source of the second is a valid address, we can make put both sources together and make a split point in the middle. */ if (seq && NEXT_INSN (seq) != NULL_RTX && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX && GET_CODE (seq) == INSN && GET_CODE (PATTERN (seq)) == SET && SET_DEST (PATTERN (seq)) == reg && ! reg_mentioned_p (reg, SET_SRC (PATTERN (seq))) && GET_CODE (NEXT_INSN (seq)) == INSN && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg && memory_address_p (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))))) { rtx src1 = SET_SRC (PATTERN (seq)); rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq))); /* Replace the placeholder in SRC2 with SRC1. If we can find where in SRC2 it was placed, that can become our split point and we can replace this address with SRC2. Just try two obvious places. */ src2 = replace_rtx (src2, reg, src1); split = 0; if (XEXP (src2, 0) == src1) split = &XEXP (src2, 0); else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e' && XEXP (XEXP (src2, 0), 0) == src1) split = &XEXP (XEXP (src2, 0), 0); if (split) { SUBST (XEXP (x, 0), src2); return split; } } /* If that didn't work, perhaps the first operand is complex and needs to be computed separately, so make a split point there. 
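For example, with an address such as (plus (plus R1 R2) (const_int 100000)), the inner (plus R1 R2) is not an object, so it becomes the split point and can be computed into a register of its own.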
This will occur on machines that just support REG + CONST and have a constant moved through some previous computation. */ else if (!OBJECT_P (XEXP (XEXP (x, 0), 0)) && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0))))) return &XEXP (XEXP (x, 0), 0); } break; case SET: #ifdef HAVE_cc0 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a ZERO_EXTRACT, the most likely reason why this doesn't match is that we need to put the operand into a register. So split at that point. */ if (SET_DEST (x) == cc0_rtx && GET_CODE (SET_SRC (x)) != COMPARE && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT && !OBJECT_P (SET_SRC (x)) && ! (GET_CODE (SET_SRC (x)) == SUBREG && OBJECT_P (SUBREG_REG (SET_SRC (x))))) return &SET_SRC (x); #endif /* See if we can split SET_SRC as it stands. */ split = find_split_point (&SET_SRC (x), insn); if (split && split != &SET_SRC (x)) return split; /* See if we can split SET_DEST as it stands. */ split = find_split_point (&SET_DEST (x), insn); if (split && split != &SET_DEST (x)) return split; /* See if this is a bitfield assignment with everything constant. If so, this is an IOR of an AND, so split it into that. */ if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))) <= HOST_BITS_PER_WIDE_INT) && GET_CODE (XEXP (SET_DEST (x), 1)) == CONST_INT && GET_CODE (XEXP (SET_DEST (x), 2)) == CONST_INT && GET_CODE (SET_SRC (x)) == CONST_INT && ((INTVAL (XEXP (SET_DEST (x), 1)) + INTVAL (XEXP (SET_DEST (x), 2))) <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))) && ! side_effects_p (XEXP (SET_DEST (x), 0))) { HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2)); unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1)); unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)); rtx dest = XEXP (SET_DEST (x), 0); enum machine_mode mode = GET_MODE (dest); unsigned HOST_WIDE_INT mask = ((HOST_WIDE_INT) 1 << len) - 1; if (BITS_BIG_ENDIAN) pos = GET_MODE_BITSIZE (mode) - len - pos; if (src == mask) SUBST (SET_SRC (x), gen_binary (IOR, mode, dest, GEN_INT (src << pos))); else SUBST (SET_SRC (x), gen_binary (IOR, mode, gen_binary (AND, mode, dest, gen_int_mode (~(mask << pos), mode)), GEN_INT (src << pos))); SUBST (SET_DEST (x), dest); split = find_split_point (&SET_SRC (x), insn); if (split && split != &SET_SRC (x)) return split; } /* Otherwise, see if this is an operation that we can split into two. If so, try to split that. */ code = GET_CODE (SET_SRC (x)); switch (code) { case AND: /* If we are AND'ing with a large constant that is only a single bit and the result is only being used in a context where we need to know if it is zero or nonzero, replace it with a bit extraction. This will avoid the large constant, which might have taken more than one insn to make. If the constant were not a valid argument to the AND but took only one insn to make, this is no worse, but if it took more than one insn, it will be better. 
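For instance, (and R (const_int 0x10000)) whose result is only compared against zero can be replaced by a one-bit ZERO_EXTRACT of bit 16 of R, so the 0x10000 constant never needs to be materialized.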
*/ if (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT && REG_P (XEXP (SET_SRC (x), 0)) && (pos = exact_log2 (INTVAL (XEXP (SET_SRC (x), 1)))) >= 7 && REG_P (SET_DEST (x)) && (split = find_single_use (SET_DEST (x), insn, (rtx*) 0)) != 0 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE) && XEXP (*split, 0) == SET_DEST (x) && XEXP (*split, 1) == const0_rtx) { rtx extraction = make_extraction (GET_MODE (SET_DEST (x)), XEXP (SET_SRC (x), 0), pos, NULL_RTX, 1, 1, 0, 0); if (extraction != 0) { SUBST (SET_SRC (x), extraction); return find_split_point (loc, insn); } } break; case NE: /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X is known to be on, this can be converted into a NEG of a shift. */ if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0)) && 1 <= (pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0), GET_MODE (XEXP (SET_SRC (x), 0)))))) { enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0)); SUBST (SET_SRC (x), gen_rtx_NEG (mode, gen_rtx_LSHIFTRT (mode, XEXP (SET_SRC (x), 0), GEN_INT (pos)))); split = find_split_point (&SET_SRC (x), insn); if (split && split != &SET_SRC (x)) return split; } break; case SIGN_EXTEND: inner = XEXP (SET_SRC (x), 0); /* We can't optimize if either mode is a partial integer mode as we don't know how many bits are significant in those modes. */ if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT) break; pos = 0; len = GET_MODE_BITSIZE (GET_MODE (inner)); unsignedp = 0; break; case SIGN_EXTRACT: case ZERO_EXTRACT: if (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT && GET_CODE (XEXP (SET_SRC (x), 2)) == CONST_INT) { inner = XEXP (SET_SRC (x), 0); len = INTVAL (XEXP (SET_SRC (x), 1)); pos = INTVAL (XEXP (SET_SRC (x), 2)); if (BITS_BIG_ENDIAN) pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos; unsignedp = (code == ZERO_EXTRACT); } break; default: break; } if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner))) { enum machine_mode mode = GET_MODE (SET_SRC (x)); /* For unsigned, we have a choice of a shift followed by an AND or two shifts. Use two shifts for field sizes where the constant might be too large. We assume here that we can always at least get 8-bit constants in an AND insn, which is true for every current RISC. */ if (unsignedp && len <= 8) { SUBST (SET_SRC (x), gen_rtx_AND (mode, gen_rtx_LSHIFTRT (mode, gen_lowpart (mode, inner), GEN_INT (pos)), GEN_INT (((HOST_WIDE_INT) 1 << len) - 1))); split = find_split_point (&SET_SRC (x), insn); if (split && split != &SET_SRC (x)) return split; } else { SUBST (SET_SRC (x), gen_rtx_fmt_ee (unsignedp ? LSHIFTRT : ASHIFTRT, mode, gen_rtx_ASHIFT (mode, gen_lowpart (mode, inner), GEN_INT (GET_MODE_BITSIZE (mode) - len - pos)), GEN_INT (GET_MODE_BITSIZE (mode) - len))); split = find_split_point (&SET_SRC (x), insn); if (split && split != &SET_SRC (x)) return split; } } /* See if this is a simple operation with a constant as the second operand. It might be that this constant is out of range and hence could be used as a split point. */ if (BINARY_P (SET_SRC (x)) && CONSTANT_P (XEXP (SET_SRC (x), 1)) && (OBJECT_P (XEXP (SET_SRC (x), 0)) || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0)))))) return &XEXP (SET_SRC (x), 1); /* Finally, see if this is a simple operation with its first operand not in a register. 
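For example, the first operand might be a MEM, which a load-store machine cannot use directly as an arithmetic operand.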
The operation might require this operand in a register, so return it as a split point. We can always do this because if the first operand were another operation, we would have already found it as a split point. */ if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x))) && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode)) return &XEXP (SET_SRC (x), 0); return 0; case AND: case IOR: /* We write NOR as (and (not A) (not B)), but if we don't have a NOR, it is better to write this as (not (ior A B)) so we can split it. Similarly for IOR. */ if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT) { SUBST (*loc, gen_rtx_NOT (GET_MODE (x), gen_rtx_fmt_ee (code == IOR ? AND : IOR, GET_MODE (x), XEXP (XEXP (x, 0), 0), XEXP (XEXP (x, 1), 0)))); return find_split_point (loc, insn); } /* Many RISC machines have a large set of logical insns. If the second operand is a NOT, put it first so we will try to split the other operand first. */ if (GET_CODE (XEXP (x, 1)) == NOT) { rtx tem = XEXP (x, 0); SUBST (XEXP (x, 0), XEXP (x, 1)); SUBST (XEXP (x, 1), tem); } break; default: break; } /* Otherwise, select our actions depending on our rtx class. */ switch (GET_RTX_CLASS (code)) { case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */ case RTX_TERNARY: split = find_split_point (&XEXP (x, 2), insn); if (split) return split; /* ... fall through ... */ case RTX_BIN_ARITH: case RTX_COMM_ARITH: case RTX_COMPARE: case RTX_COMM_COMPARE: split = find_split_point (&XEXP (x, 1), insn); if (split) return split; /* ... fall through ... */ case RTX_UNARY: /* Some machines have (and (shift ...) ...) insns. If X is not an AND, but XEXP (X, 0) is, use it as our split point. */ if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND) return &XEXP (x, 0); split = find_split_point (&XEXP (x, 0), insn); if (split) return split; return loc; default: /* Otherwise, we don't have a split point. */ return 0; } } /* Throughout X, replace FROM with TO, and return the result. The result is TO if X is FROM; otherwise the result is X, but its contents may have been modified. If they were modified, a record was made in undobuf so that undo_all will (among other things) return X to its original state. If the number of changes necessary is too much to record to undo, the excess changes are not made, so the result is invalid. The changes already made can still be undone. undobuf.num_undo is incremented for such changes, so by testing that the caller can tell whether the result is valid. `n_pseudo_occurrences' is incremented each time FROM is replaced. IN_DEST is nonzero if we are processing the SET_DEST of a SET. UNIQUE_COPY is nonzero if each substitution must be unique. We do this by copying if `n_pseudo_occurrences' is nonzero. */ static rtx subst (rtx x, rtx from, rtx to, int in_dest, int unique_copy) { enum rtx_code code = GET_CODE (x); enum machine_mode op0_mode = VOIDmode; const char *fmt; int len, i; rtx new; /* Two expressions are equal if they are identical copies of a shared RTX or if they are both registers with the same register number and mode. */ #define COMBINE_RTX_EQUAL_P(X,Y) \ ((X) == (Y) \ || (REG_P (X) && REG_P (Y) \ && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y))) if (! in_dest && COMBINE_RTX_EQUAL_P (x, from)) { n_pseudo_occurrences++; return (unique_copy && n_pseudo_occurrences > 1 ? copy_rtx (to) : to); } /* If X and FROM are the same register but different modes, they will not have been seen as equal above. However, flow.c will make a LOG_LINKS entry for that case. 
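(This happens, for example, when FROM is (reg:SI 100) and X is (reg:QI 100), i.e. a reference to the same pseudo in a narrower mode.)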
If we do nothing, we will try to rerecognize our original insn and, when it succeeds, we will delete the feeding insn, which is incorrect. So force this insn not to match in this (rare) case. */ if (! in_dest && code == REG && REG_P (from) && REGNO (x) == REGNO (from)) return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); /* If this is an object, we are done unless it is a MEM or LO_SUM, both of which may contain things that can be combined. */ if (code != MEM && code != LO_SUM && OBJECT_P (x)) return x; /* It is possible to have a subexpression appear twice in the insn. Suppose that FROM is a register that appears within TO. Then, after that subexpression has been scanned once by `subst', the second time it is scanned, TO may be found. If we were to scan TO here, we would find FROM within it and create a self-referent rtl structure which is completely wrong. */ if (COMBINE_RTX_EQUAL_P (x, to)) return to; /* Parallel asm_operands need special attention because all of the inputs are shared across the arms. Furthermore, unsharing the rtl results in recognition failures. Failure to handle this case specially can result in circular rtl. Solve this by doing a normal pass across the first entry of the parallel, and only processing the SET_DESTs of the subsequent entries. Ug. */ if (code == PARALLEL && GET_CODE (XVECEXP (x, 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS) { new = subst (XVECEXP (x, 0, 0), from, to, 0, unique_copy); /* If this substitution failed, this whole thing fails. */ if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx) return new; SUBST (XVECEXP (x, 0, 0), new); for (i = XVECLEN (x, 0) - 1; i >= 1; i--) { rtx dest = SET_DEST (XVECEXP (x, 0, i)); if (!REG_P (dest) && GET_CODE (dest) != CC0 && GET_CODE (dest) != PC) { new = subst (dest, from, to, 0, unique_copy); /* If this substitution failed, this whole thing fails. */ if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx) return new; SUBST (SET_DEST (XVECEXP (x, 0, i)), new); } } } else { len = GET_RTX_LENGTH (code); fmt = GET_RTX_FORMAT (code); /* We don't need to process a SET_DEST that is a register, CC0, or PC, so set up to skip this common case. All other cases where we want to suppress replacing something inside a SET_SRC are handled via the IN_DEST operand. */ if (code == SET && (REG_P (SET_DEST (x)) || GET_CODE (SET_DEST (x)) == CC0 || GET_CODE (SET_DEST (x)) == PC)) fmt = "ie"; /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a constant. */ if (fmt[0] == 'e') op0_mode = GET_MODE (XEXP (x, 0)); for (i = 0; i < len; i++) { if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) { if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from)) { new = (unique_copy && n_pseudo_occurrences ? copy_rtx (to) : to); n_pseudo_occurrences++; } else { new = subst (XVECEXP (x, i, j), from, to, 0, unique_copy); /* If this substitution failed, this whole thing fails. */ if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx) return new; } SUBST (XVECEXP (x, i, j), new); } } else if (fmt[i] == 'e') { /* If this is a register being set, ignore it. */ new = XEXP (x, i); if (in_dest && (code == SUBREG || code == STRICT_LOW_PART || code == ZERO_EXTRACT) && i == 0 && REG_P (new)) ; else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from)) { /* In general, don't install a subreg involving two modes not tieable. 
It can worsen register allocation, and can even make invalid reload insns, since the reg inside may need to be copied from in the outside mode, and that may be invalid if it is an fp reg copied in integer mode. We allow two exceptions to this: It is valid if it is inside another SUBREG and the mode of that SUBREG and the mode of the inside of TO is tieable and it is valid if X is a SET that copies FROM to CC0. */ if (GET_CODE (to) == SUBREG && ! MODES_TIEABLE_P (GET_MODE (to), GET_MODE (SUBREG_REG (to))) && ! (code == SUBREG && MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (to)))) #ifdef HAVE_cc0 && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx) #endif ) return gen_rtx_CLOBBER (VOIDmode, const0_rtx); #ifdef CANNOT_CHANGE_MODE_CLASS if (code == SUBREG && REG_P (to) && REGNO (to) < FIRST_PSEUDO_REGISTER && REG_CANNOT_CHANGE_MODE_P (REGNO (to), GET_MODE (to), GET_MODE (x))) return gen_rtx_CLOBBER (VOIDmode, const0_rtx); #endif new = (unique_copy && n_pseudo_occurrences ? copy_rtx (to) : to); n_pseudo_occurrences++; } else /* If we are in a SET_DEST, suppress most cases unless we have gone inside a MEM, in which case we want to simplify the address. We assume here that things that are actually part of the destination have their inner parts in the first expression. This is true for SUBREG, STRICT_LOW_PART, and ZERO_EXTRACT, which are the only things aside from REG and MEM that should appear in a SET_DEST. */ new = subst (XEXP (x, i), from, to, (((in_dest && (code == SUBREG || code == STRICT_LOW_PART || code == ZERO_EXTRACT)) || code == SET) && i == 0), unique_copy); /* If we found that we will have to reject this combination, indicate that by returning the CLOBBER ourselves, rather than an expression containing it. This will speed things up as well as prevent accidents where two CLOBBERs are considered to be equal, thus producing an incorrect simplification. */ if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx) return new; if (GET_CODE (x) == SUBREG && (GET_CODE (new) == CONST_INT || GET_CODE (new) == CONST_DOUBLE)) { enum machine_mode mode = GET_MODE (x); x = simplify_subreg (GET_MODE (x), new, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); if (! x) x = gen_rtx_CLOBBER (mode, const0_rtx); } else if (GET_CODE (new) == CONST_INT && GET_CODE (x) == ZERO_EXTEND) { x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x), new, GET_MODE (XEXP (x, 0))); if (! x) abort (); } else SUBST (XEXP (x, i), new); } } } /* Try to simplify X. If the simplification changed the code, it is likely that further simplification will help, so loop, but limit the number of repetitions that will be performed. */ for (i = 0; i < 4; i++) { /* If X is sufficiently simple, don't bother trying to do anything with it. */ if (code != CONST_INT && code != REG && code != CLOBBER) x = combine_simplify_rtx (x, op0_mode, in_dest); if (GET_CODE (x) == code) break; code = GET_CODE (x); /* We no longer know the original mode of operand 0 since we have changed the form of X) */ op0_mode = VOIDmode; } return x; } /* Simplify X, a piece of RTL. We just operate on the expression at the outer level; call `subst' to simplify recursively. Return the new expression. OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero if we are inside a SET_DEST. 
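Note that `subst' calls this routine in a short loop (at most four iterations), since one simplification often changes the code of X and thereby exposes another.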
*/ static rtx combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest) { enum rtx_code code = GET_CODE (x); enum machine_mode mode = GET_MODE (x); rtx temp; rtx reversed; int i; /* If this is a commutative operation, put a constant last and a complex expression first. We don't need to do this for comparisons here. */ if (COMMUTATIVE_ARITH_P (x) && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))) { temp = XEXP (x, 0); SUBST (XEXP (x, 0), XEXP (x, 1)); SUBST (XEXP (x, 1), temp); } /* If this is a PLUS, MINUS, or MULT, and the first operand is the sign extension of a PLUS with a constant, reverse the order of the sign extension and the addition. Note that this not the same as the original code, but overflow is undefined for signed values. Also note that the PLUS will have been partially moved "inside" the sign-extension, so that the first operand of X will really look like: (ashiftrt (plus (ashift A C4) C5) C4). We convert this to (plus (ashiftrt (ashift A C4) C2) C4) and replace the first operand of X with that expression. Later parts of this function may simplify the expression further. For example, if we start with (mult (sign_extend (plus A C1)) C2), we swap the SIGN_EXTEND and PLUS. Later code will apply the distributive law to produce (plus (mult (sign_extend X) C1) C3). We do this to simplify address expressions. */ if ((code == PLUS || code == MINUS || code == MULT) && GET_CODE (XEXP (x, 0)) == ASHIFTRT && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ASHIFT && GET_CODE (XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1)) == CONST_INT && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT && XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 1) == XEXP (XEXP (x, 0), 1) && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT && (temp = simplify_binary_operation (ASHIFTRT, mode, XEXP (XEXP (XEXP (x, 0), 0), 1), XEXP (XEXP (x, 0), 1))) != 0) { rtx new = simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (XEXP (XEXP (XEXP (x, 0), 0), 0), 0), INTVAL (XEXP (XEXP (x, 0), 1))); new = simplify_shift_const (NULL_RTX, ASHIFTRT, mode, new, INTVAL (XEXP (XEXP (x, 0), 1))); SUBST (XEXP (x, 0), gen_binary (PLUS, mode, new, temp)); } /* If this is a simple operation applied to an IF_THEN_ELSE, try applying it to the arms of the IF_THEN_ELSE. This often simplifies things. Check for cases where both arms are testing the same condition. Don't do anything if all operands are very simple. */ if ((BINARY_P (x) && ((!OBJECT_P (XEXP (x, 0)) && ! (GET_CODE (XEXP (x, 0)) == SUBREG && OBJECT_P (SUBREG_REG (XEXP (x, 0))))) || (!OBJECT_P (XEXP (x, 1)) && ! (GET_CODE (XEXP (x, 1)) == SUBREG && OBJECT_P (SUBREG_REG (XEXP (x, 1))))))) || (UNARY_P (x) && (!OBJECT_P (XEXP (x, 0)) && ! (GET_CODE (XEXP (x, 0)) == SUBREG && OBJECT_P (SUBREG_REG (XEXP (x, 0))))))) { rtx cond, true_rtx, false_rtx; cond = if_then_else_cond (x, &true_rtx, &false_rtx); if (cond != 0 /* If everything is a comparison, what we have is highly unlikely to be simpler, so don't use it. */ && ! (COMPARISON_P (x) && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))) { rtx cop1 = const0_rtx; enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1); if (cond_code == NE && COMPARISON_P (cond)) return x; /* Simplify the alternative arms; this may collapse the true and false arms to store-flag values. Be careful to use copy_rtx here since true_rtx or false_rtx might share RTL with x as a result of the if_then_else_cond call above. 
*/ true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0); false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0); /* If true_rtx and false_rtx are not general_operands, an if_then_else is unlikely to be simpler. */ if (general_operand (true_rtx, VOIDmode) && general_operand (false_rtx, VOIDmode)) { enum rtx_code reversed; /* Restarting if we generate a store-flag expression will cause us to loop. Just drop through in this case. */ /* If the result values are STORE_FLAG_VALUE and zero, we can just make the comparison operation. */ if (true_rtx == const_true_rtx && false_rtx == const0_rtx) x = gen_binary (cond_code, mode, cond, cop1); else if (true_rtx == const0_rtx && false_rtx == const_true_rtx && ((reversed = reversed_comparison_code_parts (cond_code, cond, cop1, NULL)) != UNKNOWN)) x = gen_binary (reversed, mode, cond, cop1); /* Likewise, we can make the negate of a comparison operation if the result values are - STORE_FLAG_VALUE and zero. */ else if (GET_CODE (true_rtx) == CONST_INT && INTVAL (true_rtx) == - STORE_FLAG_VALUE && false_rtx == const0_rtx) x = simplify_gen_unary (NEG, mode, gen_binary (cond_code, mode, cond, cop1), mode); else if (GET_CODE (false_rtx) == CONST_INT && INTVAL (false_rtx) == - STORE_FLAG_VALUE && true_rtx == const0_rtx && ((reversed = reversed_comparison_code_parts (cond_code, cond, cop1, NULL)) != UNKNOWN)) x = simplify_gen_unary (NEG, mode, gen_binary (reversed, mode, cond, cop1), mode); else return gen_rtx_IF_THEN_ELSE (mode, gen_binary (cond_code, VOIDmode, cond, cop1), true_rtx, false_rtx); code = GET_CODE (x); op0_mode = VOIDmode; } } } /* Try to fold this expression in case we have constants that weren't present before. */ temp = 0; switch (GET_RTX_CLASS (code)) { case RTX_UNARY: if (op0_mode == VOIDmode) op0_mode = GET_MODE (XEXP (x, 0)); temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode); break; case RTX_COMPARE: case RTX_COMM_COMPARE: { enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0)); if (cmp_mode == VOIDmode) { cmp_mode = GET_MODE (XEXP (x, 1)); if (cmp_mode == VOIDmode) cmp_mode = op0_mode; } temp = simplify_relational_operation (code, mode, cmp_mode, XEXP (x, 0), XEXP (x, 1)); } break; case RTX_COMM_ARITH: case RTX_BIN_ARITH: temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1)); break; case RTX_BITFIELD_OPS: case RTX_TERNARY: temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0), XEXP (x, 1), XEXP (x, 2)); break; default: break; } if (temp) { x = temp; code = GET_CODE (temp); op0_mode = VOIDmode; mode = GET_MODE (temp); } /* First see if we can apply the inverse distributive law. */ if (code == PLUS || code == MINUS || code == AND || code == IOR || code == XOR) { x = apply_distributive_law (x); code = GET_CODE (x); op0_mode = VOIDmode; } /* If CODE is an associative operation not otherwise handled, see if we can associate some operands. This can win if they are constants or if they are logically related (i.e. (a & b) & a). */ if ((code == PLUS || code == MINUS || code == MULT || code == DIV || code == AND || code == IOR || code == XOR || code == SMAX || code == SMIN || code == UMAX || code == UMIN) && ((INTEGRAL_MODE_P (mode) && code != DIV) || (flag_unsafe_math_optimizations && FLOAT_MODE_P (mode)))) { if (GET_CODE (XEXP (x, 0)) == code) { rtx other = XEXP (XEXP (x, 0), 0); rtx inner_op0 = XEXP (XEXP (x, 0), 1); rtx inner_op1 = XEXP (x, 1); rtx inner; /* Make sure we pass the constant operand if any as the second one if this is a commutative operation. 
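Canonical RTL keeps a constant operand second, and simplify_binary_operation is more likely to fold the inner pair when it is presented in that form.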
*/ if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x)) { rtx tem = inner_op0; inner_op0 = inner_op1; inner_op1 = tem; } inner = simplify_binary_operation (code == MINUS ? PLUS : code == DIV ? MULT : code, mode, inner_op0, inner_op1); /* For commutative operations, try the other pair if that one didn't simplify. */ if (inner == 0 && COMMUTATIVE_ARITH_P (x)) { other = XEXP (XEXP (x, 0), 1); inner = simplify_binary_operation (code, mode, XEXP (XEXP (x, 0), 0), XEXP (x, 1)); } if (inner) return gen_binary (code, mode, other, inner); } } /* A little bit of algebraic simplification here. */ switch (code) { case MEM: /* Ensure that our address has any ASHIFTs converted to MULT in case address-recognizing predicates are called later. */ temp = make_compound_operation (XEXP (x, 0), MEM); SUBST (XEXP (x, 0), temp); break; case SUBREG: if (op0_mode == VOIDmode) op0_mode = GET_MODE (SUBREG_REG (x)); /* See if this can be moved to simplify_subreg. */ if (CONSTANT_P (SUBREG_REG (x)) && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x) /* Don't call gen_lowpart if the inner mode is VOIDmode and we cannot simplify it, as SUBREG without inner mode is invalid. */ && (GET_MODE (SUBREG_REG (x)) != VOIDmode || gen_lowpart_common (mode, SUBREG_REG (x)))) return gen_lowpart (mode, SUBREG_REG (x)); if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC) break; { rtx temp; temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode, SUBREG_BYTE (x)); if (temp) return temp; } /* Don't change the mode of the MEM if that would change the meaning of the address. */ if (MEM_P (SUBREG_REG (x)) && (MEM_VOLATILE_P (SUBREG_REG (x)) || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0)))) return gen_rtx_CLOBBER (mode, const0_rtx); /* Note that we cannot do any narrowing for non-constants since we might have been counting on using the fact that some bits were zero. We now do this in the SET. */ break; case NOT: if (GET_CODE (XEXP (x, 0)) == SUBREG && subreg_lowpart_p (XEXP (x, 0)) && (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (x, 0))))) && GET_CODE (SUBREG_REG (XEXP (x, 0))) == ASHIFT && XEXP (SUBREG_REG (XEXP (x, 0)), 0) == const1_rtx) { enum machine_mode inner_mode = GET_MODE (SUBREG_REG (XEXP (x, 0))); x = gen_rtx_ROTATE (inner_mode, simplify_gen_unary (NOT, inner_mode, const1_rtx, inner_mode), XEXP (SUBREG_REG (XEXP (x, 0)), 1)); return gen_lowpart (mode, x); } /* Apply De Morgan's laws to reduce number of patterns for machines with negating logical insns (and-not, nand, etc.). If result has only one NOT, put it first, since that is how the patterns are coded. */ if (GET_CODE (XEXP (x, 0)) == IOR || GET_CODE (XEXP (x, 0)) == AND) { rtx in1 = XEXP (XEXP (x, 0), 0), in2 = XEXP (XEXP (x, 0), 1); enum machine_mode op_mode; op_mode = GET_MODE (in1); in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode); op_mode = GET_MODE (in2); if (op_mode == VOIDmode) op_mode = mode; in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode); if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT) { rtx tem = in2; in2 = in1; in1 = tem; } return gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)) == IOR ? AND : IOR, mode, in1, in2); } break; case NEG: /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. 
*/ if (GET_CODE (XEXP (x, 0)) == XOR && XEXP (XEXP (x, 0), 1) == const1_rtx && nonzero_bits (XEXP (XEXP (x, 0), 0), mode) == 1) return gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0), constm1_rtx); temp = expand_compound_operation (XEXP (x, 0)); /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be replaced by (lshiftrt X C). This will convert (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */ if (GET_CODE (temp) == ASHIFTRT && GET_CODE (XEXP (temp, 1)) == CONST_INT && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1) return simplify_shift_const (temp, LSHIFTRT, mode, XEXP (temp, 0), INTVAL (XEXP (temp, 1))); /* If X has only a single bit that might be nonzero, say, bit I, convert (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to (sign_extract X 1 Y). But only do this if TEMP isn't a register or a SUBREG of one since we'd be making the expression more complex if it was just a register. */ if (!REG_P (temp) && ! (GET_CODE (temp) == SUBREG && REG_P (SUBREG_REG (temp))) && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0) { rtx temp1 = simplify_shift_const (NULL_RTX, ASHIFTRT, mode, simplify_shift_const (NULL_RTX, ASHIFT, mode, temp, GET_MODE_BITSIZE (mode) - 1 - i), GET_MODE_BITSIZE (mode) - 1 - i); /* If all we did was surround TEMP with the two shifts, we haven't improved anything, so don't use it. Otherwise, we are better off with TEMP1. */ if (GET_CODE (temp1) != ASHIFTRT || GET_CODE (XEXP (temp1, 0)) != ASHIFT || XEXP (XEXP (temp1, 0), 0) != temp) return temp1; } break; case TRUNCATE: /* We can't handle truncation to a partial integer mode here because we don't know the real bitsize of the partial integer mode. */ if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT) break; if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))) SUBST (XEXP (x, 0), force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)), GET_MODE_MASK (mode), NULL_RTX, 0)); /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */ if ((GET_CODE (XEXP (x, 0)) == SIGN_EXTEND || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND) && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode) return XEXP (XEXP (x, 0), 0); /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is (OP:SI foo:SI) if OP is NEG or ABS. */ if ((GET_CODE (XEXP (x, 0)) == ABS || GET_CODE (XEXP (x, 0)) == NEG) && (GET_CODE (XEXP (XEXP (x, 0), 0)) == SIGN_EXTEND || GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND) && GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == mode) return simplify_gen_unary (GET_CODE (XEXP (x, 0)), mode, XEXP (XEXP (XEXP (x, 0), 0), 0), mode); /* (truncate:SI (subreg:DI (truncate:SI X) 0)) is (truncate:SI x). */ if (GET_CODE (XEXP (x, 0)) == SUBREG && GET_CODE (SUBREG_REG (XEXP (x, 0))) == TRUNCATE && subreg_lowpart_p (XEXP (x, 0))) return SUBREG_REG (XEXP (x, 0)); /* If we know that the value is already truncated, we can replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION is nonzero for the corresponding modes. But don't do this for an (LSHIFTRT (MULT ...)) since this will cause problems with the umulXi3_highpart patterns. */ if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))) && num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))) >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1) && ! 
(GET_CODE (XEXP (x, 0)) == LSHIFTRT && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)) return gen_lowpart (mode, XEXP (x, 0)); /* A truncate of a comparison can be replaced with a subreg if STORE_FLAG_VALUE permits. This is like the previous test, but it works even if the comparison is done in a mode larger than HOST_BITS_PER_WIDE_INT. */ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && COMPARISON_P (XEXP (x, 0)) && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0) return gen_lowpart (mode, XEXP (x, 0)); /* Similarly, a truncate of a register whose value is a comparison can be replaced with a subreg if STORE_FLAG_VALUE permits. */ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0 && (temp = get_last_value (XEXP (x, 0))) && COMPARISON_P (temp)) return gen_lowpart (mode, XEXP (x, 0)); break; case FLOAT_TRUNCATE: /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */ if (GET_CODE (XEXP (x, 0)) == FLOAT_EXTEND && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode) return XEXP (XEXP (x, 0), 0); /* (float_truncate:SF (float_truncate:DF foo:XF)) = (float_truncate:SF foo:XF). This may eliminate double rounding, so it is unsafe. (float_truncate:SF (float_extend:XF foo:DF)) = (float_truncate:SF foo:DF). (float_truncate:DF (float_extend:XF foo:SF)) = (float_extend:SF foo:DF). */ if ((GET_CODE (XEXP (x, 0)) == FLOAT_TRUNCATE && flag_unsafe_math_optimizations) || GET_CODE (XEXP (x, 0)) == FLOAT_EXTEND) return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) > GET_MODE_SIZE (mode) ? FLOAT_TRUNCATE : FLOAT_EXTEND, mode, XEXP (XEXP (x, 0), 0), mode); /* (float_truncate (float x)) is (float x) */ if (GET_CODE (XEXP (x, 0)) == FLOAT && (flag_unsafe_math_optimizations || ((unsigned)significand_size (GET_MODE (XEXP (x, 0))) >= (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) - num_sign_bit_copies (XEXP (XEXP (x, 0), 0), GET_MODE (XEXP (XEXP (x, 0), 0))))))) return simplify_gen_unary (FLOAT, mode, XEXP (XEXP (x, 0), 0), GET_MODE (XEXP (XEXP (x, 0), 0))); /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is (OP:SF foo:SF) if OP is NEG or ABS. */ if ((GET_CODE (XEXP (x, 0)) == ABS || GET_CODE (XEXP (x, 0)) == NEG) && GET_CODE (XEXP (XEXP (x, 0), 0)) == FLOAT_EXTEND && GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == mode) return simplify_gen_unary (GET_CODE (XEXP (x, 0)), mode, XEXP (XEXP (XEXP (x, 0), 0), 0), mode); /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0)) is (float_truncate:SF x). */ if (GET_CODE (XEXP (x, 0)) == SUBREG && subreg_lowpart_p (XEXP (x, 0)) && GET_CODE (SUBREG_REG (XEXP (x, 0))) == FLOAT_TRUNCATE) return SUBREG_REG (XEXP (x, 0)); break; case FLOAT_EXTEND: /* (float_extend (float_extend x)) is (float_extend x) (float_extend (float x)) is (float x) assuming that double rounding can't happen. */ if (GET_CODE (XEXP (x, 0)) == FLOAT_EXTEND || (GET_CODE (XEXP (x, 0)) == FLOAT && ((unsigned)significand_size (GET_MODE (XEXP (x, 0))) >= (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (x, 0), 0))) - num_sign_bit_copies (XEXP (XEXP (x, 0), 0), GET_MODE (XEXP (XEXP (x, 0), 0))))))) return simplify_gen_unary (GET_CODE (XEXP (x, 0)), mode, XEXP (XEXP (x, 0), 0), GET_MODE (XEXP (XEXP (x, 0), 0))); break; #ifdef HAVE_cc0 case COMPARE: /* Convert (compare FOO (const_int 0)) to FOO unless we aren't using cc0, in which case we want to leave it as a COMPARE so we can distinguish it from a register-register-copy. 
*/ if (XEXP (x, 1) == const0_rtx) return XEXP (x, 0); /* x - 0 is the same as x unless x's mode has signed zeros and allows rounding towards -infinity. Under those conditions, 0 - 0 is -0. */ if (!(HONOR_SIGNED_ZEROS (GET_MODE (XEXP (x, 0))) && HONOR_SIGN_DEPENDENT_ROUNDING (GET_MODE (XEXP (x, 0)))) && XEXP (x, 1) == CONST0_RTX (GET_MODE (XEXP (x, 0)))) return XEXP (x, 0); break; #endif case CONST: /* (const (const X)) can become (const X). Do it this way rather than returning the inner CONST since CONST can be shared with a REG_EQUAL note. */ if (GET_CODE (XEXP (x, 0)) == CONST) SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0)); break; #ifdef HAVE_lo_sum case LO_SUM: /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we can add in an offset. find_split_point will split this address up again if it doesn't match. */ if (GET_CODE (XEXP (x, 0)) == HIGH && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1))) return XEXP (x, 1); break; #endif case PLUS: /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */ if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (XEXP (x, 0), 0)) == NEG) { rtx in1, in2; in1 = XEXP (XEXP (XEXP (x, 0), 0), 0); in2 = XEXP (XEXP (x, 0), 1); return gen_binary (MINUS, mode, XEXP (x, 1), gen_binary (MULT, mode, in1, in2)); } /* If we have (plus (plus (A const) B)), associate it so that CONST is outermost. That's because that's the way indexed addresses are supposed to appear. This code used to check many more cases, but they are now checked elsewhere. */ if (GET_CODE (XEXP (x, 0)) == PLUS && CONSTANT_ADDRESS_P (XEXP (XEXP (x, 0), 1))) return gen_binary (PLUS, mode, gen_binary (PLUS, mode, XEXP (XEXP (x, 0), 0), XEXP (x, 1)), XEXP (XEXP (x, 0), 1)); /* (plus (xor (and (const_int pow2 - 1)) ) <-c>) when c is (const_int (pow2 + 1) / 2) is a sign extension of a bit-field and can be replaced by either a sign_extend or a sign_extract. The `and' may be a zero_extend and the two , - constants may be reversed. */ if (GET_CODE (XEXP (x, 0)) == XOR && GET_CODE (XEXP (x, 1)) == CONST_INT && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1)) && ((i = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) >= 0 || (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0) && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT && (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)) == ((HOST_WIDE_INT) 1 << (i + 1)) - 1)) || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0))) == (unsigned int) i + 1)))) return simplify_shift_const (NULL_RTX, ASHIFTRT, mode, simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (XEXP (XEXP (x, 0), 0), 0), GET_MODE_BITSIZE (mode) - (i + 1)), GET_MODE_BITSIZE (mode) - (i + 1)); /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE is 1. This produces better code than the alternative immediately below. */ if (COMPARISON_P (XEXP (x, 0)) && ((STORE_FLAG_VALUE == -1 && XEXP (x, 1) == const1_rtx) || (STORE_FLAG_VALUE == 1 && XEXP (x, 1) == constm1_rtx)) && (reversed = reversed_comparison (XEXP (x, 0), mode, XEXP (XEXP (x, 0), 0), XEXP (XEXP (x, 0), 1)))) return simplify_gen_unary (NEG, mode, reversed, mode); /* If only the low-order bit of X is possibly nonzero, (plus x -1) can become (ashiftrt (ashift (xor x 1) C) C) where C is the bitsize of the mode - 1. 
This allows simplification of "a = (b & 8) == 0;" */ if (XEXP (x, 1) == constm1_rtx && !REG_P (XEXP (x, 0)) && ! (GET_CODE (XEXP (x, 0)) == SUBREG && REG_P (SUBREG_REG (XEXP (x, 0)))) && nonzero_bits (XEXP (x, 0), mode) == 1) return simplify_shift_const (NULL_RTX, ASHIFTRT, mode, simplify_shift_const (NULL_RTX, ASHIFT, mode, gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx), GET_MODE_BITSIZE (mode) - 1), GET_MODE_BITSIZE (mode) - 1); /* If we are adding two things that have no bits in common, convert the addition into an IOR. This will often be further simplified, for example in cases like ((a & 1) + (a & 2)), which can become a & 3. */ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (XEXP (x, 0), mode) & nonzero_bits (XEXP (x, 1), mode)) == 0) { /* Try to simplify the expression further. */ rtx tor = gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1)); temp = combine_simplify_rtx (tor, mode, in_dest); /* If we could, great. If not, do not go ahead with the IOR replacement, since PLUS appears in many special purpose address arithmetic instructions. */ if (GET_CODE (temp) != CLOBBER && temp != tor) return temp; } break; case MINUS: /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done by reversing the comparison code if valid. */ if (STORE_FLAG_VALUE == 1 && XEXP (x, 0) == const1_rtx && COMPARISON_P (XEXP (x, 1)) && (reversed = reversed_comparison (XEXP (x, 1), mode, XEXP (XEXP (x, 1), 0), XEXP (XEXP (x, 1), 1)))) return reversed; /* (minus (and (const_int -pow2))) becomes (and (const_int pow2-1)) */ if (GET_CODE (XEXP (x, 1)) == AND && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT && exact_log2 (-INTVAL (XEXP (XEXP (x, 1), 1))) >= 0 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0))) return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0), -INTVAL (XEXP (XEXP (x, 1), 1)) - 1); /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */ if (GET_CODE (XEXP (x, 1)) == MULT && GET_CODE (XEXP (XEXP (x, 1), 0)) == NEG) { rtx in1, in2; in1 = XEXP (XEXP (XEXP (x, 1), 0), 0); in2 = XEXP (XEXP (x, 1), 1); return gen_binary (PLUS, mode, gen_binary (MULT, mode, in1, in2), XEXP (x, 0)); } /* Canonicalize (minus (neg A) (mult B C)) to (minus (mult (neg B) C) A). */ if (GET_CODE (XEXP (x, 1)) == MULT && GET_CODE (XEXP (x, 0)) == NEG) { rtx in1, in2; in1 = simplify_gen_unary (NEG, mode, XEXP (XEXP (x, 1), 0), mode); in2 = XEXP (XEXP (x, 1), 1); return gen_binary (MINUS, mode, gen_binary (MULT, mode, in1, in2), XEXP (XEXP (x, 0), 0)); } /* Canonicalize (minus A (plus B C)) to (minus (minus A B) C) for integers. */ if (GET_CODE (XEXP (x, 1)) == PLUS && INTEGRAL_MODE_P (mode)) return gen_binary (MINUS, mode, gen_binary (MINUS, mode, XEXP (x, 0), XEXP (XEXP (x, 1), 0)), XEXP (XEXP (x, 1), 1)); break; case MULT: /* If we have (mult (plus A B) C), apply the distributive law and then the inverse distributive law to see if things simplify. This occurs mostly in addresses, often when unrolling loops. */ if (GET_CODE (XEXP (x, 0)) == PLUS) { x = apply_distributive_law (gen_binary (PLUS, mode, gen_binary (MULT, mode, XEXP (XEXP (x, 0), 0), XEXP (x, 1)), gen_binary (MULT, mode, XEXP (XEXP (x, 0), 1), copy_rtx (XEXP (x, 1))))); if (GET_CODE (x) != MULT) return x; } /* Try simplify a*(b/c) as (a*b)/c. 
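The reassociation can change rounding and overflow behavior for floating point, which is why the code below only attempts it for FLOAT_MODE_P modes under flag_unsafe_math_optimizations.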
*/ if (FLOAT_MODE_P (mode) && flag_unsafe_math_optimizations && GET_CODE (XEXP (x, 0)) == DIV) { rtx tem = simplify_binary_operation (MULT, mode, XEXP (XEXP (x, 0), 0), XEXP (x, 1)); if (tem) return gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1)); } break; case UDIV: /* If this is a divide by a power of two, treat it as a shift if its first operand is a shift. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0 && (GET_CODE (XEXP (x, 0)) == ASHIFT || GET_CODE (XEXP (x, 0)) == LSHIFTRT || GET_CODE (XEXP (x, 0)) == ASHIFTRT || GET_CODE (XEXP (x, 0)) == ROTATE || GET_CODE (XEXP (x, 0)) == ROTATERT)) return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i); break; case EQ: case NE: case GT: case GTU: case GE: case GEU: case LT: case LTU: case LE: case LEU: case UNEQ: case LTGT: case UNGT: case UNGE: case UNLT: case UNLE: case UNORDERED: case ORDERED: /* If the first operand is a condition code, we can't do anything with it. */ if (GET_CODE (XEXP (x, 0)) == COMPARE || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC && ! CC0_P (XEXP (x, 0)))) { rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); enum rtx_code new_code; if (GET_CODE (op0) == COMPARE) op1 = XEXP (op0, 1), op0 = XEXP (op0, 0); /* Simplify our comparison, if possible. */ new_code = simplify_comparison (code, &op0, &op1); /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X if only the low-order bit is possibly nonzero in X (such as when X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to (xor X 1) or (minus 1 X); we use the former. Finally, if X is known to be either 0 or -1, NE becomes a NEG and EQ becomes (plus X 1). Remove any ZERO_EXTRACT we made when thinking this was a comparison. It may now be simpler to use, e.g., an AND. If a ZERO_EXTRACT is indeed appropriate, it will be placed back by the call to make_compound_operation in the SET case. */ if (STORE_FLAG_VALUE == 1 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && mode == GET_MODE (op0) && nonzero_bits (op0, mode) == 1) return gen_lowpart (mode, expand_compound_operation (op0)); else if (STORE_FLAG_VALUE == 1 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && mode == GET_MODE (op0) && (num_sign_bit_copies (op0, mode) == GET_MODE_BITSIZE (mode))) { op0 = expand_compound_operation (op0); return simplify_gen_unary (NEG, mode, gen_lowpart (mode, op0), mode); } else if (STORE_FLAG_VALUE == 1 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && mode == GET_MODE (op0) && nonzero_bits (op0, mode) == 1) { op0 = expand_compound_operation (op0); return gen_binary (XOR, mode, gen_lowpart (mode, op0), const1_rtx); } else if (STORE_FLAG_VALUE == 1 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && mode == GET_MODE (op0) && (num_sign_bit_copies (op0, mode) == GET_MODE_BITSIZE (mode))) { op0 = expand_compound_operation (op0); return plus_constant (gen_lowpart (mode, op0), 1); } /* If STORE_FLAG_VALUE is -1, we have cases similar to those above. 
*/ if (STORE_FLAG_VALUE == -1 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && (num_sign_bit_copies (op0, mode) == GET_MODE_BITSIZE (mode))) return gen_lowpart (mode, expand_compound_operation (op0)); else if (STORE_FLAG_VALUE == -1 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && mode == GET_MODE (op0) && nonzero_bits (op0, mode) == 1) { op0 = expand_compound_operation (op0); return simplify_gen_unary (NEG, mode, gen_lowpart (mode, op0), mode); } else if (STORE_FLAG_VALUE == -1 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && mode == GET_MODE (op0) && (num_sign_bit_copies (op0, mode) == GET_MODE_BITSIZE (mode))) { op0 = expand_compound_operation (op0); return simplify_gen_unary (NOT, mode, gen_lowpart (mode, op0), mode); } /* If X is 0/1, (eq X 0) is X-1. */ else if (STORE_FLAG_VALUE == -1 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && mode == GET_MODE (op0) && nonzero_bits (op0, mode) == 1) { op0 = expand_compound_operation (op0); return plus_constant (gen_lowpart (mode, op0), -1); } /* If STORE_FLAG_VALUE says to just test the sign bit and X has just one bit that might be nonzero, we can convert (ne x 0) to (ashift x c) where C puts the bit in the sign bit. Remove any AND with STORE_FLAG_VALUE when we are done, since we are only going to test the sign bit. */ if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode)) == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)) && op1 == const0_rtx && mode == GET_MODE (op0) && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0) { x = simplify_shift_const (NULL_RTX, ASHIFT, mode, expand_compound_operation (op0), GET_MODE_BITSIZE (mode) - 1 - i); if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx) return XEXP (x, 0); else return x; } /* If the code changed, return a whole new comparison. */ if (new_code != code) return gen_rtx_fmt_ee (new_code, mode, op0, op1); /* Otherwise, keep this operation, but maybe change its operands. This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */ SUBST (XEXP (x, 0), op0); SUBST (XEXP (x, 1), op1); } break; case IF_THEN_ELSE: return simplify_if_then_else (x); case ZERO_EXTRACT: case SIGN_EXTRACT: case ZERO_EXTEND: case SIGN_EXTEND: /* If we are processing SET_DEST, we are done. */ if (in_dest) return x; return expand_compound_operation (x); case SET: return simplify_set (x); case AND: case IOR: case XOR: return simplify_logical (x); case ABS: /* (abs (neg )) -> (abs ) */ if (GET_CODE (XEXP (x, 0)) == NEG) SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0)); /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS), do nothing. */ if (GET_MODE (XEXP (x, 0)) == VOIDmode) break; /* If operand is something known to be positive, ignore the ABS. */ if (GET_CODE (XEXP (x, 0)) == FFS || GET_CODE (XEXP (x, 0)) == ABS || ((GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) <= HOST_BITS_PER_WIDE_INT) && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0))) & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))) == 0))) return XEXP (x, 0); /* If operand is known to be only -1 or 0, convert ABS to NEG. 
*/ if (num_sign_bit_copies (XEXP (x, 0), mode) == GET_MODE_BITSIZE (mode)) return gen_rtx_NEG (mode, XEXP (x, 0)); break; case FFS: /* (ffs (*_extend )) = (ffs ) */ if (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND) SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0)); break; case POPCOUNT: case PARITY: /* (pop* (zero_extend )) = (pop* ) */ if (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND) SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0)); break; case FLOAT: /* (float (sign_extend )) = (float ). */ if (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0)); break; case ASHIFT: case LSHIFTRT: case ASHIFTRT: case ROTATE: case ROTATERT: /* If this is a shift by a constant amount, simplify it. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT) return simplify_shift_const (x, code, mode, XEXP (x, 0), INTVAL (XEXP (x, 1))); else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1))) SUBST (XEXP (x, 1), force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)), ((HOST_WIDE_INT) 1 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x)))) - 1, NULL_RTX, 0)); break; case VEC_SELECT: { rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); int len; if (GET_CODE (op1) != PARALLEL) abort (); len = XVECLEN (op1, 0); if (len == 1 && GET_CODE (XVECEXP (op1, 0, 0)) == CONST_INT && GET_CODE (op0) == VEC_CONCAT) { int offset = INTVAL (XVECEXP (op1, 0, 0)) * GET_MODE_SIZE (GET_MODE (x)); /* Try to find the element in the VEC_CONCAT. */ for (;;) { if (GET_MODE (op0) == GET_MODE (x)) return op0; if (GET_CODE (op0) == VEC_CONCAT) { HOST_WIDE_INT op0_size = GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))); if (op0_size < offset) op0 = XEXP (op0, 0); else { offset -= op0_size; op0 = XEXP (op0, 1); } } else break; } } } break; default: break; } return x; } /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */ static rtx simplify_if_then_else (rtx x) { enum machine_mode mode = GET_MODE (x); rtx cond = XEXP (x, 0); rtx true_rtx = XEXP (x, 1); rtx false_rtx = XEXP (x, 2); enum rtx_code true_code = GET_CODE (cond); int comparison_p = COMPARISON_P (cond); rtx temp; int i; enum rtx_code false_code; rtx reversed; /* Simplify storing of the truth value. */ if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx) return gen_binary (true_code, mode, XEXP (cond, 0), XEXP (cond, 1)); /* Also when the truth value has to be reversed. */ if (comparison_p && true_rtx == const0_rtx && false_rtx == const_true_rtx && (reversed = reversed_comparison (cond, mode, XEXP (cond, 0), XEXP (cond, 1)))) return reversed; /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used in it is being compared against certain values. Get the true and false comparisons and see if that says anything about the value of each arm. */ if (comparison_p && ((false_code = combine_reversed_comparison_code (cond)) != UNKNOWN) && REG_P (XEXP (cond, 0))) { HOST_WIDE_INT nzb; rtx from = XEXP (cond, 0); rtx true_val = XEXP (cond, 1); rtx false_val = true_val; int swapped = 0; /* If FALSE_CODE is EQ, swap the codes and arms. */ if (false_code == EQ) { swapped = 1, true_code = EQ, false_code = NE; temp = true_rtx, true_rtx = false_rtx, false_rtx = temp; } /* If we are comparing against zero and the expression being tested has only a single bit that might be nonzero, that is its value when it is not equal to zero. Similarly if it is known to be -1 or 0. 
*/ if (true_code == EQ && true_val == const0_rtx && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0) false_code = EQ, false_val = GEN_INT (nzb); else if (true_code == EQ && true_val == const0_rtx && (num_sign_bit_copies (from, GET_MODE (from)) == GET_MODE_BITSIZE (GET_MODE (from)))) false_code = EQ, false_val = constm1_rtx; /* Now simplify an arm if we know the value of the register in the branch and it is used in the arm. Be careful due to the potential of locally-shared RTL. */ if (reg_mentioned_p (from, true_rtx)) true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code, from, true_val), pc_rtx, pc_rtx, 0, 0); if (reg_mentioned_p (from, false_rtx)) false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code, from, false_val), pc_rtx, pc_rtx, 0, 0); SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx); SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx); true_rtx = XEXP (x, 1); false_rtx = XEXP (x, 2); true_code = GET_CODE (cond); } /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be reversed, do so to avoid needing two sets of patterns for subtract-and-branch insns. Similarly if we have a constant in the true arm, the false arm is the same as the first operand of the comparison, or the false arm is more complicated than the true arm. */ if (comparison_p && combine_reversed_comparison_code (cond) != UNKNOWN && (true_rtx == pc_rtx || (CONSTANT_P (true_rtx) && GET_CODE (false_rtx) != CONST_INT && false_rtx != pc_rtx) || true_rtx == const0_rtx || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx)) || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx)) && !OBJECT_P (false_rtx)) || reg_mentioned_p (true_rtx, false_rtx) || rtx_equal_p (false_rtx, XEXP (cond, 0)))) { true_code = reversed_comparison_code (cond, NULL); SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond), XEXP (cond, 0), XEXP (cond, 1))); SUBST (XEXP (x, 1), false_rtx); SUBST (XEXP (x, 2), true_rtx); temp = true_rtx, true_rtx = false_rtx, false_rtx = temp; cond = XEXP (x, 0); /* It is possible that the conditional has been simplified out. */ true_code = GET_CODE (cond); comparison_p = COMPARISON_P (cond); } /* If the two arms are identical, we don't need the comparison. */ if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond)) return true_rtx; /* Convert a == b ? b : a to "a". */ if (true_code == EQ && ! side_effects_p (cond) && !HONOR_NANS (mode) && rtx_equal_p (XEXP (cond, 0), false_rtx) && rtx_equal_p (XEXP (cond, 1), true_rtx)) return false_rtx; else if (true_code == NE && ! side_effects_p (cond) && !HONOR_NANS (mode) && rtx_equal_p (XEXP (cond, 0), true_rtx) && rtx_equal_p (XEXP (cond, 1), false_rtx)) return true_rtx; /* Look for cases where we have (abs x) or (neg (abs X)). */ if (GET_MODE_CLASS (mode) == MODE_INT && GET_CODE (false_rtx) == NEG && rtx_equal_p (true_rtx, XEXP (false_rtx, 0)) && comparison_p && rtx_equal_p (true_rtx, XEXP (cond, 0)) && ! side_effects_p (true_rtx)) switch (true_code) { case GT: case GE: return simplify_gen_unary (ABS, mode, true_rtx, mode); case LT: case LE: return simplify_gen_unary (NEG, mode, simplify_gen_unary (ABS, mode, true_rtx, mode), mode); default: break; } /* Look for MIN or MAX. */ if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) && comparison_p && rtx_equal_p (XEXP (cond, 0), true_rtx) && rtx_equal_p (XEXP (cond, 1), false_rtx) && ! 
side_effects_p (cond)) switch (true_code) { case GE: case GT: return gen_binary (SMAX, mode, true_rtx, false_rtx); case LE: case LT: return gen_binary (SMIN, mode, true_rtx, false_rtx); case GEU: case GTU: return gen_binary (UMAX, mode, true_rtx, false_rtx); case LEU: case LTU: return gen_binary (UMIN, mode, true_rtx, false_rtx); default: break; } /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its second operand is zero, this can be done as (OP Z (mult COND C2)) where C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or SIGN_EXTEND as long as Z is already extended (so we don't destroy it). We can do this kind of thing in some cases when STORE_FLAG_VALUE is neither 1 or -1, but it isn't worth checking for. */ if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) && comparison_p && GET_MODE_CLASS (mode) == MODE_INT && ! side_effects_p (x)) { rtx t = make_compound_operation (true_rtx, SET); rtx f = make_compound_operation (false_rtx, SET); rtx cond_op0 = XEXP (cond, 0); rtx cond_op1 = XEXP (cond, 1); enum rtx_code op = NIL, extend_op = NIL; enum machine_mode m = mode; rtx z = 0, c1 = NULL_RTX; if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS || GET_CODE (t) == IOR || GET_CODE (t) == XOR || GET_CODE (t) == ASHIFT || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT) && rtx_equal_p (XEXP (t, 0), f)) c1 = XEXP (t, 1), op = GET_CODE (t), z = f; /* If an identity-zero op is commutative, check whether there would be a match if we swapped the operands. */ else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR || GET_CODE (t) == XOR) && rtx_equal_p (XEXP (t, 1), f)) c1 = XEXP (t, 0), op = GET_CODE (t), z = f; else if (GET_CODE (t) == SIGN_EXTEND && (GET_CODE (XEXP (t, 0)) == PLUS || GET_CODE (XEXP (t, 0)) == MINUS || GET_CODE (XEXP (t, 0)) == IOR || GET_CODE (XEXP (t, 0)) == XOR || GET_CODE (XEXP (t, 0)) == ASHIFT || GET_CODE (XEXP (t, 0)) == LSHIFTRT || GET_CODE (XEXP (t, 0)) == ASHIFTRT) && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG && subreg_lowpart_p (XEXP (XEXP (t, 0), 0)) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) && (num_sign_bit_copies (f, GET_MODE (f)) > (unsigned int) (GET_MODE_BITSIZE (mode) - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0)))))) { c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0)); extend_op = SIGN_EXTEND; m = GET_MODE (XEXP (t, 0)); } else if (GET_CODE (t) == SIGN_EXTEND && (GET_CODE (XEXP (t, 0)) == PLUS || GET_CODE (XEXP (t, 0)) == IOR || GET_CODE (XEXP (t, 0)) == XOR) && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG && subreg_lowpart_p (XEXP (XEXP (t, 0), 1)) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) && (num_sign_bit_copies (f, GET_MODE (f)) > (unsigned int) (GET_MODE_BITSIZE (mode) - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1)))))) { c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0)); extend_op = SIGN_EXTEND; m = GET_MODE (XEXP (t, 0)); } else if (GET_CODE (t) == ZERO_EXTEND && (GET_CODE (XEXP (t, 0)) == PLUS || GET_CODE (XEXP (t, 0)) == MINUS || GET_CODE (XEXP (t, 0)) == IOR || GET_CODE (XEXP (t, 0)) == XOR || GET_CODE (XEXP (t, 0)) == ASHIFT || GET_CODE (XEXP (t, 0)) == LSHIFTRT || GET_CODE (XEXP (t, 0)) == ASHIFTRT) && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && subreg_lowpart_p (XEXP (XEXP (t, 0), 0)) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) && ((nonzero_bits (f, GET_MODE (f)) & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0)))) == 0)) { c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 
0)); extend_op = ZERO_EXTEND; m = GET_MODE (XEXP (t, 0)); } else if (GET_CODE (t) == ZERO_EXTEND && (GET_CODE (XEXP (t, 0)) == PLUS || GET_CODE (XEXP (t, 0)) == IOR || GET_CODE (XEXP (t, 0)) == XOR) && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && subreg_lowpart_p (XEXP (XEXP (t, 0), 1)) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) && ((nonzero_bits (f, GET_MODE (f)) & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1)))) == 0)) { c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0)); extend_op = ZERO_EXTEND; m = GET_MODE (XEXP (t, 0)); } if (z) { temp = subst (gen_binary (true_code, m, cond_op0, cond_op1), pc_rtx, pc_rtx, 0, 0); temp = gen_binary (MULT, m, temp, gen_binary (MULT, m, c1, const_true_rtx)); temp = subst (temp, pc_rtx, pc_rtx, 0, 0); temp = gen_binary (op, m, gen_lowpart (m, z), temp); if (extend_op != NIL) temp = simplify_gen_unary (extend_op, mode, temp, m); return temp; } } /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the negation of a single bit, we can convert this operation to a shift. We can actually do this more generally, but it doesn't seem worth it. */ if (true_code == NE && XEXP (cond, 1) == const0_rtx && false_rtx == const0_rtx && GET_CODE (true_rtx) == CONST_INT && ((1 == nonzero_bits (XEXP (cond, 0), mode) && (i = exact_log2 (INTVAL (true_rtx))) >= 0) || ((num_sign_bit_copies (XEXP (cond, 0), mode) == GET_MODE_BITSIZE (mode)) && (i = exact_log2 (-INTVAL (true_rtx))) >= 0))) return simplify_shift_const (NULL_RTX, ASHIFT, mode, gen_lowpart (mode, XEXP (cond, 0)), i); /* (IF_THEN_ELSE (NE REG 0) (0) (8)) is REG for nonzero_bits (REG) == 8. */ if (true_code == NE && XEXP (cond, 1) == const0_rtx && false_rtx == const0_rtx && GET_CODE (true_rtx) == CONST_INT && GET_MODE (XEXP (cond, 0)) == mode && (INTVAL (true_rtx) & GET_MODE_MASK (mode)) == nonzero_bits (XEXP (cond, 0), mode) && (i = exact_log2 (INTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0) return XEXP (cond, 0); return x; } /* Simplify X, a SET expression. Return the new expression. */ static rtx simplify_set (rtx x) { rtx src = SET_SRC (x); rtx dest = SET_DEST (x); enum machine_mode mode = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest); rtx other_insn; rtx *cc_use; /* (set (pc) (return)) gets written as (return). */ if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN) return src; /* Now that we know for sure which bits of SRC we are using, see if we can simplify the expression for the object knowing that we only need the low-order bits. */ if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) { src = force_to_mode (src, mode, ~(HOST_WIDE_INT) 0, NULL_RTX, 0); SUBST (SET_SRC (x), src); } /* If we are setting CC0 or if the source is a COMPARE, look for the use of the comparison result and try to simplify it unless we already have used undobuf.other_insn. 
*/ if ((GET_MODE_CLASS (mode) == MODE_CC || GET_CODE (src) == COMPARE || CC0_P (dest)) && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn) && COMPARISON_P (*cc_use) && rtx_equal_p (XEXP (*cc_use, 0), dest)) { enum rtx_code old_code = GET_CODE (*cc_use); enum rtx_code new_code; rtx op0, op1, tmp; int other_changed = 0; enum machine_mode compare_mode = GET_MODE (dest); if (GET_CODE (src) == COMPARE) op0 = XEXP (src, 0), op1 = XEXP (src, 1); else op0 = src, op1 = const0_rtx; tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode, op0, op1); if (!tmp) new_code = old_code; else if (!CONSTANT_P (tmp)) { new_code = GET_CODE (tmp); op0 = XEXP (tmp, 0); op1 = XEXP (tmp, 1); } else { rtx pat = PATTERN (other_insn); undobuf.other_insn = other_insn; SUBST (*cc_use, tmp); /* Attempt to simplify CC user. */ if (GET_CODE (pat) == SET) { rtx new = simplify_rtx (SET_SRC (pat)); if (new != NULL_RTX) SUBST (SET_SRC (pat), new); } /* Convert X into a no-op move. */ SUBST (SET_DEST (x), pc_rtx); SUBST (SET_SRC (x), pc_rtx); return x; } /* Simplify our comparison, if possible. */ new_code = simplify_comparison (new_code, &op0, &op1); #ifdef SELECT_CC_MODE /* If this machine has CC modes other than CCmode, check to see if we need to use a different CC mode here. */ if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC) compare_mode = GET_MODE (op0); else compare_mode = SELECT_CC_MODE (new_code, op0, op1); #ifndef HAVE_cc0 /* If the mode changed, we have to change SET_DEST, the mode in the compare, and the mode in the place SET_DEST is used. If SET_DEST is a hard register, just build new versions with the proper mode. If it is a pseudo, we lose unless it is only time we set the pseudo, in which case we can safely change its mode. */ if (compare_mode != GET_MODE (dest)) { unsigned int regno = REGNO (dest); rtx new_dest = gen_rtx_REG (compare_mode, regno); if (regno < FIRST_PSEUDO_REGISTER || (REG_N_SETS (regno) == 1 && ! REG_USERVAR_P (dest))) { if (regno >= FIRST_PSEUDO_REGISTER) SUBST (regno_reg_rtx[regno], new_dest); SUBST (SET_DEST (x), new_dest); SUBST (XEXP (*cc_use, 0), new_dest); other_changed = 1; dest = new_dest; } } #endif /* cc0 */ #endif /* SELECT_CC_MODE */ /* If the code changed, we have to build a new comparison in undobuf.other_insn. */ if (new_code != old_code) { int other_changed_previously = other_changed; unsigned HOST_WIDE_INT mask; SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use), dest, const0_rtx)); other_changed = 1; /* If the only change we made was to change an EQ into an NE or vice versa, OP0 has only one bit that might be nonzero, and OP1 is zero, check if changing the user of the condition code will produce a valid insn. If it won't, we can keep the original code in that insn by surrounding our operation with an XOR. */ if (((old_code == NE && new_code == EQ) || (old_code == EQ && new_code == NE)) && ! other_changed_previously && op1 == const0_rtx && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0) { rtx pat = PATTERN (other_insn), note = 0; if ((recog_for_combine (&pat, other_insn, ¬e) < 0 && ! check_asm_operands (pat))) { PUT_CODE (*cc_use, old_code); other_changed = 0; op0 = gen_binary (XOR, GET_MODE (op0), op0, GEN_INT (mask)); } } } if (other_changed) undobuf.other_insn = other_insn; #ifdef HAVE_cc0 /* If we are now comparing against zero, change our source if needed. 
If we do not use cc0, we always have a COMPARE. */ if (op1 == const0_rtx && dest == cc0_rtx) { SUBST (SET_SRC (x), op0); src = op0; } else #endif /* Otherwise, if we didn't previously have a COMPARE in the correct mode, we need one. */ if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode) { SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1)); src = SET_SRC (x); } else { /* Otherwise, update the COMPARE if needed. */ SUBST (XEXP (src, 0), op0); SUBST (XEXP (src, 1), op1); } } else { /* Get SET_SRC in a form where we have placed back any compound expressions. Then do the checks below. */ src = make_compound_operation (src, SET); SUBST (SET_SRC (x), src); } /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation, and X being a REG or (subreg (reg)), we may be able to convert this to (set (subreg:m2 x) (op)). We can always do this if M1 is narrower than M2 because that means that we only care about the low bits of the result. However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot perform a narrower operation than requested since the high-order bits will be undefined. On machine where it is defined, this transformation is safe as long as M1 and M2 have the same number of words. */ if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src) && !OBJECT_P (SUBREG_REG (src)) && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)) #ifndef WORD_REGISTER_OPERATIONS && (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))) #endif #ifdef CANNOT_CHANGE_MODE_CLASS && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER && REG_CANNOT_CHANGE_MODE_P (REGNO (dest), GET_MODE (SUBREG_REG (src)), GET_MODE (src))) #endif && (REG_P (dest) || (GET_CODE (dest) == SUBREG && REG_P (SUBREG_REG (dest))))) { SUBST (SET_DEST (x), gen_lowpart (GET_MODE (SUBREG_REG (src)), dest)); SUBST (SET_SRC (x), SUBREG_REG (src)); src = SET_SRC (x), dest = SET_DEST (x); } #ifdef HAVE_cc0 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg in SRC. */ if (dest == cc0_rtx && GET_CODE (src) == SUBREG && subreg_lowpart_p (src) && (GET_MODE_BITSIZE (GET_MODE (src)) < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src))))) { rtx inner = SUBREG_REG (src); enum machine_mode inner_mode = GET_MODE (inner); /* Here we make sure that we don't have a sign bit on. */ if (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (inner, inner_mode) < ((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (src)) - 1)))) { SUBST (SET_SRC (x), inner); src = SET_SRC (x); } } #endif #ifdef LOAD_EXTEND_OP /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this would require a paradoxical subreg. Replace the subreg with a zero_extend to avoid the reload that would otherwise be required. */ if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src) && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != NIL && SUBREG_BYTE (src) == 0 && (GET_MODE_SIZE (GET_MODE (src)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))) && MEM_P (SUBREG_REG (src))) { SUBST (SET_SRC (x), gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))), GET_MODE (src), SUBREG_REG (src))); src = SET_SRC (x); } #endif /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we are comparing an item known to be 0 or -1 against 0, use a logical operation instead. Check for one of the arms being an IOR of the other arm with some value. 
We compute three terms to be IOR'ed together. In practice, at most two will be nonzero. Then we do the IOR's. */ if (GET_CODE (dest) != PC && GET_CODE (src) == IF_THEN_ELSE && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE) && XEXP (XEXP (src, 0), 1) == const0_rtx && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0)) #ifdef HAVE_conditional_move && ! can_conditionally_move_p (GET_MODE (src)) #endif && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), GET_MODE (XEXP (XEXP (src, 0), 0))) == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0)))) && ! side_effects_p (src)) { rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE ? XEXP (src, 1) : XEXP (src, 2)); rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE ? XEXP (src, 2) : XEXP (src, 1)); rtx term1 = const0_rtx, term2, term3; if (GET_CODE (true_rtx) == IOR && rtx_equal_p (XEXP (true_rtx, 0), false_rtx)) term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx; else if (GET_CODE (true_rtx) == IOR && rtx_equal_p (XEXP (true_rtx, 1), false_rtx)) term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx; else if (GET_CODE (false_rtx) == IOR && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)) term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx; else if (GET_CODE (false_rtx) == IOR && rtx_equal_p (XEXP (false_rtx, 1), true_rtx)) term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx; term2 = gen_binary (AND, GET_MODE (src), XEXP (XEXP (src, 0), 0), true_rtx); term3 = gen_binary (AND, GET_MODE (src), simplify_gen_unary (NOT, GET_MODE (src), XEXP (XEXP (src, 0), 0), GET_MODE (src)), false_rtx); SUBST (SET_SRC (x), gen_binary (IOR, GET_MODE (src), gen_binary (IOR, GET_MODE (src), term1, term2), term3)); src = SET_SRC (x); } /* If either SRC or DEST is a CLOBBER of (const_int 0), make this whole thing fail. */ if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx) return src; else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx) return dest; else /* Convert this into a field assignment operation, if possible. */ return make_field_assignment (x); } /* Simplify, X, and AND, IOR, or XOR operation, and return the simplified result. */ static rtx simplify_logical (rtx x) { enum machine_mode mode = GET_MODE (x); rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); rtx reversed; switch (GET_CODE (x)) { case AND: /* Convert (A ^ B) & A to A & (~B) since the latter is often a single insn (and may simplify more). */ if (GET_CODE (op0) == XOR && rtx_equal_p (XEXP (op0, 0), op1) && ! side_effects_p (op1)) x = gen_binary (AND, mode, simplify_gen_unary (NOT, mode, XEXP (op0, 1), mode), op1); if (GET_CODE (op0) == XOR && rtx_equal_p (XEXP (op0, 1), op1) && ! side_effects_p (op1)) x = gen_binary (AND, mode, simplify_gen_unary (NOT, mode, XEXP (op0, 0), mode), op1); /* Similarly for (~(A ^ B)) & A. */ if (GET_CODE (op0) == NOT && GET_CODE (XEXP (op0, 0)) == XOR && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1) && ! side_effects_p (op1)) x = gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1); if (GET_CODE (op0) == NOT && GET_CODE (XEXP (op0, 0)) == XOR && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1) && ! side_effects_p (op1)) x = gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1); /* We can call simplify_and_const_int only if we don't lose any (sign) bits when converting INTVAL (op1) to "unsigned HOST_WIDE_INT". 
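The conversion is safe either when the mode is no wider than HOST_BITS_PER_WIDE_INT, so any high-order sign copies lie outside the mode mask and do not matter, or when INTVAL (op1) is positive and cannot change value; the test below checks exactly these two cases.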
*/ if (GET_CODE (op1) == CONST_INT && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT || INTVAL (op1) > 0)) { x = simplify_and_const_int (x, mode, op0, INTVAL (op1)); /* If we have (ior (and (X C1) C2)) and the next restart would be the last, simplify this by making C1 as small as possible and then exit. Only do this if C1 actually changes: for now this only saves memory but, should this transformation be moved to simplify-rtx.c, we'd risk unbounded recursion there. */ if (GET_CODE (x) == IOR && GET_CODE (op0) == AND && GET_CODE (XEXP (op0, 1)) == CONST_INT && GET_CODE (op1) == CONST_INT && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0) return gen_binary (IOR, mode, gen_binary (AND, mode, XEXP (op0, 0), GEN_INT (INTVAL (XEXP (op0, 1)) & ~INTVAL (op1))), op1); if (GET_CODE (x) != AND) return x; op0 = XEXP (x, 0); op1 = XEXP (x, 1); } /* Convert (A | B) & A to A. */ if (GET_CODE (op0) == IOR && (rtx_equal_p (XEXP (op0, 0), op1) || rtx_equal_p (XEXP (op0, 1), op1)) && ! side_effects_p (XEXP (op0, 0)) && ! side_effects_p (XEXP (op0, 1))) return op1; /* In the following group of tests (and those in case IOR below), we start with some combination of logical operations and apply the distributive law followed by the inverse distributive law. Most of the time, this results in no change. However, if some of the operands are the same or inverses of each other, simplifications will result. For example, (and (ior A B) (not B)) can occur as the result of expanding a bit field assignment. When we apply the distributive law to this, we get (ior (and (A (not B))) (and (B (not B)))), which then simplifies to (and (A (not B))). If we have (and (ior A B) C), apply the distributive law and then the inverse distributive law to see if things simplify. */ if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR) { x = apply_distributive_law (gen_binary (GET_CODE (op0), mode, gen_binary (AND, mode, XEXP (op0, 0), op1), gen_binary (AND, mode, XEXP (op0, 1), copy_rtx (op1)))); if (GET_CODE (x) != AND) return x; } if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR) return apply_distributive_law (gen_binary (GET_CODE (op1), mode, gen_binary (AND, mode, XEXP (op1, 0), op0), gen_binary (AND, mode, XEXP (op1, 1), copy_rtx (op0)))); /* Similarly, taking advantage of the fact that (and (not A) (xor B C)) == (xor (ior A B) (ior A C)) */ if (GET_CODE (op0) == NOT && GET_CODE (op1) == XOR) return apply_distributive_law (gen_binary (XOR, mode, gen_binary (IOR, mode, XEXP (op0, 0), XEXP (op1, 0)), gen_binary (IOR, mode, copy_rtx (XEXP (op0, 0)), XEXP (op1, 1)))); else if (GET_CODE (op1) == NOT && GET_CODE (op0) == XOR) return apply_distributive_law (gen_binary (XOR, mode, gen_binary (IOR, mode, XEXP (op1, 0), XEXP (op0, 0)), gen_binary (IOR, mode, copy_rtx (XEXP (op1, 0)), XEXP (op0, 1)))); break; case IOR: /* (ior A C) is C if all bits of A that might be nonzero are on in C. */ if (GET_CODE (op1) == CONST_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0) return op1; /* Convert (A & B) | A to A. */ if (GET_CODE (op0) == AND && (rtx_equal_p (XEXP (op0, 0), op1) || rtx_equal_p (XEXP (op0, 1), op1)) && ! side_effects_p (XEXP (op0, 0)) && ! side_effects_p (XEXP (op0, 1))) return op1; /* If we have (ior (and A B) C), apply the distributive law and then the inverse distributive law to see if things simplify. 
*/ if (GET_CODE (op0) == AND) { x = apply_distributive_law (gen_binary (AND, mode, gen_binary (IOR, mode, XEXP (op0, 0), op1), gen_binary (IOR, mode, XEXP (op0, 1), copy_rtx (op1)))); if (GET_CODE (x) != IOR) return x; } if (GET_CODE (op1) == AND) { x = apply_distributive_law (gen_binary (AND, mode, gen_binary (IOR, mode, XEXP (op1, 0), op0), gen_binary (IOR, mode, XEXP (op1, 1), copy_rtx (op0)))); if (GET_CODE (x) != IOR) return x; } /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the mode size to (rotate A CX). */ if (((GET_CODE (op0) == ASHIFT && GET_CODE (op1) == LSHIFTRT) || (GET_CODE (op1) == ASHIFT && GET_CODE (op0) == LSHIFTRT)) && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)) && GET_CODE (XEXP (op0, 1)) == CONST_INT && GET_CODE (XEXP (op1, 1)) == CONST_INT && (INTVAL (XEXP (op0, 1)) + INTVAL (XEXP (op1, 1)) == GET_MODE_BITSIZE (mode))) return gen_rtx_ROTATE (mode, XEXP (op0, 0), (GET_CODE (op0) == ASHIFT ? XEXP (op0, 1) : XEXP (op1, 1))); /* If OP0 is (ashiftrt (plus ...) C), it might actually be a (sign_extend (plus ...)). If so, OP1 is a CONST_INT, and the PLUS does not affect any of the bits in OP1, it can really be done as a PLUS and we can associate. We do this by seeing if OP1 can be safely shifted left C bits. */ if (GET_CODE (op1) == CONST_INT && GET_CODE (op0) == ASHIFTRT && GET_CODE (XEXP (op0, 0)) == PLUS && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT && GET_CODE (XEXP (op0, 1)) == CONST_INT && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT) { int count = INTVAL (XEXP (op0, 1)); HOST_WIDE_INT mask = INTVAL (op1) << count; if (mask >> count == INTVAL (op1) && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0) { SUBST (XEXP (XEXP (op0, 0), 1), GEN_INT (INTVAL (XEXP (XEXP (op0, 0), 1)) | mask)); return op0; } } break; case XOR: /* If we are XORing two things that have no bits in common, convert them into an IOR. This helps to detect rotation encoded using those methods and possibly other simplifications. */ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (op0, mode) & nonzero_bits (op1, mode)) == 0) return (gen_binary (IOR, mode, op0, op1)); /* Convert (XOR (NOT x) (NOT y)) to (XOR x y). Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for (NOT y). */ { int num_negated = 0; if (GET_CODE (op0) == NOT) num_negated++, op0 = XEXP (op0, 0); if (GET_CODE (op1) == NOT) num_negated++, op1 = XEXP (op1, 0); if (num_negated == 2) { SUBST (XEXP (x, 0), op0); SUBST (XEXP (x, 1), op1); } else if (num_negated == 1) return simplify_gen_unary (NOT, mode, gen_binary (XOR, mode, op0, op1), mode); } /* Convert (xor (and A B) B) to (and (not A) B). The latter may correspond to a machine insn or result in further simplifications if B is a constant. */ if (GET_CODE (op0) == AND && rtx_equal_p (XEXP (op0, 1), op1) && ! side_effects_p (op1)) return gen_binary (AND, mode, simplify_gen_unary (NOT, mode, XEXP (op0, 0), mode), op1); else if (GET_CODE (op0) == AND && rtx_equal_p (XEXP (op0, 0), op1) && ! side_effects_p (op1)) return gen_binary (AND, mode, simplify_gen_unary (NOT, mode, XEXP (op0, 1), mode), op1); /* (xor (comparison foo bar) (const_int 1)) can become the reversed comparison if STORE_FLAG_VALUE is 1. */ if (STORE_FLAG_VALUE == 1 && op1 == const1_rtx && COMPARISON_P (op0) && (reversed = reversed_comparison (op0, mode, XEXP (op0, 0), XEXP (op0, 1)))) return reversed; /* (lshiftrt foo C) where C is the number of bits in FOO minus 1 is (lt foo (const_int 0)), so we can perform the above simplification if STORE_FLAG_VALUE is 1. 
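The shift isolates the sign bit, so it is 1 exactly when FOO is negative; XORing that bit with 1 therefore tests FOO >= 0, which is why a GE comparison against zero is built below.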
*/ if (STORE_FLAG_VALUE == 1 && op1 == const1_rtx && GET_CODE (op0) == LSHIFTRT && GET_CODE (XEXP (op0, 1)) == CONST_INT && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1) return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx); /* (xor (comparison foo bar) (const_int sign-bit)) when STORE_FLAG_VALUE is the sign bit. */ if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode)) == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)) && op1 == const_true_rtx && COMPARISON_P (op0) && (reversed = reversed_comparison (op0, mode, XEXP (op0, 0), XEXP (op0, 1)))) return reversed; break; default: abort (); } return x; } /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound operations" because they can be replaced with two more basic operations. ZERO_EXTEND is also considered "compound" because it can be replaced with an AND operation, which is simpler, though only one operation. The function expand_compound_operation is called with an rtx expression and will convert it to the appropriate shifts and AND operations, simplifying at each stage. The function make_compound_operation is called to convert an expression consisting of shifts and ANDs into the equivalent compound expression. It is the inverse of this function, loosely speaking. */ static rtx expand_compound_operation (rtx x) { unsigned HOST_WIDE_INT pos = 0, len; int unsignedp = 0; unsigned int modewidth; rtx tem; switch (GET_CODE (x)) { case ZERO_EXTEND: unsignedp = 1; case SIGN_EXTEND: /* We can't necessarily use a const_int for a multiword mode; it depends on implicitly extending the value. Since we don't know the right way to extend it, we can't tell whether the implicit way is right. Even for a mode that is no wider than a const_int, we can't win, because we need to sign extend one of its bits through the rest of it, and we don't know which bit. */ if (GET_CODE (XEXP (x, 0)) == CONST_INT) return x; /* Return if (subreg:MODE FROM 0) is not a safe replacement for (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM because (SUBREG (MEM...)) is guaranteed to cause the MEM to be reloaded. If not for that, MEM's would very rarely be safe. Reject MODEs bigger than a word, because we might not be able to reference a two-register group starting with an arbitrary register (and currently gen_lowpart might crash for a SUBREG). */ if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD) return x; /* Reject MODEs that aren't scalar integers because turning vector or complex modes into shifts causes problems. */ if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0)))) return x; len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))); /* If the inner object has VOIDmode (the only way this can happen is if it is an ASM_OPERANDS), we can't do anything since we don't know how much masking to do. */ if (len == 0) return x; break; case ZERO_EXTRACT: unsignedp = 1; case SIGN_EXTRACT: /* If the operand is a CLOBBER, just return it. */ if (GET_CODE (XEXP (x, 0)) == CLOBBER) return XEXP (x, 0); if (GET_CODE (XEXP (x, 1)) != CONST_INT || GET_CODE (XEXP (x, 2)) != CONST_INT || GET_MODE (XEXP (x, 0)) == VOIDmode) return x; /* Reject MODEs that aren't scalar integers because turning vector or complex modes into shifts causes problems. */ if (! 
SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0)))) return x; len = INTVAL (XEXP (x, 1)); pos = INTVAL (XEXP (x, 2)); /* If this goes outside the object being extracted, replace the object with a (use (mem ...)) construct that only combine understands and is used only for this purpose. */ if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))) SUBST (XEXP (x, 0), gen_rtx_USE (GET_MODE (x), XEXP (x, 0))); if (BITS_BIG_ENDIAN) pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos; break; default: return x; } /* Convert sign extension to zero extension, if we know that the high bit is not set, as this is easier to optimize. It will be converted back to cheaper alternative in make_extraction. */ if (GET_CODE (x) == SIGN_EXTEND && (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0))) & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) >> 1)) == 0))) { rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0)); rtx temp2 = expand_compound_operation (temp); /* Make sure this is a profitable operation. */ if (rtx_cost (x, SET) > rtx_cost (temp2, SET)) return temp2; else if (rtx_cost (x, SET) > rtx_cost (temp, SET)) return temp; else return x; } /* We can optimize some special cases of ZERO_EXTEND. */ if (GET_CODE (x) == ZERO_EXTEND) { /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we know that the last value didn't have any inappropriate bits set. */ if (GET_CODE (XEXP (x, 0)) == TRUNCATE && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x) && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x)) & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0) return XEXP (XEXP (x, 0), 0); /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */ if (GET_CODE (XEXP (x, 0)) == SUBREG && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x) && subreg_lowpart_p (XEXP (x, 0)) && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x)) & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0) return SUBREG_REG (XEXP (x, 0)); /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo is a comparison and STORE_FLAG_VALUE permits. This is like the first case, but it works even when GET_MODE (x) is larger than HOST_WIDE_INT. */ if (GET_CODE (XEXP (x, 0)) == TRUNCATE && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x) && COMPARISON_P (XEXP (XEXP (x, 0), 0)) && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) <= HOST_BITS_PER_WIDE_INT) && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0) return XEXP (XEXP (x, 0), 0); /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */ if (GET_CODE (XEXP (x, 0)) == SUBREG && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x) && subreg_lowpart_p (XEXP (x, 0)) && COMPARISON_P (SUBREG_REG (XEXP (x, 0))) && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) <= HOST_BITS_PER_WIDE_INT) && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0) return SUBREG_REG (XEXP (x, 0)); } /* If we reach here, we want to return a pair of shifts. The inner shift is a left shift of BITSIZE - POS - LEN bits. The outer shift is a right shift of BITSIZE - LEN bits. It is arithmetic or logical depending on the value of UNSIGNEDP. If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be converted into an AND of a shift. We must check for the case where the left shift would have a negative count. 
This can happen in a case like (x >> 31) & 255 on machines that can't shift by a constant. On those machines, we would first combine the shift with the AND to produce a variable-position extraction. Then the constant of 31 would be substituted in to produce such a position. */ modewidth = GET_MODE_BITSIZE (GET_MODE (x)); if (modewidth + len >= pos) tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT, GET_MODE (x), simplify_shift_const (NULL_RTX, ASHIFT, GET_MODE (x), XEXP (x, 0), modewidth - pos - len), modewidth - len); else if (unsignedp && len < HOST_BITS_PER_WIDE_INT) tem = simplify_and_const_int (NULL_RTX, GET_MODE (x), simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0), pos), ((HOST_WIDE_INT) 1 << len) - 1); else /* Any other cases we can't handle. */ return x; /* If we couldn't do this for some reason, return the original expression. */ if (GET_CODE (tem) == CLOBBER) return x; return tem; } /* X is a SET which contains an assignment of one object into a part of another (such as a bit-field assignment, STRICT_LOW_PART, or certain SUBREGS). If possible, convert it into a series of logical operations. We half-heartedly support variable positions, but do not at all support variable lengths. */ static rtx expand_field_assignment (rtx x) { rtx inner; rtx pos; /* Always counts from low bit. */ int len; rtx mask; enum machine_mode compute_mode; /* Loop until we find something we can't simplify. */ while (1) { if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG) { inner = SUBREG_REG (XEXP (SET_DEST (x), 0)); len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))); pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0))); } else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT && GET_CODE (XEXP (SET_DEST (x), 1)) == CONST_INT) { inner = XEXP (SET_DEST (x), 0); len = INTVAL (XEXP (SET_DEST (x), 1)); pos = XEXP (SET_DEST (x), 2); /* If the position is constant and spans the width of INNER, surround INNER with a USE to indicate this. */ if (GET_CODE (pos) == CONST_INT && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner))) inner = gen_rtx_USE (GET_MODE (SET_DEST (x)), inner); if (BITS_BIG_ENDIAN) { if (GET_CODE (pos) == CONST_INT) pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len - INTVAL (pos)); else if (GET_CODE (pos) == MINUS && GET_CODE (XEXP (pos, 1)) == CONST_INT && (INTVAL (XEXP (pos, 1)) == GET_MODE_BITSIZE (GET_MODE (inner)) - len)) /* If position is ADJUST - X, new position is X. */ pos = XEXP (pos, 0); else pos = gen_binary (MINUS, GET_MODE (pos), GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len), pos); } } /* A SUBREG between two modes that occupy the same numbers of words can be done by moving the SUBREG to the source. */ else if (GET_CODE (SET_DEST (x)) == SUBREG /* We need SUBREGs to compute nonzero_bits properly. */ && nonzero_sign_valid && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x)))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))) { x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)), gen_lowpart (GET_MODE (SUBREG_REG (SET_DEST (x))), SET_SRC (x))); continue; } else break; while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner)) inner = SUBREG_REG (inner); compute_mode = GET_MODE (inner); /* Don't attempt bitwise arithmetic on non-scalar integer modes. */ if (! SCALAR_INT_MODE_P (compute_mode)) { enum machine_mode imode; /* Don't do anything for vector or complex integral types. */ if (!
FLOAT_MODE_P (compute_mode)) break; /* Try to find an integral mode to pun with. */ imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0); if (imode == BLKmode) break; compute_mode = imode; inner = gen_lowpart (imode, inner); } /* Compute a mask of LEN bits, if we can do this on the host machine. */ if (len < HOST_BITS_PER_WIDE_INT) mask = GEN_INT (((HOST_WIDE_INT) 1 << len) - 1); else break; /* Now compute the equivalent expression. Make a copy of INNER for the SET_DEST in case it is a MEM into which we will substitute; we don't want shared RTL in that case. */ x = gen_rtx_SET (VOIDmode, copy_rtx (inner), gen_binary (IOR, compute_mode, gen_binary (AND, compute_mode, simplify_gen_unary (NOT, compute_mode, gen_binary (ASHIFT, compute_mode, mask, pos), compute_mode), inner), gen_binary (ASHIFT, compute_mode, gen_binary (AND, compute_mode, gen_lowpart (compute_mode, SET_SRC (x)), mask), pos))); } return x; } /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero, it is an RTX that represents a variable starting position; otherwise, POS is the (constant) starting bit position (counted from the LSB). INNER may be a USE. This will occur when we started with a bitfield that went outside the boundary of the object in memory, which is allowed on most machines. To isolate this case, we produce a USE whose mode is wide enough and surround the MEM with it. The only code that understands the USE is this routine. If it is not removed, it will cause the resulting insn not to match. UNSIGNEDP is nonzero for an unsigned reference and zero for a signed reference. IN_DEST is nonzero if this is a reference in the destination of a SET. This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero, a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will be used. IN_COMPARE is nonzero if we are in a COMPARE. This means that a ZERO_EXTRACT should be built even for bits starting at bit 0. MODE is the desired mode of the result (if IN_DEST == 0). The result is an RTX for the extraction or NULL_RTX if the target can't handle it. */ static rtx make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos, rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp, int in_dest, int in_compare) { /* This mode describes the size of the storage area to fetch the overall value from. Within that, we ignore the POS lowest bits, etc. */ enum machine_mode is_mode = GET_MODE (inner); enum machine_mode inner_mode; enum machine_mode wanted_inner_mode = byte_mode; enum machine_mode wanted_inner_reg_mode = word_mode; enum machine_mode pos_mode = word_mode; enum machine_mode extraction_mode = word_mode; enum machine_mode tmode = mode_for_size (len, MODE_INT, 1); int spans_byte = 0; rtx new = 0; rtx orig_pos_rtx = pos_rtx; HOST_WIDE_INT orig_pos; /* Get some information about INNER and get the innermost object. */ if (GET_CODE (inner) == USE) /* (use:SI (mem:QI foo)) stands for (mem:SI foo). */ /* We don't need to adjust the position because we set up the USE to pretend that it was a full-word object. */ spans_byte = 1, inner = XEXP (inner, 0); else if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner)) { /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...), consider just the QI as the memory to extract from. The subreg adds or removes high bits; its mode is irrelevant to the meaning of this extraction, since POS and LEN count from the lsb. 
*/ if (MEM_P (SUBREG_REG (inner))) is_mode = GET_MODE (SUBREG_REG (inner)); inner = SUBREG_REG (inner); } else if (GET_CODE (inner) == ASHIFT && GET_CODE (XEXP (inner, 1)) == CONST_INT && pos_rtx == 0 && pos == 0 && len > (unsigned HOST_WIDE_INT) INTVAL (XEXP (inner, 1))) { /* We're extracting the least significant bits of an rtx (ashift X (const_int C)), where LEN > C. Extract the least significant (LEN - C) bits of X, giving an rtx whose mode is MODE, then shift it left C times. */ new = make_extraction (mode, XEXP (inner, 0), 0, 0, len - INTVAL (XEXP (inner, 1)), unsignedp, in_dest, in_compare); if (new != 0) return gen_rtx_ASHIFT (mode, new, XEXP (inner, 1)); } inner_mode = GET_MODE (inner); if (pos_rtx && GET_CODE (pos_rtx) == CONST_INT) pos = INTVAL (pos_rtx), pos_rtx = 0; /* See if this can be done without an extraction. We never can if the width of the field is not the same as that of some integer mode. For registers, we can only avoid the extraction if the position is at the low-order bit and this is either not in the destination or we have the appropriate STRICT_LOW_PART operation available. For MEM, we can avoid an extract if the field starts on an appropriate boundary and we can change the mode of the memory reference. However, we cannot directly access the MEM if we have a USE and the underlying MEM is not TMODE. This combination means that MEM was being used in a context where bits outside its mode were being referenced; that is only valid in bit-field insns. */ if (tmode != BLKmode && ! (spans_byte && inner_mode != tmode) && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0 && !MEM_P (inner) && (! in_dest || (REG_P (inner) && have_insn_for (STRICT_LOW_PART, tmode)))) || (MEM_P (inner) && pos_rtx == 0 && (pos % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode) : BITS_PER_UNIT)) == 0 /* We can't do this if we are widening INNER_MODE (it may not be aligned, for one thing). */ && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode) && (inner_mode == tmode || (! mode_dependent_address_p (XEXP (inner, 0)) && ! MEM_VOLATILE_P (inner)))))) { /* If INNER is a MEM, make a new MEM that encompasses just the desired field. If the original and current mode are the same, we need not adjust the offset. Otherwise, we do if bytes big endian. If INNER is not a MEM, get a piece consisting of just the field of interest (in this case POS % BITS_PER_WORD must be 0). */ if (MEM_P (inner)) { HOST_WIDE_INT offset; /* POS counts from lsb, but make OFFSET count in memory order. */ if (BYTES_BIG_ENDIAN) offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT; else offset = pos / BITS_PER_UNIT; new = adjust_address_nv (inner, tmode, offset); } else if (REG_P (inner)) { if (tmode != inner_mode) { /* We can't call gen_lowpart in a DEST since we always want a SUBREG (see below) and it would sometimes return a new hard register. */ if (pos || in_dest) { HOST_WIDE_INT final_word = pos / BITS_PER_WORD; if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD) final_word = ((GET_MODE_SIZE (inner_mode) - GET_MODE_SIZE (tmode)) / UNITS_PER_WORD) - final_word; final_word *= UNITS_PER_WORD; if (BYTES_BIG_ENDIAN && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode)) final_word += (GET_MODE_SIZE (inner_mode) - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD; /* Avoid creating invalid subregs, for example when simplifying (x>>32)&255. 
*/ if (final_word >= GET_MODE_SIZE (inner_mode)) return NULL_RTX; new = gen_rtx_SUBREG (tmode, inner, final_word); } else new = gen_lowpart (tmode, inner); } else new = inner; } else new = force_to_mode (inner, tmode, len >= HOST_BITS_PER_WIDE_INT ? ~(unsigned HOST_WIDE_INT) 0 : ((unsigned HOST_WIDE_INT) 1 << len) - 1, NULL_RTX, 0); /* If this extraction is going into the destination of a SET, make a STRICT_LOW_PART unless we made a MEM. */ if (in_dest) return (MEM_P (new) ? new : (GET_CODE (new) != SUBREG ? gen_rtx_CLOBBER (tmode, const0_rtx) : gen_rtx_STRICT_LOW_PART (VOIDmode, new))); if (mode == tmode) return new; if (GET_CODE (new) == CONST_INT) return gen_int_mode (INTVAL (new), mode); /* If we know that no extraneous bits are set, and that the high bit is not set, convert the extraction to the cheaper of sign and zero extension, that are equivalent in these cases. */ if (flag_expensive_optimizations && (GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT && ((nonzero_bits (new, tmode) & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (tmode)) >> 1)) == 0))) { rtx temp = gen_rtx_ZERO_EXTEND (mode, new); rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new); /* Prefer ZERO_EXTENSION, since it gives more information to backends. */ if (rtx_cost (temp, SET) <= rtx_cost (temp1, SET)) return temp; return temp1; } /* Otherwise, sign- or zero-extend unless we already are in the proper mode. */ return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND, mode, new)); } /* Unless this is a COMPARE or we have a funny memory reference, don't do anything with zero-extending field extracts starting at the low-order bit since they are simple AND operations. */ if (pos_rtx == 0 && pos == 0 && ! in_dest && ! in_compare && ! spans_byte && unsignedp) return 0; /* Unless we are allowed to span bytes or INNER is not MEM, reject this if we would be spanning bytes or if the position is not a constant and the length is not 1. In all other cases, we would only be going outside our object in cases when an original shift would have been undefined. */ if (! spans_byte && MEM_P (inner) && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode)) || (pos_rtx != 0 && len != 1))) return 0; /* Get the mode to use should INNER not be a MEM, the mode for the position, and the mode for the result. */ if (in_dest && mode_for_extraction (EP_insv, -1) != MAX_MACHINE_MODE) { wanted_inner_reg_mode = mode_for_extraction (EP_insv, 0); pos_mode = mode_for_extraction (EP_insv, 2); extraction_mode = mode_for_extraction (EP_insv, 3); } if (! in_dest && unsignedp && mode_for_extraction (EP_extzv, -1) != MAX_MACHINE_MODE) { wanted_inner_reg_mode = mode_for_extraction (EP_extzv, 1); pos_mode = mode_for_extraction (EP_extzv, 3); extraction_mode = mode_for_extraction (EP_extzv, 0); } if (! in_dest && ! unsignedp && mode_for_extraction (EP_extv, -1) != MAX_MACHINE_MODE) { wanted_inner_reg_mode = mode_for_extraction (EP_extv, 1); pos_mode = mode_for_extraction (EP_extv, 3); extraction_mode = mode_for_extraction (EP_extv, 0); } /* Never narrow an object, since that might not be safe. */ if (mode != VOIDmode && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode)) extraction_mode = mode; if (pos_rtx && GET_MODE (pos_rtx) != VOIDmode && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx))) pos_mode = GET_MODE (pos_rtx); /* If this is not from memory, the desired mode is wanted_inner_reg_mode; if we have to change the mode of memory and cannot, the desired mode is EXTRACTION_MODE. 
*/ if (!MEM_P (inner)) wanted_inner_mode = wanted_inner_reg_mode; else if (inner_mode != wanted_inner_mode && (mode_dependent_address_p (XEXP (inner, 0)) || MEM_VOLATILE_P (inner))) wanted_inner_mode = extraction_mode; orig_pos = pos; if (BITS_BIG_ENDIAN) { /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to BITS_BIG_ENDIAN style. If position is constant, compute new position. Otherwise, build subtraction. Note that POS is relative to the mode of the original argument. If it's a MEM we need to recompute POS relative to that. However, if we're extracting from (or inserting into) a register, we want to recompute POS relative to wanted_inner_mode. */ int width = (MEM_P (inner) ? GET_MODE_BITSIZE (is_mode) : GET_MODE_BITSIZE (wanted_inner_mode)); if (pos_rtx == 0) pos = width - len - pos; else pos_rtx = gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx); /* POS may be less than 0 now, but we check for that below. Note that it can only be less than 0 if !MEM_P (inner). */ } /* If INNER has a wider mode, make it smaller. If this is a constant extract, try to adjust the byte to point to the byte containing the value. */ if (wanted_inner_mode != VOIDmode && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode) && ((MEM_P (inner) && (inner_mode == wanted_inner_mode || (! mode_dependent_address_p (XEXP (inner, 0)) && ! MEM_VOLATILE_P (inner)))))) { int offset = 0; /* The computations below will be correct if the machine is big endian in both bits and bytes or little endian in bits and bytes. If it is mixed, we must adjust. */ /* If bytes are big endian and we had a paradoxical SUBREG, we must adjust OFFSET to compensate. */ if (BYTES_BIG_ENDIAN && ! spans_byte && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode)) offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode); /* If this is a constant position, we can move to the desired byte. */ if (pos_rtx == 0) { offset += pos / BITS_PER_UNIT; pos %= GET_MODE_BITSIZE (wanted_inner_mode); } if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN && ! spans_byte && is_mode != wanted_inner_mode) offset = (GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (wanted_inner_mode) - offset); if (offset != 0 || inner_mode != wanted_inner_mode) inner = adjust_address_nv (inner, wanted_inner_mode, offset); } /* If INNER is not memory, we can always get it into the proper mode. If we are changing its mode, POS must be a constant and smaller than the size of the new mode. */ else if (!MEM_P (inner)) { if (GET_MODE (inner) != wanted_inner_mode && (pos_rtx != 0 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode))) return 0; inner = force_to_mode (inner, wanted_inner_mode, pos_rtx || len + orig_pos >= HOST_BITS_PER_WIDE_INT ? ~(unsigned HOST_WIDE_INT) 0 : ((((unsigned HOST_WIDE_INT) 1 << len) - 1) << orig_pos), NULL_RTX, 0); } /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we have to zero extend. Otherwise, we can just use a SUBREG. */ if (pos_rtx != 0 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx))) { rtx temp = gen_rtx_ZERO_EXTEND (pos_mode, pos_rtx); /* If we know that no extraneous bits are set, and that the high bit is not set, convert extraction to cheaper one - either SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these cases. 
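For instance (an assumed pairing of SImode POS_RTX with a wider POS_MODE, purely for illustration), if POS_RTX is (and:SI S (const_int 127)) its sign bit is known to be clear, so zero extension and sign extension of it compute the same value, and the cheaper of the two according to rtx_cost is kept below.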
*/ if (flag_expensive_optimizations && (GET_MODE_BITSIZE (GET_MODE (pos_rtx)) <= HOST_BITS_PER_WIDE_INT && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx)) & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (pos_rtx))) >> 1)) == 0))) { rtx temp1 = gen_rtx_SIGN_EXTEND (pos_mode, pos_rtx); /* Prefer ZERO_EXTENSION, since it gives more information to backends. */ if (rtx_cost (temp1, SET) < rtx_cost (temp, SET)) temp = temp1; } pos_rtx = temp; } else if (pos_rtx != 0 && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx))) pos_rtx = gen_lowpart (pos_mode, pos_rtx); /* Make POS_RTX unless we already have it and it is correct. If we don't have a POS_RTX but we do have an ORIG_POS_RTX, the latter must be a CONST_INT. */ if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos) pos_rtx = orig_pos_rtx; else if (pos_rtx == 0) pos_rtx = GEN_INT (pos); /* Make the required operation. See if we can use existing rtx. */ new = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT, extraction_mode, inner, GEN_INT (len), pos_rtx); if (! in_dest) new = gen_lowpart (mode, new); return new; } /* See if X contains an ASHIFT of COUNT or more bits that can be commuted with any other operations in X. Return X without that shift if so. */ static rtx extract_left_shift (rtx x, int count) { enum rtx_code code = GET_CODE (x); enum machine_mode mode = GET_MODE (x); rtx tem; switch (code) { case ASHIFT: /* This is the shift itself. If it is wide enough, we will return either the value being shifted if the shift count is equal to COUNT or a shift for the difference. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= count) return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0), INTVAL (XEXP (x, 1)) - count); break; case NEG: case NOT: if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0) return simplify_gen_unary (code, mode, tem, mode); break; case PLUS: case IOR: case XOR: case AND: /* If we can safely shift this constant and we find the inner shift, make a new operation. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && (INTVAL (XEXP (x, 1)) & ((((HOST_WIDE_INT) 1 << count)) - 1)) == 0 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0) return gen_binary (code, mode, tem, GEN_INT (INTVAL (XEXP (x, 1)) >> count)); break; default: break; } return 0; } /* Look at the expression rooted at X. Look for expressions equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND. Form these expressions. Return the new rtx, usually just X. Also, for machines like the VAX that don't have logical shift insns, try to convert logical to arithmetic shift operations in cases where they are equivalent. This undoes the canonicalizations to logical shifts done elsewhere. We try, as much as possible, to re-use rtl expressions to save memory. IN_CODE says what kind of expression we are processing. Normally, it is SET. In a memory address (inside a MEM, PLUS or minus, the latter two being kludges), it is MEM. When processing the arguments of a comparison or a COMPARE against zero, it is COMPARE. */ static rtx make_compound_operation (rtx x, enum rtx_code in_code) { enum rtx_code code = GET_CODE (x); enum machine_mode mode = GET_MODE (x); int mode_width = GET_MODE_BITSIZE (mode); rtx rhs, lhs; enum rtx_code next_code; int i; rtx new = 0; rtx tem; const char *fmt; /* Select the code to be used in recursive calls. Once we are inside an address, we stay there. If we have a comparison, set to COMPARE, but once inside, go back to our default of SET. 
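As a concrete sketch of what this routine produces (SImode and the register R are assumptions made only for this example), (and:SI (lshiftrt:SI R (const_int 3)) (const_int 7)) is recognized below as an unsigned field reference and becomes a (zero_extract:SI R (const_int 3) (const_int 3)), an extraction of the 3 bits starting at bit 3, when the target's extraction patterns make that form usable.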
*/ next_code = (code == MEM || code == PLUS || code == MINUS ? MEM : ((code == COMPARE || COMPARISON_P (x)) && XEXP (x, 1) == const0_rtx) ? COMPARE : in_code == COMPARE ? SET : in_code); /* Process depending on the code of this operation. If NEW is set nonzero, it will be returned. */ switch (code) { case ASHIFT: /* Convert shifts by constants into multiplications if inside an address. */ if (in_code == MEM && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT && INTVAL (XEXP (x, 1)) >= 0) { new = make_compound_operation (XEXP (x, 0), next_code); new = gen_rtx_MULT (mode, new, GEN_INT ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1)))); } break; case AND: /* If the second operand is not a constant, we can't do anything with it. */ if (GET_CODE (XEXP (x, 1)) != CONST_INT) break; /* If the constant is a power of two minus one and the first operand is a logical right shift, make an extraction. */ if (GET_CODE (XEXP (x, 0)) == LSHIFTRT && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0) { new = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code); new = make_extraction (mode, new, 0, XEXP (XEXP (x, 0), 1), i, 1, 0, in_code == COMPARE); } /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */ else if (GET_CODE (XEXP (x, 0)) == SUBREG && subreg_lowpart_p (XEXP (x, 0)) && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0) { new = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0), next_code); new = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new, 0, XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1, 0, in_code == COMPARE); } /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */ else if ((GET_CODE (XEXP (x, 0)) == XOR || GET_CODE (XEXP (x, 0)) == IOR) && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0) { /* Apply the distributive law, and then try to make extractions. */ new = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode, gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0), XEXP (x, 1)), gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1), XEXP (x, 1))); new = make_compound_operation (new, in_code); } /* If we have (and (rotate X C) M) and C is larger than the number of bits in M, this is an extraction. */ else if (GET_CODE (XEXP (x, 0)) == ROTATE && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0 && i <= INTVAL (XEXP (XEXP (x, 0), 1))) { new = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code); new = make_extraction (mode, new, (GET_MODE_BITSIZE (mode) - INTVAL (XEXP (XEXP (x, 0), 1))), NULL_RTX, i, 1, 0, in_code == COMPARE); } /* On machines without logical shifts, if the operand of the AND is a logical shift and our mask turns off all the propagated sign bits, we can replace the logical shift with an arithmetic shift.
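For instance (SImode assumed, purely illustrative), given (and:SI (lshiftrt:SI Y (const_int 24)) (const_int 255)) on such a machine, the mode mask shifted right by 24 is 255 and the AND constant has no bits outside it, so the inner shift can be rewritten as (ashiftrt:SI Y (const_int 24)) without changing the masked result.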
*/ else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT && !have_insn_for (LSHIFTRT, mode) && have_insn_for (ASHIFTRT, mode) && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT && mode_width <= HOST_BITS_PER_WIDE_INT) { unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode); mask >>= INTVAL (XEXP (XEXP (x, 0), 1)); if ((INTVAL (XEXP (x, 1)) & ~mask) == 0) SUBST (XEXP (x, 0), gen_rtx_ASHIFTRT (mode, make_compound_operation (XEXP (XEXP (x, 0), 0), next_code), XEXP (XEXP (x, 0), 1))); } /* If the constant is one less than a power of two, this might be representable by an extraction even if no shift is present. If it doesn't end up being a ZERO_EXTEND, we will ignore it unless we are in a COMPARE. */ else if ((i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0) new = make_extraction (mode, make_compound_operation (XEXP (x, 0), next_code), 0, NULL_RTX, i, 1, 0, in_code == COMPARE); /* If we are in a comparison and this is an AND with a power of two, convert this into the appropriate bit extract. */ else if (in_code == COMPARE && (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0) new = make_extraction (mode, make_compound_operation (XEXP (x, 0), next_code), i, NULL_RTX, 1, 1, 0, 1); break; case LSHIFTRT: /* If the sign bit is known to be zero, replace this with an arithmetic shift. */ if (have_insn_for (ASHIFTRT, mode) && ! have_insn_for (LSHIFTRT, mode) && mode_width <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0) { new = gen_rtx_ASHIFTRT (mode, make_compound_operation (XEXP (x, 0), next_code), XEXP (x, 1)); break; } /* ... fall through ... */ case ASHIFTRT: lhs = XEXP (x, 0); rhs = XEXP (x, 1); /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1, this is a SIGN_EXTRACT. */ if (GET_CODE (rhs) == CONST_INT && GET_CODE (lhs) == ASHIFT && GET_CODE (XEXP (lhs, 1)) == CONST_INT && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))) { new = make_compound_operation (XEXP (lhs, 0), next_code); new = make_extraction (mode, new, INTVAL (rhs) - INTVAL (XEXP (lhs, 1)), NULL_RTX, mode_width - INTVAL (rhs), code == LSHIFTRT, 0, in_code == COMPARE); break; } /* See if we have operations between an ASHIFTRT and an ASHIFT. If so, try to merge the shifts into a SIGN_EXTEND. We could also do this for some cases of SIGN_EXTRACT, but it doesn't seem worth the effort; the case checked for occurs on Alpha. */ if (!OBJECT_P (lhs) && ! (GET_CODE (lhs) == SUBREG && (OBJECT_P (SUBREG_REG (lhs)))) && GET_CODE (rhs) == CONST_INT && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT && (new = extract_left_shift (lhs, INTVAL (rhs))) != 0) new = make_extraction (mode, make_compound_operation (new, next_code), 0, NULL_RTX, mode_width - INTVAL (rhs), code == LSHIFTRT, 0, in_code == COMPARE); break; case SUBREG: /* Call ourselves recursively on the inner expression. If we are narrowing the object and it has a different RTL code from what it originally did, do this SUBREG as a force_to_mode. */ tem = make_compound_operation (SUBREG_REG (x), in_code); if (GET_CODE (tem) != GET_CODE (SUBREG_REG (x)) && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (tem)) && subreg_lowpart_p (x)) { rtx newer = force_to_mode (tem, mode, ~(HOST_WIDE_INT) 0, NULL_RTX, 0); /* If we have something other than a SUBREG, we might have done an expansion, so rerun ourselves. 
*/ if (GET_CODE (newer) != SUBREG) newer = make_compound_operation (newer, in_code); return newer; } /* If this is a paradoxical subreg, and the new code is a sign or zero extension, omit the subreg and widen the extension. If it is a regular subreg, we can still get rid of the subreg by not widening so much, or in fact removing the extension entirely. */ if ((GET_CODE (tem) == SIGN_EXTEND || GET_CODE (tem) == ZERO_EXTEND) && subreg_lowpart_p (x)) { if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (tem)) || (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (XEXP (tem, 0))))) { if (! SCALAR_INT_MODE_P (mode)) break; tem = gen_rtx_fmt_e (GET_CODE (tem), mode, XEXP (tem, 0)); } else tem = gen_lowpart (mode, XEXP (tem, 0)); return tem; } break; default: break; } if (new) { x = gen_lowpart (mode, new); code = GET_CODE (x); } /* Now recursively process each operand of this operation. */ fmt = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++) if (fmt[i] == 'e') { new = make_compound_operation (XEXP (x, i), next_code); SUBST (XEXP (x, i), new); } return x; } /* Given M, see if it is a value that would select a field of bits within an item, but not the entire word. Return -1 if not. Otherwise, return the starting position of the field, where 0 is the low-order bit. *PLEN is set to the length of the field. */ static int get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen) { /* Get the bit number of the first 1 bit from the right, -1 if none. */ int pos = exact_log2 (m & -m); int len = 0; if (pos >= 0) /* Now shift off the low-order zero bits and see if we have a power of two minus 1. */ len = exact_log2 ((m >> pos) + 1); if (len <= 0) pos = -1; *plen = len; return pos; } /* See if X can be simplified knowing that we will only refer to it in MODE and will only refer to those bits that are nonzero in MASK. If other bits are being computed or if masking operations are done that select a superset of the bits in MASK, they can sometimes be ignored. Return a possibly simplified expression, but always convert X to MODE. If X is a CONST_INT, AND the CONST_INT with MASK. Also, if REG is nonzero and X is a register equal in value to REG, replace X with REG. If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK are all off in X. This is used when X will be complemented, by either NOT, NEG, or XOR. */ static rtx force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, rtx reg, int just_select) { enum rtx_code code = GET_CODE (x); int next_select = just_select || code == XOR || code == NOT || code == NEG; enum machine_mode op_mode; unsigned HOST_WIDE_INT fuller_mask, nonzero; rtx op0, op1, temp; /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the code below will do the wrong thing since the mode of such an expression is VOIDmode. Also do nothing if X is a CLOBBER; this can happen if X was the return value from a call to gen_lowpart. */ if (code == CALL || code == ASM_OPERANDS || code == CLOBBER) return x; /* We want to perform the operation in its present mode unless we know that the operation is valid in MODE, in which case we do the operation in MODE. */ op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x)) && have_insn_for (code, mode)) ? mode : GET_MODE (x)); /* It is not valid to do a right-shift in a narrower mode than the one it came in with.
*/ if ((code == LSHIFTRT || code == ASHIFTRT) && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x))) op_mode = GET_MODE (x); /* Truncate MASK to fit OP_MODE. */ if (op_mode) mask &= GET_MODE_MASK (op_mode); /* When we have an arithmetic operation, or a shift whose count we do not know, we need to assume that all bits up to the highest-order bit in MASK will be needed. This is how we form such a mask. */ if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))) fuller_mask = ~(unsigned HOST_WIDE_INT) 0; else fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1)) - 1); /* Determine what bits of X are guaranteed to be (non)zero. */ nonzero = nonzero_bits (x, mode); /* If none of the bits in X are needed, return a zero. */ if (! just_select && (nonzero & mask) == 0) x = const0_rtx; /* If X is a CONST_INT, return a new one. Do this here since the test below will fail. */ if (GET_CODE (x) == CONST_INT) { if (SCALAR_INT_MODE_P (mode)) return gen_int_mode (INTVAL (x) & mask, mode); else { x = GEN_INT (INTVAL (x) & mask); return gen_lowpart_common (mode, x); } } /* If X is narrower than MODE and we want all the bits in X's mode, just get X in the proper mode. */ if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode) && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0) return gen_lowpart (mode, x); /* If we aren't changing the mode, X is not a SUBREG, and all zero bits in MASK are already known to be zero in X, we need not do anything. */ if (GET_MODE (x) == mode && code != SUBREG && (~mask & nonzero) == 0) return x; switch (code) { case CLOBBER: /* If X is a (clobber (const_int)), return it since we know we are generating something that won't match. */ return x; case USE: /* X is a (use (mem ..)) that was made from a bit-field extraction that spanned the boundary of the MEM. If we are now masking so it is within that boundary, we don't need the USE any more. */ if (! BITS_BIG_ENDIAN && (mask & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0) return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select); break; case SIGN_EXTEND: case ZERO_EXTEND: case ZERO_EXTRACT: case SIGN_EXTRACT: x = expand_compound_operation (x); if (GET_CODE (x) != code) return force_to_mode (x, mode, mask, reg, next_select); break; case REG: if (reg != 0 && (rtx_equal_p (get_last_value (reg), x) || rtx_equal_p (reg, get_last_value (x)))) x = reg; break; case SUBREG: if (subreg_lowpart_p (x) /* We can ignore the effect of this SUBREG if it narrows the mode or if the constant masks to zero all the bits the mode doesn't have. */ && ((GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) || (0 == (mask & GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))))))) return force_to_mode (SUBREG_REG (x), mode, mask, reg, next_select); break; case AND: /* If this is an AND with a constant, convert it into an AND whose constant is the AND of that constant with MASK. If it remains an AND of MASK, delete it since it is redundant. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT) { x = simplify_and_const_int (x, op_mode, XEXP (x, 0), mask & INTVAL (XEXP (x, 1))); /* If X is still an AND, see if it is an AND with a mask that is just some low-order bits. If so, and it is MASK, we don't need it. */ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x))) == mask)) x = XEXP (x, 0); /* If it remains an AND, try making another AND with the bits in the mode mask that aren't in MASK turned on. 
If the constant in the AND is wide enough, this might make a cheaper constant. */ if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT && GET_MODE_MASK (GET_MODE (x)) != mask && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT) { HOST_WIDE_INT cval = (INTVAL (XEXP (x, 1)) | (GET_MODE_MASK (GET_MODE (x)) & ~mask)); int width = GET_MODE_BITSIZE (GET_MODE (x)); rtx y; /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative number, sign extend it. */ if (width > 0 && width < HOST_BITS_PER_WIDE_INT && (cval & ((HOST_WIDE_INT) 1 << (width - 1))) != 0) cval |= (HOST_WIDE_INT) -1 << width; y = gen_binary (AND, GET_MODE (x), XEXP (x, 0), GEN_INT (cval)); if (rtx_cost (y, SET) < rtx_cost (x, SET)) x = y; } break; } goto binop; case PLUS: /* In (and (plus FOO C1) M), if M is a mask that just turns off low-order bits (as in an alignment operation) and FOO is already aligned to that boundary, mask C1 to that boundary as well. This may eliminate that PLUS and, later, the AND. */ { unsigned int width = GET_MODE_BITSIZE (mode); unsigned HOST_WIDE_INT smask = mask; /* If MODE is narrower than HOST_WIDE_INT and mask is a negative number, sign extend it. */ if (width < HOST_BITS_PER_WIDE_INT && (smask & ((HOST_WIDE_INT) 1 << (width - 1))) != 0) smask |= (HOST_WIDE_INT) -1 << width; if (GET_CODE (XEXP (x, 1)) == CONST_INT && exact_log2 (- smask) >= 0 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0 && (INTVAL (XEXP (x, 1)) & ~smask) != 0) return force_to_mode (plus_constant (XEXP (x, 0), (INTVAL (XEXP (x, 1)) & smask)), mode, smask, reg, next_select); } /* ... fall through ... */ case MULT: /* For PLUS, MINUS and MULT, we need any bits less significant than the most significant bit in MASK since carries from those bits will affect the bits we are interested in. */ mask = fuller_mask; goto binop; case MINUS: /* If X is (minus C Y) where C's least set bit is larger than any bit in the mask, then we may replace with (neg Y). */ if (GET_CODE (XEXP (x, 0)) == CONST_INT && (((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 0)) & -INTVAL (XEXP (x, 0)))) > mask)) { x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1), GET_MODE (x)); return force_to_mode (x, mode, mask, reg, next_select); } /* Similarly, if C contains every bit in the fuller_mask, then we may replace with (not Y). */ if (GET_CODE (XEXP (x, 0)) == CONST_INT && ((INTVAL (XEXP (x, 0)) | (HOST_WIDE_INT) fuller_mask) == INTVAL (XEXP (x, 0)))) { x = simplify_gen_unary (NOT, GET_MODE (x), XEXP (x, 1), GET_MODE (x)); return force_to_mode (x, mode, mask, reg, next_select); } mask = fuller_mask; goto binop; case IOR: case XOR: /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...) operation which may be a bitfield extraction. Ensure that the constant we form is not wider than the mode of X. 
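For example (SImode assumed for illustration), (ior:SI (lshiftrt:SI FOO (const_int 8)) (const_int 3)) can be commuted into (lshiftrt:SI (ior:SI FOO (const_int 768)) (const_int 8)), since 3 shifted left by 8 is 768, provided the other conditions tested below on MASK and on the nonzero bits hold.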
*/ if (GET_CODE (XEXP (x, 0)) == LSHIFTRT && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT && GET_CODE (XEXP (x, 1)) == CONST_INT && ((INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (INTVAL (XEXP (x, 1)))) < GET_MODE_BITSIZE (GET_MODE (x))) && (INTVAL (XEXP (x, 1)) & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0) { temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask) << INTVAL (XEXP (XEXP (x, 0), 1))); temp = gen_binary (GET_CODE (x), GET_MODE (x), XEXP (XEXP (x, 0), 0), temp); x = gen_binary (LSHIFTRT, GET_MODE (x), temp, XEXP (XEXP (x, 0), 1)); return force_to_mode (x, mode, mask, reg, next_select); } binop: /* For most binary operations, just propagate into the operation and change the mode if we have an operation of that mode. */ op0 = gen_lowpart (op_mode, force_to_mode (XEXP (x, 0), mode, mask, reg, next_select)); op1 = gen_lowpart (op_mode, force_to_mode (XEXP (x, 1), mode, mask, reg, next_select)); if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1)) x = gen_binary (code, op_mode, op0, op1); break; case ASHIFT: /* For left shifts, do the same, but just for the first operand. However, we cannot do anything with shifts where we cannot guarantee that the counts are smaller than the size of the mode because such a count will have a different meaning in a wider mode. */ if (! (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode)) && ! (GET_MODE (XEXP (x, 1)) != VOIDmode && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1))) < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))) break; /* If the shift count is a constant and we can do arithmetic in the mode of the shift, refine which bits we need. Otherwise, use the conservative form of the mask. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode) && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT) mask >>= INTVAL (XEXP (x, 1)); else mask = fuller_mask; op0 = gen_lowpart (op_mode, force_to_mode (XEXP (x, 0), op_mode, mask, reg, next_select)); if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0)) x = gen_binary (code, op_mode, op0, XEXP (x, 1)); break; case LSHIFTRT: /* Here we can only do something if the shift count is a constant, this shift constant is valid for the host, and we can do arithmetic in OP_MODE. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT) { rtx inner = XEXP (x, 0); unsigned HOST_WIDE_INT inner_mask; /* Select the mask of the bits we need for the shift operand. */ inner_mask = mask << INTVAL (XEXP (x, 1)); /* We can only change the mode of the shift if we can do arithmetic in the mode of the shift and INNER_MASK is no wider than the width of OP_MODE. */ if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT || (inner_mask & ~GET_MODE_MASK (op_mode)) != 0) op_mode = GET_MODE (x); inner = force_to_mode (inner, op_mode, inner_mask, reg, next_select); if (GET_MODE (x) != op_mode || inner != XEXP (x, 0)) x = gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1)); } /* If we have (and (lshiftrt FOO C1) C2) where the combination of the shift and AND produces only copies of the sign bit (C2 is one less than a power of two), we can do this with just a shift. 
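A small illustration (SImode and the names below are assumptions): with MASK equal to 7 and X of the form (lshiftrt:SI FOO (const_int 28)), if FOO has at least four sign-bit copies then every bit selected by MASK is a copy of the sign bit, and the code below substitutes the single shift (lshiftrt:SI FOO (const_int 29)).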
*/ if (GET_CODE (x) == LSHIFTRT && GET_CODE (XEXP (x, 1)) == CONST_INT /* The shift puts one of the sign bit copies in the least significant bit. */ && ((INTVAL (XEXP (x, 1)) + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))) >= GET_MODE_BITSIZE (GET_MODE (x))) && exact_log2 (mask + 1) >= 0 /* Number of bits left after the shift must be more than the mask needs. */ && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1)) <= GET_MODE_BITSIZE (GET_MODE (x))) /* Must be more sign bit copies than the mask needs. */ && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))) >= exact_log2 (mask + 1))) x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0), GEN_INT (GET_MODE_BITSIZE (GET_MODE (x)) - exact_log2 (mask + 1))); goto shiftrt; case ASHIFTRT: /* If we are just looking for the sign bit, we don't need this shift at all, even if it has a variable count. */ if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT && (mask == ((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1)))) return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select); /* If this is a shift by a constant, get a mask that contains those bits that are not copies of the sign bit. We then have two cases: If MASK only includes those bits, this can be a logical shift, which may allow simplifications. If MASK is a single-bit field not within those bits, we are requesting a copy of the sign bit and hence can shift the sign bit to the appropriate location. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT) { int i = -1; /* If the considered data is wider than HOST_WIDE_INT, we can't represent a mask for all its bits in a single scalar. But we only care about the lower bits, so calculate these. */ if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT) { nonzero = ~(HOST_WIDE_INT) 0; /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1)) is the number of bits a full-width mask would have set. We need only shift if these are fewer than nonzero can hold. If not, we must keep all bits set in nonzero. */ if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT) nonzero >>= INTVAL (XEXP (x, 1)) + HOST_BITS_PER_WIDE_INT - GET_MODE_BITSIZE (GET_MODE (x)) ; } else { nonzero = GET_MODE_MASK (GET_MODE (x)); nonzero >>= INTVAL (XEXP (x, 1)); } if ((mask & ~nonzero) == 0 || (i = exact_log2 (mask)) >= 0) { x = simplify_shift_const (x, LSHIFTRT, GET_MODE (x), XEXP (x, 0), i < 0 ? INTVAL (XEXP (x, 1)) : GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i); if (GET_CODE (x) != ASHIFTRT) return force_to_mode (x, mode, mask, reg, next_select); } } /* If MASK is 1, convert this to an LSHIFTRT. This can be done even if the shift count isn't a constant. */ if (mask == 1) x = gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0), XEXP (x, 1)); shiftrt: /* If this is a zero- or sign-extension operation that just affects bits we don't care about, remove it. Be sure the call above returned something that is still a shift. 
*/ if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT) && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0 && (INTVAL (XEXP (x, 1)) <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1)) && GET_CODE (XEXP (x, 0)) == ASHIFT && XEXP (XEXP (x, 0), 1) == XEXP (x, 1)) return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask, reg, next_select); break; case ROTATE: case ROTATERT: /* If the shift count is constant and we can do computations in the mode of X, compute where the bits we care about are. Otherwise, we can't do anything. Don't change the mode of the shift or propagate MODE into the shift, though. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0) { temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE, GET_MODE (x), GEN_INT (mask), XEXP (x, 1)); if (temp && GET_CODE (temp) == CONST_INT) SUBST (XEXP (x, 0), force_to_mode (XEXP (x, 0), GET_MODE (x), INTVAL (temp), reg, next_select)); } break; case NEG: /* If we just want the low-order bit, the NEG isn't needed since it won't change the low-order bit. */ if (mask == 1) return force_to_mode (XEXP (x, 0), mode, mask, reg, just_select); /* We need any bits less significant than the most significant bit in MASK since carries from those bits will affect the bits we are interested in. */ mask = fuller_mask; goto unop; case NOT: /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the same as the XOR case above. Ensure that the constant we form is not wider than the mode of X. */ if (GET_CODE (XEXP (x, 0)) == LSHIFTRT && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask) < GET_MODE_BITSIZE (GET_MODE (x))) && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT) { temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), GET_MODE (x)); temp = gen_binary (XOR, GET_MODE (x), XEXP (XEXP (x, 0), 0), temp); x = gen_binary (LSHIFTRT, GET_MODE (x), temp, XEXP (XEXP (x, 0), 1)); return force_to_mode (x, mode, mask, reg, next_select); } /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must use the full mask inside the NOT. */ mask = fuller_mask; unop: op0 = gen_lowpart (op_mode, force_to_mode (XEXP (x, 0), mode, mask, reg, next_select)); if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0)) x = simplify_gen_unary (code, op_mode, op0, op_mode); break; case NE: /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero, which is equal to STORE_FLAG_VALUE. */ if ((mask & ~STORE_FLAG_VALUE) == 0 && XEXP (x, 1) == const0_rtx && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0 && (nonzero_bits (XEXP (x, 0), mode) == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE)) return force_to_mode (XEXP (x, 0), mode, mask, reg, next_select); break; case IF_THEN_ELSE: /* We have no way of knowing if the IF_THEN_ELSE can itself be written in a narrower mode. We play it safe and do not do so. */ SUBST (XEXP (x, 1), gen_lowpart (GET_MODE (x), force_to_mode (XEXP (x, 1), mode, mask, reg, next_select))); SUBST (XEXP (x, 2), gen_lowpart (GET_MODE (x), force_to_mode (XEXP (x, 2), mode, mask, reg, next_select))); break; default: break; } /* Ensure we return a value of the proper mode. */ return gen_lowpart (mode, x); } /* Return nonzero if X is an expression that has one of two values depending on whether some other value is zero or nonzero. 
In that case, we return the value that is being tested, *PTRUE is set to the value if the rtx being returned has a nonzero value, and *PFALSE is set to the other alternative. If we return zero, we set *PTRUE and *PFALSE to X. */ static rtx if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse) { enum machine_mode mode = GET_MODE (x); enum rtx_code code = GET_CODE (x); rtx cond0, cond1, true0, true1, false0, false1; unsigned HOST_WIDE_INT nz; /* If we are comparing a value against zero, we are done. */ if ((code == NE || code == EQ) && XEXP (x, 1) == const0_rtx) { *ptrue = (code == NE) ? const_true_rtx : const0_rtx; *pfalse = (code == NE) ? const0_rtx : const_true_rtx; return XEXP (x, 0); } /* If this is a unary operation whose operand has one of two values, apply our opcode to compute those values. */ else if (UNARY_P (x) && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0) { *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0))); *pfalse = simplify_gen_unary (code, mode, false0, GET_MODE (XEXP (x, 0))); return cond0; } /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would make can't possibly match and would suppress other optimizations. */ else if (code == COMPARE) ; /* If this is a binary operation, see if either side has only one of two values. If either one does or if both do and they are conditional on the same value, compute the new true and false values. */ else if (BINARY_P (x)) { cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0); cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1); if ((cond0 != 0 || cond1 != 0) && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1))) { /* If if_then_else_cond returned zero, then true/false are the same rtl. We must copy one of them to prevent invalid rtl sharing. */ if (cond0 == 0) true0 = copy_rtx (true0); else if (cond1 == 0) true1 = copy_rtx (true1); *ptrue = gen_binary (code, mode, true0, true1); *pfalse = gen_binary (code, mode, false0, false1); return cond0 ? cond0 : cond1; } /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the operands is zero when the other is nonzero, and vice-versa, and STORE_FLAG_VALUE is 1 or -1. */ if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) && (code == PLUS || code == IOR || code == XOR || code == MINUS || code == UMAX) && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT) { rtx op0 = XEXP (XEXP (x, 0), 1); rtx op1 = XEXP (XEXP (x, 1), 1); cond0 = XEXP (XEXP (x, 0), 0); cond1 = XEXP (XEXP (x, 1), 0); if (COMPARISON_P (cond0) && COMPARISON_P (cond1) && ((GET_CODE (cond0) == combine_reversed_comparison_code (cond1) && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0)) && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1))) || ((swap_condition (GET_CODE (cond0)) == combine_reversed_comparison_code (cond1)) && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1)) && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0)))) && ! side_effects_p (x)) { *ptrue = gen_binary (MULT, mode, op0, const_true_rtx); *pfalse = gen_binary (MULT, mode, (code == MINUS ? simplify_gen_unary (NEG, mode, op1, mode) : op1), const_true_rtx); return cond0; } } /* Similarly for MULT, AND and UMIN, except that for these the result is always zero. 
*/ if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) && (code == MULT || code == AND || code == UMIN) && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT) { cond0 = XEXP (XEXP (x, 0), 0); cond1 = XEXP (XEXP (x, 1), 0); if (COMPARISON_P (cond0) && COMPARISON_P (cond1) && ((GET_CODE (cond0) == combine_reversed_comparison_code (cond1) && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0)) && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1))) || ((swap_condition (GET_CODE (cond0)) == combine_reversed_comparison_code (cond1)) && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1)) && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0)))) && ! side_effects_p (x)) { *ptrue = *pfalse = const0_rtx; return cond0; } } } else if (code == IF_THEN_ELSE) { /* If we have IF_THEN_ELSE already, extract the condition and canonicalize it if it is NE or EQ. */ cond0 = XEXP (x, 0); *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2); if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx) return XEXP (cond0, 0); else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx) { *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1); return XEXP (cond0, 0); } else return cond0; } /* If X is a SUBREG, we can narrow both the true and false values of the inner expression, if there is a condition. */ else if (code == SUBREG && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x), &true0, &false0))) { true0 = simplify_gen_subreg (mode, true0, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); false0 = simplify_gen_subreg (mode, false0, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); if (true0 && false0) { *ptrue = true0; *pfalse = false0; return cond0; } } /* If X is a constant, this isn't special and will cause confusion if we treat it as such. Likewise if it is equivalent to a constant. */ else if (CONSTANT_P (x) || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0))) ; /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that will be least confusing to the rest of the compiler. */ else if (mode == BImode) { *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx; return x; } /* If X is known to be either 0 or -1, those are the true and false values when testing X. */ else if (x == constm1_rtx || x == const0_rtx || (mode != VOIDmode && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode))) { *ptrue = constm1_rtx, *pfalse = const0_rtx; return x; } /* Likewise for 0 or a single bit. */ else if (SCALAR_INT_MODE_P (mode) && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && exact_log2 (nz = nonzero_bits (x, mode)) >= 0) { *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx; return x; } /* Otherwise fail; show no condition with true and false values the same. */ *ptrue = *pfalse = x; return 0; } /* Return the value of expression X given the fact that condition COND is known to be true when applied to REG as its first operand and VAL as its second. X is known to not be shared and so can be modified in place. We only handle the simplest cases, and specifically those cases that arise with IF_THEN_ELSE expressions. */ static rtx known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val) { enum rtx_code code = GET_CODE (x); rtx temp; const char *fmt; int i, j; if (side_effects_p (x)) return x; /* If either operand of the condition is a floating point value, then we have to avoid collapsing an EQ comparison. */ if (cond == EQ && rtx_equal_p (x, reg) && ! FLOAT_MODE_P (GET_MODE (x)) && !
FLOAT_MODE_P (GET_MODE (val))) return val; if (cond == UNEQ && rtx_equal_p (x, reg)) return val; /* If X is (abs REG) and we know something about REG's relationship with zero, we may be able to simplify this. */ if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx) switch (cond) { case GE: case GT: case EQ: return XEXP (x, 0); case LT: case LE: return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)), XEXP (x, 0), GET_MODE (XEXP (x, 0))); default: break; } /* The only other cases we handle are MIN, MAX, and comparisons if the operands are the same as REG and VAL. */ else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x)) { if (rtx_equal_p (XEXP (x, 0), val)) cond = swap_condition (cond), temp = val, val = reg, reg = temp; if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val)) { if (COMPARISON_P (x)) { if (comparison_dominates_p (cond, code)) return const_true_rtx; code = combine_reversed_comparison_code (x); if (code != UNKNOWN && comparison_dominates_p (cond, code)) return const0_rtx; else return x; } else if (code == SMAX || code == SMIN || code == UMIN || code == UMAX) { int unsignedp = (code == UMIN || code == UMAX); /* Do not reverse the condition when it is NE or EQ. This is because we cannot conclude anything about the value of 'SMAX (x, y)' when x is not equal to y, but we can when x equals y. */ if ((code == SMAX || code == UMAX) && ! (cond == EQ || cond == NE)) cond = reverse_condition (cond); switch (cond) { case GE: case GT: return unsignedp ? x : XEXP (x, 1); case LE: case LT: return unsignedp ? x : XEXP (x, 0); case GEU: case GTU: return unsignedp ? XEXP (x, 1) : x; case LEU: case LTU: return unsignedp ? XEXP (x, 0) : x; default: break; } } } } else if (code == SUBREG) { enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x)); rtx new, r = known_cond (SUBREG_REG (x), cond, reg, val); if (SUBREG_REG (x) != r) { /* We must simplify subreg here, before we lose track of the original inner_mode. */ new = simplify_subreg (GET_MODE (x), r, inner_mode, SUBREG_BYTE (x)); if (new) return new; else SUBST (SUBREG_REG (x), r); } return x; } /* We don't have to handle SIGN_EXTEND here, because even in the case of replacing something with a modeless CONST_INT, a CONST_INT is already (supposed to be) a valid sign extension for its narrower mode, which implies it's already properly sign-extended for the wider mode. Now, for ZERO_EXTEND, the story is different. */ else if (code == ZERO_EXTEND) { enum machine_mode inner_mode = GET_MODE (XEXP (x, 0)); rtx new, r = known_cond (XEXP (x, 0), cond, reg, val); if (XEXP (x, 0) != r) { /* We must simplify the zero_extend here, before we lose track of the original inner_mode. */ new = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x), r, inner_mode); if (new) return new; else SUBST (XEXP (x, 0), r); } return x; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val)); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j), cond, reg, val)); } return x; } /* See if X and Y are equal for the purposes of seeing if we can rewrite an assignment as a field assignment. */ static int rtx_equal_for_field_assignment_p (rtx x, rtx y) { if (x == y || rtx_equal_p (x, y)) return 1; if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y)) return 0; /* Check for a paradoxical SUBREG of a MEM compared with the MEM. 
Note that all SUBREGs of MEM are paradoxical; otherwise they would have been rewritten. */ if (MEM_P (x) && GET_CODE (y) == SUBREG && MEM_P (SUBREG_REG (y)) && rtx_equal_p (SUBREG_REG (y), gen_lowpart (GET_MODE (SUBREG_REG (y)), x))) return 1; if (MEM_P (y) && GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)) && rtx_equal_p (SUBREG_REG (x), gen_lowpart (GET_MODE (SUBREG_REG (x)), y))) return 1; /* We used to see if get_last_value of X and Y were the same but that's not correct. In one direction, we'll cause the assignment to have the wrong destination and in the case, we'll import a register into this insn that might have already have been dead. So fail if none of the above cases are true. */ return 0; } /* See if X, a SET operation, can be rewritten as a bit-field assignment. Return that assignment if so. We only handle the most common cases. */ static rtx make_field_assignment (rtx x) { rtx dest = SET_DEST (x); rtx src = SET_SRC (x); rtx assign; rtx rhs, lhs; HOST_WIDE_INT c1; HOST_WIDE_INT pos; unsigned HOST_WIDE_INT len; rtx other; enum machine_mode mode; /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is a clear of a one-bit field. We will have changed it to (and (rotate (const_int -2) POS) DEST), so check for that. Also check for a SUBREG. */ if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE && GET_CODE (XEXP (XEXP (src, 0), 0)) == CONST_INT && INTVAL (XEXP (XEXP (src, 0), 0)) == -2 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1))) { assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1), 1, 1, 1, 0); if (assign != 0) return gen_rtx_SET (VOIDmode, assign, const0_rtx); return x; } else if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG && subreg_lowpart_p (XEXP (src, 0)) && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0))) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0))))) && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE && GET_CODE (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == CONST_INT && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1))) { assign = make_extraction (VOIDmode, dest, 0, XEXP (SUBREG_REG (XEXP (src, 0)), 1), 1, 1, 1, 0); if (assign != 0) return gen_rtx_SET (VOIDmode, assign, const0_rtx); return x; } /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a one-bit field. */ else if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT && XEXP (XEXP (src, 0), 0) == const1_rtx && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1))) { assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1), 1, 1, 1, 0); if (assign != 0) return gen_rtx_SET (VOIDmode, assign, const1_rtx); return x; } /* The other case we handle is assignments into a constant-position field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents a mask that has all one bits except for a group of zero bits and OTHER is known to have zeros where C1 has ones, this is such an assignment. Compute the position and length from C1. Shift OTHER to the appropriate position, force it to the required mode, and make the extraction. Check for the AND in both operands. 
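For example, with a 32-bit DEST and C1 == 0xffffff0f, the zero bits of C1 give pos == 4 and len == 4; if OTHER can be nonzero only in bits 4 through 7, the result is an assignment of OTHER shifted right by 4 (and masked to 4 bits) into the 4-bit field of DEST that starts at bit 4.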
*/ if (GET_CODE (src) != IOR && GET_CODE (src) != XOR) return x; rhs = expand_compound_operation (XEXP (src, 0)); lhs = expand_compound_operation (XEXP (src, 1)); if (GET_CODE (rhs) == AND && GET_CODE (XEXP (rhs, 1)) == CONST_INT && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest)) c1 = INTVAL (XEXP (rhs, 1)), other = lhs; else if (GET_CODE (lhs) == AND && GET_CODE (XEXP (lhs, 1)) == CONST_INT && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest)) c1 = INTVAL (XEXP (lhs, 1)), other = rhs; else return x; pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len); if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest)) || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0) return x; assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0); if (assign == 0) return x; /* The mode to use for the source is the mode of the assignment, or of what is inside a possible STRICT_LOW_PART. */ mode = (GET_CODE (assign) == STRICT_LOW_PART ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign)); /* Shift OTHER right POS places and make it the source, restricting it to the proper length and mode. */ src = force_to_mode (simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (src), other, pos), mode, GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT ? ~(unsigned HOST_WIDE_INT) 0 : ((unsigned HOST_WIDE_INT) 1 << len) - 1, dest, 0); /* If SRC is masked by an AND that does not make a difference in the value being stored, strip it. */ if (GET_CODE (assign) == ZERO_EXTRACT && GET_CODE (XEXP (assign, 1)) == CONST_INT && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (src, 1)) == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)) src = XEXP (src, 0); return gen_rtx_SET (VOIDmode, assign, src); } /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c) if so. */ static rtx apply_distributive_law (rtx x) { enum rtx_code code = GET_CODE (x); enum rtx_code inner_code; rtx lhs, rhs, other; rtx tem; /* Distributivity is not true for floating point as it can change the value. So we don't do it unless -funsafe-math-optimizations. */ if (FLOAT_MODE_P (GET_MODE (x)) && ! flag_unsafe_math_optimizations) return x; /* The outer operation can only be one of the following: */ if (code != IOR && code != AND && code != XOR && code != PLUS && code != MINUS) return x; lhs = XEXP (x, 0); rhs = XEXP (x, 1); /* If either operand is a primitive we can't do anything, so get out fast. */ if (OBJECT_P (lhs) || OBJECT_P (rhs)) return x; lhs = expand_compound_operation (lhs); rhs = expand_compound_operation (rhs); inner_code = GET_CODE (lhs); if (inner_code != GET_CODE (rhs)) return x; /* See if the inner and outer operations distribute. */ switch (inner_code) { case LSHIFTRT: case ASHIFTRT: case AND: case IOR: /* These all distribute except over PLUS. */ if (code == PLUS || code == MINUS) return x; break; case MULT: if (code != PLUS && code != MINUS) return x; break; case ASHIFT: /* This is also a multiply, so it distributes over everything. */ break; case SUBREG: /* Non-paradoxical SUBREGs distributes over all operations, provided the inner modes and byte offsets are the same, this is an extraction of a low-order part, we don't convert an fp operation to int or vice versa, and we would not be converting a single-word operation into a multi-word operation. 
The latter test is not required, but it prevents generating unneeded multi-word operations. Some of the previous tests are redundant given the latter test, but are retained because they are required for correctness. We produce the result slightly differently in this case. */ if (GET_MODE (SUBREG_REG (lhs)) != GET_MODE (SUBREG_REG (rhs)) || SUBREG_BYTE (lhs) != SUBREG_BYTE (rhs) || ! subreg_lowpart_p (lhs) || (GET_MODE_CLASS (GET_MODE (lhs)) != GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs)))) || (GET_MODE_SIZE (GET_MODE (lhs)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs)))) || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD) return x; tem = gen_binary (code, GET_MODE (SUBREG_REG (lhs)), SUBREG_REG (lhs), SUBREG_REG (rhs)); return gen_lowpart (GET_MODE (x), tem); default: return x; } /* Set LHS and RHS to the inner operands (A and B in the example above) and set OTHER to the common operand (C in the example). There is only one way to do this unless the inner operation is commutative. */ if (COMMUTATIVE_ARITH_P (lhs) && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0))) other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1); else if (COMMUTATIVE_ARITH_P (lhs) && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1))) other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0); else if (COMMUTATIVE_ARITH_P (lhs) && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0))) other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1); else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1))) other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0); else return x; /* Form the new inner operation, seeing if it simplifies first. */ tem = gen_binary (code, GET_MODE (x), lhs, rhs); /* There is one exception to the general way of distributing: (a | c) ^ (b | c) -> (a ^ b) & ~c */ if (code == XOR && inner_code == IOR) { inner_code = AND; other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x)); } /* We may be able to continuing distributing the result, so call ourselves recursively on the inner operation before forming the outer operation, which we return. */ return gen_binary (inner_code, GET_MODE (x), apply_distributive_law (tem), other); } /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done in MODE. Return an equivalent form, if different from X. Otherwise, return X. If X is zero, we are to always construct the equivalent form. */ static rtx simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop, unsigned HOST_WIDE_INT constop) { unsigned HOST_WIDE_INT nonzero; int i; /* Simplify VAROP knowing that we will be only looking at some of the bits in it. Note by passing in CONSTOP, we guarantee that the bits not set in CONSTOP are not significant and will never be examined. We must ensure that is the case by explicitly masking out those bits before returning. */ varop = force_to_mode (varop, mode, constop, NULL_RTX, 0); /* If VAROP is a CLOBBER, we will fail so return it. */ if (GET_CODE (varop) == CLOBBER) return varop; /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP to VAROP and return the new constant. */ if (GET_CODE (varop) == CONST_INT) return GEN_INT (trunc_int_for_mode (INTVAL (varop) & constop, mode)); /* See what bits may be nonzero in VAROP. Unlike the general case of a call to nonzero_bits, here we don't care about bits outside MODE. */ nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode); /* Turn off all bits in the constant that are known to already be zero. 
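For example, if CONSTOP is 0xff but the nonzero bits of VAROP are only the low four bits, CONSTOP is reduced to 0x0f here.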
Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS which is tested below. */ constop &= nonzero; /* If we don't have any bits left, return zero. */ if (constop == 0) return const0_rtx; /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is a power of two, we can replace this with an ASHIFT. */ if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1 && (i = exact_log2 (constop)) >= 0) return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i); /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR or XOR, then try to apply the distributive law. This may eliminate operations if either branch can be simplified because of the AND. It may also make some cases more complex, but those cases probably won't match a pattern either with or without this. */ if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR) return gen_lowpart (mode, apply_distributive_law (gen_binary (GET_CODE (varop), GET_MODE (varop), simplify_and_const_int (NULL_RTX, GET_MODE (varop), XEXP (varop, 0), constop), simplify_and_const_int (NULL_RTX, GET_MODE (varop), XEXP (varop, 1), constop)))); /* If VAROP is PLUS, and the constant is a mask of low bits, distribute the AND and see if one of the operands simplifies to zero. If so, we may eliminate it. */ if (GET_CODE (varop) == PLUS && exact_log2 (constop + 1) >= 0) { rtx o0, o1; o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop); o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop); if (o0 == const0_rtx) return o1; if (o1 == const0_rtx) return o0; } /* Get VAROP in MODE. Try to get a SUBREG if not. Don't make a new SUBREG if we already had one (just check for the simplest cases). */ if (x && GET_CODE (XEXP (x, 0)) == SUBREG && GET_MODE (XEXP (x, 0)) == mode && SUBREG_REG (XEXP (x, 0)) == varop) varop = XEXP (x, 0); else varop = gen_lowpart (mode, varop); /* If we can't make the SUBREG, try to return what we were given. */ if (GET_CODE (varop) == CLOBBER) return x ? x : varop; /* If we are only masking insignificant bits, return VAROP. */ if (constop == nonzero) x = varop; else { /* Otherwise, return an AND. */ constop = trunc_int_for_mode (constop, mode); /* See how much, if any, of X we can use. */ if (x == 0 || GET_CODE (x) != AND || GET_MODE (x) != mode) x = gen_binary (AND, mode, varop, GEN_INT (constop)); else { if (GET_CODE (XEXP (x, 1)) != CONST_INT || (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) != constop) SUBST (XEXP (x, 1), GEN_INT (constop)); SUBST (XEXP (x, 0), varop); } } return x; } /* Given a REG, X, compute which bits in X can be nonzero. We don't care about bits outside of those defined in MODE. For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is a shift, AND, or zero_extract, we can do better. */ static rtx reg_nonzero_bits_for_combine (rtx x, enum machine_mode mode, rtx known_x ATTRIBUTE_UNUSED, enum machine_mode known_mode ATTRIBUTE_UNUSED, unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED, unsigned HOST_WIDE_INT *nonzero) { rtx tem; /* If X is a register whose nonzero bits value is current, use it. Otherwise, if X is a register whose value we can find, use that value. Otherwise, use the previously-computed global nonzero bits for this register.
*/ if (reg_stat[REGNO (x)].last_set_value != 0 && (reg_stat[REGNO (x)].last_set_mode == mode || (GET_MODE_CLASS (reg_stat[REGNO (x)].last_set_mode) == MODE_INT && GET_MODE_CLASS (mode) == MODE_INT)) && (reg_stat[REGNO (x)].last_set_label == label_tick || (REGNO (x) >= FIRST_PSEUDO_REGISTER && REG_N_SETS (REGNO (x)) == 1 && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, REGNO (x)))) && INSN_CUID (reg_stat[REGNO (x)].last_set) < subst_low_cuid) { *nonzero &= reg_stat[REGNO (x)].last_set_nonzero_bits; return NULL; } tem = get_last_value (x); if (tem) { #ifdef SHORT_IMMEDIATES_SIGN_EXTEND /* If X is narrower than MODE and TEM is a non-negative constant that would appear negative in the mode of X, sign-extend it for use in reg_nonzero_bits because some machines (maybe most) will actually do the sign-extension and this is the conservative approach. ??? For 2.5, try to tighten up the MD files in this regard instead of this kludge. */ if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode) && GET_CODE (tem) == CONST_INT && INTVAL (tem) > 0 && 0 != (INTVAL (tem) & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (x)) - 1)))) tem = GEN_INT (INTVAL (tem) | ((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (GET_MODE (x)))); #endif return tem; } else if (nonzero_sign_valid && reg_stat[REGNO (x)].nonzero_bits) { unsigned HOST_WIDE_INT mask = reg_stat[REGNO (x)].nonzero_bits; if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)) /* We don't know anything about the upper bits. */ mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x)); *nonzero &= mask; } return NULL; } /* Return the number of bits at the high-order end of X that are known to be equal to the sign bit. X will be used in mode MODE; if MODE is VOIDmode, X will be used in its own mode. The returned value will always be between 1 and the number of bits in MODE. */ static rtx reg_num_sign_bit_copies_for_combine (rtx x, enum machine_mode mode, rtx known_x ATTRIBUTE_UNUSED, enum machine_mode known_mode ATTRIBUTE_UNUSED, unsigned int known_ret ATTRIBUTE_UNUSED, unsigned int *result) { rtx tem; if (reg_stat[REGNO (x)].last_set_value != 0 && reg_stat[REGNO (x)].last_set_mode == mode && (reg_stat[REGNO (x)].last_set_label == label_tick || (REGNO (x) >= FIRST_PSEUDO_REGISTER && REG_N_SETS (REGNO (x)) == 1 && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, REGNO (x)))) && INSN_CUID (reg_stat[REGNO (x)].last_set) < subst_low_cuid) { *result = reg_stat[REGNO (x)].last_set_sign_bit_copies; return NULL; } tem = get_last_value (x); if (tem != 0) return tem; if (nonzero_sign_valid && reg_stat[REGNO (x)].sign_bit_copies != 0 && GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode)) *result = reg_stat[REGNO (x)].sign_bit_copies; return NULL; } /* Return the number of "extended" bits there are in X, when interpreted as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For unsigned quantities, this is the number of high-order zero bits. For signed quantities, this is the number of copies of the sign bit minus 1. In both case, this function returns the number of "spare" bits. For example, if two quantities for which this function returns at least 1 are added, the addition is known not to overflow. This function will always return 0 unless called during combine, which implies that it must be called from a define_split. */ unsigned int extended_count (rtx x, enum machine_mode mode, int unsignedp) { if (nonzero_sign_valid == 0) return 0; return (unsignedp ? 
(GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1 - floor_log2 (nonzero_bits (x, mode))) : 0) : num_sign_bit_copies (x, mode) - 1); } /* This function is called from `simplify_shift_const' to merge two outer operations. Specifically, we have already found that we need to perform operation *POP0 with constant *PCONST0 at the outermost position. We would now like to also perform OP1 with constant CONST1 (with *POP0 being done last). Return 1 if we can do the operation and update *POP0 and *PCONST0 with the resulting operation. *PCOMP_P is set to 1 if we would need to complement the innermost operand, otherwise it is unchanged. MODE is the mode in which the operation will be done. No bits outside the width of this mode matter. It is assumed that the width of this mode is smaller than or equal to HOST_BITS_PER_WIDE_INT. If *POP0 or OP1 are NIL, it means no operation is required. Only NEG, PLUS, IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper result is simply *PCONST0. If the resulting operation cannot be expressed as one operation, we return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */ static int merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, enum machine_mode mode, int *pcomp_p) { enum rtx_code op0 = *pop0; HOST_WIDE_INT const0 = *pconst0; const0 &= GET_MODE_MASK (mode); const1 &= GET_MODE_MASK (mode); /* If OP0 is an AND, clear unimportant bits in CONST1. */ if (op0 == AND) const1 &= const0; /* If OP0 or OP1 is NIL, this is easy. Similarly if they are the same or if OP0 is SET. */ if (op1 == NIL || op0 == SET) return 1; else if (op0 == NIL) op0 = op1, const0 = const1; else if (op0 == op1) { switch (op0) { case AND: const0 &= const1; break; case IOR: const0 |= const1; break; case XOR: const0 ^= const1; break; case PLUS: const0 += const1; break; case NEG: op0 = NIL; break; default: break; } } /* Otherwise, if either is a PLUS or NEG, we can't do anything. */ else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG) return 0; /* If the two constants aren't the same, we can't do anything. The remaining six cases can all be done. */ else if (const0 != const1) return 0; else switch (op0) { case IOR: if (op1 == AND) /* (a & b) | b == b */ op0 = SET; else /* op1 == XOR */ /* (a ^ b) | b == a | b */ {;} break; case XOR: if (op1 == AND) /* (a & b) ^ b == (~a) & b */ op0 = AND, *pcomp_p = 1; else /* op1 == IOR */ /* (a | b) ^ b == a & ~b */ op0 = AND, const0 = ~const0; break; case AND: if (op1 == IOR) /* (a | b) & b == b */ op0 = SET; else /* op1 == XOR */ /* (a ^ b) & b) == (~a) & b */ *pcomp_p = 1; break; default: break; } /* Check for NO-OP cases. */ const0 &= GET_MODE_MASK (mode); if (const0 == 0 && (op0 == IOR || op0 == XOR || op0 == PLUS)) op0 = NIL; else if (const0 == 0 && op0 == AND) op0 = SET; else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode) && op0 == AND) op0 = NIL; /* ??? Slightly redundant with the above mask, but not entirely. Moving this above means we'd have to sign-extend the mode mask for the final test. */ const0 = trunc_int_for_mode (const0, mode); *pop0 = op0; *pconst0 = const0; return 1; } /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift. The result of the shift is RESULT_MODE. X, if nonzero, is an expression that we started with. The shift is normally computed in the widest mode we find in VAROP, as long as it isn't a different number of words than RESULT_MODE. 
Exceptions are ASHIFTRT and ROTATE, which are always done in their original mode, */ static rtx simplify_shift_const (rtx x, enum rtx_code code, enum machine_mode result_mode, rtx varop, int orig_count) { enum rtx_code orig_code = code; unsigned int count; int signed_count; enum machine_mode mode = result_mode; enum machine_mode shift_mode, tmode; unsigned int mode_words = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD; /* We form (outer_op (code varop count) (outer_const)). */ enum rtx_code outer_op = NIL; HOST_WIDE_INT outer_const = 0; rtx const_rtx; int complement_p = 0; rtx new; /* Make sure and truncate the "natural" shift on the way in. We don't want to do this inside the loop as it makes it more difficult to combine shifts. */ if (SHIFT_COUNT_TRUNCATED) orig_count &= GET_MODE_BITSIZE (mode) - 1; /* If we were given an invalid count, don't do anything except exactly what was requested. */ if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode)) { if (x) return x; return gen_rtx_fmt_ee (code, mode, varop, GEN_INT (orig_count)); } count = orig_count; /* Unless one of the branches of the `if' in this loop does a `continue', we will `break' the loop after the `if'. */ while (count != 0) { /* If we have an operand of (clobber (const_int 0)), just return that value. */ if (GET_CODE (varop) == CLOBBER) return varop; /* If we discovered we had to complement VAROP, leave. Making a NOT here would cause an infinite loop. */ if (complement_p) break; /* Convert ROTATERT to ROTATE. */ if (code == ROTATERT) { unsigned int bitsize = GET_MODE_BITSIZE (result_mode);; code = ROTATE; if (VECTOR_MODE_P (result_mode)) count = bitsize / GET_MODE_NUNITS (result_mode) - count; else count = bitsize - count; } /* We need to determine what mode we will do the shift in. If the shift is a right shift or a ROTATE, we must always do it in the mode it was originally done in. Otherwise, we can do it in MODE, the widest mode encountered. */ shift_mode = (code == ASHIFTRT || code == LSHIFTRT || code == ROTATE ? result_mode : mode); /* Handle cases where the count is greater than the size of the mode minus 1. For ASHIFT, use the size minus one as the count (this can occur when simplifying (lshiftrt (ashiftrt ..))). For rotates, take the count modulo the size. For other shifts, the result is zero. Since these shifts are being produced by the compiler by combining multiple operations, each of which are defined, we know what the result is supposed to be. */ if (count > (unsigned int) (GET_MODE_BITSIZE (shift_mode) - 1)) { if (code == ASHIFTRT) count = GET_MODE_BITSIZE (shift_mode) - 1; else if (code == ROTATE || code == ROTATERT) count %= GET_MODE_BITSIZE (shift_mode); else { /* We can't simply return zero because there may be an outer op. */ varop = const0_rtx; count = 0; break; } } /* An arithmetic right shift of a quantity known to be -1 or 0 is a no-op. */ if (code == ASHIFTRT && (num_sign_bit_copies (varop, shift_mode) == GET_MODE_BITSIZE (shift_mode))) { count = 0; break; } /* If we are doing an arithmetic right shift and discarding all but the sign bit copies, this is equivalent to doing a shift by the bitsize minus one. Convert it into that shift because it will often allow other simplifications. */ if (code == ASHIFTRT && (count + num_sign_bit_copies (varop, shift_mode) >= GET_MODE_BITSIZE (shift_mode))) count = GET_MODE_BITSIZE (shift_mode) - 1; /* We simplify the tests below and elsewhere by converting ASHIFTRT to LSHIFTRT if we know the sign bit is clear. 
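For example, an ASHIFTRT of (and:SI X (const_int 255)) can safely be treated as an LSHIFTRT, since nonzero_bits shows that the sign bit is already zero.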
`make_compound_operation' will convert it to an ASHIFTRT for those machines (such as VAX) that don't have an LSHIFTRT. */ if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT && code == ASHIFTRT && ((nonzero_bits (varop, shift_mode) & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (shift_mode) - 1))) == 0)) code = LSHIFTRT; if (code == LSHIFTRT && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT && !(nonzero_bits (varop, shift_mode) >> count)) varop = const0_rtx; if (code == ASHIFT && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT && !((nonzero_bits (varop, shift_mode) << count) & GET_MODE_MASK (shift_mode))) varop = const0_rtx; switch (GET_CODE (varop)) { case SIGN_EXTEND: case ZERO_EXTEND: case SIGN_EXTRACT: case ZERO_EXTRACT: new = expand_compound_operation (varop); if (new != varop) { varop = new; continue; } break; case MEM: /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH minus the width of a smaller mode, we can do this with a SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */ if ((code == ASHIFTRT || code == LSHIFTRT) && ! mode_dependent_address_p (XEXP (varop, 0)) && ! MEM_VOLATILE_P (varop) && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count, MODE_INT, 1)) != BLKmode) { new = adjust_address_nv (varop, tmode, BYTES_BIG_ENDIAN ? 0 : count / BITS_PER_UNIT); varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND : ZERO_EXTEND, mode, new); count = 0; continue; } break; case USE: /* Similar to the case above, except that we can only do this if the resulting mode is the same as that of the underlying MEM and adjust the address depending on the *bits* endianness because of the way that bit-field extract insns are defined. */ if ((code == ASHIFTRT || code == LSHIFTRT) && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count, MODE_INT, 1)) != BLKmode && tmode == GET_MODE (XEXP (varop, 0))) { if (BITS_BIG_ENDIAN) new = XEXP (varop, 0); else { new = copy_rtx (XEXP (varop, 0)); SUBST (XEXP (new, 0), plus_constant (XEXP (new, 0), count / BITS_PER_UNIT)); } varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND : ZERO_EXTEND, mode, new); count = 0; continue; } break; case SUBREG: /* If VAROP is a SUBREG, strip it as long as the inner operand has the same number of words as what we've seen so far. Then store the widest mode in MODE. */ if (subreg_lowpart_p (varop) && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop))) > GET_MODE_SIZE (GET_MODE (varop))) && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) == mode_words) { varop = SUBREG_REG (varop); if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode)) mode = GET_MODE (varop); continue; } break; case MULT: /* Some machines use MULT instead of ASHIFT because MULT is cheaper. But it is still better on those machines to merge two shifts into one. */ if (GET_CODE (XEXP (varop, 1)) == CONST_INT && exact_log2 (INTVAL (XEXP (varop, 1))) >= 0) { varop = gen_binary (ASHIFT, GET_MODE (varop), XEXP (varop, 0), GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1))))); continue; } break; case UDIV: /* Similar, for when divides are cheaper. */ if (GET_CODE (XEXP (varop, 1)) == CONST_INT && exact_log2 (INTVAL (XEXP (varop, 1))) >= 0) { varop = gen_binary (LSHIFTRT, GET_MODE (varop), XEXP (varop, 0), GEN_INT (exact_log2 (INTVAL (XEXP (varop, 1))))); continue; } break; case ASHIFTRT: /* If we are extracting just the sign bit of an arithmetic right shift, that shift is not needed. 
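For instance, (lshiftrt:SI (ashiftrt:SI X C) 31) keeps only the sign bit, which the inner ASHIFTRT leaves unchanged, so it is equivalent to (lshiftrt:SI X 31).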
However, the sign bit of a wider mode may be different from what would be interpreted as the sign bit in a narrower mode, so, if the result is narrower, don't discard the shift. */ if (code == LSHIFTRT && count == (unsigned int) (GET_MODE_BITSIZE (result_mode) - 1) && (GET_MODE_BITSIZE (result_mode) >= GET_MODE_BITSIZE (GET_MODE (varop)))) { varop = XEXP (varop, 0); continue; } /* ... fall through ... */ case LSHIFTRT: case ASHIFT: case ROTATE: /* Here we have two nested shifts. The result is usually the AND of a new shift with a mask. We compute the result below. */ if (GET_CODE (XEXP (varop, 1)) == CONST_INT && INTVAL (XEXP (varop, 1)) >= 0 && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop)) && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) { enum rtx_code first_code = GET_CODE (varop); unsigned int first_count = INTVAL (XEXP (varop, 1)); unsigned HOST_WIDE_INT mask; rtx mask_rtx; /* We have one common special case. We can't do any merging if the inner code is an ASHIFTRT of a smaller mode. However, if we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2) with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2), we can convert it to (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0 C2) C3) C1). This simplifies certain SIGN_EXTEND operations. */ if (code == ASHIFT && first_code == ASHIFTRT && count == (unsigned int) (GET_MODE_BITSIZE (result_mode) - GET_MODE_BITSIZE (GET_MODE (varop)))) { /* C3 has the low-order C1 bits zero. */ mask = (GET_MODE_MASK (mode) & ~(((HOST_WIDE_INT) 1 << first_count) - 1)); varop = simplify_and_const_int (NULL_RTX, result_mode, XEXP (varop, 0), mask); varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode, varop, count); count = first_count; code = ASHIFTRT; continue; } /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more than C1 high-order bits equal to the sign bit, we can convert this to either an ASHIFT or an ASHIFTRT depending on the two counts. We cannot do this if VAROP's mode is not SHIFT_MODE. */ if (code == ASHIFTRT && first_code == ASHIFT && GET_MODE (varop) == shift_mode && (num_sign_bit_copies (XEXP (varop, 0), shift_mode) > first_count)) { varop = XEXP (varop, 0); signed_count = count - first_count; if (signed_count < 0) count = -signed_count, code = ASHIFT; else count = signed_count; continue; } /* There are some cases we can't do. If CODE is ASHIFTRT, we can only do this if FIRST_CODE is also ASHIFTRT. We can't do the case when CODE is ROTATE and FIRST_CODE is ASHIFTRT. If the mode of this shift is not the mode of the outer shift, we can't do this if either shift is a right shift or ROTATE. Finally, we can't do any of these if the mode is too wide unless the codes are the same. Handle the case where the shift codes are the same first. */ if (code == first_code) { if (GET_MODE (varop) != result_mode && (code == ASHIFTRT || code == LSHIFTRT || code == ROTATE)) break; count += first_count; varop = XEXP (varop, 0); continue; } if (code == ASHIFTRT || (code == ROTATE && first_code == ASHIFTRT) || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT || (GET_MODE (varop) != result_mode && (first_code == ASHIFTRT || first_code == LSHIFTRT || first_code == ROTATE || code == ROTATE))) break; /* To compute the mask to apply after the shift, shift the nonzero bits of the inner shift the same way the outer shift will. 
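For example, in SImode, for (lshiftrt (ashift X 3) 2) the nonzero bits of the inner shift are at most 0xfffffff8, and shifting that right by 2 gives the mask 0x3ffffffe; since the two shifts go in opposite directions the counts subtract, and the whole expression ends up as (and (ashift X 1) (const_int 0x3ffffffe)).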
*/ mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop))); mask_rtx = simplify_binary_operation (code, result_mode, mask_rtx, GEN_INT (count)); /* Give up if we can't compute an outer operation to use. */ if (mask_rtx == 0 || GET_CODE (mask_rtx) != CONST_INT || ! merge_outer_ops (&outer_op, &outer_const, AND, INTVAL (mask_rtx), result_mode, &complement_p)) break; /* If the shifts are in the same direction, we add the counts. Otherwise, we subtract them. */ signed_count = count; if ((code == ASHIFTRT || code == LSHIFTRT) == (first_code == ASHIFTRT || first_code == LSHIFTRT)) signed_count += first_count; else signed_count -= first_count; /* If COUNT is positive, the new shift is usually CODE, except for the two exceptions below, in which case it is FIRST_CODE. If the count is negative, FIRST_CODE should always be used */ if (signed_count > 0 && ((first_code == ROTATE && code == ASHIFT) || (first_code == ASHIFTRT && code == LSHIFTRT))) code = first_code, count = signed_count; else if (signed_count < 0) code = first_code, count = -signed_count; else count = signed_count; varop = XEXP (varop, 0); continue; } /* If we have (A << B << C) for any shift, we can convert this to (A << C << B). This wins if A is a constant. Only try this if B is not a constant. */ else if (GET_CODE (varop) == code && GET_CODE (XEXP (varop, 1)) != CONST_INT && 0 != (new = simplify_binary_operation (code, mode, XEXP (varop, 0), GEN_INT (count)))) { varop = gen_rtx_fmt_ee (code, mode, new, XEXP (varop, 1)); count = 0; continue; } break; case NOT: /* Make this fit the case below. */ varop = gen_rtx_XOR (mode, XEXP (varop, 0), GEN_INT (GET_MODE_MASK (mode))); continue; case IOR: case AND: case XOR: /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C) with C the size of VAROP - 1 and the shift is logical if STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1, we have an (le X 0) operation. If we have an arithmetic shift and STORE_FLAG_VALUE is 1 or we have a logical shift with STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */ if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS && XEXP (XEXP (varop, 0), 1) == constm1_rtx && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) && (code == LSHIFTRT || code == ASHIFTRT) && count == (unsigned int) (GET_MODE_BITSIZE (GET_MODE (varop)) - 1) && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1))) { count = 0; varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1), const0_rtx); if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT) varop = gen_rtx_NEG (GET_MODE (varop), varop); continue; } /* If we have (shift (logical)), move the logical to the outside to allow it to possibly combine with another logical and the shift to combine with another shift. This also canonicalizes to what a ZERO_EXTRACT looks like. Also, some machines have (and (shift)) insns. */ if (GET_CODE (XEXP (varop, 1)) == CONST_INT /* We can't do this if we have (ashiftrt (xor)) and the constant has its sign bit set in shift_mode. 
*/ && !(code == ASHIFTRT && GET_CODE (varop) == XOR && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)), shift_mode)) && (new = simplify_binary_operation (code, result_mode, XEXP (varop, 1), GEN_INT (count))) != 0 && GET_CODE (new) == CONST_INT && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop), INTVAL (new), result_mode, &complement_p)) { varop = XEXP (varop, 0); continue; } /* If we can't do that, try to simplify the shift in each arm of the logical expression, make a new logical expression, and apply the inverse distributive law. This also can't be done for some (ashiftrt (xor)). */ if (GET_CODE (XEXP (varop, 1)) == CONST_INT && !(code == ASHIFTRT && GET_CODE (varop) == XOR && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)), shift_mode))) { rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode, XEXP (varop, 0), count); rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode, XEXP (varop, 1), count); varop = gen_binary (GET_CODE (varop), shift_mode, lhs, rhs); varop = apply_distributive_law (varop); count = 0; continue; } break; case EQ: /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE says that the sign bit can be tested, FOO has mode MODE, C is GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit that may be nonzero. */ if (code == LSHIFTRT && XEXP (varop, 1) == const0_rtx && GET_MODE (XEXP (varop, 0)) == result_mode && count == (unsigned int) (GET_MODE_BITSIZE (result_mode) - 1) && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT && ((STORE_FLAG_VALUE & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1)))) && nonzero_bits (XEXP (varop, 0), result_mode) == 1 && merge_outer_ops (&outer_op, &outer_const, XOR, (HOST_WIDE_INT) 1, result_mode, &complement_p)) { varop = XEXP (varop, 0); count = 0; continue; } break; case NEG: /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less than the number of bits in the mode is equivalent to A. */ if (code == LSHIFTRT && count == (unsigned int) (GET_MODE_BITSIZE (result_mode) - 1) && nonzero_bits (XEXP (varop, 0), result_mode) == 1) { varop = XEXP (varop, 0); count = 0; continue; } /* NEG commutes with ASHIFT since it is multiplication. Move the NEG outside to allow shifts to combine. */ if (code == ASHIFT && merge_outer_ops (&outer_op, &outer_const, NEG, (HOST_WIDE_INT) 0, result_mode, &complement_p)) { varop = XEXP (varop, 0); continue; } break; case PLUS: /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C is one less than the number of bits in the mode is equivalent to (xor A 1). */ if (code == LSHIFTRT && count == (unsigned int) (GET_MODE_BITSIZE (result_mode) - 1) && XEXP (varop, 1) == constm1_rtx && nonzero_bits (XEXP (varop, 0), result_mode) == 1 && merge_outer_ops (&outer_op, &outer_const, XOR, (HOST_WIDE_INT) 1, result_mode, &complement_p)) { count = 0; varop = XEXP (varop, 0); continue; } /* If we have (xshiftrt (plus FOO BAR) C), and the only bits that might be nonzero in BAR are those being shifted out and those bits are known zero in FOO, we can replace the PLUS with FOO. Similarly in the other operand order. This code occurs when we are computing the size of a variable-size array.
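For example, in SImode with count 2, if the nonzero bits of BAR all lie in the low two bits and those bits are known to be zero in FOO, no carry can propagate above bit 1, so (lshiftrt (plus FOO BAR) 2) is simply (lshiftrt FOO 2).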
*/ if ((code == ASHIFTRT || code == LSHIFTRT) && count < HOST_BITS_PER_WIDE_INT && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0 && (nonzero_bits (XEXP (varop, 1), result_mode) & nonzero_bits (XEXP (varop, 0), result_mode)) == 0) { varop = XEXP (varop, 0); continue; } else if ((code == ASHIFTRT || code == LSHIFTRT) && count < HOST_BITS_PER_WIDE_INT && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT && 0 == (nonzero_bits (XEXP (varop, 0), result_mode) >> count) && 0 == (nonzero_bits (XEXP (varop, 0), result_mode) & nonzero_bits (XEXP (varop, 1), result_mode))) { varop = XEXP (varop, 1); continue; } /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */ if (code == ASHIFT && GET_CODE (XEXP (varop, 1)) == CONST_INT && (new = simplify_binary_operation (ASHIFT, result_mode, XEXP (varop, 1), GEN_INT (count))) != 0 && GET_CODE (new) == CONST_INT && merge_outer_ops (&outer_op, &outer_const, PLUS, INTVAL (new), result_mode, &complement_p)) { varop = XEXP (varop, 0); continue; } break; case MINUS: /* If we have (xshiftrt (minus (ashiftrt X C)) X) C) with C the size of VAROP - 1 and the shift is logical if STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1, we have a (gt X 0) operation. If the shift is arithmetic with STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1, we have a (neg (gt X 0)) operation. */ if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) && GET_CODE (XEXP (varop, 0)) == ASHIFTRT && count == (unsigned int) (GET_MODE_BITSIZE (GET_MODE (varop)) - 1) && (code == LSHIFTRT || code == ASHIFTRT) && GET_CODE (XEXP (XEXP (varop, 0), 1)) == CONST_INT && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (varop, 0), 1)) == count && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1))) { count = 0; varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1), const0_rtx); if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT) varop = gen_rtx_NEG (GET_MODE (varop), varop); continue; } break; case TRUNCATE: /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt)) if the truncate does not affect the value. */ if (code == LSHIFTRT && GET_CODE (XEXP (varop, 0)) == LSHIFTRT && GET_CODE (XEXP (XEXP (varop, 0), 1)) == CONST_INT && (INTVAL (XEXP (XEXP (varop, 0), 1)) >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0))) - GET_MODE_BITSIZE (GET_MODE (varop))))) { rtx varop_inner = XEXP (varop, 0); varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner), XEXP (varop_inner, 0), GEN_INT (count + INTVAL (XEXP (varop_inner, 1)))); varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner); count = 0; continue; } break; default: break; } break; } /* We need to determine what mode to do the shift in. If the shift is a right shift or ROTATE, we must always do it in the mode it was originally done in. Otherwise, we can do it in MODE, the widest mode encountered. The code we care about is that of the shift that will actually be done, not the shift that was originally requested. */ shift_mode = (code == ASHIFTRT || code == LSHIFTRT || code == ROTATE ? result_mode : mode); /* We have now finished analyzing the shift. The result should be a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If OUTER_OP is non-NIL, it is an operation that needs to be applied to the result of the shift. OUTER_CONST is the relevant constant, but we must turn off all bits turned off in the shift. If we were passed a value for X, see if we can use any pieces of it. If not, make new rtx. 
*/ if (x && GET_RTX_CLASS (GET_CODE (x)) == RTX_BIN_ARITH && GET_CODE (XEXP (x, 1)) == CONST_INT && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) == count) const_rtx = XEXP (x, 1); else const_rtx = GEN_INT (count); if (x && GET_CODE (XEXP (x, 0)) == SUBREG && GET_MODE (XEXP (x, 0)) == shift_mode && SUBREG_REG (XEXP (x, 0)) == varop) varop = XEXP (x, 0); else if (GET_MODE (varop) != shift_mode) varop = gen_lowpart (shift_mode, varop); /* If we can't make the SUBREG, try to return what we were given. */ if (GET_CODE (varop) == CLOBBER) return x ? x : varop; new = simplify_binary_operation (code, shift_mode, varop, const_rtx); if (new != 0) x = new; else x = gen_rtx_fmt_ee (code, shift_mode, varop, const_rtx); /* If we have an outer operation and we just made a shift, it is possible that we could have simplified the shift were it not for the outer operation. So try to do the simplification recursively. */ if (outer_op != NIL && GET_CODE (x) == code && GET_CODE (XEXP (x, 1)) == CONST_INT) x = simplify_shift_const (x, code, shift_mode, XEXP (x, 0), INTVAL (XEXP (x, 1))); /* If we were doing an LSHIFTRT in a wider mode than it was originally, turn off all the bits that the shift would have turned off. */ if (orig_code == LSHIFTRT && result_mode != shift_mode) x = simplify_and_const_int (NULL_RTX, shift_mode, x, GET_MODE_MASK (result_mode) >> orig_count); /* Do the remainder of the processing in RESULT_MODE. */ x = gen_lowpart (result_mode, x); /* If COMPLEMENT_P is set, we have to complement X before doing the outer operation. */ if (complement_p) x = simplify_gen_unary (NOT, result_mode, x, result_mode); if (outer_op != NIL) { if (GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT) outer_const = trunc_int_for_mode (outer_const, result_mode); if (outer_op == AND) x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const); else if (outer_op == SET) /* This means that we have determined that the result is equivalent to a constant. This should be rare. */ x = GEN_INT (outer_const); else if (GET_RTX_CLASS (outer_op) == RTX_UNARY) x = simplify_gen_unary (outer_op, result_mode, x, result_mode); else x = gen_binary (outer_op, result_mode, x, GEN_INT (outer_const)); } return x; } /* Like recog, but we receive the address of a pointer to a new pattern. We try to match the rtx that the pointer points to. If that fails, we may try to modify or replace the pattern, storing the replacement into the same pointer object. Modifications include deletion or addition of CLOBBERs. PNOTES is a pointer to a location where any REG_UNUSED notes added for the CLOBBERs are placed. The value is the final insn code from the pattern ultimately matched, or -1. */ static int recog_for_combine (rtx *pnewpat, rtx insn, rtx *pnotes) { rtx pat = *pnewpat; int insn_code_number; int num_clobbers_to_add = 0; int i; rtx notes = 0; rtx old_notes, old_pat; /* If PAT is a PARALLEL, check to see if it contains the CLOBBER we use to indicate that something didn't match. If we find such a thing, force rejection. */ if (GET_CODE (pat) == PARALLEL) for (i = XVECLEN (pat, 0) - 1; i >= 0; i--) if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx) return -1; old_pat = PATTERN (insn); old_notes = REG_NOTES (insn); PATTERN (insn) = pat; REG_NOTES (insn) = 0; insn_code_number = recog (pat, insn, &num_clobbers_to_add); /* If it isn't, there is the possibility that we previously had an insn that clobbered some register as a side effect, but the combined insn doesn't need to do that. 
So try once more without the clobbers unless this represents an ASM insn. */ if (insn_code_number < 0 && ! check_asm_operands (pat) && GET_CODE (pat) == PARALLEL) { int pos; for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++) if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER) { if (i != pos) SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i)); pos++; } SUBST_INT (XVECLEN (pat, 0), pos); if (pos == 1) pat = XVECEXP (pat, 0, 0); PATTERN (insn) = pat; insn_code_number = recog (pat, insn, &num_clobbers_to_add); } PATTERN (insn) = old_pat; REG_NOTES (insn) = old_notes; /* Recognize all noop sets, these will be killed by followup pass. */ if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat)) insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0; /* If we had any clobbers to add, make a new pattern than contains them. Then check to make sure that all of them are dead. */ if (num_clobbers_to_add) { rtx newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (GET_CODE (pat) == PARALLEL ? (XVECLEN (pat, 0) + num_clobbers_to_add) : num_clobbers_to_add + 1)); if (GET_CODE (pat) == PARALLEL) for (i = 0; i < XVECLEN (pat, 0); i++) XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i); else XVECEXP (newpat, 0, 0) = pat; add_clobbers (newpat, insn_code_number); for (i = XVECLEN (newpat, 0) - num_clobbers_to_add; i < XVECLEN (newpat, 0); i++) { if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)) && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn)) return -1; notes = gen_rtx_EXPR_LIST (REG_UNUSED, XEXP (XVECEXP (newpat, 0, i), 0), notes); } pat = newpat; } *pnewpat = pat; *pnotes = notes; return insn_code_number; } /* Like gen_lowpart_general but for use by combine. In combine it is not possible to create any new pseudoregs. However, it is safe to create invalid memory addresses, because combine will try to recognize them and all they will do is make the combine attempt fail. If for some reason this cannot do its job, an rtx (clobber (const_int 0)) is returned. An insn containing that will not be recognized. */ static rtx gen_lowpart_for_combine (enum machine_mode mode, rtx x) { rtx result; if (GET_MODE (x) == mode) return x; /* Return identity if this is a CONST or symbolic reference. */ if (mode == Pmode && (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)) return x; /* We can only support MODE being wider than a word if X is a constant integer or has a mode the same size. */ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD && ! ((GET_MODE (x) == VOIDmode && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)) || GET_MODE_SIZE (GET_MODE (x)) == GET_MODE_SIZE (mode))) return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart won't know what to do. So we will strip off the SUBREG here and process normally. */ if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x))) { x = SUBREG_REG (x); if (GET_MODE (x) == mode) return x; } result = gen_lowpart_common (mode, x); #ifdef CANNOT_CHANGE_MODE_CLASS if (result != 0 && GET_CODE (result) == SUBREG && REG_P (SUBREG_REG (result)) && REGNO (SUBREG_REG (result)) >= FIRST_PSEUDO_REGISTER) bitmap_set_bit (&subregs_of_mode, REGNO (SUBREG_REG (result)) * MAX_MACHINE_MODE + GET_MODE (result)); #endif if (result) return result; if (MEM_P (x)) { int offset = 0; /* Refuse to work on a volatile memory ref or one with a mode-dependent address. 
*/ if (MEM_VOLATILE_P (x) || mode_dependent_address_p (XEXP (x, 0))) return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); /* If we want to refer to something bigger than the original memref, generate a paradoxical subreg instead. That will force a reload of the original memref X. */ if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)) return gen_rtx_SUBREG (mode, x, 0); if (WORDS_BIG_ENDIAN) offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD) - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD)); if (BYTES_BIG_ENDIAN) { /* Adjust the address so that the address-after-the-data is unchanged. */ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)) - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x)))); } return adjust_address_nv (x, mode, offset); } /* If X is a comparison operator, rewrite it in a new mode. This probably won't match, but may allow further simplifications. */ else if (COMPARISON_P (x)) return gen_rtx_fmt_ee (GET_CODE (x), mode, XEXP (x, 0), XEXP (x, 1)); /* If we couldn't simplify X any other way, just enclose it in a SUBREG. Normally, this SUBREG won't match, but some patterns may include an explicit SUBREG or we may simplify it further in combine. */ else { int offset = 0; rtx res; enum machine_mode sub_mode = GET_MODE (x); offset = subreg_lowpart_offset (mode, sub_mode); if (sub_mode == VOIDmode) { sub_mode = int_mode_for_mode (mode); x = gen_lowpart_common (sub_mode, x); if (x == 0) return gen_rtx_CLOBBER (VOIDmode, const0_rtx); } res = simplify_gen_subreg (mode, x, sub_mode, offset); if (res) return res; return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); } } /* These routines make binary and unary operations by first seeing if they fold; if not, a new expression is allocated. */ static rtx gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1) { rtx result; rtx tem; if (GET_CODE (op0) == CLOBBER) return op0; else if (GET_CODE (op1) == CLOBBER) return op1; if (GET_RTX_CLASS (code) == RTX_COMM_ARITH && swap_commutative_operands_p (op0, op1)) tem = op0, op0 = op1, op1 = tem; if (GET_RTX_CLASS (code) == RTX_COMPARE || GET_RTX_CLASS (code) == RTX_COMM_COMPARE) { enum machine_mode op_mode = GET_MODE (op0); /* Strip the COMPARE from (REL_OP (compare X Y) 0) to get just (REL_OP X Y). */ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx) { op1 = XEXP (op0, 1); op0 = XEXP (op0, 0); op_mode = GET_MODE (op0); } if (op_mode == VOIDmode) op_mode = GET_MODE (op1); result = simplify_relational_operation (code, mode, op_mode, op0, op1); } else result = simplify_binary_operation (code, mode, op0, op1); if (result) return result; /* Put complex operands first and constants second. */ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH && swap_commutative_operands_p (op0, op1)) return gen_rtx_fmt_ee (code, mode, op1, op0); /* If we are turning off bits already known off in OP0, we need not do an AND. */ else if (code == AND && GET_CODE (op1) == CONST_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0) return op0; return gen_rtx_fmt_ee (code, mode, op0, op1); } /* Simplify a comparison between *POP0 and *POP1 where CODE is the comparison code that will be tested. The result is a possibly different comparison code to use. *POP0 and *POP1 may be updated. It is possible that we might detect that a comparison is either always true or always false. However, we do not perform general constant folding in combine, so this knowledge isn't useful. Such tautologies should have been detected earlier. Hence we ignore all such cases. 
*/ static enum rtx_code simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) { rtx op0 = *pop0; rtx op1 = *pop1; rtx tem, tem1; int i; enum machine_mode mode, tmode; /* Try a few ways of applying the same transformation to both operands. */ while (1) { #ifndef WORD_REGISTER_OPERATIONS /* The test below this one won't handle SIGN_EXTENDs on these machines, so check specially. */ if (code != GTU && code != GEU && code != LTU && code != LEU && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT && GET_CODE (XEXP (op0, 0)) == ASHIFT && GET_CODE (XEXP (op1, 0)) == ASHIFT && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))) == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))) && GET_CODE (XEXP (op0, 1)) == CONST_INT && XEXP (op0, 1) == XEXP (op1, 1) && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1) && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1) && (INTVAL (XEXP (op0, 1)) == (GET_MODE_BITSIZE (GET_MODE (op0)) - (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))))))) { op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0)); op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0)); } #endif /* If both operands are the same constant shift, see if we can ignore the shift. We can if the shift is a rotate or if the bits shifted out of this shift are known to be zero for both inputs and if the type of comparison is compatible with the shift. */ if (GET_CODE (op0) == GET_CODE (op1) && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ)) || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT) && (code != GT && code != LT && code != GE && code != LE)) || (GET_CODE (op0) == ASHIFTRT && (code != GTU && code != LTU && code != GEU && code != LEU))) && GET_CODE (XEXP (op0, 1)) == CONST_INT && INTVAL (XEXP (op0, 1)) >= 0 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT && XEXP (op0, 1) == XEXP (op1, 1)) { enum machine_mode mode = GET_MODE (op0); unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode); int shift_count = INTVAL (XEXP (op0, 1)); if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT) mask &= (mask >> shift_count) << shift_count; else if (GET_CODE (op0) == ASHIFT) mask = (mask & (mask << shift_count)) >> shift_count; if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0) op0 = XEXP (op0, 0), op1 = XEXP (op1, 0); else break; } /* If both operands are AND's of a paradoxical SUBREG by constant, the SUBREGs are of the same mode, and, in both cases, the AND would be redundant if the comparison was done in the narrower mode, do the comparison in the narrower mode (e.g., we are AND'ing with 1 and the operand's possibly nonzero bits are 0xffffff01; in that case if we only care about QImode, we don't need the AND). This case occurs if the output mode of an scc insn is not SImode and STORE_FLAG_VALUE == 1 (e.g., the 386). Similarly, check for a case where the AND's are ZERO_EXTEND operations from some narrower mode even though a SUBREG is not present. 
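For instance, if both operands are (and:SI ... (const_int 255)), the comparison can be done in QImode on the low parts of the AND operands, with the comparison code converted to its unsigned form.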
*/ else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND && GET_CODE (XEXP (op0, 1)) == CONST_INT && GET_CODE (XEXP (op1, 1)) == CONST_INT) { rtx inner_op0 = XEXP (op0, 0); rtx inner_op1 = XEXP (op1, 0); HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1)); HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1)); int changed = 0; if (GET_CODE (inner_op0) == SUBREG && GET_CODE (inner_op1) == SUBREG && (GET_MODE_SIZE (GET_MODE (inner_op0)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0)))) && (GET_MODE (SUBREG_REG (inner_op0)) == GET_MODE (SUBREG_REG (inner_op1))) && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0))) <= HOST_BITS_PER_WIDE_INT) && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0), GET_MODE (SUBREG_REG (inner_op0))))) && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1), GET_MODE (SUBREG_REG (inner_op1)))))) { op0 = SUBREG_REG (inner_op0); op1 = SUBREG_REG (inner_op1); /* The resulting comparison is always unsigned since we masked off the original sign bit. */ code = unsigned_condition (code); changed = 1; } else if (c0 == c1) for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (GET_MODE (op0))); tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode)) if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode)) { op0 = gen_lowpart (tmode, inner_op0); op1 = gen_lowpart (tmode, inner_op1); code = unsigned_condition (code); changed = 1; break; } if (! changed) break; } /* If both operands are NOT, we can strip off the outer operation and adjust the comparison code for swapped operands; similarly for NEG, except that this must be an equality comparison. */ else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT) || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG && (code == EQ || code == NE))) op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code); else break; } /* If the first operand is a constant, swap the operands and adjust the comparison code appropriately, but don't do this if the second operand is already a constant integer. */ if (swap_commutative_operands_p (op0, op1)) { tem = op0, op0 = op1, op1 = tem; code = swap_condition (code); } /* We now enter a loop during which we will try to simplify the comparison. For the most part, we only are concerned with comparisons with zero, but some things may really be comparisons with zero but not start out looking that way. */ while (GET_CODE (op1) == CONST_INT) { enum machine_mode mode = GET_MODE (op0); unsigned int mode_width = GET_MODE_BITSIZE (mode); unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode); int equality_comparison_p; int sign_bit_comparison_p; int unsigned_comparison_p; HOST_WIDE_INT const_op; /* We only want to handle integral modes. This catches VOIDmode, CCmode, and the floating-point modes. An exception is that we can handle VOIDmode if OP0 is a COMPARE or a comparison operation. */ if (GET_MODE_CLASS (mode) != MODE_INT && ! (mode == VOIDmode && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0)))) break; /* Get the constant we are comparing against and turn off all bits not on in our mode. */ const_op = INTVAL (op1); if (mode != VOIDmode) const_op = trunc_int_for_mode (const_op, mode); op1 = GEN_INT (const_op); /* If we are comparing against a constant power of two and the value being compared can only have that single bit nonzero (e.g., it was `and'ed with that bit), we can replace this with a comparison with zero. 
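For example, (eq (and:SI X (const_int 8)) (const_int 8)) becomes (ne (and:SI X (const_int 8)) (const_int 0)).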
*/ if (const_op && (code == EQ || code == NE || code == GE || code == GEU || code == LT || code == LTU) && mode_width <= HOST_BITS_PER_WIDE_INT && exact_log2 (const_op) >= 0 && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op) { code = (code == EQ || code == GE || code == GEU ? NE : EQ); op1 = const0_rtx, const_op = 0; } /* Similarly, if we are comparing a value known to be either -1 or 0 with -1, change it to the opposite comparison against zero. */ if (const_op == -1 && (code == EQ || code == NE || code == GT || code == LE || code == GEU || code == LTU) && num_sign_bit_copies (op0, mode) == mode_width) { code = (code == EQ || code == LE || code == GEU ? NE : EQ); op1 = const0_rtx, const_op = 0; } /* Do some canonicalizations based on the comparison code. We prefer comparisons against zero and then prefer equality comparisons. If we can reduce the size of a constant, we will do that too. */ switch (code) { case LT: /* < C is equivalent to <= (C - 1) */ if (const_op > 0) { const_op -= 1; op1 = GEN_INT (const_op); code = LE; /* ... fall through to LE case below. */ } else break; case LE: /* <= C is equivalent to < (C + 1); we do this for C < 0 */ if (const_op < 0) { const_op += 1; op1 = GEN_INT (const_op); code = LT; } /* If we are doing a <= 0 comparison on a value known to have a zero sign bit, we can replace this with == 0. */ else if (const_op == 0 && mode_width <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (op0, mode) & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0) code = EQ; break; case GE: /* >= C is equivalent to > (C - 1). */ if (const_op > 0) { const_op -= 1; op1 = GEN_INT (const_op); code = GT; /* ... fall through to GT below. */ } else break; case GT: /* > C is equivalent to >= (C + 1); we do this for C < 0. */ if (const_op < 0) { const_op += 1; op1 = GEN_INT (const_op); code = GE; } /* If we are doing a > 0 comparison on a value known to have a zero sign bit, we can replace this with != 0. */ else if (const_op == 0 && mode_width <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (op0, mode) & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0) code = NE; break; case LTU: /* < C is equivalent to <= (C - 1). */ if (const_op > 0) { const_op -= 1; op1 = GEN_INT (const_op); code = LEU; /* ... fall through ... */ } /* (unsigned) < 0x80000000 is equivalent to >= 0. */ else if ((mode_width <= HOST_BITS_PER_WIDE_INT) && (const_op == (HOST_WIDE_INT) 1 << (mode_width - 1))) { const_op = 0, op1 = const0_rtx; code = GE; break; } else break; case LEU: /* unsigned <= 0 is equivalent to == 0 */ if (const_op == 0) code = EQ; /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */ else if ((mode_width <= HOST_BITS_PER_WIDE_INT) && (const_op == ((HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)) { const_op = 0, op1 = const0_rtx; code = GE; } break; case GEU: /* >= C is equivalent to > (C - 1). */ if (const_op > 1) { const_op -= 1; op1 = GEN_INT (const_op); code = GTU; /* ... fall through ... */ } /* (unsigned) >= 0x80000000 is equivalent to < 0. */ else if ((mode_width <= HOST_BITS_PER_WIDE_INT) && (const_op == (HOST_WIDE_INT) 1 << (mode_width - 1))) { const_op = 0, op1 = const0_rtx; code = LT; break; } else break; case GTU: /* unsigned > 0 is equivalent to != 0 */ if (const_op == 0) code = NE; /* (unsigned) > 0x7fffffff is equivalent to < 0. */ else if ((mode_width <= HOST_BITS_PER_WIDE_INT) && (const_op == ((HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)) { const_op = 0, op1 = const0_rtx; code = LT; } break; default: break; } /* Compute some predicates to simplify code below. 
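*/

/* Illustration (added; not combine.c code, compiled out via #if 0): the
   canonicalizations in the switch above rest on elementary integer
   identities.  The sketch below checks a few of them exhaustively for
   8-bit values; it is only an assumption-level aid, with an invented
   helper name and explicit masking standing in for an 8-bit mode.  */
#if 0
#include <assert.h>

static void
check_comparison_canonicalizations (void)
{
  int i;

  for (i = 0; i < 256; i++)
    {
      int sx = (i ^ 0x80) - 0x80;	/* I viewed as a signed 8-bit value */
      unsigned int ux = i;		/* I viewed as an unsigned value */

      /* LT C is LE (C - 1), and GE C is GT (C - 1), for C > 0.  */
      assert ((sx < 5) == (sx <= 4));
      assert ((sx >= 5) == (sx > 4));

      /* (unsigned) < 0x80 is >= 0, and (unsigned) > 0x7f is < 0, for an
         8-bit value.  */
      assert ((ux < 0x80) == (sx >= 0));
      assert ((ux > 0x7f) == (sx < 0));

      /* unsigned <= 0 is == 0, and unsigned > 0 is != 0.  */
      assert ((ux <= 0) == (ux == 0));
      assert ((ux > 0) == (ux != 0));
    }
}
#endif

/* The predicates promised above: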
*/ equality_comparison_p = (code == EQ || code == NE); sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0); unsigned_comparison_p = (code == LTU || code == LEU || code == GTU || code == GEU); /* If this is a sign bit comparison and we can do arithmetic in MODE, say that we will only be needing the sign bit of OP0. */ if (sign_bit_comparison_p && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) op0 = force_to_mode (op0, mode, ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)), NULL_RTX, 0); /* Now try cases based on the opcode of OP0. If none of the cases does a "continue", we exit this loop immediately after the switch. */ switch (GET_CODE (op0)) { case ZERO_EXTRACT: /* If we are extracting a single bit from a variable position in a constant that has only a single bit set and are comparing it with zero, we can convert this into an equality comparison between the position and the location of the single bit. */ /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might have already reduced the shift count modulo the word size. */ if (!SHIFT_COUNT_TRUNCATED && GET_CODE (XEXP (op0, 0)) == CONST_INT && XEXP (op0, 1) == const1_rtx && equality_comparison_p && const_op == 0 && (i = exact_log2 (INTVAL (XEXP (op0, 0)))) >= 0) { if (BITS_BIG_ENDIAN) { enum machine_mode new_mode = mode_for_extraction (EP_extzv, 1); if (new_mode == MAX_MACHINE_MODE) i = BITS_PER_WORD - 1 - i; else { mode = new_mode; i = (GET_MODE_BITSIZE (mode) - 1 - i); } } op0 = XEXP (op0, 2); op1 = GEN_INT (i); const_op = i; /* Result is nonzero iff shift count is equal to I. */ code = reverse_condition (code); continue; } /* ... fall through ... */ case SIGN_EXTRACT: tem = expand_compound_operation (op0); if (tem != op0) { op0 = tem; continue; } break; case NOT: /* If testing for equality, we can take the NOT of the constant. */ if (equality_comparison_p && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0) { op0 = XEXP (op0, 0); op1 = tem; continue; } /* If just looking at the sign bit, reverse the sense of the comparison. */ if (sign_bit_comparison_p) { op0 = XEXP (op0, 0); code = (code == GE ? LT : GE); continue; } break; case NEG: /* If testing for equality, we can take the NEG of the constant. */ if (equality_comparison_p && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0) { op0 = XEXP (op0, 0); op1 = tem; continue; } /* The remaining cases only apply to comparisons with zero. */ if (const_op != 0) break; /* When X is ABS or is known positive, (neg X) is < 0 if and only if X != 0. */ if (sign_bit_comparison_p && (GET_CODE (XEXP (op0, 0)) == ABS || (mode_width <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (XEXP (op0, 0), mode) & ((HOST_WIDE_INT) 1 << (mode_width - 1))) == 0))) { op0 = XEXP (op0, 0); code = (code == LT ? NE : EQ); continue; } /* If we have NEG of something whose two high-order bits are the same, we know that "(-a) < 0" is equivalent to "a > 0". */ if (num_sign_bit_copies (op0, mode) >= 2) { op0 = XEXP (op0, 0); code = swap_condition (code); continue; } break; case ROTATE: /* If we are testing equality and our count is a constant, we can perform the inverse operation on our RHS. */ if (equality_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT && (tem = simplify_binary_operation (ROTATERT, mode, op1, XEXP (op0, 1))) != 0) { op0 = XEXP (op0, 0); op1 = tem; continue; } /* If we are doing a < 0 or >= 0 comparison, it means we are testing a particular bit. Convert it to an AND of a constant of that bit. This will be converted into a ZERO_EXTRACT. 
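*/

/* Illustration (added; not combine.c code, compiled out via #if 0): the
   NOT and NEG cases above reduce sign-bit tests to tests on the operand.
   The sketch checks, for all 8-bit values, that NOT reverses a sign-bit
   test, that (neg X) < 0 iff X != 0 when X is known nonnegative, and that
   with two sign-bit copies "(-X) < 0" is "X > 0".  Assumption-level only;
   the helper name is invented and NEG/NOT are modelled mod 256.  */
#if 0
#include <assert.h>

static void
check_not_neg_sign_tests (void)
{
  int i;

  for (i = 0; i < 256; i++)
    {
      int x = (i ^ 0x80) - 0x80;			/* signed 8-bit value */
      int notx = ((i ^ 0xff) ^ 0x80) - 0x80;		/* (not X), signed view */
      int negx = (((256 - i) & 0xff) ^ 0x80) - 0x80;	/* (neg X), signed view */

      /* NOT under a sign-bit test just reverses the test.  */
      assert ((notx < 0) == (x >= 0));

      /* If X is known nonnegative (e.g. an ABS), (neg X) < 0 iff X != 0.  */
      if (x >= 0)
        assert ((negx < 0) == (x != 0));

      /* With at least two sign-bit copies, negation cannot overflow, so
         "(-X) < 0" is simply "X > 0".  */
      if (x >= -64 && x < 64)
        assert ((negx < 0) == (x > 0));
    }
}
#endif

/* The sign-bit-to-AND conversion described above: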
*/ if (const_op == 0 && sign_bit_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT && mode_width <= HOST_BITS_PER_WIDE_INT) { op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), ((HOST_WIDE_INT) 1 << (mode_width - 1 - INTVAL (XEXP (op0, 1))))); code = (code == LT ? NE : EQ); continue; } /* Fall through. */ case ABS: /* ABS is ignorable inside an equality comparison with zero. */ if (const_op == 0 && equality_comparison_p) { op0 = XEXP (op0, 0); continue; } break; case SIGN_EXTEND: /* Can simplify (compare (zero/sign_extend FOO) CONST) to (compare FOO CONST) if CONST fits in FOO's mode and we are either testing inequality or have an unsigned comparison with ZERO_EXTEND or a signed comparison with SIGN_EXTEND. */ if (! unsigned_comparison_p && (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))) <= HOST_BITS_PER_WIDE_INT) && ((unsigned HOST_WIDE_INT) const_op < (((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))) - 1))))) { op0 = XEXP (op0, 0); continue; } break; case SUBREG: /* Check for the case where we are comparing A - C1 with C2, both constants are smaller than 1/2 the maximum positive value in MODE, and the comparison is equality or unsigned. In that case, if A is either zero-extended to MODE or has sufficient sign bits so that the high-order bit in MODE is a copy of the sign in the inner mode, we can prove that it is safe to do the operation in the wider mode. This simplifies many range checks. */ if (mode_width <= HOST_BITS_PER_WIDE_INT && subreg_lowpart_p (op0) && GET_CODE (SUBREG_REG (op0)) == PLUS && GET_CODE (XEXP (SUBREG_REG (op0), 1)) == CONST_INT && INTVAL (XEXP (SUBREG_REG (op0), 1)) < 0 && (-INTVAL (XEXP (SUBREG_REG (op0), 1)) < (HOST_WIDE_INT) (GET_MODE_MASK (mode) / 2)) && (unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode) / 2 && (0 == (nonzero_bits (XEXP (SUBREG_REG (op0), 0), GET_MODE (SUBREG_REG (op0))) & ~GET_MODE_MASK (mode)) || (num_sign_bit_copies (XEXP (SUBREG_REG (op0), 0), GET_MODE (SUBREG_REG (op0))) > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) - GET_MODE_BITSIZE (mode))))) { op0 = SUBREG_REG (op0); continue; } /* If the inner mode is narrower and we are extracting the low part, we can treat the SUBREG as if it were a ZERO_EXTEND. */ if (subreg_lowpart_p (op0) && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width) /* Fall through */ ; else break; /* ... fall through ... */ case ZERO_EXTEND: if ((unsigned_comparison_p || equality_comparison_p) && (GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))) <= HOST_BITS_PER_WIDE_INT) && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (GET_MODE (XEXP (op0, 0))))) { op0 = XEXP (op0, 0); continue; } break; case PLUS: /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do this for equality comparisons due to pathological cases involving overflows. */ if (equality_comparison_p && 0 != (tem = simplify_binary_operation (MINUS, mode, op1, XEXP (op0, 1)))) { op0 = XEXP (op0, 0); op1 = tem; continue; } /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */ if (const_op == 0 && XEXP (op0, 1) == constm1_rtx && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p) { op0 = XEXP (XEXP (op0, 0), 0); code = (code == LT ? EQ : NE); continue; } break; case MINUS: /* We used to optimize signed comparisons against zero, but that was incorrect. Unsigned comparisons against zero (GTU, LEU) arrive here as equality comparisons, or (GEU, LTU) are optimized away. No need to special-case them. 
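*/

/* Illustration (added; not combine.c code, compiled out via #if 0): the
   PLUS case above moves a constant to the other side of an equality, which
   is valid even with wraparound because adding a constant is a bijection
   modulo 2^width, and (plus (abs X) -1) is negative exactly when X is zero.
   Assumption-level sketch with an invented helper name; 8-bit arithmetic
   is modelled with explicit masks.  */
#if 0
#include <assert.h>

static void
check_plus_rewrites (void)
{
  int i;

  for (i = 0; i < 256; i++)
    {
      int sx = (i ^ 0x80) - 0x80;			/* signed 8-bit value */
      int a = (sx < 0 ? -sx : sx) & 0xff;		/* ABS in 8-bit arithmetic */
      int am1 = (((a + 255) & 0xff) ^ 0x80) - 0x80;	/* (abs X) - 1, signed view */

      /* (eq (plus X 7) 42) becomes (eq X 35), despite possible wraparound.  */
      assert ((((i + 7) & 0xff) == 42) == (i == 35));

      /* (plus (abs X) (const_int -1)) is negative exactly when X == 0.  */
      assert ((am1 < 0) == (sx == 0));
    }
}
#endif

/* Back to the MINUS case: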
*/ /* (eq (minus A B) C) -> (eq A (plus B C)) or (eq B (minus A C)), whichever simplifies. We can only do this for equality comparisons due to pathological cases involving overflows. */ if (equality_comparison_p && 0 != (tem = simplify_binary_operation (PLUS, mode, XEXP (op0, 1), op1))) { op0 = XEXP (op0, 0); op1 = tem; continue; } if (equality_comparison_p && 0 != (tem = simplify_binary_operation (MINUS, mode, XEXP (op0, 0), op1))) { op0 = XEXP (op0, 1); op1 = tem; continue; } /* The sign bit of (minus (ashiftrt X C) X), where C is the number of bits in X minus 1, is one iff X > 0. */ if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1))) { op0 = XEXP (op0, 1); code = (code == GE ? LE : GT); continue; } break; case XOR: /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification if C is zero or B is a constant. */ if (equality_comparison_p && 0 != (tem = simplify_binary_operation (XOR, mode, XEXP (op0, 1), op1))) { op0 = XEXP (op0, 0); op1 = tem; continue; } break; case EQ: case NE: case UNEQ: case LTGT: case LT: case LTU: case UNLT: case LE: case LEU: case UNLE: case GT: case GTU: case UNGT: case GE: case GEU: case UNGE: case UNORDERED: case ORDERED: /* We can't do anything if OP0 is a condition code value, rather than an actual data value. */ if (const_op != 0 || CC0_P (XEXP (op0, 0)) || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC) break; /* Get the two operands being compared. */ if (GET_CODE (XEXP (op0, 0)) == COMPARE) tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1); else tem = XEXP (op0, 0), tem1 = XEXP (op0, 1); /* Check for the cases where we simply want the result of the earlier test or the opposite of that result. */ if (code == NE || code == EQ || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT && (STORE_FLAG_VALUE & (((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))) && (code == LT || code == GE))) { enum rtx_code new_code; if (code == LT || code == NE) new_code = GET_CODE (op0); else new_code = combine_reversed_comparison_code (op0); if (new_code != UNKNOWN) { code = new_code; op0 = tem; op1 = tem1; continue; } } break; case IOR: /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero iff X <= 0. */ if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS && XEXP (XEXP (op0, 0), 1) == constm1_rtx && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1))) { op0 = XEXP (op0, 1); code = (code == GE ? GT : LE); continue; } break; case AND: /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This will be converted to a ZERO_EXTRACT later. */ if (const_op == 0 && equality_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFT && XEXP (XEXP (op0, 0), 0) == const1_rtx) { op0 = simplify_and_const_int (op0, mode, gen_rtx_LSHIFTRT (mode, XEXP (op0, 1), XEXP (XEXP (op0, 0), 1)), (HOST_WIDE_INT) 1); continue; } /* If we are comparing (and (lshiftrt X C1) C2) for equality with zero and X is a comparison and C1 and C2 describe only bits set in STORE_FLAG_VALUE, we can compare with X. 
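*/

/* Illustration (added; not combine.c code, compiled out via #if 0): two of
   the rewrites above, checked exhaustively in 8 bits -- the sign bit of
   (minus (ashiftrt X width-1) X) is set exactly when X > 0, and an XOR by
   a constant can be moved to the other side of an equality because XOR is
   an involution.  Assumption-level sketch with an invented helper name.  */
#if 0
#include <assert.h>

static void
check_minus_and_xor_rewrites (void)
{
  int i, b;

  for (i = 0; i < 256; i++)
    {
      int x = (i ^ 0x80) - 0x80;			/* signed 8-bit value */
      int sign_copy = x < 0 ? 0xff : 0x00;		/* (ashiftrt X 7) in 8 bits */
      int diff = (sign_copy + 256 - i) & 0xff;		/* (minus (ashiftrt X 7) X) */

      /* The sign bit of the difference is set exactly when X > 0.  */
      assert (((diff & 0x80) != 0) == (x > 0));

      /* (eq (xor A B) C) is (eq A (xor B C)).  */
      for (b = 0; b < 256; b++)
        assert (((i ^ b) == 0x5a) == (i == (b ^ 0x5a)));
    }
}
#endif

/* The STORE_FLAG_VALUE case described above: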
*/ if (const_op == 0 && equality_comparison_p && mode_width <= HOST_BITS_PER_WIDE_INT && GET_CODE (XEXP (op0, 1)) == CONST_INT && GET_CODE (XEXP (op0, 0)) == LSHIFTRT && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT) { mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode)) << INTVAL (XEXP (XEXP (op0, 0), 1))); if ((~STORE_FLAG_VALUE & mask) == 0 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0)) || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0 && COMPARISON_P (tem)))) { op0 = XEXP (XEXP (op0, 0), 0); continue; } } /* If we are doing an equality comparison of an AND of a bit equal to the sign bit, replace this with a LT or GE comparison of the underlying value. */ if (equality_comparison_p && const_op == 0 && GET_CODE (XEXP (op0, 1)) == CONST_INT && mode_width <= HOST_BITS_PER_WIDE_INT && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode)) == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) { op0 = XEXP (op0, 0); code = (code == EQ ? GE : LT); continue; } /* If this AND operation is really a ZERO_EXTEND from a narrower mode, the constant fits within that mode, and this is either an equality or unsigned comparison, try to do this comparison in the narrower mode. */ if ((equality_comparison_p || unsigned_comparison_p) && GET_CODE (XEXP (op0, 1)) == CONST_INT && (i = exact_log2 ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode)) + 1)) >= 0 && const_op >> i == 0 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode) { op0 = gen_lowpart (tmode, XEXP (op0, 0)); continue; } /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1 fits in both M1 and M2 and the SUBREG is either paradoxical or represents the low part, permute the SUBREG and the AND and try again. */ if (GET_CODE (XEXP (op0, 0)) == SUBREG) { unsigned HOST_WIDE_INT c1; tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0))); /* Require an integral mode, to avoid creating something like (AND:SF ...). */ if (SCALAR_INT_MODE_P (tmode) /* It is unsafe to commute the AND into the SUBREG if the SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is not defined. As originally written the upper bits have a defined value due to the AND operation. However, if we commute the AND inside the SUBREG then they no longer have defined values and the meaning of the code has been changed. */ && (0 #ifdef WORD_REGISTER_OPERATIONS || (mode_width > GET_MODE_BITSIZE (tmode) && mode_width <= BITS_PER_WORD) #endif || (mode_width <= GET_MODE_BITSIZE (tmode) && subreg_lowpart_p (XEXP (op0, 0)))) && GET_CODE (XEXP (op0, 1)) == CONST_INT && mode_width <= HOST_BITS_PER_WIDE_INT && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0 && (c1 & ~GET_MODE_MASK (tmode)) == 0 && c1 != mask && c1 != GET_MODE_MASK (tmode)) { op0 = gen_binary (AND, tmode, SUBREG_REG (XEXP (op0, 0)), gen_int_mode (c1, tmode)); op0 = gen_lowpart (mode, op0); continue; } } /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */ if (const_op == 0 && equality_comparison_p && XEXP (op0, 1) == const1_rtx && GET_CODE (XEXP (op0, 0)) == NOT) { op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (XEXP (op0, 0), 0), (HOST_WIDE_INT) 1); code = (code == NE ? EQ : NE); continue; } /* Convert (ne (and (lshiftrt (not X)) 1) 0) to (eq (and (lshiftrt X) 1) 0). Also handle the case where (not X) is expressed using xor. 
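*/

/* Illustration (added; not combine.c code, compiled out via #if 0): two of
   the AND rewrites above -- an equality test of the sign bit is a signed
   test against zero, and testing the low bit of (not X) is the opposite of
   testing the low bit of X.  Assumption-level sketch with an invented
   helper name; NOT is modelled as XOR with 0xff to stay in 8 bits.  */
#if 0
#include <assert.h>

static void
check_and_rewrites (void)
{
  int i;

  for (i = 0; i < 256; i++)
    {
      int sx = (i ^ 0x80) - 0x80;	/* signed 8-bit view of I */

      /* (eq (and X 0x80) 0) is (ge X 0); (ne (and X 0x80) 0) is (lt X 0).  */
      assert (((i & 0x80) == 0) == (sx >= 0));
      assert (((i & 0x80) != 0) == (sx < 0));

      /* (ne (and (not X) 1) 0) is (eq (and X 1) 0).  */
      assert ((((i ^ 0xff) & 1) != 0) == ((i & 1) == 0));
    }
}
#endif

/* The (not X) / XOR form described above: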
*/ if (const_op == 0 && equality_comparison_p && XEXP (op0, 1) == const1_rtx && GET_CODE (XEXP (op0, 0)) == LSHIFTRT) { rtx shift_op = XEXP (XEXP (op0, 0), 0); rtx shift_count = XEXP (XEXP (op0, 0), 1); if (GET_CODE (shift_op) == NOT || (GET_CODE (shift_op) == XOR && GET_CODE (XEXP (shift_op, 1)) == CONST_INT && GET_CODE (shift_count) == CONST_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && (INTVAL (XEXP (shift_op, 1)) == (HOST_WIDE_INT) 1 << INTVAL (shift_count)))) { op0 = simplify_and_const_int (NULL_RTX, mode, gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count), (HOST_WIDE_INT) 1); code = (code == NE ? EQ : NE); continue; } } break; case ASHIFT: /* If we have (compare (ashift FOO N) (const_int C)) and the high order N bits of FOO (N+1 if an inequality comparison) are known to be zero, we can do this by comparing FOO with C shifted right N bits so long as the low-order N bits of C are zero. */ if (GET_CODE (XEXP (op0, 1)) == CONST_INT && INTVAL (XEXP (op0, 1)) >= 0 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p) < HOST_BITS_PER_WIDE_INT) && ((const_op & (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1)) == 0) && mode_width <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (XEXP (op0, 0), mode) & ~(mask >> (INTVAL (XEXP (op0, 1)) + ! equality_comparison_p))) == 0) { /* We must perform a logical shift, not an arithmetic one, as we want the top N bits of C to be zero. */ unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode); temp >>= INTVAL (XEXP (op0, 1)); op1 = gen_int_mode (temp, mode); op0 = XEXP (op0, 0); continue; } /* If we are doing a sign bit comparison, it means we are testing a particular bit. Convert it to the appropriate AND. */ if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT && mode_width <= HOST_BITS_PER_WIDE_INT) { op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), ((HOST_WIDE_INT) 1 << (mode_width - 1 - INTVAL (XEXP (op0, 1))))); code = (code == LT ? NE : EQ); continue; } /* If this an equality comparison with zero and we are shifting the low bit to the sign bit, we can convert this to an AND of the low-order bit. */ if (const_op == 0 && equality_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT && (unsigned HOST_WIDE_INT) INTVAL (XEXP (op0, 1)) == mode_width - 1) { op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), (HOST_WIDE_INT) 1); continue; } break; case ASHIFTRT: /* If this is an equality comparison with zero, we can do this as a logical shift, which might be much simpler. */ if (equality_comparison_p && const_op == 0 && GET_CODE (XEXP (op0, 1)) == CONST_INT) { op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (op0, 0), INTVAL (XEXP (op0, 1))); continue; } /* If OP0 is a sign extension and CODE is not an unsigned comparison, do the comparison in a narrower mode. */ if (! unsigned_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT && GET_CODE (XEXP (op0, 0)) == ASHIFT && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1) && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), MODE_INT, 1)) != BLKmode && (((unsigned HOST_WIDE_INT) const_op + (GET_MODE_MASK (tmode) >> 1) + 1) <= GET_MODE_MASK (tmode))) { op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0)); continue; } /* Likewise if OP0 is a PLUS of a sign extension with a constant, which is usually represented with the PLUS between the shifts. */ if (! 
unsigned_comparison_p && GET_CODE (XEXP (op0, 1)) == CONST_INT && GET_CODE (XEXP (op0, 0)) == PLUS && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1) && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), MODE_INT, 1)) != BLKmode && (((unsigned HOST_WIDE_INT) const_op + (GET_MODE_MASK (tmode) >> 1) + 1) <= GET_MODE_MASK (tmode))) { rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0); rtx add_const = XEXP (XEXP (op0, 0), 1); rtx new_const = gen_binary (ASHIFTRT, GET_MODE (op0), add_const, XEXP (op0, 1)); op0 = gen_binary (PLUS, tmode, gen_lowpart (tmode, inner), new_const); continue; } /* ... fall through ... */ case LSHIFTRT: /* If we have (compare (xshiftrt FOO N) (const_int C)) and the low order N bits of FOO are known to be zero, we can do this by comparing FOO with C shifted left N bits so long as no overflow occurs. */ if (GET_CODE (XEXP (op0, 1)) == CONST_INT && INTVAL (XEXP (op0, 1)) >= 0 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT && mode_width <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (XEXP (op0, 0), mode) & (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1)) == 0 && (((unsigned HOST_WIDE_INT) const_op + (GET_CODE (op0) != LSHIFTRT ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1) + 1) : 0)) <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)))) { /* If the shift was logical, then we must make the condition unsigned. */ if (GET_CODE (op0) == LSHIFTRT) code = unsigned_condition (code); const_op <<= INTVAL (XEXP (op0, 1)); op1 = GEN_INT (const_op); op0 = XEXP (op0, 0); continue; } /* If we are using this shift to extract just the sign bit, we can replace this with an LT or GE comparison. */ if (const_op == 0 && (equality_comparison_p || sign_bit_comparison_p) && GET_CODE (XEXP (op0, 1)) == CONST_INT && (unsigned HOST_WIDE_INT) INTVAL (XEXP (op0, 1)) == mode_width - 1) { op0 = XEXP (op0, 0); code = (code == NE || code == GT ? LT : GE); continue; } break; default: break; } break; } /* Now make any compound operations involved in this comparison. Then, check for an outmost SUBREG on OP0 that is not doing anything or is paradoxical. The latter transformation must only be performed when it is known that the "extra" bits will be the same in op0 and op1 or that they don't matter. There are three cases to consider: 1. SUBREG_REG (op0) is a register. In this case the bits are don't care bits and we can assume they have any convenient value. So making the transformation is safe. 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined. In this case the upper bits of op0 are undefined. We should not make the simplification in that case as we do not know the contents of those bits. 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not NIL. In that case we know those bits are zeros or ones. We must also be sure that they are the same as the upper bits of op1. We can never remove a SUBREG for a non-equality comparison because the sign bit is in a different place in the underlying object. */ op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET); op1 = make_compound_operation (op1, SET); if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0) && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT && (code == NE || code == EQ)) { if (GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))) { /* For paradoxical subregs, allow case 1 as above. Case 3 isn't implemented. 
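*/

/* Illustration (added; not combine.c code, compiled out via #if 0): the
   LSHIFTRT cases handled earlier in this function -- a logical right shift
   by width-1 extracts the sign bit, so comparing the result against zero is
   a signed test of the operand, and a comparison of a right-shifted value
   can instead shift the constant left when the discarded low bits are known
   to be zero and no overflow occurs.  Assumption-level sketch with an
   invented helper name, checked exhaustively in 8 bits.  */
#if 0
#include <assert.h>

static void
check_shift_right_rewrites (void)
{
  int x;

  for (x = 0; x < 256; x++)
    {
      int sx = (x ^ 0x80) - 0x80;	/* signed 8-bit view of X */

      /* (ne (lshiftrt X 7) 0) is (lt X 0); (eq ...) is (ge X 0).  */
      assert (((x >> 7) != 0) == (sx < 0));
      assert (((x >> 7) == 0) == (sx >= 0));

      /* (ltu (lshiftrt X 2) 9) is (ltu X 36) when the low two bits of X
         are known to be zero.  */
      if ((x & 3) == 0)
        assert (((x >> 2) < 9) == (x < (9 << 2)));
    }
}
#endif

/* Case 1 for paradoxical subregs: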
*/ if (REG_P (SUBREG_REG (op0))) { op0 = SUBREG_REG (op0); op1 = gen_lowpart (GET_MODE (op0), op1); } } else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) <= HOST_BITS_PER_WIDE_INT) && (nonzero_bits (SUBREG_REG (op0), GET_MODE (SUBREG_REG (op0))) & ~GET_MODE_MASK (GET_MODE (op0))) == 0) { tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1); if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0))) & ~GET_MODE_MASK (GET_MODE (op0))) == 0) op0 = SUBREG_REG (op0), op1 = tem; } } /* We now do the opposite procedure: Some machines don't have compare insns in all modes. If OP0's mode is an integer mode smaller than a word and we can't do a compare in that mode, see if there is a larger mode for which we can do the compare. There are a number of cases in which we can use the wider mode. */ mode = GET_MODE (op0); if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD && ! have_insn_for (COMPARE, mode)) for (tmode = GET_MODE_WIDER_MODE (mode); (tmode != VOIDmode && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT); tmode = GET_MODE_WIDER_MODE (tmode)) if (have_insn_for (COMPARE, tmode)) { int zero_extended; /* If the only nonzero bits in OP0 and OP1 are those in the narrower mode and this is an equality or unsigned comparison, we can use the wider mode. Similarly for sign-extended values, in which case it is true for all comparisons. */ zero_extended = ((code == EQ || code == NE || code == GEU || code == GTU || code == LEU || code == LTU) && (nonzero_bits (op0, tmode) & ~GET_MODE_MASK (mode)) == 0 && ((GET_CODE (op1) == CONST_INT || (nonzero_bits (op1, tmode) & ~GET_MODE_MASK (mode)) == 0))); if (zero_extended || ((num_sign_bit_copies (op0, tmode) > (unsigned int) (GET_MODE_BITSIZE (tmode) - GET_MODE_BITSIZE (mode))) && (num_sign_bit_copies (op1, tmode) > (unsigned int) (GET_MODE_BITSIZE (tmode) - GET_MODE_BITSIZE (mode))))) { /* If OP0 is an AND and we don't have an AND in MODE either, make a new AND in the proper mode. */ if (GET_CODE (op0) == AND && !have_insn_for (AND, mode)) op0 = gen_binary (AND, tmode, gen_lowpart (tmode, XEXP (op0, 0)), gen_lowpart (tmode, XEXP (op0, 1))); op0 = gen_lowpart (tmode, op0); if (zero_extended && GET_CODE (op1) == CONST_INT) op1 = GEN_INT (INTVAL (op1) & GET_MODE_MASK (mode)); op1 = gen_lowpart (tmode, op1); break; } /* If this is a test for negative, we can make an explicit test of the sign bit. */ if (op1 == const0_rtx && (code == LT || code == GE) && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) { op0 = gen_binary (AND, tmode, gen_lowpart (tmode, op0), GEN_INT ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))); code = (code == LT) ? NE : EQ; break; } } #ifdef CANONICALIZE_COMPARISON /* If this machine only supports a subset of valid comparisons, see if we can convert an unsupported one into a supported one. */ CANONICALIZE_COMPARISON (code, op0, op1); #endif *pop0 = op0; *pop1 = op1; return code; } /* Like jump.c' reversed_comparison_code, but use combine infrastructure for searching backward. */ static enum rtx_code combine_reversed_comparison_code (rtx exp) { enum rtx_code code1 = reversed_comparison_code (exp, NULL); rtx x; if (code1 != UNKNOWN || GET_MODE_CLASS (GET_MODE (XEXP (exp, 0))) != MODE_CC) return code1; /* Otherwise try and find where the condition codes were last set and use that. 
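*/

/* Illustration (added; not combine.c code, compiled out via #if 0): when
   the comparison above is widened because no compare exists in the narrow
   mode, a test for negative can be done in the wide mode by ANDing with the
   narrow sign bit, as the widening code does.  Assumption-level sketch with
   an invented helper name; an 8-bit value zero-extended into a wider
   register is modelled by an int in [0, 255].  */
#if 0
#include <assert.h>

static void
check_sign_test_via_bit_test (void)
{
  int wide;

  for (wide = 0; wide < 256; wide++)
    {
      int narrow_signed = (wide ^ 0x80) - 0x80;	/* the 8-bit value, signed */

      assert ((narrow_signed < 0) == ((wide & 0x80) != 0));
      assert ((narrow_signed >= 0) == ((wide & 0x80) == 0));
    }
}
#endif

/* Do the lookup: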
*/ x = get_last_value (XEXP (exp, 0)); if (!x || GET_CODE (x) != COMPARE) return UNKNOWN; return reversed_comparison_code_parts (GET_CODE (exp), XEXP (x, 0), XEXP (x, 1), NULL); } /* Return comparison with reversed code of EXP and operands OP0 and OP1. Return NULL_RTX in case we fail to do the reversal. */ static rtx reversed_comparison (rtx exp, enum machine_mode mode, rtx op0, rtx op1) { enum rtx_code reversed_code = combine_reversed_comparison_code (exp); if (reversed_code == UNKNOWN) return NULL_RTX; else return gen_binary (reversed_code, mode, op0, op1); } /* Utility function for following routine. Called when X is part of a value being stored into last_set_value. Sets last_set_table_tick for each register mentioned. Similar to mention_regs in cse.c */ static void update_table_tick (rtx x) { enum rtx_code code = GET_CODE (x); const char *fmt = GET_RTX_FORMAT (code); int i; if (code == REG) { unsigned int regno = REGNO (x); unsigned int endregno = regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (x)] : 1); unsigned int r; for (r = regno; r < endregno; r++) reg_stat[r].last_set_table_tick = label_tick; return; } for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) /* Note that we can't have an "E" in values stored; see get_last_value_validate. */ if (fmt[i] == 'e') { /* Check for identical subexpressions. If x contains identical subexpression we only have to traverse one of them. */ if (i == 0 && ARITHMETIC_P (x)) { /* Note that at this point x1 has already been processed. */ rtx x0 = XEXP (x, 0); rtx x1 = XEXP (x, 1); /* If x0 and x1 are identical then there is no need to process x0. */ if (x0 == x1) break; /* If x0 is identical to a subexpression of x1 then while processing x1, x0 has already been processed. Thus we are done with x. */ if (ARITHMETIC_P (x1) && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1))) break; /* If x1 is identical to a subexpression of x0 then we still have to process the rest of x0. */ if (ARITHMETIC_P (x0) && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1))) { update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0)); break; } } update_table_tick (XEXP (x, i)); } } /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we are saying that the register is clobbered and we no longer know its value. If INSN is zero, don't update reg_stat[].last_set; this is only permitted with VALUE also zero and is used to invalidate the register. */ static void record_value_for_reg (rtx reg, rtx insn, rtx value) { unsigned int regno = REGNO (reg); unsigned int endregno = regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (reg)] : 1); unsigned int i; /* If VALUE contains REG and we have a previous value for REG, substitute the previous value. */ if (value && insn && reg_overlap_mentioned_p (reg, value)) { rtx tem; /* Set things up so get_last_value is allowed to see anything set up to our insn. */ subst_low_cuid = INSN_CUID (insn); tem = get_last_value (reg); /* If TEM is simply a binary operation with two CLOBBERs as operands, it isn't going to be useful and will take a lot of time to process, so just use the CLOBBER. */ if (tem) { if (ARITHMETIC_P (tem) && GET_CODE (XEXP (tem, 0)) == CLOBBER && GET_CODE (XEXP (tem, 1)) == CLOBBER) tem = XEXP (tem, 0); value = replace_rtx (copy_rtx (value), reg, tem); } } /* For each register modified, show we don't know its value, that we don't know about its bitwise content, that its value has been updated, and that we don't know the location of the death of the register. 
*/ for (i = regno; i < endregno; i++) { if (insn) reg_stat[i].last_set = insn; reg_stat[i].last_set_value = 0; reg_stat[i].last_set_mode = 0; reg_stat[i].last_set_nonzero_bits = 0; reg_stat[i].last_set_sign_bit_copies = 0; reg_stat[i].last_death = 0; } /* Mark registers that are being referenced in this value. */ if (value) update_table_tick (value); /* Now update the status of each register being set. If someone is using this register in this block, set this register to invalid since we will get confused between the two lives in this basic block. This makes using this register always invalid. In cse, we scan the table to invalidate all entries using this register, but this is too much work for us. */ for (i = regno; i < endregno; i++) { reg_stat[i].last_set_label = label_tick; if (value && reg_stat[i].last_set_table_tick == label_tick) reg_stat[i].last_set_invalid = 1; else reg_stat[i].last_set_invalid = 0; } /* The value being assigned might refer to X (like in "x++;"). In that case, we must replace it with (clobber (const_int 0)) to prevent infinite loops. */ if (value && ! get_last_value_validate (&value, insn, reg_stat[regno].last_set_label, 0)) { value = copy_rtx (value); if (! get_last_value_validate (&value, insn, reg_stat[regno].last_set_label, 1)) value = 0; } /* For the main register being modified, update the value, the mode, the nonzero bits, and the number of sign bit copies. */ reg_stat[regno].last_set_value = value; if (value) { enum machine_mode mode = GET_MODE (reg); subst_low_cuid = INSN_CUID (insn); reg_stat[regno].last_set_mode = mode; if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) mode = nonzero_bits_mode; reg_stat[regno].last_set_nonzero_bits = nonzero_bits (value, mode); reg_stat[regno].last_set_sign_bit_copies = num_sign_bit_copies (value, GET_MODE (reg)); } } /* Called via note_stores from record_dead_and_set_regs to handle one SET or CLOBBER in an insn. DATA is the instruction in which the set is occurring. */ static void record_dead_and_set_regs_1 (rtx dest, rtx setter, void *data) { rtx record_dead_insn = (rtx) data; if (GET_CODE (dest) == SUBREG) dest = SUBREG_REG (dest); if (REG_P (dest)) { /* If we are setting the whole register, we know its value. Otherwise show that we don't know the value. We can handle SUBREG in some cases. */ if (GET_CODE (setter) == SET && dest == SET_DEST (setter)) record_value_for_reg (dest, record_dead_insn, SET_SRC (setter)); else if (GET_CODE (setter) == SET && GET_CODE (SET_DEST (setter)) == SUBREG && SUBREG_REG (SET_DEST (setter)) == dest && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD && subreg_lowpart_p (SET_DEST (setter))) record_value_for_reg (dest, record_dead_insn, gen_lowpart (GET_MODE (dest), SET_SRC (setter))); else record_value_for_reg (dest, record_dead_insn, NULL_RTX); } else if (MEM_P (dest) /* Ignore pushes, they clobber nothing. */ && ! push_operand (dest, GET_MODE (dest))) mem_last_set = INSN_CUID (record_dead_insn); } /* Update the records of when each REG was most recently set or killed for the things done by INSN. This is the last thing done in processing INSN in the combiner loop. We update reg_stat[], in particular fields last_set, last_set_value, last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies, last_death, and also the similar information mem_last_set (which insn most recently modified memory) and last_call_cuid (which insn was the most recent subroutine call). 
*/ static void record_dead_and_set_regs (rtx insn) { rtx link; unsigned int i; for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) { if (REG_NOTE_KIND (link) == REG_DEAD && REG_P (XEXP (link, 0))) { unsigned int regno = REGNO (XEXP (link, 0)); unsigned int endregno = regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (XEXP (link, 0))] : 1); for (i = regno; i < endregno; i++) reg_stat[i].last_death = insn; } else if (REG_NOTE_KIND (link) == REG_INC) record_value_for_reg (XEXP (link, 0), insn, NULL_RTX); } if (GET_CODE (insn) == CALL_INSN) { for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) { reg_stat[i].last_set_value = 0; reg_stat[i].last_set_mode = 0; reg_stat[i].last_set_nonzero_bits = 0; reg_stat[i].last_set_sign_bit_copies = 0; reg_stat[i].last_death = 0; } last_call_cuid = mem_last_set = INSN_CUID (insn); /* Don't bother recording what this insn does. It might set the return value register, but we can't combine into a call pattern anyway, so there's no point trying (and it may cause a crash, if e.g. we wind up asking for last_set_value of a SUBREG of the return value register). */ return; } note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn); } /* If a SUBREG has the promoted bit set, it is in fact a property of the register present in the SUBREG, so for each such SUBREG go back and adjust nonzero and sign bit information of the registers that are known to have some zero/sign bits set. This is needed because when combine blows the SUBREGs away, the information on zero/sign bits is lost and further combines can be missed because of that. */ static void record_promoted_value (rtx insn, rtx subreg) { rtx links, set; unsigned int regno = REGNO (SUBREG_REG (subreg)); enum machine_mode mode = GET_MODE (subreg); if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT) return; for (links = LOG_LINKS (insn); links;) { insn = XEXP (links, 0); set = single_set (insn); if (! set || !REG_P (SET_DEST (set)) || REGNO (SET_DEST (set)) != regno || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg))) { links = XEXP (links, 1); continue; } if (reg_stat[regno].last_set == insn) { if (SUBREG_PROMOTED_UNSIGNED_P (subreg) > 0) reg_stat[regno].last_set_nonzero_bits &= GET_MODE_MASK (mode); } if (REG_P (SET_SRC (set))) { regno = REGNO (SET_SRC (set)); links = LOG_LINKS (insn); } else break; } } /* Scan X for promoted SUBREGs. For each one found, note what it implies to the registers used in it. */ static void check_promoted_subreg (rtx insn, rtx x) { if (GET_CODE (x) == SUBREG && SUBREG_PROMOTED_VAR_P (x) && REG_P (SUBREG_REG (x))) record_promoted_value (insn, x); else { const char *format = GET_RTX_FORMAT (GET_CODE (x)); int i, j; for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++) switch (format[i]) { case 'e': check_promoted_subreg (insn, XEXP (x, i)); break; case 'V': case 'E': if (XVEC (x, i) != 0) for (j = 0; j < XVECLEN (x, i); j++) check_promoted_subreg (insn, XVECEXP (x, i, j)); break; } } } /* Utility routine for the following function. Verify that all the registers mentioned in *LOC are valid when *LOC was part of a value set when label_tick == TICK. Return 0 if some are not. If REPLACE is nonzero, replace the invalid reference with (clobber (const_int 0)) and return 1. This replacement is useful because we often can get useful information about the form of a value (e.g., if it was produced by a shift that always produces -1 or 0) even though we don't know exactly what registers it was produced from. 
*/ static int get_last_value_validate (rtx *loc, rtx insn, int tick, int replace) { rtx x = *loc; const char *fmt = GET_RTX_FORMAT (GET_CODE (x)); int len = GET_RTX_LENGTH (GET_CODE (x)); int i; if (REG_P (x)) { unsigned int regno = REGNO (x); unsigned int endregno = regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (x)] : 1); unsigned int j; for (j = regno; j < endregno; j++) if (reg_stat[j].last_set_invalid /* If this is a pseudo-register that was only set once and not live at the beginning of the function, it is always valid. */ || (! (regno >= FIRST_PSEUDO_REGISTER && REG_N_SETS (regno) == 1 && (! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno))) && reg_stat[j].last_set_label > tick)) { if (replace) *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); return replace; } return 1; } /* If this is a memory reference, make sure that there were no stores after it that might have clobbered the value. We don't have alias info, so we assume any store invalidates it. */ else if (MEM_P (x) && ! RTX_UNCHANGING_P (x) && INSN_CUID (insn) <= mem_last_set) { if (replace) *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); return replace; } for (i = 0; i < len; i++) { if (fmt[i] == 'e') { /* Check for identical subexpressions. If x contains identical subexpression we only have to traverse one of them. */ if (i == 1 && ARITHMETIC_P (x)) { /* Note that at this point x0 has already been checked and found valid. */ rtx x0 = XEXP (x, 0); rtx x1 = XEXP (x, 1); /* If x0 and x1 are identical then x is also valid. */ if (x0 == x1) return 1; /* If x1 is identical to a subexpression of x0 then while checking x0, x1 has already been checked. Thus it is valid and so as x. */ if (ARITHMETIC_P (x0) && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1))) return 1; /* If x0 is identical to a subexpression of x1 then x is valid iff the rest of x1 is valid. */ if (ARITHMETIC_P (x1) && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1))) return get_last_value_validate (&XEXP (x1, x0 == XEXP (x1, 0) ? 1 : 0), insn, tick, replace); } if (get_last_value_validate (&XEXP (x, i), insn, tick, replace) == 0) return 0; } /* Don't bother with these. They shouldn't occur anyway. */ else if (fmt[i] == 'E') return 0; } /* If we haven't found a reason for it to be invalid, it is valid. */ return 1; } /* Get the last value assigned to X, if known. Some registers in the value may be replaced with (clobber (const_int 0)) if their value is known longer known reliably. */ static rtx get_last_value (rtx x) { unsigned int regno; rtx value; /* If this is a non-paradoxical SUBREG, get the value of its operand and then convert it to the desired mode. If this is a paradoxical SUBREG, we cannot predict what values the "extra" bits might have. */ if (GET_CODE (x) == SUBREG && subreg_lowpart_p (x) && (GET_MODE_SIZE (GET_MODE (x)) <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) && (value = get_last_value (SUBREG_REG (x))) != 0) return gen_lowpart (GET_MODE (x), value); if (!REG_P (x)) return 0; regno = REGNO (x); value = reg_stat[regno].last_set_value; /* If we don't have a value, or if it isn't for this basic block and it's either a hard register, set more than once, or it's a live at the beginning of the function, return 0. Because if it's not live at the beginning of the function then the reg is always set before being used (is never used without being set). And, if it's set only once, and it's always set before use, then all uses must have the same last value, even if it's not from this basic block. 
*/ if (value == 0 || (reg_stat[regno].last_set_label != label_tick && (regno < FIRST_PSEUDO_REGISTER || REG_N_SETS (regno) != 1 || (REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno))))) return 0; /* If the value was set in a later insn than the ones we are processing, we can't use it even if the register was only set once. */ if (INSN_CUID (reg_stat[regno].last_set) >= subst_low_cuid) return 0; /* If the value has all its registers valid, return it. */ if (get_last_value_validate (&value, reg_stat[regno].last_set, reg_stat[regno].last_set_label, 0)) return value; /* Otherwise, make a copy and replace any invalid register with (clobber (const_int 0)). If that fails for some reason, return 0. */ value = copy_rtx (value); if (get_last_value_validate (&value, reg_stat[regno].last_set, reg_stat[regno].last_set_label, 1)) return value; return 0; } /* Return nonzero if expression X refers to a REG or to memory that is set in an instruction more recent than FROM_CUID. */ static int use_crosses_set_p (rtx x, int from_cuid) { const char *fmt; int i; enum rtx_code code = GET_CODE (x); if (code == REG) { unsigned int regno = REGNO (x); unsigned endreg = regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (x)] : 1); #ifdef PUSH_ROUNDING /* Don't allow uses of the stack pointer to be moved, because we don't know whether the move crosses a push insn. */ if (regno == STACK_POINTER_REGNUM && PUSH_ARGS) return 1; #endif for (; regno < endreg; regno++) if (reg_stat[regno].last_set && INSN_CUID (reg_stat[regno].last_set) > from_cuid) return 1; return 0; } if (code == MEM && mem_last_set > from_cuid) return 1; fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (use_crosses_set_p (XVECEXP (x, i, j), from_cuid)) return 1; } else if (fmt[i] == 'e' && use_crosses_set_p (XEXP (x, i), from_cuid)) return 1; } return 0; } /* Define three variables used for communication between the following routines. */ static unsigned int reg_dead_regno, reg_dead_endregno; static int reg_dead_flag; /* Function called via note_stores from reg_dead_at_p. If DEST is within [reg_dead_regno, reg_dead_endregno), set reg_dead_flag to 1 if X is a CLOBBER and to -1 it is a SET. */ static void reg_dead_at_p_1 (rtx dest, rtx x, void *data ATTRIBUTE_UNUSED) { unsigned int regno, endregno; if (!REG_P (dest)) return; regno = REGNO (dest); endregno = regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (dest)] : 1); if (reg_dead_endregno > regno && reg_dead_regno < endregno) reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1; } /* Return nonzero if REG is known to be dead at INSN. We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER referencing REG, it is dead. If we hit a SET referencing REG, it is live. Otherwise, see if it is live or dead at the start of the basic block we are in. Hard regs marked as being live in NEWPAT_USED_REGS must be assumed to be always live. */ static int reg_dead_at_p (rtx reg, rtx insn) { basic_block block; unsigned int i; /* Set variables for reg_dead_at_p_1. */ reg_dead_regno = REGNO (reg); reg_dead_endregno = reg_dead_regno + (reg_dead_regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[reg_dead_regno] [GET_MODE (reg)] : 1); reg_dead_flag = 0; /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers we allow the machine description to decide whether use-and-clobber patterns are OK. 
*/ if (reg_dead_regno < FIRST_PSEUDO_REGISTER) { for (i = reg_dead_regno; i < reg_dead_endregno; i++) if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i)) return 0; } /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, label, or beginning of function. */ for (; insn && GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != BARRIER; insn = prev_nonnote_insn (insn)) { note_stores (PATTERN (insn), reg_dead_at_p_1, NULL); if (reg_dead_flag) return reg_dead_flag == 1 ? 1 : 0; if (find_regno_note (insn, REG_DEAD, reg_dead_regno)) return 1; } /* Get the basic block that we were in. */ if (insn == 0) block = ENTRY_BLOCK_PTR->next_bb; else { FOR_EACH_BB (block) if (insn == BB_HEAD (block)) break; if (block == EXIT_BLOCK_PTR) return 0; } for (i = reg_dead_regno; i < reg_dead_endregno; i++) if (REGNO_REG_SET_P (block->global_live_at_start, i)) return 0; return 1; } /* Note hard registers in X that are used. This code is similar to that in flow.c, but much simpler since we don't care about pseudos. */ static void mark_used_regs_combine (rtx x) { RTX_CODE code = GET_CODE (x); unsigned int regno; int i; switch (code) { case LABEL_REF: case SYMBOL_REF: case CONST_INT: case CONST: case CONST_DOUBLE: case CONST_VECTOR: case PC: case ADDR_VEC: case ADDR_DIFF_VEC: case ASM_INPUT: #ifdef HAVE_cc0 /* CC0 must die in the insn after it is set, so we don't need to take special note of it here. */ case CC0: #endif return; case CLOBBER: /* If we are clobbering a MEM, mark any hard registers inside the address as used. */ if (MEM_P (XEXP (x, 0))) mark_used_regs_combine (XEXP (XEXP (x, 0), 0)); return; case REG: regno = REGNO (x); /* A hard reg in a wide mode may really be multiple registers. If so, mark all of them just like the first. */ if (regno < FIRST_PSEUDO_REGISTER) { unsigned int endregno, r; /* None of this applies to the stack, frame or arg pointers. */ if (regno == STACK_POINTER_REGNUM #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM #endif #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM || (regno == ARG_POINTER_REGNUM && fixed_regs[regno]) #endif || regno == FRAME_POINTER_REGNUM) return; endregno = regno + hard_regno_nregs[regno][GET_MODE (x)]; for (r = regno; r < endregno; r++) SET_HARD_REG_BIT (newpat_used_regs, r); } return; case SET: { /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in the address. */ rtx testreg = SET_DEST (x); while (GET_CODE (testreg) == SUBREG || GET_CODE (testreg) == ZERO_EXTRACT || GET_CODE (testreg) == SIGN_EXTRACT || GET_CODE (testreg) == STRICT_LOW_PART) testreg = XEXP (testreg, 0); if (MEM_P (testreg)) mark_used_regs_combine (XEXP (testreg, 0)); mark_used_regs_combine (SET_SRC (x)); } return; default: break; } /* Recursively scan the operands of this expression. */ { const char *fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') mark_used_regs_combine (XEXP (x, i)); else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) mark_used_regs_combine (XVECEXP (x, i, j)); } } } } /* Remove register number REGNO from the dead registers list of INSN. Return the note used to record the death, if there was one. 
*/ rtx remove_death (unsigned int regno, rtx insn) { rtx note = find_regno_note (insn, REG_DEAD, regno); if (note) { REG_N_DEATHS (regno)--; remove_note (insn, note); } return note; } /* For each register (hardware or pseudo) used within expression X, if its death is in an instruction with cuid between FROM_CUID (inclusive) and TO_INSN (exclusive), put a REG_DEAD note for that register in the list headed by PNOTES. That said, don't move registers killed by maybe_kill_insn. This is done when X is being merged by combination into TO_INSN. These notes will then be distributed as needed. */ static void move_deaths (rtx x, rtx maybe_kill_insn, int from_cuid, rtx to_insn, rtx *pnotes) { const char *fmt; int len, i; enum rtx_code code = GET_CODE (x); if (code == REG) { unsigned int regno = REGNO (x); rtx where_dead = reg_stat[regno].last_death; rtx before_dead, after_dead; /* Don't move the register if it gets killed in between from and to. */ if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn) && ! reg_referenced_p (x, maybe_kill_insn)) return; /* WHERE_DEAD could be a USE insn made by combine, so first we make sure that we have insns with valid INSN_CUID values. */ before_dead = where_dead; while (before_dead && INSN_UID (before_dead) > max_uid_cuid) before_dead = PREV_INSN (before_dead); after_dead = where_dead; while (after_dead && INSN_UID (after_dead) > max_uid_cuid) after_dead = NEXT_INSN (after_dead); if (before_dead && after_dead && INSN_CUID (before_dead) >= from_cuid && (INSN_CUID (after_dead) < INSN_CUID (to_insn) || (where_dead != after_dead && INSN_CUID (after_dead) == INSN_CUID (to_insn)))) { rtx note = remove_death (regno, where_dead); /* It is possible for the call above to return 0. This can occur when last_death points to I2 or I1 that we combined with. In that case make a new note. We must also check for the case where X is a hard register and NOTE is a death note for a range of hard registers including X. In that case, we must put REG_DEAD notes for the remaining registers in place of NOTE. */ if (note != 0 && regno < FIRST_PSEUDO_REGISTER && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0))) > GET_MODE_SIZE (GET_MODE (x)))) { unsigned int deadregno = REGNO (XEXP (note, 0)); unsigned int deadend = (deadregno + hard_regno_nregs[deadregno] [GET_MODE (XEXP (note, 0))]); unsigned int ourend = regno + hard_regno_nregs[regno][GET_MODE (x)]; unsigned int i; for (i = deadregno; i < deadend; i++) if (i < regno || i >= ourend) REG_NOTES (where_dead) = gen_rtx_EXPR_LIST (REG_DEAD, regno_reg_rtx[i], REG_NOTES (where_dead)); } /* If we didn't find any note, or if we found a REG_DEAD note that covers only part of the given reg, and we have a multi-reg hard register, then to be safe we must check for REG_DEAD notes for each register other than the first. They could have their own REG_DEAD notes lying around. 
*/ else if ((note == 0 || (note != 0 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0))) < GET_MODE_SIZE (GET_MODE (x))))) && regno < FIRST_PSEUDO_REGISTER && hard_regno_nregs[regno][GET_MODE (x)] > 1) { unsigned int ourend = regno + hard_regno_nregs[regno][GET_MODE (x)]; unsigned int i, offset; rtx oldnotes = 0; if (note) offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))]; else offset = 1; for (i = regno + offset; i < ourend; i++) move_deaths (regno_reg_rtx[i], maybe_kill_insn, from_cuid, to_insn, &oldnotes); } if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x)) { XEXP (note, 1) = *pnotes; *pnotes = note; } else *pnotes = gen_rtx_EXPR_LIST (REG_DEAD, x, *pnotes); REG_N_DEATHS (regno)++; } return; } else if (GET_CODE (x) == SET) { rtx dest = SET_DEST (x); move_deaths (SET_SRC (x), maybe_kill_insn, from_cuid, to_insn, pnotes); /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG that accesses one word of a multi-word item, some piece of everything register in the expression is used by this insn, so remove any old death. */ /* ??? So why do we test for equality of the sizes? */ if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART || (GET_CODE (dest) == SUBREG && (((GET_MODE_SIZE (GET_MODE (dest)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))) { move_deaths (dest, maybe_kill_insn, from_cuid, to_insn, pnotes); return; } /* If this is some other SUBREG, we know it replaces the entire value, so use that as the destination. */ if (GET_CODE (dest) == SUBREG) dest = SUBREG_REG (dest); /* If this is a MEM, adjust deaths of anything used in the address. For a REG (the only other possibility), the entire value is being replaced so the old value is not used in this insn. */ if (MEM_P (dest)) move_deaths (XEXP (dest, 0), maybe_kill_insn, from_cuid, to_insn, pnotes); return; } else if (GET_CODE (x) == CLOBBER) return; len = GET_RTX_LENGTH (code); fmt = GET_RTX_FORMAT (code); for (i = 0; i < len; i++) { if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_cuid, to_insn, pnotes); } else if (fmt[i] == 'e') move_deaths (XEXP (x, i), maybe_kill_insn, from_cuid, to_insn, pnotes); } } /* Return 1 if X is the target of a bit-field assignment in BODY, the pattern of an insn. X must be a REG. */ static int reg_bitfield_target_p (rtx x, rtx body) { int i; if (GET_CODE (body) == SET) { rtx dest = SET_DEST (body); rtx target; unsigned int regno, tregno, endregno, endtregno; if (GET_CODE (dest) == ZERO_EXTRACT) target = XEXP (dest, 0); else if (GET_CODE (dest) == STRICT_LOW_PART) target = SUBREG_REG (XEXP (dest, 0)); else return 0; if (GET_CODE (target) == SUBREG) target = SUBREG_REG (target); if (!REG_P (target)) return 0; tregno = REGNO (target), regno = REGNO (x); if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER) return target == x; endtregno = tregno + hard_regno_nregs[tregno][GET_MODE (target)]; endregno = regno + hard_regno_nregs[regno][GET_MODE (x)]; return endregno > tregno && regno < endtregno; } else if (GET_CODE (body) == PARALLEL) for (i = XVECLEN (body, 0) - 1; i >= 0; i--) if (reg_bitfield_target_p (x, XVECEXP (body, 0, i))) return 1; return 0; } /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them as appropriate. I3 and I2 are the insns resulting from the combination insns including FROM (I2 may be zero). 
Each note in the list is either ignored or placed on some insns, depending on the type of note. */ static void distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2) { rtx note, next_note; rtx tem; for (note = notes; note; note = next_note) { rtx place = 0, place2 = 0; /* If this NOTE references a pseudo register, ensure it references the latest copy of that register. */ if (XEXP (note, 0) && REG_P (XEXP (note, 0)) && REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER) XEXP (note, 0) = regno_reg_rtx[REGNO (XEXP (note, 0))]; next_note = XEXP (note, 1); switch (REG_NOTE_KIND (note)) { case REG_BR_PROB: case REG_BR_PRED: /* Doesn't matter much where we put this, as long as it's somewhere. It is preferable to keep these notes on branches, which is most likely to be i3. */ place = i3; break; case REG_VALUE_PROFILE: /* Just get rid of this note, as it is unused later anyway. */ break; case REG_VTABLE_REF: /* ??? Should remain with *a particular* memory load. Given the nature of vtable data, the last insn seems relatively safe. */ place = i3; break; case REG_NON_LOCAL_GOTO: if (GET_CODE (i3) == JUMP_INSN) place = i3; else if (i2 && GET_CODE (i2) == JUMP_INSN) place = i2; else abort (); break; case REG_EH_REGION: /* These notes must remain with the call or trapping instruction. */ if (GET_CODE (i3) == CALL_INSN) place = i3; else if (i2 && GET_CODE (i2) == CALL_INSN) place = i2; else if (flag_non_call_exceptions) { if (may_trap_p (i3)) place = i3; else if (i2 && may_trap_p (i2)) place = i2; /* ??? Otherwise assume we've combined things such that we can now prove that the instructions can't trap. Drop the note in this case. */ } else abort (); break; case REG_ALWAYS_RETURN: case REG_NORETURN: case REG_SETJMP: /* These notes must remain with the call. It should not be possible for both I2 and I3 to be a call. */ if (GET_CODE (i3) == CALL_INSN) place = i3; else if (i2 && GET_CODE (i2) == CALL_INSN) place = i2; else abort (); break; case REG_UNUSED: /* Any clobbers for i3 may still exist, and so we must process REG_UNUSED notes from that insn. Any clobbers from i2 or i1 can only exist if they were added by recog_for_combine. In that case, recog_for_combine created the necessary REG_UNUSED notes. Trying to keep any original REG_UNUSED notes from these insns can cause incorrect output if it is for the same register as the original i3 dest. In that case, we will notice that the register is set in i3, and then add a REG_UNUSED note for the destination of i3, which is wrong. However, it is possible to have REG_UNUSED notes from i2 or i1 for register which were both used and clobbered, so we keep notes from i2 or i1 if they will turn into REG_DEAD notes. */ /* If this register is set or clobbered in I3, put the note there unless there is one already. */ if (reg_set_p (XEXP (note, 0), PATTERN (i3))) { if (from_insn != i3) break; if (! (REG_P (XEXP (note, 0)) ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0))) : find_reg_note (i3, REG_UNUSED, XEXP (note, 0)))) place = i3; } /* Otherwise, if this register is used by I3, then this register now dies here, so we must put a REG_DEAD note here unless there is one already. */ else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)) && ! (REG_P (XEXP (note, 0)) ? find_regno_note (i3, REG_DEAD, REGNO (XEXP (note, 0))) : find_reg_note (i3, REG_DEAD, XEXP (note, 0)))) { PUT_REG_NOTE_KIND (note, REG_DEAD); place = i3; } break; case REG_EQUAL: case REG_EQUIV: case REG_NOALIAS: /* These notes say something about results of an insn. 
We can only support them if they used to be on I3 in which case they remain on I3. Otherwise they are ignored. If the note refers to an expression that is not a constant, we must also ignore the note since we cannot tell whether the equivalence is still true. It might be possible to do slightly better than this (we only have a problem if I2DEST or I1DEST is present in the expression), but it doesn't seem worth the trouble. */ if (from_insn == i3 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0)))) place = i3; break; case REG_INC: case REG_NO_CONFLICT: /* These notes say something about how a register is used. They must be present on any use of the register in I2 or I3. */ if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))) place = i3; if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2))) { if (place) place2 = i2; else place = i2; } break; case REG_LABEL: /* This can show up in several ways -- either directly in the pattern, or hidden off in the constant pool with (or without?) a REG_EQUAL note. */ /* ??? Ignore the without-reg_equal-note problem for now. */ if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)) || ((tem = find_reg_note (i3, REG_EQUAL, NULL_RTX)) && GET_CODE (XEXP (tem, 0)) == LABEL_REF && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0))) place = i3; if (i2 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2)) || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX)) && GET_CODE (XEXP (tem, 0)) == LABEL_REF && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0)))) { if (place) place2 = i2; else place = i2; } /* Don't attach REG_LABEL note to a JUMP_INSN which has JUMP_LABEL already. Instead, decrement LABEL_NUSES. */ if (place && GET_CODE (place) == JUMP_INSN && JUMP_LABEL (place)) { if (JUMP_LABEL (place) != XEXP (note, 0)) abort (); if (GET_CODE (JUMP_LABEL (place)) == CODE_LABEL) LABEL_NUSES (JUMP_LABEL (place))--; place = 0; } if (place2 && GET_CODE (place2) == JUMP_INSN && JUMP_LABEL (place2)) { if (JUMP_LABEL (place2) != XEXP (note, 0)) abort (); if (GET_CODE (JUMP_LABEL (place2)) == CODE_LABEL) LABEL_NUSES (JUMP_LABEL (place2))--; place2 = 0; } break; case REG_NONNEG: /* This note says something about the value of a register prior to the execution of an insn. It is too much trouble to see if the note is still correct in all situations. It is better to simply delete it. */ break; case REG_RETVAL: /* If the insn previously containing this note still exists, put it back where it was. Otherwise move it to the previous insn. Adjust the corresponding REG_LIBCALL note. */ if (GET_CODE (from_insn) != NOTE) place = from_insn; else { tem = find_reg_note (XEXP (note, 0), REG_LIBCALL, NULL_RTX); place = prev_real_insn (from_insn); if (tem && place) XEXP (tem, 0) = place; /* If we're deleting the last remaining instruction of a libcall sequence, don't add the notes. */ else if (XEXP (note, 0) == from_insn) tem = place = 0; /* Don't add the dangling REG_RETVAL note. */ else if (! tem) place = 0; } break; case REG_LIBCALL: /* This is handled similarly to REG_RETVAL. */ if (GET_CODE (from_insn) != NOTE) place = from_insn; else { tem = find_reg_note (XEXP (note, 0), REG_RETVAL, NULL_RTX); place = next_real_insn (from_insn); if (tem && place) XEXP (tem, 0) = place; /* If we're deleting the last remaining instruction of a libcall sequence, don't add the notes. */ else if (XEXP (note, 0) == from_insn) tem = place = 0; /* Don't add the dangling REG_LIBCALL note. */ else if (! tem) place = 0; } break; case REG_DEAD: /* If the register is used as an input in I3, it dies there. 
Similarly for I2, if it is nonzero and adjacent to I3. If the register is not used as an input in either I3 or I2 and it is not one of the registers we were supposed to eliminate, there are two possibilities. We might have a non-adjacent I2 or we might have somehow eliminated an additional register from a computation. For example, we might have had A & B where we discover that B will always be zero. In this case we will eliminate the reference to A. In both cases, we must search to see if we can find a previous use of A and put the death note there. */ if (from_insn && GET_CODE (from_insn) == CALL_INSN && find_reg_fusage (from_insn, USE, XEXP (note, 0))) place = from_insn; else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))) place = i3; else if (i2 != 0 && next_nonnote_insn (i2) == i3 && reg_referenced_p (XEXP (note, 0), PATTERN (i2))) place = i2; if (place == 0) { basic_block bb = this_basic_block; for (tem = PREV_INSN (i3); place == 0; tem = PREV_INSN (tem)) { if (! INSN_P (tem)) { if (tem == BB_HEAD (bb)) break; continue; } /* If the register is being set at TEM, see if that is all TEM is doing. If so, delete TEM. Otherwise, make this into a REG_UNUSED note instead. Don't delete sets to global register vars. */ if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER || !global_regs[REGNO (XEXP (note, 0))]) && reg_set_p (XEXP (note, 0), PATTERN (tem))) { rtx set = single_set (tem); rtx inner_dest = 0; #ifdef HAVE_cc0 rtx cc0_setter = NULL_RTX; #endif if (set != 0) for (inner_dest = SET_DEST (set); (GET_CODE (inner_dest) == STRICT_LOW_PART || GET_CODE (inner_dest) == SUBREG || GET_CODE (inner_dest) == ZERO_EXTRACT); inner_dest = XEXP (inner_dest, 0)) ; /* Verify that it was the set, and not a clobber that modified the register. CC0 targets must be careful to maintain setter/user pairs. If we cannot delete the setter due to side effects, mark the user with an UNUSED note instead of deleting it. */ if (set != 0 && ! side_effects_p (SET_SRC (set)) && rtx_equal_p (XEXP (note, 0), inner_dest) #ifdef HAVE_cc0 && (! reg_mentioned_p (cc0_rtx, SET_SRC (set)) || ((cc0_setter = prev_cc0_setter (tem)) != NULL && sets_cc0_p (PATTERN (cc0_setter)) > 0)) #endif ) { /* Move the notes and links of TEM elsewhere. This might delete other dead insns recursively. First set the pattern to something that won't use any register. */ rtx old_notes = REG_NOTES (tem); PATTERN (tem) = pc_rtx; REG_NOTES (tem) = NULL; distribute_notes (old_notes, tem, tem, NULL_RTX); distribute_links (LOG_LINKS (tem)); SET_INSN_DELETED (tem); #ifdef HAVE_cc0 /* Delete the setter too. */ if (cc0_setter) { PATTERN (cc0_setter) = pc_rtx; old_notes = REG_NOTES (cc0_setter); REG_NOTES (cc0_setter) = NULL; distribute_notes (old_notes, cc0_setter, cc0_setter, NULL_RTX); distribute_links (LOG_LINKS (cc0_setter)); SET_INSN_DELETED (cc0_setter); } #endif } else { PUT_REG_NOTE_KIND (note, REG_UNUSED); /* If there isn't already a REG_UNUSED note, put one here. Do not place a REG_DEAD note, even if the register is also used here; that would not match the algorithm used in lifetime analysis and can cause the consistency check in the scheduler to fail. */ if (! 
find_regno_note (tem, REG_UNUSED, REGNO (XEXP (note, 0)))) place = tem; break; } } else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem)) || (GET_CODE (tem) == CALL_INSN && find_reg_fusage (tem, USE, XEXP (note, 0)))) { place = tem; /* If we are doing a 3->2 combination, and we have a register which formerly died in i3 and was not used by i2, which now no longer dies in i3 and is used in i2 but does not die in i2, and place is between i2 and i3, then we may need to move a link from place to i2. */ if (i2 && INSN_UID (place) <= max_uid_cuid && INSN_CUID (place) > INSN_CUID (i2) && from_insn && INSN_CUID (from_insn) > INSN_CUID (i2) && reg_referenced_p (XEXP (note, 0), PATTERN (i2))) { rtx links = LOG_LINKS (place); LOG_LINKS (place) = 0; distribute_links (links); } break; } if (tem == BB_HEAD (bb)) break; } /* We haven't found an insn for the death note and it is still a REG_DEAD note, but we have hit the beginning of the block. If the existing life info says the reg was dead, there's nothing left to do. Otherwise, we'll need to do a global life update after combine. */ if (REG_NOTE_KIND (note) == REG_DEAD && place == 0 && REGNO_REG_SET_P (bb->global_live_at_start, REGNO (XEXP (note, 0)))) SET_BIT (refresh_blocks, this_basic_block->index); } /* If the register is set or already dead at PLACE, we needn't do anything with this note if it is still a REG_DEAD note. We check here if it is set at all, not if is it totally replaced, which is what `dead_or_set_p' checks, so also check for it being set partially. */ if (place && REG_NOTE_KIND (note) == REG_DEAD) { unsigned int regno = REGNO (XEXP (note, 0)); /* Similarly, if the instruction on which we want to place the note is a noop, we'll need do a global live update after we remove them in delete_noop_moves. */ if (noop_move_p (place)) SET_BIT (refresh_blocks, this_basic_block->index); if (dead_or_set_p (place, XEXP (note, 0)) || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place))) { /* Unless the register previously died in PLACE, clear last_death. [I no longer understand why this is being done.] */ if (reg_stat[regno].last_death != place) reg_stat[regno].last_death = 0; place = 0; } else reg_stat[regno].last_death = place; /* If this is a death note for a hard reg that is occupying multiple registers, ensure that we are still using all parts of the object. If we find a piece of the object that is unused, we must arrange for an appropriate REG_DEAD note to be added for it. However, we can't just emit a USE and tag the note to it, since the register might actually be dead; so we recourse, and the recursive call then finds the previous insn that used this register. */ if (place && regno < FIRST_PSEUDO_REGISTER && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1) { unsigned int endregno = regno + hard_regno_nregs[regno] [GET_MODE (XEXP (note, 0))]; int all_used = 1; unsigned int i; for (i = regno; i < endregno; i++) if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0) && ! find_regno_fusage (place, USE, i)) || dead_or_set_regno_p (place, i)) all_used = 0; if (! all_used) { /* Put only REG_DEAD notes for pieces that are not already dead or set. */ for (i = regno; i < endregno; i += hard_regno_nregs[i][reg_raw_mode[i]]) { rtx piece = regno_reg_rtx[i]; basic_block bb = this_basic_block; if (! dead_or_set_p (place, piece) && ! reg_bitfield_target_p (piece, PATTERN (place))) { rtx new_note = gen_rtx_EXPR_LIST (REG_DEAD, piece, NULL_RTX); distribute_notes (new_note, place, place, NULL_RTX); } else if (! 
refers_to_regno_p (i, i + 1, PATTERN (place), 0) && ! find_regno_fusage (place, USE, i)) for (tem = PREV_INSN (place); ; tem = PREV_INSN (tem)) { if (! INSN_P (tem)) { if (tem == BB_HEAD (bb)) { SET_BIT (refresh_blocks, this_basic_block->index); break; } continue; } if (dead_or_set_p (tem, piece) || reg_bitfield_target_p (piece, PATTERN (tem))) { REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_UNUSED, piece, REG_NOTES (tem)); break; } } } place = 0; } } } break; default: /* Any other notes should not be present at this point in the compilation. */ abort (); } if (place) { XEXP (note, 1) = REG_NOTES (place); REG_NOTES (place) = note; } else if ((REG_NOTE_KIND (note) == REG_DEAD || REG_NOTE_KIND (note) == REG_UNUSED) && REG_P (XEXP (note, 0))) REG_N_DEATHS (REGNO (XEXP (note, 0)))--; if (place2) { if ((REG_NOTE_KIND (note) == REG_DEAD || REG_NOTE_KIND (note) == REG_UNUSED) && REG_P (XEXP (note, 0))) REG_N_DEATHS (REGNO (XEXP (note, 0)))++; REG_NOTES (place2) = gen_rtx_fmt_ee (GET_CODE (note), REG_NOTE_KIND (note), XEXP (note, 0), REG_NOTES (place2)); } } } /* Similarly to above, distribute the LOG_LINKS that used to be present on I3, I2, and I1 to new locations. This is also called to add a link pointing at I3 when I3's destination is changed. */ static void distribute_links (rtx links) { rtx link, next_link; for (link = links; link; link = next_link) { rtx place = 0; rtx insn; rtx set, reg; next_link = XEXP (link, 1); /* If the insn that this link points to is a NOTE or isn't a single set, ignore it. In the latter case, it isn't clear what we can do other than ignore the link, since we can't tell which register it was for. Such links wouldn't be used by combine anyway. It is not possible for the destination of the target of the link to have been changed by combine. The only potential of this is if we replace I3, I2, and I1 by I3 and I2. But in that case the destination of I2 also remains unchanged. */ if (GET_CODE (XEXP (link, 0)) == NOTE || (set = single_set (XEXP (link, 0))) == 0) continue; reg = SET_DEST (set); while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == STRICT_LOW_PART) reg = XEXP (reg, 0); /* A LOG_LINK is defined as being placed on the first insn that uses a register and points to the insn that sets the register. Start searching at the next insn after the target of the link and stop when we reach a set of the register or the end of the basic block. Note that this correctly handles the link that used to point from I3 to I2. Also note that not much searching is typically done here since most links don't point very far away. */ for (insn = NEXT_INSN (XEXP (link, 0)); (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR || BB_HEAD (this_basic_block->next_bb) != insn)); insn = NEXT_INSN (insn)) if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn))) { if (reg_referenced_p (reg, PATTERN (insn))) place = insn; break; } else if (GET_CODE (insn) == CALL_INSN && find_reg_fusage (insn, USE, reg)) { place = insn; break; } else if (INSN_P (insn) && reg_set_p (reg, insn)) break; /* If we found a place to put the link, place it there unless there is already a link to the same insn as LINK at that point. */ if (place) { rtx link2; for (link2 = LOG_LINKS (place); link2; link2 = XEXP (link2, 1)) if (XEXP (link2, 0) == XEXP (link, 0)) break; if (link2 == 0) { XEXP (link, 1) = LOG_LINKS (place); LOG_LINKS (place) = link; /* Set added_links_insn to the earliest insn we added a link to. 
*/ if (added_links_insn == 0 || INSN_CUID (added_links_insn) > INSN_CUID (place)) added_links_insn = place; } } } } /* Subroutine of unmentioned_reg_p and callback from for_each_rtx. Check whether the expression pointer to by LOC is a register or memory, and if so return 1 if it isn't mentioned in the rtx EXPR. Otherwise return zero. */ static int unmentioned_reg_p_1 (rtx *loc, void *expr) { rtx x = *loc; if (x != NULL_RTX && (REG_P (x) || MEM_P (x)) && ! reg_mentioned_p (x, (rtx) expr)) return 1; return 0; } /* Check for any register or memory mentioned in EQUIV that is not mentioned in EXPR. This is used to restrict EQUIV to "specializations" of EXPR where some registers may have been replaced by constants. */ static bool unmentioned_reg_p (rtx equiv, rtx expr) { return for_each_rtx (&equiv, unmentioned_reg_p_1, expr); } /* Compute INSN_CUID for INSN, which is an insn made by combine. */ static int insn_cuid (rtx insn) { while (insn != 0 && INSN_UID (insn) > max_uid_cuid && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE) insn = NEXT_INSN (insn); if (INSN_UID (insn) > max_uid_cuid) abort (); return INSN_CUID (insn); } void dump_combine_stats (FILE *file) { fnotice (file, ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n", combine_attempts, combine_merges, combine_extras, combine_successes); } void dump_combine_total_stats (FILE *file) { fnotice (file, "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n", total_attempts, total_merges, total_extras, total_successes); } /* Register conflict graph computation routines. Copyright (C) 2000, 2003 Free Software Foundation, Inc. Contributed by CodeSourcery, LLC This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* References: Building an Optimizing Compiler Robert Morgan Butterworth-Heinemann, 1998 */ /* A register conflict graph is an undirected graph containing nodes for some or all of the regs used in a function. Arcs represent conflicts, i.e. two nodes are connected by an arc if there is a point in the function at which the regs corresponding to the two nodes are both live. The conflict graph is represented by the data structures described in Morgan section 11.3.1. Nodes are not stored explicitly; only arcs are. An arc stores the numbers of the regs it connects. Arcs can be located by two methods: - The two reg numbers for each arc are hashed into a single value, and the arc is placed in a hash table according to this value. This permits quick determination of whether a specific conflict is present in the graph. - Additionally, the arc data structures are threaded by a set of linked lists by single reg number. Since each arc references two regs, there are two next pointers, one for the smaller-numbered reg and one for the larger-numbered reg. This permits the quick enumeration of conflicts for a single register. 
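   The value used to hash an arc is a dense pair index: for smaller reg R1
   and larger reg R2 it is R2*(R2-1)/2 + R1, so distinct reg pairs get
   distinct values -- (0,1), (0,2), (1,2), (0,3), ... map to 0, 1, 2, 3, ...
   A small self-contained sketch of that property (illustration only; the
   real macro is CONFLICT_HASH_FN below, the SKETCH_* name is hypothetical):  */

#if 0
#include <stdio.h>

/* Same formula as CONFLICT_HASH_FN, under an illustrative name.  */
#define SKETCH_PAIR_INDEX(R1, R2) ((R2) * ((R2) - 1) / 2 + (R1))

int
main (void)
{
  int r1, r2;

  /* Enumerate pairs with r1 < r2; the indices come out 0, 1, 2, ...  */
  for (r2 = 1; r2 < 5; r2++)
    for (r1 = 0; r1 < r2; r1++)
      printf ("(%d,%d) -> %d\n", r1, r2, SKETCH_PAIR_INDEX (r1, r2));
  return 0;
}
#endif

/*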
Arcs are allocated from an obstack. */ /* An arc in a conflict graph. */ struct conflict_graph_arc_def { /* The next element of the list of conflicts involving the smaller-numbered reg, as an index in the table of arcs of this graph. Contains NULL if this is the tail. */ struct conflict_graph_arc_def *smaller_next; /* The next element of the list of conflicts involving the larger-numbered reg, as an index in the table of arcs of this graph. Contains NULL if this is the tail. */ struct conflict_graph_arc_def *larger_next; /* The smaller-numbered reg involved in this conflict. */ int smaller; /* The larger-numbered reg involved in this conflict. */ int larger; }; typedef struct conflict_graph_arc_def *conflict_graph_arc; typedef const struct conflict_graph_arc_def *const_conflict_graph_arc; /* A conflict graph. */ struct conflict_graph_def { /* A hash table of arcs. Used to search for a specific conflict. */ htab_t arc_hash_table; /* The number of regs this conflict graph handles. */ int num_regs; /* For each reg, the arc at the head of a list that threads through all the arcs involving that reg. An entry is NULL if no conflicts exist involving that reg. */ conflict_graph_arc *neighbor_heads; /* Arcs are allocated from here. */ struct obstack arc_obstack; }; /* The initial capacity (number of conflict arcs) for newly-created conflict graphs. */ #define INITIAL_ARC_CAPACITY 64 /* Computes the hash value of the conflict graph arc connecting regs R1 and R2. R1 is assumed to be smaller or equal to R2. */ #define CONFLICT_HASH_FN(R1, R2) ((R2) * ((R2) - 1) / 2 + (R1)) static hashval_t arc_hash (const void *); static int arc_eq (const void *, const void *); static int print_conflict (int, int, void *); static void mark_reg_conflict (rtx, rtx, void *); /* Callback function to compute the hash value of an arc. Uses current_graph to locate the graph to which the arc belongs. */ static hashval_t arc_hash (const void *arcp) { const_conflict_graph_arc arc = (const_conflict_graph_arc) arcp; return CONFLICT_HASH_FN (arc->smaller, arc->larger); } /* Callback function to determine the equality of two arcs in the hash table. */ static int arc_eq (const void *arcp1, const void *arcp2) { const_conflict_graph_arc arc1 = (const_conflict_graph_arc) arcp1; const_conflict_graph_arc arc2 = (const_conflict_graph_arc) arcp2; return arc1->smaller == arc2->smaller && arc1->larger == arc2->larger; } /* Creates an empty conflict graph to hold conflicts among NUM_REGS registers. */ conflict_graph conflict_graph_new (int num_regs) { conflict_graph graph = xmalloc (sizeof (struct conflict_graph_def)); graph->num_regs = num_regs; /* Set up the hash table. No delete action is specified; memory management of arcs is through the obstack. */ graph->arc_hash_table = htab_create (INITIAL_ARC_CAPACITY, &arc_hash, &arc_eq, NULL); /* Create an obstack for allocating arcs. */ obstack_init (&graph->arc_obstack); /* Create and zero the lookup table by register number. */ graph->neighbor_heads = xcalloc (num_regs, sizeof (conflict_graph_arc)); return graph; } /* Deletes a conflict graph. */ void conflict_graph_delete (conflict_graph graph) { obstack_free (&graph->arc_obstack, NULL); htab_delete (graph->arc_hash_table); free (graph->neighbor_heads); free (graph); } /* Adds a conflict to GRAPH between regs REG1 and REG2, which must be distinct. Returns nonzero, unless the conflict is already present in GRAPH, in which case it does nothing and returns zero. 
*/ int conflict_graph_add (conflict_graph graph, int reg1, int reg2) { int smaller = MIN (reg1, reg2); int larger = MAX (reg1, reg2); struct conflict_graph_arc_def dummy; conflict_graph_arc arc; void **slot; /* A reg cannot conflict with itself. */ if (reg1 == reg2) abort (); dummy.smaller = smaller; dummy.larger = larger; slot = htab_find_slot (graph->arc_hash_table, (void *) &dummy, INSERT); /* If the conflict is already there, do nothing. */ if (*slot != NULL) return 0; /* Allocate an arc. */ arc = obstack_alloc (&graph->arc_obstack, sizeof (struct conflict_graph_arc_def)); /* Record the reg numbers. */ arc->smaller = smaller; arc->larger = larger; /* Link the conflict into into two lists, one for each reg. */ arc->smaller_next = graph->neighbor_heads[smaller]; graph->neighbor_heads[smaller] = arc; arc->larger_next = graph->neighbor_heads[larger]; graph->neighbor_heads[larger] = arc; /* Put it in the hash table. */ *slot = (void *) arc; return 1; } /* Returns nonzero if a conflict exists in GRAPH between regs REG1 and REG2. */ int conflict_graph_conflict_p (conflict_graph graph, int reg1, int reg2) { /* Build an arc to search for. */ struct conflict_graph_arc_def arc; arc.smaller = MIN (reg1, reg2); arc.larger = MAX (reg1, reg2); return htab_find (graph->arc_hash_table, (void *) &arc) != NULL; } /* Calls ENUM_FN for each conflict in GRAPH involving REG. EXTRA is passed back to ENUM_FN. */ void conflict_graph_enum (conflict_graph graph, int reg, conflict_graph_enum_fn enum_fn, void *extra) { conflict_graph_arc arc = graph->neighbor_heads[reg]; while (arc != NULL) { /* Invoke the callback. */ if ((*enum_fn) (arc->smaller, arc->larger, extra)) /* Stop if requested. */ break; /* Which next pointer to follow depends on whether REG is the smaller or larger reg in this conflict. */ if (reg < arc->larger) arc = arc->smaller_next; else arc = arc->larger_next; } } /* For each conflict between a register x and SRC in GRAPH, adds a conflict to GRAPH between x and TARGET. */ void conflict_graph_merge_regs (conflict_graph graph, int target, int src) { conflict_graph_arc arc = graph->neighbor_heads[src]; if (target == src) return; while (arc != NULL) { int other = arc->smaller; if (other == src) other = arc->larger; conflict_graph_add (graph, target, other); /* Which next pointer to follow depends on whether REG is the smaller or larger reg in this conflict. */ if (src < arc->larger) arc = arc->smaller_next; else arc = arc->larger_next; } } /* Holds context information while a conflict graph is being traversed for printing. */ struct print_context { /* The file pointer to which we're printing. */ FILE *fp; /* The reg whose conflicts we're printing. */ int reg; /* Whether a conflict has already been printed for this reg. */ int started; }; /* Callback function when enumerating conflicts during printing. */ static int print_conflict (int reg1, int reg2, void *contextp) { struct print_context *context = (struct print_context *) contextp; int reg; /* If this is the first conflict printed for this reg, start a new line. */ if (! context->started) { fprintf (context->fp, " %d:", context->reg); context->started = 1; } /* Figure out the reg whose conflicts we're printing. The other reg is the interesting one. */ if (reg1 == context->reg) reg = reg2; else if (reg2 == context->reg) reg = reg1; else abort (); /* Print the conflict. */ fprintf (context->fp, " %d", reg); /* Continue enumerating. */ return 0; } /* Prints the conflicts in GRAPH to FP. 
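   First, a minimal usage sketch of the interface defined above -- create a
   graph, add a couple of conflicts, query and enumerate them, then free it
   (illustration only; the sketch_* names are hypothetical and not part of
   this file):  */

#if 0
static int
sketch_note_conflict (int reg1, int reg2, void *extra)
{
  fprintf ((FILE *) extra, "conflict: %d %d\n", reg1, reg2);
  return 0;			/* Zero means keep enumerating.  */
}

static void
sketch_conflict_graph_usage (FILE *fp)
{
  conflict_graph graph = conflict_graph_new (4);	/* regs 0..3 */

  conflict_graph_add (graph, 0, 2);
  conflict_graph_add (graph, 2, 1);	/* stored as (1, 2) internally */

  if (conflict_graph_conflict_p (graph, 2, 0))
    fputs ("0 and 2 conflict\n", fp);

  /* Walk every conflict involving reg 2.  */
  conflict_graph_enum (graph, 2, &sketch_note_conflict, fp);

  conflict_graph_delete (graph);
}
#endif

/*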
*/ void conflict_graph_print (conflict_graph graph, FILE *fp) { int reg; struct print_context context; context.fp = fp; fprintf (fp, "Conflicts:\n"); /* Loop over registers supported in this graph. */ for (reg = 0; reg < graph->num_regs; ++reg) { context.reg = reg; context.started = 0; /* Scan the conflicts for reg, printing as we go. A label for this line will be printed the first time a conflict is printed for the reg; we won't start a new line if this reg has no conflicts. */ conflict_graph_enum (graph, reg, &print_conflict, &context); /* If this reg does have conflicts, end the line. */ if (context.started) fputc ('\n', fp); } } /* Callback function for note_stores. */ static void mark_reg_conflict (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *data) { regset set = (regset) data; if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); /* We're only interested in regs. */ if (!REG_P (reg)) return; SET_REGNO_REG_SET (set, REGNO (reg)); } /* Allocates a conflict graph and computes conflicts over the current function for the registers set in REGS. The caller is responsible for deallocating the return value. Preconditions: the flow graph must be in SSA form, and life analysis (specifically, regs live at exit from each block) must be up-to-date. This algorithm determines conflicts by walking the insns in each block backwards. We maintain the set of live regs at each insn, starting with the regs live on exit from the block. For each insn: 1. If a reg is set in this insns, it must be born here, since we're in SSA. Therefore, it was not live before this insns, so remove it from the set of live regs. 2. For each reg born in this insn, record a conflict between it and every other reg live coming into this insn. For each existing conflict, one of the two regs must be born while the other is alive. See Morgan or elsewhere for a proof of this. 3. Regs clobbered by this insn must have been live coming into it, so record them as such. The resulting conflict graph is not built for regs in REGS themselves; rather, partition P is used to obtain the canonical reg for each of these. The nodes of the conflict graph are these canonical regs instead. */ conflict_graph conflict_graph_compute (regset regs, partition p) { conflict_graph graph = conflict_graph_new (max_reg_num ()); regset_head live_head; regset live = &live_head; regset_head born_head; regset born = &born_head; basic_block bb; INIT_REG_SET (live); INIT_REG_SET (born); FOR_EACH_BB_REVERSE (bb) { rtx insn; rtx head; /* Start with the regs that are live on exit, limited to those we're interested in. */ COPY_REG_SET (live, bb->global_live_at_end); AND_REG_SET (live, regs); /* Walk the instruction stream backwards. */ head = BB_HEAD (bb); insn = BB_END (bb); for (insn = BB_END (bb); insn != head; insn = PREV_INSN (insn)) { int born_reg; int live_reg; rtx link; /* Are we interested in this insn? */ if (INSN_P (insn)) { /* Determine which regs are set in this insn. Since we're in SSA form, if a reg is set here it isn't set anywhere else, so this insn is where the reg is born. */ CLEAR_REG_SET (born); note_stores (PATTERN (insn), mark_reg_conflict, born); AND_REG_SET (born, regs); /* Regs born here were not live before this insn. */ AND_COMPL_REG_SET (live, born); /* For every reg born here, add a conflict with every other reg live coming into this insn. */ EXECUTE_IF_SET_IN_REG_SET (born, FIRST_PSEUDO_REGISTER, born_reg, { EXECUTE_IF_SET_IN_REG_SET (live, FIRST_PSEUDO_REGISTER, live_reg, { /* Build the conflict graph in terms of canonical regnos. 
*/ int b = partition_find (p, born_reg); int l = partition_find (p, live_reg); if (b != l) conflict_graph_add (graph, b, l); }); }); /* Morgan's algorithm checks the operands of the insn and adds them to the set of live regs. Instead, we use death information added by life analysis. Regs dead after this instruction were live before it. */ for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_DEAD) { unsigned int regno = REGNO (XEXP (link, 0)); if (REGNO_REG_SET_P (regs, regno)) SET_REGNO_REG_SET (live, regno); } } } } FREE_REG_SET (live); FREE_REG_SET (born); return graph; } /* Utility routines for data type conversion for GCC. Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* These routines are somewhat language-independent utility function intended to be called by the language-specific convert () functions. */ /* Convert EXPR to some pointer or reference type TYPE. EXPR must be pointer, reference, integer, enumeral, or literal zero; in other cases error is called. */ tree convert_to_pointer (tree type, tree expr) { if (integer_zerop (expr)) { expr = build_int_2 (0, 0); TREE_TYPE (expr) = type; return expr; } switch (TREE_CODE (TREE_TYPE (expr))) { case POINTER_TYPE: case REFERENCE_TYPE: return build1 (NOP_EXPR, type, expr); case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: if (TYPE_PRECISION (TREE_TYPE (expr)) == POINTER_SIZE) return build1 (CONVERT_EXPR, type, expr); return convert_to_pointer (type, convert (lang_hooks.types.type_for_size (POINTER_SIZE, 0), expr)); default: error ("cannot convert to a pointer type"); return convert_to_pointer (type, integer_zero_node); } } /* Avoid any floating point extensions from EXP. */ tree strip_float_extensions (tree exp) { tree sub, expt, subt; /* For floating point constant look up the narrowest type that can hold it properly and handle it like (type)(narrowest_type)constant. This way we can optimize for instance a=a*2.0 where "a" is float but 2.0 is double constant. 
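   In source terms: with "float a;", the front end builds
   a = (float) ((double) a * 2.0), and because 2.0 is exactly representable
   as a float this is equivalent to a = a * 2.0f, i.e. the arithmetic can
   be done without ever promoting A to double.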
*/ if (TREE_CODE (exp) == REAL_CST) { REAL_VALUE_TYPE orig; tree type = NULL; orig = TREE_REAL_CST (exp); if (TYPE_PRECISION (TREE_TYPE (exp)) > TYPE_PRECISION (float_type_node) && exact_real_truncate (TYPE_MODE (float_type_node), &orig)) type = float_type_node; else if (TYPE_PRECISION (TREE_TYPE (exp)) > TYPE_PRECISION (double_type_node) && exact_real_truncate (TYPE_MODE (double_type_node), &orig)) type = double_type_node; if (type) return build_real (type, real_value_truncate (TYPE_MODE (type), orig)); } if (TREE_CODE (exp) != NOP_EXPR && TREE_CODE (exp) != CONVERT_EXPR) return exp; sub = TREE_OPERAND (exp, 0); subt = TREE_TYPE (sub); expt = TREE_TYPE (exp); if (!FLOAT_TYPE_P (subt)) return exp; if (TYPE_PRECISION (subt) > TYPE_PRECISION (expt)) return exp; return strip_float_extensions (sub); } /* Convert EXPR to some floating-point type TYPE. EXPR must be float, integer, or enumeral; in other cases error is called. */ tree convert_to_real (tree type, tree expr) { enum built_in_function fcode = builtin_mathfn_code (expr); tree itype = TREE_TYPE (expr); /* Disable until we figure out how to decide whether the functions are present in runtime. */ /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */ if (optimize && (TYPE_MODE (type) == TYPE_MODE (double_type_node) || TYPE_MODE (type) == TYPE_MODE (float_type_node))) { switch (fcode) { #define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L: CASE_MATHFN (ACOS) CASE_MATHFN (ACOSH) CASE_MATHFN (ASIN) CASE_MATHFN (ASINH) CASE_MATHFN (ATAN) CASE_MATHFN (ATANH) CASE_MATHFN (CBRT) CASE_MATHFN (COS) CASE_MATHFN (COSH) CASE_MATHFN (ERF) CASE_MATHFN (ERFC) CASE_MATHFN (EXP) CASE_MATHFN (EXP10) CASE_MATHFN (EXP2) CASE_MATHFN (EXPM1) CASE_MATHFN (FABS) CASE_MATHFN (GAMMA) CASE_MATHFN (J0) CASE_MATHFN (J1) CASE_MATHFN (LGAMMA) CASE_MATHFN (LOG) CASE_MATHFN (LOG10) CASE_MATHFN (LOG1P) CASE_MATHFN (LOG2) CASE_MATHFN (LOGB) CASE_MATHFN (POW10) CASE_MATHFN (SIN) CASE_MATHFN (SINH) CASE_MATHFN (SQRT) CASE_MATHFN (TAN) CASE_MATHFN (TANH) CASE_MATHFN (TGAMMA) CASE_MATHFN (Y0) CASE_MATHFN (Y1) #undef CASE_MATHFN { tree arg0 = strip_float_extensions (TREE_VALUE (TREE_OPERAND (expr, 1))); tree newtype = type; /* We have (outertype)sqrt((innertype)x). Choose the wider mode from the both as the safe type for operation. */ if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type)) newtype = TREE_TYPE (arg0); /* Be careful about integer to fp conversions. These may overflow still. 
*/ if (FLOAT_TYPE_P (TREE_TYPE (arg0)) && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype) && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node) || TYPE_MODE (newtype) == TYPE_MODE (float_type_node))) { tree arglist; tree fn = mathfn_built_in (newtype, fcode); if (fn) { arglist = build_tree_list (NULL_TREE, fold (convert_to_real (newtype, arg0))); expr = build_function_call_expr (fn, arglist); if (newtype == type) return expr; } } } default: break; } } if (optimize && (((fcode == BUILT_IN_FLOORL || fcode == BUILT_IN_CEILL || fcode == BUILT_IN_ROUNDL || fcode == BUILT_IN_RINTL || fcode == BUILT_IN_TRUNCL || fcode == BUILT_IN_NEARBYINTL) && (TYPE_MODE (type) == TYPE_MODE (double_type_node) || TYPE_MODE (type) == TYPE_MODE (float_type_node))) || ((fcode == BUILT_IN_FLOOR || fcode == BUILT_IN_CEIL || fcode == BUILT_IN_ROUND || fcode == BUILT_IN_RINT || fcode == BUILT_IN_TRUNC || fcode == BUILT_IN_NEARBYINT) && (TYPE_MODE (type) == TYPE_MODE (float_type_node))))) { tree fn = mathfn_built_in (type, fcode); if (fn) { tree arg0 = strip_float_extensions (TREE_VALUE (TREE_OPERAND (expr, 1))); tree arglist = build_tree_list (NULL_TREE, fold (convert_to_real (type, arg0))); return build_function_call_expr (fn, arglist); } } /* Propagate the cast into the operation. */ if (itype != type && FLOAT_TYPE_P (type)) switch (TREE_CODE (expr)) { /* Convert (float)-x into -(float)x. This is always safe. */ case ABS_EXPR: case NEGATE_EXPR: if (TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (expr))) return build1 (TREE_CODE (expr), type, fold (convert_to_real (type, TREE_OPERAND (expr, 0)))); break; /* Convert (outertype)((innertype0)a+(innertype1)b) into ((newtype)a+(newtype)b) where newtype is the widest mode from all of these. */ case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case RDIV_EXPR: { tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0)); tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1)); if (FLOAT_TYPE_P (TREE_TYPE (arg0)) && FLOAT_TYPE_P (TREE_TYPE (arg1))) { tree newtype = type; if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype)) newtype = TREE_TYPE (arg0); if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype)) newtype = TREE_TYPE (arg1); if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)) { expr = build (TREE_CODE (expr), newtype, fold (convert_to_real (newtype, arg0)), fold (convert_to_real (newtype, arg1))); if (newtype == type) return expr; } } } break; default: break; } switch (TREE_CODE (TREE_TYPE (expr))) { case REAL_TYPE: return build1 (flag_float_store ? CONVERT_EXPR : NOP_EXPR, type, expr); case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: return build1 (FLOAT_EXPR, type, expr); case COMPLEX_TYPE: return convert (type, fold (build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (expr)), expr))); case POINTER_TYPE: case REFERENCE_TYPE: error ("pointer value used where a floating point value was expected"); return convert_to_real (type, integer_zero_node); default: error ("aggregate value used where a float was expected"); return convert_to_real (type, integer_zero_node); } } /* Convert EXPR to some integer (or enum) type TYPE. EXPR must be pointer, integer, discrete (enum, char, or bool), float, or vector; in other cases error is called. The result of this is always supposed to be a newly created tree node not in use in any existing structure. 
*/ tree convert_to_integer (tree type, tree expr) { enum tree_code ex_form = TREE_CODE (expr); tree intype = TREE_TYPE (expr); unsigned int inprec = TYPE_PRECISION (intype); unsigned int outprec = TYPE_PRECISION (type); /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can be. Consider `enum E = { a, b = (enum E) 3 };'. */ if (!COMPLETE_TYPE_P (type)) { error ("conversion to incomplete type"); return error_mark_node; } /* Convert e.g. (long)round(d) -> lround(d). */ /* If we're converting to char, we may encounter differing behavior between converting from double->char vs double->long->char. We're in "undefined" territory but we prefer to be conservative, so only proceed in "unsafe" math mode. */ if (optimize && (flag_unsafe_math_optimizations || (long_integer_type_node && outprec >= TYPE_PRECISION (long_integer_type_node)))) { tree s_expr = strip_float_extensions (expr); tree s_intype = TREE_TYPE (s_expr); const enum built_in_function fcode = builtin_mathfn_code (s_expr); tree fn = 0; switch (fcode) { case BUILT_IN_ROUND: case BUILT_IN_ROUNDF: case BUILT_IN_ROUNDL: if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (long_long_integer_type_node)) fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND); else fn = mathfn_built_in (s_intype, BUILT_IN_LROUND); break; case BUILT_IN_RINT: case BUILT_IN_RINTF: case BUILT_IN_RINTL: /* Only convert rint* if we can ignore math exceptions. */ if (flag_trapping_math) break; /* ... Fall through ... */ case BUILT_IN_NEARBYINT: case BUILT_IN_NEARBYINTF: case BUILT_IN_NEARBYINTL: if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (long_long_integer_type_node)) fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT); else fn = mathfn_built_in (s_intype, BUILT_IN_LRINT); break; default: break; } if (fn) { tree arglist = TREE_OPERAND (s_expr, 1); tree newexpr = build_function_call_expr (fn, arglist); return convert_to_integer (type, newexpr); } } switch (TREE_CODE (intype)) { case POINTER_TYPE: case REFERENCE_TYPE: if (integer_zerop (expr)) expr = integer_zero_node; else expr = fold (build1 (CONVERT_EXPR, lang_hooks.types.type_for_size (POINTER_SIZE, 0), expr)); return convert_to_integer (type, expr); case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: /* If this is a logical operation, which just returns 0 or 1, we can change the type of the expression. For some logical operations, we must also change the types of the operands to maintain type correctness. */ if (TREE_CODE_CLASS (ex_form) == '<') { expr = copy_node (expr); TREE_TYPE (expr) = type; return expr; } else if (ex_form == TRUTH_AND_EXPR || ex_form == TRUTH_ANDIF_EXPR || ex_form == TRUTH_OR_EXPR || ex_form == TRUTH_ORIF_EXPR || ex_form == TRUTH_XOR_EXPR) { expr = copy_node (expr); TREE_OPERAND (expr, 0) = convert (type, TREE_OPERAND (expr, 0)); TREE_OPERAND (expr, 1) = convert (type, TREE_OPERAND (expr, 1)); TREE_TYPE (expr) = type; return expr; } else if (ex_form == TRUTH_NOT_EXPR) { expr = copy_node (expr); TREE_OPERAND (expr, 0) = convert (type, TREE_OPERAND (expr, 0)); TREE_TYPE (expr) = type; return expr; } /* If we are widening the type, put in an explicit conversion. Similarly if we are not changing the width. After this, we know we are truncating EXPR. */ else if (outprec >= inprec) { enum tree_code code; /* If the precision of the EXPR's type is K bits and the destination mode has more bits, and the sign is changing, it is not safe to use a NOP_EXPR. 
For example, suppose that EXPR's type is a 3-bit unsigned integer type, the TYPE is a 3-bit signed integer type, and the machine mode for the types is 8-bit QImode. In that case, the conversion necessitates an explicit sign-extension. In the signed-to-unsigned case the high-order bits have to be cleared. */ if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr)) && (TYPE_PRECISION (TREE_TYPE (expr)) != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr))))) code = CONVERT_EXPR; else code = NOP_EXPR; return build1 (code, type, expr); } /* If TYPE is an enumeral type or a type with a precision less than the number of bits in its mode, do the conversion to the type corresponding to its mode, then do a nop conversion to TYPE. */ else if (TREE_CODE (type) == ENUMERAL_TYPE || outprec != GET_MODE_BITSIZE (TYPE_MODE (type))) return build1 (NOP_EXPR, type, convert (lang_hooks.types.type_for_mode (TYPE_MODE (type), TYPE_UNSIGNED (type)), expr)); /* Here detect when we can distribute the truncation down past some arithmetic. For example, if adding two longs and converting to an int, we can equally well convert both to ints and then add. For the operations handled here, such truncation distribution is always safe. It is desirable in these cases: 1) when truncating down to full-word from a larger size 2) when truncating takes no work. 3) when at least one operand of the arithmetic has been extended (as by C's default conversions). In this case we need two conversions if we do the arithmetic as already requested, so we might as well truncate both and then combine. Perhaps that way we need only one. Note that in general we cannot do the arithmetic in a type shorter than the desired result of conversion, even if the operands are both extended from a shorter type, because they might overflow if combined in that type. The exceptions to this--the times when two narrow values can be combined in their narrow type even to make a wider result--are handled by "shorten" in build_binary_op. */ switch (ex_form) { case RSHIFT_EXPR: /* We can pass truncation down through right shifting when the shift count is a nonpositive constant. */ if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST && tree_int_cst_lt (TREE_OPERAND (expr, 1), convert (TREE_TYPE (TREE_OPERAND (expr, 1)), integer_one_node))) goto trunc1; break; case LSHIFT_EXPR: /* We can pass truncation down through left shifting when the shift count is a nonnegative constant and the target type is unsigned. */ if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0 && TYPE_UNSIGNED (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST) { /* If shift count is less than the width of the truncated type, really shift. */ if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type))) /* In this case, shifting is like multiplication. */ goto trunc1; else { /* If it is >= that width, result is zero. Handling this with trunc1 would give the wrong result: (int) ((long long) a << 32) is well defined (as 0) but (int) a << 32 is undefined and would get a warning. */ tree t = convert_to_integer (type, integer_zero_node); /* If the original expression had side-effects, we must preserve it. 
*/ if (TREE_SIDE_EFFECTS (expr)) return build (COMPOUND_EXPR, type, expr, t); else return t; } } break; case MAX_EXPR: case MIN_EXPR: case MULT_EXPR: { tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type); tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type); /* Don't distribute unless the output precision is at least as big as the actual inputs. Otherwise, the comparison of the truncated values will be wrong. */ if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0)) && outprec >= TYPE_PRECISION (TREE_TYPE (arg1)) /* If signedness of arg0 and arg1 don't match, we can't necessarily find a type to compare them in. */ && (TYPE_UNSIGNED (TREE_TYPE (arg0)) == TYPE_UNSIGNED (TREE_TYPE (arg1)))) goto trunc1; break; } case PLUS_EXPR: case MINUS_EXPR: case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: trunc1: { tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type); tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type); if (outprec >= BITS_PER_WORD || TRULY_NOOP_TRUNCATION (outprec, inprec) || inprec > TYPE_PRECISION (TREE_TYPE (arg0)) || inprec > TYPE_PRECISION (TREE_TYPE (arg1))) { /* Do the arithmetic in type TYPEX, then convert result to TYPE. */ tree typex = type; /* Can't do arithmetic in enumeral types so use an integer type that will hold the values. */ if (TREE_CODE (typex) == ENUMERAL_TYPE) typex = lang_hooks.types.type_for_size (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex)); /* But now perhaps TYPEX is as wide as INPREC. In that case, do nothing special here. (Otherwise would recurse infinitely in convert. */ if (TYPE_PRECISION (typex) != inprec) { /* Don't do unsigned arithmetic where signed was wanted, or vice versa. Exception: if both of the original operands were unsigned then we can safely do the work as unsigned. Exception: shift operations take their type solely from the first argument. Exception: the LSHIFT_EXPR case above requires that we perform this operation unsigned lest we produce signed-overflow undefinedness. And we may need to do it as unsigned if we truncate to the original size. */ if (TYPE_UNSIGNED (TREE_TYPE (expr)) || (TYPE_UNSIGNED (TREE_TYPE (arg0)) && (TYPE_UNSIGNED (TREE_TYPE (arg1)) || ex_form == LSHIFT_EXPR || ex_form == RSHIFT_EXPR || ex_form == LROTATE_EXPR || ex_form == RROTATE_EXPR)) || ex_form == LSHIFT_EXPR) typex = lang_hooks.types.unsigned_type (typex); else typex = lang_hooks.types.signed_type (typex); return convert (type, fold (build (ex_form, typex, convert (typex, arg0), convert (typex, arg1)))); } } } break; case NEGATE_EXPR: case BIT_NOT_EXPR: /* This is not correct for ABS_EXPR, since we must test the sign before truncation. */ { tree typex = type; /* Can't do arithmetic in enumeral types so use an integer type that will hold the values. */ if (TREE_CODE (typex) == ENUMERAL_TYPE) typex = lang_hooks.types.type_for_size (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex)); /* But now perhaps TYPEX is as wide as INPREC. In that case, do nothing special here. (Otherwise would recurse infinitely in convert. */ if (TYPE_PRECISION (typex) != inprec) { /* Don't do unsigned arithmetic where signed was wanted, or vice versa. */ if (TYPE_UNSIGNED (TREE_TYPE (expr))) typex = lang_hooks.types.unsigned_type (typex); else typex = lang_hooks.types.signed_type (typex); return convert (type, fold (build1 (ex_form, typex, convert (typex, TREE_OPERAND (expr, 0))))); } } case NOP_EXPR: /* Don't introduce a "can't convert between vector values of different size" error. 
*/ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0)))) != GET_MODE_SIZE (TYPE_MODE (type)))) break; /* If truncating after truncating, might as well do all at once. If truncating after extending, we may get rid of wasted work. */ return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type)); case COND_EXPR: /* It is sometimes worthwhile to push the narrowing down through the conditional and never loses. */ return fold (build (COND_EXPR, type, TREE_OPERAND (expr, 0), convert (type, TREE_OPERAND (expr, 1)), convert (type, TREE_OPERAND (expr, 2)))); default: break; } return build1 (CONVERT_EXPR, type, expr); case REAL_TYPE: return build1 (FIX_TRUNC_EXPR, type, expr); case COMPLEX_TYPE: return convert (type, fold (build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (expr)), expr))); case VECTOR_TYPE: if (GET_MODE_SIZE (TYPE_MODE (type)) != GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (expr)))) { error ("can't convert between vector values of different size"); return error_mark_node; } return build1 (NOP_EXPR, type, expr); default: error ("aggregate value used where an integer was expected"); return convert (type, integer_zero_node); } } /* Convert EXPR to the complex type TYPE in the usual ways. */ tree convert_to_complex (tree type, tree expr) { tree subtype = TREE_TYPE (type); switch (TREE_CODE (TREE_TYPE (expr))) { case REAL_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: return build (COMPLEX_EXPR, type, convert (subtype, expr), convert (subtype, integer_zero_node)); case COMPLEX_TYPE: { tree elt_type = TREE_TYPE (TREE_TYPE (expr)); if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype)) return expr; else if (TREE_CODE (expr) == COMPLEX_EXPR) return fold (build (COMPLEX_EXPR, type, convert (subtype, TREE_OPERAND (expr, 0)), convert (subtype, TREE_OPERAND (expr, 1)))); else { expr = save_expr (expr); return fold (build (COMPLEX_EXPR, type, convert (subtype, fold (build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (expr)), expr))), convert (subtype, fold (build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (expr)), expr))))); } } case POINTER_TYPE: case REFERENCE_TYPE: error ("pointer value used where a complex was expected"); return convert_to_complex (type, integer_zero_node); default: error ("aggregate value used where a complex was expected"); return convert_to_complex (type, integer_zero_node); } } /* Convert EXPR to the vector type TYPE in the usual ways. */ tree convert_to_vector (tree type, tree expr) { switch (TREE_CODE (TREE_TYPE (expr))) { case INTEGER_TYPE: case VECTOR_TYPE: if (GET_MODE_SIZE (TYPE_MODE (type)) != GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (expr)))) { error ("can't convert between vector values of different size"); return error_mark_node; } return build1 (NOP_EXPR, type, expr); default: error ("can't convert value to a vector"); return convert_to_vector (type, integer_zero_node); } } /* Read and write coverage files, and associated functionality. Copyright (C) 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. Contributed by James E. Wilson, UC Berkeley/Cygnus Support; based on some ideas from Dain Samples of UC Berkeley. Further mangling by Bob Manson, Cygnus Support. Further mangled by Nathan Sidwell, CodeSourcery This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define GCOV_LINKAGE /* coverage.h - Defines data exported from coverage.c Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_COVERAGE_H #define GCC_COVERAGE_H /* File format for coverage information Copyright (C) 1996, 1997, 1998, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Bob Manson . Completely remangled by Nathan Sidwell . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* As a special exception, if you link this library with other files, some of which are compiled with GCC, to produce an executable, this library does not by itself cause the resulting executable to be covered by the GNU General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU General Public License. */ /* Coverage information is held in two files. A notes file, which is generated by the compiler, and a data file, which is generated by the program under test. Both files use a similar structure. We do not attempt to make these files backwards compatible with previous versions, as you only need coverage information when developing a program. We do hold version information, so that mismatches can be detected, and we use a format that allows tools to skip information they do not understand or are not interested in. Numbers are recorded in the 32 bit unsigned binary form of the endianness of the machine generating the file. 64 bit numbers are stored as two 32 bit numbers, the low part first. 
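   As a rough sketch of that layout (illustration only, written in terms of
   plain stdio rather than the gcov_write_* routines declared below; the
   sketch_* name is hypothetical), a 64-bit counter would be emitted as its
   low 32-bit word followed by its high word:  */

#if 0
static void
sketch_write_counter (FILE *f, unsigned long long value)
{
  /* Assumes a 32-bit unsigned int on the host generating the file.  */
  unsigned int word;

  word = (unsigned int) (value & 0xffffffffu);	/* low part first */
  fwrite (&word, sizeof word, 1, f);
  word = (unsigned int) (value >> 32);		/* then the high part */
  fwrite (&word, sizeof word, 1, f);
}
#endif

/*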
Strings are padded with 1 to 4 NUL bytes, to bring the length up to a multiple of 4. The number of 4 bytes is stored, followed by the padded string. Zero length and NULL strings are simply stored as a length of zero (they have no trailing NUL or padding). int32: byte3 byte2 byte1 byte0 | byte0 byte1 byte2 byte3 int64: int32:low int32:high string: int32:0 | int32:length char* char:0 padding padding: | char:0 | char:0 char:0 | char:0 char:0 char:0 item: int32 | int64 | string The basic format of the files is file : int32:magic int32:version int32:stamp record* The magic ident is different for the notes and the data files. The magic ident is used to determine the endianness of the file, when reading. The version is the same for both files and is derived from gcc's version number. The stamp value is used to synchronize note and data files and to synchronize merging within a data file. It need not be an absolute time stamp, merely a ticker that increments fast enough and cycles slow enough to distinguish different compile/run/compile cycles. Although the ident and version are formally 32 bit numbers, they are derived from 4 character ASCII strings. The version number consists of the single character major version number, a two character minor version number (leading zero for versions less than 10), and a single character indicating the status of the release. That will be 'e' experimental, 'p' prerelease and 'r' for release. Because, by good fortune, these are in alphabetical order, string collating can be used to compare version strings. Be aware that the 'e' designation will (naturally) be unstable and might be incompatible with itself. For gcc 3.4 experimental, it would be '304e' (0x33303465). When the major version reaches 10, the letters A-Z will be used. Assuming minor increments releases every 6 months, we have to make a major increment every 50 years. Assuming major increments releases every 5 years, we're ok for the next 155 years -- good enough for me. A record has a tag, length and variable amount of data. record: header data header: int32:tag int32:length data: item* Records are not nested, but there is a record hierarchy. Tag numbers reflect this hierarchy. Tags are unique across note and data files. Some record types have a varying amount of data. The LENGTH is the number of 4bytes that follow and is usually used to determine how much data. The tag value is split into 4 8-bit fields, one for each of four possible levels. The most significant is allocated first. Unused levels are zero. Active levels are odd-valued, so that the LSB of the level is one. A sub-level incorporates the values of its superlevels. This formatting allows you to determine the tag hierarchy, without understanding the tags themselves, and is similar to the standard section numbering used in technical documents. Level values [1..3f] are used for common tags, values [41..9f] for the notes file and [a1..ff] for the data file. The basic block graph file contains the following records note: unit function-graph* unit: header int32:checksum string:source function-graph: announce_function basic_blocks {arcs | lines}* announce_function: header int32:ident int32:checksum string:name string:source int32:lineno basic_block: header int32:flags* arcs: header int32:block_no arc* arc: int32:dest_block int32:flags lines: header int32:block_no line* int32:0 string:NULL line: int32:line_no | int32:0 string:filename The BASIC_BLOCK record holds per-bb flags. The number of blocks can be inferred from its data length. 
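   As a concrete illustration of the version encoding described above (the
   sketch_* name is hypothetical and not part of this file), the four
   version characters are packed with the first character in the most
   significant byte, so "304e" becomes 0x33303465:  */

#if 0
static unsigned int
sketch_pack_version (const char v[4])
{
  return ((unsigned int) (unsigned char) v[0] << 24)
	 | ((unsigned int) (unsigned char) v[1] << 16)
	 | ((unsigned int) (unsigned char) v[2] << 8)
	 | ((unsigned int) (unsigned char) v[3]);
}
#endif

/*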
There is one ARCS record per basic block. The number of arcs from a bb is implicit from the data length. It enumerates the destination bb and per-arc flags. There is one LINES record per basic block, it enumerates the source lines which belong to that basic block. Source file names are introduced by a line number of 0, following lines are from the new source file. The initial source file for the function is NULL, but the current source file should be remembered from one LINES record to the next. The end of a block is indicated by an empty filename - this does not reset the current source file. Note there is no ordering of the ARCS and LINES records: they may be in any order, interleaved in any manner. The current filename follows the order the LINES records are stored in the file, *not* the ordering of the blocks they are for. The data file contains the following records. data: {unit function-data* summary:object summary:program*}* unit: header int32:checksum function-data: announce_function arc_counts announce_function: header int32:ident int32:checksum arc_counts: header int64:count* summary: int32:checksum {count-summary}GCOV_COUNTERS count-summary: int32:num int32:runs int64:sum int64:max int64:sum_max The ANNOUNCE_FUNCTION record is the same as that in the note file, but without the source location. The ARC_COUNTS gives the counter values for those arcs that are instrumented. The SUMMARY records give information about the whole object file and about the whole program. The checksum is used for whole program summaries, and disambiguates different programs which include the same instrumented object file. There may be several program summaries, each with a unique checksum. The object summary's checksum is zero. Note that the data file might contain information from several runs concatenated, or the data might be merged. This file is included by both the compiler, gcov tools and the runtime support library libgcov. IN_LIBGCOV and IN_GCOV are used to distinguish which case is which. If IN_LIBGCOV is nonzero, libgcov is being built. If IN_GCOV is nonzero, the gcov tools are being built. Otherwise the compiler is being built. IN_GCOV may be positive or negative. If positive, we are compiling a tool that requires additional functions (see the code for knowledge of what those functions are). 
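Before the declarations, a stand-alone sketch of reading this format.  */

/* Illustrative sketch, not part of the original source: a minimal
   external reader for the layout described above.  It assumes a 32-bit
   `unsigned', ignores 64-bit items and record contents, and omits
   error recovery; it only shows how the magic word determines the byte
   order and how the TAG/LENGTH headers let a reader skip records it
   does not understand.  */

#include <stdio.h>

static inline unsigned
example_swap_word (unsigned value)
{
  value = (value >> 16) | (value << 16);
  return ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff);
}

static inline int
example_scan_gcov_file (FILE *file, unsigned expected_magic)
{
  unsigned preamble[3];   /* magic, version, stamp */
  unsigned header[2];     /* tag, length */
  int swap;

  if (fread (preamble, 4, 3, file) != 3)
    return -1;
  if (preamble[0] == expected_magic)
    swap = 0;
  else if (example_swap_word (preamble[0]) == expected_magic)
    swap = 1;             /* written on an other-endian host */
  else
    return -1;            /* not a gcov file */

  while (fread (header, 4, 2, file) == 2)
    {
      unsigned length = swap ? example_swap_word (header[1]) : header[1];

      /* A reader that does not understand HEADER[0] (the tag) simply
         skips LENGTH 4-byte words of data.  */
      if (fseek (file, (long) length * 4, SEEK_CUR) != 0)
        return -1;
    }
  return 0;
}

/* End of the sketch; the gcov-io declarations follow.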
*/ #ifndef GCC_GCOV_IO_H #define GCC_GCOV_IO_H #if IN_LIBGCOV /* About the target */ #if BITS_PER_UNIT == 8 typedef unsigned gcov_unsigned_t __attribute__ ((mode (SI))); typedef unsigned gcov_position_t __attribute__ ((mode (SI))); #if LONG_LONG_TYPE_SIZE > 32 typedef signed gcov_type __attribute__ ((mode (DI))); #else typedef signed gcov_type __attribute__ ((mode (SI))); #endif #else #if BITS_PER_UNIT == 16 typedef unsigned gcov_unsigned_t __attribute__ ((mode (HI))); typedef unsigned gcov_position_t __attribute__ ((mode (HI))); #if LONG_LONG_TYPE_SIZE > 32 typedef signed gcov_type __attribute__ ((mode (SI))); #else typedef signed gcov_type __attribute__ ((mode (HI))); #endif #else typedef unsigned gcov_unsigned_t __attribute__ ((mode (QI))); typedef unsigned gcov_position_t __attribute__ ((mode (QI))); #if LONG_LONG_TYPE_SIZE > 32 typedef signed gcov_type __attribute__ ((mode (HI))); #else typedef signed gcov_type __attribute__ ((mode (QI))); #endif #endif #endif #if defined (TARGET_HAS_F_SETLKW) #define GCOV_LOCKED 1 #else #define GCOV_LOCKED 0 #endif #else /* !IN_LIBGCOV */ /* About the host */ typedef unsigned gcov_unsigned_t; typedef unsigned gcov_position_t; /* gcov_type is typedef'd elsewhere for the compiler */ #if IN_GCOV #define GCOV_LINKAGE static typedef HOST_WIDEST_INT gcov_type; #if IN_GCOV > 0 #include #endif #else /*!IN_GCOV */ #if LONG_LONG_TYPE_SIZE > 32 #define GCOV_TYPE_NODE intDI_type_node #else #define GCOV_TYPE_NODE intSI_type_node #endif #endif #if defined (HOST_HAS_F_SETLKW) #define GCOV_LOCKED 1 #else #define GCOV_LOCKED 0 #endif #endif /* !IN_LIBGCOV */ /* In gcov we want function linkage to be static. In libgcov we need these functions to be extern, so prefix them with __gcov. In the compiler we want it extern, so that they can be accessed from elsewhere. */ #if IN_LIBGCOV #define gcov_var __gcov_var #define gcov_open __gcov_open #define gcov_close __gcov_close #define gcov_write_tag_length __gcov_write_tag_length #define gcov_position __gcov_position #define gcov_seek __gcov_seek #define gcov_rewrite __gcov_rewrite #define gcov_is_error __gcov_is_error #define gcov_is_eof __gcov_is_eof #define gcov_write_unsigned __gcov_write_unsigned #define gcov_write_counter __gcov_write_counter #define gcov_write_summary __gcov_write_summary #define gcov_read_unsigned __gcov_read_unsigned #define gcov_read_counter __gcov_read_counter #define gcov_read_summary __gcov_read_summary /* Poison these, so they don't accidentally slip in. */ #pragma GCC poison gcov_write_string gcov_write_tag gcov_write_length #pragma GCC poison gcov_read_string gcov_sync gcov_time gcov_magic #endif #ifndef GCOV_LINKAGE #define GCOV_LINKAGE extern #endif /* File suffixes. */ #define GCOV_DATA_SUFFIX ".gcda" #define GCOV_NOTE_SUFFIX ".gcno" /* File magic. Must not be palindromes. */ #define GCOV_DATA_MAGIC ((gcov_unsigned_t)0x67636461) /* "gcda" */ #define GCOV_NOTE_MAGIC ((gcov_unsigned_t)0x67636e6f) /* "gcno" */ /* gcov-iov.h is automatically generated by the makefile from version.c, it looks like #define GCOV_VERSION ((gcov_unsigned_t)0x89abcdef) */ /* Generated automatically by the program `./gcov-iov' from `3.5.0 20040706 (experimental)'. */ #define GCOV_VERSION ((gcov_unsigned_t)0x33303565) /* 305e */ /* Convert a magic or version number to a 4 character string. */ #define GCOV_UNSIGNED2STRING(ARRAY,VALUE) \ ((ARRAY)[0] = (char)((VALUE) >> 24), \ (ARRAY)[1] = (char)((VALUE) >> 16), \ (ARRAY)[2] = (char)((VALUE) >> 8), \ (ARRAY)[3] = (char)((VALUE) >> 0)) /* The record tags. 
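 (Before the tag list, one aside on the version handling above.)  */

/* Illustrative sketch, not part of the original header: how a reader
   checks a file's version word and turns it into text for a
   diagnostic, much as read_counts_file does further down.  The
   function name is hypothetical.  */
static inline int
example_version_matches (gcov_unsigned_t version, char text[4])
{
  GCOV_UNSIGNED2STRING (text, version);   /* e.g. 0x33303565 -> "305e" */
  return version == GCOV_VERSION;
}

/* The record tags: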
Values [1..3f] are for tags which may be in either file. Values [41..9f] for those in the note file and [a1..ff] for the data file. */ #define GCOV_TAG_FUNCTION ((gcov_unsigned_t)0x01000000) #define GCOV_TAG_FUNCTION_LENGTH (2) #define GCOV_TAG_BLOCKS ((gcov_unsigned_t)0x01410000) #define GCOV_TAG_BLOCKS_LENGTH(NUM) (NUM) #define GCOV_TAG_BLOCKS_NUM(LENGTH) (LENGTH) #define GCOV_TAG_ARCS ((gcov_unsigned_t)0x01430000) #define GCOV_TAG_ARCS_LENGTH(NUM) (1 + (NUM) * 2) #define GCOV_TAG_ARCS_NUM(LENGTH) (((LENGTH) - 1) / 2) #define GCOV_TAG_LINES ((gcov_unsigned_t)0x01450000) #define GCOV_TAG_COUNTER_BASE ((gcov_unsigned_t)0x01a10000) #define GCOV_TAG_COUNTER_LENGTH(NUM) ((NUM) * 2) #define GCOV_TAG_COUNTER_NUM(LENGTH) ((LENGTH) / 2) #define GCOV_TAG_OBJECT_SUMMARY ((gcov_unsigned_t)0xa1000000) #define GCOV_TAG_PROGRAM_SUMMARY ((gcov_unsigned_t)0xa3000000) #define GCOV_TAG_SUMMARY_LENGTH \ (1 + GCOV_COUNTERS_SUMMABLE * (2 + 3 * 2)) /* Counters that are collected. */ #define GCOV_COUNTER_ARCS 0 /* Arc transitions. */ #define GCOV_COUNTERS_SUMMABLE 1 /* Counters which can be summaried. */ #define GCOV_FIRST_VALUE_COUNTER 1 /* The first of counters used for value profiling. They must form a consecutive interval and their order must match the order of HIST_TYPEs in value-prof.h. */ #define GCOV_COUNTER_V_INTERVAL 1 /* Histogram of value inside an interval. */ #define GCOV_COUNTER_V_POW2 2 /* Histogram of exact power2 logarithm of a value. */ #define GCOV_COUNTER_V_SINGLE 3 /* The most common value of expression. */ #define GCOV_COUNTER_V_DELTA 4 /* The most common difference between consecutive values of expression. */ #define GCOV_LAST_VALUE_COUNTER 4 /* The last of counters used for value profiling. */ #define GCOV_COUNTERS 5 /* Number of counters used for value profiling. */ #define GCOV_N_VALUE_COUNTERS \ (GCOV_LAST_VALUE_COUNTER - GCOV_FIRST_VALUE_COUNTER + 1) /* A list of human readable names of the counters */ #define GCOV_COUNTER_NAMES {"arcs", "interval", "pow2", "single", "delta"} /* Names of merge functions for counters. */ #define GCOV_MERGE_FUNCTIONS {"__gcov_merge_add", \ "__gcov_merge_add", \ "__gcov_merge_add", \ "__gcov_merge_single", \ "__gcov_merge_delta"} /* Convert a counter index to a tag. */ #define GCOV_TAG_FOR_COUNTER(COUNT) \ (GCOV_TAG_COUNTER_BASE + ((gcov_unsigned_t)(COUNT) << 17)) /* Convert a tag to a counter. */ #define GCOV_COUNTER_FOR_TAG(TAG) \ ((unsigned)(((TAG) - GCOV_TAG_COUNTER_BASE) >> 17)) /* Check whether a tag is a counter tag. */ #define GCOV_TAG_IS_COUNTER(TAG) \ (!((TAG) & 0xFFFF) && GCOV_COUNTER_FOR_TAG (TAG) < GCOV_COUNTERS) /* The tag level mask has 1's in the position of the inner levels, & the lsb of the current level, and zero on the current and outer levels. */ #define GCOV_TAG_MASK(TAG) (((TAG) - 1) ^ (TAG)) /* Return nonzero if SUB is an immediate subtag of TAG. */ #define GCOV_TAG_IS_SUBTAG(TAG,SUB) \ (GCOV_TAG_MASK (TAG) >> 8 == GCOV_TAG_MASK (SUB) \ && !(((SUB) ^ (TAG)) & ~GCOV_TAG_MASK(TAG))) /* Return nonzero if SUB is at a sublevel to TAG. */ #define GCOV_TAG_IS_SUBLEVEL(TAG,SUB) \ (GCOV_TAG_MASK (TAG) > GCOV_TAG_MASK (SUB)) /* Basic block flags. */ #define GCOV_BLOCK_UNEXPECTED (1 << 1) /* Arc flags. */ #define GCOV_ARC_ON_TREE (1 << 0) #define GCOV_ARC_FAKE (1 << 1) #define GCOV_ARC_FALLTHROUGH (1 << 2) /* Structured records. */ /* Cumulative counter data. */ struct gcov_ctr_summary { gcov_unsigned_t num; /* number of counters. */ gcov_unsigned_t runs; /* number of program runs */ gcov_type sum_all; /* sum of all counters accumulated. 
*/ gcov_type run_max; /* maximum value on a single run. */ gcov_type sum_max; /* sum of individual run max values. */ }; /* Object & program summary record. */ struct gcov_summary { gcov_unsigned_t checksum; /* checksum of program */ struct gcov_ctr_summary ctrs[GCOV_COUNTERS_SUMMABLE]; }; /* Structures embedded in coveraged program. The structures generated by write_profile must match these. */ #if IN_LIBGCOV /* Information about a single function. This uses the trailing array idiom. The number of counters is determined from the counter_mask in gcov_info. We hold an array of function info, so have to explicitly calculate the correct array stride. */ struct gcov_fn_info { gcov_unsigned_t ident; /* unique ident of function */ gcov_unsigned_t checksum; /* function checksum */ unsigned n_ctrs[0]; /* instrumented counters */ }; /* Type of function used to merge counters. */ typedef void (*gcov_merge_fn) (gcov_type *, gcov_unsigned_t); /* Information about counters. */ struct gcov_ctr_info { gcov_unsigned_t num; /* number of counters. */ gcov_type *values; /* their values. */ gcov_merge_fn merge; /* The function used to merge them. */ }; /* Information about a single object file. */ struct gcov_info { gcov_unsigned_t version; /* expected version number */ struct gcov_info *next; /* link to next, used by libgcov */ gcov_unsigned_t stamp; /* uniquifying time stamp */ const char *filename; /* output file name */ unsigned n_functions; /* number of functions */ const struct gcov_fn_info *functions; /* table of functions */ unsigned ctr_mask; /* mask of counters instrumented. */ struct gcov_ctr_info counts[0]; /* count data. The number of bits set in the ctr_mask field determines how big this array is. */ }; /* Register a new object file module. */ extern void __gcov_init (struct gcov_info *); /* Called before fork, to avoid double counting. */ extern void __gcov_flush (void); /* The merge function that just sums the counters. */ extern void __gcov_merge_add (gcov_type *, unsigned); /* The merge function to choose the most common value. */ extern void __gcov_merge_single (gcov_type *, unsigned); /* The merge function to choose the most common difference between consecutive values. */ extern void __gcov_merge_delta (gcov_type *, unsigned); #ifndef inhibit_libc /* The wrappers around some library functions.. */ extern pid_t __gcov_fork (void); extern int __gcov_execl (const char *, const char *, ...); extern int __gcov_execlp (const char *, const char *, ...); extern int __gcov_execle (const char *, const char *, ...); extern int __gcov_execv (const char *, char *const []); extern int __gcov_execvp (const char *, char *const []); extern int __gcov_execve (const char *, char *const [], char *const []); #endif #endif /* IN_LIBGCOV */ #if IN_LIBGCOV >= 0 /* Optimum number of gcov_unsigned_t's read from or written to disk. */ #define GCOV_BLOCK_SIZE (1 << 10) GCOV_LINKAGE struct gcov_var { FILE *file; gcov_position_t start; /* Position of first byte of block */ unsigned offset; /* Read/write position within the block. */ unsigned length; /* Read limit in the block. */ unsigned overread; /* Number of words overread. */ int error; /* < 0 overflow, > 0 disk error. */ int mode; /* < 0 writing, > 0 reading */ #if IN_LIBGCOV /* Holds one block plus 4 bytes, thus all coverage reads & writes fit within this buffer and we always can transfer GCOV_BLOCK_SIZE to and from the disk. libgcov never backtracks and only writes 4 or 8 byte objects. 
*/ gcov_unsigned_t buffer[GCOV_BLOCK_SIZE + 1]; #else int endian; /* Swap endianness. */ /* Holds a variable length block, as the compiler can write strings and needs to backtrack. */ size_t alloc; gcov_unsigned_t *buffer; #endif } gcov_var; /* Functions for reading and writing gcov files. In libgcov you can open the file for reading then writing. Elsewhere you can open the file either for reading or for writing. When reading a file you may use the gcov_read_* functions, gcov_sync, gcov_position, & gcov_error. When writing a file you may use the gcov_write functions, gcov_seek & gcov_error. When a file is to be rewritten you use the functions for reading, then gcov_rewrite then the functions for writing. Your file may become corrupted if you break these invariants. */ #if IN_LIBGCOV GCOV_LINKAGE int gcov_open (const char */*name*/); #else GCOV_LINKAGE int gcov_open (const char */*name*/, int /*direction*/); GCOV_LINKAGE int gcov_magic (gcov_unsigned_t, gcov_unsigned_t); #endif GCOV_LINKAGE int gcov_close (void); /* Available everywhere. */ static gcov_position_t gcov_position (void); static int gcov_is_error (void); static int gcov_is_eof (void); GCOV_LINKAGE gcov_unsigned_t gcov_read_unsigned (void); GCOV_LINKAGE gcov_type gcov_read_counter (void); GCOV_LINKAGE void gcov_read_summary (struct gcov_summary *); #if IN_LIBGCOV /* Available only in libgcov */ GCOV_LINKAGE void gcov_write_counter (gcov_type); GCOV_LINKAGE void gcov_write_tag_length (gcov_unsigned_t, gcov_unsigned_t); GCOV_LINKAGE void gcov_write_summary (gcov_unsigned_t /*tag*/, const struct gcov_summary *); static void gcov_truncate (void); static void gcov_rewrite (void); GCOV_LINKAGE void gcov_seek (gcov_position_t /*position*/); #else /* Available outside libgcov */ GCOV_LINKAGE const char *gcov_read_string (void); GCOV_LINKAGE void gcov_sync (gcov_position_t /*base*/, gcov_unsigned_t /*length */); #endif #if !IN_GCOV /* Available outside gcov */ GCOV_LINKAGE void gcov_write_unsigned (gcov_unsigned_t); #endif #if !IN_GCOV && !IN_LIBGCOV /* Available only in compiler */ GCOV_LINKAGE void gcov_write_string (const char *); GCOV_LINKAGE gcov_position_t gcov_write_tag (gcov_unsigned_t); GCOV_LINKAGE void gcov_write_length (gcov_position_t /*position*/); #endif #if IN_GCOV > 0 /* Available in gcov */ GCOV_LINKAGE time_t gcov_time (void); #endif /* Make sure the library is used correctly. */ #if ENABLE_CHECKING #define GCOV_CHECK(expr) ((expr) ? (void)0 : (void)abort ()) #else #define GCOV_CHECK(expr) #endif #define GCOV_CHECK_READING() GCOV_CHECK(gcov_var.mode > 0) #define GCOV_CHECK_WRITING() GCOV_CHECK(gcov_var.mode < 0) /* Save the current position in the gcov file. */ static inline gcov_position_t gcov_position (void) { GCOV_CHECK_READING (); return gcov_var.start + gcov_var.offset; } /* Return nonzero if we read to end of file. */ static inline int gcov_is_eof (void) { return !gcov_var.overread; } /* Return nonzero if the error flag is set. */ static inline int gcov_is_error (void) { return gcov_var.file ? gcov_var.error : 1; } #if IN_LIBGCOV /* Move to beginning of file and initialize for writing. 
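 (gcov_rewrite, just below, does this.)  */

/* Illustrative sketch, not part of the original source: the
   read-then-rewrite sequence from the interface comment above, reduced
   to a single word.  libgcov opens the file, reads with the
   gcov_read_* routines, switches over with gcov_rewrite, and writes
   the merged result back.  The function name and the one-word "merge"
   are made up for illustration.  */
static inline void
example_merge_one_word (const char *filename, gcov_unsigned_t addend)
{
  gcov_unsigned_t old_value;

  if (!gcov_open (filename))
    return;
  old_value = gcov_read_unsigned ();    /* reading phase */
  gcov_rewrite ();                      /* switch from reading to writing */
  gcov_write_unsigned (old_value + addend);
  gcov_close ();
}

/* Move to beginning of file and initialize for writing.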
*/ static inline void gcov_rewrite (void) { GCOV_CHECK_READING (); gcov_var.mode = -1; gcov_var.start = 0; gcov_var.offset = 0; fseek (gcov_var.file, 0L, SEEK_SET); } #ifdef __MINGW32__ #define ftruncate _chsize #endif static inline void gcov_truncate (void) { ftruncate (fileno (gcov_var.file), 0L); } #endif #endif /* IN_LIBGCOV >= 0 */ #endif /* GCC_GCOV_IO_H */ extern void coverage_init (const char *); extern void coverage_finish (void); extern void coverage_read_counts_file (void); /* Complete the coverage information for the current function. Once per function. */ extern void coverage_end_function (void); /* Start outputting coverage information for the current function. Repeatable per function. */ extern int coverage_begin_output (void); /* Allocate some counters. Repeatable per function. */ extern int coverage_counter_alloc (unsigned /*counter*/, unsigned/*num*/); /* Use a counter from the most recent allocation. */ extern rtx rtl_coverage_counter_ref (unsigned /*counter*/, unsigned/*num*/); /* Use a counter from the most recent allocation. */ extern tree tree_coverage_counter_ref (unsigned /*counter*/, unsigned/*num*/); /* Get all the counters for the current function. */ extern gcov_type *get_coverage_counts (unsigned /*counter*/, unsigned /*expected*/, const struct gcov_ctr_summary **); #endif /* File format for coverage information Copyright (C) 1996, 1997, 1998, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Bob Manson . Completely remangled by Nathan Sidwell . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Routines declared in gcov-io.h. This file should be #included by another source file, after having #included gcov-io.h. */ #if !IN_GCOV static void gcov_write_block (unsigned); static gcov_unsigned_t *gcov_write_words (unsigned); #endif static const gcov_unsigned_t *gcov_read_words (unsigned); #if !IN_LIBGCOV static void gcov_allocate (unsigned); #endif static inline gcov_unsigned_t from_file (gcov_unsigned_t value) { #if !IN_LIBGCOV if (gcov_var.endian) { value = (value >> 16) | (value << 16); value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff); } #endif return value; } /* Open a gcov file. NAME is the name of the file to open and MODE indicates whether a new file should be created, or an existing file opened for modification. If MODE is >= 0 an existing file will be opened, if possible, and if MODE is <= 0, a new file will be created. Use MODE=0 to attempt to reopen an existing file and then fall back on creating a new one. Return zero on failure, >0 on opening an existing file and <0 on creating a new one. */ GCOV_LINKAGE int #if IN_LIBGCOV gcov_open (const char *name) #else gcov_open (const char *name, int mode) #endif { #if IN_LIBGCOV const int mode = 0; #endif #if GCOV_LOCKED struct flock s_flock; int fd; s_flock.l_type = F_WRLCK; s_flock.l_whence = SEEK_SET; s_flock.l_start = 0; s_flock.l_len = 0; /* Until EOF. 
*/ s_flock.l_pid = getpid (); #endif if (gcov_var.file) abort (); gcov_var.start = 0; gcov_var.offset = gcov_var.length = 0; gcov_var.overread = -1u; gcov_var.error = 0; #if !IN_LIBGCOV gcov_var.endian = 0; #endif #if GCOV_LOCKED if (mode > 0) fd = open (name, O_RDWR); else fd = open (name, O_RDWR | O_CREAT, 0666); if (fd < 0) return 0; while (fcntl (fd, F_SETLKW, &s_flock) && errno == EINTR) continue; gcov_var.file = fdopen (fd, "r+b"); if (!gcov_var.file) { close (fd); return 0; } if (mode > 0) gcov_var.mode = 1; else if (mode == 0) { struct stat st; if (fstat (fd, &st) < 0) { fclose (gcov_var.file); gcov_var.file = 0; return 0; } if (st.st_size != 0) gcov_var.mode = 1; else gcov_var.mode = mode * 2 + 1; } else gcov_var.mode = mode * 2 + 1; #else if (mode >= 0) gcov_var.file = fopen (name, "r+b"); if (gcov_var.file) gcov_var.mode = 1; else if (mode <= 0) { gcov_var.file = fopen (name, "w+b"); if (gcov_var.file) gcov_var.mode = mode * 2 + 1; } if (!gcov_var.file) return 0; #endif setbuf (gcov_var.file, (char *)0); return 1; } /* Close the current gcov file. Flushes data to disk. Returns nonzero on failure or error flag set. */ GCOV_LINKAGE int gcov_close (void) { if (gcov_var.file) { #if !IN_GCOV if (gcov_var.offset && gcov_var.mode < 0) gcov_write_block (gcov_var.offset); #endif fclose (gcov_var.file); gcov_var.file = 0; gcov_var.length = 0; } #if !IN_LIBGCOV free (gcov_var.buffer); gcov_var.alloc = 0; gcov_var.buffer = 0; #endif gcov_var.mode = 0; return gcov_var.error; } #if !IN_LIBGCOV /* Check if MAGIC is EXPECTED. Use it to determine endianness of the file. Returns +1 for same endian, -1 for other endian and zero for not EXPECTED. */ GCOV_LINKAGE int gcov_magic (gcov_unsigned_t magic, gcov_unsigned_t expected) { if (magic == expected) return 1; magic = (magic >> 16) | (magic << 16); magic = ((magic & 0xff00ff) << 8) | ((magic >> 8) & 0xff00ff); if (magic == expected) { gcov_var.endian = 1; return -1; } return 0; } #endif #if !IN_LIBGCOV static void gcov_allocate (unsigned length) { size_t new_size = gcov_var.alloc; if (!new_size) new_size = GCOV_BLOCK_SIZE; new_size += length; new_size *= 2; gcov_var.alloc = new_size; gcov_var.buffer = xrealloc (gcov_var.buffer, new_size << 2); } #endif #if !IN_GCOV /* Write out the current block, if needs be. */ static void gcov_write_block (unsigned size) { if (fwrite (gcov_var.buffer, size << 2, 1, gcov_var.file) != 1) gcov_var.error = 1; gcov_var.start += size; gcov_var.offset -= size; } /* Allocate space to write BYTES bytes to the gcov file. Return a pointer to those bytes, or NULL on failure. */ static gcov_unsigned_t * gcov_write_words (unsigned words) { gcov_unsigned_t *result; GCOV_CHECK_WRITING (); #if IN_LIBGCOV if (gcov_var.offset >= GCOV_BLOCK_SIZE) { gcov_write_block (GCOV_BLOCK_SIZE); if (gcov_var.offset) { GCOV_CHECK (gcov_var.offset == 1); memcpy (gcov_var.buffer, gcov_var.buffer + GCOV_BLOCK_SIZE, 4); } } #else if (gcov_var.offset + words > gcov_var.alloc) gcov_allocate (gcov_var.offset + words); #endif result = &gcov_var.buffer[gcov_var.offset]; gcov_var.offset += words; return result; } /* Write unsigned VALUE to coverage file. Sets error flag appropriately. */ GCOV_LINKAGE void gcov_write_unsigned (gcov_unsigned_t value) { gcov_unsigned_t *buffer = gcov_write_words (1); buffer[0] = value; } /* Write counter VALUE to coverage file. Sets error flag appropriately. 
*/ #if IN_LIBGCOV GCOV_LINKAGE void gcov_write_counter (gcov_type value) { gcov_unsigned_t *buffer = gcov_write_words (2); buffer[0] = (gcov_unsigned_t) value; if (sizeof (value) > sizeof (gcov_unsigned_t)) buffer[1] = (gcov_unsigned_t) (value >> 32); else buffer[1] = 0; if (value < 0) gcov_var.error = -1; } #endif /* IN_LIBGCOV */ #if !IN_LIBGCOV /* Write STRING to coverage file. Sets error flag on file error, overflow flag on overflow */ GCOV_LINKAGE void gcov_write_string (const char *string) { unsigned length = 0; unsigned alloc = 0; gcov_unsigned_t *buffer; if (string) { length = strlen (string); alloc = (length + 4) >> 2; } buffer = gcov_write_words (1 + alloc); buffer[0] = alloc; buffer[alloc] = 0; memcpy (&buffer[1], string, length); } #endif #if !IN_LIBGCOV /* Write a tag TAG and reserve space for the record length. Return a value to be used for gcov_write_length. */ GCOV_LINKAGE gcov_position_t gcov_write_tag (gcov_unsigned_t tag) { gcov_position_t result = gcov_var.start + gcov_var.offset; gcov_unsigned_t *buffer = gcov_write_words (2); buffer[0] = tag; buffer[1] = 0; return result; } /* Write a record length using POSITION, which was returned by gcov_write_tag. The current file position is the end of the record, and is restored before returning. Returns nonzero on overflow. */ GCOV_LINKAGE void gcov_write_length (gcov_position_t position) { unsigned offset; gcov_unsigned_t length; gcov_unsigned_t *buffer; GCOV_CHECK_WRITING (); GCOV_CHECK (position + 2 <= gcov_var.start + gcov_var.offset); GCOV_CHECK (position >= gcov_var.start); offset = position - gcov_var.start; length = gcov_var.offset - offset - 2; buffer = (gcov_unsigned_t *) &gcov_var.buffer[offset]; buffer[1] = length; if (gcov_var.offset >= GCOV_BLOCK_SIZE) gcov_write_block (gcov_var.offset); } #else /* IN_LIBGCOV */ /* Write a tag TAG and length LENGTH. */ GCOV_LINKAGE void gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length) { gcov_unsigned_t *buffer = gcov_write_words (2); buffer[0] = tag; buffer[1] = length; } /* Write a summary structure to the gcov file. Return nonzero on overflow. */ GCOV_LINKAGE void gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary) { unsigned ix; const struct gcov_ctr_summary *csum; gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH); gcov_write_unsigned (summary->checksum); for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++) { gcov_write_unsigned (csum->num); gcov_write_unsigned (csum->runs); gcov_write_counter (csum->sum_all); gcov_write_counter (csum->run_max); gcov_write_counter (csum->sum_max); } } #endif /* IN_LIBGCOV */ #endif /*!IN_GCOV */ /* Return a pointer to read BYTES bytes from the gcov file. Returns NULL on failure (read past EOF). 
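 (That is gcov_read_words, defined just below; first, the write side
   in miniature.)  */

/* Illustrative sketch, not part of the original source: the shape of
   writing one variable-length record from the compiler, using the
   routines defined above -- reserve the header with gcov_write_tag,
   emit the items, then backpatch the length with gcov_write_length.
   The function name and payload are made up.  */
#if !IN_GCOV && !IN_LIBGCOV
static inline void
example_write_record (gcov_unsigned_t tag, gcov_unsigned_t item,
                      const char *name)
{
  gcov_position_t position = gcov_write_tag (tag);

  gcov_write_unsigned (item);
  gcov_write_string (name);
  gcov_write_length (position);   /* fill in the record length */
}
#endif

/* Return a pointer to read the requested words from the gcov file.
   Returns NULL on failure (read past EOF).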
*/ static const gcov_unsigned_t * gcov_read_words (unsigned words) { const gcov_unsigned_t *result; unsigned excess = gcov_var.length - gcov_var.offset; GCOV_CHECK_READING (); if (excess < words) { gcov_var.start += gcov_var.offset; #if IN_LIBGCOV if (excess) { GCOV_CHECK (excess == 1); memcpy (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, 4); } #else memmove (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, excess * 4); #endif gcov_var.offset = 0; gcov_var.length = excess; #if IN_LIBGCOV GCOV_CHECK (!gcov_var.length || gcov_var.length == 1); excess = GCOV_BLOCK_SIZE; #else if (gcov_var.length + words > gcov_var.alloc) gcov_allocate (gcov_var.length + words); excess = gcov_var.alloc - gcov_var.length; #endif excess = fread (gcov_var.buffer + gcov_var.length, 1, excess << 2, gcov_var.file) >> 2; gcov_var.length += excess; if (gcov_var.length < words) { gcov_var.overread += words - gcov_var.length; gcov_var.length = 0; return 0; } } result = &gcov_var.buffer[gcov_var.offset]; gcov_var.offset += words; return result; } /* Read unsigned value from a coverage file. Sets error flag on file error, overflow flag on overflow */ GCOV_LINKAGE gcov_unsigned_t gcov_read_unsigned (void) { gcov_unsigned_t value; const gcov_unsigned_t *buffer = gcov_read_words (1); if (!buffer) return 0; value = from_file (buffer[0]); return value; } /* Read counter value from a coverage file. Sets error flag on file error, overflow flag on overflow */ GCOV_LINKAGE gcov_type gcov_read_counter (void) { gcov_type value; const gcov_unsigned_t *buffer = gcov_read_words (2); if (!buffer) return 0; value = from_file (buffer[0]); if (sizeof (value) > sizeof (gcov_unsigned_t)) value |= ((gcov_type) from_file (buffer[1])) << 32; else if (buffer[1]) gcov_var.error = -1; if (value < 0) gcov_var.error = -1; return value; } /* Read string from coverage file. Returns a pointer to a static buffer, or NULL on empty string. You must copy the string before calling another gcov function. */ #if !IN_LIBGCOV GCOV_LINKAGE const char * gcov_read_string (void) { unsigned length = gcov_read_unsigned (); if (!length) return 0; return (const char *) gcov_read_words (length); } #endif GCOV_LINKAGE void gcov_read_summary (struct gcov_summary *summary) { unsigned ix; struct gcov_ctr_summary *csum; summary->checksum = gcov_read_unsigned (); for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++) { csum->num = gcov_read_unsigned (); csum->runs = gcov_read_unsigned (); csum->sum_all = gcov_read_counter (); csum->run_max = gcov_read_counter (); csum->sum_max = gcov_read_counter (); } } #if !IN_LIBGCOV /* Reset to a known position. BASE should have been obtained from gcov_position, LENGTH should be a record length. */ GCOV_LINKAGE void gcov_sync (gcov_position_t base, gcov_unsigned_t length) { GCOV_CHECK_READING (); base += length; if (base - gcov_var.start <= gcov_var.length) gcov_var.offset = base - gcov_var.start; else { gcov_var.offset = gcov_var.length = 0; fseek (gcov_var.file, base << 2, SEEK_SET); gcov_var.start = ftell (gcov_var.file) >> 2; } } #endif #if IN_LIBGCOV /* Move to the a set position in a gcov file. BASE is zero to move to the end, and nonzero to move to that position. */ GCOV_LINKAGE void gcov_seek (gcov_position_t base) { GCOV_CHECK_WRITING (); if (gcov_var.offset) gcov_write_block (gcov_var.offset); fseek (gcov_var.file, base << 2, base ? SEEK_SET : SEEK_END); gcov_var.start = ftell (gcov_var.file) >> 2; } #endif #if IN_GCOV > 0 /* Return the modification time of the current gcov file. 
*/ GCOV_LINKAGE time_t gcov_time (void) { struct stat status; if (fstat (fileno (gcov_var.file), &status)) return 0; else return status.st_mtime; } #endif /* IN_GCOV */ struct function_list { struct function_list *next; /* next function */ unsigned ident; /* function ident */ unsigned checksum; /* function checksum */ unsigned n_ctrs[GCOV_COUNTERS];/* number of counters. */ }; /* Counts information for a function. */ typedef struct counts_entry { /* We hash by */ unsigned ident; unsigned ctr; /* Store */ unsigned checksum; gcov_type *counts; struct gcov_ctr_summary summary; /* Workspace */ struct counts_entry *chain; } counts_entry_t; static struct function_list *functions_head = 0; static struct function_list **functions_tail = &functions_head; static unsigned no_coverage = 0; /* Cumulative counter information for whole program. */ static unsigned prg_ctr_mask; /* Mask of counter types generated. */ static unsigned prg_n_ctrs[GCOV_COUNTERS]; /* Total counters allocated. */ /* Counter information for current function. */ static unsigned fn_ctr_mask; /* Mask of counters used. */ static unsigned fn_n_ctrs[GCOV_COUNTERS]; /* Counters allocated. */ static unsigned fn_b_ctrs[GCOV_COUNTERS]; /* Allocation base. */ /* Name of the output file for coverage output file. */ static char *bbg_file_name; static unsigned bbg_file_opened; static int bbg_function_announced; /* Name of the count data file. */ static char *da_file_name; /* Hash table of count data. */ static htab_t counts_hash = NULL; /* Trees representing the counter table arrays. */ static GTY(()) tree tree_ctr_tables[GCOV_COUNTERS]; /* The names of the counter tables. Not used if we're generating counters at tree level. */ static GTY(()) rtx ctr_labels[GCOV_COUNTERS]; /* The names of merge functions for counters. */ static const char *const ctr_merge_functions[GCOV_COUNTERS] = GCOV_MERGE_FUNCTIONS; static const char *const ctr_names[GCOV_COUNTERS] = GCOV_COUNTER_NAMES; /* Forward declarations. */ static hashval_t htab_counts_entry_hash (const void *); static int htab_counts_entry_eq (const void *, const void *); static void htab_counts_entry_del (void *); static void read_counts_file (void); static unsigned compute_checksum (void); static unsigned coverage_checksum_string (unsigned, const char *); static tree build_fn_info_type (unsigned); static tree build_fn_info_value (const struct function_list *, tree); static tree build_ctr_info_type (void); static tree build_ctr_info_value (unsigned, tree); static tree build_gcov_info (void); static void create_coverage (void); static hashval_t htab_counts_entry_hash (const void *of) { const counts_entry_t *entry = of; return entry->ident * GCOV_COUNTERS + entry->ctr; } static int htab_counts_entry_eq (const void *of1, const void *of2) { const counts_entry_t *entry1 = of1; const counts_entry_t *entry2 = of2; return entry1->ident == entry2->ident && entry1->ctr == entry2->ctr; } static void htab_counts_entry_del (void *of) { counts_entry_t *entry = of; free (entry->counts); free (entry); } /* Read in the counts file, if available. 
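 (That is read_counts_file, just below; first, a note on the tag
   arithmetic it relies on.)  */

/* Illustrative sketch, not part of the original source: counter tags
   are spaced 1 << 17 apart, so each counter index lands on the next
   odd value in the tag's counter-level byte, and GCOV_COUNTER_FOR_TAG
   inverts the mapping.  The function name is hypothetical.  */
static inline gcov_unsigned_t
example_counter_tag (unsigned counter)
{
  gcov_unsigned_t tag = GCOV_TAG_FOR_COUNTER (counter);

  /* For any counter below GCOV_COUNTERS, GCOV_TAG_IS_COUNTER (tag)
     holds and GCOV_COUNTER_FOR_TAG recovers the index.  */
  GCOV_CHECK (GCOV_COUNTER_FOR_TAG (tag) == counter);
  return tag;
}

/* Read in the counts file, if available.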
*/ static void read_counts_file (void) { gcov_unsigned_t fn_ident = 0; gcov_unsigned_t checksum = -1; counts_entry_t *summaried = NULL; unsigned seen_summary = 0; gcov_unsigned_t tag; int is_error = 0; if (!gcov_open (da_file_name, 1)) return; if (!gcov_magic (gcov_read_unsigned (), GCOV_DATA_MAGIC)) { warning ("`%s' is not a gcov data file", da_file_name); gcov_close (); return; } else if ((tag = gcov_read_unsigned ()) != GCOV_VERSION) { char v[4], e[4]; GCOV_UNSIGNED2STRING (v, tag); GCOV_UNSIGNED2STRING (e, GCOV_VERSION); warning ("`%s' is version `%.*s', expected version `%.*s'", da_file_name, 4, v, 4, e); gcov_close (); return; } /* Read and discard the stamp. */ gcov_read_unsigned (); counts_hash = htab_create (10, htab_counts_entry_hash, htab_counts_entry_eq, htab_counts_entry_del); while ((tag = gcov_read_unsigned ())) { gcov_unsigned_t length; gcov_position_t offset; length = gcov_read_unsigned (); offset = gcov_position (); if (tag == GCOV_TAG_FUNCTION) { fn_ident = gcov_read_unsigned (); checksum = gcov_read_unsigned (); if (seen_summary) { /* We have already seen a summary, this means that this new function begins a new set of program runs. We must unlink the summaried chain. */ counts_entry_t *entry, *chain; for (entry = summaried; entry; entry = chain) { chain = entry->chain; entry->chain = NULL; } summaried = NULL; seen_summary = 0; } } else if (tag == GCOV_TAG_PROGRAM_SUMMARY) { counts_entry_t *entry; struct gcov_summary summary; gcov_read_summary (&summary); seen_summary = 1; for (entry = summaried; entry; entry = entry->chain) { struct gcov_ctr_summary *csum = &summary.ctrs[entry->ctr]; entry->summary.runs += csum->runs; entry->summary.sum_all += csum->sum_all; if (entry->summary.run_max < csum->run_max) entry->summary.run_max = csum->run_max; entry->summary.sum_max += csum->sum_max; } } else if (GCOV_TAG_IS_COUNTER (tag) && fn_ident) { counts_entry_t **slot, *entry, elt; unsigned n_counts = GCOV_TAG_COUNTER_NUM (length); unsigned ix; elt.ident = fn_ident; elt.ctr = GCOV_COUNTER_FOR_TAG (tag); slot = (counts_entry_t **) htab_find_slot (counts_hash, &elt, INSERT); entry = *slot; if (!entry) { *slot = entry = xcalloc (1, sizeof (counts_entry_t)); entry->ident = elt.ident; entry->ctr = elt.ctr; entry->checksum = checksum; entry->summary.num = n_counts; entry->counts = xcalloc (n_counts, sizeof (gcov_type)); } else if (entry->checksum != checksum) { error ("coverage mismatch for function %u while reading execution counters.", fn_ident); error ("checksum is %x instead of %x", entry->checksum, checksum); htab_delete (counts_hash); break; } else if (entry->summary.num != n_counts) { error ("coverage mismatch for function %u while reading execution counters.", fn_ident); error ("number of counters is %d instead of %d", entry->summary.num, n_counts); htab_delete (counts_hash); break; } else if (elt.ctr >= GCOV_COUNTERS_SUMMABLE) { error ("cannot merge separate %s counters for function %u", ctr_names[elt.ctr], fn_ident); goto skip_merge; } if (elt.ctr < GCOV_COUNTERS_SUMMABLE /* This should always be true for a just allocated entry, and always false for an existing one. Check this way, in case the gcov file is corrupt. */ && (!entry->chain || summaried != entry)) { entry->chain = summaried; summaried = entry; } for (ix = 0; ix != n_counts; ix++) entry->counts[ix] += gcov_read_counter (); skip_merge:; } gcov_sync (offset, length); if ((is_error = gcov_is_error ())) break; } if (!gcov_is_eof ()) { error (is_error < 0 ? 
"`%s' has overflowed" : "`%s' is corrupted", da_file_name); htab_delete (counts_hash); } gcov_close (); } /* Returns the counters for a particular tag. */ gcov_type * get_coverage_counts (unsigned counter, unsigned expected, const struct gcov_ctr_summary **summary) { counts_entry_t *entry, elt; gcov_unsigned_t checksum = -1; /* No hash table, no counts. */ if (!counts_hash) { static int warned = 0; if (!warned++) inform ((flag_guess_branch_prob ? "file %s not found, execution counts estimated" : "file %s not found, execution counts assumed to be zero"), da_file_name); return NULL; } elt.ident = current_function_funcdef_no + 1; elt.ctr = counter; entry = htab_find (counts_hash, &elt); if (!entry) { warning ("no coverage for function '%s' found.", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl))); return 0; } checksum = compute_checksum (); if (entry->checksum != checksum) { error ("coverage mismatch for function '%s' while reading counter '%s'.", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl)), ctr_names[counter]); error ("checksum is %x instead of %x", entry->checksum, checksum); return 0; } else if (entry->summary.num != expected) { error ("coverage mismatch for function '%s' while reading counter '%s'.", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl)), ctr_names[counter]); error ("number of counters is %d instead of %d", entry->summary.num, expected); return 0; } if (summary) *summary = &entry->summary; return entry->counts; } /* Allocate NUM counters of type COUNTER. Returns nonzero if the allocation succeeded. */ int coverage_counter_alloc (unsigned counter, unsigned num) { if (no_coverage) return 0; if (!num) return 1; if (!tree_ctr_tables[counter]) { /* Generate and save a copy of this so it can be shared. */ /* We don't know the size yet; make it big enough that nobody will make any clever transformation on it. */ char buf[20]; tree domain_tree = build_index_type (build_int_2 (1000, 0)); /* replaced later */ tree gcov_type_array_type = build_array_type (GCOV_TYPE_NODE, domain_tree); tree_ctr_tables[counter] = build_decl (VAR_DECL, NULL_TREE, gcov_type_array_type); TREE_STATIC (tree_ctr_tables[counter]) = 1; ASM_GENERATE_INTERNAL_LABEL (buf, "LPBX", counter + 1); DECL_NAME (tree_ctr_tables[counter]) = get_identifier (buf); DECL_ALIGN (tree_ctr_tables[counter]) = TYPE_ALIGN (GCOV_TYPE_NODE); } fn_b_ctrs[counter] = fn_n_ctrs[counter]; fn_n_ctrs[counter] += num; fn_ctr_mask |= 1 << counter; return 1; } /* Generate a MEM rtl to access COUNTER NO. */ rtx rtl_coverage_counter_ref (unsigned counter, unsigned no) { unsigned gcov_size = tree_low_cst (TYPE_SIZE (GCOV_TYPE_NODE), 1); enum machine_mode mode = mode_for_size (gcov_size, MODE_INT, 0); rtx ref; if (no >= fn_n_ctrs[counter] - fn_b_ctrs[counter]) abort (); no += prg_n_ctrs[counter] + fn_b_ctrs[counter]; if (!ctr_labels[counter]) { ctr_labels[counter] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (IDENTIFIER_POINTER (DECL_NAME (tree_ctr_tables[counter])))); SYMBOL_REF_FLAGS (ctr_labels[counter]) = SYMBOL_FLAG_LOCAL; } ref = plus_constant (ctr_labels[counter], gcov_size / BITS_PER_UNIT * no); ref = gen_rtx_MEM (mode, ref); set_mem_alias_set (ref, new_alias_set ()); MEM_NOTRAP_P (ref) = 1; return ref; } /* Generate a tree to access COUNTER NO. 
*/ tree tree_coverage_counter_ref (unsigned counter, unsigned no) { tree domain_type = TYPE_DOMAIN (TREE_TYPE (tree_ctr_tables[counter])); if (no >= fn_n_ctrs[counter] - fn_b_ctrs[counter]) abort (); no += prg_n_ctrs[counter] + fn_b_ctrs[counter]; /* "no" here is an array index, scaled to bytes later. */ return build (ARRAY_REF, GCOV_TYPE_NODE, tree_ctr_tables[counter], fold_convert (domain_type, build_int_2 (no, 0)), TYPE_MIN_VALUE (domain_type), size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (GCOV_TYPE_NODE), size_int (TYPE_ALIGN (GCOV_TYPE_NODE)))); } /* Generate a checksum for a string. CHKSUM is the current checksum. */ static unsigned coverage_checksum_string (unsigned chksum, const char *string) { int i; char *dup = NULL; /* Look for everything that looks if it were produced by get_file_function_name_long and zero out the second part that may result from flag_random_seed. This is not critical as the checksums are used only for sanity checking. */ for (i = 0; string[i]; i++) { if (!strncmp (string + i, "_GLOBAL__", 9)) for (i = i + 9; string[i]; i++) if (string[i]=='_') { int y; unsigned seed; for (y = 1; y < 9; y++) if (!(string[i + y] >= '0' && string[i + y] <= '9') && !(string[i + y] >= 'A' && string[i + y] <= 'F')) break; if (y != 9 || string[i + 9] != '_') continue; for (y = 10; y < 18; y++) if (!(string[i + y] >= '0' && string[i + y] <= '9') && !(string[i + y] >= 'A' && string[i + y] <= 'F')) break; if (y != 18) continue; if (!sscanf (string + i + 10, "%X", &seed)) abort (); if (seed != crc32_string (0, flag_random_seed)) continue; string = dup = xstrdup (string); for (y = 10; y < 18; y++) dup[i + y] = '0'; break; } break; } chksum = crc32_string (chksum, string); if (dup) free (dup); return chksum; } /* Compute checksum for the current function. We generate a CRC32. */ static unsigned compute_checksum (void) { expanded_location xloc = expand_location (DECL_SOURCE_LOCATION (current_function_decl)); unsigned chksum = xloc.line; chksum = coverage_checksum_string (chksum, xloc.file); chksum = coverage_checksum_string (chksum, IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl))); return chksum; } /* Begin output to the graph file for the current function. Opens the output file, if not already done. Writes the function header, if not already done. Returns nonzero if data should be output. */ int coverage_begin_output (void) { if (no_coverage) return 0; if (!bbg_function_announced) { expanded_location xloc = expand_location (DECL_SOURCE_LOCATION (current_function_decl)); unsigned long offset; if (!bbg_file_opened) { if (!gcov_open (bbg_file_name, -1)) error ("cannot open %s", bbg_file_name); else { gcov_write_unsigned (GCOV_NOTE_MAGIC); gcov_write_unsigned (GCOV_VERSION); gcov_write_unsigned (local_tick); } bbg_file_opened = 1; } /* Announce function */ offset = gcov_write_tag (GCOV_TAG_FUNCTION); gcov_write_unsigned (current_function_funcdef_no + 1); gcov_write_unsigned (compute_checksum ()); gcov_write_string (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl))); gcov_write_string (xloc.file); gcov_write_unsigned (xloc.line); gcov_write_length (offset); bbg_function_announced = 1; } return !gcov_is_error (); } /* Finish coverage data for the current function. Verify no output error has occurred. Save function coverage counts. 
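 (That is coverage_end_function, just below; first, a sketch of how the
   counters it accounts for are requested in the first place.)  */

/* Illustrative sketch, not part of the original source: the
   per-function calling convention stated in coverage.h above.  A pass
   announces the function with coverage_begin_output, reserves counters
   with coverage_counter_alloc, refers to them by index within that
   allocation, and finally calls coverage_end_function.  The function
   name here is hypothetical; the real callers are in the profiling
   code.  */
static inline rtx
example_arc_counter_ref (unsigned arc_index, unsigned n_arcs)
{
  if (!coverage_counter_alloc (GCOV_COUNTER_ARCS, n_arcs))
    return NULL_RTX;    /* coverage is disabled for this function */
  return rtl_coverage_counter_ref (GCOV_COUNTER_ARCS, arc_index);
}

/* Finish coverage data for the current function.  Verify no output
   error has occurred.  Save function coverage counts.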
*/ void coverage_end_function (void) { unsigned i; if (bbg_file_opened > 1 && gcov_is_error ()) { warning ("error writing `%s'", bbg_file_name); bbg_file_opened = -1; } if (fn_ctr_mask) { struct function_list *item; item = xmalloc (sizeof (struct function_list)); *functions_tail = item; functions_tail = &item->next; item->next = 0; item->ident = current_function_funcdef_no + 1; item->checksum = compute_checksum (); for (i = 0; i != GCOV_COUNTERS; i++) { item->n_ctrs[i] = fn_n_ctrs[i]; prg_n_ctrs[i] += fn_n_ctrs[i]; fn_n_ctrs[i] = fn_b_ctrs[i] = 0; } prg_ctr_mask |= fn_ctr_mask; fn_ctr_mask = 0; } bbg_function_announced = 0; } /* Creates the gcov_fn_info RECORD_TYPE. */ static tree build_fn_info_type (unsigned int counters) { tree type = lang_hooks.types.make_type (RECORD_TYPE); tree field, fields; tree array_type; /* ident */ fields = build_decl (FIELD_DECL, NULL_TREE, unsigned_intSI_type_node); /* checksum */ field = build_decl (FIELD_DECL, NULL_TREE, unsigned_intSI_type_node); TREE_CHAIN (field) = fields; fields = field; array_type = build_index_type (build_int_2 (counters - 1, 0)); array_type = build_array_type (unsigned_type_node, array_type); /* counters */ field = build_decl (FIELD_DECL, NULL_TREE, array_type); TREE_CHAIN (field) = fields; fields = field; finish_builtin_struct (type, "__gcov_fn_info", fields, NULL_TREE); return type; } /* Creates a CONSTRUCTOR for a gcov_fn_info. FUNCTION is the function being processed and TYPE is the gcov_fn_info RECORD_TYPE. */ static tree build_fn_info_value (const struct function_list *function, tree type) { tree value = NULL_TREE; tree fields = TYPE_FIELDS (type); unsigned ix; tree array_value = NULL_TREE; /* ident */ value = tree_cons (fields, convert (unsigned_intSI_type_node, build_int_2 (function->ident, 0)), value); fields = TREE_CHAIN (fields); /* checksum */ value = tree_cons (fields, convert (unsigned_intSI_type_node, build_int_2 (function->checksum, 0)), value); fields = TREE_CHAIN (fields); /* counters */ for (ix = 0; ix != GCOV_COUNTERS; ix++) if (prg_ctr_mask & (1 << ix)) { tree counters = convert (unsigned_type_node, build_int_2 (function->n_ctrs[ix], 0)); array_value = tree_cons (NULL_TREE, counters, array_value); } array_value = build_constructor (TREE_TYPE (fields), nreverse (array_value)); value = tree_cons (fields, array_value, value); value = build_constructor (type, nreverse (value)); return value; } /* Creates the gcov_ctr_info RECORD_TYPE. */ static tree build_ctr_info_type (void) { tree type = lang_hooks.types.make_type (RECORD_TYPE); tree field, fields = NULL_TREE; tree gcov_ptr_type = build_pointer_type (GCOV_TYPE_NODE); tree gcov_merge_fn_type; /* counters */ field = build_decl (FIELD_DECL, NULL_TREE, unsigned_intSI_type_node); TREE_CHAIN (field) = fields; fields = field; /* values */ field = build_decl (FIELD_DECL, NULL_TREE, gcov_ptr_type); TREE_CHAIN (field) = fields; fields = field; /* merge */ gcov_merge_fn_type = build_function_type_list (void_type_node, gcov_ptr_type, unsigned_type_node, NULL_TREE); field = build_decl (FIELD_DECL, NULL_TREE, build_pointer_type (gcov_merge_fn_type)); TREE_CHAIN (field) = fields; fields = field; finish_builtin_struct (type, "__gcov_ctr_info", fields, NULL_TREE); return type; } /* Creates a CONSTRUCTOR for a gcov_ctr_info. COUNTER is the counter being processed and TYPE is the gcov_ctr_info RECORD_TYPE. 
*/ static tree build_ctr_info_value (unsigned int counter, tree type) { tree value = NULL_TREE; tree fields = TYPE_FIELDS (type); tree fn; /* counters */ value = tree_cons (fields, convert (unsigned_intSI_type_node, build_int_2 (prg_n_ctrs[counter], 0)), value); fields = TREE_CHAIN (fields); if (prg_n_ctrs[counter]) { tree array_type; array_type = build_index_type (build_int_2 (prg_n_ctrs[counter] - 1, 0)); array_type = build_array_type (TREE_TYPE (TREE_TYPE (fields)), array_type); TREE_TYPE (tree_ctr_tables[counter]) = array_type; DECL_SIZE (tree_ctr_tables[counter]) = TYPE_SIZE (array_type); DECL_SIZE_UNIT (tree_ctr_tables[counter]) = TYPE_SIZE_UNIT (array_type); assemble_variable (tree_ctr_tables[counter], 0, 0, 0); value = tree_cons (fields, build1 (ADDR_EXPR, TREE_TYPE (fields), tree_ctr_tables[counter]), value); } else value = tree_cons (fields, null_pointer_node, value); fields = TREE_CHAIN (fields); fn = build_decl (FUNCTION_DECL, get_identifier (ctr_merge_functions[counter]), TREE_TYPE (TREE_TYPE (fields))); DECL_EXTERNAL (fn) = 1; TREE_PUBLIC (fn) = 1; DECL_ARTIFICIAL (fn) = 1; TREE_NOTHROW (fn) = 1; value = tree_cons (fields, build1 (ADDR_EXPR, TREE_TYPE (fields), fn), value); value = build_constructor (type, nreverse (value)); return value; } /* Creates the gcov_info RECORD_TYPE and initializer for it. Returns a CONSTRUCTOR. */ static tree build_gcov_info (void) { unsigned n_ctr_types, ix; tree type, const_type; tree fn_info_type, fn_info_value = NULL_TREE; tree fn_info_ptr_type; tree ctr_info_type, ctr_info_ary_type, ctr_info_value = NULL_TREE; tree field, fields = NULL_TREE; tree value = NULL_TREE; tree filename_string; char *filename; int filename_len; unsigned n_fns; const struct function_list *fn; tree string_type; /* Count the number of active counters. */ for (n_ctr_types = 0, ix = 0; ix != GCOV_COUNTERS; ix++) if (prg_ctr_mask & (1 << ix)) n_ctr_types++; type = lang_hooks.types.make_type (RECORD_TYPE); const_type = build_qualified_type (type, TYPE_QUAL_CONST); /* Version ident */ field = build_decl (FIELD_DECL, NULL_TREE, unsigned_intSI_type_node); TREE_CHAIN (field) = fields; fields = field; value = tree_cons (field, convert (unsigned_intSI_type_node, build_int_2 (GCOV_VERSION, 0)), value); /* next -- NULL */ field = build_decl (FIELD_DECL, NULL_TREE, build_pointer_type (const_type)); TREE_CHAIN (field) = fields; fields = field; value = tree_cons (field, null_pointer_node, value); /* stamp */ field = build_decl (FIELD_DECL, NULL_TREE, unsigned_intSI_type_node); TREE_CHAIN (field) = fields; fields = field; value = tree_cons (field, convert (unsigned_intSI_type_node, build_int_2 (local_tick, 0)), value); /* Filename */ string_type = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST)); field = build_decl (FIELD_DECL, NULL_TREE, string_type); TREE_CHAIN (field) = fields; fields = field; filename = getpwd (); filename = (filename && da_file_name[0] != '/' ? concat (filename, "/", da_file_name, NULL) : da_file_name); filename_len = strlen (filename); filename_string = build_string (filename_len + 1, filename); if (filename != da_file_name) free (filename); TREE_TYPE (filename_string) = build_array_type (char_type_node, build_index_type (build_int_2 (filename_len, 0))); value = tree_cons (field, build1 (ADDR_EXPR, string_type, filename_string), value); /* Build the fn_info type and initializer. 
*/ fn_info_type = build_fn_info_type (n_ctr_types); fn_info_ptr_type = build_pointer_type (build_qualified_type (fn_info_type, TYPE_QUAL_CONST)); for (fn = functions_head, n_fns = 0; fn; fn = fn->next, n_fns++) fn_info_value = tree_cons (NULL_TREE, build_fn_info_value (fn, fn_info_type), fn_info_value); if (n_fns) { tree array_type; array_type = build_index_type (build_int_2 (n_fns - 1, 0)); array_type = build_array_type (fn_info_type, array_type); fn_info_value = build_constructor (array_type, nreverse (fn_info_value)); fn_info_value = build1 (ADDR_EXPR, fn_info_ptr_type, fn_info_value); } else fn_info_value = null_pointer_node; /* number of functions */ field = build_decl (FIELD_DECL, NULL_TREE, unsigned_type_node); TREE_CHAIN (field) = fields; fields = field; value = tree_cons (field, convert (unsigned_type_node, build_int_2 (n_fns, 0)), value); /* fn_info table */ field = build_decl (FIELD_DECL, NULL_TREE, fn_info_ptr_type); TREE_CHAIN (field) = fields; fields = field; value = tree_cons (field, fn_info_value, value); /* counter_mask */ field = build_decl (FIELD_DECL, NULL_TREE, unsigned_type_node); TREE_CHAIN (field) = fields; fields = field; value = tree_cons (field, convert (unsigned_type_node, build_int_2 (prg_ctr_mask, 0)), value); /* counters */ ctr_info_type = build_ctr_info_type (); ctr_info_ary_type = build_index_type (build_int_2 (n_ctr_types, 0)); ctr_info_ary_type = build_array_type (ctr_info_type, ctr_info_ary_type); for (ix = 0; ix != GCOV_COUNTERS; ix++) if (prg_ctr_mask & (1 << ix)) ctr_info_value = tree_cons (NULL_TREE, build_ctr_info_value (ix, ctr_info_type), ctr_info_value); ctr_info_value = build_constructor (ctr_info_ary_type, nreverse (ctr_info_value)); field = build_decl (FIELD_DECL, NULL_TREE, ctr_info_ary_type); TREE_CHAIN (field) = fields; fields = field; value = tree_cons (field, ctr_info_value, value); finish_builtin_struct (type, "__gcov_info", fields, NULL_TREE); value = build_constructor (type, nreverse (value)); return value; } /* Write out the structure which libgcov uses to locate all the counters. The structures used here must match those defined in gcov-io.h. Write out the constructor to call __gcov_init. */ static void create_coverage (void) { tree gcov_info, gcov_init, body, t; char name_buf[32]; no_coverage = 1; /* Disable any further coverage. */ if (!prg_ctr_mask) return; t = build_gcov_info (); gcov_info = build_decl (VAR_DECL, NULL_TREE, TREE_TYPE (t)); TREE_STATIC (gcov_info) = 1; ASM_GENERATE_INTERNAL_LABEL (name_buf, "LPBX", 0); DECL_NAME (gcov_info) = get_identifier (name_buf); DECL_INITIAL (gcov_info) = t; /* Build structure. */ assemble_variable (gcov_info, 0, 0, 0); /* Build a decl for __gcov_init. */ t = build_pointer_type (TREE_TYPE (gcov_info)); t = build_function_type_list (void_type_node, t, NULL); t = build_decl (FUNCTION_DECL, get_identifier ("__gcov_init"), t); TREE_PUBLIC (t) = 1; DECL_EXTERNAL (t) = 1; gcov_init = t; /* Generate a call to __gcov_init(&gcov_info). */ body = NULL; t = build_fold_addr_expr (gcov_info); t = tree_cons (NULL, t, NULL); t = build_function_call_expr (gcov_init, t); append_to_statement_list (t, &body); /* Generate a constructor to run it. */ cgraph_build_static_cdtor ('I', body); } /* Perform file-level initialization. Read in data file, generate name of graph file. */ void coverage_init (const char *filename) { int len = strlen (filename); /* Name of da file. 
*/ da_file_name = xmalloc (len + strlen (GCOV_DATA_SUFFIX) + 1); strcpy (da_file_name, filename); strcat (da_file_name, GCOV_DATA_SUFFIX); /* Name of bbg file. */ bbg_file_name = xmalloc (len + strlen (GCOV_NOTE_SUFFIX) + 1); strcpy (bbg_file_name, filename); strcat (bbg_file_name, GCOV_NOTE_SUFFIX); read_counts_file (); } /* Performs file-level cleanup. Close graph file, generate coverage variables and constructor. */ void coverage_finish (void) { create_coverage (); if (bbg_file_opened) { int error = gcov_close (); if (error) unlink (bbg_file_name); if (!local_tick) /* Only remove the da file, if we cannot stamp it. If we can stamp it, libgcov will DTRT. */ unlink (da_file_name); } } /* Type information for coverage.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_coverage_h[] = { { &ctr_labels[0], 1 * (GCOV_COUNTERS), sizeof (ctr_labels[0]), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &tree_ctr_tables[0], 1 * (GCOV_COUNTERS), sizeof (tree_ctr_tables[0]), >_ggc_mx_tree_node, >_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; /* Common subexpression elimination for GNU compiler. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* stdio.h must precede rtl.h for FFS. */ /* The basic idea of common subexpression elimination is to go through the code, keeping a record of expressions that would have the same value at the current scan point, and replacing expressions encountered with the cheapest equivalent expression. It is too complicated to keep track of the different possibilities when control paths merge in this code; so, at each label, we forget all that is known and start fresh. This can be described as processing each extended basic block separately. We have a separate pass to perform global CSE. Note CSE can turn a conditional or computed jump into a nop or an unconditional jump. When this occurs we arrange to run the jump optimizer after CSE to delete the unreachable code. 
We use two data structures to record the equivalent expressions: a hash table for most expressions, and a vector of "quantity numbers" to record equivalent (pseudo) registers. The use of the special data structure for registers is desirable because it is faster. It is possible because registers references contain a fairly small number, the register number, taken from a contiguously allocated series, and two register references are identical if they have the same number. General expressions do not have any such thing, so the only way to retrieve the information recorded on an expression other than a register is to keep it in a hash table. Registers and "quantity numbers": At the start of each basic block, all of the (hardware and pseudo) registers used in the function are given distinct quantity numbers to indicate their contents. During scan, when the code copies one register into another, we copy the quantity number. When a register is loaded in any other way, we allocate a new quantity number to describe the value generated by this operation. `reg_qty' records what quantity a register is currently thought of as containing. All real quantity numbers are greater than or equal to `max_reg'. If register N has not been assigned a quantity, reg_qty[N] will equal N. Quantity numbers below `max_reg' do not exist and none of the `qty_table' entries should be referenced with an index below `max_reg'. We also maintain a bidirectional chain of registers for each quantity number. The `qty_table` members `first_reg' and `last_reg', and `reg_eqv_table' members `next' and `prev' hold these chains. The first register in a chain is the one whose lifespan is least local. Among equals, it is the one that was seen first. We replace any equivalent register with that one. If two registers have the same quantity number, it must be true that REG expressions with qty_table `mode' must be in the hash table for both registers and must be in the same class. The converse is not true. Since hard registers may be referenced in any mode, two REG expressions might be equivalent in the hash table but not have the same quantity number if the quantity number of one of the registers is not the same mode as those expressions. Constants and quantity numbers When a quantity has a known constant value, that value is stored in the appropriate qty_table `const_rtx'. This is in addition to putting the constant in the hash table as is usual for non-regs. Whether a reg or a constant is preferred is determined by the configuration macro CONST_COSTS and will often depend on the constant value. In any event, expressions containing constants can be simplified, by fold_rtx. When a quantity has a known nearly constant value (such as an address of a stack slot), that value is stored in the appropriate qty_table `const_rtx'. Integer constants don't have a machine mode. However, cse determines the intended machine mode from the destination of the instruction that moves the constant. The machine mode is recorded in the hash table along with the actual RTL constant expression so that different modes are kept separate. Other expressions: To record known equivalences among expressions in general we use a hash table called `equiv_table'. It has a fixed number of buckets that contain chains of `struct table_elt' elements for expressions. These chains connect the elements whose expressions have the same hash codes. Other chains through the same elements connect the elements which currently have equivalent values. 
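A toy illustration of the quantity-number bookkeeping follows.  */

/* Illustrative toy, not part of the original source: the effect of the
   two kinds of register writes on the quantity mapping described
   above, using caller-supplied arrays instead of the real tables.  A
   register-to-register copy shares the source's quantity; any other
   store gets a fresh quantity number.  */
static inline void
example_note_register_set (int *qty_of_reg, int *next_quantity,
                           int dest_reg, int src_reg_or_minus1)
{
  if (src_reg_or_minus1 >= 0)
    /* Copying a register copies its quantity number.  */
    qty_of_reg[dest_reg] = qty_of_reg[src_reg_or_minus1];
  else
    /* Any other store gives DEST_REG a brand new quantity.  */
    qty_of_reg[dest_reg] = (*next_quantity)++;
}

/* The description continues.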
Register references in an expression are canonicalized before hashing the expression. This is done using `reg_qty' and qty_table `first_reg'. The hash code of a register reference is computed using the quantity number, not the register number. When the value of an expression changes, it is necessary to remove from the hash table not just that expression but all expressions whose values could be different as a result. 1. If the value changing is in memory, except in special cases ANYTHING referring to memory could be changed. That is because nobody knows where a pointer does not point. The function `invalidate_memory' removes what is necessary. The special cases are when the address is constant or is a constant plus a fixed register such as the frame pointer or a static chain pointer. When such addresses are stored in, we can tell exactly which other such addresses must be invalidated due to overlap. `invalidate' does this. All expressions that refer to non-constant memory addresses are also invalidated. `invalidate_memory' does this. 2. If the value changing is a register, all expressions containing references to that register, and only those, must be removed. Because searching the entire hash table for expressions that contain a register is very slow, we try to figure out when it isn't necessary. Precisely, this is necessary only when expressions have been entered in the hash table using this register, and then the value has changed, and then another expression wants to be added to refer to the register's new value. This sequence of circumstances is rare within any one basic block. The vectors `reg_tick' and `reg_in_table' are used to detect this case. reg_tick[i] is incremented whenever a value is stored in register i. reg_in_table[i] holds -1 if no references to register i have been entered in the table; otherwise, it contains the value reg_tick[i] had when the references were entered. If we want to enter a reference and reg_in_table[i] != reg_tick[i], we must scan and remove old references. Until we want to enter a new entry, the mere fact that the two vectors don't match makes the entries be ignored if anyone tries to match them. Registers themselves are entered in the hash table as well as in the equivalent-register chains. However, the vectors `reg_tick' and `reg_in_table' do not apply to expressions which are simple register references. These expressions are removed from the table immediately when they become invalid, and this can be done even if we do not immediately search for all the expressions that refer to the register. A CLOBBER rtx in an instruction invalidates its operand for further reuse. A CLOBBER or SET rtx whose operand is a MEM:BLK invalidates everything that resides in memory. Related expressions: Constant expressions that differ only by an additive integer are called related. When a constant expression is put in the table, the related expression with no constant term is also entered. These are made to point at each other so that it is possible to find out if there exists any register equivalent to an expression related to a given expression. */ /* One plus largest register number used in this function. */ static int max_reg; /* One plus largest instruction UID used in this function at time of cse_main call. */ static int max_insn_uid; /* Length of qty_table vector. We know in advance we will not need a quantity number this big. */ static int max_qty; /* Next quantity number to be allocated. This is 1 + the largest number needed so far. 
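   Returning to the `reg_tick'/`reg_in_table' protocol described above, the
   following free-standing sketch (all `toy_*' names are hypothetical, not part
   of GCC) shows the lazy-invalidation idea: a store only bumps a counter, and
   the expensive purge of stale table references is deferred until a new
   reference to the same register is about to be entered:

     enum { TOY_REG_COUNT = 16 };
     static int toy_reg_tick[TOY_REG_COUNT];      // bumped on every store to the register
     static int toy_reg_in_table[TOY_REG_COUNT];  // tick when refs were entered, or -1 if none

     static void toy_reset (void)
     {
       int r;
       for (r = 0; r < TOY_REG_COUNT; r++)
         {
           toy_reg_tick[r] = 0;
           toy_reg_in_table[r] = -1;              // nothing entered in the table yet
         }
     }

     static void toy_note_store (int r)
     {
       toy_reg_tick[r]++;                         // cheap: no table scan here
     }

     // PURGE stands in for the expensive scan done by remove_invalid_refs.
     static void toy_before_entering_ref (int r, void (*purge) (int))
     {
       if (toy_reg_in_table[r] >= 0 && toy_reg_in_table[r] != toy_reg_tick[r])
         purge (r);                               // pay for the invalidation only now
       toy_reg_in_table[r] = toy_reg_tick[r];     // later references are valid again
     }

     static int toy_ref_still_valid (int r)
     {
       return toy_reg_in_table[r] == toy_reg_tick[r];
     }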
*/ static int next_qty; /* Per-qty information tracking. `first_reg' and `last_reg' track the head and tail of the chain of registers which currently contain this quantity. `mode' contains the machine mode of this quantity. `const_rtx' holds the rtx of the constant value of this quantity, if known. A sum of the frame/arg pointer and a constant can also be entered here. When this holds a known value, `const_insn' is the insn which stored the constant value. `comparison_{code,const,qty}' are used to track when a comparison between a quantity and some constant or register has been passed. In such a case, we know the results of the comparison in case we see it again. These members record a comparison that is known to be true. `comparison_code' holds the rtx code of such a comparison, else it is set to UNKNOWN and the other two comparison members are undefined. `comparison_const' holds the constant being compared against, or zero if the comparison is not against a constant. `comparison_qty' holds the quantity being compared against when the result is known. If the comparison is not with a register, `comparison_qty' is -1. */ struct qty_table_elem { rtx const_rtx; rtx const_insn; rtx comparison_const; int comparison_qty; unsigned int first_reg, last_reg; /* The sizes of these fields should match the sizes of the code and mode fields of struct rtx_def (see rtl.h). */ ENUM_BITFIELD(rtx_code) comparison_code : 16; ENUM_BITFIELD(machine_mode) mode : 8; }; /* The table of all qtys, indexed by qty number. */ static struct qty_table_elem *qty_table; #ifdef HAVE_cc0 /* For machines that have a CC0, we do not record its value in the hash table since its use is guaranteed to be the insn immediately following its definition and any other insn is presumed to invalidate it. Instead, we store below the value last assigned to CC0. If it should happen to be a constant, it is stored in preference to the actual assigned value. In case it is a constant, we store the mode in which the constant should be interpreted. */ static rtx prev_insn_cc0; static enum machine_mode prev_insn_cc0_mode; /* Previous actual insn. 0 if at first insn of basic block. */ static rtx prev_insn; #endif /* Insn being scanned. */ static rtx this_insn; /* Indexed by register number, gives the number of the next (or previous) register in the chain of registers sharing the same value. Or -1 if this register is at the end of the chain. If reg_qty[N] == N, reg_eqv_table[N].next is undefined. */ /* Per-register equivalence chain. */ struct reg_eqv_elem { int next, prev; }; /* The table of all register equivalence chains. */ static struct reg_eqv_elem *reg_eqv_table; struct cse_reg_info { /* Next in hash chain. */ struct cse_reg_info *hash_next; /* The next cse_reg_info structure in the free or used list. */ struct cse_reg_info *next; /* Search key */ unsigned int regno; /* The quantity number of the register's current contents. */ int reg_qty; /* The number of times the register has been altered in the current basic block. */ int reg_tick; /* The REG_TICK value at which rtx's containing this register are valid in the hash table. If this does not equal the current reg_tick value, such expressions existing in the hash table are invalid. */ int reg_in_table; /* The SUBREG that was set when REG_TICK was last incremented. Set to -1 if the last store was to the whole register, not a subreg. */ unsigned int subreg_ticked; }; /* A free list of cse_reg_info entries.
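   The free list follows the usual recycle-instead-of-free pattern, so that
   records discarded at the end of one basic block can be handed out again in
   the next one. A rough free-standing sketch of that pattern (hypothetical
   `toy_*' names; plain malloc stands in for the allocator actually used here):

     #include <stdlib.h>

     struct toy_rec { struct toy_rec *next; unsigned int regno; };
     static struct toy_rec *toy_free_list;

     static struct toy_rec *toy_get_rec (void)
     {
       struct toy_rec *p = toy_free_list;
       if (p)
         toy_free_list = p->next;      // reuse a recycled record
       else
         {
           p = malloc (sizeof *p);     // otherwise allocate a fresh one
           if (!p)
             abort ();
         }
       return p;
     }

     static void toy_put_rec (struct toy_rec *p)
     {
       p->next = toy_free_list;        // push back for reuse by the next block
       toy_free_list = p;
     }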
*/ static struct cse_reg_info *cse_reg_info_free_list; /* A used list of cse_reg_info entries. */ static struct cse_reg_info *cse_reg_info_used_list; static struct cse_reg_info *cse_reg_info_used_list_end; /* A mapping from registers to cse_reg_info data structures. */ #define REGHASH_SHIFT 7 #define REGHASH_SIZE (1 << REGHASH_SHIFT) #define REGHASH_MASK (REGHASH_SIZE - 1) static struct cse_reg_info *reg_hash[REGHASH_SIZE]; #define REGHASH_FN(REGNO) \ (((REGNO) ^ ((REGNO) >> REGHASH_SHIFT)) & REGHASH_MASK) /* The last lookup we did into the cse_reg_info_tree. This allows us to cache repeated lookups. */ static unsigned int cached_regno; static struct cse_reg_info *cached_cse_reg_info; /* A HARD_REG_SET containing all the hard registers for which there is currently a REG expression in the hash table. Note the difference from the above variables, which indicate if the REG is mentioned in some expression in the table. */ static HARD_REG_SET hard_regs_in_table; /* CUID of insn that starts the basic block currently being cse-processed. */ static int cse_basic_block_start; /* CUID of insn that ends the basic block currently being cse-processed. */ static int cse_basic_block_end; /* Vector mapping INSN_UIDs to cuids. The cuids are like uids but increase monotonically always. We use them to see whether a reg is used outside a given basic block. */ static int *uid_cuid; /* Highest UID in UID_CUID. */ static int max_uid; /* Get the cuid of an insn. */ #undef INSN_CUID #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)]) /* Nonzero if this pass has made changes, and therefore it's worthwhile to run the garbage collector. */ static int cse_altered; /* Nonzero if cse has altered conditional jump insns in such a way that jump optimization should be redone. */ static int cse_jumps_altered; /* Nonzero if we put a LABEL_REF into the hash table for an INSN without a REG_LABEL, we have to rerun jump after CSE to put in the note. */ static int recorded_label_ref; /* canon_hash stores 1 in do_not_record if it notices a reference to CC0, PC, or some other volatile subexpression. */ static int do_not_record; #ifdef LOAD_EXTEND_OP /* Scratch rtl used when looking for load-extended copy of a MEM. */ static rtx memory_extend_rtx; #endif /* canon_hash stores 1 in hash_arg_in_memory if it notices a reference to memory within the expression being hashed. */ static int hash_arg_in_memory; /* The hash table contains buckets which are chains of `struct table_elt's, each recording one expression's information. That expression is in the `exp' field. The canon_exp field contains a canonical (from the point of view of alias analysis) version of the `exp' field. Those elements with the same hash code are chained in both directions through the `next_same_hash' and `prev_same_hash' fields. Each set of expressions with equivalent values are on a two-way chain through the `next_same_value' and `prev_same_value' fields, and all point with the `first_same_value' field at the first element in that chain. The chain is in order of increasing cost. Each element's cost value is in its `cost' field. The `in_memory' field is nonzero for elements that involve any reference to memory. These elements are removed whenever a write is done to an unidentified location in memory. To be safe, we assume that a memory address is unidentified unless the address is either a symbol constant or a constant plus the frame pointer or argument pointer. The `related_value' field is used to connect related expressions (that differ by adding an integer). 
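   As a free-standing sketch of that related-value device (hypothetical
   `toy_*' names; the circular chaining itself is described just below),
   entries that share a base are kept on a ring, and a lookup walks the ring
   for one that is available in register form:

     struct toy_rel { long offset; int has_reg_form; struct toy_rel *related; };

     // Link FRESH into the ring just after ANCHOR.  ANCHOR is assumed to be on
     // a ring already, possibly the one-element ring anchor->related == anchor.
     static void toy_link_related (struct toy_rel *anchor, struct toy_rel *fresh)
     {
       fresh->related = anchor->related;
       anchor->related = fresh;
     }

     // Walk the ring looking for an entry usable as "register plus offset";
     // the caller then adjusts by (start->offset - p->offset).
     static struct toy_rel *toy_find_reg_form (struct toy_rel *start)
     {
       struct toy_rel *p = start;
       do
         {
           if (p->has_reg_form)
             return p;
           p = p->related;
         }
       while (p != start);
       return 0;
     }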
The related expressions are chained in a circular fashion. `related_value' is zero for expressions for which this chain is not useful. The `cost' field stores the cost of this element's expression. The `regcost' field stores the value returned by approx_reg_cost for this element's expression. The `is_const' flag is set if the element is a constant (including a fixed address). The `flag' field is used as a temporary during some search routines. The `mode' field is usually the same as GET_MODE (`exp'), but if `exp' is a CONST_INT and has no machine mode then the `mode' field is the mode it was being used as. Each constant is recorded separately for each mode it is used with. */ struct table_elt { rtx exp; rtx canon_exp; struct table_elt *next_same_hash; struct table_elt *prev_same_hash; struct table_elt *next_same_value; struct table_elt *prev_same_value; struct table_elt *first_same_value; struct table_elt *related_value; int cost; int regcost; /* The size of this field should match the size of the mode field of struct rtx_def (see rtl.h). */ ENUM_BITFIELD(machine_mode) mode : 8; char in_memory; char is_const; char flag; }; /* We don't want a lot of buckets, because we rarely have very many things stored in the hash table, and a lot of buckets slows down a lot of loops that happen frequently. */ #define HASH_SHIFT 5 #define CSE_HASH_SIZE (1 << HASH_SHIFT) #define HASH_MASK (CSE_HASH_SIZE - 1) /* Compute hash code of X in mode M. Special-case case where X is a pseudo register (hard registers may require `do_not_record' to be set). */ #define HASH(X, M) \ ((REG_P (X) && REGNO (X) >= FIRST_PSEUDO_REGISTER \ ? (((unsigned) REG << 7) + (unsigned) REG_QTY (REGNO (X))) \ : canon_hash (X, M)) & HASH_MASK) /* Determine whether register number N is considered a fixed register for the purpose of approximating register costs. It is desirable to replace other regs with fixed regs, to reduce need for non-fixed hard regs. A reg wins if it is either the frame pointer or designated as fixed. */ #define FIXED_REGNO_P(N) \ ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \ || fixed_regs[N] || global_regs[N]) /* Compute cost of X, as stored in the `cost' field of a table_elt. Fixed hard registers and pointers into the frame are the cheapest with a cost of 0. Next come pseudos with a cost of one and other hard registers with a cost of 2. Aside from these special cases, call `rtx_cost'. */ #define CHEAP_REGNO(N) \ ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \ || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \ || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \ || ((N) < FIRST_PSEUDO_REGISTER \ && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS)) #define COST(X) (REG_P (X) ? 0 : notreg_cost (X, SET)) #define COST_IN(X,OUTER) (REG_P (X) ? 0 : notreg_cost (X, OUTER)) /* Get the info associated with register N. */ #define GET_CSE_REG_INFO(N) \ (((N) == cached_regno && cached_cse_reg_info) \ ? cached_cse_reg_info : get_cse_reg_info ((N))) /* Get the number of times this register has been updated in this basic block. */ #define REG_TICK(N) ((GET_CSE_REG_INFO (N))->reg_tick) /* Get the point at which REG was recorded in the table. */ #define REG_IN_TABLE(N) ((GET_CSE_REG_INFO (N))->reg_in_table) /* Get the SUBREG set at the last increment to REG_TICK (-1 if not a SUBREG). */ #define SUBREG_TICKED(N) ((GET_CSE_REG_INFO (N))->subreg_ticked) /* Get the quantity number for REG. 
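   Looking back at the HASH macro defined earlier in this block, here is a
   minimal free-standing sketch of its pseudo-register special case (all
   `toy_*' names are hypothetical; the `1u << 7' term stands in for the REG
   rtx-code tag).  Registers that are already known to hold the same value
   hash by their quantity number, so they land in the same bucket and can be
   found as equivalents:

     enum { TOY_HASH_SHIFT = 5,
            TOY_HASH_SIZE  = 1 << TOY_HASH_SHIFT,
            TOY_HASH_MASK  = TOY_HASH_SIZE - 1 };

     // Bucket index for a pseudo register, given its quantity number.
     // Non-register expressions would go through a structural hash instead
     // (canon_hash in the real code) before the same masking step.
     static unsigned toy_reg_bucket (unsigned qty)
     {
       unsigned h = (1u << 7) + qty;     // code tag plus the quantity number
       return h & TOY_HASH_MASK;         // fold into the 32-bucket table
     }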
*/ #define REG_QTY(N) ((GET_CSE_REG_INFO (N))->reg_qty) /* Determine if the quantity number for register X represents a valid index into the qty_table. */ #define REGNO_QTY_VALID_P(N) (REG_QTY (N) != (int) (N)) static struct table_elt *equiv_table[CSE_HASH_SIZE]; /* Chain of `struct table_elt's made so far for this function but currently removed from the table. */ static struct table_elt *free_element_chain; /* Number of `struct table_elt' structures made so far for this function. */ static int n_elements_made; /* Maximum value `n_elements_made' has had so far in this compilation for functions previously processed. */ static int max_elements_made; /* Surviving equivalence class when two equivalence classes are merged by recording the effects of a jump in the last insn. Zero if the last insn was not a conditional jump. */ static struct table_elt *last_jump_equiv_class; /* Set to the cost of a constant pool reference if one was found for a symbolic constant. If this was found, it means we should try to convert constants into constant pool entries if they don't fit in the insn. */ static int constant_pool_entries_cost; static int constant_pool_entries_regcost; /* This data describes a block that will be processed by cse_basic_block. */ struct cse_basic_block_data { /* Lowest CUID value of insns in block. */ int low_cuid; /* Highest CUID value of insns in block. */ int high_cuid; /* Total number of SETs in block. */ int nsets; /* Last insn in the block. */ rtx last; /* Size of current branch path, if any. */ int path_size; /* Current branch path, indicating which branches will be taken. */ struct branch_path { /* The branch insn. */ rtx branch; /* Whether it should be taken or not. AROUND is the same as taken except that it is used when the destination label is not preceded by a BARRIER. 
*/ enum taken {PATH_TAKEN, PATH_NOT_TAKEN, PATH_AROUND} status; } *path; }; static bool fixed_base_plus_p (rtx x); static int notreg_cost (rtx, enum rtx_code); static int approx_reg_cost_1 (rtx *, void *); static int approx_reg_cost (rtx); static int preferable (int, int, int, int); static void new_basic_block (void); static void make_new_qty (unsigned int, enum machine_mode); static void make_regs_eqv (unsigned int, unsigned int); static void delete_reg_equiv (unsigned int); static int mention_regs (rtx); static int insert_regs (rtx, struct table_elt *, int); static void remove_from_table (struct table_elt *, unsigned); static struct table_elt *lookup (rtx, unsigned, enum machine_mode); static struct table_elt *lookup_for_remove (rtx, unsigned, enum machine_mode); static rtx lookup_as_function (rtx, enum rtx_code); static struct table_elt *hash_insert (rtx, struct table_elt *, unsigned, enum machine_mode); static void merge_equiv_classes (struct table_elt *, struct table_elt *); static void invalidate (rtx, enum machine_mode); static int cse_rtx_varies_p (rtx, int); static void remove_invalid_refs (unsigned int); static void remove_invalid_subreg_refs (unsigned int, unsigned int, enum machine_mode); static void rehash_using_reg (rtx); static void invalidate_memory (void); static void invalidate_for_call (void); static rtx use_related_value (rtx, struct table_elt *); static unsigned canon_hash (rtx, enum machine_mode); static unsigned canon_hash_string (const char *); static unsigned safe_hash (rtx, enum machine_mode); static int exp_equiv_p (rtx, rtx, int, int); static rtx canon_reg (rtx, rtx); static void find_best_addr (rtx, rtx *, enum machine_mode); static enum rtx_code find_comparison_args (enum rtx_code, rtx *, rtx *, enum machine_mode *, enum machine_mode *); static rtx fold_rtx (rtx, rtx); static rtx equiv_constant (rtx); static void record_jump_equiv (rtx, int); static void record_jump_cond (enum rtx_code, enum machine_mode, rtx, rtx, int); static void cse_insn (rtx, rtx); static void cse_end_of_basic_block (rtx, struct cse_basic_block_data *, int, int, int); static int addr_affects_sp_p (rtx); static void invalidate_from_clobbers (rtx); static rtx cse_process_notes (rtx, rtx); static void cse_around_loop (rtx); static void invalidate_skipped_set (rtx, rtx, void *); static void invalidate_skipped_block (rtx); static void cse_check_loop_start (rtx, rtx, void *); static void cse_set_around_loop (rtx, rtx, rtx); static rtx cse_basic_block (rtx, rtx, struct branch_path *, int); static void count_reg_usage (rtx, int *, int); static int check_for_label_ref (rtx *, void *); extern void dump_class (struct table_elt*); static struct cse_reg_info * get_cse_reg_info (unsigned int); static int check_dependence (rtx *, void *); static void flush_hash_table (void); static bool insn_live_p (rtx, int *); static bool set_live_p (rtx, rtx, int *); static bool dead_libcall_p (rtx, int *); static int cse_change_cc_mode (rtx *, void *); static void cse_change_cc_mode_insns (rtx, rtx, rtx); static enum machine_mode cse_cc_succs (basic_block, rtx, rtx, bool); #undef RTL_HOOKS_GEN_LOWPART #define RTL_HOOKS_GEN_LOWPART gen_lowpart_if_possible static const struct rtl_hooks cse_rtl_hooks = RTL_HOOKS_INITIALIZER; /* Nonzero if X has the form (PLUS frame-pointer integer). We check for virtual regs here because the simplify_*_operation routines are called by integrate.c, which is called before virtual register instantiation. 
*/ static bool fixed_base_plus_p (rtx x) { switch (GET_CODE (x)) { case REG: if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx) return true; if (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]) return true; if (REGNO (x) >= FIRST_VIRTUAL_REGISTER && REGNO (x) <= LAST_VIRTUAL_REGISTER) return true; return false; case PLUS: if (GET_CODE (XEXP (x, 1)) != CONST_INT) return false; return fixed_base_plus_p (XEXP (x, 0)); default: return false; } } /* Dump the expressions in the equivalence class indicated by CLASSP. This function is used only for debugging. */ void dump_class (struct table_elt *classp) { struct table_elt *elt; fprintf (stderr, "Equivalence chain for "); print_rtl (stderr, classp->exp); fprintf (stderr, ": \n"); for (elt = classp->first_same_value; elt; elt = elt->next_same_value) { print_rtl (stderr, elt->exp); fprintf (stderr, "\n"); } } /* Subroutine of approx_reg_cost; called through for_each_rtx. */ static int approx_reg_cost_1 (rtx *xp, void *data) { rtx x = *xp; int *cost_p = data; if (x && REG_P (x)) { unsigned int regno = REGNO (x); if (! CHEAP_REGNO (regno)) { if (regno < FIRST_PSEUDO_REGISTER) { if (SMALL_REGISTER_CLASSES) return 1; *cost_p += 2; } else *cost_p += 1; } } return 0; } /* Return an estimate of the cost of the registers used in an rtx. This is mostly the number of different REG expressions in the rtx; however for some exceptions like fixed registers we use a cost of 0. If any other hard register reference occurs, return MAX_COST. */ static int approx_reg_cost (rtx x) { int cost = 0; if (for_each_rtx (&x, approx_reg_cost_1, (void *) &cost)) return MAX_COST; return cost; } /* Return a negative value if an rtx A, whose costs are given by COST_A and REGCOST_A, is more desirable than an rtx B. Return a positive value if A is less desirable, or 0 if the two are equally good. */ static int preferable (int cost_a, int regcost_a, int cost_b, int regcost_b) { /* First, get rid of cases involving expressions that are entirely unwanted. */ if (cost_a != cost_b) { if (cost_a == MAX_COST) return 1; if (cost_b == MAX_COST) return -1; } /* Avoid extending lifetimes of hardregs. */ if (regcost_a != regcost_b) { if (regcost_a == MAX_COST) return 1; if (regcost_b == MAX_COST) return -1; } /* Normal operation costs take precedence. */ if (cost_a != cost_b) return cost_a - cost_b; /* Only if these are identical consider effects on register pressure. */ if (regcost_a != regcost_b) return regcost_a - regcost_b; return 0; } /* Internal function, to compute cost when X is not a register; called from COST macro to keep it simple. */ static int notreg_cost (rtx x, enum rtx_code outer) { return ((GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)) && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT && GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_INT && (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) && subreg_lowpart_p (x) && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)), GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))) ? 0 : rtx_cost (x, outer) * 2); } static struct cse_reg_info * get_cse_reg_info (unsigned int regno) { struct cse_reg_info **hash_head = &reg_hash[REGHASH_FN (regno)]; struct cse_reg_info *p; for (p = *hash_head; p != NULL; p = p->hash_next) if (p->regno == regno) break; if (p == NULL) { /* Get a new cse_reg_info structure. */ if (cse_reg_info_free_list) { p = cse_reg_info_free_list; cse_reg_info_free_list = p->next; } else p = xmalloc (sizeof (struct cse_reg_info)); /* Insert into hash table.
*/ p->hash_next = *hash_head; *hash_head = p; /* Initialize it. */ p->reg_tick = 1; p->reg_in_table = -1; p->subreg_ticked = -1; p->reg_qty = regno; p->regno = regno; p->next = cse_reg_info_used_list; cse_reg_info_used_list = p; if (!cse_reg_info_used_list_end) cse_reg_info_used_list_end = p; } /* Cache this lookup; we tend to be looking up information about the same register several times in a row. */ cached_regno = regno; cached_cse_reg_info = p; return p; } /* Clear the hash table and initialize each register with its own quantity, for a new basic block. */ static void new_basic_block (void) { int i; next_qty = max_reg; /* Clear out hash table state for this pass. */ memset (reg_hash, 0, sizeof reg_hash); if (cse_reg_info_used_list) { cse_reg_info_used_list_end->next = cse_reg_info_free_list; cse_reg_info_free_list = cse_reg_info_used_list; cse_reg_info_used_list = cse_reg_info_used_list_end = 0; } cached_cse_reg_info = 0; CLEAR_HARD_REG_SET (hard_regs_in_table); /* The per-quantity values used to be initialized here, but it is much faster to initialize each as it is made in `make_new_qty'. */ for (i = 0; i < CSE_HASH_SIZE; i++) { struct table_elt *first; first = equiv_table[i]; if (first != NULL) { struct table_elt *last = first; equiv_table[i] = NULL; while (last->next_same_hash != NULL) last = last->next_same_hash; /* Now relink this entire hash chain into the free element list. */ last->next_same_hash = free_element_chain; free_element_chain = first; } } #ifdef HAVE_cc0 prev_insn = 0; prev_insn_cc0 = 0; #endif } /* Say that register REG contains a quantity in mode MODE not in any register before and initialize that quantity. */ static void make_new_qty (unsigned int reg, enum machine_mode mode) { int q; struct qty_table_elem *ent; struct reg_eqv_elem *eqv; if (next_qty >= max_qty) abort (); q = REG_QTY (reg) = next_qty++; ent = &qty_table[q]; ent->first_reg = reg; ent->last_reg = reg; ent->mode = mode; ent->const_rtx = ent->const_insn = NULL_RTX; ent->comparison_code = UNKNOWN; eqv = &reg_eqv_table[reg]; eqv->next = eqv->prev = -1; } /* Make reg NEW equivalent to reg OLD. OLD is not changing; NEW is. */ static void make_regs_eqv (unsigned int new, unsigned int old) { unsigned int lastr, firstr; int q = REG_QTY (old); struct qty_table_elem *ent; ent = &qty_table[q]; /* Nothing should become eqv until it has a "non-invalid" qty number. */ if (! REGNO_QTY_VALID_P (old)) abort (); REG_QTY (new) = q; firstr = ent->first_reg; lastr = ent->last_reg; /* Prefer fixed hard registers to anything. Prefer pseudo regs to other hard regs. Among pseudos, if NEW will live longer than any other reg of the same qty, and that is beyond the current basic block, make it the new canonical replacement for this qty. */ if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr)) /* Certain fixed registers might be of the class NO_REGS. This means that not only can they not be allocated by the compiler, but they cannot be used in substitutions or canonicalizations either.
*/ && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS) && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new)) || (new >= FIRST_PSEUDO_REGISTER && (firstr < FIRST_PSEUDO_REGISTER || ((uid_cuid[REGNO_LAST_UID (new)] > cse_basic_block_end || (uid_cuid[REGNO_FIRST_UID (new)] < cse_basic_block_start)) && (uid_cuid[REGNO_LAST_UID (new)] > uid_cuid[REGNO_LAST_UID (firstr)])))))) { reg_eqv_table[firstr].prev = new; reg_eqv_table[new].next = firstr; reg_eqv_table[new].prev = -1; ent->first_reg = new; } else { /* If NEW is a hard reg (known to be non-fixed), insert at end. Otherwise, insert before any non-fixed hard regs that are at the end. Registers of class NO_REGS cannot be used as an equivalent for anything. */ while (lastr < FIRST_PSEUDO_REGISTER && reg_eqv_table[lastr].prev >= 0 && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr)) && new >= FIRST_PSEUDO_REGISTER) lastr = reg_eqv_table[lastr].prev; reg_eqv_table[new].next = reg_eqv_table[lastr].next; if (reg_eqv_table[lastr].next >= 0) reg_eqv_table[reg_eqv_table[lastr].next].prev = new; else qty_table[q].last_reg = new; reg_eqv_table[lastr].next = new; reg_eqv_table[new].prev = lastr; } } /* Remove REG from its equivalence class. */ static void delete_reg_equiv (unsigned int reg) { struct qty_table_elem *ent; int q = REG_QTY (reg); int p, n; /* If invalid, do nothing. */ if (q == (int) reg) return; ent = &qty_table[q]; p = reg_eqv_table[reg].prev; n = reg_eqv_table[reg].next; if (n != -1) reg_eqv_table[n].prev = p; else ent->last_reg = p; if (p != -1) reg_eqv_table[p].next = n; else ent->first_reg = n; REG_QTY (reg) = reg; } /* Remove any invalid expressions from the hash table that refer to any of the registers contained in expression X. Make sure that newly inserted references to those registers as subexpressions will be considered valid. mention_regs is not called when a register itself is being stored in the table. Return 1 if we have done something that may have changed the hash code of X. */ static int mention_regs (rtx x) { enum rtx_code code; int i, j; const char *fmt; int changed = 0; if (x == 0) return 0; code = GET_CODE (x); if (code == REG) { unsigned int regno = REGNO (x); unsigned int endregno = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1 : hard_regno_nregs[regno][GET_MODE (x)]); unsigned int i; for (i = regno; i < endregno; i++) { if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i)) remove_invalid_refs (i); REG_IN_TABLE (i) = REG_TICK (i); SUBREG_TICKED (i) = -1; } return 0; } /* If this is a SUBREG, we don't want to discard other SUBREGs of the same pseudo if they don't use overlapping words. We handle only pseudos here for simplicity. */ if (code == SUBREG && REG_P (SUBREG_REG (x)) && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER) { unsigned int i = REGNO (SUBREG_REG (x)); if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i)) { /* If REG_IN_TABLE (i) differs from REG_TICK (i) by one, and the last store to this register really stored into this subreg, then remove the memory of this subreg. Otherwise, remove any memory of the entire register and all its subregs from the table. */ if (REG_TICK (i) - REG_IN_TABLE (i) > 1 || SUBREG_TICKED (i) != REGNO (SUBREG_REG (x))) remove_invalid_refs (i); else remove_invalid_subreg_refs (i, SUBREG_BYTE (x), GET_MODE (x)); } REG_IN_TABLE (i) = REG_TICK (i); SUBREG_TICKED (i) = REGNO (SUBREG_REG (x)); return 0; } /* If X is a comparison or a COMPARE and either operand is a register that does not have a quantity, give it one. 
This is so that a later call to record_jump_equiv won't cause X to be assigned a different hash code and not found in the table after that call. It is not necessary to do this here, since rehash_using_reg can fix up the table later, but doing this here eliminates the need to call that expensive function in the most common case where the only use of the register is in the comparison. */ if (code == COMPARE || COMPARISON_P (x)) { if (REG_P (XEXP (x, 0)) && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))) if (insert_regs (XEXP (x, 0), NULL, 0)) { rehash_using_reg (XEXP (x, 0)); changed = 1; } if (REG_P (XEXP (x, 1)) && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1)))) if (insert_regs (XEXP (x, 1), NULL, 0)) { rehash_using_reg (XEXP (x, 1)); changed = 1; } } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') changed |= mention_regs (XEXP (x, i)); else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) changed |= mention_regs (XVECEXP (x, i, j)); return changed; } /* Update the register quantities for inserting X into the hash table with a value equivalent to CLASSP. (If the class does not contain a REG, it is irrelevant.) If MODIFIED is nonzero, X is a destination; it is being modified. Note that delete_reg_equiv should be called on a register before insert_regs is done on that register with MODIFIED != 0. Nonzero value means that elements of reg_qty have changed so X's hash code may be different. */ static int insert_regs (rtx x, struct table_elt *classp, int modified) { if (REG_P (x)) { unsigned int regno = REGNO (x); int qty_valid; /* If REGNO is in the equivalence table already but is of the wrong mode for that equivalence, don't do anything here. */ qty_valid = REGNO_QTY_VALID_P (regno); if (qty_valid) { struct qty_table_elem *ent = &qty_table[REG_QTY (regno)]; if (ent->mode != GET_MODE (x)) return 0; } if (modified || ! qty_valid) { if (classp) for (classp = classp->first_same_value; classp != 0; classp = classp->next_same_value) if (REG_P (classp->exp) && GET_MODE (classp->exp) == GET_MODE (x)) { make_regs_eqv (regno, REGNO (classp->exp)); return 1; } /* Mention_regs for a SUBREG checks if REG_TICK is exactly one larger than REG_IN_TABLE to find out if there was only a single preceding invalidation - for the SUBREG - or another one, which would be for the full register. However, if we find here that REG_TICK indicates that the register is invalid, it means that it has been invalidated in a separate operation. The SUBREG might be used now (then this is a recursive call), or we might use the full REG now and a SUBREG of it later. So bump up REG_TICK so that mention_regs will do the right thing. */ if (! modified && REG_IN_TABLE (regno) >= 0 && REG_TICK (regno) == REG_IN_TABLE (regno) + 1) REG_TICK (regno)++; make_new_qty (regno, GET_MODE (x)); return 1; } return 0; } /* If X is a SUBREG, we will likely be inserting the inner register in the table. If that register doesn't have an assigned quantity number at this point but does later, the insertion that we will be doing now will not be accessible because its hash code will have changed. So assign a quantity number now. */ else if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)) && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x)))) { insert_regs (SUBREG_REG (x), NULL, 0); mention_regs (x); return 1; } else return mention_regs (x); } /* Look in or update the hash table. */ /* Remove table element ELT from use in the table. HASH is its hash code, made using the HASH macro. 
It's an argument because often that is known in advance and we save much time not recomputing it. */ static void remove_from_table (struct table_elt *elt, unsigned int hash) { if (elt == 0) return; /* Mark this element as removed. See cse_insn. */ elt->first_same_value = 0; /* Remove the table element from its equivalence class. */ { struct table_elt *prev = elt->prev_same_value; struct table_elt *next = elt->next_same_value; if (next) next->prev_same_value = prev; if (prev) prev->next_same_value = next; else { struct table_elt *newfirst = next; while (next) { next->first_same_value = newfirst; next = next->next_same_value; } } } /* Remove the table element from its hash bucket. */ { struct table_elt *prev = elt->prev_same_hash; struct table_elt *next = elt->next_same_hash; if (next) next->prev_same_hash = prev; if (prev) prev->next_same_hash = next; else if (equiv_table[hash] == elt) equiv_table[hash] = next; else { /* This entry is not in the proper hash bucket. This can happen when two classes were merged by `merge_equiv_classes'. Search for the hash bucket that it heads. This happens only very rarely, so the cost is acceptable. */ for (hash = 0; hash < CSE_HASH_SIZE; hash++) if (equiv_table[hash] == elt) equiv_table[hash] = next; } } /* Remove the table element from its related-value circular chain. */ if (elt->related_value != 0 && elt->related_value != elt) { struct table_elt *p = elt->related_value; while (p->related_value != elt) p = p->related_value; p->related_value = elt->related_value; if (p->related_value == p) p->related_value = 0; } /* Now add it to the free element chain. */ elt->next_same_hash = free_element_chain; free_element_chain = elt; } /* Look up X in the hash table and return its table element, or 0 if X is not in the table. MODE is the machine-mode of X, or if X is an integer constant with VOIDmode then MODE is the mode with which X will be used. Here we are satisfied to find an expression whose tree structure looks like X. */ static struct table_elt * lookup (rtx x, unsigned int hash, enum machine_mode mode) { struct table_elt *p; for (p = equiv_table[hash]; p; p = p->next_same_hash) if (mode == p->mode && ((x == p->exp && REG_P (x)) || exp_equiv_p (x, p->exp, !REG_P (x), 0))) return p; return 0; } /* Like `lookup' but don't care whether the table element uses invalid regs. Also ignore discrepancies in the machine mode of a register. */ static struct table_elt * lookup_for_remove (rtx x, unsigned int hash, enum machine_mode mode) { struct table_elt *p; if (REG_P (x)) { unsigned int regno = REGNO (x); /* Don't check the machine mode when comparing registers; invalidating (REG:SI 0) also invalidates (REG:DF 0). */ for (p = equiv_table[hash]; p; p = p->next_same_hash) if (REG_P (p->exp) && REGNO (p->exp) == regno) return p; } else { for (p = equiv_table[hash]; p; p = p->next_same_hash) if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0))) return p; } return 0; } /* Look for an expression equivalent to X and with code CODE. If one is found, return that expression. */ static rtx lookup_as_function (rtx x, enum rtx_code code) { struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) & HASH_MASK, GET_MODE (x)); /* If we are looking for a CONST_INT, the mode doesn't really matter, as long as we are narrowing. So if we looked in vain for a mode narrower than word_mode before, look for word_mode now. 
*/ if (p == 0 && code == CONST_INT && GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (word_mode)) { x = copy_rtx (x); PUT_MODE (x, word_mode); p = lookup (x, safe_hash (x, VOIDmode) & HASH_MASK, word_mode); } if (p == 0) return 0; for (p = p->first_same_value; p; p = p->next_same_value) if (GET_CODE (p->exp) == code /* Make sure this is a valid entry in the table. */ && exp_equiv_p (p->exp, p->exp, 1, 0)) return p->exp; return 0; } /* Insert X in the hash table, assuming HASH is its hash code and CLASSP is an element of the class it should go in (or 0 if a new class should be made). It is inserted at the proper position to keep the class in the order cheapest first. MODE is the machine-mode of X, or if X is an integer constant with VOIDmode then MODE is the mode with which X will be used. For elements of equal cheapness, the most recent one goes in front, except that the first element in the list remains first unless a cheaper element is added. The order of pseudo-registers does not matter, as canon_reg will be called to find the cheapest when a register is retrieved from the table. The in_memory field in the hash table element is set to 0. The caller must set it nonzero if appropriate. You should call insert_regs (X, CLASSP, MODIFY) before calling here, and if insert_regs returns a nonzero value you must then recompute its hash code before calling here. If necessary, update table showing constant values of quantities. */ #define CHEAPER(X, Y) \ (preferable ((X)->cost, (X)->regcost, (Y)->cost, (Y)->regcost) < 0) static struct table_elt * hash_insert (rtx x, struct table_elt *classp, unsigned int hash, enum machine_mode mode) { struct table_elt *elt; /* If X is a register and we haven't made a quantity for it, something is wrong. */ if (REG_P (x) && ! REGNO_QTY_VALID_P (REGNO (x))) abort (); /* If X is a hard register, show it is being put in the table. */ if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER) { unsigned int regno = REGNO (x); unsigned int endregno = regno + hard_regno_nregs[regno][GET_MODE (x)]; unsigned int i; for (i = regno; i < endregno; i++) SET_HARD_REG_BIT (hard_regs_in_table, i); } /* Put an element for X into the right hash bucket. */ elt = free_element_chain; if (elt) free_element_chain = elt->next_same_hash; else { n_elements_made++; elt = xmalloc (sizeof (struct table_elt)); } elt->exp = x; elt->canon_exp = NULL_RTX; elt->cost = COST (x); elt->regcost = approx_reg_cost (x); elt->next_same_value = 0; elt->prev_same_value = 0; elt->next_same_hash = equiv_table[hash]; elt->prev_same_hash = 0; elt->related_value = 0; elt->in_memory = 0; elt->mode = mode; elt->is_const = (CONSTANT_P (x) /* GNU C++ takes advantage of this for `this' (and other const values). */ || (REG_P (x) && RTX_UNCHANGING_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER) || fixed_base_plus_p (x)); if (equiv_table[hash]) equiv_table[hash]->prev_same_hash = elt; equiv_table[hash] = elt; /* Put it into the proper value-class. */ if (classp) { classp = classp->first_same_value; if (CHEAPER (elt, classp)) /* Insert at the head of the class. */ { struct table_elt *p; elt->next_same_value = classp; classp->prev_same_value = elt; elt->first_same_value = elt; for (p = classp; p; p = p->next_same_value) p->first_same_value = elt; } else { /* Insert not at head of the class. */ /* Put it after the last element cheaper than X. */ struct table_elt *p, *next; for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt); p = next); /* Put it after P and before NEXT. 
*/ elt->next_same_value = next; if (next) next->prev_same_value = elt; elt->prev_same_value = p; p->next_same_value = elt; elt->first_same_value = classp; } } else elt->first_same_value = elt; /* If this is a constant being set equivalent to a register or a register being set equivalent to a constant, note the constant equivalence. If this is a constant, it cannot be equivalent to a different constant, and a constant is the only thing that can be cheaper than a register. So we know the register is the head of the class (before the constant was inserted). If this is a register that is not already known equivalent to a constant, we must check the entire class. If this is a register that is already known equivalent to an insn, update the qtys `const_insn' to show that `this_insn' is the latest insn making that quantity equivalent to the constant. */ if (elt->is_const && classp && REG_P (classp->exp) && !REG_P (x)) { int exp_q = REG_QTY (REGNO (classp->exp)); struct qty_table_elem *exp_ent = &qty_table[exp_q]; exp_ent->const_rtx = gen_lowpart (exp_ent->mode, x); exp_ent->const_insn = this_insn; } else if (REG_P (x) && classp && ! qty_table[REG_QTY (REGNO (x))].const_rtx && ! elt->is_const) { struct table_elt *p; for (p = classp; p != 0; p = p->next_same_value) { if (p->is_const && !REG_P (p->exp)) { int x_q = REG_QTY (REGNO (x)); struct qty_table_elem *x_ent = &qty_table[x_q]; x_ent->const_rtx = gen_lowpart (GET_MODE (x), p->exp); x_ent->const_insn = this_insn; break; } } } else if (REG_P (x) && qty_table[REG_QTY (REGNO (x))].const_rtx && GET_MODE (x) == qty_table[REG_QTY (REGNO (x))].mode) qty_table[REG_QTY (REGNO (x))].const_insn = this_insn; /* If this is a constant with symbolic value, and it has a term with an explicit integer value, link it up with related expressions. */ if (GET_CODE (x) == CONST) { rtx subexp = get_related_value (x); unsigned subhash; struct table_elt *subelt, *subelt_prev; if (subexp != 0) { /* Get the integer-free subexpression in the hash table. */ subhash = safe_hash (subexp, mode) & HASH_MASK; subelt = lookup (subexp, subhash, mode); if (subelt == 0) subelt = hash_insert (subexp, NULL, subhash, mode); /* Initialize SUBELT's circular chain if it has none. */ if (subelt->related_value == 0) subelt->related_value = subelt; /* Find the element in the circular chain that precedes SUBELT. */ subelt_prev = subelt; while (subelt_prev->related_value != subelt) subelt_prev = subelt_prev->related_value; /* Put new ELT into SUBELT's circular chain just before SUBELT. This way the element that follows SUBELT is the oldest one. */ elt->related_value = subelt_prev->related_value; subelt_prev->related_value = elt; } } return elt; } /* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from CLASS2 into CLASS1. This is done when we have reached an insn which makes the two classes equivalent. CLASS1 will be the surviving class; CLASS2 should not be used after this call. Any invalid entries in CLASS2 will not be copied. */ static void merge_equiv_classes (struct table_elt *class1, struct table_elt *class2) { struct table_elt *elt, *next, *new; /* Ensure we start with the head of the classes. */ class1 = class1->first_same_value; class2 = class2->first_same_value; /* If they were already equal, forget it. */ if (class1 == class2) return; for (elt = class2; elt; elt = next) { unsigned int hash; rtx exp = elt->exp; enum machine_mode mode = elt->mode; next = elt->next_same_value; /* Remove old entry, make a new one in CLASS1's class. 
Don't do this for invalid entries as we cannot find their hash code (it also isn't necessary). */ if (REG_P (exp) || exp_equiv_p (exp, exp, 1, 0)) { bool need_rehash = false; hash_arg_in_memory = 0; hash = HASH (exp, mode); if (REG_P (exp)) { need_rehash = (unsigned) REG_QTY (REGNO (exp)) != REGNO (exp); delete_reg_equiv (REGNO (exp)); } remove_from_table (elt, hash); if (insert_regs (exp, class1, 0) || need_rehash) { rehash_using_reg (exp); hash = HASH (exp, mode); } new = hash_insert (exp, class1, hash, mode); new->in_memory = hash_arg_in_memory; } } } /* Flush the entire hash table. */ static void flush_hash_table (void) { int i; struct table_elt *p; for (i = 0; i < CSE_HASH_SIZE; i++) for (p = equiv_table[i]; p; p = equiv_table[i]) { /* Note that invalidate can remove elements after P in the current hash chain. */ if (REG_P (p->exp)) invalidate (p->exp, p->mode); else remove_from_table (p, i); } } /* Function called for each rtx to check whether true dependence exist. */ struct check_dependence_data { enum machine_mode mode; rtx exp; rtx addr; }; static int check_dependence (rtx *x, void *data) { struct check_dependence_data *d = (struct check_dependence_data *) data; if (*x && MEM_P (*x)) return canon_true_dependence (d->exp, d->mode, d->addr, *x, cse_rtx_varies_p); else return 0; } /* Remove from the hash table, or mark as invalid, all expressions whose values could be altered by storing in X. X is a register, a subreg, or a memory reference with nonvarying address (because, when a memory reference with a varying address is stored in, all memory references are removed by invalidate_memory so specific invalidation is superfluous). FULL_MODE, if not VOIDmode, indicates that this much should be invalidated instead of just the amount indicated by the mode of X. This is only used for bitfield stores into memory. A nonvarying address may be just a register or just a symbol reference, or it may be either of those plus a numeric offset. */ static void invalidate (rtx x, enum machine_mode full_mode) { int i; struct table_elt *p; rtx addr; switch (GET_CODE (x)) { case REG: { /* If X is a register, dependencies on its contents are recorded through the qty number mechanism. Just change the qty number of the register, mark it as invalid for expressions that refer to it, and remove it itself. */ unsigned int regno = REGNO (x); unsigned int hash = HASH (x, GET_MODE (x)); /* Remove REGNO from any quantity list it might be on and indicate that its value might have changed. If it is a pseudo, remove its entry from the hash table. For a hard register, we do the first two actions above for any additional hard registers corresponding to X. Then, if any of these registers are in the table, we must remove any REG entries that overlap these registers. */ delete_reg_equiv (regno); REG_TICK (regno)++; SUBREG_TICKED (regno) = -1; if (regno >= FIRST_PSEUDO_REGISTER) { /* Because a register can be referenced in more than one mode, we might have to remove more than one table entry. 
*/ struct table_elt *elt; while ((elt = lookup_for_remove (x, hash, GET_MODE (x)))) remove_from_table (elt, hash); } else { HOST_WIDE_INT in_table = TEST_HARD_REG_BIT (hard_regs_in_table, regno); unsigned int endregno = regno + hard_regno_nregs[regno][GET_MODE (x)]; unsigned int tregno, tendregno, rn; struct table_elt *p, *next; CLEAR_HARD_REG_BIT (hard_regs_in_table, regno); for (rn = regno + 1; rn < endregno; rn++) { in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, rn); CLEAR_HARD_REG_BIT (hard_regs_in_table, rn); delete_reg_equiv (rn); REG_TICK (rn)++; SUBREG_TICKED (rn) = -1; } if (in_table) for (hash = 0; hash < CSE_HASH_SIZE; hash++) for (p = equiv_table[hash]; p; p = next) { next = p->next_same_hash; if (!REG_P (p->exp) || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER) continue; tregno = REGNO (p->exp); tendregno = tregno + hard_regno_nregs[tregno][GET_MODE (p->exp)]; if (tendregno > regno && tregno < endregno) remove_from_table (p, hash); } } } return; case SUBREG: invalidate (SUBREG_REG (x), VOIDmode); return; case PARALLEL: for (i = XVECLEN (x, 0) - 1; i >= 0; --i) invalidate (XVECEXP (x, 0, i), VOIDmode); return; case EXPR_LIST: /* This is part of a disjoint return value; extract the location in question ignoring the offset. */ invalidate (XEXP (x, 0), VOIDmode); return; case MEM: addr = canon_rtx (get_addr (XEXP (x, 0))); /* Calculate the canonical version of X here so that true_dependence doesn't generate new RTL for X on each call. */ x = canon_rtx (x); /* Remove all hash table elements that refer to overlapping pieces of memory. */ if (full_mode == VOIDmode) full_mode = GET_MODE (x); for (i = 0; i < CSE_HASH_SIZE; i++) { struct table_elt *next; for (p = equiv_table[i]; p; p = next) { next = p->next_same_hash; if (p->in_memory) { struct check_dependence_data d; /* Just canonicalize the expression once; otherwise each time we call invalidate true_dependence will canonicalize the expression again. */ if (!p->canon_exp) p->canon_exp = canon_rtx (p->exp); d.exp = x; d.addr = addr; d.mode = full_mode; if (for_each_rtx (&p->canon_exp, check_dependence, &d)) remove_from_table (p, i); } } } return; default: abort (); } } /* Remove all expressions that refer to register REGNO, since they are already invalid, and we are about to mark that register valid again and don't want the old expressions to reappear as valid. */ static void remove_invalid_refs (unsigned int regno) { unsigned int i; struct table_elt *p, *next; for (i = 0; i < CSE_HASH_SIZE; i++) for (p = equiv_table[i]; p; p = next) { next = p->next_same_hash; if (!REG_P (p->exp) && refers_to_regno_p (regno, regno + 1, p->exp, (rtx *) 0)) remove_from_table (p, i); } } /* Likewise for a subreg with subreg_reg REGNO, subreg_byte OFFSET, and mode MODE. */ static void remove_invalid_subreg_refs (unsigned int regno, unsigned int offset, enum machine_mode mode) { unsigned int i; struct table_elt *p, *next; unsigned int end = offset + (GET_MODE_SIZE (mode) - 1); for (i = 0; i < CSE_HASH_SIZE; i++) for (p = equiv_table[i]; p; p = next) { rtx exp = p->exp; next = p->next_same_hash; if (!REG_P (exp) && (GET_CODE (exp) != SUBREG || !REG_P (SUBREG_REG (exp)) || REGNO (SUBREG_REG (exp)) != regno || (((SUBREG_BYTE (exp) + (GET_MODE_SIZE (GET_MODE (exp)) - 1)) >= offset) && SUBREG_BYTE (exp) <= end)) && refers_to_regno_p (regno, regno + 1, p->exp, (rtx *) 0)) remove_from_table (p, i); } } /* Recompute the hash codes of any valid entries in the hash table that reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG. 
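   Rehashing an element whose hash code has changed amounts to detaching it
   from its old bucket and pushing it onto the front of the new one.  A
   free-standing sketch of that move (hypothetical `toy_*' names, a fixed
   32-bucket table):

     struct toy_elt { struct toy_elt *next_same_hash, *prev_same_hash; };
     static struct toy_elt *toy_buckets[32];

     // Detach P from bucket OLD_B and push it on the front of bucket NEW_B.
     static void toy_move_to_bucket (struct toy_elt *p, unsigned old_b, unsigned new_b)
     {
       if (p->next_same_hash)
         p->next_same_hash->prev_same_hash = p->prev_same_hash;
       if (p->prev_same_hash)
         p->prev_same_hash->next_same_hash = p->next_same_hash;
       else
         toy_buckets[old_b] = p->next_same_hash;   // P headed its old bucket

       p->next_same_hash = toy_buckets[new_b];
       p->prev_same_hash = 0;
       if (toy_buckets[new_b])
         toy_buckets[new_b]->prev_same_hash = p;
       toy_buckets[new_b] = p;                     // P now heads the new bucket
     }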
This is called when we make a jump equivalence. */ static void rehash_using_reg (rtx x) { unsigned int i; struct table_elt *p, *next; unsigned hash; if (GET_CODE (x) == SUBREG) x = SUBREG_REG (x); /* If X is not a register or if the register is known not to be in any valid entries in the table, we have no work to do. */ if (!REG_P (x) || REG_IN_TABLE (REGNO (x)) < 0 || REG_IN_TABLE (REGNO (x)) != REG_TICK (REGNO (x))) return; /* Scan all hash chains looking for valid entries that mention X. If we find one and it is in the wrong hash chain, move it. */ for (i = 0; i < CSE_HASH_SIZE; i++) for (p = equiv_table[i]; p; p = next) { next = p->next_same_hash; if (reg_mentioned_p (x, p->exp) && exp_equiv_p (p->exp, p->exp, 1, 0) && i != (hash = safe_hash (p->exp, p->mode) & HASH_MASK)) { if (p->next_same_hash) p->next_same_hash->prev_same_hash = p->prev_same_hash; if (p->prev_same_hash) p->prev_same_hash->next_same_hash = p->next_same_hash; else equiv_table[i] = p->next_same_hash; p->next_same_hash = equiv_table[hash]; p->prev_same_hash = 0; if (equiv_table[hash]) equiv_table[hash]->prev_same_hash = p; equiv_table[hash] = p; } } } /* Remove from the hash table any expression that is a call-clobbered register. Also update their TICK values. */ static void invalidate_for_call (void) { unsigned int regno, endregno; unsigned int i; unsigned hash; struct table_elt *p, *next; int in_table = 0; /* Go through all the hard registers. For each that is clobbered in a CALL_INSN, remove the register from quantity chains and update reg_tick if defined. Also see if any of these registers is currently in the table. */ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) { delete_reg_equiv (regno); if (REG_TICK (regno) >= 0) { REG_TICK (regno)++; SUBREG_TICKED (regno) = -1; } in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0); } /* In the case where we have no call-clobbered hard registers in the table, we are done. Otherwise, scan the table and remove any entry that overlaps a call-clobbered register. */ if (in_table) for (hash = 0; hash < CSE_HASH_SIZE; hash++) for (p = equiv_table[hash]; p; p = next) { next = p->next_same_hash; if (!REG_P (p->exp) || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER) continue; regno = REGNO (p->exp); endregno = regno + hard_regno_nregs[regno][GET_MODE (p->exp)]; for (i = regno; i < endregno; i++) if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) { remove_from_table (p, hash); break; } } } /* Given an expression X of type CONST, and ELT which is its table entry (or 0 if it is not in the hash table), return an alternate expression for X as a register plus integer. If none can be found, return 0. */ static rtx use_related_value (rtx x, struct table_elt *elt) { struct table_elt *relt = 0; struct table_elt *p, *q; HOST_WIDE_INT offset; /* First, is there anything related known? If we have a table element, we can tell from that. Otherwise, must look it up. */ if (elt != 0 && elt->related_value != 0) relt = elt; else if (elt == 0 && GET_CODE (x) == CONST) { rtx subexp = get_related_value (x); if (subexp != 0) relt = lookup (subexp, safe_hash (subexp, GET_MODE (subexp)) & HASH_MASK, GET_MODE (subexp)); } if (relt == 0) return 0; /* Search all related table entries for one that has an equivalent register. */ p = relt; while (1) { /* This loop is strange in that it is executed in two different cases. The first is when X is already in the table. Then it is searching the RELATED_VALUE list of X's class (RELT). 
The second case is when X is not in the table. Then RELT points to a class for the related value. Ensure that, whatever case we are in, that we ignore classes that have the same value as X. */ if (rtx_equal_p (x, p->exp)) q = 0; else for (q = p->first_same_value; q; q = q->next_same_value) if (REG_P (q->exp)) break; if (q) break; p = p->related_value; /* We went all the way around, so there is nothing to be found. Alternatively, perhaps RELT was in the table for some other reason and it has no related values recorded. */ if (p == relt || p == 0) break; } if (q == 0) return 0; offset = (get_integer_term (x) - get_integer_term (p->exp)); /* Note: OFFSET may be 0 if P->xexp and X are related by commutativity. */ return plus_constant (q->exp, offset); } /* Hash a string. Just add its bytes up. */ static inline unsigned canon_hash_string (const char *ps) { unsigned hash = 0; const unsigned char *p = (const unsigned char *) ps; if (p) while (*p) hash += *p++; return hash; } /* Hash an rtx. We are careful to make sure the value is never negative. Equivalent registers hash identically. MODE is used in hashing for CONST_INTs only; otherwise the mode of X is used. Store 1 in do_not_record if any subexpression is volatile. Store 1 in hash_arg_in_memory if X contains a MEM rtx which does not have the RTX_UNCHANGING_P bit set. Note that cse_insn knows that the hash code of a MEM expression is just (int) MEM plus the hash code of the address. */ static unsigned canon_hash (rtx x, enum machine_mode mode) { int i, j; unsigned hash = 0; enum rtx_code code; const char *fmt; /* repeat is used to turn tail-recursion into iteration. */ repeat: if (x == 0) return hash; code = GET_CODE (x); switch (code) { case REG: { unsigned int regno = REGNO (x); bool record; /* On some machines, we can't record any non-fixed hard register, because extending its life will cause reload problems. We consider ap, fp, sp, gp to be fixed for this purpose. We also consider CCmode registers to be fixed for this purpose; failure to do so leads to failure to simplify 0<100 type of conditionals. On all machines, we can't record any global registers. Nor should we record any register that is in a small class, as defined by CLASS_LIKELY_SPILLED_P. */ if (regno >= FIRST_PSEUDO_REGISTER) record = true; else if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx || x == arg_pointer_rtx || x == stack_pointer_rtx || x == pic_offset_table_rtx) record = true; else if (global_regs[regno]) record = false; else if (fixed_regs[regno]) record = true; else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC) record = true; else if (SMALL_REGISTER_CLASSES) record = false; else if (CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (regno))) record = false; else record = true; if (!record) { do_not_record = 1; return 0; } hash += ((unsigned) REG << 7) + (unsigned) REG_QTY (regno); return hash; } /* We handle SUBREG of a REG specially because the underlying reg changes its hash value with every value change; we don't want to have to forget unrelated subregs when one subreg changes. */ case SUBREG: { if (REG_P (SUBREG_REG (x))) { hash += (((unsigned) SUBREG << 7) + REGNO (SUBREG_REG (x)) + (SUBREG_BYTE (x) / UNITS_PER_WORD)); return hash; } break; } case CONST_INT: { unsigned HOST_WIDE_INT tem = INTVAL (x); hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem; return hash; } case CONST_DOUBLE: /* This is like the general case, except that it only counts the integers representing the constant. 
*/ hash += (unsigned) code + (unsigned) GET_MODE (x); if (GET_MODE (x) != VOIDmode) hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); else hash += ((unsigned) CONST_DOUBLE_LOW (x) + (unsigned) CONST_DOUBLE_HIGH (x)); return hash; case CONST_VECTOR: { int units; rtx elt; units = CONST_VECTOR_NUNITS (x); for (i = 0; i < units; ++i) { elt = CONST_VECTOR_ELT (x, i); hash += canon_hash (elt, GET_MODE (elt)); } return hash; } /* Assume there is only one rtx object for any given label. */ case LABEL_REF: hash += ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0); return hash; case SYMBOL_REF: hash += ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0); return hash; case MEM: /* We don't record if marked volatile or if BLKmode since we don't know the size of the move. */ if (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode) { do_not_record = 1; return 0; } if (! RTX_UNCHANGING_P (x) || fixed_base_plus_p (XEXP (x, 0))) hash_arg_in_memory = 1; /* Now that we have already found this special case, might as well speed it up as much as possible. */ hash += (unsigned) MEM; x = XEXP (x, 0); goto repeat; case USE: /* A USE that mentions non-volatile memory needs special handling since the MEM may be BLKmode which normally prevents an entry from being made. Pure calls are marked by a USE which mentions BLKmode memory. */ if (MEM_P (XEXP (x, 0)) && ! MEM_VOLATILE_P (XEXP (x, 0))) { hash += (unsigned) USE; x = XEXP (x, 0); if (! RTX_UNCHANGING_P (x) || fixed_base_plus_p (XEXP (x, 0))) hash_arg_in_memory = 1; /* Now that we have already found this special case, might as well speed it up as much as possible. */ hash += (unsigned) MEM; x = XEXP (x, 0); goto repeat; } break; case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: case PRE_MODIFY: case POST_MODIFY: case PC: case CC0: case CALL: case UNSPEC_VOLATILE: do_not_record = 1; return 0; case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) { do_not_record = 1; return 0; } else { /* We don't want to take the filename and line into account. */ hash += (unsigned) code + (unsigned) GET_MODE (x) + canon_hash_string (ASM_OPERANDS_TEMPLATE (x)) + canon_hash_string (ASM_OPERANDS_OUTPUT_CONSTRAINT (x)) + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x); if (ASM_OPERANDS_INPUT_LENGTH (x)) { for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++) { hash += (canon_hash (ASM_OPERANDS_INPUT (x, i), GET_MODE (ASM_OPERANDS_INPUT (x, i))) + canon_hash_string (ASM_OPERANDS_INPUT_CONSTRAINT (x, i))); } hash += canon_hash_string (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0)); x = ASM_OPERANDS_INPUT (x, 0); mode = GET_MODE (x); goto repeat; } return hash; } break; default: break; } i = GET_RTX_LENGTH (code) - 1; hash += (unsigned) code + (unsigned) GET_MODE (x); fmt = GET_RTX_FORMAT (code); for (; i >= 0; i--) { if (fmt[i] == 'e') { rtx tem = XEXP (x, i); /* If we are about to do the last recursive call needed at this level, change it into iteration. This function is called enough to be worth it. */ if (i == 0) { x = tem; goto repeat; } hash += canon_hash (tem, 0); } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) hash += canon_hash (XVECEXP (x, i, j), 0); else if (fmt[i] == 's') hash += canon_hash_string (XSTR (x, i)); else if (fmt[i] == 'i') { unsigned tem = XINT (x, i); hash += tem; } else if (fmt[i] == '0' || fmt[i] == 't') /* Unused. */ ; else abort (); } return hash; } /* Like canon_hash but with no side effects. 
*/ static unsigned safe_hash (rtx x, enum machine_mode mode) { int save_do_not_record = do_not_record; int save_hash_arg_in_memory = hash_arg_in_memory; unsigned hash = canon_hash (x, mode); hash_arg_in_memory = save_hash_arg_in_memory; do_not_record = save_do_not_record; return hash; } /* Return 1 iff X and Y would canonicalize into the same thing, without actually constructing the canonicalization of either one. If VALIDATE is nonzero, we assume X is an expression being processed from the rtl and Y was found in the hash table. We check register refs in Y for being marked as valid. If EQUAL_VALUES is nonzero, we allow a register to match a constant value that is known to be in the register. Ordinarily, we don't allow them to match, because letting them match would cause unpredictable results in all the places that search a hash table chain for an equivalent for a given value. A possible equivalent that has different structure has its hash code computed from different data. Whether the hash code is the same as that of the given value is pure luck. */ static int exp_equiv_p (rtx x, rtx y, int validate, int equal_values) { int i, j; enum rtx_code code; const char *fmt; /* Note: it is incorrect to assume an expression is equivalent to itself if VALIDATE is nonzero. */ if (x == y && !validate) return 1; if (x == 0 || y == 0) return x == y; code = GET_CODE (x); if (code != GET_CODE (y)) { if (!equal_values) return 0; /* If X is a constant and Y is a register or vice versa, they may be equivalent. We only have to validate if Y is a register. */ if (CONSTANT_P (x) && REG_P (y) && REGNO_QTY_VALID_P (REGNO (y))) { int y_q = REG_QTY (REGNO (y)); struct qty_table_elem *y_ent = &qty_table[y_q]; if (GET_MODE (y) == y_ent->mode && rtx_equal_p (x, y_ent->const_rtx) && (! validate || REG_IN_TABLE (REGNO (y)) == REG_TICK (REGNO (y)))) return 1; } if (CONSTANT_P (y) && code == REG && REGNO_QTY_VALID_P (REGNO (x))) { int x_q = REG_QTY (REGNO (x)); struct qty_table_elem *x_ent = &qty_table[x_q]; if (GET_MODE (x) == x_ent->mode && rtx_equal_p (y, x_ent->const_rtx)) return 1; } return 0; } /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */ if (GET_MODE (x) != GET_MODE (y)) return 0; switch (code) { case PC: case CC0: case CONST_INT: return x == y; case LABEL_REF: return XEXP (x, 0) == XEXP (y, 0); case SYMBOL_REF: return XSTR (x, 0) == XSTR (y, 0); case REG: { unsigned int regno = REGNO (y); unsigned int endregno = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1 : hard_regno_nregs[regno][GET_MODE (y)]); unsigned int i; /* If the quantities are not the same, the expressions are not equivalent. If there are and we are not to validate, they are equivalent. Otherwise, ensure all regs are up-to-date. */ if (REG_QTY (REGNO (x)) != REG_QTY (regno)) return 0; if (! validate) return 1; for (i = regno; i < endregno; i++) if (REG_IN_TABLE (i) != REG_TICK (i)) return 0; return 1; } /* For commutative operations, check both orders. */ case PLUS: case MULT: case AND: case IOR: case XOR: case NE: case EQ: return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values) && exp_equiv_p (XEXP (x, 1), XEXP (y, 1), validate, equal_values)) || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1), validate, equal_values) && exp_equiv_p (XEXP (x, 1), XEXP (y, 0), validate, equal_values))); case ASM_OPERANDS: /* We don't use the generic code below because we want to disregard filename and line numbers. */ /* A volatile asm isn't equivalent to any other. 
*/ if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y)) return 0; if (GET_MODE (x) != GET_MODE (y) || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y)) || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x), ASM_OPERANDS_OUTPUT_CONSTRAINT (y)) || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y) || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y)) return 0; if (ASM_OPERANDS_INPUT_LENGTH (x)) { for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) if (! exp_equiv_p (ASM_OPERANDS_INPUT (x, i), ASM_OPERANDS_INPUT (y, i), validate, equal_values) || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i), ASM_OPERANDS_INPUT_CONSTRAINT (y, i))) return 0; } return 1; default: break; } /* Compare the elements. If any pair of corresponding elements fail to match, return 0 for the whole things. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { switch (fmt[i]) { case 'e': if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values)) return 0; break; case 'E': if (XVECLEN (x, i) != XVECLEN (y, i)) return 0; for (j = 0; j < XVECLEN (x, i); j++) if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j), validate, equal_values)) return 0; break; case 's': if (strcmp (XSTR (x, i), XSTR (y, i))) return 0; break; case 'i': if (XINT (x, i) != XINT (y, i)) return 0; break; case 'w': if (XWINT (x, i) != XWINT (y, i)) return 0; break; case '0': case 't': break; default: abort (); } } return 1; } /* Return 1 if X has a value that can vary even between two executions of the program. 0 means X can be compared reliably against certain constants or near-constants. */ static int cse_rtx_varies_p (rtx x, int from_alias) { /* We need not check for X and the equivalence class being of the same mode because if X is equivalent to a constant in some mode, it doesn't vary in any mode. */ if (REG_P (x) && REGNO_QTY_VALID_P (REGNO (x))) { int x_q = REG_QTY (REGNO (x)); struct qty_table_elem *x_ent = &qty_table[x_q]; if (GET_MODE (x) == x_ent->mode && x_ent->const_rtx != NULL_RTX) return 0; } if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT && REG_P (XEXP (x, 0)) && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))) { int x0_q = REG_QTY (REGNO (XEXP (x, 0))); struct qty_table_elem *x0_ent = &qty_table[x0_q]; if ((GET_MODE (XEXP (x, 0)) == x0_ent->mode) && x0_ent->const_rtx != NULL_RTX) return 0; } /* This can happen as the result of virtual register instantiation, if the initial constant is too large to be a valid address. This gives us a three instruction sequence, load large offset into a register, load fp minus a constant into a register, then a MEM which is the sum of the two `constant' registers. */ if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1)) && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))) && REGNO_QTY_VALID_P (REGNO (XEXP (x, 1)))) { int x0_q = REG_QTY (REGNO (XEXP (x, 0))); int x1_q = REG_QTY (REGNO (XEXP (x, 1))); struct qty_table_elem *x0_ent = &qty_table[x0_q]; struct qty_table_elem *x1_ent = &qty_table[x1_q]; if ((GET_MODE (XEXP (x, 0)) == x0_ent->mode) && x0_ent->const_rtx != NULL_RTX && (GET_MODE (XEXP (x, 1)) == x1_ent->mode) && x1_ent->const_rtx != NULL_RTX) return 0; } return rtx_varies_p (x, from_alias); } /* Canonicalize an expression: replace each register reference inside it with the "oldest" equivalent register. If INSN is nonzero and we are replacing a pseudo with a hard register or vice versa, validate_change is used to ensure that INSN remains valid after we make our substitution. 
The calls are made with IN_GROUP nonzero so apply_change_group must be called upon the outermost return from this function (unless INSN is zero). The result of apply_change_group can generally be discarded since the changes we are making are optional. */ static rtx canon_reg (rtx x, rtx insn) { int i; enum rtx_code code; const char *fmt; if (x == 0) return x; code = GET_CODE (x); switch (code) { case PC: case CC0: case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case ADDR_VEC: case ADDR_DIFF_VEC: return x; case REG: { int first; int q; struct qty_table_elem *ent; /* Never replace a hard reg, because hard regs can appear in more than one machine mode, and we must preserve the mode of each occurrence. Also, some hard regs appear in MEMs that are shared and mustn't be altered. Don't try to replace any reg that maps to a reg of class NO_REGS. */ if (REGNO (x) < FIRST_PSEUDO_REGISTER || ! REGNO_QTY_VALID_P (REGNO (x))) return x; q = REG_QTY (REGNO (x)); ent = &qty_table[q]; first = ent->first_reg; return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first] : REGNO_REG_CLASS (first) == NO_REGS ? x : gen_rtx_REG (ent->mode, first)); } default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { int j; if (fmt[i] == 'e') { rtx new = canon_reg (XEXP (x, i), insn); int insn_code; /* If replacing pseudo with hard reg or vice versa, ensure the insn remains valid. Likewise if the insn has MATCH_DUPs. */ if (insn != 0 && new != 0 && REG_P (new) && REG_P (XEXP (x, i)) && (((REGNO (new) < FIRST_PSEUDO_REGISTER) != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER)) || (insn_code = recog_memoized (insn)) < 0 || insn_data[insn_code].n_dups > 0)) validate_change (insn, &XEXP (x, i), new, 1); else XEXP (x, i) = new; } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn); } return x; } /* LOC is a location within INSN that is an operand address (the contents of a MEM). Find the best equivalent address to use that is valid for this insn. On most CISC machines, complicated address modes are costly, and rtx_cost is a good approximation for that cost. However, most RISC machines have only a few (usually only one) memory reference formats. If an address is valid at all, it is often just as cheap as any other address. Hence, for RISC machines, we use `address_cost' to compare the costs of various addresses. For two addresses of equal cost, choose the one with the highest `rtx_cost' value as that has the potential of eliminating the most insns. For equal costs, we choose the first in the equivalence class. Note that we ignore the fact that pseudo registers are cheaper than hard registers here because we would also prefer the pseudo registers. */ static void find_best_addr (rtx insn, rtx *loc, enum machine_mode mode) { struct table_elt *elt; rtx addr = *loc; struct table_elt *p; int found_better = 1; int save_do_not_record = do_not_record; int save_hash_arg_in_memory = hash_arg_in_memory; int addr_volatile; int regno; unsigned hash; /* Do not try to replace constant addresses or addresses of local and argument slots. These MEM expressions are made only once and inserted in many instructions, as well as being used to control symbol table output. It is not safe to clobber them. There are some uncommon cases where the address is already in a register for some reason, but we cannot take advantage of that because we have no easy way to unshare the MEM. 
In addition, looking up all stack addresses is costly. */ if ((GET_CODE (addr) == PLUS && REG_P (XEXP (addr, 0)) && GET_CODE (XEXP (addr, 1)) == CONST_INT && (regno = REGNO (XEXP (addr, 0)), regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM || regno == ARG_POINTER_REGNUM)) || (REG_P (addr) && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM || regno == ARG_POINTER_REGNUM)) || CONSTANT_ADDRESS_P (addr)) return; /* If this address is not simply a register, try to fold it. This will sometimes simplify the expression. Many simplifications will not be valid, but some, usually applying the associative rule, will be valid and produce better code. */ if (!REG_P (addr)) { rtx folded = fold_rtx (copy_rtx (addr), NULL_RTX); int addr_folded_cost = address_cost (folded, mode); int addr_cost = address_cost (addr, mode); if ((addr_folded_cost < addr_cost || (addr_folded_cost == addr_cost /* ??? The rtx_cost comparison is left over from an older version of this code. It is probably no longer helpful. */ && (rtx_cost (folded, MEM) > rtx_cost (addr, MEM) || approx_reg_cost (folded) < approx_reg_cost (addr)))) && validate_change (insn, loc, folded, 0)) addr = folded; } /* If this address is not in the hash table, we can't look for equivalences of the whole address. Also, ignore if volatile. */ do_not_record = 0; hash = HASH (addr, Pmode); addr_volatile = do_not_record; do_not_record = save_do_not_record; hash_arg_in_memory = save_hash_arg_in_memory; if (addr_volatile) return; elt = lookup (addr, hash, Pmode); if (elt) { /* We need to find the best (under the criteria documented above) entry in the class that is valid. We use the `flag' field to indicate choices that were invalid and iterate until we can't find a better one that hasn't already been tried. */ for (p = elt->first_same_value; p; p = p->next_same_value) p->flag = 0; while (found_better) { int best_addr_cost = address_cost (*loc, mode); int best_rtx_cost = (elt->cost + 1) >> 1; int exp_cost; struct table_elt *best_elt = elt; found_better = 0; for (p = elt->first_same_value; p; p = p->next_same_value) if (! p->flag) { if ((REG_P (p->exp) || exp_equiv_p (p->exp, p->exp, 1, 0)) && ((exp_cost = address_cost (p->exp, mode)) < best_addr_cost || (exp_cost == best_addr_cost && ((p->cost + 1) >> 1) > best_rtx_cost))) { found_better = 1; best_addr_cost = exp_cost; best_rtx_cost = (p->cost + 1) >> 1; best_elt = p; } } if (found_better) { if (validate_change (insn, loc, canon_reg (copy_rtx (best_elt->exp), NULL_RTX), 0)) return; else best_elt->flag = 1; } } } /* If the address is a binary operation with the first operand a register and the second a constant, do the same as above, but looking for equivalences of the register. Then try to simplify before checking for the best address to use. This catches a few cases: First is when we have REG+const and the register is another REG+const. We can often merge the constants and eliminate one insn and one register. It may also be that a machine has a cheap REG+REG+const. Finally, this improves the code on the Alpha for unaligned byte stores. */ if (flag_expensive_optimizations && ARITHMETIC_P (*loc) && REG_P (XEXP (*loc, 0))) { rtx op1 = XEXP (*loc, 1); do_not_record = 0; hash = HASH (XEXP (*loc, 0), Pmode); do_not_record = save_do_not_record; hash_arg_in_memory = save_hash_arg_in_memory; elt = lookup (XEXP (*loc, 0), hash, Pmode); if (elt == 0) return; /* We need to find the best (under the criteria documented above) entry in the class that is valid. 
We use the `flag' field to indicate choices that were invalid and iterate until we can't find a better one that hasn't already been tried. */ for (p = elt->first_same_value; p; p = p->next_same_value) p->flag = 0; while (found_better) { int best_addr_cost = address_cost (*loc, mode); int best_rtx_cost = (COST (*loc) + 1) >> 1; struct table_elt *best_elt = elt; rtx best_rtx = *loc; int count; /* This is at worst case an O(n^2) algorithm, so limit our search to the first 32 elements on the list. This avoids trouble compiling code with very long basic blocks that can easily call simplify_gen_binary so many times that we run out of memory. */ found_better = 0; for (p = elt->first_same_value, count = 0; p && count < 32; p = p->next_same_value, count++) if (! p->flag && (REG_P (p->exp) || exp_equiv_p (p->exp, p->exp, 1, 0))) { rtx new = simplify_gen_binary (GET_CODE (*loc), Pmode, p->exp, op1); int new_cost; new_cost = address_cost (new, mode); if (new_cost < best_addr_cost || (new_cost == best_addr_cost && (COST (new) + 1) >> 1 > best_rtx_cost)) { found_better = 1; best_addr_cost = new_cost; best_rtx_cost = (COST (new) + 1) >> 1; best_elt = p; best_rtx = new; } } if (found_better) { if (validate_change (insn, loc, canon_reg (copy_rtx (best_rtx), NULL_RTX), 0)) return; else best_elt->flag = 1; } } } } /* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison operation (EQ, NE, GT, etc.), follow it back through the hash table and what values are being compared. *PARG1 and *PARG2 are updated to contain the rtx representing the values actually being compared. For example, if *PARG1 was (cc0) and *PARG2 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were compared to produce cc0. The return value is the comparison operator and is either the code of A or the code corresponding to the inverse of the comparison. */ static enum rtx_code find_comparison_args (enum rtx_code code, rtx *parg1, rtx *parg2, enum machine_mode *pmode1, enum machine_mode *pmode2) { rtx arg1, arg2; arg1 = *parg1, arg2 = *parg2; /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */ while (arg2 == CONST0_RTX (GET_MODE (arg1))) { /* Set nonzero when we find something of interest. */ rtx x = 0; int reverse_code = 0; struct table_elt *p = 0; /* If arg1 is a COMPARE, extract the comparison arguments from it. On machines with CC0, this is the only case that can occur, since fold_rtx will return the COMPARE or item being compared with zero when given CC0. */ if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx) x = arg1; /* If ARG1 is a comparison operator and CODE is testing for STORE_FLAG_VALUE, get the inner arguments. */ else if (COMPARISON_P (arg1)) { #ifdef FLOAT_STORE_FLAG_VALUE REAL_VALUE_TYPE fsfv; #endif if (code == NE || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT && code == LT && STORE_FLAG_VALUE == -1) #ifdef FLOAT_STORE_FLAG_VALUE || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)), REAL_VALUE_NEGATIVE (fsfv))) #endif ) x = arg1; else if (code == EQ || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT && code == GE && STORE_FLAG_VALUE == -1) #ifdef FLOAT_STORE_FLAG_VALUE || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)), REAL_VALUE_NEGATIVE (fsfv))) #endif ) x = arg1, reverse_code = 1; } /* ??? We could also check for (ne (and (eq (...) (const_int 1))) (const_int 0)) and related forms, but let's wait until we see them occurring. 
*/ if (x == 0) /* Look up ARG1 in the hash table and see if it has an equivalence that lets us see what is being compared. */ p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) & HASH_MASK, GET_MODE (arg1)); if (p) { p = p->first_same_value; /* If what we compare is already known to be constant, that is as good as it gets. We need to break the loop in this case, because otherwise we can have an infinite loop when looking at a reg that is known to be a constant which is the same as a comparison of a reg against zero which appears later in the insn stream, which in turn is constant and the same as the comparison of the first reg against zero... */ if (p->is_const) break; } for (; p; p = p->next_same_value) { enum machine_mode inner_mode = GET_MODE (p->exp); #ifdef FLOAT_STORE_FLAG_VALUE REAL_VALUE_TYPE fsfv; #endif /* If the entry isn't valid, skip it. */ if (! exp_equiv_p (p->exp, p->exp, 1, 0)) continue; if (GET_CODE (p->exp) == COMPARE /* Another possibility is that this machine has a compare insn that includes the comparison code. In that case, ARG1 would be equivalent to a comparison operation that would set ARG1 to either STORE_FLAG_VALUE or zero. If this is an NE operation, ORIG_CODE is the actual comparison being done; if it is an EQ, we must reverse ORIG_CODE. On machine with a negative value for STORE_FLAG_VALUE, also look at LT and GE operations. */ || ((code == NE || (code == LT && GET_MODE_CLASS (inner_mode) == MODE_INT && (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT) && (STORE_FLAG_VALUE & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (inner_mode) - 1)))) #ifdef FLOAT_STORE_FLAG_VALUE || (code == LT && GET_MODE_CLASS (inner_mode) == MODE_FLOAT && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)), REAL_VALUE_NEGATIVE (fsfv))) #endif ) && COMPARISON_P (p->exp))) { x = p->exp; break; } else if ((code == EQ || (code == GE && GET_MODE_CLASS (inner_mode) == MODE_INT && (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT) && (STORE_FLAG_VALUE & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (inner_mode) - 1)))) #ifdef FLOAT_STORE_FLAG_VALUE || (code == GE && GET_MODE_CLASS (inner_mode) == MODE_FLOAT && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)), REAL_VALUE_NEGATIVE (fsfv))) #endif ) && COMPARISON_P (p->exp)) { reverse_code = 1; x = p->exp; break; } /* If this non-trapping address, e.g. fp + constant, the equivalent is a better operand since it may let us predict the value of the comparison. */ else if (!rtx_addr_can_trap_p (p->exp)) { arg1 = p->exp; continue; } } /* If we didn't find a useful equivalence for ARG1, we are done. Otherwise, set up for the next iteration. */ if (x == 0) break; /* If we need to reverse the comparison, make sure that that is possible -- we can't necessarily infer the value of GE from LT with floating-point operands. */ if (reverse_code) { enum rtx_code reversed = reversed_comparison_code (x, NULL_RTX); if (reversed == UNKNOWN) break; else code = reversed; } else if (COMPARISON_P (x)) code = GET_CODE (x); arg1 = XEXP (x, 0), arg2 = XEXP (x, 1); } /* Return our results. Return the modes from before fold_rtx because fold_rtx might produce const_int, and then it's too late. */ *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2); *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0); return code; } /* If X is a nontrivial arithmetic operation on an argument for which a constant value can be determined, return the result of operating on that value, as a constant. 
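For example, if (reg:SI 64) is known to hold (const_int 6), then (mult:SI (reg:SI 64) (const_int 2)) folds to (const_int 12).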
Otherwise, return X, possibly with one or more operands modified by recursive calls to this function. If X is a register whose contents are known, we do NOT return those contents here. equiv_constant is called to perform that task. INSN is the insn that we may be modifying. If it is 0, make a copy of X before modifying it. */ static rtx fold_rtx (rtx x, rtx insn) { enum rtx_code code; enum machine_mode mode; const char *fmt; int i; rtx new = 0; int copied = 0; int must_swap = 0; /* Folded equivalents of first two operands of X. */ rtx folded_arg0; rtx folded_arg1; /* Constant equivalents of first three operands of X; 0 when no such equivalent is known. */ rtx const_arg0; rtx const_arg1; rtx const_arg2; /* The mode of the first operand of X. We need this for sign and zero extends. */ enum machine_mode mode_arg0; if (x == 0) return x; mode = GET_MODE (x); code = GET_CODE (x); switch (code) { case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case REG: /* No use simplifying an EXPR_LIST since they are used only for lists of args in a function call's REG_EQUAL note. */ case EXPR_LIST: return x; #ifdef HAVE_cc0 case CC0: return prev_insn_cc0; #endif case PC: /* If the next insn is a CODE_LABEL followed by a jump table, PC's value is a LABEL_REF pointing to that label. That lets us fold switch statements on the VAX. */ { rtx next; if (insn && tablejump_p (insn, &next, NULL)) return gen_rtx_LABEL_REF (Pmode, next); } break; case SUBREG: /* See if we previously assigned a constant value to this SUBREG. */ if ((new = lookup_as_function (x, CONST_INT)) != 0 || (new = lookup_as_function (x, CONST_DOUBLE)) != 0) return new; /* If this is a paradoxical SUBREG, we have no idea what value the extra bits would have. However, if the operand is equivalent to a SUBREG whose operand is the same as our mode, and all the modes are within a word, we can just use the inner operand because these SUBREGs just say how to treat the register. Similarly if we find an integer constant. */ if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) { enum machine_mode imode = GET_MODE (SUBREG_REG (x)); struct table_elt *elt; if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD && GET_MODE_SIZE (imode) <= UNITS_PER_WORD && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode), imode)) != 0) for (elt = elt->first_same_value; elt; elt = elt->next_same_value) { if (CONSTANT_P (elt->exp) && GET_MODE (elt->exp) == VOIDmode) return elt->exp; if (GET_CODE (elt->exp) == SUBREG && GET_MODE (SUBREG_REG (elt->exp)) == mode && exp_equiv_p (elt->exp, elt->exp, 1, 0)) return copy_rtx (SUBREG_REG (elt->exp)); } return x; } /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG. We might be able to if the SUBREG is extracting a single word in an integral mode or extracting the low part. */ folded_arg0 = fold_rtx (SUBREG_REG (x), insn); const_arg0 = equiv_constant (folded_arg0); if (const_arg0) folded_arg0 = const_arg0; if (folded_arg0 != SUBREG_REG (x)) { new = simplify_subreg (mode, folded_arg0, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); if (new) return new; } if (REG_P (folded_arg0) && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))) { struct table_elt *elt; /* We can use HASH here since we know that canon_hash won't be called. 
*/ elt = lookup (folded_arg0, HASH (folded_arg0, GET_MODE (folded_arg0)), GET_MODE (folded_arg0)); if (elt) elt = elt->first_same_value; if (subreg_lowpart_p (x)) /* If this is a narrowing SUBREG and our operand is a REG, see if we can find an equivalence for REG that is an arithmetic operation in a wider mode where both operands are paradoxical SUBREGs from objects of our result mode. In that case, we couldn-t report an equivalent value for that operation, since we don't know what the extra bits will be. But we can find an equivalence for this SUBREG by folding that operation in the narrow mode. This allows us to fold arithmetic in narrow modes when the machine only supports word-sized arithmetic. Also look for a case where we have a SUBREG whose operand is the same as our result. If both modes are smaller than a word, we are simply interpreting a register in different modes and we can use the inner value. */ for (; elt; elt = elt->next_same_value) { enum rtx_code eltcode = GET_CODE (elt->exp); /* Just check for unary and binary operations. */ if (UNARY_P (elt->exp) && eltcode != SIGN_EXTEND && eltcode != ZERO_EXTEND && GET_CODE (XEXP (elt->exp, 0)) == SUBREG && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode && (GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (XEXP (elt->exp, 0))))) { rtx op0 = SUBREG_REG (XEXP (elt->exp, 0)); if (!REG_P (op0) && ! CONSTANT_P (op0)) op0 = fold_rtx (op0, NULL_RTX); op0 = equiv_constant (op0); if (op0) new = simplify_unary_operation (GET_CODE (elt->exp), mode, op0, mode); } else if (ARITHMETIC_P (elt->exp) && eltcode != DIV && eltcode != MOD && eltcode != UDIV && eltcode != UMOD && eltcode != ASHIFTRT && eltcode != LSHIFTRT && eltcode != ROTATE && eltcode != ROTATERT && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)) || CONSTANT_P (XEXP (elt->exp, 0))) && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1))) == mode)) || CONSTANT_P (XEXP (elt->exp, 1)))) { rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0)); rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1)); if (op0 && !REG_P (op0) && ! CONSTANT_P (op0)) op0 = fold_rtx (op0, NULL_RTX); if (op0) op0 = equiv_constant (op0); if (op1 && !REG_P (op1) && ! CONSTANT_P (op1)) op1 = fold_rtx (op1, NULL_RTX); if (op1) op1 = equiv_constant (op1); /* If we are looking for the low SImode part of (ashift:DI c (const_int 32)), it doesn't work to compute that in SImode, because a 32-bit shift in SImode is unpredictable. We know the value is 0. */ if (op0 && op1 && GET_CODE (elt->exp) == ASHIFT && GET_CODE (op1) == CONST_INT && INTVAL (op1) >= GET_MODE_BITSIZE (mode)) { if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp))) /* If the count fits in the inner mode's width, but exceeds the outer mode's width, the value will get truncated to 0 by the subreg. */ new = CONST0_RTX (mode); else /* If the count exceeds even the inner mode's width, don't fold this expression. */ new = 0; } else if (op0 && op1) new = simplify_binary_operation (GET_CODE (elt->exp), mode, op0, op1); } else if (GET_CODE (elt->exp) == SUBREG && GET_MODE (SUBREG_REG (elt->exp)) == mode && (GET_MODE_SIZE (GET_MODE (folded_arg0)) <= UNITS_PER_WORD) && exp_equiv_p (elt->exp, elt->exp, 1, 0)) new = copy_rtx (SUBREG_REG (elt->exp)); if (new) return new; } else /* A SUBREG resulting from a zero extension may fold to zero if it extracts higher bits than the ZERO_EXTEND's source bits. 
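For example, on a little-endian target, if (reg:SI 70) is known to be (zero_extend:SI (reg:QI 71)), then (subreg:HI (reg:SI 70) 2) reads only bits 16 through 31 of the value, all of which are known to be zero.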
FIXME: if combine tried to, er, combine these instructions, this transformation may be moved to simplify_subreg. */ for (; elt; elt = elt->next_same_value) { if (GET_CODE (elt->exp) == ZERO_EXTEND && subreg_lsb (x) >= GET_MODE_BITSIZE (GET_MODE (XEXP (elt->exp, 0)))) return CONST0_RTX (mode); } } return x; case NOT: case NEG: /* If we have (NOT Y), see if Y is known to be (NOT Z). If so, (NOT Y) simplifies to Z. Similarly for NEG. */ new = lookup_as_function (XEXP (x, 0), code); if (new) return fold_rtx (copy_rtx (XEXP (new, 0)), insn); break; case MEM: /* If we are not actually processing an insn, don't try to find the best address. Not only don't we care, but we could modify the MEM in an invalid way since we have no insn to validate against. */ if (insn != 0) find_best_addr (insn, &XEXP (x, 0), GET_MODE (x)); { /* Even if we don't fold in the insn itself, we can safely do so here, in hopes of getting a constant. */ rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX); rtx base = 0; HOST_WIDE_INT offset = 0; if (REG_P (addr) && REGNO_QTY_VALID_P (REGNO (addr))) { int addr_q = REG_QTY (REGNO (addr)); struct qty_table_elem *addr_ent = &qty_table[addr_q]; if (GET_MODE (addr) == addr_ent->mode && addr_ent->const_rtx != NULL_RTX) addr = addr_ent->const_rtx; } /* If address is constant, split it into a base and integer offset. */ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF) base = addr; else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT) { base = XEXP (XEXP (addr, 0), 0); offset = INTVAL (XEXP (XEXP (addr, 0), 1)); } else if (GET_CODE (addr) == LO_SUM && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF) base = XEXP (addr, 1); /* If this is a constant pool reference, we can fold it into its constant to allow better value tracking. */ if (base && GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base)) { rtx constant = get_pool_constant (base); enum machine_mode const_mode = get_pool_mode (base); rtx new; if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT) { constant_pool_entries_cost = COST (constant); constant_pool_entries_regcost = approx_reg_cost (constant); } /* If we are loading the full constant, we have an equivalence. */ if (offset == 0 && mode == const_mode) return constant; /* If this actually isn't a constant (weird!), we can't do anything. Otherwise, handle the two most common cases: extracting a word from a multi-word constant, and extracting the low-order bits. Other cases don't seem common enough to worry about. */ if (! CONSTANT_P (constant)) return x; if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_SIZE (mode) == UNITS_PER_WORD && offset % UNITS_PER_WORD == 0 && (new = operand_subword (constant, offset / UNITS_PER_WORD, 0, const_mode)) != 0) return new; if (((BYTES_BIG_ENDIAN && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1) || (! BYTES_BIG_ENDIAN && offset == 0)) && (new = gen_lowpart (mode, constant)) != 0) return new; } /* If this is a reference to a label at a known position in a jump table, we also know its value. 
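(Loading element N of an ADDR_VEC yields the Nth LABEL_REF directly; for an ADDR_DIFF_VEC it yields the difference between the Nth label and the table's base label.)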
*/ if (base && GET_CODE (base) == LABEL_REF) { rtx label = XEXP (base, 0); rtx table_insn = NEXT_INSN (label); if (table_insn && GET_CODE (table_insn) == JUMP_INSN && GET_CODE (PATTERN (table_insn)) == ADDR_VEC) { rtx table = PATTERN (table_insn); if (offset >= 0 && (offset / GET_MODE_SIZE (GET_MODE (table)) < XVECLEN (table, 0))) return XVECEXP (table, 0, offset / GET_MODE_SIZE (GET_MODE (table))); } if (table_insn && GET_CODE (table_insn) == JUMP_INSN && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC) { rtx table = PATTERN (table_insn); if (offset >= 0 && (offset / GET_MODE_SIZE (GET_MODE (table)) < XVECLEN (table, 1))) { offset /= GET_MODE_SIZE (GET_MODE (table)); new = gen_rtx_MINUS (Pmode, XVECEXP (table, 1, offset), XEXP (table, 0)); if (GET_MODE (table) != Pmode) new = gen_rtx_TRUNCATE (GET_MODE (table), new); /* Indicate this is a constant. This isn't a valid form of CONST, but it will only be used to fold the next insns and then discarded, so it should be safe. Note this expression must be explicitly discarded, by cse_insn, else it may end up in a REG_EQUAL note and "escape" to cause problems elsewhere. */ return gen_rtx_CONST (GET_MODE (new), new); } } } return x; } #ifdef NO_FUNCTION_CSE case CALL: if (CONSTANT_P (XEXP (XEXP (x, 0), 0))) return x; break; #endif case ASM_OPERANDS: for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) validate_change (insn, &ASM_OPERANDS_INPUT (x, i), fold_rtx (ASM_OPERANDS_INPUT (x, i), insn), 0); break; default: break; } const_arg0 = 0; const_arg1 = 0; const_arg2 = 0; mode_arg0 = VOIDmode; /* Try folding our operands. Then see which ones have constant values known. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') { rtx arg = XEXP (x, i); rtx folded_arg = arg, const_arg = 0; enum machine_mode mode_arg = GET_MODE (arg); rtx cheap_arg, expensive_arg; rtx replacements[2]; int j; int old_cost = COST_IN (XEXP (x, i), code); /* Most arguments are cheap, so handle them specially. */ switch (GET_CODE (arg)) { case REG: /* This is the same as calling equiv_constant; it is duplicated here for speed. */ if (REGNO_QTY_VALID_P (REGNO (arg))) { int arg_q = REG_QTY (REGNO (arg)); struct qty_table_elem *arg_ent = &qty_table[arg_q]; if (arg_ent->const_rtx != NULL_RTX && !REG_P (arg_ent->const_rtx) && GET_CODE (arg_ent->const_rtx) != PLUS) const_arg = gen_lowpart (GET_MODE (arg), arg_ent->const_rtx); } break; case CONST: case CONST_INT: case SYMBOL_REF: case LABEL_REF: case CONST_DOUBLE: case CONST_VECTOR: const_arg = arg; break; #ifdef HAVE_cc0 case CC0: folded_arg = prev_insn_cc0; mode_arg = prev_insn_cc0_mode; const_arg = equiv_constant (folded_arg); break; #endif default: folded_arg = fold_rtx (arg, insn); const_arg = equiv_constant (folded_arg); } /* For the first three operands, see if the operand is constant or equivalent to a constant. */ switch (i) { case 0: folded_arg0 = folded_arg; const_arg0 = const_arg; mode_arg0 = mode_arg; break; case 1: folded_arg1 = folded_arg; const_arg1 = const_arg; break; case 2: const_arg2 = const_arg; break; } /* Pick the least expensive of the folded argument and an equivalent constant argument. */ if (const_arg == 0 || const_arg == folded_arg || COST_IN (const_arg, code) > COST_IN (folded_arg, code)) cheap_arg = folded_arg, expensive_arg = const_arg; else cheap_arg = const_arg, expensive_arg = folded_arg; /* Try to replace the operand with the cheapest of the two possibilities. 
If it doesn't work and this is either of the first two operands of a commutative operation, try swapping them. If THAT fails, try the more expensive, provided it is cheaper than what is already there. */ if (cheap_arg == XEXP (x, i)) continue; if (insn == 0 && ! copied) { x = copy_rtx (x); copied = 1; } /* Order the replacements from cheapest to most expensive. */ replacements[0] = cheap_arg; replacements[1] = expensive_arg; for (j = 0; j < 2 && replacements[j]; j++) { int new_cost = COST_IN (replacements[j], code); /* Stop if what existed before was cheaper. Prefer constants in the case of a tie. */ if (new_cost > old_cost || (new_cost == old_cost && CONSTANT_P (XEXP (x, i)))) break; /* It's not safe to substitute the operand of a conversion operator with a constant, as the conversion's identity depends upon the mode of it's operand. This optimization is handled by the call to simplify_unary_operation. */ if (GET_RTX_CLASS (code) == RTX_UNARY && GET_MODE (replacements[j]) != mode_arg0 && (code == ZERO_EXTEND || code == SIGN_EXTEND || code == TRUNCATE || code == FLOAT_TRUNCATE || code == FLOAT_EXTEND || code == FLOAT || code == FIX || code == UNSIGNED_FLOAT || code == UNSIGNED_FIX)) continue; if (validate_change (insn, &XEXP (x, i), replacements[j], 0)) break; if (GET_RTX_CLASS (code) == RTX_COMM_COMPARE || GET_RTX_CLASS (code) == RTX_COMM_ARITH) { validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1); validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1); if (apply_change_group ()) { /* Swap them back to be invalid so that this loop can continue and flag them to be swapped back later. */ rtx tem; tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1); XEXP (x, 1) = tem; must_swap = 1; break; } } } } else { if (fmt[i] == 'E') /* Don't try to fold inside of a vector of expressions. Doing nothing is harmless. */ {;} } /* If a commutative operation, place a constant integer as the second operand unless the first operand is also a constant integer. Otherwise, place any constant second unless the first operand is also a constant. */ if (COMMUTATIVE_P (x)) { if (must_swap || swap_commutative_operands_p (const_arg0 ? const_arg0 : XEXP (x, 0), const_arg1 ? const_arg1 : XEXP (x, 1))) { rtx tem = XEXP (x, 0); if (insn == 0 && ! copied) { x = copy_rtx (x); copied = 1; } validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1); validate_change (insn, &XEXP (x, 1), tem, 1); if (apply_change_group ()) { tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem; tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem; } } } /* If X is an arithmetic operation, see if we can simplify it. */ switch (GET_RTX_CLASS (code)) { case RTX_UNARY: { int is_const = 0; /* We can't simplify extension ops unless we know the original mode. */ if ((code == ZERO_EXTEND || code == SIGN_EXTEND) && mode_arg0 == VOIDmode) break; /* If we had a CONST, strip it off and put it back later if we fold. */ if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST) is_const = 1, const_arg0 = XEXP (const_arg0, 0); new = simplify_unary_operation (code, mode, const_arg0 ? const_arg0 : folded_arg0, mode_arg0); if (new != 0 && is_const) new = gen_rtx_CONST (mode, new); } break; case RTX_COMPARE: case RTX_COMM_COMPARE: /* See what items are actually being compared and set FOLDED_ARG[01] to those values and CODE to the actual comparison code. If any are constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't do anything if both operands are already known to be constant. 
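find_comparison_args may follow chains of equivalences; for example, a comparison of (cc0) against zero is traced back to the operands of the COMPARE that set cc0.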
*/ if (const_arg0 == 0 || const_arg1 == 0) { struct table_elt *p0, *p1; rtx true_rtx = const_true_rtx, false_rtx = const0_rtx; enum machine_mode mode_arg1; #ifdef FLOAT_STORE_FLAG_VALUE if (GET_MODE_CLASS (mode) == MODE_FLOAT) { true_rtx = (CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode), mode)); false_rtx = CONST0_RTX (mode); } #endif code = find_comparison_args (code, &folded_arg0, &folded_arg1, &mode_arg0, &mode_arg1); const_arg0 = equiv_constant (folded_arg0); const_arg1 = equiv_constant (folded_arg1); /* If the mode is VOIDmode or a MODE_CC mode, we don't know what kinds of things are being compared, so we can't do anything with this comparison. */ if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC) break; /* If we do not now have two constants being compared, see if we can nevertheless deduce some things about the comparison. */ if (const_arg0 == 0 || const_arg1 == 0) { /* Some addresses are known to be nonzero. We don't know their sign, but equality comparisons are known. */ if (const_arg1 == const0_rtx && nonzero_address_p (folded_arg0)) { if (code == EQ) return false_rtx; else if (code == NE) return true_rtx; } /* See if the two operands are the same. */ if (folded_arg0 == folded_arg1 || (REG_P (folded_arg0) && REG_P (folded_arg1) && (REG_QTY (REGNO (folded_arg0)) == REG_QTY (REGNO (folded_arg1)))) || ((p0 = lookup (folded_arg0, (safe_hash (folded_arg0, mode_arg0) & HASH_MASK), mode_arg0)) && (p1 = lookup (folded_arg1, (safe_hash (folded_arg1, mode_arg0) & HASH_MASK), mode_arg0)) && p0->first_same_value == p1->first_same_value)) { /* Sadly two equal NaNs are not equivalent. */ if (!HONOR_NANS (mode_arg0)) return ((code == EQ || code == LE || code == GE || code == LEU || code == GEU || code == UNEQ || code == UNLE || code == UNGE || code == ORDERED) ? true_rtx : false_rtx); /* Take care for the FP compares we can resolve. */ if (code == UNEQ || code == UNLE || code == UNGE) return true_rtx; if (code == LTGT || code == LT || code == GT) return false_rtx; } /* If FOLDED_ARG0 is a register, see if the comparison we are doing now is either the same as we did before or the reverse (we only check the reverse if not floating-point). */ else if (REG_P (folded_arg0)) { int qty = REG_QTY (REGNO (folded_arg0)); if (REGNO_QTY_VALID_P (REGNO (folded_arg0))) { struct qty_table_elem *ent = &qty_table[qty]; if ((comparison_dominates_p (ent->comparison_code, code) || (! FLOAT_MODE_P (mode_arg0) && comparison_dominates_p (ent->comparison_code, reverse_condition (code)))) && (rtx_equal_p (ent->comparison_const, folded_arg1) || (const_arg1 && rtx_equal_p (ent->comparison_const, const_arg1)) || (REG_P (folded_arg1) && (REG_QTY (REGNO (folded_arg1)) == ent->comparison_qty)))) return (comparison_dominates_p (ent->comparison_code, code) ? true_rtx : false_rtx); } } } } /* If we are comparing against zero, see if the first operand is equivalent to an IOR with a constant. If so, we may be able to determine the result of this comparison. 
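For instance, if the operand is known equivalent to (ior:SI (reg:SI 66) (const_int 4)) it cannot be zero, so EQ against zero folds to false and NE to true; and if the OR'd constant has its sign bit set, LT and LE fold to true and GT and GE to false.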
*/ if (const_arg1 == const0_rtx) { rtx y = lookup_as_function (folded_arg0, IOR); rtx inner_const; if (y != 0 && (inner_const = equiv_constant (XEXP (y, 1))) != 0 && GET_CODE (inner_const) == CONST_INT && INTVAL (inner_const) != 0) { int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1; int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum && (INTVAL (inner_const) & ((HOST_WIDE_INT) 1 << sign_bitnum))); rtx true_rtx = const_true_rtx, false_rtx = const0_rtx; #ifdef FLOAT_STORE_FLAG_VALUE if (GET_MODE_CLASS (mode) == MODE_FLOAT) { true_rtx = (CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode), mode)); false_rtx = CONST0_RTX (mode); } #endif switch (code) { case EQ: return false_rtx; case NE: return true_rtx; case LT: case LE: if (has_sign) return true_rtx; break; case GT: case GE: if (has_sign) return false_rtx; break; default: break; } } } { rtx op0 = const_arg0 ? const_arg0 : folded_arg0; rtx op1 = const_arg1 ? const_arg1 : folded_arg1; new = simplify_relational_operation (code, mode, mode_arg0, op0, op1); } break; case RTX_BIN_ARITH: case RTX_COMM_ARITH: switch (code) { case PLUS: /* If the second operand is a LABEL_REF, see if the first is a MINUS with that LABEL_REF as its second operand. If so, the result is the first operand of that MINUS. This handles switches with an ADDR_DIFF_VEC table. */ if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF) { rtx y = GET_CODE (folded_arg0) == MINUS ? folded_arg0 : lookup_as_function (folded_arg0, MINUS); if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0)) return XEXP (y, 0); /* Now try for a CONST of a MINUS like the above. */ if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0 : lookup_as_function (folded_arg0, CONST))) != 0 && GET_CODE (XEXP (y, 0)) == MINUS && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg1, 0)) return XEXP (XEXP (y, 0), 0); } /* Likewise if the operands are in the other order. */ if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF) { rtx y = GET_CODE (folded_arg1) == MINUS ? folded_arg1 : lookup_as_function (folded_arg1, MINUS); if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0)) return XEXP (y, 0); /* Now try for a CONST of a MINUS like the above. */ if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1 : lookup_as_function (folded_arg1, CONST))) != 0 && GET_CODE (XEXP (y, 0)) == MINUS && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg0, 0)) return XEXP (XEXP (y, 0), 0); } /* If second operand is a register equivalent to a negative CONST_INT, see if we can find a register equivalent to the positive constant. Make a MINUS if so. Don't do this for a non-negative constant since we might then alternate between choosing positive and negative constants. Having the positive constant previously-used is the more common case. Be sure the resulting constant is non-negative; if const_arg1 were the smallest negative number this would overflow: depending on the mode, this would either just be the same value (and hence not save anything) or be incorrect. */ if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT && INTVAL (const_arg1) < 0 /* This used to test -INTVAL (const_arg1) >= 0 But The Sun V5.0 compilers mis-compiled that test. So instead we test for the problematic value in a more direct manner and hope the Sun compilers get it correct. 
*/ && INTVAL (const_arg1) != ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)) && REG_P (folded_arg1)) { rtx new_const = GEN_INT (-INTVAL (const_arg1)); struct table_elt *p = lookup (new_const, safe_hash (new_const, mode) & HASH_MASK, mode); if (p) for (p = p->first_same_value; p; p = p->next_same_value) if (REG_P (p->exp)) return simplify_gen_binary (MINUS, mode, folded_arg0, canon_reg (p->exp, NULL_RTX)); } goto from_plus; case MINUS: /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2). If so, produce (PLUS Z C2-C). */ if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT) { rtx y = lookup_as_function (XEXP (x, 0), PLUS); if (y && GET_CODE (XEXP (y, 1)) == CONST_INT) return fold_rtx (plus_constant (copy_rtx (y), -INTVAL (const_arg1)), NULL_RTX); } /* Fall through. */ from_plus: case SMIN: case SMAX: case UMIN: case UMAX: case IOR: case AND: case XOR: case MULT: case ASHIFT: case LSHIFTRT: case ASHIFTRT: /* If we have ( ) for an associative OP and REG is known to be of similar form, we may be able to replace the operation with a combined operation. This may eliminate the intermediate operation if every use is simplified in this way. Note that the similar optimization done by combine.c only works if the intermediate operation's result has only one reference. */ if (REG_P (folded_arg0) && const_arg1 && GET_CODE (const_arg1) == CONST_INT) { int is_shift = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT); rtx y = lookup_as_function (folded_arg0, code); rtx inner_const; enum rtx_code associate_code; rtx new_const; if (y == 0 || 0 == (inner_const = equiv_constant (fold_rtx (XEXP (y, 1), 0))) || GET_CODE (inner_const) != CONST_INT /* If we have compiled a statement like "if (x == (x & mask1))", and now are looking at "x & mask2", we will have a case where the first operand of Y is the same as our first operand. Unless we detect this case, an infinite loop will result. */ || XEXP (y, 0) == folded_arg0) break; /* Don't associate these operations if they are a PLUS with the same constant and it is a power of two. These might be doable with a pre- or post-increment. Similarly for two subtracts of identical powers of two with post decrement. */ if (code == PLUS && const_arg1 == inner_const && ((HAVE_PRE_INCREMENT && exact_log2 (INTVAL (const_arg1)) >= 0) || (HAVE_POST_INCREMENT && exact_log2 (INTVAL (const_arg1)) >= 0) || (HAVE_PRE_DECREMENT && exact_log2 (- INTVAL (const_arg1)) >= 0) || (HAVE_POST_DECREMENT && exact_log2 (- INTVAL (const_arg1)) >= 0))) break; /* Compute the code used to compose the constants. For example, A-C1-C2 is A-(C1 + C2), so if CODE == MINUS, we want PLUS. */ associate_code = (is_shift || code == MINUS ? PLUS : code); new_const = simplify_binary_operation (associate_code, mode, const_arg1, inner_const); if (new_const == 0) break; /* If we are associating shift operations, don't let this produce a shift of the size of the object or larger. This could occur when we follow a sign-extend by a right shift on a machine that does a sign-extend as a pair of shifts. */ if (is_shift && GET_CODE (new_const) == CONST_INT && INTVAL (new_const) >= GET_MODE_BITSIZE (mode)) { /* As an exception, we can turn an ASHIFTRT of this form into a shift of the number of bits - 1. */ if (code == ASHIFTRT) new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1); else break; } y = copy_rtx (XEXP (y, 0)); /* If Y contains our first operand (the most common way this can happen is if Y is a MEM), we would do into an infinite loop if we tried to fold it. So don't in that case. */ if (! 
reg_mentioned_p (folded_arg0, y)) y = fold_rtx (y, insn); return simplify_gen_binary (code, mode, y, new_const); } break; case DIV: case UDIV: /* ??? The associative optimization performed immediately above is also possible for DIV and UDIV using associate_code of MULT. However, we would need extra code to verify that the multiplication does not overflow, that is, there is no overflow in the calculation of new_const. */ break; default: break; } new = simplify_binary_operation (code, mode, const_arg0 ? const_arg0 : folded_arg0, const_arg1 ? const_arg1 : folded_arg1); break; case RTX_OBJ: /* (lo_sum (high X) X) is simply X. */ if (code == LO_SUM && const_arg0 != 0 && GET_CODE (const_arg0) == HIGH && rtx_equal_p (XEXP (const_arg0, 0), const_arg1)) return const_arg1; break; case RTX_TERNARY: case RTX_BITFIELD_OPS: new = simplify_ternary_operation (code, mode, mode_arg0, const_arg0 ? const_arg0 : folded_arg0, const_arg1 ? const_arg1 : folded_arg1, const_arg2 ? const_arg2 : XEXP (x, 2)); break; default: break; } return new ? new : x; } /* Return a constant value currently equivalent to X. Return 0 if we don't know one. */ static rtx equiv_constant (rtx x) { if (REG_P (x) && REGNO_QTY_VALID_P (REGNO (x))) { int x_q = REG_QTY (REGNO (x)); struct qty_table_elem *x_ent = &qty_table[x_q]; if (x_ent->const_rtx) x = gen_lowpart (GET_MODE (x), x_ent->const_rtx); } if (x == 0 || CONSTANT_P (x)) return x; /* If X is a MEM, try to fold it outside the context of any insn to see if it might be equivalent to a constant. That handles the case where it is a constant-pool reference. Then try to look it up in the hash table in case it is something whose value we have seen before. */ if (MEM_P (x)) { struct table_elt *elt; x = fold_rtx (x, NULL_RTX); if (CONSTANT_P (x)) return x; elt = lookup (x, safe_hash (x, GET_MODE (x)) & HASH_MASK, GET_MODE (x)); if (elt == 0) return 0; for (elt = elt->first_same_value; elt; elt = elt->next_same_value) if (elt->is_const && CONSTANT_P (elt->exp)) return elt->exp; } return 0; } /* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the least-significant part of X. MODE specifies how big a part of X to return. If the requested operation cannot be done, 0 is returned. This is similar to gen_lowpart_general in emit-rtl.c. */ rtx gen_lowpart_if_possible (enum machine_mode mode, rtx x) { rtx result = gen_lowpart_common (mode, x); if (result) return result; else if (MEM_P (x)) { /* This is the only other case we handle. */ int offset = 0; rtx new; if (WORDS_BIG_ENDIAN) offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD) - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD)); if (BYTES_BIG_ENDIAN) /* Adjust the address so that the address-after-the-data is unchanged. */ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)) - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x)))); new = adjust_address_nv (x, mode, offset); if (! memory_address_p (mode, XEXP (new, 0))) return 0; return new; } else return 0; } /* Given INSN, a jump insn, PATH_TAKEN indicates if we are following the "taken" branch. It will be zero if not. In certain cases, this can cause us to add an equivalence. For example, if we are following the taken case of if (i == 2) we can add the fact that `i' and '2' are now equivalent. In any case, we can record that this comparison was passed. If the same comparison is seen later, we will know its value. 
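The recording itself is done by record_jump_cond below: an EQ in a non-floating mode merges the hash table classes of the two operands, while any other comparison (or a floating-point equality) is remembered in OP0's qty_table entry so that comparison_dominates_p can decide related comparisons later.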
*/ static void record_jump_equiv (rtx insn, int taken) { int cond_known_true; rtx op0, op1; rtx set; enum machine_mode mode, mode0, mode1; int reversed_nonequality = 0; enum rtx_code code; /* Ensure this is the right kind of insn. */ if (! any_condjump_p (insn)) return; set = pc_set (insn); /* See if this jump condition is known true or false. */ if (taken) cond_known_true = (XEXP (SET_SRC (set), 2) == pc_rtx); else cond_known_true = (XEXP (SET_SRC (set), 1) == pc_rtx); /* Get the type of comparison being done and the operands being compared. If we had to reverse a non-equality condition, record that fact so we know that it isn't valid for floating-point. */ code = GET_CODE (XEXP (SET_SRC (set), 0)); op0 = fold_rtx (XEXP (XEXP (SET_SRC (set), 0), 0), insn); op1 = fold_rtx (XEXP (XEXP (SET_SRC (set), 0), 1), insn); code = find_comparison_args (code, &op0, &op1, &mode0, &mode1); if (! cond_known_true) { code = reversed_comparison_code_parts (code, op0, op1, insn); /* Don't remember if we can't find the inverse. */ if (code == UNKNOWN) return; } /* The mode is the mode of the non-constant. */ mode = mode0; if (mode1 != VOIDmode) mode = mode1; record_jump_cond (code, mode, op0, op1, reversed_nonequality); } /* We know that comparison CODE applied to OP0 and OP1 in MODE is true. REVERSED_NONEQUALITY is nonzero if CODE had to be swapped. Make any useful entries we can with that information. Called from above function and called recursively. */ static void record_jump_cond (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1, int reversed_nonequality) { unsigned op0_hash, op1_hash; int op0_in_memory, op1_in_memory; struct table_elt *op0_elt, *op1_elt; /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG, we know that they are also equal in the smaller mode (this is also true for all smaller modes whether or not there is a SUBREG, but is not worth testing for with no SUBREG). */ /* Note that GET_MODE (op0) may not equal MODE. */ if (code == EQ && GET_CODE (op0) == SUBREG && (GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))) { enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0)); rtx tem = gen_lowpart (inner_mode, op1); record_jump_cond (code, mode, SUBREG_REG (op0), tem ? tem : gen_rtx_SUBREG (inner_mode, op1, 0), reversed_nonequality); } if (code == EQ && GET_CODE (op1) == SUBREG && (GET_MODE_SIZE (GET_MODE (op1)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))))) { enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1)); rtx tem = gen_lowpart (inner_mode, op0); record_jump_cond (code, mode, SUBREG_REG (op1), tem ? tem : gen_rtx_SUBREG (inner_mode, op0, 0), reversed_nonequality); } /* Similarly, if this is an NE comparison, and either is a SUBREG making a smaller mode, we know the whole thing is also NE. */ /* Note that GET_MODE (op0) may not equal MODE; if we test MODE instead, we can get an infinite recursion alternating between two modes each wider than MODE. */ if (code == NE && GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0) && (GET_MODE_SIZE (GET_MODE (op0)) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))) { enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0)); rtx tem = gen_lowpart (inner_mode, op1); record_jump_cond (code, mode, SUBREG_REG (op0), tem ? 
tem : gen_rtx_SUBREG (inner_mode, op1, 0), reversed_nonequality); } if (code == NE && GET_CODE (op1) == SUBREG && subreg_lowpart_p (op1) && (GET_MODE_SIZE (GET_MODE (op1)) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))))) { enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1)); rtx tem = gen_lowpart (inner_mode, op0); record_jump_cond (code, mode, SUBREG_REG (op1), tem ? tem : gen_rtx_SUBREG (inner_mode, op0, 0), reversed_nonequality); } /* Hash both operands. */ do_not_record = 0; hash_arg_in_memory = 0; op0_hash = HASH (op0, mode); op0_in_memory = hash_arg_in_memory; if (do_not_record) return; do_not_record = 0; hash_arg_in_memory = 0; op1_hash = HASH (op1, mode); op1_in_memory = hash_arg_in_memory; if (do_not_record) return; /* Look up both operands. */ op0_elt = lookup (op0, op0_hash, mode); op1_elt = lookup (op1, op1_hash, mode); /* If both operands are already equivalent or if they are not in the table but are identical, do nothing. */ if ((op0_elt != 0 && op1_elt != 0 && op0_elt->first_same_value == op1_elt->first_same_value) || op0 == op1 || rtx_equal_p (op0, op1)) return; /* If we aren't setting two things equal all we can do is save this comparison. Similarly if this is floating-point. In the latter case, OP1 might be zero and both -0.0 and 0.0 are equal to it. If we record the equality, we might inadvertently delete code whose intent was to change -0 to +0. */ if (code != EQ || FLOAT_MODE_P (GET_MODE (op0))) { struct qty_table_elem *ent; int qty; /* If we reversed a floating-point comparison, if OP0 is not a register, or if OP1 is neither a register or constant, we can't do anything. */ if (!REG_P (op1)) op1 = equiv_constant (op1); if ((reversed_nonequality && FLOAT_MODE_P (mode)) || !REG_P (op0) || op1 == 0) return; /* Put OP0 in the hash table if it isn't already. This gives it a new quantity number. */ if (op0_elt == 0) { if (insert_regs (op0, NULL, 0)) { rehash_using_reg (op0); op0_hash = HASH (op0, mode); /* If OP0 is contained in OP1, this changes its hash code as well. Faster to rehash than to check, except for the simple case of a constant. */ if (! CONSTANT_P (op1)) op1_hash = HASH (op1,mode); } op0_elt = hash_insert (op0, NULL, op0_hash, mode); op0_elt->in_memory = op0_in_memory; } qty = REG_QTY (REGNO (op0)); ent = &qty_table[qty]; ent->comparison_code = code; if (REG_P (op1)) { /* Look it up again--in case op0 and op1 are the same. */ op1_elt = lookup (op1, op1_hash, mode); /* Put OP1 in the hash table so it gets a new quantity number. */ if (op1_elt == 0) { if (insert_regs (op1, NULL, 0)) { rehash_using_reg (op1); op1_hash = HASH (op1, mode); } op1_elt = hash_insert (op1, NULL, op1_hash, mode); op1_elt->in_memory = op1_in_memory; } ent->comparison_const = NULL_RTX; ent->comparison_qty = REG_QTY (REGNO (op1)); } else { ent->comparison_const = op1; ent->comparison_qty = -1; } return; } /* If either side is still missing an equivalence, make it now, then merge the equivalences. */ if (op0_elt == 0) { if (insert_regs (op0, NULL, 0)) { rehash_using_reg (op0); op0_hash = HASH (op0, mode); } op0_elt = hash_insert (op0, NULL, op0_hash, mode); op0_elt->in_memory = op0_in_memory; } if (op1_elt == 0) { if (insert_regs (op1, NULL, 0)) { rehash_using_reg (op1); op1_hash = HASH (op1, mode); } op1_elt = hash_insert (op1, NULL, op1_hash, mode); op1_elt->in_memory = op1_in_memory; } merge_equiv_classes (op0_elt, op1_elt); last_jump_equiv_class = op0_elt; } /* CSE processing for one instruction. 
First simplify sources and addresses of all assignments in the instruction, using previously-computed equivalents values. Then install the new sources and destinations in the table of available values. If LIBCALL_INSN is nonzero, don't record any equivalence made in the insn. It means that INSN is inside libcall block. In this case LIBCALL_INSN is the corresponding insn with REG_LIBCALL. */ /* Data on one SET contained in the instruction. */ struct set { /* The SET rtx itself. */ rtx rtl; /* The SET_SRC of the rtx (the original value, if it is changing). */ rtx src; /* The hash-table element for the SET_SRC of the SET. */ struct table_elt *src_elt; /* Hash value for the SET_SRC. */ unsigned src_hash; /* Hash value for the SET_DEST. */ unsigned dest_hash; /* The SET_DEST, with SUBREG, etc., stripped. */ rtx inner_dest; /* Nonzero if the SET_SRC is in memory. */ char src_in_memory; /* Nonzero if the SET_SRC contains something whose value cannot be predicted and understood. */ char src_volatile; /* Original machine mode, in case it becomes a CONST_INT. The size of this field should match the size of the mode field of struct rtx_def (see rtl.h). */ ENUM_BITFIELD(machine_mode) mode : 8; /* A constant equivalent for SET_SRC, if any. */ rtx src_const; /* Original SET_SRC value used for libcall notes. */ rtx orig_src; /* Hash value of constant equivalent for SET_SRC. */ unsigned src_const_hash; /* Table entry for constant equivalent for SET_SRC, if any. */ struct table_elt *src_const_elt; }; static void cse_insn (rtx insn, rtx libcall_insn) { rtx x = PATTERN (insn); int i; rtx tem; int n_sets = 0; #ifdef HAVE_cc0 /* Records what this insn does to set CC0. */ rtx this_insn_cc0 = 0; enum machine_mode this_insn_cc0_mode = VOIDmode; #endif rtx src_eqv = 0; struct table_elt *src_eqv_elt = 0; int src_eqv_volatile = 0; int src_eqv_in_memory = 0; unsigned src_eqv_hash = 0; struct set *sets = (struct set *) 0; this_insn = insn; /* Find all the SETs and CLOBBERs in this instruction. Record all the SETs in the array `set' and count them. Also determine whether there is a CLOBBER that invalidates all memory references, or all references at varying addresses. */ if (GET_CODE (insn) == CALL_INSN) { for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1)) { if (GET_CODE (XEXP (tem, 0)) == CLOBBER) invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode); XEXP (tem, 0) = canon_reg (XEXP (tem, 0), insn); } } if (GET_CODE (x) == SET) { sets = alloca (sizeof (struct set)); sets[0].rtl = x; /* Ignore SETs that are unconditional jumps. They never need cse processing, so this does not hurt. The reason is not efficiency but rather so that we can test at the end for instructions that have been simplified to unconditional jumps and not be misled by unchanged instructions that were unconditional jumps to begin with. */ if (SET_DEST (x) == pc_rtx && GET_CODE (SET_SRC (x)) == LABEL_REF) ; /* Don't count call-insns, (set (reg 0) (call ...)), as a set. The hard function value register is used only once, to copy to someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)! Ensure we invalidate the destination register. On the 80386 no other code would invalidate it since it is a fixed_reg. We need not check the return of apply_change_group; see canon_reg. 
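   Concretely, a value-returning call has a pattern roughly of the form
   (set (reg 0) (call (mem (symbol_ref ...)) ...)); the branch below only canonicalizes and folds
   the call expression and then invalidates the destination register, leaving n_sets at zero so no
   equivalence is recorded for it.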
*/ else if (GET_CODE (SET_SRC (x)) == CALL) { canon_reg (SET_SRC (x), insn); apply_change_group (); fold_rtx (SET_SRC (x), insn); invalidate (SET_DEST (x), VOIDmode); } else n_sets = 1; } else if (GET_CODE (x) == PARALLEL) { int lim = XVECLEN (x, 0); sets = alloca (lim * sizeof (struct set)); /* Find all regs explicitly clobbered in this insn, and ensure they are not replaced with any other regs elsewhere in this insn. When a reg that is clobbered is also used for input, we should presume that that is for a reason, and we should not substitute some other register which is not supposed to be clobbered. Therefore, this loop cannot be merged into the one below because a CALL may precede a CLOBBER and refer to the value clobbered. We must not let a canonicalization do anything in that case. */ for (i = 0; i < lim; i++) { rtx y = XVECEXP (x, 0, i); if (GET_CODE (y) == CLOBBER) { rtx clobbered = XEXP (y, 0); if (REG_P (clobbered) || GET_CODE (clobbered) == SUBREG) invalidate (clobbered, VOIDmode); else if (GET_CODE (clobbered) == STRICT_LOW_PART || GET_CODE (clobbered) == ZERO_EXTRACT) invalidate (XEXP (clobbered, 0), GET_MODE (clobbered)); } } for (i = 0; i < lim; i++) { rtx y = XVECEXP (x, 0, i); if (GET_CODE (y) == SET) { /* As above, we ignore unconditional jumps and call-insns and ignore the result of apply_change_group. */ if (GET_CODE (SET_SRC (y)) == CALL) { canon_reg (SET_SRC (y), insn); apply_change_group (); fold_rtx (SET_SRC (y), insn); invalidate (SET_DEST (y), VOIDmode); } else if (SET_DEST (y) == pc_rtx && GET_CODE (SET_SRC (y)) == LABEL_REF) ; else sets[n_sets++].rtl = y; } else if (GET_CODE (y) == CLOBBER) { /* If we clobber memory, canon the address. This does nothing when a register is clobbered because we have already invalidated the reg. */ if (MEM_P (XEXP (y, 0))) canon_reg (XEXP (y, 0), NULL_RTX); } else if (GET_CODE (y) == USE && ! (REG_P (XEXP (y, 0)) && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER)) canon_reg (y, NULL_RTX); else if (GET_CODE (y) == CALL) { /* The result of apply_change_group can be ignored; see canon_reg. */ canon_reg (y, insn); apply_change_group (); fold_rtx (y, insn); } } } else if (GET_CODE (x) == CLOBBER) { if (MEM_P (XEXP (x, 0))) canon_reg (XEXP (x, 0), NULL_RTX); } /* Canonicalize a USE of a pseudo register or memory location. */ else if (GET_CODE (x) == USE && ! (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)) canon_reg (XEXP (x, 0), NULL_RTX); else if (GET_CODE (x) == CALL) { /* The result of apply_change_group can be ignored; see canon_reg. */ canon_reg (x, insn); apply_change_group (); fold_rtx (x, insn); } /* Store the equivalent value in SRC_EQV, if different, or if the DEST is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV is handled specially for this case, and if it isn't set, then there will be no equivalence for the destination. */ if (n_sets == 1 && REG_NOTES (insn) != 0 && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0 && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl)) || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART)) { src_eqv = fold_rtx (canon_reg (XEXP (tem, 0), NULL_RTX), insn); XEXP (tem, 0) = src_eqv; } /* Canonicalize sources and addresses of destinations. We do this in a separate pass to avoid problems when a MATCH_DUP is present in the insn pattern. In that case, we want to ensure that we don't break the duplicate nature of the pattern. So we will replace both operands at the same time. 
Otherwise, we would fail to find an equivalent substitution in the loop calling validate_change below. We used to suppress canonicalization of DEST if it appears in SRC, but we don't do this any more. */ for (i = 0; i < n_sets; i++) { rtx dest = SET_DEST (sets[i].rtl); rtx src = SET_SRC (sets[i].rtl); rtx new = canon_reg (src, insn); int insn_code; sets[i].orig_src = src; if ((REG_P (new) && REG_P (src) && ((REGNO (new) < FIRST_PSEUDO_REGISTER) != (REGNO (src) < FIRST_PSEUDO_REGISTER))) || (insn_code = recog_memoized (insn)) < 0 || insn_data[insn_code].n_dups > 0) validate_change (insn, &SET_SRC (sets[i].rtl), new, 1); else SET_SRC (sets[i].rtl) = new; if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT) { validate_change (insn, &XEXP (dest, 1), canon_reg (XEXP (dest, 1), insn), 1); validate_change (insn, &XEXP (dest, 2), canon_reg (XEXP (dest, 2), insn), 1); } while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT) dest = XEXP (dest, 0); if (MEM_P (dest)) canon_reg (dest, insn); } /* Now that we have done all the replacements, we can apply the change group and see if they all work. Note that this will cause some canonicalizations that would have worked individually not to be applied because some other canonicalization didn't work, but this should not occur often. The result of apply_change_group can be ignored; see canon_reg. */ apply_change_group (); /* Set sets[i].src_elt to the class each source belongs to. Detect assignments from or to volatile things and set sets[i] to zero so they will be ignored in the rest of this function. Nothing in this loop changes the hash table or the register chains. */ for (i = 0; i < n_sets; i++) { rtx src, dest; rtx src_folded; struct table_elt *elt = 0, *p; enum machine_mode mode; rtx src_eqv_here; rtx src_const = 0; rtx src_related = 0; struct table_elt *src_const_elt = 0; int src_cost = MAX_COST; int src_eqv_cost = MAX_COST; int src_folded_cost = MAX_COST; int src_related_cost = MAX_COST; int src_elt_cost = MAX_COST; int src_regcost = MAX_COST; int src_eqv_regcost = MAX_COST; int src_folded_regcost = MAX_COST; int src_related_regcost = MAX_COST; int src_elt_regcost = MAX_COST; /* Set nonzero if we need to call force_const_mem with the contents of src_folded before using it. */ int src_folded_force_flag = 0; dest = SET_DEST (sets[i].rtl); src = SET_SRC (sets[i].rtl); /* If SRC is a constant that has no machine mode, hash it with the destination's machine mode. This way we can keep different modes separate. */ mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src); sets[i].mode = mode; if (src_eqv) { enum machine_mode eqvmode = mode; if (GET_CODE (dest) == STRICT_LOW_PART) eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0))); do_not_record = 0; hash_arg_in_memory = 0; src_eqv_hash = HASH (src_eqv, eqvmode); /* Find the equivalence class for the equivalent expression. */ if (!do_not_record) src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode); src_eqv_volatile = do_not_record; src_eqv_in_memory = hash_arg_in_memory; } /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the value of the INNER register, not the destination. So it is not a valid substitution for the source. But save it for later. */ if (GET_CODE (dest) == STRICT_LOW_PART) src_eqv_here = 0; else src_eqv_here = src_eqv; /* Simplify any foldable subexpressions in SRC. Then get the fully-simplified result, which may not necessarily be valid. 
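   For example (values invented for illustration), if SRC is (plus:SI (reg:SI 70) (const_int 4))
   and reg 70 is currently known to contain (const_int 8), src_folded becomes (const_int 12) even
   when the insn cannot accept an immediate operand; whether the folded form is actually
   substituted is decided by the cost comparison and the validate_change calls further down.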
*/ src_folded = fold_rtx (src, insn); #if 0 /* ??? This caused bad code to be generated for the m68k port with -O2. Suppose src is (CONST_INT -1), and that after truncation src_folded is (CONST_INT 3). Suppose src_folded is then used for src_const. At the end we will add src and src_const to the same equivalence class. We now have 3 and -1 on the same equivalence class. This causes later instructions to be mis-optimized. */ /* If storing a constant in a bitfield, pre-truncate the constant so we will be able to record it later. */ if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT) { rtx width = XEXP (SET_DEST (sets[i].rtl), 1); if (GET_CODE (src) == CONST_INT && GET_CODE (width) == CONST_INT && INTVAL (width) < HOST_BITS_PER_WIDE_INT && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width)))) src_folded = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1 << INTVAL (width)) - 1)); } #endif /* Compute SRC's hash code, and also notice if it should not be recorded at all. In that case, prevent any further processing of this assignment. */ do_not_record = 0; hash_arg_in_memory = 0; sets[i].src = src; sets[i].src_hash = HASH (src, mode); sets[i].src_volatile = do_not_record; sets[i].src_in_memory = hash_arg_in_memory; /* If SRC is a MEM, there is a REG_EQUIV note for SRC, and DEST is a pseudo, do not record SRC. Using SRC as a replacement for anything else will be incorrect in that situation. Note that this usually occurs only for stack slots, in which case all the RTL would be referring to SRC, so we don't lose any optimization opportunities by not having SRC in the hash table. */ if (MEM_P (src) && find_reg_note (insn, REG_EQUIV, NULL_RTX) != 0 && REG_P (dest) && REGNO (dest) >= FIRST_PSEUDO_REGISTER) sets[i].src_volatile = 1; #if 0 /* It is no longer clear why we used to do this, but it doesn't appear to still be needed. So let's try without it since this code hurts cse'ing widened ops. */ /* If source is a paradoxical subreg (such as QI treated as an SI), treat it as volatile. It may do the work of an SI in one context where the extra bits are not being used, but cannot replace an SI in general. */ if (GET_CODE (src) == SUBREG && (GET_MODE_SIZE (GET_MODE (src)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))) sets[i].src_volatile = 1; #endif /* Locate all possible equivalent forms for SRC. Try to replace SRC in the insn with each cheaper equivalent. We have the following types of equivalents: SRC itself, a folded version, a value given in a REG_EQUAL note, or a value related to a constant. Each of these equivalents may be part of an additional class of equivalents (if more than one is in the table, they must be in the same class; we check for this). If the source is volatile, we don't do any table lookups. We note any constant equivalent for possible later use in a REG_NOTE. */ if (!sets[i].src_volatile) elt = lookup (src, sets[i].src_hash, mode); sets[i].src_elt = elt; if (elt && src_eqv_here && src_eqv_elt) { if (elt->first_same_value != src_eqv_elt->first_same_value) { /* The REG_EQUAL is indicating that two formerly distinct classes are now equivalent. So merge them. */ merge_equiv_classes (elt, src_eqv_elt); src_eqv_hash = HASH (src_eqv, elt->mode); src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode); } src_eqv_here = 0; } else if (src_eqv_elt) elt = src_eqv_elt; /* Try to find a constant somewhere and record it in `src_const'. Record its table element, if any, in `src_const_elt'. Look in any known equivalences first. 
(If the constant is not in the table, also set `sets[i].src_const_hash'). */ if (elt) for (p = elt->first_same_value; p; p = p->next_same_value) if (p->is_const) { src_const = p->exp; src_const_elt = elt; break; } if (src_const == 0 && (CONSTANT_P (src_folded) /* Consider (minus (label_ref L1) (label_ref L2)) as "constant" here so we will record it. This allows us to fold switch statements when an ADDR_DIFF_VEC is used. */ || (GET_CODE (src_folded) == MINUS && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF))) src_const = src_folded, src_const_elt = elt; else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here)) src_const = src_eqv_here, src_const_elt = src_eqv_elt; /* If we don't know if the constant is in the table, get its hash code and look it up. */ if (src_const && src_const_elt == 0) { sets[i].src_const_hash = HASH (src_const, mode); src_const_elt = lookup (src_const, sets[i].src_const_hash, mode); } sets[i].src_const = src_const; sets[i].src_const_elt = src_const_elt; /* If the constant and our source are both in the table, mark them as equivalent. Otherwise, if a constant is in the table but the source isn't, set ELT to it. */ if (src_const_elt && elt && src_const_elt->first_same_value != elt->first_same_value) merge_equiv_classes (elt, src_const_elt); else if (src_const_elt && elt == 0) elt = src_const_elt; /* See if there is a register linearly related to a constant equivalent of SRC. */ if (src_const && (GET_CODE (src_const) == CONST || (src_const_elt && src_const_elt->related_value != 0))) { src_related = use_related_value (src_const, src_const_elt); if (src_related) { struct table_elt *src_related_elt = lookup (src_related, HASH (src_related, mode), mode); if (src_related_elt && elt) { if (elt->first_same_value != src_related_elt->first_same_value) /* This can occur when we previously saw a CONST involving a SYMBOL_REF and then see the SYMBOL_REF twice. Merge the involved classes. */ merge_equiv_classes (elt, src_related_elt); src_related = 0; src_related_elt = 0; } else if (src_related_elt && elt == 0) elt = src_related_elt; } } /* See if we have a CONST_INT that is already in a register in a wider mode. */ if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT && GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_BITSIZE (mode) < BITS_PER_WORD) { enum machine_mode wider_mode; for (wider_mode = GET_MODE_WIDER_MODE (mode); GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD && src_related == 0; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { struct table_elt *const_elt = lookup (src_const, HASH (src_const, wider_mode), wider_mode); if (const_elt == 0) continue; for (const_elt = const_elt->first_same_value; const_elt; const_elt = const_elt->next_same_value) if (REG_P (const_elt->exp)) { src_related = gen_lowpart (mode, const_elt->exp); break; } } } /* Another possibility is that we have an AND with a constant in a mode narrower than a word. If so, it might have been generated as part of an "if" which would narrow the AND. If we already have done the AND in a wider mode, we can use a SUBREG of that value. */ if (flag_expensive_optimizations && ! 
src_related && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD) { enum machine_mode tmode; rtx new_and = gen_rtx_AND (VOIDmode, NULL_RTX, XEXP (src, 1)); for (tmode = GET_MODE_WIDER_MODE (mode); GET_MODE_SIZE (tmode) <= UNITS_PER_WORD; tmode = GET_MODE_WIDER_MODE (tmode)) { rtx inner = gen_lowpart (tmode, XEXP (src, 0)); struct table_elt *larger_elt; if (inner) { PUT_MODE (new_and, tmode); XEXP (new_and, 0) = inner; larger_elt = lookup (new_and, HASH (new_and, tmode), tmode); if (larger_elt == 0) continue; for (larger_elt = larger_elt->first_same_value; larger_elt; larger_elt = larger_elt->next_same_value) if (REG_P (larger_elt->exp)) { src_related = gen_lowpart (mode, larger_elt->exp); break; } if (src_related) break; } } } #ifdef LOAD_EXTEND_OP /* See if a MEM has already been loaded with a widening operation; if it has, we can use a subreg of that. Many CISC machines also have such operations, but this is only likely to be beneficial on these machines. */ if (flag_expensive_optimizations && src_related == 0 && (GET_MODE_SIZE (mode) < UNITS_PER_WORD) && GET_MODE_CLASS (mode) == MODE_INT && MEM_P (src) && ! do_not_record && LOAD_EXTEND_OP (mode) != NIL) { enum machine_mode tmode; /* Set what we are trying to extend and the operation it might have been extended with. */ PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode)); XEXP (memory_extend_rtx, 0) = src; for (tmode = GET_MODE_WIDER_MODE (mode); GET_MODE_SIZE (tmode) <= UNITS_PER_WORD; tmode = GET_MODE_WIDER_MODE (tmode)) { struct table_elt *larger_elt; PUT_MODE (memory_extend_rtx, tmode); larger_elt = lookup (memory_extend_rtx, HASH (memory_extend_rtx, tmode), tmode); if (larger_elt == 0) continue; for (larger_elt = larger_elt->first_same_value; larger_elt; larger_elt = larger_elt->next_same_value) if (REG_P (larger_elt->exp)) { src_related = gen_lowpart (mode, larger_elt->exp); break; } if (src_related) break; } } #endif /* LOAD_EXTEND_OP */ if (src == src_folded) src_folded = 0; /* At this point, ELT, if nonzero, points to a class of expressions equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED, and SRC_RELATED, if nonzero, each contain additional equivalent expressions. Prune these latter expressions by deleting expressions already in the equivalence class. Check for an equivalent identical to the destination. If found, this is the preferred equivalent since it will likely lead to elimination of the insn. Indicate this by placing it in `src_related'. */ if (elt) elt = elt->first_same_value; for (p = elt; p; p = p->next_same_value) { enum rtx_code code = GET_CODE (p->exp); /* If the expression is not valid, ignore it. Then we do not have to check for validity below. In most cases, we can use `rtx_equal_p', since canonicalization has already been done. */ if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0)) continue; /* Also skip paradoxical subregs, unless that's what we're looking for. */ if (code == SUBREG && (GET_MODE_SIZE (GET_MODE (p->exp)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp)))) && ! 
(src != 0 && GET_CODE (src) == SUBREG && GET_MODE (src) == GET_MODE (p->exp) && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp)))))) continue; if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp)) src = 0; else if (src_folded && GET_CODE (src_folded) == code && rtx_equal_p (src_folded, p->exp)) src_folded = 0; else if (src_eqv_here && GET_CODE (src_eqv_here) == code && rtx_equal_p (src_eqv_here, p->exp)) src_eqv_here = 0; else if (src_related && GET_CODE (src_related) == code && rtx_equal_p (src_related, p->exp)) src_related = 0; /* This is the same as the destination of the insns, we want to prefer it. Copy it to src_related. The code below will then give it a negative cost. */ if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest)) src_related = dest; } /* Find the cheapest valid equivalent, trying all the available possibilities. Prefer items not in the hash table to ones that are when they are equal cost. Note that we can never worsen an insn as the current contents will also succeed. If we find an equivalent identical to the destination, use it as best, since this insn will probably be eliminated in that case. */ if (src) { if (rtx_equal_p (src, dest)) src_cost = src_regcost = -1; else { src_cost = COST (src); src_regcost = approx_reg_cost (src); } } if (src_eqv_here) { if (rtx_equal_p (src_eqv_here, dest)) src_eqv_cost = src_eqv_regcost = -1; else { src_eqv_cost = COST (src_eqv_here); src_eqv_regcost = approx_reg_cost (src_eqv_here); } } if (src_folded) { if (rtx_equal_p (src_folded, dest)) src_folded_cost = src_folded_regcost = -1; else { src_folded_cost = COST (src_folded); src_folded_regcost = approx_reg_cost (src_folded); } } if (src_related) { if (rtx_equal_p (src_related, dest)) src_related_cost = src_related_regcost = -1; else { src_related_cost = COST (src_related); src_related_regcost = approx_reg_cost (src_related); } } /* If this was an indirect jump insn, a known label will really be cheaper even though it looks more expensive. */ if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF) src_folded = src_const, src_folded_cost = src_folded_regcost = -1; /* Terminate loop when replacement made. This must terminate since the current contents will be tested and will always be valid. */ while (1) { rtx trial; /* Skip invalid entries. */ while (elt && !REG_P (elt->exp) && ! exp_equiv_p (elt->exp, elt->exp, 1, 0)) elt = elt->next_same_value; /* A paradoxical subreg would be bad here: it'll be the right size, but later may be adjusted so that the upper bits aren't what we want. So reject it. */ if (elt != 0 && GET_CODE (elt->exp) == SUBREG && (GET_MODE_SIZE (GET_MODE (elt->exp)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp)))) /* It is okay, though, if the rtx we're trying to match will ignore any of the bits we can't predict. */ && ! (src != 0 && GET_CODE (src) == SUBREG && GET_MODE (src) == GET_MODE (elt->exp) && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp)))))) { elt = elt->next_same_value; continue; } if (elt) { src_elt_cost = elt->cost; src_elt_regcost = elt->regcost; } /* Find cheapest and skip it for the next time. For items of equal cost, use this order: src_folded, src, src_eqv, src_related and hash table entry. 
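   The loop below tries the current cheapest candidate and, after each attempt, marks that
   candidate's cost as MAX_COST so the next iteration falls through to the next-cheapest form; it
   stops as soon as one candidate survives validate_change (or, for a jump, is installed directly).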
*/ if (src_folded && preferable (src_folded_cost, src_folded_regcost, src_cost, src_regcost) <= 0 && preferable (src_folded_cost, src_folded_regcost, src_eqv_cost, src_eqv_regcost) <= 0 && preferable (src_folded_cost, src_folded_regcost, src_related_cost, src_related_regcost) <= 0 && preferable (src_folded_cost, src_folded_regcost, src_elt_cost, src_elt_regcost) <= 0) { trial = src_folded, src_folded_cost = MAX_COST; if (src_folded_force_flag) { rtx forced = force_const_mem (mode, trial); if (forced) trial = forced; } } else if (src && preferable (src_cost, src_regcost, src_eqv_cost, src_eqv_regcost) <= 0 && preferable (src_cost, src_regcost, src_related_cost, src_related_regcost) <= 0 && preferable (src_cost, src_regcost, src_elt_cost, src_elt_regcost) <= 0) trial = src, src_cost = MAX_COST; else if (src_eqv_here && preferable (src_eqv_cost, src_eqv_regcost, src_related_cost, src_related_regcost) <= 0 && preferable (src_eqv_cost, src_eqv_regcost, src_elt_cost, src_elt_regcost) <= 0) trial = copy_rtx (src_eqv_here), src_eqv_cost = MAX_COST; else if (src_related && preferable (src_related_cost, src_related_regcost, src_elt_cost, src_elt_regcost) <= 0) trial = copy_rtx (src_related), src_related_cost = MAX_COST; else { trial = copy_rtx (elt->exp); elt = elt->next_same_value; src_elt_cost = MAX_COST; } /* We don't normally have an insn matching (set (pc) (pc)), so check for this separately here. We will delete such an insn below. For other cases such as a table jump or conditional jump where we know the ultimate target, go ahead and replace the operand. While that may not make a valid insn, we will reemit the jump below (and also insert any necessary barriers). */ if (n_sets == 1 && dest == pc_rtx && (trial == pc_rtx || (GET_CODE (trial) == LABEL_REF && ! condjump_p (insn)))) { SET_SRC (sets[i].rtl) = trial; cse_jumps_altered = 1; break; } /* Look for a substitution that makes a valid insn. */ else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0)) { rtx new = canon_reg (SET_SRC (sets[i].rtl), insn); /* If we just made a substitution inside a libcall, then we need to make the same substitution in any notes attached to the RETVAL insn. */ if (libcall_insn && (REG_P (sets[i].orig_src) || GET_CODE (sets[i].orig_src) == SUBREG || MEM_P (sets[i].orig_src))) { rtx note = find_reg_equal_equiv_note (libcall_insn); if (note != 0) XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), sets[i].orig_src, copy_rtx (new)); } /* The result of apply_change_group can be ignored; see canon_reg. */ validate_change (insn, &SET_SRC (sets[i].rtl), new, 1); apply_change_group (); break; } /* If we previously found constant pool entries for constants and this is a constant, try making a pool entry. Put it in src_folded unless we already have done this since that is where it likely came from. */ else if (constant_pool_entries_cost && CONSTANT_P (trial) /* Reject cases that will abort in decode_rtx_const. On the alpha when simplifying a switch, we get (const (truncate (minus (label_ref) (label_ref)))). */ && ! (GET_CODE (trial) == CONST && GET_CODE (XEXP (trial, 0)) == TRUNCATE) /* Likewise on IA-64, except without the truncate. */ && ! (GET_CODE (trial) == CONST && GET_CODE (XEXP (trial, 0)) == MINUS && GET_CODE (XEXP (XEXP (trial, 0), 0)) == LABEL_REF && GET_CODE (XEXP (XEXP (trial, 0), 1)) == LABEL_REF) && (src_folded == 0 || (!MEM_P (src_folded) && ! 
src_folded_force_flag)) && GET_MODE_CLASS (mode) != MODE_CC && mode != VOIDmode) { src_folded_force_flag = 1; src_folded = trial; src_folded_cost = constant_pool_entries_cost; src_folded_regcost = constant_pool_entries_regcost; } } src = SET_SRC (sets[i].rtl); /* In general, it is good to have a SET with SET_SRC == SET_DEST. However, there is an important exception: If both are registers that are not the head of their equivalence class, replace SET_SRC with the head of the class. If we do not do this, we will have both registers live over a portion of the basic block. This way, their lifetimes will likely abut instead of overlapping. */ if (REG_P (dest) && REGNO_QTY_VALID_P (REGNO (dest))) { int dest_q = REG_QTY (REGNO (dest)); struct qty_table_elem *dest_ent = &qty_table[dest_q]; if (dest_ent->mode == GET_MODE (dest) && dest_ent->first_reg != REGNO (dest) && REG_P (src) && REGNO (src) == REGNO (dest) /* Don't do this if the original insn had a hard reg as SET_SRC or SET_DEST. */ && (!REG_P (sets[i].src) || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER) && (!REG_P (dest) || REGNO (dest) >= FIRST_PSEUDO_REGISTER)) /* We can't call canon_reg here because it won't do anything if SRC is a hard register. */ { int src_q = REG_QTY (REGNO (src)); struct qty_table_elem *src_ent = &qty_table[src_q]; int first = src_ent->first_reg; rtx new_src = (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first] : gen_rtx_REG (GET_MODE (src), first)); /* We must use validate-change even for this, because this might be a special no-op instruction, suitable only to tag notes onto. */ if (validate_change (insn, &SET_SRC (sets[i].rtl), new_src, 0)) { src = new_src; /* If we had a constant that is cheaper than what we are now setting SRC to, use that constant. We ignored it when we thought we could make this into a no-op. */ if (src_const && COST (src_const) < COST (src) && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0)) src = src_const; } } } /* If we made a change, recompute SRC values. */ if (src != sets[i].src) { cse_altered = 1; do_not_record = 0; hash_arg_in_memory = 0; sets[i].src = src; sets[i].src_hash = HASH (src, mode); sets[i].src_volatile = do_not_record; sets[i].src_in_memory = hash_arg_in_memory; sets[i].src_elt = lookup (src, sets[i].src_hash, mode); } /* If this is a single SET, we are setting a register, and we have an equivalent constant, we want to add a REG_NOTE. We don't want to write a REG_EQUAL note for a constant pseudo since verifying that that pseudo hasn't been eliminated is a pain. Such a note also won't help anything. Avoid a REG_EQUAL note for (CONST (MINUS (LABEL_REF) (LABEL_REF))) which can be created for a reference to a compile time computable entry in a jump table. */ if (n_sets == 1 && src_const && REG_P (dest) && !REG_P (src_const) && ! (GET_CODE (src_const) == CONST && GET_CODE (XEXP (src_const, 0)) == MINUS && GET_CODE (XEXP (XEXP (src_const, 0), 0)) == LABEL_REF && GET_CODE (XEXP (XEXP (src_const, 0), 1)) == LABEL_REF)) { /* We only want a REG_EQUAL note if src_const != src. */ if (! rtx_equal_p (src, src_const)) { /* Make sure that the rtx is not shared. */ src_const = copy_rtx (src_const); /* Record the actual constant value in a REG_EQUAL note, making a new one if one does not already exist. */ set_unique_reg_note (insn, REG_EQUAL, src_const); } } /* Now deal with the destination. */ do_not_record = 0; /* Look within any SIGN_EXTRACT or ZERO_EXTRACT to the MEM or REG within it. 
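   For instance (register number invented), a destination written as
   (zero_extract:SI (reg:SI 65) (const_int 8) (const_int 0)) is stripped down to reg 65 itself,
   and that inner object is what gets hashed and recorded as sets[i].inner_dest.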
*/ while (GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); sets[i].inner_dest = dest; if (MEM_P (dest)) { #ifdef PUSH_ROUNDING /* Stack pushes invalidate the stack pointer. */ rtx addr = XEXP (dest, 0); if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC && XEXP (addr, 0) == stack_pointer_rtx) invalidate (stack_pointer_rtx, Pmode); #endif dest = fold_rtx (dest, insn); } /* Compute the hash code of the destination now, before the effects of this instruction are recorded, since the register values used in the address computation are those before this instruction. */ sets[i].dest_hash = HASH (dest, mode); /* Don't enter a bit-field in the hash table because the value in it after the store may not equal what was stored, due to truncation. */ if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT) { rtx width = XEXP (SET_DEST (sets[i].rtl), 1); if (src_const != 0 && GET_CODE (src_const) == CONST_INT && GET_CODE (width) == CONST_INT && INTVAL (width) < HOST_BITS_PER_WIDE_INT && ! (INTVAL (src_const) & ((HOST_WIDE_INT) (-1) << INTVAL (width)))) /* Exception: if the value is constant, and it won't be truncated, record it. */ ; else { /* This is chosen so that the destination will be invalidated but no new value will be recorded. We must invalidate because sometimes constant values can be recorded for bitfields. */ sets[i].src_elt = 0; sets[i].src_volatile = 1; src_eqv = 0; src_eqv_elt = 0; } } /* If only one set in a JUMP_INSN and it is now a no-op, we can delete the insn. */ else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx) { /* One less use of the label this insn used to jump to. */ delete_insn (insn); cse_jumps_altered = 1; /* No more processing for this set. */ sets[i].rtl = 0; } /* If this SET is now setting PC to a label, we know it used to be a conditional or computed branch. */ else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF) { /* Now emit a BARRIER after the unconditional jump. */ if (NEXT_INSN (insn) == 0 || GET_CODE (NEXT_INSN (insn)) != BARRIER) emit_barrier_after (insn); /* We reemit the jump in as many cases as possible just in case the form of an unconditional jump is significantly different than a computed jump or conditional jump. If this insn has multiple sets, then reemitting the jump is nontrivial. So instead we just force rerecognition and hope for the best. */ if (n_sets == 1) { rtx new, note; new = emit_jump_insn_after (gen_jump (XEXP (src, 0)), insn); JUMP_LABEL (new) = XEXP (src, 0); LABEL_NUSES (XEXP (src, 0))++; /* Make sure to copy over REG_NON_LOCAL_GOTO. */ note = find_reg_note (insn, REG_NON_LOCAL_GOTO, 0); if (note) { XEXP (note, 1) = NULL_RTX; REG_NOTES (new) = note; } delete_insn (insn); insn = new; /* Now emit a BARRIER after the unconditional jump. */ if (NEXT_INSN (insn) == 0 || GET_CODE (NEXT_INSN (insn)) != BARRIER) emit_barrier_after (insn); } else INSN_CODE (insn) = -1; /* Do not bother deleting any unreachable code, let jump/flow do that. */ cse_jumps_altered = 1; sets[i].rtl = 0; } /* If destination is volatile, invalidate it and then do no further processing for this assignment. */ else if (do_not_record) { if (REG_P (dest) || GET_CODE (dest) == SUBREG) invalidate (dest, VOIDmode); else if (MEM_P (dest)) { /* Outgoing arguments for a libcall don't affect any recorded expressions. */ if (! 
libcall_insn || insn == libcall_insn) invalidate (dest, VOIDmode); } else if (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == ZERO_EXTRACT) invalidate (XEXP (dest, 0), GET_MODE (dest)); sets[i].rtl = 0; } if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl)) sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode); #ifdef HAVE_cc0 /* If setting CC0, record what it was set to, or a constant, if it is equivalent to a constant. If it is being set to a floating-point value, make a COMPARE with the appropriate constant of 0. If we don't do this, later code can interpret this as a test against const0_rtx, which can cause problems if we try to put it into an insn as a floating-point operand. */ if (dest == cc0_rtx) { this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src; this_insn_cc0_mode = mode; if (FLOAT_MODE_P (mode)) this_insn_cc0 = gen_rtx_COMPARE (VOIDmode, this_insn_cc0, CONST0_RTX (mode)); } #endif } /* Now enter all non-volatile source expressions in the hash table if they are not already present. Record their equivalence classes in src_elt. This way we can insert the corresponding destinations into the same classes even if the actual sources are no longer in them (having been invalidated). */ if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl))) { struct table_elt *elt; struct table_elt *classp = sets[0].src_elt; rtx dest = SET_DEST (sets[0].rtl); enum machine_mode eqvmode = GET_MODE (dest); if (GET_CODE (dest) == STRICT_LOW_PART) { eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0))); classp = 0; } if (insert_regs (src_eqv, classp, 0)) { rehash_using_reg (src_eqv); src_eqv_hash = HASH (src_eqv, eqvmode); } elt = hash_insert (src_eqv, classp, src_eqv_hash, eqvmode); elt->in_memory = src_eqv_in_memory; src_eqv_elt = elt; /* Check to see if src_eqv_elt is the same as a set source which does not yet have an elt, and if so set the elt of the set source to src_eqv_elt. */ for (i = 0; i < n_sets; i++) if (sets[i].rtl && sets[i].src_elt == 0 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv)) sets[i].src_elt = src_eqv_elt; } for (i = 0; i < n_sets; i++) if (sets[i].rtl && ! sets[i].src_volatile && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl))) { if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART) { /* REG_EQUAL in setting a STRICT_LOW_PART gives an equivalent for the entire destination register, not just for the subreg being stored in now. This is a more interesting equivalence, so we arrange later to treat the entire reg as the destination. */ sets[i].src_elt = src_eqv_elt; sets[i].src_hash = src_eqv_hash; } else { /* Insert source and constant equivalent into hash table, if not already present. */ struct table_elt *classp = src_eqv_elt; rtx src = sets[i].src; rtx dest = SET_DEST (sets[i].rtl); enum machine_mode mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src); /* It's possible that we have a source value known to be constant but don't have a REG_EQUAL note on the insn. Lack of a note will mean src_eqv_elt will be NULL. This can happen where we've generated a SUBREG to access a CONST_INT that is already in a register in a wider mode. Ensure that the source expression is put in the proper constant class. */ if (!classp) classp = sets[i].src_const_elt; if (sets[i].src_elt == 0) { /* Don't put a hard register source into the table if this is the last insn of a libcall. In this case, we only need to put src_eqv_elt in src_elt. */ if (! 
find_reg_note (insn, REG_RETVAL, NULL_RTX)) { struct table_elt *elt; /* Note that these insert_regs calls cannot remove any of the src_elt's, because they would have failed to match if not still valid. */ if (insert_regs (src, classp, 0)) { rehash_using_reg (src); sets[i].src_hash = HASH (src, mode); } elt = hash_insert (src, classp, sets[i].src_hash, mode); elt->in_memory = sets[i].src_in_memory; sets[i].src_elt = classp = elt; } else sets[i].src_elt = classp; } if (sets[i].src_const && sets[i].src_const_elt == 0 && src != sets[i].src_const && ! rtx_equal_p (sets[i].src_const, src)) sets[i].src_elt = hash_insert (sets[i].src_const, classp, sets[i].src_const_hash, mode); } } else if (sets[i].src_elt == 0) /* If we did not insert the source into the hash table (e.g., it was volatile), note the equivalence class for the REG_EQUAL value, if any, so that the destination goes into that class. */ sets[i].src_elt = src_eqv_elt; invalidate_from_clobbers (x); /* Some registers are invalidated by subroutine calls. Memory is invalidated by non-constant calls. */ if (GET_CODE (insn) == CALL_INSN) { if (! CONST_OR_PURE_CALL_P (insn)) invalidate_memory (); invalidate_for_call (); } /* Now invalidate everything set by this instruction. If a SUBREG or other funny destination is being set, sets[i].rtl is still nonzero, so here we invalidate the reg a part of which is being set. */ for (i = 0; i < n_sets; i++) if (sets[i].rtl) { /* We can't use the inner dest, because the mode associated with a ZERO_EXTRACT is significant. */ rtx dest = SET_DEST (sets[i].rtl); /* Needed for registers to remove the register from its previous quantity's chain. Needed for memory if this is a nonvarying address, unless we have just done an invalidate_memory that covers even those. */ if (REG_P (dest) || GET_CODE (dest) == SUBREG) invalidate (dest, VOIDmode); else if (MEM_P (dest)) { /* Outgoing arguments for a libcall don't affect any recorded expressions. */ if (! libcall_insn || insn == libcall_insn) invalidate (dest, VOIDmode); } else if (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == ZERO_EXTRACT) invalidate (XEXP (dest, 0), GET_MODE (dest)); } /* A volatile ASM invalidates everything. */ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == ASM_OPERANDS && MEM_VOLATILE_P (PATTERN (insn))) flush_hash_table (); /* Make sure registers mentioned in destinations are safe for use in an expression to be inserted. This removes from the hash table any invalid entry that refers to one of these registers. We don't care about the return value from mention_regs because we are going to hash the SET_DEST values unconditionally. */ for (i = 0; i < n_sets; i++) { if (sets[i].rtl) { rtx x = SET_DEST (sets[i].rtl); if (!REG_P (x)) mention_regs (x); else { /* We used to rely on all references to a register becoming inaccessible when a register changes to a new quantity, since that changes the hash code. However, that is not safe, since after CSE_HASH_SIZE new quantities we get a hash 'collision' of a register with its own invalid entries. And since SUBREGs have been changed not to change their hash code with the hash code of the register, it wouldn't work any longer at all. So we have to check for any invalid references lying around now. This code is similar to the REG case in mention_regs, but it knows that reg_tick has been incremented, and it leaves reg_in_table as -1 . */ unsigned int regno = REGNO (x); unsigned int endregno = regno + (regno >= FIRST_PSEUDO_REGISTER ? 
1 : hard_regno_nregs[regno][GET_MODE (x)]); unsigned int i; for (i = regno; i < endregno; i++) { if (REG_IN_TABLE (i) >= 0) { remove_invalid_refs (i); REG_IN_TABLE (i) = -1; } } } } } /* We may have just removed some of the src_elt's from the hash table. So replace each one with the current head of the same class. */ for (i = 0; i < n_sets; i++) if (sets[i].rtl) { if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0) /* If elt was removed, find current head of same class, or 0 if nothing remains of that class. */ { struct table_elt *elt = sets[i].src_elt; while (elt && elt->prev_same_value) elt = elt->prev_same_value; while (elt && elt->first_same_value == 0) elt = elt->next_same_value; sets[i].src_elt = elt ? elt->first_same_value : 0; } } /* Now insert the destinations into their equivalence classes. */ for (i = 0; i < n_sets; i++) if (sets[i].rtl) { rtx dest = SET_DEST (sets[i].rtl); struct table_elt *elt; /* Don't record value if we are not supposed to risk allocating floating-point values in registers that might be wider than memory. */ if ((flag_float_store && MEM_P (dest) && FLOAT_MODE_P (GET_MODE (dest))) /* Don't record BLKmode values, because we don't know the size of it, and can't be sure that other BLKmode values have the same or smaller size. */ || GET_MODE (dest) == BLKmode /* Don't record values of destinations set inside a libcall block since we might delete the libcall. Things should have been set up so we won't want to reuse such a value, but we play it safe here. */ || libcall_insn /* If we didn't put a REG_EQUAL value or a source into the hash table, there is no point is recording DEST. */ || sets[i].src_elt == 0 /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND or SIGN_EXTEND, don't record DEST since it can cause some tracking to be wrong. ??? Think about this more later. */ || (GET_CODE (dest) == SUBREG && (GET_MODE_SIZE (GET_MODE (dest)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))) && (GET_CODE (sets[i].src) == SIGN_EXTEND || GET_CODE (sets[i].src) == ZERO_EXTEND))) continue; /* STRICT_LOW_PART isn't part of the value BEING set, and neither is the SUBREG inside it. Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */ if (GET_CODE (dest) == STRICT_LOW_PART) dest = SUBREG_REG (XEXP (dest, 0)); if (REG_P (dest) || GET_CODE (dest) == SUBREG) /* Registers must also be inserted into chains for quantities. */ if (insert_regs (dest, sets[i].src_elt, 1)) { /* If `insert_regs' changes something, the hash code must be recalculated. */ rehash_using_reg (dest); sets[i].dest_hash = HASH (dest, GET_MODE (dest)); } elt = hash_insert (dest, sets[i].src_elt, sets[i].dest_hash, GET_MODE (dest)); elt->in_memory = (MEM_P (sets[i].inner_dest) && (! RTX_UNCHANGING_P (sets[i].inner_dest) || fixed_base_plus_p (XEXP (sets[i].inner_dest, 0)))); /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no narrower than M2, and both M1 and M2 are the same number of words, we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so make that equivalence as well. However, BAR may have equivalences for which gen_lowpart will produce a simpler value than gen_lowpart applied to BAR (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all BAR's equivalences. If we don't get a simplified form, make the SUBREG. It will not be used in an equivalence, but will cause two similar assignments to be detected. Note the loop below will find SUBREG_REG (DEST) since we have already entered SRC and DEST of the SET in the table. 
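   As a concrete sketch (modes and register numbers invented; assumes both modes occupy one word
   on the target), a set such as (set (subreg:DI (reg:SI 70) 0) (reg:DI 71)) also lets us record
   that (reg:SI 70) equals the low part of (reg:DI 71); the loop below walks every known
   equivalent of the source, narrows it to new_mode with simplify_gen_subreg (or uses it directly
   if it already has that mode), and enters the result into the same equivalence class.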
*/ if (GET_CODE (dest) == SUBREG && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1) / UNITS_PER_WORD) == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD) && (GET_MODE_SIZE (GET_MODE (dest)) >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))) && sets[i].src_elt != 0) { enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest)); struct table_elt *elt, *classp = 0; for (elt = sets[i].src_elt->first_same_value; elt; elt = elt->next_same_value) { rtx new_src = 0; unsigned src_hash; struct table_elt *src_elt; int byte = 0; /* Ignore invalid entries. */ if (!REG_P (elt->exp) && ! exp_equiv_p (elt->exp, elt->exp, 1, 0)) continue; /* We may have already been playing subreg games. If the mode is already correct for the destination, use it. */ if (GET_MODE (elt->exp) == new_mode) new_src = elt->exp; else { /* Calculate big endian correction for the SUBREG_BYTE. We have already checked that M1 (GET_MODE (dest)) is not narrower than M2 (new_mode). */ if (BYTES_BIG_ENDIAN) byte = (GET_MODE_SIZE (GET_MODE (dest)) - GET_MODE_SIZE (new_mode)); new_src = simplify_gen_subreg (new_mode, elt->exp, GET_MODE (dest), byte); } /* The call to simplify_gen_subreg fails if the value is VOIDmode, yet we can't do any simplification, e.g. for EXPR_LISTs denoting function call results. It is invalid to construct a SUBREG with a VOIDmode SUBREG_REG, hence a zero new_src means we can't do this substitution. */ if (! new_src) continue; src_hash = HASH (new_src, new_mode); src_elt = lookup (new_src, src_hash, new_mode); /* Put the new source in the hash table is if isn't already. */ if (src_elt == 0) { if (insert_regs (new_src, classp, 0)) { rehash_using_reg (new_src); src_hash = HASH (new_src, new_mode); } src_elt = hash_insert (new_src, classp, src_hash, new_mode); src_elt->in_memory = elt->in_memory; } else if (classp && classp != src_elt->first_same_value) /* Show that two things that we've seen before are actually the same. */ merge_equiv_classes (src_elt, classp); classp = src_elt->first_same_value; /* Ignore invalid entries. */ while (classp && !REG_P (classp->exp) && ! exp_equiv_p (classp->exp, classp->exp, 1, 0)) classp = classp->next_same_value; } } } /* Special handling for (set REG0 REG1) where REG0 is the "cheapest", cheaper than REG1. After cse, REG1 will probably not be used in the sequel, so (if easily done) change this insn to (set REG1 REG0) and replace REG1 with REG0 in the previous insn that computed their value. Then REG1 will become a dead store and won't cloud the situation for later optimizations. Do not make this change if REG1 is a hard register, because it will then be used in the sequel and we may be changing a two-operand insn into a three-operand insn. Also do not do this if we are operating on a copy of INSN. Also don't do this if INSN ends a libcall; this would cause an unrelated register to be set in the middle of a libcall, and we then get bad code if the libcall is deleted. */ if (n_sets == 1 && sets[0].rtl && REG_P (SET_DEST (sets[0].rtl)) && NEXT_INSN (PREV_INSN (insn)) == insn && REG_P (SET_SRC (sets[0].rtl)) && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))) { int src_q = REG_QTY (REGNO (SET_SRC (sets[0].rtl))); struct qty_table_elem *src_ent = &qty_table[src_q]; if ((src_ent->first_reg == REGNO (SET_DEST (sets[0].rtl))) && ! find_reg_note (insn, REG_RETVAL, NULL_RTX)) { rtx prev = insn; /* Scan for the previous nonnote insn, but stop at a basic block boundary. 
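   (Stopping at the NOTE_INSN_BASIC_BLOCK note keeps the register-swap transformation below within
   the current basic block; rewriting an insn from an earlier block would not be safe, since other
   paths could reach this insn.)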
*/ do { prev = PREV_INSN (prev); } while (prev && GET_CODE (prev) == NOTE && NOTE_LINE_NUMBER (prev) != NOTE_INSN_BASIC_BLOCK); /* Do not swap the registers around if the previous instruction attaches a REG_EQUIV note to REG1. ??? It's not entirely clear whether we can transfer a REG_EQUIV from the pseudo that originally shadowed an incoming argument to another register. Some uses of REG_EQUIV might rely on it being attached to REG1 rather than REG2. This section previously turned the REG_EQUIV into a REG_EQUAL note. We cannot do that because REG_EQUIV may provide an uninitialized stack slot when REG_PARM_STACK_SPACE is used. */ if (prev != 0 && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl) && ! find_reg_note (prev, REG_EQUIV, NULL_RTX)) { rtx dest = SET_DEST (sets[0].rtl); rtx src = SET_SRC (sets[0].rtl); rtx note; validate_change (prev, &SET_DEST (PATTERN (prev)), dest, 1); validate_change (insn, &SET_DEST (sets[0].rtl), src, 1); validate_change (insn, &SET_SRC (sets[0].rtl), dest, 1); apply_change_group (); /* If INSN has a REG_EQUAL note, and this note mentions REG0, then we must delete it, because the value in REG0 has changed. If the note's value is REG1, we must also delete it because that is now this insn's dest. */ note = find_reg_note (insn, REG_EQUAL, NULL_RTX); if (note != 0 && (reg_mentioned_p (dest, XEXP (note, 0)) || rtx_equal_p (src, XEXP (note, 0)))) remove_note (insn, note); } } } /* If this is a conditional jump insn, record any known equivalences due to the condition being tested. */ last_jump_equiv_class = 0; if (GET_CODE (insn) == JUMP_INSN && n_sets == 1 && GET_CODE (x) == SET && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE) record_jump_equiv (insn, 0); #ifdef HAVE_cc0 /* If the previous insn set CC0 and this insn no longer references CC0, delete the previous insn. Here we use the fact that nothing expects CC0 to be valid over an insn, which is true until the final pass. */ if (prev_insn && GET_CODE (prev_insn) == INSN && (tem = single_set (prev_insn)) != 0 && SET_DEST (tem) == cc0_rtx && ! reg_mentioned_p (cc0_rtx, x)) delete_insn (prev_insn); prev_insn_cc0 = this_insn_cc0; prev_insn_cc0_mode = this_insn_cc0_mode; prev_insn = insn; #endif } /* Remove from the hash table all expressions that reference memory. */ static void invalidate_memory (void) { int i; struct table_elt *p, *next; for (i = 0; i < CSE_HASH_SIZE; i++) for (p = equiv_table[i]; p; p = next) { next = p->next_same_hash; if (p->in_memory) remove_from_table (p, i); } } /* If ADDR is an address that implicitly affects the stack pointer, return 1 and update the register tables to show the effect. Else, return 0. */ static int addr_affects_sp_p (rtx addr) { if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC && REG_P (XEXP (addr, 0)) && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM) { if (REG_TICK (STACK_POINTER_REGNUM) >= 0) { REG_TICK (STACK_POINTER_REGNUM)++; /* Is it possible to use a subreg of SP? */ SUBREG_TICKED (STACK_POINTER_REGNUM) = -1; } /* This should be *very* rare. */ if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM)) invalidate (stack_pointer_rtx, VOIDmode); return 1; } return 0; } /* Perform invalidation on the basis of everything about an insn except for invalidating the actual places that are SET in it. This includes the places CLOBBERed, and anything that might alias with something that is SET or CLOBBERed. X is the pattern of the insn. 
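   For example (hard register numbers invented), given a pattern like
   (parallel [(set (reg:SI 70) (plus:SI (reg:SI 71) (reg:SI 72))) (clobber (reg:CC 17))])
   only the CLOBBER of reg 17 is processed here; the SET destination is invalidated separately by
   the loop in cse_insn that walks sets[].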
*/ static void invalidate_from_clobbers (rtx x) { if (GET_CODE (x) == CLOBBER) { rtx ref = XEXP (x, 0); if (ref) { if (REG_P (ref) || GET_CODE (ref) == SUBREG || MEM_P (ref)) invalidate (ref, VOIDmode); else if (GET_CODE (ref) == STRICT_LOW_PART || GET_CODE (ref) == ZERO_EXTRACT) invalidate (XEXP (ref, 0), GET_MODE (ref)); } } else if (GET_CODE (x) == PARALLEL) { int i; for (i = XVECLEN (x, 0) - 1; i >= 0; i--) { rtx y = XVECEXP (x, 0, i); if (GET_CODE (y) == CLOBBER) { rtx ref = XEXP (y, 0); if (REG_P (ref) || GET_CODE (ref) == SUBREG || MEM_P (ref)) invalidate (ref, VOIDmode); else if (GET_CODE (ref) == STRICT_LOW_PART || GET_CODE (ref) == ZERO_EXTRACT) invalidate (XEXP (ref, 0), GET_MODE (ref)); } } } } /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes and replace any registers in them with either an equivalent constant or the canonical form of the register. If we are inside an address, only do this if the address remains valid. OBJECT is 0 except when within a MEM in which case it is the MEM. Return the replacement for X. */ static rtx cse_process_notes (rtx x, rtx object) { enum rtx_code code = GET_CODE (x); const char *fmt = GET_RTX_FORMAT (code); int i; switch (code) { case CONST_INT: case CONST: case SYMBOL_REF: case LABEL_REF: case CONST_DOUBLE: case CONST_VECTOR: case PC: case CC0: case LO_SUM: return x; case MEM: validate_change (x, &XEXP (x, 0), cse_process_notes (XEXP (x, 0), x), 0); return x; case EXPR_LIST: case INSN_LIST: if (REG_NOTE_KIND (x) == REG_EQUAL) XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX); if (XEXP (x, 1)) XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX); return x; case SIGN_EXTEND: case ZERO_EXTEND: case SUBREG: { rtx new = cse_process_notes (XEXP (x, 0), object); /* We don't substitute VOIDmode constants into these rtx, since they would impede folding. */ if (GET_MODE (new) != VOIDmode) validate_change (object, &XEXP (x, 0), new, 0); return x; } case REG: i = REG_QTY (REGNO (x)); /* Return a constant or a constant register. */ if (REGNO_QTY_VALID_P (REGNO (x))) { struct qty_table_elem *ent = &qty_table[i]; if (ent->const_rtx != NULL_RTX && (CONSTANT_P (ent->const_rtx) || REG_P (ent->const_rtx))) { rtx new = gen_lowpart (GET_MODE (x), ent->const_rtx); if (new) return new; } } /* Otherwise, canonicalize this register. */ return canon_reg (x, NULL_RTX); default: break; } for (i = 0; i < GET_RTX_LENGTH (code); i++) if (fmt[i] == 'e') validate_change (object, &XEXP (x, i), cse_process_notes (XEXP (x, i), object), 0); return x; } /* Find common subexpressions between the end test of a loop and the beginning of the loop. LOOP_START is the CODE_LABEL at the start of a loop. Often we have a loop where an expression in the exit test is used in the body of the loop. For example "while (*p) *q++ = *p++;". Because of the way we duplicate the loop exit test in front of the loop, however, we don't detect that common subexpression. This will be caught when global cse is implemented, but this is a quite common case. This function handles the most common cases of these common expressions. It is called after we have processed the basic block ending with the NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN jumps to a label used only once. */ static void cse_around_loop (rtx loop_start) { rtx insn; int i; struct table_elt *p; /* If the jump at the end of the loop doesn't go to the start, we don't do anything. 
   */
*/ for (insn = PREV_INSN (loop_start); insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0); insn = PREV_INSN (insn)) ; if (insn == 0 || GET_CODE (insn) != NOTE || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG) return; /* If the last insn of the loop (the end test) was an NE comparison, we will interpret it as an EQ comparison, since we fell through the loop. Any equivalences resulting from that comparison are therefore not valid and must be invalidated. */ if (last_jump_equiv_class) for (p = last_jump_equiv_class->first_same_value; p; p = p->next_same_value) { if (MEM_P (p->exp) || REG_P (p->exp) || (GET_CODE (p->exp) == SUBREG && REG_P (SUBREG_REG (p->exp)))) invalidate (p->exp, VOIDmode); else if (GET_CODE (p->exp) == STRICT_LOW_PART || GET_CODE (p->exp) == ZERO_EXTRACT) invalidate (XEXP (p->exp, 0), GET_MODE (p->exp)); } /* Process insns starting after LOOP_START until we hit a CALL_INSN or a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it). The only thing we do with SET_DEST is invalidate entries, so we can safely process each SET in order. It is slightly less efficient to do so, but we only want to handle the most common cases. The gen_move_insn call in cse_set_around_loop may create new pseudos. These pseudos won't have valid entries in any of the tables indexed by register number, such as reg_qty. We avoid out-of-range array accesses by not processing any instructions created after cse started. */ for (insn = NEXT_INSN (loop_start); GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL && INSN_UID (insn) < max_insn_uid && ! (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END); insn = NEXT_INSN (insn)) { if (INSN_P (insn) && (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == CLOBBER)) cse_set_around_loop (PATTERN (insn), insn, loop_start); else if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == PARALLEL) for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER) cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn, loop_start); } } /* Process one SET of an insn that was skipped. We ignore CLOBBERs since they are done elsewhere. This function is called via note_stores. */ static void invalidate_skipped_set (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED) { enum rtx_code code = GET_CODE (dest); if (code == MEM && ! addr_affects_sp_p (dest) /* If this is not a stack push ... */ /* There are times when an address can appear varying and be a PLUS during this scan when it would be a fixed address were we to know the proper equivalences. So invalidate all memory if there is a BLKmode or nonscalar memory reference or a reference to a variable address. */ && (MEM_IN_STRUCT_P (dest) || GET_MODE (dest) == BLKmode || cse_rtx_varies_p (XEXP (dest, 0), 0))) { invalidate_memory (); return; } if (GET_CODE (set) == CLOBBER || CC0_P (dest) || dest == pc_rtx) return; if (code == STRICT_LOW_PART || code == ZERO_EXTRACT) invalidate (XEXP (dest, 0), GET_MODE (dest)); else if (code == REG || code == SUBREG || code == MEM) invalidate (dest, VOIDmode); } /* Invalidate all insns from START up to the end of the function or the next label. This called when we wish to CSE around a block that is conditionally executed. */ static void invalidate_skipped_block (rtx start) { rtx insn; for (insn = start; insn && GET_CODE (insn) != CODE_LABEL; insn = NEXT_INSN (insn)) { if (! 
INSN_P (insn)) continue; if (GET_CODE (insn) == CALL_INSN) { if (! CONST_OR_PURE_CALL_P (insn)) invalidate_memory (); invalidate_for_call (); } invalidate_from_clobbers (PATTERN (insn)); note_stores (PATTERN (insn), invalidate_skipped_set, NULL); } } /* If modifying X will modify the value in *DATA (which is really an `rtx *'), indicate that fact by setting the pointed to value to NULL_RTX. */ static void cse_check_loop_start (rtx x, rtx set ATTRIBUTE_UNUSED, void *data) { rtx *cse_check_loop_start_value = (rtx *) data; if (*cse_check_loop_start_value == NULL_RTX || GET_CODE (x) == CC0 || GET_CODE (x) == PC) return; if ((MEM_P (x) && MEM_P (*cse_check_loop_start_value)) || reg_overlap_mentioned_p (x, *cse_check_loop_start_value)) *cse_check_loop_start_value = NULL_RTX; } /* X is a SET or CLOBBER contained in INSN that was found near the start of a loop that starts with the label at LOOP_START. If X is a SET, we see if its SET_SRC is currently in our hash table. If so, we see if it has a value equal to some register used only in the loop exit code (as marked by jump.c). If those two conditions are true, we search backwards from the start of the loop to see if that same value was loaded into a register that still retains its value at the start of the loop. If so, we insert an insn after the load to copy the destination of that load into the equivalent register and (try to) replace our SET_SRC with that register. In any event, we invalidate whatever this SET or CLOBBER modifies. */ static void cse_set_around_loop (rtx x, rtx insn, rtx loop_start) { struct table_elt *src_elt; /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that are setting PC or CC0 or whose SET_SRC is already a register. */ if (GET_CODE (x) == SET && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0 && !REG_P (SET_SRC (x))) { src_elt = lookup (SET_SRC (x), HASH (SET_SRC (x), GET_MODE (SET_DEST (x))), GET_MODE (SET_DEST (x))); if (src_elt) for (src_elt = src_elt->first_same_value; src_elt; src_elt = src_elt->next_same_value) if (REG_P (src_elt->exp) && REG_LOOP_TEST_P (src_elt->exp) && COST (src_elt->exp) < COST (SET_SRC (x))) { rtx p, set; /* Look for an insn in front of LOOP_START that sets something in the desired mode to SET_SRC (x) before we hit a label or CALL_INSN. */ for (p = prev_nonnote_insn (loop_start); p && GET_CODE (p) != CALL_INSN && GET_CODE (p) != CODE_LABEL; p = prev_nonnote_insn (p)) if ((set = single_set (p)) != 0 && REG_P (SET_DEST (set)) && GET_MODE (SET_DEST (set)) == src_elt->mode && rtx_equal_p (SET_SRC (set), SET_SRC (x))) { /* We now have to ensure that nothing between P and LOOP_START modified anything referenced in SET_SRC (x). We know that nothing within the loop can modify it, or we would have invalidated it in the hash table. */ rtx q; rtx cse_check_loop_start_value = SET_SRC (x); for (q = p; q != loop_start; q = NEXT_INSN (q)) if (INSN_P (q)) note_stores (PATTERN (q), cse_check_loop_start, &cse_check_loop_start_value); /* If nothing was changed and we can replace our SET_SRC, add an insn after P to copy its destination to what we will be replacing SET_SRC with. */ if (cse_check_loop_start_value && single_set (p) && !can_throw_internal (insn) && validate_change (insn, &SET_SRC (x), src_elt->exp, 0)) { /* If this creates new pseudos, this is unsafe, because the regno of new pseudo is unsuitable to index into reg_qty when cse_insn processes the new insn. Therefore, if a new pseudo was created, discard this optimization. 
*/ int nregs = max_reg_num (); rtx move = gen_move_insn (src_elt->exp, SET_DEST (set)); if (nregs != max_reg_num ()) { if (! validate_change (insn, &SET_SRC (x), SET_SRC (set), 0)) abort (); } else { if (CONSTANT_P (SET_SRC (set)) && ! find_reg_equal_equiv_note (insn)) set_unique_reg_note (insn, REG_EQUAL, SET_SRC (set)); if (control_flow_insn_p (p)) /* p can cause a control flow transfer so it is the last insn of a basic block. We can't therefore use emit_insn_after. */ emit_insn_before (move, next_nonnote_insn (p)); else emit_insn_after (move, p); } } break; } } } /* Deal with the destination of X affecting the stack pointer. */ addr_affects_sp_p (SET_DEST (x)); /* See comment on similar code in cse_insn for explanation of these tests. */ if (REG_P (SET_DEST (x)) || GET_CODE (SET_DEST (x)) == SUBREG || MEM_P (SET_DEST (x))) invalidate (SET_DEST (x), VOIDmode); else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT) invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x))); } /* Find the end of INSN's basic block and return its range, the total number of SETs in all the insns of the block, the last insn of the block, and the branch path. The branch path indicates which branches should be followed. If a nonzero path size is specified, the block should be rescanned and a different set of branches will be taken. The branch path is only used if FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is nonzero. DATA is a pointer to a struct cse_basic_block_data, defined below, that is used to describe the block. It is filled in with the information about the current block. The incoming structure's branch path, if any, is used to construct the output branch path. */ static void cse_end_of_basic_block (rtx insn, struct cse_basic_block_data *data, int follow_jumps, int after_loop, int skip_blocks) { rtx p = insn, q; int nsets = 0; int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn); rtx next = INSN_P (insn) ? insn : next_real_insn (insn); int path_size = data->path_size; int path_entry = 0; int i; /* Update the previous branch path, if any. If the last branch was previously PATH_TAKEN, mark it PATH_NOT_TAKEN. If it was previously PATH_NOT_TAKEN, shorten the path by one and look at the previous branch. We know that at least one branch must have been taken if PATH_SIZE is nonzero. */ while (path_size > 0) { if (data->path[path_size - 1].status != PATH_NOT_TAKEN) { data->path[path_size - 1].status = PATH_NOT_TAKEN; break; } else path_size--; } /* If the first instruction is marked with QImode, that means we've already processed this block. Our caller will look at DATA->LAST to figure out where to go next. We want to return the next block in the instruction stream, not some branched-to block somewhere else. We accomplish this by pretending our called forbid us to follow jumps, or skip blocks. */ if (GET_MODE (insn) == QImode) follow_jumps = skip_blocks = 0; /* Scan to end of this basic block. */ while (p && GET_CODE (p) != CODE_LABEL) { /* Don't cse out the end of a loop. This makes a difference only for the unusual loops that always execute at least once; all other loops have labels there so we will stop in any case. Cse'ing out the end of the loop is dangerous because it might cause an invariant expression inside the loop to be reused after the end of the loop. This would make it hard to move the expression out of the loop in loop.c, especially if it is one of several equivalent expressions and loop.c would like to eliminate it. 
If we are running after loop.c has finished, we can ignore the NOTE_INSN_LOOP_END. */ if (! after_loop && GET_CODE (p) == NOTE && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END) break; /* Don't cse over a call to setjmp; on some machines (eg VAX) the regs restored by the longjmp come from a later time than the setjmp. */ if (PREV_INSN (p) && GET_CODE (PREV_INSN (p)) == CALL_INSN && find_reg_note (PREV_INSN (p), REG_SETJMP, NULL)) break; /* A PARALLEL can have lots of SETs in it, especially if it is really an ASM_OPERANDS. */ if (INSN_P (p) && GET_CODE (PATTERN (p)) == PARALLEL) nsets += XVECLEN (PATTERN (p), 0); else if (GET_CODE (p) != NOTE) nsets += 1; /* Ignore insns made by CSE; they cannot affect the boundaries of the basic block. */ if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid) high_cuid = INSN_CUID (p); if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid) low_cuid = INSN_CUID (p); /* See if this insn is in our branch path. If it is and we are to take it, do so. */ if (path_entry < path_size && data->path[path_entry].branch == p) { if (data->path[path_entry].status != PATH_NOT_TAKEN) p = JUMP_LABEL (p); /* Point to next entry in path, if any. */ path_entry++; } /* If this is a conditional jump, we can follow it if -fcse-follow-jumps was specified, we haven't reached our maximum path length, there are insns following the target of the jump, this is the only use of the jump label, and the target label is preceded by a BARRIER. Alternatively, we can follow the jump if it branches around a block of code and there are no other branches into the block. In this case invalidate_skipped_block will be called to invalidate any registers set in the block when following the jump. */ else if ((follow_jumps || skip_blocks) && path_size < PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH) - 1 && GET_CODE (p) == JUMP_INSN && GET_CODE (PATTERN (p)) == SET && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE && JUMP_LABEL (p) != 0 && LABEL_NUSES (JUMP_LABEL (p)) == 1 && NEXT_INSN (JUMP_LABEL (p)) != 0) { for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q)) if ((GET_CODE (q) != NOTE || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END || (PREV_INSN (q) && GET_CODE (PREV_INSN (q)) == CALL_INSN && find_reg_note (PREV_INSN (q), REG_SETJMP, NULL))) && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0)) break; /* If we ran into a BARRIER, this code is an extension of the basic block when the branch is taken. */ if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER) { /* Don't allow ourself to keep walking around an always-executed loop. */ if (next_real_insn (q) == next) { p = NEXT_INSN (p); continue; } /* Similarly, don't put a branch in our path more than once. */ for (i = 0; i < path_entry; i++) if (data->path[i].branch == p) break; if (i != path_entry) break; data->path[path_entry].branch = p; data->path[path_entry++].status = PATH_TAKEN; /* This branch now ends our path. It was possible that we didn't see this branch the last time around (when the insn in front of the target was a JUMP_INSN that was turned into a no-op). */ path_size = path_entry; p = JUMP_LABEL (p); /* Mark block so we won't scan it again later. */ PUT_MODE (NEXT_INSN (p), QImode); } /* Detect a branch around a block of code. 
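   That is (hypothetical source, for illustration), a conditional jump over a
   run of insns that contains no labels, so the only other way to reach the
   target is to fall through the jump:

     if (cond)
       goto over;
     ... block with no labels and no other entry ...
   over:;

   Such a jump is recorded on the path with status PATH_AROUND; when the path
   is followed, invalidate_skipped_block forgets everything the skipped insns
   set, so CSE can continue safely past the label.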
*/ else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL) { rtx tmp; if (next_real_insn (q) == next) { p = NEXT_INSN (p); continue; } for (i = 0; i < path_entry; i++) if (data->path[i].branch == p) break; if (i != path_entry) break; /* This is no_labels_between_p (p, q) with an added check for reaching the end of a function (in case Q precedes P). */ for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp)) if (GET_CODE (tmp) == CODE_LABEL) break; if (tmp == q) { data->path[path_entry].branch = p; data->path[path_entry++].status = PATH_AROUND; path_size = path_entry; p = JUMP_LABEL (p); /* Mark block so we won't scan it again later. */ PUT_MODE (NEXT_INSN (p), QImode); } } } p = NEXT_INSN (p); } data->low_cuid = low_cuid; data->high_cuid = high_cuid; data->nsets = nsets; data->last = p; /* If all jumps in the path are not taken, set our path length to zero so a rescan won't be done. */ for (i = path_size - 1; i >= 0; i--) if (data->path[i].status != PATH_NOT_TAKEN) break; if (i == -1) data->path_size = 0; else data->path_size = path_size; /* End the current branch path. */ data->path[path_size].branch = 0; } /* Perform cse on the instructions of a function. F is the first instruction. NREGS is one plus the highest pseudo-reg number used in the instruction. AFTER_LOOP is 1 if this is the cse call done after loop optimization (only if -frerun-cse-after-loop). Returns 1 if jump_optimize should be redone due to simplifications in conditional jump instructions. */ int cse_main (rtx f, int nregs, int after_loop, FILE *file) { struct cse_basic_block_data val; rtx insn = f; int i; val.path = xmalloc (sizeof (struct branch_path) * PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH)); cse_jumps_altered = 0; recorded_label_ref = 0; constant_pool_entries_cost = 0; constant_pool_entries_regcost = 0; val.path_size = 0; rtl_hooks = cse_rtl_hooks; init_recog (); init_alias_analysis (); max_reg = nregs; max_insn_uid = get_max_uid (); reg_eqv_table = xmalloc (nregs * sizeof (struct reg_eqv_elem)); #ifdef LOAD_EXTEND_OP /* Allocate scratch rtl here. cse_insn will fill in the memory reference and change the code and mode as appropriate. */ memory_extend_rtx = gen_rtx_ZERO_EXTEND (VOIDmode, NULL_RTX); #endif /* Reset the counter indicating how many elements have been made thus far. */ n_elements_made = 0; /* Find the largest uid. */ max_uid = get_max_uid (); uid_cuid = xcalloc (max_uid + 1, sizeof (int)); /* Compute the mapping from uids to cuids. CUIDs are numbers assigned to insns, like uids, except that cuids increase monotonically through the code. Don't assign cuids to line-number NOTEs, so that the distance in cuids between two insns is not affected by -g. */ for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) != NOTE || NOTE_LINE_NUMBER (insn) < 0) INSN_CUID (insn) = ++i; else /* Give a line number note the same cuid as preceding insn. */ INSN_CUID (insn) = i; } ggc_push_context (); /* Loop over basic blocks. Compute the maximum number of qty's needed for each basic block (which is 2 for each SET). */ insn = f; while (insn) { cse_altered = 0; cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop, flag_cse_skip_blocks); /* If this basic block was already processed or has no sets, skip it. */ if (val.nsets == 0 || GET_MODE (insn) == QImode) { PUT_MODE (insn, VOIDmode); insn = (val.last ? 
NEXT_INSN (val.last) : 0); val.path_size = 0; continue; } cse_basic_block_start = val.low_cuid; cse_basic_block_end = val.high_cuid; max_qty = val.nsets * 2; if (file) fnotice (file, ";; Processing block from %d to %d, %d sets.\n", INSN_UID (insn), val.last ? INSN_UID (val.last) : 0, val.nsets); /* Make MAX_QTY bigger to give us room to optimize past the end of this basic block, if that should prove useful. */ if (max_qty < 500) max_qty = 500; max_qty += max_reg; /* If this basic block is being extended by following certain jumps, (see `cse_end_of_basic_block'), we reprocess the code from the start. Otherwise, we start after this basic block. */ if (val.path_size > 0) cse_basic_block (insn, val.last, val.path, 0); else { int old_cse_jumps_altered = cse_jumps_altered; rtx temp; /* When cse changes a conditional jump to an unconditional jump, we want to reprocess the block, since it will give us a new branch path to investigate. */ cse_jumps_altered = 0; temp = cse_basic_block (insn, val.last, val.path, ! after_loop); if (cse_jumps_altered == 0 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0)) insn = temp; cse_jumps_altered |= old_cse_jumps_altered; } if (cse_altered) ggc_collect (); #ifdef USE_C_ALLOCA alloca (0); #endif } ggc_pop_context (); if (max_elements_made < n_elements_made) max_elements_made = n_elements_made; /* Clean up. */ end_alias_analysis (); free (uid_cuid); free (reg_eqv_table); free (val.path); rtl_hooks = general_rtl_hooks; return cse_jumps_altered || recorded_label_ref; } /* Process a single basic block. FROM and TO and the limits of the basic block. NEXT_BRANCH points to the branch path when following jumps or a null path when not following jumps. AROUND_LOOP is nonzero if we are to try to cse around to the start of a loop. This is true when we are being called for the last time on a block and this CSE pass is before loop.c. */ static rtx cse_basic_block (rtx from, rtx to, struct branch_path *next_branch, int around_loop) { rtx insn; int to_usage = 0; rtx libcall_insn = NULL_RTX; int num_insns = 0; int no_conflict = 0; /* This array is undefined before max_reg, so only allocate the space actually needed and adjust the start. */ qty_table = xmalloc ((max_qty - max_reg) * sizeof (struct qty_table_elem)); qty_table -= max_reg; new_basic_block (); /* TO might be a label. If so, protect it from being deleted. */ if (to != 0 && GET_CODE (to) == CODE_LABEL) ++LABEL_NUSES (to); for (insn = from; insn != to; insn = NEXT_INSN (insn)) { enum rtx_code code = GET_CODE (insn); /* If we have processed 1,000 insns, flush the hash table to avoid extreme quadratic behavior. We must not include NOTEs in the count since there may be more of them when generating debugging information. If we clear the table at different times, code generated with -g -O might be different than code generated with -O but not -g. ??? This is a real kludge and needs to be done some other way. Perhaps for 2.9. */ if (code != NOTE && num_insns++ > 1000) { flush_hash_table (); num_insns = 0; } /* See if this is a branch that is part of the path. If so, and it is to be taken, do so. */ if (next_branch->branch == insn) { enum taken status = next_branch++->status; if (status != PATH_NOT_TAKEN) { if (status == PATH_TAKEN) record_jump_equiv (insn, 1); else invalidate_skipped_block (NEXT_INSN (insn)); /* Set the last insn as the jump insn; it doesn't affect cc0. Then follow this branch. 
*/ #ifdef HAVE_cc0 prev_insn_cc0 = 0; prev_insn = insn; #endif insn = JUMP_LABEL (insn); continue; } } if (GET_MODE (insn) == QImode) PUT_MODE (insn, VOIDmode); if (GET_RTX_CLASS (code) == RTX_INSN) { rtx p; /* Process notes first so we have all notes in canonical forms when looking for duplicate operations. */ if (REG_NOTES (insn)) REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX); /* Track when we are inside in LIBCALL block. Inside such a block, we do not want to record destinations. The last insn of a LIBCALL block is not considered to be part of the block, since its destination is the result of the block and hence should be recorded. */ if (REG_NOTES (insn) != 0) { if ((p = find_reg_note (insn, REG_LIBCALL, NULL_RTX))) libcall_insn = XEXP (p, 0); else if (find_reg_note (insn, REG_RETVAL, NULL_RTX)) { /* Keep libcall_insn for the last SET insn of a no-conflict block to prevent changing the destination. */ if (! no_conflict) libcall_insn = 0; else no_conflict = -1; } else if (find_reg_note (insn, REG_NO_CONFLICT, NULL_RTX)) no_conflict = 1; } cse_insn (insn, libcall_insn); if (no_conflict == -1) { libcall_insn = 0; no_conflict = 0; } /* If we haven't already found an insn where we added a LABEL_REF, check this one. */ if (GET_CODE (insn) == INSN && ! recorded_label_ref && for_each_rtx (&PATTERN (insn), check_for_label_ref, (void *) insn)) recorded_label_ref = 1; } /* If INSN is now an unconditional jump, skip to the end of our basic block by pretending that we just did the last insn in the basic block. If we are jumping to the end of our block, show that we can have one usage of TO. */ if (any_uncondjump_p (insn)) { if (to == 0) { free (qty_table + max_reg); return 0; } if (JUMP_LABEL (insn) == to) to_usage = 1; /* Maybe TO was deleted because the jump is unconditional. If so, there is nothing left in this basic block. */ /* ??? Perhaps it would be smarter to set TO to whatever follows this insn, and pretend the basic block had always ended here. */ if (INSN_DELETED_P (to)) break; insn = PREV_INSN (to); } /* See if it is ok to keep on going past the label which used to end our basic block. Remember that we incremented the count of that label, so we decrement it here. If we made a jump unconditional, TO_USAGE will be one; in that case, we don't want to count the use in that jump. */ if (to != 0 && NEXT_INSN (insn) == to && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage) { struct cse_basic_block_data val; rtx prev; insn = NEXT_INSN (to); /* If TO was the last insn in the function, we are done. */ if (insn == 0) { free (qty_table + max_reg); return 0; } /* If TO was preceded by a BARRIER we are done with this block because it has no continuation. */ prev = prev_nonnote_insn (to); if (prev && GET_CODE (prev) == BARRIER) { free (qty_table + max_reg); return insn; } /* Find the end of the following block. Note that we won't be following branches in this case. */ to_usage = 0; val.path_size = 0; val.path = xmalloc (sizeof (struct branch_path) * PARAM_VALUE (PARAM_MAX_CSE_PATH_LENGTH)); cse_end_of_basic_block (insn, &val, 0, 0, 0); free (val.path); /* If the tables we allocated have enough space left to handle all the SETs in the next basic block, continue through it. Otherwise, return, and that block will be scanned individually. */ if (val.nsets * 2 + next_qty > max_qty) break; cse_basic_block_start = val.low_cuid; cse_basic_block_end = val.high_cuid; to = val.last; /* Prevent TO from being deleted if it is a label. 
*/ if (to != 0 && GET_CODE (to) == CODE_LABEL) ++LABEL_NUSES (to); /* Back up so we process the first insn in the extension. */ insn = PREV_INSN (insn); } } if (next_qty > max_qty) abort (); /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and the previous insn is the only insn that branches to the head of a loop, we can cse into the loop. Don't do this if we changed the jump structure of a loop unless we aren't going to be following jumps. */ insn = prev_nonnote_insn (to); if ((cse_jumps_altered == 0 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0)) && around_loop && to != 0 && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END && GET_CODE (insn) == JUMP_INSN && JUMP_LABEL (insn) != 0 && LABEL_NUSES (JUMP_LABEL (insn)) == 1) cse_around_loop (JUMP_LABEL (insn)); free (qty_table + max_reg); return to ? NEXT_INSN (to) : 0; } /* Called via for_each_rtx to see if an insn is using a LABEL_REF for which there isn't a REG_LABEL note. Return one if so. DATA is the insn. */ static int check_for_label_ref (rtx *rtl, void *data) { rtx insn = (rtx) data; /* If this insn uses a LABEL_REF and there isn't a REG_LABEL note for it, we must rerun jump since it needs to place the note. If this is a LABEL_REF for a CODE_LABEL that isn't in the insn chain, don't do this since no REG_LABEL will be added. */ return (GET_CODE (*rtl) == LABEL_REF && ! LABEL_REF_NONLOCAL_P (*rtl) && LABEL_P (XEXP (*rtl, 0)) && INSN_UID (XEXP (*rtl, 0)) != 0 && ! find_reg_note (insn, REG_LABEL, XEXP (*rtl, 0))); } /* Count the number of times registers are used (not set) in X. COUNTS is an array in which we accumulate the count, INCR is how much we count each register usage. */ static void count_reg_usage (rtx x, int *counts, int incr) { enum rtx_code code; rtx note; const char *fmt; int i, j; if (x == 0) return; switch (code = GET_CODE (x)) { case REG: counts[REGNO (x)] += incr; return; case PC: case CC0: case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: return; case CLOBBER: /* If we are clobbering a MEM, mark any registers inside the address as being used. */ if (MEM_P (XEXP (x, 0))) count_reg_usage (XEXP (XEXP (x, 0), 0), counts, incr); return; case SET: /* Unless we are setting a REG, count everything in SET_DEST. */ if (!REG_P (SET_DEST (x))) count_reg_usage (SET_DEST (x), counts, incr); count_reg_usage (SET_SRC (x), counts, incr); return; case CALL_INSN: count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, incr); /* Fall through. */ case INSN: case JUMP_INSN: count_reg_usage (PATTERN (x), counts, incr); /* Things used in a REG_EQUAL note aren't dead since loop may try to use them. */ note = find_reg_equal_equiv_note (x); if (note) { rtx eqv = XEXP (note, 0); if (GET_CODE (eqv) == EXPR_LIST) /* This REG_EQUAL note describes the result of a function call. Process all the arguments. */ do { count_reg_usage (XEXP (eqv, 0), counts, incr); eqv = XEXP (eqv, 1); } while (eqv && GET_CODE (eqv) == EXPR_LIST); else count_reg_usage (eqv, counts, incr); } return; case EXPR_LIST: if (REG_NOTE_KIND (x) == REG_EQUAL || (REG_NOTE_KIND (x) != REG_NONNEG && GET_CODE (XEXP (x,0)) == USE) /* FUNCTION_USAGE expression lists may include (CLOBBER (mem /u)), involving registers in the address. */ || GET_CODE (XEXP (x, 0)) == CLOBBER) count_reg_usage (XEXP (x, 0), counts, incr); count_reg_usage (XEXP (x, 1), counts, incr); return; case ASM_OPERANDS: /* Iterate over just the inputs, not the constraints as well. 
*/ for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) count_reg_usage (ASM_OPERANDS_INPUT (x, i), counts, incr); return; case INSN_LIST: abort (); default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') count_reg_usage (XEXP (x, i), counts, incr); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) count_reg_usage (XVECEXP (x, i, j), counts, incr); } } /* Return true if set is live. */ static bool set_live_p (rtx set, rtx insn ATTRIBUTE_UNUSED, /* Only used with HAVE_cc0. */ int *counts) { #ifdef HAVE_cc0 rtx tem; #endif if (set_noop_p (set)) ; #ifdef HAVE_cc0 else if (GET_CODE (SET_DEST (set)) == CC0 && !side_effects_p (SET_SRC (set)) && ((tem = next_nonnote_insn (insn)) == 0 || !INSN_P (tem) || !reg_referenced_p (cc0_rtx, PATTERN (tem)))) return false; #endif else if (!REG_P (SET_DEST (set)) || REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER || counts[REGNO (SET_DEST (set))] != 0 || side_effects_p (SET_SRC (set))) return true; return false; } /* Return true if insn is live. */ static bool insn_live_p (rtx insn, int *counts) { int i; if (flag_non_call_exceptions && may_trap_p (PATTERN (insn))) return true; else if (GET_CODE (PATTERN (insn)) == SET) return set_live_p (PATTERN (insn), insn, counts); else if (GET_CODE (PATTERN (insn)) == PARALLEL) { for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) { rtx elt = XVECEXP (PATTERN (insn), 0, i); if (GET_CODE (elt) == SET) { if (set_live_p (elt, insn, counts)) return true; } else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE) return true; } return false; } else return true; } /* Return true if libcall is dead as a whole. */ static bool dead_libcall_p (rtx insn, int *counts) { rtx note, set, new; /* See if there's a REG_EQUAL note on this insn and try to replace the source with the REG_EQUAL expression. We assume that insns with REG_RETVALs can only be reg->reg copies at this point. */ note = find_reg_note (insn, REG_EQUAL, NULL_RTX); if (!note) return false; set = single_set (insn); if (!set) return false; new = simplify_rtx (XEXP (note, 0)); if (!new) new = XEXP (note, 0); /* While changing insn, we must update the counts accordingly. */ count_reg_usage (insn, counts, -1); if (validate_change (insn, &SET_SRC (set), new, 0)) { count_reg_usage (insn, counts, 1); remove_note (insn, find_reg_note (insn, REG_RETVAL, NULL_RTX)); remove_note (insn, note); return true; } if (CONSTANT_P (new)) { new = force_const_mem (GET_MODE (SET_DEST (set)), new); if (new && validate_change (insn, &SET_SRC (set), new, 0)) { count_reg_usage (insn, counts, 1); remove_note (insn, find_reg_note (insn, REG_RETVAL, NULL_RTX)); remove_note (insn, note); return true; } } count_reg_usage (insn, counts, 1); return false; } /* Scan all the insns and delete any that are dead; i.e., they store a register that is never used or they copy a register to itself. This is used to remove insns made obviously dead by cse, loop or other optimizations. It improves the heuristics in loop since it won't try to move dead invariants out of loops or make givs for dead quantities. The remaining passes of the compilation are also sped up. */ int delete_trivially_dead_insns (rtx insns, int nreg) { int *counts; rtx insn, prev; int in_libcall = 0, dead_libcall = 0; int ndead = 0, nlastdead, niterations = 0; timevar_push (TV_DELETE_TRIVIALLY_DEAD); /* First count the number of times each register is used. 
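   In outline, the rest of the pass then works like this (simplified sketch;
   the real loop below must also keep whole libcall blocks alive or dead as a
   unit):

     count every register use into counts[];
     do
       walk the insns from last to first;
       delete each insn that only sets registers whose count is zero
         and has no other side effects, subtracting its own uses
         from counts[] as it goes;
     while the sweep deleted at least one insn;

   Deleting an insn can drop the use counts of its inputs to zero, which is
   why the backward sweep is repeated until it reaches a fixed point.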
*/ counts = xcalloc (nreg, sizeof (int)); for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn)) count_reg_usage (insn, counts, 1); do { nlastdead = ndead; niterations++; /* Go from the last insn to the first and delete insns that only set unused registers or copy a register to itself. As we delete an insn, remove usage counts for registers it uses. The first jump optimization pass may leave a real insn as the last insn in the function. We must not skip that insn or we may end up deleting code that is not really dead. */ insn = get_last_insn (); if (! INSN_P (insn)) insn = prev_real_insn (insn); for (; insn; insn = prev) { int live_insn = 0; prev = prev_real_insn (insn); /* Don't delete any insns that are part of a libcall block unless we can delete the whole libcall block. Flow or loop might get confused if we did that. Remember that we are scanning backwards. */ if (find_reg_note (insn, REG_RETVAL, NULL_RTX)) { in_libcall = 1; live_insn = 1; dead_libcall = dead_libcall_p (insn, counts); } else if (in_libcall) live_insn = ! dead_libcall; else live_insn = insn_live_p (insn, counts); /* If this is a dead insn, delete it and show registers in it aren't being used. */ if (! live_insn) { count_reg_usage (insn, counts, -1); delete_insn_and_edges (insn); ndead++; } if (find_reg_note (insn, REG_LIBCALL, NULL_RTX)) { in_libcall = 0; dead_libcall = 0; } } } while (ndead != nlastdead); if (dump_file && ndead) fprintf (dump_file, "Deleted %i trivially dead insns; %i iterations\n", ndead, niterations); /* Clean up. */ free (counts); timevar_pop (TV_DELETE_TRIVIALLY_DEAD); return ndead; } /* This function is called via for_each_rtx. The argument, NEWREG, is a condition code register with the desired mode. If we are looking at the same register in a different mode, replace it with NEWREG. */ static int cse_change_cc_mode (rtx *loc, void *data) { rtx newreg = (rtx) data; if (*loc && REG_P (*loc) && REGNO (*loc) == REGNO (newreg) && GET_MODE (*loc) != GET_MODE (newreg)) { *loc = newreg; return -1; } return 0; } /* Change the mode of any reference to the register REGNO (NEWREG) to GET_MODE (NEWREG), starting at START. Stop before END. Stop at any instruction which modifies NEWREG. */ static void cse_change_cc_mode_insns (rtx start, rtx end, rtx newreg) { rtx insn; for (insn = start; insn != end; insn = NEXT_INSN (insn)) { if (! INSN_P (insn)) continue; if (reg_set_p (newreg, insn)) return; for_each_rtx (&PATTERN (insn), cse_change_cc_mode, newreg); for_each_rtx (®_NOTES (insn), cse_change_cc_mode, newreg); } } /* BB is a basic block which finishes with CC_REG as a condition code register which is set to CC_SRC. Look through the successors of BB to find blocks which have a single predecessor (i.e., this one), and look through those blocks for an assignment to CC_REG which is equivalent to CC_SRC. CAN_CHANGE_MODE indicates whether we are permitted to change the mode of CC_SRC to a compatible mode. This returns VOIDmode if no equivalent assignments were found. Otherwise it returns the mode which CC_SRC should wind up with. The main complexity in this function is handling the mode issues. We may have more than one duplicate which we can eliminate, and we try to find a mode which will work for multiple duplicates. 
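   As a hypothetical illustration, for source like

     if (a < b) f ();
     if (a < b) g ();

   on a target with one fixed flags register, each block may start with its
   own (set (reg flags) (compare a b)).  If the value set at the end of BB is
   still valid on entry to a successor that recomputes it, the duplicate
   assignment in the successor can be deleted; when the two comparisons were
   produced in different CC modes, we first ask the target for a mode that is
   compatible with both users.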
*/ static enum machine_mode cse_cc_succs (basic_block bb, rtx cc_reg, rtx cc_src, bool can_change_mode) { bool found_equiv; enum machine_mode mode; unsigned int insn_count; edge e; rtx insns[2]; enum machine_mode modes[2]; rtx last_insns[2]; unsigned int i; rtx newreg; /* We expect to have two successors. Look at both before picking the final mode for the comparison. If we have more successors (i.e., some sort of table jump, although that seems unlikely), then we require all beyond the first two to use the same mode. */ found_equiv = false; mode = GET_MODE (cc_src); insn_count = 0; for (e = bb->succ; e; e = e->succ_next) { rtx insn; rtx end; if (e->flags & EDGE_COMPLEX) continue; if (! e->dest->pred || e->dest->pred->pred_next || e->dest == EXIT_BLOCK_PTR) continue; end = NEXT_INSN (BB_END (e->dest)); for (insn = BB_HEAD (e->dest); insn != end; insn = NEXT_INSN (insn)) { rtx set; if (! INSN_P (insn)) continue; /* If CC_SRC is modified, we have to stop looking for something which uses it. */ if (modified_in_p (cc_src, insn)) break; /* Check whether INSN sets CC_REG to CC_SRC. */ set = single_set (insn); if (set && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) == REGNO (cc_reg)) { bool found; enum machine_mode set_mode; enum machine_mode comp_mode; found = false; set_mode = GET_MODE (SET_SRC (set)); comp_mode = set_mode; if (rtx_equal_p (cc_src, SET_SRC (set))) found = true; else if (GET_CODE (cc_src) == COMPARE && GET_CODE (SET_SRC (set)) == COMPARE && mode != set_mode && rtx_equal_p (XEXP (cc_src, 0), XEXP (SET_SRC (set), 0)) && rtx_equal_p (XEXP (cc_src, 1), XEXP (SET_SRC (set), 1))) { comp_mode = targetm.cc_modes_compatible (mode, set_mode); if (comp_mode != VOIDmode && (can_change_mode || comp_mode == mode)) found = true; } if (found) { found_equiv = true; if (insn_count < ARRAY_SIZE (insns)) { insns[insn_count] = insn; modes[insn_count] = set_mode; last_insns[insn_count] = end; ++insn_count; if (mode != comp_mode) { if (! can_change_mode) abort (); mode = comp_mode; PUT_MODE (cc_src, mode); } } else { if (set_mode != mode) { /* We found a matching expression in the wrong mode, but we don't have room to store it in the array. Punt. This case should be rare. */ break; } /* INSN sets CC_REG to a value equal to CC_SRC with the right mode. We can simply delete it. */ delete_insn (insn); } /* We found an instruction to delete. Keep looking, in the hopes of finding a three-way jump. */ continue; } /* We found an instruction which sets the condition code, so don't look any farther. */ break; } /* If INSN sets CC_REG in some other way, don't look any farther. */ if (reg_set_p (cc_reg, insn)) break; } /* If we fell off the bottom of the block, we can keep looking through successors. We pass CAN_CHANGE_MODE as false because we aren't prepared to handle compatibility between the further blocks and this block. */ if (insn == end) { enum machine_mode submode; submode = cse_cc_succs (e->dest, cc_reg, cc_src, false); if (submode != VOIDmode) { if (submode != mode) abort (); found_equiv = true; can_change_mode = false; } } } if (! found_equiv) return VOIDmode; /* Now INSN_COUNT is the number of instructions we found which set CC_REG to a value equivalent to CC_SRC. The instructions are in INSNS. The modes used by those instructions are in MODES. */ newreg = NULL_RTX; for (i = 0; i < insn_count; ++i) { if (modes[i] != mode) { /* We need to change the mode of CC_REG in INSNS[i] and subsequent instructions. */ if (! 
newreg) { if (GET_MODE (cc_reg) == mode) newreg = cc_reg; else newreg = gen_rtx_REG (mode, REGNO (cc_reg)); } cse_change_cc_mode_insns (NEXT_INSN (insns[i]), last_insns[i], newreg); } delete_insn (insns[i]); } return mode; } /* If we have a fixed condition code register (or two), walk through the instructions and try to eliminate duplicate assignments. */ void cse_condition_code_reg (void) { unsigned int cc_regno_1; unsigned int cc_regno_2; rtx cc_reg_1; rtx cc_reg_2; basic_block bb; if (! targetm.fixed_condition_code_regs (&cc_regno_1, &cc_regno_2)) return; cc_reg_1 = gen_rtx_REG (CCmode, cc_regno_1); if (cc_regno_2 != INVALID_REGNUM) cc_reg_2 = gen_rtx_REG (CCmode, cc_regno_2); else cc_reg_2 = NULL_RTX; FOR_EACH_BB (bb) { rtx last_insn; rtx cc_reg; rtx insn; rtx cc_src_insn; rtx cc_src; enum machine_mode mode; enum machine_mode orig_mode; /* Look for blocks which end with a conditional jump based on a condition code register. Then look for the instruction which sets the condition code register. Then look through the successor blocks for instructions which set the condition code register to the same value. There are other possible uses of the condition code register, but these are by far the most common and the ones which we are most likely to be able to optimize. */ last_insn = BB_END (bb); if (GET_CODE (last_insn) != JUMP_INSN) continue; if (reg_referenced_p (cc_reg_1, PATTERN (last_insn))) cc_reg = cc_reg_1; else if (cc_reg_2 && reg_referenced_p (cc_reg_2, PATTERN (last_insn))) cc_reg = cc_reg_2; else continue; cc_src_insn = NULL_RTX; cc_src = NULL_RTX; for (insn = PREV_INSN (last_insn); insn && insn != PREV_INSN (BB_HEAD (bb)); insn = PREV_INSN (insn)) { rtx set; if (! INSN_P (insn)) continue; set = single_set (insn); if (set && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) == REGNO (cc_reg)) { cc_src_insn = insn; cc_src = SET_SRC (set); break; } else if (reg_set_p (cc_reg, insn)) break; } if (! cc_src_insn) continue; if (modified_between_p (cc_src, cc_src_insn, NEXT_INSN (last_insn))) continue; /* Now CC_REG is a condition code register used for a conditional jump at the end of the block, and CC_SRC, in CC_SRC_INSN, is the value to which that condition code register is set, and CC_SRC is still meaningful at the end of the basic block. */ orig_mode = GET_MODE (cc_src); mode = cse_cc_succs (bb, cc_reg, cc_src, true); if (mode != VOIDmode) { if (mode != GET_MODE (cc_src)) abort (); if (mode != orig_mode) { rtx newreg = gen_rtx_REG (mode, REGNO (cc_reg)); /* Change the mode of CC_REG in CC_SRC_INSN to GET_MODE (NEWREG). */ for_each_rtx (&PATTERN (cc_src_insn), cse_change_cc_mode, newreg); for_each_rtx (®_NOTES (cc_src_insn), cse_change_cc_mode, newreg); /* Do the same in the following insns that use the current value of CC_REG within BB. */ cse_change_cc_mode_insns (NEXT_INSN (cc_src_insn), NEXT_INSN (last_insn), newreg); } } } } /* Common subexpression elimination library for GNU compiler. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static bool cselib_record_memory; static int entry_and_rtx_equal_p (const void *, const void *); static hashval_t get_value_hash (const void *); static struct elt_list *new_elt_list (struct elt_list *, cselib_val *); static struct elt_loc_list *new_elt_loc_list (struct elt_loc_list *, rtx); static void unchain_one_value (cselib_val *); static void unchain_one_elt_list (struct elt_list **); static void unchain_one_elt_loc_list (struct elt_loc_list **); static void clear_table (void); static int discard_useless_locs (void **, void *); static int discard_useless_values (void **, void *); static void remove_useless_values (void); static rtx wrap_constant (enum machine_mode, rtx); static unsigned int hash_rtx (rtx, enum machine_mode, int); static cselib_val *new_cselib_val (unsigned int, enum machine_mode); static void add_mem_for_addr (cselib_val *, cselib_val *, rtx); static cselib_val *cselib_lookup_mem (rtx, int); static void cselib_invalidate_regno (unsigned int, enum machine_mode); static void cselib_invalidate_mem (rtx); static void cselib_invalidate_rtx (rtx, rtx, void *); static void cselib_record_set (rtx, cselib_val *, cselib_val *); static void cselib_record_sets (rtx); /* There are three ways in which cselib can look up an rtx: - for a REG, the reg_values table (which is indexed by regno) is used - for a MEM, we recursively look up its address and then follow the addr_list of that value - for everything else, we compute a hash value and go through the hash table. Since different rtx's can still have the same hash value, this involves walking the table entries for a given value and comparing the locations of the entries with the rtx we are looking up. */ /* A table that enables us to look up elts by their value. */ static htab_t elt_hash_table; /* This is a global so we don't have to pass this through every function. It is used in new_elt_loc_list to set SETTING_INSN. */ static rtx cselib_current_insn; static bool cselib_current_insn_in_libcall; /* Every new unknown value gets a unique number. */ static unsigned int next_unknown_value; /* The number of registers we had when the varrays were last resized. */ static unsigned int cselib_nregs; /* Count values without known locations. Whenever this grows too big, we remove these useless values from the table. */ static int n_useless_values; /* Number of useless values before we remove them from the hash table. */ #define MAX_USELESS_VALUES 32 /* This table maps from register number to values. It does not contain pointers to cselib_val structures, but rather elt_lists. The purpose is to be able to refer to the same register in different modes. The first element of the list defines the mode in which the register was set; if the mode is unknown or the value is no longer valid in that mode, ELT will be NULL for the first element. */ struct elt_list **reg_values; unsigned int reg_values_size; #define REG_VALUES(i) reg_values[i] /* The largest number of hard regs used by any entry added to the REG_VALUES table. Cleared on each clear_table() invocation. */ static unsigned int max_value_regs; /* Here the set of indices I with REG_VALUES(I) != 0 is saved. This is used in clear_table() for fast emptying. 
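   This is the usual sparse-set trick: remember each register number the
   first time its REG_VALUES slot becomes nonempty, so that emptying the
   table only touches slots that were actually used.  A minimal stand-alone
   sketch of the same idiom (hypothetical names, not the code used here):

     enum { NSLOTS = 256 };
     static void *table[NSLOTS];
     static unsigned int used[NSLOTS], n_used;

     static void
     set_slot (unsigned int i, void *v)
     {
       if (table[i] == 0)
         used[n_used++] = i;
       table[i] = v;
     }

     static void
     clear_all (void)
     {
       while (n_used > 0)
         table[used[--n_used]] = 0;
     }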
*/ static unsigned int *used_regs; static unsigned int n_used_regs; /* We pass this to cselib_invalidate_mem to invalidate all of memory for a non-const call instruction. */ static GTY(()) rtx callmem; /* Set by discard_useless_locs if it deleted the last location of any value. */ static int values_became_useless; /* Used as stop element of the containing_mem list so we can check presence in the list by checking the next pointer. */ static cselib_val dummy_val; /* Used to list all values that contain memory reference. May or may not contain the useless values - the list is compacted each time memory is invalidated. */ static cselib_val *first_containing_mem = &dummy_val; static alloc_pool elt_loc_list_pool, elt_list_pool, cselib_val_pool, value_pool; /* Allocate a struct elt_list and fill in its two elements with the arguments. */ static inline struct elt_list * new_elt_list (struct elt_list *next, cselib_val *elt) { struct elt_list *el; el = pool_alloc (elt_list_pool); el->next = next; el->elt = elt; return el; } /* Allocate a struct elt_loc_list and fill in its two elements with the arguments. */ static inline struct elt_loc_list * new_elt_loc_list (struct elt_loc_list *next, rtx loc) { struct elt_loc_list *el; el = pool_alloc (elt_loc_list_pool); el->next = next; el->loc = loc; el->setting_insn = cselib_current_insn; el->in_libcall = cselib_current_insn_in_libcall; return el; } /* The elt_list at *PL is no longer needed. Unchain it and free its storage. */ static inline void unchain_one_elt_list (struct elt_list **pl) { struct elt_list *l = *pl; *pl = l->next; pool_free (elt_list_pool, l); } /* Likewise for elt_loc_lists. */ static void unchain_one_elt_loc_list (struct elt_loc_list **pl) { struct elt_loc_list *l = *pl; *pl = l->next; pool_free (elt_loc_list_pool, l); } /* Likewise for cselib_vals. This also frees the addr_list associated with V. */ static void unchain_one_value (cselib_val *v) { while (v->addr_list) unchain_one_elt_list (&v->addr_list); pool_free (cselib_val_pool, v); } /* Remove all entries from the hash table. Also used during initialization. If CLEAR_ALL isn't set, then only clear the entries which are known to have been used. */ static void clear_table (void) { unsigned int i; for (i = 0; i < n_used_regs; i++) REG_VALUES (used_regs[i]) = 0; max_value_regs = 0; n_used_regs = 0; htab_empty (elt_hash_table); n_useless_values = 0; next_unknown_value = 0; first_containing_mem = &dummy_val; } /* The equality test for our hash table. The first argument ENTRY is a table element (i.e. a cselib_val), while the second arg X is an rtx. We know that all callers of htab_find_slot_with_hash will wrap CONST_INTs into a CONST of an appropriate mode. */ static int entry_and_rtx_equal_p (const void *entry, const void *x_arg) { struct elt_loc_list *l; const cselib_val *v = (const cselib_val *) entry; rtx x = (rtx) x_arg; enum machine_mode mode = GET_MODE (x); if (GET_CODE (x) == CONST_INT || (mode == VOIDmode && GET_CODE (x) == CONST_DOUBLE)) abort (); if (mode != GET_MODE (v->u.val_rtx)) return 0; /* Unwrap X if necessary. */ if (GET_CODE (x) == CONST && (GET_CODE (XEXP (x, 0)) == CONST_INT || GET_CODE (XEXP (x, 0)) == CONST_DOUBLE)) x = XEXP (x, 0); /* We don't guarantee that distinct rtx's have different hash values, so we need to do a comparison. */ for (l = v->locs; l; l = l->next) if (rtx_equal_for_cselib_p (l->loc, x)) return 1; return 0; } /* The hash function for our hash table. 
The value is always computed with hash_rtx when adding an element; this function just extracts the hash value from a cselib_val structure. */ static hashval_t get_value_hash (const void *entry) { const cselib_val *v = (const cselib_val *) entry; return v->value; } /* Return true if X contains a VALUE rtx. If ONLY_USELESS is set, we only return true for values which point to a cselib_val whose value element has been set to zero, which implies the cselib_val will be removed. */ int references_value_p (rtx x, int only_useless) { enum rtx_code code = GET_CODE (x); const char *fmt = GET_RTX_FORMAT (code); int i, j; if (GET_CODE (x) == VALUE && (! only_useless || CSELIB_VAL_PTR (x)->locs == 0)) return 1; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e' && references_value_p (XEXP (x, i), only_useless)) return 1; else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) if (references_value_p (XVECEXP (x, i, j), only_useless)) return 1; } return 0; } /* For all locations found in X, delete locations that reference useless values (i.e. values without any location). Called through htab_traverse. */ static int discard_useless_locs (void **x, void *info ATTRIBUTE_UNUSED) { cselib_val *v = (cselib_val *)*x; struct elt_loc_list **p = &v->locs; int had_locs = v->locs != 0; while (*p) { if (references_value_p ((*p)->loc, 1)) unchain_one_elt_loc_list (p); else p = &(*p)->next; } if (had_locs && v->locs == 0) { n_useless_values++; values_became_useless = 1; } return 1; } /* If X is a value with no locations, remove it from the hashtable. */ static int discard_useless_values (void **x, void *info ATTRIBUTE_UNUSED) { cselib_val *v = (cselib_val *)*x; if (v->locs == 0) { CSELIB_VAL_PTR (v->u.val_rtx) = NULL; htab_clear_slot (elt_hash_table, x); unchain_one_value (v); n_useless_values--; } return 1; } /* Clean out useless values (i.e. those which no longer have locations associated with them) from the hash table. */ static void remove_useless_values (void) { cselib_val **p, *v; /* First pass: eliminate locations that reference the value. That in turn can make more values useless. */ do { values_became_useless = 0; htab_traverse (elt_hash_table, discard_useless_locs, 0); } while (values_became_useless); /* Second pass: actually remove the values. */ p = &first_containing_mem; for (v = *p; v != &dummy_val; v = v->next_containing_mem) if (v->locs) { *p = v; p = &(*p)->next_containing_mem; } *p = &dummy_val; htab_traverse (elt_hash_table, discard_useless_values, 0); if (n_useless_values != 0) abort (); } /* Return the mode in which a register was last set. If X is not a register, return its mode. If the mode in which the register was set is not known, or the value was already clobbered, return VOIDmode. */ enum machine_mode cselib_reg_set_mode (rtx x) { if (!REG_P (x)) return GET_MODE (x); if (REG_VALUES (REGNO (x)) == NULL || REG_VALUES (REGNO (x))->elt == NULL) return VOIDmode; return GET_MODE (REG_VALUES (REGNO (x))->elt->u.val_rtx); } /* Return nonzero if we can prove that X and Y contain the same value, taking our gathered information into account. 
*/ int rtx_equal_for_cselib_p (rtx x, rtx y) { enum rtx_code code; const char *fmt; int i; if (REG_P (x) || MEM_P (x)) { cselib_val *e = cselib_lookup (x, GET_MODE (x), 0); if (e) x = e->u.val_rtx; } if (REG_P (y) || MEM_P (y)) { cselib_val *e = cselib_lookup (y, GET_MODE (y), 0); if (e) y = e->u.val_rtx; } if (x == y) return 1; if (GET_CODE (x) == VALUE && GET_CODE (y) == VALUE) return CSELIB_VAL_PTR (x) == CSELIB_VAL_PTR (y); if (GET_CODE (x) == VALUE) { cselib_val *e = CSELIB_VAL_PTR (x); struct elt_loc_list *l; for (l = e->locs; l; l = l->next) { rtx t = l->loc; /* Avoid infinite recursion. */ if (REG_P (t) || MEM_P (t)) continue; else if (rtx_equal_for_cselib_p (t, y)) return 1; } return 0; } if (GET_CODE (y) == VALUE) { cselib_val *e = CSELIB_VAL_PTR (y); struct elt_loc_list *l; for (l = e->locs; l; l = l->next) { rtx t = l->loc; if (REG_P (t) || MEM_P (t)) continue; else if (rtx_equal_for_cselib_p (x, t)) return 1; } return 0; } if (GET_CODE (x) != GET_CODE (y) || GET_MODE (x) != GET_MODE (y)) return 0; /* This won't be handled correctly by the code below. */ if (GET_CODE (x) == LABEL_REF) return XEXP (x, 0) == XEXP (y, 0); code = GET_CODE (x); fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { int j; switch (fmt[i]) { case 'w': if (XWINT (x, i) != XWINT (y, i)) return 0; break; case 'n': case 'i': if (XINT (x, i) != XINT (y, i)) return 0; break; case 'V': case 'E': /* Two vectors must have the same length. */ if (XVECLEN (x, i) != XVECLEN (y, i)) return 0; /* And the corresponding elements must match. */ for (j = 0; j < XVECLEN (x, i); j++) if (! rtx_equal_for_cselib_p (XVECEXP (x, i, j), XVECEXP (y, i, j))) return 0; break; case 'e': if (! rtx_equal_for_cselib_p (XEXP (x, i), XEXP (y, i))) return 0; break; case 'S': case 's': if (strcmp (XSTR (x, i), XSTR (y, i))) return 0; break; case 'u': /* These are just backpointers, so they don't matter. */ break; case '0': case 't': break; /* It is believed that rtx's at this level will never contain anything but integers and other rtx's, except for within LABEL_REFs and SYMBOL_REFs. */ default: abort (); } } return 1; } /* We need to pass down the mode of constants through the hash table functions. For that purpose, wrap them in a CONST of the appropriate mode. */ static rtx wrap_constant (enum machine_mode mode, rtx x) { if (GET_CODE (x) != CONST_INT && (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != VOIDmode)) return x; if (mode == VOIDmode) abort (); return gen_rtx_CONST (mode, x); } /* Hash an rtx. Return 0 if we couldn't hash the rtx. For registers and memory locations, we look up their cselib_val structure and return its VALUE element. Possible reasons for return 0 are: the object is volatile, or we couldn't find a register or memory location in the table and CREATE is zero. If CREATE is nonzero, table elts are created for regs and mem. MODE is used in hashing for CONST_INTs only; otherwise the mode of X is used. */ static unsigned int hash_rtx (rtx x, enum machine_mode mode, int create) { cselib_val *e; int i, j; enum rtx_code code; const char *fmt; unsigned int hash = 0; code = GET_CODE (x); hash += (unsigned) code + (unsigned) GET_MODE (x); switch (code) { case MEM: case REG: e = cselib_lookup (x, GET_MODE (x), create); if (! e) return 0; return e->value; case CONST_INT: hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + INTVAL (x); return hash ? hash : (unsigned int) CONST_INT; case CONST_DOUBLE: /* This is like the general case, except that it only counts the integers representing the constant. 
*/ hash += (unsigned) code + (unsigned) GET_MODE (x); if (GET_MODE (x) != VOIDmode) hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); else hash += ((unsigned) CONST_DOUBLE_LOW (x) + (unsigned) CONST_DOUBLE_HIGH (x)); return hash ? hash : (unsigned int) CONST_DOUBLE; case CONST_VECTOR: { int units; rtx elt; units = CONST_VECTOR_NUNITS (x); for (i = 0; i < units; ++i) { elt = CONST_VECTOR_ELT (x, i); hash += hash_rtx (elt, GET_MODE (elt), 0); } return hash; } /* Assume there is only one rtx object for any given label. */ case LABEL_REF: hash += ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0); return hash ? hash : (unsigned int) LABEL_REF; case SYMBOL_REF: hash += ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0); return hash ? hash : (unsigned int) SYMBOL_REF; case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: case POST_MODIFY: case PRE_MODIFY: case PC: case CC0: case CALL: case UNSPEC_VOLATILE: return 0; case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 0; break; default: break; } i = GET_RTX_LENGTH (code) - 1; fmt = GET_RTX_FORMAT (code); for (; i >= 0; i--) { if (fmt[i] == 'e') { rtx tem = XEXP (x, i); unsigned int tem_hash = hash_rtx (tem, 0, create); if (tem_hash == 0) return 0; hash += tem_hash; } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) { unsigned int tem_hash = hash_rtx (XVECEXP (x, i, j), 0, create); if (tem_hash == 0) return 0; hash += tem_hash; } else if (fmt[i] == 's') { const unsigned char *p = (const unsigned char *) XSTR (x, i); if (p) while (*p) hash += *p++; } else if (fmt[i] == 'i') hash += XINT (x, i); else if (fmt[i] == '0' || fmt[i] == 't') /* unused */; else abort (); } return hash ? hash : 1 + (unsigned int) GET_CODE (x); } /* Create a new value structure for VALUE and initialize it. The mode of the value is MODE. */ static inline cselib_val * new_cselib_val (unsigned int value, enum machine_mode mode) { cselib_val *e = pool_alloc (cselib_val_pool); #ifdef ENABLE_CHECKING if (value == 0) abort (); #endif e->value = value; /* We use custom method to allocate this RTL construct because it accounts about 8% of overall memory usage. */ e->u.val_rtx = pool_alloc (value_pool); memset (e->u.val_rtx, 0, RTX_HDR_SIZE); PUT_CODE (e->u.val_rtx, VALUE); PUT_MODE (e->u.val_rtx, mode); CSELIB_VAL_PTR (e->u.val_rtx) = e; e->addr_list = 0; e->locs = 0; e->next_containing_mem = 0; return e; } /* ADDR_ELT is a value that is used as address. MEM_ELT is the value that contains the data at this address. X is a MEM that represents the value. Update the two value structures to represent this situation. */ static void add_mem_for_addr (cselib_val *addr_elt, cselib_val *mem_elt, rtx x) { struct elt_loc_list *l; /* Avoid duplicates. */ for (l = mem_elt->locs; l; l = l->next) if (MEM_P (l->loc) && CSELIB_VAL_PTR (XEXP (l->loc, 0)) == addr_elt) return; addr_elt->addr_list = new_elt_list (addr_elt->addr_list, mem_elt); mem_elt->locs = new_elt_loc_list (mem_elt->locs, replace_equiv_address_nv (x, addr_elt->u.val_rtx)); if (mem_elt->next_containing_mem == NULL) { mem_elt->next_containing_mem = first_containing_mem; first_containing_mem = mem_elt; } } /* Subroutine of cselib_lookup. Return a value for X, which is a MEM rtx. If CREATE, make a new one if we haven't seen it before. 
*/ static cselib_val * cselib_lookup_mem (rtx x, int create) { enum machine_mode mode = GET_MODE (x); void **slot; cselib_val *addr; cselib_val *mem_elt; struct elt_list *l; if (MEM_VOLATILE_P (x) || mode == BLKmode || !cselib_record_memory || (FLOAT_MODE_P (mode) && flag_float_store)) return 0; /* Look up the value for the address. */ addr = cselib_lookup (XEXP (x, 0), mode, create); if (! addr) return 0; /* Find a value that describes a value of our mode at that address. */ for (l = addr->addr_list; l; l = l->next) if (GET_MODE (l->elt->u.val_rtx) == mode) return l->elt; if (! create) return 0; mem_elt = new_cselib_val (++next_unknown_value, mode); add_mem_for_addr (addr, mem_elt, x); slot = htab_find_slot_with_hash (elt_hash_table, wrap_constant (mode, x), mem_elt->value, INSERT); *slot = mem_elt; return mem_elt; } /* Walk rtx X and replace all occurrences of REG and MEM subexpressions with VALUE expressions. This way, it becomes independent of changes to registers and memory. X isn't actually modified; if modifications are needed, new rtl is allocated. However, the return value can share rtl with X. */ rtx cselib_subst_to_values (rtx x) { enum rtx_code code = GET_CODE (x); const char *fmt = GET_RTX_FORMAT (code); cselib_val *e; struct elt_list *l; rtx copy = x; int i; switch (code) { case REG: l = REG_VALUES (REGNO (x)); if (l && l->elt == NULL) l = l->next; for (; l; l = l->next) if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x)) return l->elt->u.val_rtx; abort (); case MEM: e = cselib_lookup_mem (x, 0); if (! e) { /* This happens for autoincrements. Assign a value that doesn't match any other. */ e = new_cselib_val (++next_unknown_value, GET_MODE (x)); } return e->u.val_rtx; case CONST_DOUBLE: case CONST_VECTOR: case CONST_INT: return x; case POST_INC: case PRE_INC: case POST_DEC: case PRE_DEC: case POST_MODIFY: case PRE_MODIFY: e = new_cselib_val (++next_unknown_value, GET_MODE (x)); return e->u.val_rtx; default: break; } for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { rtx t = cselib_subst_to_values (XEXP (x, i)); if (t != XEXP (x, i) && x == copy) copy = shallow_copy_rtx (x); XEXP (copy, i) = t; } else if (fmt[i] == 'E') { int j, k; for (j = 0; j < XVECLEN (x, i); j++) { rtx t = cselib_subst_to_values (XVECEXP (x, i, j)); if (t != XVECEXP (x, i, j) && XVEC (x, i) == XVEC (copy, i)) { if (x == copy) copy = shallow_copy_rtx (x); XVEC (copy, i) = rtvec_alloc (XVECLEN (x, i)); for (k = 0; k < j; k++) XVECEXP (copy, i, k) = XVECEXP (x, i, k); } XVECEXP (copy, i, j) = t; } } } return copy; } /* Look up the rtl expression X in our tables and return the value it has. If CREATE is zero, we return NULL if we don't know the value. Otherwise, we create a new one if possible, using mode MODE if X doesn't have a mode (i.e. because it's a constant). */ cselib_val * cselib_lookup (rtx x, enum machine_mode mode, int create) { void **slot; cselib_val *e; unsigned int hashval; if (GET_MODE (x) != VOIDmode) mode = GET_MODE (x); if (GET_CODE (x) == VALUE) return CSELIB_VAL_PTR (x); if (REG_P (x)) { struct elt_list *l; unsigned int i = REGNO (x); l = REG_VALUES (i); if (l && l->elt == NULL) l = l->next; for (; l; l = l->next) if (mode == GET_MODE (l->elt->u.val_rtx)) return l->elt; if (! 
create) return 0; if (i < FIRST_PSEUDO_REGISTER) { unsigned int n = hard_regno_nregs[i][mode]; if (n > max_value_regs) max_value_regs = n; } e = new_cselib_val (++next_unknown_value, GET_MODE (x)); e->locs = new_elt_loc_list (e->locs, x); if (REG_VALUES (i) == 0) { /* Maintain the invariant that the first entry of REG_VALUES, if present, must be the value used to set the register, or NULL. */ used_regs[n_used_regs++] = i; REG_VALUES (i) = new_elt_list (REG_VALUES (i), NULL); } REG_VALUES (i)->next = new_elt_list (REG_VALUES (i)->next, e); slot = htab_find_slot_with_hash (elt_hash_table, x, e->value, INSERT); *slot = e; return e; } if (MEM_P (x)) return cselib_lookup_mem (x, create); hashval = hash_rtx (x, mode, create); /* Can't even create if hashing is not possible. */ if (! hashval) return 0; slot = htab_find_slot_with_hash (elt_hash_table, wrap_constant (mode, x), hashval, create ? INSERT : NO_INSERT); if (slot == 0) return 0; e = (cselib_val *) *slot; if (e) return e; e = new_cselib_val (hashval, mode); /* We have to fill the slot before calling cselib_subst_to_values: the hash table is inconsistent until we do so, and cselib_subst_to_values will need to do lookups. */ *slot = (void *) e; e->locs = new_elt_loc_list (e->locs, cselib_subst_to_values (x)); return e; } /* Invalidate any entries in reg_values that overlap REGNO. This is called if REGNO is changing. MODE is the mode of the assignment to REGNO, which is used to determine how many hard registers are being changed. If MODE is VOIDmode, then only REGNO is being changed; this is used when invalidating call clobbered registers across a call. */ static void cselib_invalidate_regno (unsigned int regno, enum machine_mode mode) { unsigned int endregno; unsigned int i; /* If we see pseudos after reload, something is _wrong_. */ if (reload_completed && regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0) abort (); /* Determine the range of registers that must be invalidated. For pseudos, only REGNO is affected. For hard regs, we must take MODE into account, and we must also invalidate lower register numbers if they contain values that overlap REGNO. */ if (regno < FIRST_PSEUDO_REGISTER) { if (mode == VOIDmode) abort (); if (regno < max_value_regs) i = 0; else i = regno - max_value_regs; endregno = regno + hard_regno_nregs[regno][mode]; } else { i = regno; endregno = regno + 1; } for (; i < endregno; i++) { struct elt_list **l = &REG_VALUES (i); /* Go through all known values for this reg; if it overlaps the range we're invalidating, remove the value. */ while (*l) { cselib_val *v = (*l)->elt; struct elt_loc_list **p; unsigned int this_last = i; if (i < FIRST_PSEUDO_REGISTER && v != NULL) this_last += hard_regno_nregs[i][GET_MODE (v->u.val_rtx)] - 1; if (this_last < regno || v == NULL) { l = &(*l)->next; continue; } /* We have an overlap. */ if (*l == REG_VALUES (i)) { /* Maintain the invariant that the first entry of REG_VALUES, if present, must be the value used to set the register, or NULL. This is also nice because then we won't push the same regno onto used_regs multiple times. */ (*l)->elt = NULL; l = &(*l)->next; } else unchain_one_elt_list (l); /* Now, we clear the mapping from value to reg. It must exist, so this code will crash intentionally if it doesn't. */ for (p = &v->locs; ; p = &(*p)->next) { rtx x = (*p)->loc; if (REG_P (x) && REGNO (x) == i) { unchain_one_elt_loc_list (p); break; } } if (v->locs == 0) n_useless_values++; } } } /* Return 1 if X has a value that can vary even between two executions of the program.
0 means X can be compared reliably against certain constants or near-constants. */ static int cselib_rtx_varies_p (rtx x ATTRIBUTE_UNUSED, int from_alias ATTRIBUTE_UNUSED) { /* We actually don't need to verify very hard. This is because if X has actually changed, we invalidate the memory anyway, so assume that all common memory addresses are invariant. */ return 0; } /* Invalidate any locations in the table which are changed because of a store to MEM_RTX. If this is called because of a non-const call instruction, MEM_RTX is (mem:BLK const0_rtx). */ static void cselib_invalidate_mem (rtx mem_rtx) { cselib_val **vp, *v, *next; int num_mems = 0; rtx mem_addr; mem_addr = canon_rtx (get_addr (XEXP (mem_rtx, 0))); mem_rtx = canon_rtx (mem_rtx); vp = &first_containing_mem; for (v = *vp; v != &dummy_val; v = next) { bool has_mem = false; struct elt_loc_list **p = &v->locs; int had_locs = v->locs != 0; while (*p) { rtx x = (*p)->loc; cselib_val *addr; struct elt_list **mem_chain; /* MEMs may occur in locations only at the top level; below that every MEM or REG is substituted by its VALUE. */ if (!MEM_P (x)) { p = &(*p)->next; continue; } if (num_mems < PARAM_VALUE (PARAM_MAX_CSELIB_MEMORY_LOCATIONS) && ! canon_true_dependence (mem_rtx, GET_MODE (mem_rtx), mem_addr, x, cselib_rtx_varies_p)) { has_mem = true; num_mems++; p = &(*p)->next; continue; } /* This one overlaps. */ /* We must have a mapping from this MEM's address to the value (E). Remove that, too. */ addr = cselib_lookup (XEXP (x, 0), VOIDmode, 0); mem_chain = &addr->addr_list; for (;;) { if ((*mem_chain)->elt == v) { unchain_one_elt_list (mem_chain); break; } mem_chain = &(*mem_chain)->next; } unchain_one_elt_loc_list (p); } if (had_locs && v->locs == 0) n_useless_values++; next = v->next_containing_mem; if (has_mem) { *vp = v; vp = &(*vp)->next_containing_mem; } else v->next_containing_mem = NULL; } *vp = &dummy_val; } /* Invalidate DEST, which is being assigned to or clobbered. The second and the third parameter exist so that this function can be passed to note_stores; they are ignored. */ static void cselib_invalidate_rtx (rtx dest, rtx ignore ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG) dest = XEXP (dest, 0); if (REG_P (dest)) cselib_invalidate_regno (REGNO (dest), GET_MODE (dest)); else if (MEM_P (dest)) cselib_invalidate_mem (dest); /* Some machines don't define AUTO_INC_DEC, but they still use push instructions. We need to catch that case here in order to invalidate the stack pointer correctly. Note that invalidating the stack pointer is different from invalidating DEST. */ if (push_operand (dest, GET_MODE (dest))) cselib_invalidate_rtx (stack_pointer_rtx, NULL_RTX, NULL); } /* Record the result of a SET instruction. DEST is being set; the source contains the value described by SRC_ELT. If DEST is a MEM, DEST_ADDR_ELT describes its address. */ static void cselib_record_set (rtx dest, cselib_val *src_elt, cselib_val *dest_addr_elt) { int dreg = REG_P (dest) ? 
(int) REGNO (dest) : -1; if (src_elt == 0 || side_effects_p (dest)) return; if (dreg >= 0) { if (dreg < FIRST_PSEUDO_REGISTER) { unsigned int n = hard_regno_nregs[dreg][GET_MODE (dest)]; if (n > max_value_regs) max_value_regs = n; } if (REG_VALUES (dreg) == 0) { used_regs[n_used_regs++] = dreg; REG_VALUES (dreg) = new_elt_list (REG_VALUES (dreg), src_elt); } else { if (REG_VALUES (dreg)->elt == 0) REG_VALUES (dreg)->elt = src_elt; else /* The register should have been invalidated. */ abort (); } if (src_elt->locs == 0) n_useless_values--; src_elt->locs = new_elt_loc_list (src_elt->locs, dest); } else if (MEM_P (dest) && dest_addr_elt != 0 && cselib_record_memory) { if (src_elt->locs == 0) n_useless_values--; add_mem_for_addr (dest_addr_elt, src_elt, dest); } } /* Describe a single set that is part of an insn. */ struct single_set { rtx src; rtx dest; cselib_val *src_elt; cselib_val *dest_addr_elt; }; /* There is no good way to determine how many elements there can be in a PARALLEL. Since it's fairly cheap, use a really large number. */ #define MAX_SETS (FIRST_PSEUDO_REGISTER * 2) /* Record the effects of any sets in INSN. */ static void cselib_record_sets (rtx insn) { int n_sets = 0; int i; struct single_set sets[MAX_SETS]; rtx body = PATTERN (insn); rtx cond = 0; body = PATTERN (insn); if (GET_CODE (body) == COND_EXEC) { cond = COND_EXEC_TEST (body); body = COND_EXEC_CODE (body); } /* Find all sets. */ if (GET_CODE (body) == SET) { sets[0].src = SET_SRC (body); sets[0].dest = SET_DEST (body); n_sets = 1; } else if (GET_CODE (body) == PARALLEL) { /* Look through the PARALLEL and record the values being set, if possible. Also handle any CLOBBERs. */ for (i = XVECLEN (body, 0) - 1; i >= 0; --i) { rtx x = XVECEXP (body, 0, i); if (GET_CODE (x) == SET) { sets[n_sets].src = SET_SRC (x); sets[n_sets].dest = SET_DEST (x); n_sets++; } } } /* Look up the values that are read. Do this before invalidating the locations that are written. */ for (i = 0; i < n_sets; i++) { rtx dest = sets[i].dest; /* A STRICT_LOW_PART can be ignored; we'll record the equivalence for the low part after invalidating any knowledge about larger modes. */ if (GET_CODE (sets[i].dest) == STRICT_LOW_PART) sets[i].dest = dest = XEXP (dest, 0); /* We don't know how to record anything but REG or MEM. */ if (REG_P (dest) || (MEM_P (dest) && cselib_record_memory)) { rtx src = sets[i].src; if (cond) src = gen_rtx_IF_THEN_ELSE (GET_MODE (src), cond, src, dest); sets[i].src_elt = cselib_lookup (src, GET_MODE (dest), 1); if (MEM_P (dest)) sets[i].dest_addr_elt = cselib_lookup (XEXP (dest, 0), Pmode, 1); else sets[i].dest_addr_elt = 0; } } /* Invalidate all locations written by this insn. Note that the elts we looked up in the previous loop aren't affected, just some of their locations may go away. */ note_stores (body, cselib_invalidate_rtx, NULL); /* If this is an asm, look for duplicate sets. This can happen when the user uses the same value as an output multiple times. This is valid if the outputs are not actually used thereafter. Treat this case as if the value isn't actually set. We do this by smashing the destination to pc_rtx, so that we won't record the value later. */ if (n_sets >= 2 && asm_noperands (body) >= 0) { for (i = 0; i < n_sets; i++) { rtx dest = sets[i].dest; if (REG_P (dest) || MEM_P (dest)) { int j; for (j = i + 1; j < n_sets; j++) if (rtx_equal_p (dest, sets[j].dest)) { sets[i].dest = pc_rtx; sets[j].dest = pc_rtx; } } } } /* Now enter the equivalences in our tables. 
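Each surviving set is handed to cselib_record_set, which records DEST as one more location of SRC_ELT's VALUE. Illustrative example (not from the sources): after (set (reg:SI 65) (plus:SI (reg:SI 60) (const_int 8))), the VALUE computed for the PLUS gains (reg:SI 65) in its location list, so later uses of reg 65 and of the same address expression are known to be equivalent.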
*/ for (i = 0; i < n_sets; i++) { rtx dest = sets[i].dest; if (REG_P (dest) || (MEM_P (dest) && cselib_record_memory)) cselib_record_set (dest, sets[i].src_elt, sets[i].dest_addr_elt); } } /* Record the effects of INSN. */ void cselib_process_insn (rtx insn) { int i; rtx x; if (find_reg_note (insn, REG_LIBCALL, NULL)) cselib_current_insn_in_libcall = true; if (find_reg_note (insn, REG_RETVAL, NULL)) cselib_current_insn_in_libcall = false; cselib_current_insn = insn; /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp. */ if (GET_CODE (insn) == CODE_LABEL || (GET_CODE (insn) == CALL_INSN && find_reg_note (insn, REG_SETJMP, NULL)) || (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == ASM_OPERANDS && MEM_VOLATILE_P (PATTERN (insn)))) { clear_table (); return; } if (! INSN_P (insn)) { cselib_current_insn = 0; return; } /* If this is a call instruction, forget anything stored in a call clobbered register, or, if this is not a const call, in memory. */ if (GET_CODE (insn) == CALL_INSN) { for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (call_used_regs[i]) cselib_invalidate_regno (i, reg_raw_mode[i]); if (! CONST_OR_PURE_CALL_P (insn)) cselib_invalidate_mem (callmem); } cselib_record_sets (insn); #ifdef AUTO_INC_DEC /* Clobber any registers which appear in REG_INC notes. We could keep track of the changes to their values, but it is unlikely to help. */ for (x = REG_NOTES (insn); x; x = XEXP (x, 1)) if (REG_NOTE_KIND (x) == REG_INC) cselib_invalidate_rtx (XEXP (x, 0), NULL_RTX, NULL); #endif /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only after we have processed the insn. */ if (GET_CODE (insn) == CALL_INSN) for (x = CALL_INSN_FUNCTION_USAGE (insn); x; x = XEXP (x, 1)) if (GET_CODE (XEXP (x, 0)) == CLOBBER) cselib_invalidate_rtx (XEXP (XEXP (x, 0), 0), NULL_RTX, NULL); cselib_current_insn = 0; if (n_useless_values > MAX_USELESS_VALUES) remove_useless_values (); } /* Initialize cselib for one pass. The caller must also call init_alias_analysis. */ void cselib_init (bool record_memory) { elt_list_pool = create_alloc_pool ("elt_list", sizeof (struct elt_list), 10); elt_loc_list_pool = create_alloc_pool ("elt_loc_list", sizeof (struct elt_loc_list), 10); cselib_val_pool = create_alloc_pool ("cselib_val_list", sizeof (cselib_val), 10); value_pool = create_alloc_pool ("value", RTX_SIZE (VALUE), 100); cselib_record_memory = record_memory; /* This is only created once. */ if (! callmem) callmem = gen_rtx_MEM (BLKmode, const0_rtx); cselib_nregs = max_reg_num (); /* We preserve reg_values to allow expensive clearing of the whole thing. Reallocate it however if it happens to be too large. */ if (!reg_values || reg_values_size < cselib_nregs || (reg_values_size > 10 && reg_values_size > cselib_nregs * 4)) { if (reg_values) free (reg_values); /* Some space for newly emit instructions so we don't end up reallocating in between passes. */ reg_values_size = cselib_nregs + (63 + cselib_nregs) / 16; reg_values = xcalloc (reg_values_size, sizeof (reg_values)); } used_regs = xmalloc (sizeof (*used_regs) * cselib_nregs); n_used_regs = 0; elt_hash_table = htab_create (31, get_value_hash, entry_and_rtx_equal_p, NULL); cselib_current_insn_in_libcall = false; } /* Called when the current user is done with cselib. 
*/ void cselib_finish (void) { free_alloc_pool (elt_list_pool); free_alloc_pool (elt_loc_list_pool); free_alloc_pool (cselib_val_pool); free_alloc_pool (value_pool); clear_table (); htab_delete (elt_hash_table); free (used_regs); used_regs = 0; elt_hash_table = 0; n_useless_values = 0; next_unknown_value = 0; } /* Type information for cselib.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_cselib_h[] = { { &callmem, 1, sizeof (callmem), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, LAST_GGC_ROOT_TAB }; /* Output dbx-format symbol table information from GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Output dbx-format symbol table data. This consists of many symbol table entries, each of them a .stabs assembler pseudo-op with four operands: a "name" which is really a description of one symbol and its type, a "code", which is a symbol defined in stab.h whose name starts with N_, an unused operand always 0, and a "value" which is an address or an offset. The name is enclosed in doublequote characters. Each function, variable, typedef, and structure tag has a symbol table entry to define it. The beginning and end of each level of name scoping within a function are also marked by special symbol table entries. The "name" consists of the symbol name, a colon, a kind-of-symbol letter, and a data type number. The data type number may be followed by "=" and a type definition; normally this will happen the first time the type number is mentioned. The type definition may refer to other types by number, and those type numbers may be followed by "=" and nested definitions. This can make the "name" quite long. When a name is more than 80 characters, we split the .stabs pseudo-op into two .stabs pseudo-ops, both sharing the same "code" and "value". The first one is marked as continued with a double-backslash at the end of its "name".
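As an illustrative example (not part of the original text), a global variable `counter' whose type has already been output as type number 1 gets the name "counter:G1", where `G' is the kind-of-symbol letter for global variables and 1 is the data type number.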
The kind-of-symbol letter distinguished function names from global variables from file-scope variables from parameters from auto variables in memory from typedef names from register variables. See `dbxout_symbol'. The "code" is mostly redundant with the kind-of-symbol letter that goes in the "name", but not entirely: for symbols located in static storage, the "code" says which segment the address is in, which controls how it is relocated. The "value" for a symbol in static storage is the core address of the symbol (actually, the assembler label for the symbol). For a symbol located in a stack slot it is the stack offset; for one in a register, the register number. For a typedef symbol, it is zero. If DEBUG_SYMS_TEXT is defined, all debugging symbols must be output while in the text section. For more on data type definitions, see `dbxout_type'. */ /* dbxout.h - Various declarations for functions found in dbxout.c Copyright (C) 1998, 1999, 2000, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_DBXOUT_H #define GCC_DBXOUT_H extern int dbxout_symbol (tree, int); extern void dbxout_parms (tree); extern void dbxout_reg_parms (tree); extern int dbxout_syms (tree); #endif /* GCC_DBXOUT_H */ #ifdef XCOFF_DEBUGGING_INFO /* XCOFF definitions. These are needed in dbxout.c, final.c, and xcoffout.h. Copyright (C) 1998, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define ASM_STABS_OP "\t.stabx\t" /* Tags and typedefs are C_DECL in XCOFF, not C_LSYM. */ #define DBX_TYPE_DECL_STABS_CODE N_DECL /* Use the XCOFF predefined type numbers. */ #define DBX_ASSIGN_FUNDAMENTAL_TYPE_NUMBER(TYPE) \ xcoff_assign_fundamental_type_number (TYPE) /* Any type with a negative type index has already been output. */ #define DBX_TYPE_DEFINED(TYPE) (TYPE_SYMTAB_ADDRESS (TYPE) < 0) /* Must use N_STSYM for static const variables (those in the text section) instead of N_FUN. */ #define DBX_STATIC_CONST_VAR_CODE N_STSYM /* For static variables, output code to define the start of a static block. 
*/ #define DBX_STATIC_BLOCK_START(ASMFILE,CODE) \ { \ if ((CODE) == N_STSYM) \ fprintf ((ASMFILE), "\t.bs\t%s[RW]\n", xcoff_private_data_section_name);\ else if ((CODE) == N_LCSYM) \ fprintf ((ASMFILE), "\t.bs\t%s\n", xcoff_bss_section_name); \ } /* For static variables, output code to define the end of a static block. */ #define DBX_STATIC_BLOCK_END(ASMFILE,CODE) \ { \ if ((CODE) == N_STSYM || (CODE) == N_LCSYM) \ fputs ("\t.es\n", (ASMFILE)); \ } /* We must use N_RPYSM instead of N_RSYM for register parameters. */ #define DBX_REGPARM_STABS_CODE N_RPSYM /* We must use 'R' instead of 'P' for register parameters. */ #define DBX_REGPARM_STABS_LETTER 'R' /* Define our own finish symbol function, since xcoff stabs have their own different format. */ #define DBX_FINISH_SYMBOL(SYM) \ { \ if (current_sym_addr && current_sym_code == N_FUN) \ fprintf (asmfile, "\",."); \ else \ fprintf (asmfile, "\","); \ /* If we are writing a function name, we must ensure that \ there is no storage-class suffix on the name. */ \ if (current_sym_addr && current_sym_code == N_FUN \ && GET_CODE (current_sym_addr) == SYMBOL_REF) \ { \ const char *_p = XSTR (current_sym_addr, 0); \ if (*_p == '*') \ fprintf (asmfile, "%s", _p+1); \ else \ for (; *_p != '[' && *_p; _p++) \ fprintf (asmfile, "%c", *_p); \ } \ else if (current_sym_addr) \ output_addr_const (asmfile, current_sym_addr); \ else if (current_sym_code == N_GSYM) \ assemble_name (asmfile, XSTR (XEXP (DECL_RTL (sym), 0), 0)); \ else \ fprintf (asmfile, "%d", current_sym_value); \ fprintf (asmfile, ",%d,0\n", stab_to_sclass (current_sym_code)); \ } /* These are IBM XCOFF extensions we need to reference in dbxout.c and xcoffout.c. */ /* AIX XCOFF uses this for typedefs. This can have any value, since it is only used for translation into a C_DECL storage class. */ #ifndef N_DECL #define N_DECL 0x8c #endif /* AIX XCOFF uses this for parameters passed in registers. This can have any value, since it is only used for translation into a C_RPSYM storage class. */ #ifndef N_RPSYM #define N_RPSYM 0x8e #endif /* Name of the current include file. */ extern const char *xcoff_current_include_file; /* Names of bss and data sections. These should be unique names for each compilation unit. */ extern char *xcoff_bss_section_name; extern char *xcoff_private_data_section_name; extern char *xcoff_read_only_section_name; /* Last source file name mentioned in a NOTE insn. */ extern const char *xcoff_lastfile; /* Don't write out path name for main source file. */ #define DBX_OUTPUT_MAIN_SOURCE_DIRECTORY(FILE,FILENAME) /* Write out main source file name using ".file" rather than ".stabs". We don't actually do this here, because the assembler gets confused if there is more than one .file directive. rs6000_xcoff_file_start is already emitting a .file directory, so we don't output one here also. Initialize xcoff_lastfile. */ #define DBX_OUTPUT_MAIN_SOURCE_FILENAME(FILE,FILENAME) \ xcoff_lastfile = (FILENAME) /* If we are still in an include file, its end must be marked. */ #define DBX_OUTPUT_MAIN_SOURCE_FILE_END(FILE, FILENAME) \ { \ if (xcoff_current_include_file) \ { \ fputs ("\t.ei\t", (FILE)); \ output_quoted_string ((FILE), xcoff_current_include_file); \ putc ('\n', (FILE)); \ xcoff_current_include_file = NULL; \ } \ } /* .stabx has the type in a different place. */ #if 0 /* Do not emit any marker for XCOFF until assembler allows XFT_CV. 
*/ #define DBX_OUTPUT_GCC_MARKER(FILE) \ fprintf ((FILE), "%s\"%s\",0,%d,0\n", ASM_STABS_OP, STABS_GCC_MARKER, \ stab_to_sclass (N_GSYM)) #else #define DBX_OUTPUT_GCC_MARKER(FILE) #endif /* Do not break .stabs pseudos into continuations. */ #define DBX_CONTIN_LENGTH 0 /* Don't try to use the `x' type-cross-reference character in DBX data. Also has the consequence of putting each struct, union or enum into a separate .stabs, containing only cross-refs to the others. */ #define DBX_NO_XREFS /* We must put stabs in the text section. If we don't the assembler won't handle them correctly; it will sometimes put stabs where gdb can't find them. */ #define DEBUG_SYMS_TEXT /* Prototype functions in xcoffout.c. */ extern int stab_to_sclass (int); extern void xcoffout_begin_prologue (unsigned int, const char *); extern void xcoffout_begin_block (unsigned, unsigned); extern void xcoffout_end_epilogue (unsigned int, const char *); extern void xcoffout_end_function (unsigned int); extern void xcoffout_end_block (unsigned, unsigned); extern int xcoff_assign_fundamental_type_number (tree); extern void xcoffout_declare_function (FILE *, tree, const char *); extern void xcoffout_source_line (unsigned int, const char *); #endif #undef DBXOUT_DECR_NESTING #define DBXOUT_DECR_NESTING \ if (--debug_nesting == 0 && symbol_queue_index > 0) \ { emit_pending_bincls_if_required (); debug_flush_symbol_queue (); } #undef DBXOUT_DECR_NESTING_AND_RETURN #define DBXOUT_DECR_NESTING_AND_RETURN(x) \ do {--debug_nesting; return (x);} while (0) #ifndef ASM_STABS_OP #define ASM_STABS_OP "\t.stabs\t" #endif #ifndef ASM_STABN_OP #define ASM_STABN_OP "\t.stabn\t" #endif #ifndef DBX_TYPE_DECL_STABS_CODE #define DBX_TYPE_DECL_STABS_CODE N_LSYM #endif #ifndef DBX_STATIC_CONST_VAR_CODE #define DBX_STATIC_CONST_VAR_CODE N_FUN #endif #ifndef DBX_REGPARM_STABS_CODE #define DBX_REGPARM_STABS_CODE N_RSYM #endif #ifndef DBX_REGPARM_STABS_LETTER #define DBX_REGPARM_STABS_LETTER 'P' #endif /* This is used for parameters passed by invisible reference in a register. */ #ifndef GDB_INV_REF_REGPARM_STABS_LETTER #define GDB_INV_REF_REGPARM_STABS_LETTER 'a' #endif #ifndef DBX_MEMPARM_STABS_LETTER #define DBX_MEMPARM_STABS_LETTER 'p' #endif #ifndef FILE_NAME_JOINER #define FILE_NAME_JOINER "/" #endif /* GDB needs to know that the stabs were generated by GCC. We emit an N_OPT stab at the beginning of the source file to indicate this. The string is historical, and different on a very few targets. */ #ifndef STABS_GCC_MARKER #define STABS_GCC_MARKER "gcc2_compiled." #endif enum typestatus {TYPE_UNSEEN, TYPE_XREF, TYPE_DEFINED}; /* Structure recording information about a C data type. The status element says whether we have yet output the definition of the type. TYPE_XREF says we have output it as a cross-reference only. The file_number and type_number elements are used if DBX_USE_BINCL is defined. */ struct typeinfo GTY(()) { enum typestatus status; int file_number; int type_number; }; /* Vector recording information about C data types. When we first notice a data type (a tree node), we assign it a number using next_type_number. That is its index in this vector. */ static GTY ((length ("typevec_len"))) struct typeinfo *typevec; /* Number of elements of space allocated in `typevec'. */ static GTY(()) int typevec_len; /* In dbx output, each type gets a unique number. This is the number for the next type output. The number, once assigned, is in the TYPE_SYMTAB_ADDRESS field. 
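Illustrative example (an assumption about typical output, not from the sources): the first type emitted is usually `int', which then appears as type 1 in a stab name such as "int:t1=r1;-2147483648;2147483647;"; with DBX_USE_BINCL the bare number is replaced by a (file,type) pair such as (0,1), as dbxout_type_index later in this file shows.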
*/ static GTY(()) int next_type_number; /* The C front end may call dbxout_symbol before dbxout_init runs. We save all such decls in this list and output them when we get to dbxout_init. */ static GTY(()) tree preinit_symbols; enum binclstatus {BINCL_NOT_REQUIRED, BINCL_PENDING, BINCL_PROCESSED}; /* When using N_BINCL in dbx output, each type number is actually a pair of the file number and the type number within the file. This is a stack of input files. */ struct dbx_file { struct dbx_file *next; int file_number; int next_type_number; enum binclstatus bincl_status; /* Keep track of lazy bincl. */ const char *pending_bincl_name; /* Name of bincl. */ struct dbx_file *prev; /* Chain to traverse all pending bincls. */ }; /* This is the top of the stack. This is not saved for PCH, because restoring a PCH should not change it. next_file_number does have to be saved, because the PCH may use some file numbers; however, just before restoring a PCH, next_file_number should always be 0 because we should not have needed any file numbers yet. */ #if (defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)) \ && defined (DBX_USE_BINCL) static struct dbx_file *current_file; #endif /* This is the next file number to use. */ static GTY(()) int next_file_number; /* A counter for dbxout_function_end. */ static GTY(()) int scope_labelno; /* A counter for dbxout_source_line. */ static GTY(()) int dbxout_source_line_counter; /* Nonzero if we have actually used any of the GDB extensions to the debugging format. The idea is that we use them for the first time only if there's a strong reason, but once we have done that, we use them whenever convenient. */ static GTY(()) int have_used_extensions = 0; /* Number for the next N_SOL filename stabs label. The number 0 is reserved for the N_SO filename stabs label. */ static GTY(()) int source_label_number = 1; /* Last source file name mentioned in a NOTE insn. */ static GTY(()) const char *lastfile; /* Used by PCH machinery to detect if 'lastfile' should be reset to base_input_file. */ static GTY(()) int lastfile_is_base; /* Typical USG systems don't have stab.h, and they also have no use for DBX-format debugging info. */ #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO) #ifdef DBX_USE_BINCL /* If zero then there is no pending BINCL. */ static int pending_bincls = 0; #endif /* The original input file name. */ static const char *base_input_file; /* Current working directory. */ static const char *cwd; #ifdef DEBUG_SYMS_TEXT #define FORCE_TEXT function_section (current_function_decl); #else #define FORCE_TEXT #endif #ifndef GCC_GSTAB_H #define GCC_GSTAB_H #define __define_stab(NAME, CODE, STRING) NAME=CODE, enum __stab_debug_code { /* Table of DBX symbol codes for the GNU system. Copyright (C) 1988, 1997, 1998 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with the GNU C Library; see the file COPYING.LIB. 
If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This contains contribution from Cygnus Support. */ /* Global variable. Only the name is significant. To find the address, look in the corresponding external symbol. */ __define_stab (N_GSYM, 0x20, "GSYM") /* Function name for BSD Fortran. Only the name is significant. To find the address, look in the corresponding external symbol. */ __define_stab (N_FNAME, 0x22, "FNAME") /* Function name or text-segment variable for C. Value is its address. Desc is supposedly starting line number, but GCC doesn't set it and DBX seems not to miss it. */ __define_stab (N_FUN, 0x24, "FUN") /* Data-segment variable with internal linkage. Value is its address. "Static Sym". */ __define_stab (N_STSYM, 0x26, "STSYM") /* BSS-segment variable with internal linkage. Value is its address. */ __define_stab (N_LCSYM, 0x28, "LCSYM") /* Name of main routine. Only the name is significant. This is not used in C. */ __define_stab (N_MAIN, 0x2a, "MAIN") /* Global symbol in Pascal. Supposedly the value is its line number; I'm skeptical. */ __define_stab (N_PC, 0x30, "PC") /* Number of symbols: 0, files,,funcs,lines according to Ultrix V4.0. */ __define_stab (N_NSYMS, 0x32, "NSYMS") /* "No DST map for sym: name, ,0,type,ignored" according to Ultrix V4.0. */ __define_stab (N_NOMAP, 0x34, "NOMAP") /* New stab from Solaris. I don't know what it means, but it don't seem to contain useful information. */ __define_stab (N_OBJ, 0x38, "OBJ") /* New stab from Solaris. I don't know what it means, but it don't seem to contain useful information. Possibly related to the optimization flags used in this module. */ __define_stab (N_OPT, 0x3c, "OPT") /* Register variable. Value is number of register. */ __define_stab (N_RSYM, 0x40, "RSYM") /* Modula-2 compilation unit. Can someone say what info it contains? */ __define_stab (N_M2C, 0x42, "M2C") /* Line number in text segment. Desc is the line number; value is corresponding address. */ __define_stab (N_SLINE, 0x44, "SLINE") /* Similar, for data segment. */ __define_stab (N_DSLINE, 0x46, "DSLINE") /* Similar, for bss segment. */ __define_stab (N_BSLINE, 0x48, "BSLINE") /* Sun's source-code browser stabs. ?? Don't know what the fields are. Supposedly the field is "path to associated .cb file". THIS VALUE OVERLAPS WITH N_BSLINE! */ __define_stab (N_BROWS, 0x48, "BROWS") /* GNU Modula-2 definition module dependency. Value is the modification time of the definition file. Other is nonzero if it is imported with the GNU M2 keyword %INITIALIZE. Perhaps N_M2C can be used if there are enough empty fields? */ __define_stab(N_DEFD, 0x4a, "DEFD") /* THE FOLLOWING TWO STAB VALUES CONFLICT. Happily, one is for Modula-2 and one is for C++. Still,... */ /* GNU C++ exception variable. Name is variable name. */ __define_stab (N_EHDECL, 0x50, "EHDECL") /* Modula2 info "for imc": name,,0,0,0 according to Ultrix V4.0. */ __define_stab (N_MOD2, 0x50, "MOD2") /* GNU C++ `catch' clause. Value is its address. Desc is nonzero if this entry is immediately followed by a CAUGHT stab saying what exception was caught. Multiple CAUGHT stabs means that multiple exceptions can be caught here. If Desc is 0, it means all exceptions are caught here. */ __define_stab (N_CATCH, 0x54, "CATCH") /* Structure or union element. Value is offset in the structure. */ __define_stab (N_SSYM, 0x60, "SSYM") /* Name of main source file. Value is starting text address of the compilation. 
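Illustrative example (assumed typical output, not from the sources): dbxout_init later in this file emits something like
	.stabs "foo.c",100,0,0,Ltext0
for a main source file foo.c, 100 being N_SO (0x64) and Ltext0 the internal label placed at the start of the text section.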
*/ __define_stab (N_SO, 0x64, "SO") /* Automatic variable in the stack. Value is offset from frame pointer. Also used for type descriptions. */ __define_stab (N_LSYM, 0x80, "LSYM") /* Beginning of an include file. Only Sun uses this. In an object file, only the name is significant. The Sun linker puts data into some of the other fields. */ __define_stab (N_BINCL, 0x82, "BINCL") /* Name of sub-source file (#include file). Value is starting text address of the compilation. */ __define_stab (N_SOL, 0x84, "SOL") /* Parameter variable. Value is offset from argument pointer. (On most machines the argument pointer is the same as the frame pointer. */ __define_stab (N_PSYM, 0xa0, "PSYM") /* End of an include file. No name. This and N_BINCL act as brackets around the file's output. In an object file, there is no significant data in this entry. The Sun linker puts data into some of the fields. */ __define_stab (N_EINCL, 0xa2, "EINCL") /* Alternate entry point. Value is its address. */ __define_stab (N_ENTRY, 0xa4, "ENTRY") /* Beginning of lexical block. The desc is the nesting level in lexical blocks. The value is the address of the start of the text for the block. The variables declared inside the block *precede* the N_LBRAC symbol. */ __define_stab (N_LBRAC, 0xc0, "LBRAC") /* Place holder for deleted include file. Replaces a N_BINCL and everything up to the corresponding N_EINCL. The Sun linker generates these when it finds multiple identical copies of the symbols from an include file. This appears only in output from the Sun linker. */ __define_stab (N_EXCL, 0xc2, "EXCL") /* Modula-2 scope information. Can someone say what info it contains? */ __define_stab (N_SCOPE, 0xc4, "SCOPE") /* End of a lexical block. Desc matches the N_LBRAC's desc. The value is the address of the end of the text for the block. */ __define_stab (N_RBRAC, 0xe0, "RBRAC") /* Begin named common block. Only the name is significant. */ __define_stab (N_BCOMM, 0xe2, "BCOMM") /* End named common block. Only the name is significant (and it should match the N_BCOMM). */ __define_stab (N_ECOMM, 0xe4, "ECOMM") /* End common (local name): value is address. I'm not sure how this is used. */ __define_stab (N_ECOML, 0xe8, "ECOML") /* These STAB's are used on Gould systems for Non-Base register symbols or something like that. FIXME. I have assigned the values at random since I don't have a Gould here. Fixups from Gould folk welcome... */ __define_stab (N_NBTEXT, 0xF0, "NBTEXT") __define_stab (N_NBDATA, 0xF2, "NBDATA") __define_stab (N_NBBSS, 0xF4, "NBBSS") __define_stab (N_NBSTS, 0xF6, "NBSTS") __define_stab (N_NBLCS, 0xF8, "NBLCS") /* Second symbol entry containing a length-value for the preceding entry. The value is the length. */ __define_stab (N_LENG, 0xfe, "LENG") /* The above information, in matrix format. STAB MATRIX _________________________________________________ | 00 - 1F are not dbx stab symbols | | In most cases, the low bit is the EXTernal bit| | 00 UNDEF | 02 ABS | 04 TEXT | 06 DATA | | 01 |EXT | 03 |EXT | 05 |EXT | 07 |EXT | | 08 BSS | 0A INDR | 0C FN_SEQ | 0E | | 09 |EXT | 0B | 0D | 0F | | 10 | 12 COMM | 14 SETA | 16 SETT | | 11 | 13 | 15 | 17 | | 18 SETD | 1A SETB | 1C SETV | 1E WARNING| | 19 | 1B | 1D | 1F FN | |_______________________________________________| | Debug entries with bit 01 set are unused. 
| | 20 GSYM | 22 FNAME | 24 FUN | 26 STSYM | | 28 LCSYM | 2A MAIN | 2C | 2E | | 30 PC | 32 NSYMS | 34 NOMAP | 36 | | 38 OBJ | 3A | 3C OPT | 3E | | 40 RSYM | 42 M2C | 44 SLINE | 46 DSLINE | | 48 BSLINE*| 4A DEFD | 4C | 4E | | 50 EHDECL*| 52 | 54 CATCH | 56 | | 58 | 5A | 5C | 5E | | 60 SSYM | 62 | 64 SO | 66 | | 68 | 6A | 6C | 6E | | 70 | 72 | 74 | 76 | | 78 | 7A | 7C | 7E | | 80 LSYM | 82 BINCL | 84 SOL | 86 | | 88 | 8A | 8C | 8E | | 90 | 92 | 94 | 96 | | 98 | 9A | 9C | 9E | | A0 PSYM | A2 EINCL | A4 ENTRY | A6 | | A8 | AA | AC | AE | | B0 | B2 | B4 | B6 | | B8 | BA | BC | BE | | C0 LBRAC | C2 EXCL | C4 SCOPE | C6 | | C8 | CA | CC | CE | | D0 | D2 | D4 | D6 | | D8 | DA | DC | DE | | E0 RBRAC | E2 BCOMM | E4 ECOMM | E6 | | E8 ECOML | EA | EC | EE | | F0 | F2 | F4 | F6 | | F8 | FA | FC | FE LENG | +-----------------------------------------------+ * 50 EHDECL is also MOD2. * 48 BSLINE is also BROWS. */ LAST_UNUSED_STAB_CODE }; #undef __define_stab #endif /* ! GCC_GSTAB_H */ #define STAB_CODE_TYPE enum __stab_debug_code /* 1 if PARM is passed to this function in memory. */ #define PARM_PASSED_IN_MEMORY(PARM) \ (MEM_P (DECL_INCOMING_RTL (PARM))) /* A C expression for the integer offset value of an automatic variable (N_LSYM) having address X (an RTX). */ #ifndef DEBUGGER_AUTO_OFFSET #define DEBUGGER_AUTO_OFFSET(X) \ (GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0) #endif /* A C expression for the integer offset value of an argument (N_PSYM) having address X (an RTX). The nominal offset is OFFSET. */ #ifndef DEBUGGER_ARG_OFFSET #define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET) #endif /* Stream for writing to assembler file. */ static FILE *asmfile; /* These variables are for dbxout_symbol to communicate to dbxout_finish_symbol. current_sym_code is the symbol-type-code, a symbol N_... define in stab.h. current_sym_value and current_sym_addr are two ways to address the value to store in the symtab entry. current_sym_addr if nonzero represents the value as an rtx. If that is zero, current_sym_value is used. This is used when the value is an offset (such as for auto variables, register variables and parms). */ static STAB_CODE_TYPE current_sym_code; static int current_sym_value; static rtx current_sym_addr; /* Number of chars of symbol-description generated so far for the current symbol. Used by CHARS and CONTIN. */ static int current_sym_nchars; /* Report having output N chars of the current symbol-description. */ #define CHARS(N) (current_sym_nchars += (N)) /* Break the current symbol-description, generating a continuation, if it has become long. 
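(Added note, inferred from the definitions that follow: CHARS above tallies the characters written for the current symbol, and CONTIN calls dbxout_continue once that tally exceeds DBX_CONTIN_LENGTH, 80 by default; ports such as XCOFF that define DBX_CONTIN_LENGTH as 0 get a no-op CONTIN and never split a .stabs entry.)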
*/ #ifndef DBX_CONTIN_LENGTH #define DBX_CONTIN_LENGTH 80 #endif #if DBX_CONTIN_LENGTH > 0 #define CONTIN \ do {if (current_sym_nchars > DBX_CONTIN_LENGTH) dbxout_continue ();} while (0) #else #define CONTIN do { } while (0) #endif #ifdef DBX_USE_BINCL static void emit_bincl_stab (const char *c); static void emit_pending_bincls (void); #endif static inline void emit_pending_bincls_if_required (void); static void dbxout_init (const char *); static void dbxout_finish (const char *); static void dbxout_start_source_file (unsigned, const char *); static void dbxout_end_source_file (unsigned); static void dbxout_typedefs (tree); static void dbxout_type_index (tree); #if DBX_CONTIN_LENGTH > 0 static void dbxout_continue (void); #endif static void dbxout_args (tree); static void dbxout_type_fields (tree); static void dbxout_type_method_1 (tree, const char *); static void dbxout_type_methods (tree); static void dbxout_range_type (tree); static void dbxout_type (tree, int); static bool print_int_cst_bounds_in_octal_p (tree); static void print_int_cst_octal (tree); static void print_octal (unsigned HOST_WIDE_INT, int); static void print_wide_int (HOST_WIDE_INT); static void dbxout_type_name (tree); static void dbxout_class_name_qualifiers (tree); static int dbxout_symbol_location (tree, tree, const char *, rtx); static void dbxout_symbol_name (tree, const char *, int); static void dbxout_prepare_symbol (tree); static void dbxout_finish_symbol (tree); static void dbxout_block (tree, int, tree); static void dbxout_global_decl (tree); static void dbxout_type_decl (tree, int); static void dbxout_handle_pch (unsigned); /* The debug hooks structure. */ #if defined (DBX_DEBUGGING_INFO) static void dbxout_source_line (unsigned int, const char *); static void dbxout_source_file (FILE *, const char *); static void dbxout_function_end (void); static void dbxout_begin_function (tree); static void dbxout_begin_block (unsigned, unsigned); static void dbxout_end_block (unsigned, unsigned); static void dbxout_function_decl (tree); const struct gcc_debug_hooks dbx_debug_hooks = { dbxout_init, dbxout_finish, debug_nothing_int_charstar, debug_nothing_int_charstar, dbxout_start_source_file, dbxout_end_source_file, dbxout_begin_block, dbxout_end_block, debug_true_tree, /* ignore_block */ dbxout_source_line, /* source_line */ dbxout_source_line, /* begin_prologue: just output line info */ debug_nothing_int_charstar, /* end_prologue */ debug_nothing_int_charstar, /* end_epilogue */ #ifdef DBX_FUNCTION_FIRST dbxout_begin_function, #else debug_nothing_tree, /* begin_function */ #endif debug_nothing_int, /* end_function */ dbxout_function_decl, dbxout_global_decl, /* global_decl */ dbxout_type_decl, /* type_decl */ debug_nothing_tree_tree, /* imported_module_or_decl */ debug_nothing_tree, /* deferred_inline_function */ debug_nothing_tree, /* outlining_inline_function */ debug_nothing_rtx, /* label */ dbxout_handle_pch, /* handle_pch */ debug_nothing_rtx /* var_location */ }; #endif /* DBX_DEBUGGING_INFO */ #if defined (XCOFF_DEBUGGING_INFO) const struct gcc_debug_hooks xcoff_debug_hooks = { dbxout_init, dbxout_finish, debug_nothing_int_charstar, debug_nothing_int_charstar, dbxout_start_source_file, dbxout_end_source_file, xcoffout_begin_block, xcoffout_end_block, debug_true_tree, /* ignore_block */ xcoffout_source_line, xcoffout_begin_prologue, /* begin_prologue */ debug_nothing_int_charstar, /* end_prologue */ xcoffout_end_epilogue, debug_nothing_tree, /* begin_function */ xcoffout_end_function, debug_nothing_tree, /* 
function_decl */ dbxout_global_decl, /* global_decl */ dbxout_type_decl, /* type_decl */ debug_nothing_tree_tree, /* imported_module_or_decl */ debug_nothing_tree, /* deferred_inline_function */ debug_nothing_tree, /* outlining_inline_function */ debug_nothing_rtx, /* label */ dbxout_handle_pch, /* handle_pch */ debug_nothing_rtx /* var_location */ }; #endif /* XCOFF_DEBUGGING_INFO */ #if defined (DBX_DEBUGGING_INFO) static void dbxout_function_end (void) { char lscope_label_name[100]; function_section (current_function_decl); /* Convert Ltext into the appropriate format for local labels in case the system doesn't insert underscores in front of user generated labels. */ ASM_GENERATE_INTERNAL_LABEL (lscope_label_name, "Lscope", scope_labelno); targetm.asm_out.internal_label (asmfile, "Lscope", scope_labelno); scope_labelno++; /* By convention, GCC will mark the end of a function with an N_FUN symbol and an empty string. */ #ifdef DBX_OUTPUT_NFUN DBX_OUTPUT_NFUN (asmfile, lscope_label_name, current_function_decl); #else fprintf (asmfile, "%s\"\",%d,0,0,", ASM_STABS_OP, N_FUN); assemble_name (asmfile, lscope_label_name); putc ('-', asmfile); assemble_name (asmfile, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); fprintf (asmfile, "\n"); #endif } #endif /* DBX_DEBUGGING_INFO */ /* At the beginning of compilation, start writing the symbol table. Initialize `typevec' and output the standard data types of C. */ static void dbxout_init (const char *input_file_name) { char ltext_label_name[100]; tree syms = lang_hooks.decls.getdecls (); asmfile = asm_out_file; typevec_len = 100; typevec = ggc_calloc (typevec_len, sizeof typevec[0]); /* Convert Ltext into the appropriate format for local labels in case the system doesn't insert underscores in front of user generated labels. */ ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0); /* Put the current working directory in an N_SO symbol. */ if (use_gnu_debug_info_extensions) { if (!cwd && (cwd = get_src_pwd ()) && (!*cwd || cwd[strlen (cwd) - 1] != '/')) cwd = concat (cwd, FILE_NAME_JOINER, NULL); if (cwd) { #ifdef DBX_OUTPUT_MAIN_SOURCE_DIRECTORY DBX_OUTPUT_MAIN_SOURCE_DIRECTORY (asmfile, cwd); #else /* no DBX_OUTPUT_MAIN_SOURCE_DIRECTORY */ fprintf (asmfile, "%s", ASM_STABS_OP); output_quoted_string (asmfile, cwd); fprintf (asmfile, ",%d,0,0,", N_SO); assemble_name (asmfile, ltext_label_name); fputc ('\n', asmfile); #endif /* no DBX_OUTPUT_MAIN_SOURCE_DIRECTORY */ } } #ifdef DBX_OUTPUT_MAIN_SOURCE_FILENAME DBX_OUTPUT_MAIN_SOURCE_FILENAME (asmfile, input_file_name); #else /* no DBX_OUTPUT_MAIN_SOURCE_FILENAME */ /* We include outputting `Ltext:' here, because that gives you a way to override it. */ /* Used to put `Ltext:' before the reference, but that loses on sun 4. */ fprintf (asmfile, "%s", ASM_STABS_OP); output_quoted_string (asmfile, input_file_name); fprintf (asmfile, ",%d,0,0,", N_SO); assemble_name (asmfile, ltext_label_name); fputc ('\n', asmfile); text_section (); targetm.asm_out.internal_label (asmfile, "Ltext", 0); #endif /* no DBX_OUTPUT_MAIN_SOURCE_FILENAME */ #ifdef DBX_OUTPUT_GCC_MARKER DBX_OUTPUT_GCC_MARKER (asmfile); #else /* Emit an N_OPT stab to indicate that this file was compiled by GCC. 
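With the default definitions above this comes out roughly as (illustrative)
	.stabs "gcc2_compiled.",60,0,0,0
where 60 is N_OPT (0x3c) and "gcc2_compiled." is the historical STABS_GCC_MARKER string.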
*/ fprintf (asmfile, "%s\"%s\",%d,0,0,0\n", ASM_STABS_OP, STABS_GCC_MARKER, N_OPT); #endif base_input_file = lastfile = input_file_name; next_type_number = 1; #ifdef DBX_USE_BINCL current_file = xmalloc (sizeof *current_file); current_file->next = NULL; current_file->file_number = 0; current_file->next_type_number = 1; next_file_number = 1; current_file->prev = NULL; current_file->bincl_status = BINCL_NOT_REQUIRED; current_file->pending_bincl_name = NULL; #endif /* Get all permanent types that have typedef names, and output them all, except for those already output. Some language front ends put these declarations in the top-level scope; some do not; the latter are responsible for calling debug_hooks->type_decl from their record_builtin_type function. */ dbxout_typedefs (syms); if (preinit_symbols) { tree t; for (t = nreverse (preinit_symbols); t; t = TREE_CHAIN (t)) dbxout_symbol (TREE_VALUE (t), 0); preinit_symbols = 0; } } /* Output any typedef names for types described by TYPE_DECLs in SYMS. */ static void dbxout_typedefs (tree syms) { for (; syms != NULL_TREE; syms = TREE_CHAIN (syms)) { if (TREE_CODE (syms) == TYPE_DECL) { tree type = TREE_TYPE (syms); if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && COMPLETE_OR_VOID_TYPE_P (type) && ! TREE_ASM_WRITTEN (TYPE_NAME (type))) dbxout_symbol (TYPE_NAME (type), 0); } } } #ifdef DBX_USE_BINCL /* Emit BINCL stab using given name. */ static void emit_bincl_stab (const char *name) { fprintf (asmfile, "%s", ASM_STABS_OP); output_quoted_string (asmfile, name); fprintf (asmfile, ",%d,0,0,0\n", N_BINCL); } /* If there are pending bincls then it is time to emit all of them. */ static inline void emit_pending_bincls_if_required (void) { if (pending_bincls) emit_pending_bincls (); } /* Emit all pending bincls. */ static void emit_pending_bincls (void) { struct dbx_file *f = current_file; /* Find first pending bincl. */ while (f->bincl_status == BINCL_PENDING) f = f->next; /* Now emit all bincls. */ f = f->prev; while (f) { if (f->bincl_status == BINCL_PENDING) { emit_bincl_stab (f->pending_bincl_name); /* Update file number and status. */ f->file_number = next_file_number++; f->bincl_status = BINCL_PROCESSED; } if (f == current_file) break; f = f->prev; } /* All pending bincls have been emitted. */ pending_bincls = 0; } #else static inline void emit_pending_bincls_if_required (void) {} #endif /* Change to reading from a new source file. Generate a N_BINCL stab. */ static void dbxout_start_source_file (unsigned int line ATTRIBUTE_UNUSED, const char *filename ATTRIBUTE_UNUSED) { #ifdef DBX_USE_BINCL struct dbx_file *n = xmalloc (sizeof *n); n->next = current_file; n->next_type_number = 1; /* Do not assign file number now. Delay it until we actually emit BINCL. */ n->file_number = 0; n->prev = NULL; current_file->prev = n; n->bincl_status = BINCL_PENDING; n->pending_bincl_name = filename; pending_bincls = 1; current_file = n; #endif } /* Revert to reading a previous source file. Generate a N_EINCL stab. */ static void dbxout_end_source_file (unsigned int line ATTRIBUTE_UNUSED) { #ifdef DBX_USE_BINCL /* Emit EINCL stab only if BINCL is not pending. */ if (current_file->bincl_status == BINCL_PROCESSED) fprintf (asmfile, "%s%d,0,0,0\n", ASM_STABN_OP, N_EINCL); current_file->bincl_status = BINCL_NOT_REQUIRED; current_file = current_file->next; #endif } /* Handle a few odd cases that occur when trying to make PCH files work. */ static void dbxout_handle_pch (unsigned at_end) { if (! 
at_end) { /* When using the PCH, this file will be included, so we need to output a BINCL. */ dbxout_start_source_file (0, lastfile); /* The base file when using the PCH won't be the same as the base file when it's being generated. */ lastfile = NULL; } else { /* ... and an EINCL. */ dbxout_end_source_file (0); /* Deal with cases where 'lastfile' was never actually changed. */ lastfile_is_base = lastfile == NULL; } } #if defined (DBX_DEBUGGING_INFO) /* Output debugging info to FILE to switch to sourcefile FILENAME. */ static void dbxout_source_file (FILE *file, const char *filename) { if (lastfile == 0 && lastfile_is_base) { lastfile = base_input_file; lastfile_is_base = 0; } if (filename && (lastfile == 0 || strcmp (filename, lastfile))) { char ltext_label_name[100]; ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", source_label_number); fprintf (file, "%s", ASM_STABS_OP); output_quoted_string (file, filename); fprintf (asmfile, ",%d,0,0,", N_SOL); assemble_name (asmfile, ltext_label_name); fputc ('\n', asmfile); if (current_function_decl != NULL_TREE && DECL_SECTION_NAME (current_function_decl) != NULL_TREE) ; /* Don't change section amid function. */ else { if (!in_text_section () && !in_unlikely_text_section ()) text_section (); } targetm.asm_out.internal_label (file, "Ltext", source_label_number); source_label_number++; lastfile = filename; } } /* Output a line number symbol entry for source file FILENAME and line number LINENO. */ static void dbxout_source_line (unsigned int lineno, const char *filename) { dbxout_source_file (asmfile, filename); #ifdef ASM_OUTPUT_SOURCE_LINE dbxout_source_line_counter += 1; ASM_OUTPUT_SOURCE_LINE (asmfile, lineno, dbxout_source_line_counter); #else fprintf (asmfile, "%s%d,0,%d\n", ASM_STABD_OP, N_SLINE, lineno); #endif } /* Describe the beginning of an internal block within a function. */ static void dbxout_begin_block (unsigned int line ATTRIBUTE_UNUSED, unsigned int n) { emit_pending_bincls_if_required (); targetm.asm_out.internal_label (asmfile, "LBB", n); } /* Describe the end line-number of an internal block within a function. */ static void dbxout_end_block (unsigned int line ATTRIBUTE_UNUSED, unsigned int n) { emit_pending_bincls_if_required (); targetm.asm_out.internal_label (asmfile, "LBE", n); } /* Output dbx data for a function definition. This includes a definition of the function name itself (a symbol), definitions of the parameters (locating them in the parameter list) and then output the block that makes up the function's body (including all the auto variables of the function). */ static void dbxout_function_decl (tree decl) { emit_pending_bincls_if_required (); #ifndef DBX_FUNCTION_FIRST dbxout_begin_function (decl); #endif dbxout_block (DECL_INITIAL (decl), 0, DECL_ARGUMENTS (decl)); #ifdef DBX_OUTPUT_FUNCTION_END DBX_OUTPUT_FUNCTION_END (asmfile, decl); #endif if (use_gnu_debug_info_extensions #if defined(NO_DBX_FUNCTION_END) && ! NO_DBX_FUNCTION_END #endif && targetm.have_named_sections) dbxout_function_end (); } #endif /* DBX_DEBUGGING_INFO */ /* Debug information for a global DECL. Called from toplev.c after compilation proper has finished. */ static void dbxout_global_decl (tree decl) { if (TREE_CODE (decl) == VAR_DECL && ! DECL_EXTERNAL (decl) && DECL_RTL_SET_P (decl)) /* Not necessary? */ { int saved_tree_used = TREE_USED (decl); TREE_USED (decl) = 1; dbxout_symbol (decl, 0); TREE_USED (decl) = saved_tree_used; } } /* This is just a function-type adapter; dbxout_symbol does exactly what we want but returns an int. 
*/ static void dbxout_type_decl (tree decl, int local) { dbxout_symbol (decl, local); } /* At the end of compilation, finish writing the symbol table. Unless you define DBX_OUTPUT_MAIN_SOURCE_FILE_END, the default is to do nothing. */ static void dbxout_finish (const char *filename ATTRIBUTE_UNUSED) { #ifdef DBX_OUTPUT_MAIN_SOURCE_FILE_END DBX_OUTPUT_MAIN_SOURCE_FILE_END (asmfile, filename); #endif /* DBX_OUTPUT_MAIN_SOURCE_FILE_END */ debug_free_queue (); } /* Output the index of a type. */ static void dbxout_type_index (tree type) { #ifndef DBX_USE_BINCL fprintf (asmfile, "%d", TYPE_SYMTAB_ADDRESS (type)); CHARS (3); #else struct typeinfo *t = &typevec[TYPE_SYMTAB_ADDRESS (type)]; fprintf (asmfile, "(%d,%d)", t->file_number, t->type_number); CHARS (9); #endif } #if DBX_CONTIN_LENGTH > 0 /* Continue a symbol-description that gets too big. End one symbol table entry with a double-backslash and start a new one, eventually producing something like .stabs "start......\\",code,0,value .stabs "...rest",code,0,value */ static void dbxout_continue (void) { emit_pending_bincls_if_required (); #ifdef DBX_CONTIN_CHAR fprintf (asmfile, "%c", DBX_CONTIN_CHAR); #else fprintf (asmfile, "\\\\"); #endif dbxout_finish_symbol (NULL_TREE); fprintf (asmfile, "%s\"", ASM_STABS_OP); current_sym_nchars = 0; } #endif /* DBX_CONTIN_LENGTH > 0 */ /* Subroutine of `dbxout_type'. Output the type fields of TYPE. This must be a separate function because anonymous unions require recursive calls. */ static void dbxout_type_fields (tree type) { tree tem; /* Output the name, type, position (in bits), size (in bits) of each field that we can support. */ for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem)) { /* If one of the nodes is an error_mark or its type is then return early. */ if (tem == error_mark_node || TREE_TYPE (tem) == error_mark_node) return; /* Omit here local type decls until we know how to support them. */ if (TREE_CODE (tem) == TYPE_DECL /* Omit fields whose position or size are variable or too large to represent. */ || (TREE_CODE (tem) == FIELD_DECL && (! host_integerp (bit_position (tem), 0) || ! DECL_SIZE (tem) || ! host_integerp (DECL_SIZE (tem), 1))) /* Omit here the nameless fields that are used to skip bits. */ || DECL_IGNORED_P (tem)) continue; else if (TREE_CODE (tem) != CONST_DECL) { /* Continue the line if necessary, but not before the first field. */ if (tem != TYPE_FIELDS (type)) CONTIN; if (DECL_NAME (tem)) { fprintf (asmfile, "%s:", IDENTIFIER_POINTER (DECL_NAME (tem))); CHARS (2 + IDENTIFIER_LENGTH (DECL_NAME (tem))); } else { fprintf (asmfile, ":"); CHARS (1); } if (use_gnu_debug_info_extensions && (TREE_PRIVATE (tem) || TREE_PROTECTED (tem) || TREE_CODE (tem) != FIELD_DECL)) { have_used_extensions = 1; putc ('/', asmfile); putc ((TREE_PRIVATE (tem) ? '0' : TREE_PROTECTED (tem) ? '1' : '2'), asmfile); CHARS (2); } dbxout_type ((TREE_CODE (tem) == FIELD_DECL && DECL_BIT_FIELD_TYPE (tem)) ? DECL_BIT_FIELD_TYPE (tem) : TREE_TYPE (tem), 0); if (TREE_CODE (tem) == VAR_DECL) { if (TREE_STATIC (tem) && use_gnu_debug_info_extensions) { tree name = DECL_ASSEMBLER_NAME (tem); have_used_extensions = 1; fprintf (asmfile, ":%s;", IDENTIFIER_POINTER (name)); CHARS (IDENTIFIER_LENGTH (name) + 2); } else { /* If TEM is non-static, GDB won't understand it. 
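   We just record a zero position and size for it instead.  For comparison,
   an ordinary FIELD_DECL handled just below comes out as
   NAME:TYPE,BITPOS,BITSIZE; so a 32-bit `int x' at offset 0 would look
   roughly like `x:1,0,32;' -- illustrative only, since the type number
   depends on the order in which types happen to be emitted.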
*/ fprintf (asmfile, ",0,0;"); CHARS (5); } } else { putc (',', asmfile); print_wide_int (int_bit_position (tem)); putc (',', asmfile); print_wide_int (tree_low_cst (DECL_SIZE (tem), 1)); putc (';', asmfile); CHARS (3); } } } } /* Subroutine of `dbxout_type_methods'. Output debug info about the method described DECL. DEBUG_NAME is an encoding of the method's type signature. ??? We may be able to do without DEBUG_NAME altogether now. */ static void dbxout_type_method_1 (tree decl, const char *debug_name) { char c1 = 'A', c2; if (TREE_CODE (TREE_TYPE (decl)) == FUNCTION_TYPE) c2 = '?'; else /* it's a METHOD_TYPE. */ { tree firstarg = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl))); /* A for normal functions. B for `const' member functions. C for `volatile' member functions. D for `const volatile' member functions. */ if (TYPE_READONLY (TREE_TYPE (firstarg))) c1 += 1; if (TYPE_VOLATILE (TREE_TYPE (firstarg))) c1 += 2; if (DECL_VINDEX (decl)) c2 = '*'; else c2 = '.'; } fprintf (asmfile, ":%s;%c%c%c", debug_name, TREE_PRIVATE (decl) ? '0' : TREE_PROTECTED (decl) ? '1' : '2', c1, c2); CHARS (IDENTIFIER_LENGTH (DECL_ASSEMBLER_NAME (decl)) + 6 - (debug_name - IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)))); if (DECL_VINDEX (decl) && host_integerp (DECL_VINDEX (decl), 0)) { print_wide_int (tree_low_cst (DECL_VINDEX (decl), 0)); putc (';', asmfile); CHARS (1); dbxout_type (DECL_CONTEXT (decl), 0); fprintf (asmfile, ";"); CHARS (1); } } /* Subroutine of `dbxout_type'. Output debug info about the methods defined in TYPE. */ static void dbxout_type_methods (tree type) { /* C++: put out the method names and their parameter lists */ tree methods = TYPE_METHODS (type); tree type_encoding; tree fndecl; tree last; char formatted_type_identifier_length[16]; int type_identifier_length; if (methods == NULL_TREE) return; type_encoding = DECL_NAME (TYPE_NAME (type)); #if 0 /* C++: Template classes break some assumptions made by this code about the class names, constructor names, and encodings for assembler label names. For now, disable output of dbx info for them. */ { const char *ptr = IDENTIFIER_POINTER (type_encoding); /* This should use index. (mrs) */ while (*ptr && *ptr != '<') ptr++; if (*ptr != 0) { static int warned; if (!warned) warned = 1; return; } } #endif type_identifier_length = IDENTIFIER_LENGTH (type_encoding); sprintf (formatted_type_identifier_length, "%d", type_identifier_length); if (TREE_CODE (methods) != TREE_VEC) fndecl = methods; else if (TREE_VEC_ELT (methods, 0) != NULL_TREE) fndecl = TREE_VEC_ELT (methods, 0); else fndecl = TREE_VEC_ELT (methods, 1); while (fndecl) { int need_prefix = 1; /* Group together all the methods for the same operation. These differ in the types of the arguments. */ for (last = NULL_TREE; fndecl && (last == NULL_TREE || DECL_NAME (fndecl) == DECL_NAME (last)); fndecl = TREE_CHAIN (fndecl)) /* Output the name of the field (after overloading), as well as the name of the field before overloading, along with its parameter list */ { /* This is the "mangled" name of the method. It encodes the argument types. */ const char *debug_name; /* Skip methods that aren't FUNCTION_DECLs. (In C++, these include TEMPLATE_DECLs.) The debugger doesn't know what to do with such entities anyhow. */ if (TREE_CODE (fndecl) != FUNCTION_DECL) continue; debug_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (fndecl)); CONTIN; last = fndecl; /* Also ignore abstract methods; those are only interesting to the DWARF backends. 
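   For the methods we do emit, the text built just below has roughly the
   shape NAME::TYPE:MANGLED;AVC -- A is the access digit ('0' private, '1'
   protected, '2' public), V is 'A'..'D' for the const/volatile quality of
   the method, and C is '.' for an ordinary method or '*' for a virtual one
   (see dbxout_type_method_1 above).  Illustrative shape only; the mangled
   names and type numbers vary.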
*/ if (DECL_IGNORED_P (fndecl) || DECL_ABSTRACT (fndecl)) continue; /* Redundantly output the plain name, since that's what gdb expects. */ if (need_prefix) { tree name = DECL_NAME (fndecl); fprintf (asmfile, "%s::", IDENTIFIER_POINTER (name)); CHARS (IDENTIFIER_LENGTH (name) + 2); need_prefix = 0; } dbxout_type (TREE_TYPE (fndecl), 0); dbxout_type_method_1 (fndecl, debug_name); } if (!need_prefix) { putc (';', asmfile); CHARS (1); } } } /* Emit a "range" type specification, which has the form: "r;;;". TYPE is an INTEGER_TYPE. */ static void dbxout_range_type (tree type) { fprintf (asmfile, "r"); if (TREE_TYPE (type)) dbxout_type (TREE_TYPE (type), 0); else if (TREE_CODE (type) != INTEGER_TYPE) dbxout_type (type, 0); /* E.g. Pascal's ARRAY [BOOLEAN] of INTEGER */ else { /* Traditionally, we made sure 'int' was type 1, and builtin types were defined to be sub-ranges of int. Unfortunately, this does not allow us to distinguish true sub-ranges from integer types. So, instead we define integer (non-sub-range) types as sub-ranges of themselves. This matters for Chill. If this isn't a subrange type, then we want to define it in terms of itself. However, in C, this may be an anonymous integer type, and we don't want to emit debug info referring to it. Just calling dbxout_type_index won't work anyways, because the type hasn't been defined yet. We make this work for both cases by checked to see whether this is a defined type, referring to it if it is, and using 'int' otherwise. */ if (TYPE_SYMTAB_ADDRESS (type) != 0) dbxout_type_index (type); else dbxout_type_index (integer_type_node); } if (TYPE_MIN_VALUE (type) != 0 && host_integerp (TYPE_MIN_VALUE (type), 0)) { putc (';', asmfile); CHARS (1); if (print_int_cst_bounds_in_octal_p (type)) print_int_cst_octal (TYPE_MIN_VALUE (type)); else print_wide_int (tree_low_cst (TYPE_MIN_VALUE (type), 0)); } else { fprintf (asmfile, ";0"); CHARS (2); } if (TYPE_MAX_VALUE (type) != 0 && host_integerp (TYPE_MAX_VALUE (type), 0)) { putc (';', asmfile); CHARS (1); if (print_int_cst_bounds_in_octal_p (type)) print_int_cst_octal (TYPE_MAX_VALUE (type)); else print_wide_int (tree_low_cst (TYPE_MAX_VALUE (type), 0)); putc (';', asmfile); CHARS (1); } else { fprintf (asmfile, ";-1;"); CHARS (4); } } /* Output a reference to a type. If the type has not yet been described in the dbx output, output its definition now. For a type already defined, just refer to its definition using the type number. If FULL is nonzero, and the type has been described only with a forward-reference, output the definition now. If FULL is zero in this case, just refer to the forward-reference using the number previously allocated. */ static void dbxout_type (tree type, int full) { tree tem; tree main_variant; static int anonymous_type_number = 0; if (TREE_CODE (type) == VECTOR_TYPE) /* The frontend feeds us a representation for the vector as a struct containing an array. Pull out the array type. */ type = TREE_TYPE (TYPE_FIELDS (TYPE_DEBUG_REPRESENTATION_TYPE (type))); /* If there was an input error and we don't really have a type, avoid crashing and write something that is at least valid by assuming `int'. */ if (type == error_mark_node) type = integer_type_node; else { if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (type))) full = 0; } /* Try to find the "main variant" with the same name. 
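   (For a typedef such as `typedef int word;', the TYPE_DECL `word' has
   DECL_ORIGINAL_TYPE `int' and TREE_TYPE pointing at the named `word'
   variant, so that variant -- not plain `int' -- is treated as the main
   variant here, and qualified forms of `word' fold back to `word' rather
   than all the way to `int'.)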
*/ if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && DECL_ORIGINAL_TYPE (TYPE_NAME (type))) main_variant = TREE_TYPE (TYPE_NAME (type)); else main_variant = TYPE_MAIN_VARIANT (type); /* If we are not using extensions, stabs does not distinguish const and volatile, so there is no need to make them separate types. */ if (!use_gnu_debug_info_extensions) type = main_variant; if (TYPE_SYMTAB_ADDRESS (type) == 0) { /* Type has no dbx number assigned. Assign next available number. */ TYPE_SYMTAB_ADDRESS (type) = next_type_number++; /* Make sure type vector is long enough to record about this type. */ if (next_type_number == typevec_len) { typevec = ggc_realloc (typevec, (typevec_len * 2 * sizeof typevec[0])); memset (typevec + typevec_len, 0, typevec_len * sizeof typevec[0]); typevec_len *= 2; } #ifdef DBX_USE_BINCL emit_pending_bincls_if_required (); typevec[TYPE_SYMTAB_ADDRESS (type)].file_number = current_file->file_number; typevec[TYPE_SYMTAB_ADDRESS (type)].type_number = current_file->next_type_number++; #endif } if (flag_debug_only_used_symbols) { if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE || TREE_CODE (type) == ENUMERAL_TYPE) && TYPE_STUB_DECL (type) && TREE_CODE_CLASS (TREE_CODE (TYPE_STUB_DECL (type))) == 'd' && ! DECL_IGNORED_P (TYPE_STUB_DECL (type))) debug_queue_symbol (TYPE_STUB_DECL (type)); else if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL) debug_queue_symbol (TYPE_NAME (type)); } /* Output the number of this type, to refer to it. */ dbxout_type_index (type); #ifdef DBX_TYPE_DEFINED if (DBX_TYPE_DEFINED (type)) return; #endif /* If this type's definition has been output or is now being output, that is all. */ switch (typevec[TYPE_SYMTAB_ADDRESS (type)].status) { case TYPE_UNSEEN: break; case TYPE_XREF: /* If we have already had a cross reference, and either that's all we want or that's the best we could do, don't repeat the cross reference. Sun dbx crashes if we do. */ if (! full || !COMPLETE_TYPE_P (type) /* No way in DBX fmt to describe a variable size. */ || ! host_integerp (TYPE_SIZE (type), 1)) return; break; case TYPE_DEFINED: return; } #ifdef DBX_NO_XREFS /* For systems where dbx output does not allow the `=xsNAME:' syntax, leave the type-number completely undefined rather than output a cross-reference. If we have already used GNU debug info extensions, then it is OK to output a cross reference. This is necessary to get proper C++ debug output. */ if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE || TREE_CODE (type) == ENUMERAL_TYPE) && ! use_gnu_debug_info_extensions) /* We must use the same test here as we use twice below when deciding whether to emit a cross-reference. */ if ((TYPE_NAME (type) != 0 && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && DECL_IGNORED_P (TYPE_NAME (type))) && !full) || !COMPLETE_TYPE_P (type) /* No way in DBX fmt to describe a variable size. */ || ! host_integerp (TYPE_SIZE (type), 1)) { typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF; return; } #endif /* Output a definition now. */ fprintf (asmfile, "="); CHARS (1); /* Mark it as defined, so that if it is self-referent we will not get into an infinite recursion of definitions. */ typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_DEFINED; /* If this type is a variant of some other, hand off. Types with different names are usefully distinguished. We only distinguish cv-qualified types if we're using extensions. 
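   For illustration, with the extensions enabled a `const int' is defined as
   `k' followed by the type number of plain `int' (e.g. `k1'), and a
   `volatile int' as `B' followed by that number; the actual numbers depend
   on what has already been emitted (and with DBX_USE_BINCL they are
   (file,type) pairs instead of single numbers).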
*/ if (TYPE_READONLY (type) > TYPE_READONLY (main_variant)) { putc ('k', asmfile); CHARS (1); dbxout_type (build_type_variant (type, 0, TYPE_VOLATILE (type)), 0); return; } else if (TYPE_VOLATILE (type) > TYPE_VOLATILE (main_variant)) { putc ('B', asmfile); CHARS (1); dbxout_type (build_type_variant (type, TYPE_READONLY (type), 0), 0); return; } else if (main_variant != TYPE_MAIN_VARIANT (type)) { if (flag_debug_only_used_symbols) { tree orig_type = DECL_ORIGINAL_TYPE (TYPE_NAME (type)); if ((TREE_CODE (orig_type) == RECORD_TYPE || TREE_CODE (orig_type) == UNION_TYPE || TREE_CODE (orig_type) == QUAL_UNION_TYPE || TREE_CODE (orig_type) == ENUMERAL_TYPE) && TYPE_STUB_DECL (orig_type) && ! DECL_IGNORED_P (TYPE_STUB_DECL (orig_type))) debug_queue_symbol (TYPE_STUB_DECL (orig_type)); } /* 'type' is a typedef; output the type it refers to. */ dbxout_type (DECL_ORIGINAL_TYPE (TYPE_NAME (type)), 0); return; } /* else continue. */ switch (TREE_CODE (type)) { case VOID_TYPE: case LANG_TYPE: /* For a void type, just define it as itself; ie, "5=5". This makes us consider it defined without saying what it is. The debugger will make it a void type when the reference is seen, and nothing will ever override that default. */ dbxout_type_index (type); break; case INTEGER_TYPE: if (type == char_type_node && ! TYPE_UNSIGNED (type)) { /* Output the type `char' as a subrange of itself! I don't understand this definition, just copied it from the output of pcc. This used to use `r2' explicitly and we used to take care to make sure that `char' was type number 2. */ fprintf (asmfile, "r"); CHARS (1); dbxout_type_index (type); fprintf (asmfile, ";0;127;"); CHARS (7); } /* If this is a subtype of another integer type, always prefer to write it as a subtype. */ else if (TREE_TYPE (type) != 0 && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE) { /* If the size is non-standard, say what it is if we can use GDB extensions. */ if (use_gnu_debug_info_extensions && TYPE_PRECISION (type) != TYPE_PRECISION (integer_type_node)) { have_used_extensions = 1; fprintf (asmfile, "@s%d;", TYPE_PRECISION (type)); CHARS (5); } dbxout_range_type (type); } else { /* If the size is non-standard, say what it is if we can use GDB extensions. */ if (use_gnu_debug_info_extensions && TYPE_PRECISION (type) != TYPE_PRECISION (integer_type_node)) { have_used_extensions = 1; fprintf (asmfile, "@s%d;", TYPE_PRECISION (type)); CHARS (5); } if (print_int_cst_bounds_in_octal_p (type)) { fprintf (asmfile, "r"); CHARS (1); /* If this type derives from another type, output type index of parent type. This is particularly important when parent type is an enumerated type, because not generating the parent type index would transform the definition of this enumerated type into a plain unsigned type. */ if (TREE_TYPE (type) != 0) dbxout_type_index (TREE_TYPE (type)); else dbxout_type_index (type); fprintf (asmfile, ";"); CHARS (1); print_int_cst_octal (TYPE_MIN_VALUE (type)); fprintf (asmfile, ";"); CHARS (1); print_int_cst_octal (TYPE_MAX_VALUE (type)); fprintf (asmfile, ";"); CHARS (1); } else /* Output other integer types as subranges of `int'. */ dbxout_range_type (type); } break; case REAL_TYPE: /* This used to say `r1' and we used to take care to make sure that `int' was type number 1. 
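   These days we emit the index of whatever `int' happens to be, so an
   8-byte `double' comes out roughly as `rN;8;0;' where N is int's type
   number -- illustrative only; under DBX_USE_BINCL the index is a
   (file,type) pair rather than a single number.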
*/ fprintf (asmfile, "r"); CHARS (1); dbxout_type_index (integer_type_node); putc (';', asmfile); CHARS (1); print_wide_int (int_size_in_bytes (type)); fputs (";0;", asmfile); CHARS (3); break; case CHAR_TYPE: if (use_gnu_debug_info_extensions) { have_used_extensions = 1; fputs ("@s", asmfile); CHARS (2); print_wide_int (BITS_PER_UNIT * int_size_in_bytes (type)); fputs (";-20;", asmfile); CHARS (4); } else { /* Output the type `char' as a subrange of itself. That is what pcc seems to do. */ fprintf (asmfile, "r"); CHARS (1); dbxout_type_index (char_type_node); fprintf (asmfile, ";0;%d;", TYPE_UNSIGNED (type) ? 255 : 127); CHARS (7); } break; case BOOLEAN_TYPE: if (use_gnu_debug_info_extensions) { have_used_extensions = 1; fputs ("@s", asmfile); CHARS (2); print_wide_int (BITS_PER_UNIT * int_size_in_bytes (type)); fputs (";-16;", asmfile); CHARS (4); } else /* Define as enumeral type (False, True) */ { fprintf (asmfile, "eFalse:0,True:1,;"); CHARS (17); } break; case FILE_TYPE: putc ('d', asmfile); CHARS (1); dbxout_type (TREE_TYPE (type), 0); break; case COMPLEX_TYPE: /* Differs from the REAL_TYPE by its new data type number. R3 is NF_COMPLEX. We don't try to use any of the other NF_* codes since gdb doesn't care anyway. */ if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE) { fputs ("R3;", asmfile); CHARS (3); print_wide_int (2 * int_size_in_bytes (TREE_TYPE (type))); fputs (";0;", asmfile); CHARS (3); } else { /* Output a complex integer type as a structure, pending some other way to do it. */ putc ('s', asmfile); CHARS (1); print_wide_int (int_size_in_bytes (type)); fprintf (asmfile, "real:"); CHARS (5); dbxout_type (TREE_TYPE (type), 0); fprintf (asmfile, ",0,%d;", TYPE_PRECISION (TREE_TYPE (type))); CHARS (7); fprintf (asmfile, "imag:"); CHARS (5); dbxout_type (TREE_TYPE (type), 0); fprintf (asmfile, ",%d,%d;;", TYPE_PRECISION (TREE_TYPE (type)), TYPE_PRECISION (TREE_TYPE (type))); CHARS (10); } break; case SET_TYPE: if (use_gnu_debug_info_extensions) { have_used_extensions = 1; fputs ("@s", asmfile); CHARS (2); print_wide_int (BITS_PER_UNIT * int_size_in_bytes (type)); putc (';', asmfile); CHARS (1); /* Check if a bitstring type, which in Chill is different from a [power]set. */ if (TYPE_STRING_FLAG (type)) { fprintf (asmfile, "@S;"); CHARS (3); } } putc ('S', asmfile); CHARS (1); dbxout_type (TYPE_DOMAIN (type), 0); break; case ARRAY_TYPE: /* Make arrays of packed bits look like bitstrings for chill. */ if (TYPE_PACKED (type) && use_gnu_debug_info_extensions) { have_used_extensions = 1; fputs ("@s", asmfile); CHARS (2); print_wide_int (BITS_PER_UNIT * int_size_in_bytes (type)); fprintf (asmfile, ";@S;S"); CHARS (5); dbxout_type (TYPE_DOMAIN (type), 0); break; } /* Output "a" followed by a range type definition for the index type of the array followed by a reference to the target-type. ar1;0;N;M for a C array of type M and size N+1. */ /* Check if a character string type, which in Chill is different from an array of characters. 
*/ if (TYPE_STRING_FLAG (type) && use_gnu_debug_info_extensions) { have_used_extensions = 1; fprintf (asmfile, "@S;"); CHARS (3); } tem = TYPE_DOMAIN (type); if (tem == NULL) { fprintf (asmfile, "ar"); CHARS (2); dbxout_type_index (integer_type_node); fprintf (asmfile, ";0;-1;"); CHARS (6); } else { fprintf (asmfile, "a"); CHARS (1); dbxout_range_type (tem); } dbxout_type (TREE_TYPE (type), 0); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { int i, n_baseclasses = 0; if (TYPE_BINFO (type) != 0 && TYPE_BINFO_BASETYPES (type) != 0) n_baseclasses = TREE_VEC_LENGTH (TYPE_BINFO_BASETYPES (type)); /* Output a structure type. We must use the same test here as we use in the DBX_NO_XREFS case above. */ if ((TYPE_NAME (type) != 0 && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && DECL_IGNORED_P (TYPE_NAME (type))) && !full) || !COMPLETE_TYPE_P (type) /* No way in DBX fmt to describe a variable size. */ || ! host_integerp (TYPE_SIZE (type), 1)) { /* If the type is just a cross reference, output one and mark the type as partially described. If it later becomes defined, we will output its real definition. If the type has a name, don't nest its definition within another type's definition; instead, output an xref and let the definition come when the name is defined. */ fputs ((TREE_CODE (type) == RECORD_TYPE) ? "xs" : "xu", asmfile); CHARS (2); #if 0 /* This assertion is legitimately false in C++. */ /* We shouldn't be outputting a reference to a type before its definition unless the type has a tag name. A typedef name without a tag name should be impossible. */ if (TREE_CODE (TYPE_NAME (type)) != IDENTIFIER_NODE) abort (); #endif if (TYPE_NAME (type) != 0) dbxout_type_name (type); else { fprintf (asmfile, "$$%d", anonymous_type_number++); CHARS (5); } fprintf (asmfile, ":"); CHARS (1); typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF; break; } /* Identify record or union, and print its size. */ putc (((TREE_CODE (type) == RECORD_TYPE) ? 's' : 'u'), asmfile); CHARS (1); print_wide_int (int_size_in_bytes (type)); if (use_gnu_debug_info_extensions) { if (n_baseclasses) { have_used_extensions = 1; fprintf (asmfile, "!%d,", n_baseclasses); CHARS (8); } } for (i = 0; i < n_baseclasses; i++) { tree binfo = TYPE_BINFO (type); tree child = BINFO_BASETYPE (binfo, i); tree access = (BINFO_BASEACCESSES (binfo) ? BINFO_BASEACCESS (binfo, i) : access_public_node); if (use_gnu_debug_info_extensions) { have_used_extensions = 1; putc (BINFO_VIRTUAL_P (child) ? '1' : '0', asmfile); putc (access == access_public_node ? '2' : (access == access_protected_node ? '1' :'0'), asmfile); CHARS (2); if (BINFO_VIRTUAL_P (child) && strcmp (lang_hooks.name, "GNU C++") == 0) /* For a virtual base, print the (negative) offset within the vtable where we must look to find the necessary adjustment. */ print_wide_int (tree_low_cst (BINFO_VPTR_FIELD (child), 0) * BITS_PER_UNIT); else print_wide_int (tree_low_cst (BINFO_OFFSET (child), 0) * BITS_PER_UNIT); putc (',', asmfile); CHARS (1); dbxout_type (BINFO_TYPE (child), 0); putc (';', asmfile); CHARS (1); } else { /* Print out the base class information with fields which have the same names at the types they hold. 
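   In other words the base is written like a field,
   BASENAME:TYPE,BITOFFSET,BITSIZE; so a base class `B' at offset 0
   occupying 8 bytes might appear roughly as `B:2,0,64;' (made-up numbers,
   purely for illustration).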
*/ dbxout_type_name (BINFO_TYPE (child)); putc (':', asmfile); CHARS (1); dbxout_type (BINFO_TYPE (child), full); putc (',', asmfile); CHARS (1); print_wide_int (tree_low_cst (BINFO_OFFSET (child), 0) * BITS_PER_UNIT); putc (',', asmfile); CHARS (1); print_wide_int (tree_low_cst (TYPE_SIZE (BINFO_TYPE (child)), 0) * BITS_PER_UNIT); putc (';', asmfile); CHARS (1); } } } /* Write out the field declarations. */ dbxout_type_fields (type); if (use_gnu_debug_info_extensions && TYPE_METHODS (type) != NULL_TREE) { have_used_extensions = 1; dbxout_type_methods (type); } putc (';', asmfile); CHARS (1); if (use_gnu_debug_info_extensions && TREE_CODE (type) == RECORD_TYPE /* Avoid the ~ if we don't really need it--it confuses dbx. */ && TYPE_VFIELD (type)) { have_used_extensions = 1; /* Tell GDB+ that it may keep reading. */ putc ('~', asmfile); CHARS (1); /* We need to write out info about what field this class uses as its "main" vtable pointer field, because if this field is inherited from a base class, GDB cannot necessarily figure out which field it's using in time. */ if (TYPE_VFIELD (type)) { putc ('%', asmfile); CHARS (1); dbxout_type (DECL_FCONTEXT (TYPE_VFIELD (type)), 0); } putc (';', asmfile); CHARS (1); } break; case ENUMERAL_TYPE: /* We must use the same test here as we use in the DBX_NO_XREFS case above. We simplify it a bit since an enum will never have a variable size. */ if ((TYPE_NAME (type) != 0 && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && DECL_IGNORED_P (TYPE_NAME (type))) && !full) || !COMPLETE_TYPE_P (type)) { fprintf (asmfile, "xe"); CHARS (2); dbxout_type_name (type); typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF; putc (':', asmfile); CHARS (1); return; } if (use_gnu_debug_info_extensions && TYPE_PRECISION (type) != TYPE_PRECISION (integer_type_node)) { fprintf (asmfile, "@s%d;", TYPE_PRECISION (type)); CHARS (5); } putc ('e', asmfile); CHARS (1); for (tem = TYPE_VALUES (type); tem; tem = TREE_CHAIN (tem)) { fprintf (asmfile, "%s:", IDENTIFIER_POINTER (TREE_PURPOSE (tem))); CHARS (IDENTIFIER_LENGTH (TREE_PURPOSE (tem)) + 1); if (TREE_INT_CST_HIGH (TREE_VALUE (tem)) == 0) print_wide_int (TREE_INT_CST_LOW (TREE_VALUE (tem))); else if (TREE_INT_CST_HIGH (TREE_VALUE (tem)) == -1 && (HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_VALUE (tem)) < 0) print_wide_int (TREE_INT_CST_LOW (TREE_VALUE (tem))); else print_int_cst_octal (TREE_VALUE (tem)); putc (',', asmfile); CHARS (1); if (TREE_CHAIN (tem) != 0) CONTIN; } putc (';', asmfile); CHARS (1); break; case POINTER_TYPE: putc ('*', asmfile); CHARS (1); dbxout_type (TREE_TYPE (type), 0); break; case METHOD_TYPE: if (use_gnu_debug_info_extensions) { have_used_extensions = 1; putc ('#', asmfile); CHARS (1); /* Write the argument types out longhand. */ dbxout_type (TYPE_METHOD_BASETYPE (type), 0); putc (',', asmfile); CHARS (1); dbxout_type (TREE_TYPE (type), 0); dbxout_args (TYPE_ARG_TYPES (type)); putc (';', asmfile); CHARS (1); } else /* Treat it as a function type. */ dbxout_type (TREE_TYPE (type), 0); break; case OFFSET_TYPE: if (use_gnu_debug_info_extensions) { have_used_extensions = 1; putc ('@', asmfile); CHARS (1); dbxout_type (TYPE_OFFSET_BASETYPE (type), 0); putc (',', asmfile); CHARS (1); dbxout_type (TREE_TYPE (type), 0); } else /* Should print as an int, because it is really just an offset. */ dbxout_type (integer_type_node, 0); break; case REFERENCE_TYPE: if (use_gnu_debug_info_extensions) have_used_extensions = 1; putc (use_gnu_debug_info_extensions ? 
'&' : '*', asmfile); CHARS (1); dbxout_type (TREE_TYPE (type), 0); break; case FUNCTION_TYPE: putc ('f', asmfile); CHARS (1); dbxout_type (TREE_TYPE (type), 0); break; default: abort (); } } /* Return nonzero if the given type represents an integer whose bounds should be printed in octal format. */ static bool print_int_cst_bounds_in_octal_p (tree type) { /* If we can use GDB extensions and the size is wider than a long (the size used by GDB to read them) or we may have trouble writing the bounds the usual way, write them in octal. Note the test is for the *target's* size of "long", not that of the host. The host test is just to make sure we can write it out in case the host wide int is narrower than the target "long". For unsigned types, we use octal if they are the same size or larger. This is because we print the bounds as signed decimal, and hence they can't span same size unsigned types. */ if (use_gnu_debug_info_extensions && TYPE_MIN_VALUE (type) != 0 && TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST && TYPE_MAX_VALUE (type) != 0 && TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST && (TYPE_PRECISION (type) > TYPE_PRECISION (integer_type_node) || ((TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)) && TYPE_UNSIGNED (type)) || TYPE_PRECISION (type) > HOST_BITS_PER_WIDE_INT || (TYPE_PRECISION (type) == HOST_BITS_PER_WIDE_INT && TYPE_UNSIGNED (type)))) return TRUE; else return FALSE; } /* Print the value of integer constant C, in octal, handling double precision. */ static void print_int_cst_octal (tree c) { unsigned HOST_WIDE_INT high = TREE_INT_CST_HIGH (c); unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (c); int excess = (3 - (HOST_BITS_PER_WIDE_INT % 3)); unsigned int width = TYPE_PRECISION (TREE_TYPE (c)); /* GDB wants constants with no extra leading "1" bits, so we need to remove any sign-extension that might be present. */ if (width == HOST_BITS_PER_WIDE_INT * 2) ; else if (width > HOST_BITS_PER_WIDE_INT) high &= (((HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT)) - 1); else if (width == HOST_BITS_PER_WIDE_INT) high = 0; else high = 0, low &= (((HOST_WIDE_INT) 1 << width) - 1); fprintf (asmfile, "0"); CHARS (1); if (excess == 3) { print_octal (high, HOST_BITS_PER_WIDE_INT / 3); print_octal (low, HOST_BITS_PER_WIDE_INT / 3); } else { unsigned HOST_WIDE_INT beg = high >> excess; unsigned HOST_WIDE_INT middle = ((high & (((HOST_WIDE_INT) 1 << excess) - 1)) << (3 - excess) | (low >> (HOST_BITS_PER_WIDE_INT / 3 * 3))); unsigned HOST_WIDE_INT end = low & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 3 * 3)) - 1); fprintf (asmfile, "%o%01o", (int) beg, (int) middle); CHARS (2); print_octal (end, HOST_BITS_PER_WIDE_INT / 3); } } static void print_octal (unsigned HOST_WIDE_INT value, int digits) { int i; for (i = digits - 1; i >= 0; i--) fprintf (asmfile, "%01o", (int) ((value >> (3 * i)) & 7)); CHARS (digits); } /* Output C in decimal while adjusting the number of digits written. */ static void print_wide_int (HOST_WIDE_INT c) { int digs = 0; fprintf (asmfile, HOST_WIDE_INT_PRINT_DEC, c); if (c < 0) digs++, c = -c; while (c > 0) c /= 10; digs++; CHARS (digs); } /* Output the name of type TYPE, with no punctuation. Such names can be set up either by typedef declarations or by struct, enum and union tags. 
*/ static void dbxout_type_name (tree type) { tree t; if (TYPE_NAME (type) == 0) abort (); if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE) { t = TYPE_NAME (type); } else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL) { t = DECL_NAME (TYPE_NAME (type)); } else abort (); fprintf (asmfile, "%s", IDENTIFIER_POINTER (t)); CHARS (IDENTIFIER_LENGTH (t)); } /* Output leading leading struct or class names needed for qualifying type whose scope is limited to a struct or class. */ static void dbxout_class_name_qualifiers (tree decl) { tree context = decl_type_context (decl); if (context != NULL_TREE && TREE_CODE(context) == RECORD_TYPE && TYPE_NAME (context) != 0 && (TREE_CODE (TYPE_NAME (context)) == IDENTIFIER_NODE || (DECL_NAME (TYPE_NAME (context)) != 0))) { tree name = TYPE_NAME (context); emit_pending_bincls_if_required (); if (TREE_CODE (name) == TYPE_DECL) { dbxout_class_name_qualifiers (name); name = DECL_NAME (name); } fprintf (asmfile, "%s::", IDENTIFIER_POINTER (name)); CHARS (IDENTIFIER_LENGTH (name) + 2); } } /* Output a .stabs for the symbol defined by DECL, which must be a ..._DECL node in the normal namespace. It may be a CONST_DECL, a FUNCTION_DECL, a PARM_DECL or a VAR_DECL. LOCAL is nonzero if the scope is less than the entire file. Return 1 if a stabs might have been emitted. */ int dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED) { tree type = TREE_TYPE (decl); tree context = NULL_TREE; int result = 0; /* "Intercept" dbxout_symbol() calls like we do all debug_hooks. */ ++debug_nesting; /* Ignore nameless syms, but don't ignore type tags. */ if ((DECL_NAME (decl) == 0 && TREE_CODE (decl) != TYPE_DECL) || DECL_IGNORED_P (decl)) DBXOUT_DECR_NESTING_AND_RETURN (0); /* If we are to generate only the symbols actually used then such symbol nodees are flagged with TREE_USED. Ignore any that aren't flaged as TREE_USED. */ if (flag_debug_only_used_symbols && (!TREE_USED (decl) && (TREE_CODE (decl) != VAR_DECL || !DECL_INITIAL (decl)))) DBXOUT_DECR_NESTING_AND_RETURN (0); /* If dbxout_init has not yet run, queue this symbol for later. */ if (!typevec) { preinit_symbols = tree_cons (0, decl, preinit_symbols); DBXOUT_DECR_NESTING_AND_RETURN (0); } if (flag_debug_only_used_symbols) { tree t; /* We now have a used symbol. We need to generate the info for the symbol's type in addition to the symbol itself. These type symbols are queued to be generated after were done with the symbol itself (done because the symbol's info is generated with fprintf's, etc. as it determines what's needed). Note, because the TREE_TYPE(type) might be something like a pointer to a named type we need to look for the first name we see following the TREE_TYPE chain. */ t = type; while (POINTER_TYPE_P (t)) t = TREE_TYPE (t); /* RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE, and ENUMERAL_TYPE need special treatment. The TYPE_STUB_DECL field in these types generally represents the tag name type we want to output. In addition there could be a typedef type with a different name. In that case we also want to output that. */ if ((TREE_CODE (t) == RECORD_TYPE || TREE_CODE (t) == UNION_TYPE || TREE_CODE (t) == QUAL_UNION_TYPE || TREE_CODE (t) == ENUMERAL_TYPE) && TYPE_STUB_DECL (t) && TYPE_STUB_DECL (t) != decl && TREE_CODE_CLASS (TREE_CODE (TYPE_STUB_DECL (t))) == 'd' && ! 
DECL_IGNORED_P (TYPE_STUB_DECL (t))) { debug_queue_symbol (TYPE_STUB_DECL (t)); if (TYPE_NAME (t) && TYPE_NAME (t) != TYPE_STUB_DECL (t) && TYPE_NAME (t) != decl && TREE_CODE_CLASS (TREE_CODE (TYPE_NAME (t))) == 'd') debug_queue_symbol (TYPE_NAME (t)); } else if (TYPE_NAME (t) && TYPE_NAME (t) != decl && TREE_CODE_CLASS (TREE_CODE (TYPE_NAME (t))) == 'd') debug_queue_symbol (TYPE_NAME (t)); } emit_pending_bincls_if_required (); dbxout_prepare_symbol (decl); /* The output will always start with the symbol name, so always count that in the length-output-so-far. */ if (DECL_NAME (decl) != 0) current_sym_nchars = 2 + IDENTIFIER_LENGTH (DECL_NAME (decl)); switch (TREE_CODE (decl)) { case CONST_DECL: /* Enum values are defined by defining the enum type. */ break; case FUNCTION_DECL: if (DECL_RTL (decl) == 0) DBXOUT_DECR_NESTING_AND_RETURN (0); if (DECL_EXTERNAL (decl)) break; /* Don't mention a nested function under its parent. */ context = decl_function_context (decl); if (context == current_function_decl) break; if (!MEM_P (DECL_RTL (decl)) || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF) break; FORCE_TEXT; fprintf (asmfile, "%s\"%s:%c", ASM_STABS_OP, IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), TREE_PUBLIC (decl) ? 'F' : 'f'); result = 1; current_sym_code = N_FUN; current_sym_addr = XEXP (DECL_RTL (decl), 0); if (TREE_TYPE (type)) dbxout_type (TREE_TYPE (type), 0); else dbxout_type (void_type_node, 0); /* For a nested function, when that function is compiled, mention the containing function name as well as (since dbx wants it) our own assembler-name. */ if (context != 0) fprintf (asmfile, ",%s,%s", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), IDENTIFIER_POINTER (DECL_NAME (context))); dbxout_finish_symbol (decl); break; case TYPE_DECL: /* Don't output the same typedef twice. And don't output what language-specific stuff doesn't want output. */ if (TREE_ASM_WRITTEN (decl) || TYPE_DECL_SUPPRESS_DEBUG (decl)) DBXOUT_DECR_NESTING_AND_RETURN (0); /* Don't output typedefs for types with magic type numbers (XCOFF). */ #ifdef DBX_ASSIGN_FUNDAMENTAL_TYPE_NUMBER { int fundamental_type_number = DBX_ASSIGN_FUNDAMENTAL_TYPE_NUMBER (decl); if (fundamental_type_number != 0) { TREE_ASM_WRITTEN (decl) = 1; TYPE_SYMTAB_ADDRESS (TREE_TYPE (decl)) = fundamental_type_number; DBXOUT_DECR_NESTING_AND_RETURN (0); } } #endif FORCE_TEXT; result = 1; { int tag_needed = 1; int did_output = 0; if (DECL_NAME (decl)) { /* Nonzero means we must output a tag as well as a typedef. */ tag_needed = 0; /* Handle the case of a C++ structure or union where the TYPE_NAME is a TYPE_DECL which gives both a typedef name and a tag. */ /* dbx requires the tag first and the typedef second. */ if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) && TYPE_NAME (type) == decl && !(use_gnu_debug_info_extensions && have_used_extensions) && !TREE_ASM_WRITTEN (TYPE_NAME (type)) /* Distinguish the implicit typedefs of C++ from explicit ones that might be found in C. */ && DECL_ARTIFICIAL (decl) /* Do not generate a tag for incomplete records. */ && COMPLETE_TYPE_P (type) /* Do not generate a tag for records of variable size, since this type can not be properly described in the DBX format, and it confuses some tools such as objdump. 
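   When all of those conditions hold, the tag stab emitted just below looks
   something like
       .stabs "foo:T2=s4x:1,0,32;;",128,0,0,0
   for `struct foo { int x; };' -- purely illustrative: 128 is the usual
   N_LSYM value used for DBX_TYPE_DECL_STABS_CODE, and the type numbers
   depend on emission order.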
*/ && host_integerp (TYPE_SIZE (type), 1)) { tree name = TYPE_NAME (type); if (TREE_CODE (name) == TYPE_DECL) name = DECL_NAME (name); current_sym_code = DBX_TYPE_DECL_STABS_CODE; current_sym_value = 0; current_sym_addr = 0; current_sym_nchars = 2 + IDENTIFIER_LENGTH (name); fprintf (asmfile, "%s\"%s:T", ASM_STABS_OP, IDENTIFIER_POINTER (name)); dbxout_type (type, 1); dbxout_finish_symbol (NULL_TREE); } /* Output .stabs (or whatever) and leading double quote. */ fprintf (asmfile, "%s\"", ASM_STABS_OP); if (use_gnu_debug_info_extensions) { /* Output leading class/struct qualifiers. */ dbxout_class_name_qualifiers (decl); } /* Output typedef name. */ fprintf (asmfile, "%s:", IDENTIFIER_POINTER (DECL_NAME (decl))); /* Short cut way to output a tag also. */ if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) && TYPE_NAME (type) == decl /* Distinguish the implicit typedefs of C++ from explicit ones that might be found in C. */ && DECL_ARTIFICIAL (decl)) { if (use_gnu_debug_info_extensions && have_used_extensions) { putc ('T', asmfile); TREE_ASM_WRITTEN (TYPE_NAME (type)) = 1; } #if 0 /* Now we generate the tag for this case up above. */ else tag_needed = 1; #endif } putc ('t', asmfile); current_sym_code = DBX_TYPE_DECL_STABS_CODE; dbxout_type (type, 1); dbxout_finish_symbol (decl); did_output = 1; } /* Don't output a tag if this is an incomplete type. This prevents the sun4 Sun OS 4.x dbx from crashing. */ if (tag_needed && TYPE_NAME (type) != 0 && (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE || (DECL_NAME (TYPE_NAME (type)) != 0)) && COMPLETE_TYPE_P (type) && !TREE_ASM_WRITTEN (TYPE_NAME (type))) { /* For a TYPE_DECL with no name, but the type has a name, output a tag. This is what represents `struct foo' with no typedef. */ /* In C++, the name of a type is the corresponding typedef. In C, it is an IDENTIFIER_NODE. */ tree name = TYPE_NAME (type); if (TREE_CODE (name) == TYPE_DECL) name = DECL_NAME (name); current_sym_code = DBX_TYPE_DECL_STABS_CODE; current_sym_value = 0; current_sym_addr = 0; current_sym_nchars = 2 + IDENTIFIER_LENGTH (name); fprintf (asmfile, "%s\"%s:T", ASM_STABS_OP, IDENTIFIER_POINTER (name)); dbxout_type (type, 1); dbxout_finish_symbol (NULL_TREE); did_output = 1; } /* If an enum type has no name, it cannot be referred to, but we must output it anyway, since the enumeration constants can be referred to. */ if (!did_output && TREE_CODE (type) == ENUMERAL_TYPE) { current_sym_code = DBX_TYPE_DECL_STABS_CODE; current_sym_value = 0; current_sym_addr = 0; current_sym_nchars = 2; /* Some debuggers fail when given NULL names, so give this a harmless name of ` '. */ fprintf (asmfile, "%s\" :T", ASM_STABS_OP); dbxout_type (type, 1); dbxout_finish_symbol (NULL_TREE); } /* Prevent duplicate output of a typedef. */ TREE_ASM_WRITTEN (decl) = 1; break; } case PARM_DECL: /* Parm decls go in their own separate chains and are output by dbxout_reg_parms and dbxout_parms. */ abort (); case RESULT_DECL: /* Named return value, treat like a VAR_DECL. */ case VAR_DECL: if (! DECL_RTL_SET_P (decl)) DBXOUT_DECR_NESTING_AND_RETURN (0); /* Don't mention a variable that is external. Let the file that defines it describe it. */ if (DECL_EXTERNAL (decl)) break; /* If the variable is really a constant and not written in memory, inform the debugger. */ if (TREE_STATIC (decl) && TREE_READONLY (decl) && DECL_INITIAL (decl) != 0 && host_integerp (DECL_INITIAL (decl), 0) && ! 
TREE_ASM_WRITTEN (decl) && (DECL_CONTEXT (decl) == NULL_TREE || TREE_CODE (DECL_CONTEXT (decl)) == BLOCK)) { if (TREE_PUBLIC (decl) == 0) { /* The sun4 assembler does not grok this. */ const char *name = IDENTIFIER_POINTER (DECL_NAME (decl)); if (TREE_CODE (TREE_TYPE (decl)) == INTEGER_TYPE || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) { HOST_WIDE_INT ival = tree_low_cst (DECL_INITIAL (decl), 0); fprintf (asmfile, "%s\"%s:c=i" HOST_WIDE_INT_PRINT_DEC "\",0x%x,0,0,0\n", ASM_STABS_OP, name, ival, N_LSYM); DBXOUT_DECR_NESTING; return 1; } else if (TREE_CODE (TREE_TYPE (decl)) == REAL_TYPE) { /* Don't know how to do this yet. */ } break; } /* else it is something we handle like a normal variable. */ } SET_DECL_RTL (decl, eliminate_regs (DECL_RTL (decl), 0, NULL_RTX)); #ifdef LEAF_REG_REMAP if (current_function_uses_only_leaf_regs) leaf_renumber_regs_insn (DECL_RTL (decl)); #endif result = dbxout_symbol_location (decl, type, 0, DECL_RTL (decl)); break; default: break; } DBXOUT_DECR_NESTING; return result; } /* Output the stab for DECL, a VAR_DECL, RESULT_DECL or PARM_DECL. Add SUFFIX to its name, if SUFFIX is not 0. Describe the variable as residing in HOME (usually HOME is DECL_RTL (DECL), but not always). Returns 1 if the stab was really emitted. */ static int dbxout_symbol_location (tree decl, tree type, const char *suffix, rtx home) { int letter = 0; int regno = -1; emit_pending_bincls_if_required (); /* Don't mention a variable at all if it was completely optimized into nothingness. If the decl was from an inline function, then its rtl is not identically the rtl that was used in this particular compilation. */ if (GET_CODE (home) == SUBREG) { rtx value = home; while (GET_CODE (value) == SUBREG) value = SUBREG_REG (value); if (REG_P (value)) { if (REGNO (value) >= FIRST_PSEUDO_REGISTER) return 0; } home = alter_subreg (&home); } if (REG_P (home)) { regno = REGNO (home); if (regno >= FIRST_PSEUDO_REGISTER) return 0; } /* The kind-of-variable letter depends on where the variable is and on the scope of its name: G and N_GSYM for static storage and global scope, S for static storage and file scope, V for static storage and local scope, for those two, use N_LCSYM if data is in bss segment, N_STSYM if in data segment, N_FUN otherwise. (We used N_FUN originally, then changed to N_STSYM to please GDB. However, it seems that confused ld. Now GDB has been fixed to like N_FUN, says Kingdon.) no letter at all, and N_LSYM, for auto variable, r and N_RSYM for register variable. */ if (MEM_P (home) && GET_CODE (XEXP (home, 0)) == SYMBOL_REF) { if (TREE_PUBLIC (decl)) { letter = 'G'; current_sym_code = N_GSYM; } else { current_sym_addr = XEXP (home, 0); letter = decl_function_context (decl) ? 'V' : 'S'; /* This should be the same condition as in assemble_variable, but we don't have access to dont_output_data here. So, instead, we rely on the fact that error_mark_node initializers always end up in bss for C++ and never end up in bss for C. */ if (DECL_INITIAL (decl) == 0 || (!strcmp (lang_hooks.name, "GNU C++") && DECL_INITIAL (decl) == error_mark_node)) current_sym_code = N_LCSYM; else if (DECL_IN_TEXT_SECTION (decl)) /* This is not quite right, but it's the closest of all the codes that Unix defines. */ current_sym_code = DBX_STATIC_CONST_VAR_CODE; else { /* Some ports can transform a symbol ref into a label ref, because the symbol ref is too far away and has to be dumped into a constant pool. Alternatively, the symbol in the constant pool might be referenced by a different symbol. 
*/ if (GET_CODE (current_sym_addr) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (current_sym_addr)) { bool marked; rtx tmp = get_pool_constant_mark (current_sym_addr, &marked); if (GET_CODE (tmp) == SYMBOL_REF) { current_sym_addr = tmp; if (CONSTANT_POOL_ADDRESS_P (current_sym_addr)) get_pool_constant_mark (current_sym_addr, &marked); else marked = true; } else if (GET_CODE (tmp) == LABEL_REF) { current_sym_addr = tmp; marked = true; } /* If all references to the constant pool were optimized out, we just ignore the symbol. */ if (!marked) return 0; } /* Ultrix `as' seems to need this. */ #ifdef DBX_STATIC_STAB_DATA_SECTION data_section (); #endif current_sym_code = N_STSYM; } } } else if (regno >= 0) { letter = 'r'; current_sym_code = N_RSYM; current_sym_value = DBX_REGISTER_NUMBER (regno); } else if (MEM_P (home) && (MEM_P (XEXP (home, 0)) || (REG_P (XEXP (home, 0)) && REGNO (XEXP (home, 0)) != HARD_FRAME_POINTER_REGNUM && REGNO (XEXP (home, 0)) != STACK_POINTER_REGNUM #if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM && REGNO (XEXP (home, 0)) != ARG_POINTER_REGNUM #endif ))) /* If the value is indirect by memory or by a register that isn't the frame pointer then it means the object is variable-sized and address through that register or stack slot. DBX has no way to represent this so all we can do is output the variable as a pointer. If it's not a parameter, ignore it. */ { if (REG_P (XEXP (home, 0))) { letter = 'r'; current_sym_code = N_RSYM; if (REGNO (XEXP (home, 0)) >= FIRST_PSEUDO_REGISTER) return 0; current_sym_value = DBX_REGISTER_NUMBER (REGNO (XEXP (home, 0))); } else { current_sym_code = N_LSYM; /* RTL looks like (MEM (MEM (PLUS (REG...) (CONST_INT...)))). We want the value of that CONST_INT. */ current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (XEXP (home, 0), 0)); } /* Effectively do build_pointer_type, but don't cache this type, since it might be temporary whereas the type it points to might have been saved for inlining. */ /* Don't use REFERENCE_TYPE because dbx can't handle that. */ type = make_node (POINTER_TYPE); TREE_TYPE (type) = TREE_TYPE (decl); } else if (MEM_P (home) && REG_P (XEXP (home, 0))) { current_sym_code = N_LSYM; current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (home, 0)); } else if (MEM_P (home) && GET_CODE (XEXP (home, 0)) == PLUS && GET_CODE (XEXP (XEXP (home, 0), 1)) == CONST_INT) { current_sym_code = N_LSYM; /* RTL looks like (MEM (PLUS (REG...) (CONST_INT...))) We want the value of that CONST_INT. */ current_sym_value = DEBUGGER_AUTO_OFFSET (XEXP (home, 0)); } else if (MEM_P (home) && GET_CODE (XEXP (home, 0)) == CONST) { /* Handle an obscure case which can arise when optimizing and when there are few available registers. (This is *always* the case for i386/i486 targets). The RTL looks like (MEM (CONST ...)) even though this variable is a local `auto' or a local `register' variable. In effect, what has happened is that the reload pass has seen that all assignments and references for one such a local variable can be replaced by equivalent assignments and references to some static storage variable, thereby avoiding the need for a register. In such cases we're forced to lie to debuggers and tell them that this variable was itself `static'. */ current_sym_code = N_LCSYM; letter = 'V'; current_sym_addr = XEXP (XEXP (home, 0), 0); } else if (GET_CODE (home) == CONCAT) { tree subtype; /* If TYPE is not a COMPLEX_TYPE (it might be a RECORD_TYPE, for example), then there is no easy way to figure out what SUBTYPE should be. So, we give up. 
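   When TYPE really is a COMPLEX_TYPE we fall through to the code below and
   describe the two halves as separate stabs with "$real" and "$imag"
   appended to the name, so a local `_Complex double z' shows up roughly as
   `z$real' and `z$imag' entries (which half comes first depends on
   WORDS_BIG_ENDIAN).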
*/ if (TREE_CODE (type) != COMPLEX_TYPE) return 0; subtype = TREE_TYPE (type); /* If the variable's storage is in two parts, output each as a separate stab with a modified name. */ if (WORDS_BIG_ENDIAN) dbxout_symbol_location (decl, subtype, "$imag", XEXP (home, 0)); else dbxout_symbol_location (decl, subtype, "$real", XEXP (home, 0)); dbxout_prepare_symbol (decl); if (WORDS_BIG_ENDIAN) dbxout_symbol_location (decl, subtype, "$real", XEXP (home, 1)); else dbxout_symbol_location (decl, subtype, "$imag", XEXP (home, 1)); return 1; } else /* Address might be a MEM, when DECL is a variable-sized object. Or it might be const0_rtx, meaning previous passes want us to ignore this variable. */ return 0; /* Ok, start a symtab entry and output the variable name. */ FORCE_TEXT; #ifdef DBX_STATIC_BLOCK_START DBX_STATIC_BLOCK_START (asmfile, current_sym_code); #endif dbxout_symbol_name (decl, suffix, letter); dbxout_type (type, 0); dbxout_finish_symbol (decl); #ifdef DBX_STATIC_BLOCK_END DBX_STATIC_BLOCK_END (asmfile, current_sym_code); #endif return 1; } /* Output the symbol name of DECL for a stabs, with suffix SUFFIX. Then output LETTER to indicate the kind of location the symbol has. */ static void dbxout_symbol_name (tree decl, const char *suffix, int letter) { const char *name; if (DECL_CONTEXT (decl) && (TYPE_P (DECL_CONTEXT (decl)) || TREE_CODE (DECL_CONTEXT (decl)) == NAMESPACE_DECL)) /* One slight hitch: if this is a VAR_DECL which is a class member or a namespace member, we must put out the mangled name instead of the DECL_NAME. Note also that static member (variable) names DO NOT begin with underscores in .stabs directives. */ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); else /* ...but if we're function-local, we don't want to include the junk added by ASM_FORMAT_PRIVATE_NAME. */ name = IDENTIFIER_POINTER (DECL_NAME (decl)); if (name == 0) name = "(anon)"; fprintf (asmfile, "%s\"%s%s:", ASM_STABS_OP, name, (suffix ? suffix : "")); if (letter) putc (letter, asmfile); } static void dbxout_prepare_symbol (tree decl ATTRIBUTE_UNUSED) { #ifdef WINNING_GDB const char *filename = DECL_SOURCE_FILE (decl); dbxout_source_file (asmfile, filename); #endif /* Initialize variables used to communicate each symbol's debug information to dbxout_finish_symbol with zeroes. */ /* Cast avoids warning in old compilers. */ current_sym_code = (STAB_CODE_TYPE) 0; current_sym_value = 0; current_sym_addr = 0; } static void dbxout_finish_symbol (tree sym) { #ifdef DBX_FINISH_SYMBOL DBX_FINISH_SYMBOL (sym); #else int line = 0; if (use_gnu_debug_info_extensions && sym != 0) line = DECL_SOURCE_LINE (sym); fprintf (asmfile, "\",%d,0,%d,", current_sym_code, line); if (current_sym_addr) output_addr_const (asmfile, current_sym_addr); else fprintf (asmfile, "%d", current_sym_value); putc ('\n', asmfile); #endif } /* Output definitions of all the decls in a chain. Return nonzero if anything was output */ int dbxout_syms (tree syms) { int result = 0; while (syms) { result += dbxout_symbol (syms, 1); syms = TREE_CHAIN (syms); } return result; } /* The following two functions output definitions of function parameters. Each parameter gets a definition locating it in the parameter list. Each parameter that is a register variable gets a second definition locating it in the register. Printing or argument lists in gdb uses the definitions that locate in the parameter list. But reference to the variable in expressions uses preferentially the definition as a register. 
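   For illustration, the first kind of definition for a parameter passed on
   the stack is an N_PSYM stab along the lines of
       .stabs "n:p1",160,0,0,8
   where `p' is the default DBX_MEMPARM_STABS_LETTER, 160 is N_PSYM, `1' is
   the type number of the parm's type and 8 its frame offset -- a sketch
   only, since the letter, code and offset are all target-dependent.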
*/ /* Output definitions, referring to storage in the parmlist, of all the parms in PARMS, which is a chain of PARM_DECL nodes. */ void dbxout_parms (tree parms) { ++debug_nesting; emit_pending_bincls_if_required (); for (; parms; parms = TREE_CHAIN (parms)) if (DECL_NAME (parms) && TREE_TYPE (parms) != error_mark_node && DECL_RTL_SET_P (parms) && DECL_INCOMING_RTL (parms)) { dbxout_prepare_symbol (parms); /* Perform any necessary register eliminations on the parameter's rtl, so that the debugging output will be accurate. */ DECL_INCOMING_RTL (parms) = eliminate_regs (DECL_INCOMING_RTL (parms), 0, NULL_RTX); SET_DECL_RTL (parms, eliminate_regs (DECL_RTL (parms), 0, NULL_RTX)); #ifdef LEAF_REG_REMAP if (current_function_uses_only_leaf_regs) { leaf_renumber_regs_insn (DECL_INCOMING_RTL (parms)); leaf_renumber_regs_insn (DECL_RTL (parms)); } #endif if (PARM_PASSED_IN_MEMORY (parms)) { rtx addr = XEXP (DECL_INCOMING_RTL (parms), 0); /* ??? Here we assume that the parm address is indexed off the frame pointer or arg pointer. If that is not true, we produce meaningless results, but do not crash. */ if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT) current_sym_value = INTVAL (XEXP (addr, 1)); else current_sym_value = 0; current_sym_code = N_PSYM; current_sym_addr = 0; FORCE_TEXT; if (DECL_NAME (parms)) { current_sym_nchars = 2 + IDENTIFIER_LENGTH (DECL_NAME (parms)); fprintf (asmfile, "%s\"%s:%c", ASM_STABS_OP, IDENTIFIER_POINTER (DECL_NAME (parms)), DBX_MEMPARM_STABS_LETTER); } else { current_sym_nchars = 8; fprintf (asmfile, "%s\"(anon):%c", ASM_STABS_OP, DBX_MEMPARM_STABS_LETTER); } /* It is quite tempting to use: dbxout_type (TREE_TYPE (parms), 0); as the next statement, rather than using DECL_ARG_TYPE(), so that gcc reports the actual type of the parameter, rather than the promoted type. This certainly makes GDB's life easier, at least for some ports. The change is a bad idea however, since GDB expects to be able access the type without performing any conversions. So for example, if we were passing a float to an unprototyped function, gcc will store a double on the stack, but if we emit a stab saying the type is a float, then gdb will only read in a single value, and this will produce an erroneous value. */ dbxout_type (DECL_ARG_TYPE (parms), 0); current_sym_value = DEBUGGER_ARG_OFFSET (current_sym_value, addr); dbxout_finish_symbol (parms); } else if (REG_P (DECL_RTL (parms))) { rtx best_rtl; char regparm_letter; tree parm_type; /* Parm passed in registers and lives in registers or nowhere. */ current_sym_code = DBX_REGPARM_STABS_CODE; regparm_letter = DBX_REGPARM_STABS_LETTER; current_sym_addr = 0; /* If parm lives in a register, use that register; pretend the parm was passed there. It would be more consistent to describe the register where the parm was passed, but in practice that register usually holds something else. If we use DECL_RTL, then we must use the declared type of the variable, not the type that it arrived in. */ if (REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER) { best_rtl = DECL_RTL (parms); parm_type = TREE_TYPE (parms); } /* If the parm lives nowhere, use the register where it was passed. It is also better to use the declared type here. 
*/ else { best_rtl = DECL_INCOMING_RTL (parms); parm_type = TREE_TYPE (parms); } current_sym_value = DBX_REGISTER_NUMBER (REGNO (best_rtl)); FORCE_TEXT; if (DECL_NAME (parms)) { current_sym_nchars = 2 + IDENTIFIER_LENGTH (DECL_NAME (parms)); fprintf (asmfile, "%s\"%s:%c", ASM_STABS_OP, IDENTIFIER_POINTER (DECL_NAME (parms)), regparm_letter); } else { current_sym_nchars = 8; fprintf (asmfile, "%s\"(anon):%c", ASM_STABS_OP, regparm_letter); } dbxout_type (parm_type, 0); dbxout_finish_symbol (parms); } else if (MEM_P (DECL_RTL (parms)) && REG_P (XEXP (DECL_RTL (parms), 0)) && REGNO (XEXP (DECL_RTL (parms), 0)) != HARD_FRAME_POINTER_REGNUM && REGNO (XEXP (DECL_RTL (parms), 0)) != STACK_POINTER_REGNUM #if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM && REGNO (XEXP (DECL_RTL (parms), 0)) != ARG_POINTER_REGNUM #endif ) { /* Parm was passed via invisible reference. That is, its address was passed in a register. Output it as if it lived in that register. The debugger will know from the type that it was actually passed by invisible reference. */ char regparm_letter; /* Parm passed in registers and lives in registers or nowhere. */ current_sym_code = DBX_REGPARM_STABS_CODE; if (use_gnu_debug_info_extensions) regparm_letter = GDB_INV_REF_REGPARM_STABS_LETTER; else regparm_letter = DBX_REGPARM_STABS_LETTER; /* DECL_RTL looks like (MEM (REG...). Get the register number. If it is an unallocated pseudo-reg, then use the register where it was passed instead. */ if (REGNO (XEXP (DECL_RTL (parms), 0)) < FIRST_PSEUDO_REGISTER) current_sym_value = REGNO (XEXP (DECL_RTL (parms), 0)); else current_sym_value = REGNO (DECL_INCOMING_RTL (parms)); current_sym_addr = 0; FORCE_TEXT; if (DECL_NAME (parms)) { current_sym_nchars = 2 + strlen (IDENTIFIER_POINTER (DECL_NAME (parms))); fprintf (asmfile, "%s\"%s:%c", ASM_STABS_OP, IDENTIFIER_POINTER (DECL_NAME (parms)), regparm_letter); } else { current_sym_nchars = 8; fprintf (asmfile, "%s\"(anon):%c", ASM_STABS_OP, regparm_letter); } dbxout_type (TREE_TYPE (parms), 0); dbxout_finish_symbol (parms); } else if (MEM_P (DECL_RTL (parms)) && MEM_P (XEXP (DECL_RTL (parms), 0))) { /* Parm was passed via invisible reference, with the reference living on the stack. DECL_RTL looks like (MEM (MEM (PLUS (REG ...) (CONST_INT ...)))) or it could look like (MEM (MEM (REG))). */ const char *const decl_name = (DECL_NAME (parms) ? IDENTIFIER_POINTER (DECL_NAME (parms)) : "(anon)"); if (REG_P (XEXP (XEXP (DECL_RTL (parms), 0), 0))) current_sym_value = 0; else current_sym_value = INTVAL (XEXP (XEXP (XEXP (DECL_RTL (parms), 0), 0), 1)); current_sym_addr = 0; current_sym_code = N_PSYM; FORCE_TEXT; fprintf (asmfile, "%s\"%s:v", ASM_STABS_OP, decl_name); current_sym_value = DEBUGGER_ARG_OFFSET (current_sym_value, XEXP (XEXP (DECL_RTL (parms), 0), 0)); dbxout_type (TREE_TYPE (parms), 0); dbxout_finish_symbol (parms); } else if (MEM_P (DECL_RTL (parms)) && XEXP (DECL_RTL (parms), 0) != const0_rtx /* ??? A constant address for a parm can happen when the reg it lives in is equiv to a constant in memory. Should make this not happen, after 2.4. */ && ! CONSTANT_P (XEXP (DECL_RTL (parms), 0))) { /* Parm was passed in registers but lives on the stack. */ current_sym_code = N_PSYM; /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...))), in which case we want the value of that CONST_INT, or (MEM (REG ...)), in which case we use a value of zero. 
*/ if (REG_P (XEXP (DECL_RTL (parms), 0))) current_sym_value = 0; else current_sym_value = INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1)); current_sym_addr = 0; /* Make a big endian correction if the mode of the type of the parameter is not the same as the mode of the rtl. */ if (BYTES_BIG_ENDIAN && TYPE_MODE (TREE_TYPE (parms)) != GET_MODE (DECL_RTL (parms)) && GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (parms))) < UNITS_PER_WORD) { current_sym_value += GET_MODE_SIZE (GET_MODE (DECL_RTL (parms))) - GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (parms))); } FORCE_TEXT; if (DECL_NAME (parms)) { current_sym_nchars = 2 + strlen (IDENTIFIER_POINTER (DECL_NAME (parms))); fprintf (asmfile, "%s\"%s:%c", ASM_STABS_OP, IDENTIFIER_POINTER (DECL_NAME (parms)), DBX_MEMPARM_STABS_LETTER); } else { current_sym_nchars = 8; fprintf (asmfile, "%s\"(anon):%c", ASM_STABS_OP, DBX_MEMPARM_STABS_LETTER); } current_sym_value = DEBUGGER_ARG_OFFSET (current_sym_value, XEXP (DECL_RTL (parms), 0)); dbxout_type (TREE_TYPE (parms), 0); dbxout_finish_symbol (parms); } } DBXOUT_DECR_NESTING; } /* Output definitions for the places where parms live during the function, when different from where they were passed, when the parms were passed in memory. It is not useful to do this for parms passed in registers that live during the function in different registers, because it is impossible to look in the passed register for the passed value, so we use the within-the-function register to begin with. PARMS is a chain of PARM_DECL nodes. */ void dbxout_reg_parms (tree parms) { ++debug_nesting; for (; parms; parms = TREE_CHAIN (parms)) if (DECL_NAME (parms) && PARM_PASSED_IN_MEMORY (parms)) { dbxout_prepare_symbol (parms); /* Report parms that live in registers during the function but were passed in memory. */ if (REG_P (DECL_RTL (parms)) && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER) dbxout_symbol_location (parms, TREE_TYPE (parms), 0, DECL_RTL (parms)); else if (GET_CODE (DECL_RTL (parms)) == CONCAT) dbxout_symbol_location (parms, TREE_TYPE (parms), 0, DECL_RTL (parms)); /* Report parms that live in memory but not where they were passed. */ else if (MEM_P (DECL_RTL (parms)) && ! rtx_equal_p (DECL_RTL (parms), DECL_INCOMING_RTL (parms))) dbxout_symbol_location (parms, TREE_TYPE (parms), 0, DECL_RTL (parms)); } DBXOUT_DECR_NESTING; } /* Given a chain of ..._TYPE nodes (as come in a parameter list), output definitions of those names, in raw form */ static void dbxout_args (tree args) { while (args) { putc (',', asmfile); dbxout_type (TREE_VALUE (args), 0); CHARS (1); args = TREE_CHAIN (args); } } /* Output everything about a symbol block (a BLOCK node that represents a scope level), including recursive output of contained blocks. BLOCK is the BLOCK node. DEPTH is its depth within containing symbol blocks. ARGS is usually zero; but for the outermost block of the body of a function, it is a chain of PARM_DECLs for the function parameters. We output definitions of all the register parms as if they were local variables of that block. If -g1 was used, we count blocks just the same, but output nothing except for the outermost block. Actually, BLOCK may be several blocks chained together. We handle them all in sequence. 
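   For illustration (not part of the original source): a nested block that produced at least one symbol is typically bracketed in the assembler output as

        ... stabs for the block's own variables ...
        .stabn 192,0,0,LBB2
        ... output for any subblocks ...
        .stabn 224,0,0,LBE2

   where 192 is N_LBRAC, 224 is N_RBRAC, 2 is the block number, and on targets defining DBX_BLOCKS_FUNCTION_RELATIVE each label is emitted relative to the function begin label, e.g. LBB2-Lfunc.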
*/ static void dbxout_block (tree block, int depth, tree args) { int blocknum = -1; #if DBX_BLOCKS_FUNCTION_RELATIVE const char *begin_label; if (current_function_func_begin_label != NULL_TREE) begin_label = IDENTIFIER_POINTER (current_function_func_begin_label); else begin_label = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0); #endif while (block) { /* Ignore blocks never expanded or otherwise marked as real. */ if (TREE_USED (block) && TREE_ASM_WRITTEN (block)) { int did_output; /* In dbx format, the syms of a block come before the N_LBRAC. If nothing is output, we don't need the N_LBRAC, either. */ did_output = 0; if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0) did_output = dbxout_syms (BLOCK_VARS (block)); if (args) dbxout_reg_parms (args); /* Now output an N_LBRAC symbol to represent the beginning of the block. Use the block's tree-walk order to generate the assembler symbols LBBn and LBEn that final will define around the code in this block. */ if (depth > 0 && did_output) { char buf[20]; blocknum = BLOCK_NUMBER (block); ASM_GENERATE_INTERNAL_LABEL (buf, "LBB", blocknum); if (BLOCK_HANDLER_BLOCK (block)) { /* A catch block. Must precede N_LBRAC. */ tree decl = BLOCK_VARS (block); while (decl) { fprintf (asmfile, "%s\"%s:C1\",%d,0,0,", ASM_STABS_OP, IDENTIFIER_POINTER (DECL_NAME (decl)), N_CATCH); assemble_name (asmfile, buf); fprintf (asmfile, "\n"); decl = TREE_CHAIN (decl); } } #ifdef DBX_OUTPUT_LBRAC DBX_OUTPUT_LBRAC (asmfile, buf); #else fprintf (asmfile, "%s%d,0,0,", ASM_STABN_OP, N_LBRAC); assemble_name (asmfile, buf); #if DBX_BLOCKS_FUNCTION_RELATIVE putc ('-', asmfile); assemble_name (asmfile, begin_label); #endif fprintf (asmfile, "\n"); #endif } /* Output the subblocks. */ dbxout_block (BLOCK_SUBBLOCKS (block), depth + 1, NULL_TREE); /* Refer to the marker for the end of the block. */ if (depth > 0 && did_output) { char buf[20]; ASM_GENERATE_INTERNAL_LABEL (buf, "LBE", blocknum); #ifdef DBX_OUTPUT_RBRAC DBX_OUTPUT_RBRAC (asmfile, buf); #else fprintf (asmfile, "%s%d,0,0,", ASM_STABN_OP, N_RBRAC); assemble_name (asmfile, buf); #if DBX_BLOCKS_FUNCTION_RELATIVE putc ('-', asmfile); assemble_name (asmfile, begin_label); #endif fprintf (asmfile, "\n"); #endif } } block = BLOCK_CHAIN (block); } } /* Output the information about a function and its arguments and result. Usually this follows the function's code, but on some systems, it comes before. */ #if defined (DBX_DEBUGGING_INFO) static void dbxout_begin_function (tree decl) { int saved_tree_used1 = TREE_USED (decl); TREE_USED (decl) = 1; if (DECL_NAME (DECL_RESULT (decl)) != 0) { int saved_tree_used2 = TREE_USED (DECL_RESULT (decl)); TREE_USED (DECL_RESULT (decl)) = 1; dbxout_symbol (decl, 0); TREE_USED (DECL_RESULT (decl)) = saved_tree_used2; } else dbxout_symbol (decl, 0); TREE_USED (decl) = saved_tree_used1; dbxout_parms (DECL_ARGUMENTS (decl)); if (DECL_NAME (DECL_RESULT (decl)) != 0) dbxout_symbol (DECL_RESULT (decl), 1); } #endif /* DBX_DEBUGGING_INFO */ #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */ /* Type information for dbxout.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_typeinfo (void *x_p) { struct typeinfo * const x = (struct typeinfo *)x_p; if (ggc_test_and_set_mark (x)) { } } void gt_pch_nx_typeinfo (void *x_p) { struct typeinfo * const x = (struct typeinfo *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_8typeinfo)) { } } void gt_pch_p_8typeinfo (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct typeinfo * const x ATTRIBUTE_UNUSED = (struct typeinfo *)x_p; } /* GC roots. */ static void gt_ggc_ma_typevec (void *); static void gt_ggc_ma_typevec (void *x_p ATTRIBUTE_UNUSED) { if (typevec != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(typevec_len); i0++) { } ggc_mark (typevec); } } static void gt_pch_pa_typevec (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_typevec (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (typevec != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(typevec_len); i0++) { } if ((void *)(&typevec) == this_obj) op (&(typevec), cookie); } } static void gt_pch_na_typevec (void *); static void gt_pch_na_typevec (void *x_p ATTRIBUTE_UNUSED) { if (typevec != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(typevec_len); i1++) { } gt_pch_note_object (typevec, &typevec, gt_pch_pa_typevec); } } const struct ggc_root_tab gt_ggc_r_gt_dbxout_h[] = { { &lastfile, 1, sizeof (lastfile), >_ggc_m_S, (gt_pointer_walker) >_pch_n_S }, { &preinit_symbols, 1, sizeof (preinit_symbols), >_ggc_mx_tree_node, >_pch_nx_tree_node }, { &typevec, 1, sizeof (typevec), >_ggc_ma_typevec, >_pch_na_typevec }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_dbxout_h[] = { { &lastfile_is_base, 1, sizeof (lastfile_is_base), NULL, NULL }, { &source_label_number, 1, sizeof (source_label_number), NULL, NULL }, { &have_used_extensions, 1, sizeof (have_used_extensions), NULL, NULL }, { &dbxout_source_line_counter, 1, sizeof (dbxout_source_line_counter), NULL, NULL }, { &scope_labelno, 1, sizeof (scope_labelno), NULL, NULL }, { &next_file_number, 1, sizeof (next_file_number), NULL, NULL }, { &next_type_number, 1, sizeof (next_type_number), NULL, NULL }, { &typevec_len, 1, sizeof (typevec_len), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* DDG - Data Dependence Graph implementation. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Ayal Zaks and Mustafa Hagog This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Instruction scheduling pass. This file contains definitions used internally in the scheduler. 
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_SCHED_INT_H #define GCC_SCHED_INT_H /* For state_t. */ /* For regset_head. */ /* For reg_note. */ /* Pointer to data describing the current DFA state. */ extern state_t curr_state; /* Forward declaration. */ struct ready_list; /* Describe state of dependencies used during sched_analyze phase. */ struct deps { /* The *_insns and *_mems are paired lists. Each pending memory operation will have a pointer to the MEM rtx on one list and a pointer to the containing insn on the other list in the same place in the list. */ /* We can't use add_dependence like the old code did, because a single insn may have multiple memory accesses, and hence needs to be on the list once for each memory access. Add_dependence won't let you add an insn to a list more than once. */ /* An INSN_LIST containing all insns with pending read operations. */ rtx pending_read_insns; /* An EXPR_LIST containing all MEM rtx's which are pending reads. */ rtx pending_read_mems; /* An INSN_LIST containing all insns with pending write operations. */ rtx pending_write_insns; /* An EXPR_LIST containing all MEM rtx's which are pending writes. */ rtx pending_write_mems; /* Indicates the combined length of the two pending lists. We must prevent these lists from ever growing too large since the number of dependencies produced is at least O(N*N), and execution time is at least O(4*N*N), as a function of the length of these pending lists. */ int pending_lists_length; /* Length of the pending memory flush list. Large functions with no calls may build up extremely large lists. */ int pending_flush_length; /* The last insn upon which all memory references must depend. This is an insn which flushed the pending lists, creating a dependency between it and all previously pending memory references. This creates a barrier (or a checkpoint) which no memory reference is allowed to cross. This includes all non constant CALL_INSNs. When we do interprocedural alias analysis, this restriction can be relaxed. This may also be an INSN that writes memory if the pending lists grow too large. */ rtx last_pending_memory_flush; /* A list of the last function calls we have seen. We use a list to represent last function calls from multiple predecessor blocks. Used to prevent register lifetimes from expanding unnecessarily. */ rtx last_function_call; /* A list of insns which use a pseudo register that does not already cross a call. We create dependencies between each of those insn and the next call insn, to ensure that they won't cross a call after scheduling is done. */ rtx sched_before_next_call; /* Used to keep post-call pseudo/hard reg movements together with the call. 
*/ enum { not_post_call, post_call, post_call_initial } in_post_call_group_p; /* Set to the tail insn of the outermost libcall block. When nonzero, we will mark each insn processed by sched_analyze_insn with SCHED_GROUP_P to ensure libcalls are scheduled as a unit. */ rtx libcall_block_tail_insn; /* The maximum register number for the following arrays. Before reload this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER. */ int max_reg; /* Element N is the next insn that sets (hard or pseudo) register N within the current basic block; or zero, if there is no such insn. Needed for new registers which may be introduced by splitting insns. */ struct deps_reg { rtx uses; rtx sets; rtx clobbers; int uses_length; int clobbers_length; } *reg_last; /* Element N is set for each register that has any nonzero element in reg_last[N].{uses,sets,clobbers}. */ regset_head reg_last_in_use; /* Element N is set for each register that is conditionally set. */ regset_head reg_conditional_sets; }; /* This structure holds some state of the current scheduling pass, and contains some function pointers that abstract out some of the non-generic functionality from functions such as schedule_block or schedule_insn. There is one global variable, current_sched_info, which points to the sched_info structure currently in use. */ struct sched_info { /* Add all insns that are initially ready to the ready list. Called once before scheduling a set of insns. */ void (*init_ready_list) (struct ready_list *); /* Called after taking an insn from the ready list. Returns nonzero if this insn can be scheduled, nonzero if we should silently discard it. */ int (*can_schedule_ready_p) (rtx); /* Return nonzero if there are more insns that should be scheduled. */ int (*schedule_more_p) (void); /* Called after an insn has all its dependencies resolved. Return nonzero if it should be moved to the ready list or the queue, or zero if we should silently discard it. */ int (*new_ready) (rtx); /* Compare priority of two insns. Return a positive number if the second insn is to be preferred for scheduling, and a negative one if the first is to be preferred. Zero if they are equally good. */ int (*rank) (rtx, rtx); /* Return a string that contains the insn uid and optionally anything else necessary to identify this insn in an output. It's valid to use a static buffer for this. The ALIGNED parameter should cause the string to be formatted so that multiple output lines will line up nicely. */ const char *(*print_insn) (rtx, int); /* Return nonzero if an insn should be included in priority calculations. */ int (*contributes_to_priority) (rtx, rtx); /* Called when computing dependencies for a JUMP_INSN. This function should store the set of registers that must be considered as set by the jump in the regset. */ void (*compute_jump_reg_dependencies) (rtx, regset, regset, regset); /* The boundaries of the set of insns to be scheduled. */ rtx prev_head, next_tail; /* Filled in after the schedule is finished; the first and last scheduled insns. */ rtx head, tail; /* If nonzero, enables an additional sanity check in schedule_block. */ unsigned int queue_must_finish_empty:1; /* Nonzero if we should use cselib for better alias analysis. This must be 0 if the dependency information is used after sched_analyze has completed, e.g. if we're using it to initialize state for successor blocks in region scheduling. */ unsigned int use_cselib:1; /* Maximum priority that has been assigned to an insn. 
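   For orientation (an illustrative sketch, not part of the original source; all my_* names are hypothetical placeholders): a scheduler variant supplies its callbacks in one of these structures and points current_sched_info at it before calling schedule_block, roughly

        static struct sched_info my_sched_info = {
          my_init_ready_list, my_can_schedule_ready_p, my_schedule_more_p,
          my_new_ready, my_rank, my_print_insn, my_contributes_to_priority,
          my_compute_jump_reg_dependencies,
          NULL, NULL, NULL, NULL,
          0, 0, 0
        };
        current_sched_info = &my_sched_info;

   The four NULLs stand for prev_head, next_tail, head and tail, and the trailing zeros for the two flag bits and sched_max_insns_priority; the real instances live in sched-rgn.c and sched-ebb.c.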
*/ int sched_max_insns_priority; }; extern struct sched_info *current_sched_info; /* Indexed by INSN_UID, the collection of all data associated with a single instruction. */ struct haifa_insn_data { /* A list of insns which depend on the instruction. Unlike LOG_LINKS, it represents forward dependencies. */ rtx depend; /* The line number note in effect for each insn. For line number notes, this indicates whether the note may be reused. */ rtx line_note; /* Logical uid gives the original ordering of the insns. */ int luid; /* A priority for each insn. */ int priority; /* The number of incoming edges in the forward dependency graph. As scheduling proceeds, counts are decreased. An insn moves to the ready queue when its counter reaches zero. */ int dep_count; /* An encoding of the blockage range function. Both unit and range are coded. This member is used only for old pipeline interface. */ unsigned int blockage; /* Number of instructions referring to this insn. */ int ref_count; /* The minimum clock tick at which the insn becomes ready. This is used to note timing constraints for the insns in the pending list. */ int tick; short cost; /* An encoding of the function units used. This member is used only for old pipeline interface. */ short units; /* This weight is an estimation of the insn's contribution to register pressure. */ short reg_weight; /* Some insns (e.g. call) are not allowed to move across blocks. */ unsigned int cant_move : 1; /* Set if there's DEF-USE dependence between some speculatively moved load insn and this one. */ unsigned int fed_by_spec_load : 1; unsigned int is_load_insn : 1; /* Nonzero if priority has been computed already. */ unsigned int priority_known : 1; }; extern struct haifa_insn_data *h_i_d; /* Accessor macros for h_i_d. There are more in haifa-sched.c and sched-rgn.c. */ #define INSN_DEPEND(INSN) (h_i_d[INSN_UID (INSN)].depend) #define SCHED_INSN_LUID(INSN) (h_i_d[INSN_UID (INSN)].luid) #define CANT_MOVE(insn) (h_i_d[INSN_UID (insn)].cant_move) #define INSN_DEP_COUNT(INSN) (h_i_d[INSN_UID (INSN)].dep_count) #define INSN_PRIORITY(INSN) (h_i_d[INSN_UID (INSN)].priority) #define INSN_PRIORITY_KNOWN(INSN) (h_i_d[INSN_UID (INSN)].priority_known) #define INSN_COST(INSN) (h_i_d[INSN_UID (INSN)].cost) #define INSN_UNIT(INSN) (h_i_d[INSN_UID (INSN)].units) #define INSN_REG_WEIGHT(INSN) (h_i_d[INSN_UID (INSN)].reg_weight) #define INSN_BLOCKAGE(INSN) (h_i_d[INSN_UID (INSN)].blockage) #define UNIT_BITS 5 #define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1) #define ENCODE_BLOCKAGE(U, R) \ (((U) << BLOCKAGE_BITS \ | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS \ | MAX_BLOCKAGE_COST (R)) #define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS)) #define BLOCKAGE_RANGE(B) \ (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \ | ((B) & BLOCKAGE_MASK)) /* Encodings of the `_unit_blockage_range' function. */ #define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2)) #define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1)) extern FILE *sched_dump; extern int sched_verbose; /* Exception Free Loads: We define five classes of speculative loads: IFREE, IRISKY, PFREE, PRISKY, and MFREE. IFREE loads are loads that are proved to be exception-free, just by examining the load insn. Examples for such loads are loads from TOC and loads of global data. IRISKY loads are loads that are proved to be exception-risky, just by examining the load insn. Examples for such loads are volatile loads and loads from shared memory. 
PFREE loads are loads for which we can prove, by examining other insns, that they are exception-free. Currently, this class consists of loads for which we are able to find a "similar load", either in the target block, or, if only one split-block exists, in that split block. Load2 is similar to load1 if both have same single base register. We identify only part of the similar loads, by finding an insn upon which both load1 and load2 have a DEF-USE dependence. PRISKY loads are loads for which we can prove, by examining other insns, that they are exception-risky. Currently we have two proofs for such loads. The first proof detects loads that are probably guarded by a test on the memory address. This proof is based on the backward and forward data dependence information for the region. Let load-insn be the examined load. Load-insn is PRISKY iff ALL the following hold: - insn1 is not in the same block as load-insn - there is a DEF-USE dependence chain (insn1, ..., load-insn) - test-insn is either a compare or a branch, not in the same block as load-insn - load-insn is reachable from test-insn - there is a DEF-USE dependence chain (insn1, ..., test-insn) This proof might fail when the compare and the load are fed by an insn not in the region. To solve this, we will add to this group all loads that have no input DEF-USE dependence. The second proof detects loads that are directly or indirectly fed by a speculative load. This proof is affected by the scheduling process. We will use the flag fed_by_spec_load. Initially, all insns have this flag reset. After a speculative motion of an insn, if insn is either a load, or marked as fed_by_spec_load, we will also mark as fed_by_spec_load every insn1 for which a DEF-USE dependence (insn, insn1) exists. A load which is fed_by_spec_load is also PRISKY. MFREE (maybe-free) loads are all the remaining loads. They may be exception-free, but we cannot prove it. Now, all loads in IFREE and PFREE classes are considered exception-free, while all loads in IRISKY and PRISKY classes are considered exception-risky. As for loads in the MFREE class, these are considered either exception-free or exception-risky, depending on whether we are pessimistic or optimistic. We have to take the pessimistic approach to assure the safety of speculative scheduling, but we can take the optimistic approach by invoking the -fsched_spec_load_dangerous option. */ enum INSN_TRAP_CLASS { TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2, PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5 }; #define WORST_CLASS(class1, class2) \ ((class1 > class2) ? class1 : class2) #ifndef __GNUC__ #define __inline #endif #ifndef HAIFA_INLINE #define HAIFA_INLINE __inline #endif /* Functions in sched-vis.c. */ extern void init_target_units (void); extern void insn_print_units (rtx); extern void init_block_visualization (void); extern void print_block_visualization (const char *); extern void visualize_scheduled_insns (int); extern void visualize_no_unit (rtx); extern void visualize_stall_cycles (int); extern void visualize_alloc (void); extern void visualize_free (void); /* Functions in sched-deps.c. 
*/ extern int add_dependence (rtx, rtx, enum reg_note); extern void add_insn_mem_dependence (struct deps *, rtx *, rtx *, rtx, rtx); extern void sched_analyze (struct deps *, rtx, rtx); extern void init_deps (struct deps *); extern void free_deps (struct deps *); extern void init_deps_global (void); extern void finish_deps_global (void); extern void add_forward_dependence (rtx, rtx, enum reg_note); extern void compute_forward_dependences (rtx, rtx); extern rtx find_insn_list (rtx, rtx); extern void init_dependency_caches (int); extern void free_dependency_caches (void); /* Functions in haifa-sched.c. */ extern int haifa_classify_insn (rtx); extern void get_block_head_tail (int, rtx *, rtx *); extern int no_real_insns_p (rtx, rtx); extern void rm_line_notes (rtx, rtx); extern void save_line_notes (int, rtx, rtx); extern void restore_line_notes (rtx, rtx); extern void rm_redundant_line_notes (void); extern void rm_other_notes (rtx, rtx); extern int insn_issue_delay (rtx); extern int set_priorities (rtx, rtx); extern void schedule_block (int, int); extern void sched_init (FILE *); extern void sched_finish (void); extern void ready_add (struct ready_list *, rtx); /* The following are exported for the benefit of debugging functions. It would be nicer to keep them private to haifa-sched.c. */ extern int insn_unit (rtx); extern int insn_cost (rtx, rtx, rtx); extern rtx get_unit_last_insn (int); extern int actual_hazard_this_instance (int, int, rtx, int, int); extern void print_insn (char *, rtx, int); #endif /* GCC_SCHED_INT_H */ /* Form lists of pseudo register references for autoinc optimization for GNU compiler. This is part of flow optimization. Copyright (C) 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_DF_H #define GCC_DF_H #define DF_RD 1 /* Reaching definitions. */ #define DF_RU 2 /* Reaching uses. */ #define DF_LR 4 /* Live registers. */ #define DF_DU_CHAIN 8 /* Def-use chain. */ #define DF_UD_CHAIN 16 /* Use-def chain. */ #define DF_REG_INFO 32 /* Register info. */ #define DF_RD_CHAIN 64 /* Reg-def chain. */ #define DF_RU_CHAIN 128 /* Reg-use chain. */ #define DF_ALL 255 #define DF_HARD_REGS 1024 /* Mark hard registers. */ #define DF_EQUIV_NOTES 2048 /* Mark uses present in EQUIV/EQUAL notes. */ #define DF_FOR_REGALLOC 4096 /* If called for the register allocator. */ enum df_ref_type {DF_REF_REG_DEF, DF_REF_REG_USE, DF_REF_REG_MEM_LOAD, DF_REF_REG_MEM_STORE}; #define DF_REF_TYPE_NAMES {"def", "use", "mem load", "mem store"} /* Link on a def-use or use-def chain. */ struct df_link { struct df_link *next; struct ref *ref; }; enum df_ref_flags { /* Read-modify-write refs generate both a use and a def and these are marked with this flag to show that they are not independent. 
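   For example (an illustrative note, not part of the original source): a store to a SUBREG that covers only part of the register, such as

        (set (subreg:HI (reg:SI 100) 0) (reg:HI 101))

   does not kill all of pseudo 100, so the dataflow code records both a def and a use of register 100 for it and marks them DF_REF_READ_WRITE; auto-increment addresses give rise to the same pattern.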
*/ DF_REF_READ_WRITE = 1, /* This flag is set on register references inside a subreg on machines which have CANNOT_CHANGE_MODE_CLASS. Note, that this flag can also be set on df_refs representing the REG itself (i.e., one might not see the subreg anymore). Also note, that this flag is set also for hardreg refs, i.e., you must check yourself if it's a pseudo. */ DF_REF_MODE_CHANGE = 2, /* This flag is set, if we stripped the subreg from the reference. In this case we must make conservative guesses, at what the outer mode was. */ DF_REF_STRIPPED = 4, /* This flag is set during register allocation if it's okay for the reference's INSN to have one of its operands replaced with a memory reference. */ DF_REF_MEM_OK = 8 }; /* Define a register reference structure. One of these is allocated for every register reference (use or def). Note some register references (e.g., post_inc, subreg) generate both a def and a use. */ struct ref { rtx reg; /* The register referenced. */ rtx insn; /* Insn containing ref. */ rtx *loc; /* The location of the reg. */ struct df_link *chain; /* Head of def-use or use-def chain. */ unsigned int id; /* Ref index. */ enum df_ref_type type; /* Type of ref. */ enum df_ref_flags flags; /* Various flags. */ void *data; /* The data assigned to it by user. */ }; /* One of these structures is allocated for every insn. */ struct insn_df_info { struct df_link *defs; /* Head of insn-def chain. */ struct df_link *uses; /* Head of insn-use chain. */ /* ???? The following luid field should be considered private so that we can change it on the fly to accommodate new insns? */ int luid; /* Logical UID. */ }; /* One of these structures is allocated for every reg. */ struct reg_info { struct df_link *defs; /* Head of reg-def chain. */ struct df_link *uses; /* Head of reg-use chain. */ int lifetime; int n_defs; int n_uses; }; /* One of these structures is allocated for every basic block. */ struct bb_df_info { /* Reaching def bitmaps have def_id elements. */ bitmap rd_kill; bitmap rd_gen; bitmap rd_in; bitmap rd_out; /* Reaching use bitmaps have use_id elements. */ bitmap ru_kill; bitmap ru_gen; bitmap ru_in; bitmap ru_out; /* Live variable bitmaps have n_regs elements. */ bitmap lr_def; bitmap lr_use; bitmap lr_in; bitmap lr_out; int rd_valid; int ru_valid; int lr_valid; }; struct df { int flags; /* Indicates what's recorded. */ struct bb_df_info *bbs; /* Basic block table. */ struct ref **defs; /* Def table, indexed by def_id. */ struct ref **uses; /* Use table, indexed by use_id. */ struct ref **reg_def_last; /* Indexed by regno. */ struct reg_info *regs; /* Regs table, index by regno. */ unsigned int reg_size; /* Size of regs table. */ struct insn_df_info *insns; /* Insn table, indexed by insn UID. */ unsigned int insn_size; /* Size of insn table. */ unsigned int def_id; /* Next def ID. */ unsigned int def_size; /* Size of def table. */ unsigned int n_defs; /* Size of def bitmaps. */ unsigned int use_id; /* Next use ID. */ unsigned int use_size; /* Size of use table. */ unsigned int n_uses; /* Size of use bitmaps. */ unsigned int n_bbs; /* Number of basic blocks. */ unsigned int n_regs; /* Number of regs. */ unsigned int def_id_save; /* Saved next def ID. */ unsigned int use_id_save; /* Saved next use ID. */ bitmap insns_modified; /* Insns that (may) have changed. */ bitmap bbs_modified; /* Blocks that (may) have changed. */ bitmap all_blocks; /* All blocks in CFG. */ int *dfs_order; /* DFS order -> block number. */ int *rc_order; /* Reverse completion order -> block number. 
*/ int *rts_order; /* Reverse top sort order -> block number. */ int *inverse_rc_map; /* Block number -> reverse completion order. */ int *inverse_dfs_map; /* Block number -> DFS order. */ int *inverse_rts_map; /* Block number -> reverse top-sort order. */ }; struct df_map { rtx old; rtx new; }; #define DF_BB_INFO(REFS, BB) (&REFS->bbs[(BB)->index]) /* Macros to access the elements within the ref structure. */ #define DF_REF_REAL_REG(REF) (GET_CODE ((REF)->reg) == SUBREG \ ? SUBREG_REG ((REF)->reg) : ((REF)->reg)) #define DF_REF_REGNO(REF) REGNO (DF_REF_REAL_REG (REF)) #define DF_REF_REAL_LOC(REF) (GET_CODE ((REF)->reg) == SUBREG \ ? &SUBREG_REG ((REF)->reg) : ((REF)->loc)) #define DF_REF_REG(REF) ((REF)->reg) #define DF_REF_LOC(REF) ((REF)->loc) #define DF_REF_BB(REF) (BLOCK_FOR_INSN ((REF)->insn)) #define DF_REF_BBNO(REF) (BLOCK_FOR_INSN ((REF)->insn)->index) #define DF_REF_INSN(REF) ((REF)->insn) #define DF_REF_INSN_UID(REF) (INSN_UID ((REF)->insn)) #define DF_REF_TYPE(REF) ((REF)->type) #define DF_REF_CHAIN(REF) ((REF)->chain) #define DF_REF_ID(REF) ((REF)->id) #define DF_REF_FLAGS(REF) ((REF)->flags) #define DF_REF_DATA(REF) ((REF)->data) /* Macros to determine the reference type. */ #define DF_REF_REG_DEF_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_DEF) #define DF_REF_REG_USE_P(REF) ((REF) && ! DF_REF_REG_DEF_P (REF)) #define DF_REF_REG_MEM_STORE_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_MEM_STORE) #define DF_REF_REG_MEM_LOAD_P(REF) (DF_REF_TYPE (REF) == DF_REF_REG_MEM_LOAD) #define DF_REF_REG_MEM_P(REF) (DF_REF_REG_MEM_STORE_P (REF) \ || DF_REF_REG_MEM_LOAD_P (REF)) /* Macros to access the elements within the reg_info structure table. */ #define DF_REGNO_FIRST_DEF(DF, REGNUM) \ ((DF)->regs[REGNUM].defs ? (DF)->regs[REGNUM].defs->ref : 0) #define DF_REGNO_LAST_USE(DF, REGNUM) \ ((DF)->regs[REGNUM].uses ? (DF)->regs[REGNUM].uses->ref : 0) #define DF_REGNO_FIRST_BB(DF, REGNUM) \ (DF_REGNO_FIRST_DEF (DF, REGNUM) \ ? DF_REF_BB (DF_REGNO_FIRST_DEF (DF, REGNUM)) : 0) #define DF_REGNO_LAST_BB(DF, REGNUM) \ (DF_REGNO_LAST_USE (DF, REGNUM) \ ? DF_REF_BB (DF_REGNO_LAST_USE (DF, REGNUM)) : 0) /* Macros to access the elements within the insn_df_info structure table. */ #define DF_INSN_LUID(DF, INSN) ((DF)->insns[INSN_UID (INSN)].luid) #define DF_INSN_DEFS(DF, INSN) ((DF)->insns[INSN_UID (INSN)].defs) #define DF_INSN_USES(DF, INSN) ((DF)->insns[INSN_UID (INSN)].uses) /* Functions to build and analyze dataflow information. */ extern struct df *df_init (void); extern int df_analyze (struct df *, bitmap, int); extern void df_analyze_subcfg (struct df *, bitmap, int); extern void df_finish (struct df *); extern void df_dump (struct df *, int, FILE *); /* Functions to modify insns. 
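   Before the modification functions that follow, an illustrative sketch of how the tables and accessors above are typically consumed (not part of the original source; def stands for any struct ref obtained from the tables, e.g. via DF_INSN_DEFS or df_bb_regno_first_def_find):

        struct df *df = df_init ();
        df_analyze (df, NULL, DF_DU_CHAIN | DF_UD_CHAIN | DF_REG_INFO);
        ...
        struct df_link *link;
        for (link = DF_REF_CHAIN (def); link; link = link->next)
          ... link->ref is a use reached by this def ...
        ...
        df_finish (df);

   With DF_DU_CHAIN requested, the chain of a def lists its uses; with DF_UD_CHAIN, the chain of a use lists its reaching defs.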
*/ extern void df_insn_modify (struct df *, basic_block, rtx); extern rtx df_insn_delete (struct df *, basic_block, rtx); extern rtx df_pattern_emit_before (struct df *, rtx, basic_block, rtx); extern rtx df_jump_pattern_emit_after (struct df *, rtx, basic_block, rtx); extern rtx df_pattern_emit_after (struct df *, rtx, basic_block, rtx); extern rtx df_insn_move_before (struct df *, basic_block, rtx, basic_block, rtx); extern int df_reg_replace (struct df *, bitmap, rtx, rtx); extern int df_ref_reg_replace (struct df *, struct ref *, rtx, rtx); extern int df_ref_remove (struct df *, struct ref *); extern int df_insn_reg_replace (struct df *, basic_block, rtx, rtx, rtx); extern int df_insn_mem_replace (struct df *, basic_block, rtx, rtx, rtx); extern struct ref *df_bb_def_use_swap (struct df *, basic_block, rtx, rtx, unsigned int); /* Functions to query dataflow information. */ extern basic_block df_regno_bb (struct df *, unsigned int); extern int df_reg_lifetime (struct df *, rtx); extern int df_reg_global_p (struct df *, rtx); extern int df_insn_regno_def_p (struct df *, basic_block, rtx, unsigned int); extern int df_insn_dominates_all_uses_p (struct df *, basic_block, rtx); extern int df_insn_dominates_uses_p (struct df *, basic_block, rtx, bitmap); extern int df_bb_reg_live_start_p (struct df *, basic_block, rtx); extern int df_bb_reg_live_end_p (struct df *, basic_block, rtx); extern int df_bb_regs_lives_compare (struct df *, basic_block, rtx, rtx); extern rtx df_bb_single_def_use_insn_find (struct df *, basic_block, rtx, rtx); extern struct ref *df_bb_regno_last_use_find (struct df *, basic_block, unsigned int); extern struct ref *df_bb_regno_first_def_find (struct df *, basic_block, unsigned int); extern struct ref *df_bb_regno_last_def_find (struct df *, basic_block, unsigned int); extern struct ref *df_find_def (struct df *, rtx, rtx); extern int df_reg_used (struct df *, rtx, rtx); /* Functions for debugging from GDB. */ extern void debug_df_insn (rtx); extern void debug_df_regno (unsigned int); extern void debug_df_reg (rtx); extern void debug_df_defno (unsigned int); extern void debug_df_useno (unsigned int); extern void debug_df_ref (struct ref *); extern void debug_df_chain (struct df_link *); extern void df_insn_debug (struct df *, rtx, FILE *); extern void df_insn_debug_regno (struct df *, rtx, FILE *); /* Meet over any path (UNION) or meet over all paths (INTERSECTION). */ enum df_confluence_op { DF_UNION, DF_INTERSECTION }; /* Dataflow direction. */ enum df_flow_dir { DF_FORWARD, DF_BACKWARD }; typedef void (*transfer_function) (int, int *, void *, void *, void *, void *, void *); /* The description of a dataflow problem to solve. */ enum set_representation { SR_SBITMAP, /* Represent sets by bitmaps. */ SR_BITMAP /* Represent sets by sbitmaps. */ }; struct dataflow { enum set_representation repr; /* The way the sets are represented. */ /* The following arrays are indexed by block indices, so they must always be large enough even if we restrict ourselves just to a subset of cfg. */ void **gen, **kill; /* Gen and kill sets. */ void **in, **out; /* Results. */ enum df_flow_dir dir; /* Dataflow direction. */ enum df_confluence_op conf_op; /* Confluence operator. */ unsigned n_blocks; /* Number of basic blocks in the order. */ int *order; /* The list of basic blocks to work with, in the order they should be processed in. */ transfer_function transfun; /* The transfer function. */ void *data; /* Data used by the transfer function. 
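   For illustration (a sketch, not part of the original source; gen, kill, in, out, order, n_blocks and my_transfer are placeholders supplied by the client): a problem is described by filling in this structure and handing it to iterative_dataflow, declared just below:

        struct dataflow dflow;
        dflow.repr     = SR_SBITMAP;
        dflow.dir      = DF_FORWARD;
        dflow.conf_op  = DF_UNION;
        dflow.gen      = gen;
        dflow.kill     = kill;
        dflow.in       = in;
        dflow.out      = out;
        dflow.n_blocks = n_blocks;
        dflow.order    = order;
        dflow.transfun = my_transfer;
        dflow.data     = NULL;
        iterative_dataflow (&dflow);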
*/ }; extern void iterative_dataflow (struct dataflow *); extern bool read_modify_subreg_p (rtx); #endif /* GCC_DF_H */ /* DDG - Data Dependence Graph - interface. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Ayal Zaks and Mustafa Hagog This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_DDG_H #define GCC_DDG_H /* For sbitmap. */ /* For basic_block. */ /* For struct df. */ typedef struct ddg_node *ddg_node_ptr; typedef struct ddg_edge *ddg_edge_ptr; typedef struct ddg *ddg_ptr; typedef struct ddg_scc *ddg_scc_ptr; typedef struct ddg_all_sccs *ddg_all_sccs_ptr; typedef enum {TRUE_DEP, OUTPUT_DEP, ANTI_DEP} dep_type; typedef enum {REG_OR_MEM_DEP, REG_DEP, MEM_DEP, REG_AND_MEM_DEP} dep_data_type; /* The following two macros enables direct access to the successors and predecessors bitmaps held in each ddg_node. Do not make changes to these bitmaps, unless you want to change the DDG. */ #define NODE_SUCCESSORS(x) ((x)->successors) #define NODE_PREDECESSORS(x) ((x)->predecessors) /* A structure that represents a node in the DDG. */ struct ddg_node { /* Each node has a unique CUID index. These indices increase monotonically (according to the order of the corresponding INSN in the BB), starting from 0 with no gaps. */ int cuid; /* The insn represented by the node. */ rtx insn; /* A note preceding INSN (or INSN itself), such that all insns linked from FIRST_NOTE until INSN (inclusive of both) are moved together when reordering the insns. This takes care of notes that should continue to precede INSN. */ rtx first_note; /* Incoming and outgoing dependency edges. */ ddg_edge_ptr in; ddg_edge_ptr out; /* Each bit corresponds to a ddg_node according to its cuid, and is set iff the node is a successor/predecessor of "this" node. */ sbitmap successors; sbitmap predecessors; /* For general use by algorithms manipulating the ddg. */ union { int count; void *info; } aux; }; /* A structure that represents an edge in the DDG. */ struct ddg_edge { /* The source and destination nodes of the dependency edge. */ ddg_node_ptr src; ddg_node_ptr dest; /* TRUE, OUTPUT or ANTI dependency. */ dep_type type; /* REG or MEM dependency. */ dep_data_type data_type; /* Latency of the dependency. */ int latency; /* The distance: number of loop iterations the dependency crosses. */ int distance; /* The following two fields are used to form a linked list of the in/out going edges to/from each node. */ ddg_edge_ptr next_in; ddg_edge_ptr next_out; /* For general use by algorithms manipulating the ddg. */ union { int count; void *info; } aux; }; /* This structure holds the Data Dependence Graph for a basic block. */ struct ddg { /* The basic block for which this DDG is built. */ basic_block bb; /* Number of instructions in the basic block. */ int num_nodes; /* Number of load/store instructions in the BB - statistics. 
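   For illustration (not part of the original source): the per-node edge lists and bitmaps above are typically walked as

        ddg_edge_ptr e;
        for (e = g->nodes[i].out; e; e = e->next_out)
          if (e->distance > 0)
            ... e is a backarc that crosses e->distance loop iterations ...

   while NODE_SUCCESSORS and NODE_PREDECESSORS expose the direct successors and predecessors of each node in set form.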
*/ int num_loads; int num_stores; /* This array holds the nodes in the graph; it is indexed by the node cuid, which follows the order of the instructions in the BB. */ ddg_node_ptr nodes; /* The branch closing the loop. */ ddg_node_ptr closing_branch; /* Build dependence edges for closing_branch, when set. In certain cases, the closing branch can be dealt with separately from the insns of the loop, and then no such deps are needed. */ int closing_branch_deps; /* Array and number of backarcs (edges with distance > 0) in the DDG. */ ddg_edge_ptr *backarcs; int num_backarcs; }; /* Holds information on an SCC (Strongly Connected Component) of the DDG. */ struct ddg_scc { /* A bitmap that represents the nodes of the DDG that are in the SCC. */ sbitmap nodes; /* Array and number of backarcs (edges with distance > 0) in the SCC. */ ddg_edge_ptr *backarcs; int num_backarcs; /* The maximum of (total_latency/total_distance) over all cycles in SCC. */ int recurrence_length; }; /* This structure holds the SCCs of the DDG. */ struct ddg_all_sccs { /* Array that holds the SCCs in the DDG, and their number. */ ddg_scc_ptr *sccs; int num_sccs; ddg_ptr ddg; }; ddg_ptr create_ddg (basic_block, struct df *, int closing_branch_deps); void free_ddg (ddg_ptr); void print_ddg (FILE *, ddg_ptr); void vcg_print_ddg (FILE *, ddg_ptr); void print_ddg_edge (FILE *, ddg_edge_ptr); ddg_node_ptr get_node_of_insn (ddg_ptr, rtx); void find_successors (sbitmap result, ddg_ptr, sbitmap); void find_predecessors (sbitmap result, ddg_ptr, sbitmap); ddg_all_sccs_ptr create_ddg_all_sccs (ddg_ptr); void free_ddg_all_sccs (ddg_all_sccs_ptr); int find_nodes_on_paths (sbitmap result, ddg_ptr, sbitmap from, sbitmap to); int longest_simple_path (ddg_ptr, int from, int to, sbitmap via); #endif /* GCC_DDG_H */ /* A flag indicating that a ddg edge belongs to an SCC or not. */ enum edge_flag {NOT_IN_SCC = 0, IN_SCC}; /* Forward declarations. */ static void add_backarc_to_ddg (ddg_ptr, ddg_edge_ptr); static void add_backarc_to_scc (ddg_scc_ptr, ddg_edge_ptr); static void add_scc_to_ddg (ddg_all_sccs_ptr, ddg_scc_ptr); static void create_ddg_dependence (ddg_ptr, ddg_node_ptr, ddg_node_ptr, rtx); static void create_ddg_dep_no_link (ddg_ptr, ddg_node_ptr, ddg_node_ptr, dep_type, dep_data_type, int); static ddg_edge_ptr create_ddg_edge (ddg_node_ptr, ddg_node_ptr, dep_type, dep_data_type, int, int); static void add_edge_to_ddg (ddg_ptr g, ddg_edge_ptr); /* Auxiliary variable for mem_read_insn_p/mem_write_insn_p. */ static bool mem_ref_p; /* Auxiliary function for mem_read_insn_p. */ static int mark_mem_use (rtx *x, void *data ATTRIBUTE_UNUSED) { if (MEM_P (*x)) mem_ref_p = true; return 0; } /* Auxiliary function for mem_read_insn_p. */ static void mark_mem_use_1 (rtx *x, void *data) { for_each_rtx (x, mark_mem_use, data); } /* Returns nonzero if INSN reads from memory. */ static bool mem_read_insn_p (rtx insn) { mem_ref_p = false; note_uses (&PATTERN (insn), mark_mem_use_1, NULL); return mem_ref_p; } static void mark_mem_store (rtx loc, rtx setter ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { if (MEM_P (loc)) mem_ref_p = true; } /* Returns nonzero if INSN writes to memory. */ static bool mem_write_insn_p (rtx insn) { mem_ref_p = false; note_stores (PATTERN (insn), mark_mem_store, NULL); return mem_ref_p; } /* Returns nonzero if X has access to memory. 
*/ static bool rtx_mem_access_p (rtx x) { int i, j; const char *fmt; enum rtx_code code; if (x == 0) return false; if (MEM_P (x)) return true; code = GET_CODE (x); fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { if (rtx_mem_access_p (XEXP (x, i))) return true; } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) { if (rtx_mem_access_p (XVECEXP (x, i, j))) return true; } } return false; } /* Returns nonzero if INSN reads to or writes from memory. */ static bool mem_access_insn_p (rtx insn) { return rtx_mem_access_p (PATTERN (insn)); } /* Computes the dependence parameters (latency, distance etc.), creates a ddg_edge and adds it to the given DDG. */ static void create_ddg_dependence (ddg_ptr g, ddg_node_ptr src_node, ddg_node_ptr dest_node, rtx link) { ddg_edge_ptr e; int latency, distance = 0; int interloop = (src_node->cuid >= dest_node->cuid); dep_type t = TRUE_DEP; dep_data_type dt = (mem_access_insn_p (src_node->insn) && mem_access_insn_p (dest_node->insn) ? MEM_DEP : REG_DEP); /* For now we don't have an exact calculation of the distance, so assume 1 conservatively. */ if (interloop) distance = 1; if (!link) abort (); /* Note: REG_DEP_ANTI applies to MEM ANTI_DEP as well!! */ if (REG_NOTE_KIND (link) == REG_DEP_ANTI) t = ANTI_DEP; else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT) t = OUTPUT_DEP; latency = insn_cost (src_node->insn, link, dest_node->insn); e = create_ddg_edge (src_node, dest_node, t, dt, latency, distance); if (interloop) { /* Some interloop dependencies are relaxed: 1. Every insn is output dependent on itself; ignore such deps. 2. Every true/flow dependence is an anti dependence in the opposite direction with distance 1; such register deps will be removed by renaming if broken --- ignore them. */ if (!(t == OUTPUT_DEP && src_node == dest_node) && !(t == ANTI_DEP && dt == REG_DEP)) add_backarc_to_ddg (g, e); else free (e); } else add_edge_to_ddg (g, e); } /* The same as the above function, but it doesn't require a link parameter. */ static void create_ddg_dep_no_link (ddg_ptr g, ddg_node_ptr from, ddg_node_ptr to, dep_type d_t, dep_data_type d_dt, int distance) { ddg_edge_ptr e; int l; rtx link = alloc_INSN_LIST (to->insn, NULL_RTX); if (d_t == ANTI_DEP) PUT_REG_NOTE_KIND (link, REG_DEP_ANTI); else if (d_t == OUTPUT_DEP) PUT_REG_NOTE_KIND (link, REG_DEP_OUTPUT); l = insn_cost (from->insn, link, to->insn); free_INSN_LIST_node (link); e = create_ddg_edge (from, to, d_t, d_dt, l, distance); if (distance > 0) add_backarc_to_ddg (g, e); else add_edge_to_ddg (g, e); } /* Given a downwards exposed register def RD, add inter-loop true dependences for all its uses in the next iteration, and an output dependence to the first def of the next iteration. */ static void add_deps_for_def (ddg_ptr g, struct df *df, struct ref *rd) { int regno = DF_REF_REGNO (rd); struct bb_df_info *bb_info = DF_BB_INFO (df, g->bb); struct df_link *r_use; int use_before_def = false; rtx def_insn = DF_REF_INSN (rd); ddg_node_ptr src_node = get_node_of_insn (g, def_insn); /* Create and inter-loop true dependence between RD and each of its uses that is upwards exposed in RD's block. */ for (r_use = DF_REF_CHAIN (rd); r_use != NULL; r_use = r_use->next) { if (bitmap_bit_p (bb_info->ru_gen, r_use->ref->id)) { rtx use_insn = DF_REF_INSN (r_use->ref); ddg_node_ptr dest_node = get_node_of_insn (g, use_insn); if (!src_node || !dest_node) abort (); /* Any such upwards exposed use appears before the rd def. 
*/ use_before_def = true; create_ddg_dep_no_link (g, src_node, dest_node, TRUE_DEP, REG_DEP, 1); } } /* Create an inter-loop output dependence between RD (which is the last def in its block, being downwards exposed) and the first def in its block. Avoid creating a self output dependence. Avoid creating an output dependence if there is a dependence path between the two defs starting with a true dependence followed by an anti dependence (i.e. if there is a use between the two defs). */ if (! use_before_def) { struct ref *def = df_bb_regno_first_def_find (df, g->bb, regno); int i; ddg_node_ptr dest_node; if (!def || rd->id == def->id) return; /* Check if there are uses after RD. */ for (i = src_node->cuid + 1; i < g->num_nodes; i++) if (df_reg_used (df, g->nodes[i].insn, rd->reg)) return; dest_node = get_node_of_insn (g, def->insn); create_ddg_dep_no_link (g, src_node, dest_node, OUTPUT_DEP, REG_DEP, 1); } } /* Given a register USE, add an inter-loop anti dependence to the first (nearest BLOCK_BEGIN) def of the next iteration, unless USE is followed by a def in the block. */ static void add_deps_for_use (ddg_ptr g, struct df *df, struct ref *use) { int i; int regno = DF_REF_REGNO (use); struct ref *first_def = df_bb_regno_first_def_find (df, g->bb, regno); ddg_node_ptr use_node; ddg_node_ptr def_node; struct bb_df_info *bb_info; bb_info = DF_BB_INFO (df, g->bb); if (!first_def) return; use_node = get_node_of_insn (g, use->insn); def_node = get_node_of_insn (g, first_def->insn); if (!use_node || !def_node) abort (); /* Make sure there are no defs after USE. */ for (i = use_node->cuid + 1; i < g->num_nodes; i++) if (df_find_def (df, g->nodes[i].insn, use->reg)) return; /* We must not add ANTI dep when there is an intra-loop TRUE dep in the opposite direction. If the first_def reaches the USE then there is such a dep. */ if (! bitmap_bit_p (bb_info->rd_gen, first_def->id)) create_ddg_dep_no_link (g, use_node, def_node, ANTI_DEP, REG_DEP, 1); } /* Build inter-loop dependencies by looking at the DF analysis backwards. */ static void build_inter_loop_deps (ddg_ptr g, struct df *df) { int rd_num, u_num; struct bb_df_info *bb_info; bb_info = DF_BB_INFO (df, g->bb); /* Find inter-loop output and true deps by connecting downward exposed defs to the first def of the BB and to upwards exposed uses. */ EXECUTE_IF_SET_IN_BITMAP (bb_info->rd_gen, 0, rd_num, { struct ref *rd = df->defs[rd_num]; add_deps_for_def (g, df, rd); }); /* Find inter-loop anti deps. We are interested in uses of the block that appear below all defs; this implies that these uses are killed. */ EXECUTE_IF_SET_IN_BITMAP (bb_info->ru_kill, 0, u_num, { struct ref *use = df->uses[u_num]; /* We are interested in uses of this BB. */ if (BLOCK_FOR_INSN (use->insn) == g->bb) add_deps_for_use (g, df, use); }); } /* Given two nodes, analyze their RTL insns and add inter-loop mem deps to ddg G. */ static void add_inter_loop_mem_dep (ddg_ptr g, ddg_node_ptr from, ddg_node_ptr to) { if (mem_write_insn_p (from->insn)) { if (mem_read_insn_p (to->insn)) create_ddg_dep_no_link (g, from, to, TRUE_DEP, MEM_DEP, 1); else if (from->cuid != to->cuid) create_ddg_dep_no_link (g, from, to, OUTPUT_DEP, MEM_DEP, 1); } else { if (mem_read_insn_p (to->insn)) return; else if (from->cuid != to->cuid) { create_ddg_dep_no_link (g, from, to, ANTI_DEP, MEM_DEP, 1); create_ddg_dep_no_link (g, to, from, TRUE_DEP, MEM_DEP, 1); } } } /* Perform intra-block Data Dependency analysis and connect the nodes in the DDG. We assume the loop has a single basic block.
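   For illustration (not part of the original source): in a single-block loop containing

        i1:  r1 = r2 * r3
        i2:  mem[r4] = r1

   sched_analyze gives i2 a LOG_LINK to i1, which becomes an intra-loop TRUE_DEP edge i1 -> i2 with distance 0; if r1 were instead defined at the bottom of the block and used at the top, the def would be downward exposed and add_deps_for_def above would add the corresponding inter-loop edge with distance 1.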
*/ static void build_intra_loop_deps (ddg_ptr g) { int i; /* Hold the dependency analysis state during dependency calculations. */ struct deps tmp_deps; rtx head, tail, link; /* Build the dependence information, using the sched_analyze function. */ init_deps_global (); init_deps (&tmp_deps); /* Do the intra-block data dependence analysis for the given block. */ get_block_head_tail (g->bb->index, &head, &tail); sched_analyze (&tmp_deps, head, tail); /* Build intra-loop data dependencies using the scheduler dependency analysis. */ for (i = 0; i < g->num_nodes; i++) { ddg_node_ptr dest_node = &g->nodes[i]; if (! INSN_P (dest_node->insn)) continue; for (link = LOG_LINKS (dest_node->insn); link; link = XEXP (link, 1)) { ddg_node_ptr src_node = get_node_of_insn (g, XEXP (link, 0)); if (!src_node) continue; add_forward_dependence (XEXP (link, 0), dest_node->insn, REG_NOTE_KIND (link)); create_ddg_dependence (g, src_node, dest_node, INSN_DEPEND (src_node->insn)); } /* If this insn modifies memory, add an edge to all insns that access memory. */ if (mem_access_insn_p (dest_node->insn)) { int j; for (j = 0; j <= i; j++) { ddg_node_ptr j_node = &g->nodes[j]; if (mem_access_insn_p (j_node->insn)) /* Don't bother calculating inter-loop dep if an intra-loop dep already exists. */ if (! TEST_BIT (dest_node->successors, j)) add_inter_loop_mem_dep (g, dest_node, j_node); } } } /* Free the INSN_LISTs. */ finish_deps_global (); free_deps (&tmp_deps); } /* Given a basic block, create its DDG and return a pointer to a variable of ddg type that represents it. Initialize the ddg structure fields to the appropriate values. */ ddg_ptr create_ddg (basic_block bb, struct df *df, int closing_branch_deps) { ddg_ptr g; rtx insn, first_note; int i; int num_nodes = 0; g = (ddg_ptr) xcalloc (1, sizeof (struct ddg)); g->bb = bb; g->closing_branch_deps = closing_branch_deps; /* Count the number of insns in the BB. */ for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (! INSN_P (insn) || GET_CODE (PATTERN (insn)) == USE) continue; if (mem_read_insn_p (insn)) g->num_loads++; if (mem_write_insn_p (insn)) g->num_stores++; num_nodes++; } /* There is nothing to do for this BB. */ if (num_nodes <= 1) { free (g); return NULL; } /* Allocate the nodes array, and initialize the nodes. */ g->num_nodes = num_nodes; g->nodes = (ddg_node_ptr) xcalloc (num_nodes, sizeof (struct ddg_node)); g->closing_branch = NULL; i = 0; first_note = NULL_RTX; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (! INSN_P (insn)) { if (! first_note && GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK) first_note = insn; continue; } if (GET_CODE (insn) == JUMP_INSN) { if (g->closing_branch) abort (); /* Found two branches in DDG. */ else g->closing_branch = &g->nodes[i]; } else if (GET_CODE (PATTERN (insn)) == USE) { if (! first_note) first_note = insn; continue; } g->nodes[i].cuid = i; g->nodes[i].successors = sbitmap_alloc (num_nodes); sbitmap_zero (g->nodes[i].successors); g->nodes[i].predecessors = sbitmap_alloc (num_nodes); sbitmap_zero (g->nodes[i].predecessors); g->nodes[i].first_note = (first_note ? first_note : insn); g->nodes[i++].insn = insn; first_note = NULL_RTX; } if (!g->closing_branch) abort (); /* Found no branch in DDG. */ /* Build the data dependency graph. */ build_intra_loop_deps (g); build_inter_loop_deps (g, df); return g; } /* Free all the memory allocated for the DDG. 
*/ void free_ddg (ddg_ptr g) { int i; if (!g) return; for (i = 0; i < g->num_nodes; i++) { ddg_edge_ptr e = g->nodes[i].out; while (e) { ddg_edge_ptr next = e->next_out; free (e); e = next; } sbitmap_free (g->nodes[i].successors); sbitmap_free (g->nodes[i].predecessors); } if (g->num_backarcs > 0) free (g->backarcs); free (g->nodes); free (g); } void print_ddg_edge (FILE *dump_file, ddg_edge_ptr e) { char dep_c; switch (e->type) { case OUTPUT_DEP : dep_c = 'O'; break; case ANTI_DEP : dep_c = 'A'; break; default: dep_c = 'T'; } fprintf (dump_file, " [%d -(%c,%d,%d)-> %d] ", INSN_UID (e->src->insn), dep_c, e->latency, e->distance, INSN_UID (e->dest->insn)); } /* Print the DDG nodes with their in/out edges to the dump file. */ void print_ddg (FILE *dump_file, ddg_ptr g) { int i; for (i = 0; i < g->num_nodes; i++) { ddg_edge_ptr e; print_rtl_single (dump_file, g->nodes[i].insn); fprintf (dump_file, "OUT ARCS: "); for (e = g->nodes[i].out; e; e = e->next_out) print_ddg_edge (dump_file, e); fprintf (dump_file, "\nIN ARCS: "); for (e = g->nodes[i].in; e; e = e->next_in) print_ddg_edge (dump_file, e); fprintf (dump_file, "\n"); } } /* Print the given DDG in VCG format. */ void vcg_print_ddg (FILE *dump_file, ddg_ptr g) { int src_cuid; fprintf (dump_file, "graph: {\n"); for (src_cuid = 0; src_cuid < g->num_nodes; src_cuid++) { ddg_edge_ptr e; int src_uid = INSN_UID (g->nodes[src_cuid].insn); fprintf (dump_file, "node: {title: \"%d_%d\" info1: \"", src_cuid, src_uid); print_rtl_single (dump_file, g->nodes[src_cuid].insn); fprintf (dump_file, "\"}\n"); for (e = g->nodes[src_cuid].out; e; e = e->next_out) { int dst_uid = INSN_UID (e->dest->insn); int dst_cuid = e->dest->cuid; /* Give the backarcs a different color. */ if (e->distance > 0) fprintf (dump_file, "backedge: {color: red "); else fprintf (dump_file, "edge: { "); fprintf (dump_file, "sourcename: \"%d_%d\" ", src_cuid, src_uid); fprintf (dump_file, "targetname: \"%d_%d\" ", dst_cuid, dst_uid); fprintf (dump_file, "label: \"%d_%d\"}\n", e->latency, e->distance); } } fprintf (dump_file, "}\n"); } /* Create an edge and initialize it with given values. */ static ddg_edge_ptr create_ddg_edge (ddg_node_ptr src, ddg_node_ptr dest, dep_type t, dep_data_type dt, int l, int d) { ddg_edge_ptr e = (ddg_edge_ptr) xmalloc (sizeof (struct ddg_edge)); e->src = src; e->dest = dest; e->type = t; e->data_type = dt; e->latency = l; e->distance = d; e->next_in = e->next_out = NULL; e->aux.info = 0; return e; } /* Add the given edge to the in/out linked lists of the DDG nodes. */ static void add_edge_to_ddg (ddg_ptr g ATTRIBUTE_UNUSED, ddg_edge_ptr e) { ddg_node_ptr src = e->src; ddg_node_ptr dest = e->dest; if (!src->successors || !dest->predecessors) abort (); /* Should have allocated the sbitmaps. */ SET_BIT (src->successors, dest->cuid); SET_BIT (dest->predecessors, src->cuid); e->next_in = dest->in; dest->in = e; e->next_out = src->out; src->out = e; } /* Algorithm for computing the recurrence_length of an scc. We assume for now that cycles in the data dependence graph contain a single backarc. This simplifies the algorithm, and can be generalized later.
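   Worked example (not part of the original source): if an SCC has one backarc of latency 1 and distance 1, and the longest simple path from the backarc's destination back to its source has total latency 4, then length becomes 4 + 1 = 5 and the recurrence length is 5 / 1 = 5; with distance 2 the same cycle would contribute 5 / 2 = 2 under the integer division used below.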
*/ static void set_recurrence_length (ddg_scc_ptr scc, ddg_ptr g) { int j; int result = -1; for (j = 0; j < scc->num_backarcs; j++) { ddg_edge_ptr backarc = scc->backarcs[j]; int length; int distance = backarc->distance; ddg_node_ptr src = backarc->dest; ddg_node_ptr dest = backarc->src; length = longest_simple_path (g, src->cuid, dest->cuid, scc->nodes); if (length < 0 ) { /* fprintf (stderr, "Backarc not on simple cycle in SCC.\n"); */ continue; } length += backarc->latency; result = MAX (result, (length / distance)); } scc->recurrence_length = result; } /* Create a new SCC given the set of its nodes. Compute its recurrence_length and mark edges that belong to this scc as IN_SCC. */ static ddg_scc_ptr create_scc (ddg_ptr g, sbitmap nodes) { ddg_scc_ptr scc; int u; scc = (ddg_scc_ptr) xmalloc (sizeof (struct ddg_scc)); scc->backarcs = NULL; scc->num_backarcs = 0; scc->nodes = sbitmap_alloc (g->num_nodes); sbitmap_copy (scc->nodes, nodes); /* Mark the backarcs that belong to this SCC. */ EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, { ddg_edge_ptr e; ddg_node_ptr n = &g->nodes[u]; for (e = n->out; e; e = e->next_out) if (TEST_BIT (nodes, e->dest->cuid)) { e->aux.count = IN_SCC; if (e->distance > 0) add_backarc_to_scc (scc, e); } }); set_recurrence_length (scc, g); return scc; } /* Cleans the memory allocation of a given SCC. */ static void free_scc (ddg_scc_ptr scc) { if (!scc) return; sbitmap_free (scc->nodes); if (scc->num_backarcs > 0) free (scc->backarcs); free (scc); } /* Add a given edge known to be a backarc to the given DDG. */ static void add_backarc_to_ddg (ddg_ptr g, ddg_edge_ptr e) { int size = (g->num_backarcs + 1) * sizeof (ddg_edge_ptr); add_edge_to_ddg (g, e); g->backarcs = (ddg_edge_ptr *) xrealloc (g->backarcs, size); g->backarcs[g->num_backarcs++] = e; } /* Add backarc to an SCC. */ static void add_backarc_to_scc (ddg_scc_ptr scc, ddg_edge_ptr e) { int size = (scc->num_backarcs + 1) * sizeof (ddg_edge_ptr); scc->backarcs = (ddg_edge_ptr *) xrealloc (scc->backarcs, size); scc->backarcs[scc->num_backarcs++] = e; } /* Add the given SCC to the DDG. */ static void add_scc_to_ddg (ddg_all_sccs_ptr g, ddg_scc_ptr scc) { int size = (g->num_sccs + 1) * sizeof (ddg_scc_ptr); g->sccs = (ddg_scc_ptr *) xrealloc (g->sccs, size); g->sccs[g->num_sccs++] = scc; } /* Given the instruction INSN return the node that represents it. */ ddg_node_ptr get_node_of_insn (ddg_ptr g, rtx insn) { int i; for (i = 0; i < g->num_nodes; i++) if (insn == g->nodes[i].insn) return &g->nodes[i]; return NULL; } /* Given a set OPS of nodes in the DDG, find the set of their successors which are not in OPS, and set their bits in SUCC. Bits corresponding to OPS are cleared from SUCC. Leaves the other bits in SUCC unchanged. */ void find_successors (sbitmap succ, ddg_ptr g, sbitmap ops) { int i; EXECUTE_IF_SET_IN_SBITMAP (ops, 0, i, { const sbitmap node_succ = NODE_SUCCESSORS (&g->nodes[i]); sbitmap_a_or_b (succ, succ, node_succ); }); /* We want those that are not in ops. */ sbitmap_difference (succ, succ, ops); } /* Given a set OPS of nodes in the DDG, find the set of their predecessors which are not in OPS, and set their bits in PREDS. Bits corresponding to OPS are cleared from PREDS. Leaves the other bits in PREDS unchanged. */ void find_predecessors (sbitmap preds, ddg_ptr g, sbitmap ops) { int i; EXECUTE_IF_SET_IN_SBITMAP (ops, 0, i, { const sbitmap node_preds = NODE_PREDECESSORS (&g->nodes[i]); sbitmap_a_or_b (preds, preds, node_preds); }); /* We want those that are not in ops. 
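For instance, starting from an empty PREDS with OPS = {3, 4}, if the predecessor sets of nodes 3 and 4 are {2, 3} and {3, 5}, the union computed above is {2, 3, 5}, and removing OPS below leaves {2, 5}.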
*/ sbitmap_difference (preds, preds, ops); } /* Compare function to be passed to qsort to order the backarcs in descending recMII order. */ static int compare_sccs (const void *s1, const void *s2) { int rec_l1 = (*(ddg_scc_ptr *)s1)->recurrence_length; int rec_l2 = (*(ddg_scc_ptr *)s2)->recurrence_length; return ((rec_l2 > rec_l1) - (rec_l2 < rec_l1)); } /* Order the backarcs in descending recMII order using compare_sccs. */ static void order_sccs (ddg_all_sccs_ptr g) { qsort (g->sccs, g->num_sccs, sizeof (ddg_scc_ptr), (int (*) (const void *, const void *)) compare_sccs); } /* Perform the Strongly Connected Components decomposing algorithm on the DDG and return DDG_ALL_SCCS structure that contains them. */ ddg_all_sccs_ptr create_ddg_all_sccs (ddg_ptr g) { int i; int num_nodes = g->num_nodes; sbitmap from = sbitmap_alloc (num_nodes); sbitmap to = sbitmap_alloc (num_nodes); sbitmap scc_nodes = sbitmap_alloc (num_nodes); ddg_all_sccs_ptr sccs = (ddg_all_sccs_ptr) xmalloc (sizeof (struct ddg_all_sccs)); sccs->ddg = g; sccs->sccs = NULL; sccs->num_sccs = 0; for (i = 0; i < g->num_backarcs; i++) { ddg_scc_ptr scc; ddg_edge_ptr backarc = g->backarcs[i]; ddg_node_ptr src = backarc->src; ddg_node_ptr dest = backarc->dest; /* If the backarc already belongs to an SCC, continue. */ if (backarc->aux.count == IN_SCC) continue; sbitmap_zero (from); sbitmap_zero (to); SET_BIT (from, dest->cuid); SET_BIT (to, src->cuid); if (find_nodes_on_paths (scc_nodes, g, from, to)) { scc = create_scc (g, scc_nodes); add_scc_to_ddg (sccs, scc); } } order_sccs (sccs); sbitmap_free (from); sbitmap_free (to); sbitmap_free (scc_nodes); return sccs; } /* Frees the memory allocated for all SCCs of the DDG, but keeps the DDG. */ void free_ddg_all_sccs (ddg_all_sccs_ptr all_sccs) { int i; if (!all_sccs) return; for (i = 0; i < all_sccs->num_sccs; i++) free_scc (all_sccs->sccs[i]); free (all_sccs); } /* Given FROM - a bitmap of source nodes - and TO - a bitmap of destination nodes - find all nodes that lie on paths from FROM to TO (not excluding nodes from FROM and TO). Return non zero if nodes exist. 
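The set is computed as the intersection of forward reachability from FROM with backward reachability from TO.  For a made-up five-node example with edges 1->2, 2->4, 1->3 and 3->5, FROM = {1} and TO = {4}: the nodes reachable from FROM are {1, 2, 3, 4, 5}, the nodes from which TO is reachable are {1, 2, 4}, and their intersection {1, 2, 4} is exactly the set of nodes lying on some path from 1 to 4.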
*/ int find_nodes_on_paths (sbitmap result, ddg_ptr g, sbitmap from, sbitmap to) { int answer; int change, u; int num_nodes = g->num_nodes; sbitmap workset = sbitmap_alloc (num_nodes); sbitmap reachable_from = sbitmap_alloc (num_nodes); sbitmap reach_to = sbitmap_alloc (num_nodes); sbitmap tmp = sbitmap_alloc (num_nodes); sbitmap_copy (reachable_from, from); sbitmap_copy (tmp, from); change = 1; while (change) { change = 0; sbitmap_copy (workset, tmp); sbitmap_zero (tmp); EXECUTE_IF_SET_IN_SBITMAP (workset, 0, u, { ddg_edge_ptr e; ddg_node_ptr u_node = &g->nodes[u]; for (e = u_node->out; e != (ddg_edge_ptr) 0; e = e->next_out) { ddg_node_ptr v_node = e->dest; int v = v_node->cuid; if (!TEST_BIT (reachable_from, v)) { SET_BIT (reachable_from, v); SET_BIT (tmp, v); change = 1; } } }); } sbitmap_copy (reach_to, to); sbitmap_copy (tmp, to); change = 1; while (change) { change = 0; sbitmap_copy (workset, tmp); sbitmap_zero (tmp); EXECUTE_IF_SET_IN_SBITMAP (workset, 0, u, { ddg_edge_ptr e; ddg_node_ptr u_node = &g->nodes[u]; for (e = u_node->in; e != (ddg_edge_ptr) 0; e = e->next_in) { ddg_node_ptr v_node = e->src; int v = v_node->cuid; if (!TEST_BIT (reach_to, v)) { SET_BIT (reach_to, v); SET_BIT (tmp, v); change = 1; } } }); } answer = sbitmap_a_and_b_cg (result, reachable_from, reach_to); sbitmap_free (workset); sbitmap_free (reachable_from); sbitmap_free (reach_to); sbitmap_free (tmp); return answer; } /* Updates the counts of U_NODE's successors (that belong to NODES) to be at-least as large as the count of U_NODE plus the latency between them. Sets a bit in TMP for each successor whose count was changed (increased). Returns nonzero if any count was changed. */ static int update_dist_to_successors (ddg_node_ptr u_node, sbitmap nodes, sbitmap tmp) { ddg_edge_ptr e; int result = 0; for (e = u_node->out; e; e = e->next_out) { ddg_node_ptr v_node = e->dest; int v = v_node->cuid; if (TEST_BIT (nodes, v) && (e->distance == 0) && (v_node->aux.count < u_node->aux.count + e->latency)) { v_node->aux.count = u_node->aux.count + e->latency; SET_BIT (tmp, v); result = 1; } } return result; } /* Find the length of a longest path from SRC to DEST in G, going only through NODES, and disregarding backarcs. */ int longest_simple_path (struct ddg * g, int src, int dest, sbitmap nodes) { int i, u; int change = 1; int result; int num_nodes = g->num_nodes; sbitmap workset = sbitmap_alloc (num_nodes); sbitmap tmp = sbitmap_alloc (num_nodes); /* Data will hold the distance of the longest path found so far from src to each node. Initialize to -1 = less than minimum. */ for (i = 0; i < g->num_nodes; i++) g->nodes[i].aux.count = -1; g->nodes[src].aux.count = 0; sbitmap_zero (tmp); SET_BIT (tmp, src); while (change) { change = 0; sbitmap_copy (workset, tmp); sbitmap_zero (tmp); EXECUTE_IF_SET_IN_SBITMAP (workset, 0, u, { ddg_node_ptr u_node = &g->nodes[u]; change |= update_dist_to_successors (u_node, nodes, tmp); }); } result = g->nodes[dest].aux.count; sbitmap_free (workset); sbitmap_free (tmp); return result; } /* Loop header copying on trees. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Duplicates headers of loops if they are small enough, so that the statements in the loop body are always executed when the loop is entered. This increases effectivity of code motion optimizations, and reduces the need for loop preconditioning. */ /* Check whether we should duplicate HEADER of LOOP. At most *LIMIT instructions should be duplicated, limit is decreased by the actual amount. */ static bool should_duplicate_loop_header_p (basic_block header, struct loop *loop, int *limit) { block_stmt_iterator bsi; tree last; /* Do not copy one block more than once (we do not really want to do loop peeling here). */ if (header->aux) return false; if (!header->succ) abort (); if (!header->succ->succ_next) return false; if (header->succ->succ_next->succ_next) return false; if (flow_bb_inside_loop_p (loop, header->succ->dest) && flow_bb_inside_loop_p (loop, header->succ->succ_next->dest)) return false; /* If this is not the original loop header, we want it to have just one predecessor in order to match the && pattern. */ if (header != loop->header && header->pred->pred_next) return false; last = last_stmt (header); if (TREE_CODE (last) != COND_EXPR) return false; /* Approximately copy the conditions that used to be used in jump.c -- at most 20 insns and no calls. */ for (bsi = bsi_start (header); !bsi_end_p (bsi); bsi_next (&bsi)) { last = bsi_stmt (bsi); if (TREE_CODE (last) == LABEL_EXPR) continue; if (get_call_expr_in (last)) return false; *limit -= estimate_num_insns (last); if (*limit < 0) return false; } return true; } /* Marks variables defined in basic block BB for rewriting. */ static void mark_defs_for_rewrite (basic_block bb) { tree stmt, var; block_stmt_iterator bsi; stmt_ann_t ann; def_optype defs; v_may_def_optype v_may_defs; v_must_def_optype v_must_defs; unsigned i; for (stmt = phi_nodes (bb); stmt; stmt = TREE_CHAIN (stmt)) { var = PHI_RESULT (stmt); bitmap_set_bit (vars_to_rename, SSA_NAME_VERSION (var)); } for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { stmt = bsi_stmt (bsi); get_stmt_operands (stmt); ann = stmt_ann (stmt); defs = DEF_OPS (ann); for (i = 0; i < NUM_DEFS (defs); i++) { var = DEF_OP (defs, i); bitmap_set_bit (vars_to_rename, SSA_NAME_VERSION (var)); } v_may_defs = V_MAY_DEF_OPS (ann); for (i = 0; i < NUM_V_MAY_DEFS (v_may_defs); i++) { var = V_MAY_DEF_RESULT (v_may_defs, i); bitmap_set_bit (vars_to_rename, SSA_NAME_VERSION (var)); } v_must_defs = V_MUST_DEF_OPS (ann); for (i = 0; i < NUM_V_MUST_DEFS (v_must_defs); i++) { var = V_MUST_DEF_OP (v_must_defs, i); bitmap_set_bit (vars_to_rename, SSA_NAME_VERSION (var)); } } } /* Duplicates destinations of edges in BBS_TO_DUPLICATE. */ static void duplicate_blocks (varray_type bbs_to_duplicate) { unsigned i; edge preheader_edge, e, e1; basic_block header, new_header; tree phi, new_phi, var; /* TODO: It should be quite easy to keep the dominance information up-to-date. 
*/ free_dominance_info (CDI_DOMINATORS); for (i = 0; i < VARRAY_ACTIVE_SIZE (bbs_to_duplicate); i++) { preheader_edge = VARRAY_GENERIC_PTR_NOGC (bbs_to_duplicate, i); header = preheader_edge->dest; /* It is sufficient to rewrite the definitions, since the uses of the operands defined outside of the duplicated basic block are still valid (every basic block that dominates the original block also dominates the duplicate). */ mark_defs_for_rewrite (header); } for (i = 0; i < VARRAY_ACTIVE_SIZE (bbs_to_duplicate); i++) { preheader_edge = VARRAY_GENERIC_PTR_NOGC (bbs_to_duplicate, i); header = preheader_edge->dest; if (!header->aux) abort (); header->aux = NULL; new_header = duplicate_block (header, preheader_edge); /* Create the phi nodes on on entry to new_header. */ for (phi = phi_nodes (header), var = PENDING_STMT (preheader_edge); phi; phi = TREE_CHAIN (phi), var = TREE_CHAIN (var)) { new_phi = create_phi_node (PHI_RESULT (phi), new_header); add_phi_arg (&new_phi, TREE_VALUE (var), preheader_edge); } PENDING_STMT (preheader_edge) = NULL; /* Add the phi arguments to the outgoing edges. */ for (e = header->succ; e; e = e->succ_next) { for (e1 = new_header->succ; e1->dest != e->dest; e1 = e1->succ_next) continue; for (phi = phi_nodes (e->dest); phi; phi = TREE_CHAIN (phi)) { tree def = PHI_ARG_DEF_FROM_EDGE (phi, e); add_phi_arg (&phi, def, e1); } } } calculate_dominance_info (CDI_DOMINATORS); rewrite_ssa_into_ssa (vars_to_rename); bitmap_clear (vars_to_rename); } /* Checks whether LOOP is a do-while style loop. */ static bool do_while_loop_p (struct loop *loop) { tree stmt = last_stmt (loop->latch); /* If the latch of the loop is not empty, it is not a do-while loop. */ if (stmt && TREE_CODE (stmt) != LABEL_EXPR) return false; /* If the header contains just a condition, it is not a do-while loop. */ stmt = last_and_only_stmt (loop->header); if (stmt && TREE_CODE (stmt) == COND_EXPR) return false; return true; } /* For all loops, copy the condition at the end of the loop body in front of the loop. This is beneficial since it increases efficiency of code motion optimizations. It also saves one jump on entry to the loop. */ static void copy_loop_headers (void) { struct loops *loops; unsigned i; struct loop *loop; basic_block header; edge preheader_edge; varray_type bbs_to_duplicate = NULL; loops = loop_optimizer_init (dump_file); if (!loops) return; /* We do not try to keep the information about irreducible regions up-to-date. */ loops->state &= ~LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS; #ifdef ENABLE_CHECKING verify_loop_structure (loops); #endif for (i = 1; i < loops->num; i++) { /* Copy at most 20 insns. */ int limit = 20; loop = loops->parray[i]; preheader_edge = loop_preheader_edge (loop); header = preheader_edge->dest; /* If the loop is already a do-while style one (either because it was written as such, or because jump threading transformed it into one), we might be in fact peeling the first iteration of the loop. This in general is not a good idea. */ if (do_while_loop_p (loop)) continue; /* Iterate the header copying up to limit; this takes care of the cases like while (a && b) {...}, where we want to have both of the conditions copied. TODO -- handle while (a || b) - like cases, by not requiring the header to have just a single successor and copying up to postdominator. We do not really copy the blocks immediately, so that we do not have to worry about updating loop structures, and also so that we do not have to rewrite variables out of and into ssa form for each block. 
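(As a sketch of the intended effect at the source level, ignoring the SSA details: duplicating both header blocks of while (a && b) { body; } produces code equivalent to if (a && b) { do { body; } while (a && b); } so that the statements of the body are always executed once the copied conditions have succeeded.)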
Instead we just record the block into worklist and duplicate all of them at once. */ while (should_duplicate_loop_header_p (header, loop, &limit)) { if (!bbs_to_duplicate) VARRAY_GENERIC_PTR_NOGC_INIT (bbs_to_duplicate, 10, "bbs_to_duplicate"); VARRAY_PUSH_GENERIC_PTR_NOGC (bbs_to_duplicate, preheader_edge); header->aux = &header->aux; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Scheduled basic block %d for duplication.\n", header->index); /* Find a successor of header that is inside a loop; i.e. the new header after the condition is copied. */ if (flow_bb_inside_loop_p (loop, header->succ->dest)) preheader_edge = header->succ; else preheader_edge = header->succ->succ_next; header = preheader_edge->dest; } } loop_optimizer_finalize (loops, NULL); if (bbs_to_duplicate) { duplicate_blocks (bbs_to_duplicate); VARRAY_FREE (bbs_to_duplicate); } /* Run cleanup_tree_cfg here regardless of whether we have done anything, so that we cleanup the blocks created in order to get the loops into a canonical shape. */ cleanup_tree_cfg (); } static bool gate_ch (void) { return flag_tree_ch != 0; } struct tree_opt_pass pass_ch = { "ch", /* name */ gate_ch, /* gate */ copy_loop_headers, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_TREE_CH, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ (TODO_dump_func | TODO_verify_ssa) /* todo_flags_finish */ }; /* Rtl-level loop invariant motion. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This implements the loop invariant motion pass. It is very simple (no calls, libcalls, etc.). This should be sufficient to cleanup things like address arithmetics -- other more complicated invariants should be eliminated on tree level either in tree-ssa-loop-im.c or in tree-ssa-pre.c. We proceed loop by loop -- it is simpler than trying to handle things globally and should not lose much. First we inspect all sets inside loop and create a dependency graph on insns (saying "to move this insn, you must also move the following insns"). We then need to determine what to move. We estimate the number of registers used and move as many invariants as possible while we still have enough free registers. We prefer the expensive invariants. Then we move the selected invariants out of the loop, creating a new temporaries for them if necessary. */ /* The data stored for the loop. */ struct loop_data { struct loop *outermost_exit; /* The outermost exit of the loop. */ bool has_call; /* True if the loop contains a call. */ }; #define LOOP_DATA(LOOP) ((struct loop_data *) (LOOP)->aux) /* The description of an use. */ struct use { rtx *pos; /* Position of the use. */ rtx insn; /* The insn in that the use occurs. */ struct use *next; /* Next use in the list. 
*/ }; /* The description of a def. */ struct def { struct use *uses; /* The list of uses that are uniquely reached by it. */ unsigned n_uses; /* Number of such uses. */ unsigned invno; /* The corresponding invariant. */ }; /* The data stored for each invariant. */ struct invariant { /* The number of the invariant. */ unsigned invno; /* Whether we already processed the invariant. */ bool processed; /* The definition of the invariant. */ struct def *def; /* The insn in that it is defined. */ rtx insn; /* Whether it is always executed. */ bool always_executed; /* Whether to move the invariant. */ bool move; /* Cost if the invariant. */ unsigned cost; /* The invariants it depends on. */ bitmap depends_on; /* Used for detecting already visited invariants during determining costs of movements. */ unsigned stamp; }; /* The actual stamp for marking already visited invariants during determining costs of movements. */ static unsigned actual_stamp; /* The invariants. */ static varray_type invariants; /* Test for possibility of invariantness of X. */ static bool check_maybe_invariant (rtx x) { enum rtx_code code = GET_CODE (x); int i, j; const char *fmt; switch (code) { case CONST_INT: case CONST_DOUBLE: case SYMBOL_REF: case CONST: case LABEL_REF: return true; case PC: case CC0: case UNSPEC_VOLATILE: case CALL: return false; case REG: return true; case MEM: /* Load/store motion is done elsewhere. ??? Perhaps also add it here? It should not be hard, and might be faster than "elsewhere". */ /* Just handle the most trivial case where we load from an unchanging location (most importantly, pic tables). */ if (RTX_UNCHANGING_P (x)) break; return false; case ASM_OPERANDS: /* Don't mess with insns declared volatile. */ if (MEM_VOLATILE_P (x)) return false; break; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { if (!check_maybe_invariant (XEXP (x, i))) return false; } else if (fmt[i] == 'E') { for (j = 0; j < XVECLEN (x, i); j++) if (!check_maybe_invariant (XVECEXP (x, i, j))) return false; } } return true; } /* Determines the basic blocks inside LOOP that are always executed and stores their bitmap to ALWAYS_REACHED. MAY_EXIT is a bitmap of basic blocks that may either exit the loop, or contain the call that does not have to return. BODY is body of the loop obtained by get_loop_body_in_dom_order. */ static void compute_always_reached (struct loop *loop, basic_block *body, bitmap may_exit, bitmap always_reached) { unsigned i; for (i = 0; i < loop->num_nodes; i++) { if (dominated_by_p (CDI_DOMINATORS, loop->latch, body[i])) bitmap_set_bit (always_reached, i); if (bitmap_bit_p (may_exit, i)) return; } } /* Finds exits out of the LOOP with body BODY. Marks blocks in that we may exit the loop by cfg edge to HAS_EXIT and MAY_EXIT. In MAY_EXIT additionally mark blocks that may exit due to a call. 
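For example, a block inside the loop that ends in a conditional branch leaving the loop gets its bit set in both MAY_EXIT and HAS_EXIT, while a block whose only reason for being an exit is a call to a function that might not return gets its bit set in MAY_EXIT only.  The bits are indexed by the position of the block in BODY.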
*/ static void find_exits (struct loop *loop, basic_block *body, bitmap may_exit, bitmap has_exit) { unsigned i; edge e; struct loop *outermost_exit = loop, *aexit; bool has_call = false; rtx insn; for (i = 0; i < loop->num_nodes; i++) { if (body[i]->loop_father == loop) { FOR_BB_INSNS (body[i], insn) { if (GET_CODE (insn) == CALL_INSN && !CONST_OR_PURE_CALL_P (insn)) { has_call = true; bitmap_set_bit (may_exit, i); break; } } for (e = body[i]->succ; e; e = e->succ_next) { if (flow_bb_inside_loop_p (loop, e->dest)) continue; bitmap_set_bit (may_exit, i); bitmap_set_bit (has_exit, i); outermost_exit = find_common_loop (outermost_exit, e->dest->loop_father); } continue; } /* Use the data stored for the subloop to decide whether we may exit through it. It is sufficient to do this for header of the loop, as other basic blocks inside it must be dominated by it. */ if (body[i]->loop_father->header != body[i]) continue; if (LOOP_DATA (body[i]->loop_father)->has_call) { has_call = true; bitmap_set_bit (may_exit, i); } aexit = LOOP_DATA (body[i]->loop_father)->outermost_exit; if (aexit != loop) { bitmap_set_bit (may_exit, i); bitmap_set_bit (has_exit, i); if (flow_loop_nested_p (aexit, outermost_exit)) outermost_exit = aexit; } } loop->aux = xcalloc (1, sizeof (struct loop_data)); LOOP_DATA (loop)->outermost_exit = outermost_exit; LOOP_DATA (loop)->has_call = has_call; } /* Check whether we may assign a value to X from a register. */ static bool may_assign_reg_p (rtx x) { return can_copy_p (GET_MODE (x)); } /* Finds definitions that may correspond to invariants in LOOP with body BODY. DF is the dataflow object. */ static void find_defs (struct loop *loop, basic_block *body, struct df *df) { unsigned i; bitmap blocks = BITMAP_XMALLOC (); for (i = 0; i < loop->num_nodes; i++) bitmap_set_bit (blocks, body[i]->index); df_analyze_subcfg (df, blocks, DF_UD_CHAIN | DF_HARD_REGS | DF_EQUIV_NOTES); BITMAP_XFREE (blocks); } /* Creates a new invariant for definition DEF in INSN, depending on invariants in DEPENDS_ON. ALWAYS_EXECUTED is true if the insn is always executed, unless the program ends due to a function call. */ static void create_new_invariant (struct def *def, rtx insn, bitmap depends_on, bool always_executed) { struct invariant *inv = xmalloc (sizeof (struct invariant)); rtx set = single_set (insn); inv->def = def; inv->always_executed = always_executed; inv->depends_on = depends_on; /* If the set is simple, usually by moving it we move the whole store out of the loop. Otherwise we save only cost of the computation. */ if (def) inv->cost = rtx_cost (set, SET); else inv->cost = rtx_cost (SET_SRC (set), SET); inv->move = false; inv->processed = false; inv->stamp = 0; inv->insn = insn; inv->invno = VARRAY_ACTIVE_SIZE (invariants); if (def) def->invno = inv->invno; VARRAY_PUSH_GENERIC_PTR_NOGC (invariants, inv); if (dump_file) { fprintf (dump_file, "Set in insn %d is invariant (%d), cost %d, depends on ", INSN_UID (insn), inv->invno, inv->cost); dump_bitmap (dump_file, inv->depends_on); } } /* Record USE at DEF. */ static void record_use (struct def *def, rtx *use, rtx insn) { struct use *u = xmalloc (sizeof (struct use)); if (GET_CODE (*use) == SUBREG) use = &SUBREG_REG (*use); if (!REG_P (*use)) abort (); u->pos = use; u->insn = insn; u->next = def->uses; def->uses = u; def->n_uses++; } /* Finds the invariants INSN depends on and store them to the DEPENDS_ON bitmap. DF is the dataflow object. 
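As an illustration (insns and register numbers made up): if the loop body contains i1: (set (reg 65) (const_int 100)) and i2: (set (reg 66) (plus (reg 65) (reg 67))), the only definition of reg 65 reaching i2 is i1, i1 dominates i2, and i1 has already been recorded as an invariant, then the bit for i1's invariant number is set in i2's DEPENDS_ON.  A use with more than one reaching definition, or whose single definition is not a recorded invariant dominating INSN, makes the function fail, and INSN is then not considered invariant.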
*/ static bool check_dependencies (rtx insn, struct df *df, bitmap depends_on) { struct df_link *uses, *defs; struct ref *use, *def; basic_block bb = BLOCK_FOR_INSN (insn), def_bb; struct def *def_data; for (uses = DF_INSN_USES (df, insn); uses; uses = uses->next) { use = uses->ref; defs = DF_REF_CHAIN (use); if (!defs) continue; if (defs->next) return false; def = defs->ref; def_data = DF_REF_DATA (def); if (!def_data) return false; def_bb = DF_REF_BB (def); if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb)) return false; bitmap_set_bit (depends_on, def_data->invno); } return true; } /* Finds invariant in INSN. ALWAYS_REACHED is true if the insn is always executed. ALWAYS_EXECUTED is true if the insn is always executed, unless the program ends due to a function call. DF is the dataflow object. */ static void find_invariant_insn (rtx insn, bool always_reached, bool always_executed, struct df *df) { struct ref *ref; struct def *def; bitmap depends_on; rtx set, dest; bool simple = true; /* Until we get rid of LIBCALLS. */ if (find_reg_note (insn, REG_RETVAL, NULL_RTX) || find_reg_note (insn, REG_LIBCALL, NULL_RTX) || find_reg_note (insn, REG_NO_CONFLICT, NULL_RTX)) return; set = single_set (insn); if (!set) return; dest = SET_DEST (set); if (GET_CODE (dest) != REG || HARD_REGISTER_P (dest)) simple = false; if (!check_maybe_invariant (SET_SRC (set)) || !may_assign_reg_p (SET_DEST (set))) return; if (may_trap_p (PATTERN (insn))) { if (!always_reached) return; /* Unless the exceptions are handled, the behavior is undefined if the trap occurs. */ if (flag_non_call_exceptions) return; } depends_on = BITMAP_XMALLOC (); if (!check_dependencies (insn, df, depends_on)) { BITMAP_XFREE (depends_on); return; } if (simple) { ref = df_find_def (df, insn, dest); def = xcalloc (1, sizeof (struct def)); DF_REF_DATA (ref) = def; } else def = NULL; create_new_invariant (def, insn, depends_on, always_executed); } /* Record registers used in INSN that have an unique invariant definition. DF is the dataflow object. */ static void record_uses (rtx insn, struct df *df) { struct df_link *uses, *defs; struct ref *use, *def; basic_block bb = BLOCK_FOR_INSN (insn), def_bb; for (uses = DF_INSN_USES (df, insn); uses; uses = uses->next) { use = uses->ref; defs = DF_REF_CHAIN (use); if (!defs || defs->next) continue; def = defs->ref; if (!DF_REF_DATA (def)) continue; def_bb = DF_REF_BB (def); if (!dominated_by_p (CDI_DOMINATORS, bb, def_bb)) continue; record_use (DF_REF_DATA (def), DF_REF_LOC (use), DF_REF_INSN (use)); } } /* Finds invariants in INSN. ALWAYS_REACHED is true if the insn is always executed. ALWAYS_EXECUTED is true if the insn is always executed, unless the program ends due to a function call. DF is the dataflow object. */ static void find_invariants_insn (rtx insn, bool always_reached, bool always_executed, struct df *df) { find_invariant_insn (insn, always_reached, always_executed, df); record_uses (insn, df); } /* Finds invariants in basic block BB. ALWAYS_REACHED is true if the basic block is always executed. ALWAYS_EXECUTED is true if the basic block is always executed, unless the program ends due to a function call. DF is the dataflow object. 
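Note that the always-reached property only holds up to the first call that might not return; the walk below drops ALWAYS_REACHED as soon as such a call insn has been processed, so insns after it in the block are treated as possibly not reached.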
*/ static void find_invariants_bb (basic_block bb, bool always_reached, bool always_executed, struct df *df) { rtx insn; FOR_BB_INSNS (bb, insn) { if (!INSN_P (insn)) continue; find_invariants_insn (insn, always_reached, always_executed, df); if (always_reached && GET_CODE (insn) == CALL_INSN && !CONST_OR_PURE_CALL_P (insn)) always_reached = false; } } /* Finds invariants in LOOP with body BODY. ALWAYS_REACHED is the bitmap of basic blocks in BODY that are always executed. ALWAYS_EXECUTED is the bitmap of basic blocks in BODY that are always executed unless the program ends due to a function call. DF is the dataflow object. */ static void find_invariants_body (struct loop *loop, basic_block *body, bitmap always_reached, bitmap always_executed, struct df *df) { unsigned i; for (i = 0; i < loop->num_nodes; i++) find_invariants_bb (body[i], bitmap_bit_p (always_reached, i), bitmap_bit_p (always_executed, i), df); } /* Finds invariants in LOOP. DF is the dataflow object. */ static void find_invariants (struct loop *loop, struct df *df) { bitmap may_exit = BITMAP_XMALLOC (); bitmap always_reached = BITMAP_XMALLOC (); bitmap has_exit = BITMAP_XMALLOC (); bitmap always_executed = BITMAP_XMALLOC (); basic_block *body = get_loop_body_in_dom_order (loop); find_exits (loop, body, may_exit, has_exit); compute_always_reached (loop, body, may_exit, always_reached); compute_always_reached (loop, body, has_exit, always_executed); find_defs (loop, body, df); find_invariants_body (loop, body, always_reached, always_executed, df); BITMAP_XFREE (always_reached); BITMAP_XFREE (always_executed); BITMAP_XFREE (may_exit); BITMAP_XFREE (has_exit); free (body); } /* Frees a list of uses USE. */ static void free_use_list (struct use *use) { struct use *next; for (; use; use = next) { next = use->next; free (use); } } /* Calculates cost and number of registers needed for moving invariant INV out of the loop and stores them to *COST and *REGS_NEEDED. */ static void get_inv_cost (struct invariant *inv, int *comp_cost, unsigned *regs_needed) { int acomp_cost; unsigned aregs_needed; unsigned depno; struct invariant *dep; *comp_cost = 0; *regs_needed = 0; if (inv->move || inv->stamp == actual_stamp) return; inv->stamp = actual_stamp; (*regs_needed)++; (*comp_cost) += inv->cost; EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, depno, { dep = VARRAY_GENERIC_PTR_NOGC (invariants, depno); get_inv_cost (dep, &acomp_cost, &aregs_needed); if (aregs_needed /* We need to check always_executed, since if the original value of the invariant may be preserved, we may need to keep it in a separate register. TODO check whether the register has an use outside of the loop. */ && dep->always_executed && !dep->def->uses->next) { /* If this is a single use, after moving the dependency we will not need a new register. */ aregs_needed--; } (*regs_needed) += aregs_needed; (*comp_cost) += acomp_cost; }); } /* Calculates gain for eliminating invariant INV. REGS_USED is the number of registers used in the loop, N_INV_USES is the number of uses of invariants, NEW_REGS is the number of new variables already added due to the invariant motion. The number of registers needed for it is stored in *REGS_NEEDED. 
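The gain is the computation cost saved minus the estimated register-pressure cost of the extra registers, i.e. comp_cost - (global_cost_for_size (NEW_REGS + *REGS_NEEDED, REGS_USED, N_INV_USES) - global_cost_for_size (NEW_REGS, REGS_USED, N_INV_USES)).  With made-up numbers: if moving the invariant and its not-yet-moved dependencies saves a computation cost of 8, needs 2 new registers, and the size cost rises from 1 to 3 when those registers are added, the gain is 8 - (3 - 1) = 6; only invariants with positive gain end up being moved by the caller.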
*/ static int gain_for_invariant (struct invariant *inv, unsigned *regs_needed, unsigned new_regs, unsigned regs_used, unsigned n_inv_uses) { int comp_cost, size_cost; get_inv_cost (inv, &comp_cost, regs_needed); actual_stamp++; size_cost = (global_cost_for_size (new_regs + *regs_needed, regs_used, n_inv_uses) - global_cost_for_size (new_regs, regs_used, n_inv_uses)); return comp_cost - size_cost; } /* Finds invariant with best gain for moving. Returns the gain, stores the invariant in *BEST and number of registers needed for it to *REGS_NEEDED. REGS_USED is the number of registers used in the loop, N_INV_USES is the number of uses of invariants. NEW_REGS is the number of new variables already added due to invariant motion. */ static int best_gain_for_invariant (struct invariant **best, unsigned *regs_needed, unsigned new_regs, unsigned regs_used, unsigned n_inv_uses) { struct invariant *inv; int gain = 0, again; unsigned aregs_needed, invno; for (invno = 0; invno < VARRAY_ACTIVE_SIZE (invariants); invno++) { inv = VARRAY_GENERIC_PTR_NOGC (invariants, invno); if (inv->move) continue; again = gain_for_invariant (inv, &aregs_needed, new_regs, regs_used, n_inv_uses); if (again > gain) { gain = again; *best = inv; *regs_needed = aregs_needed; } } return gain; } /* Marks invariant INVNO and all its dependencies for moving. */ static void set_move_mark (unsigned invno) { struct invariant *inv = VARRAY_GENERIC_PTR_NOGC (invariants, invno); if (inv->move) return; inv->move = true; if (dump_file) fprintf (dump_file, "Decided to move invariant %d\n", invno); EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, invno, set_move_mark (invno)); } /* Determines which invariants to move. DF is the dataflow object. */ static void find_invariants_to_move (struct df *df) { unsigned i, regs_used, n_inv_uses, regs_needed = 0, new_regs; struct invariant *inv = NULL; if (flag_move_all_movables) { /* This is easy & stupid. */ for (i = 0; i < VARRAY_ACTIVE_SIZE (invariants); i++) { inv = VARRAY_GENERIC_PTR_NOGC (invariants, i); inv->move = true; } return; } if (!VARRAY_ACTIVE_SIZE (invariants)) return; /* Now something slightly more involved. First estimate the number of used registers. */ n_inv_uses = 0; /* We do not really do a good job in this estimation; put some initial bound here to stand for induction variables etc. that we do not detect. */ regs_used = 2; for (i = 0; i < df->n_regs; i++) { if (!DF_REGNO_FIRST_DEF (df, i) && DF_REGNO_LAST_USE (df, i)) { /* This is a value that is used but not changed inside loop. */ regs_used++; } } for (i = 0; i < VARRAY_ACTIVE_SIZE (invariants); i++) { inv = VARRAY_GENERIC_PTR_NOGC (invariants, i); if (inv->def) n_inv_uses += inv->def->n_uses; } new_regs = 0; while (best_gain_for_invariant (&inv, &regs_needed, new_regs, regs_used, n_inv_uses) > 0) { set_move_mark (inv->invno); new_regs += regs_needed; } } /* Move invariant INVNO out of the LOOP. DF is the dataflow object. */ static void move_invariant_reg (struct loop *loop, unsigned invno, struct df *df) { struct invariant *inv = VARRAY_GENERIC_PTR_NOGC (invariants, invno); unsigned i; basic_block preheader = loop_preheader_edge (loop)->src; rtx reg, set; struct use *use; if (inv->processed) return; inv->processed = true; if (inv->depends_on) { EXECUTE_IF_SET_IN_BITMAP (inv->depends_on, 0, i, { move_invariant_reg (loop, i, df); }); } /* Move the set out of the loop.
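(Sketching the effect with made-up register numbers: an invariant insn reg 65 = reg 60 + reg 61 inside the loop becomes reg 70 = reg 60 + reg 61 in the preheader, where reg 70 is a fresh register; a copy reg 65 = reg 70 is left at the insn's original position, and uses of reg 65 that are known to be dominated by the definition are rewritten to use reg 70 directly.)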
If the set is always executed (we could omit this condition if we know that the register is unused outside of the loop, but it does not seem worth finding out) and it has no uses that would not be dominated by it, we may just move it (TODO). Otherwise we need to create a temporary register. */ set = single_set (inv->insn); reg = gen_reg_rtx (GET_MODE (SET_DEST (set))); df_pattern_emit_after (df, gen_move_insn (SET_DEST (set), reg), BLOCK_FOR_INSN (inv->insn), inv->insn); SET_DEST (set) = reg; reorder_insns (inv->insn, inv->insn, BB_END (preheader)); df_insn_modify (df, preheader, inv->insn); /* Replace the uses we know to be dominated. It saves work for copy propagation, and also it is necessary so that dependent invariants are computed right. */ if (inv->def) { for (use = inv->def->uses; use; use = use->next) { *use->pos = reg; df_insn_modify (df, BLOCK_FOR_INSN (use->insn), use->insn); } } } /* Move selected invariant out of the LOOP. Newly created regs are marked in TEMPORARY_REGS. DF is the dataflow object. */ static void move_invariants (struct loop *loop, struct df *df) { struct invariant *inv; unsigned i; for (i = 0; i < VARRAY_ACTIVE_SIZE (invariants); i++) { inv = VARRAY_GENERIC_PTR_NOGC (invariants, i); if (inv->move) move_invariant_reg (loop, i, df); } } /* Initializes invariant motion data. */ static void init_inv_motion_data (void) { actual_stamp = 1; if (!invariants) VARRAY_GENERIC_PTR_NOGC_INIT (invariants, 100, "invariants"); } /* Frees the data allocated by invariant motion. DF is the dataflow object. */ static void free_inv_motion_data (struct df *df) { unsigned i; struct def *def; struct invariant *inv; for (i = 0; i < df->n_defs; i++) { if (!df->defs[i]) continue; def = DF_REF_DATA (df->defs[i]); if (!def) continue; free_use_list (def->uses); free (def); DF_REF_DATA (df->defs[i]) = NULL; } for (i = 0; i < VARRAY_ACTIVE_SIZE (invariants); i++) { inv = VARRAY_GENERIC_PTR_NOGC (invariants, i); BITMAP_XFREE (inv->depends_on); free (inv); } VARRAY_POP_ALL (invariants); } /* Move the invariants out of the LOOP. DF is the dataflow object. */ static void move_single_loop_invariants (struct loop *loop, struct df *df) { init_inv_motion_data (); find_invariants (loop, df); find_invariants_to_move (df); move_invariants (loop, df); free_inv_motion_data (df); } /* Releases the auxiliary data for LOOP. */ static void free_loop_data (struct loop *loop) { struct loop_data *data = LOOP_DATA (loop); free (data); loop->aux = NULL; } /* Move the invariants out of the LOOPS. */ void move_loop_invariants (struct loops *loops) { struct loop *loop; unsigned i; struct df *df = df_init (); /* Process the loops, innermost first. */ loop = loops->tree_root; while (loop->inner) loop = loop->inner; while (loop != loops->tree_root) { move_single_loop_invariants (loop, df); if (loop->next) { loop = loop->next; while (loop->inner) loop = loop->inner; } else loop = loop->outer; } for (i = 1; i < loops->num; i++) if (loops->parray[i]) free_loop_data (loops->parray[i]); df_finish (df); } /* Do-nothing debug hooks for GCC. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The do-nothing debug hooks. */ const struct gcc_debug_hooks do_nothing_debug_hooks = { debug_nothing_charstar, debug_nothing_charstar, debug_nothing_int_charstar, debug_nothing_int_charstar, debug_nothing_int_charstar, debug_nothing_int, debug_nothing_int_int, /* begin_block */ debug_nothing_int_int, /* end_block */ debug_true_tree, /* ignore_block */ debug_nothing_int_charstar, /* source_line */ debug_nothing_int_charstar, /* begin_prologue */ debug_nothing_int_charstar, /* end_prologue */ debug_nothing_int_charstar, /* end_epilogue */ debug_nothing_tree, /* begin_function */ debug_nothing_int, /* end_function */ debug_nothing_tree, /* function_decl */ debug_nothing_tree, /* global_decl */ debug_nothing_tree_int, /* type_decl */ debug_nothing_tree_tree, /* imported_module_or_decl */ debug_nothing_tree, /* deferred_inline_function */ debug_nothing_tree, /* outlining_inline_function */ debug_nothing_rtx, /* label */ debug_nothing_int, /* handle_pch */ debug_nothing_rtx /* var_location */ }; /* This file contains implementations of each debug hook that do nothing. */ void debug_nothing_void (void) { } void debug_nothing_tree (tree decl ATTRIBUTE_UNUSED) { } void debug_nothing_tree_tree (tree t1 ATTRIBUTE_UNUSED, tree t2 ATTRIBUTE_UNUSED) { } bool debug_true_tree (tree block ATTRIBUTE_UNUSED) { return true; } void debug_nothing_rtx (rtx insn ATTRIBUTE_UNUSED) { } void debug_nothing_charstar (const char *main_filename ATTRIBUTE_UNUSED) { } void debug_nothing_int_charstar (unsigned int line ATTRIBUTE_UNUSED, const char *text ATTRIBUTE_UNUSED) { } void debug_nothing_int (unsigned int line ATTRIBUTE_UNUSED) { } void debug_nothing_int_int (unsigned int line ATTRIBUTE_UNUSED, unsigned int n ATTRIBUTE_UNUSED) { } void debug_nothing_tree_int (tree decl ATTRIBUTE_UNUSED, int local ATTRIBUTE_UNUSED) { } /* Dataflow support routines. Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. OVERVIEW: This file provides some dataflow routines for computing reaching defs, upward exposed uses, live variables, def-use chains, and use-def chains. The global dataflow is performed using simple iterative methods with a worklist and could be sped up by ordering the blocks with a depth first search order. A `struct ref' data structure (ref) is allocated for every register reference (def or use) and this records the insn and bb the ref is found within. The refs are linked together in chains of uses and defs for each insn and for each register. 
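For example, the uses recorded for a particular insn can be walked with the accessor macros used throughout this file, along the lines of (a sketch):

      struct df_link *link;
      for (link = DF_INSN_USES (df, insn); link; link = link->next)
        {
          struct ref *use = link->ref;
          ... examine DF_REF_REG (use), DF_REF_CHAIN (use), etc. ...
        }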
Each ref also has a chain field that links all the use refs for a def or all the def refs for a use. This is used to create use-def or def-use chains. USAGE: Here's an example of using the dataflow routines. struct df *df; df = df_init (); df_analyze (df, 0, DF_ALL); df_dump (df, DF_ALL, stderr); df_finish (df); df_init simply creates a poor man's object (df) that needs to be passed to all the dataflow routines. df_finish destroys this object and frees up any allocated memory. DF_ALL says to analyze everything. df_analyze performs the following: 1. Records defs and uses by scanning the insns in each basic block or by scanning the insns queued by df_insn_modify. 2. Links defs and uses into insn-def and insn-use chains. 3. Links defs and uses into reg-def and reg-use chains. 4. Assigns LUIDs to each insn (for modified blocks). 5. Calculates local reaching definitions. 6. Calculates global reaching definitions. 7. Creates use-def chains. 8. Calculates local reaching uses (upwards exposed uses). 9. Calculates global reaching uses. 10. Creates def-use chains. 11. Calculates local live registers. 12. Calculates global live registers. 13. Calculates register lifetimes and determines local registers. PHILOSOPHY: Note that the dataflow information is not updated for every newly deleted or created insn. If the dataflow information requires updating then all the changed, new, or deleted insns needs to be marked with df_insn_modify (or df_insns_modify) either directly or indirectly (say through calling df_insn_delete). df_insn_modify marks all the modified insns to get processed the next time df_analyze is called. Beware that tinkering with insns may invalidate the dataflow information. The philosophy behind these routines is that once the dataflow information has been gathered, the user should store what they require before they tinker with any insn. Once a reg is replaced, for example, then the reg-def/reg-use chains will point to the wrong place. Once a whole lot of changes have been made, df_analyze can be called again to update the dataflow information. Currently, this is not very smart with regard to propagating changes to the dataflow so it should not be called very often. DATA STRUCTURES: The basic object is a REF (reference) and this may either be a DEF (definition) or a USE of a register. These are linked into a variety of lists; namely reg-def, reg-use, insn-def, insn-use, def-use, and use-def lists. For example, the reg-def lists contain all the refs that define a given register while the insn-use lists contain all the refs used by an insn. Note that the reg-def and reg-use chains are generally short (except for the hard registers) and thus it is much faster to search these chains rather than searching the def or use bitmaps. If the insns are in SSA form then the reg-def and use-def lists should only contain the single defining ref. TODO: 1) Incremental dataflow analysis. Note that if a loop invariant insn is hoisted (or sunk), we do not need to change the def-use or use-def chains. All we have to do is to change the bb field for all the associated defs and uses and to renumber the LUIDs for the original and new basic blocks of the insn. When shadowing loop mems we create new uses and defs for new pseudos so we do not affect the existing dataflow information. My current strategy is to queue up all modified, created, or deleted insns so when df_analyze is called we can easily determine all the new or deleted refs. 
Currently the global dataflow information is recomputed from scratch but this could be propagated more efficiently. 2) Reduced memory requirements. We could operate a pool of ref structures. When a ref is deleted it gets returned to the pool (say by linking on to a chain of free refs). This will require a pair of bitmaps for defs and uses so that we can tell which ones have been changed. Alternatively, we could periodically squeeze the def and use tables and associated bitmaps and renumber the def and use ids. 3) Ordering of reg-def and reg-use lists. Should the first entry in the def list be the first def (within a BB)? Similarly, should the first entry in the use list be the last use (within a BB)? 4) Working with a sub-CFG. Often the whole CFG does not need to be analyzed, for example, when optimizing a loop, only certain registers are of interest. Perhaps there should be a bitmap argument to df_analyze to specify which registers should be analyzed? NOTES: Embedded addressing side-effects, such as POST_INC or PRE_INC, generate both a use and a def. These are both marked read/write to show that they are dependent. For example, (set (reg 40) (mem (post_inc (reg 42)))) will generate a use of reg 42 followed by a def of reg 42 (both marked read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41)))) generates a use of reg 41 then a def of reg 41 (both marked read/write), even though reg 41 is decremented before it is used for the memory address in this second example. A set to a REG inside a ZERO_EXTRACT, SIGN_EXTRACT, or SUBREG invokes a read-modify write operation. We generate both a use and a def and again mark them read/write. */ #define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \ do \ { \ unsigned int node_; \ EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_, \ {(BB) = BASIC_BLOCK (node_); CODE;}); \ } \ while (0) static alloc_pool df_ref_pool; static alloc_pool df_link_pool; static struct df *ddf; static void df_reg_table_realloc (struct df *, int); static void df_insn_table_realloc (struct df *, unsigned int); static void df_bb_table_realloc (struct df *, unsigned int); static void df_bitmaps_alloc (struct df *, bitmap, int); static void df_bitmaps_free (struct df *, int); static void df_free (struct df *); static void df_alloc (struct df *, int); static rtx df_reg_use_gen (unsigned int); static inline struct df_link *df_link_create (struct ref *, struct df_link *); static struct df_link *df_ref_unlink (struct df_link **, struct ref *); static void df_def_unlink (struct df *, struct ref *); static void df_use_unlink (struct df *, struct ref *); static void df_insn_refs_unlink (struct df *, basic_block, rtx); #if 0 static void df_bb_refs_unlink (struct df *, basic_block); static void df_refs_unlink (struct df *, bitmap); #endif static struct ref *df_ref_create (struct df *, rtx, rtx *, rtx, enum df_ref_type, enum df_ref_flags); static void df_ref_record_1 (struct df *, rtx, rtx *, rtx, enum df_ref_type, enum df_ref_flags); static void df_ref_record (struct df *, rtx, rtx *, rtx, enum df_ref_type, enum df_ref_flags); static void df_def_record_1 (struct df *, rtx, basic_block, rtx); static void df_defs_record (struct df *, rtx, basic_block, rtx); static void df_uses_record (struct df *, rtx *, enum df_ref_type, basic_block, rtx, enum df_ref_flags); static void df_insn_refs_record (struct df *, basic_block, rtx); static void df_bb_refs_record (struct df *, basic_block); static void df_refs_record (struct df *, bitmap); static void df_bb_reg_def_chain_create (struct df *, basic_block); 
static void df_reg_def_chain_create (struct df *, bitmap, bool); static void df_bb_reg_use_chain_create (struct df *, basic_block); static void df_reg_use_chain_create (struct df *, bitmap, bool); static void df_bb_du_chain_create (struct df *, basic_block, bitmap); static void df_du_chain_create (struct df *, bitmap); static void df_bb_ud_chain_create (struct df *, basic_block); static void df_ud_chain_create (struct df *, bitmap); static void df_bb_rd_local_compute (struct df *, basic_block, bitmap); static void df_rd_local_compute (struct df *, bitmap); static void df_bb_ru_local_compute (struct df *, basic_block); static void df_ru_local_compute (struct df *, bitmap); static void df_bb_lr_local_compute (struct df *, basic_block); static void df_lr_local_compute (struct df *, bitmap); static void df_bb_reg_info_compute (struct df *, basic_block, bitmap); static void df_reg_info_compute (struct df *, bitmap); static int df_bb_luids_set (struct df *df, basic_block); static int df_luids_set (struct df *df, bitmap); static int df_modified_p (struct df *, bitmap); static int df_refs_queue (struct df *); static int df_refs_process (struct df *); static int df_bb_refs_update (struct df *, basic_block); static int df_refs_update (struct df *, bitmap); static void df_analyze_1 (struct df *, bitmap, int, int); static void df_insns_modify (struct df *, basic_block, rtx, rtx); static int df_rtx_mem_replace (rtx *, void *); static int df_rtx_reg_replace (rtx *, void *); void df_refs_reg_replace (struct df *, bitmap, struct df_link *, rtx, rtx); static int df_def_dominates_all_uses_p (struct df *, struct ref *def); static int df_def_dominates_uses_p (struct df *, struct ref *def, bitmap); static struct ref *df_bb_insn_regno_last_use_find (struct df *, basic_block, rtx, unsigned int); static struct ref *df_bb_insn_regno_first_def_find (struct df *, basic_block, rtx, unsigned int); static void df_chain_dump (struct df_link *, FILE *file); static void df_chain_dump_regno (struct df_link *, FILE *file); static void df_regno_debug (struct df *, unsigned int, FILE *); static void df_ref_debug (struct df *, struct ref *, FILE *); static void df_rd_transfer_function (int, int *, void *, void *, void *, void *, void *); static void df_ru_transfer_function (int, int *, void *, void *, void *, void *, void *); static void df_lr_transfer_function (int, int *, void *, void *, void *, void *, void *); static void hybrid_search (basic_block, struct dataflow *, sbitmap, sbitmap, sbitmap); /* Local memory allocation/deallocation routines. */ /* Increase the insn info table to have space for at least SIZE + 1 elements. */ static void df_insn_table_realloc (struct df *df, unsigned int size) { size++; if (size <= df->insn_size) return; /* Make the table a little larger than requested, so we do not need to enlarge it so often. */ size += df->insn_size / 4; df->insns = xrealloc (df->insns, size * sizeof (struct insn_df_info)); memset (df->insns + df->insn_size, 0, (size - df->insn_size) * sizeof (struct insn_df_info)); df->insn_size = size; if (! df->insns_modified) { df->insns_modified = BITMAP_XMALLOC (); bitmap_zero (df->insns_modified); } } /* Increase the bb info table to have space for at least SIZE + 1 elements. */ static void df_bb_table_realloc (struct df *df, unsigned int size) { size++; if (size <= df->n_bbs) return; /* Make the table a little larger than requested, so we do not need to enlarge it so often. 
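For instance, a request that needs the table to cover index 100 when it currently holds 80 entries grows it to 101 + 80/4 = 121 entries rather than just 101, amortizing the cost of repeated xrealloc calls.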
*/ size += df->n_bbs / 4; df->bbs = xrealloc (df->bbs, size * sizeof (struct bb_df_info)); memset (df->bbs + df->n_bbs, 0, (size - df->n_bbs) * sizeof (struct bb_df_info)); df->n_bbs = size; } /* Increase the reg info table by SIZE more elements. */ static void df_reg_table_realloc (struct df *df, int size) { /* Make table 25 percent larger by default. */ if (! size) size = df->reg_size / 4; size += df->reg_size; if (size < max_reg_num ()) size = max_reg_num (); df->regs = xrealloc (df->regs, size * sizeof (struct reg_info)); df->reg_def_last = xrealloc (df->reg_def_last, size * sizeof (struct ref *)); /* Zero the new entries. */ memset (df->regs + df->reg_size, 0, (size - df->reg_size) * sizeof (struct reg_info)); df->reg_size = size; } /* Allocate bitmaps for each basic block. */ static void df_bitmaps_alloc (struct df *df, bitmap blocks, int flags) { basic_block bb; df->n_defs = df->def_id; df->n_uses = df->use_id; if (!blocks) blocks = df->all_blocks; FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); if (flags & DF_RD) { if (!bb_info->rd_in) { /* Allocate bitmaps for reaching definitions. */ bb_info->rd_kill = BITMAP_XMALLOC (); bb_info->rd_gen = BITMAP_XMALLOC (); bb_info->rd_in = BITMAP_XMALLOC (); bb_info->rd_out = BITMAP_XMALLOC (); } else { bitmap_clear (bb_info->rd_kill); bitmap_clear (bb_info->rd_gen); bitmap_clear (bb_info->rd_in); bitmap_clear (bb_info->rd_out); } } if (flags & DF_RU) { if (!bb_info->ru_in) { /* Allocate bitmaps for upward exposed uses. */ bb_info->ru_kill = BITMAP_XMALLOC (); bb_info->ru_gen = BITMAP_XMALLOC (); bb_info->ru_in = BITMAP_XMALLOC (); bb_info->ru_out = BITMAP_XMALLOC (); } else { bitmap_clear (bb_info->ru_kill); bitmap_clear (bb_info->ru_gen); bitmap_clear (bb_info->ru_in); bitmap_clear (bb_info->ru_out); } } if (flags & DF_LR) { if (!bb_info->lr_in) { /* Allocate bitmaps for live variables. */ bb_info->lr_def = BITMAP_XMALLOC (); bb_info->lr_use = BITMAP_XMALLOC (); bb_info->lr_in = BITMAP_XMALLOC (); bb_info->lr_out = BITMAP_XMALLOC (); } else { bitmap_clear (bb_info->lr_def); bitmap_clear (bb_info->lr_use); bitmap_clear (bb_info->lr_in); bitmap_clear (bb_info->lr_out); } } }); } /* Free bitmaps for each basic block. */ static void df_bitmaps_free (struct df *df, int flags) { basic_block bb; FOR_EACH_BB (bb) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); if (!bb_info) continue; if ((flags & DF_RD) && bb_info->rd_in) { /* Free bitmaps for reaching definitions. */ BITMAP_XFREE (bb_info->rd_kill); bb_info->rd_kill = NULL; BITMAP_XFREE (bb_info->rd_gen); bb_info->rd_gen = NULL; BITMAP_XFREE (bb_info->rd_in); bb_info->rd_in = NULL; BITMAP_XFREE (bb_info->rd_out); bb_info->rd_out = NULL; } if ((flags & DF_RU) && bb_info->ru_in) { /* Free bitmaps for upward exposed uses. */ BITMAP_XFREE (bb_info->ru_kill); bb_info->ru_kill = NULL; BITMAP_XFREE (bb_info->ru_gen); bb_info->ru_gen = NULL; BITMAP_XFREE (bb_info->ru_in); bb_info->ru_in = NULL; BITMAP_XFREE (bb_info->ru_out); bb_info->ru_out = NULL; } if ((flags & DF_LR) && bb_info->lr_in) { /* Free bitmaps for live variables. */ BITMAP_XFREE (bb_info->lr_def); bb_info->lr_def = NULL; BITMAP_XFREE (bb_info->lr_use); bb_info->lr_use = NULL; BITMAP_XFREE (bb_info->lr_in); bb_info->lr_in = NULL; BITMAP_XFREE (bb_info->lr_out); bb_info->lr_out = NULL; } } df->flags &= ~(flags & (DF_RD | DF_RU | DF_LR)); } /* Allocate and initialize dataflow memory. 
*/ static void df_alloc (struct df *df, int n_regs) { int n_insns; basic_block bb; df_link_pool = create_alloc_pool ("df_link pool", sizeof (struct df_link), 100); df_ref_pool = create_alloc_pool ("df_ref pool", sizeof (struct ref), 100); /* Perhaps we should use LUIDs to save memory for the insn_refs table. This is only a small saving; a few pointers. */ n_insns = get_max_uid () + 1; df->def_id = 0; df->n_defs = 0; /* Approximate number of defs by number of insns. */ df->def_size = n_insns; df->defs = xmalloc (df->def_size * sizeof (*df->defs)); df->use_id = 0; df->n_uses = 0; /* Approximate number of uses by twice number of insns. */ df->use_size = n_insns * 2; df->uses = xmalloc (df->use_size * sizeof (*df->uses)); df->n_regs = n_regs; df->n_bbs = last_basic_block; /* Allocate temporary working array used during local dataflow analysis. */ df_insn_table_realloc (df, n_insns); df_reg_table_realloc (df, df->n_regs); df->bbs_modified = BITMAP_XMALLOC (); bitmap_zero (df->bbs_modified); df->flags = 0; df->bbs = xcalloc (last_basic_block, sizeof (struct bb_df_info)); df->all_blocks = BITMAP_XMALLOC (); FOR_EACH_BB (bb) bitmap_set_bit (df->all_blocks, bb->index); } /* Free all the dataflow info. */ static void df_free (struct df *df) { df_bitmaps_free (df, DF_ALL); if (df->bbs) free (df->bbs); df->bbs = 0; if (df->insns) free (df->insns); df->insns = 0; df->insn_size = 0; if (df->defs) free (df->defs); df->defs = 0; df->def_size = 0; df->def_id = 0; if (df->uses) free (df->uses); df->uses = 0; df->use_size = 0; df->use_id = 0; if (df->regs) free (df->regs); df->regs = 0; df->reg_size = 0; if (df->bbs_modified) BITMAP_XFREE (df->bbs_modified); df->bbs_modified = 0; if (df->insns_modified) BITMAP_XFREE (df->insns_modified); df->insns_modified = 0; BITMAP_XFREE (df->all_blocks); df->all_blocks = 0; free_alloc_pool (df_ref_pool); free_alloc_pool (df_link_pool); } /* Local miscellaneous routines. */ /* Return a USE for register REGNO. */ static rtx df_reg_use_gen (unsigned int regno) { rtx reg; rtx use; reg = regno_reg_rtx[regno]; use = gen_rtx_USE (GET_MODE (reg), reg); return use; } /* Local chain manipulation routines. */ /* Create a link in a def-use or use-def chain. */ static inline struct df_link * df_link_create (struct ref *ref, struct df_link *next) { struct df_link *link; link = pool_alloc (df_link_pool); link->next = next; link->ref = ref; return link; } /* Releases members of the CHAIN. */ static void free_reg_ref_chain (struct df_link **chain) { struct df_link *act, *next; for (act = *chain; act; act = next) { next = act->next; pool_free (df_link_pool, act); } *chain = NULL; } /* Add REF to chain head pointed to by PHEAD. */ static struct df_link * df_ref_unlink (struct df_link **phead, struct ref *ref) { struct df_link *link = *phead; if (link) { if (! link->next) { /* Only a single ref. It must be the one we want. If not, the def-use and use-def chains are likely to be inconsistent. */ if (link->ref != ref) abort (); /* Now have an empty chain. */ *phead = NULL; } else { /* Multiple refs. One of them must be us. */ if (link->ref == ref) *phead = link->next; else { /* Follow chain. */ for (; link->next; link = link->next) { if (link->next->ref == ref) { /* Unlink from list. */ link->next = link->next->next; return link->next; } } } } } return link; } /* Unlink REF from all def-use/use-def chains, etc. 
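For a def, for example, this means removing it from the use-def chain of every use it reaches, from the reg-def chain of its register, and from the insn-def list of the insn containing it, and clearing its slot in the defs table; uses are handled symmetrically.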
*/ int df_ref_remove (struct df *df, struct ref *ref) { if (DF_REF_REG_DEF_P (ref)) { df_def_unlink (df, ref); df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].defs, ref); } else { df_use_unlink (df, ref); df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].uses, ref); } return 1; } /* Unlink DEF from use-def and reg-def chains. */ static void df_def_unlink (struct df *df ATTRIBUTE_UNUSED, struct ref *def) { struct df_link *du_link; unsigned int dregno = DF_REF_REGNO (def); /* Follow def-use chain to find all the uses of this def. */ for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next) { struct ref *use = du_link->ref; /* Unlink this def from the use-def chain. */ df_ref_unlink (&DF_REF_CHAIN (use), def); } DF_REF_CHAIN (def) = 0; /* Unlink def from reg-def chain. */ df_ref_unlink (&df->regs[dregno].defs, def); df->defs[DF_REF_ID (def)] = 0; } /* Unlink use from def-use and reg-use chains. */ static void df_use_unlink (struct df *df ATTRIBUTE_UNUSED, struct ref *use) { struct df_link *ud_link; unsigned int uregno = DF_REF_REGNO (use); /* Follow use-def chain to find all the defs of this use. */ for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next) { struct ref *def = ud_link->ref; /* Unlink this use from the def-use chain. */ df_ref_unlink (&DF_REF_CHAIN (def), use); } DF_REF_CHAIN (use) = 0; /* Unlink use from reg-use chain. */ df_ref_unlink (&df->regs[uregno].uses, use); df->uses[DF_REF_ID (use)] = 0; } /* Local routines for recording refs. */ /* Create a new ref of type DF_REF_TYPE for register REG at address LOC within INSN of BB. */ static struct ref * df_ref_create (struct df *df, rtx reg, rtx *loc, rtx insn, enum df_ref_type ref_type, enum df_ref_flags ref_flags) { struct ref *this_ref; this_ref = pool_alloc (df_ref_pool); DF_REF_REG (this_ref) = reg; DF_REF_LOC (this_ref) = loc; DF_REF_INSN (this_ref) = insn; DF_REF_CHAIN (this_ref) = 0; DF_REF_TYPE (this_ref) = ref_type; DF_REF_FLAGS (this_ref) = ref_flags; DF_REF_DATA (this_ref) = NULL; if (ref_type == DF_REF_REG_DEF) { if (df->def_id >= df->def_size) { /* Make table 25 percent larger. */ df->def_size += (df->def_size / 4); df->defs = xrealloc (df->defs, df->def_size * sizeof (*df->defs)); } DF_REF_ID (this_ref) = df->def_id; df->defs[df->def_id++] = this_ref; } else { if (df->use_id >= df->use_size) { /* Make table 25 percent larger. */ df->use_size += (df->use_size / 4); df->uses = xrealloc (df->uses, df->use_size * sizeof (*df->uses)); } DF_REF_ID (this_ref) = df->use_id; df->uses[df->use_id++] = this_ref; } return this_ref; } /* Create a new reference of type DF_REF_TYPE for a single register REG, used inside the LOC rtx of INSN. */ static void df_ref_record_1 (struct df *df, rtx reg, rtx *loc, rtx insn, enum df_ref_type ref_type, enum df_ref_flags ref_flags) { df_ref_create (df, reg, loc, insn, ref_type, ref_flags); } /* Create new references of type DF_REF_TYPE for each part of register REG at address LOC within INSN of BB. */ static void df_ref_record (struct df *df, rtx reg, rtx *loc, rtx insn, enum df_ref_type ref_type, enum df_ref_flags ref_flags) { unsigned int regno; if (!REG_P (reg) && GET_CODE (reg) != SUBREG) abort (); /* For the reg allocator we are interested in some SUBREG rtx's, but not all. Notably only those representing a word extraction from a multi-word reg. As written in the docu those should have the form (subreg:SI (reg:M A) N), with size(SImode) > size(Mmode). XXX Is that true? We could also use the global word_mode variable. 
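   Concretely, assuming a target where word_mode is SImode (4 bytes), the
   test below behaves as follows:

       (subreg:SI (reg:DI 100) 4)    word extraction from a multi-word reg;
                                     kept, the ref is recorded on the SUBREG
       (subreg:QI (reg:SI 100) 0)    narrower than a word; stripped, the ref
                                     is recorded on (reg:SI 100) and
                                     DF_REF_STRIPPED is set
       (subreg:DI (reg:SI 100) 0)    paradoxical (outer at least as wide as
                                     the inner reg); likewise stripped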
*/ if (GET_CODE (reg) == SUBREG && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode) || GET_MODE_SIZE (GET_MODE (reg)) >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg))))) { loc = &SUBREG_REG (reg); reg = *loc; ref_flags |= DF_REF_STRIPPED; } regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg); if (regno < FIRST_PSEUDO_REGISTER) { int i; int endregno; if (! (df->flags & DF_HARD_REGS)) return; /* GET_MODE (reg) is correct here. We do not want to go into a SUBREG for the mode, because we only want to add references to regs, which are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_ reference the whole reg 0 in DI mode (which would also include reg 1, at least, if 0 and 1 are SImode registers). */ endregno = hard_regno_nregs[regno][GET_MODE (reg)]; if (GET_CODE (reg) == SUBREG) regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)), SUBREG_BYTE (reg), GET_MODE (reg)); endregno += regno; for (i = regno; i < endregno; i++) df_ref_record_1 (df, regno_reg_rtx[i], loc, insn, ref_type, ref_flags); } else { df_ref_record_1 (df, reg, loc, insn, ref_type, ref_flags); } } /* Return nonzero if writes to paradoxical SUBREGs, or SUBREGs which are too narrow, are read-modify-write. */ bool read_modify_subreg_p (rtx x) { unsigned int isize, osize; if (GET_CODE (x) != SUBREG) return false; isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))); osize = GET_MODE_SIZE (GET_MODE (x)); /* Paradoxical subreg writes don't leave a trace of the old content. */ return (isize > osize && isize > UNITS_PER_WORD); } /* Process all the registers defined in the rtx, X. */ static void df_def_record_1 (struct df *df, rtx x, basic_block bb, rtx insn) { rtx *loc; rtx dst; enum df_ref_flags flags = 0; /* We may recursively call ourselves on EXPR_LIST when dealing with PARALLEL construct. */ if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER) loc = &XEXP (x, 0); else loc = &SET_DEST (x); dst = *loc; /* Some targets place small structures in registers for return values of functions. */ if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode) { int i; for (i = XVECLEN (dst, 0) - 1; i >= 0; i--) { rtx temp = XVECEXP (dst, 0, i); if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER || GET_CODE (temp) == SET) df_def_record_1 (df, temp, bb, insn); } return; } /* Maybe, we should flag the use of STRICT_LOW_PART somehow. It might be handy for the reg allocator. */ while (GET_CODE (dst) == STRICT_LOW_PART || GET_CODE (dst) == ZERO_EXTRACT || GET_CODE (dst) == SIGN_EXTRACT || ((df->flags & DF_FOR_REGALLOC) == 0 && read_modify_subreg_p (dst))) { /* Strict low part always contains SUBREG, but we do not want to make it appear outside, as whole register is always considered. */ if (GET_CODE (dst) == STRICT_LOW_PART) { loc = &XEXP (dst, 0); dst = *loc; } loc = &XEXP (dst, 0); dst = *loc; flags |= DF_REF_READ_WRITE; } if (REG_P (dst) || (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst)))) df_ref_record (df, dst, loc, insn, DF_REF_REG_DEF, flags); } /* Process all the registers defined in the pattern rtx, X. */ static void df_defs_record (struct df *df, rtx x, basic_block bb, rtx insn) { RTX_CODE code = GET_CODE (x); if (code == SET || code == CLOBBER) { /* Mark the single def within the pattern. */ df_def_record_1 (df, x, bb, insn); } else if (code == PARALLEL) { int i; /* Mark the multiple defs within the pattern. 
*/ for (i = XVECLEN (x, 0) - 1; i >= 0; i--) { code = GET_CODE (XVECEXP (x, 0, i)); if (code == SET || code == CLOBBER) df_def_record_1 (df, XVECEXP (x, 0, i), bb, insn); } } } /* Process all the registers used in the rtx at address LOC. */ static void df_uses_record (struct df *df, rtx *loc, enum df_ref_type ref_type, basic_block bb, rtx insn, enum df_ref_flags flags) { RTX_CODE code; rtx x; retry: x = *loc; if (!x) return; code = GET_CODE (x); switch (code) { case LABEL_REF: case SYMBOL_REF: case CONST_INT: case CONST: case CONST_DOUBLE: case CONST_VECTOR: case PC: case CC0: case ADDR_VEC: case ADDR_DIFF_VEC: return; case CLOBBER: /* If we are clobbering a MEM, mark any registers inside the address as being used. */ if (MEM_P (XEXP (x, 0))) df_uses_record (df, &XEXP (XEXP (x, 0), 0), DF_REF_REG_MEM_STORE, bb, insn, flags); /* If we're clobbering a REG then we have a def so ignore. */ return; case MEM: df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn, 0); return; case SUBREG: /* While we're here, optimize this case. */ /* In case the SUBREG is not of a REG, do not optimize. */ if (!REG_P (SUBREG_REG (x))) { loc = &SUBREG_REG (x); df_uses_record (df, loc, ref_type, bb, insn, flags); return; } /* ... Fall through ... */ case REG: df_ref_record (df, x, loc, insn, ref_type, flags); return; case SET: { rtx dst = SET_DEST (x); df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn, 0); switch (GET_CODE (dst)) { case SUBREG: if ((df->flags & DF_FOR_REGALLOC) == 0 && read_modify_subreg_p (dst)) { df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb, insn, DF_REF_READ_WRITE); break; } /* Fall through. */ case REG: case PARALLEL: case PC: case CC0: break; case MEM: df_uses_record (df, &XEXP (dst, 0), DF_REF_REG_MEM_STORE, bb, insn, 0); break; case STRICT_LOW_PART: /* A strict_low_part uses the whole REG and not just the SUBREG. */ dst = XEXP (dst, 0); if (GET_CODE (dst) != SUBREG) abort (); df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb, insn, DF_REF_READ_WRITE); break; case ZERO_EXTRACT: case SIGN_EXTRACT: df_uses_record (df, &XEXP (dst, 0), DF_REF_REG_USE, bb, insn, DF_REF_READ_WRITE); df_uses_record (df, &XEXP (dst, 1), DF_REF_REG_USE, bb, insn, 0); df_uses_record (df, &XEXP (dst, 2), DF_REF_REG_USE, bb, insn, 0); dst = XEXP (dst, 0); break; default: abort (); } return; } case RETURN: break; case ASM_OPERANDS: case UNSPEC_VOLATILE: case TRAP_IF: case ASM_INPUT: { /* Traditional and volatile asm instructions must be considered to use and clobber all hard registers, all pseudo-registers and all of memory. So must TRAP_IF and UNSPEC_VOLATILE operations. Consider for instance a volatile asm that changes the fpu rounding mode. An insn should not be moved across this even if it only uses pseudo-regs because it might give an incorrectly rounded result. For now, just mark any regs we can find in ASM_OPERANDS as used. */ /* For all ASM_OPERANDS, we must traverse the vector of input operands. We can not just fall through here since then we would be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate traditional asms unlike their normal usage. */ if (code == ASM_OPERANDS) { int j; for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++) df_uses_record (df, &ASM_OPERANDS_INPUT (x, j), DF_REF_REG_USE, bb, insn, 0); return; } break; } case PRE_DEC: case POST_DEC: case PRE_INC: case POST_INC: case PRE_MODIFY: case POST_MODIFY: /* Catch the def of the register being modified. 
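   For example, for an address such as

       (mem:SI (post_inc:SI (reg:SI 100)))

   the scan records (reg:SI 100) as a DF_REF_READ_WRITE def here and then,
   by falling through to the generic operand walk, also as a use, with the
   reference type inherited from the enclosing MEM address.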
*/ df_ref_record (df, XEXP (x, 0), &XEXP (x, 0), insn, DF_REF_REG_DEF, DF_REF_READ_WRITE); /* ... Fall through to handle uses ... */ default: break; } /* Recursively scan the operands of this expression. */ { const char *fmt = GET_RTX_FORMAT (code); int i; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { /* Tail recursive case: save a function call level. */ if (i == 0) { loc = &XEXP (x, 0); goto retry; } df_uses_record (df, &XEXP (x, i), ref_type, bb, insn, flags); } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) df_uses_record (df, &XVECEXP (x, i, j), ref_type, bb, insn, flags); } } } } /* Record all the df within INSN of basic block BB. */ static void df_insn_refs_record (struct df *df, basic_block bb, rtx insn) { int i; if (INSN_P (insn)) { rtx note; /* Record register defs. */ df_defs_record (df, PATTERN (insn), bb, insn); if (df->flags & DF_EQUIV_NOTES) for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) { switch (REG_NOTE_KIND (note)) { case REG_EQUIV: case REG_EQUAL: df_uses_record (df, &XEXP (note, 0), DF_REF_REG_USE, bb, insn, 0); default: break; } } if (GET_CODE (insn) == CALL_INSN) { rtx note; rtx x; /* Record the registers used to pass arguments. */ for (note = CALL_INSN_FUNCTION_USAGE (insn); note; note = XEXP (note, 1)) { if (GET_CODE (XEXP (note, 0)) == USE) df_uses_record (df, &XEXP (XEXP (note, 0), 0), DF_REF_REG_USE, bb, insn, 0); } /* The stack ptr is used (honorarily) by a CALL insn. */ x = df_reg_use_gen (STACK_POINTER_REGNUM); df_uses_record (df, &XEXP (x, 0), DF_REF_REG_USE, bb, insn, 0); if (df->flags & DF_HARD_REGS) { /* Calls may also reference any of the global registers, so they are recorded as used. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (global_regs[i]) { x = df_reg_use_gen (i); df_uses_record (df, &SET_DEST (x), DF_REF_REG_USE, bb, insn, 0); } } } /* Record the register uses. */ df_uses_record (df, &PATTERN (insn), DF_REF_REG_USE, bb, insn, 0); if (GET_CODE (insn) == CALL_INSN) { rtx note; /* We do not record hard registers clobbered by the call, since there are awfully many of them and "defs" created through them are not interesting (since no use can be legally reached by them). So we must just make sure we include them when computing kill bitmaps. */ /* There may be extra registers to be clobbered. */ for (note = CALL_INSN_FUNCTION_USAGE (insn); note; note = XEXP (note, 1)) if (GET_CODE (XEXP (note, 0)) == CLOBBER) df_defs_record (df, XEXP (note, 0), bb, insn); } } } /* Record all the refs within the basic block BB. */ static void df_bb_refs_record (struct df *df, basic_block bb) { rtx insn; /* Scan the block an insn at a time from beginning to end. */ FOR_BB_INSNS (bb, insn) { if (INSN_P (insn)) { /* Record defs within INSN. */ df_insn_refs_record (df, bb, insn); } } } /* Record all the refs in the basic blocks specified by BLOCKS. */ static void df_refs_record (struct df *df, bitmap blocks) { basic_block bb; FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_refs_record (df, bb); }); } /* Dataflow analysis routines. */ /* Create reg-def chains for basic block BB. These are a list of definitions for each register. */ static void df_bb_reg_def_chain_create (struct df *df, basic_block bb) { rtx insn; /* Perhaps the defs should be sorted using a depth first search of the CFG (or possibly a breadth first search). */ FOR_BB_INSNS_REVERSE (bb, insn) { struct df_link *link; unsigned int uid = INSN_UID (insn); if (! 
INSN_P (insn)) continue; for (link = df->insns[uid].defs; link; link = link->next) { struct ref *def = link->ref; unsigned int dregno = DF_REF_REGNO (def); /* Do not add ref's to the chain twice, i.e., only add new refs. XXX the same could be done by testing if the current insn is a modified (or a new) one. This would be faster. */ if (DF_REF_ID (def) < df->def_id_save) continue; df->regs[dregno].defs = df_link_create (def, df->regs[dregno].defs); } } } /* Create reg-def chains for each basic block within BLOCKS. These are a list of definitions for each register. If REDO is true, add all defs, otherwise just add the new defs. */ static void df_reg_def_chain_create (struct df *df, bitmap blocks, bool redo) { basic_block bb; #ifdef ENABLE_CHECKING unsigned regno; #endif unsigned old_def_id_save = df->def_id_save; if (redo) { #ifdef ENABLE_CHECKING for (regno = 0; regno < df->n_regs; regno++) if (df->regs[regno].defs) abort (); #endif /* Pretend that all defs are new. */ df->def_id_save = 0; } FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_reg_def_chain_create (df, bb); }); df->def_id_save = old_def_id_save; } /* Remove all reg-def chains stored in the dataflow object DF. */ static void df_reg_def_chain_clean (struct df *df) { unsigned regno; for (regno = 0; regno < df->n_regs; regno++) free_reg_ref_chain (&df->regs[regno].defs); } /* Create reg-use chains for basic block BB. These are a list of uses for each register. */ static void df_bb_reg_use_chain_create (struct df *df, basic_block bb) { rtx insn; /* Scan in forward order so that the last uses appear at the start of the chain. */ FOR_BB_INSNS (bb, insn) { struct df_link *link; unsigned int uid = INSN_UID (insn); if (! INSN_P (insn)) continue; for (link = df->insns[uid].uses; link; link = link->next) { struct ref *use = link->ref; unsigned int uregno = DF_REF_REGNO (use); /* Do not add ref's to the chain twice, i.e., only add new refs. XXX the same could be done by testing if the current insn is a modified (or a new) one. This would be faster. */ if (DF_REF_ID (use) < df->use_id_save) continue; df->regs[uregno].uses = df_link_create (use, df->regs[uregno].uses); } } } /* Create reg-use chains for each basic block within BLOCKS. These are a list of uses for each register. If REDO is true, remove the old reg-use chains first, otherwise just add new uses to them. */ static void df_reg_use_chain_create (struct df *df, bitmap blocks, bool redo) { basic_block bb; #ifdef ENABLE_CHECKING unsigned regno; #endif unsigned old_use_id_save = df->use_id_save; if (redo) { #ifdef ENABLE_CHECKING for (regno = 0; regno < df->n_regs; regno++) if (df->regs[regno].uses) abort (); #endif /* Pretend that all uses are new. */ df->use_id_save = 0; } FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_reg_use_chain_create (df, bb); }); df->use_id_save = old_use_id_save; } /* Remove all reg-use chains stored in the dataflow object DF. */ static void df_reg_use_chain_clean (struct df *df) { unsigned regno; for (regno = 0; regno < df->n_regs; regno++) free_reg_ref_chain (&df->regs[regno].uses); } /* Create def-use chains from reaching use bitmaps for basic block BB. */ static void df_bb_du_chain_create (struct df *df, basic_block bb, bitmap ru) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); rtx insn; bitmap_copy (ru, bb_info->ru_out); /* For each def in BB create a linked list (chain) of uses reached from the def. */ FOR_BB_INSNS_REVERSE (bb, insn) { struct df_link *def_link; struct df_link *use_link; unsigned int uid = INSN_UID (insn); if (! 
INSN_P (insn)) continue; /* For each def in insn... */ for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next) { struct ref *def = def_link->ref; unsigned int dregno = DF_REF_REGNO (def); DF_REF_CHAIN (def) = 0; /* While the reg-use chains are not essential, it is _much_ faster to search these short lists rather than all the reaching uses, especially for large functions. */ for (use_link = df->regs[dregno].uses; use_link; use_link = use_link->next) { struct ref *use = use_link->ref; if (bitmap_bit_p (ru, DF_REF_ID (use))) { DF_REF_CHAIN (def) = df_link_create (use, DF_REF_CHAIN (def)); bitmap_clear_bit (ru, DF_REF_ID (use)); } } } /* For each use in insn... */ for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next) { struct ref *use = use_link->ref; bitmap_set_bit (ru, DF_REF_ID (use)); } } } /* Create def-use chains from reaching use bitmaps for basic blocks in BLOCKS. */ static void df_du_chain_create (struct df *df, bitmap blocks) { bitmap ru; basic_block bb; ru = BITMAP_XMALLOC (); FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_du_chain_create (df, bb, ru); }); BITMAP_XFREE (ru); } /* Create use-def chains from reaching def bitmaps for basic block BB. */ static void df_bb_ud_chain_create (struct df *df, basic_block bb) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); struct ref **reg_def_last = df->reg_def_last; rtx insn; memset (reg_def_last, 0, df->n_regs * sizeof (struct ref *)); /* For each use in BB create a linked list (chain) of defs that reach the use. */ FOR_BB_INSNS (bb, insn) { unsigned int uid = INSN_UID (insn); struct df_link *use_link; struct df_link *def_link; if (! INSN_P (insn)) continue; /* For each use in insn... */ for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next) { struct ref *use = use_link->ref; unsigned int regno = DF_REF_REGNO (use); DF_REF_CHAIN (use) = 0; /* Has regno been defined in this BB yet? If so, use the last def as the single entry for the use-def chain for this use. Otherwise, we need to add all the defs using this regno that reach the start of this BB. */ if (reg_def_last[regno]) { DF_REF_CHAIN (use) = df_link_create (reg_def_last[regno], 0); } else { /* While the reg-def chains are not essential, it is _much_ faster to search these short lists rather than all the reaching defs, especially for large functions. */ for (def_link = df->regs[regno].defs; def_link; def_link = def_link->next) { struct ref *def = def_link->ref; if (bitmap_bit_p (bb_info->rd_in, DF_REF_ID (def))) { DF_REF_CHAIN (use) = df_link_create (def, DF_REF_CHAIN (use)); } } } } /* For each def in insn... record the last def of each reg. */ for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next) { struct ref *def = def_link->ref; int dregno = DF_REF_REGNO (def); reg_def_last[dregno] = def; } } } /* Create use-def chains from reaching def bitmaps for basic blocks within BLOCKS. 
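   For reference, the three transfer functions a little further below all
   reduce to the usual gen/kill dataflow equations.  bitmap_union_of_diff
   (DST, A, B, C) computes DST = A | (B & ~C) and reports whether DST
   changed, so informally:

       reaching defs (forward):          out[b] = gen[b] | (in[b]  & ~kill[b])
       upward exposed uses (backward):   in[b]  = gen[b] | (out[b] & ~kill[b])
       live registers (backward):        in[b]  = use[b] | (out[b] & ~def[b])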
*/ static void df_ud_chain_create (struct df *df, bitmap blocks) { basic_block bb; FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_ud_chain_create (df, bb); }); } static void df_rd_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, void *in, void *out, void *gen, void *kill, void *data ATTRIBUTE_UNUSED) { *changed = bitmap_union_of_diff (out, gen, in, kill); } static void df_ru_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, void *in, void *out, void *gen, void *kill, void *data ATTRIBUTE_UNUSED) { *changed = bitmap_union_of_diff (in, gen, out, kill); } static void df_lr_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, void *in, void *out, void *use, void *def, void *data ATTRIBUTE_UNUSED) { *changed = bitmap_union_of_diff (in, use, out, def); } /* Compute local reaching def info for basic block BB. */ static void df_bb_rd_local_compute (struct df *df, basic_block bb, bitmap call_killed_defs) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); rtx insn; bitmap seen = BITMAP_XMALLOC (); bool call_seen = false; FOR_BB_INSNS_REVERSE (bb, insn) { unsigned int uid = INSN_UID (insn); struct df_link *def_link; if (! INSN_P (insn)) continue; for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next) { struct ref *def = def_link->ref; unsigned int regno = DF_REF_REGNO (def); struct df_link *def2_link; if (bitmap_bit_p (seen, regno) || (call_seen && regno < FIRST_PSEUDO_REGISTER && TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))) continue; for (def2_link = df->regs[regno].defs; def2_link; def2_link = def2_link->next) { struct ref *def2 = def2_link->ref; /* Add all defs of this reg to the set of kills. This is greedy since many of these defs will not actually be killed by this BB but it keeps things a lot simpler. */ bitmap_set_bit (bb_info->rd_kill, DF_REF_ID (def2)); } bitmap_set_bit (bb_info->rd_gen, DF_REF_ID (def)); bitmap_set_bit (seen, regno); } if (GET_CODE (insn) == CALL_INSN && (df->flags & DF_HARD_REGS)) { bitmap_operation (bb_info->rd_kill, bb_info->rd_kill, call_killed_defs, BITMAP_IOR); call_seen = 1; } } BITMAP_XFREE (seen); } /* Compute local reaching def info for each basic block within BLOCKS. */ static void df_rd_local_compute (struct df *df, bitmap blocks) { basic_block bb; bitmap killed_by_call = NULL; unsigned regno; struct df_link *def_link; if (df->flags & DF_HARD_REGS) { killed_by_call = BITMAP_XMALLOC (); for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) { if (!TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) continue; for (def_link = df->regs[regno].defs; def_link; def_link = def_link->next) bitmap_set_bit (killed_by_call, DF_REF_ID (def_link->ref)); } } FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_rd_local_compute (df, bb, killed_by_call); }); if (df->flags & DF_HARD_REGS) BITMAP_XFREE (killed_by_call); } /* Compute local reaching use (upward exposed use) info for basic block BB. */ static void df_bb_ru_local_compute (struct df *df, basic_block bb) { /* This is much more tricky than computing reaching defs. With reaching defs, defs get killed by other defs. With upwards exposed uses, these get killed by defs with the same regno. */ struct bb_df_info *bb_info = DF_BB_INFO (df, bb); rtx insn; FOR_BB_INSNS_REVERSE (bb, insn) { unsigned int uid = INSN_UID (insn); struct df_link *def_link; struct df_link *use_link; if (! 
INSN_P (insn)) continue; for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next) { struct ref *def = def_link->ref; unsigned int dregno = DF_REF_REGNO (def); for (use_link = df->regs[dregno].uses; use_link; use_link = use_link->next) { struct ref *use = use_link->ref; /* Add all uses of this reg to the set of kills. This is greedy since many of these uses will not actually be killed by this BB but it keeps things a lot simpler. */ bitmap_set_bit (bb_info->ru_kill, DF_REF_ID (use)); /* Zap from the set of gens for this BB. */ bitmap_clear_bit (bb_info->ru_gen, DF_REF_ID (use)); } } for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next) { struct ref *use = use_link->ref; /* Add use to set of gens in this BB. */ bitmap_set_bit (bb_info->ru_gen, DF_REF_ID (use)); } } } /* Compute local reaching use (upward exposed use) info for each basic block within BLOCKS. */ static void df_ru_local_compute (struct df *df, bitmap blocks) { basic_block bb; FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_ru_local_compute (df, bb); }); } /* Compute local live variable info for basic block BB. */ static void df_bb_lr_local_compute (struct df *df, basic_block bb) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); rtx insn; FOR_BB_INSNS_REVERSE (bb, insn) { unsigned int uid = INSN_UID (insn); struct df_link *link; if (! INSN_P (insn)) continue; for (link = df->insns[uid].defs; link; link = link->next) { struct ref *def = link->ref; unsigned int dregno = DF_REF_REGNO (def); /* Add def to set of defs in this BB. */ bitmap_set_bit (bb_info->lr_def, dregno); bitmap_clear_bit (bb_info->lr_use, dregno); } for (link = df->insns[uid].uses; link; link = link->next) { struct ref *use = link->ref; /* Add use to set of uses in this BB. */ bitmap_set_bit (bb_info->lr_use, DF_REF_REGNO (use)); } } } /* Compute local live variable info for each basic block within BLOCKS. */ static void df_lr_local_compute (struct df *df, bitmap blocks) { basic_block bb; FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_lr_local_compute (df, bb); }); } /* Compute register info: lifetime, bb, and number of defs and uses for basic block BB. */ static void df_bb_reg_info_compute (struct df *df, basic_block bb, bitmap live) { struct reg_info *reg_info = df->regs; struct bb_df_info *bb_info = DF_BB_INFO (df, bb); rtx insn; bitmap_copy (live, bb_info->lr_out); FOR_BB_INSNS_REVERSE (bb, insn) { unsigned int uid = INSN_UID (insn); unsigned int regno; struct df_link *link; if (! INSN_P (insn)) continue; for (link = df->insns[uid].defs; link; link = link->next) { struct ref *def = link->ref; unsigned int dregno = DF_REF_REGNO (def); /* Kill this register. */ bitmap_clear_bit (live, dregno); reg_info[dregno].n_defs++; } for (link = df->insns[uid].uses; link; link = link->next) { struct ref *use = link->ref; unsigned int uregno = DF_REF_REGNO (use); /* This register is now live. */ bitmap_set_bit (live, uregno); reg_info[uregno].n_uses++; } /* Increment lifetimes of all live registers. */ EXECUTE_IF_SET_IN_BITMAP (live, 0, regno, { reg_info[regno].lifetime++; }); } } /* Compute register info: lifetime, bb, and number of defs and uses. */ static void df_reg_info_compute (struct df *df, bitmap blocks) { basic_block bb; bitmap live; live = BITMAP_XMALLOC (); FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_reg_info_compute (df, bb, live); }); BITMAP_XFREE (live); } /* Assign LUIDs for BB. 
*/ static int df_bb_luids_set (struct df *df, basic_block bb) { rtx insn; int luid = 0; /* The LUIDs are monotonically increasing for each basic block. */ FOR_BB_INSNS (bb, insn) { if (INSN_P (insn)) DF_INSN_LUID (df, insn) = luid++; DF_INSN_LUID (df, insn) = luid; } return luid; } /* Assign LUIDs for each basic block within BLOCKS. */ static int df_luids_set (struct df *df, bitmap blocks) { basic_block bb; int total = 0; FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { total += df_bb_luids_set (df, bb); }); return total; } /* Perform dataflow analysis using existing DF structure for blocks within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */ static void df_analyze_1 (struct df *df, bitmap blocks, int flags, int update) { int aflags; int dflags; int i; basic_block bb; struct dataflow dflow; dflags = 0; aflags = flags; if (flags & DF_UD_CHAIN) aflags |= DF_RD | DF_RD_CHAIN; if (flags & DF_DU_CHAIN) aflags |= DF_RU; if (flags & DF_RU) aflags |= DF_RU_CHAIN; if (flags & DF_REG_INFO) aflags |= DF_LR; if (! blocks) blocks = df->all_blocks; df->flags = flags; if (update) { df_refs_update (df, NULL); /* More fine grained incremental dataflow analysis would be nice. For now recompute the whole shebang for the modified blocks. */ #if 0 df_refs_unlink (df, blocks); #endif /* All the def-use, use-def chains can be potentially modified by changes in one block. The size of the bitmaps can also change. */ } else { /* Scan the function for all register defs and uses. */ df_refs_queue (df); df_refs_record (df, blocks); /* Link all the new defs and uses to the insns. */ df_refs_process (df); } /* Allocate the bitmaps now the total number of defs and uses are known. If the number of defs or uses have changed, then these bitmaps need to be reallocated. */ df_bitmaps_alloc (df, NULL, aflags); /* Set the LUIDs for each specified basic block. */ df_luids_set (df, blocks); /* Recreate reg-def and reg-use chains from scratch so that first def is at the head of the reg-def chain and the last use is at the head of the reg-use chain. This is only important for regs local to a basic block as it speeds up searching. */ if (aflags & DF_RD_CHAIN) { df_reg_def_chain_create (df, blocks, false); } if (aflags & DF_RU_CHAIN) { df_reg_use_chain_create (df, blocks, false); } df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks); df->rc_order = xmalloc (sizeof (int) * n_basic_blocks); df->rts_order = xmalloc (sizeof (int) * n_basic_blocks); df->inverse_dfs_map = xmalloc (sizeof (int) * last_basic_block); df->inverse_rc_map = xmalloc (sizeof (int) * last_basic_block); df->inverse_rts_map = xmalloc (sizeof (int) * last_basic_block); flow_depth_first_order_compute (df->dfs_order, df->rc_order); flow_reverse_top_sort_order_compute (df->rts_order); for (i = 0; i < n_basic_blocks; i++) { df->inverse_dfs_map[df->dfs_order[i]] = i; df->inverse_rc_map[df->rc_order[i]] = i; df->inverse_rts_map[df->rts_order[i]] = i; } if (aflags & DF_RD) { /* Compute the sets of gens and kills for the defs of each bb. */ dflow.in = xmalloc (sizeof (bitmap) * last_basic_block); dflow.out = xmalloc (sizeof (bitmap) * last_basic_block); dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block); dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block); df_rd_local_compute (df, df->flags & DF_RD ? 
blocks : df->all_blocks); FOR_EACH_BB (bb) { dflow.in[bb->index] = DF_BB_INFO (df, bb)->rd_in; dflow.out[bb->index] = DF_BB_INFO (df, bb)->rd_out; dflow.gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen; dflow.kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill; } dflow.repr = SR_BITMAP; dflow.dir = DF_FORWARD; dflow.conf_op = DF_UNION; dflow.transfun = df_rd_transfer_function; dflow.n_blocks = n_basic_blocks; dflow.order = df->rc_order; dflow.data = NULL; iterative_dataflow (&dflow); free (dflow.in); free (dflow.out); free (dflow.gen); free (dflow.kill); } if (aflags & DF_UD_CHAIN) { /* Create use-def chains. */ df_ud_chain_create (df, df->all_blocks); if (! (flags & DF_RD)) dflags |= DF_RD; } if (aflags & DF_RU) { /* Compute the sets of gens and kills for the upwards exposed uses in each bb. */ dflow.in = xmalloc (sizeof (bitmap) * last_basic_block); dflow.out = xmalloc (sizeof (bitmap) * last_basic_block); dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block); dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block); df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks); FOR_EACH_BB (bb) { dflow.in[bb->index] = DF_BB_INFO (df, bb)->ru_in; dflow.out[bb->index] = DF_BB_INFO (df, bb)->ru_out; dflow.gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen; dflow.kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill; } dflow.repr = SR_BITMAP; dflow.dir = DF_BACKWARD; dflow.conf_op = DF_UNION; dflow.transfun = df_ru_transfer_function; dflow.n_blocks = n_basic_blocks; dflow.order = df->rts_order; dflow.data = NULL; iterative_dataflow (&dflow); free (dflow.in); free (dflow.out); free (dflow.gen); free (dflow.kill); } if (aflags & DF_DU_CHAIN) { /* Create def-use chains. */ df_du_chain_create (df, df->all_blocks); if (! (flags & DF_RU)) dflags |= DF_RU; } /* Free up bitmaps that are no longer required. */ if (dflags) df_bitmaps_free (df, dflags); if (aflags & DF_LR) { /* Compute the sets of defs and uses of live variables. */ dflow.in = xmalloc (sizeof (bitmap) * last_basic_block); dflow.out = xmalloc (sizeof (bitmap) * last_basic_block); dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block); dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block); df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks); FOR_EACH_BB (bb) { dflow.in[bb->index] = DF_BB_INFO (df, bb)->lr_in; dflow.out[bb->index] = DF_BB_INFO (df, bb)->lr_out; dflow.gen[bb->index] = DF_BB_INFO (df, bb)->lr_use; dflow.kill[bb->index] = DF_BB_INFO (df, bb)->lr_def; } dflow.repr = SR_BITMAP; dflow.dir = DF_BACKWARD; dflow.conf_op = DF_UNION; dflow.transfun = df_lr_transfer_function; dflow.n_blocks = n_basic_blocks; dflow.order = df->rts_order; dflow.data = NULL; iterative_dataflow (&dflow); free (dflow.in); free (dflow.out); free (dflow.gen); free (dflow.kill); } if (aflags & DF_REG_INFO) { df_reg_info_compute (df, df->all_blocks); } free (df->dfs_order); free (df->rc_order); free (df->rts_order); free (df->inverse_rc_map); free (df->inverse_dfs_map); free (df->inverse_rts_map); } /* Initialize dataflow analysis. */ struct df * df_init (void) { struct df *df; df = xcalloc (1, sizeof (struct df)); /* Squirrel away a global for debugging. */ ddf = df; return df; } /* Start queuing refs. */ static int df_refs_queue (struct df *df) { df->def_id_save = df->def_id; df->use_id_save = df->use_id; /* ???? Perhaps we should save current obstack state so that we can unwind it. */ return 0; } /* Process queued refs. */ static int df_refs_process (struct df *df) { unsigned int i; /* Build new insn-def chains. 
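   The protocol here is: df_refs_queue records the current def_id and
   use_id as watermarks, the scanning routines then append any newly
   created refs to df->defs and df->uses, and the two loops below attach
   exactly the refs created since the watermarks, i.e. ids in

       [def_id_save, def_id)  and  [use_id_save, use_id)

   to the defs and uses lists of their containing insns.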
*/ for (i = df->def_id_save; i != df->def_id; i++) { struct ref *def = df->defs[i]; unsigned int uid = DF_REF_INSN_UID (def); /* Add def to head of def list for INSN. */ df->insns[uid].defs = df_link_create (def, df->insns[uid].defs); } /* Build new insn-use chains. */ for (i = df->use_id_save; i != df->use_id; i++) { struct ref *use = df->uses[i]; unsigned int uid = DF_REF_INSN_UID (use); /* Add use to head of use list for INSN. */ df->insns[uid].uses = df_link_create (use, df->insns[uid].uses); } return 0; } /* Update refs for basic block BB. */ static int df_bb_refs_update (struct df *df, basic_block bb) { rtx insn; int count = 0; /* While we have to scan the chain of insns for this BB, we do not need to allocate and queue a long chain of BB/INSN pairs. Using a bitmap for insns_modified saves memory and avoids queuing duplicates. */ FOR_BB_INSNS (bb, insn) { unsigned int uid; uid = INSN_UID (insn); if (bitmap_bit_p (df->insns_modified, uid)) { /* Delete any allocated refs of this insn. MPH, FIXME. */ df_insn_refs_unlink (df, bb, insn); /* Scan the insn for refs. */ df_insn_refs_record (df, bb, insn); count++; } } return count; } /* Process all the modified/deleted insns that were queued. */ static int df_refs_update (struct df *df, bitmap blocks) { basic_block bb; int count = 0, bbno; df->n_regs = max_reg_num (); if (df->n_regs >= df->reg_size) df_reg_table_realloc (df, 0); df_refs_queue (df); if (!blocks) { FOR_EACH_BB_IN_BITMAP (df->bbs_modified, 0, bb, { count += df_bb_refs_update (df, bb); }); } else { EXECUTE_IF_AND_IN_BITMAP (df->bbs_modified, blocks, 0, bbno, { count += df_bb_refs_update (df, BASIC_BLOCK (bbno)); }); } df_refs_process (df); return count; } /* Return nonzero if any of the requested blocks in the bitmap BLOCKS have been modified. */ static int df_modified_p (struct df *df, bitmap blocks) { int update = 0; basic_block bb; if (!df->n_bbs) return 0; FOR_EACH_BB (bb) if (bitmap_bit_p (df->bbs_modified, bb->index) && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, bb->index))) { update = 1; break; } return update; } /* Analyze dataflow info for the basic blocks specified by the bitmap BLOCKS, or for the whole CFG if BLOCKS is zero, or just for the modified blocks if BLOCKS is -1. */ int df_analyze (struct df *df, bitmap blocks, int flags) { int update; /* We could deal with additional basic blocks being created by rescanning everything again. */ if (df->n_bbs && df->n_bbs != (unsigned int) last_basic_block) abort (); update = df_modified_p (df, blocks); if (update || (flags != df->flags)) { if (! blocks) { if (df->n_bbs) { /* Recompute everything from scratch. */ df_free (df); } /* Allocate and initialize data structures. */ df_alloc (df, max_reg_num ()); df_analyze_1 (df, 0, flags, 0); update = 1; } else { if (blocks == (bitmap) -1) blocks = df->bbs_modified; if (! df->n_bbs) abort (); df_analyze_1 (df, blocks, flags, 1); bitmap_zero (df->bbs_modified); bitmap_zero (df->insns_modified); } } return update; } /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving the order of the remaining entries. Returns the length of the resulting list. */ static unsigned prune_to_subcfg (int list[], unsigned len, bitmap blocks) { unsigned act, last; for (act = 0, last = 0; act < len; act++) if (bitmap_bit_p (blocks, list[act])) list[last++] = list[act]; return last; } /* Alternative entry point to the analysis. Analyze just the part of the cfg graph induced by BLOCKS. 
TODO I am not quite sure how to avoid code duplication with df_analyze_1 here, and simultaneously not make even greater chaos in it. We behave slightly differently in some details, especially in handling modified insns. */ void df_analyze_subcfg (struct df *df, bitmap blocks, int flags) { rtx insn; basic_block bb; struct dataflow dflow; unsigned n_blocks; if (flags & DF_UD_CHAIN) flags |= DF_RD | DF_RD_CHAIN; if (flags & DF_DU_CHAIN) flags |= DF_RU; if (flags & DF_RU) flags |= DF_RU_CHAIN; if (flags & DF_REG_INFO) flags |= DF_LR; if (!df->n_bbs) { df_alloc (df, max_reg_num ()); /* Mark all insns as modified. */ FOR_EACH_BB (bb) { FOR_BB_INSNS (bb, insn) { df_insn_modify (df, bb, insn); } } } df->flags = flags; df_reg_def_chain_clean (df); df_reg_use_chain_clean (df); df_refs_update (df, blocks); /* Clear the updated stuff from ``modified'' bitmaps. */ FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { if (bitmap_bit_p (df->bbs_modified, bb->index)) { FOR_BB_INSNS (bb, insn) { bitmap_clear_bit (df->insns_modified, INSN_UID (insn)); } bitmap_clear_bit (df->bbs_modified, bb->index); } }); /* Allocate the bitmaps now the total number of defs and uses are known. If the number of defs or uses have changed, then these bitmaps need to be reallocated. */ df_bitmaps_alloc (df, blocks, flags); /* Set the LUIDs for each specified basic block. */ df_luids_set (df, blocks); /* Recreate reg-def and reg-use chains from scratch so that first def is at the head of the reg-def chain and the last use is at the head of the reg-use chain. This is only important for regs local to a basic block as it speeds up searching. */ if (flags & DF_RD_CHAIN) { df_reg_def_chain_create (df, blocks, true); } if (flags & DF_RU_CHAIN) { df_reg_use_chain_create (df, blocks, true); } df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks); df->rc_order = xmalloc (sizeof (int) * n_basic_blocks); df->rts_order = xmalloc (sizeof (int) * n_basic_blocks); flow_depth_first_order_compute (df->dfs_order, df->rc_order); flow_reverse_top_sort_order_compute (df->rts_order); n_blocks = prune_to_subcfg (df->dfs_order, n_basic_blocks, blocks); prune_to_subcfg (df->rc_order, n_basic_blocks, blocks); prune_to_subcfg (df->rts_order, n_basic_blocks, blocks); dflow.in = xmalloc (sizeof (bitmap) * last_basic_block); dflow.out = xmalloc (sizeof (bitmap) * last_basic_block); dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block); dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block); if (flags & DF_RD) { /* Compute the sets of gens and kills for the defs of each bb. */ df_rd_local_compute (df, blocks); FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { dflow.in[bb->index] = DF_BB_INFO (df, bb)->rd_in; dflow.out[bb->index] = DF_BB_INFO (df, bb)->rd_out; dflow.gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen; dflow.kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill; }); dflow.repr = SR_BITMAP; dflow.dir = DF_FORWARD; dflow.conf_op = DF_UNION; dflow.transfun = df_rd_transfer_function; dflow.n_blocks = n_blocks; dflow.order = df->rc_order; dflow.data = NULL; iterative_dataflow (&dflow); } if (flags & DF_UD_CHAIN) { /* Create use-def chains. */ df_ud_chain_create (df, blocks); } if (flags & DF_RU) { /* Compute the sets of gens and kills for the upwards exposed uses in each bb. 
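   As a small illustration, in a block containing

       insn 1:  (set (reg 101) (plus (reg 102) (const_int 1)))
       insn 2:  (set (reg 102) (reg 101))

   the use of (reg 102) in insn 1 is upward exposed (no earlier def of
   reg 102 in the block, so it ends up in ru_gen), whereas the use of
   (reg 101) in insn 2 is not, since the def in insn 1 kills it.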
*/ df_ru_local_compute (df, blocks); FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { dflow.in[bb->index] = DF_BB_INFO (df, bb)->ru_in; dflow.out[bb->index] = DF_BB_INFO (df, bb)->ru_out; dflow.gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen; dflow.kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill; }); dflow.repr = SR_BITMAP; dflow.dir = DF_BACKWARD; dflow.conf_op = DF_UNION; dflow.transfun = df_ru_transfer_function; dflow.n_blocks = n_blocks; dflow.order = df->rts_order; dflow.data = NULL; iterative_dataflow (&dflow); } if (flags & DF_DU_CHAIN) { /* Create def-use chains. */ df_du_chain_create (df, blocks); } if (flags & DF_LR) { /* Compute the sets of defs and uses of live variables. */ df_lr_local_compute (df, blocks); FOR_EACH_BB (bb) { dflow.in[bb->index] = DF_BB_INFO (df, bb)->lr_in; dflow.out[bb->index] = DF_BB_INFO (df, bb)->lr_out; dflow.gen[bb->index] = DF_BB_INFO (df, bb)->lr_use; dflow.kill[bb->index] = DF_BB_INFO (df, bb)->lr_def; } dflow.repr = SR_BITMAP; dflow.dir = DF_BACKWARD; dflow.conf_op = DF_UNION; dflow.transfun = df_lr_transfer_function; dflow.n_blocks = n_blocks; dflow.order = df->rts_order; dflow.data = NULL; iterative_dataflow (&dflow); } if (flags & DF_REG_INFO) { df_reg_info_compute (df, blocks); } free (dflow.in); free (dflow.out); free (dflow.gen); free (dflow.kill); free (df->dfs_order); free (df->rc_order); free (df->rts_order); } /* Free all the dataflow info and the DF structure. */ void df_finish (struct df *df) { df_free (df); free (df); } /* Unlink INSN from its reference information. */ static void df_insn_refs_unlink (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn) { struct df_link *link; unsigned int uid; uid = INSN_UID (insn); /* Unlink all refs defined by this insn. */ for (link = df->insns[uid].defs; link; link = link->next) df_def_unlink (df, link->ref); /* Unlink all refs used by this insn. */ for (link = df->insns[uid].uses; link; link = link->next) df_use_unlink (df, link->ref); df->insns[uid].defs = 0; df->insns[uid].uses = 0; } #if 0 /* Unlink all the insns within BB from their reference information. */ static void df_bb_refs_unlink (struct df *df, basic_block bb) { rtx insn; /* Scan the block an insn at a time from beginning to end. */ for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { /* Unlink refs for INSN. */ df_insn_refs_unlink (df, bb, insn); } if (insn == BB_END (bb)) break; } } /* Unlink all the refs in the basic blocks specified by BLOCKS. Not currently used. */ static void df_refs_unlink (struct df *df, bitmap blocks) { basic_block bb; if (blocks) { FOR_EACH_BB_IN_BITMAP (blocks, 0, bb, { df_bb_refs_unlink (df, bb); }); } else { FOR_EACH_BB (bb) df_bb_refs_unlink (df, bb); } } #endif /* Functions to modify insns. */ /* Delete INSN and all its reference information. */ rtx df_insn_delete (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn) { /* If the insn is a jump, we should perhaps call delete_insn to handle the JUMP_LABEL? */ /* We should not be deleting the NOTE_INSN_BASIC_BLOCK or label. */ if (insn == BB_HEAD (bb)) abort (); /* Delete the insn. */ delete_insn (insn); df_insn_modify (df, bb, insn); return NEXT_INSN (insn); } /* Mark that basic block BB was modified. */ static void df_bb_modify (struct df *df, basic_block bb) { if ((unsigned) bb->index >= df->n_bbs) df_bb_table_realloc (df, df->n_bbs); bitmap_set_bit (df->bbs_modified, bb->index); } /* Mark that INSN within BB may have changed (created/modified/deleted). This may be called multiple times for the same insn. 
There is no harm calling this function if the insn wasn't changed; it will just slow down the rescanning of refs. */ void df_insn_modify (struct df *df, basic_block bb, rtx insn) { unsigned int uid; uid = INSN_UID (insn); if (uid >= df->insn_size) df_insn_table_realloc (df, uid); df_bb_modify (df, bb); bitmap_set_bit (df->insns_modified, uid); /* For incremental updating on the fly, perhaps we could make a copy of all the refs of the original insn and turn them into anti-refs. When df_refs_update finds these anti-refs, it annihilates the original refs. If validate_change fails then these anti-refs will just get ignored. */ typedef struct replace_args { rtx match; rtx replacement; rtx insn; int modified; } replace_args; /* Replace mem pointed to by PX with its associated pseudo register. DATA is actually a pointer to a structure describing the instruction currently being scanned and the MEM we are currently replacing. */ static int df_rtx_mem_replace (rtx *px, void *data) { replace_args *args = (replace_args *) data; rtx mem = *px; if (mem == NULL_RTX) return 0; switch (GET_CODE (mem)) { case MEM: break; case CONST_DOUBLE: /* We're not interested in the MEM associated with a CONST_DOUBLE, so there's no need to traverse into one. */ return -1; default: /* This is not a MEM. */ return 0; } if (!rtx_equal_p (args->match, mem)) /* This is not the MEM we are currently replacing. */ return 0; /* Actually replace the MEM. */ validate_change (args->insn, px, args->replacement, 1); args->modified++; return 0; } int df_insn_mem_replace (struct df *df, basic_block bb, rtx insn, rtx mem, rtx reg) { replace_args args; args.insn = insn; args.match = mem; args.replacement = reg; args.modified = 0; /* Search and replace all matching mems within insn. */ for_each_rtx (&insn, df_rtx_mem_replace, &args); if (args.modified) df_insn_modify (df, bb, insn); /* ???? FIXME. We may have a new def or one or more new uses of REG in INSN. REG should be a new pseudo so it won't affect the dataflow information that we currently have. We should add the new uses and defs to INSN and then recreate the chains when df_analyze is called. */ return args.modified; } /* Replace one register with another. Called through for_each_rtx; PX points to the rtx being scanned. DATA is actually a pointer to a structure of arguments. */ static int df_rtx_reg_replace (rtx *px, void *data) { rtx x = *px; replace_args *args = (replace_args *) data; if (x == NULL_RTX) return 0; if (x == args->match) { validate_change (args->insn, px, args->replacement, 1); args->modified++; } return 0; } /* Replace the reg within every ref on CHAIN that is within the set BLOCKS of basic blocks with NEWREG. Also update the regs within REG_NOTES. */ void df_refs_reg_replace (struct df *df, bitmap blocks, struct df_link *chain, rtx oldreg, rtx newreg) { struct df_link *link; replace_args args; if (! blocks) blocks = df->all_blocks; args.match = oldreg; args.replacement = newreg; args.modified = 0; for (link = chain; link; link = link->next) { struct ref *ref = link->ref; rtx insn = DF_REF_INSN (ref); if (! INSN_P (insn)) continue; if (bitmap_bit_p (blocks, DF_REF_BBNO (ref))) { df_ref_reg_replace (df, ref, oldreg, newreg); /* Replace occurrences of the reg within the REG_NOTES. */ if ((! link->next || DF_REF_INSN (ref) != DF_REF_INSN (link->next->ref)) && REG_NOTES (insn)) { args.insn = insn; for_each_rtx (&REG_NOTES (insn), df_rtx_reg_replace, &args); } } else { /* Temporary check to ensure that we have a grip on which regs should be replaced. 
*/ abort (); } } } /* Replace all occurrences of register OLDREG with register NEWREG in blocks defined by bitmap BLOCKS. This also replaces occurrences of OLDREG in the REG_NOTES but only for insns containing OLDREG. This routine expects the reg-use and reg-def chains to be valid. */ int df_reg_replace (struct df *df, bitmap blocks, rtx oldreg, rtx newreg) { unsigned int oldregno = REGNO (oldreg); df_refs_reg_replace (df, blocks, df->regs[oldregno].defs, oldreg, newreg); df_refs_reg_replace (df, blocks, df->regs[oldregno].uses, oldreg, newreg); return 1; } /* Try replacing the reg within REF with NEWREG. Do not modify def-use/use-def chains. */ int df_ref_reg_replace (struct df *df, struct ref *ref, rtx oldreg, rtx newreg) { /* Check that insn was deleted by being converted into a NOTE. If so ignore this insn. */ if (! INSN_P (DF_REF_INSN (ref))) return 0; if (oldreg && oldreg != DF_REF_REG (ref)) abort (); if (! validate_change (DF_REF_INSN (ref), DF_REF_LOC (ref), newreg, 1)) return 0; df_insn_modify (df, DF_REF_BB (ref), DF_REF_INSN (ref)); return 1; } struct ref* df_bb_def_use_swap (struct df *df, basic_block bb, rtx def_insn, rtx use_insn, unsigned int regno) { struct ref *def; struct ref *use; int def_uid; int use_uid; struct df_link *link; def = df_bb_insn_regno_first_def_find (df, bb, def_insn, regno); if (! def) return 0; use = df_bb_insn_regno_last_use_find (df, bb, use_insn, regno); if (! use) return 0; /* The USE no longer exists. */ use_uid = INSN_UID (use_insn); df_use_unlink (df, use); df_ref_unlink (&df->insns[use_uid].uses, use); /* The DEF requires shifting so remove it from DEF_INSN and add it to USE_INSN by reusing LINK. */ def_uid = INSN_UID (def_insn); link = df_ref_unlink (&df->insns[def_uid].defs, def); link->ref = def; link->next = df->insns[use_uid].defs; df->insns[use_uid].defs = link; #if 0 link = df_ref_unlink (&df->regs[regno].defs, def); link->ref = def; link->next = df->regs[regno].defs; df->insns[regno].defs = link; #endif DF_REF_INSN (def) = use_insn; return def; } /* Record df between FIRST_INSN and LAST_INSN inclusive. All new insns must be processed by this routine. */ static void df_insns_modify (struct df *df, basic_block bb, rtx first_insn, rtx last_insn) { rtx insn; for (insn = first_insn; ; insn = NEXT_INSN (insn)) { unsigned int uid; /* A non-const call should not have slipped through the net. If it does, we need to create a new basic block. Ouch. The same applies for a label. */ if ((GET_CODE (insn) == CALL_INSN && ! CONST_OR_PURE_CALL_P (insn)) || GET_CODE (insn) == CODE_LABEL) abort (); uid = INSN_UID (insn); if (uid >= df->insn_size) df_insn_table_realloc (df, uid); df_insn_modify (df, bb, insn); if (insn == last_insn) break; } } /* Emit PATTERN before INSN within BB. */ rtx df_pattern_emit_before (struct df *df, rtx pattern, basic_block bb, rtx insn) { rtx ret_insn; rtx prev_insn = PREV_INSN (insn); /* We should not be inserting before the start of the block. */ if (insn == BB_HEAD (bb)) abort (); ret_insn = emit_insn_before (pattern, insn); if (ret_insn == insn) return ret_insn; df_insns_modify (df, bb, NEXT_INSN (prev_insn), ret_insn); return ret_insn; } /* Emit PATTERN after INSN within BB. */ rtx df_pattern_emit_after (struct df *df, rtx pattern, basic_block bb, rtx insn) { rtx ret_insn; ret_insn = emit_insn_after (pattern, insn); if (ret_insn == insn) return ret_insn; df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn); return ret_insn; } /* Emit jump PATTERN after INSN within BB. 
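   These emit wrappers (df_pattern_emit_before, df_pattern_emit_after and
   the jump variant below) differ from calling emit_insn_before and friends
   directly in that they also queue the freshly emitted insns through
   df_insns_modify, so the next call to df_analyze rescans their refs.  A
   minimal sketch (gen_move_insn and the operands are illustrative only):

       rtx copy = gen_move_insn (new_reg, old_reg);
       df_pattern_emit_before (df, copy, bb, insn);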
*/ rtx df_jump_pattern_emit_after (struct df *df, rtx pattern, basic_block bb, rtx insn) { rtx ret_insn; ret_insn = emit_jump_insn_after (pattern, insn); if (ret_insn == insn) return ret_insn; df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn); return ret_insn; } /* Move INSN within BB before BEFORE_INSN within BEFORE_BB. This function should only be used to move loop invariant insns out of a loop where it has been proven that the def-use info will still be valid. */ rtx df_insn_move_before (struct df *df, basic_block bb, rtx insn, basic_block before_bb, rtx before_insn) { struct df_link *link; unsigned int uid; if (! bb) return df_pattern_emit_before (df, insn, before_bb, before_insn); uid = INSN_UID (insn); /* Change bb for all df defined and used by this insn. */ for (link = df->insns[uid].defs; link; link = link->next) DF_REF_BB (link->ref) = before_bb; for (link = df->insns[uid].uses; link; link = link->next) DF_REF_BB (link->ref) = before_bb; /* The lifetimes of the registers used in this insn will be reduced while the lifetimes of the registers defined in this insn are likely to be increased. */ /* ???? Perhaps all the insns moved should be stored on a list which df_analyze removes when it recalculates data flow. */ return emit_insn_before (insn, before_insn); } /* Functions to query dataflow information. */ int df_insn_regno_def_p (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn, unsigned int regno) { unsigned int uid; struct df_link *link; uid = INSN_UID (insn); for (link = df->insns[uid].defs; link; link = link->next) { struct ref *def = link->ref; if (DF_REF_REGNO (def) == regno) return 1; } return 0; } /* Finds the reference corresponding to the definition of REG in INSN. DF is the dataflow object. */ struct ref * df_find_def (struct df *df, rtx insn, rtx reg) { struct df_link *defs; for (defs = DF_INSN_DEFS (df, insn); defs; defs = defs->next) if (rtx_equal_p (DF_REF_REG (defs->ref), reg)) return defs->ref; return NULL; } /* Return 1 if REG is referenced in INSN, zero otherwise. */ int df_reg_used (struct df *df, rtx insn, rtx reg) { struct df_link *uses; for (uses = DF_INSN_USES (df, insn); uses; uses = uses->next) if (rtx_equal_p (DF_REF_REG (uses->ref), reg)) return 1; return 0; } static int df_def_dominates_all_uses_p (struct df *df ATTRIBUTE_UNUSED, struct ref *def) { struct df_link *du_link; /* Follow def-use chain to find all the uses of this def. */ for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next) { struct ref *use = du_link->ref; struct df_link *ud_link; /* Follow use-def chain to check all the defs for this use. */ for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next) if (ud_link->ref != def) return 0; } return 1; } int df_insn_dominates_all_uses_p (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn) { unsigned int uid; struct df_link *link; uid = INSN_UID (insn); for (link = df->insns[uid].defs; link; link = link->next) { struct ref *def = link->ref; if (! df_def_dominates_all_uses_p (df, def)) return 0; } return 1; } /* Return nonzero if all DF dominates all the uses within the bitmap BLOCKS. */ static int df_def_dominates_uses_p (struct df *df ATTRIBUTE_UNUSED, struct ref *def, bitmap blocks) { struct df_link *du_link; /* Follow def-use chain to find all the uses of this def. */ for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next) { struct ref *use = du_link->ref; struct df_link *ud_link; /* Only worry about the uses within BLOCKS. 
For example, consider a register defined within a loop that is live at the loop exits. */ if (bitmap_bit_p (blocks, DF_REF_BBNO (use))) { /* Follow use-def chain to check all the defs for this use. */ for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next) if (ud_link->ref != def) return 0; } } return 1; } /* Return nonzero if all the defs of INSN within BB dominates all the corresponding uses. */ int df_insn_dominates_uses_p (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn, bitmap blocks) { unsigned int uid; struct df_link *link; uid = INSN_UID (insn); for (link = df->insns[uid].defs; link; link = link->next) { struct ref *def = link->ref; /* Only consider the defs within BLOCKS. */ if (bitmap_bit_p (blocks, DF_REF_BBNO (def)) && ! df_def_dominates_uses_p (df, def, blocks)) return 0; } return 1; } /* Return the basic block that REG referenced in or NULL if referenced in multiple basic blocks. */ basic_block df_regno_bb (struct df *df, unsigned int regno) { struct df_link *defs = df->regs[regno].defs; struct df_link *uses = df->regs[regno].uses; struct ref *def = defs ? defs->ref : 0; struct ref *use = uses ? uses->ref : 0; basic_block bb_def = def ? DF_REF_BB (def) : 0; basic_block bb_use = use ? DF_REF_BB (use) : 0; /* Compare blocks of first def and last use. ???? FIXME. What if the reg-def and reg-use lists are not correctly ordered. */ return bb_def == bb_use ? bb_def : 0; } /* Return nonzero if REG used in multiple basic blocks. */ int df_reg_global_p (struct df *df, rtx reg) { return df_regno_bb (df, REGNO (reg)) != 0; } /* Return total lifetime (in insns) of REG. */ int df_reg_lifetime (struct df *df, rtx reg) { return df->regs[REGNO (reg)].lifetime; } /* Return nonzero if REG live at start of BB. */ int df_bb_reg_live_start_p (struct df *df, basic_block bb, rtx reg) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); #ifdef ENABLE_CHECKING if (! bb_info->lr_in) abort (); #endif return bitmap_bit_p (bb_info->lr_in, REGNO (reg)); } /* Return nonzero if REG live at end of BB. */ int df_bb_reg_live_end_p (struct df *df, basic_block bb, rtx reg) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); #ifdef ENABLE_CHECKING if (! bb_info->lr_in) abort (); #endif return bitmap_bit_p (bb_info->lr_out, REGNO (reg)); } /* Return -1 if life of REG1 before life of REG2, 1 if life of REG1 after life of REG2, or 0, if the lives overlap. */ int df_bb_regs_lives_compare (struct df *df, basic_block bb, rtx reg1, rtx reg2) { unsigned int regno1 = REGNO (reg1); unsigned int regno2 = REGNO (reg2); struct ref *def1; struct ref *use1; struct ref *def2; struct ref *use2; /* The regs must be local to BB. */ if (df_regno_bb (df, regno1) != bb || df_regno_bb (df, regno2) != bb) abort (); def2 = df_bb_regno_first_def_find (df, bb, regno2); use1 = df_bb_regno_last_use_find (df, bb, regno1); if (DF_INSN_LUID (df, DF_REF_INSN (def2)) > DF_INSN_LUID (df, DF_REF_INSN (use1))) return -1; def1 = df_bb_regno_first_def_find (df, bb, regno1); use2 = df_bb_regno_last_use_find (df, bb, regno2); if (DF_INSN_LUID (df, DF_REF_INSN (def1)) > DF_INSN_LUID (df, DF_REF_INSN (use2))) return 1; return 0; } /* Return last use of REGNO within BB. */ struct ref * df_bb_regno_last_use_find (struct df *df, basic_block bb, unsigned int regno) { struct df_link *link; /* This assumes that the reg-use list is ordered such that for any BB, the last use is found first. However, since the BBs are not ordered, the first use in the chain is not necessarily the last use in the function. 
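   Concretely, df_bb_reg_use_chain_create scans each block's insns in
   forward order and pushes every use at the head of the register's chain,
   e.g.

       uses of reg 100 in BB:   insn A ... insn B (later)
       resulting chain head:    use in B -> use in A -> ...

   so within one block the head-most matching entry is the last use, and
   the loop below may stop at the first entry whose DF_REF_BB equals BB.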
*/ for (link = df->regs[regno].uses; link; link = link->next) { struct ref *use = link->ref; if (DF_REF_BB (use) == bb) return use; } return 0; } /* Return first def of REGNO within BB. */ struct ref * df_bb_regno_first_def_find (struct df *df, basic_block bb, unsigned int regno) { struct df_link *link; /* This assumes that the reg-def list is ordered such that for any BB, the first def is found first. However, since the BBs are not ordered, the first def in the chain is not necessarily the first def in the function. */ for (link = df->regs[regno].defs; link; link = link->next) { struct ref *def = link->ref; if (DF_REF_BB (def) == bb) return def; } return 0; } /* Return last def of REGNO within BB. */ struct ref * df_bb_regno_last_def_find (struct df *df, basic_block bb, unsigned int regno) { struct df_link *link; struct ref *last_def = NULL; int in_bb = 0; /* This assumes that the reg-def list is ordered such that for any BB, the first def is found first. However, since the BBs are not ordered, the first def in the chain is not necessarily the first def in the function. */ for (link = df->regs[regno].defs; link; link = link->next) { struct ref *def = link->ref; /* The first time in the desired block. */ if (DF_REF_BB (def) == bb) in_bb = 1; /* The last def in the desired block. */ else if (in_bb) return last_def; last_def = def; } return last_def; } /* Return first use of REGNO inside INSN within BB. */ static struct ref * df_bb_insn_regno_last_use_find (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn, unsigned int regno) { unsigned int uid; struct df_link *link; uid = INSN_UID (insn); for (link = df->insns[uid].uses; link; link = link->next) { struct ref *use = link->ref; if (DF_REF_REGNO (use) == regno) return use; } return 0; } /* Return first def of REGNO inside INSN within BB. */ static struct ref * df_bb_insn_regno_first_def_find (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn, unsigned int regno) { unsigned int uid; struct df_link *link; uid = INSN_UID (insn); for (link = df->insns[uid].defs; link; link = link->next) { struct ref *def = link->ref; if (DF_REF_REGNO (def) == regno) return def; } return 0; } /* Return insn using REG if the BB contains only a single use and def of REG. */ rtx df_bb_single_def_use_insn_find (struct df *df, basic_block bb, rtx insn, rtx reg) { struct ref *def; struct ref *use; struct df_link *du_link; def = df_bb_insn_regno_first_def_find (df, bb, insn, REGNO (reg)); if (! def) abort (); du_link = DF_REF_CHAIN (def); if (! du_link) return NULL_RTX; use = du_link->ref; /* Check if def is dead. */ if (! use) return NULL_RTX; /* Check for multiple uses. */ if (du_link->next) return NULL_RTX; return DF_REF_INSN (use); } /* Functions for debugging/dumping dataflow information. */ /* Dump a def-use or use-def chain for REF to FILE. */ static void df_chain_dump (struct df_link *link, FILE *file) { fprintf (file, "{ "); for (; link; link = link->next) { fprintf (file, "%c%d ", DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u', DF_REF_ID (link->ref)); } fprintf (file, "}"); } /* Dump a chain of refs with the associated regno. */ static void df_chain_dump_regno (struct df_link *link, FILE *file) { fprintf (file, "{ "); for (; link; link = link->next) { fprintf (file, "%c%d(%d) ", DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u', DF_REF_ID (link->ref), DF_REF_REGNO (link->ref)); } fprintf (file, "}"); } /* Dump dataflow info. */ void df_dump (struct df *df, int flags, FILE *file) { unsigned int j; basic_block bb; if (! df || ! 
file) return; fprintf (file, "\nDataflow summary:\n"); fprintf (file, "n_regs = %d, n_defs = %d, n_uses = %d, n_bbs = %d\n", df->n_regs, df->n_defs, df->n_uses, df->n_bbs); if (flags & DF_RD) { basic_block bb; fprintf (file, "Reaching defs:\n"); FOR_EACH_BB (bb) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); if (! bb_info->rd_in) continue; fprintf (file, "bb %d in \t", bb->index); dump_bitmap (file, bb_info->rd_in); fprintf (file, "bb %d gen \t", bb->index); dump_bitmap (file, bb_info->rd_gen); fprintf (file, "bb %d kill\t", bb->index); dump_bitmap (file, bb_info->rd_kill); fprintf (file, "bb %d out \t", bb->index); dump_bitmap (file, bb_info->rd_out); } } if (flags & DF_UD_CHAIN) { fprintf (file, "Use-def chains:\n"); for (j = 0; j < df->n_defs; j++) { if (df->defs[j]) { fprintf (file, "d%d bb %d luid %d insn %d reg %d ", j, DF_REF_BBNO (df->defs[j]), DF_INSN_LUID (df, DF_REF_INSN (df->defs[j])), DF_REF_INSN_UID (df->defs[j]), DF_REF_REGNO (df->defs[j])); if (df->defs[j]->flags & DF_REF_READ_WRITE) fprintf (file, "read/write "); df_chain_dump (DF_REF_CHAIN (df->defs[j]), file); fprintf (file, "\n"); } } } if (flags & DF_RU) { fprintf (file, "Reaching uses:\n"); FOR_EACH_BB (bb) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); if (! bb_info->ru_in) continue; fprintf (file, "bb %d in \t", bb->index); dump_bitmap (file, bb_info->ru_in); fprintf (file, "bb %d gen \t", bb->index); dump_bitmap (file, bb_info->ru_gen); fprintf (file, "bb %d kill\t", bb->index); dump_bitmap (file, bb_info->ru_kill); fprintf (file, "bb %d out \t", bb->index); dump_bitmap (file, bb_info->ru_out); } } if (flags & DF_DU_CHAIN) { fprintf (file, "Def-use chains:\n"); for (j = 0; j < df->n_uses; j++) { if (df->uses[j]) { fprintf (file, "u%d bb %d luid %d insn %d reg %d ", j, DF_REF_BBNO (df->uses[j]), DF_INSN_LUID (df, DF_REF_INSN (df->uses[j])), DF_REF_INSN_UID (df->uses[j]), DF_REF_REGNO (df->uses[j])); if (df->uses[j]->flags & DF_REF_READ_WRITE) fprintf (file, "read/write "); df_chain_dump (DF_REF_CHAIN (df->uses[j]), file); fprintf (file, "\n"); } } } if (flags & DF_LR) { fprintf (file, "Live regs:\n"); FOR_EACH_BB (bb) { struct bb_df_info *bb_info = DF_BB_INFO (df, bb); if (! 
bb_info->lr_in) continue; fprintf (file, "bb %d in \t", bb->index); dump_bitmap (file, bb_info->lr_in); fprintf (file, "bb %d use \t", bb->index); dump_bitmap (file, bb_info->lr_use); fprintf (file, "bb %d def \t", bb->index); dump_bitmap (file, bb_info->lr_def); fprintf (file, "bb %d out \t", bb->index); dump_bitmap (file, bb_info->lr_out); } } if (flags & (DF_REG_INFO | DF_RD_CHAIN | DF_RU_CHAIN)) { struct reg_info *reg_info = df->regs; fprintf (file, "Register info:\n"); for (j = 0; j < df->n_regs; j++) { if (((flags & DF_REG_INFO) && (reg_info[j].n_uses || reg_info[j].n_defs)) || ((flags & DF_RD_CHAIN) && reg_info[j].defs) || ((flags & DF_RU_CHAIN) && reg_info[j].uses)) { fprintf (file, "reg %d", j); if ((flags & DF_RD_CHAIN) && (flags & DF_RU_CHAIN)) { basic_block bb = df_regno_bb (df, j); if (bb) fprintf (file, " bb %d", bb->index); else fprintf (file, " bb ?"); } if (flags & DF_REG_INFO) { fprintf (file, " life %d", reg_info[j].lifetime); } if ((flags & DF_REG_INFO) || (flags & DF_RD_CHAIN)) { fprintf (file, " defs "); if (flags & DF_REG_INFO) fprintf (file, "%d ", reg_info[j].n_defs); if (flags & DF_RD_CHAIN) df_chain_dump (reg_info[j].defs, file); } if ((flags & DF_REG_INFO) || (flags & DF_RU_CHAIN)) { fprintf (file, " uses "); if (flags & DF_REG_INFO) fprintf (file, "%d ", reg_info[j].n_uses); if (flags & DF_RU_CHAIN) df_chain_dump (reg_info[j].uses, file); } fprintf (file, "\n"); } } } fprintf (file, "\n"); } void df_insn_debug (struct df *df, rtx insn, FILE *file) { unsigned int uid; int bbi; uid = INSN_UID (insn); if (uid >= df->insn_size) return; if (df->insns[uid].defs) bbi = DF_REF_BBNO (df->insns[uid].defs->ref); else if (df->insns[uid].uses) bbi = DF_REF_BBNO (df->insns[uid].uses->ref); else bbi = -1; fprintf (file, "insn %d bb %d luid %d defs ", uid, bbi, DF_INSN_LUID (df, insn)); df_chain_dump (df->insns[uid].defs, file); fprintf (file, " uses "); df_chain_dump (df->insns[uid].uses, file); fprintf (file, "\n"); } void df_insn_debug_regno (struct df *df, rtx insn, FILE *file) { unsigned int uid; int bbi; uid = INSN_UID (insn); if (uid >= df->insn_size) return; if (df->insns[uid].defs) bbi = DF_REF_BBNO (df->insns[uid].defs->ref); else if (df->insns[uid].uses) bbi = DF_REF_BBNO (df->insns[uid].uses->ref); else bbi = -1; fprintf (file, "insn %d bb %d luid %d defs ", uid, bbi, DF_INSN_LUID (df, insn)); df_chain_dump_regno (df->insns[uid].defs, file); fprintf (file, " uses "); df_chain_dump_regno (df->insns[uid].uses, file); fprintf (file, "\n"); } static void df_regno_debug (struct df *df, unsigned int regno, FILE *file) { if (regno >= df->reg_size) return; fprintf (file, "reg %d life %d defs ", regno, df->regs[regno].lifetime); df_chain_dump (df->regs[regno].defs, file); fprintf (file, " uses "); df_chain_dump (df->regs[regno].uses, file); fprintf (file, "\n"); } static void df_ref_debug (struct df *df, struct ref *ref, FILE *file) { fprintf (file, "%c%d ", DF_REF_REG_DEF_P (ref) ? 'd' : 'u', DF_REF_ID (ref)); fprintf (file, "reg %d bb %d luid %d insn %d chain ", DF_REF_REGNO (ref), DF_REF_BBNO (ref), DF_INSN_LUID (df, DF_REF_INSN (ref)), INSN_UID (DF_REF_INSN (ref))); df_chain_dump (DF_REF_CHAIN (ref), file); fprintf (file, "\n"); } /* Functions for debugging from GDB. 
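*/

/* Illustrative sketch, not part of the original sources: the chains dumped
   by df_chain_dump above are plain singly linked lists of struct df_link,
   so a pass can walk them directly with the same accessors.  The helper
   name below is hypothetical and exists only to make the layout concrete:
   it counts the uses reached from one definition.  */

static int
df_example_count_uses_of_def (struct ref *def)
{
  struct df_link *du_link;
  int n = 0;

  /* DF_REF_CHAIN of a def heads its def-use chain; each link carries one
     use in LINK->ref.  */
  for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
    n++;

  return n;
}

/* Entry points intended to be called by hand from GDB.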
*/ void debug_df_insn (rtx insn) { df_insn_debug (ddf, insn, stderr); debug_rtx (insn); } void debug_df_reg (rtx reg) { df_regno_debug (ddf, REGNO (reg), stderr); } void debug_df_regno (unsigned int regno) { df_regno_debug (ddf, regno, stderr); } void debug_df_ref (struct ref *ref) { df_ref_debug (ddf, ref, stderr); } void debug_df_defno (unsigned int defno) { df_ref_debug (ddf, ddf->defs[defno], stderr); } void debug_df_useno (unsigned int defno) { df_ref_debug (ddf, ddf->uses[defno], stderr); } void debug_df_chain (struct df_link *link) { df_chain_dump (link, stderr); fputc ('\n', stderr); } static void dataflow_set_a_op_b (enum set_representation repr, enum df_confluence_op op, void *rslt, void *op1, void *op2) { switch (repr) { case SR_SBITMAP: switch (op) { case DF_UNION: sbitmap_a_or_b (rslt, op1, op2); break; case DF_INTERSECTION: sbitmap_a_and_b (rslt, op1, op2); break; default: abort (); } break; case SR_BITMAP: switch (op) { case DF_UNION: bitmap_a_or_b (rslt, op1, op2); break; case DF_INTERSECTION: bitmap_a_and_b (rslt, op1, op2); break; default: abort (); } break; default: abort (); } } static void dataflow_set_copy_df (enum set_representation repr, void *dest, void *src) { switch (repr) { case SR_SBITMAP: sbitmap_copy (dest, src); break; case SR_BITMAP: bitmap_copy (dest, src); break; default: abort (); } } /* Hybrid search algorithm from "Implementation Techniques for Efficient Data-Flow Analysis of Large Programs". */ static void hybrid_search (basic_block bb, struct dataflow *dataflow, sbitmap visited, sbitmap pending, sbitmap considered) { int changed; int i = bb->index; edge e; SET_BIT (visited, bb->index); if (!TEST_BIT (pending, bb->index)) abort (); RESET_BIT (pending, i); #define HS(E_ANTI, E_ANTI_NEXT, E_ANTI_BB, E_ANTI_START_BB, IN_SET, \ E, E_NEXT, E_BB, E_START_BB, OUT_SET) \ do \ { \ /* Calculate of predecessor_outs. */ \ bitmap_zero (IN_SET[i]); \ for (e = bb->E_ANTI; e; e = e->E_ANTI_NEXT) \ { \ if (e->E_ANTI_BB == E_ANTI_START_BB) \ continue; \ if (!TEST_BIT (considered, e->E_ANTI_BB->index)) \ continue; \ \ dataflow_set_a_op_b (dataflow->repr, dataflow->conf_op, \ IN_SET[i], IN_SET[i], \ OUT_SET[e->E_ANTI_BB->index]); \ } \ \ (*dataflow->transfun)(i, &changed, \ dataflow->in[i], dataflow->out[i], \ dataflow->gen[i], dataflow->kill[i], \ dataflow->data); \ \ if (!changed) \ break; \ \ for (e = bb->E; e; e = e->E_NEXT) \ { \ if (e->E_BB == E_START_BB || e->E_BB->index == i) \ continue; \ \ if (!TEST_BIT (considered, e->E_BB->index)) \ continue; \ \ SET_BIT (pending, e->E_BB->index); \ } \ \ for (e = bb->E; e; e = e->E_NEXT) \ { \ if (e->E_BB == E_START_BB || e->E_BB->index == i) \ continue; \ \ if (!TEST_BIT (considered, e->E_BB->index)) \ continue; \ \ if (!TEST_BIT (visited, e->E_BB->index)) \ hybrid_search (e->E_BB, dataflow, visited, pending, considered); \ } \ } while (0) if (dataflow->dir == DF_FORWARD) HS (pred, pred_next, src, ENTRY_BLOCK_PTR, dataflow->in, succ, succ_next, dest, EXIT_BLOCK_PTR, dataflow->out); else HS (succ, succ_next, dest, EXIT_BLOCK_PTR, dataflow->out, pred, pred_next, src, ENTRY_BLOCK_PTR, dataflow->in); } /* This function will perform iterative bitvector dataflow described by DATAFLOW, producing the in and out sets. Only the part of the cfg induced by blocks in DATAFLOW->order is taken into account. For forward problems, you probably want to pass in a mapping of block number to rc_order (like df->inverse_rc_map). 
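*/

/* Illustrative sketch, not part of the original sources: how a client of
   the generic solver below might be wired up for a forward union problem
   such as reaching definitions.  The transfer function follows the
   signature invoked by the HS macro above; bitmap_union_of_diff is
   assumed to be the bitmap helper that computes OUT = GEN | (IN & ~KILL)
   and reports whether OUT changed.  The caller is assumed to have
   allocated the in, out, gen, kill and order arrays and to have a
   prototype for iterative_dataflow in scope.  */

static void
example_rd_transfer (int bb ATTRIBUTE_UNUSED, int *changed, void *in,
		     void *out, void *gen, void *kill,
		     void *data ATTRIBUTE_UNUSED)
{
  *changed = bitmap_union_of_diff (out, gen, in, kill);
}

static void
example_solve_rd (struct dataflow *dflow)
{
  dflow->repr = SR_BITMAP;
  dflow->dir = DF_FORWARD;
  dflow->conf_op = DF_UNION;
  dflow->transfun = example_rd_transfer;
  iterative_dataflow (dflow);
}

/* The generic solver described above follows.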
*/ void iterative_dataflow (struct dataflow *dataflow) { unsigned i, idx; sbitmap visited, pending, considered; pending = sbitmap_alloc (last_basic_block); visited = sbitmap_alloc (last_basic_block); considered = sbitmap_alloc (last_basic_block); sbitmap_zero (pending); sbitmap_zero (visited); sbitmap_zero (considered); for (i = 0; i < dataflow->n_blocks; i++) { idx = dataflow->order[i]; SET_BIT (pending, idx); SET_BIT (considered, idx); if (dataflow->dir == DF_FORWARD) dataflow_set_copy_df (dataflow->repr, dataflow->out[idx], dataflow->gen[idx]); else dataflow_set_copy_df (dataflow->repr, dataflow->in[idx], dataflow->gen[idx]); }; while (1) { for (i = 0; i < dataflow->n_blocks; i++) { idx = dataflow->order[i]; if (TEST_BIT (pending, idx) && !TEST_BIT (visited, idx)) hybrid_search (BASIC_BLOCK (idx), dataflow, visited, pending, considered); } if (sbitmap_first_set_bit (pending) == -1) break; sbitmap_zero (visited); } sbitmap_free (pending); sbitmap_free (visited); sbitmap_free (considered); } /* Language-independent diagnostic subroutines for the GNU Compiler Collection Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Gabriel Dos Reis This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file implements the language independent aspect of diagnostic message module. */ #undef FLOAT /* This is for hpux. They should change hpux. */ #undef FFS /* Some systems define this in param.h. */ #ifndef GCC_VERSION_H #define GCC_VERSION_H extern const char version_string[]; extern const char bug_report_url[]; #endif /* ! GCC_VERSION_H */ /* Prototypes. */ static char *build_message_string (const char *, ...) ATTRIBUTE_PRINTF_1; static void default_diagnostic_starter (diagnostic_context *, diagnostic_info *); static void default_diagnostic_finalizer (diagnostic_context *, diagnostic_info *); static void error_recursion (diagnostic_context *) ATTRIBUTE_NORETURN; static bool text_specifies_location (text_info *, location_t *); static bool diagnostic_count_diagnostic (diagnostic_context *, diagnostic_info *); static void diagnostic_action_after_output (diagnostic_context *, diagnostic_info *); static void real_abort (void) ATTRIBUTE_NORETURN; /* A diagnostic_context surrogate for stderr. */ static diagnostic_context global_diagnostic_context; diagnostic_context *global_dc = &global_diagnostic_context; /* Boilerplate text used in two locations. */ #define bug_report_request \ "Please submit a full bug report,\n\ with preprocessed source if appropriate.\n\ See %s for instructions.\n" /* Return a malloc'd string containing MSG formatted a la printf. The caller is responsible for freeing the memory. */ static char * build_message_string (const char *msg, ...) { char *str; va_list ap; va_start (ap, msg); vasprintf (&str, msg, ap); va_end (ap); return str; } /* Same as diagnostic_build_prefix, but only the source FILE is given. 
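*/

/* Illustrative sketch, not part of the original sources: strings returned
   by build_message_string above (and by file_name_as_prefix below) come
   from vasprintf, so the caller owns the memory and must free it.  The
   helper name is hypothetical.  */

static void
example_emit_prefix (const char *file)
{
  char *prefix = build_message_string ("%s: ", file);

  fputs (prefix, stderr);
  free (prefix);
}

/* file_name_as_prefix, described above, follows.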
*/ char * file_name_as_prefix (const char *f) { return build_message_string ("%s: ", f); } /* Initialize the diagnostic message outputting machinery. */ void diagnostic_initialize (diagnostic_context *context) { /* Allocate a basic pretty-printer. Clients will replace this a much more elaborated pretty-printer if they wish. */ context->printer = xmalloc (sizeof (pretty_printer)); pp_construct (context->printer, NULL, 0); /* By default, diagnostics are sent to stderr. */ context->printer->buffer->stream = stderr; /* By default, we emit prefixes once per message. */ context->printer->prefixing_rule = DIAGNOSTICS_SHOW_PREFIX_ONCE; memset (context->diagnostic_count, 0, sizeof context->diagnostic_count); context->warnings_are_errors_message = warnings_are_errors; context->abort_on_error = false; context->internal_error = NULL; diagnostic_starter (context) = default_diagnostic_starter; diagnostic_finalizer (context) = default_diagnostic_finalizer; context->last_module = 0; context->last_function = NULL; context->lock = 0; context->x_data = NULL; } /* Returns true if the next format specifier in TEXT is a format specifier for a location_t. If so, update the object pointed by LOCUS to reflect the specified location in *TEXT->args_ptr. */ static bool text_specifies_location (text_info *text, location_t *locus) { const char *p; /* Skip any leading text. */ for (p = text->format_spec; *p && *p != '%'; ++p) ; /* Extract the location information if any. */ if (p[0] == '%' && p[1] == 'H') { *locus = *va_arg (*text->args_ptr, location_t *); text->format_spec = p + 2; return true; } else if (p[0] == '%' && p[1] == 'J') { tree t = va_arg (*text->args_ptr, tree); *locus = DECL_SOURCE_LOCATION (t); text->format_spec = p + 2; return true; } return false; } void diagnostic_set_info (diagnostic_info *diagnostic, const char *msgid, va_list *args, location_t location, diagnostic_t kind) { diagnostic->message.err_no = errno; diagnostic->message.args_ptr = args; diagnostic->message.format_spec = _(msgid); /* If the diagnostic message doesn't specify a location, use LOCATION. */ if (!text_specifies_location (&diagnostic->message, &diagnostic->location)) diagnostic->location = location; diagnostic->kind = kind; } /* Return a malloc'd string describing a location. The caller is responsible for freeing the memory. */ char * diagnostic_build_prefix (diagnostic_info *diagnostic) { static const char *const diagnostic_kind_text[] = { #define DEFINE_DIAGNOSTIC_KIND(K, T) (T), DEFINE_DIAGNOSTIC_KIND (DK_FATAL, "fatal error: ") DEFINE_DIAGNOSTIC_KIND (DK_ICE, "internal compiler error: ") DEFINE_DIAGNOSTIC_KIND (DK_ERROR, "error: ") DEFINE_DIAGNOSTIC_KIND (DK_SORRY, "sorry, unimplemented: ") DEFINE_DIAGNOSTIC_KIND (DK_WARNING, "warning: ") DEFINE_DIAGNOSTIC_KIND (DK_ANACHRONISM, "anachronism: ") DEFINE_DIAGNOSTIC_KIND (DK_NOTE, "note: ") DEFINE_DIAGNOSTIC_KIND (DK_DEBUG, "debug: ") #undef DEFINE_DIAGNOSTIC_KIND "must-not-happen" }; expanded_location s = expand_location (diagnostic->location); if (diagnostic->kind >= DK_LAST_DIAGNOSTIC_KIND) abort(); return s.file ? build_message_string ("%s:%d: %s", s.file, s.line, _(diagnostic_kind_text[diagnostic->kind])) : build_message_string ("%s: %s", progname, _(diagnostic_kind_text[diagnostic->kind])); } /* Count a diagnostic. Return true if the message should be printed. 
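*/

/* Illustrative sketch, not part of the original sources: because
   diagnostic_set_info lets text_specifies_location above recognize a
   leading %H or %J, a wrapper can carry the location in the argument list
   rather than in a separate parameter.  The wrapper name is hypothetical;
   report_diagnostic and DK_NOTE are the same facilities used by the
   standard wrappers later in this file.  */

static void
example_note_at (const char *msgid, ...)
{
  diagnostic_info diagnostic;
  va_list ap;

  va_start (ap, msgid);
  /* With a format string such as "%Hcalled from here", the location_t *
     is consumed from AP; otherwise input_location is used.  */
  diagnostic_set_info (&diagnostic, msgid, &ap, input_location, DK_NOTE);
  report_diagnostic (&diagnostic);
  va_end (ap);
}

/* The diagnostic-counting helper described above follows.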
*/ static bool diagnostic_count_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic) { diagnostic_t kind = diagnostic->kind; switch (kind) { default: abort(); break; case DK_ICE: #ifndef ENABLE_CHECKING /* When not checking, ICEs are converted to fatal errors when an error has already occurred. This is counteracted by abort_on_error. */ if ((diagnostic_kind_count (context, DK_ERROR) > 0 || diagnostic_kind_count (context, DK_SORRY) > 0) && !context->abort_on_error) { expanded_location s = expand_location (diagnostic->location); fnotice (stderr, "%s:%d: confused by earlier errors, bailing out\n", s.file, s.line); exit (FATAL_EXIT_CODE); } #endif if (context->internal_error) (*context->internal_error) (diagnostic->message.format_spec, diagnostic->message.args_ptr); /* Fall through. */ case DK_FATAL: case DK_SORRY: case DK_ANACHRONISM: case DK_NOTE: ++diagnostic_kind_count (context, kind); break; case DK_WARNING: if (!diagnostic_report_warnings_p ()) return false; if (!warnings_are_errors) { ++diagnostic_kind_count (context, DK_WARNING); break; } if (context->warnings_are_errors_message) { pp_verbatim (context->printer, "%s: warnings being treated as errors\n", progname); context->warnings_are_errors_message = false; } /* And fall through. */ case DK_ERROR: ++diagnostic_kind_count (context, DK_ERROR); break; } return true; } /* Take any action which is expected to happen after the diagnostic is written out. This function does not always return. */ static void diagnostic_action_after_output (diagnostic_context *context, diagnostic_info *diagnostic) { switch (diagnostic->kind) { case DK_DEBUG: case DK_NOTE: case DK_ANACHRONISM: case DK_WARNING: break; case DK_ERROR: case DK_SORRY: if (context->abort_on_error) real_abort (); if (flag_fatal_errors) { fnotice (stderr, "compilation terminated due to -Wfatal-errors.\n"); exit (FATAL_EXIT_CODE); } break; case DK_ICE: if (context->abort_on_error) real_abort (); fnotice (stderr, bug_report_request, bug_report_url); exit (FATAL_EXIT_CODE); case DK_FATAL: if (context->abort_on_error) real_abort (); fnotice (stderr, "compilation terminated.\n"); exit (FATAL_EXIT_CODE); default: real_abort (); } } /* Prints out, if necessary, the name of the current function that caused an error. Called from all error and warning functions. We ignore the FILE parameter, as it cannot be relied upon. 
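*/

/* Illustrative sketch, not part of the original sources: the starter and
   finalizer installed by diagnostic_initialize above are ordinary function
   pointers, so a client wanting a different prefix policy can install its
   own hooks.  The names below are hypothetical; pp_set_prefix and
   diagnostic_build_prefix are the same routines the default starter
   uses.  */

static void
example_terse_starter (diagnostic_context *context,
		       diagnostic_info *diagnostic)
{
  /* Like default_diagnostic_starter, but without reporting the current
     function and include stack first.  */
  pp_set_prefix (context->printer, diagnostic_build_prefix (diagnostic));
}

static void
example_install_hooks (diagnostic_context *context)
{
  diagnostic_starter (context) = example_terse_starter;
  diagnostic_finalizer (context) = default_diagnostic_finalizer;
}

/* Reporting of the current function and module, described above, follows.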
*/ void diagnostic_report_current_function (diagnostic_context *context) { diagnostic_report_current_module (context); lang_hooks.print_error_function (context, input_filename); } void diagnostic_report_current_module (diagnostic_context *context) { struct file_stack *p; if (pp_needs_newline (context->printer)) { pp_newline (context->printer); pp_needs_newline (context->printer) = false; } p = input_file_stack; if (p && diagnostic_last_module_changed (context)) { expanded_location xloc = expand_location (p->location); pp_verbatim (context->printer, "In file included from %s:%d", xloc.file, xloc.line); while ((p = p->next) != NULL) { xloc = expand_location (p->location); pp_verbatim (context->printer, ",\n from %s:%d", xloc.file, xloc.line); } pp_verbatim (context->printer, ":\n"); diagnostic_set_last_module (context); } } static void default_diagnostic_starter (diagnostic_context *context, diagnostic_info *diagnostic) { diagnostic_report_current_function (context); pp_set_prefix (context->printer, diagnostic_build_prefix (diagnostic)); } static void default_diagnostic_finalizer (diagnostic_context *context, diagnostic_info *diagnostic __attribute__((unused))) { pp_destroy_prefix (context->printer); } /* Report a diagnostic message (an error or a warning) as specified by DC. This function is *the* subroutine in terms of which front-ends should implement their specific diagnostic handling modules. The front-end independent format specifiers are exactly those described in the documentation of output_format. */ void diagnostic_report_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic) { if (context->lock++ && diagnostic->kind < DK_SORRY) error_recursion (context); if (diagnostic_count_diagnostic (context, diagnostic)) { (*diagnostic_starter (context)) (context, diagnostic); pp_format_text (context->printer, &diagnostic->message); (*diagnostic_finalizer (context)) (context, diagnostic); pp_flush (context->printer); diagnostic_action_after_output (context, diagnostic); } context->lock--; } /* Given a partial pathname as input, return another pathname that shares no directory elements with the pathname of __FILE__. This is used by fancy_abort() to print `Internal compiler error in expr.c' instead of `Internal compiler error in ../../GCC/gcc/expr.c'. */ const char * trim_filename (const char *name) { static const char this_file[] = __FILE__; const char *p = name, *q = this_file; /* First skip any "../" in each filename. This allows us to give a proper reference to a file in a subdirectory. */ while (p[0] == '.' && p[1] == '.' && (p[2] == DIR_SEPARATOR #ifdef DIR_SEPARATOR_2 || p[2] == DIR_SEPARATOR_2 #endif )) p += 3; while (q[0] == '.' && q[1] == '.' && (q[2] == DIR_SEPARATOR #ifdef DIR_SEPARATOR_2 || p[2] == DIR_SEPARATOR_2 #endif )) q += 3; /* Now skip any parts the two filenames have in common. */ while (*p == *q && *p != 0 && *q != 0) p++, q++; /* Now go backwards until the previous directory separator. */ while (p > name && p[-1] != DIR_SEPARATOR #ifdef DIR_SEPARATOR_2 && p[-1] != DIR_SEPARATOR_2 #endif ) p--; return p; } /* Standard error reporting routines in increasing order of severity. All of these take arguments like printf. */ /* Text to be emitted verbatim to the error message stream; this produces no prefix and disables line-wrapping. Use rarely. */ void verbatim (const char *msgid, ...) 
{ text_info text; va_list ap; va_start (ap, msgid); text.err_no = errno; text.args_ptr = &ap; text.format_spec = _(msgid); pp_format_verbatim (global_dc->printer, &text); pp_flush (global_dc->printer); va_end (ap); } /* An informative note. Use this for additional details on an error message. */ void inform (const char *msgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, msgid); diagnostic_set_info (&diagnostic, msgid, &ap, input_location, DK_NOTE); report_diagnostic (&diagnostic); va_end (ap); } /* A warning. Use this for code which is correct according to the relevant language specification but is likely to be buggy anyway. */ void warning (const char *msgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, msgid); diagnostic_set_info (&diagnostic, msgid, &ap, input_location, DK_WARNING); report_diagnostic (&diagnostic); va_end (ap); } /* A "pedantic" warning: issues a warning unless -pedantic-errors was given on the command line, in which case it issues an error. Use this for diagnostics required by the relevant language standard, if you have chosen not to make them errors. Note that these diagnostics are issued independent of the setting of the -pedantic command-line switch. To get a warning enabled only with that switch, write "if (pedantic) pedwarn (...);" */ void pedwarn (const char *msgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, msgid); diagnostic_set_info (&diagnostic, msgid, &ap, input_location, pedantic_error_kind ()); report_diagnostic (&diagnostic); va_end (ap); } /* A hard error: the code is definitely ill-formed, and an object file will not be produced. */ void error (const char *msgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, msgid); diagnostic_set_info (&diagnostic, msgid, &ap, input_location, DK_ERROR); report_diagnostic (&diagnostic); va_end (ap); } /* "Sorry, not implemented." Use for a language feature which is required by the relevant specification but not implemented by GCC. An object file will not be produced. */ void sorry (const char *msgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, msgid); diagnostic_set_info (&diagnostic, msgid, &ap, input_location, DK_SORRY); report_diagnostic (&diagnostic); va_end (ap); } /* An error which is severe enough that we make no attempt to continue. Do not use this for internal consistency checks; that's internal_error. Use of this function should be rare. */ void fatal_error (const char *msgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, msgid); diagnostic_set_info (&diagnostic, msgid, &ap, input_location, DK_FATAL); report_diagnostic (&diagnostic); va_end (ap); /* NOTREACHED */ real_abort (); } /* An internal consistency check has failed. We make no attempt to continue. Note that unless there is debugging value to be had from a more specific message, or some other good reason, you should use abort () instead of calling this function directly. */ void internal_error (const char *msgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, msgid); diagnostic_set_info (&diagnostic, msgid, &ap, input_location, DK_ICE); report_diagnostic (&diagnostic); va_end (ap); /* NOTREACHED */ real_abort (); } /* Special case error functions. Most are implemented in terms of the above, or should be. */ /* Print a diagnostic MSGID on FILE. This is just fprintf, except it runs its second argument through gettext. */ void fnotice (FILE *file, const char *msgid, ...)
{ va_list ap; va_start (ap, msgid); vfprintf (file, _(msgid), ap); va_end (ap); } /* Inform the user that an error occurred while trying to report some other error. This indicates catastrophic internal inconsistencies, so give up now. But do try to flush out the previous error. This mustn't use internal_error, that will cause infinite recursion. */ static void error_recursion (diagnostic_context *context) { if (context->lock < 3) pp_flush (context->printer); fnotice (stderr, "Internal compiler error: Error reporting routines re-entered.\n"); fnotice (stderr, bug_report_request, bug_report_url); exit (FATAL_EXIT_CODE); } /* Report an internal compiler error in a friendly manner. This is the function that gets called upon use of abort() in the source code generally, thanks to a special macro. */ void fancy_abort (const char *file, int line, const char *function) { internal_error ("in %s, at %s:%d", function, trim_filename (file), line); } /* Really call the system 'abort'. This has to go right at the end of this file, so that there are no functions after it that call abort and get the system abort instead of our macro. */ #undef abort static void real_abort (void) { abort (); } /* Convert tree expression to rtl instructions, for GNU compiler. Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */ static bool prefer_and_bit_test (enum machine_mode, int); static void do_jump_by_parts_greater (tree, int, rtx, rtx); static void do_jump_by_parts_equality (tree, rtx, rtx); static void do_compare_and_jump (tree, enum rtx_code, enum rtx_code, rtx, rtx); /* At the start of a function, record that we have no previously-pushed arguments waiting to be popped. */ void init_pending_stack_adjust (void) { pending_stack_adjust = 0; } /* When exiting from function, if safe, clear out any pending stack adjust so the adjustment won't get done. Note, if the current function calls alloca, then it must have a frame pointer regardless of the value of flag_omit_frame_pointer. */ void clear_pending_stack_adjust (void) { if (optimize > 0 && (! flag_omit_frame_pointer || current_function_calls_alloca) && EXIT_IGNORE_STACK && ! (DECL_INLINE (current_function_decl) && ! flag_no_inline) && ! flag_inline_functions) { stack_pointer_delta -= pending_stack_adjust, pending_stack_adjust = 0; } } /* Pop any previously-pushed arguments that have not been popped yet. */ void do_pending_stack_adjust (void) { if (inhibit_defer_pop == 0) { if (pending_stack_adjust != 0) adjust_stack (GEN_INT (pending_stack_adjust)); pending_stack_adjust = 0; } } /* Expand conditional expressions. */ /* Generate code to evaluate EXP and jump to LABEL if the value is zero. 
LABEL is an rtx of code CODE_LABEL, in this function and all the functions here. */ void jumpifnot (tree exp, rtx label) { do_jump (exp, label, NULL_RTX); } /* Generate code to evaluate EXP and jump to LABEL if the value is nonzero. */ void jumpif (tree exp, rtx label) { do_jump (exp, NULL_RTX, label); } /* Used internally by prefer_and_bit_test. */ static GTY(()) rtx and_reg; static GTY(()) rtx and_test; static GTY(()) rtx shift_test; /* Compare the relative costs of "(X & (1 << BITNUM))" and "(X >> BITNUM) & 1", where X is an arbitrary register of mode MODE. Return true if the former is preferred. */ static bool prefer_and_bit_test (enum machine_mode mode, int bitnum) { if (and_test == 0) { /* Set up rtxes for the two variations. Use NULL as a placeholder for the BITNUM-based constants. */ and_reg = gen_rtx_REG (mode, FIRST_PSEUDO_REGISTER); and_test = gen_rtx_AND (mode, and_reg, NULL); shift_test = gen_rtx_AND (mode, gen_rtx_ASHIFTRT (mode, and_reg, NULL), const1_rtx); } else { /* Change the mode of the previously-created rtxes. */ PUT_MODE (and_reg, mode); PUT_MODE (and_test, mode); PUT_MODE (shift_test, mode); PUT_MODE (XEXP (shift_test, 0), mode); } /* Fill in the integers. */ XEXP (and_test, 1) = GEN_INT ((unsigned HOST_WIDE_INT) 1 << bitnum); XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum); return (rtx_cost (and_test, IF_THEN_ELSE) <= rtx_cost (shift_test, IF_THEN_ELSE)); } /* Generate code to evaluate EXP and jump to IF_FALSE_LABEL if the result is zero, or IF_TRUE_LABEL if the result is one. Either of IF_FALSE_LABEL and IF_TRUE_LABEL may be zero, meaning fall through in that case. do_jump always does any pending stack adjust except when it does not actually perform a jump. An example where there is no jump is when EXP is `(foo (), 0)' and IF_FALSE_LABEL is null. This function is responsible for optimizing cases such as &&, || and comparison operators in EXP. */ void do_jump (tree exp, rtx if_false_label, rtx if_true_label) { enum tree_code code = TREE_CODE (exp); /* Some cases need to create a label to jump to in order to properly fall through. These cases set DROP_THROUGH_LABEL nonzero. */ rtx drop_through_label = 0; rtx temp; int i; tree type; enum machine_mode mode; emit_queue (); switch (code) { case ERROR_MARK: break; case INTEGER_CST: temp = integer_zerop (exp) ? if_false_label : if_true_label; if (temp) emit_jump (temp); break; #if 0 /* This is not true with #pragma weak */ case ADDR_EXPR: /* The address of something can never be zero. */ if (if_true_label) emit_jump (if_true_label); break; #endif case UNSAVE_EXPR: do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label); TREE_OPERAND (exp, 0) = lang_hooks.unsave_expr_now (TREE_OPERAND (exp, 0)); break; case NOP_EXPR: if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF || TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF || TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_RANGE_REF) goto normal; case CONVERT_EXPR: /* If we are narrowing the operand, we have to do the compare in the narrower mode. */ if ((TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))))) goto normal; case NON_LVALUE_EXPR: case REFERENCE_EXPR: case ABS_EXPR: case NEGATE_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: /* These cannot change zero->nonzero or vice versa. */ do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label); break; #if 0 /* This is never less insns than evaluating the PLUS_EXPR followed by a test and can be longer if the test is eliminated. 
*/ case PLUS_EXPR: /* Reduce to minus. */ exp = build (MINUS_EXPR, TREE_TYPE (exp), TREE_OPERAND (exp, 0), fold (build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (exp, 1)), TREE_OPERAND (exp, 1)))); /* Process as MINUS. */ #endif case MINUS_EXPR: /* Nonzero iff operands of minus differ. */ do_compare_and_jump (build (NE_EXPR, TREE_TYPE (exp), TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1)), NE, NE, if_false_label, if_true_label); break; case BIT_AND_EXPR: /* fold_single_bit_test() converts (X & (1 << C)) into (X >> C) & 1. See if the former is preferred for jump tests and restore it if so. */ if (TREE_CODE (TREE_OPERAND (exp, 0)) == RSHIFT_EXPR && integer_onep (TREE_OPERAND (exp, 1))) { tree arg = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); tree shift = TREE_OPERAND (TREE_OPERAND (exp, 0), 1); tree one = TREE_OPERAND (exp, 1); tree argtype = TREE_TYPE (arg); if (TREE_CODE (shift) == INTEGER_CST && compare_tree_int (shift, 0) > 0 && compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0 && prefer_and_bit_test (TYPE_MODE (argtype), TREE_INT_CST_LOW (shift))) { do_jump (build (BIT_AND_EXPR, argtype, arg, fold (build (LSHIFT_EXPR, argtype, one, shift))), if_false_label, if_true_label); break; } } /* If we are AND'ing with a small constant, do this comparison in the smallest type that fits. If the machine doesn't have comparisons that small, it will be converted back to the wider comparison. This helps if we are testing the sign bit of a narrower object. combine can't do this for us because it can't know whether a ZERO_EXTRACT or a compare in a smaller mode exists, but we do. */ if (! SLOW_BYTE_ACCESS && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT && (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0 && (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode && (type = lang_hooks.types.type_for_mode (mode, 1)) != 0 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp)) && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code != CODE_FOR_nothing)) { do_jump (convert (type, exp), if_false_label, if_true_label); break; } goto normal; case TRUTH_NOT_EXPR: do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label); break; case TRUTH_ANDIF_EXPR: if (if_false_label == 0) if_false_label = drop_through_label = gen_label_rtx (); do_jump (TREE_OPERAND (exp, 0), if_false_label, NULL_RTX); start_cleanup_deferral (); do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label); end_cleanup_deferral (); break; case TRUTH_ORIF_EXPR: if (if_true_label == 0) if_true_label = drop_through_label = gen_label_rtx (); do_jump (TREE_OPERAND (exp, 0), NULL_RTX, if_true_label); start_cleanup_deferral (); do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label); end_cleanup_deferral (); break; case COMPOUND_EXPR: push_temp_slots (); expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0); preserve_temp_slots (NULL_RTX); free_temp_slots (); pop_temp_slots (); emit_queue (); do_pending_stack_adjust (); do_jump (TREE_OPERAND (exp, 1), if_false_label, if_true_label); break; case COMPONENT_REF: case BIT_FIELD_REF: case ARRAY_REF: case ARRAY_RANGE_REF: { HOST_WIDE_INT bitsize, bitpos; int unsignedp; enum machine_mode mode; tree type; tree offset; int volatilep = 0; /* Get description of this reference. We don't actually care about the underlying object here. */ get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode, &unsignedp, &volatilep); type = lang_hooks.types.type_for_size (bitsize, unsignedp); if (! 
SLOW_BYTE_ACCESS && type != 0 && bitsize >= 0 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp)) && (cmp_optab->handlers[(int) TYPE_MODE (type)].insn_code != CODE_FOR_nothing)) { do_jump (convert (type, exp), if_false_label, if_true_label); break; } goto normal; } case COND_EXPR: /* Do (a ? 1 : 0) and (a ? 0 : 1) as special cases. */ if (integer_onep (TREE_OPERAND (exp, 1)) && integer_zerop (TREE_OPERAND (exp, 2))) do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label); else if (integer_zerop (TREE_OPERAND (exp, 1)) && integer_onep (TREE_OPERAND (exp, 2))) do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label); else { rtx label1 = gen_label_rtx (); drop_through_label = gen_label_rtx (); do_jump (TREE_OPERAND (exp, 0), label1, NULL_RTX); start_cleanup_deferral (); /* Now the THEN-expression. */ do_jump (TREE_OPERAND (exp, 1), if_false_label ? if_false_label : drop_through_label, if_true_label ? if_true_label : drop_through_label); /* In case the do_jump just above never jumps. */ do_pending_stack_adjust (); emit_label (label1); /* Now the ELSE-expression. */ do_jump (TREE_OPERAND (exp, 2), if_false_label ? if_false_label : drop_through_label, if_true_label ? if_true_label : drop_through_label); end_cleanup_deferral (); } break; case EQ_EXPR: { tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0)); if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_FLOAT || GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_INT) { tree exp0 = save_expr (TREE_OPERAND (exp, 0)); tree exp1 = save_expr (TREE_OPERAND (exp, 1)); do_jump (fold (build (TRUTH_ANDIF_EXPR, TREE_TYPE (exp), fold (build (EQ_EXPR, TREE_TYPE (exp), fold (build1 (REALPART_EXPR, TREE_TYPE (inner_type), exp0)), fold (build1 (REALPART_EXPR, TREE_TYPE (inner_type), exp1)))), fold (build (EQ_EXPR, TREE_TYPE (exp), fold (build1 (IMAGPART_EXPR, TREE_TYPE (inner_type), exp0)), fold (build1 (IMAGPART_EXPR, TREE_TYPE (inner_type), exp1)))))), if_false_label, if_true_label); } else if (integer_zerop (TREE_OPERAND (exp, 1))) do_jump (TREE_OPERAND (exp, 0), if_true_label, if_false_label); else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT && !can_compare_p (EQ, TYPE_MODE (inner_type), ccp_jump)) do_jump_by_parts_equality (exp, if_false_label, if_true_label); else do_compare_and_jump (exp, EQ, EQ, if_false_label, if_true_label); break; } case NE_EXPR: { tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0)); if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_FLOAT || GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_COMPLEX_INT) { tree exp0 = save_expr (TREE_OPERAND (exp, 0)); tree exp1 = save_expr (TREE_OPERAND (exp, 1)); do_jump (fold (build (TRUTH_ORIF_EXPR, TREE_TYPE (exp), fold (build (NE_EXPR, TREE_TYPE (exp), fold (build1 (REALPART_EXPR, TREE_TYPE (inner_type), exp0)), fold (build1 (REALPART_EXPR, TREE_TYPE (inner_type), exp1)))), fold (build (NE_EXPR, TREE_TYPE (exp), fold (build1 (IMAGPART_EXPR, TREE_TYPE (inner_type), exp0)), fold (build1 (IMAGPART_EXPR, TREE_TYPE (inner_type), exp1)))))), if_false_label, if_true_label); } else if (integer_zerop (TREE_OPERAND (exp, 1))) do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label); else if (GET_MODE_CLASS (TYPE_MODE (inner_type)) == MODE_INT && !can_compare_p (NE, TYPE_MODE (inner_type), ccp_jump)) do_jump_by_parts_equality (exp, if_true_label, if_false_label); else do_compare_and_jump (exp, NE, NE, if_false_label, if_true_label); break; } case LT_EXPR: mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); if (GET_MODE_CLASS (mode) == MODE_INT 
&& ! can_compare_p (LT, mode, ccp_jump)) do_jump_by_parts_greater (exp, 1, if_false_label, if_true_label); else do_compare_and_jump (exp, LT, LTU, if_false_label, if_true_label); break; case LE_EXPR: mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (LE, mode, ccp_jump)) do_jump_by_parts_greater (exp, 0, if_true_label, if_false_label); else do_compare_and_jump (exp, LE, LEU, if_false_label, if_true_label); break; case GT_EXPR: mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (GT, mode, ccp_jump)) do_jump_by_parts_greater (exp, 0, if_false_label, if_true_label); else do_compare_and_jump (exp, GT, GTU, if_false_label, if_true_label); break; case GE_EXPR: mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (GE, mode, ccp_jump)) do_jump_by_parts_greater (exp, 1, if_true_label, if_false_label); else do_compare_and_jump (exp, GE, GEU, if_false_label, if_true_label); break; case UNORDERED_EXPR: case ORDERED_EXPR: { enum rtx_code cmp, rcmp; int do_rev; if (code == UNORDERED_EXPR) cmp = UNORDERED, rcmp = ORDERED; else cmp = ORDERED, rcmp = UNORDERED; mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); do_rev = 0; if (! can_compare_p (cmp, mode, ccp_jump) && (can_compare_p (rcmp, mode, ccp_jump) /* If the target doesn't provide either UNORDERED or ORDERED comparisons, canonicalize on UNORDERED for the library. */ || rcmp == UNORDERED)) do_rev = 1; if (! do_rev) do_compare_and_jump (exp, cmp, cmp, if_false_label, if_true_label); else do_compare_and_jump (exp, rcmp, rcmp, if_true_label, if_false_label); } break; { enum rtx_code rcode1; enum tree_code tcode1, tcode2; case UNLT_EXPR: rcode1 = UNLT; tcode1 = UNORDERED_EXPR; tcode2 = LT_EXPR; goto unordered_bcc; case UNLE_EXPR: rcode1 = UNLE; tcode1 = UNORDERED_EXPR; tcode2 = LE_EXPR; goto unordered_bcc; case UNGT_EXPR: rcode1 = UNGT; tcode1 = UNORDERED_EXPR; tcode2 = GT_EXPR; goto unordered_bcc; case UNGE_EXPR: rcode1 = UNGE; tcode1 = UNORDERED_EXPR; tcode2 = GE_EXPR; goto unordered_bcc; case UNEQ_EXPR: rcode1 = UNEQ; tcode1 = UNORDERED_EXPR; tcode2 = EQ_EXPR; goto unordered_bcc; case LTGT_EXPR: /* It is ok for LTGT_EXPR to trap when the result is unordered, so expand to (a < b) || (a > b). */ rcode1 = LTGT; tcode1 = LT_EXPR; tcode2 = GT_EXPR; goto unordered_bcc; unordered_bcc: mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); if (can_compare_p (rcode1, mode, ccp_jump)) do_compare_and_jump (exp, rcode1, rcode1, if_false_label, if_true_label); else { tree op0 = save_expr (TREE_OPERAND (exp, 0)); tree op1 = save_expr (TREE_OPERAND (exp, 1)); tree cmp0, cmp1; /* If the target doesn't support combined unordered compares, decompose into two comparisons. */ cmp0 = fold (build (tcode1, TREE_TYPE (exp), op0, op1)); cmp1 = fold (build (tcode2, TREE_TYPE (exp), op0, op1)); exp = build (TRUTH_ORIF_EXPR, TREE_TYPE (exp), cmp0, cmp1); do_jump (exp, if_false_label, if_true_label); } } break; /* Special case: __builtin_expect (, 0) and __builtin_expect (, 1) We need to do this here, so that is not converted to a SCC operation on machines that use condition code registers and COMPARE like the PowerPC, and then the jump is done based on whether the SCC operation produced a 1 or 0. */ case CALL_EXPR: /* Check for a built-in function. 
*/ { tree fndecl = get_callee_fndecl (exp); tree arglist = TREE_OPERAND (exp, 1); if (fndecl && DECL_BUILT_IN (fndecl) && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT && arglist != NULL_TREE && TREE_CHAIN (arglist) != NULL_TREE) { rtx seq = expand_builtin_expect_jump (exp, if_false_label, if_true_label); if (seq != NULL_RTX) { emit_insn (seq); return; } } } /* Fall through and generate the normal code. */ default: normal: temp = expand_expr (exp, NULL_RTX, VOIDmode, 0); #if 0 /* This is not needed any more and causes poor code since it causes comparisons and tests from non-SI objects to have different code sequences. */ /* Copy to register to avoid generating bad insns by cse from (set (mem ...) (arithop)) (set (cc0) (mem ...)). */ if (!cse_not_expected && MEM_P (temp)) temp = copy_to_reg (temp); #endif do_pending_stack_adjust (); /* Do any postincrements in the expression that was tested. */ emit_queue (); if (GET_CODE (temp) == CONST_INT || (GET_CODE (temp) == CONST_DOUBLE && GET_MODE (temp) == VOIDmode) || GET_CODE (temp) == LABEL_REF) { rtx target = temp == const0_rtx ? if_false_label : if_true_label; if (target) emit_jump (target); } else if (GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT && ! can_compare_p (NE, GET_MODE (temp), ccp_jump)) /* Note swapping the labels gives us not-equal. */ do_jump_by_parts_equality_rtx (temp, if_true_label, if_false_label); else if (GET_MODE (temp) != VOIDmode) { /* The RTL optimizers prefer comparisons against pseudos. */ if (GET_CODE (temp) == SUBREG) { /* Compare promoted variables in their promoted mode. */ if (SUBREG_PROMOTED_VAR_P (temp) && REG_P (XEXP (temp, 0))) temp = XEXP (temp, 0); else temp = copy_to_reg (temp); } do_compare_rtx_and_jump (temp, CONST0_RTX (GET_MODE (temp)), NE, TYPE_UNSIGNED (TREE_TYPE (exp)), GET_MODE (temp), NULL_RTX, if_false_label, if_true_label); } else abort (); } if (drop_through_label) { /* If do_jump produces code that might be jumped around, do any stack adjusts from that code, before the place where control merges in. */ do_pending_stack_adjust (); emit_label (drop_through_label); } } /* Given a comparison expression EXP for values too wide to be compared with one insn, test the comparison and jump to the appropriate label. The code of EXP is ignored; we always test GT if SWAP is 0, and LT if SWAP is 1. */ static void do_jump_by_parts_greater (tree exp, int swap, rtx if_false_label, rtx if_true_label) { rtx op0 = expand_expr (TREE_OPERAND (exp, swap), NULL_RTX, VOIDmode, 0); rtx op1 = expand_expr (TREE_OPERAND (exp, !swap), NULL_RTX, VOIDmode, 0); enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); int unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))); do_jump_by_parts_greater_rtx (mode, unsignedp, op0, op1, if_false_label, if_true_label); } /* Compare OP0 with OP1, word at a time, in mode MODE. UNSIGNEDP says to do unsigned comparison. Jump to IF_TRUE_LABEL if OP0 is greater, IF_FALSE_LABEL otherwise. */ void do_jump_by_parts_greater_rtx (enum machine_mode mode, int unsignedp, rtx op0, rtx op1, rtx if_false_label, rtx if_true_label) { int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD); rtx drop_through_label = 0; int i; if (! if_true_label || ! if_false_label) drop_through_label = gen_label_rtx (); if (! if_true_label) if_true_label = drop_through_label; if (! if_false_label) if_false_label = drop_through_label; /* Compare a word at a time, high order first. 
*/ for (i = 0; i < nwords; i++) { rtx op0_word, op1_word; if (WORDS_BIG_ENDIAN) { op0_word = operand_subword_force (op0, i, mode); op1_word = operand_subword_force (op1, i, mode); } else { op0_word = operand_subword_force (op0, nwords - 1 - i, mode); op1_word = operand_subword_force (op1, nwords - 1 - i, mode); } /* All but high-order word must be compared as unsigned. */ do_compare_rtx_and_jump (op0_word, op1_word, GT, (unsignedp || i > 0), word_mode, NULL_RTX, NULL_RTX, if_true_label); /* Consider lower words only if these are equal. */ do_compare_rtx_and_jump (op0_word, op1_word, NE, unsignedp, word_mode, NULL_RTX, NULL_RTX, if_false_label); } if (if_false_label) emit_jump (if_false_label); if (drop_through_label) emit_label (drop_through_label); } /* Given an EQ_EXPR expression EXP for values too wide to be compared with one insn, test the comparison and jump to the appropriate label. */ static void do_jump_by_parts_equality (tree exp, rtx if_false_label, rtx if_true_label) { rtx op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0); rtx op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))); int nwords = (GET_MODE_SIZE (mode) / UNITS_PER_WORD); int i; rtx drop_through_label = 0; if (! if_false_label) drop_through_label = if_false_label = gen_label_rtx (); for (i = 0; i < nwords; i++) do_compare_rtx_and_jump (operand_subword_force (op0, i, mode), operand_subword_force (op1, i, mode), EQ, TYPE_UNSIGNED (TREE_TYPE (exp)), word_mode, NULL_RTX, if_false_label, NULL_RTX); if (if_true_label) emit_jump (if_true_label); if (drop_through_label) emit_label (drop_through_label); } /* Jump according to whether OP0 is 0. We assume that OP0 has an integer mode that is too wide for the available compare insns. */ void do_jump_by_parts_equality_rtx (rtx op0, rtx if_false_label, rtx if_true_label) { int nwords = GET_MODE_SIZE (GET_MODE (op0)) / UNITS_PER_WORD; rtx part; int i; rtx drop_through_label = 0; /* The fastest way of doing this comparison on almost any machine is to "or" all the words and compare the result. If all have to be loaded from memory and this is a very wide item, it's possible this may be slower, but that's highly unlikely. */ part = gen_reg_rtx (word_mode); emit_move_insn (part, operand_subword_force (op0, 0, GET_MODE (op0))); for (i = 1; i < nwords && part != 0; i++) part = expand_binop (word_mode, ior_optab, part, operand_subword_force (op0, i, GET_MODE (op0)), part, 1, OPTAB_WIDEN); if (part != 0) { do_compare_rtx_and_jump (part, const0_rtx, EQ, 1, word_mode, NULL_RTX, if_false_label, if_true_label); return; } /* If we couldn't do the "or" simply, do this with a series of compares. */ if (! if_false_label) drop_through_label = if_false_label = gen_label_rtx (); for (i = 0; i < nwords; i++) do_compare_rtx_and_jump (operand_subword_force (op0, i, GET_MODE (op0)), const0_rtx, EQ, 1, word_mode, NULL_RTX, if_false_label, NULL_RTX); if (if_true_label) emit_jump (if_true_label); if (drop_through_label) emit_label (drop_through_label); } /* Generate code for a comparison of OP0 and OP1 with rtx code CODE. (including code to compute the values to be compared) and set (CC0) according to the result. The decision as to signed or unsigned comparison must be made by the caller. We force a stack adjustment unless there are currently things pushed on the stack that aren't yet used. If MODE is BLKmode, SIZE is an RTX giving the size of the objects being compared. 
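*/

/* Illustrative sketch, not part of the original sources: the jump sequence
   emitted by do_jump_by_parts_greater_rtx above corresponds to the
   following comparison on plain host integers, where OP0 and OP1 hold
   NWORDS words with the least significant word first (the RTL code picks
   subwords according to WORDS_BIG_ENDIAN instead).  Only the high-order
   word honors signedness; all other words are compared as unsigned, and
   lower words are consulted only when the words seen so far are equal.  */

static int
example_compare_by_parts (const unsigned long *op0, const unsigned long *op1,
			  int nwords, int signed_p)
{
  int i;

  for (i = nwords - 1; i >= 0; i--)
    {
      if (signed_p && i == nwords - 1)
	{
	  /* High-order word: signed comparison.  */
	  long s0 = (long) op0[i], s1 = (long) op1[i];
	  if (s0 != s1)
	    return s0 > s1 ? 1 : -1;
	}
      else if (op0[i] != op1[i])
	return op0[i] > op1[i] ? 1 : -1;
    }
  return 0;
}

/* compare_from_rtx, documented above, follows.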
*/ rtx compare_from_rtx (rtx op0, rtx op1, enum rtx_code code, int unsignedp, enum machine_mode mode, rtx size) { rtx tem; /* If one operand is constant, make it the second one. Only do this if the other operand is not constant as well. */ if (swap_commutative_operands_p (op0, op1)) { tem = op0; op0 = op1; op1 = tem; code = swap_condition (code); } if (flag_force_mem) { op0 = force_not_mem (op0); op1 = force_not_mem (op1); } do_pending_stack_adjust (); code = unsignedp ? unsigned_condition (code) : code; if (0 != (tem = simplify_relational_operation (code, mode, VOIDmode, op0, op1))) { if (CONSTANT_P (tem)) return tem; code = GET_CODE (tem); mode = GET_MODE (tem); op0 = XEXP (tem, 0); op1 = XEXP (tem, 1); unsignedp = (code == GTU || code == LTU || code == GEU || code == LEU); } emit_cmp_insn (op0, op1, code, size, mode, unsignedp); #if HAVE_cc0 return gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx); #else return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); #endif } /* Like do_compare_and_jump but expects the values to compare as two rtx's. The decision as to signed or unsigned comparison must be made by the caller. If MODE is BLKmode, SIZE is an RTX giving the size of the objects being compared. */ void do_compare_rtx_and_jump (rtx op0, rtx op1, enum rtx_code code, int unsignedp, enum machine_mode mode, rtx size, rtx if_false_label, rtx if_true_label) { rtx tem; int dummy_true_label = 0; /* Reverse the comparison if that is safe and we want to jump if it is false. */ if (! if_true_label && ! FLOAT_MODE_P (mode)) { if_true_label = if_false_label; if_false_label = 0; code = reverse_condition (code); } /* If one operand is constant, make it the second one. Only do this if the other operand is not constant as well. */ if (swap_commutative_operands_p (op0, op1)) { tem = op0; op0 = op1; op1 = tem; code = swap_condition (code); } if (flag_force_mem) { op0 = force_not_mem (op0); op1 = force_not_mem (op1); } do_pending_stack_adjust (); code = unsignedp ? unsigned_condition (code) : code; if (0 != (tem = simplify_relational_operation (code, mode, VOIDmode, op0, op1))) { if (CONSTANT_P (tem)) { rtx label = (tem == const0_rtx || tem == CONST0_RTX (mode)) ? if_false_label : if_true_label; if (label) emit_jump (label); return; } code = GET_CODE (tem); mode = GET_MODE (tem); op0 = XEXP (tem, 0); op1 = XEXP (tem, 1); unsignedp = (code == GTU || code == LTU || code == GEU || code == LEU); } if (! if_true_label) { dummy_true_label = 1; if_true_label = gen_label_rtx (); } emit_cmp_and_jump_insns (op0, op1, code, size, mode, unsignedp, if_true_label); if (if_false_label) emit_jump (if_false_label); if (dummy_true_label) emit_label (if_true_label); } /* Generate code for a comparison expression EXP (including code to compute the values to be compared) and a conditional jump to IF_FALSE_LABEL and/or IF_TRUE_LABEL. One of the labels can be NULL_RTX, in which case the generated code will drop through. SIGNED_CODE should be the rtx operation for this comparison for signed data; UNSIGNED_CODE, likewise for use if data is unsigned. We force a stack adjustment unless there are currently things pushed on the stack that aren't yet used. */ static void do_compare_and_jump (tree exp, enum rtx_code signed_code, enum rtx_code unsigned_code, rtx if_false_label, rtx if_true_label) { rtx op0, op1; tree type; enum machine_mode mode; int unsignedp; enum rtx_code code; /* Don't crash if the comparison was erroneous. 
op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0); if (TREE_CODE (TREE_OPERAND (exp, 0)) == ERROR_MARK) return; op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); if (TREE_CODE (TREE_OPERAND (exp, 1)) == ERROR_MARK) return; type = TREE_TYPE (TREE_OPERAND (exp, 0)); mode = TYPE_MODE (type); if (TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST && (TREE_CODE (TREE_OPERAND (exp, 1)) != INTEGER_CST || (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1))))))) { /* op0 might have been replaced by promoted constant, in which case the type of second argument should be used. */ type = TREE_TYPE (TREE_OPERAND (exp, 1)); mode = TYPE_MODE (type); } unsignedp = TYPE_UNSIGNED (type); code = unsignedp ? unsigned_code : signed_code; #ifdef HAVE_canonicalize_funcptr_for_compare /* If function pointers need to be "canonicalized" before they can be reliably compared, then canonicalize them. */ if (HAVE_canonicalize_funcptr_for_compare && TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == POINTER_TYPE && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))) == FUNCTION_TYPE)) { rtx new_op0 = gen_reg_rtx (mode); emit_insn (gen_canonicalize_funcptr_for_compare (new_op0, op0)); op0 = new_op0; } if (HAVE_canonicalize_funcptr_for_compare && TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 1))) == POINTER_TYPE && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 1)))) == FUNCTION_TYPE)) { rtx new_op1 = gen_reg_rtx (mode); emit_insn (gen_canonicalize_funcptr_for_compare (new_op1, op1)); op1 = new_op1; } #endif /* Do any postincrements in the expression that was tested. */ emit_queue (); do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, ((mode == BLKmode) ? expr_size (TREE_OPERAND (exp, 0)) : NULL_RTX), if_false_label, if_true_label); } /* Type information for dojump.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_dojump_h[] = { { &shift_test, 1, sizeof (shift_test), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, { &and_test, 1, sizeof (and_test), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, { &and_reg, 1, sizeof (and_reg), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, LAST_GGC_ROOT_TAB }; /* Calculate (post)dominators in slightly super-linear time. Copyright (C) 2000, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Matz (matz@ifh.de). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file implements the well known algorithm from Lengauer and Tarjan to compute the dominators in a control flow graph. A basic block D is said to dominate another block X, when all paths from the entry node of the CFG to X go also over D. The dominance relation is a transitive reflexive relation and its minimal transitive reduction is a tree, called the dominator tree. So for each block X besides the entry block exists a block I(X), called the immediate dominator of X, which is the parent of X in the dominator tree. The algorithm computes this dominator tree implicitly by computing for each block its immediate dominator. We use tree balancing and path compression, so its the O(e*a(e,v)) variant, where a(e,v) is the very slowly growing functional inverse of the Ackerman function. */ /* Et-forest data structure implementation. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This package implements ET forest data structure. Each tree in the structure maintains a tree structure and offers logarithmic time for tree operations (insertion and removal of nodes and edges) and poly-logarithmic time for nearest common ancestor. ET tree stores its structure as a sequence of symbols obtained by dfs(root) dfs (node) { s = node; for each child c of node do s = concat (s, c, node); return s; } For example for tree 1 / | \ 2 3 4 / | 4 5 the sequence is 1 2 4 2 5 3 1 3 1 4 1. The sequence is stored in a slightly modified splay tree. In order to support various types of node values, a hashtable is used to convert node values to the internal representation. */ #ifndef _ET_TREE_H #define _ET_TREE_H #include #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /* The node representing the node in an et tree. */ struct et_node { void *data; /* The data represented by the node. */ int dfs_num_in, dfs_num_out; /* Number of the node in the dfs ordering. */ struct et_node *father; /* Father of the node. */ struct et_node *son; /* The first of the sons of the node. */ struct et_node *left; struct et_node *right; /* The brothers of the node. */ struct et_occ *rightmost_occ; /* The rightmost occurrence. */ struct et_occ *parent_occ; /* The occurrence of the parent node. 
*/ }; struct et_node *et_new_tree (void *data); void et_free_tree (struct et_node *); void et_set_father (struct et_node *, struct et_node *); void et_split (struct et_node *); struct et_node *et_nca (struct et_node *, struct et_node *); bool et_below (struct et_node *, struct et_node *); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _ET_TREE_H */ /* Whether the dominators and the postdominators are available. */ enum dom_state dom_computed[2]; /* We name our nodes with integers, beginning with 1. Zero is reserved for 'undefined' or 'end of list'. The name of each node is given by the dfs number of the corresponding basic block. Please note, that we include the artificial ENTRY_BLOCK (or EXIT_BLOCK in the post-dom case) in our lists to support multiple entry points. As it has no real basic block index we use 'last_basic_block' for that. Its dfs number is of course 1. */ /* Type of Basic Block aka. TBB */ typedef unsigned int TBB; /* We work in a poor-mans object oriented fashion, and carry an instance of this structure through all our 'methods'. It holds various arrays reflecting the (sub)structure of the flowgraph. Most of them are of type TBB and are also indexed by TBB. */ struct dom_info { /* The parent of a node in the DFS tree. */ TBB *dfs_parent; /* For a node x key[x] is roughly the node nearest to the root from which exists a way to x only over nodes behind x. Such a node is also called semidominator. */ TBB *key; /* The value in path_min[x] is the node y on the path from x to the root of the tree x is in with the smallest key[y]. */ TBB *path_min; /* bucket[x] points to the first node of the set of nodes having x as key. */ TBB *bucket; /* And next_bucket[x] points to the next node. */ TBB *next_bucket; /* After the algorithm is done, dom[x] contains the immediate dominator of x. */ TBB *dom; /* The following few fields implement the structures needed for disjoint sets. */ /* set_chain[x] is the next node on the path from x to the representant of the set containing x. If set_chain[x]==0 then x is a root. */ TBB *set_chain; /* set_size[x] is the number of elements in the set named by x. */ unsigned int *set_size; /* set_child[x] is used for balancing the tree representing a set. It can be understood as the next sibling of x. */ TBB *set_child; /* If b is the number of a basic block (BB->index), dfs_order[b] is the number of that node in DFS order counted from 1. This is an index into most of the other arrays in this structure. */ TBB *dfs_order; /* If x is the DFS-index of a node which corresponds with a basic block, dfs_to_bb[x] is that basic block. Note, that in our structure there are more nodes that basic blocks, so only dfs_to_bb[dfs_order[bb->index]]==bb is true for every basic block bb, but not the opposite. */ basic_block *dfs_to_bb; /* This is the next free DFS number when creating the DFS tree or forest. */ unsigned int dfsnum; /* The number of nodes in the DFS tree (==dfsnum-1). 
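dfsnum starts at 1 and is post-incremented for every node that receives a number, so after the walk nodes == dfsnum - 1. It must end up equal to n_basic_blocks + 1, i.e. every real basic block plus the fake ENTRY (or EXIT) node; calc_dfs_tree aborts otherwise.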
*/ unsigned int nodes; }; static void init_dom_info (struct dom_info *); static void free_dom_info (struct dom_info *); static void calc_dfs_tree_nonrec (struct dom_info *, basic_block, enum cdi_direction); static void calc_dfs_tree (struct dom_info *, enum cdi_direction); static void compress (struct dom_info *, TBB); static TBB eval (struct dom_info *, TBB); static void link_roots (struct dom_info *, TBB, TBB); static void calc_idoms (struct dom_info *, enum cdi_direction); void debug_dominance_info (enum cdi_direction); /* Keeps track of the*/ static unsigned n_bbs_in_dom_tree[2]; /* Helper macro for allocating and initializing an array, for aesthetic reasons. */ #define init_ar(var, type, num, content) \ do \ { \ unsigned int i = 1; /* Catch content == i. */ \ if (! (content)) \ (var) = xcalloc ((num), sizeof (type)); \ else \ { \ (var) = xmalloc ((num) * sizeof (type)); \ for (i = 0; i < num; i++) \ (var)[i] = (content); \ } \ } \ while (0) /* Allocate all needed memory in a pessimistic fashion (so we round up). This initializes the contents of DI, which already must be allocated. */ static void init_dom_info (struct dom_info *di) { /* We need memory for n_basic_blocks nodes and the ENTRY_BLOCK or EXIT_BLOCK. */ unsigned int num = n_basic_blocks + 1 + 1; init_ar (di->dfs_parent, TBB, num, 0); init_ar (di->path_min, TBB, num, i); init_ar (di->key, TBB, num, i); init_ar (di->dom, TBB, num, 0); init_ar (di->bucket, TBB, num, 0); init_ar (di->next_bucket, TBB, num, 0); init_ar (di->set_chain, TBB, num, 0); init_ar (di->set_size, unsigned int, num, 1); init_ar (di->set_child, TBB, num, 0); init_ar (di->dfs_order, TBB, (unsigned int) last_basic_block + 1, 0); init_ar (di->dfs_to_bb, basic_block, num, 0); di->dfsnum = 1; di->nodes = 0; } #undef init_ar /* Free all allocated memory in DI, but not DI itself. */ static void free_dom_info (struct dom_info *di) { free (di->dfs_parent); free (di->path_min); free (di->key); free (di->dom); free (di->bucket); free (di->next_bucket); free (di->set_chain); free (di->set_size); free (di->set_child); free (di->dfs_order); free (di->dfs_to_bb); } /* The nonrecursive variant of creating a DFS tree. DI is our working structure, BB the starting basic block for this tree and REVERSE is true, if predecessors should be visited instead of successors of a node. After this is done all nodes reachable from BB were visited, have assigned their dfs number and are linked together to form a tree. */ static void calc_dfs_tree_nonrec (struct dom_info *di, basic_block bb, enum cdi_direction reverse) { /* We never call this with bb==EXIT_BLOCK_PTR (ENTRY_BLOCK_PTR if REVERSE). */ /* We call this _only_ if bb is not already visited. */ edge e; TBB child_i, my_i = 0; edge *stack; int sp; /* Start block (ENTRY_BLOCK_PTR for forward problem, EXIT_BLOCK for backward problem). */ basic_block en_block; /* Ending block. */ basic_block ex_block; stack = xmalloc ((n_basic_blocks + 3) * sizeof (edge)); sp = 0; /* Initialize our border blocks, and the first edge. */ if (reverse) { e = bb->pred; en_block = EXIT_BLOCK_PTR; ex_block = ENTRY_BLOCK_PTR; } else { e = bb->succ; en_block = ENTRY_BLOCK_PTR; ex_block = EXIT_BLOCK_PTR; } /* When the stack is empty we break out of this loop. */ while (1) { basic_block bn; /* This loop traverses edges e in depth first manner, and fills the stack. */ while (e) { edge e_next; /* Deduce from E the current and the next block (BB and BN), and the next edge. 
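For the forward problem BN is E->dest and the edges to continue with are taken from BN->succ; for the reverse (post-dominator) problem BN is E->src and BN->pred is used instead. Edges that lead to the border block EX_BLOCK or to an already numbered block are useless and are skipped by stepping to the next edge of the current node.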
*/ if (reverse) { bn = e->src; /* If the next node BN is either already visited or a border block the current edge is useless, and simply overwritten with the next edge out of the current node. */ if (bn == ex_block || di->dfs_order[bn->index]) { e = e->pred_next; continue; } bb = e->dest; e_next = bn->pred; } else { bn = e->dest; if (bn == ex_block || di->dfs_order[bn->index]) { e = e->succ_next; continue; } bb = e->src; e_next = bn->succ; } if (bn == en_block) abort (); /* Fill the DFS tree info calculatable _before_ recursing. */ if (bb != en_block) my_i = di->dfs_order[bb->index]; else my_i = di->dfs_order[last_basic_block]; child_i = di->dfs_order[bn->index] = di->dfsnum++; di->dfs_to_bb[child_i] = bn; di->dfs_parent[child_i] = my_i; /* Save the current point in the CFG on the stack, and recurse. */ stack[sp++] = e; e = e_next; } if (!sp) break; e = stack[--sp]; /* OK. The edge-list was exhausted, meaning normally we would end the recursion. After returning from the recursive call, there were (may be) other statements which were run after a child node was completely considered by DFS. Here is the point to do it in the non-recursive variant. E.g. The block just completed is in e->dest for forward DFS, the block not yet completed (the parent of the one above) in e->src. This could be used e.g. for computing the number of descendants or the tree depth. */ if (reverse) e = e->pred_next; else e = e->succ_next; } free (stack); } /* The main entry for calculating the DFS tree or forest. DI is our working structure and REVERSE is true, if we are interested in the reverse flow graph. In that case the result is not necessarily a tree but a forest, because there may be nodes from which the EXIT_BLOCK is unreachable. */ static void calc_dfs_tree (struct dom_info *di, enum cdi_direction reverse) { /* The first block is the ENTRY_BLOCK (or EXIT_BLOCK if REVERSE). */ basic_block begin = reverse ? EXIT_BLOCK_PTR : ENTRY_BLOCK_PTR; di->dfs_order[last_basic_block] = di->dfsnum; di->dfs_to_bb[di->dfsnum] = begin; di->dfsnum++; calc_dfs_tree_nonrec (di, begin, reverse); if (reverse) { /* In the post-dom case we may have nodes without a path to EXIT_BLOCK. They are reverse-unreachable. In the dom-case we disallow such nodes, but in post-dom we have to deal with them, so we simply include them in the DFS tree which actually becomes a forest. */ basic_block b; FOR_EACH_BB_REVERSE (b) { if (di->dfs_order[b->index]) continue; di->dfs_order[b->index] = di->dfsnum; di->dfs_to_bb[di->dfsnum] = b; di->dfsnum++; calc_dfs_tree_nonrec (di, b, reverse); } } di->nodes = di->dfsnum - 1; /* This aborts e.g. when there is _no_ path from ENTRY to EXIT at all. */ if (di->nodes != (unsigned int) n_basic_blocks + 1) abort (); } /* Compress the path from V to the root of its set and update path_min at the same time. After compress(di, V) set_chain[V] is the root of the set V is in and path_min[V] is the node with the smallest key[] value on the path from V to that root. */ static void compress (struct dom_info *di, TBB v) { /* Btw. It's not worth to unrecurse compress() as the depth is usually not greater than 5 even for huge graphs (I've not seen call depth > 4). Also performance wise compress() ranges _far_ behind eval(). 
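As a purely illustrative example (not taken from a real CFG): if set_chain currently forms the chain V -> P -> R with R the set root, compress (di, V) first recurses on P, then pulls path_min[P] into path_min[V] whenever key[path_min[P]] is smaller, and finally rewrites set_chain[V] to point directly at R, so a later eval (di, V) reaches the root in one step.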
*/ TBB parent = di->set_chain[v]; if (di->set_chain[parent]) { compress (di, parent); if (di->key[di->path_min[parent]] < di->key[di->path_min[v]]) di->path_min[v] = di->path_min[parent]; di->set_chain[v] = di->set_chain[parent]; } } /* Compress the path from V to the set root of V if needed (when the root has changed since the last call). Returns the node with the smallest key[] value on the path from V to the root. */ static inline TBB eval (struct dom_info *di, TBB v) { /* The representant of the set V is in, also called root (as the set representation is a tree). */ TBB rep = di->set_chain[v]; /* V itself is the root. */ if (!rep) return di->path_min[v]; /* Compress only if necessary. */ if (di->set_chain[rep]) { compress (di, v); rep = di->set_chain[v]; } if (di->key[di->path_min[rep]] >= di->key[di->path_min[v]]) return di->path_min[v]; else return di->path_min[rep]; } /* This essentially merges the two sets of V and W, giving a single set with the new root V. The internal representation of these disjoint sets is a balanced tree. Currently link(V,W) is only used with V being the parent of W. */ static void link_roots (struct dom_info *di, TBB v, TBB w) { TBB s = w; /* Rebalance the tree. */ while (di->key[di->path_min[w]] < di->key[di->path_min[di->set_child[s]]]) { if (di->set_size[s] + di->set_size[di->set_child[di->set_child[s]]] >= 2 * di->set_size[di->set_child[s]]) { di->set_chain[di->set_child[s]] = s; di->set_child[s] = di->set_child[di->set_child[s]]; } else { di->set_size[di->set_child[s]] = di->set_size[s]; s = di->set_chain[s] = di->set_child[s]; } } di->path_min[s] = di->path_min[w]; di->set_size[v] += di->set_size[w]; if (di->set_size[v] < 2 * di->set_size[w]) { TBB tmp = s; s = di->set_child[v]; di->set_child[v] = tmp; } /* Merge all subtrees. */ while (s) { di->set_chain[s] = v; s = di->set_child[s]; } } /* This calculates the immediate dominators (or post-dominators if REVERSE is true). DI is our working structure and should hold the DFS forest. On return the immediate dominator to node V is in di->dom[V]. */ static void calc_idoms (struct dom_info *di, enum cdi_direction reverse) { TBB v, w, k, par; basic_block en_block; if (reverse) en_block = EXIT_BLOCK_PTR; else en_block = ENTRY_BLOCK_PTR; /* Go backwards in DFS order, to first look at the leafs. */ v = di->nodes; while (v > 1) { basic_block bb = di->dfs_to_bb[v]; edge e, e_next; par = di->dfs_parent[v]; k = v; if (reverse) e = bb->succ; else e = bb->pred; /* Search all direct predecessors for the smallest node with a path to them. That way we have the smallest node with also a path to us only over nodes behind us. In effect we search for our semidominator. */ for (; e; e = e_next) { TBB k1; basic_block b; if (reverse) { b = e->dest; e_next = e->succ_next; } else { b = e->src; e_next = e->pred_next; } if (b == en_block) k1 = di->dfs_order[last_basic_block]; else k1 = di->dfs_order[b->index]; /* Call eval() only if really needed. If k1 is above V in DFS tree, then we know, that eval(k1) == k1 and key[k1] == k1. */ if (k1 > v) k1 = di->key[eval (di, k1)]; if (k1 < k) k = k1; } di->key[v] = k; link_roots (di, par, v); di->next_bucket[v] = di->bucket[k]; di->bucket[k] = v; /* Transform semidominators into dominators. */ for (w = di->bucket[par]; w; w = di->next_bucket[w]) { k = eval (di, w); if (di->key[k] < di->key[w]) di->dom[w] = k; else di->dom[w] = par; } /* We don't need to cleanup next_bucket[]. */ di->bucket[par] = 0; v--; } /* Explicitly define the dominators. 
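So far dom[V] may only hold a relative dominator recorded while the buckets were processed; whenever dom[V] differs from the semidominator key[V], the true immediate dominator is the immediate dominator of that recorded node, which is already final because V is visited in increasing DFS order. Node 1 (the root) gets 0, meaning it has no dominator.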
*/ di->dom[1] = 0; for (v = 2; v <= di->nodes; v++) if (di->dom[v] != di->key[v]) di->dom[v] = di->dom[di->dom[v]]; } /* Assign dfs numbers starting from NUM to NODE and its sons. */ static void assign_dfs_numbers (struct et_node *node, int *num) { struct et_node *son; node->dfs_num_in = (*num)++; if (node->son) { assign_dfs_numbers (node->son, num); for (son = node->son->right; son != node->son; son = son->right) assign_dfs_numbers (son, num); } node->dfs_num_out = (*num)++; } /* Compute the data necessary for fast resolving of dominator queries in a static dominator tree. */ static void compute_dom_fast_query (enum cdi_direction dir) { int num = 0; basic_block bb; if (dom_computed[dir] < DOM_NO_FAST_QUERY) abort (); if (dom_computed[dir] == DOM_OK) return; FOR_ALL_BB (bb) { if (!bb->dom[dir]->father) assign_dfs_numbers (bb->dom[dir], &num); } dom_computed[dir] = DOM_OK; } /* The main entry point into this module. DIR is set depending on whether we want to compute dominators or postdominators. */ void calculate_dominance_info (enum cdi_direction dir) { struct dom_info di; basic_block b; if (dom_computed[dir] == DOM_OK) return; if (dom_computed[dir] != DOM_NO_FAST_QUERY) { if (dom_computed[dir] != DOM_NONE) free_dominance_info (dir); if (n_bbs_in_dom_tree[dir]) abort (); FOR_ALL_BB (b) { b->dom[dir] = et_new_tree (b); } n_bbs_in_dom_tree[dir] = n_basic_blocks + 2; init_dom_info (&di); calc_dfs_tree (&di, dir); calc_idoms (&di, dir); FOR_EACH_BB (b) { TBB d = di.dom[di.dfs_order[b->index]]; if (di.dfs_to_bb[d]) et_set_father (b->dom[dir], di.dfs_to_bb[d]->dom[dir]); } free_dom_info (&di); dom_computed[dir] = DOM_NO_FAST_QUERY; } compute_dom_fast_query (dir); } /* Free dominance information for direction DIR. */ void free_dominance_info (enum cdi_direction dir) { basic_block bb; if (!dom_computed[dir]) return; FOR_ALL_BB (bb) { delete_from_dominance_info (dir, bb); } /* If there are any nodes left, something is wrong. */ if (n_bbs_in_dom_tree[dir]) abort (); dom_computed[dir] = DOM_NONE; } /* Return the immediate dominator of basic block BB. */ basic_block get_immediate_dominator (enum cdi_direction dir, basic_block bb) { struct et_node *node = bb->dom[dir]; if (!dom_computed[dir]) abort (); if (!node->father) return NULL; return node->father->data; } /* Set the immediate dominator of the block possibly removing existing edge. NULL can be used to remove any edge. */ void set_immediate_dominator (enum cdi_direction dir, basic_block bb, basic_block dominated_by) { struct et_node *node = bb->dom[dir]; if (!dom_computed[dir]) abort (); if (node->father) { if (node->father->data == dominated_by) return; et_split (node); } if (dominated_by) et_set_father (node, dominated_by->dom[dir]); if (dom_computed[dir] == DOM_OK) dom_computed[dir] = DOM_NO_FAST_QUERY; } /* Store all basic blocks immediately dominated by BB into BBS and return their number. */ int get_dominated_by (enum cdi_direction dir, basic_block bb, basic_block **bbs) { int n; struct et_node *node = bb->dom[dir], *son = node->son, *ason; if (!dom_computed[dir]) abort (); if (!son) { *bbs = NULL; return 0; } for (ason = son->right, n = 1; ason != son; ason = ason->right) n++; *bbs = xmalloc (n * sizeof (basic_block)); (*bbs)[0] = son->data; for (ason = son->right, n = 1; ason != son; ason = ason->right) (*bbs)[n++] = ason->data; return n; } /* Redirect all edges pointing to BB to TO. 
*/ void redirect_immediate_dominators (enum cdi_direction dir, basic_block bb, basic_block to) { struct et_node *bb_node = bb->dom[dir], *to_node = to->dom[dir], *son; if (!dom_computed[dir]) abort (); if (!bb_node->son) return; while (bb_node->son) { son = bb_node->son; et_split (son); et_set_father (son, to_node); } if (dom_computed[dir] == DOM_OK) dom_computed[dir] = DOM_NO_FAST_QUERY; } /* Find first basic block in the tree dominating both BB1 and BB2. */ basic_block nearest_common_dominator (enum cdi_direction dir, basic_block bb1, basic_block bb2) { if (!dom_computed[dir]) abort (); if (!bb1) return bb2; if (!bb2) return bb1; return et_nca (bb1->dom[dir], bb2->dom[dir])->data; } /* Return TRUE in case BB1 is dominated by BB2. */ bool dominated_by_p (enum cdi_direction dir, basic_block bb1, basic_block bb2) { struct et_node *n1 = bb1->dom[dir], *n2 = bb2->dom[dir]; if (!dom_computed[dir]) abort (); if (dom_computed[dir] == DOM_OK) return (n1->dfs_num_in >= n2->dfs_num_in && n1->dfs_num_out <= n2->dfs_num_out); return et_below (n1, n2); } /* Verify invariants of dominator structure. */ void verify_dominators (enum cdi_direction dir) { int err = 0; basic_block bb; if (!dom_computed[dir]) abort (); FOR_EACH_BB (bb) { basic_block dom_bb; dom_bb = recount_dominator (dir, bb); if (dom_bb != get_immediate_dominator (dir, bb)) { error ("dominator of %d should be %d, not %d", bb->index, dom_bb->index, get_immediate_dominator(dir, bb)->index); err = 1; } } if (err) abort (); } /* Determine immediate dominator (or postdominator, according to DIR) of BB, assuming that dominators of other blocks are correct. We also use it to recompute the dominators in a restricted area, by iterating it until it reaches a fixed point. */ basic_block recount_dominator (enum cdi_direction dir, basic_block bb) { basic_block dom_bb = NULL; edge e; if (!dom_computed[dir]) abort (); if (dir == CDI_DOMINATORS) { for (e = bb->pred; e; e = e->pred_next) { if (!dominated_by_p (dir, e->src, bb)) dom_bb = nearest_common_dominator (dir, dom_bb, e->src); } } else { for (e = bb->succ; e; e = e->succ_next) { if (!dominated_by_p (dir, e->dest, bb)) dom_bb = nearest_common_dominator (dir, dom_bb, e->dest); } } return dom_bb; } /* Iteratively recount dominators of BBS. The change is supposed to be local and not to grow further. */ void iterate_fix_dominators (enum cdi_direction dir, basic_block *bbs, int n) { int i, changed = 1; basic_block old_dom, new_dom; if (!dom_computed[dir]) abort (); while (changed) { changed = 0; for (i = 0; i < n; i++) { old_dom = get_immediate_dominator (dir, bbs[i]); new_dom = recount_dominator (dir, bbs[i]); if (old_dom != new_dom) { changed = 1; set_immediate_dominator (dir, bbs[i], new_dom); } } } } void add_to_dominance_info (enum cdi_direction dir, basic_block bb) { if (!dom_computed[dir]) abort (); if (bb->dom[dir]) abort (); n_bbs_in_dom_tree[dir]++; bb->dom[dir] = et_new_tree (bb); if (dom_computed[dir] == DOM_OK) dom_computed[dir] = DOM_NO_FAST_QUERY; } void delete_from_dominance_info (enum cdi_direction dir, basic_block bb) { if (!dom_computed[dir]) abort (); et_free_tree (bb->dom[dir]); bb->dom[dir] = NULL; n_bbs_in_dom_tree[dir]--; if (dom_computed[dir] == DOM_OK) dom_computed[dir] = DOM_NO_FAST_QUERY; } /* Returns the first son of BB in the dominator or postdominator tree as determined by DIR. */ basic_block first_dom_son (enum cdi_direction dir, basic_block bb) { struct et_node *son = bb->dom[dir]->son; return son ? 
son->data : NULL; } /* Returns the next dominance son after BB in the dominator or postdominator tree as determined by DIR, or NULL if it was the last one. */ basic_block next_dom_son (enum cdi_direction dir, basic_block bb) { struct et_node *next = bb->dom[dir]->right; return next->father->son == next ? NULL : next->data; } void debug_dominance_info (enum cdi_direction dir) { basic_block bb, bb2; FOR_EACH_BB (bb) if ((bb2 = get_immediate_dominator (dir, bb))) fprintf (stderr, "%i %i\n", bb->index, bb2->index); } /* Perform doloop optimizations Copyright (C) 2004 Free Software Foundation, Inc. Based on code by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This module is used to modify loops with a determinable number of iterations to use special low-overhead looping instructions. It first validates whether the loop is well behaved and has a determinable number of iterations (either at compile or run-time). It then modifies the loop to use a low-overhead looping pattern as follows: 1. A pseudo register is allocated as the loop iteration counter. 2. The number of loop iterations is calculated and is stored in the loop counter. 3. At the end of the loop, the jump insn is replaced by the doloop_end pattern. The compare must remain because it might be used elsewhere. If the loop-variable or condition register are used elsewhere, they will be eliminated by flow. 4. An optional doloop_begin pattern is inserted at the top of the loop. TODO The optimization should only performed when either the biv used for exit condition is unused at all except for the exit test, or if we do not have to change its value, since otherwise we have to add a new induction variable, which usually will not pay up (unless the cost of the doloop pattern is somehow extremely lower than the cost of compare & jump, or unless the bct register cannot be used for anything else but doloop -- ??? detect these cases). */ #ifdef HAVE_doloop_end /* Return the loop termination condition for PATTERN or zero if it is not a decrement and branch jump insn. */ static rtx doloop_condition_get (rtx pattern) { rtx cmp; rtx inc; rtx reg; rtx condition; /* The canonical doloop pattern we expect is: (parallel [(set (pc) (if_then_else (condition) (label_ref (label)) (pc))) (set (reg) (plus (reg) (const_int -1))) (additional clobbers and uses)]) Some machines (IA-64) make the decrement conditional on the condition as well, so we don't bother verifying the actual decrement. In summary, the branch must be the first entry of the parallel (also required by jump.c), and the second entry of the parallel must be a set of the loop counter register. */ if (GET_CODE (pattern) != PARALLEL) return 0; cmp = XVECEXP (pattern, 0, 0); inc = XVECEXP (pattern, 0, 1); /* Check for (set (reg) (something)). */ if (GET_CODE (inc) != SET || ! 
REG_P (SET_DEST (inc))) return 0; /* Extract loop counter register. */ reg = SET_DEST (inc); /* Check for (set (pc) (if_then_else (condition) (label_ref (label)) (pc))). */ if (GET_CODE (cmp) != SET || SET_DEST (cmp) != pc_rtx || GET_CODE (SET_SRC (cmp)) != IF_THEN_ELSE || GET_CODE (XEXP (SET_SRC (cmp), 1)) != LABEL_REF || XEXP (SET_SRC (cmp), 2) != pc_rtx) return 0; /* Extract loop termination condition. */ condition = XEXP (SET_SRC (cmp), 0); if ((GET_CODE (condition) != GE && GET_CODE (condition) != NE) || GET_CODE (XEXP (condition, 1)) != CONST_INT) return 0; if (XEXP (condition, 0) == reg) return condition; if (GET_CODE (XEXP (condition, 0)) == PLUS && XEXP (XEXP (condition, 0), 0) == reg) return condition; /* ??? If a machine uses a funny comparison, we could return a canonicalized form here. */ return 0; } /* Return nonzero if the loop specified by LOOP is suitable for the use of special low-overhead looping instructions. DESC describes the number of iterations of the loop. */ static bool doloop_valid_p (struct loop *loop, struct niter_desc *desc) { basic_block *body = get_loop_body (loop), bb; rtx insn; unsigned i; bool result = true; /* Check for loops that may not terminate under special conditions. */ if (!desc->simple_p || desc->assumptions || desc->infinite) { /* There are some cases that would require a special attention. For example if the comparison is LEU and the comparison value is UINT_MAX then the loop will not terminate. Similarly, if the comparison code is GEU and the comparison value is 0, the loop will not terminate. If the absolute increment is not 1, the loop can be infinite even with LTU/GTU, e.g. for (i = 3; i > 0; i -= 2) ??? We could compute these conditions at run-time and have a additional jump around the loop to ensure an infinite loop. However, it is very unlikely that this is the intended behavior of the loop and checking for these rare boundary conditions would pessimize all other code. If the loop is executed only a few times an extra check to restart the loop could use up most of the benefits of using a count register loop. Note however, that normally, this restart branch would never execute, so it could be predicted well by the CPU. We should generate the pessimistic code by default, and have an option, e.g. -funsafe-loops that would enable count-register loops in this case. */ if (dump_file) fprintf (dump_file, "Doloop: Possible infinite iteration case.\n"); result = false; goto cleanup; } for (i = 0; i < loop->num_nodes; i++) { bb = body[i]; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { /* A called function may clobber any special registers required for low-overhead looping. */ if (GET_CODE (insn) == CALL_INSN) { if (dump_file) fprintf (dump_file, "Doloop: Function call in loop.\n"); result = false; goto cleanup; } /* Some targets (eg, PPC) use the count register for branch on table instructions. ??? This should be a target specific check. */ if (GET_CODE (insn) == JUMP_INSN && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC || GET_CODE (PATTERN (insn)) == ADDR_VEC)) { if (dump_file) fprintf (dump_file, "Doloop: Computed branch in the loop.\n"); result = false; goto cleanup; } } } result = true; cleanup: free (body); return result; } /* Adds test of COND jumping to DEST to the end of BB. 
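The comparison itself is expanded in a separate insn sequence via do_compare_rtx_and_jump; the resulting jump gets DEST's label as its JUMP_LABEL and a REG_BR_PROB note of zero, since the test guards an unlikely case, and the whole sequence is emitted after the last insn of BB.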
*/ static void add_test (rtx cond, basic_block bb, basic_block dest) { rtx seq, jump, label; enum machine_mode mode; rtx op0 = XEXP (cond, 0), op1 = XEXP (cond, 1); enum rtx_code code = GET_CODE (cond); mode = GET_MODE (XEXP (cond, 0)); if (mode == VOIDmode) mode = GET_MODE (XEXP (cond, 1)); start_sequence (); op0 = force_operand (op0, NULL_RTX); op1 = force_operand (op1, NULL_RTX); label = block_label (dest); do_compare_rtx_and_jump (op0, op1, code, 0, mode, NULL_RTX, NULL_RTX, label); jump = get_last_insn (); JUMP_LABEL (jump) = label; /* The jump is supposed to handle an unlikely special case. */ REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_BR_PROB, const0_rtx, REG_NOTES (jump)); LABEL_NUSES (label)++; seq = get_insns (); end_sequence (); emit_insn_after (seq, BB_END (bb)); } /* Modify the loop to use the low-overhead looping insn where LOOP describes the loop, DESC describes the number of iterations of the loop, and DOLOOP_INSN is the low-overhead looping insn to emit at the end of the loop. CONDITION is the condition separated from the DOLOOP_SEQ. */ static void doloop_modify (struct loop *loop, struct niter_desc *desc, rtx doloop_seq, rtx condition) { rtx counter_reg; rtx count, tmp, noloop = NULL_RTX; rtx sequence; rtx jump_insn; rtx jump_label; int nonneg = 0, irr; bool increment_count; basic_block loop_end = desc->out_edge->src; jump_insn = BB_END (loop_end); if (dump_file) { fprintf (dump_file, "Doloop: Inserting doloop pattern ("); if (desc->const_iter) fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter); else fputs ("runtime", dump_file); fputs (" iterations).\n", dump_file); } /* Discard original jump to continue loop. The original compare result may still be live, so it cannot be discarded explicitly. */ delete_insn (jump_insn); counter_reg = XEXP (condition, 0); if (GET_CODE (counter_reg) == PLUS) counter_reg = XEXP (counter_reg, 0); count = desc->niter_expr; increment_count = false; switch (GET_CODE (condition)) { case NE: /* Currently only NE tests against zero and one are supported. */ if (XEXP (condition, 1) == const1_rtx) { increment_count = true; noloop = const1_rtx; } else if (XEXP (condition, 1) == const0_rtx) noloop = const0_rtx; else abort (); break; case GE: /* Currently only GE tests against zero are supported. */ if (XEXP (condition, 1) != const0_rtx) abort (); noloop = constm1_rtx; /* The iteration count does not need incrementing for a GE test. */ increment_count = false; /* Determine if the iteration counter will be non-negative. Note that the maximum value loaded is iterations_max - 1. */ if (desc->niter_max <= ((unsigned HOST_WIDEST_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (counter_reg)) - 1))) nonneg = 1; break; /* Abort if an invalid doloop pattern has been generated. */ default: abort (); } if (increment_count) count = simplify_gen_binary (PLUS, desc->mode, count, const1_rtx); /* Insert initialization of the count register into the loop header. 
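COUNT may be an arbitrary expression, so it is forced into an operand and moved into COUNTER_REG with convert_move; the resulting insn sequence is emitted after the last insn of the preheader block (the source of the loop's preheader edge).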
*/ start_sequence (); tmp = force_operand (count, counter_reg); convert_move (counter_reg, tmp, 1); sequence = get_insns (); end_sequence (); emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src)); if (desc->noloop_assumptions) { rtx ass = desc->noloop_assumptions; basic_block preheader = loop_preheader_edge (loop)->src; basic_block set_zero = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); basic_block new_preheader = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX); basic_block bb; edge te; gcov_type cnt; /* Expand the condition testing the assumptions and if it does not pass, reset the count register to 0. */ add_test (XEXP (ass, 0), preheader, set_zero); preheader->succ->flags &= ~EDGE_FALLTHRU; cnt = preheader->succ->count; preheader->succ->probability = 0; preheader->succ->count = 0; irr = preheader->succ->flags & EDGE_IRREDUCIBLE_LOOP; te = make_edge (preheader, new_preheader, EDGE_FALLTHRU | irr); te->probability = REG_BR_PROB_BASE; te->count = cnt; set_immediate_dominator (CDI_DOMINATORS, new_preheader, preheader); set_zero->count = 0; set_zero->frequency = 0; for (ass = XEXP (ass, 1); ass; ass = XEXP (ass, 1)) { bb = loop_split_edge_with (te, NULL_RTX); te = bb->succ; add_test (XEXP (ass, 0), bb, set_zero); make_edge (bb, set_zero, irr); } start_sequence (); convert_move (counter_reg, noloop, 0); sequence = get_insns (); end_sequence (); emit_insn_after (sequence, BB_END (set_zero)); } /* Some targets (eg, C4x) need to initialize special looping registers. */ #ifdef HAVE_doloop_begin { rtx init; unsigned level = get_loop_level (loop) + 1; init = gen_doloop_begin (counter_reg, desc->const_iter ? desc->niter_expr : const0_rtx, desc->niter_max, GEN_INT (level)); if (init) { start_sequence (); emit_insn (init); sequence = get_insns (); end_sequence (); emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src)); } } #endif /* Insert the new low-overhead looping insn. */ emit_jump_insn_after (doloop_seq, BB_END (loop_end)); jump_insn = BB_END (loop_end); jump_label = block_label (desc->in_edge->dest); JUMP_LABEL (jump_insn) = jump_label; LABEL_NUSES (jump_label)++; /* Ensure the right fallthru edge is marked, for case we have reversed the condition. */ desc->in_edge->flags &= ~EDGE_FALLTHRU; desc->out_edge->flags |= EDGE_FALLTHRU; /* Add a REG_NONNEG note if the actual or estimated maximum number of iterations is non-negative. */ if (nonneg) { REG_NOTES (jump_insn) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX, REG_NOTES (jump_insn)); } } /* Process loop described by LOOP validating that the loop is suitable for conversion to use a low overhead looping instruction, replacing the jump insn where suitable. Returns true if the loop was successfully modified. */ static bool doloop_optimize (struct loop *loop) { enum machine_mode mode; rtx doloop_seq, doloop_pat, doloop_reg; rtx iterations; rtx iterations_max; rtx start_label; rtx condition; unsigned level, est_niter; struct niter_desc *desc; if (dump_file) fprintf (dump_file, "Doloop: Processing loop %d.\n", loop->num); iv_analysis_loop_init (loop); /* Find the simple exit of a LOOP. */ desc = get_simple_loop_desc (loop); /* Check that loop is a candidate for a low-overhead looping insn. */ if (!doloop_valid_p (loop, desc)) { if (dump_file) fprintf (dump_file, "Doloop: The loop is not suitable.\n"); return false; } mode = desc->mode; est_niter = 3; if (desc->const_iter) est_niter = desc->niter; /* If the estimate on number of iterations is reliable (comes from profile feedback), use it. 
Do not use it normally, since the expected number of iterations of an unrolled loop is 2. */ if (loop->header->count) est_niter = expected_loop_iterations (loop); if (est_niter < 3) { if (dump_file) fprintf (dump_file, "Doloop: Too few iterations (%u) to be profitable.\n", est_niter); return false; } iterations = desc->const_iter ? desc->niter_expr : const0_rtx; iterations_max = GEN_INT (desc->niter_max); level = get_loop_level (loop) + 1; /* Generate looping insn. If the pattern FAILs then give up trying to modify the loop since there is some aspect the back-end does not like. */ start_label = block_label (desc->in_edge->dest); doloop_reg = gen_reg_rtx (mode); doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max, GEN_INT (level), start_label); if (! doloop_seq && mode != word_mode) { PUT_MODE (doloop_reg, word_mode); doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max, GEN_INT (level), start_label); } if (! doloop_seq) { if (dump_file) fprintf (dump_file, "Doloop: Target unwilling to use doloop pattern!\n"); return false; } /* If multiple instructions were created, the last must be the jump instruction. Also, a raw define_insn may yield a plain pattern. */ doloop_pat = doloop_seq; if (INSN_P (doloop_pat)) { while (NEXT_INSN (doloop_pat) != NULL_RTX) doloop_pat = NEXT_INSN (doloop_pat); if (GET_CODE (doloop_pat) == JUMP_INSN) doloop_pat = PATTERN (doloop_pat); else doloop_pat = NULL_RTX; } if (! doloop_pat || ! (condition = doloop_condition_get (doloop_pat))) { if (dump_file) fprintf (dump_file, "Doloop: Unrecognizable doloop pattern!\n"); return false; } doloop_modify (loop, desc, doloop_seq, condition); return true; } /* This is the main entry point. Process all LOOPS using doloop_optimize. */ void doloop_optimize_loops (struct loops *loops) { unsigned i; struct loop *loop; for (i = 1; i < loops->num; i++) { loop = loops->parray[i]; if (!loop) continue; doloop_optimize (loop); } iv_analysis_done (); #ifdef ENABLE_CHECKING verify_dominators (CDI_DOMINATORS); verify_loop_structure (loops); #endif } #endif /* HAVE_doloop_end */ /* Dwarf2 assembler output helper routines. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Dwarf2 assembler output helper routines. Copyright (C) 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ extern void dw2_assemble_integer (int, rtx); extern void dw2_asm_output_data (int, unsigned HOST_WIDE_INT, const char *, ...) ATTRIBUTE_NULL_PRINTF_3; extern void dw2_asm_output_delta (int, const char *, const char *, const char *, ...) ATTRIBUTE_NULL_PRINTF_4; extern void dw2_asm_output_offset (int, const char *, const char *, ...) ATTRIBUTE_NULL_PRINTF_3; extern void dw2_asm_output_pcrel (int, const char *, const char *, ...) ATTRIBUTE_NULL_PRINTF_3; extern void dw2_asm_output_addr (int, const char *, const char *, ...) ATTRIBUTE_NULL_PRINTF_3; extern void dw2_asm_output_addr_rtx (int, rtx, const char *, ...) ATTRIBUTE_NULL_PRINTF_3; extern void dw2_asm_output_encoded_addr_rtx (int, rtx, const char *, ...) ATTRIBUTE_NULL_PRINTF_3; extern void dw2_asm_output_nstring (const char *, size_t, const char *, ...) ATTRIBUTE_NULL_PRINTF_3; extern void dw2_asm_output_data_uleb128 (unsigned HOST_WIDE_INT, const char *, ...) ATTRIBUTE_NULL_PRINTF_2; extern void dw2_asm_output_data_sleb128 (HOST_WIDE_INT, const char *, ...) ATTRIBUTE_NULL_PRINTF_2; extern void dw2_asm_output_delta_uleb128 (const char *, const char *, const char *, ...) ATTRIBUTE_NULL_PRINTF_3; extern void dw2_asm_output_delta_sleb128 (const char *, const char *, const char *, ...) ATTRIBUTE_NULL_PRINTF_3; extern int size_of_uleb128 (unsigned HOST_WIDE_INT); extern int size_of_sleb128 (HOST_WIDE_INT); extern int size_of_encoded_value (int); extern const char *eh_data_format_name (int); extern void dw2_output_indirect_constants (void); /* Declarations and definitions of codes relating to the DWARF2 symbolic debugging information format. Copyright (C) 1992, 1993, 1995, 1996, 1997, 1999, 2000, 2001, 2002 Free Software Foundation, Inc. Written by Gary Funck (gary@intrepid.com) The Ada Joint Program Office (AJPO), Florida State University and Silicon Graphics Inc. provided support for this effort -- June 21, 1995. Derived from the DWARF 1 implementation written by Ron Guilmette (rfg@netcom.com), November 1990. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is derived from the DWARF specification (a public document) Revision 2.0.0 (July 27, 1993) developed by the UNIX International Programming Languages Special Interest Group (UI/PLSIG) and distributed by UNIX International. Copies of this specification are available from UNIX International, 20 Waterview Boulevard, Parsippany, NJ, 07054. This file also now contains definitions from the DWARF 3 specification. */ /* This file is shared between GCC and GDB, and should not contain prototypes. */ #ifndef GCC_DWARF2_H #define GCC_DWARF2_H /* Tag names and codes. 
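A tag identifies the kind of debugging information entry it introduces. Values from DW_TAG_lo_user (0x4080) up to DW_TAG_hi_user (0xffff), defined below, are reserved for vendor-specific extensions such as the SGI/MIPS and GNU tags in this list.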
*/ enum dwarf_tag { DW_TAG_padding = 0x00, DW_TAG_array_type = 0x01, DW_TAG_class_type = 0x02, DW_TAG_entry_point = 0x03, DW_TAG_enumeration_type = 0x04, DW_TAG_formal_parameter = 0x05, DW_TAG_imported_declaration = 0x08, DW_TAG_label = 0x0a, DW_TAG_lexical_block = 0x0b, DW_TAG_member = 0x0d, DW_TAG_pointer_type = 0x0f, DW_TAG_reference_type = 0x10, DW_TAG_compile_unit = 0x11, DW_TAG_string_type = 0x12, DW_TAG_structure_type = 0x13, DW_TAG_subroutine_type = 0x15, DW_TAG_typedef = 0x16, DW_TAG_union_type = 0x17, DW_TAG_unspecified_parameters = 0x18, DW_TAG_variant = 0x19, DW_TAG_common_block = 0x1a, DW_TAG_common_inclusion = 0x1b, DW_TAG_inheritance = 0x1c, DW_TAG_inlined_subroutine = 0x1d, DW_TAG_module = 0x1e, DW_TAG_ptr_to_member_type = 0x1f, DW_TAG_set_type = 0x20, DW_TAG_subrange_type = 0x21, DW_TAG_with_stmt = 0x22, DW_TAG_access_declaration = 0x23, DW_TAG_base_type = 0x24, DW_TAG_catch_block = 0x25, DW_TAG_const_type = 0x26, DW_TAG_constant = 0x27, DW_TAG_enumerator = 0x28, DW_TAG_file_type = 0x29, DW_TAG_friend = 0x2a, DW_TAG_namelist = 0x2b, DW_TAG_namelist_item = 0x2c, DW_TAG_packed_type = 0x2d, DW_TAG_subprogram = 0x2e, DW_TAG_template_type_param = 0x2f, DW_TAG_template_value_param = 0x30, DW_TAG_thrown_type = 0x31, DW_TAG_try_block = 0x32, DW_TAG_variant_part = 0x33, DW_TAG_variable = 0x34, DW_TAG_volatile_type = 0x35, /* DWARF 3. */ DW_TAG_dwarf_procedure = 0x36, DW_TAG_restrict_type = 0x37, DW_TAG_interface_type = 0x38, DW_TAG_namespace = 0x39, DW_TAG_imported_module = 0x3a, DW_TAG_unspecified_type = 0x3b, DW_TAG_partial_unit = 0x3c, DW_TAG_imported_unit = 0x3d, /* SGI/MIPS Extensions. */ DW_TAG_MIPS_loop = 0x4081, /* GNU extensions. */ DW_TAG_format_label = 0x4101, /* For FORTRAN 77 and Fortran 90. */ DW_TAG_function_template = 0x4102, /* For C++. */ DW_TAG_class_template = 0x4103, /* For C++. */ DW_TAG_GNU_BINCL = 0x4104, DW_TAG_GNU_EINCL = 0x4105 }; #define DW_TAG_lo_user 0x4080 #define DW_TAG_hi_user 0xffff /* Flag that tells whether entry has a child or not. */ #define DW_children_no 0 #define DW_children_yes 1 /* Form names and codes. */ enum dwarf_form { DW_FORM_addr = 0x01, DW_FORM_block2 = 0x03, DW_FORM_block4 = 0x04, DW_FORM_data2 = 0x05, DW_FORM_data4 = 0x06, DW_FORM_data8 = 0x07, DW_FORM_string = 0x08, DW_FORM_block = 0x09, DW_FORM_block1 = 0x0a, DW_FORM_data1 = 0x0b, DW_FORM_flag = 0x0c, DW_FORM_sdata = 0x0d, DW_FORM_strp = 0x0e, DW_FORM_udata = 0x0f, DW_FORM_ref_addr = 0x10, DW_FORM_ref1 = 0x11, DW_FORM_ref2 = 0x12, DW_FORM_ref4 = 0x13, DW_FORM_ref8 = 0x14, DW_FORM_ref_udata = 0x15, DW_FORM_indirect = 0x16 }; /* Attribute names and codes. 
*/ enum dwarf_attribute { DW_AT_sibling = 0x01, DW_AT_location = 0x02, DW_AT_name = 0x03, DW_AT_ordering = 0x09, DW_AT_subscr_data = 0x0a, DW_AT_byte_size = 0x0b, DW_AT_bit_offset = 0x0c, DW_AT_bit_size = 0x0d, DW_AT_element_list = 0x0f, DW_AT_stmt_list = 0x10, DW_AT_low_pc = 0x11, DW_AT_high_pc = 0x12, DW_AT_language = 0x13, DW_AT_member = 0x14, DW_AT_discr = 0x15, DW_AT_discr_value = 0x16, DW_AT_visibility = 0x17, DW_AT_import = 0x18, DW_AT_string_length = 0x19, DW_AT_common_reference = 0x1a, DW_AT_comp_dir = 0x1b, DW_AT_const_value = 0x1c, DW_AT_containing_type = 0x1d, DW_AT_default_value = 0x1e, DW_AT_inline = 0x20, DW_AT_is_optional = 0x21, DW_AT_lower_bound = 0x22, DW_AT_producer = 0x25, DW_AT_prototyped = 0x27, DW_AT_return_addr = 0x2a, DW_AT_start_scope = 0x2c, DW_AT_stride_size = 0x2e, DW_AT_upper_bound = 0x2f, DW_AT_abstract_origin = 0x31, DW_AT_accessibility = 0x32, DW_AT_address_class = 0x33, DW_AT_artificial = 0x34, DW_AT_base_types = 0x35, DW_AT_calling_convention = 0x36, DW_AT_count = 0x37, DW_AT_data_member_location = 0x38, DW_AT_decl_column = 0x39, DW_AT_decl_file = 0x3a, DW_AT_decl_line = 0x3b, DW_AT_declaration = 0x3c, DW_AT_discr_list = 0x3d, DW_AT_encoding = 0x3e, DW_AT_external = 0x3f, DW_AT_frame_base = 0x40, DW_AT_friend = 0x41, DW_AT_identifier_case = 0x42, DW_AT_macro_info = 0x43, DW_AT_namelist_items = 0x44, DW_AT_priority = 0x45, DW_AT_segment = 0x46, DW_AT_specification = 0x47, DW_AT_static_link = 0x48, DW_AT_type = 0x49, DW_AT_use_location = 0x4a, DW_AT_variable_parameter = 0x4b, DW_AT_virtuality = 0x4c, DW_AT_vtable_elem_location = 0x4d, /* DWARF 3 values. */ DW_AT_allocated = 0x4e, DW_AT_associated = 0x4f, DW_AT_data_location = 0x50, DW_AT_stride = 0x51, DW_AT_entry_pc = 0x52, DW_AT_use_UTF8 = 0x53, DW_AT_extension = 0x54, DW_AT_ranges = 0x55, DW_AT_trampoline = 0x56, DW_AT_call_column = 0x57, DW_AT_call_file = 0x58, DW_AT_call_line = 0x59, /* SGI/MIPS Extensions. */ DW_AT_MIPS_fde = 0x2001, DW_AT_MIPS_loop_begin = 0x2002, DW_AT_MIPS_tail_loop_begin = 0x2003, DW_AT_MIPS_epilog_begin = 0x2004, DW_AT_MIPS_loop_unroll_factor = 0x2005, DW_AT_MIPS_software_pipeline_depth = 0x2006, DW_AT_MIPS_linkage_name = 0x2007, DW_AT_MIPS_stride = 0x2008, DW_AT_MIPS_abstract_name = 0x2009, DW_AT_MIPS_clone_origin = 0x200a, DW_AT_MIPS_has_inlines = 0x200b, /* GNU extensions. */ DW_AT_sf_names = 0x2101, DW_AT_src_info = 0x2102, DW_AT_mac_info = 0x2103, DW_AT_src_coords = 0x2104, DW_AT_body_begin = 0x2105, DW_AT_body_end = 0x2106, DW_AT_GNU_vector = 0x2107, /* VMS Extensions. */ DW_AT_VMS_rtnbeg_pd_address = 0x2201 }; #define DW_AT_lo_user 0x2000 /* Implementation-defined range start. */ #define DW_AT_hi_user 0x3ff0 /* Implementation-defined range end. */ /* Location atom names and codes. 
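These opcodes form the stack-machine language used in DWARF location expressions; note that the DW_OP_lit0-lit31, DW_OP_reg0-reg31 and DW_OP_breg0-breg31 families encode their operand directly in the opcode value.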
*/ enum dwarf_location_atom { DW_OP_addr = 0x03, DW_OP_deref = 0x06, DW_OP_const1u = 0x08, DW_OP_const1s = 0x09, DW_OP_const2u = 0x0a, DW_OP_const2s = 0x0b, DW_OP_const4u = 0x0c, DW_OP_const4s = 0x0d, DW_OP_const8u = 0x0e, DW_OP_const8s = 0x0f, DW_OP_constu = 0x10, DW_OP_consts = 0x11, DW_OP_dup = 0x12, DW_OP_drop = 0x13, DW_OP_over = 0x14, DW_OP_pick = 0x15, DW_OP_swap = 0x16, DW_OP_rot = 0x17, DW_OP_xderef = 0x18, DW_OP_abs = 0x19, DW_OP_and = 0x1a, DW_OP_div = 0x1b, DW_OP_minus = 0x1c, DW_OP_mod = 0x1d, DW_OP_mul = 0x1e, DW_OP_neg = 0x1f, DW_OP_not = 0x20, DW_OP_or = 0x21, DW_OP_plus = 0x22, DW_OP_plus_uconst = 0x23, DW_OP_shl = 0x24, DW_OP_shr = 0x25, DW_OP_shra = 0x26, DW_OP_xor = 0x27, DW_OP_bra = 0x28, DW_OP_eq = 0x29, DW_OP_ge = 0x2a, DW_OP_gt = 0x2b, DW_OP_le = 0x2c, DW_OP_lt = 0x2d, DW_OP_ne = 0x2e, DW_OP_skip = 0x2f, DW_OP_lit0 = 0x30, DW_OP_lit1 = 0x31, DW_OP_lit2 = 0x32, DW_OP_lit3 = 0x33, DW_OP_lit4 = 0x34, DW_OP_lit5 = 0x35, DW_OP_lit6 = 0x36, DW_OP_lit7 = 0x37, DW_OP_lit8 = 0x38, DW_OP_lit9 = 0x39, DW_OP_lit10 = 0x3a, DW_OP_lit11 = 0x3b, DW_OP_lit12 = 0x3c, DW_OP_lit13 = 0x3d, DW_OP_lit14 = 0x3e, DW_OP_lit15 = 0x3f, DW_OP_lit16 = 0x40, DW_OP_lit17 = 0x41, DW_OP_lit18 = 0x42, DW_OP_lit19 = 0x43, DW_OP_lit20 = 0x44, DW_OP_lit21 = 0x45, DW_OP_lit22 = 0x46, DW_OP_lit23 = 0x47, DW_OP_lit24 = 0x48, DW_OP_lit25 = 0x49, DW_OP_lit26 = 0x4a, DW_OP_lit27 = 0x4b, DW_OP_lit28 = 0x4c, DW_OP_lit29 = 0x4d, DW_OP_lit30 = 0x4e, DW_OP_lit31 = 0x4f, DW_OP_reg0 = 0x50, DW_OP_reg1 = 0x51, DW_OP_reg2 = 0x52, DW_OP_reg3 = 0x53, DW_OP_reg4 = 0x54, DW_OP_reg5 = 0x55, DW_OP_reg6 = 0x56, DW_OP_reg7 = 0x57, DW_OP_reg8 = 0x58, DW_OP_reg9 = 0x59, DW_OP_reg10 = 0x5a, DW_OP_reg11 = 0x5b, DW_OP_reg12 = 0x5c, DW_OP_reg13 = 0x5d, DW_OP_reg14 = 0x5e, DW_OP_reg15 = 0x5f, DW_OP_reg16 = 0x60, DW_OP_reg17 = 0x61, DW_OP_reg18 = 0x62, DW_OP_reg19 = 0x63, DW_OP_reg20 = 0x64, DW_OP_reg21 = 0x65, DW_OP_reg22 = 0x66, DW_OP_reg23 = 0x67, DW_OP_reg24 = 0x68, DW_OP_reg25 = 0x69, DW_OP_reg26 = 0x6a, DW_OP_reg27 = 0x6b, DW_OP_reg28 = 0x6c, DW_OP_reg29 = 0x6d, DW_OP_reg30 = 0x6e, DW_OP_reg31 = 0x6f, DW_OP_breg0 = 0x70, DW_OP_breg1 = 0x71, DW_OP_breg2 = 0x72, DW_OP_breg3 = 0x73, DW_OP_breg4 = 0x74, DW_OP_breg5 = 0x75, DW_OP_breg6 = 0x76, DW_OP_breg7 = 0x77, DW_OP_breg8 = 0x78, DW_OP_breg9 = 0x79, DW_OP_breg10 = 0x7a, DW_OP_breg11 = 0x7b, DW_OP_breg12 = 0x7c, DW_OP_breg13 = 0x7d, DW_OP_breg14 = 0x7e, DW_OP_breg15 = 0x7f, DW_OP_breg16 = 0x80, DW_OP_breg17 = 0x81, DW_OP_breg18 = 0x82, DW_OP_breg19 = 0x83, DW_OP_breg20 = 0x84, DW_OP_breg21 = 0x85, DW_OP_breg22 = 0x86, DW_OP_breg23 = 0x87, DW_OP_breg24 = 0x88, DW_OP_breg25 = 0x89, DW_OP_breg26 = 0x8a, DW_OP_breg27 = 0x8b, DW_OP_breg28 = 0x8c, DW_OP_breg29 = 0x8d, DW_OP_breg30 = 0x8e, DW_OP_breg31 = 0x8f, DW_OP_regx = 0x90, DW_OP_fbreg = 0x91, DW_OP_bregx = 0x92, DW_OP_piece = 0x93, DW_OP_deref_size = 0x94, DW_OP_xderef_size = 0x95, DW_OP_nop = 0x96, /* DWARF 3 extensions. */ DW_OP_push_object_address = 0x97, DW_OP_call2 = 0x98, DW_OP_call4 = 0x99, DW_OP_call_ref = 0x9a, /* GNU extensions. */ DW_OP_GNU_push_tls_address = 0xe0 }; #define DW_OP_lo_user 0xe0 /* Implementation-defined range start. */ #define DW_OP_hi_user 0xff /* Implementation-defined range end. */ /* Type encodings. */ enum dwarf_type { DW_ATE_void = 0x0, DW_ATE_address = 0x1, DW_ATE_boolean = 0x2, DW_ATE_complex_float = 0x3, DW_ATE_float = 0x4, DW_ATE_signed = 0x5, DW_ATE_signed_char = 0x6, DW_ATE_unsigned = 0x7, DW_ATE_unsigned_char = 0x8, /* DWARF 3. 
*/ DW_ATE_imaginary_float = 0x9 }; #define DW_ATE_lo_user 0x80 #define DW_ATE_hi_user 0xff /* Array ordering names and codes. */ enum dwarf_array_dim_ordering { DW_ORD_row_major = 0, DW_ORD_col_major = 1 }; /* Access attribute. */ enum dwarf_access_attribute { DW_ACCESS_public = 1, DW_ACCESS_protected = 2, DW_ACCESS_private = 3 }; /* Visibility. */ enum dwarf_visibility_attribute { DW_VIS_local = 1, DW_VIS_exported = 2, DW_VIS_qualified = 3 }; /* Virtuality. */ enum dwarf_virtuality_attribute { DW_VIRTUALITY_none = 0, DW_VIRTUALITY_virtual = 1, DW_VIRTUALITY_pure_virtual = 2 }; /* Case sensitivity. */ enum dwarf_id_case { DW_ID_case_sensitive = 0, DW_ID_up_case = 1, DW_ID_down_case = 2, DW_ID_case_insensitive = 3 }; /* Calling convention. */ enum dwarf_calling_convention { DW_CC_normal = 0x1, DW_CC_program = 0x2, DW_CC_nocall = 0x3 }; #define DW_CC_lo_user 0x40 #define DW_CC_hi_user 0xff /* Inline attribute. */ enum dwarf_inline_attribute { DW_INL_not_inlined = 0, DW_INL_inlined = 1, DW_INL_declared_not_inlined = 2, DW_INL_declared_inlined = 3 }; /* Discriminant lists. */ enum dwarf_discrim_list { DW_DSC_label = 0, DW_DSC_range = 1 }; /* Line number opcodes. */ enum dwarf_line_number_ops { DW_LNS_extended_op = 0, DW_LNS_copy = 1, DW_LNS_advance_pc = 2, DW_LNS_advance_line = 3, DW_LNS_set_file = 4, DW_LNS_set_column = 5, DW_LNS_negate_stmt = 6, DW_LNS_set_basic_block = 7, DW_LNS_const_add_pc = 8, DW_LNS_fixed_advance_pc = 9, /* DWARF 3. */ DW_LNS_set_prologue_end = 10, DW_LNS_set_epilogue_begin = 11, DW_LNS_set_isa = 12 }; /* Line number extended opcodes. */ enum dwarf_line_number_x_ops { DW_LNE_end_sequence = 1, DW_LNE_set_address = 2, DW_LNE_define_file = 3 }; /* Call frame information. */ enum dwarf_call_frame_info { DW_CFA_advance_loc = 0x40, DW_CFA_offset = 0x80, DW_CFA_restore = 0xc0, DW_CFA_nop = 0x00, DW_CFA_set_loc = 0x01, DW_CFA_advance_loc1 = 0x02, DW_CFA_advance_loc2 = 0x03, DW_CFA_advance_loc4 = 0x04, DW_CFA_offset_extended = 0x05, DW_CFA_restore_extended = 0x06, DW_CFA_undefined = 0x07, DW_CFA_same_value = 0x08, DW_CFA_register = 0x09, DW_CFA_remember_state = 0x0a, DW_CFA_restore_state = 0x0b, DW_CFA_def_cfa = 0x0c, DW_CFA_def_cfa_register = 0x0d, DW_CFA_def_cfa_offset = 0x0e, /* DWARF 3. */ DW_CFA_def_cfa_expression = 0x0f, DW_CFA_expression = 0x10, DW_CFA_offset_extended_sf = 0x11, DW_CFA_def_cfa_sf = 0x12, DW_CFA_def_cfa_offset_sf = 0x13, /* SGI/MIPS specific. */ DW_CFA_MIPS_advance_loc8 = 0x1d, /* GNU extensions. */ DW_CFA_GNU_window_save = 0x2d, DW_CFA_GNU_args_size = 0x2e, DW_CFA_GNU_negative_offset_extended = 0x2f }; #define DW_CIE_ID 0xffffffff #define DW_CIE_VERSION 1 #define DW_CFA_extended 0 #define DW_CFA_low_user 0x1c #define DW_CFA_high_user 0x3f #define DW_CHILDREN_no 0x00 #define DW_CHILDREN_yes 0x01 #define DW_ADDR_none 0 /* Source language names and codes. */ enum dwarf_source_language { DW_LANG_C89 = 0x0001, DW_LANG_C = 0x0002, DW_LANG_Ada83 = 0x0003, DW_LANG_C_plus_plus = 0x0004, DW_LANG_Cobol74 = 0x0005, DW_LANG_Cobol85 = 0x0006, DW_LANG_Fortran77 = 0x0007, DW_LANG_Fortran90 = 0x0008, DW_LANG_Pascal83 = 0x0009, DW_LANG_Modula2 = 0x000a, DW_LANG_Java = 0x000b, /* DWARF 3. */ DW_LANG_C99 = 0x000c, DW_LANG_Ada95 = 0x000d, DW_LANG_Fortran95 = 0x000e, /* MIPS. */ DW_LANG_Mips_Assembler = 0x8001 }; #define DW_LANG_lo_user 0x8000 /* Implementation-defined range start. */ #define DW_LANG_hi_user 0xffff /* Implementation-defined range start. */ /* Names and codes for macro information. 
*/ enum dwarf_macinfo_record_type { DW_MACINFO_define = 1, DW_MACINFO_undef = 2, DW_MACINFO_start_file = 3, DW_MACINFO_end_file = 4, DW_MACINFO_vendor_ext = 255 }; /* @@@ For use with GNU frame unwind information. */ #define DW_EH_PE_absptr 0x00 #define DW_EH_PE_omit 0xff #define DW_EH_PE_uleb128 0x01 #define DW_EH_PE_udata2 0x02 #define DW_EH_PE_udata4 0x03 #define DW_EH_PE_udata8 0x04 #define DW_EH_PE_sleb128 0x09 #define DW_EH_PE_sdata2 0x0A #define DW_EH_PE_sdata4 0x0B #define DW_EH_PE_sdata8 0x0C #define DW_EH_PE_signed 0x08 #define DW_EH_PE_pcrel 0x10 #define DW_EH_PE_textrel 0x20 #define DW_EH_PE_datarel 0x30 #define DW_EH_PE_funcrel 0x40 #define DW_EH_PE_aligned 0x50 #define DW_EH_PE_indirect 0x80 #endif /* dwarf2.h */ /* How to start an assembler comment. */ #ifndef ASM_COMMENT_START #define ASM_COMMENT_START ";#" #endif /* Output an unaligned integer with the given value and size. Prefer not to print a newline, since the caller may want to add a comment. */ void dw2_assemble_integer (int size, rtx x) { const char *op = integer_asm_op (size, FALSE); if (op) { fputs (op, asm_out_file); if (GET_CODE (x) == CONST_INT) fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x)); else output_addr_const (asm_out_file, x); } else assemble_integer (x, size, BITS_PER_UNIT, 1); } /* Output an immediate constant in a given size. */ void dw2_asm_output_data (int size, unsigned HOST_WIDE_INT value, const char *comment, ...) { va_list ap; va_start (ap, comment); if (size * 8 < HOST_BITS_PER_WIDE_INT) value &= ~(~(unsigned HOST_WIDE_INT) 0 << (size * 8)); dw2_assemble_integer (size, GEN_INT (value)); if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } fputc ('\n', asm_out_file); va_end (ap); } /* Output the difference between two symbols in a given size. */ /* ??? There appear to be assemblers that do not like such subtraction, but do support ASM_SET_OP. It's unfortunately impossible to do here, since the ASM_SET_OP for the difference symbol must appear after both symbols are defined. */ void dw2_asm_output_delta (int size, const char *lab1, const char *lab2, const char *comment, ...) { va_list ap; va_start (ap, comment); #ifdef ASM_OUTPUT_DWARF_DELTA ASM_OUTPUT_DWARF_DELTA (asm_out_file, size, lab1, lab2); #else dw2_assemble_integer (size, gen_rtx_MINUS (Pmode, gen_rtx_SYMBOL_REF (Pmode, lab1), gen_rtx_SYMBOL_REF (Pmode, lab2))); #endif if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } fputc ('\n', asm_out_file); va_end (ap); } /* Output a section-relative reference to a label. In general this can only be done for debugging symbols. E.g. on most targets with the GNU linker, this is accomplished with a direct reference and the knowledge that the debugging section will be placed at VMA 0. Some targets have special relocations for this that we must use. */ void dw2_asm_output_offset (int size, const char *label, const char *comment, ...) { va_list ap; va_start (ap, comment); #ifdef ASM_OUTPUT_DWARF_OFFSET ASM_OUTPUT_DWARF_OFFSET (asm_out_file, size, label); #else dw2_assemble_integer (size, gen_rtx_SYMBOL_REF (Pmode, label)); #endif if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } fputc ('\n', asm_out_file); va_end (ap); } /* Output a self-relative reference to a label, possibly in a different section or object file. 
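Targets that define ASM_OUTPUT_DWARF_PCREL get to use their own directive for this; otherwise we fall back to assembling LABEL minus the current program counter as an ordinary integer of the requested size.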
*/ void dw2_asm_output_pcrel (int size ATTRIBUTE_UNUSED, const char *label ATTRIBUTE_UNUSED, const char *comment, ...) { va_list ap; va_start (ap, comment); #ifdef ASM_OUTPUT_DWARF_PCREL ASM_OUTPUT_DWARF_PCREL (asm_out_file, size, label); #else dw2_assemble_integer (size, gen_rtx_MINUS (Pmode, gen_rtx_SYMBOL_REF (Pmode, label), pc_rtx)); #endif if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } fputc ('\n', asm_out_file); va_end (ap); } /* Output an absolute reference to a label. */ void dw2_asm_output_addr (int size, const char *label, const char *comment, ...) { va_list ap; va_start (ap, comment); dw2_assemble_integer (size, gen_rtx_SYMBOL_REF (Pmode, label)); if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } fputc ('\n', asm_out_file); va_end (ap); } /* Similar, but use an RTX expression instead of a text label. */ void dw2_asm_output_addr_rtx (int size, rtx addr, const char *comment, ...) { va_list ap; va_start (ap, comment); dw2_assemble_integer (size, addr); if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } fputc ('\n', asm_out_file); va_end (ap); } /* Output the first ORIG_LEN characters of STR as a string. If ORIG_LEN is equal to -1, ignore this parameter and output the entire STR instead. If COMMENT is not NULL and comments in the debug information have been requested by the user, append the given COMMENT to the generated output. */ void dw2_asm_output_nstring (const char *str, size_t orig_len, const char *comment, ...) { size_t i, len; va_list ap; va_start (ap, comment); len = orig_len; if (len == (size_t) -1) len = strlen (str); if (flag_debug_asm && comment) { fputs ("\t.ascii \"", asm_out_file); for (i = 0; i < len; i++) { int c = str[i]; if (c == '\"' || c == '\\') fputc ('\\', asm_out_file); if (ISPRINT(c)) fputc (c, asm_out_file); else fprintf (asm_out_file, "\\%o", c); } fprintf (asm_out_file, "\\0\"\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); fputc ('\n', asm_out_file); } else { /* If an explicit length was given, we can't assume there is a null termination in the string buffer. */ if (orig_len == (size_t) -1) len += 1; ASM_OUTPUT_ASCII (asm_out_file, str, len); if (orig_len != (size_t) -1) assemble_integer (const0_rtx, 1, BITS_PER_UNIT, 1); } va_end (ap); } /* Return the size of an unsigned LEB128 quantity. */ int size_of_uleb128 (unsigned HOST_WIDE_INT value) { int size = 0; do { value >>= 7; size += 1; } while (value != 0); return size; } /* Return the size of a signed LEB128 quantity. */ int size_of_sleb128 (HOST_WIDE_INT value) { int size = 0, byte; do { byte = (value & 0x7f); value >>= 7; size += 1; } while (!((value == 0 && (byte & 0x40) == 0) || (value == -1 && (byte & 0x40) != 0))); return size; } /* Given an encoding, return the number of bytes the format occupies. This is only defined for fixed-size encodings, and so does not include leb128. */ int size_of_encoded_value (int encoding) { if (encoding == DW_EH_PE_omit) return 0; switch (encoding & 0x07) { case DW_EH_PE_absptr: return POINTER_SIZE / BITS_PER_UNIT; case DW_EH_PE_udata2: return 2; case DW_EH_PE_udata4: return 4; case DW_EH_PE_udata8: return 8; } abort (); } /* Yield a name for a given pointer encoding. 
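The name is only used for the human-readable comments emitted alongside the data when flag_debug_asm is set.
*/

/* Illustrative sketch, added for exposition (not part of the original
   sources): how a DW_EH_PE_* byte decomposes.  The low three bits select
   the basic value format, bit 0x08 marks the signed variants, the 0x70
   nibble selects how the value is applied, and DW_EH_PE_indirect adds a
   level of indirection.  */
#if 0
static void
example_decompose_encoding (void)
{
  int enc = DW_EH_PE_pcrel | DW_EH_PE_sdata4;   /* 0x1b */
  int format = enc & 0x07;                      /* 4-byte data */
  int apply = enc & 0x70;                       /* pc-relative */
  int is_signed = enc & DW_EH_PE_signed;        /* nonzero: signed variant */
  /* For this encoding, size_of_encoded_value (enc) is 4 and
     eh_data_format_name (enc) is "pcrel sdata4".  */
}
#endif

/* The name table (or switch, depending on HAVE_DESIGNATED_INITIALIZERS) follows.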
*/ const char * eh_data_format_name (int format) { #if HAVE_DESIGNATED_INITIALIZERS #define S(p, v) [p] = v, #else #define S(p, v) case p: return v; #endif #if HAVE_DESIGNATED_INITIALIZERS __extension__ static const char * const format_names[256] = { #else switch (format) { #endif S(DW_EH_PE_absptr, "absolute") S(DW_EH_PE_omit, "omit") S(DW_EH_PE_aligned, "aligned absolute") S(DW_EH_PE_uleb128, "uleb128") S(DW_EH_PE_udata2, "udata2") S(DW_EH_PE_udata4, "udata4") S(DW_EH_PE_udata8, "udata8") S(DW_EH_PE_sleb128, "sleb128") S(DW_EH_PE_sdata2, "sdata2") S(DW_EH_PE_sdata4, "sdata4") S(DW_EH_PE_sdata8, "sdata8") S(DW_EH_PE_absptr | DW_EH_PE_pcrel, "pcrel") S(DW_EH_PE_uleb128 | DW_EH_PE_pcrel, "pcrel uleb128") S(DW_EH_PE_udata2 | DW_EH_PE_pcrel, "pcrel udata2") S(DW_EH_PE_udata4 | DW_EH_PE_pcrel, "pcrel udata4") S(DW_EH_PE_udata8 | DW_EH_PE_pcrel, "pcrel udata8") S(DW_EH_PE_sleb128 | DW_EH_PE_pcrel, "pcrel sleb128") S(DW_EH_PE_sdata2 | DW_EH_PE_pcrel, "pcrel sdata2") S(DW_EH_PE_sdata4 | DW_EH_PE_pcrel, "pcrel sdata4") S(DW_EH_PE_sdata8 | DW_EH_PE_pcrel, "pcrel sdata8") S(DW_EH_PE_absptr | DW_EH_PE_textrel, "textrel") S(DW_EH_PE_uleb128 | DW_EH_PE_textrel, "textrel uleb128") S(DW_EH_PE_udata2 | DW_EH_PE_textrel, "textrel udata2") S(DW_EH_PE_udata4 | DW_EH_PE_textrel, "textrel udata4") S(DW_EH_PE_udata8 | DW_EH_PE_textrel, "textrel udata8") S(DW_EH_PE_sleb128 | DW_EH_PE_textrel, "textrel sleb128") S(DW_EH_PE_sdata2 | DW_EH_PE_textrel, "textrel sdata2") S(DW_EH_PE_sdata4 | DW_EH_PE_textrel, "textrel sdata4") S(DW_EH_PE_sdata8 | DW_EH_PE_textrel, "textrel sdata8") S(DW_EH_PE_absptr | DW_EH_PE_datarel, "datarel") S(DW_EH_PE_uleb128 | DW_EH_PE_datarel, "datarel uleb128") S(DW_EH_PE_udata2 | DW_EH_PE_datarel, "datarel udata2") S(DW_EH_PE_udata4 | DW_EH_PE_datarel, "datarel udata4") S(DW_EH_PE_udata8 | DW_EH_PE_datarel, "datarel udata8") S(DW_EH_PE_sleb128 | DW_EH_PE_datarel, "datarel sleb128") S(DW_EH_PE_sdata2 | DW_EH_PE_datarel, "datarel sdata2") S(DW_EH_PE_sdata4 | DW_EH_PE_datarel, "datarel sdata4") S(DW_EH_PE_sdata8 | DW_EH_PE_datarel, "datarel sdata8") S(DW_EH_PE_absptr | DW_EH_PE_funcrel, "funcrel") S(DW_EH_PE_uleb128 | DW_EH_PE_funcrel, "funcrel uleb128") S(DW_EH_PE_udata2 | DW_EH_PE_funcrel, "funcrel udata2") S(DW_EH_PE_udata4 | DW_EH_PE_funcrel, "funcrel udata4") S(DW_EH_PE_udata8 | DW_EH_PE_funcrel, "funcrel udata8") S(DW_EH_PE_sleb128 | DW_EH_PE_funcrel, "funcrel sleb128") S(DW_EH_PE_sdata2 | DW_EH_PE_funcrel, "funcrel sdata2") S(DW_EH_PE_sdata4 | DW_EH_PE_funcrel, "funcrel sdata4") S(DW_EH_PE_sdata8 | DW_EH_PE_funcrel, "funcrel sdata8") S(DW_EH_PE_indirect | DW_EH_PE_absptr | DW_EH_PE_pcrel, "indirect pcrel") S(DW_EH_PE_indirect | DW_EH_PE_uleb128 | DW_EH_PE_pcrel, "indirect pcrel uleb128") S(DW_EH_PE_indirect | DW_EH_PE_udata2 | DW_EH_PE_pcrel, "indirect pcrel udata2") S(DW_EH_PE_indirect | DW_EH_PE_udata4 | DW_EH_PE_pcrel, "indirect pcrel udata4") S(DW_EH_PE_indirect | DW_EH_PE_udata8 | DW_EH_PE_pcrel, "indirect pcrel udata8") S(DW_EH_PE_indirect | DW_EH_PE_sleb128 | DW_EH_PE_pcrel, "indirect pcrel sleb128") S(DW_EH_PE_indirect | DW_EH_PE_sdata2 | DW_EH_PE_pcrel, "indirect pcrel sdata2") S(DW_EH_PE_indirect | DW_EH_PE_sdata4 | DW_EH_PE_pcrel, "indirect pcrel sdata4") S(DW_EH_PE_indirect | DW_EH_PE_sdata8 | DW_EH_PE_pcrel, "indirect pcrel sdata8") S(DW_EH_PE_indirect | DW_EH_PE_absptr | DW_EH_PE_textrel, "indirect textrel") S(DW_EH_PE_indirect | DW_EH_PE_uleb128 | DW_EH_PE_textrel, "indirect textrel uleb128") S(DW_EH_PE_indirect | DW_EH_PE_udata2 | DW_EH_PE_textrel, "indirect textrel 
udata2") S(DW_EH_PE_indirect | DW_EH_PE_udata4 | DW_EH_PE_textrel, "indirect textrel udata4") S(DW_EH_PE_indirect | DW_EH_PE_udata8 | DW_EH_PE_textrel, "indirect textrel udata8") S(DW_EH_PE_indirect | DW_EH_PE_sleb128 | DW_EH_PE_textrel, "indirect textrel sleb128") S(DW_EH_PE_indirect | DW_EH_PE_sdata2 | DW_EH_PE_textrel, "indirect textrel sdata2") S(DW_EH_PE_indirect | DW_EH_PE_sdata4 | DW_EH_PE_textrel, "indirect textrel sdata4") S(DW_EH_PE_indirect | DW_EH_PE_sdata8 | DW_EH_PE_textrel, "indirect textrel sdata8") S(DW_EH_PE_indirect | DW_EH_PE_absptr | DW_EH_PE_datarel, "indirect datarel") S(DW_EH_PE_indirect | DW_EH_PE_uleb128 | DW_EH_PE_datarel, "indirect datarel uleb128") S(DW_EH_PE_indirect | DW_EH_PE_udata2 | DW_EH_PE_datarel, "indirect datarel udata2") S(DW_EH_PE_indirect | DW_EH_PE_udata4 | DW_EH_PE_datarel, "indirect datarel udata4") S(DW_EH_PE_indirect | DW_EH_PE_udata8 | DW_EH_PE_datarel, "indirect datarel udata8") S(DW_EH_PE_indirect | DW_EH_PE_sleb128 | DW_EH_PE_datarel, "indirect datarel sleb128") S(DW_EH_PE_indirect | DW_EH_PE_sdata2 | DW_EH_PE_datarel, "indirect datarel sdata2") S(DW_EH_PE_indirect | DW_EH_PE_sdata4 | DW_EH_PE_datarel, "indirect datarel sdata4") S(DW_EH_PE_indirect | DW_EH_PE_sdata8 | DW_EH_PE_datarel, "indirect datarel sdata8") S(DW_EH_PE_indirect | DW_EH_PE_absptr | DW_EH_PE_funcrel, "indirect funcrel") S(DW_EH_PE_indirect | DW_EH_PE_uleb128 | DW_EH_PE_funcrel, "indirect funcrel uleb128") S(DW_EH_PE_indirect | DW_EH_PE_udata2 | DW_EH_PE_funcrel, "indirect funcrel udata2") S(DW_EH_PE_indirect | DW_EH_PE_udata4 | DW_EH_PE_funcrel, "indirect funcrel udata4") S(DW_EH_PE_indirect | DW_EH_PE_udata8 | DW_EH_PE_funcrel, "indirect funcrel udata8") S(DW_EH_PE_indirect | DW_EH_PE_sleb128 | DW_EH_PE_funcrel, "indirect funcrel sleb128") S(DW_EH_PE_indirect | DW_EH_PE_sdata2 | DW_EH_PE_funcrel, "indirect funcrel sdata2") S(DW_EH_PE_indirect | DW_EH_PE_sdata4 | DW_EH_PE_funcrel, "indirect funcrel sdata4") S(DW_EH_PE_indirect | DW_EH_PE_sdata8 | DW_EH_PE_funcrel, "indirect funcrel sdata8") #if HAVE_DESIGNATED_INITIALIZERS }; if (format < 0 || format > 0xff || format_names[format] == NULL) abort (); return format_names[format]; #else } abort (); #endif } /* Output an unsigned LEB128 quantity. */ void dw2_asm_output_data_uleb128 (unsigned HOST_WIDE_INT value, const char *comment, ...) { va_list ap; va_start (ap, comment); #ifdef HAVE_AS_LEB128 fprintf (asm_out_file, "\t.uleb128 " HOST_WIDE_INT_PRINT_HEX , value); if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } #else { unsigned HOST_WIDE_INT work = value; const char *byte_op = targetm.asm_out.byte_op; if (byte_op) fputs (byte_op, asm_out_file); do { int byte = (work & 0x7f); work >>= 7; if (work != 0) /* More bytes to follow. */ byte |= 0x80; if (byte_op) { fprintf (asm_out_file, "0x%x", byte); if (work != 0) fputc (',', asm_out_file); } else assemble_integer (GEN_INT (byte), 1, BITS_PER_UNIT, 1); } while (work != 0); if (flag_debug_asm) { fprintf (asm_out_file, "\t%s uleb128 " HOST_WIDE_INT_PRINT_HEX, ASM_COMMENT_START, value); if (comment) { fputs ("; ", asm_out_file); vfprintf (asm_out_file, comment, ap); } } } #endif fputc ('\n', asm_out_file); va_end (ap); } /* Output a signed LEB128 quantity. */ void dw2_asm_output_data_sleb128 (HOST_WIDE_INT value, const char *comment, ...) 
{ va_list ap; va_start (ap, comment); #ifdef HAVE_AS_LEB128 fprintf (asm_out_file, "\t.sleb128 " HOST_WIDE_INT_PRINT_DEC, value); if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } #else { HOST_WIDE_INT work = value; int more, byte; const char *byte_op = targetm.asm_out.byte_op; if (byte_op) fputs (byte_op, asm_out_file); do { byte = (work & 0x7f); /* arithmetic shift */ work >>= 7; more = !((work == 0 && (byte & 0x40) == 0) || (work == -1 && (byte & 0x40) != 0)); if (more) byte |= 0x80; if (byte_op) { fprintf (asm_out_file, "0x%x", byte); if (more) fputc (',', asm_out_file); } else assemble_integer (GEN_INT (byte), 1, BITS_PER_UNIT, 1); } while (more); if (flag_debug_asm) { fprintf (asm_out_file, "\t%s sleb128 " HOST_WIDE_INT_PRINT_DEC, ASM_COMMENT_START, value); if (comment) { fputs ("; ", asm_out_file); vfprintf (asm_out_file, comment, ap); } } } #endif fputc ('\n', asm_out_file); va_end (ap); } void dw2_asm_output_delta_uleb128 (const char *lab1 ATTRIBUTE_UNUSED, const char *lab2 ATTRIBUTE_UNUSED, const char *comment, ...) { va_list ap; va_start (ap, comment); #ifdef HAVE_AS_LEB128 fputs ("\t.uleb128 ", asm_out_file); assemble_name (asm_out_file, lab1); fputc ('-', asm_out_file); assemble_name (asm_out_file, lab2); #else abort (); #endif if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } fputc ('\n', asm_out_file); va_end (ap); } void dw2_asm_output_delta_sleb128 (const char *lab1 ATTRIBUTE_UNUSED, const char *lab2 ATTRIBUTE_UNUSED, const char *comment, ...) { va_list ap; va_start (ap, comment); #ifdef HAVE_AS_LEB128 fputs ("\t.sleb128 ", asm_out_file); assemble_name (asm_out_file, lab1); fputc ('-', asm_out_file); assemble_name (asm_out_file, lab2); #else abort (); #endif if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } fputc ('\n', asm_out_file); va_end (ap); } static rtx dw2_force_const_mem (rtx); static int dw2_output_indirect_constant_1 (splay_tree_node, void *); static GTY((param1_is (char *), param2_is (tree))) splay_tree indirect_pool; static GTY(()) int dw2_const_labelno; #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY) # define USE_LINKONCE_INDIRECT 1 #else # define USE_LINKONCE_INDIRECT 0 #endif /* Put X, a SYMBOL_REF, in memory. Return a SYMBOL_REF to the allocated memory. Differs from force_const_mem in that a single pool is used for the entire unit of translation, and the memory is not guaranteed to be "near" the function in any interesting sense. */ static rtx dw2_force_const_mem (rtx x) { splay_tree_node node; const char *str; tree decl; if (! 
indirect_pool) indirect_pool = splay_tree_new_ggc (splay_tree_compare_pointers); if (GET_CODE (x) != SYMBOL_REF) abort (); str = targetm.strip_name_encoding (XSTR (x, 0)); node = splay_tree_lookup (indirect_pool, (splay_tree_key) str); if (node) decl = (tree) node->value; else { tree id; if (USE_LINKONCE_INDIRECT) { char *ref_name = alloca (strlen (str) + sizeof "DW.ref."); sprintf (ref_name, "DW.ref.%s", str); id = get_identifier (ref_name); decl = build_decl (VAR_DECL, id, ptr_type_node); DECL_ARTIFICIAL (decl) = 1; TREE_PUBLIC (decl) = 1; DECL_INITIAL (decl) = decl; make_decl_one_only (decl); } else { char label[32]; ASM_GENERATE_INTERNAL_LABEL (label, "LDFCM", dw2_const_labelno); ++dw2_const_labelno; id = get_identifier (label); decl = build_decl (VAR_DECL, id, ptr_type_node); DECL_ARTIFICIAL (decl) = 1; TREE_STATIC (decl) = 1; DECL_INITIAL (decl) = decl; } id = maybe_get_identifier (str); if (id) TREE_SYMBOL_REFERENCED (id) = 1; splay_tree_insert (indirect_pool, (splay_tree_key) str, (splay_tree_value) decl); } return XEXP (DECL_RTL (decl), 0); } /* A helper function for dw2_output_indirect_constants called through splay_tree_foreach. Emit one queued constant to memory. */ static int dw2_output_indirect_constant_1 (splay_tree_node node, void *data ATTRIBUTE_UNUSED) { const char *sym; rtx sym_ref; sym = (const char *) node->key; sym_ref = gen_rtx_SYMBOL_REF (Pmode, sym); if (USE_LINKONCE_INDIRECT) fprintf (asm_out_file, "\t.hidden %sDW.ref.%s\n", user_label_prefix, sym); assemble_variable ((tree) node->value, 1, 1, 1); assemble_integer (sym_ref, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1); return 0; } /* Emit the constants queued through dw2_force_const_mem. */ void dw2_output_indirect_constants (void) { if (indirect_pool) splay_tree_foreach (indirect_pool, dw2_output_indirect_constant_1, NULL); } /* Like dw2_asm_output_addr_rtx, but encode the pointer as directed. */ void dw2_asm_output_encoded_addr_rtx (int encoding, rtx addr, const char *comment, ...) { int size; va_list ap; va_start (ap, comment); size = size_of_encoded_value (encoding); if (encoding == DW_EH_PE_aligned) { assemble_align (POINTER_SIZE); assemble_integer (addr, size, POINTER_SIZE, 1); return; } /* NULL is _always_ represented as a plain zero, as is 1 for Ada's "all others". */ if (addr == const0_rtx || addr == const1_rtx) assemble_integer (addr, size, BITS_PER_UNIT, 1); else { restart: /* Allow the target first crack at emitting this. Some of the special relocations require special directives instead of just ".4byte" or whatever. */ #ifdef ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX (asm_out_file, encoding, size, addr, done); #endif /* Indirection is used to get dynamic relocations out of a read-only section. */ if (encoding & DW_EH_PE_indirect) { /* It is very tempting to use force_const_mem so that we share data with the normal constant pool. However, we've already emitted the constant pool for this function. Moreover, we'd like to share these constants across the entire unit of translation, or better, across the entire application (or DSO). 
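So we put the address in a pool of our own: for a personality routine symbol, say "__gxx_personality_v0", dw2_force_const_mem yields a pointer-sized, one-only (and, where supported, hidden) variable named "DW.ref.__gxx_personality_v0", or an internal LDFCM label when one-only sections are unavailable, and dw2_output_indirect_constants later emits each such variable initialized with the symbol it wraps.  The reference emitted here then points at that variable instead of the symbol itself, which keeps the dynamic relocation out of the read-only section (the symbol name above is purely illustrative).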
*/ addr = dw2_force_const_mem (addr); encoding &= ~DW_EH_PE_indirect; goto restart; } switch (encoding & 0xF0) { case DW_EH_PE_absptr: dw2_assemble_integer (size, addr); break; case DW_EH_PE_pcrel: if (GET_CODE (addr) != SYMBOL_REF) abort (); #ifdef ASM_OUTPUT_DWARF_PCREL ASM_OUTPUT_DWARF_PCREL (asm_out_file, size, XSTR (addr, 0)); #else dw2_assemble_integer (size, gen_rtx_MINUS (Pmode, addr, pc_rtx)); #endif break; default: /* Other encodings should have been handled by ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX. */ abort (); } #ifdef ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX done:; #endif } if (flag_debug_asm && comment) { fprintf (asm_out_file, "\t%s ", ASM_COMMENT_START); vfprintf (asm_out_file, comment, ap); } fputc ('\n', asm_out_file); va_end (ap); }
/* Type information for dwarf2asm.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_dwarf2asm_h[] = { { &indirect_pool, 1, sizeof (indirect_pool), &gt_ggc_m_SP9tree_node12splay_tree_s, &gt_pch_n_SP9tree_node12splay_tree_s }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_dwarf2asm_h[] = { { &dw2_const_labelno, 1, sizeof (dw2_const_labelno), NULL, NULL }, LAST_GGC_ROOT_TAB };
/* Output Dwarf2 format symbol table information from GCC. Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Gary Funck (gary@intrepid.com). Derived from DWARF 1 implementation of Ron Guilmette (rfg@monkeys.com). Extensively modified by Jason Merrill (jason@cygnus.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
/* TODO: Emit .debug_line header even when there are no functions, since the file numbers are used by .debug_info. Alternately, leave out locations for types and decls. Avoid talking about ctors and op= for PODs. Factor out common prologue sequences into multiple CIEs. */
/* The first part of this file deals with the DWARF 2 frame unwind information, which is also used by the GCC efficient exception handling mechanism. The second part, controlled only by an #ifdef DWARF2_DEBUGGING_INFO, deals with the other DWARF 2 debugging information.
*/ /* dwarf2out.h - Various declarations for functions found in dwarf2out.c Copyright (C) 1998, 1999, 2000, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ extern void dwarf2out_decl (tree); extern void dwarf2out_frame_debug (rtx); extern void debug_dwarf (void); struct die_struct; extern void debug_dwarf_die (struct die_struct *); extern void dwarf2out_set_demangle_name_func (const char *(*) (const char *)); extern void dwarf2out_add_library_unit_info (const char *, const char *); #ifdef DWARF2_DEBUGGING_INFO static void dwarf2out_source_line (unsigned int, const char *); #endif /* DWARF2 Abbreviation Glossary: CFA = Canonical Frame Address a fixed address on the stack which identifies a call frame. We define it to be the value of SP just before the call insn. The CFA register and offset, which may change during the course of the function, are used to calculate its value at runtime. CFI = Call Frame Instruction an instruction for the DWARF2 abstract machine CIE = Common Information Entry information describing information common to one or more FDEs DIE = Debugging Information Entry FDE = Frame Description Entry information describing the stack call frame, in particular, how to restore registers DW_CFA_... = DWARF2 CFA call frame instruction DW_TAG_... = DWARF2 DIE tag */ /* Decide whether we want to emit frame unwind information for the current translation unit. */ int dwarf2out_do_frame (void) { return (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG #ifdef DWARF2_FRAME_INFO || DWARF2_FRAME_INFO #endif #ifdef DWARF2_UNWIND_INFO || flag_unwind_tables || (flag_exceptions && ! USING_SJLJ_EXCEPTIONS) #endif ); } /* The size of the target's pointer type. */ #ifndef PTR_SIZE #define PTR_SIZE (POINTER_SIZE / BITS_PER_UNIT) #endif /* Various versions of targetm.eh_frame_section. Note these must appear outside the DWARF2_DEBUGGING_INFO || DWARF2_UNWIND_INFO macro guards. */ /* Version of targetm.eh_frame_section for systems with named sections. */ void named_section_eh_frame_section (void) { #ifdef EH_FRAME_SECTION_NAME #ifdef HAVE_LD_RO_RW_SECTION_MIXING int fde_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0); int per_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2, /*global=*/1); int lsda_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/0); int flags; flags = (! flag_pic || ((fde_encoding & 0x70) != DW_EH_PE_absptr && (fde_encoding & 0x70) != DW_EH_PE_aligned && (per_encoding & 0x70) != DW_EH_PE_absptr && (per_encoding & 0x70) != DW_EH_PE_aligned && (lsda_encoding & 0x70) != DW_EH_PE_absptr && (lsda_encoding & 0x70) != DW_EH_PE_aligned)) ? 0 : SECTION_WRITE; named_section_flags (EH_FRAME_SECTION_NAME, flags); #else named_section_flags (EH_FRAME_SECTION_NAME, SECTION_WRITE); #endif #endif } /* Version of targetm.eh_frame_section for systems using collect2. 
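For contrast with the named-section variant above: there, .eh_frame stays read-only when -fPIC is not in use, or when none of the FDE, personality, and LSDA encodings chosen by ASM_PREFERRED_EH_DATA_FORMAT is DW_EH_PE_absptr or DW_EH_PE_aligned; absolute pointers would need runtime relocation, so those cases force SECTION_WRITE.  The collect2 variant below simply switches to the data section and emits a globalized marker label whose name comes from get_file_function_name ('F'), so that collect2 can recognize the start of this unit's frame data at link time.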
*/ void collect2_eh_frame_section (void) { tree label = get_file_function_name ('F'); data_section (); ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE)); targetm.asm_out.globalize_label (asm_out_file, IDENTIFIER_POINTER (label)); ASM_OUTPUT_LABEL (asm_out_file, IDENTIFIER_POINTER (label)); } /* Default version of targetm.eh_frame_section. */ void default_eh_frame_section (void) { #ifdef EH_FRAME_SECTION_NAME named_section_eh_frame_section (); #else collect2_eh_frame_section (); #endif } /* Array of RTXes referenced by the debugging information, which therefore must be kept around forever. */ static GTY(()) varray_type used_rtx_varray; /* A pointer to the base of a list of incomplete types which might be completed at some later time. incomplete_types_list needs to be a VARRAY because we want to tell the garbage collector about it. */ static GTY(()) varray_type incomplete_types; /* A pointer to the base of a table of references to declaration scopes. This table is a display which tracks the nesting of declaration scopes at the current scope and containing scopes. This table is used to find the proper place to define type declaration DIE's. */ static GTY(()) varray_type decl_scope_table; /* How to start an assembler comment. */ #ifndef ASM_COMMENT_START #define ASM_COMMENT_START ";#" #endif typedef struct dw_cfi_struct *dw_cfi_ref; typedef struct dw_fde_struct *dw_fde_ref; typedef union dw_cfi_oprnd_struct *dw_cfi_oprnd_ref; /* Call frames are described using a sequence of Call Frame Information instructions. The register number, offset and address fields are provided as possible operands; their use is selected by the opcode field. */ enum dw_cfi_oprnd_type { dw_cfi_oprnd_unused, dw_cfi_oprnd_reg_num, dw_cfi_oprnd_offset, dw_cfi_oprnd_addr, dw_cfi_oprnd_loc }; typedef union dw_cfi_oprnd_struct GTY(()) { unsigned long GTY ((tag ("dw_cfi_oprnd_reg_num"))) dw_cfi_reg_num; HOST_WIDE_INT GTY ((tag ("dw_cfi_oprnd_offset"))) dw_cfi_offset; const char * GTY ((tag ("dw_cfi_oprnd_addr"))) dw_cfi_addr; struct dw_loc_descr_struct * GTY ((tag ("dw_cfi_oprnd_loc"))) dw_cfi_loc; } dw_cfi_oprnd; typedef struct dw_cfi_struct GTY(()) { dw_cfi_ref dw_cfi_next; enum dwarf_call_frame_info dw_cfi_opc; dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd1_desc (%1.dw_cfi_opc)"))) dw_cfi_oprnd1; dw_cfi_oprnd GTY ((desc ("dw_cfi_oprnd2_desc (%1.dw_cfi_opc)"))) dw_cfi_oprnd2; } dw_cfi_node; /* This is how we define the location of the CFA. We use to handle it as REG + OFFSET all the time, but now it can be more complex. It can now be either REG + CFA_OFFSET or *(REG + BASE_OFFSET) + CFA_OFFSET. Instead of passing around REG and OFFSET, we pass a copy of this structure. */ typedef struct cfa_loc GTY(()) { unsigned long reg; HOST_WIDE_INT offset; HOST_WIDE_INT base_offset; int indirect; /* 1 if CFA is accessed via a dereference. */ } dw_cfa_location; /* All call frame descriptions (FDE's) in the GCC generated DWARF refer to a single Common Information Entry (CIE), defined at the beginning of the .debug_frame section. This use of a single CIE obviates the need to keep track of multiple CIE's in the DWARF generation routines below. */ typedef struct dw_fde_struct GTY(()) { tree decl; const char *dw_fde_begin; const char *dw_fde_current_label; const char *dw_fde_end; dw_cfi_ref dw_fde_cfi; unsigned funcdef_number; unsigned all_throwers_are_sibcalls : 1; unsigned nothrow : 1; unsigned uses_eh_lsda : 1; } dw_fde_node; /* Maximum size (in bytes) of an artificially generated label. 
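The labels generated here are a short prefix such as "LFB", "LFE", "LCFI" or "LASFDE" followed by a decimal counter (see FUNC_BEGIN_LABEL and friends below), so 30 bytes leaves ample headroom.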
*/ #define MAX_ARTIFICIAL_LABEL_BYTES 30 /* The size of addresses as they appear in the Dwarf 2 data. Some architectures use word addresses to refer to code locations, but Dwarf 2 info always uses byte addresses. On such machines, Dwarf 2 addresses need to be larger than the architecture's pointers. */ #ifndef DWARF2_ADDR_SIZE #define DWARF2_ADDR_SIZE (POINTER_SIZE / BITS_PER_UNIT) #endif /* The size in bytes of a DWARF field indicating an offset or length relative to a debug info section, specified to be 4 bytes in the DWARF-2 specification. The SGI/MIPS ABI defines it to be the same as PTR_SIZE. */ #ifndef DWARF_OFFSET_SIZE #define DWARF_OFFSET_SIZE 4 #endif /* According to the (draft) DWARF 3 specification, the initial length should either be 4 or 12 bytes. When it's 12 bytes, the first 4 bytes are 0xffffffff, followed by the length stored in the next 8 bytes. However, the SGI/MIPS ABI uses an initial length which is equal to DWARF_OFFSET_SIZE. It is defined (elsewhere) accordingly. */ #ifndef DWARF_INITIAL_LENGTH_SIZE #define DWARF_INITIAL_LENGTH_SIZE (DWARF_OFFSET_SIZE == 4 ? 4 : 12) #endif #define DWARF_VERSION 2 /* Round SIZE up to the nearest BOUNDARY. */ #define DWARF_ROUND(SIZE,BOUNDARY) \ ((((SIZE) + (BOUNDARY) - 1) / (BOUNDARY)) * (BOUNDARY)) /* Offsets recorded in opcodes are a multiple of this alignment factor. */ #ifndef DWARF_CIE_DATA_ALIGNMENT #ifdef STACK_GROWS_DOWNWARD #define DWARF_CIE_DATA_ALIGNMENT (-((int) UNITS_PER_WORD)) #else #define DWARF_CIE_DATA_ALIGNMENT ((int) UNITS_PER_WORD) #endif #endif /* A pointer to the base of a table that contains frame description information for each routine. */ static GTY((length ("fde_table_allocated"))) dw_fde_ref fde_table; /* Number of elements currently allocated for fde_table. */ static GTY(()) unsigned fde_table_allocated; /* Number of elements in fde_table currently in use. */ static GTY(()) unsigned fde_table_in_use; /* Size (in elements) of increments by which we may expand the fde_table. */ #define FDE_TABLE_INCREMENT 256 /* A list of call frame insns for the CIE. */ static GTY(()) dw_cfi_ref cie_cfi_head; #if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO) /* Some DWARF extensions (e.g., MIPS/SGI) implement a subprogram attribute that accelerates the lookup of the FDE associated with the subprogram. This variable holds the table index of the FDE associated with the current function (body) definition. */ static unsigned current_funcdef_fde; #endif struct indirect_string_node GTY(()) { const char *str; unsigned int refcount; unsigned int form; char *label; }; static GTY ((param_is (struct indirect_string_node))) htab_t debug_str_hash; static GTY(()) int dw2_string_counter; static GTY(()) unsigned long dwarf2out_cfi_label_num; #if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO) /* Forward declarations for functions defined in this file. 
*/ static char *stripattributes (const char *); static const char *dwarf_cfi_name (unsigned); static dw_cfi_ref new_cfi (void); static void add_cfi (dw_cfi_ref *, dw_cfi_ref); static void add_fde_cfi (const char *, dw_cfi_ref); static void lookup_cfa_1 (dw_cfi_ref, dw_cfa_location *); static void lookup_cfa (dw_cfa_location *); static void reg_save (const char *, unsigned, unsigned, HOST_WIDE_INT); static void initial_return_save (rtx); static HOST_WIDE_INT stack_adjust_offset (rtx); static void output_cfi (dw_cfi_ref, dw_fde_ref, int); static void output_call_frame_info (int); static void dwarf2out_stack_adjust (rtx); static void flush_queued_reg_saves (void); static bool clobbers_queued_reg_save (rtx); static void dwarf2out_frame_debug_expr (rtx, const char *); /* Support for complex CFA locations. */ static void output_cfa_loc (dw_cfi_ref); static void get_cfa_from_loc_descr (dw_cfa_location *, struct dw_loc_descr_struct *); static struct dw_loc_descr_struct *build_cfa_loc (dw_cfa_location *); static void def_cfa_1 (const char *, dw_cfa_location *); /* How to start an assembler comment. */ #ifndef ASM_COMMENT_START #define ASM_COMMENT_START ";#" #endif /* Data and reference forms for relocatable data. */ #define DW_FORM_data (DWARF_OFFSET_SIZE == 8 ? DW_FORM_data8 : DW_FORM_data4) #define DW_FORM_ref (DWARF_OFFSET_SIZE == 8 ? DW_FORM_ref8 : DW_FORM_ref4) #ifndef DEBUG_FRAME_SECTION #define DEBUG_FRAME_SECTION ".debug_frame" #endif #ifndef FUNC_BEGIN_LABEL #define FUNC_BEGIN_LABEL "LFB" #endif #ifndef FUNC_END_LABEL #define FUNC_END_LABEL "LFE" #endif #ifndef FRAME_BEGIN_LABEL #define FRAME_BEGIN_LABEL "Lframe" #endif #define CIE_AFTER_SIZE_LABEL "LSCIE" #define CIE_END_LABEL "LECIE" #define FDE_LABEL "LSFDE" #define FDE_AFTER_SIZE_LABEL "LASFDE" #define FDE_END_LABEL "LEFDE" #define LINE_NUMBER_BEGIN_LABEL "LSLT" #define LINE_NUMBER_END_LABEL "LELT" #define LN_PROLOG_AS_LABEL "LASLTP" #define LN_PROLOG_END_LABEL "LELTP" #define DIE_LABEL_PREFIX "DW" /* The DWARF 2 CFA column which tracks the return address. Normally this is the column for PC, or the first column after all of the hard registers. */ #ifndef DWARF_FRAME_RETURN_COLUMN #ifdef PC_REGNUM #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (PC_REGNUM) #else #define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGISTERS #endif #endif /* The mapping from gcc register number to DWARF 2 CFA column number. By default, we just provide columns for all registers. */ #ifndef DWARF_FRAME_REGNUM #define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG) #endif /* The offset from the incoming value of %sp to the top of the stack frame for the current function. */ #ifndef INCOMING_FRAME_SP_OFFSET #define INCOMING_FRAME_SP_OFFSET 0 #endif /* Hook used by __throw. */ rtx expand_builtin_dwarf_sp_column (void) { return GEN_INT (DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM)); } /* Return a pointer to a copy of the section string name S with all attributes stripped off, and an asterisk prepended (for assemble_name). */ static inline char * stripattributes (const char *s) { char *stripped = xmalloc (strlen (s) + 2); char *p = stripped; *p++ = '*'; while (*s && *s != ',') *p++ = *s++; *p = '\0'; return stripped; } /* Generate code to initialize the register size table. 
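This backs the __builtin_init_dwarf_reg_size_table builtin used by the DWARF unwinder: the function below expands to code that stores, for each hard register with a DWARF column, the byte size of the mode in which that register is saved into the corresponding element of the caller-supplied table (for example, on a typical 32-bit target each general-purpose register contributes a 4 in its column), with special handling for the return-address column.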
*/ void expand_builtin_init_dwarf_reg_sizes (tree address) { int i; enum machine_mode mode = TYPE_MODE (char_type_node); rtx addr = expand_expr (address, NULL_RTX, VOIDmode, 0); rtx mem = gen_rtx_MEM (BLKmode, addr); bool wrote_return_column = false; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (DWARF_FRAME_REGNUM (i) < DWARF_FRAME_REGISTERS) { HOST_WIDE_INT offset = DWARF_FRAME_REGNUM (i) * GET_MODE_SIZE (mode); enum machine_mode save_mode = reg_raw_mode[i]; HOST_WIDE_INT size; if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode)) save_mode = choose_hard_reg_mode (i, 1, true); if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN) { if (save_mode == VOIDmode) continue; wrote_return_column = true; } size = GET_MODE_SIZE (save_mode); if (offset < 0) continue; emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size)); } #ifdef DWARF_ALT_FRAME_RETURN_COLUMN if (! wrote_return_column) abort (); i = DWARF_ALT_FRAME_RETURN_COLUMN; wrote_return_column = false; #else i = DWARF_FRAME_RETURN_COLUMN; #endif if (! wrote_return_column) { enum machine_mode save_mode = Pmode; HOST_WIDE_INT offset = i * GET_MODE_SIZE (mode); HOST_WIDE_INT size = GET_MODE_SIZE (save_mode); emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size)); } } /* Convert a DWARF call frame info. operation to its string name */ static const char * dwarf_cfi_name (unsigned int cfi_opc) { switch (cfi_opc) { case DW_CFA_advance_loc: return "DW_CFA_advance_loc"; case DW_CFA_offset: return "DW_CFA_offset"; case DW_CFA_restore: return "DW_CFA_restore"; case DW_CFA_nop: return "DW_CFA_nop"; case DW_CFA_set_loc: return "DW_CFA_set_loc"; case DW_CFA_advance_loc1: return "DW_CFA_advance_loc1"; case DW_CFA_advance_loc2: return "DW_CFA_advance_loc2"; case DW_CFA_advance_loc4: return "DW_CFA_advance_loc4"; case DW_CFA_offset_extended: return "DW_CFA_offset_extended"; case DW_CFA_restore_extended: return "DW_CFA_restore_extended"; case DW_CFA_undefined: return "DW_CFA_undefined"; case DW_CFA_same_value: return "DW_CFA_same_value"; case DW_CFA_register: return "DW_CFA_register"; case DW_CFA_remember_state: return "DW_CFA_remember_state"; case DW_CFA_restore_state: return "DW_CFA_restore_state"; case DW_CFA_def_cfa: return "DW_CFA_def_cfa"; case DW_CFA_def_cfa_register: return "DW_CFA_def_cfa_register"; case DW_CFA_def_cfa_offset: return "DW_CFA_def_cfa_offset"; /* DWARF 3 */ case DW_CFA_def_cfa_expression: return "DW_CFA_def_cfa_expression"; case DW_CFA_expression: return "DW_CFA_expression"; case DW_CFA_offset_extended_sf: return "DW_CFA_offset_extended_sf"; case DW_CFA_def_cfa_sf: return "DW_CFA_def_cfa_sf"; case DW_CFA_def_cfa_offset_sf: return "DW_CFA_def_cfa_offset_sf"; /* SGI/MIPS specific */ case DW_CFA_MIPS_advance_loc8: return "DW_CFA_MIPS_advance_loc8"; /* GNU extensions */ case DW_CFA_GNU_window_save: return "DW_CFA_GNU_window_save"; case DW_CFA_GNU_args_size: return "DW_CFA_GNU_args_size"; case DW_CFA_GNU_negative_offset_extended: return "DW_CFA_GNU_negative_offset_extended"; default: return "DW_CFA_"; } } /* Return a pointer to a newly allocated Call Frame Instruction. */ static inline dw_cfi_ref new_cfi (void) { dw_cfi_ref cfi = ggc_alloc (sizeof (dw_cfi_node)); cfi->dw_cfi_next = NULL; cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0; cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0; return cfi; } /* Add a Call Frame Instruction to list of instructions. */ static inline void add_cfi (dw_cfi_ref *list_head, dw_cfi_ref cfi) { dw_cfi_ref *p; /* Find the end of the chain. 
*/ for (p = list_head; (*p) != NULL; p = &(*p)->dw_cfi_next) ; *p = cfi; } /* Generate a new label for the CFI info to refer to. */ char * dwarf2out_cfi_label (void) { static char label[20]; ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", dwarf2out_cfi_label_num++); ASM_OUTPUT_LABEL (asm_out_file, label); return label; } /* Add CFI to the current fde at the PC value indicated by LABEL if specified, or to the CIE if LABEL is NULL. */ static void add_fde_cfi (const char *label, dw_cfi_ref cfi) { if (label) { dw_fde_ref fde = &fde_table[fde_table_in_use - 1]; if (*label == 0) label = dwarf2out_cfi_label (); if (fde->dw_fde_current_label == NULL || strcmp (label, fde->dw_fde_current_label) != 0) { dw_cfi_ref xcfi; fde->dw_fde_current_label = label = xstrdup (label); /* Set the location counter to the new label. */ xcfi = new_cfi (); xcfi->dw_cfi_opc = DW_CFA_advance_loc4; xcfi->dw_cfi_oprnd1.dw_cfi_addr = label; add_cfi (&fde->dw_fde_cfi, xcfi); } add_cfi (&fde->dw_fde_cfi, cfi); } else add_cfi (&cie_cfi_head, cfi); } /* Subroutine of lookup_cfa. */ static inline void lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc) { switch (cfi->dw_cfi_opc) { case DW_CFA_def_cfa_offset: loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset; break; case DW_CFA_def_cfa_register: loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num; break; case DW_CFA_def_cfa: loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num; loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset; break; case DW_CFA_def_cfa_expression: get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc); break; default: break; } } /* Find the previous value for the CFA. */ static void lookup_cfa (dw_cfa_location *loc) { dw_cfi_ref cfi; loc->reg = (unsigned long) -1; loc->offset = 0; loc->indirect = 0; loc->base_offset = 0; for (cfi = cie_cfi_head; cfi; cfi = cfi->dw_cfi_next) lookup_cfa_1 (cfi, loc); if (fde_table_in_use) { dw_fde_ref fde = &fde_table[fde_table_in_use - 1]; for (cfi = fde->dw_fde_cfi; cfi; cfi = cfi->dw_cfi_next) lookup_cfa_1 (cfi, loc); } } /* The current rule for calculating the DWARF2 canonical frame address. */ static dw_cfa_location cfa; /* The register used for saving registers to the stack, and its offset from the CFA. */ static dw_cfa_location cfa_store; /* The running total of the size of arguments pushed onto the stack. */ static HOST_WIDE_INT args_size; /* The last args_size we actually output. */ static HOST_WIDE_INT old_args_size; /* Entry point to update the canonical frame address (CFA). LABEL is passed to add_fde_cfi. The value of CFA is now to be calculated from REG+OFFSET. */ void dwarf2out_def_cfa (const char *label, unsigned int reg, HOST_WIDE_INT offset) { dw_cfa_location loc; loc.indirect = 0; loc.base_offset = 0; loc.reg = reg; loc.offset = offset; def_cfa_1 (label, &loc); } /* This routine does the actual work. The CFA is now calculated from the dw_cfa_location structure. */ static void def_cfa_1 (const char *label, dw_cfa_location *loc_p) { dw_cfi_ref cfi; dw_cfa_location old_cfa, loc; cfa = *loc_p; loc = *loc_p; if (cfa_store.reg == loc.reg && loc.indirect == 0) cfa_store.offset = loc.offset; loc.reg = DWARF_FRAME_REGNUM (loc.reg); lookup_cfa (&old_cfa); /* If nothing changed, no need to issue any call frame instructions. 
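Otherwise the cheapest encoding is chosen below.  As an illustration, a conventional ix86 prologue proceeds: after 'push %ebp' the CFA is still computed from %esp but lies 8 bytes above it, which is expressed with DW_CFA_def_cfa_offset 8 (register unchanged, offset changed); after 'mov %esp, %ebp' the CFA becomes %ebp + 8, expressed with DW_CFA_def_cfa_register (offset unchanged, register changed); a change to both at once uses DW_CFA_def_cfa, and a CFA that is no longer register + offset at all falls back to DW_CFA_def_cfa_expression.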
*/ if (loc.reg == old_cfa.reg && loc.offset == old_cfa.offset && loc.indirect == old_cfa.indirect && (loc.indirect == 0 || loc.base_offset == old_cfa.base_offset)) return; cfi = new_cfi (); if (loc.reg == old_cfa.reg && !loc.indirect) { /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating the CFA register did not change but the offset did. */ cfi->dw_cfi_opc = DW_CFA_def_cfa_offset; cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset; } #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */ else if (loc.offset == old_cfa.offset && old_cfa.reg != (unsigned long) -1 && !loc.indirect) { /* Construct a "DW_CFA_def_cfa_register <register>" instruction, indicating the CFA register has changed to <register> but the offset has not changed. */ cfi->dw_cfi_opc = DW_CFA_def_cfa_register; cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg; } #endif else if (loc.indirect == 0) { /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction, indicating the CFA register has changed to <register> with the specified offset. */ cfi->dw_cfi_opc = DW_CFA_def_cfa; cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg; cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset; } else { /* Construct a DW_CFA_def_cfa_expression instruction to calculate the CFA using a full location expression since no register-offset pair is available. */ struct dw_loc_descr_struct *loc_list; cfi->dw_cfi_opc = DW_CFA_def_cfa_expression; loc_list = build_cfa_loc (&loc); cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list; } add_fde_cfi (label, cfi); }
/* Add the CFI for saving a register. REG is the CFA column number. LABEL is passed to add_fde_cfi. If SREG is -1, the register is saved at OFFSET from the CFA; otherwise it is saved in SREG. */ static void reg_save (const char *label, unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset) { dw_cfi_ref cfi = new_cfi (); cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg; if (sreg == INVALID_REGNUM) { if (reg & ~0x3f) /* The register number won't fit in 6 bits, so we have to use the long form. */ cfi->dw_cfi_opc = DW_CFA_offset_extended; else cfi->dw_cfi_opc = DW_CFA_offset; #ifdef ENABLE_CHECKING { /* If we get an offset that is not a multiple of DWARF_CIE_DATA_ALIGNMENT, there is either a bug in the definition of DWARF_CIE_DATA_ALIGNMENT, or a bug in the machine description. */ HOST_WIDE_INT check_offset = offset / DWARF_CIE_DATA_ALIGNMENT; if (check_offset * DWARF_CIE_DATA_ALIGNMENT != offset) abort (); } #endif offset /= DWARF_CIE_DATA_ALIGNMENT; if (offset < 0) cfi->dw_cfi_opc = DW_CFA_offset_extended_sf; cfi->dw_cfi_oprnd2.dw_cfi_offset = offset; } else if (sreg == reg) cfi->dw_cfi_opc = DW_CFA_same_value; else { cfi->dw_cfi_opc = DW_CFA_register; cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg; } add_fde_cfi (label, cfi); }
/* Add the CFI for saving a register window. LABEL is passed to reg_save. This CFI tells the unwinder that it needs to restore the window registers from the previous frame's window save area. ??? Perhaps we should note in the CIE where windows are saved (instead of assuming 0(cfa)) and what registers are in the window. */ void dwarf2out_window_save (const char *label) { dw_cfi_ref cfi = new_cfi (); cfi->dw_cfi_opc = DW_CFA_GNU_window_save; add_fde_cfi (label, cfi); }
/* Add a CFI to update the running total of the size of arguments pushed onto the stack.
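DW_CFA_GNU_args_size records how many bytes of outgoing arguments are currently pushed at this point in the function; when an exception unwinds through a call made while, say, 16 bytes of arguments are on the stack, the unwinder uses the recorded value to pop that space as well.  The note is only emitted when the value actually changes (see the old_args_size check below).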
*/ void dwarf2out_args_size (const char *label, HOST_WIDE_INT size) { dw_cfi_ref cfi; if (size == old_args_size) return; old_args_size = size; cfi = new_cfi (); cfi->dw_cfi_opc = DW_CFA_GNU_args_size; cfi->dw_cfi_oprnd1.dw_cfi_offset = size; add_fde_cfi (label, cfi); } /* Entry point for saving a register to the stack. REG is the GCC register number. LABEL and OFFSET are passed to reg_save. */ void dwarf2out_reg_save (const char *label, unsigned int reg, HOST_WIDE_INT offset) { reg_save (label, DWARF_FRAME_REGNUM (reg), INVALID_REGNUM, offset); } /* Entry point for saving the return address in the stack. LABEL and OFFSET are passed to reg_save. */ void dwarf2out_return_save (const char *label, HOST_WIDE_INT offset) { reg_save (label, DWARF_FRAME_RETURN_COLUMN, INVALID_REGNUM, offset); } /* Entry point for saving the return address in a register. LABEL and SREG are passed to reg_save. */ void dwarf2out_return_reg (const char *label, unsigned int sreg) { reg_save (label, DWARF_FRAME_RETURN_COLUMN, DWARF_FRAME_REGNUM (sreg), 0); } /* Record the initial position of the return address. RTL is INCOMING_RETURN_ADDR_RTX. */ static void initial_return_save (rtx rtl) { unsigned int reg = INVALID_REGNUM; HOST_WIDE_INT offset = 0; switch (GET_CODE (rtl)) { case REG: /* RA is in a register. */ reg = DWARF_FRAME_REGNUM (REGNO (rtl)); break; case MEM: /* RA is on the stack. */ rtl = XEXP (rtl, 0); switch (GET_CODE (rtl)) { case REG: if (REGNO (rtl) != STACK_POINTER_REGNUM) abort (); offset = 0; break; case PLUS: if (REGNO (XEXP (rtl, 0)) != STACK_POINTER_REGNUM) abort (); offset = INTVAL (XEXP (rtl, 1)); break; case MINUS: if (REGNO (XEXP (rtl, 0)) != STACK_POINTER_REGNUM) abort (); offset = -INTVAL (XEXP (rtl, 1)); break; default: abort (); } break; case PLUS: /* The return address is at some offset from any value we can actually load. For instance, on the SPARC it is in %i7+8. Just ignore the offset for now; it doesn't matter for unwinding frames. */ if (GET_CODE (XEXP (rtl, 1)) != CONST_INT) abort (); initial_return_save (XEXP (rtl, 0)); return; default: abort (); } if (reg != DWARF_FRAME_RETURN_COLUMN) reg_save (NULL, DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset); } /* Given a SET, calculate the amount of stack adjustment it contains. */ static HOST_WIDE_INT stack_adjust_offset (rtx pattern) { rtx src = SET_SRC (pattern); rtx dest = SET_DEST (pattern); HOST_WIDE_INT offset = 0; enum rtx_code code; if (dest == stack_pointer_rtx) { /* (set (reg sp) (plus (reg sp) (const_int))) */ code = GET_CODE (src); if (! (code == PLUS || code == MINUS) || XEXP (src, 0) != stack_pointer_rtx || GET_CODE (XEXP (src, 1)) != CONST_INT) return 0; offset = INTVAL (XEXP (src, 1)); if (code == PLUS) offset = -offset; } else if (MEM_P (dest)) { /* (set (mem (pre_dec (reg sp))) (foo)) */ src = XEXP (dest, 0); code = GET_CODE (src); switch (code) { case PRE_MODIFY: case POST_MODIFY: if (XEXP (src, 0) == stack_pointer_rtx) { rtx val = XEXP (XEXP (src, 1), 1); /* We handle only adjustments by constant amount. 
*/ if (GET_CODE (XEXP (src, 1)) != PLUS || GET_CODE (val) != CONST_INT) abort (); offset = -INTVAL (val); break; } return 0; case PRE_DEC: case POST_DEC: if (XEXP (src, 0) == stack_pointer_rtx) { offset = GET_MODE_SIZE (GET_MODE (dest)); break; } return 0; case PRE_INC: case POST_INC: if (XEXP (src, 0) == stack_pointer_rtx) { offset = -GET_MODE_SIZE (GET_MODE (dest)); break; } return 0; default: return 0; } } else return 0; return offset; } /* Check INSN to see if it looks like a push or a stack adjustment, and make a note of it if it does. EH uses this information to find out how much extra space it needs to pop off the stack. */ static void dwarf2out_stack_adjust (rtx insn) { HOST_WIDE_INT offset; const char *label; int i; /* Don't handle epilogues at all. Certainly it would be wrong to do so with this function. Proper support would require all frame-related insns to be marked, and to be able to handle saving state around epilogues textually in the middle of the function. */ if (prologue_epilogue_contains (insn) || sibcall_epilogue_contains (insn)) return; if (!flag_asynchronous_unwind_tables && GET_CODE (insn) == CALL_INSN) { /* Extract the size of the args from the CALL rtx itself. */ insn = PATTERN (insn); if (GET_CODE (insn) == PARALLEL) insn = XVECEXP (insn, 0, 0); if (GET_CODE (insn) == SET) insn = SET_SRC (insn); if (GET_CODE (insn) != CALL) abort (); dwarf2out_args_size ("", INTVAL (XEXP (insn, 1))); return; } /* If only calls can throw, and we have a frame pointer, save up adjustments until we see the CALL_INSN. */ else if (!flag_asynchronous_unwind_tables && cfa.reg != STACK_POINTER_REGNUM) return; if (GET_CODE (insn) == BARRIER) { /* When we see a BARRIER, we know to reset args_size to 0. Usually the compiler will have already emitted a stack adjustment, but doesn't bother for calls to noreturn functions. */ #ifdef STACK_GROWS_DOWNWARD offset = -args_size; #else offset = args_size; #endif } else if (GET_CODE (PATTERN (insn)) == SET) offset = stack_adjust_offset (PATTERN (insn)); else if (GET_CODE (PATTERN (insn)) == PARALLEL || GET_CODE (PATTERN (insn)) == SEQUENCE) { /* There may be stack adjustments inside compound insns. Search for them. */ for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET) offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i)); } else return; if (offset == 0) return; if (cfa.reg == STACK_POINTER_REGNUM) cfa.offset += offset; #ifndef STACK_GROWS_DOWNWARD offset = -offset; #endif args_size += offset; if (args_size < 0) args_size = 0; label = dwarf2out_cfi_label (); def_cfa_1 (label, &cfa); dwarf2out_args_size (label, args_size); } #endif /* We delay emitting a register save until either (a) we reach the end of the prologue or (b) the register is clobbered. This clusters register saves so that there are fewer pc advances. */ struct queued_reg_save GTY(()) { struct queued_reg_save *next; rtx reg; HOST_WIDE_INT cfa_offset; rtx saved_reg; }; static GTY(()) struct queued_reg_save *queued_reg_saves; /* The caller's ORIG_REG is saved in SAVED_IN_REG. */ struct reg_saved_in_data GTY(()) { rtx orig_reg; rtx saved_in_reg; }; /* A list of registers saved in other registers. The list intentionally has a small maximum capacity of 4; if your port needs more than that, you might consider implementing a more efficient data structure. 
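As an example of what lands here: a prologue that carries the return-address register in a scratch register via an RTX_FRAME_RELATED_P copy, say (set (reg r10) (reg lr)), is queued by dwarf2out_frame_debug_expr below as a register-to-register save, and flush_queued_reg_saves then records orig_reg = lr, saved_in_reg = r10 in this array so that a later store of r10 to the stack can be attributed back to lr (the register names are purely illustrative).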
*/ static GTY(()) struct reg_saved_in_data regs_saved_in_regs[4]; static GTY(()) size_t num_regs_saved_in_regs; #if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO) static const char *last_reg_save_label; /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */ static void queue_reg_save (const char *label, rtx reg, rtx sreg, HOST_WIDE_INT offset) { struct queued_reg_save *q; /* Duplicates waste space, but it's also necessary to remove them for correctness, since the queue gets output in reverse order. */ for (q = queued_reg_saves; q != NULL; q = q->next) if (REGNO (q->reg) == REGNO (reg)) break; if (q == NULL) { q = ggc_alloc (sizeof (*q)); q->next = queued_reg_saves; queued_reg_saves = q; } q->reg = reg; q->cfa_offset = offset; q->saved_reg = sreg; last_reg_save_label = label; } /* Output all the entries in QUEUED_REG_SAVES. */ static void flush_queued_reg_saves (void) { struct queued_reg_save *q; for (q = queued_reg_saves; q; q = q->next) { size_t i; unsigned int reg, sreg; for (i = 0; i < num_regs_saved_in_regs; i++) if (REGNO (regs_saved_in_regs[i].orig_reg) == REGNO (q->reg)) break; if (q->saved_reg && i == num_regs_saved_in_regs) { if (i == ARRAY_SIZE (regs_saved_in_regs)) abort (); num_regs_saved_in_regs++; } if (i != num_regs_saved_in_regs) { regs_saved_in_regs[i].orig_reg = q->reg; regs_saved_in_regs[i].saved_in_reg = q->saved_reg; } reg = DWARF_FRAME_REGNUM (REGNO (q->reg)); if (q->saved_reg) sreg = DWARF_FRAME_REGNUM (REGNO (q->saved_reg)); else sreg = INVALID_REGNUM; reg_save (last_reg_save_label, reg, sreg, q->cfa_offset); } queued_reg_saves = NULL; last_reg_save_label = NULL; } /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved location for? Or, does it clobber a register which we've previously said that some other register is saved in, and for which we now have a new location for? */ static bool clobbers_queued_reg_save (rtx insn) { struct queued_reg_save *q; for (q = queued_reg_saves; q; q = q->next) { size_t i; if (modified_in_p (q->reg, insn)) return true; for (i = 0; i < num_regs_saved_in_regs; i++) if (REGNO (q->reg) == REGNO (regs_saved_in_regs[i].orig_reg) && modified_in_p (regs_saved_in_regs[i].saved_in_reg, insn)) return true; } return false; } /* What register, if any, is currently saved in REG? */ static rtx reg_saved_in (rtx reg) { unsigned int regn = REGNO (reg); size_t i; struct queued_reg_save *q; for (q = queued_reg_saves; q; q = q->next) if (q->saved_reg && regn == REGNO (q->saved_reg)) return q->reg; for (i = 0; i < num_regs_saved_in_regs; i++) if (regs_saved_in_regs[i].saved_in_reg && regn == REGNO (regs_saved_in_regs[i].saved_in_reg)) return regs_saved_in_regs[i].orig_reg; return NULL_RTX; } /* A temporary register holding an integral value used in adjusting SP or setting up the store_reg. The "offset" field holds the integer value, not an offset. */ static dw_cfa_location cfa_temp; /* Record call frame debugging information for an expression EXPR, which either sets SP or FP (adjusting how we calculate the frame address) or saves a register to the stack or another register. LABEL indicates the address of EXPR. This function encodes a state machine mapping rtxes to actions on cfa, cfa_store, and cfa_temp.reg. We describe these rules so users need not read the source code. 
The High-Level Picture

  Changes in the register we use to calculate the CFA:  Currently we
  assume that if you copy the CFA register into another register, we
  should take the other one as the new CFA register; this seems to work
  pretty well.  If it's wrong for some target, it's simple enough not to
  set RTX_FRAME_RELATED_P on the insn in question.

  Changes in the register we use for saving registers to the stack:
  This is usually SP, but not always.  Again, we deduce that if you
  copy SP into another register (and SP is not the CFA register),
  then the new register is the one we will be using for register saves.
  This also seems to work.

  Register saves: There's not much guesswork about this one; if
  RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
  register save, and the register used to calculate the destination
  had better be the one we think we're using for this purpose.

  It's also assumed that a copy from a call-saved register to another
  register is saving that register if RTX_FRAME_RELATED_P is set on
  that instruction.  If the copy is from a call-saved register to
  the *same* register, that means that the register is now the same
  value as in the caller.

  Except: If the register being saved is the CFA register, and the
  offset is nonzero, we are saving the CFA, so we assume we have to
  use DW_CFA_def_cfa_expression.  If the offset is 0, we assume that
  the intent is to save the value of SP from the previous frame.

  In addition, if a register has previously been saved to a different
  register,

  Invariants / Summaries of Rules

  cfa          current rule for calculating the CFA.  It usually
               consists of a register and an offset.
  cfa_store    register used by prologue code to save things to the stack
               cfa_store.offset is the offset from the value of
               cfa_store.reg to the actual CFA
  cfa_temp     register holding an integral value.  cfa_temp.offset
               stores the value, which will be used to adjust the
               stack pointer.  cfa_temp is also used like cfa_store,
               to track stores to the stack via fp or a temp reg.

  Rules  1- 4: Setting a register's value to cfa.reg or an expression
               with cfa.reg as the first operand changes the cfa.reg and
               its cfa.offset.  Rules 1 and 4 also set cfa_temp.reg and
               cfa_temp.offset.

  Rules  6- 9: Set a non-cfa.reg register value to a constant or an
               expression yielding a constant.  This sets cfa_temp.reg
               and cfa_temp.offset.

  Rule 5:      Create a new register cfa_store used to save items to the
               stack.

  Rules 10-14: Save a register to the stack.  Define offset as the
               difference of the original location and cfa_store's
               location (or cfa_temp's location if cfa_temp is used).

  The Rules

  "{a,b}" indicates a choice of a xor b.
  "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
  Rule  1:
  (set <reg1> <reg2>:cfa.reg)
  effects: cfa.reg = <reg1>
           cfa.offset unchanged
           cfa_temp.reg = <reg1>
           cfa_temp.offset = cfa.offset

  Rule  2:
  (set sp ({minus,plus,losum} {sp,fp}:cfa.reg {<const_int>,<reg>:cfa_temp.reg}))
  effects: cfa.reg = sp if fp used
           cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
           cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
             if cfa_store.reg==sp

  Rule  3:
  (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
  effects: cfa.reg = fp
           cfa_offset += +/- <const_int>

  Rule  4:
  (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
  constraints: <reg1> != fp
               <reg1> != sp
  effects: cfa.reg = <reg1>
           cfa_temp.reg = <reg1>
           cfa_temp.offset = cfa.offset

  Rule  5:
  (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
  constraints: <reg1> != fp
               <reg1> != sp
  effects: cfa_store.reg = <reg1>
           cfa_store.offset = cfa.offset - cfa_temp.offset

  Rule  6:
  (set <reg> <const_int>)
  effects: cfa_temp.reg = <reg>
           cfa_temp.offset = <const_int>

  Rule  7:
  (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
  effects: cfa_temp.reg = <reg1>
           cfa_temp.offset |= <const_int>

  Rule  8:
  (set <reg> (high <exp>))
  effects: none

  Rule  9:
  (set <reg> (lo_sum <exp> <const_int>))
  effects: cfa_temp.reg = <reg>
           cfa_temp.offset = <const_int>

  Rule 10:
  (set (mem (pre_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
  effects: cfa_store.offset -= <const_int>
           cfa.offset = cfa_store.offset if cfa.reg == sp
           cfa.reg = sp
           cfa.base_offset = -cfa_store.offset

  Rule 11:
  (set (mem ({pre_inc,pre_dec} sp:cfa_store.reg)) <reg>)
  effects: cfa_store.offset += -/+ mode_size(mem)
           cfa.offset = cfa_store.offset if cfa.reg == sp
           cfa.reg = sp
           cfa.base_offset = -cfa_store.offset

  Rule 12:
  (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>)) <reg2>)
  effects: cfa.reg = <reg1>
           cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset

  Rule 13:
  (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
  effects: cfa.reg = <reg1>
           cfa.base_offset = -{cfa_store,cfa_temp}.offset

  Rule 14:
  (set (mem (postinc <reg1>:cfa_temp <const_int>)) <reg2>)
  effects: cfa.reg = <reg1>
           cfa.base_offset = -cfa_temp.offset
           cfa_temp.offset -= mode_size(mem)  */
static void dwarf2out_frame_debug_expr (rtx expr, const char *label) { rtx src, dest; HOST_WIDE_INT offset; /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of the PARALLEL independently. The first element is always processed if it is a SET. This is for backward compatibility. Other elements are processed only if they are SETs and the RTX_FRAME_RELATED_P flag is set in them. */ if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE) { int par_index; int limit = XVECLEN (expr, 0); for (par_index = 0; par_index < limit; par_index++) if (GET_CODE (XVECEXP (expr, 0, par_index)) == SET && (RTX_FRAME_RELATED_P (XVECEXP (expr, 0, par_index)) || par_index == 0)) dwarf2out_frame_debug_expr (XVECEXP (expr, 0, par_index), label); return; } if (GET_CODE (expr) != SET) abort (); src = SET_SRC (expr); dest = SET_DEST (expr); if (GET_CODE (src) == REG) { rtx rsi = reg_saved_in (src); if (rsi) src = rsi; } switch (GET_CODE (dest)) { case REG: switch (GET_CODE (src)) { /* Setting FP from SP. */ case REG: if (cfa.reg == (unsigned) REGNO (src)) { /* Rule 1 */ /* Update the CFA rule wrt SP or FP. Make sure src is relative to the current CFA register. We used to require that dest be either SP or FP, but the ARM copies SP to a temporary register, and from there to FP. So we just rely on the backends to only set RTX_FRAME_RELATED_P on appropriate insns. */ cfa.reg = REGNO (dest); cfa_temp.reg = cfa.reg; cfa_temp.offset = cfa.offset; } else if (call_used_regs [REGNO (dest)] && ! fixed_regs [REGNO (dest)]) { /* Saving a register in a register. */ queue_reg_save (label, src, dest, 0); } else abort (); break; case PLUS: case MINUS: case LO_SUM: if (dest == stack_pointer_rtx) { /* Rule 2 */ /* Adjusting SP.
*/ switch (GET_CODE (XEXP (src, 1))) { case CONST_INT: offset = INTVAL (XEXP (src, 1)); break; case REG: if ((unsigned) REGNO (XEXP (src, 1)) != cfa_temp.reg) abort (); offset = cfa_temp.offset; break; default: abort (); } if (XEXP (src, 0) == hard_frame_pointer_rtx) { /* Restoring SP from FP in the epilogue. */ if (cfa.reg != (unsigned) HARD_FRAME_POINTER_REGNUM) abort (); cfa.reg = STACK_POINTER_REGNUM; } else if (GET_CODE (src) == LO_SUM) /* Assume we've set the source reg of the LO_SUM from sp. */ ; else if (XEXP (src, 0) != stack_pointer_rtx) abort (); if (GET_CODE (src) != MINUS) offset = -offset; if (cfa.reg == STACK_POINTER_REGNUM) cfa.offset += offset; if (cfa_store.reg == STACK_POINTER_REGNUM) cfa_store.offset += offset; } else if (dest == hard_frame_pointer_rtx) { /* Rule 3 */ /* Either setting the FP from an offset of the SP, or adjusting the FP */ if (! frame_pointer_needed) abort (); if (REG_P (XEXP (src, 0)) && (unsigned) REGNO (XEXP (src, 0)) == cfa.reg && GET_CODE (XEXP (src, 1)) == CONST_INT) { offset = INTVAL (XEXP (src, 1)); if (GET_CODE (src) != MINUS) offset = -offset; cfa.offset += offset; cfa.reg = HARD_FRAME_POINTER_REGNUM; } else abort (); } else { if (GET_CODE (src) == MINUS) abort (); /* Rule 4 */ if (REG_P (XEXP (src, 0)) && REGNO (XEXP (src, 0)) == cfa.reg && GET_CODE (XEXP (src, 1)) == CONST_INT) { /* Setting a temporary CFA register that will be copied into the FP later on. */ offset = - INTVAL (XEXP (src, 1)); cfa.offset += offset; cfa.reg = REGNO (dest); /* Or used to save regs to the stack. */ cfa_temp.reg = cfa.reg; cfa_temp.offset = cfa.offset; } /* Rule 5 */ else if (REG_P (XEXP (src, 0)) && REGNO (XEXP (src, 0)) == cfa_temp.reg && XEXP (src, 1) == stack_pointer_rtx) { /* Setting a scratch register that we will use instead of SP for saving registers to the stack. */ if (cfa.reg != STACK_POINTER_REGNUM) abort (); cfa_store.reg = REGNO (dest); cfa_store.offset = cfa.offset - cfa_temp.offset; } /* Rule 9 */ else if (GET_CODE (src) == LO_SUM && GET_CODE (XEXP (src, 1)) == CONST_INT) { cfa_temp.reg = REGNO (dest); cfa_temp.offset = INTVAL (XEXP (src, 1)); } else abort (); } break; /* Rule 6 */ case CONST_INT: cfa_temp.reg = REGNO (dest); cfa_temp.offset = INTVAL (src); break; /* Rule 7 */ case IOR: if (!REG_P (XEXP (src, 0)) || (unsigned) REGNO (XEXP (src, 0)) != cfa_temp.reg || GET_CODE (XEXP (src, 1)) != CONST_INT) abort (); if ((unsigned) REGNO (dest) != cfa_temp.reg) cfa_temp.reg = REGNO (dest); cfa_temp.offset |= INTVAL (XEXP (src, 1)); break; /* Skip over HIGH, assuming it will be followed by a LO_SUM, which will fill in all of the bits. */ /* Rule 8 */ case HIGH: break; default: abort (); } def_cfa_1 (label, &cfa); break; case MEM: if (!REG_P (src)) abort (); /* Saving a register to the stack. Make sure dest is relative to the CFA register. */ switch (GET_CODE (XEXP (dest, 0))) { /* Rule 10 */ /* With a push. */ case PRE_MODIFY: /* We can't handle variable size modifications. 
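Here "variable size" means the PRE_MODIFY adjustment is not a CONST_INT; only constant-sized pushes can be described by Rule 10, so anything else aborts.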
*/ if (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1)) != CONST_INT) abort (); offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1)); if (REGNO (XEXP (XEXP (dest, 0), 0)) != STACK_POINTER_REGNUM || cfa_store.reg != STACK_POINTER_REGNUM) abort (); cfa_store.offset += offset; if (cfa.reg == STACK_POINTER_REGNUM) cfa.offset = cfa_store.offset; offset = -cfa_store.offset; break; /* Rule 11 */ case PRE_INC: case PRE_DEC: offset = GET_MODE_SIZE (GET_MODE (dest)); if (GET_CODE (XEXP (dest, 0)) == PRE_INC) offset = -offset; if (REGNO (XEXP (XEXP (dest, 0), 0)) != STACK_POINTER_REGNUM || cfa_store.reg != STACK_POINTER_REGNUM) abort (); cfa_store.offset += offset; if (cfa.reg == STACK_POINTER_REGNUM) cfa.offset = cfa_store.offset; offset = -cfa_store.offset; break; /* Rule 12 */ /* With an offset. */ case PLUS: case MINUS: case LO_SUM: if (GET_CODE (XEXP (XEXP (dest, 0), 1)) != CONST_INT) abort (); offset = INTVAL (XEXP (XEXP (dest, 0), 1)); if (GET_CODE (XEXP (dest, 0)) == MINUS) offset = -offset; if (cfa_store.reg == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0))) offset -= cfa_store.offset; else if (cfa_temp.reg == (unsigned) REGNO (XEXP (XEXP (dest, 0), 0))) offset -= cfa_temp.offset; else abort (); break; /* Rule 13 */ /* Without an offset. */ case REG: if (cfa_store.reg == (unsigned) REGNO (XEXP (dest, 0))) offset = -cfa_store.offset; else if (cfa_temp.reg == (unsigned) REGNO (XEXP (dest, 0))) offset = -cfa_temp.offset; else abort (); break; /* Rule 14 */ case POST_INC: if (cfa_temp.reg != (unsigned) REGNO (XEXP (XEXP (dest, 0), 0))) abort (); offset = -cfa_temp.offset; cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest)); break; default: abort (); } if (REGNO (src) != STACK_POINTER_REGNUM && REGNO (src) != HARD_FRAME_POINTER_REGNUM && (unsigned) REGNO (src) == cfa.reg) { /* We're storing the current CFA reg into the stack. */ if (cfa.offset == 0) { /* If the source register is exactly the CFA, assume we're saving SP like any other register; this happens on the ARM. */ def_cfa_1 (label, &cfa); queue_reg_save (label, stack_pointer_rtx, NULL_RTX, offset); break; } else { /* Otherwise, we'll need to look in the stack to calculate the CFA. */ rtx x = XEXP (dest, 0); if (!REG_P (x)) x = XEXP (x, 0); if (!REG_P (x)) abort (); cfa.reg = REGNO (x); cfa.base_offset = offset; cfa.indirect = 1; def_cfa_1 (label, &cfa); break; } } def_cfa_1 (label, &cfa); queue_reg_save (label, src, NULL_RTX, offset); break; default: abort (); } } /* Record call frame debugging information for INSN, which either sets SP or FP (adjusting how we calculate the frame address) or saves a register to the stack. If INSN is NULL_RTX, initialize our state. */ void dwarf2out_frame_debug (rtx insn) { const char *label; rtx src; if (insn == NULL_RTX) { size_t i; /* Flush any queued register saves. */ flush_queued_reg_saves (); /* Set up state for generating call frame debug info. */ lookup_cfa (&cfa); if (cfa.reg != (unsigned long) DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM)) abort (); cfa.reg = STACK_POINTER_REGNUM; cfa_store = cfa; cfa_temp.reg = -1; cfa_temp.offset = 0; for (i = 0; i < num_regs_saved_in_regs; i++) { regs_saved_in_regs[i].orig_reg = NULL_RTX; regs_saved_in_regs[i].saved_in_reg = NULL_RTX; } num_regs_saved_in_regs = 0; return; } if (GET_CODE (insn) != INSN || clobbers_queued_reg_save (insn)) flush_queued_reg_saves (); if (! 
RTX_FRAME_RELATED_P (insn)) { if (!ACCUMULATE_OUTGOING_ARGS) dwarf2out_stack_adjust (insn); return; } label = dwarf2out_cfi_label (); src = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX); if (src) insn = XEXP (src, 0); else insn = PATTERN (insn); dwarf2out_frame_debug_expr (insn, label); } #endif /* Describe for the GTY machinery what parts of dw_cfi_oprnd1 are used. */ static enum dw_cfi_oprnd_type dw_cfi_oprnd1_desc (enum dwarf_call_frame_info cfi); static enum dw_cfi_oprnd_type dw_cfi_oprnd1_desc (enum dwarf_call_frame_info cfi) { switch (cfi) { case DW_CFA_nop: case DW_CFA_GNU_window_save: return dw_cfi_oprnd_unused; case DW_CFA_set_loc: case DW_CFA_advance_loc1: case DW_CFA_advance_loc2: case DW_CFA_advance_loc4: case DW_CFA_MIPS_advance_loc8: return dw_cfi_oprnd_addr; case DW_CFA_offset: case DW_CFA_offset_extended: case DW_CFA_def_cfa: case DW_CFA_offset_extended_sf: case DW_CFA_def_cfa_sf: case DW_CFA_restore_extended: case DW_CFA_undefined: case DW_CFA_same_value: case DW_CFA_def_cfa_register: case DW_CFA_register: return dw_cfi_oprnd_reg_num; case DW_CFA_def_cfa_offset: case DW_CFA_GNU_args_size: case DW_CFA_def_cfa_offset_sf: return dw_cfi_oprnd_offset; case DW_CFA_def_cfa_expression: case DW_CFA_expression: return dw_cfi_oprnd_loc; default: abort (); } } /* Describe for the GTY machinery what parts of dw_cfi_oprnd2 are used. */ static enum dw_cfi_oprnd_type dw_cfi_oprnd2_desc (enum dwarf_call_frame_info cfi); static enum dw_cfi_oprnd_type dw_cfi_oprnd2_desc (enum dwarf_call_frame_info cfi) { switch (cfi) { case DW_CFA_def_cfa: case DW_CFA_def_cfa_sf: case DW_CFA_offset: case DW_CFA_offset_extended_sf: case DW_CFA_offset_extended: return dw_cfi_oprnd_offset; case DW_CFA_register: return dw_cfi_oprnd_reg_num; default: return dw_cfi_oprnd_unused; } } #if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO) /* Map register numbers held in the call frame info that gcc has collected using DWARF_FRAME_REGNUM to those that should be output in .debug_frame and .eh_frame. */ #ifndef DWARF2_FRAME_REG_OUT #define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO) #endif /* Output a Call Frame Information opcode and its operand(s). 
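For the compactly encoded opcodes the operand lives in the low six bits of the opcode byte. As an illustration (the numbers are only an example), with a DWARF_CIE_DATA_ALIGNMENT of -4, a register saved in column 5 at CFA-8 comes out as the single byte 0x85 (DW_CFA_offset | 5) followed by the ULEB128 factored offset 2; opcodes without an embedded operand are emitted as a full opcode byte followed by their ULEB128/SLEB128, address or expression operands.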
*/ static void output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh) { unsigned long r; if (cfi->dw_cfi_opc == DW_CFA_advance_loc) dw2_asm_output_data (1, (cfi->dw_cfi_opc | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)), "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX, cfi->dw_cfi_oprnd1.dw_cfi_offset); else if (cfi->dw_cfi_opc == DW_CFA_offset) { r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh); dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)), "DW_CFA_offset, column 0x%lx", r); dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL); } else if (cfi->dw_cfi_opc == DW_CFA_restore) { r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh); dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)), "DW_CFA_restore, column 0x%lx", r); } else { dw2_asm_output_data (1, cfi->dw_cfi_opc, "%s", dwarf_cfi_name (cfi->dw_cfi_opc)); switch (cfi->dw_cfi_opc) { case DW_CFA_set_loc: if (for_eh) dw2_asm_output_encoded_addr_rtx ( ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0), gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr), NULL); else dw2_asm_output_addr (DWARF2_ADDR_SIZE, cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL); break; case DW_CFA_advance_loc1: dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr, fde->dw_fde_current_label, NULL); fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr; break; case DW_CFA_advance_loc2: dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr, fde->dw_fde_current_label, NULL); fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr; break; case DW_CFA_advance_loc4: dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr, fde->dw_fde_current_label, NULL); fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr; break; case DW_CFA_MIPS_advance_loc8: dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr, fde->dw_fde_current_label, NULL); fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr; break; case DW_CFA_offset_extended: case DW_CFA_def_cfa: r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh); dw2_asm_output_data_uleb128 (r, NULL); dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL); break; case DW_CFA_offset_extended_sf: case DW_CFA_def_cfa_sf: r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh); dw2_asm_output_data_uleb128 (r, NULL); dw2_asm_output_data_sleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL); break; case DW_CFA_restore_extended: case DW_CFA_undefined: case DW_CFA_same_value: case DW_CFA_def_cfa_register: r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh); dw2_asm_output_data_uleb128 (r, NULL); break; case DW_CFA_register: r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh); dw2_asm_output_data_uleb128 (r, NULL); r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh); dw2_asm_output_data_uleb128 (r, NULL); break; case DW_CFA_def_cfa_offset: case DW_CFA_GNU_args_size: dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL); break; case DW_CFA_def_cfa_offset_sf: dw2_asm_output_data_sleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL); break; case DW_CFA_GNU_window_save: break; case DW_CFA_def_cfa_expression: case DW_CFA_expression: output_cfa_loc (cfi); break; case DW_CFA_GNU_negative_offset_extended: /* Obsoleted by DW_CFA_offset_extended_sf. */ abort (); default: break; } } } /* Output the call frame information used to record information that relates to calculating the frame pointer, and records the location of saved registers. 
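Roughly, the output is one length-prefixed CIE (version, augmentation string such as "zPLR" for EH frames, code and data alignment factors, return address column, and the initial CFA instructions), followed by one length-prefixed FDE per function (CIE pointer, initial location, address range, optional augmentation data such as the LSDA pointer, then that function's CFI instructions), each padded out to an address-size boundary.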
*/ static void output_call_frame_info (int for_eh) { unsigned int i; dw_fde_ref fde; dw_cfi_ref cfi; char l1[20], l2[20], section_start_label[20]; bool any_lsda_needed = false; char augmentation[6]; int augmentation_size; int fde_encoding = DW_EH_PE_absptr; int per_encoding = DW_EH_PE_absptr; int lsda_encoding = DW_EH_PE_absptr; /* Don't emit a CIE if there won't be any FDEs. */ if (fde_table_in_use == 0) return; /* If we make FDEs linkonce, we may have to emit an empty label for an FDE that wouldn't otherwise be emitted. We want to avoid having an FDE kept around when the function it refers to is discarded. (Example where this matters: a primary function template in C++ requires EH information, but an explicit specialization doesn't. */ if (TARGET_USES_WEAK_UNWIND_INFO && ! flag_asynchronous_unwind_tables && for_eh) for (i = 0; i < fde_table_in_use; i++) if ((fde_table[i].nothrow || fde_table[i].all_throwers_are_sibcalls) && !fde_table[i].uses_eh_lsda && ! DECL_ONE_ONLY (fde_table[i].decl)) targetm.asm_out.unwind_label (asm_out_file, fde_table[i].decl, for_eh, /* empty */ 1); /* If we don't have any functions we'll want to unwind out of, don't emit any EH unwind information. Note that if exceptions aren't enabled, we won't have collected nothrow information, and if we asked for asynchronous tables, we always want this info. */ if (for_eh) { bool any_eh_needed = !flag_exceptions || flag_asynchronous_unwind_tables; for (i = 0; i < fde_table_in_use; i++) if (fde_table[i].uses_eh_lsda) any_eh_needed = any_lsda_needed = true; else if (TARGET_USES_WEAK_UNWIND_INFO && DECL_ONE_ONLY (fde_table[i].decl)) any_eh_needed = 1; else if (! fde_table[i].nothrow && ! fde_table[i].all_throwers_are_sibcalls) any_eh_needed = true; if (! any_eh_needed) return; } /* We're going to be generating comments, so turn on app. */ if (flag_debug_asm) app_enable (); if (for_eh) targetm.asm_out.eh_frame_section (); else named_section_flags (DEBUG_FRAME_SECTION, SECTION_DEBUG); ASM_GENERATE_INTERNAL_LABEL (section_start_label, FRAME_BEGIN_LABEL, for_eh); ASM_OUTPUT_LABEL (asm_out_file, section_start_label); /* Output the CIE. */ ASM_GENERATE_INTERNAL_LABEL (l1, CIE_AFTER_SIZE_LABEL, for_eh); ASM_GENERATE_INTERNAL_LABEL (l2, CIE_END_LABEL, for_eh); dw2_asm_output_delta (for_eh ? 4 : DWARF_OFFSET_SIZE, l2, l1, "Length of Common Information Entry"); ASM_OUTPUT_LABEL (asm_out_file, l1); /* Now that the CIE pointer is PC-relative for EH, use 0 to identify the CIE. */ dw2_asm_output_data ((for_eh ? 4 : DWARF_OFFSET_SIZE), (for_eh ? 0 : DW_CIE_ID), "CIE Identifier Tag"); dw2_asm_output_data (1, DW_CIE_VERSION, "CIE Version"); augmentation[0] = 0; augmentation_size = 0; if (for_eh) { char *p; /* Augmentation: z Indicates that a uleb128 is present to size the augmentation section. L Indicates the encoding (and thus presence) of an LSDA pointer in the FDE augmentation. R Indicates a non-default pointer encoding for FDE code pointers. P Indicates the presence of an encoding + language personality routine in the CIE augmentation. */ fde_encoding = TARGET_USES_WEAK_UNWIND_INFO ? 
ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2, /*global=*/1) : ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0); per_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2, /*global=*/1); lsda_encoding = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/0); p = augmentation + 1; if (eh_personality_libfunc) { *p++ = 'P'; augmentation_size += 1 + size_of_encoded_value (per_encoding); } if (any_lsda_needed) { *p++ = 'L'; augmentation_size += 1; } if (fde_encoding != DW_EH_PE_absptr) { *p++ = 'R'; augmentation_size += 1; } if (p > augmentation + 1) { augmentation[0] = 'z'; *p = '\0'; } /* Ug. Some platforms can't do unaligned dynamic relocations at all. */ if (eh_personality_libfunc && per_encoding == DW_EH_PE_aligned) { int offset = ( 4 /* Length */ + 4 /* CIE Id */ + 1 /* CIE version */ + strlen (augmentation) + 1 /* Augmentation */ + size_of_uleb128 (1) /* Code alignment */ + size_of_sleb128 (DWARF_CIE_DATA_ALIGNMENT) + 1 /* RA column */ + 1 /* Augmentation size */ + 1 /* Personality encoding */ ); int pad = -offset & (PTR_SIZE - 1); augmentation_size += pad; /* Augmentations should be small, so there's scarce need to iterate for a solution. Die if we exceed one uleb128 byte. */ if (size_of_uleb128 (augmentation_size) != 1) abort (); } } dw2_asm_output_nstring (augmentation, -1, "CIE Augmentation"); dw2_asm_output_data_uleb128 (1, "CIE Code Alignment Factor"); dw2_asm_output_data_sleb128 (DWARF_CIE_DATA_ALIGNMENT, "CIE Data Alignment Factor"); if (DW_CIE_VERSION == 1) dw2_asm_output_data (1, DWARF_FRAME_RETURN_COLUMN, "CIE RA Column"); else dw2_asm_output_data_uleb128 (DWARF_FRAME_RETURN_COLUMN, "CIE RA Column"); if (augmentation[0]) { dw2_asm_output_data_uleb128 (augmentation_size, "Augmentation size"); if (eh_personality_libfunc) { dw2_asm_output_data (1, per_encoding, "Personality (%s)", eh_data_format_name (per_encoding)); dw2_asm_output_encoded_addr_rtx (per_encoding, eh_personality_libfunc, NULL); } if (any_lsda_needed) dw2_asm_output_data (1, lsda_encoding, "LSDA Encoding (%s)", eh_data_format_name (lsda_encoding)); if (fde_encoding != DW_EH_PE_absptr) dw2_asm_output_data (1, fde_encoding, "FDE Encoding (%s)", eh_data_format_name (fde_encoding)); } for (cfi = cie_cfi_head; cfi != NULL; cfi = cfi->dw_cfi_next) output_cfi (cfi, NULL, for_eh); /* Pad the CIE out to an address sized boundary. */ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (for_eh ? PTR_SIZE : DWARF2_ADDR_SIZE)); ASM_OUTPUT_LABEL (asm_out_file, l2); /* Loop through all of the FDE's. */ for (i = 0; i < fde_table_in_use; i++) { fde = &fde_table[i]; /* Don't emit EH unwind info for leaf functions that don't need it. */ if (for_eh && !flag_asynchronous_unwind_tables && flag_exceptions && (fde->nothrow || fde->all_throwers_are_sibcalls) && (! TARGET_USES_WEAK_UNWIND_INFO || ! DECL_ONE_ONLY (fde->decl)) && !fde->uses_eh_lsda) continue; targetm.asm_out.unwind_label (asm_out_file, fde->decl, for_eh, /* empty */ 0); targetm.asm_out.internal_label (asm_out_file, FDE_LABEL, for_eh + i * 2); ASM_GENERATE_INTERNAL_LABEL (l1, FDE_AFTER_SIZE_LABEL, for_eh + i * 2); ASM_GENERATE_INTERNAL_LABEL (l2, FDE_END_LABEL, for_eh + i * 2); dw2_asm_output_delta (for_eh ? 
4 : DWARF_OFFSET_SIZE, l2, l1, "FDE Length"); ASM_OUTPUT_LABEL (asm_out_file, l1); if (for_eh) dw2_asm_output_delta (4, l1, section_start_label, "FDE CIE offset"); else dw2_asm_output_offset (DWARF_OFFSET_SIZE, section_start_label, "FDE CIE offset"); if (for_eh) { if (TARGET_USES_WEAK_UNWIND_INFO && DECL_ONE_ONLY (fde->decl)) dw2_asm_output_encoded_addr_rtx (fde_encoding, gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (fde->decl))), "FDE initial location"); else dw2_asm_output_encoded_addr_rtx (fde_encoding, gen_rtx_SYMBOL_REF (Pmode, fde->dw_fde_begin), "FDE initial location"); dw2_asm_output_delta (size_of_encoded_value (fde_encoding), fde->dw_fde_end, fde->dw_fde_begin, "FDE address range"); } else { dw2_asm_output_addr (DWARF2_ADDR_SIZE, fde->dw_fde_begin, "FDE initial location"); dw2_asm_output_delta (DWARF2_ADDR_SIZE, fde->dw_fde_end, fde->dw_fde_begin, "FDE address range"); } if (augmentation[0]) { if (any_lsda_needed) { int size = size_of_encoded_value (lsda_encoding); if (lsda_encoding == DW_EH_PE_aligned) { int offset = ( 4 /* Length */ + 4 /* CIE offset */ + 2 * size_of_encoded_value (fde_encoding) + 1 /* Augmentation size */ ); int pad = -offset & (PTR_SIZE - 1); size += pad; if (size_of_uleb128 (size) != 1) abort (); } dw2_asm_output_data_uleb128 (size, "Augmentation size"); if (fde->uses_eh_lsda) { ASM_GENERATE_INTERNAL_LABEL (l1, "LLSDA", fde->funcdef_number); dw2_asm_output_encoded_addr_rtx ( lsda_encoding, gen_rtx_SYMBOL_REF (Pmode, l1), "Language Specific Data Area"); } else { if (lsda_encoding == DW_EH_PE_aligned) ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (PTR_SIZE)); dw2_asm_output_data (size_of_encoded_value (lsda_encoding), 0, "Language Specific Data Area (none)"); } } else dw2_asm_output_data_uleb128 (0, "Augmentation size"); } /* Loop through the Call Frame Instructions associated with this FDE. */ fde->dw_fde_current_label = fde->dw_fde_begin; for (cfi = fde->dw_fde_cfi; cfi != NULL; cfi = cfi->dw_cfi_next) output_cfi (cfi, fde, for_eh); /* Pad the FDE out to an address sized boundary. */ ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 ((for_eh ? PTR_SIZE : DWARF2_ADDR_SIZE))); ASM_OUTPUT_LABEL (asm_out_file, l2); } if (for_eh && targetm.terminate_dw2_eh_frame_info) dw2_asm_output_data (4, 0, "End of Table"); #ifdef MIPS_DEBUGGING_INFO /* Work around Irix 6 assembler bug whereby labels at the end of a section get a value of 0. Putting .align 0 after the label fixes it. */ ASM_OUTPUT_ALIGN (asm_out_file, 0); #endif /* Turn off app to make assembly quicker. */ if (flag_debug_asm) app_disable (); } /* Output a marker (i.e. a label) for the beginning of a function, before the prologue. */ void dwarf2out_begin_prologue (unsigned int line ATTRIBUTE_UNUSED, const char *file ATTRIBUTE_UNUSED) { char label[MAX_ARTIFICIAL_LABEL_BYTES]; dw_fde_ref fde; current_function_func_begin_label = 0; #ifdef IA64_UNWIND_INFO /* ??? current_function_func_begin_label is also used by except.c for call-site information. We must emit this label if it might be used. */ if ((! flag_exceptions || USING_SJLJ_EXCEPTIONS) && ! dwarf2out_do_frame ()) return; #else if (! dwarf2out_do_frame ()) return; #endif function_section (current_function_decl); ASM_GENERATE_INTERNAL_LABEL (label, FUNC_BEGIN_LABEL, current_function_funcdef_no); ASM_OUTPUT_DEBUG_LABEL (asm_out_file, FUNC_BEGIN_LABEL, current_function_funcdef_no); current_function_func_begin_label = get_identifier (label); #ifdef IA64_UNWIND_INFO /* We can elide the fde allocation if we're not emitting debug info. */ if (! 
dwarf2out_do_frame ()) return; #endif /* Expand the fde table if necessary. */ if (fde_table_in_use == fde_table_allocated) { fde_table_allocated += FDE_TABLE_INCREMENT; fde_table = ggc_realloc (fde_table, fde_table_allocated * sizeof (dw_fde_node)); memset (fde_table + fde_table_in_use, 0, FDE_TABLE_INCREMENT * sizeof (dw_fde_node)); } /* Record the FDE associated with this function. */ current_funcdef_fde = fde_table_in_use; /* Add the new FDE at the end of the fde_table. */ fde = &fde_table[fde_table_in_use++]; fde->decl = current_function_decl; fde->dw_fde_begin = xstrdup (label); fde->dw_fde_current_label = NULL; fde->dw_fde_end = NULL; fde->dw_fde_cfi = NULL; fde->funcdef_number = current_function_funcdef_no; fde->nothrow = current_function_nothrow; fde->uses_eh_lsda = cfun->uses_eh_lsda; fde->all_throwers_are_sibcalls = cfun->all_throwers_are_sibcalls; args_size = old_args_size = 0; /* We only want to output line number information for the genuine dwarf2 prologue case, not the eh frame case. */ #ifdef DWARF2_DEBUGGING_INFO if (file) dwarf2out_source_line (line, file); #endif } /* Output a marker (i.e. a label) for the absolute end of the generated code for a function definition. This gets called *after* the epilogue code has been generated. */ void dwarf2out_end_epilogue (unsigned int line ATTRIBUTE_UNUSED, const char *file ATTRIBUTE_UNUSED) { dw_fde_ref fde; char label[MAX_ARTIFICIAL_LABEL_BYTES]; /* Output a label to mark the endpoint of the code generated for this function. */ ASM_GENERATE_INTERNAL_LABEL (label, FUNC_END_LABEL, current_function_funcdef_no); ASM_OUTPUT_LABEL (asm_out_file, label); fde = &fde_table[fde_table_in_use - 1]; fde->dw_fde_end = xstrdup (label); } void dwarf2out_frame_init (void) { /* Allocate the initial hunk of the fde_table. */ fde_table = ggc_alloc_cleared (FDE_TABLE_INCREMENT * sizeof (dw_fde_node)); fde_table_allocated = FDE_TABLE_INCREMENT; fde_table_in_use = 0; /* Generate the CFA instructions common to all FDE's. Do it now for the sake of lookup_cfa. */ #ifdef DWARF2_UNWIND_INFO /* On entry, the Canonical Frame Address is at SP. */ dwarf2out_def_cfa (NULL, STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET); initial_return_save (INCOMING_RETURN_ADDR_RTX); #endif } void dwarf2out_frame_finish (void) { /* Output call frame information. */ if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG) output_call_frame_info (0); if (! USING_SJLJ_EXCEPTIONS && (flag_unwind_tables || flag_exceptions)) output_call_frame_info (1); } #endif /* And now, the subset of the debugging information support code necessary for emitting location expressions. */ /* We need some way to distinguish DW_OP_addr with a direct symbol relocation from DW_OP_addr with a dtp-relative symbol relocation. */ #define INTERNAL_DW_OP_tls_addr (0x100 + DW_OP_addr) typedef struct dw_val_struct *dw_val_ref; typedef struct die_struct *dw_die_ref; typedef struct dw_loc_descr_struct *dw_loc_descr_ref; typedef struct dw_loc_list_struct *dw_loc_list_ref; /* Each DIE may have a series of attribute/value pairs. Values can take on several forms. The forms that are used in this implementation are listed below. 
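For example, a DW_AT_low_pc attribute carries a dw_val_class_lbl_id value, DW_AT_byte_size a dw_val_class_unsigned_const, and DW_AT_location either a dw_val_class_loc expression or a dw_val_class_loc_list.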
*/ enum dw_val_class { dw_val_class_addr, dw_val_class_offset, dw_val_class_loc, dw_val_class_loc_list, dw_val_class_range_list, dw_val_class_const, dw_val_class_unsigned_const, dw_val_class_long_long, dw_val_class_vec, dw_val_class_flag, dw_val_class_die_ref, dw_val_class_fde_ref, dw_val_class_lbl_id, dw_val_class_lbl_offset, dw_val_class_str }; /* Describe a double word constant value. */ /* ??? Every instance of long_long in the code really means CONST_DOUBLE. */ typedef struct dw_long_long_struct GTY(()) { unsigned long hi; unsigned long low; } dw_long_long_const; /* Describe a floating point constant value, or a vector constant value. */ typedef struct dw_vec_struct GTY(()) { unsigned char * GTY((length ("%h.length"))) array; unsigned length; unsigned elt_size; } dw_vec_const; /* The dw_val_node describes an attribute's value, as it is represented internally. */ typedef struct dw_val_struct GTY(()) { enum dw_val_class val_class; union dw_val_struct_union { rtx GTY ((tag ("dw_val_class_addr"))) val_addr; unsigned HOST_WIDE_INT GTY ((tag ("dw_val_class_offset"))) val_offset; dw_loc_list_ref GTY ((tag ("dw_val_class_loc_list"))) val_loc_list; dw_loc_descr_ref GTY ((tag ("dw_val_class_loc"))) val_loc; HOST_WIDE_INT GTY ((default)) val_int; unsigned HOST_WIDE_INT GTY ((tag ("dw_val_class_unsigned_const"))) val_unsigned; dw_long_long_const GTY ((tag ("dw_val_class_long_long"))) val_long_long; dw_vec_const GTY ((tag ("dw_val_class_vec"))) val_vec; struct dw_val_die_union { dw_die_ref die; int external; } GTY ((tag ("dw_val_class_die_ref"))) val_die_ref; unsigned GTY ((tag ("dw_val_class_fde_ref"))) val_fde_index; struct indirect_string_node * GTY ((tag ("dw_val_class_str"))) val_str; char * GTY ((tag ("dw_val_class_lbl_id"))) val_lbl_id; unsigned char GTY ((tag ("dw_val_class_flag"))) val_flag; } GTY ((desc ("%1.val_class"))) v; } dw_val_node; /* Locations in memory are described using a sequence of stack machine operations. */ typedef struct dw_loc_descr_struct GTY(()) { dw_loc_descr_ref dw_loc_next; enum dwarf_location_atom dw_loc_opc; dw_val_node dw_loc_oprnd1; dw_val_node dw_loc_oprnd2; int dw_loc_addr; } dw_loc_descr_node; /* Location lists are ranges + location descriptions for that range, so you can track variables that are in different places over their entire life. */ typedef struct dw_loc_list_struct GTY(()) { dw_loc_list_ref dw_loc_next; const char *begin; /* Label for begin address of range */ const char *end; /* Label for end address of range */ char *ll_symbol; /* Label for beginning of location list. Only on head of list */ const char *section; /* Section this loclist is relative to */ dw_loc_descr_ref expr; } dw_loc_list_node; #if defined (DWARF2_DEBUGGING_INFO) || defined (DWARF2_UNWIND_INFO) static const char *dwarf_stack_op_name (unsigned); static dw_loc_descr_ref new_loc_descr (enum dwarf_location_atom, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT); static void add_loc_descr (dw_loc_descr_ref *, dw_loc_descr_ref); static unsigned long size_of_loc_descr (dw_loc_descr_ref); static unsigned long size_of_locs (dw_loc_descr_ref); static void output_loc_operands (dw_loc_descr_ref); static void output_loc_sequence (dw_loc_descr_ref); /* Convert a DWARF stack opcode into its string name. 
*/ static const char * dwarf_stack_op_name (unsigned int op) { switch (op) { case DW_OP_addr: case INTERNAL_DW_OP_tls_addr: return "DW_OP_addr"; case DW_OP_deref: return "DW_OP_deref"; case DW_OP_const1u: return "DW_OP_const1u"; case DW_OP_const1s: return "DW_OP_const1s"; case DW_OP_const2u: return "DW_OP_const2u"; case DW_OP_const2s: return "DW_OP_const2s"; case DW_OP_const4u: return "DW_OP_const4u"; case DW_OP_const4s: return "DW_OP_const4s"; case DW_OP_const8u: return "DW_OP_const8u"; case DW_OP_const8s: return "DW_OP_const8s"; case DW_OP_constu: return "DW_OP_constu"; case DW_OP_consts: return "DW_OP_consts"; case DW_OP_dup: return "DW_OP_dup"; case DW_OP_drop: return "DW_OP_drop"; case DW_OP_over: return "DW_OP_over"; case DW_OP_pick: return "DW_OP_pick"; case DW_OP_swap: return "DW_OP_swap"; case DW_OP_rot: return "DW_OP_rot"; case DW_OP_xderef: return "DW_OP_xderef"; case DW_OP_abs: return "DW_OP_abs"; case DW_OP_and: return "DW_OP_and"; case DW_OP_div: return "DW_OP_div"; case DW_OP_minus: return "DW_OP_minus"; case DW_OP_mod: return "DW_OP_mod"; case DW_OP_mul: return "DW_OP_mul"; case DW_OP_neg: return "DW_OP_neg"; case DW_OP_not: return "DW_OP_not"; case DW_OP_or: return "DW_OP_or"; case DW_OP_plus: return "DW_OP_plus"; case DW_OP_plus_uconst: return "DW_OP_plus_uconst"; case DW_OP_shl: return "DW_OP_shl"; case DW_OP_shr: return "DW_OP_shr"; case DW_OP_shra: return "DW_OP_shra"; case DW_OP_xor: return "DW_OP_xor"; case DW_OP_bra: return "DW_OP_bra"; case DW_OP_eq: return "DW_OP_eq"; case DW_OP_ge: return "DW_OP_ge"; case DW_OP_gt: return "DW_OP_gt"; case DW_OP_le: return "DW_OP_le"; case DW_OP_lt: return "DW_OP_lt"; case DW_OP_ne: return "DW_OP_ne"; case DW_OP_skip: return "DW_OP_skip"; case DW_OP_lit0: return "DW_OP_lit0"; case DW_OP_lit1: return "DW_OP_lit1"; case DW_OP_lit2: return "DW_OP_lit2"; case DW_OP_lit3: return "DW_OP_lit3"; case DW_OP_lit4: return "DW_OP_lit4"; case DW_OP_lit5: return "DW_OP_lit5"; case DW_OP_lit6: return "DW_OP_lit6"; case DW_OP_lit7: return "DW_OP_lit7"; case DW_OP_lit8: return "DW_OP_lit8"; case DW_OP_lit9: return "DW_OP_lit9"; case DW_OP_lit10: return "DW_OP_lit10"; case DW_OP_lit11: return "DW_OP_lit11"; case DW_OP_lit12: return "DW_OP_lit12"; case DW_OP_lit13: return "DW_OP_lit13"; case DW_OP_lit14: return "DW_OP_lit14"; case DW_OP_lit15: return "DW_OP_lit15"; case DW_OP_lit16: return "DW_OP_lit16"; case DW_OP_lit17: return "DW_OP_lit17"; case DW_OP_lit18: return "DW_OP_lit18"; case DW_OP_lit19: return "DW_OP_lit19"; case DW_OP_lit20: return "DW_OP_lit20"; case DW_OP_lit21: return "DW_OP_lit21"; case DW_OP_lit22: return "DW_OP_lit22"; case DW_OP_lit23: return "DW_OP_lit23"; case DW_OP_lit24: return "DW_OP_lit24"; case DW_OP_lit25: return "DW_OP_lit25"; case DW_OP_lit26: return "DW_OP_lit26"; case DW_OP_lit27: return "DW_OP_lit27"; case DW_OP_lit28: return "DW_OP_lit28"; case DW_OP_lit29: return "DW_OP_lit29"; case DW_OP_lit30: return "DW_OP_lit30"; case DW_OP_lit31: return "DW_OP_lit31"; case DW_OP_reg0: return "DW_OP_reg0"; case DW_OP_reg1: return "DW_OP_reg1"; case DW_OP_reg2: return "DW_OP_reg2"; case DW_OP_reg3: return "DW_OP_reg3"; case DW_OP_reg4: return "DW_OP_reg4"; case DW_OP_reg5: return "DW_OP_reg5"; case DW_OP_reg6: return "DW_OP_reg6"; case DW_OP_reg7: return "DW_OP_reg7"; case DW_OP_reg8: return "DW_OP_reg8"; case DW_OP_reg9: return "DW_OP_reg9"; case DW_OP_reg10: return "DW_OP_reg10"; case DW_OP_reg11: return "DW_OP_reg11"; case DW_OP_reg12: return "DW_OP_reg12"; case DW_OP_reg13: return "DW_OP_reg13"; case DW_OP_reg14: return 
"DW_OP_reg14"; case DW_OP_reg15: return "DW_OP_reg15"; case DW_OP_reg16: return "DW_OP_reg16"; case DW_OP_reg17: return "DW_OP_reg17"; case DW_OP_reg18: return "DW_OP_reg18"; case DW_OP_reg19: return "DW_OP_reg19"; case DW_OP_reg20: return "DW_OP_reg20"; case DW_OP_reg21: return "DW_OP_reg21"; case DW_OP_reg22: return "DW_OP_reg22"; case DW_OP_reg23: return "DW_OP_reg23"; case DW_OP_reg24: return "DW_OP_reg24"; case DW_OP_reg25: return "DW_OP_reg25"; case DW_OP_reg26: return "DW_OP_reg26"; case DW_OP_reg27: return "DW_OP_reg27"; case DW_OP_reg28: return "DW_OP_reg28"; case DW_OP_reg29: return "DW_OP_reg29"; case DW_OP_reg30: return "DW_OP_reg30"; case DW_OP_reg31: return "DW_OP_reg31"; case DW_OP_breg0: return "DW_OP_breg0"; case DW_OP_breg1: return "DW_OP_breg1"; case DW_OP_breg2: return "DW_OP_breg2"; case DW_OP_breg3: return "DW_OP_breg3"; case DW_OP_breg4: return "DW_OP_breg4"; case DW_OP_breg5: return "DW_OP_breg5"; case DW_OP_breg6: return "DW_OP_breg6"; case DW_OP_breg7: return "DW_OP_breg7"; case DW_OP_breg8: return "DW_OP_breg8"; case DW_OP_breg9: return "DW_OP_breg9"; case DW_OP_breg10: return "DW_OP_breg10"; case DW_OP_breg11: return "DW_OP_breg11"; case DW_OP_breg12: return "DW_OP_breg12"; case DW_OP_breg13: return "DW_OP_breg13"; case DW_OP_breg14: return "DW_OP_breg14"; case DW_OP_breg15: return "DW_OP_breg15"; case DW_OP_breg16: return "DW_OP_breg16"; case DW_OP_breg17: return "DW_OP_breg17"; case DW_OP_breg18: return "DW_OP_breg18"; case DW_OP_breg19: return "DW_OP_breg19"; case DW_OP_breg20: return "DW_OP_breg20"; case DW_OP_breg21: return "DW_OP_breg21"; case DW_OP_breg22: return "DW_OP_breg22"; case DW_OP_breg23: return "DW_OP_breg23"; case DW_OP_breg24: return "DW_OP_breg24"; case DW_OP_breg25: return "DW_OP_breg25"; case DW_OP_breg26: return "DW_OP_breg26"; case DW_OP_breg27: return "DW_OP_breg27"; case DW_OP_breg28: return "DW_OP_breg28"; case DW_OP_breg29: return "DW_OP_breg29"; case DW_OP_breg30: return "DW_OP_breg30"; case DW_OP_breg31: return "DW_OP_breg31"; case DW_OP_regx: return "DW_OP_regx"; case DW_OP_fbreg: return "DW_OP_fbreg"; case DW_OP_bregx: return "DW_OP_bregx"; case DW_OP_piece: return "DW_OP_piece"; case DW_OP_deref_size: return "DW_OP_deref_size"; case DW_OP_xderef_size: return "DW_OP_xderef_size"; case DW_OP_nop: return "DW_OP_nop"; case DW_OP_push_object_address: return "DW_OP_push_object_address"; case DW_OP_call2: return "DW_OP_call2"; case DW_OP_call4: return "DW_OP_call4"; case DW_OP_call_ref: return "DW_OP_call_ref"; case DW_OP_GNU_push_tls_address: return "DW_OP_GNU_push_tls_address"; default: return "OP_"; } } /* Return a pointer to a newly allocated location description. Location descriptions are simple expression terms that can be strung together to form more complicated location (address) descriptions. */ static inline dw_loc_descr_ref new_loc_descr (enum dwarf_location_atom op, unsigned HOST_WIDE_INT oprnd1, unsigned HOST_WIDE_INT oprnd2) { dw_loc_descr_ref descr = ggc_alloc_cleared (sizeof (dw_loc_descr_node)); descr->dw_loc_opc = op; descr->dw_loc_oprnd1.val_class = dw_val_class_unsigned_const; descr->dw_loc_oprnd1.v.val_unsigned = oprnd1; descr->dw_loc_oprnd2.val_class = dw_val_class_unsigned_const; descr->dw_loc_oprnd2.v.val_unsigned = oprnd2; return descr; } /* Add a location description term to a location description expression. */ static inline void add_loc_descr (dw_loc_descr_ref *list_head, dw_loc_descr_ref descr) { dw_loc_descr_ref *d; /* Find the end of the chain. 
*/ for (d = list_head; (*d) != NULL; d = &(*d)->dw_loc_next) ; *d = descr; } /* Return the size of a location descriptor. */ static unsigned long size_of_loc_descr (dw_loc_descr_ref loc) { unsigned long size = 1; switch (loc->dw_loc_opc) { case DW_OP_addr: case INTERNAL_DW_OP_tls_addr: size += DWARF2_ADDR_SIZE; break; case DW_OP_const1u: case DW_OP_const1s: size += 1; break; case DW_OP_const2u: case DW_OP_const2s: size += 2; break; case DW_OP_const4u: case DW_OP_const4s: size += 4; break; case DW_OP_const8u: case DW_OP_const8s: size += 8; break; case DW_OP_constu: size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned); break; case DW_OP_consts: size += size_of_sleb128 (loc->dw_loc_oprnd1.v.val_int); break; case DW_OP_pick: size += 1; break; case DW_OP_plus_uconst: size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned); break; case DW_OP_skip: case DW_OP_bra: size += 2; break; case DW_OP_breg0: case DW_OP_breg1: case DW_OP_breg2: case DW_OP_breg3: case DW_OP_breg4: case DW_OP_breg5: case DW_OP_breg6: case DW_OP_breg7: case DW_OP_breg8: case DW_OP_breg9: case DW_OP_breg10: case DW_OP_breg11: case DW_OP_breg12: case DW_OP_breg13: case DW_OP_breg14: case DW_OP_breg15: case DW_OP_breg16: case DW_OP_breg17: case DW_OP_breg18: case DW_OP_breg19: case DW_OP_breg20: case DW_OP_breg21: case DW_OP_breg22: case DW_OP_breg23: case DW_OP_breg24: case DW_OP_breg25: case DW_OP_breg26: case DW_OP_breg27: case DW_OP_breg28: case DW_OP_breg29: case DW_OP_breg30: case DW_OP_breg31: size += size_of_sleb128 (loc->dw_loc_oprnd1.v.val_int); break; case DW_OP_regx: size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned); break; case DW_OP_fbreg: size += size_of_sleb128 (loc->dw_loc_oprnd1.v.val_int); break; case DW_OP_bregx: size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned); size += size_of_sleb128 (loc->dw_loc_oprnd2.v.val_int); break; case DW_OP_piece: size += size_of_uleb128 (loc->dw_loc_oprnd1.v.val_unsigned); break; case DW_OP_deref_size: case DW_OP_xderef_size: size += 1; break; case DW_OP_call2: size += 2; break; case DW_OP_call4: size += 4; break; case DW_OP_call_ref: size += DWARF2_ADDR_SIZE; break; default: break; } return size; } /* Return the size of a series of location descriptors. */ static unsigned long size_of_locs (dw_loc_descr_ref loc) { unsigned long size; for (size = 0; loc != NULL; loc = loc->dw_loc_next) { loc->dw_loc_addr = size; size += size_of_loc_descr (loc); } return size; } /* Output location description stack opcode's operands (if any). 
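The operand encoding depends on the opcode: fixed-width data for the DW_OP_const<n> forms, ULEB128 for DW_OP_plus_uconst, DW_OP_regx and DW_OP_piece, SLEB128 for DW_OP_fbreg and the DW_OP_breg<n> displacements, and a two-byte self-relative offset for DW_OP_skip and DW_OP_bra.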
*/ static void output_loc_operands (dw_loc_descr_ref loc) { dw_val_ref val1 = &loc->dw_loc_oprnd1; dw_val_ref val2 = &loc->dw_loc_oprnd2; switch (loc->dw_loc_opc) { #ifdef DWARF2_DEBUGGING_INFO case DW_OP_addr: dw2_asm_output_addr_rtx (DWARF2_ADDR_SIZE, val1->v.val_addr, NULL); break; case DW_OP_const2u: case DW_OP_const2s: dw2_asm_output_data (2, val1->v.val_int, NULL); break; case DW_OP_const4u: case DW_OP_const4s: dw2_asm_output_data (4, val1->v.val_int, NULL); break; case DW_OP_const8u: case DW_OP_const8s: if (HOST_BITS_PER_LONG < 64) abort (); dw2_asm_output_data (8, val1->v.val_int, NULL); break; case DW_OP_skip: case DW_OP_bra: { int offset; if (val1->val_class == dw_val_class_loc) offset = val1->v.val_loc->dw_loc_addr - (loc->dw_loc_addr + 3); else abort (); dw2_asm_output_data (2, offset, NULL); } break; #else case DW_OP_addr: case DW_OP_const2u: case DW_OP_const2s: case DW_OP_const4u: case DW_OP_const4s: case DW_OP_const8u: case DW_OP_const8s: case DW_OP_skip: case DW_OP_bra: /* We currently don't make any attempt to make sure these are aligned properly like we do for the main unwind info, so don't support emitting things larger than a byte if we're only doing unwinding. */ abort (); #endif case DW_OP_const1u: case DW_OP_const1s: dw2_asm_output_data (1, val1->v.val_int, NULL); break; case DW_OP_constu: dw2_asm_output_data_uleb128 (val1->v.val_unsigned, NULL); break; case DW_OP_consts: dw2_asm_output_data_sleb128 (val1->v.val_int, NULL); break; case DW_OP_pick: dw2_asm_output_data (1, val1->v.val_int, NULL); break; case DW_OP_plus_uconst: dw2_asm_output_data_uleb128 (val1->v.val_unsigned, NULL); break; case DW_OP_breg0: case DW_OP_breg1: case DW_OP_breg2: case DW_OP_breg3: case DW_OP_breg4: case DW_OP_breg5: case DW_OP_breg6: case DW_OP_breg7: case DW_OP_breg8: case DW_OP_breg9: case DW_OP_breg10: case DW_OP_breg11: case DW_OP_breg12: case DW_OP_breg13: case DW_OP_breg14: case DW_OP_breg15: case DW_OP_breg16: case DW_OP_breg17: case DW_OP_breg18: case DW_OP_breg19: case DW_OP_breg20: case DW_OP_breg21: case DW_OP_breg22: case DW_OP_breg23: case DW_OP_breg24: case DW_OP_breg25: case DW_OP_breg26: case DW_OP_breg27: case DW_OP_breg28: case DW_OP_breg29: case DW_OP_breg30: case DW_OP_breg31: dw2_asm_output_data_sleb128 (val1->v.val_int, NULL); break; case DW_OP_regx: dw2_asm_output_data_uleb128 (val1->v.val_unsigned, NULL); break; case DW_OP_fbreg: dw2_asm_output_data_sleb128 (val1->v.val_int, NULL); break; case DW_OP_bregx: dw2_asm_output_data_uleb128 (val1->v.val_unsigned, NULL); dw2_asm_output_data_sleb128 (val2->v.val_int, NULL); break; case DW_OP_piece: dw2_asm_output_data_uleb128 (val1->v.val_unsigned, NULL); break; case DW_OP_deref_size: case DW_OP_xderef_size: dw2_asm_output_data (1, val1->v.val_int, NULL); break; case INTERNAL_DW_OP_tls_addr: #ifdef ASM_OUTPUT_DWARF_DTPREL ASM_OUTPUT_DWARF_DTPREL (asm_out_file, DWARF2_ADDR_SIZE, val1->v.val_addr); fputc ('\n', asm_out_file); #else abort (); #endif break; default: /* Other codes have no operands. */ break; } } /* Output a sequence of location operations. */ static void output_loc_sequence (dw_loc_descr_ref loc) { for (; loc != NULL; loc = loc->dw_loc_next) { /* Output the opcode. */ dw2_asm_output_data (1, loc->dw_loc_opc, "%s", dwarf_stack_op_name (loc->dw_loc_opc)); /* Output the operand(s) (if any). */ output_loc_operands (loc); } } /* This routine will generate the correct assembly data for a location description based on a cfi entry with a complex address. 
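As an illustration (the register number is arbitrary), a CFA that must be fetched from memory at reg 5 + 16 and then adjusted by 8 (cfa.reg == 5, cfa.base_offset == 16, cfa.indirect set, cfa.offset == 8) is emitted as the ULEB128 length of the block followed by the sequence DW_OP_breg5 16, DW_OP_deref, DW_OP_plus_uconst 8; see build_cfa_loc below.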
*/ static void output_cfa_loc (dw_cfi_ref cfi) { dw_loc_descr_ref loc; unsigned long size; /* Output the size of the block. */ loc = cfi->dw_cfi_oprnd1.dw_cfi_loc; size = size_of_locs (loc); dw2_asm_output_data_uleb128 (size, NULL); /* Now output the operations themselves. */ output_loc_sequence (loc); } /* This function builds a dwarf location descriptor sequence from a dw_cfa_location. */ static struct dw_loc_descr_struct * build_cfa_loc (dw_cfa_location *cfa) { struct dw_loc_descr_struct *head, *tmp; if (cfa->indirect == 0) abort (); if (cfa->base_offset) { if (cfa->reg <= 31) head = new_loc_descr (DW_OP_breg0 + cfa->reg, cfa->base_offset, 0); else head = new_loc_descr (DW_OP_bregx, cfa->reg, cfa->base_offset); } else if (cfa->reg <= 31) head = new_loc_descr (DW_OP_reg0 + cfa->reg, 0, 0); else head = new_loc_descr (DW_OP_regx, cfa->reg, 0); head->dw_loc_oprnd1.val_class = dw_val_class_const; tmp = new_loc_descr (DW_OP_deref, 0, 0); add_loc_descr (&head, tmp); if (cfa->offset != 0) { tmp = new_loc_descr (DW_OP_plus_uconst, cfa->offset, 0); add_loc_descr (&head, tmp); } return head; } /* This function fills in aa dw_cfa_location structure from a dwarf location descriptor sequence. */ static void get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc) { struct dw_loc_descr_struct *ptr; cfa->offset = 0; cfa->base_offset = 0; cfa->indirect = 0; cfa->reg = -1; for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next) { enum dwarf_location_atom op = ptr->dw_loc_opc; switch (op) { case DW_OP_reg0: case DW_OP_reg1: case DW_OP_reg2: case DW_OP_reg3: case DW_OP_reg4: case DW_OP_reg5: case DW_OP_reg6: case DW_OP_reg7: case DW_OP_reg8: case DW_OP_reg9: case DW_OP_reg10: case DW_OP_reg11: case DW_OP_reg12: case DW_OP_reg13: case DW_OP_reg14: case DW_OP_reg15: case DW_OP_reg16: case DW_OP_reg17: case DW_OP_reg18: case DW_OP_reg19: case DW_OP_reg20: case DW_OP_reg21: case DW_OP_reg22: case DW_OP_reg23: case DW_OP_reg24: case DW_OP_reg25: case DW_OP_reg26: case DW_OP_reg27: case DW_OP_reg28: case DW_OP_reg29: case DW_OP_reg30: case DW_OP_reg31: cfa->reg = op - DW_OP_reg0; break; case DW_OP_regx: cfa->reg = ptr->dw_loc_oprnd1.v.val_int; break; case DW_OP_breg0: case DW_OP_breg1: case DW_OP_breg2: case DW_OP_breg3: case DW_OP_breg4: case DW_OP_breg5: case DW_OP_breg6: case DW_OP_breg7: case DW_OP_breg8: case DW_OP_breg9: case DW_OP_breg10: case DW_OP_breg11: case DW_OP_breg12: case DW_OP_breg13: case DW_OP_breg14: case DW_OP_breg15: case DW_OP_breg16: case DW_OP_breg17: case DW_OP_breg18: case DW_OP_breg19: case DW_OP_breg20: case DW_OP_breg21: case DW_OP_breg22: case DW_OP_breg23: case DW_OP_breg24: case DW_OP_breg25: case DW_OP_breg26: case DW_OP_breg27: case DW_OP_breg28: case DW_OP_breg29: case DW_OP_breg30: case DW_OP_breg31: cfa->reg = op - DW_OP_breg0; cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int; break; case DW_OP_bregx: cfa->reg = ptr->dw_loc_oprnd1.v.val_int; cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int; break; case DW_OP_deref: cfa->indirect = 1; break; case DW_OP_plus_uconst: cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned; break; default: internal_error ("DW_LOC_OP %s not implemented\n", dwarf_stack_op_name (ptr->dw_loc_opc)); } } } #endif /* .debug_frame support */ /* And now, the support for symbolic debugging information. */ #ifdef DWARF2_DEBUGGING_INFO /* .debug_str support. 
*/ static int output_indirect_string (void **, void *); static void dwarf2out_init (const char *); static void dwarf2out_finish (const char *); static void dwarf2out_define (unsigned int, const char *); static void dwarf2out_undef (unsigned int, const char *); static void dwarf2out_start_source_file (unsigned, const char *); static void dwarf2out_end_source_file (unsigned); static void dwarf2out_begin_block (unsigned, unsigned); static void dwarf2out_end_block (unsigned, unsigned); static bool dwarf2out_ignore_block (tree); static void dwarf2out_global_decl (tree); static void dwarf2out_type_decl (tree, int); static void dwarf2out_imported_module_or_decl (tree, tree); static void dwarf2out_abstract_function (tree); static void dwarf2out_var_location (rtx); static void dwarf2out_begin_function (tree); /* The debug hooks structure. */ const struct gcc_debug_hooks dwarf2_debug_hooks = { dwarf2out_init, dwarf2out_finish, dwarf2out_define, dwarf2out_undef, dwarf2out_start_source_file, dwarf2out_end_source_file, dwarf2out_begin_block, dwarf2out_end_block, dwarf2out_ignore_block, dwarf2out_source_line, dwarf2out_begin_prologue, debug_nothing_int_charstar, /* end_prologue */ dwarf2out_end_epilogue, dwarf2out_begin_function, debug_nothing_int, /* end_function */ dwarf2out_decl, /* function_decl */ dwarf2out_global_decl, dwarf2out_type_decl, /* type_decl */ dwarf2out_imported_module_or_decl, debug_nothing_tree, /* deferred_inline_function */ /* The DWARF 2 backend tries to reduce debugging bloat by not emitting the abstract description of inline functions until something tries to reference them. */ dwarf2out_abstract_function, /* outlining_inline_function */ debug_nothing_rtx, /* label */ debug_nothing_int, /* handle_pch */ dwarf2out_var_location }; #endif /* NOTE: In the comments in this file, many references are made to "Debugging Information Entries". This term is abbreviated as `DIE' throughout the remainder of this file. */ /* An internal representation of the DWARF output is built, and then walked to generate the DWARF debugging info. The walk of the internal representation is done after the entire program has been compiled. The types below are used to describe the internal representation. */ /* Various DIE's use offsets relative to the beginning of the .debug_info section to refer to each other. */ typedef long int dw_offset; /* Define typedefs here to avoid circular dependencies. */ typedef struct dw_attr_struct *dw_attr_ref; typedef struct dw_line_info_struct *dw_line_info_ref; typedef struct dw_separate_line_info_struct *dw_separate_line_info_ref; typedef struct pubname_struct *pubname_ref; typedef struct dw_ranges_struct *dw_ranges_ref; /* Each entry in the line_info_table maintains the file and line number associated with the label generated for that entry. The label gives the PC value associated with the line number entry. */ typedef struct dw_line_info_struct GTY(()) { unsigned long dw_file_num; unsigned long dw_line_num; } dw_line_info_entry; /* Line information for functions in separate sections; each one gets its own sequence. */ typedef struct dw_separate_line_info_struct GTY(()) { unsigned long dw_file_num; unsigned long dw_line_num; unsigned long function; } dw_separate_line_info_entry; /* Each DIE attribute has a field specifying the attribute kind, a link to the next attribute in the chain, and an attribute value. Attributes are typically linked below the DIE they modify. 
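A DW_TAG_variable DIE, for instance, typically chains a DW_AT_name, a DW_AT_type reference and a DW_AT_location off its attribute list.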
*/ typedef struct dw_attr_struct GTY(()) { enum dwarf_attribute dw_attr; dw_attr_ref dw_attr_next; dw_val_node dw_attr_val; } dw_attr_node; /* The Debugging Information Entry (DIE) structure */ typedef struct die_struct GTY(()) { enum dwarf_tag die_tag; char *die_symbol; dw_attr_ref die_attr; dw_die_ref die_parent; dw_die_ref die_child; dw_die_ref die_sib; dw_die_ref die_definition; /* ref from a specification to its definition */ dw_offset die_offset; unsigned long die_abbrev; int die_mark; unsigned int decl_id; } die_node; /* The pubname structure */ typedef struct pubname_struct GTY(()) { dw_die_ref die; char *name; } pubname_entry; struct dw_ranges_struct GTY(()) { int block_num; }; /* The limbo die list structure. */ typedef struct limbo_die_struct GTY(()) { dw_die_ref die; tree created_for; struct limbo_die_struct *next; } limbo_die_node; /* How to start an assembler comment. */ #ifndef ASM_COMMENT_START #define ASM_COMMENT_START ";#" #endif /* Define a macro which returns nonzero for a TYPE_DECL which was implicitly generated for a tagged type. Note that unlike the gcc front end (which generates a NULL named TYPE_DECL node for each complete tagged type, each array type, and each function type node created) the g++ front end generates a _named_ TYPE_DECL node for each tagged type node created. These TYPE_DECLs have DECL_ARTIFICIAL set, so we know not to generate a DW_TAG_typedef DIE for them. */ #define TYPE_DECL_IS_STUB(decl) \ (DECL_NAME (decl) == NULL_TREE \ || (DECL_ARTIFICIAL (decl) \ && is_tagged_type (TREE_TYPE (decl)) \ && ((decl == TYPE_STUB_DECL (TREE_TYPE (decl))) \ /* This is necessary for stub decls that \ appear in nested inline functions. */ \ || (DECL_ABSTRACT_ORIGIN (decl) != NULL_TREE \ && (decl_ultimate_origin (decl) \ == TYPE_STUB_DECL (TREE_TYPE (decl))))))) /* Information concerning the compilation unit's programming language, and compiler version. */ /* Fixed size portion of the DWARF compilation unit header. */ #define DWARF_COMPILE_UNIT_HEADER_SIZE \ (DWARF_INITIAL_LENGTH_SIZE + DWARF_OFFSET_SIZE + 3) /* Fixed size portion of public names info. */ #define DWARF_PUBNAMES_HEADER_SIZE (2 * DWARF_OFFSET_SIZE + 2) /* Fixed size portion of the address range info. */ #define DWARF_ARANGES_HEADER_SIZE \ (DWARF_ROUND (DWARF_INITIAL_LENGTH_SIZE + DWARF_OFFSET_SIZE + 4, \ DWARF2_ADDR_SIZE * 2) \ - DWARF_INITIAL_LENGTH_SIZE) /* Size of padding portion in the address range info. It must be aligned to twice the pointer size. */ #define DWARF_ARANGES_PAD_SIZE \ (DWARF_ROUND (DWARF_INITIAL_LENGTH_SIZE + DWARF_OFFSET_SIZE + 4, \ DWARF2_ADDR_SIZE * 2) \ - (DWARF_INITIAL_LENGTH_SIZE + DWARF_OFFSET_SIZE + 4)) /* Use assembler line directives if available. */ #ifndef DWARF2_ASM_LINE_DEBUG_INFO #ifdef HAVE_AS_DWARF2_DEBUG_LINE #define DWARF2_ASM_LINE_DEBUG_INFO 1 #else #define DWARF2_ASM_LINE_DEBUG_INFO 0 #endif #endif /* Minimum line offset in a special line info. opcode. This value was chosen to give a reasonable range of values. */ #define DWARF_LINE_BASE -10 /* First special line opcode - leave room for the standard opcodes. */ #define DWARF_LINE_OPCODE_BASE 10 /* Range of line offsets in a special line info. opcode. */ #define DWARF_LINE_RANGE (254-DWARF_LINE_OPCODE_BASE+1) /* Flag that indicates the initial value of the is_stmt_start flag. In the present implementation, we do not mark any lines as the beginning of a source statement, because that information is not made available by the GCC front-end. 
*/ #define DWARF_LINE_DEFAULT_IS_STMT_START 1 #ifdef DWARF2_DEBUGGING_INFO /* This location is used by calc_die_sizes() to keep track the offset of each DIE within the .debug_info section. */ static unsigned long next_die_offset; #endif /* Record the root of the DIE's built for the current compilation unit. */ static GTY(()) dw_die_ref comp_unit_die; /* A list of DIEs with a NULL parent waiting to be relocated. */ static GTY(()) limbo_die_node *limbo_die_list; /* Filenames referenced by this compilation unit. */ static GTY(()) varray_type file_table; static GTY(()) varray_type file_table_emitted; static GTY(()) size_t file_table_last_lookup_index; /* A hash table of references to DIE's that describe declarations. The key is a DECL_UID() which is a unique number identifying each decl. */ static GTY ((param_is (struct die_struct))) htab_t decl_die_table; /* Node of the variable location list. */ struct var_loc_node GTY ((chain_next ("%h.next"))) { rtx GTY (()) var_loc_note; const char * GTY (()) label; struct var_loc_node * GTY (()) next; }; /* Variable location list. */ struct var_loc_list_def GTY (()) { struct var_loc_node * GTY (()) first; /* Do not mark the last element of the chained list because it is marked through the chain. */ struct var_loc_node * GTY ((skip ("%h"))) last; /* DECL_UID of the variable decl. */ unsigned int decl_id; }; typedef struct var_loc_list_def var_loc_list; /* Table of decl location linked lists. */ static GTY ((param_is (var_loc_list))) htab_t decl_loc_table; /* A pointer to the base of a list of references to DIE's that are uniquely identified by their tag, presence/absence of children DIE's, and list of attribute/value pairs. */ static GTY((length ("abbrev_die_table_allocated"))) dw_die_ref *abbrev_die_table; /* Number of elements currently allocated for abbrev_die_table. */ static GTY(()) unsigned abbrev_die_table_allocated; /* Number of elements in type_die_table currently in use. */ static GTY(()) unsigned abbrev_die_table_in_use; /* Size (in elements) of increments by which we may expand the abbrev_die_table. */ #define ABBREV_DIE_TABLE_INCREMENT 256 /* A pointer to the base of a table that contains line information for each source code line in .text in the compilation unit. */ static GTY((length ("line_info_table_allocated"))) dw_line_info_ref line_info_table; /* Number of elements currently allocated for line_info_table. */ static GTY(()) unsigned line_info_table_allocated; /* Number of elements in line_info_table currently in use. */ static GTY(()) unsigned line_info_table_in_use; /* A pointer to the base of a table that contains line information for each source code line outside of .text in the compilation unit. */ static GTY ((length ("separate_line_info_table_allocated"))) dw_separate_line_info_ref separate_line_info_table; /* Number of elements currently allocated for separate_line_info_table. */ static GTY(()) unsigned separate_line_info_table_allocated; /* Number of elements in separate_line_info_table currently in use. */ static GTY(()) unsigned separate_line_info_table_in_use; /* Size (in elements) of increments by which we may expand the line_info_table. */ #define LINE_INFO_TABLE_INCREMENT 1024 /* A pointer to the base of a table that contains a list of publicly accessible names. */ static GTY ((length ("pubname_table_allocated"))) pubname_ref pubname_table; /* Number of elements currently allocated for pubname_table. */ static GTY(()) unsigned pubname_table_allocated; /* Number of elements in pubname_table currently in use. 
*/ static GTY(()) unsigned pubname_table_in_use; /* Size (in elements) of increments by which we may expand the pubname_table. */ #define PUBNAME_TABLE_INCREMENT 64 /* Array of dies for which we should generate .debug_arange info. */ static GTY((length ("arange_table_allocated"))) dw_die_ref *arange_table; /* Number of elements currently allocated for arange_table. */ static GTY(()) unsigned arange_table_allocated; /* Number of elements in arange_table currently in use. */ static GTY(()) unsigned arange_table_in_use; /* Size (in elements) of increments by which we may expand the arange_table. */ #define ARANGE_TABLE_INCREMENT 64 /* Array of dies for which we should generate .debug_ranges info. */ static GTY ((length ("ranges_table_allocated"))) dw_ranges_ref ranges_table; /* Number of elements currently allocated for ranges_table. */ static GTY(()) unsigned ranges_table_allocated; /* Number of elements in ranges_table currently in use. */ static GTY(()) unsigned ranges_table_in_use; /* Size (in elements) of increments by which we may expand the ranges_table. */ #define RANGES_TABLE_INCREMENT 64 /* Whether we have location lists that need outputting */ static GTY(()) unsigned have_location_lists; /* Unique label counter. */ static GTY(()) unsigned int loclabel_num; #ifdef DWARF2_DEBUGGING_INFO /* Record whether the function being analyzed contains inlined functions. */ static int current_function_has_inlines; #endif #if 0 && defined (MIPS_DEBUGGING_INFO) static int comp_unit_has_inlines; #endif /* Number of file tables emitted in maybe_emit_file(). */ static GTY(()) int emitcount = 0; /* Number of internal labels generated by gen_internal_sym(). */ static GTY(()) int label_num; #ifdef DWARF2_DEBUGGING_INFO /* Forward declarations for functions defined in this file. 
*/ static int is_pseudo_reg (rtx); static tree type_main_variant (tree); static int is_tagged_type (tree); static const char *dwarf_tag_name (unsigned); static const char *dwarf_attr_name (unsigned); static const char *dwarf_form_name (unsigned); #if 0 static const char *dwarf_type_encoding_name (unsigned); #endif static tree decl_ultimate_origin (tree); static tree block_ultimate_origin (tree); static tree decl_class_context (tree); static void add_dwarf_attr (dw_die_ref, dw_attr_ref); static inline enum dw_val_class AT_class (dw_attr_ref); static void add_AT_flag (dw_die_ref, enum dwarf_attribute, unsigned); static inline unsigned AT_flag (dw_attr_ref); static void add_AT_int (dw_die_ref, enum dwarf_attribute, HOST_WIDE_INT); static inline HOST_WIDE_INT AT_int (dw_attr_ref); static void add_AT_unsigned (dw_die_ref, enum dwarf_attribute, unsigned HOST_WIDE_INT); static inline unsigned HOST_WIDE_INT AT_unsigned (dw_attr_ref); static void add_AT_long_long (dw_die_ref, enum dwarf_attribute, unsigned long, unsigned long); static inline void add_AT_vec (dw_die_ref, enum dwarf_attribute, unsigned int, unsigned int, unsigned char *); static hashval_t debug_str_do_hash (const void *); static int debug_str_eq (const void *, const void *); static void add_AT_string (dw_die_ref, enum dwarf_attribute, const char *); static inline const char *AT_string (dw_attr_ref); static int AT_string_form (dw_attr_ref); static void add_AT_die_ref (dw_die_ref, enum dwarf_attribute, dw_die_ref); static void add_AT_specification (dw_die_ref, dw_die_ref); static inline dw_die_ref AT_ref (dw_attr_ref); static inline int AT_ref_external (dw_attr_ref); static inline void set_AT_ref_external (dw_attr_ref, int); static void add_AT_fde_ref (dw_die_ref, enum dwarf_attribute, unsigned); static void add_AT_loc (dw_die_ref, enum dwarf_attribute, dw_loc_descr_ref); static inline dw_loc_descr_ref AT_loc (dw_attr_ref); static void add_AT_loc_list (dw_die_ref, enum dwarf_attribute, dw_loc_list_ref); static inline dw_loc_list_ref AT_loc_list (dw_attr_ref); static void add_AT_addr (dw_die_ref, enum dwarf_attribute, rtx); static inline rtx AT_addr (dw_attr_ref); static void add_AT_lbl_id (dw_die_ref, enum dwarf_attribute, const char *); static void add_AT_lbl_offset (dw_die_ref, enum dwarf_attribute, const char *); static void add_AT_offset (dw_die_ref, enum dwarf_attribute, unsigned HOST_WIDE_INT); static void add_AT_range_list (dw_die_ref, enum dwarf_attribute, unsigned long); static inline const char *AT_lbl (dw_attr_ref); static dw_attr_ref get_AT (dw_die_ref, enum dwarf_attribute); static const char *get_AT_low_pc (dw_die_ref); static const char *get_AT_hi_pc (dw_die_ref); static const char *get_AT_string (dw_die_ref, enum dwarf_attribute); static int get_AT_flag (dw_die_ref, enum dwarf_attribute); static unsigned get_AT_unsigned (dw_die_ref, enum dwarf_attribute); static inline dw_die_ref get_AT_ref (dw_die_ref, enum dwarf_attribute); static bool is_c_family (void); static bool is_cxx (void); static bool is_java (void); static bool is_fortran (void); static bool is_ada (void); static void remove_AT (dw_die_ref, enum dwarf_attribute); static void remove_child_TAG (dw_die_ref, enum dwarf_tag); static inline void free_die (dw_die_ref); static void remove_children (dw_die_ref); static void add_child_die (dw_die_ref, dw_die_ref); static dw_die_ref new_die (enum dwarf_tag, dw_die_ref, tree); static dw_die_ref lookup_type_die (tree); static void equate_type_number_to_die (tree, dw_die_ref); static hashval_t decl_die_table_hash (const 
void *); static int decl_die_table_eq (const void *, const void *); static dw_die_ref lookup_decl_die (tree); static hashval_t decl_loc_table_hash (const void *); static int decl_loc_table_eq (const void *, const void *); static var_loc_list *lookup_decl_loc (tree); static void equate_decl_number_to_die (tree, dw_die_ref); static void add_var_loc_to_decl (tree, struct var_loc_node *); static void print_spaces (FILE *); static void print_die (dw_die_ref, FILE *); static void print_dwarf_line_table (FILE *); static void reverse_die_lists (dw_die_ref); static void reverse_all_dies (dw_die_ref); static dw_die_ref push_new_compile_unit (dw_die_ref, dw_die_ref); static dw_die_ref pop_compile_unit (dw_die_ref); static void loc_checksum (dw_loc_descr_ref, struct md5_ctx *); static void attr_checksum (dw_attr_ref, struct md5_ctx *, int *); static void die_checksum (dw_die_ref, struct md5_ctx *, int *); static int same_loc_p (dw_loc_descr_ref, dw_loc_descr_ref, int *); static int same_dw_val_p (dw_val_node *, dw_val_node *, int *); static int same_attr_p (dw_attr_ref, dw_attr_ref, int *); static int same_die_p (dw_die_ref, dw_die_ref, int *); static int same_die_p_wrap (dw_die_ref, dw_die_ref); static void compute_section_prefix (dw_die_ref); static int is_type_die (dw_die_ref); static int is_comdat_die (dw_die_ref); static int is_symbol_die (dw_die_ref); static void assign_symbol_names (dw_die_ref); static void break_out_includes (dw_die_ref); static hashval_t htab_cu_hash (const void *); static int htab_cu_eq (const void *, const void *); static void htab_cu_del (void *); static int check_duplicate_cu (dw_die_ref, htab_t, unsigned *); static void record_comdat_symbol_number (dw_die_ref, htab_t, unsigned); static void add_sibling_attributes (dw_die_ref); static void build_abbrev_table (dw_die_ref); static void output_location_lists (dw_die_ref); static int constant_size (long unsigned); static unsigned long size_of_die (dw_die_ref); static void calc_die_sizes (dw_die_ref); static void mark_dies (dw_die_ref); static void unmark_dies (dw_die_ref); static void unmark_all_dies (dw_die_ref); static unsigned long size_of_pubnames (void); static unsigned long size_of_aranges (void); static enum dwarf_form value_format (dw_attr_ref); static void output_value_format (dw_attr_ref); static void output_abbrev_section (void); static void output_die_symbol (dw_die_ref); static void output_die (dw_die_ref); static void output_compilation_unit_header (void); static void output_comp_unit (dw_die_ref, int); static const char *dwarf2_name (tree, int); static void add_pubname (tree, dw_die_ref); static void output_pubnames (void); static void add_arange (tree, dw_die_ref); static void output_aranges (void); static unsigned int add_ranges (tree); static void output_ranges (void); static void output_line_info (void); static void output_file_names (void); static dw_die_ref base_type_die (tree); static tree root_type (tree); static int is_base_type (tree); static bool is_subrange_type (tree); static dw_die_ref subrange_type_die (tree, dw_die_ref); static dw_die_ref modified_type_die (tree, int, int, dw_die_ref); static int type_is_enum (tree); static unsigned int dbx_reg_number (rtx); static dw_loc_descr_ref reg_loc_descriptor (rtx); static dw_loc_descr_ref one_reg_loc_descriptor (unsigned int); static dw_loc_descr_ref multiple_reg_loc_descriptor (rtx, rtx); static dw_loc_descr_ref int_loc_descriptor (HOST_WIDE_INT); static dw_loc_descr_ref based_loc_descr (unsigned, HOST_WIDE_INT, bool); static int is_based_loc (rtx); 
static dw_loc_descr_ref mem_loc_descriptor (rtx, enum machine_mode mode, bool); static dw_loc_descr_ref concat_loc_descriptor (rtx, rtx); static dw_loc_descr_ref loc_descriptor (rtx, bool); static dw_loc_descr_ref loc_descriptor_from_tree (tree, int); static HOST_WIDE_INT ceiling (HOST_WIDE_INT, unsigned int); static tree field_type (tree); static unsigned int simple_type_align_in_bits (tree); static unsigned int simple_decl_align_in_bits (tree); static unsigned HOST_WIDE_INT simple_type_size_in_bits (tree); static HOST_WIDE_INT field_byte_offset (tree); static void add_AT_location_description (dw_die_ref, enum dwarf_attribute, dw_loc_descr_ref); static void add_data_member_location_attribute (dw_die_ref, tree); static void add_const_value_attribute (dw_die_ref, rtx); static void insert_int (HOST_WIDE_INT, unsigned, unsigned char *); static HOST_WIDE_INT extract_int (const unsigned char *, unsigned); static void insert_float (rtx, unsigned char *); static rtx rtl_for_decl_location (tree); static void add_location_or_const_value_attribute (dw_die_ref, tree, enum dwarf_attribute); static void tree_add_const_value_attribute (dw_die_ref, tree); static void add_name_attribute (dw_die_ref, const char *); static void add_comp_dir_attribute (dw_die_ref); static void add_bound_info (dw_die_ref, enum dwarf_attribute, tree); static void add_subscript_info (dw_die_ref, tree); static void add_byte_size_attribute (dw_die_ref, tree); static void add_bit_offset_attribute (dw_die_ref, tree); static void add_bit_size_attribute (dw_die_ref, tree); static void add_prototyped_attribute (dw_die_ref, tree); static void add_abstract_origin_attribute (dw_die_ref, tree); static void add_pure_or_virtual_attribute (dw_die_ref, tree); static void add_src_coords_attributes (dw_die_ref, tree); static void add_name_and_src_coords_attributes (dw_die_ref, tree); static void push_decl_scope (tree); static void pop_decl_scope (void); static dw_die_ref scope_die_for (tree, dw_die_ref); static inline int local_scope_p (dw_die_ref); static inline int class_or_namespace_scope_p (dw_die_ref); static void add_type_attribute (dw_die_ref, tree, int, int, dw_die_ref); static const char *type_tag (tree); static tree member_declared_type (tree); #if 0 static const char *decl_start_label (tree); #endif static void gen_array_type_die (tree, dw_die_ref); static void gen_set_type_die (tree, dw_die_ref); #if 0 static void gen_entry_point_die (tree, dw_die_ref); #endif static void gen_inlined_enumeration_type_die (tree, dw_die_ref); static void gen_inlined_structure_type_die (tree, dw_die_ref); static void gen_inlined_union_type_die (tree, dw_die_ref); static dw_die_ref gen_enumeration_type_die (tree, dw_die_ref); static dw_die_ref gen_formal_parameter_die (tree, dw_die_ref); static void gen_unspecified_parameters_die (tree, dw_die_ref); static void gen_formal_types_die (tree, dw_die_ref); static void gen_subprogram_die (tree, dw_die_ref); static void gen_variable_die (tree, dw_die_ref); static void gen_label_die (tree, dw_die_ref); static void gen_lexical_block_die (tree, dw_die_ref, int); static void gen_inlined_subroutine_die (tree, dw_die_ref, int); static void gen_field_die (tree, dw_die_ref); static void gen_ptr_to_mbr_type_die (tree, dw_die_ref); static dw_die_ref gen_compile_unit_die (const char *); static void gen_string_type_die (tree, dw_die_ref); static void gen_inheritance_die (tree, tree, dw_die_ref); static void gen_member_die (tree, dw_die_ref); static void gen_struct_or_union_type_die (tree, dw_die_ref); static void 
gen_subroutine_type_die (tree, dw_die_ref); static void gen_typedef_die (tree, dw_die_ref); static void gen_type_die (tree, dw_die_ref); static void gen_tagged_type_instantiation_die (tree, dw_die_ref); static void gen_block_die (tree, dw_die_ref, int); static void decls_for_scope (tree, dw_die_ref, int); static int is_redundant_typedef (tree); static void gen_namespace_die (tree); static void gen_decl_die (tree, dw_die_ref); static dw_die_ref force_decl_die (tree); static dw_die_ref force_type_die (tree); static dw_die_ref setup_namespace_context (tree, dw_die_ref); static void declare_in_namespace (tree, dw_die_ref); static unsigned lookup_filename (const char *); static void init_file_table (void); static void retry_incomplete_types (void); static void gen_type_die_for_member (tree, tree, dw_die_ref); static void splice_child_die (dw_die_ref, dw_die_ref); static int file_info_cmp (const void *, const void *); static dw_loc_list_ref new_loc_list (dw_loc_descr_ref, const char *, const char *, const char *, unsigned); static void add_loc_descr_to_loc_list (dw_loc_list_ref *, dw_loc_descr_ref, const char *, const char *, const char *); static void output_loc_list (dw_loc_list_ref); static char *gen_internal_sym (const char *); static void prune_unmark_dies (dw_die_ref); static void prune_unused_types_mark (dw_die_ref, int); static void prune_unused_types_walk (dw_die_ref); static void prune_unused_types_walk_attribs (dw_die_ref); static void prune_unused_types_prune (dw_die_ref); static void prune_unused_types (void); static int maybe_emit_file (int); /* Section names used to hold DWARF debugging information. */ #ifndef DEBUG_INFO_SECTION #define DEBUG_INFO_SECTION ".debug_info" #endif #ifndef DEBUG_ABBREV_SECTION #define DEBUG_ABBREV_SECTION ".debug_abbrev" #endif #ifndef DEBUG_ARANGES_SECTION #define DEBUG_ARANGES_SECTION ".debug_aranges" #endif #ifndef DEBUG_MACINFO_SECTION #define DEBUG_MACINFO_SECTION ".debug_macinfo" #endif #ifndef DEBUG_LINE_SECTION #define DEBUG_LINE_SECTION ".debug_line" #endif #ifndef DEBUG_LOC_SECTION #define DEBUG_LOC_SECTION ".debug_loc" #endif #ifndef DEBUG_PUBNAMES_SECTION #define DEBUG_PUBNAMES_SECTION ".debug_pubnames" #endif #ifndef DEBUG_STR_SECTION #define DEBUG_STR_SECTION ".debug_str" #endif #ifndef DEBUG_RANGES_SECTION #define DEBUG_RANGES_SECTION ".debug_ranges" #endif /* Standard ELF section names for compiled code and data. */ #ifndef TEXT_SECTION_NAME #define TEXT_SECTION_NAME ".text" #endif /* Section flags for .debug_str section. */ #define DEBUG_STR_SECTION_FLAGS \ (HAVE_GAS_SHF_MERGE && flag_merge_constants \ ? SECTION_DEBUG | SECTION_MERGE | SECTION_STRINGS | 1 \ : SECTION_DEBUG) /* Labels we insert at beginning sections we can reference instead of the section names themselves. 
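   For example (illustrative note, based on how these labels are used later in this file), the DW_AT_stmt_list attribute of the compile unit DIE refers to the start of the .debug_line section through the Ldebug_line label emitted there, rather than through the section name itself.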
*/ #ifndef TEXT_SECTION_LABEL #define TEXT_SECTION_LABEL "Ltext" #endif #ifndef DEBUG_LINE_SECTION_LABEL #define DEBUG_LINE_SECTION_LABEL "Ldebug_line" #endif #ifndef DEBUG_INFO_SECTION_LABEL #define DEBUG_INFO_SECTION_LABEL "Ldebug_info" #endif #ifndef DEBUG_ABBREV_SECTION_LABEL #define DEBUG_ABBREV_SECTION_LABEL "Ldebug_abbrev" #endif #ifndef DEBUG_LOC_SECTION_LABEL #define DEBUG_LOC_SECTION_LABEL "Ldebug_loc" #endif #ifndef DEBUG_RANGES_SECTION_LABEL #define DEBUG_RANGES_SECTION_LABEL "Ldebug_ranges" #endif #ifndef DEBUG_MACINFO_SECTION_LABEL #define DEBUG_MACINFO_SECTION_LABEL "Ldebug_macinfo" #endif /* Definitions of defaults for formats and names of various special (artificial) labels which may be generated within this file (when the -g option is used and DWARF2_DEBUGGING_INFO is in effect). If necessary, these may be overridden from within the tm.h file, but typically, overriding these defaults is unnecessary. */ static char text_end_label[MAX_ARTIFICIAL_LABEL_BYTES]; static char text_section_label[MAX_ARTIFICIAL_LABEL_BYTES]; static char abbrev_section_label[MAX_ARTIFICIAL_LABEL_BYTES]; static char debug_info_section_label[MAX_ARTIFICIAL_LABEL_BYTES]; static char debug_line_section_label[MAX_ARTIFICIAL_LABEL_BYTES]; static char macinfo_section_label[MAX_ARTIFICIAL_LABEL_BYTES]; static char loc_section_label[MAX_ARTIFICIAL_LABEL_BYTES]; static char ranges_section_label[2 * MAX_ARTIFICIAL_LABEL_BYTES]; #ifndef TEXT_END_LABEL #define TEXT_END_LABEL "Letext" #endif #ifndef BLOCK_BEGIN_LABEL #define BLOCK_BEGIN_LABEL "LBB" #endif #ifndef BLOCK_END_LABEL #define BLOCK_END_LABEL "LBE" #endif #ifndef LINE_CODE_LABEL #define LINE_CODE_LABEL "LM" #endif #ifndef SEPARATE_LINE_CODE_LABEL #define SEPARATE_LINE_CODE_LABEL "LSM" #endif /* We allow a language front-end to designate a function that is to be called to "demangle" any name before it is put into a DIE. */ static const char *(*demangle_name_func) (const char *); void dwarf2out_set_demangle_name_func (const char *(*func) (const char *)) { demangle_name_func = func; } /* Test if rtl node points to a pseudo register. */ static inline int is_pseudo_reg (rtx rtl) { return ((REG_P (rtl) && REGNO (rtl) >= FIRST_PSEUDO_REGISTER) || (GET_CODE (rtl) == SUBREG && REGNO (SUBREG_REG (rtl)) >= FIRST_PSEUDO_REGISTER)); } /* Return a reference to a type, with its const and volatile qualifiers removed. */ static inline tree type_main_variant (tree type) { type = TYPE_MAIN_VARIANT (type); /* ??? There really should be only one main variant among any group of variants of a given type (and all of the MAIN_VARIANT values for all members of the group should point to that one type) but sometimes the C front-end messes this up for array types, so we work around that bug here. */ if (TREE_CODE (type) == ARRAY_TYPE) while (type != TYPE_MAIN_VARIANT (type)) type = TYPE_MAIN_VARIANT (type); return type; } /* Return nonzero if the given type node represents a tagged type. */ static inline int is_tagged_type (tree type) { enum tree_code code = TREE_CODE (type); return (code == RECORD_TYPE || code == UNION_TYPE || code == QUAL_UNION_TYPE || code == ENUMERAL_TYPE); } /* Convert a DIE tag into its string name. 
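   For instance, dwarf_tag_name (DW_TAG_subprogram) yields the literal string "DW_TAG_subprogram"; tag values not listed in the switch below fall through to the default case.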
*/ static const char * dwarf_tag_name (unsigned int tag) { switch (tag) { case DW_TAG_padding: return "DW_TAG_padding"; case DW_TAG_array_type: return "DW_TAG_array_type"; case DW_TAG_class_type: return "DW_TAG_class_type"; case DW_TAG_entry_point: return "DW_TAG_entry_point"; case DW_TAG_enumeration_type: return "DW_TAG_enumeration_type"; case DW_TAG_formal_parameter: return "DW_TAG_formal_parameter"; case DW_TAG_imported_declaration: return "DW_TAG_imported_declaration"; case DW_TAG_label: return "DW_TAG_label"; case DW_TAG_lexical_block: return "DW_TAG_lexical_block"; case DW_TAG_member: return "DW_TAG_member"; case DW_TAG_pointer_type: return "DW_TAG_pointer_type"; case DW_TAG_reference_type: return "DW_TAG_reference_type"; case DW_TAG_compile_unit: return "DW_TAG_compile_unit"; case DW_TAG_string_type: return "DW_TAG_string_type"; case DW_TAG_structure_type: return "DW_TAG_structure_type"; case DW_TAG_subroutine_type: return "DW_TAG_subroutine_type"; case DW_TAG_typedef: return "DW_TAG_typedef"; case DW_TAG_union_type: return "DW_TAG_union_type"; case DW_TAG_unspecified_parameters: return "DW_TAG_unspecified_parameters"; case DW_TAG_variant: return "DW_TAG_variant"; case DW_TAG_common_block: return "DW_TAG_common_block"; case DW_TAG_common_inclusion: return "DW_TAG_common_inclusion"; case DW_TAG_inheritance: return "DW_TAG_inheritance"; case DW_TAG_inlined_subroutine: return "DW_TAG_inlined_subroutine"; case DW_TAG_module: return "DW_TAG_module"; case DW_TAG_ptr_to_member_type: return "DW_TAG_ptr_to_member_type"; case DW_TAG_set_type: return "DW_TAG_set_type"; case DW_TAG_subrange_type: return "DW_TAG_subrange_type"; case DW_TAG_with_stmt: return "DW_TAG_with_stmt"; case DW_TAG_access_declaration: return "DW_TAG_access_declaration"; case DW_TAG_base_type: return "DW_TAG_base_type"; case DW_TAG_catch_block: return "DW_TAG_catch_block"; case DW_TAG_const_type: return "DW_TAG_const_type"; case DW_TAG_constant: return "DW_TAG_constant"; case DW_TAG_enumerator: return "DW_TAG_enumerator"; case DW_TAG_file_type: return "DW_TAG_file_type"; case DW_TAG_friend: return "DW_TAG_friend"; case DW_TAG_namelist: return "DW_TAG_namelist"; case DW_TAG_namelist_item: return "DW_TAG_namelist_item"; case DW_TAG_namespace: return "DW_TAG_namespace"; case DW_TAG_packed_type: return "DW_TAG_packed_type"; case DW_TAG_subprogram: return "DW_TAG_subprogram"; case DW_TAG_template_type_param: return "DW_TAG_template_type_param"; case DW_TAG_template_value_param: return "DW_TAG_template_value_param"; case DW_TAG_thrown_type: return "DW_TAG_thrown_type"; case DW_TAG_try_block: return "DW_TAG_try_block"; case DW_TAG_variant_part: return "DW_TAG_variant_part"; case DW_TAG_variable: return "DW_TAG_variable"; case DW_TAG_volatile_type: return "DW_TAG_volatile_type"; case DW_TAG_imported_module: return "DW_TAG_imported_module"; case DW_TAG_MIPS_loop: return "DW_TAG_MIPS_loop"; case DW_TAG_format_label: return "DW_TAG_format_label"; case DW_TAG_function_template: return "DW_TAG_function_template"; case DW_TAG_class_template: return "DW_TAG_class_template"; case DW_TAG_GNU_BINCL: return "DW_TAG_GNU_BINCL"; case DW_TAG_GNU_EINCL: return "DW_TAG_GNU_EINCL"; default: return "DW_TAG_<unknown>"; } } /* Convert a DWARF attribute code into its string name. 
*/ static const char * dwarf_attr_name (unsigned int attr) { switch (attr) { case DW_AT_sibling: return "DW_AT_sibling"; case DW_AT_location: return "DW_AT_location"; case DW_AT_name: return "DW_AT_name"; case DW_AT_ordering: return "DW_AT_ordering"; case DW_AT_subscr_data: return "DW_AT_subscr_data"; case DW_AT_byte_size: return "DW_AT_byte_size"; case DW_AT_bit_offset: return "DW_AT_bit_offset"; case DW_AT_bit_size: return "DW_AT_bit_size"; case DW_AT_element_list: return "DW_AT_element_list"; case DW_AT_stmt_list: return "DW_AT_stmt_list"; case DW_AT_low_pc: return "DW_AT_low_pc"; case DW_AT_high_pc: return "DW_AT_high_pc"; case DW_AT_language: return "DW_AT_language"; case DW_AT_member: return "DW_AT_member"; case DW_AT_discr: return "DW_AT_discr"; case DW_AT_discr_value: return "DW_AT_discr_value"; case DW_AT_visibility: return "DW_AT_visibility"; case DW_AT_import: return "DW_AT_import"; case DW_AT_string_length: return "DW_AT_string_length"; case DW_AT_common_reference: return "DW_AT_common_reference"; case DW_AT_comp_dir: return "DW_AT_comp_dir"; case DW_AT_const_value: return "DW_AT_const_value"; case DW_AT_containing_type: return "DW_AT_containing_type"; case DW_AT_default_value: return "DW_AT_default_value"; case DW_AT_inline: return "DW_AT_inline"; case DW_AT_is_optional: return "DW_AT_is_optional"; case DW_AT_lower_bound: return "DW_AT_lower_bound"; case DW_AT_producer: return "DW_AT_producer"; case DW_AT_prototyped: return "DW_AT_prototyped"; case DW_AT_return_addr: return "DW_AT_return_addr"; case DW_AT_start_scope: return "DW_AT_start_scope"; case DW_AT_stride_size: return "DW_AT_stride_size"; case DW_AT_upper_bound: return "DW_AT_upper_bound"; case DW_AT_abstract_origin: return "DW_AT_abstract_origin"; case DW_AT_accessibility: return "DW_AT_accessibility"; case DW_AT_address_class: return "DW_AT_address_class"; case DW_AT_artificial: return "DW_AT_artificial"; case DW_AT_base_types: return "DW_AT_base_types"; case DW_AT_calling_convention: return "DW_AT_calling_convention"; case DW_AT_count: return "DW_AT_count"; case DW_AT_data_member_location: return "DW_AT_data_member_location"; case DW_AT_decl_column: return "DW_AT_decl_column"; case DW_AT_decl_file: return "DW_AT_decl_file"; case DW_AT_decl_line: return "DW_AT_decl_line"; case DW_AT_declaration: return "DW_AT_declaration"; case DW_AT_discr_list: return "DW_AT_discr_list"; case DW_AT_encoding: return "DW_AT_encoding"; case DW_AT_external: return "DW_AT_external"; case DW_AT_frame_base: return "DW_AT_frame_base"; case DW_AT_friend: return "DW_AT_friend"; case DW_AT_identifier_case: return "DW_AT_identifier_case"; case DW_AT_macro_info: return "DW_AT_macro_info"; case DW_AT_namelist_items: return "DW_AT_namelist_items"; case DW_AT_priority: return "DW_AT_priority"; case DW_AT_segment: return "DW_AT_segment"; case DW_AT_specification: return "DW_AT_specification"; case DW_AT_static_link: return "DW_AT_static_link"; case DW_AT_type: return "DW_AT_type"; case DW_AT_use_location: return "DW_AT_use_location"; case DW_AT_variable_parameter: return "DW_AT_variable_parameter"; case DW_AT_virtuality: return "DW_AT_virtuality"; case DW_AT_vtable_elem_location: return "DW_AT_vtable_elem_location"; case DW_AT_allocated: return "DW_AT_allocated"; case DW_AT_associated: return "DW_AT_associated"; case DW_AT_data_location: return "DW_AT_data_location"; case DW_AT_stride: return "DW_AT_stride"; case DW_AT_entry_pc: return "DW_AT_entry_pc"; case DW_AT_use_UTF8: return "DW_AT_use_UTF8"; case DW_AT_extension: return "DW_AT_extension"; 
case DW_AT_ranges: return "DW_AT_ranges"; case DW_AT_trampoline: return "DW_AT_trampoline"; case DW_AT_call_column: return "DW_AT_call_column"; case DW_AT_call_file: return "DW_AT_call_file"; case DW_AT_call_line: return "DW_AT_call_line"; case DW_AT_MIPS_fde: return "DW_AT_MIPS_fde"; case DW_AT_MIPS_loop_begin: return "DW_AT_MIPS_loop_begin"; case DW_AT_MIPS_tail_loop_begin: return "DW_AT_MIPS_tail_loop_begin"; case DW_AT_MIPS_epilog_begin: return "DW_AT_MIPS_epilog_begin"; case DW_AT_MIPS_loop_unroll_factor: return "DW_AT_MIPS_loop_unroll_factor"; case DW_AT_MIPS_software_pipeline_depth: return "DW_AT_MIPS_software_pipeline_depth"; case DW_AT_MIPS_linkage_name: return "DW_AT_MIPS_linkage_name"; case DW_AT_MIPS_stride: return "DW_AT_MIPS_stride"; case DW_AT_MIPS_abstract_name: return "DW_AT_MIPS_abstract_name"; case DW_AT_MIPS_clone_origin: return "DW_AT_MIPS_clone_origin"; case DW_AT_MIPS_has_inlines: return "DW_AT_MIPS_has_inlines"; case DW_AT_sf_names: return "DW_AT_sf_names"; case DW_AT_src_info: return "DW_AT_src_info"; case DW_AT_mac_info: return "DW_AT_mac_info"; case DW_AT_src_coords: return "DW_AT_src_coords"; case DW_AT_body_begin: return "DW_AT_body_begin"; case DW_AT_body_end: return "DW_AT_body_end"; case DW_AT_GNU_vector: return "DW_AT_GNU_vector"; case DW_AT_VMS_rtnbeg_pd_address: return "DW_AT_VMS_rtnbeg_pd_address"; default: return "DW_AT_<unknown>"; } } /* Convert a DWARF value form code into its string name. */ static const char * dwarf_form_name (unsigned int form) { switch (form) { case DW_FORM_addr: return "DW_FORM_addr"; case DW_FORM_block2: return "DW_FORM_block2"; case DW_FORM_block4: return "DW_FORM_block4"; case DW_FORM_data2: return "DW_FORM_data2"; case DW_FORM_data4: return "DW_FORM_data4"; case DW_FORM_data8: return "DW_FORM_data8"; case DW_FORM_string: return "DW_FORM_string"; case DW_FORM_block: return "DW_FORM_block"; case DW_FORM_block1: return "DW_FORM_block1"; case DW_FORM_data1: return "DW_FORM_data1"; case DW_FORM_flag: return "DW_FORM_flag"; case DW_FORM_sdata: return "DW_FORM_sdata"; case DW_FORM_strp: return "DW_FORM_strp"; case DW_FORM_udata: return "DW_FORM_udata"; case DW_FORM_ref_addr: return "DW_FORM_ref_addr"; case DW_FORM_ref1: return "DW_FORM_ref1"; case DW_FORM_ref2: return "DW_FORM_ref2"; case DW_FORM_ref4: return "DW_FORM_ref4"; case DW_FORM_ref8: return "DW_FORM_ref8"; case DW_FORM_ref_udata: return "DW_FORM_ref_udata"; case DW_FORM_indirect: return "DW_FORM_indirect"; default: return "DW_FORM_<unknown>"; } } /* Convert a DWARF type code into its string name. */ #if 0 static const char * dwarf_type_encoding_name (unsigned enc) { switch (enc) { case DW_ATE_address: return "DW_ATE_address"; case DW_ATE_boolean: return "DW_ATE_boolean"; case DW_ATE_complex_float: return "DW_ATE_complex_float"; case DW_ATE_float: return "DW_ATE_float"; case DW_ATE_signed: return "DW_ATE_signed"; case DW_ATE_signed_char: return "DW_ATE_signed_char"; case DW_ATE_unsigned: return "DW_ATE_unsigned"; case DW_ATE_unsigned_char: return "DW_ATE_unsigned_char"; default: return "DW_ATE_<unknown>"; } } #endif /* Determine the "ultimate origin" of a decl. The decl may be an inlined instance of an inlined instance of a decl which is local to an inline function, so we have to trace all of the way back through the origin chain to find out what sort of node actually served as the original seed for the given decl. 
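   As a rough illustration: when an inline instance of a function is itself inlined somewhere else, the copies of its local decls carry DECL_ABSTRACT_ORIGIN links leading back toward the decl in the original (abstract) function definition; that outermost node is the ultimate origin returned here.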
*/ static tree decl_ultimate_origin (tree decl) { /* output_inline_function sets DECL_ABSTRACT_ORIGIN for all the nodes in the function to point to themselves; ignore that if we're trying to output the abstract instance of this function. */ if (DECL_ABSTRACT (decl) && DECL_ABSTRACT_ORIGIN (decl) == decl) return NULL_TREE; #ifdef ENABLE_CHECKING if (DECL_FROM_INLINE (DECL_ORIGIN (decl))) /* Since the DECL_ABSTRACT_ORIGIN for a DECL is supposed to be the most distant ancestor, this should never happen. */ abort (); #endif return DECL_ABSTRACT_ORIGIN (decl); } /* Determine the "ultimate origin" of a block. The block may be an inlined instance of an inlined instance of a block which is local to an inline function, so we have to trace all of the way back through the origin chain to find out what sort of node actually served as the original seed for the given block. */ static tree block_ultimate_origin (tree block) { tree immediate_origin = BLOCK_ABSTRACT_ORIGIN (block); /* output_inline_function sets BLOCK_ABSTRACT_ORIGIN for all the nodes in the function to point to themselves; ignore that if we're trying to output the abstract instance of this function. */ if (BLOCK_ABSTRACT (block) && immediate_origin == block) return NULL_TREE; if (immediate_origin == NULL_TREE) return NULL_TREE; else { tree ret_val; tree lookahead = immediate_origin; do { ret_val = lookahead; lookahead = (TREE_CODE (ret_val) == BLOCK ? BLOCK_ABSTRACT_ORIGIN (ret_val) : NULL); } while (lookahead != NULL && lookahead != ret_val); return ret_val; } } /* Get the class to which DECL belongs, if any. In g++, the DECL_CONTEXT of a virtual function may refer to a base class, so we check the 'this' parameter. */ static tree decl_class_context (tree decl) { tree context = NULL_TREE; if (TREE_CODE (decl) != FUNCTION_DECL || ! DECL_VINDEX (decl)) context = DECL_CONTEXT (decl); else context = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl))))); if (context && !TYPE_P (context)) context = NULL_TREE; return context; } /* Add an attribute/value pair to a DIE. We build the lists up in reverse addition order, and correct that in reverse_all_dies. */ static inline void add_dwarf_attr (dw_die_ref die, dw_attr_ref attr) { if (die != NULL && attr != NULL) { attr->dw_attr_next = die->die_attr; die->die_attr = attr; } } static inline enum dw_val_class AT_class (dw_attr_ref a) { return a->dw_attr_val.val_class; } /* Add a flag value attribute to a DIE. */ static inline void add_AT_flag (dw_die_ref die, enum dwarf_attribute attr_kind, unsigned int flag) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_flag; attr->dw_attr_val.v.val_flag = flag; add_dwarf_attr (die, attr); } static inline unsigned AT_flag (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_flag) return a->dw_attr_val.v.val_flag; abort (); } /* Add a signed integer attribute value to a DIE. */ static inline void add_AT_int (dw_die_ref die, enum dwarf_attribute attr_kind, HOST_WIDE_INT int_val) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_const; attr->dw_attr_val.v.val_int = int_val; add_dwarf_attr (die, attr); } static inline HOST_WIDE_INT AT_int (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_const) return a->dw_attr_val.v.val_int; abort (); } /* Add an unsigned integer attribute value to a DIE. 
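   For example (illustrative usage), add_AT_unsigned (die, DW_AT_byte_size, 4) attaches a dw_val_class_unsigned_const node holding the value 4 to DIE.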
*/ static inline void add_AT_unsigned (dw_die_ref die, enum dwarf_attribute attr_kind, unsigned HOST_WIDE_INT unsigned_val) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_unsigned_const; attr->dw_attr_val.v.val_unsigned = unsigned_val; add_dwarf_attr (die, attr); } static inline unsigned HOST_WIDE_INT AT_unsigned (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_unsigned_const) return a->dw_attr_val.v.val_unsigned; abort (); } /* Add an unsigned double integer attribute value to a DIE. */ static inline void add_AT_long_long (dw_die_ref die, enum dwarf_attribute attr_kind, long unsigned int val_hi, long unsigned int val_low) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_long_long; attr->dw_attr_val.v.val_long_long.hi = val_hi; attr->dw_attr_val.v.val_long_long.low = val_low; add_dwarf_attr (die, attr); } /* Add a floating point attribute value to a DIE and return it. */ static inline void add_AT_vec (dw_die_ref die, enum dwarf_attribute attr_kind, unsigned int length, unsigned int elt_size, unsigned char *array) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_vec; attr->dw_attr_val.v.val_vec.length = length; attr->dw_attr_val.v.val_vec.elt_size = elt_size; attr->dw_attr_val.v.val_vec.array = array; add_dwarf_attr (die, attr); } /* Hash and equality functions for debug_str_hash. */ static hashval_t debug_str_do_hash (const void *x) { return htab_hash_string (((const struct indirect_string_node *)x)->str); } static int debug_str_eq (const void *x1, const void *x2) { return strcmp ((((const struct indirect_string_node *)x1)->str), (const char *)x2) == 0; } /* Add a string attribute value to a DIE. */ static inline void add_AT_string (dw_die_ref die, enum dwarf_attribute attr_kind, const char *str) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); struct indirect_string_node *node; void **slot; if (! debug_str_hash) debug_str_hash = htab_create_ggc (10, debug_str_do_hash, debug_str_eq, NULL); slot = htab_find_slot_with_hash (debug_str_hash, str, htab_hash_string (str), INSERT); if (*slot == NULL) *slot = ggc_alloc_cleared (sizeof (struct indirect_string_node)); node = (struct indirect_string_node *) *slot; node->str = ggc_strdup (str); node->refcount++; attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_str; attr->dw_attr_val.v.val_str = node; add_dwarf_attr (die, attr); } static inline const char * AT_string (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_str) return a->dw_attr_val.v.val_str->str; abort (); } /* Find out whether a string should be output inline in DIE or out-of-line in .debug_str section. */ static int AT_string_form (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_str) { struct indirect_string_node *node; unsigned int len; char label[32]; node = a->dw_attr_val.v.val_str; if (node->form) return node->form; len = strlen (node->str) + 1; /* If the string is shorter or equal to the size of the reference, it is always better to put it inline. */ if (len <= DWARF_OFFSET_SIZE || node->refcount == 0) return node->form = DW_FORM_string; /* If we cannot expect the linker to merge strings in .debug_str section, only put it into .debug_str if it is worth even in this single module. 
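   As an illustration: with the usual 4-byte DWARF_OFFSET_SIZE, a three-character name (4 bytes including its terminating NUL) is no larger than an offset and stays DW_FORM_string, whereas a long identifier referenced from many DIEs is emitted once in .debug_str and referred to with DW_FORM_strp.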
*/ if ((DEBUG_STR_SECTION_FLAGS & SECTION_MERGE) == 0 && (len - DWARF_OFFSET_SIZE) * node->refcount <= len) return node->form = DW_FORM_string; ASM_GENERATE_INTERNAL_LABEL (label, "LASF", dw2_string_counter); ++dw2_string_counter; node->label = xstrdup (label); return node->form = DW_FORM_strp; } abort (); } /* Add a DIE reference attribute value to a DIE. */ static inline void add_AT_die_ref (dw_die_ref die, enum dwarf_attribute attr_kind, dw_die_ref targ_die) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_die_ref; attr->dw_attr_val.v.val_die_ref.die = targ_die; attr->dw_attr_val.v.val_die_ref.external = 0; add_dwarf_attr (die, attr); } /* Add an AT_specification attribute to a DIE, and also make the back pointer from the specification to the definition. */ static inline void add_AT_specification (dw_die_ref die, dw_die_ref targ_die) { add_AT_die_ref (die, DW_AT_specification, targ_die); if (targ_die->die_definition) abort (); targ_die->die_definition = die; } static inline dw_die_ref AT_ref (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_die_ref) return a->dw_attr_val.v.val_die_ref.die; abort (); } static inline int AT_ref_external (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_die_ref) return a->dw_attr_val.v.val_die_ref.external; return 0; } static inline void set_AT_ref_external (dw_attr_ref a, int i) { if (a && AT_class (a) == dw_val_class_die_ref) a->dw_attr_val.v.val_die_ref.external = i; else abort (); } /* Add an FDE reference attribute value to a DIE. */ static inline void add_AT_fde_ref (dw_die_ref die, enum dwarf_attribute attr_kind, unsigned int targ_fde) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_fde_ref; attr->dw_attr_val.v.val_fde_index = targ_fde; add_dwarf_attr (die, attr); } /* Add a location description attribute value to a DIE. */ static inline void add_AT_loc (dw_die_ref die, enum dwarf_attribute attr_kind, dw_loc_descr_ref loc) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_loc; attr->dw_attr_val.v.val_loc = loc; add_dwarf_attr (die, attr); } static inline dw_loc_descr_ref AT_loc (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_loc) return a->dw_attr_val.v.val_loc; abort (); } static inline void add_AT_loc_list (dw_die_ref die, enum dwarf_attribute attr_kind, dw_loc_list_ref loc_list) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_loc_list; attr->dw_attr_val.v.val_loc_list = loc_list; add_dwarf_attr (die, attr); have_location_lists = 1; } static inline dw_loc_list_ref AT_loc_list (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_loc_list) return a->dw_attr_val.v.val_loc_list; abort (); } /* Add an address constant attribute value to a DIE. 
*/ static inline void add_AT_addr (dw_die_ref die, enum dwarf_attribute attr_kind, rtx addr) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_addr; attr->dw_attr_val.v.val_addr = addr; add_dwarf_attr (die, attr); } static inline rtx AT_addr (dw_attr_ref a) { if (a && AT_class (a) == dw_val_class_addr) return a->dw_attr_val.v.val_addr; abort (); } /* Add a label identifier attribute value to a DIE. */ static inline void add_AT_lbl_id (dw_die_ref die, enum dwarf_attribute attr_kind, const char *lbl_id) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_lbl_id; attr->dw_attr_val.v.val_lbl_id = xstrdup (lbl_id); add_dwarf_attr (die, attr); } /* Add a section offset attribute value to a DIE. */ static inline void add_AT_lbl_offset (dw_die_ref die, enum dwarf_attribute attr_kind, const char *label) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_lbl_offset; attr->dw_attr_val.v.val_lbl_id = xstrdup (label); add_dwarf_attr (die, attr); } /* Add an offset attribute value to a DIE. */ static inline void add_AT_offset (dw_die_ref die, enum dwarf_attribute attr_kind, unsigned HOST_WIDE_INT offset) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_offset; attr->dw_attr_val.v.val_offset = offset; add_dwarf_attr (die, attr); } /* Add an range_list attribute value to a DIE. */ static void add_AT_range_list (dw_die_ref die, enum dwarf_attribute attr_kind, long unsigned int offset) { dw_attr_ref attr = ggc_alloc (sizeof (dw_attr_node)); attr->dw_attr_next = NULL; attr->dw_attr = attr_kind; attr->dw_attr_val.val_class = dw_val_class_range_list; attr->dw_attr_val.v.val_offset = offset; add_dwarf_attr (die, attr); } static inline const char * AT_lbl (dw_attr_ref a) { if (a && (AT_class (a) == dw_val_class_lbl_id || AT_class (a) == dw_val_class_lbl_offset)) return a->dw_attr_val.v.val_lbl_id; abort (); } /* Get the attribute of type attr_kind. */ static dw_attr_ref get_AT (dw_die_ref die, enum dwarf_attribute attr_kind) { dw_attr_ref a; dw_die_ref spec = NULL; if (die != NULL) { for (a = die->die_attr; a != NULL; a = a->dw_attr_next) if (a->dw_attr == attr_kind) return a; else if (a->dw_attr == DW_AT_specification || a->dw_attr == DW_AT_abstract_origin) spec = AT_ref (a); if (spec) return get_AT (spec, attr_kind); } return NULL; } /* Return the "low pc" attribute value, typically associated with a subprogram DIE. Return null if the "low pc" attribute is either not present, or if it cannot be represented as an assembler label identifier. */ static inline const char * get_AT_low_pc (dw_die_ref die) { dw_attr_ref a = get_AT (die, DW_AT_low_pc); return a ? AT_lbl (a) : NULL; } /* Return the "high pc" attribute value, typically associated with a subprogram DIE. Return null if the "high pc" attribute is either not present, or if it cannot be represented as an assembler label identifier. */ static inline const char * get_AT_hi_pc (dw_die_ref die) { dw_attr_ref a = get_AT (die, DW_AT_high_pc); return a ? AT_lbl (a) : NULL; } /* Return the value of the string attribute designated by ATTR_KIND, or NULL if it is not present. 
*/ static inline const char * get_AT_string (dw_die_ref die, enum dwarf_attribute attr_kind) { dw_attr_ref a = get_AT (die, attr_kind); return a ? AT_string (a) : NULL; } /* Return the value of the flag attribute designated by ATTR_KIND, or -1 if it is not present. */ static inline int get_AT_flag (dw_die_ref die, enum dwarf_attribute attr_kind) { dw_attr_ref a = get_AT (die, attr_kind); return a ? AT_flag (a) : 0; } /* Return the value of the unsigned attribute designated by ATTR_KIND, or 0 if it is not present. */ static inline unsigned get_AT_unsigned (dw_die_ref die, enum dwarf_attribute attr_kind) { dw_attr_ref a = get_AT (die, attr_kind); return a ? AT_unsigned (a) : 0; } static inline dw_die_ref get_AT_ref (dw_die_ref die, enum dwarf_attribute attr_kind) { dw_attr_ref a = get_AT (die, attr_kind); return a ? AT_ref (a) : NULL; } /* Return TRUE if the language is C or C++. */ static inline bool is_c_family (void) { unsigned int lang = get_AT_unsigned (comp_unit_die, DW_AT_language); return (lang == DW_LANG_C || lang == DW_LANG_C89 || lang == DW_LANG_C_plus_plus); } /* Return TRUE if the language is C++. */ static inline bool is_cxx (void) { return (get_AT_unsigned (comp_unit_die, DW_AT_language) == DW_LANG_C_plus_plus); } /* Return TRUE if the language is Fortran. */ static inline bool is_fortran (void) { unsigned int lang = get_AT_unsigned (comp_unit_die, DW_AT_language); return (lang == DW_LANG_Fortran77 || lang == DW_LANG_Fortran90 || lang == DW_LANG_Fortran95); } /* Return TRUE if the language is Java. */ static inline bool is_java (void) { unsigned int lang = get_AT_unsigned (comp_unit_die, DW_AT_language); return lang == DW_LANG_Java; } /* Return TRUE if the language is Ada. */ static inline bool is_ada (void) { unsigned int lang = get_AT_unsigned (comp_unit_die, DW_AT_language); return lang == DW_LANG_Ada95 || lang == DW_LANG_Ada83; } /* Free up the memory used by A. */ static inline void free_AT (dw_attr_ref); static inline void free_AT (dw_attr_ref a) { if (AT_class (a) == dw_val_class_str) if (a->dw_attr_val.v.val_str->refcount) a->dw_attr_val.v.val_str->refcount--; } /* Remove the specified attribute if present. */ static void remove_AT (dw_die_ref die, enum dwarf_attribute attr_kind) { dw_attr_ref *p; dw_attr_ref removed = NULL; if (die != NULL) { for (p = &(die->die_attr); *p; p = &((*p)->dw_attr_next)) if ((*p)->dw_attr == attr_kind) { removed = *p; *p = (*p)->dw_attr_next; break; } if (removed != 0) free_AT (removed); } } /* Remove child die whose die_tag is specified tag. */ static void remove_child_TAG (dw_die_ref die, enum dwarf_tag tag) { dw_die_ref current, prev, next; current = die->die_child; prev = NULL; while (current != NULL) { if (current->die_tag == tag) { next = current->die_sib; if (prev == NULL) die->die_child = next; else prev->die_sib = next; free_die (current); current = next; } else { prev = current; current = current->die_sib; } } } /* Free up the memory used by DIE. */ static inline void free_die (dw_die_ref die) { remove_children (die); } /* Discard the children of this DIE. */ static void remove_children (dw_die_ref die) { dw_die_ref child_die = die->die_child; die->die_child = NULL; while (child_die != NULL) { dw_die_ref tmp_die = child_die; dw_attr_ref a; child_die = child_die->die_sib; for (a = tmp_die->die_attr; a != NULL;) { dw_attr_ref tmp_a = a; a = a->dw_attr_next; free_AT (tmp_a); } free_die (tmp_die); } } /* Add a child DIE below its parent. We build the lists up in reverse addition order, and correct that in reverse_all_dies. 
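   Illustration: after add_child_die (p, a), add_child_die (p, b) and add_child_die (p, c), the parent has p->die_child == c with sibling links c -> b -> a; reverse_die_lists later restores the original a, b, c order.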
*/ static inline void add_child_die (dw_die_ref die, dw_die_ref child_die) { if (die != NULL && child_die != NULL) { if (die == child_die) abort (); child_die->die_parent = die; child_die->die_sib = die->die_child; die->die_child = child_die; } } /* Move CHILD, which must be a child of PARENT or the DIE for which PARENT is the specification, to the front of PARENT's list of children. */ static void splice_child_die (dw_die_ref parent, dw_die_ref child) { dw_die_ref *p; /* We want the declaration DIE from inside the class, not the specification DIE at toplevel. */ if (child->die_parent != parent) { dw_die_ref tmp = get_AT_ref (child, DW_AT_specification); if (tmp) child = tmp; } if (child->die_parent != parent && child->die_parent != get_AT_ref (parent, DW_AT_specification)) abort (); for (p = &(child->die_parent->die_child); *p; p = &((*p)->die_sib)) if (*p == child) { *p = child->die_sib; break; } child->die_parent = parent; child->die_sib = parent->die_child; parent->die_child = child; } /* Return a pointer to a newly created DIE node. */ static inline dw_die_ref new_die (enum dwarf_tag tag_value, dw_die_ref parent_die, tree t) { dw_die_ref die = ggc_alloc_cleared (sizeof (die_node)); die->die_tag = tag_value; if (parent_die != NULL) add_child_die (parent_die, die); else { limbo_die_node *limbo_node; limbo_node = ggc_alloc_cleared (sizeof (limbo_die_node)); limbo_node->die = die; limbo_node->created_for = t; limbo_node->next = limbo_die_list; limbo_die_list = limbo_node; } return die; } /* Return the DIE associated with the given type specifier. */ static inline dw_die_ref lookup_type_die (tree type) { return TYPE_SYMTAB_DIE (type); } /* Equate a DIE to a given type specifier. */ static inline void equate_type_number_to_die (tree type, dw_die_ref type_die) { TYPE_SYMTAB_DIE (type) = type_die; } /* Returns a hash value for X (which really is a die_struct). */ static hashval_t decl_die_table_hash (const void *x) { return (hashval_t) ((const dw_die_ref) x)->decl_id; } /* Return nonzero if decl_id of die_struct X is the same as UID of decl *Y. */ static int decl_die_table_eq (const void *x, const void *y) { return (((const dw_die_ref) x)->decl_id == DECL_UID ((const tree) y)); } /* Return the DIE associated with a given declaration. */ static inline dw_die_ref lookup_decl_die (tree decl) { return htab_find_with_hash (decl_die_table, decl, DECL_UID (decl)); } /* Returns a hash value for X (which really is a var_loc_list). */ static hashval_t decl_loc_table_hash (const void *x) { return (hashval_t) ((const var_loc_list *) x)->decl_id; } /* Return nonzero if decl_id of var_loc_list X is the same as UID of decl *Y. */ static int decl_loc_table_eq (const void *x, const void *y) { return (((const var_loc_list *) x)->decl_id == DECL_UID ((const tree) y)); } /* Return the var_loc list associated with a given declaration. */ static inline var_loc_list * lookup_decl_loc (tree decl) { return htab_find_with_hash (decl_loc_table, decl, DECL_UID (decl)); } /* Equate a DIE to a particular declaration. */ static void equate_decl_number_to_die (tree decl, dw_die_ref decl_die) { unsigned int decl_id = DECL_UID (decl); void **slot; slot = htab_find_slot_with_hash (decl_die_table, decl, decl_id, INSERT); *slot = decl_die; decl_die->decl_id = decl_id; } /* Add a variable location node to the linked list for DECL. 
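   For example: a note describing the same location as the current tail of the list is dropped, and an empty location is ignored while the list is still empty; otherwise LOC is appended and becomes the new tail.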
*/ static void add_var_loc_to_decl (tree decl, struct var_loc_node *loc) { unsigned int decl_id = DECL_UID (decl); var_loc_list *temp; void **slot; slot = htab_find_slot_with_hash (decl_loc_table, decl, decl_id, INSERT); if (*slot == NULL) { temp = ggc_alloc_cleared (sizeof (var_loc_list)); temp->decl_id = decl_id; *slot = temp; } else temp = *slot; if (temp->last) { /* If the current location is the same as the end of the list, we have nothing to do. */ if (!rtx_equal_p (NOTE_VAR_LOCATION_LOC (temp->last->var_loc_note), NOTE_VAR_LOCATION_LOC (loc->var_loc_note))) { /* Add LOC to the end of list and update LAST. */ temp->last->next = loc; temp->last = loc; } } /* Do not add empty location to the beginning of the list. */ else if (NOTE_VAR_LOCATION_LOC (loc->var_loc_note) != NULL_RTX) { temp->first = loc; temp->last = loc; } } /* Keep track of the number of spaces used to indent the output of the debugging routines that print the structure of the DIE internal representation. */ static int print_indent; /* Indent the line by the number of spaces given by print_indent. */ static inline void print_spaces (FILE *outfile) { fprintf (outfile, "%*s", print_indent, ""); } /* Print the information associated with a given DIE, and its children. This routine is a debugging aid only. */ static void print_die (dw_die_ref die, FILE *outfile) { dw_attr_ref a; dw_die_ref c; print_spaces (outfile); fprintf (outfile, "DIE %4lu: %s\n", die->die_offset, dwarf_tag_name (die->die_tag)); print_spaces (outfile); fprintf (outfile, " abbrev id: %lu", die->die_abbrev); fprintf (outfile, " offset: %lu\n", die->die_offset); for (a = die->die_attr; a != NULL; a = a->dw_attr_next) { print_spaces (outfile); fprintf (outfile, " %s: ", dwarf_attr_name (a->dw_attr)); switch (AT_class (a)) { case dw_val_class_addr: fprintf (outfile, "address"); break; case dw_val_class_offset: fprintf (outfile, "offset"); break; case dw_val_class_loc: fprintf (outfile, "location descriptor"); break; case dw_val_class_loc_list: fprintf (outfile, "location list -> label:%s", AT_loc_list (a)->ll_symbol); break; case dw_val_class_range_list: fprintf (outfile, "range list"); break; case dw_val_class_const: fprintf (outfile, HOST_WIDE_INT_PRINT_DEC, AT_int (a)); break; case dw_val_class_unsigned_const: fprintf (outfile, HOST_WIDE_INT_PRINT_UNSIGNED, AT_unsigned (a)); break; case dw_val_class_long_long: fprintf (outfile, "constant (%lu,%lu)", a->dw_attr_val.v.val_long_long.hi, a->dw_attr_val.v.val_long_long.low); break; case dw_val_class_vec: fprintf (outfile, "floating-point or vector constant"); break; case dw_val_class_flag: fprintf (outfile, "%u", AT_flag (a)); break; case dw_val_class_die_ref: if (AT_ref (a) != NULL) { if (AT_ref (a)->die_symbol) fprintf (outfile, "die -> label: %s", AT_ref (a)->die_symbol); else fprintf (outfile, "die -> %lu", AT_ref (a)->die_offset); } else fprintf (outfile, "die -> <null>"); break; case dw_val_class_lbl_id: case dw_val_class_lbl_offset: fprintf (outfile, "label: %s", AT_lbl (a)); break; case dw_val_class_str: if (AT_string (a) != NULL) fprintf (outfile, "\"%s\"", AT_string (a)); else fprintf (outfile, "<null>"); break; default: break; } fprintf (outfile, "\n"); } if (die->die_child != NULL) { print_indent += 4; for (c = die->die_child; c != NULL; c = c->die_sib) print_die (c, outfile); print_indent -= 4; } if (print_indent == 0) fprintf (outfile, "\n"); } /* Print the contents of the source code line number correspondence table. This routine is a debugging aid only. 
*/ static void print_dwarf_line_table (FILE *outfile) { unsigned i; dw_line_info_ref line_info; fprintf (outfile, "\n\nDWARF source line information\n"); for (i = 1; i < line_info_table_in_use; i++) { line_info = &line_info_table[i]; fprintf (outfile, "%5d: ", i); fprintf (outfile, "%-20s", VARRAY_CHAR_PTR (file_table, line_info->dw_file_num)); fprintf (outfile, "%6ld", line_info->dw_line_num); fprintf (outfile, "\n"); } fprintf (outfile, "\n\n"); } /* Print the information collected for a given DIE. */ void debug_dwarf_die (dw_die_ref die) { print_die (die, stderr); } /* Print all DWARF information collected for the compilation unit. This routine is a debugging aid only. */ void debug_dwarf (void) { print_indent = 0; print_die (comp_unit_die, stderr); if (! DWARF2_ASM_LINE_DEBUG_INFO) print_dwarf_line_table (stderr); } /* We build up the lists of children and attributes by pushing new ones onto the beginning of the list. Reverse the lists for DIE so that they are in order of addition. */ static void reverse_die_lists (dw_die_ref die) { dw_die_ref c, cp, cn; dw_attr_ref a, ap, an; for (a = die->die_attr, ap = 0; a; a = an) { an = a->dw_attr_next; a->dw_attr_next = ap; ap = a; } die->die_attr = ap; for (c = die->die_child, cp = 0; c; c = cn) { cn = c->die_sib; c->die_sib = cp; cp = c; } die->die_child = cp; } /* reverse_die_lists only reverses the single die you pass it. Since we used to reverse all dies in add_sibling_attributes, which runs through all the dies, it would reverse all the dies. Now, however, since we don't call reverse_die_lists in add_sibling_attributes, we need a routine to recursively reverse all the dies. This is that routine. */ static void reverse_all_dies (dw_die_ref die) { dw_die_ref c; reverse_die_lists (die); for (c = die->die_child; c; c = c->die_sib) reverse_all_dies (c); } /* Start a new compilation unit DIE for an include file. OLD_UNIT is the CU for the enclosing include file, if any. BINCL_DIE is the DW_TAG_GNU_BINCL DIE that marks the start of the DIEs for this include file. */ static dw_die_ref push_new_compile_unit (dw_die_ref old_unit, dw_die_ref bincl_die) { const char *filename = get_AT_string (bincl_die, DW_AT_name); dw_die_ref new_unit = gen_compile_unit_die (filename); new_unit->die_sib = old_unit; return new_unit; } /* Close an include-file CU and reopen the enclosing one. */ static dw_die_ref pop_compile_unit (dw_die_ref old_unit) { dw_die_ref new_unit = old_unit->die_sib; old_unit->die_sib = NULL; return new_unit; } #define CHECKSUM(FOO) md5_process_bytes (&(FOO), sizeof (FOO), ctx) #define CHECKSUM_STRING(FOO) md5_process_bytes ((FOO), strlen (FOO), ctx) /* Calculate the checksum of a location expression. */ static inline void loc_checksum (dw_loc_descr_ref loc, struct md5_ctx *ctx) { CHECKSUM (loc->dw_loc_opc); CHECKSUM (loc->dw_loc_oprnd1); CHECKSUM (loc->dw_loc_oprnd2); } /* Calculate the checksum of an attribute. */ static void attr_checksum (dw_attr_ref at, struct md5_ctx *ctx, int *mark) { dw_loc_descr_ref loc; rtx r; CHECKSUM (at->dw_attr); /* We don't care about differences in file numbering. */ if (at->dw_attr == DW_AT_decl_file /* Or that this was compiled with a different compiler snapshot; if the output is the same, that's what matters. 
*/ || at->dw_attr == DW_AT_producer) return; switch (AT_class (at)) { case dw_val_class_const: CHECKSUM (at->dw_attr_val.v.val_int); break; case dw_val_class_unsigned_const: CHECKSUM (at->dw_attr_val.v.val_unsigned); break; case dw_val_class_long_long: CHECKSUM (at->dw_attr_val.v.val_long_long); break; case dw_val_class_vec: CHECKSUM (at->dw_attr_val.v.val_vec); break; case dw_val_class_flag: CHECKSUM (at->dw_attr_val.v.val_flag); break; case dw_val_class_str: CHECKSUM_STRING (AT_string (at)); break; case dw_val_class_addr: r = AT_addr (at); switch (GET_CODE (r)) { case SYMBOL_REF: CHECKSUM_STRING (XSTR (r, 0)); break; default: abort (); } break; case dw_val_class_offset: CHECKSUM (at->dw_attr_val.v.val_offset); break; case dw_val_class_loc: for (loc = AT_loc (at); loc; loc = loc->dw_loc_next) loc_checksum (loc, ctx); break; case dw_val_class_die_ref: die_checksum (AT_ref (at), ctx, mark); break; case dw_val_class_fde_ref: case dw_val_class_lbl_id: case dw_val_class_lbl_offset: break; default: break; } } /* Calculate the checksum of a DIE. */ static void die_checksum (dw_die_ref die, struct md5_ctx *ctx, int *mark) { dw_die_ref c; dw_attr_ref a; /* To avoid infinite recursion. */ if (die->die_mark) { CHECKSUM (die->die_mark); return; } die->die_mark = ++(*mark); CHECKSUM (die->die_tag); for (a = die->die_attr; a; a = a->dw_attr_next) attr_checksum (a, ctx, mark); for (c = die->die_child; c; c = c->die_sib) die_checksum (c, ctx, mark); } #undef CHECKSUM #undef CHECKSUM_STRING /* Do the location expressions look same? */ static inline int same_loc_p (dw_loc_descr_ref loc1, dw_loc_descr_ref loc2, int *mark) { return loc1->dw_loc_opc == loc2->dw_loc_opc && same_dw_val_p (&loc1->dw_loc_oprnd1, &loc2->dw_loc_oprnd1, mark) && same_dw_val_p (&loc1->dw_loc_oprnd2, &loc2->dw_loc_oprnd2, mark); } /* Do the values look the same? */ static int same_dw_val_p (dw_val_node *v1, dw_val_node *v2, int *mark) { dw_loc_descr_ref loc1, loc2; rtx r1, r2; if (v1->val_class != v2->val_class) return 0; switch (v1->val_class) { case dw_val_class_const: return v1->v.val_int == v2->v.val_int; case dw_val_class_unsigned_const: return v1->v.val_unsigned == v2->v.val_unsigned; case dw_val_class_long_long: return v1->v.val_long_long.hi == v2->v.val_long_long.hi && v1->v.val_long_long.low == v2->v.val_long_long.low; case dw_val_class_vec: if (v1->v.val_vec.length != v2->v.val_vec.length || v1->v.val_vec.elt_size != v2->v.val_vec.elt_size) return 0; if (memcmp (v1->v.val_vec.array, v2->v.val_vec.array, v1->v.val_vec.length * v1->v.val_vec.elt_size)) return 0; return 1; case dw_val_class_flag: return v1->v.val_flag == v2->v.val_flag; case dw_val_class_str: return !strcmp(v1->v.val_str->str, v2->v.val_str->str); case dw_val_class_addr: r1 = v1->v.val_addr; r2 = v2->v.val_addr; if (GET_CODE (r1) != GET_CODE (r2)) return 0; switch (GET_CODE (r1)) { case SYMBOL_REF: return !strcmp (XSTR (r1, 0), XSTR (r2, 0)); default: abort (); } case dw_val_class_offset: return v1->v.val_offset == v2->v.val_offset; case dw_val_class_loc: for (loc1 = v1->v.val_loc, loc2 = v2->v.val_loc; loc1 && loc2; loc1 = loc1->dw_loc_next, loc2 = loc2->dw_loc_next) if (!same_loc_p (loc1, loc2, mark)) return 0; return !loc1 && !loc2; case dw_val_class_die_ref: return same_die_p (v1->v.val_die_ref.die, v2->v.val_die_ref.die, mark); case dw_val_class_fde_ref: case dw_val_class_lbl_id: case dw_val_class_lbl_offset: return 1; default: return 1; } } /* Do the attributes look the same? 
*/ static int same_attr_p (dw_attr_ref at1, dw_attr_ref at2, int *mark) { if (at1->dw_attr != at2->dw_attr) return 0; /* We don't care about differences in file numbering. */ if (at1->dw_attr == DW_AT_decl_file /* Or that this was compiled with a different compiler snapshot; if the output is the same, that's what matters. */ || at1->dw_attr == DW_AT_producer) return 1; return same_dw_val_p (&at1->dw_attr_val, &at2->dw_attr_val, mark); } /* Do the dies look the same? */ static int same_die_p (dw_die_ref die1, dw_die_ref die2, int *mark) { dw_die_ref c1, c2; dw_attr_ref a1, a2; /* To avoid infinite recursion. */ if (die1->die_mark) return die1->die_mark == die2->die_mark; die1->die_mark = die2->die_mark = ++(*mark); if (die1->die_tag != die2->die_tag) return 0; for (a1 = die1->die_attr, a2 = die2->die_attr; a1 && a2; a1 = a1->dw_attr_next, a2 = a2->dw_attr_next) if (!same_attr_p (a1, a2, mark)) return 0; if (a1 || a2) return 0; for (c1 = die1->die_child, c2 = die2->die_child; c1 && c2; c1 = c1->die_sib, c2 = c2->die_sib) if (!same_die_p (c1, c2, mark)) return 0; if (c1 || c2) return 0; return 1; } /* Do the dies look the same? Wrapper around same_die_p. */ static int same_die_p_wrap (dw_die_ref die1, dw_die_ref die2) { int mark = 0; int ret = same_die_p (die1, die2, &mark); unmark_all_dies (die1); unmark_all_dies (die2); return ret; } /* The prefix to attach to symbols on DIEs in the current comdat debug info section. */ static char *comdat_symbol_id; /* The index of the current symbol within the current comdat CU. */ static unsigned int comdat_symbol_number; /* Calculate the MD5 checksum of the compilation unit DIE UNIT_DIE and its children, and set comdat_symbol_id accordingly. */ static void compute_section_prefix (dw_die_ref unit_die) { const char *die_name = get_AT_string (unit_die, DW_AT_name); const char *base = die_name ? lbasename (die_name) : "anonymous"; char *name = alloca (strlen (base) + 64); char *p; int i, mark; unsigned char checksum[16]; struct md5_ctx ctx; /* Compute the checksum of the DIE, then append part of it as hex digits to the name filename of the unit. */ md5_init_ctx (&ctx); mark = 0; die_checksum (unit_die, &ctx, &mark); unmark_all_dies (unit_die); md5_finish_ctx (&ctx, checksum); sprintf (name, "%s.", base); clean_symbol_name (name); p = name + strlen (name); for (i = 0; i < 4; i++) { sprintf (p, "%.2x", checksum[i]); p += 2; } comdat_symbol_id = unit_die->die_symbol = xstrdup (name); comdat_symbol_number = 0; } /* Returns nonzero if DIE represents a type, in the sense of TYPE_P. */ static int is_type_die (dw_die_ref die) { switch (die->die_tag) { case DW_TAG_array_type: case DW_TAG_class_type: case DW_TAG_enumeration_type: case DW_TAG_pointer_type: case DW_TAG_reference_type: case DW_TAG_string_type: case DW_TAG_structure_type: case DW_TAG_subroutine_type: case DW_TAG_union_type: case DW_TAG_ptr_to_member_type: case DW_TAG_set_type: case DW_TAG_subrange_type: case DW_TAG_base_type: case DW_TAG_const_type: case DW_TAG_file_type: case DW_TAG_packed_type: case DW_TAG_volatile_type: case DW_TAG_typedef: return 1; default: return 0; } } /* Returns 1 iff C is the sort of DIE that should go into a COMDAT CU. Basically, we want to choose the bits that are likely to be shared between compilations (types) and leave out the bits that are specific to individual compilations (functions). */ static int is_comdat_die (dw_die_ref c) { /* I think we want to leave base types and __vtbl_ptr_type in the main CU, as we do for stabs. 
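   (Editor's illustration, not original text: with the rules below a
   `const char *', for example, stays in the main CU because its
   ultimate target is a base type, while a pointer to a class type
   follows the class into its comdat CU.)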
The advantage is a greater likelihood of sharing between objects that don't include headers in the same order (and therefore would put the base types in a different comdat). jason 8/28/00 */ if (c->die_tag == DW_TAG_base_type) return 0; if (c->die_tag == DW_TAG_pointer_type || c->die_tag == DW_TAG_reference_type || c->die_tag == DW_TAG_const_type || c->die_tag == DW_TAG_volatile_type) { dw_die_ref t = get_AT_ref (c, DW_AT_type); return t ? is_comdat_die (t) : 0; } return is_type_die (c); } /* Returns 1 iff C is the sort of DIE that might be referred to from another compilation unit. */ static int is_symbol_die (dw_die_ref c) { return (is_type_die (c) || (get_AT (c, DW_AT_declaration) && !get_AT (c, DW_AT_specification))); } static char * gen_internal_sym (const char *prefix) { char buf[256]; ASM_GENERATE_INTERNAL_LABEL (buf, prefix, label_num++); return xstrdup (buf); } /* Assign symbols to all worthy DIEs under DIE. */ static void assign_symbol_names (dw_die_ref die) { dw_die_ref c; if (is_symbol_die (die)) { if (comdat_symbol_id) { char *p = alloca (strlen (comdat_symbol_id) + 64); sprintf (p, "%s.%s.%x", DIE_LABEL_PREFIX, comdat_symbol_id, comdat_symbol_number++); die->die_symbol = xstrdup (p); } else die->die_symbol = gen_internal_sym ("LDIE"); } for (c = die->die_child; c != NULL; c = c->die_sib) assign_symbol_names (c); } struct cu_hash_table_entry { dw_die_ref cu; unsigned min_comdat_num, max_comdat_num; struct cu_hash_table_entry *next; }; /* Routines to manipulate hash table of CUs. */ static hashval_t htab_cu_hash (const void *of) { const struct cu_hash_table_entry *entry = of; return htab_hash_string (entry->cu->die_symbol); } static int htab_cu_eq (const void *of1, const void *of2) { const struct cu_hash_table_entry *entry1 = of1; const struct die_struct *entry2 = of2; return !strcmp (entry1->cu->die_symbol, entry2->die_symbol); } static void htab_cu_del (void *what) { struct cu_hash_table_entry *next, *entry = what; while (entry) { next = entry->next; free (entry); entry = next; } } /* Check whether we have already seen this CU and set up SYM_NUM accordingly. */ static int check_duplicate_cu (dw_die_ref cu, htab_t htable, unsigned int *sym_num) { struct cu_hash_table_entry dummy; struct cu_hash_table_entry **slot, *entry, *last = &dummy; dummy.max_comdat_num = 0; slot = (struct cu_hash_table_entry **) htab_find_slot_with_hash (htable, cu, htab_hash_string (cu->die_symbol), INSERT); entry = *slot; for (; entry; last = entry, entry = entry->next) { if (same_die_p_wrap (cu, entry->cu)) break; } if (entry) { *sym_num = entry->min_comdat_num; return 1; } entry = xcalloc (1, sizeof (struct cu_hash_table_entry)); entry->cu = cu; entry->min_comdat_num = *sym_num = last->max_comdat_num; entry->next = *slot; *slot = entry; return 0; } /* Record SYM_NUM to record of CU in HTABLE. */ static void record_comdat_symbol_number (dw_die_ref cu, htab_t htable, unsigned int sym_num) { struct cu_hash_table_entry **slot, *entry; slot = (struct cu_hash_table_entry **) htab_find_slot_with_hash (htable, cu, htab_hash_string (cu->die_symbol), NO_INSERT); entry = *slot; entry->max_comdat_num = sym_num; } /* Traverse the DIE (which is always comp_unit_die), and set up additional compilation units for each of the include files we see bracketed by BINCL/EINCL. 
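   For example (an editor's sketch of the shape of the input, not
   original text), if the main file includes a.h, which in turn
   includes b.h, the children of comp_unit_die arrive roughly as

       ... DW_TAG_GNU_BINCL (a.h) ... DW_TAG_GNU_BINCL (b.h) ...
       DW_TAG_GNU_EINCL ... DW_TAG_GNU_EINCL ...

   and the loop below moves the comdat-worthy DIEs between each
   BINCL/EINCL pair into the new CU pushed for that include file.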
*/ static void break_out_includes (dw_die_ref die) { dw_die_ref *ptr; dw_die_ref unit = NULL; limbo_die_node *node, **pnode; htab_t cu_hash_table; for (ptr = &(die->die_child); *ptr;) { dw_die_ref c = *ptr; if (c->die_tag == DW_TAG_GNU_BINCL || c->die_tag == DW_TAG_GNU_EINCL || (unit && is_comdat_die (c))) { /* This DIE is for a secondary CU; remove it from the main one. */ *ptr = c->die_sib; if (c->die_tag == DW_TAG_GNU_BINCL) { unit = push_new_compile_unit (unit, c); free_die (c); } else if (c->die_tag == DW_TAG_GNU_EINCL) { unit = pop_compile_unit (unit); free_die (c); } else add_child_die (unit, c); } else { /* Leave this DIE in the main CU. */ ptr = &(c->die_sib); continue; } } #if 0 /* We can only use this in debugging, since the frontend doesn't check to make sure that we leave every include file we enter. */ if (unit != NULL) abort (); #endif assign_symbol_names (die); cu_hash_table = htab_create (10, htab_cu_hash, htab_cu_eq, htab_cu_del); for (node = limbo_die_list, pnode = &limbo_die_list; node; node = node->next) { int is_dupl; compute_section_prefix (node->die); is_dupl = check_duplicate_cu (node->die, cu_hash_table, &comdat_symbol_number); assign_symbol_names (node->die); if (is_dupl) *pnode = node->next; else { pnode = &node->next; record_comdat_symbol_number (node->die, cu_hash_table, comdat_symbol_number); } } htab_delete (cu_hash_table); } /* Traverse the DIE and add a sibling attribute if it may have the effect of speeding up access to siblings. To save some space, avoid generating sibling attributes for DIE's without children. */ static void add_sibling_attributes (dw_die_ref die) { dw_die_ref c; if (die->die_tag != DW_TAG_compile_unit && die->die_sib && die->die_child != NULL) /* Add the sibling link to the front of the attribute list. */ add_AT_die_ref (die, DW_AT_sibling, die->die_sib); for (c = die->die_child; c != NULL; c = c->die_sib) add_sibling_attributes (c); } /* Output all location lists for the DIE and its children. */ static void output_location_lists (dw_die_ref die) { dw_die_ref c; dw_attr_ref d_attr; for (d_attr = die->die_attr; d_attr; d_attr = d_attr->dw_attr_next) if (AT_class (d_attr) == dw_val_class_loc_list) output_loc_list (AT_loc_list (d_attr)); for (c = die->die_child; c != NULL; c = c->die_sib) output_location_lists (c); } /* The format of each DIE (and its attribute value pairs) is encoded in an abbreviation table. This routine builds the abbreviation table and assigns a unique abbreviation id for each abbreviation entry. The children of each die are visited recursively. */ static void build_abbrev_table (dw_die_ref die) { unsigned long abbrev_id; unsigned int n_alloc; dw_die_ref c; dw_attr_ref d_attr, a_attr; /* Scan the DIE references, and mark as external any that refer to DIEs from other CUs (i.e. those which are not marked). 
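   Such references cannot use a CU-relative offset; value_format below
   hands them DW_FORM_ref_addr, and output_die emits the referenced
   DIE's symbol instead of its die_offset.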
*/ for (d_attr = die->die_attr; d_attr; d_attr = d_attr->dw_attr_next) if (AT_class (d_attr) == dw_val_class_die_ref && AT_ref (d_attr)->die_mark == 0) { if (AT_ref (d_attr)->die_symbol == 0) abort (); set_AT_ref_external (d_attr, 1); } for (abbrev_id = 1; abbrev_id < abbrev_die_table_in_use; ++abbrev_id) { dw_die_ref abbrev = abbrev_die_table[abbrev_id]; if (abbrev->die_tag == die->die_tag) { if ((abbrev->die_child != NULL) == (die->die_child != NULL)) { a_attr = abbrev->die_attr; d_attr = die->die_attr; while (a_attr != NULL && d_attr != NULL) { if ((a_attr->dw_attr != d_attr->dw_attr) || (value_format (a_attr) != value_format (d_attr))) break; a_attr = a_attr->dw_attr_next; d_attr = d_attr->dw_attr_next; } if (a_attr == NULL && d_attr == NULL) break; } } } if (abbrev_id >= abbrev_die_table_in_use) { if (abbrev_die_table_in_use >= abbrev_die_table_allocated) { n_alloc = abbrev_die_table_allocated + ABBREV_DIE_TABLE_INCREMENT; abbrev_die_table = ggc_realloc (abbrev_die_table, sizeof (dw_die_ref) * n_alloc); memset (&abbrev_die_table[abbrev_die_table_allocated], 0, (n_alloc - abbrev_die_table_allocated) * sizeof (dw_die_ref)); abbrev_die_table_allocated = n_alloc; } ++abbrev_die_table_in_use; abbrev_die_table[abbrev_id] = die; } die->die_abbrev = abbrev_id; for (c = die->die_child; c != NULL; c = c->die_sib) build_abbrev_table (c); } /* Return the power-of-two number of bytes necessary to represent VALUE. */ static int constant_size (long unsigned int value) { int log; if (value == 0) log = 0; else log = floor_log2 (value); log = log / 8; log = 1 << (floor_log2 (log) + 1); return log; } /* Return the size of a DIE as it is represented in the .debug_info section. */ static unsigned long size_of_die (dw_die_ref die) { unsigned long size = 0; dw_attr_ref a; size += size_of_uleb128 (die->die_abbrev); for (a = die->die_attr; a != NULL; a = a->dw_attr_next) { switch (AT_class (a)) { case dw_val_class_addr: size += DWARF2_ADDR_SIZE; break; case dw_val_class_offset: size += DWARF_OFFSET_SIZE; break; case dw_val_class_loc: { unsigned long lsize = size_of_locs (AT_loc (a)); /* Block length. */ size += constant_size (lsize); size += lsize; } break; case dw_val_class_loc_list: size += DWARF_OFFSET_SIZE; break; case dw_val_class_range_list: size += DWARF_OFFSET_SIZE; break; case dw_val_class_const: size += size_of_sleb128 (AT_int (a)); break; case dw_val_class_unsigned_const: size += constant_size (AT_unsigned (a)); break; case dw_val_class_long_long: size += 1 + 2*HOST_BITS_PER_LONG/HOST_BITS_PER_CHAR; /* block */ break; case dw_val_class_vec: size += 1 + (a->dw_attr_val.v.val_vec.length * a->dw_attr_val.v.val_vec.elt_size); /* block */ break; case dw_val_class_flag: size += 1; break; case dw_val_class_die_ref: if (AT_ref_external (a)) size += DWARF2_ADDR_SIZE; else size += DWARF_OFFSET_SIZE; break; case dw_val_class_fde_ref: size += DWARF_OFFSET_SIZE; break; case dw_val_class_lbl_id: size += DWARF2_ADDR_SIZE; break; case dw_val_class_lbl_offset: size += DWARF_OFFSET_SIZE; break; case dw_val_class_str: if (AT_string_form (a) == DW_FORM_strp) size += DWARF_OFFSET_SIZE; else size += strlen (a->dw_attr_val.v.val_str->str) + 1; break; default: abort (); } } return size; } /* Size the debugging information associated with a given DIE. Visits the DIE's children recursively. Updates the global variable next_die_offset, on each time through. Uses the current value of next_die_offset to update the die_offset field in each DIE. 
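   (Editor's worked example, not original text: by the rules in
   size_of_die, a childless DIE with an abbreviation number below 128,
   a DW_FORM_strp name and a one-byte unsigned DW_AT_byte_size advances
   next_die_offset by 1 + DWARF_OFFSET_SIZE + 1 bytes; its parent later
   adds one more byte for the null terminator of its sibling list.)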
*/ static void calc_die_sizes (dw_die_ref die) { dw_die_ref c; die->die_offset = next_die_offset; next_die_offset += size_of_die (die); for (c = die->die_child; c != NULL; c = c->die_sib) calc_die_sizes (c); if (die->die_child != NULL) /* Count the null byte used to terminate sibling lists. */ next_die_offset += 1; } /* Set the marks for a die and its children. We do this so that we know whether or not a reference needs to use FORM_ref_addr; only DIEs in the same CU will be marked. We used to clear out the offset and use that as the flag, but ran into ordering problems. */ static void mark_dies (dw_die_ref die) { dw_die_ref c; if (die->die_mark) abort (); die->die_mark = 1; for (c = die->die_child; c; c = c->die_sib) mark_dies (c); } /* Clear the marks for a die and its children. */ static void unmark_dies (dw_die_ref die) { dw_die_ref c; if (!die->die_mark) abort (); die->die_mark = 0; for (c = die->die_child; c; c = c->die_sib) unmark_dies (c); } /* Clear the marks for a die, its children and referred dies. */ static void unmark_all_dies (dw_die_ref die) { dw_die_ref c; dw_attr_ref a; if (!die->die_mark) return; die->die_mark = 0; for (c = die->die_child; c; c = c->die_sib) unmark_all_dies (c); for (a = die->die_attr; a; a = a->dw_attr_next) if (AT_class (a) == dw_val_class_die_ref) unmark_all_dies (AT_ref (a)); } /* Return the size of the .debug_pubnames table generated for the compilation unit. */ static unsigned long size_of_pubnames (void) { unsigned long size; unsigned i; size = DWARF_PUBNAMES_HEADER_SIZE; for (i = 0; i < pubname_table_in_use; i++) { pubname_ref p = &pubname_table[i]; size += DWARF_OFFSET_SIZE + strlen (p->name) + 1; } size += DWARF_OFFSET_SIZE; return size; } /* Return the size of the information in the .debug_aranges section. */ static unsigned long size_of_aranges (void) { unsigned long size; size = DWARF_ARANGES_HEADER_SIZE; /* Count the address/length pair for this compilation unit. */ size += 2 * DWARF2_ADDR_SIZE; size += 2 * DWARF2_ADDR_SIZE * arange_table_in_use; /* Count the two zero words used to terminate the address range table. */ size += 2 * DWARF2_ADDR_SIZE; return size; } /* Select the encoding of an attribute value. 
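   For instance (editor's illustration, not original text), an unsigned
   constant gets the smallest fixed-size data form that constant_size
   allows: 0x12 is emitted as DW_FORM_data1, 0x1234 as DW_FORM_data2,
   0x12345 as DW_FORM_data4, and anything wider than 32 bits as
   DW_FORM_data8, since constant_size rounds the byte count up to a
   power of two.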
*/ static enum dwarf_form value_format (dw_attr_ref a) { switch (a->dw_attr_val.val_class) { case dw_val_class_addr: return DW_FORM_addr; case dw_val_class_range_list: case dw_val_class_offset: if (DWARF_OFFSET_SIZE == 4) return DW_FORM_data4; if (DWARF_OFFSET_SIZE == 8) return DW_FORM_data8; abort (); case dw_val_class_loc_list: /* FIXME: Could be DW_FORM_data8, with a > 32 bit size .debug_loc section */ return DW_FORM_data4; case dw_val_class_loc: switch (constant_size (size_of_locs (AT_loc (a)))) { case 1: return DW_FORM_block1; case 2: return DW_FORM_block2; default: abort (); } case dw_val_class_const: return DW_FORM_sdata; case dw_val_class_unsigned_const: switch (constant_size (AT_unsigned (a))) { case 1: return DW_FORM_data1; case 2: return DW_FORM_data2; case 4: return DW_FORM_data4; case 8: return DW_FORM_data8; default: abort (); } case dw_val_class_long_long: return DW_FORM_block1; case dw_val_class_vec: return DW_FORM_block1; case dw_val_class_flag: return DW_FORM_flag; case dw_val_class_die_ref: if (AT_ref_external (a)) return DW_FORM_ref_addr; else return DW_FORM_ref; case dw_val_class_fde_ref: return DW_FORM_data; case dw_val_class_lbl_id: return DW_FORM_addr; case dw_val_class_lbl_offset: return DW_FORM_data; case dw_val_class_str: return AT_string_form (a); default: abort (); } } /* Output the encoding of an attribute value. */ static void output_value_format (dw_attr_ref a) { enum dwarf_form form = value_format (a); dw2_asm_output_data_uleb128 (form, "(%s)", dwarf_form_name (form)); } /* Output the .debug_abbrev section which defines the DIE abbreviation table. */ static void output_abbrev_section (void) { unsigned long abbrev_id; dw_attr_ref a_attr; for (abbrev_id = 1; abbrev_id < abbrev_die_table_in_use; ++abbrev_id) { dw_die_ref abbrev = abbrev_die_table[abbrev_id]; dw2_asm_output_data_uleb128 (abbrev_id, "(abbrev code)"); dw2_asm_output_data_uleb128 (abbrev->die_tag, "(TAG: %s)", dwarf_tag_name (abbrev->die_tag)); if (abbrev->die_child != NULL) dw2_asm_output_data (1, DW_children_yes, "DW_children_yes"); else dw2_asm_output_data (1, DW_children_no, "DW_children_no"); for (a_attr = abbrev->die_attr; a_attr != NULL; a_attr = a_attr->dw_attr_next) { dw2_asm_output_data_uleb128 (a_attr->dw_attr, "(%s)", dwarf_attr_name (a_attr->dw_attr)); output_value_format (a_attr); } dw2_asm_output_data (1, 0, NULL); dw2_asm_output_data (1, 0, NULL); } /* Terminate the table. */ dw2_asm_output_data (1, 0, NULL); } /* Output a symbol we can use to refer to this DIE from another CU. */ static inline void output_die_symbol (dw_die_ref die) { char *sym = die->die_symbol; if (sym == 0) return; if (strncmp (sym, DIE_LABEL_PREFIX, sizeof (DIE_LABEL_PREFIX) - 1) == 0) /* We make these global, not weak; if the target doesn't support .linkonce, it doesn't support combining the sections, so debugging will break. */ targetm.asm_out.globalize_label (asm_out_file, sym); ASM_OUTPUT_LABEL (asm_out_file, sym); } /* Return a new location list, given the begin and end range, and the expression. gensym tells us whether to generate a new internal symbol for this location list node, which is done for the head of the list only. 
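   As an editor's sketch (the labels, section name and register/offset
   values are placeholders, not from this file), a variable that lives
   in DWARF register 0 over one range and at frame-base offset -8 over
   a second range would be described by

       list = new_loc_list (one_reg_loc_descriptor (0),
                            begin0, end0, secname, 1);
       add_loc_descr_to_loc_list (&list,
                                  based_loc_descr (fp_reg, -8, true),
                                  begin1, end1, secname);

   where only the head node created by new_loc_list gets an LLST
   symbol.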
*/ static inline dw_loc_list_ref new_loc_list (dw_loc_descr_ref expr, const char *begin, const char *end, const char *section, unsigned int gensym) { dw_loc_list_ref retlist = ggc_alloc_cleared (sizeof (dw_loc_list_node)); retlist->begin = begin; retlist->end = end; retlist->expr = expr; retlist->section = section; if (gensym) retlist->ll_symbol = gen_internal_sym ("LLST"); return retlist; } /* Add a location description expression to a location list. */ static inline void add_loc_descr_to_loc_list (dw_loc_list_ref *list_head, dw_loc_descr_ref descr, const char *begin, const char *end, const char *section) { dw_loc_list_ref *d; /* Find the end of the chain. */ for (d = list_head; (*d) != NULL; d = &(*d)->dw_loc_next) ; /* Add a new location list node to the list. */ *d = new_loc_list (descr, begin, end, section, 0); } /* Output the location list given to us. */ static void output_loc_list (dw_loc_list_ref list_head) { dw_loc_list_ref curr = list_head; ASM_OUTPUT_LABEL (asm_out_file, list_head->ll_symbol); /* Walk the location list, and output each range + expression. */ for (curr = list_head; curr != NULL; curr = curr->dw_loc_next) { unsigned long size; if (separate_line_info_table_in_use == 0) { dw2_asm_output_delta (DWARF2_ADDR_SIZE, curr->begin, curr->section, "Location list begin address (%s)", list_head->ll_symbol); dw2_asm_output_delta (DWARF2_ADDR_SIZE, curr->end, curr->section, "Location list end address (%s)", list_head->ll_symbol); } else { dw2_asm_output_addr (DWARF2_ADDR_SIZE, curr->begin, "Location list begin address (%s)", list_head->ll_symbol); dw2_asm_output_addr (DWARF2_ADDR_SIZE, curr->end, "Location list end address (%s)", list_head->ll_symbol); } size = size_of_locs (curr->expr); /* Output the block length for this list of location operations. */ if (size > 0xffff) abort (); dw2_asm_output_data (2, size, "%s", "Location expression size"); output_loc_sequence (curr->expr); } dw2_asm_output_data (DWARF2_ADDR_SIZE, 0, "Location list terminator begin (%s)", list_head->ll_symbol); dw2_asm_output_data (DWARF2_ADDR_SIZE, 0, "Location list terminator end (%s)", list_head->ll_symbol); } /* Output the DIE and its attributes. Called recursively to generate the definitions of each child DIE. */ static void output_die (dw_die_ref die) { dw_attr_ref a; dw_die_ref c; unsigned long size; /* If someone in another CU might refer to us, set up a symbol for them to point to. */ if (die->die_symbol) output_die_symbol (die); dw2_asm_output_data_uleb128 (die->die_abbrev, "(DIE (0x%lx) %s)", die->die_offset, dwarf_tag_name (die->die_tag)); for (a = die->die_attr; a != NULL; a = a->dw_attr_next) { const char *name = dwarf_attr_name (a->dw_attr); switch (AT_class (a)) { case dw_val_class_addr: dw2_asm_output_addr_rtx (DWARF2_ADDR_SIZE, AT_addr (a), "%s", name); break; case dw_val_class_offset: dw2_asm_output_data (DWARF_OFFSET_SIZE, a->dw_attr_val.v.val_offset, "%s", name); break; case dw_val_class_range_list: { char *p = strchr (ranges_section_label, '\0'); sprintf (p, "+" HOST_WIDE_INT_PRINT_HEX, a->dw_attr_val.v.val_offset); dw2_asm_output_offset (DWARF_OFFSET_SIZE, ranges_section_label, "%s", name); *p = '\0'; } break; case dw_val_class_loc: size = size_of_locs (AT_loc (a)); /* Output the block length for this list of location operations. */ dw2_asm_output_data (constant_size (size), size, "%s", name); output_loc_sequence (AT_loc (a)); break; case dw_val_class_const: /* ??? 
It would be slightly more efficient to use a scheme like is used for unsigned constants below, but gdb 4.x does not sign extend. Gdb 5.x does sign extend. */ dw2_asm_output_data_sleb128 (AT_int (a), "%s", name); break; case dw_val_class_unsigned_const: dw2_asm_output_data (constant_size (AT_unsigned (a)), AT_unsigned (a), "%s", name); break; case dw_val_class_long_long: { unsigned HOST_WIDE_INT first, second; dw2_asm_output_data (1, 2 * HOST_BITS_PER_LONG / HOST_BITS_PER_CHAR, "%s", name); if (WORDS_BIG_ENDIAN) { first = a->dw_attr_val.v.val_long_long.hi; second = a->dw_attr_val.v.val_long_long.low; } else { first = a->dw_attr_val.v.val_long_long.low; second = a->dw_attr_val.v.val_long_long.hi; } dw2_asm_output_data (HOST_BITS_PER_LONG / HOST_BITS_PER_CHAR, first, "long long constant"); dw2_asm_output_data (HOST_BITS_PER_LONG / HOST_BITS_PER_CHAR, second, NULL); } break; case dw_val_class_vec: { unsigned int elt_size = a->dw_attr_val.v.val_vec.elt_size; unsigned int len = a->dw_attr_val.v.val_vec.length; unsigned int i; unsigned char *p; dw2_asm_output_data (1, len * elt_size, "%s", name); if (elt_size > sizeof (HOST_WIDE_INT)) { elt_size /= 2; len *= 2; } for (i = 0, p = a->dw_attr_val.v.val_vec.array; i < len; i++, p += elt_size) dw2_asm_output_data (elt_size, extract_int (p, elt_size), "fp or vector constant word %u", i); break; } case dw_val_class_flag: dw2_asm_output_data (1, AT_flag (a), "%s", name); break; case dw_val_class_loc_list: { char *sym = AT_loc_list (a)->ll_symbol; if (sym == 0) abort (); dw2_asm_output_offset (DWARF_OFFSET_SIZE, sym, "%s", name); } break; case dw_val_class_die_ref: if (AT_ref_external (a)) { char *sym = AT_ref (a)->die_symbol; if (sym == 0) abort (); dw2_asm_output_offset (DWARF2_ADDR_SIZE, sym, "%s", name); } else if (AT_ref (a)->die_offset == 0) abort (); else dw2_asm_output_data (DWARF_OFFSET_SIZE, AT_ref (a)->die_offset, "%s", name); break; case dw_val_class_fde_ref: { char l1[20]; ASM_GENERATE_INTERNAL_LABEL (l1, FDE_LABEL, a->dw_attr_val.v.val_fde_index * 2); dw2_asm_output_offset (DWARF_OFFSET_SIZE, l1, "%s", name); } break; case dw_val_class_lbl_id: dw2_asm_output_addr (DWARF2_ADDR_SIZE, AT_lbl (a), "%s", name); break; case dw_val_class_lbl_offset: dw2_asm_output_offset (DWARF_OFFSET_SIZE, AT_lbl (a), "%s", name); break; case dw_val_class_str: if (AT_string_form (a) == DW_FORM_strp) dw2_asm_output_offset (DWARF_OFFSET_SIZE, a->dw_attr_val.v.val_str->label, "%s: \"%s\"", name, AT_string (a)); else dw2_asm_output_nstring (AT_string (a), -1, "%s", name); break; default: abort (); } } for (c = die->die_child; c != NULL; c = c->die_sib) output_die (c); /* Add null byte to terminate sibling list. */ if (die->die_child != NULL) dw2_asm_output_data (1, 0, "end of children of DIE 0x%lx", die->die_offset); } /* Output the compilation unit that appears at the beginning of the .debug_info section, and precedes the DIE descriptions. */ static void output_compilation_unit_header (void) { if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4) dw2_asm_output_data (4, 0xffffffff, "Initial length escape value indicating 64-bit DWARF extension"); dw2_asm_output_data (DWARF_OFFSET_SIZE, next_die_offset - DWARF_INITIAL_LENGTH_SIZE, "Length of Compilation Unit Info"); dw2_asm_output_data (2, DWARF_VERSION, "DWARF version number"); dw2_asm_output_offset (DWARF_OFFSET_SIZE, abbrev_section_label, "Offset Into Abbrev. Section"); dw2_asm_output_data (1, DWARF2_ADDR_SIZE, "Pointer Size (in bytes)"); } /* Output the compilation unit DIE and its children. 
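   (Editor's note, not original text: for a comdat CU the section name
   is derived below from the symbol that compute_section_prefix built,
   so a unit for a header whose basename is foo.h and whose checksum
   begins a1 b2 c3 d4 lands in a section named roughly
   .gnu.linkonce.wi.foo.h.a1b2c3d4, modulo whatever characters
   clean_symbol_name rewrites.)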
*/ static void output_comp_unit (dw_die_ref die, int output_if_empty) { const char *secname; char *oldsym, *tmp; /* Unless we are outputting main CU, we may throw away empty ones. */ if (!output_if_empty && die->die_child == NULL) return; /* Even if there are no children of this DIE, we must output the information about the compilation unit. Otherwise, on an empty translation unit, we will generate a present, but empty, .debug_info section. IRIX 6.5 `nm' will then complain when examining the file. First mark all the DIEs in this CU so we know which get local refs. */ mark_dies (die); build_abbrev_table (die); /* Initialize the beginning DIE offset - and calculate sizes/offsets. */ next_die_offset = DWARF_COMPILE_UNIT_HEADER_SIZE; calc_die_sizes (die); oldsym = die->die_symbol; if (oldsym) { tmp = alloca (strlen (oldsym) + 24); sprintf (tmp, ".gnu.linkonce.wi.%s", oldsym); secname = tmp; die->die_symbol = NULL; } else secname = (const char *) DEBUG_INFO_SECTION; /* Output debugging information. */ named_section_flags (secname, SECTION_DEBUG); output_compilation_unit_header (); output_die (die); /* Leave the marks on the main CU, so we can check them in output_pubnames. */ if (oldsym) { unmark_dies (die); die->die_symbol = oldsym; } } /* The DWARF2 pubname for a nested thingy looks like "A::f". The output of lang_hooks.decl_printable_name for C++ looks like "A::f(int)". Let's drop the argument list, and maybe the scope. */ static const char * dwarf2_name (tree decl, int scope) { return lang_hooks.decl_printable_name (decl, scope ? 1 : 0); } /* Add a new entry to .debug_pubnames if appropriate. */ static void add_pubname (tree decl, dw_die_ref die) { pubname_ref p; if (! TREE_PUBLIC (decl)) return; if (pubname_table_in_use == pubname_table_allocated) { pubname_table_allocated += PUBNAME_TABLE_INCREMENT; pubname_table = ggc_realloc (pubname_table, (pubname_table_allocated * sizeof (pubname_entry))); memset (pubname_table + pubname_table_in_use, 0, PUBNAME_TABLE_INCREMENT * sizeof (pubname_entry)); } p = &pubname_table[pubname_table_in_use++]; p->die = die; p->name = xstrdup (dwarf2_name (decl, 1)); } /* Output the public names table used to speed up access to externally visible names. For now, only generate entries for externally visible procedures. */ static void output_pubnames (void) { unsigned i; unsigned long pubnames_length = size_of_pubnames (); if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4) dw2_asm_output_data (4, 0xffffffff, "Initial length escape value indicating 64-bit DWARF extension"); dw2_asm_output_data (DWARF_OFFSET_SIZE, pubnames_length, "Length of Public Names Info"); dw2_asm_output_data (2, DWARF_VERSION, "DWARF Version"); dw2_asm_output_offset (DWARF_OFFSET_SIZE, debug_info_section_label, "Offset of Compilation Unit Info"); dw2_asm_output_data (DWARF_OFFSET_SIZE, next_die_offset, "Compilation Unit Length"); for (i = 0; i < pubname_table_in_use; i++) { pubname_ref pub = &pubname_table[i]; /* We shouldn't see pubnames for DIEs outside of the main CU. */ if (pub->die->die_mark == 0) abort (); dw2_asm_output_data (DWARF_OFFSET_SIZE, pub->die->die_offset, "DIE offset"); dw2_asm_output_nstring (pub->name, -1, "external name"); } dw2_asm_output_data (DWARF_OFFSET_SIZE, 0, NULL); } /* Add a new entry to .debug_aranges if appropriate. */ static void add_arange (tree decl, dw_die_ref die) { if (! 
DECL_SECTION_NAME (decl)) return; if (arange_table_in_use == arange_table_allocated) { arange_table_allocated += ARANGE_TABLE_INCREMENT; arange_table = ggc_realloc (arange_table, (arange_table_allocated * sizeof (dw_die_ref))); memset (arange_table + arange_table_in_use, 0, ARANGE_TABLE_INCREMENT * sizeof (dw_die_ref)); } arange_table[arange_table_in_use++] = die; } /* Output the information that goes into the .debug_aranges table. Namely, define the beginning and ending address range of the text section generated for this compilation unit. */ static void output_aranges (void) { unsigned i; unsigned long aranges_length = size_of_aranges (); if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4) dw2_asm_output_data (4, 0xffffffff, "Initial length escape value indicating 64-bit DWARF extension"); dw2_asm_output_data (DWARF_OFFSET_SIZE, aranges_length, "Length of Address Ranges Info"); dw2_asm_output_data (2, DWARF_VERSION, "DWARF Version"); dw2_asm_output_offset (DWARF_OFFSET_SIZE, debug_info_section_label, "Offset of Compilation Unit Info"); dw2_asm_output_data (1, DWARF2_ADDR_SIZE, "Size of Address"); dw2_asm_output_data (1, 0, "Size of Segment Descriptor"); /* We need to align to twice the pointer size here. */ if (DWARF_ARANGES_PAD_SIZE) { /* Pad using a 2 byte words so that padding is correct for any pointer size. */ dw2_asm_output_data (2, 0, "Pad to %d byte boundary", 2 * DWARF2_ADDR_SIZE); for (i = 2; i < (unsigned) DWARF_ARANGES_PAD_SIZE; i += 2) dw2_asm_output_data (2, 0, NULL); } dw2_asm_output_addr (DWARF2_ADDR_SIZE, text_section_label, "Address"); dw2_asm_output_delta (DWARF2_ADDR_SIZE, text_end_label, text_section_label, "Length"); for (i = 0; i < arange_table_in_use; i++) { dw_die_ref die = arange_table[i]; /* We shouldn't see aranges for DIEs outside of the main CU. */ if (die->die_mark == 0) abort (); if (die->die_tag == DW_TAG_subprogram) { dw2_asm_output_addr (DWARF2_ADDR_SIZE, get_AT_low_pc (die), "Address"); dw2_asm_output_delta (DWARF2_ADDR_SIZE, get_AT_hi_pc (die), get_AT_low_pc (die), "Length"); } else { /* A static variable; extract the symbol from DW_AT_location. Note that this code isn't currently hit, as we only emit aranges for functions (jason 9/23/99). */ dw_attr_ref a = get_AT (die, DW_AT_location); dw_loc_descr_ref loc; if (! a || AT_class (a) != dw_val_class_loc) abort (); loc = AT_loc (a); if (loc->dw_loc_opc != DW_OP_addr) abort (); dw2_asm_output_addr_rtx (DWARF2_ADDR_SIZE, loc->dw_loc_oprnd1.v.val_addr, "Address"); dw2_asm_output_data (DWARF2_ADDR_SIZE, get_AT_unsigned (die, DW_AT_byte_size), "Length"); } } /* Output the terminator words. */ dw2_asm_output_data (DWARF2_ADDR_SIZE, 0, NULL); dw2_asm_output_data (DWARF2_ADDR_SIZE, 0, NULL); } /* Add a new entry to .debug_ranges. Return the offset at which it was placed. */ static unsigned int add_ranges (tree block) { unsigned int in_use = ranges_table_in_use; if (in_use == ranges_table_allocated) { ranges_table_allocated += RANGES_TABLE_INCREMENT; ranges_table = ggc_realloc (ranges_table, (ranges_table_allocated * sizeof (struct dw_ranges_struct))); memset (ranges_table + ranges_table_in_use, 0, RANGES_TABLE_INCREMENT * sizeof (struct dw_ranges_struct)); } ranges_table[in_use].block_num = (block ? 
BLOCK_NUMBER (block) : 0); ranges_table_in_use = in_use + 1; return in_use * 2 * DWARF2_ADDR_SIZE; } static void output_ranges (void) { unsigned i; static const char *const start_fmt = "Offset 0x%x"; const char *fmt = start_fmt; for (i = 0; i < ranges_table_in_use; i++) { int block_num = ranges_table[i].block_num; if (block_num) { char blabel[MAX_ARTIFICIAL_LABEL_BYTES]; char elabel[MAX_ARTIFICIAL_LABEL_BYTES]; ASM_GENERATE_INTERNAL_LABEL (blabel, BLOCK_BEGIN_LABEL, block_num); ASM_GENERATE_INTERNAL_LABEL (elabel, BLOCK_END_LABEL, block_num); /* If all code is in the text section, then the compilation unit base address defaults to DW_AT_low_pc, which is the base of the text section. */ if (separate_line_info_table_in_use == 0) { dw2_asm_output_delta (DWARF2_ADDR_SIZE, blabel, text_section_label, fmt, i * 2 * DWARF2_ADDR_SIZE); dw2_asm_output_delta (DWARF2_ADDR_SIZE, elabel, text_section_label, NULL); } /* Otherwise, we add a DW_AT_entry_pc attribute to force the compilation unit base address to zero, which allows us to use absolute addresses, and not worry about whether the target supports cross-section arithmetic. */ else { dw2_asm_output_addr (DWARF2_ADDR_SIZE, blabel, fmt, i * 2 * DWARF2_ADDR_SIZE); dw2_asm_output_addr (DWARF2_ADDR_SIZE, elabel, NULL); } fmt = NULL; } else { dw2_asm_output_data (DWARF2_ADDR_SIZE, 0, NULL); dw2_asm_output_data (DWARF2_ADDR_SIZE, 0, NULL); fmt = start_fmt; } } } /* Data structure containing information about input files. */ struct file_info { char *path; /* Complete file name. */ char *fname; /* File name part. */ int length; /* Length of entire string. */ int file_idx; /* Index in input file table. */ int dir_idx; /* Index in directory table. */ }; /* Data structure containing information about directories with source files. */ struct dir_info { char *path; /* Path including directory name. */ int length; /* Path length. */ int prefix; /* Index of directory entry which is a prefix. */ int count; /* Number of files in this directory. */ int dir_idx; /* Index of directory used as base. */ int used; /* Used in the end? */ }; /* Callback function for file_info comparison. We sort by looking at the directories in the path. */ static int file_info_cmp (const void *p1, const void *p2) { const struct file_info *s1 = p1; const struct file_info *s2 = p2; unsigned char *cp1; unsigned char *cp2; /* Take care of file names without directories. We need to make sure that we return consistent values to qsort since some will get confused if we return the same value when identical operands are passed in opposite orders. So if neither has a directory, return 0 and otherwise return 1 or -1 depending on which one has the directory. */ if ((s1->path == s1->fname || s2->path == s2->fname)) return (s2->path == s2->fname) - (s1->path == s1->fname); cp1 = (unsigned char *) s1->path; cp2 = (unsigned char *) s2->path; while (1) { ++cp1; ++cp2; /* Reached the end of the first path? If so, handle like above. */ if ((cp1 == (unsigned char *) s1->fname) || (cp2 == (unsigned char *) s2->fname)) return ((cp2 == (unsigned char *) s2->fname) - (cp1 == (unsigned char *) s1->fname)); /* Character of current path component the same? */ else if (*cp1 != *cp2) return *cp1 - *cp2; } } /* Output the directory table and the file name table. We try to minimize the total amount of memory needed. A heuristic is used to avoid large slowdowns with many input files. 
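   As an editor's illustration (hypothetical paths, not from this
   file): given the two input files /usr/include/stdio.h and
   src/main.c, the directory table receives entries for /usr/include
   and src, and each file entry then stores only the basename
   (stdio.h, main.c) together with the index of its directory,
   followed by a zero modification time and a zero file length, which
   is how the entries are emitted at the end of this function.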
*/ static void output_file_names (void) { struct file_info *files; struct dir_info *dirs; int *saved; int *savehere; int *backmap; size_t ndirs; int idx_offset; size_t i; int idx; /* Handle the case where file_table is empty. */ if (VARRAY_ACTIVE_SIZE (file_table) <= 1) { dw2_asm_output_data (1, 0, "End directory table"); dw2_asm_output_data (1, 0, "End file name table"); return; } /* Allocate the various arrays we need. */ files = alloca (VARRAY_ACTIVE_SIZE (file_table) * sizeof (struct file_info)); dirs = alloca (VARRAY_ACTIVE_SIZE (file_table) * sizeof (struct dir_info)); /* Sort the file names. */ for (i = 1; i < VARRAY_ACTIVE_SIZE (file_table); i++) { char *f; /* Skip all leading "./". */ f = VARRAY_CHAR_PTR (file_table, i); while (f[0] == '.' && f[1] == '/') f += 2; /* Create a new array entry. */ files[i].path = f; files[i].length = strlen (f); files[i].file_idx = i; /* Search for the file name part. */ f = strrchr (f, '/'); files[i].fname = f == NULL ? files[i].path : f + 1; } qsort (files + 1, VARRAY_ACTIVE_SIZE (file_table) - 1, sizeof (files[0]), file_info_cmp); /* Find all the different directories used. */ dirs[0].path = files[1].path; dirs[0].length = files[1].fname - files[1].path; dirs[0].prefix = -1; dirs[0].count = 1; dirs[0].dir_idx = 0; dirs[0].used = 0; files[1].dir_idx = 0; ndirs = 1; for (i = 2; i < VARRAY_ACTIVE_SIZE (file_table); i++) if (files[i].fname - files[i].path == dirs[ndirs - 1].length && memcmp (dirs[ndirs - 1].path, files[i].path, dirs[ndirs - 1].length) == 0) { /* Same directory as last entry. */ files[i].dir_idx = ndirs - 1; ++dirs[ndirs - 1].count; } else { size_t j; /* This is a new directory. */ dirs[ndirs].path = files[i].path; dirs[ndirs].length = files[i].fname - files[i].path; dirs[ndirs].count = 1; dirs[ndirs].dir_idx = ndirs; dirs[ndirs].used = 0; files[i].dir_idx = ndirs; /* Search for a prefix. */ dirs[ndirs].prefix = -1; for (j = 0; j < ndirs; j++) if (dirs[j].length < dirs[ndirs].length && dirs[j].length > 1 && (dirs[ndirs].prefix == -1 || dirs[j].length > dirs[dirs[ndirs].prefix].length) && memcmp (dirs[j].path, dirs[ndirs].path, dirs[j].length) == 0) dirs[ndirs].prefix = j; ++ndirs; } /* Now to the actual work. We have to find a subset of the directories which allow expressing the file name using references to the directory table with the least amount of characters. We do not do an exhaustive search where we would have to check out every combination of every single possible prefix. Instead we use a heuristic which provides nearly optimal results in most cases and is never far off. */ saved = alloca (ndirs * sizeof (int)); savehere = alloca (ndirs * sizeof (int)); memset (saved, '\0', ndirs * sizeof (saved[0])); for (i = 0; i < ndirs; i++) { size_t j; int total; /* We can always save some space for the current directory. But this does not mean it will be enough to justify adding the directory. */ savehere[i] = dirs[i].length; total = (savehere[i] - saved[i]) * dirs[i].count; for (j = i + 1; j < ndirs; j++) { savehere[j] = 0; if (saved[j] < dirs[i].length) { /* Determine whether the dirs[i] path is a prefix of the dirs[j] path. */ int k; k = dirs[j].prefix; while (k != -1 && k != (int) i) k = dirs[k].prefix; if (k == (int) i) { /* Yes it is. We can possibly save some memory by writing the filenames in dirs[j] relative to dirs[i]. */ savehere[j] = dirs[i].length; total += (savehere[j] - saved[j]) * dirs[j].count; } } } /* Check whether we can save enough to justify adding the dirs[i] directory. 
*/ if (total > dirs[i].length + 1) { /* It's worthwhile adding. */ for (j = i; j < ndirs; j++) if (savehere[j] > 0) { /* Remember how much we saved for this directory so far. */ saved[j] = savehere[j]; /* Remember the prefix directory. */ dirs[j].dir_idx = i; } } } /* We have to emit them in the order they appear in the file_table array since the index is used in the debug info generation. To do this efficiently we generate a back-mapping of the indices first. */ backmap = alloca (VARRAY_ACTIVE_SIZE (file_table) * sizeof (int)); for (i = 1; i < VARRAY_ACTIVE_SIZE (file_table); i++) { backmap[files[i].file_idx] = i; /* Mark this directory as used. */ dirs[dirs[files[i].dir_idx].dir_idx].used = 1; } /* That was it. We are ready to emit the information. First emit the directory name table. We have to make sure the first actually emitted directory name has index one; zero is reserved for the current working directory. Make sure we do not confuse these indices with the one for the constructed table (even though most of the time they are identical). */ idx = 1; idx_offset = dirs[0].length > 0 ? 1 : 0; for (i = 1 - idx_offset; i < ndirs; i++) if (dirs[i].used != 0) { dirs[i].used = idx++; dw2_asm_output_nstring (dirs[i].path, dirs[i].length - 1, "Directory Entry: 0x%x", dirs[i].used); } dw2_asm_output_data (1, 0, "End directory table"); /* Correct the index for the current working directory entry if it exists. */ if (idx_offset == 0) dirs[0].used = 0; /* Now write all the file names. */ for (i = 1; i < VARRAY_ACTIVE_SIZE (file_table); i++) { int file_idx = backmap[i]; int dir_idx = dirs[files[file_idx].dir_idx].dir_idx; dw2_asm_output_nstring (files[file_idx].path + dirs[dir_idx].length, -1, "File Entry: 0x%lx", (unsigned long) i); /* Include directory index. */ dw2_asm_output_data_uleb128 (dirs[dir_idx].used, NULL); /* Modification time. */ dw2_asm_output_data_uleb128 (0, NULL); /* File length in bytes. */ dw2_asm_output_data_uleb128 (0, NULL); } dw2_asm_output_data (1, 0, "End file name table"); } /* Output the source line number correspondence information. This information goes into the .debug_line section. */ static void output_line_info (void) { char l1[20], l2[20], p1[20], p2[20]; char line_label[MAX_ARTIFICIAL_LABEL_BYTES]; char prev_line_label[MAX_ARTIFICIAL_LABEL_BYTES]; unsigned opc; unsigned n_op_args; unsigned long lt_index; unsigned long current_line; long line_offset; long line_delta; unsigned long current_file; unsigned long function; ASM_GENERATE_INTERNAL_LABEL (l1, LINE_NUMBER_BEGIN_LABEL, 0); ASM_GENERATE_INTERNAL_LABEL (l2, LINE_NUMBER_END_LABEL, 0); ASM_GENERATE_INTERNAL_LABEL (p1, LN_PROLOG_AS_LABEL, 0); ASM_GENERATE_INTERNAL_LABEL (p2, LN_PROLOG_END_LABEL, 0); if (DWARF_INITIAL_LENGTH_SIZE - DWARF_OFFSET_SIZE == 4) dw2_asm_output_data (4, 0xffffffff, "Initial length escape value indicating 64-bit DWARF extension"); dw2_asm_output_delta (DWARF_OFFSET_SIZE, l2, l1, "Length of Source Line Info"); ASM_OUTPUT_LABEL (asm_out_file, l1); dw2_asm_output_data (2, DWARF_VERSION, "DWARF Version"); dw2_asm_output_delta (DWARF_OFFSET_SIZE, p2, p1, "Prolog Length"); ASM_OUTPUT_LABEL (asm_out_file, p1); /* Define the architecture-dependent minimum instruction length (in bytes). In this implementation of DWARF, this field is used for information purposes only. 
Since GCC generates assembly language, we have no a priori knowledge of how many instruction bytes are generated for each source line, and therefore can use only the DW_LNE_set_address and DW_LNS_fixed_advance_pc line information commands. Accordingly, we fix this as `1', which is "correct enough" for all architectures, and don't let the target override. */ dw2_asm_output_data (1, 1, "Minimum Instruction Length"); dw2_asm_output_data (1, DWARF_LINE_DEFAULT_IS_STMT_START, "Default is_stmt_start flag"); dw2_asm_output_data (1, DWARF_LINE_BASE, "Line Base Value (Special Opcodes)"); dw2_asm_output_data (1, DWARF_LINE_RANGE, "Line Range Value (Special Opcodes)"); dw2_asm_output_data (1, DWARF_LINE_OPCODE_BASE, "Special Opcode Base"); for (opc = 1; opc < DWARF_LINE_OPCODE_BASE; opc++) { switch (opc) { case DW_LNS_advance_pc: case DW_LNS_advance_line: case DW_LNS_set_file: case DW_LNS_set_column: case DW_LNS_fixed_advance_pc: n_op_args = 1; break; default: n_op_args = 0; break; } dw2_asm_output_data (1, n_op_args, "opcode: 0x%x has %d args", opc, n_op_args); } /* Write out the information about the files we use. */ output_file_names (); ASM_OUTPUT_LABEL (asm_out_file, p2); /* We used to set the address register to the first location in the text section here, but that didn't accomplish anything since we already have a line note for the opening brace of the first function. */ /* Generate the line number to PC correspondence table, encoded as a series of state machine operations. */ current_file = 1; current_line = 1; strcpy (prev_line_label, text_section_label); for (lt_index = 1; lt_index < line_info_table_in_use; ++lt_index) { dw_line_info_ref line_info = &line_info_table[lt_index]; #if 0 /* Disable this optimization for now; GDB wants to see two line notes at the beginning of a function so it can find the end of the prologue. */ /* Don't emit anything for redundant notes. Just updating the address doesn't accomplish anything, because we already assume that anything after the last address is this line. */ if (line_info->dw_line_num == current_line && line_info->dw_file_num == current_file) continue; #endif /* Emit debug info for the address of the current line. Unfortunately, we have little choice here currently, and must always use the most general form. GCC does not know the address delta itself, so we can't use DW_LNS_advance_pc. Many ports do have length attributes which will give an upper bound on the address range. We could perhaps use length attributes to determine when it is safe to use DW_LNS_fixed_advance_pc. */ ASM_GENERATE_INTERNAL_LABEL (line_label, LINE_CODE_LABEL, lt_index); if (0) { /* This can handle deltas up to 0xffff. This takes 3 bytes. */ dw2_asm_output_data (1, DW_LNS_fixed_advance_pc, "DW_LNS_fixed_advance_pc"); dw2_asm_output_delta (2, line_label, prev_line_label, NULL); } else { /* This can handle any delta. This takes 4+DWARF2_ADDR_SIZE bytes. */ dw2_asm_output_data (1, 0, "DW_LNE_set_address"); dw2_asm_output_data_uleb128 (1 + DWARF2_ADDR_SIZE, NULL); dw2_asm_output_data (1, DW_LNE_set_address, NULL); dw2_asm_output_addr (DWARF2_ADDR_SIZE, line_label, NULL); } strcpy (prev_line_label, line_label); /* Emit debug info for the source file of the current line, if different from the previous line. 
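   (Editor's byte-level illustration, not original text: the row's
   address was just re-established by the extended opcode emitted
   above, which on a target with 4-byte addresses is the sequence
   0x00 0x05 0x02 -- extended-op escape, length 1 + DWARF2_ADDR_SIZE,
   DW_LNE_set_address -- followed by the four address bytes.)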
*/ if (line_info->dw_file_num != current_file) { current_file = line_info->dw_file_num; dw2_asm_output_data (1, DW_LNS_set_file, "DW_LNS_set_file"); dw2_asm_output_data_uleb128 (current_file, "(\"%s\")", VARRAY_CHAR_PTR (file_table, current_file)); } /* Emit debug info for the current line number, choosing the encoding that uses the least amount of space. */ if (line_info->dw_line_num != current_line) { line_offset = line_info->dw_line_num - current_line; line_delta = line_offset - DWARF_LINE_BASE; current_line = line_info->dw_line_num; if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1)) /* This can handle deltas from -10 to 234, using the current definitions of DWARF_LINE_BASE and DWARF_LINE_RANGE. This takes 1 byte. */ dw2_asm_output_data (1, DWARF_LINE_OPCODE_BASE + line_delta, "line %lu", current_line); else { /* This can handle any delta. This takes at least 4 bytes, depending on the value being encoded. */ dw2_asm_output_data (1, DW_LNS_advance_line, "advance to line %lu", current_line); dw2_asm_output_data_sleb128 (line_offset, NULL); dw2_asm_output_data (1, DW_LNS_copy, "DW_LNS_copy"); } } else /* We still need to start a new row, so output a copy insn. */ dw2_asm_output_data (1, DW_LNS_copy, "DW_LNS_copy"); } /* Emit debug info for the address of the end of the function. */ if (0) { dw2_asm_output_data (1, DW_LNS_fixed_advance_pc, "DW_LNS_fixed_advance_pc"); dw2_asm_output_delta (2, text_end_label, prev_line_label, NULL); } else { dw2_asm_output_data (1, 0, "DW_LNE_set_address"); dw2_asm_output_data_uleb128 (1 + DWARF2_ADDR_SIZE, NULL); dw2_asm_output_data (1, DW_LNE_set_address, NULL); dw2_asm_output_addr (DWARF2_ADDR_SIZE, text_end_label, NULL); } dw2_asm_output_data (1, 0, "DW_LNE_end_sequence"); dw2_asm_output_data_uleb128 (1, NULL); dw2_asm_output_data (1, DW_LNE_end_sequence, NULL); function = 0; current_file = 1; current_line = 1; for (lt_index = 0; lt_index < separate_line_info_table_in_use;) { dw_separate_line_info_ref line_info = &separate_line_info_table[lt_index]; #if 0 /* Don't emit anything for redundant notes. */ if (line_info->dw_line_num == current_line && line_info->dw_file_num == current_file && line_info->function == function) goto cont; #endif /* Emit debug info for the address of the current line. If this is a new function, or the first line of a function, then we need to handle it differently. */ ASM_GENERATE_INTERNAL_LABEL (line_label, SEPARATE_LINE_CODE_LABEL, lt_index); if (function != line_info->function) { function = line_info->function; /* Set the address register to the first line in the function. */ dw2_asm_output_data (1, 0, "DW_LNE_set_address"); dw2_asm_output_data_uleb128 (1 + DWARF2_ADDR_SIZE, NULL); dw2_asm_output_data (1, DW_LNE_set_address, NULL); dw2_asm_output_addr (DWARF2_ADDR_SIZE, line_label, NULL); } else { /* ??? See the DW_LNS_advance_pc comment above. */ if (0) { dw2_asm_output_data (1, DW_LNS_fixed_advance_pc, "DW_LNS_fixed_advance_pc"); dw2_asm_output_delta (2, line_label, prev_line_label, NULL); } else { dw2_asm_output_data (1, 0, "DW_LNE_set_address"); dw2_asm_output_data_uleb128 (1 + DWARF2_ADDR_SIZE, NULL); dw2_asm_output_data (1, DW_LNE_set_address, NULL); dw2_asm_output_addr (DWARF2_ADDR_SIZE, line_label, NULL); } } strcpy (prev_line_label, line_label); /* Emit debug info for the source file of the current line, if different from the previous line. 
*/ if (line_info->dw_file_num != current_file) { current_file = line_info->dw_file_num; dw2_asm_output_data (1, DW_LNS_set_file, "DW_LNS_set_file"); dw2_asm_output_data_uleb128 (current_file, "(\"%s\")", VARRAY_CHAR_PTR (file_table, current_file)); } /* Emit debug info for the current line number, choosing the encoding that uses the least amount of space. */ if (line_info->dw_line_num != current_line) { line_offset = line_info->dw_line_num - current_line; line_delta = line_offset - DWARF_LINE_BASE; current_line = line_info->dw_line_num; if (line_delta >= 0 && line_delta < (DWARF_LINE_RANGE - 1)) dw2_asm_output_data (1, DWARF_LINE_OPCODE_BASE + line_delta, "line %lu", current_line); else { dw2_asm_output_data (1, DW_LNS_advance_line, "advance to line %lu", current_line); dw2_asm_output_data_sleb128 (line_offset, NULL); dw2_asm_output_data (1, DW_LNS_copy, "DW_LNS_copy"); } } else dw2_asm_output_data (1, DW_LNS_copy, "DW_LNS_copy"); #if 0 cont: #endif lt_index++; /* If we're done with a function, end its sequence. */ if (lt_index == separate_line_info_table_in_use || separate_line_info_table[lt_index].function != function) { current_file = 1; current_line = 1; /* Emit debug info for the address of the end of the function. */ ASM_GENERATE_INTERNAL_LABEL (line_label, FUNC_END_LABEL, function); if (0) { dw2_asm_output_data (1, DW_LNS_fixed_advance_pc, "DW_LNS_fixed_advance_pc"); dw2_asm_output_delta (2, line_label, prev_line_label, NULL); } else { dw2_asm_output_data (1, 0, "DW_LNE_set_address"); dw2_asm_output_data_uleb128 (1 + DWARF2_ADDR_SIZE, NULL); dw2_asm_output_data (1, DW_LNE_set_address, NULL); dw2_asm_output_addr (DWARF2_ADDR_SIZE, line_label, NULL); } /* Output the marker for the end of this sequence. */ dw2_asm_output_data (1, 0, "DW_LNE_end_sequence"); dw2_asm_output_data_uleb128 (1, NULL); dw2_asm_output_data (1, DW_LNE_end_sequence, NULL); } } /* Output the marker for the end of the line number info. */ ASM_OUTPUT_LABEL (asm_out_file, l2); } /* Given a pointer to a tree node for some base type, return a pointer to a DIE that describes the given type. This routine must only be called for GCC type nodes that correspond to Dwarf base (fundamental) types. */ static dw_die_ref base_type_die (tree type) { dw_die_ref base_type_result; const char *type_name; enum dwarf_type encoding; tree name = TYPE_NAME (type); if (TREE_CODE (type) == ERROR_MARK || TREE_CODE (type) == VOID_TYPE) return 0; if (name) { if (TREE_CODE (name) == TYPE_DECL) name = DECL_NAME (name); type_name = IDENTIFIER_POINTER (name); } else type_name = "__unknown__"; switch (TREE_CODE (type)) { case INTEGER_TYPE: /* Carefully distinguish the C character types, without messing up if the language is not C. Note that we check only for the names that contain spaces; other names might occur by coincidence in other languages. */ if (! (TYPE_PRECISION (type) == CHAR_TYPE_SIZE && (type == char_type_node || ! strcmp (type_name, "signed char") || ! strcmp (type_name, "unsigned char")))) { if (TYPE_UNSIGNED (type)) encoding = DW_ATE_unsigned; else encoding = DW_ATE_signed; break; } /* else fall through. */ case CHAR_TYPE: /* GNU Pascal/Ada CHAR type. Not used in C. */ if (TYPE_UNSIGNED (type)) encoding = DW_ATE_unsigned_char; else encoding = DW_ATE_signed_char; break; case REAL_TYPE: encoding = DW_ATE_float; break; /* Dwarf2 doesn't know anything about complex ints, so use a user defined type for it. 
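   (GNU C's __complex__ int, for instance; DW_ATE_lo_user below is
   simply the start of the range reserved for implementation-defined
   encodings.)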
*/ case COMPLEX_TYPE: if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE) encoding = DW_ATE_complex_float; else encoding = DW_ATE_lo_user; break; case BOOLEAN_TYPE: /* GNU FORTRAN/Ada/C++ BOOLEAN type. */ encoding = DW_ATE_boolean; break; default: /* No other TREE_CODEs are Dwarf fundamental types. */ abort (); } base_type_result = new_die (DW_TAG_base_type, comp_unit_die, type); if (demangle_name_func) type_name = (*demangle_name_func) (type_name); add_AT_string (base_type_result, DW_AT_name, type_name); add_AT_unsigned (base_type_result, DW_AT_byte_size, int_size_in_bytes (type)); add_AT_unsigned (base_type_result, DW_AT_encoding, encoding); return base_type_result; } /* Given a pointer to an arbitrary ..._TYPE tree node, return a pointer to the Dwarf "root" type for the given input type. The Dwarf "root" type of a given type is generally the same as the given type, except that if the given type is a pointer or reference type, then the root type of the given type is the root type of the "basis" type for the pointer or reference type. (This definition of the "root" type is recursive.) Also, the root type of a `const' qualified type or a `volatile' qualified type is the root type of the given type without the qualifiers. */ static tree root_type (tree type) { if (TREE_CODE (type) == ERROR_MARK) return error_mark_node; switch (TREE_CODE (type)) { case ERROR_MARK: return error_mark_node; case POINTER_TYPE: case REFERENCE_TYPE: return type_main_variant (root_type (TREE_TYPE (type))); default: return type_main_variant (type); } } /* Given a pointer to an arbitrary ..._TYPE tree node, return nonzero if the given input type is a Dwarf "fundamental" type. Otherwise return null. */ static inline int is_base_type (tree type) { switch (TREE_CODE (type)) { case ERROR_MARK: case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case COMPLEX_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: return 1; case SET_TYPE: case ARRAY_TYPE: case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: case ENUMERAL_TYPE: case FUNCTION_TYPE: case METHOD_TYPE: case POINTER_TYPE: case REFERENCE_TYPE: case FILE_TYPE: case OFFSET_TYPE: case LANG_TYPE: case VECTOR_TYPE: return 0; default: abort (); } return 0; } /* Given a pointer to a tree node, assumed to be some kind of a ..._TYPE node, return the size in bits for the type if it is a constant, or else return the alignment for the type if the type's size is not constant, or else return BITS_PER_WORD if the type actually turns out to be an ERROR_MARK node. */ static inline unsigned HOST_WIDE_INT simple_type_size_in_bits (tree type) { if (TREE_CODE (type) == ERROR_MARK) return BITS_PER_WORD; else if (TYPE_SIZE (type) == NULL_TREE) return 0; else if (host_integerp (TYPE_SIZE (type), 1)) return tree_low_cst (TYPE_SIZE (type), 1); else return TYPE_ALIGN (type); } /* Return true if the debug information for the given type should be emitted as a subrange type. */ static inline bool is_subrange_type (tree type) { tree subtype = TREE_TYPE (type); /* Subrange types are identified by the fact that they are integer types, and that they have a subtype which is either an integer type or an enumeral type. 
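   An Ada range declaration such as `subtype Small_Int is Integer
   range 1 .. 10;' is the kind of type this test is meant to catch
   (an editor's example, not original text); C's built-in integer
   types normally have no base subtype and are therefore never
   treated as subranges here.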
*/ if (TREE_CODE (type) != INTEGER_TYPE || subtype == NULL_TREE) return false; if (TREE_CODE (subtype) != INTEGER_TYPE && TREE_CODE (subtype) != ENUMERAL_TYPE) return false; if (TREE_CODE (type) == TREE_CODE (subtype) && int_size_in_bytes (type) == int_size_in_bytes (subtype) && TYPE_MIN_VALUE (type) != NULL && TYPE_MIN_VALUE (subtype) != NULL && tree_int_cst_equal (TYPE_MIN_VALUE (type), TYPE_MIN_VALUE (subtype)) && TYPE_MAX_VALUE (type) != NULL && TYPE_MAX_VALUE (subtype) != NULL && tree_int_cst_equal (TYPE_MAX_VALUE (type), TYPE_MAX_VALUE (subtype))) { /* The type and its subtype have the same representation. If in addition the two types also have the same name, then the given type is not a subrange type, but rather a plain base type. */ /* FIXME: brobecker/2004-03-22: Sizetype INTEGER_CSTs nodes are canonicalized. It should therefore be sufficient to check the TYPE_SIZE node pointers rather than checking the actual size. Unfortunately, we have found some cases, such as in the Ada "integer" type, where this is not the case. Until this problem is solved, we need to keep checking the actual size. */ tree type_name = TYPE_NAME (type); tree subtype_name = TYPE_NAME (subtype); if (type_name != NULL && TREE_CODE (type_name) == TYPE_DECL) type_name = DECL_NAME (type_name); if (subtype_name != NULL && TREE_CODE (subtype_name) == TYPE_DECL) subtype_name = DECL_NAME (subtype_name); if (type_name == subtype_name) return false; } return true; } /* Given a pointer to a tree node for a subrange type, return a pointer to a DIE that describes the given type. */ static dw_die_ref subrange_type_die (tree type, dw_die_ref context_die) { dw_die_ref subtype_die; dw_die_ref subrange_die; tree name = TYPE_NAME (type); const HOST_WIDE_INT size_in_bytes = int_size_in_bytes (type); tree subtype = TREE_TYPE (type); if (context_die == NULL) context_die = comp_unit_die; if (TREE_CODE (subtype) == ENUMERAL_TYPE) subtype_die = gen_enumeration_type_die (subtype, context_die); else subtype_die = base_type_die (subtype); subrange_die = new_die (DW_TAG_subrange_type, context_die, type); if (name != NULL) { if (TREE_CODE (name) == TYPE_DECL) name = DECL_NAME (name); add_name_attribute (subrange_die, IDENTIFIER_POINTER (name)); } if (int_size_in_bytes (subtype) != size_in_bytes) { /* The size of the subrange type and its base type do not match, so we need to generate a size attribute for the subrange type. */ add_AT_unsigned (subrange_die, DW_AT_byte_size, size_in_bytes); } if (TYPE_MIN_VALUE (type) != NULL) add_bound_info (subrange_die, DW_AT_lower_bound, TYPE_MIN_VALUE (type)); if (TYPE_MAX_VALUE (type) != NULL) add_bound_info (subrange_die, DW_AT_upper_bound, TYPE_MAX_VALUE (type)); add_AT_die_ref (subrange_die, DW_AT_type, subtype_die); return subrange_die; } /* Given a pointer to an arbitrary ..._TYPE tree node, return a debugging entry that chains various modifiers in front of the given type. */ static dw_die_ref modified_type_die (tree type, int is_const_type, int is_volatile_type, dw_die_ref context_die) { enum tree_code code = TREE_CODE (type); dw_die_ref mod_type_die = NULL; dw_die_ref sub_die = NULL; tree item_type = NULL; if (code != ERROR_MARK) { tree qualified_type; /* See if we already have the appropriately qualified variant of this type. */ qualified_type = get_qualified_type (type, ((is_const_type ? TYPE_QUAL_CONST : 0) | (is_volatile_type ? TYPE_QUAL_VOLATILE : 0))); /* If we do, then we can just use its DIE, if it exists. 
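   Otherwise the chain of modifier DIEs is built further down; for a
   `const volatile int', for example, that is a DW_TAG_const_type
   whose DW_AT_type refers to a DW_TAG_volatile_type, which in turn
   refers to the base-type DIE for int (an editor's illustration, not
   original text).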
*/ if (qualified_type) { mod_type_die = lookup_type_die (qualified_type); if (mod_type_die) return mod_type_die; } /* Handle C typedef types. */ if (qualified_type && TYPE_NAME (qualified_type) && TREE_CODE (TYPE_NAME (qualified_type)) == TYPE_DECL && DECL_ORIGINAL_TYPE (TYPE_NAME (qualified_type))) { tree type_name = TYPE_NAME (qualified_type); tree dtype = TREE_TYPE (type_name); if (qualified_type == dtype) { /* For a named type, use the typedef. */ gen_type_die (qualified_type, context_die); mod_type_die = lookup_type_die (qualified_type); } else if (is_const_type < TYPE_READONLY (dtype) || is_volatile_type < TYPE_VOLATILE (dtype)) /* cv-unqualified version of named type. Just use the unnamed type to which it refers. */ mod_type_die = modified_type_die (DECL_ORIGINAL_TYPE (type_name), is_const_type, is_volatile_type, context_die); /* Else cv-qualified version of named type; fall through. */ } if (mod_type_die) /* OK. */ ; else if (is_const_type) { mod_type_die = new_die (DW_TAG_const_type, comp_unit_die, type); sub_die = modified_type_die (type, 0, is_volatile_type, context_die); } else if (is_volatile_type) { mod_type_die = new_die (DW_TAG_volatile_type, comp_unit_die, type); sub_die = modified_type_die (type, 0, 0, context_die); } else if (code == POINTER_TYPE) { mod_type_die = new_die (DW_TAG_pointer_type, comp_unit_die, type); add_AT_unsigned (mod_type_die, DW_AT_byte_size, simple_type_size_in_bits (type) / BITS_PER_UNIT); #if 0 add_AT_unsigned (mod_type_die, DW_AT_address_class, 0); #endif item_type = TREE_TYPE (type); } else if (code == REFERENCE_TYPE) { mod_type_die = new_die (DW_TAG_reference_type, comp_unit_die, type); add_AT_unsigned (mod_type_die, DW_AT_byte_size, simple_type_size_in_bits (type) / BITS_PER_UNIT); #if 0 add_AT_unsigned (mod_type_die, DW_AT_address_class, 0); #endif item_type = TREE_TYPE (type); } else if (is_subrange_type (type)) mod_type_die = subrange_type_die (type, context_die); else if (is_base_type (type)) mod_type_die = base_type_die (type); else { gen_type_die (type, context_die); /* We have to get the type_main_variant here (and pass that to the `lookup_type_die' routine) because the ..._TYPE node we have might simply be a *copy* of some original type node (where the copy was created to help us keep track of typedef names) and that copy might have a different TYPE_UID from the original ..._TYPE node. */ if (TREE_CODE (type) != VECTOR_TYPE) mod_type_die = lookup_type_die (type_main_variant (type)); else /* Vectors have the debugging information in the type, not the main variant. */ mod_type_die = lookup_type_die (type); if (mod_type_die == NULL) abort (); } /* We want to equate the qualified type to the die below. */ type = qualified_type; } if (type) equate_type_number_to_die (type, mod_type_die); if (item_type) /* We must do this after the equate_type_number_to_die call, in case this is a recursive type. This ensures that the modified_type_die recursion will terminate even if the type is recursive. Recursive types are possible in Ada. */ sub_die = modified_type_die (item_type, TYPE_READONLY (item_type), TYPE_VOLATILE (item_type), context_die); if (sub_die != NULL) add_AT_die_ref (mod_type_die, DW_AT_type, sub_die); return mod_type_die; } /* Given a pointer to an arbitrary ..._TYPE tree node, return true if it is an enumerated type. */ static inline int type_is_enum (tree type) { return TREE_CODE (type) == ENUMERAL_TYPE; } /* Return the DBX register number described by a given RTL node. 
*/ static unsigned int dbx_reg_number (rtx rtl) { unsigned regno = REGNO (rtl); if (regno >= FIRST_PSEUDO_REGISTER) abort (); return DBX_REGISTER_NUMBER (regno); } /* Return a location descriptor that designates a machine register or zero if there is none. */ static dw_loc_descr_ref reg_loc_descriptor (rtx rtl) { unsigned reg; rtx regs; if (REGNO (rtl) >= FIRST_PSEUDO_REGISTER) return 0; reg = dbx_reg_number (rtl); regs = targetm.dwarf_register_span (rtl); if (hard_regno_nregs[REGNO (rtl)][GET_MODE (rtl)] > 1 || regs) return multiple_reg_loc_descriptor (rtl, regs); else return one_reg_loc_descriptor (reg); } /* Return a location descriptor that designates a machine register for a given hard register number. */ static dw_loc_descr_ref one_reg_loc_descriptor (unsigned int regno) { if (regno <= 31) return new_loc_descr (DW_OP_reg0 + regno, 0, 0); else return new_loc_descr (DW_OP_regx, regno, 0); } /* Given an RTL of a register, return a location descriptor that designates a value that spans more than one register. */ static dw_loc_descr_ref multiple_reg_loc_descriptor (rtx rtl, rtx regs) { int nregs, size, i; unsigned reg; dw_loc_descr_ref loc_result = NULL; reg = dbx_reg_number (rtl); nregs = hard_regno_nregs[REGNO (rtl)][GET_MODE (rtl)]; /* Simple, contiguous registers. */ if (regs == NULL_RTX) { size = GET_MODE_SIZE (GET_MODE (rtl)) / nregs; loc_result = NULL; while (nregs--) { dw_loc_descr_ref t; t = one_reg_loc_descriptor (reg); add_loc_descr (&loc_result, t); add_loc_descr (&loc_result, new_loc_descr (DW_OP_piece, size, 0)); ++reg; } return loc_result; } /* Now onto stupid register sets in non contiguous locations. */ if (GET_CODE (regs) != PARALLEL) abort (); size = GET_MODE_SIZE (GET_MODE (XVECEXP (regs, 0, 0))); loc_result = NULL; for (i = 0; i < XVECLEN (regs, 0); ++i) { dw_loc_descr_ref t; t = one_reg_loc_descriptor (REGNO (XVECEXP (regs, 0, i))); add_loc_descr (&loc_result, t); size = GET_MODE_SIZE (GET_MODE (XVECEXP (regs, 0, 0))); add_loc_descr (&loc_result, new_loc_descr (DW_OP_piece, size, 0)); } return loc_result; } /* Return a location descriptor that designates a constant. */ static dw_loc_descr_ref int_loc_descriptor (HOST_WIDE_INT i) { enum dwarf_location_atom op; /* Pick the smallest representation of a constant, rather than just defaulting to the LEB encoding. */ if (i >= 0) { if (i <= 31) op = DW_OP_lit0 + i; else if (i <= 0xff) op = DW_OP_const1u; else if (i <= 0xffff) op = DW_OP_const2u; else if (HOST_BITS_PER_WIDE_INT == 32 || i <= 0xffffffff) op = DW_OP_const4u; else op = DW_OP_constu; } else { if (i >= -0x80) op = DW_OP_const1s; else if (i >= -0x8000) op = DW_OP_const2s; else if (HOST_BITS_PER_WIDE_INT == 32 || i >= -0x80000000) op = DW_OP_const4s; else op = DW_OP_consts; } return new_loc_descr (op, i, 0); } /* Return a location descriptor that designates a base+offset location. */ static dw_loc_descr_ref based_loc_descr (unsigned int reg, HOST_WIDE_INT offset, bool can_use_fbreg) { dw_loc_descr_ref loc_result; /* For the "frame base", we use the frame pointer or stack pointer registers, since the RTL for local variables is relative to one of them. */ unsigned fp_reg = DBX_REGISTER_NUMBER (frame_pointer_needed ? 
HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM); if (reg == fp_reg && can_use_fbreg) loc_result = new_loc_descr (DW_OP_fbreg, offset, 0); else if (reg <= 31) loc_result = new_loc_descr (DW_OP_breg0 + reg, offset, 0); else loc_result = new_loc_descr (DW_OP_bregx, reg, offset); return loc_result; } /* Return true if this RTL expression describes a base+offset calculation. */ static inline int is_based_loc (rtx rtl) { return (GET_CODE (rtl) == PLUS && ((REG_P (XEXP (rtl, 0)) && REGNO (XEXP (rtl, 0)) < FIRST_PSEUDO_REGISTER && GET_CODE (XEXP (rtl, 1)) == CONST_INT))); } /* The following routine converts the RTL for a variable or parameter (resident in memory) into an equivalent Dwarf representation of a mechanism for getting the address of that same variable onto the top of a hypothetical "address evaluation" stack. When creating memory location descriptors, we are effectively transforming the RTL for a memory-resident object into its Dwarf postfix expression equivalent. This routine recursively descends an RTL tree, turning it into Dwarf postfix code as it goes. MODE is the mode of the memory reference, needed to handle some autoincrement addressing modes. CAN_USE_FBREG is a flag whether we can use DW_AT_frame_base in the location list for RTL. We can't use it when we are emitting location list for virtual variable frame_base_decl (i.e. a location list for DW_AT_frame_base) which describes how frame base changes when !frame_pointer_needed. Return 0 if we can't represent the location. */ static dw_loc_descr_ref mem_loc_descriptor (rtx rtl, enum machine_mode mode, bool can_use_fbreg) { dw_loc_descr_ref mem_loc_result = NULL; enum dwarf_location_atom op; /* Note that for a dynamically sized array, the location we will generate a description of here will be the lowest numbered location which is actually within the array. That's *not* necessarily the same as the zeroth element of the array. */ rtl = targetm.delegitimize_address (rtl); switch (GET_CODE (rtl)) { case POST_INC: case POST_DEC: case POST_MODIFY: /* POST_INC and POST_DEC can be handled just like a SUBREG. So we just fall into the SUBREG code. */ /* ... fall through ... */ case SUBREG: /* The case of a subreg may arise when we have a local (register) variable or a formal (register) parameter which doesn't quite fill up an entire register. For now, just assume that it is legitimate to make the Dwarf info refer to the whole register which contains the given subreg. */ rtl = SUBREG_REG (rtl); /* ... fall through ... */ case REG: /* Whenever a register number forms a part of the description of the method for calculating the (dynamic) address of a memory resident object, DWARF rules require the register number be referred to as a "base register". This distinction is not based in any way upon what category of register the hardware believes the given register belongs to. This is strictly DWARF terminology we're dealing with here. Note that in cases where the location of a memory-resident data object could be expressed as: OP_ADD (OP_BASEREG (basereg), OP_CONST (0)) the actual DWARF location descriptor that we generate may just be OP_BASEREG (basereg). This may look deceptively like the object in question was allocated to a register (rather than in memory) so DWARF consumers need to be aware of the subtle distinction between OP_REG and OP_BASEREG. 
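A concrete, illustrative case: a local int sitting at offset -8 from the frame base is described below as something like

      DW_OP_fbreg -8       (or DW_OP_breg<n> -8 when DW_AT_frame_base cannot be used)

   which directs the consumer to compute an address and then fetch the value from memory, whereas DW_OP_reg<n> would claim that the value itself lives in register n. The two must not be conflated.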
*/ if (REGNO (rtl) < FIRST_PSEUDO_REGISTER) mem_loc_result = based_loc_descr (dbx_reg_number (rtl), 0, can_use_fbreg); break; case MEM: mem_loc_result = mem_loc_descriptor (XEXP (rtl, 0), GET_MODE (rtl), can_use_fbreg); if (mem_loc_result != 0) add_loc_descr (&mem_loc_result, new_loc_descr (DW_OP_deref, 0, 0)); break; case LO_SUM: rtl = XEXP (rtl, 1); /* ... fall through ... */ case LABEL_REF: /* Some ports can transform a symbol ref into a label ref, because the symbol ref is too far away and has to be dumped into a constant pool. */ case CONST: case SYMBOL_REF: /* Alternatively, the symbol in the constant pool might be referenced by a different symbol. */ if (GET_CODE (rtl) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (rtl)) { bool marked; rtx tmp = get_pool_constant_mark (rtl, &marked); if (GET_CODE (tmp) == SYMBOL_REF) { rtl = tmp; if (CONSTANT_POOL_ADDRESS_P (tmp)) get_pool_constant_mark (tmp, &marked); else marked = true; } /* If all references to this pool constant were optimized away, it was not output and thus we can't represent it. FIXME: might try to use DW_OP_const_value here, though DW_OP_piece complicates it. */ if (!marked) return 0; } mem_loc_result = new_loc_descr (DW_OP_addr, 0, 0); mem_loc_result->dw_loc_oprnd1.val_class = dw_val_class_addr; mem_loc_result->dw_loc_oprnd1.v.val_addr = rtl; VARRAY_PUSH_RTX (used_rtx_varray, rtl); break; case PRE_MODIFY: /* Extract the PLUS expression nested inside and fall into PLUS code below. */ rtl = XEXP (rtl, 1); goto plus; case PRE_INC: case PRE_DEC: /* Turn these into a PLUS expression and fall into the PLUS code below. */ rtl = gen_rtx_PLUS (word_mode, XEXP (rtl, 0), GEN_INT (GET_CODE (rtl) == PRE_INC ? GET_MODE_UNIT_SIZE (mode) : -GET_MODE_UNIT_SIZE (mode))); /* ... fall through ... */ case PLUS: plus: if (is_based_loc (rtl)) mem_loc_result = based_loc_descr (dbx_reg_number (XEXP (rtl, 0)), INTVAL (XEXP (rtl, 1)), can_use_fbreg); else { mem_loc_result = mem_loc_descriptor (XEXP (rtl, 0), mode, can_use_fbreg); if (mem_loc_result == 0) break; if (GET_CODE (XEXP (rtl, 1)) == CONST_INT && INTVAL (XEXP (rtl, 1)) >= 0) add_loc_descr (&mem_loc_result, new_loc_descr (DW_OP_plus_uconst, INTVAL (XEXP (rtl, 1)), 0)); else { add_loc_descr (&mem_loc_result, mem_loc_descriptor (XEXP (rtl, 1), mode, can_use_fbreg)); add_loc_descr (&mem_loc_result, new_loc_descr (DW_OP_plus, 0, 0)); } } break; /* If a pseudo-reg is optimized away, it is possible for it to be replaced with a MEM containing a multiply or shift. */ case MULT: op = DW_OP_mul; goto do_binop; case ASHIFT: op = DW_OP_shl; goto do_binop; case ASHIFTRT: op = DW_OP_shra; goto do_binop; case LSHIFTRT: op = DW_OP_shr; goto do_binop; do_binop: { dw_loc_descr_ref op0 = mem_loc_descriptor (XEXP (rtl, 0), mode, can_use_fbreg); dw_loc_descr_ref op1 = mem_loc_descriptor (XEXP (rtl, 1), mode, can_use_fbreg); if (op0 == 0 || op1 == 0) break; mem_loc_result = op0; add_loc_descr (&mem_loc_result, op1); add_loc_descr (&mem_loc_result, new_loc_descr (op, 0, 0)); break; } case CONST_INT: mem_loc_result = int_loc_descriptor (INTVAL (rtl)); break; default: abort (); } return mem_loc_result; } /* Return a descriptor that describes the concatenation of two locations. This is typically a complex variable. 
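For example (assuming a complex double whose real and imaginary halves each landed in their own location), the result is a piece-wise descriptor along the lines of

      <real-part location> DW_OP_piece 8  <imaginary-part location> DW_OP_piece 8

   with each piece size taken from GET_MODE_SIZE of the corresponding half.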
*/ static dw_loc_descr_ref concat_loc_descriptor (rtx x0, rtx x1) { dw_loc_descr_ref cc_loc_result = NULL; dw_loc_descr_ref x0_ref = loc_descriptor (x0, true); dw_loc_descr_ref x1_ref = loc_descriptor (x1, true); if (x0_ref == 0 || x1_ref == 0) return 0; cc_loc_result = x0_ref; add_loc_descr (&cc_loc_result, new_loc_descr (DW_OP_piece, GET_MODE_SIZE (GET_MODE (x0)), 0)); add_loc_descr (&cc_loc_result, x1_ref); add_loc_descr (&cc_loc_result, new_loc_descr (DW_OP_piece, GET_MODE_SIZE (GET_MODE (x1)), 0)); return cc_loc_result; } /* Output a proper Dwarf location descriptor for a variable or parameter which is either allocated in a register or in a memory location. For a register, we just generate an OP_REG and the register number. For a memory location we provide a Dwarf postfix expression describing how to generate the (dynamic) address of the object onto the address stack. If we don't know how to describe it, return 0. */ static dw_loc_descr_ref loc_descriptor (rtx rtl, bool can_use_fbreg) { dw_loc_descr_ref loc_result = NULL; switch (GET_CODE (rtl)) { case SUBREG: /* The case of a subreg may arise when we have a local (register) variable or a formal (register) parameter which doesn't quite fill up an entire register. For now, just assume that it is legitimate to make the Dwarf info refer to the whole register which contains the given subreg. */ rtl = SUBREG_REG (rtl); /* ... fall through ... */ case REG: loc_result = reg_loc_descriptor (rtl); break; case MEM: loc_result = mem_loc_descriptor (XEXP (rtl, 0), GET_MODE (rtl), can_use_fbreg); break; case CONCAT: loc_result = concat_loc_descriptor (XEXP (rtl, 0), XEXP (rtl, 1)); break; case VAR_LOCATION: /* Single part. */ if (GET_CODE (XEXP (rtl, 1)) != PARALLEL) { loc_result = loc_descriptor (XEXP (XEXP (rtl, 1), 0), can_use_fbreg); } /* Multiple parts. */ else { rtvec par_elems = XVEC (XEXP (rtl, 1), 0); int num_elem = GET_NUM_ELEM (par_elems); enum machine_mode mode; int i; /* Create the first one, so we have something to add to. */ loc_result = loc_descriptor (XEXP (RTVEC_ELT (par_elems, 0), 0), can_use_fbreg); mode = GET_MODE (XEXP (RTVEC_ELT (par_elems, 0), 0)); add_loc_descr (&loc_result, new_loc_descr (DW_OP_piece, GET_MODE_SIZE (mode), 0)); for (i = 1; i < num_elem; i++) { dw_loc_descr_ref temp; temp = loc_descriptor (XEXP (RTVEC_ELT (par_elems, i), 0), can_use_fbreg); add_loc_descr (&loc_result, temp); mode = GET_MODE (XEXP (RTVEC_ELT (par_elems, i), 0)); add_loc_descr (&loc_result, new_loc_descr (DW_OP_piece, GET_MODE_SIZE (mode), 0)); } } break; default: abort (); } return loc_result; } /* Similar, but generate the descriptor from trees instead of rtl. This comes up particularly with variable length arrays. If ADDRESSP is nonzero, we are looking for an address. Otherwise, we return a value. If we can't make a descriptor, return 0. */ static dw_loc_descr_ref loc_descriptor_from_tree (tree loc, int addressp) { dw_loc_descr_ref ret, ret1; int indirect_p = 0; int unsignedp = TYPE_UNSIGNED (TREE_TYPE (loc)); enum dwarf_location_atom op; /* ??? Most of the time we do not take proper care for sign/zero extending the values properly. Hopefully this won't be a real problem... */ switch (TREE_CODE (loc)) { case ERROR_MARK: return 0; case PLACEHOLDER_EXPR: /* This case involves extracting fields from an object to determine the position of other fields. We don't try to encode this here. The only user of this is Ada, which encodes the needed information using the names of types. 
*/ return 0; case CALL_EXPR: return 0; case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: /* There are no opcodes for these operations. */ return 0; case ADDR_EXPR: /* We can support this only if we can look through conversions and find an INDIRECT_EXPR. */ for (loc = TREE_OPERAND (loc, 0); TREE_CODE (loc) == CONVERT_EXPR || TREE_CODE (loc) == NOP_EXPR || TREE_CODE (loc) == NON_LVALUE_EXPR || TREE_CODE (loc) == VIEW_CONVERT_EXPR || TREE_CODE (loc) == SAVE_EXPR; loc = TREE_OPERAND (loc, 0)) ; return (TREE_CODE (loc) == INDIRECT_REF ? loc_descriptor_from_tree (TREE_OPERAND (loc, 0), addressp) : 0); case VAR_DECL: if (DECL_THREAD_LOCAL (loc)) { rtx rtl; #ifndef ASM_OUTPUT_DWARF_DTPREL /* If this is not defined, we have no way to emit the data. */ return 0; #endif /* The way DW_OP_GNU_push_tls_address is specified, we can only look up addresses of objects in the current module. */ if (DECL_EXTERNAL (loc)) return 0; rtl = rtl_for_decl_location (loc); if (rtl == NULL_RTX) return 0; if (!MEM_P (rtl)) return 0; rtl = XEXP (rtl, 0); if (! CONSTANT_P (rtl)) return 0; ret = new_loc_descr (INTERNAL_DW_OP_tls_addr, 0, 0); ret->dw_loc_oprnd1.val_class = dw_val_class_addr; ret->dw_loc_oprnd1.v.val_addr = rtl; ret1 = new_loc_descr (DW_OP_GNU_push_tls_address, 0, 0); add_loc_descr (&ret, ret1); indirect_p = 1; break; } /* Fall through. */ case PARM_DECL: case RESULT_DECL: { rtx rtl = rtl_for_decl_location (loc); if (rtl == NULL_RTX) return 0; else if (CONSTANT_P (rtl)) { ret = new_loc_descr (DW_OP_addr, 0, 0); ret->dw_loc_oprnd1.val_class = dw_val_class_addr; ret->dw_loc_oprnd1.v.val_addr = rtl; indirect_p = 1; } else { enum machine_mode mode = GET_MODE (rtl); if (MEM_P (rtl)) { indirect_p = 1; rtl = XEXP (rtl, 0); } ret = mem_loc_descriptor (rtl, mode, true); } } break; case INDIRECT_REF: ret = loc_descriptor_from_tree (TREE_OPERAND (loc, 0), 0); indirect_p = 1; break; case COMPOUND_EXPR: return loc_descriptor_from_tree (TREE_OPERAND (loc, 1), addressp); case NOP_EXPR: case CONVERT_EXPR: case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: case SAVE_EXPR: case MODIFY_EXPR: return loc_descriptor_from_tree (TREE_OPERAND (loc, 0), addressp); case COMPONENT_REF: case BIT_FIELD_REF: case ARRAY_REF: case ARRAY_RANGE_REF: { tree obj, offset; HOST_WIDE_INT bitsize, bitpos, bytepos; enum machine_mode mode; int volatilep; obj = get_inner_reference (loc, &bitsize, &bitpos, &offset, &mode, &unsignedp, &volatilep); if (obj == loc) return 0; ret = loc_descriptor_from_tree (obj, 1); if (ret == 0 || bitpos % BITS_PER_UNIT != 0 || bitsize % BITS_PER_UNIT != 0) return 0; if (offset != NULL_TREE) { /* Variable offset. */ add_loc_descr (&ret, loc_descriptor_from_tree (offset, 0)); add_loc_descr (&ret, new_loc_descr (DW_OP_plus, 0, 0)); } if (!addressp) indirect_p = 1; bytepos = bitpos / BITS_PER_UNIT; if (bytepos > 0) add_loc_descr (&ret, new_loc_descr (DW_OP_plus_uconst, bytepos, 0)); else if (bytepos < 0) { add_loc_descr (&ret, int_loc_descriptor (bytepos)); add_loc_descr (&ret, new_loc_descr (DW_OP_plus, 0, 0)); } break; } case INTEGER_CST: if (host_integerp (loc, 0)) ret = int_loc_descriptor (tree_low_cst (loc, 0)); else return 0; break; case CONSTRUCTOR: { /* Get an RTL for this, if something has been emitted. 
*/ rtx rtl = lookup_constant_def (loc); enum machine_mode mode; if (!MEM_P (rtl)) return 0; mode = GET_MODE (rtl); rtl = XEXP (rtl, 0); rtl = targetm.delegitimize_address (rtl); indirect_p = 1; ret = mem_loc_descriptor (rtl, mode, true); break; } case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: case BIT_AND_EXPR: op = DW_OP_and; goto do_binop; case TRUTH_XOR_EXPR: case BIT_XOR_EXPR: op = DW_OP_xor; goto do_binop; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: case BIT_IOR_EXPR: op = DW_OP_or; goto do_binop; case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_DIV_EXPR: op = DW_OP_div; goto do_binop; case MINUS_EXPR: op = DW_OP_minus; goto do_binop; case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR: case ROUND_MOD_EXPR: case TRUNC_MOD_EXPR: op = DW_OP_mod; goto do_binop; case MULT_EXPR: op = DW_OP_mul; goto do_binop; case LSHIFT_EXPR: op = DW_OP_shl; goto do_binop; case RSHIFT_EXPR: op = (unsignedp ? DW_OP_shr : DW_OP_shra); goto do_binop; case PLUS_EXPR: if (TREE_CODE (TREE_OPERAND (loc, 1)) == INTEGER_CST && host_integerp (TREE_OPERAND (loc, 1), 0)) { ret = loc_descriptor_from_tree (TREE_OPERAND (loc, 0), 0); if (ret == 0) return 0; add_loc_descr (&ret, new_loc_descr (DW_OP_plus_uconst, tree_low_cst (TREE_OPERAND (loc, 1), 0), 0)); break; } op = DW_OP_plus; goto do_binop; case LE_EXPR: if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (loc, 0)))) return 0; op = DW_OP_le; goto do_binop; case GE_EXPR: if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (loc, 0)))) return 0; op = DW_OP_ge; goto do_binop; case LT_EXPR: if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (loc, 0)))) return 0; op = DW_OP_lt; goto do_binop; case GT_EXPR: if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (loc, 0)))) return 0; op = DW_OP_gt; goto do_binop; case EQ_EXPR: op = DW_OP_eq; goto do_binop; case NE_EXPR: op = DW_OP_ne; goto do_binop; do_binop: ret = loc_descriptor_from_tree (TREE_OPERAND (loc, 0), 0); ret1 = loc_descriptor_from_tree (TREE_OPERAND (loc, 1), 0); if (ret == 0 || ret1 == 0) return 0; add_loc_descr (&ret, ret1); add_loc_descr (&ret, new_loc_descr (op, 0, 0)); break; case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: op = DW_OP_not; goto do_unop; case ABS_EXPR: op = DW_OP_abs; goto do_unop; case NEGATE_EXPR: op = DW_OP_neg; goto do_unop; do_unop: ret = loc_descriptor_from_tree (TREE_OPERAND (loc, 0), 0); if (ret == 0) return 0; add_loc_descr (&ret, new_loc_descr (op, 0, 0)); break; case MIN_EXPR: case MAX_EXPR: { const enum tree_code code = TREE_CODE (loc) == MIN_EXPR ? GT_EXPR : LT_EXPR; loc = build (COND_EXPR, TREE_TYPE (loc), build (code, integer_type_node, TREE_OPERAND (loc, 0), TREE_OPERAND (loc, 1)), TREE_OPERAND (loc, 1), TREE_OPERAND (loc, 0)); } /* ... fall through ... */ case COND_EXPR: { dw_loc_descr_ref lhs = loc_descriptor_from_tree (TREE_OPERAND (loc, 1), 0); dw_loc_descr_ref rhs = loc_descriptor_from_tree (TREE_OPERAND (loc, 2), 0); dw_loc_descr_ref bra_node, jump_node, tmp; ret = loc_descriptor_from_tree (TREE_OPERAND (loc, 0), 0); if (ret == 0 || lhs == 0 || rhs == 0) return 0; bra_node = new_loc_descr (DW_OP_bra, 0, 0); add_loc_descr (&ret, bra_node); add_loc_descr (&ret, rhs); jump_node = new_loc_descr (DW_OP_skip, 0, 0); add_loc_descr (&ret, jump_node); add_loc_descr (&ret, lhs); bra_node->dw_loc_oprnd1.val_class = dw_val_class_loc; bra_node->dw_loc_oprnd1.v.val_loc = lhs; /* ??? Need a node to point the skip at. Use a nop. 
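The overall shape assembled here, shown schematically for `c ? x : y' (an illustrative sketch, not literal output), is

      <c>  DW_OP_bra L1  <y>  DW_OP_skip L2  L1: <x>  L2: DW_OP_nop

   The branch is taken when the condition is nonzero and jumps over the else-value straight to the then-value, while the skip after the else-value jumps past the then-value to the trailing nop.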
*/ tmp = new_loc_descr (DW_OP_nop, 0, 0); add_loc_descr (&ret, tmp); jump_node->dw_loc_oprnd1.val_class = dw_val_class_loc; jump_node->dw_loc_oprnd1.v.val_loc = tmp; } break; default: /* Leave front-end specific codes as simply unknown. This comes up, for instance, with the C STMT_EXPR. */ if ((unsigned int) TREE_CODE (loc) >= (unsigned int) LAST_AND_UNUSED_TREE_CODE) return 0; /* Otherwise this is a generic code; we should just lists all of these explicitly. Aborting means we forgot one. */ abort (); } /* Show if we can't fill the request for an address. */ if (addressp && indirect_p == 0) return 0; /* If we've got an address and don't want one, dereference. */ if (!addressp && indirect_p > 0) { HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (loc)); if (size > DWARF2_ADDR_SIZE || size == -1) return 0; else if (size == DWARF2_ADDR_SIZE) op = DW_OP_deref; else op = DW_OP_deref_size; add_loc_descr (&ret, new_loc_descr (op, size, 0)); } return ret; } /* Given a value, round it up to the lowest multiple of `boundary' which is not less than the value itself. */ static inline HOST_WIDE_INT ceiling (HOST_WIDE_INT value, unsigned int boundary) { return (((value + boundary - 1) / boundary) * boundary); } /* Given a pointer to what is assumed to be a FIELD_DECL node, return a pointer to the declared type for the relevant field variable, or return `integer_type_node' if the given node turns out to be an ERROR_MARK node. */ static inline tree field_type (tree decl) { tree type; if (TREE_CODE (decl) == ERROR_MARK) return integer_type_node; type = DECL_BIT_FIELD_TYPE (decl); if (type == NULL_TREE) type = TREE_TYPE (decl); return type; } /* Given a pointer to a tree node, return the alignment in bits for it, or else return BITS_PER_WORD if the node actually turns out to be an ERROR_MARK node. */ static inline unsigned simple_type_align_in_bits (tree type) { return (TREE_CODE (type) != ERROR_MARK) ? TYPE_ALIGN (type) : BITS_PER_WORD; } static inline unsigned simple_decl_align_in_bits (tree decl) { return (TREE_CODE (decl) != ERROR_MARK) ? DECL_ALIGN (decl) : BITS_PER_WORD; } /* Given a pointer to a FIELD_DECL, compute and return the byte offset of the lowest addressed byte of the "containing object" for the given FIELD_DECL, or return 0 if we are unable to determine what that offset is, either because the argument turns out to be a pointer to an ERROR_MARK node, or because the offset is actually variable. (We can't handle the latter case just yet). */ static HOST_WIDE_INT field_byte_offset (tree decl) { unsigned int type_align_in_bits; unsigned int decl_align_in_bits; unsigned HOST_WIDE_INT type_size_in_bits; HOST_WIDE_INT object_offset_in_bits; tree type; tree field_size_tree; HOST_WIDE_INT bitpos_int; HOST_WIDE_INT deepest_bitpos; unsigned HOST_WIDE_INT field_size_in_bits; if (TREE_CODE (decl) == ERROR_MARK) return 0; else if (TREE_CODE (decl) != FIELD_DECL) abort (); type = field_type (decl); field_size_tree = DECL_SIZE (decl); /* The size could be unspecified if there was an error, or for a flexible array member. */ if (! field_size_tree) field_size_tree = bitsize_zero_node; /* We cannot yet cope with fields whose positions are variable, so for now, when we see such things, we simply return 0. Someday, we may be able to handle such cases, but it will be damn difficult. */ if (! host_integerp (bit_position (decl), 0)) return 0; bitpos_int = int_bit_position (decl); /* If we don't know the size of the field, pretend it's a full word. 
*/ if (host_integerp (field_size_tree, 1)) field_size_in_bits = tree_low_cst (field_size_tree, 1); else field_size_in_bits = BITS_PER_WORD; type_size_in_bits = simple_type_size_in_bits (type); type_align_in_bits = simple_type_align_in_bits (type); decl_align_in_bits = simple_decl_align_in_bits (decl); /* The GCC front-end doesn't make any attempt to keep track of the starting bit offset (relative to the start of the containing structure type) of the hypothetical "containing object" for a bit-field. Thus, when computing the byte offset value for the start of the "containing object" of a bit-field, we must deduce this information on our own. This can be rather tricky to do in some cases. For example, handling the following structure type definition when compiling for an i386/i486 target (which only aligns long long's to 32-bit boundaries) can be very tricky: struct S { int field1; long long field2:31; }; Fortunately, there is a simple rule-of-thumb which can be used in such cases. When compiling for an i386/i486, GCC will allocate 8 bytes for the structure shown above. It decides to do this based upon one simple rule for bit-field allocation. GCC allocates each "containing object" for each bit-field at the first (i.e. lowest addressed) legitimate alignment boundary (based upon the required minimum alignment for the declared type of the field) which it can possibly use, subject to the condition that there is still enough available space remaining in the containing object (when allocated at the selected point) to fully accommodate all of the bits of the bit-field itself. This simple rule makes it obvious why GCC allocates 8 bytes for each object of the structure type shown above. When looking for a place to allocate the "containing object" for `field2', the compiler simply tries to allocate a 64-bit "containing object" at each successive 32-bit boundary (starting at zero) until it finds a place to allocate that 64- bit field such that at least 31 contiguous (and previously unallocated) bits remain within that selected 64 bit field. (As it turns out, for the example above, the compiler finds it is OK to allocate the "containing object" 64-bit field at bit-offset zero within the structure type.) Here we attempt to work backwards from the limited set of facts we're given, and we try to deduce from those facts, where GCC must have believed that the containing object started (within the structure type). The value we deduce is then used (by the callers of this routine) to generate DW_AT_location and DW_AT_bit_offset attributes for fields (both bit-fields and, in the case of DW_AT_location, regular fields as well). */ /* Figure out the bit-distance from the start of the structure to the "deepest" bit of the bit-field. */ deepest_bitpos = bitpos_int + field_size_in_bits; /* This is the tricky part. Use some fancy footwork to deduce where the lowest addressed bit of the containing object must be. */ object_offset_in_bits = deepest_bitpos - type_size_in_bits; /* Round up to type_align by default. This works best for bitfields. */ object_offset_in_bits += type_align_in_bits - 1; object_offset_in_bits /= type_align_in_bits; object_offset_in_bits *= type_align_in_bits; if (object_offset_in_bits > bitpos_int) { /* Sigh, the decl must be packed. */ object_offset_in_bits = deepest_bitpos - type_size_in_bits; /* Round up to decl_align instead. 
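(As a worked instance of the arithmetic above, using the struct S example: bitpos_int is 32 and field_size_in_bits is 31, so deepest_bitpos is 63; subtracting the 64-bit type size gives -1, and rounding up to the 32-bit type alignment yields an object_offset_in_bits of 0. The containing object therefore starts at byte 0, and this packed fallback is not needed for that case.)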
*/ object_offset_in_bits += decl_align_in_bits - 1; object_offset_in_bits /= decl_align_in_bits; object_offset_in_bits *= decl_align_in_bits; } return object_offset_in_bits / BITS_PER_UNIT; } /* The following routines define various Dwarf attributes and any data associated with them. */ /* Add a location description attribute value to a DIE. This emits location attributes suitable for whole variables and whole parameters. Note that the location attributes for struct fields are generated by the routine `data_member_location_attribute' below. */ static inline void add_AT_location_description (dw_die_ref die, enum dwarf_attribute attr_kind, dw_loc_descr_ref descr) { if (descr != 0) add_AT_loc (die, attr_kind, descr); } /* Attach the specialized form of location attribute used for data members of struct and union types. In the special case of a FIELD_DECL node which represents a bit-field, the "offset" part of this special location descriptor must indicate the distance in bytes from the lowest-addressed byte of the containing struct or union type to the lowest-addressed byte of the "containing object" for the bit-field. (See the `field_byte_offset' function above). For any given bit-field, the "containing object" is a hypothetical object (of some integral or enum type) within which the given bit-field lives. The type of this hypothetical "containing object" is always the same as the declared type of the individual bit-field itself (for GCC anyway... the DWARF spec doesn't actually mandate this). Note that it is the size (in bytes) of the hypothetical "containing object" which will be given in the DW_AT_byte_size attribute for this bit-field. (See the `byte_size_attribute' function below.) It is also used when calculating the value of the DW_AT_bit_offset attribute. (See the `bit_offset_attribute' function below.) */ static void add_data_member_location_attribute (dw_die_ref die, tree decl) { HOST_WIDE_INT offset; dw_loc_descr_ref loc_descr = 0; if (TREE_CODE (decl) == TREE_BINFO) { /* We're working on the TAG_inheritance for a base class. */ if (BINFO_VIRTUAL_P (decl) && is_cxx ()) { /* For C++ virtual bases we can't just use BINFO_OFFSET, as they aren't at a fixed offset from all (sub)objects of the same type. We need to extract the appropriate offset from our vtable. The following dwarf expression means BaseAddr = ObAddr + *((*ObAddr) - Offset) This is specific to the V3 ABI, of course. */ dw_loc_descr_ref tmp; /* Make a copy of the object address. */ tmp = new_loc_descr (DW_OP_dup, 0, 0); add_loc_descr (&loc_descr, tmp); /* Extract the vtable address. */ tmp = new_loc_descr (DW_OP_deref, 0, 0); add_loc_descr (&loc_descr, tmp); /* Calculate the address of the offset. */ offset = tree_low_cst (BINFO_VPTR_FIELD (decl), 0); if (offset >= 0) abort (); tmp = int_loc_descriptor (-offset); add_loc_descr (&loc_descr, tmp); tmp = new_loc_descr (DW_OP_minus, 0, 0); add_loc_descr (&loc_descr, tmp); /* Extract the offset. */ tmp = new_loc_descr (DW_OP_deref, 0, 0); add_loc_descr (&loc_descr, tmp); /* Add it to the object address. */ tmp = new_loc_descr (DW_OP_plus, 0, 0); add_loc_descr (&loc_descr, tmp); } else offset = tree_low_cst (BINFO_OFFSET (decl), 0); } else offset = field_byte_offset (decl); if (! loc_descr) { enum dwarf_location_atom op; /* The DWARF2 standard says that we should assume that the structure address is already on the stack, so we can specify a structure field address by using DW_OP_plus_uconst. */ #ifdef MIPS_DEBUGGING_INFO /* ??? 
The SGI dwarf reader does not handle the DW_OP_plus_uconst operator correctly. It works only if we leave the offset on the stack. */ op = DW_OP_constu; #else op = DW_OP_plus_uconst; #endif loc_descr = new_loc_descr (op, offset, 0); } add_AT_loc (die, DW_AT_data_member_location, loc_descr); } /* Writes integer values to dw_vec_const array. */ static void insert_int (HOST_WIDE_INT val, unsigned int size, unsigned char *dest) { while (size != 0) { *dest++ = val & 0xff; val >>= 8; --size; } } /* Reads integers from dw_vec_const array. Inverse of insert_int. */ static HOST_WIDE_INT extract_int (const unsigned char *src, unsigned int size) { HOST_WIDE_INT val = 0; src += size; while (size != 0) { val <<= 8; val |= *--src & 0xff; --size; } return val; } /* Writes floating point values to dw_vec_const array. */ static void insert_float (rtx rtl, unsigned char *array) { REAL_VALUE_TYPE rv; long val[4]; int i; REAL_VALUE_FROM_CONST_DOUBLE (rv, rtl); real_to_target (val, &rv, GET_MODE (rtl)); /* real_to_target puts 32-bit pieces in each long. Pack them. */ for (i = 0; i < GET_MODE_SIZE (GET_MODE (rtl)) / 4; i++) { insert_int (val[i], 4, array); array += 4; } } /* Attach a DW_AT_const_value attribute for a variable or a parameter which does not have a "location" either in memory or in a register. These things can arise in GNU C when a constant is passed as an actual parameter to an inlined function. They can also arise in C++ where declared constants do not necessarily get memory "homes". */ static void add_const_value_attribute (dw_die_ref die, rtx rtl) { switch (GET_CODE (rtl)) { case CONST_INT: { HOST_WIDE_INT val = INTVAL (rtl); if (val < 0) add_AT_int (die, DW_AT_const_value, val); else add_AT_unsigned (die, DW_AT_const_value, (unsigned HOST_WIDE_INT) val); } break; case CONST_DOUBLE: /* Note that a CONST_DOUBLE rtx could represent either an integer or a floating-point constant. A CONST_DOUBLE is used whenever the constant requires more than one word in order to be adequately represented. We output CONST_DOUBLEs as blocks. */ { enum machine_mode mode = GET_MODE (rtl); if (GET_MODE_CLASS (mode) == MODE_FLOAT) { unsigned int length = GET_MODE_SIZE (mode); unsigned char *array = ggc_alloc (length); insert_float (rtl, array); add_AT_vec (die, DW_AT_const_value, length / 4, 4, array); } else { /* ??? We really should be using HOST_WIDE_INT throughout. 
*/ if (HOST_BITS_PER_LONG != HOST_BITS_PER_WIDE_INT) abort (); add_AT_long_long (die, DW_AT_const_value, CONST_DOUBLE_HIGH (rtl), CONST_DOUBLE_LOW (rtl)); } } break; case CONST_VECTOR: { enum machine_mode mode = GET_MODE (rtl); unsigned int elt_size = GET_MODE_UNIT_SIZE (mode); unsigned int length = CONST_VECTOR_NUNITS (rtl); unsigned char *array = ggc_alloc (length * elt_size); unsigned int i; unsigned char *p; if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT) { for (i = 0, p = array; i < length; i++, p += elt_size) { rtx elt = CONST_VECTOR_ELT (rtl, i); HOST_WIDE_INT lo, hi; if (GET_CODE (elt) == CONST_INT) { lo = INTVAL (elt); hi = -(lo < 0); } else if (GET_CODE (elt) == CONST_DOUBLE) { lo = CONST_DOUBLE_LOW (elt); hi = CONST_DOUBLE_HIGH (elt); } else abort (); if (elt_size <= sizeof (HOST_WIDE_INT)) insert_int (lo, elt_size, p); else if (elt_size == 2 * sizeof (HOST_WIDE_INT)) { unsigned char *p0 = p; unsigned char *p1 = p + sizeof (HOST_WIDE_INT); if (WORDS_BIG_ENDIAN) { p0 = p1; p1 = p; } insert_int (lo, sizeof (HOST_WIDE_INT), p0); insert_int (hi, sizeof (HOST_WIDE_INT), p1); } else abort (); } } else if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT) { for (i = 0, p = array; i < length; i++, p += elt_size) { rtx elt = CONST_VECTOR_ELT (rtl, i); insert_float (elt, p); } } else abort (); add_AT_vec (die, DW_AT_const_value, length, elt_size, array); } break; case CONST_STRING: add_AT_string (die, DW_AT_const_value, XSTR (rtl, 0)); break; case SYMBOL_REF: case LABEL_REF: case CONST: add_AT_addr (die, DW_AT_const_value, rtl); VARRAY_PUSH_RTX (used_rtx_varray, rtl); break; case PLUS: /* In cases where an inlined instance of an inline function is passed the address of an `auto' variable (which is local to the caller) we can get a situation where the DECL_RTL of the artificial local variable (for the inlining) which acts as a stand-in for the corresponding formal parameter (of the inline function) will look like (plus:SI (reg:SI FRAME_PTR) (const_int ...)). This is not exactly a compile-time constant expression, but it isn't the address of the (artificial) local variable either. Rather, it represents the *value* which the artificial local variable always has during its lifetime. We currently have no way to represent such quasi-constant values in Dwarf, so for now we just punt and generate nothing. */ break; default: /* No other kinds of rtx should be possible here. */ abort (); } } static rtx rtl_for_decl_location (tree decl) { rtx rtl; /* Here we have to decide where we are going to say the parameter "lives" (as far as the debugger is concerned). We only have a couple of choices. GCC provides us with DECL_RTL and with DECL_INCOMING_RTL. DECL_RTL normally indicates where the parameter lives during most of the activation of the function. If optimization is enabled however, this could be either NULL or else a pseudo-reg. Both of those cases indicate that the parameter doesn't really live anywhere (as far as the code generation parts of GCC are concerned) during most of the function's activation. That will happen (for example) if the parameter is never referenced within the function. We could just generate a location descriptor here for all non-NULL non-pseudo values of DECL_RTL and ignore all of the rest, but we can be a little nicer than that if we also consider DECL_INCOMING_RTL in cases where DECL_RTL is NULL or is a pseudo-reg. Note however that we can only get away with using DECL_INCOMING_RTL as a backup substitute for DECL_RTL in certain limited cases. 
In cases where DECL_ARG_TYPE (decl) indicates the same type as TREE_TYPE (decl), we can be sure that the parameter was passed using the same type as it is declared to have within the function, and that its DECL_INCOMING_RTL points us to a place where a value of that type is passed. In cases where DECL_ARG_TYPE (decl) and TREE_TYPE (decl) are different, we cannot (in general) use DECL_INCOMING_RTL as a substitute for DECL_RTL because in these cases DECL_INCOMING_RTL points us to a value of some type which is *different* from the type of the parameter itself. Thus, if we tried to use DECL_INCOMING_RTL to generate a location attribute in such cases, the debugger would end up (for example) trying to fetch a `float' from a place which actually contains the first part of a `double'. That would lead to really incorrect and confusing output at debug-time. So, in general, we *do not* use DECL_INCOMING_RTL as a backup for DECL_RTL in cases where DECL_ARG_TYPE (decl) != TREE_TYPE (decl). There are a couple of exceptions however. On little-endian machines we can get away with using DECL_INCOMING_RTL even when DECL_ARG_TYPE (decl) is not the same as TREE_TYPE (decl), but only when DECL_ARG_TYPE (decl) is an integral type that is smaller than TREE_TYPE (decl). These cases arise when (on a little-endian machine) a non-prototyped function has a parameter declared to be of type `short' or `char'. In such cases, TREE_TYPE (decl) will be `short' or `char', DECL_ARG_TYPE (decl) will be `int', and DECL_INCOMING_RTL will point to the lowest-order byte of the passed `int' value. If the debugger then uses that address to fetch a `short' or a `char' (on a little-endian machine) the result will be the correct data, so we allow for such exceptional cases below. Note that our goal here is to describe the place where the given formal parameter lives during most of the function's activation (i.e. between the end of the prologue and the start of the epilogue). We'll do that as best as we can. Note however that if the given formal parameter is modified sometime during the execution of the function, then a stack backtrace (at debug-time) will show the function as having been called with the *new* value rather than the value which was originally passed in. This happens rarely enough that it is not a major problem, but it *is* a problem, and I'd like to fix it. A future version of dwarf2out.c may generate two additional attributes for any given DW_TAG_formal_parameter DIE which will describe the "passed type" and the "passed location" for the given formal parameter in addition to the attributes we now generate to indicate the "declared type" and the "active location" for each parameter. This additional set of attributes could be used by debuggers for stack backtraces. Separately, note that sometimes DECL_RTL can be NULL and DECL_INCOMING_RTL can be NULL also. This happens (for example) for inlined-instances of inline function formal parameters which are never referenced. This really shouldn't be happening. All PARM_DECL nodes should get valid non-NULL DECL_INCOMING_RTL values. FIXME. */ /* Use DECL_RTL as the "location" unless we find something better. */ rtl = DECL_RTL_IF_SET (decl); /* When generating abstract instances, ignore everything except constants, symbols living in memory, and symbols living in fixed registers. */ if (! 
reload_completed) { if (rtl && (CONSTANT_P (rtl) || (MEM_P (rtl) && CONSTANT_P (XEXP (rtl, 0))) || (REG_P (rtl) && TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl)))) { rtl = targetm.delegitimize_address (rtl); return rtl; } rtl = NULL_RTX; } else if (TREE_CODE (decl) == PARM_DECL) { if (rtl == NULL_RTX || is_pseudo_reg (rtl)) { tree declared_type = type_main_variant (TREE_TYPE (decl)); tree passed_type = type_main_variant (DECL_ARG_TYPE (decl)); /* This decl represents a formal parameter which was optimized out. Note that DECL_INCOMING_RTL may be NULL in here, but we handle all cases where (rtl == NULL_RTX) just below. */ if (declared_type == passed_type) rtl = DECL_INCOMING_RTL (decl); else if (! BYTES_BIG_ENDIAN && TREE_CODE (declared_type) == INTEGER_TYPE && (GET_MODE_SIZE (TYPE_MODE (declared_type)) <= GET_MODE_SIZE (TYPE_MODE (passed_type)))) rtl = DECL_INCOMING_RTL (decl); } /* If the parm was passed in registers, but lives on the stack, then make a big endian correction if the mode of the type of the parameter is not the same as the mode of the rtl. */ /* ??? This is the same series of checks that are made in dbxout.c before we reach the big endian correction code there. It isn't clear if all of these checks are necessary here, but keeping them all is the safe thing to do. */ else if (MEM_P (rtl) && XEXP (rtl, 0) != const0_rtx && ! CONSTANT_P (XEXP (rtl, 0)) /* Not passed in memory. */ && !MEM_P (DECL_INCOMING_RTL (decl)) /* Not passed by invisible reference. */ && (!REG_P (XEXP (rtl, 0)) || REGNO (XEXP (rtl, 0)) == HARD_FRAME_POINTER_REGNUM || REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM #if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM || REGNO (XEXP (rtl, 0)) == ARG_POINTER_REGNUM #endif ) /* Big endian correction check. */ && BYTES_BIG_ENDIAN && TYPE_MODE (TREE_TYPE (decl)) != GET_MODE (rtl) && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl))) < UNITS_PER_WORD)) { int offset = (UNITS_PER_WORD - GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl)))); rtl = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (decl)), plus_constant (XEXP (rtl, 0), offset)); } } else if (TREE_CODE (decl) == VAR_DECL && rtl && MEM_P (rtl) && GET_MODE (rtl) != TYPE_MODE (TREE_TYPE (decl)) && BYTES_BIG_ENDIAN) { int rsize = GET_MODE_SIZE (GET_MODE (rtl)); int dsize = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (decl))); /* If a variable is declared "register" yet is smaller than a register, then if we store the variable to memory, it looks like we're storing a register-sized value, when in fact we are not. We need to adjust the offset of the storage location to reflect the actual value's bytes, else gdb will not be able to display it. */ if (rsize > dsize) rtl = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (decl)), plus_constant (XEXP (rtl, 0), rsize-dsize)); } if (rtl != NULL_RTX) { rtl = eliminate_regs (rtl, 0, NULL_RTX); #ifdef LEAF_REG_REMAP if (current_function_uses_only_leaf_regs) leaf_renumber_regs_insn (rtl); #endif } /* A variable with no DECL_RTL but a DECL_INITIAL is a compile-time constant, and will have been substituted directly into all expressions that use it. C does not have such a concept, but C++ and other languages do. */ else if (TREE_CODE (decl) == VAR_DECL && DECL_INITIAL (decl)) { /* If a variable is initialized with a string constant without embedded zeros, build CONST_STRING. 
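For example (an illustrative declaration, not taken from real input), something like

      static const char greeting[] = "hello";

   has a STRING_CST initializer with no embedded zeros; the checks below confirm that the element type is a one-byte integer mode and that the array bounds match the string length, and the CONST_STRING rtx built here is later turned into a DW_AT_const_value string attribute by add_const_value_attribute.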
*/ if (TREE_CODE (DECL_INITIAL (decl)) == STRING_CST && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) { tree arrtype = TREE_TYPE (decl); tree enttype = TREE_TYPE (arrtype); tree domain = TYPE_DOMAIN (arrtype); tree init = DECL_INITIAL (decl); enum machine_mode mode = TYPE_MODE (enttype); if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_SIZE (mode) == 1 && domain && integer_zerop (TYPE_MIN_VALUE (domain)) && compare_tree_int (TYPE_MAX_VALUE (domain), TREE_STRING_LENGTH (init) - 1) == 0 && ((size_t) TREE_STRING_LENGTH (init) == strlen (TREE_STRING_POINTER (init)) + 1)) rtl = gen_rtx_CONST_STRING (VOIDmode, TREE_STRING_POINTER (init)); } /* If the initializer is something that we know will expand into an immediate RTL constant, expand it now. Expanding anything else tends to produce unresolved symbols; see debug/5770 and c++/6381. */ else if (TREE_CODE (DECL_INITIAL (decl)) == INTEGER_CST || TREE_CODE (DECL_INITIAL (decl)) == REAL_CST) { rtl = expand_expr (DECL_INITIAL (decl), NULL_RTX, VOIDmode, EXPAND_INITIALIZER); /* If expand_expr returns a MEM, it wasn't immediate. */ if (rtl && MEM_P (rtl)) abort (); } } if (rtl) rtl = targetm.delegitimize_address (rtl); /* If we don't look past the constant pool, we risk emitting a reference to a constant pool entry that isn't referenced from code, and thus is not emitted. */ if (rtl) rtl = avoid_constant_pool_reference (rtl); return rtl; } /* Generate *either* a DW_AT_location attribute or else a DW_AT_const_value data attribute for a variable or a parameter. We generate the DW_AT_const_value attribute only in those cases where the given variable or parameter does not have a true "location" either in memory or in a register. This can happen (for example) when a constant is passed as an actual argument in a call to an inline function. (It's possible that these things can crop up in other ways also.) Note that one type of constant value which can be passed into an inlined function is a constant pointer. This can happen for example if an actual argument in an inlined function call evaluates to a compile-time constant address. */ static void add_location_or_const_value_attribute (dw_die_ref die, tree decl, enum dwarf_attribute attr) { rtx rtl; dw_loc_descr_ref descr; var_loc_list *loc_list; if (TREE_CODE (decl) == ERROR_MARK) return; else if (TREE_CODE (decl) != VAR_DECL && TREE_CODE (decl) != PARM_DECL && TREE_CODE (decl) != RESULT_DECL) abort (); /* See if we possibly have multiple locations for this variable. */ loc_list = lookup_decl_loc (decl); /* If it truly has multiple locations, the first and last node will differ. */ if (loc_list && loc_list->first != loc_list->last) { const char *secname; const char *endname; dw_loc_list_ref list; rtx varloc; struct var_loc_node *node; /* We need to figure out what section we should use as the base for the address ranges where a given location is valid. 1. If this particular DECL has a section associated with it, use that. 2. If this function has a section associated with it, use that. 3. Otherwise, use the text section. XXX: If you split a variable across multiple sections, this won't notice. 
*/ if (DECL_SECTION_NAME (decl)) { tree sectree = DECL_SECTION_NAME (decl); secname = TREE_STRING_POINTER (sectree); } else if (current_function_decl && DECL_SECTION_NAME (current_function_decl)) { tree sectree = DECL_SECTION_NAME (current_function_decl); secname = TREE_STRING_POINTER (sectree); } else secname = text_section_label; /* Now that we know what section we are using for a base, actually construct the list of locations. The first location information is what is passed to the function that creates the location list, and the remaining locations just get added on to that list. Note that we only know the start address for a location (IE location changes), so to build the range, we use the range [current location start, next location start]. This means we have to special case the last node, and generate a range of [last location start, end of function label]. */ node = loc_list->first; varloc = NOTE_VAR_LOCATION (node->var_loc_note); list = new_loc_list (loc_descriptor (varloc, attr != DW_AT_frame_base), node->label, node->next->label, secname, 1); node = node->next; for (; node->next; node = node->next) if (NOTE_VAR_LOCATION_LOC (node->var_loc_note) != NULL_RTX) { /* The variable has a location between NODE->LABEL and NODE->NEXT->LABEL. */ varloc = NOTE_VAR_LOCATION (node->var_loc_note); add_loc_descr_to_loc_list (&list, loc_descriptor (varloc, attr != DW_AT_frame_base), node->label, node->next->label, secname); } /* If the variable has a location at the last label it keeps its location until the end of function. */ if (NOTE_VAR_LOCATION_LOC (node->var_loc_note) != NULL_RTX) { char label_id[MAX_ARTIFICIAL_LABEL_BYTES]; varloc = NOTE_VAR_LOCATION (node->var_loc_note); if (!current_function_decl) endname = text_end_label; else { ASM_GENERATE_INTERNAL_LABEL (label_id, FUNC_END_LABEL, current_function_funcdef_no); endname = ggc_strdup (label_id); } add_loc_descr_to_loc_list (&list, loc_descriptor (varloc, attr != DW_AT_frame_base), node->label, endname, secname); } /* Finally, add the location list to the DIE, and we are done. */ add_AT_loc_list (die, attr, list); return; } rtl = rtl_for_decl_location (decl); if (rtl == NULL_RTX) return; switch (GET_CODE (rtl)) { case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST_STRING: case SYMBOL_REF: case LABEL_REF: case CONST: case PLUS: /* DECL_RTL could be (plus (reg ...) (const_int ...)) */ add_const_value_attribute (die, rtl); break; case MEM: if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL (decl)) { /* Need loc_descriptor_from_tree since that's where we know how to handle TLS variables. Want the object's address since the top-level DW_AT_location assumes such. See the confusion in loc_descriptor for reference. */ descr = loc_descriptor_from_tree (decl, 1); } else { case REG: case SUBREG: case CONCAT: descr = loc_descriptor (rtl, true); } add_AT_location_description (die, attr, descr); break; case PARALLEL: { rtvec par_elems = XVEC (rtl, 0); int num_elem = GET_NUM_ELEM (par_elems); enum machine_mode mode; int i; /* Create the first one, so we have something to add to. 
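For instance (purely illustrative), a 64-bit quantity that the target splits across two 32-bit registers shows up here as a PARALLEL, and the loop below stitches the pieces together roughly as

      DW_OP_reg<a> DW_OP_piece 4  DW_OP_reg<b> DW_OP_piece 4

   with each piece size taken from the mode of the corresponding element.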
*/ descr = loc_descriptor (XEXP (RTVEC_ELT (par_elems, 0), 0), true); mode = GET_MODE (XEXP (RTVEC_ELT (par_elems, 0), 0)); add_loc_descr (&descr, new_loc_descr (DW_OP_piece, GET_MODE_SIZE (mode), 0)); for (i = 1; i < num_elem; i++) { dw_loc_descr_ref temp; temp = loc_descriptor (XEXP (RTVEC_ELT (par_elems, i), 0), true); add_loc_descr (&descr, temp); mode = GET_MODE (XEXP (RTVEC_ELT (par_elems, i), 0)); add_loc_descr (&descr, new_loc_descr (DW_OP_piece, GET_MODE_SIZE (mode), 0)); } } add_AT_location_description (die, DW_AT_location, descr); break; default: abort (); } } /* If we don't have a copy of this variable in memory for some reason (such as a C++ member constant that doesn't have an out-of-line definition), we should tell the debugger about the constant value. */ static void tree_add_const_value_attribute (dw_die_ref var_die, tree decl) { tree init = DECL_INITIAL (decl); tree type = TREE_TYPE (decl); if (TREE_READONLY (decl) && ! TREE_THIS_VOLATILE (decl) && init && initializer_constant_valid_p (init, type) == null_pointer_node) /* OK */; else return; switch (TREE_CODE (type)) { case INTEGER_TYPE: if (host_integerp (init, 0)) add_AT_unsigned (var_die, DW_AT_const_value, tree_low_cst (init, 0)); else add_AT_long_long (var_die, DW_AT_const_value, TREE_INT_CST_HIGH (init), TREE_INT_CST_LOW (init)); break; default:; } } /* Generate a DW_AT_name attribute given some string value to be included as the value of the attribute. */ static void add_name_attribute (dw_die_ref die, const char *name_string) { if (name_string != NULL && *name_string != 0) { if (demangle_name_func) name_string = (*demangle_name_func) (name_string); add_AT_string (die, DW_AT_name, name_string); } } /* Generate a DW_AT_comp_dir attribute for DIE. */ static void add_comp_dir_attribute (dw_die_ref die) { const char *wd = get_src_pwd (); if (wd != NULL) add_AT_string (die, DW_AT_comp_dir, wd); } /* Given a tree node describing an array bound (either lower or upper) output a representation for that bound. */ static void add_bound_info (dw_die_ref subrange_die, enum dwarf_attribute bound_attr, tree bound) { switch (TREE_CODE (bound)) { case ERROR_MARK: return; /* All fixed-bounds are represented by INTEGER_CST nodes. */ case INTEGER_CST: if (! host_integerp (bound, 0) || (bound_attr == DW_AT_lower_bound && (((is_c_family () || is_java ()) && integer_zerop (bound)) || (is_fortran () && integer_onep (bound))))) /* Use the default. */ ; else add_AT_unsigned (subrange_die, bound_attr, tree_low_cst (bound, 0)); break; case CONVERT_EXPR: case NOP_EXPR: case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: add_bound_info (subrange_die, bound_attr, TREE_OPERAND (bound, 0)); break; case SAVE_EXPR: break; case VAR_DECL: case PARM_DECL: case RESULT_DECL: { dw_die_ref decl_die = lookup_decl_die (bound); /* ??? Can this happen, or should the variable have been bound first? Probably it can, since I imagine that we try to create the types of parameters in the order in which they exist in the list, and won't have created a forward reference to a later parameter. */ if (decl_die != NULL) add_AT_die_ref (subrange_die, bound_attr, decl_die); break; } default: { /* Otherwise try to create a stack operation procedure to evaluate the value of the array bound. 
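So for a bound that is a general expression, e.g. the `n - 1' upper bound of a C99 VLA `int buf[n]' (illustrative, and assuming the front end hands the bound to us as an expression rather than as a temporary decl), the code below wraps the bound's location expression in an artificial DW_TAG_variable DIE and points the bound attribute at that DIE instead of emitting a constant.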
*/ dw_die_ref ctx, decl_die; dw_loc_descr_ref loc; loc = loc_descriptor_from_tree (bound, 0); if (loc == NULL) break; if (current_function_decl == 0) ctx = comp_unit_die; else ctx = lookup_decl_die (current_function_decl); decl_die = new_die (DW_TAG_variable, ctx, bound); add_AT_flag (decl_die, DW_AT_artificial, 1); add_type_attribute (decl_die, TREE_TYPE (bound), 1, 0, ctx); add_AT_loc (decl_die, DW_AT_location, loc); add_AT_die_ref (subrange_die, bound_attr, decl_die); break; } } } /* Note that the block of subscript information for an array type also includes information about the element type of type given array type. */ static void add_subscript_info (dw_die_ref type_die, tree type) { #ifndef MIPS_DEBUGGING_INFO unsigned dimension_number; #endif tree lower, upper; dw_die_ref subrange_die; /* The GNU compilers represent multidimensional array types as sequences of one dimensional array types whose element types are themselves array types. Here we squish that down, so that each multidimensional array type gets only one array_type DIE in the Dwarf debugging info. The draft Dwarf specification say that we are allowed to do this kind of compression in C (because there is no difference between an array or arrays and a multidimensional array in C) but for other source languages (e.g. Ada) we probably shouldn't do this. */ /* ??? The SGI dwarf reader fails for multidimensional arrays with a const enum type. E.g. const enum machine_mode insn_operand_mode[2][10]. We work around this by disabling this feature. See also gen_array_type_die. */ #ifndef MIPS_DEBUGGING_INFO for (dimension_number = 0; TREE_CODE (type) == ARRAY_TYPE; type = TREE_TYPE (type), dimension_number++) #endif { tree domain = TYPE_DOMAIN (type); /* Arrays come in three flavors: Unspecified bounds, fixed bounds, and (in GNU C only) variable bounds. Handle all three forms here. */ subrange_die = new_die (DW_TAG_subrange_type, type_die, NULL); if (domain) { /* We have an array type with specified bounds. */ lower = TYPE_MIN_VALUE (domain); upper = TYPE_MAX_VALUE (domain); /* Define the index type. */ if (TREE_TYPE (domain)) { /* ??? This is probably an Ada unnamed subrange type. Ignore the TREE_TYPE field. We can't emit debug info for this because it is an unnamed integral type. */ if (TREE_CODE (domain) == INTEGER_TYPE && TYPE_NAME (domain) == NULL_TREE && TREE_CODE (TREE_TYPE (domain)) == INTEGER_TYPE && TYPE_NAME (TREE_TYPE (domain)) == NULL_TREE) ; else add_type_attribute (subrange_die, TREE_TYPE (domain), 0, 0, type_die); } /* ??? If upper is NULL, the array has unspecified length, but it does have a lower bound. This happens with Fortran dimension arr(N:*) Since the debugger is definitely going to need to know N to produce useful results, go ahead and output the lower bound solo, and hope the debugger can cope. */ add_bound_info (subrange_die, DW_AT_lower_bound, lower); if (upper) add_bound_info (subrange_die, DW_AT_upper_bound, upper); } /* Otherwise we have an array type with an unspecified length. The DWARF-2 spec does not say how to handle this; let's just leave out the bounds. 
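So a declaration like `extern int table[];' (illustrative) still gets the DW_TAG_subrange_type child created above, just with neither DW_AT_lower_bound nor DW_AT_upper_bound attached; the debugger is left to treat the element count as unknown.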
*/ } } static void add_byte_size_attribute (dw_die_ref die, tree tree_node) { unsigned size; switch (TREE_CODE (tree_node)) { case ERROR_MARK: size = 0; break; case ENUMERAL_TYPE: case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: size = int_size_in_bytes (tree_node); break; case FIELD_DECL: /* For a data member of a struct or union, the DW_AT_byte_size is generally given as the number of bytes normally allocated for an object of the *declared* type of the member itself. This is true even for bit-fields. */ size = simple_type_size_in_bits (field_type (tree_node)) / BITS_PER_UNIT; break; default: abort (); } /* Note that `size' might be -1 when we get to this point. If it is, that indicates that the byte size of the entity in question is variable. We have no good way of expressing this fact in Dwarf at the present time, so just let the -1 pass on through. */ add_AT_unsigned (die, DW_AT_byte_size, size); } /* For a FIELD_DECL node which represents a bit-field, output an attribute which specifies the distance in bits from the highest order bit of the "containing object" for the bit-field to the highest order bit of the bit-field itself. For any given bit-field, the "containing object" is a hypothetical object (of some integral or enum type) within which the given bit-field lives. The type of this hypothetical "containing object" is always the same as the declared type of the individual bit-field itself. The determination of the exact location of the "containing object" for a bit-field is rather complicated. It's handled by the `field_byte_offset' function (above). Note that it is the size (in bytes) of the hypothetical "containing object" which will be given in the DW_AT_byte_size attribute for this bit-field. (See `byte_size_attribute' above). */ static inline void add_bit_offset_attribute (dw_die_ref die, tree decl) { HOST_WIDE_INT object_offset_in_bytes = field_byte_offset (decl); tree type = DECL_BIT_FIELD_TYPE (decl); HOST_WIDE_INT bitpos_int; HOST_WIDE_INT highest_order_object_bit_offset; HOST_WIDE_INT highest_order_field_bit_offset; HOST_WIDE_INT unsigned bit_offset; /* Must be a field and a bit field. */ if (!type || TREE_CODE (decl) != FIELD_DECL) abort (); /* We can't yet handle bit-fields whose offsets are variable, so if we encounter such things, just return without generating any attribute whatsoever. Likewise for variable or too large size. */ if (! host_integerp (bit_position (decl), 0) || ! host_integerp (DECL_SIZE (decl), 1)) return; bitpos_int = int_bit_position (decl); /* Note that the bit offset is always the distance (in bits) from the highest-order bit of the "containing object" to the highest-order bit of the bit-field itself. Since the "high-order end" of any object or field is different on big-endian and little-endian machines, the computation below must take account of these differences. */ highest_order_object_bit_offset = object_offset_in_bytes * BITS_PER_UNIT; highest_order_field_bit_offset = bitpos_int; if (! BYTES_BIG_ENDIAN) { highest_order_field_bit_offset += tree_low_cst (DECL_SIZE (decl), 0); highest_order_object_bit_offset += simple_type_size_in_bits (type); } bit_offset = (! BYTES_BIG_ENDIAN ? highest_order_object_bit_offset - highest_order_field_bit_offset : highest_order_field_bit_offset - highest_order_object_bit_offset); add_AT_unsigned (die, DW_AT_bit_offset, bit_offset); } /* For a FIELD_DECL node which represents a bit field, output an attribute which specifies the length in bits of the given field. 
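   (Worked example, assuming a 32-bit little-endian target: for a hypothetical `struct s { unsigned x : 3; };', the three bit-field attributes come out as DW_AT_byte_size 4, DW_AT_bit_size 3 and DW_AT_bit_offset 32 - 3 = 29.)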
*/ static inline void add_bit_size_attribute (dw_die_ref die, tree decl) { /* Must be a field and a bit field. */ if (TREE_CODE (decl) != FIELD_DECL || ! DECL_BIT_FIELD_TYPE (decl)) abort (); if (host_integerp (DECL_SIZE (decl), 1)) add_AT_unsigned (die, DW_AT_bit_size, tree_low_cst (DECL_SIZE (decl), 1)); } /* If the compiled language is ANSI C, then add a 'prototyped' attribute, if arg types are given for the parameters of a function. */ static inline void add_prototyped_attribute (dw_die_ref die, tree func_type) { if (get_AT_unsigned (comp_unit_die, DW_AT_language) == DW_LANG_C89 && TYPE_ARG_TYPES (func_type) != NULL) add_AT_flag (die, DW_AT_prototyped, 1); } /* Add an 'abstract_origin' attribute below a given DIE. The DIE is found by looking in either the type declaration or object declaration equate table. */ static inline void add_abstract_origin_attribute (dw_die_ref die, tree origin) { dw_die_ref origin_die = NULL; if (TREE_CODE (origin) != FUNCTION_DECL) { /* We may have gotten separated from the block for the inlined function, if we're in an exception handler or some such; make sure that the abstract function has been written out. Doing this for nested functions is wrong, however; functions are distinct units, and our context might not even be inline. */ tree fn = origin; if (TYPE_P (fn)) fn = TYPE_STUB_DECL (fn); fn = decl_function_context (fn); if (fn) dwarf2out_abstract_function (fn); } if (DECL_P (origin)) origin_die = lookup_decl_die (origin); else if (TYPE_P (origin)) origin_die = lookup_type_die (origin); if (origin_die == NULL) abort (); add_AT_die_ref (die, DW_AT_abstract_origin, origin_die); } /* We do not currently support the pure_virtual attribute. */ static inline void add_pure_or_virtual_attribute (dw_die_ref die, tree func_decl) { if (DECL_VINDEX (func_decl)) { add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual); if (host_integerp (DECL_VINDEX (func_decl), 0)) add_AT_loc (die, DW_AT_vtable_elem_location, new_loc_descr (DW_OP_constu, tree_low_cst (DECL_VINDEX (func_decl), 0), 0)); /* GNU extension: Record what type this method came from originally. */ if (debug_info_level > DINFO_LEVEL_TERSE) add_AT_die_ref (die, DW_AT_containing_type, lookup_type_die (DECL_CONTEXT (func_decl))); } } /* Add source coordinate attributes for the given decl. */ static void add_src_coords_attributes (dw_die_ref die, tree decl) { expanded_location s = expand_location (DECL_SOURCE_LOCATION (decl)); unsigned file_index = lookup_filename (s.file); add_AT_unsigned (die, DW_AT_decl_file, file_index); add_AT_unsigned (die, DW_AT_decl_line, s.line); } /* Add a DW_AT_name attribute and source coordinate attribute for the given decl, but only if it actually has a name. */ static void add_name_and_src_coords_attributes (dw_die_ref die, tree decl) { tree decl_name; decl_name = DECL_NAME (decl); if (decl_name != NULL && IDENTIFIER_POINTER (decl_name) != NULL) { add_name_attribute (die, dwarf2_name (decl, 0)); if (! DECL_ARTIFICIAL (decl)) add_src_coords_attributes (die, decl); if ((TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL) && TREE_PUBLIC (decl) && DECL_ASSEMBLER_NAME (decl) != DECL_NAME (decl) && !DECL_ABSTRACT (decl)) add_AT_string (die, DW_AT_MIPS_linkage_name, IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))); } #ifdef VMS_DEBUGGING_INFO /* Get the function's name, as described by its RTL. This may be different from the DECL_NAME name used in the source file. 
*/ if (TREE_CODE (decl) == FUNCTION_DECL && TREE_ASM_WRITTEN (decl)) { add_AT_addr (die, DW_AT_VMS_rtnbeg_pd_address, XEXP (DECL_RTL (decl), 0)); VARRAY_PUSH_RTX (used_rtx_varray, XEXP (DECL_RTL (decl), 0)); } #endif } /* Push a new declaration scope. */ static void push_decl_scope (tree scope) { VARRAY_PUSH_TREE (decl_scope_table, scope); } /* Pop a declaration scope. */ static inline void pop_decl_scope (void) { if (VARRAY_ACTIVE_SIZE (decl_scope_table) <= 0) abort (); VARRAY_POP (decl_scope_table); } /* Return the DIE for the scope that immediately contains this type. Non-named types get global scope. Named types nested in other types get their containing scope if it's open, or global scope otherwise. All other types (i.e. function-local named types) get the current active scope. */ static dw_die_ref scope_die_for (tree t, dw_die_ref context_die) { dw_die_ref scope_die = NULL; tree containing_scope; int i; /* Non-types always go in the current scope. */ if (! TYPE_P (t)) abort (); containing_scope = TYPE_CONTEXT (t); /* Use the containing namespace if it was passed in (for a declaration). */ if (containing_scope && TREE_CODE (containing_scope) == NAMESPACE_DECL) { if (context_die == lookup_decl_die (containing_scope)) /* OK */; else containing_scope = NULL_TREE; } /* Ignore function type "scopes" from the C frontend. They mean that a tagged type is local to a parmlist of a function declarator, but that isn't useful to DWARF. */ if (containing_scope && TREE_CODE (containing_scope) == FUNCTION_TYPE) containing_scope = NULL_TREE; if (containing_scope == NULL_TREE) scope_die = comp_unit_die; else if (TYPE_P (containing_scope)) { /* For types, we can just look up the appropriate DIE. But first we check to see if we're in the middle of emitting it so we know where the new DIE should go. */ for (i = VARRAY_ACTIVE_SIZE (decl_scope_table) - 1; i >= 0; --i) if (VARRAY_TREE (decl_scope_table, i) == containing_scope) break; if (i < 0) { if (debug_info_level > DINFO_LEVEL_TERSE && !TREE_ASM_WRITTEN (containing_scope)) abort (); /* If none of the current dies are suitable, we get file scope. */ scope_die = comp_unit_die; } else scope_die = lookup_type_die (containing_scope); } else scope_die = context_die; return scope_die; } /* Returns nonzero if CONTEXT_DIE is internal to a function. */ static inline int local_scope_p (dw_die_ref context_die) { for (; context_die; context_die = context_die->die_parent) if (context_die->die_tag == DW_TAG_inlined_subroutine || context_die->die_tag == DW_TAG_subprogram) return 1; return 0; } /* Returns nonzero if CONTEXT_DIE is a class or namespace, for deciding whether or not to treat a DIE in this context as a declaration. */ static inline int class_or_namespace_scope_p (dw_die_ref context_die) { return (context_die && (context_die->die_tag == DW_TAG_structure_type || context_die->die_tag == DW_TAG_union_type || context_die->die_tag == DW_TAG_namespace)); } /* Many forms of DIEs require a "type description" attribute. This routine locates the proper "type descriptor" die for the type given by 'type', and adds a DW_AT_type attribute below the given die. */ static void add_type_attribute (dw_die_ref object_die, tree type, int decl_const, int decl_volatile, dw_die_ref context_die) { enum tree_code code = TREE_CODE (type); dw_die_ref type_die = NULL; /* ??? If this type is an unnamed subrange type of an integral or floating-point type, use the inner type. This is because we have no support for unnamed types in base_type_die. 
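   (Picture an anonymous integral type with bounds 1 .. 10 whose TREE_TYPE is the plain base integer type but which has no TYPE_NAME of its own.)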
This can happen if this is an Ada subrange type. Correct solution is emit a subrange type die. */ if ((code == INTEGER_TYPE || code == REAL_TYPE) && TREE_TYPE (type) != 0 && TYPE_NAME (type) == 0) type = TREE_TYPE (type), code = TREE_CODE (type); if (code == ERROR_MARK /* Handle a special case. For functions whose return type is void, we generate *no* type attribute. (Note that no object may have type `void', so this only applies to function return types). */ || code == VOID_TYPE) return; type_die = modified_type_die (type, decl_const || TYPE_READONLY (type), decl_volatile || TYPE_VOLATILE (type), context_die); if (type_die != NULL) add_AT_die_ref (object_die, DW_AT_type, type_die); } /* Given a tree pointer to a struct, class, union, or enum type node, return a pointer to the (string) tag name for the given type, or zero if the type was declared without a tag. */ static const char * type_tag (tree type) { const char *name = 0; if (TYPE_NAME (type) != 0) { tree t = 0; /* Find the IDENTIFIER_NODE for the type name. */ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE) t = TYPE_NAME (type); /* The g++ front end makes the TYPE_NAME of *each* tagged type point to a TYPE_DECL node, regardless of whether or not a `typedef' was involved. */ else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && ! DECL_IGNORED_P (TYPE_NAME (type))) t = DECL_NAME (TYPE_NAME (type)); /* Now get the name as a string, or invent one. */ if (t != 0) name = IDENTIFIER_POINTER (t); } return (name == 0 || *name == '\0') ? 0 : name; } /* Return the type associated with a data member, make a special check for bit field types. */ static inline tree member_declared_type (tree member) { return (DECL_BIT_FIELD_TYPE (member) ? DECL_BIT_FIELD_TYPE (member) : TREE_TYPE (member)); } /* Get the decl's label, as described by its RTL. This may be different from the DECL_NAME name used in the source file. */ #if 0 static const char * decl_start_label (tree decl) { rtx x; const char *fnname; x = DECL_RTL (decl); if (!MEM_P (x)) abort (); x = XEXP (x, 0); if (GET_CODE (x) != SYMBOL_REF) abort (); fnname = XSTR (x, 0); return fnname; } #endif /* These routines generate the internal representation of the DIE's for the compilation unit. Debugging information is collected by walking the declaration trees passed in from dwarf2out_decl(). */ static void gen_array_type_die (tree type, dw_die_ref context_die) { dw_die_ref scope_die = scope_die_for (type, context_die); dw_die_ref array_die; tree element_type; /* ??? The SGI dwarf reader fails for array of array of enum types unless the inner array type comes before the outer array type. Thus we must call gen_type_die before we call new_die. See below also. */ #ifdef MIPS_DEBUGGING_INFO gen_type_die (TREE_TYPE (type), context_die); #endif array_die = new_die (DW_TAG_array_type, scope_die, type); add_name_attribute (array_die, type_tag (type)); equate_type_number_to_die (type, array_die); if (TREE_CODE (type) == VECTOR_TYPE) { /* The frontend feeds us a representation for the vector as a struct containing an array. Pull out the array type. */ type = TREE_TYPE (TYPE_FIELDS (TYPE_DEBUG_REPRESENTATION_TYPE (type))); add_AT_flag (array_die, DW_AT_GNU_vector, 1); } #if 0 /* We default the array ordering. SDB will probably do the right things even if DW_AT_ordering is not present. It's not even an issue until we start to get into multidimensional arrays anyway. If SDB is ever caught doing the Wrong Thing for multi-dimensional arrays, then we'll have to put the DW_AT_ordering attribute back in. 
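   (For what it is worth, DW_ORD_row_major matches the C layout rule: a hypothetical `int m[2][3]' is stored one row of three ints after another.)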
(But if and when we find out that we need to put these in, we will only do so for multidimensional arrays. */ add_AT_unsigned (array_die, DW_AT_ordering, DW_ORD_row_major); #endif #ifdef MIPS_DEBUGGING_INFO /* The SGI compilers handle arrays of unknown bound by setting AT_declaration and not emitting any subrange DIEs. */ if (! TYPE_DOMAIN (type)) add_AT_flag (array_die, DW_AT_declaration, 1); else #endif add_subscript_info (array_die, type); /* Add representation of the type of the elements of this array type. */ element_type = TREE_TYPE (type); /* ??? The SGI dwarf reader fails for multidimensional arrays with a const enum type. E.g. const enum machine_mode insn_operand_mode[2][10]. We work around this by disabling this feature. See also add_subscript_info. */ #ifndef MIPS_DEBUGGING_INFO while (TREE_CODE (element_type) == ARRAY_TYPE) element_type = TREE_TYPE (element_type); gen_type_die (element_type, context_die); #endif add_type_attribute (array_die, element_type, 0, 0, context_die); } static void gen_set_type_die (tree type, dw_die_ref context_die) { dw_die_ref type_die = new_die (DW_TAG_set_type, scope_die_for (type, context_die), type); equate_type_number_to_die (type, type_die); add_type_attribute (type_die, TREE_TYPE (type), 0, 0, context_die); } #if 0 static void gen_entry_point_die (tree decl, dw_die_ref context_die) { tree origin = decl_ultimate_origin (decl); dw_die_ref decl_die = new_die (DW_TAG_entry_point, context_die, decl); if (origin != NULL) add_abstract_origin_attribute (decl_die, origin); else { add_name_and_src_coords_attributes (decl_die, decl); add_type_attribute (decl_die, TREE_TYPE (TREE_TYPE (decl)), 0, 0, context_die); } if (DECL_ABSTRACT (decl)) equate_decl_number_to_die (decl, decl_die); else add_AT_lbl_id (decl_die, DW_AT_low_pc, decl_start_label (decl)); } #endif /* Walk through the list of incomplete types again, trying once more to emit full debugging info for them. */ static void retry_incomplete_types (void) { int i; for (i = VARRAY_ACTIVE_SIZE (incomplete_types) - 1; i >= 0; i--) gen_type_die (VARRAY_TREE (incomplete_types, i), comp_unit_die); } /* Generate a DIE to represent an inlined instance of an enumeration type. */ static void gen_inlined_enumeration_type_die (tree type, dw_die_ref context_die) { dw_die_ref type_die = new_die (DW_TAG_enumeration_type, context_die, type); /* We do not check for TREE_ASM_WRITTEN (type) being set, as the type may be incomplete and such types are not marked. */ add_abstract_origin_attribute (type_die, type); } /* Generate a DIE to represent an inlined instance of a structure type. */ static void gen_inlined_structure_type_die (tree type, dw_die_ref context_die) { dw_die_ref type_die = new_die (DW_TAG_structure_type, context_die, type); /* We do not check for TREE_ASM_WRITTEN (type) being set, as the type may be incomplete and such types are not marked. */ add_abstract_origin_attribute (type_die, type); } /* Generate a DIE to represent an inlined instance of a union type. */ static void gen_inlined_union_type_die (tree type, dw_die_ref context_die) { dw_die_ref type_die = new_die (DW_TAG_union_type, context_die, type); /* We do not check for TREE_ASM_WRITTEN (type) being set, as the type may be incomplete and such types are not marked. */ add_abstract_origin_attribute (type_die, type); } /* Generate a DIE to represent an enumeration type. Note that these DIEs include all of the information about the enumeration values also. Each enumerated type name/value is listed as a child of the enumerated type DIE. 
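   For illustration: a hypothetical `enum color { RED, GREEN = 5 };' becomes one DW_TAG_enumeration_type DIE with two DW_TAG_enumerator children whose DW_AT_const_value attributes are 0 and 5.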
*/ static dw_die_ref gen_enumeration_type_die (tree type, dw_die_ref context_die) { dw_die_ref type_die = lookup_type_die (type); if (type_die == NULL) { type_die = new_die (DW_TAG_enumeration_type, scope_die_for (type, context_die), type); equate_type_number_to_die (type, type_die); add_name_attribute (type_die, type_tag (type)); } else if (! TYPE_SIZE (type)) return type_die; else remove_AT (type_die, DW_AT_declaration); /* Handle a GNU C/C++ extension, i.e. incomplete enum types. If the given enum type is incomplete, do not generate the DW_AT_byte_size attribute or the DW_AT_element_list attribute. */ if (TYPE_SIZE (type)) { tree link; TREE_ASM_WRITTEN (type) = 1; add_byte_size_attribute (type_die, type); if (TYPE_STUB_DECL (type) != NULL_TREE) add_src_coords_attributes (type_die, TYPE_STUB_DECL (type)); /* If the first reference to this type was as the return type of an inline function, then it may not have a parent. Fix this now. */ if (type_die->die_parent == NULL) add_child_die (scope_die_for (type, context_die), type_die); for (link = TYPE_VALUES (type); link != NULL; link = TREE_CHAIN (link)) { dw_die_ref enum_die = new_die (DW_TAG_enumerator, type_die, link); tree value = TREE_VALUE (link); add_name_attribute (enum_die, IDENTIFIER_POINTER (TREE_PURPOSE (link))); if (host_integerp (value, TYPE_UNSIGNED (TREE_TYPE (value)))) /* DWARF2 does not provide a way of indicating whether or not enumeration constants are signed or unsigned. GDB always assumes the values are signed, so we output all values as if they were signed. That means that enumeration constants with very large unsigned values will appear to have negative values in the debugger. */ add_AT_int (enum_die, DW_AT_const_value, tree_low_cst (value, tree_int_cst_sgn (value) > 0)); } } else add_AT_flag (type_die, DW_AT_declaration, 1); return type_die; } /* Generate a DIE to represent either a real live formal parameter decl or to represent just the type of some formal parameter position in some function type. Note that this routine is a bit unusual because its argument may be a ..._DECL node (i.e. either a PARM_DECL or perhaps a VAR_DECL which represents an inlining of some PARM_DECL) or else some sort of a ..._TYPE node. If it's the former then this function is being called to output a DIE to represent a formal parameter object (or some inlining thereof). If it's the latter, then this function is only being called to output a DW_TAG_formal_parameter DIE to stand as a placeholder for some formal argument type of some subprogram type. */ static dw_die_ref gen_formal_parameter_die (tree node, dw_die_ref context_die) { dw_die_ref parm_die = new_die (DW_TAG_formal_parameter, context_die, node); tree origin; switch (TREE_CODE_CLASS (TREE_CODE (node))) { case 'd': origin = decl_ultimate_origin (node); if (origin != NULL) add_abstract_origin_attribute (parm_die, origin); else { add_name_and_src_coords_attributes (parm_die, node); add_type_attribute (parm_die, TREE_TYPE (node), TREE_READONLY (node), TREE_THIS_VOLATILE (node), context_die); if (DECL_ARTIFICIAL (node)) add_AT_flag (parm_die, DW_AT_artificial, 1); } equate_decl_number_to_die (node, parm_die); if (! DECL_ABSTRACT (node)) add_location_or_const_value_attribute (parm_die, node, DW_AT_location); break; case 't': /* We were called with some kind of a ..._TYPE node. 
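   (E.g. a parameter position in a subprogram *type* such as `void (*) (int, char *)', where there is no PARM_DECL at all and only the parameter's type is known.)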
*/ add_type_attribute (parm_die, node, 0, 0, context_die); break; default: abort (); } return parm_die; } /* Generate a special type of DIE used as a stand-in for a trailing ellipsis at the end of an (ANSI prototyped) formal parameters list. */ static void gen_unspecified_parameters_die (tree decl_or_type, dw_die_ref context_die) { new_die (DW_TAG_unspecified_parameters, context_die, decl_or_type); } /* Generate a list of nameless DW_TAG_formal_parameter DIEs (and perhaps a DW_TAG_unspecified_parameters DIE) to represent the types of the formal parameters as specified in some function type specification (except for those which appear as part of a function *definition*). */ static void gen_formal_types_die (tree function_or_method_type, dw_die_ref context_die) { tree link; tree formal_type = NULL; tree first_parm_type; tree arg; if (TREE_CODE (function_or_method_type) == FUNCTION_DECL) { arg = DECL_ARGUMENTS (function_or_method_type); function_or_method_type = TREE_TYPE (function_or_method_type); } else arg = NULL_TREE; first_parm_type = TYPE_ARG_TYPES (function_or_method_type); /* Make our first pass over the list of formal parameter types and output a DW_TAG_formal_parameter DIE for each one. */ for (link = first_parm_type; link; ) { dw_die_ref parm_die; formal_type = TREE_VALUE (link); if (formal_type == void_type_node) break; /* Output a (nameless) DIE to represent the formal parameter itself. */ parm_die = gen_formal_parameter_die (formal_type, context_die); if ((TREE_CODE (function_or_method_type) == METHOD_TYPE && link == first_parm_type) || (arg && DECL_ARTIFICIAL (arg))) add_AT_flag (parm_die, DW_AT_artificial, 1); link = TREE_CHAIN (link); if (arg) arg = TREE_CHAIN (arg); } /* If this function type has an ellipsis, add a DW_TAG_unspecified_parameters DIE to the end of the parameter list. */ if (formal_type != void_type_node) gen_unspecified_parameters_die (function_or_method_type, context_die); /* Make our second (and final) pass over the list of formal parameter types and output DIEs to represent those types (as necessary). */ for (link = TYPE_ARG_TYPES (function_or_method_type); link && TREE_VALUE (link); link = TREE_CHAIN (link)) gen_type_die (TREE_VALUE (link), context_die); } /* We want to generate the DIE for TYPE so that we can generate the die for MEMBER, which has been defined; we will need to refer back to the member declaration nested within TYPE. If we're trying to generate minimal debug info for TYPE, processing TYPE won't do the trick; we need to attach the member declaration by hand. */ static void gen_type_die_for_member (tree type, tree member, dw_die_ref context_die) { gen_type_die (type, context_die); /* If we're trying to avoid duplicate debug info, we may not have emitted the member decl for this function. Emit it now. */ if (TYPE_DECL_SUPPRESS_DEBUG (TYPE_STUB_DECL (type)) && ! lookup_decl_die (member)) { if (decl_ultimate_origin (member)) abort (); push_decl_scope (type); if (TREE_CODE (member) == FUNCTION_DECL) gen_subprogram_die (member, lookup_type_die (type)); else gen_variable_die (member, lookup_type_die (type)); pop_decl_scope (); } } /* Generate the DWARF2 info for the "abstract" instance of a function which we may later generate inlined and/or out-of-line instances of. */ static void dwarf2out_abstract_function (tree decl) { dw_die_ref old_die; tree save_fn; tree context; int was_abstract = DECL_ABSTRACT (decl); /* Make sure we have the actual abstract inline, not a clone. 
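   (For C++ constructors and destructors, for instance, DECL_ORIGIN maps the compiler-generated clones back to the single function the user actually wrote.)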
*/ decl = DECL_ORIGIN (decl); old_die = lookup_decl_die (decl); if (old_die && get_AT (old_die, DW_AT_inline)) /* We've already generated the abstract instance. */ return; /* Be sure we've emitted the in-class declaration DIE (if any) first, so we don't get confused by DECL_ABSTRACT. */ if (debug_info_level > DINFO_LEVEL_TERSE) { context = decl_class_context (decl); if (context) gen_type_die_for_member (context, decl, decl_function_context (decl) ? NULL : comp_unit_die); } /* Pretend we've just finished compiling this function. */ save_fn = current_function_decl; current_function_decl = decl; set_decl_abstract_flags (decl, 1); dwarf2out_decl (decl); if (! was_abstract) set_decl_abstract_flags (decl, 0); current_function_decl = save_fn; } /* Generate a DIE to represent a declared function (either file-scope or block-local). */ static void gen_subprogram_die (tree decl, dw_die_ref context_die) { char label_id[MAX_ARTIFICIAL_LABEL_BYTES]; tree origin = decl_ultimate_origin (decl); dw_die_ref subr_die; rtx fp_reg; tree fn_arg_types; tree outer_scope; dw_die_ref old_die = lookup_decl_die (decl); int declaration = (current_function_decl != decl || class_or_namespace_scope_p (context_die)); /* It is possible to have both DECL_ABSTRACT and DECLARATION be true if we started to generate the abstract instance of an inline, decided to output its containing class, and proceeded to emit the declaration of the inline from the member list for the class. If so, DECLARATION takes priority; we'll get back to the abstract instance when done with the class. */ /* The class-scope declaration DIE must be the primary DIE. */ if (origin && declaration && class_or_namespace_scope_p (context_die)) { origin = NULL; if (old_die) abort (); } if (origin != NULL) { if (declaration && ! local_scope_p (context_die)) abort (); /* Fixup die_parent for the abstract instance of a nested inline function. */ if (old_die && old_die->die_parent == NULL) add_child_die (context_die, old_die); subr_die = new_die (DW_TAG_subprogram, context_die, decl); add_abstract_origin_attribute (subr_die, origin); } else if (old_die) { expanded_location s = expand_location (DECL_SOURCE_LOCATION (decl)); unsigned file_index = lookup_filename (s.file); if (!get_AT_flag (old_die, DW_AT_declaration) /* We can have a normal definition following an inline one in the case of redefinition of GNU C extern inlines. It seems reasonable to use AT_specification in this case. */ && !get_AT (old_die, DW_AT_inline)) { /* ??? This can happen if there is a bug in the program, for instance, if it has duplicate function definitions. Ideally, we should detect this case and ignore it. For now, if we have already reported an error, any error at all, then assume that we got here because of an input error, not a dwarf2 bug. */ if (errorcount) return; abort (); } /* If the definition comes from the same place as the declaration, maybe use the old DIE. We always want the DIE for this function that has the *_pc attributes to be under comp_unit_die so the debugger can find it. We also need to do this for abstract instances of inlines, since the spec requires the out-of-line copy to have the same parent. For local class methods, this doesn't apply; we just use the old DIE. 
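   (A typical instance: `struct A { void f (); };' followed later by a namespace-scope definition of A::f; the definition either reuses the old DIE or gets a fresh DIE under comp_unit_die that refers back to the in-class declaration through DW_AT_specification.)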
*/ if ((old_die->die_parent == comp_unit_die || context_die == NULL) && (DECL_ARTIFICIAL (decl) || (get_AT_unsigned (old_die, DW_AT_decl_file) == file_index && (get_AT_unsigned (old_die, DW_AT_decl_line) == (unsigned) s.line)))) { subr_die = old_die; /* Clear out the declaration attribute and the formal parameters. Do not remove all children, because it is possible that this declaration die was forced using force_decl_die(). In such cases die that forced declaration die (e.g. TAG_imported_module) is one of the children that we do not want to remove. */ remove_AT (subr_die, DW_AT_declaration); remove_child_TAG (subr_die, DW_TAG_formal_parameter); } else { subr_die = new_die (DW_TAG_subprogram, context_die, decl); add_AT_specification (subr_die, old_die); if (get_AT_unsigned (old_die, DW_AT_decl_file) != file_index) add_AT_unsigned (subr_die, DW_AT_decl_file, file_index); if (get_AT_unsigned (old_die, DW_AT_decl_line) != (unsigned) s.line) add_AT_unsigned (subr_die, DW_AT_decl_line, s.line); } } else { subr_die = new_die (DW_TAG_subprogram, context_die, decl); if (TREE_PUBLIC (decl)) add_AT_flag (subr_die, DW_AT_external, 1); add_name_and_src_coords_attributes (subr_die, decl); if (debug_info_level > DINFO_LEVEL_TERSE) { add_prototyped_attribute (subr_die, TREE_TYPE (decl)); add_type_attribute (subr_die, TREE_TYPE (TREE_TYPE (decl)), 0, 0, context_die); } add_pure_or_virtual_attribute (subr_die, decl); if (DECL_ARTIFICIAL (decl)) add_AT_flag (subr_die, DW_AT_artificial, 1); if (TREE_PROTECTED (decl)) add_AT_unsigned (subr_die, DW_AT_accessibility, DW_ACCESS_protected); else if (TREE_PRIVATE (decl)) add_AT_unsigned (subr_die, DW_AT_accessibility, DW_ACCESS_private); } if (declaration) { if (!old_die || !get_AT (old_die, DW_AT_inline)) { add_AT_flag (subr_die, DW_AT_declaration, 1); /* The first time we see a member function, it is in the context of the class to which it belongs. We make sure of this by emitting the class first. The next time is the definition, which is handled above. The two may come from the same source text. Note that force_decl_die() forces function declaration die. It is later reused to represent definition. */ equate_decl_number_to_die (decl, subr_die); } } else if (DECL_ABSTRACT (decl)) { if (DECL_DECLARED_INLINE_P (decl)) { if (cgraph_function_possibly_inlined_p (decl)) add_AT_unsigned (subr_die, DW_AT_inline, DW_INL_declared_inlined); else add_AT_unsigned (subr_die, DW_AT_inline, DW_INL_declared_not_inlined); } else { if (cgraph_function_possibly_inlined_p (decl)) add_AT_unsigned (subr_die, DW_AT_inline, DW_INL_inlined); else add_AT_unsigned (subr_die, DW_AT_inline, DW_INL_not_inlined); } equate_decl_number_to_die (decl, subr_die); } else if (!DECL_EXTERNAL (decl)) { if (!old_die || !get_AT (old_die, DW_AT_inline)) equate_decl_number_to_die (decl, subr_die); ASM_GENERATE_INTERNAL_LABEL (label_id, FUNC_BEGIN_LABEL, current_function_funcdef_no); add_AT_lbl_id (subr_die, DW_AT_low_pc, label_id); ASM_GENERATE_INTERNAL_LABEL (label_id, FUNC_END_LABEL, current_function_funcdef_no); add_AT_lbl_id (subr_die, DW_AT_high_pc, label_id); add_pubname (decl, subr_die); add_arange (decl, subr_die); #ifdef MIPS_DEBUGGING_INFO /* Add a reference to the FDE for this routine. */ add_AT_fde_ref (subr_die, DW_AT_MIPS_fde, current_funcdef_fde); #endif /* Define the "frame base" location for this routine. We use the frame pointer or stack pointer registers, since the RTL for local variables is relative to one of them. 
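   (Locals are then typically described relative to this frame base, e.g. as DW_OP_fbreg with a small constant offset.)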
*/ if (frame_base_decl && lookup_decl_loc (frame_base_decl) != NULL) { add_location_or_const_value_attribute (subr_die, frame_base_decl, DW_AT_frame_base); } else { fp_reg = frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx; add_AT_loc (subr_die, DW_AT_frame_base, reg_loc_descriptor (fp_reg)); } if (cfun->static_chain_decl) add_AT_location_description (subr_die, DW_AT_static_link, loc_descriptor_from_tree (cfun->static_chain_decl, 0)); } /* Now output descriptions of the arguments for this function. This gets (unnecessarily?) complex because of the fact that the DECL_ARGUMENT list for a FUNCTION_DECL doesn't indicate cases where there was a trailing `...' at the end of the formal parameter list. In order to find out if there was a trailing ellipsis or not, we must instead look at the type associated with the FUNCTION_DECL. This will be a node of type FUNCTION_TYPE. If the chain of type nodes hanging off of this FUNCTION_TYPE node ends with a void_type_node then there should *not* be an ellipsis at the end. */ /* In the case where we are describing a mere function declaration, all we need to do here (and all we *can* do here) is to describe the *types* of its formal parameters. */ if (debug_info_level <= DINFO_LEVEL_TERSE) ; else if (declaration) gen_formal_types_die (decl, subr_die); else { /* Generate DIEs to represent all known formal parameters. */ tree arg_decls = DECL_ARGUMENTS (decl); tree parm; /* When generating DIEs, generate the unspecified_parameters DIE instead if we come across the arg "__builtin_va_alist" */ for (parm = arg_decls; parm; parm = TREE_CHAIN (parm)) if (TREE_CODE (parm) == PARM_DECL) { if (DECL_NAME (parm) && !strcmp (IDENTIFIER_POINTER (DECL_NAME (parm)), "__builtin_va_alist")) gen_unspecified_parameters_die (parm, subr_die); else gen_decl_die (parm, subr_die); } /* Decide whether we need an unspecified_parameters DIE at the end. There are 2 more cases to do this for: 1) the ansi ... declaration - this is detectable when the end of the arg list is not a void_type_node 2) an unprototyped function declaration (not a definition). This just means that we have no info about the parameters at all. */ fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl)); if (fn_arg_types != NULL) { /* This is the prototyped case, check for.... */ if (TREE_VALUE (tree_last (fn_arg_types)) != void_type_node) gen_unspecified_parameters_die (decl, subr_die); } else if (DECL_INITIAL (decl) == NULL_TREE) gen_unspecified_parameters_die (decl, subr_die); } /* Output Dwarf info for all of the stuff within the body of the function (if it has one - it may be just a declaration). */ outer_scope = DECL_INITIAL (decl); /* OUTER_SCOPE is a pointer to the outermost BLOCK node created to represent a function. This BLOCK actually represents the outermost binding contour for the function, i.e. the contour in which the function's formal parameters and labels get declared. Curiously, it appears that the front end doesn't actually put the PARM_DECL nodes for the current function onto the BLOCK_VARS list for this outer scope, but are strung off of the DECL_ARGUMENTS list for the function instead. The BLOCK_VARS list for the `outer_scope' does provide us with a list of the LABEL_DECL nodes for the function however, and we output DWARF info for those in decls_for_scope. Just within the `outer_scope' there will be a BLOCK node representing the function's outermost pair of curly braces, and any blocks used for the base and member initializers of a C++ constructor function. */ if (! 
declaration && TREE_CODE (outer_scope) != ERROR_MARK) { /* Emit a DW_TAG_variable DIE for a named return value. */ if (DECL_NAME (DECL_RESULT (decl))) gen_decl_die (DECL_RESULT (decl), subr_die); current_function_has_inlines = 0; decls_for_scope (outer_scope, subr_die, 0); #if 0 && defined (MIPS_DEBUGGING_INFO) if (current_function_has_inlines) { add_AT_flag (subr_die, DW_AT_MIPS_has_inlines, 1); if (! comp_unit_has_inlines) { add_AT_flag (comp_unit_die, DW_AT_MIPS_has_inlines, 1); comp_unit_has_inlines = 1; } } #endif } } /* Generate a DIE to represent a declared data object. */ static void gen_variable_die (tree decl, dw_die_ref context_die) { tree origin = decl_ultimate_origin (decl); dw_die_ref var_die = new_die (DW_TAG_variable, context_die, decl); dw_die_ref old_die = lookup_decl_die (decl); int declaration = (DECL_EXTERNAL (decl) || class_or_namespace_scope_p (context_die)); if (origin != NULL) add_abstract_origin_attribute (var_die, origin); /* Loop unrolling can create multiple blocks that refer to the same static variable, so we must test for the DW_AT_declaration flag. ??? Loop unrolling/reorder_blocks should perhaps be rewritten to copy decls and set the DECL_ABSTRACT flag on them instead of sharing them. ??? Duplicated blocks have been rewritten to use .debug_ranges. */ else if (old_die && TREE_STATIC (decl) && get_AT_flag (old_die, DW_AT_declaration) == 1) { /* This is a definition of a C++ class level static. */ add_AT_specification (var_die, old_die); if (DECL_NAME (decl)) { expanded_location s = expand_location (DECL_SOURCE_LOCATION (decl)); unsigned file_index = lookup_filename (s.file); if (get_AT_unsigned (old_die, DW_AT_decl_file) != file_index) add_AT_unsigned (var_die, DW_AT_decl_file, file_index); if (get_AT_unsigned (old_die, DW_AT_decl_line) != (unsigned) s.line) add_AT_unsigned (var_die, DW_AT_decl_line, s.line); } } else { add_name_and_src_coords_attributes (var_die, decl); add_type_attribute (var_die, TREE_TYPE (decl), TREE_READONLY (decl), TREE_THIS_VOLATILE (decl), context_die); if (TREE_PUBLIC (decl)) add_AT_flag (var_die, DW_AT_external, 1); if (DECL_ARTIFICIAL (decl)) add_AT_flag (var_die, DW_AT_artificial, 1); if (TREE_PROTECTED (decl)) add_AT_unsigned (var_die, DW_AT_accessibility, DW_ACCESS_protected); else if (TREE_PRIVATE (decl)) add_AT_unsigned (var_die, DW_AT_accessibility, DW_ACCESS_private); } if (declaration) add_AT_flag (var_die, DW_AT_declaration, 1); if (DECL_ABSTRACT (decl) || declaration) equate_decl_number_to_die (decl, var_die); if (! declaration && ! DECL_ABSTRACT (decl)) { add_location_or_const_value_attribute (var_die, decl, DW_AT_location); add_pubname (decl, var_die); } else tree_add_const_value_attribute (var_die, decl); } /* Generate a DIE to represent a label identifier. */ static void gen_label_die (tree decl, dw_die_ref context_die) { tree origin = decl_ultimate_origin (decl); dw_die_ref lbl_die = new_die (DW_TAG_label, context_die, decl); rtx insn; char label[MAX_ARTIFICIAL_LABEL_BYTES]; if (origin != NULL) add_abstract_origin_attribute (lbl_die, origin); else add_name_and_src_coords_attributes (lbl_die, decl); if (DECL_ABSTRACT (decl)) equate_decl_number_to_die (decl, lbl_die); else { insn = DECL_RTL_IF_SET (decl); /* Deleted labels are programmer specified labels which have been eliminated because of various optimizations. We still emit them here so that it is possible to put breakpoints on them. 
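   (For instance, a user-written label whose only goto was removed as unreachable: its CODE_LABEL becomes a NOTE_INSN_DELETED_LABEL note, which the test below still accepts.)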
*/ if (insn && (GET_CODE (insn) == CODE_LABEL || ((GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))) { /* When optimization is enabled (via -O) some parts of the compiler (e.g. jump.c and cse.c) may try to delete CODE_LABEL insns which represent source-level labels which were explicitly declared by the user. This really shouldn't be happening though, so catch it if it ever does happen. */ if (INSN_DELETED_P (insn)) abort (); ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (insn)); add_AT_lbl_id (lbl_die, DW_AT_low_pc, label); } } } /* Generate a DIE for a lexical block. */ static void gen_lexical_block_die (tree stmt, dw_die_ref context_die, int depth) { dw_die_ref stmt_die = new_die (DW_TAG_lexical_block, context_die, stmt); char label[MAX_ARTIFICIAL_LABEL_BYTES]; if (! BLOCK_ABSTRACT (stmt)) { if (BLOCK_FRAGMENT_CHAIN (stmt)) { tree chain; add_AT_range_list (stmt_die, DW_AT_ranges, add_ranges (stmt)); chain = BLOCK_FRAGMENT_CHAIN (stmt); do { add_ranges (chain); chain = BLOCK_FRAGMENT_CHAIN (chain); } while (chain); add_ranges (NULL); } else { ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_BEGIN_LABEL, BLOCK_NUMBER (stmt)); add_AT_lbl_id (stmt_die, DW_AT_low_pc, label); ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_END_LABEL, BLOCK_NUMBER (stmt)); add_AT_lbl_id (stmt_die, DW_AT_high_pc, label); } } decls_for_scope (stmt, stmt_die, depth); } /* Generate a DIE for an inlined subprogram. */ static void gen_inlined_subroutine_die (tree stmt, dw_die_ref context_die, int depth) { tree decl = block_ultimate_origin (stmt); /* Emit info for the abstract instance first, if we haven't yet. We must emit this even if the block is abstract, otherwise when we emit the block below (or elsewhere), we may end up trying to emit a die whose origin die hasn't been emitted, and crashing. */ dwarf2out_abstract_function (decl); if (! BLOCK_ABSTRACT (stmt)) { dw_die_ref subr_die = new_die (DW_TAG_inlined_subroutine, context_die, stmt); char label[MAX_ARTIFICIAL_LABEL_BYTES]; add_abstract_origin_attribute (subr_die, decl); ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_BEGIN_LABEL, BLOCK_NUMBER (stmt)); add_AT_lbl_id (subr_die, DW_AT_low_pc, label); ASM_GENERATE_INTERNAL_LABEL (label, BLOCK_END_LABEL, BLOCK_NUMBER (stmt)); add_AT_lbl_id (subr_die, DW_AT_high_pc, label); decls_for_scope (stmt, subr_die, depth); current_function_has_inlines = 1; } else /* We may get here if we're the outer block of function A that was inlined into function B that was inlined into function C. When generating debugging info for C, dwarf2out_abstract_function(B) would mark all inlined blocks as abstract, including this one. So, we wouldn't (and shouldn't) expect labels to be generated for this one. Instead, just emit debugging info for declarations within the block. This is particularly important in the case of initializers of arguments passed from B to us: if they're statement expressions containing declarations, we wouldn't generate dies for their abstract variables, and then, when generating dies for the real variables, we'd die (pun intended :-) */ gen_lexical_block_die (stmt, context_die, depth); } /* Generate a DIE for a field in a record, or structure. 
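   Bit-field members additionally get the byte size, bit size and bit offset attributes described above, and members of a union get no DW_AT_data_member_location at all, since every union member lives at offset zero.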
*/ static void gen_field_die (tree decl, dw_die_ref context_die) { dw_die_ref decl_die; if (TREE_TYPE (decl) == error_mark_node) return; decl_die = new_die (DW_TAG_member, context_die, decl); add_name_and_src_coords_attributes (decl_die, decl); add_type_attribute (decl_die, member_declared_type (decl), TREE_READONLY (decl), TREE_THIS_VOLATILE (decl), context_die); if (DECL_BIT_FIELD_TYPE (decl)) { add_byte_size_attribute (decl_die, decl); add_bit_size_attribute (decl_die, decl); add_bit_offset_attribute (decl_die, decl); } if (TREE_CODE (DECL_FIELD_CONTEXT (decl)) != UNION_TYPE) add_data_member_location_attribute (decl_die, decl); if (DECL_ARTIFICIAL (decl)) add_AT_flag (decl_die, DW_AT_artificial, 1); if (TREE_PROTECTED (decl)) add_AT_unsigned (decl_die, DW_AT_accessibility, DW_ACCESS_protected); else if (TREE_PRIVATE (decl)) add_AT_unsigned (decl_die, DW_AT_accessibility, DW_ACCESS_private); } #if 0 /* Don't generate either pointer_type DIEs or reference_type DIEs here. Use modified_type_die instead. We keep this code here just in case these types of DIEs may be needed to represent certain things in other languages (e.g. Pascal) someday. */ static void gen_pointer_type_die (tree type, dw_die_ref context_die) { dw_die_ref ptr_die = new_die (DW_TAG_pointer_type, scope_die_for (type, context_die), type); equate_type_number_to_die (type, ptr_die); add_type_attribute (ptr_die, TREE_TYPE (type), 0, 0, context_die); add_AT_unsigned (mod_type_die, DW_AT_byte_size, PTR_SIZE); } /* Don't generate either pointer_type DIEs or reference_type DIEs here. Use modified_type_die instead. We keep this code here just in case these types of DIEs may be needed to represent certain things in other languages (e.g. Pascal) someday. */ static void gen_reference_type_die (tree type, dw_die_ref context_die) { dw_die_ref ref_die = new_die (DW_TAG_reference_type, scope_die_for (type, context_die), type); equate_type_number_to_die (type, ref_die); add_type_attribute (ref_die, TREE_TYPE (type), 0, 0, context_die); add_AT_unsigned (mod_type_die, DW_AT_byte_size, PTR_SIZE); } #endif /* Generate a DIE for a pointer to a member type. */ static void gen_ptr_to_mbr_type_die (tree type, dw_die_ref context_die) { dw_die_ref ptr_die = new_die (DW_TAG_ptr_to_member_type, scope_die_for (type, context_die), type); equate_type_number_to_die (type, ptr_die); add_AT_die_ref (ptr_die, DW_AT_containing_type, lookup_type_die (TYPE_OFFSET_BASETYPE (type))); add_type_attribute (ptr_die, TREE_TYPE (type), 0, 0, context_die); } /* Generate the DIE for the compilation unit. */ static dw_die_ref gen_compile_unit_die (const char *filename) { dw_die_ref die; char producer[250]; const char *language_string = lang_hooks.name; int language; die = new_die (DW_TAG_compile_unit, NULL, NULL); if (filename) { add_name_attribute (die, filename); /* Don't add cwd for . */ if (filename[0] != DIR_SEPARATOR && filename[0] != '<') add_comp_dir_attribute (die); } sprintf (producer, "%s %s", language_string, version_string); #ifdef MIPS_DEBUGGING_INFO /* The MIPS/SGI compilers place the 'cc' command line options in the producer string. The SGI debugger looks for -g, -g1, -g2, or -g3; if they do not appear in the producer string, the debugger reaches the conclusion that the object file is stripped and has no debugging information. To get the MIPS/SGI debugger to believe that there is debugging information in the object file, we add a -g to the producer string. 
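   (So a producer string of the form "GNU C <version>" would, hypothetically, be emitted as "GNU C <version> -g" on those targets.)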
*/ if (debug_info_level > DINFO_LEVEL_TERSE) strcat (producer, " -g"); #endif add_AT_string (die, DW_AT_producer, producer); if (strcmp (language_string, "GNU C++") == 0) language = DW_LANG_C_plus_plus; else if (strcmp (language_string, "GNU Ada") == 0) language = DW_LANG_Ada95; else if (strcmp (language_string, "GNU F77") == 0) language = DW_LANG_Fortran77; else if (strcmp (language_string, "GNU F95") == 0) language = DW_LANG_Fortran95; else if (strcmp (language_string, "GNU Pascal") == 0) language = DW_LANG_Pascal83; else if (strcmp (language_string, "GNU Java") == 0) language = DW_LANG_Java; else language = DW_LANG_C89; add_AT_unsigned (die, DW_AT_language, language); return die; } /* Generate a DIE for a string type. */ static void gen_string_type_die (tree type, dw_die_ref context_die) { dw_die_ref type_die = new_die (DW_TAG_string_type, scope_die_for (type, context_die), type); equate_type_number_to_die (type, type_die); /* ??? Fudge the string length attribute for now. TODO: add string length info. */ #if 0 string_length_attribute (TYPE_MAX_VALUE (TYPE_DOMAIN (type))); bound_representation (upper_bound, 0, 'u'); #endif } /* Generate the DIE for a base class. */ static void gen_inheritance_die (tree binfo, tree access, dw_die_ref context_die) { dw_die_ref die = new_die (DW_TAG_inheritance, context_die, binfo); add_type_attribute (die, BINFO_TYPE (binfo), 0, 0, context_die); add_data_member_location_attribute (die, binfo); if (BINFO_VIRTUAL_P (binfo)) add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual); if (access == access_public_node) add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_public); else if (access == access_protected_node) add_AT_unsigned (die, DW_AT_accessibility, DW_ACCESS_protected); } /* Generate a DIE for a class member. */ static void gen_member_die (tree type, dw_die_ref context_die) { tree member; tree binfo = TYPE_BINFO (type); dw_die_ref child; /* If this is not an incomplete type, output descriptions of each of its members. Note that as we output the DIEs necessary to represent the members of this record or union type, we will also be trying to output DIEs to represent the *types* of those members. However the `type' function (above) will specifically avoid generating type DIEs for member types *within* the list of member DIEs for this (containing) type except for those types (of members) which are explicitly marked as also being members of this (containing) type themselves. The g++ front- end can force any given type to be treated as a member of some other (containing) type by setting the TYPE_CONTEXT of the given (member) type to point to the TREE node representing the appropriate (containing) type. */ /* First output info about the base classes. */ if (binfo && BINFO_BASETYPES (binfo)) { tree bases = BINFO_BASETYPES (binfo); tree accesses = BINFO_BASEACCESSES (binfo); int n_bases = TREE_VEC_LENGTH (bases); int i; for (i = 0; i < n_bases; i++) gen_inheritance_die (TREE_VEC_ELT (bases, i), (accesses ? TREE_VEC_ELT (accesses, i) : access_public_node), context_die); } /* Now output info about the data members and type members. */ for (member = TYPE_FIELDS (type); member; member = TREE_CHAIN (member)) { /* If we thought we were generating minimal debug info for TYPE and then changed our minds, some of the member declarations may have already been defined. Don't define them again, but do put them in the right order. 
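   (splice_child_die below simply moves such an already-existing DIE so that it sits under CONTEXT_DIE in the proper position, instead of generating a duplicate.)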
*/ child = lookup_decl_die (member); if (child) splice_child_die (context_die, child); else gen_decl_die (member, context_die); } /* Now output info about the function members (if any). */ for (member = TYPE_METHODS (type); member; member = TREE_CHAIN (member)) { /* Don't include clones in the member list. */ if (DECL_ABSTRACT_ORIGIN (member)) continue; child = lookup_decl_die (member); if (child) splice_child_die (context_die, child); else gen_decl_die (member, context_die); } } /* Generate a DIE for a structure or union type. If TYPE_DECL_SUPPRESS_DEBUG is set, we pretend that the type was never defined, so we only get the member DIEs needed by later specification DIEs. */ static void gen_struct_or_union_type_die (tree type, dw_die_ref context_die) { dw_die_ref type_die = lookup_type_die (type); dw_die_ref scope_die = 0; int nested = 0; int complete = (TYPE_SIZE (type) && (! TYPE_STUB_DECL (type) || ! TYPE_DECL_SUPPRESS_DEBUG (TYPE_STUB_DECL (type)))); int ns_decl = (context_die && context_die->die_tag == DW_TAG_namespace); if (type_die && ! complete) return; if (TYPE_CONTEXT (type) != NULL_TREE && (AGGREGATE_TYPE_P (TYPE_CONTEXT (type)) || TREE_CODE (TYPE_CONTEXT (type)) == NAMESPACE_DECL)) nested = 1; scope_die = scope_die_for (type, context_die); if (! type_die || (nested && scope_die == comp_unit_die)) /* First occurrence of type or toplevel definition of nested class. */ { dw_die_ref old_die = type_die; type_die = new_die (TREE_CODE (type) == RECORD_TYPE ? DW_TAG_structure_type : DW_TAG_union_type, scope_die, type); equate_type_number_to_die (type, type_die); if (old_die) add_AT_specification (type_die, old_die); else add_name_attribute (type_die, type_tag (type)); } else remove_AT (type_die, DW_AT_declaration); /* If this type has been completed, then give it a byte_size attribute and then give a list of members. */ if (complete && !ns_decl) { /* Prevent infinite recursion in cases where the type of some member of this type is expressed in terms of this type itself. */ TREE_ASM_WRITTEN (type) = 1; add_byte_size_attribute (type_die, type); if (TYPE_STUB_DECL (type) != NULL_TREE) add_src_coords_attributes (type_die, TYPE_STUB_DECL (type)); /* If the first reference to this type was as the return type of an inline function, then it may not have a parent. Fix this now. */ if (type_die->die_parent == NULL) add_child_die (scope_die, type_die); push_decl_scope (type); gen_member_die (type, type_die); pop_decl_scope (); /* GNU extension: Record what type our vtable lives in. */ if (TYPE_VFIELD (type)) { tree vtype = DECL_FCONTEXT (TYPE_VFIELD (type)); gen_type_die (vtype, context_die); add_AT_die_ref (type_die, DW_AT_containing_type, lookup_type_die (vtype)); } } else { add_AT_flag (type_die, DW_AT_declaration, 1); /* We don't need to do this for function-local types. */ if (TYPE_STUB_DECL (type) && ! decl_function_context (TYPE_STUB_DECL (type))) VARRAY_PUSH_TREE (incomplete_types, type); } } /* Generate a DIE for a subroutine _type_. */ static void gen_subroutine_type_die (tree type, dw_die_ref context_die) { tree return_type = TREE_TYPE (type); dw_die_ref subr_die = new_die (DW_TAG_subroutine_type, scope_die_for (type, context_die), type); equate_type_number_to_die (type, subr_die); add_prototyped_attribute (subr_die, type); add_type_attribute (subr_die, return_type, 0, 0, context_die); gen_formal_types_die (type, subr_die); } /* Generate a DIE for a type definition. 
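   I.e. for a TYPE_DECL; a hypothetical `typedef unsigned long count_t;' becomes a DW_TAG_typedef DIE whose DW_AT_type refers to the DIE for `unsigned long'.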
*/ static void gen_typedef_die (tree decl, dw_die_ref context_die) { dw_die_ref type_die; tree origin; if (TREE_ASM_WRITTEN (decl)) return; TREE_ASM_WRITTEN (decl) = 1; type_die = new_die (DW_TAG_typedef, context_die, decl); origin = decl_ultimate_origin (decl); if (origin != NULL) add_abstract_origin_attribute (type_die, origin); else { tree type; add_name_and_src_coords_attributes (type_die, decl); if (DECL_ORIGINAL_TYPE (decl)) { type = DECL_ORIGINAL_TYPE (decl); if (type == TREE_TYPE (decl)) abort (); else equate_type_number_to_die (TREE_TYPE (decl), type_die); } else type = TREE_TYPE (decl); add_type_attribute (type_die, type, TREE_READONLY (decl), TREE_THIS_VOLATILE (decl), context_die); } if (DECL_ABSTRACT (decl)) equate_decl_number_to_die (decl, type_die); } /* Generate a type description DIE. */ static void gen_type_die (tree type, dw_die_ref context_die) { int need_pop; if (type == NULL_TREE || type == error_mark_node) return; if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && DECL_ORIGINAL_TYPE (TYPE_NAME (type))) { if (TREE_ASM_WRITTEN (type)) return; /* Prevent broken recursion; we can't hand off to the same type. */ if (DECL_ORIGINAL_TYPE (TYPE_NAME (type)) == type) abort (); TREE_ASM_WRITTEN (type) = 1; gen_decl_die (TYPE_NAME (type), context_die); return; } /* We are going to output a DIE to represent the unqualified version of this type (i.e. without any const or volatile qualifiers) so get the main variant (i.e. the unqualified version) of this type now. (Vectors are special because the debugging info is in the cloned type itself). */ if (TREE_CODE (type) != VECTOR_TYPE) type = type_main_variant (type); if (TREE_ASM_WRITTEN (type)) return; switch (TREE_CODE (type)) { case ERROR_MARK: break; case POINTER_TYPE: case REFERENCE_TYPE: /* We must set TREE_ASM_WRITTEN in case this is a recursive type. This ensures that the gen_type_die recursion will terminate even if the type is recursive. Recursive types are possible in Ada. */ /* ??? We could perhaps do this for all types before the switch statement. */ TREE_ASM_WRITTEN (type) = 1; /* For these types, all that is required is that we output a DIE (or a set of DIEs) to represent the "basis" type. */ gen_type_die (TREE_TYPE (type), context_die); break; case OFFSET_TYPE: /* This code is used for C++ pointer-to-data-member types. Output a description of the relevant class type. */ gen_type_die (TYPE_OFFSET_BASETYPE (type), context_die); /* Output a description of the type of the object pointed to. */ gen_type_die (TREE_TYPE (type), context_die); /* Now output a DIE to represent this pointer-to-data-member type itself. */ gen_ptr_to_mbr_type_die (type, context_die); break; case SET_TYPE: gen_type_die (TYPE_DOMAIN (type), context_die); gen_set_type_die (type, context_die); break; case FILE_TYPE: gen_type_die (TREE_TYPE (type), context_die); abort (); /* No way to represent these in Dwarf yet! */ break; case FUNCTION_TYPE: /* Force out return type (in case it wasn't forced out already). */ gen_type_die (TREE_TYPE (type), context_die); gen_subroutine_type_die (type, context_die); break; case METHOD_TYPE: /* Force out return type (in case it wasn't forced out already). 
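   (Method types go through gen_subroutine_type_die just like plain function types; the implicit `this' parameter ends up as the first formal parameter type, marked DW_AT_artificial in gen_formal_types_die.)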
*/ gen_type_die (TREE_TYPE (type), context_die); gen_subroutine_type_die (type, context_die); break; case ARRAY_TYPE: if (TYPE_STRING_FLAG (type) && TREE_CODE (TREE_TYPE (type)) == CHAR_TYPE) { gen_type_die (TREE_TYPE (type), context_die); gen_string_type_die (type, context_die); } else gen_array_type_die (type, context_die); break; case VECTOR_TYPE: gen_array_type_die (type, context_die); break; case ENUMERAL_TYPE: case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: /* If this is a nested type whose containing class hasn't been written out yet, writing it out will cover this one, too. This does not apply to instantiations of member class templates; they need to be added to the containing class as they are generated. FIXME: This hurts the idea of combining type decls from multiple TUs, since we can't predict what set of template instantiations we'll get. */ if (TYPE_CONTEXT (type) && AGGREGATE_TYPE_P (TYPE_CONTEXT (type)) && ! TREE_ASM_WRITTEN (TYPE_CONTEXT (type))) { gen_type_die (TYPE_CONTEXT (type), context_die); if (TREE_ASM_WRITTEN (type)) return; /* If that failed, attach ourselves to the stub. */ push_decl_scope (TYPE_CONTEXT (type)); context_die = lookup_type_die (TYPE_CONTEXT (type)); need_pop = 1; } else { declare_in_namespace (type, context_die); need_pop = 0; } if (TREE_CODE (type) == ENUMERAL_TYPE) gen_enumeration_type_die (type, context_die); else gen_struct_or_union_type_die (type, context_die); if (need_pop) pop_decl_scope (); /* Don't set TREE_ASM_WRITTEN on an incomplete struct; we want to fix it up if it is ever completed. gen_*_type_die will set it for us when appropriate. */ return; case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case COMPLEX_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: /* No DIEs needed for fundamental types. */ break; case LANG_TYPE: /* No Dwarf representation currently defined. */ break; default: abort (); } TREE_ASM_WRITTEN (type) = 1; } /* Generate a DIE for a tagged type instantiation. */ static void gen_tagged_type_instantiation_die (tree type, dw_die_ref context_die) { if (type == NULL_TREE || type == error_mark_node) return; /* We are going to output a DIE to represent the unqualified version of this type (i.e. without any const or volatile qualifiers) so make sure that we have the main variant (i.e. the unqualified version) of this type now. */ if (type != type_main_variant (type)) abort (); /* Do not check TREE_ASM_WRITTEN (type) as it may not be set if this is an instance of an unresolved type. */ switch (TREE_CODE (type)) { case ERROR_MARK: break; case ENUMERAL_TYPE: gen_inlined_enumeration_type_die (type, context_die); break; case RECORD_TYPE: gen_inlined_structure_type_die (type, context_die); break; case UNION_TYPE: case QUAL_UNION_TYPE: gen_inlined_union_type_die (type, context_die); break; default: abort (); } } /* Generate a DW_TAG_lexical_block DIE followed by DIEs to represent all of the things which are local to the given block. */ static void gen_block_die (tree stmt, dw_die_ref context_die, int depth) { int must_output_die = 0; tree origin; tree decl; enum tree_code origin_code; /* Ignore blocks never really used to make RTL. */ if (stmt == NULL_TREE || !TREE_USED (stmt) || (!TREE_ASM_WRITTEN (stmt) && !BLOCK_ABSTRACT (stmt))) return; /* If the block is one fragment of a non-contiguous block, do not process the variables, since they will have been done by the origin block. Do process subblocks. 
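   (Such fragments are produced when a scope block is split into non-contiguous pieces, e.g. by basic-block reordering; their address ranges are already covered by the origin block's DW_AT_ranges list.)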
*/ if (BLOCK_FRAGMENT_ORIGIN (stmt)) { tree sub; for (sub = BLOCK_SUBBLOCKS (stmt); sub; sub = BLOCK_CHAIN (sub)) gen_block_die (sub, context_die, depth + 1); return; } /* Determine the "ultimate origin" of this block. This block may be an inlined instance of an inlined instance of inline function, so we have to trace all of the way back through the origin chain to find out what sort of node actually served as the original seed for the creation of the current block. */ origin = block_ultimate_origin (stmt); origin_code = (origin != NULL) ? TREE_CODE (origin) : ERROR_MARK; /* Determine if we need to output any Dwarf DIEs at all to represent this block. */ if (origin_code == FUNCTION_DECL) /* The outer scopes for inlinings *must* always be represented. We generate DW_TAG_inlined_subroutine DIEs for them. (See below.) */ must_output_die = 1; else { /* In the case where the current block represents an inlining of the "body block" of an inline function, we must *NOT* output any DIE for this block because we have already output a DIE to represent the whole inlined function scope and the "body block" of any function doesn't really represent a different scope according to ANSI C rules. So we check here to make sure that this block does not represent a "body block inlining" before trying to set the MUST_OUTPUT_DIE flag. */ if (! is_body_block (origin ? origin : stmt)) { /* Determine if this block directly contains any "significant" local declarations which we will need to output DIEs for. */ if (debug_info_level > DINFO_LEVEL_TERSE) /* We are not in terse mode so *any* local declaration counts as being a "significant" one. */ must_output_die = (BLOCK_VARS (stmt) != NULL); else /* We are in terse mode, so only local (nested) function definitions count as "significant" local declarations. */ for (decl = BLOCK_VARS (stmt); decl != NULL; decl = TREE_CHAIN (decl)) if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl)) { must_output_die = 1; break; } } } /* It would be a waste of space to generate a Dwarf DW_TAG_lexical_block DIE for any block which contains no significant local declarations at all. Rather, in such cases we just call `decls_for_scope' so that any needed Dwarf info for any sub-blocks will get properly generated. Note that in terse mode, our definition of what constitutes a "significant" local declaration gets restricted to include only inlined function instances and local (nested) function definitions. */ if (must_output_die) { if (origin_code == FUNCTION_DECL) gen_inlined_subroutine_die (stmt, context_die, depth); else gen_lexical_block_die (stmt, context_die, depth); } else decls_for_scope (stmt, context_die, depth); } /* Generate all of the decls declared within a given scope and (recursively) all of its sub-blocks. */ static void decls_for_scope (tree stmt, dw_die_ref context_die, int depth) { tree decl; tree subblocks; /* Ignore blocks never really used to make RTL. */ if (stmt == NULL_TREE || ! TREE_USED (stmt)) return; /* Output the DIEs to represent all of the data objects and typedefs declared directly within this block but not within any nested sub-blocks. Also, nested function and tag DIEs have been generated with a parent of NULL; fix that up now. 
*/ for (decl = BLOCK_VARS (stmt); decl != NULL; decl = TREE_CHAIN (decl)) { dw_die_ref die; if (TREE_CODE (decl) == FUNCTION_DECL) die = lookup_decl_die (decl); else if (TREE_CODE (decl) == TYPE_DECL && TYPE_DECL_IS_STUB (decl)) die = lookup_type_die (TREE_TYPE (decl)); else die = NULL; if (die != NULL && die->die_parent == NULL) add_child_die (context_die, die); else gen_decl_die (decl, context_die); } /* If we're at -g1, we're not interested in subblocks. */ if (debug_info_level <= DINFO_LEVEL_TERSE) return; /* Output the DIEs to represent all sub-blocks (and the items declared therein) of this block. */ for (subblocks = BLOCK_SUBBLOCKS (stmt); subblocks != NULL; subblocks = BLOCK_CHAIN (subblocks)) gen_block_die (subblocks, context_die, depth + 1); } /* Is this a typedef we can avoid emitting? */ static inline int is_redundant_typedef (tree decl) { if (TYPE_DECL_IS_STUB (decl)) return 1; if (DECL_ARTIFICIAL (decl) && DECL_CONTEXT (decl) && is_tagged_type (DECL_CONTEXT (decl)) && TREE_CODE (TYPE_NAME (DECL_CONTEXT (decl))) == TYPE_DECL && DECL_NAME (decl) == DECL_NAME (TYPE_NAME (DECL_CONTEXT (decl)))) /* Also ignore the artificial member typedef for the class name. */ return 1; return 0; } /* Returns the DIE for DECL or aborts. */ static dw_die_ref force_decl_die (tree decl) { dw_die_ref decl_die; unsigned saved_external_flag; tree save_fn = NULL_TREE; decl_die = lookup_decl_die (decl); if (!decl_die) { dw_die_ref context_die; tree decl_context = DECL_CONTEXT (decl); if (decl_context) { /* Find die that represents this context. */ if (TYPE_P (decl_context)) context_die = force_type_die (decl_context); else context_die = force_decl_die (decl_context); } else context_die = comp_unit_die; switch (TREE_CODE (decl)) { case FUNCTION_DECL: /* Clear current_function_decl, so that gen_subprogram_die thinks that this is a declaration. At this point, we just want to force declaration die. */ save_fn = current_function_decl; current_function_decl = NULL_TREE; gen_subprogram_die (decl, context_die); current_function_decl = save_fn; break; case VAR_DECL: /* Set external flag to force declaration die. Restore it after gen_decl_die() call. */ saved_external_flag = DECL_EXTERNAL (decl); DECL_EXTERNAL (decl) = 1; gen_decl_die (decl, context_die); DECL_EXTERNAL (decl) = saved_external_flag; break; case NAMESPACE_DECL: dwarf2out_decl (decl); break; default: abort (); } /* See if we can find the DIE for this decl now. If not, abort. */ if (!decl_die) decl_die = lookup_decl_die (decl); if (!decl_die) abort (); } return decl_die; } /* Returns the DIE for TYPE or aborts. */ static dw_die_ref force_type_die (tree type) { dw_die_ref type_die; type_die = lookup_type_die (type); if (!type_die) { dw_die_ref context_die; if (TYPE_CONTEXT (type)) if (TYPE_P (TYPE_CONTEXT (type))) context_die = force_type_die (TYPE_CONTEXT (type)); else context_die = force_decl_die (TYPE_CONTEXT (type)); else context_die = comp_unit_die; gen_type_die (type, context_die); type_die = lookup_type_die (type); if (!type_die) abort (); } return type_die; } /* Force out any required namespaces to be able to output DECL, and return the new context_die for it, if it's changed. */ static dw_die_ref setup_namespace_context (tree thing, dw_die_ref context_die) { tree context = DECL_P (thing) ? DECL_CONTEXT (thing) : TYPE_CONTEXT (thing); if (context && TREE_CODE (context) == NAMESPACE_DECL) /* Force out the namespace.
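Roughly: if the first thing we emit from 'namespace A { namespace B { int v; } }' is v itself, force_decl_die below creates the DW_TAG_namespace DIE for B on demand, recursing through DECL_CONTEXT to create A first if needed, and the DIE for B then becomes the context for v's own DIE.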
*/ context_die = force_decl_die (context); return context_die; } /* Emit a declaration DIE for THING (which is either a DECL or a tagged type) within its namespace, if appropriate. For compatibility with older debuggers, namespace DIEs only contain declarations; all definitions are emitted at CU scope. */ static void declare_in_namespace (tree thing, dw_die_ref context_die) { dw_die_ref ns_context; if (debug_info_level <= DINFO_LEVEL_TERSE) return; ns_context = setup_namespace_context (thing, context_die); if (ns_context != context_die) { if (DECL_P (thing)) gen_decl_die (thing, ns_context); else gen_type_die (thing, ns_context); } } /* Generate a DIE for a namespace or namespace alias. */ static void gen_namespace_die (tree decl) { dw_die_ref context_die = setup_namespace_context (decl, comp_unit_die); /* Namespace aliases have a DECL_ABSTRACT_ORIGIN of the namespace they are an alias of. */ if (DECL_ABSTRACT_ORIGIN (decl) == NULL) { /* Output a real namespace. */ dw_die_ref namespace_die = new_die (DW_TAG_namespace, context_die, decl); add_name_and_src_coords_attributes (namespace_die, decl); equate_decl_number_to_die (decl, namespace_die); } else { /* Output a namespace alias. */ /* Force out the namespace we are an alias of, if necessary. */ dw_die_ref origin_die = force_decl_die (DECL_ABSTRACT_ORIGIN (decl)); /* Now create the namespace alias DIE. */ dw_die_ref namespace_die = new_die (DW_TAG_imported_declaration, context_die, decl); add_name_and_src_coords_attributes (namespace_die, decl); add_AT_die_ref (namespace_die, DW_AT_import, origin_die); equate_decl_number_to_die (decl, namespace_die); } } /* Generate Dwarf debug information for a decl described by DECL. */ static void gen_decl_die (tree decl, dw_die_ref context_die) { tree origin; if (DECL_P (decl) && DECL_IGNORED_P (decl)) return; switch (TREE_CODE (decl)) { case ERROR_MARK: break; case CONST_DECL: /* The individual enumerators of an enum type get output when we output the Dwarf representation of the relevant enum type itself. */ break; case FUNCTION_DECL: /* Don't output any DIEs to represent mere function declarations, unless they are class members or explicit block externs. */ if (DECL_INITIAL (decl) == NULL_TREE && DECL_CONTEXT (decl) == NULL_TREE && (current_function_decl == NULL_TREE || DECL_ARTIFICIAL (decl))) break; #if 0 /* FIXME */ /* This doesn't work because the C frontend sets DECL_ABSTRACT_ORIGIN on local redeclarations of global functions. That seems broken. */ if (current_function_decl != decl) /* This is only a declaration. */; #endif /* If we're emitting a clone, emit info for the abstract instance. */ if (DECL_ORIGIN (decl) != decl) dwarf2out_abstract_function (DECL_ABSTRACT_ORIGIN (decl)); /* If we're emitting an out-of-line copy of an inline function, emit info for the abstract instance and set up to refer to it. */ else if (cgraph_function_possibly_inlined_p (decl) && ! DECL_ABSTRACT (decl) && ! class_or_namespace_scope_p (context_die) /* dwarf2out_abstract_function won't emit a die if this is just a declaration. We must avoid setting DECL_ABSTRACT_ORIGIN in that case, because that works only if we have a die. */ && DECL_INITIAL (decl) != NULL_TREE) { dwarf2out_abstract_function (decl); set_decl_origin_self (decl); } /* Otherwise we're emitting the primary DIE for this decl. */ else if (debug_info_level > DINFO_LEVEL_TERSE) { /* Before we describe the FUNCTION_DECL itself, make sure that we have described its return type. 
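(TREE_TYPE of a FUNCTION_DECL is its FUNCTION_TYPE, and TREE_TYPE of that is the return type, hence the nested TREE_TYPE calls that follow.) */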
*/ gen_type_die (TREE_TYPE (TREE_TYPE (decl)), context_die); /* And its virtual context. */ if (DECL_VINDEX (decl) != NULL_TREE) gen_type_die (DECL_CONTEXT (decl), context_die); /* And its containing type. */ origin = decl_class_context (decl); if (origin != NULL_TREE) gen_type_die_for_member (origin, decl, context_die); /* And its containing namespace. */ declare_in_namespace (decl, context_die); } /* Now output a DIE to represent the function itself. */ gen_subprogram_die (decl, context_die); break; case TYPE_DECL: /* If we are in terse mode, don't generate any DIEs to represent any actual typedefs. */ if (debug_info_level <= DINFO_LEVEL_TERSE) break; /* In the special case of a TYPE_DECL node representing the declaration of some type tag, if the given TYPE_DECL is marked as having been instantiated from some other (original) TYPE_DECL node (e.g. one which was generated within the original definition of an inline function) we have to generate a special (abbreviated) DW_TAG_structure_type, DW_TAG_union_type, or DW_TAG_enumeration_type DIE here. */ if (TYPE_DECL_IS_STUB (decl) && decl_ultimate_origin (decl) != NULL_TREE) { gen_tagged_type_instantiation_die (TREE_TYPE (decl), context_die); break; } if (is_redundant_typedef (decl)) gen_type_die (TREE_TYPE (decl), context_die); else /* Output a DIE to represent the typedef itself. */ gen_typedef_die (decl, context_die); break; case LABEL_DECL: if (debug_info_level >= DINFO_LEVEL_NORMAL) gen_label_die (decl, context_die); break; case VAR_DECL: case RESULT_DECL: /* If we are in terse mode, don't generate any DIEs to represent any variable declarations or definitions. */ if (debug_info_level <= DINFO_LEVEL_TERSE) break; /* Output any DIEs that are needed to specify the type of this data object. */ gen_type_die (TREE_TYPE (decl), context_die); /* And its containing type. */ origin = decl_class_context (decl); if (origin != NULL_TREE) gen_type_die_for_member (origin, decl, context_die); /* And its containing namespace. */ declare_in_namespace (decl, context_die); /* Now output the DIE to represent the data object itself. This gets complicated because of the possibility that the VAR_DECL really represents an inlined instance of a formal parameter for an inline function. */ origin = decl_ultimate_origin (decl); if (origin != NULL_TREE && TREE_CODE (origin) == PARM_DECL) gen_formal_parameter_die (decl, context_die); else gen_variable_die (decl, context_die); break; case FIELD_DECL: /* Ignore the nameless fields that are used to skip bits but handle C++ anonymous unions and structs. */ if (DECL_NAME (decl) != NULL_TREE || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE || TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE) { gen_type_die (member_declared_type (decl), context_die); gen_field_die (decl, context_die); } break; case PARM_DECL: gen_type_die (TREE_TYPE (decl), context_die); gen_formal_parameter_die (decl, context_die); break; case NAMESPACE_DECL: gen_namespace_die (decl); break; default: if ((int)TREE_CODE (decl) > NUM_TREE_CODES) /* Probably some frontend-internal decl. Assume we don't care. */ break; abort (); } } /* Add Ada "use" clause information for SGI Workshop debugger. 
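Each call creates one DW_TAG_module DIE under the compilation unit, carrying the context list string as its name and the source file's index as DW_AT_decl_file, and registers it in the pubnames table so a debugger can look the unit up by name.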
*/ void dwarf2out_add_library_unit_info (const char *filename, const char *context_list) { unsigned int file_index; if (filename != NULL) { dw_die_ref unit_die = new_die (DW_TAG_module, comp_unit_die, NULL); tree context_list_decl = build_decl (LABEL_DECL, get_identifier (context_list), void_type_node); TREE_PUBLIC (context_list_decl) = TRUE; add_name_attribute (unit_die, context_list); file_index = lookup_filename (filename); add_AT_unsigned (unit_die, DW_AT_decl_file, file_index); add_pubname (context_list_decl, unit_die); } } /* Output debug information for global decl DECL. Called from toplev.c after compilation proper has finished. */ static void dwarf2out_global_decl (tree decl) { /* Output DWARF2 information for file-scope tentative data object declarations, file-scope (extern) function declarations (which had no corresponding body) and file-scope tagged type declarations and definitions which have not yet been forced out. */ if (TREE_CODE (decl) != FUNCTION_DECL || !DECL_INITIAL (decl)) dwarf2out_decl (decl); } /* Output debug information for type decl DECL. Called from toplev.c and from language front ends (to record built-in types). */ static void dwarf2out_type_decl (tree decl, int local) { if (!local) dwarf2out_decl (decl); } /* Output debug information for imported module or decl. */ static void dwarf2out_imported_module_or_decl (tree decl, tree context) { dw_die_ref imported_die, at_import_die; dw_die_ref scope_die; unsigned file_index; expanded_location xloc; if (debug_info_level <= DINFO_LEVEL_TERSE) return; if (!decl) abort (); /* To emit DW_TAG_imported_module or DW_TAG_imported_decl, we need two DIEs. We need decl DIE for reference and scope die. First, get DIE for the decl itself. */ /* Get the scope die for decl context. Use comp_unit_die for global module or decl. If die is not found for non globals, force new die. */ if (!context) scope_die = comp_unit_die; else if (TYPE_P (context)) scope_die = force_type_die (context); else scope_die = force_decl_die (context); /* For TYPE_DECL or CONST_DECL, lookup TREE_TYPE. */ if (TREE_CODE (decl) == TYPE_DECL || TREE_CODE (decl) == CONST_DECL) at_import_die = force_type_die (TREE_TYPE (decl)); else at_import_die = force_decl_die (decl); /* OK, now we have DIEs for decl as well as scope. Emit imported die. */ if (TREE_CODE (decl) == NAMESPACE_DECL) imported_die = new_die (DW_TAG_imported_module, scope_die, context); else imported_die = new_die (DW_TAG_imported_declaration, scope_die, context); xloc = expand_location (input_location); file_index = lookup_filename (xloc.file); add_AT_unsigned (imported_die, DW_AT_decl_file, file_index); add_AT_unsigned (imported_die, DW_AT_decl_line, xloc.line); add_AT_die_ref (imported_die, DW_AT_import, at_import_die); } /* Write the debugging output for DECL. */ void dwarf2out_decl (tree decl) { dw_die_ref context_die = comp_unit_die; switch (TREE_CODE (decl)) { case ERROR_MARK: return; case FUNCTION_DECL: /* What we would really like to do here is to filter out all mere file-scope declarations of file-scope functions which are never referenced later within this translation unit (and keep all of ones that *are* referenced later on) but we aren't clairvoyant, so we have no idea which functions will be referenced in the future (i.e. later on within the current translation unit). So here we just ignore all file-scope function declarations which are not also definitions. 
If and when the debugger needs to know something about these functions, it will have to hunt around and find the DWARF information associated with the definition of the function. We can't just check DECL_EXTERNAL to find out which FUNCTION_DECL nodes represent definitions and which ones represent mere declarations. We have to check DECL_INITIAL instead. That's because the C front-end supports some weird semantics for "extern inline" function definitions. These can get inlined within the current translation unit (and thus, we need to generate Dwarf info for their abstract instances so that the Dwarf info for the concrete inlined instances can have something to refer to) but the compiler never generates any out-of-line instances of such things (despite the fact that they *are* definitions). The important point is that the C front-end marks these "extern inline" functions as DECL_EXTERNAL, but we need to generate DWARF for them anyway. Note that the C++ front-end also plays some similar games for inline function definitions appearing within include files which also contain `#pragma interface' pragmas. */ if (DECL_INITIAL (decl) == NULL_TREE) return; /* If we're a nested function, initially use a parent of NULL; if we're a plain function, this will be fixed up in decls_for_scope. If we're a method, it will be ignored, since we already have a DIE. */ if (decl_function_context (decl) /* But if we're in terse mode, we don't care about scope. */ && debug_info_level > DINFO_LEVEL_TERSE) context_die = NULL; break; case VAR_DECL: /* Ignore this VAR_DECL if it refers to a file-scope extern data object declaration and if the declaration was never even referenced from within this entire compilation unit. We suppress these DIEs in order to save space in the .debug section (by eliminating entries which are probably useless). Note that we must not suppress block-local extern declarations (whether used or not) because that would screw up the debugger's name lookup mechanism and cause it to miss things which really ought to be in scope at a given point. */ if (DECL_EXTERNAL (decl) && !TREE_USED (decl)) return; /* If we are in terse mode, don't generate any DIEs to represent any variable declarations or definitions. */ if (debug_info_level <= DINFO_LEVEL_TERSE) return; break; case NAMESPACE_DECL: if (debug_info_level <= DINFO_LEVEL_TERSE) return; if (lookup_decl_die (decl) != NULL) return; break; case TYPE_DECL: /* Don't emit stubs for types unless they are needed by other DIEs. */ if (TYPE_DECL_SUPPRESS_DEBUG (decl)) return; /* Don't bother trying to generate any DIEs to represent any of the normal built-in types for the language we are compiling. */ if (DECL_IS_BUILTIN (decl)) { /* OK, we need to generate one for `bool' so GDB knows what type comparisons have. */ if ((get_AT_unsigned (comp_unit_die, DW_AT_language) == DW_LANG_C_plus_plus) && TREE_CODE (TREE_TYPE (decl)) == BOOLEAN_TYPE && ! DECL_IGNORED_P (decl)) modified_type_die (TREE_TYPE (decl), 0, 0, NULL); return; } /* If we are in terse mode, don't generate any DIEs for types. */ if (debug_info_level <= DINFO_LEVEL_TERSE) return; /* If we're a function-scope tag, initially use a parent of NULL; this will be fixed up in decls_for_scope. */ if (decl_function_context (decl)) context_die = NULL; break; default: return; } gen_decl_die (decl, context_die); } /* Output a marker (i.e. a label) for the beginning of the generated code for a lexical block.
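The label is generated from BLOCK_BEGIN_LABEL plus the block number (so it typically shows up as something like .LBB42 in the assembly); paired with the end label that dwarf2out_end_block emits, it later provides the low and high PC bounds for the block's DW_TAG_lexical_block DIE.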
*/ static void dwarf2out_begin_block (unsigned int line ATTRIBUTE_UNUSED, unsigned int blocknum) { function_section (current_function_decl); ASM_OUTPUT_DEBUG_LABEL (asm_out_file, BLOCK_BEGIN_LABEL, blocknum); } /* Output a marker (i.e. a label) for the end of the generated code for a lexical block. */ static void dwarf2out_end_block (unsigned int line ATTRIBUTE_UNUSED, unsigned int blocknum) { function_section (current_function_decl); ASM_OUTPUT_DEBUG_LABEL (asm_out_file, BLOCK_END_LABEL, blocknum); } /* Returns nonzero if it is appropriate not to emit any debugging information for BLOCK, because it doesn't contain any instructions. Don't allow this for blocks with nested functions or local classes as we would end up with orphans, and in the presence of scheduling we may end up calling them anyway. */ static bool dwarf2out_ignore_block (tree block) { tree decl; for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl)) if (TREE_CODE (decl) == FUNCTION_DECL || (TREE_CODE (decl) == TYPE_DECL && TYPE_DECL_IS_STUB (decl))) return 0; return 1; } /* Lookup FILE_NAME (in the list of filenames that we know about here in dwarf2out.c) and return its "index". The index of each (known) filename is just a unique number which is associated with only that one filename. We need such numbers for the sake of generating labels (in the .debug_sfnames section) and references to those files numbers (in the .debug_srcinfo and.debug_macinfo sections). If the filename given as an argument is not found in our current list, add it to the list and assign it the next available unique index number. In order to speed up searches, we remember the index of the filename was looked up last. This handles the majority of all searches. */ static unsigned lookup_filename (const char *file_name) { size_t i, n; char *save_file_name; /* Check to see if the file name that was searched on the previous call matches this file name. If so, return the index. */ if (file_table_last_lookup_index != 0) { const char *last = VARRAY_CHAR_PTR (file_table, file_table_last_lookup_index); if (strcmp (file_name, last) == 0) return file_table_last_lookup_index; } /* Didn't match the previous lookup, search the table */ n = VARRAY_ACTIVE_SIZE (file_table); for (i = 1; i < n; i++) if (strcmp (file_name, VARRAY_CHAR_PTR (file_table, i)) == 0) { file_table_last_lookup_index = i; return i; } /* Add the new entry to the end of the filename table. */ file_table_last_lookup_index = n; save_file_name = (char *) ggc_strdup (file_name); VARRAY_PUSH_CHAR_PTR (file_table, save_file_name); VARRAY_PUSH_UINT (file_table_emitted, 0); return i; } static int maybe_emit_file (int fileno) { if (DWARF2_ASM_LINE_DEBUG_INFO && fileno > 0) { if (!VARRAY_UINT (file_table_emitted, fileno)) { VARRAY_UINT (file_table_emitted, fileno) = ++emitcount; fprintf (asm_out_file, "\t.file %u ", VARRAY_UINT (file_table_emitted, fileno)); output_quoted_string (asm_out_file, VARRAY_CHAR_PTR (file_table, fileno)); fputc ('\n', asm_out_file); } return VARRAY_UINT (file_table_emitted, fileno); } else return fileno; } static void init_file_table (void) { /* Allocate the initial hunk of the file_table. */ VARRAY_CHAR_PTR_INIT (file_table, 64, "file_table"); VARRAY_UINT_INIT (file_table_emitted, 64, "file_table_emitted"); /* Skip the first entry - file numbers begin at 1. */ VARRAY_PUSH_CHAR_PTR (file_table, NULL); VARRAY_PUSH_UINT (file_table_emitted, 0); file_table_last_lookup_index = 0; } /* Called by the final INSN scan whenever we see a var location. 
We use it to drop labels in the right places, and throw the location in our lookup table. */ static void dwarf2out_var_location (rtx loc_note) { char loclabel[MAX_ARTIFICIAL_LABEL_BYTES]; struct var_loc_node *newloc; rtx prev_insn; static rtx last_insn; static const char *last_label; if (!DECL_P (NOTE_VAR_LOCATION_DECL (loc_note))) return; prev_insn = PREV_INSN (loc_note); newloc = ggc_alloc_cleared (sizeof (struct var_loc_node)); /* If the insn we processed last time is the previous insn and it is also a var location note, use the label we emitted last time. */ if (last_insn != NULL_RTX && last_insn == prev_insn && GET_CODE (prev_insn) == NOTE && NOTE_LINE_NUMBER (prev_insn) == NOTE_INSN_VAR_LOCATION) { newloc->label = last_label; } else { ASM_GENERATE_INTERNAL_LABEL (loclabel, "LVL", loclabel_num); ASM_OUTPUT_DEBUG_LABEL (asm_out_file, "LVL", loclabel_num); loclabel_num++; newloc->label = ggc_strdup (loclabel); } newloc->var_loc_note = loc_note; newloc->next = NULL; last_insn = loc_note; last_label = newloc->label; add_var_loc_to_decl (NOTE_VAR_LOCATION_DECL (loc_note), newloc); } /* We need to reset the locations at the beginning of each function. We can't do this in the end_function hook, because the declarations that use the locations won't have been outputted when that hook is called. */ static void dwarf2out_begin_function (tree unused ATTRIBUTE_UNUSED) { htab_empty (decl_loc_table); } /* Output a label to mark the beginning of a source code line entry and record information relating to this source line, in 'line_info_table' for later output of the .debug_line section. */ static void dwarf2out_source_line (unsigned int line, const char *filename) { if (debug_info_level >= DINFO_LEVEL_NORMAL && line != 0) { function_section (current_function_decl); /* If requested, emit something human-readable. */ if (flag_debug_asm) fprintf (asm_out_file, "\t%s %s:%d\n", ASM_COMMENT_START, filename, line); if (DWARF2_ASM_LINE_DEBUG_INFO) { unsigned file_num = lookup_filename (filename); file_num = maybe_emit_file (file_num); /* Emit the .loc directive understood by GNU as. */ fprintf (asm_out_file, "\t.loc %d %d 0\n", file_num, line); /* Indicate that line number info exists. */ line_info_table_in_use++; /* Indicate that multiple line number tables exist. */ if (DECL_SECTION_NAME (current_function_decl)) separate_line_info_table_in_use++; } else if (DECL_SECTION_NAME (current_function_decl)) { dw_separate_line_info_ref line_info; targetm.asm_out.internal_label (asm_out_file, SEPARATE_LINE_CODE_LABEL, separate_line_info_table_in_use); /* Expand the line info table if necessary. */ if (separate_line_info_table_in_use == separate_line_info_table_allocated) { separate_line_info_table_allocated += LINE_INFO_TABLE_INCREMENT; separate_line_info_table = ggc_realloc (separate_line_info_table, separate_line_info_table_allocated * sizeof (dw_separate_line_info_entry)); memset (separate_line_info_table + separate_line_info_table_in_use, 0, (LINE_INFO_TABLE_INCREMENT * sizeof (dw_separate_line_info_entry))); } /* Add the new entry at the end of the line_info_table. */ line_info = &separate_line_info_table[separate_line_info_table_in_use++]; line_info->dw_file_num = lookup_filename (filename); line_info->dw_line_num = line; line_info->function = current_function_funcdef_no; } else { dw_line_info_ref line_info; targetm.asm_out.internal_label (asm_out_file, LINE_CODE_LABEL, line_info_table_in_use); /* Expand the line info table if necessary. 
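(This in-memory table is only filled in when DWARF2_ASM_LINE_DEBUG_INFO is false, i.e. when the assembler cannot be handed .file/.loc directives and dwarf2out_finish must build the .debug_line section itself through output_line_info; the table grows by LINE_INFO_TABLE_INCREMENT entries at a time and the freshly allocated tail is zeroed before use.)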
*/ if (line_info_table_in_use == line_info_table_allocated) { line_info_table_allocated += LINE_INFO_TABLE_INCREMENT; line_info_table = ggc_realloc (line_info_table, (line_info_table_allocated * sizeof (dw_line_info_entry))); memset (line_info_table + line_info_table_in_use, 0, LINE_INFO_TABLE_INCREMENT * sizeof (dw_line_info_entry)); } /* Add the new entry at the end of the line_info_table. */ line_info = &line_info_table[line_info_table_in_use++]; line_info->dw_file_num = lookup_filename (filename); line_info->dw_line_num = line; } } } /* Record the beginning of a new source file. */ static void dwarf2out_start_source_file (unsigned int lineno, const char *filename) { if (flag_eliminate_dwarf2_dups) { /* Record the beginning of the file for break_out_includes. */ dw_die_ref bincl_die; bincl_die = new_die (DW_TAG_GNU_BINCL, comp_unit_die, NULL); add_AT_string (bincl_die, DW_AT_name, filename); } if (debug_info_level >= DINFO_LEVEL_VERBOSE) { named_section_flags (DEBUG_MACINFO_SECTION, SECTION_DEBUG); dw2_asm_output_data (1, DW_MACINFO_start_file, "Start new file"); dw2_asm_output_data_uleb128 (lineno, "Included from line number %d", lineno); maybe_emit_file (lookup_filename (filename)); dw2_asm_output_data_uleb128 (lookup_filename (filename), "Filename we just started"); } } /* Record the end of a source file. */ static void dwarf2out_end_source_file (unsigned int lineno ATTRIBUTE_UNUSED) { if (flag_eliminate_dwarf2_dups) /* Record the end of the file for break_out_includes. */ new_die (DW_TAG_GNU_EINCL, comp_unit_die, NULL); if (debug_info_level >= DINFO_LEVEL_VERBOSE) { named_section_flags (DEBUG_MACINFO_SECTION, SECTION_DEBUG); dw2_asm_output_data (1, DW_MACINFO_end_file, "End file"); } } /* Called from debug_define in toplev.c. The `buffer' parameter contains the tail part of the directive line, i.e. the part which is past the initial whitespace, #, whitespace, directive-name, whitespace part. */ static void dwarf2out_define (unsigned int lineno ATTRIBUTE_UNUSED, const char *buffer ATTRIBUTE_UNUSED) { if (debug_info_level >= DINFO_LEVEL_VERBOSE) { named_section_flags (DEBUG_MACINFO_SECTION, SECTION_DEBUG); dw2_asm_output_data (1, DW_MACINFO_define, "Define macro"); dw2_asm_output_data_uleb128 (lineno, "At line number %d", lineno); dw2_asm_output_nstring (buffer, -1, "The macro"); } } /* Called from debug_undef in toplev.c. The `buffer' parameter contains the tail part of the directive line, i.e. the part which is past the initial whitespace, #, whitespace, directive-name, whitespace part. */ static void dwarf2out_undef (unsigned int lineno ATTRIBUTE_UNUSED, const char *buffer ATTRIBUTE_UNUSED) { if (debug_info_level >= DINFO_LEVEL_VERBOSE) { named_section_flags (DEBUG_MACINFO_SECTION, SECTION_DEBUG); dw2_asm_output_data (1, DW_MACINFO_undef, "Undefine macro"); dw2_asm_output_data_uleb128 (lineno, "At line number %d", lineno); dw2_asm_output_nstring (buffer, -1, "The macro"); } } /* Set up for Dwarf output at the start of compilation. */ static void dwarf2out_init (const char *filename ATTRIBUTE_UNUSED) { init_file_table (); /* Allocate the decl_die_table. */ decl_die_table = htab_create_ggc (10, decl_die_table_hash, decl_die_table_eq, NULL); /* Allocate the decl_loc_table. */ decl_loc_table = htab_create_ggc (10, decl_loc_table_hash, decl_loc_table_eq, NULL); /* Allocate the initial hunk of the decl_scope_table. */ VARRAY_TREE_INIT (decl_scope_table, 256, "decl_scope_table"); /* Allocate the initial hunk of the abbrev_die_table. 
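Slot 0 is left permanently unused (abbrev_die_table_in_use starts at 1 below) because DWARF reserves abbreviation code 0: in .debug_info a zero abbreviation code is the null entry that terminates a list of sibling DIEs, so no real abbreviation may be numbered 0.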
*/ abbrev_die_table = ggc_alloc_cleared (ABBREV_DIE_TABLE_INCREMENT * sizeof (dw_die_ref)); abbrev_die_table_allocated = ABBREV_DIE_TABLE_INCREMENT; /* Zero-th entry is allocated, but unused */ abbrev_die_table_in_use = 1; /* Allocate the initial hunk of the line_info_table. */ line_info_table = ggc_alloc_cleared (LINE_INFO_TABLE_INCREMENT * sizeof (dw_line_info_entry)); line_info_table_allocated = LINE_INFO_TABLE_INCREMENT; /* Zero-th entry is allocated, but unused */ line_info_table_in_use = 1; /* Generate the initial DIE for the .debug section. Note that the (string) value given in the DW_AT_name attribute of the DW_TAG_compile_unit DIE will (typically) be a relative pathname and that this pathname should be taken as being relative to the directory from which the compiler was invoked when the given (base) source file was compiled. We will fill in this value in dwarf2out_finish. */ comp_unit_die = gen_compile_unit_die (NULL); VARRAY_TREE_INIT (incomplete_types, 64, "incomplete_types"); VARRAY_RTX_INIT (used_rtx_varray, 32, "used_rtx_varray"); ASM_GENERATE_INTERNAL_LABEL (text_end_label, TEXT_END_LABEL, 0); ASM_GENERATE_INTERNAL_LABEL (abbrev_section_label, DEBUG_ABBREV_SECTION_LABEL, 0); if (DWARF2_GENERATE_TEXT_SECTION_LABEL) ASM_GENERATE_INTERNAL_LABEL (text_section_label, TEXT_SECTION_LABEL, 0); else strcpy (text_section_label, stripattributes (TEXT_SECTION_NAME)); ASM_GENERATE_INTERNAL_LABEL (debug_info_section_label, DEBUG_INFO_SECTION_LABEL, 0); ASM_GENERATE_INTERNAL_LABEL (debug_line_section_label, DEBUG_LINE_SECTION_LABEL, 0); ASM_GENERATE_INTERNAL_LABEL (ranges_section_label, DEBUG_RANGES_SECTION_LABEL, 0); named_section_flags (DEBUG_ABBREV_SECTION, SECTION_DEBUG); ASM_OUTPUT_LABEL (asm_out_file, abbrev_section_label); named_section_flags (DEBUG_INFO_SECTION, SECTION_DEBUG); ASM_OUTPUT_LABEL (asm_out_file, debug_info_section_label); named_section_flags (DEBUG_LINE_SECTION, SECTION_DEBUG); ASM_OUTPUT_LABEL (asm_out_file, debug_line_section_label); if (debug_info_level >= DINFO_LEVEL_VERBOSE) { named_section_flags (DEBUG_MACINFO_SECTION, SECTION_DEBUG); ASM_GENERATE_INTERNAL_LABEL (macinfo_section_label, DEBUG_MACINFO_SECTION_LABEL, 0); ASM_OUTPUT_LABEL (asm_out_file, macinfo_section_label); } if (DWARF2_GENERATE_TEXT_SECTION_LABEL) { text_section (); ASM_OUTPUT_LABEL (asm_out_file, text_section_label); } } /* A helper function for dwarf2out_finish called through ht_forall. Emit one queued .debug_str string. */ static int output_indirect_string (void **h, void *v ATTRIBUTE_UNUSED) { struct indirect_string_node *node = (struct indirect_string_node *) *h; if (node->form == DW_FORM_strp) { named_section_flags (DEBUG_STR_SECTION, DEBUG_STR_SECTION_FLAGS); ASM_OUTPUT_LABEL (asm_out_file, node->label); assemble_string (node->str, strlen (node->str) + 1); } return 1; } /* Clear the marks for a die and its children. Be cool if the mark isn't set. */ static void prune_unmark_dies (dw_die_ref die) { dw_die_ref c; die->die_mark = 0; for (c = die->die_child; c; c = c->die_sib) prune_unmark_dies (c); } /* Given DIE that we're marking as used, find any other dies it references as attributes and mark them as used. */ static void prune_unused_types_walk_attribs (dw_die_ref die) { dw_attr_ref a; for (a = die->die_attr; a != NULL; a = a->dw_attr_next) { if (a->dw_attr_val.val_class == dw_val_class_die_ref) { /* A reference to another DIE. Make sure that it will get emitted. 
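This is the marking half of a mark-and-sweep pass over the DIE graph: every DIE reachable from a marked DIE through a die_ref class attribute (DW_AT_type, DW_AT_specification, DW_AT_abstract_origin and so on) gets marked as well, so the sweep in prune_unused_types_prune can never delete a type that an emitted DIE still refers to.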
*/ prune_unused_types_mark (a->dw_attr_val.v.val_die_ref.die, 1); } else if (a->dw_attr == DW_AT_decl_file) { /* A reference to a file. Make sure the file name is emitted. */ a->dw_attr_val.v.val_unsigned = maybe_emit_file (a->dw_attr_val.v.val_unsigned); } } } /* Mark DIE as being used. If DOKIDS is true, then walk down to DIE's children. */ static void prune_unused_types_mark (dw_die_ref die, int dokids) { dw_die_ref c; if (die->die_mark == 0) { /* We haven't done this node yet. Mark it as used. */ die->die_mark = 1; /* We also have to mark its parents as used. (But we don't want to mark our parents' kids due to this.) */ if (die->die_parent) prune_unused_types_mark (die->die_parent, 0); /* Mark any referenced nodes. */ prune_unused_types_walk_attribs (die); /* If this node is a specification, also mark the definition, if it exists. */ if (get_AT_flag (die, DW_AT_declaration) && die->die_definition) prune_unused_types_mark (die->die_definition, 1); } if (dokids && die->die_mark != 2) { /* We need to walk the children, but haven't done so yet. Remember that we've walked the kids. */ die->die_mark = 2; /* Walk them. */ for (c = die->die_child; c; c = c->die_sib) { /* If this is an array type, we need to make sure our kids get marked, even if they're types. */ if (die->die_tag == DW_TAG_array_type) prune_unused_types_mark (c, 1); else prune_unused_types_walk (c); } } } /* Walk the tree DIE and mark types that we actually use. */ static void prune_unused_types_walk (dw_die_ref die) { dw_die_ref c; /* Don't do anything if this node is already marked. */ if (die->die_mark) return; switch (die->die_tag) { case DW_TAG_const_type: case DW_TAG_packed_type: case DW_TAG_pointer_type: case DW_TAG_reference_type: case DW_TAG_volatile_type: case DW_TAG_typedef: case DW_TAG_array_type: case DW_TAG_structure_type: case DW_TAG_union_type: case DW_TAG_class_type: case DW_TAG_friend: case DW_TAG_variant_part: case DW_TAG_enumeration_type: case DW_TAG_subroutine_type: case DW_TAG_string_type: case DW_TAG_set_type: case DW_TAG_subrange_type: case DW_TAG_ptr_to_member_type: case DW_TAG_file_type: /* It's a type node --- don't mark it. */ return; default: /* Mark everything else. */ break; } die->die_mark = 1; /* Now, mark any dies referenced from here. */ prune_unused_types_walk_attribs (die); /* Mark children. */ for (c = die->die_child; c; c = c->die_sib) prune_unused_types_walk (c); } /* Remove from the tree DIE any dies that aren't marked. */ static void prune_unused_types_prune (dw_die_ref die) { dw_die_ref c, p, n; if (!die->die_mark) abort(); p = NULL; for (c = die->die_child; c; c = n) { n = c->die_sib; if (c->die_mark) { prune_unused_types_prune (c); p = c; } else { if (p) p->die_sib = n; else die->die_child = n; free_die (c); } } } /* Remove dies representing declarations that we never use. */ static void prune_unused_types (void) { unsigned int i; limbo_die_node *node; /* Clear all the marks. */ prune_unmark_dies (comp_unit_die); for (node = limbo_die_list; node; node = node->next) prune_unmark_dies (node->die); /* Set the mark on nodes that are actually used. */ prune_unused_types_walk (comp_unit_die); for (node = limbo_die_list; node; node = node->next) prune_unused_types_walk (node->die); /* Also set the mark on nodes referenced from the pubname_table or arange_table. */ for (i = 0; i < pubname_table_in_use; i++) prune_unused_types_mark (pubname_table[i].die, 1); for (i = 0; i < arange_table_in_use; i++) prune_unused_types_mark (arange_table[i], 1); /* Get rid of nodes that aren't marked. 
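For example, a 'struct unused_s' that acquired a DIE but is never referenced by anything we emit keeps its mark clear and its DW_TAG_structure_type DIE is freed here, whereas a structure that is the type of an emitted variable was marked through that variable's type attribute and survives the sweep.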
*/ prune_unused_types_prune (comp_unit_die); for (node = limbo_die_list; node; node = node->next) prune_unused_types_prune (node->die); /* Leave the marks clear. */ prune_unmark_dies (comp_unit_die); for (node = limbo_die_list; node; node = node->next) prune_unmark_dies (node->die); } /* Output stuff that dwarf requires at the end of every file, and generate the DWARF-2 debugging info. */ static void dwarf2out_finish (const char *filename) { limbo_die_node *node, *next_node; dw_die_ref die = 0; /* Add the name for the main input file now. We delayed this from dwarf2out_init to avoid complications with PCH. */ add_name_attribute (comp_unit_die, filename); if (filename[0] != DIR_SEPARATOR) add_comp_dir_attribute (comp_unit_die); else if (get_AT (comp_unit_die, DW_AT_comp_dir) == NULL) { size_t i; for (i = 1; i < VARRAY_ACTIVE_SIZE (file_table); i++) if (VARRAY_CHAR_PTR (file_table, i)[0] != DIR_SEPARATOR /* Don't add cwd for <built-in>. */ && VARRAY_CHAR_PTR (file_table, i)[0] != '<') { add_comp_dir_attribute (comp_unit_die); break; } } /* Traverse the limbo die list, and add parent/child links. The only dies without parents that should be here are concrete instances of inline functions, and the comp_unit_die. We can ignore the comp_unit_die. For concrete instances, we can get the parent die from the abstract instance. */ for (node = limbo_die_list; node; node = next_node) { next_node = node->next; die = node->die; if (die->die_parent == NULL) { dw_die_ref origin = get_AT_ref (die, DW_AT_abstract_origin); tree context; if (origin) add_child_die (origin->die_parent, die); else if (die == comp_unit_die) ; else if (errorcount > 0 || sorrycount > 0) /* It's OK to be confused by errors in the input. */ add_child_die (comp_unit_die, die); else if (node->created_for && ((DECL_P (node->created_for) && (context = DECL_CONTEXT (node->created_for))) || (TYPE_P (node->created_for) && (context = TYPE_CONTEXT (node->created_for)))) && TREE_CODE (context) == FUNCTION_DECL) { /* In certain situations, the lexical block containing a nested function can be optimized away, which results in the nested function die being orphaned. Likewise with the return type of that nested function. Force this to be a child of the containing function. */ origin = lookup_decl_die (context); if (! origin) abort (); add_child_die (origin, die); } else abort (); } } limbo_die_list = NULL; /* Walk through the list of incomplete types again, trying once more to emit full debugging info for them. */ retry_incomplete_types (); /* We need to reverse all the dies before break_out_includes, or we'll see the end of an include file before the beginning. */ reverse_all_dies (comp_unit_die); if (flag_eliminate_unused_debug_types) prune_unused_types (); /* Generate separate CUs for each of the include files we've seen. They will go into limbo_die_list. */ if (flag_eliminate_dwarf2_dups) break_out_includes (comp_unit_die); /* Traverse the DIE's and add sibling attributes to those DIE's that have children. */ add_sibling_attributes (comp_unit_die); for (node = limbo_die_list; node; node = node->next) add_sibling_attributes (node->die); /* Output a terminator label for the .text section. */ text_section (); targetm.asm_out.internal_label (asm_out_file, TEXT_END_LABEL, 0); /* Output the source line correspondence table. We must do this even if there is no line information. Otherwise, on an empty translation unit, we will generate a present, but empty, .debug_info section. IRIX 6.5 `nm' will then complain when examining the file. */ if (!
DWARF2_ASM_LINE_DEBUG_INFO) { named_section_flags (DEBUG_LINE_SECTION, SECTION_DEBUG); output_line_info (); } /* Output location list section if necessary. */ if (have_location_lists) { /* Output the location lists info. */ named_section_flags (DEBUG_LOC_SECTION, SECTION_DEBUG); ASM_GENERATE_INTERNAL_LABEL (loc_section_label, DEBUG_LOC_SECTION_LABEL, 0); ASM_OUTPUT_LABEL (asm_out_file, loc_section_label); output_location_lists (die); have_location_lists = 0; } /* We can only use the low/high_pc attributes if all of the code was in .text. */ if (separate_line_info_table_in_use == 0) { add_AT_lbl_id (comp_unit_die, DW_AT_low_pc, text_section_label); add_AT_lbl_id (comp_unit_die, DW_AT_high_pc, text_end_label); } /* If it wasn't, we need to give .debug_loc and .debug_ranges an appropriate "base address". Use zero so that these addresses become absolute. */ else if (have_location_lists || ranges_table_in_use) add_AT_addr (comp_unit_die, DW_AT_entry_pc, const0_rtx); if (debug_info_level >= DINFO_LEVEL_NORMAL) add_AT_lbl_offset (comp_unit_die, DW_AT_stmt_list, debug_line_section_label); if (debug_info_level >= DINFO_LEVEL_VERBOSE) add_AT_lbl_offset (comp_unit_die, DW_AT_macro_info, macinfo_section_label); /* Output all of the compilation units. We put the main one last so that the offsets are available to output_pubnames. */ for (node = limbo_die_list; node; node = node->next) output_comp_unit (node->die, 0); output_comp_unit (comp_unit_die, 0); /* Output the abbreviation table. */ named_section_flags (DEBUG_ABBREV_SECTION, SECTION_DEBUG); output_abbrev_section (); /* Output public names table if necessary. */ if (pubname_table_in_use) { named_section_flags (DEBUG_PUBNAMES_SECTION, SECTION_DEBUG); output_pubnames (); } /* Output the address range information. We only put functions in the arange table, so don't write it out if we don't have any. */ if (fde_table_in_use) { named_section_flags (DEBUG_ARANGES_SECTION, SECTION_DEBUG); output_aranges (); } /* Output ranges section if necessary. */ if (ranges_table_in_use) { named_section_flags (DEBUG_RANGES_SECTION, SECTION_DEBUG); ASM_OUTPUT_LABEL (asm_out_file, ranges_section_label); output_ranges (); } /* Have to end the primary source file. */ if (debug_info_level >= DINFO_LEVEL_VERBOSE) { named_section_flags (DEBUG_MACINFO_SECTION, SECTION_DEBUG); dw2_asm_output_data (1, DW_MACINFO_end_file, "End file"); dw2_asm_output_data (1, 0, "End compilation unit"); } /* If we emitted any DW_FORM_strp form attribute, output the string table too. */ if (debug_str_hash) htab_traverse (debug_str_hash, output_indirect_string, NULL); } #else /* This should never be used, but its address is needed for comparisons. */ const struct gcc_debug_hooks dwarf2_debug_hooks; #endif /* DWARF2_DEBUGGING_INFO */ /* Type information for dwarf2out.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_var_loc_list_def (void *x_p) { struct var_loc_list_def * const x = (struct var_loc_list_def *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_12var_loc_node ((*x).first); } } void gt_ggc_mx_var_loc_node (void *x_p) { struct var_loc_node * x = (struct var_loc_node *)x_p; struct var_loc_node * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).next); while (x != xlimit) { gt_ggc_m_7rtx_def ((*x).var_loc_note); gt_ggc_m_12var_loc_node ((*x).next); x = ((*x).next); } } void gt_ggc_mx_limbo_die_struct (void *x_p) { struct limbo_die_struct * const x = (struct limbo_die_struct *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_10die_struct ((*x).die); gt_ggc_m_9tree_node ((*x).created_for); gt_ggc_m_16limbo_die_struct ((*x).next); } } void gt_ggc_mx_dw_ranges_struct (void *x_p) { struct dw_ranges_struct * const x = (struct dw_ranges_struct *)x_p; if (ggc_test_and_set_mark (x)) { } } void gt_ggc_mx_pubname_struct (void *x_p) { struct pubname_struct * const x = (struct pubname_struct *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_10die_struct ((*x).die); } } void gt_ggc_mx_dw_separate_line_info_struct (void *x_p) { struct dw_separate_line_info_struct * const x = (struct dw_separate_line_info_struct *)x_p; if (ggc_test_and_set_mark (x)) { } } void gt_ggc_mx_dw_line_info_struct (void *x_p) { struct dw_line_info_struct * const x = (struct dw_line_info_struct *)x_p; if (ggc_test_and_set_mark (x)) { } } void gt_ggc_mx_dw_attr_struct (void *x_p) { struct dw_attr_struct * const x = (struct dw_attr_struct *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_14dw_attr_struct ((*x).dw_attr_next); switch (((*x).dw_attr_val).val_class) { case dw_val_class_addr: gt_ggc_m_7rtx_def ((*x).dw_attr_val.v.val_addr); break; case dw_val_class_offset: break; case dw_val_class_loc_list: gt_ggc_m_18dw_loc_list_struct ((*x).dw_attr_val.v.val_loc_list); break; case dw_val_class_loc: gt_ggc_m_19dw_loc_descr_struct ((*x).dw_attr_val.v.val_loc); break; default: break; case dw_val_class_unsigned_const: break; case dw_val_class_long_long: break; case dw_val_class_vec: if ((*x).dw_attr_val.v.val_vec.array != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x).dw_attr_val.v.val_vec).length); i0++) { } ggc_mark ((*x).dw_attr_val.v.val_vec.array); } break; case dw_val_class_die_ref: gt_ggc_m_10die_struct ((*x).dw_attr_val.v.val_die_ref.die); break; case dw_val_class_fde_ref: break; case dw_val_class_str: gt_ggc_m_20indirect_string_node ((*x).dw_attr_val.v.val_str); break; case dw_val_class_lbl_id: break; case dw_val_class_flag: break; } } } void gt_ggc_mx_dw_loc_list_struct (void *x_p) { struct dw_loc_list_struct * const x = (struct dw_loc_list_struct *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_18dw_loc_list_struct ((*x).dw_loc_next); gt_ggc_m_19dw_loc_descr_struct ((*x).expr); } } void gt_ggc_mx_queued_reg_save (void *x_p) { struct queued_reg_save * const x = (struct queued_reg_save *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_15queued_reg_save ((*x).next); gt_ggc_m_7rtx_def ((*x).reg); gt_ggc_m_7rtx_def ((*x).saved_reg); } } void gt_ggc_mx_indirect_string_node (void *x_p) { struct indirect_string_node * const x = (struct indirect_string_node *)x_p; if (ggc_test_and_set_mark (x)) { } } void gt_ggc_mx_dw_loc_descr_struct (void *x_p) { struct dw_loc_descr_struct * const x = (struct dw_loc_descr_struct *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_19dw_loc_descr_struct ((*x).dw_loc_next); switch (((*x).dw_loc_oprnd1).val_class) { case 
dw_val_class_addr: gt_ggc_m_7rtx_def ((*x).dw_loc_oprnd1.v.val_addr); break; case dw_val_class_offset: break; case dw_val_class_loc_list: gt_ggc_m_18dw_loc_list_struct ((*x).dw_loc_oprnd1.v.val_loc_list); break; case dw_val_class_loc: gt_ggc_m_19dw_loc_descr_struct ((*x).dw_loc_oprnd1.v.val_loc); break; default: break; case dw_val_class_unsigned_const: break; case dw_val_class_long_long: break; case dw_val_class_vec: if ((*x).dw_loc_oprnd1.v.val_vec.array != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x).dw_loc_oprnd1.v.val_vec).length); i0++) { } ggc_mark ((*x).dw_loc_oprnd1.v.val_vec.array); } break; case dw_val_class_die_ref: gt_ggc_m_10die_struct ((*x).dw_loc_oprnd1.v.val_die_ref.die); break; case dw_val_class_fde_ref: break; case dw_val_class_str: gt_ggc_m_20indirect_string_node ((*x).dw_loc_oprnd1.v.val_str); break; case dw_val_class_lbl_id: break; case dw_val_class_flag: break; } switch (((*x).dw_loc_oprnd2).val_class) { case dw_val_class_addr: gt_ggc_m_7rtx_def ((*x).dw_loc_oprnd2.v.val_addr); break; case dw_val_class_offset: break; case dw_val_class_loc_list: gt_ggc_m_18dw_loc_list_struct ((*x).dw_loc_oprnd2.v.val_loc_list); break; case dw_val_class_loc: gt_ggc_m_19dw_loc_descr_struct ((*x).dw_loc_oprnd2.v.val_loc); break; default: break; case dw_val_class_unsigned_const: break; case dw_val_class_long_long: break; case dw_val_class_vec: if ((*x).dw_loc_oprnd2.v.val_vec.array != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(((*x).dw_loc_oprnd2.v.val_vec).length); i1++) { } ggc_mark ((*x).dw_loc_oprnd2.v.val_vec.array); } break; case dw_val_class_die_ref: gt_ggc_m_10die_struct ((*x).dw_loc_oprnd2.v.val_die_ref.die); break; case dw_val_class_fde_ref: break; case dw_val_class_str: gt_ggc_m_20indirect_string_node ((*x).dw_loc_oprnd2.v.val_str); break; case dw_val_class_lbl_id: break; case dw_val_class_flag: break; } } } void gt_ggc_mx_dw_fde_struct (void *x_p) { struct dw_fde_struct * const x = (struct dw_fde_struct *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).decl); gt_ggc_m_13dw_cfi_struct ((*x).dw_fde_cfi); } } void gt_ggc_mx_dw_cfi_struct (void *x_p) { struct dw_cfi_struct * const x = (struct dw_cfi_struct *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_13dw_cfi_struct ((*x).dw_cfi_next); switch (dw_cfi_oprnd1_desc (((*x)).dw_cfi_opc)) { case dw_cfi_oprnd_reg_num: break; case dw_cfi_oprnd_offset: break; case dw_cfi_oprnd_addr: break; case dw_cfi_oprnd_loc: gt_ggc_m_19dw_loc_descr_struct ((*x).dw_cfi_oprnd1.dw_cfi_loc); break; default: break; } switch (dw_cfi_oprnd2_desc (((*x)).dw_cfi_opc)) { case dw_cfi_oprnd_reg_num: break; case dw_cfi_oprnd_offset: break; case dw_cfi_oprnd_addr: break; case dw_cfi_oprnd_loc: gt_ggc_m_19dw_loc_descr_struct ((*x).dw_cfi_oprnd2.dw_cfi_loc); break; default: break; } } } void gt_ggc_mx_die_struct (void *x_p) { struct die_struct * const x = (struct die_struct *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_14dw_attr_struct ((*x).die_attr); gt_ggc_m_10die_struct ((*x).die_parent); gt_ggc_m_10die_struct ((*x).die_child); gt_ggc_m_10die_struct ((*x).die_sib); gt_ggc_m_10die_struct ((*x).die_definition); } } void gt_ggc_m_P16var_loc_list_def4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_16var_loc_list_def ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_P10die_struct4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if 
((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_10die_struct ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_P20indirect_string_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_20indirect_string_node ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_pch_nx_var_loc_list_def (void *x_p) { struct var_loc_list_def * const x = (struct var_loc_list_def *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_16var_loc_list_def)) { gt_pch_n_12var_loc_node ((*x).first); } } void gt_pch_nx_var_loc_node (void *x_p) { struct var_loc_node * x = (struct var_loc_node *)x_p; struct var_loc_node * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_12var_loc_node)) xlimit = ((*xlimit).next); while (x != xlimit) { gt_pch_n_7rtx_def ((*x).var_loc_note); gt_pch_n_S ((*x).label); gt_pch_n_12var_loc_node ((*x).next); x = ((*x).next); } } void gt_pch_nx_limbo_die_struct (void *x_p) { struct limbo_die_struct * const x = (struct limbo_die_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_16limbo_die_struct)) { gt_pch_n_10die_struct ((*x).die); gt_pch_n_9tree_node ((*x).created_for); gt_pch_n_16limbo_die_struct ((*x).next); } } void gt_pch_nx_dw_ranges_struct (void *x_p) { struct dw_ranges_struct * const x = (struct dw_ranges_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_16dw_ranges_struct)) { } } void gt_pch_nx_pubname_struct (void *x_p) { struct pubname_struct * const x = (struct pubname_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_14pubname_struct)) { gt_pch_n_10die_struct ((*x).die); gt_pch_n_S ((*x).name); } } void gt_pch_nx_dw_separate_line_info_struct (void *x_p) { struct dw_separate_line_info_struct * const x = (struct dw_separate_line_info_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_28dw_separate_line_info_struct)) { } } void gt_pch_nx_dw_line_info_struct (void *x_p) { struct dw_line_info_struct * const x = (struct dw_line_info_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_19dw_line_info_struct)) { } } void gt_pch_nx_dw_attr_struct (void *x_p) { struct dw_attr_struct * const x = (struct dw_attr_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_14dw_attr_struct)) { gt_pch_n_14dw_attr_struct ((*x).dw_attr_next); switch (((*x).dw_attr_val).val_class) { case dw_val_class_addr: gt_pch_n_7rtx_def ((*x).dw_attr_val.v.val_addr); break; case dw_val_class_offset: break; case dw_val_class_loc_list: gt_pch_n_18dw_loc_list_struct ((*x).dw_attr_val.v.val_loc_list); break; case dw_val_class_loc: gt_pch_n_19dw_loc_descr_struct ((*x).dw_attr_val.v.val_loc); break; default: break; case dw_val_class_unsigned_const: break; case dw_val_class_long_long: break; case dw_val_class_vec: if ((*x).dw_attr_val.v.val_vec.array != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x).dw_attr_val.v.val_vec).length); i0++) { } gt_pch_note_object ((*x).dw_attr_val.v.val_vec.array, x, gt_pch_p_14dw_attr_struct); } break; case dw_val_class_die_ref: gt_pch_n_10die_struct ((*x).dw_attr_val.v.val_die_ref.die); break; case dw_val_class_fde_ref: break; case dw_val_class_str: gt_pch_n_20indirect_string_node ((*x).dw_attr_val.v.val_str); break; case dw_val_class_lbl_id: gt_pch_n_S ((*x).dw_attr_val.v.val_lbl_id); break; case dw_val_class_flag: break; } } } void gt_pch_nx_dw_loc_list_struct (void *x_p) { struct dw_loc_list_struct * const x = (struct dw_loc_list_struct *)x_p; if 
(gt_pch_note_object (x, x, gt_pch_p_18dw_loc_list_struct)) { gt_pch_n_18dw_loc_list_struct ((*x).dw_loc_next); gt_pch_n_S ((*x).begin); gt_pch_n_S ((*x).end); gt_pch_n_S ((*x).ll_symbol); gt_pch_n_S ((*x).section); gt_pch_n_19dw_loc_descr_struct ((*x).expr); } } void gt_pch_nx_queued_reg_save (void *x_p) { struct queued_reg_save * const x = (struct queued_reg_save *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_15queued_reg_save)) { gt_pch_n_15queued_reg_save ((*x).next); gt_pch_n_7rtx_def ((*x).reg); gt_pch_n_7rtx_def ((*x).saved_reg); } } void gt_pch_nx_indirect_string_node (void *x_p) { struct indirect_string_node * const x = (struct indirect_string_node *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_20indirect_string_node)) { gt_pch_n_S ((*x).str); gt_pch_n_S ((*x).label); } } void gt_pch_nx_dw_loc_descr_struct (void *x_p) { struct dw_loc_descr_struct * const x = (struct dw_loc_descr_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_19dw_loc_descr_struct)) { gt_pch_n_19dw_loc_descr_struct ((*x).dw_loc_next); switch (((*x).dw_loc_oprnd1).val_class) { case dw_val_class_addr: gt_pch_n_7rtx_def ((*x).dw_loc_oprnd1.v.val_addr); break; case dw_val_class_offset: break; case dw_val_class_loc_list: gt_pch_n_18dw_loc_list_struct ((*x).dw_loc_oprnd1.v.val_loc_list); break; case dw_val_class_loc: gt_pch_n_19dw_loc_descr_struct ((*x).dw_loc_oprnd1.v.val_loc); break; default: break; case dw_val_class_unsigned_const: break; case dw_val_class_long_long: break; case dw_val_class_vec: if ((*x).dw_loc_oprnd1.v.val_vec.array != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x).dw_loc_oprnd1.v.val_vec).length); i0++) { } gt_pch_note_object ((*x).dw_loc_oprnd1.v.val_vec.array, x, gt_pch_p_19dw_loc_descr_struct); } break; case dw_val_class_die_ref: gt_pch_n_10die_struct ((*x).dw_loc_oprnd1.v.val_die_ref.die); break; case dw_val_class_fde_ref: break; case dw_val_class_str: gt_pch_n_20indirect_string_node ((*x).dw_loc_oprnd1.v.val_str); break; case dw_val_class_lbl_id: gt_pch_n_S ((*x).dw_loc_oprnd1.v.val_lbl_id); break; case dw_val_class_flag: break; } switch (((*x).dw_loc_oprnd2).val_class) { case dw_val_class_addr: gt_pch_n_7rtx_def ((*x).dw_loc_oprnd2.v.val_addr); break; case dw_val_class_offset: break; case dw_val_class_loc_list: gt_pch_n_18dw_loc_list_struct ((*x).dw_loc_oprnd2.v.val_loc_list); break; case dw_val_class_loc: gt_pch_n_19dw_loc_descr_struct ((*x).dw_loc_oprnd2.v.val_loc); break; default: break; case dw_val_class_unsigned_const: break; case dw_val_class_long_long: break; case dw_val_class_vec: if ((*x).dw_loc_oprnd2.v.val_vec.array != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(((*x).dw_loc_oprnd2.v.val_vec).length); i1++) { } gt_pch_note_object ((*x).dw_loc_oprnd2.v.val_vec.array, x, gt_pch_p_19dw_loc_descr_struct); } break; case dw_val_class_die_ref: gt_pch_n_10die_struct ((*x).dw_loc_oprnd2.v.val_die_ref.die); break; case dw_val_class_fde_ref: break; case dw_val_class_str: gt_pch_n_20indirect_string_node ((*x).dw_loc_oprnd2.v.val_str); break; case dw_val_class_lbl_id: gt_pch_n_S ((*x).dw_loc_oprnd2.v.val_lbl_id); break; case dw_val_class_flag: break; } } } void gt_pch_nx_dw_fde_struct (void *x_p) { struct dw_fde_struct * const x = (struct dw_fde_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_13dw_fde_struct)) { gt_pch_n_9tree_node ((*x).decl); gt_pch_n_S ((*x).dw_fde_begin); gt_pch_n_S ((*x).dw_fde_current_label); gt_pch_n_S ((*x).dw_fde_end); gt_pch_n_13dw_cfi_struct ((*x).dw_fde_cfi); } } void gt_pch_nx_dw_cfi_struct (void *x_p) { struct dw_cfi_struct * const x = (struct 
dw_cfi_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_13dw_cfi_struct)) { gt_pch_n_13dw_cfi_struct ((*x).dw_cfi_next); switch (dw_cfi_oprnd1_desc (((*x)).dw_cfi_opc)) { case dw_cfi_oprnd_reg_num: break; case dw_cfi_oprnd_offset: break; case dw_cfi_oprnd_addr: gt_pch_n_S ((*x).dw_cfi_oprnd1.dw_cfi_addr); break; case dw_cfi_oprnd_loc: gt_pch_n_19dw_loc_descr_struct ((*x).dw_cfi_oprnd1.dw_cfi_loc); break; default: break; } switch (dw_cfi_oprnd2_desc (((*x)).dw_cfi_opc)) { case dw_cfi_oprnd_reg_num: break; case dw_cfi_oprnd_offset: break; case dw_cfi_oprnd_addr: gt_pch_n_S ((*x).dw_cfi_oprnd2.dw_cfi_addr); break; case dw_cfi_oprnd_loc: gt_pch_n_19dw_loc_descr_struct ((*x).dw_cfi_oprnd2.dw_cfi_loc); break; default: break; } } } void gt_pch_nx_die_struct (void *x_p) { struct die_struct * const x = (struct die_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_10die_struct)) { gt_pch_n_S ((*x).die_symbol); gt_pch_n_14dw_attr_struct ((*x).die_attr); gt_pch_n_10die_struct ((*x).die_parent); gt_pch_n_10die_struct ((*x).die_child); gt_pch_n_10die_struct ((*x).die_sib); gt_pch_n_10die_struct ((*x).die_definition); } } void gt_pch_n_P16var_loc_list_def4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P16var_loc_list_def4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_16var_loc_list_def ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P16var_loc_list_def4htab); } } } void gt_pch_n_P10die_struct4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P10die_struct4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_10die_struct ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P10die_struct4htab); } } } void gt_pch_n_P20indirect_string_node4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P20indirect_string_node4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_20indirect_string_node ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P20indirect_string_node4htab); } } } void gt_pch_p_16var_loc_list_def (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct var_loc_list_def * const x ATTRIBUTE_UNUSED = (struct var_loc_list_def *)x_p; if ((void *)(x) == this_obj) op (&((*x).first), cookie); } void gt_pch_p_12var_loc_node (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct var_loc_node * const x ATTRIBUTE_UNUSED = (struct var_loc_node *)x_p; if ((void *)(x) == this_obj) op (&((*x).var_loc_note), cookie); if ((void *)(x) == this_obj) op (&((*x).label), cookie); if ((void *)(x) == this_obj) op (&((*x).next), cookie); } void gt_pch_p_16limbo_die_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct limbo_die_struct * const x ATTRIBUTE_UNUSED = (struct limbo_die_struct *)x_p; if ((void *)(x) == this_obj) op (&((*x).die), cookie); if ((void *)(x) == this_obj) op (&((*x).created_for), cookie); if ((void *)(x) == this_obj) op (&((*x).next), cookie); } void gt_pch_p_16dw_ranges_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct 
dw_ranges_struct * const x ATTRIBUTE_UNUSED = (struct dw_ranges_struct *)x_p; } void gt_pch_p_14pubname_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct pubname_struct * const x ATTRIBUTE_UNUSED = (struct pubname_struct *)x_p; if ((void *)(x) == this_obj) op (&((*x).die), cookie); if ((void *)(x) == this_obj) op (&((*x).name), cookie); } void gt_pch_p_28dw_separate_line_info_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct dw_separate_line_info_struct * const x ATTRIBUTE_UNUSED = (struct dw_separate_line_info_struct *)x_p; } void gt_pch_p_19dw_line_info_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct dw_line_info_struct * const x ATTRIBUTE_UNUSED = (struct dw_line_info_struct *)x_p; } void gt_pch_p_14dw_attr_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct dw_attr_struct * const x ATTRIBUTE_UNUSED = (struct dw_attr_struct *)x_p; if ((void *)(x) == this_obj) op (&((*x).dw_attr_next), cookie); switch (((*x).dw_attr_val).val_class) { case dw_val_class_addr: if ((void *)(x) == this_obj) op (&((*x).dw_attr_val.v.val_addr), cookie); break; case dw_val_class_offset: break; case dw_val_class_loc_list: if ((void *)(x) == this_obj) op (&((*x).dw_attr_val.v.val_loc_list), cookie); break; case dw_val_class_loc: if ((void *)(x) == this_obj) op (&((*x).dw_attr_val.v.val_loc), cookie); break; default: break; case dw_val_class_unsigned_const: break; case dw_val_class_long_long: break; case dw_val_class_vec: if ((*x).dw_attr_val.v.val_vec.array != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x).dw_attr_val.v.val_vec).length); i0++) { } if ((void *)(x) == this_obj) op (&((*x).dw_attr_val.v.val_vec.array), cookie); } break; case dw_val_class_die_ref: if ((void *)(x) == this_obj) op (&((*x).dw_attr_val.v.val_die_ref.die), cookie); break; case dw_val_class_fde_ref: break; case dw_val_class_str: if ((void *)(x) == this_obj) op (&((*x).dw_attr_val.v.val_str), cookie); break; case dw_val_class_lbl_id: if ((void *)(x) == this_obj) op (&((*x).dw_attr_val.v.val_lbl_id), cookie); break; case dw_val_class_flag: break; } } void gt_pch_p_18dw_loc_list_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct dw_loc_list_struct * const x ATTRIBUTE_UNUSED = (struct dw_loc_list_struct *)x_p; if ((void *)(x) == this_obj) op (&((*x).dw_loc_next), cookie); if ((void *)(x) == this_obj) op (&((*x).begin), cookie); if ((void *)(x) == this_obj) op (&((*x).end), cookie); if ((void *)(x) == this_obj) op (&((*x).ll_symbol), cookie); if ((void *)(x) == this_obj) op (&((*x).section), cookie); if ((void *)(x) == this_obj) op (&((*x).expr), cookie); } void gt_pch_p_15queued_reg_save (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct queued_reg_save * const x ATTRIBUTE_UNUSED = (struct queued_reg_save *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).reg), cookie); if ((void *)(x) == this_obj) op (&((*x).saved_reg), cookie); } void gt_pch_p_20indirect_string_node (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct 
indirect_string_node * const x ATTRIBUTE_UNUSED = (struct indirect_string_node *)x_p; if ((void *)(x) == this_obj) op (&((*x).str), cookie); if ((void *)(x) == this_obj) op (&((*x).label), cookie); } void gt_pch_p_19dw_loc_descr_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct dw_loc_descr_struct * const x ATTRIBUTE_UNUSED = (struct dw_loc_descr_struct *)x_p; if ((void *)(x) == this_obj) op (&((*x).dw_loc_next), cookie); switch (((*x).dw_loc_oprnd1).val_class) { case dw_val_class_addr: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd1.v.val_addr), cookie); break; case dw_val_class_offset: break; case dw_val_class_loc_list: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd1.v.val_loc_list), cookie); break; case dw_val_class_loc: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd1.v.val_loc), cookie); break; default: break; case dw_val_class_unsigned_const: break; case dw_val_class_long_long: break; case dw_val_class_vec: if ((*x).dw_loc_oprnd1.v.val_vec.array != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x).dw_loc_oprnd1.v.val_vec).length); i0++) { } if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd1.v.val_vec.array), cookie); } break; case dw_val_class_die_ref: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd1.v.val_die_ref.die), cookie); break; case dw_val_class_fde_ref: break; case dw_val_class_str: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd1.v.val_str), cookie); break; case dw_val_class_lbl_id: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd1.v.val_lbl_id), cookie); break; case dw_val_class_flag: break; } switch (((*x).dw_loc_oprnd2).val_class) { case dw_val_class_addr: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd2.v.val_addr), cookie); break; case dw_val_class_offset: break; case dw_val_class_loc_list: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd2.v.val_loc_list), cookie); break; case dw_val_class_loc: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd2.v.val_loc), cookie); break; default: break; case dw_val_class_unsigned_const: break; case dw_val_class_long_long: break; case dw_val_class_vec: if ((*x).dw_loc_oprnd2.v.val_vec.array != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(((*x).dw_loc_oprnd2.v.val_vec).length); i1++) { } if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd2.v.val_vec.array), cookie); } break; case dw_val_class_die_ref: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd2.v.val_die_ref.die), cookie); break; case dw_val_class_fde_ref: break; case dw_val_class_str: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd2.v.val_str), cookie); break; case dw_val_class_lbl_id: if ((void *)(x) == this_obj) op (&((*x).dw_loc_oprnd2.v.val_lbl_id), cookie); break; case dw_val_class_flag: break; } } void gt_pch_p_13dw_fde_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct dw_fde_struct * const x ATTRIBUTE_UNUSED = (struct dw_fde_struct *)x_p; if ((void *)(x) == this_obj) op (&((*x).decl), cookie); if ((void *)(x) == this_obj) op (&((*x).dw_fde_begin), cookie); if ((void *)(x) == this_obj) op (&((*x).dw_fde_current_label), cookie); if ((void *)(x) == this_obj) op (&((*x).dw_fde_end), cookie); if ((void *)(x) == this_obj) op (&((*x).dw_fde_cfi), cookie); } void gt_pch_p_13dw_cfi_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct dw_cfi_struct * const x ATTRIBUTE_UNUSED = 
(struct dw_cfi_struct *)x_p; if ((void *)(x) == this_obj) op (&((*x).dw_cfi_next), cookie); switch (dw_cfi_oprnd1_desc (((*x)).dw_cfi_opc)) { case dw_cfi_oprnd_reg_num: break; case dw_cfi_oprnd_offset: break; case dw_cfi_oprnd_addr: if ((void *)(x) == this_obj) op (&((*x).dw_cfi_oprnd1.dw_cfi_addr), cookie); break; case dw_cfi_oprnd_loc: if ((void *)(x) == this_obj) op (&((*x).dw_cfi_oprnd1.dw_cfi_loc), cookie); break; default: break; } switch (dw_cfi_oprnd2_desc (((*x)).dw_cfi_opc)) { case dw_cfi_oprnd_reg_num: break; case dw_cfi_oprnd_offset: break; case dw_cfi_oprnd_addr: if ((void *)(x) == this_obj) op (&((*x).dw_cfi_oprnd2.dw_cfi_addr), cookie); break; case dw_cfi_oprnd_loc: if ((void *)(x) == this_obj) op (&((*x).dw_cfi_oprnd2.dw_cfi_loc), cookie); break; default: break; } } void gt_pch_p_10die_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct die_struct * const x ATTRIBUTE_UNUSED = (struct die_struct *)x_p; if ((void *)(x) == this_obj) op (&((*x).die_symbol), cookie); if ((void *)(x) == this_obj) op (&((*x).die_attr), cookie); if ((void *)(x) == this_obj) op (&((*x).die_parent), cookie); if ((void *)(x) == this_obj) op (&((*x).die_child), cookie); if ((void *)(x) == this_obj) op (&((*x).die_sib), cookie); if ((void *)(x) == this_obj) op (&((*x).die_definition), cookie); } void gt_pch_p_P16var_loc_list_def4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } void gt_pch_p_P10die_struct4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } void gt_pch_p_P20indirect_string_node4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } /* GC roots. 
*/ static void gt_ggc_ma_ranges_table (void *); static void gt_ggc_ma_ranges_table (void *x_p ATTRIBUTE_UNUSED) { if (ranges_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(ranges_table_allocated); i0++) { } ggc_mark (ranges_table); } } static void gt_pch_pa_ranges_table (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_ranges_table (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (ranges_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(ranges_table_allocated); i0++) { } if ((void *)(&ranges_table) == this_obj) op (&(ranges_table), cookie); } } static void gt_pch_na_ranges_table (void *); static void gt_pch_na_ranges_table (void *x_p ATTRIBUTE_UNUSED) { if (ranges_table != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(ranges_table_allocated); i1++) { } gt_pch_note_object (ranges_table, &ranges_table, gt_pch_pa_ranges_table); } } static void gt_ggc_ma_arange_table (void *); static void gt_ggc_ma_arange_table (void *x_p ATTRIBUTE_UNUSED) { if (arange_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(arange_table_allocated); i0++) { gt_ggc_m_10die_struct (arange_table[i0]); } ggc_mark (arange_table); } } static void gt_pch_pa_arange_table (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_arange_table (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (arange_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(arange_table_allocated); i0++) { if ((void *)(arange_table) == this_obj) op (&(arange_table[i0]), cookie); } if ((void *)(&arange_table) == this_obj) op (&(arange_table), cookie); } } static void gt_pch_na_arange_table (void *); static void gt_pch_na_arange_table (void *x_p ATTRIBUTE_UNUSED) { if (arange_table != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(arange_table_allocated); i1++) { gt_pch_n_10die_struct (arange_table[i1]); } gt_pch_note_object (arange_table, &arange_table, gt_pch_pa_arange_table); } } static void gt_ggc_ma_pubname_table (void *); static void gt_ggc_ma_pubname_table (void *x_p ATTRIBUTE_UNUSED) { if (pubname_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(pubname_table_allocated); i0++) { gt_ggc_m_10die_struct (pubname_table[i0].die); } ggc_mark (pubname_table); } } static void gt_pch_pa_pubname_table (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_pubname_table (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (pubname_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(pubname_table_allocated); i0++) { if ((void *)(pubname_table) == this_obj) op (&(pubname_table[i0].die), cookie); if ((void *)(pubname_table) == this_obj) op (&(pubname_table[i0].name), cookie); } if ((void *)(&pubname_table) == this_obj) op (&(pubname_table), cookie); } } static void gt_pch_na_pubname_table (void *); static void gt_pch_na_pubname_table (void *x_p ATTRIBUTE_UNUSED) { if (pubname_table != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(pubname_table_allocated); i1++) { gt_pch_n_10die_struct (pubname_table[i1].die); gt_pch_n_S (pubname_table[i1].name); } gt_pch_note_object (pubname_table, &pubname_table, gt_pch_pa_pubname_table); } } static void gt_ggc_ma_separate_line_info_table (void *); static void gt_ggc_ma_separate_line_info_table (void *x_p ATTRIBUTE_UNUSED) { if (separate_line_info_table != NULL) { size_t i0; for (i0 = 0; i0 < 
(size_t)(separate_line_info_table_allocated); i0++) { } ggc_mark (separate_line_info_table); } } static void gt_pch_pa_separate_line_info_table (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_separate_line_info_table (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (separate_line_info_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(separate_line_info_table_allocated); i0++) { } if ((void *)(&separate_line_info_table) == this_obj) op (&(separate_line_info_table), cookie); } } static void gt_pch_na_separate_line_info_table (void *); static void gt_pch_na_separate_line_info_table (void *x_p ATTRIBUTE_UNUSED) { if (separate_line_info_table != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(separate_line_info_table_allocated); i1++) { } gt_pch_note_object (separate_line_info_table, &separate_line_info_table, gt_pch_pa_separate_line_info_table); } } static void gt_ggc_ma_line_info_table (void *); static void gt_ggc_ma_line_info_table (void *x_p ATTRIBUTE_UNUSED) { if (line_info_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(line_info_table_allocated); i0++) { } ggc_mark (line_info_table); } } static void gt_pch_pa_line_info_table (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_line_info_table (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (line_info_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(line_info_table_allocated); i0++) { } if ((void *)(&line_info_table) == this_obj) op (&(line_info_table), cookie); } } static void gt_pch_na_line_info_table (void *); static void gt_pch_na_line_info_table (void *x_p ATTRIBUTE_UNUSED) { if (line_info_table != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(line_info_table_allocated); i1++) { } gt_pch_note_object (line_info_table, &line_info_table, gt_pch_pa_line_info_table); } } static void gt_ggc_ma_abbrev_die_table (void *); static void gt_ggc_ma_abbrev_die_table (void *x_p ATTRIBUTE_UNUSED) { if (abbrev_die_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(abbrev_die_table_allocated); i0++) { gt_ggc_m_10die_struct (abbrev_die_table[i0]); } ggc_mark (abbrev_die_table); } } static void gt_pch_pa_abbrev_die_table (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_abbrev_die_table (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (abbrev_die_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(abbrev_die_table_allocated); i0++) { if ((void *)(abbrev_die_table) == this_obj) op (&(abbrev_die_table[i0]), cookie); } if ((void *)(&abbrev_die_table) == this_obj) op (&(abbrev_die_table), cookie); } } static void gt_pch_na_abbrev_die_table (void *); static void gt_pch_na_abbrev_die_table (void *x_p ATTRIBUTE_UNUSED) { if (abbrev_die_table != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(abbrev_die_table_allocated); i1++) { gt_pch_n_10die_struct (abbrev_die_table[i1]); } gt_pch_note_object (abbrev_die_table, &abbrev_die_table, gt_pch_pa_abbrev_die_table); } } static void gt_ggc_ma_fde_table (void *); static void gt_ggc_ma_fde_table (void *x_p ATTRIBUTE_UNUSED) { if (fde_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(fde_table_allocated); i0++) { gt_ggc_m_9tree_node (fde_table[i0].decl); gt_ggc_m_13dw_cfi_struct (fde_table[i0].dw_fde_cfi); } ggc_mark (fde_table); } } static void gt_pch_pa_fde_table (void *, 
void *, gt_pointer_operator, void *); static void gt_pch_pa_fde_table (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (fde_table != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(fde_table_allocated); i0++) { if ((void *)(fde_table) == this_obj) op (&(fde_table[i0].decl), cookie); if ((void *)(fde_table) == this_obj) op (&(fde_table[i0].dw_fde_begin), cookie); if ((void *)(fde_table) == this_obj) op (&(fde_table[i0].dw_fde_current_label), cookie); if ((void *)(fde_table) == this_obj) op (&(fde_table[i0].dw_fde_end), cookie); if ((void *)(fde_table) == this_obj) op (&(fde_table[i0].dw_fde_cfi), cookie); } if ((void *)(&fde_table) == this_obj) op (&(fde_table), cookie); } } static void gt_pch_na_fde_table (void *); static void gt_pch_na_fde_table (void *x_p ATTRIBUTE_UNUSED) { if (fde_table != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(fde_table_allocated); i1++) { gt_pch_n_9tree_node (fde_table[i1].decl); gt_pch_n_S (fde_table[i1].dw_fde_begin); gt_pch_n_S (fde_table[i1].dw_fde_current_label); gt_pch_n_S (fde_table[i1].dw_fde_end); gt_pch_n_13dw_cfi_struct (fde_table[i1].dw_fde_cfi); } gt_pch_note_object (fde_table, &fde_table, gt_pch_pa_fde_table); } } const struct ggc_root_tab gt_ggc_r_gt_dwarf2out_h[] = { { &ranges_table, 1, sizeof (ranges_table), &gt_ggc_ma_ranges_table, &gt_pch_na_ranges_table }, { &arange_table, 1, sizeof (arange_table), &gt_ggc_ma_arange_table, &gt_pch_na_arange_table }, { &pubname_table, 1, sizeof (pubname_table), &gt_ggc_ma_pubname_table, &gt_pch_na_pubname_table }, { &separate_line_info_table, 1, sizeof (separate_line_info_table), &gt_ggc_ma_separate_line_info_table, &gt_pch_na_separate_line_info_table }, { &line_info_table, 1, sizeof (line_info_table), &gt_ggc_ma_line_info_table, &gt_pch_na_line_info_table }, { &abbrev_die_table, 1, sizeof (abbrev_die_table), &gt_ggc_ma_abbrev_die_table, &gt_pch_na_abbrev_die_table }, { &decl_loc_table, 1, sizeof (decl_loc_table), &gt_ggc_m_P16var_loc_list_def4htab, &gt_pch_n_P16var_loc_list_def4htab }, { &decl_die_table, 1, sizeof (decl_die_table), &gt_ggc_m_P10die_struct4htab, &gt_pch_n_P10die_struct4htab }, { &file_table_emitted, 1, sizeof (file_table_emitted), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &file_table, 1, sizeof (file_table), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &limbo_die_list, 1, sizeof (limbo_die_list), &gt_ggc_mx_limbo_die_struct, &gt_pch_nx_limbo_die_struct }, { &comp_unit_die, 1, sizeof (comp_unit_die), &gt_ggc_mx_die_struct, &gt_pch_nx_die_struct }, { &regs_saved_in_regs[0].orig_reg, 1 * (4), sizeof (regs_saved_in_regs[0]), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, { &regs_saved_in_regs[0].saved_in_reg, 1 * (4), sizeof (regs_saved_in_regs[0]), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, { &queued_reg_saves, 1, sizeof (queued_reg_saves), &gt_ggc_mx_queued_reg_save, &gt_pch_nx_queued_reg_save }, { &debug_str_hash, 1, sizeof (debug_str_hash), &gt_ggc_m_P20indirect_string_node4htab, &gt_pch_n_P20indirect_string_node4htab }, { &cie_cfi_head, 1, sizeof (cie_cfi_head), &gt_ggc_mx_dw_cfi_struct, &gt_pch_nx_dw_cfi_struct }, { &fde_table, 1, sizeof (fde_table), &gt_ggc_ma_fde_table, &gt_pch_na_fde_table }, { &decl_scope_table, 1, sizeof (decl_scope_table), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &incomplete_types, 1, sizeof (incomplete_types), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &used_rtx_varray, 1, sizeof (used_rtx_varray), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab
gt_pch_rs_gt_dwarf2out_h[] = { { &label_num, 1, sizeof (label_num), NULL, NULL }, { &emitcount, 1, sizeof (emitcount), NULL, NULL }, { &loclabel_num, 1, sizeof (loclabel_num), NULL, NULL }, { &have_location_lists, 1, sizeof (have_location_lists), NULL, NULL }, { &ranges_table_in_use, 1, sizeof (ranges_table_in_use), NULL, NULL }, { &ranges_table_allocated, 1, sizeof (ranges_table_allocated), NULL, NULL }, { &arange_table_in_use, 1, sizeof (arange_table_in_use), NULL, NULL }, { &arange_table_allocated, 1, sizeof (arange_table_allocated), NULL, NULL }, { &pubname_table_in_use, 1, sizeof (pubname_table_in_use), NULL, NULL }, { &pubname_table_allocated, 1, sizeof (pubname_table_allocated), NULL, NULL }, { &separate_line_info_table_in_use, 1, sizeof (separate_line_info_table_in_use), NULL, NULL }, { &separate_line_info_table_allocated, 1, sizeof (separate_line_info_table_allocated), NULL, NULL }, { &line_info_table_in_use, 1, sizeof (line_info_table_in_use), NULL, NULL }, { &line_info_table_allocated, 1, sizeof (line_info_table_allocated), NULL, NULL }, { &abbrev_die_table_in_use, 1, sizeof (abbrev_die_table_in_use), NULL, NULL }, { &abbrev_die_table_allocated, 1, sizeof (abbrev_die_table_allocated), NULL, NULL }, { &file_table_last_lookup_index, 1, sizeof (file_table_last_lookup_index), NULL, NULL }, { &num_regs_saved_in_regs, 1, sizeof (num_regs_saved_in_regs), NULL, NULL }, { &regs_saved_in_regs, 1, sizeof (regs_saved_in_regs), NULL, NULL }, { &dwarf2out_cfi_label_num, 1, sizeof (dwarf2out_cfi_label_num), NULL, NULL }, { &dw2_string_counter, 1, sizeof (dw2_string_counter), NULL, NULL }, { &fde_table_in_use, 1, sizeof (fde_table_in_use), NULL, NULL }, { &fde_table_allocated, 1, sizeof (fde_table_allocated), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Emit RTL for the GCC expander. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Middle-to-low level generation of rtx code and insns. This file contains support functions for creating rtl expressions and manipulating them in the doubly-linked chain of insns. The patterns of the insns are created by machine-dependent routines in insn-emit.c, which is generated automatically from the machine description. These routines make the individual rtx's of the pattern with `gen_rtx_fmt_ee' and others in genrtl.[ch], which are automatically generated from rtl.def; what is machine dependent is the kind of rtx's they make and what arguments they use. */ /* Commonly used modes. */ enum machine_mode byte_mode; /* Mode whose width is BITS_PER_UNIT. */ enum machine_mode word_mode; /* Mode whose width is BITS_PER_WORD. */ enum machine_mode double_mode; /* Mode whose width is DOUBLE_TYPE_SIZE. */ enum machine_mode ptr_mode; /* Mode whose width is POINTER_SIZE. */ /* This is *not* reset after each function.
It gives each CODE_LABEL in the entire compilation a unique label number. */ static GTY(()) int label_num = 1; /* Highest label number in current function. Zero means use the value of label_num instead. This is nonzero only when belatedly compiling an inline function. */ static int last_label_num; /* Value label_num had when set_new_last_label_num was called. If label_num has not changed since then, last_label_num is valid. */ static int base_label_num; /* Nonzero means do not generate NOTEs for source line numbers. */ static int no_line_numbers; /* Commonly used rtx's, so that we only need space for one copy. These are initialized once for the entire compilation. All of these are unique; no other rtx-object will be equal to any of these. */ rtx global_rtl[GR_MAX]; /* Commonly used RTL for hard registers. These objects are not necessarily unique, so we allocate them separately from global_rtl. They are initialized once per compilation unit, then copied into regno_reg_rtx at the beginning of each function. */ static GTY(()) rtx static_regno_reg_rtx[FIRST_PSEUDO_REGISTER]; /* We record floating-point CONST_DOUBLEs in each floating-point mode for the values of 0, 1, and 2. For the integer entries and VOIDmode, we record a copy of const[012]_rtx. */ rtx const_tiny_rtx[3][(int) MAX_MACHINE_MODE]; rtx const_true_rtx; REAL_VALUE_TYPE dconst0; REAL_VALUE_TYPE dconst1; REAL_VALUE_TYPE dconst2; REAL_VALUE_TYPE dconst3; REAL_VALUE_TYPE dconst10; REAL_VALUE_TYPE dconstm1; REAL_VALUE_TYPE dconstm2; REAL_VALUE_TYPE dconsthalf; REAL_VALUE_TYPE dconstthird; REAL_VALUE_TYPE dconstpi; REAL_VALUE_TYPE dconste; /* All references to the following fixed hard registers go through these unique rtl objects. On machines where the frame-pointer and arg-pointer are the same register, they use the same unique object. After register allocation, other rtl objects which used to be pseudo-regs may be clobbered to refer to the frame-pointer register. But references that were originally to the frame-pointer can be distinguished from the others because they contain frame_pointer_rtx. When to use frame_pointer_rtx and hard_frame_pointer_rtx is a little tricky: until register elimination has taken place hard_frame_pointer_rtx should be used if it is being set, and frame_pointer_rtx otherwise. After register elimination hard_frame_pointer_rtx should always be used. On machines where the two registers are same (most) then these are the same. In an inline procedure, the stack and frame pointer rtxs may not be used for anything else. */ rtx static_chain_rtx; /* (REG:Pmode STATIC_CHAIN_REGNUM) */ rtx static_chain_incoming_rtx; /* (REG:Pmode STATIC_CHAIN_INCOMING_REGNUM) */ rtx pic_offset_table_rtx; /* (REG:Pmode PIC_OFFSET_TABLE_REGNUM) */ /* This is used to implement __builtin_return_address for some machines. See for instance the MIPS port. */ rtx return_address_pointer_rtx; /* (REG:Pmode RETURN_ADDRESS_POINTER_REGNUM) */ /* We make one copy of (const_int C) where C is in [- MAX_SAVED_CONST_INT, MAX_SAVED_CONST_INT] to save space during the compilation and simplify comparisons of integers. */ rtx const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1]; /* A hash table storing CONST_INTs whose absolute value is greater than MAX_SAVED_CONST_INT. */ static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def))) htab_t const_int_htab; /* A hash table storing memory attribute structures. */ static GTY ((if_marked ("ggc_marked_p"), param_is (struct mem_attrs))) htab_t mem_attrs_htab; /* A hash table storing register attribute structures. 
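Like mem_attrs_htab above, this table exists so that all REGs referring to the same declaration at the same byte offset can share a single reg_attrs record (see get_reg_attrs below). The if_marked markup makes the entries weak: a record survives a collection only while some live rtx still points to it.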
*/ static GTY ((if_marked ("ggc_marked_p"), param_is (struct reg_attrs))) htab_t reg_attrs_htab; /* A hash table storing all CONST_DOUBLEs. */ static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def))) htab_t const_double_htab; #define first_insn (cfun->emit->x_first_insn) #define last_insn (cfun->emit->x_last_insn) #define cur_insn_uid (cfun->emit->x_cur_insn_uid) #define last_location (cfun->emit->x_last_location) #define first_label_num (cfun->emit->x_first_label_num) static rtx make_jump_insn_raw (rtx); static rtx make_call_insn_raw (rtx); static rtx find_line_note_emit (rtx); static rtx change_address_1 (rtx, enum machine_mode, rtx, int); static void unshare_all_decls (tree); static void reset_used_decls (tree); static void mark_label_nuses (rtx); static hashval_t const_int_htab_hash (const void *); static int const_int_htab_eq (const void *, const void *); static hashval_t const_double_htab_hash (const void *); static int const_double_htab_eq (const void *, const void *); static rtx lookup_const_double (rtx); static hashval_t mem_attrs_htab_hash (const void *); static int mem_attrs_htab_eq (const void *, const void *); static mem_attrs *get_mem_attrs (HOST_WIDE_INT, tree, rtx, rtx, unsigned int, enum machine_mode); static hashval_t reg_attrs_htab_hash (const void *); static int reg_attrs_htab_eq (const void *, const void *); static reg_attrs *get_reg_attrs (tree, int); static tree component_ref_for_mem_expr (tree); static rtx gen_const_vector_0 (enum machine_mode); static rtx gen_complex_constant_part (enum machine_mode, rtx, int); static void copy_rtx_if_shared_1 (rtx *orig); /* Probability of the conditional branch currently proceeded by try_split. Set to -1 otherwise. */ int split_branch_probability = -1; /* Returns a hash code for X (which is a really a CONST_INT). */ static hashval_t const_int_htab_hash (const void *x) { return (hashval_t) INTVAL ((rtx) x); } /* Returns nonzero if the value represented by X (which is really a CONST_INT) is the same as that given by Y (which is really a HOST_WIDE_INT *). */ static int const_int_htab_eq (const void *x, const void *y) { return (INTVAL ((rtx) x) == *((const HOST_WIDE_INT *) y)); } /* Returns a hash code for X (which is really a CONST_DOUBLE). */ static hashval_t const_double_htab_hash (const void *x) { rtx value = (rtx) x; hashval_t h; if (GET_MODE (value) == VOIDmode) h = CONST_DOUBLE_LOW (value) ^ CONST_DOUBLE_HIGH (value); else { h = real_hash (CONST_DOUBLE_REAL_VALUE (value)); /* MODE is used in the comparison, so it should be in the hash. */ h ^= GET_MODE (value); } return h; } /* Returns nonzero if the value represented by X (really a ...) is the same as that represented by Y (really a ...) */ static int const_double_htab_eq (const void *x, const void *y) { rtx a = (rtx)x, b = (rtx)y; if (GET_MODE (a) != GET_MODE (b)) return 0; if (GET_MODE (a) == VOIDmode) return (CONST_DOUBLE_LOW (a) == CONST_DOUBLE_LOW (b) && CONST_DOUBLE_HIGH (a) == CONST_DOUBLE_HIGH (b)); else return real_identical (CONST_DOUBLE_REAL_VALUE (a), CONST_DOUBLE_REAL_VALUE (b)); } /* Returns a hash code for X (which is a really a mem_attrs *). */ static hashval_t mem_attrs_htab_hash (const void *x) { mem_attrs *p = (mem_attrs *) x; return (p->alias ^ (p->align * 1000) ^ ((p->offset ? INTVAL (p->offset) : 0) * 50000) ^ ((p->size ? INTVAL (p->size) : 0) * 2500000) ^ (size_t) p->expr); } /* Returns nonzero if the value represented by X (which is really a mem_attrs *) is the same as that given by Y (which is also really a mem_attrs *). 
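The offset and size fields are compared as rtx pointers; that is sufficient because they are either null or CONST_INTs, and CONST_INTs are shared, so equal values are represented by the same object (see gen_rtx_CONST_INT below).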
*/ static int mem_attrs_htab_eq (const void *x, const void *y) { mem_attrs *p = (mem_attrs *) x; mem_attrs *q = (mem_attrs *) y; return (p->alias == q->alias && p->expr == q->expr && p->offset == q->offset && p->size == q->size && p->align == q->align); } /* Allocate a new mem_attrs structure and insert it into the hash table if one identical to it is not already in the table. We are doing this for MEM of mode MODE. */ static mem_attrs * get_mem_attrs (HOST_WIDE_INT alias, tree expr, rtx offset, rtx size, unsigned int align, enum machine_mode mode) { mem_attrs attrs; void **slot; /* If everything is the default, we can just return zero. This must match what the corresponding MEM_* macros return when the field is not present. */ if (alias == 0 && expr == 0 && offset == 0 && (size == 0 || (mode != BLKmode && GET_MODE_SIZE (mode) == INTVAL (size))) && (STRICT_ALIGNMENT && mode != BLKmode ? align == GET_MODE_ALIGNMENT (mode) : align == BITS_PER_UNIT)) return 0; attrs.alias = alias; attrs.expr = expr; attrs.offset = offset; attrs.size = size; attrs.align = align; slot = htab_find_slot (mem_attrs_htab, &attrs, INSERT); if (*slot == 0) { *slot = ggc_alloc (sizeof (mem_attrs)); memcpy (*slot, &attrs, sizeof (mem_attrs)); } return *slot; } /* Returns a hash code for X (which is a really a reg_attrs *). */ static hashval_t reg_attrs_htab_hash (const void *x) { reg_attrs *p = (reg_attrs *) x; return ((p->offset * 1000) ^ (long) p->decl); } /* Returns nonzero if the value represented by X (which is really a reg_attrs *) is the same as that given by Y (which is also really a reg_attrs *). */ static int reg_attrs_htab_eq (const void *x, const void *y) { reg_attrs *p = (reg_attrs *) x; reg_attrs *q = (reg_attrs *) y; return (p->decl == q->decl && p->offset == q->offset); } /* Allocate a new reg_attrs structure and insert it into the hash table if one identical to it is not already in the table. We are doing this for MEM of mode MODE. */ static reg_attrs * get_reg_attrs (tree decl, int offset) { reg_attrs attrs; void **slot; /* If everything is the default, we can just return zero. */ if (decl == 0 && offset == 0) return 0; attrs.decl = decl; attrs.offset = offset; slot = htab_find_slot (reg_attrs_htab, &attrs, INSERT); if (*slot == 0) { *slot = ggc_alloc (sizeof (reg_attrs)); memcpy (*slot, &attrs, sizeof (reg_attrs)); } return *slot; } /* Generate a new REG rtx. Make sure ORIGINAL_REGNO is set properly, and don't attempt to share with the various global pieces of rtl (such as frame_pointer_rtx). */ rtx gen_raw_REG (enum machine_mode mode, int regno) { rtx x = gen_rtx_raw_REG (mode, regno); ORIGINAL_REGNO (x) = regno; return x; } /* There are some RTL codes that require special attention; the generation functions do the raw handling. If you add to this list, modify special_rtx in gengenrtl.c as well. */ rtx gen_rtx_CONST_INT (enum machine_mode mode ATTRIBUTE_UNUSED, HOST_WIDE_INT arg) { void **slot; if (arg >= - MAX_SAVED_CONST_INT && arg <= MAX_SAVED_CONST_INT) return const_int_rtx[arg + MAX_SAVED_CONST_INT]; #if STORE_FLAG_VALUE != 1 && STORE_FLAG_VALUE != -1 if (const_true_rtx && arg == STORE_FLAG_VALUE) return const_true_rtx; #endif /* Look up the CONST_INT in the hash table. 
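The key passed in is the address of ARG; const_int_htab_eq above compares that HOST_WIDE_INT against the INTVAL of each stored node, and the value itself serves as the hash. INSERT means a miss leaves an empty slot, which the statement below fills with a freshly allocated node, so every value outside the small const_int_rtx cache is also represented by exactly one CONST_INT and CONST_INTs can always be compared with pointer equality.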
*/ slot = htab_find_slot_with_hash (const_int_htab, &arg, (hashval_t) arg, INSERT); if (*slot == 0) *slot = gen_rtx_raw_CONST_INT (VOIDmode, arg); return (rtx) *slot; } rtx gen_int_mode (HOST_WIDE_INT c, enum machine_mode mode) { return GEN_INT (trunc_int_for_mode (c, mode)); } /* CONST_DOUBLEs might be created from pairs of integers, or from REAL_VALUE_TYPEs. Also, their length is known only at run time, so we cannot use gen_rtx_raw_CONST_DOUBLE. */ /* Determine whether REAL, a CONST_DOUBLE, already exists in the hash table. If so, return its counterpart; otherwise add it to the hash table and return it. */ static rtx lookup_const_double (rtx real) { void **slot = htab_find_slot (const_double_htab, real, INSERT); if (*slot == 0) *slot = real; return (rtx) *slot; } /* Return a CONST_DOUBLE rtx for a floating-point value specified by VALUE in mode MODE. */ rtx const_double_from_real_value (REAL_VALUE_TYPE value, enum machine_mode mode) { rtx real = rtx_alloc (CONST_DOUBLE); PUT_MODE (real, mode); memcpy (&CONST_DOUBLE_LOW (real), &value, sizeof (REAL_VALUE_TYPE)); return lookup_const_double (real); } /* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair of ints: I0 is the low-order word and I1 is the high-order word. Do not use this routine for non-integer modes; convert to REAL_VALUE_TYPE and use CONST_DOUBLE_FROM_REAL_VALUE. */ rtx immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, enum machine_mode mode) { rtx value; unsigned int i; if (mode != VOIDmode) { int width; if (GET_MODE_CLASS (mode) != MODE_INT && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT /* We can get a 0 for an error mark. */ && GET_MODE_CLASS (mode) != MODE_VECTOR_INT && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT) abort (); /* We clear out all bits that don't belong in MODE, unless they and our sign bit are all one. So we get either a reasonable negative value or a reasonable unsigned value for this mode. */ width = GET_MODE_BITSIZE (mode); if (width < HOST_BITS_PER_WIDE_INT && ((i0 & ((HOST_WIDE_INT) (-1) << (width - 1))) != ((HOST_WIDE_INT) (-1) << (width - 1)))) i0 &= ((HOST_WIDE_INT) 1 << width) - 1, i1 = 0; else if (width == HOST_BITS_PER_WIDE_INT && ! (i1 == ~0 && i0 < 0)) i1 = 0; else if (width > 2 * HOST_BITS_PER_WIDE_INT) /* We cannot represent this value as a constant. */ abort (); /* If this would be an entire word for the target, but is not for the host, then sign-extend on the host so that the number will look the same way on the host that it would on the target. For example, when building a 64 bit alpha hosted 32 bit sparc targeted compiler, then we want the 32 bit unsigned value -1 to be represented as a 64 bit value -1, and not as 0x00000000ffffffff. The latter confuses the sparc backend. */ if (width < HOST_BITS_PER_WIDE_INT && (i0 & ((HOST_WIDE_INT) 1 << (width - 1)))) i0 |= ((HOST_WIDE_INT) (-1) << width); /* If MODE fits within HOST_BITS_PER_WIDE_INT, always use a CONST_INT. ??? Strictly speaking, this is wrong if we create a CONST_INT for a large unsigned constant with the size of MODE being HOST_BITS_PER_WIDE_INT and later try to interpret that constant in a wider mode. In that case we will mis-interpret it as a negative number. Unfortunately, the only alternative is to make a CONST_DOUBLE for any constant in any mode if it is an unsigned constant larger than the maximum signed integer in an int on the host. However, doing this will break everyone that always expects to see a CONST_INT for SImode and smaller. 
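(Concretely, on a host with a 32-bit HOST_WIDE_INT the unsigned SImode constant 0x80000000 is stored as the CONST_INT -2147483648; read back in DImode it would appear as 0xffffffff80000000 rather than 0x0000000080000000.)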
We have always been making CONST_INTs in this case, so nothing new is being broken. */ if (width <= HOST_BITS_PER_WIDE_INT) i1 = (i0 < 0) ? ~(HOST_WIDE_INT) 0 : 0; } /* If this integer fits in one word, return a CONST_INT. */ if ((i1 == 0 && i0 >= 0) || (i1 == ~0 && i0 < 0)) return GEN_INT (i0); /* We use VOIDmode for integers. */ value = rtx_alloc (CONST_DOUBLE); PUT_MODE (value, VOIDmode); CONST_DOUBLE_LOW (value) = i0; CONST_DOUBLE_HIGH (value) = i1; for (i = 2; i < (sizeof CONST_DOUBLE_FORMAT - 1); i++) XWINT (value, i) = 0; return lookup_const_double (value); } rtx gen_rtx_REG (enum machine_mode mode, unsigned int regno) { /* In case the MD file explicitly references the frame pointer, have all such references point to the same frame pointer. This is used during frame pointer elimination to distinguish the explicit references to these registers from pseudos that happened to be assigned to them. If we have eliminated the frame pointer or arg pointer, we will be using it as a normal register, for example as a spill register. In such cases, we might be accessing it in a mode that is not Pmode and therefore cannot use the pre-allocated rtx. Also don't do this when we are making new REGs in reload, since we don't want to get confused with the real pointers. */ if (mode == Pmode && !reload_in_progress) { if (regno == FRAME_POINTER_REGNUM && (!reload_completed || frame_pointer_needed)) return frame_pointer_rtx; #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM if (regno == HARD_FRAME_POINTER_REGNUM && (!reload_completed || frame_pointer_needed)) return hard_frame_pointer_rtx; #endif #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM && HARD_FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM if (regno == ARG_POINTER_REGNUM) return arg_pointer_rtx; #endif #ifdef RETURN_ADDRESS_POINTER_REGNUM if (regno == RETURN_ADDRESS_POINTER_REGNUM) return return_address_pointer_rtx; #endif if (regno == (unsigned) PIC_OFFSET_TABLE_REGNUM && fixed_regs[PIC_OFFSET_TABLE_REGNUM]) return pic_offset_table_rtx; if (regno == STACK_POINTER_REGNUM) return stack_pointer_rtx; } #if 0 /* If the per-function register table has been set up, try to re-use an existing entry in that table to avoid useless generation of RTL. This code is disabled for now until we can fix the various backends which depend on having non-shared hard registers in some cases. Long term we want to re-enable this code as it can significantly cut down on the amount of useless RTL that gets generated. We'll also need to fix some code that runs after reload that wants to set ORIGINAL_REGNO. */ if (cfun && cfun->emit && regno_reg_rtx && regno < FIRST_PSEUDO_REGISTER && reg_raw_mode[regno] == mode) return regno_reg_rtx[regno]; #endif return gen_raw_REG (mode, regno); } rtx gen_rtx_MEM (enum machine_mode mode, rtx addr) { rtx rt = gen_rtx_raw_MEM (mode, addr); /* This field is not cleared by the mere allocation of the rtx, so we clear it here. */ MEM_ATTRS (rt) = 0; return rt; } rtx gen_rtx_SUBREG (enum machine_mode mode, rtx reg, int offset) { /* This is the most common failure type. Catch it early so we can see who does it. */ if ((offset % GET_MODE_SIZE (mode)) != 0) abort (); /* This check isn't usable right now because combine will throw arbitrary crap like a CALL into a SUBREG in gen_lowpart_for_combine so we must just eat it. */ #if 0 /* Check for this too. 
*/ if (offset >= GET_MODE_SIZE (GET_MODE (reg))) abort (); #endif return gen_rtx_raw_SUBREG (mode, reg, offset); } /* Generate a SUBREG representing the least-significant part of REG if MODE is smaller than mode of REG, otherwise paradoxical SUBREG. */ rtx gen_lowpart_SUBREG (enum machine_mode mode, rtx reg) { enum machine_mode inmode; inmode = GET_MODE (reg); if (inmode == VOIDmode) inmode = mode; return gen_rtx_SUBREG (mode, reg, subreg_lowpart_offset (mode, inmode)); } /* gen_rtvec (n, [rt1, ..., rtn]) ** ** This routine creates an rtvec and stores within it the ** pointers to rtx's which are its arguments. */ /*VARARGS1*/ rtvec gen_rtvec (int n, ...) { int i, save_n; rtx *vector; va_list p; va_start (p, n); if (n == 0) return NULL_RTVEC; /* Don't allocate an empty rtvec... */ vector = alloca (n * sizeof (rtx)); for (i = 0; i < n; i++) vector[i] = va_arg (p, rtx); /* The definition of VA_* in K&R C causes `n' to go out of scope. */ save_n = n; va_end (p); return gen_rtvec_v (save_n, vector); } rtvec gen_rtvec_v (int n, rtx *argp) { int i; rtvec rt_val; if (n == 0) return NULL_RTVEC; /* Don't allocate an empty rtvec... */ rt_val = rtvec_alloc (n); /* Allocate an rtvec... */ for (i = 0; i < n; i++) rt_val->elem[i] = *argp++; return rt_val; } /* Generate a REG rtx for a new pseudo register of mode MODE. This pseudo is assigned the next sequential register number. */ rtx gen_reg_rtx (enum machine_mode mode) { struct function *f = cfun; rtx val; /* Don't let anything called after initial flow analysis create new registers. */ if (no_new_pseudos) abort (); if (generating_concat_p && (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)) { /* For complex modes, don't make a single pseudo. Instead, make a CONCAT of two pseudos. This allows noncontiguous allocation of the real and imaginary parts, which makes much better code. Besides, allocating DCmode pseudos overstrains reload on some machines like the 386. */ rtx realpart, imagpart; enum machine_mode partmode = GET_MODE_INNER (mode); realpart = gen_reg_rtx (partmode); imagpart = gen_reg_rtx (partmode); return gen_rtx_CONCAT (mode, realpart, imagpart); } /* Make sure regno_pointer_align, and regno_reg_rtx are large enough to have an element for this pseudo reg number. */ if (reg_rtx_no == f->emit->regno_pointer_align_length) { int old_size = f->emit->regno_pointer_align_length; char *new; rtx *new1; new = ggc_realloc (f->emit->regno_pointer_align, old_size * 2); memset (new + old_size, 0, old_size); f->emit->regno_pointer_align = (unsigned char *) new; new1 = ggc_realloc (f->emit->x_regno_reg_rtx, old_size * 2 * sizeof (rtx)); memset (new1 + old_size, 0, old_size * sizeof (rtx)); regno_reg_rtx = new1; f->emit->regno_pointer_align_length = old_size * 2; } val = gen_raw_REG (mode, reg_rtx_no); regno_reg_rtx[reg_rtx_no++] = val; return val; } /* Generate a register with same attributes as REG, but offsetted by OFFSET. Do the big endian correction if needed. */ rtx gen_rtx_REG_offset (rtx reg, enum machine_mode mode, unsigned int regno, int offset) { rtx new = gen_rtx_REG (mode, regno); tree decl; HOST_WIDE_INT var_size; /* PR middle-end/14084 The problem appears when a variable is stored in a larger register and later it is used in the original mode or some mode in between or some part of variable is accessed. On little endian machines there is no problem because the REG_OFFSET of the start of the variable is the same when accessed in any mode (it is 0). However, this is not true on big endian machines. 
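(On a big endian target byte 0 of the register is its most significant byte, so a value narrower than the register occupies the bytes at the end of the register rather than the bytes starting at offset 0.)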
The offset of the start of the variable is different when accessed in different modes. When we are taking a part of the REG we have to change the OFFSET from offset WRT size of mode of REG to offset WRT size of variable. If we would not do the big endian correction the resulting REG_OFFSET would be larger than the size of the DECL. Examples of correction, for BYTES_BIG_ENDIAN WORDS_BIG_ENDIAN machine: REG.mode MODE DECL size old offset new offset description DI SI 4 4 0 int32 in SImode DI SI 1 4 0 char in SImode DI QI 1 7 0 char in QImode DI QI 4 5 1 1st element in QImode of char[4] DI HI 4 6 2 1st element in HImode of int16[2] If the size of DECL is equal or greater than the size of REG we can't do this correction because the register holds the whole variable or a part of the variable and thus the REG_OFFSET is already correct. */ decl = REG_EXPR (reg); if ((BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN) && decl != NULL && offset > 0 && GET_MODE_SIZE (GET_MODE (reg)) > GET_MODE_SIZE (mode) && ((var_size = int_size_in_bytes (TREE_TYPE (decl))) > 0 && var_size < GET_MODE_SIZE (GET_MODE (reg)))) { int offset_le; /* Convert machine endian to little endian WRT size of mode of REG. */ if (WORDS_BIG_ENDIAN) offset_le = ((GET_MODE_SIZE (GET_MODE (reg)) - 1 - offset) / UNITS_PER_WORD) * UNITS_PER_WORD; else offset_le = (offset / UNITS_PER_WORD) * UNITS_PER_WORD; if (BYTES_BIG_ENDIAN) offset_le += ((GET_MODE_SIZE (GET_MODE (reg)) - 1 - offset) % UNITS_PER_WORD); else offset_le += offset % UNITS_PER_WORD; if (offset_le >= var_size) { /* MODE is wider than the variable so the new reg will cover the whole variable so the resulting OFFSET should be 0. */ offset = 0; } else { /* Convert little endian to machine endian WRT size of variable. */ if (WORDS_BIG_ENDIAN) offset = ((var_size - 1 - offset_le) / UNITS_PER_WORD) * UNITS_PER_WORD; else offset = (offset_le / UNITS_PER_WORD) * UNITS_PER_WORD; if (BYTES_BIG_ENDIAN) offset += ((var_size - 1 - offset_le) % UNITS_PER_WORD); else offset += offset_le % UNITS_PER_WORD; } } REG_ATTRS (new) = get_reg_attrs (REG_EXPR (reg), REG_OFFSET (reg) + offset); return new; } /* Set the decl for MEM to DECL. */ void set_reg_attrs_from_mem (rtx reg, rtx mem) { if (MEM_OFFSET (mem) && GET_CODE (MEM_OFFSET (mem)) == CONST_INT) REG_ATTRS (reg) = get_reg_attrs (MEM_EXPR (mem), INTVAL (MEM_OFFSET (mem))); } /* Set the register attributes for registers contained in PARM_RTX. Use needed values from memory attributes of MEM. */ void set_reg_attrs_for_parm (rtx parm_rtx, rtx mem) { if (REG_P (parm_rtx)) set_reg_attrs_from_mem (parm_rtx, mem); else if (GET_CODE (parm_rtx) == PARALLEL) { /* Check for a NULL entry in the first slot, used to indicate that the parameter goes both on the stack and in registers. */ int i = XEXP (XVECEXP (parm_rtx, 0, 0), 0) ? 0 : 1; for (; i < XVECLEN (parm_rtx, 0); i++) { rtx x = XVECEXP (parm_rtx, 0, i); if (REG_P (XEXP (x, 0))) REG_ATTRS (XEXP (x, 0)) = get_reg_attrs (MEM_EXPR (mem), INTVAL (XEXP (x, 1))); } } } /* Assign the RTX X to declaration T. */ void set_decl_rtl (tree t, rtx x) { DECL_CHECK (t)->decl.rtl = x; if (!x) return; /* For register, we maintain the reverse information too. 
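DECL_RTL maps the declaration to its rtx; REG_ATTRS records on the REG itself (or on the register underlying a SUBREG, and on the pieces of a CONCAT or PARALLEL) which declaration it holds and at what byte offset, so that later code such as gen_rtx_REG_offset above and the debug output machinery can recover the variable from the register.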
*/ if (REG_P (x)) REG_ATTRS (x) = get_reg_attrs (t, 0); else if (GET_CODE (x) == SUBREG) REG_ATTRS (SUBREG_REG (x)) = get_reg_attrs (t, -SUBREG_BYTE (x)); if (GET_CODE (x) == CONCAT) { if (REG_P (XEXP (x, 0))) REG_ATTRS (XEXP (x, 0)) = get_reg_attrs (t, 0); if (REG_P (XEXP (x, 1))) REG_ATTRS (XEXP (x, 1)) = get_reg_attrs (t, GET_MODE_UNIT_SIZE (GET_MODE (XEXP (x, 0)))); } if (GET_CODE (x) == PARALLEL) { int i; for (i = 0; i < XVECLEN (x, 0); i++) { rtx y = XVECEXP (x, 0, i); if (REG_P (XEXP (y, 0))) REG_ATTRS (XEXP (y, 0)) = get_reg_attrs (t, INTVAL (XEXP (y, 1))); } } } /* Assign the RTX X to parameter declaration T. */ void set_decl_incoming_rtl (tree t, rtx x) { DECL_INCOMING_RTL (t) = x; if (!x) return; /* For register, we maintain the reverse information too. */ if (REG_P (x)) REG_ATTRS (x) = get_reg_attrs (t, 0); else if (GET_CODE (x) == SUBREG) REG_ATTRS (SUBREG_REG (x)) = get_reg_attrs (t, -SUBREG_BYTE (x)); if (GET_CODE (x) == CONCAT) { if (REG_P (XEXP (x, 0))) REG_ATTRS (XEXP (x, 0)) = get_reg_attrs (t, 0); if (REG_P (XEXP (x, 1))) REG_ATTRS (XEXP (x, 1)) = get_reg_attrs (t, GET_MODE_UNIT_SIZE (GET_MODE (XEXP (x, 0)))); } if (GET_CODE (x) == PARALLEL) { int i, start; /* Check for a NULL entry, used to indicate that the parameter goes both on the stack and in registers. */ if (XEXP (XVECEXP (x, 0, 0), 0)) start = 0; else start = 1; for (i = start; i < XVECLEN (x, 0); i++) { rtx y = XVECEXP (x, 0, i); if (REG_P (XEXP (y, 0))) REG_ATTRS (XEXP (y, 0)) = get_reg_attrs (t, INTVAL (XEXP (y, 1))); } } } /* Identify REG (which may be a CONCAT) as a user register. */ void mark_user_reg (rtx reg) { if (GET_CODE (reg) == CONCAT) { REG_USERVAR_P (XEXP (reg, 0)) = 1; REG_USERVAR_P (XEXP (reg, 1)) = 1; } else if (REG_P (reg)) REG_USERVAR_P (reg) = 1; else abort (); } /* Identify REG as a probable pointer register and show its alignment as ALIGN, if nonzero. */ void mark_reg_pointer (rtx reg, int align) { if (! REG_POINTER (reg)) { REG_POINTER (reg) = 1; if (align) REGNO_POINTER_ALIGN (REGNO (reg)) = align; } else if (align && align < REGNO_POINTER_ALIGN (REGNO (reg))) /* We can no-longer be sure just how aligned this pointer is. */ REGNO_POINTER_ALIGN (REGNO (reg)) = align; } /* Return 1 plus largest pseudo reg number used in the current function. */ int max_reg_num (void) { return reg_rtx_no; } /* Return 1 + the largest label number used so far in the current function. */ int max_label_num (void) { if (last_label_num && label_num == base_label_num) return last_label_num; return label_num; } /* Return first label number used in this function (if any were used). */ int get_first_label_num (void) { return first_label_num; } /* If the rtx for label was created during the expansion of a nested function, then first_label_num won't include this label number. Fix this now so that array indicies work later. */ void maybe_set_first_label_num (rtx x) { if (CODE_LABEL_NUMBER (x) < first_label_num) first_label_num = CODE_LABEL_NUMBER (x); } /* Return the final regno of X, which is a SUBREG of a hard register. */ int subreg_hard_regno (rtx x, int check_mode) { enum machine_mode mode = GET_MODE (x); unsigned int byte_offset, base_regno, final_regno; rtx reg = SUBREG_REG (x); /* This is where we attempt to catch illegal subregs created by the compiler. */ if (GET_CODE (x) != SUBREG || !REG_P (reg)) abort (); base_regno = REGNO (reg); if (base_regno >= FIRST_PSEUDO_REGISTER) abort (); if (check_mode && ! 
HARD_REGNO_MODE_OK (base_regno, GET_MODE (reg))) abort (); #ifdef ENABLE_CHECKING if (!subreg_offset_representable_p (REGNO (reg), GET_MODE (reg), SUBREG_BYTE (x), mode)) abort (); #endif /* Catch non-congruent offsets too. */ byte_offset = SUBREG_BYTE (x); if ((byte_offset % GET_MODE_SIZE (mode)) != 0) abort (); final_regno = subreg_regno (x); return final_regno; } /* Return a value representing some low-order bits of X, where the number of low-order bits is given by MODE. Note that no conversion is done between floating-point and fixed-point values, rather, the bit representation is returned. This function handles the cases in common between gen_lowpart, below, and two variants in cse.c and combine.c. These are the cases that can be safely handled at all points in the compilation. If this is not a case we can handle, return 0. */ rtx gen_lowpart_common (enum machine_mode mode, rtx x) { int msize = GET_MODE_SIZE (mode); int xsize; int offset = 0; enum machine_mode innermode; /* Unfortunately, this routine doesn't take a parameter for the mode of X, so we have to make one up. Yuk. */ innermode = GET_MODE (x); if (GET_CODE (x) == CONST_INT && msize <= HOST_BITS_PER_WIDE_INT) innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0); else if (innermode == VOIDmode) innermode = mode_for_size (HOST_BITS_PER_WIDE_INT * 2, MODE_INT, 0); xsize = GET_MODE_SIZE (innermode); if (innermode == VOIDmode || innermode == BLKmode) abort (); if (innermode == mode) return x; /* MODE must occupy no more words than the mode of X. */ if ((msize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD > ((xsize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)) return 0; /* Don't allow generating paradoxical FLOAT_MODE subregs. */ if (GET_MODE_CLASS (mode) == MODE_FLOAT && msize > xsize) return 0; offset = subreg_lowpart_offset (mode, innermode); if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND) && (GET_MODE_CLASS (mode) == MODE_INT || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)) { /* If we are getting the low-order part of something that has been sign- or zero-extended, we can either just use the object being extended or make a narrower extension. If we want an even smaller piece than the size of the object being extended, call ourselves recursively. This case is used mostly by combine and cse. */ if (GET_MODE (XEXP (x, 0)) == mode) return XEXP (x, 0); else if (msize < GET_MODE_SIZE (GET_MODE (XEXP (x, 0)))) return gen_lowpart_common (mode, XEXP (x, 0)); else if (msize < xsize) return gen_rtx_fmt_e (GET_CODE (x), mode, XEXP (x, 0)); } else if (GET_CODE (x) == SUBREG || REG_P (x) || GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR || GET_CODE (x) == CONST_DOUBLE || GET_CODE (x) == CONST_INT) return simplify_gen_subreg (mode, x, innermode, offset); /* Otherwise, we can't do this. */ return 0; } /* Return the constant real or imaginary part (which has mode MODE) of a complex value X. The IMAGPART_P argument determines whether the real or complex component should be returned. This function returns NULL_RTX if the component isn't a constant. */ static rtx gen_complex_constant_part (enum machine_mode mode, rtx x, int imagpart_p) { tree decl, part; if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == SYMBOL_REF) { decl = SYMBOL_REF_DECL (XEXP (x, 0)); if (decl != NULL_TREE && TREE_CODE (decl) == COMPLEX_CST) { part = imagpart_p ? 
TREE_IMAGPART (decl) : TREE_REALPART (decl); if (TREE_CODE (part) == REAL_CST || TREE_CODE (part) == INTEGER_CST) return expand_expr (part, NULL_RTX, mode, 0); } } return NULL_RTX; } /* Return the real part (which has mode MODE) of a complex value X. This always comes at the low address in memory. */ rtx gen_realpart (enum machine_mode mode, rtx x) { rtx part; /* Handle complex constants. */ part = gen_complex_constant_part (mode, x, 0); if (part != NULL_RTX) return part; if (WORDS_BIG_ENDIAN && GET_MODE_BITSIZE (mode) < BITS_PER_WORD && REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER) internal_error ("can't access real part of complex value in hard register"); else if (WORDS_BIG_ENDIAN) return gen_highpart (mode, x); else return gen_lowpart (mode, x); } /* Return the imaginary part (which has mode MODE) of a complex value X. This always comes at the high address in memory. */ rtx gen_imagpart (enum machine_mode mode, rtx x) { rtx part; /* Handle complex constants. */ part = gen_complex_constant_part (mode, x, 1); if (part != NULL_RTX) return part; if (WORDS_BIG_ENDIAN) return gen_lowpart (mode, x); else if (! WORDS_BIG_ENDIAN && GET_MODE_BITSIZE (mode) < BITS_PER_WORD && REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER) internal_error ("can't access imaginary part of complex value in hard register"); else return gen_highpart (mode, x); } rtx gen_highpart (enum machine_mode mode, rtx x) { unsigned int msize = GET_MODE_SIZE (mode); rtx result; /* This case loses if X is a subreg. To catch bugs early, complain if an invalid MODE is used even in other cases. */ if (msize > UNITS_PER_WORD && msize != (unsigned int) GET_MODE_UNIT_SIZE (GET_MODE (x))) abort (); result = simplify_gen_subreg (mode, x, GET_MODE (x), subreg_highpart_offset (mode, GET_MODE (x))); /* simplify_gen_subreg is not guaranteed to return a valid operand for the target if we have a MEM. gen_highpart must return a valid operand, emitting code if necessary to do so. */ if (result != NULL_RTX && MEM_P (result)) result = validize_mem (result); if (!result) abort (); return result; } /* Like gen_highpart, but accept mode of EXP operand in case EXP can be VOIDmode constant. */ rtx gen_highpart_mode (enum machine_mode outermode, enum machine_mode innermode, rtx exp) { if (GET_MODE (exp) != VOIDmode) { if (GET_MODE (exp) != innermode) abort (); return gen_highpart (outermode, exp); } return simplify_gen_subreg (outermode, exp, innermode, subreg_highpart_offset (outermode, innermode)); } /* Return offset in bytes to get OUTERMODE low part of the value in mode INNERMODE stored in memory in target format. */ unsigned int subreg_lowpart_offset (enum machine_mode outermode, enum machine_mode innermode) { unsigned int offset = 0; int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)); if (difference > 0) { if (WORDS_BIG_ENDIAN) offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; if (BYTES_BIG_ENDIAN) offset += difference % UNITS_PER_WORD; } return offset; } /* Return offset in bytes to get OUTERMODE high part of the value in mode INNERMODE stored in memory in target format. */ unsigned int subreg_highpart_offset (enum machine_mode outermode, enum machine_mode innermode) { unsigned int offset = 0; int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)); if (GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode)) abort (); if (difference > 0) { if (! WORDS_BIG_ENDIAN) offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; if (! 
BYTES_BIG_ENDIAN) offset += difference % UNITS_PER_WORD; } return offset; } /* Return 1 iff X, assumed to be a SUBREG, refers to the least significant part of its containing reg. If X is not a SUBREG, always return 1 (it is its own low part!). */ int subreg_lowpart_p (rtx x) { if (GET_CODE (x) != SUBREG) return 1; else if (GET_MODE (SUBREG_REG (x)) == VOIDmode) return 0; return (subreg_lowpart_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x))) == SUBREG_BYTE (x)); } /* Return subword OFFSET of operand OP. The word number, OFFSET, is interpreted as the word number starting at the low-order address. OFFSET 0 is the low-order word if not WORDS_BIG_ENDIAN, otherwise it is the high-order word. If we cannot extract the required word, we return zero. Otherwise, an rtx corresponding to the requested word will be returned. VALIDATE_ADDRESS is nonzero if the address should be validated. Before reload has completed, a valid address will always be returned. After reload, if a valid address cannot be returned, we return zero. If VALIDATE_ADDRESS is zero, we simply form the required address; validating it is the responsibility of the caller. MODE is the mode of OP in case it is a CONST_INT. ??? This is still rather broken for some cases. The problem for the moment is that all callers of this thing provide no 'goal mode' to tell us to work with. This exists because all callers were written in a word based SUBREG world. Now use of this function can be deprecated by simplify_subreg in most cases. */ rtx operand_subword (rtx op, unsigned int offset, int validate_address, enum machine_mode mode) { if (mode == VOIDmode) mode = GET_MODE (op); if (mode == VOIDmode) abort (); /* If OP is narrower than a word, fail. */ if (mode != BLKmode && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)) return 0; /* If we want a word outside OP, return zero. */ if (mode != BLKmode && (offset + 1) * UNITS_PER_WORD > GET_MODE_SIZE (mode)) return const0_rtx; /* Form a new MEM at the requested address. */ if (MEM_P (op)) { rtx new = adjust_address_nv (op, word_mode, offset * UNITS_PER_WORD); if (! validate_address) return new; else if (reload_completed) { if (! strict_memory_address_p (word_mode, XEXP (new, 0))) return 0; } else return replace_equiv_address (new, XEXP (new, 0)); } /* Rest can be handled by simplify_subreg. */ return simplify_gen_subreg (word_mode, op, mode, (offset * UNITS_PER_WORD)); } /* Similar to `operand_subword', but never return 0. If we can't extract the required subword, put OP into a register and try again. If that fails, abort. We always validate the address in this case. MODE is the mode of OP, in case it is CONST_INT. */ rtx operand_subword_force (rtx op, unsigned int offset, enum machine_mode mode) { rtx result = operand_subword (op, offset, 1, mode); if (result) return result; if (mode != BLKmode && mode != VOIDmode) { /* If this is a register which can not be accessed by words, copy it to a pseudo register. */ if (REG_P (op)) op = copy_to_reg (op); else op = force_reg (mode, op); } result = operand_subword (op, offset, 1, mode); if (result == 0) abort (); return result; } /* Given a compare instruction, swap the operands. A test instruction is changed into a compare of 0 against the operand. 
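Roughly, in RTL notation (an illustrative sketch, not taken from any particular target):

      (set (reg:CC flags) (compare (reg x) (reg y)))
        becomes  (set (reg:CC flags) (compare (reg y) (reg x)))
      (set (reg:CC flags) (reg x))
        becomes  (set (reg:CC flags) (compare (const_int 0) (reg x)))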
*/ void reverse_comparison (rtx insn) { rtx body = PATTERN (insn); rtx comp; if (GET_CODE (body) == SET) comp = SET_SRC (body); else comp = SET_SRC (XVECEXP (body, 0, 0)); if (GET_CODE (comp) == COMPARE) { rtx op0 = XEXP (comp, 0); rtx op1 = XEXP (comp, 1); XEXP (comp, 0) = op1; XEXP (comp, 1) = op0; } else { rtx new = gen_rtx_COMPARE (VOIDmode, CONST0_RTX (GET_MODE (comp)), comp); if (GET_CODE (body) == SET) SET_SRC (body) = new; else SET_SRC (XVECEXP (body, 0, 0)) = new; } } /* Within a MEM_EXPR, we care about either (1) a component ref of a decl, or (2) a component ref of something variable. Represent the later with a NULL expression. */ static tree component_ref_for_mem_expr (tree ref) { tree inner = TREE_OPERAND (ref, 0); if (TREE_CODE (inner) == COMPONENT_REF) inner = component_ref_for_mem_expr (inner); else { /* Now remove any conversions: they don't change what the underlying object is. Likewise for SAVE_EXPR. */ while (TREE_CODE (inner) == NOP_EXPR || TREE_CODE (inner) == CONVERT_EXPR || TREE_CODE (inner) == NON_LVALUE_EXPR || TREE_CODE (inner) == VIEW_CONVERT_EXPR || TREE_CODE (inner) == SAVE_EXPR) inner = TREE_OPERAND (inner, 0); if (! DECL_P (inner)) inner = NULL_TREE; } if (inner == TREE_OPERAND (ref, 0)) return ref; else return build (COMPONENT_REF, TREE_TYPE (ref), inner, TREE_OPERAND (ref, 1), NULL_TREE); } /* Returns 1 if both MEM_EXPR can be considered equal and 0 otherwise. */ int mem_expr_equal_p (tree expr1, tree expr2) { if (expr1 == expr2) return 1; if (! expr1 || ! expr2) return 0; if (TREE_CODE (expr1) != TREE_CODE (expr2)) return 0; if (TREE_CODE (expr1) == COMPONENT_REF) return mem_expr_equal_p (TREE_OPERAND (expr1, 0), TREE_OPERAND (expr2, 0)) && mem_expr_equal_p (TREE_OPERAND (expr1, 1), /* field decl */ TREE_OPERAND (expr2, 1)); if (TREE_CODE (expr1) == INDIRECT_REF) return mem_expr_equal_p (TREE_OPERAND (expr1, 0), TREE_OPERAND (expr2, 0)); /* Decls with different pointers can't be equal. */ if (DECL_P (expr1)) return 0; abort(); /* ARRAY_REFs, ARRAY_RANGE_REFs and BIT_FIELD_REFs should already have been resolved here. */ } /* Given REF, a MEM, and T, either the type of X or the expression corresponding to REF, set the memory attributes. OBJECTP is nonzero if we are making a new object of this type. BITPOS is nonzero if there is an offset outstanding on T that will be applied later. */ void set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp, HOST_WIDE_INT bitpos) { HOST_WIDE_INT alias = MEM_ALIAS_SET (ref); tree expr = MEM_EXPR (ref); rtx offset = MEM_OFFSET (ref); rtx size = MEM_SIZE (ref); unsigned int align = MEM_ALIGN (ref); HOST_WIDE_INT apply_bitpos = 0; tree type; /* It can happen that type_for_mode was given a mode for which there is no language-level type. In which case it returns NULL, which we can see here. */ if (t == NULL_TREE) return; type = TYPE_P (t) ? t : TREE_TYPE (t); if (type == error_mark_node) return; /* If we have already set DECL_RTL = ref, get_alias_set will get the wrong answer, as it assumes that DECL_RTL already has the right alias info. Callers should not set DECL_RTL until after the call to set_mem_attributes. */ if (DECL_P (t) && ref == DECL_RTL_IF_SET (t)) abort (); /* Get the alias set from the expression or type (perhaps using a front-end routine) and use it. */ alias = get_alias_set (t); MEM_VOLATILE_P (ref) |= TYPE_VOLATILE (type); MEM_IN_STRUCT_P (ref) = AGGREGATE_TYPE_P (type); RTX_UNCHANGING_P (ref) |= ((lang_hooks.honor_readonly && (TYPE_READONLY (type) || (t != type && TREE_READONLY (t)))) || (! 
TYPE_P (t) && TREE_CONSTANT (t))); MEM_POINTER (ref) = POINTER_TYPE_P (type); MEM_NOTRAP_P (ref) = TREE_THIS_NOTRAP (t); /* If we are making an object of this type, or if this is a DECL, we know that it is a scalar if the type is not an aggregate. */ if ((objectp || DECL_P (t)) && ! AGGREGATE_TYPE_P (type)) MEM_SCALAR_P (ref) = 1; /* We can set the alignment from the type if we are making an object, this is an INDIRECT_REF, or if TYPE_ALIGN_OK. */ if (objectp || TREE_CODE (t) == INDIRECT_REF || TYPE_ALIGN_OK (type)) align = MAX (align, TYPE_ALIGN (type)); /* If the size is known, we can set that. */ if (TYPE_SIZE_UNIT (type) && host_integerp (TYPE_SIZE_UNIT (type), 1)) size = GEN_INT (tree_low_cst (TYPE_SIZE_UNIT (type), 1)); /* If T is not a type, we may be able to deduce some more information about the expression. */ if (! TYPE_P (t)) { maybe_set_unchanging (ref, t); if (TREE_THIS_VOLATILE (t)) MEM_VOLATILE_P (ref) = 1; /* Now remove any conversions: they don't change what the underlying object is. Likewise for SAVE_EXPR. */ while (TREE_CODE (t) == NOP_EXPR || TREE_CODE (t) == CONVERT_EXPR || TREE_CODE (t) == NON_LVALUE_EXPR || TREE_CODE (t) == VIEW_CONVERT_EXPR || TREE_CODE (t) == SAVE_EXPR) t = TREE_OPERAND (t, 0); /* If this expression can't be addressed (e.g., it contains a reference to a non-addressable field), show we don't change its alias set. */ if (! can_address_p (t)) MEM_KEEP_ALIAS_SET_P (ref) = 1; /* If this is a decl, set the attributes of the MEM from it. */ if (DECL_P (t)) { expr = t; offset = const0_rtx; apply_bitpos = bitpos; size = (DECL_SIZE_UNIT (t) && host_integerp (DECL_SIZE_UNIT (t), 1) ? GEN_INT (tree_low_cst (DECL_SIZE_UNIT (t), 1)) : 0); align = DECL_ALIGN (t); } /* If this is a constant, we know the alignment. */ else if (TREE_CODE_CLASS (TREE_CODE (t)) == 'c') { align = TYPE_ALIGN (type); #ifdef CONSTANT_ALIGNMENT align = CONSTANT_ALIGNMENT (t, align); #endif } /* If this is a field reference and not a bit-field, record it. */ /* ??? There is some information that can be gleened from bit-fields, such as the word offset in the structure that might be modified. But skip it for now. */ else if (TREE_CODE (t) == COMPONENT_REF && ! DECL_BIT_FIELD (TREE_OPERAND (t, 1))) { expr = component_ref_for_mem_expr (t); offset = const0_rtx; apply_bitpos = bitpos; /* ??? Any reason the field size would be different than the size we got from the type? */ } /* If this is an array reference, look for an outer field reference. */ else if (TREE_CODE (t) == ARRAY_REF) { tree off_tree = size_zero_node; /* We can't modify t, because we use it at the end of the function. */ tree t2 = t; do { tree index = TREE_OPERAND (t2, 1); tree low_bound = array_ref_low_bound (t2); tree unit_size = array_ref_element_size (t2); /* We assume all arrays have sizes that are a multiple of a byte. First subtract the lower bound, if any, in the type of the index, then convert to sizetype and multiply by the size of the array element. */ if (! 
integer_zerop (low_bound)) index = fold (build (MINUS_EXPR, TREE_TYPE (index), index, low_bound)); off_tree = size_binop (PLUS_EXPR, size_binop (MULT_EXPR, convert (sizetype, index), unit_size), off_tree); t2 = TREE_OPERAND (t2, 0); } while (TREE_CODE (t2) == ARRAY_REF); if (DECL_P (t2)) { expr = t2; offset = NULL; if (host_integerp (off_tree, 1)) { HOST_WIDE_INT ioff = tree_low_cst (off_tree, 1); HOST_WIDE_INT aoff = (ioff & -ioff) * BITS_PER_UNIT; align = DECL_ALIGN (t2); if (aoff && (unsigned HOST_WIDE_INT) aoff < align) align = aoff; offset = GEN_INT (ioff); apply_bitpos = bitpos; } } else if (TREE_CODE (t2) == COMPONENT_REF) { expr = component_ref_for_mem_expr (t2); if (host_integerp (off_tree, 1)) { offset = GEN_INT (tree_low_cst (off_tree, 1)); apply_bitpos = bitpos; } /* ??? Any reason the field size would be different than the size we got from the type? */ } else if (flag_argument_noalias > 1 && TREE_CODE (t2) == INDIRECT_REF && TREE_CODE (TREE_OPERAND (t2, 0)) == PARM_DECL) { expr = t2; offset = NULL; } } /* If this is a Fortran indirect argument reference, record the parameter decl. */ else if (flag_argument_noalias > 1 && TREE_CODE (t) == INDIRECT_REF && TREE_CODE (TREE_OPERAND (t, 0)) == PARM_DECL) { expr = t; offset = NULL; } } /* If we modified OFFSET based on T, then subtract the outstanding bit position offset. Similarly, increase the size of the accessed object to contain the negative offset. */ if (apply_bitpos) { offset = plus_constant (offset, -(apply_bitpos / BITS_PER_UNIT)); if (size) size = plus_constant (size, apply_bitpos / BITS_PER_UNIT); } /* Now set the attributes we computed above. */ MEM_ATTRS (ref) = get_mem_attrs (alias, expr, offset, size, align, GET_MODE (ref)); /* If this is already known to be a scalar or aggregate, we are done. */ if (MEM_IN_STRUCT_P (ref) || MEM_SCALAR_P (ref)) return; /* If it is a reference into an aggregate, this is part of an aggregate. Otherwise we don't know. */ else if (TREE_CODE (t) == COMPONENT_REF || TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF || TREE_CODE (t) == BIT_FIELD_REF) MEM_IN_STRUCT_P (ref) = 1; } void set_mem_attributes (rtx ref, tree t, int objectp) { set_mem_attributes_minus_bitpos (ref, t, objectp, 0); } /* Set the decl for MEM to DECL. */ void set_mem_attrs_from_reg (rtx mem, rtx reg) { MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), REG_EXPR (reg), GEN_INT (REG_OFFSET (reg)), MEM_SIZE (mem), MEM_ALIGN (mem), GET_MODE (mem)); } /* Set the alias set of MEM to SET. */ void set_mem_alias_set (rtx mem, HOST_WIDE_INT set) { #ifdef ENABLE_CHECKING /* If the new and old alias sets don't conflict, something is wrong. */ if (!alias_sets_conflict_p (set, MEM_ALIAS_SET (mem))) abort (); #endif MEM_ATTRS (mem) = get_mem_attrs (set, MEM_EXPR (mem), MEM_OFFSET (mem), MEM_SIZE (mem), MEM_ALIGN (mem), GET_MODE (mem)); } /* Set the alignment of MEM to ALIGN bits. */ void set_mem_align (rtx mem, unsigned int align) { MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), MEM_EXPR (mem), MEM_OFFSET (mem), MEM_SIZE (mem), align, GET_MODE (mem)); } /* Set the expr for MEM to EXPR. */ void set_mem_expr (rtx mem, tree expr) { MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), expr, MEM_OFFSET (mem), MEM_SIZE (mem), MEM_ALIGN (mem), GET_MODE (mem)); } /* Set the offset of MEM to OFFSET. */ void set_mem_offset (rtx mem, rtx offset) { MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), MEM_EXPR (mem), offset, MEM_SIZE (mem), MEM_ALIGN (mem), GET_MODE (mem)); } /* Set the size of MEM to SIZE. 
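Like the other attribute setters above (set_mem_align, set_mem_expr, set_mem_offset), this simply rebuilds MEM_ATTRS with one field changed. A typical use is recording the extent of a block reference, e.g. (a sketch; DST and the constants are illustrative): set_mem_size (dst, GEN_INT (16)); set_mem_align (dst, 64);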
*/ void set_mem_size (rtx mem, rtx size) { MEM_ATTRS (mem) = get_mem_attrs (MEM_ALIAS_SET (mem), MEM_EXPR (mem), MEM_OFFSET (mem), size, MEM_ALIGN (mem), GET_MODE (mem)); } /* Return a memory reference like MEMREF, but with its mode changed to MODE and its address changed to ADDR. (VOIDmode means don't change the mode. NULL for ADDR means don't change the address.) VALIDATE is nonzero if the returned memory location is required to be valid. The memory attributes are not changed. */ static rtx change_address_1 (rtx memref, enum machine_mode mode, rtx addr, int validate) { rtx new; if (!MEM_P (memref)) abort (); if (mode == VOIDmode) mode = GET_MODE (memref); if (addr == 0) addr = XEXP (memref, 0); if (mode == GET_MODE (memref) && addr == XEXP (memref, 0) && (!validate || memory_address_p (mode, addr))) return memref; if (validate) { if (reload_in_progress || reload_completed) { if (! memory_address_p (mode, addr)) abort (); } else addr = memory_address (mode, addr); } if (rtx_equal_p (addr, XEXP (memref, 0)) && mode == GET_MODE (memref)) return memref; new = gen_rtx_MEM (mode, addr); MEM_COPY_ATTRIBUTES (new, memref); return new; } /* Like change_address_1 with VALIDATE nonzero, but we are not saying in what way we are changing MEMREF, so we only preserve the alias set. */ rtx change_address (rtx memref, enum machine_mode mode, rtx addr) { rtx new = change_address_1 (memref, mode, addr, 1), size; enum machine_mode mmode = GET_MODE (new); unsigned int align; size = mmode == BLKmode ? 0 : GEN_INT (GET_MODE_SIZE (mmode)); align = mmode == BLKmode ? BITS_PER_UNIT : GET_MODE_ALIGNMENT (mmode); /* If there are no changes, just return the original memory reference. */ if (new == memref) { if (MEM_ATTRS (memref) == 0 || (MEM_EXPR (memref) == NULL && MEM_OFFSET (memref) == NULL && MEM_SIZE (memref) == size && MEM_ALIGN (memref) == align)) return new; new = gen_rtx_MEM (mmode, XEXP (memref, 0)); MEM_COPY_ATTRIBUTES (new, memref); } MEM_ATTRS (new) = get_mem_attrs (MEM_ALIAS_SET (memref), 0, 0, size, align, mmode); return new; } /* Return a memory reference like MEMREF, but with its mode changed to MODE and its address offset by OFFSET bytes. If VALIDATE is nonzero, the memory address is forced to be valid. If ADJUST is zero, OFFSET is only used to update MEM_ATTRS and caller is responsible for adjusting MEMREF base register. */ rtx adjust_address_1 (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset, int validate, int adjust) { rtx addr = XEXP (memref, 0); rtx new; rtx memoffset = MEM_OFFSET (memref); rtx size = 0; unsigned int memalign = MEM_ALIGN (memref); /* If there are no changes, just return the original memory reference. */ if (mode == GET_MODE (memref) && !offset && (!validate || memory_address_p (mode, addr))) return memref; /* ??? Prefer to create garbage instead of creating shared rtl. This may happen even if offset is nonzero -- consider (plus (plus reg reg) const_int) -- so do this always. */ addr = copy_rtx (addr); if (adjust) { /* If MEMREF is a LO_SUM and the offset is within the alignment of the object, we can merge it into the LO_SUM. */ if (GET_MODE (memref) != BLKmode && GET_CODE (addr) == LO_SUM && offset >= 0 && (unsigned HOST_WIDE_INT) offset < GET_MODE_ALIGNMENT (GET_MODE (memref)) / BITS_PER_UNIT) addr = gen_rtx_LO_SUM (Pmode, XEXP (addr, 0), plus_constant (XEXP (addr, 1), offset)); else addr = plus_constant (addr, offset); } new = change_address_1 (memref, mode, addr, validate); /* Compute the new values of the memory attributes due to this adjustment. 
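The alignment can only stay the same or decrease: for instance, offsetting a 32-byte-aligned reference by 12 bytes leaves at most 4-byte alignment, because (12 & -12) * BITS_PER_UNIT is 32 bits.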
We add the offsets and update the alignment. */ if (memoffset) memoffset = GEN_INT (offset + INTVAL (memoffset)); /* Compute the new alignment by taking the MIN of the alignment and the lowest-order set bit in OFFSET, but don't change the alignment if OFFSET if zero. */ if (offset != 0) memalign = MIN (memalign, (unsigned HOST_WIDE_INT) (offset & -offset) * BITS_PER_UNIT); /* We can compute the size in a number of ways. */ if (GET_MODE (new) != BLKmode) size = GEN_INT (GET_MODE_SIZE (GET_MODE (new))); else if (MEM_SIZE (memref)) size = plus_constant (MEM_SIZE (memref), -offset); MEM_ATTRS (new) = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_EXPR (memref), memoffset, size, memalign, GET_MODE (new)); /* At some point, we should validate that this offset is within the object, if all the appropriate values are known. */ return new; } /* Return a memory reference like MEMREF, but with its mode changed to MODE and its address changed to ADDR, which is assumed to be MEMREF offseted by OFFSET bytes. If VALIDATE is nonzero, the memory address is forced to be valid. */ rtx adjust_automodify_address_1 (rtx memref, enum machine_mode mode, rtx addr, HOST_WIDE_INT offset, int validate) { memref = change_address_1 (memref, VOIDmode, addr, validate); return adjust_address_1 (memref, mode, offset, validate, 0); } /* Return a memory reference like MEMREF, but whose address is changed by adding OFFSET, an RTX, to it. POW2 is the highest power of two factor known to be in OFFSET (possibly 1). */ rtx offset_address (rtx memref, rtx offset, unsigned HOST_WIDE_INT pow2) { rtx new, addr = XEXP (memref, 0); new = simplify_gen_binary (PLUS, Pmode, addr, offset); /* At this point we don't know _why_ the address is invalid. It could have secondary memory references, multiplies or anything. However, if we did go and rearrange things, we can wind up not being able to recognize the magic around pic_offset_table_rtx. This stuff is fragile, and is yet another example of why it is bad to expose PIC machinery too early. */ if (! memory_address_p (GET_MODE (memref), new) && GET_CODE (addr) == PLUS && XEXP (addr, 0) == pic_offset_table_rtx) { addr = force_reg (GET_MODE (addr), addr); new = simplify_gen_binary (PLUS, Pmode, addr, offset); } update_temp_slot_address (XEXP (memref, 0), new); new = change_address_1 (memref, VOIDmode, new, 1); /* If there are no changes, just return the original memory reference. */ if (new == memref) return new; /* Update the alignment to reflect the offset. Reset the offset, which we don't know. */ MEM_ATTRS (new) = get_mem_attrs (MEM_ALIAS_SET (memref), MEM_EXPR (memref), 0, 0, MIN (MEM_ALIGN (memref), pow2 * BITS_PER_UNIT), GET_MODE (new)); return new; } /* Return a memory reference like MEMREF, but with its address changed to ADDR. The caller is asserting that the actual piece of memory pointed to is the same, just the form of the address is being changed, such as by putting something into a register. */ rtx replace_equiv_address (rtx memref, rtx addr) { /* change_address_1 copies the memory attribute structure without change and that's exactly what we want here. */ update_temp_slot_address (XEXP (memref, 0), addr); return change_address_1 (memref, VOIDmode, addr, 1); } /* Likewise, but the reference is not required to be valid. */ rtx replace_equiv_address_nv (rtx memref, rtx addr) { return change_address_1 (memref, VOIDmode, addr, 0); } /* Return a memory reference like MEMREF, but with its mode widened to MODE and offset by OFFSET. This would be used by targets that e.g. 
cannot issue QImode memory operations and have to use SImode memory operations plus masking logic. */ rtx widen_memory_access (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset) { rtx new = adjust_address_1 (memref, mode, offset, 1, 1); tree expr = MEM_EXPR (new); rtx memoffset = MEM_OFFSET (new); unsigned int size = GET_MODE_SIZE (mode); /* If there are no changes, just return the original memory reference. */ if (new == memref) return new; /* If we don't know what offset we were at within the expression, then we can't know if we've overstepped the bounds. */ if (! memoffset) expr = NULL_TREE; while (expr) { if (TREE_CODE (expr) == COMPONENT_REF) { tree field = TREE_OPERAND (expr, 1); tree offset = component_ref_field_offset (expr); if (! DECL_SIZE_UNIT (field)) { expr = NULL_TREE; break; } /* Is the field at least as large as the access? If so, ok, otherwise strip back to the containing structure. */ if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST && compare_tree_int (DECL_SIZE_UNIT (field), size) >= 0 && INTVAL (memoffset) >= 0) break; if (! host_integerp (offset, 1)) { expr = NULL_TREE; break; } expr = TREE_OPERAND (expr, 0); memoffset = (GEN_INT (INTVAL (memoffset) + tree_low_cst (offset, 1) + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) / BITS_PER_UNIT))); } /* Similarly for the decl. */ else if (DECL_P (expr) && DECL_SIZE_UNIT (expr) && TREE_CODE (DECL_SIZE_UNIT (expr)) == INTEGER_CST && compare_tree_int (DECL_SIZE_UNIT (expr), size) >= 0 && (! memoffset || INTVAL (memoffset) >= 0)) break; else { /* The widened memory access overflows the expression, which means that it could alias another expression. Zap it. */ expr = NULL_TREE; break; } } if (! expr) memoffset = NULL_RTX; /* The widened memory may alias other stuff, so zap the alias set. */ /* ??? Maybe use get_alias_set on any remaining expression. */ MEM_ATTRS (new) = get_mem_attrs (0, expr, memoffset, GEN_INT (size), MEM_ALIGN (new), mode); return new; } /* Return a newly created CODE_LABEL rtx with a unique label number. */ rtx gen_label_rtx (void) { return gen_rtx_CODE_LABEL (VOIDmode, 0, NULL_RTX, NULL_RTX, NULL, label_num++, NULL); } /* For procedure integration. */ /* Install new pointers to the first and last insns in the chain. Also, set cur_insn_uid to one higher than the last in use. Used for an inline-procedure after copying the insn chain. */ void set_new_first_and_last_insn (rtx first, rtx last) { rtx insn; first_insn = first; last_insn = last; cur_insn_uid = 0; for (insn = first; insn; insn = NEXT_INSN (insn)) cur_insn_uid = MAX (cur_insn_uid, INSN_UID (insn)); cur_insn_uid++; } /* Set the last label number found in the current function. This is used when belatedly compiling an inline function. */ void set_new_last_label_num (int last) { base_label_num = label_num; last_label_num = last; } /* Restore all variables describing the current status from the structure *P. This is used after a nested function. */ void restore_emit_status (struct function *p ATTRIBUTE_UNUSED) { last_label_num = 0; } /* Go through all the RTL insn bodies and copy any invalid shared structure. This routine should only be called once. */ static void unshare_all_rtl_1 (tree fndecl, rtx insn) { tree decl; /* Make sure that virtual parameters are not shared. */ for (decl = DECL_ARGUMENTS (fndecl); decl; decl = TREE_CHAIN (decl)) SET_DECL_RTL (decl, copy_rtx_if_shared (DECL_RTL (decl))); /* Make sure that virtual stack slots are not shared. */ unshare_all_decls (DECL_INITIAL (fndecl)); /* Unshare just about everything else. 
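That is, every insn pattern together with its REG_NOTES and LOG_LINKS; see unshare_all_rtl_in_chain below.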
*/ unshare_all_rtl_in_chain (insn); /* Make sure the addresses of stack slots found outside the insn chain (such as, in DECL_RTL of a variable) are not shared with the insn chain. This special care is necessary when the stack slot MEM does not actually appear in the insn chain. If it does appear, its address is unshared from all else at that point. */ stack_slot_list = copy_rtx_if_shared (stack_slot_list); } /* Go through all the RTL insn bodies and copy any invalid shared structure, again. This is a fairly expensive thing to do so it should be done sparingly. */ void unshare_all_rtl_again (rtx insn) { rtx p; tree decl; for (p = insn; p; p = NEXT_INSN (p)) if (INSN_P (p)) { reset_used_flags (PATTERN (p)); reset_used_flags (REG_NOTES (p)); reset_used_flags (LOG_LINKS (p)); } /* Make sure that virtual stack slots are not shared. */ reset_used_decls (DECL_INITIAL (cfun->decl)); /* Make sure that virtual parameters are not shared. */ for (decl = DECL_ARGUMENTS (cfun->decl); decl; decl = TREE_CHAIN (decl)) reset_used_flags (DECL_RTL (decl)); reset_used_flags (stack_slot_list); unshare_all_rtl_1 (cfun->decl, insn); } void unshare_all_rtl (void) { unshare_all_rtl_1 (current_function_decl, get_insns ()); } /* Check that ORIG is not marked when it should not be and mark ORIG as in use, Recursively does the same for subexpressions. */ static void verify_rtx_sharing (rtx orig, rtx insn) { rtx x = orig; int i; enum rtx_code code; const char *format_ptr; if (x == 0) return; code = GET_CODE (x); /* These types may be freely shared. */ switch (code) { case REG: case QUEUED: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case CODE_LABEL: case PC: case CC0: case SCRATCH: return; /* SCRATCH must be shared because they represent distinct values. */ case CLOBBER: if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER) return; break; case CONST: /* CONST can be shared if it contains a SYMBOL_REF. If it contains a LABEL_REF, it isn't sharable. */ if (GET_CODE (XEXP (x, 0)) == PLUS && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) return; break; case MEM: /* A MEM is allowed to be shared if its address is constant. */ if (CONSTANT_ADDRESS_P (XEXP (x, 0)) || reload_completed || reload_in_progress) return; break; default: break; } /* This rtx may not be shared. If it has already been seen, replace it with a copy of itself. */ if (RTX_FLAG (x, used)) { error ("Invalid rtl sharing found in the insn"); debug_rtx (insn); error ("Shared rtx"); debug_rtx (x); abort (); } RTX_FLAG (x, used) = 1; /* Now scan the subexpressions recursively. */ format_ptr = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++) { switch (*format_ptr++) { case 'e': verify_rtx_sharing (XEXP (x, i), insn); break; case 'E': if (XVEC (x, i) != NULL) { int j; int len = XVECLEN (x, i); for (j = 0; j < len; j++) { /* We allow sharing of ASM_OPERANDS inside single instruction. */ if (j && GET_CODE (XVECEXP (x, i, j)) == SET && GET_CODE (SET_SRC (XVECEXP (x, i, j))) == ASM_OPERANDS) verify_rtx_sharing (SET_DEST (XVECEXP (x, i, j)), insn); else verify_rtx_sharing (XVECEXP (x, i, j), insn); } } break; } } return; } /* Go through all the RTL insn bodies and check that there is no unexpected sharing in between the subexpressions. 
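This is done in two passes: first the used flags on every insn pattern, REG_NOTES list and LOG_LINKS list are cleared, then a second walk marks each rtx and aborts (after dumping the offending insn and rtx) if any rtx is reached twice.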
*/ void verify_rtl_sharing (void) { rtx p; for (p = get_insns (); p; p = NEXT_INSN (p)) if (INSN_P (p)) { reset_used_flags (PATTERN (p)); reset_used_flags (REG_NOTES (p)); reset_used_flags (LOG_LINKS (p)); } for (p = get_insns (); p; p = NEXT_INSN (p)) if (INSN_P (p)) { verify_rtx_sharing (PATTERN (p), p); verify_rtx_sharing (REG_NOTES (p), p); verify_rtx_sharing (LOG_LINKS (p), p); } } /* Go through all the RTL insn bodies and copy any invalid shared structure. Assumes the mark bits are cleared at entry. */ void unshare_all_rtl_in_chain (rtx insn) { for (; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { PATTERN (insn) = copy_rtx_if_shared (PATTERN (insn)); REG_NOTES (insn) = copy_rtx_if_shared (REG_NOTES (insn)); LOG_LINKS (insn) = copy_rtx_if_shared (LOG_LINKS (insn)); } } /* Go through all virtual stack slots of a function and copy any shared structure. */ static void unshare_all_decls (tree blk) { tree t; /* Copy shared decls. */ for (t = BLOCK_VARS (blk); t; t = TREE_CHAIN (t)) if (DECL_RTL_SET_P (t)) SET_DECL_RTL (t, copy_rtx_if_shared (DECL_RTL (t))); /* Now process sub-blocks. */ for (t = BLOCK_SUBBLOCKS (blk); t; t = TREE_CHAIN (t)) unshare_all_decls (t); } /* Go through all virtual stack slots of a function and mark them as not shared. */ static void reset_used_decls (tree blk) { tree t; /* Mark decls. */ for (t = BLOCK_VARS (blk); t; t = TREE_CHAIN (t)) if (DECL_RTL_SET_P (t)) reset_used_flags (DECL_RTL (t)); /* Now process sub-blocks. */ for (t = BLOCK_SUBBLOCKS (blk); t; t = TREE_CHAIN (t)) reset_used_decls (t); } /* Similar to `copy_rtx' except that if MAY_SHARE is present, it is placed in the result directly, rather than being copied. MAY_SHARE is either a MEM of an EXPR_LIST of MEMs. */ rtx copy_most_rtx (rtx orig, rtx may_share) { rtx copy; int i, j; RTX_CODE code; const char *format_ptr; if (orig == may_share || (GET_CODE (may_share) == EXPR_LIST && in_expr_list_p (may_share, orig))) return orig; code = GET_CODE (orig); switch (code) { case REG: case QUEUED: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case CODE_LABEL: case PC: case CC0: return orig; default: break; } copy = rtx_alloc (code); PUT_MODE (copy, GET_MODE (orig)); RTX_FLAG (copy, in_struct) = RTX_FLAG (orig, in_struct); RTX_FLAG (copy, volatil) = RTX_FLAG (orig, volatil); RTX_FLAG (copy, unchanging) = RTX_FLAG (orig, unchanging); RTX_FLAG (copy, frame_related) = RTX_FLAG (orig, frame_related); RTX_FLAG (copy, return_val) = RTX_FLAG (orig, return_val); format_ptr = GET_RTX_FORMAT (GET_CODE (copy)); for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++) { switch (*format_ptr++) { case 'e': XEXP (copy, i) = XEXP (orig, i); if (XEXP (orig, i) != NULL && XEXP (orig, i) != may_share) XEXP (copy, i) = copy_most_rtx (XEXP (orig, i), may_share); break; case 'u': XEXP (copy, i) = XEXP (orig, i); break; case 'E': case 'V': XVEC (copy, i) = XVEC (orig, i); if (XVEC (orig, i) != NULL) { XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i)); for (j = 0; j < XVECLEN (copy, i); j++) XVECEXP (copy, i, j) = copy_most_rtx (XVECEXP (orig, i, j), may_share); } break; case 'w': XWINT (copy, i) = XWINT (orig, i); break; case 'n': case 'i': XINT (copy, i) = XINT (orig, i); break; case 't': XTREE (copy, i) = XTREE (orig, i); break; case 's': case 'S': XSTR (copy, i) = XSTR (orig, i); break; case '0': X0ANY (copy, i) = X0ANY (orig, i); break; default: abort (); } } return copy; } /* Mark ORIG as in use, and return a copy of it if it was already in use. Recursively does the same for subexpressions. 
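The canonical use is in unshare_all_rtl_in_chain above, which rewrites each insn as PATTERN (insn) = copy_rtx_if_shared (PATTERN (insn)) once the used flags have been cleared.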
Uses copy_rtx_if_shared_1 to reduce stack space. */ rtx copy_rtx_if_shared (rtx orig) { copy_rtx_if_shared_1 (&orig); return orig; } /* Mark *ORIG1 as in use, and set it to a copy of it if it was already in use. Recursively does the same for subexpressions. */ static void copy_rtx_if_shared_1 (rtx *orig1) { rtx x; int i; enum rtx_code code; rtx *last_ptr; const char *format_ptr; int copied = 0; int length; /* Repeat is used to turn tail-recursion into iteration. */ repeat: x = *orig1; if (x == 0) return; code = GET_CODE (x); /* These types may be freely shared. */ switch (code) { case REG: case QUEUED: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case CODE_LABEL: case PC: case CC0: case SCRATCH: /* SCRATCH must be shared because they represent distinct values. */ return; case CLOBBER: if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER) return; break; case CONST: /* CONST can be shared if it contains a SYMBOL_REF. If it contains a LABEL_REF, it isn't sharable. */ if (GET_CODE (XEXP (x, 0)) == PLUS && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) return; break; case INSN: case JUMP_INSN: case CALL_INSN: case NOTE: case BARRIER: /* The chain of insns is not being copied. */ return; default: break; } /* This rtx may not be shared. If it has already been seen, replace it with a copy of itself. */ if (RTX_FLAG (x, used)) { rtx copy; copy = rtx_alloc (code); memcpy (copy, x, RTX_SIZE (code)); x = copy; copied = 1; } RTX_FLAG (x, used) = 1; /* Now scan the subexpressions recursively. We can store any replaced subexpressions directly into X since we know X is not shared! Any vectors in X must be copied if X was copied. */ format_ptr = GET_RTX_FORMAT (code); length = GET_RTX_LENGTH (code); last_ptr = NULL; for (i = 0; i < length; i++) { switch (*format_ptr++) { case 'e': if (last_ptr) copy_rtx_if_shared_1 (last_ptr); last_ptr = &XEXP (x, i); break; case 'E': if (XVEC (x, i) != NULL) { int j; int len = XVECLEN (x, i); /* Copy the vector iff I copied the rtx and the length is nonzero. */ if (copied && len > 0) XVEC (x, i) = gen_rtvec_v (len, XVEC (x, i)->elem); /* Call recursively on all inside the vector. */ for (j = 0; j < len; j++) { if (last_ptr) copy_rtx_if_shared_1 (last_ptr); last_ptr = &XVECEXP (x, i, j); } } break; } } *orig1 = x; if (last_ptr) { orig1 = last_ptr; goto repeat; } return; } /* Clear all the USED bits in X to allow copy_rtx_if_shared to be used to look for shared sub-parts. */ void reset_used_flags (rtx x) { int i, j; enum rtx_code code; const char *format_ptr; int length; /* Repeat is used to turn tail-recursion into iteration. */ repeat: if (x == 0) return; code = GET_CODE (x); /* These types may be freely shared so we needn't do any resetting for them. */ switch (code) { case REG: case QUEUED: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case CODE_LABEL: case PC: case CC0: return; case INSN: case JUMP_INSN: case CALL_INSN: case NOTE: case LABEL_REF: case BARRIER: /* The chain of insns is not being copied. 
*/ return; default: break; } RTX_FLAG (x, used) = 0; format_ptr = GET_RTX_FORMAT (code); length = GET_RTX_LENGTH (code); for (i = 0; i < length; i++) { switch (*format_ptr++) { case 'e': if (i == length-1) { x = XEXP (x, i); goto repeat; } reset_used_flags (XEXP (x, i)); break; case 'E': for (j = 0; j < XVECLEN (x, i); j++) reset_used_flags (XVECEXP (x, i, j)); break; } } } /* Set all the USED bits in X to allow copy_rtx_if_shared to be used to look for shared sub-parts. */ void set_used_flags (rtx x) { int i, j; enum rtx_code code; const char *format_ptr; if (x == 0) return; code = GET_CODE (x); /* These types may be freely shared so we needn't do any resetting for them. */ switch (code) { case REG: case QUEUED: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case CODE_LABEL: case PC: case CC0: return; case INSN: case JUMP_INSN: case CALL_INSN: case NOTE: case LABEL_REF: case BARRIER: /* The chain of insns is not being copied. */ return; default: break; } RTX_FLAG (x, used) = 1; format_ptr = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++) { switch (*format_ptr++) { case 'e': set_used_flags (XEXP (x, i)); break; case 'E': for (j = 0; j < XVECLEN (x, i); j++) set_used_flags (XVECEXP (x, i, j)); break; } } } /* Copy X if necessary so that it won't be altered by changes in OTHER. Return X or the rtx for the pseudo reg the value of X was copied into. OTHER must be valid as a SET_DEST. */ rtx make_safe_from (rtx x, rtx other) { while (1) switch (GET_CODE (other)) { case SUBREG: other = SUBREG_REG (other); break; case STRICT_LOW_PART: case SIGN_EXTEND: case ZERO_EXTEND: other = XEXP (other, 0); break; default: goto done; } done: if ((MEM_P (other) && ! CONSTANT_P (x) && !REG_P (x) && GET_CODE (x) != SUBREG) || (REG_P (other) && (REGNO (other) < FIRST_PSEUDO_REGISTER || reg_mentioned_p (other, x)))) { rtx temp = gen_reg_rtx (GET_MODE (x)); emit_move_insn (temp, x); return temp; } return x; } /* Emission of insns (adding them to the doubly-linked list). */ /* Return the first insn of the current sequence or current function. */ rtx get_insns (void) { return first_insn; } /* Specify a new insn as the first in the chain. */ void set_first_insn (rtx insn) { if (PREV_INSN (insn) != 0) abort (); first_insn = insn; } /* Return the last insn emitted in current sequence or current function. */ rtx get_last_insn (void) { return last_insn; } /* Specify a new insn as the last in the chain. */ void set_last_insn (rtx insn) { if (NEXT_INSN (insn) != 0) abort (); last_insn = insn; } /* Return the last insn emitted, even if it is in a sequence now pushed. */ rtx get_last_insn_anywhere (void) { struct sequence_stack *stack; if (last_insn) return last_insn; for (stack = seq_stack; stack; stack = stack->next) if (stack->last != 0) return stack->last; return 0; } /* Return the first nonnote insn emitted in current sequence or current function. This routine looks inside SEQUENCEs. */ rtx get_first_nonnote_insn (void) { rtx insn = first_insn; while (insn) { insn = next_insn (insn); if (insn == 0 || GET_CODE (insn) != NOTE) break; } return insn; } /* Return the last nonnote insn emitted in current sequence or current function. This routine looks inside SEQUENCEs. */ rtx get_last_nonnote_insn (void) { rtx insn = last_insn; while (insn) { insn = previous_insn (insn); if (insn == 0 || GET_CODE (insn) != NOTE) break; } return insn; } /* Return a number larger than any instruction's uid in this function. 
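Passes that build per-insn tables typically size them with this, e.g. (a sketch; INSN_MAP is an illustrative name): rtx *insn_map = (rtx *) xcalloc (get_max_uid (), sizeof (rtx)); and then index the table by INSN_UID (insn).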
*/ int get_max_uid (void) { return cur_insn_uid; } /* Renumber instructions so that no instruction UIDs are wasted. */ void renumber_insns (FILE *stream) { rtx insn; /* If we're not supposed to renumber instructions, don't. */ if (!flag_renumber_insns) return; /* If there aren't that many instructions, then it's not really worth renumbering them. */ if (flag_renumber_insns == 1 && get_max_uid () < 25000) return; cur_insn_uid = 1; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { if (stream) fprintf (stream, "Renumbering insn %d to %d\n", INSN_UID (insn), cur_insn_uid); INSN_UID (insn) = cur_insn_uid++; } } /* Return the next insn. If it is a SEQUENCE, return the first insn of the sequence. */ rtx next_insn (rtx insn) { if (insn) { insn = NEXT_INSN (insn); if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) insn = XVECEXP (PATTERN (insn), 0, 0); } return insn; } /* Return the previous insn. If it is a SEQUENCE, return the last insn of the sequence. */ rtx previous_insn (rtx insn) { if (insn) { insn = PREV_INSN (insn); if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) insn = XVECEXP (PATTERN (insn), 0, XVECLEN (PATTERN (insn), 0) - 1); } return insn; } /* Return the next insn after INSN that is not a NOTE. This routine does not look inside SEQUENCEs. */ rtx next_nonnote_insn (rtx insn) { while (insn) { insn = NEXT_INSN (insn); if (insn == 0 || GET_CODE (insn) != NOTE) break; } return insn; } /* Return the previous insn before INSN that is not a NOTE. This routine does not look inside SEQUENCEs. */ rtx prev_nonnote_insn (rtx insn) { while (insn) { insn = PREV_INSN (insn); if (insn == 0 || GET_CODE (insn) != NOTE) break; } return insn; } /* Return the next INSN, CALL_INSN or JUMP_INSN after INSN; or 0, if there is none. This routine does not look inside SEQUENCEs. */ rtx next_real_insn (rtx insn) { while (insn) { insn = NEXT_INSN (insn); if (insn == 0 || INSN_P (insn)) break; } return insn; } /* Return the last INSN, CALL_INSN or JUMP_INSN before INSN; or 0, if there is none. This routine does not look inside SEQUENCEs. */ rtx prev_real_insn (rtx insn) { while (insn) { insn = PREV_INSN (insn); if (insn == 0 || INSN_P (insn)) break; } return insn; } /* Return the last CALL_INSN in the current list, or 0 if there is none. This routine does not look inside SEQUENCEs. */ rtx last_call_insn (void) { rtx insn; for (insn = get_last_insn (); insn && GET_CODE (insn) != CALL_INSN; insn = PREV_INSN (insn)) ; return insn; } /* Find the next insn after INSN that really does something. This routine does not look inside SEQUENCEs. Until reload has completed, this is the same as next_real_insn. */ int active_insn_p (rtx insn) { return (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN || (GET_CODE (insn) == INSN && (! reload_completed || (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)))); } rtx next_active_insn (rtx insn) { while (insn) { insn = NEXT_INSN (insn); if (insn == 0 || active_insn_p (insn)) break; } return insn; } /* Find the last insn before INSN that really does something. This routine does not look inside SEQUENCEs. Until reload has completed, this is the same as prev_real_insn. */ rtx prev_active_insn (rtx insn) { while (insn) { insn = PREV_INSN (insn); if (insn == 0 || active_insn_p (insn)) break; } return insn; } /* Return the next CODE_LABEL after the insn INSN, or 0 if there is none. 
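Like prev_label below, this walks the insn chain directly and does not look inside SEQUENCEs.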
*/ rtx next_label (rtx insn) { while (insn) { insn = NEXT_INSN (insn); if (insn == 0 || GET_CODE (insn) == CODE_LABEL) break; } return insn; } /* Return the last CODE_LABEL before the insn INSN, or 0 if there is none. */ rtx prev_label (rtx insn) { while (insn) { insn = PREV_INSN (insn); if (insn == 0 || GET_CODE (insn) == CODE_LABEL) break; } return insn; } /* Return the last label to mark the same position as LABEL. Return null if LABEL itself is null. */ rtx skip_consecutive_labels (rtx label) { rtx insn; for (insn = label; insn != 0 && !INSN_P (insn); insn = NEXT_INSN (insn)) if (LABEL_P (insn)) label = insn; return label; } #ifdef HAVE_cc0 /* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER and REG_CC_USER notes so we can find it. */ void link_cc0_insns (rtx insn) { rtx user = next_nonnote_insn (insn); if (GET_CODE (user) == INSN && GET_CODE (PATTERN (user)) == SEQUENCE) user = XVECEXP (PATTERN (user), 0, 0); REG_NOTES (user) = gen_rtx_INSN_LIST (REG_CC_SETTER, insn, REG_NOTES (user)); REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_CC_USER, user, REG_NOTES (insn)); } /* Return the next insn that uses CC0 after INSN, which is assumed to set it. This is the inverse of prev_cc0_setter (i.e., prev_cc0_setter applied to the result of this function should yield INSN). Normally, this is simply the next insn. However, if a REG_CC_USER note is present, it contains the insn that uses CC0. Return 0 if we can't find the insn. */ rtx next_cc0_user (rtx insn) { rtx note = find_reg_note (insn, REG_CC_USER, NULL_RTX); if (note) return XEXP (note, 0); insn = next_nonnote_insn (insn); if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) insn = XVECEXP (PATTERN (insn), 0, 0); if (insn && INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn))) return insn; return 0; } /* Find the insn that set CC0 for INSN. Unless INSN has a REG_CC_SETTER note, it is the previous insn. */ rtx prev_cc0_setter (rtx insn) { rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX); if (note) return XEXP (note, 0); insn = prev_nonnote_insn (insn); if (! sets_cc0_p (PATTERN (insn))) abort (); return insn; } #endif /* Increment the label uses for all labels present in rtx. */ static void mark_label_nuses (rtx x) { enum rtx_code code; int i, j; const char *fmt; code = GET_CODE (x); if (code == LABEL_REF && LABEL_P (XEXP (x, 0))) LABEL_NUSES (XEXP (x, 0))++; fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') mark_label_nuses (XEXP (x, i)); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) mark_label_nuses (XVECEXP (x, i, j)); } } /* Try splitting insns that can be split for better scheduling. PAT is the pattern which might split. TRIAL is the insn providing PAT. LAST is nonzero if we should return the last insn of the sequence produced. If this routine succeeds in splitting, it returns the first or last replacement insn depending on the value of LAST. Otherwise, it returns TRIAL. If the insn to be returned can be split, it will be. 
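A typical call looks like (a sketch): new_insn = try_split (PATTERN (insn), insn, 1); if the value returned differs from INSN, the original insn has been replaced by the split sequence and deleted from the chain.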
*/ rtx try_split (rtx pat, rtx trial, int last) { rtx before = PREV_INSN (trial); rtx after = NEXT_INSN (trial); int has_barrier = 0; rtx tem; rtx note, seq; int probability; rtx insn_last, insn; int njumps = 0; if (any_condjump_p (trial) && (note = find_reg_note (trial, REG_BR_PROB, 0))) split_branch_probability = INTVAL (XEXP (note, 0)); probability = split_branch_probability; seq = split_insns (pat, trial); split_branch_probability = -1; /* If we are splitting a JUMP_INSN, it might be followed by a BARRIER. We may need to handle this specially. */ if (after && GET_CODE (after) == BARRIER) { has_barrier = 1; after = NEXT_INSN (after); } if (!seq) return trial; /* Avoid infinite loop if any insn of the result matches the original pattern. */ insn_last = seq; while (1) { if (INSN_P (insn_last) && rtx_equal_p (PATTERN (insn_last), pat)) return trial; if (!NEXT_INSN (insn_last)) break; insn_last = NEXT_INSN (insn_last); } /* Mark labels. */ for (insn = insn_last; insn ; insn = PREV_INSN (insn)) { if (GET_CODE (insn) == JUMP_INSN) { mark_jump_label (PATTERN (insn), insn, 0); njumps++; if (probability != -1 && any_condjump_p (insn) && !find_reg_note (insn, REG_BR_PROB, 0)) { /* We can preserve the REG_BR_PROB notes only if exactly one jump is created, otherwise the machine description is responsible for this step using split_branch_probability variable. */ if (njumps != 1) abort (); REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (probability), REG_NOTES (insn)); } } } /* If we are splitting a CALL_INSN, look for the CALL_INSN in SEQ and copy our CALL_INSN_FUNCTION_USAGE to it. */ if (GET_CODE (trial) == CALL_INSN) { for (insn = insn_last; insn ; insn = PREV_INSN (insn)) if (GET_CODE (insn) == CALL_INSN) { rtx *p = &CALL_INSN_FUNCTION_USAGE (insn); while (*p) p = &XEXP (*p, 1); *p = CALL_INSN_FUNCTION_USAGE (trial); SIBLING_CALL_P (insn) = SIBLING_CALL_P (trial); } } /* Copy notes, particularly those related to the CFG. */ for (note = REG_NOTES (trial); note; note = XEXP (note, 1)) { switch (REG_NOTE_KIND (note)) { case REG_EH_REGION: insn = insn_last; while (insn != NULL_RTX) { if (GET_CODE (insn) == CALL_INSN || (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))) REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0), REG_NOTES (insn)); insn = PREV_INSN (insn); } break; case REG_NORETURN: case REG_SETJMP: case REG_ALWAYS_RETURN: insn = insn_last; while (insn != NULL_RTX) { if (GET_CODE (insn) == CALL_INSN) REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (note), XEXP (note, 0), REG_NOTES (insn)); insn = PREV_INSN (insn); } break; case REG_NON_LOCAL_GOTO: insn = insn_last; while (insn != NULL_RTX) { if (GET_CODE (insn) == JUMP_INSN) REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (note), XEXP (note, 0), REG_NOTES (insn)); insn = PREV_INSN (insn); } break; default: break; } } /* If there are LABELS inside the split insns increment the usage count so we don't delete the label. */ if (GET_CODE (trial) == INSN) { insn = insn_last; while (insn != NULL_RTX) { if (GET_CODE (insn) == INSN) mark_label_nuses (PATTERN (insn)); insn = PREV_INSN (insn); } } tem = emit_insn_after_setloc (seq, trial, INSN_LOCATOR (trial)); delete_insn (trial); if (has_barrier) emit_barrier_after (tem); /* Recursively call try_split for each new insn created; by the time control returns here that insn will be fully split, so set LAST and continue from the insn after the one returned. We can't use next_active_insn here since AFTER may be a note. 
Ignore deleted insns, which can be occur if not optimizing. */ for (tem = NEXT_INSN (before); tem != after; tem = NEXT_INSN (tem)) if (! INSN_DELETED_P (tem) && INSN_P (tem)) tem = try_split (PATTERN (tem), tem, 1); /* Return either the first or the last insn, depending on which was requested. */ return last ? (after ? PREV_INSN (after) : last_insn) : NEXT_INSN (before); } /* Make and return an INSN rtx, initializing all its slots. Store PATTERN in the pattern slots. */ rtx make_insn_raw (rtx pattern) { rtx insn; insn = rtx_alloc (INSN); INSN_UID (insn) = cur_insn_uid++; PATTERN (insn) = pattern; INSN_CODE (insn) = -1; LOG_LINKS (insn) = NULL; REG_NOTES (insn) = NULL; INSN_LOCATOR (insn) = 0; BLOCK_FOR_INSN (insn) = NULL; #ifdef ENABLE_RTL_CHECKING if (insn && INSN_P (insn) && (returnjump_p (insn) || (GET_CODE (insn) == SET && SET_DEST (insn) == pc_rtx))) { warning ("ICE: emit_insn used where emit_jump_insn needed:\n"); debug_rtx (insn); } #endif return insn; } /* Like `make_insn_raw' but make a JUMP_INSN instead of an insn. */ static rtx make_jump_insn_raw (rtx pattern) { rtx insn; insn = rtx_alloc (JUMP_INSN); INSN_UID (insn) = cur_insn_uid++; PATTERN (insn) = pattern; INSN_CODE (insn) = -1; LOG_LINKS (insn) = NULL; REG_NOTES (insn) = NULL; JUMP_LABEL (insn) = NULL; INSN_LOCATOR (insn) = 0; BLOCK_FOR_INSN (insn) = NULL; return insn; } /* Like `make_insn_raw' but make a CALL_INSN instead of an insn. */ static rtx make_call_insn_raw (rtx pattern) { rtx insn; insn = rtx_alloc (CALL_INSN); INSN_UID (insn) = cur_insn_uid++; PATTERN (insn) = pattern; INSN_CODE (insn) = -1; LOG_LINKS (insn) = NULL; REG_NOTES (insn) = NULL; CALL_INSN_FUNCTION_USAGE (insn) = NULL; INSN_LOCATOR (insn) = 0; BLOCK_FOR_INSN (insn) = NULL; return insn; } /* Add INSN to the end of the doubly-linked list. INSN may be an INSN, JUMP_INSN, CALL_INSN, CODE_LABEL, BARRIER or NOTE. */ void add_insn (rtx insn) { PREV_INSN (insn) = last_insn; NEXT_INSN (insn) = 0; if (NULL != last_insn) NEXT_INSN (last_insn) = insn; if (NULL == first_insn) first_insn = insn; last_insn = insn; } /* Add INSN into the doubly-linked list after insn AFTER. This and the next should be the only functions called to insert an insn once delay slots have been filled since only they know how to update a SEQUENCE. */ void add_insn_after (rtx insn, rtx after) { rtx next = NEXT_INSN (after); basic_block bb; if (optimize && INSN_DELETED_P (after)) abort (); NEXT_INSN (insn) = next; PREV_INSN (insn) = after; if (next) { PREV_INSN (next) = insn; if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == SEQUENCE) PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = insn; } else if (last_insn == after) last_insn = insn; else { struct sequence_stack *stack = seq_stack; /* Scan all pending sequences too. */ for (; stack; stack = stack->next) if (after == stack->last) { stack->last = insn; break; } if (stack == 0) abort (); } if (GET_CODE (after) != BARRIER && GET_CODE (insn) != BARRIER && (bb = BLOCK_FOR_INSN (after))) { set_block_for_insn (insn, bb); if (INSN_P (insn)) bb->flags |= BB_DIRTY; /* Should not happen as first in the BB is always either NOTE or LABEL. */ if (BB_END (bb) == after /* Avoid clobbering of structure when creating new BB. 
*/ && GET_CODE (insn) != BARRIER && (GET_CODE (insn) != NOTE || NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK)) BB_END (bb) = insn; } NEXT_INSN (after) = insn; if (GET_CODE (after) == INSN && GET_CODE (PATTERN (after)) == SEQUENCE) { rtx sequence = PATTERN (after); NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = insn; } } /* Add INSN into the doubly-linked list before insn BEFORE. This and the previous should be the only functions called to insert an insn once delay slots have been filled since only they know how to update a SEQUENCE. */ void add_insn_before (rtx insn, rtx before) { rtx prev = PREV_INSN (before); basic_block bb; if (optimize && INSN_DELETED_P (before)) abort (); PREV_INSN (insn) = prev; NEXT_INSN (insn) = before; if (prev) { NEXT_INSN (prev) = insn; if (GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SEQUENCE) { rtx sequence = PATTERN (prev); NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = insn; } } else if (first_insn == before) first_insn = insn; else { struct sequence_stack *stack = seq_stack; /* Scan all pending sequences too. */ for (; stack; stack = stack->next) if (before == stack->first) { stack->first = insn; break; } if (stack == 0) abort (); } if (GET_CODE (before) != BARRIER && GET_CODE (insn) != BARRIER && (bb = BLOCK_FOR_INSN (before))) { set_block_for_insn (insn, bb); if (INSN_P (insn)) bb->flags |= BB_DIRTY; /* Should not happen as first in the BB is always either NOTE or LABEl. */ if (BB_HEAD (bb) == insn /* Avoid clobbering of structure when creating new BB. */ && GET_CODE (insn) != BARRIER && (GET_CODE (insn) != NOTE || NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK)) abort (); } PREV_INSN (before) = insn; if (GET_CODE (before) == INSN && GET_CODE (PATTERN (before)) == SEQUENCE) PREV_INSN (XVECEXP (PATTERN (before), 0, 0)) = insn; } /* Remove an insn from its doubly-linked list. This function knows how to handle sequences. */ void remove_insn (rtx insn) { rtx next = NEXT_INSN (insn); rtx prev = PREV_INSN (insn); basic_block bb; if (prev) { NEXT_INSN (prev) = next; if (GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SEQUENCE) { rtx sequence = PATTERN (prev); NEXT_INSN (XVECEXP (sequence, 0, XVECLEN (sequence, 0) - 1)) = next; } } else if (first_insn == insn) first_insn = next; else { struct sequence_stack *stack = seq_stack; /* Scan all pending sequences too. */ for (; stack; stack = stack->next) if (insn == stack->first) { stack->first = next; break; } if (stack == 0) abort (); } if (next) { PREV_INSN (next) = prev; if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == SEQUENCE) PREV_INSN (XVECEXP (PATTERN (next), 0, 0)) = prev; } else if (last_insn == insn) last_insn = prev; else { struct sequence_stack *stack = seq_stack; /* Scan all pending sequences too. */ for (; stack; stack = stack->next) if (insn == stack->last) { stack->last = prev; break; } if (stack == 0) abort (); } if (GET_CODE (insn) != BARRIER && (bb = BLOCK_FOR_INSN (insn))) { if (INSN_P (insn)) bb->flags |= BB_DIRTY; if (BB_HEAD (bb) == insn) { /* Never ever delete the basic block note without deleting whole basic block. */ if (GET_CODE (insn) == NOTE) abort (); BB_HEAD (bb) = next; } if (BB_END (bb) == insn) BB_END (bb) = prev; } } /* Append CALL_FUSAGE to the CALL_INSN_FUNCTION_USAGE for CALL_INSN. */ void add_function_usage_to (rtx call_insn, rtx call_fusage) { if (! call_insn || GET_CODE (call_insn) != CALL_INSN) abort (); /* Put the register usage information on the CALL. 
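CALL_INSN_FUNCTION_USAGE is a chain of EXPR_LIST nodes, normally wrapping USE and CLOBBER expressions for the registers involved in the call.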
If there is already some usage information, put ours at the end. */ if (CALL_INSN_FUNCTION_USAGE (call_insn)) { rtx link; for (link = CALL_INSN_FUNCTION_USAGE (call_insn); XEXP (link, 1) != 0; link = XEXP (link, 1)) ; XEXP (link, 1) = call_fusage; } else CALL_INSN_FUNCTION_USAGE (call_insn) = call_fusage; } /* Delete all insns made since FROM. FROM becomes the new last instruction. */ void delete_insns_since (rtx from) { if (from == 0) first_insn = 0; else NEXT_INSN (from) = 0; last_insn = from; } /* This function is deprecated, please use sequences instead. Move a consecutive bunch of insns to a different place in the chain. The insns to be moved are those between FROM and TO. They are moved to a new position after the insn AFTER. AFTER must not be FROM or TO or any insn in between. This function does not know about SEQUENCEs and hence should not be called after delay-slot filling has been done. */ void reorder_insns_nobb (rtx from, rtx to, rtx after) { /* Splice this bunch out of where it is now. */ if (PREV_INSN (from)) NEXT_INSN (PREV_INSN (from)) = NEXT_INSN (to); if (NEXT_INSN (to)) PREV_INSN (NEXT_INSN (to)) = PREV_INSN (from); if (last_insn == to) last_insn = PREV_INSN (from); if (first_insn == from) first_insn = NEXT_INSN (to); /* Make the new neighbors point to it and it to them. */ if (NEXT_INSN (after)) PREV_INSN (NEXT_INSN (after)) = to; NEXT_INSN (to) = NEXT_INSN (after); PREV_INSN (from) = after; NEXT_INSN (after) = from; if (after == last_insn) last_insn = to; } /* Same as function above, but take care to update BB boundaries. */ void reorder_insns (rtx from, rtx to, rtx after) { rtx prev = PREV_INSN (from); basic_block bb, bb2; reorder_insns_nobb (from, to, after); if (GET_CODE (after) != BARRIER && (bb = BLOCK_FOR_INSN (after))) { rtx x; bb->flags |= BB_DIRTY; if (GET_CODE (from) != BARRIER && (bb2 = BLOCK_FOR_INSN (from))) { if (BB_END (bb2) == to) BB_END (bb2) = prev; bb2->flags |= BB_DIRTY; } if (BB_END (bb) == after) BB_END (bb) = to; for (x = from; x != NEXT_INSN (to); x = NEXT_INSN (x)) set_block_for_insn (x, bb); } } /* Return the line note insn preceding INSN. */ static rtx find_line_note_emit (rtx insn) { if (no_line_numbers) return 0; for (; insn; insn = PREV_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0) break; return insn; } /* Remove unnecessary notes from the instruction stream. */ void remove_unnecessary_notes (void) { rtx block_stack = NULL_RTX; rtx eh_stack = NULL_RTX; rtx insn; rtx next; rtx tmp; /* We must not remove the first instruction in the function because the compiler depends on the first instruction being a note. */ for (insn = NEXT_INSN (get_insns ()); insn; insn = next) { /* Remember what's next. */ next = NEXT_INSN (insn); /* We're only interested in notes. */ if (GET_CODE (insn) != NOTE) continue; switch (NOTE_LINE_NUMBER (insn)) { case NOTE_INSN_DELETED: case NOTE_INSN_LOOP_END_TOP_COND: remove_insn (insn); break; case NOTE_INSN_EH_REGION_BEG: eh_stack = alloc_INSN_LIST (insn, eh_stack); break; case NOTE_INSN_EH_REGION_END: /* Too many end notes. */ if (eh_stack == NULL_RTX) abort (); /* Mismatched nesting. */ if (NOTE_EH_HANDLER (XEXP (eh_stack, 0)) != NOTE_EH_HANDLER (insn)) abort (); tmp = eh_stack; eh_stack = XEXP (eh_stack, 1); free_INSN_LIST_node (tmp); break; case NOTE_INSN_BLOCK_BEG: /* By now, all notes indicating lexical blocks should have NOTE_BLOCK filled in. 
*/ if (NOTE_BLOCK (insn) == NULL_TREE) abort (); block_stack = alloc_INSN_LIST (insn, block_stack); break; case NOTE_INSN_BLOCK_END: /* Too many end notes. */ if (block_stack == NULL_RTX) abort (); /* Mismatched nesting. */ if (NOTE_BLOCK (XEXP (block_stack, 0)) != NOTE_BLOCK (insn)) abort (); tmp = block_stack; block_stack = XEXP (block_stack, 1); free_INSN_LIST_node (tmp); /* Scan back to see if there are any non-note instructions between INSN and the beginning of this block. If not, then there is no PC range in the generated code that will actually be in this block, so there's no point in remembering the existence of the block. */ for (tmp = PREV_INSN (insn); tmp; tmp = PREV_INSN (tmp)) { /* This block contains a real instruction. Note that we don't include labels; if the only thing in the block is a label, then there are still no PC values that lie within the block. */ if (INSN_P (tmp)) break; /* We're only interested in NOTEs. */ if (GET_CODE (tmp) != NOTE) continue; if (NOTE_LINE_NUMBER (tmp) == NOTE_INSN_BLOCK_BEG) { /* We just verified that this BLOCK matches us with the block_stack check above. Never delete the BLOCK for the outermost scope of the function; we can refer to names from that scope even if the block notes are messed up. */ if (! is_body_block (NOTE_BLOCK (insn)) && (*debug_hooks->ignore_block) (NOTE_BLOCK (insn))) { remove_insn (tmp); remove_insn (insn); } break; } else if (NOTE_LINE_NUMBER (tmp) == NOTE_INSN_BLOCK_END) /* There's a nested block. We need to leave the current block in place since otherwise the debugger wouldn't be able to show symbols from our block in the nested block. */ break; } } } /* Too many begin notes. */ if (block_stack || eh_stack) abort (); } /* Emit insn(s) of given code and pattern at a specified place within the doubly-linked list. All of the emit_foo global entry points accept an object X which is either an insn list or a PATTERN of a single instruction. There are thus a few canonical ways to generate code and emit it at a specific place in the instruction stream. For example, consider the instruction named SPOT and the fact that we would like to emit some instructions before SPOT. We might do it like this: start_sequence (); ... emit the new instructions ... insns_head = get_insns (); end_sequence (); emit_insn_before (insns_head, SPOT); It used to be common to generate SEQUENCE rtl instead, but that is a relic of the past which no longer occurs. The reason is that SEQUENCE rtl results in much fragmented RTL memory since the SEQUENCE generated would almost certainly die right after it was created. */ /* Make X be output before the instruction BEFORE. */ rtx emit_insn_before (rtx x, rtx before) { rtx last = before; rtx insn; #ifdef ENABLE_RTL_CHECKING if (before == NULL_RTX) abort (); #endif if (x == NULL_RTX) return last; switch (GET_CODE (x)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: insn = x; while (insn) { rtx next = NEXT_INSN (insn); add_insn_before (insn, before); last = insn; insn = next; } break; #ifdef ENABLE_RTL_CHECKING case SEQUENCE: abort (); break; #endif default: last = make_insn_raw (x); add_insn_before (last, before); break; } return last; } /* Make an instruction with body X and code JUMP_INSN and output it before the instruction BEFORE. 
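   X may be either a ready-made insn list, each element of which is added
   as-is, or a bare pattern, in which case a JUMP_INSN is wrapped around it
   with make_jump_insn_raw.  For illustration (an editor's sketch, not from
   the original sources; `label' and `before' are placeholders), an
   unconditional jump could be emitted with

       emit_jump_insn_before (gen_rtx_SET (VOIDmode, pc_rtx,
                                           gen_rtx_LABEL_REF (VOIDmode, label)),
                              before);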
*/ rtx emit_jump_insn_before (rtx x, rtx before) { rtx insn, last = NULL_RTX; #ifdef ENABLE_RTL_CHECKING if (before == NULL_RTX) abort (); #endif switch (GET_CODE (x)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: insn = x; while (insn) { rtx next = NEXT_INSN (insn); add_insn_before (insn, before); last = insn; insn = next; } break; #ifdef ENABLE_RTL_CHECKING case SEQUENCE: abort (); break; #endif default: last = make_jump_insn_raw (x); add_insn_before (last, before); break; } return last; } /* Make an instruction with body X and code CALL_INSN and output it before the instruction BEFORE. */ rtx emit_call_insn_before (rtx x, rtx before) { rtx last = NULL_RTX, insn; #ifdef ENABLE_RTL_CHECKING if (before == NULL_RTX) abort (); #endif switch (GET_CODE (x)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: insn = x; while (insn) { rtx next = NEXT_INSN (insn); add_insn_before (insn, before); last = insn; insn = next; } break; #ifdef ENABLE_RTL_CHECKING case SEQUENCE: abort (); break; #endif default: last = make_call_insn_raw (x); add_insn_before (last, before); break; } return last; } /* Make an insn of code BARRIER and output it before the insn BEFORE. */ rtx emit_barrier_before (rtx before) { rtx insn = rtx_alloc (BARRIER); INSN_UID (insn) = cur_insn_uid++; add_insn_before (insn, before); return insn; } /* Emit the label LABEL before the insn BEFORE. */ rtx emit_label_before (rtx label, rtx before) { /* This can be called twice for the same label as a result of the confusion that follows a syntax error! So make it harmless. */ if (INSN_UID (label) == 0) { INSN_UID (label) = cur_insn_uid++; add_insn_before (label, before); } return label; } /* Emit a note of subtype SUBTYPE before the insn BEFORE. */ rtx emit_note_before (int subtype, rtx before) { rtx note = rtx_alloc (NOTE); INSN_UID (note) = cur_insn_uid++; #ifndef USE_MAPPED_LOCATION NOTE_SOURCE_FILE (note) = 0; #endif NOTE_LINE_NUMBER (note) = subtype; BLOCK_FOR_INSN (note) = NULL; add_insn_before (note, before); return note; } /* Helper for emit_insn_after, handles lists of instructions efficiently. */ static rtx emit_insn_after_1 (rtx, rtx); static rtx emit_insn_after_1 (rtx first, rtx after) { rtx last; rtx after_after; basic_block bb; if (GET_CODE (after) != BARRIER && (bb = BLOCK_FOR_INSN (after))) { bb->flags |= BB_DIRTY; for (last = first; NEXT_INSN (last); last = NEXT_INSN (last)) if (GET_CODE (last) != BARRIER) set_block_for_insn (last, bb); if (GET_CODE (last) != BARRIER) set_block_for_insn (last, bb); if (BB_END (bb) == after) BB_END (bb) = last; } else for (last = first; NEXT_INSN (last); last = NEXT_INSN (last)) continue; after_after = NEXT_INSN (after); NEXT_INSN (after) = first; PREV_INSN (first) = after; NEXT_INSN (last) = after_after; if (after_after) PREV_INSN (after_after) = last; if (after == last_insn) last_insn = last; return last; } /* Make X be output after the insn AFTER. 
*/ rtx emit_insn_after (rtx x, rtx after) { rtx last = after; #ifdef ENABLE_RTL_CHECKING if (after == NULL_RTX) abort (); #endif if (x == NULL_RTX) return last; switch (GET_CODE (x)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: last = emit_insn_after_1 (x, after); break; #ifdef ENABLE_RTL_CHECKING case SEQUENCE: abort (); break; #endif default: last = make_insn_raw (x); add_insn_after (last, after); break; } return last; } /* Similar to emit_insn_after, except that line notes are to be inserted so as to act as if this insn were at FROM. */ void emit_insn_after_with_line_notes (rtx x, rtx after, rtx from) { rtx from_line = find_line_note_emit (from); rtx after_line = find_line_note_emit (after); rtx insn = emit_insn_after (x, after); if (from_line) emit_note_copy_after (from_line, after); if (after_line) emit_note_copy_after (after_line, insn); } /* Make an insn of code JUMP_INSN with body X and output it after the insn AFTER. */ rtx emit_jump_insn_after (rtx x, rtx after) { rtx last; #ifdef ENABLE_RTL_CHECKING if (after == NULL_RTX) abort (); #endif switch (GET_CODE (x)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: last = emit_insn_after_1 (x, after); break; #ifdef ENABLE_RTL_CHECKING case SEQUENCE: abort (); break; #endif default: last = make_jump_insn_raw (x); add_insn_after (last, after); break; } return last; } /* Make an instruction with body X and code CALL_INSN and output it after the instruction AFTER. */ rtx emit_call_insn_after (rtx x, rtx after) { rtx last; #ifdef ENABLE_RTL_CHECKING if (after == NULL_RTX) abort (); #endif switch (GET_CODE (x)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: last = emit_insn_after_1 (x, after); break; #ifdef ENABLE_RTL_CHECKING case SEQUENCE: abort (); break; #endif default: last = make_call_insn_raw (x); add_insn_after (last, after); break; } return last; } /* Make an insn of code BARRIER and output it after the insn AFTER. */ rtx emit_barrier_after (rtx after) { rtx insn = rtx_alloc (BARRIER); INSN_UID (insn) = cur_insn_uid++; add_insn_after (insn, after); return insn; } /* Emit the label LABEL after the insn AFTER. */ rtx emit_label_after (rtx label, rtx after) { /* This can be called twice for the same label as a result of the confusion that follows a syntax error! So make it harmless. */ if (INSN_UID (label) == 0) { INSN_UID (label) = cur_insn_uid++; add_insn_after (label, after); } return label; } /* Emit a note of subtype SUBTYPE after the insn AFTER. */ rtx emit_note_after (int subtype, rtx after) { rtx note = rtx_alloc (NOTE); INSN_UID (note) = cur_insn_uid++; #ifndef USE_MAPPED_LOCATION NOTE_SOURCE_FILE (note) = 0; #endif NOTE_LINE_NUMBER (note) = subtype; BLOCK_FOR_INSN (note) = NULL; add_insn_after (note, after); return note; } /* Emit a copy of note ORIG after the insn AFTER. */ rtx emit_note_copy_after (rtx orig, rtx after) { rtx note; if (NOTE_LINE_NUMBER (orig) >= 0 && no_line_numbers) { cur_insn_uid++; return 0; } note = rtx_alloc (NOTE); INSN_UID (note) = cur_insn_uid++; NOTE_LINE_NUMBER (note) = NOTE_LINE_NUMBER (orig); NOTE_DATA (note) = NOTE_DATA (orig); BLOCK_FOR_INSN (note) = NULL; add_insn_after (note, after); return note; } /* Like emit_insn_after, but set INSN_LOCATOR according to SCOPE. 
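   Every active insn emitted between AFTER and the returned insn receives the
   given locator.  For illustration (an editor's sketch; `seq' and `insn' are
   placeholders), a typical call site forwards the locator of a nearby insn:

       emit_insn_after_setloc (seq, insn, INSN_LOCATOR (insn));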
*/ rtx emit_insn_after_setloc (rtx pattern, rtx after, int loc) { rtx last = emit_insn_after (pattern, after); if (pattern == NULL_RTX) return last; after = NEXT_INSN (after); while (1) { if (active_insn_p (after)) INSN_LOCATOR (after) = loc; if (after == last) break; after = NEXT_INSN (after); } return last; } /* Like emit_jump_insn_after, but set INSN_LOCATOR according to SCOPE. */ rtx emit_jump_insn_after_setloc (rtx pattern, rtx after, int loc) { rtx last = emit_jump_insn_after (pattern, after); if (pattern == NULL_RTX) return last; after = NEXT_INSN (after); while (1) { if (active_insn_p (after)) INSN_LOCATOR (after) = loc; if (after == last) break; after = NEXT_INSN (after); } return last; } /* Like emit_call_insn_after, but set INSN_LOCATOR according to SCOPE. */ rtx emit_call_insn_after_setloc (rtx pattern, rtx after, int loc) { rtx last = emit_call_insn_after (pattern, after); if (pattern == NULL_RTX) return last; after = NEXT_INSN (after); while (1) { if (active_insn_p (after)) INSN_LOCATOR (after) = loc; if (after == last) break; after = NEXT_INSN (after); } return last; } /* Like emit_insn_before, but set INSN_LOCATOR according to SCOPE. */ rtx emit_insn_before_setloc (rtx pattern, rtx before, int loc) { rtx first = PREV_INSN (before); rtx last = emit_insn_before (pattern, before); if (pattern == NULL_RTX) return last; first = NEXT_INSN (first); while (1) { if (active_insn_p (first)) INSN_LOCATOR (first) = loc; if (first == last) break; first = NEXT_INSN (first); } return last; } /* Take X and emit it at the end of the doubly-linked INSN list. Returns the last insn emitted. */ rtx emit_insn (rtx x) { rtx last = last_insn; rtx insn; if (x == NULL_RTX) return last; switch (GET_CODE (x)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: insn = x; while (insn) { rtx next = NEXT_INSN (insn); add_insn (insn); last = insn; insn = next; } break; #ifdef ENABLE_RTL_CHECKING case SEQUENCE: abort (); break; #endif default: last = make_insn_raw (x); add_insn (last); break; } return last; } /* Make an insn of code JUMP_INSN with pattern X and add it to the end of the doubly-linked list. */ rtx emit_jump_insn (rtx x) { rtx last = NULL_RTX, insn; switch (GET_CODE (x)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: insn = x; while (insn) { rtx next = NEXT_INSN (insn); add_insn (insn); last = insn; insn = next; } break; #ifdef ENABLE_RTL_CHECKING case SEQUENCE: abort (); break; #endif default: last = make_jump_insn_raw (x); add_insn (last); break; } return last; } /* Make an insn of code CALL_INSN with pattern X and add it to the end of the doubly-linked list. */ rtx emit_call_insn (rtx x) { rtx insn; switch (GET_CODE (x)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: insn = emit_insn (x); break; #ifdef ENABLE_RTL_CHECKING case SEQUENCE: abort (); break; #endif default: insn = make_call_insn_raw (x); add_insn (insn); break; } return insn; } /* Add the label LABEL to the end of the doubly-linked list. */ rtx emit_label (rtx label) { /* This can be called twice for the same label as a result of the confusion that follows a syntax error! So make it harmless. */ if (INSN_UID (label) == 0) { INSN_UID (label) = cur_insn_uid++; add_insn (label); } return label; } /* Make an insn of code BARRIER and add it to the end of the doubly-linked list. 
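   A BARRIER marks a point past which control cannot fall through, so one is
   normally emitted right after an unconditional jump.  For illustration (an
   editor's sketch, assuming the target provides the usual `jump' pattern via
   gen_jump; `label' is a placeholder):

       emit_jump_insn (gen_jump (label));
       emit_barrier ();

   (The emit function further below adds the barrier automatically when it is
   handed an unconditional jump or a RETURN pattern.)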
*/ rtx emit_barrier (void) { rtx barrier = rtx_alloc (BARRIER); INSN_UID (barrier) = cur_insn_uid++; add_insn (barrier); return barrier; } /* Make line numbering NOTE insn for LOCATION add it to the end of the doubly-linked list, but only if line-numbers are desired for debugging info and it doesn't match the previous one. */ rtx emit_line_note (location_t location) { rtx note; set_file_and_line_for_stmt (location); #ifdef USE_MAPPED_LOCATION if (location == last_location) return NULL_RTX; #else if (location.file && last_location.file && !strcmp (location.file, last_location.file) && location.line == last_location.line) return NULL_RTX; #endif last_location = location; if (no_line_numbers) { cur_insn_uid++; return NULL_RTX; } #ifdef USE_MAPPED_LOCATION note = emit_note ((int) location); #else note = emit_note (location.line); NOTE_SOURCE_FILE (note) = location.file; #endif return note; } /* Emit a copy of note ORIG. */ rtx emit_note_copy (rtx orig) { rtx note; if (NOTE_LINE_NUMBER (orig) >= 0 && no_line_numbers) { cur_insn_uid++; return NULL_RTX; } note = rtx_alloc (NOTE); INSN_UID (note) = cur_insn_uid++; NOTE_DATA (note) = NOTE_DATA (orig); NOTE_LINE_NUMBER (note) = NOTE_LINE_NUMBER (orig); BLOCK_FOR_INSN (note) = NULL; add_insn (note); return note; } /* Make an insn of code NOTE or type NOTE_NO and add it to the end of the doubly-linked list. */ rtx emit_note (int note_no) { rtx note; note = rtx_alloc (NOTE); INSN_UID (note) = cur_insn_uid++; NOTE_LINE_NUMBER (note) = note_no; memset (&NOTE_DATA (note), 0, sizeof (NOTE_DATA (note))); BLOCK_FOR_INSN (note) = NULL; add_insn (note); return note; } /* Cause next statement to emit a line note even if the line number has not changed. */ void force_next_line_note (void) { #ifdef USE_MAPPED_LOCATION last_location = -1; #else last_location.line = -1; #endif } /* Place a note of KIND on insn INSN with DATUM as the datum. If a note of this type already exists, remove it first. */ rtx set_unique_reg_note (rtx insn, enum reg_note kind, rtx datum) { rtx note = find_reg_note (insn, kind, NULL_RTX); switch (kind) { case REG_EQUAL: case REG_EQUIV: /* Don't add REG_EQUAL/REG_EQUIV notes if the insn has multiple sets (some callers assume single_set means the insn only has one set, when in fact it means the insn only has one * useful * set). */ if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn)) { if (note) abort (); return NULL_RTX; } /* Don't add ASM_OPERAND REG_EQUAL/REG_EQUIV notes. It serves no useful purpose and breaks eliminate_regs. */ if (GET_CODE (datum) == ASM_OPERANDS) return NULL_RTX; break; default: break; } if (note) { XEXP (note, 0) = datum; return note; } REG_NOTES (insn) = gen_rtx_EXPR_LIST (kind, datum, REG_NOTES (insn)); return REG_NOTES (insn); } /* Return an indication of which type of insn should have X as a body. The value is CODE_LABEL, INSN, CALL_INSN or JUMP_INSN. 
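   For illustration (editor's examples, derived from the cases handled below):

       (set (reg:SI 60) (reg:SI 61))             -> INSN
       (set (pc) (label_ref 23))                 -> JUMP_INSN
       (set (reg:SI 0) (call (mem:QI ...) ...))  -> CALL_INSN
       (return)                                  -> JUMP_INSN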
*/ enum rtx_code classify_insn (rtx x) { if (GET_CODE (x) == CODE_LABEL) return CODE_LABEL; if (GET_CODE (x) == CALL) return CALL_INSN; if (GET_CODE (x) == RETURN) return JUMP_INSN; if (GET_CODE (x) == SET) { if (SET_DEST (x) == pc_rtx) return JUMP_INSN; else if (GET_CODE (SET_SRC (x)) == CALL) return CALL_INSN; else return INSN; } if (GET_CODE (x) == PARALLEL) { int j; for (j = XVECLEN (x, 0) - 1; j >= 0; j--) if (GET_CODE (XVECEXP (x, 0, j)) == CALL) return CALL_INSN; else if (GET_CODE (XVECEXP (x, 0, j)) == SET && SET_DEST (XVECEXP (x, 0, j)) == pc_rtx) return JUMP_INSN; else if (GET_CODE (XVECEXP (x, 0, j)) == SET && GET_CODE (SET_SRC (XVECEXP (x, 0, j))) == CALL) return CALL_INSN; } return INSN; } /* Emit the rtl pattern X as an appropriate kind of insn. If X is a label, it is simply added into the insn chain. */ rtx emit (rtx x) { enum rtx_code code = classify_insn (x); if (code == CODE_LABEL) return emit_label (x); else if (code == INSN) return emit_insn (x); else if (code == JUMP_INSN) { rtx insn = emit_jump_insn (x); if (any_uncondjump_p (insn) || GET_CODE (x) == RETURN) return emit_barrier (); return insn; } else if (code == CALL_INSN) return emit_call_insn (x); else abort (); } /* Space for free sequence stack entries. */ static GTY ((deletable)) struct sequence_stack *free_sequence_stack; /* Begin emitting insns to a sequence. If this sequence will contain something that might cause the compiler to pop arguments to function calls (because those pops have previously been deferred; see INHIBIT_DEFER_POP for more details), use do_pending_stack_adjust before calling this function. That will ensure that the deferred pops are not accidentally emitted in the middle of this sequence. */ void start_sequence (void) { struct sequence_stack *tem; if (free_sequence_stack != NULL) { tem = free_sequence_stack; free_sequence_stack = tem->next; } else tem = ggc_alloc (sizeof (struct sequence_stack)); tem->next = seq_stack; tem->first = first_insn; tem->last = last_insn; seq_stack = tem; first_insn = 0; last_insn = 0; } /* Set up the insn chain starting with FIRST as the current sequence, saving the previously current one. See the documentation for start_sequence for more information about how to use this function. */ void push_to_sequence (rtx first) { rtx last; start_sequence (); for (last = first; last && NEXT_INSN (last); last = NEXT_INSN (last)); first_insn = first; last_insn = last; } /* Set up the insn chain from a chain stort in FIRST to LAST. */ void push_to_full_sequence (rtx first, rtx last) { start_sequence (); first_insn = first; last_insn = last; /* We really should have the end of the insn chain here. */ if (last && NEXT_INSN (last)) abort (); } /* Set up the outer-level insn chain as the current sequence, saving the previously current one. */ void push_topmost_sequence (void) { struct sequence_stack *stack, *top = NULL; start_sequence (); for (stack = seq_stack; stack; stack = stack->next) top = stack; first_insn = top->first; last_insn = top->last; } /* After emitting to the outer-level insn chain, update the outer-level insn chain, and restore the previous saved state. */ void pop_topmost_sequence (void) { struct sequence_stack *stack, *top = NULL; for (stack = seq_stack; stack; stack = stack->next) top = stack; top->first = first_insn; top->last = last_insn; end_sequence (); } /* After emitting to a sequence, restore previous saved state. To get the contents of the sequence just made, you must call `get_insns' *before* calling here. 
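   For illustration (an editor's sketch repeating the canonical pattern shown
   earlier for emit_insn_before; `spot', `seq', `dest' and `src' are
   placeholders):

       start_sequence ();
       emit_insn (gen_rtx_SET (VOIDmode, dest, src));
       seq = get_insns ();
       end_sequence ();
       emit_insn_before (seq, spot);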
If the compiler might have deferred popping arguments while generating this sequence, and this sequence will not be immediately inserted into the instruction stream, use do_pending_stack_adjust before calling get_insns. That will ensure that the deferred pops are inserted into this sequence, and not into some random location in the instruction stream. See INHIBIT_DEFER_POP for more information about deferred popping of arguments. */ void end_sequence (void) { struct sequence_stack *tem = seq_stack; first_insn = tem->first; last_insn = tem->last; seq_stack = tem->next; memset (tem, 0, sizeof (*tem)); tem->next = free_sequence_stack; free_sequence_stack = tem; } /* Return 1 if currently emitting into a sequence. */ int in_sequence_p (void) { return seq_stack != 0; } /* Put the various virtual registers into REGNO_REG_RTX. */ void init_virtual_regs (struct emit_status *es) { rtx *ptr = es->x_regno_reg_rtx; ptr[VIRTUAL_INCOMING_ARGS_REGNUM] = virtual_incoming_args_rtx; ptr[VIRTUAL_STACK_VARS_REGNUM] = virtual_stack_vars_rtx; ptr[VIRTUAL_STACK_DYNAMIC_REGNUM] = virtual_stack_dynamic_rtx; ptr[VIRTUAL_OUTGOING_ARGS_REGNUM] = virtual_outgoing_args_rtx; ptr[VIRTUAL_CFA_REGNUM] = virtual_cfa_rtx; } /* Used by copy_insn_1 to avoid copying SCRATCHes more than once. */ static rtx copy_insn_scratch_in[MAX_RECOG_OPERANDS]; static rtx copy_insn_scratch_out[MAX_RECOG_OPERANDS]; static int copy_insn_n_scratches; /* When an insn is being copied by copy_insn_1, this is nonzero if we have copied an ASM_OPERANDS. In that case, it is the original input-operand vector. */ static rtvec orig_asm_operands_vector; /* When an insn is being copied by copy_insn_1, this is nonzero if we have copied an ASM_OPERANDS. In that case, it is the copied input-operand vector. */ static rtvec copy_asm_operands_vector; /* Likewise for the constraints vector. */ static rtvec orig_asm_constraints_vector; static rtvec copy_asm_constraints_vector; /* Recursively create a new copy of an rtx for copy_insn. This function differs from copy_rtx in that it handles SCRATCHes and ASM_OPERANDs properly. Normally, this function is not used directly; use copy_insn as front end. However, you could first copy an insn pattern with copy_insn and then use this function afterwards to properly copy any REG_NOTEs containing SCRATCHes. */ rtx copy_insn_1 (rtx orig) { rtx copy; int i, j; RTX_CODE code; const char *format_ptr; code = GET_CODE (orig); switch (code) { case REG: case QUEUED: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case CODE_LABEL: case PC: case CC0: return orig; case CLOBBER: if (REG_P (XEXP (orig, 0)) && REGNO (XEXP (orig, 0)) < FIRST_PSEUDO_REGISTER) return orig; break; case SCRATCH: for (i = 0; i < copy_insn_n_scratches; i++) if (copy_insn_scratch_in[i] == orig) return copy_insn_scratch_out[i]; break; case CONST: /* CONST can be shared if it contains a SYMBOL_REF. If it contains a LABEL_REF, it isn't sharable. */ if (GET_CODE (XEXP (orig, 0)) == PLUS && GET_CODE (XEXP (XEXP (orig, 0), 0)) == SYMBOL_REF && GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT) return orig; break; /* A MEM with a constant address is not sharable. The problem is that the constant address may need to be reloaded. If the mem is shared, then reloading one copy of this mem will cause all copies to appear to have been reloaded. */ default: break; } copy = rtx_alloc (code); /* Copy the various flags, and other information. We assume that all fields need copying, and then clear the fields that should not be copied. 
That is the sensible default behavior, and forces us to explicitly document why we are *not* copying a flag. */ memcpy (copy, orig, RTX_HDR_SIZE); /* We do not copy the USED flag, which is used as a mark bit during walks over the RTL. */ RTX_FLAG (copy, used) = 0; /* We do not copy JUMP, CALL, or FRAME_RELATED for INSNs. */ if (INSN_P (orig)) { RTX_FLAG (copy, jump) = 0; RTX_FLAG (copy, call) = 0; RTX_FLAG (copy, frame_related) = 0; } format_ptr = GET_RTX_FORMAT (GET_CODE (copy)); for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++) { copy->u.fld[i] = orig->u.fld[i]; switch (*format_ptr++) { case 'e': if (XEXP (orig, i) != NULL) XEXP (copy, i) = copy_insn_1 (XEXP (orig, i)); break; case 'E': case 'V': if (XVEC (orig, i) == orig_asm_constraints_vector) XVEC (copy, i) = copy_asm_constraints_vector; else if (XVEC (orig, i) == orig_asm_operands_vector) XVEC (copy, i) = copy_asm_operands_vector; else if (XVEC (orig, i) != NULL) { XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i)); for (j = 0; j < XVECLEN (copy, i); j++) XVECEXP (copy, i, j) = copy_insn_1 (XVECEXP (orig, i, j)); } break; case 't': case 'w': case 'i': case 's': case 'S': case 'u': case '0': /* These are left unchanged. */ break; default: abort (); } } if (code == SCRATCH) { i = copy_insn_n_scratches++; if (i >= MAX_RECOG_OPERANDS) abort (); copy_insn_scratch_in[i] = orig; copy_insn_scratch_out[i] = copy; } else if (code == ASM_OPERANDS) { orig_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (orig); copy_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (copy); orig_asm_constraints_vector = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (orig); copy_asm_constraints_vector = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (copy); } return copy; } /* Create a new copy of an rtx. This function differs from copy_rtx in that it handles SCRATCHes and ASM_OPERANDs properly. INSN doesn't really have to be a full INSN; it could be just the pattern. */ rtx copy_insn (rtx insn) { copy_insn_n_scratches = 0; orig_asm_operands_vector = 0; orig_asm_constraints_vector = 0; copy_asm_operands_vector = 0; copy_asm_constraints_vector = 0; return copy_insn_1 (insn); } /* Initialize data structures and variables in this file before generating rtl for each function. */ void init_emit (void) { struct function *f = cfun; f->emit = ggc_alloc (sizeof (struct emit_status)); first_insn = NULL; last_insn = NULL; cur_insn_uid = 1; reg_rtx_no = LAST_VIRTUAL_REGISTER + 1; last_location = UNKNOWN_LOCATION; first_label_num = label_num; last_label_num = 0; seq_stack = NULL; /* Init the tables that describe all the pseudo regs. */ f->emit->regno_pointer_align_length = LAST_VIRTUAL_REGISTER + 101; f->emit->regno_pointer_align = ggc_alloc_cleared (f->emit->regno_pointer_align_length * sizeof (unsigned char)); regno_reg_rtx = ggc_alloc (f->emit->regno_pointer_align_length * sizeof (rtx)); /* Put copies of all the hard registers into regno_reg_rtx. */ memcpy (regno_reg_rtx, static_regno_reg_rtx, FIRST_PSEUDO_REGISTER * sizeof (rtx)); /* Put copies of all the virtual register rtx into regno_reg_rtx. */ init_virtual_regs (f->emit); /* Indicate that the virtual registers and stack locations are all pointers. 
*/ REG_POINTER (stack_pointer_rtx) = 1; REG_POINTER (frame_pointer_rtx) = 1; REG_POINTER (hard_frame_pointer_rtx) = 1; REG_POINTER (arg_pointer_rtx) = 1; REG_POINTER (virtual_incoming_args_rtx) = 1; REG_POINTER (virtual_stack_vars_rtx) = 1; REG_POINTER (virtual_stack_dynamic_rtx) = 1; REG_POINTER (virtual_outgoing_args_rtx) = 1; REG_POINTER (virtual_cfa_rtx) = 1; #ifdef STACK_BOUNDARY REGNO_POINTER_ALIGN (STACK_POINTER_REGNUM) = STACK_BOUNDARY; REGNO_POINTER_ALIGN (FRAME_POINTER_REGNUM) = STACK_BOUNDARY; REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = STACK_BOUNDARY; REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) = STACK_BOUNDARY; REGNO_POINTER_ALIGN (VIRTUAL_INCOMING_ARGS_REGNUM) = STACK_BOUNDARY; REGNO_POINTER_ALIGN (VIRTUAL_STACK_VARS_REGNUM) = STACK_BOUNDARY; REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM) = STACK_BOUNDARY; REGNO_POINTER_ALIGN (VIRTUAL_OUTGOING_ARGS_REGNUM) = STACK_BOUNDARY; REGNO_POINTER_ALIGN (VIRTUAL_CFA_REGNUM) = BITS_PER_WORD; #endif #ifdef INIT_EXPANDERS INIT_EXPANDERS; #endif } /* Generate the constant 0. */ static rtx gen_const_vector_0 (enum machine_mode mode) { rtx tem; rtvec v; int units, i; enum machine_mode inner; units = GET_MODE_NUNITS (mode); inner = GET_MODE_INNER (mode); v = rtvec_alloc (units); /* We need to call this function after we to set CONST0_RTX first. */ if (!CONST0_RTX (inner)) abort (); for (i = 0; i < units; ++i) RTVEC_ELT (v, i) = CONST0_RTX (inner); tem = gen_rtx_raw_CONST_VECTOR (mode, v); return tem; } /* Generate a vector like gen_rtx_raw_CONST_VEC, but use the zero vector when all elements are zero. */ rtx gen_rtx_CONST_VECTOR (enum machine_mode mode, rtvec v) { rtx inner_zero = CONST0_RTX (GET_MODE_INNER (mode)); int i; for (i = GET_MODE_NUNITS (mode) - 1; i >= 0; i--) if (RTVEC_ELT (v, i) != inner_zero) return gen_rtx_raw_CONST_VECTOR (mode, v); return CONST0_RTX (mode); } /* Create some permanent unique rtl objects shared between all functions. LINE_NUMBERS is nonzero if line numbers are to be generated. */ void init_emit_once (int line_numbers) { int i; enum machine_mode mode; enum machine_mode double_mode; /* We need reg_raw_mode, so initialize the modes now. */ init_reg_modes_once (); /* Initialize the CONST_INT, CONST_DOUBLE, and memory attribute hash tables. */ const_int_htab = htab_create_ggc (37, const_int_htab_hash, const_int_htab_eq, NULL); const_double_htab = htab_create_ggc (37, const_double_htab_hash, const_double_htab_eq, NULL); mem_attrs_htab = htab_create_ggc (37, mem_attrs_htab_hash, mem_attrs_htab_eq, NULL); reg_attrs_htab = htab_create_ggc (37, reg_attrs_htab_hash, reg_attrs_htab_eq, NULL); no_line_numbers = ! line_numbers; /* Compute the word and byte modes. */ byte_mode = VOIDmode; word_mode = VOIDmode; double_mode = VOIDmode; for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) { if (GET_MODE_BITSIZE (mode) == BITS_PER_UNIT && byte_mode == VOIDmode) byte_mode = mode; if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD && word_mode == VOIDmode) word_mode = mode; } for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) { if (GET_MODE_BITSIZE (mode) == DOUBLE_TYPE_SIZE && double_mode == VOIDmode) double_mode = mode; } ptr_mode = mode_for_size (POINTER_SIZE, GET_MODE_CLASS (Pmode), 0); /* Assign register numbers to the globally defined register rtx. This must be done at runtime because the register number field is in a union and some compilers can't initialize unions. 
*/ pc_rtx = gen_rtx_PC (VOIDmode); cc0_rtx = gen_rtx_CC0 (VOIDmode); stack_pointer_rtx = gen_raw_REG (Pmode, STACK_POINTER_REGNUM); frame_pointer_rtx = gen_raw_REG (Pmode, FRAME_POINTER_REGNUM); if (hard_frame_pointer_rtx == 0) hard_frame_pointer_rtx = gen_raw_REG (Pmode, HARD_FRAME_POINTER_REGNUM); if (arg_pointer_rtx == 0) arg_pointer_rtx = gen_raw_REG (Pmode, ARG_POINTER_REGNUM); virtual_incoming_args_rtx = gen_raw_REG (Pmode, VIRTUAL_INCOMING_ARGS_REGNUM); virtual_stack_vars_rtx = gen_raw_REG (Pmode, VIRTUAL_STACK_VARS_REGNUM); virtual_stack_dynamic_rtx = gen_raw_REG (Pmode, VIRTUAL_STACK_DYNAMIC_REGNUM); virtual_outgoing_args_rtx = gen_raw_REG (Pmode, VIRTUAL_OUTGOING_ARGS_REGNUM); virtual_cfa_rtx = gen_raw_REG (Pmode, VIRTUAL_CFA_REGNUM); /* Initialize RTL for commonly used hard registers. These are copied into regno_reg_rtx as we begin to compile each function. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) static_regno_reg_rtx[i] = gen_raw_REG (reg_raw_mode[i], i); #ifdef INIT_EXPANDERS /* This is to initialize {init|mark|free}_machine_status before the first call to push_function_context_to. This is needed by the Chill front end which calls push_function_context_to before the first call to init_function_start. */ INIT_EXPANDERS; #endif /* Create the unique rtx's for certain rtx codes and operand values. */ /* Don't use gen_rtx_CONST_INT here since gen_rtx_CONST_INT in this case tries to use these variables. */ for (i = - MAX_SAVED_CONST_INT; i <= MAX_SAVED_CONST_INT; i++) const_int_rtx[i + MAX_SAVED_CONST_INT] = gen_rtx_raw_CONST_INT (VOIDmode, (HOST_WIDE_INT) i); if (STORE_FLAG_VALUE >= - MAX_SAVED_CONST_INT && STORE_FLAG_VALUE <= MAX_SAVED_CONST_INT) const_true_rtx = const_int_rtx[STORE_FLAG_VALUE + MAX_SAVED_CONST_INT]; else const_true_rtx = gen_rtx_CONST_INT (VOIDmode, STORE_FLAG_VALUE); REAL_VALUE_FROM_INT (dconst0, 0, 0, double_mode); REAL_VALUE_FROM_INT (dconst1, 1, 0, double_mode); REAL_VALUE_FROM_INT (dconst2, 2, 0, double_mode); REAL_VALUE_FROM_INT (dconst3, 3, 0, double_mode); REAL_VALUE_FROM_INT (dconst10, 10, 0, double_mode); REAL_VALUE_FROM_INT (dconstm1, -1, -1, double_mode); REAL_VALUE_FROM_INT (dconstm2, -2, -1, double_mode); dconsthalf = dconst1; SET_REAL_EXP (&dconsthalf, REAL_EXP (&dconsthalf) - 1); real_arithmetic (&dconstthird, RDIV_EXPR, &dconst1, &dconst3); /* Initialize mathematical constants for constant folding builtins. These constants need to be given to at least 160 bits precision. */ real_from_string (&dconstpi, "3.1415926535897932384626433832795028841971693993751058209749445923078"); real_from_string (&dconste, "2.7182818284590452353602874713526624977572470936999595749669676277241"); for (i = 0; i < (int) ARRAY_SIZE (const_tiny_rtx); i++) { REAL_VALUE_TYPE *r = (i == 0 ? &dconst0 : i == 1 ? 
&dconst1 : &dconst2); for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) const_tiny_rtx[i][(int) mode] = CONST_DOUBLE_FROM_REAL_VALUE (*r, mode); const_tiny_rtx[i][(int) VOIDmode] = GEN_INT (i); for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) const_tiny_rtx[i][(int) mode] = GEN_INT (i); for (mode = GET_CLASS_NARROWEST_MODE (MODE_PARTIAL_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) const_tiny_rtx[i][(int) mode] = GEN_INT (i); } for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) const_tiny_rtx[0][(int) mode] = gen_const_vector_0 (mode); for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_FLOAT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) const_tiny_rtx[0][(int) mode] = gen_const_vector_0 (mode); for (i = (int) CCmode; i < (int) MAX_MACHINE_MODE; ++i) if (GET_MODE_CLASS ((enum machine_mode) i) == MODE_CC) const_tiny_rtx[0][i] = const0_rtx; const_tiny_rtx[0][(int) BImode] = const0_rtx; if (STORE_FLAG_VALUE == 1) const_tiny_rtx[1][(int) BImode] = const1_rtx; #ifdef RETURN_ADDRESS_POINTER_REGNUM return_address_pointer_rtx = gen_raw_REG (Pmode, RETURN_ADDRESS_POINTER_REGNUM); #endif #ifdef STATIC_CHAIN_REGNUM static_chain_rtx = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM); #ifdef STATIC_CHAIN_INCOMING_REGNUM if (STATIC_CHAIN_INCOMING_REGNUM != STATIC_CHAIN_REGNUM) static_chain_incoming_rtx = gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM); else #endif static_chain_incoming_rtx = static_chain_rtx; #endif #ifdef STATIC_CHAIN static_chain_rtx = STATIC_CHAIN; #ifdef STATIC_CHAIN_INCOMING static_chain_incoming_rtx = STATIC_CHAIN_INCOMING; #else static_chain_incoming_rtx = static_chain_rtx; #endif #endif if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) pic_offset_table_rtx = gen_raw_REG (Pmode, PIC_OFFSET_TABLE_REGNUM); } /* Query and clear/ restore no_line_numbers. This is used by the switch / case handling in stmt.c to give proper line numbers in warnings about unreachable code. */ int force_line_numbers (void) { int old = no_line_numbers; no_line_numbers = 0; if (old) force_next_line_note (); return old; } void restore_line_number_status (int old_value) { no_line_numbers = old_value; } /* Produce exact duplicate of insn INSN after AFTER. Care updating of libcall regions if present. */ rtx emit_copy_of_insn_after (rtx insn, rtx after) { rtx new; rtx note1, note2, link; switch (GET_CODE (insn)) { case INSN: new = emit_insn_after (copy_insn (PATTERN (insn)), after); break; case JUMP_INSN: new = emit_jump_insn_after (copy_insn (PATTERN (insn)), after); break; case CALL_INSN: new = emit_call_insn_after (copy_insn (PATTERN (insn)), after); if (CALL_INSN_FUNCTION_USAGE (insn)) CALL_INSN_FUNCTION_USAGE (new) = copy_insn (CALL_INSN_FUNCTION_USAGE (insn)); SIBLING_CALL_P (new) = SIBLING_CALL_P (insn); CONST_OR_PURE_CALL_P (new) = CONST_OR_PURE_CALL_P (insn); break; default: abort (); } /* Update LABEL_NUSES. */ mark_jump_label (PATTERN (new), new, 0); INSN_LOCATOR (new) = INSN_LOCATOR (insn); /* Copy all REG_NOTES except REG_LABEL since mark_jump_label will make them. 
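   REG_NOTES is itself a chain of EXPR_LIST / INSN_LIST nodes, for example
   (an editor's illustration, not taken from the original sources):

       (expr_list:REG_EQUAL (const_int 4)
          (insn_list:REG_RETVAL (insn 12)
             (nil)))

   The loop below rebuilds each such link for the copy with copy_insn_1,
   preserving the note kind and datum of every link that is not REG_LABEL.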
*/ for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) != REG_LABEL) { if (GET_CODE (link) == EXPR_LIST) REG_NOTES (new) = copy_insn_1 (gen_rtx_EXPR_LIST (REG_NOTE_KIND (link), XEXP (link, 0), REG_NOTES (new))); else REG_NOTES (new) = copy_insn_1 (gen_rtx_INSN_LIST (REG_NOTE_KIND (link), XEXP (link, 0), REG_NOTES (new))); } /* Fix the libcall sequences. */ if ((note1 = find_reg_note (new, REG_RETVAL, NULL_RTX)) != NULL) { rtx p = new; while ((note2 = find_reg_note (p, REG_LIBCALL, NULL_RTX)) == NULL) p = PREV_INSN (p); XEXP (note1, 0) = p; XEXP (note2, 0) = new; } INSN_CODE (new) = INSN_CODE (insn); return new; } static GTY((deletable)) rtx hard_reg_clobbers [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER]; rtx gen_hard_reg_clobber (enum machine_mode mode, unsigned int regno) { if (hard_reg_clobbers[mode][regno]) return hard_reg_clobbers[mode][regno]; else return (hard_reg_clobbers[mode][regno] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (mode, regno))); } /* Type information for emit-rtl.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_emit_rtl_h[] = { { &static_regno_reg_rtx[0], 1 * (FIRST_PSEUDO_REGISTER), sizeof (static_regno_reg_rtx[0]), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_ggc_rd_gt_emit_rtl_h[] = { { &hard_reg_clobbers, 1, sizeof (hard_reg_clobbers), NULL, NULL }, { &free_sequence_stack, 1, sizeof (free_sequence_stack), NULL, NULL }, LAST_GGC_ROOT_TAB }; const struct ggc_cache_tab gt_ggc_rc_gt_emit_rtl_h[] = { { &const_double_htab, 1, sizeof (const_double_htab), >_ggc_mx_rtx_def, >_pch_nx_rtx_def, &ggc_marked_p }, { ®_attrs_htab, 1, sizeof (reg_attrs_htab), >_ggc_mx_reg_attrs, >_pch_nx_reg_attrs, &ggc_marked_p }, { &mem_attrs_htab, 1, sizeof (mem_attrs_htab), >_ggc_mx_mem_attrs, >_pch_nx_mem_attrs, &ggc_marked_p }, { &const_int_htab, 1, sizeof (const_int_htab), >_ggc_mx_rtx_def, >_pch_nx_rtx_def, &ggc_marked_p }, LAST_GGC_CACHE_TAB }; const struct ggc_root_tab gt_pch_rc_gt_emit_rtl_h[] = { { &const_double_htab, 1, sizeof (const_double_htab), >_ggc_m_P7rtx_def4htab, >_pch_n_P7rtx_def4htab }, { ®_attrs_htab, 1, sizeof (reg_attrs_htab), >_ggc_m_P9reg_attrs4htab, >_pch_n_P9reg_attrs4htab }, { &mem_attrs_htab, 1, sizeof (mem_attrs_htab), >_ggc_m_P9mem_attrs4htab, >_pch_n_P9mem_attrs4htab }, { &const_int_htab, 1, sizeof (const_int_htab), >_ggc_m_P7rtx_def4htab, >_pch_n_P7rtx_def4htab }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_emit_rtl_h[] = { { &label_num, 1, sizeof (label_num), NULL, NULL }, LAST_GGC_ROOT_TAB }; #undef first_insn #undef last_insn #undef cur_insn_uid #undef last_location #undef first_label_num /* Implements exception handling. 
Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Mike Stump . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* An exception is an event that can be signaled from within a function. This event can then be "caught" or "trapped" by the callers of this function. This potentially allows program flow to be transferred to any arbitrary code associated with a function call several levels up the stack. The intended use for this mechanism is for signaling "exceptional events" in an out-of-band fashion, hence its name. The C++ language (and many other OO-styled or functional languages) practically requires such a mechanism, as otherwise it becomes very difficult or even impossible to signal failure conditions in complex situations. The traditional C++ example is when an error occurs in the process of constructing an object; without such a mechanism, it is impossible to signal that the error occurs without adding global state variables and error checks around every object construction. The act of causing this event to occur is referred to as "throwing an exception". (Alternate terms include "raising an exception" or "signaling an exception".) The term "throw" is used because control is returned to the callers of the function that is signaling the exception, and thus there is the concept of "throwing" the exception up the call stack. [ Add updated documentation on how to use this. ] */ /* Function integration definitions for GCC Copyright (C) 1990, 1995, 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_INTEGRATE_H #define GCC_INTEGRATE_H /* This structure is used to remap objects in the function being inlined to those belonging to the calling function. It is passed by expand_inline_function to its children. This structure is also used when unrolling loops and otherwise replicating code, although not all fields are needed in this case; only those fields needed by copy_rtx_and_substitute() and its children are used. This structure is used instead of static variables because expand_inline_function may be called recursively via expand_expr. */ struct inline_remap { /* Definition of function be inlined. 
*/ tree fndecl; /* Place to put insns needed at start of function. */ rtx insns_at_start; /* Mapping from old registers to new registers. It is allocated and deallocated in `expand_inline_function' */ rtx *reg_map; /* Mapping from old code-labels to new code-labels. The first element of this map is label_map[min_labelno]. */ rtx *label_map; /* Mapping from old insn uid's to copied insns. The first element of this map is insn_map[min_insnno]; the last element is insn_map[max_insnno]. We keep the bounds here for when the map only covers a partial range of insns (such as loop unrolling or code replication). */ rtx *insn_map; int min_insnno, max_insnno; /* Map pseudo reg number in calling function to equivalent constant. We cannot in general substitute constants into parameter pseudo registers, since some machine descriptions (many RISCs) won't always handle the resulting insns. So if an incoming parameter has a constant equivalent, we record it here, and if the resulting insn is recognizable, we go with it. We also use this mechanism to convert references to incoming arguments and stacked variables. copy_rtx_and_substitute will replace the virtual incoming argument and virtual stacked variables registers with new pseudos that contain pointers into the replacement area allocated for this inline instance. These pseudos are then marked as being equivalent to the appropriate address and substituted if valid. */ varray_type const_equiv_varray; /* This is incremented for each new basic block. It is used to store in the age field to record the domain of validity of each entry in const_equiv_varray. A value of -1 indicates an entry for a reg which is a parm. All other values are "positive". */ #define CONST_AGE_PARM (-1) unsigned int const_age; /* When an insn is being copied by copy_rtx_and_substitute, this is nonzero if we have copied an ASM_OPERANDS. In that case, it is the original input-operand vector. */ rtvec orig_asm_operands_vector; /* When an insn is being copied by copy_rtx_and_substitute, this is nonzero if we have copied an ASM_OPERANDS. In that case, it is the copied input-operand vector. */ rtvec copy_asm_operands_vector; /* Likewise, this is the copied constraints vector. */ rtvec copy_asm_constraints_vector; /* Indications for regs being pointers and their alignment. */ unsigned char *regno_pointer_align; rtx *x_regno_reg_rtx; /* The next few fields are used for subst_constants to record the SETs that it saw. */ int num_sets; struct equiv_table { rtx dest; rtx equiv; } equiv_sets[MAX_RECOG_OPERANDS]; /* Record the last thing assigned to pc. This is used for folded conditional branch insns. */ rtx last_pc_value; #ifdef HAVE_cc0 /* Record the last thing assigned to cc0. */ rtx last_cc0_value; #endif /* Note mode of COMPARE if the mode would be otherwise lost (comparing of two VOIDmode constants. */ rtx compare_src; enum machine_mode compare_mode; }; /* Return a copy of an rtx (as needed), substituting pseudo-register, labels, and frame-pointer offsets as necessary. */ extern rtx copy_rtx_and_substitute (rtx, struct inline_remap *, int); /* Return a pseudo that corresponds to the value in the specified hard reg as of the start of the function (for inlined functions, the value at the start of the parent function). */ extern rtx get_hard_reg_initial_val (enum machine_mode, int); /* Likewise, but for a different than the current function, or arbitrary expression. 
*/ extern rtx get_func_hard_reg_initial_val (struct function *, rtx); /* Likewise, but iff someone else has caused it to become allocated. */ extern rtx has_func_hard_reg_initial_val (struct function *, rtx); /* Likewise, but for common cases. */ extern rtx has_hard_reg_initial_val (enum machine_mode, int); /* If a pseudo represents an initial hard reg (or expression), return it, else return NULL_RTX. */ extern rtx get_hard_reg_initial_reg (struct function *, rtx); /* Called from rest_of_compilation. */ extern void emit_initial_value_sets (void); extern void allocate_initial_values (rtx *); /* Copy a declaration when one function is substituted inline into another. */ extern tree copy_decl_for_inlining (tree, tree, tree); /* Check whether there's any attribute in a function declaration that makes the function uninlinable. Returns false if it finds any, true otherwise. */ extern bool function_attribute_inlinable_p (tree); extern void try_constants (rtx, struct inline_remap *); /* Return the label indicated. */ extern rtx get_label_from_map (struct inline_remap *, int); /* Set the label indicated. */ #define set_label_in_map(MAP, I, X) ((MAP)->label_map[I] = (X)) /* Unfortunately, we need a global copy of const_equiv varray for communication with a function called from note_stores. Be *very* careful that this is used properly in the presence of recursion. */ extern varray_type global_const_equiv_varray; #define MAYBE_EXTEND_CONST_EQUIV_VARRAY(MAP,MAX) \ { \ if ((size_t)(MAX) >= VARRAY_SIZE ((MAP)->const_equiv_varray)) \ { \ int is_global = (global_const_equiv_varray \ == (MAP)->const_equiv_varray); \ VARRAY_GROW ((MAP)->const_equiv_varray, (MAX)+1); \ if (is_global) \ global_const_equiv_varray = (MAP)->const_equiv_varray; \ } \ } #define SET_CONST_EQUIV_DATA(MAP,REG,RTX,AGE) \ { \ struct const_equiv_data *p; \ MAYBE_EXTEND_CONST_EQUIV_VARRAY ((MAP), REGNO (REG)); \ p = &VARRAY_CONST_EQUIV ((MAP)->const_equiv_varray, REGNO (REG)); \ p->rtx = (RTX); \ p->age = (AGE); \ } #endif /* GCC_INTEGRATE_H */ /* Provide defaults for stuff that may not be defined when using sjlj exceptions. */ #ifndef EH_RETURN_DATA_REGNO #define EH_RETURN_DATA_REGNO(N) INVALID_REGNUM #endif /* Protect cleanup actions with must-not-throw regions, with a call to the given failure handler. */ tree (*lang_protect_cleanup_actions) (void); /* Return true if type A catches type B. */ int (*lang_eh_type_covers) (tree a, tree b); /* Map a type to a runtime object to match type. */ tree (*lang_eh_runtime_type) (tree); /* A hash table of label to region number. */ struct ehl_map_entry GTY(()) { rtx label; struct eh_region *region; }; static GTY(()) int call_site_base; static GTY ((param_is (union tree_node))) htab_t type_to_runtime_map; /* Describe the SjLj_Function_Context structure. */ static GTY(()) tree sjlj_fc_type_node; static int sjlj_fc_call_site_ofs; static int sjlj_fc_data_ofs; static int sjlj_fc_personality_ofs; static int sjlj_fc_lsda_ofs; static int sjlj_fc_jbuf_ofs; /* Describes one exception region. */ struct eh_region GTY(()) { /* The immediately surrounding region. */ struct eh_region *outer; /* The list of immediately contained regions. */ struct eh_region *inner; struct eh_region *next_peer; /* An identifier for this region. */ int region_number; /* When a region is deleted, its parents inherit the REG_EH_REGION numbers already assigned. */ bitmap aka; /* Each region does exactly one thing. 
*/ enum eh_region_type { ERT_UNKNOWN = 0, ERT_CLEANUP, ERT_TRY, ERT_CATCH, ERT_ALLOWED_EXCEPTIONS, ERT_MUST_NOT_THROW, ERT_THROW, ERT_FIXUP } type; /* Holds the action to perform based on the preceding type. */ union eh_region_u { /* A list of catch blocks, a surrounding try block, and the label for continuing after a catch. */ struct eh_region_u_try { struct eh_region *catch; struct eh_region *last_catch; struct eh_region *prev_try; rtx continue_label; } GTY ((tag ("ERT_TRY"))) try; /* The list through the catch handlers, the list of type objects matched, and the list of associated filters. */ struct eh_region_u_catch { struct eh_region *next_catch; struct eh_region *prev_catch; tree type_list; tree filter_list; } GTY ((tag ("ERT_CATCH"))) catch; /* A tree_list of allowed types. */ struct eh_region_u_allowed { tree type_list; int filter; } GTY ((tag ("ERT_ALLOWED_EXCEPTIONS"))) allowed; /* The type given by a call to "throw foo();", or discovered for a throw. */ struct eh_region_u_throw { tree type; } GTY ((tag ("ERT_THROW"))) throw; /* Retain the cleanup expression even after expansion so that we can match up fixup regions. */ struct eh_region_u_cleanup { tree exp; struct eh_region *prev_try; } GTY ((tag ("ERT_CLEANUP"))) cleanup; /* The real region (by expression and by pointer) that fixup code should live in. */ struct eh_region_u_fixup { tree cleanup_exp; struct eh_region *real_region; bool resolved; } GTY ((tag ("ERT_FIXUP"))) fixup; } GTY ((desc ("%0.type"))) u; /* Entry point for this region's handler before landing pads are built. */ rtx label; tree tree_label; /* Entry point for this region's handler from the runtime eh library. */ rtx landing_pad; /* Entry point for this region's handler from an inner region. */ rtx post_landing_pad; /* The RESX insn for handing off control to the next outermost handler, if appropriate. */ rtx resume; /* True if something in this region may throw. */ unsigned may_contain_throw : 1; }; struct call_site_record GTY(()) { rtx landing_pad; int action; }; /* Used to save exception status for each function. */ struct eh_status GTY(()) { /* The tree of all regions for this function. */ struct eh_region *region_tree; /* The same information as an indexable array. */ struct eh_region ** GTY ((length ("%h.last_region_number"))) region_array; /* The most recently open region. */ struct eh_region *cur_region; /* This is the region for which we are processing catch blocks. 
*/ struct eh_region *try_region; rtx filter; rtx exc_ptr; int built_landing_pads; int last_region_number; varray_type ttype_data; varray_type ehspec_data; varray_type action_record_data; htab_t GTY ((param_is (struct ehl_map_entry))) exception_handler_label_map; struct call_site_record * GTY ((length ("%h.call_site_data_used"))) call_site_data; int call_site_data_used; int call_site_data_size; rtx ehr_stackadj; rtx ehr_handler; rtx ehr_label; rtx sjlj_fc; rtx sjlj_exit_after; }; static int t2r_eq (const void *, const void *); static hashval_t t2r_hash (const void *); static void add_type_for_runtime (tree); static tree lookup_type_for_runtime (tree); static struct eh_region *expand_eh_region_end (void); static void resolve_fixup_regions (void); static void remove_fixup_regions (void); static void remove_unreachable_regions (rtx); static void convert_from_eh_region_ranges_1 (rtx *, int *, int); static struct eh_region *duplicate_eh_region_1 (struct eh_region *, struct inline_remap *); static void duplicate_eh_region_2 (struct eh_region *, struct eh_region **); static int ttypes_filter_eq (const void *, const void *); static hashval_t ttypes_filter_hash (const void *); static int ehspec_filter_eq (const void *, const void *); static hashval_t ehspec_filter_hash (const void *); static int add_ttypes_entry (htab_t, tree); static int add_ehspec_entry (htab_t, htab_t, tree); static void assign_filter_values (void); static void build_post_landing_pads (void); static void connect_post_landing_pads (void); static void dw2_build_landing_pads (void); struct sjlj_lp_info; static bool sjlj_find_directly_reachable_regions (struct sjlj_lp_info *); static void sjlj_assign_call_site_values (rtx, struct sjlj_lp_info *); static void sjlj_mark_call_sites (struct sjlj_lp_info *); static void sjlj_emit_function_enter (rtx); static void sjlj_emit_function_exit (void); static void sjlj_emit_dispatch_table (rtx, struct sjlj_lp_info *); static void sjlj_build_landing_pads (void); static hashval_t ehl_hash (const void *); static int ehl_eq (const void *, const void *); static void add_ehl_entry (rtx, struct eh_region *); static void remove_exception_handler_label (rtx); static void remove_eh_handler (struct eh_region *); static int for_each_eh_label_1 (void **, void *); /* The return value of reachable_next_level. */ enum reachable_code { /* The given exception is not processed by the given region. */ RNL_NOT_CAUGHT, /* The given exception may need processing by the given region. */ RNL_MAYBE_CAUGHT, /* The given exception is completely processed by the given region. */ RNL_CAUGHT, /* The given exception is completely processed by the runtime. */ RNL_BLOCKED }; struct reachable_info; static enum reachable_code reachable_next_level (struct eh_region *, tree, struct reachable_info *); static int action_record_eq (const void *, const void *); static hashval_t action_record_hash (const void *); static int add_action_record (htab_t, int, int); static int collect_one_action_chain (htab_t, struct eh_region *); static int add_call_site (rtx, int); static void push_uleb128 (varray_type *, unsigned int); static void push_sleb128 (varray_type *, int); #ifndef HAVE_AS_LEB128 static int dw2_size_of_call_site_table (void); static int sjlj_size_of_call_site_table (void); #endif static void dw2_output_call_site_table (void); static void sjlj_output_call_site_table (void); /* Routine to see if exception handling is turned on. DO_WARN is nonzero if we want to inform the user that exception handling is turned off. 
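   For illustration (an editor's note), callers typically guard their work
   with something like

       if (! doing_eh (0))
         return;

   and pass nonzero for DO_WARN only where a missing -fexceptions should
   produce a diagnostic.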
This is used to ensure that -fexceptions has been specified if the compiler tries to use any exception-specific functions. */ int doing_eh (int do_warn) { if (! flag_exceptions) { static int warned = 0; if (! warned && do_warn) { error ("exception handling disabled, use -fexceptions to enable"); warned = 1; } return 0; } return 1; } void init_eh (void) { if (! flag_exceptions) return; type_to_runtime_map = htab_create_ggc (31, t2r_hash, t2r_eq, NULL); /* Create the SjLj_Function_Context structure. This should match the definition in unwind-sjlj.c. */ if (USING_SJLJ_EXCEPTIONS) { tree f_jbuf, f_per, f_lsda, f_prev, f_cs, f_data, tmp; sjlj_fc_type_node = lang_hooks.types.make_type (RECORD_TYPE); f_prev = build_decl (FIELD_DECL, get_identifier ("__prev"), build_pointer_type (sjlj_fc_type_node)); DECL_FIELD_CONTEXT (f_prev) = sjlj_fc_type_node; f_cs = build_decl (FIELD_DECL, get_identifier ("__call_site"), integer_type_node); DECL_FIELD_CONTEXT (f_cs) = sjlj_fc_type_node; tmp = build_index_type (build_int_2 (4 - 1, 0)); tmp = build_array_type (lang_hooks.types.type_for_mode (word_mode, 1), tmp); f_data = build_decl (FIELD_DECL, get_identifier ("__data"), tmp); DECL_FIELD_CONTEXT (f_data) = sjlj_fc_type_node; f_per = build_decl (FIELD_DECL, get_identifier ("__personality"), ptr_type_node); DECL_FIELD_CONTEXT (f_per) = sjlj_fc_type_node; f_lsda = build_decl (FIELD_DECL, get_identifier ("__lsda"), ptr_type_node); DECL_FIELD_CONTEXT (f_lsda) = sjlj_fc_type_node; #ifdef DONT_USE_BUILTIN_SETJMP #ifdef JMP_BUF_SIZE tmp = build_int_2 (JMP_BUF_SIZE - 1, 0); #else /* Should be large enough for most systems, if it is not, JMP_BUF_SIZE should be defined with the proper value. It will also tend to be larger than necessary for most systems, a more optimal port will define JMP_BUF_SIZE. */ tmp = build_int_2 (FIRST_PSEUDO_REGISTER + 2 - 1, 0); #endif #else /* builtin_setjmp takes a pointer to 5 words. */ tmp = build_int_2 (5 * BITS_PER_WORD / POINTER_SIZE - 1, 0); #endif tmp = build_index_type (tmp); tmp = build_array_type (ptr_type_node, tmp); f_jbuf = build_decl (FIELD_DECL, get_identifier ("__jbuf"), tmp); #ifdef DONT_USE_BUILTIN_SETJMP /* We don't know what the alignment requirements of the runtime's jmp_buf has. Overestimate. */ DECL_ALIGN (f_jbuf) = BIGGEST_ALIGNMENT; DECL_USER_ALIGN (f_jbuf) = 1; #endif DECL_FIELD_CONTEXT (f_jbuf) = sjlj_fc_type_node; TYPE_FIELDS (sjlj_fc_type_node) = f_prev; TREE_CHAIN (f_prev) = f_cs; TREE_CHAIN (f_cs) = f_data; TREE_CHAIN (f_data) = f_per; TREE_CHAIN (f_per) = f_lsda; TREE_CHAIN (f_lsda) = f_jbuf; layout_type (sjlj_fc_type_node); /* Cache the interesting field offsets so that we have easy access from rtl. */ sjlj_fc_call_site_ofs = (tree_low_cst (DECL_FIELD_OFFSET (f_cs), 1) + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_cs), 1) / BITS_PER_UNIT); sjlj_fc_data_ofs = (tree_low_cst (DECL_FIELD_OFFSET (f_data), 1) + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_data), 1) / BITS_PER_UNIT); sjlj_fc_personality_ofs = (tree_low_cst (DECL_FIELD_OFFSET (f_per), 1) + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_per), 1) / BITS_PER_UNIT); sjlj_fc_lsda_ofs = (tree_low_cst (DECL_FIELD_OFFSET (f_lsda), 1) + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_lsda), 1) / BITS_PER_UNIT); sjlj_fc_jbuf_ofs = (tree_low_cst (DECL_FIELD_OFFSET (f_jbuf), 1) + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_jbuf), 1) / BITS_PER_UNIT); } } void init_eh_for_function (void) { cfun->eh = ggc_alloc_cleared (sizeof (struct eh_status)); } /* Routines to generate the exception tree somewhat directly. 
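They build ERT_* regions directly in cfun->eh->region_tree, without emitting NOTE_INSN_EH_REGION notes.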
These are used from tree-eh.c when processing exception related nodes during tree optimization. */ static struct eh_region * gen_eh_region (enum eh_region_type type, struct eh_region *outer) { struct eh_region *new; #ifdef ENABLE_CHECKING if (! doing_eh (0)) abort (); #endif /* Insert a new blank region as a leaf in the tree. */ new = ggc_alloc_cleared (sizeof (*new)); new->type = type; new->outer = outer; if (outer) { new->next_peer = outer->inner; outer->inner = new; } else { new->next_peer = cfun->eh->region_tree; cfun->eh->region_tree = new; } new->region_number = ++cfun->eh->last_region_number; return new; } struct eh_region * gen_eh_region_cleanup (struct eh_region *outer, struct eh_region *prev_try) { struct eh_region *cleanup = gen_eh_region (ERT_CLEANUP, outer); cleanup->u.cleanup.prev_try = prev_try; return cleanup; } struct eh_region * gen_eh_region_try (struct eh_region *outer) { return gen_eh_region (ERT_TRY, outer); } struct eh_region * gen_eh_region_catch (struct eh_region *t, tree type_or_list) { struct eh_region *c, *l; tree type_list, type_node; /* Ensure to always end up with a type list to normalize further processing, then register each type against the runtime types map. */ type_list = type_or_list; if (type_or_list) { if (TREE_CODE (type_or_list) != TREE_LIST) type_list = tree_cons (NULL_TREE, type_or_list, NULL_TREE); type_node = type_list; for (; type_node; type_node = TREE_CHAIN (type_node)) add_type_for_runtime (TREE_VALUE (type_node)); } c = gen_eh_region (ERT_CATCH, t->outer); c->u.catch.type_list = type_list; l = t->u.try.last_catch; c->u.catch.prev_catch = l; if (l) l->u.catch.next_catch = c; else t->u.try.catch = c; t->u.try.last_catch = c; return c; } struct eh_region * gen_eh_region_allowed (struct eh_region *outer, tree allowed) { struct eh_region *region = gen_eh_region (ERT_ALLOWED_EXCEPTIONS, outer); region->u.allowed.type_list = allowed; for (; allowed ; allowed = TREE_CHAIN (allowed)) add_type_for_runtime (TREE_VALUE (allowed)); return region; } struct eh_region * gen_eh_region_must_not_throw (struct eh_region *outer) { return gen_eh_region (ERT_MUST_NOT_THROW, outer); } int get_eh_region_number (struct eh_region *region) { return region->region_number; } bool get_eh_region_may_contain_throw (struct eh_region *region) { return region->may_contain_throw; } tree get_eh_region_tree_label (struct eh_region *region) { return region->tree_label; } void set_eh_region_tree_label (struct eh_region *region, tree lab) { region->tree_label = lab; } /* Start an exception handling region. All instructions emitted after this point are considered to be part of the region until expand_eh_region_end is invoked. */ void expand_eh_region_start (void) { struct eh_region *new; rtx note; if (! doing_eh (0)) return; new = gen_eh_region (ERT_UNKNOWN, cfun->eh->cur_region); cfun->eh->cur_region = new; /* Create a note marking the start of this region. */ note = emit_note (NOTE_INSN_EH_REGION_BEG); NOTE_EH_HANDLER (note) = new->region_number; } /* Common code to end a region. Returns the region just ended. */ static struct eh_region * expand_eh_region_end (void) { struct eh_region *cur_region = cfun->eh->cur_region; rtx note; /* Create a note marking the end of this region. */ note = emit_note (NOTE_INSN_EH_REGION_END); NOTE_EH_HANDLER (note) = cur_region->region_number; /* Pop. */ cfun->eh->cur_region = cur_region->outer; return cur_region; } /* Expand HANDLER, which is the operand 1 of a TRY_CATCH_EXPR. Catch blocks and C++ exception-specifications are handled specially. 
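A CATCH_EXPR is expanded between expand_start_all_catch and expand_end_all_catch; an EH_FILTER_EXPR ends the region with expand_eh_region_end_must_not_throw or expand_eh_region_end_allowed, depending on its EH_FILTER_MUST_NOT_THROW flag; anything else is expanded as a cleanup via expand_eh_region_end_cleanup.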
*/ void expand_eh_handler (tree handler) { tree inner = expr_first (handler); switch (TREE_CODE (inner)) { case CATCH_EXPR: expand_start_all_catch (); expand_expr (handler, const0_rtx, VOIDmode, 0); expand_end_all_catch (); break; case EH_FILTER_EXPR: if (EH_FILTER_MUST_NOT_THROW (handler)) expand_eh_region_end_must_not_throw (EH_FILTER_FAILURE (handler)); else expand_eh_region_end_allowed (EH_FILTER_TYPES (handler), EH_FILTER_FAILURE (handler)); break; default: expand_eh_region_end_cleanup (handler); break; } } /* End an exception handling region for a cleanup. HANDLER is an expression to expand for the cleanup. */ void expand_eh_region_end_cleanup (tree handler) { struct eh_region *region; tree protect_cleanup_actions; rtx around_label; rtx data_save[2]; if (! doing_eh (0)) return; region = expand_eh_region_end (); region->type = ERT_CLEANUP; region->label = gen_label_rtx (); region->u.cleanup.exp = handler; region->u.cleanup.prev_try = cfun->eh->try_region; around_label = gen_label_rtx (); emit_jump (around_label); emit_label (region->label); if (flag_non_call_exceptions || region->may_contain_throw) { /* Give the language a chance to specify an action to be taken if an exception is thrown that would propagate out of the HANDLER. */ protect_cleanup_actions = (lang_protect_cleanup_actions ? (*lang_protect_cleanup_actions) () : NULL_TREE); if (protect_cleanup_actions) expand_eh_region_start (); /* In case this cleanup involves an inline destructor with a try block in it, we need to save the EH return data registers around it. */ data_save[0] = gen_reg_rtx (ptr_mode); emit_move_insn (data_save[0], get_exception_pointer (cfun)); data_save[1] = gen_reg_rtx (word_mode); emit_move_insn (data_save[1], get_exception_filter (cfun)); expand_expr (handler, const0_rtx, VOIDmode, 0); emit_move_insn (cfun->eh->exc_ptr, data_save[0]); emit_move_insn (cfun->eh->filter, data_save[1]); if (protect_cleanup_actions) expand_eh_region_end_must_not_throw (protect_cleanup_actions); /* We need any stack adjustment complete before the around_label. */ do_pending_stack_adjust (); } /* We delay the generation of the _Unwind_Resume until we generate landing pads. We emit a marker here so as to get good control flow data in the meantime. */ region->resume = emit_jump_insn (gen_rtx_RESX (VOIDmode, region->region_number)); emit_barrier (); emit_label (around_label); } void expand_resx_expr (tree exp) { int region_nr = TREE_INT_CST_LOW (TREE_OPERAND (exp, 0)); struct eh_region *reg = cfun->eh->region_array[region_nr]; reg->resume = emit_jump_insn (gen_rtx_RESX (VOIDmode, region_nr)); emit_barrier (); } /* End an exception handling region for a try block, and prepares for subsequent calls to expand_start_catch. */ void expand_start_all_catch (void) { struct eh_region *region; if (! doing_eh (1)) return; region = expand_eh_region_end (); region->type = ERT_TRY; region->u.try.prev_try = cfun->eh->try_region; region->u.try.continue_label = gen_label_rtx (); cfun->eh->try_region = region; emit_jump (region->u.try.continue_label); } /* Begin a catch clause. TYPE is the type caught, a list of such types, (in the case of Java) an ADDR_EXPR which points to the runtime type to match, or null if this is a catch-all clause. Providing a type list enables to associate the catch region with potentially several exception types, which is useful e.g. for Ada. */ void expand_start_catch (tree type_or_list) { struct eh_region *c; rtx note; if (! 
doing_eh (0)) return; c = gen_eh_region_catch (cfun->eh->try_region, type_or_list); cfun->eh->cur_region = c; c->label = gen_label_rtx (); emit_label (c->label); note = emit_note (NOTE_INSN_EH_REGION_BEG); NOTE_EH_HANDLER (note) = c->region_number; } /* End a catch clause. Control will resume after the try/catch block. */ void expand_end_catch (void) { if (! doing_eh (0)) return; expand_eh_region_end (); emit_jump (cfun->eh->try_region->u.try.continue_label); } /* End a sequence of catch handlers for a try block. */ void expand_end_all_catch (void) { struct eh_region *try_region; if (! doing_eh (0)) return; try_region = cfun->eh->try_region; cfun->eh->try_region = try_region->u.try.prev_try; emit_label (try_region->u.try.continue_label); } /* End an exception region for an exception type filter. ALLOWED is a TREE_LIST of types to be matched by the runtime. FAILURE is an expression to invoke if a mismatch occurs. ??? We could use these semantics for calls to rethrow, too; if we can see the surrounding catch clause, we know that the exception we're rethrowing satisfies the "filter" of the catch type. */ void expand_eh_region_end_allowed (tree allowed, tree failure) { struct eh_region *region; rtx around_label; if (! doing_eh (0)) return; region = expand_eh_region_end (); region->type = ERT_ALLOWED_EXCEPTIONS; region->u.allowed.type_list = allowed; region->label = gen_label_rtx (); for (; allowed ; allowed = TREE_CHAIN (allowed)) add_type_for_runtime (TREE_VALUE (allowed)); /* We must emit the call to FAILURE here, so that if this function throws a different exception, that it will be processed by the correct region. */ around_label = gen_label_rtx (); emit_jump (around_label); emit_label (region->label); expand_expr (failure, const0_rtx, VOIDmode, EXPAND_NORMAL); /* We must adjust the stack before we reach the AROUND_LABEL because the call to FAILURE does not occur on all paths to the AROUND_LABEL. */ do_pending_stack_adjust (); emit_label (around_label); } /* End an exception region for a must-not-throw filter. FAILURE is an expression invoke if an uncaught exception propagates this far. This is conceptually identical to expand_eh_region_end_allowed with an empty allowed list (if you passed "std::terminate" instead of "__cxa_call_unexpected"), but they are represented differently in the C++ LSDA. */ void expand_eh_region_end_must_not_throw (tree failure) { struct eh_region *region; rtx around_label; if (! doing_eh (0)) return; region = expand_eh_region_end (); region->type = ERT_MUST_NOT_THROW; region->label = gen_label_rtx (); /* We must emit the call to FAILURE here, so that if this function throws a different exception, that it will be processed by the correct region. */ around_label = gen_label_rtx (); emit_jump (around_label); emit_label (region->label); expand_expr (failure, const0_rtx, VOIDmode, EXPAND_NORMAL); emit_label (around_label); } /* End an exception region for a throw. No handling goes on here, but it's the easiest way for the front-end to indicate what type is being thrown. */ void expand_eh_region_end_throw (tree type) { struct eh_region *region; if (! doing_eh (0)) return; region = expand_eh_region_end (); region->type = ERT_THROW; region->u.throw.type = type; } /* End a fixup region. Within this region the cleanups for the immediately enclosing region are _not_ run. This is used for goto cleanup to avoid destroying an object twice. 
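(The cleanup emitted for the goto already runs the destructor; if the enclosing region's cleanup also ran it, the object would be destroyed a second time.)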
This would be an extraordinarily simple prospect, were it not for the fact that we don't actually know what the immediately enclosing region is. This surprising fact is because expand_cleanups is currently generating a sequence that it will insert somewhere else. We collect the proper notion of "enclosing" in convert_from_eh_region_ranges. */ void expand_eh_region_end_fixup (tree handler) { struct eh_region *fixup; if (! doing_eh (0)) return; fixup = expand_eh_region_end (); fixup->type = ERT_FIXUP; fixup->u.fixup.cleanup_exp = handler; } /* Note that the current EH region (if any) may contain a throw, or a call to a function which itself may contain a throw. */ void note_eh_region_may_contain_throw (struct eh_region *region) { while (region && !region->may_contain_throw) { region->may_contain_throw = 1; region = region->outer; } } void note_current_region_may_contain_throw (void) { note_eh_region_may_contain_throw (cfun->eh->cur_region); } /* Return an rtl expression for a pointer to the exception object within a handler. */ rtx get_exception_pointer (struct function *fun) { rtx exc_ptr = fun->eh->exc_ptr; if (fun == cfun && ! exc_ptr) { exc_ptr = gen_reg_rtx (ptr_mode); fun->eh->exc_ptr = exc_ptr; } return exc_ptr; } /* Return an rtl expression for the exception dispatch filter within a handler. */ rtx get_exception_filter (struct function *fun) { rtx filter = fun->eh->filter; if (fun == cfun && ! filter) { filter = gen_reg_rtx (word_mode); fun->eh->filter = filter; } return filter; } /* This section is for the exception handling specific optimization pass. */ /* Random access the exception region tree. It's just as simple to collect the regions this way as in expand_eh_region_start, but without having to realloc memory. */ void collect_eh_region_array (void) { struct eh_region **array, *i; i = cfun->eh->region_tree; if (! i) return; array = ggc_alloc_cleared ((cfun->eh->last_region_number + 1) * sizeof (*array)); cfun->eh->region_array = array; while (1) { array[i->region_number] = i; /* If there are sub-regions, process them. */ if (i->inner) i = i->inner; /* If there are peers, process them. */ else if (i->next_peer) i = i->next_peer; /* Otherwise, step back up the tree to the next peer. */ else { do { i = i->outer; if (i == NULL) return; } while (i->next_peer == NULL); i = i->next_peer; } } } static void resolve_one_fixup_region (struct eh_region *fixup) { struct eh_region *cleanup, *real; int j, n; n = cfun->eh->last_region_number; cleanup = 0; for (j = 1; j <= n; ++j) { cleanup = cfun->eh->region_array[j]; if (cleanup && cleanup->type == ERT_CLEANUP && cleanup->u.cleanup.exp == fixup->u.fixup.cleanup_exp) break; } if (j > n) abort (); real = cleanup->outer; if (real && real->type == ERT_FIXUP) { if (!real->u.fixup.resolved) resolve_one_fixup_region (real); real = real->u.fixup.real_region; } fixup->u.fixup.real_region = real; fixup->u.fixup.resolved = true; } static void resolve_fixup_regions (void) { int i, n = cfun->eh->last_region_number; for (i = 1; i <= n; ++i) { struct eh_region *fixup = cfun->eh->region_array[i]; if (!fixup || fixup->type != ERT_FIXUP || fixup->u.fixup.resolved) continue; resolve_one_fixup_region (fixup); } } /* Now that we've discovered what region actually encloses a fixup, we can shuffle pointers and remove them from the tree. */ static void remove_fixup_regions (void) { int i; rtx insn, note; struct eh_region *fixup; /* Walk the insn chain and adjust the REG_EH_REGION numbers for instructions referencing fixup regions. 
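Each such note is redirected to the fixup's resolved real_region, or removed entirely if the fixup has no real region.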
This is only strictly necessary for fixup regions with no parent, but doesn't hurt to do it for all regions. */ for (insn = get_insns(); insn ; insn = NEXT_INSN (insn)) if (INSN_P (insn) && (note = find_reg_note (insn, REG_EH_REGION, NULL)) && INTVAL (XEXP (note, 0)) > 0 && (fixup = cfun->eh->region_array[INTVAL (XEXP (note, 0))]) && fixup->type == ERT_FIXUP) { if (fixup->u.fixup.real_region) XEXP (note, 0) = GEN_INT (fixup->u.fixup.real_region->region_number); else remove_note (insn, note); } /* Remove the fixup regions from the tree. */ for (i = cfun->eh->last_region_number; i > 0; --i) { fixup = cfun->eh->region_array[i]; if (! fixup) continue; /* Allow GC to maybe free some memory. */ if (fixup->type == ERT_CLEANUP) fixup->u.cleanup.exp = NULL_TREE; if (fixup->type != ERT_FIXUP) continue; if (fixup->inner) { struct eh_region *parent, *p, **pp; parent = fixup->u.fixup.real_region; /* Fix up the children's parent pointers; find the end of the list. */ for (p = fixup->inner; ; p = p->next_peer) { p->outer = parent; if (! p->next_peer) break; } /* In the tree of cleanups, only outer-inner ordering matters. So link the children back in anywhere at the correct level. */ if (parent) pp = &parent->inner; else pp = &cfun->eh->region_tree; p->next_peer = *pp; *pp = fixup->inner; fixup->inner = NULL; } remove_eh_handler (fixup); } } /* Remove all regions whose labels are not reachable from insns. */ static void remove_unreachable_regions (rtx insns) { int i, *uid_region_num; bool *reachable; struct eh_region *r; rtx insn; uid_region_num = xcalloc (get_max_uid (), sizeof(int)); reachable = xcalloc (cfun->eh->last_region_number + 1, sizeof(bool)); for (i = cfun->eh->last_region_number; i > 0; --i) { r = cfun->eh->region_array[i]; if (!r || r->region_number != i) continue; if (r->resume) { if (uid_region_num[INSN_UID (r->resume)]) abort (); uid_region_num[INSN_UID (r->resume)] = i; } if (r->label) { if (uid_region_num[INSN_UID (r->label)]) abort (); uid_region_num[INSN_UID (r->label)] = i; } } for (insn = insns; insn; insn = NEXT_INSN (insn)) reachable[uid_region_num[INSN_UID (insn)]] = true; for (i = cfun->eh->last_region_number; i > 0; --i) { r = cfun->eh->region_array[i]; if (r && r->region_number == i && !reachable[i]) { bool kill_it = true; switch (r->type) { case ERT_THROW: /* Don't remove ERT_THROW regions if their outer region is reachable. */ if (r->outer && reachable[r->outer->region_number]) kill_it = false; break; case ERT_MUST_NOT_THROW: /* MUST_NOT_THROW regions are implementable solely in the runtime, but their existence continues to affect calls within that region. Never delete them here. */ kill_it = false; break; case ERT_TRY: { /* TRY regions are reachable if any of its CATCH regions are reachable. */ struct eh_region *c; for (c = r->u.try.catch; c ; c = c->u.catch.next_catch) if (reachable[c->region_number]) { kill_it = false; break; } break; } default: break; } if (kill_it) remove_eh_handler (r); } } free (reachable); free (uid_region_num); } /* Turn NOTE_INSN_EH_REGION notes into REG_EH_REGION notes for each can_throw instruction in the region. 
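A stack of region numbers tracks the current nesting: a REGION_BEG note pushes and a REGION_END note pops, and every call insn in between (or, with -fnon-call-exceptions, every insn that may_trap_p) receives a REG_EH_REGION note naming the innermost region.  The region notes themselves are removed as they are processed.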
*/ static void convert_from_eh_region_ranges_1 (rtx *pinsns, int *orig_sp, int cur) { int *sp = orig_sp; rtx insn, next; for (insn = *pinsns; insn ; insn = next) { next = NEXT_INSN (insn); if (GET_CODE (insn) == NOTE) { int kind = NOTE_LINE_NUMBER (insn); if (kind == NOTE_INSN_EH_REGION_BEG || kind == NOTE_INSN_EH_REGION_END) { if (kind == NOTE_INSN_EH_REGION_BEG) { struct eh_region *r; *sp++ = cur; cur = NOTE_EH_HANDLER (insn); r = cfun->eh->region_array[cur]; if (r->type == ERT_FIXUP) { r = r->u.fixup.real_region; cur = r ? r->region_number : 0; } else if (r->type == ERT_CATCH) { r = r->outer; cur = r ? r->region_number : 0; } } else cur = *--sp; if (insn == *pinsns) *pinsns = next; remove_insn (insn); continue; } } else if (INSN_P (insn)) { if (cur > 0 && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX) /* Calls can always potentially throw exceptions, unless they have a REG_EH_REGION note with a value of 0 or less. Which should be the only possible kind so far. */ && (GET_CODE (insn) == CALL_INSN /* If we wanted exceptions for non-call insns, then any may_trap_p instruction could throw. */ || (flag_non_call_exceptions && GET_CODE (PATTERN (insn)) != CLOBBER && GET_CODE (PATTERN (insn)) != USE && may_trap_p (PATTERN (insn))))) { REG_NOTES (insn) = alloc_EXPR_LIST (REG_EH_REGION, GEN_INT (cur), REG_NOTES (insn)); } } } if (sp != orig_sp) abort (); } static void collect_rtl_labels_from_trees (void) { int i, n = cfun->eh->last_region_number; for (i = 1; i <= n; ++i) { struct eh_region *reg = cfun->eh->region_array[i]; if (reg && reg->tree_label) reg->label = DECL_RTL_IF_SET (reg->tree_label); } } void convert_from_eh_region_ranges (void) { rtx insns = get_insns (); if (cfun->eh->region_array) { /* If the region array already exists, assume we're coming from optimize_function_tree. In this case all we need to do is collect the rtl labels that correspond to the tree labels that we allocated earlier. */ collect_rtl_labels_from_trees (); } else { int *stack; collect_eh_region_array (); resolve_fixup_regions (); stack = xmalloc (sizeof (int) * (cfun->eh->last_region_number + 1)); convert_from_eh_region_ranges_1 (&insns, stack, 0); free (stack); remove_fixup_regions (); } remove_unreachable_regions (insns); } static void add_ehl_entry (rtx label, struct eh_region *region) { struct ehl_map_entry **slot, *entry; LABEL_PRESERVE_P (label) = 1; entry = ggc_alloc (sizeof (*entry)); entry->label = label; entry->region = region; slot = (struct ehl_map_entry **) htab_find_slot (cfun->eh->exception_handler_label_map, entry, INSERT); /* Before landing pad creation, each exception handler has its own label. After landing pad creation, the exception handlers may share landing pads. This is ok, since maybe_remove_eh_handler only requires the 1-1 mapping before landing pad creation. */ if (*slot && !cfun->eh->built_landing_pads) abort (); *slot = entry; } void find_exception_handler_labels (void) { int i; if (cfun->eh->exception_handler_label_map) htab_empty (cfun->eh->exception_handler_label_map); else { /* ??? The expansion factor here (3/2) must be greater than the htab occupancy factor (4/3) to avoid unnecessary resizing. */ cfun->eh->exception_handler_label_map = htab_create_ggc (cfun->eh->last_region_number * 3 / 2, ehl_hash, ehl_eq, NULL); } if (cfun->eh->region_tree == NULL) return; for (i = cfun->eh->last_region_number; i > 0; --i) { struct eh_region *region = cfun->eh->region_array[i]; rtx lab; if (! 
region || region->region_number != i) continue; if (cfun->eh->built_landing_pads) lab = region->landing_pad; else lab = region->label; if (lab) add_ehl_entry (lab, region); } /* For sjlj exceptions, need the return label to remain live until after landing pad generation. */ if (USING_SJLJ_EXCEPTIONS && ! cfun->eh->built_landing_pads) add_ehl_entry (return_label, NULL); } bool current_function_has_exception_handlers (void) { int i; for (i = cfun->eh->last_region_number; i > 0; --i) { struct eh_region *region = cfun->eh->region_array[i]; if (! region || region->region_number != i) continue; if (region->type != ERT_THROW) return true; } return false; } static struct eh_region * duplicate_eh_region_1 (struct eh_region *o, struct inline_remap *map) { struct eh_region *n = ggc_alloc_cleared (sizeof (struct eh_region)); n->region_number = o->region_number + cfun->eh->last_region_number; n->type = o->type; switch (n->type) { case ERT_CLEANUP: case ERT_MUST_NOT_THROW: break; case ERT_TRY: if (o->u.try.continue_label) n->u.try.continue_label = get_label_from_map (map, CODE_LABEL_NUMBER (o->u.try.continue_label)); break; case ERT_CATCH: n->u.catch.type_list = o->u.catch.type_list; break; case ERT_ALLOWED_EXCEPTIONS: n->u.allowed.type_list = o->u.allowed.type_list; break; case ERT_THROW: n->u.throw.type = o->u.throw.type; default: abort (); } if (o->label) n->label = get_label_from_map (map, CODE_LABEL_NUMBER (o->label)); if (o->resume) { n->resume = map->insn_map[INSN_UID (o->resume)]; if (n->resume == NULL) abort (); } return n; } static void duplicate_eh_region_2 (struct eh_region *o, struct eh_region **n_array) { struct eh_region *n = n_array[o->region_number]; switch (n->type) { case ERT_TRY: n->u.try.catch = n_array[o->u.try.catch->region_number]; n->u.try.last_catch = n_array[o->u.try.last_catch->region_number]; break; case ERT_CATCH: if (o->u.catch.next_catch) n->u.catch.next_catch = n_array[o->u.catch.next_catch->region_number]; if (o->u.catch.prev_catch) n->u.catch.prev_catch = n_array[o->u.catch.prev_catch->region_number]; break; default: break; } if (o->outer) n->outer = n_array[o->outer->region_number]; if (o->inner) n->inner = n_array[o->inner->region_number]; if (o->next_peer) n->next_peer = n_array[o->next_peer->region_number]; } int duplicate_eh_regions (struct function *ifun, struct inline_remap *map) { int ifun_last_region_number = ifun->eh->last_region_number; struct eh_region **n_array, *root, *cur; int i; if (ifun_last_region_number == 0) return 0; n_array = xcalloc (ifun_last_region_number + 1, sizeof (*n_array)); for (i = 1; i <= ifun_last_region_number; ++i) { cur = ifun->eh->region_array[i]; if (!cur || cur->region_number != i) continue; n_array[i] = duplicate_eh_region_1 (cur, map); } for (i = 1; i <= ifun_last_region_number; ++i) { cur = ifun->eh->region_array[i]; if (!cur || cur->region_number != i) continue; duplicate_eh_region_2 (cur, n_array); } root = n_array[ifun->eh->region_tree->region_number]; cur = cfun->eh->cur_region; if (cur) { struct eh_region *p = cur->inner; if (p) { while (p->next_peer) p = p->next_peer; p->next_peer = root; } else cur->inner = root; for (i = 1; i <= ifun_last_region_number; ++i) if (n_array[i] && n_array[i]->outer == NULL) n_array[i]->outer = cur; } else { struct eh_region *p = cfun->eh->region_tree; if (p) { while (p->next_peer) p = p->next_peer; p->next_peer = root; } else cfun->eh->region_tree = root; } free (n_array); i = cfun->eh->last_region_number; cfun->eh->last_region_number = i + ifun_last_region_number; return i; } static int 
t2r_eq (const void *pentry, const void *pdata) { tree entry = (tree) pentry; tree data = (tree) pdata; return TREE_PURPOSE (entry) == data; } static hashval_t t2r_hash (const void *pentry) { tree entry = (tree) pentry; return TREE_HASH (TREE_PURPOSE (entry)); } static void add_type_for_runtime (tree type) { tree *slot; slot = (tree *) htab_find_slot_with_hash (type_to_runtime_map, type, TREE_HASH (type), INSERT); if (*slot == NULL) { tree runtime = (*lang_eh_runtime_type) (type); *slot = tree_cons (type, runtime, NULL_TREE); } } static tree lookup_type_for_runtime (tree type) { tree *slot; slot = (tree *) htab_find_slot_with_hash (type_to_runtime_map, type, TREE_HASH (type), NO_INSERT); /* We should have always inserted the data earlier. */ return TREE_VALUE (*slot); } /* Represent an entry in @TTypes for either catch actions or exception filter actions. */ struct ttypes_filter GTY(()) { tree t; int filter; }; /* Compare ENTRY (a ttypes_filter entry in the hash table) with DATA (a tree) for a @TTypes type node we are thinking about adding. */ static int ttypes_filter_eq (const void *pentry, const void *pdata) { const struct ttypes_filter *entry = (const struct ttypes_filter *) pentry; tree data = (tree) pdata; return entry->t == data; } static hashval_t ttypes_filter_hash (const void *pentry) { const struct ttypes_filter *entry = (const struct ttypes_filter *) pentry; return TREE_HASH (entry->t); } /* Compare ENTRY with DATA (both struct ttypes_filter) for a @TTypes exception specification list we are thinking about adding. */ /* ??? Currently we use the type lists in the order given. Someone should put these in some canonical order. */ static int ehspec_filter_eq (const void *pentry, const void *pdata) { const struct ttypes_filter *entry = (const struct ttypes_filter *) pentry; const struct ttypes_filter *data = (const struct ttypes_filter *) pdata; return type_list_equal (entry->t, data->t); } /* Hash function for exception specification lists. */ static hashval_t ehspec_filter_hash (const void *pentry) { const struct ttypes_filter *entry = (const struct ttypes_filter *) pentry; hashval_t h = 0; tree list; for (list = entry->t; list ; list = TREE_CHAIN (list)) h = (h << 5) + (h >> 27) + TREE_HASH (TREE_VALUE (list)); return h; } /* Add TYPE (which may be NULL) to cfun->eh->ttype_data, using TYPES_HASH to speed up the search. Return the filter value to be used. */ static int add_ttypes_entry (htab_t ttypes_hash, tree type) { struct ttypes_filter **slot, *n; slot = (struct ttypes_filter **) htab_find_slot_with_hash (ttypes_hash, type, TREE_HASH (type), INSERT); if ((n = *slot) == NULL) { /* Filter value is a 1 based table index. */ n = xmalloc (sizeof (*n)); n->t = type; n->filter = VARRAY_ACTIVE_SIZE (cfun->eh->ttype_data) + 1; *slot = n; VARRAY_PUSH_TREE (cfun->eh->ttype_data, type); } return n->filter; } /* Add LIST to cfun->eh->ehspec_data, using EHSPEC_HASH and TYPES_HASH to speed up the search. Return the filter value to be used. */ static int add_ehspec_entry (htab_t ehspec_hash, htab_t ttypes_hash, tree list) { struct ttypes_filter **slot, *n; struct ttypes_filter dummy; dummy.t = list; slot = (struct ttypes_filter **) htab_find_slot (ehspec_hash, &dummy, INSERT); if ((n = *slot) == NULL) { /* Filter value is a -1 based byte index into a uleb128 buffer. */ n = xmalloc (sizeof (*n)); n->t = list; n->filter = -(VARRAY_ACTIVE_SIZE (cfun->eh->ehspec_data) + 1); *slot = n; /* Look up each type in the list and encode its filter value as a uleb128. Terminate the list with 0. 
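For illustration (values invented): a specification whose two types received ttype filters 3 and 7 appends the bytes 3, 7, 0 to ehspec_data, and the entry's own filter is the negative 1-based index of the first of those bytes.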
*/ for (; list ; list = TREE_CHAIN (list)) push_uleb128 (&cfun->eh->ehspec_data, add_ttypes_entry (ttypes_hash, TREE_VALUE (list))); VARRAY_PUSH_UCHAR (cfun->eh->ehspec_data, 0); } return n->filter; } /* Generate the action filter values to be used for CATCH and ALLOWED_EXCEPTIONS regions. When using dwarf2 exception regions, we use lots of landing pads, and so every type or list can share the same filter value, which saves table space. */ static void assign_filter_values (void) { int i; htab_t ttypes, ehspec; VARRAY_TREE_INIT (cfun->eh->ttype_data, 16, "ttype_data"); VARRAY_UCHAR_INIT (cfun->eh->ehspec_data, 64, "ehspec_data"); ttypes = htab_create (31, ttypes_filter_hash, ttypes_filter_eq, free); ehspec = htab_create (31, ehspec_filter_hash, ehspec_filter_eq, free); for (i = cfun->eh->last_region_number; i > 0; --i) { struct eh_region *r = cfun->eh->region_array[i]; /* Mind we don't process a region more than once. */ if (!r || r->region_number != i) continue; switch (r->type) { case ERT_CATCH: /* Whatever type_list is (NULL or true list), we build a list of filters for the region. */ r->u.catch.filter_list = NULL_TREE; if (r->u.catch.type_list != NULL) { /* Get a filter value for each of the types caught and store them in the region's dedicated list. */ tree tp_node = r->u.catch.type_list; for (;tp_node; tp_node = TREE_CHAIN (tp_node)) { int flt = add_ttypes_entry (ttypes, TREE_VALUE (tp_node)); tree flt_node = build_int_2 (flt, 0); r->u.catch.filter_list = tree_cons (NULL_TREE, flt_node, r->u.catch.filter_list); } } else { /* Get a filter value for the NULL list also since it will need an action record anyway. */ int flt = add_ttypes_entry (ttypes, NULL); tree flt_node = build_int_2 (flt, 0); r->u.catch.filter_list = tree_cons (NULL_TREE, flt_node, r->u.catch.filter_list); } break; case ERT_ALLOWED_EXCEPTIONS: r->u.allowed.filter = add_ehspec_entry (ehspec, ttypes, r->u.allowed.type_list); break; default: break; } } htab_delete (ttypes); htab_delete (ehspec); } /* Emit SEQ into basic block just before INSN (that is assumed to be first instruction of some existing BB and return the newly produced block. */ static basic_block emit_to_new_bb_before (rtx seq, rtx insn) { rtx last; basic_block bb; edge e; /* If there happens to be an fallthru edge (possibly created by cleanup_cfg call), we don't want it to go into newly created landing pad or other EH construct. */ for (e = BLOCK_FOR_INSN (insn)->pred; e; e = e->pred_next) if (e->flags & EDGE_FALLTHRU) force_nonfallthru (e); last = emit_insn_before (seq, insn); if (GET_CODE (last) == BARRIER) last = PREV_INSN (last); bb = create_basic_block (seq, last, BLOCK_FOR_INSN (insn)->prev_bb); update_bb_for_insn (bb); bb->flags |= BB_SUPERBLOCK; return bb; } /* Generate the code to actually handle exceptions, which will follow the landing pads. */ static void build_post_landing_pads (void) { int i; for (i = cfun->eh->last_region_number; i > 0; --i) { struct eh_region *region = cfun->eh->region_array[i]; rtx seq; /* Mind we don't process a region more than once. */ if (!region || region->region_number != i) continue; switch (region->type) { case ERT_TRY: /* ??? Collect the set of all non-overlapping catch handlers all the way up the chain until blocked by a cleanup. */ /* ??? Outer try regions can share landing pads with inner try regions if the types are completely non-overlapping, and there are no intervening cleanups. */ region->post_landing_pad = gen_label_rtx (); start_sequence (); emit_label (region->post_landing_pad); /* ??? 
It is mighty inconvenient to call back into the switch statement generation code in expand_end_case. Rapid prototyping sez a sequence of ifs. */ { struct eh_region *c; for (c = region->u.try.catch; c ; c = c->u.catch.next_catch) { if (c->u.catch.type_list == NULL) emit_jump (c->label); else { /* Need for one cmp/jump per type caught. Each type list entry has a matching entry in the filter list (see assign_filter_values). */ tree tp_node = c->u.catch.type_list; tree flt_node = c->u.catch.filter_list; for (; tp_node; ) { emit_cmp_and_jump_insns (cfun->eh->filter, GEN_INT (tree_low_cst (TREE_VALUE (flt_node), 0)), EQ, NULL_RTX, word_mode, 0, c->label); tp_node = TREE_CHAIN (tp_node); flt_node = TREE_CHAIN (flt_node); } } } } /* We delay the generation of the _Unwind_Resume until we generate landing pads. We emit a marker here so as to get good control flow data in the meantime. */ region->resume = emit_jump_insn (gen_rtx_RESX (VOIDmode, region->region_number)); emit_barrier (); seq = get_insns (); end_sequence (); emit_to_new_bb_before (seq, region->u.try.catch->label); break; case ERT_ALLOWED_EXCEPTIONS: region->post_landing_pad = gen_label_rtx (); start_sequence (); emit_label (region->post_landing_pad); emit_cmp_and_jump_insns (cfun->eh->filter, GEN_INT (region->u.allowed.filter), EQ, NULL_RTX, word_mode, 0, region->label); /* We delay the generation of the _Unwind_Resume until we generate landing pads. We emit a marker here so as to get good control flow data in the meantime. */ region->resume = emit_jump_insn (gen_rtx_RESX (VOIDmode, region->region_number)); emit_barrier (); seq = get_insns (); end_sequence (); emit_to_new_bb_before (seq, region->label); break; case ERT_CLEANUP: case ERT_MUST_NOT_THROW: region->post_landing_pad = region->label; break; case ERT_CATCH: case ERT_THROW: /* Nothing to do. */ break; default: abort (); } } } /* Replace RESX patterns with jumps to the next handler if any, or calls to _Unwind_Resume otherwise. */ static void connect_post_landing_pads (void) { int i; for (i = cfun->eh->last_region_number; i > 0; --i) { struct eh_region *region = cfun->eh->region_array[i]; struct eh_region *outer; rtx seq; rtx barrier; /* Mind we don't process a region more than once. */ if (!region || region->region_number != i) continue; /* If there is no RESX, or it has been deleted by flow, there's nothing to fix up. */ if (! region->resume || INSN_DELETED_P (region->resume)) continue; /* Search for another landing pad in this function. */ for (outer = region->outer; outer ; outer = outer->outer) if (outer->post_landing_pad) break; start_sequence (); if (outer) { edge e; basic_block src, dest; emit_jump (outer->post_landing_pad); src = BLOCK_FOR_INSN (region->resume); dest = BLOCK_FOR_INSN (outer->post_landing_pad); while (src->succ) remove_edge (src->succ); e = make_edge (src, dest, 0); e->probability = REG_BR_PROB_BASE; e->count = src->count; } else { emit_library_call (unwind_resume_libfunc, LCT_THROW, VOIDmode, 1, cfun->eh->exc_ptr, ptr_mode); /* What we just emitted was a throwing libcall, so it got a barrier automatically added after it. If the last insn in the libcall sequence isn't the barrier, it's because the target emits multiple insns for a call, and there are insns after the actual call insn (which are redundant and would be optimized away). The barrier is inserted exactly after the call insn, so let's go get that and delete the insns after it, because below we need the barrier to be the last insn in the sequence. 
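The code below relies on this: it checks that the last insn emitted by emit_insn_before is indeed a BARRIER before deleting it as a duplicate.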
*/ delete_insns_since (NEXT_INSN (last_call_insn ())); } seq = get_insns (); end_sequence (); barrier = emit_insn_before (seq, region->resume); /* Avoid duplicate barrier. */ if (GET_CODE (barrier) != BARRIER) abort (); delete_insn (barrier); delete_insn (region->resume); /* ??? From tree-ssa we can wind up with catch regions whose label is not instantiated, but whose resx is present. Now that we've dealt with the resx, kill the region. */ if (region->label == NULL && region->type == ERT_CLEANUP) remove_eh_handler (region); } } static void dw2_build_landing_pads (void) { int i; unsigned int j; for (i = cfun->eh->last_region_number; i > 0; --i) { struct eh_region *region = cfun->eh->region_array[i]; rtx seq; basic_block bb; bool clobbers_hard_regs = false; edge e; /* Mind we don't process a region more than once. */ if (!region || region->region_number != i) continue; if (region->type != ERT_CLEANUP && region->type != ERT_TRY && region->type != ERT_ALLOWED_EXCEPTIONS) continue; start_sequence (); region->landing_pad = gen_label_rtx (); emit_label (region->landing_pad); #ifdef HAVE_exception_receiver if (HAVE_exception_receiver) emit_insn (gen_exception_receiver ()); else #endif #ifdef HAVE_nonlocal_goto_receiver if (HAVE_nonlocal_goto_receiver) emit_insn (gen_nonlocal_goto_receiver ()); else #endif { /* Nothing */ } /* If the eh_return data registers are call-saved, then we won't have considered them clobbered from the call that threw. Kill them now. */ for (j = 0; ; ++j) { unsigned r = EH_RETURN_DATA_REGNO (j); if (r == INVALID_REGNUM) break; if (! call_used_regs[r]) { emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, r))); clobbers_hard_regs = true; } } if (clobbers_hard_regs) { /* @@@ This is a kludge. Not all machine descriptions define a blockage insn, but we must not allow the code we just generated to be reordered by scheduling. So emit an ASM_INPUT to act as blockage insn. */ emit_insn (gen_rtx_ASM_INPUT (VOIDmode, "")); } emit_move_insn (cfun->eh->exc_ptr, gen_rtx_REG (ptr_mode, EH_RETURN_DATA_REGNO (0))); emit_move_insn (cfun->eh->filter, gen_rtx_REG (word_mode, EH_RETURN_DATA_REGNO (1))); seq = get_insns (); end_sequence (); bb = emit_to_new_bb_before (seq, region->post_landing_pad); e = make_edge (bb, bb->next_bb, EDGE_FALLTHRU); e->count = bb->count; e->probability = REG_BR_PROB_BASE; } } struct sjlj_lp_info { int directly_reachable; int action_index; int dispatch_index; int call_site_index; }; static bool sjlj_find_directly_reachable_regions (struct sjlj_lp_info *lp_info) { rtx insn; bool found_one = false; for (insn = get_insns (); insn ; insn = NEXT_INSN (insn)) { struct eh_region *region; enum reachable_code rc; tree type_thrown; rtx note; if (! INSN_P (insn)) continue; note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (!note || INTVAL (XEXP (note, 0)) <= 0) continue; region = cfun->eh->region_array[INTVAL (XEXP (note, 0))]; type_thrown = NULL_TREE; if (region->type == ERT_THROW) { type_thrown = region->u.throw.type; region = region->outer; } /* Find the first containing region that might handle the exception. That's the landing pad to which we will transfer control. 
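A region that answers RNL_MAYBE_CAUGHT or RNL_CAUGHT is marked directly reachable and will get an entry in the sjlj dispatch table.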
*/ rc = RNL_NOT_CAUGHT; for (; region; region = region->outer) { rc = reachable_next_level (region, type_thrown, NULL); if (rc != RNL_NOT_CAUGHT) break; } if (rc == RNL_MAYBE_CAUGHT || rc == RNL_CAUGHT) { lp_info[region->region_number].directly_reachable = 1; found_one = true; } } return found_one; } static void sjlj_assign_call_site_values (rtx dispatch_label, struct sjlj_lp_info *lp_info) { htab_t ar_hash; int i, index; /* First task: build the action table. */ VARRAY_UCHAR_INIT (cfun->eh->action_record_data, 64, "action_record_data"); ar_hash = htab_create (31, action_record_hash, action_record_eq, free); for (i = cfun->eh->last_region_number; i > 0; --i) if (lp_info[i].directly_reachable) { struct eh_region *r = cfun->eh->region_array[i]; r->landing_pad = dispatch_label; lp_info[i].action_index = collect_one_action_chain (ar_hash, r); if (lp_info[i].action_index != -1) cfun->uses_eh_lsda = 1; } htab_delete (ar_hash); /* Next: assign dispatch values. In dwarf2 terms, this would be the landing pad label for the region. For sjlj though, there is one common landing pad from which we dispatch to the post-landing pads. A region receives a dispatch index if it is directly reachable and requires in-function processing. Regions that share post-landing pads may share dispatch indices. */ /* ??? Post-landing pad sharing doesn't actually happen at the moment (see build_post_landing_pads) so we don't bother checking for it. */ index = 0; for (i = cfun->eh->last_region_number; i > 0; --i) if (lp_info[i].directly_reachable) lp_info[i].dispatch_index = index++; /* Finally: assign call-site values. If dwarf2 terms, this would be the region number assigned by convert_to_eh_region_ranges, but handles no-action and must-not-throw differently. */ call_site_base = 1; for (i = cfun->eh->last_region_number; i > 0; --i) if (lp_info[i].directly_reachable) { int action = lp_info[i].action_index; /* Map must-not-throw to otherwise unused call-site index 0. */ if (action == -2) index = 0; /* Map no-action to otherwise unused call-site index -1. */ else if (action == -1) index = -1; /* Otherwise, look it up in the table. */ else index = add_call_site (GEN_INT (lp_info[i].dispatch_index), action); lp_info[i].call_site_index = index; } } static void sjlj_mark_call_sites (struct sjlj_lp_info *lp_info) { int last_call_site = -2; rtx insn, mem; for (insn = get_insns (); insn ; insn = NEXT_INSN (insn)) { struct eh_region *region; int this_call_site; rtx note, before, p; /* Reset value tracking at extended basic block boundaries. */ if (GET_CODE (insn) == CODE_LABEL) last_call_site = -2; if (! INSN_P (insn)) continue; note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (!note) { /* Calls (and trapping insns) without notes are outside any exception handling region in this function. Mark them as no action. */ if (GET_CODE (insn) == CALL_INSN || (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))) this_call_site = -1; else continue; } else { /* Calls that are known to not throw need not be marked. */ if (INTVAL (XEXP (note, 0)) <= 0) continue; region = cfun->eh->region_array[INTVAL (XEXP (note, 0))]; this_call_site = lp_info[region->region_number].call_site_index; } if (this_call_site == last_call_site) continue; /* Don't separate a call from it's argument loads. 
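The store of the call-site index into the function context is emitted before the first parameter load, as located by find_first_parameter_load.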
*/ before = insn; if (GET_CODE (insn) == CALL_INSN) before = find_first_parameter_load (insn, NULL_RTX); start_sequence (); mem = adjust_address (cfun->eh->sjlj_fc, TYPE_MODE (integer_type_node), sjlj_fc_call_site_ofs); emit_move_insn (mem, GEN_INT (this_call_site)); p = get_insns (); end_sequence (); emit_insn_before (p, before); last_call_site = this_call_site; } } /* Construct the SjLj_Function_Context. */ static void sjlj_emit_function_enter (rtx dispatch_label) { rtx fn_begin, fc, mem, seq; fc = cfun->eh->sjlj_fc; start_sequence (); /* We're storing this libcall's address into memory instead of calling it directly. Thus, we must call assemble_external_libcall here, as we can not depend on emit_library_call to do it for us. */ assemble_external_libcall (eh_personality_libfunc); mem = adjust_address (fc, Pmode, sjlj_fc_personality_ofs); emit_move_insn (mem, eh_personality_libfunc); mem = adjust_address (fc, Pmode, sjlj_fc_lsda_ofs); if (cfun->uses_eh_lsda) { char buf[20]; rtx sym; ASM_GENERATE_INTERNAL_LABEL (buf, "LLSDA", current_function_funcdef_no); sym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf)); SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_LOCAL; emit_move_insn (mem, sym); } else emit_move_insn (mem, const0_rtx); #ifdef DONT_USE_BUILTIN_SETJMP { rtx x, note; x = emit_library_call_value (setjmp_libfunc, NULL_RTX, LCT_RETURNS_TWICE, TYPE_MODE (integer_type_node), 1, plus_constant (XEXP (fc, 0), sjlj_fc_jbuf_ofs), Pmode); note = emit_note (NOTE_INSN_EXPECTED_VALUE); NOTE_EXPECTED_VALUE (note) = gen_rtx_EQ (VOIDmode, x, const0_rtx); emit_cmp_and_jump_insns (x, const0_rtx, NE, 0, TYPE_MODE (integer_type_node), 0, dispatch_label); } #else expand_builtin_setjmp_setup (plus_constant (XEXP (fc, 0), sjlj_fc_jbuf_ofs), dispatch_label); #endif emit_library_call (unwind_sjlj_register_libfunc, LCT_NORMAL, VOIDmode, 1, XEXP (fc, 0), Pmode); seq = get_insns (); end_sequence (); /* ??? Instead of doing this at the beginning of the function, do this in a block that is at loop level 0 and dominates all can_throw_internal instructions. */ for (fn_begin = get_insns (); ; fn_begin = NEXT_INSN (fn_begin)) if (GET_CODE (fn_begin) == NOTE && (NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_FUNCTION_BEG || NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_BASIC_BLOCK)) break; if (NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_FUNCTION_BEG) insert_insn_on_edge (seq, ENTRY_BLOCK_PTR->succ); else { rtx last = BB_END (ENTRY_BLOCK_PTR->succ->dest); for (; ; fn_begin = NEXT_INSN (fn_begin)) if ((GET_CODE (fn_begin) == NOTE && NOTE_LINE_NUMBER (fn_begin) == NOTE_INSN_FUNCTION_BEG) || fn_begin == last) break; emit_insn_after (seq, fn_begin); } } /* Call back from expand_function_end to know where we should put the call to unwind_sjlj_unregister_libfunc if needed. */ void sjlj_emit_function_exit_after (rtx after) { cfun->eh->sjlj_exit_after = after; } static void sjlj_emit_function_exit (void) { rtx seq; edge e; start_sequence (); emit_library_call (unwind_sjlj_unregister_libfunc, LCT_NORMAL, VOIDmode, 1, XEXP (cfun->eh->sjlj_fc, 0), Pmode); seq = get_insns (); end_sequence (); /* ??? Really this can be done in any block at loop level 0 that post-dominates all can_throw_internal instructions. This is the last possible moment. */ for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) if (e->flags & EDGE_FALLTHRU) break; if (e) { rtx insn; /* Figure out whether the place we are supposed to insert libcall is inside the last basic block or after it. In the other case we need to emit to edge. 
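If sjlj_exit_after lies beyond the end of that block, the unregister call is inserted on the fallthru edge; otherwise it is emitted directly after sjlj_exit_after.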
*/ if (e->src->next_bb != EXIT_BLOCK_PTR) abort (); for (insn = NEXT_INSN (BB_END (e->src)); insn; insn = NEXT_INSN (insn)) if (insn == cfun->eh->sjlj_exit_after) break; if (insn) insert_insn_on_edge (seq, e); else { insn = cfun->eh->sjlj_exit_after; if (GET_CODE (insn) == CODE_LABEL) insn = NEXT_INSN (insn); emit_insn_after (seq, insn); } } } static void sjlj_emit_dispatch_table (rtx dispatch_label, struct sjlj_lp_info *lp_info) { int i, first_reachable; rtx mem, dispatch, seq, fc; rtx before; basic_block bb; edge e; fc = cfun->eh->sjlj_fc; start_sequence (); emit_label (dispatch_label); #ifndef DONT_USE_BUILTIN_SETJMP expand_builtin_setjmp_receiver (dispatch_label); #endif /* Load up dispatch index, exc_ptr and filter values from the function context. */ mem = adjust_address (fc, TYPE_MODE (integer_type_node), sjlj_fc_call_site_ofs); dispatch = copy_to_reg (mem); mem = adjust_address (fc, word_mode, sjlj_fc_data_ofs); if (word_mode != ptr_mode) { #ifdef POINTERS_EXTEND_UNSIGNED mem = convert_memory_address (ptr_mode, mem); #else mem = convert_to_mode (ptr_mode, mem, 0); #endif } emit_move_insn (cfun->eh->exc_ptr, mem); mem = adjust_address (fc, word_mode, sjlj_fc_data_ofs + UNITS_PER_WORD); emit_move_insn (cfun->eh->filter, mem); /* Jump to one of the directly reachable regions. */ /* ??? This really ought to be using a switch statement. */ first_reachable = 0; for (i = cfun->eh->last_region_number; i > 0; --i) { if (! lp_info[i].directly_reachable) continue; if (! first_reachable) { first_reachable = i; continue; } emit_cmp_and_jump_insns (dispatch, GEN_INT (lp_info[i].dispatch_index), EQ, NULL_RTX, TYPE_MODE (integer_type_node), 0, cfun->eh->region_array[i]->post_landing_pad); } seq = get_insns (); end_sequence (); before = cfun->eh->region_array[first_reachable]->post_landing_pad; bb = emit_to_new_bb_before (seq, before); e = make_edge (bb, bb->next_bb, EDGE_FALLTHRU); e->count = bb->count; e->probability = REG_BR_PROB_BASE; } static void sjlj_build_landing_pads (void) { struct sjlj_lp_info *lp_info; lp_info = xcalloc (cfun->eh->last_region_number + 1, sizeof (struct sjlj_lp_info)); if (sjlj_find_directly_reachable_regions (lp_info)) { rtx dispatch_label = gen_label_rtx (); cfun->eh->sjlj_fc = assign_stack_local (TYPE_MODE (sjlj_fc_type_node), int_size_in_bytes (sjlj_fc_type_node), TYPE_ALIGN (sjlj_fc_type_node)); sjlj_assign_call_site_values (dispatch_label, lp_info); sjlj_mark_call_sites (lp_info); sjlj_emit_function_enter (dispatch_label); sjlj_emit_dispatch_table (dispatch_label, lp_info); sjlj_emit_function_exit (); } free (lp_info); } void finish_eh_generation (void) { basic_block bb; /* Nothing to do if no regions created. */ if (cfun->eh->region_tree == NULL) return; /* The object here is to provide find_basic_blocks with detailed information (via reachable_handlers) on how exception control flows within the function. In this first pass, we can include type information garnered from ERT_THROW and ERT_ALLOWED_EXCEPTIONS regions, and hope that it will be useful in deleting unreachable handlers. Subsequently, we will generate landing pads which will connect many of the handlers, and then type information will not be effective. Still, this is a win over previous implementations. */ cleanup_cfg (CLEANUP_PRE_LOOP | CLEANUP_NO_INSN_DEL); /* These registers are used by the landing pads. Make sure they have been generated. */ get_exception_pointer (cfun); get_exception_filter (cfun); /* Construct the landing pads. 
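Filter values are assigned first, then the post-landing pads are built and connected, and finally either the sjlj or the dwarf2 landing pads are emitted, depending on USING_SJLJ_EXCEPTIONS.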
*/ assign_filter_values (); build_post_landing_pads (); connect_post_landing_pads (); if (USING_SJLJ_EXCEPTIONS) sjlj_build_landing_pads (); else dw2_build_landing_pads (); cfun->eh->built_landing_pads = 1; /* We've totally changed the CFG. Start over. */ find_exception_handler_labels (); break_superblocks (); if (USING_SJLJ_EXCEPTIONS) commit_edge_insertions (); FOR_EACH_BB (bb) { edge e, next; bool eh = false; for (e = bb->succ; e; e = next) { next = e->succ_next; if (e->flags & EDGE_EH) { remove_edge (e); eh = true; } } if (eh) rtl_make_eh_edge (NULL, bb, BB_END (bb)); } cleanup_cfg (CLEANUP_PRE_LOOP | CLEANUP_NO_INSN_DEL); } static hashval_t ehl_hash (const void *pentry) { struct ehl_map_entry *entry = (struct ehl_map_entry *) pentry; /* 2^32 * ((sqrt(5) - 1) / 2) */ const hashval_t scaled_golden_ratio = 0x9e3779b9; return CODE_LABEL_NUMBER (entry->label) * scaled_golden_ratio; } static int ehl_eq (const void *pentry, const void *pdata) { struct ehl_map_entry *entry = (struct ehl_map_entry *) pentry; struct ehl_map_entry *data = (struct ehl_map_entry *) pdata; return entry->label == data->label; } /* This section handles removing dead code for flow. */ /* Remove LABEL from exception_handler_label_map. */ static void remove_exception_handler_label (rtx label) { struct ehl_map_entry **slot, tmp; /* If exception_handler_label_map was not built yet, there is nothing to do. */ if (cfun->eh->exception_handler_label_map == NULL) return; tmp.label = label; slot = (struct ehl_map_entry **) htab_find_slot (cfun->eh->exception_handler_label_map, &tmp, NO_INSERT); if (! slot) abort (); htab_clear_slot (cfun->eh->exception_handler_label_map, (void **) slot); } /* Splice REGION from the region tree etc. */ static void remove_eh_handler (struct eh_region *region) { struct eh_region **pp, **pp_start, *p, *outer, *inner; rtx lab; /* For the benefit of efficiently handling REG_EH_REGION notes, replace this region in the region array with its containing region. Note that previous region deletions may result in multiple copies of this region in the array, so we have a list of alternate numbers by which we are known. */ outer = region->outer; cfun->eh->region_array[region->region_number] = outer; if (region->aka) { int i; EXECUTE_IF_SET_IN_BITMAP (region->aka, 0, i, { cfun->eh->region_array[i] = outer; }); } if (outer) { if (!outer->aka) outer->aka = BITMAP_GGC_ALLOC (); if (region->aka) bitmap_a_or_b (outer->aka, outer->aka, region->aka); bitmap_set_bit (outer->aka, region->region_number); } if (cfun->eh->built_landing_pads) lab = region->landing_pad; else lab = region->label; if (lab) remove_exception_handler_label (lab); if (outer) pp_start = &outer->inner; else pp_start = &cfun->eh->region_tree; for (pp = pp_start, p = *pp; p != region; pp = &p->next_peer, p = *pp) continue; *pp = region->next_peer; inner = region->inner; if (inner) { for (p = inner; p->next_peer ; p = p->next_peer) p->outer = outer; p->outer = outer; p->next_peer = *pp_start; *pp_start = inner; } if (region->type == ERT_CATCH) { struct eh_region *try, *next, *prev; for (try = region->next_peer; try->type == ERT_CATCH; try = try->next_peer) continue; if (try->type != ERT_TRY) abort (); next = region->u.catch.next_catch; prev = region->u.catch.prev_catch; if (next) next->u.catch.prev_catch = prev; else try->u.try.last_catch = prev; if (prev) prev->u.catch.next_catch = next; else { try->u.try.catch = next; if (! next) remove_eh_handler (try); } } } /* LABEL heads a basic block that is about to be deleted. 
If this label corresponds to an exception region, we may be able to delete the region. */ void maybe_remove_eh_handler (rtx label) { struct ehl_map_entry **slot, tmp; struct eh_region *region; /* ??? After generating landing pads, it's not so simple to determine if the region data is completely unused. One must examine the landing pad and the post landing pad, and whether an inner try block is referencing the catch handlers directly. */ if (cfun->eh->built_landing_pads) return; tmp.label = label; slot = (struct ehl_map_entry **) htab_find_slot (cfun->eh->exception_handler_label_map, &tmp, NO_INSERT); if (! slot) return; region = (*slot)->region; if (! region) return; /* Flow will want to remove MUST_NOT_THROW regions as unreachable because there is no path to the fallback call to terminate. But the region continues to affect call-site data until there are no more contained calls, which we don't see here. */ if (region->type == ERT_MUST_NOT_THROW) { htab_clear_slot (cfun->eh->exception_handler_label_map, (void **) slot); region->label = NULL_RTX; } else remove_eh_handler (region); } /* Invokes CALLBACK for every exception handler label. Only used by old loop hackery; should not be used by new code. */ void for_each_eh_label (void (*callback) (rtx)) { htab_traverse (cfun->eh->exception_handler_label_map, for_each_eh_label_1, (void *) &callback); } static int for_each_eh_label_1 (void **pentry, void *data) { struct ehl_map_entry *entry = *(struct ehl_map_entry **)pentry; void (*callback) (rtx) = *(void (**) (rtx)) data; (*callback) (entry->label); return 1; } /* Invoke CALLBACK for every exception region in the current function. */ void for_each_eh_region (void (*callback) (struct eh_region *)) { int i, n = cfun->eh->last_region_number; for (i = 1; i <= n; ++i) { struct eh_region *region = cfun->eh->region_array[i]; if (region) (*callback) (region); } } /* This section describes CFG exception edges for flow. */ /* For communicating between calls to reachable_next_level. */ struct reachable_info { tree types_caught; tree types_allowed; void (*callback) (struct eh_region *, void *); void *callback_data; bool saw_any_handlers; }; /* A subroutine of reachable_next_level. Return true if TYPE, or a base class of TYPE, is in HANDLED. */ int check_handled (tree handled, tree type) { tree t; /* We can check for exact matches without front-end help. */ if (! lang_eh_type_covers) { for (t = handled; t ; t = TREE_CHAIN (t)) if (TREE_VALUE (t) == type) return 1; } else { for (t = handled; t ; t = TREE_CHAIN (t)) if ((*lang_eh_type_covers) (TREE_VALUE (t), type)) return 1; } return 0; } /* A subroutine of reachable_next_level. If we are collecting a list of handlers, add one. After landing pad generation, reference it instead of the handlers themselves. Further, the handlers are all wired together, so by referencing one, we've got them all. Before landing pad generation we reference each handler individually. LP_REGION contains the landing pad; REGION is the handler. */ static void add_reachable_handler (struct reachable_info *info, struct eh_region *lp_region, struct eh_region *region) { if (! info) return; info->saw_any_handlers = true; if (cfun->eh->built_landing_pads) info->callback (lp_region, info->callback_data); else info->callback (region, info->callback_data); } /* Process one level of exception regions for reachability. If TYPE_THROWN is non-null, then it is the *exact* type being propagated. 
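(This happens when the throw site is wrapped in an ERT_THROW region recording the thrown type.)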
If INFO is non-null, then collect handler labels and caught/allowed type information between invocations. */ static enum reachable_code reachable_next_level (struct eh_region *region, tree type_thrown, struct reachable_info *info) { switch (region->type) { case ERT_CLEANUP: /* Before landing-pad generation, we model control flow directly to the individual handlers. In this way we can see that catch handler types may shadow one another. */ add_reachable_handler (info, region, region); return RNL_MAYBE_CAUGHT; case ERT_TRY: { struct eh_region *c; enum reachable_code ret = RNL_NOT_CAUGHT; for (c = region->u.try.catch; c ; c = c->u.catch.next_catch) { /* A catch-all handler ends the search. */ if (c->u.catch.type_list == NULL) { add_reachable_handler (info, region, c); return RNL_CAUGHT; } if (type_thrown) { /* If we have at least one type match, end the search. */ tree tp_node = c->u.catch.type_list; for (; tp_node; tp_node = TREE_CHAIN (tp_node)) { tree type = TREE_VALUE (tp_node); if (type == type_thrown || (lang_eh_type_covers && (*lang_eh_type_covers) (type, type_thrown))) { add_reachable_handler (info, region, c); return RNL_CAUGHT; } } /* If we have definitive information of a match failure, the catch won't trigger. */ if (lang_eh_type_covers) return RNL_NOT_CAUGHT; } /* At this point, we either don't know what type is thrown or don't have front-end assistance to help deciding if it is covered by one of the types in the list for this region. We'd then like to add this region to the list of reachable handlers since it is indeed potentially reachable based on the information we have. Actually, this handler is for sure not reachable if all the types it matches have already been caught. That is, it is only potentially reachable if at least one of the types it catches has not been previously caught. */ if (! info) ret = RNL_MAYBE_CAUGHT; else { tree tp_node = c->u.catch.type_list; bool maybe_reachable = false; /* Compute the potential reachability of this handler and update the list of types caught at the same time. */ for (; tp_node; tp_node = TREE_CHAIN (tp_node)) { tree type = TREE_VALUE (tp_node); if (! check_handled (info->types_caught, type)) { info->types_caught = tree_cons (NULL, type, info->types_caught); maybe_reachable = true; } } if (maybe_reachable) { add_reachable_handler (info, region, c); /* ??? If the catch type is a base class of every allowed type, then we know we can stop the search. */ ret = RNL_MAYBE_CAUGHT; } } } return ret; } case ERT_ALLOWED_EXCEPTIONS: /* An empty list of types definitely ends the search. */ if (region->u.allowed.type_list == NULL_TREE) { add_reachable_handler (info, region, region); return RNL_CAUGHT; } /* Collect a list of lists of allowed types for use in detecting when a catch may be transformed into a catch-all. */ if (info) info->types_allowed = tree_cons (NULL_TREE, region->u.allowed.type_list, info->types_allowed); /* If we have definitive information about the type hierarchy, then we can tell if the thrown type will pass through the filter. */ if (type_thrown && lang_eh_type_covers) { if (check_handled (region->u.allowed.type_list, type_thrown)) return RNL_NOT_CAUGHT; else { add_reachable_handler (info, region, region); return RNL_CAUGHT; } } add_reachable_handler (info, region, region); return RNL_MAYBE_CAUGHT; case ERT_CATCH: /* Catch regions are handled by their controlling try region. */ return RNL_NOT_CAUGHT; case ERT_MUST_NOT_THROW: /* Here we end our search, since no exceptions may propagate. 
If we've touched down at some landing pad previous, then the explicit function call we generated may be used. Otherwise the call is made by the runtime. */ if (info && info->saw_any_handlers) { add_reachable_handler (info, region, region); return RNL_CAUGHT; } else return RNL_BLOCKED; case ERT_THROW: case ERT_FIXUP: case ERT_UNKNOWN: /* Shouldn't see these here. */ break; } abort (); } /* Invoke CALLBACK on each region reachable from REGION_NUMBER. */ void foreach_reachable_handler (int region_number, bool is_resx, void (*callback) (struct eh_region *, void *), void *callback_data) { struct reachable_info info; struct eh_region *region; tree type_thrown; memset (&info, 0, sizeof (info)); info.callback = callback; info.callback_data = callback_data; region = cfun->eh->region_array[region_number]; type_thrown = NULL_TREE; if (is_resx) { /* A RESX leaves a region instead of entering it. Thus the region itself may have been deleted out from under us. */ if (region == NULL) return; region = region->outer; } else if (region->type == ERT_THROW) { type_thrown = region->u.throw.type; region = region->outer; } while (region) { if (reachable_next_level (region, type_thrown, &info) >= RNL_CAUGHT) break; /* If we have processed one cleanup, there is no point in processing any more of them. Each cleanup will have an edge to the next outer cleanup region, so the flow graph will be accurate. */ if (region->type == ERT_CLEANUP) region = region->u.cleanup.prev_try; else region = region->outer; } } /* Retrieve a list of labels of exception handlers which can be reached by a given insn. */ static void arh_to_landing_pad (struct eh_region *region, void *data) { rtx *p_handlers = data; if (! *p_handlers) *p_handlers = alloc_INSN_LIST (region->landing_pad, NULL_RTX); } static void arh_to_label (struct eh_region *region, void *data) { rtx *p_handlers = data; *p_handlers = alloc_INSN_LIST (region->label, *p_handlers); } rtx reachable_handlers (rtx insn) { bool is_resx = false; rtx handlers = NULL; int region_number; if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RESX) { region_number = XINT (PATTERN (insn), 0); is_resx = true; } else { rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (!note || INTVAL (XEXP (note, 0)) <= 0) return NULL; region_number = INTVAL (XEXP (note, 0)); } foreach_reachable_handler (region_number, is_resx, (cfun->eh->built_landing_pads ? arh_to_landing_pad : arh_to_label), &handlers); return handlers; } /* Determine if the given INSN can throw an exception that is caught within the function. */ bool can_throw_internal_1 (int region_number) { struct eh_region *region; tree type_thrown; region = cfun->eh->region_array[region_number]; type_thrown = NULL_TREE; if (region->type == ERT_THROW) { type_thrown = region->u.throw.type; region = region->outer; } /* If this exception is ignored by each and every containing region, then control passes straight out. The runtime may handle some regions, which also do not require processing internally. */ for (; region; region = region->outer) { enum reachable_code how = reachable_next_level (region, type_thrown, 0); if (how == RNL_BLOCKED) return false; if (how != RNL_NOT_CAUGHT) return true; } return false; } bool can_throw_internal (rtx insn) { rtx note; if (! 
INSN_P (insn)) return false; if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RESX && XINT (PATTERN (insn), 0) > 0) return can_throw_internal_1 (XINT (PATTERN (insn), 0)); if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) insn = XVECEXP (PATTERN (insn), 0, 0); /* Every insn that might throw has an EH_REGION note. */ note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (!note || INTVAL (XEXP (note, 0)) <= 0) return false; return can_throw_internal_1 (INTVAL (XEXP (note, 0))); } /* Determine if the given INSN can throw an exception that is visible outside the function. */ bool can_throw_external_1 (int region_number) { struct eh_region *region; tree type_thrown; region = cfun->eh->region_array[region_number]; type_thrown = NULL_TREE; if (region->type == ERT_THROW) { type_thrown = region->u.throw.type; region = region->outer; } /* If the exception is caught or blocked by any containing region, then it is not seen by any calling function. */ for (; region ; region = region->outer) if (reachable_next_level (region, type_thrown, NULL) >= RNL_CAUGHT) return false; return true; } bool can_throw_external (rtx insn) { rtx note; if (! INSN_P (insn)) return false; if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) insn = XVECEXP (PATTERN (insn), 0, 0); note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (!note) { /* Calls (and trapping insns) without notes are outside any exception handling region in this function. We have to assume it might throw. Given that the front end and middle ends mark known NOTHROW functions, this isn't so wildly inaccurate. */ return (GET_CODE (insn) == CALL_INSN || (flag_non_call_exceptions && may_trap_p (PATTERN (insn)))); } if (INTVAL (XEXP (note, 0)) <= 0) return false; return can_throw_external_1 (INTVAL (XEXP (note, 0))); } /* Set current_function_nothrow and cfun->all_throwers_are_sibcalls. */ void set_nothrow_function_flags (void) { rtx insn; current_function_nothrow = 1; /* Assume cfun->all_throwers_are_sibcalls until we encounter something that can throw an exception. We specifically exempt CALL_INSNs that are SIBLING_CALL_P, as these are really jumps, and can't throw. Most CALL_INSNs are not SIBLING_CALL_P, so this is optimistic. */ cfun->all_throwers_are_sibcalls = 1; if (! flag_exceptions) return; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (can_throw_external (insn)) { current_function_nothrow = 0; if (GET_CODE (insn) != CALL_INSN || !SIBLING_CALL_P (insn)) { cfun->all_throwers_are_sibcalls = 0; return; } } for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1)) if (can_throw_external (insn)) { current_function_nothrow = 0; if (GET_CODE (insn) != CALL_INSN || !SIBLING_CALL_P (insn)) { cfun->all_throwers_are_sibcalls = 0; return; } } } /* Various hooks for unwind library. */ /* Do any necessary initialization to access arbitrary stack frames. On the SPARC, this means flushing the register windows. */ void expand_builtin_unwind_init (void) { /* Set this so all the registers get saved in our frame; we need to be able to copy the saved values for any registers from frames we unwind. 
*/ current_function_has_nonlocal_label = 1; #ifdef SETUP_FRAME_ADDRESSES SETUP_FRAME_ADDRESSES (); #endif } rtx expand_builtin_eh_return_data_regno (tree arglist) { tree which = TREE_VALUE (arglist); unsigned HOST_WIDE_INT iwhich; if (TREE_CODE (which) != INTEGER_CST) { error ("argument of `__builtin_eh_return_regno' must be constant"); return constm1_rtx; } iwhich = tree_low_cst (which, 1); iwhich = EH_RETURN_DATA_REGNO (iwhich); if (iwhich == INVALID_REGNUM) return constm1_rtx; #ifdef DWARF_FRAME_REGNUM iwhich = DWARF_FRAME_REGNUM (iwhich); #else iwhich = DBX_REGISTER_NUMBER (iwhich); #endif return GEN_INT (iwhich); } /* Given a value extracted from the return address register or stack slot, return the actual address encoded in that value. */ rtx expand_builtin_extract_return_addr (tree addr_tree) { rtx addr = expand_expr (addr_tree, NULL_RTX, Pmode, 0); if (GET_MODE (addr) != Pmode && GET_MODE (addr) != VOIDmode) { #ifdef POINTERS_EXTEND_UNSIGNED addr = convert_memory_address (Pmode, addr); #else addr = convert_to_mode (Pmode, addr, 0); #endif } /* First mask out any unwanted bits. */ #ifdef MASK_RETURN_ADDR expand_and (Pmode, addr, MASK_RETURN_ADDR, addr); #endif /* Then adjust to find the real return address. */ #if defined (RETURN_ADDR_OFFSET) addr = plus_constant (addr, RETURN_ADDR_OFFSET); #endif return addr; } /* Given an actual address in addr_tree, do any necessary encoding and return the value to be stored in the return address register or stack slot so the epilogue will return to that address. */ rtx expand_builtin_frob_return_addr (tree addr_tree) { rtx addr = expand_expr (addr_tree, NULL_RTX, ptr_mode, 0); addr = convert_memory_address (Pmode, addr); #ifdef RETURN_ADDR_OFFSET addr = force_reg (Pmode, addr); addr = plus_constant (addr, -RETURN_ADDR_OFFSET); #endif return addr; } /* Set up the epilogue with the magic bits we'll need to return to the exception handler. */ void expand_builtin_eh_return (tree stackadj_tree ATTRIBUTE_UNUSED, tree handler_tree) { rtx tmp; #ifdef EH_RETURN_STACKADJ_RTX tmp = expand_expr (stackadj_tree, cfun->eh->ehr_stackadj, VOIDmode, 0); tmp = convert_memory_address (Pmode, tmp); if (!cfun->eh->ehr_stackadj) cfun->eh->ehr_stackadj = copy_to_reg (tmp); else if (tmp != cfun->eh->ehr_stackadj) emit_move_insn (cfun->eh->ehr_stackadj, tmp); #endif tmp = expand_expr (handler_tree, cfun->eh->ehr_handler, VOIDmode, 0); tmp = convert_memory_address (Pmode, tmp); if (!cfun->eh->ehr_handler) cfun->eh->ehr_handler = copy_to_reg (tmp); else if (tmp != cfun->eh->ehr_handler) emit_move_insn (cfun->eh->ehr_handler, tmp); if (!cfun->eh->ehr_label) cfun->eh->ehr_label = gen_label_rtx (); emit_jump (cfun->eh->ehr_label); } void expand_eh_return (void) { rtx around_label; if (! cfun->eh->ehr_label) return; current_function_calls_eh_return = 1; #ifdef EH_RETURN_STACKADJ_RTX emit_move_insn (EH_RETURN_STACKADJ_RTX, const0_rtx); #endif around_label = gen_label_rtx (); emit_jump (around_label); emit_label (cfun->eh->ehr_label); clobber_return_register (); #ifdef EH_RETURN_STACKADJ_RTX emit_move_insn (EH_RETURN_STACKADJ_RTX, cfun->eh->ehr_stackadj); #endif #ifdef HAVE_eh_return if (HAVE_eh_return) emit_insn (gen_eh_return (cfun->eh->ehr_handler)); else #endif { #ifdef EH_RETURN_HANDLER_RTX emit_move_insn (EH_RETURN_HANDLER_RTX, cfun->eh->ehr_handler); #else error ("__builtin_eh_return not supported on this target"); #endif } emit_label (around_label); } /* Convert a ptr_mode address ADDR_TREE to a Pmode address controlled by POINTERS_EXTEND_UNSIGNED and return it. 
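   (Before that helper, an aside on the two return-address hooks just
   defined: expand_builtin_extract_return_addr masks the raw slot value
   with MASK_RETURN_ADDR and then adds RETURN_ADDR_OFFSET, while
   expand_builtin_frob_return_addr subtracts the offset again, so the two
   are inverses on targets that do no masking.  The helpers below restate
   that arithmetic on host integers; MASK and OFF are stand-ins for the
   target macros and the example_ names are hypothetical.)  */

static unsigned long ATTRIBUTE_UNUSED
example_extract_return_addr (unsigned long raw, unsigned long mask, long off)
{
  /* Strip the bits the target hides in the return-address slot, then
     step to the real return point.  */
  return (raw & mask) + off;
}

static unsigned long ATTRIBUTE_UNUSED
example_frob_return_addr (unsigned long addr, long off)
{
  /* The inverse: undo the offset so the value stored back into the slot
     round-trips through the extraction above.  */
  return addr - off;
}

/* As the comment above says, the next helper converts a ptr_mode address
   to Pmode.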
*/ rtx expand_builtin_extend_pointer (tree addr_tree) { rtx addr = expand_expr (addr_tree, NULL_RTX, ptr_mode, 0); int extend; #ifdef POINTERS_EXTEND_UNSIGNED extend = POINTERS_EXTEND_UNSIGNED; #else /* The previous EH code did an unsigned extend by default, so we do this also for consistency. */ extend = 1; #endif return convert_modes (word_mode, ptr_mode, addr, extend); } /* In the following functions, we represent entries in the action table as 1-based indices. Special cases are: 0: null action record, non-null landing pad; implies cleanups -1: null action record, null landing pad; implies no action -2: no call-site entry; implies must_not_throw -3: we have yet to process outer regions Further, no special cases apply to the "next" field of the record. For next, 0 means end of list. */ struct action_record { int offset; int filter; int next; }; static int action_record_eq (const void *pentry, const void *pdata) { const struct action_record *entry = (const struct action_record *) pentry; const struct action_record *data = (const struct action_record *) pdata; return entry->filter == data->filter && entry->next == data->next; } static hashval_t action_record_hash (const void *pentry) { const struct action_record *entry = (const struct action_record *) pentry; return entry->next * 1009 + entry->filter; } static int add_action_record (htab_t ar_hash, int filter, int next) { struct action_record **slot, *new, tmp; tmp.filter = filter; tmp.next = next; slot = (struct action_record **) htab_find_slot (ar_hash, &tmp, INSERT); if ((new = *slot) == NULL) { new = xmalloc (sizeof (*new)); new->offset = VARRAY_ACTIVE_SIZE (cfun->eh->action_record_data) + 1; new->filter = filter; new->next = next; *slot = new; /* The filter value goes in untouched. The link to the next record is a "self-relative" byte offset, or zero to indicate that there is no next record. So convert the absolute 1 based indices we've been carrying around into a displacement. */ push_sleb128 (&cfun->eh->action_record_data, filter); if (next) next -= VARRAY_ACTIVE_SIZE (cfun->eh->action_record_data) + 1; push_sleb128 (&cfun->eh->action_record_data, next); } return new->offset; } static int collect_one_action_chain (htab_t ar_hash, struct eh_region *region) { struct eh_region *c; int next; /* If we've reached the top of the region chain, then we have no actions, and require no landing pad. */ if (region == NULL) return -1; switch (region->type) { case ERT_CLEANUP: /* A cleanup adds a zero filter to the beginning of the chain, but there are special cases to look out for. If there are *only* cleanups along a path, then it compresses to a zero action. Further, if there are multiple cleanups along a path, we only need to represent one of them, as that is enough to trigger entry to the landing pad at runtime. */ next = collect_one_action_chain (ar_hash, region->outer); if (next <= 0) return 0; for (c = region->outer; c ; c = c->outer) if (c->type == ERT_CLEANUP) return next; return add_action_record (ar_hash, 0, next); case ERT_TRY: /* Process the associated catch regions in reverse order. If there's a catch-all handler, then we don't need to search outer regions. Use a magic -3 value to record that we haven't done the outer search. */ next = -3; for (c = region->u.try.last_catch; c ; c = c->u.catch.prev_catch) { if (c->u.catch.type_list == NULL) { /* Retrieve the filter from the head of the filter list where we have stored it (see assign_filter_values). 
*/ int filter = TREE_INT_CST_LOW (TREE_VALUE (c->u.catch.filter_list)); next = add_action_record (ar_hash, filter, 0); } else { /* Once the outer search is done, trigger an action record for each filter we have. */ tree flt_node; if (next == -3) { next = collect_one_action_chain (ar_hash, region->outer); /* If there is no next action, terminate the chain. */ if (next == -1) next = 0; /* If all outer actions are cleanups or must_not_throw, we'll have no action record for it, since we had wanted to encode these states in the call-site record directly. Add a cleanup action to the chain to catch these. */ else if (next <= 0) next = add_action_record (ar_hash, 0, 0); } flt_node = c->u.catch.filter_list; for (; flt_node; flt_node = TREE_CHAIN (flt_node)) { int filter = TREE_INT_CST_LOW (TREE_VALUE (flt_node)); next = add_action_record (ar_hash, filter, next); } } } return next; case ERT_ALLOWED_EXCEPTIONS: /* An exception specification adds its filter to the beginning of the chain. */ next = collect_one_action_chain (ar_hash, region->outer); /* If there is no next action, terminate the chain. */ if (next == -1) next = 0; /* If all outer actions are cleanups or must_not_throw, we'll have no action record for it, since we had wanted to encode these states in the call-site record directly. Add a cleanup action to the chain to catch these. */ else if (next <= 0) next = add_action_record (ar_hash, 0, 0); return add_action_record (ar_hash, region->u.allowed.filter, next); case ERT_MUST_NOT_THROW: /* A must-not-throw region with no inner handlers or cleanups requires no call-site entry. Note that this differs from the no handler or cleanup case in that we do require an lsda to be generated. Return a magic -2 value to record this. */ return -2; case ERT_CATCH: case ERT_THROW: /* CATCH regions are handled in TRY above. THROW regions are for optimization information only and produce no output. */ return collect_one_action_chain (ar_hash, region->outer); default: abort (); } } static int add_call_site (rtx landing_pad, int action) { struct call_site_record *data = cfun->eh->call_site_data; int used = cfun->eh->call_site_data_used; int size = cfun->eh->call_site_data_size; if (used >= size) { size = (size ? size * 2 : 64); data = ggc_realloc (data, sizeof (*data) * size); cfun->eh->call_site_data = data; cfun->eh->call_site_data_size = size; } data[used].landing_pad = landing_pad; data[used].action = action; cfun->eh->call_site_data_used = used + 1; return used + call_site_base; } /* Turn REG_EH_REGION notes back into NOTE_INSN_EH_REGION notes. The new note numbers will not refer to region numbers, but instead to call site entries. */ void convert_to_eh_region_ranges (void) { rtx insn, iter, note; htab_t ar_hash; int last_action = -3; rtx last_action_insn = NULL_RTX; rtx last_landing_pad = NULL_RTX; rtx first_no_action_insn = NULL_RTX; int call_site = 0; if (USING_SJLJ_EXCEPTIONS || cfun->eh->region_tree == NULL) return; VARRAY_UCHAR_INIT (cfun->eh->action_record_data, 64, "action_record_data"); ar_hash = htab_create (31, action_record_hash, action_record_eq, free); for (iter = get_insns (); iter ; iter = NEXT_INSN (iter)) if (INSN_P (iter)) { struct eh_region *region; int this_action; rtx this_landing_pad; insn = iter; if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) insn = XVECEXP (PATTERN (insn), 0, 0); note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (!note) { if (! 
(GET_CODE (insn) == CALL_INSN || (flag_non_call_exceptions && may_trap_p (PATTERN (insn))))) continue; this_action = -1; region = NULL; } else { if (INTVAL (XEXP (note, 0)) <= 0) continue; region = cfun->eh->region_array[INTVAL (XEXP (note, 0))]; this_action = collect_one_action_chain (ar_hash, region); } /* Existence of catch handlers, or must-not-throw regions implies that an lsda is needed (even if empty). */ if (this_action != -1) cfun->uses_eh_lsda = 1; /* Delay creation of region notes for no-action regions until we're sure that an lsda will be required. */ else if (last_action == -3) { first_no_action_insn = iter; last_action = -1; } /* Cleanups and handlers may share action chains but not landing pads. Collect the landing pad for this region. */ if (this_action >= 0) { struct eh_region *o; for (o = region; ! o->landing_pad ; o = o->outer) continue; this_landing_pad = o->landing_pad; } else this_landing_pad = NULL_RTX; /* Differing actions or landing pads implies a change in call-site info, which implies some EH_REGION note should be emitted. */ if (last_action != this_action || last_landing_pad != this_landing_pad) { /* If we'd not seen a previous action (-3) or the previous action was must-not-throw (-2), then we do not need an end note. */ if (last_action >= -1) { /* If we delayed the creation of the begin, do it now. */ if (first_no_action_insn) { call_site = add_call_site (NULL_RTX, 0); note = emit_note_before (NOTE_INSN_EH_REGION_BEG, first_no_action_insn); NOTE_EH_HANDLER (note) = call_site; first_no_action_insn = NULL_RTX; } note = emit_note_after (NOTE_INSN_EH_REGION_END, last_action_insn); NOTE_EH_HANDLER (note) = call_site; } /* If the new action is must-not-throw, then no region notes are created. */ if (this_action >= -1) { call_site = add_call_site (this_landing_pad, this_action < 0 ? 0 : this_action); note = emit_note_before (NOTE_INSN_EH_REGION_BEG, iter); NOTE_EH_HANDLER (note) = call_site; } last_action = this_action; last_landing_pad = this_landing_pad; } last_action_insn = iter; } if (last_action >= -1 && ! first_no_action_insn) { note = emit_note_after (NOTE_INSN_EH_REGION_END, last_action_insn); NOTE_EH_HANDLER (note) = call_site; } htab_delete (ar_hash); } static void push_uleb128 (varray_type *data_area, unsigned int value) { do { unsigned char byte = value & 0x7f; value >>= 7; if (value) byte |= 0x80; VARRAY_PUSH_UCHAR (*data_area, byte); } while (value); } static void push_sleb128 (varray_type *data_area, int value) { unsigned char byte; int more; do { byte = value & 0x7f; value >>= 7; more = ! 
((value == 0 && (byte & 0x40) == 0) || (value == -1 && (byte & 0x40) != 0)); if (more) byte |= 0x80; VARRAY_PUSH_UCHAR (*data_area, byte); } while (more); } #ifndef HAVE_AS_LEB128 static int dw2_size_of_call_site_table (void) { int n = cfun->eh->call_site_data_used; int size = n * (4 + 4 + 4); int i; for (i = 0; i < n; ++i) { struct call_site_record *cs = &cfun->eh->call_site_data[i]; size += size_of_uleb128 (cs->action); } return size; } static int sjlj_size_of_call_site_table (void) { int n = cfun->eh->call_site_data_used; int size = 0; int i; for (i = 0; i < n; ++i) { struct call_site_record *cs = &cfun->eh->call_site_data[i]; size += size_of_uleb128 (INTVAL (cs->landing_pad)); size += size_of_uleb128 (cs->action); } return size; } #endif static void dw2_output_call_site_table (void) { const char *const function_start_lab = IDENTIFIER_POINTER (current_function_func_begin_label); int n = cfun->eh->call_site_data_used; int i; for (i = 0; i < n; ++i) { struct call_site_record *cs = &cfun->eh->call_site_data[i]; char reg_start_lab[32]; char reg_end_lab[32]; char landing_pad_lab[32]; ASM_GENERATE_INTERNAL_LABEL (reg_start_lab, "LEHB", call_site_base + i); ASM_GENERATE_INTERNAL_LABEL (reg_end_lab, "LEHE", call_site_base + i); if (cs->landing_pad) ASM_GENERATE_INTERNAL_LABEL (landing_pad_lab, "L", CODE_LABEL_NUMBER (cs->landing_pad)); /* ??? Perhaps use insn length scaling if the assembler supports generic arithmetic. */ /* ??? Perhaps use attr_length to choose data1 or data2 instead of data4 if the function is small enough. */ #ifdef HAVE_AS_LEB128 dw2_asm_output_delta_uleb128 (reg_start_lab, function_start_lab, "region %d start", i); dw2_asm_output_delta_uleb128 (reg_end_lab, reg_start_lab, "length"); if (cs->landing_pad) dw2_asm_output_delta_uleb128 (landing_pad_lab, function_start_lab, "landing pad"); else dw2_asm_output_data_uleb128 (0, "landing pad"); #else dw2_asm_output_delta (4, reg_start_lab, function_start_lab, "region %d start", i); dw2_asm_output_delta (4, reg_end_lab, reg_start_lab, "length"); if (cs->landing_pad) dw2_asm_output_delta (4, landing_pad_lab, function_start_lab, "landing pad"); else dw2_asm_output_data (4, 0, "landing pad"); #endif dw2_asm_output_data_uleb128 (cs->action, "action"); } call_site_base += n; } static void sjlj_output_call_site_table (void) { int n = cfun->eh->call_site_data_used; int i; for (i = 0; i < n; ++i) { struct call_site_record *cs = &cfun->eh->call_site_data[i]; dw2_asm_output_data_uleb128 (INTVAL (cs->landing_pad), "region %d landing pad", i); dw2_asm_output_data_uleb128 (cs->action, "action"); } call_site_base += n; } /* Tell assembler to switch to the section for the exception handling table. */ void default_exception_section (void) { if (targetm.have_named_sections) { int flags; #ifdef HAVE_LD_RO_RW_SECTION_MIXING int tt_format = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/1); flags = (! flag_pic || ((tt_format & 0x70) != DW_EH_PE_absptr && (tt_format & 0x70) != DW_EH_PE_aligned)) ? 0 : SECTION_WRITE; #else flags = SECTION_WRITE; #endif named_section_flags (".gcc_except_table", flags); } else if (flag_pic) data_section (); else readonly_data_section (); } void output_function_exception_table (void) { int tt_format, cs_format, lp_format, i, n; #ifdef HAVE_AS_LEB128 char ttype_label[32]; char cs_after_size_label[32]; char cs_end_label[32]; #else int call_site_len; #endif int have_tt_data; int tt_format_size = 0; /* Not all functions need anything. */ if (! 
cfun->uses_eh_lsda) return; #ifdef IA64_UNWIND_INFO fputs ("\t.personality\t", asm_out_file); output_addr_const (asm_out_file, eh_personality_libfunc); fputs ("\n\t.handlerdata\n", asm_out_file); /* Note that varasm still thinks we're in the function's code section. The ".endp" directive that will immediately follow will take us back. */ #else targetm.asm_out.exception_section (); #endif have_tt_data = (VARRAY_ACTIVE_SIZE (cfun->eh->ttype_data) > 0 || VARRAY_ACTIVE_SIZE (cfun->eh->ehspec_data) > 0); /* Indicate the format of the @TType entries. */ if (! have_tt_data) tt_format = DW_EH_PE_omit; else { tt_format = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/1); #ifdef HAVE_AS_LEB128 ASM_GENERATE_INTERNAL_LABEL (ttype_label, "LLSDATT", current_function_funcdef_no); #endif tt_format_size = size_of_encoded_value (tt_format); assemble_align (tt_format_size * BITS_PER_UNIT); } targetm.asm_out.internal_label (asm_out_file, "LLSDA", current_function_funcdef_no); /* The LSDA header. */ /* Indicate the format of the landing pad start pointer. An omitted field implies @LPStart == @Start. */ /* Currently we always put @LPStart == @Start. This field would be most useful in moving the landing pads completely out of line to another section, but it could also be used to minimize the size of uleb128 landing pad offsets. */ lp_format = DW_EH_PE_omit; dw2_asm_output_data (1, lp_format, "@LPStart format (%s)", eh_data_format_name (lp_format)); /* @LPStart pointer would go here. */ dw2_asm_output_data (1, tt_format, "@TType format (%s)", eh_data_format_name (tt_format)); #ifndef HAVE_AS_LEB128 if (USING_SJLJ_EXCEPTIONS) call_site_len = sjlj_size_of_call_site_table (); else call_site_len = dw2_size_of_call_site_table (); #endif /* A pc-relative 4-byte displacement to the @TType data. */ if (have_tt_data) { #ifdef HAVE_AS_LEB128 char ttype_after_disp_label[32]; ASM_GENERATE_INTERNAL_LABEL (ttype_after_disp_label, "LLSDATTD", current_function_funcdef_no); dw2_asm_output_delta_uleb128 (ttype_label, ttype_after_disp_label, "@TType base offset"); ASM_OUTPUT_LABEL (asm_out_file, ttype_after_disp_label); #else /* Ug. Alignment queers things. */ unsigned int before_disp, after_disp, last_disp, disp; before_disp = 1 + 1; after_disp = (1 + size_of_uleb128 (call_site_len) + call_site_len + VARRAY_ACTIVE_SIZE (cfun->eh->action_record_data) + (VARRAY_ACTIVE_SIZE (cfun->eh->ttype_data) * tt_format_size)); disp = after_disp; do { unsigned int disp_size, pad; last_disp = disp; disp_size = size_of_uleb128 (disp); pad = before_disp + disp_size + after_disp; if (pad % tt_format_size) pad = tt_format_size - (pad % tt_format_size); else pad = 0; disp = after_disp + pad; } while (disp != last_disp); dw2_asm_output_data_uleb128 (disp, "@TType base offset"); #endif } /* Indicate the format of the call-site offsets. 
*/ #ifdef HAVE_AS_LEB128 cs_format = DW_EH_PE_uleb128; #else cs_format = DW_EH_PE_udata4; #endif dw2_asm_output_data (1, cs_format, "call-site format (%s)", eh_data_format_name (cs_format)); #ifdef HAVE_AS_LEB128 ASM_GENERATE_INTERNAL_LABEL (cs_after_size_label, "LLSDACSB", current_function_funcdef_no); ASM_GENERATE_INTERNAL_LABEL (cs_end_label, "LLSDACSE", current_function_funcdef_no); dw2_asm_output_delta_uleb128 (cs_end_label, cs_after_size_label, "Call-site table length"); ASM_OUTPUT_LABEL (asm_out_file, cs_after_size_label); if (USING_SJLJ_EXCEPTIONS) sjlj_output_call_site_table (); else dw2_output_call_site_table (); ASM_OUTPUT_LABEL (asm_out_file, cs_end_label); #else dw2_asm_output_data_uleb128 (call_site_len,"Call-site table length"); if (USING_SJLJ_EXCEPTIONS) sjlj_output_call_site_table (); else dw2_output_call_site_table (); #endif /* ??? Decode and interpret the data for flag_debug_asm. */ n = VARRAY_ACTIVE_SIZE (cfun->eh->action_record_data); for (i = 0; i < n; ++i) dw2_asm_output_data (1, VARRAY_UCHAR (cfun->eh->action_record_data, i), (i ? NULL : "Action record table")); if (have_tt_data) assemble_align (tt_format_size * BITS_PER_UNIT); i = VARRAY_ACTIVE_SIZE (cfun->eh->ttype_data); while (i-- > 0) { tree type = VARRAY_TREE (cfun->eh->ttype_data, i); rtx value; if (type == NULL_TREE) value = const0_rtx; else { struct cgraph_varpool_node *node; type = lookup_type_for_runtime (type); value = expand_expr (type, NULL_RTX, VOIDmode, EXPAND_INITIALIZER); /* Let cgraph know that the rtti decl is used. Not all of the paths below go through assemble_integer, which would take care of this for us. */ STRIP_NOPS (type); if (TREE_CODE (type) == ADDR_EXPR) { type = TREE_OPERAND (type, 0); if (TREE_CODE (type) == VAR_DECL) { node = cgraph_varpool_node (type); if (node) cgraph_varpool_mark_needed_node (node); } } else if (TREE_CODE (type) != INTEGER_CST) abort (); } if (tt_format == DW_EH_PE_absptr || tt_format == DW_EH_PE_aligned) assemble_integer (value, tt_format_size, tt_format_size * BITS_PER_UNIT, 1); else dw2_asm_output_encoded_addr_rtx (tt_format, value, NULL); } #ifdef HAVE_AS_LEB128 if (have_tt_data) ASM_OUTPUT_LABEL (asm_out_file, ttype_label); #endif /* ??? Decode and interpret the data for flag_debug_asm. */ n = VARRAY_ACTIVE_SIZE (cfun->eh->ehspec_data); for (i = 0; i < n; ++i) dw2_asm_output_data (1, VARRAY_UCHAR (cfun->eh->ehspec_data, i), (i ? NULL : "Exception specification table")); function_section (current_function_decl); } /* Type information for except.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. 
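   (The machine-generated marking and PCH walkers below all share one
   shape: test-and-set a mark on the object, and only if it was not
   already marked, visit each pointer field in turn.  The mark is what
   makes sharing and cycles in the region tree safe to traverse.  The
   struct and function below are a hand-written, hypothetical sketch of
   that pattern added purely for exposition; they are not part of the
   generated tables and nothing references them.)  */

struct example_gc_node
{
  int mark;
  struct example_gc_node *left, *right;
};

static void ATTRIBUTE_UNUSED
example_gc_mark (struct example_gc_node *x)
{
  /* The ggc_test_and_set_mark pattern: bail out if X is absent or has
     been visited, so an object reachable through several fields, or
     through a cycle, is marked exactly once.  */
  if (x == NULL || x->mark)
    return;
  x->mark = 1;
  example_gc_mark (x->left);
  example_gc_mark (x->right);
}

/* The generated walkers for except.c follow.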
*/ void gt_ggc_mx_eh_region (void *x_p) { struct eh_region * const x = (struct eh_region *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9eh_region ((*x).outer); gt_ggc_m_9eh_region ((*x).inner); gt_ggc_m_9eh_region ((*x).next_peer); gt_ggc_m_15bitmap_head_def ((*x).aka); switch ((*x).type) { case ERT_TRY: gt_ggc_m_9eh_region ((*x).u.try.catch); gt_ggc_m_9eh_region ((*x).u.try.last_catch); gt_ggc_m_9eh_region ((*x).u.try.prev_try); gt_ggc_m_7rtx_def ((*x).u.try.continue_label); break; case ERT_CATCH: gt_ggc_m_9eh_region ((*x).u.catch.next_catch); gt_ggc_m_9eh_region ((*x).u.catch.prev_catch); gt_ggc_m_9tree_node ((*x).u.catch.type_list); gt_ggc_m_9tree_node ((*x).u.catch.filter_list); break; case ERT_ALLOWED_EXCEPTIONS: gt_ggc_m_9tree_node ((*x).u.allowed.type_list); break; case ERT_THROW: gt_ggc_m_9tree_node ((*x).u.throw.type); break; case ERT_CLEANUP: gt_ggc_m_9tree_node ((*x).u.cleanup.exp); gt_ggc_m_9eh_region ((*x).u.cleanup.prev_try); break; case ERT_FIXUP: gt_ggc_m_9tree_node ((*x).u.fixup.cleanup_exp); gt_ggc_m_9eh_region ((*x).u.fixup.real_region); break; default: break; } gt_ggc_m_7rtx_def ((*x).label); gt_ggc_m_9tree_node ((*x).tree_label); gt_ggc_m_7rtx_def ((*x).landing_pad); gt_ggc_m_7rtx_def ((*x).post_landing_pad); gt_ggc_m_7rtx_def ((*x).resume); } } void gt_ggc_mx_ehl_map_entry (void *x_p) { struct ehl_map_entry * const x = (struct ehl_map_entry *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_7rtx_def ((*x).label); gt_ggc_m_9eh_region ((*x).region); } } void gt_ggc_mx_eh_status (void *x_p) { struct eh_status * const x = (struct eh_status *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9eh_region ((*x).region_tree); if ((*x).region_array != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).last_region_number); i0++) { gt_ggc_m_9eh_region ((*x).region_array[i0]); } ggc_mark ((*x).region_array); } gt_ggc_m_9eh_region ((*x).cur_region); gt_ggc_m_9eh_region ((*x).try_region); gt_ggc_m_7rtx_def ((*x).filter); gt_ggc_m_7rtx_def ((*x).exc_ptr); gt_ggc_m_15varray_head_tag ((*x).ttype_data); gt_ggc_m_15varray_head_tag ((*x).ehspec_data); gt_ggc_m_15varray_head_tag ((*x).action_record_data); gt_ggc_m_P13ehl_map_entry4htab ((*x).exception_handler_label_map); if ((*x).call_site_data != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(((*x)).call_site_data_used); i1++) { gt_ggc_m_7rtx_def ((*x).call_site_data[i1].landing_pad); } ggc_mark ((*x).call_site_data); } gt_ggc_m_7rtx_def ((*x).ehr_stackadj); gt_ggc_m_7rtx_def ((*x).ehr_handler); gt_ggc_m_7rtx_def ((*x).ehr_label); gt_ggc_m_7rtx_def ((*x).sjlj_fc); gt_ggc_m_7rtx_def ((*x).sjlj_exit_after); } } void gt_ggc_m_P13ehl_map_entry4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_13ehl_map_entry ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_pch_nx_eh_region (void *x_p) { struct eh_region * const x = (struct eh_region *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9eh_region)) { gt_pch_n_9eh_region ((*x).outer); gt_pch_n_9eh_region ((*x).inner); gt_pch_n_9eh_region ((*x).next_peer); gt_pch_n_15bitmap_head_def ((*x).aka); switch ((*x).type) { case ERT_TRY: gt_pch_n_9eh_region ((*x).u.try.catch); gt_pch_n_9eh_region ((*x).u.try.last_catch); gt_pch_n_9eh_region ((*x).u.try.prev_try); gt_pch_n_7rtx_def ((*x).u.try.continue_label); break; case ERT_CATCH: gt_pch_n_9eh_region ((*x).u.catch.next_catch); gt_pch_n_9eh_region ((*x).u.catch.prev_catch); gt_pch_n_9tree_node 
((*x).u.catch.type_list); gt_pch_n_9tree_node ((*x).u.catch.filter_list); break; case ERT_ALLOWED_EXCEPTIONS: gt_pch_n_9tree_node ((*x).u.allowed.type_list); break; case ERT_THROW: gt_pch_n_9tree_node ((*x).u.throw.type); break; case ERT_CLEANUP: gt_pch_n_9tree_node ((*x).u.cleanup.exp); gt_pch_n_9eh_region ((*x).u.cleanup.prev_try); break; case ERT_FIXUP: gt_pch_n_9tree_node ((*x).u.fixup.cleanup_exp); gt_pch_n_9eh_region ((*x).u.fixup.real_region); break; default: break; } gt_pch_n_7rtx_def ((*x).label); gt_pch_n_9tree_node ((*x).tree_label); gt_pch_n_7rtx_def ((*x).landing_pad); gt_pch_n_7rtx_def ((*x).post_landing_pad); gt_pch_n_7rtx_def ((*x).resume); } } void gt_pch_nx_ehl_map_entry (void *x_p) { struct ehl_map_entry * const x = (struct ehl_map_entry *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_13ehl_map_entry)) { gt_pch_n_7rtx_def ((*x).label); gt_pch_n_9eh_region ((*x).region); } } void gt_pch_nx_eh_status (void *x_p) { struct eh_status * const x = (struct eh_status *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9eh_status)) { gt_pch_n_9eh_region ((*x).region_tree); if ((*x).region_array != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).last_region_number); i0++) { gt_pch_n_9eh_region ((*x).region_array[i0]); } gt_pch_note_object ((*x).region_array, x, gt_pch_p_9eh_status); } gt_pch_n_9eh_region ((*x).cur_region); gt_pch_n_9eh_region ((*x).try_region); gt_pch_n_7rtx_def ((*x).filter); gt_pch_n_7rtx_def ((*x).exc_ptr); gt_pch_n_15varray_head_tag ((*x).ttype_data); gt_pch_n_15varray_head_tag ((*x).ehspec_data); gt_pch_n_15varray_head_tag ((*x).action_record_data); gt_pch_n_P13ehl_map_entry4htab ((*x).exception_handler_label_map); if ((*x).call_site_data != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(((*x)).call_site_data_used); i1++) { gt_pch_n_7rtx_def ((*x).call_site_data[i1].landing_pad); } gt_pch_note_object ((*x).call_site_data, x, gt_pch_p_9eh_status); } gt_pch_n_7rtx_def ((*x).ehr_stackadj); gt_pch_n_7rtx_def ((*x).ehr_handler); gt_pch_n_7rtx_def ((*x).ehr_label); gt_pch_n_7rtx_def ((*x).sjlj_fc); gt_pch_n_7rtx_def ((*x).sjlj_exit_after); } } void gt_pch_n_P13ehl_map_entry4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P13ehl_map_entry4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_13ehl_map_entry ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P13ehl_map_entry4htab); } } } void gt_pch_p_9eh_region (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct eh_region * const x ATTRIBUTE_UNUSED = (struct eh_region *)x_p; if ((void *)(x) == this_obj) op (&((*x).outer), cookie); if ((void *)(x) == this_obj) op (&((*x).inner), cookie); if ((void *)(x) == this_obj) op (&((*x).next_peer), cookie); if ((void *)(x) == this_obj) op (&((*x).aka), cookie); switch ((*x).type) { case ERT_TRY: if ((void *)(x) == this_obj) op (&((*x).u.try.catch), cookie); if ((void *)(x) == this_obj) op (&((*x).u.try.last_catch), cookie); if ((void *)(x) == this_obj) op (&((*x).u.try.prev_try), cookie); if ((void *)(x) == this_obj) op (&((*x).u.try.continue_label), cookie); break; case ERT_CATCH: if ((void *)(x) == this_obj) op (&((*x).u.catch.next_catch), cookie); if ((void *)(x) == this_obj) op (&((*x).u.catch.prev_catch), cookie); if ((void *)(x) == this_obj) op (&((*x).u.catch.type_list), cookie); if ((void *)(x) == this_obj) op (&((*x).u.catch.filter_list), cookie); break; case 
ERT_ALLOWED_EXCEPTIONS: if ((void *)(x) == this_obj) op (&((*x).u.allowed.type_list), cookie); break; case ERT_THROW: if ((void *)(x) == this_obj) op (&((*x).u.throw.type), cookie); break; case ERT_CLEANUP: if ((void *)(x) == this_obj) op (&((*x).u.cleanup.exp), cookie); if ((void *)(x) == this_obj) op (&((*x).u.cleanup.prev_try), cookie); break; case ERT_FIXUP: if ((void *)(x) == this_obj) op (&((*x).u.fixup.cleanup_exp), cookie); if ((void *)(x) == this_obj) op (&((*x).u.fixup.real_region), cookie); break; default: break; } if ((void *)(x) == this_obj) op (&((*x).label), cookie); if ((void *)(x) == this_obj) op (&((*x).tree_label), cookie); if ((void *)(x) == this_obj) op (&((*x).landing_pad), cookie); if ((void *)(x) == this_obj) op (&((*x).post_landing_pad), cookie); if ((void *)(x) == this_obj) op (&((*x).resume), cookie); } void gt_pch_p_13ehl_map_entry (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct ehl_map_entry * const x ATTRIBUTE_UNUSED = (struct ehl_map_entry *)x_p; if ((void *)(x) == this_obj) op (&((*x).label), cookie); if ((void *)(x) == this_obj) op (&((*x).region), cookie); } void gt_pch_p_9eh_status (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct eh_status * const x ATTRIBUTE_UNUSED = (struct eh_status *)x_p; if ((void *)(x) == this_obj) op (&((*x).region_tree), cookie); if ((*x).region_array != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).last_region_number); i0++) { if ((void *)((*x).region_array) == this_obj) op (&((*x).region_array[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).region_array), cookie); } if ((void *)(x) == this_obj) op (&((*x).cur_region), cookie); if ((void *)(x) == this_obj) op (&((*x).try_region), cookie); if ((void *)(x) == this_obj) op (&((*x).filter), cookie); if ((void *)(x) == this_obj) op (&((*x).exc_ptr), cookie); if ((void *)(x) == this_obj) op (&((*x).ttype_data), cookie); if ((void *)(x) == this_obj) op (&((*x).ehspec_data), cookie); if ((void *)(x) == this_obj) op (&((*x).action_record_data), cookie); if ((void *)(x) == this_obj) op (&((*x).exception_handler_label_map), cookie); if ((*x).call_site_data != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)(((*x)).call_site_data_used); i1++) { if ((void *)((*x).call_site_data) == this_obj) op (&((*x).call_site_data[i1].landing_pad), cookie); } if ((void *)(x) == this_obj) op (&((*x).call_site_data), cookie); } if ((void *)(x) == this_obj) op (&((*x).ehr_stackadj), cookie); if ((void *)(x) == this_obj) op (&((*x).ehr_handler), cookie); if ((void *)(x) == this_obj) op (&((*x).ehr_label), cookie); if ((void *)(x) == this_obj) op (&((*x).sjlj_fc), cookie); if ((void *)(x) == this_obj) op (&((*x).sjlj_exit_after), cookie); } void gt_pch_p_P13ehl_map_entry4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } /* GC roots. 
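   (A hedged sketch of how such a table is consumed: every entry names a
   global slot that can keep garbage-collected objects alive, and the
   collector walks the table marking whatever each slot currently points
   at.  The struct and loop below are simplified stand-ins invented for
   this illustration; the real ggc_root_tab entries also carry an element
   count, a stride, and a separate callback for precompiled headers.)  */

struct example_gc_root
{
  void **slot;                  /* address of a global pointer */
  void (*mark) (void *);        /* marker for the object it holds */
};

static void ATTRIBUTE_UNUSED
example_mark_gc_roots (const struct example_gc_root *roots, int n)
{
  /* Simplified root walk: mark every object still reachable from a
     registered global, which is the job the tables below describe.  */
  int i;
  for (i = 0; i < n; ++i)
    if (*roots[i].slot != NULL)
      roots[i].mark (*roots[i].slot);
}

/* The root tables for this unit follow.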
*/ const struct ggc_root_tab gt_ggc_r_gt_except_h[] = { { &sjlj_fc_type_node, 1, sizeof (sjlj_fc_type_node), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &type_to_runtime_map, 1, sizeof (type_to_runtime_map), &gt_ggc_m_P9tree_node4htab, &gt_pch_n_P9tree_node4htab }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_except_h[] = { { &call_site_base, 1, sizeof (call_site_base), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Subroutines for manipulating rtx's in semantically interesting ways. Copyright (C) 1987, 1991, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static rtx break_out_memory_refs (rtx); static void emit_stack_probe (rtx); /* Truncate and perhaps sign-extend C as appropriate for MODE. */ HOST_WIDE_INT trunc_int_for_mode (HOST_WIDE_INT c, enum machine_mode mode) { int width = GET_MODE_BITSIZE (mode); /* You want to truncate to a _what_? */ if (! SCALAR_INT_MODE_P (mode)) abort (); /* Canonicalize BImode to 0 and STORE_FLAG_VALUE. */ if (mode == BImode) return c & 1 ? STORE_FLAG_VALUE : 0; /* Sign-extend for the requested mode. */ if (width < HOST_BITS_PER_WIDE_INT) { HOST_WIDE_INT sign = 1; sign <<= width - 1; c &= (sign << 1) - 1; c ^= sign; c -= sign; } return c; } /* Return an rtx for the sum of X and the integer C. This function should be used via the `plus_constant' macro. */ rtx plus_constant_wide (rtx x, HOST_WIDE_INT c) { RTX_CODE code; rtx y; enum machine_mode mode; rtx tem; int all_constant = 0; if (c == 0) return x; restart: code = GET_CODE (x); mode = GET_MODE (x); y = x; switch (code) { case CONST_INT: return GEN_INT (INTVAL (x) + c); case CONST_DOUBLE: { unsigned HOST_WIDE_INT l1 = CONST_DOUBLE_LOW (x); HOST_WIDE_INT h1 = CONST_DOUBLE_HIGH (x); unsigned HOST_WIDE_INT l2 = c; HOST_WIDE_INT h2 = c < 0 ? ~0 : 0; unsigned HOST_WIDE_INT lv; HOST_WIDE_INT hv; add_double (l1, h1, l2, h2, &lv, &hv); return immed_double_const (lv, hv, VOIDmode); } case MEM: /* If this is a reference to the constant pool, try replacing it with a reference to a new constant. If the resulting address isn't valid, don't return it because we have no way to validize it. */ if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0))) { tem = force_const_mem (GET_MODE (x), plus_constant (get_pool_constant (XEXP (x, 0)), c)); if (memory_address_p (GET_MODE (tem), XEXP (tem, 0))) return tem; } break; case CONST: /* If adding to something entirely constant, set a flag so that we can add a CONST around the result. */ x = XEXP (x, 0); all_constant = 1; goto restart; case SYMBOL_REF: case LABEL_REF: all_constant = 1; break; case PLUS: /* The interesting case is adding the integer to a sum. Look for constant term in the sum and combine with C. For an integer constant term, we make a combined integer.
For a constant term that is not an explicit integer, we cannot really combine, but group them together anyway. Restart or use a recursive call in case the remaining operand is something that we handle specially, such as a SYMBOL_REF. We may not immediately return from the recursive call here, lest all_constant gets lost. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT) { c += INTVAL (XEXP (x, 1)); if (GET_MODE (x) != VOIDmode) c = trunc_int_for_mode (c, GET_MODE (x)); x = XEXP (x, 0); goto restart; } else if (CONSTANT_P (XEXP (x, 1))) { x = gen_rtx_PLUS (mode, XEXP (x, 0), plus_constant (XEXP (x, 1), c)); c = 0; } else if (find_constant_term_loc (&y)) { /* We need to be careful since X may be shared and we can't modify it in place. */ rtx copy = copy_rtx (x); rtx *const_loc = find_constant_term_loc (&copy); *const_loc = plus_constant (*const_loc, c); x = copy; c = 0; } break; default: break; } if (c != 0) x = gen_rtx_PLUS (mode, x, GEN_INT (c)); if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF) return x; else if (all_constant) return gen_rtx_CONST (mode, x); else return x; } /* If X is a sum, return a new sum like X but lacking any constant terms. Add all the removed constant terms into *CONSTPTR. X itself is not altered. The result != X if and only if it is not isomorphic to X. */ rtx eliminate_constant_term (rtx x, rtx *constptr) { rtx x0, x1; rtx tem; if (GET_CODE (x) != PLUS) return x; /* First handle constants appearing at this level explicitly. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && 0 != (tem = simplify_binary_operation (PLUS, GET_MODE (x), *constptr, XEXP (x, 1))) && GET_CODE (tem) == CONST_INT) { *constptr = tem; return eliminate_constant_term (XEXP (x, 0), constptr); } tem = const0_rtx; x0 = eliminate_constant_term (XEXP (x, 0), &tem); x1 = eliminate_constant_term (XEXP (x, 1), &tem); if ((x1 != XEXP (x, 1) || x0 != XEXP (x, 0)) && 0 != (tem = simplify_binary_operation (PLUS, GET_MODE (x), *constptr, tem)) && GET_CODE (tem) == CONST_INT) { *constptr = tem; return gen_rtx_PLUS (GET_MODE (x), x0, x1); } return x; } /* Return an rtx for the size in bytes of the value of EXP. */ rtx expr_size (tree exp) { tree size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (lang_hooks.expr_size (exp), exp); return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), 0); } /* Return a wide integer for the size in bytes of the value of EXP, or -1 if the size can vary or is larger than an integer. */ HOST_WIDE_INT int_expr_size (tree exp) { tree t = lang_hooks.expr_size (exp); if (t == 0 || TREE_CODE (t) != INTEGER_CST || TREE_OVERFLOW (t) || TREE_INT_CST_HIGH (t) != 0 /* If the result would appear negative, it's too big to represent. */ || (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0) return -1; return TREE_INT_CST_LOW (t); } /* Return a copy of X in which all memory references and all constants that involve symbol refs have been replaced with new temporary registers. Also emit code to load the memory locations and constants into those registers. If X contains no such constants or memory references, X itself (not a copy) is returned. If a constant is found in the address that is not a legitimate constant in an insn, it is left alone in the hope that it might be valid in the address. X may contain no arithmetic except addition, subtraction and multiplication. Values returned by expand_expr with 1 for sum_ok fit this constraint.
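   (One aside before this helper, about trunc_int_for_mode further back:
   it narrows a constant to the width of a mode and re-extends the sign
   with a mask/xor/subtract sequence instead of a conditional.  The
   hypothetical helper below restates that identity on a plain long; it
   assumes 0 < WIDTH < the number of bits in a long and is illustration
   only.)  */

static long ATTRIBUTE_UNUSED
example_sign_extend (long c, int width)
{
  /* Keep WIDTH low bits of C, then flip and subtract the sign bit:
     values whose kept top bit is set come out negative, all others are
     unchanged.  This is the same trick trunc_int_for_mode applies to
     HOST_WIDE_INT.  */
  long sign = (long) 1 << (width - 1);

  c &= (sign << 1) - 1;
  c ^= sign;
  c -= sign;
  return c;
}

/* Now the helper described above, which rewrites an address so that its
   memory references and symbolic constants live in registers.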
*/ static rtx break_out_memory_refs (rtx x) { if (MEM_P (x) || (CONSTANT_P (x) && CONSTANT_ADDRESS_P (x) && GET_MODE (x) != VOIDmode)) x = force_reg (GET_MODE (x), x); else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == MULT) { rtx op0 = break_out_memory_refs (XEXP (x, 0)); rtx op1 = break_out_memory_refs (XEXP (x, 1)); if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1)) x = gen_rtx_fmt_ee (GET_CODE (x), Pmode, op0, op1); } return x; } /* Given X, a memory address in ptr_mode, convert it to an address in Pmode, or vice versa (TO_MODE says which way). We take advantage of the fact that pointers are not allowed to overflow by commuting arithmetic operations over conversions so that address arithmetic insns can be used. */ rtx convert_memory_address (enum machine_mode to_mode ATTRIBUTE_UNUSED, rtx x) { #ifndef POINTERS_EXTEND_UNSIGNED return x; #else /* defined(POINTERS_EXTEND_UNSIGNED) */ enum machine_mode from_mode; rtx temp; enum rtx_code code; /* If X already has the right mode, just return it. */ if (GET_MODE (x) == to_mode) return x; from_mode = to_mode == ptr_mode ? Pmode : ptr_mode; /* Here we handle some special cases. If none of them apply, fall through to the default case. */ switch (GET_CODE (x)) { case CONST_INT: case CONST_DOUBLE: if (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (from_mode)) code = TRUNCATE; else if (POINTERS_EXTEND_UNSIGNED < 0) break; else if (POINTERS_EXTEND_UNSIGNED > 0) code = ZERO_EXTEND; else code = SIGN_EXTEND; temp = simplify_unary_operation (code, to_mode, x, from_mode); if (temp) return temp; break; case SUBREG: if ((SUBREG_PROMOTED_VAR_P (x) || REG_POINTER (SUBREG_REG (x))) && GET_MODE (SUBREG_REG (x)) == to_mode) return SUBREG_REG (x); break; case LABEL_REF: temp = gen_rtx_LABEL_REF (to_mode, XEXP (x, 0)); LABEL_REF_NONLOCAL_P (temp) = LABEL_REF_NONLOCAL_P (x); return temp; break; case SYMBOL_REF: temp = shallow_copy_rtx (x); PUT_MODE (temp, to_mode); return temp; break; case CONST: return gen_rtx_CONST (to_mode, convert_memory_address (to_mode, XEXP (x, 0))); break; case PLUS: case MULT: /* For addition we can safely permute the conversion and addition operation if one operand is a constant and converting the constant does not change it. We can always safely permute them if we are making the address narrower. */ if (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (from_mode) || (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT && XEXP (x, 1) == convert_memory_address (to_mode, XEXP (x, 1)))) return gen_rtx_fmt_ee (GET_CODE (x), to_mode, convert_memory_address (to_mode, XEXP (x, 0)), XEXP (x, 1)); break; default: break; } return convert_modes (to_mode, from_mode, x, POINTERS_EXTEND_UNSIGNED); #endif /* defined(POINTERS_EXTEND_UNSIGNED) */ } /* Given a memory address or facsimile X, construct a new address, currently equivalent, that is stable: future stores won't change it. X must be composed of constants, register and memory references combined with addition, subtraction and multiplication: in other words, just what you can get from expand_expr if sum_ok is 1. Works by making copies of all regs and memory locations used by X and combining them the same way X does. You could also stabilize the reference to this address by copying the address to a register with copy_to_reg; but then you wouldn't get indexed addressing in the reference. 
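   (One more aside before that helper: when convert_memory_address above
   must widen a ptr_mode constant to Pmode, POINTERS_EXTEND_UNSIGNED
   selects the extension, positive meaning zero-extend and zero meaning
   sign-extend; negative values hand the job to a target ptr_extend
   pattern.  The helper below shows the same choice with the mode pair
   fixed at 32 and 64 bits, an assumption made only for this sketch.)  */

static unsigned long long ATTRIBUTE_UNUSED
example_widen_pointer (unsigned int ptr32, int pointers_extend_unsigned)
{
  /* Zero- or sign-extend a narrow pointer value, mirroring the CONST_INT
     case of convert_memory_address for a 32-to-64-bit widening.  */
  if (pointers_extend_unsigned > 0)
    return (unsigned long long) ptr32;
  return (unsigned long long) (long long) (int) ptr32;
}

/* The stabilizing copy described above follows.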
*/ rtx copy_all_regs (rtx x) { if (REG_P (x)) { if (REGNO (x) != FRAME_POINTER_REGNUM #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM && REGNO (x) != HARD_FRAME_POINTER_REGNUM #endif ) x = copy_to_reg (x); } else if (MEM_P (x)) x = copy_to_reg (x); else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == MULT) { rtx op0 = copy_all_regs (XEXP (x, 0)); rtx op1 = copy_all_regs (XEXP (x, 1)); if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1)) x = gen_rtx_fmt_ee (GET_CODE (x), Pmode, op0, op1); } return x; } /* Return something equivalent to X but valid as a memory address for something of mode MODE. When X is not itself valid, this works by copying X or subexpressions of it into registers. */ rtx memory_address (enum machine_mode mode, rtx x) { rtx oldx = x; x = convert_memory_address (Pmode, x); /* By passing constant addresses through registers we get a chance to cse them. */ if (! cse_not_expected && CONSTANT_P (x) && CONSTANT_ADDRESS_P (x)) x = force_reg (Pmode, x); /* Accept a QUEUED that refers to a REG even though that isn't a valid address. On attempting to put this in an insn we will call protect_from_queue which will turn it into a REG, which is valid. */ else if (GET_CODE (x) == QUEUED && REG_P (QUEUED_VAR (x))) ; /* We get better cse by rejecting indirect addressing at this stage. Let the combiner create indirect addresses where appropriate. For now, generate the code so that the subexpressions useful to share are visible. But not if cse won't be done! */ else { if (! cse_not_expected && !REG_P (x)) x = break_out_memory_refs (x); /* At this point, any valid address is accepted. */ GO_IF_LEGITIMATE_ADDRESS (mode, x, win); /* If it was valid before but breaking out memory refs invalidated it, use it the old way. */ if (memory_address_p (mode, oldx)) goto win2; /* Perform machine-dependent transformations on X in certain cases. This is not necessary since the code below can handle all possible cases, but machine-dependent transformations can make better code. */ LEGITIMIZE_ADDRESS (x, oldx, mode, win); /* PLUS and MULT can appear in special ways as the result of attempts to make an address usable for indexing. Usually they are dealt with by calling force_operand, below. But a sum containing constant terms is special if removing them makes the sum a valid address: then we generate that address in a register and index off of it. We do this because it often makes shorter code, and because the addresses thus generated in registers often become common subexpressions. */ if (GET_CODE (x) == PLUS) { rtx constant_term = const0_rtx; rtx y = eliminate_constant_term (x, &constant_term); if (constant_term == const0_rtx || ! memory_address_p (mode, y)) x = force_operand (x, NULL_RTX); else { y = gen_rtx_PLUS (GET_MODE (x), copy_to_reg (y), constant_term); if (! memory_address_p (mode, y)) x = force_operand (x, NULL_RTX); else x = y; } } else if (GET_CODE (x) == MULT || GET_CODE (x) == MINUS) x = force_operand (x, NULL_RTX); /* If we have a register that's an invalid address, it must be a hard reg of the wrong class. Copy it to a pseudo. */ else if (REG_P (x)) x = copy_to_reg (x); /* Last resort: copy the value to a register, since the register is a valid address. */ else x = force_reg (Pmode, x); goto done; win2: x = oldx; win: if (flag_force_addr && ! cse_not_expected && !REG_P (x) /* Don't copy an addr via a reg if it is one of our stack slots. */ && ! 
(GET_CODE (x) == PLUS && (XEXP (x, 0) == virtual_stack_vars_rtx || XEXP (x, 0) == virtual_incoming_args_rtx))) { if (general_operand (x, Pmode)) x = force_reg (Pmode, x); else x = force_operand (x, NULL_RTX); } } done: /* If we didn't change the address, we are done. Otherwise, mark a reg as a pointer if we have REG or REG + CONST_INT. */ if (oldx == x) return x; else if (REG_P (x)) mark_reg_pointer (x, BITS_PER_UNIT); else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) == CONST_INT) mark_reg_pointer (XEXP (x, 0), BITS_PER_UNIT); /* OLDX may have been the address on a temporary. Update the address to indicate that X is now used. */ update_temp_slot_address (oldx, x); return x; } /* Like `memory_address' but pretend `flag_force_addr' is 0. */ rtx memory_address_noforce (enum machine_mode mode, rtx x) { int ambient_force_addr = flag_force_addr; rtx val; flag_force_addr = 0; val = memory_address (mode, x); flag_force_addr = ambient_force_addr; return val; } /* Convert a mem ref into one with a valid memory address. Pass through anything else unchanged. */ rtx validize_mem (rtx ref) { if (!MEM_P (ref)) return ref; if (! (flag_force_addr && CONSTANT_ADDRESS_P (XEXP (ref, 0))) && memory_address_p (GET_MODE (ref), XEXP (ref, 0))) return ref; /* Don't alter REF itself, since that is probably a stack slot. */ return replace_equiv_address (ref, XEXP (ref, 0)); } /* Given REF, either a MEM or a REG, and T, either the type of X or the expression corresponding to REF, set RTX_UNCHANGING_P if appropriate. */ void maybe_set_unchanging (rtx ref, tree t) { /* We can set RTX_UNCHANGING_P from TREE_READONLY for decls whose initialization is only executed once, or whose initializer always has the same value. Currently we simplify this to PARM_DECLs in the first case, and decls with TREE_CONSTANT initializers in the second. We cannot do this for non-static aggregates, because of the double writes that can be generated by store_constructor, depending on the contents of the initializer. Yes, this does eliminate a good fraction of the number of uses of RTX_UNCHANGING_P for a language like Ada. It also eliminates a good quantity of bugs. Let this be incentive to eliminate RTX_UNCHANGING_P entirely in favor of a more reliable solution, perhaps based on alias sets. */ if ((TREE_READONLY (t) && DECL_P (t) && (TREE_STATIC (t) || ! AGGREGATE_TYPE_P (TREE_TYPE (t))) && (TREE_CODE (t) == PARM_DECL || (DECL_INITIAL (t) && TREE_CONSTANT (DECL_INITIAL (t))))) || TREE_CODE_CLASS (TREE_CODE (t)) == 'c') RTX_UNCHANGING_P (ref) = 1; } /* Return a modified copy of X with its memory address copied into a temporary register to protect it from side effects. If X is not a MEM, it is returned unchanged (and not copied). Perhaps even if it is a MEM, if there is no need to change it. */ rtx stabilize (rtx x) { if (!MEM_P (x) || ! rtx_unstable_p (XEXP (x, 0))) return x; return replace_equiv_address (x, force_reg (Pmode, copy_all_regs (XEXP (x, 0)))); } /* Copy the value or contents of X to a new temp reg and return that reg. */ rtx copy_to_reg (rtx x) { rtx temp = gen_reg_rtx (GET_MODE (x)); /* If not an operand, must be an address with PLUS and MULT so do the computation. */ if (! general_operand (x, VOIDmode)) x = force_operand (x, temp); if (x != temp) emit_move_insn (temp, x); return temp; } /* Like copy_to_reg but always give the new register mode Pmode in case X is a constant. 
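   (An aside on memory_address_noforce above before the remaining copy
   helpers: it temporarily clears the global flag_force_addr around a call
   to memory_address and then restores the caller's value.  The helper
   below is a hypothetical restatement of that save/clear/restore idiom;
   example_flag and the function name are inventions for this sketch.)  */

static int example_flag;

static int ATTRIBUTE_UNUSED
example_call_with_flag_cleared (int (*fn) (void))
{
  /* The override is visible to everything FN calls, and the previous
     value is put back afterwards whatever it was.  */
  int saved = example_flag;
  int result;

  example_flag = 0;
  result = fn ();
  example_flag = saved;
  return result;
}

/* As the comment above says, the next helper is like copy_to_reg but
   always uses Pmode.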
*/ rtx copy_addr_to_reg (rtx x) { return copy_to_mode_reg (Pmode, x); } /* Like copy_to_reg but always give the new register mode MODE in case X is a constant. */ rtx copy_to_mode_reg (enum machine_mode mode, rtx x) { rtx temp = gen_reg_rtx (mode); /* If not an operand, must be an address with PLUS and MULT so do the computation. */ if (! general_operand (x, VOIDmode)) x = force_operand (x, temp); if (GET_MODE (x) != mode && GET_MODE (x) != VOIDmode) abort (); if (x != temp) emit_move_insn (temp, x); return temp; } /* Load X into a register if it is not already one. Use mode MODE for the register. X should be valid for mode MODE, but it may be a constant which is valid for all integer modes; that's why caller must specify MODE. The caller must not alter the value in the register we return, since we mark it as a "constant" register. */ rtx force_reg (enum machine_mode mode, rtx x) { rtx temp, insn, set; if (REG_P (x)) return x; if (general_operand (x, mode)) { temp = gen_reg_rtx (mode); insn = emit_move_insn (temp, x); } else { temp = force_operand (x, NULL_RTX); if (REG_P (temp)) insn = get_last_insn (); else { rtx temp2 = gen_reg_rtx (mode); insn = emit_move_insn (temp2, temp); temp = temp2; } } /* Let optimizers know that TEMP's value never changes and that X can be substituted for it. Don't get confused if INSN set something else (such as a SUBREG of TEMP). */ if (CONSTANT_P (x) && (set = single_set (insn)) != 0 && SET_DEST (set) == temp && ! rtx_equal_p (x, SET_SRC (set))) set_unique_reg_note (insn, REG_EQUAL, x); /* Let optimizers know that TEMP is a pointer, and if so, the known alignment of that pointer. */ { unsigned align = 0; if (GET_CODE (x) == SYMBOL_REF) { align = BITS_PER_UNIT; if (SYMBOL_REF_DECL (x) && DECL_P (SYMBOL_REF_DECL (x))) align = DECL_ALIGN (SYMBOL_REF_DECL (x)); } else if (GET_CODE (x) == LABEL_REF) align = BITS_PER_UNIT; else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) { rtx s = XEXP (XEXP (x, 0), 0); rtx c = XEXP (XEXP (x, 0), 1); unsigned sa, ca; sa = BITS_PER_UNIT; if (SYMBOL_REF_DECL (s) && DECL_P (SYMBOL_REF_DECL (s))) sa = DECL_ALIGN (SYMBOL_REF_DECL (s)); ca = exact_log2 (INTVAL (c) & -INTVAL (c)) * BITS_PER_UNIT; align = MIN (sa, ca); } if (align) mark_reg_pointer (temp, align); } return temp; } /* If X is a memory ref, copy its contents to a new temp reg and return that reg. Otherwise, return X. */ rtx force_not_mem (rtx x) { rtx temp; if (!MEM_P (x) || GET_MODE (x) == BLKmode) return x; temp = gen_reg_rtx (GET_MODE (x)); if (MEM_POINTER (x)) REG_POINTER (temp) = 1; emit_move_insn (temp, x); return temp; } /* Copy X to TARGET (if it's nonzero and a reg) or to a new temp reg and return that reg. MODE is the mode to use for X in case it is a constant. */ rtx copy_to_suggested_reg (rtx x, rtx target, enum machine_mode mode) { rtx temp; if (target && REG_P (target)) temp = target; else temp = gen_reg_rtx (mode); emit_move_insn (temp, x); return temp; } /* Return the mode to use to store a scalar of TYPE and MODE. PUNSIGNEDP points to the signedness of the type and may be adjusted to show what signedness to use on extension operations. FOR_CALL is nonzero if this call is promoting args for a call. 
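   A minimal usage sketch, assuming the caller already holds a scalar
   TYPE and has loaded the type's signedness into UNSIGNEDP (the local
   names here are placeholders, not taken from this file):

     enum machine_mode mode
       = promote_mode (type, TYPE_MODE (type), &unsignedp, 1);

   With FOR_CALL nonzero this applies PROMOTE_FUNCTION_MODE rather than
   PROMOTE_MODE; on targets that define neither macro the mode comes
   back unchanged.  In either case *punsignedp is updated to say whether
   a zero or sign extension should be used when widening.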
*/ #if defined(PROMOTE_MODE) && !defined(PROMOTE_FUNCTION_MODE) #define PROMOTE_FUNCTION_MODE PROMOTE_MODE #endif enum machine_mode promote_mode (tree type, enum machine_mode mode, int *punsignedp, int for_call ATTRIBUTE_UNUSED) { enum tree_code code = TREE_CODE (type); int unsignedp = *punsignedp; #ifndef PROMOTE_MODE if (! for_call) return mode; #endif switch (code) { #ifdef PROMOTE_FUNCTION_MODE case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: case REAL_TYPE: case OFFSET_TYPE: #ifdef PROMOTE_MODE if (for_call) { #endif PROMOTE_FUNCTION_MODE (mode, unsignedp, type); #ifdef PROMOTE_MODE } else { PROMOTE_MODE (mode, unsignedp, type); } #endif break; #endif #ifdef POINTERS_EXTEND_UNSIGNED case REFERENCE_TYPE: case POINTER_TYPE: mode = Pmode; unsignedp = POINTERS_EXTEND_UNSIGNED; break; #endif default: break; } *punsignedp = unsignedp; return mode; } /* Adjust the stack pointer by ADJUST (an rtx for a number of bytes). This pops when ADJUST is positive. ADJUST need not be constant. */ void adjust_stack (rtx adjust) { rtx temp; adjust = protect_from_queue (adjust, 0); if (adjust == const0_rtx) return; /* We expect all variable sized adjustments to be multiple of PREFERRED_STACK_BOUNDARY. */ if (GET_CODE (adjust) == CONST_INT) stack_pointer_delta -= INTVAL (adjust); temp = expand_binop (Pmode, #ifdef STACK_GROWS_DOWNWARD add_optab, #else sub_optab, #endif stack_pointer_rtx, adjust, stack_pointer_rtx, 0, OPTAB_LIB_WIDEN); if (temp != stack_pointer_rtx) emit_move_insn (stack_pointer_rtx, temp); } /* Adjust the stack pointer by minus ADJUST (an rtx for a number of bytes). This pushes when ADJUST is positive. ADJUST need not be constant. */ void anti_adjust_stack (rtx adjust) { rtx temp; adjust = protect_from_queue (adjust, 0); if (adjust == const0_rtx) return; /* We expect all variable sized adjustments to be multiple of PREFERRED_STACK_BOUNDARY. */ if (GET_CODE (adjust) == CONST_INT) stack_pointer_delta += INTVAL (adjust); temp = expand_binop (Pmode, #ifdef STACK_GROWS_DOWNWARD sub_optab, #else add_optab, #endif stack_pointer_rtx, adjust, stack_pointer_rtx, 0, OPTAB_LIB_WIDEN); if (temp != stack_pointer_rtx) emit_move_insn (stack_pointer_rtx, temp); } /* Round the size of a block to be pushed up to the boundary required by this machine. SIZE is the desired size, which need not be constant. */ rtx round_push (rtx size) { int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT; if (align == 1) return size; if (GET_CODE (size) == CONST_INT) { HOST_WIDE_INT new = (INTVAL (size) + align - 1) / align * align; if (INTVAL (size) != new) size = GEN_INT (new); } else { /* CEIL_DIV_EXPR needs to worry about the addition overflowing, but we know it can't. So add ourselves and then do TRUNC_DIV_EXPR. */ size = expand_binop (Pmode, add_optab, size, GEN_INT (align - 1), NULL_RTX, 1, OPTAB_LIB_WIDEN); size = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, size, GEN_INT (align), NULL_RTX, 1); size = expand_mult (Pmode, size, GEN_INT (align), NULL_RTX, 1); } return size; } /* Save the stack pointer for the purpose in SAVE_LEVEL. PSAVE is a pointer to a previously-created save area. If no save area has been allocated, this function will allocate one. If a save area is specified, it must be of the proper mode. The insns are emitted after insn AFTER, if nonzero, otherwise the insns are emitted at the current position. */ void emit_stack_save (enum save_level save_level, rtx *psave, rtx after) { rtx sa = *psave; /* The default is that we use a move insn and save in a Pmode object. 
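   Concretely, when the target provides no save_stack_* pattern for this
   SAVE_LEVEL, FCN stays gen_move_insn and the code below reduces to

     sa = validize_mem (sa);
     emit_insn (gen_move_insn (sa, stack_pointer_rtx));

   with SA freshly allocated as a pseudo (or, for SAVE_NONLOCAL, as a
   stack slot) of STACK_SAVEAREA_MODE (save_level) when the caller
   passed none in.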
*/ rtx (*fcn) (rtx, rtx) = gen_move_insn; enum machine_mode mode = STACK_SAVEAREA_MODE (save_level); /* See if this machine has anything special to do for this kind of save. */ switch (save_level) { #ifdef HAVE_save_stack_block case SAVE_BLOCK: if (HAVE_save_stack_block) fcn = gen_save_stack_block; break; #endif #ifdef HAVE_save_stack_function case SAVE_FUNCTION: if (HAVE_save_stack_function) fcn = gen_save_stack_function; break; #endif #ifdef HAVE_save_stack_nonlocal case SAVE_NONLOCAL: if (HAVE_save_stack_nonlocal) fcn = gen_save_stack_nonlocal; break; #endif default: break; } /* If there is no save area and we have to allocate one, do so. Otherwise verify the save area is the proper mode. */ if (sa == 0) { if (mode != VOIDmode) { if (save_level == SAVE_NONLOCAL) *psave = sa = assign_stack_local (mode, GET_MODE_SIZE (mode), 0); else *psave = sa = gen_reg_rtx (mode); } } if (after) { rtx seq; start_sequence (); /* We must validize inside the sequence, to ensure that any instructions created by the validize call also get moved to the right place. */ if (sa != 0) sa = validize_mem (sa); emit_insn (fcn (sa, stack_pointer_rtx)); seq = get_insns (); end_sequence (); emit_insn_after (seq, after); } else { if (sa != 0) sa = validize_mem (sa); emit_insn (fcn (sa, stack_pointer_rtx)); } } /* Restore the stack pointer for the purpose in SAVE_LEVEL. SA is the save area made by emit_stack_save. If it is zero, we have nothing to do. Put any emitted insns after insn AFTER, if nonzero, otherwise at current position. */ void emit_stack_restore (enum save_level save_level, rtx sa, rtx after) { /* The default is that we use a move insn. */ rtx (*fcn) (rtx, rtx) = gen_move_insn; /* See if this machine has anything special to do for this kind of save. */ switch (save_level) { #ifdef HAVE_restore_stack_block case SAVE_BLOCK: if (HAVE_restore_stack_block) fcn = gen_restore_stack_block; break; #endif #ifdef HAVE_restore_stack_function case SAVE_FUNCTION: if (HAVE_restore_stack_function) fcn = gen_restore_stack_function; break; #endif #ifdef HAVE_restore_stack_nonlocal case SAVE_NONLOCAL: if (HAVE_restore_stack_nonlocal) fcn = gen_restore_stack_nonlocal; break; #endif default: break; } if (sa != 0) { sa = validize_mem (sa); /* These clobbers prevent the scheduler from moving references to variable arrays below the code that deletes (pops) the arrays. */ emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)))); emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, stack_pointer_rtx))); } if (after) { rtx seq; start_sequence (); emit_insn (fcn (stack_pointer_rtx, sa)); seq = get_insns (); end_sequence (); emit_insn_after (seq, after); } else emit_insn (fcn (stack_pointer_rtx, sa)); } /* Invoke emit_stack_save on the nonlocal_goto_save_area for the current function. This function should be called whenever we allocate or deallocate dynamic stack space. */ void update_nonlocal_goto_save_area (void) { tree t_save; rtx r_save; /* The nonlocal_goto_save_area object is an array of N pointers. The first one is used for the frame pointer save; the rest are sized by STACK_SAVEAREA_MODE. Create a reference to array index 1, the first of the stack save area slots. 
*/ t_save = build (ARRAY_REF, ptr_type_node, cfun->nonlocal_goto_save_area, integer_one_node, NULL_TREE, NULL_TREE); r_save = expand_expr (t_save, NULL_RTX, VOIDmode, EXPAND_WRITE); emit_stack_save (SAVE_NONLOCAL, &r_save, NULL_RTX); } #ifdef SETJMP_VIA_SAVE_AREA /* Optimize RTL generated by allocate_dynamic_stack_space for targets where SETJMP_VIA_SAVE_AREA is true. The problem is that on these platforms, the dynamic stack space used can corrupt the original frame, thus causing a crash if a longjmp unwinds to it. */ void optimize_save_area_alloca (rtx insns) { rtx insn; for (insn = insns; insn; insn = NEXT_INSN(insn)) { rtx note; if (GET_CODE (insn) != INSN) continue; for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) { if (REG_NOTE_KIND (note) != REG_SAVE_AREA) continue; if (!current_function_calls_setjmp) { rtx pat = PATTERN (insn); /* If we do not see the note in a pattern matching these precise characteristics, we did something entirely wrong in allocate_dynamic_stack_space. Note, one way this could happen is if SETJMP_VIA_SAVE_AREA was defined on a machine where stacks grow towards higher addresses. Right now only supported port with stack that grow upward is the HPPA and it does not define SETJMP_VIA_SAVE_AREA. */ if (GET_CODE (pat) != SET || SET_DEST (pat) != stack_pointer_rtx || GET_CODE (SET_SRC (pat)) != MINUS || XEXP (SET_SRC (pat), 0) != stack_pointer_rtx) abort (); /* This will now be transformed into a (set REG REG) so we can just blow away all the other notes. */ XEXP (SET_SRC (pat), 1) = XEXP (note, 0); REG_NOTES (insn) = NULL_RTX; } else { /* setjmp was called, we must remove the REG_SAVE_AREA note so that later passes do not get confused by its presence. */ if (note == REG_NOTES (insn)) { REG_NOTES (insn) = XEXP (note, 1); } else { rtx srch; for (srch = REG_NOTES (insn); srch; srch = XEXP (srch, 1)) if (XEXP (srch, 1) == note) break; if (srch == NULL_RTX) abort (); XEXP (srch, 1) = XEXP (note, 1); } } /* Once we've seen the note of interest, we need not look at the rest of them. */ break; } } } #endif /* SETJMP_VIA_SAVE_AREA */ /* Return an rtx representing the address of an area of memory dynamically pushed on the stack. This region of memory is always aligned to a multiple of BIGGEST_ALIGNMENT. Any required stack pointer alignment is preserved. SIZE is an rtx representing the size of the area. TARGET is a place in which the address can be placed. KNOWN_ALIGN is the alignment (in bits) that we know SIZE has. */ rtx allocate_dynamic_stack_space (rtx size, rtx target, int known_align) { #ifdef SETJMP_VIA_SAVE_AREA rtx setjmpless_size = NULL_RTX; #endif /* If we're asking for zero bytes, it doesn't matter what we point to since we can't dereference it. But return a reasonable address anyway. */ if (size == const0_rtx) return virtual_stack_dynamic_rtx; /* Otherwise, show we're calling alloca or equivalent. */ current_function_calls_alloca = 1; /* Ensure the size is in the proper mode. */ if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode) size = convert_to_mode (Pmode, size, 1); /* We can't attempt to minimize alignment necessary, because we don't know the final value of preferred_stack_boundary yet while executing this code. */ cfun->preferred_stack_boundary = PREFERRED_STACK_BOUNDARY; /* We will need to ensure that the address we return is aligned to BIGGEST_ALIGNMENT. 
If STACK_DYNAMIC_OFFSET is defined, we don't always know its final value at this point in the compilation (it might depend on the size of the outgoing parameter lists, for example), so we must align the value to be returned in that case. (Note that STACK_DYNAMIC_OFFSET will have a default nonzero value if STACK_POINTER_OFFSET or ACCUMULATE_OUTGOING_ARGS are defined). We must also do an alignment operation on the returned value if the stack pointer alignment is less strict that BIGGEST_ALIGNMENT. If we have to align, we must leave space in SIZE for the hole that might result from the alignment operation. */ #if defined (STACK_DYNAMIC_OFFSET) || defined (STACK_POINTER_OFFSET) #define MUST_ALIGN 1 #else #define MUST_ALIGN (PREFERRED_STACK_BOUNDARY < BIGGEST_ALIGNMENT) #endif if (MUST_ALIGN) size = force_operand (plus_constant (size, BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1), NULL_RTX); #ifdef SETJMP_VIA_SAVE_AREA /* If setjmp restores regs from a save area in the stack frame, avoid clobbering the reg save area. Note that the offset of virtual_incoming_args_rtx includes the preallocated stack args space. It would be no problem to clobber that, but it's on the wrong side of the old save area. */ { rtx dynamic_offset = expand_binop (Pmode, sub_optab, virtual_stack_dynamic_rtx, stack_pointer_rtx, NULL_RTX, 1, OPTAB_LIB_WIDEN); if (!current_function_calls_setjmp) { int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT; /* See optimize_save_area_alloca to understand what is being set up here. */ /* ??? Code below assumes that the save area needs maximal alignment. This constraint may be too strong. */ if (PREFERRED_STACK_BOUNDARY != BIGGEST_ALIGNMENT) abort (); if (GET_CODE (size) == CONST_INT) { HOST_WIDE_INT new = INTVAL (size) / align * align; if (INTVAL (size) != new) setjmpless_size = GEN_INT (new); else setjmpless_size = size; } else { /* Since we know overflow is not possible, we avoid using CEIL_DIV_EXPR and use TRUNC_DIV_EXPR instead. */ setjmpless_size = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, size, GEN_INT (align), NULL_RTX, 1); setjmpless_size = expand_mult (Pmode, setjmpless_size, GEN_INT (align), NULL_RTX, 1); } /* Our optimization works based upon being able to perform a simple transformation of this RTL into a (set REG REG) so make sure things did in fact end up in a REG. */ if (!register_operand (setjmpless_size, Pmode)) setjmpless_size = force_reg (Pmode, setjmpless_size); } size = expand_binop (Pmode, add_optab, size, dynamic_offset, NULL_RTX, 1, OPTAB_LIB_WIDEN); } #endif /* SETJMP_VIA_SAVE_AREA */ /* Round the size to a multiple of the required stack alignment. Since the stack if presumed to be rounded before this allocation, this will maintain the required alignment. If the stack grows downward, we could save an insn by subtracting SIZE from the stack pointer and then aligning the stack pointer. The problem with this is that the stack pointer may be unaligned between the execution of the subtraction and alignment insns and some machines do not allow this. Even on those that do, some signal handlers malfunction if a signal should occur between those insns. Since this is an extremely rare event, we have no reliable way of knowing which systems have this problem. So we avoid even momentarily mis-aligning the stack. */ /* If we added a variable amount to SIZE, we can no longer assume it is aligned. 
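   To make the rounding done by round_push concrete (the numbers are
   purely illustrative): with a PREFERRED_STACK_BOUNDARY of 128 bits and
   BITS_PER_UNIT of 8, the alignment unit is 16 bytes, so a constant
   request of 100 bytes becomes (100 + 16 - 1) / 16 * 16 = 112 bytes,
   while a variable request is rounded with the equivalent add,
   TRUNC_DIV_EXPR and multiply sequence that round_push emits.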
*/ #if !defined (SETJMP_VIA_SAVE_AREA) if (MUST_ALIGN || known_align % PREFERRED_STACK_BOUNDARY != 0) #endif size = round_push (size); do_pending_stack_adjust (); /* We ought to be called always on the toplevel and stack ought to be aligned properly. */ if (stack_pointer_delta % (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)) abort (); /* If needed, check that we have the required amount of stack. Take into account what has already been checked. */ if (flag_stack_check && ! STACK_CHECK_BUILTIN) probe_stack_range (STACK_CHECK_MAX_FRAME_SIZE + STACK_CHECK_PROTECT, size); /* Don't use a TARGET that isn't a pseudo or is the wrong mode. */ if (target == 0 || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER || GET_MODE (target) != Pmode) target = gen_reg_rtx (Pmode); mark_reg_pointer (target, known_align); /* Perform the required allocation from the stack. Some systems do this differently than simply incrementing/decrementing from the stack pointer, such as acquiring the space by calling malloc(). */ #ifdef HAVE_allocate_stack if (HAVE_allocate_stack) { enum machine_mode mode = STACK_SIZE_MODE; insn_operand_predicate_fn pred; /* We don't have to check against the predicate for operand 0 since TARGET is known to be a pseudo of the proper mode, which must be valid for the operand. For operand 1, convert to the proper mode and validate. */ if (mode == VOIDmode) mode = insn_data[(int) CODE_FOR_allocate_stack].operand[1].mode; pred = insn_data[(int) CODE_FOR_allocate_stack].operand[1].predicate; if (pred && ! ((*pred) (size, mode))) size = copy_to_mode_reg (mode, convert_to_mode (mode, size, 1)); emit_insn (gen_allocate_stack (target, size)); } else #endif { #ifndef STACK_GROWS_DOWNWARD emit_move_insn (target, virtual_stack_dynamic_rtx); #endif /* Check stack bounds if necessary. */ if (current_function_limit_stack) { rtx available; rtx space_available = gen_label_rtx (); #ifdef STACK_GROWS_DOWNWARD available = expand_binop (Pmode, sub_optab, stack_pointer_rtx, stack_limit_rtx, NULL_RTX, 1, OPTAB_WIDEN); #else available = expand_binop (Pmode, sub_optab, stack_limit_rtx, stack_pointer_rtx, NULL_RTX, 1, OPTAB_WIDEN); #endif emit_cmp_and_jump_insns (available, size, GEU, NULL_RTX, Pmode, 1, space_available); #ifdef HAVE_trap if (HAVE_trap) emit_insn (gen_trap ()); else #endif error ("stack limits not supported on this target"); emit_barrier (); emit_label (space_available); } anti_adjust_stack (size); #ifdef SETJMP_VIA_SAVE_AREA if (setjmpless_size != NULL_RTX) { rtx note_target = get_last_insn (); REG_NOTES (note_target) = gen_rtx_EXPR_LIST (REG_SAVE_AREA, setjmpless_size, REG_NOTES (note_target)); } #endif /* SETJMP_VIA_SAVE_AREA */ #ifdef STACK_GROWS_DOWNWARD emit_move_insn (target, virtual_stack_dynamic_rtx); #endif } if (MUST_ALIGN) { /* CEIL_DIV_EXPR needs to worry about the addition overflowing, but we know it can't. So add ourselves and then do TRUNC_DIV_EXPR. */ target = expand_binop (Pmode, add_optab, target, GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT - 1), NULL_RTX, 1, OPTAB_LIB_WIDEN); target = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, target, GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT), NULL_RTX, 1); target = expand_mult (Pmode, target, GEN_INT (BIGGEST_ALIGNMENT / BITS_PER_UNIT), NULL_RTX, 1); } /* Record the new stack level for nonlocal gotos. 
*/ if (cfun->nonlocal_goto_save_area != 0) update_nonlocal_goto_save_area (); return target; } /* A front end may want to override GCC's stack checking by providing a run-time routine to call to check the stack, so provide a mechanism for calling that routine. */ static GTY(()) rtx stack_check_libfunc; void set_stack_check_libfunc (rtx libfunc) { stack_check_libfunc = libfunc; } /* Emit one stack probe at ADDRESS, an address within the stack. */ static void emit_stack_probe (rtx address) { rtx memref = gen_rtx_MEM (word_mode, address); MEM_VOLATILE_P (memref) = 1; if (STACK_CHECK_PROBE_LOAD) emit_move_insn (gen_reg_rtx (word_mode), memref); else emit_move_insn (memref, const0_rtx); } /* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive. FIRST is a constant and size is a Pmode RTX. These are offsets from the current stack pointer. STACK_GROWS_DOWNWARD says whether to add or subtract from the stack. If SIZE is constant, this is done with a fixed number of probes. Otherwise, we must make a loop. */ #ifdef STACK_GROWS_DOWNWARD #define STACK_GROW_OP MINUS #else #define STACK_GROW_OP PLUS #endif void probe_stack_range (HOST_WIDE_INT first, rtx size) { /* First ensure SIZE is Pmode. */ if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode) size = convert_to_mode (Pmode, size, 1); /* Next see if the front end has set up a function for us to call to check the stack. */ if (stack_check_libfunc != 0) { rtx addr = memory_address (QImode, gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, stack_pointer_rtx, plus_constant (size, first))); addr = convert_memory_address (ptr_mode, addr); emit_library_call (stack_check_libfunc, LCT_NORMAL, VOIDmode, 1, addr, ptr_mode); } /* Next see if we have an insn to check the stack. Use it if so. */ #ifdef HAVE_check_stack else if (HAVE_check_stack) { insn_operand_predicate_fn pred; rtx last_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, stack_pointer_rtx, plus_constant (size, first)), NULL_RTX); pred = insn_data[(int) CODE_FOR_check_stack].operand[0].predicate; if (pred && ! ((*pred) (last_addr, Pmode))) last_addr = copy_to_mode_reg (Pmode, last_addr); emit_insn (gen_check_stack (last_addr)); } #endif /* If we have to generate explicit probes, see if we have a constant small number of them to generate. If so, that's the easy case. */ else if (GET_CODE (size) == CONST_INT && INTVAL (size) < 10 * STACK_CHECK_PROBE_INTERVAL) { HOST_WIDE_INT offset; /* Start probing at FIRST + N * STACK_CHECK_PROBE_INTERVAL for values of N from 1 until it exceeds LAST. If only one probe is needed, this will not generate any code. Then probe at LAST. */ for (offset = first + STACK_CHECK_PROBE_INTERVAL; offset < INTVAL (size); offset = offset + STACK_CHECK_PROBE_INTERVAL) emit_stack_probe (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, stack_pointer_rtx, GEN_INT (offset))); emit_stack_probe (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, stack_pointer_rtx, plus_constant (size, first))); } /* In the variable case, do the same as above, but in a loop. We emit loop notes so that loop optimization can be done. 
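   The generated insns correspond roughly to the following sketch (shown
   for a downward-growing stack, where the grow operation is MINUS and
   the loop-back comparison is GTU; a probe is the volatile load or
   store emitted by emit_stack_probe):

     test_addr = sp - (FIRST + STACK_CHECK_PROBE_INTERVAL);
     last_addr = sp - (FIRST + SIZE);
     goto test;
   loop:
     probe at test_addr;
     test_addr = test_addr - STACK_CHECK_PROBE_INTERVAL;
   test:
     if (test_addr GTU last_addr) goto loop;
     probe at last_addr;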
*/ else { rtx test_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, stack_pointer_rtx, GEN_INT (first + STACK_CHECK_PROBE_INTERVAL)), NULL_RTX); rtx last_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, stack_pointer_rtx, plus_constant (size, first)), NULL_RTX); rtx incr = GEN_INT (STACK_CHECK_PROBE_INTERVAL); rtx loop_lab = gen_label_rtx (); rtx test_lab = gen_label_rtx (); rtx end_lab = gen_label_rtx (); rtx temp; if (!REG_P (test_addr) || REGNO (test_addr) < FIRST_PSEUDO_REGISTER) test_addr = force_reg (Pmode, test_addr); emit_jump (test_lab); emit_label (loop_lab); emit_stack_probe (test_addr); #ifdef STACK_GROWS_DOWNWARD #define CMP_OPCODE GTU temp = expand_binop (Pmode, sub_optab, test_addr, incr, test_addr, 1, OPTAB_WIDEN); #else #define CMP_OPCODE LTU temp = expand_binop (Pmode, add_optab, test_addr, incr, test_addr, 1, OPTAB_WIDEN); #endif if (temp != test_addr) abort (); emit_label (test_lab); emit_cmp_and_jump_insns (test_addr, last_addr, CMP_OPCODE, NULL_RTX, Pmode, 1, loop_lab); emit_jump (end_lab); emit_label (end_lab); emit_stack_probe (last_addr); } } /* Return an rtx representing the register or memory location in which a scalar value of data type VALTYPE was returned by a function call to function FUNC. FUNC is a FUNCTION_DECL node if the precise function is known, otherwise 0. OUTGOING is 1 if on a machine with register windows this function should return the register in which the function will put its result and 0 otherwise. */ rtx hard_function_value (tree valtype, tree func ATTRIBUTE_UNUSED, int outgoing ATTRIBUTE_UNUSED) { rtx val; #ifdef FUNCTION_OUTGOING_VALUE if (outgoing) val = FUNCTION_OUTGOING_VALUE (valtype, func); else #endif val = FUNCTION_VALUE (valtype, func); if (REG_P (val) && GET_MODE (val) == BLKmode) { unsigned HOST_WIDE_INT bytes = int_size_in_bytes (valtype); enum machine_mode tmpmode; /* int_size_in_bytes can return -1. We don't need a check here since the value of bytes will be large enough that no mode will match and we will abort later in this function. */ for (tmpmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmpmode != VOIDmode; tmpmode = GET_MODE_WIDER_MODE (tmpmode)) { /* Have we found a large enough mode? */ if (GET_MODE_SIZE (tmpmode) >= bytes) break; } /* No suitable mode found. */ if (tmpmode == VOIDmode) abort (); PUT_MODE (val, tmpmode); } return val; } /* Return an rtx representing the register or memory location in which a scalar value of mode MODE was returned by a library call. */ rtx hard_libcall_value (enum machine_mode mode) { return LIBCALL_VALUE (mode); } /* Look up the tree code for a given rtx code to provide the arithmetic operation for REAL_ARITHMETIC. The function returns an int because the caller may not know what `enum tree_code' means. */ int rtx_to_tree_code (enum rtx_code code) { enum tree_code tcode; switch (code) { case PLUS: tcode = PLUS_EXPR; break; case MINUS: tcode = MINUS_EXPR; break; case MULT: tcode = MULT_EXPR; break; case DIV: tcode = RDIV_EXPR; break; case SMIN: tcode = MIN_EXPR; break; case SMAX: tcode = MAX_EXPR; break; default: tcode = LAST_AND_UNUSED_TREE_CODE; break; } return ((int) tcode); } /* Type information for explow.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_explow_h[] = { { &stack_check_libfunc, 1, sizeof (stack_check_libfunc), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, LAST_GGC_ROOT_TAB }; /* Rtl-level induction variable analysis. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This is just a very simplistic analysis of induction variables of the loop. The major use is for determining the number of iterations of a loop for loop unrolling, doloop optimization and branch prediction. For this we are only interested in bivs and a fairly limited set of givs that are needed in the exit condition. We also only compute the iv information on demand. The interesting registers are determined. A register is interesting if -- it is set only in the blocks that dominate the latch of the current loop -- all its sets are simple -- i.e. in the form we understand We also number the insns sequentially in each basic block. For a use of the interesting reg, it is now easy to find a reaching definition (there may be only one). Induction variable is then simply analyzed by walking the use-def chains. Usage: iv_analysis_loop_init (loop); insn = iv_get_reaching_def (where, reg); if (iv_analyze (insn, reg, &iv)) { ... } iv_analysis_done (); */ /* The insn information. */ struct insn_iv_info { /* Id of the insn. */ unsigned luid; /* The previous definition of the register defined by the single set in the insn. */ rtx prev_def; /* The description of the iv. */ struct rtx_iv iv; }; static struct insn_iv_info *insn_iv_info; /* The last definition of register. */ static rtx *last_def; /* The bivs. */ static struct rtx_iv *bivs; /* Maximal insn number for that there is place in insn_iv_info array. */ static unsigned max_insn_no; /* Maximal register number for that there is place in bivs and last_def arrays. */ static unsigned max_reg_no; /* Dumps information about IV to FILE. 
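   A minimal usage sketch, assuming an iv has already been obtained from
   iv_analyze (INSN and REG are placeholders):

     struct rtx_iv iv;

     if (iv_analyze (insn, reg, &iv) && dump_file)
       {
         dump_iv_info (dump_file, &iv);
         fprintf (dump_file, "\n");
       }

   The output reads "BASE + STEP * iteration", followed by the mode and,
   for ivs iterating in a narrower mode than their extend_mode, by the
   extension, multiplier and delta.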
*/ extern void dump_iv_info (FILE *, struct rtx_iv *); void dump_iv_info (FILE *file, struct rtx_iv *iv) { if (!iv->base) { fprintf (file, "not simple"); return; } if (iv->step == const0_rtx) { fprintf (file, "invariant "); print_rtl (file, iv->base); return; } print_rtl (file, iv->base); fprintf (file, " + "); print_rtl (file, iv->step); fprintf (file, " * iteration"); fprintf (file, " (in %s)", GET_MODE_NAME (iv->mode)); if (iv->mode != iv->extend_mode) fprintf (file, " %s to %s", rtx_name[iv->extend], GET_MODE_NAME (iv->extend_mode)); if (iv->mult != const1_rtx) { fprintf (file, " * "); print_rtl (file, iv->mult); } if (iv->delta != const0_rtx) { fprintf (file, " + "); print_rtl (file, iv->delta); } if (iv->first_special) fprintf (file, " (first special)"); } /* Assigns luids to insns in basic block BB. */ static void assign_luids (basic_block bb) { unsigned i = 0, uid; rtx insn; FOR_BB_INSNS (bb, insn) { uid = INSN_UID (insn); insn_iv_info[uid].luid = i++; insn_iv_info[uid].prev_def = NULL_RTX; insn_iv_info[uid].iv.analysed = false; } } /* Generates a subreg to get the least significant part of EXPR (in mode INNER_MODE) to OUTER_MODE. */ static rtx lowpart_subreg (enum machine_mode outer_mode, rtx expr, enum machine_mode inner_mode) { return simplify_gen_subreg (outer_mode, expr, inner_mode, subreg_lowpart_offset (outer_mode, inner_mode)); } /* Checks whether REG is a well-behaved register. */ static bool simple_reg_p (rtx reg) { unsigned r; if (GET_CODE (reg) == SUBREG) { if (!subreg_lowpart_p (reg)) return false; reg = SUBREG_REG (reg); } if (!REG_P (reg)) return false; r = REGNO (reg); if (HARD_REGISTER_NUM_P (r)) return false; if (GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT) return false; if (last_def[r] == const0_rtx) return false; return true; } /* Checks whether assignment LHS = RHS is simple enough for us to process. */ static bool simple_set_p (rtx lhs, rtx rhs) { rtx op0, op1; if (!REG_P (lhs) || !simple_reg_p (lhs)) return false; if (CONSTANT_P (rhs)) return true; switch (GET_CODE (rhs)) { case SUBREG: case REG: return simple_reg_p (rhs); case SIGN_EXTEND: case ZERO_EXTEND: case NEG: return simple_reg_p (XEXP (rhs, 0)); case PLUS: case MINUS: case MULT: case ASHIFT: op0 = XEXP (rhs, 0); op1 = XEXP (rhs, 1); if (!simple_reg_p (op0) && !CONSTANT_P (op0)) return false; if (!simple_reg_p (op1) && !CONSTANT_P (op1)) return false; if (GET_CODE (rhs) == MULT && !CONSTANT_P (op0) && !CONSTANT_P (op1)) return false; if (GET_CODE (rhs) == ASHIFT && CONSTANT_P (op0)) return false; return true; default: return false; } } /* Mark single SET in INSN. */ static rtx mark_single_set (rtx insn, rtx set) { rtx def = SET_DEST (set), src; unsigned regno, uid; src = find_reg_equal_equiv_note (insn); if (src) src = XEXP (src, 0); else src = SET_SRC (set); if (!simple_set_p (SET_DEST (set), src)) return NULL_RTX; regno = REGNO (def); uid = INSN_UID (insn); bivs[regno].analysed = false; insn_iv_info[uid].prev_def = last_def[regno]; last_def[regno] = insn; return def; } /* Invalidate register REG unless it is equal to EXCEPT. */ static void kill_sets (rtx reg, rtx by ATTRIBUTE_UNUSED, void *except) { if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); if (!REG_P (reg)) return; if (reg == except) return; last_def[REGNO (reg)] = const0_rtx; } /* Marks sets in basic block BB. If DOM is true, BB dominates the loop latch. 
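   It is called from iv_analysis_loop_init as

     mark_sets (body[b], just_once_each_iteration_p (loop, body[b]));

   so only single sets in blocks executed exactly once per iteration are
   recorded as candidate definitions; every other store merely
   invalidates its register through kill_sets.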
*/ static void mark_sets (basic_block bb, bool dom) { rtx insn, set, def; FOR_BB_INSNS (bb, insn) { if (!INSN_P (insn)) continue; if (dom && (set = single_set (insn))) def = mark_single_set (insn, set); else def = NULL_RTX; note_stores (PATTERN (insn), kill_sets, def); } } /* Prepare the data for an induction variable analysis of a LOOP. */ void iv_analysis_loop_init (struct loop *loop) { basic_block *body = get_loop_body_in_dom_order (loop); unsigned b; if ((unsigned) get_max_uid () >= max_insn_no) { /* Add some reserve for insns and registers produced in optimizations. */ max_insn_no = get_max_uid () + 100; if (insn_iv_info) free (insn_iv_info); insn_iv_info = xmalloc (max_insn_no * sizeof (struct insn_iv_info)); } if ((unsigned) max_reg_num () >= max_reg_no) { max_reg_no = max_reg_num () + 100; if (last_def) free (last_def); last_def = xmalloc (max_reg_no * sizeof (rtx)); if (bivs) free (bivs); bivs = xmalloc (max_reg_no * sizeof (struct rtx_iv)); } memset (last_def, 0, max_reg_num () * sizeof (rtx)); for (b = 0; b < loop->num_nodes; b++) { assign_luids (body[b]); mark_sets (body[b], just_once_each_iteration_p (loop, body[b])); } free (body); } /* Gets definition of REG reaching the INSN. If REG is not simple, const0_rtx is returned. If INSN is before the first def in the loop, NULL_RTX is returned. */ rtx iv_get_reaching_def (rtx insn, rtx reg) { unsigned regno, luid, auid; rtx ainsn; basic_block bb, abb; if (GET_CODE (reg) == SUBREG) { if (!subreg_lowpart_p (reg)) return const0_rtx; reg = SUBREG_REG (reg); } if (!REG_P (reg)) return NULL_RTX; regno = REGNO (reg); if (!last_def[regno] || last_def[regno] == const0_rtx) return last_def[regno]; bb = BLOCK_FOR_INSN (insn); luid = insn_iv_info[INSN_UID (insn)].luid; ainsn = last_def[regno]; while (1) { abb = BLOCK_FOR_INSN (ainsn); if (dominated_by_p (CDI_DOMINATORS, bb, abb)) break; auid = INSN_UID (ainsn); ainsn = insn_iv_info[auid].prev_def; if (!ainsn) return NULL_RTX; } while (1) { abb = BLOCK_FOR_INSN (ainsn); if (abb != bb) return ainsn; auid = INSN_UID (ainsn); if (luid > insn_iv_info[auid].luid) return ainsn; ainsn = insn_iv_info[auid].prev_def; if (!ainsn) return NULL_RTX; } } /* Sets IV to invariant CST in MODE. Always returns true (just for consistency with other iv manipulation functions that may fail). */ static bool iv_constant (struct rtx_iv *iv, rtx cst, enum machine_mode mode) { if (mode == VOIDmode) mode = GET_MODE (cst); iv->analysed = true; iv->mode = mode; iv->base = cst; iv->step = const0_rtx; iv->first_special = false; iv->extend = NIL; iv->extend_mode = iv->mode; iv->delta = const0_rtx; iv->mult = const1_rtx; return true; } /* Evaluates application of subreg to MODE on IV. */ static bool iv_subreg (struct rtx_iv *iv, enum machine_mode mode) { if (iv->extend_mode == mode) return true; if (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (iv->mode)) return false; iv->extend = NIL; iv->mode = mode; iv->base = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta, simplify_gen_binary (MULT, iv->extend_mode, iv->base, iv->mult)); iv->step = simplify_gen_binary (MULT, iv->extend_mode, iv->step, iv->mult); iv->mult = const1_rtx; iv->delta = const0_rtx; iv->first_special = false; return true; } /* Evaluates application of EXTEND to MODE on IV. */ static bool iv_extend (struct rtx_iv *iv, enum rtx_code extend, enum machine_mode mode) { if (mode != iv->extend_mode) return false; if (iv->extend != NIL && iv->extend != extend) return false; iv->extend = extend; return true; } /* Evaluates negation of IV. 
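   For example, an iv with no pending extension described as
   BASE + STEP * iteration is rewritten in place to
   (-BASE) + (-STEP) * iteration; when an extension is pending, only the
   outer DELTA and MULT fields are negated, so the canonical shape
   delta + mult * extend (...) is preserved.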
*/ static bool iv_neg (struct rtx_iv *iv) { if (iv->extend == NIL) { iv->base = simplify_gen_unary (NEG, iv->extend_mode, iv->base, iv->extend_mode); iv->step = simplify_gen_unary (NEG, iv->extend_mode, iv->step, iv->extend_mode); } else { iv->delta = simplify_gen_unary (NEG, iv->extend_mode, iv->delta, iv->extend_mode); iv->mult = simplify_gen_unary (NEG, iv->extend_mode, iv->mult, iv->extend_mode); } return true; } /* Evaluates addition or subtraction (according to OP) of IV1 to IV0. */ static bool iv_add (struct rtx_iv *iv0, struct rtx_iv *iv1, enum rtx_code op) { enum machine_mode mode; rtx arg; /* Extend the constant to extend_mode of the other operand if necessary. */ if (iv0->extend == NIL && iv0->mode == iv0->extend_mode && iv0->step == const0_rtx && GET_MODE_SIZE (iv0->extend_mode) < GET_MODE_SIZE (iv1->extend_mode)) { iv0->extend_mode = iv1->extend_mode; iv0->base = simplify_gen_unary (ZERO_EXTEND, iv0->extend_mode, iv0->base, iv0->mode); } if (iv1->extend == NIL && iv1->mode == iv1->extend_mode && iv1->step == const0_rtx && GET_MODE_SIZE (iv1->extend_mode) < GET_MODE_SIZE (iv0->extend_mode)) { iv1->extend_mode = iv0->extend_mode; iv1->base = simplify_gen_unary (ZERO_EXTEND, iv1->extend_mode, iv1->base, iv1->mode); } mode = iv0->extend_mode; if (mode != iv1->extend_mode) return false; if (iv0->extend == NIL && iv1->extend == NIL) { if (iv0->mode != iv1->mode) return false; iv0->base = simplify_gen_binary (op, mode, iv0->base, iv1->base); iv0->step = simplify_gen_binary (op, mode, iv0->step, iv1->step); return true; } /* Handle addition of constant. */ if (iv1->extend == NIL && iv1->mode == mode && iv1->step == const0_rtx) { iv0->delta = simplify_gen_binary (op, mode, iv0->delta, iv1->base); return true; } if (iv0->extend == NIL && iv0->mode == mode && iv0->step == const0_rtx) { arg = iv0->base; *iv0 = *iv1; if (op == MINUS && !iv_neg (iv0)) return false; iv0->delta = simplify_gen_binary (PLUS, mode, iv0->delta, arg); return true; } return false; } /* Evaluates multiplication of IV by constant CST. */ static bool iv_mult (struct rtx_iv *iv, rtx mby) { enum machine_mode mode = iv->extend_mode; if (GET_MODE (mby) != VOIDmode && GET_MODE (mby) != mode) return false; if (iv->extend == NIL) { iv->base = simplify_gen_binary (MULT, mode, iv->base, mby); iv->step = simplify_gen_binary (MULT, mode, iv->step, mby); } else { iv->delta = simplify_gen_binary (MULT, mode, iv->delta, mby); iv->mult = simplify_gen_binary (MULT, mode, iv->mult, mby); } return true; } /* Evaluates shift of IV by constant CST. */ static bool iv_shift (struct rtx_iv *iv, rtx mby) { enum machine_mode mode = iv->extend_mode; if (GET_MODE (mby) != VOIDmode && GET_MODE (mby) != mode) return false; if (iv->extend == NIL) { iv->base = simplify_gen_binary (ASHIFT, mode, iv->base, mby); iv->step = simplify_gen_binary (ASHIFT, mode, iv->step, mby); } else { iv->delta = simplify_gen_binary (ASHIFT, mode, iv->delta, mby); iv->mult = simplify_gen_binary (ASHIFT, mode, iv->mult, mby); } return true; } /* The recursive part of get_biv_step. Gets the value of the single value defined in INSN wrto initial value of REG inside loop, in shape described at get_biv_step. 
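   As a concrete sketch (the register name is a placeholder): if the
   only in-loop definition of the biv is

     (set (reg:SI i) (plus:SI (reg:SI i) (const_int 4)))

   the recursion bottoms out at the use of REG itself and the result is
   *inner_step = (const_int 4), *outer_step = const0_rtx, *extend = NIL
   and *inner_mode equal to OUTER_MODE, i.e. a plain SImode biv with
   step 4.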
*/ static bool get_biv_step_1 (rtx insn, rtx reg, rtx *inner_step, enum machine_mode *inner_mode, enum rtx_code *extend, enum machine_mode outer_mode, rtx *outer_step) { rtx set, lhs, rhs, op0 = NULL_RTX, op1 = NULL_RTX; rtx next, nextr, def_insn, tmp; enum rtx_code code; set = single_set (insn); rhs = find_reg_equal_equiv_note (insn); if (rhs) rhs = XEXP (rhs, 0); else rhs = SET_SRC (set); lhs = SET_DEST (set); code = GET_CODE (rhs); switch (code) { case SUBREG: case REG: next = rhs; break; case PLUS: case MINUS: op0 = XEXP (rhs, 0); op1 = XEXP (rhs, 1); if (code == PLUS && CONSTANT_P (op0)) { tmp = op0; op0 = op1; op1 = tmp; } if (!simple_reg_p (op0) || !CONSTANT_P (op1)) return false; if (GET_MODE (rhs) != outer_mode) { /* ppc64 uses expressions like (set x:SI (plus:SI (subreg:SI y:DI) 1)). this is equivalent to (set x':DI (plus:DI y:DI 1)) (set x:SI (subreg:SI (x':DI)). */ if (GET_CODE (op0) != SUBREG) return false; if (GET_MODE (SUBREG_REG (op0)) != outer_mode) return false; } next = op0; break; case SIGN_EXTEND: case ZERO_EXTEND: if (GET_MODE (rhs) != outer_mode) return false; op0 = XEXP (rhs, 0); if (!simple_reg_p (op0)) return false; next = op0; break; default: return false; } if (GET_CODE (next) == SUBREG) { if (!subreg_lowpart_p (next)) return false; nextr = SUBREG_REG (next); if (GET_MODE (nextr) != outer_mode) return false; } else nextr = next; def_insn = iv_get_reaching_def (insn, nextr); if (def_insn == const0_rtx) return false; if (!def_insn) { if (!rtx_equal_p (nextr, reg)) return false; *inner_step = const0_rtx; *extend = NIL; *inner_mode = outer_mode; *outer_step = const0_rtx; } else if (!get_biv_step_1 (def_insn, reg, inner_step, inner_mode, extend, outer_mode, outer_step)) return false; if (GET_CODE (next) == SUBREG) { enum machine_mode amode = GET_MODE (next); if (GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode)) return false; *inner_mode = amode; *inner_step = simplify_gen_binary (PLUS, outer_mode, *inner_step, *outer_step); *outer_step = const0_rtx; *extend = NIL; } switch (code) { case REG: case SUBREG: break; case PLUS: case MINUS: if (*inner_mode == outer_mode /* See comment in previous switch. */ || GET_MODE (rhs) != outer_mode) *inner_step = simplify_gen_binary (code, outer_mode, *inner_step, op1); else *outer_step = simplify_gen_binary (code, outer_mode, *outer_step, op1); break; case SIGN_EXTEND: case ZERO_EXTEND: if (GET_MODE (op0) != *inner_mode || *extend != NIL || *outer_step != const0_rtx) abort (); *extend = code; break; default: abort (); } return true; } /* Gets the operation on register REG inside loop, in shape OUTER_STEP + EXTEND_{OUTER_MODE} (SUBREG_{INNER_MODE} (REG + INNER_STEP)) If the operation cannot be described in this shape, return false. */ static bool get_biv_step (rtx reg, rtx *inner_step, enum machine_mode *inner_mode, enum rtx_code *extend, enum machine_mode *outer_mode, rtx *outer_step) { *outer_mode = GET_MODE (reg); if (!get_biv_step_1 (last_def[REGNO (reg)], reg, inner_step, inner_mode, extend, *outer_mode, outer_step)) return false; if (*inner_mode != *outer_mode && *extend == NIL) abort (); if (*inner_mode == *outer_mode && *extend != NIL) abort (); if (*inner_mode == *outer_mode && *outer_step != const0_rtx) abort (); return true; } /* Determines whether DEF is a biv and if so, stores its description to *IV. 
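   Callers normally reach this through iv_analyze: passing a null insn,

     iv_analyze (NULL_RTX, reg, &iv);

   dispatches here, describes REG purely as a biv of the current loop
   and caches the result in the BIVS array indexed by register number.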
*/ static bool iv_analyze_biv (rtx def, struct rtx_iv *iv) { unsigned regno; rtx inner_step, outer_step; enum machine_mode inner_mode, outer_mode; enum rtx_code extend; if (dump_file) { fprintf (dump_file, "Analysing "); print_rtl (dump_file, def); fprintf (dump_file, " for bivness.\n"); } if (!REG_P (def)) { if (!CONSTANT_P (def)) return false; return iv_constant (iv, def, VOIDmode); } regno = REGNO (def); if (last_def[regno] == const0_rtx) { if (dump_file) fprintf (dump_file, " not simple.\n"); return false; } if (last_def[regno] && bivs[regno].analysed) { if (dump_file) fprintf (dump_file, " already analysed.\n"); *iv = bivs[regno]; return iv->base != NULL_RTX; } if (!last_def[regno]) { iv_constant (iv, def, VOIDmode); goto end; } iv->analysed = true; if (!get_biv_step (def, &inner_step, &inner_mode, &extend, &outer_mode, &outer_step)) { iv->base = NULL_RTX; goto end; } /* Loop transforms base to es (base + inner_step) + outer_step, where es means extend of subreg between inner_mode and outer_mode. The corresponding induction variable is es ((base - outer_step) + i * (inner_step + outer_step)) + outer_step */ iv->base = simplify_gen_binary (MINUS, outer_mode, def, outer_step); iv->step = simplify_gen_binary (PLUS, outer_mode, inner_step, outer_step); iv->mode = inner_mode; iv->extend_mode = outer_mode; iv->extend = extend; iv->mult = const1_rtx; iv->delta = outer_step; iv->first_special = inner_mode != outer_mode; end: if (dump_file) { fprintf (dump_file, " "); dump_iv_info (dump_file, iv); fprintf (dump_file, "\n"); } bivs[regno] = *iv; return iv->base != NULL_RTX; } /* Analyzes operand OP of INSN and stores the result to *IV. */ static bool iv_analyze_op (rtx insn, rtx op, struct rtx_iv *iv) { rtx def_insn; unsigned regno; bool inv = CONSTANT_P (op); if (dump_file) { fprintf (dump_file, "Analysing operand "); print_rtl (dump_file, op); fprintf (dump_file, " of insn "); print_rtl_single (dump_file, insn); } if (GET_CODE (op) == SUBREG) { if (!subreg_lowpart_p (op)) return false; if (!iv_analyze_op (insn, SUBREG_REG (op), iv)) return false; return iv_subreg (iv, GET_MODE (op)); } if (!inv) { regno = REGNO (op); if (!last_def[regno]) inv = true; else if (last_def[regno] == const0_rtx) { if (dump_file) fprintf (dump_file, " not simple.\n"); return false; } } if (inv) { iv_constant (iv, op, VOIDmode); if (dump_file) { fprintf (dump_file, " "); dump_iv_info (dump_file, iv); fprintf (dump_file, "\n"); } return true; } def_insn = iv_get_reaching_def (insn, op); if (def_insn == const0_rtx) { if (dump_file) fprintf (dump_file, " not simple.\n"); return false; } return iv_analyze (def_insn, op, iv); } /* Analyzes iv DEF defined in INSN and stores the result to *IV. 
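   A hedged end-to-end sketch combining this with the helpers above
   (WHERE, REG and N are placeholders supplied by the caller):

     struct rtx_iv iv;
     rtx def, val;

     iv_analysis_loop_init (loop);
     def = iv_get_reaching_def (where, reg);
     if (def != const0_rtx && iv_analyze (def, reg, &iv))
       val = get_iv_value (&iv, n);
     iv_analysis_done ();

   A const0_rtx reaching definition means the register is not simple; a
   null one means no definition inside the loop reaches WHERE, in which
   case iv_analyze falls back to treating REG as a biv or invariant.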
*/ bool iv_analyze (rtx insn, rtx def, struct rtx_iv *iv) { unsigned uid; rtx set, rhs, mby = NULL_RTX, tmp; rtx op0 = NULL_RTX, op1 = NULL_RTX; struct rtx_iv iv0, iv1; enum machine_mode amode; enum rtx_code code; if (insn == const0_rtx) return false; if (GET_CODE (def) == SUBREG) { if (!subreg_lowpart_p (def)) return false; if (!iv_analyze (insn, SUBREG_REG (def), iv)) return false; return iv_subreg (iv, GET_MODE (def)); } if (!insn) return iv_analyze_biv (def, iv); if (dump_file) { fprintf (dump_file, "Analysing def of "); print_rtl (dump_file, def); fprintf (dump_file, " in insn "); print_rtl_single (dump_file, insn); } uid = INSN_UID (insn); if (insn_iv_info[uid].iv.analysed) { if (dump_file) fprintf (dump_file, " already analysed.\n"); *iv = insn_iv_info[uid].iv; return iv->base != NULL_RTX; } iv->mode = VOIDmode; iv->base = NULL_RTX; iv->step = NULL_RTX; set = single_set (insn); rhs = find_reg_equal_equiv_note (insn); if (rhs) rhs = XEXP (rhs, 0); else rhs = SET_SRC (set); code = GET_CODE (rhs); if (CONSTANT_P (rhs)) { op0 = rhs; amode = GET_MODE (def); } else { switch (code) { case SUBREG: if (!subreg_lowpart_p (rhs)) goto end; op0 = rhs; break; case REG: op0 = rhs; break; case SIGN_EXTEND: case ZERO_EXTEND: case NEG: op0 = XEXP (rhs, 0); break; case PLUS: case MINUS: op0 = XEXP (rhs, 0); op1 = XEXP (rhs, 1); break; case MULT: op0 = XEXP (rhs, 0); mby = XEXP (rhs, 1); if (!CONSTANT_P (mby)) { if (!CONSTANT_P (op0)) abort (); tmp = op0; op0 = mby; mby = tmp; } break; case ASHIFT: if (CONSTANT_P (XEXP (rhs, 0))) abort (); op0 = XEXP (rhs, 0); mby = XEXP (rhs, 1); break; default: abort (); } amode = GET_MODE (rhs); } if (op0) { if (!iv_analyze_op (insn, op0, &iv0)) goto end; if (iv0.mode == VOIDmode) { iv0.mode = amode; iv0.extend_mode = amode; } } if (op1) { if (!iv_analyze_op (insn, op1, &iv1)) goto end; if (iv1.mode == VOIDmode) { iv1.mode = amode; iv1.extend_mode = amode; } } switch (code) { case SIGN_EXTEND: case ZERO_EXTEND: if (!iv_extend (&iv0, code, amode)) goto end; break; case NEG: if (!iv_neg (&iv0)) goto end; break; case PLUS: case MINUS: if (!iv_add (&iv0, &iv1, code)) goto end; break; case MULT: if (!iv_mult (&iv0, mby)) goto end; break; case ASHIFT: if (!iv_shift (&iv0, mby)) goto end; break; default: break; } *iv = iv0; end: iv->analysed = true; insn_iv_info[uid].iv = *iv; if (dump_file) { print_rtl (dump_file, def); fprintf (dump_file, " in insn "); print_rtl_single (dump_file, insn); fprintf (dump_file, " is "); dump_iv_info (dump_file, iv); fprintf (dump_file, "\n"); } return iv->base != NULL_RTX; } /* Calculates value of IV at ITERATION-th iteration. */ rtx get_iv_value (struct rtx_iv *iv, rtx iteration) { rtx val; /* We would need to generate some if_then_else patterns, and so far it is not needed anywhere. */ if (iv->first_special) abort (); if (iv->step != const0_rtx && iteration != const0_rtx) val = simplify_gen_binary (PLUS, iv->extend_mode, iv->base, simplify_gen_binary (MULT, iv->extend_mode, iv->step, iteration)); else val = iv->base; if (iv->extend_mode == iv->mode) return val; val = lowpart_subreg (iv->mode, val, iv->extend_mode); if (iv->extend == NIL) return val; val = simplify_gen_unary (iv->extend, iv->extend_mode, val, iv->mode); val = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta, simplify_gen_binary (MULT, iv->extend_mode, iv->mult, val)); return val; } /* Free the data for an induction variable analysis. 
*/ void iv_analysis_done (void) { max_insn_no = 0; max_reg_no = 0; if (insn_iv_info) { free (insn_iv_info); insn_iv_info = NULL; } if (last_def) { free (last_def); last_def = NULL; } if (bivs) { free (bivs); bivs = NULL; } } /* Computes inverse to X modulo (1 << MOD). */ static unsigned HOST_WIDEST_INT inverse (unsigned HOST_WIDEST_INT x, int mod) { unsigned HOST_WIDEST_INT mask = ((unsigned HOST_WIDEST_INT) 1 << (mod - 1) << 1) - 1; unsigned HOST_WIDEST_INT rslt = 1; int i; for (i = 0; i < mod - 1; i++) { rslt = (rslt * x) & mask; x = (x * x) & mask; } return rslt; } /* Tries to estimate the maximum number of iterations. */ static unsigned HOST_WIDEST_INT determine_max_iter (struct niter_desc *desc) { rtx niter = desc->niter_expr; rtx mmin, mmax, left, right; unsigned HOST_WIDEST_INT nmax, inc; if (GET_CODE (niter) == AND && GET_CODE (XEXP (niter, 0)) == CONST_INT) { nmax = INTVAL (XEXP (niter, 0)); if (!(nmax & (nmax + 1))) { desc->niter_max = nmax; return nmax; } } get_mode_bounds (desc->mode, desc->signed_p, desc->mode, &mmin, &mmax); nmax = INTVAL (mmax) - INTVAL (mmin); if (GET_CODE (niter) == UDIV) { if (GET_CODE (XEXP (niter, 1)) != CONST_INT) { desc->niter_max = nmax; return nmax; } inc = INTVAL (XEXP (niter, 1)); niter = XEXP (niter, 0); } else inc = 1; if (GET_CODE (niter) == PLUS) { left = XEXP (niter, 0); right = XEXP (niter, 0); if (GET_CODE (right) == CONST_INT) right = GEN_INT (-INTVAL (right)); } else if (GET_CODE (niter) == MINUS) { left = XEXP (niter, 0); right = XEXP (niter, 0); } else { left = niter; right = mmin; } if (GET_CODE (left) == CONST_INT) mmax = left; if (GET_CODE (right) == CONST_INT) mmin = right; nmax = INTVAL (mmax) - INTVAL (mmin); desc->niter_max = nmax / inc; return nmax / inc; } /* Checks whether register *REG is in set ALT. Callback for for_each_rtx. */ static int altered_reg_used (rtx *reg, void *alt) { if (!REG_P (*reg)) return 0; return REGNO_REG_SET_P (alt, REGNO (*reg)); } /* Marks registers altered by EXPR in set ALT. */ static void mark_altered (rtx expr, rtx by ATTRIBUTE_UNUSED, void *alt) { if (GET_CODE (expr) == SUBREG) expr = SUBREG_REG (expr); if (!REG_P (expr)) return; SET_REGNO_REG_SET (alt, REGNO (expr)); } /* Checks whether RHS is simple enough to process. */ static bool simple_rhs_p (rtx rhs) { rtx op0, op1; if (CONSTANT_P (rhs) || REG_P (rhs)) return true; switch (GET_CODE (rhs)) { case PLUS: case MINUS: op0 = XEXP (rhs, 0); op1 = XEXP (rhs, 1); /* Allow reg + const sets only. */ if (REG_P (op0) && CONSTANT_P (op1)) return true; if (REG_P (op1) && CONSTANT_P (op0)) return true; return false; default: return false; } } /* Simplifies *EXPR using assignment in INSN. ALTERED is the set of registers altered so far. */ static void simplify_using_assignment (rtx insn, rtx *expr, regset altered) { rtx set = single_set (insn); rtx lhs, rhs; bool ret = false; if (set) { lhs = SET_DEST (set); if (!REG_P (lhs) || altered_reg_used (&lhs, altered)) ret = true; } else ret = true; note_stores (PATTERN (insn), mark_altered, altered); if (GET_CODE (insn) == CALL_INSN) { int i; /* Kill all call clobbered registers. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) SET_REGNO_REG_SET (altered, i); } if (ret) return; rhs = find_reg_equal_equiv_note (insn); if (rhs) rhs = XEXP (rhs, 0); else rhs = SET_SRC (set); if (!simple_rhs_p (rhs)) return; if (for_each_rtx (&rhs, altered_reg_used, altered)) return; *expr = simplify_replace_rtx (*expr, lhs, rhs); } /* Checks whether A implies B. 
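   Two shapes the code below is intended to recognize (register names
   are placeholders for this sketch):

     A: (eq (reg r) (const_int 2))   B: (leu (reg r) (const_int 4))
        substituting 2 for r in B lets simplify_replace_rtx fold B to
        const_true_rtx, so A implies B;

     A: (lt (reg x) (reg y))         B: (le (plus (reg x) (const_int 1))
                                            (reg y))
        after normalizing GT to LT and GE to LE, the right-hand operands
        match and opb0 minus op0 simplifies to const1_rtx, i.e.
        "x < y implies x + 1 <= y".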
*/ static bool implies_p (rtx a, rtx b) { rtx op0, op1, opb0, opb1, r; enum machine_mode mode; if (GET_CODE (a) == EQ) { op0 = XEXP (a, 0); op1 = XEXP (a, 1); if (REG_P (op0)) { r = simplify_replace_rtx (b, op0, op1); if (r == const_true_rtx) return true; } if (REG_P (op1)) { r = simplify_replace_rtx (b, op1, op0); if (r == const_true_rtx) return true; } } /* A < B implies A + 1 <= B. */ if ((GET_CODE (a) == GT || GET_CODE (a) == LT) && (GET_CODE (b) == GE || GET_CODE (b) == LE)) { op0 = XEXP (a, 0); op1 = XEXP (a, 1); opb0 = XEXP (b, 0); opb1 = XEXP (b, 1); if (GET_CODE (a) == GT) { r = op0; op0 = op1; op1 = r; } if (GET_CODE (b) == GE) { r = opb0; opb0 = opb1; opb1 = r; } mode = GET_MODE (op0); if (mode != GET_MODE (opb0)) mode = VOIDmode; else if (mode == VOIDmode) { mode = GET_MODE (op1); if (mode != GET_MODE (opb1)) mode = VOIDmode; } if (mode != VOIDmode && rtx_equal_p (op1, opb1) && simplify_gen_binary (MINUS, mode, opb0, op0) == const1_rtx) return true; } return false; } /* Canonicalizes COND so that (1) Ensure that operands are ordered according to swap_commutative_operands_p. (2) (LE x const) will be replaced with (LT x ) and similarly for GE, GEU, and LEU. */ rtx canon_condition (rtx cond) { rtx tem; rtx op0, op1; enum rtx_code code; enum machine_mode mode; code = GET_CODE (cond); op0 = XEXP (cond, 0); op1 = XEXP (cond, 1); if (swap_commutative_operands_p (op0, op1)) { code = swap_condition (code); tem = op0; op0 = op1; op1 = tem; } mode = GET_MODE (op0); if (mode == VOIDmode) mode = GET_MODE (op1); if (mode == VOIDmode) abort (); if (GET_CODE (op1) == CONST_INT && GET_MODE_CLASS (mode) != MODE_CC && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) { HOST_WIDE_INT const_val = INTVAL (op1); unsigned HOST_WIDE_INT uconst_val = const_val; unsigned HOST_WIDE_INT max_val = (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode); switch (code) { case LE: if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1) code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0)); break; /* When cross-compiling, const_val might be sign-extended from BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */ case GE: if ((HOST_WIDE_INT) (const_val & max_val) != (((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))) code = GT, op1 = gen_int_mode (const_val - 1, mode); break; case LEU: if (uconst_val < max_val) code = LTU, op1 = gen_int_mode (uconst_val + 1, mode); break; case GEU: if (uconst_val != 0) code = GTU, op1 = gen_int_mode (uconst_val - 1, mode); break; default: break; } } if (op0 != XEXP (cond, 0) || op1 != XEXP (cond, 1) || code != GET_CODE (cond) || GET_MODE (cond) != SImode) cond = gen_rtx_fmt_ee (code, SImode, op0, op1); return cond; } /* Tries to use the fact that COND holds to simplify EXPR. ALTERED is the set of altered regs. */ void simplify_using_condition (rtx cond, rtx *expr, regset altered) { rtx rev, reve, exp = *expr; if (!COMPARISON_P (exp)) return; /* If some register gets altered later, we do not really speak about its value at the time of comparison. 
*/ if (altered && for_each_rtx (&cond, altered_reg_used, altered)) return; rev = reversed_condition (cond); reve = reversed_condition (exp); cond = canon_condition (cond); exp = canon_condition (exp); if (rev) rev = canon_condition (rev); if (reve) reve = canon_condition (reve); if (rtx_equal_p (exp, cond)) { *expr = const_true_rtx; return; } if (rev && rtx_equal_p (exp, rev)) { *expr = const0_rtx; return; } if (implies_p (cond, exp)) { *expr = const_true_rtx; return; } if (reve && implies_p (cond, reve)) { *expr = const0_rtx; return; } /* A proof by contradiction. If *EXPR implies (not cond), *EXPR must be false. */ if (rev && implies_p (exp, rev)) { *expr = const0_rtx; return; } /* Similarly, If (not *EXPR) implies (not cond), *EXPR must be true. */ if (rev && reve && implies_p (reve, rev)) { *expr = const_true_rtx; return; } /* We would like to have some other tests here. TODO. */ return; } /* Use relationship between A and *B to eventually eliminate *B. OP is the operation we consider. */ static void eliminate_implied_condition (enum rtx_code op, rtx a, rtx *b) { if (op == AND) { /* If A implies *B, we may replace *B by true. */ if (implies_p (a, *b)) *b = const_true_rtx; } else if (op == IOR) { /* If *B implies A, we may replace *B by false. */ if (implies_p (*b, a)) *b = const0_rtx; } else abort (); } /* Eliminates the conditions in TAIL that are implied by HEAD. OP is the operation we consider. */ static void eliminate_implied_conditions (enum rtx_code op, rtx *head, rtx tail) { rtx elt; for (elt = tail; elt; elt = XEXP (elt, 1)) eliminate_implied_condition (op, *head, &XEXP (elt, 0)); for (elt = tail; elt; elt = XEXP (elt, 1)) eliminate_implied_condition (op, XEXP (elt, 0), head); } /* Simplifies *EXPR using initial values at the start of the LOOP. If *EXPR is a list, its elements are assumed to be combined using OP. */ static void simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr) { rtx head, tail, insn; rtx neutral, aggr; regset altered; regset_head altered_head; edge e; if (!*expr) return; if (CONSTANT_P (*expr)) return; if (GET_CODE (*expr) == EXPR_LIST) { head = XEXP (*expr, 0); tail = XEXP (*expr, 1); eliminate_implied_conditions (op, &head, tail); if (op == AND) { neutral = const_true_rtx; aggr = const0_rtx; } else if (op == IOR) { neutral = const0_rtx; aggr = const_true_rtx; } else abort (); simplify_using_initial_values (loop, NIL, &head); if (head == aggr) { XEXP (*expr, 0) = aggr; XEXP (*expr, 1) = NULL_RTX; return; } else if (head == neutral) { *expr = tail; simplify_using_initial_values (loop, op, expr); return; } simplify_using_initial_values (loop, op, &tail); if (tail && XEXP (tail, 0) == aggr) { *expr = tail; return; } XEXP (*expr, 0) = head; XEXP (*expr, 1) = tail; return; } if (op != NIL) abort (); e = loop_preheader_edge (loop); if (e->src == ENTRY_BLOCK_PTR) return; altered = INITIALIZE_REG_SET (altered_head); while (1) { insn = BB_END (e->src); if (any_condjump_p (insn)) { /* FIXME -- slightly wrong -- what if compared register gets altered between start of the condition and insn? 
*/ rtx cond = get_condition (BB_END (e->src), NULL, false); if (cond && (e->flags & EDGE_FALLTHRU)) cond = reversed_condition (cond); if (cond) { simplify_using_condition (cond, expr, altered); if (CONSTANT_P (*expr)) { FREE_REG_SET (altered); return; } } } FOR_BB_INSNS_REVERSE (e->src, insn) { if (!INSN_P (insn)) continue; simplify_using_assignment (insn, expr, altered); if (CONSTANT_P (*expr)) { FREE_REG_SET (altered); return; } } e = e->src->pred; if (e->pred_next || e->src == ENTRY_BLOCK_PTR) break; } FREE_REG_SET (altered); } /* Transforms invariant IV into MODE. Adds assumptions based on the fact that IV occurs as left operands of comparison COND and its signedness is SIGNED_P to DESC. */ static void shorten_into_mode (struct rtx_iv *iv, enum machine_mode mode, enum rtx_code cond, bool signed_p, struct niter_desc *desc) { rtx mmin, mmax, cond_over, cond_under; get_mode_bounds (mode, signed_p, iv->extend_mode, &mmin, &mmax); cond_under = simplify_gen_relational (LT, SImode, iv->extend_mode, iv->base, mmin); cond_over = simplify_gen_relational (GT, SImode, iv->extend_mode, iv->base, mmax); switch (cond) { case LE: case LT: case LEU: case LTU: if (cond_under != const0_rtx) desc->infinite = alloc_EXPR_LIST (0, cond_under, desc->infinite); if (cond_over != const0_rtx) desc->noloop_assumptions = alloc_EXPR_LIST (0, cond_over, desc->noloop_assumptions); break; case GE: case GT: case GEU: case GTU: if (cond_over != const0_rtx) desc->infinite = alloc_EXPR_LIST (0, cond_over, desc->infinite); if (cond_under != const0_rtx) desc->noloop_assumptions = alloc_EXPR_LIST (0, cond_under, desc->noloop_assumptions); break; case NE: if (cond_over != const0_rtx) desc->infinite = alloc_EXPR_LIST (0, cond_over, desc->infinite); if (cond_under != const0_rtx) desc->infinite = alloc_EXPR_LIST (0, cond_under, desc->infinite); break; default: abort (); } iv->mode = mode; iv->extend = signed_p ? SIGN_EXTEND : ZERO_EXTEND; } /* Transforms IV0 and IV1 compared by COND so that they are both compared as subregs of the same mode if possible (sometimes it is necessary to add some assumptions to DESC). */ static bool canonicalize_iv_subregs (struct rtx_iv *iv0, struct rtx_iv *iv1, enum rtx_code cond, struct niter_desc *desc) { enum machine_mode comp_mode; bool signed_p; /* If the ivs behave specially in the first iteration, or are added/multiplied after extending, we ignore them. */ if (iv0->first_special || iv0->mult != const1_rtx || iv0->delta != const0_rtx) return false; if (iv1->first_special || iv1->mult != const1_rtx || iv1->delta != const0_rtx) return false; /* If there is some extend, it must match signedness of the comparison. */ switch (cond) { case LE: case LT: if (iv0->extend == ZERO_EXTEND || iv1->extend == ZERO_EXTEND) return false; signed_p = true; break; case LEU: case LTU: if (iv0->extend == SIGN_EXTEND || iv1->extend == SIGN_EXTEND) return false; signed_p = false; break; case NE: if (iv0->extend != NIL && iv1->extend != NIL && iv0->extend != iv1->extend) return false; signed_p = false; if (iv0->extend != NIL) signed_p = iv0->extend == SIGN_EXTEND; if (iv1->extend != NIL) signed_p = iv1->extend == SIGN_EXTEND; break; default: abort (); } /* Values of both variables should be computed in the same mode. These might indeed be different, if we have comparison like (compare (subreg:SI (iv0)) (subreg:SI (iv1))) and iv0 and iv1 are both ivs iterating in SI mode, but calculated in different modes. This does not seem impossible to handle, but it hardly ever occurs in practice. 
The only exception is the case when one of operands is invariant. For example pentium 3 generates comparisons like (lt (subreg:HI (reg:SI)) 100). Here we assign HImode to 100, but we definitely do not want this prevent the optimization. */ comp_mode = iv0->extend_mode; if (GET_MODE_BITSIZE (comp_mode) < GET_MODE_BITSIZE (iv1->extend_mode)) comp_mode = iv1->extend_mode; if (iv0->extend_mode != comp_mode) { if (iv0->mode != iv0->extend_mode || iv0->step != const0_rtx) return false; iv0->base = simplify_gen_unary (signed_p ? SIGN_EXTEND : ZERO_EXTEND, comp_mode, iv0->base, iv0->mode); iv0->extend_mode = comp_mode; } if (iv1->extend_mode != comp_mode) { if (iv1->mode != iv1->extend_mode || iv1->step != const0_rtx) return false; iv1->base = simplify_gen_unary (signed_p ? SIGN_EXTEND : ZERO_EXTEND, comp_mode, iv1->base, iv1->mode); iv1->extend_mode = comp_mode; } /* Check that both ivs belong to a range of a single mode. If one of the operands is an invariant, we may need to shorten it into the common mode. */ if (iv0->mode == iv0->extend_mode && iv0->step == const0_rtx && iv0->mode != iv1->mode) shorten_into_mode (iv0, iv1->mode, cond, signed_p, desc); if (iv1->mode == iv1->extend_mode && iv1->step == const0_rtx && iv0->mode != iv1->mode) shorten_into_mode (iv1, iv0->mode, swap_condition (cond), signed_p, desc); if (iv0->mode != iv1->mode) return false; desc->mode = iv0->mode; desc->signed_p = signed_p; return true; } /* Computes number of iterations of the CONDITION in INSN in LOOP and stores the result into DESC. Very similar to determine_number_of_iterations (basically its rtl version), complicated by things like subregs. */ void iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition, struct niter_desc *desc) { rtx op0, op1, delta, step, bound, may_xform, def_insn, tmp, tmp0, tmp1; struct rtx_iv iv0, iv1, tmp_iv; rtx assumption, may_not_xform; enum rtx_code cond; enum machine_mode mode, comp_mode; rtx mmin, mmax, mode_mmin, mode_mmax; unsigned HOST_WIDEST_INT s, size, d, inv; HOST_WIDEST_INT up, down, inc; int was_sharp = false; /* The meaning of these assumptions is this: if !assumptions then the rest of information does not have to be valid if noloop_assumptions then the loop does not roll if infinite then this exit is never used */ desc->assumptions = NULL_RTX; desc->noloop_assumptions = NULL_RTX; desc->infinite = NULL_RTX; desc->simple_p = true; desc->const_iter = false; desc->niter_expr = NULL_RTX; desc->niter_max = 0; cond = GET_CODE (condition); if (!COMPARISON_P (condition)) abort (); mode = GET_MODE (XEXP (condition, 0)); if (mode == VOIDmode) mode = GET_MODE (XEXP (condition, 1)); /* The constant comparisons should be folded. */ if (mode == VOIDmode) abort (); /* We only handle integers or pointers. */ if (GET_MODE_CLASS (mode) != MODE_INT && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT) goto fail; op0 = XEXP (condition, 0); def_insn = iv_get_reaching_def (insn, op0); if (!iv_analyze (def_insn, op0, &iv0)) goto fail; if (iv0.extend_mode == VOIDmode) iv0.mode = iv0.extend_mode = mode; op1 = XEXP (condition, 1); def_insn = iv_get_reaching_def (insn, op1); if (!iv_analyze (def_insn, op1, &iv1)) goto fail; if (iv1.extend_mode == VOIDmode) iv1.mode = iv1.extend_mode = mode; if (GET_MODE_BITSIZE (iv0.extend_mode) > HOST_BITS_PER_WIDE_INT || GET_MODE_BITSIZE (iv1.extend_mode) > HOST_BITS_PER_WIDE_INT) goto fail; /* Check condition and normalize it. 
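GE, GT, GEU and GTU are reduced to their mirror images by swapping iv0 and iv1 together with swap_condition, so only LE, LT, LEU, LTU and NE need handling afterwards; anything else (EQ, for instance) is not analyzed and we simply fail.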
*/ switch (cond) { case GE: case GT: case GEU: case GTU: tmp_iv = iv0; iv0 = iv1; iv1 = tmp_iv; cond = swap_condition (cond); break; case NE: case LE: case LEU: case LT: case LTU: break; default: goto fail; } /* Handle extends. This is relatively nontrivial, so we only try in some easy cases, when we can canonicalize the ivs (possibly by adding some assumptions) to shape subreg (base + i * step). This function also fills in desc->mode and desc->signed_p. */ if (!canonicalize_iv_subregs (&iv0, &iv1, cond, desc)) goto fail; comp_mode = iv0.extend_mode; mode = iv0.mode; size = GET_MODE_BITSIZE (mode); get_mode_bounds (mode, (cond == LE || cond == LT), comp_mode, &mmin, &mmax); mode_mmin = lowpart_subreg (mode, mmin, comp_mode); mode_mmax = lowpart_subreg (mode, mmax, comp_mode); if (GET_CODE (iv0.step) != CONST_INT || GET_CODE (iv1.step) != CONST_INT) goto fail; /* We can take care of the case of two induction variables chasing each other if the test is NE. I have never seen a loop using it, but still it is cool. */ if (iv0.step != const0_rtx && iv1.step != const0_rtx) { if (cond != NE) goto fail; iv0.step = simplify_gen_binary (MINUS, comp_mode, iv0.step, iv1.step); iv1.step = const0_rtx; } /* This is either infinite loop or the one that ends immediately, depending on initial values. Unswitching should remove this kind of conditions. */ if (iv0.step == const0_rtx && iv1.step == const0_rtx) goto fail; /* Ignore loops of while (i-- < 10) type. */ if (cond != NE && (INTVAL (iv0.step) < 0 || INTVAL (iv1.step) > 0)) goto fail; /* Some more condition normalization. We must record some assumptions due to overflows. */ switch (cond) { case LT: case LTU: /* We want to take care only of non-sharp relationals; this is easy, as in cases the overflow would make the transformation unsafe the loop does not roll. Seemingly it would make more sense to want to take care of sharp relationals instead, as NE is more similar to them, but the problem is that here the transformation would be more difficult due to possibly infinite loops. */ if (iv0.step == const0_rtx) { tmp = lowpart_subreg (mode, iv0.base, comp_mode); assumption = simplify_gen_relational (EQ, SImode, mode, tmp, mode_mmax); if (assumption == const_true_rtx) goto zero_iter; iv0.base = simplify_gen_binary (PLUS, comp_mode, iv0.base, const1_rtx); } else { tmp = lowpart_subreg (mode, iv1.base, comp_mode); assumption = simplify_gen_relational (EQ, SImode, mode, tmp, mode_mmin); if (assumption == const_true_rtx) goto zero_iter; iv1.base = simplify_gen_binary (PLUS, comp_mode, iv1.base, constm1_rtx); } if (assumption != const0_rtx) desc->noloop_assumptions = alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions); cond = (cond == LT) ? LE : LEU; /* It will be useful to be able to tell the difference once more in LE -> NE reduction. */ was_sharp = true; break; default: ; } /* Take care of trivially infinite loops. */ if (cond != NE) { if (iv0.step == const0_rtx) { tmp = lowpart_subreg (mode, iv0.base, comp_mode); if (rtx_equal_p (tmp, mode_mmin)) { desc->infinite = alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX); return; } } else { tmp = lowpart_subreg (mode, iv1.base, comp_mode); if (rtx_equal_p (tmp, mode_mmax)) { desc->infinite = alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX); return; } } } /* If we can we want to take care of NE conditions instead of size comparisons, as they are much more friendly (most importantly this takes care of special handling of loops with step 1). 
We can do it if we first check that upper bound is greater or equal to lower bound, their difference is constant c modulo step and that there is not an overflow. */ if (cond != NE) { if (iv0.step == const0_rtx) step = simplify_gen_unary (NEG, comp_mode, iv1.step, comp_mode); else step = iv0.step; delta = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base); delta = lowpart_subreg (mode, delta, comp_mode); delta = simplify_gen_binary (UMOD, mode, delta, step); may_xform = const0_rtx; may_not_xform = const_true_rtx; if (GET_CODE (delta) == CONST_INT) { if (was_sharp && INTVAL (delta) == INTVAL (step) - 1) { /* A special case. We have transformed condition of type for (i = 0; i < 4; i += 4) into for (i = 0; i <= 3; i += 4) obviously if the test for overflow during that transformation passed, we cannot overflow here. Most importantly any loop with sharp end condition and step 1 falls into this category, so handling this case specially is definitely worth the troubles. */ may_xform = const_true_rtx; } else if (iv0.step == const0_rtx) { bound = simplify_gen_binary (PLUS, comp_mode, mmin, step); bound = simplify_gen_binary (MINUS, comp_mode, bound, delta); bound = lowpart_subreg (mode, bound, comp_mode); tmp = lowpart_subreg (mode, iv0.base, comp_mode); may_xform = simplify_gen_relational (cond, SImode, mode, bound, tmp); may_not_xform = simplify_gen_relational (reverse_condition (cond), SImode, mode, bound, tmp); } else { bound = simplify_gen_binary (MINUS, comp_mode, mmax, step); bound = simplify_gen_binary (PLUS, comp_mode, bound, delta); bound = lowpart_subreg (mode, bound, comp_mode); tmp = lowpart_subreg (mode, iv1.base, comp_mode); may_xform = simplify_gen_relational (cond, SImode, mode, tmp, bound); may_not_xform = simplify_gen_relational (reverse_condition (cond), SImode, mode, tmp, bound); } } if (may_xform != const0_rtx) { /* We perform the transformation always provided that it is not completely senseless. This is OK, as we would need this assumption to determine the number of iterations anyway. */ if (may_xform != const_true_rtx) { /* If the step is a power of two and the final value we have computed overflows, the cycle is infinite. Otherwise it is nontrivial to compute the number of iterations. */ s = INTVAL (step); if ((s & (s - 1)) == 0) desc->infinite = alloc_EXPR_LIST (0, may_not_xform, desc->infinite); else desc->assumptions = alloc_EXPR_LIST (0, may_xform, desc->assumptions); } /* We are going to lose some information about upper bound on number of iterations in this step, so record the information here. */ inc = INTVAL (iv0.step) - INTVAL (iv1.step); if (GET_CODE (iv1.base) == CONST_INT) up = INTVAL (iv1.base); else up = INTVAL (mode_mmax) - inc; down = INTVAL (GET_CODE (iv0.base) == CONST_INT ? iv0.base : mode_mmin); desc->niter_max = (up - down) / inc + 1; if (iv0.step == const0_rtx) { iv0.base = simplify_gen_binary (PLUS, comp_mode, iv0.base, delta); iv0.base = simplify_gen_binary (MINUS, comp_mode, iv0.base, step); } else { iv1.base = simplify_gen_binary (MINUS, comp_mode, iv1.base, delta); iv1.base = simplify_gen_binary (PLUS, comp_mode, iv1.base, step); } tmp0 = lowpart_subreg (mode, iv0.base, comp_mode); tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); assumption = simplify_gen_relational (reverse_condition (cond), SImode, mode, tmp0, tmp1); if (assumption == const_true_rtx) goto zero_iter; else if (assumption != const0_rtx) desc->noloop_assumptions = alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions); cond = NE; } } /* Count the number of iterations. 
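In the NE branch everything is arithmetic modulo the size of the mode: the condition is rewritten as s * i != c with s positive, and with d = gcd (s, 2^size) the exit is never taken unless d divides c; otherwise the count is inverse (s/d) * (c/d) taken modulo 2^size / d. A small illustrative example (numbers invented, QImode so 2^size is 256): s = 4, c = 8 gives d = 4, s/d = 1, c/d = 2, hence 1 * 2 = 2 iterations, and indeed 4 * 2 = 8 = c. The remaining <= shapes are handled by an ordinary difference divided by the step, guarded by their own overflow assumptions.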
*/ if (cond == NE) { /* Everything we do here is just arithmetics modulo size of mode. This makes us able to do more involved computations of number of iterations than in other cases. First transform the condition into shape s * i <> c, with s positive. */ iv1.base = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base); iv0.base = const0_rtx; iv0.step = simplify_gen_binary (MINUS, comp_mode, iv0.step, iv1.step); iv1.step = const0_rtx; if (INTVAL (iv0.step) < 0) { iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, mode); iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, mode); } iv0.step = lowpart_subreg (mode, iv0.step, comp_mode); /* Let nsd (s, size of mode) = d. If d does not divide c, the loop is infinite. Otherwise, the number of iterations is (inverse(s/d) * (c/d)) mod (size of mode/d). */ s = INTVAL (iv0.step); d = 1; while (s % 2 != 1) { s /= 2; d *= 2; size--; } bound = GEN_INT (((unsigned HOST_WIDEST_INT) 1 << (size - 1 ) << 1) - 1); tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); tmp = simplify_gen_binary (UMOD, mode, tmp1, GEN_INT (d)); assumption = simplify_gen_relational (NE, SImode, mode, tmp, const0_rtx); desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite); tmp = simplify_gen_binary (UDIV, mode, tmp1, GEN_INT (d)); inv = inverse (s, size); inv = trunc_int_for_mode (inv, mode); tmp = simplify_gen_binary (MULT, mode, tmp, GEN_INT (inv)); desc->niter_expr = simplify_gen_binary (AND, mode, tmp, bound); } else { if (iv1.step == const0_rtx) /* Condition in shape a + s * i <= b We must know that b + s does not overflow and a <= b + s and then we can compute number of iterations as (b + s - a) / s. (It might seem that we in fact could be more clever about testing the b + s overflow condition using some information about b - a mod s, but it was already taken into account during LE -> NE transform). */ { step = iv0.step; tmp0 = lowpart_subreg (mode, iv0.base, comp_mode); tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); bound = simplify_gen_binary (MINUS, mode, mode_mmax, lowpart_subreg (mode, step, comp_mode)); assumption = simplify_gen_relational (cond, SImode, mode, tmp1, bound); desc->assumptions = alloc_EXPR_LIST (0, assumption, desc->assumptions); tmp = simplify_gen_binary (PLUS, comp_mode, iv1.base, iv0.step); tmp = lowpart_subreg (mode, tmp, comp_mode); assumption = simplify_gen_relational (reverse_condition (cond), SImode, mode, tmp0, tmp); delta = simplify_gen_binary (PLUS, mode, tmp1, step); delta = simplify_gen_binary (MINUS, mode, delta, tmp0); } else { /* Condition in shape a <= b - s * i We must know that a - s does not overflow and a - s <= b and then we can again compute number of iterations as (b - (a - s)) / s. 
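For instance (illustrative numbers only): a = 0, b = 10, s = 3 gives (10 - (0 - 3)) / 3 = 13 / 3 = 4 iterations, the same count the symmetric (b + s - a) / s formula above yields for an increasing variable; the subtraction a - s is exactly why an extra no-overflow assumption is recorded for this shape, mirroring the b + s guard in the increasing case.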
*/ step = simplify_gen_unary (NEG, mode, iv1.step, mode); tmp0 = lowpart_subreg (mode, iv0.base, comp_mode); tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); bound = simplify_gen_binary (MINUS, mode, mode_mmin, lowpart_subreg (mode, step, comp_mode)); assumption = simplify_gen_relational (cond, SImode, mode, bound, tmp0); desc->assumptions = alloc_EXPR_LIST (0, assumption, desc->assumptions); tmp = simplify_gen_binary (PLUS, comp_mode, iv0.base, iv1.step); tmp = lowpart_subreg (mode, tmp, comp_mode); assumption = simplify_gen_relational (reverse_condition (cond), SImode, mode, tmp, tmp1); delta = simplify_gen_binary (MINUS, mode, tmp0, step); delta = simplify_gen_binary (MINUS, mode, tmp1, delta); } if (assumption == const_true_rtx) goto zero_iter; else if (assumption != const0_rtx) desc->noloop_assumptions = alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions); delta = simplify_gen_binary (UDIV, mode, delta, step); desc->niter_expr = delta; } simplify_using_initial_values (loop, AND, &desc->assumptions); if (desc->assumptions && XEXP (desc->assumptions, 0) == const0_rtx) goto fail; simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions); simplify_using_initial_values (loop, IOR, &desc->infinite); simplify_using_initial_values (loop, NIL, &desc->niter_expr); /* Rerun the simplification. Consider code (created by copying loop headers) i = 0; if (0 < n) { do { i++; } while (i < n); } The first pass determines that i = 0, the second pass uses it to eliminate noloop assumption. */ simplify_using_initial_values (loop, AND, &desc->assumptions); if (desc->assumptions && XEXP (desc->assumptions, 0) == const0_rtx) goto fail; simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions); simplify_using_initial_values (loop, IOR, &desc->infinite); simplify_using_initial_values (loop, NIL, &desc->niter_expr); if (desc->noloop_assumptions && XEXP (desc->noloop_assumptions, 0) == const_true_rtx) goto zero_iter; if (GET_CODE (desc->niter_expr) == CONST_INT) { unsigned HOST_WIDEST_INT val = INTVAL (desc->niter_expr); desc->const_iter = true; desc->niter_max = desc->niter = val & GET_MODE_MASK (desc->mode); } else if (!desc->niter_max) desc->niter_max = determine_max_iter (desc); return; fail: desc->simple_p = false; return; zero_iter: desc->const_iter = true; desc->niter = 0; desc->niter_max = 0; desc->niter_expr = const0_rtx; return; } /* Checks whether E is a simple exit from LOOP and stores its description into DESC. */ static void check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc) { basic_block exit_bb; rtx condition, at; edge ei; exit_bb = e->src; desc->simple_p = false; /* It must belong directly to the loop. */ if (exit_bb->loop_father != loop) return; /* It must be tested (at least) once during any iteration. */ if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit_bb)) return; /* It must end in a simple conditional jump. */ if (!any_condjump_p (BB_END (exit_bb))) return; ei = exit_bb->succ; if (ei == e) ei = ei->succ_next; desc->out_edge = e; desc->in_edge = ei; /* Test whether the condition is suitable. */ if (!(condition = get_condition (BB_END (ei->src), &at, false))) return; if (ei->flags & EDGE_FALLTHRU) { condition = reversed_condition (condition); if (!condition) return; } /* Check that we are able to determine number of iterations and fill in information about it. */ iv_number_of_iterations (loop, at, condition, desc); } /* Finds a simple exit of LOOP and stores its description into DESC. 
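If the loop has several simple exits, the one with a known constant number of iterations is preferred, and among those the smallest count wins; a description with a non-constant count is kept only when nothing better has been found.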
*/ void find_simple_exit (struct loop *loop, struct niter_desc *desc) { unsigned i; basic_block *body; edge e; struct niter_desc act; bool any = false; desc->simple_p = false; body = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) { for (e = body[i]->succ; e; e = e->succ_next) { if (flow_bb_inside_loop_p (loop, e->dest)) continue; check_simple_exit (loop, e, &act); if (!act.simple_p) continue; /* Prefer constant iterations; the less the better. */ if (!any) any = true; else if (!act.const_iter || (desc->const_iter && act.niter >= desc->niter)) continue; *desc = act; } } if (dump_file) { if (desc->simple_p) { fprintf (dump_file, "Loop %d is simple:\n", loop->num); fprintf (dump_file, " simple exit %d -> %d\n", desc->out_edge->src->index, desc->out_edge->dest->index); if (desc->assumptions) { fprintf (dump_file, " assumptions: "); print_rtl (dump_file, desc->assumptions); fprintf (dump_file, "\n"); } if (desc->noloop_assumptions) { fprintf (dump_file, " does not roll if: "); print_rtl (dump_file, desc->noloop_assumptions); fprintf (dump_file, "\n"); } if (desc->infinite) { fprintf (dump_file, " infinite if: "); print_rtl (dump_file, desc->infinite); fprintf (dump_file, "\n"); } fprintf (dump_file, " number of iterations: "); print_rtl (dump_file, desc->niter_expr); fprintf (dump_file, "\n"); fprintf (dump_file, " upper bound: "); fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter_max); fprintf (dump_file, "\n"); } else fprintf (dump_file, "Loop %d is not simple.\n", loop->num); } free (body); } /* Creates a simple loop description of LOOP if it was not computed already. */ struct niter_desc * get_simple_loop_desc (struct loop *loop) { struct niter_desc *desc = simple_loop_desc (loop); if (desc) return desc; desc = xmalloc (sizeof (struct niter_desc)); iv_analysis_loop_init (loop); find_simple_exit (loop, desc); loop->aux = desc; return desc; } /* Releases simple loop description for LOOP. */ void free_simple_loop_desc (struct loop *loop) { struct niter_desc *desc = simple_loop_desc (loop); if (!desc) return; free (desc); loop->aux = NULL; } /* Medium-level subroutines: convert bit-field store and extract and shifts, multiplies and divides to rtl instructions. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, rtx); static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, rtx); static rtx extract_fixed_bit_field (enum machine_mode, rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, rtx, int); static rtx mask_rtx (enum machine_mode, int, int, int); static rtx lshift_value (enum machine_mode, rtx, int, int); static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, int); static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx); static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT); /* Nonzero means divides or modulus operations are relatively cheap for powers of two, so don't use branches; emit the operation instead. Usually, this will mean that the MD file will emit non-branch sequences. */ static int sdiv_pow2_cheap[NUM_MACHINE_MODES]; static int smod_pow2_cheap[NUM_MACHINE_MODES]; #ifndef SLOW_UNALIGNED_ACCESS #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT #endif /* For compilers that support multiple targets with different word sizes, MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD. An example is the H8/300(H) compiler. */ #ifndef MAX_BITS_PER_WORD #define MAX_BITS_PER_WORD BITS_PER_WORD #endif /* Reduce conditional compilation elsewhere. */ #ifndef HAVE_insv #define HAVE_insv 0 #define CODE_FOR_insv CODE_FOR_nothing #define gen_insv(a,b,c,d) NULL_RTX #endif #ifndef HAVE_extv #define HAVE_extv 0 #define CODE_FOR_extv CODE_FOR_nothing #define gen_extv(a,b,c,d) NULL_RTX #endif #ifndef HAVE_extzv #define HAVE_extzv 0 #define CODE_FOR_extzv CODE_FOR_nothing #define gen_extzv(a,b,c,d) NULL_RTX #endif /* Cost of various pieces of RTL. Note that some of these are indexed by shift count and some by mode. 
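They are filled in by init_expmed below, which for every integer mode emits a throwaway shift, shift-add and shift-subtract insn, asks recog whether the target really has a matching pattern for each shift count, and records the rtx_cost of those that match; counts the target cannot handle keep the prohibitively large default cost (32000) and are therefore effectively never chosen. */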
*/ static int zero_cost; static int add_cost[NUM_MACHINE_MODES]; static int neg_cost[NUM_MACHINE_MODES]; static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD]; static int mul_cost[NUM_MACHINE_MODES]; static int div_cost[NUM_MACHINE_MODES]; static int mul_widen_cost[NUM_MACHINE_MODES]; static int mul_highpart_cost[NUM_MACHINE_MODES]; void init_expmed (void) { rtx reg, shift_insn, shiftadd_insn, shiftsub_insn; rtx shift_pat, shiftadd_pat, shiftsub_pat; rtx pow2[MAX_BITS_PER_WORD]; rtx cint[MAX_BITS_PER_WORD]; int dummy; int m, n; enum machine_mode mode, wider_mode; start_sequence (); zero_cost = rtx_cost (const0_rtx, 0); init_recog (); for (m = 1; m < MAX_BITS_PER_WORD; m++) { pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m); cint[m] = GEN_INT (m); } for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) { reg = gen_rtx_REG (mode, 10000); add_cost[mode] = rtx_cost (gen_rtx_PLUS (mode, reg, reg), SET); neg_cost[mode] = rtx_cost (gen_rtx_NEG (mode, reg), SET); div_cost[mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET); mul_cost[mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET); sdiv_pow2_cheap[mode] = (rtx_cost (gen_rtx_DIV (mode, reg, GEN_INT (32)), SET) <= 2 * add_cost[mode]); smod_pow2_cheap[mode] = (rtx_cost (gen_rtx_MOD (mode, reg, GEN_INT (32)), SET) <= 2 * add_cost[mode]); wider_mode = GET_MODE_WIDER_MODE (mode); if (wider_mode != VOIDmode) { mul_widen_cost[wider_mode] = rtx_cost (gen_rtx_MULT (wider_mode, gen_rtx_ZERO_EXTEND (wider_mode, reg), gen_rtx_ZERO_EXTEND (wider_mode, reg)), SET); mul_highpart_cost[mode] = rtx_cost (gen_rtx_TRUNCATE (mode, gen_rtx_LSHIFTRT (wider_mode, gen_rtx_MULT (wider_mode, gen_rtx_ZERO_EXTEND (wider_mode, reg), gen_rtx_ZERO_EXTEND (wider_mode, reg)), GEN_INT (GET_MODE_BITSIZE (mode)))), SET); } shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg, gen_rtx_ASHIFT (mode, reg, const0_rtx))); shiftadd_insn = emit_insn (gen_rtx_SET (VOIDmode, reg, gen_rtx_PLUS (mode, gen_rtx_MULT (mode, reg, const0_rtx), reg))); shiftsub_insn = emit_insn (gen_rtx_SET (VOIDmode, reg, gen_rtx_MINUS (mode, gen_rtx_MULT (mode, reg, const0_rtx), reg))); shift_pat = PATTERN (shift_insn); shiftadd_pat = PATTERN (shiftadd_insn); shiftsub_pat = PATTERN (shiftsub_insn); shift_cost[mode][0] = 0; shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode]; n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode)); for (m = 1; m < n; m++) { shift_cost[mode][m] = 32000; XEXP (SET_SRC (shift_pat), 1) = cint[m]; if (recog (shift_pat, shift_insn, &dummy) >= 0) shift_cost[mode][m] = rtx_cost (SET_SRC (shift_pat), SET); shiftadd_cost[mode][m] = 32000; XEXP (XEXP (SET_SRC (shiftadd_pat), 0), 1) = pow2[m]; if (recog (shiftadd_pat, shiftadd_insn, &dummy) >= 0) shiftadd_cost[mode][m] = rtx_cost (SET_SRC (shiftadd_pat), SET); shiftsub_cost[mode][m] = 32000; XEXP (XEXP (SET_SRC (shiftsub_pat), 0), 1) = pow2[m]; if (recog (shiftsub_pat, shiftsub_insn, &dummy) >= 0) shiftsub_cost[mode][m] = rtx_cost (SET_SRC (shiftsub_pat), SET); } } end_sequence (); } /* Return an rtx representing minus the value of X. MODE is the intended mode of the result, useful if X is a CONST_INT. 
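E.g. negate_rtx (SImode, const1_rtx) folds straight to (const_int -1) through simplify_unary_operation; only when the folder cannot handle X is an explicit negation sequence expanded via expand_unop. */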
*/ rtx negate_rtx (enum machine_mode mode, rtx x) { rtx result = simplify_unary_operation (NEG, mode, x, mode); if (result == 0) result = expand_unop (mode, neg_optab, x, NULL_RTX, 0); return result; } /* Report on the availability of insv/extv/extzv and the desired mode of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo is false; else the mode of the specified operand. If OPNO is -1, all the caller cares about is whether the insn is available. */ enum machine_mode mode_for_extraction (enum extraction_pattern pattern, int opno) { const struct insn_data *data; switch (pattern) { case EP_insv: if (HAVE_insv) { data = &insn_data[CODE_FOR_insv]; break; } return MAX_MACHINE_MODE; case EP_extv: if (HAVE_extv) { data = &insn_data[CODE_FOR_extv]; break; } return MAX_MACHINE_MODE; case EP_extzv: if (HAVE_extzv) { data = &insn_data[CODE_FOR_extzv]; break; } return MAX_MACHINE_MODE; default: abort (); } if (opno == -1) return VOIDmode; /* Everyone who uses this function used to follow it with if (result == VOIDmode) result = word_mode; */ if (data->operand[opno].mode == VOIDmode) return word_mode; return data->operand[opno].mode; } /* Generate code to store value from rtx VALUE into a bit-field within structure STR_RTX containing BITSIZE bits starting at bit BITNUM. FIELDMODE is the machine-mode of the FIELD_DECL node for this field. ALIGN is the alignment that STR_RTX is known to have. TOTAL_SIZE is the size of the structure in bytes, or -1 if varying. */ /* ??? Note that there are two different ideas here for how to determine the size to count bits within, for a register. One is BITS_PER_WORD, and the other is the size of operand 3 of the insv pattern. If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD else, we use the mode of operand 3. */ rtx store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode, rtx value, HOST_WIDE_INT total_size) { unsigned int unit = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD; unsigned HOST_WIDE_INT offset = bitnum / unit; unsigned HOST_WIDE_INT bitpos = bitnum % unit; rtx op0 = str_rtx; int byte_offset; enum machine_mode op_mode = mode_for_extraction (EP_insv, 3); /* Discount the part of the structure before the desired byte. We need to know how many bytes are safe to reference after it. */ if (total_size >= 0) total_size -= (bitpos / BIGGEST_ALIGNMENT * (BIGGEST_ALIGNMENT / BITS_PER_UNIT)); while (GET_CODE (op0) == SUBREG) { /* The following line once was done only if WORDS_BIG_ENDIAN, but I think that is a mistake. WORDS_BIG_ENDIAN is meaningful at a much higher level; when structures are copied between memory and regs, the higher-numbered regs always get higher addresses. */ offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD); /* We used to adjust BITPOS here, but now we do the whole adjustment right after the loop. */ op0 = SUBREG_REG (op0); } value = protect_from_queue (value, 0); /* Use vec_extract patterns for extracting parts of vectors whenever available. 
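(For this store path the pattern actually consulted is vec_set, the insertion counterpart of vec_extract: the vec_set_optab handler is checked just below, and the extraction side is handled analogously in extract_bit_field further down.) */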
*/ if (VECTOR_MODE_P (GET_MODE (op0)) && !MEM_P (op0) && (vec_set_optab->handlers[GET_MODE (op0)].insn_code != CODE_FOR_nothing) && fieldmode == GET_MODE_INNER (GET_MODE (op0)) && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0))) && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0))))) { enum machine_mode outermode = GET_MODE (op0); enum machine_mode innermode = GET_MODE_INNER (outermode); int icode = (int) vec_set_optab->handlers[outermode].insn_code; int pos = bitnum / GET_MODE_BITSIZE (innermode); rtx rtxpos = GEN_INT (pos); rtx src = value; rtx dest = op0; rtx pat, seq; enum machine_mode mode0 = insn_data[icode].operand[0].mode; enum machine_mode mode1 = insn_data[icode].operand[1].mode; enum machine_mode mode2 = insn_data[icode].operand[2].mode; start_sequence (); if (! (*insn_data[icode].operand[1].predicate) (src, mode1)) src = copy_to_mode_reg (mode1, src); if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2)) rtxpos = copy_to_mode_reg (mode1, rtxpos); /* We could handle this, but we should always be called with a pseudo for our targets and all insns should take them as outputs. */ if (! (*insn_data[icode].operand[0].predicate) (dest, mode0) || ! (*insn_data[icode].operand[1].predicate) (src, mode1) || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2)) abort (); pat = GEN_FCN (icode) (dest, src, rtxpos); seq = get_insns (); end_sequence (); if (pat) { emit_insn (seq); emit_insn (pat); return dest; } } if (flag_force_mem) { int old_generating_concat_p = generating_concat_p; generating_concat_p = 0; value = force_not_mem (value); generating_concat_p = old_generating_concat_p; } /* If the target is a register, overwriting the entire object, or storing a full-word or multi-word field can be done with just a SUBREG. If the target is memory, storing any naturally aligned field can be done with a simple store. For targets that support fast unaligned memory, any naturally sized, unit aligned field can be done directly. */ byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT + (offset * UNITS_PER_WORD); if (bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode) && (!MEM_P (op0) ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode)) && byte_offset % GET_MODE_SIZE (fieldmode) == 0) : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0)) || (offset * BITS_PER_UNIT % bitsize == 0 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0)))) { if (GET_MODE (op0) != fieldmode) { if (GET_CODE (op0) == SUBREG) { if (GET_MODE (SUBREG_REG (op0)) == fieldmode || GET_MODE_CLASS (fieldmode) == MODE_INT || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT) op0 = SUBREG_REG (op0); else /* Else we've got some float mode source being extracted into a different float mode destination -- this combination of subregs results in Severe Tire Damage. */ abort (); } if (REG_P (op0)) op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset); else op0 = adjust_address (op0, fieldmode, offset); } emit_move_insn (op0, value); return value; } /* Make sure we are playing with integral modes. Pun with subregs if we aren't. This must come after the entire register case above, since that case is valid for any mode. The following cases are only valid for integral modes. 
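For instance, on a typical 32-bit target an SFmode operand is re-viewed here as SImode, since int_mode_for_mode picks the integer mode of the same size, so the masking and shifting done below can manipulate its bits directly; a value for which no such integer mode exists hits the abort. */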
*/ { enum machine_mode imode = int_mode_for_mode (GET_MODE (op0)); if (imode != GET_MODE (op0)) { if (MEM_P (op0)) op0 = adjust_address (op0, imode, 0); else if (imode != BLKmode) op0 = gen_lowpart (imode, op0); else abort (); } } /* We may be accessing data outside the field, which means we can alias adjacent data. */ if (MEM_P (op0)) { op0 = shallow_copy_rtx (op0); set_mem_alias_set (op0, 0); set_mem_expr (op0, 0); } /* If OP0 is a register, BITPOS must count within a word. But as we have it, it counts within whatever size OP0 now has. On a bigendian machine, these are not the same, so convert. */ if (BYTES_BIG_ENDIAN && !MEM_P (op0) && unit > GET_MODE_BITSIZE (GET_MODE (op0))) bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0)); /* Storing an lsb-aligned field in a register can be done with a movestrict instruction. */ if (!MEM_P (op0) && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0) && bitsize == GET_MODE_BITSIZE (fieldmode) && (movstrict_optab->handlers[fieldmode].insn_code != CODE_FOR_nothing)) { int icode = movstrict_optab->handlers[fieldmode].insn_code; /* Get appropriate low part of the value being stored. */ if (GET_CODE (value) == CONST_INT || REG_P (value)) value = gen_lowpart (fieldmode, value); else if (!(GET_CODE (value) == SYMBOL_REF || GET_CODE (value) == LABEL_REF || GET_CODE (value) == CONST)) value = convert_to_mode (fieldmode, value, 0); if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode)) value = copy_to_mode_reg (fieldmode, value); if (GET_CODE (op0) == SUBREG) { if (GET_MODE (SUBREG_REG (op0)) == fieldmode || GET_MODE_CLASS (fieldmode) == MODE_INT || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT) op0 = SUBREG_REG (op0); else /* Else we've got some float mode source being extracted into a different float mode destination -- this combination of subregs results in Severe Tire Damage. */ abort (); } emit_insn (GEN_FCN (icode) (gen_rtx_SUBREG (fieldmode, op0, (bitnum % BITS_PER_WORD) / BITS_PER_UNIT + (offset * UNITS_PER_WORD)), value)); return value; } /* Handle fields bigger than a word. */ if (bitsize > BITS_PER_WORD) { /* Here we transfer the words of the field in the order least significant first. This is because the most significant word is the one which may be less than full. However, only do that if the value is not BLKmode. */ unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode; unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD; unsigned int i; /* This is the mode we must force value to, so that there will be enough subwords to extract. Note that fieldmode will often (always?) be VOIDmode, because that is what store_field uses to indicate that this is a bit field, but passing VOIDmode to operand_subword_force will result in an abort. */ fieldmode = GET_MODE (value); if (fieldmode == VOIDmode) fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT); for (i = 0; i < nwords; i++) { /* If I is 0, use the low-order word in both field and target; if I is 1, use the next to lowest word; and so on. */ unsigned int wordnum = (backwards ? nwords - i - 1 : i); unsigned int bit_offset = (backwards ? MAX ((int) bitsize - ((int) i + 1) * BITS_PER_WORD, 0) : (int) i * BITS_PER_WORD); store_bit_field (op0, MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD), bitnum + bit_offset, word_mode, operand_subword_force (value, wordnum, fieldmode), total_size); } return value; } /* From here on we can assume that the field to be stored in is a full-word (whatever type that is), since it is shorter than a word. 
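(In other words, after the multi-word case above we know bitsize <= BITS_PER_WORD, so a single word of OP0, possibly accessed through a word_mode SUBREG just below, is enough to hold the whole field.)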
*/ /* OFFSET is the number of words or bytes (UNIT says which) from STR_RTX to the first word or byte containing part of the field. */ if (!MEM_P (op0)) { if (offset != 0 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD) { if (!REG_P (op0)) { /* Since this is a destination (lvalue), we can't copy it to a pseudo. We can trivially remove a SUBREG that does not change the size of the operand. Such a SUBREG may have been added above. Otherwise, abort. */ if (GET_CODE (op0) == SUBREG && (GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))) op0 = SUBREG_REG (op0); else abort (); } op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0), op0, (offset * UNITS_PER_WORD)); } offset = 0; } else op0 = protect_from_queue (op0, 1); /* If VALUE is a floating-point mode, access it as an integer of the corresponding size. This can occur on a machine with 64 bit registers that uses SFmode for float. This can also occur for unaligned float structure fields. */ if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT) value = gen_lowpart ((GET_MODE (value) == VOIDmode ? word_mode : int_mode_for_mode (GET_MODE (value))), value); /* Now OFFSET is nonzero only if OP0 is memory and is therefore always measured in bytes. */ if (HAVE_insv && GET_MODE (value) != BLKmode && !(bitsize == 1 && GET_CODE (value) == CONST_INT) /* Ensure insv's size is wide enough for this field. */ && (GET_MODE_BITSIZE (op_mode) >= bitsize) && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG) && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode)))) { int xbitpos = bitpos; rtx value1; rtx xop0 = op0; rtx last = get_last_insn (); rtx pat; enum machine_mode maxmode = mode_for_extraction (EP_insv, 3); int save_volatile_ok = volatile_ok; volatile_ok = 1; /* If this machine's insv can only insert into a register, copy OP0 into a register and save it back later. */ /* This used to check flag_force_mem, but that was a serious de-optimization now that flag_force_mem is enabled by -O2. */ if (MEM_P (op0) && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate) (op0, VOIDmode))) { rtx tempreg; enum machine_mode bestmode; /* Get the mode to use for inserting into this field. If OP0 is BLKmode, get the smallest mode consistent with the alignment. If OP0 is a non-BLKmode object that is no wider than MAXMODE, use its mode. Otherwise, use the smallest mode containing the field. */ if (GET_MODE (op0) == BLKmode || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode)) bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode, MEM_VOLATILE_P (op0)); else bestmode = GET_MODE (op0); if (bestmode == VOIDmode || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0)) && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0))) goto insv_loses; /* Adjust address to point to the containing unit of that mode. Compute offset as multiple of this unit, counting in bytes. */ unit = GET_MODE_BITSIZE (bestmode); offset = (bitnum / unit) * GET_MODE_SIZE (bestmode); bitpos = bitnum % unit; op0 = adjust_address (op0, bestmode, offset); /* Fetch that unit, store the bitfield in it, then store the unit. */ tempreg = copy_to_reg (op0); store_bit_field (tempreg, bitsize, bitpos, fieldmode, value, total_size); emit_move_insn (op0, tempreg); return value; } volatile_ok = save_volatile_ok; /* Add OFFSET into OP0's address. */ if (MEM_P (xop0)) xop0 = adjust_address (xop0, byte_mode, offset); /* If xop0 is a register, we need it in MAXMODE to make it acceptable to the format of insv. 
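MAXMODE here comes from the insv pattern itself (the mode of its operand 3, frequently word_mode), and the register is merely re-viewed through a SUBREG of that mode rather than copied, so the original op0 stays intact in case the insv attempt fails and we fall back to store_fixed_bit_field. */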
*/ if (GET_CODE (xop0) == SUBREG) /* We can't just change the mode, because this might clobber op0, and we will need the original value of op0 if insv fails. */ xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0)); if (REG_P (xop0) && GET_MODE (xop0) != maxmode) xop0 = gen_rtx_SUBREG (maxmode, xop0, 0); /* On big-endian machines, we count bits from the most significant. If the bit field insn does not, we must invert. */ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN) xbitpos = unit - bitsize - xbitpos; /* We have been counting XBITPOS within UNIT. Count instead within the size of the register. */ if (BITS_BIG_ENDIAN && !MEM_P (xop0)) xbitpos += GET_MODE_BITSIZE (maxmode) - unit; unit = GET_MODE_BITSIZE (maxmode); /* Convert VALUE to maxmode (which insv insn wants) in VALUE1. */ value1 = value; if (GET_MODE (value) != maxmode) { if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize) { /* Optimization: Don't bother really extending VALUE if it has all the bits we will actually use. However, if we must narrow it, be sure we do it correctly. */ if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode)) { rtx tmp; tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0); if (! tmp) tmp = simplify_gen_subreg (maxmode, force_reg (GET_MODE (value), value1), GET_MODE (value), 0); value1 = tmp; } else value1 = gen_lowpart (maxmode, value1); } else if (GET_CODE (value) == CONST_INT) value1 = gen_int_mode (INTVAL (value), maxmode); else if (!CONSTANT_P (value)) /* Parse phase is supposed to make VALUE's data type match that of the component reference, which is a type at least as wide as the field; so VALUE should have a mode that corresponds to that type. */ abort (); } /* If this machine's insv insists on a register, get VALUE1 into a register. */ if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate) (value1, maxmode))) value1 = force_reg (maxmode, value1); pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1); if (pat) emit_insn (pat); else { delete_insns_since (last); store_fixed_bit_field (op0, offset, bitsize, bitpos, value); } } else insv_loses: /* Insv is not available; store using shifts and boolean ops. */ store_fixed_bit_field (op0, offset, bitsize, bitpos, value); return value; } /* Use shifts and boolean operations to store VALUE into a bit field of width BITSIZE in a memory location specified by OP0 except offset by OFFSET bytes. (OFFSET must be 0 if OP0 is a register.) The field starts at position BITPOS within the byte. (If OP0 is a register, it may be a full word or a narrower mode, but BITPOS still counts within a full word, which is significant on bigendian machines.) Note that protect_from_queue has already been done on OP0 and VALUE. */ static void store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos, rtx value) { enum machine_mode mode; unsigned int total_bits = BITS_PER_WORD; rtx subtarget, temp; int all_zero = 0; int all_one = 0; /* There is a case not handled here: a structure with a known alignment of just a halfword and a field split across two aligned halfwords within the structure. Or likewise a structure with a known alignment of just a byte and a field split across two bytes. Such cases are not supposed to be able to occur. */ if (REG_P (op0) || GET_CODE (op0) == SUBREG) { if (offset != 0) abort (); /* Special treatment for a bit field split across two registers. 
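For example (purely illustrative, assuming BITS_PER_WORD == 32): a 10-bit field starting at bit position 28 of a DImode pseudo has bitsize + bitpos == 38 > 32, so it straddles both words and is punted to store_split_bit_field just below, while a field that fits in one word falls through to the mask-and-ior code instead.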
*/ if (bitsize + bitpos > BITS_PER_WORD) { store_split_bit_field (op0, bitsize, bitpos, value); return; } } else { /* Get the proper mode to use for this field. We want a mode that includes the entire field. If such a mode would be larger than a word, we won't be doing the extraction the normal way. We don't want a mode bigger than the destination. */ mode = GET_MODE (op0); if (GET_MODE_BITSIZE (mode) == 0 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode)) mode = word_mode; mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0)); if (mode == VOIDmode) { /* The only way this should occur is if the field spans word boundaries. */ store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT, value); return; } total_bits = GET_MODE_BITSIZE (mode); /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to be in the range 0 to total_bits-1, and put any excess bytes in OFFSET. */ if (bitpos >= total_bits) { offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT); bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT) * BITS_PER_UNIT); } /* Get ref to an aligned byte, halfword, or word containing the field. Adjust BITPOS to be position within a word, and OFFSET to be the offset of that word. Then alter OP0 to refer to that word. */ bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT; offset -= (offset % (total_bits / BITS_PER_UNIT)); op0 = adjust_address (op0, mode, offset); } mode = GET_MODE (op0); /* Now MODE is either some integral mode for a MEM as OP0, or is a full-word for a REG as OP0. TOTAL_BITS corresponds. The bit field is contained entirely within OP0. BITPOS is the starting bit number within OP0. (OP0's mode may actually be narrower than MODE.) */ if (BYTES_BIG_ENDIAN) /* BITPOS is the distance between our msb and that of the containing datum. Convert it to the distance from the lsb. */ bitpos = total_bits - bitsize - bitpos; /* Now BITPOS is always the distance between our lsb and that of OP0. */ /* Shift VALUE left by BITPOS bits. If VALUE is not constant, we must first convert its mode to MODE. */ if (GET_CODE (value) == CONST_INT) { HOST_WIDE_INT v = INTVAL (value); if (bitsize < HOST_BITS_PER_WIDE_INT) v &= ((HOST_WIDE_INT) 1 << bitsize) - 1; if (v == 0) all_zero = 1; else if ((bitsize < HOST_BITS_PER_WIDE_INT && v == ((HOST_WIDE_INT) 1 << bitsize) - 1) || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1)) all_one = 1; value = lshift_value (mode, value, bitpos, bitsize); } else { int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize && bitpos + bitsize != GET_MODE_BITSIZE (mode)); if (GET_MODE (value) != mode) { if ((REG_P (value) || GET_CODE (value) == SUBREG) && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value))) value = gen_lowpart (mode, value); else value = convert_to_mode (mode, value, 1); } if (must_and) value = expand_binop (mode, and_optab, value, mask_rtx (mode, 0, bitsize, 0), NULL_RTX, 1, OPTAB_LIB_WIDEN); if (bitpos > 0) value = expand_shift (LSHIFT_EXPR, mode, value, build_int_2 (bitpos, 0), NULL_RTX, 1); } /* Now clear the chosen bits in OP0, except that if VALUE is -1 we need not bother. */ subtarget = (REG_P (op0) || ! flag_force_mem) ? op0 : 0; if (! all_one) { temp = expand_binop (mode, and_optab, op0, mask_rtx (mode, bitpos, bitsize, 1), subtarget, 1, OPTAB_LIB_WIDEN); subtarget = temp; } else temp = op0; /* Now logical-or VALUE into OP0, unless it is zero. */ if (! 
all_zero) temp = expand_binop (mode, ior_optab, temp, value, subtarget, 1, OPTAB_LIB_WIDEN); if (op0 != temp) emit_move_insn (op0, temp); } /* Store a bit field that is split across multiple accessible memory objects. OP0 is the REG, SUBREG or MEM rtx for the first of the objects. BITSIZE is the field width; BITPOS the position of its first bit (within the word). VALUE is the value to store. This does not yet handle fields wider than BITS_PER_WORD. */ static void store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos, rtx value) { unsigned int unit; unsigned int bitsdone = 0; /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that much at a time. */ if (REG_P (op0) || GET_CODE (op0) == SUBREG) unit = BITS_PER_WORD; else unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD); /* If VALUE is a constant other than a CONST_INT, get it into a register in WORD_MODE. If we can do this using gen_lowpart_common, do so. Note that VALUE might be a floating-point constant. */ if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT) { rtx word = gen_lowpart_common (word_mode, value); if (word && (value != word)) value = word; else value = gen_lowpart_common (word_mode, force_reg (GET_MODE (value) != VOIDmode ? GET_MODE (value) : word_mode, value)); } while (bitsdone < bitsize) { unsigned HOST_WIDE_INT thissize; rtx part, word; unsigned HOST_WIDE_INT thispos; unsigned HOST_WIDE_INT offset; offset = (bitpos + bitsdone) / unit; thispos = (bitpos + bitsdone) % unit; /* THISSIZE must not overrun a word boundary. Otherwise, store_fixed_bit_field will call us again, and we will mutually recurse forever. */ thissize = MIN (bitsize - bitsdone, BITS_PER_WORD); thissize = MIN (thissize, unit - thispos); if (BYTES_BIG_ENDIAN) { int total_bits; /* We must do an endian conversion exactly the same way as it is done in extract_bit_field, so that the two calls to extract_fixed_bit_field will have comparable arguments. */ if (!MEM_P (value) || GET_MODE (value) == BLKmode) total_bits = BITS_PER_WORD; else total_bits = GET_MODE_BITSIZE (GET_MODE (value)); /* Fetch successively less significant portions. */ if (GET_CODE (value) == CONST_INT) part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value)) >> (bitsize - bitsdone - thissize)) & (((HOST_WIDE_INT) 1 << thissize) - 1)); else /* The args are chosen so that the last part includes the lsb. Give extract_bit_field the value it needs (with endianness compensation) to fetch the piece we want. */ part = extract_fixed_bit_field (word_mode, value, 0, thissize, total_bits - bitsize + bitsdone, NULL_RTX, 1); } else { /* Fetch successively more significant portions. */ if (GET_CODE (value) == CONST_INT) part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value)) >> bitsdone) & (((HOST_WIDE_INT) 1 << thissize) - 1)); else part = extract_fixed_bit_field (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1); } /* If OP0 is a register, then handle OFFSET here. When handling multiword bitfields, extract_bit_field may pass down a word_mode SUBREG of a larger REG for a bitfield that actually crosses a word boundary. Thus, for a SUBREG, we must find the current word starting from the base register. 
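As an illustration (pseudo number invented, UNITS_PER_WORD == 4): if OP0 is (subreg:SI (reg:DI 100) 4) and OFFSET is 0, the word really addressed is word 1 of pseudo 100, which is exactly what the SUBREG_BYTE adjustment below computes before handing that word to store_fixed_bit_field.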
*/ if (GET_CODE (op0) == SUBREG) { int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset; word = operand_subword_force (SUBREG_REG (op0), word_offset, GET_MODE (SUBREG_REG (op0))); offset = 0; } else if (REG_P (op0)) { word = operand_subword_force (op0, offset, GET_MODE (op0)); offset = 0; } else word = op0; /* OFFSET is in UNITs, and UNIT is in bits. store_fixed_bit_field wants offset in bytes. */ store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize, thispos, part); bitsdone += thissize; } } /* Generate code to extract a byte-field from STR_RTX containing BITSIZE bits, starting at BITNUM, and put it in TARGET if possible (if TARGET is nonzero). Regardless of TARGET, we return the rtx for where the value is placed. It may be a QUEUED. STR_RTX is the structure containing the byte (a REG or MEM). UNSIGNEDP is nonzero if this is an unsigned bit field. MODE is the natural mode of the field value once extracted. TMODE is the mode the caller would like the value to have; but the value may be returned with type MODE instead. TOTAL_SIZE is the size in bytes of the containing structure, or -1 if varying. If a TARGET is specified and we can store in it at no extra cost, we do so, and return TARGET. Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred if they are equally easy. */ rtx extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target, enum machine_mode mode, enum machine_mode tmode, HOST_WIDE_INT total_size) { unsigned int unit = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD; unsigned HOST_WIDE_INT offset = bitnum / unit; unsigned HOST_WIDE_INT bitpos = bitnum % unit; rtx op0 = str_rtx; rtx spec_target = target; rtx spec_target_subreg = 0; enum machine_mode int_mode; enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0); enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0); enum machine_mode mode1; int byte_offset; /* Discount the part of the structure before the desired byte. We need to know how many bytes are safe to reference after it. */ if (total_size >= 0) total_size -= (bitpos / BIGGEST_ALIGNMENT * (BIGGEST_ALIGNMENT / BITS_PER_UNIT)); if (tmode == VOIDmode) tmode = mode; while (GET_CODE (op0) == SUBREG) { bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT; if (bitpos > unit) { offset += (bitpos / unit); bitpos %= unit; } op0 = SUBREG_REG (op0); } if (REG_P (op0) && mode == GET_MODE (op0) && bitnum == 0 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0))) { /* We're trying to extract a full register from itself. */ return op0; } /* Use vec_extract patterns for extracting parts of vectors whenever available. */ if (VECTOR_MODE_P (GET_MODE (op0)) && !MEM_P (op0) && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code != CODE_FOR_nothing) && ((bitsize + bitnum) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0))) == bitsize / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0))))) { enum machine_mode outermode = GET_MODE (op0); enum machine_mode innermode = GET_MODE_INNER (outermode); int icode = (int) vec_extract_optab->handlers[outermode].insn_code; int pos = bitnum / GET_MODE_BITSIZE (innermode); rtx rtxpos = GEN_INT (pos); rtx src = op0; rtx dest = NULL, pat, seq; enum machine_mode mode0 = insn_data[icode].operand[0].mode; enum machine_mode mode1 = insn_data[icode].operand[1].mode; enum machine_mode mode2 = insn_data[icode].operand[2].mode; if (innermode == tmode || innermode == mode) dest = target; if (!dest) dest = gen_reg_rtx (innermode); start_sequence (); if (! 
(*insn_data[icode].operand[0].predicate) (dest, mode0)) dest = copy_to_mode_reg (mode0, dest); if (! (*insn_data[icode].operand[1].predicate) (src, mode1)) src = copy_to_mode_reg (mode1, src); if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2)) rtxpos = copy_to_mode_reg (mode1, rtxpos); /* We could handle this, but we should always be called with a pseudo for our targets and all insns should take them as outputs. */ if (! (*insn_data[icode].operand[0].predicate) (dest, mode0) || ! (*insn_data[icode].operand[1].predicate) (src, mode1) || ! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2)) abort (); pat = GEN_FCN (icode) (dest, src, rtxpos); seq = get_insns (); end_sequence (); if (pat) { emit_insn (seq); emit_insn (pat); return dest; } } /* Make sure we are playing with integral modes. Pun with subregs if we aren't. */ { enum machine_mode imode = int_mode_for_mode (GET_MODE (op0)); if (imode != GET_MODE (op0)) { if (MEM_P (op0)) op0 = adjust_address (op0, imode, 0); else if (imode != BLKmode) op0 = gen_lowpart (imode, op0); else abort (); } } /* We may be accessing data outside the field, which means we can alias adjacent data. */ if (MEM_P (op0)) { op0 = shallow_copy_rtx (op0); set_mem_alias_set (op0, 0); set_mem_expr (op0, 0); } /* Extraction of a full-word or multi-word value from a structure in a register or aligned memory can be done with just a SUBREG. A subword value in the least significant part of a register can also be extracted with a SUBREG. For this, we need the byte offset of the value in op0. */ byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD; /* If OP0 is a register, BITPOS must count within a word. But as we have it, it counts within whatever size OP0 now has. On a bigendian machine, these are not the same, so convert. */ if (BYTES_BIG_ENDIAN && !MEM_P (op0) && unit > GET_MODE_BITSIZE (GET_MODE (op0))) bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0)); /* ??? We currently assume TARGET is at least as big as BITSIZE. If that's wrong, the solution is to test for it and set TARGET to 0 if needed. */ /* Only scalar integer modes can be converted via subregs. There is an additional problem for FP modes here in that they can have a precision which is different from the size. mode_for_size uses precision, but we want a mode based on the size, so we must avoid calling it for FP modes. */ mode1 = (SCALAR_INT_MODE_P (tmode) ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) : mode); if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode) && bitpos % BITS_PER_WORD == 0) || (mode1 != BLKmode /* ??? The big endian test here is wrong. This is correct if the value is in a register, and if mode_for_size is not the same mode as op0. This causes us to get unnecessarily inefficient code from the Thumb port when -mbig-endian. */ && (BYTES_BIG_ENDIAN ? bitpos + bitsize == BITS_PER_WORD : bitpos == 0))) && ((!MEM_P (op0) && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (GET_MODE (op0))) && GET_MODE_SIZE (mode1) != 0 && byte_offset % GET_MODE_SIZE (mode1) == 0) || (MEM_P (op0) && (! 
SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0)) || (offset * BITS_PER_UNIT % bitsize == 0 && MEM_ALIGN (op0) % bitsize == 0))))) { if (mode1 != GET_MODE (op0)) { if (GET_CODE (op0) == SUBREG) { if (GET_MODE (SUBREG_REG (op0)) == mode1 || GET_MODE_CLASS (mode1) == MODE_INT || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT) op0 = SUBREG_REG (op0); else /* Else we've got some float mode source being extracted into a different float mode destination -- this combination of subregs results in Severe Tire Damage. */ goto no_subreg_mode_swap; } if (REG_P (op0)) op0 = gen_rtx_SUBREG (mode1, op0, byte_offset); else op0 = adjust_address (op0, mode1, offset); } if (mode1 != mode) return convert_to_mode (tmode, op0, unsignedp); return op0; } no_subreg_mode_swap: /* Handle fields bigger than a word. */ if (bitsize > BITS_PER_WORD) { /* Here we transfer the words of the field in the order least significant first. This is because the most significant word is the one which may be less than full. */ unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD; unsigned int i; if (target == 0 || !REG_P (target)) target = gen_reg_rtx (mode); /* Indicate for flow that the entire target reg is being set. */ emit_insn (gen_rtx_CLOBBER (VOIDmode, target)); for (i = 0; i < nwords; i++) { /* If I is 0, use the low-order word in both field and target; if I is 1, use the next to lowest word; and so on. */ /* Word number in TARGET to use. */ unsigned int wordnum = (WORDS_BIG_ENDIAN ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1 : i); /* Offset from start of field in OP0. */ unsigned int bit_offset = (WORDS_BIG_ENDIAN ? MAX (0, ((int) bitsize - ((int) i + 1) * (int) BITS_PER_WORD)) : (int) i * BITS_PER_WORD); rtx target_part = operand_subword (target, wordnum, 1, VOIDmode); rtx result_part = extract_bit_field (op0, MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD), bitnum + bit_offset, 1, target_part, mode, word_mode, total_size); if (target_part == 0) abort (); if (result_part != target_part) emit_move_insn (target_part, result_part); } if (unsignedp) { /* Unless we've filled TARGET, the upper regs in a multi-reg value need to be zero'd out. */ if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD) { unsigned int i, total_words; total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD; for (i = nwords; i < total_words; i++) emit_move_insn (operand_subword (target, WORDS_BIG_ENDIAN ? total_words - i - 1 : i, 1, VOIDmode), const0_rtx); } return target; } /* Signed bit field: sign-extend with two arithmetic shifts. */ target = expand_shift (LSHIFT_EXPR, mode, target, build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0), NULL_RTX, 0); return expand_shift (RSHIFT_EXPR, mode, target, build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0), NULL_RTX, 0); } /* From here on we know the desired field is smaller than a word. */ /* Check if there is a correspondingly-sized integer field, so we can safely extract it as one size of integer, if necessary; then truncate or extend to the size that is wanted; then use SUBREGs or convert_to_mode to get one of the modes we really wanted. */ int_mode = int_mode_for_mode (tmode); if (int_mode == BLKmode) int_mode = int_mode_for_mode (mode); if (int_mode == BLKmode) abort (); /* Should probably push op0 out to memory and then do a load. */ /* OFFSET is the number of words or bytes (UNIT says which) from STR_RTX to the first word or byte containing part of the field. 
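For example, on a 32-bit target a request for the field starting at bit 70 of a MEM gives UNIT = 8, so OFFSET = 8 bytes and BITPOS = 6, whereas the same BITNUM in a REG gives UNIT = 32, OFFSET = 2 words and BITPOS = 6.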
*/ if (!MEM_P (op0)) { if (offset != 0 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD) { if (!REG_P (op0)) op0 = copy_to_reg (op0); op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0), op0, (offset * UNITS_PER_WORD)); } offset = 0; } else op0 = protect_from_queue (str_rtx, 1); /* Now OFFSET is nonzero only for memory operands. */ if (unsignedp) { if (HAVE_extzv && (GET_MODE_BITSIZE (extzv_mode) >= bitsize) && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG) && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode)))) { unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset; rtx bitsize_rtx, bitpos_rtx; rtx last = get_last_insn (); rtx xop0 = op0; rtx xtarget = target; rtx xspec_target = spec_target; rtx xspec_target_subreg = spec_target_subreg; rtx pat; enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0); if (MEM_P (xop0)) { int save_volatile_ok = volatile_ok; volatile_ok = 1; /* Is the memory operand acceptable? */ if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate) (xop0, GET_MODE (xop0)))) { /* No, load into a reg and extract from there. */ enum machine_mode bestmode; /* Get the mode to use for inserting into this field. If OP0 is BLKmode, get the smallest mode consistent with the alignment. If OP0 is a non-BLKmode object that is no wider than MAXMODE, use its mode. Otherwise, use the smallest mode containing the field. */ if (GET_MODE (xop0) == BLKmode || (GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))) bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (xop0), maxmode, MEM_VOLATILE_P (xop0)); else bestmode = GET_MODE (xop0); if (bestmode == VOIDmode || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0)) && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0))) goto extzv_loses; /* Compute offset as multiple of this unit, counting in bytes. */ unit = GET_MODE_BITSIZE (bestmode); xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode); xbitpos = bitnum % unit; xop0 = adjust_address (xop0, bestmode, xoffset); /* Fetch it to a register in that size. */ xop0 = force_reg (bestmode, xop0); /* XBITPOS counts within UNIT, which is what is expected. */ } else /* Get ref to first byte containing part of the field. */ xop0 = adjust_address (xop0, byte_mode, xoffset); volatile_ok = save_volatile_ok; } /* If op0 is a register, we need it in MAXMODE (which is usually SImode). to make it acceptable to the format of extzv. */ if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode) goto extzv_loses; if (REG_P (xop0) && GET_MODE (xop0) != maxmode) xop0 = gen_rtx_SUBREG (maxmode, xop0, 0); /* On big-endian machines, we count bits from the most significant. If the bit field insn does not, we must invert. */ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN) xbitpos = unit - bitsize - xbitpos; /* Now convert from counting within UNIT to counting in MAXMODE. */ if (BITS_BIG_ENDIAN && !MEM_P (xop0)) xbitpos += GET_MODE_BITSIZE (maxmode) - unit; unit = GET_MODE_BITSIZE (maxmode); if (xtarget == 0 || (flag_force_mem && MEM_P (xtarget))) xtarget = xspec_target = gen_reg_rtx (tmode); if (GET_MODE (xtarget) != maxmode) { if (REG_P (xtarget)) { int wider = (GET_MODE_SIZE (maxmode) > GET_MODE_SIZE (GET_MODE (xtarget))); xtarget = gen_lowpart (maxmode, xtarget); if (wider) xspec_target_subreg = xtarget; } else xtarget = gen_reg_rtx (maxmode); } /* If this machine's extzv insists on a register target, make sure we have one. */ if (! 
((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate) (xtarget, maxmode))) xtarget = gen_reg_rtx (maxmode); bitsize_rtx = GEN_INT (bitsize); bitpos_rtx = GEN_INT (xbitpos); pat = gen_extzv (protect_from_queue (xtarget, 1), xop0, bitsize_rtx, bitpos_rtx); if (pat) { emit_insn (pat); target = xtarget; spec_target = xspec_target; spec_target_subreg = xspec_target_subreg; } else { delete_insns_since (last); target = extract_fixed_bit_field (int_mode, op0, offset, bitsize, bitpos, target, 1); } } else extzv_loses: target = extract_fixed_bit_field (int_mode, op0, offset, bitsize, bitpos, target, 1); } else { if (HAVE_extv && (GET_MODE_BITSIZE (extv_mode) >= bitsize) && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG) && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode)))) { int xbitpos = bitpos, xoffset = offset; rtx bitsize_rtx, bitpos_rtx; rtx last = get_last_insn (); rtx xop0 = op0, xtarget = target; rtx xspec_target = spec_target; rtx xspec_target_subreg = spec_target_subreg; rtx pat; enum machine_mode maxmode = mode_for_extraction (EP_extv, 0); if (MEM_P (xop0)) { /* Is the memory operand acceptable? */ if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate) (xop0, GET_MODE (xop0)))) { /* No, load into a reg and extract from there. */ enum machine_mode bestmode; /* Get the mode to use for inserting into this field. If OP0 is BLKmode, get the smallest mode consistent with the alignment. If OP0 is a non-BLKmode object that is no wider than MAXMODE, use its mode. Otherwise, use the smallest mode containing the field. */ if (GET_MODE (xop0) == BLKmode || (GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))) bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (xop0), maxmode, MEM_VOLATILE_P (xop0)); else bestmode = GET_MODE (xop0); if (bestmode == VOIDmode || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0)) && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0))) goto extv_loses; /* Compute offset as multiple of this unit, counting in bytes. */ unit = GET_MODE_BITSIZE (bestmode); xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode); xbitpos = bitnum % unit; xop0 = adjust_address (xop0, bestmode, xoffset); /* Fetch it to a register in that size. */ xop0 = force_reg (bestmode, xop0); /* XBITPOS counts within UNIT, which is what is expected. */ } else /* Get ref to first byte containing part of the field. */ xop0 = adjust_address (xop0, byte_mode, xoffset); } /* If op0 is a register, we need it in MAXMODE (which is usually SImode) to make it acceptable to the format of extv. */ if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode) goto extv_loses; if (REG_P (xop0) && GET_MODE (xop0) != maxmode) xop0 = gen_rtx_SUBREG (maxmode, xop0, 0); /* On big-endian machines, we count bits from the most significant. If the bit field insn does not, we must invert. */ if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN) xbitpos = unit - bitsize - xbitpos; /* XBITPOS counts within a size of UNIT. Adjust to count within a size of MAXMODE. */ if (BITS_BIG_ENDIAN && !MEM_P (xop0)) xbitpos += (GET_MODE_BITSIZE (maxmode) - unit); unit = GET_MODE_BITSIZE (maxmode); if (xtarget == 0 || (flag_force_mem && MEM_P (xtarget))) xtarget = xspec_target = gen_reg_rtx (tmode); if (GET_MODE (xtarget) != maxmode) { if (REG_P (xtarget)) { int wider = (GET_MODE_SIZE (maxmode) > GET_MODE_SIZE (GET_MODE (xtarget))); xtarget = gen_lowpart (maxmode, xtarget); if (wider) xspec_target_subreg = xtarget; } else xtarget = gen_reg_rtx (maxmode); } /* If this machine's extv insists on a register target, make sure we have one. */ if (! 
((*insn_data[(int) CODE_FOR_extv].operand[0].predicate) (xtarget, maxmode))) xtarget = gen_reg_rtx (maxmode); bitsize_rtx = GEN_INT (bitsize); bitpos_rtx = GEN_INT (xbitpos); pat = gen_extv (protect_from_queue (xtarget, 1), xop0, bitsize_rtx, bitpos_rtx); if (pat) { emit_insn (pat); target = xtarget; spec_target = xspec_target; spec_target_subreg = xspec_target_subreg; } else { delete_insns_since (last); target = extract_fixed_bit_field (int_mode, op0, offset, bitsize, bitpos, target, 0); } } else extv_loses: target = extract_fixed_bit_field (int_mode, op0, offset, bitsize, bitpos, target, 0); } if (target == spec_target) return target; if (target == spec_target_subreg) return spec_target; if (GET_MODE (target) != tmode && GET_MODE (target) != mode) { /* If the target mode is floating-point, first convert to the integer mode of that size and then access it as a floating-point value via a SUBREG. */ if (GET_MODE_CLASS (tmode) != MODE_INT && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT) { target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0), target, unsignedp); return gen_lowpart (tmode, target); } else return convert_to_mode (tmode, target, unsignedp); } return target; } /* Extract a bit field using shifts and boolean operations Returns an rtx to represent the value. OP0 addresses a register (word) or memory (byte). BITPOS says which bit within the word or byte the bit field starts in. OFFSET says how many bytes farther the bit field starts; it is 0 if OP0 is a register. BITSIZE says how many bits long the bit field is. (If OP0 is a register, it may be narrower than a full word, but BITPOS still counts within a full word, which is significant on bigendian machines.) UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value). If TARGET is nonzero, attempts to store the value there and return TARGET, but this is not guaranteed. If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */ static rtx extract_fixed_bit_field (enum machine_mode tmode, rtx op0, unsigned HOST_WIDE_INT offset, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos, rtx target, int unsignedp) { unsigned int total_bits = BITS_PER_WORD; enum machine_mode mode; if (GET_CODE (op0) == SUBREG || REG_P (op0)) { /* Special treatment for a bit field split across two registers. */ if (bitsize + bitpos > BITS_PER_WORD) return extract_split_bit_field (op0, bitsize, bitpos, unsignedp); } else { /* Get the proper mode to use for this field. We want a mode that includes the entire field. If such a mode would be larger than a word, we won't be doing the extraction the normal way. */ mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0)); if (mode == VOIDmode) /* The only way this should occur is if the field spans word boundaries. */ return extract_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT, unsignedp); total_bits = GET_MODE_BITSIZE (mode); /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to be in the range 0 to total_bits-1, and put any excess bytes in OFFSET. */ if (bitpos >= total_bits) { offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT); bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT) * BITS_PER_UNIT); } /* Get ref to an aligned byte, halfword, or word containing the field. Adjust BITPOS to be position within a word, and OFFSET to be the offset of that word. Then alter OP0 to refer to that word. 
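For example, if get_best_mode chose SImode (TOTAL_BITS = 32) and the field was described by OFFSET = 5, BITPOS = 3, the adjustment below yields OFFSET = 4, BITPOS = 11, i.e. the same bit addressed within the aligned SImode word that starts at byte 4.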
*/ bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT; offset -= (offset % (total_bits / BITS_PER_UNIT)); op0 = adjust_address (op0, mode, offset); } mode = GET_MODE (op0); if (BYTES_BIG_ENDIAN) /* BITPOS is the distance between our msb and that of OP0. Convert it to the distance from the lsb. */ bitpos = total_bits - bitsize - bitpos; /* Now BITPOS is always the distance between the field's lsb and that of OP0. We have reduced the big-endian case to the little-endian case. */ if (unsignedp) { if (bitpos) { /* If the field does not already start at the lsb, shift it so it does. */ tree amount = build_int_2 (bitpos, 0); /* Maybe propagate the target for the shift. */ /* But not if we will return it--could confuse integrate.c. */ rtx subtarget = (target != 0 && REG_P (target) ? target : 0); if (tmode != mode) subtarget = 0; op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1); } /* Convert the value to the desired mode. */ if (mode != tmode) op0 = convert_to_mode (tmode, op0, 1); /* Unless the msb of the field used to be the msb when we shifted, mask out the upper bits. */ if (GET_MODE_BITSIZE (mode) != bitpos + bitsize) return expand_binop (GET_MODE (op0), and_optab, op0, mask_rtx (GET_MODE (op0), 0, bitsize, 0), target, 1, OPTAB_LIB_WIDEN); return op0; } /* To extract a signed bit-field, first shift its msb to the msb of the word, then arithmetic-shift its lsb to the lsb of the word. */ op0 = force_reg (mode, op0); if (mode != tmode) target = 0; /* Find the narrowest integer mode that contains the field. */ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos) { op0 = convert_to_mode (mode, op0, 0); break; } if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos)) { tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0); /* Maybe propagate the target for the shift. */ rtx subtarget = (target != 0 && REG_P (target) ? target : 0); op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1); } return expand_shift (RSHIFT_EXPR, mode, op0, build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0), target, 0); } /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value of mode MODE with BITSIZE ones followed by BITPOS zeros, or the complement of that if COMPLEMENT. The mask is truncated if necessary to the width of mode MODE. The mask is zero-extended if BITSIZE+BITPOS is too small for MODE. */ static rtx mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement) { HOST_WIDE_INT masklow, maskhigh; if (bitsize == 0) masklow = 0; else if (bitpos < HOST_BITS_PER_WIDE_INT) masklow = (HOST_WIDE_INT) -1 << bitpos; else masklow = 0; if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT) masklow &= ((unsigned HOST_WIDE_INT) -1 >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize)); if (bitpos <= HOST_BITS_PER_WIDE_INT) maskhigh = -1; else maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT); if (bitsize == 0) maskhigh = 0; else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT) maskhigh &= ((unsigned HOST_WIDE_INT) -1 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize)); else maskhigh = 0; if (complement) { maskhigh = ~maskhigh; masklow = ~masklow; } return immed_double_const (masklow, maskhigh, mode); } /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. 
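For example, lshift_value (SImode, GEN_INT (0xff), 8, 4) truncates 0xff to the 4-bit value 0xf and returns a CONST_INT of 0xf00.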
*/ static rtx lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize) { unsigned HOST_WIDE_INT v = INTVAL (value); HOST_WIDE_INT low, high; if (bitsize < HOST_BITS_PER_WIDE_INT) v &= ~((HOST_WIDE_INT) -1 << bitsize); if (bitpos < HOST_BITS_PER_WIDE_INT) { low = v << bitpos; high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0); } else { low = 0; high = v << (bitpos - HOST_BITS_PER_WIDE_INT); } return immed_double_const (low, high, mode); } /* Extract a bit field that is split across two words and return an RTX for the result. OP0 is the REG, SUBREG or MEM rtx for the first of the two words. BITSIZE is the field width; BITPOS, position of its first bit, in the word. UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */ static rtx extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos, int unsignedp) { unsigned int unit; unsigned int bitsdone = 0; rtx result = NULL_RTX; int first = 1; /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that much at a time. */ if (REG_P (op0) || GET_CODE (op0) == SUBREG) unit = BITS_PER_WORD; else unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD); while (bitsdone < bitsize) { unsigned HOST_WIDE_INT thissize; rtx part, word; unsigned HOST_WIDE_INT thispos; unsigned HOST_WIDE_INT offset; offset = (bitpos + bitsdone) / unit; thispos = (bitpos + bitsdone) % unit; /* THISSIZE must not overrun a word boundary. Otherwise, extract_fixed_bit_field will call us again, and we will mutually recurse forever. */ thissize = MIN (bitsize - bitsdone, BITS_PER_WORD); thissize = MIN (thissize, unit - thispos); /* If OP0 is a register, then handle OFFSET here. When handling multiword bitfields, extract_bit_field may pass down a word_mode SUBREG of a larger REG for a bitfield that actually crosses a word boundary. Thus, for a SUBREG, we must find the current word starting from the base register. */ if (GET_CODE (op0) == SUBREG) { int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset; word = operand_subword_force (SUBREG_REG (op0), word_offset, GET_MODE (SUBREG_REG (op0))); offset = 0; } else if (REG_P (op0)) { word = operand_subword_force (op0, offset, GET_MODE (op0)); offset = 0; } else word = op0; /* Extract the parts in bit-counting order, whose meaning is determined by BYTES_PER_UNIT. OFFSET is in UNITs, and UNIT is in bits. extract_fixed_bit_field wants offset in bytes. */ part = extract_fixed_bit_field (word_mode, word, offset * unit / BITS_PER_UNIT, thissize, thispos, 0, 1); bitsdone += thissize; /* Shift this part into place for the result. */ if (BYTES_BIG_ENDIAN) { if (bitsize != bitsdone) part = expand_shift (LSHIFT_EXPR, word_mode, part, build_int_2 (bitsize - bitsdone, 0), 0, 1); } else { if (bitsdone != thissize) part = expand_shift (LSHIFT_EXPR, word_mode, part, build_int_2 (bitsdone - thissize, 0), 0, 1); } if (first) result = part; else /* Combine the parts with bitwise or. This works because we extracted each part as an unsigned bit field. */ result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1, OPTAB_LIB_WIDEN); first = 0; } /* Unsigned bit field: we are done. */ if (unsignedp) return result; /* Signed bit field: sign-extend with two arithmetic shifts. */ result = expand_shift (LSHIFT_EXPR, word_mode, result, build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0); return expand_shift (RSHIFT_EXPR, word_mode, result, build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0); } /* Add INC into TARGET. 
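For example, expand_inc (counter, const1_rtx) emits counter := counter + 1; expand_binop is asked to use TARGET itself as the destination, and an extra move is emitted only when the result ends up somewhere else.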
*/ void expand_inc (rtx target, rtx inc) { rtx value = expand_binop (GET_MODE (target), add_optab, target, inc, target, 0, OPTAB_LIB_WIDEN); if (value != target) emit_move_insn (target, value); } /* Subtract DEC from TARGET. */ void expand_dec (rtx target, rtx dec) { rtx value = expand_binop (GET_MODE (target), sub_optab, target, dec, target, 0, OPTAB_LIB_WIDEN); if (value != target) emit_move_insn (target, value); } /* Output a shift instruction for expression code CODE, with SHIFTED being the rtx for the value to shift, and AMOUNT the tree for the amount to shift by. Store the result in the rtx TARGET, if that is convenient. If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic. Return the rtx for where the value is. */ rtx expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted, tree amount, rtx target, int unsignedp) { rtx op1, temp = 0; int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR); int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR); int try; /* Previously detected shift-counts computed by NEGATE_EXPR and shifted in the other direction; but that does not work on all machines. */ op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0); if (SHIFT_COUNT_TRUNCATED) { if (GET_CODE (op1) == CONST_INT && ((unsigned HOST_WIDE_INT) INTVAL (op1) >= (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))) op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1) % GET_MODE_BITSIZE (mode)); else if (GET_CODE (op1) == SUBREG && subreg_lowpart_p (op1)) op1 = SUBREG_REG (op1); } if (op1 == const0_rtx) return shifted; /* Check whether its cheaper to implement a left shift by a constant bit count by a sequence of additions. */ if (code == LSHIFT_EXPR && GET_CODE (op1) == CONST_INT && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode) && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode]) { int i; for (i = 0; i < INTVAL (op1); i++) { temp = force_reg (mode, shifted); shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX, unsignedp, OPTAB_LIB_WIDEN); } return shifted; } for (try = 0; temp == 0 && try < 3; try++) { enum optab_methods methods; if (try == 0) methods = OPTAB_DIRECT; else if (try == 1) methods = OPTAB_WIDEN; else methods = OPTAB_LIB_WIDEN; if (rotate) { /* Widening does not work for rotation. */ if (methods == OPTAB_WIDEN) continue; else if (methods == OPTAB_LIB_WIDEN) { /* If we have been unable to open-code this by a rotation, do it as the IOR of two shifts. I.e., to rotate A by N bits, compute (A << N) | ((unsigned) A >> (C - N)) where C is the bitsize of A. It is theoretically possible that the target machine might not be able to perform either shift and hence we would be making two libcalls rather than just the one for the shift (similarly if IOR could not be done). We will allow this extremely unlikely lossage to avoid complicating the code below. */ rtx subtarget = target == shifted ? 0 : target; rtx temp1; tree type = TREE_TYPE (amount); tree new_amount = make_tree (type, op1); tree other_amount = fold (build (MINUS_EXPR, type, convert (type, build_int_2 (GET_MODE_BITSIZE (mode), 0)), amount)); shifted = force_reg (mode, shifted); temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR, mode, shifted, new_amount, subtarget, 1); temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR, mode, shifted, other_amount, 0, 1); return expand_binop (mode, ior_optab, temp, temp1, target, unsignedp, methods); } temp = expand_binop (mode, left ? 
rotl_optab : rotr_optab, shifted, op1, target, unsignedp, methods); /* If we don't have the rotate, but we are rotating by a constant that is in range, try a rotate in the opposite direction. */ if (temp == 0 && GET_CODE (op1) == CONST_INT && INTVAL (op1) > 0 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode)) temp = expand_binop (mode, left ? rotr_optab : rotl_optab, shifted, GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1)), target, unsignedp, methods); } else if (unsignedp) temp = expand_binop (mode, left ? ashl_optab : lshr_optab, shifted, op1, target, unsignedp, methods); /* Do arithmetic shifts. Also, if we are going to widen the operand, we can just as well use an arithmetic right-shift instead of a logical one. */ if (temp == 0 && ! rotate && (! unsignedp || (! left && methods == OPTAB_WIDEN))) { enum optab_methods methods1 = methods; /* If trying to widen a log shift to an arithmetic shift, don't accept an arithmetic shift of the same size. */ if (unsignedp) methods1 = OPTAB_MUST_WIDEN; /* Arithmetic shift */ temp = expand_binop (mode, left ? ashl_optab : ashr_optab, shifted, op1, target, unsignedp, methods1); } /* We used to try extzv here for logical right shifts, but that was only useful for one machine, the VAX, and caused poor code generation there for lshrdi3, so the code was deleted and a define_expand for lshrsi3 was added to vax.md. */ } if (temp == 0) abort (); return temp; } enum alg_code { alg_zero, alg_m, alg_shift, alg_add_t_m2, alg_sub_t_m2, alg_add_factor, alg_sub_factor, alg_add_t2_m, alg_sub_t2_m, alg_add, alg_subtract, alg_factor, alg_shiftop }; /* This structure records a sequence of operations. `ops' is the number of operations recorded. `cost' is their total cost. The operations are stored in `op' and the corresponding logarithms of the integer coefficients in `log'. These are the operations: alg_zero total := 0; alg_m total := multiplicand; alg_shift total := total * coeff alg_add_t_m2 total := total + multiplicand * coeff; alg_sub_t_m2 total := total - multiplicand * coeff; alg_add_factor total := total * coeff + total; alg_sub_factor total := total * coeff - total; alg_add_t2_m total := total * coeff + multiplicand; alg_sub_t2_m total := total * coeff - multiplicand; The first operand must be either alg_zero or alg_m. */ struct algorithm { short cost; short ops; /* The size of the OP and LOG fields are not directly related to the word size, but the worst-case algorithms will be if we have few consecutive ones or zeros, i.e., a multiplicand like 10101010101... In that case we will generate shift-by-2, add, shift-by-2, add,..., in total wordsize operations. */ enum alg_code op[MAX_BITS_PER_WORD]; char log[MAX_BITS_PER_WORD]; }; /* Indicates the type of fixup needed after a constant multiplication. BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that the result should be negated, and ADD_VARIANT means that the multiplicand should be added to the result. 
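For example, a multiplication by -7 can be synthesized as t = (x << 3) - x followed by a negation (negate_variant), and a multiplication by 2**16 + 1 as t = x << 16 followed by adding x back in (add_variant).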
*/ enum mult_variant {basic_variant, negate_variant, add_variant}; static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT, int, enum machine_mode mode); static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT, struct algorithm *, enum mult_variant *, int); static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx, const struct algorithm *, enum mult_variant); static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int, int, unsigned HOST_WIDE_INT *, int *, int *); static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int); static rtx extract_high_half (enum machine_mode, rtx); static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx, int, int); /* Compute and return the best algorithm for multiplying by T. The algorithm must cost less than cost_limit If retval.cost >= COST_LIMIT, no algorithm was found and all other field of the returned struct are undefined. MODE is the machine mode of the multiplication. */ static void synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, int cost_limit, enum machine_mode mode) { int m; struct algorithm *alg_in, *best_alg; int cost; unsigned HOST_WIDE_INT q; int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode)); /* Indicate that no algorithm is yet found. If no algorithm is found, this value will be returned and indicate failure. */ alg_out->cost = cost_limit; if (cost_limit <= 0) return; /* Restrict the bits of "t" to the multiplication's mode. */ t &= GET_MODE_MASK (mode); /* t == 1 can be done in zero cost. */ if (t == 1) { alg_out->ops = 1; alg_out->cost = 0; alg_out->op[0] = alg_m; return; } /* t == 0 sometimes has a cost. If it does and it exceeds our limit, fail now. */ if (t == 0) { if (zero_cost >= cost_limit) return; else { alg_out->ops = 1; alg_out->cost = zero_cost; alg_out->op[0] = alg_zero; return; } } /* We'll be needing a couple extra algorithm structures now. */ alg_in = alloca (sizeof (struct algorithm)); best_alg = alloca (sizeof (struct algorithm)); /* If we have a group of zero bits at the low-order part of T, try multiplying by the remaining bits and then doing a shift. */ if ((t & 1) == 0) { m = floor_log2 (t & -t); /* m = number of low zero bits */ if (m < maxm) { q = t >> m; /* The function expand_shift will choose between a shift and a sequence of additions, so the observed cost is given as MIN (m * add_cost[mode], shift_cost[mode][m]). */ cost = m * add_cost[mode]; if (shift_cost[mode][m] < cost) cost = shift_cost[mode][m]; synth_mult (alg_in, q, cost_limit - cost, mode); cost += alg_in->cost; if (cost < cost_limit) { struct algorithm *x; x = alg_in, alg_in = best_alg, best_alg = x; best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_shift; cost_limit = cost; } } } /* If we have an odd number, add or subtract one. */ if ((t & 1) != 0) { unsigned HOST_WIDE_INT w; for (w = 1; (w & t) != 0; w <<= 1) ; /* If T was -1, then W will be zero after the loop. This is another case where T ends with ...111. Handling this with (T + 1) and subtract 1 produces slightly better code and results in algorithm selection much faster than treating it like the ...0111 case below. */ if (w == 0 || (w > 2 /* Reject the case where t is 3. Thus we prefer addition in that case. */ && t != 3)) { /* T ends with ...111. Multiply by (T + 1) and subtract 1. 
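For example, t = 7: t + 1 = 8 costs a single shift, so x * 7 becomes (x << 3) - x, which is cheaper than building the product up through x * 3 and x * 6 the way the ...01 / ...011 case below would.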
*/ cost = add_cost[mode]; synth_mult (alg_in, t + 1, cost_limit - cost, mode); cost += alg_in->cost; if (cost < cost_limit) { struct algorithm *x; x = alg_in, alg_in = best_alg, best_alg = x; best_alg->log[best_alg->ops] = 0; best_alg->op[best_alg->ops] = alg_sub_t_m2; cost_limit = cost; } } else { /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */ cost = add_cost[mode]; synth_mult (alg_in, t - 1, cost_limit - cost, mode); cost += alg_in->cost; if (cost < cost_limit) { struct algorithm *x; x = alg_in, alg_in = best_alg, best_alg = x; best_alg->log[best_alg->ops] = 0; best_alg->op[best_alg->ops] = alg_add_t_m2; cost_limit = cost; } } } /* Look for factors of t of the form t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)). If we find such a factor, we can multiply by t using an algorithm that multiplies by q, shift the result by m and add/subtract it to itself. We search for large factors first and loop down, even if large factors are less probable than small; if we find a large factor we will find a good sequence quickly, and therefore be able to prune (by decreasing COST_LIMIT) the search. */ for (m = floor_log2 (t - 1); m >= 2; m--) { unsigned HOST_WIDE_INT d; d = ((unsigned HOST_WIDE_INT) 1 << m) + 1; if (t % d == 0 && t > d && m < maxm) { cost = add_cost[mode] + shift_cost[mode][m]; if (shiftadd_cost[mode][m] < cost) cost = shiftadd_cost[mode][m]; synth_mult (alg_in, t / d, cost_limit - cost, mode); cost += alg_in->cost; if (cost < cost_limit) { struct algorithm *x; x = alg_in, alg_in = best_alg, best_alg = x; best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_add_factor; cost_limit = cost; } /* Other factors will have been taken care of in the recursion. */ break; } d = ((unsigned HOST_WIDE_INT) 1 << m) - 1; if (t % d == 0 && t > d && m < maxm) { cost = add_cost[mode] + shift_cost[mode][m]; if (shiftsub_cost[mode][m] < cost) cost = shiftsub_cost[mode][m]; synth_mult (alg_in, t / d, cost_limit - cost, mode); cost += alg_in->cost; if (cost < cost_limit) { struct algorithm *x; x = alg_in, alg_in = best_alg, best_alg = x; best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_sub_factor; cost_limit = cost; } break; } } /* Try shift-and-add (load effective address) instructions, i.e. do a*3, a*5, a*9. */ if ((t & 1) != 0) { q = t - 1; q = q & -q; m = exact_log2 (q); if (m >= 0 && m < maxm) { cost = shiftadd_cost[mode][m]; synth_mult (alg_in, (t - 1) >> m, cost_limit - cost, mode); cost += alg_in->cost; if (cost < cost_limit) { struct algorithm *x; x = alg_in, alg_in = best_alg, best_alg = x; best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_add_t2_m; cost_limit = cost; } } q = t + 1; q = q & -q; m = exact_log2 (q); if (m >= 0 && m < maxm) { cost = shiftsub_cost[mode][m]; synth_mult (alg_in, (t + 1) >> m, cost_limit - cost, mode); cost += alg_in->cost; if (cost < cost_limit) { struct algorithm *x; x = alg_in, alg_in = best_alg, best_alg = x; best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_sub_t2_m; cost_limit = cost; } } } /* If cost_limit has not decreased since we stored it in alg_out->cost, we have not found any algorithm. */ if (cost_limit == alg_out->cost) return; /* If we are getting a too long sequence for `struct algorithm' to record, make this search fail. */ if (best_alg->ops == MAX_BITS_PER_WORD) return; /* Copy the algorithm from temporary space to the space at alg_out. We avoid using structure assignment because the majority of best_alg is normally undefined, and this is a critical function. 
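For instance, after finding t = 45 = 5 * (2**3 + 1), one possible recording is op[] = alg_m, alg_add_t2_m, alg_add_factor with logs 2 and 3 for the two add steps (x * 5 = (x << 2) + x, then total = (total << 3) + total); only the leading alg_out->ops entries of op[] and log[] are defined, so copying just those is both correct and cheaper than a structure assignment.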
*/ alg_out->ops = best_alg->ops + 1; alg_out->cost = cost_limit; memcpy (alg_out->op, best_alg->op, alg_out->ops * sizeof *alg_out->op); memcpy (alg_out->log, best_alg->log, alg_out->ops * sizeof *alg_out->log); } /* Find the cheapest way of multiplying a value of mode MODE by VAL. Try three variations: - a shift/add sequence based on VAL itself - a shift/add sequence based on -VAL, followed by a negation - a shift/add sequence based on VAL - 1, followed by an addition. Return true if the cheapest of these cost less than MULT_COST, describing the algorithm in *ALG and final fixup in *VARIANT. */ static bool choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val, struct algorithm *alg, enum mult_variant *variant, int mult_cost) { struct algorithm alg2; *variant = basic_variant; synth_mult (alg, val, mult_cost, mode); /* This works only if the inverted value actually fits in an `unsigned int' */ if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode)) { synth_mult (&alg2, -val, MIN (alg->cost, mult_cost) - neg_cost[mode], mode); alg2.cost += neg_cost[mode]; if (alg2.cost < alg->cost) *alg = alg2, *variant = negate_variant; } /* This proves very useful for division-by-constant. */ synth_mult (&alg2, val - 1, MIN (alg->cost, mult_cost) - add_cost[mode], mode); alg2.cost += add_cost[mode]; if (alg2.cost < alg->cost) *alg = alg2, *variant = add_variant; return alg->cost < mult_cost; } /* A subroutine of expand_mult, used for constant multiplications. Multiply OP0 by VAL in mode MODE, storing the result in TARGET if convenient. Use the shift/add sequence described by ALG and apply the final fixup specified by VARIANT. */ static rtx expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val, rtx target, const struct algorithm *alg, enum mult_variant variant) { HOST_WIDE_INT val_so_far; rtx insn, accum, tem; int opno; enum machine_mode nmode; /* op0 must be register to make mult_cost match the precomputed shiftadd_cost array. */ op0 = protect_from_queue (op0, 0); /* Avoid referencing memory over and over. For speed, but also for correctness when mem is volatile. */ if (MEM_P (op0)) op0 = force_reg (mode, op0); /* ACCUM starts out either as OP0 or as a zero, depending on the first operation. */ if (alg->op[0] == alg_zero) { accum = copy_to_mode_reg (mode, const0_rtx); val_so_far = 0; } else if (alg->op[0] == alg_m) { accum = copy_to_mode_reg (mode, op0); val_so_far = 1; } else abort (); for (opno = 1; opno < alg->ops; opno++) { int log = alg->log[opno]; int preserve = preserve_subexpressions_p (); rtx shift_subtarget = preserve ? 0 : accum; rtx add_target = (opno == alg->ops - 1 && target != 0 && variant != add_variant && ! preserve) ? target : 0; rtx accum_target = preserve ? 0 : accum; switch (alg->op[opno]) { case alg_shift: accum = expand_shift (LSHIFT_EXPR, mode, accum, build_int_2 (log, 0), NULL_RTX, 0); val_so_far <<= log; break; case alg_add_t_m2: tem = expand_shift (LSHIFT_EXPR, mode, op0, build_int_2 (log, 0), NULL_RTX, 0); accum = force_operand (gen_rtx_PLUS (mode, accum, tem), add_target ? add_target : accum_target); val_so_far += (HOST_WIDE_INT) 1 << log; break; case alg_sub_t_m2: tem = expand_shift (LSHIFT_EXPR, mode, op0, build_int_2 (log, 0), NULL_RTX, 0); accum = force_operand (gen_rtx_MINUS (mode, accum, tem), add_target ? 
add_target : accum_target); val_so_far -= (HOST_WIDE_INT) 1 << log; break; case alg_add_t2_m: accum = expand_shift (LSHIFT_EXPR, mode, accum, build_int_2 (log, 0), shift_subtarget, 0); accum = force_operand (gen_rtx_PLUS (mode, accum, op0), add_target ? add_target : accum_target); val_so_far = (val_so_far << log) + 1; break; case alg_sub_t2_m: accum = expand_shift (LSHIFT_EXPR, mode, accum, build_int_2 (log, 0), shift_subtarget, 0); accum = force_operand (gen_rtx_MINUS (mode, accum, op0), add_target ? add_target : accum_target); val_so_far = (val_so_far << log) - 1; break; case alg_add_factor: tem = expand_shift (LSHIFT_EXPR, mode, accum, build_int_2 (log, 0), NULL_RTX, 0); accum = force_operand (gen_rtx_PLUS (mode, accum, tem), add_target ? add_target : accum_target); val_so_far += val_so_far << log; break; case alg_sub_factor: tem = expand_shift (LSHIFT_EXPR, mode, accum, build_int_2 (log, 0), NULL_RTX, 0); accum = force_operand (gen_rtx_MINUS (mode, tem, accum), (add_target ? add_target : preserve ? 0 : tem)); val_so_far = (val_so_far << log) - val_so_far; break; default: abort (); } /* Write a REG_EQUAL note on the last insn so that we can cse multiplication sequences. Note that if ACCUM is a SUBREG, we've set the inner register and must properly indicate that. */ tem = op0, nmode = mode; if (GET_CODE (accum) == SUBREG) { nmode = GET_MODE (SUBREG_REG (accum)); tem = gen_lowpart (nmode, op0); } insn = get_last_insn (); set_unique_reg_note (insn, REG_EQUAL, gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far))); } if (variant == negate_variant) { val_so_far = -val_so_far; accum = expand_unop (mode, neg_optab, accum, target, 0); } else if (variant == add_variant) { val_so_far = val_so_far + 1; accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target); } /* Compare only the bits of val and val_so_far that are significant in the result mode, to avoid sign-/zero-extension confusion. */ val &= GET_MODE_MASK (mode); val_so_far &= GET_MODE_MASK (mode); if (val != val_so_far) abort (); return accum; } /* Perform a multiplication and return an rtx for the result. MODE is mode of value; OP0 and OP1 are what to multiply (rtx's); TARGET is a suggestion for where to store the result (an rtx). We check specially for a constant integer as OP1. If you want this check for OP0 as well, then before calling you should swap the two operands if OP0 would be constant. */ rtx expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target, int unsignedp) { rtx const_op1 = op1; enum mult_variant variant; struct algorithm algorithm; /* synth_mult does an `unsigned int' multiply. As long as the mode is less than or equal in size to `unsigned int' this doesn't matter. If the mode is larger than `unsigned int', then synth_mult works only if the constant value exactly fits in an `unsigned int' without any truncation. This means that multiplying by negative values does not work; results are off by 2^32 on a 32 bit machine. */ /* If we are multiplying in DImode, it may still be a win to try to work with shifts and adds. */ if (GET_CODE (op1) == CONST_DOUBLE && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT && HOST_BITS_PER_INT >= BITS_PER_WORD && CONST_DOUBLE_HIGH (op1) == 0) const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1)); else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode) && GET_CODE (op1) == CONST_INT && INTVAL (op1) < 0) const_op1 = 0; /* We used to test optimize here, on the grounds that it's better to produce a smaller program when -O is not used. 
But this causes such a terrible slowdown sometimes that it seems better to use synth_mult always. */ if (const_op1 && GET_CODE (const_op1) == CONST_INT && (unsignedp || !flag_trapv)) { int mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET); if (choose_mult_variant (mode, INTVAL (const_op1), &algorithm, &variant, mult_cost)) return expand_mult_const (mode, op0, INTVAL (const_op1), target, &algorithm, variant); } if (GET_CODE (op0) == CONST_DOUBLE) { rtx temp = op0; op0 = op1; op1 = temp; } /* Expand x*2.0 as x+x. */ if (GET_CODE (op1) == CONST_DOUBLE && GET_MODE_CLASS (mode) == MODE_FLOAT) { REAL_VALUE_TYPE d; REAL_VALUE_FROM_CONST_DOUBLE (d, op1); if (REAL_VALUES_EQUAL (d, dconst2)) { op0 = force_reg (GET_MODE (op0), op0); return expand_binop (mode, add_optab, op0, op0, target, unsignedp, OPTAB_LIB_WIDEN); } } /* This used to use umul_optab if unsigned, but for non-widening multiply there is no difference between signed and unsigned. */ op0 = expand_binop (mode, ! unsignedp && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT) ? smulv_optab : smul_optab, op0, op1, target, unsignedp, OPTAB_LIB_WIDEN); if (op0 == 0) abort (); return op0; } /* Return the smallest n such that 2**n >= X. */ int ceil_log2 (unsigned HOST_WIDE_INT x) { return floor_log2 (x - 1) + 1; } /* Choose a minimal N + 1 bit approximation to 1/D that can be used to replace division by D, and put the least significant N bits of the result in *MULTIPLIER_PTR and return the most significant bit. The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the needed precision is in PRECISION (should be <= N). PRECISION should be as small as possible so this function can choose multiplier more freely. The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that is to be used for a final right shift is placed in *POST_SHIFT_PTR. Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR), where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */ static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, unsigned HOST_WIDE_INT *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr) { HOST_WIDE_INT mhigh_hi, mlow_hi; unsigned HOST_WIDE_INT mhigh_lo, mlow_lo; int lgup, post_shift; int pow, pow2; unsigned HOST_WIDE_INT nl, dummy1; HOST_WIDE_INT nh, dummy2; /* lgup = ceil(log2(divisor)); */ lgup = ceil_log2 (d); if (lgup > n) abort (); pow = n + lgup; pow2 = n + lgup - precision; if (pow == 2 * HOST_BITS_PER_WIDE_INT) { /* We could handle this with some effort, but this case is much better handled directly with a scc insn, so rely on caller using that. */ abort (); } /* mlow = 2^(N + lgup)/d */ if (pow >= HOST_BITS_PER_WIDE_INT) { nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT); nl = 0; } else { nh = 0; nl = (unsigned HOST_WIDE_INT) 1 << pow; } div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0, &mlow_lo, &mlow_hi, &dummy1, &dummy2); /* mhigh = (2^(N + lgup) + 2^N + lgup - precision)/d */ if (pow2 >= HOST_BITS_PER_WIDE_INT) nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT); else nl |= (unsigned HOST_WIDE_INT) 1 << pow2; div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0, &mhigh_lo, &mhigh_hi, &dummy1, &dummy2); if (mhigh_hi && nh - d >= d) abort (); if (mhigh_hi > 1 || mlow_hi > 1) abort (); /* Assert that mlow < mhigh. */ if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo))) abort (); /* If precision == N, then mlow, mhigh exceed 2^N (but they do not exceed 2^(N+1)). 
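As a worked example, d = 7 with n = precision = 32 gives lgup = 3, mlow = floor (2**35 / 7) = 0x124924924 and mhigh = floor ((2**35 + 2**3) / 7) = 0x124924925; halving makes the two coincide, so the loop below leaves post_shift = 3, stores the low 32 bits 0x24924925 in *MULTIPLIER_PTR and returns 1 for the 33rd bit, i.e. x / 7 == (x * 0x124924925) >> 35 for every unsigned 32-bit x.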
*/ /* Reduce to lowest terms. */ for (post_shift = lgup; post_shift > 0; post_shift--) { unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1); unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1); if (ml_lo >= mh_lo) break; mlow_hi = 0; mlow_lo = ml_lo; mhigh_hi = 0; mhigh_lo = mh_lo; } *post_shift_ptr = post_shift; *lgup_ptr = lgup; if (n < HOST_BITS_PER_WIDE_INT) { unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1; *multiplier_ptr = mhigh_lo & mask; return mhigh_lo >= mask; } else { *multiplier_ptr = mhigh_lo; return mhigh_hi; } } /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is congruent to 1 (mod 2**N). */ static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT x, int n) { /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */ /* The algorithm notes that the choice y = x satisfies x*y == 1 mod 2^3, since x is assumed odd. Each iteration doubles the number of bits of significance in y. */ unsigned HOST_WIDE_INT mask; unsigned HOST_WIDE_INT y = x; int nbit = 3; mask = (n == HOST_BITS_PER_WIDE_INT ? ~(unsigned HOST_WIDE_INT) 0 : ((unsigned HOST_WIDE_INT) 1 << n) - 1); while (nbit < n) { y = y * (2 - x*y) & mask; /* Modulo 2^N */ nbit *= 2; } return y; } /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to become signed. The result is put in TARGET if that is convenient. MODE is the mode of operation. */ rtx expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0, rtx op1, rtx target, int unsignedp) { rtx tem; enum rtx_code adj_code = unsignedp ? PLUS : MINUS; tem = expand_shift (RSHIFT_EXPR, mode, op0, build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0), NULL_RTX, 0); tem = expand_and (mode, tem, op1, NULL_RTX); adj_operand = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem), adj_operand); tem = expand_shift (RSHIFT_EXPR, mode, op1, build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0), NULL_RTX, 0); tem = expand_and (mode, tem, op0, NULL_RTX); target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem), target); return target; } /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */ static rtx extract_high_half (enum machine_mode mode, rtx op) { enum machine_mode wider_mode; if (mode == word_mode) return gen_highpart (mode, op); wider_mode = GET_MODE_WIDER_MODE (mode); op = expand_shift (RSHIFT_EXPR, wider_mode, op, build_int_2 (GET_MODE_BITSIZE (mode), 0), 0, 1); return convert_modes (mode, wider_mode, op, 0); } /* Like expand_mult_highpart, but only consider using a multiplication optab. OP1 is an rtx for the constant operand. */ static rtx expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1, rtx target, int unsignedp, int max_cost) { rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode); enum machine_mode wider_mode; optab moptab; rtx tem; int size; wider_mode = GET_MODE_WIDER_MODE (mode); size = GET_MODE_BITSIZE (mode); /* Firstly, try using a multiplication insn that only generates the needed high part of the product, and in the sign flavor of unsignedp. */ if (mul_highpart_cost[mode] < max_cost) { moptab = unsignedp ? 
umul_highpart_optab : smul_highpart_optab; tem = expand_binop (mode, moptab, op0, narrow_op1, target, unsignedp, OPTAB_DIRECT); if (tem) return tem; } /* Secondly, same as above, but use sign flavor opposite of unsignedp. Need to adjust the result after the multiplication. */ if (size - 1 < BITS_PER_WORD && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1] + 4 * add_cost[mode] < max_cost)) { moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab; tem = expand_binop (mode, moptab, op0, narrow_op1, target, unsignedp, OPTAB_DIRECT); if (tem) /* We used the wrong signedness. Adjust the result. */ return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1, tem, unsignedp); } /* Try widening multiplication. */ moptab = unsignedp ? umul_widen_optab : smul_widen_optab; if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing && mul_widen_cost[wider_mode] < max_cost) { tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0, unsignedp, OPTAB_WIDEN); if (tem) return extract_high_half (mode, tem); } /* Try widening the mode and perform a non-widening multiplication. */ moptab = smul_optab; if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing && size - 1 < BITS_PER_WORD && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost) { tem = expand_binop (wider_mode, moptab, op0, op1, 0, unsignedp, OPTAB_WIDEN); if (tem) return extract_high_half (mode, tem); } /* Try widening multiplication of opposite signedness, and adjust. */ moptab = unsignedp ? smul_widen_optab : umul_widen_optab; if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing && size - 1 < BITS_PER_WORD && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1] + 4 * add_cost[mode] < max_cost)) { tem = expand_binop (wider_mode, moptab, op0, narrow_op1, NULL_RTX, ! unsignedp, OPTAB_WIDEN); if (tem != 0) { tem = extract_high_half (mode, tem); /* We used the wrong signedness. Adjust the result. */ return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1, target, unsignedp); } } return 0; } /* Emit code to multiply OP0 and CNST1, putting the high half of the result in TARGET if that is convenient, and return where the result is. If the operation can not be performed, 0 is returned. MODE is the mode of operation and result. UNSIGNEDP nonzero means unsigned multiply. MAX_COST is the total allowed cost for the expanded RTL. */ rtx expand_mult_highpart (enum machine_mode mode, rtx op0, unsigned HOST_WIDE_INT cnst1, rtx target, int unsignedp, int max_cost) { enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode); int extra_cost; bool sign_adjust = false; enum mult_variant variant; struct algorithm alg; rtx op1, tem; /* We can't support modes wider than HOST_BITS_PER_INT. */ if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT) abort (); op1 = gen_int_mode (cnst1, wider_mode); cnst1 &= GET_MODE_MASK (mode); /* We can't optimize modes wider than BITS_PER_WORD. ??? We might be able to perform double-word arithmetic if mode == word_mode, however all the cost calculations in synth_mult etc. assume single-word operations. */ if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD) return expand_mult_highpart_optab (mode, op0, op1, target, unsignedp, max_cost); extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1]; /* Check whether we try to multiply by a negative constant. */ if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1)) { sign_adjust = true; extra_cost += add_cost[mode]; } /* See whether shift/add multiplication is cheap enough. 
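That is, we test whether a WIDER_MODE shift-and-add sequence for CNST1 (plus EXTRA_COST for extracting the high half and, for negative constants, the sign fixup) is cheaper than MAX_COST; if so, the specialized high-part optabs below still get first shot at the tighter budget alg.cost + extra_cost before we emit the synthesized multiply and take its high half.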
*/ if (choose_mult_variant (wider_mode, cnst1, &alg, &variant, max_cost - extra_cost)) { /* See whether the specialized multiplication optabs are cheaper than the shift/add version. */ tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp, alg.cost + extra_cost); if (tem) return tem; tem = convert_to_mode (wider_mode, op0, unsignedp); tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant); tem = extract_high_half (mode, tem); /* Adjust result for signedness. */ if (sign_adjust) tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem); return tem; } return expand_mult_highpart_optab (mode, op0, op1, target, unsignedp, max_cost); } /* Expand signed modulus of OP0 by a power of two D in mode MODE. */ static rtx expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d) { unsigned HOST_WIDE_INT mask; rtx result, temp, shift, label; int logd; logd = floor_log2 (d); result = gen_reg_rtx (mode); /* Avoid conditional branches when they're expensive. */ if (BRANCH_COST >= 2 && !optimize_size) { rtx signmask = emit_store_flag (result, LT, op0, const0_rtx, mode, 0, -1); if (signmask) { signmask = force_reg (mode, signmask); mask = ((HOST_WIDE_INT) 1 << logd) - 1; shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd); /* Use the rtx_cost of a LSHIFTRT instruction to determine which instruction sequence to use. If logical right shifts are expensive the use 2 XORs, 2 SUBs and an AND, otherwise use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */ temp = gen_rtx_LSHIFTRT (mode, result, shift); if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing || rtx_cost (temp, SET) > COSTS_N_INSNS (2)) { temp = expand_binop (mode, xor_optab, op0, signmask, NULL_RTX, 1, OPTAB_LIB_WIDEN); temp = expand_binop (mode, sub_optab, temp, signmask, NULL_RTX, 1, OPTAB_LIB_WIDEN); temp = expand_binop (mode, and_optab, temp, GEN_INT (mask), NULL_RTX, 1, OPTAB_LIB_WIDEN); temp = expand_binop (mode, xor_optab, temp, signmask, NULL_RTX, 1, OPTAB_LIB_WIDEN); temp = expand_binop (mode, sub_optab, temp, signmask, NULL_RTX, 1, OPTAB_LIB_WIDEN); } else { signmask = expand_binop (mode, lshr_optab, signmask, shift, NULL_RTX, 1, OPTAB_LIB_WIDEN); signmask = force_reg (mode, signmask); temp = expand_binop (mode, add_optab, op0, signmask, NULL_RTX, 1, OPTAB_LIB_WIDEN); temp = expand_binop (mode, and_optab, temp, GEN_INT (mask), NULL_RTX, 1, OPTAB_LIB_WIDEN); temp = expand_binop (mode, sub_optab, temp, signmask, NULL_RTX, 1, OPTAB_LIB_WIDEN); } return temp; } } /* Mask contains the mode's signbit and the significant bits of the modulus. By including the signbit in the operation, many targets can avoid an explicit compare operation in the following comparison against zero. */ mask = (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1) | (((HOST_WIDE_INT) 1 << logd) - 1); temp = expand_binop (mode, and_optab, op0, GEN_INT (mask), result, 1, OPTAB_LIB_WIDEN); if (temp != result) emit_move_insn (result, temp); label = gen_label_rtx (); do_cmp_and_jump (result, const0_rtx, GE, mode, label); temp = expand_binop (mode, sub_optab, result, const1_rtx, result, 0, OPTAB_LIB_WIDEN); mask = (HOST_WIDE_INT) -1 << logd; temp = expand_binop (mode, ior_optab, temp, GEN_INT (mask), result, 1, OPTAB_LIB_WIDEN); temp = expand_binop (mode, add_optab, temp, const1_rtx, result, 0, OPTAB_LIB_WIDEN); if (temp != result) emit_move_insn (result, temp); emit_label (label); return result; } /* Emit the code to divide OP0 by OP1, putting the result in TARGET if that is convenient, and returning where the result is. 
You may request either the quotient or the remainder as the result; specify REM_FLAG nonzero to get the remainder. CODE is the expression code for which kind of division this is; it controls how rounding is done. MODE is the machine mode to use. UNSIGNEDP nonzero means do unsigned division. */ /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI and then correct it by or'ing in missing high bits if result of ANDI is nonzero. For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result. This could optimize to a bfexts instruction. But C doesn't use these operations, so their optimizations are left for later. */ /* ??? For modulo, we don't actually need the highpart of the first product, the low part will do nicely. And for small divisors, the second multiply can also be a low-part only multiply or even be completely left out. E.g. to calculate the remainder of a division by 3 with a 32 bit multiply, multiply with 0x55555556 and extract the upper two bits; the result is exact for inputs up to 0x1fffffff. The input range can be reduced by using cross-sum rules. For odd divisors >= 3, the following table gives right shift counts so that if a number is shifted by an integer multiple of the given amount, the remainder stays the same: 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20, 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0, 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0, 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33, 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12 Cross-sum rules for even numbers can be derived by leaving as many bits to the right alone as the divisor has zeros to the right. E.g. if x is an unsigned 32 bit number: (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28 */ #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0) rtx expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode, rtx op0, rtx op1, rtx target, int unsignedp) { enum machine_mode compute_mode; rtx tquotient; rtx quotient = 0, remainder = 0; rtx last; int size; rtx insn, set; optab optab1, optab2; int op1_is_constant, op1_is_pow2 = 0; int max_cost, extra_cost; static HOST_WIDE_INT last_div_const = 0; static HOST_WIDE_INT ext_op1; op1_is_constant = GET_CODE (op1) == CONST_INT; if (op1_is_constant) { ext_op1 = INTVAL (op1); if (unsignedp) ext_op1 &= GET_MODE_MASK (mode); op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1) || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1)))); } /* This is the structure of expand_divmod: First comes code to fix up the operands so we can perform the operations correctly and efficiently. Second comes a switch statement with code specific for each rounding mode. For some special operands this code emits all RTL for the desired operation, for other cases, it generates only a quotient and stores it in QUOTIENT. The case for trunc division/remainder might leave quotient = 0, to indicate that it has not done anything. Last comes code that finishes the operation. If QUOTIENT is set and REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If QUOTIENT is not set, it is computed using trunc rounding. We try to generate special code for division and remainder when OP1 is a constant. If |OP1| = 2**n we can use shifts and some other fast operations. For other values of OP1, we compute a carefully selected fixed-point approximation m = 1/OP1, and generate code that multiplies OP0 by m. 
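(For instance, an unsigned SImode division by 3 becomes a multiplication by 0xAAAAAAAB followed by a right shift, since x / 3 == (x * 0xAAAAAAAB) >> 33 for every 32-bit x.)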
In all cases but EXACT_DIV_EXPR, this multiplication requires the upper half of the product. Different strategies for generating the product are implemented in expand_mult_highpart. If what we actually want is the remainder, we generate that by another by-constant multiplication and a subtraction. */ /* We shouldn't be called with OP1 == const1_rtx, but some of the code below will malfunction if we are, so check here and handle the special case if so. */ if (op1 == const1_rtx) return rem_flag ? const0_rtx : op0; /* When dividing by -1, we could get an overflow. negv_optab can handle overflows. */ if (! unsignedp && op1 == constm1_rtx) { if (rem_flag) return const0_rtx; return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT ? negv_optab : neg_optab, op0, target, 0); } if (target /* Don't use the function value register as a target since we have to read it as well as write it, and function-inlining gets confused by this. */ && ((REG_P (target) && REG_FUNCTION_VALUE_P (target)) /* Don't clobber an operand while doing a multi-step calculation. */ || ((rem_flag || op1_is_constant) && (reg_mentioned_p (target, op0) || (MEM_P (op0) && MEM_P (target)))) || reg_mentioned_p (target, op1) || (MEM_P (op1) && MEM_P (target)))) target = 0; /* Get the mode in which to perform this computation. Normally it will be MODE, but sometimes we can't do the desired operation in MODE. If so, pick a wider mode in which we can do the operation. Convert to that mode at the start to avoid repeated conversions. First see what operations we need. These depend on the expression we are evaluating. (We assume that divxx3 insns exist under the same conditions that modxx3 insns and that these insns don't normally fail. If these assumptions are not correct, we may generate less efficient code in some cases.) Then see if we find a mode in which we can open-code that operation (either a division, modulus, or shift). Finally, check for the smallest mode for which we can do the operation with a library call. */ /* We might want to refine this now that we have division-by-constant optimization. Since expand_mult_highpart tries so many variants, it is not straightforward to generalize this. Maybe we should make an array of possible modes in init_expmed? Save this for GCC 2.7. */ optab1 = ((op1_is_pow2 && op1 != const0_rtx) ? (unsignedp ? lshr_optab : ashr_optab) : (unsignedp ? udiv_optab : sdiv_optab)); optab2 = ((op1_is_pow2 && op1 != const0_rtx) ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab)); for (compute_mode = mode; compute_mode != VOIDmode; compute_mode = GET_MODE_WIDER_MODE (compute_mode)) if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing) break; if (compute_mode == VOIDmode) for (compute_mode = mode; compute_mode != VOIDmode; compute_mode = GET_MODE_WIDER_MODE (compute_mode)) if (optab1->handlers[compute_mode].libfunc || optab2->handlers[compute_mode].libfunc) break; /* If we still couldn't find a mode, use MODE, but we'll probably abort in expand_binop. */ if (compute_mode == VOIDmode) compute_mode = mode; if (target && GET_MODE (target) == compute_mode) tquotient = target; else tquotient = gen_reg_rtx (compute_mode); size = GET_MODE_BITSIZE (compute_mode); #if 0 /* It should be possible to restrict the precision to GET_MODE_BITSIZE (mode), and thereby get better code when OP1 is a constant. Do that later. It will require going over all usages of SIZE below. 
*/ size = GET_MODE_BITSIZE (mode); #endif /* Only deduct something for a REM if the last divide done was for a different constant. Then set the constant of the last divide. */ max_cost = div_cost[compute_mode] - (rem_flag && ! (last_div_const != 0 && op1_is_constant && INTVAL (op1) == last_div_const) ? mul_cost[compute_mode] + add_cost[compute_mode] : 0); last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0; /* Now convert to the best mode to use. */ if (compute_mode != mode) { op0 = convert_modes (compute_mode, mode, op0, unsignedp); op1 = convert_modes (compute_mode, mode, op1, unsignedp); /* convert_modes may have placed op1 into a register, so we must recompute the following. */ op1_is_constant = GET_CODE (op1) == CONST_INT; op1_is_pow2 = (op1_is_constant && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)) || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ; } /* If one of the operands is a volatile MEM, copy it into a register. */ if (MEM_P (op0) && MEM_VOLATILE_P (op0)) op0 = force_reg (compute_mode, op0); if (MEM_P (op1) && MEM_VOLATILE_P (op1)) op1 = force_reg (compute_mode, op1); /* If we need the remainder or if OP1 is constant, we need to put OP0 in a register in case it has any queued subexpressions. */ if (rem_flag || op1_is_constant) op0 = force_reg (compute_mode, op0); last = get_last_insn (); /* Promote floor rounding to trunc rounding for unsigned operations. */ if (unsignedp) { if (code == FLOOR_DIV_EXPR) code = TRUNC_DIV_EXPR; if (code == FLOOR_MOD_EXPR) code = TRUNC_MOD_EXPR; if (code == EXACT_DIV_EXPR && op1_is_pow2) code = TRUNC_DIV_EXPR; } if (op1 != const0_rtx) switch (code) { case TRUNC_MOD_EXPR: case TRUNC_DIV_EXPR: if (op1_is_constant) { if (unsignedp) { unsigned HOST_WIDE_INT mh, ml; int pre_shift, post_shift; int dummy; unsigned HOST_WIDE_INT d = (INTVAL (op1) & GET_MODE_MASK (compute_mode)); if (EXACT_POWER_OF_2_OR_ZERO_P (d)) { pre_shift = floor_log2 (d); if (rem_flag) { remainder = expand_binop (compute_mode, and_optab, op0, GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1), remainder, 1, OPTAB_LIB_WIDEN); if (remainder) return gen_lowpart (mode, remainder); } quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (pre_shift, 0), tquotient, 1); } else if (size <= HOST_BITS_PER_WIDE_INT) { if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1))) { /* Most significant bit of divisor is set; emit an scc insn. */ quotient = emit_store_flag (tquotient, GEU, op0, op1, compute_mode, 1, 1); if (quotient == 0) goto fail1; } else { /* Find a suitable multiplier and right shift count instead of multiplying with D. */ mh = choose_multiplier (d, size, size, &ml, &post_shift, &dummy); /* If the suggested multiplier is more than SIZE bits, we can do better for even divisors, using an initial right shift. 
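For an odd divisor the wide multiplier is kept and the missing top bit is folded back in with an extra add; e.g. (illustrative values only) a 32 bit unsigned division by 7 would get ml = 0x24924925 and post_shift = 3, and the sequence below then computes t1 = high half of (x * ml), q = (((x - t1) >> 1) + t1) >> (post_shift - 1), never needing a 33 bit intermediate.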
*/ if (mh != 0 && (d & 1) == 0) { pre_shift = floor_log2 (d & -d); mh = choose_multiplier (d >> pre_shift, size, size - pre_shift, &ml, &post_shift, &dummy); if (mh) abort (); } else pre_shift = 0; if (mh != 0) { rtx t1, t2, t3, t4; if (post_shift - 1 >= BITS_PER_WORD) goto fail1; extra_cost = (shift_cost[compute_mode][post_shift - 1] + shift_cost[compute_mode][1] + 2 * add_cost[compute_mode]); t1 = expand_mult_highpart (compute_mode, op0, ml, NULL_RTX, 1, max_cost - extra_cost); if (t1 == 0) goto fail1; t2 = force_operand (gen_rtx_MINUS (compute_mode, op0, t1), NULL_RTX); t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2, build_int_2 (1, 0), NULL_RTX,1); t4 = force_operand (gen_rtx_PLUS (compute_mode, t1, t3), NULL_RTX); quotient = expand_shift (RSHIFT_EXPR, compute_mode, t4, build_int_2 (post_shift - 1, 0), tquotient, 1); } else { rtx t1, t2; if (pre_shift >= BITS_PER_WORD || post_shift >= BITS_PER_WORD) goto fail1; t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (pre_shift, 0), NULL_RTX, 1); extra_cost = (shift_cost[compute_mode][pre_shift] + shift_cost[compute_mode][post_shift]); t2 = expand_mult_highpart (compute_mode, t1, ml, NULL_RTX, 1, max_cost - extra_cost); if (t2 == 0) goto fail1; quotient = expand_shift (RSHIFT_EXPR, compute_mode, t2, build_int_2 (post_shift, 0), tquotient, 1); } } } else /* Too wide mode to use tricky code */ break; insn = get_last_insn (); if (insn != last && (set = single_set (insn)) != 0 && SET_DEST (set) == quotient) set_unique_reg_note (insn, REG_EQUAL, gen_rtx_UDIV (compute_mode, op0, op1)); } else /* TRUNC_DIV, signed */ { unsigned HOST_WIDE_INT ml; int lgup, post_shift; HOST_WIDE_INT d = INTVAL (op1); unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d; /* n rem d = n rem -d */ if (rem_flag && d < 0) { d = abs_d; op1 = gen_int_mode (abs_d, compute_mode); } if (d == 1) quotient = op0; else if (d == -1) quotient = expand_unop (compute_mode, neg_optab, op0, tquotient, 0); else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1)) { /* This case is not handled correctly below. */ quotient = emit_store_flag (tquotient, EQ, op0, op1, compute_mode, 1, 1); if (quotient == 0) goto fail1; } else if (EXACT_POWER_OF_2_OR_ZERO_P (d) && (rem_flag ? smod_pow2_cheap[compute_mode] : sdiv_pow2_cheap[compute_mode]) /* We assume that cheap metric is true if the optab has an expander for this mode. */ && (((rem_flag ? smod_optab : sdiv_optab) ->handlers[compute_mode].insn_code != CODE_FOR_nothing) || (sdivmod_optab->handlers[compute_mode] .insn_code != CODE_FOR_nothing))) ; else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d)) { if (rem_flag) { remainder = expand_smod_pow2 (compute_mode, op0, d); if (remainder) return gen_lowpart (mode, remainder); } lgup = floor_log2 (abs_d); if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3)) { rtx label = gen_label_rtx (); rtx t1; t1 = copy_to_mode_reg (compute_mode, op0); do_cmp_and_jump (t1, const0_rtx, GE, compute_mode, label); expand_inc (t1, gen_int_mode (abs_d - 1, compute_mode)); emit_label (label); quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1, build_int_2 (lgup, 0), tquotient, 0); } else { rtx t1, t2, t3; t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (size - 1, 0), NULL_RTX, 0); t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1, build_int_2 (size - lgup, 0), NULL_RTX, 1); t3 = force_operand (gen_rtx_PLUS (compute_mode, op0, t2), NULL_RTX); quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3, build_int_2 (lgup, 0), tquotient, 0); } /* We have computed OP0 / abs(OP1). 
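(For illustration, with abs(OP1) == 4 and a 32 bit word the branch-free sequence above amounts to t = op0 + ((unsigned) (op0 >> 31) >> 30), i.e. add 3 to a negative operand, followed by quotient = t >> 2.)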
If OP1 is negative, negate the quotient. */ if (d < 0) { insn = get_last_insn (); if (insn != last && (set = single_set (insn)) != 0 && SET_DEST (set) == quotient && abs_d < ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))) set_unique_reg_note (insn, REG_EQUAL, gen_rtx_DIV (compute_mode, op0, GEN_INT (trunc_int_for_mode (abs_d, compute_mode)))); quotient = expand_unop (compute_mode, neg_optab, quotient, quotient, 0); } } else if (size <= HOST_BITS_PER_WIDE_INT) { choose_multiplier (abs_d, size, size - 1, &ml, &post_shift, &lgup); if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1)) { rtx t1, t2, t3; if (post_shift >= BITS_PER_WORD || size - 1 >= BITS_PER_WORD) goto fail1; extra_cost = (shift_cost[compute_mode][post_shift] + shift_cost[compute_mode][size - 1] + add_cost[compute_mode]); t1 = expand_mult_highpart (compute_mode, op0, ml, NULL_RTX, 0, max_cost - extra_cost); if (t1 == 0) goto fail1; t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1, build_int_2 (post_shift, 0), NULL_RTX, 0); t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (size - 1, 0), NULL_RTX, 0); if (d < 0) quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t2), tquotient); else quotient = force_operand (gen_rtx_MINUS (compute_mode, t2, t3), tquotient); } else { rtx t1, t2, t3, t4; if (post_shift >= BITS_PER_WORD || size - 1 >= BITS_PER_WORD) goto fail1; ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1); extra_cost = (shift_cost[compute_mode][post_shift] + shift_cost[compute_mode][size - 1] + 2 * add_cost[compute_mode]); t1 = expand_mult_highpart (compute_mode, op0, ml, NULL_RTX, 0, max_cost - extra_cost); if (t1 == 0) goto fail1; t2 = force_operand (gen_rtx_PLUS (compute_mode, t1, op0), NULL_RTX); t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2, build_int_2 (post_shift, 0), NULL_RTX, 0); t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (size - 1, 0), NULL_RTX, 0); if (d < 0) quotient = force_operand (gen_rtx_MINUS (compute_mode, t4, t3), tquotient); else quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t4), tquotient); } } else /* Too wide mode to use tricky code */ break; insn = get_last_insn (); if (insn != last && (set = single_set (insn)) != 0 && SET_DEST (set) == quotient) set_unique_reg_note (insn, REG_EQUAL, gen_rtx_DIV (compute_mode, op0, op1)); } break; } fail1: delete_insns_since (last); break; case FLOOR_DIV_EXPR: case FLOOR_MOD_EXPR: /* We will come here only for signed operations. */ if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size) { unsigned HOST_WIDE_INT mh, ml; int pre_shift, lgup, post_shift; HOST_WIDE_INT d = INTVAL (op1); if (d > 0) { /* We could just as easily deal with negative constants here, but it does not seem worth the trouble for GCC 2.6. 
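Note that a positive power-of-two divisor needs no rounding fix-up here at all: flooring division by 2**k is exactly an arithmetic right shift by k (e.g. -9 FLOOR_DIV 4 is -9 >> 2 == -3), and the flooring remainder is just op0 & (2**k - 1).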
*/ if (EXACT_POWER_OF_2_OR_ZERO_P (d)) { pre_shift = floor_log2 (d); if (rem_flag) { remainder = expand_binop (compute_mode, and_optab, op0, GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1), remainder, 0, OPTAB_LIB_WIDEN); if (remainder) return gen_lowpart (mode, remainder); } quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (pre_shift, 0), tquotient, 0); } else { rtx t1, t2, t3, t4; mh = choose_multiplier (d, size, size - 1, &ml, &post_shift, &lgup); if (mh) abort (); if (post_shift < BITS_PER_WORD && size - 1 < BITS_PER_WORD) { t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (size - 1, 0), NULL_RTX, 0); t2 = expand_binop (compute_mode, xor_optab, op0, t1, NULL_RTX, 0, OPTAB_WIDEN); extra_cost = (shift_cost[compute_mode][post_shift] + shift_cost[compute_mode][size - 1] + 2 * add_cost[compute_mode]); t3 = expand_mult_highpart (compute_mode, t2, ml, NULL_RTX, 1, max_cost - extra_cost); if (t3 != 0) { t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3, build_int_2 (post_shift, 0), NULL_RTX, 1); quotient = expand_binop (compute_mode, xor_optab, t4, t1, tquotient, 0, OPTAB_WIDEN); } } } } else { rtx nsign, t1, t2, t3, t4; t1 = force_operand (gen_rtx_PLUS (compute_mode, op0, constm1_rtx), NULL_RTX); t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX, 0, OPTAB_WIDEN); nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2, build_int_2 (size - 1, 0), NULL_RTX, 0); t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign), NULL_RTX); t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1, NULL_RTX, 0); if (t4) { rtx t5; t5 = expand_unop (compute_mode, one_cmpl_optab, nsign, NULL_RTX, 0); quotient = force_operand (gen_rtx_PLUS (compute_mode, t4, t5), tquotient); } } } if (quotient != 0) break; delete_insns_since (last); /* Try using an instruction that produces both the quotient and remainder, using truncation. We can easily compensate the quotient or remainder to get floor rounding, once we have the remainder. Notice that we compute also the final remainder value here, and return the result right away. */ if (target == 0 || GET_MODE (target) != compute_mode) target = gen_reg_rtx (compute_mode); if (rem_flag) { remainder = REG_P (target) ? target : gen_reg_rtx (compute_mode); quotient = gen_reg_rtx (compute_mode); } else { quotient = REG_P (target) ? target : gen_reg_rtx (compute_mode); remainder = gen_reg_rtx (compute_mode); } if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0)) { /* This could be computed with a branch-less sequence. Save that for later. */ rtx tem; rtx label = gen_label_rtx (); do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label); tem = expand_binop (compute_mode, xor_optab, op0, op1, NULL_RTX, 0, OPTAB_WIDEN); do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label); expand_dec (quotient, const1_rtx); expand_inc (remainder, op1); emit_label (label); return gen_lowpart (mode, rem_flag ? remainder : quotient); } /* No luck with division elimination or divmod. Have to do it by conditionally adjusting op0 *and* the result. 
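(E.g. -7 FLOOR_DIV 2 in the fallback below: the dividend is negative and the divisor positive, so it divides (-7 + 1) / 2 = -3 and then subtracts 1, giving the flooring quotient -4.)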
*/ { rtx label1, label2, label3, label4, label5; rtx adjusted_op0; rtx tem; quotient = gen_reg_rtx (compute_mode); adjusted_op0 = copy_to_mode_reg (compute_mode, op0); label1 = gen_label_rtx (); label2 = gen_label_rtx (); label3 = gen_label_rtx (); label4 = gen_label_rtx (); label5 = gen_label_rtx (); do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2); do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1); tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1, quotient, 0, OPTAB_LIB_WIDEN); if (tem != quotient) emit_move_insn (quotient, tem); emit_jump_insn (gen_jump (label5)); emit_barrier (); emit_label (label1); expand_inc (adjusted_op0, const1_rtx); emit_jump_insn (gen_jump (label4)); emit_barrier (); emit_label (label2); do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3); tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1, quotient, 0, OPTAB_LIB_WIDEN); if (tem != quotient) emit_move_insn (quotient, tem); emit_jump_insn (gen_jump (label5)); emit_barrier (); emit_label (label3); expand_dec (adjusted_op0, const1_rtx); emit_label (label4); tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1, quotient, 0, OPTAB_LIB_WIDEN); if (tem != quotient) emit_move_insn (quotient, tem); expand_dec (quotient, const1_rtx); emit_label (label5); } break; case CEIL_DIV_EXPR: case CEIL_MOD_EXPR: if (unsignedp) { if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))) { rtx t1, t2, t3; unsigned HOST_WIDE_INT d = INTVAL (op1); t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (floor_log2 (d), 0), tquotient, 1); t2 = expand_binop (compute_mode, and_optab, op0, GEN_INT (d - 1), NULL_RTX, 1, OPTAB_LIB_WIDEN); t3 = gen_reg_rtx (compute_mode); t3 = emit_store_flag (t3, NE, t2, const0_rtx, compute_mode, 1, 1); if (t3 == 0) { rtx lab; lab = gen_label_rtx (); do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab); expand_inc (t1, const1_rtx); emit_label (lab); quotient = t1; } else quotient = force_operand (gen_rtx_PLUS (compute_mode, t1, t3), tquotient); break; } /* Try using an instruction that produces both the quotient and remainder, using truncation. We can easily compensate the quotient or remainder to get ceiling rounding, once we have the remainder. Notice that we compute also the final remainder value here, and return the result right away. */ if (target == 0 || GET_MODE (target) != compute_mode) target = gen_reg_rtx (compute_mode); if (rem_flag) { remainder = (REG_P (target) ? target : gen_reg_rtx (compute_mode)); quotient = gen_reg_rtx (compute_mode); } else { quotient = (REG_P (target) ? target : gen_reg_rtx (compute_mode)); remainder = gen_reg_rtx (compute_mode); } if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1)) { /* This could be computed with a branch-less sequence. Save that for later. */ rtx label = gen_label_rtx (); do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label); expand_inc (quotient, const1_rtx); expand_dec (remainder, op1); emit_label (label); return gen_lowpart (mode, rem_flag ? remainder : quotient); } /* No luck with division elimination or divmod. Have to do it by conditionally adjusting op0 *and* the result. 
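(E.g. CEIL_DIV of 13 by 4 in the fallback below computes (13 - 1) / 4 + 1 == 4; a zero dividend is special-cased to a zero quotient.)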
*/ { rtx label1, label2; rtx adjusted_op0, tem; quotient = gen_reg_rtx (compute_mode); adjusted_op0 = copy_to_mode_reg (compute_mode, op0); label1 = gen_label_rtx (); label2 = gen_label_rtx (); do_cmp_and_jump (adjusted_op0, const0_rtx, NE, compute_mode, label1); emit_move_insn (quotient, const0_rtx); emit_jump_insn (gen_jump (label2)); emit_barrier (); emit_label (label1); expand_dec (adjusted_op0, const1_rtx); tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1, quotient, 1, OPTAB_LIB_WIDEN); if (tem != quotient) emit_move_insn (quotient, tem); expand_inc (quotient, const1_rtx); emit_label (label2); } } else /* signed */ { if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)) && INTVAL (op1) >= 0) { /* This is extremely similar to the code for the unsigned case above. For 2.7 we should merge these variants, but for 2.6.1 I don't want to touch the code for unsigned since that get used in C. The signed case will only be used by other languages (Ada). */ rtx t1, t2, t3; unsigned HOST_WIDE_INT d = INTVAL (op1); t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (floor_log2 (d), 0), tquotient, 0); t2 = expand_binop (compute_mode, and_optab, op0, GEN_INT (d - 1), NULL_RTX, 1, OPTAB_LIB_WIDEN); t3 = gen_reg_rtx (compute_mode); t3 = emit_store_flag (t3, NE, t2, const0_rtx, compute_mode, 1, 1); if (t3 == 0) { rtx lab; lab = gen_label_rtx (); do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab); expand_inc (t1, const1_rtx); emit_label (lab); quotient = t1; } else quotient = force_operand (gen_rtx_PLUS (compute_mode, t1, t3), tquotient); break; } /* Try using an instruction that produces both the quotient and remainder, using truncation. We can easily compensate the quotient or remainder to get ceiling rounding, once we have the remainder. Notice that we compute also the final remainder value here, and return the result right away. */ if (target == 0 || GET_MODE (target) != compute_mode) target = gen_reg_rtx (compute_mode); if (rem_flag) { remainder= (REG_P (target) ? target : gen_reg_rtx (compute_mode)); quotient = gen_reg_rtx (compute_mode); } else { quotient = (REG_P (target) ? target : gen_reg_rtx (compute_mode)); remainder = gen_reg_rtx (compute_mode); } if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0)) { /* This could be computed with a branch-less sequence. Save that for later. */ rtx tem; rtx label = gen_label_rtx (); do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label); tem = expand_binop (compute_mode, xor_optab, op0, op1, NULL_RTX, 0, OPTAB_WIDEN); do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label); expand_inc (quotient, const1_rtx); expand_dec (remainder, op1); emit_label (label); return gen_lowpart (mode, rem_flag ? remainder : quotient); } /* No luck with division elimination or divmod. Have to do it by conditionally adjusting op0 *and* the result. 
*/ { rtx label1, label2, label3, label4, label5; rtx adjusted_op0; rtx tem; quotient = gen_reg_rtx (compute_mode); adjusted_op0 = copy_to_mode_reg (compute_mode, op0); label1 = gen_label_rtx (); label2 = gen_label_rtx (); label3 = gen_label_rtx (); label4 = gen_label_rtx (); label5 = gen_label_rtx (); do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2); do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label1); tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1, quotient, 0, OPTAB_LIB_WIDEN); if (tem != quotient) emit_move_insn (quotient, tem); emit_jump_insn (gen_jump (label5)); emit_barrier (); emit_label (label1); expand_dec (adjusted_op0, const1_rtx); emit_jump_insn (gen_jump (label4)); emit_barrier (); emit_label (label2); do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label3); tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1, quotient, 0, OPTAB_LIB_WIDEN); if (tem != quotient) emit_move_insn (quotient, tem); emit_jump_insn (gen_jump (label5)); emit_barrier (); emit_label (label3); expand_inc (adjusted_op0, const1_rtx); emit_label (label4); tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1, quotient, 0, OPTAB_LIB_WIDEN); if (tem != quotient) emit_move_insn (quotient, tem); expand_inc (quotient, const1_rtx); emit_label (label5); } } break; case EXACT_DIV_EXPR: if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size) { HOST_WIDE_INT d = INTVAL (op1); unsigned HOST_WIDE_INT ml; int pre_shift; rtx t1; pre_shift = floor_log2 (d & -d); ml = invert_mod2n (d >> pre_shift, size); t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0, build_int_2 (pre_shift, 0), NULL_RTX, unsignedp); quotient = expand_mult (compute_mode, t1, gen_int_mode (ml, compute_mode), NULL_RTX, 1); insn = get_last_insn (); set_unique_reg_note (insn, REG_EQUAL, gen_rtx_fmt_ee (unsignedp ? 
UDIV : DIV, compute_mode, op0, op1)); } break; case ROUND_DIV_EXPR: case ROUND_MOD_EXPR: if (unsignedp) { rtx tem; rtx label; label = gen_label_rtx (); quotient = gen_reg_rtx (compute_mode); remainder = gen_reg_rtx (compute_mode); if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0) { rtx tem; quotient = expand_binop (compute_mode, udiv_optab, op0, op1, quotient, 1, OPTAB_LIB_WIDEN); tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1); remainder = expand_binop (compute_mode, sub_optab, op0, tem, remainder, 1, OPTAB_LIB_WIDEN); } tem = plus_constant (op1, -1); tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, build_int_2 (1, 0), NULL_RTX, 1); do_cmp_and_jump (remainder, tem, LEU, compute_mode, label); expand_inc (quotient, const1_rtx); expand_dec (remainder, op1); emit_label (label); } else { rtx abs_rem, abs_op1, tem, mask; rtx label; label = gen_label_rtx (); quotient = gen_reg_rtx (compute_mode); remainder = gen_reg_rtx (compute_mode); if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0) { rtx tem; quotient = expand_binop (compute_mode, sdiv_optab, op0, op1, quotient, 0, OPTAB_LIB_WIDEN); tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0); remainder = expand_binop (compute_mode, sub_optab, op0, tem, remainder, 0, OPTAB_LIB_WIDEN); } abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0); abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0); tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem, build_int_2 (1, 0), NULL_RTX, 1); do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label); tem = expand_binop (compute_mode, xor_optab, op0, op1, NULL_RTX, 0, OPTAB_WIDEN); mask = expand_shift (RSHIFT_EXPR, compute_mode, tem, build_int_2 (size - 1, 0), NULL_RTX, 0); tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx, NULL_RTX, 0, OPTAB_WIDEN); tem = expand_binop (compute_mode, sub_optab, tem, mask, NULL_RTX, 0, OPTAB_WIDEN); expand_inc (quotient, tem); tem = expand_binop (compute_mode, xor_optab, mask, op1, NULL_RTX, 0, OPTAB_WIDEN); tem = expand_binop (compute_mode, sub_optab, tem, mask, NULL_RTX, 0, OPTAB_WIDEN); expand_dec (remainder, tem); emit_label (label); } return gen_lowpart (mode, rem_flag ? remainder : quotient); default: abort (); } if (quotient == 0) { if (target && GET_MODE (target) != compute_mode) target = 0; if (rem_flag) { /* Try to produce the remainder without producing the quotient. If we seem to have a divmod pattern that does not require widening, don't try widening here. We should really have a WIDEN argument to expand_twoval_binop, since what we'd really like to do here is 1) try a mod insn in compute_mode 2) try a divmod insn in compute_mode 3) try a div insn in compute_mode and multiply-subtract to get remainder 4) try the same things with widening allowed. */ remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab, op0, op1, target, unsignedp, ((optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing) ? OPTAB_DIRECT : OPTAB_WIDEN)); if (remainder == 0) { /* No luck there. Can we do remainder and divide at once without a library call? */ remainder = gen_reg_rtx (compute_mode); if (! expand_twoval_binop ((unsignedp ? udivmod_optab : sdivmod_optab), op0, op1, NULL_RTX, remainder, unsignedp)) remainder = 0; } if (remainder) return gen_lowpart (mode, remainder); } /* Produce the quotient. Try a quotient insn, but not a library call. If we have a divmod in this mode, use it in preference to widening the div (for this test we assume it will not fail). 
Note that optab2 is set to the one of the two optabs that the call below will use. */ quotient = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab, op0, op1, rem_flag ? NULL_RTX : target, unsignedp, ((optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing) ? OPTAB_DIRECT : OPTAB_WIDEN)); if (quotient == 0) { /* No luck there. Try a quotient-and-remainder insn, keeping the quotient alone. */ quotient = gen_reg_rtx (compute_mode); if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab, op0, op1, quotient, NULL_RTX, unsignedp)) { quotient = 0; if (! rem_flag) /* Still no luck. If we are not computing the remainder, use a library call for the quotient. */ quotient = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab, op0, op1, target, unsignedp, OPTAB_LIB_WIDEN); } } } if (rem_flag) { if (target && GET_MODE (target) != compute_mode) target = 0; if (quotient == 0) /* No divide instruction either. Use library for remainder. */ remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab, op0, op1, target, unsignedp, OPTAB_LIB_WIDEN); else { /* We divided. Now finish doing X - Y * (X / Y). */ remainder = expand_mult (compute_mode, quotient, op1, NULL_RTX, unsignedp); remainder = expand_binop (compute_mode, sub_optab, op0, remainder, target, unsignedp, OPTAB_LIB_WIDEN); } } return gen_lowpart (mode, rem_flag ? remainder : quotient); } /* Return a tree node with data type TYPE, describing the value of X. Usually this is an VAR_DECL, if there is no obvious better choice. X may be an expression, however we only support those expressions generated by loop.c. */ tree make_tree (tree type, rtx x) { tree t; switch (GET_CODE (x)) { case CONST_INT: t = build_int_2 (INTVAL (x), (TYPE_UNSIGNED (type) && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT)) || INTVAL (x) >= 0 ? 0 : -1); TREE_TYPE (t) = type; return t; case CONST_DOUBLE: if (GET_MODE (x) == VOIDmode) { t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x)); TREE_TYPE (t) = type; } else { REAL_VALUE_TYPE d; REAL_VALUE_FROM_CONST_DOUBLE (d, x); t = build_real (type, d); } return t; case CONST_VECTOR: { int i, units; rtx elt; tree t = NULL_TREE; units = CONST_VECTOR_NUNITS (x); /* Build a tree with vector elements. 
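The loop below conses from the last element down to element 0, so the resulting TREE_LIST comes out in element order.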
*/ for (i = units - 1; i >= 0; --i) { elt = CONST_VECTOR_ELT (x, i); t = tree_cons (NULL_TREE, make_tree (type, elt), t); } return build_vector (type, t); } case PLUS: return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)), make_tree (type, XEXP (x, 1)))); case MINUS: return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)), make_tree (type, XEXP (x, 1)))); case NEG: return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)))); case MULT: return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)), make_tree (type, XEXP (x, 1)))); case ASHIFT: return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)), make_tree (type, XEXP (x, 1)))); case LSHIFTRT: t = lang_hooks.types.unsigned_type (type); return fold (convert (type, build (RSHIFT_EXPR, t, make_tree (t, XEXP (x, 0)), make_tree (type, XEXP (x, 1))))); case ASHIFTRT: t = lang_hooks.types.signed_type (type); return fold (convert (type, build (RSHIFT_EXPR, t, make_tree (t, XEXP (x, 0)), make_tree (type, XEXP (x, 1))))); case DIV: if (TREE_CODE (type) != REAL_TYPE) t = lang_hooks.types.signed_type (type); else t = type; return fold (convert (type, build (TRUNC_DIV_EXPR, t, make_tree (t, XEXP (x, 0)), make_tree (t, XEXP (x, 1))))); case UDIV: t = lang_hooks.types.unsigned_type (type); return fold (convert (type, build (TRUNC_DIV_EXPR, t, make_tree (t, XEXP (x, 0)), make_tree (t, XEXP (x, 1))))); case SIGN_EXTEND: case ZERO_EXTEND: t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)), GET_CODE (x) == ZERO_EXTEND); return fold (convert (type, make_tree (t, XEXP (x, 0)))); default: t = build_decl (VAR_DECL, NULL_TREE, type); /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being ptr_mode. So convert. */ if (POINTER_TYPE_P (type)) x = convert_memory_address (TYPE_MODE (type), x); /* Note that we do *not* use SET_DECL_RTL here, because we do not want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */ t->decl.rtl = x; return t; } } /* Check whether the multiplication X * MULT + ADD overflows. X, MULT and ADD must be CONST_*. MODE is the machine mode for the computation. X and MULT must have mode MODE. ADD may have a different mode. So can X (defaults to same as MODE). UNSIGNEDP is nonzero to do unsigned multiplication. */ bool const_mult_add_overflow_p (rtx x, rtx mult, rtx add, enum machine_mode mode, int unsignedp) { tree type, mult_type, add_type, result; type = lang_hooks.types.type_for_mode (mode, unsignedp); /* In order to get a proper overflow indication from an unsigned type, we have to pretend that it's a sizetype. */ mult_type = type; if (unsignedp) { mult_type = copy_node (type); TYPE_IS_SIZETYPE (mult_type) = 1; } add_type = (GET_MODE (add) == VOIDmode ? mult_type : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp)); result = fold (build (PLUS_EXPR, mult_type, fold (build (MULT_EXPR, mult_type, make_tree (mult_type, x), make_tree (mult_type, mult))), make_tree (add_type, add))); return TREE_CONSTANT_OVERFLOW (result); } /* Return an rtx representing the value of X * MULT + ADD. TARGET is a suggestion for where to store the result (an rtx). MODE is the machine mode for the computation. X and MULT must have mode MODE. ADD may have a different mode. So can X (defaults to same as MODE). UNSIGNEDP is nonzero to do unsigned multiplication. This may emit insns. 
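(Illustratively, a caller such as the induction-variable code in loop.c that needs B * M + A as a single value can pass the three rtxes with a NULL_RTX target and let this routine choose a register.)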
*/ rtx expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode, int unsignedp) { tree type = lang_hooks.types.type_for_mode (mode, unsignedp); tree add_type = (GET_MODE (add) == VOIDmode ? type: lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp)); tree result = fold (build (PLUS_EXPR, type, fold (build (MULT_EXPR, type, make_tree (type, x), make_tree (type, mult))), make_tree (add_type, add))); return expand_expr (result, target, VOIDmode, 0); } /* Compute the logical-and of OP0 and OP1, storing it in TARGET and returning TARGET. If TARGET is 0, a pseudo-register or constant is returned. */ rtx expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target) { rtx tem = 0; if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode) tem = simplify_binary_operation (AND, mode, op0, op1); if (tem == 0) tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN); if (target == 0) target = tem; else if (tem != target) emit_move_insn (target, tem); return target; } /* Emit a store-flags instruction for comparison CODE on OP0 and OP1 and storing in TARGET. Normally return TARGET. Return 0 if that cannot be done. MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If it is VOIDmode, they cannot both be CONST_INT. UNSIGNEDP is for the case where we have to widen the operands to perform the operation. It says to use zero-extension. NORMALIZEP is 1 if we should convert the result to be either zero or one. Normalize is -1 if we should convert the result to be either zero or -1. If NORMALIZEP is zero, the result will be left "raw" out of the scc insn. */ rtx emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1, enum machine_mode mode, int unsignedp, int normalizep) { rtx subtarget; enum insn_code icode; enum machine_mode compare_mode; enum machine_mode target_mode = GET_MODE (target); rtx tem; rtx last = get_last_insn (); rtx pattern, comparison; /* ??? Ok to do this and then fail? */ op0 = protect_from_queue (op0, 0); op1 = protect_from_queue (op1, 0); if (unsignedp) code = unsigned_condition (code); /* If one operand is constant, make it the second one. Only do this if the other operand is not constant as well. */ if (swap_commutative_operands_p (op0, op1)) { tem = op0; op0 = op1; op1 = tem; code = swap_condition (code); } if (mode == VOIDmode) mode = GET_MODE (op0); /* For some comparisons with 1 and -1, we can convert this to comparisons with zero. This will often produce more opportunities for store-flag insns. */ switch (code) { case LT: if (op1 == const1_rtx) op1 = const0_rtx, code = LE; break; case LE: if (op1 == constm1_rtx) op1 = const0_rtx, code = LT; break; case GE: if (op1 == const1_rtx) op1 = const0_rtx, code = GT; break; case GT: if (op1 == constm1_rtx) op1 = const0_rtx, code = GE; break; case GEU: if (op1 == const1_rtx) op1 = const0_rtx, code = NE; break; case LTU: if (op1 == const1_rtx) op1 = const0_rtx, code = EQ; break; default: break; } /* If we are comparing a double-word integer with zero, we can convert the comparison into one involving a single word. */ if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2 && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0))) { if (code == EQ || code == NE) { rtx op00, op01, op0both; /* Do a logical OR of the two words and compare the result. 
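E.g. on a 32 bit target a DImode test against zero becomes (xlow | xhigh) == 0, one IOR plus a single-word comparison.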
*/ op00 = simplify_gen_subreg (word_mode, op0, mode, 0); op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD); op0both = expand_binop (word_mode, ior_optab, op00, op01, NULL_RTX, unsignedp, OPTAB_DIRECT); if (op0both != 0) return emit_store_flag (target, code, op0both, op1, word_mode, unsignedp, normalizep); } else if (code == LT || code == GE) { rtx op0h; /* If testing the sign bit, can just test on high word. */ op0h = simplify_gen_subreg (word_mode, op0, mode, subreg_highpart_offset (word_mode, mode)); return emit_store_flag (target, code, op0h, op1, word_mode, unsignedp, normalizep); } } /* From now on, we won't change CODE, so set ICODE now. */ icode = setcc_gen_code[(int) code]; /* If this is A < 0 or A >= 0, we can do this by taking the ones complement of A (for GE) and shifting the sign bit to the low bit. */ if (op1 == const0_rtx && (code == LT || code == GE) && GET_MODE_CLASS (mode) == MODE_INT && (normalizep || STORE_FLAG_VALUE == 1 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode)) == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))))) { subtarget = target; /* If the result is to be wider than OP0, it is best to convert it first. If it is to be narrower, it is *incorrect* to convert it first. */ if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode)) { op0 = protect_from_queue (op0, 0); op0 = convert_modes (target_mode, mode, op0, 0); mode = target_mode; } if (target_mode != mode) subtarget = 0; if (code == GE) op0 = expand_unop (mode, one_cmpl_optab, op0, ((STORE_FLAG_VALUE == 1 || normalizep) ? 0 : subtarget), 0); if (STORE_FLAG_VALUE == 1 || normalizep) /* If we are supposed to produce a 0/1 value, we want to do a logical shift from the sign bit to the low-order bit; for a -1/0 value, we do an arithmetic shift. */ op0 = expand_shift (RSHIFT_EXPR, mode, op0, size_int (GET_MODE_BITSIZE (mode) - 1), subtarget, normalizep != -1); if (mode != target_mode) op0 = convert_modes (target_mode, mode, op0, 0); return op0; } if (icode != CODE_FOR_nothing) { insn_operand_predicate_fn pred; /* We think we may be able to do this with a scc insn. Emit the comparison and then the scc insn. compare_from_rtx may call emit_queue, which would be deleted below if the scc insn fails. So call it ourselves before setting LAST. Likewise for do_pending_stack_adjust. */ emit_queue (); do_pending_stack_adjust (); last = get_last_insn (); comparison = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX); if (CONSTANT_P (comparison)) { if (GET_CODE (comparison) == CONST_INT) { if (comparison == const0_rtx) return const0_rtx; } #ifdef FLOAT_STORE_FLAG_VALUE else if (GET_CODE (comparison) == CONST_DOUBLE) { if (comparison == CONST0_RTX (GET_MODE (comparison))) return const0_rtx; } #endif else abort (); if (normalizep == 1) return const1_rtx; if (normalizep == -1) return constm1_rtx; return const_true_rtx; } /* The code of COMPARISON may not match CODE if compare_from_rtx decided to swap its operands and reverse the original code. We know that compare_from_rtx returns either a CONST_INT or a new comparison code, so it is safe to just extract the code from COMPARISON. */ code = GET_CODE (comparison); /* Get a reference to the target in the proper mode for this insn. */ compare_mode = insn_data[(int) icode].operand[0].mode; subtarget = target; pred = insn_data[(int) icode].operand[0].predicate; if (preserve_subexpressions_p () || ! 
(*pred) (subtarget, compare_mode)) subtarget = gen_reg_rtx (compare_mode); pattern = GEN_FCN (icode) (subtarget); if (pattern) { emit_insn (pattern); /* If we are converting to a wider mode, first convert to TARGET_MODE, then normalize. This produces better combining opportunities on machines that have a SIGN_EXTRACT when we are testing a single bit. This mostly benefits the 68k. If STORE_FLAG_VALUE does not have the sign bit set when interpreted in COMPARE_MODE, we can do this conversion as unsigned, which is usually more efficient. */ if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode)) { convert_move (target, subtarget, (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT) && 0 == (STORE_FLAG_VALUE & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (compare_mode) -1)))); op0 = target; compare_mode = target_mode; } else op0 = subtarget; /* If we want to keep subexpressions around, don't reuse our last target. */ if (preserve_subexpressions_p ()) subtarget = 0; /* Now normalize to the proper value in COMPARE_MODE. Sometimes we don't have to do anything. */ if (normalizep == 0 || normalizep == STORE_FLAG_VALUE) ; /* STORE_FLAG_VALUE might be the most negative number, so write the comparison this way to avoid a compiler-time warning. */ else if (- normalizep == STORE_FLAG_VALUE) op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0); /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes it hard to use a value of just the sign bit due to ANSI integer constant typing rules. */ else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT && (STORE_FLAG_VALUE & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (compare_mode) - 1)))) op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0, size_int (GET_MODE_BITSIZE (compare_mode) - 1), subtarget, normalizep == 1); else if (STORE_FLAG_VALUE & 1) { op0 = expand_and (compare_mode, op0, const1_rtx, subtarget); if (normalizep == -1) op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0); } else abort (); /* If we were converting to a smaller mode, do the conversion now. */ if (target_mode != compare_mode) { convert_move (target, op0, 0); return target; } else return op0; } } delete_insns_since (last); /* If expensive optimizations, use different pseudo registers for each insn, instead of reusing the same pseudo. This leads to better CSE, but slows down the compiler, since there are more pseudos */ subtarget = (!flag_expensive_optimizations && (target_mode == mode)) ? target : NULL_RTX; /* If we reached here, we can't do this with a scc insn. However, there are some comparisons that can be done directly. For example, if this is an equality comparison of integers, we can try to exclusive-or (or subtract) the two operands and use a recursive call to try the comparison with zero. Don't do any of these cases if branches are very cheap. */ if (BRANCH_COST > 0 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE) && op1 != const0_rtx) { tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1, OPTAB_WIDEN); if (tem == 0) tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1, OPTAB_WIDEN); if (tem != 0) tem = emit_store_flag (target, code, tem, const0_rtx, mode, unsignedp, normalizep); if (tem == 0) delete_insns_since (last); return tem; } /* Some other cases we can do are EQ, NE, LE, and GT comparisons with the constant zero. Reject all other comparisons at this point. Only do LE and GT if branches are expensive since they are expensive on 2-operand machines. 
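(These rely on sign-bit identities; for instance A <= 0 exactly when (A | (A - 1)) has its sign bit set, so LE against zero can be done without a branch by a subtract and an IOR followed by a shift of the sign bit into place.)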
*/ if (BRANCH_COST == 0 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx || (code != EQ && code != NE && (BRANCH_COST <= 1 || (code != LE && code != GT)))) return 0; /* See what we need to return. We can only return a 1, -1, or the sign bit. */ if (normalizep == 0) { if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) normalizep = STORE_FLAG_VALUE; else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode)) == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))) ; else return 0; } /* Try to put the result of the comparison in the sign bit. Assume we can't do the necessary operation below. */ tem = 0; /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has the sign bit set. */ if (code == LE) { /* This is destructive, so SUBTARGET can't be OP0. */ if (rtx_equal_p (subtarget, op0)) subtarget = 0; tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0, OPTAB_WIDEN); if (tem) tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0, OPTAB_WIDEN); } /* To see if A > 0, compute (((signed) A) << BITS) - A, where BITS is the number of bits in the mode of OP0, minus one. */ if (code == GT) { if (rtx_equal_p (subtarget, op0)) subtarget = 0; tem = expand_shift (RSHIFT_EXPR, mode, op0, size_int (GET_MODE_BITSIZE (mode) - 1), subtarget, 0); tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0, OPTAB_WIDEN); } if (code == EQ || code == NE) { /* For EQ or NE, one way to do the comparison is to apply an operation that converts the operand into a positive number if it is nonzero or zero if it was originally zero. Then, for EQ, we subtract 1 and for NE we negate. This puts the result in the sign bit. Then we normalize with a shift, if needed. Two operations that can do the above actions are ABS and FFS, so try them. If that doesn't work, and MODE is smaller than a full word, we can use zero-extension to the wider mode (an unsigned conversion) as the operation. */ /* Note that ABS doesn't yield a positive number for INT_MIN, but that is compensated by the subsequent overflow when subtracting one / negating. */ if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing) tem = expand_unop (mode, abs_optab, op0, subtarget, 1); else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing) tem = expand_unop (mode, ffs_optab, op0, subtarget, 1); else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD) { op0 = protect_from_queue (op0, 0); tem = convert_modes (word_mode, mode, op0, 1); mode = word_mode; } if (tem != 0) { if (code == EQ) tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget, 0, OPTAB_WIDEN); else tem = expand_unop (mode, neg_optab, tem, subtarget, 0); } /* If we couldn't do it that way, for NE we can "or" the two's complement of the value with itself. For EQ, we take the one's complement of that "or", which is an extra insn, so we only handle EQ if branches are expensive. 
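(This works because for any nonzero A at least one of A and -A is negative, so (-A | A) has the sign bit set, while it is zero for A == 0.)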
*/ if (tem == 0 && (code == NE || BRANCH_COST > 1)) { if (rtx_equal_p (subtarget, op0)) subtarget = 0; tem = expand_unop (mode, neg_optab, op0, subtarget, 0); tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0, OPTAB_WIDEN); if (tem && code == EQ) tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0); } } if (tem && normalizep) tem = expand_shift (RSHIFT_EXPR, mode, tem, size_int (GET_MODE_BITSIZE (mode) - 1), subtarget, normalizep == 1); if (tem) { if (GET_MODE (tem) != target_mode) { convert_move (target, tem, 0); tem = target; } else if (!subtarget) { emit_move_insn (target, tem); tem = target; } } else delete_insns_since (last); return tem; } /* Like emit_store_flag, but always succeeds. */ rtx emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1, enum machine_mode mode, int unsignedp, int normalizep) { rtx tem, label; /* First see if emit_store_flag can do the job. */ tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep); if (tem != 0) return tem; if (normalizep == 0) normalizep = 1; /* If this failed, we have to do this with set/compare/jump/set code. */ if (!REG_P (target) || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1)) target = gen_reg_rtx (GET_MODE (target)); emit_move_insn (target, const1_rtx); label = gen_label_rtx (); do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, NULL_RTX, label); emit_move_insn (target, const0_rtx); emit_label (label); return target; } /* Perform possibly multi-word comparison and conditional jump to LABEL if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE The algorithm is based on the code in expr.c:do_jump. Note that this does not perform a general comparison. Only variants generated within expmed.c are correctly handled, others abort (but could be handled if needed). */ static void do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode, rtx label) { /* If this mode is an integer too wide to compare properly, compare word by word. Rely on cse to optimize constant cases. */ if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (op, mode, ccp_jump)) { rtx label2 = gen_label_rtx (); switch (op) { case LTU: do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label); break; case LEU: do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2); break; case LT: do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label); break; case GT: do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label); break; case GE: do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2); break; /* do_jump_by_parts_equality_rtx compares with zero. Luckily that's the only equality operations we do */ case EQ: if (arg2 != const0_rtx || mode != GET_MODE(arg1)) abort (); do_jump_by_parts_equality_rtx (arg1, label2, label); break; case NE: if (arg2 != const0_rtx || mode != GET_MODE(arg1)) abort (); do_jump_by_parts_equality_rtx (arg1, label, label2); break; default: abort (); } emit_label (label2); } else emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label); } /* Convert tree expression to rtl instructions, for GNU compiler. Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */ /* Decide whether a function's arguments should be processed from first to last or from last to first. They should if the stack and args grow in opposite directions, but only if we have push insns. */ #ifdef PUSH_ROUNDING #ifndef PUSH_ARGS_REVERSED #if defined (STACK_GROWS_DOWNWARD) != defined (ARGS_GROW_DOWNWARD) #define PUSH_ARGS_REVERSED /* If it's last to first. */ #endif #endif #endif #ifndef STACK_PUSH_CODE #ifdef STACK_GROWS_DOWNWARD #define STACK_PUSH_CODE PRE_DEC #else #define STACK_PUSH_CODE PRE_INC #endif #endif /* If this is nonzero, we do not bother generating VOLATILE around volatile memory references, and we are willing to output indirect addresses. If cse is to follow, we reject indirect addresses so a useful potential cse is generated; if it is used only once, instruction combination will produce the same indirect address eventually. */ int cse_not_expected; /* This structure is used by move_by_pieces to describe the move to be performed. */ struct move_by_pieces { rtx to; rtx to_addr; int autinc_to; int explicit_inc_to; rtx from; rtx from_addr; int autinc_from; int explicit_inc_from; unsigned HOST_WIDE_INT len; HOST_WIDE_INT offset; int reverse; }; /* This structure is used by store_by_pieces to describe the clear to be performed. */ struct store_by_pieces { rtx to; rtx to_addr; int autinc_to; int explicit_inc_to; unsigned HOST_WIDE_INT len; HOST_WIDE_INT offset; rtx (*constfun) (void *, HOST_WIDE_INT, enum machine_mode); void *constfundata; int reverse; }; static rtx enqueue_insn (rtx, rtx); static unsigned HOST_WIDE_INT move_by_pieces_ninsns (unsigned HOST_WIDE_INT, unsigned int); static void move_by_pieces_1 (rtx (*) (rtx, ...), enum machine_mode, struct move_by_pieces *); static bool block_move_libcall_safe_for_call_parm (void); static bool emit_block_move_via_movstr (rtx, rtx, rtx, unsigned); static rtx emit_block_move_via_libcall (rtx, rtx, rtx); static tree emit_block_move_libcall_fn (int); static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned); static rtx clear_by_pieces_1 (void *, HOST_WIDE_INT, enum machine_mode); static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int); static void store_by_pieces_1 (struct store_by_pieces *, unsigned int); static void store_by_pieces_2 (rtx (*) (rtx, ...), enum machine_mode, struct store_by_pieces *); static bool clear_storage_via_clrstr (rtx, rtx, unsigned); static rtx clear_storage_via_libcall (rtx, rtx); static tree clear_storage_libcall_fn (int); static rtx compress_float_constant (rtx, rtx); static rtx get_subtarget (rtx); static void store_constructor_field (rtx, unsigned HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode, tree, tree, int, int); static void store_constructor (tree, rtx, int, HOST_WIDE_INT); static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode, tree, enum machine_mode, int, tree, int); static rtx var_rtx (tree); static unsigned HOST_WIDE_INT highest_pow2_factor (tree); static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (tree, tree); static int 
is_aligning_offset (tree, tree); static rtx expand_increment (tree, int, int); static void expand_operands (tree, tree, rtx, rtx*, rtx*, enum expand_modifier); static rtx do_store_flag (tree, rtx, enum machine_mode, int); #ifdef PUSH_ROUNDING static void emit_single_push_insn (enum machine_mode, rtx, tree); #endif static void do_tablejump (rtx, enum machine_mode, rtx, rtx, rtx); static rtx const_vector_from_tree (tree); /* Record for each mode whether we can move a register directly to or from an object of that mode in memory. If we can't, we won't try to use that mode directly when accessing a field of that mode. */ static char direct_load[NUM_MACHINE_MODES]; static char direct_store[NUM_MACHINE_MODES]; /* Record for each mode whether we can float-extend from memory. */ static bool float_extend_from_mem[NUM_MACHINE_MODES][NUM_MACHINE_MODES]; /* This macro is used to determine whether move_by_pieces should be called to perform a structure copy. */ #ifndef MOVE_BY_PIECES_P #define MOVE_BY_PIECES_P(SIZE, ALIGN) \ (move_by_pieces_ninsns (SIZE, ALIGN) < (unsigned int) MOVE_RATIO) #endif /* This macro is used to determine whether clear_by_pieces should be called to clear storage. */ #ifndef CLEAR_BY_PIECES_P #define CLEAR_BY_PIECES_P(SIZE, ALIGN) \ (move_by_pieces_ninsns (SIZE, ALIGN) < (unsigned int) CLEAR_RATIO) #endif /* This macro is used to determine whether store_by_pieces should be called to "memset" storage with byte values other than zero, or to "memcpy" storage when the source is a constant string. */ #ifndef STORE_BY_PIECES_P #define STORE_BY_PIECES_P(SIZE, ALIGN) MOVE_BY_PIECES_P (SIZE, ALIGN) #endif /* This array records the insn_code of insns to perform block moves. */ enum insn_code movstr_optab[NUM_MACHINE_MODES]; /* This array records the insn_code of insns to perform block clears. */ enum insn_code clrstr_optab[NUM_MACHINE_MODES]; /* These arrays record the insn_code of two different kinds of insns to perform block compares. */ enum insn_code cmpstr_optab[NUM_MACHINE_MODES]; enum insn_code cmpmem_optab[NUM_MACHINE_MODES]; /* SLOW_UNALIGNED_ACCESS is nonzero if unaligned accesses are very slow. */ #ifndef SLOW_UNALIGNED_ACCESS #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT #endif /* This is run once per compilation to set up which modes can be used directly in memory and to initialize the block move optab. */ void init_expr_once (void) { rtx insn, pat; enum machine_mode mode; int num_clobbers; rtx mem, mem1; rtx reg; /* Try indexing by frame ptr and try by stack ptr. It is known that on the Convex the stack ptr isn't a valid index. With luck, one or the other is valid on any machine. */ mem = gen_rtx_MEM (VOIDmode, stack_pointer_rtx); mem1 = gen_rtx_MEM (VOIDmode, frame_pointer_rtx); /* A scratch register we can modify in-place below to avoid useless RTL allocations. */ reg = gen_rtx_REG (VOIDmode, -1); insn = rtx_alloc (INSN); pat = gen_rtx_SET (0, NULL_RTX, NULL_RTX); PATTERN (insn) = pat; for (mode = VOIDmode; (int) mode < NUM_MACHINE_MODES; mode = (enum machine_mode) ((int) mode + 1)) { int regno; direct_load[(int) mode] = direct_store[(int) mode] = 0; PUT_MODE (mem, mode); PUT_MODE (mem1, mode); PUT_MODE (reg, mode); /* See if there is some register that can be used in this mode and directly loaded or stored from memory. */ if (mode != VOIDmode && mode != BLKmode) for (regno = 0; regno < FIRST_PSEUDO_REGISTER && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0); regno++) { if (! 
HARD_REGNO_MODE_OK (regno, mode)) continue; REGNO (reg) = regno; SET_SRC (pat) = mem; SET_DEST (pat) = reg; if (recog (pat, insn, &num_clobbers) >= 0) direct_load[(int) mode] = 1; SET_SRC (pat) = mem1; SET_DEST (pat) = reg; if (recog (pat, insn, &num_clobbers) >= 0) direct_load[(int) mode] = 1; SET_SRC (pat) = reg; SET_DEST (pat) = mem; if (recog (pat, insn, &num_clobbers) >= 0) direct_store[(int) mode] = 1; SET_SRC (pat) = reg; SET_DEST (pat) = mem1; if (recog (pat, insn, &num_clobbers) >= 0) direct_store[(int) mode] = 1; } } mem = gen_rtx_MEM (VOIDmode, gen_rtx_raw_REG (Pmode, 10000)); for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) { enum machine_mode srcmode; for (srcmode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); srcmode != mode; srcmode = GET_MODE_WIDER_MODE (srcmode)) { enum insn_code ic; ic = can_extend_p (mode, srcmode, 0); if (ic == CODE_FOR_nothing) continue; PUT_MODE (mem, srcmode); if ((*insn_data[ic].operand[1].predicate) (mem, srcmode)) float_extend_from_mem[mode][srcmode] = true; } } } /* This is run at the start of compiling a function. */ void init_expr (void) { cfun->expr = ggc_alloc_cleared (sizeof (struct expr_status)); } /* Small sanity check that the queue is empty at the end of a function. */ void finish_expr_for_function (void) { if (pending_chain) abort (); } /* Manage the queue of increment instructions to be output for POSTINCREMENT_EXPR expressions, etc. */ /* Queue up to increment (or change) VAR later. BODY says how: BODY should be the same thing you would pass to emit_insn to increment right away. It will go to emit_insn later on. The value is a QUEUED expression to be used in place of VAR where you want to guarantee the pre-incrementation value of VAR. */ static rtx enqueue_insn (rtx var, rtx body) { pending_chain = gen_rtx_QUEUED (GET_MODE (var), var, NULL_RTX, NULL_RTX, body, pending_chain); return pending_chain; } /* Use protect_from_queue to convert a QUEUED expression into something that you can put immediately into an instruction. If the queued incrementation has not happened yet, protect_from_queue returns the variable itself. If the incrementation has happened, protect_from_queue returns a temp that contains a copy of the old value of the variable. Any time an rtx which might possibly be a QUEUED is to be put into an instruction, it must be passed through protect_from_queue first. QUEUED expressions are not meaningful in instructions. Do not pass a value through protect_from_queue and then hold on to it for a while before putting it in an instruction! If the queue is flushed in between, incorrect code will result. */ rtx protect_from_queue (rtx x, int modify) { RTX_CODE code = GET_CODE (x); #if 0 /* A QUEUED can hang around after the queue is forced out. */ /* Shortcut for most common case. */ if (pending_chain == 0) return x; #endif if (code != QUEUED) { /* A special hack for read access to (MEM (QUEUED ...)) to facilitate use of autoincrement. Make a copy of the contents of the memory location rather than a copy of the address, but not if the value is of mode BLKmode. Don't modify X in place since it might be shared. 
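(For a post-incremented pointer, for example, the read must observe the contents at the pre-increment address even if the queue is flushed in between, which is why the contents rather than the address are captured.)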
*/ if (code == MEM && GET_MODE (x) != BLKmode && GET_CODE (XEXP (x, 0)) == QUEUED && !modify) { rtx y = XEXP (x, 0); rtx new = replace_equiv_address_nv (x, QUEUED_VAR (y)); if (QUEUED_INSN (y)) { rtx temp = gen_reg_rtx (GET_MODE (x)); emit_insn_before (gen_move_insn (temp, new), QUEUED_INSN (y)); return temp; } /* Copy the address into a pseudo, so that the returned value remains correct across calls to emit_queue. */ return replace_equiv_address (new, copy_to_reg (XEXP (new, 0))); } /* Otherwise, recursively protect the subexpressions of all the kinds of rtx's that can contain a QUEUED. */ if (code == MEM) { rtx tem = protect_from_queue (XEXP (x, 0), 0); if (tem != XEXP (x, 0)) { x = copy_rtx (x); XEXP (x, 0) = tem; } } else if (code == PLUS || code == MULT) { rtx new0 = protect_from_queue (XEXP (x, 0), 0); rtx new1 = protect_from_queue (XEXP (x, 1), 0); if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1)) { x = copy_rtx (x); XEXP (x, 0) = new0; XEXP (x, 1) = new1; } } return x; } /* If the increment has not happened, use the variable itself. Copy it into a new pseudo so that the value remains correct across calls to emit_queue. */ if (QUEUED_INSN (x) == 0) return copy_to_reg (QUEUED_VAR (x)); /* If the increment has happened and a pre-increment copy exists, use that copy. */ if (QUEUED_COPY (x) != 0) return QUEUED_COPY (x); /* The increment has happened but we haven't set up a pre-increment copy. Set one up now, and use it. */ QUEUED_COPY (x) = gen_reg_rtx (GET_MODE (QUEUED_VAR (x))); emit_insn_before (gen_move_insn (QUEUED_COPY (x), QUEUED_VAR (x)), QUEUED_INSN (x)); return QUEUED_COPY (x); } /* Return nonzero if X contains a QUEUED expression: if it contains anything that will be altered by a queued increment. We handle only combinations of MEM, PLUS, MINUS and MULT operators since memory addresses generally contain only those. */ int queued_subexp_p (rtx x) { enum rtx_code code = GET_CODE (x); switch (code) { case QUEUED: return 1; case MEM: return queued_subexp_p (XEXP (x, 0)); case MULT: case PLUS: case MINUS: return (queued_subexp_p (XEXP (x, 0)) || queued_subexp_p (XEXP (x, 1))); default: return 0; } } /* Retrieve a mark on the queue. */ static rtx mark_queue (void) { return pending_chain; } /* Perform all the pending incrementations that have been enqueued after MARK was retrieved. If MARK is null, perform all the pending incrementations. */ static void emit_insns_enqueued_after_mark (rtx mark) { rtx p; /* The marked incrementation may have been emitted in the meantime through a call to emit_queue. In this case, the mark is not valid anymore so do nothing. */ if (mark && ! QUEUED_BODY (mark)) return; while ((p = pending_chain) != mark) { rtx body = QUEUED_BODY (p); switch (GET_CODE (body)) { case INSN: case JUMP_INSN: case CALL_INSN: case CODE_LABEL: case BARRIER: case NOTE: QUEUED_INSN (p) = body; emit_insn (body); break; #ifdef ENABLE_CHECKING case SEQUENCE: abort (); break; #endif default: QUEUED_INSN (p) = emit_insn (body); break; } QUEUED_BODY (p) = 0; pending_chain = QUEUED_NEXT (p); } } /* Perform all the pending incrementations. */ void emit_queue (void) { emit_insns_enqueued_after_mark (NULL_RTX); } /* Copy data from FROM to TO, where the machine modes are not the same. Both modes may be integer, or both may be floating. UNSIGNEDP should be nonzero if FROM is an unsigned type. This causes zero-extension instead of sign-extension. 
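For example, assuming 8-bit QImode and 32-bit SImode, converting a QImode register holding 0x80 gives 0x00000080 when UNSIGNEDP is nonzero and 0xffffff80 when it is zero.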
*/ void convert_move (rtx to, rtx from, int unsignedp) { enum machine_mode to_mode = GET_MODE (to); enum machine_mode from_mode = GET_MODE (from); int to_real = GET_MODE_CLASS (to_mode) == MODE_FLOAT; int from_real = GET_MODE_CLASS (from_mode) == MODE_FLOAT; enum insn_code code; rtx libcall; /* rtx code for making an equivalent value. */ enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND)); to = protect_from_queue (to, 1); from = protect_from_queue (from, 0); if (to_real != from_real) abort (); /* If the source and destination are already the same, then there's nothing to do. */ if (to == from) return; /* If FROM is a SUBREG that indicates that we have already done at least the required extension, strip it. We don't handle such SUBREGs as TO here. */ if (GET_CODE (from) == SUBREG && SUBREG_PROMOTED_VAR_P (from) && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (from))) >= GET_MODE_SIZE (to_mode)) && SUBREG_PROMOTED_UNSIGNED_P (from) == unsignedp) from = gen_lowpart (to_mode, from), from_mode = to_mode; if (GET_CODE (to) == SUBREG && SUBREG_PROMOTED_VAR_P (to)) abort (); if (to_mode == from_mode || (from_mode == VOIDmode && CONSTANT_P (from))) { emit_move_insn (to, from); return; } if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode)) { if (GET_MODE_BITSIZE (from_mode) != GET_MODE_BITSIZE (to_mode)) abort (); if (VECTOR_MODE_P (to_mode)) from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0); else to = simplify_gen_subreg (from_mode, to, GET_MODE (to), 0); emit_move_insn (to, from); return; } if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT) { convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp); convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp); return; } if (to_real) { rtx value, insns; convert_optab tab; if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)) tab = sext_optab; else if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode)) tab = trunc_optab; else abort (); /* Try converting directly if the insn is supported. */ code = tab->handlers[to_mode][from_mode].insn_code; if (code != CODE_FOR_nothing) { emit_unop_insn (code, to, from, tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE); return; } /* Otherwise use a libcall. */ libcall = tab->handlers[to_mode][from_mode].libfunc; if (!libcall) /* This conversion is not implemented yet. */ abort (); start_sequence (); value = emit_library_call_value (libcall, NULL_RTX, LCT_CONST, to_mode, 1, from, from_mode); insns = get_insns (); end_sequence (); emit_libcall_block (insns, to, value, tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode, from) : gen_rtx_FLOAT_EXTEND (to_mode, from)); return; } /* Handle pointer conversion. */ /* SPEE 900220. */ /* Targets are expected to provide conversion insns between PxImode and xImode for all MODE_PARTIAL_INT modes they use, but no others. 
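For instance, a port using PSImode pointers would be expected to supply truncation and extension patterns between PSImode and SImode (conventionally named along the lines of truncsipsi2 and extendpsisi2 or zero_extendpsisi2; the names here are only illustrative), so that the optab handlers queried below are populated.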
*/ if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT) { enum machine_mode full_mode = smallest_mode_for_size (GET_MODE_BITSIZE (to_mode), MODE_INT); if (trunc_optab->handlers[to_mode][full_mode].insn_code == CODE_FOR_nothing) abort (); if (full_mode != from_mode) from = convert_to_mode (full_mode, from, unsignedp); emit_unop_insn (trunc_optab->handlers[to_mode][full_mode].insn_code, to, from, UNKNOWN); return; } if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT) { enum machine_mode full_mode = smallest_mode_for_size (GET_MODE_BITSIZE (from_mode), MODE_INT); if (sext_optab->handlers[full_mode][from_mode].insn_code == CODE_FOR_nothing) abort (); emit_unop_insn (sext_optab->handlers[full_mode][from_mode].insn_code, to, from, UNKNOWN); if (to_mode == full_mode) return; /* else proceed to integer conversions below. */ from_mode = full_mode; } /* Now both modes are integers. */ /* Handle expanding beyond a word. */ if (GET_MODE_BITSIZE (from_mode) < GET_MODE_BITSIZE (to_mode) && GET_MODE_BITSIZE (to_mode) > BITS_PER_WORD) { rtx insns; rtx lowpart; rtx fill_value; rtx lowfrom; int i; enum machine_mode lowpart_mode; int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD); /* Try converting directly if the insn is supported. */ if ((code = can_extend_p (to_mode, from_mode, unsignedp)) != CODE_FOR_nothing) { /* If FROM is a SUBREG, put it into a register. Do this so that we always generate the same set of insns for better cse'ing; if an intermediate assignment occurred, we won't be doing the operation directly on the SUBREG. */ if (optimize > 0 && GET_CODE (from) == SUBREG) from = force_reg (from_mode, from); emit_unop_insn (code, to, from, equiv_code); return; } /* Next, try converting via full word. */ else if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD && ((code = can_extend_p (to_mode, word_mode, unsignedp)) != CODE_FOR_nothing)) { if (REG_P (to)) { if (reg_overlap_mentioned_p (to, from)) from = force_reg (from_mode, from); emit_insn (gen_rtx_CLOBBER (VOIDmode, to)); } convert_move (gen_lowpart (word_mode, to), from, unsignedp); emit_unop_insn (code, to, gen_lowpart (word_mode, to), equiv_code); return; } /* No special multiword conversion insn; do it by hand. */ start_sequence (); /* Since we will turn this into a no conflict block, we must ensure that the source does not overlap the target. */ if (reg_overlap_mentioned_p (to, from)) from = force_reg (from_mode, from); /* Get a copy of FROM widened to a word, if necessary. */ if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD) lowpart_mode = word_mode; else lowpart_mode = from_mode; lowfrom = convert_to_mode (lowpart_mode, from, unsignedp); lowpart = gen_lowpart (lowpart_mode, to); emit_move_insn (lowpart, lowfrom); /* Compute the value to put in each remaining word. */ if (unsignedp) fill_value = const0_rtx; else { #ifdef HAVE_slt if (HAVE_slt && insn_data[(int) CODE_FOR_slt].operand[0].mode == word_mode && STORE_FLAG_VALUE == -1) { emit_cmp_insn (lowfrom, const0_rtx, NE, NULL_RTX, lowpart_mode, 0); fill_value = gen_reg_rtx (word_mode); emit_insn (gen_slt (fill_value)); } else #endif { fill_value = expand_shift (RSHIFT_EXPR, lowpart_mode, lowfrom, size_int (GET_MODE_BITSIZE (lowpart_mode) - 1), NULL_RTX, 0); fill_value = convert_to_mode (word_mode, fill_value, 1); } } /* Fill the remaining words. */ for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++) { int index = (WORDS_BIG_ENDIAN ? 
nwords - i - 1 : i); rtx subword = operand_subword (to, index, 1, to_mode); if (subword == 0) abort (); if (fill_value != subword) emit_move_insn (subword, fill_value); } insns = get_insns (); end_sequence (); emit_no_conflict_block (insns, to, from, NULL_RTX, gen_rtx_fmt_e (equiv_code, to_mode, copy_rtx (from))); return; } /* Truncating multi-word to a word or less. */ if (GET_MODE_BITSIZE (from_mode) > BITS_PER_WORD && GET_MODE_BITSIZE (to_mode) <= BITS_PER_WORD) { if (!((MEM_P (from) && ! MEM_VOLATILE_P (from) && direct_load[(int) to_mode] && ! mode_dependent_address_p (XEXP (from, 0))) || REG_P (from) || GET_CODE (from) == SUBREG)) from = force_reg (from_mode, from); convert_move (to, gen_lowpart (word_mode, from), 0); return; } /* Now follow all the conversions between integers no more than a word long. */ /* For truncation, usually we can just refer to FROM in a narrower mode. */ if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode) && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode), GET_MODE_BITSIZE (from_mode))) { if (!((MEM_P (from) && ! MEM_VOLATILE_P (from) && direct_load[(int) to_mode] && ! mode_dependent_address_p (XEXP (from, 0))) || REG_P (from) || GET_CODE (from) == SUBREG)) from = force_reg (from_mode, from); if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER && ! HARD_REGNO_MODE_OK (REGNO (from), to_mode)) from = copy_to_reg (from); emit_move_insn (to, gen_lowpart (to_mode, from)); return; } /* Handle extension. */ if (GET_MODE_BITSIZE (to_mode) > GET_MODE_BITSIZE (from_mode)) { /* Convert directly if that works. */ if ((code = can_extend_p (to_mode, from_mode, unsignedp)) != CODE_FOR_nothing) { if (flag_force_mem) from = force_not_mem (from); emit_unop_insn (code, to, from, equiv_code); return; } else { enum machine_mode intermediate; rtx tmp; tree shift_amount; /* Search for a mode to convert via. */ for (intermediate = from_mode; intermediate != VOIDmode; intermediate = GET_MODE_WIDER_MODE (intermediate)) if (((can_extend_p (to_mode, intermediate, unsignedp) != CODE_FOR_nothing) || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate) && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode), GET_MODE_BITSIZE (intermediate)))) && (can_extend_p (intermediate, from_mode, unsignedp) != CODE_FOR_nothing)) { convert_move (to, convert_to_mode (intermediate, from, unsignedp), unsignedp); return; } /* No suitable intermediate mode. Generate what we need with shifts. */ shift_amount = build_int_2 (GET_MODE_BITSIZE (to_mode) - GET_MODE_BITSIZE (from_mode), 0); from = gen_lowpart (to_mode, force_reg (from_mode, from)); tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount, to, unsignedp); tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount, to, unsignedp); if (tmp != to) emit_move_insn (to, tmp); return; } } /* Support special truncate insns for certain modes. */ if (trunc_optab->handlers[to_mode][from_mode].insn_code != CODE_FOR_nothing) { emit_unop_insn (trunc_optab->handlers[to_mode][from_mode].insn_code, to, from, UNKNOWN); return; } /* Handle truncation of volatile memrefs, and so on; the things that couldn't be truncated directly, and for which there was no special instruction. ??? Code above formerly short-circuited this, for most integer mode pairs, with a force_reg in from_mode followed by a recursive call to this routine. Appears always to have been wrong. 
*/ if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)) { rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from)); emit_move_insn (to, temp); return; } /* Mode combination is not recognized. */ abort (); } /* Return an rtx for a value that would result from converting X to mode MODE. Both X and MODE may be floating, or both integer. UNSIGNEDP is nonzero if X is an unsigned value. This can be done by referring to a part of X in place or by copying to a new temporary with conversion. This function *must not* call protect_from_queue except when putting X into an insn (in which case convert_move does it). */ rtx convert_to_mode (enum machine_mode mode, rtx x, int unsignedp) { return convert_modes (mode, VOIDmode, x, unsignedp); } /* Return an rtx for a value that would result from converting X from mode OLDMODE to mode MODE. Both modes may be floating, or both integer. UNSIGNEDP is nonzero if X is an unsigned value. This can be done by referring to a part of X in place or by copying to a new temporary with conversion. You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode. This function *must not* call protect_from_queue except when putting X into an insn (in which case convert_move does it). */ rtx convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int unsignedp) { rtx temp; /* If FROM is a SUBREG that indicates that we have already done at least the required extension, strip it. */ if (GET_CODE (x) == SUBREG && SUBREG_PROMOTED_VAR_P (x) && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) >= GET_MODE_SIZE (mode) && SUBREG_PROMOTED_UNSIGNED_P (x) == unsignedp) x = gen_lowpart (mode, x); if (GET_MODE (x) != VOIDmode) oldmode = GET_MODE (x); if (mode == oldmode) return x; /* There is one case that we must handle specially: If we are converting a CONST_INT into a mode whose size is twice HOST_BITS_PER_WIDE_INT and we are to interpret the constant as unsigned, gen_lowpart will do the wrong if the constant appears negative. What we want to do is make the high-order word of the constant zero, not all ones. */ if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT && GET_CODE (x) == CONST_INT && INTVAL (x) < 0) { HOST_WIDE_INT val = INTVAL (x); if (oldmode != VOIDmode && HOST_BITS_PER_WIDE_INT > GET_MODE_BITSIZE (oldmode)) { int width = GET_MODE_BITSIZE (oldmode); /* We need to zero extend VAL. */ val &= ((HOST_WIDE_INT) 1 << width) - 1; } return immed_double_const (val, (HOST_WIDE_INT) 0, mode); } /* We can do this with a gen_lowpart if both desired and current modes are integer, and this is either a constant integer, a register, or a non-volatile MEM. Except for the constant case where MODE is no wider than HOST_BITS_PER_WIDE_INT, we must be narrowing the operand. */ if ((GET_CODE (x) == CONST_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) || (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_CLASS (oldmode) == MODE_INT && (GET_CODE (x) == CONST_DOUBLE || (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (oldmode) && ((MEM_P (x) && ! MEM_VOLATILE_P (x) && direct_load[(int) mode]) || (REG_P (x) && (! HARD_REGISTER_P (x) || HARD_REGNO_MODE_OK (REGNO (x), mode)) && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (GET_MODE (x))))))))) { /* ?? If we don't know OLDMODE, we have to assume here that X does not need sign- or zero-extension. This may not be the case, but it's the best we can do. 
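For example, assuming 8-bit QImode as OLDMODE, widening the constant -1: the value is first masked to 0xff; with UNSIGNEDP the result is 255, otherwise bit 7 causes the high bits to be filled back in and the result remains -1.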
*/ if (GET_CODE (x) == CONST_INT && oldmode != VOIDmode && GET_MODE_SIZE (mode) > GET_MODE_SIZE (oldmode)) { HOST_WIDE_INT val = INTVAL (x); int width = GET_MODE_BITSIZE (oldmode); /* We must sign or zero-extend in this case. Start by zero-extending, then sign extend if we need to. */ val &= ((HOST_WIDE_INT) 1 << width) - 1; if (! unsignedp && (val & ((HOST_WIDE_INT) 1 << (width - 1)))) val |= (HOST_WIDE_INT) (-1) << width; return gen_int_mode (val, mode); } return gen_lowpart (mode, x); } /* Converting from integer constant into mode is always equivalent to an subreg operation. */ if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode) { if (GET_MODE_BITSIZE (mode) != GET_MODE_BITSIZE (oldmode)) abort (); return simplify_gen_subreg (mode, x, oldmode, 0); } temp = gen_reg_rtx (mode); convert_move (temp, x, unsignedp); return temp; } /* STORE_MAX_PIECES is the number of bytes at a time that we can store efficiently. Due to internal GCC limitations, this is MOVE_MAX_PIECES limited by the number of bytes GCC can represent for an immediate constant. */ #define STORE_MAX_PIECES MIN (MOVE_MAX_PIECES, 2 * sizeof (HOST_WIDE_INT)) /* Determine whether the LEN bytes can be moved by using several move instructions. Return nonzero if a call to move_by_pieces should succeed. */ int can_move_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align ATTRIBUTE_UNUSED) { return MOVE_BY_PIECES_P (len, align); } /* Generate several move instructions to copy LEN bytes from block FROM to block TO. (These are MEM rtx's with BLKmode). The caller must pass FROM and TO through protect_from_queue before calling. If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is used to push FROM to the stack. ALIGN is maximum stack alignment we can assume. If ENDP is 0 return to, if ENDP is 1 return memory at the end ala mempcpy, and if ENDP is 2 return memory the end minus one byte ala stpcpy. */ rtx move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len, unsigned int align, int endp) { struct move_by_pieces data; rtx to_addr, from_addr = XEXP (from, 0); unsigned int max_size = MOVE_MAX_PIECES + 1; enum machine_mode mode = VOIDmode, tmode; enum insn_code icode; align = MIN (to ? MEM_ALIGN (to) : align, MEM_ALIGN (from)); data.offset = 0; data.from_addr = from_addr; if (to) { to_addr = XEXP (to, 0); data.to = to; data.autinc_to = (GET_CODE (to_addr) == PRE_INC || GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_INC || GET_CODE (to_addr) == POST_DEC); data.reverse = (GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_DEC); } else { to_addr = NULL_RTX; data.to = NULL_RTX; data.autinc_to = 1; #ifdef STACK_GROWS_DOWNWARD data.reverse = 1; #else data.reverse = 0; #endif } data.to_addr = to_addr; data.from = from; data.autinc_from = (GET_CODE (from_addr) == PRE_INC || GET_CODE (from_addr) == PRE_DEC || GET_CODE (from_addr) == POST_INC || GET_CODE (from_addr) == POST_DEC); data.explicit_inc_from = 0; data.explicit_inc_to = 0; if (data.reverse) data.offset = len; data.len = len; /* If copying requires more than two move insns, copy addresses to registers (to make displacements shorter) and use post-increment if available. */ if (!(data.autinc_from && data.autinc_to) && move_by_pieces_ninsns (len, align) > 2) { /* Find the mode of the largest move... */ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) if (GET_MODE_SIZE (tmode) < max_size) mode = tmode; if (USE_LOAD_PRE_DECREMENT (mode) && data.reverse && ! 
data.autinc_from) { data.from_addr = copy_addr_to_reg (plus_constant (from_addr, len)); data.autinc_from = 1; data.explicit_inc_from = -1; } if (USE_LOAD_POST_INCREMENT (mode) && ! data.autinc_from) { data.from_addr = copy_addr_to_reg (from_addr); data.autinc_from = 1; data.explicit_inc_from = 1; } if (!data.autinc_from && CONSTANT_P (from_addr)) data.from_addr = copy_addr_to_reg (from_addr); if (USE_STORE_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_to) { data.to_addr = copy_addr_to_reg (plus_constant (to_addr, len)); data.autinc_to = 1; data.explicit_inc_to = -1; } if (USE_STORE_POST_INCREMENT (mode) && ! data.reverse && ! data.autinc_to) { data.to_addr = copy_addr_to_reg (to_addr); data.autinc_to = 1; data.explicit_inc_to = 1; } if (!data.autinc_to && CONSTANT_P (to_addr)) data.to_addr = copy_addr_to_reg (to_addr); } if (! SLOW_UNALIGNED_ACCESS (word_mode, align) || align > MOVE_MAX * BITS_PER_UNIT || align >= BIGGEST_ALIGNMENT) align = MOVE_MAX * BITS_PER_UNIT; /* First move what we can in the largest integer mode, then go to successively smaller modes. */ while (max_size > 1) { for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) if (GET_MODE_SIZE (tmode) < max_size) mode = tmode; if (mode == VOIDmode) break; icode = mov_optab->handlers[(int) mode].insn_code; if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode)) move_by_pieces_1 (GEN_FCN (icode), mode, &data); max_size = GET_MODE_SIZE (mode); } /* The code above should have handled everything. */ if (data.len > 0) abort (); if (endp) { rtx to1; if (data.reverse) abort (); if (data.autinc_to) { if (endp == 2) { if (HAVE_POST_INCREMENT && data.explicit_inc_to > 0) emit_insn (gen_add2_insn (data.to_addr, constm1_rtx)); else data.to_addr = copy_addr_to_reg (plus_constant (data.to_addr, -1)); } to1 = adjust_automodify_address (data.to, QImode, data.to_addr, data.offset); } else { if (endp == 2) --data.offset; to1 = adjust_address (data.to, QImode, data.offset); } return to1; } else return data.to; } /* Return number of insns required to move L bytes by pieces. ALIGN (in bits) is maximum alignment we can assume. */ static unsigned HOST_WIDE_INT move_by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align) { unsigned HOST_WIDE_INT n_insns = 0; unsigned HOST_WIDE_INT max_size = MOVE_MAX + 1; if (! SLOW_UNALIGNED_ACCESS (word_mode, align) || align > MOVE_MAX * BITS_PER_UNIT || align >= BIGGEST_ALIGNMENT) align = MOVE_MAX * BITS_PER_UNIT; while (max_size > 1) { enum machine_mode mode = VOIDmode, tmode; enum insn_code icode; for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) if (GET_MODE_SIZE (tmode) < max_size) mode = tmode; if (mode == VOIDmode) break; icode = mov_optab->handlers[(int) mode].insn_code; if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode)) n_insns += l / GET_MODE_SIZE (mode), l %= GET_MODE_SIZE (mode); max_size = GET_MODE_SIZE (mode); } if (l) abort (); return n_insns; } /* Subroutine of move_by_pieces. Move as many bytes as appropriate with move instructions for mode MODE. GENFUN is the gen_... function to make a move insn for that mode. DATA has all the other info. 
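For example, on a hypothetical 32-bit target with sufficiently aligned operands, an 11-byte copy would reach here once for SImode (two 4-byte moves), once for HImode (one 2-byte move) and once for QImode (one 1-byte move), matching the 4 insns predicted by move_by_pieces_ninsns.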
*/ static void move_by_pieces_1 (rtx (*genfun) (rtx, ...), enum machine_mode mode, struct move_by_pieces *data) { unsigned int size = GET_MODE_SIZE (mode); rtx to1 = NULL_RTX, from1; while (data->len >= size) { if (data->reverse) data->offset -= size; if (data->to) { if (data->autinc_to) to1 = adjust_automodify_address (data->to, mode, data->to_addr, data->offset); else to1 = adjust_address (data->to, mode, data->offset); } if (data->autinc_from) from1 = adjust_automodify_address (data->from, mode, data->from_addr, data->offset); else from1 = adjust_address (data->from, mode, data->offset); if (HAVE_PRE_DECREMENT && data->explicit_inc_to < 0) emit_insn (gen_add2_insn (data->to_addr, GEN_INT (-(HOST_WIDE_INT)size))); if (HAVE_PRE_DECREMENT && data->explicit_inc_from < 0) emit_insn (gen_add2_insn (data->from_addr, GEN_INT (-(HOST_WIDE_INT)size))); if (data->to) emit_insn ((*genfun) (to1, from1)); else { #ifdef PUSH_ROUNDING emit_single_push_insn (mode, from1, NULL); #else abort (); #endif } if (HAVE_POST_INCREMENT && data->explicit_inc_to > 0) emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size))); if (HAVE_POST_INCREMENT && data->explicit_inc_from > 0) emit_insn (gen_add2_insn (data->from_addr, GEN_INT (size))); if (! data->reverse) data->offset += size; data->len -= size; } } /* Emit code to move a block Y to a block X. This may be done with string-move instructions, with multiple scalar move instructions, or with a library call. Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode. SIZE is an rtx that says how long they are. ALIGN is the maximum alignment we can assume they have. METHOD describes what kind of copy this is, and what mechanisms may be used. Return the address of the new block, if memcpy is called and returns it, 0 otherwise. */ rtx emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method) { bool may_use_call; rtx retval = 0; unsigned int align; switch (method) { case BLOCK_OP_NORMAL: may_use_call = true; break; case BLOCK_OP_CALL_PARM: may_use_call = block_move_libcall_safe_for_call_parm (); /* Make inhibit_defer_pop nonzero around the library call to force it to pop the arguments right away. */ NO_DEFER_POP; break; case BLOCK_OP_NO_LIBCALL: may_use_call = false; break; default: abort (); } align = MIN (MEM_ALIGN (x), MEM_ALIGN (y)); x = protect_from_queue (x, 1); y = protect_from_queue (y, 0); size = protect_from_queue (size, 0); if (!MEM_P (x)) abort (); if (!MEM_P (y)) abort (); if (size == 0) abort (); /* Make sure we've got BLKmode addresses; store_one_arg can decide that block copy is more efficient for other large modes, e.g. DCmode. */ x = adjust_address (x, BLKmode, 0); y = adjust_address (y, BLKmode, 0); /* Set MEM_SIZE as appropriate for this block copy. The main place this can be incorrect is coming from __builtin_memcpy. */ if (GET_CODE (size) == CONST_INT) { if (INTVAL (size) == 0) return 0; x = shallow_copy_rtx (x); y = shallow_copy_rtx (y); set_mem_size (x, size); set_mem_size (y, size); } if (GET_CODE (size) == CONST_INT && MOVE_BY_PIECES_P (INTVAL (size), align)) move_by_pieces (x, y, INTVAL (size), align, 0); else if (emit_block_move_via_movstr (x, y, size, align)) ; else if (may_use_call) retval = emit_block_move_via_libcall (x, y, size); else emit_block_move_via_loop (x, y, size, align); if (method == BLOCK_OP_CALL_PARM) OK_DEFER_POP; return retval; } /* A subroutine of emit_block_move. Returns true if calling the block move libcall will not clobber any parameters which may have already been placed on the stack. 
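For instance, if any of memcpy's three arguments would itself be passed in memory (or only partially in registers), emitting the libcall while the enclosing call's outgoing arguments are already on the stack could overwrite them, so we must answer false.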
*/ static bool block_move_libcall_safe_for_call_parm (void) { /* If arguments are pushed on the stack, then they're safe. */ if (PUSH_ARGS) return true; /* If registers go on the stack anyway, any argument is sure to clobber an outgoing argument. */ #if defined (REG_PARM_STACK_SPACE) && defined (OUTGOING_REG_PARM_STACK_SPACE) { tree fn = emit_block_move_libcall_fn (false); (void) fn; if (REG_PARM_STACK_SPACE (fn) != 0) return false; } #endif /* If any argument goes in memory, then it might clobber an outgoing argument. */ { CUMULATIVE_ARGS args_so_far; tree fn, arg; fn = emit_block_move_libcall_fn (false); INIT_CUMULATIVE_ARGS (args_so_far, TREE_TYPE (fn), NULL_RTX, 0, 3); arg = TYPE_ARG_TYPES (TREE_TYPE (fn)); for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg)) { enum machine_mode mode = TYPE_MODE (TREE_VALUE (arg)); rtx tmp = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1); if (!tmp || !REG_P (tmp)) return false; #ifdef FUNCTION_ARG_PARTIAL_NREGS if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode, NULL_TREE, 1)) return false; #endif FUNCTION_ARG_ADVANCE (args_so_far, mode, NULL_TREE, 1); } } return true; } /* A subroutine of emit_block_move. Expand a movstr pattern; return true if successful. */ static bool emit_block_move_via_movstr (rtx x, rtx y, rtx size, unsigned int align) { rtx opalign = GEN_INT (align / BITS_PER_UNIT); int save_volatile_ok = volatile_ok; enum machine_mode mode; /* Since this is a move insn, we don't care about volatility. */ volatile_ok = 1; /* Try the most limited insn first, because there's no point including more than one in the machine description unless the more limited one has some advantage. */ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) { enum insn_code code = movstr_optab[(int) mode]; insn_operand_predicate_fn pred; if (code != CODE_FOR_nothing /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT here because if SIZE is less than the mode mask, as it is returned by the macro, it will definitely be less than the actual mode mask. */ && ((GET_CODE (size) == CONST_INT && ((unsigned HOST_WIDE_INT) INTVAL (size) <= (GET_MODE_MASK (mode) >> 1))) || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD) && ((pred = insn_data[(int) code].operand[0].predicate) == 0 || (*pred) (x, BLKmode)) && ((pred = insn_data[(int) code].operand[1].predicate) == 0 || (*pred) (y, BLKmode)) && ((pred = insn_data[(int) code].operand[3].predicate) == 0 || (*pred) (opalign, VOIDmode))) { rtx op2; rtx last = get_last_insn (); rtx pat; op2 = convert_to_mode (mode, size, 1); pred = insn_data[(int) code].operand[2].predicate; if (pred != 0 && ! (*pred) (op2, mode)) op2 = copy_to_mode_reg (mode, op2); /* ??? When called via emit_block_move_for_call, it'd be nice if there were some way to inform the backend, so that it doesn't fail the expansion because it thinks emitting the libcall would be more efficient. */ pat = GEN_FCN ((int) code) (x, y, op2, opalign); if (pat) { emit_insn (pat); volatile_ok = save_volatile_ok; return true; } else delete_insns_since (last); } } volatile_ok = save_volatile_ok; return false; } /* A subroutine of emit_block_move. Expand a call to memcpy. Return the return value from memcpy, 0 otherwise. */ static rtx emit_block_move_via_libcall (rtx dst, rtx src, rtx size) { rtx dst_addr, src_addr; tree call_expr, arg_list, fn, src_tree, dst_tree, size_tree; enum machine_mode size_mode; rtx retval; /* DST, SRC, or SIZE may have been passed through protect_from_queue. 
It is unsafe to save the value generated by protect_from_queue and reuse it later. Consider what happens if emit_queue is called before the return value from protect_from_queue is used. Expansion of the CALL_EXPR below will call emit_queue before we are finished emitting RTL for argument setup. So if we are not careful we could get the wrong value for an argument. To avoid this problem we go ahead and emit code to copy the addresses of DST and SRC and SIZE into new pseudos. Note this is not strictly needed for library calls since they do not call emit_queue before loading their arguments. However, we may need to have library calls call emit_queue in the future since failing to do so could cause problems for targets which define SMALL_REGISTER_CLASSES and pass arguments in registers. */ dst_addr = copy_to_mode_reg (Pmode, XEXP (dst, 0)); src_addr = copy_to_mode_reg (Pmode, XEXP (src, 0)); dst_addr = convert_memory_address (ptr_mode, dst_addr); src_addr = convert_memory_address (ptr_mode, src_addr); dst_tree = make_tree (ptr_type_node, dst_addr); src_tree = make_tree (ptr_type_node, src_addr); size_mode = TYPE_MODE (sizetype); size = convert_to_mode (size_mode, size, 1); size = copy_to_mode_reg (size_mode, size); /* It is incorrect to use the libcall calling conventions to call memcpy in this context. This could be a user call to memcpy and the user may wish to examine the return value from memcpy. For targets where libcalls and normal calls have different conventions for returning pointers, we could end up generating incorrect code. */ size_tree = make_tree (sizetype, size); fn = emit_block_move_libcall_fn (true); arg_list = tree_cons (NULL_TREE, size_tree, NULL_TREE); arg_list = tree_cons (NULL_TREE, src_tree, arg_list); arg_list = tree_cons (NULL_TREE, dst_tree, arg_list); /* Now we have to build up the CALL_EXPR itself. */ call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn); call_expr = build (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)), call_expr, arg_list, NULL_TREE); retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0); /* If we are initializing a readonly value, show the above call clobbered it. Otherwise, a load from it may erroneously be hoisted from a loop, or the delay slot scheduler might overlook conflicts and take nasty decisions. */ if (RTX_UNCHANGING_P (dst)) add_function_usage_to (last_call_insn (), gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_CLOBBER (VOIDmode, dst), NULL_RTX)); return retval; } /* A subroutine of emit_block_move_via_libcall. Create the tree node for the function we use for block copies. The first time FOR_CALL is true, we call assemble_external. */ static GTY(()) tree block_move_fn; void init_block_move_fn (const char *asmspec) { if (!block_move_fn) { tree args, fn; fn = get_identifier ("memcpy"); args = build_function_type_list (ptr_type_node, ptr_type_node, const_ptr_type_node, sizetype, NULL_TREE); fn = build_decl (FUNCTION_DECL, fn, args); DECL_EXTERNAL (fn) = 1; TREE_PUBLIC (fn) = 1; DECL_ARTIFICIAL (fn) = 1; TREE_NOTHROW (fn) = 1; block_move_fn = fn; } if (asmspec) { SET_DECL_RTL (block_move_fn, NULL_RTX); SET_DECL_ASSEMBLER_NAME (block_move_fn, get_identifier (asmspec)); } } static tree emit_block_move_libcall_fn (int for_call) { static bool emitted_extern; if (!block_move_fn) init_block_move_fn (NULL); if (for_call && !emitted_extern) { emitted_extern = true; make_decl_rtl (block_move_fn, NULL); assemble_external (block_move_fn); } return block_move_fn; } /* A subroutine of emit_block_move. Copy the data via an explicit loop. 
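The emitted control flow is roughly (an illustrative sketch): iter = 0; goto cmp; top: x[iter] = y[iter]; iter++; cmp: if (iter < size) goto top; with one QImode move per iteration.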
This is used only when libcalls are forbidden. */ /* ??? It'd be nice to copy in hunks larger than QImode. */ static void emit_block_move_via_loop (rtx x, rtx y, rtx size, unsigned int align ATTRIBUTE_UNUSED) { rtx cmp_label, top_label, iter, x_addr, y_addr, tmp; enum machine_mode iter_mode; iter_mode = GET_MODE (size); if (iter_mode == VOIDmode) iter_mode = word_mode; top_label = gen_label_rtx (); cmp_label = gen_label_rtx (); iter = gen_reg_rtx (iter_mode); emit_move_insn (iter, const0_rtx); x_addr = force_operand (XEXP (x, 0), NULL_RTX); y_addr = force_operand (XEXP (y, 0), NULL_RTX); do_pending_stack_adjust (); emit_jump (cmp_label); emit_label (top_label); tmp = convert_modes (Pmode, iter_mode, iter, true); x_addr = gen_rtx_PLUS (Pmode, x_addr, tmp); y_addr = gen_rtx_PLUS (Pmode, y_addr, tmp); x = change_address (x, QImode, x_addr); y = change_address (y, QImode, y_addr); emit_move_insn (x, y); tmp = expand_simple_binop (iter_mode, PLUS, iter, const1_rtx, iter, true, OPTAB_LIB_WIDEN); if (tmp != iter) emit_move_insn (iter, tmp); emit_label (cmp_label); emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode, true, top_label); } /* Copy all or part of a value X into registers starting at REGNO. The number of registers to be filled is NREGS. */ void move_block_to_reg (int regno, rtx x, int nregs, enum machine_mode mode) { int i; #ifdef HAVE_load_multiple rtx pat; rtx last; #endif if (nregs == 0) return; if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x)) x = validize_mem (force_const_mem (mode, x)); /* See if the machine can do this with a load multiple insn. */ #ifdef HAVE_load_multiple if (HAVE_load_multiple) { last = get_last_insn (); pat = gen_load_multiple (gen_rtx_REG (word_mode, regno), x, GEN_INT (nregs)); if (pat) { emit_insn (pat); return; } else delete_insns_since (last); } #endif for (i = 0; i < nregs; i++) emit_move_insn (gen_rtx_REG (word_mode, regno + i), operand_subword_force (x, i, mode)); } /* Copy all or part of a BLKmode value X out of registers starting at REGNO. The number of registers to be filled is NREGS. */ void move_block_from_reg (int regno, rtx x, int nregs) { int i; if (nregs == 0) return; /* See if the machine can do this with a store multiple insn. */ #ifdef HAVE_store_multiple if (HAVE_store_multiple) { rtx last = get_last_insn (); rtx pat = gen_store_multiple (x, gen_rtx_REG (word_mode, regno), GEN_INT (nregs)); if (pat) { emit_insn (pat); return; } else delete_insns_since (last); } #endif for (i = 0; i < nregs; i++) { rtx tem = operand_subword (x, i, 1, BLKmode); if (tem == 0) abort (); emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i)); } } /* Generate a PARALLEL rtx for a new non-consecutive group of registers from ORIG, where ORIG is a non-consecutive group of registers represented by a PARALLEL. The clone is identical to the original except in that the original set of registers is replaced by a new set of pseudo registers. The new set has the same modes as the original set. */ rtx gen_group_rtx (rtx orig) { int i, length; rtx *tmps; if (GET_CODE (orig) != PARALLEL) abort (); length = XVECLEN (orig, 0); tmps = alloca (sizeof (rtx) * length); /* Skip a NULL entry in first slot. */ i = XEXP (XVECEXP (orig, 0, 0), 0) ? 
0 : 1; if (i) tmps[0] = 0; for (; i < length; i++) { enum machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0)); rtx offset = XEXP (XVECEXP (orig, 0, i), 1); tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset); } return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps)); } /* Emit code to move a block ORIG_SRC of type TYPE to a block DST, where DST is non-consecutive registers represented by a PARALLEL. SSIZE represents the total size of block ORIG_SRC in bytes, or -1 if not known. */ void emit_group_load (rtx dst, rtx orig_src, tree type ATTRIBUTE_UNUSED, int ssize) { rtx *tmps, src; int start, i; if (GET_CODE (dst) != PARALLEL) abort (); /* Check for a NULL entry, used to indicate that the parameter goes both on the stack and in registers. */ if (XEXP (XVECEXP (dst, 0, 0), 0)) start = 0; else start = 1; tmps = alloca (sizeof (rtx) * XVECLEN (dst, 0)); /* Process the pieces. */ for (i = start; i < XVECLEN (dst, 0); i++) { enum machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0)); HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1)); unsigned int bytelen = GET_MODE_SIZE (mode); int shift = 0; /* Handle trailing fragments that run over the size of the struct. */ if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize) { /* Arrange to shift the fragment to where it belongs. extract_bit_field loads to the lsb of the reg. */ if ( #ifdef BLOCK_REG_PADDING BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start) == (BYTES_BIG_ENDIAN ? upward : downward) #else BYTES_BIG_ENDIAN #endif ) shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT; bytelen = ssize - bytepos; if (bytelen <= 0) abort (); } /* If we won't be loading directly from memory, protect the real source from strange tricks we might play; but make sure that the source can be loaded directly into the destination. */ src = orig_src; if (!MEM_P (orig_src) && (!CONSTANT_P (orig_src) || (GET_MODE (orig_src) != mode && GET_MODE (orig_src) != VOIDmode))) { if (GET_MODE (orig_src) == VOIDmode) src = gen_reg_rtx (mode); else src = gen_reg_rtx (GET_MODE (orig_src)); emit_move_insn (src, orig_src); } /* Optimize the access just a bit. */ if (MEM_P (src) && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (src)) || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode)) && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0 && bytelen == GET_MODE_SIZE (mode)) { tmps[i] = gen_reg_rtx (mode); emit_move_insn (tmps[i], adjust_address (src, mode, bytepos)); } else if (GET_CODE (src) == CONCAT) { unsigned int slen = GET_MODE_SIZE (GET_MODE (src)); unsigned int slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0))); if ((bytepos == 0 && bytelen == slen0) || (bytepos != 0 && bytepos + bytelen <= slen)) { /* The following assumes that the concatenated objects all have the same size. In this case, a simple calculation can be used to determine the object and the bit field to be extracted. */ tmps[i] = XEXP (src, bytepos / slen0); if (! CONSTANT_P (tmps[i]) && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)) tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT, (bytepos % slen0) * BITS_PER_UNIT, 1, NULL_RTX, mode, mode, ssize); } else if (bytepos == 0) { rtx mem = assign_stack_temp (GET_MODE (src), slen, 0); emit_move_insn (mem, src); tmps[i] = adjust_address (mem, mode, 0); } else abort (); } /* FIXME: A SIMD parallel will eventually lead to a subreg of a SIMD register, which is currently broken. While we get GCC to emit proper RTL for these cases, let's dump to memory. 
*/ else if (VECTOR_MODE_P (GET_MODE (dst)) && REG_P (src)) { int slen = GET_MODE_SIZE (GET_MODE (src)); rtx mem; mem = assign_stack_temp (GET_MODE (src), slen, 0); emit_move_insn (mem, src); tmps[i] = adjust_address (mem, mode, (int) bytepos); } else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode && XVECLEN (dst, 0) > 1) tmps[i] = simplify_gen_subreg (mode, src, GET_MODE(dst), bytepos); else if (CONSTANT_P (src) || (REG_P (src) && GET_MODE (src) == mode)) tmps[i] = src; else tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT, 1, NULL_RTX, mode, mode, ssize); if (shift) tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i], build_int_2 (shift, 0), tmps[i], 0); } emit_queue (); /* Copy the extracted pieces into the proper (probable) hard regs. */ for (i = start; i < XVECLEN (dst, 0); i++) emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0), tmps[i]); } /* Emit code to move a block SRC to block DST, where SRC and DST are non-consecutive groups of registers, each represented by a PARALLEL. */ void emit_group_move (rtx dst, rtx src) { int i; if (GET_CODE (src) != PARALLEL || GET_CODE (dst) != PARALLEL || XVECLEN (src, 0) != XVECLEN (dst, 0)) abort (); /* Skip first entry if NULL. */ for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++) emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0), XEXP (XVECEXP (src, 0, i), 0)); } /* Emit code to move a block SRC to a block ORIG_DST of type TYPE, where SRC is non-consecutive registers represented by a PARALLEL. SSIZE represents the total size of block ORIG_DST, or -1 if not known. */ void emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize) { rtx *tmps, dst; int start, i; if (GET_CODE (src) != PARALLEL) abort (); /* Check for a NULL entry, used to indicate that the parameter goes both on the stack and in registers. */ if (XEXP (XVECEXP (src, 0, 0), 0)) start = 0; else start = 1; tmps = alloca (sizeof (rtx) * XVECLEN (src, 0)); /* Copy the (probable) hard regs into pseudos. */ for (i = start; i < XVECLEN (src, 0); i++) { rtx reg = XEXP (XVECEXP (src, 0, i), 0); tmps[i] = gen_reg_rtx (GET_MODE (reg)); emit_move_insn (tmps[i], reg); } emit_queue (); /* If we won't be storing directly into memory, protect the real destination from strange tricks we might play. */ dst = orig_dst; if (GET_CODE (dst) == PARALLEL) { rtx temp; /* We can get a PARALLEL dst if there is a conditional expression in a return statement. In that case, the dst and src are the same, so no action is necessary. */ if (rtx_equal_p (dst, src)) return; /* It is unclear if we can ever reach here, but we may as well handle it. Allocate a temporary, and split this into a store/load to/from the temporary. */ temp = assign_stack_temp (GET_MODE (dst), ssize, 0); emit_group_store (temp, src, type, ssize); emit_group_load (dst, temp, type, ssize); return; } else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT) { dst = gen_reg_rtx (GET_MODE (orig_dst)); /* Make life a bit easier for combine. */ emit_move_insn (dst, CONST0_RTX (GET_MODE (orig_dst))); } /* Process the pieces. */ for (i = start; i < XVECLEN (src, 0); i++) { HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1)); enum machine_mode mode = GET_MODE (tmps[i]); unsigned int bytelen = GET_MODE_SIZE (mode); rtx dest = dst; /* Handle trailing fragments that run over the size of the struct. */ if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize) { /* store_bit_field always takes its value from the lsb. Move the fragment to the lsb if it's not already there. 
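For example, assuming 32-bit registers, a 3-byte trailing fragment held in a word padded at the least significant end (the big-endian case below) sits in the three most significant bytes and is first shifted right by (4 - 3) * 8 = 8 bits.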
*/ if ( #ifdef BLOCK_REG_PADDING BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start) == (BYTES_BIG_ENDIAN ? upward : downward) #else BYTES_BIG_ENDIAN #endif ) { int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT; tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i], build_int_2 (shift, 0), tmps[i], 0); } bytelen = ssize - bytepos; } if (GET_CODE (dst) == CONCAT) { if (bytepos + bytelen <= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))) dest = XEXP (dst, 0); else if (bytepos >= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))) { bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0))); dest = XEXP (dst, 1); } else if (bytepos == 0 && XVECLEN (src, 0)) { dest = assign_stack_temp (GET_MODE (dest), GET_MODE_SIZE (GET_MODE (dest)), 0); emit_move_insn (adjust_address (dest, GET_MODE (tmps[i]), bytepos), tmps[i]); dst = dest; break; } else abort (); } /* Optimize the access just a bit. */ if (MEM_P (dest) && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest)) || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode)) && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0 && bytelen == GET_MODE_SIZE (mode)) emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]); else store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT, mode, tmps[i], ssize); } emit_queue (); /* Copy from the pseudo into the (probable) hard reg. */ if (orig_dst != dst) emit_move_insn (orig_dst, dst); } /* Generate code to copy a BLKmode object of TYPE out of a set of registers starting with SRCREG into TGTBLK. If TGTBLK is null, a stack temporary is created. TGTBLK is returned. The purpose of this routine is to handle functions that return BLKmode structures in registers. Some machines (the PA for example) want to return all small structures in registers regardless of the structure's alignment. */ rtx copy_blkmode_from_reg (rtx tgtblk, rtx srcreg, tree type) { unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type); rtx src = NULL, dst = NULL; unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD); unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0; if (tgtblk == 0) { tgtblk = assign_temp (build_qualified_type (type, (TYPE_QUALS (type) | TYPE_QUAL_CONST)), 0, 1, 1); preserve_temp_slots (tgtblk); } /* This code assumes srcreg is at least a full word. If it isn't, copy it into a new pseudo which is a full word. */ if (GET_MODE (srcreg) != BLKmode && GET_MODE_SIZE (GET_MODE (srcreg)) < UNITS_PER_WORD) srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type)); /* If the structure doesn't take up a whole number of words, see whether SRCREG is padded on the left or on the right. If it's on the left, set PADDING_CORRECTION to the number of bits to skip. In most ABIs, the structure will be returned at the least end of the register, which translates to right padding on little-endian targets and left padding on big-endian targets. The opposite holds if the structure is returned at the most significant end of the register. */ if (bytes % UNITS_PER_WORD != 0 && (targetm.calls.return_in_msb (type) ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)) padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT)); /* Copy the structure BITSIZE bites at a time. We could probably emit more efficient code for machines which do not use strict alignment, but it doesn't seem worth the effort at the current time. 
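For example, assuming 32-bit words, a 6-byte structure whose source is padded on the left gives padding_correction = 32 - (6 % 4) * 8 = 16, so extraction skips the first 16 bits of the first source word.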
*/ for (bitpos = 0, xbitpos = padding_correction; bitpos < bytes * BITS_PER_UNIT; bitpos += bitsize, xbitpos += bitsize) { /* We need a new source operand each time xbitpos is on a word boundary and when xbitpos == padding_correction (the first time through). */ if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction) src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, GET_MODE (srcreg)); /* We need a new destination operand each time bitpos is on a word boundary. */ if (bitpos % BITS_PER_WORD == 0) dst = operand_subword (tgtblk, bitpos / BITS_PER_WORD, 1, BLKmode); /* Use xbitpos for the source extraction (right justified) and xbitpos for the destination store (left justified). */ store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, word_mode, extract_bit_field (src, bitsize, xbitpos % BITS_PER_WORD, 1, NULL_RTX, word_mode, word_mode, BITS_PER_WORD), BITS_PER_WORD); } return tgtblk; } /* Add a USE expression for REG to the (possibly empty) list pointed to by CALL_FUSAGE. REG must denote a hard register. */ void use_reg (rtx *call_fusage, rtx reg) { if (!REG_P (reg) || REGNO (reg) >= FIRST_PSEUDO_REGISTER) abort (); *call_fusage = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, reg), *call_fusage); } /* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs, starting at REGNO. All of these registers must be hard registers. */ void use_regs (rtx *call_fusage, int regno, int nregs) { int i; if (regno + nregs > FIRST_PSEUDO_REGISTER) abort (); for (i = 0; i < nregs; i++) use_reg (call_fusage, regno_reg_rtx[regno + i]); } /* Add USE expressions to *CALL_FUSAGE for each REG contained in the PARALLEL REGS. This is for calls that pass values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ void use_group_regs (rtx *call_fusage, rtx regs) { int i; for (i = 0; i < XVECLEN (regs, 0); i++) { rtx reg = XEXP (XVECEXP (regs, 0, i), 0); /* A NULL entry means the parameter goes both on the stack and in registers. This can also be a MEM for targets that pass values partially on the stack and partially in registers. */ if (reg != 0 && REG_P (reg)) use_reg (call_fusage, reg); } } /* Determine whether the LEN bytes generated by CONSTFUN can be stored to memory using several move instructions. CONSTFUNDATA is a pointer which will be passed as argument in every CONSTFUN call. ALIGN is maximum alignment we can assume. Return nonzero if a call to store_by_pieces should succeed. */ int can_store_by_pieces (unsigned HOST_WIDE_INT len, rtx (*constfun) (void *, HOST_WIDE_INT, enum machine_mode), void *constfundata, unsigned int align) { unsigned HOST_WIDE_INT max_size, l; HOST_WIDE_INT offset = 0; enum machine_mode mode, tmode; enum insn_code icode; int reverse; rtx cst; if (len == 0) return 1; if (! STORE_BY_PIECES_P (len, align)) return 0; if (! SLOW_UNALIGNED_ACCESS (word_mode, align) || align > MOVE_MAX * BITS_PER_UNIT || align >= BIGGEST_ALIGNMENT) align = MOVE_MAX * BITS_PER_UNIT; /* We would first store what we can in the largest integer mode, then go to successively smaller modes. 
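For example, assuming a 64-bit target where STORE_MAX_PIECES is 8 and alignment permits, 13 bytes would be checked as one DImode piece, one SImode piece and one QImode piece (8 + 4 + 1), with CONSTFUN queried at each offset.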
*/ for (reverse = 0; reverse <= (HAVE_PRE_DECREMENT || HAVE_POST_DECREMENT); reverse++) { l = len; mode = VOIDmode; max_size = STORE_MAX_PIECES + 1; while (max_size > 1) { for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) if (GET_MODE_SIZE (tmode) < max_size) mode = tmode; if (mode == VOIDmode) break; icode = mov_optab->handlers[(int) mode].insn_code; if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode)) { unsigned int size = GET_MODE_SIZE (mode); while (l >= size) { if (reverse) offset -= size; cst = (*constfun) (constfundata, offset, mode); if (!LEGITIMATE_CONSTANT_P (cst)) return 0; if (!reverse) offset += size; l -= size; } } max_size = GET_MODE_SIZE (mode); } /* The code above should have handled everything. */ if (l != 0) abort (); } return 1; } /* Generate several move instructions to store LEN bytes generated by CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a pointer which will be passed as argument in every CONSTFUN call. ALIGN is maximum alignment we can assume. If ENDP is 0 return to, if ENDP is 1 return memory at the end ala mempcpy, and if ENDP is 2 return memory the end minus one byte ala stpcpy. */ rtx store_by_pieces (rtx to, unsigned HOST_WIDE_INT len, rtx (*constfun) (void *, HOST_WIDE_INT, enum machine_mode), void *constfundata, unsigned int align, int endp) { struct store_by_pieces data; if (len == 0) { if (endp == 2) abort (); return to; } if (! STORE_BY_PIECES_P (len, align)) abort (); to = protect_from_queue (to, 1); data.constfun = constfun; data.constfundata = constfundata; data.len = len; data.to = to; store_by_pieces_1 (&data, align); if (endp) { rtx to1; if (data.reverse) abort (); if (data.autinc_to) { if (endp == 2) { if (HAVE_POST_INCREMENT && data.explicit_inc_to > 0) emit_insn (gen_add2_insn (data.to_addr, constm1_rtx)); else data.to_addr = copy_addr_to_reg (plus_constant (data.to_addr, -1)); } to1 = adjust_automodify_address (data.to, QImode, data.to_addr, data.offset); } else { if (endp == 2) --data.offset; to1 = adjust_address (data.to, QImode, data.offset); } return to1; } else return data.to; } /* Generate several move instructions to clear LEN bytes of block TO. (A MEM rtx with BLKmode). The caller must pass TO through protect_from_queue before calling. ALIGN is maximum alignment we can assume. */ static void clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align) { struct store_by_pieces data; if (len == 0) return; data.constfun = clear_by_pieces_1; data.constfundata = NULL; data.len = len; data.to = to; store_by_pieces_1 (&data, align); } /* Callback routine for clear_by_pieces. Return const0_rtx unconditionally. */ static rtx clear_by_pieces_1 (void *data ATTRIBUTE_UNUSED, HOST_WIDE_INT offset ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED) { return const0_rtx; } /* Subroutine of clear_by_pieces and store_by_pieces. Generate several move instructions to store LEN bytes of block TO. (A MEM rtx with BLKmode). The caller must pass TO through protect_from_queue before calling. ALIGN is maximum alignment we can assume. 
*/ static void store_by_pieces_1 (struct store_by_pieces *data ATTRIBUTE_UNUSED, unsigned int align ATTRIBUTE_UNUSED) { rtx to_addr = XEXP (data->to, 0); unsigned HOST_WIDE_INT max_size = STORE_MAX_PIECES + 1; enum machine_mode mode = VOIDmode, tmode; enum insn_code icode; data->offset = 0; data->to_addr = to_addr; data->autinc_to = (GET_CODE (to_addr) == PRE_INC || GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_INC || GET_CODE (to_addr) == POST_DEC); data->explicit_inc_to = 0; data->reverse = (GET_CODE (to_addr) == PRE_DEC || GET_CODE (to_addr) == POST_DEC); if (data->reverse) data->offset = data->len; /* If storing requires more than two move insns, copy addresses to registers (to make displacements shorter) and use post-increment if available. */ if (!data->autinc_to && move_by_pieces_ninsns (data->len, align) > 2) { /* Determine the main mode we'll be using. */ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) if (GET_MODE_SIZE (tmode) < max_size) mode = tmode; if (USE_STORE_PRE_DECREMENT (mode) && data->reverse && ! data->autinc_to) { data->to_addr = copy_addr_to_reg (plus_constant (to_addr, data->len)); data->autinc_to = 1; data->explicit_inc_to = -1; } if (USE_STORE_POST_INCREMENT (mode) && ! data->reverse && ! data->autinc_to) { data->to_addr = copy_addr_to_reg (to_addr); data->autinc_to = 1; data->explicit_inc_to = 1; } if ( !data->autinc_to && CONSTANT_P (to_addr)) data->to_addr = copy_addr_to_reg (to_addr); } if (! SLOW_UNALIGNED_ACCESS (word_mode, align) || align > MOVE_MAX * BITS_PER_UNIT || align >= BIGGEST_ALIGNMENT) align = MOVE_MAX * BITS_PER_UNIT; /* First store what we can in the largest integer mode, then go to successively smaller modes. */ while (max_size > 1) { for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) if (GET_MODE_SIZE (tmode) < max_size) mode = tmode; if (mode == VOIDmode) break; icode = mov_optab->handlers[(int) mode].insn_code; if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode)) store_by_pieces_2 (GEN_FCN (icode), mode, data); max_size = GET_MODE_SIZE (mode); } /* The code above should have handled everything. */ if (data->len != 0) abort (); } /* Subroutine of store_by_pieces_1. Store as many bytes as appropriate with move instructions for mode MODE. GENFUN is the gen_... function to make a move insn for that mode. DATA has all the other info. */ static void store_by_pieces_2 (rtx (*genfun) (rtx, ...), enum machine_mode mode, struct store_by_pieces *data) { unsigned int size = GET_MODE_SIZE (mode); rtx to1, cst; while (data->len >= size) { if (data->reverse) data->offset -= size; if (data->autinc_to) to1 = adjust_automodify_address (data->to, mode, data->to_addr, data->offset); else to1 = adjust_address (data->to, mode, data->offset); if (HAVE_PRE_DECREMENT && data->explicit_inc_to < 0) emit_insn (gen_add2_insn (data->to_addr, GEN_INT (-(HOST_WIDE_INT) size))); cst = (*data->constfun) (data->constfundata, data->offset, mode); emit_insn ((*genfun) (to1, cst)); if (HAVE_POST_INCREMENT && data->explicit_inc_to > 0) emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size))); if (! data->reverse) data->offset += size; data->len -= size; } } /* Write zeros through the storage of OBJECT. If OBJECT has BLKmode, SIZE is its length in bytes. */ rtx clear_storage (rtx object, rtx size) { rtx retval = 0; unsigned int align = (MEM_P (object) ? 
MEM_ALIGN (object) : GET_MODE_ALIGNMENT (GET_MODE (object))); /* If OBJECT is not BLKmode and SIZE is the same size as its mode, just move a zero. Otherwise, do this a piece at a time. */ if (GET_MODE (object) != BLKmode && GET_CODE (size) == CONST_INT && INTVAL (size) == (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (object))) emit_move_insn (object, CONST0_RTX (GET_MODE (object))); else { object = protect_from_queue (object, 1); size = protect_from_queue (size, 0); if (size == const0_rtx) ; else if (GET_CODE (size) == CONST_INT && CLEAR_BY_PIECES_P (INTVAL (size), align)) clear_by_pieces (object, INTVAL (size), align); else if (clear_storage_via_clrstr (object, size, align)) ; else retval = clear_storage_via_libcall (object, size); } return retval; } /* A subroutine of clear_storage. Expand a clrstr pattern; return true if successful. */ static bool clear_storage_via_clrstr (rtx object, rtx size, unsigned int align) { /* Try the most limited insn first, because there's no point including more than one in the machine description unless the more limited one has some advantage. */ rtx opalign = GEN_INT (align / BITS_PER_UNIT); enum machine_mode mode; for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) { enum insn_code code = clrstr_optab[(int) mode]; insn_operand_predicate_fn pred; if (code != CODE_FOR_nothing /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT here because if SIZE is less than the mode mask, as it is returned by the macro, it will definitely be less than the actual mode mask. */ && ((GET_CODE (size) == CONST_INT && ((unsigned HOST_WIDE_INT) INTVAL (size) <= (GET_MODE_MASK (mode) >> 1))) || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD) && ((pred = insn_data[(int) code].operand[0].predicate) == 0 || (*pred) (object, BLKmode)) && ((pred = insn_data[(int) code].operand[2].predicate) == 0 || (*pred) (opalign, VOIDmode))) { rtx op1; rtx last = get_last_insn (); rtx pat; op1 = convert_to_mode (mode, size, 1); pred = insn_data[(int) code].operand[1].predicate; if (pred != 0 && ! (*pred) (op1, mode)) op1 = copy_to_mode_reg (mode, op1); pat = GEN_FCN ((int) code) (object, op1, opalign); if (pat) { emit_insn (pat); return true; } else delete_insns_since (last); } } return false; } /* A subroutine of clear_storage. Expand a call to memset. Return the return value of memset, 0 otherwise. */ static rtx clear_storage_via_libcall (rtx object, rtx size) { tree call_expr, arg_list, fn, object_tree, size_tree; enum machine_mode size_mode; rtx retval; /* OBJECT or SIZE may have been passed through protect_from_queue. It is unsafe to save the value generated by protect_from_queue and reuse it later. Consider what happens if emit_queue is called before the return value from protect_from_queue is used. Expansion of the CALL_EXPR below will call emit_queue before we are finished emitting RTL for argument setup. So if we are not careful we could get the wrong value for an argument. To avoid this problem we go ahead and emit code to copy OBJECT and SIZE into new pseudos. Note this is not strictly needed for library calls since they do not call emit_queue before loading their arguments. However, we may need to have library calls call emit_queue in the future since failing to do so could cause problems for targets which define SMALL_REGISTER_CLASSES and pass arguments in registers. 
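To make the hazard concrete (an added illustration, not original text): if the saved address mentioned a register with a pending POST_INC still sitting in the queue, the emit_queue run during expansion of the CALL_EXPR below would bump that register before the argument is actually loaded, and memset would then see the post-increment address rather than the intended one; copying OBJECT and SIZE into fresh pseudos first freezes the values we want.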
*/ object = copy_to_mode_reg (Pmode, XEXP (object, 0)); size_mode = TYPE_MODE (sizetype); size = convert_to_mode (size_mode, size, 1); size = copy_to_mode_reg (size_mode, size); /* It is incorrect to use the libcall calling conventions to call memset in this context. This could be a user call to memset and the user may wish to examine the return value from memset. For targets where libcalls and normal calls have different conventions for returning pointers, we could end up generating incorrect code. */ object_tree = make_tree (ptr_type_node, object); size_tree = make_tree (sizetype, size); fn = clear_storage_libcall_fn (true); arg_list = tree_cons (NULL_TREE, size_tree, NULL_TREE); arg_list = tree_cons (NULL_TREE, integer_zero_node, arg_list); arg_list = tree_cons (NULL_TREE, object_tree, arg_list); /* Now we have to build up the CALL_EXPR itself. */ call_expr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (fn)), fn); call_expr = build (CALL_EXPR, TREE_TYPE (TREE_TYPE (fn)), call_expr, arg_list, NULL_TREE); retval = expand_expr (call_expr, NULL_RTX, VOIDmode, 0); /* If we are initializing a readonly value, show the above call clobbered it. Otherwise, a load from it may erroneously be hoisted from a loop. */ if (RTX_UNCHANGING_P (object)) emit_insn (gen_rtx_CLOBBER (VOIDmode, object)); return retval; } /* A subroutine of clear_storage_via_libcall. Create the tree node for the function we use for block clears. The first time FOR_CALL is true, we call assemble_external. */ static GTY(()) tree block_clear_fn; void init_block_clear_fn (const char *asmspec) { if (!block_clear_fn) { tree fn, args; fn = get_identifier ("memset"); args = build_function_type_list (ptr_type_node, ptr_type_node, integer_type_node, sizetype, NULL_TREE); fn = build_decl (FUNCTION_DECL, fn, args); DECL_EXTERNAL (fn) = 1; TREE_PUBLIC (fn) = 1; DECL_ARTIFICIAL (fn) = 1; TREE_NOTHROW (fn) = 1; block_clear_fn = fn; } if (asmspec) { SET_DECL_RTL (block_clear_fn, NULL_RTX); SET_DECL_ASSEMBLER_NAME (block_clear_fn, get_identifier (asmspec)); } } static tree clear_storage_libcall_fn (int for_call) { static bool emitted_extern; if (!block_clear_fn) init_block_clear_fn (NULL); if (for_call && !emitted_extern) { emitted_extern = true; make_decl_rtl (block_clear_fn, NULL); assemble_external (block_clear_fn); } return block_clear_fn; } /* Generate code to copy Y into X. Both Y and X must have the same mode, except that Y can be a constant with VOIDmode. This mode cannot be BLKmode; use emit_block_move for that. Return the last instruction emitted. */ rtx emit_move_insn (rtx x, rtx y) { enum machine_mode mode = GET_MODE (x); rtx y_cst = NULL_RTX; rtx last_insn, set; x = protect_from_queue (x, 1); y = protect_from_queue (y, 0); if (mode == BLKmode || (GET_MODE (y) != mode && GET_MODE (y) != VOIDmode)) abort (); if (CONSTANT_P (y)) { if (optimize && SCALAR_FLOAT_MODE_P (GET_MODE (x)) && (last_insn = compress_float_constant (x, y))) return last_insn; y_cst = y; if (!LEGITIMATE_CONSTANT_P (y)) { y = force_const_mem (mode, y); /* If the target's cannot_force_const_mem prevented the spill, assume that the target's move expanders will also take care of the non-legitimate constant. */ if (!y) y = y_cst; } } /* If X or Y are memory references, verify that their addresses are valid for the machine. */ if (MEM_P (x) && ((! memory_address_p (GET_MODE (x), XEXP (x, 0)) && ! push_operand (x, GET_MODE (x))) || (flag_force_addr && CONSTANT_ADDRESS_P (XEXP (x, 0))))) x = validize_mem (x); if (MEM_P (y) && (! 
memory_address_p (GET_MODE (y), XEXP (y, 0)) || (flag_force_addr && CONSTANT_ADDRESS_P (XEXP (y, 0))))) y = validize_mem (y); if (mode == BLKmode) abort (); last_insn = emit_move_insn_1 (x, y); if (y_cst && REG_P (x) && (set = single_set (last_insn)) != NULL_RTX && SET_DEST (set) == x && ! rtx_equal_p (y_cst, SET_SRC (set))) set_unique_reg_note (last_insn, REG_EQUAL, y_cst); return last_insn; } /* Low level part of emit_move_insn. Called just like emit_move_insn, but assumes X and Y are basically valid. */ rtx emit_move_insn_1 (rtx x, rtx y) { enum machine_mode mode = GET_MODE (x); enum machine_mode submode; enum mode_class class = GET_MODE_CLASS (mode); if ((unsigned int) mode >= (unsigned int) MAX_MACHINE_MODE) abort (); if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) return emit_insn (GEN_FCN (mov_optab->handlers[(int) mode].insn_code) (x, y)); /* Expand complex moves by moving real part and imag part, if possible. */ else if ((class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT) && BLKmode != (submode = GET_MODE_INNER (mode)) && (mov_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing)) { /* Don't split destination if it is a stack push. */ int stack = push_operand (x, GET_MODE (x)); #ifdef PUSH_ROUNDING /* In case we output to the stack, but the size is smaller than the machine can push exactly, we need to use move instructions. */ if (stack && (PUSH_ROUNDING (GET_MODE_SIZE (submode)) != GET_MODE_SIZE (submode))) { rtx temp; HOST_WIDE_INT offset1, offset2; /* Do not use anti_adjust_stack, since we don't want to update stack_pointer_delta. */ temp = expand_binop (Pmode, #ifdef STACK_GROWS_DOWNWARD sub_optab, #else add_optab, #endif stack_pointer_rtx, GEN_INT (PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x)))), stack_pointer_rtx, 0, OPTAB_LIB_WIDEN); if (temp != stack_pointer_rtx) emit_move_insn (stack_pointer_rtx, temp); #ifdef STACK_GROWS_DOWNWARD offset1 = 0; offset2 = GET_MODE_SIZE (submode); #else offset1 = -PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x))); offset2 = (-PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x))) + GET_MODE_SIZE (submode)); #endif emit_move_insn (change_address (x, submode, gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset1))), gen_realpart (submode, y)); emit_move_insn (change_address (x, submode, gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset2))), gen_imagpart (submode, y)); } else #endif /* If this is a stack, push the highpart first, so it will be in the argument order. In that case, change_address is used only to convert the mode, not to change the address. */ if (stack) { /* Note that the real part always precedes the imag part in memory regardless of machine's endianness. */ #ifdef STACK_GROWS_DOWNWARD emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)), gen_imagpart (submode, y)); emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)), gen_realpart (submode, y)); #else emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)), gen_realpart (submode, y)); emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)), gen_imagpart (submode, y)); #endif } else { rtx realpart_x, realpart_y; rtx imagpart_x, imagpart_y; /* If this is a complex value with each part being smaller than a word, the usual calling sequence will likely pack the pieces into a single register. Unfortunately, SUBREG of hard registers only deals in terms of words, so we have a problem converting input arguments to the CONCAT of two registers that is used elsewhere for complex values. If this is before reload, we can copy it into memory and reload. 
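For instance (a hedged example, not in the original comment): a complex char value in CQImode is only 16 bits wide and may arrive packed in a single hard register; because a SUBREG of a hard register can only select whole words, the code below moves it through an HImode stack temporary and then handles the real and imaginary QImode halves as ordinary memory references.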
FIXME, we should see about using extract and insert on integer registers, but complex short and complex char variables should be rarely used. */ if (GET_MODE_BITSIZE (mode) < 2 * BITS_PER_WORD && (reload_in_progress | reload_completed) == 0) { int packed_dest_p = (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER); int packed_src_p = (REG_P (y) && REGNO (y) < FIRST_PSEUDO_REGISTER); if (packed_dest_p || packed_src_p) { enum mode_class reg_class = ((class == MODE_COMPLEX_FLOAT) ? MODE_FLOAT : MODE_INT); enum machine_mode reg_mode = mode_for_size (GET_MODE_BITSIZE (mode), reg_class, 1); if (reg_mode != BLKmode) { rtx mem = assign_stack_temp (reg_mode, GET_MODE_SIZE (mode), 0); rtx cmem = adjust_address (mem, mode, 0); if (packed_dest_p) { rtx sreg = gen_rtx_SUBREG (reg_mode, x, 0); emit_move_insn_1 (cmem, y); return emit_move_insn_1 (sreg, mem); } else { rtx sreg = gen_rtx_SUBREG (reg_mode, y, 0); emit_move_insn_1 (mem, sreg); return emit_move_insn_1 (x, cmem); } } } } realpart_x = gen_realpart (submode, x); realpart_y = gen_realpart (submode, y); imagpart_x = gen_imagpart (submode, x); imagpart_y = gen_imagpart (submode, y); /* Show the output dies here. This is necessary for SUBREGs of pseudos since we cannot track their lifetimes correctly; hard regs shouldn't appear here except as return values. We never want to emit such a clobber after reload. */ if (x != y && ! (reload_in_progress || reload_completed) && (GET_CODE (realpart_x) == SUBREG || GET_CODE (imagpart_x) == SUBREG)) emit_insn (gen_rtx_CLOBBER (VOIDmode, x)); emit_move_insn (realpart_x, realpart_y); emit_move_insn (imagpart_x, imagpart_y); } return get_last_insn (); } /* Handle MODE_CC modes: If we don't have a special move insn for this mode, find a mode to do it in. If we have a movcc, use it. Otherwise, find the MODE_INT mode of the same width. */ else if (GET_MODE_CLASS (mode) == MODE_CC && mov_optab->handlers[(int) mode].insn_code == CODE_FOR_nothing) { enum insn_code insn_code; enum machine_mode tmode = VOIDmode; rtx x1 = x, y1 = y; if (mode != CCmode && mov_optab->handlers[(int) CCmode].insn_code != CODE_FOR_nothing) tmode = CCmode; else for (tmode = QImode; tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) if (GET_MODE_SIZE (tmode) == GET_MODE_SIZE (mode)) break; if (tmode == VOIDmode) abort (); /* Get X and Y in TMODE. We can't use gen_lowpart here because it may call change_address which is not appropriate if we were called when a reload was in progress. We don't have to worry about changing the address since the size in bytes is supposed to be the same. Copy the MEM to change the mode and move any substitutions from the old MEM to the new one. */ if (reload_in_progress) { x = gen_lowpart_common (tmode, x1); if (x == 0 && MEM_P (x1)) { x = adjust_address_nv (x1, tmode, 0); copy_replacements (x1, x); } y = gen_lowpart_common (tmode, y1); if (y == 0 && MEM_P (y1)) { y = adjust_address_nv (y1, tmode, 0); copy_replacements (y1, y); } } else { x = gen_lowpart (tmode, x); y = gen_lowpart (tmode, y); } insn_code = mov_optab->handlers[(int) tmode].insn_code; return emit_insn (GEN_FCN (insn_code) (x, y)); } /* Try using a move pattern for the corresponding integer mode. This is only safe when simplify_subreg can convert MODE constants into integer constants. At present, it can only do this reliably if the value fits within a HOST_WIDE_INT. 
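A hedged example of what this enables (not part of the original comment): on a target with a movsi pattern but no movsf pattern, an SFmode move of the constant 1.0f can be rewritten as an SImode move of the bit pattern 0x3f800000, with simplify_gen_subreg producing the integer views of both operands.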
*/ else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && (submode = int_mode_for_mode (mode)) != BLKmode && mov_optab->handlers[submode].insn_code != CODE_FOR_nothing) return emit_insn (GEN_FCN (mov_optab->handlers[submode].insn_code) (simplify_gen_subreg (submode, x, mode, 0), simplify_gen_subreg (submode, y, mode, 0))); /* This will handle any multi-word or full-word mode that lacks a move_insn pattern. However, you will get better code if you define such patterns, even if they must turn into multiple assembler instructions. */ else if (GET_MODE_SIZE (mode) >= UNITS_PER_WORD) { rtx last_insn = 0; rtx seq, inner; int need_clobber; int i; #ifdef PUSH_ROUNDING /* If X is a push on the stack, do the push now and replace X with a reference to the stack pointer. */ if (push_operand (x, GET_MODE (x))) { rtx temp; enum rtx_code code; /* Do not use anti_adjust_stack, since we don't want to update stack_pointer_delta. */ temp = expand_binop (Pmode, #ifdef STACK_GROWS_DOWNWARD sub_optab, #else add_optab, #endif stack_pointer_rtx, GEN_INT (PUSH_ROUNDING (GET_MODE_SIZE (GET_MODE (x)))), stack_pointer_rtx, 0, OPTAB_LIB_WIDEN); if (temp != stack_pointer_rtx) emit_move_insn (stack_pointer_rtx, temp); code = GET_CODE (XEXP (x, 0)); /* Just hope that small offsets off SP are OK. */ if (code == POST_INC) temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-((HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x))))); else if (code == POST_DEC) temp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (GET_MODE_SIZE (GET_MODE (x)))); else temp = stack_pointer_rtx; x = change_address (x, VOIDmode, temp); } #endif /* If we are in reload, see if either operand is a MEM whose address is scheduled for replacement. */ if (reload_in_progress && MEM_P (x) && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0)) x = replace_equiv_address_nv (x, inner); if (reload_in_progress && MEM_P (y) && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0)) y = replace_equiv_address_nv (y, inner); start_sequence (); need_clobber = 0; for (i = 0; i < (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD; i++) { rtx xpart = operand_subword (x, i, 1, mode); rtx ypart = operand_subword (y, i, 1, mode); /* If we can't get a part of Y, put Y into memory if it is a constant. Otherwise, force it into a register. If we still can't get a part of Y, abort. */ if (ypart == 0 && CONSTANT_P (y)) { y = force_const_mem (mode, y); ypart = operand_subword (y, i, 1, mode); } else if (ypart == 0) ypart = operand_subword_force (y, i, mode); if (xpart == 0 || ypart == 0) abort (); need_clobber |= (GET_CODE (xpart) == SUBREG); last_insn = emit_move_insn (xpart, ypart); } seq = get_insns (); end_sequence (); /* Show the output dies here. This is necessary for SUBREGs of pseudos since we cannot track their lifetimes correctly; hard regs shouldn't appear here except as return values. We never want to emit such a clobber after reload. */ if (x != y && ! (reload_in_progress || reload_completed) && need_clobber != 0) emit_insn (gen_rtx_CLOBBER (VOIDmode, x)); emit_insn (seq); return last_insn; } else abort (); } /* If Y is representable exactly in a narrower mode, and the target can perform the extension directly from constant or memory, then emit the move as an extension. 
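As an illustrative case (an assumption about the target, not original text): if X has DFmode and Y is the DFmode constant 2.0, the search below notices that SFmode represents 2.0 exactly, so a target whose SFmode-to-DFmode extension accepts a constant or constant-pool operand can load the narrower constant and extend it, rather than materializing the full DFmode constant.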
*/ static rtx compress_float_constant (rtx x, rtx y) { enum machine_mode dstmode = GET_MODE (x); enum machine_mode orig_srcmode = GET_MODE (y); enum machine_mode srcmode; REAL_VALUE_TYPE r; REAL_VALUE_FROM_CONST_DOUBLE (r, y); for (srcmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (orig_srcmode)); srcmode != orig_srcmode; srcmode = GET_MODE_WIDER_MODE (srcmode)) { enum insn_code ic; rtx trunc_y, last_insn; /* Skip if the target can't extend this way. */ ic = can_extend_p (dstmode, srcmode, 0); if (ic == CODE_FOR_nothing) continue; /* Skip if the narrowed value isn't exact. */ if (! exact_real_truncate (srcmode, &r)) continue; trunc_y = CONST_DOUBLE_FROM_REAL_VALUE (r, srcmode); if (LEGITIMATE_CONSTANT_P (trunc_y)) { /* Skip if the target needs extra instructions to perform the extension. */ if (! (*insn_data[ic].operand[1].predicate) (trunc_y, srcmode)) continue; } else if (float_extend_from_mem[dstmode][srcmode]) trunc_y = validize_mem (force_const_mem (srcmode, trunc_y)); else continue; emit_unop_insn (ic, x, trunc_y, UNKNOWN); last_insn = get_last_insn (); if (REG_P (x)) set_unique_reg_note (last_insn, REG_EQUAL, y); return last_insn; } return NULL_RTX; } /* Pushing data onto the stack. */ /* Push a block of length SIZE (perhaps variable) and return an rtx to address the beginning of the block. Note that it is not possible for the value returned to be a QUEUED. The value may be virtual_outgoing_args_rtx. EXTRA is the number of bytes of padding to push in addition to SIZE. BELOW nonzero means this padding comes at low addresses; otherwise, the padding comes at high addresses. */ rtx push_block (rtx size, int extra, int below) { rtx temp; size = convert_modes (Pmode, ptr_mode, size, 1); if (CONSTANT_P (size)) anti_adjust_stack (plus_constant (size, extra)); else if (REG_P (size) && extra == 0) anti_adjust_stack (size); else { temp = copy_to_mode_reg (Pmode, size); if (extra != 0) temp = expand_binop (Pmode, add_optab, temp, GEN_INT (extra), temp, 0, OPTAB_LIB_WIDEN); anti_adjust_stack (temp); } #ifndef STACK_GROWS_DOWNWARD if (0) #else if (1) #endif { temp = virtual_outgoing_args_rtx; if (extra != 0 && below) temp = plus_constant (temp, extra); } else { if (GET_CODE (size) == CONST_INT) temp = plus_constant (virtual_outgoing_args_rtx, -INTVAL (size) - (below ? 0 : extra)); else if (extra != 0 && !below) temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx, negate_rtx (Pmode, plus_constant (size, extra))); else temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx, negate_rtx (Pmode, size)); } return memory_address (GET_CLASS_NARROWEST_MODE (MODE_INT), temp); } #ifdef PUSH_ROUNDING /* Emit single push insn. */ static void emit_single_push_insn (enum machine_mode mode, rtx x, tree type) { rtx dest_addr; unsigned rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode)); rtx dest; enum insn_code icode; insn_operand_predicate_fn pred; stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode)); /* If there is push pattern, use it. Otherwise try old way of throwing MEM representing push operation to move expander. */ icode = push_optab->handlers[(int) mode].insn_code; if (icode != CODE_FOR_nothing) { if (((pred = insn_data[(int) icode].operand[0].predicate) && !((*pred) (x, mode)))) x = force_reg (mode, x); emit_insn (GEN_FCN (icode) (x)); return; } if (GET_MODE_SIZE (mode) == rounded_size) dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx); /* If we are to pad downward, adjust the stack pointer first and then store X into the stack location using an offset. 
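(A hedged arithmetic aside, not part of the original comment: pushing an HImode value whose PUSH_ROUNDING size is 4 gives a padding_size of 2, so on a downward-growing stack the value is stored at the stack pointer plus 2, or plus 6 when STACK_PUSH_CODE is POST_DEC and the pointer has already moved past the slot.)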
This is because emit_move_insn does not know how to pad; it does not have access to type. */ else if (FUNCTION_ARG_PADDING (mode, type) == downward) { unsigned padding_size = rounded_size - GET_MODE_SIZE (mode); HOST_WIDE_INT offset; emit_move_insn (stack_pointer_rtx, expand_binop (Pmode, #ifdef STACK_GROWS_DOWNWARD sub_optab, #else add_optab, #endif stack_pointer_rtx, GEN_INT (rounded_size), NULL_RTX, 0, OPTAB_LIB_WIDEN)); offset = (HOST_WIDE_INT) padding_size; #ifdef STACK_GROWS_DOWNWARD if (STACK_PUSH_CODE == POST_DEC) /* We have already decremented the stack pointer, so get the previous value. */ offset += (HOST_WIDE_INT) rounded_size; #else if (STACK_PUSH_CODE == POST_INC) /* We have already incremented the stack pointer, so get the previous value. */ offset -= (HOST_WIDE_INT) rounded_size; #endif dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset)); } else { #ifdef STACK_GROWS_DOWNWARD /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */ dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-(HOST_WIDE_INT) rounded_size)); #else /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */ dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (rounded_size)); #endif dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr); } dest = gen_rtx_MEM (mode, dest_addr); if (type != 0) { set_mem_attributes (dest, type, 1); if (flag_optimize_sibling_calls) /* Function incoming arguments may overlap with sibling call outgoing arguments and we cannot allow reordering of reads from function arguments with stores to outgoing arguments of sibling calls. */ set_mem_alias_set (dest, 0); } emit_move_insn (dest, x); } #endif /* Generate code to push X onto the stack, assuming it has mode MODE and type TYPE. MODE is redundant except when X is a CONST_INT (since they don't carry mode info). SIZE is an rtx for the size of data to be copied (in bytes), needed only if X is BLKmode. ALIGN (in bits) is maximum alignment we can assume. If PARTIAL and REG are both nonzero, then copy that many of the first words of X into registers starting with REG, and push the rest of X. The amount of space pushed is decreased by PARTIAL words, rounded *down* to a multiple of PARM_BOUNDARY. REG must be a hard register in this case. If REG is zero but PARTIAL is not, take any all others actions for an argument partially in registers, but do not actually load any registers. EXTRA is the amount in bytes of extra space to leave next to this arg. This is ignored if an argument block has already been allocated. On a machine that lacks real push insns, ARGS_ADDR is the address of the bottom of the argument block for this call. We use indexing off there to store the arg. On machines with push insns, ARGS_ADDR is 0 when a argument block has not been preallocated. ARGS_SO_FAR is the size of args previously pushed for this call. REG_PARM_STACK_SPACE is nonzero if functions require stack space for arguments passed in registers. If nonzero, it will be the number of bytes required. */ void emit_push_insn (rtx x, enum machine_mode mode, tree type, rtx size, unsigned int align, int partial, rtx reg, int extra, rtx args_addr, rtx args_so_far, int reg_parm_stack_space, rtx alignment_pad) { rtx xinner; enum direction stack_direction #ifdef STACK_GROWS_DOWNWARD = downward; #else = upward; #endif /* Decide where to pad the argument: `downward' for below, `upward' for above, or `none' for don't pad it. Default is below for small data on big-endian machines; else above. 
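An illustrative reading of that default (an added note, not a statement about any particular ABI): a 1-byte argument occupying a 4-byte PARM_BOUNDARY slot would normally have its three padding bytes placed at the lower addresses of the slot on a big-endian machine, leaving the byte at the slot's high-address end, whereas on a little-endian machine the padding goes above and the byte sits at the start of the slot.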
*/ enum direction where_pad = FUNCTION_ARG_PADDING (mode, type); /* Invert direction if stack is post-decrement. FIXME: why? */ if (STACK_PUSH_CODE == POST_DEC) if (where_pad != none) where_pad = (where_pad == downward ? upward : downward); xinner = x = protect_from_queue (x, 0); if (mode == BLKmode) { /* Copy a block into the stack, entirely or partially. */ rtx temp; int used = partial * UNITS_PER_WORD; int offset; int skip; if (reg && GET_CODE (reg) == PARALLEL) { /* Use the size of the elt to compute offset. */ rtx elt = XEXP (XVECEXP (reg, 0, 0), 0); used = partial * GET_MODE_SIZE (GET_MODE (elt)); offset = used % (PARM_BOUNDARY / BITS_PER_UNIT); } else offset = used % (PARM_BOUNDARY / BITS_PER_UNIT); if (size == 0) abort (); used -= offset; /* USED is now the # of bytes we need not copy to the stack because registers will take care of them. */ if (partial != 0) xinner = adjust_address (xinner, BLKmode, used); /* If the partial register-part of the arg counts in its stack size, skip the part of stack space corresponding to the registers. Otherwise, start copying to the beginning of the stack space, by setting SKIP to 0. */ skip = (reg_parm_stack_space == 0) ? 0 : used; #ifdef PUSH_ROUNDING /* Do it with several push insns if that doesn't take lots of insns and if there is no difficulty with push insns that skip bytes on the stack for alignment purposes. */ if (args_addr == 0 && PUSH_ARGS && GET_CODE (size) == CONST_INT && skip == 0 && MEM_ALIGN (xinner) >= align && (MOVE_BY_PIECES_P ((unsigned) INTVAL (size) - used, align)) /* Here we avoid the case of a structure whose weak alignment forces many pushes of a small amount of data, and such small pushes do rounding that causes trouble. */ && ((! SLOW_UNALIGNED_ACCESS (word_mode, align)) || align >= BIGGEST_ALIGNMENT || (PUSH_ROUNDING (align / BITS_PER_UNIT) == (align / BITS_PER_UNIT))) && PUSH_ROUNDING (INTVAL (size)) == INTVAL (size)) { /* Push padding now if padding above and stack grows down, or if padding below and stack grows up. But if space already allocated, this has already been done. */ if (extra && args_addr == 0 && where_pad != none && where_pad != stack_direction) anti_adjust_stack (GEN_INT (extra)); move_by_pieces (NULL, xinner, INTVAL (size) - used, align, 0); } else #endif /* PUSH_ROUNDING */ { rtx target; /* Otherwise make space on the stack and copy the data to the address of that space. */ /* Deduct words put into registers from the size we must copy. */ if (partial != 0) { if (GET_CODE (size) == CONST_INT) size = GEN_INT (INTVAL (size) - used); else size = expand_binop (GET_MODE (size), sub_optab, size, GEN_INT (used), NULL_RTX, 0, OPTAB_LIB_WIDEN); } /* Get the address of the stack space. In this case, we do not deal with EXTRA separately. A single stack adjust will do. */ if (! args_addr) { temp = push_block (size, extra, where_pad == downward); extra = 0; } else if (GET_CODE (args_so_far) == CONST_INT) temp = memory_address (BLKmode, plus_constant (args_addr, skip + INTVAL (args_so_far))); else temp = memory_address (BLKmode, plus_constant (gen_rtx_PLUS (Pmode, args_addr, args_so_far), skip)); if (!ACCUMULATE_OUTGOING_ARGS) { /* If the source is referenced relative to the stack pointer, copy it to another register to stabilize it. We do not need to do this if we know that we won't be changing sp. 
*/ if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp) || reg_mentioned_p (virtual_outgoing_args_rtx, temp)) temp = copy_to_reg (temp); } target = gen_rtx_MEM (BLKmode, temp); if (type != 0) { set_mem_attributes (target, type, 1); /* Function incoming arguments may overlap with sibling call outgoing arguments and we cannot allow reordering of reads from function arguments with stores to outgoing arguments of sibling calls. */ set_mem_alias_set (target, 0); } /* ALIGN may well be better aligned than TYPE, e.g. due to PARM_BOUNDARY. Assume the caller isn't lying. */ set_mem_align (target, align); emit_block_move (target, xinner, size, BLOCK_OP_CALL_PARM); } } else if (partial > 0) { /* Scalar partly in registers. */ int size = GET_MODE_SIZE (mode) / UNITS_PER_WORD; int i; int not_stack; /* # words of start of argument that we must make space for but need not store. */ int offset = partial % (PARM_BOUNDARY / BITS_PER_WORD); int args_offset = INTVAL (args_so_far); int skip; /* Push padding now if padding above and stack grows down, or if padding below and stack grows up. But if space already allocated, this has already been done. */ if (extra && args_addr == 0 && where_pad != none && where_pad != stack_direction) anti_adjust_stack (GEN_INT (extra)); /* If we make space by pushing it, we might as well push the real data. Otherwise, we can leave OFFSET nonzero and leave the space uninitialized. */ if (args_addr == 0) offset = 0; /* Now NOT_STACK gets the number of words that we don't need to allocate on the stack. */ not_stack = partial - offset; /* If the partial register-part of the arg counts in its stack size, skip the part of stack space corresponding to the registers. Otherwise, start copying to the beginning of the stack space, by setting SKIP to 0. */ skip = (reg_parm_stack_space == 0) ? 0 : not_stack; if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x)) x = validize_mem (force_const_mem (mode, x)); /* If X is a hard register in a non-integer mode, copy it into a pseudo; SUBREGs of such registers are not allowed. */ if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT)) x = copy_to_reg (x); /* Loop over all the words allocated on the stack for this arg. */ /* We can do it by words, because any scalar bigger than a word has a size a multiple of a word. */ #ifndef PUSH_ARGS_REVERSED for (i = not_stack; i < size; i++) #else for (i = size - 1; i >= not_stack; i--) #endif if (i >= not_stack + offset) emit_push_insn (operand_subword_force (x, i, mode), word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX, 0, args_addr, GEN_INT (args_offset + ((i - not_stack + skip) * UNITS_PER_WORD)), reg_parm_stack_space, alignment_pad); } else { rtx addr; rtx dest; /* Push padding now if padding above and stack grows down, or if padding below and stack grows up. But if space already allocated, this has already been done. 
*/ if (extra && args_addr == 0 && where_pad != none && where_pad != stack_direction) anti_adjust_stack (GEN_INT (extra)); #ifdef PUSH_ROUNDING if (args_addr == 0 && PUSH_ARGS) emit_single_push_insn (mode, x, type); else #endif { if (GET_CODE (args_so_far) == CONST_INT) addr = memory_address (mode, plus_constant (args_addr, INTVAL (args_so_far))); else addr = memory_address (mode, gen_rtx_PLUS (Pmode, args_addr, args_so_far)); dest = gen_rtx_MEM (mode, addr); if (type != 0) { set_mem_attributes (dest, type, 1); /* Function incoming arguments may overlap with sibling call outgoing arguments and we cannot allow reordering of reads from function arguments with stores to outgoing arguments of sibling calls. */ set_mem_alias_set (dest, 0); } emit_move_insn (dest, x); } } /* If part should go in registers, copy that part into the appropriate registers. Do this now, at the end, since mem-to-mem copies above may do function calls. */ if (partial > 0 && reg != 0) { /* Handle calls that pass values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ if (GET_CODE (reg) == PARALLEL) emit_group_load (reg, x, type, -1); else move_block_to_reg (REGNO (reg), x, partial, mode); } if (extra && args_addr == 0 && where_pad == stack_direction) anti_adjust_stack (GEN_INT (extra)); if (alignment_pad && args_addr == 0) anti_adjust_stack (alignment_pad); } /* Return X if X can be used as a subtarget in a sequence of arithmetic operations. */ static rtx get_subtarget (rtx x) { return ((x == 0 /* Only registers can be subtargets. */ || !REG_P (x) /* If the register is readonly, it can't be set more than once. */ || RTX_UNCHANGING_P (x) /* Don't use hard regs to avoid extending their life. */ || REGNO (x) < FIRST_PSEUDO_REGISTER /* Avoid subtargets inside loops, since they hide some invariant expressions. */ || preserve_subexpressions_p ()) ? 0 : x); } /* Expand an assignment that stores the value of FROM into TO. If WANT_VALUE is nonzero, return an rtx for the value of TO. (This may contain a QUEUED rtx; if the value is constant, this rtx is a constant.) Otherwise, the returned value is NULL_RTX. */ rtx expand_assignment (tree to, tree from, int want_value) { rtx to_rtx = 0; rtx result; /* Don't crash if the lhs of the assignment was erroneous. */ if (TREE_CODE (to) == ERROR_MARK) { result = expand_expr (from, NULL_RTX, VOIDmode, 0); return want_value ? result : NULL_RTX; } /* Assignment of a structure component needs special treatment if the structure component's rtx is not simply a MEM. Assignment of an array element at a constant index, and assignment of an array element in an unaligned packed structure field, has the same problem. */ if (TREE_CODE (to) == COMPONENT_REF || TREE_CODE (to) == BIT_FIELD_REF || TREE_CODE (to) == ARRAY_REF || TREE_CODE (to) == ARRAY_RANGE_REF || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE) { enum machine_mode mode1; HOST_WIDE_INT bitsize, bitpos; rtx orig_to_rtx; tree offset; int unsignedp; int volatilep = 0; tree tem; push_temp_slots (); tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1, &unsignedp, &volatilep); /* If we are going to use store_bit_field and extract_bit_field, make sure to_rtx will be safe for multiple use. 
*/ if (mode1 == VOIDmode && want_value) tem = stabilize_reference (tem); orig_to_rtx = to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, 0); if (offset != 0) { rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM); if (!MEM_P (to_rtx)) abort (); #ifdef POINTERS_EXTEND_UNSIGNED if (GET_MODE (offset_rtx) != Pmode) offset_rtx = convert_to_mode (Pmode, offset_rtx, 0); #else if (GET_MODE (offset_rtx) != ptr_mode) offset_rtx = convert_to_mode (ptr_mode, offset_rtx, 0); #endif /* A constant address in TO_RTX can have VOIDmode, we must not try to call force_reg for that case. Avoid that case. */ if (MEM_P (to_rtx) && GET_MODE (to_rtx) == BLKmode && GET_MODE (XEXP (to_rtx, 0)) != VOIDmode && bitsize > 0 && (bitpos % bitsize) == 0 && (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0 && MEM_ALIGN (to_rtx) == GET_MODE_ALIGNMENT (mode1)) { to_rtx = adjust_address (to_rtx, mode1, bitpos / BITS_PER_UNIT); bitpos = 0; } to_rtx = offset_address (to_rtx, offset_rtx, highest_pow2_factor_for_target (to, offset)); } if (MEM_P (to_rtx)) { /* If the field is at offset zero, we could have been given the DECL_RTX of the parent struct. Don't munge it. */ to_rtx = shallow_copy_rtx (to_rtx); set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos); } /* Deal with volatile and readonly fields. The former is only done for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */ if (volatilep && MEM_P (to_rtx)) { if (to_rtx == orig_to_rtx) to_rtx = copy_rtx (to_rtx); MEM_VOLATILE_P (to_rtx) = 1; } if (TREE_CODE (to) == COMPONENT_REF && TREE_READONLY (TREE_OPERAND (to, 1)) /* We can't assert that a MEM won't be set more than once if the component is not addressable because another non-addressable component may be referenced by the same MEM. */ && ! (MEM_P (to_rtx) && ! can_address_p (to))) { if (to_rtx == orig_to_rtx) to_rtx = copy_rtx (to_rtx); RTX_UNCHANGING_P (to_rtx) = 1; } if (MEM_P (to_rtx) && ! can_address_p (to)) { if (to_rtx == orig_to_rtx) to_rtx = copy_rtx (to_rtx); MEM_KEEP_ALIAS_SET_P (to_rtx) = 1; } /* Disabled temporarily. GET_MODE (to_rtx) is often not the right mode. */ while (0 && mode1 == VOIDmode && !want_value && bitpos + bitsize <= BITS_PER_WORD && bitsize < BITS_PER_WORD && GET_MODE_BITSIZE (GET_MODE (to_rtx)) <= BITS_PER_WORD && !TREE_SIDE_EFFECTS (to) && !TREE_THIS_VOLATILE (to)) { tree src, op0, op1; rtx value; HOST_WIDE_INT count = bitpos; optab binop; src = from; STRIP_NOPS (src); if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE || TREE_CODE_CLASS (TREE_CODE (src)) != '2') break; op0 = TREE_OPERAND (src, 0); op1 = TREE_OPERAND (src, 1); STRIP_NOPS (op0); if (! operand_equal_p (to, op0, 0)) break; if (BYTES_BIG_ENDIAN) count = GET_MODE_BITSIZE (GET_MODE (to_rtx)) - bitpos - bitsize; /* Special case some bitfield op= exp. */ switch (TREE_CODE (src)) { case PLUS_EXPR: case MINUS_EXPR: if (count <= 0) break; /* For now, just optimize the case of the topmost bitfield where we don't need to do any masking and also 1 bit bitfields where xor can be used. We might win by one instruction for the other bitfields too if insv/extv instructions aren't used, so that can be added later. */ if (count + bitsize != GET_MODE_BITSIZE (GET_MODE (to_rtx)) && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST)) break; value = expand_expr (op1, NULL_RTX, VOIDmode, 0); value = protect_from_queue (value, 0); to_rtx = protect_from_queue (to_rtx, 1); binop = TREE_CODE (src) == PLUS_EXPR ? 
add_optab : sub_optab; if (bitsize == 1 && count + bitsize != GET_MODE_BITSIZE (GET_MODE (to_rtx))) { value = expand_and (GET_MODE (to_rtx), value, const1_rtx, NULL_RTX); binop = xor_optab; } value = expand_shift (LSHIFT_EXPR, GET_MODE (to_rtx), value, build_int_2 (count, 0), NULL_RTX, 1); result = expand_binop (GET_MODE (to_rtx), binop, to_rtx, value, to_rtx, 1, OPTAB_WIDEN); if (result != to_rtx) emit_move_insn (to_rtx, result); free_temp_slots (); pop_temp_slots (); return NULL_RTX; default: break; } break; } result = store_field (to_rtx, bitsize, bitpos, mode1, from, (want_value /* Spurious cast for HPUX compiler. */ ? ((enum machine_mode) TYPE_MODE (TREE_TYPE (to))) : VOIDmode), unsignedp, TREE_TYPE (tem), get_alias_set (to)); preserve_temp_slots (result); free_temp_slots (); pop_temp_slots (); /* If the value is meaningful, convert RESULT to the proper mode. Otherwise, return nothing. */ return (want_value ? convert_modes (TYPE_MODE (TREE_TYPE (to)), TYPE_MODE (TREE_TYPE (from)), result, TYPE_UNSIGNED (TREE_TYPE (to))) : NULL_RTX); } /* If the rhs is a function call and its value is not an aggregate, call the function before we start to compute the lhs. This is needed for correct code for cases such as val = setjmp (buf) on machines where reference to val requires loading up part of an address in a separate insn. Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG since it might be a promoted variable where the zero- or sign- extension needs to be done. Handling this in the normal way is safe because no computation is done before the call. */ if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from) && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST && ! ((TREE_CODE (to) == VAR_DECL || TREE_CODE (to) == PARM_DECL) && REG_P (DECL_RTL (to)))) { rtx value; push_temp_slots (); value = expand_expr (from, NULL_RTX, VOIDmode, 0); if (to_rtx == 0) to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE); /* Handle calls that return values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ if (GET_CODE (to_rtx) == PARALLEL) emit_group_load (to_rtx, value, TREE_TYPE (from), int_size_in_bytes (TREE_TYPE (from))); else if (GET_MODE (to_rtx) == BLKmode) emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL); else { if (POINTER_TYPE_P (TREE_TYPE (to))) value = convert_memory_address (GET_MODE (to_rtx), value); emit_move_insn (to_rtx, value); } preserve_temp_slots (to_rtx); free_temp_slots (); pop_temp_slots (); return want_value ? to_rtx : NULL_RTX; } /* Ordinary treatment. Expand TO to get a REG or MEM rtx. Don't re-expand if it was expanded already (in COMPONENT_REF case). */ if (to_rtx == 0) to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE); /* Don't move directly into a return register. */ if (TREE_CODE (to) == RESULT_DECL && (REG_P (to_rtx) || GET_CODE (to_rtx) == PARALLEL)) { rtx temp; push_temp_slots (); temp = expand_expr (from, 0, GET_MODE (to_rtx), 0); if (GET_CODE (to_rtx) == PARALLEL) emit_group_load (to_rtx, temp, TREE_TYPE (from), int_size_in_bytes (TREE_TYPE (from))); else emit_move_insn (to_rtx, temp); preserve_temp_slots (to_rtx); free_temp_slots (); pop_temp_slots (); return want_value ? to_rtx : NULL_RTX; } /* In case we are returning the contents of an object which overlaps the place the value is being stored, use a safe function when copying a value through a pointer into a structure value return block. 
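A hedged source-level illustration (not part of the original comment): for a function body like

        struct S f (struct S *p) { return *p; }

the caller may legally invoke it as s = f (&s), so P can point into the very block that serves as the structure return slot; the copy emitted below therefore uses the memmove library function, which tolerates overlap, instead of a plain block move.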
*/ if (TREE_CODE (to) == RESULT_DECL && TREE_CODE (from) == INDIRECT_REF && current_function_returns_struct && !current_function_returns_pcc_struct) { rtx from_rtx, size; push_temp_slots (); size = expr_size (from); from_rtx = expand_expr (from, NULL_RTX, VOIDmode, 0); emit_library_call (memmove_libfunc, LCT_NORMAL, VOIDmode, 3, XEXP (to_rtx, 0), Pmode, XEXP (from_rtx, 0), Pmode, convert_to_mode (TYPE_MODE (sizetype), size, TYPE_UNSIGNED (sizetype)), TYPE_MODE (sizetype)); preserve_temp_slots (to_rtx); free_temp_slots (); pop_temp_slots (); return want_value ? to_rtx : NULL_RTX; } /* Compute FROM and store the value in the rtx we got. */ push_temp_slots (); result = store_expr (from, to_rtx, want_value); preserve_temp_slots (result); free_temp_slots (); pop_temp_slots (); return want_value ? result : NULL_RTX; } /* Generate code for computing expression EXP, and storing the value into TARGET. TARGET may contain a QUEUED rtx. If WANT_VALUE & 1 is nonzero, return a copy of the value not in TARGET, so that we can be sure to use the proper value in a containing expression even if TARGET has something else stored in it. If possible, we copy the value through a pseudo and return that pseudo. Or, if the value is constant, we try to return the constant. In some cases, we return a pseudo copied *from* TARGET. If the mode is BLKmode then we may return TARGET itself. It turns out that in BLKmode it doesn't cause a problem. because C has no operators that could combine two different assignments into the same BLKmode object with different values with no sequence point. Will other languages need this to be more thorough? If WANT_VALUE & 1 is 0, we return NULL, to make sure to catch quickly any cases where the caller uses the value and fails to set WANT_VALUE. If WANT_VALUE & 2 is set, this is a store into a call param on the stack, and block moves may need to be treated specially. */ rtx store_expr (tree exp, rtx target, int want_value) { rtx temp; rtx alt_rtl = NULL_RTX; rtx mark = mark_queue (); int dont_return_target = 0; int dont_store_target = 0; if (VOID_TYPE_P (TREE_TYPE (exp))) { /* C++ can generate ?: expressions with a throw expression in one branch and an rvalue in the other. Here, we resolve attempts to store the throw expression's nonexistent result. */ if (want_value) abort (); expand_expr (exp, const0_rtx, VOIDmode, 0); return NULL_RTX; } if (TREE_CODE (exp) == COMPOUND_EXPR) { /* Perform first part of compound expression, then assign from second part. */ expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL); emit_queue (); return store_expr (TREE_OPERAND (exp, 1), target, want_value); } else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode) { /* For conditional expression, get safe form of the target. Then test the condition, doing the appropriate assignment on either side. This avoids the creation of unnecessary temporaries. For non-BLKmode, it is more efficient not to do this. 
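As a hedged illustration (not original text): for a BLKmode assignment such as

        s = flag ? a : b;

where s, a and b are large structures, the code below jumps on FLAG and performs one block copy into the target on each arm, rather than first building the selected structure in a temporary and then copying that temporary into S.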
*/ rtx lab1 = gen_label_rtx (), lab2 = gen_label_rtx (); emit_queue (); target = protect_from_queue (target, 1); do_pending_stack_adjust (); NO_DEFER_POP; jumpifnot (TREE_OPERAND (exp, 0), lab1); start_cleanup_deferral (); store_expr (TREE_OPERAND (exp, 1), target, want_value & 2); end_cleanup_deferral (); emit_queue (); emit_jump_insn (gen_jump (lab2)); emit_barrier (); emit_label (lab1); start_cleanup_deferral (); store_expr (TREE_OPERAND (exp, 2), target, want_value & 2); end_cleanup_deferral (); emit_queue (); emit_label (lab2); OK_DEFER_POP; return want_value & 1 ? target : NULL_RTX; } else if (queued_subexp_p (target)) /* If target contains a postincrement, let's not risk using it as the place to generate the rhs. */ { if (GET_MODE (target) != BLKmode && GET_MODE (target) != VOIDmode) { /* Expand EXP into a new pseudo. */ temp = gen_reg_rtx (GET_MODE (target)); temp = expand_expr (exp, temp, GET_MODE (target), (want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL)); } else temp = expand_expr (exp, NULL_RTX, GET_MODE (target), (want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL)); /* If target is volatile, ANSI requires accessing the value *from* the target, if it is accessed. So make that happen. In no case return the target itself. */ if (! MEM_VOLATILE_P (target) && (want_value & 1) != 0) dont_return_target = 1; } else if ((want_value & 1) != 0 && MEM_P (target) && ! MEM_VOLATILE_P (target) && GET_MODE (target) != BLKmode) /* If target is in memory and caller wants value in a register instead, arrange that. Pass TARGET as target for expand_expr so that, if EXP is another assignment, WANT_VALUE will be nonzero for it. We know expand_expr will not use the target in that case. Don't do this if TARGET is volatile because we are supposed to write it and then read it. */ { temp = expand_expr (exp, target, GET_MODE (target), want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL); if (GET_MODE (temp) != BLKmode && GET_MODE (temp) != VOIDmode) { /* If TEMP is already in the desired TARGET, only copy it from memory and don't store it there again. */ if (temp == target || (rtx_equal_p (temp, target) && ! side_effects_p (temp) && ! side_effects_p (target))) dont_store_target = 1; temp = copy_to_reg (temp); } dont_return_target = 1; } else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target)) /* If this is a scalar in a register that is stored in a wider mode than the declared mode, compute the result into its declared mode and then convert to the wider mode. Our value is the computed expression. */ { rtx inner_target = 0; /* If we don't want a value, we can do the conversion inside EXP, which will often result in some optimizations. Do the conversion in two steps: first change the signedness, if needed, then the extend. But don't do this if the type of EXP is a subtype of something else since then the conversion might involve more than just converting modes. */ if ((want_value & 1) == 0 && INTEGRAL_TYPE_P (TREE_TYPE (exp)) && TREE_TYPE (TREE_TYPE (exp)) == 0) { if (TYPE_UNSIGNED (TREE_TYPE (exp)) != SUBREG_PROMOTED_UNSIGNED_P (target)) exp = convert (lang_hooks.types.signed_or_unsigned_type (SUBREG_PROMOTED_UNSIGNED_P (target), TREE_TYPE (exp)), exp); exp = convert (lang_hooks.types.type_for_mode (GET_MODE (SUBREG_REG (target)), SUBREG_PROMOTED_UNSIGNED_P (target)), exp); inner_target = SUBREG_REG (target); } temp = expand_expr (exp, inner_target, VOIDmode, want_value & 2 ? 
EXPAND_STACK_PARM : EXPAND_NORMAL); /* If TEMP is a MEM and we want a result value, make the access now so it gets done only once. Strictly speaking, this is only necessary if the MEM is volatile, or if the address overlaps TARGET. But not performing the load twice also reduces the amount of rtl we generate and then have to CSE. */ if (MEM_P (temp) && (want_value & 1) != 0) temp = copy_to_reg (temp); /* If TEMP is a VOIDmode constant, use convert_modes to make sure that we properly convert it. */ if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode) { temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)), temp, SUBREG_PROMOTED_UNSIGNED_P (target)); temp = convert_modes (GET_MODE (SUBREG_REG (target)), GET_MODE (target), temp, SUBREG_PROMOTED_UNSIGNED_P (target)); } convert_move (SUBREG_REG (target), temp, SUBREG_PROMOTED_UNSIGNED_P (target)); /* If we promoted a constant, change the mode back down to match target. Otherwise, the caller might get confused by a result whose mode is larger than expected. */ if ((want_value & 1) != 0 && GET_MODE (temp) != GET_MODE (target)) { if (GET_MODE (temp) != VOIDmode) { temp = gen_lowpart_SUBREG (GET_MODE (target), temp); SUBREG_PROMOTED_VAR_P (temp) = 1; SUBREG_PROMOTED_UNSIGNED_SET (temp, SUBREG_PROMOTED_UNSIGNED_P (target)); } else temp = convert_modes (GET_MODE (target), GET_MODE (SUBREG_REG (target)), temp, SUBREG_PROMOTED_UNSIGNED_P (target)); } return want_value & 1 ? temp : NULL_RTX; } else { temp = expand_expr_real (exp, target, GET_MODE (target), (want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL), &alt_rtl); /* Return TARGET if it's a specified hardware register. If TARGET is a volatile mem ref, either return TARGET or return a reg copied *from* TARGET; ANSI requires this. Otherwise, if TEMP is not TARGET, return TEMP if it is constant (for efficiency), or if we really want the correct value. */ if (!(target && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER) && !(MEM_P (target) && MEM_VOLATILE_P (target)) && ! rtx_equal_p (temp, target) && (CONSTANT_P (temp) || (want_value & 1) != 0)) dont_return_target = 1; } /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not the same as that of TARGET, adjust the constant. This is needed, for example, in case it is a CONST_DOUBLE and we want only a word-sized value. */ if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode && TREE_CODE (exp) != ERROR_MARK && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp))) temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)), temp, TYPE_UNSIGNED (TREE_TYPE (exp))); /* If value was not generated in the target, store it there. Convert the value to TARGET's type first if necessary and emit the pending incrementations that have been queued when expanding EXP. Note that we cannot emit the whole queue blindly because this will effectively disable the POST_INC optimization later. If TEMP and TARGET compare equal according to rtx_equal_p, but one or both of them are volatile memory refs, we have to distinguish two cases: - expand_expr has used TARGET. In this case, we must not generate another copy. This can be detected by TARGET being equal according to == . - expand_expr has not used TARGET - that means that the source just happens to have the same RTX form. Since temp will have been created by expand_expr, it will compare unequal according to == . We must generate a copy in this case, to reach the correct number of volatile memory references. */ if ((! 
rtx_equal_p (temp, target) || (temp != target && (side_effects_p (temp) || side_effects_p (target)))) && TREE_CODE (exp) != ERROR_MARK && ! dont_store_target /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET, but TARGET is not valid memory reference, TEMP will differ from TARGET although it is really the same location. */ && !(alt_rtl && rtx_equal_p (alt_rtl, target)) /* If there's nothing to copy, don't bother. Don't call expr_size unless necessary, because some front-ends (C++) expr_size-hook aborts on objects that are not supposed to be bit-copied or bit-initialized. */ && expr_size (exp) != const0_rtx) { emit_insns_enqueued_after_mark (mark); target = protect_from_queue (target, 1); temp = protect_from_queue (temp, 0); if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode) { int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp)); if (dont_return_target) { /* In this case, we will return TEMP, so make sure it has the proper mode. But don't forget to store the value into TARGET. */ temp = convert_to_mode (GET_MODE (target), temp, unsignedp); emit_move_insn (target, temp); } else convert_move (target, temp, unsignedp); } else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST) { /* Handle copying a string constant into an array. The string constant may be shorter than the array. So copy just the string's actual length, and clear the rest. First get the size of the data type of the string, which is actually the size of the target. */ rtx size = expr_size (exp); if (GET_CODE (size) == CONST_INT && INTVAL (size) < TREE_STRING_LENGTH (exp)) emit_block_move (target, temp, size, (want_value & 2 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL)); else { /* Compute the size of the data to copy from the string. */ tree copy_size = size_binop (MIN_EXPR, make_tree (sizetype, size), size_int (TREE_STRING_LENGTH (exp))); rtx copy_size_rtx = expand_expr (copy_size, NULL_RTX, VOIDmode, (want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL)); rtx label = 0; /* Copy that much. */ copy_size_rtx = convert_to_mode (ptr_mode, copy_size_rtx, TYPE_UNSIGNED (sizetype)); emit_block_move (target, temp, copy_size_rtx, (want_value & 2 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL)); /* Figure out how much is left in TARGET that we have to clear. Do all calculations in ptr_mode. */ if (GET_CODE (copy_size_rtx) == CONST_INT) { size = plus_constant (size, -INTVAL (copy_size_rtx)); target = adjust_address (target, BLKmode, INTVAL (copy_size_rtx)); } else { size = expand_binop (TYPE_MODE (sizetype), sub_optab, size, copy_size_rtx, NULL_RTX, 0, OPTAB_LIB_WIDEN); #ifdef POINTERS_EXTEND_UNSIGNED if (GET_MODE (copy_size_rtx) != Pmode) copy_size_rtx = convert_to_mode (Pmode, copy_size_rtx, TYPE_UNSIGNED (sizetype)); #endif target = offset_address (target, copy_size_rtx, highest_pow2_factor (copy_size)); label = gen_label_rtx (); emit_cmp_and_jump_insns (size, const0_rtx, LT, NULL_RTX, GET_MODE (size), 0, label); } if (size != const0_rtx) clear_storage (target, size); if (label) emit_label (label); } } /* Handle calls that return values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ else if (GET_CODE (target) == PARALLEL) emit_group_load (target, temp, TREE_TYPE (exp), int_size_in_bytes (TREE_TYPE (exp))); else if (GET_MODE (temp) == BLKmode) emit_block_move (target, temp, expr_size (exp), (want_value & 2 ? 
BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL)); else { temp = force_operand (temp, target); if (temp != target) emit_move_insn (target, temp); } } /* If we don't want a value, return NULL_RTX. */ if ((want_value & 1) == 0) return NULL_RTX; /* If we are supposed to return TEMP, do so as long as it isn't a MEM. ??? The latter test doesn't seem to make sense. */ else if (dont_return_target && !MEM_P (temp)) return temp; /* Return TARGET itself if it is a hard register. */ else if ((want_value & 1) != 0 && GET_MODE (target) != BLKmode && ! (REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)) return copy_to_reg (target); else return target; } /* Examine CTOR. Discover how many scalar fields are set to nonzero values and place it in *P_NZ_ELTS. Discover how many scalar fields are set to non-constant values and place it in *P_NC_ELTS. */ static void categorize_ctor_elements_1 (tree ctor, HOST_WIDE_INT *p_nz_elts, HOST_WIDE_INT *p_nc_elts) { HOST_WIDE_INT nz_elts, nc_elts; tree list; nz_elts = 0; nc_elts = 0; for (list = CONSTRUCTOR_ELTS (ctor); list; list = TREE_CHAIN (list)) { tree value = TREE_VALUE (list); tree purpose = TREE_PURPOSE (list); HOST_WIDE_INT mult; mult = 1; if (TREE_CODE (purpose) == RANGE_EXPR) { tree lo_index = TREE_OPERAND (purpose, 0); tree hi_index = TREE_OPERAND (purpose, 1); if (host_integerp (lo_index, 1) && host_integerp (hi_index, 1)) mult = (tree_low_cst (hi_index, 1) - tree_low_cst (lo_index, 1) + 1); } switch (TREE_CODE (value)) { case CONSTRUCTOR: { HOST_WIDE_INT nz = 0, nc = 0; categorize_ctor_elements_1 (value, &nz, &nc); nz_elts += mult * nz; nc_elts += mult * nc; } break; case INTEGER_CST: case REAL_CST: if (!initializer_zerop (value)) nz_elts += mult; break; case COMPLEX_CST: if (!initializer_zerop (TREE_REALPART (value))) nz_elts += mult; if (!initializer_zerop (TREE_IMAGPART (value))) nz_elts += mult; break; case VECTOR_CST: { tree v; for (v = TREE_VECTOR_CST_ELTS (value); v; v = TREE_CHAIN (v)) if (!initializer_zerop (TREE_VALUE (v))) nz_elts += mult; } break; default: nz_elts += mult; if (!initializer_constant_valid_p (value, TREE_TYPE (value))) nc_elts += mult; break; } } *p_nz_elts += nz_elts; *p_nc_elts += nc_elts; } void categorize_ctor_elements (tree ctor, HOST_WIDE_INT *p_nz_elts, HOST_WIDE_INT *p_nc_elts) { *p_nz_elts = 0; *p_nc_elts = 0; categorize_ctor_elements_1 (ctor, p_nz_elts, p_nc_elts); } /* Count the number of scalars in TYPE. Return -1 on overflow or variable-sized. */ HOST_WIDE_INT count_type_elements (tree type) { const HOST_WIDE_INT max = ~((HOST_WIDE_INT)1 << (HOST_BITS_PER_WIDE_INT-1)); switch (TREE_CODE (type)) { case ARRAY_TYPE: { tree telts = array_type_nelts (type); if (telts && host_integerp (telts, 1)) { HOST_WIDE_INT n = tree_low_cst (telts, 1) + 1; HOST_WIDE_INT m = count_type_elements (TREE_TYPE (type)); if (n == 0) return 0; else if (max / n > m) return n * m; } return -1; } case RECORD_TYPE: { HOST_WIDE_INT n = 0, t; tree f; for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f)) if (TREE_CODE (f) == FIELD_DECL) { t = count_type_elements (TREE_TYPE (f)); if (t < 0) return -1; n += t; } return n; } case UNION_TYPE: case QUAL_UNION_TYPE: { /* Ho hum. How in the world do we guess here? Clearly it isn't right to count the fields. Guess based on the number of words. */ HOST_WIDE_INT n = int_size_in_bytes (type); if (n < 0) return -1; return n / UNITS_PER_WORD; } case COMPLEX_TYPE: return 2; case VECTOR_TYPE: /* ??? This is broke. We should encode the vector width in the tree. 
*/ return GET_MODE_NUNITS (TYPE_MODE (type)); case INTEGER_TYPE: case REAL_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE: case OFFSET_TYPE: case REFERENCE_TYPE: return 1; case VOID_TYPE: case METHOD_TYPE: case FILE_TYPE: case SET_TYPE: case FUNCTION_TYPE: case LANG_TYPE: default: abort (); } } /* Return 1 if EXP contains mostly (3/4) zeros. */ int mostly_zeros_p (tree exp) { if (TREE_CODE (exp) == CONSTRUCTOR) { HOST_WIDE_INT nz_elts, nc_elts, elts; /* If there are no ranges of true bits, it is all zero. */ if (TREE_TYPE (exp) && TREE_CODE (TREE_TYPE (exp)) == SET_TYPE) return CONSTRUCTOR_ELTS (exp) == NULL_TREE; categorize_ctor_elements (exp, &nz_elts, &nc_elts); elts = count_type_elements (TREE_TYPE (exp)); return nz_elts < elts / 4; } return initializer_zerop (exp); } /* Helper function for store_constructor. TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field. TYPE is the type of the CONSTRUCTOR, not the element type. CLEARED is as for store_constructor. ALIAS_SET is the alias set to use for any stores. This provides a recursive shortcut back to store_constructor when it isn't necessary to go through store_field. This is so that we can pass through the cleared field to let store_constructor know that we may not have to clear a substructure if the outer structure has already been cleared. */ static void store_constructor_field (rtx target, unsigned HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, enum machine_mode mode, tree exp, tree type, int cleared, int alias_set) { if (TREE_CODE (exp) == CONSTRUCTOR /* We can only call store_constructor recursively if the size and bit position are on a byte boundary. */ && bitpos % BITS_PER_UNIT == 0 && (bitsize > 0 && bitsize % BITS_PER_UNIT == 0) /* If we have a nonzero bitpos for a register target, then we just let store_field do the bitfield handling. This is unlikely to generate unnecessary clear instructions anyways. */ && (bitpos == 0 || MEM_P (target))) { if (MEM_P (target)) target = adjust_address (target, GET_MODE (target) == BLKmode || 0 != (bitpos % GET_MODE_ALIGNMENT (GET_MODE (target))) ? BLKmode : VOIDmode, bitpos / BITS_PER_UNIT); /* Update the alias set, if required. */ if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target) && MEM_ALIAS_SET (target) != 0) { target = copy_rtx (target); set_mem_alias_set (target, alias_set); } store_constructor (exp, target, cleared, bitsize / BITS_PER_UNIT); } else store_field (target, bitsize, bitpos, mode, exp, VOIDmode, 0, type, alias_set); } /* Store the value of constructor EXP into the rtx TARGET. TARGET is either a REG or a MEM; we know it cannot conflict, since safe_from_p has been called. CLEARED is true if TARGET is known to have been zero'd. SIZE is the number of bytes of TARGET we are allowed to modify: this may not be the same as the size of EXP if we are assigning to a field which has been packed to exclude padding bits. */ static void store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) { tree type = TREE_TYPE (exp); #ifdef WORD_REGISTER_OPERATIONS HOST_WIDE_INT exp_size = int_size_in_bytes (type); #endif if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) { tree elt; /* If size is zero or the target is already cleared, do nothing. */ if (size == 0 || cleared) cleared = 1; /* We either clear the aggregate or indicate the value is dead. */ else if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) && ! 
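/* Illustrative note, not part of the original source: the 3/4-zeros test
   used by mostly_zeros_p above, on a made-up aggregate.  For a constructor
   of a 100-element array with 20 nonzero scalar initializers,
   categorize_ctor_elements reports nz_elts == 20 and count_type_elements
   reports elts == 100, so 20 < 100 / 4 holds, the whole object is cleared
   first, and only the 20 nonzero elements are stored afterwards.  With 30
   nonzero elements the test fails and every element is stored
   individually instead.  */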
CONSTRUCTOR_ELTS (exp)) /* If the constructor is empty, clear the union. */ { clear_storage (target, expr_size (exp)); cleared = 1; } /* If we are building a static constructor into a register, set the initial value as zero so we can fold the value into a constant. But if more than one register is involved, this probably loses. */ else if (REG_P (target) && TREE_STATIC (exp) && GET_MODE_SIZE (GET_MODE (target)) <= UNITS_PER_WORD) { emit_move_insn (target, CONST0_RTX (GET_MODE (target))); cleared = 1; } /* If the constructor has fewer fields than the structure or if we are initializing the structure to mostly zeros, clear the whole structure first. Don't do this if TARGET is a register whose mode size isn't equal to SIZE since clear_storage can't handle this case. */ else if (size > 0 && ((list_length (CONSTRUCTOR_ELTS (exp)) != fields_length (type)) || mostly_zeros_p (exp)) && (!REG_P (target) || ((HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (target)) == size))) { rtx xtarget = target; if (readonly_fields_p (type)) { xtarget = copy_rtx (xtarget); RTX_UNCHANGING_P (xtarget) = 1; } clear_storage (xtarget, GEN_INT (size)); cleared = 1; } if (! cleared) emit_insn (gen_rtx_CLOBBER (VOIDmode, target)); /* Store each element of the constructor into the corresponding field of TARGET. */ for (elt = CONSTRUCTOR_ELTS (exp); elt; elt = TREE_CHAIN (elt)) { tree field = TREE_PURPOSE (elt); tree value = TREE_VALUE (elt); enum machine_mode mode; HOST_WIDE_INT bitsize; HOST_WIDE_INT bitpos = 0; tree offset; rtx to_rtx = target; /* Just ignore missing fields. We cleared the whole structure, above, if any fields are missing. */ if (field == 0) continue; if (cleared && initializer_zerop (value)) continue; if (host_integerp (DECL_SIZE (field), 1)) bitsize = tree_low_cst (DECL_SIZE (field), 1); else bitsize = -1; mode = DECL_MODE (field); if (DECL_BIT_FIELD (field)) mode = VOIDmode; offset = DECL_FIELD_OFFSET (field); if (host_integerp (offset, 0) && host_integerp (bit_position (field), 0)) { bitpos = int_bit_position (field); offset = 0; } else bitpos = tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 0); if (offset) { rtx offset_rtx; offset = SUBSTITUTE_PLACEHOLDER_IN_EXPR (offset, make_tree (TREE_TYPE (exp), target)); offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, 0); if (!MEM_P (to_rtx)) abort (); #ifdef POINTERS_EXTEND_UNSIGNED if (GET_MODE (offset_rtx) != Pmode) offset_rtx = convert_to_mode (Pmode, offset_rtx, 0); #else if (GET_MODE (offset_rtx) != ptr_mode) offset_rtx = convert_to_mode (ptr_mode, offset_rtx, 0); #endif to_rtx = offset_address (to_rtx, offset_rtx, highest_pow2_factor (offset)); } if (TREE_READONLY (field)) { if (MEM_P (to_rtx)) to_rtx = copy_rtx (to_rtx); RTX_UNCHANGING_P (to_rtx) = 1; } #ifdef WORD_REGISTER_OPERATIONS /* If this initializes a field that is smaller than a word, at the start of a word, try to widen it to a full word. This special case allows us to output C++ member function initializations in a form that the optimizers can understand. 
*/ if (REG_P (target) && bitsize < BITS_PER_WORD && bitpos % BITS_PER_WORD == 0 && GET_MODE_CLASS (mode) == MODE_INT && TREE_CODE (value) == INTEGER_CST && exp_size >= 0 && bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT) { tree type = TREE_TYPE (value); if (TYPE_PRECISION (type) < BITS_PER_WORD) { type = lang_hooks.types.type_for_size (BITS_PER_WORD, TYPE_UNSIGNED (type)); value = convert (type, value); } if (BYTES_BIG_ENDIAN) value = fold (build (LSHIFT_EXPR, type, value, build_int_2 (BITS_PER_WORD - bitsize, 0))); bitsize = BITS_PER_WORD; mode = word_mode; } #endif if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx) && DECL_NONADDRESSABLE_P (field)) { to_rtx = copy_rtx (to_rtx); MEM_KEEP_ALIAS_SET_P (to_rtx) = 1; } store_constructor_field (to_rtx, bitsize, bitpos, mode, value, type, cleared, get_alias_set (TREE_TYPE (field))); } } else if (TREE_CODE (type) == ARRAY_TYPE || TREE_CODE (type) == VECTOR_TYPE) { tree elt; int i; int need_to_clear; tree domain; tree elttype = TREE_TYPE (type); int const_bounds_p; HOST_WIDE_INT minelt = 0; HOST_WIDE_INT maxelt = 0; int icode = 0; rtx *vector = NULL; int elt_size = 0; unsigned n_elts = 0; if (TREE_CODE (type) == ARRAY_TYPE) domain = TYPE_DOMAIN (type); else /* Vectors do not have domains; look up the domain of the array embedded in the debug representation type. FIXME Would probably be more efficient to treat vectors separately from arrays. */ { domain = TYPE_DEBUG_REPRESENTATION_TYPE (type); domain = TYPE_DOMAIN (TREE_TYPE (TYPE_FIELDS (domain))); if (REG_P (target) && VECTOR_MODE_P (GET_MODE (target))) { enum machine_mode mode = GET_MODE (target); icode = (int) vec_init_optab->handlers[mode].insn_code; if (icode != CODE_FOR_nothing) { unsigned int i; elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); n_elts = (GET_MODE_SIZE (mode) / elt_size); vector = alloca (n_elts); for (i = 0; i < n_elts; i++) vector [i] = CONST0_RTX (GET_MODE_INNER (mode)); } } } const_bounds_p = (TYPE_MIN_VALUE (domain) && TYPE_MAX_VALUE (domain) && host_integerp (TYPE_MIN_VALUE (domain), 0) && host_integerp (TYPE_MAX_VALUE (domain), 0)); /* If we have constant bounds for the range of the type, get them. */ if (const_bounds_p) { minelt = tree_low_cst (TYPE_MIN_VALUE (domain), 0); maxelt = tree_low_cst (TYPE_MAX_VALUE (domain), 0); } /* If the constructor has fewer elements than the array, clear the whole array first. Similarly if this is static constructor of a non-BLKmode object. */ if (cleared || (REG_P (target) && TREE_STATIC (exp))) need_to_clear = 1; else { HOST_WIDE_INT count = 0, zero_count = 0; need_to_clear = ! const_bounds_p; /* This loop is a more accurate version of the loop in mostly_zeros_p (it handles RANGE_EXPR in an index). It is also needed to check for missing elements. */ for (elt = CONSTRUCTOR_ELTS (exp); elt != NULL_TREE && ! need_to_clear; elt = TREE_CHAIN (elt)) { tree index = TREE_PURPOSE (elt); HOST_WIDE_INT this_node_count; if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR) { tree lo_index = TREE_OPERAND (index, 0); tree hi_index = TREE_OPERAND (index, 1); if (! host_integerp (lo_index, 1) || ! host_integerp (hi_index, 1)) { need_to_clear = 1; break; } this_node_count = (tree_low_cst (hi_index, 1) - tree_low_cst (lo_index, 1) + 1); } else this_node_count = 1; count += this_node_count; if (mostly_zeros_p (TREE_VALUE (elt))) zero_count += this_node_count; } /* Clear the entire array first if there are any missing elements, or if the incidence of zero elements is >= 75%. */ if (! 
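/* Illustrative note, not part of the original source: the enclosing test
   clears the whole array when elements are missing (COUNT is smaller than
   the number of elements in the domain) or when at least 75% of the
   initialized elements are zero.  The zero-fraction test is written as
   4 * zero_count >= 3 * count so it stays in integer arithmetic: for
   example, count == 10 and zero_count == 8 gives 32 >= 30 (80% >= 75%)
   and the array is cleared up front, while zero_count == 7 gives 28 < 30
   and no up-front clearing is done.  */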
need_to_clear && (count < maxelt - minelt + 1 || 4 * zero_count >= 3 * count)) need_to_clear = 1; } if (need_to_clear && size > 0 && !vector) { if (! cleared) { if (REG_P (target)) emit_move_insn (target, CONST0_RTX (GET_MODE (target))); else clear_storage (target, GEN_INT (size)); } cleared = 1; } else if (REG_P (target)) /* Inform later passes that the old value is dead. */ emit_insn (gen_rtx_CLOBBER (VOIDmode, target)); /* Store each element of the constructor into the corresponding element of TARGET, determined by counting the elements. */ for (elt = CONSTRUCTOR_ELTS (exp), i = 0; elt; elt = TREE_CHAIN (elt), i++) { enum machine_mode mode; HOST_WIDE_INT bitsize; HOST_WIDE_INT bitpos; int unsignedp; tree value = TREE_VALUE (elt); tree index = TREE_PURPOSE (elt); rtx xtarget = target; if (cleared && initializer_zerop (value)) continue; unsignedp = TYPE_UNSIGNED (elttype); mode = TYPE_MODE (elttype); if (mode == BLKmode) bitsize = (host_integerp (TYPE_SIZE (elttype), 1) ? tree_low_cst (TYPE_SIZE (elttype), 1) : -1); else bitsize = GET_MODE_BITSIZE (mode); if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR) { tree lo_index = TREE_OPERAND (index, 0); tree hi_index = TREE_OPERAND (index, 1); rtx index_r, pos_rtx; HOST_WIDE_INT lo, hi, count; tree position; if (vector) abort (); /* If the range is constant and "small", unroll the loop. */ if (const_bounds_p && host_integerp (lo_index, 0) && host_integerp (hi_index, 0) && (lo = tree_low_cst (lo_index, 0), hi = tree_low_cst (hi_index, 0), count = hi - lo + 1, (!MEM_P (target) || count <= 2 || (host_integerp (TYPE_SIZE (elttype), 1) && (tree_low_cst (TYPE_SIZE (elttype), 1) * count <= 40 * 8))))) { lo -= minelt; hi -= minelt; for (; lo <= hi; lo++) { bitpos = lo * tree_low_cst (TYPE_SIZE (elttype), 0); if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target) && TREE_CODE (type) == ARRAY_TYPE && TYPE_NONALIASED_COMPONENT (type)) { target = copy_rtx (target); MEM_KEEP_ALIAS_SET_P (target) = 1; } store_constructor_field (target, bitsize, bitpos, mode, value, type, cleared, get_alias_set (elttype)); } } else { rtx loop_start = gen_label_rtx (); rtx loop_end = gen_label_rtx (); tree exit_cond; expand_expr (hi_index, NULL_RTX, VOIDmode, 0); unsignedp = TYPE_UNSIGNED (domain); index = build_decl (VAR_DECL, NULL_TREE, domain); index_r = gen_reg_rtx (promote_mode (domain, DECL_MODE (index), &unsignedp, 0)); SET_DECL_RTL (index, index_r); store_expr (lo_index, index_r, 0); /* Build the head of the loop. */ do_pending_stack_adjust (); emit_queue (); emit_label (loop_start); /* Assign value to element index. */ position = convert (ssizetype, fold (build (MINUS_EXPR, TREE_TYPE (index), index, TYPE_MIN_VALUE (domain)))); position = size_binop (MULT_EXPR, position, convert (ssizetype, TYPE_SIZE_UNIT (elttype))); pos_rtx = expand_expr (position, 0, VOIDmode, 0); xtarget = offset_address (target, pos_rtx, highest_pow2_factor (position)); xtarget = adjust_address (xtarget, mode, 0); if (TREE_CODE (value) == CONSTRUCTOR) store_constructor (value, xtarget, cleared, bitsize / BITS_PER_UNIT); else store_expr (value, xtarget, 0); /* Generate a conditional jump to exit the loop. */ exit_cond = build (LT_EXPR, integer_type_node, index, hi_index); jumpif (exit_cond, loop_end); /* Update the loop counter, and jump to the head of the loop. */ expand_increment (build (PREINCREMENT_EXPR, TREE_TYPE (index), index, integer_one_node), 0, 0); emit_jump (loop_start); /* Build the end of the loop. */ emit_label (loop_end); } } else if ((index != 0 && ! 
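/* Illustrative sketch, not part of the original source: how the RANGE_EXPR
   case above chooses between unrolling the stores and emitting a run-time
   loop, using made-up GNU C designated range initializers and assuming a
   32-bit int.  */
#if 0
void
range_initializer_examples (void)
{
  int small[10]   = { [2 ... 5] = 7 };	  /* 4 elements * 32 bits = 128 bits,
					     within the 40-byte limit, so the
					     four stores are unrolled.  */
  int large[1000] = { [0 ... 899] = 7 };  /* 900 elements is far past the
					     limit, so a loop over a run-time
					     index register is emitted
					     instead.  */
}
#endif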
host_integerp (index, 0)) || ! host_integerp (TYPE_SIZE (elttype), 1)) { tree position; if (vector) abort (); if (index == 0) index = ssize_int (1); if (minelt) index = convert (ssizetype, fold (build (MINUS_EXPR, index, TYPE_MIN_VALUE (domain)))); position = size_binop (MULT_EXPR, index, convert (ssizetype, TYPE_SIZE_UNIT (elttype))); xtarget = offset_address (target, expand_expr (position, 0, VOIDmode, 0), highest_pow2_factor (position)); xtarget = adjust_address (xtarget, mode, 0); store_expr (value, xtarget, 0); } else if (vector) { int pos; if (index != 0) pos = tree_low_cst (index, 0) - minelt; else pos = i; vector[pos] = expand_expr (value, NULL_RTX, VOIDmode, 0); } else { if (index != 0) bitpos = ((tree_low_cst (index, 0) - minelt) * tree_low_cst (TYPE_SIZE (elttype), 1)); else bitpos = (i * tree_low_cst (TYPE_SIZE (elttype), 1)); if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target) && TREE_CODE (type) == ARRAY_TYPE && TYPE_NONALIASED_COMPONENT (type)) { target = copy_rtx (target); MEM_KEEP_ALIAS_SET_P (target) = 1; } store_constructor_field (target, bitsize, bitpos, mode, value, type, cleared, get_alias_set (elttype)); } } if (vector) { emit_insn (GEN_FCN (icode) (target, gen_rtx_PARALLEL (GET_MODE (target), gen_rtvec_v (n_elts, vector)))); } } /* Set constructor assignments. */ else if (TREE_CODE (type) == SET_TYPE) { tree elt = CONSTRUCTOR_ELTS (exp); unsigned HOST_WIDE_INT nbytes = int_size_in_bytes (type), nbits; tree domain = TYPE_DOMAIN (type); tree domain_min, domain_max, bitlength; /* The default implementation strategy is to extract the constant parts of the constructor, use that to initialize the target, and then "or" in whatever non-constant ranges we need in addition. If a large set is all zero or all ones, it is probably better to set it using memset. Also, if a large set has just a single range, it may also be better to first clear all the first clear the set (using memset), and set the bits we want. */ /* Check for all zeros. */ if (elt == NULL_TREE && size > 0) { if (!cleared) clear_storage (target, GEN_INT (size)); return; } domain_min = convert (sizetype, TYPE_MIN_VALUE (domain)); domain_max = convert (sizetype, TYPE_MAX_VALUE (domain)); bitlength = size_binop (PLUS_EXPR, size_diffop (domain_max, domain_min), ssize_int (1)); nbits = tree_low_cst (bitlength, 1); /* For "small" sets, or "medium-sized" (up to 32 bytes) sets that are "complicated" (more than one range), initialize (the constant parts) by copying from a constant. */ if (GET_MODE (target) != BLKmode || nbits <= 2 * BITS_PER_WORD || (nbytes <= 32 && TREE_CHAIN (elt) != NULL_TREE)) { unsigned int set_word_size = TYPE_ALIGN (TREE_TYPE (exp)); enum machine_mode mode = mode_for_size (set_word_size, MODE_INT, 1); char *bit_buffer = alloca (nbits); HOST_WIDE_INT word = 0; unsigned int bit_pos = 0; unsigned int ibit = 0; unsigned int offset = 0; /* In bytes from beginning of set. */ elt = get_set_constructor_bits (exp, bit_buffer, nbits); for (;;) { if (bit_buffer[ibit]) { if (BYTES_BIG_ENDIAN) word |= (1 << (set_word_size - 1 - bit_pos)); else word |= 1 << bit_pos; } bit_pos++; ibit++; if (bit_pos >= set_word_size || ibit == nbits) { if (word != 0 || ! cleared) { rtx datum = gen_int_mode (word, mode); rtx to_rtx; /* The assumption here is that it is safe to use XEXP if the set is multi-word, but not if it's single-word. 
*/ if (MEM_P (target)) to_rtx = adjust_address (target, mode, offset); else if (offset == 0) to_rtx = target; else abort (); emit_move_insn (to_rtx, datum); } if (ibit == nbits) break; word = 0; bit_pos = 0; offset += set_word_size / BITS_PER_UNIT; } } } else if (!cleared) /* Don't bother clearing storage if the set is all ones. */ if (TREE_CHAIN (elt) != NULL_TREE || (TREE_PURPOSE (elt) == NULL_TREE ? nbits != 1 : ( ! host_integerp (TREE_VALUE (elt), 0) || ! host_integerp (TREE_PURPOSE (elt), 0) || (tree_low_cst (TREE_VALUE (elt), 0) - tree_low_cst (TREE_PURPOSE (elt), 0) + 1 != (HOST_WIDE_INT) nbits)))) clear_storage (target, expr_size (exp)); for (; elt != NULL_TREE; elt = TREE_CHAIN (elt)) { /* Start of range of element or NULL. */ tree startbit = TREE_PURPOSE (elt); /* End of range of element, or element value. */ tree endbit = TREE_VALUE (elt); HOST_WIDE_INT startb, endb; rtx bitlength_rtx, startbit_rtx, endbit_rtx, targetx; bitlength_rtx = expand_expr (bitlength, NULL_RTX, MEM, EXPAND_CONST_ADDRESS); /* Handle non-range tuple element like [ expr ]. */ if (startbit == NULL_TREE) { startbit = save_expr (endbit); endbit = startbit; } startbit = convert (sizetype, startbit); endbit = convert (sizetype, endbit); if (! integer_zerop (domain_min)) { startbit = size_binop (MINUS_EXPR, startbit, domain_min); endbit = size_binop (MINUS_EXPR, endbit, domain_min); } startbit_rtx = expand_expr (startbit, NULL_RTX, MEM, EXPAND_CONST_ADDRESS); endbit_rtx = expand_expr (endbit, NULL_RTX, MEM, EXPAND_CONST_ADDRESS); if (REG_P (target)) { targetx = assign_temp ((build_qualified_type (lang_hooks.types.type_for_mode (GET_MODE (target), 0), TYPE_QUAL_CONST)), 0, 1, 1); emit_move_insn (targetx, target); } else if (MEM_P (target)) targetx = target; else abort (); /* Optimization: If startbit and endbit are constants divisible by BITS_PER_UNIT, call memset instead. */ if (TREE_CODE (startbit) == INTEGER_CST && TREE_CODE (endbit) == INTEGER_CST && (startb = TREE_INT_CST_LOW (startbit)) % BITS_PER_UNIT == 0 && (endb = TREE_INT_CST_LOW (endbit) + 1) % BITS_PER_UNIT == 0) { emit_library_call (memset_libfunc, LCT_NORMAL, VOIDmode, 3, plus_constant (XEXP (targetx, 0), startb / BITS_PER_UNIT), Pmode, constm1_rtx, TYPE_MODE (integer_type_node), GEN_INT ((endb - startb) / BITS_PER_UNIT), TYPE_MODE (sizetype)); } else emit_library_call (setbits_libfunc, LCT_NORMAL, VOIDmode, 4, XEXP (targetx, 0), Pmode, bitlength_rtx, TYPE_MODE (sizetype), startbit_rtx, TYPE_MODE (sizetype), endbit_rtx, TYPE_MODE (sizetype)); if (REG_P (target)) emit_move_insn (target, targetx); } } else abort (); } /* Store the value of EXP (an expression tree) into a subfield of TARGET which has mode MODE and occupies BITSIZE bits, starting BITPOS bits from the start of TARGET. If MODE is VOIDmode, it means that we are storing into a bit-field. If VALUE_MODE is VOIDmode, return nothing in particular. UNSIGNEDP is not used in this case. Otherwise, return an rtx for the value stored. This rtx has mode VALUE_MODE if that is convenient to do. In this case, UNSIGNEDP must be nonzero if the value is an unsigned type. TYPE is the type of the underlying object, ALIAS_SET is the alias set for the destination. This value will (in general) be different from that for TARGET, since TARGET is a reference to the containing structure. 
*/ static rtx store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, enum machine_mode mode, tree exp, enum machine_mode value_mode, int unsignedp, tree type, int alias_set) { HOST_WIDE_INT width_mask = 0; if (TREE_CODE (exp) == ERROR_MARK) return const0_rtx; /* If we have nothing to store, do nothing unless the expression has side-effects. */ if (bitsize == 0) return expand_expr (exp, const0_rtx, VOIDmode, 0); else if (bitsize >= 0 && bitsize < HOST_BITS_PER_WIDE_INT) width_mask = ((HOST_WIDE_INT) 1 << bitsize) - 1; /* If we are storing into an unaligned field of an aligned union that is in a register, we may have the mode of TARGET being an integer mode but MODE == BLKmode. In that case, get an aligned object whose size and alignment are the same as TARGET and store TARGET into it (we can avoid the store if the field being stored is the entire width of TARGET). Then call ourselves recursively to store the field into a BLKmode version of that object. Finally, load from the object into TARGET. This is not very efficient in general, but should only be slightly more expensive than the otherwise-required unaligned accesses. Perhaps this can be cleaned up later. It's tempting to make OBJECT readonly, but it's set twice, once with emit_move_insn and once via store_field. */ if (mode == BLKmode && (REG_P (target) || GET_CODE (target) == SUBREG)) { rtx object = assign_temp (type, 0, 1, 1); rtx blk_object = adjust_address (object, BLKmode, 0); if (bitsize != (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (target))) emit_move_insn (object, target); store_field (blk_object, bitsize, bitpos, mode, exp, VOIDmode, 0, type, alias_set); emit_move_insn (target, object); /* We want to return the BLKmode version of the data. */ return blk_object; } if (GET_CODE (target) == CONCAT) { /* We're storing into a struct containing a single __complex. */ if (bitpos != 0) abort (); return store_expr (exp, target, value_mode != VOIDmode); } /* If the structure is in a register or if the component is a bit field, we cannot use addressing to access it. Use bit-field techniques or SUBREG to store in it. */ if (mode == VOIDmode || (mode != BLKmode && ! direct_store[(int) mode] && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT) || REG_P (target) || GET_CODE (target) == SUBREG /* If the field isn't aligned enough to store as an ordinary memref, store it as a bit field. */ || (mode != BLKmode && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode)) || bitpos % GET_MODE_ALIGNMENT (mode)) && SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (target))) || (bitpos % BITS_PER_UNIT != 0))) /* If the RHS and field are a constant size and the size of the RHS isn't the same size as the bitfield, we must use bitfield operations. */ || (bitsize >= 0 && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST && compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)), bitsize) != 0)) { rtx temp = expand_expr (exp, NULL_RTX, VOIDmode, 0); /* If BITSIZE is narrower than the size of the type of EXP we will be narrowing TEMP. Normally, what's wanted are the low-order bits. However, if EXP's type is a record and this is big-endian machine, we want the upper BITSIZE bits. 
*/ if (BYTES_BIG_ENDIAN && GET_MODE_CLASS (GET_MODE (temp)) == MODE_INT && bitsize < (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (temp)) && TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE) temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp, size_int (GET_MODE_BITSIZE (GET_MODE (temp)) - bitsize), NULL_RTX, 1); /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE. */ if (mode != VOIDmode && mode != BLKmode && mode != TYPE_MODE (TREE_TYPE (exp))) temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1); /* If the modes of TARGET and TEMP are both BLKmode, both must be in memory and BITPOS must be aligned on a byte boundary. If so, we simply do a block copy. */ if (GET_MODE (target) == BLKmode && GET_MODE (temp) == BLKmode) { if (!MEM_P (target) || !MEM_P (temp) || bitpos % BITS_PER_UNIT != 0) abort (); target = adjust_address (target, VOIDmode, bitpos / BITS_PER_UNIT); emit_block_move (target, temp, GEN_INT ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT), BLOCK_OP_NORMAL); return value_mode == VOIDmode ? const0_rtx : target; } /* Store the value in the bitfield. */ store_bit_field (target, bitsize, bitpos, mode, temp, int_size_in_bytes (type)); if (value_mode != VOIDmode) { /* The caller wants an rtx for the value. If possible, avoid refetching from the bitfield itself. */ if (width_mask != 0 && ! (MEM_P (target) && MEM_VOLATILE_P (target))) { tree count; enum machine_mode tmode; tmode = GET_MODE (temp); if (tmode == VOIDmode) tmode = value_mode; if (unsignedp) return expand_and (tmode, temp, gen_int_mode (width_mask, tmode), NULL_RTX); count = build_int_2 (GET_MODE_BITSIZE (tmode) - bitsize, 0); temp = expand_shift (LSHIFT_EXPR, tmode, temp, count, 0, 0); return expand_shift (RSHIFT_EXPR, tmode, temp, count, 0, 0); } return extract_bit_field (target, bitsize, bitpos, unsignedp, NULL_RTX, value_mode, VOIDmode, int_size_in_bytes (type)); } return const0_rtx; } else { rtx addr = XEXP (target, 0); rtx to_rtx = target; /* If a value is wanted, it must be the lhs; so make the address stable for multiple use. */ if (value_mode != VOIDmode && !REG_P (addr) && ! CONSTANT_ADDRESS_P (addr) /* A frame-pointer reference is already stable. */ && ! (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT && (XEXP (addr, 0) == virtual_incoming_args_rtx || XEXP (addr, 0) == virtual_stack_vars_rtx))) to_rtx = replace_equiv_address (to_rtx, copy_to_reg (addr)); /* Now build a reference to just the desired component. */ to_rtx = adjust_address (target, mode, bitpos / BITS_PER_UNIT); if (to_rtx == target) to_rtx = copy_rtx (to_rtx); MEM_SET_IN_STRUCT_P (to_rtx, 1); if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0) set_mem_alias_set (to_rtx, alias_set); return store_expr (exp, to_rtx, value_mode != VOIDmode); } } /* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF, an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these codes and find the ultimate containing object, which we return. We set *PBITSIZE to the size in bits that we want, *PBITPOS to the bit position, and *PUNSIGNEDP to the signedness of the field. If the position of the field is variable, we store a tree giving the variable offset (in units) in *POFFSET. This offset is in addition to the bit position. If the position is not variable, we store 0 in *POFFSET. If any of the extraction expressions is volatile, we store 1 in *PVOLATILEP. Otherwise we don't change that. If the field is a bit-field, *PMODE is set to VOIDmode. 
Otherwise, it is a mode that can be used to access the field. In that case, *PBITSIZE is redundant. If the field describes a variable-sized object, *PMODE is set to VOIDmode and *PBITSIZE is set to -1. An access cannot be made in this case, but the address of the object can be found. */ tree get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, HOST_WIDE_INT *pbitpos, tree *poffset, enum machine_mode *pmode, int *punsignedp, int *pvolatilep) { tree size_tree = 0; enum machine_mode mode = VOIDmode; tree offset = size_zero_node; tree bit_offset = bitsize_zero_node; tree tem; /* First get the mode, signedness, and size. We do this from just the outermost expression. */ if (TREE_CODE (exp) == COMPONENT_REF) { size_tree = DECL_SIZE (TREE_OPERAND (exp, 1)); if (! DECL_BIT_FIELD (TREE_OPERAND (exp, 1))) mode = DECL_MODE (TREE_OPERAND (exp, 1)); *punsignedp = DECL_UNSIGNED (TREE_OPERAND (exp, 1)); } else if (TREE_CODE (exp) == BIT_FIELD_REF) { size_tree = TREE_OPERAND (exp, 1); *punsignedp = BIT_FIELD_REF_UNSIGNED (exp); } else { mode = TYPE_MODE (TREE_TYPE (exp)); *punsignedp = TYPE_UNSIGNED (TREE_TYPE (exp)); if (mode == BLKmode) size_tree = TYPE_SIZE (TREE_TYPE (exp)); else *pbitsize = GET_MODE_BITSIZE (mode); } if (size_tree != 0) { if (! host_integerp (size_tree, 1)) mode = BLKmode, *pbitsize = -1; else *pbitsize = tree_low_cst (size_tree, 1); } /* Compute cumulative bit-offset for nested component-refs and array-refs, and find the ultimate containing object. */ while (1) { if (TREE_CODE (exp) == BIT_FIELD_REF) bit_offset = size_binop (PLUS_EXPR, bit_offset, TREE_OPERAND (exp, 2)); else if (TREE_CODE (exp) == COMPONENT_REF) { tree field = TREE_OPERAND (exp, 1); tree this_offset = component_ref_field_offset (exp); /* If this field hasn't been filled in yet, don't go past it. This should only happen when folding expressions made during type construction. */ if (this_offset == 0) break; offset = size_binop (PLUS_EXPR, offset, this_offset); bit_offset = size_binop (PLUS_EXPR, bit_offset, DECL_FIELD_BIT_OFFSET (field)); /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */ } else if (TREE_CODE (exp) == ARRAY_REF || TREE_CODE (exp) == ARRAY_RANGE_REF) { tree index = TREE_OPERAND (exp, 1); tree low_bound = array_ref_low_bound (exp); tree unit_size = array_ref_element_size (exp); /* We assume all arrays have sizes that are a multiple of a byte. First subtract the lower bound, if any, in the type of the index, then convert to sizetype and multiply by the size of the array element. */ if (! integer_zerop (low_bound)) index = fold (build (MINUS_EXPR, TREE_TYPE (index), index, low_bound)); offset = size_binop (PLUS_EXPR, offset, size_binop (MULT_EXPR, convert (sizetype, index), unit_size)); } /* We can go inside most conversions: all NON_VALUE_EXPRs, all normal conversions that don't change the mode, and all view conversions except those that need to "step up" the alignment. */ else if (TREE_CODE (exp) != NON_LVALUE_EXPR && ! (TREE_CODE (exp) == VIEW_CONVERT_EXPR && ! ((TYPE_ALIGN (TREE_TYPE (exp)) > TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0)))) && STRICT_ALIGNMENT && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))) < BIGGEST_ALIGNMENT) && (TYPE_ALIGN_OK (TREE_TYPE (exp)) || TYPE_ALIGN_OK (TREE_TYPE (TREE_OPERAND (exp, 0)))))) && ! ((TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR) && (TYPE_MODE (TREE_TYPE (exp)) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))) break; /* If any reference in the chain is volatile, the effect is volatile. 
*/ if (TREE_THIS_VOLATILE (exp)) *pvolatilep = 1; exp = TREE_OPERAND (exp, 0); } /* If OFFSET is constant, see if we can return the whole thing as a constant bit position. Otherwise, split it up. */ if (host_integerp (offset, 0) && 0 != (tem = size_binop (MULT_EXPR, convert (bitsizetype, offset), bitsize_unit_node)) && 0 != (tem = size_binop (PLUS_EXPR, tem, bit_offset)) && host_integerp (tem, 0)) *pbitpos = tree_low_cst (tem, 0), *poffset = 0; else *pbitpos = tree_low_cst (bit_offset, 0), *poffset = offset; *pmode = mode; return exp; } /* Return a tree of sizetype representing the size, in bytes, of the element of EXP, an ARRAY_REF. */ tree array_ref_element_size (tree exp) { tree aligned_size = TREE_OPERAND (exp, 3); tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))); /* If a size was specified in the ARRAY_REF, it's the size measured in alignment units of the element type. So multiply by that value. */ if (aligned_size) return size_binop (MULT_EXPR, aligned_size, size_int (TYPE_ALIGN (elmt_type) / BITS_PER_UNIT)); /* Otherwise, take the size from that of the element type. Substitute any PLACEHOLDER_EXPR that we have. */ else return SUBSTITUTE_PLACEHOLDER_IN_EXPR (TYPE_SIZE_UNIT (elmt_type), exp); } /* Return a tree representing the lower bound of the array mentioned in EXP, an ARRAY_REF. */ tree array_ref_low_bound (tree exp) { tree domain_type = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (exp, 0))); /* If a lower bound is specified in EXP, use it. */ if (TREE_OPERAND (exp, 2)) return TREE_OPERAND (exp, 2); /* Otherwise, if there is a domain type and it has a lower bound, use it, substituting for a PLACEHOLDER_EXPR as needed. */ if (domain_type && TYPE_MIN_VALUE (domain_type)) return SUBSTITUTE_PLACEHOLDER_IN_EXPR (TYPE_MIN_VALUE (domain_type), exp); /* Otherwise, return a zero of the appropriate type. */ return fold_convert (TREE_TYPE (TREE_OPERAND (exp, 1)), integer_zero_node); } /* Return a tree representing the offset, in bytes, of the field referenced by EXP. This does not include any offset in DECL_FIELD_BIT_OFFSET. */ tree component_ref_field_offset (tree exp) { tree aligned_offset = TREE_OPERAND (exp, 2); tree field = TREE_OPERAND (exp, 1); /* If an offset was specified in the COMPONENT_REF, it's the offset measured in units of DECL_OFFSET_ALIGN / BITS_PER_UNIT. So multiply by that value. */ if (aligned_offset) return size_binop (MULT_EXPR, aligned_offset, size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT)); /* Otherwise, take the offset from that of the field. Substitute any PLACEHOLDER_EXPR that we have. */ else return SUBSTITUTE_PLACEHOLDER_IN_EXPR (DECL_FIELD_OFFSET (field), exp); } /* Return 1 if T is an expression that get_inner_reference handles. */ int handled_component_p (tree t) { switch (TREE_CODE (t)) { case BIT_FIELD_REF: case COMPONENT_REF: case ARRAY_REF: case ARRAY_RANGE_REF: case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: return 1; /* ??? Sure they are handled, but get_inner_reference may return a different PBITSIZE, depending upon whether the expression is wrapped up in a NOP_EXPR or not, e.g. for bitfields. */ case NOP_EXPR: case CONVERT_EXPR: return (TYPE_MODE (TREE_TYPE (t)) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (t, 0)))); default: return 0; } } /* Given an rtx VALUE that may contain additions and multiplications, return an equivalent value that just refers to a register, memory, or constant. This is done by generating instructions to perform the arithmetic and returning a pseudo-register containing the value. 
The returned value may be a REG, SUBREG, MEM or constant. */ rtx force_operand (rtx value, rtx target) { rtx op1, op2; /* Use subtarget as the target for operand 0 of a binary operation. */ rtx subtarget = get_subtarget (target); enum rtx_code code = GET_CODE (value); /* Check for subreg applied to an expression produced by loop optimizer. */ if (code == SUBREG && !REG_P (SUBREG_REG (value)) && !MEM_P (SUBREG_REG (value))) { value = simplify_gen_subreg (GET_MODE (value), force_reg (GET_MODE (SUBREG_REG (value)), force_operand (SUBREG_REG (value), NULL_RTX)), GET_MODE (SUBREG_REG (value)), SUBREG_BYTE (value)); code = GET_CODE (value); } /* Check for a PIC address load. */ if ((code == PLUS || code == MINUS) && XEXP (value, 0) == pic_offset_table_rtx && (GET_CODE (XEXP (value, 1)) == SYMBOL_REF || GET_CODE (XEXP (value, 1)) == LABEL_REF || GET_CODE (XEXP (value, 1)) == CONST)) { if (!subtarget) subtarget = gen_reg_rtx (GET_MODE (value)); emit_move_insn (subtarget, value); return subtarget; } if (code == ZERO_EXTEND || code == SIGN_EXTEND) { if (!target) target = gen_reg_rtx (GET_MODE (value)); convert_move (target, force_operand (XEXP (value, 0), NULL), code == ZERO_EXTEND); return target; } if (ARITHMETIC_P (value)) { op2 = XEXP (value, 1); if (!CONSTANT_P (op2) && !(REG_P (op2) && op2 != subtarget)) subtarget = 0; if (code == MINUS && GET_CODE (op2) == CONST_INT) { code = PLUS; op2 = negate_rtx (GET_MODE (value), op2); } /* Check for an addition with OP2 a constant integer and our first operand a PLUS of a virtual register and something else. In that case, we want to emit the sum of the virtual register and the constant first and then add the other value. This allows virtual register instantiation to simply modify the constant rather than creating another one around this addition. */ if (code == PLUS && GET_CODE (op2) == CONST_INT && GET_CODE (XEXP (value, 0)) == PLUS && REG_P (XEXP (XEXP (value, 0), 0)) && REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER && REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER) { rtx temp = expand_simple_binop (GET_MODE (value), code, XEXP (XEXP (value, 0), 0), op2, subtarget, 0, OPTAB_LIB_WIDEN); return expand_simple_binop (GET_MODE (value), code, temp, force_operand (XEXP (XEXP (value, 0), 1), 0), target, 0, OPTAB_LIB_WIDEN); } op1 = force_operand (XEXP (value, 0), subtarget); op2 = force_operand (op2, NULL_RTX); switch (code) { case MULT: return expand_mult (GET_MODE (value), op1, op2, target, 1); case DIV: if (!INTEGRAL_MODE_P (GET_MODE (value))) return expand_simple_binop (GET_MODE (value), code, op1, op2, target, 1, OPTAB_LIB_WIDEN); else return expand_divmod (0, FLOAT_MODE_P (GET_MODE (value)) ? 
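/* Illustrative note, not part of the original source: two of the
   transformations performed by force_operand above, written as RTL
   pseudo-templates with made-up register numbers.

     (minus (reg 60) (const_int 16))
	is rewritten as (plus (reg 60) (const_int -16)) before expansion,
	so only the PLUS path has to handle constant offsets.

     (plus (plus (reg virtual-stack-vars) (reg 61)) (const_int 8))
	is expanded by first forming the sum of the virtual register and
	the constant and then adding (reg 61), so that virtual register
	instantiation can later fold the constant into the frame offset
	instead of creating another addition.  */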
RDIV_EXPR : TRUNC_DIV_EXPR, GET_MODE (value), op1, op2, target, 0); break; case MOD: return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2, target, 0); break; case UDIV: return expand_divmod (0, TRUNC_DIV_EXPR, GET_MODE (value), op1, op2, target, 1); break; case UMOD: return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2, target, 1); break; case ASHIFTRT: return expand_simple_binop (GET_MODE (value), code, op1, op2, target, 0, OPTAB_LIB_WIDEN); break; default: return expand_simple_binop (GET_MODE (value), code, op1, op2, target, 1, OPTAB_LIB_WIDEN); } } if (UNARY_P (value)) { op1 = force_operand (XEXP (value, 0), NULL_RTX); return expand_simple_unop (GET_MODE (value), code, op1, target, 0); } #ifdef INSN_SCHEDULING /* On machines that have insn scheduling, we want all memory reference to be explicit, so we need to deal with such paradoxical SUBREGs. */ if (GET_CODE (value) == SUBREG && MEM_P (SUBREG_REG (value)) && (GET_MODE_SIZE (GET_MODE (value)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (value))))) value = simplify_gen_subreg (GET_MODE (value), force_reg (GET_MODE (SUBREG_REG (value)), force_operand (SUBREG_REG (value), NULL_RTX)), GET_MODE (SUBREG_REG (value)), SUBREG_BYTE (value)); #endif return value; } /* Subroutine of expand_expr: return nonzero iff there is no way that EXP can reference X, which is being modified. TOP_P is nonzero if this call is going to be used to determine whether we need a temporary for EXP, as opposed to a recursive call to this function. It is always safe for this routine to return zero since it merely searches for optimization opportunities. */ int safe_from_p (rtx x, tree exp, int top_p) { rtx exp_rtl = 0; int i, nops; if (x == 0 /* If EXP has varying size, we MUST use a target since we currently have no way of allocating temporaries of variable size (except for arrays that have TYPE_ARRAY_MAX_SIZE set). So we assume here that something at a higher level has prevented a clash. This is somewhat bogus, but the best we can do. Only do this when X is BLKmode and when we are at the top level. */ || (top_p && TREE_TYPE (exp) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp)) && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST && (TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)) == NULL_TREE || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp))) != INTEGER_CST) && GET_MODE (x) == BLKmode) /* If X is in the outgoing argument area, it is always safe. */ || (MEM_P (x) && (XEXP (x, 0) == virtual_outgoing_args_rtx || (GET_CODE (XEXP (x, 0)) == PLUS && XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx)))) return 1; /* If this is a subreg of a hard register, declare it unsafe, otherwise, find the underlying pseudo. */ if (GET_CODE (x) == SUBREG) { x = SUBREG_REG (x); if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER) return 0; } /* Now look at our tree code and possibly recurse. */ switch (TREE_CODE_CLASS (TREE_CODE (exp))) { case 'd': exp_rtl = DECL_RTL_IF_SET (exp); break; case 'c': return 1; case 'x': if (TREE_CODE (exp) == TREE_LIST) { while (1) { if (TREE_VALUE (exp) && !safe_from_p (x, TREE_VALUE (exp), 0)) return 0; exp = TREE_CHAIN (exp); if (!exp) return 1; if (TREE_CODE (exp) != TREE_LIST) return safe_from_p (x, exp, 0); } } else if (TREE_CODE (exp) == ERROR_MARK) return 1; /* An already-visited SAVE_EXPR? */ else return 0; case 's': /* The only case we look at here is the DECL_INITIAL inside a DECL_EXPR. 
*/ return (TREE_CODE (exp) != DECL_EXPR || TREE_CODE (DECL_EXPR_DECL (exp)) != VAR_DECL || !DECL_INITIAL (DECL_EXPR_DECL (exp)) || safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0)); case '2': case '<': if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0)) return 0; /* Fall through. */ case '1': return safe_from_p (x, TREE_OPERAND (exp, 0), 0); case 'e': case 'r': /* Now do code-specific tests. EXP_RTL is set to any rtx we find in the expression. If it is set, we conflict iff we are that rtx or both are in memory. Otherwise, we check all operands of the expression recursively. */ switch (TREE_CODE (exp)) { case ADDR_EXPR: /* If the operand is static or we are static, we can't conflict. Likewise if we don't conflict with the operand at all. */ if (staticp (TREE_OPERAND (exp, 0)) || TREE_STATIC (exp) || safe_from_p (x, TREE_OPERAND (exp, 0), 0)) return 1; /* Otherwise, the only way this can conflict is if we are taking the address of a DECL a that address if part of X, which is very rare. */ exp = TREE_OPERAND (exp, 0); if (DECL_P (exp)) { if (!DECL_RTL_SET_P (exp) || !MEM_P (DECL_RTL (exp))) return 0; else exp_rtl = XEXP (DECL_RTL (exp), 0); } break; case INDIRECT_REF: if (MEM_P (x) && alias_sets_conflict_p (MEM_ALIAS_SET (x), get_alias_set (exp))) return 0; break; case CALL_EXPR: /* Assume that the call will clobber all hard registers and all of memory. */ if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER) || MEM_P (x)) return 0; break; case WITH_CLEANUP_EXPR: exp_rtl = WITH_CLEANUP_EXPR_RTL (exp); break; case CLEANUP_POINT_EXPR: case SAVE_EXPR: return safe_from_p (x, TREE_OPERAND (exp, 0), 0); case BIND_EXPR: /* The only operand we look at is operand 1. The rest aren't part of the expression. */ return safe_from_p (x, TREE_OPERAND (exp, 1), 0); default: break; } /* If we have an rtx, we do not need to scan our operands. */ if (exp_rtl) break; nops = first_rtl_op (TREE_CODE (exp)); for (i = 0; i < nops; i++) if (TREE_OPERAND (exp, i) != 0 && ! safe_from_p (x, TREE_OPERAND (exp, i), 0)) return 0; /* If this is a language-specific tree code, it may require special handling. */ if ((unsigned int) TREE_CODE (exp) >= (unsigned int) LAST_AND_UNUSED_TREE_CODE && !lang_hooks.safe_from_p (x, exp)) return 0; } /* If we have an rtl, find any enclosed object. Then see if we conflict with it. */ if (exp_rtl) { if (GET_CODE (exp_rtl) == SUBREG) { exp_rtl = SUBREG_REG (exp_rtl); if (REG_P (exp_rtl) && REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER) return 0; } /* If the rtl is X, then it is not safe. Otherwise, it is unless both are memory and they conflict. */ return ! (rtx_equal_p (x, exp_rtl) || (MEM_P (x) && MEM_P (exp_rtl) && true_dependence (exp_rtl, VOIDmode, x, rtx_addr_varies_p))); } /* If we reach here, it is safe. */ return 1; } /* Subroutine of expand_expr: return rtx if EXP is a variable or parameter; else return 0. */ static rtx var_rtx (tree exp) { STRIP_NOPS (exp); switch (TREE_CODE (exp)) { case PARM_DECL: case VAR_DECL: return DECL_RTL (exp); default: return 0; } } /* Return the highest power of two that EXP is known to be a multiple of. This is used in updating alignment of MEMs in array references. */ static unsigned HOST_WIDE_INT highest_pow2_factor (tree exp) { unsigned HOST_WIDE_INT c0, c1; switch (TREE_CODE (exp)) { case INTEGER_CST: /* We can find the lowest bit that's a one. If the low HOST_BITS_PER_WIDE_INT bits are zero, return BIGGEST_ALIGNMENT. We need to handle this case since we can find it in a COND_EXPR, a MIN_EXPR, or a MAX_EXPR. 
If the constant overflows, we have an erroneous program, so return BIGGEST_ALIGNMENT to avoid any later ICE. */ if (TREE_CONSTANT_OVERFLOW (exp)) return BIGGEST_ALIGNMENT; else { /* Note: tree_low_cst is intentionally not used here, we don't care about the upper bits. */ c0 = TREE_INT_CST_LOW (exp); c0 &= -c0; return c0 ? c0 : BIGGEST_ALIGNMENT; } break; case PLUS_EXPR: case MINUS_EXPR: case MIN_EXPR: case MAX_EXPR: c0 = highest_pow2_factor (TREE_OPERAND (exp, 0)); c1 = highest_pow2_factor (TREE_OPERAND (exp, 1)); return MIN (c0, c1); case MULT_EXPR: c0 = highest_pow2_factor (TREE_OPERAND (exp, 0)); c1 = highest_pow2_factor (TREE_OPERAND (exp, 1)); return c0 * c1; case ROUND_DIV_EXPR: case TRUNC_DIV_EXPR: case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: if (integer_pow2p (TREE_OPERAND (exp, 1)) && host_integerp (TREE_OPERAND (exp, 1), 1)) { c0 = highest_pow2_factor (TREE_OPERAND (exp, 0)); c1 = tree_low_cst (TREE_OPERAND (exp, 1), 1); return MAX (1, c0 / c1); } break; case NON_LVALUE_EXPR: case NOP_EXPR: case CONVERT_EXPR: case SAVE_EXPR: return highest_pow2_factor (TREE_OPERAND (exp, 0)); case COMPOUND_EXPR: return highest_pow2_factor (TREE_OPERAND (exp, 1)); case COND_EXPR: c0 = highest_pow2_factor (TREE_OPERAND (exp, 1)); c1 = highest_pow2_factor (TREE_OPERAND (exp, 2)); return MIN (c0, c1); default: break; } return 1; } /* Similar, except that the alignment requirements of TARGET are taken into account. Assume it is at least as aligned as its type, unless it is a COMPONENT_REF in which case the layout of the structure gives the alignment. */ static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (tree target, tree exp) { unsigned HOST_WIDE_INT target_align, factor; factor = highest_pow2_factor (exp); if (TREE_CODE (target) == COMPONENT_REF) target_align = DECL_ALIGN (TREE_OPERAND (target, 1)) / BITS_PER_UNIT; else target_align = TYPE_ALIGN (TREE_TYPE (target)) / BITS_PER_UNIT; return MAX (factor, target_align); } /* Expands variable VAR. */ void expand_var (tree var) { if (DECL_EXTERNAL (var)) return; if (TREE_STATIC (var)) /* If this is an inlined copy of a static local variable, look up the original decl. */ var = DECL_ORIGIN (var); if (TREE_STATIC (var) ? !TREE_ASM_WRITTEN (var) : !DECL_RTL_SET_P (var)) { if (TREE_CODE (var) == VAR_DECL && DECL_DEFER_OUTPUT (var)) { /* Prepare a mem & address for the decl. */ rtx x; if (TREE_STATIC (var)) abort (); x = gen_rtx_MEM (DECL_MODE (var), gen_reg_rtx (Pmode)); set_mem_attributes (x, var, 1); SET_DECL_RTL (var, x); } else if (lang_hooks.expand_decl (var)) /* OK. */; else if (TREE_CODE (var) == VAR_DECL && !TREE_STATIC (var)) expand_decl (var); else if (TREE_CODE (var) == VAR_DECL && TREE_STATIC (var)) rest_of_decl_compilation (var, NULL, 0, 0); else if (TREE_CODE (var) == TYPE_DECL || TREE_CODE (var) == CONST_DECL || TREE_CODE (var) == FUNCTION_DECL || TREE_CODE (var) == LABEL_DECL) /* No expansion needed. */; else abort (); } } /* Expands declarations of variables in list VARS. */ static void expand_vars (tree vars) { for (; vars; vars = TREE_CHAIN (vars)) { tree var = vars; if (DECL_EXTERNAL (var)) continue; expand_var (var); expand_decl_init (var); } } /* Subroutine of expand_expr. Expand the two operands of a binary expression EXP0 and EXP1 placing the results in OP0 and OP1. The value may be stored in TARGET if TARGET is nonzero. The MODIFIER argument is as documented by expand_expr. */ static void expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1, enum expand_modifier modifier) { if (! 
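/* Illustrative sketch, not part of the original source: the lowest-set-bit
   trick used by highest_pow2_factor above for INTEGER_CST operands.  In
   two's complement, c & -c isolates the least significant one bit, which
   is the largest power of two dividing c.  */
#if 0
#include <assert.h>

static void
lowest_set_bit_examples (void)
{
  unsigned long c;

  c = 24;  assert ((c & -c) == 8);	/* 24 = 8 * 3: alignment factor 8.  */
  c = 40;  assert ((c & -c) == 8);	/* 40 = 8 * 5: likewise 8.  */
  c = 7;   assert ((c & -c) == 1);	/* Odd values contribute no alignment.  */
  c = 64;  assert ((c & -c) == 64);	/* Exact powers of two return themselves.  */
}
#endif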
safe_from_p (target, exp1, 1)) target = 0; if (operand_equal_p (exp0, exp1, 0)) { *op0 = expand_expr (exp0, target, VOIDmode, modifier); *op1 = copy_rtx (*op0); } else { /* If we need to preserve evaluation order, copy exp0 into its own temporary variable so that it can't be clobbered by exp1. */ if (flag_evaluation_order && TREE_SIDE_EFFECTS (exp1)) exp0 = save_expr (exp0); *op0 = expand_expr (exp0, target, VOIDmode, modifier); *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier); } } /* expand_expr: generate code for computing expression EXP. An rtx for the computed value is returned. The value is never null. In the case of a void EXP, const0_rtx is returned. The value may be stored in TARGET if TARGET is nonzero. TARGET is just a suggestion; callers must assume that the rtx returned may not be the same as TARGET. If TARGET is CONST0_RTX, it means that the value will be ignored. If TMODE is not VOIDmode, it suggests generating the result in mode TMODE. But this is done only when convenient. Otherwise, TMODE is ignored and the value generated in its natural mode. TMODE is just a suggestion; callers must assume that the rtx returned may not have mode TMODE. Note that TARGET may have neither TMODE nor MODE. In that case, it probably will not be used. If MODIFIER is EXPAND_SUM then when EXP is an addition we can return an rtx of the form (MULT (REG ...) (CONST_INT ...)) or a nest of (PLUS ...) and (MINUS ...) where the terms are products as above, or REG or MEM, or constant. Ordinarily in such cases we would output mul or add instructions and then return a pseudo reg containing the sum. EXPAND_INITIALIZER is much like EXPAND_SUM except that it also marks a label as absolutely required (it can't be dead). It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns. This is used for outputting expressions used in initializers. EXPAND_CONST_ADDRESS says that it is okay to return a MEM with a constant address even if that address is not normally legitimate. EXPAND_INITIALIZER and EXPAND_SUM also have this effect. EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for a call parameter. Such targets require special care as we haven't yet marked TARGET so that it's safe from being trashed by libcalls. We don't want to use TARGET for anything but the final result; Intermediate values must go elsewhere. Additionally, calls to emit_block_move will be flagged with BLOCK_OP_CALL_PARM. If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the DECL_RTL of the VAR_DECL. *ALT_RTL is also set if EXP is a COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on recursively. */ static rtx expand_expr_real_1 (tree, rtx, enum machine_mode, enum expand_modifier, rtx *); rtx expand_expr_real (tree exp, rtx target, enum machine_mode tmode, enum expand_modifier modifier, rtx *alt_rtl) { int rn = -1; rtx ret, last = NULL; /* Handle ERROR_MARK before anybody tries to access its type. */ if (TREE_CODE (exp) == ERROR_MARK || TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK) { ret = CONST0_RTX (tmode); return ret ? ret : const0_rtx; } if (flag_non_call_exceptions) { rn = lookup_stmt_eh_region (exp); /* If rn < 0, then either (1) tree-ssa not used or (2) doesn't throw. */ if (rn >= 0) last = get_last_insn (); } /* If this is an expression of some kind and it has an associated line number, then emit the line number before expanding the expression. 
We need to save and restore the file and line information so that errors discovered during expansion are emitted with the right information. It would be better of the diagnostic routines used the file/line information embedded in the tree nodes rather than globals. */ if (cfun && EXPR_HAS_LOCATION (exp)) { location_t saved_location = input_location; input_location = EXPR_LOCATION (exp); emit_line_note (input_location); /* Record where the insns produced belong. */ record_block_change (TREE_BLOCK (exp)); ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl); input_location = saved_location; } else { ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl); } /* If using non-call exceptions, mark all insns that may trap. expand_call() will mark CALL_INSNs before we get to this code, but it doesn't handle libcalls, and these may trap. */ if (rn >= 0) { rtx insn; for (insn = next_real_insn (last); insn; insn = next_real_insn (insn)) { if (! find_reg_note (insn, REG_EH_REGION, NULL_RTX) /* If we want exceptions for non-call insns, any may_trap_p instruction may throw. */ && GET_CODE (PATTERN (insn)) != CLOBBER && GET_CODE (PATTERN (insn)) != USE && (GET_CODE (insn) == CALL_INSN || may_trap_p (PATTERN (insn)))) { REG_NOTES (insn) = alloc_EXPR_LIST (REG_EH_REGION, GEN_INT (rn), REG_NOTES (insn)); } } } return ret; } static rtx expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, enum expand_modifier modifier, rtx *alt_rtl) { rtx op0, op1, temp; tree type = TREE_TYPE (exp); int unsignedp; enum machine_mode mode; enum tree_code code = TREE_CODE (exp); optab this_optab; rtx subtarget, original_target; int ignore; tree context; mode = TYPE_MODE (type); unsignedp = TYPE_UNSIGNED (type); /* Use subtarget as the target for operand 0 of a binary operation. */ subtarget = get_subtarget (target); original_target = target; ignore = (target == const0_rtx || ((code == NON_LVALUE_EXPR || code == NOP_EXPR || code == CONVERT_EXPR || code == REFERENCE_EXPR || code == COND_EXPR || code == VIEW_CONVERT_EXPR) && TREE_CODE (type) == VOID_TYPE)); /* If we are going to ignore this result, we need only do something if there is a side-effect somewhere in the expression. If there is, short-circuit the most common cases here. Note that we must not call expand_expr with anything but const0_rtx in case this is an initial expansion of a size that contains a PLACEHOLDER_EXPR. */ if (ignore) { if (! TREE_SIDE_EFFECTS (exp)) return const0_rtx; /* Ensure we reference a volatile object even if value is ignored, but don't do this if all we are doing is taking its address. */ if (TREE_THIS_VOLATILE (exp) && TREE_CODE (exp) != FUNCTION_DECL && mode != VOIDmode && mode != BLKmode && modifier != EXPAND_CONST_ADDRESS) { temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier); if (MEM_P (temp)) temp = copy_to_reg (temp); return const0_rtx; } if (TREE_CODE_CLASS (code) == '1' || code == COMPONENT_REF || code == INDIRECT_REF || code == BUFFER_REF) return expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, modifier); else if (TREE_CODE_CLASS (code) == '2' || TREE_CODE_CLASS (code) == '<' || code == ARRAY_REF || code == ARRAY_RANGE_REF) { expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, modifier); expand_expr (TREE_OPERAND (exp, 1), const0_rtx, VOIDmode, modifier); return const0_rtx; } else if ((code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR) && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 1))) /* If the second operand has no side effects, just evaluate the first. 
*/ return expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, modifier); else if (code == BIT_FIELD_REF) { expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, modifier); expand_expr (TREE_OPERAND (exp, 1), const0_rtx, VOIDmode, modifier); expand_expr (TREE_OPERAND (exp, 2), const0_rtx, VOIDmode, modifier); return const0_rtx; } target = 0; } /* If will do cse, generate all results into pseudo registers since 1) that allows cse to find more things and 2) otherwise cse could produce an insn the machine cannot support. An exception is a CONSTRUCTOR into a multi-word MEM: that's much more likely to be most efficient into the MEM. Another is a CALL_EXPR which must return in memory. */ if (! cse_not_expected && mode != BLKmode && target && (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER) && ! (code == CONSTRUCTOR && GET_MODE_SIZE (mode) > UNITS_PER_WORD) && ! (code == CALL_EXPR && aggregate_value_p (exp, exp))) target = 0; switch (code) { case LABEL_DECL: { tree function = decl_function_context (exp); temp = label_rtx (exp); temp = gen_rtx_LABEL_REF (Pmode, temp); if (function != current_function_decl && function != 0) LABEL_REF_NONLOCAL_P (temp) = 1; temp = gen_rtx_MEM (FUNCTION_MODE, temp); return temp; } case PARM_DECL: if (!DECL_RTL_SET_P (exp)) { error ("%Jprior parameter's size depends on '%D'", exp, exp); return CONST0_RTX (mode); } /* ... fall through ... */ case VAR_DECL: /* If a static var's type was incomplete when the decl was written, but the type is complete now, lay out the decl now. */ if (DECL_SIZE (exp) == 0 && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp)) && (TREE_STATIC (exp) || DECL_EXTERNAL (exp))) layout_decl (exp, 0); /* ... fall through ... */ case FUNCTION_DECL: case RESULT_DECL: if (DECL_RTL (exp) == 0) abort (); /* Ensure variable marked as used even if it doesn't go through a parser. If it hasn't be used yet, write out an external definition. */ if (! TREE_USED (exp)) { assemble_external (exp); TREE_USED (exp) = 1; } /* Show we haven't gotten RTL for this yet. */ temp = 0; /* Handle variables inherited from containing functions. */ context = decl_function_context (exp); if (context != 0 && context != current_function_decl /* If var is static, we don't need a static chain to access it. */ && ! (MEM_P (DECL_RTL (exp)) && CONSTANT_P (XEXP (DECL_RTL (exp), 0)))) { rtx addr; /* Mark as non-local and addressable. */ DECL_NONLOCAL (exp) = 1; if (DECL_NO_STATIC_CHAIN (current_function_decl)) abort (); lang_hooks.mark_addressable (exp); if (!MEM_P (DECL_RTL (exp))) abort (); addr = XEXP (DECL_RTL (exp), 0); if (MEM_P (addr)) addr = replace_equiv_address (addr, fix_lexical_addr (XEXP (addr, 0), exp)); else addr = fix_lexical_addr (addr, exp); temp = replace_equiv_address (DECL_RTL (exp), addr); } /* This is the case of an array whose size is to be determined from its initializer, while the initializer is still being parsed. See expand_decl. */ else if (MEM_P (DECL_RTL (exp)) && REG_P (XEXP (DECL_RTL (exp), 0))) temp = validize_mem (DECL_RTL (exp)); /* If DECL_RTL is memory, we are in the normal case and either the address is not valid or it is not a register and -fforce-addr is specified, get the address into a register. */ else if (MEM_P (DECL_RTL (exp)) && modifier != EXPAND_CONST_ADDRESS && modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER && (! 
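/* Illustrative sketch, not part of the original source: what the IGNORE
   handling earlier in expand_expr_real_1 means at the source level.  The
   declarations below are made up for the example.  */
#if 0
extern volatile int status_reg;
extern int plain;
extern int f (void);

void
values_ignored (void)
{
  plain + 1;	  /* No side effects: TREE_SIDE_EFFECTS is clear, so this
		     expands to nothing at all.  */
  status_reg;	  /* Volatile read: a load is still emitted even though the
		     value is thrown away.  */
  f () + plain;	  /* Binary operator whose value is ignored: both operands
		     are expanded with a const0_rtx target, so the call to
		     f is kept but no addition is generated.  */
}
#endif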
memory_address_p (DECL_MODE (exp), XEXP (DECL_RTL (exp), 0)) || (flag_force_addr && !REG_P (XEXP (DECL_RTL (exp), 0))))) { if (alt_rtl) *alt_rtl = DECL_RTL (exp); temp = replace_equiv_address (DECL_RTL (exp), copy_rtx (XEXP (DECL_RTL (exp), 0))); } /* If we got something, return it. But first, set the alignment if the address is a register. */ if (temp != 0) { if (MEM_P (temp) && REG_P (XEXP (temp, 0))) mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp)); return temp; } /* If the mode of DECL_RTL does not match that of the decl, it must be a promoted value. We return a SUBREG of the wanted mode, but mark it so that we know that it was already extended. */ if (REG_P (DECL_RTL (exp)) && GET_MODE (DECL_RTL (exp)) != DECL_MODE (exp)) { /* Get the signedness used for this variable. Ensure we get the same mode we got when the variable was declared. */ if (GET_MODE (DECL_RTL (exp)) != promote_mode (type, DECL_MODE (exp), &unsignedp, (TREE_CODE (exp) == RESULT_DECL ? 1 : 0))) abort (); temp = gen_lowpart_SUBREG (mode, DECL_RTL (exp)); SUBREG_PROMOTED_VAR_P (temp) = 1; SUBREG_PROMOTED_UNSIGNED_SET (temp, unsignedp); return temp; } return DECL_RTL (exp); case INTEGER_CST: temp = immed_double_const (TREE_INT_CST_LOW (exp), TREE_INT_CST_HIGH (exp), mode); /* ??? If overflow is set, fold will have done an incomplete job, which can result in (plus xx (const_int 0)), which can get simplified by validate_replace_rtx during virtual register instantiation, which can result in unrecognizable insns. Avoid this by forcing all overflows into registers. */ if (TREE_CONSTANT_OVERFLOW (exp) && modifier != EXPAND_INITIALIZER) temp = force_reg (mode, temp); return temp; case VECTOR_CST: return const_vector_from_tree (exp); case CONST_DECL: return expand_expr (DECL_INITIAL (exp), target, VOIDmode, modifier); case REAL_CST: /* If optimized, generate immediate CONST_DOUBLE which will be turned into memory by reload if necessary. We used to force a register so that loop.c could see it. But this does not allow gen_* patterns to perform optimizations with the constants. It also produces two insns in cases like "x = 1.0;". On most machines, floating-point constants are not permitted in many insns, so we'd end up copying it to a register in any case. Now, we do the copying in expand_binop, if appropriate. */ return CONST_DOUBLE_FROM_REAL_VALUE (TREE_REAL_CST (exp), TYPE_MODE (TREE_TYPE (exp))); case COMPLEX_CST: /* Handle evaluating a complex constant in a CONCAT target. */ if (original_target && GET_CODE (original_target) == CONCAT) { enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp))); rtx rtarg, itarg; rtarg = XEXP (original_target, 0); itarg = XEXP (original_target, 1); /* Move the real and imaginary parts separately. */ op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, 0); op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, 0); if (op0 != rtarg) emit_move_insn (rtarg, op0); if (op1 != itarg) emit_move_insn (itarg, op1); return original_target; } /* ... fall through ... */ case STRING_CST: temp = output_constant_def (exp, 1); /* temp contains a constant address. On RISC machines where a constant address isn't valid, make some insns to get that address into a register. */ if (modifier != EXPAND_CONST_ADDRESS && modifier != EXPAND_INITIALIZER && modifier != EXPAND_SUM && (! 
memory_address_p (mode, XEXP (temp, 0)) || flag_force_addr)) return replace_equiv_address (temp, copy_rtx (XEXP (temp, 0))); return temp; case SAVE_EXPR: { tree val = TREE_OPERAND (exp, 0); rtx ret = expand_expr_real_1 (val, target, tmode, modifier, alt_rtl); if (TREE_CODE (val) != VAR_DECL || !DECL_ARTIFICIAL (val)) { /* We can indeed still hit this case, typically via builtin expanders calling save_expr immediately before expanding something. Assume this means that we only have to deal with non-BLKmode values. */ if (GET_MODE (ret) == BLKmode) abort (); val = build_decl (VAR_DECL, NULL, TREE_TYPE (exp)); DECL_ARTIFICIAL (val) = 1; TREE_OPERAND (exp, 0) = val; if (!CONSTANT_P (ret)) ret = copy_to_reg (ret); SET_DECL_RTL (val, ret); } return ret; } case UNSAVE_EXPR: { rtx temp; temp = expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier); TREE_OPERAND (exp, 0) = lang_hooks.unsave_expr_now (TREE_OPERAND (exp, 0)); return temp; } case GOTO_EXPR: if (TREE_CODE (TREE_OPERAND (exp, 0)) == LABEL_DECL) expand_goto (TREE_OPERAND (exp, 0)); else expand_computed_goto (TREE_OPERAND (exp, 0)); return const0_rtx; /* These are lowered during gimplification, so we should never ever see them here. */ case LOOP_EXPR: case EXIT_EXPR: abort (); case LABELED_BLOCK_EXPR: if (LABELED_BLOCK_BODY (exp)) expand_expr_stmt (LABELED_BLOCK_BODY (exp)); /* Should perhaps use expand_label, but this is simpler and safer. */ do_pending_stack_adjust (); emit_label (label_rtx (LABELED_BLOCK_LABEL (exp))); return const0_rtx; case EXIT_BLOCK_EXPR: if (EXIT_BLOCK_RETURN (exp)) sorry ("returned value in block_exit_expr"); expand_goto (LABELED_BLOCK_LABEL (EXIT_BLOCK_LABELED_BLOCK (exp))); return const0_rtx; case BIND_EXPR: { tree block = BIND_EXPR_BLOCK (exp); int mark_ends; /* If we're in functions-as-trees mode, this BIND_EXPR represents the block, so we need to emit NOTE_INSN_BLOCK_* notes. */ mark_ends = (block != NULL_TREE); expand_start_bindings_and_block (mark_ends ? 0 : 2, block); /* If VARS have not yet been expanded, expand them now. */ expand_vars (BIND_EXPR_VARS (exp)); /* TARGET was clobbered early in this function. The correct indicator or whether or not we need the value of this expression is the IGNORE variable. */ temp = expand_expr (BIND_EXPR_BODY (exp), ignore ? const0_rtx : target, tmode, modifier); expand_end_bindings (BIND_EXPR_VARS (exp), mark_ends, 0); return temp; } case CONSTRUCTOR: /* If we don't need the result, just ensure we evaluate any subexpressions. */ if (ignore) { tree elt; for (elt = CONSTRUCTOR_ELTS (exp); elt; elt = TREE_CHAIN (elt)) expand_expr (TREE_VALUE (elt), const0_rtx, VOIDmode, 0); return const0_rtx; } /* All elts simple constants => refer to a constant in memory. But if this is a non-BLKmode mode, let it store a field at a time since that should make a CONST_INT or CONST_DOUBLE when we fold. Likewise, if we have a target we can use, it is best to store directly into the target unless the type is large enough that memcpy will be used. If we are making an initializer and all operands are constant, put it in memory as well. FIXME: Avoid trying to fill vector constructors piece-meal. Output them with output_constant_def below unless we're sure they're zeros. This should go away when vector initializers are treated like VECTOR_CST instead of arrays. */ else if ((TREE_STATIC (exp) && ((mode == BLKmode && ! (target != 0 && safe_from_p (target, exp, 1))) || TREE_ADDRESSABLE (exp) || (host_integerp (TYPE_SIZE_UNIT (type), 1) && (! 
MOVE_BY_PIECES_P (tree_low_cst (TYPE_SIZE_UNIT (type), 1), TYPE_ALIGN (type))) && ! mostly_zeros_p (exp)))) || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS) && TREE_CONSTANT (exp))) { rtx constructor = output_constant_def (exp, 1); if (modifier != EXPAND_CONST_ADDRESS && modifier != EXPAND_INITIALIZER && modifier != EXPAND_SUM) constructor = validize_mem (constructor); return constructor; } else { /* Handle calls that pass values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ if (target == 0 || ! safe_from_p (target, exp, 1) || GET_CODE (target) == PARALLEL || modifier == EXPAND_STACK_PARM) target = assign_temp (build_qualified_type (type, (TYPE_QUALS (type) | (TREE_READONLY (exp) * TYPE_QUAL_CONST))), 0, TREE_ADDRESSABLE (exp), 1); store_constructor (exp, target, 0, int_expr_size (exp)); return target; } case INDIRECT_REF: { tree exp1 = TREE_OPERAND (exp, 0); if (modifier != EXPAND_WRITE) { tree t; t = fold_read_from_constant_string (exp); if (t) return expand_expr (t, target, tmode, modifier); } op0 = expand_expr (exp1, NULL_RTX, VOIDmode, EXPAND_SUM); op0 = memory_address (mode, op0); temp = gen_rtx_MEM (mode, op0); set_mem_attributes (temp, exp, 0); /* If we are writing to this object and its type is a record with readonly fields, we must mark it as readonly so it will conflict with readonly references to those fields. */ if (modifier == EXPAND_WRITE && readonly_fields_p (type)) RTX_UNCHANGING_P (temp) = 1; return temp; } case ARRAY_REF: #ifdef ENABLE_CHECKING if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) != ARRAY_TYPE) abort (); #endif { tree array = TREE_OPERAND (exp, 0); tree low_bound = array_ref_low_bound (exp); tree index = convert (sizetype, TREE_OPERAND (exp, 1)); HOST_WIDE_INT i; /* Optimize the special-case of a zero lower bound. We convert the low_bound to sizetype to avoid some problems with constant folding. (E.g. suppose the lower bound is 1, and its mode is QI. Without the conversion, (ARRAY +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1)) +INDEX), which becomes (ARRAY+255+INDEX). Oops!) */ if (! integer_zerop (low_bound)) index = size_diffop (index, convert (sizetype, low_bound)); /* Fold an expression like: "foo"[2]. This is not done in fold so it won't happen inside &. Don't fold if this is for wide characters since it's too difficult to do correctly and this is a very rare case. */ if (modifier != EXPAND_CONST_ADDRESS && modifier != EXPAND_INITIALIZER && modifier != EXPAND_MEMORY) { tree t = fold_read_from_constant_string (exp); if (t) return expand_expr (t, target, tmode, modifier); } /* If this is a constant index into a constant array, just get the value from the array. Handle both the cases when we have an explicit constructor and when our operand is a variable that was declared const. */ if (modifier != EXPAND_CONST_ADDRESS && modifier != EXPAND_INITIALIZER && modifier != EXPAND_MEMORY && TREE_CODE (array) == CONSTRUCTOR && ! TREE_SIDE_EFFECTS (array) && TREE_CODE (index) == INTEGER_CST && 0 > compare_tree_int (index, list_length (CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0))))) { tree elem; for (elem = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)), i = TREE_INT_CST_LOW (index); elem != 0 && i != 0; i--, elem = TREE_CHAIN (elem)) ; if (elem) return expand_expr (fold (TREE_VALUE (elem)), target, tmode, modifier); } else if (optimize >= 1 && modifier != EXPAND_CONST_ADDRESS && modifier != EXPAND_INITIALIZER && modifier != EXPAND_MEMORY && TREE_READONLY (array) && ! 
TREE_SIDE_EFFECTS (array) && TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array) && TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK && targetm.binds_local_p (array)) { if (TREE_CODE (index) == INTEGER_CST) { tree init = DECL_INITIAL (array); if (TREE_CODE (init) == CONSTRUCTOR) { tree elem; for (elem = CONSTRUCTOR_ELTS (init); (elem && !tree_int_cst_equal (TREE_PURPOSE (elem), index)); elem = TREE_CHAIN (elem)) ; if (elem && !TREE_SIDE_EFFECTS (TREE_VALUE (elem))) return expand_expr (fold (TREE_VALUE (elem)), target, tmode, modifier); } else if (TREE_CODE (init) == STRING_CST && 0 > compare_tree_int (index, TREE_STRING_LENGTH (init))) { tree type = TREE_TYPE (TREE_TYPE (init)); enum machine_mode mode = TYPE_MODE (type); if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_SIZE (mode) == 1) return gen_int_mode (TREE_STRING_POINTER (init) [TREE_INT_CST_LOW (index)], mode); } } } } goto normal_inner_ref; case COMPONENT_REF: /* If the operand is a CONSTRUCTOR, we can just extract the appropriate field if it is present. */ if (TREE_CODE (TREE_OPERAND (exp, 0)) == CONSTRUCTOR) { tree elt; for (elt = CONSTRUCTOR_ELTS (TREE_OPERAND (exp, 0)); elt; elt = TREE_CHAIN (elt)) if (TREE_PURPOSE (elt) == TREE_OPERAND (exp, 1) /* We can normally use the value of the field in the CONSTRUCTOR. However, if this is a bitfield in an integral mode that we can fit in a HOST_WIDE_INT, we must mask only the number of bits in the bitfield, since this is done implicitly by the constructor. If the bitfield does not meet either of those conditions, we can't do this optimization. */ && (! DECL_BIT_FIELD (TREE_PURPOSE (elt)) || ((GET_MODE_CLASS (DECL_MODE (TREE_PURPOSE (elt))) == MODE_INT) && (GET_MODE_BITSIZE (DECL_MODE (TREE_PURPOSE (elt))) <= HOST_BITS_PER_WIDE_INT)))) { if (DECL_BIT_FIELD (TREE_PURPOSE (elt)) && modifier == EXPAND_STACK_PARM) target = 0; op0 = expand_expr (TREE_VALUE (elt), target, tmode, modifier); if (DECL_BIT_FIELD (TREE_PURPOSE (elt))) { HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (TREE_PURPOSE (elt))); enum machine_mode imode = TYPE_MODE (TREE_TYPE (TREE_PURPOSE (elt))); if (TYPE_UNSIGNED (TREE_TYPE (TREE_PURPOSE (elt)))) { op1 = GEN_INT (((HOST_WIDE_INT) 1 << bitsize) - 1); op0 = expand_and (imode, op0, op1, target); } else { tree count = build_int_2 (GET_MODE_BITSIZE (imode) - bitsize, 0); op0 = expand_shift (LSHIFT_EXPR, imode, op0, count, target, 0); op0 = expand_shift (RSHIFT_EXPR, imode, op0, count, target, 0); } } return op0; } } goto normal_inner_ref; case BIT_FIELD_REF: case ARRAY_RANGE_REF: normal_inner_ref: { enum machine_mode mode1; HOST_WIDE_INT bitsize, bitpos; tree offset; int volatilep = 0; tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1, &unsignedp, &volatilep); rtx orig_op0; /* If we got back the original object, something is wrong. Perhaps we are evaluating an expression too early. In any event, don't infinitely recurse. */ if (tem == exp) abort (); /* If TEM's type is a union of variable size, pass TARGET to the inner computation, since it will need a temporary and TARGET is known to have to do. This occurs in unchecked conversion in Ada. */ orig_op0 = op0 = expand_expr (tem, (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem))) != INTEGER_CST) && modifier != EXPAND_STACK_PARM ? target : NULL_RTX), VOIDmode, (modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS || modifier == EXPAND_STACK_PARM) ? 
modifier : EXPAND_NORMAL); /* If this is a constant, put it into a register if it is a legitimate constant and OFFSET is 0 and memory if it isn't. */ if (CONSTANT_P (op0)) { enum machine_mode mode = TYPE_MODE (TREE_TYPE (tem)); if (mode != BLKmode && LEGITIMATE_CONSTANT_P (op0) && offset == 0) op0 = force_reg (mode, op0); else op0 = validize_mem (force_const_mem (mode, op0)); } /* Otherwise, if this object not in memory and we either have an offset or a BLKmode result, put it there. This case can't occur in C, but can in Ada if we have unchecked conversion of an expression from a scalar type to an array or record type or for an ARRAY_RANGE_REF whose type is BLKmode. */ else if (!MEM_P (op0) && (offset != 0 || (code == ARRAY_RANGE_REF && mode == BLKmode))) { tree nt = build_qualified_type (TREE_TYPE (tem), (TYPE_QUALS (TREE_TYPE (tem)) | TYPE_QUAL_CONST)); rtx memloc = assign_temp (nt, 1, 1, 1); emit_move_insn (memloc, op0); op0 = memloc; } if (offset != 0) { rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM); if (!MEM_P (op0)) abort (); #ifdef POINTERS_EXTEND_UNSIGNED if (GET_MODE (offset_rtx) != Pmode) offset_rtx = convert_to_mode (Pmode, offset_rtx, 0); #else if (GET_MODE (offset_rtx) != ptr_mode) offset_rtx = convert_to_mode (ptr_mode, offset_rtx, 0); #endif if (GET_MODE (op0) == BLKmode /* A constant address in OP0 can have VOIDmode, we must not try to call force_reg in that case. */ && GET_MODE (XEXP (op0, 0)) != VOIDmode && bitsize != 0 && (bitpos % bitsize) == 0 && (bitsize % GET_MODE_ALIGNMENT (mode1)) == 0 && MEM_ALIGN (op0) == GET_MODE_ALIGNMENT (mode1)) { op0 = adjust_address (op0, mode1, bitpos / BITS_PER_UNIT); bitpos = 0; } op0 = offset_address (op0, offset_rtx, highest_pow2_factor (offset)); } /* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT, record its alignment as BIGGEST_ALIGNMENT. */ if (MEM_P (op0) && bitpos == 0 && offset != 0 && is_aligning_offset (offset, tem)) set_mem_align (op0, BIGGEST_ALIGNMENT); /* Don't forget about volatility even if this is a bitfield. */ if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0)) { if (op0 == orig_op0) op0 = copy_rtx (op0); MEM_VOLATILE_P (op0) = 1; } /* The following code doesn't handle CONCAT. Assume only bitpos == 0 can be used for CONCAT, due to one element arrays having the same mode as its element. */ if (GET_CODE (op0) == CONCAT) { if (bitpos != 0 || bitsize != GET_MODE_BITSIZE (GET_MODE (op0))) abort (); return op0; } /* In cases where an aligned union has an unaligned object as a field, we might be extracting a BLKmode value from an integer-mode (e.g., SImode) object. Handle this case by doing the extract into an object as wide as the field (which we know to be the width of a basic mode), then storing into memory, and changing the mode to BLKmode. */ if (mode1 == VOIDmode || REG_P (op0) || GET_CODE (op0) == SUBREG || (mode1 != BLKmode && ! direct_load[(int) mode1] && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT && modifier != EXPAND_CONST_ADDRESS && modifier != EXPAND_INITIALIZER) /* If the field isn't aligned enough to fetch as a memref, fetch it as a bit field. */ || (mode1 != BLKmode && (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode) || (bitpos % GET_MODE_ALIGNMENT (mode) != 0) || (MEM_P (op0) && (MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1) || (bitpos % GET_MODE_ALIGNMENT (mode1) != 0)))) && ((modifier == EXPAND_CONST_ADDRESS || modifier == EXPAND_INITIALIZER) ? 
STRICT_ALIGNMENT : SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0)))) || (bitpos % BITS_PER_UNIT != 0))) /* If the type and the field are a constant size and the size of the type isn't the same size as the bitfield, we must use bitfield operations. */ || (bitsize >= 0 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST) && 0 != compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)), bitsize))) { enum machine_mode ext_mode = mode; if (ext_mode == BLKmode && ! (target != 0 && MEM_P (op0) && MEM_P (target) && bitpos % BITS_PER_UNIT == 0)) ext_mode = mode_for_size (bitsize, MODE_INT, 1); if (ext_mode == BLKmode) { if (target == 0) target = assign_temp (type, 0, 1, 1); if (bitsize == 0) return target; /* In this case, BITPOS must start at a byte boundary and TARGET, if specified, must be a MEM. */ if (!MEM_P (op0) || (target != 0 && !MEM_P (target)) || bitpos % BITS_PER_UNIT != 0) abort (); emit_block_move (target, adjust_address (op0, VOIDmode, bitpos / BITS_PER_UNIT), GEN_INT ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT), (modifier == EXPAND_STACK_PARM ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL)); return target; } op0 = validize_mem (op0); if (MEM_P (op0) && REG_P (XEXP (op0, 0))) mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0)); op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp, (modifier == EXPAND_STACK_PARM ? NULL_RTX : target), ext_mode, ext_mode, int_size_in_bytes (TREE_TYPE (tem))); /* If the result is a record type and BITSIZE is narrower than the mode of OP0, an integral mode, and this is a big endian machine, we must put the field into the high-order bits. */ if (TREE_CODE (type) == RECORD_TYPE && BYTES_BIG_ENDIAN && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT && bitsize < (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (op0))) op0 = expand_shift (LSHIFT_EXPR, GET_MODE (op0), op0, size_int (GET_MODE_BITSIZE (GET_MODE (op0)) - bitsize), op0, 1); /* If the result type is BLKmode, store the data into a temporary of the appropriate type, but with the mode corresponding to the mode for the data we have (op0's mode). It's tempting to make this a constant type, since we know it's only being stored once, but that can cause problems if we are taking the address of this COMPONENT_REF because the MEM of any reference via that address will have flags corresponding to the type, which will not necessarily be constant. */ if (mode == BLKmode) { rtx new = assign_stack_temp_for_type (ext_mode, GET_MODE_BITSIZE (ext_mode), 0, type); emit_move_insn (new, op0); op0 = copy_rtx (new); PUT_MODE (op0, BLKmode); set_mem_attributes (op0, exp, 1); } return op0; } /* If the result is BLKmode, use that to access the object now as well. */ if (mode == BLKmode) mode1 = BLKmode; /* Get a reference to just this component. */ if (modifier == EXPAND_CONST_ADDRESS || modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER) op0 = adjust_address_nv (op0, mode1, bitpos / BITS_PER_UNIT); else op0 = adjust_address (op0, mode1, bitpos / BITS_PER_UNIT); if (op0 == orig_op0) op0 = copy_rtx (op0); set_mem_attributes (op0, exp, 0); if (REG_P (XEXP (op0, 0))) mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0)); MEM_VOLATILE_P (op0) |= volatilep; if (mode == mode1 || mode1 == BLKmode || mode1 == tmode || modifier == EXPAND_CONST_ADDRESS || modifier == EXPAND_INITIALIZER) return op0; else if (target == 0) target = gen_reg_rtx (tmode != VOIDmode ? 
tmode : mode); convert_move (target, op0, unsignedp); return target; } case OBJ_TYPE_REF: return expand_expr (OBJ_TYPE_REF_EXPR (exp), target, tmode, modifier); /* Intended for a reference to a buffer of a file-object in Pascal. But it's not certain that a special tree code will really be necessary for these. INDIRECT_REF might work for them. */ case BUFFER_REF: abort (); case IN_EXPR: { /* Pascal set IN expression. Algorithm: rlo = set_low - (set_low%bits_per_word); the_word = set [ (index - rlo)/bits_per_word ]; bit_index = index % bits_per_word; bitmask = 1 << bit_index; return !!(the_word & bitmask); */ tree set = TREE_OPERAND (exp, 0); tree index = TREE_OPERAND (exp, 1); int iunsignedp = TYPE_UNSIGNED (TREE_TYPE (index)); tree set_type = TREE_TYPE (set); tree set_low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (set_type)); tree set_high_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (set_type)); rtx index_val = expand_expr (index, 0, VOIDmode, 0); rtx lo_r = expand_expr (set_low_bound, 0, VOIDmode, 0); rtx hi_r = expand_expr (set_high_bound, 0, VOIDmode, 0); rtx setval = expand_expr (set, 0, VOIDmode, 0); rtx setaddr = XEXP (setval, 0); enum machine_mode index_mode = TYPE_MODE (TREE_TYPE (index)); rtx rlow; rtx diff, quo, rem, addr, bit, result; /* If domain is empty, answer is no. Likewise if index is constant and out of bounds. */ if (((TREE_CODE (set_high_bound) == INTEGER_CST && TREE_CODE (set_low_bound) == INTEGER_CST && tree_int_cst_lt (set_high_bound, set_low_bound)) || (TREE_CODE (index) == INTEGER_CST && TREE_CODE (set_low_bound) == INTEGER_CST && tree_int_cst_lt (index, set_low_bound)) || (TREE_CODE (set_high_bound) == INTEGER_CST && TREE_CODE (index) == INTEGER_CST && tree_int_cst_lt (set_high_bound, index)))) return const0_rtx; if (target == 0) target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode); /* If we get here, we have to generate the code for both cases (in range and out of range). */ op0 = gen_label_rtx (); op1 = gen_label_rtx (); if (! (GET_CODE (index_val) == CONST_INT && GET_CODE (lo_r) == CONST_INT)) emit_cmp_and_jump_insns (index_val, lo_r, LT, NULL_RTX, GET_MODE (index_val), iunsignedp, op1); if (! (GET_CODE (index_val) == CONST_INT && GET_CODE (hi_r) == CONST_INT)) emit_cmp_and_jump_insns (index_val, hi_r, GT, NULL_RTX, GET_MODE (index_val), iunsignedp, op1); /* Calculate the element number of bit zero in the first word of the set. */ if (GET_CODE (lo_r) == CONST_INT) rlow = GEN_INT (INTVAL (lo_r) & ~((HOST_WIDE_INT) 1 << BITS_PER_UNIT)); else rlow = expand_binop (index_mode, and_optab, lo_r, GEN_INT (~((HOST_WIDE_INT) 1 << BITS_PER_UNIT)), NULL_RTX, iunsignedp, OPTAB_LIB_WIDEN); diff = expand_binop (index_mode, sub_optab, index_val, rlow, NULL_RTX, iunsignedp, OPTAB_LIB_WIDEN); quo = expand_divmod (0, TRUNC_DIV_EXPR, index_mode, diff, GEN_INT (BITS_PER_UNIT), NULL_RTX, iunsignedp); rem = expand_divmod (1, TRUNC_MOD_EXPR, index_mode, index_val, GEN_INT (BITS_PER_UNIT), NULL_RTX, iunsignedp); addr = memory_address (byte_mode, expand_binop (index_mode, add_optab, diff, setaddr, NULL_RTX, iunsignedp, OPTAB_LIB_WIDEN)); /* Extract the bit we want to examine. */ bit = expand_shift (RSHIFT_EXPR, byte_mode, gen_rtx_MEM (byte_mode, addr), make_tree (TREE_TYPE (index), rem), NULL_RTX, 1); result = expand_binop (byte_mode, and_optab, bit, const1_rtx, GET_MODE (target) == byte_mode ? target : 0, 1, OPTAB_LIB_WIDEN); if (result != target) convert_move (target, result, 1); /* Output the code to handle the out-of-range case. 
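The in-range path above has already stored the extracted bit in TARGET and jumps over this block, which merely stores 0. Taken as a whole, the emitted sequence behaves roughly like the Algorithm note at the top of this case, i.e. (a hedged C-level sketch using the hypothetical names from that note, not the literal RTL): result = (index < set_low || index > set_high) ? 0 : !!(the_word & (1 << bit_index));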
*/ emit_jump (op0); emit_label (op1); emit_move_insn (target, const0_rtx); emit_label (op0); return target; } case WITH_CLEANUP_EXPR: if (WITH_CLEANUP_EXPR_RTL (exp) == 0) { WITH_CLEANUP_EXPR_RTL (exp) = expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier); expand_decl_cleanup_eh (NULL_TREE, TREE_OPERAND (exp, 1), CLEANUP_EH_ONLY (exp)); /* That's it for this cleanup. */ TREE_OPERAND (exp, 1) = 0; } return WITH_CLEANUP_EXPR_RTL (exp); case CLEANUP_POINT_EXPR: { /* Start a new binding layer that will keep track of all cleanup actions to be performed. */ expand_start_bindings (2); target_temp_slot_level = temp_slot_level; op0 = expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier); /* If we're going to use this value, load it up now. */ if (! ignore) op0 = force_not_mem (op0); preserve_temp_slots (op0); expand_end_bindings (NULL_TREE, 0, 0); } return op0; case CALL_EXPR: /* Check for a built-in function. */ if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) == FUNCTION_DECL) && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))) { if (DECL_BUILT_IN_CLASS (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) == BUILT_IN_FRONTEND) return lang_hooks.expand_expr (exp, original_target, tmode, modifier, alt_rtl); else return expand_builtin (exp, target, subtarget, tmode, ignore); } return expand_call (exp, target, ignore); case NON_LVALUE_EXPR: case NOP_EXPR: case CONVERT_EXPR: case REFERENCE_EXPR: if (TREE_OPERAND (exp, 0) == error_mark_node) return const0_rtx; if (TREE_CODE (type) == UNION_TYPE) { tree valtype = TREE_TYPE (TREE_OPERAND (exp, 0)); /* If both input and output are BLKmode, this conversion isn't doing anything except possibly changing memory attribute. */ if (mode == BLKmode && TYPE_MODE (valtype) == BLKmode) { rtx result = expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier); result = copy_rtx (result); set_mem_attributes (result, exp, 0); return result; } if (target == 0) { if (TYPE_MODE (type) != BLKmode) target = gen_reg_rtx (TYPE_MODE (type)); else target = assign_temp (type, 0, 1, 1); } if (MEM_P (target)) /* Store data into beginning of memory target. */ store_expr (TREE_OPERAND (exp, 0), adjust_address (target, TYPE_MODE (valtype), 0), modifier == EXPAND_STACK_PARM ? 2 : 0); else if (REG_P (target)) /* Store this field into a union of the proper type. */ store_field (target, MIN ((int_size_in_bytes (TREE_TYPE (TREE_OPERAND (exp, 0))) * BITS_PER_UNIT), (HOST_WIDE_INT) GET_MODE_BITSIZE (mode)), 0, TYPE_MODE (valtype), TREE_OPERAND (exp, 0), VOIDmode, 0, type, 0); else abort (); /* Return the entire union. */ return target; } if (mode == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))) { op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode, modifier); /* If the signedness of the conversion differs and OP0 is a promoted SUBREG, clear that indication since we now have to do the proper extension. */ if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))) != unsignedp && GET_CODE (op0) == SUBREG) SUBREG_PROMOTED_VAR_P (op0) = 0; return op0; } op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, modifier); if (GET_MODE (op0) == mode) return op0; /* If OP0 is a constant, just convert it into the proper mode. 
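For instance (an illustrative case), a QImode (const_int 65) requested in SImode simply comes back from convert_modes as (const_int 65) in the wider mode.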
*/ if (CONSTANT_P (op0)) { tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0)); enum machine_mode inner_mode = TYPE_MODE (inner_type); if (modifier == EXPAND_INITIALIZER) return simplify_gen_subreg (mode, op0, inner_mode, subreg_lowpart_offset (mode, inner_mode)); else return convert_modes (mode, inner_mode, op0, TYPE_UNSIGNED (inner_type)); } if (modifier == EXPAND_INITIALIZER) return gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND, mode, op0); if (target == 0) return convert_to_mode (mode, op0, TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))); else convert_move (target, op0, TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))); return target; case VIEW_CONVERT_EXPR: op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, mode, modifier); /* If the input and output modes are both the same, we are done. Otherwise, if neither mode is BLKmode and both are integral and within a word, we can use gen_lowpart. If neither is true, make sure the operand is in memory and convert the MEM to the new mode. */ if (TYPE_MODE (type) == GET_MODE (op0)) ; else if (TYPE_MODE (type) != BLKmode && GET_MODE (op0) != BLKmode && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD && GET_MODE_SIZE (GET_MODE (op0)) <= UNITS_PER_WORD) op0 = gen_lowpart (TYPE_MODE (type), op0); else if (!MEM_P (op0)) { /* If the operand is not a MEM, force it into memory. Since we are going to be changing the mode of the MEM, don't call force_const_mem for constants because we don't allow pool constants to change mode. */ tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0)); if (TREE_ADDRESSABLE (exp)) abort (); if (target == 0 || GET_MODE (target) != TYPE_MODE (inner_type)) target = assign_stack_temp_for_type (TYPE_MODE (inner_type), GET_MODE_SIZE (TYPE_MODE (inner_type)), 0, inner_type); emit_move_insn (target, op0); op0 = target; } /* At this point, OP0 is in the correct mode. If the output type is such that the operand is known to be aligned, indicate that it is. Otherwise, we need only be concerned about alignment for non-BLKmode results. */ if (MEM_P (op0)) { op0 = copy_rtx (op0); if (TYPE_ALIGN_OK (type)) set_mem_align (op0, MAX (MEM_ALIGN (op0), TYPE_ALIGN (type))); else if (TYPE_MODE (type) != BLKmode && STRICT_ALIGNMENT && MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (TYPE_MODE (type))) { tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0)); HOST_WIDE_INT temp_size = MAX (int_size_in_bytes (inner_type), (HOST_WIDE_INT) GET_MODE_SIZE (TYPE_MODE (type))); rtx new = assign_stack_temp_for_type (TYPE_MODE (type), temp_size, 0, type); rtx new_with_op0_mode = adjust_address (new, GET_MODE (op0), 0); if (TREE_ADDRESSABLE (exp)) abort (); if (GET_MODE (op0) == BLKmode) emit_block_move (new_with_op0_mode, op0, GEN_INT (GET_MODE_SIZE (TYPE_MODE (type))), (modifier == EXPAND_STACK_PARM ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL)); else emit_move_insn (new_with_op0_mode, op0); op0 = new; } op0 = adjust_address (op0, TYPE_MODE (type), 0); } return op0; case PLUS_EXPR: this_optab = ! unsignedp && flag_trapv && (GET_MODE_CLASS (mode) == MODE_INT) ? addv_optab : add_optab; /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and something else, make sure we add the register to the constant and then to the other thing. This case can occur during strength reduction and doing it this way will produce better code if the frame pointer or argument pointer is eliminated.
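For example (illustrative), (x + 16) + fp is rearranged here into (fp + 16) + x, so that once the frame pointer is eliminated the constant 16 folds into the replacement register's offset.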
fold-const.c will ensure that the constant is always in the inner PLUS_EXPR, so the only case we need to do anything about is if sp, ap, or fp is our second argument, in which case we must swap the innermost first argument and our second argument. */ if (TREE_CODE (TREE_OPERAND (exp, 0)) == PLUS_EXPR && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 1)) == INTEGER_CST && TREE_CODE (TREE_OPERAND (exp, 1)) == VAR_DECL && (DECL_RTL (TREE_OPERAND (exp, 1)) == frame_pointer_rtx || DECL_RTL (TREE_OPERAND (exp, 1)) == stack_pointer_rtx || DECL_RTL (TREE_OPERAND (exp, 1)) == arg_pointer_rtx)) { tree t = TREE_OPERAND (exp, 1); TREE_OPERAND (exp, 1) = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); TREE_OPERAND (TREE_OPERAND (exp, 0), 0) = t; } /* If the result is to be ptr_mode and we are adding an integer to something, we might be forming a constant. So try to use plus_constant. If it produces a sum and we can't accept it, use force_operand. This allows P = &ARR[const] to generate efficient code on machines where a SYMBOL_REF is not a valid address. If this is an EXPAND_SUM call, always return the sum. */ if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER || (mode == ptr_mode && (unsignedp || ! flag_trapv))) { if (modifier == EXPAND_STACK_PARM) target = 0; if (TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && TREE_CONSTANT (TREE_OPERAND (exp, 1))) { rtx constant_part; op1 = expand_expr (TREE_OPERAND (exp, 1), subtarget, VOIDmode, EXPAND_SUM); /* Use immed_double_const to ensure that the constant is truncated according to the mode of OP1, then sign extended to a HOST_WIDE_INT. Using the constant directly can result in non-canonical RTL in a 64x32 cross compile. */ constant_part = immed_double_const (TREE_INT_CST_LOW (TREE_OPERAND (exp, 0)), (HOST_WIDE_INT) 0, TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1)))); op1 = plus_constant (op1, INTVAL (constant_part)); if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) op1 = force_operand (op1, target); return op1; } else if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_INT && TREE_CONSTANT (TREE_OPERAND (exp, 0))) { rtx constant_part; op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, (modifier == EXPAND_INITIALIZER ? EXPAND_INITIALIZER : EXPAND_SUM)); if (! CONSTANT_P (op0)) { op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, modifier); /* Return a PLUS if modifier says it's OK. */ if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER) return simplify_gen_binary (PLUS, mode, op0, op1); goto binop2; } /* Use immed_double_const to ensure that the constant is truncated according to the mode of OP1, then sign extended to a HOST_WIDE_INT. Using the constant directly can result in non-canonical RTL in a 64x32 cross compile. */ constant_part = immed_double_const (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1)), (HOST_WIDE_INT) 0, TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))); op0 = plus_constant (op0, INTVAL (constant_part)); if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) op0 = force_operand (op0, target); return op0; } } /* No sense saving up arithmetic to be done if it's all in the wrong mode to form part of an address. And force_operand won't know whether to sign-extend or zero-extend. 
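So only a ptr_mode sum requested with EXPAND_SUM or EXPAND_INITIALIZER is returned below as a bare PLUS for the caller to fold into an address; every other request falls through to the ordinary binop path.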
*/ if ((modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) || mode != ptr_mode) { expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), subtarget, &op0, &op1, 0); if (op0 == const0_rtx) return op1; if (op1 == const0_rtx) return op0; goto binop2; } expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), subtarget, &op0, &op1, modifier); return simplify_gen_binary (PLUS, mode, op0, op1); case MINUS_EXPR: /* For initializers, we are allowed to return a MINUS of two symbolic constants. Here we handle all cases when both operands are constant. */ /* Handle difference of two symbolic constants, for the sake of an initializer. */ if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER) && really_constant_p (TREE_OPERAND (exp, 0)) && really_constant_p (TREE_OPERAND (exp, 1))) { expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), NULL_RTX, &op0, &op1, modifier); /* If the last operand is a CONST_INT, use plus_constant of the negated constant. Else make the MINUS. */ if (GET_CODE (op1) == CONST_INT) return plus_constant (op0, - INTVAL (op1)); else return gen_rtx_MINUS (mode, op0, op1); } this_optab = ! unsignedp && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT) ? subv_optab : sub_optab; /* No sense saving up arithmetic to be done if it's all in the wrong mode to form part of an address. And force_operand won't know whether to sign-extend or zero-extend. */ if ((modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) || mode != ptr_mode) goto binop; expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), subtarget, &op0, &op1, modifier); /* Convert A - const to A + (-const). */ if (GET_CODE (op1) == CONST_INT) { op1 = negate_rtx (mode, op1); return simplify_gen_binary (PLUS, mode, op0, op1); } goto binop2; case MULT_EXPR: /* If first operand is constant, swap them. Thus the following special case checks need only check the second operand. */ if (TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST) { tree t1 = TREE_OPERAND (exp, 0); TREE_OPERAND (exp, 0) = TREE_OPERAND (exp, 1); TREE_OPERAND (exp, 1) = t1; } /* Attempt to return something suitable for generating an indexed address, for machines that support that. */ if (modifier == EXPAND_SUM && mode == ptr_mode && host_integerp (TREE_OPERAND (exp, 1), 0)) { tree exp1 = TREE_OPERAND (exp, 1); op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, EXPAND_SUM); if (!REG_P (op0)) op0 = force_operand (op0, NULL_RTX); if (!REG_P (op0)) op0 = copy_to_mode_reg (mode, op0); return gen_rtx_MULT (mode, op0, gen_int_mode (tree_low_cst (exp1, 0), TYPE_MODE (TREE_TYPE (exp1)))); } if (modifier == EXPAND_STACK_PARM) target = 0; /* Check for multiplying things that have been extended from a narrower type. If this machine supports multiplying in that narrower type with a result in the desired type, do it that way, and avoid the explicit type-conversion. */ if (TREE_CODE (TREE_OPERAND (exp, 0)) == NOP_EXPR && TREE_CODE (type) == INTEGER_TYPE && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))) < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0)))) && ((TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST && int_fits_type_p (TREE_OPERAND (exp, 1), TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))) /* Don't use a widening multiply if a shift will do. 
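For instance, (int) (short) x * 8 is better done as an extension followed by a left shift, which is why the exact_log2 test below keeps power-of-two constants out of the widening-multiply path.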
*/ && ((GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1)))) > HOST_BITS_PER_WIDE_INT) || exact_log2 (TREE_INT_CST_LOW (TREE_OPERAND (exp, 1))) < 0)) || (TREE_CODE (TREE_OPERAND (exp, 1)) == NOP_EXPR && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 1), 0))) == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)))) /* If both operands are extended, they must either both be zero-extended or both be sign-extended. */ && (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 1), 0))) == TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))))))) { tree op0type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)); enum machine_mode innermode = TYPE_MODE (op0type); bool zextend_p = TYPE_UNSIGNED (op0type); optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab; this_optab = zextend_p ? umul_widen_optab : smul_widen_optab; if (mode == GET_MODE_WIDER_MODE (innermode)) { if (this_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST) expand_operands (TREE_OPERAND (TREE_OPERAND (exp, 0), 0), TREE_OPERAND (exp, 1), NULL_RTX, &op0, &op1, 0); else expand_operands (TREE_OPERAND (TREE_OPERAND (exp, 0), 0), TREE_OPERAND (TREE_OPERAND (exp, 1), 0), NULL_RTX, &op0, &op1, 0); goto binop2; } else if (other_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing && innermode == word_mode) { rtx htem, hipart; op0 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 0), 0), NULL_RTX, VOIDmode, 0); if (TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST) op1 = convert_modes (innermode, mode, expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0), unsignedp); else op1 = expand_expr (TREE_OPERAND (TREE_OPERAND (exp, 1), 0), NULL_RTX, VOIDmode, 0); temp = expand_binop (mode, other_optab, op0, op1, target, unsignedp, OPTAB_LIB_WIDEN); hipart = gen_highpart (innermode, temp); htem = expand_mult_highpart_adjust (innermode, hipart, op0, op1, hipart, zextend_p); if (htem != hipart) emit_move_insn (hipart, htem); return temp; } } } expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), subtarget, &op0, &op1, 0); return expand_mult (mode, op0, op1, target, unsignedp); case TRUNC_DIV_EXPR: case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: if (modifier == EXPAND_STACK_PARM) target = 0; /* Possible optimization: compute the dividend with EXPAND_SUM then if the divisor is constant can optimize the case where some terms of the dividend have coeffs divisible by it. */ expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), subtarget, &op0, &op1, 0); return expand_divmod (0, code, mode, op0, op1, target, unsignedp); case RDIV_EXPR: /* Emit a/b as a*(1/b). Later we may manage CSE the reciprocal saving expensive divide. If not, combine will rebuild the original computation. 
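For example, x/y + z/y becomes x*(1/y) + z*(1/y), and CSE can then reuse a single reciprocal instead of issuing two divides.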
*/ if (flag_unsafe_math_optimizations && optimize && !optimize_size && TREE_CODE (type) == REAL_TYPE && !real_onep (TREE_OPERAND (exp, 0))) return expand_expr (build (MULT_EXPR, type, TREE_OPERAND (exp, 0), build (RDIV_EXPR, type, build_real (type, dconst1), TREE_OPERAND (exp, 1))), target, tmode, modifier); this_optab = sdiv_optab; goto binop; case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR: case ROUND_MOD_EXPR: if (modifier == EXPAND_STACK_PARM) target = 0; expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), subtarget, &op0, &op1, 0); return expand_divmod (1, code, mode, op0, op1, target, unsignedp); case FIX_ROUND_EXPR: case FIX_FLOOR_EXPR: case FIX_CEIL_EXPR: abort (); /* Not used for C. */ case FIX_TRUNC_EXPR: op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0); if (target == 0 || modifier == EXPAND_STACK_PARM) target = gen_reg_rtx (mode); expand_fix (target, op0, unsignedp); return target; case FLOAT_EXPR: op0 = expand_expr (TREE_OPERAND (exp, 0), NULL_RTX, VOIDmode, 0); if (target == 0 || modifier == EXPAND_STACK_PARM) target = gen_reg_rtx (mode); /* expand_float can't figure out what to do if FROM has VOIDmode. So give it the correct mode. With -O, cse will optimize this. */ if (GET_MODE (op0) == VOIDmode) op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))), op0); expand_float (target, op0, TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))); return target; case NEGATE_EXPR: op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); if (modifier == EXPAND_STACK_PARM) target = 0; temp = expand_unop (mode, ! unsignedp && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT) ? negv_optab : neg_optab, op0, target, 0); if (temp == 0) abort (); return temp; case ABS_EXPR: op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); if (modifier == EXPAND_STACK_PARM) target = 0; /* ABS_EXPR is not valid for complex arguments. */ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) abort (); /* Unsigned abs is simply the operand. Testing here means we don't risk generating incorrect code below. */ if (TYPE_UNSIGNED (type)) return op0; return expand_abs (mode, op0, target, unsignedp, safe_from_p (target, TREE_OPERAND (exp, 0), 1)); case MAX_EXPR: case MIN_EXPR: target = original_target; if (target == 0 || modifier == EXPAND_STACK_PARM || (MEM_P (target) && MEM_VOLATILE_P (target)) || GET_MODE (target) != mode || (REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)) target = gen_reg_rtx (mode); expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), target, &op0, &op1, 0); /* First try to do it with a special MIN or MAX instruction. If that does not win, use a conditional jump to select the proper value. */ this_optab = (unsignedp ? (code == MIN_EXPR ? umin_optab : umax_optab) : (code == MIN_EXPR ? smin_optab : smax_optab)); temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp, OPTAB_WIDEN); if (temp != 0) return temp; /* At this point, a MEM target is no longer useful; we will get better code without it. */ if (MEM_P (target)) target = gen_reg_rtx (mode); /* If op1 was placed in target, swap op0 and op1. */ if (target != op0 && target == op1) { rtx tem = op0; op0 = op1; op1 = tem; } if (target != op0) emit_move_insn (target, op0); op0 = gen_label_rtx (); /* If this mode is an integer too wide to compare properly, compare word by word. Rely on cse to optimize constant cases. */ if (GET_MODE_CLASS (mode) == MODE_INT && ! 
can_compare_p (GE, mode, ccp_jump)) { if (code == MAX_EXPR) do_jump_by_parts_greater_rtx (mode, unsignedp, target, op1, NULL_RTX, op0); else do_jump_by_parts_greater_rtx (mode, unsignedp, op1, target, NULL_RTX, op0); } else { do_compare_rtx_and_jump (target, op1, code == MAX_EXPR ? GE : LE, unsignedp, mode, NULL_RTX, NULL_RTX, op0); } emit_move_insn (target, op1); emit_label (op0); return target; case BIT_NOT_EXPR: op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); if (modifier == EXPAND_STACK_PARM) target = 0; temp = expand_unop (mode, one_cmpl_optab, op0, target, 1); if (temp == 0) abort (); return temp; /* ??? Can optimize bitwise operations with one arg constant. Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b) and (a bitwise1 b) bitwise2 b (etc) but that is probably not worth while. */ /* BIT_AND_EXPR is for bitwise anding. TRUTH_AND_EXPR is for anding two boolean values when we want in all cases to compute both of them. In general it is fastest to do TRUTH_AND_EXPR by computing both operands as actual zero-or-1 values and then bitwise anding. In cases where there cannot be any side effects, better code would be made by treating TRUTH_AND_EXPR like TRUTH_ANDIF_EXPR; but the question is how to recognize those cases. */ case TRUTH_AND_EXPR: case BIT_AND_EXPR: this_optab = and_optab; goto binop; case TRUTH_OR_EXPR: case BIT_IOR_EXPR: this_optab = ior_optab; goto binop; case TRUTH_XOR_EXPR: case BIT_XOR_EXPR: this_optab = xor_optab; goto binop; case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: if (! safe_from_p (subtarget, TREE_OPERAND (exp, 1), 1)) subtarget = 0; if (modifier == EXPAND_STACK_PARM) target = 0; op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode, 0); return expand_shift (code, mode, op0, TREE_OPERAND (exp, 1), target, unsignedp); /* Could determine the answer when only additive constants differ. Also, the addition of one can be handled by changing the condition. */ case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNORDERED_EXPR: case ORDERED_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: temp = do_store_flag (exp, modifier != EXPAND_STACK_PARM ? target : NULL_RTX, tmode != VOIDmode ? tmode : mode, 0); if (temp != 0) return temp; /* For foo != 0, load foo, and if it is nonzero load 1 instead. */ if (code == NE_EXPR && integer_zerop (TREE_OPERAND (exp, 1)) && original_target && REG_P (original_target) && (GET_MODE (original_target) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))) { temp = expand_expr (TREE_OPERAND (exp, 0), original_target, VOIDmode, 0); /* If temp is constant, we can just compute the result. */ if (GET_CODE (temp) == CONST_INT) { if (INTVAL (temp) != 0) emit_move_insn (target, const1_rtx); else emit_move_insn (target, const0_rtx); return target; } if (temp != original_target) { enum machine_mode mode1 = GET_MODE (temp); if (mode1 == VOIDmode) mode1 = tmode != VOIDmode ? tmode : mode; temp = copy_to_mode_reg (mode1, temp); } op1 = gen_label_rtx (); emit_cmp_and_jump_insns (temp, const0_rtx, EQ, NULL_RTX, GET_MODE (temp), unsignedp, op1); emit_move_insn (temp, const1_rtx); emit_label (op1); return temp; } /* If no set-flag instruction, must generate a conditional store into a temporary variable. Drop through and handle this like && and ||. */ case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: if (! ignore && (target == 0 || modifier == EXPAND_STACK_PARM || ! 
safe_from_p (target, exp, 1) /* Make sure we don't have a hard reg (such as function's return value) live across basic blocks, if not optimizing. */ || (!optimize && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER))) target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode); if (target) emit_clr_insn (target); op1 = gen_label_rtx (); jumpifnot (exp, op1); if (target) emit_0_to_1_insn (target); emit_label (op1); return ignore ? const0_rtx : target; case TRUTH_NOT_EXPR: if (modifier == EXPAND_STACK_PARM) target = 0; op0 = expand_expr (TREE_OPERAND (exp, 0), target, VOIDmode, 0); /* The parser is careful to generate TRUTH_NOT_EXPR only with operands that are always zero or one. */ temp = expand_binop (mode, xor_optab, op0, const1_rtx, target, 1, OPTAB_LIB_WIDEN); if (temp == 0) abort (); return temp; case COMPOUND_EXPR: expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0); emit_queue (); return expand_expr_real (TREE_OPERAND (exp, 1), (ignore ? const0_rtx : target), VOIDmode, modifier, alt_rtl); case STATEMENT_LIST: { tree_stmt_iterator iter; if (!ignore) abort (); for (iter = tsi_start (exp); !tsi_end_p (iter); tsi_next (&iter)) expand_expr (tsi_stmt (iter), const0_rtx, VOIDmode, modifier); } return const0_rtx; case COND_EXPR: /* If it's void, we don't need to worry about computing a value. */ if (VOID_TYPE_P (TREE_TYPE (exp))) { tree pred = TREE_OPERAND (exp, 0); tree then_ = TREE_OPERAND (exp, 1); tree else_ = TREE_OPERAND (exp, 2); /* If we do not have any pending cleanups or stack_levels to restore, and at least one arm of the COND_EXPR is a GOTO_EXPR to a local label, then we can emit more efficient code by using jumpif/jumpifnot instead of the 'if' machinery. */ if (! optimize || containing_blocks_have_cleanups_or_stack_level ()) ; else if (TREE_CODE (then_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL) { jumpif (pred, label_rtx (GOTO_DESTINATION (then_))); return expand_expr (else_, const0_rtx, VOIDmode, 0); } else if (TREE_CODE (else_) == GOTO_EXPR && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL) { jumpifnot (pred, label_rtx (GOTO_DESTINATION (else_))); return expand_expr (then_, const0_rtx, VOIDmode, 0); } /* Just use the 'if' machinery. */ expand_start_cond (pred, 0); start_cleanup_deferral (); expand_expr (then_, const0_rtx, VOIDmode, 0); exp = else_; /* Iterate over 'else if's instead of recursing. */ for (; TREE_CODE (exp) == COND_EXPR; exp = TREE_OPERAND (exp, 2)) { expand_start_else (); if (EXPR_HAS_LOCATION (exp)) { emit_line_note (EXPR_LOCATION (exp)); record_block_change (TREE_BLOCK (exp)); } expand_elseif (TREE_OPERAND (exp, 0)); expand_expr (TREE_OPERAND (exp, 1), const0_rtx, VOIDmode, 0); } /* Don't emit the jump and label if there's no 'else' clause. */ if (TREE_SIDE_EFFECTS (exp)) { expand_start_else (); expand_expr (exp, const0_rtx, VOIDmode, 0); } end_cleanup_deferral (); expand_end_cond (); return const0_rtx; } /* If we would have a "singleton" (see below) were it not for a conversion in each arm, bring that conversion back out. 
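For example (an illustrative case), b ? (long) (a + 1) : (long) a is rebuilt as (long) (b ? a + 1 : a), exposing the shared operand A to the singleton handling below.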
*/ if (TREE_CODE (TREE_OPERAND (exp, 1)) == NOP_EXPR && TREE_CODE (TREE_OPERAND (exp, 2)) == NOP_EXPR && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 1), 0)) == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (exp, 2), 0)))) { tree iftrue = TREE_OPERAND (TREE_OPERAND (exp, 1), 0); tree iffalse = TREE_OPERAND (TREE_OPERAND (exp, 2), 0); if ((TREE_CODE_CLASS (TREE_CODE (iftrue)) == '2' && operand_equal_p (iffalse, TREE_OPERAND (iftrue, 0), 0)) || (TREE_CODE_CLASS (TREE_CODE (iffalse)) == '2' && operand_equal_p (iftrue, TREE_OPERAND (iffalse, 0), 0)) || (TREE_CODE_CLASS (TREE_CODE (iftrue)) == '1' && operand_equal_p (iffalse, TREE_OPERAND (iftrue, 0), 0)) || (TREE_CODE_CLASS (TREE_CODE (iffalse)) == '1' && operand_equal_p (iftrue, TREE_OPERAND (iffalse, 0), 0))) return expand_expr (build1 (NOP_EXPR, type, build (COND_EXPR, TREE_TYPE (iftrue), TREE_OPERAND (exp, 0), iftrue, iffalse)), target, tmode, modifier); } { /* Note that COND_EXPRs whose type is a structure or union are required to be constructed to contain assignments of a temporary variable, so that we can evaluate them here for side effect only. If type is void, we must do likewise. */ /* If an arm of the branch requires a cleanup, only that cleanup is performed. */ tree singleton = 0; tree binary_op = 0, unary_op = 0; /* If this is (A ? 1 : 0) and A is a condition, just evaluate it and convert it to our mode, if necessary. */ if (integer_onep (TREE_OPERAND (exp, 1)) && integer_zerop (TREE_OPERAND (exp, 2)) && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<') { if (ignore) { expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, modifier); return const0_rtx; } if (modifier == EXPAND_STACK_PARM) target = 0; op0 = expand_expr (TREE_OPERAND (exp, 0), target, mode, modifier); if (GET_MODE (op0) == mode) return op0; if (target == 0) target = gen_reg_rtx (mode); convert_move (target, op0, unsignedp); return target; } /* Check for X ? A + B : A. If we have this, we can copy A to the output and conditionally add B. Similarly for unary operations. Don't do this if X has side-effects because those side effects might affect A or B and the "?" operation is a sequence point in ANSI. (operand_equal_p tests for side effects.) */ if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 1))) == '2' && operand_equal_p (TREE_OPERAND (exp, 2), TREE_OPERAND (TREE_OPERAND (exp, 1), 0), 0)) singleton = TREE_OPERAND (exp, 2), binary_op = TREE_OPERAND (exp, 1); else if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 2))) == '2' && operand_equal_p (TREE_OPERAND (exp, 1), TREE_OPERAND (TREE_OPERAND (exp, 2), 0), 0)) singleton = TREE_OPERAND (exp, 1), binary_op = TREE_OPERAND (exp, 2); else if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 1))) == '1' && operand_equal_p (TREE_OPERAND (exp, 2), TREE_OPERAND (TREE_OPERAND (exp, 1), 0), 0)) singleton = TREE_OPERAND (exp, 2), unary_op = TREE_OPERAND (exp, 1); else if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 2))) == '1' && operand_equal_p (TREE_OPERAND (exp, 1), TREE_OPERAND (TREE_OPERAND (exp, 2), 0), 0)) singleton = TREE_OPERAND (exp, 1), unary_op = TREE_OPERAND (exp, 2); /* If we are not to produce a result, we have no target. Otherwise, if a target was specified use it; it will not be used as an intermediate target unless it is safe. If no target, use a temporary. 
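Concretely: TEMP below is left 0 when the value is ignored, reuses ORIGINAL_TARGET when that is demonstrably safe, and otherwise becomes a fresh temporary.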
*/ if (ignore) temp = 0; else if (modifier == EXPAND_STACK_PARM) temp = assign_temp (type, 0, 0, 1); else if (original_target && (safe_from_p (original_target, TREE_OPERAND (exp, 0), 1) || (singleton && REG_P (original_target) && REGNO (original_target) >= FIRST_PSEUDO_REGISTER && original_target == var_rtx (singleton))) && GET_MODE (original_target) == mode #ifdef HAVE_conditional_move && (! can_conditionally_move_p (mode) || REG_P (original_target) || TREE_ADDRESSABLE (type)) #endif && (!MEM_P (original_target) || TREE_ADDRESSABLE (type))) temp = original_target; else if (TREE_ADDRESSABLE (type)) abort (); else temp = assign_temp (type, 0, 0, 1); /* If we had X ? A + C : A, with C a constant power of 2, and we can do the test of X as a store-flag operation, do this as A + ((X != 0) << log C). Similarly for other simple binary operators. Only do for C == 1 if BRANCH_COST is low. */ if (temp && singleton && binary_op && (TREE_CODE (binary_op) == PLUS_EXPR || TREE_CODE (binary_op) == MINUS_EXPR || TREE_CODE (binary_op) == BIT_IOR_EXPR || TREE_CODE (binary_op) == BIT_XOR_EXPR) && (BRANCH_COST >= 3 ? integer_pow2p (TREE_OPERAND (binary_op, 1)) : integer_onep (TREE_OPERAND (binary_op, 1))) && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<') { rtx result; tree cond; optab boptab = (TREE_CODE (binary_op) == PLUS_EXPR ? (TYPE_TRAP_SIGNED (TREE_TYPE (binary_op)) ? addv_optab : add_optab) : TREE_CODE (binary_op) == MINUS_EXPR ? (TYPE_TRAP_SIGNED (TREE_TYPE (binary_op)) ? subv_optab : sub_optab) : TREE_CODE (binary_op) == BIT_IOR_EXPR ? ior_optab : xor_optab); /* If we had X ? A : A + 1, do this as A + (X == 0). */ if (singleton == TREE_OPERAND (exp, 1)) cond = invert_truthvalue (TREE_OPERAND (exp, 0)); else cond = TREE_OPERAND (exp, 0); result = do_store_flag (cond, (safe_from_p (temp, singleton, 1) ? temp : NULL_RTX), mode, BRANCH_COST <= 1); if (result != 0 && ! integer_onep (TREE_OPERAND (binary_op, 1))) result = expand_shift (LSHIFT_EXPR, mode, result, build_int_2 (tree_log2 (TREE_OPERAND (binary_op, 1)), 0), (safe_from_p (temp, singleton, 1) ? temp : NULL_RTX), 0); if (result) { op1 = expand_expr (singleton, NULL_RTX, VOIDmode, 0); return expand_binop (mode, boptab, op1, result, temp, unsignedp, OPTAB_LIB_WIDEN); } } do_pending_stack_adjust (); NO_DEFER_POP; op0 = gen_label_rtx (); if (singleton && ! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0))) { if (temp != 0) { /* If the target conflicts with the other operand of the binary op, we can't use it. Also, we can't use the target if it is a hard register, because evaluating the condition might clobber it. */ if ((binary_op && ! safe_from_p (temp, TREE_OPERAND (binary_op, 1), 1)) || (REG_P (temp) && REGNO (temp) < FIRST_PSEUDO_REGISTER)) temp = gen_reg_rtx (mode); store_expr (singleton, temp, modifier == EXPAND_STACK_PARM ? 2 : 0); } else expand_expr (singleton, ignore ? const0_rtx : NULL_RTX, VOIDmode, 0); if (singleton == TREE_OPERAND (exp, 1)) jumpif (TREE_OPERAND (exp, 0), op0); else jumpifnot (TREE_OPERAND (exp, 0), op0); start_cleanup_deferral (); if (binary_op && temp == 0) /* Just touch the other operand. */ expand_expr (TREE_OPERAND (binary_op, 1), ignore ? const0_rtx : NULL_RTX, VOIDmode, 0); else if (binary_op) store_expr (build (TREE_CODE (binary_op), type, make_tree (type, temp), TREE_OPERAND (binary_op, 1)), temp, modifier == EXPAND_STACK_PARM ? 2 : 0); else store_expr (build1 (TREE_CODE (unary_op), type, make_tree (type, temp)), temp, modifier == EXPAND_STACK_PARM ? 2 : 0); op1 = op0; } /* Check for A op 0 ? 
A : FOO and A op 0 ? FOO : A where OP is any comparison operator. If we have one of these cases, set the output to A, branch on A (cse will merge these two references), then set the output to FOO. */ else if (temp && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<' && integer_zerop (TREE_OPERAND (TREE_OPERAND (exp, 0), 1)) && operand_equal_p (TREE_OPERAND (TREE_OPERAND (exp, 0), 0), TREE_OPERAND (exp, 1), 0) && (! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0)) || TREE_CODE (TREE_OPERAND (exp, 1)) == SAVE_EXPR) && safe_from_p (temp, TREE_OPERAND (exp, 2), 1)) { if (REG_P (temp) && REGNO (temp) < FIRST_PSEUDO_REGISTER) temp = gen_reg_rtx (mode); store_expr (TREE_OPERAND (exp, 1), temp, modifier == EXPAND_STACK_PARM ? 2 : 0); jumpif (TREE_OPERAND (exp, 0), op0); start_cleanup_deferral (); if (TREE_TYPE (TREE_OPERAND (exp, 2)) != void_type_node) store_expr (TREE_OPERAND (exp, 2), temp, modifier == EXPAND_STACK_PARM ? 2 : 0); else expand_expr (TREE_OPERAND (exp, 2), ignore ? const0_rtx : NULL_RTX, VOIDmode, 0); op1 = op0; } else if (temp && TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == '<' && integer_zerop (TREE_OPERAND (TREE_OPERAND (exp, 0), 1)) && operand_equal_p (TREE_OPERAND (TREE_OPERAND (exp, 0), 0), TREE_OPERAND (exp, 2), 0) && (! TREE_SIDE_EFFECTS (TREE_OPERAND (exp, 0)) || TREE_CODE (TREE_OPERAND (exp, 2)) == SAVE_EXPR) && safe_from_p (temp, TREE_OPERAND (exp, 1), 1)) { if (REG_P (temp) && REGNO (temp) < FIRST_PSEUDO_REGISTER) temp = gen_reg_rtx (mode); store_expr (TREE_OPERAND (exp, 2), temp, modifier == EXPAND_STACK_PARM ? 2 : 0); jumpifnot (TREE_OPERAND (exp, 0), op0); start_cleanup_deferral (); if (TREE_TYPE (TREE_OPERAND (exp, 1)) != void_type_node) store_expr (TREE_OPERAND (exp, 1), temp, modifier == EXPAND_STACK_PARM ? 2 : 0); else expand_expr (TREE_OPERAND (exp, 1), ignore ? const0_rtx : NULL_RTX, VOIDmode, 0); op1 = op0; } else { op1 = gen_label_rtx (); jumpifnot (TREE_OPERAND (exp, 0), op0); start_cleanup_deferral (); /* One branch of the cond can be void, if it never returns. For example A ? throw : E */ if (temp != 0 && TREE_TYPE (TREE_OPERAND (exp, 1)) != void_type_node) store_expr (TREE_OPERAND (exp, 1), temp, modifier == EXPAND_STACK_PARM ? 2 : 0); else expand_expr (TREE_OPERAND (exp, 1), ignore ? const0_rtx : NULL_RTX, VOIDmode, 0); end_cleanup_deferral (); emit_queue (); emit_jump_insn (gen_jump (op1)); emit_barrier (); emit_label (op0); start_cleanup_deferral (); if (temp != 0 && TREE_TYPE (TREE_OPERAND (exp, 2)) != void_type_node) store_expr (TREE_OPERAND (exp, 2), temp, modifier == EXPAND_STACK_PARM ? 2 : 0); else expand_expr (TREE_OPERAND (exp, 2), ignore ? const0_rtx : NULL_RTX, VOIDmode, 0); } end_cleanup_deferral (); emit_queue (); emit_label (op1); OK_DEFER_POP; return temp; } case TARGET_EXPR: { /* Something needs to be initialized, but we didn't know where that thing was when building the tree. For example, it could be the return value of a function, or a parameter to a function which lays down in the stack, or a temporary variable which must be passed by reference. We guarantee that the expression will either be constructed or copied into our original target. */ tree slot = TREE_OPERAND (exp, 0); tree cleanups = NULL_TREE; tree exp1; if (TREE_CODE (slot) != VAR_DECL) abort (); if (! ignore) target = original_target; /* Set this here so that if we get a target that refers to a register variable that's already been used, put_reg_into_stack knows that it should fix up those uses. 
*/ TREE_USED (slot) = 1; if (target == 0) { if (DECL_RTL_SET_P (slot)) { target = DECL_RTL (slot); /* If we have already expanded the slot, so don't do it again. (mrs) */ if (TREE_OPERAND (exp, 1) == NULL_TREE) return target; } else { target = assign_temp (type, 2, 0, 1); SET_DECL_RTL (slot, target); /* Since SLOT is not known to the called function to belong to its stack frame, we must build an explicit cleanup. This case occurs when we must build up a reference to pass the reference as an argument. In this case, it is very likely that such a reference need not be built here. */ if (TREE_OPERAND (exp, 2) == 0) TREE_OPERAND (exp, 2) = lang_hooks.maybe_build_cleanup (slot); cleanups = TREE_OPERAND (exp, 2); } } else { /* This case does occur, when expanding a parameter which needs to be constructed on the stack. The target is the actual stack address that we want to initialize. The function we call will perform the cleanup in this case. */ /* If we have already assigned it space, use that space, not target that we were passed in, as our target parameter is only a hint. */ if (DECL_RTL_SET_P (slot)) { target = DECL_RTL (slot); /* If we have already expanded the slot, so don't do it again. (mrs) */ if (TREE_OPERAND (exp, 1) == NULL_TREE) return target; } else SET_DECL_RTL (slot, target); } exp1 = TREE_OPERAND (exp, 3) = TREE_OPERAND (exp, 1); /* Mark it as expanded. */ TREE_OPERAND (exp, 1) = NULL_TREE; if (VOID_TYPE_P (TREE_TYPE (exp1))) /* If the initializer is void, just expand it; it will initialize the object directly. */ expand_expr (exp1, const0_rtx, VOIDmode, 0); else store_expr (exp1, target, modifier == EXPAND_STACK_PARM ? 2 : 0); expand_decl_cleanup_eh (NULL_TREE, cleanups, CLEANUP_EH_ONLY (exp)); return target; } case INIT_EXPR: { tree lhs = TREE_OPERAND (exp, 0); tree rhs = TREE_OPERAND (exp, 1); temp = expand_assignment (lhs, rhs, ! ignore); return temp; } case MODIFY_EXPR: { /* If lhs is complex, expand calls in rhs before computing it. That's so we don't compute a pointer and save it over a call. If lhs is simple, compute it first so we can give it as a target if the rhs is just a call. This avoids an extra temp and copy and that prevents a partial-subsumption which makes bad code. Actually we could treat component_ref's of vars like vars. */ tree lhs = TREE_OPERAND (exp, 0); tree rhs = TREE_OPERAND (exp, 1); temp = 0; /* Check for |= or &= of a bitfield of size one into another bitfield of size 1. In this case, (unless we need the result of the assignment) we can do this more efficiently with a test followed by an assignment, if necessary. ??? At this point, we can't get a BIT_FIELD_REF here. But if things change so we do, this code should be enhanced to support it. */ if (ignore && TREE_CODE (lhs) == COMPONENT_REF && (TREE_CODE (rhs) == BIT_IOR_EXPR || TREE_CODE (rhs) == BIT_AND_EXPR) && TREE_OPERAND (rhs, 0) == lhs && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF && integer_onep (DECL_SIZE (TREE_OPERAND (lhs, 1))) && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1)))) { rtx label = gen_label_rtx (); do_jump (TREE_OPERAND (rhs, 1), TREE_CODE (rhs) == BIT_IOR_EXPR ? label : 0, TREE_CODE (rhs) == BIT_AND_EXPR ? label : 0); expand_assignment (lhs, convert (TREE_TYPE (rhs), (TREE_CODE (rhs) == BIT_IOR_EXPR ? integer_one_node : integer_zero_node)), 0); do_pending_stack_adjust (); emit_label (label); return const0_rtx; } temp = expand_assignment (lhs, rhs, ! 
ignore); return temp; } case RETURN_EXPR: if (!TREE_OPERAND (exp, 0)) expand_null_return (); else expand_return (TREE_OPERAND (exp, 0)); return const0_rtx; case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: return expand_increment (exp, 0, ignore); case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: /* Faster to treat as pre-increment if result is not used. */ return expand_increment (exp, ! ignore, ignore); case ADDR_EXPR: if (modifier == EXPAND_STACK_PARM) target = 0; /* If we are taking the address of something erroneous, just return a zero. */ if (TREE_CODE (TREE_OPERAND (exp, 0)) == ERROR_MARK) return const0_rtx; /* If we are taking the address of a constant and are at the top level, we have to use output_constant_def since we can't call force_const_mem at top level. */ else if (cfun == 0 && (TREE_CODE (TREE_OPERAND (exp, 0)) == CONSTRUCTOR || (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == 'c'))) op0 = XEXP (output_constant_def (TREE_OPERAND (exp, 0), 0), 0); else { /* We make sure to pass const0_rtx down if we came in with ignore set, to avoid doing the cleanups twice for something. */ op0 = expand_expr (TREE_OPERAND (exp, 0), ignore ? const0_rtx : NULL_RTX, VOIDmode, (modifier == EXPAND_INITIALIZER ? modifier : EXPAND_CONST_ADDRESS)); /* If we are going to ignore the result, OP0 will have been set to const0_rtx, so just return it. Don't get confused and think we are taking the address of the constant. */ if (ignore) return op0; /* Pass 1 for MODIFY, so that protect_from_queue doesn't get clever and returns a REG when given a MEM. */ op0 = protect_from_queue (op0, 1); /* We would like the object in memory. If it is a constant, we can have it be statically allocated into memory. For a non-constant, we need to allocate some memory and store the value into it. */ if (CONSTANT_P (op0)) op0 = force_const_mem (TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))), op0); else if (REG_P (op0) || GET_CODE (op0) == SUBREG || GET_CODE (op0) == CONCAT || GET_CODE (op0) == PARALLEL || GET_CODE (op0) == LO_SUM) { /* If this object is in a register, it can't be BLKmode. */ tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0)); rtx memloc = assign_temp (inner_type, 1, 1, 1); if (GET_CODE (op0) == PARALLEL) /* Handle calls that pass values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ emit_group_store (memloc, op0, inner_type, int_size_in_bytes (inner_type)); else emit_move_insn (memloc, op0); op0 = memloc; } if (!MEM_P (op0)) abort (); mark_temp_addr_taken (op0); if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER) { op0 = XEXP (op0, 0); if (GET_MODE (op0) == Pmode && mode == ptr_mode) op0 = convert_memory_address (ptr_mode, op0); return op0; } /* If OP0 is not aligned as least as much as the type requires, we need to make a temporary, copy OP0 to it, and take the address of the temporary. We want to use the alignment of the type, not of the operand. Note that this is incorrect for FUNCTION_TYPE, but the test for BLKmode means that can't happen. The test for BLKmode is because we never make mis-aligned MEMs with non-BLKmode. We don't need to do this at all if the machine doesn't have strict alignment. */ if (STRICT_ALIGNMENT && GET_MODE (op0) == BLKmode && (TYPE_ALIGN (TREE_TYPE (TREE_OPERAND (exp, 0))) > MEM_ALIGN (op0)) && MEM_ALIGN (op0) < BIGGEST_ALIGNMENT) { tree inner_type = TREE_TYPE (TREE_OPERAND (exp, 0)); rtx new; if (TYPE_ALIGN_OK (inner_type)) abort (); if (TREE_ADDRESSABLE (inner_type)) { /* We can't make a bitwise copy of this object, so fail. 
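This typically arises when taking the address of a misaligned member of a packed structure on a STRICT_ALIGNMENT target; because the member's type is TREE_ADDRESSABLE, no bitwise copy into an aligned temporary is possible, so the error below is all we can do.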
*/ error ("cannot take the address of an unaligned member"); return const0_rtx; } new = assign_stack_temp_for_type (TYPE_MODE (inner_type), MEM_SIZE (op0) ? INTVAL (MEM_SIZE (op0)) : int_size_in_bytes (inner_type), 1, build_qualified_type (inner_type, (TYPE_QUALS (inner_type) | TYPE_QUAL_CONST))); emit_block_move (new, op0, expr_size (TREE_OPERAND (exp, 0)), (modifier == EXPAND_STACK_PARM ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL)); op0 = new; } op0 = force_operand (XEXP (op0, 0), target); } if (flag_force_addr && !REG_P (op0) && modifier != EXPAND_CONST_ADDRESS && modifier != EXPAND_INITIALIZER && modifier != EXPAND_SUM) op0 = force_reg (Pmode, op0); if (REG_P (op0) && ! REG_USERVAR_P (op0)) mark_reg_pointer (op0, TYPE_ALIGN (TREE_TYPE (type))); if (GET_MODE (op0) == Pmode && mode == ptr_mode) op0 = convert_memory_address (ptr_mode, op0); return op0; case ENTRY_VALUE_EXPR: abort (); /* COMPLEX type for Extended Pascal & Fortran */ case COMPLEX_EXPR: { enum machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp))); rtx insns; /* Get the rtx code of the operands. */ op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0); op1 = expand_expr (TREE_OPERAND (exp, 1), 0, VOIDmode, 0); if (! target) target = gen_reg_rtx (TYPE_MODE (TREE_TYPE (exp))); start_sequence (); /* Move the real (op0) and imaginary (op1) parts to their location. */ emit_move_insn (gen_realpart (mode, target), op0); emit_move_insn (gen_imagpart (mode, target), op1); insns = get_insns (); end_sequence (); /* Complex construction should appear as a single unit. */ /* If TARGET is a CONCAT, we got insns like RD = RS, ID = IS, each with a separate pseudo as destination. It's not correct for flow to treat them as a unit. */ if (GET_CODE (target) != CONCAT) emit_no_conflict_block (insns, target, op0, op1, NULL_RTX); else emit_insn (insns); return target; } case REALPART_EXPR: op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0); return gen_realpart (mode, op0); case IMAGPART_EXPR: op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0); return gen_imagpart (mode, op0); case CONJ_EXPR: { enum machine_mode partmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp))); rtx imag_t; rtx insns; op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0); if (! target) target = gen_reg_rtx (mode); start_sequence (); /* Store the realpart and the negated imagpart to target. */ emit_move_insn (gen_realpart (partmode, target), gen_realpart (partmode, op0)); imag_t = gen_imagpart (partmode, target); temp = expand_unop (partmode, ! unsignedp && flag_trapv && (GET_MODE_CLASS(partmode) == MODE_INT) ? negv_optab : neg_optab, gen_imagpart (partmode, op0), imag_t, 0); if (temp != imag_t) emit_move_insn (imag_t, temp); insns = get_insns (); end_sequence (); /* Conjugate should appear as a single unit If TARGET is a CONCAT, we got insns like RD = RS, ID = - IS, each with a separate pseudo as destination. It's not correct for flow to treat them as a unit. 
*/ if (GET_CODE (target) != CONCAT) emit_no_conflict_block (insns, target, op0, NULL_RTX, NULL_RTX); else emit_insn (insns); return target; } case RESX_EXPR: expand_resx_expr (exp); return const0_rtx; case TRY_CATCH_EXPR: { tree handler = TREE_OPERAND (exp, 1); expand_eh_region_start (); op0 = expand_expr (TREE_OPERAND (exp, 0), 0, VOIDmode, 0); expand_eh_handler (handler); return op0; } case CATCH_EXPR: expand_start_catch (CATCH_TYPES (exp)); expand_expr (CATCH_BODY (exp), const0_rtx, VOIDmode, 0); expand_end_catch (); return const0_rtx; case EH_FILTER_EXPR: /* Should have been handled in expand_eh_handler. */ abort (); case TRY_FINALLY_EXPR: { tree try_block = TREE_OPERAND (exp, 0); tree finally_block = TREE_OPERAND (exp, 1); if ((!optimize && lang_protect_cleanup_actions == NULL) || unsafe_for_reeval (finally_block) > 1) { /* In this case, wrapping FINALLY_BLOCK in an UNSAVE_EXPR is not sufficient, so we cannot expand the block twice. So we play games with GOTO_SUBROUTINE_EXPR to let us expand the thing only once. */ /* When not optimizing, we go ahead with this form since (1) user breakpoints operate more predictably without code duplication, and (2) we're not running any of the global optimizers that would explode in time/space with the highly connected CFG created by the indirect branching. */ rtx finally_label = gen_label_rtx (); rtx done_label = gen_label_rtx (); rtx return_link = gen_reg_rtx (Pmode); tree cleanup = build (GOTO_SUBROUTINE_EXPR, void_type_node, (tree) finally_label, (tree) return_link); TREE_SIDE_EFFECTS (cleanup) = 1; /* Start a new binding layer that will keep track of all cleanup actions to be performed. */ expand_start_bindings (2); target_temp_slot_level = temp_slot_level; expand_decl_cleanup (NULL_TREE, cleanup); op0 = expand_expr (try_block, target, tmode, modifier); preserve_temp_slots (op0); expand_end_bindings (NULL_TREE, 0, 0); emit_jump (done_label); emit_label (finally_label); expand_expr (finally_block, const0_rtx, VOIDmode, 0); emit_indirect_jump (return_link); emit_label (done_label); } else { expand_start_bindings (2); target_temp_slot_level = temp_slot_level; expand_decl_cleanup (NULL_TREE, finally_block); op0 = expand_expr (try_block, target, tmode, modifier); preserve_temp_slots (op0); expand_end_bindings (NULL_TREE, 0, 0); } return op0; } case GOTO_SUBROUTINE_EXPR: { rtx subr = (rtx) TREE_OPERAND (exp, 0); rtx return_link = *(rtx *) &TREE_OPERAND (exp, 1); rtx return_address = gen_label_rtx (); emit_move_insn (return_link, gen_rtx_LABEL_REF (Pmode, return_address)); emit_jump (subr); emit_label (return_address); return const0_rtx; } case VA_ARG_EXPR: return expand_builtin_va_arg (TREE_OPERAND (exp, 0), type); case EXC_PTR_EXPR: return get_exception_pointer (cfun); case FILTER_EXPR: return get_exception_filter (cfun); case FDESC_EXPR: /* Function descriptors are not valid except for as initialization constants, and should not be expanded. */ abort (); case SWITCH_EXPR: expand_start_case (0, SWITCH_COND (exp), integer_type_node, "switch"); if (SWITCH_BODY (exp)) expand_expr_stmt (SWITCH_BODY (exp)); if (SWITCH_LABELS (exp)) { tree duplicate = 0; tree vec = SWITCH_LABELS (exp); size_t i, n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (vec, i); tree controlling_expr_type = TREE_TYPE (SWITCH_COND (exp)); tree min_value = TYPE_MIN_VALUE (controlling_expr_type); tree max_value = TYPE_MAX_VALUE (controlling_expr_type); tree case_low = CASE_LOW (elt); tree case_high = CASE_HIGH (elt) ? 
CASE_HIGH (elt) : case_low; if (case_low && case_high) { /* Case label is less than minimum for type. */ if (TREE_CODE (min_value) == INTEGER_CST && tree_int_cst_compare (case_low, min_value) < 0 && tree_int_cst_compare (case_high, min_value) < 0) { warning ("case label value %d is less than minimum value for type", TREE_INT_CST (case_low)); continue; } /* Case value is greater than maximum for type. */ if (TREE_CODE (max_value) == INTEGER_CST && tree_int_cst_compare (case_low, max_value) > 0 && tree_int_cst_compare (case_high, max_value) > 0) { warning ("case label value %d exceeds maximum value for type", TREE_INT_CST (case_high)); continue; } /* Saturate lower case label value to minimum. */ if (TREE_CODE (min_value) == INTEGER_CST && tree_int_cst_compare (case_high, min_value) >= 0 && tree_int_cst_compare (case_low, min_value) < 0) { warning ("lower value %d in case label range less than minimum value for type", TREE_INT_CST (case_low)); case_low = min_value; } /* Saturate upper case label value to maximum. */ if (TREE_CODE (max_value) == INTEGER_CST && tree_int_cst_compare (case_low, max_value) <= 0 && tree_int_cst_compare (case_high, max_value) > 0) { warning ("upper value %d in case label range exceeds maximum value for type", TREE_INT_CST (case_high)); case_high = max_value; } } add_case_node (case_low, case_high, CASE_LABEL (elt), &duplicate, true); if (duplicate) abort (); } } expand_end_case_type (SWITCH_COND (exp), TREE_TYPE (exp)); return const0_rtx; case LABEL_EXPR: expand_label (TREE_OPERAND (exp, 0)); return const0_rtx; case CASE_LABEL_EXPR: { tree duplicate = 0; add_case_node (CASE_LOW (exp), CASE_HIGH (exp), CASE_LABEL (exp), &duplicate, false); if (duplicate) abort (); return const0_rtx; } case ASM_EXPR: expand_asm_expr (exp); return const0_rtx; default: return lang_hooks.expand_expr (exp, original_target, tmode, modifier, alt_rtl); } /* Here to do an ordinary binary operator, generating an instruction from the optab already placed in `this_optab'. */ binop: expand_operands (TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1), subtarget, &op0, &op1, 0); binop2: if (modifier == EXPAND_STACK_PARM) target = 0; temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp, OPTAB_LIB_WIDEN); if (temp == 0) abort (); return temp; } /* Subroutine of above: returns 1 if OFFSET corresponds to an offset that when applied to the address of EXP produces an address known to be aligned more than BIGGEST_ALIGNMENT. */ static int is_aligning_offset (tree offset, tree exp) { /* Strip off any conversions. */ while (TREE_CODE (offset) == NON_LVALUE_EXPR || TREE_CODE (offset) == NOP_EXPR || TREE_CODE (offset) == CONVERT_EXPR) offset = TREE_OPERAND (offset, 0); /* We must now have a BIT_AND_EXPR with a constant that is one less than power of 2 and which is larger than BIGGEST_ALIGNMENT. */ if (TREE_CODE (offset) != BIT_AND_EXPR || !host_integerp (TREE_OPERAND (offset, 1), 1) || compare_tree_int (TREE_OPERAND (offset, 1), BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0 || !exact_log2 (tree_low_cst (TREE_OPERAND (offset, 1), 1) + 1) < 0) return 0; /* Look at the first operand of BIT_AND_EXPR and strip any conversion. It must be NEGATE_EXPR. Then strip any more conversions. 
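Overall, the pattern recognized here is OFFSET = (-ADDR) & MASK, where ADDR is the address of EXP and MASK is 2**N - 1; ADDR + OFFSET is then the next multiple of 2**N at or above ADDR (a mask of 63, for instance, yields a 64-byte-aligned address).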
*/ offset = TREE_OPERAND (offset, 0); while (TREE_CODE (offset) == NON_LVALUE_EXPR || TREE_CODE (offset) == NOP_EXPR || TREE_CODE (offset) == CONVERT_EXPR) offset = TREE_OPERAND (offset, 0); if (TREE_CODE (offset) != NEGATE_EXPR) return 0; offset = TREE_OPERAND (offset, 0); while (TREE_CODE (offset) == NON_LVALUE_EXPR || TREE_CODE (offset) == NOP_EXPR || TREE_CODE (offset) == CONVERT_EXPR) offset = TREE_OPERAND (offset, 0); /* This must now be the address of EXP. */ return TREE_CODE (offset) == ADDR_EXPR && TREE_OPERAND (offset, 0) == exp; } /* Return the tree node if an ARG corresponds to a string constant or zero if it doesn't. If we return nonzero, set *PTR_OFFSET to the offset in bytes within the string that ARG is accessing. The type of the offset will be `sizetype'. */ tree string_constant (tree arg, tree *ptr_offset) { STRIP_NOPS (arg); if (TREE_CODE (arg) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST) { *ptr_offset = size_zero_node; return TREE_OPERAND (arg, 0); } if (TREE_CODE (arg) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (arg, 0)) == ARRAY_REF && TREE_CODE (TREE_OPERAND (TREE_OPERAND (arg, 0), 0)) == STRING_CST) { *ptr_offset = convert (sizetype, TREE_OPERAND (TREE_OPERAND (arg, 0), 1)); return TREE_OPERAND (TREE_OPERAND (arg, 0), 0); } else if (TREE_CODE (arg) == PLUS_EXPR) { tree arg0 = TREE_OPERAND (arg, 0); tree arg1 = TREE_OPERAND (arg, 1); STRIP_NOPS (arg0); STRIP_NOPS (arg1); if (TREE_CODE (arg0) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST) { *ptr_offset = convert (sizetype, arg1); return TREE_OPERAND (arg0, 0); } else if (TREE_CODE (arg1) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (arg1, 0)) == STRING_CST) { *ptr_offset = convert (sizetype, arg0); return TREE_OPERAND (arg1, 0); } } return 0; } /* Expand code for a post- or pre- increment or decrement and return the RTX for the result. POST is 1 for postinc/decrements and 0 for preinc/decrements. */ static rtx expand_increment (tree exp, int post, int ignore) { rtx op0, op1; rtx temp, value; tree incremented = TREE_OPERAND (exp, 0); optab this_optab = add_optab; int icode; enum machine_mode mode = TYPE_MODE (TREE_TYPE (exp)); int op0_is_copy = 0; int single_insn = 0; /* 1 means we can't store into OP0 directly, because it is a subreg narrower than a word, and we don't dare clobber the rest of the word. */ int bad_subreg = 0; /* Stabilize any component ref that might need to be evaluated more than once below. */ if (!post || TREE_CODE (incremented) == BIT_FIELD_REF || (TREE_CODE (incremented) == COMPONENT_REF && (TREE_CODE (TREE_OPERAND (incremented, 0)) != INDIRECT_REF || DECL_BIT_FIELD (TREE_OPERAND (incremented, 1))))) incremented = stabilize_reference (incremented); /* Nested *INCREMENT_EXPRs can happen in C++. We must force innermost ones into save exprs so that they don't accidentally get evaluated more than once by the code below. */ if (TREE_CODE (incremented) == PREINCREMENT_EXPR || TREE_CODE (incremented) == PREDECREMENT_EXPR) incremented = save_expr (incremented); /* Compute the operands as RTX. Note whether OP0 is the actual lvalue or a copy of it: I believe it is a copy iff it is a register or subreg and insns were generated in computing it. */ temp = get_last_insn (); op0 = expand_expr (incremented, NULL_RTX, VOIDmode, 0); /* If OP0 is a SUBREG made for a promoted variable, we cannot increment in place but instead must do sign- or zero-extension during assignment, so we copy it into a new register and let the code below use it as a copy. 
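For instance, a `short' variable that the target promotes to a full word is represented by a SUBREG with SUBREG_PROMOTED_VAR_P set; incrementing it in place would leave the upper bits of the word unextended, so a post-increment works on a copy and a pre-increment falls back to expanding a full assignment below.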
Note that we can safely modify this SUBREG since it is know not to be shared (it was made by the expand_expr call above). */ if (GET_CODE (op0) == SUBREG && SUBREG_PROMOTED_VAR_P (op0)) { if (post) SUBREG_REG (op0) = copy_to_reg (SUBREG_REG (op0)); else bad_subreg = 1; } else if (GET_CODE (op0) == SUBREG && GET_MODE_BITSIZE (GET_MODE (op0)) < BITS_PER_WORD) { /* We cannot increment this SUBREG in place. If we are post-incrementing, get a copy of the old value. Otherwise, just mark that we cannot increment in place. */ if (post) op0 = copy_to_reg (op0); else bad_subreg = 1; } op0_is_copy = ((GET_CODE (op0) == SUBREG || REG_P (op0)) && temp != get_last_insn ()); op1 = expand_expr (TREE_OPERAND (exp, 1), NULL_RTX, VOIDmode, 0); /* Decide whether incrementing or decrementing. */ if (TREE_CODE (exp) == POSTDECREMENT_EXPR || TREE_CODE (exp) == PREDECREMENT_EXPR) this_optab = sub_optab; /* Convert decrement by a constant into a negative increment. */ if (this_optab == sub_optab && GET_CODE (op1) == CONST_INT) { op1 = GEN_INT (-INTVAL (op1)); this_optab = add_optab; } if (TYPE_TRAP_SIGNED (TREE_TYPE (exp))) this_optab = this_optab == add_optab ? addv_optab : subv_optab; /* For a preincrement, see if we can do this with a single instruction. */ if (!post) { icode = (int) this_optab->handlers[(int) mode].insn_code; if (icode != (int) CODE_FOR_nothing /* Make sure that OP0 is valid for operands 0 and 1 of the insn we want to queue. */ && (*insn_data[icode].operand[0].predicate) (op0, mode) && (*insn_data[icode].operand[1].predicate) (op0, mode) && (*insn_data[icode].operand[2].predicate) (op1, mode)) single_insn = 1; } /* If OP0 is not the actual lvalue, but rather a copy in a register, then we cannot just increment OP0. We must therefore contrive to increment the original value. Then, for postincrement, we can return OP0 since it is a copy of the old value. For preincrement, expand here unless we can do it with a single insn. Likewise if storing directly into OP0 would clobber high bits we need to preserve (bad_subreg). */ if (op0_is_copy || (!post && !single_insn) || bad_subreg) { /* This is the easiest way to increment the value wherever it is. Problems with multiple evaluation of INCREMENTED are prevented because either (1) it is a component_ref or preincrement, in which case it was stabilized above, or (2) it is an array_ref with constant index in an array in a register, which is safe to reevaluate. */ tree newexp = build (((TREE_CODE (exp) == POSTDECREMENT_EXPR || TREE_CODE (exp) == PREDECREMENT_EXPR) ? MINUS_EXPR : PLUS_EXPR), TREE_TYPE (exp), incremented, TREE_OPERAND (exp, 1)); while (TREE_CODE (incremented) == NOP_EXPR || TREE_CODE (incremented) == CONVERT_EXPR) { newexp = convert (TREE_TYPE (incremented), newexp); incremented = TREE_OPERAND (incremented, 0); } temp = expand_assignment (incremented, newexp, ! post && ! ignore); return post ? op0 : temp; } if (post) { /* We have a true reference to the value in OP0. If there is an insn to add or subtract in this mode, queue it. Queuing the increment insn avoids the register shuffling that often results if we must increment now and first save the old value for subsequent use. */ #if 0 /* Turned off to avoid making extra insn for indexed memref. */ op0 = stabilize (op0); #endif icode = (int) this_optab->handlers[(int) mode].insn_code; if (icode != (int) CODE_FOR_nothing /* Make sure that OP0 is valid for operands 0 and 1 of the insn we want to queue. 
*/ && (*insn_data[icode].operand[0].predicate) (op0, mode) && (*insn_data[icode].operand[1].predicate) (op0, mode)) { if (! (*insn_data[icode].operand[2].predicate) (op1, mode)) op1 = force_reg (mode, op1); return enqueue_insn (op0, GEN_FCN (icode) (op0, op0, op1)); } if (icode != (int) CODE_FOR_nothing && MEM_P (op0)) { rtx addr = (general_operand (XEXP (op0, 0), mode) ? force_reg (Pmode, XEXP (op0, 0)) : copy_to_reg (XEXP (op0, 0))); rtx temp, result; op0 = replace_equiv_address (op0, addr); temp = force_reg (GET_MODE (op0), op0); if (! (*insn_data[icode].operand[2].predicate) (op1, mode)) op1 = force_reg (mode, op1); /* The increment queue is LIFO, thus we have to `queue' the instructions in reverse order. */ enqueue_insn (op0, gen_move_insn (op0, temp)); result = enqueue_insn (temp, GEN_FCN (icode) (temp, temp, op1)); return result; } } /* Preincrement, or we can't increment with one simple insn. */ if (post) /* Save a copy of the value before inc or dec, to return it later. */ temp = value = copy_to_reg (op0); else /* Arrange to return the incremented value. */ /* Copy the rtx because expand_binop will protect from the queue, and the results of that would be invalid for us to return if our caller does emit_queue before using our result. */ temp = copy_rtx (value = op0); /* Increment however we can. */ op1 = expand_binop (mode, this_optab, value, op1, op0, TYPE_UNSIGNED (TREE_TYPE (exp)), OPTAB_LIB_WIDEN); /* Make sure the value is stored into OP0. */ if (op1 != op0) emit_move_insn (op0, op1); return temp; } /* Generate code to calculate EXP using a store-flag instruction and return an rtx for the result. EXP is either a comparison or a TRUTH_NOT_EXPR whose operand is a comparison. If TARGET is nonzero, store the result there if convenient. If ONLY_CHEAP is nonzero, only do this if it is likely to be very cheap. Return zero if there is no suitable set-flag instruction available on this machine. Once expand_expr has been called on the arguments of the comparison, we are committed to doing the store flag, since it is not safe to re-evaluate the expression. We emit the store-flag insn by calling emit_store_flag, but only expand the arguments if we have a reason to believe that emit_store_flag will be successful. If we think that it will, but it isn't, we have to simulate the store-flag with a set/jump/set sequence. */ static rtx do_store_flag (tree exp, rtx target, enum machine_mode mode, int only_cheap) { enum rtx_code code; tree arg0, arg1, type; tree tem; enum machine_mode operand_mode; int invert = 0; int unsignedp; rtx op0, op1; enum insn_code icode; rtx subtarget = target; rtx result, label; /* If this is a TRUTH_NOT_EXPR, set a flag indicating we must invert the result at the end. We can't simply invert the test since it would have already been inverted if it were valid. This case occurs for some floating-point comparisons. */ if (TREE_CODE (exp) == TRUTH_NOT_EXPR) invert = 1, exp = TREE_OPERAND (exp, 0); arg0 = TREE_OPERAND (exp, 0); arg1 = TREE_OPERAND (exp, 1); /* Don't crash if the comparison was erroneous. */ if (arg0 == error_mark_node || arg1 == error_mark_node) return const0_rtx; type = TREE_TYPE (arg0); operand_mode = TYPE_MODE (type); unsignedp = TYPE_UNSIGNED (type); /* We won't bother with BLKmode store-flag operations because it would mean passing a lot of information to emit_store_flag. */ if (operand_mode == BLKmode) return 0; /* We won't bother with store-flag operations involving function pointers when function pointers must be canonicalized before comparisons. 
*/ #ifdef HAVE_canonicalize_funcptr_for_compare if (HAVE_canonicalize_funcptr_for_compare && ((TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == POINTER_TYPE && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))) == FUNCTION_TYPE)) || (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 1))) == POINTER_TYPE && (TREE_CODE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 1)))) == FUNCTION_TYPE)))) return 0; #endif STRIP_NOPS (arg0); STRIP_NOPS (arg1); /* Get the rtx comparison code to use. We know that EXP is a comparison operation of some type. Some comparisons against 1 and -1 can be converted to comparisons with zero. Do so here so that the tests below will be aware that we have a comparison with zero. These tests will not catch constants in the first operand, but constants are rarely passed as the first operand. */ switch (TREE_CODE (exp)) { case EQ_EXPR: code = EQ; break; case NE_EXPR: code = NE; break; case LT_EXPR: if (integer_onep (arg1)) arg1 = integer_zero_node, code = unsignedp ? LEU : LE; else code = unsignedp ? LTU : LT; break; case LE_EXPR: if (! unsignedp && integer_all_onesp (arg1)) arg1 = integer_zero_node, code = LT; else code = unsignedp ? LEU : LE; break; case GT_EXPR: if (! unsignedp && integer_all_onesp (arg1)) arg1 = integer_zero_node, code = GE; else code = unsignedp ? GTU : GT; break; case GE_EXPR: if (integer_onep (arg1)) arg1 = integer_zero_node, code = unsignedp ? GTU : GT; else code = unsignedp ? GEU : GE; break; case UNORDERED_EXPR: code = UNORDERED; break; case ORDERED_EXPR: code = ORDERED; break; case UNLT_EXPR: code = UNLT; break; case UNLE_EXPR: code = UNLE; break; case UNGT_EXPR: code = UNGT; break; case UNGE_EXPR: code = UNGE; break; case UNEQ_EXPR: code = UNEQ; break; case LTGT_EXPR: code = LTGT; break; default: abort (); } /* Put a constant second. */ if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST) { tem = arg0; arg0 = arg1; arg1 = tem; code = swap_condition (code); } /* If this is an equality or inequality test of a single bit, we can do this by shifting the bit being tested to the low-order bit and masking the result with the constant 1. If the condition was EQ, we xor it with 1. This does not require an scc insn and is faster than an scc insn even if we have it. The code to make this transformation was moved into fold_single_bit_test, so we just call into the folder and expand its result. */ if ((code == NE || code == EQ) && TREE_CODE (arg0) == BIT_AND_EXPR && integer_zerop (arg1) && integer_pow2p (TREE_OPERAND (arg0, 1))) { tree type = lang_hooks.types.type_for_mode (mode, unsignedp); return expand_expr (fold_single_bit_test (code == NE ? NE_EXPR : EQ_EXPR, arg0, arg1, type), target, VOIDmode, EXPAND_NORMAL); } /* Now see if we are likely to be able to do this. Return if not. */ if (! can_compare_p (code, operand_mode, ccp_store_flag)) return 0; icode = setcc_gen_code[(int) code]; if (icode == CODE_FOR_nothing || (only_cheap && insn_data[(int) icode].operand[0].mode != mode)) { /* We can only do this if it is one of the special cases that can be handled without an scc insn. */ if ((code == LT && integer_zerop (arg1)) || (! only_cheap && code == GE && integer_zerop (arg1))) ; else if (BRANCH_COST >= 0 && ! only_cheap && (code == NE || code == EQ) && TREE_CODE (type) != REAL_TYPE && ((abs_optab->handlers[(int) operand_mode].insn_code != CODE_FOR_nothing) || (ffs_optab->handlers[(int) operand_mode].insn_code != CODE_FOR_nothing))) ; else return 0; } if (! 
get_subtarget (target) || GET_MODE (subtarget) != operand_mode) subtarget = 0; expand_operands (arg0, arg1, subtarget, &op0, &op1, 0); if (target == 0) target = gen_reg_rtx (mode); /* Pass copies of OP0 and OP1 in case they contain a QUEUED. This is safe because, if the emit_store_flag does anything it will succeed and OP0 and OP1 will not be used subsequently. */ result = emit_store_flag (target, code, queued_subexp_p (op0) ? copy_rtx (op0) : op0, queued_subexp_p (op1) ? copy_rtx (op1) : op1, operand_mode, unsignedp, 1); if (result) { if (invert) result = expand_binop (mode, xor_optab, result, const1_rtx, result, 0, OPTAB_LIB_WIDEN); return result; } /* If this failed, we have to do this with set/compare/jump/set code. */ if (!REG_P (target) || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1)) target = gen_reg_rtx (GET_MODE (target)); emit_move_insn (target, invert ? const0_rtx : const1_rtx); result = compare_from_rtx (op0, op1, code, unsignedp, operand_mode, NULL_RTX); if (GET_CODE (result) == CONST_INT) return (((result == const0_rtx && ! invert) || (result != const0_rtx && invert)) ? const0_rtx : const1_rtx); /* The code of RESULT may not match CODE if compare_from_rtx decided to swap its operands and reverse the original code. We know that compare_from_rtx returns either a CONST_INT or a new comparison code, so it is safe to just extract the code from RESULT. */ code = GET_CODE (result); label = gen_label_rtx (); if (bcc_gen_fctn[(int) code] == 0) abort (); emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label)); emit_move_insn (target, invert ? const1_rtx : const0_rtx); emit_label (label); return target; } /* Stubs in case we haven't got a casesi insn. */ #ifndef HAVE_casesi # define HAVE_casesi 0 # define gen_casesi(a, b, c, d, e) (0) # define CODE_FOR_casesi CODE_FOR_nothing #endif /* If the machine does not have a case insn that compares the bounds, this means extra overhead for dispatch tables, which raises the threshold for using them. */ #ifndef CASE_VALUES_THRESHOLD #define CASE_VALUES_THRESHOLD (HAVE_casesi ? 4 : 5) #endif /* CASE_VALUES_THRESHOLD */ unsigned int case_values_threshold (void) { return CASE_VALUES_THRESHOLD; } /* Attempt to generate a casesi instruction. Returns 1 if successful, 0 otherwise (i.e. if there is no casesi instruction). */ int try_casesi (tree index_type, tree index_expr, tree minval, tree range, rtx table_label ATTRIBUTE_UNUSED, rtx default_label) { enum machine_mode index_mode = SImode; int index_bits = GET_MODE_BITSIZE (index_mode); rtx op1, op2, index; enum machine_mode op_mode; if (! HAVE_casesi) return 0; /* Convert the index to SImode. */ if (GET_MODE_BITSIZE (TYPE_MODE (index_type)) > GET_MODE_BITSIZE (index_mode)) { enum machine_mode omode = TYPE_MODE (index_type); rtx rangertx = expand_expr (range, NULL_RTX, VOIDmode, 0); /* We must handle the endpoints in the original mode. */ index_expr = build (MINUS_EXPR, index_type, index_expr, minval); minval = integer_zero_node; index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0); emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX, omode, 1, default_label); /* Now we can safely truncate. 
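The comparison just emitted branches to DEFAULT_LABEL whenever RANGE is unsigned-less-than INDEX, so any index that reaches the conversion below is already known to lie within the table bounds.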
*/ index = convert_to_mode (index_mode, index, 0); } else { if (TYPE_MODE (index_type) != index_mode) { index_expr = convert (lang_hooks.types.type_for_size (index_bits, 0), index_expr); index_type = TREE_TYPE (index_expr); } index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0); } emit_queue (); index = protect_from_queue (index, 0); do_pending_stack_adjust (); op_mode = insn_data[(int) CODE_FOR_casesi].operand[0].mode; if (! (*insn_data[(int) CODE_FOR_casesi].operand[0].predicate) (index, op_mode)) index = copy_to_mode_reg (op_mode, index); op1 = expand_expr (minval, NULL_RTX, VOIDmode, 0); op_mode = insn_data[(int) CODE_FOR_casesi].operand[1].mode; op1 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (minval)), op1, TYPE_UNSIGNED (TREE_TYPE (minval))); if (! (*insn_data[(int) CODE_FOR_casesi].operand[1].predicate) (op1, op_mode)) op1 = copy_to_mode_reg (op_mode, op1); op2 = expand_expr (range, NULL_RTX, VOIDmode, 0); op_mode = insn_data[(int) CODE_FOR_casesi].operand[2].mode; op2 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (range)), op2, TYPE_UNSIGNED (TREE_TYPE (range))); if (! (*insn_data[(int) CODE_FOR_casesi].operand[2].predicate) (op2, op_mode)) op2 = copy_to_mode_reg (op_mode, op2); emit_jump_insn (gen_casesi (index, op1, op2, table_label, default_label)); return 1; } /* Attempt to generate a tablejump instruction; same concept. */ #ifndef HAVE_tablejump #define HAVE_tablejump 0 #define gen_tablejump(x, y) (0) #endif /* Subroutine of the next function. INDEX is the value being switched on, with the lowest value in the table already subtracted. MODE is its expected mode (needed if INDEX is constant). RANGE is the length of the jump table. TABLE_LABEL is a CODE_LABEL rtx for the table itself. DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the index value is out of range. */ static void do_tablejump (rtx index, enum machine_mode mode, rtx range, rtx table_label, rtx default_label) { rtx temp, vector; if (INTVAL (range) > cfun->max_jumptable_ents) cfun->max_jumptable_ents = INTVAL (range); /* Do an unsigned comparison (in the proper mode) between the index expression and the value which represents the length of the range. Since we just finished subtracting the lower bound of the range from the index expression, this comparison allows us to simultaneously check that the original index expression value is both greater than or equal to the minimum value of the range and less than or equal to the maximum value of the range. */ emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1, default_label); /* If index is in range, it must fit in Pmode. Convert to Pmode so we can index with it. */ if (mode != Pmode) index = convert_to_mode (Pmode, index, 1); /* Don't let a MEM slip through, because then INDEX that comes out of PIC_CASE_VECTOR_ADDRESS won't be a valid address, and break_out_memory_refs will go to work on it and mess it up. */ #ifdef PIC_CASE_VECTOR_ADDRESS if (flag_pic && !REG_P (index)) index = copy_to_mode_reg (Pmode, index); #endif /* If flag_force_addr were to affect this address it could interfere with the tricky assumptions made about addresses that contain label-refs, which may be valid only very near the tablejump itself. */ /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the GET_MODE_SIZE, because this indicates how large insns are. The other uses should all be Pmode, because they are addresses. This code could fail if addresses and insns are not the same size. 
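To recap the range check above with concrete numbers: for case values 5 through 10, RANGE is 5 and the lower bound has already been subtracted, so an original value of 3 wraps to a huge unsigned number and an original value of 12 becomes 7; both compare above 5 in the GTU test and branch to DEFAULT_LABEL. The address formed below is TABLE_LABEL + INDEX * GET_MODE_SIZE (CASE_VECTOR_MODE), i.e. a byte offset into the dispatch table.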
*/ index = gen_rtx_PLUS (Pmode, gen_rtx_MULT (Pmode, index, GEN_INT (GET_MODE_SIZE (CASE_VECTOR_MODE))), gen_rtx_LABEL_REF (Pmode, table_label)); #ifdef PIC_CASE_VECTOR_ADDRESS if (flag_pic) index = PIC_CASE_VECTOR_ADDRESS (index); else #endif index = memory_address_noforce (CASE_VECTOR_MODE, index); temp = gen_reg_rtx (CASE_VECTOR_MODE); vector = gen_rtx_MEM (CASE_VECTOR_MODE, index); RTX_UNCHANGING_P (vector) = 1; MEM_NOTRAP_P (vector) = 1; convert_move (temp, vector, 0); emit_jump_insn (gen_tablejump (temp, table_label)); /* If we are generating PIC code or if the table is PC-relative, the table and JUMP_INSN must be adjacent, so don't output a BARRIER. */ if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic) emit_barrier (); } int try_tablejump (tree index_type, tree index_expr, tree minval, tree range, rtx table_label, rtx default_label) { rtx index; if (! HAVE_tablejump) return 0; index_expr = fold (build (MINUS_EXPR, index_type, convert (index_type, index_expr), convert (index_type, minval))); index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0); emit_queue (); index = protect_from_queue (index, 0); do_pending_stack_adjust (); do_tablejump (index, TYPE_MODE (index_type), convert_modes (TYPE_MODE (index_type), TYPE_MODE (TREE_TYPE (range)), expand_expr (range, NULL_RTX, VOIDmode, 0), TYPE_UNSIGNED (TREE_TYPE (range))), table_label, default_label); return 1; } /* Nonzero if the mode is a valid vector mode for this architecture. This returns nonzero even if there is no hardware support for the vector mode, but we can emulate with narrower modes. */ int vector_mode_valid_p (enum machine_mode mode) { enum mode_class class = GET_MODE_CLASS (mode); enum machine_mode innermode; /* Doh! What's going on? */ if (class != MODE_VECTOR_INT && class != MODE_VECTOR_FLOAT) return 0; /* Hardware support. Woo hoo! */ if (VECTOR_MODE_SUPPORTED_P (mode)) return 1; innermode = GET_MODE_INNER (mode); /* We should probably return 1 if requesting V4DI and we have no DI, but we have V2DI, but this is probably very unlikely. */ /* If we have support for the inner mode, we can safely emulate it. We may not have V2DI, but me can emulate with a pair of DIs. */ return mov_optab->handlers[innermode].insn_code != CODE_FOR_nothing; } /* Return a CONST_VECTOR rtx for a VECTOR_CST tree. */ static rtx const_vector_from_tree (tree exp) { rtvec v; int units, i; tree link, elt; enum machine_mode inner, mode; mode = TYPE_MODE (TREE_TYPE (exp)); if (initializer_zerop (exp)) return CONST0_RTX (mode); units = GET_MODE_NUNITS (mode); inner = GET_MODE_INNER (mode); v = rtvec_alloc (units); link = TREE_VECTOR_CST_ELTS (exp); for (i = 0; link; link = TREE_CHAIN (link), ++i) { elt = TREE_VALUE (link); if (TREE_CODE (elt) == REAL_CST) RTVEC_ELT (v, i) = CONST_DOUBLE_FROM_REAL_VALUE (TREE_REAL_CST (elt), inner); else RTVEC_ELT (v, i) = immed_double_const (TREE_INT_CST_LOW (elt), TREE_INT_CST_HIGH (elt), inner); } /* Initialize remaining elements to 0. */ for (; i < units; ++i) RTVEC_ELT (v, i) = CONST0_RTX (inner); return gen_rtx_raw_CONST_VECTOR (mode, v); } /* Type information for expr.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_expr_h[] = { { &block_clear_fn, 1, sizeof (block_clear_fn), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &block_move_fn, 1, sizeof (block_move_fn), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; /* Convert RTL to assembler code and output it, for GNU compiler. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This is the final pass of the compiler. It looks at the rtl code for a function and outputs assembler code. Call `final_start_function' to output the assembler code for function entry, `final' to output assembler code for some RTL code, `final_end_function' to output assembler code for function exit. If a function is compiled in several pieces, each piece is output separately with `final'. Some optimizations are also done at this level. Move instructions that were made unnecessary by good register allocation are detected and omitted from the output. (Though most of these are removed by the last jump pass.) Instructions to set the condition codes are omitted when it can be seen that the condition codes already had the desired values. In some cases it is sufficient if the inherited condition codes have related values, but this may require the following insn (the one that tests the condition codes) to be modified. The code for the function prologue and epilogue is generated directly in assembler by the target functions function_prologue and function_epilogue. Those instructions never exist as rtl. */ #ifdef XCOFF_DEBUGGING_INFO /* Needed for external data declarations for e.g. AIX 4.x. */ #endif #if defined (DWARF2_UNWIND_INFO) || defined (DWARF2_DEBUGGING_INFO) #endif #ifdef DBX_DEBUGGING_INFO #endif /* If we aren't using cc0, CC_STATUS_INIT shouldn't exist. So define a null default for it to save conditionalization later. */ #ifndef CC_STATUS_INIT #define CC_STATUS_INIT #endif /* How to start an assembler comment. */ #ifndef ASM_COMMENT_START #define ASM_COMMENT_START ";#" #endif /* Is the given character a logical line separator for the assembler?
*/ #ifndef IS_ASM_LOGICAL_LINE_SEPARATOR #define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == ';') #endif #ifndef JUMP_TABLES_IN_TEXT_SECTION #define JUMP_TABLES_IN_TEXT_SECTION 0 #endif #if defined(READONLY_DATA_SECTION) || defined(READONLY_DATA_SECTION_ASM_OP) #define HAVE_READONLY_DATA_SECTION 1 #else #define HAVE_READONLY_DATA_SECTION 0 #endif /* Bitflags used by final_scan_insn. */ #define SEEN_BB 1 #define SEEN_NOTE 2 #define SEEN_EMITTED 4 /* Last insn processed by final_scan_insn. */ static rtx debug_insn; rtx current_output_insn; /* Line number of last NOTE. */ static int last_linenum; /* Highest line number in current block. */ static int high_block_linenum; /* Likewise for function. */ static int high_function_linenum; /* Filename of last NOTE. */ static const char *last_filename; extern int length_unit_log; /* This is defined in insn-attrtab.c. */ /* Nonzero while outputting an `asm' with operands. This means that inconsistencies are the user's fault, so don't abort. The precise value is the insn being output, to pass to error_for_asm. */ rtx this_is_asm_operands; /* Number of operands of this insn, for an `asm' with operands. */ static unsigned int insn_noperands; /* Compare optimization flag. */ static rtx last_ignored_compare = 0; /* Assign a unique number to each insn that is output. This can be used to generate unique local labels. */ static int insn_counter = 0; #ifdef HAVE_cc0 /* This variable contains machine-dependent flags (defined in tm.h) set and examined by output routines that describe how to interpret the condition codes properly. */ CC_STATUS cc_status; /* During output of an insn, this contains a copy of cc_status from before the insn. */ CC_STATUS cc_prev_status; #endif /* Indexed by hardware reg number, is 1 if that register is ever used in the current function. In life_analysis, or in stupid_life_analysis, this is set up to record the hard regs used explicitly. Reload adds in the hard regs used for holding pseudo regs. Final uses it to generate the code in the function prologue and epilogue to save and restore registers as needed. */ char regs_ever_live[FIRST_PSEUDO_REGISTER]; /* Like regs_ever_live, but 1 if a reg is set or clobbered from an asm. Unlike regs_ever_live, elements of this array corresponding to eliminable regs like the frame pointer are set if an asm sets them. */ char regs_asm_clobbered[FIRST_PSEUDO_REGISTER]; /* Nonzero means current function must be given a frame pointer. Initialized in function.c to 0. Set only in reload1.c as per the needs of the function. */ int frame_pointer_needed; /* Number of unmatched NOTE_INSN_BLOCK_BEG notes we have seen. */ static int block_depth; /* Nonzero if have enabled APP processing of our assembler output. */ static int app_on; /* If we are outputting an insn sequence, this contains the sequence rtx. Zero otherwise. */ rtx final_sequence; #ifdef ASSEMBLER_DIALECT /* Number of the assembler dialect to use, starting at 0. */ static int dialect_number; #endif #ifdef HAVE_conditional_execution /* Nonnull if the insn currently being emitted was a COND_EXEC pattern. 
*/ rtx current_insn_predicate; #endif #ifdef HAVE_ATTR_length static int asm_insn_count (rtx); #endif static void profile_function (FILE *); static void profile_after_prologue (FILE *); static bool notice_source_line (rtx); static rtx walk_alter_subreg (rtx *); static void output_asm_name (void); static void output_alternate_entry_point (FILE *, rtx); static tree get_mem_expr_from_op (rtx, int *); static void output_asm_operand_names (rtx *, int *, int); static void output_operand (rtx, int); #ifdef LEAF_REGISTERS static void leaf_renumber_regs (rtx); #endif #ifdef HAVE_cc0 static int alter_cond (rtx); #endif #ifndef ADDR_VEC_ALIGN static int final_addr_vec_align (rtx); #endif #ifdef HAVE_ATTR_length static int align_fuzz (rtx, rtx, int, unsigned); #endif /* Initialize data in final at the beginning of a compilation. */ void init_final (const char *filename ATTRIBUTE_UNUSED) { app_on = 0; final_sequence = 0; #ifdef ASSEMBLER_DIALECT dialect_number = ASSEMBLER_DIALECT; #endif } /* Default target function prologue and epilogue assembler output. If not overridden for epilogue code, then the function body itself contains return instructions wherever needed. */ void default_function_pro_epilogue (FILE *file ATTRIBUTE_UNUSED, HOST_WIDE_INT size ATTRIBUTE_UNUSED) { } /* Default target hook that outputs nothing to a stream. */ void no_asm_to_stream (FILE *file ATTRIBUTE_UNUSED) { } /* Enable APP processing of subsequent output. Used before the output from an `asm' statement. */ void app_enable (void) { if (! app_on) { fputs (ASM_APP_ON, asm_out_file); app_on = 1; } } /* Disable APP processing of subsequent output. Called from varasm.c before most kinds of output. */ void app_disable (void) { if (app_on) { fputs (ASM_APP_OFF, asm_out_file); app_on = 0; } } /* Return the number of slots filled in the current delayed branch sequence (we don't count the insn needing the delay slot). Zero if not in a delayed branch sequence. */ #ifdef DELAY_SLOTS int dbr_sequence_length (void) { if (final_sequence != 0) return XVECLEN (final_sequence, 0) - 1; else return 0; } #endif /* The next two pages contain routines used to compute the length of an insn and to shorten branches. */ /* Arrays for insn lengths, and addresses. The latter is referenced by `insn_current_length'. */ static int *insn_lengths; varray_type insn_addresses_; /* Max uid for which the above arrays are valid. */ static int insn_lengths_max_uid; /* Address of insn being processed. Used by `insn_current_length'. */ int insn_current_address; /* Address of insn being processed in previous iteration. */ int insn_last_address; /* known invariant alignment of insn being processed. */ int insn_current_align; /* After shorten_branches, for any insn, uid_align[INSN_UID (insn)] gives the next following alignment insn that increases the known alignment, or NULL_RTX if there is no such insn. For any alignment obtained this way, we can again index uid_align with its uid to obtain the next following align that in turn increases the alignment, till we reach NULL_RTX; the sequence obtained this way for each insn we'll call the alignment chain of this insn in the following comments. */ struct label_alignment { short alignment; short max_skip; }; static rtx *uid_align; static int *uid_shuid; static struct label_alignment *label_align; /* Indicate that branch shortening hasn't yet been done. 
*/ void init_insn_lengths (void) { if (uid_shuid) { free (uid_shuid); uid_shuid = 0; } if (insn_lengths) { free (insn_lengths); insn_lengths = 0; insn_lengths_max_uid = 0; } #ifdef HAVE_ATTR_length INSN_ADDRESSES_FREE (); #endif if (uid_align) { free (uid_align); uid_align = 0; } } /* Obtain the current length of an insn. If branch shortening has been done, get its actual length. Otherwise, get its maximum length. */ int get_attr_length (rtx insn ATTRIBUTE_UNUSED) { #ifdef HAVE_ATTR_length rtx body; int i; int length = 0; if (insn_lengths_max_uid > INSN_UID (insn)) return insn_lengths[INSN_UID (insn)]; else switch (GET_CODE (insn)) { case NOTE: case BARRIER: case CODE_LABEL: return 0; case CALL_INSN: length = insn_default_length (insn); break; case JUMP_INSN: body = PATTERN (insn); if (GET_CODE (body) == ADDR_VEC || GET_CODE (body) == ADDR_DIFF_VEC) { /* Alignment is machine-dependent and should be handled by ADDR_VEC_ALIGN. */ } else length = insn_default_length (insn); break; case INSN: body = PATTERN (insn); if (GET_CODE (body) == USE || GET_CODE (body) == CLOBBER) return 0; else if (GET_CODE (body) == ASM_INPUT || asm_noperands (body) >= 0) length = asm_insn_count (body) * insn_default_length (insn); else if (GET_CODE (body) == SEQUENCE) for (i = 0; i < XVECLEN (body, 0); i++) length += get_attr_length (XVECEXP (body, 0, i)); else length = insn_default_length (insn); break; default: break; } #ifdef ADJUST_INSN_LENGTH ADJUST_INSN_LENGTH (insn, length); #endif return length; #else /* not HAVE_ATTR_length */ return 0; #endif /* not HAVE_ATTR_length */ } /* Code to handle alignment inside shorten_branches. */ /* Here is an explanation how the algorithm in align_fuzz can give proper results: Call a sequence of instructions beginning with alignment point X and continuing until the next alignment point `block X'. When `X' is used in an expression, it means the alignment value of the alignment point. Call the distance between the start of the first insn of block X, and the end of the last insn of block X `IX', for the `inner size of X'. This is clearly the sum of the instruction lengths. Likewise with the next alignment-delimited block following X, which we shall call block Y. Call the distance between the start of the first insn of block X, and the start of the first insn of block Y `OX', for the `outer size of X'. The estimated padding is then OX - IX. OX can be safely estimated as if (X >= Y) OX = round_up(IX, Y) else OX = round_up(IX, X) + Y - X Clearly est(IX) >= real(IX), because that only depends on the instruction lengths, and those being overestimated is a given. Clearly round_up(foo, Z) >= round_up(bar, Z) if foo >= bar, so we needn't worry about that when thinking about OX. When X >= Y, the alignment provided by Y adds no uncertainty factor for branch ranges starting before X, so we can just round what we have. But when X < Y, we don't know anything about the, so to speak, `middle bits', so we have to assume the worst when aligning up from an address mod X to one mod Y, which is Y - X. 
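To make the estimate concrete with hypothetical numbers: if IX = 10, X = 4 and Y = 8, then X < Y and est(OX) = round_up (10, 4) + 8 - 4 = 16; if instead X = 8 and Y = 4, then X >= Y and est(OX) = round_up (10, 4) = 12.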
*/ #ifndef LABEL_ALIGN #define LABEL_ALIGN(LABEL) align_labels_log #endif #ifndef LABEL_ALIGN_MAX_SKIP #define LABEL_ALIGN_MAX_SKIP align_labels_max_skip #endif #ifndef LOOP_ALIGN #define LOOP_ALIGN(LABEL) align_loops_log #endif #ifndef LOOP_ALIGN_MAX_SKIP #define LOOP_ALIGN_MAX_SKIP align_loops_max_skip #endif #ifndef LABEL_ALIGN_AFTER_BARRIER #define LABEL_ALIGN_AFTER_BARRIER(LABEL) 0 #endif #ifndef LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP #define LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP 0 #endif #ifndef JUMP_ALIGN #define JUMP_ALIGN(LABEL) align_jumps_log #endif #ifndef JUMP_ALIGN_MAX_SKIP #define JUMP_ALIGN_MAX_SKIP align_jumps_max_skip #endif #ifndef ADDR_VEC_ALIGN static int final_addr_vec_align (rtx addr_vec) { int align = GET_MODE_SIZE (GET_MODE (PATTERN (addr_vec))); if (align > BIGGEST_ALIGNMENT / BITS_PER_UNIT) align = BIGGEST_ALIGNMENT / BITS_PER_UNIT; return exact_log2 (align); } #define ADDR_VEC_ALIGN(ADDR_VEC) final_addr_vec_align (ADDR_VEC) #endif #ifndef INSN_LENGTH_ALIGNMENT #define INSN_LENGTH_ALIGNMENT(INSN) length_unit_log #endif #define INSN_SHUID(INSN) (uid_shuid[INSN_UID (INSN)]) static int min_labelno, max_labelno; #define LABEL_TO_ALIGNMENT(LABEL) \ (label_align[CODE_LABEL_NUMBER (LABEL) - min_labelno].alignment) #define LABEL_TO_MAX_SKIP(LABEL) \ (label_align[CODE_LABEL_NUMBER (LABEL) - min_labelno].max_skip) /* For the benefit of port specific code do this also as a function. */ int label_to_alignment (rtx label) { return LABEL_TO_ALIGNMENT (label); } #ifdef HAVE_ATTR_length /* The differences in addresses between a branch and its target might grow or shrink depending on the alignment the start insn of the range (the branch for a forward branch or the label for a backward branch) starts out on; if these differences are used naively, they can even oscillate infinitely. We therefore want to compute a 'worst case' address difference that is independent of the alignment the start insn of the range end up on, and that is at least as large as the actual difference. The function align_fuzz calculates the amount we have to add to the naively computed difference, by traversing the part of the alignment chain of the start insn of the range that is in front of the end insn of the range, and considering for each alignment the maximum amount that it might contribute to a size increase. For casesi tables, we also want to know worst case minimum amounts of address difference, in case a machine description wants to introduce some common offset that is added to all offsets in a table. For this purpose, align_fuzz with a growth argument of 0 computes the appropriate adjustment. */ /* Compute the maximum delta by which the difference of the addresses of START and END might grow / shrink due to a different address for start which changes the size of alignment insns between START and END. KNOWN_ALIGN_LOG is the alignment known for START. GROWTH should be ~0 if the objective is to compute potential code size increase, and 0 if the objective is to compute potential shrink. The return value is undefined for any other value of GROWTH. 
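Illustrative use, restating the callers below rather than defining a new interface: to bound the address difference of two insns from above, add align_fuzz (start, end, known_align_log, ~0) to the naively computed difference; to bound it from below, subtract align_fuzz (start, end, known_align_log, 0) instead. This is how insn_current_reference_address and the ADDR_DIFF_VEC shortening code use it, modulo which endpoint the adjustment is folded into.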
*/ static int align_fuzz (rtx start, rtx end, int known_align_log, unsigned int growth) { int uid = INSN_UID (start); rtx align_label; int known_align = 1 << known_align_log; int end_shuid = INSN_SHUID (end); int fuzz = 0; for (align_label = uid_align[uid]; align_label; align_label = uid_align[uid]) { int align_addr, new_align; uid = INSN_UID (align_label); align_addr = INSN_ADDRESSES (uid) - insn_lengths[uid]; if (uid_shuid[uid] > end_shuid) break; known_align_log = LABEL_TO_ALIGNMENT (align_label); new_align = 1 << known_align_log; if (new_align < known_align) continue; fuzz += (-align_addr ^ growth) & (new_align - known_align); known_align = new_align; } return fuzz; } /* Compute a worst-case reference address of a branch so that it can be safely used in the presence of aligned labels. Since the size of the branch itself is unknown, the size of the branch is not included in the range. I.e. for a forward branch, the reference address is the end address of the branch as known from the previous branch shortening pass, minus a value to account for possible size increase due to alignment. For a backward branch, it is the start address of the branch as known from the current pass, plus a value to account for possible size increase due to alignment. NB.: Therefore, the maximum offset allowed for backward branches needs to exclude the branch size. */ int insn_current_reference_address (rtx branch) { rtx dest, seq; int seq_uid; if (! INSN_ADDRESSES_SET_P ()) return 0; seq = NEXT_INSN (PREV_INSN (branch)); seq_uid = INSN_UID (seq); if (GET_CODE (branch) != JUMP_INSN) /* This can happen for example on the PA; the objective is to know the offset to address something in front of the start of the function. Thus, we can treat it like a backward branch. We assume here that FUNCTION_BOUNDARY / BITS_PER_UNIT is larger than any alignment we'd encounter, so we skip the call to align_fuzz. */ return insn_current_address; dest = JUMP_LABEL (branch); /* BRANCH has no proper alignment chain set, so use SEQ. BRANCH also has no INSN_SHUID. */ if (INSN_SHUID (seq) < INSN_SHUID (dest)) { /* Forward branch. */ return (insn_last_address + insn_lengths[seq_uid] - align_fuzz (seq, dest, length_unit_log, ~0)); } else { /* Backward branch. */ return (insn_current_address + align_fuzz (dest, seq, length_unit_log, ~0)); } } #endif /* HAVE_ATTR_length */ void compute_alignments (void) { int log, max_skip, max_log; basic_block bb; if (label_align) { free (label_align); label_align = 0; } max_labelno = max_label_num (); min_labelno = get_first_label_num (); label_align = xcalloc (max_labelno - min_labelno + 1, sizeof (struct label_alignment)); /* If not optimizing or optimizing for size, don't assign any alignments. */ if (! optimize || optimize_size) return; FOR_EACH_BB (bb) { rtx label = BB_HEAD (bb); int fallthru_frequency = 0, branch_frequency = 0, has_fallthru = 0; edge e; if (GET_CODE (label) != CODE_LABEL || probably_never_executed_bb_p (bb)) continue; max_log = LABEL_ALIGN (label); max_skip = LABEL_ALIGN_MAX_SKIP; for (e = bb->pred; e; e = e->pred_next) { if (e->flags & EDGE_FALLTHRU) has_fallthru = 1, fallthru_frequency += EDGE_FREQUENCY (e); else branch_frequency += EDGE_FREQUENCY (e); } /* There are two purposes to align block with no fallthru incoming edge: 1) to avoid fetch stalls when branch destination is near cache boundary 2) to improve cache efficiency in case the previous block is not executed (so it does not need to be in the cache). We to catch first case, we align frequently executed blocks. 
To catch the second, we align blocks that are executed more frequently than the predecessor and the predecessor is likely to not be executed when function is called. */ if (!has_fallthru && (branch_frequency > BB_FREQ_MAX / 10 || (bb->frequency > bb->prev_bb->frequency * 10 && (bb->prev_bb->frequency <= ENTRY_BLOCK_PTR->frequency / 2)))) { log = JUMP_ALIGN (label); if (max_log < log) { max_log = log; max_skip = JUMP_ALIGN_MAX_SKIP; } } /* In case block is frequent and reached mostly by non-fallthru edge, align it. It is most likely a first block of loop. */ if (has_fallthru && maybe_hot_bb_p (bb) && branch_frequency + fallthru_frequency > BB_FREQ_MAX / 10 && branch_frequency > fallthru_frequency * 2) { log = LOOP_ALIGN (label); if (max_log < log) { max_log = log; max_skip = LOOP_ALIGN_MAX_SKIP; } } LABEL_TO_ALIGNMENT (label) = max_log; LABEL_TO_MAX_SKIP (label) = max_skip; } } /* Make a pass over all insns and compute their actual lengths by shortening any branches of variable length if possible. */ /* shorten_branches might be called multiple times: for example, the SH port splits out-of-range conditional branches in MACHINE_DEPENDENT_REORG. In order to do this, it needs proper length information, which it obtains by calling shorten_branches. This cannot be collapsed with shorten_branches itself into a single pass unless we also want to integrate reorg.c, since the branch splitting exposes new instructions with delay slots. */ void shorten_branches (rtx first ATTRIBUTE_UNUSED) { rtx insn; int max_uid; int i; int max_log; int max_skip; #ifdef HAVE_ATTR_length #define MAX_CODE_ALIGN 16 rtx seq; int something_changed = 1; char *varying_length; rtx body; int uid; rtx align_tab[MAX_CODE_ALIGN]; #endif /* Compute maximum UID and allocate label_align / uid_shuid. */ max_uid = get_max_uid (); /* Free uid_shuid before reallocating it. */ free (uid_shuid); uid_shuid = xmalloc (max_uid * sizeof *uid_shuid); if (max_labelno != max_label_num ()) { int old = max_labelno; int n_labels; int n_old_labels; max_labelno = max_label_num (); n_labels = max_labelno - min_labelno + 1; n_old_labels = old - min_labelno + 1; label_align = xrealloc (label_align, n_labels * sizeof (struct label_alignment)); /* Range of labels grows monotonically in the function. Abort here means that the initialization of array got lost. */ if (n_old_labels > n_labels) abort (); memset (label_align + n_old_labels, 0, (n_labels - n_old_labels) * sizeof (struct label_alignment)); } /* Initialize label_align and set up uid_shuid to be strictly monotonically rising with insn order. */ /* We use max_log here to keep track of the maximum alignment we want to impose on the next CODE_LABEL (or the current one if we are processing the CODE_LABEL itself). */ max_log = 0; max_skip = 0; for (insn = get_insns (), i = 1; insn; insn = NEXT_INSN (insn)) { int log; INSN_SHUID (insn) = i++; if (INSN_P (insn)) { /* reorg might make the first insn of a loop being run once only, and delete the label in front of it. Then we want to apply the loop alignment to the new label created by reorg, which is separated by the former loop start insn from the NOTE_INSN_LOOP_BEG. */ } else if (GET_CODE (insn) == CODE_LABEL) { rtx next; /* Merge in alignments computed by compute_alignments. 
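That is, we start from the LABEL_TO_ALIGNMENT / LABEL_TO_MAX_SKIP values that compute_alignments derived from the profile, and raise them to the port's LABEL_ALIGN and, when a jump table that lives in the text section follows the label, to ADDR_VEC_ALIGN, keeping the maximum seen so far in max_log / max_skip.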
*/ log = LABEL_TO_ALIGNMENT (insn); if (max_log < log) { max_log = log; max_skip = LABEL_TO_MAX_SKIP (insn); } log = LABEL_ALIGN (insn); if (max_log < log) { max_log = log; max_skip = LABEL_ALIGN_MAX_SKIP; } next = NEXT_INSN (insn); /* ADDR_VECs only take room if read-only data goes into the text section. */ if (JUMP_TABLES_IN_TEXT_SECTION || !HAVE_READONLY_DATA_SECTION) if (next && GET_CODE (next) == JUMP_INSN) { rtx nextbody = PATTERN (next); if (GET_CODE (nextbody) == ADDR_VEC || GET_CODE (nextbody) == ADDR_DIFF_VEC) { log = ADDR_VEC_ALIGN (next); if (max_log < log) { max_log = log; max_skip = LABEL_ALIGN_MAX_SKIP; } } } LABEL_TO_ALIGNMENT (insn) = max_log; LABEL_TO_MAX_SKIP (insn) = max_skip; max_log = 0; max_skip = 0; } else if (GET_CODE (insn) == BARRIER) { rtx label; for (label = insn; label && ! INSN_P (label); label = NEXT_INSN (label)) if (GET_CODE (label) == CODE_LABEL) { log = LABEL_ALIGN_AFTER_BARRIER (insn); if (max_log < log) { max_log = log; max_skip = LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP; } break; } } } #ifdef HAVE_ATTR_length /* Allocate the rest of the arrays. */ insn_lengths = xmalloc (max_uid * sizeof (*insn_lengths)); insn_lengths_max_uid = max_uid; /* Syntax errors can lead to labels being outside of the main insn stream. Initialize insn_addresses, so that we get reproducible results. */ INSN_ADDRESSES_ALLOC (max_uid); varying_length = xcalloc (max_uid, sizeof (char)); /* Initialize uid_align. We scan instructions from end to start, and keep in align_tab[n] the last seen insn that does an alignment of at least n+1, i.e. the successor in the alignment chain for an insn that does / has a known alignment of n. */ uid_align = xcalloc (max_uid, sizeof *uid_align); for (i = MAX_CODE_ALIGN; --i >= 0;) align_tab[i] = NULL_RTX; seq = get_last_insn (); for (; seq; seq = PREV_INSN (seq)) { int uid = INSN_UID (seq); int log; log = (GET_CODE (seq) == CODE_LABEL ? LABEL_TO_ALIGNMENT (seq) : 0); uid_align[uid] = align_tab[0]; if (log) { /* Found an alignment label. */ uid_align[uid] = align_tab[log]; for (i = log - 1; i >= 0; i--) align_tab[i] = seq; } } #ifdef CASE_VECTOR_SHORTEN_MODE if (optimize) { /* Look for ADDR_DIFF_VECs, and initialize their minimum and maximum label fields. 
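Concretely, for each ADDR_DIFF_VEC we record in XEXP (pat, 2) and XEXP (pat, 3) LABEL_REFs for the referenced labels with the smallest and largest INSN_SHUID, note the minimum alignment of any referenced label, and set the addr_diff_vec_flags bits saying whether the base label and those two extreme labels come before or after the vector (and the base) in the insn stream; the shortening loop further down uses this to bound the offsets that can appear in the table.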
*/ int min_shuid = INSN_SHUID (get_insns ()) - 1; int max_shuid = INSN_SHUID (get_last_insn ()) + 1; int rel; for (insn = first; insn != 0; insn = NEXT_INSN (insn)) { rtx min_lab = NULL_RTX, max_lab = NULL_RTX, pat; int len, i, min, max, insn_shuid; int min_align; addr_diff_vec_flags flags; if (GET_CODE (insn) != JUMP_INSN || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC) continue; pat = PATTERN (insn); len = XVECLEN (pat, 1); if (len <= 0) abort (); min_align = MAX_CODE_ALIGN; for (min = max_shuid, max = min_shuid, i = len - 1; i >= 0; i--) { rtx lab = XEXP (XVECEXP (pat, 1, i), 0); int shuid = INSN_SHUID (lab); if (shuid < min) { min = shuid; min_lab = lab; } if (shuid > max) { max = shuid; max_lab = lab; } if (min_align > LABEL_TO_ALIGNMENT (lab)) min_align = LABEL_TO_ALIGNMENT (lab); } XEXP (pat, 2) = gen_rtx_LABEL_REF (VOIDmode, min_lab); XEXP (pat, 3) = gen_rtx_LABEL_REF (VOIDmode, max_lab); insn_shuid = INSN_SHUID (insn); rel = INSN_SHUID (XEXP (XEXP (pat, 0), 0)); flags.min_align = min_align; flags.base_after_vec = rel > insn_shuid; flags.min_after_vec = min > insn_shuid; flags.max_after_vec = max > insn_shuid; flags.min_after_base = min > rel; flags.max_after_base = max > rel; ADDR_DIFF_VEC_FLAGS (pat) = flags; } } #endif /* CASE_VECTOR_SHORTEN_MODE */ /* Compute initial lengths, addresses, and varying flags for each insn. */ for (insn_current_address = 0, insn = first; insn != 0; insn_current_address += insn_lengths[uid], insn = NEXT_INSN (insn)) { uid = INSN_UID (insn); insn_lengths[uid] = 0; if (GET_CODE (insn) == CODE_LABEL) { int log = LABEL_TO_ALIGNMENT (insn); if (log) { int align = 1 << log; int new_address = (insn_current_address + align - 1) & -align; insn_lengths[uid] = new_address - insn_current_address; } } INSN_ADDRESSES (uid) = insn_current_address + insn_lengths[uid]; if (GET_CODE (insn) == NOTE || GET_CODE (insn) == BARRIER || GET_CODE (insn) == CODE_LABEL) continue; if (INSN_DELETED_P (insn)) continue; body = PATTERN (insn); if (GET_CODE (body) == ADDR_VEC || GET_CODE (body) == ADDR_DIFF_VEC) { /* This only takes room if read-only data goes into the text section. */ if (JUMP_TABLES_IN_TEXT_SECTION || !HAVE_READONLY_DATA_SECTION) insn_lengths[uid] = (XVECLEN (body, GET_CODE (body) == ADDR_DIFF_VEC) * GET_MODE_SIZE (GET_MODE (body))); /* Alignment is handled by ADDR_VEC_ALIGN. */ } else if (GET_CODE (body) == ASM_INPUT || asm_noperands (body) >= 0) insn_lengths[uid] = asm_insn_count (body) * insn_default_length (insn); else if (GET_CODE (body) == SEQUENCE) { int i; int const_delay_slots; #ifdef DELAY_SLOTS const_delay_slots = const_num_delay_slots (XVECEXP (body, 0, 0)); #else const_delay_slots = 0; #endif /* Inside a delay slot sequence, we do not do any branch shortening if the shortening could change the number of delay slots of the branch. 
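(const_num_delay_slots tells us whether the slot count of this branch is fixed; only in that case are the insns inside the SEQUENCE marked as variable-length and given their own addresses below, otherwise they are treated as fixed-length.)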
*/ for (i = 0; i < XVECLEN (body, 0); i++) { rtx inner_insn = XVECEXP (body, 0, i); int inner_uid = INSN_UID (inner_insn); int inner_length; if (GET_CODE (body) == ASM_INPUT || asm_noperands (PATTERN (XVECEXP (body, 0, i))) >= 0) inner_length = (asm_insn_count (PATTERN (inner_insn)) * insn_default_length (inner_insn)); else inner_length = insn_default_length (inner_insn); insn_lengths[inner_uid] = inner_length; if (const_delay_slots) { if ((varying_length[inner_uid] = insn_variable_length_p (inner_insn)) != 0) varying_length[uid] = 1; INSN_ADDRESSES (inner_uid) = (insn_current_address + insn_lengths[uid]); } else varying_length[inner_uid] = 0; insn_lengths[uid] += inner_length; } } else if (GET_CODE (body) != USE && GET_CODE (body) != CLOBBER) { insn_lengths[uid] = insn_default_length (insn); varying_length[uid] = insn_variable_length_p (insn); } /* If needed, do any adjustment. */ #ifdef ADJUST_INSN_LENGTH ADJUST_INSN_LENGTH (insn, insn_lengths[uid]); if (insn_lengths[uid] < 0) fatal_insn ("negative insn length", insn); #endif } /* Now loop over all the insns finding varying length insns. For each, get the current insn length. If it has changed, reflect the change. When nothing changes for a full pass, we are done. */ while (something_changed) { something_changed = 0; insn_current_align = MAX_CODE_ALIGN - 1; for (insn_current_address = 0, insn = first; insn != 0; insn = NEXT_INSN (insn)) { int new_length; #ifdef ADJUST_INSN_LENGTH int tmp_length; #endif int length_align; uid = INSN_UID (insn); if (GET_CODE (insn) == CODE_LABEL) { int log = LABEL_TO_ALIGNMENT (insn); if (log > insn_current_align) { int align = 1 << log; int new_address= (insn_current_address + align - 1) & -align; insn_lengths[uid] = new_address - insn_current_address; insn_current_align = log; insn_current_address = new_address; } else insn_lengths[uid] = 0; INSN_ADDRESSES (uid) = insn_current_address; continue; } length_align = INSN_LENGTH_ALIGNMENT (insn); if (length_align < insn_current_align) insn_current_align = length_align; insn_last_address = INSN_ADDRESSES (uid); INSN_ADDRESSES (uid) = insn_current_address; #ifdef CASE_VECTOR_SHORTEN_MODE if (optimize && GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC) { rtx body = PATTERN (insn); int old_length = insn_lengths[uid]; rtx rel_lab = XEXP (XEXP (body, 0), 0); rtx min_lab = XEXP (XEXP (body, 2), 0); rtx max_lab = XEXP (XEXP (body, 3), 0); int rel_addr = INSN_ADDRESSES (INSN_UID (rel_lab)); int min_addr = INSN_ADDRESSES (INSN_UID (min_lab)); int max_addr = INSN_ADDRESSES (INSN_UID (max_lab)); rtx prev; int rel_align = 0; addr_diff_vec_flags flags; /* Avoid automatic aggregate initialization. */ flags = ADDR_DIFF_VEC_FLAGS (body); /* Try to find a known alignment for rel_lab. */ for (prev = rel_lab; prev && ! insn_lengths[INSN_UID (prev)] && ! (varying_length[INSN_UID (prev)] & 1); prev = PREV_INSN (prev)) if (varying_length[INSN_UID (prev)] & 2) { rel_align = LABEL_TO_ALIGNMENT (prev); break; } /* See the comment on addr_diff_vec_flags in rtl.h for the meaning of the flags values. base: REL_LAB vec: INSN */ /* Anything after INSN has still addresses from the last pass; adjust these so that they reflect our current estimate for this pass. */ if (flags.base_after_vec) rel_addr += insn_current_address - insn_last_address; if (flags.min_after_vec) min_addr += insn_current_address - insn_last_address; if (flags.max_after_vec) max_addr += insn_current_address - insn_last_address; /* We want to know the worst case, i.e. 
lowest possible value for the offset of MIN_LAB. If MIN_LAB is after REL_LAB, its offset is positive, and we have to be wary of code shrink; otherwise, it is negative, and we have to be vary of code size increase. */ if (flags.min_after_base) { /* If INSN is between REL_LAB and MIN_LAB, the size changes we are about to make can change the alignment within the observed offset, therefore we have to break it up into two parts that are independent. */ if (! flags.base_after_vec && flags.min_after_vec) { min_addr -= align_fuzz (rel_lab, insn, rel_align, 0); min_addr -= align_fuzz (insn, min_lab, 0, 0); } else min_addr -= align_fuzz (rel_lab, min_lab, rel_align, 0); } else { if (flags.base_after_vec && ! flags.min_after_vec) { min_addr -= align_fuzz (min_lab, insn, 0, ~0); min_addr -= align_fuzz (insn, rel_lab, 0, ~0); } else min_addr -= align_fuzz (min_lab, rel_lab, 0, ~0); } /* Likewise, determine the highest lowest possible value for the offset of MAX_LAB. */ if (flags.max_after_base) { if (! flags.base_after_vec && flags.max_after_vec) { max_addr += align_fuzz (rel_lab, insn, rel_align, ~0); max_addr += align_fuzz (insn, max_lab, 0, ~0); } else max_addr += align_fuzz (rel_lab, max_lab, rel_align, ~0); } else { if (flags.base_after_vec && ! flags.max_after_vec) { max_addr += align_fuzz (max_lab, insn, 0, 0); max_addr += align_fuzz (insn, rel_lab, 0, 0); } else max_addr += align_fuzz (max_lab, rel_lab, 0, 0); } PUT_MODE (body, CASE_VECTOR_SHORTEN_MODE (min_addr - rel_addr, max_addr - rel_addr, body)); if (JUMP_TABLES_IN_TEXT_SECTION || !HAVE_READONLY_DATA_SECTION) { insn_lengths[uid] = (XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body))); insn_current_address += insn_lengths[uid]; if (insn_lengths[uid] != old_length) something_changed = 1; } continue; } #endif /* CASE_VECTOR_SHORTEN_MODE */ if (! (varying_length[uid])) { if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) { int i; body = PATTERN (insn); for (i = 0; i < XVECLEN (body, 0); i++) { rtx inner_insn = XVECEXP (body, 0, i); int inner_uid = INSN_UID (inner_insn); INSN_ADDRESSES (inner_uid) = insn_current_address; insn_current_address += insn_lengths[inner_uid]; } } else insn_current_address += insn_lengths[uid]; continue; } if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) { int i; body = PATTERN (insn); new_length = 0; for (i = 0; i < XVECLEN (body, 0); i++) { rtx inner_insn = XVECEXP (body, 0, i); int inner_uid = INSN_UID (inner_insn); int inner_length; INSN_ADDRESSES (inner_uid) = insn_current_address; /* insn_current_length returns 0 for insns with a non-varying length. */ if (! varying_length[inner_uid]) inner_length = insn_lengths[inner_uid]; else inner_length = insn_current_length (inner_insn); if (inner_length != insn_lengths[inner_uid]) { insn_lengths[inner_uid] = inner_length; something_changed = 1; } insn_current_address += insn_lengths[inner_uid]; new_length += inner_length; } } else { new_length = insn_current_length (insn); insn_current_address += new_length; } #ifdef ADJUST_INSN_LENGTH /* If needed, do any adjustment. */ tmp_length = new_length; ADJUST_INSN_LENGTH (insn, new_length); insn_current_address += (new_length - tmp_length); #endif if (new_length != insn_lengths[uid]) { insn_lengths[uid] = new_length; something_changed = 1; } } /* For a non-optimizing compile, do only a single pass. 
*/ if (!optimize) break; } free (varying_length); #endif /* HAVE_ATTR_length */ } #ifdef HAVE_ATTR_length /* Given the body of an INSN known to be generated by an ASM statement, return the number of machine instructions likely to be generated for this insn. This is used to compute its length. */ static int asm_insn_count (rtx body) { const char *template; int count = 1; if (GET_CODE (body) == ASM_INPUT) template = XSTR (body, 0); else template = decode_asm_operands (body, NULL, NULL, NULL, NULL); for (; *template; template++) if (IS_ASM_LOGICAL_LINE_SEPARATOR (*template) || *template == '\n') count++; return count; } #endif /* Output assembler code for the start of a function, and initialize some of the variables in this file for the new function. The label for the function and associated assembler pseudo-ops have already been output in `assemble_start_function'. FIRST is the first insn of the rtl for the function being compiled. FILE is the file to write assembler code to. OPTIMIZE is nonzero if we should eliminate redundant test and compare insns. */ void final_start_function (rtx first ATTRIBUTE_UNUSED, FILE *file, int optimize ATTRIBUTE_UNUSED) { block_depth = 0; this_is_asm_operands = 0; last_filename = locator_file (prologue_locator); last_linenum = locator_line (prologue_locator); high_block_linenum = high_function_linenum = last_linenum; (*debug_hooks->begin_prologue) (last_linenum, last_filename); #if defined (DWARF2_UNWIND_INFO) || defined (IA64_UNWIND_INFO) if (write_symbols != DWARF2_DEBUG && write_symbols != VMS_AND_DWARF2_DEBUG) dwarf2out_begin_prologue (0, NULL); #endif #ifdef LEAF_REG_REMAP if (current_function_uses_only_leaf_regs) leaf_renumber_regs (first); #endif /* The Sun386i and perhaps other machines don't work right if the profiling code comes after the prologue. */ #ifdef PROFILE_BEFORE_PROLOGUE if (current_function_profile) profile_function (file); #endif /* PROFILE_BEFORE_PROLOGUE */ #if defined (DWARF2_UNWIND_INFO) && defined (HAVE_prologue) if (dwarf2out_do_frame ()) dwarf2out_frame_debug (NULL_RTX); #endif /* If debugging, assign block numbers to all of the blocks in this function. */ if (write_symbols) { remove_unnecessary_notes (); reemit_insn_block_notes (); number_blocks (current_function_decl); /* We never actually put out begin/end notes for the top-level block in the function. But, conceptually, that block is always needed. */ TREE_ASM_WRITTEN (DECL_INITIAL (current_function_decl)) = 1; } /* First output the function prologue: code to set up the stack frame. */ targetm.asm_out.function_prologue (file, get_frame_size ()); /* If the machine represents the prologue as RTL, the profiling code must be emitted when NOTE_INSN_PROLOGUE_END is scanned. */ #ifdef HAVE_prologue if (! HAVE_prologue) #endif profile_after_prologue (file); } static void profile_after_prologue (FILE *file ATTRIBUTE_UNUSED) { #ifndef PROFILE_BEFORE_PROLOGUE if (current_function_profile) profile_function (file); #endif /* not PROFILE_BEFORE_PROLOGUE */ } static void profile_function (FILE *file ATTRIBUTE_UNUSED) { #ifndef NO_PROFILE_COUNTERS # define NO_PROFILE_COUNTERS 0 #endif #if defined(ASM_OUTPUT_REG_PUSH) int sval = current_function_returns_struct; rtx svrtx = targetm.calls.struct_value_rtx (TREE_TYPE (current_function_decl), 1); #if defined(STATIC_CHAIN_INCOMING_REGNUM) || defined(STATIC_CHAIN_REGNUM) int cxt = cfun->static_chain_decl != NULL; #endif #endif /* ASM_OUTPUT_REG_PUSH */ if (! 
NO_PROFILE_COUNTERS) { int align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE); data_section (); ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT)); targetm.asm_out.internal_label (file, "LP", current_function_funcdef_no); assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1); } function_section (current_function_decl); #if defined(ASM_OUTPUT_REG_PUSH) if (sval && svrtx != NULL_RTX && REG_P (svrtx)) ASM_OUTPUT_REG_PUSH (file, REGNO (svrtx)); #endif #if defined(STATIC_CHAIN_INCOMING_REGNUM) && defined(ASM_OUTPUT_REG_PUSH) if (cxt) ASM_OUTPUT_REG_PUSH (file, STATIC_CHAIN_INCOMING_REGNUM); #else #if defined(STATIC_CHAIN_REGNUM) && defined(ASM_OUTPUT_REG_PUSH) if (cxt) { ASM_OUTPUT_REG_PUSH (file, STATIC_CHAIN_REGNUM); } #endif #endif FUNCTION_PROFILER (file, current_function_funcdef_no); #if defined(STATIC_CHAIN_INCOMING_REGNUM) && defined(ASM_OUTPUT_REG_PUSH) if (cxt) ASM_OUTPUT_REG_POP (file, STATIC_CHAIN_INCOMING_REGNUM); #else #if defined(STATIC_CHAIN_REGNUM) && defined(ASM_OUTPUT_REG_PUSH) if (cxt) { ASM_OUTPUT_REG_POP (file, STATIC_CHAIN_REGNUM); } #endif #endif #if defined(ASM_OUTPUT_REG_PUSH) if (sval && svrtx != NULL_RTX && REG_P (svrtx)) ASM_OUTPUT_REG_POP (file, REGNO (svrtx)); #endif } /* Output assembler code for the end of a function. For clarity, args are same as those of `final_start_function' even though not all of them are needed. */ void final_end_function (void) { app_disable (); (*debug_hooks->end_function) (high_function_linenum); /* Finally, output the function epilogue: code to restore the stack frame and return to the caller. */ targetm.asm_out.function_epilogue (asm_out_file, get_frame_size ()); /* And debug output. */ (*debug_hooks->end_epilogue) (last_linenum, last_filename); #if defined (DWARF2_UNWIND_INFO) if (write_symbols != DWARF2_DEBUG && write_symbols != VMS_AND_DWARF2_DEBUG && dwarf2out_do_frame ()) dwarf2out_end_epilogue (last_linenum, last_filename); #endif } /* Output assembler code for some insns: all or part of a function. For description of args, see `final_start_function', above. PRESCAN is 1 if we are not really outputting, just scanning as if we were outputting. Prescanning deletes and rearranges insns just like ordinary output. PRESCAN is -2 if we are outputting after having prescanned. In this case, don't try to delete or rearrange insns because that has already been done. Prescanning is done only on certain machines. */ void final (rtx first, FILE *file, int optimize, int prescan) { rtx insn; int max_uid = 0; int seen = 0; last_ignored_compare = 0; #ifdef SDB_DEBUGGING_INFO /* When producing SDB debugging info, delete troublesome line number notes from inlined functions in other files as well as duplicate line number notes. */ if (write_symbols == SDB_DEBUG) { rtx last = 0; for (insn = first; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0) { if (last != 0 #ifdef USE_MAPPED_LOCATION && NOTE_SOURCE_LOCATION (insn) == NOTE_SOURCE_LOCATION (last) #else && NOTE_LINE_NUMBER (insn) == NOTE_LINE_NUMBER (last) && NOTE_SOURCE_FILE (insn) == NOTE_SOURCE_FILE (last) #endif ) { delete_insn (insn); /* Use delete_note. */ continue; } last = insn; } } #endif for (insn = first; insn; insn = NEXT_INSN (insn)) { if (INSN_UID (insn) > max_uid) /* Find largest UID. */ max_uid = INSN_UID (insn); #ifdef HAVE_cc0 /* If CC tracking across branches is enabled, record the insn which jumps to each branch only reached from one place. 
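(The recording is done through LABEL_REFS: for a label with exactly one use, LABEL_REFS is made to point back at the unique JUMP_INSN targeting it, so that the CODE_LABEL case of final_scan_insn could in principle recover the condition codes from the insn just before that jump; that use is currently disabled there.)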
*/ if (optimize && GET_CODE (insn) == JUMP_INSN) { rtx lab = JUMP_LABEL (insn); if (lab && LABEL_NUSES (lab) == 1) { LABEL_REFS (lab) = insn; } } #endif } init_recog (); CC_STATUS_INIT; /* Output the insns. */ for (insn = NEXT_INSN (first); insn;) { #ifdef HAVE_ATTR_length if ((unsigned) INSN_UID (insn) >= INSN_ADDRESSES_SIZE ()) { /* This can be triggered by bugs elsewhere in the compiler if new insns are created after init_insn_lengths is called. */ if (GET_CODE (insn) == NOTE) insn_current_address = -1; else abort (); } else insn_current_address = INSN_ADDRESSES (INSN_UID (insn)); #endif /* HAVE_ATTR_length */ insn = final_scan_insn (insn, file, optimize, prescan, 0, &seen); } } const char * get_insn_template (int code, rtx insn) { switch (insn_data[code].output_format) { case INSN_OUTPUT_FORMAT_SINGLE: return insn_data[code].output.single; case INSN_OUTPUT_FORMAT_MULTI: return insn_data[code].output.multi[which_alternative]; case INSN_OUTPUT_FORMAT_FUNCTION: if (insn == NULL) abort (); return (*insn_data[code].output.function) (recog_data.operand, insn); default: abort (); } } /* Emit the appropriate declaration for an alternate-entry-point symbol represented by INSN, to FILE. INSN is a CODE_LABEL with LABEL_KIND != LABEL_NORMAL. The case fall-through in this function is intentional. */ static void output_alternate_entry_point (FILE *file, rtx insn) { const char *name = LABEL_NAME (insn); switch (LABEL_KIND (insn)) { case LABEL_WEAK_ENTRY: #ifdef ASM_WEAKEN_LABEL ASM_WEAKEN_LABEL (file, name); #endif case LABEL_GLOBAL_ENTRY: targetm.asm_out.globalize_label (file, name); case LABEL_STATIC_ENTRY: #ifdef ASM_OUTPUT_TYPE_DIRECTIVE ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function"); #endif ASM_OUTPUT_LABEL (file, name); break; case LABEL_NORMAL: default: abort (); } } /* Return boolean indicating if there is a NOTE_INSN_UNLIKELY_EXECUTED_CODE note in the instruction chain (going forward) between the current instruction, and the next 'executable' instruction. */ bool scan_ahead_for_unlikely_executed_note (rtx insn) { rtx temp; int bb_note_count = 0; for (temp = insn; temp; temp = NEXT_INSN (temp)) { if (GET_CODE (temp) == NOTE && NOTE_LINE_NUMBER (temp) == NOTE_INSN_UNLIKELY_EXECUTED_CODE) return true; if (GET_CODE (temp) == NOTE && NOTE_LINE_NUMBER (temp) == NOTE_INSN_BASIC_BLOCK) { bb_note_count++; if (bb_note_count > 1) return false; } if (INSN_P (temp)) return false; } return false; } /* The final scan for one insn, INSN. Args are same as in `final', except that INSN is the insn being scanned. Value returned is the next insn to be scanned. NOPEEPHOLES is the flag to disallow peephole processing (currently used for within delayed branch sequence output). SEEN is used to track the end of the prologue, for emitting debug information. We force the emission of a line note after both NOTE_INSN_PROLOGUE_END and NOTE_INSN_FUNCTION_BEG, or at the beginning of the second basic block, whichever comes first. */ rtx final_scan_insn (rtx insn, FILE *file, int optimize ATTRIBUTE_UNUSED, int prescan, int nopeepholes ATTRIBUTE_UNUSED, int *seen) { #ifdef HAVE_cc0 rtx set; #endif insn_counter++; /* Ignore deleted insns. These can occur when we split insns (due to a template of "#") while not optimizing. 
*/ if (INSN_DELETED_P (insn)) return NEXT_INSN (insn); switch (GET_CODE (insn)) { case NOTE: if (prescan > 0) break; switch (NOTE_LINE_NUMBER (insn)) { case NOTE_INSN_DELETED: case NOTE_INSN_LOOP_BEG: case NOTE_INSN_LOOP_END: case NOTE_INSN_LOOP_END_TOP_COND: case NOTE_INSN_LOOP_CONT: case NOTE_INSN_LOOP_VTOP: case NOTE_INSN_FUNCTION_END: case NOTE_INSN_REPEATED_LINE_NUMBER: case NOTE_INSN_EXPECTED_VALUE: break; case NOTE_INSN_UNLIKELY_EXECUTED_CODE: /* The presence of this note indicates that this basic block belongs in the "cold" section of the .o file. If we are not already writing to the cold section we need to change to it. */ unlikely_text_section (); break; case NOTE_INSN_BASIC_BLOCK: /* If we are performing the optimization that partitions basic blocks into hot & cold sections of the .o file, then at the start of each new basic block, before beginning to write code for the basic block, we need to check to see whether the basic block belongs in the hot or cold section of the .o file, and change the section we are writing to appropriately. */ if (flag_reorder_blocks_and_partition && in_unlikely_text_section() && !scan_ahead_for_unlikely_executed_note (insn)) text_section (); #ifdef IA64_UNWIND_INFO IA64_UNWIND_EMIT (asm_out_file, insn); #endif if (flag_debug_asm) fprintf (asm_out_file, "\t%s basic block %d\n", ASM_COMMENT_START, NOTE_BASIC_BLOCK (insn)->index); if ((*seen & (SEEN_EMITTED | SEEN_BB)) == SEEN_BB) { *seen |= SEEN_EMITTED; last_filename = NULL; } else *seen |= SEEN_BB; break; case NOTE_INSN_EH_REGION_BEG: ASM_OUTPUT_DEBUG_LABEL (asm_out_file, "LEHB", NOTE_EH_HANDLER (insn)); break; case NOTE_INSN_EH_REGION_END: ASM_OUTPUT_DEBUG_LABEL (asm_out_file, "LEHE", NOTE_EH_HANDLER (insn)); break; case NOTE_INSN_PROLOGUE_END: targetm.asm_out.function_end_prologue (file); profile_after_prologue (file); if ((*seen & (SEEN_EMITTED | SEEN_NOTE)) == SEEN_NOTE) { *seen |= SEEN_EMITTED; last_filename = NULL; } else *seen |= SEEN_NOTE; break; case NOTE_INSN_EPILOGUE_BEG: targetm.asm_out.function_begin_epilogue (file); break; case NOTE_INSN_FUNCTION_BEG: app_disable (); (*debug_hooks->end_prologue) (last_linenum, last_filename); if ((*seen & (SEEN_EMITTED | SEEN_NOTE)) == SEEN_NOTE) { *seen |= SEEN_EMITTED; last_filename = NULL; } else *seen |= SEEN_NOTE; break; case NOTE_INSN_BLOCK_BEG: if (debug_info_level == DINFO_LEVEL_NORMAL || debug_info_level == DINFO_LEVEL_VERBOSE || write_symbols == DWARF_DEBUG || write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG || write_symbols == VMS_DEBUG) { int n = BLOCK_NUMBER (NOTE_BLOCK (insn)); app_disable (); ++block_depth; high_block_linenum = last_linenum; /* Output debugging info about the symbol-block beginning. */ (*debug_hooks->begin_block) (last_linenum, n); /* Mark this block as output. */ TREE_ASM_WRITTEN (NOTE_BLOCK (insn)) = 1; } break; case NOTE_INSN_BLOCK_END: if (debug_info_level == DINFO_LEVEL_NORMAL || debug_info_level == DINFO_LEVEL_VERBOSE || write_symbols == DWARF_DEBUG || write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG || write_symbols == VMS_DEBUG) { int n = BLOCK_NUMBER (NOTE_BLOCK (insn)); app_disable (); /* End of a symbol-block. */ --block_depth; if (block_depth < 0) abort (); (*debug_hooks->end_block) (high_block_linenum, n); } break; case NOTE_INSN_DELETED_LABEL: /* Emit the label. We may have deleted the CODE_LABEL because the label could be proved to be unreachable, though still referenced (in the form of having its address taken. 
*/ ASM_OUTPUT_DEBUG_LABEL (file, "L", CODE_LABEL_NUMBER (insn)); break; case NOTE_INSN_VAR_LOCATION: (*debug_hooks->var_location) (insn); break; case 0: break; default: if (NOTE_LINE_NUMBER (insn) <= 0) abort (); break; } break; case BARRIER: #if defined (DWARF2_UNWIND_INFO) if (dwarf2out_do_frame ()) dwarf2out_frame_debug (insn); #endif break; case CODE_LABEL: /* The target port might emit labels in the output function for some insn, e.g. sh.c output_branchy_insn. */ if (CODE_LABEL_NUMBER (insn) <= max_labelno) { int align = LABEL_TO_ALIGNMENT (insn); #ifdef ASM_OUTPUT_MAX_SKIP_ALIGN int max_skip = LABEL_TO_MAX_SKIP (insn); #endif if (align && NEXT_INSN (insn)) { #ifdef ASM_OUTPUT_MAX_SKIP_ALIGN ASM_OUTPUT_MAX_SKIP_ALIGN (file, align, max_skip); #else #ifdef ASM_OUTPUT_ALIGN_WITH_NOP ASM_OUTPUT_ALIGN_WITH_NOP (file, align); #else ASM_OUTPUT_ALIGN (file, align); #endif #endif } } #ifdef HAVE_cc0 CC_STATUS_INIT; /* If this label is reached from only one place, set the condition codes from the instruction just before the branch. */ /* Disabled because some insns set cc_status in the C output code and NOTICE_UPDATE_CC alone can set incorrect status. */ if (0 /* optimize && LABEL_NUSES (insn) == 1*/) { rtx jump = LABEL_REFS (insn); rtx barrier = prev_nonnote_insn (insn); rtx prev; /* If the LABEL_REFS field of this label has been set to point at a branch, the predecessor of the branch is a regular insn, and that branch is the only way to reach this label, set the condition codes based on the branch and its predecessor. */ if (barrier && GET_CODE (barrier) == BARRIER && jump && GET_CODE (jump) == JUMP_INSN && (prev = prev_nonnote_insn (jump)) && GET_CODE (prev) == INSN) { NOTICE_UPDATE_CC (PATTERN (prev), prev); NOTICE_UPDATE_CC (PATTERN (jump), jump); } } #endif if (prescan > 0) break; if (LABEL_NAME (insn)) (*debug_hooks->label) (insn); /* If we are doing the optimization that partitions hot & cold basic blocks into separate sections of the .o file, we need to ensure the jump table ends up in the correct section... */ if (flag_reorder_blocks_and_partition) { rtx tmp_table, tmp_label; if (GET_CODE (insn) == CODE_LABEL && tablejump_p (NEXT_INSN (insn), &tmp_label, &tmp_table)) { /* Do nothing; Do NOT change the current section. */ } else if (scan_ahead_for_unlikely_executed_note (insn)) unlikely_text_section (); else { if (in_unlikely_text_section ()) text_section (); } } if (app_on) { fputs (ASM_APP_OFF, file); app_on = 0; } if (NEXT_INSN (insn) != 0 && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN) { rtx nextbody = PATTERN (NEXT_INSN (insn)); /* If this label is followed by a jump-table, make sure we put the label in the read-only section. Also possibly write the label and jump table together. */ if (GET_CODE (nextbody) == ADDR_VEC || GET_CODE (nextbody) == ADDR_DIFF_VEC) { #if defined(ASM_OUTPUT_ADDR_VEC) || defined(ASM_OUTPUT_ADDR_DIFF_VEC) /* In this case, the case vector is being moved by the target, so don't output the label at all. Leave that to the back end macros. */ #else if (! 
JUMP_TABLES_IN_TEXT_SECTION) { int log_align; readonly_data_section (); #ifdef ADDR_VEC_ALIGN log_align = ADDR_VEC_ALIGN (NEXT_INSN (insn)); #else log_align = exact_log2 (BIGGEST_ALIGNMENT / BITS_PER_UNIT); #endif ASM_OUTPUT_ALIGN (file, log_align); } else function_section (current_function_decl); #ifdef ASM_OUTPUT_CASE_LABEL ASM_OUTPUT_CASE_LABEL (file, "L", CODE_LABEL_NUMBER (insn), NEXT_INSN (insn)); #else targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (insn)); #endif #endif break; } } if (LABEL_ALT_ENTRY_P (insn)) output_alternate_entry_point (file, insn); else targetm.asm_out.internal_label (file, "L", CODE_LABEL_NUMBER (insn)); break; default: { rtx body = PATTERN (insn); int insn_code_number; const char *template; rtx note; /* An INSN, JUMP_INSN or CALL_INSN. First check for special kinds that recog doesn't recognize. */ if (GET_CODE (body) == USE /* These are just declarations. */ || GET_CODE (body) == CLOBBER) break; #ifdef HAVE_cc0 /* If there is a REG_CC_SETTER note on this insn, it means that the setting of the condition code was done in the delay slot of the insn that branched here. So recover the cc status from the insn that set it. */ note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX); if (note) { NOTICE_UPDATE_CC (PATTERN (XEXP (note, 0)), XEXP (note, 0)); cc_prev_status = cc_status; } #endif /* Detect insns that are really jump-tables and output them as such. */ if (GET_CODE (body) == ADDR_VEC || GET_CODE (body) == ADDR_DIFF_VEC) { #if !(defined(ASM_OUTPUT_ADDR_VEC) || defined(ASM_OUTPUT_ADDR_DIFF_VEC)) int vlen, idx; #endif if (prescan > 0) break; if (app_on) { fputs (ASM_APP_OFF, file); app_on = 0; } #if defined(ASM_OUTPUT_ADDR_VEC) || defined(ASM_OUTPUT_ADDR_DIFF_VEC) if (GET_CODE (body) == ADDR_VEC) { #ifdef ASM_OUTPUT_ADDR_VEC ASM_OUTPUT_ADDR_VEC (PREV_INSN (insn), body); #else abort (); #endif } else { #ifdef ASM_OUTPUT_ADDR_DIFF_VEC ASM_OUTPUT_ADDR_DIFF_VEC (PREV_INSN (insn), body); #else abort (); #endif } #else vlen = XVECLEN (body, GET_CODE (body) == ADDR_DIFF_VEC); for (idx = 0; idx < vlen; idx++) { if (GET_CODE (body) == ADDR_VEC) { #ifdef ASM_OUTPUT_ADDR_VEC_ELT ASM_OUTPUT_ADDR_VEC_ELT (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0))); #else abort (); #endif } else { #ifdef ASM_OUTPUT_ADDR_DIFF_ELT ASM_OUTPUT_ADDR_DIFF_ELT (file, body, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)), CODE_LABEL_NUMBER (XEXP (XEXP (body, 0), 0))); #else abort (); #endif } } #ifdef ASM_OUTPUT_CASE_END ASM_OUTPUT_CASE_END (file, CODE_LABEL_NUMBER (PREV_INSN (insn)), insn); #endif #endif function_section (current_function_decl); break; } /* Output this line note if it is the first or the last line note in a row. */ if (notice_source_line (insn)) { (*debug_hooks->source_line) (last_linenum, last_filename); } if (GET_CODE (body) == ASM_INPUT) { const char *string = XSTR (body, 0); /* There's no telling what that did to the condition codes. */ CC_STATUS_INIT; if (prescan > 0) break; if (string[0]) { if (! app_on) { fputs (ASM_APP_ON, file); app_on = 1; } fprintf (asm_out_file, "\t%s\n", string); } break; } /* Detect `asm' construct with operands. */ if (asm_noperands (body) >= 0) { unsigned int noperands = asm_noperands (body); rtx *ops = alloca (noperands * sizeof (rtx)); const char *string; /* There's no telling what that did to the condition codes. */ CC_STATUS_INIT; if (prescan > 0) break; /* Get out the operand values. */ string = decode_asm_operands (body, ops, NULL, NULL, NULL); /* Inhibit aborts on what would otherwise be compiler bugs. 
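That is, while this asm is being output, this_is_asm_operands and insn_noperands are set so that a bad operand number in the user's template is reported through output_operand_lossage as an error against the asm statement rather than causing an internal abort.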
*/ insn_noperands = noperands; this_is_asm_operands = insn; #ifdef FINAL_PRESCAN_INSN FINAL_PRESCAN_INSN (insn, ops, insn_noperands); #endif /* Output the insn using them. */ if (string[0]) { if (! app_on) { fputs (ASM_APP_ON, file); app_on = 1; } output_asm_insn (string, ops); } this_is_asm_operands = 0; break; } if (prescan <= 0 && app_on) { fputs (ASM_APP_OFF, file); app_on = 0; } if (GET_CODE (body) == SEQUENCE) { /* A delayed-branch sequence */ int i; rtx next; if (prescan > 0) break; final_sequence = body; /* Record the delay slots' frame information before the branch. This is needed for delayed calls: see execute_cfa_program(). */ #if defined (DWARF2_UNWIND_INFO) if (dwarf2out_do_frame ()) for (i = 1; i < XVECLEN (body, 0); i++) dwarf2out_frame_debug (XVECEXP (body, 0, i)); #endif /* The first insn in this SEQUENCE might be a JUMP_INSN that will force the restoration of a comparison that was previously thought unnecessary. If that happens, cancel this sequence and cause that insn to be restored. */ next = final_scan_insn (XVECEXP (body, 0, 0), file, 0, prescan, 1, seen); if (next != XVECEXP (body, 0, 1)) { final_sequence = 0; return next; } for (i = 1; i < XVECLEN (body, 0); i++) { rtx insn = XVECEXP (body, 0, i); rtx next = NEXT_INSN (insn); /* We loop in case any instruction in a delay slot gets split. */ do insn = final_scan_insn (insn, file, 0, prescan, 1, seen); while (insn != next); } #ifdef DBR_OUTPUT_SEQEND DBR_OUTPUT_SEQEND (file); #endif final_sequence = 0; /* If the insn requiring the delay slot was a CALL_INSN, the insns in the delay slot are actually executed before the called function. Hence we don't preserve any CC-setting actions in these insns and the CC must be marked as being clobbered by the function. */ if (GET_CODE (XVECEXP (body, 0, 0)) == CALL_INSN) { CC_STATUS_INIT; } break; } /* We have a real machine instruction as rtl. */ body = PATTERN (insn); #ifdef HAVE_cc0 set = single_set (insn); /* Check for redundant test and compare instructions (when the condition codes are already set up as desired). This is done only when optimizing; if not optimizing, it should be possible for the user to alter a variable with the debugger in between statements and the next statement should reexamine the variable to compute the condition codes. */ if (optimize) { if (set && GET_CODE (SET_DEST (set)) == CC0 && insn != last_ignored_compare) { if (GET_CODE (SET_SRC (set)) == SUBREG) SET_SRC (set) = alter_subreg (&SET_SRC (set)); else if (GET_CODE (SET_SRC (set)) == COMPARE) { if (GET_CODE (XEXP (SET_SRC (set), 0)) == SUBREG) XEXP (SET_SRC (set), 0) = alter_subreg (&XEXP (SET_SRC (set), 0)); if (GET_CODE (XEXP (SET_SRC (set), 1)) == SUBREG) XEXP (SET_SRC (set), 1) = alter_subreg (&XEXP (SET_SRC (set), 1)); } if ((cc_status.value1 != 0 && rtx_equal_p (SET_SRC (set), cc_status.value1)) || (cc_status.value2 != 0 && rtx_equal_p (SET_SRC (set), cc_status.value2))) { /* Don't delete insn if it has an addressing side-effect. */ if (! FIND_REG_INC_NOTE (insn, NULL_RTX) /* or if anything in it is volatile. */ && ! volatile_refs_p (PATTERN (insn))) { /* We don't really delete the insn; just ignore it. */ last_ignored_compare = insn; break; } } } } #endif #ifndef STACK_REGS /* Don't bother outputting obvious no-ops, even without -O. This optimization is fast and doesn't interfere with debugging. Don't do this if the insn is in a delay slot, since this will cause an improper number of delay insns to be written. 
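For example, a register self-copy such as (set (reg:SI 3) (reg:SI 3)) (the register number is purely illustrative) matches the test below and produces no assembly output at all.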
*/ if (final_sequence == 0 && prescan >= 0 && GET_CODE (insn) == INSN && GET_CODE (body) == SET && REG_P (SET_SRC (body)) && REG_P (SET_DEST (body)) && REGNO (SET_SRC (body)) == REGNO (SET_DEST (body))) break; #endif #ifdef HAVE_cc0 /* If this is a conditional branch, maybe modify it if the cc's are in a nonstandard state so that it accomplishes the same thing that it would do straightforwardly if the cc's were set up normally. */ if (cc_status.flags != 0 && GET_CODE (insn) == JUMP_INSN && GET_CODE (body) == SET && SET_DEST (body) == pc_rtx && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE && COMPARISON_P (XEXP (SET_SRC (body), 0)) && XEXP (XEXP (SET_SRC (body), 0), 0) == cc0_rtx /* This is done during prescan; it is not done again in final scan when prescan has been done. */ && prescan >= 0) { /* This function may alter the contents of its argument and clear some of the cc_status.flags bits. It may also return 1 meaning condition now always true or -1 meaning condition now always false or 2 meaning condition nontrivial but altered. */ int result = alter_cond (XEXP (SET_SRC (body), 0)); /* If condition now has fixed value, replace the IF_THEN_ELSE with its then-operand or its else-operand. */ if (result == 1) SET_SRC (body) = XEXP (SET_SRC (body), 1); if (result == -1) SET_SRC (body) = XEXP (SET_SRC (body), 2); /* The jump is now either unconditional or a no-op. If it has become a no-op, don't try to output it. (It would not be recognized.) */ if (SET_SRC (body) == pc_rtx) { delete_insn (insn); break; } else if (GET_CODE (SET_SRC (body)) == RETURN) /* Replace (set (pc) (return)) with (return). */ PATTERN (insn) = body = SET_SRC (body); /* Rerecognize the instruction if it has changed. */ if (result != 0) INSN_CODE (insn) = -1; } /* Make same adjustments to instructions that examine the condition codes without jumping and instructions that handle conditional moves (if this machine has either one). */ if (cc_status.flags != 0 && set != 0) { rtx cond_rtx, then_rtx, else_rtx; if (GET_CODE (insn) != JUMP_INSN && GET_CODE (SET_SRC (set)) == IF_THEN_ELSE) { cond_rtx = XEXP (SET_SRC (set), 0); then_rtx = XEXP (SET_SRC (set), 1); else_rtx = XEXP (SET_SRC (set), 2); } else { cond_rtx = SET_SRC (set); then_rtx = const_true_rtx; else_rtx = const0_rtx; } switch (GET_CODE (cond_rtx)) { case GTU: case GT: case LTU: case LT: case GEU: case GE: case LEU: case LE: case EQ: case NE: { int result; if (XEXP (cond_rtx, 0) != cc0_rtx) break; result = alter_cond (cond_rtx); if (result == 1) validate_change (insn, &SET_SRC (set), then_rtx, 0); else if (result == -1) validate_change (insn, &SET_SRC (set), else_rtx, 0); else if (result == 2) INSN_CODE (insn) = -1; if (SET_DEST (set) == SET_SRC (set)) delete_insn (insn); } break; default: break; } } #endif #ifdef HAVE_peephole /* Do machine-specific peephole optimizations if desired. */ if (optimize && !flag_no_peephole && !nopeepholes) { rtx next = peephole (insn); /* When peepholing, if there were notes within the peephole, emit them before the peephole. */ if (next != 0 && next != NEXT_INSN (insn)) { rtx prev = PREV_INSN (insn); for (note = NEXT_INSN (insn); note != next; note = NEXT_INSN (note)) final_scan_insn (note, file, optimize, prescan, nopeepholes, seen); /* In case this is prescan, put the notes in proper position for later rescan. 
*/ note = NEXT_INSN (insn); PREV_INSN (note) = prev; NEXT_INSN (prev) = note; NEXT_INSN (PREV_INSN (next)) = insn; PREV_INSN (insn) = PREV_INSN (next); NEXT_INSN (insn) = next; PREV_INSN (next) = insn; } /* PEEPHOLE might have changed this. */ body = PATTERN (insn); } #endif /* Try to recognize the instruction. If successful, verify that the operands satisfy the constraints for the instruction. Crash if they don't, since `reload' should have changed them so that they do. */ insn_code_number = recog_memoized (insn); cleanup_subreg_operands (insn); /* Dump the insn in the assembly for debugging. */ if (flag_dump_rtl_in_asm) { print_rtx_head = ASM_COMMENT_START; print_rtl_single (asm_out_file, insn); print_rtx_head = ""; } if (! constrain_operands_cached (1)) fatal_insn_not_found (insn); /* Some target machines need to prescan each insn before it is output. */ #ifdef FINAL_PRESCAN_INSN FINAL_PRESCAN_INSN (insn, recog_data.operand, recog_data.n_operands); #endif #ifdef HAVE_conditional_execution if (GET_CODE (PATTERN (insn)) == COND_EXEC) current_insn_predicate = COND_EXEC_TEST (PATTERN (insn)); else current_insn_predicate = NULL_RTX; #endif #ifdef HAVE_cc0 cc_prev_status = cc_status; /* Update `cc_status' for this instruction. The instruction's output routine may change it further. If the output routine for a jump insn needs to depend on the cc status, it should look at cc_prev_status. */ NOTICE_UPDATE_CC (body, insn); #endif current_output_insn = debug_insn = insn; #if defined (DWARF2_UNWIND_INFO) if (GET_CODE (insn) == CALL_INSN && dwarf2out_do_frame ()) dwarf2out_frame_debug (insn); #endif /* Find the proper template for this insn. */ template = get_insn_template (insn_code_number, insn); /* If the C code returns 0, it means that it is a jump insn which follows a deleted test insn, and that test insn needs to be reinserted. */ if (template == 0) { rtx prev; if (prev_nonnote_insn (insn) != last_ignored_compare) abort (); /* We have already processed the notes between the setter and the user. Make sure we don't process them again, this is particularly important if one of the notes is a block scope note or an EH note. */ for (prev = insn; prev != last_ignored_compare; prev = PREV_INSN (prev)) { if (GET_CODE (prev) == NOTE) delete_insn (prev); /* Use delete_note. */ } return prev; } /* If the template is the string "#", it means that this insn must be split. */ if (template[0] == '#' && template[1] == '\0') { rtx new = try_split (body, insn, 0); /* If we didn't split the insn, go away. */ if (new == insn && PATTERN (new) == body) fatal_insn ("could not split insn", insn); #ifdef HAVE_ATTR_length /* This instruction should have been split in shorten_branches, to ensure that we would have valid length info for the splitees. */ abort (); #endif return new; } if (prescan > 0) break; #ifdef IA64_UNWIND_INFO IA64_UNWIND_EMIT (asm_out_file, insn); #endif /* Output assembler code from the template. */ output_asm_insn (template, recog_data.operand); /* If necessary, report the effect that the instruction has on the unwind info. We've already done this for delay slots and call instructions. */ #if defined (DWARF2_UNWIND_INFO) if (GET_CODE (insn) == INSN #if !defined (HAVE_prologue) && !ACCUMULATE_OUTGOING_ARGS #endif && final_sequence == 0 && dwarf2out_do_frame ()) dwarf2out_frame_debug (insn); #endif #if 0 /* It's not at all clear why we did this and doing so used to interfere with tests that used REG_WAS_0 notes, which are now gone, so let's try with this out. 
*/ /* Mark this insn as having been output. */ INSN_DELETED_P (insn) = 1; #endif /* Emit information for vtable gc. */ note = find_reg_note (insn, REG_VTABLE_REF, NULL_RTX); current_output_insn = debug_insn = 0; } } return NEXT_INSN (insn); } /* Output debugging info to the assembler file FILE based on the NOTE-insn INSN, assumed to be a line number. */ static bool notice_source_line (rtx insn) { const char *filename = insn_file (insn); int linenum = insn_line (insn); if (filename && (filename != last_filename || last_linenum != linenum)) { last_filename = filename; last_linenum = linenum; high_block_linenum = MAX (last_linenum, high_block_linenum); high_function_linenum = MAX (last_linenum, high_function_linenum); return true; } return false; } /* For each operand in INSN, simplify (subreg (reg)) so that it refers directly to the desired hard register. */ void cleanup_subreg_operands (rtx insn) { int i; extract_insn_cached (insn); for (i = 0; i < recog_data.n_operands; i++) { /* The following test cannot use recog_data.operand when testing for a SUBREG: the underlying object might have been changed already if we are inside a match_operator expression that matches the else clause. Instead we test the underlying expression directly. */ if (GET_CODE (*recog_data.operand_loc[i]) == SUBREG) recog_data.operand[i] = alter_subreg (recog_data.operand_loc[i]); else if (GET_CODE (recog_data.operand[i]) == PLUS || GET_CODE (recog_data.operand[i]) == MULT || MEM_P (recog_data.operand[i])) recog_data.operand[i] = walk_alter_subreg (recog_data.operand_loc[i]); } for (i = 0; i < recog_data.n_dups; i++) { if (GET_CODE (*recog_data.dup_loc[i]) == SUBREG) *recog_data.dup_loc[i] = alter_subreg (recog_data.dup_loc[i]); else if (GET_CODE (*recog_data.dup_loc[i]) == PLUS || GET_CODE (*recog_data.dup_loc[i]) == MULT || MEM_P (*recog_data.dup_loc[i])) *recog_data.dup_loc[i] = walk_alter_subreg (recog_data.dup_loc[i]); } } /* If X is a SUBREG, replace it with a REG or a MEM, based on the thing it is a subreg of. */ rtx alter_subreg (rtx *xp) { rtx x = *xp; rtx y = SUBREG_REG (x); /* simplify_subreg does not remove subreg from volatile references. We are required to. */ if (MEM_P (y)) *xp = adjust_address (y, GET_MODE (x), SUBREG_BYTE (x)); else { rtx new = simplify_subreg (GET_MODE (x), y, GET_MODE (y), SUBREG_BYTE (x)); if (new != 0) *xp = new; /* Simplify_subreg can't handle some REG cases, but we have to. */ else if (REG_P (y)) { unsigned int regno = subreg_hard_regno (x, 1); *xp = gen_rtx_REG_offset (y, GET_MODE (x), regno, SUBREG_BYTE (x)); } else abort (); } return *xp; } /* Do alter_subreg on all the SUBREGs contained in X. */ static rtx walk_alter_subreg (rtx *xp) { rtx x = *xp; switch (GET_CODE (x)) { case PLUS: case MULT: XEXP (x, 0) = walk_alter_subreg (&XEXP (x, 0)); XEXP (x, 1) = walk_alter_subreg (&XEXP (x, 1)); break; case MEM: XEXP (x, 0) = walk_alter_subreg (&XEXP (x, 0)); break; case SUBREG: return alter_subreg (xp); default: break; } return *xp; } #ifdef HAVE_cc0 /* Given BODY, the body of a jump instruction, alter the jump condition as required by the bits that are set in cc_status.flags. Not all of the bits there can be handled at this level in all cases. The value is normally 0. 1 means that the condition has become always true. -1 means that the condition has become always false. 2 means that COND has been altered. 
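For instance (with purely illustrative operands): if CC_REVERSED is set and COND is (gt (cc0) (const_int 0)), the code is swapped to LT and 2 is returned; if CC_NOT_NEGATIVE is set and COND is (ge (cc0) (const_int 0)), the branch is known to be always taken and 1 is returned; if CC_NO_OVERFLOW is set and COND is (ltu (cc0) (const_int 0)), the jump becomes a no-op and -1 is returned.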
*/ static int alter_cond (rtx cond) { int value = 0; if (cc_status.flags & CC_REVERSED) { value = 2; PUT_CODE (cond, swap_condition (GET_CODE (cond))); } if (cc_status.flags & CC_INVERTED) { value = 2; PUT_CODE (cond, reverse_condition (GET_CODE (cond))); } if (cc_status.flags & CC_NOT_POSITIVE) switch (GET_CODE (cond)) { case LE: case LEU: case GEU: /* Jump becomes unconditional. */ return 1; case GT: case GTU: case LTU: /* Jump becomes no-op. */ return -1; case GE: PUT_CODE (cond, EQ); value = 2; break; case LT: PUT_CODE (cond, NE); value = 2; break; default: break; } if (cc_status.flags & CC_NOT_NEGATIVE) switch (GET_CODE (cond)) { case GE: case GEU: /* Jump becomes unconditional. */ return 1; case LT: case LTU: /* Jump becomes no-op. */ return -1; case LE: case LEU: PUT_CODE (cond, EQ); value = 2; break; case GT: case GTU: PUT_CODE (cond, NE); value = 2; break; default: break; } if (cc_status.flags & CC_NO_OVERFLOW) switch (GET_CODE (cond)) { case GEU: /* Jump becomes unconditional. */ return 1; case LEU: PUT_CODE (cond, EQ); value = 2; break; case GTU: PUT_CODE (cond, NE); value = 2; break; case LTU: /* Jump becomes no-op. */ return -1; default: break; } if (cc_status.flags & (CC_Z_IN_NOT_N | CC_Z_IN_N)) switch (GET_CODE (cond)) { default: abort (); case NE: PUT_CODE (cond, cc_status.flags & CC_Z_IN_N ? GE : LT); value = 2; break; case EQ: PUT_CODE (cond, cc_status.flags & CC_Z_IN_N ? LT : GE); value = 2; break; } if (cc_status.flags & CC_NOT_SIGNED) /* The flags are valid if signed condition operators are converted to unsigned. */ switch (GET_CODE (cond)) { case LE: PUT_CODE (cond, LEU); value = 2; break; case LT: PUT_CODE (cond, LTU); value = 2; break; case GT: PUT_CODE (cond, GTU); value = 2; break; case GE: PUT_CODE (cond, GEU); value = 2; break; default: break; } return value; } #endif /* Report inconsistency between the assembler template and the operands. In an `asm', it's the user's fault; otherwise, the compiler's fault. */ void output_operand_lossage (const char *msgid, ...) { char *fmt_string; char *new_message; const char *pfx_str; va_list ap; va_start (ap, msgid); pfx_str = this_is_asm_operands ? _("invalid `asm': ") : "output_operand: "; asprintf (&fmt_string, "%s%s", pfx_str, _(msgid)); vasprintf (&new_message, fmt_string, ap); if (this_is_asm_operands) error_for_asm (this_is_asm_operands, "%s", new_message); else internal_error ("%s", new_message); free (fmt_string); free (new_message); va_end (ap); } /* Output of assembler code from a template, and its subroutines. */ /* Annotate the assembly with a comment describing the pattern and alternative used. */ static void output_asm_name (void) { if (debug_insn) { int num = INSN_CODE (debug_insn); fprintf (asm_out_file, "\t%s %d\t%s", ASM_COMMENT_START, INSN_UID (debug_insn), insn_data[num].name); if (insn_data[num].n_alternatives > 1) fprintf (asm_out_file, "/%d", which_alternative + 1); #ifdef HAVE_ATTR_length fprintf (asm_out_file, "\t[length = %d]", get_attr_length (debug_insn)); #endif /* Clear this so only the first assembler insn of any rtl insn will get the special comment for -dp. */ debug_insn = 0; } } /* If OP is a REG or MEM and we can find a MEM_EXPR corresponding to it or its address, return that expr . Set *PADDRESSP to 1 if the expr corresponds to the address of the object and 0 if to the object. 
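For example, a MEM operand whose MEM_EXPR is the decl of a user variable yields that decl with *PADDRESSP set to 0, while a MEM with no MEM_EXPR whose address is a REG whose REG_EXPR is a pointer variable yields that pointer's decl with *PADDRESSP set to 1. This is what lets output_asm_operand_names annotate -fverbose-asm output with operand names.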
*/ static tree get_mem_expr_from_op (rtx op, int *paddressp) { tree expr; int inner_addressp; *paddressp = 0; if (REG_P (op)) return REG_EXPR (op); else if (!MEM_P (op)) return 0; if (MEM_EXPR (op) != 0) return MEM_EXPR (op); /* Otherwise we have an address, so indicate it and look at the address. */ *paddressp = 1; op = XEXP (op, 0); /* First check if we have a decl for the address, then look at the right side if it is a PLUS. Otherwise, strip off arithmetic and keep looking. But don't allow the address to itself be indirect. */ if ((expr = get_mem_expr_from_op (op, &inner_addressp)) && ! inner_addressp) return expr; else if (GET_CODE (op) == PLUS && (expr = get_mem_expr_from_op (XEXP (op, 1), &inner_addressp))) return expr; while (GET_RTX_CLASS (GET_CODE (op)) == RTX_UNARY || GET_RTX_CLASS (GET_CODE (op)) == RTX_BIN_ARITH) op = XEXP (op, 0); expr = get_mem_expr_from_op (op, &inner_addressp); return inner_addressp ? 0 : expr; } /* Output operand names for assembler instructions. OPERANDS is the operand vector, OPORDER is the order to write the operands, and NOPS is the number of operands to write. */ static void output_asm_operand_names (rtx *operands, int *oporder, int nops) { int wrote = 0; int i; for (i = 0; i < nops; i++) { int addressp; rtx op = operands[oporder[i]]; tree expr = get_mem_expr_from_op (op, &addressp); fprintf (asm_out_file, "%c%s", wrote ? ',' : '\t', wrote ? "" : ASM_COMMENT_START); wrote = 1; if (expr) { fprintf (asm_out_file, "%s", addressp ? "*" : ""); print_mem_expr (asm_out_file, expr); wrote = 1; } else if (REG_P (op) && ORIGINAL_REGNO (op) && ORIGINAL_REGNO (op) != REGNO (op)) fprintf (asm_out_file, " tmp%i", ORIGINAL_REGNO (op)); } } /* Output text from TEMPLATE to the assembler output file, obeying %-directions to substitute operands taken from the vector OPERANDS. %N (for N a digit) means print operand N in usual manner. %lN means require operand N to be a CODE_LABEL or LABEL_REF and print the label name with no punctuation. %cN means require operand N to be a constant and print the constant expression with no punctuation. %aN means expect operand N to be a memory address (not a memory reference!) and print a reference to that address. %nN means expect operand N to be a constant and print a constant expression for minus the value of the operand, with no other punctuation. */ void output_asm_insn (const char *template, rtx *operands) { const char *p; int c; #ifdef ASSEMBLER_DIALECT int dialect = 0; #endif int oporder[MAX_RECOG_OPERANDS]; char opoutput[MAX_RECOG_OPERANDS]; int ops = 0; /* An insn may return a null string template in a case where no assembler code is needed. */ if (*template == 0) return; memset (opoutput, 0, sizeof opoutput); p = template; putc ('\t', asm_out_file); #ifdef ASM_OUTPUT_OPCODE ASM_OUTPUT_OPCODE (asm_out_file, p); #endif while ((c = *p++)) switch (c) { case '\n': if (flag_verbose_asm) output_asm_operand_names (operands, oporder, ops); if (flag_print_asm_name) output_asm_name (); ops = 0; memset (opoutput, 0, sizeof opoutput); putc (c, asm_out_file); #ifdef ASM_OUTPUT_OPCODE while ((c = *p) == '\t') { putc (c, asm_out_file); p++; } ASM_OUTPUT_OPCODE (asm_out_file, p); #endif break; #ifdef ASSEMBLER_DIALECT case '{': { int i; if (dialect) output_operand_lossage ("nested assembly dialect alternatives"); else dialect = 1; /* If we want the first dialect, do nothing. Otherwise, skip DIALECT_NUMBER of strings ending with '|'. 
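For example (an illustration added here; the concrete spelling is target-specific), a target that defines ASSEMBLER_DIALECT can write one template such as "mov{l}\t{%1, %0|%0, %1}": dialect 0 selects the text before each '|' and emits "movl\t%1, %0", while dialect 1 selects the text after it and emits "mov\t%0, %1".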
*/ for (i = 0; i < dialect_number; i++) { while (*p && *p != '}' && *p++ != '|') ; if (*p == '}') break; if (*p == '|') p++; } if (*p == '\0') output_operand_lossage ("unterminated assembly dialect alternative"); } break; case '|': if (dialect) { /* Skip to close brace. */ do { if (*p == '\0') { output_operand_lossage ("unterminated assembly dialect alternative"); break; } } while (*p++ != '}'); dialect = 0; } else putc (c, asm_out_file); break; case '}': if (! dialect) putc (c, asm_out_file); dialect = 0; break; #endif case '%': /* %% outputs a single %. */ if (*p == '%') { p++; putc (c, asm_out_file); } /* %= outputs a number which is unique to each insn in the entire compilation. This is useful for making local labels that are referred to more than once in a given insn. */ else if (*p == '=') { p++; fprintf (asm_out_file, "%d", insn_counter); } /* % followed by a letter and some digits outputs an operand in a special way depending on the letter. Letters `acln' are implemented directly. Other letters are passed to `output_operand' so that the PRINT_OPERAND macro can define them. */ else if (ISALPHA (*p)) { int letter = *p++; c = atoi (p); if (! ISDIGIT (*p)) output_operand_lossage ("operand number missing after %%-letter"); else if (this_is_asm_operands && (c < 0 || (unsigned int) c >= insn_noperands)) output_operand_lossage ("operand number out of range"); else if (letter == 'l') output_asm_label (operands[c]); else if (letter == 'a') output_address (operands[c]); else if (letter == 'c') { if (CONSTANT_ADDRESS_P (operands[c])) output_addr_const (asm_out_file, operands[c]); else output_operand (operands[c], 'c'); } else if (letter == 'n') { if (GET_CODE (operands[c]) == CONST_INT) fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (operands[c])); else { putc ('-', asm_out_file); output_addr_const (asm_out_file, operands[c]); } } else output_operand (operands[c], letter); if (!opoutput[c]) oporder[ops++] = c; opoutput[c] = 1; while (ISDIGIT (c = *p)) p++; } /* % followed by a digit outputs an operand the default way. */ else if (ISDIGIT (*p)) { c = atoi (p); if (this_is_asm_operands && (c < 0 || (unsigned int) c >= insn_noperands)) output_operand_lossage ("operand number out of range"); else output_operand (operands[c], 0); if (!opoutput[c]) oporder[ops++] = c; opoutput[c] = 1; while (ISDIGIT (c = *p)) p++; } /* % followed by punctuation: output something for that punctuation character alone, with no operand. The PRINT_OPERAND macro decides what is actually done. */ #ifdef PRINT_OPERAND_PUNCT_VALID_P else if (PRINT_OPERAND_PUNCT_VALID_P ((unsigned char) *p)) output_operand (NULL_RTX, *p++); #endif else output_operand_lossage ("invalid %%-code"); break; default: putc (c, asm_out_file); } /* Write out the variable names for operands, if we know them. */ if (flag_verbose_asm) output_asm_operand_names (operands, oporder, ops); if (flag_print_asm_name) output_asm_name (); putc ('\n', asm_out_file); } /* Output a LABEL_REF, or a bare CODE_LABEL, as an assembler symbol. */ void output_asm_label (rtx x) { char buf[256]; if (GET_CODE (x) == LABEL_REF) x = XEXP (x, 0); if (GET_CODE (x) == CODE_LABEL || (GET_CODE (x) == NOTE && NOTE_LINE_NUMBER (x) == NOTE_INSN_DELETED_LABEL)) ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x)); else output_operand_lossage ("`%%l' operand isn't a label"); assemble_name (asm_out_file, buf); } /* Print operand X using machine-dependent assembler syntax. The macro PRINT_OPERAND is defined just to control this function. 
CODE is a non-digit that preceded the operand-number in the % spec, such as 'z' if the spec was `%z3'. CODE is 0 if there was no char between the % and the digits. When CODE is a non-letter, X is 0. The meanings of the letters are machine-dependent and controlled by PRINT_OPERAND. */ static void output_operand (rtx x, int code ATTRIBUTE_UNUSED) { if (x && GET_CODE (x) == SUBREG) x = alter_subreg (&x); /* If X is a pseudo-register, abort now rather than writing trash to the assembler file. */ if (x && REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER) abort (); PRINT_OPERAND (asm_out_file, x, code); } /* Print a memory reference operand for address X using machine-dependent assembler syntax. The macro PRINT_OPERAND_ADDRESS exists just to control this function. */ void output_address (rtx x) { walk_alter_subreg (&x); PRINT_OPERAND_ADDRESS (asm_out_file, x); } /* Print an integer constant expression in assembler syntax. Addition and subtraction are the only arithmetic that may appear in these expressions. */ void output_addr_const (FILE *file, rtx x) { char buf[256]; restart: switch (GET_CODE (x)) { case PC: putc ('.', file); break; case SYMBOL_REF: if (SYMBOL_REF_DECL (x)) mark_decl_referenced (SYMBOL_REF_DECL (x)); #ifdef ASM_OUTPUT_SYMBOL_REF ASM_OUTPUT_SYMBOL_REF (file, x); #else assemble_name (file, XSTR (x, 0)); #endif break; case LABEL_REF: x = XEXP (x, 0); /* Fall through. */ case CODE_LABEL: ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x)); #ifdef ASM_OUTPUT_LABEL_REF ASM_OUTPUT_LABEL_REF (file, buf); #else assemble_name (file, buf); #endif break; case CONST_INT: fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); break; case CONST: /* This used to output parentheses around the expression, but that does not work on the 386 (either ATT or BSD assembler). */ output_addr_const (file, XEXP (x, 0)); break; case CONST_DOUBLE: if (GET_MODE (x) == VOIDmode) { /* We can use %d if the number is one word and positive. */ if (CONST_DOUBLE_HIGH (x)) fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX, CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x)); else if (CONST_DOUBLE_LOW (x) < 0) fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (x)); else fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x)); } else /* We can't handle floating point constants; PRINT_OPERAND must handle them. */ output_operand_lossage ("floating constant misused"); break; case PLUS: /* Some assemblers need integer constants to appear last (eg masm). */ if (GET_CODE (XEXP (x, 0)) == CONST_INT) { output_addr_const (file, XEXP (x, 1)); if (INTVAL (XEXP (x, 0)) >= 0) fprintf (file, "+"); output_addr_const (file, XEXP (x, 0)); } else { output_addr_const (file, XEXP (x, 0)); if (GET_CODE (XEXP (x, 1)) != CONST_INT || INTVAL (XEXP (x, 1)) >= 0) fprintf (file, "+"); output_addr_const (file, XEXP (x, 1)); } break; case MINUS: /* Avoid outputting things like x-x or x+5-x, since some assemblers can't handle that. 
*/ x = simplify_subtraction (x); if (GET_CODE (x) != MINUS) goto restart; output_addr_const (file, XEXP (x, 0)); fprintf (file, "-"); if ((GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0) || GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 1)) == SYMBOL_REF) output_addr_const (file, XEXP (x, 1)); else { fputs (targetm.asm_out.open_paren, file); output_addr_const (file, XEXP (x, 1)); fputs (targetm.asm_out.close_paren, file); } break; case ZERO_EXTEND: case SIGN_EXTEND: case SUBREG: output_addr_const (file, XEXP (x, 0)); break; default: #ifdef OUTPUT_ADDR_CONST_EXTRA OUTPUT_ADDR_CONST_EXTRA (file, x, fail); break; fail: #endif output_operand_lossage ("invalid expression as operand"); } } /* A poor man's fprintf, with the added features of %I, %R, %L, and %U. %R prints the value of REGISTER_PREFIX. %L prints the value of LOCAL_LABEL_PREFIX. %U prints the value of USER_LABEL_PREFIX. %I prints the value of IMMEDIATE_PREFIX. %O runs ASM_OUTPUT_OPCODE to transform what follows in the string. Also supported are %d, %i, %u, %x, %X, %o, %c, %s and %%. We handle alternate assembler dialects here, just like output_asm_insn. */ void asm_fprintf (FILE *file, const char *p, ...) { char buf[10]; char *q, c; va_list argptr; va_start (argptr, p); buf[0] = '%'; while ((c = *p++)) switch (c) { #ifdef ASSEMBLER_DIALECT case '{': { int i; /* If we want the first dialect, do nothing. Otherwise, skip DIALECT_NUMBER of strings ending with '|'. */ for (i = 0; i < dialect_number; i++) { while (*p && *p++ != '|') ; if (*p == '|') p++; } } break; case '|': /* Skip to close brace. */ while (*p && *p++ != '}') ; break; case '}': break; #endif case '%': c = *p++; q = &buf[1]; while (strchr ("-+ #0", c)) { *q++ = c; c = *p++; } while (ISDIGIT (c) || c == '.') { *q++ = c; c = *p++; } switch (c) { case '%': putc ('%', file); break; case 'd': case 'i': case 'u': case 'x': case 'X': case 'o': case 'c': *q++ = c; *q = 0; fprintf (file, buf, va_arg (argptr, int)); break; case 'w': /* This is a prefix to the 'd', 'i', 'u', 'x', 'X', and 'o' cases, but we do not check for those cases. It means that the value is a HOST_WIDE_INT, which may be either `long' or `long long'. */ memcpy (q, HOST_WIDE_INT_PRINT, strlen (HOST_WIDE_INT_PRINT)); q += strlen (HOST_WIDE_INT_PRINT); *q++ = *p++; *q = 0; fprintf (file, buf, va_arg (argptr, HOST_WIDE_INT)); break; case 'l': *q++ = c; #ifdef HAVE_LONG_LONG if (*p == 'l') { *q++ = *p++; *q++ = *p++; *q = 0; fprintf (file, buf, va_arg (argptr, long long)); } else #endif { *q++ = *p++; *q = 0; fprintf (file, buf, va_arg (argptr, long)); } break; case 's': *q++ = c; *q = 0; fprintf (file, buf, va_arg (argptr, char *)); break; case 'O': #ifdef ASM_OUTPUT_OPCODE ASM_OUTPUT_OPCODE (asm_out_file, p); #endif break; case 'R': #ifdef REGISTER_PREFIX fprintf (file, "%s", REGISTER_PREFIX); #endif break; case 'I': #ifdef IMMEDIATE_PREFIX fprintf (file, "%s", IMMEDIATE_PREFIX); #endif break; case 'L': #ifdef LOCAL_LABEL_PREFIX fprintf (file, "%s", LOCAL_LABEL_PREFIX); #endif break; case 'U': fputs (user_label_prefix, file); break; #ifdef ASM_FPRINTF_EXTENSIONS /* Uppercase letters are reserved for general use by asm_fprintf and so are not available to target specific code. In order to prevent the ASM_FPRINTF_EXTENSIONS macro from using them then, they are defined here. As they get turned into real extensions to asm_fprintf they should be removed from this list. 
*/ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'J': case 'K': case 'M': case 'N': case 'P': case 'Q': case 'S': case 'T': case 'V': case 'W': case 'Y': case 'Z': break; ASM_FPRINTF_EXTENSIONS (file, argptr, p) #endif default: abort (); } break; default: putc (c, file); } va_end (argptr); } /* Split up a CONST_DOUBLE or integer constant rtx into two rtx's for single words, storing in *FIRST the word that comes first in memory in the target and in *SECOND the other. */ void split_double (rtx value, rtx *first, rtx *second) { if (GET_CODE (value) == CONST_INT) { if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD)) { /* In this case the CONST_INT holds both target words. Extract the bits from it into two word-sized pieces. Sign extend each half to HOST_WIDE_INT. */ unsigned HOST_WIDE_INT low, high; unsigned HOST_WIDE_INT mask, sign_bit, sign_extend; /* Set sign_bit to the most significant bit of a word. */ sign_bit = 1; sign_bit <<= BITS_PER_WORD - 1; /* Set mask so that all bits of the word are set. We could have used 1 << BITS_PER_WORD instead of basing the calculation on sign_bit. However, on machines where HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a compiler warning, even though the code would never be executed. */ mask = sign_bit << 1; mask--; /* Set sign_extend as any remaining bits. */ sign_extend = ~mask; /* Pick the lower word and sign-extend it. */ low = INTVAL (value); low &= mask; if (low & sign_bit) low |= sign_extend; /* Pick the higher word, shifted to the least significant bits, and sign-extend it. */ high = INTVAL (value); high >>= BITS_PER_WORD - 1; high >>= 1; high &= mask; if (high & sign_bit) high |= sign_extend; /* Store the words in the target machine order. */ if (WORDS_BIG_ENDIAN) { *first = GEN_INT (high); *second = GEN_INT (low); } else { *first = GEN_INT (low); *second = GEN_INT (high); } } else { /* The rule for using CONST_INT for a wider mode is that we regard the value as signed. So sign-extend it. */ rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx); if (WORDS_BIG_ENDIAN) { *first = high; *second = value; } else { *first = value; *second = high; } } } else if (GET_CODE (value) != CONST_DOUBLE) { if (WORDS_BIG_ENDIAN) { *first = const0_rtx; *second = value; } else { *first = value; *second = const0_rtx; } } else if (GET_MODE (value) == VOIDmode /* This is the old way we did CONST_DOUBLE integers. */ || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT) { /* In an integer, the words are defined as most and least significant. So order them by the target's convention. */ if (WORDS_BIG_ENDIAN) { *first = GEN_INT (CONST_DOUBLE_HIGH (value)); *second = GEN_INT (CONST_DOUBLE_LOW (value)); } else { *first = GEN_INT (CONST_DOUBLE_LOW (value)); *second = GEN_INT (CONST_DOUBLE_HIGH (value)); } } else { REAL_VALUE_TYPE r; long l[2]; REAL_VALUE_FROM_CONST_DOUBLE (r, value); /* Note, this converts the REAL_VALUE_TYPE to the target's format, splits up the floating point double and outputs exactly 32 bits of it into each of l[0] and l[1] -- not necessarily BITS_PER_WORD bits. */ REAL_VALUE_TO_TARGET_DOUBLE (r, l); /* If 32 bits is an entire word for the target, but not for the host, then sign-extend on the host so that the number will look the same way on the host that it would on the target. See for instance simplify_unary_operation. The #if is needed to avoid compiler warnings. 
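For example (an illustration added here), on a host with 64-bit `long' a target word whose 32-bit pattern is 0x80000000 arrives in l[0] as the positive value 0x0000000080000000; or-ing in the high bits below turns it into 0xffffffff80000000, the same negative value a 32-bit target would see.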
*/ #if HOST_BITS_PER_LONG > 32 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32) { if (l[0] & ((long) 1 << 31)) l[0] |= ((long) (-1) << 32); if (l[1] & ((long) 1 << 31)) l[1] |= ((long) (-1) << 32); } #endif *first = GEN_INT ((HOST_WIDE_INT) l[0]); *second = GEN_INT ((HOST_WIDE_INT) l[1]); } } /* Return nonzero if this function has no function calls. */ int leaf_function_p (void) { rtx insn; rtx link; if (current_function_profile || profile_arc_flag) return 0; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == CALL_INSN && ! SIBLING_CALL_P (insn)) return 0; if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == CALL_INSN && ! SIBLING_CALL_P (XVECEXP (PATTERN (insn), 0, 0))) return 0; } for (link = current_function_epilogue_delay_list; link; link = XEXP (link, 1)) { insn = XEXP (link, 0); if (GET_CODE (insn) == CALL_INSN && ! SIBLING_CALL_P (insn)) return 0; if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == CALL_INSN && ! SIBLING_CALL_P (XVECEXP (PATTERN (insn), 0, 0))) return 0; } return 1; } /* Return 1 if branch is a forward branch. Uses insn_shuid array, so it works only in the final pass. May be used by output templates to customary add branch prediction hints. */ int final_forward_branch_p (rtx insn) { int insn_id, label_id; if (!uid_shuid) abort (); insn_id = INSN_SHUID (insn); label_id = INSN_SHUID (JUMP_LABEL (insn)); /* We've hit some insns that does not have id information available. */ if (!insn_id || !label_id) abort (); return insn_id < label_id; } /* On some machines, a function with no call insns can run faster if it doesn't create its own register window. When output, the leaf function should use only the "output" registers. Ordinarily, the function would be compiled to use the "input" registers to find its arguments; it is a candidate for leaf treatment if it uses only the "input" registers. Leaf function treatment means renumbering so the function uses the "output" registers instead. */ #ifdef LEAF_REGISTERS /* Return 1 if this function uses only the registers that can be safely renumbered. */ int only_leaf_regs_used (void) { int i; const char *const permitted_reg_in_leaf_functions = LEAF_REGISTERS; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if ((regs_ever_live[i] || global_regs[i]) && ! permitted_reg_in_leaf_functions[i]) return 0; if (current_function_uses_pic_offset_table && pic_offset_table_rtx != 0 && REG_P (pic_offset_table_rtx) && ! permitted_reg_in_leaf_functions[REGNO (pic_offset_table_rtx)]) return 0; return 1; } /* Scan all instructions and renumber all registers into those available in leaf functions. */ static void leaf_renumber_regs (rtx first) { rtx insn; /* Renumber only the actual patterns. The reg-notes can contain frame pointer refs, and renumbering them could crash, and should not be needed. */ for (insn = first; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) leaf_renumber_regs_insn (PATTERN (insn)); for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1)) if (INSN_P (XEXP (insn, 0))) leaf_renumber_regs_insn (PATTERN (XEXP (insn, 0))); } /* Scan IN_RTX and its subexpressions, and renumber all regs into those available in leaf functions. */ void leaf_renumber_regs_insn (rtx in_rtx) { int i, j; const char *format_ptr; if (in_rtx == 0) return; /* Renumber all input-registers into output-registers. 
renumbered_regs would be 1 for an output-register; they */ if (REG_P (in_rtx)) { int newreg; /* Don't renumber the same reg twice. */ if (in_rtx->used) return; newreg = REGNO (in_rtx); /* Don't try to renumber pseudo regs. It is possible for a pseudo reg to reach here as part of a REG_NOTE. */ if (newreg >= FIRST_PSEUDO_REGISTER) { in_rtx->used = 1; return; } newreg = LEAF_REG_REMAP (newreg); if (newreg < 0) abort (); regs_ever_live[REGNO (in_rtx)] = 0; regs_ever_live[newreg] = 1; REGNO (in_rtx) = newreg; in_rtx->used = 1; } if (INSN_P (in_rtx)) { /* Inside a SEQUENCE, we find insns. Renumber just the patterns of these insns, just as we do for the top-level insns. */ leaf_renumber_regs_insn (PATTERN (in_rtx)); return; } format_ptr = GET_RTX_FORMAT (GET_CODE (in_rtx)); for (i = 0; i < GET_RTX_LENGTH (GET_CODE (in_rtx)); i++) switch (*format_ptr++) { case 'e': leaf_renumber_regs_insn (XEXP (in_rtx, i)); break; case 'E': if (NULL != XVEC (in_rtx, i)) { for (j = 0; j < XVECLEN (in_rtx, i); j++) leaf_renumber_regs_insn (XVECEXP (in_rtx, i, j)); } break; case 'S': case 's': case '0': case 'i': case 'w': case 'n': case 'u': break; default: abort (); } } #endif /* When -gused is used, emit debug info for only used symbols. But in addition to the standard intercepted debug_hooks there are some direct calls into this file, i.e., dbxout_symbol, dbxout_parms, and dbxout_reg_params. Those routines may also be called from a higher level intercepted routine. So to prevent recording data for an inner call to one of these for an intercept, we maintain an intercept nesting counter (debug_nesting). We only save the intercepted arguments if the nesting is 1. */ int debug_nesting = 0; static tree *symbol_queue; int symbol_queue_index = 0; static int symbol_queue_size = 0; /* Generate the symbols for any queued up type symbols we encountered while generating the type info for some originally used symbol. This might generate additional entries in the queue. Only when the nesting depth goes to 0 is this routine called. */ void debug_flush_symbol_queue (void) { int i; /* Make sure that additionally queued items are not flushed prematurely. */ ++debug_nesting; for (i = 0; i < symbol_queue_index; ++i) { /* If we pushed queued symbols then such symbols are must be output no matter what anyone else says. Specifically, we need to make sure dbxout_symbol() thinks the symbol was used and also we need to override TYPE_DECL_SUPPRESS_DEBUG which may be set for outside reasons. */ int saved_tree_used = TREE_USED (symbol_queue[i]); int saved_suppress_debug = TYPE_DECL_SUPPRESS_DEBUG (symbol_queue[i]); TREE_USED (symbol_queue[i]) = 1; TYPE_DECL_SUPPRESS_DEBUG (symbol_queue[i]) = 0; #ifdef DBX_DEBUGGING_INFO dbxout_symbol (symbol_queue[i], 0); #endif TREE_USED (symbol_queue[i]) = saved_tree_used; TYPE_DECL_SUPPRESS_DEBUG (symbol_queue[i]) = saved_suppress_debug; } symbol_queue_index = 0; --debug_nesting; } /* Queue a type symbol needed as part of the definition of a decl symbol. These symbols are generated when debug_flush_symbol_queue() is called. */ void debug_queue_symbol (tree decl) { if (symbol_queue_index >= symbol_queue_size) { symbol_queue_size += 10; symbol_queue = xrealloc (symbol_queue, symbol_queue_size * sizeof (tree)); } symbol_queue[symbol_queue_index++] = decl; } /* Free symbol queue. */ void debug_free_queue (void) { if (symbol_queue) { free (symbol_queue); symbol_queue = NULL; symbol_queue_size = 0; } } /* Data flow analysis for GNU compiler. 
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains the data flow analysis pass of the compiler. It computes data flow information which tells combine_instructions which insns to consider combining and controls register allocation. Additional data flow information that is too bulky to record is generated during the analysis, and is used at that time to create autoincrement and autodecrement addressing. The first step is dividing the function into basic blocks. find_basic_blocks does this. Then life_analysis determines where each register is live and where it is dead. ** find_basic_blocks ** find_basic_blocks divides the current function's rtl into basic blocks and constructs the CFG. The blocks are recorded in the basic_block_info array; the CFG exists in the edge structures referenced by the blocks. find_basic_blocks also finds any unreachable loops and deletes them. ** life_analysis ** life_analysis is called immediately after find_basic_blocks. It uses the basic block information to determine where each hard or pseudo register is live. ** live-register info ** The information about where each register is live is in two parts: the REG_NOTES of insns, and the vector basic_block->global_live_at_start. basic_block->global_live_at_start has an element for each basic block, and the element is a bit-vector with a bit for each hard or pseudo register. The bit is 1 if the register is live at the beginning of the basic block. Two types of elements can be added to an insn's REG_NOTES. A REG_DEAD note is added to an insn's REG_NOTES for any register that meets both of two conditions: The value in the register is not needed in subsequent insns and the insn does not replace the value in the register (in the case of multi-word hard registers, the value in each register must be replaced by the insn to avoid a REG_DEAD note). In the vast majority of cases, an object in a REG_DEAD note will be used somewhere in the insn. The (rare) exception to this is if an insn uses a multi-word hard register and only some of the registers are needed in subsequent insns. In that case, REG_DEAD notes will be provided for those hard registers that are not subsequently needed. Partial REG_DEAD notes of this type do not occur when an insn sets only some of the hard registers used in such a multi-word operand; omitting REG_DEAD notes for objects stored in an insn is optional and the desire to do so does not justify the complexity of the partial REG_DEAD notes. REG_UNUSED notes are added for each register that is set by the insn but is unused subsequently (if every register set by the insn is unused and the insn does not reference memory or have some other side-effect, the insn is deleted instead). 
If only part of a multi-word hard register is used in a subsequent insn, REG_UNUSED notes are made for the parts that will not be used. To determine which registers are live after any insn, one can start from the beginning of the basic block and scan insns, noting which registers are set by each insn and which die there. ** Other actions of life_analysis ** life_analysis sets up the LOG_LINKS fields of insns because the information needed to do so is readily available. life_analysis deletes insns whose only effect is to store a value that is never used. life_analysis notices cases where a reference to a register as a memory address can be combined with a preceding or following incrementation or decrementation of the register. The separate instruction to increment or decrement is deleted and the address is changed to a POST_INC or similar rtx. Each time an incrementing or decrementing address is created, a REG_INC element is added to the insn's REG_NOTES list. life_analysis fills in certain vectors containing information about register usage: REG_N_REFS, REG_N_DEATHS, REG_N_SETS, REG_LIVE_LENGTH, REG_N_CALLS_CROSSED and REG_BASIC_BLOCK. life_analysis sets current_function_sp_is_unchanging if the function doesn't modify the stack pointer. */ /* TODO: Split out from life_analysis: - local property discovery (bb->local_live, bb->local_set) - global property computation - log links creation - pre/post modify transformation */ #ifndef HAVE_epilogue #define HAVE_epilogue 0 #endif #ifndef HAVE_prologue #define HAVE_prologue 0 #endif #ifndef HAVE_sibcall_epilogue #define HAVE_sibcall_epilogue 0 #endif #ifndef EPILOGUE_USES #define EPILOGUE_USES(REGNO) 0 #endif #ifndef EH_USES #define EH_USES(REGNO) 0 #endif #ifdef HAVE_conditional_execution #ifndef REVERSE_CONDEXEC_PREDICATES_P #define REVERSE_CONDEXEC_PREDICATES_P(x, y) ((x) == reverse_condition (y)) #endif #endif /* Nonzero if the second flow pass has completed. */ int flow2_completed; /* Maximum register number used in this function, plus one. */ int max_regno; /* Indexed by n, giving various register information */ varray_type reg_n_info; /* Size of a regset for the current function, in (1) bytes and (2) elements. */ int regset_bytes; int regset_size; /* Regset of regs live when calls to `setjmp'-like functions happen. */ /* ??? Does this exist only for the setjmp-clobbered warning message? */ regset regs_live_at_setjmp; /* List made of EXPR_LIST rtx's which gives pairs of pseudo registers that have to go in the same hard reg. The first two regs in the list are a pair, and the next two are another pair, etc. */ rtx regs_may_share; /* Set of registers that may be eliminable. These are handled specially in updating regs_ever_live. */ static HARD_REG_SET elim_reg_set; /* Holds information for tracking conditional register life information. */ struct reg_cond_life_info { /* A boolean expression of conditions under which a register is dead. */ rtx condition; /* Conditions under which a register is dead at the basic block end. */ rtx orig_condition; /* A boolean expression of conditions under which a register has been stored into. */ rtx stores; /* ??? Could store mask of bytes that are dead, so that we could finally track lifetimes of multi-word registers accessed via subregs. */ }; /* For use in communicating between propagate_block and its subroutines. Holds all information needed to compute life and def-use information. */ struct propagate_block_info { /* The basic block we're considering. 
*/ basic_block bb; /* Bit N is set if register N is conditionally or unconditionally live. */ regset reg_live; /* Bit N is set if register N is set this insn. */ regset new_set; /* Element N is the next insn that uses (hard or pseudo) register N within the current basic block; or zero, if there is no such insn. */ rtx *reg_next_use; /* Contains a list of all the MEMs we are tracking for dead store elimination. */ rtx mem_set_list; /* If non-null, record the set of registers set unconditionally in the basic block. */ regset local_set; /* If non-null, record the set of registers set conditionally in the basic block. */ regset cond_local_set; #ifdef HAVE_conditional_execution /* Indexed by register number, holds a reg_cond_life_info for each register that is not unconditionally live or dead. */ splay_tree reg_cond_dead; /* Bit N is set if register N is in an expression in reg_cond_dead. */ regset reg_cond_reg; #endif /* The length of mem_set_list. */ int mem_set_list_len; /* Nonzero if the value of CC0 is live. */ int cc0_live; /* Flags controlling the set of information propagate_block collects. */ int flags; /* Index of instruction being processed. */ int insn_num; }; /* Number of dead insns removed. */ static int ndead; /* When PROP_REG_INFO set, array contains pbi->insn_num of instruction where given register died. When the register is marked alive, we use the information to compute amount of instructions life range cross. (remember, we are walking backward). This can be computed as current pbi->insn_num - reg_deaths[regno]. At the end of processing each basic block, the remaining live registers are inspected and liferanges are increased same way so liverange of global registers are computed correctly. The array is maintained clear for dead registers, so it can be safely reused for next basic block without expensive memset of the whole array after reseting pbi->insn_num to 0. */ static int *reg_deaths; /* Maximum length of pbi->mem_set_list before we start dropping new elements on the floor. 
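*/

/* Illustrative sketch, not part of the original source: the reg_deaths
   bookkeeping described a few lines above, shown in isolation.  Here
   `pbi' and `regno' are assumed to be in scope; the real updates live in
   the mark_*_reg routines further down.  Guarded out of compilation.  */
#if 0
  /* Walking backward, a death of REGNO records the insn's position.  */
  reg_deaths[regno] = pbi->insn_num;

  /* When the register is next seen live (i.e. earlier in the block), the
     distance is credited to its life range and the slot is cleared so the
     array stays clean for the next basic block.  */
  if (reg_deaths[regno])
    {
      REG_LIVE_LENGTH (regno) += pbi->insn_num - reg_deaths[regno];
      reg_deaths[regno] = 0;
    }
#endif

/*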
*/ #define MAX_MEM_SET_LIST_LEN 100 /* Forward declarations */ static int verify_wide_reg_1 (rtx *, void *); static void verify_wide_reg (int, basic_block); static void verify_local_live_at_start (regset, basic_block); static void notice_stack_pointer_modification_1 (rtx, rtx, void *); static void notice_stack_pointer_modification (void); static void mark_reg_flow (rtx, void *); static void mark_regs_live_at_end (regset); static void calculate_global_regs_live (sbitmap, sbitmap, int); static void propagate_block_delete_insn (rtx); static rtx propagate_block_delete_libcall (rtx, rtx); static int insn_dead_p (struct propagate_block_info *, rtx, int, rtx); static int libcall_dead_p (struct propagate_block_info *, rtx, rtx); static void mark_set_regs_flow (struct propagate_block_info *, rtx, rtx); static void mark_set_1 (struct propagate_block_info *, enum rtx_code, rtx, rtx, rtx, int); static int find_regno_partial (rtx *, void *); #ifdef HAVE_conditional_execution static int mark_regno_cond_dead (struct propagate_block_info *, int, rtx); static void free_reg_cond_life_info (splay_tree_value); static int flush_reg_cond_reg_1 (splay_tree_node, void *); static void flush_reg_cond_reg (struct propagate_block_info *, int); static rtx elim_reg_cond (rtx, unsigned int); static rtx ior_reg_cond (rtx, rtx, int); static rtx not_reg_cond (rtx); static rtx and_reg_cond (rtx, rtx, int); #endif #ifdef AUTO_INC_DEC static void attempt_auto_inc (struct propagate_block_info *, rtx, rtx, rtx, rtx, rtx); static void find_auto_inc (struct propagate_block_info *, rtx, rtx); static int try_pre_increment_1 (struct propagate_block_info *, rtx); static int try_pre_increment (rtx, rtx, HOST_WIDE_INT); #endif static void mark_used_reg (struct propagate_block_info *, rtx, rtx, rtx); static void mark_used_regs (struct propagate_block_info *, rtx, rtx, rtx); void debug_flow_info (void); static void add_to_mem_set_list (struct propagate_block_info *, rtx); static int invalidate_mems_from_autoinc (rtx *, void *); static void invalidate_mems_from_set (struct propagate_block_info *, rtx); static void clear_log_links (sbitmap); static int count_or_remove_death_notes_bb (basic_block, int); /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK note associated with the BLOCK. */ rtx first_insn_after_basic_block_note (basic_block block) { rtx insn; /* Get the first instruction in the block. */ insn = BB_HEAD (block); if (insn == NULL_RTX) return NULL_RTX; if (GET_CODE (insn) == CODE_LABEL) insn = NEXT_INSN (insn); if (!NOTE_INSN_BASIC_BLOCK_P (insn)) abort (); return NEXT_INSN (insn); } /* Perform data flow analysis for the whole control flow graph. FLAGS is a set of PROP_* flags to be used in accumulating flow info. */ void life_analysis (FILE *file, int flags) { #ifdef ELIMINABLE_REGS int i; static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS; #endif /* Record which registers will be eliminated. We use this in mark_used_regs. */ CLEAR_HARD_REG_SET (elim_reg_set); #ifdef ELIMINABLE_REGS for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++) SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from); #else SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM); #endif #ifdef CANNOT_CHANGE_MODE_CLASS if (flags & PROP_REG_INFO) bitmap_initialize (&subregs_of_mode, 1); #endif if (! optimize) flags &= ~(PROP_LOG_LINKS | PROP_AUTOINC | PROP_ALLOW_CFG_CHANGES); /* The post-reload life analysis have (on a global basis) the same registers live as was computed by reload itself. 
elimination Otherwise offsets and such may be incorrect. Reload will make some registers as live even though they do not appear in the rtl. We don't want to create new auto-incs after reload, since they are unlikely to be useful and can cause problems with shared stack slots. */ if (reload_completed) flags &= ~(PROP_REG_INFO | PROP_AUTOINC); /* We want alias analysis information for local dead store elimination. */ if (optimize && (flags & PROP_SCAN_DEAD_STORES)) init_alias_analysis (); /* Always remove no-op moves. Do this before other processing so that we don't have to keep re-scanning them. */ delete_noop_moves (); /* Some targets can emit simpler epilogues if they know that sp was not ever modified during the function. After reload, of course, we've already emitted the epilogue so there's no sense searching. */ if (! reload_completed) notice_stack_pointer_modification (); /* Allocate and zero out data structures that will record the data from lifetime analysis. */ allocate_reg_life_data (); allocate_bb_life_data (); /* Find the set of registers live on function exit. */ mark_regs_live_at_end (EXIT_BLOCK_PTR->global_live_at_start); /* "Update" life info from zero. It'd be nice to begin the relaxation with just the exit and noreturn blocks, but that set is not immediately handy. */ if (flags & PROP_REG_INFO) { memset (regs_ever_live, 0, sizeof (regs_ever_live)); memset (regs_asm_clobbered, 0, sizeof (regs_asm_clobbered)); } update_life_info (NULL, UPDATE_LIFE_GLOBAL, flags); if (reg_deaths) { free (reg_deaths); reg_deaths = NULL; } /* Clean up. */ if (optimize && (flags & PROP_SCAN_DEAD_STORES)) end_alias_analysis (); if (file) dump_flow_info (file); /* Removing dead insns should have made jumptables really dead. */ delete_dead_jumptables (); } /* A subroutine of verify_wide_reg, called through for_each_rtx. Search for REGNO. If found, return 2 if it is not wider than word_mode. */ static int verify_wide_reg_1 (rtx *px, void *pregno) { rtx x = *px; unsigned int regno = *(int *) pregno; if (REG_P (x) && REGNO (x) == regno) { if (GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD) return 2; return 1; } return 0; } /* A subroutine of verify_local_live_at_start. Search through insns of BB looking for register REGNO. */ static void verify_wide_reg (int regno, basic_block bb) { rtx head = BB_HEAD (bb), end = BB_END (bb); while (1) { if (INSN_P (head)) { int r = for_each_rtx (&PATTERN (head), verify_wide_reg_1, ®no); if (r == 1) return; if (r == 2) break; } if (head == end) break; head = NEXT_INSN (head); } if (dump_file) { fprintf (dump_file, "Register %d died unexpectedly.\n", regno); dump_bb (bb, dump_file, 0); } abort (); } /* A subroutine of update_life_info. Verify that there are no untoward changes in live_at_start during a local update. */ static void verify_local_live_at_start (regset new_live_at_start, basic_block bb) { if (reload_completed) { /* After reload, there are no pseudos, nor subregs of multi-word registers. The regsets should exactly match. */ if (! REG_SET_EQUAL_P (new_live_at_start, bb->global_live_at_start)) { if (dump_file) { fprintf (dump_file, "live_at_start mismatch in bb %d, aborting\nNew:\n", bb->index); debug_bitmap_file (dump_file, new_live_at_start); fputs ("Old:\n", dump_file); dump_bb (bb, dump_file, 0); } abort (); } } else { int i; /* Find the set of changed registers. */ XOR_REG_SET (new_live_at_start, bb->global_live_at_start); EXECUTE_IF_SET_IN_REG_SET (new_live_at_start, 0, i, { /* No registers should die. 
*/ if (REGNO_REG_SET_P (bb->global_live_at_start, i)) { if (dump_file) { fprintf (dump_file, "Register %d died unexpectedly.\n", i); dump_bb (bb, dump_file, 0); } abort (); } /* Verify that the now-live register is wider than word_mode. */ verify_wide_reg (i, bb); }); } } /* Updates life information starting with the basic blocks set in BLOCKS. If BLOCKS is null, consider it to be the universal set. If EXTENT is UPDATE_LIFE_LOCAL, such as after splitting or peepholing, we are only expecting local modifications to basic blocks. If we find extra registers live at the beginning of a block, then we either killed useful data, or we have a broken split that wants data not provided. If we find registers removed from live_at_start, that means we have a broken peephole that is killing a register it shouldn't. ??? This is not true in one situation -- when a pre-reload splitter generates subregs of a multi-word pseudo, current life analysis will lose the kill. So we _can_ have a pseudo go live. How irritating. It is also not true when a peephole decides that it doesn't need one or more of the inputs. Including PROP_REG_INFO does not properly refresh regs_ever_live unless the caller resets it to zero. */ int update_life_info (sbitmap blocks, enum update_life_extent extent, int prop_flags) { regset tmp; regset_head tmp_head; int i; int stabilized_prop_flags = prop_flags; basic_block bb; tmp = INITIALIZE_REG_SET (tmp_head); ndead = 0; if ((prop_flags & PROP_REG_INFO) && !reg_deaths) reg_deaths = xcalloc (sizeof (*reg_deaths), max_regno); timevar_push ((extent == UPDATE_LIFE_LOCAL || blocks) ? TV_LIFE_UPDATE : TV_LIFE); /* Changes to the CFG are only allowed when doing a global update for the entire CFG. */ if ((prop_flags & PROP_ALLOW_CFG_CHANGES) && (extent == UPDATE_LIFE_LOCAL || blocks)) abort (); /* For a global update, we go through the relaxation process again. */ if (extent != UPDATE_LIFE_LOCAL) { for ( ; ; ) { int changed = 0; calculate_global_regs_live (blocks, blocks, prop_flags & (PROP_SCAN_DEAD_CODE | PROP_SCAN_DEAD_STORES | PROP_ALLOW_CFG_CHANGES)); if ((prop_flags & (PROP_KILL_DEAD_CODE | PROP_ALLOW_CFG_CHANGES)) != (PROP_KILL_DEAD_CODE | PROP_ALLOW_CFG_CHANGES)) break; /* Removing dead code may allow the CFG to be simplified which in turn may allow for further dead code detection / removal. */ FOR_EACH_BB_REVERSE (bb) { COPY_REG_SET (tmp, bb->global_live_at_end); changed |= propagate_block (bb, tmp, NULL, NULL, prop_flags & (PROP_SCAN_DEAD_CODE | PROP_SCAN_DEAD_STORES | PROP_KILL_DEAD_CODE)); } /* Don't pass PROP_SCAN_DEAD_CODE or PROP_KILL_DEAD_CODE to subsequent propagate_block calls, since removing or acting as removing dead code can affect global register liveness, which is supposed to be finalized for this call after this loop. */ stabilized_prop_flags &= ~(PROP_SCAN_DEAD_CODE | PROP_SCAN_DEAD_STORES | PROP_KILL_DEAD_CODE); if (! changed) break; /* We repeat regardless of what cleanup_cfg says. If there were instructions deleted above, that might have been only a partial improvement (see MAX_MEM_SET_LIST_LEN usage). Further improvement may be possible. */ cleanup_cfg (CLEANUP_EXPENSIVE); /* Zap the life information from the last round. If we don't do this, we can wind up with registers that no longer appear in the code being marked live at entry. */ FOR_EACH_BB (bb) { CLEAR_REG_SET (bb->global_live_at_start); CLEAR_REG_SET (bb->global_live_at_end); } } /* If asked, remove notes from the blocks we'll update. 
*/ if (extent == UPDATE_LIFE_GLOBAL_RM_NOTES) count_or_remove_death_notes (blocks, 1); } /* Clear log links in case we are asked to (re)compute them. */ if (prop_flags & PROP_LOG_LINKS) clear_log_links (blocks); if (blocks) { EXECUTE_IF_SET_IN_SBITMAP (blocks, 0, i, { bb = BASIC_BLOCK (i); COPY_REG_SET (tmp, bb->global_live_at_end); propagate_block (bb, tmp, NULL, NULL, stabilized_prop_flags); if (extent == UPDATE_LIFE_LOCAL) verify_local_live_at_start (tmp, bb); }); } else { FOR_EACH_BB_REVERSE (bb) { COPY_REG_SET (tmp, bb->global_live_at_end); propagate_block (bb, tmp, NULL, NULL, stabilized_prop_flags); if (extent == UPDATE_LIFE_LOCAL) verify_local_live_at_start (tmp, bb); } } FREE_REG_SET (tmp); if (prop_flags & PROP_REG_INFO) { /* The only pseudos that are live at the beginning of the function are those that were not set anywhere in the function. local-alloc doesn't know how to handle these correctly, so mark them as not local to any one basic block. */ EXECUTE_IF_SET_IN_REG_SET (ENTRY_BLOCK_PTR->global_live_at_end, FIRST_PSEUDO_REGISTER, i, { REG_BASIC_BLOCK (i) = REG_BLOCK_GLOBAL; }); /* We have a problem with any pseudoreg that lives across the setjmp. ANSI says that if a user variable does not change in value between the setjmp and the longjmp, then the longjmp preserves it. This includes longjmp from a place where the pseudo appears dead. (In principle, the value still exists if it is in scope.) If the pseudo goes in a hard reg, some other value may occupy that hard reg where this pseudo is dead, thus clobbering the pseudo. Conclusion: such a pseudo must not go in a hard reg. */ EXECUTE_IF_SET_IN_REG_SET (regs_live_at_setjmp, FIRST_PSEUDO_REGISTER, i, { if (regno_reg_rtx[i] != 0) { REG_LIVE_LENGTH (i) = -1; REG_BASIC_BLOCK (i) = REG_BLOCK_UNKNOWN; } }); } if (reg_deaths) { free (reg_deaths); reg_deaths = NULL; } timevar_pop ((extent == UPDATE_LIFE_LOCAL || blocks) ? TV_LIFE_UPDATE : TV_LIFE); if (ndead && dump_file) fprintf (dump_file, "deleted %i dead insns\n", ndead); return ndead; } /* Update life information in all blocks where BB_DIRTY is set. */ int update_life_info_in_dirty_blocks (enum update_life_extent extent, int prop_flags) { sbitmap update_life_blocks = sbitmap_alloc (last_basic_block); int n = 0; basic_block bb; int retval = 0; sbitmap_zero (update_life_blocks); FOR_EACH_BB (bb) { if (extent == UPDATE_LIFE_LOCAL) { if (bb->flags & BB_DIRTY) { SET_BIT (update_life_blocks, bb->index); n++; } } else { /* ??? Bootstrap with -march=pentium4 fails to terminate with only a partial life update. */ SET_BIT (update_life_blocks, bb->index); if (bb->flags & BB_DIRTY) n++; } } if (n) retval = update_life_info (update_life_blocks, extent, prop_flags); sbitmap_free (update_life_blocks); return retval; } /* Free the variables allocated by find_basic_blocks. */ void free_basic_block_vars (void) { if (basic_block_info) { clear_edges (); basic_block_info = NULL; } n_basic_blocks = 0; last_basic_block = 0; ENTRY_BLOCK_PTR->aux = NULL; ENTRY_BLOCK_PTR->global_live_at_end = NULL; EXIT_BLOCK_PTR->aux = NULL; EXIT_BLOCK_PTR->global_live_at_start = NULL; } /* Delete any insns that copy a register to itself. */ int delete_noop_moves (void) { rtx insn, next; basic_block bb; int nnoops = 0; FOR_EACH_BB (bb) { for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next) { next = NEXT_INSN (insn); if (INSN_P (insn) && noop_move_p (insn)) { rtx note; /* If we're about to remove the first insn of a libcall then move the libcall note to the next real insn and update the retval note. 
*/ if ((note = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) && XEXP (note, 0) != insn) { rtx new_libcall_insn = next_real_insn (insn); rtx retval_note = find_reg_note (XEXP (note, 0), REG_RETVAL, NULL_RTX); REG_NOTES (new_libcall_insn) = gen_rtx_INSN_LIST (REG_LIBCALL, XEXP (note, 0), REG_NOTES (new_libcall_insn)); XEXP (retval_note, 0) = new_libcall_insn; } delete_insn_and_edges (insn); nnoops++; } } } if (nnoops && dump_file) fprintf (dump_file, "deleted %i noop moves", nnoops); return nnoops; } /* Delete any jump tables never referenced. We can't delete them at the time of removing tablejump insn as they are referenced by the preceding insns computing the destination, so we delay deleting and garbagecollect them once life information is computed. */ void delete_dead_jumptables (void) { rtx insn, next; for (insn = get_insns (); insn; insn = next) { next = NEXT_INSN (insn); if (GET_CODE (insn) == CODE_LABEL && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn) && GET_CODE (next) == JUMP_INSN && (GET_CODE (PATTERN (next)) == ADDR_VEC || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)) { if (dump_file) fprintf (dump_file, "Dead jumptable %i removed\n", INSN_UID (insn)); delete_insn (NEXT_INSN (insn)); delete_insn (insn); next = NEXT_INSN (next); } } } /* Determine if the stack pointer is constant over the life of the function. Only useful before prologues have been emitted. */ static void notice_stack_pointer_modification_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { if (x == stack_pointer_rtx /* The stack pointer is only modified indirectly as the result of a push until later in flow. See the comments in rtl.texi regarding Embedded Side-Effects on Addresses. */ || (MEM_P (x) && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_AUTOINC && XEXP (XEXP (x, 0), 0) == stack_pointer_rtx)) current_function_sp_is_unchanging = 0; } static void notice_stack_pointer_modification (void) { basic_block bb; rtx insn; /* Assume that the stack pointer is unchanging if alloca hasn't been used. */ current_function_sp_is_unchanging = !current_function_calls_alloca; if (! current_function_sp_is_unchanging) return; FOR_EACH_BB (bb) FOR_BB_INSNS (bb, insn) { if (INSN_P (insn)) { /* Check if insn modifies the stack pointer. */ note_stores (PATTERN (insn), notice_stack_pointer_modification_1, NULL); if (! current_function_sp_is_unchanging) return; } } } /* Mark a register in SET. Hard registers in large modes get all of their component registers set as well. */ static void mark_reg_flow (rtx reg, void *xset) { regset set = (regset) xset; int regno = REGNO (reg); if (GET_MODE (reg) == BLKmode) abort (); SET_REGNO_REG_SET (set, regno); if (regno < FIRST_PSEUDO_REGISTER) { int n = hard_regno_nregs[regno][GET_MODE (reg)]; while (--n > 0) SET_REGNO_REG_SET (set, regno + n); } } /* Mark those regs which are needed at the end of the function as live at the end of the last basic block. */ static void mark_regs_live_at_end (regset set) { unsigned int i; /* If exiting needs the right stack value, consider the stack pointer live at the end of the function. */ if ((HAVE_epilogue && epilogue_completed) || ! EXIT_IGNORE_STACK || (! FRAME_POINTER_REQUIRED && ! current_function_calls_alloca && flag_omit_frame_pointer) || current_function_sp_is_unchanging) { SET_REGNO_REG_SET (set, STACK_POINTER_REGNUM); } /* Mark the frame pointer if needed at the end of the function. If we end up eliminating it, it will be removed from the live list of each basic block by reload. */ if (! 
reload_completed || frame_pointer_needed) { SET_REGNO_REG_SET (set, FRAME_POINTER_REGNUM); #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM /* If they are different, also mark the hard frame pointer as live. */ if (! LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM)) SET_REGNO_REG_SET (set, HARD_FRAME_POINTER_REGNUM); #endif } #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED /* Many architectures have a GP register even without flag_pic. Assume the pic register is not in use, or will be handled by other means, if it is not fixed. */ if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM && fixed_regs[PIC_OFFSET_TABLE_REGNUM]) SET_REGNO_REG_SET (set, PIC_OFFSET_TABLE_REGNUM); #endif /* Mark all global registers, and all registers used by the epilogue as being live at the end of the function since they may be referenced by our caller. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (global_regs[i] || EPILOGUE_USES (i)) SET_REGNO_REG_SET (set, i); if (HAVE_epilogue && epilogue_completed) { /* Mark all call-saved registers that we actually used. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (regs_ever_live[i] && ! LOCAL_REGNO (i) && ! TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) SET_REGNO_REG_SET (set, i); } #ifdef EH_RETURN_DATA_REGNO /* Mark the registers that will contain data for the handler. */ if (reload_completed && current_function_calls_eh_return) for (i = 0; ; ++i) { unsigned regno = EH_RETURN_DATA_REGNO(i); if (regno == INVALID_REGNUM) break; SET_REGNO_REG_SET (set, regno); } #endif #ifdef EH_RETURN_STACKADJ_RTX if ((! HAVE_epilogue || ! epilogue_completed) && current_function_calls_eh_return) { rtx tmp = EH_RETURN_STACKADJ_RTX; if (tmp && REG_P (tmp)) mark_reg_flow (tmp, set); } #endif #ifdef EH_RETURN_HANDLER_RTX if ((! HAVE_epilogue || ! epilogue_completed) && current_function_calls_eh_return) { rtx tmp = EH_RETURN_HANDLER_RTX; if (tmp && REG_P (tmp)) mark_reg_flow (tmp, set); } #endif /* Mark function return value. */ diddle_return_value (mark_reg_flow, set); } /* Propagate global life info around the graph of basic blocks. Begin considering blocks with their corresponding bit set in BLOCKS_IN. If BLOCKS_IN is null, consider it the universal set. BLOCKS_OUT is set for every block that was changed. */ static void calculate_global_regs_live (sbitmap blocks_in, sbitmap blocks_out, int flags) { basic_block *queue, *qhead, *qtail, *qend, bb; regset tmp, new_live_at_end, invalidated_by_call; regset_head tmp_head, invalidated_by_call_head; regset_head new_live_at_end_head; int i; /* Some passes used to forget clear aux field of basic block causing sick behavior here. */ #ifdef ENABLE_CHECKING FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) if (bb->aux) abort (); #endif tmp = INITIALIZE_REG_SET (tmp_head); new_live_at_end = INITIALIZE_REG_SET (new_live_at_end_head); invalidated_by_call = INITIALIZE_REG_SET (invalidated_by_call_head); /* Inconveniently, this is only readily available in hard reg set form. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i) if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) SET_REGNO_REG_SET (invalidated_by_call, i); /* Create a worklist. Allocate an extra slot for ENTRY_BLOCK, and one because the `head == tail' style test for an empty queue doesn't work with a full queue. */ queue = xmalloc ((n_basic_blocks + 2) * sizeof (*queue)); qtail = queue; qhead = qend = queue + n_basic_blocks + 2; /* Queue the blocks set in the initial mask. Do this in reverse block number order so that we are more likely for the first round to do useful work. 
We use AUX non-null to flag that the block is queued. */ if (blocks_in) { FOR_EACH_BB (bb) if (TEST_BIT (blocks_in, bb->index)) { *--qhead = bb; bb->aux = bb; } } else { FOR_EACH_BB (bb) { *--qhead = bb; bb->aux = bb; } } /* We clean aux when we remove the initially-enqueued bbs, but we don't enqueue ENTRY and EXIT initially, so clean them upfront and unconditionally. */ ENTRY_BLOCK_PTR->aux = EXIT_BLOCK_PTR->aux = NULL; if (blocks_out) sbitmap_zero (blocks_out); /* We work through the queue until there are no more blocks. What is live at the end of this block is precisely the union of what is live at the beginning of all its successors. So, we set its GLOBAL_LIVE_AT_END field based on the GLOBAL_LIVE_AT_START field for its successors. Then, we compute GLOBAL_LIVE_AT_START for this block by walking through the instructions in this block in reverse order and updating as we go. If that changed GLOBAL_LIVE_AT_START, we add the predecessors of the block to the queue; they will now need to recalculate GLOBAL_LIVE_AT_END. We are guaranteed to terminate, because GLOBAL_LIVE_AT_START never shrinks. If a register appears in GLOBAL_LIVE_AT_START, it must either be live at the end of the block, or used within the block. In the latter case, it will certainly never disappear from GLOBAL_LIVE_AT_START. In the former case, the register could go away only if it disappeared from GLOBAL_LIVE_AT_START for one of the successor blocks. By induction, that cannot occur. */ while (qhead != qtail) { int rescan, changed; basic_block bb; edge e; bb = *qhead++; if (qhead == qend) qhead = queue; bb->aux = NULL; /* Begin by propagating live_at_start from the successor blocks. */ CLEAR_REG_SET (new_live_at_end); if (bb->succ) for (e = bb->succ; e; e = e->succ_next) { basic_block sb = e->dest; /* Call-clobbered registers die across exception and call edges. */ /* ??? Abnormal call edges ignored for the moment, as this gets confused by sibling call edges, which crashes reg-stack. */ if (e->flags & EDGE_EH) { bitmap_operation (tmp, sb->global_live_at_start, invalidated_by_call, BITMAP_AND_COMPL); IOR_REG_SET (new_live_at_end, tmp); } else IOR_REG_SET (new_live_at_end, sb->global_live_at_start); /* If a target saves one register in another (instead of on the stack) the save register will need to be live for EH. */ if (e->flags & EDGE_EH) for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (EH_USES (i)) SET_REGNO_REG_SET (new_live_at_end, i); } else { /* This might be a noreturn function that throws. And even if it isn't, getting the unwind info right helps debugging. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (EH_USES (i)) SET_REGNO_REG_SET (new_live_at_end, i); } /* The all-important stack pointer must always be live. */ SET_REGNO_REG_SET (new_live_at_end, STACK_POINTER_REGNUM); /* Before reload, there are a few registers that must be forced live everywhere -- which might not already be the case for blocks within infinite loops. */ if (! reload_completed) { /* Any reference to any pseudo before reload is a potential reference of the frame pointer. */ SET_REGNO_REG_SET (new_live_at_end, FRAME_POINTER_REGNUM); #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM /* Pseudos with argument area equivalences may require reloading via the argument pointer. */ if (fixed_regs[ARG_POINTER_REGNUM]) SET_REGNO_REG_SET (new_live_at_end, ARG_POINTER_REGNUM); #endif /* Any constant, or pseudo with constant equivalences, may require reloading from memory using the pic register. 
*/ if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM && fixed_regs[PIC_OFFSET_TABLE_REGNUM]) SET_REGNO_REG_SET (new_live_at_end, PIC_OFFSET_TABLE_REGNUM); } if (bb == ENTRY_BLOCK_PTR) { COPY_REG_SET (bb->global_live_at_end, new_live_at_end); continue; } /* On our first pass through this block, we'll go ahead and continue. Recognize first pass by local_set NULL. On subsequent passes, we get to skip out early if live_at_end wouldn't have changed. */ if (bb->local_set == NULL) { bb->local_set = OBSTACK_ALLOC_REG_SET (&flow_obstack); bb->cond_local_set = OBSTACK_ALLOC_REG_SET (&flow_obstack); rescan = 1; } else { /* If any bits were removed from live_at_end, we'll have to rescan the block. This wouldn't be necessary if we had precalculated local_live, however with PROP_SCAN_DEAD_CODE local_live is really dependent on live_at_end. */ CLEAR_REG_SET (tmp); rescan = bitmap_operation (tmp, bb->global_live_at_end, new_live_at_end, BITMAP_AND_COMPL); if (! rescan) { /* If any of the registers in the new live_at_end set are conditionally set in this basic block, we must rescan. This is because conditional lifetimes at the end of the block do not just take the live_at_end set into account, but also the liveness at the start of each successor block. We can miss changes in those sets if we only compare the new live_at_end against the previous one. */ CLEAR_REG_SET (tmp); rescan = bitmap_operation (tmp, new_live_at_end, bb->cond_local_set, BITMAP_AND); } if (! rescan) { /* Find the set of changed bits. Take this opportunity to notice that this set is empty and early out. */ CLEAR_REG_SET (tmp); changed = bitmap_operation (tmp, bb->global_live_at_end, new_live_at_end, BITMAP_XOR); if (! changed) continue; /* If any of the changed bits overlap with local_set, we'll have to rescan the block. Detect overlap by the AND with ~local_set turning off bits. */ rescan = bitmap_operation (tmp, tmp, bb->local_set, BITMAP_AND_COMPL); } } /* Let our caller know that BB changed enough to require its death notes updated. */ if (blocks_out) SET_BIT (blocks_out, bb->index); if (! rescan) { /* Add to live_at_start the set of all registers in new_live_at_end that aren't in the old live_at_end. */ bitmap_operation (tmp, new_live_at_end, bb->global_live_at_end, BITMAP_AND_COMPL); COPY_REG_SET (bb->global_live_at_end, new_live_at_end); changed = bitmap_operation (bb->global_live_at_start, bb->global_live_at_start, tmp, BITMAP_IOR); if (! changed) continue; } else { COPY_REG_SET (bb->global_live_at_end, new_live_at_end); /* Rescan the block insn by insn to turn (a copy of) live_at_end into live_at_start. */ propagate_block (bb, new_live_at_end, bb->local_set, bb->cond_local_set, flags); /* If live_at start didn't change, no need to go farther. */ if (REG_SET_EQUAL_P (bb->global_live_at_start, new_live_at_end)) continue; COPY_REG_SET (bb->global_live_at_start, new_live_at_end); } /* Queue all predecessors of BB so that we may re-examine their live_at_end. 
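The AUX field is the in-queue marker: it is set when a block is enqueued and cleared when the block is dequeued, so a block is never placed on the worklist twice at the same time.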
*/ for (e = bb->pred; e; e = e->pred_next) { basic_block pb = e->src; if (pb->aux == NULL) { *qtail++ = pb; if (qtail == qend) qtail = queue; pb->aux = pb; } } } FREE_REG_SET (tmp); FREE_REG_SET (new_live_at_end); FREE_REG_SET (invalidated_by_call); if (blocks_out) { EXECUTE_IF_SET_IN_SBITMAP (blocks_out, 0, i, { basic_block bb = BASIC_BLOCK (i); FREE_REG_SET (bb->local_set); FREE_REG_SET (bb->cond_local_set); }); } else { FOR_EACH_BB (bb) { FREE_REG_SET (bb->local_set); FREE_REG_SET (bb->cond_local_set); } } free (queue); } /* This structure is used to pass parameters to and from the function find_regno_partial(). It is used to pass in the register number we are looking for, as well as to return any rtx we find. */ typedef struct { unsigned regno_to_find; rtx retval; } find_regno_partial_param; /* Find the rtx for the reg numbers specified in 'data' if it is part of an expression which only uses part of the register. Return it in the structure passed in. */ static int find_regno_partial (rtx *ptr, void *data) { find_regno_partial_param *param = (find_regno_partial_param *)data; unsigned reg = param->regno_to_find; param->retval = NULL_RTX; if (*ptr == NULL_RTX) return 0; switch (GET_CODE (*ptr)) { case ZERO_EXTRACT: case SIGN_EXTRACT: case STRICT_LOW_PART: if (REG_P (XEXP (*ptr, 0)) && REGNO (XEXP (*ptr, 0)) == reg) { param->retval = XEXP (*ptr, 0); return 1; } break; case SUBREG: if (REG_P (SUBREG_REG (*ptr)) && REGNO (SUBREG_REG (*ptr)) == reg) { param->retval = SUBREG_REG (*ptr); return 1; } break; default: break; } return 0; } /* Process all immediate successors of the entry block looking for pseudo registers which are live on entry. Find all of those whose first instance is a partial register reference of some kind, and initialize them to 0 after the entry block. This will prevent bit sets within registers whose value is unknown, and may contain some kind of sticky bits we don't want. */ int initialize_uninitialized_subregs (void) { rtx insn; edge e; int reg, did_something = 0; find_regno_partial_param param; for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) { basic_block bb = e->dest; regset map = bb->global_live_at_start; EXECUTE_IF_SET_IN_REG_SET (map, FIRST_PSEUDO_REGISTER, reg, { int uid = REGNO_FIRST_UID (reg); rtx i; /* Find an insn which mentions the register we are looking for. It's preferable to have an instance of the register's rtl since there may be various flags set which we need to duplicate. If we can't find it, it's probably an automatic whose initial value doesn't matter, or hopefully something we don't care about. */ for (i = get_insns (); i && INSN_UID (i) != uid; i = NEXT_INSN (i)) ; if (i != NULL_RTX) { /* Found the insn, now get the REG rtx, if we can. */ param.regno_to_find = reg; for_each_rtx (&i, find_regno_partial, &param); if (param.retval != NULL_RTX) { start_sequence (); emit_move_insn (param.retval, CONST0_RTX (GET_MODE (param.retval))); insn = get_insns (); end_sequence (); insert_insn_on_edge (insn, e); did_something = 1; } } }); } if (did_something) commit_edge_insertions (); return did_something; } /* Subroutines of life analysis. */ /* Allocate the permanent data structures that represent the results of life analysis. Not static since used also for stupid life analysis.
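Each block gets its global_live_at_start and global_live_at_end regsets, and regs_live_at_setjmp is set up as well; all of them are allocated on flow_obstack.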
*/ void allocate_bb_life_data (void) { basic_block bb; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack); bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack); } regs_live_at_setjmp = OBSTACK_ALLOC_REG_SET (&flow_obstack); } void allocate_reg_life_data (void) { int i; max_regno = max_reg_num (); if (reg_deaths) abort (); reg_deaths = xcalloc (sizeof (*reg_deaths), max_regno); /* Recalculate the register space, in case it has grown. Old style vector oriented regsets would set regset_{size,bytes} here also. */ allocate_reg_info (max_regno, FALSE, FALSE); /* Reset all the data we'll collect in propagate_block and its subroutines. */ for (i = 0; i < max_regno; i++) { REG_N_SETS (i) = 0; REG_N_REFS (i) = 0; REG_N_DEATHS (i) = 0; REG_N_CALLS_CROSSED (i) = 0; REG_LIVE_LENGTH (i) = 0; REG_FREQ (i) = 0; REG_BASIC_BLOCK (i) = REG_BLOCK_UNKNOWN; } } /* Delete dead instructions for propagate_block. */ static void propagate_block_delete_insn (rtx insn) { rtx inote = find_reg_note (insn, REG_LABEL, NULL_RTX); /* If the insn referred to a label, and that label was attached to an ADDR_VEC, it's safe to delete the ADDR_VEC. In fact, it's pretty much mandatory to delete it, because the ADDR_VEC may be referencing labels that no longer exist. INSN may reference a deleted label, particularly when a jump table has been optimized into a direct jump. There's no real good way to fix up the reference to the deleted label when the label is deleted, so we just allow it here. */ if (inote && GET_CODE (inote) == CODE_LABEL) { rtx label = XEXP (inote, 0); rtx next; /* The label may be forced if it has been put in the constant pool. If that is the only use we must discard the table jump following it, but not the label itself. */ if (LABEL_NUSES (label) == 1 + LABEL_PRESERVE_P (label) && (next = next_nonnote_insn (label)) != NULL && GET_CODE (next) == JUMP_INSN && (GET_CODE (PATTERN (next)) == ADDR_VEC || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC)) { rtx pat = PATTERN (next); int diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC; int len = XVECLEN (pat, diff_vec_p); int i; for (i = 0; i < len; i++) LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0))--; delete_insn_and_edges (next); ndead++; } } delete_insn_and_edges (insn); ndead++; } /* Delete dead libcalls for propagate_block. Return the insn before the libcall. */ static rtx propagate_block_delete_libcall (rtx insn, rtx note) { rtx first = XEXP (note, 0); rtx before = PREV_INSN (first); delete_insn_chain_and_edges (first, insn); ndead++; return before; } /* Update the life-status of regs for one insn. Return the previous insn. */ rtx propagate_one_insn (struct propagate_block_info *pbi, rtx insn) { rtx prev = PREV_INSN (insn); int flags = pbi->flags; int insn_is_dead = 0; int libcall_is_dead = 0; rtx note; int i; if (! INSN_P (insn)) return prev; note = find_reg_note (insn, REG_RETVAL, NULL_RTX); if (flags & PROP_SCAN_DEAD_CODE) { insn_is_dead = insn_dead_p (pbi, PATTERN (insn), 0, REG_NOTES (insn)); libcall_is_dead = (insn_is_dead && note != 0 && libcall_dead_p (pbi, note, insn)); } /* If an instruction consists of just dead store(s) on final pass, delete it. */ if ((flags & PROP_KILL_DEAD_CODE) && insn_is_dead) { /* If we're trying to delete a prologue or epilogue instruction that isn't flagged as possibly being dead, something is wrong. But if we are keeping the stack pointer depressed, we might well be deleting insns that are used to compute the amount to update it by, so they are fine. 
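The TYPE_RETURNS_STACK_DEPRESSED test below exempts exactly that case from the sanity check.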
*/ if (reload_completed && !(TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))) && (((HAVE_epilogue || HAVE_prologue) && prologue_epilogue_contains (insn)) || (HAVE_sibcall_epilogue && sibcall_epilogue_contains (insn))) && find_reg_note (insn, REG_MAYBE_DEAD, NULL_RTX) == 0) fatal_insn ("Attempt to delete prologue/epilogue insn:", insn); /* Record sets. Do this even for dead instructions, since they would have killed the values if they hadn't been deleted. */ mark_set_regs_flow (pbi, PATTERN (insn), insn); /* CC0 is now known to be dead. Either this insn used it, in which case it doesn't anymore, or clobbered it, so the next insn can't use it. */ pbi->cc0_live = 0; if (libcall_is_dead) prev = propagate_block_delete_libcall ( insn, note); else { /* If INSN contains a RETVAL note and is dead, but the libcall as a whole is not dead, then we want to remove INSN, but not the whole libcall sequence. However, we need to also remove the dangling REG_LIBCALL note so that we do not have mis-matched LIBCALL/RETVAL notes. In theory we could find a new location for the REG_RETVAL note, but it hardly seems worth the effort. NOTE at this point will be the RETVAL note if it exists. */ if (note) { rtx libcall_note; libcall_note = find_reg_note (XEXP (note, 0), REG_LIBCALL, NULL_RTX); remove_note (XEXP (note, 0), libcall_note); } /* Similarly if INSN contains a LIBCALL note, remove the dangling REG_RETVAL note. */ note = find_reg_note (insn, REG_LIBCALL, NULL_RTX); if (note) { rtx retval_note; retval_note = find_reg_note (XEXP (note, 0), REG_RETVAL, NULL_RTX); remove_note (XEXP (note, 0), retval_note); } /* Now delete INSN. */ propagate_block_delete_insn (insn); } return prev; } /* See if this is an increment or decrement that can be merged into a following memory address. */ #ifdef AUTO_INC_DEC { rtx x = single_set (insn); /* Does this instruction increment or decrement a register? */ if ((flags & PROP_AUTOINC) && x != 0 && REG_P (SET_DEST (x)) && (GET_CODE (SET_SRC (x)) == PLUS || GET_CODE (SET_SRC (x)) == MINUS) && XEXP (SET_SRC (x), 0) == SET_DEST (x) && GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT /* Ok, look for a following memory ref we can combine with. If one is found, change the memory ref to a PRE_INC or PRE_DEC, cancel this insn, and return 1. Return 0 if nothing has been done. */ && try_pre_increment_1 (pbi, insn)) return prev; } #endif /* AUTO_INC_DEC */ CLEAR_REG_SET (pbi->new_set); /* If this is not the final pass, and this insn is copying the value of a library call and it's dead, don't scan the insns that perform the library call, so that the call's arguments are not marked live. */ if (libcall_is_dead) { /* Record the death of the dest reg. */ mark_set_regs_flow (pbi, PATTERN (insn), insn); insn = XEXP (note, 0); return PREV_INSN (insn); } else if (GET_CODE (PATTERN (insn)) == SET && SET_DEST (PATTERN (insn)) == stack_pointer_rtx && GET_CODE (SET_SRC (PATTERN (insn))) == PLUS && XEXP (SET_SRC (PATTERN (insn)), 0) == stack_pointer_rtx && GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 1)) == CONST_INT) { /* We have an insn to pop a constant amount off the stack. (Such insns use PLUS regardless of the direction of the stack, and any insn to adjust the stack by a constant is always a pop or part of a push.) These insns, if not dead stores, have no effect on life, though they do have an effect on the memory stores we are tracking. 
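A typical instance (purely illustrative) is (set (reg sp) (plus (reg sp) (const_int 16))), which pops 16 bytes on a downward-growing stack.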
*/ invalidate_mems_from_set (pbi, stack_pointer_rtx); /* Still, we need to update local_set, lest ifcvt.c:dead_or_predicable concludes that the stack pointer is not modified. */ mark_set_regs_flow (pbi, PATTERN (insn), insn); } else { rtx note; /* Any regs live at the time of a call instruction must not go in a register clobbered by calls. Find all regs now live and record this for them. */ if (GET_CODE (insn) == CALL_INSN && (flags & PROP_REG_INFO)) EXECUTE_IF_SET_IN_REG_SET (pbi->reg_live, 0, i, { REG_N_CALLS_CROSSED (i)++; }); /* Record sets. Do this even for dead instructions, since they would have killed the values if they hadn't been deleted. */ mark_set_regs_flow (pbi, PATTERN (insn), insn); if (GET_CODE (insn) == CALL_INSN) { regset live_at_end; bool sibcall_p; rtx note, cond; int i; cond = NULL_RTX; if (GET_CODE (PATTERN (insn)) == COND_EXEC) cond = COND_EXEC_TEST (PATTERN (insn)); /* Non-constant calls clobber memory, constant calls do not clobber memory, though they may clobber outgoing arguments on the stack. */ if (! CONST_OR_PURE_CALL_P (insn)) { free_EXPR_LIST_list (&pbi->mem_set_list); pbi->mem_set_list_len = 0; } else invalidate_mems_from_set (pbi, stack_pointer_rtx); /* There may be extra registers to be clobbered. */ for (note = CALL_INSN_FUNCTION_USAGE (insn); note; note = XEXP (note, 1)) if (GET_CODE (XEXP (note, 0)) == CLOBBER) mark_set_1 (pbi, CLOBBER, XEXP (XEXP (note, 0), 0), cond, insn, pbi->flags); /* Calls change all call-used and global registers; sibcalls do not clobber anything that must be preserved at end-of-function, except for return values. */ sibcall_p = SIBLING_CALL_P (insn); live_at_end = EXIT_BLOCK_PTR->global_live_at_start; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i) && ! (sibcall_p && REGNO_REG_SET_P (live_at_end, i) && ! refers_to_regno_p (i, i+1, current_function_return_rtx, (rtx *) 0))) { enum rtx_code code = global_regs[i] ? SET : CLOBBER; /* We do not want REG_UNUSED notes for these registers. */ mark_set_1 (pbi, code, regno_reg_rtx[i], cond, insn, pbi->flags & ~(PROP_DEATH_NOTES | PROP_REG_INFO)); } } /* If an insn doesn't use CC0, it becomes dead since we assume that every insn clobbers it. So show it dead here; mark_used_regs will set it live if it is referenced. */ pbi->cc0_live = 0; /* Record uses. */ if (! insn_is_dead) mark_used_regs (pbi, PATTERN (insn), NULL_RTX, insn); if ((flags & PROP_EQUAL_NOTES) && ((note = find_reg_note (insn, REG_EQUAL, NULL_RTX)) || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)))) mark_used_regs (pbi, XEXP (note, 0), NULL_RTX, insn); /* Sometimes we may have inserted something before INSN (such as a move) when we make an auto-inc. So ensure we will scan those insns. */ #ifdef AUTO_INC_DEC prev = PREV_INSN (insn); #endif if (! insn_is_dead && GET_CODE (insn) == CALL_INSN) { int i; rtx note, cond; cond = NULL_RTX; if (GET_CODE (PATTERN (insn)) == COND_EXEC) cond = COND_EXEC_TEST (PATTERN (insn)); /* Calls use their arguments, and may clobber memory which address involves some register. */ for (note = CALL_INSN_FUNCTION_USAGE (insn); note; note = XEXP (note, 1)) /* We find USE or CLOBBER entities in a FUNCTION_USAGE list: both of which mark_used_regs knows how to handle. */ mark_used_regs (pbi, XEXP (XEXP (note, 0), 0), cond, insn); /* The stack ptr is used (honorarily) by a CALL insn. 
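Force it live here even though the call pattern contains no explicit USE of it; under PROP_REG_INFO also note the insn number so the live-length bookkeeping stays consistent.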
*/ if ((flags & PROP_REG_INFO) && !REGNO_REG_SET_P (pbi->reg_live, STACK_POINTER_REGNUM)) reg_deaths[STACK_POINTER_REGNUM] = pbi->insn_num; SET_REGNO_REG_SET (pbi->reg_live, STACK_POINTER_REGNUM); /* Calls may also reference any of the global registers, so they are made live. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (global_regs[i]) mark_used_reg (pbi, regno_reg_rtx[i], cond, insn); } } pbi->insn_num++; return prev; } /* Initialize a propagate_block_info struct for public consumption. Note that the structure itself is opaque to this file, but that the user can use the regsets provided here. */ struct propagate_block_info * init_propagate_block_info (basic_block bb, regset live, regset local_set, regset cond_local_set, int flags) { struct propagate_block_info *pbi = xmalloc (sizeof (*pbi)); pbi->bb = bb; pbi->reg_live = live; pbi->mem_set_list = NULL_RTX; pbi->mem_set_list_len = 0; pbi->local_set = local_set; pbi->cond_local_set = cond_local_set; pbi->cc0_live = 0; pbi->flags = flags; pbi->insn_num = 0; if (flags & (PROP_LOG_LINKS | PROP_AUTOINC)) pbi->reg_next_use = xcalloc (max_reg_num (), sizeof (rtx)); else pbi->reg_next_use = NULL; pbi->new_set = BITMAP_XMALLOC (); #ifdef HAVE_conditional_execution pbi->reg_cond_dead = splay_tree_new (splay_tree_compare_ints, NULL, free_reg_cond_life_info); pbi->reg_cond_reg = BITMAP_XMALLOC (); /* If this block ends in a conditional branch, for each register live from one side of the branch and not the other, record the register as conditionally dead. */ if (GET_CODE (BB_END (bb)) == JUMP_INSN && any_condjump_p (BB_END (bb))) { regset_head diff_head; regset diff = INITIALIZE_REG_SET (diff_head); basic_block bb_true, bb_false; int i; /* Identify the successor blocks. */ bb_true = bb->succ->dest; if (bb->succ->succ_next != NULL) { bb_false = bb->succ->succ_next->dest; if (bb->succ->flags & EDGE_FALLTHRU) { basic_block t = bb_false; bb_false = bb_true; bb_true = t; } else if (! (bb->succ->succ_next->flags & EDGE_FALLTHRU)) abort (); } else { /* This can happen with a conditional jump to the next insn. */ if (JUMP_LABEL (BB_END (bb)) != BB_HEAD (bb_true)) abort (); /* Simplest way to do nothing. */ bb_false = bb_true; } /* Compute which register lead different lives in the successors. */ if (bitmap_operation (diff, bb_true->global_live_at_start, bb_false->global_live_at_start, BITMAP_XOR)) { /* Extract the condition from the branch. */ rtx set_src = SET_SRC (pc_set (BB_END (bb))); rtx cond_true = XEXP (set_src, 0); rtx reg = XEXP (cond_true, 0); if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); /* We can only track conditional lifetimes if the condition is in the form of a comparison of a register against zero. If the condition is more complex than that, then it is safe not to record any information. */ if (REG_P (reg) && XEXP (cond_true, 1) == const0_rtx) { rtx cond_false = gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond_true)), GET_MODE (cond_true), XEXP (cond_true, 0), XEXP (cond_true, 1)); if (GET_CODE (XEXP (set_src, 1)) == PC) { rtx t = cond_false; cond_false = cond_true; cond_true = t; } SET_REGNO_REG_SET (pbi->reg_cond_reg, REGNO (reg)); /* For each such register, mark it conditionally dead. 
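The condition recorded is the one under which the register is dead at the end of this block: a register live only into the true successor is dead under the false condition, and vice versa.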
*/ EXECUTE_IF_SET_IN_REG_SET (diff, 0, i, { struct reg_cond_life_info *rcli; rtx cond; rcli = xmalloc (sizeof (*rcli)); if (REGNO_REG_SET_P (bb_true->global_live_at_start, i)) cond = cond_false; else cond = cond_true; rcli->condition = cond; rcli->stores = const0_rtx; rcli->orig_condition = cond; splay_tree_insert (pbi->reg_cond_dead, i, (splay_tree_value) rcli); }); } } FREE_REG_SET (diff); } #endif /* If this block has no successors, any stores to the frame that aren't used later in the block are dead. So make a pass over the block recording any such that are made and show them dead at the end. We do a very conservative and simple job here. */ if (optimize && ! (TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))) && (flags & PROP_SCAN_DEAD_STORES) && (bb->succ == NULL || (bb->succ->succ_next == NULL && bb->succ->dest == EXIT_BLOCK_PTR && ! current_function_calls_eh_return))) { rtx insn, set; for (insn = BB_END (bb); insn != BB_HEAD (bb); insn = PREV_INSN (insn)) if (GET_CODE (insn) == INSN && (set = single_set (insn)) && MEM_P (SET_DEST (set))) { rtx mem = SET_DEST (set); rtx canon_mem = canon_rtx (mem); if (XEXP (canon_mem, 0) == frame_pointer_rtx || (GET_CODE (XEXP (canon_mem, 0)) == PLUS && XEXP (XEXP (canon_mem, 0), 0) == frame_pointer_rtx && GET_CODE (XEXP (XEXP (canon_mem, 0), 1)) == CONST_INT)) add_to_mem_set_list (pbi, canon_mem); } } return pbi; } /* Release a propagate_block_info struct. */ void free_propagate_block_info (struct propagate_block_info *pbi) { free_EXPR_LIST_list (&pbi->mem_set_list); BITMAP_XFREE (pbi->new_set); #ifdef HAVE_conditional_execution splay_tree_delete (pbi->reg_cond_dead); BITMAP_XFREE (pbi->reg_cond_reg); #endif if (pbi->flags & PROP_REG_INFO) { int num = pbi->insn_num; int i; EXECUTE_IF_SET_IN_REG_SET (pbi->reg_live, 0, i, { REG_LIVE_LENGTH (i) += num - reg_deaths[i]; reg_deaths[i] = 0; }); } if (pbi->reg_next_use) free (pbi->reg_next_use); free (pbi); } /* Compute the registers live at the beginning of a basic block BB from those live at the end. When called, REG_LIVE contains those live at the end. On return, it contains those live at the beginning. LOCAL_SET, if non-null, will be set with all registers killed unconditionally by this basic block. Likewise, COND_LOCAL_SET, if non-null, will be set with all registers killed conditionally by this basic block. If there is any unconditional set of a register, then the corresponding bit will be set in LOCAL_SET and cleared in COND_LOCAL_SET. It is valid for LOCAL_SET and COND_LOCAL_SET to be the same set. In this case, the resulting set will be equal to the union of the two sets that would otherwise be computed. Return nonzero if an INSN is deleted (i.e. by dead code removal). */ int propagate_block (basic_block bb, regset live, regset local_set, regset cond_local_set, int flags) { struct propagate_block_info *pbi; rtx insn, prev; int changed; pbi = init_propagate_block_info (bb, live, local_set, cond_local_set, flags); if (flags & PROP_REG_INFO) { int i; /* Process the regs live at the end of the block. Mark them as not local to any one basic block. */ EXECUTE_IF_SET_IN_REG_SET (live, 0, i, { REG_BASIC_BLOCK (i) = REG_BLOCK_GLOBAL; }); } /* Scan the block an insn at a time from end to beginning. */ changed = 0; for (insn = BB_END (bb); ; insn = prev) { /* If this is a call to `setjmp' et al, warn if any non-volatile datum is live. 
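Only the record is made here, by accumulating the current live set into regs_live_at_setjmp; the diagnostic itself is issued elsewhere, via regno_clobbered_at_setjmp below.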
*/ if ((flags & PROP_REG_INFO) && GET_CODE (insn) == CALL_INSN && find_reg_note (insn, REG_SETJMP, NULL)) IOR_REG_SET (regs_live_at_setjmp, pbi->reg_live); prev = propagate_one_insn (pbi, insn); if (!prev) changed |= insn != get_insns (); else changed |= NEXT_INSN (prev) != insn; if (insn == BB_HEAD (bb)) break; } free_propagate_block_info (pbi); return changed; } /* Return 1 if X (the body of an insn, or part of it) is just dead stores (SET expressions whose destinations are registers dead after the insn). NEEDED is the regset that says which regs are alive after the insn. Unless CALL_OK is nonzero, an insn is needed if it contains a CALL. If X is the entire body of an insn, NOTES contains the reg notes pertaining to the insn. */ static int insn_dead_p (struct propagate_block_info *pbi, rtx x, int call_ok, rtx notes ATTRIBUTE_UNUSED) { enum rtx_code code = GET_CODE (x); /* Don't eliminate insns that may trap. */ if (flag_non_call_exceptions && may_trap_p (x)) return 0; #ifdef AUTO_INC_DEC /* As flow is invoked after combine, we must take existing AUTO_INC expressions into account. */ for (; notes; notes = XEXP (notes, 1)) { if (REG_NOTE_KIND (notes) == REG_INC) { int regno = REGNO (XEXP (notes, 0)); /* Don't delete insns to set global regs. */ if ((regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) || REGNO_REG_SET_P (pbi->reg_live, regno)) return 0; } } #endif /* If setting something that's a reg or part of one, see if that register's altered value will be live. */ if (code == SET) { rtx r = SET_DEST (x); #ifdef HAVE_cc0 if (GET_CODE (r) == CC0) return ! pbi->cc0_live; #endif /* A SET that is a subroutine call cannot be dead. */ if (GET_CODE (SET_SRC (x)) == CALL) { if (! call_ok) return 0; } /* Don't eliminate loads from volatile memory or volatile asms. */ else if (volatile_refs_p (SET_SRC (x))) return 0; if (MEM_P (r)) { rtx temp, canon_r; if (MEM_VOLATILE_P (r) || GET_MODE (r) == BLKmode) return 0; canon_r = canon_rtx (r); /* Walk the set of memory locations we are currently tracking and see if one is an identical match to this memory location. If so, this memory write is dead (remember, we're walking backwards from the end of the block to the start). Since rtx_equal_p does not check the alias set or flags, we also must have the potential for them to conflict (anti_dependence). */ for (temp = pbi->mem_set_list; temp != 0; temp = XEXP (temp, 1)) if (unchanging_anti_dependence (r, XEXP (temp, 0))) { rtx mem = XEXP (temp, 0); if (rtx_equal_p (XEXP (canon_r, 0), XEXP (mem, 0)) && (GET_MODE_SIZE (GET_MODE (canon_r)) <= GET_MODE_SIZE (GET_MODE (mem)))) return 1; #ifdef AUTO_INC_DEC /* Check if memory reference matches an auto increment. Only post increment/decrement or modify are valid. */ if (GET_MODE (mem) == GET_MODE (r) && (GET_CODE (XEXP (mem, 0)) == POST_DEC || GET_CODE (XEXP (mem, 0)) == POST_INC || GET_CODE (XEXP (mem, 0)) == POST_MODIFY) && GET_MODE (XEXP (mem, 0)) == GET_MODE (r) && rtx_equal_p (XEXP (XEXP (mem, 0), 0), XEXP (r, 0))) return 1; #endif } } else { while (GET_CODE (r) == SUBREG || GET_CODE (r) == STRICT_LOW_PART || GET_CODE (r) == ZERO_EXTRACT) r = XEXP (r, 0); if (REG_P (r)) { int regno = REGNO (r); /* Obvious. */ if (REGNO_REG_SET_P (pbi->reg_live, regno)) return 0; /* If this is a hard register, verify that subsequent words are not needed. */ if (regno < FIRST_PSEUDO_REGISTER) { int n = hard_regno_nregs[regno][GET_MODE (r)]; while (--n > 0) if (REGNO_REG_SET_P (pbi->reg_live, regno+n)) return 0; } /* Don't delete insns to set global regs. 
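A global register is visible outside this function, so a store into it can never be proved dead from this function alone.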
*/ if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) return 0; /* Make sure insns to set the stack pointer aren't deleted. */ if (regno == STACK_POINTER_REGNUM) return 0; /* ??? These bits might be redundant with the force live bits in calculate_global_regs_live. We would delete from sequential sets; whether this actually affects real code for anything but the stack pointer I don't know. */ /* Make sure insns to set the frame pointer aren't deleted. */ if (regno == FRAME_POINTER_REGNUM && (! reload_completed || frame_pointer_needed)) return 0; #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM if (regno == HARD_FRAME_POINTER_REGNUM && (! reload_completed || frame_pointer_needed)) return 0; #endif #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM /* Make sure insns to set arg pointer are never deleted (if the arg pointer isn't fixed, there will be a USE for it, so we can treat it normally). */ if (regno == ARG_POINTER_REGNUM && fixed_regs[regno]) return 0; #endif /* Otherwise, the set is dead. */ return 1; } } } /* If performing several activities, insn is dead if each activity is individually dead. Also, CLOBBERs and USEs can be ignored; a CLOBBER or USE that's inside a PARALLEL doesn't make the insn worth keeping. */ else if (code == PARALLEL) { int i = XVECLEN (x, 0); for (i--; i >= 0; i--) if (GET_CODE (XVECEXP (x, 0, i)) != CLOBBER && GET_CODE (XVECEXP (x, 0, i)) != USE && ! insn_dead_p (pbi, XVECEXP (x, 0, i), call_ok, NULL_RTX)) return 0; return 1; } /* A CLOBBER of a pseudo-register that is dead serves no purpose. That is not necessarily true for hard registers until after reload. */ else if (code == CLOBBER) { if (REG_P (XEXP (x, 0)) && (REGNO (XEXP (x, 0)) >= FIRST_PSEUDO_REGISTER || reload_completed) && ! REGNO_REG_SET_P (pbi->reg_live, REGNO (XEXP (x, 0)))) return 1; } /* ??? A base USE is a historical relic. It ought not be needed anymore. Instances where it is still used are either (1) temporary and the USE escaped the pass, (2) cruft and the USE need not be emitted anymore, or (3) hiding bugs elsewhere that are not properly representing data flow. */ return 0; } /* If INSN is the last insn in a libcall, and assuming INSN is dead, return 1 if the entire library call is dead. This is true if INSN copies a register (hard or pseudo) and if the hard return reg of the call insn is dead. (The caller should have tested the destination of the SET inside INSN already for death.) If this insn doesn't just copy a register, then we don't have an ordinary libcall. In that case, cse could not have managed to substitute the source for the dest later on, so we can assume the libcall is dead. PBI is the block info giving pseudoregs live before this insn. NOTE is the REG_RETVAL note of the insn. */ static int libcall_dead_p (struct propagate_block_info *pbi, rtx note, rtx insn) { rtx x = single_set (insn); if (x) { rtx r = SET_SRC (x); if (REG_P (r)) { rtx call = XEXP (note, 0); rtx call_pat; int i; /* Find the call insn. */ while (call != insn && GET_CODE (call) != CALL_INSN) call = NEXT_INSN (call); /* If there is none, do nothing special, since ordinary death handling can understand these insns. */ if (call == insn) return 0; /* See if the hard reg holding the value is dead. If this is a PARALLEL, find the call within it. 
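That is, locate the element of the PARALLEL that is a SET whose source is the CALL itself.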
*/ call_pat = PATTERN (call); if (GET_CODE (call_pat) == PARALLEL) { for (i = XVECLEN (call_pat, 0) - 1; i >= 0; i--) if (GET_CODE (XVECEXP (call_pat, 0, i)) == SET && GET_CODE (SET_SRC (XVECEXP (call_pat, 0, i))) == CALL) break; /* This may be a library call that is returning a value via invisible pointer. Do nothing special, since ordinary death handling can understand these insns. */ if (i < 0) return 0; call_pat = XVECEXP (call_pat, 0, i); } return insn_dead_p (pbi, call_pat, 1, REG_NOTES (call)); } } return 1; } /* 1 if register REGNO was alive at a place where `setjmp' was called and was set more than once or is an argument. Such regs may be clobbered by `longjmp'. */ int regno_clobbered_at_setjmp (int regno) { if (n_basic_blocks == 0) return 0; return ((REG_N_SETS (regno) > 1 || REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, regno)) && REGNO_REG_SET_P (regs_live_at_setjmp, regno)); } /* Add MEM to PBI->MEM_SET_LIST. MEM should be canonical. Respect the maximal list size; look for overlaps in mode and select the largest. */ static void add_to_mem_set_list (struct propagate_block_info *pbi, rtx mem) { rtx i; /* We don't know how large a BLKmode store is, so we must not take them into consideration. */ if (GET_MODE (mem) == BLKmode) return; for (i = pbi->mem_set_list; i ; i = XEXP (i, 1)) { rtx e = XEXP (i, 0); if (rtx_equal_p (XEXP (mem, 0), XEXP (e, 0))) { if (GET_MODE_SIZE (GET_MODE (mem)) > GET_MODE_SIZE (GET_MODE (e))) { #ifdef AUTO_INC_DEC /* If we must store a copy of the mem, we can just modify the mode of the stored copy. */ if (pbi->flags & PROP_AUTOINC) PUT_MODE (e, GET_MODE (mem)); else #endif XEXP (i, 0) = mem; } return; } } if (pbi->mem_set_list_len < MAX_MEM_SET_LIST_LEN) { #ifdef AUTO_INC_DEC /* Store a copy of mem, otherwise the address may be scrogged by find_auto_inc. */ if (pbi->flags & PROP_AUTOINC) mem = shallow_copy_rtx (mem); #endif pbi->mem_set_list = alloc_EXPR_LIST (0, mem, pbi->mem_set_list); pbi->mem_set_list_len++; } } /* INSN references memory, possibly using autoincrement addressing modes. Find any entries on the mem_set_list that need to be invalidated due to an address change. */ static int invalidate_mems_from_autoinc (rtx *px, void *data) { rtx x = *px; struct propagate_block_info *pbi = data; if (GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC) { invalidate_mems_from_set (pbi, XEXP (x, 0)); return -1; } return 0; } /* EXP is a REG. Remove any dependent entries from pbi->mem_set_list. */ static void invalidate_mems_from_set (struct propagate_block_info *pbi, rtx exp) { rtx temp = pbi->mem_set_list; rtx prev = NULL_RTX; rtx next; while (temp) { next = XEXP (temp, 1); if (reg_overlap_mentioned_p (exp, XEXP (temp, 0))) { /* Splice this entry out of the list. */ if (prev) XEXP (prev, 1) = next; else pbi->mem_set_list = next; free_EXPR_LIST_node (temp); pbi->mem_set_list_len--; } else prev = temp; temp = next; } } /* Process the registers that are set within X. Their bits are set to 1 in the regset DEAD, because they are dead prior to this insn. If INSN is nonzero, it is the insn being processed. FLAGS is the set of operations to perform. */ static void mark_set_regs_flow (struct propagate_block_info *pbi, rtx x, rtx insn) { rtx cond = NULL_RTX; rtx link; enum rtx_code code; int flags = pbi->flags; if (insn) for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) { if (REG_NOTE_KIND (link) == REG_INC) mark_set_1 (pbi, SET, XEXP (link, 0), (GET_CODE (x) == COND_EXEC ? 
COND_EXEC_TEST (x) : NULL_RTX), insn, flags); } retry: switch (code = GET_CODE (x)) { case SET: if (GET_CODE (XEXP (x, 1)) == ASM_OPERANDS) flags |= PROP_ASM_SCAN; /* Fall through */ case CLOBBER: mark_set_1 (pbi, code, SET_DEST (x), cond, insn, flags); return; case COND_EXEC: cond = COND_EXEC_TEST (x); x = COND_EXEC_CODE (x); goto retry; case PARALLEL: { int i; /* We must scan forwards. If we have an asm, we need to set the PROP_ASM_SCAN flag before scanning the clobbers. */ for (i = 0; i < XVECLEN (x, 0); i++) { rtx sub = XVECEXP (x, 0, i); switch (code = GET_CODE (sub)) { case COND_EXEC: if (cond != NULL_RTX) abort (); cond = COND_EXEC_TEST (sub); sub = COND_EXEC_CODE (sub); if (GET_CODE (sub) == SET) goto mark_set; if (GET_CODE (sub) == CLOBBER) goto mark_clob; break; case SET: mark_set: if (GET_CODE (XEXP (sub, 1)) == ASM_OPERANDS) flags |= PROP_ASM_SCAN; /* Fall through */ case CLOBBER: mark_clob: mark_set_1 (pbi, code, SET_DEST (sub), cond, insn, flags); break; case ASM_OPERANDS: flags |= PROP_ASM_SCAN; break; default: break; } } break; } default: break; } } /* Process a single set, which appears in INSN. REG (which may not actually be a REG, it may also be a SUBREG, PARALLEL, etc.) is being set using the CODE (which may be SET, CLOBBER, or COND_EXEC). If the set is conditional (because it appear in a COND_EXEC), COND will be the condition. */ static void mark_set_1 (struct propagate_block_info *pbi, enum rtx_code code, rtx reg, rtx cond, rtx insn, int flags) { int regno_first = -1, regno_last = -1; unsigned long not_dead = 0; int i; /* Modifying just one hardware register of a multi-reg value or just a byte field of a register does not mean the value from before this insn is now dead. Of course, if it was dead after it's unused now. */ switch (GET_CODE (reg)) { case PARALLEL: /* Some targets place small structures in registers for return values of functions. We have to detect this case specially here to get correct flow information. */ for (i = XVECLEN (reg, 0) - 1; i >= 0; i--) if (XEXP (XVECEXP (reg, 0, i), 0) != 0) mark_set_1 (pbi, code, XEXP (XVECEXP (reg, 0, i), 0), cond, insn, flags); return; case ZERO_EXTRACT: case SIGN_EXTRACT: case STRICT_LOW_PART: /* ??? Assumes STRICT_LOW_PART not used on multi-word registers. */ do reg = XEXP (reg, 0); while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == STRICT_LOW_PART); if (MEM_P (reg)) break; not_dead = (unsigned long) REGNO_REG_SET_P (pbi->reg_live, REGNO (reg)); /* Fall through. */ case REG: regno_last = regno_first = REGNO (reg); if (regno_first < FIRST_PSEUDO_REGISTER) regno_last += hard_regno_nregs[regno_first][GET_MODE (reg)] - 1; break; case SUBREG: if (REG_P (SUBREG_REG (reg))) { enum machine_mode outer_mode = GET_MODE (reg); enum machine_mode inner_mode = GET_MODE (SUBREG_REG (reg)); /* Identify the range of registers affected. This is moderately tricky for hard registers. See alter_subreg. */ regno_last = regno_first = REGNO (SUBREG_REG (reg)); if (regno_first < FIRST_PSEUDO_REGISTER) { regno_first += subreg_regno_offset (regno_first, inner_mode, SUBREG_BYTE (reg), outer_mode); regno_last = (regno_first + hard_regno_nregs[regno_first][outer_mode] - 1); /* Since we've just adjusted the register number ranges, make sure REG matches. Otherwise some_was_live will be clear when it shouldn't have been, and we'll create incorrect REG_UNUSED notes. 
*/ reg = gen_rtx_REG (outer_mode, regno_first); } else { /* If the number of words in the subreg is less than the number of words in the full register, we have a well-defined partial set. Otherwise the high bits are undefined. This is only really applicable to pseudos, since we just took care of multi-word hard registers. */ if (((GET_MODE_SIZE (outer_mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD) < ((GET_MODE_SIZE (inner_mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) not_dead = (unsigned long) REGNO_REG_SET_P (pbi->reg_live, regno_first); reg = SUBREG_REG (reg); } } else reg = SUBREG_REG (reg); break; default: break; } /* If this set is a MEM, then it kills any aliased writes. If this set is a REG, then it kills any MEMs which use the reg. */ if (optimize && (flags & PROP_SCAN_DEAD_STORES)) { if (REG_P (reg)) invalidate_mems_from_set (pbi, reg); /* If the memory reference had embedded side effects (autoincrement address modes. Then we may need to kill some entries on the memory set list. */ if (insn && MEM_P (reg)) for_each_rtx (&PATTERN (insn), invalidate_mems_from_autoinc, pbi); if (MEM_P (reg) && ! side_effects_p (reg) /* ??? With more effort we could track conditional memory life. */ && ! cond) add_to_mem_set_list (pbi, canon_rtx (reg)); } if (REG_P (reg) && ! (regno_first == FRAME_POINTER_REGNUM && (! reload_completed || frame_pointer_needed)) #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM && ! (regno_first == HARD_FRAME_POINTER_REGNUM && (! reload_completed || frame_pointer_needed)) #endif #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM && ! (regno_first == ARG_POINTER_REGNUM && fixed_regs[regno_first]) #endif ) { int some_was_live = 0, some_was_dead = 0; for (i = regno_first; i <= regno_last; ++i) { int needed_regno = REGNO_REG_SET_P (pbi->reg_live, i); if (pbi->local_set) { /* Order of the set operation matters here since both sets may be the same. */ CLEAR_REGNO_REG_SET (pbi->cond_local_set, i); if (cond != NULL_RTX && ! REGNO_REG_SET_P (pbi->local_set, i)) SET_REGNO_REG_SET (pbi->cond_local_set, i); else SET_REGNO_REG_SET (pbi->local_set, i); } if (code != CLOBBER) SET_REGNO_REG_SET (pbi->new_set, i); some_was_live |= needed_regno; some_was_dead |= ! needed_regno; } #ifdef HAVE_conditional_execution /* Consider conditional death in deciding that the register needs a death note. */ if (some_was_live && ! not_dead /* The stack pointer is never dead. Well, not strictly true, but it's very difficult to tell from here. Hopefully combine_stack_adjustments will fix up the most egregious errors. */ && regno_first != STACK_POINTER_REGNUM) { for (i = regno_first; i <= regno_last; ++i) if (! mark_regno_cond_dead (pbi, i, cond)) not_dead |= ((unsigned long) 1) << (i - regno_first); } #endif /* Additional data to record if this is the final pass. */ if (flags & (PROP_LOG_LINKS | PROP_REG_INFO | PROP_DEATH_NOTES | PROP_AUTOINC)) { rtx y; int blocknum = pbi->bb->index; y = NULL_RTX; if (flags & (PROP_LOG_LINKS | PROP_AUTOINC)) { y = pbi->reg_next_use[regno_first]; /* The next use is no longer next, since a store intervenes. */ for (i = regno_first; i <= regno_last; ++i) pbi->reg_next_use[i] = 0; } if (flags & PROP_REG_INFO) { for (i = regno_first; i <= regno_last; ++i) { /* Count (weighted) references, stores, etc. This counts a register twice if it is modified, but that is correct. 
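The weight comes from REG_FREQ_FROM_BB, i.e. the block's estimated execution frequency, so (roughly) registers referenced in hot blocks look more expensive to spill.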
*/ REG_N_SETS (i) += 1; REG_N_REFS (i) += 1; REG_FREQ (i) += REG_FREQ_FROM_BB (pbi->bb); /* The insns where a reg is live are normally counted elsewhere, but we want the count to include the insn where the reg is set, and the normal counting mechanism would not count it. */ REG_LIVE_LENGTH (i) += 1; } /* If this is a hard reg, record this function uses the reg. */ if (regno_first < FIRST_PSEUDO_REGISTER) { for (i = regno_first; i <= regno_last; i++) regs_ever_live[i] = 1; if (flags & PROP_ASM_SCAN) for (i = regno_first; i <= regno_last; i++) regs_asm_clobbered[i] = 1; } else { /* Keep track of which basic blocks each reg appears in. */ if (REG_BASIC_BLOCK (regno_first) == REG_BLOCK_UNKNOWN) REG_BASIC_BLOCK (regno_first) = blocknum; else if (REG_BASIC_BLOCK (regno_first) != blocknum) REG_BASIC_BLOCK (regno_first) = REG_BLOCK_GLOBAL; } } if (! some_was_dead) { if (flags & PROP_LOG_LINKS) { /* Make a logical link from the next following insn that uses this register, back to this insn. The following insns have already been processed. We don't build a LOG_LINK for hard registers containing in ASM_OPERANDs. If these registers get replaced, we might wind up changing the semantics of the insn, even if reload can make what appear to be valid assignments later. We don't build a LOG_LINK for global registers to or from a function call. We don't want to let combine think that it knows what is going on with global registers. */ if (y && (BLOCK_NUM (y) == blocknum) && (regno_first >= FIRST_PSEUDO_REGISTER || (asm_noperands (PATTERN (y)) < 0 && ! ((GET_CODE (insn) == CALL_INSN || GET_CODE (y) == CALL_INSN) && global_regs[regno_first])))) LOG_LINKS (y) = alloc_INSN_LIST (insn, LOG_LINKS (y)); } } else if (not_dead) ; else if (! some_was_live) { if (flags & PROP_REG_INFO) REG_N_DEATHS (regno_first) += 1; if (flags & PROP_DEATH_NOTES) { /* Note that dead stores have already been deleted when possible. If we get here, we have found a dead store that cannot be eliminated (because the same insn does something useful). Indicate this by marking the reg being set as dying here. */ REG_NOTES (insn) = alloc_EXPR_LIST (REG_UNUSED, reg, REG_NOTES (insn)); } } else { if (flags & PROP_DEATH_NOTES) { /* This is a case where we have a multi-word hard register and some, but not all, of the words of the register are needed in subsequent insns. Write REG_UNUSED notes for those parts that were not needed. This case should be rare. */ for (i = regno_first; i <= regno_last; ++i) if (! REGNO_REG_SET_P (pbi->reg_live, i)) REG_NOTES (insn) = alloc_EXPR_LIST (REG_UNUSED, regno_reg_rtx[i], REG_NOTES (insn)); } } } /* Mark the register as being dead. */ if (some_was_live /* The stack pointer is never dead. Well, not strictly true, but it's very difficult to tell from here. Hopefully combine_stack_adjustments will fix up the most egregious errors. 
*/ && regno_first != STACK_POINTER_REGNUM) { for (i = regno_first; i <= regno_last; ++i) if (!(not_dead & (((unsigned long) 1) << (i - regno_first)))) { if ((pbi->flags & PROP_REG_INFO) && REGNO_REG_SET_P (pbi->reg_live, i)) { REG_LIVE_LENGTH (i) += pbi->insn_num - reg_deaths[i]; reg_deaths[i] = 0; } CLEAR_REGNO_REG_SET (pbi->reg_live, i); } } } else if (REG_P (reg)) { if (flags & (PROP_LOG_LINKS | PROP_AUTOINC)) pbi->reg_next_use[regno_first] = 0; if ((flags & PROP_REG_INFO) != 0 && (flags & PROP_ASM_SCAN) != 0 && regno_first < FIRST_PSEUDO_REGISTER) { for (i = regno_first; i <= regno_last; i++) regs_asm_clobbered[i] = 1; } } /* If this is the last pass and this is a SCRATCH, show it will be dying here and count it. */ else if (GET_CODE (reg) == SCRATCH) { if (flags & PROP_DEATH_NOTES) REG_NOTES (insn) = alloc_EXPR_LIST (REG_UNUSED, reg, REG_NOTES (insn)); } } #ifdef HAVE_conditional_execution /* Mark REGNO conditionally dead. Return true if the register is now unconditionally dead. */ static int mark_regno_cond_dead (struct propagate_block_info *pbi, int regno, rtx cond) { /* If this is a store to a predicate register, the value of the predicate is changing, we don't know that the predicate as seen before is the same as that seen after. Flush all dependent conditions from reg_cond_dead. This will make all such conditionally live registers unconditionally live. */ if (REGNO_REG_SET_P (pbi->reg_cond_reg, regno)) flush_reg_cond_reg (pbi, regno); /* If this is an unconditional store, remove any conditional life that may have existed. */ if (cond == NULL_RTX) splay_tree_remove (pbi->reg_cond_dead, regno); else { splay_tree_node node; struct reg_cond_life_info *rcli; rtx ncond; /* Otherwise this is a conditional set. Record that fact. It may have been conditionally used, or there may be a subsequent set with a complimentary condition. */ node = splay_tree_lookup (pbi->reg_cond_dead, regno); if (node == NULL) { /* The register was unconditionally live previously. Record the current condition as the condition under which it is dead. */ rcli = xmalloc (sizeof (*rcli)); rcli->condition = cond; rcli->stores = cond; rcli->orig_condition = const0_rtx; splay_tree_insert (pbi->reg_cond_dead, regno, (splay_tree_value) rcli); SET_REGNO_REG_SET (pbi->reg_cond_reg, REGNO (XEXP (cond, 0))); /* Not unconditionally dead. */ return 0; } else { /* The register was conditionally live previously. Add the new condition to the old. */ rcli = (struct reg_cond_life_info *) node->value; ncond = rcli->condition; ncond = ior_reg_cond (ncond, cond, 1); if (rcli->stores == const0_rtx) rcli->stores = cond; else if (rcli->stores != const1_rtx) rcli->stores = ior_reg_cond (rcli->stores, cond, 1); /* If the register is now unconditionally dead, remove the entry in the splay_tree. A register is unconditionally dead if the dead condition ncond is true. A register is also unconditionally dead if the sum of all conditional stores is an unconditional store (stores is true), and the dead condition is identically the same as the original dead condition initialized at the end of the block. This is a pointer compare, not an rtx_equal_p compare. */ if (ncond == const1_rtx || (ncond == rcli->orig_condition && rcli->stores == const1_rtx)) splay_tree_remove (pbi->reg_cond_dead, regno); else { rcli->condition = ncond; SET_REGNO_REG_SET (pbi->reg_cond_reg, REGNO (XEXP (cond, 0))); /* Not unconditionally dead. */ return 0; } } } return 1; } /* Called from splay_tree_delete for pbi->reg_cond_life. 
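(That is, from the reg_cond_dead splay tree; the value slots hold xmalloc'd struct reg_cond_life_info records, which we simply release here.)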
*/ static void free_reg_cond_life_info (splay_tree_value value) { struct reg_cond_life_info *rcli = (struct reg_cond_life_info *) value; free (rcli); } /* Helper function for flush_reg_cond_reg. */ static int flush_reg_cond_reg_1 (splay_tree_node node, void *data) { struct reg_cond_life_info *rcli; int *xdata = (int *) data; unsigned int regno = xdata[0]; /* Don't need to search if last flushed value was farther on in the in-order traversal. */ if (xdata[1] >= (int) node->key) return 0; /* Splice out portions of the expression that refer to regno. */ rcli = (struct reg_cond_life_info *) node->value; rcli->condition = elim_reg_cond (rcli->condition, regno); if (rcli->stores != const0_rtx && rcli->stores != const1_rtx) rcli->stores = elim_reg_cond (rcli->stores, regno); /* If the entire condition is now false, signal the node to be removed. */ if (rcli->condition == const0_rtx) { xdata[1] = node->key; return -1; } else if (rcli->condition == const1_rtx) abort (); return 0; } /* Flush all (sub) expressions referring to REGNO from REG_COND_LIVE. */ static void flush_reg_cond_reg (struct propagate_block_info *pbi, int regno) { int pair[2]; pair[0] = regno; pair[1] = -1; while (splay_tree_foreach (pbi->reg_cond_dead, flush_reg_cond_reg_1, pair) == -1) splay_tree_remove (pbi->reg_cond_dead, pair[1]); CLEAR_REGNO_REG_SET (pbi->reg_cond_reg, regno); } /* Logical arithmetic on predicate conditions. IOR, NOT and AND. For ior/and, the ADD flag determines whether we want to add the new condition X to the old one unconditionally. If it is zero, we will only return a new expression if X allows us to simplify part of OLD, otherwise we return NULL to the caller. If ADD is nonzero, we will return a new condition in all cases. The toplevel caller of one of these functions should always pass 1 for ADD. */ static rtx ior_reg_cond (rtx old, rtx x, int add) { rtx op0, op1; if (COMPARISON_P (old)) { if (COMPARISON_P (x) && REVERSE_CONDEXEC_PREDICATES_P (GET_CODE (x), GET_CODE (old)) && REGNO (XEXP (x, 0)) == REGNO (XEXP (old, 0))) return const1_rtx; if (GET_CODE (x) == GET_CODE (old) && REGNO (XEXP (x, 0)) == REGNO (XEXP (old, 0))) return old; if (! add) return NULL; return gen_rtx_IOR (0, old, x); } switch (GET_CODE (old)) { case IOR: op0 = ior_reg_cond (XEXP (old, 0), x, 0); op1 = ior_reg_cond (XEXP (old, 1), x, 0); if (op0 != NULL || op1 != NULL) { if (op0 == const0_rtx) return op1 ? op1 : gen_rtx_IOR (0, XEXP (old, 1), x); if (op1 == const0_rtx) return op0 ? op0 : gen_rtx_IOR (0, XEXP (old, 0), x); if (op0 == const1_rtx || op1 == const1_rtx) return const1_rtx; if (op0 == NULL) op0 = gen_rtx_IOR (0, XEXP (old, 0), x); else if (rtx_equal_p (x, op0)) /* (x | A) | x ~ (x | A). */ return old; if (op1 == NULL) op1 = gen_rtx_IOR (0, XEXP (old, 1), x); else if (rtx_equal_p (x, op1)) /* (A | x) | x ~ (A | x). */ return old; return gen_rtx_IOR (0, op0, op1); } if (! add) return NULL; return gen_rtx_IOR (0, old, x); case AND: op0 = ior_reg_cond (XEXP (old, 0), x, 0); op1 = ior_reg_cond (XEXP (old, 1), x, 0); if (op0 != NULL || op1 != NULL) { if (op0 == const1_rtx) return op1 ? op1 : gen_rtx_IOR (0, XEXP (old, 1), x); if (op1 == const1_rtx) return op0 ? op0 : gen_rtx_IOR (0, XEXP (old, 0), x); if (op0 == const0_rtx || op1 == const0_rtx) return const0_rtx; if (op0 == NULL) op0 = gen_rtx_IOR (0, XEXP (old, 0), x); else if (rtx_equal_p (x, op0)) /* (x & A) | x ~ x. */ return op0; if (op1 == NULL) op1 = gen_rtx_IOR (0, XEXP (old, 1), x); else if (rtx_equal_p (x, op1)) /* (A & x) | x ~ x. 
*/ return op1; return gen_rtx_AND (0, op0, op1); } if (! add) return NULL; return gen_rtx_IOR (0, old, x); case NOT: op0 = and_reg_cond (XEXP (old, 0), not_reg_cond (x), 0); if (op0 != NULL) return not_reg_cond (op0); if (! add) return NULL; return gen_rtx_IOR (0, old, x); default: abort (); } } static rtx not_reg_cond (rtx x) { enum rtx_code x_code; if (x == const0_rtx) return const1_rtx; else if (x == const1_rtx) return const0_rtx; x_code = GET_CODE (x); if (x_code == NOT) return XEXP (x, 0); if (COMPARISON_P (x) && REG_P (XEXP (x, 0))) { if (XEXP (x, 1) != const0_rtx) abort (); return gen_rtx_fmt_ee (reverse_condition (x_code), VOIDmode, XEXP (x, 0), const0_rtx); } return gen_rtx_NOT (0, x); } static rtx and_reg_cond (rtx old, rtx x, int add) { rtx op0, op1; if (COMPARISON_P (old)) { if (COMPARISON_P (x) && GET_CODE (x) == reverse_condition (GET_CODE (old)) && REGNO (XEXP (x, 0)) == REGNO (XEXP (old, 0))) return const0_rtx; if (GET_CODE (x) == GET_CODE (old) && REGNO (XEXP (x, 0)) == REGNO (XEXP (old, 0))) return old; if (! add) return NULL; return gen_rtx_AND (0, old, x); } switch (GET_CODE (old)) { case IOR: op0 = and_reg_cond (XEXP (old, 0), x, 0); op1 = and_reg_cond (XEXP (old, 1), x, 0); if (op0 != NULL || op1 != NULL) { if (op0 == const0_rtx) return op1 ? op1 : gen_rtx_AND (0, XEXP (old, 1), x); if (op1 == const0_rtx) return op0 ? op0 : gen_rtx_AND (0, XEXP (old, 0), x); if (op0 == const1_rtx || op1 == const1_rtx) return const1_rtx; if (op0 == NULL) op0 = gen_rtx_AND (0, XEXP (old, 0), x); else if (rtx_equal_p (x, op0)) /* (x | A) & x ~ x. */ return op0; if (op1 == NULL) op1 = gen_rtx_AND (0, XEXP (old, 1), x); else if (rtx_equal_p (x, op1)) /* (A | x) & x ~ x. */ return op1; return gen_rtx_IOR (0, op0, op1); } if (! add) return NULL; return gen_rtx_AND (0, old, x); case AND: op0 = and_reg_cond (XEXP (old, 0), x, 0); op1 = and_reg_cond (XEXP (old, 1), x, 0); if (op0 != NULL || op1 != NULL) { if (op0 == const1_rtx) return op1 ? op1 : gen_rtx_AND (0, XEXP (old, 1), x); if (op1 == const1_rtx) return op0 ? op0 : gen_rtx_AND (0, XEXP (old, 0), x); if (op0 == const0_rtx || op1 == const0_rtx) return const0_rtx; if (op0 == NULL) op0 = gen_rtx_AND (0, XEXP (old, 0), x); else if (rtx_equal_p (x, op0)) /* (x & A) & x ~ (x & A). */ return old; if (op1 == NULL) op1 = gen_rtx_AND (0, XEXP (old, 1), x); else if (rtx_equal_p (x, op1)) /* (A & x) & x ~ (A & x). */ return old; return gen_rtx_AND (0, op0, op1); } if (! add) return NULL; return gen_rtx_AND (0, old, x); case NOT: op0 = ior_reg_cond (XEXP (old, 0), not_reg_cond (x), 0); if (op0 != NULL) return not_reg_cond (op0); if (! add) return NULL; return gen_rtx_AND (0, old, x); default: abort (); } } /* Given a condition X, remove references to reg REGNO and return the new condition. The removal will be done so that all conditions involving REGNO are considered to evaluate to false. This function is used when the value of REGNO changes. 
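For example (illustrative): eliminating register 1 from (ior (ne (reg 1) (const_int 0)) (eq (reg 2) (const_int 0))) leaves (eq (reg 2) (const_int 0)), while eliminating it from the corresponding AND collapses the whole condition to const0_rtx.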
*/ static rtx elim_reg_cond (rtx x, unsigned int regno) { rtx op0, op1; if (COMPARISON_P (x)) { if (REGNO (XEXP (x, 0)) == regno) return const0_rtx; return x; } switch (GET_CODE (x)) { case AND: op0 = elim_reg_cond (XEXP (x, 0), regno); op1 = elim_reg_cond (XEXP (x, 1), regno); if (op0 == const0_rtx || op1 == const0_rtx) return const0_rtx; if (op0 == const1_rtx) return op1; if (op1 == const1_rtx) return op0; if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) return x; return gen_rtx_AND (0, op0, op1); case IOR: op0 = elim_reg_cond (XEXP (x, 0), regno); op1 = elim_reg_cond (XEXP (x, 1), regno); if (op0 == const1_rtx || op1 == const1_rtx) return const1_rtx; if (op0 == const0_rtx) return op1; if (op1 == const0_rtx) return op0; if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) return x; return gen_rtx_IOR (0, op0, op1); case NOT: op0 = elim_reg_cond (XEXP (x, 0), regno); if (op0 == const0_rtx) return const1_rtx; if (op0 == const1_rtx) return const0_rtx; if (op0 != XEXP (x, 0)) return not_reg_cond (op0); return x; default: abort (); } } #endif /* HAVE_conditional_execution */ #ifdef AUTO_INC_DEC /* Try to substitute the auto-inc expression INC as the address inside MEM which occurs in INSN. Currently, the address of MEM is an expression involving INCR_REG, and INCR is the next use of INCR_REG; it is an insn that has a single set whose source is a PLUS of INCR_REG and something else. */ static void attempt_auto_inc (struct propagate_block_info *pbi, rtx inc, rtx insn, rtx mem, rtx incr, rtx incr_reg) { int regno = REGNO (incr_reg); rtx set = single_set (incr); rtx q = SET_DEST (set); rtx y = SET_SRC (set); int opnum = XEXP (y, 0) == incr_reg ? 0 : 1; /* Make sure this reg appears only once in this insn. */ if (count_occurrences (PATTERN (insn), incr_reg, 1) != 1) return; if (dead_or_set_p (incr, incr_reg) /* Mustn't autoinc an eliminable register. */ && (regno >= FIRST_PSEUDO_REGISTER || ! TEST_HARD_REG_BIT (elim_reg_set, regno))) { /* This is the simple case. Try to make the auto-inc. If we can't, we are done. Otherwise, we will do any needed updates below. */ if (! validate_change (insn, &XEXP (mem, 0), inc, 0)) return; } else if (REG_P (q) /* PREV_INSN used here to check the semi-open interval [insn,incr). */ && ! reg_used_between_p (q, PREV_INSN (insn), incr) /* We must also check for sets of q as q may be a call clobbered hard register and there may be a call between PREV_INSN (insn) and incr. */ && ! reg_set_between_p (q, PREV_INSN (insn), incr)) { /* We have *p followed sometime later by q = p+size. Both p and q must be live afterward, and q is not used between INSN and its assignment. Change it to q = p, ...*q..., q = q+size. Then fall into the usual case. */ rtx insns, temp; start_sequence (); emit_move_insn (q, incr_reg); insns = get_insns (); end_sequence (); /* If we can't make the auto-inc, or can't make the replacement into Y, exit. There's no point in making the change below if we can't do the auto-inc and doing so is not correct in the pre-inc case. */ XEXP (inc, 0) = q; validate_change (insn, &XEXP (mem, 0), inc, 1); validate_change (incr, &XEXP (y, opnum), q, 1); if (! apply_change_group ()) return; /* We now know we'll be doing this change, so emit the new insn(s) and do the updates. */ emit_insn_before (insns, insn); if (BB_HEAD (pbi->bb) == insn) BB_HEAD (pbi->bb) = insns; /* INCR will become a NOTE and INSN won't contain a use of INCR_REG. If a use of INCR_REG was just placed in the insn before INSN, make that the next use. Otherwise, invalidate it. 
*/ if (GET_CODE (PREV_INSN (insn)) == INSN && GET_CODE (PATTERN (PREV_INSN (insn))) == SET && SET_SRC (PATTERN (PREV_INSN (insn))) == incr_reg) pbi->reg_next_use[regno] = PREV_INSN (insn); else pbi->reg_next_use[regno] = 0; incr_reg = q; regno = REGNO (q); if ((pbi->flags & PROP_REG_INFO) && !REGNO_REG_SET_P (pbi->reg_live, regno)) reg_deaths[regno] = pbi->insn_num; /* REGNO is now used in INCR which is below INSN, but it previously wasn't live here. If we don't mark it as live, we'll put a REG_DEAD note for it on this insn, which is incorrect. */ SET_REGNO_REG_SET (pbi->reg_live, regno); /* If there are any calls between INSN and INCR, show that REGNO now crosses them. */ for (temp = insn; temp != incr; temp = NEXT_INSN (temp)) if (GET_CODE (temp) == CALL_INSN) REG_N_CALLS_CROSSED (regno)++; /* Invalidate alias info for Q since we just changed its value. */ clear_reg_alias_info (q); } else return; /* If we haven't returned, it means we were able to make the auto-inc, so update the status. First, record that this insn has an implicit side effect. */ REG_NOTES (insn) = alloc_EXPR_LIST (REG_INC, incr_reg, REG_NOTES (insn)); /* Modify the old increment-insn to simply copy the already-incremented value of our register. */ if (! validate_change (incr, &SET_SRC (set), incr_reg, 0)) abort (); /* If that makes it a no-op (copying the register into itself) delete it so it won't appear to be a "use" and a "set" of this register. */ if (REGNO (SET_DEST (set)) == REGNO (incr_reg)) { /* If the original source was dead, it's dead now. */ rtx note; while ((note = find_reg_note (incr, REG_DEAD, NULL_RTX)) != NULL_RTX) { remove_note (incr, note); if (XEXP (note, 0) != incr_reg) { unsigned int regno = REGNO (XEXP (note, 0)); if ((pbi->flags & PROP_REG_INFO) && REGNO_REG_SET_P (pbi->reg_live, regno)) { REG_LIVE_LENGTH (regno) += pbi->insn_num - reg_deaths[regno]; reg_deaths[regno] = 0; } CLEAR_REGNO_REG_SET (pbi->reg_live, REGNO (XEXP (note, 0))); } } PUT_CODE (incr, NOTE); SET_INSN_DELETED (incr); } if (regno >= FIRST_PSEUDO_REGISTER) { /* Count an extra reference to the reg. When a reg is incremented, spilling it is worse, so we want to make that less likely. */ REG_FREQ (regno) += REG_FREQ_FROM_BB (pbi->bb); /* Count the increment as a setting of the register, even though it isn't a SET in rtl. */ REG_N_SETS (regno)++; } } /* X is a MEM found in INSN. See if we can convert it into an auto-increment reference. */ static void find_auto_inc (struct propagate_block_info *pbi, rtx x, rtx insn) { rtx addr = XEXP (x, 0); HOST_WIDE_INT offset = 0; rtx set, y, incr, inc_val; int regno; int size = GET_MODE_SIZE (GET_MODE (x)); if (GET_CODE (insn) == JUMP_INSN) return; /* Here we detect use of an index register which might be good for postincrement, postdecrement, preincrement, or predecrement. */ if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT) offset = INTVAL (XEXP (addr, 1)), addr = XEXP (addr, 0); if (!REG_P (addr)) return; regno = REGNO (addr); /* Is the next use an increment that might make auto-increment? 
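For instance (an illustrative sketch, target permitting): a load (set (reg 101) (mem:SI (reg 100))) followed later in the block by (set (reg 100) (plus (reg 100) (const_int 4))) can be rewritten so the load uses (mem:SI (post_inc (reg 100))); the separate increment insn is then reduced to a copy of the already-incremented register, or deleted if that copy would be a no-op.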
*/ incr = pbi->reg_next_use[regno]; if (incr == 0 || BLOCK_NUM (incr) != BLOCK_NUM (insn)) return; set = single_set (incr); if (set == 0 || GET_CODE (set) != SET) return; y = SET_SRC (set); if (GET_CODE (y) != PLUS) return; if (REG_P (XEXP (y, 0)) && REGNO (XEXP (y, 0)) == REGNO (addr)) inc_val = XEXP (y, 1); else if (REG_P (XEXP (y, 1)) && REGNO (XEXP (y, 1)) == REGNO (addr)) inc_val = XEXP (y, 0); else return; if (GET_CODE (inc_val) == CONST_INT) { if (HAVE_POST_INCREMENT && (INTVAL (inc_val) == size && offset == 0)) attempt_auto_inc (pbi, gen_rtx_POST_INC (Pmode, addr), insn, x, incr, addr); else if (HAVE_POST_DECREMENT && (INTVAL (inc_val) == -size && offset == 0)) attempt_auto_inc (pbi, gen_rtx_POST_DEC (Pmode, addr), insn, x, incr, addr); else if (HAVE_PRE_INCREMENT && (INTVAL (inc_val) == size && offset == size)) attempt_auto_inc (pbi, gen_rtx_PRE_INC (Pmode, addr), insn, x, incr, addr); else if (HAVE_PRE_DECREMENT && (INTVAL (inc_val) == -size && offset == -size)) attempt_auto_inc (pbi, gen_rtx_PRE_DEC (Pmode, addr), insn, x, incr, addr); else if (HAVE_POST_MODIFY_DISP && offset == 0) attempt_auto_inc (pbi, gen_rtx_POST_MODIFY (Pmode, addr, gen_rtx_PLUS (Pmode, addr, inc_val)), insn, x, incr, addr); else if (HAVE_PRE_MODIFY_DISP && offset == INTVAL (inc_val)) attempt_auto_inc (pbi, gen_rtx_PRE_MODIFY (Pmode, addr, gen_rtx_PLUS (Pmode, addr, inc_val)), insn, x, incr, addr); } else if (REG_P (inc_val) && ! reg_set_between_p (inc_val, PREV_INSN (insn), NEXT_INSN (incr))) { if (HAVE_POST_MODIFY_REG && offset == 0) attempt_auto_inc (pbi, gen_rtx_POST_MODIFY (Pmode, addr, gen_rtx_PLUS (Pmode, addr, inc_val)), insn, x, incr, addr); } } #endif /* AUTO_INC_DEC */ static void mark_used_reg (struct propagate_block_info *pbi, rtx reg, rtx cond ATTRIBUTE_UNUSED, rtx insn) { unsigned int regno_first, regno_last, i; int some_was_live, some_was_dead, some_not_set; regno_last = regno_first = REGNO (reg); if (regno_first < FIRST_PSEUDO_REGISTER) regno_last += hard_regno_nregs[regno_first][GET_MODE (reg)] - 1; /* Find out if any of this register is live after this instruction. */ some_was_live = some_was_dead = 0; for (i = regno_first; i <= regno_last; ++i) { int needed_regno = REGNO_REG_SET_P (pbi->reg_live, i); some_was_live |= needed_regno; some_was_dead |= ! needed_regno; } /* Find out if any of the register was set this insn. */ some_not_set = 0; for (i = regno_first; i <= regno_last; ++i) some_not_set |= ! REGNO_REG_SET_P (pbi->new_set, i); if (pbi->flags & (PROP_LOG_LINKS | PROP_AUTOINC)) { /* Record where each reg is used, so when the reg is set we know the next insn that uses it. */ pbi->reg_next_use[regno_first] = insn; } if (pbi->flags & PROP_REG_INFO) { if (regno_first < FIRST_PSEUDO_REGISTER) { /* If this is a register we are going to try to eliminate, don't mark it live here. If we are successful in eliminating it, it need not be live unless it is used for pseudos, in which case it will have been set live when it was allocated to the pseudos. If the register will not be eliminated, reload will set it live at that point. Otherwise, record that this function uses this register. */ /* ??? The PPC backend tries to "eliminate" on the pic register to itself. This should be fixed. In the mean time, hack around it. */ if (! (TEST_HARD_REG_BIT (elim_reg_set, regno_first) && (regno_first == FRAME_POINTER_REGNUM || regno_first == ARG_POINTER_REGNUM))) for (i = regno_first; i <= regno_last; ++i) regs_ever_live[i] = 1; } else { /* Keep track of which basic block each reg appears in. 
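For example, a pseudo seen only in one block keeps that block's index in REG_BASIC_BLOCK; a use in any other block demotes it to REG_BLOCK_GLOBAL.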
*/ int blocknum = pbi->bb->index; if (REG_BASIC_BLOCK (regno_first) == REG_BLOCK_UNKNOWN) REG_BASIC_BLOCK (regno_first) = blocknum; else if (REG_BASIC_BLOCK (regno_first) != blocknum) REG_BASIC_BLOCK (regno_first) = REG_BLOCK_GLOBAL; /* Count (weighted) number of uses of each reg. */ REG_FREQ (regno_first) += REG_FREQ_FROM_BB (pbi->bb); REG_N_REFS (regno_first)++; } for (i = regno_first; i <= regno_last; ++i) if (! REGNO_REG_SET_P (pbi->reg_live, i)) { #ifdef ENABLE_CHECKING if (reg_deaths[i]) abort (); #endif reg_deaths[i] = pbi->insn_num; } } /* Record and count the insns in which a reg dies. If it is used in this insn and was dead below the insn then it dies in this insn. If it was set in this insn, we do not make a REG_DEAD note; likewise if we already made such a note. */ if ((pbi->flags & (PROP_DEATH_NOTES | PROP_REG_INFO)) && some_was_dead && some_not_set) { /* Check for the case where the register dying partially overlaps the register set by this insn. */ if (regno_first != regno_last) for (i = regno_first; i <= regno_last; ++i) some_was_live |= REGNO_REG_SET_P (pbi->new_set, i); /* If none of the words in X is needed, make a REG_DEAD note. Otherwise, we must make partial REG_DEAD notes. */ if (! some_was_live) { if ((pbi->flags & PROP_DEATH_NOTES) && ! find_regno_note (insn, REG_DEAD, regno_first)) REG_NOTES (insn) = alloc_EXPR_LIST (REG_DEAD, reg, REG_NOTES (insn)); if (pbi->flags & PROP_REG_INFO) REG_N_DEATHS (regno_first)++; } else { /* Don't make a REG_DEAD note for a part of a register that is set in the insn. */ for (i = regno_first; i <= regno_last; ++i) if (! REGNO_REG_SET_P (pbi->reg_live, i) && ! dead_or_set_regno_p (insn, i)) REG_NOTES (insn) = alloc_EXPR_LIST (REG_DEAD, regno_reg_rtx[i], REG_NOTES (insn)); } } /* Mark the register as being live. */ for (i = regno_first; i <= regno_last; ++i) { #ifdef HAVE_conditional_execution int this_was_live = REGNO_REG_SET_P (pbi->reg_live, i); #endif SET_REGNO_REG_SET (pbi->reg_live, i); #ifdef HAVE_conditional_execution /* If this is a conditional use, record that fact. If it is later conditionally set, we'll know to kill the register. */ if (cond != NULL_RTX) { splay_tree_node node; struct reg_cond_life_info *rcli; rtx ncond; if (this_was_live) { node = splay_tree_lookup (pbi->reg_cond_dead, i); if (node == NULL) { /* The register was unconditionally live previously. No need to do anything. */ } else { /* The register was conditionally live previously. Subtract the new life cond from the old death cond. */ rcli = (struct reg_cond_life_info *) node->value; ncond = rcli->condition; ncond = and_reg_cond (ncond, not_reg_cond (cond), 1); /* If the register is now unconditionally live, remove the entry in the splay_tree. */ if (ncond == const0_rtx) splay_tree_remove (pbi->reg_cond_dead, i); else { rcli->condition = ncond; SET_REGNO_REG_SET (pbi->reg_cond_reg, REGNO (XEXP (cond, 0))); } } } else { /* The register was not previously live at all. Record the condition under which it is still dead. */ rcli = xmalloc (sizeof (*rcli)); rcli->condition = not_reg_cond (cond); rcli->stores = const0_rtx; rcli->orig_condition = const0_rtx; splay_tree_insert (pbi->reg_cond_dead, i, (splay_tree_value) rcli); SET_REGNO_REG_SET (pbi->reg_cond_reg, REGNO (XEXP (cond, 0))); } } else if (this_was_live) { /* The register may have been conditionally live previously, but is now unconditionally live. Remove it from the conditionally dead list, so that a conditional set won't cause us to think it dead. 
*/ splay_tree_remove (pbi->reg_cond_dead, i); } #endif } } /* Scan expression X and store a 1-bit in NEW_LIVE for each reg it uses. This is done assuming the registers needed from X are those that have 1-bits in PBI->REG_LIVE. INSN is the containing instruction. If INSN is dead, this function is not called. */ static void mark_used_regs (struct propagate_block_info *pbi, rtx x, rtx cond, rtx insn) { RTX_CODE code; int regno; int flags = pbi->flags; retry: if (!x) return; code = GET_CODE (x); switch (code) { case LABEL_REF: case SYMBOL_REF: case CONST_INT: case CONST: case CONST_DOUBLE: case CONST_VECTOR: case PC: case ADDR_VEC: case ADDR_DIFF_VEC: return; #ifdef HAVE_cc0 case CC0: pbi->cc0_live = 1; return; #endif case CLOBBER: /* If we are clobbering a MEM, mark any registers inside the address as being used. */ if (MEM_P (XEXP (x, 0))) mark_used_regs (pbi, XEXP (XEXP (x, 0), 0), cond, insn); return; case MEM: /* Don't bother watching stores to mems if this is not the final pass. We'll not be deleting dead stores this round. */ if (optimize && (flags & PROP_SCAN_DEAD_STORES)) { /* Invalidate the data for the last MEM stored, but only if MEM is something that can be stored into. */ if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0))) /* Needn't clear the memory set list. */ ; else { rtx temp = pbi->mem_set_list; rtx prev = NULL_RTX; rtx next; while (temp) { next = XEXP (temp, 1); if (unchanging_anti_dependence (XEXP (temp, 0), x)) { /* Splice temp out of the list. */ if (prev) XEXP (prev, 1) = next; else pbi->mem_set_list = next; free_EXPR_LIST_node (temp); pbi->mem_set_list_len--; } else prev = temp; temp = next; } } /* If the memory reference had embedded side effects (autoincrement address modes. Then we may need to kill some entries on the memory set list. */ if (insn) for_each_rtx (&PATTERN (insn), invalidate_mems_from_autoinc, pbi); } #ifdef AUTO_INC_DEC if (flags & PROP_AUTOINC) find_auto_inc (pbi, x, insn); #endif break; case SUBREG: #ifdef CANNOT_CHANGE_MODE_CLASS if ((flags & PROP_REG_INFO) && REG_P (SUBREG_REG (x)) && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER) bitmap_set_bit (&subregs_of_mode, REGNO (SUBREG_REG (x)) * MAX_MACHINE_MODE + GET_MODE (x)); #endif /* While we're here, optimize this case. */ x = SUBREG_REG (x); if (!REG_P (x)) goto retry; /* Fall through. */ case REG: /* See a register other than being set => mark it as needed. */ mark_used_reg (pbi, x, cond, insn); return; case SET: { rtx testreg = SET_DEST (x); int mark_dest = 0; /* If storing into MEM, don't show it as being used. But do show the address as being used. */ if (MEM_P (testreg)) { #ifdef AUTO_INC_DEC if (flags & PROP_AUTOINC) find_auto_inc (pbi, testreg, insn); #endif mark_used_regs (pbi, XEXP (testreg, 0), cond, insn); mark_used_regs (pbi, SET_SRC (x), cond, insn); return; } /* Storing in STRICT_LOW_PART is like storing in a reg in that this SET might be dead, so ignore it in TESTREG. but in some other ways it is like using the reg. Storing in a SUBREG or a bit field is like storing the entire register in that if the register's value is not used then this SET is not needed. 
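For example, a store to (subreg:SI (reg:DI R) 0) still depends on the rest of R's old value when DImode occupies more words than SImode, whereas a same-size lowpart store does not; the word-count test below distinguishes the two cases.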
*/ while (GET_CODE (testreg) == STRICT_LOW_PART || GET_CODE (testreg) == ZERO_EXTRACT || GET_CODE (testreg) == SIGN_EXTRACT || GET_CODE (testreg) == SUBREG) { #ifdef CANNOT_CHANGE_MODE_CLASS if ((flags & PROP_REG_INFO) && GET_CODE (testreg) == SUBREG && REG_P (SUBREG_REG (testreg)) && REGNO (SUBREG_REG (testreg)) >= FIRST_PSEUDO_REGISTER) bitmap_set_bit (&subregs_of_mode, REGNO (SUBREG_REG (testreg)) * MAX_MACHINE_MODE + GET_MODE (testreg)); #endif /* Modifying a single register in an alternate mode does not use any of the old value. But these other ways of storing in a register do use the old value. */ if (GET_CODE (testreg) == SUBREG && !((REG_BYTES (SUBREG_REG (testreg)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD > (REG_BYTES (testreg) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)) ; else mark_dest = 1; testreg = XEXP (testreg, 0); } /* If this is a store into a register or group of registers, recursively scan the value being stored. */ if ((GET_CODE (testreg) == PARALLEL && GET_MODE (testreg) == BLKmode) || (REG_P (testreg) && (regno = REGNO (testreg), ! (regno == FRAME_POINTER_REGNUM && (! reload_completed || frame_pointer_needed))) #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM && ! (regno == HARD_FRAME_POINTER_REGNUM && (! reload_completed || frame_pointer_needed)) #endif #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno]) #endif )) { if (mark_dest) mark_used_regs (pbi, SET_DEST (x), cond, insn); mark_used_regs (pbi, SET_SRC (x), cond, insn); return; } } break; case ASM_OPERANDS: case UNSPEC_VOLATILE: case TRAP_IF: case ASM_INPUT: { /* Traditional and volatile asm instructions must be considered to use and clobber all hard registers, all pseudo-registers and all of memory. So must TRAP_IF and UNSPEC_VOLATILE operations. Consider for instance a volatile asm that changes the fpu rounding mode. An insn should not be moved across this even if it only uses pseudo-regs because it might give an incorrectly rounded result. ?!? Unfortunately, marking all hard registers as live causes massive problems for the register allocator and marking all pseudos as live creates mountains of uninitialized variable warnings. So for now, just clear the memory set list and mark any regs we can find in ASM_OPERANDS as used. */ if (code != ASM_OPERANDS || MEM_VOLATILE_P (x)) { free_EXPR_LIST_list (&pbi->mem_set_list); pbi->mem_set_list_len = 0; } /* For all ASM_OPERANDS, we must traverse the vector of input operands. We can not just fall through here since then we would be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate traditional asms unlike their normal usage. */ if (code == ASM_OPERANDS) { int j; for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++) mark_used_regs (pbi, ASM_OPERANDS_INPUT (x, j), cond, insn); } break; } case COND_EXEC: if (cond != NULL_RTX) abort (); mark_used_regs (pbi, COND_EXEC_TEST (x), NULL_RTX, insn); cond = COND_EXEC_TEST (x); x = COND_EXEC_CODE (x); goto retry; default: break; } /* Recursively scan the operands of this expression. */ { const char * const fmt = GET_RTX_FORMAT (code); int i; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { /* Tail recursive case: save a function call level. 
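(Operand 0 is the last one the loop reaches, so rather than recursing on it we overwrite X and jump back to the top of the function.)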
*/ if (i == 0) { x = XEXP (x, 0); goto retry; } mark_used_regs (pbi, XEXP (x, i), cond, insn); } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) mark_used_regs (pbi, XVECEXP (x, i, j), cond, insn); } } } } #ifdef AUTO_INC_DEC static int try_pre_increment_1 (struct propagate_block_info *pbi, rtx insn) { /* Find the next use of this reg. If in same basic block, make it do pre-increment or pre-decrement if appropriate. */ rtx x = single_set (insn); HOST_WIDE_INT amount = ((GET_CODE (SET_SRC (x)) == PLUS ? 1 : -1) * INTVAL (XEXP (SET_SRC (x), 1))); int regno = REGNO (SET_DEST (x)); rtx y = pbi->reg_next_use[regno]; if (y != 0 && SET_DEST (x) != stack_pointer_rtx && BLOCK_NUM (y) == BLOCK_NUM (insn) /* Don't do this if the reg dies, or gets set in y; a standard addressing mode would be better. */ && ! dead_or_set_p (y, SET_DEST (x)) && try_pre_increment (y, SET_DEST (x), amount)) { /* We have found a suitable auto-increment and already changed insn Y to do it. So flush this increment instruction. */ propagate_block_delete_insn (insn); /* Count a reference to this reg for the increment insn we are deleting. When a reg is incremented, spilling it is worse, so we want to make that less likely. */ if (regno >= FIRST_PSEUDO_REGISTER) { REG_FREQ (regno) += REG_FREQ_FROM_BB (pbi->bb); REG_N_SETS (regno)++; } /* Flush any remembered memories depending on the value of the incremented register. */ invalidate_mems_from_set (pbi, SET_DEST (x)); return 1; } return 0; } /* Try to change INSN so that it does pre-increment or pre-decrement addressing on register REG in order to add AMOUNT to REG. AMOUNT is negative for pre-decrement. Returns 1 if the change could be made. This checks all about the validity of the result of modifying INSN. */ static int try_pre_increment (rtx insn, rtx reg, HOST_WIDE_INT amount) { rtx use; /* Nonzero if we can try to make a pre-increment or pre-decrement. For example, addl $4,r1; movl (r1),... can become movl +(r1),... */ int pre_ok = 0; /* Nonzero if we can try to make a post-increment or post-decrement. For example, addl $4,r1; movl -4(r1),... can become movl (r1)+,... It is possible for both PRE_OK and POST_OK to be nonzero if the machine supports both pre-inc and post-inc, or both pre-dec and post-dec. */ int post_ok = 0; /* Nonzero if the opportunity actually requires post-inc or post-dec. */ int do_post = 0; /* From the sign of increment, see which possibilities are conceivable on this target machine. */ if (HAVE_PRE_INCREMENT && amount > 0) pre_ok = 1; if (HAVE_POST_INCREMENT && amount > 0) post_ok = 1; if (HAVE_PRE_DECREMENT && amount < 0) pre_ok = 1; if (HAVE_POST_DECREMENT && amount < 0) post_ok = 1; if (! (pre_ok || post_ok)) return 0; /* It is not safe to add a side effect to a jump insn because if the incremented register is spilled and must be reloaded there would be no way to store the incremented value back in memory. */ if (GET_CODE (insn) == JUMP_INSN) return 0; use = 0; if (pre_ok) use = find_use_as_address (PATTERN (insn), reg, 0); if (post_ok && (use == 0 || use == (rtx) (size_t) 1)) { use = find_use_as_address (PATTERN (insn), reg, -amount); do_post = 1; } if (use == 0 || use == (rtx) (size_t) 1) return 0; if (GET_MODE_SIZE (GET_MODE (use)) != (amount > 0 ? amount : - amount)) return 0; /* See if this combination of instruction and addressing mode exists. */ if (! validate_change (insn, &XEXP (use, 0), gen_rtx_fmt_e (amount > 0 ? (do_post ? POST_INC : PRE_INC) : (do_post ? 
POST_DEC : PRE_DEC), Pmode, reg), 0)) return 0; /* Record that this insn now has an implicit side effect on X. */ REG_NOTES (insn) = alloc_EXPR_LIST (REG_INC, reg, REG_NOTES (insn)); return 1; } #endif /* AUTO_INC_DEC */ /* Find the place in the rtx X where REG is used as a memory address. Return the MEM rtx that so uses it. If PLUSCONST is nonzero, search instead for a memory address equivalent to (plus REG (const_int PLUSCONST)). If such an address does not appear, return 0. If REG appears more than once, or is used other than in such an address, return (rtx) 1. */ rtx find_use_as_address (rtx x, rtx reg, HOST_WIDE_INT plusconst) { enum rtx_code code = GET_CODE (x); const char * const fmt = GET_RTX_FORMAT (code); int i; rtx value = 0; rtx tem; if (code == MEM && XEXP (x, 0) == reg && plusconst == 0) return x; if (code == MEM && GET_CODE (XEXP (x, 0)) == PLUS && XEXP (XEXP (x, 0), 0) == reg && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT && INTVAL (XEXP (XEXP (x, 0), 1)) == plusconst) return x; if (code == SIGN_EXTRACT || code == ZERO_EXTRACT) { /* If REG occurs inside a MEM used in a bit-field reference, that is unacceptable. */ if (find_use_as_address (XEXP (x, 0), reg, 0) != 0) return (rtx) (size_t) 1; } if (x == reg) return (rtx) (size_t) 1; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { tem = find_use_as_address (XEXP (x, i), reg, plusconst); if (value == 0) value = tem; else if (tem != 0) return (rtx) (size_t) 1; } else if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) { tem = find_use_as_address (XVECEXP (x, i, j), reg, plusconst); if (value == 0) value = tem; else if (tem != 0) return (rtx) (size_t) 1; } } } return value; } /* Write information about registers and basic blocks into FILE. This is part of making a debugging dump. */ void dump_regset (regset r, FILE *outf) { int i; if (r == NULL) { fputs (" (nil)", outf); return; } EXECUTE_IF_SET_IN_REG_SET (r, 0, i, { fprintf (outf, " %d", i); if (i < FIRST_PSEUDO_REGISTER) fprintf (outf, " [%s]", reg_names[i]); }); } /* Print a human-readable representation of R on the standard error stream. This function is designed to be used from within the debugger. */ void debug_regset (regset r) { dump_regset (r, stderr); putc ('\n', stderr); } /* Recompute register set/reference counts immediately prior to register allocation. This avoids problems with set/reference counts changing to/from values which have special meanings to the register allocators. Additionally, the reference counts are the primary component used by the register allocators to prioritize pseudos for allocation to hard regs. More accurate reference counts generally lead to better register allocation. F is the first insn to be scanned. LOOP_STEP denotes how much loop_depth should be incremented per loop nesting level in order to increase the ref count more for references in a loop. It might be worthwhile to update REG_LIVE_LENGTH, REG_BASIC_BLOCK and possibly other information which is used by the register allocators. */ void recompute_reg_usage (rtx f ATTRIBUTE_UNUSED, int loop_step ATTRIBUTE_UNUSED) { allocate_reg_life_data (); /* distribute_notes in combiner fails to convert some of the REG_UNUSED notes to REG_DEAD notes. This causes CHECK_DEAD_NOTES in sched1 to abort. To solve this update the DEATH_NOTES here. */ update_life_info (NULL, UPDATE_LIFE_LOCAL, PROP_REG_INFO | PROP_DEATH_NOTES); } /* Optionally removes all the REG_DEAD and REG_UNUSED notes from a set of blocks. If BLOCKS is NULL, assume the universal set. 
Returns a count of the number of registers that died. */ int count_or_remove_death_notes (sbitmap blocks, int kill) { int count = 0; int i; basic_block bb; /* This used to be a loop over all the blocks with a membership test inside the loop. That can be amazingly expensive on a large CFG when only a small number of bits are set in BLOCKs (for example, the calls from the scheduler typically have very few bits set). For extra credit, someone should convert BLOCKS to a bitmap rather than an sbitmap. */ if (blocks) { EXECUTE_IF_SET_IN_SBITMAP (blocks, 0, i, { count += count_or_remove_death_notes_bb (BASIC_BLOCK (i), kill); }); } else { FOR_EACH_BB (bb) { count += count_or_remove_death_notes_bb (bb, kill); } } return count; } /* Optionally removes all the REG_DEAD and REG_UNUSED notes from basic block BB. Returns a count of the number of registers that died. */ static int count_or_remove_death_notes_bb (basic_block bb, int kill) { int count = 0; rtx insn; for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { rtx *pprev = &REG_NOTES (insn); rtx link = *pprev; while (link) { switch (REG_NOTE_KIND (link)) { case REG_DEAD: if (REG_P (XEXP (link, 0))) { rtx reg = XEXP (link, 0); int n; if (REGNO (reg) >= FIRST_PSEUDO_REGISTER) n = 1; else n = hard_regno_nregs[REGNO (reg)][GET_MODE (reg)]; count += n; } /* Fall through. */ case REG_UNUSED: if (kill) { rtx next = XEXP (link, 1); free_EXPR_LIST_node (link); *pprev = link = next; break; } /* Fall through. */ default: pprev = &XEXP (link, 1); link = *pprev; break; } } } if (insn == BB_END (bb)) break; } return count; } /* Clear LOG_LINKS fields of insns in the selected blocks, or in the whole chain if BLOCKS is NULL. */ static void clear_log_links (sbitmap blocks) { rtx insn; int i; if (!blocks) { for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) free_INSN_LIST_list (&LOG_LINKS (insn)); } else EXECUTE_IF_SET_IN_SBITMAP (blocks, 0, i, { basic_block bb = BASIC_BLOCK (i); for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) if (INSN_P (insn)) free_INSN_LIST_list (&LOG_LINKS (insn)); }); } /* Given a register bitmap, turn on the bits in a HARD_REG_SET that correspond to the hard registers, if any, set in that map. This could be done far more efficiently by having all sorts of special-cases with moving single words, but probably isn't worth the trouble. */ void reg_set_to_hard_reg_set (HARD_REG_SET *to, bitmap from) { int i; EXECUTE_IF_SET_IN_BITMAP (from, 0, i, { if (i >= FIRST_PSEUDO_REGISTER) return; SET_HARD_REG_BIT (*to, i); }); } /* Fold a constant sub-tree into a single node for C-compiler Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ /*@@ This file should be rewritten to use an arbitrary precision @@ representation for "struct tree_int_cst" and "struct tree_real_cst". @@ Perhaps the routines could also be used for bc/dc, and made a lib. @@ The routines that translate from the ap rep should @@ warn if precision et. al. is lost. @@ This would also make life easier when this technology is used @@ for cross-compilers. */ /* The entry points in this file are fold, size_int_wide, size_binop and force_fit_type. fold takes a tree as argument and returns a simplified tree. size_binop takes a tree code for an arithmetic operation and two operands that are trees, and produces a tree for the result, assuming the type comes from `sizetype'. size_int takes an integer value, and creates a tree constant with type from `sizetype'. force_fit_type takes a constant and prior overflow indicator, and forces the value to fit the type. It returns an overflow indicator. */ /* The following constants represent a bit based encoding of GCC's comparison operators. This encoding simplifies transformations on relational comparison operators, such as AND and OR. */ enum comparison_code { COMPCODE_FALSE = 0, COMPCODE_LT = 1, COMPCODE_EQ = 2, COMPCODE_LE = 3, COMPCODE_GT = 4, COMPCODE_LTGT = 5, COMPCODE_GE = 6, COMPCODE_ORD = 7, COMPCODE_UNORD = 8, COMPCODE_UNLT = 9, COMPCODE_UNEQ = 10, COMPCODE_UNLE = 11, COMPCODE_UNGT = 12, COMPCODE_NE = 13, COMPCODE_UNGE = 14, COMPCODE_TRUE = 15 }; static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT); static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *); static bool negate_mathfn_p (enum built_in_function); static bool negate_expr_p (tree); static tree negate_expr (tree); static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int); static tree associate_trees (tree, tree, enum tree_code, tree); static tree const_binop (enum tree_code, tree, tree, int); static hashval_t size_htab_hash (const void *); static int size_htab_eq (const void *, const void *); static tree fold_convert_const (enum tree_code, tree, tree); static enum tree_code invert_tree_comparison (enum tree_code, bool); static enum comparison_code comparison_to_compcode (enum tree_code); static enum tree_code compcode_to_comparison (enum comparison_code); static tree combine_comparisons (enum tree_code, enum tree_code, enum tree_code, tree, tree, tree); static int truth_value_p (enum tree_code); static int operand_equal_for_comparison_p (tree, tree, tree); static int twoval_comparison_p (tree, tree *, tree *, int *); static tree eval_subst (tree, tree, tree, tree, tree); static tree pedantic_omit_one_operand (tree, tree, tree); static tree distribute_bit_expr (enum tree_code, tree, tree, tree); static tree make_bit_field_ref (tree, tree, int, int, int); static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree); static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *, enum machine_mode *, int *, int *, tree *, tree *); static int all_ones_mask_p (tree, int); static tree sign_bit_p (tree, tree); static int simple_operand_p (tree); static tree range_binop (enum tree_code, tree, tree, int, tree, int); static tree make_range (tree, int *, tree *, tree *); static tree build_range_check (tree, tree, int, tree, tree); static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree, tree); static tree fold_range_test (tree); static tree fold_cond_expr_with_comparison (tree, tree, tree, tree); static tree unextend (tree, int, int, tree); static tree 
fold_truthop (enum tree_code, tree, tree, tree); static tree optimize_minmax_comparison (tree); static tree extract_muldiv (tree, tree, enum tree_code, tree); static tree extract_muldiv_1 (tree, tree, enum tree_code, tree); static int multiple_of_p (tree, tree, tree); static tree constant_boolean_node (int, tree); static tree fold_binary_op_with_conditional_arg (enum tree_code, tree, tree, tree, int); static bool fold_real_zero_addition_p (tree, tree, int); static tree fold_mathfn_compare (enum built_in_function, enum tree_code, tree, tree, tree); static tree fold_inf_compare (enum tree_code, tree, tree, tree); static tree fold_div_compare (enum tree_code, tree, tree, tree); static bool reorder_operands_p (tree, tree); static tree fold_negate_const (tree, tree); static tree fold_not_const (tree, tree); static tree fold_relational_const (enum tree_code, tree, tree, tree); static tree fold_relational_hi_lo (enum tree_code *, const tree, tree *, tree *); /* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring overflow. Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1. Then this yields nonzero if overflow occurred during the addition. Overflow occurs if A and B have the same sign, but A and SUM differ in sign. Use `^' to test whether signs differ, and `< 0' to isolate the sign. */ #define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0) /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic. We do that by representing the two-word integer in 4 words, with only HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive number. The value of the word is LOWPART + HIGHPART * BASE. */ #define LOWPART(x) \ ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1)) #define HIGHPART(x) \ ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2) #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2) /* Unpack a two-word integer into 4 words. LOW and HI are the integer, as two `HOST_WIDE_INT' pieces. WORDS points to the array of HOST_WIDE_INTs. */ static void encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) { words[0] = LOWPART (low); words[1] = HIGHPART (low); words[2] = LOWPART (hi); words[3] = HIGHPART (hi); } /* Pack an array of 4 words into a two-word integer. WORDS points to the array of words. The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */ static void decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low, HOST_WIDE_INT *hi) { *low = words[0] + words[1] * BASE; *hi = words[2] + words[3] * BASE; } /* Make the integer constant T valid for its type by setting to 0 or 1 all the bits in the constant that don't belong in the type. Return 1 if a signed overflow occurs, 0 otherwise. If OVERFLOW is nonzero, a signed overflow has already occurred in calculating T, so propagate it. */ int force_fit_type (tree t, int overflow) { unsigned HOST_WIDE_INT low; HOST_WIDE_INT high; unsigned int prec; if (TREE_CODE (t) == REAL_CST) { /* ??? Used to check for overflow here via CHECK_FLOAT_TYPE. Consider doing it via real_convert now. */ return overflow; } else if (TREE_CODE (t) != INTEGER_CST) return overflow; low = TREE_INT_CST_LOW (t); high = TREE_INT_CST_HIGH (t); if (POINTER_TYPE_P (TREE_TYPE (t)) || TREE_CODE (TREE_TYPE (t)) == OFFSET_TYPE) prec = POINTER_SIZE; else prec = TYPE_PRECISION (TREE_TYPE (t)); /* First clear all bits that are beyond the type's precision. 
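As an illustrative example (assuming HOST_BITS_PER_WIDE_INT is 64): for a signed 8-bit type, a low word of 0x1ff is masked down to 0xff here, and since bit 7 is then set, the sign-extension step below fills every bit above the precision with ones, yielding -1.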
*/ if (prec == 2 * HOST_BITS_PER_WIDE_INT) ; else if (prec > HOST_BITS_PER_WIDE_INT) TREE_INT_CST_HIGH (t) &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); else { TREE_INT_CST_HIGH (t) = 0; if (prec < HOST_BITS_PER_WIDE_INT) TREE_INT_CST_LOW (t) &= ~((unsigned HOST_WIDE_INT) (-1) << prec); } /* Unsigned types do not suffer sign extension or overflow unless they are a sizetype. */ if (TYPE_UNSIGNED (TREE_TYPE (t)) && ! (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE && TYPE_IS_SIZETYPE (TREE_TYPE (t)))) return overflow; /* If the value's sign bit is set, extend the sign. */ if (prec != 2 * HOST_BITS_PER_WIDE_INT && (prec > HOST_BITS_PER_WIDE_INT ? 0 != (TREE_INT_CST_HIGH (t) & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1))) : 0 != (TREE_INT_CST_LOW (t) & ((unsigned HOST_WIDE_INT) 1 << (prec - 1))))) { /* Value is negative: set to 1 all the bits that are outside this type's precision. */ if (prec > HOST_BITS_PER_WIDE_INT) TREE_INT_CST_HIGH (t) |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); else { TREE_INT_CST_HIGH (t) = -1; if (prec < HOST_BITS_PER_WIDE_INT) TREE_INT_CST_LOW (t) |= ((unsigned HOST_WIDE_INT) (-1) << prec); } } /* Return nonzero if signed overflow occurred. */ return ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t))) != 0); } /* Add two doubleword integers with doubleword result. Each argument is given as two `HOST_WIDE_INT' pieces. One argument is L1 and H1; the other, L2 and H2. The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ int add_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2, unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) { unsigned HOST_WIDE_INT l; HOST_WIDE_INT h; l = l1 + l2; h = h1 + h2 + (l < l1); *lv = l; *hv = h; return OVERFLOW_SUM_SIGN (h1, h2, h); } /* Negate a doubleword integer with doubleword result. Return nonzero if the operation overflows, assuming it's signed. The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1. The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ int neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) { if (l1 == 0) { *lv = 0; *hv = - h1; return (*hv & h1) < 0; } else { *lv = -l1; *hv = ~h1; return 0; } } /* Multiply two doubleword integers with doubleword result. Return nonzero if the operation overflows, assuming it's signed. Each argument is given as two `HOST_WIDE_INT' pieces. One argument is L1 and H1; the other, L2 and H2. The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */ int mul_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2, unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) { HOST_WIDE_INT arg1[4]; HOST_WIDE_INT arg2[4]; HOST_WIDE_INT prod[4 * 2]; unsigned HOST_WIDE_INT carry; int i, j, k; unsigned HOST_WIDE_INT toplow, neglow; HOST_WIDE_INT tophigh, neghigh; encode (arg1, l1, h1); encode (arg2, l2, h2); memset (prod, 0, sizeof prod); for (i = 0; i < 4; i++) { carry = 0; for (j = 0; j < 4; j++) { k = i + j; /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */ carry += arg1[i] * arg2[j]; /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */ carry += prod[k]; prod[k] = LOWPART (carry); carry = HIGHPART (carry); } prod[i + 4] = carry; } decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */ /* Check for overflow by calculating the top half of the answer in full; it should agree with the low half's sign bit. 
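After the sign corrections below, TOPLOW and TOPHIGH must be all zero bits when the low half is non-negative and all one bits when it is negative; the return expression checks exactly that.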
*/ decode (prod + 4, &toplow, &tophigh); if (h1 < 0) { neg_double (l2, h2, &neglow, &neghigh); add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh); } if (h2 < 0) { neg_double (l1, h1, &neglow, &neghigh); add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh); } return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0; } /* Shift the doubleword integer in L1, H1 left by COUNT places keeping only PREC bits of result. Shift right if COUNT is negative. ARITH nonzero specifies arithmetic shifting; otherwise use logical shift. Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ void lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, HOST_WIDE_INT count, unsigned int prec, unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith) { unsigned HOST_WIDE_INT signmask; if (count < 0) { rshift_double (l1, h1, -count, prec, lv, hv, arith); return; } if (SHIFT_COUNT_TRUNCATED) count %= prec; if (count >= 2 * HOST_BITS_PER_WIDE_INT) { /* Shifting by the host word size is undefined according to the ANSI standard, so we must handle this as a special case. */ *hv = 0; *lv = 0; } else if (count >= HOST_BITS_PER_WIDE_INT) { *hv = l1 << (count - HOST_BITS_PER_WIDE_INT); *lv = 0; } else { *hv = (((unsigned HOST_WIDE_INT) h1 << count) | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1)); *lv = l1 << count; } /* Sign extend all bits that are beyond the precision. */ signmask = -((prec > HOST_BITS_PER_WIDE_INT ? ((unsigned HOST_WIDE_INT) *hv >> (prec - HOST_BITS_PER_WIDE_INT - 1)) : (*lv >> (prec - 1))) & 1); if (prec >= 2 * HOST_BITS_PER_WIDE_INT) ; else if (prec >= HOST_BITS_PER_WIDE_INT) { *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT); } else { *hv = signmask; *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec); *lv |= signmask << prec; } } /* Shift the doubleword integer in L1, H1 right by COUNT places keeping only PREC bits of result. COUNT must be positive. ARITH nonzero specifies arithmetic shifting; otherwise use logical shift. Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ void rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, HOST_WIDE_INT count, unsigned int prec, unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith) { unsigned HOST_WIDE_INT signmask; signmask = (arith ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1)) : 0); if (SHIFT_COUNT_TRUNCATED) count %= prec; if (count >= 2 * HOST_BITS_PER_WIDE_INT) { /* Shifting by the host word size is undefined according to the ANSI standard, so we must handle this as a special case. */ *hv = 0; *lv = 0; } else if (count >= HOST_BITS_PER_WIDE_INT) { *hv = 0; *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT); } else { *hv = (unsigned HOST_WIDE_INT) h1 >> count; *lv = ((l1 >> count) | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1)); } /* Zero / sign extend all bits that are beyond the precision. */ if (count >= (HOST_WIDE_INT)prec) { *hv = signmask; *lv = signmask; } else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT) ; else if ((prec - count) >= HOST_BITS_PER_WIDE_INT) { *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT)); *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT); } else { *hv = signmask; *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count)); *lv |= signmask << (prec - count); } } /* Rotate the doubleword integer in L1, H1 left by COUNT places keeping only PREC bits of result. 
Rotate right if COUNT is negative. Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ void lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, HOST_WIDE_INT count, unsigned int prec, unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) { unsigned HOST_WIDE_INT s1l, s2l; HOST_WIDE_INT s1h, s2h; count %= prec; if (count < 0) count += prec; lshift_double (l1, h1, count, prec, &s1l, &s1h, 0); rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0); *lv = s1l | s2l; *hv = s1h | s2h; } /* Rotate the doubleword integer in L1, H1 left by COUNT places keeping only PREC bits of result. COUNT must be positive. Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */ void rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1, HOST_WIDE_INT count, unsigned int prec, unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv) { unsigned HOST_WIDE_INT s1l, s2l; HOST_WIDE_INT s1h, s2h; count %= prec; if (count < 0) count += prec; rshift_double (l1, h1, count, prec, &s1l, &s1h, 0); lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0); *lv = s1l | s2l; *hv = s1h | s2h; } /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM). CODE is a tree code for a kind of division, one of TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR or EXACT_DIV_EXPR It controls how the quotient is rounded to an integer. Return nonzero if the operation overflows. UNS nonzero says do unsigned division. */ int div_and_round_double (enum tree_code code, int uns, unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */ HOST_WIDE_INT hnum_orig, unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */ HOST_WIDE_INT hden_orig, unsigned HOST_WIDE_INT *lquo, HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem, HOST_WIDE_INT *hrem) { int quo_neg = 0; HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */ HOST_WIDE_INT den[4], quo[4]; int i, j; unsigned HOST_WIDE_INT work; unsigned HOST_WIDE_INT carry = 0; unsigned HOST_WIDE_INT lnum = lnum_orig; HOST_WIDE_INT hnum = hnum_orig; unsigned HOST_WIDE_INT lden = lden_orig; HOST_WIDE_INT hden = hden_orig; int overflow = 0; if (hden == 0 && lden == 0) overflow = 1, lden = 1; /* Calculate quotient sign and convert operands to unsigned. */ if (!uns) { if (hnum < 0) { quo_neg = ~ quo_neg; /* (minimum integer) / (-1) is the only overflow case. */ if (neg_double (lnum, hnum, &lnum, &hnum) && ((HOST_WIDE_INT) lden & hden) == -1) overflow = 1; } if (hden < 0) { quo_neg = ~ quo_neg; neg_double (lden, hden, &lden, &hden); } } if (hnum == 0 && hden == 0) { /* single precision */ *hquo = *hrem = 0; /* This unsigned division rounds toward zero. */ *lquo = lnum / lden; goto finish_up; } if (hnum == 0) { /* trivial case: dividend < divisor */ /* hden != 0 already checked. */ *hquo = *lquo = 0; *hrem = hnum; *lrem = lnum; goto finish_up; } memset (quo, 0, sizeof quo); memset (num, 0, sizeof num); /* to zero 9th element */ memset (den, 0, sizeof den); encode (num, lnum, hnum); encode (den, lden, hden); /* Special code for when the divisor < BASE. */ if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE) { /* hnum != 0 already checked. */ for (i = 4 - 1; i >= 0; i--) { work = num[i] + carry * BASE; quo[i] = work / lden; carry = work % lden; } } else { /* Full double precision division, with thanks to Don Knuth's "Seminumerical Algorithms". */ int num_hi_sig, den_hi_sig; unsigned HOST_WIDE_INT quo_est, scale; /* Find the highest nonzero divisor digit. 
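(The "digits" here are the BASE-radix halfwords produced by encode; DEN_HI_SIG ends up as the index of the most significant nonzero one.)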
*/ for (i = 4 - 1;; i--) if (den[i] != 0) { den_hi_sig = i; break; } /* Insure that the first digit of the divisor is at least BASE/2. This is required by the quotient digit estimation algorithm. */ scale = BASE / (den[den_hi_sig] + 1); if (scale > 1) { /* scale divisor and dividend */ carry = 0; for (i = 0; i <= 4 - 1; i++) { work = (num[i] * scale) + carry; num[i] = LOWPART (work); carry = HIGHPART (work); } num[4] = carry; carry = 0; for (i = 0; i <= 4 - 1; i++) { work = (den[i] * scale) + carry; den[i] = LOWPART (work); carry = HIGHPART (work); if (den[i] != 0) den_hi_sig = i; } } num_hi_sig = 4; /* Main loop */ for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) { /* Guess the next quotient digit, quo_est, by dividing the first two remaining dividend digits by the high order quotient digit. quo_est is never low and is at most 2 high. */ unsigned HOST_WIDE_INT tmp; num_hi_sig = i + den_hi_sig + 1; work = num[num_hi_sig] * BASE + num[num_hi_sig - 1]; if (num[num_hi_sig] != den[den_hi_sig]) quo_est = work / den[den_hi_sig]; else quo_est = BASE - 1; /* Refine quo_est so it's usually correct, and at most one high. */ tmp = work - quo_est * den[den_hi_sig]; if (tmp < BASE && (den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))) quo_est--; /* Try QUO_EST as the quotient digit, by multiplying the divisor by QUO_EST and subtracting from the remaining dividend. Keep in mind that QUO_EST is the I - 1st digit. */ carry = 0; for (j = 0; j <= den_hi_sig; j++) { work = quo_est * den[j] + carry; carry = HIGHPART (work); work = num[i + j] - LOWPART (work); num[i + j] = LOWPART (work); carry += HIGHPART (work) != 0; } /* If quo_est was high by one, then num[i] went negative and we need to correct things. */ if (num[num_hi_sig] < (HOST_WIDE_INT) carry) { quo_est--; carry = 0; /* add divisor back in */ for (j = 0; j <= den_hi_sig; j++) { work = num[i + j] + den[j] + carry; carry = HIGHPART (work); num[i + j] = LOWPART (work); } num [num_hi_sig] += carry; } /* Store the quotient digit. */ quo[i] = quo_est; } } decode (quo, lquo, hquo); finish_up: /* If result is negative, make it so. */ if (quo_neg) neg_double (*lquo, *hquo, lquo, hquo); /* Compute trial remainder: rem = num - (quo * den) */ mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem); neg_double (*lrem, *hrem, lrem, hrem); add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem); switch (code) { case TRUNC_DIV_EXPR: case TRUNC_MOD_EXPR: /* round toward zero */ case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */ return overflow; case FLOOR_DIV_EXPR: case FLOOR_MOD_EXPR: /* round toward negative infinity */ if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */ { /* quo = quo - 1; */ add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo); } else return overflow; break; case CEIL_DIV_EXPR: case CEIL_MOD_EXPR: /* round toward positive infinity */ if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */ { add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, lquo, hquo); } else return overflow; break; case ROUND_DIV_EXPR: case ROUND_MOD_EXPR: /* round to closest integer */ { unsigned HOST_WIDE_INT labs_rem = *lrem; HOST_WIDE_INT habs_rem = *hrem; unsigned HOST_WIDE_INT labs_den = lden, ltwice; HOST_WIDE_INT habs_den = hden, htwice; /* Get absolute values. 
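(For example, when rounding 8/3 the truncating division above gives quotient 2 and remainder 2; since 2 * |rem| exceeds |den|, the test below bumps the quotient away from zero to 3.)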
*/ if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem); if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den); /* If (2 * abs (lrem) >= abs (lden)) */ mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0, labs_rem, habs_rem, &ltwice, &htwice); if (((unsigned HOST_WIDE_INT) habs_den < (unsigned HOST_WIDE_INT) htwice) || (((unsigned HOST_WIDE_INT) habs_den == (unsigned HOST_WIDE_INT) htwice) && (labs_den < ltwice))) { if (*hquo < 0) /* quo = quo - 1; */ add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo); else /* quo = quo + 1; */ add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, lquo, hquo); } else return overflow; } break; default: abort (); } /* Compute true remainder: rem = num - (quo * den) */ mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem); neg_double (*lrem, *hrem, lrem, hrem); add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem); return overflow; } /* Return true if built-in mathematical function specified by CODE preserves the sign of its argument, i.e. -f(x) == f(-x). */ static bool negate_mathfn_p (enum built_in_function code) { switch (code) { case BUILT_IN_ASIN: case BUILT_IN_ASINF: case BUILT_IN_ASINL: case BUILT_IN_ATAN: case BUILT_IN_ATANF: case BUILT_IN_ATANL: case BUILT_IN_SIN: case BUILT_IN_SINF: case BUILT_IN_SINL: case BUILT_IN_TAN: case BUILT_IN_TANF: case BUILT_IN_TANL: return true; default: break; } return false; } /* Determine whether an expression T can be cheaply negated using the function negate_expr. */ static bool negate_expr_p (tree t) { unsigned HOST_WIDE_INT val; unsigned int prec; tree type; if (t == 0) return false; type = TREE_TYPE (t); STRIP_SIGN_NOPS (t); switch (TREE_CODE (t)) { case INTEGER_CST: if (TYPE_UNSIGNED (type) || ! flag_trapv) return true; /* Check that -CST will not overflow type. */ prec = TYPE_PRECISION (type); if (prec > HOST_BITS_PER_WIDE_INT) { if (TREE_INT_CST_LOW (t) != 0) return true; prec -= HOST_BITS_PER_WIDE_INT; val = TREE_INT_CST_HIGH (t); } else val = TREE_INT_CST_LOW (t); if (prec < HOST_BITS_PER_WIDE_INT) val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1; return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1)); case REAL_CST: case NEGATE_EXPR: return true; case COMPLEX_CST: return negate_expr_p (TREE_REALPART (t)) && negate_expr_p (TREE_IMAGPART (t)); case PLUS_EXPR: if (FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations) return false; /* -(A + B) -> (-B) - A. */ if (negate_expr_p (TREE_OPERAND (t, 1)) && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1))) return true; /* -(A + B) -> (-A) - B. */ return negate_expr_p (TREE_OPERAND (t, 0)); case MINUS_EXPR: /* We can't turn -(A-B) into B-A when we honor signed zeros. */ return (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations) && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)); case MULT_EXPR: if (TYPE_UNSIGNED (TREE_TYPE (t))) break; /* Fall through. */ case RDIV_EXPR: if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t)))) return negate_expr_p (TREE_OPERAND (t, 1)) || negate_expr_p (TREE_OPERAND (t, 0)); break; case NOP_EXPR: /* Negate -((double)float) as (double)(-float). */ if (TREE_CODE (type) == REAL_TYPE) { tree tem = strip_float_extensions (t); if (tem != t) return negate_expr_p (tem); } break; case CALL_EXPR: /* Negate -f(x) as f(-x). */ if (negate_mathfn_p (builtin_mathfn_code (t))) return negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1))); break; case RSHIFT_EXPR: /* Optimize -((int)x >> 31) into (unsigned)x >> 31. 
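(When x is 32 bits wide, the arithmetic shift yields 0 or -1 according to the sign bit, while the logical shift yields 0 or 1, which is exactly the negation of that value.)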
*/ if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST) { tree op1 = TREE_OPERAND (t, 1); if (TREE_INT_CST_HIGH (op1) == 0 && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1) == TREE_INT_CST_LOW (op1)) return true; } break; default: break; } return false; } /* Given T, an expression, return the negation of T. Allow for T to be null, in which case return null. */ static tree negate_expr (tree t) { tree type; tree tem; if (t == 0) return 0; type = TREE_TYPE (t); STRIP_SIGN_NOPS (t); switch (TREE_CODE (t)) { case INTEGER_CST: tem = fold_negate_const (t, type); if (! TREE_OVERFLOW (tem) || TYPE_UNSIGNED (type) || ! flag_trapv) return tem; break; case REAL_CST: tem = fold_negate_const (t, type); /* Two's complement FP formats, such as c4x, may overflow. */ if (! TREE_OVERFLOW (tem) || ! flag_trapping_math) return fold_convert (type, tem); break; case COMPLEX_CST: { tree rpart = negate_expr (TREE_REALPART (t)); tree ipart = negate_expr (TREE_IMAGPART (t)); if ((TREE_CODE (rpart) == REAL_CST && TREE_CODE (ipart) == REAL_CST) || (TREE_CODE (rpart) == INTEGER_CST && TREE_CODE (ipart) == INTEGER_CST)) return build_complex (type, rpart, ipart); } break; case NEGATE_EXPR: return fold_convert (type, TREE_OPERAND (t, 0)); case PLUS_EXPR: if (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations) { /* -(A + B) -> (-B) - A. */ if (negate_expr_p (TREE_OPERAND (t, 1)) && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1))) { tem = negate_expr (TREE_OPERAND (t, 1)); tem = fold (build2 (MINUS_EXPR, TREE_TYPE (t), tem, TREE_OPERAND (t, 0))); return fold_convert (type, tem); } /* -(A + B) -> (-A) - B. */ if (negate_expr_p (TREE_OPERAND (t, 0))) { tem = negate_expr (TREE_OPERAND (t, 0)); tem = fold (build2 (MINUS_EXPR, TREE_TYPE (t), tem, TREE_OPERAND (t, 1))); return fold_convert (type, tem); } } break; case MINUS_EXPR: /* - (A - B) -> B - A */ if ((! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations) && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1))) return fold_convert (type, fold (build2 (MINUS_EXPR, TREE_TYPE (t), TREE_OPERAND (t, 1), TREE_OPERAND (t, 0)))); break; case MULT_EXPR: if (TYPE_UNSIGNED (TREE_TYPE (t))) break; /* Fall through. */ case RDIV_EXPR: if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t)))) { tem = TREE_OPERAND (t, 1); if (negate_expr_p (tem)) return fold_convert (type, fold (build2 (TREE_CODE (t), TREE_TYPE (t), TREE_OPERAND (t, 0), negate_expr (tem)))); tem = TREE_OPERAND (t, 0); if (negate_expr_p (tem)) return fold_convert (type, fold (build2 (TREE_CODE (t), TREE_TYPE (t), negate_expr (tem), TREE_OPERAND (t, 1)))); } break; case NOP_EXPR: /* Convert -((double)float) into (double)(-float). */ if (TREE_CODE (type) == REAL_TYPE) { tem = strip_float_extensions (t); if (tem != t && negate_expr_p (tem)) return fold_convert (type, negate_expr (tem)); } break; case CALL_EXPR: /* Negate -f(x) as f(-x). */ if (negate_mathfn_p (builtin_mathfn_code (t)) && negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1)))) { tree fndecl, arg, arglist; fndecl = get_callee_fndecl (t); arg = negate_expr (TREE_VALUE (TREE_OPERAND (t, 1))); arglist = build_tree_list (NULL_TREE, arg); return build_function_call_expr (fndecl, arglist); } break; case RSHIFT_EXPR: /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */ if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST) { tree op1 = TREE_OPERAND (t, 1); if (TREE_INT_CST_HIGH (op1) == 0 && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1) == TREE_INT_CST_LOW (op1)) { tree ntype = TYPE_UNSIGNED (type) ? 
lang_hooks.types.signed_type (type) : lang_hooks.types.unsigned_type (type); tree temp = fold_convert (ntype, TREE_OPERAND (t, 0)); temp = fold (build2 (RSHIFT_EXPR, ntype, temp, op1)); return fold_convert (type, temp); } } break; default: break; } tem = fold (build1 (NEGATE_EXPR, TREE_TYPE (t), t)); return fold_convert (type, tem); } /* Split a tree IN into a constant, literal and variable parts that could be combined with CODE to make IN. "constant" means an expression with TREE_CONSTANT but that isn't an actual constant. CODE must be a commutative arithmetic operation. Store the constant part into *CONP, the literal in *LITP and return the variable part. If a part isn't present, set it to null. If the tree does not decompose in this way, return the entire tree as the variable part and the other parts as null. If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. In that case, we negate an operand that was subtracted. Except if it is a literal for which we use *MINUS_LITP instead. If NEGATE_P is true, we are negating all of IN, again except a literal for which we use *MINUS_LITP instead. If IN is itself a literal or constant, return it as appropriate. Note that we do not guarantee that any of the three values will be the same type as IN, but they will have the same signedness and mode. */ static tree split_tree (tree in, enum tree_code code, tree *conp, tree *litp, tree *minus_litp, int negate_p) { tree var = 0; *conp = 0; *litp = 0; *minus_litp = 0; /* Strip any conversions that don't change the machine mode or signedness. */ STRIP_SIGN_NOPS (in); if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST) *litp = in; else if (TREE_CODE (in) == code || (! FLOAT_TYPE_P (TREE_TYPE (in)) /* We can associate addition and subtraction together (even though the C standard doesn't say so) for integers because the value is not affected. For reals, the value might be affected, so we can't. */ && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR) || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR)))) { tree op0 = TREE_OPERAND (in, 0); tree op1 = TREE_OPERAND (in, 1); int neg1_p = TREE_CODE (in) == MINUS_EXPR; int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0; /* First see if either of the operands is a literal, then a constant. */ if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST) *litp = op0, op0 = 0; else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST) *litp = op1, neg_litp_p = neg1_p, op1 = 0; if (op0 != 0 && TREE_CONSTANT (op0)) *conp = op0, op0 = 0; else if (op1 != 0 && TREE_CONSTANT (op1)) *conp = op1, neg_conp_p = neg1_p, op1 = 0; /* If we haven't dealt with either operand, this is not a case we can decompose. Otherwise, VAR is either of the ones remaining, if any. */ if (op0 != 0 && op1 != 0) var = in; else if (op0 != 0) var = op0; else var = op1, neg_var_p = neg1_p; /* Now do any needed negations. */ if (neg_litp_p) *minus_litp = *litp, *litp = 0; if (neg_conp_p) *conp = negate_expr (*conp); if (neg_var_p) var = negate_expr (var); } else if (TREE_CONSTANT (in)) *conp = in; else var = in; if (negate_p) { if (*litp) *minus_litp = *litp, *litp = 0; else if (*minus_litp) *litp = *minus_litp, *minus_litp = 0; *conp = negate_expr (*conp); var = negate_expr (var); } return var; } /* Re-associate trees split by the above function. T1 and T2 are either expressions to associate or null. Return the new expression, if any. If we build an operation, do it in TYPE and with CODE. 
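For example, if T1 is already a MINUS_EXPR and T2 is a NEGATE_EXPR -B, the code below builds T1 - B directly instead of folding T1 + -B, avoiding the recursion problem noted below.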
*/ static tree associate_trees (tree t1, tree t2, enum tree_code code, tree type) { if (t1 == 0) return t2; else if (t2 == 0) return t1; /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't try to fold this since we will have infinite recursion. But do deal with any NEGATE_EXPRs. */ if (TREE_CODE (t1) == code || TREE_CODE (t2) == code || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR) { if (code == PLUS_EXPR) { if (TREE_CODE (t1) == NEGATE_EXPR) return build2 (MINUS_EXPR, type, fold_convert (type, t2), fold_convert (type, TREE_OPERAND (t1, 0))); else if (TREE_CODE (t2) == NEGATE_EXPR) return build2 (MINUS_EXPR, type, fold_convert (type, t1), fold_convert (type, TREE_OPERAND (t2, 0))); } return build2 (code, type, fold_convert (type, t1), fold_convert (type, t2)); } return fold (build2 (code, type, fold_convert (type, t1), fold_convert (type, t2))); } /* Combine two integer constants ARG1 and ARG2 under operation CODE to produce a new constant. If NOTRUNC is nonzero, do not truncate the result to fit the data type. */ tree int_const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc) { unsigned HOST_WIDE_INT int1l, int2l; HOST_WIDE_INT int1h, int2h; unsigned HOST_WIDE_INT low; HOST_WIDE_INT hi; unsigned HOST_WIDE_INT garbagel; HOST_WIDE_INT garbageh; tree t; tree type = TREE_TYPE (arg1); int uns = TYPE_UNSIGNED (type); int is_sizetype = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)); int overflow = 0; int no_overflow = 0; int1l = TREE_INT_CST_LOW (arg1); int1h = TREE_INT_CST_HIGH (arg1); int2l = TREE_INT_CST_LOW (arg2); int2h = TREE_INT_CST_HIGH (arg2); switch (code) { case BIT_IOR_EXPR: low = int1l | int2l, hi = int1h | int2h; break; case BIT_XOR_EXPR: low = int1l ^ int2l, hi = int1h ^ int2h; break; case BIT_AND_EXPR: low = int1l & int2l, hi = int1h & int2h; break; case RSHIFT_EXPR: int2l = -int2l; case LSHIFT_EXPR: /* It's unclear from the C standard whether shifts can overflow. The following code ignores overflow; perhaps a C standard interpretation ruling is needed. */ lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type), &low, &hi, !uns); no_overflow = 1; break; case RROTATE_EXPR: int2l = - int2l; case LROTATE_EXPR: lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type), &low, &hi); break; case PLUS_EXPR: overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi); break; case MINUS_EXPR: neg_double (int2l, int2h, &low, &hi); add_double (int1l, int1h, low, hi, &low, &hi); overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h); break; case MULT_EXPR: overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi); break; case TRUNC_DIV_EXPR: case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: case EXACT_DIV_EXPR: /* This is a shortcut for a common special case. */ if (int2h == 0 && (HOST_WIDE_INT) int2l > 0 && ! TREE_CONSTANT_OVERFLOW (arg1) && ! TREE_CONSTANT_OVERFLOW (arg2) && int1h == 0 && (HOST_WIDE_INT) int1l >= 0) { if (code == CEIL_DIV_EXPR) int1l += int2l - 1; low = int1l / int2l, hi = 0; break; } /* ... fall through ... */ case ROUND_DIV_EXPR: if (int2h == 0 && int2l == 1) { low = int1l, hi = int1h; break; } if (int1l == int2l && int1h == int2h && ! (int1l == 0 && int1h == 0)) { low = 1, hi = 0; break; } overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h, &low, &hi, &garbagel, &garbageh); break; case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR: /* This is a shortcut for a common special case. */ if (int2h == 0 && (HOST_WIDE_INT) int2l > 0 && ! TREE_CONSTANT_OVERFLOW (arg1) && ! 
TREE_CONSTANT_OVERFLOW (arg2) && int1h == 0 && (HOST_WIDE_INT) int1l >= 0) { if (code == CEIL_MOD_EXPR) int1l += int2l - 1; low = int1l % int2l, hi = 0; break; } /* ... fall through ... */ case ROUND_MOD_EXPR: overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h, &garbagel, &garbageh, &low, &hi); break; case MIN_EXPR: case MAX_EXPR: if (uns) low = (((unsigned HOST_WIDE_INT) int1h < (unsigned HOST_WIDE_INT) int2h) || (((unsigned HOST_WIDE_INT) int1h == (unsigned HOST_WIDE_INT) int2h) && int1l < int2l)); else low = (int1h < int2h || (int1h == int2h && int1l < int2l)); if (low == (code == MIN_EXPR)) low = int1l, hi = int1h; else low = int2l, hi = int2h; break; default: abort (); } /* If this is for a sizetype, can be represented as one (signed) HOST_WIDE_INT word, and doesn't overflow, use size_int since it caches constants. */ if (is_sizetype && ((hi == 0 && (HOST_WIDE_INT) low >= 0) || (hi == -1 && (HOST_WIDE_INT) low < 0)) && overflow == 0 && ! TREE_OVERFLOW (arg1) && ! TREE_OVERFLOW (arg2)) return size_int_type_wide (low, type); else { t = build_int_2 (low, hi); TREE_TYPE (t) = TREE_TYPE (arg1); } TREE_OVERFLOW (t) = ((notrunc ? (!uns || is_sizetype) && overflow : (force_fit_type (t, (!uns || is_sizetype) && overflow) && ! no_overflow)) | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)); /* If we're doing a size calculation, unsigned arithmetic does overflow. So check if force_fit_type truncated the value. */ if (is_sizetype && ! TREE_OVERFLOW (t) && (TREE_INT_CST_HIGH (t) != hi || TREE_INT_CST_LOW (t) != low)) TREE_OVERFLOW (t) = 1; TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2)); return t; } /* Combine two constants ARG1 and ARG2 under operation CODE to produce a new constant. We assume ARG1 and ARG2 have the same data type, or at least are the same kind of constant and the same machine mode. If NOTRUNC is nonzero, do not truncate the result to fit the data type. */ static tree const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc) { STRIP_NOPS (arg1); STRIP_NOPS (arg2); if (TREE_CODE (arg1) == INTEGER_CST) return int_const_binop (code, arg1, arg2, notrunc); if (TREE_CODE (arg1) == REAL_CST) { enum machine_mode mode; REAL_VALUE_TYPE d1; REAL_VALUE_TYPE d2; REAL_VALUE_TYPE value; tree t, type; d1 = TREE_REAL_CST (arg1); d2 = TREE_REAL_CST (arg2); type = TREE_TYPE (arg1); mode = TYPE_MODE (type); /* Don't perform operation if we honor signaling NaNs and either operand is a NaN. */ if (HONOR_SNANS (mode) && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2))) return NULL_TREE; /* Don't perform operation if it would raise a division by zero exception. */ if (code == RDIV_EXPR && REAL_VALUES_EQUAL (d2, dconst0) && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode))) return NULL_TREE; /* If either operand is a NaN, just return it. Otherwise, set up for floating-point trap; we return an overflow. 
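   As an editorial illustration (not from the original sources, and
   assuming the usual dconst1/dconst2 globals): a call such as

     const_binop (PLUS_EXPR, build_real (double_type_node, dconst1),
                  build_real (double_type_node, dconst2), 0)

   would fold to the REAL_CST 3.0, whereas an RDIV_EXPR whose divisor
   is zero returns NULL_TREE when trapping math is in effect, leaving
   the division to be performed (and to trap) at run time.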
*/ if (REAL_VALUE_ISNAN (d1)) return arg1; else if (REAL_VALUE_ISNAN (d2)) return arg2; REAL_ARITHMETIC (value, code, d1, d2); t = build_real (type, real_value_truncate (mode, value)); TREE_OVERFLOW (t) = (force_fit_type (t, 0) | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)); TREE_CONSTANT_OVERFLOW (t) = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2); return t; } if (TREE_CODE (arg1) == COMPLEX_CST) { tree type = TREE_TYPE (arg1); tree r1 = TREE_REALPART (arg1); tree i1 = TREE_IMAGPART (arg1); tree r2 = TREE_REALPART (arg2); tree i2 = TREE_IMAGPART (arg2); tree t; switch (code) { case PLUS_EXPR: t = build_complex (type, const_binop (PLUS_EXPR, r1, r2, notrunc), const_binop (PLUS_EXPR, i1, i2, notrunc)); break; case MINUS_EXPR: t = build_complex (type, const_binop (MINUS_EXPR, r1, r2, notrunc), const_binop (MINUS_EXPR, i1, i2, notrunc)); break; case MULT_EXPR: t = build_complex (type, const_binop (MINUS_EXPR, const_binop (MULT_EXPR, r1, r2, notrunc), const_binop (MULT_EXPR, i1, i2, notrunc), notrunc), const_binop (PLUS_EXPR, const_binop (MULT_EXPR, r1, i2, notrunc), const_binop (MULT_EXPR, i1, r2, notrunc), notrunc)); break; case RDIV_EXPR: { tree magsquared = const_binop (PLUS_EXPR, const_binop (MULT_EXPR, r2, r2, notrunc), const_binop (MULT_EXPR, i2, i2, notrunc), notrunc); t = build_complex (type, const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1)) ? TRUNC_DIV_EXPR : RDIV_EXPR, const_binop (PLUS_EXPR, const_binop (MULT_EXPR, r1, r2, notrunc), const_binop (MULT_EXPR, i1, i2, notrunc), notrunc), magsquared, notrunc), const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1)) ? TRUNC_DIV_EXPR : RDIV_EXPR, const_binop (MINUS_EXPR, const_binop (MULT_EXPR, i1, r2, notrunc), const_binop (MULT_EXPR, r1, i2, notrunc), notrunc), magsquared, notrunc)); } break; default: abort (); } return t; } return 0; } /* These are the hash table functions for the hash table of INTEGER_CST nodes of a sizetype. */ /* Return the hash code code X, an INTEGER_CST. */ static hashval_t size_htab_hash (const void *x) { tree t = (tree) x; return (TREE_INT_CST_HIGH (t) ^ TREE_INT_CST_LOW (t) ^ htab_hash_pointer (TREE_TYPE (t)) ^ (TREE_OVERFLOW (t) << 20)); } /* Return nonzero if the value represented by *X (an INTEGER_CST tree node) is the same as that given by *Y, which is the same. */ static int size_htab_eq (const void *x, const void *y) { tree xt = (tree) x; tree yt = (tree) y; return (TREE_INT_CST_HIGH (xt) == TREE_INT_CST_HIGH (yt) && TREE_INT_CST_LOW (xt) == TREE_INT_CST_LOW (yt) && TREE_TYPE (xt) == TREE_TYPE (yt) && TREE_OVERFLOW (xt) == TREE_OVERFLOW (yt)); } /* Return an INTEGER_CST with value whose low-order HOST_BITS_PER_WIDE_INT bits are given by NUMBER and of the sizetype represented by KIND. */ tree size_int_wide (HOST_WIDE_INT number, enum size_type_kind kind) { return size_int_type_wide (number, sizetype_tab[(int) kind]); } /* Likewise, but the desired type is specified explicitly. */ static GTY (()) tree new_const; static GTY ((if_marked ("ggc_marked_p"), param_is (union tree_node))) htab_t size_htab; tree size_int_type_wide (HOST_WIDE_INT number, tree type) { void **slot; if (size_htab == 0) { size_htab = htab_create_ggc (1024, size_htab_hash, size_htab_eq, NULL); new_const = make_node (INTEGER_CST); } /* Adjust NEW_CONST to be the constant we want. If it's already in the hash table, we return the value from the hash table. Otherwise, we place that in the hash table and make a new node for the next time. 
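   For illustration (editorial sketch): because the node is entered in
   the hash table, repeated requests for the same value and type hand
   back the very same tree, e.g.

     tree a = size_int_type_wide (4, sizetype);
     tree b = size_int_type_wide (4, sizetype);
       (a == b holds, pointer equality)

   while a value seen for the first time consumes NEW_CONST, which is
   then replaced by a freshly allocated scratch node for the next call.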
*/ TREE_INT_CST_LOW (new_const) = number; TREE_INT_CST_HIGH (new_const) = number < 0 ? -1 : 0; TREE_TYPE (new_const) = type; TREE_OVERFLOW (new_const) = TREE_CONSTANT_OVERFLOW (new_const) = force_fit_type (new_const, 0); slot = htab_find_slot (size_htab, new_const, INSERT); if (*slot == 0) { tree t = new_const; *slot = new_const; new_const = make_node (INTEGER_CST); return t; } else return (tree) *slot; } /* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE is a tree code. The type of the result is taken from the operands. Both must be the same type integer type and it must be a size type. If the operands are constant, so is the result. */ tree size_binop (enum tree_code code, tree arg0, tree arg1) { tree type = TREE_TYPE (arg0); if (TREE_CODE (type) != INTEGER_TYPE || ! TYPE_IS_SIZETYPE (type) || type != TREE_TYPE (arg1)) abort (); /* Handle the special case of two integer constants faster. */ if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST) { /* And some specific cases even faster than that. */ if (code == PLUS_EXPR && integer_zerop (arg0)) return arg1; else if ((code == MINUS_EXPR || code == PLUS_EXPR) && integer_zerop (arg1)) return arg0; else if (code == MULT_EXPR && integer_onep (arg0)) return arg1; /* Handle general case of two integer constants. */ return int_const_binop (code, arg0, arg1, 0); } if (arg0 == error_mark_node || arg1 == error_mark_node) return error_mark_node; return fold (build2 (code, type, arg0, arg1)); } /* Given two values, either both of sizetype or both of bitsizetype, compute the difference between the two values. Return the value in signed type corresponding to the type of the operands. */ tree size_diffop (tree arg0, tree arg1) { tree type = TREE_TYPE (arg0); tree ctype; if (TREE_CODE (type) != INTEGER_TYPE || ! TYPE_IS_SIZETYPE (type) || type != TREE_TYPE (arg1)) abort (); /* If the type is already signed, just do the simple thing. */ if (!TYPE_UNSIGNED (type)) return size_binop (MINUS_EXPR, arg0, arg1); ctype = (type == bitsizetype || type == ubitsizetype ? sbitsizetype : ssizetype); /* If either operand is not a constant, do the conversions to the signed type and subtract. The hardware will do the right thing with any overflow in the subtraction. */ if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST) return size_binop (MINUS_EXPR, fold_convert (ctype, arg0), fold_convert (ctype, arg1)); /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE. Otherwise, subtract the other way, convert to CTYPE (we know that can't overflow) and negate (which can't either). Special-case a result of zero while we're here. */ if (tree_int_cst_equal (arg0, arg1)) return fold_convert (ctype, integer_zero_node); else if (tree_int_cst_lt (arg1, arg0)) return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1)); else return size_binop (MINUS_EXPR, fold_convert (ctype, integer_zero_node), fold_convert (ctype, size_binop (MINUS_EXPR, arg1, arg0))); } /* Attempt to fold type conversion operation CODE of expression ARG1 to type TYPE. If no simplification can be done return NULL_TREE. */ static tree fold_convert_const (enum tree_code code, tree type, tree arg1) { int overflow = 0; tree t; if (TREE_TYPE (arg1) == type) return arg1; if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)) { if (TREE_CODE (arg1) == INTEGER_CST) { /* If we would build a constant wider than GCC supports, leave the conversion unfolded. 
*/ if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT) return NULL_TREE; /* If we are trying to make a sizetype for a small integer, use size_int to pick up cached types to reduce duplicate nodes. */ if (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type) && !TREE_CONSTANT_OVERFLOW (arg1) && compare_tree_int (arg1, 10000) < 0) return size_int_type_wide (TREE_INT_CST_LOW (arg1), type); /* Given an integer constant, make new constant with new type, appropriately sign-extended or truncated. */ t = build_int_2 (TREE_INT_CST_LOW (arg1), TREE_INT_CST_HIGH (arg1)); TREE_TYPE (t) = type; /* Indicate an overflow if (1) ARG1 already overflowed, or (2) force_fit_type indicates an overflow. Tell force_fit_type that an overflow has already occurred if ARG1 is a too-large unsigned value and T is signed. But don't indicate an overflow if converting a pointer. */ TREE_OVERFLOW (t) = ((force_fit_type (t, (TREE_INT_CST_HIGH (arg1) < 0 && (TYPE_UNSIGNED (type) < TYPE_UNSIGNED (TREE_TYPE (arg1))))) && ! POINTER_TYPE_P (TREE_TYPE (arg1))) || TREE_OVERFLOW (arg1)); TREE_CONSTANT_OVERFLOW (t) = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1); return t; } else if (TREE_CODE (arg1) == REAL_CST) { /* The following code implements the floating point to integer conversion rules required by the Java Language Specification, that IEEE NaNs are mapped to zero and values that overflow the target precision saturate, i.e. values greater than INT_MAX are mapped to INT_MAX, and values less than INT_MIN are mapped to INT_MIN. These semantics are allowed by the C and C++ standards that simply state that the behavior of FP-to-integer conversion is unspecified upon overflow. */ HOST_WIDE_INT high, low; REAL_VALUE_TYPE r; REAL_VALUE_TYPE x = TREE_REAL_CST (arg1); switch (code) { case FIX_TRUNC_EXPR: real_trunc (&r, VOIDmode, &x); break; case FIX_CEIL_EXPR: real_ceil (&r, VOIDmode, &x); break; case FIX_FLOOR_EXPR: real_floor (&r, VOIDmode, &x); break; case FIX_ROUND_EXPR: real_round (&r, VOIDmode, &x); break; default: abort (); } /* If R is NaN, return zero and show we have an overflow. */ if (REAL_VALUE_ISNAN (r)) { overflow = 1; high = 0; low = 0; } /* See if R is less than the lower bound or greater than the upper bound. */ if (! overflow) { tree lt = TYPE_MIN_VALUE (type); REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt); if (REAL_VALUES_LESS (r, l)) { overflow = 1; high = TREE_INT_CST_HIGH (lt); low = TREE_INT_CST_LOW (lt); } } if (! overflow) { tree ut = TYPE_MAX_VALUE (type); if (ut) { REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut); if (REAL_VALUES_LESS (u, r)) { overflow = 1; high = TREE_INT_CST_HIGH (ut); low = TREE_INT_CST_LOW (ut); } } } if (! overflow) REAL_VALUE_TO_INT (&low, &high, r); t = build_int_2 (low, high); TREE_TYPE (t) = type; TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow); TREE_CONSTANT_OVERFLOW (t) = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1); return t; } } else if (TREE_CODE (type) == REAL_TYPE) { if (TREE_CODE (arg1) == INTEGER_CST) return build_real_from_int_cst (type, arg1); if (TREE_CODE (arg1) == REAL_CST) { if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1))) { /* We make a copy of ARG1 so that we don't modify an existing constant tree. 
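   For example (an editorial note; NAN_DOUBLE names a hypothetical
   REAL_CST operand): converting a double NaN constant to float takes
   this path,

     tree t = fold_convert_const (NOP_EXPR, float_type_node, nan_double);

   and copy_node hands back a fresh node whose TREE_TYPE is simply
   rewritten, so a NaN constant shared by other expressions is never
   modified in place.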
*/ t = copy_node (arg1); TREE_TYPE (t) = type; return t; } t = build_real (type, real_value_truncate (TYPE_MODE (type), TREE_REAL_CST (arg1))); TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | force_fit_type (t, 0); TREE_CONSTANT_OVERFLOW (t) = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1); return t; } } return NULL_TREE; } /* Convert expression ARG to type TYPE. Used by the middle-end for simple conversions in preference to calling the front-end's convert. */ tree fold_convert (tree type, tree arg) { tree orig = TREE_TYPE (arg); tree tem; if (type == orig) return arg; if (TREE_CODE (arg) == ERROR_MARK || TREE_CODE (type) == ERROR_MARK || TREE_CODE (orig) == ERROR_MARK) return error_mark_node; if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig) || lang_hooks.types_compatible_p (TYPE_MAIN_VARIANT (type), TYPE_MAIN_VARIANT (orig))) return fold (build1 (NOP_EXPR, type, arg)); if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type) || TREE_CODE (type) == OFFSET_TYPE) { if (TREE_CODE (arg) == INTEGER_CST) { tem = fold_convert_const (NOP_EXPR, type, arg); if (tem != NULL_TREE) return tem; } if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig) || TREE_CODE (orig) == OFFSET_TYPE) return fold (build1 (NOP_EXPR, type, arg)); if (TREE_CODE (orig) == COMPLEX_TYPE) { tem = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg)); return fold_convert (type, tem); } if (TREE_CODE (orig) == VECTOR_TYPE && GET_MODE_SIZE (TYPE_MODE (type)) == GET_MODE_SIZE (TYPE_MODE (orig))) return fold (build1 (NOP_EXPR, type, arg)); } else if (TREE_CODE (type) == REAL_TYPE) { if (TREE_CODE (arg) == INTEGER_CST) { tem = fold_convert_const (FLOAT_EXPR, type, arg); if (tem != NULL_TREE) return tem; } else if (TREE_CODE (arg) == REAL_CST) { tem = fold_convert_const (NOP_EXPR, type, arg); if (tem != NULL_TREE) return tem; } if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)) return fold (build1 (FLOAT_EXPR, type, arg)); if (TREE_CODE (orig) == REAL_TYPE) return fold (build1 (flag_float_store ? CONVERT_EXPR : NOP_EXPR, type, arg)); if (TREE_CODE (orig) == COMPLEX_TYPE) { tem = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg)); return fold_convert (type, tem); } } else if (TREE_CODE (type) == COMPLEX_TYPE) { if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig) || TREE_CODE (orig) == REAL_TYPE) return build2 (COMPLEX_EXPR, type, fold_convert (TREE_TYPE (type), arg), fold_convert (TREE_TYPE (type), integer_zero_node)); if (TREE_CODE (orig) == COMPLEX_TYPE) { tree rpart, ipart; if (TREE_CODE (arg) == COMPLEX_EXPR) { rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0)); ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1)); return fold (build2 (COMPLEX_EXPR, type, rpart, ipart)); } arg = save_expr (arg); rpart = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg)); ipart = fold (build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg)); rpart = fold_convert (TREE_TYPE (type), rpart); ipart = fold_convert (TREE_TYPE (type), ipart); return fold (build2 (COMPLEX_EXPR, type, rpart, ipart)); } } else if (TREE_CODE (type) == VECTOR_TYPE) { if ((INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)) && GET_MODE_SIZE (TYPE_MODE (type)) == GET_MODE_SIZE (TYPE_MODE (orig))) return fold (build1 (NOP_EXPR, type, arg)); if (TREE_CODE (orig) == VECTOR_TYPE && GET_MODE_SIZE (TYPE_MODE (type)) == GET_MODE_SIZE (TYPE_MODE (orig))) return fold (build1 (NOP_EXPR, type, arg)); } else if (VOID_TYPE_P (type)) return fold (build1 (CONVERT_EXPR, type, arg)); abort (); } /* Return an expr equal to X but certainly not valid as an lvalue. 
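   For example (editorial illustration, with hypothetical operands):
   applied to a PARM_DECL the result is a NON_LVALUE_EXPR wrapper, so a
   later attempt to assign through it is rejected, while something that
   is already not an lvalue is passed through untouched:

     tree a = non_lvalue (parm);
       (NON_LVALUE_EXPR <parm>)
     tree b = non_lvalue (build2 (PLUS_EXPR, type, x, y));
       (returned as-is)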
*/ tree non_lvalue (tree x) { /* We only need to wrap lvalue tree codes. */ switch (TREE_CODE (x)) { case VAR_DECL: case PARM_DECL: case RESULT_DECL: case LABEL_DECL: case FUNCTION_DECL: case SSA_NAME: case COMPONENT_REF: case INDIRECT_REF: case ARRAY_REF: case ARRAY_RANGE_REF: case BIT_FIELD_REF: case BUFFER_REF: case OBJ_TYPE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case SAVE_EXPR: case UNSAVE_EXPR: case TRY_CATCH_EXPR: case WITH_CLEANUP_EXPR: case COMPOUND_EXPR: case MODIFY_EXPR: case TARGET_EXPR: case COND_EXPR: case BIND_EXPR: case MIN_EXPR: case MAX_EXPR: break; default: /* Assume the worst for front-end tree codes. */ if ((int)TREE_CODE (x) >= NUM_TREE_CODES) break; return x; } return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x); } /* Nonzero means lvalues are limited to those valid in pedantic ANSI C. Zero means allow extended lvalues. */ int pedantic_lvalues; /* When pedantic, return an expr equal to X but certainly not valid as a pedantic lvalue. Otherwise, return X. */ tree pedantic_non_lvalue (tree x) { if (pedantic_lvalues) return non_lvalue (x); else return x; } /* Given a tree comparison code, return the code that is the logical inverse of the given code. It is not safe to do this for floating-point comparisons, except for NE_EXPR and EQ_EXPR, so we receive a machine mode as well: if reversing the comparison is unsafe, return ERROR_MARK. */ static enum tree_code invert_tree_comparison (enum tree_code code, bool honor_nans) { if (honor_nans && flag_trapping_math) return ERROR_MARK; switch (code) { case EQ_EXPR: return NE_EXPR; case NE_EXPR: return EQ_EXPR; case GT_EXPR: return honor_nans ? UNLE_EXPR : LE_EXPR; case GE_EXPR: return honor_nans ? UNLT_EXPR : LT_EXPR; case LT_EXPR: return honor_nans ? UNGE_EXPR : GE_EXPR; case LE_EXPR: return honor_nans ? UNGT_EXPR : GT_EXPR; case LTGT_EXPR: return UNEQ_EXPR; case UNEQ_EXPR: return LTGT_EXPR; case UNGT_EXPR: return LE_EXPR; case UNGE_EXPR: return LT_EXPR; case UNLT_EXPR: return GE_EXPR; case UNLE_EXPR: return GT_EXPR; case ORDERED_EXPR: return UNORDERED_EXPR; case UNORDERED_EXPR: return ORDERED_EXPR; default: abort (); } } /* Similar, but return the comparison that results if the operands are swapped. This is safe for floating-point. */ enum tree_code swap_tree_comparison (enum tree_code code) { switch (code) { case EQ_EXPR: case NE_EXPR: return code; case GT_EXPR: return LT_EXPR; case GE_EXPR: return LE_EXPR; case LT_EXPR: return GT_EXPR; case LE_EXPR: return GE_EXPR; default: abort (); } } /* Convert a comparison tree code from an enum tree_code representation into a compcode bit-based encoding. This function is the inverse of compcode_to_comparison. */ static enum comparison_code comparison_to_compcode (enum tree_code code) { switch (code) { case LT_EXPR: return COMPCODE_LT; case EQ_EXPR: return COMPCODE_EQ; case LE_EXPR: return COMPCODE_LE; case GT_EXPR: return COMPCODE_GT; case NE_EXPR: return COMPCODE_NE; case GE_EXPR: return COMPCODE_GE; case ORDERED_EXPR: return COMPCODE_ORD; case UNORDERED_EXPR: return COMPCODE_UNORD; case UNLT_EXPR: return COMPCODE_UNLT; case UNEQ_EXPR: return COMPCODE_UNEQ; case UNLE_EXPR: return COMPCODE_UNLE; case UNGT_EXPR: return COMPCODE_UNGT; case LTGT_EXPR: return COMPCODE_LTGT; case UNGE_EXPR: return COMPCODE_UNGE; default: abort (); } } /* Convert a compcode bit-based encoding of a comparison operator back to GCC's enum tree_code representation. This function is the inverse of comparison_to_compcode. 
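   As an editorial illustration of why the bit-based encoding is handy
   (assuming the usual layout in which the LT, GT, EQ and UNORD
   conditions each occupy their own bit): ANDing or ORing two compcodes
   combines the comparisons directly, e.g.

     comparison_to_compcode (LE_EXPR) & comparison_to_compcode (GE_EXPR)
       == COMPCODE_EQ

   which is how combine_comparisons below can turn (a <= b) && (a >= b)
   into a == b before mapping the result back with this function.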
*/ static enum tree_code compcode_to_comparison (enum comparison_code code) { switch (code) { case COMPCODE_LT: return LT_EXPR; case COMPCODE_EQ: return EQ_EXPR; case COMPCODE_LE: return LE_EXPR; case COMPCODE_GT: return GT_EXPR; case COMPCODE_NE: return NE_EXPR; case COMPCODE_GE: return GE_EXPR; case COMPCODE_ORD: return ORDERED_EXPR; case COMPCODE_UNORD: return UNORDERED_EXPR; case COMPCODE_UNLT: return UNLT_EXPR; case COMPCODE_UNEQ: return UNEQ_EXPR; case COMPCODE_UNLE: return UNLE_EXPR; case COMPCODE_UNGT: return UNGT_EXPR; case COMPCODE_LTGT: return LTGT_EXPR; case COMPCODE_UNGE: return UNGE_EXPR; default: abort (); } } /* Return a tree for the comparison which is the combination of doing the AND or OR (depending on CODE) of the two operations LCODE and RCODE on the identical operands LL_ARG and LR_ARG. Take into account the possibility of trapping if the mode has NaNs, and return NULL_TREE if this makes the transformation invalid. */ tree combine_comparisons (enum tree_code code, enum tree_code lcode, enum tree_code rcode, tree truth_type, tree ll_arg, tree lr_arg) { bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg))); enum comparison_code lcompcode = comparison_to_compcode (lcode); enum comparison_code rcompcode = comparison_to_compcode (rcode); enum comparison_code compcode; switch (code) { case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: compcode = lcompcode & rcompcode; break; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: compcode = lcompcode | rcompcode; break; default: return NULL_TREE; } if (!honor_nans) { /* Eliminate unordered comparisons, as well as LTGT and ORD which are not used unless the mode has NaNs. */ compcode &= ~COMPCODE_UNORD; if (compcode == COMPCODE_LTGT) compcode = COMPCODE_NE; else if (compcode == COMPCODE_ORD) compcode = COMPCODE_TRUE; } else if (flag_trapping_math) { /* Check that the original operation and the optimized ones will trap under the same condition. */ bool ltrap = (lcompcode & COMPCODE_UNORD) == 0 && (lcompcode != COMPCODE_EQ) && (lcompcode != COMPCODE_ORD); bool rtrap = (rcompcode & COMPCODE_UNORD) == 0 && (rcompcode != COMPCODE_EQ) && (rcompcode != COMPCODE_ORD); bool trap = (compcode & COMPCODE_UNORD) == 0 && (compcode != COMPCODE_EQ) && (compcode != COMPCODE_ORD); /* In a short-circuited boolean expression the LHS might be such that the RHS, if evaluated, will never trap. For example, in ORD (x, y) && (x < y), we evaluate the RHS only if neither x nor y is NaN. (This is a mixed blessing: for example, the expression above will never trap, hence optimizing it to x < y would be invalid). */ if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD)) || (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD))) rtrap = false; /* If the comparison was short-circuited, and only the RHS trapped, we may now generate a spurious trap. */ if (rtrap && !ltrap && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)) return NULL_TREE; /* If we changed the conditions that cause a trap, we lose. */ if ((ltrap || rtrap) != trap) return NULL_TREE; } if (compcode == COMPCODE_TRUE) return constant_boolean_node (true, truth_type); else if (compcode == COMPCODE_FALSE) return constant_boolean_node (false, truth_type); else return fold (build2 (compcode_to_comparison (compcode), truth_type, ll_arg, lr_arg)); } /* Return nonzero if CODE is a tree code that represents a truth value. 
*/ static int truth_value_p (enum tree_code code) { return (TREE_CODE_CLASS (code) == '<' || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR); } /* Return nonzero if two operands (typically of the same tree node) are necessarily equal. If either argument has side-effects this function returns zero. FLAGS modifies behavior as follows: If OEP_ONLY_CONST is set, only return nonzero for constants. This function tests whether the operands are indistinguishable; it does not test whether they are equal using C's == operation. The distinction is important for IEEE floating point, because (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and (2) two NaNs may be indistinguishable, but NaN!=NaN. If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself even though it may hold multiple values during a function. This is because a GCC tree node guarantees that nothing else is executed between the evaluation of its "operands" (which may often be evaluated in arbitrary order). Hence if the operands themselves don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the same value in each operand/subexpression. Hence leaving OEP_ONLY_CONST unset means assuming isochronic (or instantaneous) tree equivalence. Unless comparing arbitrary expression trees, such as from different statements, this flag can usually be left unset. If OEP_PURE_SAME is set, then pure functions with identical arguments are considered the same. It is used when the caller has other ways to ensure that global memory is unchanged in between. */ int operand_equal_p (tree arg0, tree arg1, unsigned int flags) { /* If either is ERROR_MARK, they aren't equal. */ if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK) return 0; /* If both types don't have the same signedness, then we can't consider them equal. We must check this before the STRIP_NOPS calls because they may change the signedness of the arguments. */ if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1))) return 0; STRIP_NOPS (arg0); STRIP_NOPS (arg1); if (TREE_CODE (arg0) != TREE_CODE (arg1) /* This is needed for conversions and for COMPONENT_REF. Might as well play it safe and always test this. */ || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1))) return 0; /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal. We don't care about side effects in that case because the SAVE_EXPR takes care of that for us. In all other cases, two expressions are equal if they have no side effects. If we have two identical expressions with side effects that should be treated the same due to the only side effects being identical SAVE_EXPR's, that will be detected in the recursive calls below. */ if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST) && (TREE_CODE (arg0) == SAVE_EXPR || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1)))) return 1; /* Next handle constant cases, those for which we can return 1 even if ONLY_CONST is set. */ if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1)) switch (TREE_CODE (arg0)) { case INTEGER_CST: return (! TREE_CONSTANT_OVERFLOW (arg0) && ! TREE_CONSTANT_OVERFLOW (arg1) && tree_int_cst_equal (arg0, arg1)); case REAL_CST: return (! TREE_CONSTANT_OVERFLOW (arg0) && ! 
TREE_CONSTANT_OVERFLOW (arg1) && REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0), TREE_REAL_CST (arg1))); case VECTOR_CST: { tree v1, v2; if (TREE_CONSTANT_OVERFLOW (arg0) || TREE_CONSTANT_OVERFLOW (arg1)) return 0; v1 = TREE_VECTOR_CST_ELTS (arg0); v2 = TREE_VECTOR_CST_ELTS (arg1); while (v1 && v2) { if (!operand_equal_p (TREE_VALUE (v1), TREE_VALUE (v2), flags)) return 0; v1 = TREE_CHAIN (v1); v2 = TREE_CHAIN (v2); } return 1; } case COMPLEX_CST: return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1), flags) && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1), flags)); case STRING_CST: return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1) && ! memcmp (TREE_STRING_POINTER (arg0), TREE_STRING_POINTER (arg1), TREE_STRING_LENGTH (arg0))); case ADDR_EXPR: return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0); default: break; } if (flags & OEP_ONLY_CONST) return 0; switch (TREE_CODE_CLASS (TREE_CODE (arg0))) { case '1': /* Two conversions are equal only if signedness and modes match. */ if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR) && (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1)))) return 0; return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), flags); case '<': case '2': if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), flags) && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), flags)) return 1; /* For commutative ops, allow the other order. */ return (commutative_tree_code (TREE_CODE (arg0)) && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), flags) && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), flags)); case 'r': /* If either of the pointer (or reference) expressions we are dereferencing contain a side effect, these cannot be equal. */ if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1)) return 0; switch (TREE_CODE (arg0)) { case INDIRECT_REF: case REALPART_EXPR: case IMAGPART_EXPR: return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), flags); case COMPONENT_REF: case ARRAY_REF: case ARRAY_RANGE_REF: return (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), flags) && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), flags)); case BIT_FIELD_REF: return (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), flags) && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), flags) && operand_equal_p (TREE_OPERAND (arg0, 2), TREE_OPERAND (arg1, 2), flags)); default: return 0; } case 'e': switch (TREE_CODE (arg0)) { case ADDR_EXPR: case TRUTH_NOT_EXPR: return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), flags); case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), flags) && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), flags); case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: return (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), flags) && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), flags)) || (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), flags) && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), flags)); case CALL_EXPR: /* If the CALL_EXPRs call different functions, then they clearly can not be equal. */ if (! 
operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), flags)) return 0; { unsigned int cef = call_expr_flags (arg0); if (flags & OEP_PURE_SAME) cef &= ECF_CONST | ECF_PURE; else cef &= ECF_CONST; if (!cef) return 0; } /* Now see if all the arguments are the same. operand_equal_p does not handle TREE_LIST, so we walk the operands here feeding them to operand_equal_p. */ arg0 = TREE_OPERAND (arg0, 1); arg1 = TREE_OPERAND (arg1, 1); while (arg0 && arg1) { if (! operand_equal_p (TREE_VALUE (arg0), TREE_VALUE (arg1), flags)) return 0; arg0 = TREE_CHAIN (arg0); arg1 = TREE_CHAIN (arg1); } /* If we get here and both argument lists are exhausted then the CALL_EXPRs are equal. */ return ! (arg0 || arg1); default: return 0; } case 'd': /* Consider __builtin_sqrt equal to sqrt. */ return (TREE_CODE (arg0) == FUNCTION_DECL && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1) && DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1) && DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1)); default: return 0; } } /* Similar to operand_equal_p, but see if ARG0 might have been made by shorten_compare from ARG1 when ARG1 was being compared with OTHER. When in doubt, return 0. */ static int operand_equal_for_comparison_p (tree arg0, tree arg1, tree other) { int unsignedp1, unsignedpo; tree primarg0, primarg1, primother; unsigned int correct_width; if (operand_equal_p (arg0, arg1, 0)) return 1; if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)) || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1))) return 0; /* Discard any conversions that don't change the modes of ARG0 and ARG1 and see if the inner values are the same. This removes any signedness comparison, which doesn't matter here. */ primarg0 = arg0, primarg1 = arg1; STRIP_NOPS (primarg0); STRIP_NOPS (primarg1); if (operand_equal_p (primarg0, primarg1, 0)) return 1; /* Duplicate what shorten_compare does to ARG1 and see if that gives the actual comparison operand, ARG0. First throw away any conversions to wider types already present in the operands. */ primarg1 = get_narrower (arg1, &unsignedp1); primother = get_narrower (other, &unsignedpo); correct_width = TYPE_PRECISION (TREE_TYPE (arg1)); if (unsignedp1 == unsignedpo && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width) { tree type = TREE_TYPE (arg0); /* Make sure shorter operand is extended the right way to match the longer operand. */ primarg1 = fold_convert (lang_hooks.types.signed_or_unsigned_type (unsignedp1, TREE_TYPE (primarg1)), primarg1); if (operand_equal_p (arg0, fold_convert (type, primarg1), 0)) return 1; } return 0; } /* See if ARG is an expression that is either a comparison or is performing arithmetic on comparisons. The comparisons must only be comparing two different values, which will be stored in *CVAL1 and *CVAL2; if they are nonzero it means that some operands have already been found. No variables may be used anywhere else in the expression except in the comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around the expression and save_expr needs to be called with CVAL1 and CVAL2. If this is true, return 1. Otherwise, return zero. */ static int twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p) { enum tree_code code = TREE_CODE (arg); char class = TREE_CODE_CLASS (code); /* We can handle some of the 'e' cases here. 
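   For illustration of the whole function (an editorial sketch with
   hypothetical operands): given ARG == (x < y) && (x == y),

     tree cval1 = 0, cval2 = 0;
     int save_p = 0;
     twoval_comparison_p (arg, &cval1, &cval2, &save_p);
       (returns 1 with cval1 == x and cval2 == y)

   while (x < y) && (x == z) fails, because z would need a third slot.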
*/ if (class == 'e' && code == TRUTH_NOT_EXPR) class = '1'; else if (class == 'e' && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR || code == COMPOUND_EXPR)) class = '2'; else if (class == 'e' && code == SAVE_EXPR && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0))) { /* If we've already found a CVAL1 or CVAL2, this expression is two complex to handle. */ if (*cval1 || *cval2) return 0; class = '1'; *save_p = 1; } switch (class) { case '1': return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p); case '2': return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p) && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2, save_p)); case 'c': return 1; case 'e': if (code == COND_EXPR) return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p) && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2, save_p) && twoval_comparison_p (TREE_OPERAND (arg, 2), cval1, cval2, save_p)); return 0; case '<': /* First see if we can handle the first operand, then the second. For the second operand, we know *CVAL1 can't be zero. It must be that one side of the comparison is each of the values; test for the case where this isn't true by failing if the two operands are the same. */ if (operand_equal_p (TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1), 0)) return 0; if (*cval1 == 0) *cval1 = TREE_OPERAND (arg, 0); else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0)) ; else if (*cval2 == 0) *cval2 = TREE_OPERAND (arg, 0); else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0)) ; else return 0; if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0)) ; else if (*cval2 == 0) *cval2 = TREE_OPERAND (arg, 1); else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0)) ; else return 0; return 1; default: return 0; } } /* ARG is a tree that is known to contain just arithmetic operations and comparisons. Evaluate the operations in the tree substituting NEW0 for any occurrence of OLD0 as an operand of a comparison and likewise for NEW1 and OLD1. */ static tree eval_subst (tree arg, tree old0, tree new0, tree old1, tree new1) { tree type = TREE_TYPE (arg); enum tree_code code = TREE_CODE (arg); char class = TREE_CODE_CLASS (code); /* We can handle some of the 'e' cases here. */ if (class == 'e' && code == TRUTH_NOT_EXPR) class = '1'; else if (class == 'e' && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR)) class = '2'; switch (class) { case '1': return fold (build1 (code, type, eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1))); case '2': return fold (build2 (code, type, eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1), eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1))); case 'e': switch (code) { case SAVE_EXPR: return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1); case COMPOUND_EXPR: return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1); case COND_EXPR: return fold (build3 (code, type, eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1), eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1), eval_subst (TREE_OPERAND (arg, 2), old0, new0, old1, new1))); default: break; } /* Fall through - ??? */ case '<': { tree arg0 = TREE_OPERAND (arg, 0); tree arg1 = TREE_OPERAND (arg, 1); /* We need to check both for exact equality and tree equality. The former will be true if the operand has a side-effect. In that case, we know the operand occurred exactly once. 
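   For example (editorial illustration, hypothetical operands): with
   the two values recorded by twoval_comparison_p above,

     eval_subst (build2 (LT_EXPR, type, x, y), x, a, y, b)

   rewrites the comparison into a < b; COMPOUND_EXPRs, COND_EXPRs,
   SAVE_EXPRs and TRUTH_NOT_EXPRs are walked recursively, so every
   comparison leaf receives the substitution.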
*/ if (arg0 == old0 || operand_equal_p (arg0, old0, 0)) arg0 = new0; else if (arg0 == old1 || operand_equal_p (arg0, old1, 0)) arg0 = new1; if (arg1 == old0 || operand_equal_p (arg1, old0, 0)) arg1 = new0; else if (arg1 == old1 || operand_equal_p (arg1, old1, 0)) arg1 = new1; return fold (build2 (code, type, arg0, arg1)); } default: return arg; } } /* Return a tree for the case when the result of an expression is RESULT converted to TYPE and OMITTED was previously an operand of the expression but is now not needed (e.g., we folded OMITTED * 0). If OMITTED has side effects, we must evaluate it. Otherwise, just do the conversion of RESULT to TYPE. */ tree omit_one_operand (tree type, tree result, tree omitted) { tree t = fold_convert (type, result); if (TREE_SIDE_EFFECTS (omitted)) return build2 (COMPOUND_EXPR, type, omitted, t); return non_lvalue (t); } /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */ static tree pedantic_omit_one_operand (tree type, tree result, tree omitted) { tree t = fold_convert (type, result); if (TREE_SIDE_EFFECTS (omitted)) return build2 (COMPOUND_EXPR, type, omitted, t); return pedantic_non_lvalue (t); } /* Return a tree for the case when the result of an expression is RESULT converted to TYPE and OMITTED1 and OMITTED2 were previously operands of the expression but are now not needed. If OMITTED1 or OMITTED2 has side effects, they must be evaluated. If both OMITTED1 and OMITTED2 have side effects, OMITTED1 is evaluated before OMITTED2. Otherwise, if neither has side effects, just do the conversion of RESULT to TYPE. */ tree omit_two_operands (tree type, tree result, tree omitted1, tree omitted2) { tree t = fold_convert (type, result); if (TREE_SIDE_EFFECTS (omitted2)) t = build2 (COMPOUND_EXPR, type, omitted2, t); if (TREE_SIDE_EFFECTS (omitted1)) t = build2 (COMPOUND_EXPR, type, omitted1, t); return TREE_CODE (t) != COMPOUND_EXPR ? non_lvalue (t) : t; } /* Return a simplified tree node for the truth-negation of ARG. This never alters ARG itself. We assume that ARG is an operation that returns a truth value (0 or 1). FIXME: one would think we would fold the result, but it causes problems with the dominator optimizer. */ tree invert_truthvalue (tree arg) { tree type = TREE_TYPE (arg); enum tree_code code = TREE_CODE (arg); if (code == ERROR_MARK) return arg; /* If this is a comparison, we can simply invert it, except for floating-point non-equality comparisons, in which case we just enclose a TRUTH_NOT_EXPR around what we have. */ if (TREE_CODE_CLASS (code) == '<') { tree op_type = TREE_TYPE (TREE_OPERAND (arg, 0)); if (FLOAT_TYPE_P (op_type) && flag_trapping_math && code != ORDERED_EXPR && code != UNORDERED_EXPR && code != NE_EXPR && code != EQ_EXPR) return build1 (TRUTH_NOT_EXPR, type, arg); else { code = invert_tree_comparison (code, HONOR_NANS (TYPE_MODE (op_type))); if (code == ERROR_MARK) return build1 (TRUTH_NOT_EXPR, type, arg); else return build2 (code, type, TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1)); } } switch (code) { case INTEGER_CST: return fold_convert (type, build_int_2 (integer_zerop (arg), 0)); case TRUTH_AND_EXPR: return build2 (TRUTH_OR_EXPR, type, invert_truthvalue (TREE_OPERAND (arg, 0)), invert_truthvalue (TREE_OPERAND (arg, 1))); case TRUTH_OR_EXPR: return build2 (TRUTH_AND_EXPR, type, invert_truthvalue (TREE_OPERAND (arg, 0)), invert_truthvalue (TREE_OPERAND (arg, 1))); case TRUTH_XOR_EXPR: /* Here we can invert either operand. 
We invert the first operand unless the second operand is a TRUTH_NOT_EXPR in which case our result is the XOR of the first operand with the inside of the negation of the second operand. */ if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR) return build2 (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0), TREE_OPERAND (TREE_OPERAND (arg, 1), 0)); else return build2 (TRUTH_XOR_EXPR, type, invert_truthvalue (TREE_OPERAND (arg, 0)), TREE_OPERAND (arg, 1)); case TRUTH_ANDIF_EXPR: return build2 (TRUTH_ORIF_EXPR, type, invert_truthvalue (TREE_OPERAND (arg, 0)), invert_truthvalue (TREE_OPERAND (arg, 1))); case TRUTH_ORIF_EXPR: return build2 (TRUTH_ANDIF_EXPR, type, invert_truthvalue (TREE_OPERAND (arg, 0)), invert_truthvalue (TREE_OPERAND (arg, 1))); case TRUTH_NOT_EXPR: return TREE_OPERAND (arg, 0); case COND_EXPR: return build3 (COND_EXPR, type, TREE_OPERAND (arg, 0), invert_truthvalue (TREE_OPERAND (arg, 1)), invert_truthvalue (TREE_OPERAND (arg, 2))); case COMPOUND_EXPR: return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0), invert_truthvalue (TREE_OPERAND (arg, 1))); case NON_LVALUE_EXPR: return invert_truthvalue (TREE_OPERAND (arg, 0)); case NOP_EXPR: if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE) break; case CONVERT_EXPR: case FLOAT_EXPR: return build1 (TREE_CODE (arg), type, invert_truthvalue (TREE_OPERAND (arg, 0))); case BIT_AND_EXPR: if (!integer_onep (TREE_OPERAND (arg, 1))) break; return build2 (EQ_EXPR, type, arg, fold_convert (type, integer_zero_node)); case SAVE_EXPR: return build1 (TRUTH_NOT_EXPR, type, arg); case CLEANUP_POINT_EXPR: return build1 (CLEANUP_POINT_EXPR, type, invert_truthvalue (TREE_OPERAND (arg, 0))); default: break; } if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE) abort (); return build1 (TRUTH_NOT_EXPR, type, arg); } /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both operands are another bit-wise operation with a common input. If so, distribute the bit operations to save an operation and possibly two if constants are involved. For example, convert (A | B) & (A | C) into A | (B & C) Further simplification will occur if B and C are constants. If this optimization cannot be done, 0 will be returned. */ static tree distribute_bit_expr (enum tree_code code, tree type, tree arg0, tree arg1) { tree common; tree left, right; if (TREE_CODE (arg0) != TREE_CODE (arg1) || TREE_CODE (arg0) == code || (TREE_CODE (arg0) != BIT_AND_EXPR && TREE_CODE (arg0) != BIT_IOR_EXPR)) return 0; if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)) { common = TREE_OPERAND (arg0, 0); left = TREE_OPERAND (arg0, 1); right = TREE_OPERAND (arg1, 1); } else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0)) { common = TREE_OPERAND (arg0, 0); left = TREE_OPERAND (arg0, 1); right = TREE_OPERAND (arg1, 0); } else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0)) { common = TREE_OPERAND (arg0, 1); left = TREE_OPERAND (arg0, 0); right = TREE_OPERAND (arg1, 1); } else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0)) { common = TREE_OPERAND (arg0, 1); left = TREE_OPERAND (arg0, 0); right = TREE_OPERAND (arg1, 0); } else return 0; return fold (build2 (TREE_CODE (arg0), type, common, fold (build2 (code, type, left, right)))); } /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER starting at BITPOS. The field is unsigned if UNSIGNEDP is nonzero. 
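   For instance (editorial illustration, with OBJ a hypothetical
   operand):

     make_bit_field_ref (obj, unsigned_type_node, 8, 16, 1)

   builds BIT_FIELD_REF <obj, 8, 16> with BIT_FIELD_REF_UNSIGNED set,
   i.e. an unsigned view of the eight bits starting at bit 16 of OBJ.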
*/ static tree make_bit_field_ref (tree inner, tree type, int bitsize, int bitpos, int unsignedp) { tree result = build3 (BIT_FIELD_REF, type, inner, size_int (bitsize), bitsize_int (bitpos)); BIT_FIELD_REF_UNSIGNED (result) = unsignedp; return result; } /* Optimize a bit-field compare. There are two cases: First is a compare against a constant and the second is a comparison of two items where the fields are at the same bit position relative to the start of a chunk (byte, halfword, word) large enough to contain it. In these cases we can avoid the shift implicit in bitfield extractions. For constants, we emit a compare of the shifted constant with the BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being compared. For two fields at the same position, we do the ANDs with the similar mask and compare the result of the ANDs. CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR. COMPARE_TYPE is the type of the comparison, and LHS and RHS are the left and right operands of the comparison, respectively. If the optimization described above can be done, we return the resulting tree. Otherwise we return zero. */ static tree optimize_bit_field_compare (enum tree_code code, tree compare_type, tree lhs, tree rhs) { HOST_WIDE_INT lbitpos, lbitsize, rbitpos, rbitsize, nbitpos, nbitsize; tree type = TREE_TYPE (lhs); tree signed_type, unsigned_type; int const_p = TREE_CODE (rhs) == INTEGER_CST; enum machine_mode lmode, rmode, nmode; int lunsignedp, runsignedp; int lvolatilep = 0, rvolatilep = 0; tree linner, rinner = NULL_TREE; tree mask; tree offset; /* Get all the information about the extractions being done. If the bit size if the same as the size of the underlying object, we aren't doing an extraction at all and so can do nothing. We also don't want to do anything if the inner expression is a PLACEHOLDER_EXPR since we then will no longer be able to replace it. */ linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode, &lunsignedp, &lvolatilep); if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0 || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR) return 0; if (!const_p) { /* If this is not a constant, we can only do something if bit positions, sizes, and signedness are the same. */ rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode, &runsignedp, &rvolatilep); if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize || lunsignedp != runsignedp || offset != 0 || TREE_CODE (rinner) == PLACEHOLDER_EXPR) return 0; } /* See if we can find a mode to refer to this field. We should be able to, but fail if we can't. */ nmode = get_best_mode (lbitsize, lbitpos, const_p ? TYPE_ALIGN (TREE_TYPE (linner)) : MIN (TYPE_ALIGN (TREE_TYPE (linner)), TYPE_ALIGN (TREE_TYPE (rinner))), word_mode, lvolatilep || rvolatilep); if (nmode == VOIDmode) return 0; /* Set signed and unsigned types of the precision of this mode for the shifts below. */ signed_type = lang_hooks.types.type_for_mode (nmode, 0); unsigned_type = lang_hooks.types.type_for_mode (nmode, 1); /* Compute the bit position and size for the new reference and our offset within it. If the new reference is the same size as the original, we won't optimize anything, so return zero. */ nbitsize = GET_MODE_BITSIZE (nmode); nbitpos = lbitpos & ~ (nbitsize - 1); lbitpos -= nbitpos; if (nbitsize == lbitsize) return 0; if (BYTES_BIG_ENDIAN) lbitpos = nbitsize - lbitsize - lbitpos; /* Make the mask to be used against the extracted field. 
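   As an editorial worked example (illustrative numbers only): with
   nbitsize == 32, lbitsize == 3 and lbitpos == 5, the all-ones
   constant is shifted left by 32 - 3 = 29 and then back right by
   32 - 3 - 5 = 24, leaving the mask 0xE0, i.e. exactly the three bits
   the field occupies within the 32-bit word that will actually be
   loaded.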
*/ mask = build_int_2 (~0, ~0); TREE_TYPE (mask) = unsigned_type; force_fit_type (mask, 0); mask = fold_convert (unsigned_type, mask); mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize), 0); mask = const_binop (RSHIFT_EXPR, mask, size_int (nbitsize - lbitsize - lbitpos), 0); if (! const_p) /* If not comparing with constant, just rework the comparison and return. */ return build2 (code, compare_type, build2 (BIT_AND_EXPR, unsigned_type, make_bit_field_ref (linner, unsigned_type, nbitsize, nbitpos, 1), mask), build2 (BIT_AND_EXPR, unsigned_type, make_bit_field_ref (rinner, unsigned_type, nbitsize, nbitpos, 1), mask)); /* Otherwise, we are handling the constant case. See if the constant is too big for the field. Warn and return a tree for 0 (false) if so. We do this not only for its own sake, but to avoid having to test for this error case below. If we didn't, we might generate wrong code. For unsigned fields, the constant shifted right by the field length should be all zero. For signed fields, the high-order bits should agree with the sign bit. */ if (lunsignedp) { if (! integer_zerop (const_binop (RSHIFT_EXPR, fold_convert (unsigned_type, rhs), size_int (lbitsize), 0))) { warning ("comparison is always %d due to width of bit-field", code == NE_EXPR); return constant_boolean_node (code == NE_EXPR, compare_type); } } else { tree tem = const_binop (RSHIFT_EXPR, fold_convert (signed_type, rhs), size_int (lbitsize - 1), 0); if (! integer_zerop (tem) && ! integer_all_onesp (tem)) { warning ("comparison is always %d due to width of bit-field", code == NE_EXPR); return constant_boolean_node (code == NE_EXPR, compare_type); } } /* Single-bit compares should always be against zero. */ if (lbitsize == 1 && ! integer_zerop (rhs)) { code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR; rhs = fold_convert (type, integer_zero_node); } /* Make a new bitfield reference, shift the constant over the appropriate number of bits and mask it with the computed mask (in case this was a signed field). If we changed it, make a new one. */ lhs = make_bit_field_ref (linner, unsigned_type, nbitsize, nbitpos, 1); if (lvolatilep) { TREE_SIDE_EFFECTS (lhs) = 1; TREE_THIS_VOLATILE (lhs) = 1; } rhs = fold (const_binop (BIT_AND_EXPR, const_binop (LSHIFT_EXPR, fold_convert (unsigned_type, rhs), size_int (lbitpos), 0), mask, 0)); return build2 (code, compare_type, build2 (BIT_AND_EXPR, unsigned_type, lhs, mask), rhs); } /* Subroutine for fold_truthop: decode a field reference. If EXP is a comparison reference, we return the innermost reference. *PBITSIZE is set to the number of bits in the reference, *PBITPOS is set to the starting bit number. If the innermost field can be completely contained in a mode-sized unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode. *PVOLATILEP is set to 1 if any expression encountered is volatile; otherwise it is not changed. *PUNSIGNEDP is set to the signedness of the field. *PMASK is set to the mask used. This is either contained in a BIT_AND_EXPR or derived from the width of the field. *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any. Return 0 if this is not a component reference or is one that we can't do anything with. 
*/ static tree decode_field_reference (tree exp, HOST_WIDE_INT *pbitsize, HOST_WIDE_INT *pbitpos, enum machine_mode *pmode, int *punsignedp, int *pvolatilep, tree *pmask, tree *pand_mask) { tree outer_type = 0; tree and_mask = 0; tree mask, inner, offset; tree unsigned_type; unsigned int precision; /* All the optimizations using this function assume integer fields. There are problems with FP fields since the type_for_size call below can fail for, e.g., XFmode. */ if (! INTEGRAL_TYPE_P (TREE_TYPE (exp))) return 0; /* We are interested in the bare arrangement of bits, so strip everything that doesn't affect the machine mode. However, record the type of the outermost expression if it may matter below. */ if (TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR || TREE_CODE (exp) == NON_LVALUE_EXPR) outer_type = TREE_TYPE (exp); STRIP_NOPS (exp); if (TREE_CODE (exp) == BIT_AND_EXPR) { and_mask = TREE_OPERAND (exp, 1); exp = TREE_OPERAND (exp, 0); STRIP_NOPS (exp); STRIP_NOPS (and_mask); if (TREE_CODE (and_mask) != INTEGER_CST) return 0; } inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode, punsignedp, pvolatilep); if ((inner == exp && and_mask == 0) || *pbitsize < 0 || offset != 0 || TREE_CODE (inner) == PLACEHOLDER_EXPR) return 0; /* If the number of bits in the reference is the same as the bitsize of the outer type, then the outer type gives the signedness. Otherwise (in case of a small bitfield) the signedness is unchanged. */ if (outer_type && *pbitsize == tree_low_cst (TYPE_SIZE (outer_type), 1)) *punsignedp = TYPE_UNSIGNED (outer_type); /* Compute the mask to access the bitfield. */ unsigned_type = lang_hooks.types.type_for_size (*pbitsize, 1); precision = TYPE_PRECISION (unsigned_type); mask = build_int_2 (~0, ~0); TREE_TYPE (mask) = unsigned_type; force_fit_type (mask, 0); mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0); mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0); /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */ if (and_mask != 0) mask = fold (build2 (BIT_AND_EXPR, unsigned_type, fold_convert (unsigned_type, and_mask), mask)); *pmask = mask; *pand_mask = and_mask; return inner; } /* Return nonzero if MASK represents a mask of SIZE ones in the low-order bit positions. */ static int all_ones_mask_p (tree mask, int size) { tree type = TREE_TYPE (mask); unsigned int precision = TYPE_PRECISION (type); tree tmask; tmask = build_int_2 (~0, ~0); TREE_TYPE (tmask) = lang_hooks.types.signed_type (type); force_fit_type (tmask, 0); return tree_int_cst_equal (mask, const_binop (RSHIFT_EXPR, const_binop (LSHIFT_EXPR, tmask, size_int (precision - size), 0), size_int (precision - size), 0)); } /* Subroutine for fold: determine if VAL is the INTEGER_CONST that represents the sign bit of EXP's type. If EXP represents a sign or zero extension, also test VAL against the unextended type. The return value is the (sub)expression whose sign bit is VAL, or NULL_TREE otherwise. */ static tree sign_bit_p (tree exp, tree val) { unsigned HOST_WIDE_INT mask_lo, lo; HOST_WIDE_INT mask_hi, hi; int width; tree t; /* Tree EXP must have an integral type. */ t = TREE_TYPE (exp); if (! INTEGRAL_TYPE_P (t)) return NULL_TREE; /* Tree VAL must be an integer constant. 
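   For example (editorial illustration): for a 32-bit signed type the
   sign-bit constant has low word 0x80000000 and high word 0, so

     sign_bit_p (x, build_int_2 (0x80000000, 0))

   returns X; and if VAL is instead 0x8000 while X is (int) S with S of
   a 16-bit type, the NOP_EXPR case recurses and returns S, the
   narrower operand whose sign bit VAL really is.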
*/ if (TREE_CODE (val) != INTEGER_CST || TREE_CONSTANT_OVERFLOW (val)) return NULL_TREE; width = TYPE_PRECISION (t); if (width > HOST_BITS_PER_WIDE_INT) { hi = (unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1); lo = 0; mask_hi = ((unsigned HOST_WIDE_INT) -1 >> (2 * HOST_BITS_PER_WIDE_INT - width)); mask_lo = -1; } else { hi = 0; lo = (unsigned HOST_WIDE_INT) 1 << (width - 1); mask_hi = 0; mask_lo = ((unsigned HOST_WIDE_INT) -1 >> (HOST_BITS_PER_WIDE_INT - width)); } /* We mask off those bits beyond TREE_TYPE (exp) so that we can treat VAL as if it were unsigned. */ if ((TREE_INT_CST_HIGH (val) & mask_hi) == hi && (TREE_INT_CST_LOW (val) & mask_lo) == lo) return exp; /* Handle extension from a narrower type. */ if (TREE_CODE (exp) == NOP_EXPR && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))) < width) return sign_bit_p (TREE_OPERAND (exp, 0), val); return NULL_TREE; } /* Subroutine for fold_truthop: determine if an operand is simple enough to be evaluated unconditionally. */ static int simple_operand_p (tree exp) { /* Strip any conversions that don't change the machine mode. */ while ((TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR) && (TYPE_MODE (TREE_TYPE (exp)) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0))))) exp = TREE_OPERAND (exp, 0); return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c' || (DECL_P (exp) && ! TREE_ADDRESSABLE (exp) && ! TREE_THIS_VOLATILE (exp) && ! DECL_NONLOCAL (exp) /* Don't regard global variables as simple. They may be allocated in ways unknown to the compiler (shared memory, #pragma weak, etc). */ && ! TREE_PUBLIC (exp) && ! DECL_EXTERNAL (exp) /* Loading a static variable is unduly expensive, but global registers aren't expensive. */ && (! TREE_STATIC (exp) || DECL_REGISTER (exp)))); } /* The following functions are subroutines to fold_range_test and allow it to try to change a logical combination of comparisons into a range test. For example, both X == 2 || X == 3 || X == 4 || X == 5 and X >= 2 && X <= 5 are converted to (unsigned) (X - 2) <= 3 We describe each set of comparisons as being either inside or outside a range, using a variable named like IN_P, and then describe the range with a lower and upper bound. If one of the bounds is omitted, it represents either the highest or lowest value of the type. In the comments below, we represent a range by two numbers in brackets preceded by a "+" to designate being inside that range, or a "-" to designate being outside that range, so the condition can be inverted by flipping the prefix. An omitted bound is represented by a "-". For example, "- [-, 10]" means being outside the range starting at the lowest possible value and ending at 10, in other words, being greater than 10. The range "+ [-, -]" is always true and hence the range "- [-, -]" is always false. We set up things so that the missing bounds are handled in a consistent manner so neither a missing bound nor "true" and "false" need to be handled using a special case. */ /* Return the result of applying CODE to ARG0 and ARG1, but handle the case of ARG0 and/or ARG1 being omitted, meaning an unlimited range. UPPER0_P and UPPER1_P are nonzero if the respective argument is an upper bound and zero for a lower. TYPE, if nonzero, is the type of the result; it must be specified for a comparison. ARG1 will be converted to ARG0's type if both are specified. 
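   For illustration (editorial sketch): two ordinary bounds are simply
   folded, e.g. comparing 2 and 10 with LT_EXPR yields the boolean true
   node, while

     range_binop (LT_EXPR, integer_type_node, NULL_TREE, 0, NULL_TREE, 1)

   compares a missing lower bound (treated as minus infinity, SGN0 ==
   -1) against a missing upper bound (plus infinity, SGN1 == 1) and
   likewise returns boolean true.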
*/ static tree range_binop (enum tree_code code, tree type, tree arg0, int upper0_p, tree arg1, int upper1_p) { tree tem; int result; int sgn0, sgn1; /* If neither arg represents infinity, do the normal operation. Else, if not a comparison, return infinity. Else handle the special comparison rules. Note that most of the cases below won't occur, but are handled for consistency. */ if (arg0 != 0 && arg1 != 0) { tem = fold (build2 (code, type != 0 ? type : TREE_TYPE (arg0), arg0, fold_convert (TREE_TYPE (arg0), arg1))); STRIP_NOPS (tem); return TREE_CODE (tem) == INTEGER_CST ? tem : 0; } if (TREE_CODE_CLASS (code) != '<') return 0; /* Set SGN[01] to -1 if ARG[01] is a lower bound, 1 for upper, and 0 for neither. In real maths, we cannot assume open ended ranges are the same. But, this is computer arithmetic, where numbers are finite. We can therefore make the transformation of any unbounded range with the value Z, Z being greater than any representable number. This permits us to treat unbounded ranges as equal. */ sgn0 = arg0 != 0 ? 0 : (upper0_p ? 1 : -1); sgn1 = arg1 != 0 ? 0 : (upper1_p ? 1 : -1); switch (code) { case EQ_EXPR: result = sgn0 == sgn1; break; case NE_EXPR: result = sgn0 != sgn1; break; case LT_EXPR: result = sgn0 < sgn1; break; case LE_EXPR: result = sgn0 <= sgn1; break; case GT_EXPR: result = sgn0 > sgn1; break; case GE_EXPR: result = sgn0 >= sgn1; break; default: abort (); } return constant_boolean_node (result, type); } /* Given EXP, a logical expression, set the range it is testing into variables denoted by PIN_P, PLOW, and PHIGH. Return the expression actually being tested. *PLOW and *PHIGH will be made of the same type as the returned expression. If EXP is not a comparison, we will most likely not be returning a useful value and range. */ static tree make_range (tree exp, int *pin_p, tree *plow, tree *phigh) { enum tree_code code; tree arg0 = NULL_TREE, arg1 = NULL_TREE; tree exp_type = NULL_TREE, arg0_type = NULL_TREE; int in_p, n_in_p; tree low, high, n_low, n_high; /* Start with simply saying "EXP != 0" and then look at the code of EXP and see if we can refine the range. Some of the cases below may not happen, but it doesn't seem worth worrying about this. We "continue" the outer loop when we've changed something; otherwise we "break" the switch, which will "break" the while. */ in_p = 0; low = high = fold_convert (TREE_TYPE (exp), integer_zero_node); while (1) { code = TREE_CODE (exp); exp_type = TREE_TYPE (exp); if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))) { if (first_rtl_op (code) > 0) arg0 = TREE_OPERAND (exp, 0); if (TREE_CODE_CLASS (code) == '<' || TREE_CODE_CLASS (code) == '1' || TREE_CODE_CLASS (code) == '2') arg0_type = TREE_TYPE (arg0); if (TREE_CODE_CLASS (code) == '2' || TREE_CODE_CLASS (code) == '<' || (TREE_CODE_CLASS (code) == 'e' && TREE_CODE_LENGTH (code) > 1)) arg1 = TREE_OPERAND (exp, 1); } switch (code) { case TRUTH_NOT_EXPR: in_p = ! in_p, exp = arg0; continue; case EQ_EXPR: case NE_EXPR: case LT_EXPR: case LE_EXPR: case GE_EXPR: case GT_EXPR: /* We can only do something if the range is testing for zero and if the second operand is an integer constant. Note that saying something is "in" the range we make is done by complementing IN_P since it will set in the initial case of being not equal to zero; "out" is leaving it alone. */ if (low == 0 || high == 0 || ! integer_zerop (low) || ! 
integer_zerop (high) || TREE_CODE (arg1) != INTEGER_CST) break; switch (code) { case NE_EXPR: /* - [c, c] */ low = high = arg1; break; case EQ_EXPR: /* + [c, c] */ in_p = ! in_p, low = high = arg1; break; case GT_EXPR: /* - [-, c] */ low = 0, high = arg1; break; case GE_EXPR: /* + [c, -] */ in_p = ! in_p, low = arg1, high = 0; break; case LT_EXPR: /* - [c, -] */ low = arg1, high = 0; break; case LE_EXPR: /* + [-, c] */ in_p = ! in_p, low = 0, high = arg1; break; default: abort (); } /* If this is an unsigned comparison, we also know that EXP is greater than or equal to zero. We base the range tests we make on that fact, so we record it here so we can parse existing range tests. We test arg0_type since often the return type of, e.g. EQ_EXPR, is boolean. */ if (TYPE_UNSIGNED (arg0_type) && (low == 0 || high == 0)) { if (! merge_ranges (&n_in_p, &n_low, &n_high, in_p, low, high, 1, fold_convert (arg0_type, integer_zero_node), NULL_TREE)) break; in_p = n_in_p, low = n_low, high = n_high; /* If the high bound is missing, but we have a nonzero low bound, reverse the range so it goes from zero to the low bound minus 1. */ if (high == 0 && low && ! integer_zerop (low)) { in_p = ! in_p; high = range_binop (MINUS_EXPR, NULL_TREE, low, 0, integer_one_node, 0); low = fold_convert (arg0_type, integer_zero_node); } } exp = arg0; continue; case NEGATE_EXPR: /* (-x) IN [a,b] -> x in [-b, -a] */ n_low = range_binop (MINUS_EXPR, exp_type, fold_convert (exp_type, integer_zero_node), 0, high, 1); n_high = range_binop (MINUS_EXPR, exp_type, fold_convert (exp_type, integer_zero_node), 0, low, 0); low = n_low, high = n_high; exp = arg0; continue; case BIT_NOT_EXPR: /* ~ X -> -X - 1 */ exp = build2 (MINUS_EXPR, exp_type, negate_expr (arg0), fold_convert (exp_type, integer_one_node)); continue; case PLUS_EXPR: case MINUS_EXPR: if (TREE_CODE (arg1) != INTEGER_CST) break; /* If EXP is signed, any overflow in the computation is undefined, so we don't worry about it so long as our computations on the bounds don't overflow. For unsigned, overflow is defined and this is exactly the right thing. */ n_low = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR, arg0_type, low, 0, arg1, 0); n_high = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR, arg0_type, high, 1, arg1, 0); if ((n_low != 0 && TREE_OVERFLOW (n_low)) || (n_high != 0 && TREE_OVERFLOW (n_high))) break; /* Check for an unsigned range which has wrapped around the maximum value thus making n_high < n_low, and normalize it. */ if (n_low && n_high && tree_int_cst_lt (n_high, n_low)) { low = range_binop (PLUS_EXPR, arg0_type, n_high, 0, integer_one_node, 0); high = range_binop (MINUS_EXPR, arg0_type, n_low, 0, integer_one_node, 0); /* If the range is of the form +/- [ x+1, x ], we won't be able to normalize it. But then, it represents the whole range or the empty set, so make it +/- [ -, - ]. */ if (tree_int_cst_equal (n_low, low) && tree_int_cst_equal (n_high, high)) low = high = 0; else in_p = ! in_p; } else low = n_low, high = n_high; exp = arg0; continue; case NOP_EXPR: case NON_LVALUE_EXPR: case CONVERT_EXPR: if (TYPE_PRECISION (arg0_type) > TYPE_PRECISION (exp_type)) break; if (! INTEGRAL_TYPE_P (arg0_type) || (low != 0 && ! int_fits_type_p (low, arg0_type)) || (high != 0 && ! 
int_fits_type_p (high, arg0_type))) break; n_low = low, n_high = high; if (n_low != 0) n_low = fold_convert (arg0_type, n_low); if (n_high != 0) n_high = fold_convert (arg0_type, n_high); /* If we're converting arg0 from an unsigned type, to exp, a signed type, we will be doing the comparison as unsigned. The tests above have already verified that LOW and HIGH are both positive. So we have to ensure that we will handle large unsigned values the same way that the current signed bounds treat negative values. */ if (!TYPE_UNSIGNED (exp_type) && TYPE_UNSIGNED (arg0_type)) { tree high_positive; tree equiv_type = lang_hooks.types.type_for_mode (TYPE_MODE (arg0_type), 1); /* A range without an upper bound is, naturally, unbounded. Since convert would have cropped a very large value, use the max value for the destination type. */ high_positive = TYPE_MAX_VALUE (equiv_type) ? TYPE_MAX_VALUE (equiv_type) : TYPE_MAX_VALUE (arg0_type); if (TYPE_PRECISION (exp_type) == TYPE_PRECISION (arg0_type)) high_positive = fold (build2 (RSHIFT_EXPR, arg0_type, fold_convert (arg0_type, high_positive), fold_convert (arg0_type, integer_one_node))); /* If the low bound is specified, "and" the range with the range for which the original unsigned value will be positive. */ if (low != 0) { if (! merge_ranges (&n_in_p, &n_low, &n_high, 1, n_low, n_high, 1, fold_convert (arg0_type, integer_zero_node), high_positive)) break; in_p = (n_in_p == in_p); } else { /* Otherwise, "or" the range with the range of the input that will be interpreted as negative. */ if (! merge_ranges (&n_in_p, &n_low, &n_high, 0, n_low, n_high, 1, fold_convert (arg0_type, integer_zero_node), high_positive)) break; in_p = (in_p != n_in_p); } } exp = arg0; low = n_low, high = n_high; continue; default: break; } break; } /* If EXP is a constant, we can evaluate whether this is true or false. */ if (TREE_CODE (exp) == INTEGER_CST) { in_p = in_p == (integer_onep (range_binop (GE_EXPR, integer_type_node, exp, 0, low, 0)) && integer_onep (range_binop (LE_EXPR, integer_type_node, exp, 1, high, 1))); low = high = 0; exp = 0; } *pin_p = in_p, *plow = low, *phigh = high; return exp; } /* Given a range, LOW, HIGH, and IN_P, an expression, EXP, and a result type, TYPE, return an expression to test if EXP is in (or out of, depending on IN_P) the range. Return 0 if the test couldn't be created. */ static tree build_range_check (tree type, tree exp, int in_p, tree low, tree high) { tree etype = TREE_TYPE (exp); tree value; if (! in_p) { value = build_range_check (type, exp, 1, low, high); if (value != 0) return invert_truthvalue (value); return 0; } if (low == 0 && high == 0) return fold_convert (type, integer_one_node); if (low == 0) return fold (build2 (LE_EXPR, type, exp, high)); if (high == 0) return fold (build2 (GE_EXPR, type, exp, low)); if (operand_equal_p (low, high, 0)) return fold (build2 (EQ_EXPR, type, exp, low)); if (integer_zerop (low)) { if (! TYPE_UNSIGNED (etype)) { etype = lang_hooks.types.unsigned_type (etype); high = fold_convert (etype, high); exp = fold_convert (etype, exp); } return build_range_check (type, exp, 1, 0, high); } /* Optimize (c>=1) && (c<=127) into (signed char)c > 0. 
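The code below recognizes this shape for any width: with an N-bit unsigned ETYPE, a low bound of 1 together with a high bound of 2**(N-1) - 1 describes exactly the values whose signed reinterpretation is positive, so the two comparisons collapse into one signed test against zero. Worked instances (types chosen only for illustration):

       unsigned char c:   c >= 1 && c <= 127           becomes   (signed char) c > 0
       unsigned int u:    u >= 1 && u <= 0x7fffffff    becomes   (int) u > 0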
*/ if (integer_onep (low) && TREE_CODE (high) == INTEGER_CST) { unsigned HOST_WIDE_INT lo; HOST_WIDE_INT hi; int prec; prec = TYPE_PRECISION (etype); if (prec <= HOST_BITS_PER_WIDE_INT) { hi = 0; lo = ((unsigned HOST_WIDE_INT) 1 << (prec - 1)) - 1; } else { hi = ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)) - 1; lo = (unsigned HOST_WIDE_INT) -1; } if (TREE_INT_CST_HIGH (high) == hi && TREE_INT_CST_LOW (high) == lo) { if (TYPE_UNSIGNED (etype)) { etype = lang_hooks.types.signed_type (etype); exp = fold_convert (etype, exp); } return fold (build2 (GT_EXPR, type, exp, fold_convert (etype, integer_zero_node))); } } value = const_binop (MINUS_EXPR, high, low, 0); if (value != 0 && TREE_OVERFLOW (value) && ! TYPE_UNSIGNED (etype)) { tree utype, minv, maxv; /* Check if (unsigned) INT_MAX + 1 == (unsigned) INT_MIN for the type in question, as we rely on this here. */ switch (TREE_CODE (etype)) { case INTEGER_TYPE: case ENUMERAL_TYPE: case CHAR_TYPE: utype = lang_hooks.types.unsigned_type (etype); maxv = fold_convert (utype, TYPE_MAX_VALUE (etype)); maxv = range_binop (PLUS_EXPR, NULL_TREE, maxv, 1, integer_one_node, 1); minv = fold_convert (utype, TYPE_MIN_VALUE (etype)); if (integer_zerop (range_binop (NE_EXPR, integer_type_node, minv, 1, maxv, 1))) { etype = utype; high = fold_convert (etype, high); low = fold_convert (etype, low); exp = fold_convert (etype, exp); value = const_binop (MINUS_EXPR, high, low, 0); } break; default: break; } } if (value != 0 && ! TREE_OVERFLOW (value)) return build_range_check (type, fold (build2 (MINUS_EXPR, etype, exp, low)), 1, fold_convert (etype, integer_zero_node), value); return 0; } /* Given two ranges, see if we can merge them into one. Return 1 if we can, 0 if we can't. Set the output range into the specified parameters. */ static int merge_ranges (int *pin_p, tree *plow, tree *phigh, int in0_p, tree low0, tree high0, int in1_p, tree low1, tree high1) { int no_overlap; int subset; int temp; tree tem; int in_p; tree low, high; int lowequal = ((low0 == 0 && low1 == 0) || integer_onep (range_binop (EQ_EXPR, integer_type_node, low0, 0, low1, 0))); int highequal = ((high0 == 0 && high1 == 0) || integer_onep (range_binop (EQ_EXPR, integer_type_node, high0, 1, high1, 1))); /* Make range 0 be the range that starts first, or ends last if they start at the same value. Swap them if it isn't. */ if (integer_onep (range_binop (GT_EXPR, integer_type_node, low0, 0, low1, 0)) || (lowequal && integer_onep (range_binop (GT_EXPR, integer_type_node, high1, 1, high0, 1)))) { temp = in0_p, in0_p = in1_p, in1_p = temp; tem = low0, low0 = low1, low1 = tem; tem = high0, high0 = high1, high1 = tem; } /* Now flag two cases, whether the ranges are disjoint or whether the second range is totally subsumed in the first. Note that the tests below are simplified by the ones above. */ no_overlap = integer_onep (range_binop (LT_EXPR, integer_type_node, high0, 1, low1, 0)); subset = integer_onep (range_binop (LE_EXPR, integer_type_node, high1, 1, high0, 1)); /* We now have four cases, depending on whether we are including or excluding the two ranges. */ if (in0_p && in1_p) { /* If they don't overlap, the result is false. If the second range is a subset it is the result. Otherwise, the range is from the start of the second to the end of the first. */ if (no_overlap) in_p = 0, low = high = 0; else if (subset) in_p = 1, low = low1, high = high1; else in_p = 1, low = low1, high = high0; } else if (in0_p && ! in1_p) { /* If they don't overlap, the result is the first range. 
If they are equal, the result is false. If the second range is a subset of the first, and the ranges begin at the same place, we go from just after the end of the first range to the end of the second. If the second range is not a subset of the first, or if it is a subset and both ranges end at the same place, the range starts at the start of the first range and ends just before the second range. Otherwise, we can't describe this as a single range. */ if (no_overlap) in_p = 1, low = low0, high = high0; else if (lowequal && highequal) in_p = 0, low = high = 0; else if (subset && lowequal) { in_p = 1, high = high0; low = range_binop (PLUS_EXPR, NULL_TREE, high1, 0, integer_one_node, 0); } else if (! subset || highequal) { in_p = 1, low = low0; high = range_binop (MINUS_EXPR, NULL_TREE, low1, 0, integer_one_node, 0); } else return 0; } else if (! in0_p && in1_p) { /* If they don't overlap, the result is the second range. If the second is a subset of the first, the result is false. Otherwise, the range starts just after the first range and ends at the end of the second. */ if (no_overlap) in_p = 1, low = low1, high = high1; else if (subset || highequal) in_p = 0, low = high = 0; else { in_p = 1, high = high1; low = range_binop (PLUS_EXPR, NULL_TREE, high0, 1, integer_one_node, 0); } } else { /* The case where we are excluding both ranges. Here the complex case is if they don't overlap. In that case, the only time we have a range is if they are adjacent. If the second is a subset of the first, the result is the first. Otherwise, the range to exclude starts at the beginning of the first range and ends at the end of the second. */ if (no_overlap) { if (integer_onep (range_binop (EQ_EXPR, integer_type_node, range_binop (PLUS_EXPR, NULL_TREE, high0, 1, integer_one_node, 1), 1, low1, 0))) in_p = 0, low = low0, high = high1; else { /* Canonicalize - [min, x] into - [-, x]. */ if (low0 && TREE_CODE (low0) == INTEGER_CST) switch (TREE_CODE (TREE_TYPE (low0))) { case ENUMERAL_TYPE: if (TYPE_PRECISION (TREE_TYPE (low0)) != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (low0)))) break; /* FALLTHROUGH */ case INTEGER_TYPE: case CHAR_TYPE: if (tree_int_cst_equal (low0, TYPE_MIN_VALUE (TREE_TYPE (low0)))) low0 = 0; break; case POINTER_TYPE: if (TYPE_UNSIGNED (TREE_TYPE (low0)) && integer_zerop (low0)) low0 = 0; break; default: break; } /* Canonicalize - [x, max] into - [x, -]. */ if (high1 && TREE_CODE (high1) == INTEGER_CST) switch (TREE_CODE (TREE_TYPE (high1))) { case ENUMERAL_TYPE: if (TYPE_PRECISION (TREE_TYPE (high1)) != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (high1)))) break; /* FALLTHROUGH */ case INTEGER_TYPE: case CHAR_TYPE: if (tree_int_cst_equal (high1, TYPE_MAX_VALUE (TREE_TYPE (high1)))) high1 = 0; break; case POINTER_TYPE: if (TYPE_UNSIGNED (TREE_TYPE (high1)) && integer_zerop (range_binop (PLUS_EXPR, NULL_TREE, high1, 1, integer_one_node, 1))) high1 = 0; break; default: break; } /* The ranges might be also adjacent between the maximum and minimum values of the given type. For - [{min,-}, x] and - [y, {max,-}] ranges where x + 1 < y return + [x + 1, y - 1]. 
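A worked instance, using signed char purely for illustration: merging - [-128, 10] with - [20, 127] describes (c > 10) && (c < 20); the canonicalizations just above turn the bounds at the type's extremes into omitted bounds, and the code below then rebuilds the result as + [11, 19], i.e. low = high0 + 1 and high = low1 - 1, which build_range_check can later turn into a single test such as (unsigned char) (c - 11) <= 8.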
*/ if (low0 == 0 && high1 == 0) { low = range_binop (PLUS_EXPR, NULL_TREE, high0, 1, integer_one_node, 1); high = range_binop (MINUS_EXPR, NULL_TREE, low1, 0, integer_one_node, 0); if (low == 0 || high == 0) return 0; in_p = 1; } else return 0; } } else if (subset) in_p = 0, low = low0, high = high0; else in_p = 0, low = low0, high = high1; } *pin_p = in_p, *plow = low, *phigh = high; return 1; } /* Subroutine of fold, looking inside expressions of the form A op B ? A : C, where ARG0, ARG1 and ARG2 are the three operands of the COND_EXPR. This function is being used also to optimize A op B ? C : A, by reversing the comparison first. Return a folded expression whose code is not a COND_EXPR anymore, or NULL_TREE if no folding opportunity is found. */ static tree fold_cond_expr_with_comparison (tree type, tree arg0, tree arg1, tree arg2) { enum tree_code comp_code = TREE_CODE (arg0); tree arg00 = TREE_OPERAND (arg0, 0); tree arg01 = TREE_OPERAND (arg0, 1); tree arg1_type = TREE_TYPE (arg1); tree tem; STRIP_NOPS (arg1); STRIP_NOPS (arg2); /* If we have A op 0 ? A : -A, consider applying the following transformations: A == 0? A : -A same as -A A != 0? A : -A same as A A >= 0? A : -A same as abs (A) A > 0? A : -A same as abs (A) A <= 0? A : -A same as -abs (A) A < 0? A : -A same as -abs (A) None of these transformations work for modes with signed zeros. If A is +/-0, the first two transformations will change the sign of the result (from +0 to -0, or vice versa). The last four will fix the sign of the result, even though the original expressions could be positive or negative, depending on the sign of A. Note that all these transformations are correct if A is NaN, since the two alternatives (A and -A) are also NaNs. */ if ((FLOAT_TYPE_P (TREE_TYPE (arg01)) ? real_zerop (arg01) : integer_zerop (arg01)) && TREE_CODE (arg2) == NEGATE_EXPR && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0)) switch (comp_code) { case EQ_EXPR: tem = fold_convert (arg1_type, arg1); return pedantic_non_lvalue (fold_convert (type, negate_expr (tem))); case NE_EXPR: return pedantic_non_lvalue (fold_convert (type, arg1)); case GE_EXPR: case GT_EXPR: if (TYPE_UNSIGNED (TREE_TYPE (arg1))) arg1 = fold_convert (lang_hooks.types.signed_type (TREE_TYPE (arg1)), arg1); tem = fold (build1 (ABS_EXPR, TREE_TYPE (arg1), arg1)); return pedantic_non_lvalue (fold_convert (type, tem)); case LE_EXPR: case LT_EXPR: if (TYPE_UNSIGNED (TREE_TYPE (arg1))) arg1 = fold_convert (lang_hooks.types.signed_type (TREE_TYPE (arg1)), arg1); tem = fold (build1 (ABS_EXPR, TREE_TYPE (arg1), arg1)); return negate_expr (fold_convert (type, tem)); default: abort (); } /* A != 0 ? A : 0 is simply A, unless A is -0. Likewise A == 0 ? A : 0 is always 0 unless A is -0. Note that both transformations are correct when A is NaN: A != 0 is then true, and A == 0 is false. */ if (integer_zerop (arg01) && integer_zerop (arg2)) { if (comp_code == NE_EXPR) return pedantic_non_lvalue (fold_convert (type, arg1)); else if (comp_code == EQ_EXPR) return pedantic_non_lvalue (fold_convert (type, integer_zero_node)); } /* Try some transformations of A op B ? A : B. A == B? A : B same as B A != B? A : B same as A A >= B? A : B same as max (A, B) A > B? A : B same as max (B, A) A <= B? A : B same as min (A, B) A < B? A : B same as min (B, A) As above, these transformations don't work in the presence of signed zeros. For example, if A and B are zeros of opposite sign, the first two transformations will change the sign of the result. 
In the last four, the original expressions give different results for (A=+0, B=-0) and (A=-0, B=+0), but the transformed expressions do not. The first two transformations are correct if either A or B is a NaN. In the first transformation, the condition will be false, and B will indeed be chosen. In the case of the second transformation, the condition A != B will be true, and A will be chosen. The conversions to max() and min() are not correct if B is a number and A is not. The conditions in the original expressions will be false, so all four give B. The min() and max() versions would give a NaN instead. */ if (operand_equal_for_comparison_p (arg01, arg2, arg00)) { tree comp_op0 = arg00; tree comp_op1 = arg01; tree comp_type = TREE_TYPE (comp_op0); /* Avoid adding NOP_EXPRs in case this is an lvalue. */ if (TYPE_MAIN_VARIANT (comp_type) == TYPE_MAIN_VARIANT (type)) { comp_type = type; comp_op0 = arg1; comp_op1 = arg2; } switch (comp_code) { case EQ_EXPR: return pedantic_non_lvalue (fold_convert (type, arg2)); case NE_EXPR: return pedantic_non_lvalue (fold_convert (type, arg1)); case LE_EXPR: case LT_EXPR: /* In C++ a ?: expression can be an lvalue, so put the operand which will be used if they are equal first so that we can convert this back to the corresponding COND_EXPR. */ if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg1)))) return pedantic_non_lvalue ( fold_convert (type, fold (build2 (MIN_EXPR, comp_type, (comp_code == LE_EXPR ? comp_op0 : comp_op1), (comp_code == LE_EXPR ? comp_op1 : comp_op0))))); break; case GE_EXPR: case GT_EXPR: if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg1)))) return pedantic_non_lvalue ( fold_convert (type, fold (build2 (MAX_EXPR, comp_type, (comp_code == GE_EXPR ? comp_op0 : comp_op1), (comp_code == GE_EXPR ? comp_op1 : comp_op0))))); break; default: abort (); } } /* If this is A op C1 ? A : C2 with C1 and C2 constant integers, we might still be able to simplify this. For example, if C1 is one less or one more than C2, this might have started out as a MIN or MAX and been transformed by this function. Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */ if (INTEGRAL_TYPE_P (type) && TREE_CODE (arg01) == INTEGER_CST && TREE_CODE (arg2) == INTEGER_CST) switch (comp_code) { case EQ_EXPR: /* We can replace A with C1 in this case. */ arg1 = fold_convert (type, arg01); return fold (build3 (COND_EXPR, type, arg0, arg1, arg2)); case LT_EXPR: /* If C1 is C2 + 1, this is min(A, C2). */ if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), OEP_ONLY_CONST) && operand_equal_p (arg01, const_binop (PLUS_EXPR, arg2, integer_one_node, 0), OEP_ONLY_CONST)) return pedantic_non_lvalue (fold (build2 (MIN_EXPR, type, arg1, arg2))); break; case LE_EXPR: /* If C1 is C2 - 1, this is min(A, C2). */ if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), OEP_ONLY_CONST) && operand_equal_p (arg01, const_binop (MINUS_EXPR, arg2, integer_one_node, 0), OEP_ONLY_CONST)) return pedantic_non_lvalue (fold (build2 (MIN_EXPR, type, arg1, arg2))); break; case GT_EXPR: /* If C1 is C2 - 1, this is max(A, C2). */ if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), OEP_ONLY_CONST) && operand_equal_p (arg01, const_binop (MINUS_EXPR, arg2, integer_one_node, 0), OEP_ONLY_CONST)) return pedantic_non_lvalue (fold (build2 (MAX_EXPR, type, arg1, arg2))); break; case GE_EXPR: /* If C1 is C2 + 1, this is max(A, C2). */ if (! 
operand_equal_p (arg2, TYPE_MAX_VALUE (type), OEP_ONLY_CONST) && operand_equal_p (arg01, const_binop (PLUS_EXPR, arg2, integer_one_node, 0), OEP_ONLY_CONST)) return pedantic_non_lvalue (fold (build2 (MAX_EXPR, type, arg1, arg2))); break; case NE_EXPR: break; default: abort (); } return NULL_TREE; } #ifndef RANGE_TEST_NON_SHORT_CIRCUIT #define RANGE_TEST_NON_SHORT_CIRCUIT (BRANCH_COST >= 2) #endif /* EXP is some logical combination of boolean tests. See if we can merge it into some range test. Return the new tree if so. */ static tree fold_range_test (tree exp) { int or_op = (TREE_CODE (exp) == TRUTH_ORIF_EXPR || TREE_CODE (exp) == TRUTH_OR_EXPR); int in0_p, in1_p, in_p; tree low0, low1, low, high0, high1, high; tree lhs = make_range (TREE_OPERAND (exp, 0), &in0_p, &low0, &high0); tree rhs = make_range (TREE_OPERAND (exp, 1), &in1_p, &low1, &high1); tree tem; /* If this is an OR operation, invert both sides; we will invert again at the end. */ if (or_op) in0_p = ! in0_p, in1_p = ! in1_p; /* If both expressions are the same, if we can merge the ranges, and we can build the range test, return it or it inverted. If one of the ranges is always true or always false, consider it to be the same expression as the other. */ if ((lhs == 0 || rhs == 0 || operand_equal_p (lhs, rhs, 0)) && merge_ranges (&in_p, &low, &high, in0_p, low0, high0, in1_p, low1, high1) && 0 != (tem = (build_range_check (TREE_TYPE (exp), lhs != 0 ? lhs : rhs != 0 ? rhs : integer_zero_node, in_p, low, high)))) return or_op ? invert_truthvalue (tem) : tem; /* On machines where the branch cost is expensive, if this is a short-circuited branch and the underlying object on both sides is the same, make a non-short-circuit operation. */ else if (RANGE_TEST_NON_SHORT_CIRCUIT && lhs != 0 && rhs != 0 && (TREE_CODE (exp) == TRUTH_ANDIF_EXPR || TREE_CODE (exp) == TRUTH_ORIF_EXPR) && operand_equal_p (lhs, rhs, 0)) { /* If simple enough, just rewrite. Otherwise, make a SAVE_EXPR unless we are at top level or LHS contains a PLACEHOLDER_EXPR, in which cases we can't do this. */ if (simple_operand_p (lhs)) return build2 (TREE_CODE (exp) == TRUTH_ANDIF_EXPR ? TRUTH_AND_EXPR : TRUTH_OR_EXPR, TREE_TYPE (exp), TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1)); else if (lang_hooks.decls.global_bindings_p () == 0 && ! CONTAINS_PLACEHOLDER_P (lhs)) { tree common = save_expr (lhs); if (0 != (lhs = build_range_check (TREE_TYPE (exp), common, or_op ? ! in0_p : in0_p, low0, high0)) && (0 != (rhs = build_range_check (TREE_TYPE (exp), common, or_op ? ! in1_p : in1_p, low1, high1)))) return build2 (TREE_CODE (exp) == TRUTH_ANDIF_EXPR ? TRUTH_AND_EXPR : TRUTH_OR_EXPR, TREE_TYPE (exp), lhs, rhs); } } return 0; } /* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P bit value. Arrange things so the extra bits will be set to zero if and only if C is signed-extended to its full width. If MASK is nonzero, it is an INTEGER_CST that should be AND'ed with the extra bits. */ static tree unextend (tree c, int p, int unsignedp, tree mask) { tree type = TREE_TYPE (c); int modesize = GET_MODE_BITSIZE (TYPE_MODE (type)); tree temp; if (p == modesize || unsignedp) return c; /* We work by getting just the sign bit into the low-order bit, then into the high-order bit, then sign-extend. We then XOR that value with C. */ temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0); temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0); /* We must use a signed type in order to get an arithmetic right shift. 
However, we must also avoid introducing accidental overflows, so that a subsequent call to integer_zerop will work. Hence we must do the type conversion here. At this point, the constant is either zero or one, and the conversion to a signed type can never overflow. We could get an overflow if this conversion is done anywhere else. */ if (TYPE_UNSIGNED (type)) temp = fold_convert (lang_hooks.types.signed_type (type), temp); temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0); temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0); if (mask != 0) temp = const_binop (BIT_AND_EXPR, temp, fold_convert (TREE_TYPE (c), mask), 0); /* If necessary, convert the type back to match the type of C. */ if (TYPE_UNSIGNED (type)) temp = fold_convert (type, temp); return fold_convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0)); } /* Find ways of folding logical expressions of LHS and RHS: Try to merge two comparisons to the same innermost item. Look for range tests like "ch >= '0' && ch <= '9'". Look for combinations of simple terms on machines with expensive branches and evaluate the RHS unconditionally. For example, if we have p->a == 2 && p->b == 4 and we can make an object large enough to span both A and B, we can do this with a comparison against the object ANDed with a mask. If we have p->a == q->a && p->b == q->b, we may be able to use bit masking operations to do this with one comparison. We check for both normal comparisons and the BIT_AND_EXPRs made by this function and the one above. CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR, TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR. TRUTH_TYPE is the type of the logical operand and LHS and RHS are its two operands. We return the simplified tree or 0 if no optimization is possible. */ static tree fold_truthop (enum tree_code code, tree truth_type, tree lhs, tree rhs) { /* If this is the "or" of two comparisons, we can do something if the comparisons are NE_EXPR. If this is the "and", we can do something if the comparisons are EQ_EXPR. I.e., (a->b == 2 && a->c == 4) can become (a->new == NEW). WANTED_CODE is this operation code. For single bit fields, we can convert EQ_EXPR to NE_EXPR so we need not reject the "wrong" comparison for one-bit fields. */ enum tree_code wanted_code; enum tree_code lcode, rcode; tree ll_arg, lr_arg, rl_arg, rr_arg; tree ll_inner, lr_inner, rl_inner, rr_inner; HOST_WIDE_INT ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos; HOST_WIDE_INT rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos; HOST_WIDE_INT xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos; HOST_WIDE_INT lnbitsize, lnbitpos, rnbitsize, rnbitpos; int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp; enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode; enum machine_mode lnmode, rnmode; tree ll_mask, lr_mask, rl_mask, rr_mask; tree ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask; tree l_const, r_const; tree lntype, rntype, result; int first_bit, end_bit; int volatilep; /* Start by getting the comparison codes. Fail if anything is volatile. If one operand is a BIT_AND_EXPR with the constant one, treat it as if it were surrounded with a NE_EXPR.
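As an overview of what a successful merge looks like (the struct, the little-endian layout, and the pointer cast are for illustration only, not something this function emits literally): given

       struct s { unsigned char a; unsigned char b; };

   the test p->a == 2 && p->b == 4 can be evaluated as one wider load and a single compare, conceptually

       (*(unsigned short *) &p->a) == (2 | (4 << 8))

   because the two fields are adjacent, their masks are all-ones for their widths, and the merged constant is just the two constants shifted to their bit positions; the code below builds the same thing on trees with make_bit_field_ref, const_binop and a BIT_IOR_EXPR of the constants.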
*/ if (TREE_SIDE_EFFECTS (lhs) || TREE_SIDE_EFFECTS (rhs)) return 0; lcode = TREE_CODE (lhs); rcode = TREE_CODE (rhs); if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1))) { lhs = build2 (NE_EXPR, truth_type, lhs, integer_zero_node); lcode = NE_EXPR; } if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1))) { rhs = build2 (NE_EXPR, truth_type, rhs, integer_zero_node); rcode = NE_EXPR; } if (TREE_CODE_CLASS (lcode) != '<' || TREE_CODE_CLASS (rcode) != '<') return 0; ll_arg = TREE_OPERAND (lhs, 0); lr_arg = TREE_OPERAND (lhs, 1); rl_arg = TREE_OPERAND (rhs, 0); rr_arg = TREE_OPERAND (rhs, 1); /* Simplify (x= 2 && ! FLOAT_TYPE_P (TREE_TYPE (rl_arg)) && simple_operand_p (rl_arg) && simple_operand_p (rr_arg)) { /* Convert (a != 0) || (b != 0) into (a | b) != 0. */ if (code == TRUTH_OR_EXPR && lcode == NE_EXPR && integer_zerop (lr_arg) && rcode == NE_EXPR && integer_zerop (rr_arg) && TREE_TYPE (ll_arg) == TREE_TYPE (rl_arg)) return build2 (NE_EXPR, truth_type, build2 (BIT_IOR_EXPR, TREE_TYPE (ll_arg), ll_arg, rl_arg), fold_convert (TREE_TYPE (ll_arg), integer_zero_node)); /* Convert (a == 0) && (b == 0) into (a | b) == 0. */ if (code == TRUTH_AND_EXPR && lcode == EQ_EXPR && integer_zerop (lr_arg) && rcode == EQ_EXPR && integer_zerop (rr_arg) && TREE_TYPE (ll_arg) == TREE_TYPE (rl_arg)) return build2 (EQ_EXPR, truth_type, build2 (BIT_IOR_EXPR, TREE_TYPE (ll_arg), ll_arg, rl_arg), fold_convert (TREE_TYPE (ll_arg), integer_zero_node)); return build2 (code, truth_type, lhs, rhs); } /* See if the comparisons can be merged. Then get all the parameters for each side. */ if ((lcode != EQ_EXPR && lcode != NE_EXPR) || (rcode != EQ_EXPR && rcode != NE_EXPR)) return 0; volatilep = 0; ll_inner = decode_field_reference (ll_arg, &ll_bitsize, &ll_bitpos, &ll_mode, &ll_unsignedp, &volatilep, &ll_mask, &ll_and_mask); lr_inner = decode_field_reference (lr_arg, &lr_bitsize, &lr_bitpos, &lr_mode, &lr_unsignedp, &volatilep, &lr_mask, &lr_and_mask); rl_inner = decode_field_reference (rl_arg, &rl_bitsize, &rl_bitpos, &rl_mode, &rl_unsignedp, &volatilep, &rl_mask, &rl_and_mask); rr_inner = decode_field_reference (rr_arg, &rr_bitsize, &rr_bitpos, &rr_mode, &rr_unsignedp, &volatilep, &rr_mask, &rr_and_mask); /* It must be true that the inner operation on the lhs of each comparison must be the same if we are to be able to do anything. Then see if we have constants. If not, the same must be true for the rhs's. */ if (volatilep || ll_inner == 0 || rl_inner == 0 || ! operand_equal_p (ll_inner, rl_inner, 0)) return 0; if (TREE_CODE (lr_arg) == INTEGER_CST && TREE_CODE (rr_arg) == INTEGER_CST) l_const = lr_arg, r_const = rr_arg; else if (lr_inner == 0 || rr_inner == 0 || ! operand_equal_p (lr_inner, rr_inner, 0)) return 0; else l_const = r_const = 0; /* If either comparison code is not correct for our logical operation, fail. However, we can convert a one-bit comparison against zero into the opposite comparison against that bit being set in the field. */ wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR); if (lcode != wanted_code) { if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask)) { /* Make the left operand unsigned, since we are only interested in the value of one bit. Otherwise we are doing the wrong thing below. */ ll_unsignedp = 1; l_const = ll_mask; } else return 0; } /* This is analogous to the code for l_const above. 
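For example, when the wanted comparison is EQ_EXPR (the "and" case) but one half was written as a one-bit test against zero, the test is flipped onto the bit itself (values illustrative):

       (x & 8) != 0 && y == 4     is handled as if it were     (x & 8) == 8 && y == 4

   so both halves end up using the wanted code and the constant becomes the single-bit mask.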
*/ if (rcode != wanted_code) { if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask)) { rl_unsignedp = 1; r_const = rl_mask; } else return 0; } /* After this point all optimizations will generate bit-field references, which we might not want. */ if (! lang_hooks.can_use_bit_fields_p ()) return 0; /* See if we can find a mode that contains both fields being compared on the left. If we can't, fail. Otherwise, update all constants and masks to be relative to a field of that size. */ first_bit = MIN (ll_bitpos, rl_bitpos); end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize); lnmode = get_best_mode (end_bit - first_bit, first_bit, TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode, volatilep); if (lnmode == VOIDmode) return 0; lnbitsize = GET_MODE_BITSIZE (lnmode); lnbitpos = first_bit & ~ (lnbitsize - 1); lntype = lang_hooks.types.type_for_size (lnbitsize, 1); xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos; if (BYTES_BIG_ENDIAN) { xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize; xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize; } ll_mask = const_binop (LSHIFT_EXPR, fold_convert (lntype, ll_mask), size_int (xll_bitpos), 0); rl_mask = const_binop (LSHIFT_EXPR, fold_convert (lntype, rl_mask), size_int (xrl_bitpos), 0); if (l_const) { l_const = fold_convert (lntype, l_const); l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask); l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0); if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const, fold (build1 (BIT_NOT_EXPR, lntype, ll_mask)), 0))) { warning ("comparison is always %d", wanted_code == NE_EXPR); return constant_boolean_node (wanted_code == NE_EXPR, truth_type); } } if (r_const) { r_const = fold_convert (lntype, r_const); r_const = unextend (r_const, rl_bitsize, rl_unsignedp, rl_and_mask); r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0); if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const, fold (build1 (BIT_NOT_EXPR, lntype, rl_mask)), 0))) { warning ("comparison is always %d", wanted_code == NE_EXPR); return constant_boolean_node (wanted_code == NE_EXPR, truth_type); } } /* If the right sides are not constant, do the same for it. Also, disallow this optimization if a size or signedness mismatch occurs between the left and right sides. */ if (l_const == 0) { if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp /* Make sure the two fields on the right correspond to the left without being swapped. */ || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos) return 0; first_bit = MIN (lr_bitpos, rr_bitpos); end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize); rnmode = get_best_mode (end_bit - first_bit, first_bit, TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode, volatilep); if (rnmode == VOIDmode) return 0; rnbitsize = GET_MODE_BITSIZE (rnmode); rnbitpos = first_bit & ~ (rnbitsize - 1); rntype = lang_hooks.types.type_for_size (rnbitsize, 1); xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos; if (BYTES_BIG_ENDIAN) { xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize; xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize; } lr_mask = const_binop (LSHIFT_EXPR, fold_convert (rntype, lr_mask), size_int (xlr_bitpos), 0); rr_mask = const_binop (LSHIFT_EXPR, fold_convert (rntype, rr_mask), size_int (xrr_bitpos), 0); /* Make a mask that corresponds to both fields being compared. Do this for both items being compared. 
If the operands are the same size and the bits being compared are in the same position then we can do this by masking both and comparing the masked results. */ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0); lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0); if (lnbitsize == rnbitsize && xll_bitpos == xlr_bitpos) { lhs = make_bit_field_ref (ll_inner, lntype, lnbitsize, lnbitpos, ll_unsignedp || rl_unsignedp); if (! all_ones_mask_p (ll_mask, lnbitsize)) lhs = build2 (BIT_AND_EXPR, lntype, lhs, ll_mask); rhs = make_bit_field_ref (lr_inner, rntype, rnbitsize, rnbitpos, lr_unsignedp || rr_unsignedp); if (! all_ones_mask_p (lr_mask, rnbitsize)) rhs = build2 (BIT_AND_EXPR, rntype, rhs, lr_mask); return build2 (wanted_code, truth_type, lhs, rhs); } /* There is still another way we can do something: If both pairs of fields being compared are adjacent, we may be able to make a wider field containing them both. Note that we still must mask the lhs/rhs expressions. Furthermore, the mask must be shifted to account for the shift done by make_bit_field_ref. */ if ((ll_bitsize + ll_bitpos == rl_bitpos && lr_bitsize + lr_bitpos == rr_bitpos) || (ll_bitpos == rl_bitpos + rl_bitsize && lr_bitpos == rr_bitpos + rr_bitsize)) { tree type; lhs = make_bit_field_ref (ll_inner, lntype, ll_bitsize + rl_bitsize, MIN (ll_bitpos, rl_bitpos), ll_unsignedp); rhs = make_bit_field_ref (lr_inner, rntype, lr_bitsize + rr_bitsize, MIN (lr_bitpos, rr_bitpos), lr_unsignedp); ll_mask = const_binop (RSHIFT_EXPR, ll_mask, size_int (MIN (xll_bitpos, xrl_bitpos)), 0); lr_mask = const_binop (RSHIFT_EXPR, lr_mask, size_int (MIN (xlr_bitpos, xrr_bitpos)), 0); /* Convert to the smaller type before masking out unwanted bits. */ type = lntype; if (lntype != rntype) { if (lnbitsize > rnbitsize) { lhs = fold_convert (rntype, lhs); ll_mask = fold_convert (rntype, ll_mask); type = rntype; } else if (lnbitsize < rnbitsize) { rhs = fold_convert (lntype, rhs); lr_mask = fold_convert (lntype, lr_mask); type = lntype; } } if (! all_ones_mask_p (ll_mask, ll_bitsize + rl_bitsize)) lhs = build2 (BIT_AND_EXPR, type, lhs, ll_mask); if (! all_ones_mask_p (lr_mask, lr_bitsize + rr_bitsize)) rhs = build2 (BIT_AND_EXPR, type, rhs, lr_mask); return build2 (wanted_code, truth_type, lhs, rhs); } return 0; } /* Handle the case of comparisons with constants. If there is something in common between the masks, those bits of the constants must be the same. If not, the condition is always false. Test for this to avoid generating incorrect code below. */ result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0); if (! integer_zerop (result) && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0), const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1) { if (wanted_code == NE_EXPR) { warning ("`or' of unmatched not-equal tests is always 1"); return constant_boolean_node (true, truth_type); } else { warning ("`and' of mutually exclusive equal-tests is always 0"); return constant_boolean_node (false, truth_type); } } /* Construct the expression we will return. First get the component reference we will make. Unless the mask is all ones the width of that field, perform the mask operation. Then compare with the merged constant. */ result = make_bit_field_ref (ll_inner, lntype, lnbitsize, lnbitpos, ll_unsignedp || rl_unsignedp); ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0); if (! 
all_ones_mask_p (ll_mask, lnbitsize)) result = build2 (BIT_AND_EXPR, lntype, result, ll_mask); return build2 (wanted_code, truth_type, result, const_binop (BIT_IOR_EXPR, l_const, r_const, 0)); } /* Optimize T, which is a comparison of a MIN_EXPR or MAX_EXPR with a constant. */ static tree optimize_minmax_comparison (tree t) { tree type = TREE_TYPE (t); tree arg0 = TREE_OPERAND (t, 0); enum tree_code op_code; tree comp_const = TREE_OPERAND (t, 1); tree minmax_const; int consts_equal, consts_lt; tree inner; STRIP_SIGN_NOPS (arg0); op_code = TREE_CODE (arg0); minmax_const = TREE_OPERAND (arg0, 1); consts_equal = tree_int_cst_equal (minmax_const, comp_const); consts_lt = tree_int_cst_lt (minmax_const, comp_const); inner = TREE_OPERAND (arg0, 0); /* If something does not permit us to optimize, return the original tree. */ if ((op_code != MIN_EXPR && op_code != MAX_EXPR) || TREE_CODE (comp_const) != INTEGER_CST || TREE_CONSTANT_OVERFLOW (comp_const) || TREE_CODE (minmax_const) != INTEGER_CST || TREE_CONSTANT_OVERFLOW (minmax_const)) return t; /* Now handle all the various comparison codes. We only handle EQ_EXPR and GT_EXPR, doing the rest with recursive calls using logical simplifications. */ switch (TREE_CODE (t)) { case NE_EXPR: case LT_EXPR: case LE_EXPR: return invert_truthvalue (optimize_minmax_comparison (invert_truthvalue (t))); case GE_EXPR: return fold (build2 (TRUTH_ORIF_EXPR, type, optimize_minmax_comparison (build2 (EQ_EXPR, type, arg0, comp_const)), optimize_minmax_comparison (build2 (GT_EXPR, type, arg0, comp_const)))); case EQ_EXPR: if (op_code == MAX_EXPR && consts_equal) /* MAX (X, 0) == 0 -> X <= 0 */ return fold (build2 (LE_EXPR, type, inner, comp_const)); else if (op_code == MAX_EXPR && consts_lt) /* MAX (X, 0) == 5 -> X == 5 */ return fold (build2 (EQ_EXPR, type, inner, comp_const)); else if (op_code == MAX_EXPR) /* MAX (X, 0) == -1 -> false */ return omit_one_operand (type, integer_zero_node, inner); else if (consts_equal) /* MIN (X, 0) == 0 -> X >= 0 */ return fold (build2 (GE_EXPR, type, inner, comp_const)); else if (consts_lt) /* MIN (X, 0) == 5 -> false */ return omit_one_operand (type, integer_zero_node, inner); else /* MIN (X, 0) == -1 -> X == -1 */ return fold (build2 (EQ_EXPR, type, inner, comp_const)); case GT_EXPR: if (op_code == MAX_EXPR && (consts_equal || consts_lt)) /* MAX (X, 0) > 0 -> X > 0 MAX (X, 0) > 5 -> X > 5 */ return fold (build2 (GT_EXPR, type, inner, comp_const)); else if (op_code == MAX_EXPR) /* MAX (X, 0) > -1 -> true */ return omit_one_operand (type, integer_one_node, inner); else if (op_code == MIN_EXPR && (consts_equal || consts_lt)) /* MIN (X, 0) > 0 -> false MIN (X, 0) > 5 -> false */ return omit_one_operand (type, integer_zero_node, inner); else /* MIN (X, 0) > -1 -> X > -1 */ return fold (build2 (GT_EXPR, type, inner, comp_const)); default: return t; } } /* T is an integer expression that is being multiplied, divided, or taken a modulus (CODE says which and what kind of divide or modulus) by a constant C. See if we can eliminate that operation by folding it with other operations already in T. WIDE_TYPE, if non-null, is a type that should be used for the computation if wider than our type. For example, if we are dividing (X * 8) + (Y * 16) by 4, we can return (X * 2) + (Y * 4). We must, however, be assured that either the original expression would not overflow or that overflow is undefined for the type in the language in question. 
We also canonicalize (X + 7) * 4 into X * 4 + 28 in the hope that either the machine has a multiply-accumulate insn or that this is part of an addressing calculation. If we return a non-null expression, it is an equivalent form of the original computation, but need not be in the original type. */ static tree extract_muldiv (tree t, tree c, enum tree_code code, tree wide_type) { /* To avoid exponential search depth, refuse to allow recursion past three levels. Beyond that (1) it's highly unlikely that we'll find something interesting and (2) we've probably processed it before when we built the inner expression. */ static int depth; tree ret; if (depth > 3) return NULL; depth++; ret = extract_muldiv_1 (t, c, code, wide_type); depth--; return ret; } static tree extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type) { tree type = TREE_TYPE (t); enum tree_code tcode = TREE_CODE (t); tree ctype = (wide_type != 0 && (GET_MODE_SIZE (TYPE_MODE (wide_type)) > GET_MODE_SIZE (TYPE_MODE (type))) ? wide_type : type); tree t1, t2; int same_p = tcode == code; tree op0 = NULL_TREE, op1 = NULL_TREE; /* Don't deal with constants of zero here; they confuse the code below. */ if (integer_zerop (c)) return NULL_TREE; if (TREE_CODE_CLASS (tcode) == '1') op0 = TREE_OPERAND (t, 0); if (TREE_CODE_CLASS (tcode) == '2') op0 = TREE_OPERAND (t, 0), op1 = TREE_OPERAND (t, 1); /* Note that we need not handle conditional operations here since fold already handles those cases. So just do arithmetic here. */ switch (tcode) { case INTEGER_CST: /* For a constant, we can always simplify if we are a multiply or (for divide and modulus) if it is a multiple of our constant. */ if (code == MULT_EXPR || integer_zerop (const_binop (TRUNC_MOD_EXPR, t, c, 0))) return const_binop (code, fold_convert (ctype, t), fold_convert (ctype, c), 0); break; case CONVERT_EXPR: case NON_LVALUE_EXPR: case NOP_EXPR: /* If op0 is an expression ... */ if ((TREE_CODE_CLASS (TREE_CODE (op0)) == '<' || TREE_CODE_CLASS (TREE_CODE (op0)) == '1' || TREE_CODE_CLASS (TREE_CODE (op0)) == '2' || TREE_CODE_CLASS (TREE_CODE (op0)) == 'e') /* ... and is unsigned, and its type is smaller than ctype, then we cannot pass through as widening. */ && ((TYPE_UNSIGNED (TREE_TYPE (op0)) && ! (TREE_CODE (TREE_TYPE (op0)) == INTEGER_TYPE && TYPE_IS_SIZETYPE (TREE_TYPE (op0))) && (GET_MODE_SIZE (TYPE_MODE (ctype)) > GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op0))))) /* ... or its type is larger than ctype, then we cannot pass through this truncation. */ || (GET_MODE_SIZE (TYPE_MODE (ctype)) < GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op0)))) /* ... or signedness changes for division or modulus, then we cannot pass through this conversion. */ || (code != MULT_EXPR && (TYPE_UNSIGNED (ctype) != TYPE_UNSIGNED (TREE_TYPE (op0)))))) break; /* Pass the constant down and see if we can make a simplification. If we can, replace this expression with the inner simplification for possible later conversion to our or some other type. */ if ((t2 = fold_convert (TREE_TYPE (op0), c)) != 0 && TREE_CODE (t2) == INTEGER_CST && ! TREE_CONSTANT_OVERFLOW (t2) && (0 != (t1 = extract_muldiv (op0, t2, code, code == MULT_EXPR ? ctype : NULL_TREE)))) return t1; break; case NEGATE_EXPR: case ABS_EXPR: if ((t1 = extract_muldiv (op0, c, code, wide_type)) != 0) return fold (build1 (tcode, ctype, fold_convert (ctype, t1))); break; case MIN_EXPR: case MAX_EXPR: /* If widening the type changes the signedness, then we can't perform this optimization as that changes the result. 
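A short example of why (values illustrative): for signed char operands a = -1 and b = 1, MIN (a, b) is -1, but if the operation were widened to an unsigned type the -1 would become a large positive value and the MIN would pick 1 instead. Note also the sign handling just below: distributing a negative constant reverses the ordering, so e.g. MIN (a, b) * -2 becomes MAX (a * -2, b * -2).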
*/ if (TYPE_UNSIGNED (ctype) != TYPE_UNSIGNED (type)) break; /* MIN (a, b) / 5 -> MIN (a / 5, b / 5) */ if ((t1 = extract_muldiv (op0, c, code, wide_type)) != 0 && (t2 = extract_muldiv (op1, c, code, wide_type)) != 0) { if (tree_int_cst_sgn (c) < 0) tcode = (tcode == MIN_EXPR ? MAX_EXPR : MIN_EXPR); return fold (build2 (tcode, ctype, fold_convert (ctype, t1), fold_convert (ctype, t2))); } break; case LSHIFT_EXPR: case RSHIFT_EXPR: /* If the second operand is constant, this is a multiplication or floor division, by a power of two, so we can treat it that way unless the multiplier or divisor overflows. */ if (TREE_CODE (op1) == INTEGER_CST /* const_binop may not detect overflow correctly, so check for it explicitly here. */ && TYPE_PRECISION (TREE_TYPE (size_one_node)) > TREE_INT_CST_LOW (op1) && TREE_INT_CST_HIGH (op1) == 0 && 0 != (t1 = fold_convert (ctype, const_binop (LSHIFT_EXPR, size_one_node, op1, 0))) && ! TREE_OVERFLOW (t1)) return extract_muldiv (build2 (tcode == LSHIFT_EXPR ? MULT_EXPR : FLOOR_DIV_EXPR, ctype, fold_convert (ctype, op0), t1), c, code, wide_type); break; case PLUS_EXPR: case MINUS_EXPR: /* See if we can eliminate the operation on both sides. If we can, we can return a new PLUS or MINUS. If we can't, the only remaining cases where we can do anything are if the second operand is a constant. */ t1 = extract_muldiv (op0, c, code, wide_type); t2 = extract_muldiv (op1, c, code, wide_type); if (t1 != 0 && t2 != 0 && (code == MULT_EXPR /* If not multiplication, we can only do this if both operands are divisible by c. */ || (multiple_of_p (ctype, op0, c) && multiple_of_p (ctype, op1, c)))) return fold (build2 (tcode, ctype, fold_convert (ctype, t1), fold_convert (ctype, t2))); /* If this was a subtraction, negate OP1 and set it to be an addition. This simplifies the logic below. */ if (tcode == MINUS_EXPR) tcode = PLUS_EXPR, op1 = negate_expr (op1); if (TREE_CODE (op1) != INTEGER_CST) break; /* If either OP1 or C are negative, this optimization is not safe for some of the division and remainder types while for others we need to change the code. */ if (tree_int_cst_sgn (op1) < 0 || tree_int_cst_sgn (c) < 0) { if (code == CEIL_DIV_EXPR) code = FLOOR_DIV_EXPR; else if (code == FLOOR_DIV_EXPR) code = CEIL_DIV_EXPR; else if (code != MULT_EXPR && code != CEIL_MOD_EXPR && code != FLOOR_MOD_EXPR) break; } /* If it's a multiply or a division/modulus operation of a multiple of our constant, do the operation and verify it doesn't overflow. */ if (code == MULT_EXPR || integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0))) { op1 = const_binop (code, fold_convert (ctype, op1), fold_convert (ctype, c), 0); /* We allow the constant to overflow with wrapping semantics. */ if (op1 == 0 || (TREE_OVERFLOW (op1) && ! flag_wrapv)) break; } else break; /* If we have an unsigned type is not a sizetype, we cannot widen the operation since it will change the result if the original computation overflowed. */ if (TYPE_UNSIGNED (ctype) && ! (TREE_CODE (ctype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (ctype)) && ctype != type) break; /* If we were able to eliminate our operation from the first side, apply our operation to the second side and reform the PLUS. */ if (t1 != 0 && (TREE_CODE (t1) != code || code == MULT_EXPR)) return fold (build2 (tcode, ctype, fold_convert (ctype, t1), op1)); /* The last case is if we are a multiply. In that case, we can apply the distributive law to commute the multiply and addition if the multiplication of the constants doesn't overflow. 
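This is the (X + 7) * 4 -> X * 4 + 28 canonicalization promised at the top of extract_muldiv: by the time we get here OP1 has already been folded together with C (7 * 4 = 28 in that example), so all that remains is to multiply the non-constant side and re-form the PLUS_EXPR. Illustratively:

       extract_muldiv (x + 7, 4, MULT_EXPR, ...)   yields   x * 4 + 28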
*/ if (code == MULT_EXPR) return fold (build2 (tcode, ctype, fold (build2 (code, ctype, fold_convert (ctype, op0), fold_convert (ctype, c))), op1)); break; case MULT_EXPR: /* We have a special case here if we are doing something like (C * 8) % 4 since we know that's zero. */ if ((code == TRUNC_MOD_EXPR || code == CEIL_MOD_EXPR || code == FLOOR_MOD_EXPR || code == ROUND_MOD_EXPR) && TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST && integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0))) return omit_one_operand (type, integer_zero_node, op0); /* ... fall through ... */ case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: /* If we can extract our operation from the LHS, do so and return a new operation. Likewise for the RHS from a MULT_EXPR. Otherwise, do something only if the second operand is a constant. */ if (same_p && (t1 = extract_muldiv (op0, c, code, wide_type)) != 0) return fold (build2 (tcode, ctype, fold_convert (ctype, t1), fold_convert (ctype, op1))); else if (tcode == MULT_EXPR && code == MULT_EXPR && (t1 = extract_muldiv (op1, c, code, wide_type)) != 0) return fold (build2 (tcode, ctype, fold_convert (ctype, op0), fold_convert (ctype, t1))); else if (TREE_CODE (op1) != INTEGER_CST) return 0; /* If these are the same operation types, we can associate them assuming no overflow. */ if (tcode == code && 0 != (t1 = const_binop (MULT_EXPR, fold_convert (ctype, op1), fold_convert (ctype, c), 0)) && ! TREE_OVERFLOW (t1)) return fold (build2 (tcode, ctype, fold_convert (ctype, op0), t1)); /* If these operations "cancel" each other, we have the main optimizations of this pass, which occur when either constant is a multiple of the other, in which case we replace this with either an operation or CODE or TCODE. If we have an unsigned type that is not a sizetype, we cannot do this since it will change the result if the original computation overflowed. */ if ((! TYPE_UNSIGNED (ctype) || (TREE_CODE (ctype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (ctype))) && ! flag_wrapv && ((code == MULT_EXPR && tcode == EXACT_DIV_EXPR) || (tcode == MULT_EXPR && code != TRUNC_MOD_EXPR && code != CEIL_MOD_EXPR && code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR))) { if (integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0))) return fold (build2 (tcode, ctype, fold_convert (ctype, op0), fold_convert (ctype, const_binop (TRUNC_DIV_EXPR, op1, c, 0)))); else if (integer_zerop (const_binop (TRUNC_MOD_EXPR, c, op1, 0))) return fold (build2 (code, ctype, fold_convert (ctype, op0), fold_convert (ctype, const_binop (TRUNC_DIV_EXPR, c, op1, 0)))); } break; default: break; } return 0; } /* Return a node which has the indicated constant VALUE (either 0 or 1), and is of the indicated TYPE. */ static tree constant_boolean_node (int value, tree type) { if (type == integer_type_node) return value ? integer_one_node : integer_zero_node; else if (type == boolean_type_node) return value ? boolean_true_node : boolean_false_node; else if (TREE_CODE (type) == BOOLEAN_TYPE) return lang_hooks.truthvalue_conversion (value ? integer_one_node : integer_zero_node); else { tree t = build_int_2 (value, 0); TREE_TYPE (t) = type; return t; } } /* Transform `a + (b ? x : y)' into `b ? (a + x) : (a + y)'. Transform, `a + (x < y)' into `(x < y) ? (a + 1) : (a + 0)'. Here CODE corresponds to the `+', COND to the `(b ? x : y)' or `(x < y)' expression, and ARG to `a'. 
If COND_FIRST_P is nonzero, then the COND is the first argument to CODE; otherwise (as in the example given here), it is the second argument. TYPE is the type of the original expression. Return NULL_TREE if no simplification is possible. */ static tree fold_binary_op_with_conditional_arg (enum tree_code code, tree type, tree cond, tree arg, int cond_first_p) { tree test, true_value, false_value; tree lhs = NULL_TREE; tree rhs = NULL_TREE; /* This transformation is only worthwhile if we don't have to wrap arg in a SAVE_EXPR, and the operation can be simplified on at least one of the branches once it is pushed inside the COND_EXPR. */ if (!TREE_CONSTANT (arg)) return NULL_TREE; if (TREE_CODE (cond) == COND_EXPR) { test = TREE_OPERAND (cond, 0); true_value = TREE_OPERAND (cond, 1); false_value = TREE_OPERAND (cond, 2); /* If this operand is a throw expression (and so has void type), then it does not make sense to try to perform a logical or arithmetic operation involving it. */ if (VOID_TYPE_P (TREE_TYPE (true_value))) lhs = true_value; if (VOID_TYPE_P (TREE_TYPE (false_value))) rhs = false_value; } else { tree testtype = TREE_TYPE (cond); test = cond; true_value = constant_boolean_node (true, testtype); false_value = constant_boolean_node (false, testtype); } if (lhs == 0) lhs = fold (cond_first_p ? build2 (code, type, true_value, arg) : build2 (code, type, arg, true_value)); if (rhs == 0) rhs = fold (cond_first_p ? build2 (code, type, false_value, arg) : build2 (code, type, arg, false_value)); test = fold (build3 (COND_EXPR, type, test, lhs, rhs)); return fold_convert (type, test); } /* Subroutine of fold() that checks for the addition of +/- 0.0. If !NEGATE, return true if ADDEND is +/-0.0 and, for all X of type TYPE, X + ADDEND is the same as X. If NEGATE, return true if X - ADDEND is the same as X. X + 0 and X - 0 both give X when X is NaN, infinite, or nonzero and finite. The problematic cases are when X is zero, and its mode has signed zeros. In the case of rounding towards -infinity, X - 0 is not the same as X because 0 - 0 is -0. In other rounding modes, X + 0 is not the same as X because -0 + 0 is 0. */ static bool fold_real_zero_addition_p (tree type, tree addend, int negate) { if (!real_zerop (addend)) return false; /* Don't allow the fold with -fsignaling-nans. */ if (HONOR_SNANS (TYPE_MODE (type))) return false; /* Allow the fold if zeros aren't signed, or their sign isn't important. */ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))) return true; /* Treat x + -0 as x - 0 and x - -0 as x + 0. */ if (TREE_CODE (addend) == REAL_CST && REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (addend))) negate = !negate; /* The mode has signed zeros, and we have to honor their sign. In this situation, there is only one case we can return true for. X - 0 is the same as X unless rounding towards -infinity is supported. */ return negate && !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type)); } /* Subroutine of fold() that checks comparisons of built-in math functions against real constants. FCODE is the DECL_FUNCTION_CODE of the built-in, CODE is the comparison operator: EQ_EXPR, NE_EXPR, GT_EXPR, LT_EXPR, GE_EXPR or LE_EXPR. TYPE is the type of the result and ARG0 and ARG1 are the operands of the comparison. ARG1 must be a TREE_REAL_CST. The function returns the constant folded tree if a simplification can be made, and NULL_TREE otherwise.
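For the sqrt cases handled below the net effect is, subject to the NaN and infinity guards in the code (constants chosen only for illustration):

       sqrt (x) >  3.0    becomes   x > 9.0
       sqrt (x) <  3.0    becomes   x >= 0.0 && x < 9.0   (the x >= 0.0 guard is dropped when NaNs need not be honored)
       sqrt (x) < -1.0    becomes   0, since sqrt never returns a negative value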
*/ static tree fold_mathfn_compare (enum built_in_function fcode, enum tree_code code, tree type, tree arg0, tree arg1) { REAL_VALUE_TYPE c; if (BUILTIN_SQRT_P (fcode)) { tree arg = TREE_VALUE (TREE_OPERAND (arg0, 1)); enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg0)); c = TREE_REAL_CST (arg1); if (REAL_VALUE_NEGATIVE (c)) { /* sqrt(x) < y is always false, if y is negative. */ if (code == EQ_EXPR || code == LT_EXPR || code == LE_EXPR) return omit_one_operand (type, integer_zero_node, arg); /* sqrt(x) > y is always true, if y is negative and we don't care about NaNs, i.e. negative values of x. */ if (code == NE_EXPR || !HONOR_NANS (mode)) return omit_one_operand (type, integer_one_node, arg); /* sqrt(x) > y is the same as x >= 0, if y is negative. */ return fold (build2 (GE_EXPR, type, arg, build_real (TREE_TYPE (arg), dconst0))); } else if (code == GT_EXPR || code == GE_EXPR) { REAL_VALUE_TYPE c2; REAL_ARITHMETIC (c2, MULT_EXPR, c, c); real_convert (&c2, mode, &c2); if (REAL_VALUE_ISINF (c2)) { /* sqrt(x) > y is x == +Inf, when y is very large. */ if (HONOR_INFINITIES (mode)) return fold (build2 (EQ_EXPR, type, arg, build_real (TREE_TYPE (arg), c2))); /* sqrt(x) > y is always false, when y is very large and we don't care about infinities. */ return omit_one_operand (type, integer_zero_node, arg); } /* sqrt(x) > c is the same as x > c*c. */ return fold (build2 (code, type, arg, build_real (TREE_TYPE (arg), c2))); } else if (code == LT_EXPR || code == LE_EXPR) { REAL_VALUE_TYPE c2; REAL_ARITHMETIC (c2, MULT_EXPR, c, c); real_convert (&c2, mode, &c2); if (REAL_VALUE_ISINF (c2)) { /* sqrt(x) < y is always true, when y is a very large value and we don't care about NaNs or Infinities. */ if (! HONOR_NANS (mode) && ! HONOR_INFINITIES (mode)) return omit_one_operand (type, integer_one_node, arg); /* sqrt(x) < y is x != +Inf when y is very large and we don't care about NaNs. */ if (! HONOR_NANS (mode)) return fold (build2 (NE_EXPR, type, arg, build_real (TREE_TYPE (arg), c2))); /* sqrt(x) < y is x >= 0 when y is very large and we don't care about Infinities. */ if (! HONOR_INFINITIES (mode)) return fold (build2 (GE_EXPR, type, arg, build_real (TREE_TYPE (arg), dconst0))); /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */ if (lang_hooks.decls.global_bindings_p () != 0 || CONTAINS_PLACEHOLDER_P (arg)) return NULL_TREE; arg = save_expr (arg); return fold (build2 (TRUTH_ANDIF_EXPR, type, fold (build2 (GE_EXPR, type, arg, build_real (TREE_TYPE (arg), dconst0))), fold (build2 (NE_EXPR, type, arg, build_real (TREE_TYPE (arg), c2))))); } /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */ if (! HONOR_NANS (mode)) return fold (build2 (code, type, arg, build_real (TREE_TYPE (arg), c2))); /* sqrt(x) < c is the same as x >= 0 && x < c*c. */ if (lang_hooks.decls.global_bindings_p () == 0 && ! CONTAINS_PLACEHOLDER_P (arg)) { arg = save_expr (arg); return fold (build2 (TRUTH_ANDIF_EXPR, type, fold (build2 (GE_EXPR, type, arg, build_real (TREE_TYPE (arg), dconst0))), fold (build2 (code, type, arg, build_real (TREE_TYPE (arg), c2))))); } } } return NULL_TREE; } /* Subroutine of fold() that optimizes comparisons against Infinities, either +Inf or -Inf. CODE is the comparison operator: EQ_EXPR, NE_EXPR, GT_EXPR, LT_EXPR, GE_EXPR or LE_EXPR. TYPE is the type of the result and ARG0 and ARG1 are the operands of the comparison. ARG1 must be a TREE_REAL_CST. The function returns the constant folded tree if a simplification can be made, and NULL_TREE otherwise. 
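For example, x <= +Inf folds to true when NaNs need not be honored, and x < +Inf folds to x <= DBL_MAX (the largest representable value of the mode) unconditionally.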
*/ static tree fold_inf_compare (enum tree_code code, tree type, tree arg0, tree arg1) { enum machine_mode mode; REAL_VALUE_TYPE max; tree temp; bool neg; mode = TYPE_MODE (TREE_TYPE (arg0)); /* For negative infinity swap the sense of the comparison. */ neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg1)); if (neg) code = swap_tree_comparison (code); switch (code) { case GT_EXPR: /* x > +Inf is always false, if we ignore sNANs. */ if (HONOR_SNANS (mode)) return NULL_TREE; return omit_one_operand (type, integer_zero_node, arg0); case LE_EXPR: /* x <= +Inf is always true, if we don't care about NaNs. */ if (! HONOR_NANS (mode)) return omit_one_operand (type, integer_one_node, arg0); /* x <= +Inf is the same as x == x, i.e. isfinite(x). */ if (lang_hooks.decls.global_bindings_p () == 0 && ! CONTAINS_PLACEHOLDER_P (arg0)) { arg0 = save_expr (arg0); return fold (build2 (EQ_EXPR, type, arg0, arg0)); } break; case EQ_EXPR: case GE_EXPR: /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */ real_maxval (&max, neg, mode); return fold (build2 (neg ? LT_EXPR : GT_EXPR, type, arg0, build_real (TREE_TYPE (arg0), max))); case LT_EXPR: /* x < +Inf is always equal to x <= DBL_MAX. */ real_maxval (&max, neg, mode); return fold (build2 (neg ? GE_EXPR : LE_EXPR, type, arg0, build_real (TREE_TYPE (arg0), max))); case NE_EXPR: /* x != +Inf is always equal to !(x > DBL_MAX). */ real_maxval (&max, neg, mode); if (! HONOR_NANS (mode)) return fold (build2 (neg ? GE_EXPR : LE_EXPR, type, arg0, build_real (TREE_TYPE (arg0), max))); /* The transformation below creates non-gimple code and thus is not appropriate if we are in gimple form. */ if (in_gimple_form) return NULL_TREE; temp = fold (build2 (neg ? LT_EXPR : GT_EXPR, type, arg0, build_real (TREE_TYPE (arg0), max))); return fold (build1 (TRUTH_NOT_EXPR, type, temp)); default: break; } return NULL_TREE; } /* Subroutine of fold() that optimizes comparisons of a division by a nonzero integer constant against an integer constant, i.e. X/C1 op C2. CODE is the comparison operator: EQ_EXPR, NE_EXPR, GT_EXPR, LT_EXPR, GE_EXPR or LE_EXPR. TYPE is the type of the result and ARG0 and ARG1 are the operands of the comparison. ARG1 must be an INTEGER_CST. The function returns the constant folded tree if a simplification can be made, and NULL_TREE otherwise. */ static tree fold_div_compare (enum tree_code code, tree type, tree arg0, tree arg1) { tree prod, tmp, hi, lo; tree arg00 = TREE_OPERAND (arg0, 0); tree arg01 = TREE_OPERAND (arg0, 1); unsigned HOST_WIDE_INT lpart; HOST_WIDE_INT hpart; int overflow; /* We have to do this the hard way to detect unsigned overflow. prod = int_const_binop (MULT_EXPR, arg01, arg1, 0); */ overflow = mul_double (TREE_INT_CST_LOW (arg01), TREE_INT_CST_HIGH (arg01), TREE_INT_CST_LOW (arg1), TREE_INT_CST_HIGH (arg1), &lpart, &hpart); prod = build_int_2 (lpart, hpart); TREE_TYPE (prod) = TREE_TYPE (arg00); TREE_OVERFLOW (prod) = force_fit_type (prod, overflow) || TREE_INT_CST_HIGH (prod) != hpart || TREE_INT_CST_LOW (prod) != lpart; TREE_CONSTANT_OVERFLOW (prod) = TREE_OVERFLOW (prod); if (TYPE_UNSIGNED (TREE_TYPE (arg0))) { tmp = int_const_binop (MINUS_EXPR, arg01, integer_one_node, 0); lo = prod; /* Likewise hi = int_const_binop (PLUS_EXPR, prod, tmp, 0).
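For instance, for unsigned X the test X/3 == 2 holds exactly when X is in [6, 8], so lo is 3*2 = 6 and hi is 6 + (3-1) = 8, and the EQ_EXPR case below emits the range check 6 <= X && X <= 8.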
*/ overflow = add_double (TREE_INT_CST_LOW (prod), TREE_INT_CST_HIGH (prod), TREE_INT_CST_LOW (tmp), TREE_INT_CST_HIGH (tmp), &lpart, &hpart); hi = build_int_2 (lpart, hpart); TREE_TYPE (hi) = TREE_TYPE (arg00); TREE_OVERFLOW (hi) = force_fit_type (hi, overflow) || TREE_INT_CST_HIGH (hi) != hpart || TREE_INT_CST_LOW (hi) != lpart || TREE_OVERFLOW (prod); TREE_CONSTANT_OVERFLOW (hi) = TREE_OVERFLOW (hi); } else if (tree_int_cst_sgn (arg01) >= 0) { tmp = int_const_binop (MINUS_EXPR, arg01, integer_one_node, 0); switch (tree_int_cst_sgn (arg1)) { case -1: lo = int_const_binop (MINUS_EXPR, prod, tmp, 0); hi = prod; break; case 0: lo = fold_negate_const (tmp, TREE_TYPE (arg0)); hi = tmp; break; case 1: hi = int_const_binop (PLUS_EXPR, prod, tmp, 0); lo = prod; break; default: abort (); } } else { tmp = int_const_binop (PLUS_EXPR, arg01, integer_one_node, 0); switch (tree_int_cst_sgn (arg1)) { case -1: hi = int_const_binop (MINUS_EXPR, prod, tmp, 0); lo = prod; break; case 0: hi = fold_negate_const (tmp, TREE_TYPE (arg0)); lo = tmp; break; case 1: lo = int_const_binop (PLUS_EXPR, prod, tmp, 0); hi = prod; break; default: abort (); } } switch (code) { case EQ_EXPR: if (TREE_OVERFLOW (lo) && TREE_OVERFLOW (hi)) return omit_one_operand (type, integer_zero_node, arg00); if (TREE_OVERFLOW (hi)) return fold (build2 (GE_EXPR, type, arg00, lo)); if (TREE_OVERFLOW (lo)) return fold (build2 (LE_EXPR, type, arg00, hi)); return build_range_check (type, arg00, 1, lo, hi); case NE_EXPR: if (TREE_OVERFLOW (lo) && TREE_OVERFLOW (hi)) return omit_one_operand (type, integer_one_node, arg00); if (TREE_OVERFLOW (hi)) return fold (build2 (LT_EXPR, type, arg00, lo)); if (TREE_OVERFLOW (lo)) return fold (build2 (GT_EXPR, type, arg00, hi)); return build_range_check (type, arg00, 0, lo, hi); case LT_EXPR: if (TREE_OVERFLOW (lo)) return omit_one_operand (type, integer_zero_node, arg00); return fold (build2 (LT_EXPR, type, arg00, lo)); case LE_EXPR: if (TREE_OVERFLOW (hi)) return omit_one_operand (type, integer_one_node, arg00); return fold (build2 (LE_EXPR, type, arg00, hi)); case GT_EXPR: if (TREE_OVERFLOW (hi)) return omit_one_operand (type, integer_zero_node, arg00); return fold (build2 (GT_EXPR, type, arg00, hi)); case GE_EXPR: if (TREE_OVERFLOW (lo)) return omit_one_operand (type, integer_one_node, arg00); return fold (build2 (GE_EXPR, type, arg00, lo)); default: break; } return NULL_TREE; } /* If CODE with arguments ARG0 and ARG1 represents a single bit equality/inequality test, then return a simplified form of the test using shifts and logical operations. Otherwise return NULL. TYPE is the desired result type. */ tree fold_single_bit_test (enum tree_code code, tree arg0, tree arg1, tree result_type) { /* If this is a TRUTH_NOT_EXPR, it may have a single bit test inside operand 0. */ if (code == TRUTH_NOT_EXPR) { code = TREE_CODE (arg0); if (code != NE_EXPR && code != EQ_EXPR) return NULL_TREE; /* Extract the arguments of the EQ/NE. */ arg1 = TREE_OPERAND (arg0, 1); arg0 = TREE_OPERAND (arg0, 0); /* This requires us to invert the code. */ code = (code == EQ_EXPR ? NE_EXPR : EQ_EXPR); } /* If this is testing a single bit, we can optimize the test. 
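For example, (X & 8) != 0 becomes a test of bit 3, roughly (X >> 3) & 1, and (X & C) != 0 where C is the sign bit of X becomes X < 0.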
*/ if ((code == NE_EXPR || code == EQ_EXPR) && TREE_CODE (arg0) == BIT_AND_EXPR && integer_zerop (arg1) && integer_pow2p (TREE_OPERAND (arg0, 1))) { tree inner = TREE_OPERAND (arg0, 0); tree type = TREE_TYPE (arg0); int bitnum = tree_log2 (TREE_OPERAND (arg0, 1)); enum machine_mode operand_mode = TYPE_MODE (type); int ops_unsigned; tree signed_type, unsigned_type, intermediate_type; tree arg00; /* If we have (A & C) != 0 where C is the sign bit of A, convert this into A < 0. Similarly for (A & C) == 0 into A >= 0. */ arg00 = sign_bit_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg0, 1)); if (arg00 != NULL_TREE /* This is only a win if casting to a signed type is cheap, i.e. when arg00's type is not a partial mode. */ && TYPE_PRECISION (TREE_TYPE (arg00)) == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (arg00)))) { tree stype = lang_hooks.types.signed_type (TREE_TYPE (arg00)); return fold (build2 (code == EQ_EXPR ? GE_EXPR : LT_EXPR, result_type, fold_convert (stype, arg00), fold_convert (stype, integer_zero_node))); } /* Otherwise we have (A & C) != 0 where C is a single bit, convert that into ((A >> C2) & 1). Where C2 = log2(C). Similarly for (A & C) == 0. */ /* If INNER is a right shift of a constant and it plus BITNUM does not overflow, adjust BITNUM and INNER. */ if (TREE_CODE (inner) == RSHIFT_EXPR && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST && TREE_INT_CST_HIGH (TREE_OPERAND (inner, 1)) == 0 && bitnum < TYPE_PRECISION (type) && 0 > compare_tree_int (TREE_OPERAND (inner, 1), bitnum - TYPE_PRECISION (type))) { bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1)); inner = TREE_OPERAND (inner, 0); } /* If we are going to be able to omit the AND below, we must do our operations as unsigned. If we must use the AND, we have a choice. Normally unsigned is faster, but for some machines signed is. */ #ifdef LOAD_EXTEND_OP ops_unsigned = (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND ? 0 : 1); #else ops_unsigned = 1; #endif signed_type = lang_hooks.types.type_for_mode (operand_mode, 0); unsigned_type = lang_hooks.types.type_for_mode (operand_mode, 1); intermediate_type = ops_unsigned ? unsigned_type : signed_type; inner = fold_convert (intermediate_type, inner); if (bitnum != 0) inner = build2 (RSHIFT_EXPR, intermediate_type, inner, size_int (bitnum)); if (code == EQ_EXPR) inner = build2 (BIT_XOR_EXPR, intermediate_type, inner, integer_one_node); /* Put the AND last so it can combine with more things. */ inner = build2 (BIT_AND_EXPR, intermediate_type, inner, integer_one_node); /* Make sure to return the proper type. */ inner = fold_convert (result_type, inner); return inner; } return NULL_TREE; } /* Check whether we are allowed to reorder operands arg0 and arg1, such that the evaluation of arg1 occurs before arg0. */ static bool reorder_operands_p (tree arg0, tree arg1) { if (! flag_evaluation_order) return true; if (TREE_CONSTANT (arg0) || TREE_CONSTANT (arg1)) return true; return ! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1); } /* Test whether it is preferable two swap two operands, ARG0 and ARG1, for example because ARG0 is an integer constant and ARG1 isn't. If REORDER is true, only recommend swapping if we can evaluate the operands in reverse order. 
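fold () uses this to canonicalize commutative operations: e.g. 4 + x is rewritten as x + 4, so its later simplifications only need to check for a constant in the second operand.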
*/ bool tree_swap_operands_p (tree arg0, tree arg1, bool reorder) { STRIP_SIGN_NOPS (arg0); STRIP_SIGN_NOPS (arg1); if (TREE_CODE (arg1) == INTEGER_CST) return 0; if (TREE_CODE (arg0) == INTEGER_CST) return 1; if (TREE_CODE (arg1) == REAL_CST) return 0; if (TREE_CODE (arg0) == REAL_CST) return 1; if (TREE_CODE (arg1) == COMPLEX_CST) return 0; if (TREE_CODE (arg0) == COMPLEX_CST) return 1; if (TREE_CONSTANT (arg1)) return 0; if (TREE_CONSTANT (arg0)) return 1; if (optimize_size) return 0; if (reorder && flag_evaluation_order && (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))) return 0; if (DECL_P (arg1)) return 0; if (DECL_P (arg0)) return 1; /* It is preferable to swap two SSA_NAME to ensure a canonical form for commutative and comparison operators. Ensuring a canonical form allows the optimizers to find additional redundancies without having to explicitly check for both orderings. */ if (TREE_CODE (arg0) == SSA_NAME && TREE_CODE (arg1) == SSA_NAME && SSA_NAME_VERSION (arg0) > SSA_NAME_VERSION (arg1)) return 1; return 0; } /* Perform constant folding and related simplification of EXPR. The related simplifications include x*1 => x, x*0 => 0, etc., and application of the associative law. NOP_EXPR conversions may be removed freely (as long as we are careful not to change the type of the overall expression). We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR, but we can constant-fold them if they have constant operands. */ #ifdef ENABLE_FOLD_CHECKING # define fold(x) fold_1 (x) static tree fold_1 (tree); static #endif tree fold (tree expr) { const tree t = expr; const tree type = TREE_TYPE (expr); tree t1 = NULL_TREE; tree tem; tree arg0 = NULL_TREE, arg1 = NULL_TREE; enum tree_code code = TREE_CODE (t); int kind = TREE_CODE_CLASS (code); /* WINS will be nonzero when the switch is done if all operands are constant. */ int wins = 1; /* Return right away if a constant. */ if (kind == 'c') return t; if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR) { tree subop; /* Special case for conversion ops that can have fixed point args. */ arg0 = TREE_OPERAND (t, 0); /* Don't use STRIP_NOPS, because signedness of argument type matters. */ if (arg0 != 0) STRIP_SIGN_NOPS (arg0); if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST) subop = TREE_REALPART (arg0); else subop = arg0; if (subop != 0 && TREE_CODE (subop) != INTEGER_CST && TREE_CODE (subop) != REAL_CST) /* Note that TREE_CONSTANT isn't enough: static var addresses are constant but we can't do arithmetic on them. */ wins = 0; } else if (IS_EXPR_CODE_CLASS (kind)) { int len = first_rtl_op (code); int i; for (i = 0; i < len; i++) { tree op = TREE_OPERAND (t, i); tree subop; if (op == 0) continue; /* Valid for CALL_EXPR, at least. */ /* Strip any conversions that don't change the mode. This is safe for every expression, except for a comparison expression because its signedness is derived from its operands. So, in the latter case, only strip conversions that don't change the signedness. Note that this is done as an internal manipulation within the constant folder, in order to find the simplest representation of the arguments so that their form can be studied. In any case, the appropriate type conversions should be put back in the tree that will get out of the constant folder.
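For instance, a conversion from int to unsigned int can be stripped from the operand of a PLUS_EXPR here, but not from the operand of a comparison, where the change of signedness would change the result.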
*/ if (kind == '<') STRIP_SIGN_NOPS (op); else STRIP_NOPS (op); if (TREE_CODE (op) == COMPLEX_CST) subop = TREE_REALPART (op); else subop = op; if (TREE_CODE (subop) != INTEGER_CST && TREE_CODE (subop) != REAL_CST) /* Note that TREE_CONSTANT isn't enough: static var addresses are constant but we can't do arithmetic on them. */ wins = 0; if (i == 0) arg0 = op; else if (i == 1) arg1 = op; } } /* If this is a commutative operation, and ARG0 is a constant, move it to ARG1 to reduce the number of tests below. */ if (commutative_tree_code (code) && tree_swap_operands_p (arg0, arg1, true)) return fold (build2 (code, type, TREE_OPERAND (t, 1), TREE_OPERAND (t, 0))); /* Now WINS is set as described above, ARG0 is the first operand of EXPR, and ARG1 is the second operand (if it has more than one operand). First check for cases where an arithmetic operation is applied to a compound, conditional, or comparison operation. Push the arithmetic operation inside the compound or conditional to see if any folding can then be done. Convert comparison to conditional for this purpose. The also optimizes non-constant cases that used to be done in expand_expr. Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR, one of the operands is a comparison and the other is a comparison, a BIT_AND_EXPR with the constant 1, or a truth value. In that case, the code below would make the expression more complex. Change it to a TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */ if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == EQ_EXPR || code == NE_EXPR) && ((truth_value_p (TREE_CODE (arg0)) && (truth_value_p (TREE_CODE (arg1)) || (TREE_CODE (arg1) == BIT_AND_EXPR && integer_onep (TREE_OPERAND (arg1, 1))))) || (truth_value_p (TREE_CODE (arg1)) && (truth_value_p (TREE_CODE (arg0)) || (TREE_CODE (arg0) == BIT_AND_EXPR && integer_onep (TREE_OPERAND (arg0, 1))))))) { tem = fold (build2 (code == BIT_AND_EXPR ? TRUTH_AND_EXPR : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR : TRUTH_XOR_EXPR, type, fold_convert (boolean_type_node, arg0), fold_convert (boolean_type_node, arg1))); if (code == EQ_EXPR) tem = invert_truthvalue (tem); return tem; } if (TREE_CODE_CLASS (code) == '1') { if (TREE_CODE (arg0) == COMPOUND_EXPR) return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0), fold (build1 (code, type, TREE_OPERAND (arg0, 1)))); else if (TREE_CODE (arg0) == COND_EXPR) { tree arg01 = TREE_OPERAND (arg0, 1); tree arg02 = TREE_OPERAND (arg0, 2); if (! VOID_TYPE_P (TREE_TYPE (arg01))) arg01 = fold (build1 (code, type, arg01)); if (! VOID_TYPE_P (TREE_TYPE (arg02))) arg02 = fold (build1 (code, type, arg02)); tem = fold (build3 (COND_EXPR, type, TREE_OPERAND (arg0, 0), arg01, arg02)); /* If this was a conversion, and all we did was to move into inside the COND_EXPR, bring it back out. But leave it if it is a conversion from integer to integer and the result precision is no wider than a word since such a conversion is cheap and may be optimized away by combine, while it couldn't if it were outside the COND_EXPR. Then return so we don't get into an infinite recursion loop taking the conversion out and then back in. */ if ((code == NOP_EXPR || code == CONVERT_EXPR || code == NON_LVALUE_EXPR) && TREE_CODE (tem) == COND_EXPR && TREE_CODE (TREE_OPERAND (tem, 1)) == code && TREE_CODE (TREE_OPERAND (tem, 2)) == code && ! VOID_TYPE_P (TREE_OPERAND (tem, 1)) && ! 
VOID_TYPE_P (TREE_OPERAND (tem, 2)) && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (tem, 1), 0)) == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (tem, 2), 0))) && ! (INTEGRAL_TYPE_P (TREE_TYPE (tem)) && (INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (tem, 1), 0)))) && TYPE_PRECISION (TREE_TYPE (tem)) <= BITS_PER_WORD)) tem = build1 (code, type, build3 (COND_EXPR, TREE_TYPE (TREE_OPERAND (TREE_OPERAND (tem, 1), 0)), TREE_OPERAND (tem, 0), TREE_OPERAND (TREE_OPERAND (tem, 1), 0), TREE_OPERAND (TREE_OPERAND (tem, 2), 0))); return tem; } else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<') { if (TREE_CODE (type) == BOOLEAN_TYPE) { arg0 = copy_node (arg0); TREE_TYPE (arg0) = type; return arg0; } else if (TREE_CODE (type) != INTEGER_TYPE) return fold (build3 (COND_EXPR, type, arg0, fold (build1 (code, type, integer_one_node)), fold (build1 (code, type, integer_zero_node)))); } } else if (TREE_CODE_CLASS (code) == '<' && TREE_CODE (arg0) == COMPOUND_EXPR) return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0), fold (build2 (code, type, TREE_OPERAND (arg0, 1), arg1))); else if (TREE_CODE_CLASS (code) == '<' && TREE_CODE (arg1) == COMPOUND_EXPR) return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0), fold (build2 (code, type, arg0, TREE_OPERAND (arg1, 1)))); else if (TREE_CODE_CLASS (code) == '2' || TREE_CODE_CLASS (code) == '<') { if (TREE_CODE (arg0) == COMPOUND_EXPR) return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0), fold (build2 (code, type, TREE_OPERAND (arg0, 1), arg1))); if (TREE_CODE (arg1) == COMPOUND_EXPR && reorder_operands_p (arg0, TREE_OPERAND (arg1, 0))) return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0), fold (build2 (code, type, arg0, TREE_OPERAND (arg1, 1)))); if (TREE_CODE (arg0) == COND_EXPR || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<') { tem = fold_binary_op_with_conditional_arg (code, type, arg0, arg1, /*cond_first_p=*/1); if (tem != NULL_TREE) return tem; } if (TREE_CODE (arg1) == COND_EXPR || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<') { tem = fold_binary_op_with_conditional_arg (code, type, arg1, arg0, /*cond_first_p=*/0); if (tem != NULL_TREE) return tem; } } switch (code) { case CONST_DECL: return fold (DECL_INITIAL (t)); case NOP_EXPR: case FLOAT_EXPR: case CONVERT_EXPR: case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: if (TREE_TYPE (TREE_OPERAND (t, 0)) == type) return TREE_OPERAND (t, 0); /* Handle cases of two conversions in a row. 
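That is, (T3) (T2) x where x has some type T1; where it is safe we express the result as the single conversion (T3) x.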
*/ if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR) { tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)); tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0)); int inside_int = INTEGRAL_TYPE_P (inside_type); int inside_ptr = POINTER_TYPE_P (inside_type); int inside_float = FLOAT_TYPE_P (inside_type); unsigned int inside_prec = TYPE_PRECISION (inside_type); int inside_unsignedp = TYPE_UNSIGNED (inside_type); int inter_int = INTEGRAL_TYPE_P (inter_type); int inter_ptr = POINTER_TYPE_P (inter_type); int inter_float = FLOAT_TYPE_P (inter_type); unsigned int inter_prec = TYPE_PRECISION (inter_type); int inter_unsignedp = TYPE_UNSIGNED (inter_type); int final_int = INTEGRAL_TYPE_P (type); int final_ptr = POINTER_TYPE_P (type); int final_float = FLOAT_TYPE_P (type); unsigned int final_prec = TYPE_PRECISION (type); int final_unsignedp = TYPE_UNSIGNED (type); /* In addition to the cases of two conversions in a row handled below, if we are converting something to its own type via an object of identical or wider precision, neither conversion is needed. */ if (TYPE_MAIN_VARIANT (inside_type) == TYPE_MAIN_VARIANT (type) && ((inter_int && final_int) || (inter_float && final_float)) && inter_prec >= final_prec) return fold (build1 (code, type, TREE_OPERAND (TREE_OPERAND (t, 0), 0))); /* Likewise, if the intermediate and final types are either both float or both integer, we don't need the middle conversion if it is wider than the final type and doesn't change the signedness (for integers). Avoid this if the final type is a pointer since then we sometimes need the inner conversion. Likewise if the outer has a precision not equal to the size of its mode. */ if ((((inter_int || inter_ptr) && (inside_int || inside_ptr)) || (inter_float && inside_float)) && inter_prec >= inside_prec && (inter_float || inter_unsignedp == inside_unsignedp) && ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (type)) && TYPE_MODE (type) == TYPE_MODE (inter_type)) && ! final_ptr) return fold (build1 (code, type, TREE_OPERAND (TREE_OPERAND (t, 0), 0))); /* If we have a sign-extension of a zero-extended value, we can replace that by a single zero-extension. */ if (inside_int && inter_int && final_int && inside_prec < inter_prec && inter_prec < final_prec && inside_unsignedp && !inter_unsignedp) return fold (build1 (code, type, TREE_OPERAND (TREE_OPERAND (t, 0), 0))); /* Two conversions in a row are not needed unless: - some conversion is floating-point (overstrict for now), or - the intermediate type is narrower than both initial and final, or - the intermediate type and innermost type differ in signedness, and the outermost type is wider than the intermediate, or - the initial type is a pointer type and the precisions of the intermediate and final types differ, or - the final type is a pointer type and the precisions of the initial and intermediate types differ. */ if (! inside_float && ! inter_float && ! final_float && (inter_prec > inside_prec || inter_prec > final_prec) && ! (inside_int && inter_int && inter_unsignedp != inside_unsignedp && inter_prec < final_prec) && ((inter_unsignedp && inter_prec > inside_prec) == (final_unsignedp && final_prec > inter_prec)) && ! (inside_ptr && inter_prec != final_prec) && ! (final_ptr && inside_prec != inter_prec) && ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (type)) && TYPE_MODE (type) == TYPE_MODE (inter_type)) && ! 
final_ptr) return fold (build1 (code, type, TREE_OPERAND (TREE_OPERAND (t, 0), 0))); } if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1)) /* Detect assigning a bitfield. */ && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1)))) { /* Don't leave an assignment inside a conversion unless assigning a bitfield. */ tree prev = TREE_OPERAND (t, 0); tem = copy_node (t); TREE_OPERAND (tem, 0) = TREE_OPERAND (prev, 1); /* First do the assignment, then return converted constant. */ tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem), prev, fold (tem)); TREE_NO_WARNING (tem) = 1; TREE_USED (tem) = 1; return tem; } /* Convert (T)(x & c) into (T)x & (T)c, if c is an integer constants (if x has signed type, the sign bit cannot be set in c). This folds extension into the BIT_AND_EXPR. */ if (INTEGRAL_TYPE_P (type) && TREE_CODE (type) != BOOLEAN_TYPE && TREE_CODE (TREE_OPERAND (t, 0)) == BIT_AND_EXPR && TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 1)) == INTEGER_CST) { tree and = TREE_OPERAND (t, 0); tree and0 = TREE_OPERAND (and, 0), and1 = TREE_OPERAND (and, 1); int change = 0; if (TYPE_UNSIGNED (TREE_TYPE (and)) || (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (and)))) change = 1; else if (TYPE_PRECISION (TREE_TYPE (and1)) <= HOST_BITS_PER_WIDE_INT && host_integerp (and1, 1)) { unsigned HOST_WIDE_INT cst; cst = tree_low_cst (and1, 1); cst &= (HOST_WIDE_INT) -1 << (TYPE_PRECISION (TREE_TYPE (and1)) - 1); change = (cst == 0); #ifdef LOAD_EXTEND_OP if (change && (LOAD_EXTEND_OP (TYPE_MODE (TREE_TYPE (and0))) == ZERO_EXTEND)) { tree uns = lang_hooks.types.unsigned_type (TREE_TYPE (and0)); and0 = fold_convert (uns, and0); and1 = fold_convert (uns, and1); } #endif } if (change) return fold (build2 (BIT_AND_EXPR, type, fold_convert (type, and0), fold_convert (type, and1))); } /* Convert (T1)((T2)X op Y) into (T1)X op Y, for pointer types T1 and T2 being pointers to types of the same size. */ if (POINTER_TYPE_P (TREE_TYPE (t)) && TREE_CODE_CLASS (TREE_CODE (arg0)) == '2' && TREE_CODE (TREE_OPERAND (arg0, 0)) == NOP_EXPR && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))) { tree arg00 = TREE_OPERAND (arg0, 0); tree t0 = TREE_TYPE (t); tree t1 = TREE_TYPE (arg00); tree tt0 = TREE_TYPE (t0); tree tt1 = TREE_TYPE (t1); tree s0 = TYPE_SIZE (tt0); tree s1 = TYPE_SIZE (tt1); if (s0 && s1 && operand_equal_p (s0, s1, OEP_ONLY_CONST)) return build2 (TREE_CODE (arg0), t0, fold_convert (t0, arg00), TREE_OPERAND (arg0, 1)); } tem = fold_convert_const (code, type, arg0); return tem ? tem : t; case VIEW_CONVERT_EXPR: if (TREE_CODE (TREE_OPERAND (t, 0)) == VIEW_CONVERT_EXPR) return build1 (VIEW_CONVERT_EXPR, type, TREE_OPERAND (TREE_OPERAND (t, 0), 0)); return t; case COMPONENT_REF: if (TREE_CODE (arg0) == CONSTRUCTOR && ! 
type_contains_placeholder_p (TREE_TYPE (arg0))) { tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0)); if (m) return TREE_VALUE (m); } return t; case RANGE_EXPR: if (TREE_CONSTANT (t) != wins) { tem = copy_node (t); TREE_CONSTANT (tem) = wins; TREE_INVARIANT (tem) = wins; return tem; } return t; case NEGATE_EXPR: if (negate_expr_p (arg0)) return fold_convert (type, negate_expr (arg0)); return t; case ABS_EXPR: if (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST) return fold_abs_const (arg0, type); else if (TREE_CODE (arg0) == NEGATE_EXPR) return fold (build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0))); /* Convert fabs((double)float) into (double)fabsf(float). */ else if (TREE_CODE (arg0) == NOP_EXPR && TREE_CODE (type) == REAL_TYPE) { tree targ0 = strip_float_extensions (arg0); if (targ0 != arg0) return fold_convert (type, fold (build1 (ABS_EXPR, TREE_TYPE (targ0), targ0))); } else if (tree_expr_nonnegative_p (arg0)) return arg0; return t; case CONJ_EXPR: if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE) return fold_convert (type, arg0); else if (TREE_CODE (arg0) == COMPLEX_EXPR) return build2 (COMPLEX_EXPR, type, TREE_OPERAND (arg0, 0), negate_expr (TREE_OPERAND (arg0, 1))); else if (TREE_CODE (arg0) == COMPLEX_CST) return build_complex (type, TREE_REALPART (arg0), negate_expr (TREE_IMAGPART (arg0))); else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR) return fold (build2 (TREE_CODE (arg0), type, fold (build1 (CONJ_EXPR, type, TREE_OPERAND (arg0, 0))), fold (build1 (CONJ_EXPR, type, TREE_OPERAND (arg0, 1))))); else if (TREE_CODE (arg0) == CONJ_EXPR) return TREE_OPERAND (arg0, 0); return t; case BIT_NOT_EXPR: if (TREE_CODE (arg0) == INTEGER_CST) return fold_not_const (arg0, type); else if (TREE_CODE (arg0) == BIT_NOT_EXPR) return TREE_OPERAND (arg0, 0); return t; case PLUS_EXPR: /* A + (-B) -> A - B */ if (TREE_CODE (arg1) == NEGATE_EXPR) return fold (build2 (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0))); /* (-A) + B -> B - A */ if (TREE_CODE (arg0) == NEGATE_EXPR && reorder_operands_p (TREE_OPERAND (arg0, 0), arg1)) return fold (build2 (MINUS_EXPR, type, arg1, TREE_OPERAND (arg0, 0))); if (! FLOAT_TYPE_P (type)) { if (integer_zerop (arg1)) return non_lvalue (fold_convert (type, arg0)); /* If we are adding two BIT_AND_EXPR's, both of which are and'ing with a constant, and the two constants have no bits in common, we should treat this as a BIT_IOR_EXPR since this may produce more simplifications. */ if (TREE_CODE (arg0) == BIT_AND_EXPR && TREE_CODE (arg1) == BIT_AND_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST && integer_zerop (const_binop (BIT_AND_EXPR, TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))) { code = BIT_IOR_EXPR; goto bit_ior; } /* Reassociate (plus (plus (mult) (foo)) (mult)) as (plus (plus (mult) (mult)) (foo)) so that we can take advantage of the factoring cases below. 
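For instance, (a*c + b) + d*c is rewritten as (a*c + d*c) + b, and the (A * C) + (B * C) case below then folds the inner sum to (a + d) * c.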
*/ if ((TREE_CODE (arg0) == PLUS_EXPR && TREE_CODE (arg1) == MULT_EXPR) || (TREE_CODE (arg1) == PLUS_EXPR && TREE_CODE (arg0) == MULT_EXPR)) { tree parg0, parg1, parg, marg; if (TREE_CODE (arg0) == PLUS_EXPR) parg = arg0, marg = arg1; else parg = arg1, marg = arg0; parg0 = TREE_OPERAND (parg, 0); parg1 = TREE_OPERAND (parg, 1); STRIP_NOPS (parg0); STRIP_NOPS (parg1); if (TREE_CODE (parg0) == MULT_EXPR && TREE_CODE (parg1) != MULT_EXPR) return fold (build2 (PLUS_EXPR, type, fold (build2 (PLUS_EXPR, type, fold_convert (type, parg0), fold_convert (type, marg))), fold_convert (type, parg1))); if (TREE_CODE (parg0) != MULT_EXPR && TREE_CODE (parg1) == MULT_EXPR) return fold (build2 (PLUS_EXPR, type, fold (build2 (PLUS_EXPR, type, fold_convert (type, parg1), fold_convert (type, marg))), fold_convert (type, parg0))); } if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR) { tree arg00, arg01, arg10, arg11; tree alt0 = NULL_TREE, alt1 = NULL_TREE, same; /* (A * C) + (B * C) -> (A+B) * C. We are most concerned about the case where C is a constant, but other combinations show up during loop reduction. Since it is not difficult, try all four possibilities. */ arg00 = TREE_OPERAND (arg0, 0); arg01 = TREE_OPERAND (arg0, 1); arg10 = TREE_OPERAND (arg1, 0); arg11 = TREE_OPERAND (arg1, 1); same = NULL_TREE; if (operand_equal_p (arg01, arg11, 0)) same = arg01, alt0 = arg00, alt1 = arg10; else if (operand_equal_p (arg00, arg10, 0)) same = arg00, alt0 = arg01, alt1 = arg11; else if (operand_equal_p (arg00, arg11, 0)) same = arg00, alt0 = arg01, alt1 = arg10; else if (operand_equal_p (arg01, arg10, 0)) same = arg01, alt0 = arg00, alt1 = arg11; /* No identical multiplicands; see if we can find a common power-of-two factor in non-power-of-two multiplies. This can help in multi-dimensional array access. */ else if (TREE_CODE (arg01) == INTEGER_CST && TREE_CODE (arg11) == INTEGER_CST && TREE_INT_CST_HIGH (arg01) == 0 && TREE_INT_CST_HIGH (arg11) == 0) { HOST_WIDE_INT int01, int11, tmp; int01 = TREE_INT_CST_LOW (arg01); int11 = TREE_INT_CST_LOW (arg11); /* Move min of absolute values to int11. */ if ((int01 >= 0 ? int01 : -int01) < (int11 >= 0 ? int11 : -int11)) { tmp = int01, int01 = int11, int11 = tmp; alt0 = arg00, arg00 = arg10, arg10 = alt0; alt0 = arg01, arg01 = arg11, arg11 = alt0; } if (exact_log2 (int11) > 0 && int01 % int11 == 0) { alt0 = fold (build2 (MULT_EXPR, type, arg00, build_int_2 (int01 / int11, 0))); alt1 = arg10; same = arg11; } } if (same) return fold (build2 (MULT_EXPR, type, fold (build2 (PLUS_EXPR, type, alt0, alt1)), same)); } } else { /* See if ARG1 is zero and X + ARG1 reduces to X. */ if (fold_real_zero_addition_p (TREE_TYPE (arg0), arg1, 0)) return non_lvalue (fold_convert (type, arg0)); /* Likewise if the operands are reversed. */ if (fold_real_zero_addition_p (TREE_TYPE (arg1), arg0, 0)) return non_lvalue (fold_convert (type, arg1)); /* Convert x+x into x*2.0. */ if (operand_equal_p (arg0, arg1, 0) && SCALAR_FLOAT_TYPE_P (type)) return fold (build2 (MULT_EXPR, type, arg0, build_real (type, dconst2))); /* Convert x*c+x into x*(c+1). */ if (flag_unsafe_math_optimizations && TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (TREE_OPERAND (arg0, 1)) && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)) { REAL_VALUE_TYPE c; c = TREE_REAL_CST (TREE_OPERAND (arg0, 1)); real_arithmetic (&c, PLUS_EXPR, &c, &dconst1); return fold (build2 (MULT_EXPR, type, arg1, build_real (type, c))); } /* Convert x+x*c into x*(c+1). 
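For example, x + x*3.0 becomes x*4.0; like the related folds here, this is only done under -funsafe-math-optimizations.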
*/ if (flag_unsafe_math_optimizations && TREE_CODE (arg1) == MULT_EXPR && TREE_CODE (TREE_OPERAND (arg1, 1)) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (TREE_OPERAND (arg1, 1)) && operand_equal_p (TREE_OPERAND (arg1, 0), arg0, 0)) { REAL_VALUE_TYPE c; c = TREE_REAL_CST (TREE_OPERAND (arg1, 1)); real_arithmetic (&c, PLUS_EXPR, &c, &dconst1); return fold (build2 (MULT_EXPR, type, arg0, build_real (type, c))); } /* Convert x*c1+x*c2 into x*(c1+c2). */ if (flag_unsafe_math_optimizations && TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (TREE_OPERAND (arg0, 1)) && TREE_CODE (TREE_OPERAND (arg1, 1)) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (TREE_OPERAND (arg1, 1)) && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)) { REAL_VALUE_TYPE c1, c2; c1 = TREE_REAL_CST (TREE_OPERAND (arg0, 1)); c2 = TREE_REAL_CST (TREE_OPERAND (arg1, 1)); real_arithmetic (&c1, PLUS_EXPR, &c1, &c2); return fold (build2 (MULT_EXPR, type, TREE_OPERAND (arg0, 0), build_real (type, c1))); } /* Convert a + (b*c + d*e) into (a + b*c) + d*e */ if (flag_unsafe_math_optimizations && TREE_CODE (arg1) == PLUS_EXPR && TREE_CODE (arg0) != MULT_EXPR) { tree tree10 = TREE_OPERAND (arg1, 0); tree tree11 = TREE_OPERAND (arg1, 1); if (TREE_CODE (tree11) == MULT_EXPR && TREE_CODE (tree10) == MULT_EXPR) { tree tree0; tree0 = fold (build2 (PLUS_EXPR, type, arg0, tree10)); return fold (build2 (PLUS_EXPR, type, tree0, tree11)); } } /* Convert (b*c + d*e) + a into b*c + (d*e +a) */ if (flag_unsafe_math_optimizations && TREE_CODE (arg0) == PLUS_EXPR && TREE_CODE (arg1) != MULT_EXPR) { tree tree00 = TREE_OPERAND (arg0, 0); tree tree01 = TREE_OPERAND (arg0, 1); if (TREE_CODE (tree01) == MULT_EXPR && TREE_CODE (tree00) == MULT_EXPR) { tree tree0; tree0 = fold (build2 (PLUS_EXPR, type, tree01, arg1)); return fold (build2 (PLUS_EXPR, type, tree00, tree0)); } } } bit_rotate: /* (A << C1) + (A >> C2) if A is unsigned and C1+C2 is the size of A is a rotate of A by C1 bits. */ /* (A << B) + (A >> (Z - B)) if A is unsigned and Z is the size of A is a rotate of A by B bits. */ { enum tree_code code0, code1; code0 = TREE_CODE (arg0); code1 = TREE_CODE (arg1); if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR) || (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR)) && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0) && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))) { tree tree01, tree11; enum tree_code code01, code11; tree01 = TREE_OPERAND (arg0, 1); tree11 = TREE_OPERAND (arg1, 1); STRIP_NOPS (tree01); STRIP_NOPS (tree11); code01 = TREE_CODE (tree01); code11 = TREE_CODE (tree11); if (code01 == INTEGER_CST && code11 == INTEGER_CST && TREE_INT_CST_HIGH (tree01) == 0 && TREE_INT_CST_HIGH (tree11) == 0 && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11)) == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0))))) return build2 (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0), code0 == LSHIFT_EXPR ? tree01 : tree11); else if (code11 == MINUS_EXPR) { tree tree110, tree111; tree110 = TREE_OPERAND (tree11, 0); tree111 = TREE_OPERAND (tree11, 1); STRIP_NOPS (tree110); STRIP_NOPS (tree111); if (TREE_CODE (tree110) == INTEGER_CST && 0 == compare_tree_int (tree110, TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))) && operand_equal_p (tree01, tree111, 0)) return build2 ((code0 == LSHIFT_EXPR ? 
LROTATE_EXPR : RROTATE_EXPR), type, TREE_OPERAND (arg0, 0), tree01); } else if (code01 == MINUS_EXPR) { tree tree010, tree011; tree010 = TREE_OPERAND (tree01, 0); tree011 = TREE_OPERAND (tree01, 1); STRIP_NOPS (tree010); STRIP_NOPS (tree011); if (TREE_CODE (tree010) == INTEGER_CST && 0 == compare_tree_int (tree010, TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))) && operand_equal_p (tree11, tree011, 0)) return build2 ((code0 != LSHIFT_EXPR ? LROTATE_EXPR : RROTATE_EXPR), type, TREE_OPERAND (arg0, 0), tree11); } } } associate: /* In most languages, can't associate operations on floats through parentheses. Rather than remember where the parentheses were, we don't associate floats at all, unless the user has specified -funsafe-math-optimizations. */ if (! wins && (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)) { tree var0, con0, lit0, minus_lit0; tree var1, con1, lit1, minus_lit1; /* Split both trees into variables, constants, and literals. Then associate each group together, the constants with literals, then the result with variables. This increases the chances of literals being recombined later and of generating relocatable expressions for the sum of a constant and literal. */ var0 = split_tree (arg0, code, &con0, &lit0, &minus_lit0, 0); var1 = split_tree (arg1, code, &con1, &lit1, &minus_lit1, code == MINUS_EXPR); /* Only do something if we found more than two objects. Otherwise, nothing has changed and we risk infinite recursion. */ if (2 < ((var0 != 0) + (var1 != 0) + (con0 != 0) + (con1 != 0) + (lit0 != 0) + (lit1 != 0) + (minus_lit0 != 0) + (minus_lit1 != 0))) { /* Recombine MINUS_EXPR operands by using PLUS_EXPR. */ if (code == MINUS_EXPR) code = PLUS_EXPR; var0 = associate_trees (var0, var1, code, type); con0 = associate_trees (con0, con1, code, type); lit0 = associate_trees (lit0, lit1, code, type); minus_lit0 = associate_trees (minus_lit0, minus_lit1, code, type); /* Preserve the MINUS_EXPR if the negative part of the literal is greater than the positive part. Otherwise, the multiplicative folding code (i.e extract_muldiv) may be fooled in case unsigned constants are subtracted, like in the following example: ((X*2 + 4) - 8U)/2. */ if (minus_lit0 && lit0) { if (TREE_CODE (lit0) == INTEGER_CST && TREE_CODE (minus_lit0) == INTEGER_CST && tree_int_cst_lt (lit0, minus_lit0)) { minus_lit0 = associate_trees (minus_lit0, lit0, MINUS_EXPR, type); lit0 = 0; } else { lit0 = associate_trees (lit0, minus_lit0, MINUS_EXPR, type); minus_lit0 = 0; } } if (minus_lit0) { if (con0 == 0) return fold_convert (type, associate_trees (var0, minus_lit0, MINUS_EXPR, type)); else { con0 = associate_trees (con0, minus_lit0, MINUS_EXPR, type); return fold_convert (type, associate_trees (var0, con0, PLUS_EXPR, type)); } } con0 = associate_trees (con0, lit0, code, type); return fold_convert (type, associate_trees (var0, con0, code, type)); } } binary: if (wins) t1 = const_binop (code, arg0, arg1, 0); if (t1 != NULL_TREE) { /* The return value should always have the same type as the original expression. */ if (TREE_TYPE (t1) != type) t1 = fold_convert (type, t1); return t1; } return t; case MINUS_EXPR: /* A - (-B) -> A + B */ if (TREE_CODE (arg1) == NEGATE_EXPR) return fold (build2 (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0))); /* (-A) - B -> (-B) - A where B is easily negated and we can swap. 
*/ if (TREE_CODE (arg0) == NEGATE_EXPR && (FLOAT_TYPE_P (type) || (INTEGRAL_TYPE_P (type) && flag_wrapv && !flag_trapv)) && negate_expr_p (arg1) && reorder_operands_p (arg0, arg1)) return fold (build2 (MINUS_EXPR, type, negate_expr (arg1), TREE_OPERAND (arg0, 0))); if (! FLOAT_TYPE_P (type)) { if (! wins && integer_zerop (arg0)) return negate_expr (fold_convert (type, arg1)); if (integer_zerop (arg1)) return non_lvalue (fold_convert (type, arg0)); /* Fold A - (A & B) into ~B & A. */ if (!TREE_SIDE_EFFECTS (arg0) && TREE_CODE (arg1) == BIT_AND_EXPR) { if (operand_equal_p (arg0, TREE_OPERAND (arg1, 1), 0)) return fold (build2 (BIT_AND_EXPR, type, fold (build1 (BIT_NOT_EXPR, type, TREE_OPERAND (arg1, 0))), arg0)); if (operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0)) return fold (build2 (BIT_AND_EXPR, type, fold (build1 (BIT_NOT_EXPR, type, TREE_OPERAND (arg1, 1))), arg0)); } /* Fold (A & ~B) - (A & B) into (A ^ B) - B, where B is any power of 2 minus 1. */ if (TREE_CODE (arg0) == BIT_AND_EXPR && TREE_CODE (arg1) == BIT_AND_EXPR && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)) { tree mask0 = TREE_OPERAND (arg0, 1); tree mask1 = TREE_OPERAND (arg1, 1); tree tem = fold (build1 (BIT_NOT_EXPR, type, mask0)); if (operand_equal_p (tem, mask1, 0)) { tem = fold (build2 (BIT_XOR_EXPR, type, TREE_OPERAND (arg0, 0), mask1)); return fold (build2 (MINUS_EXPR, type, tem, mask1)); } } } /* See if ARG1 is zero and X - ARG1 reduces to X. */ else if (fold_real_zero_addition_p (TREE_TYPE (arg0), arg1, 1)) return non_lvalue (fold_convert (type, arg0)); /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether ARG0 is zero and X + ARG0 reduces to X, since that would mean (-ARG1 + ARG0) reduces to -ARG1. */ else if (!wins && fold_real_zero_addition_p (TREE_TYPE (arg1), arg0, 0)) return negate_expr (fold_convert (type, arg1)); /* Fold &x - &x. This can happen from &x.foo - &x. This is unsafe for certain floats even in non-IEEE formats. In IEEE, it is unsafe because it does wrong for NaNs. Also note that operand_equal_p is always false if an operand is volatile. */ if ((! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations) && operand_equal_p (arg0, arg1, 0)) return fold_convert (type, integer_zero_node); /* A - B -> A + (-B) if B is easily negatable. */ if (!wins && negate_expr_p (arg1) && (FLOAT_TYPE_P (type) || (INTEGRAL_TYPE_P (type) && flag_wrapv && !flag_trapv))) return fold (build2 (PLUS_EXPR, type, arg0, negate_expr (arg1))); if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR && (INTEGRAL_TYPE_P (type) || flag_unsafe_math_optimizations)) { /* (A * C) - (B * C) -> (A-B) * C. */ if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0)) return fold (build2 (MULT_EXPR, type, fold (build2 (MINUS_EXPR, type, TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0))), TREE_OPERAND (arg0, 1))); /* (A * C1) - (A * C2) -> A * (C1-C2). */ if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)) return fold (build2 (MULT_EXPR, type, TREE_OPERAND (arg0, 0), fold (build2 (MINUS_EXPR, type, TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1))))); } goto associate; case MULT_EXPR: /* (-A) * (-B) -> A * B */ if (TREE_CODE (arg0) == NEGATE_EXPR && negate_expr_p (arg1)) return fold (build2 (MULT_EXPR, type, TREE_OPERAND (arg0, 0), negate_expr (arg1))); if (TREE_CODE (arg1) == NEGATE_EXPR && negate_expr_p (arg0)) return fold (build2 (MULT_EXPR, type, negate_expr (arg0), TREE_OPERAND (arg1, 0))); if (! 
FLOAT_TYPE_P (type)) { if (integer_zerop (arg1)) return omit_one_operand (type, arg1, arg0); if (integer_onep (arg1)) return non_lvalue (fold_convert (type, arg0)); /* (a * (1 << b)) is (a << b) */ if (TREE_CODE (arg1) == LSHIFT_EXPR && integer_onep (TREE_OPERAND (arg1, 0))) return fold (build2 (LSHIFT_EXPR, type, arg0, TREE_OPERAND (arg1, 1))); if (TREE_CODE (arg0) == LSHIFT_EXPR && integer_onep (TREE_OPERAND (arg0, 0))) return fold (build2 (LSHIFT_EXPR, type, arg1, TREE_OPERAND (arg0, 1))); if (TREE_CODE (arg1) == INTEGER_CST && 0 != (tem = extract_muldiv (TREE_OPERAND (t, 0), fold_convert (type, arg1), code, NULL_TREE))) return fold_convert (type, tem); } else { /* Maybe fold x * 0 to 0. The expressions aren't the same when x is NaN, since x * 0 is also NaN. Nor are they the same in modes with signed zeros, since multiplying a negative value by 0 gives -0, not +0. */ if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0))) && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))) && real_zerop (arg1)) return omit_one_operand (type, arg1, arg0); /* In IEEE floating point, x*1 is not equivalent to x for snans. */ if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0))) && real_onep (arg1)) return non_lvalue (fold_convert (type, arg0)); /* Transform x * -1.0 into -x. */ if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0))) && real_minus_onep (arg1)) return fold_convert (type, negate_expr (arg0)); /* Convert (C1/X)*C2 into (C1*C2)/X. */ if (flag_unsafe_math_optimizations && TREE_CODE (arg0) == RDIV_EXPR && TREE_CODE (arg1) == REAL_CST && TREE_CODE (TREE_OPERAND (arg0, 0)) == REAL_CST) { tree tem = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 0), arg1, 0); if (tem) return fold (build2 (RDIV_EXPR, type, tem, TREE_OPERAND (arg0, 1))); } if (flag_unsafe_math_optimizations) { enum built_in_function fcode0 = builtin_mathfn_code (arg0); enum built_in_function fcode1 = builtin_mathfn_code (arg1); /* Optimizations of root(...)*root(...). */ if (fcode0 == fcode1 && BUILTIN_ROOT_P (fcode0)) { tree rootfn, arg, arglist; tree arg00 = TREE_VALUE (TREE_OPERAND (arg0, 1)); tree arg10 = TREE_VALUE (TREE_OPERAND (arg1, 1)); /* Optimize sqrt(x)*sqrt(x) as x. */ if (BUILTIN_SQRT_P (fcode0) && operand_equal_p (arg00, arg10, 0) && ! HONOR_SNANS (TYPE_MODE (type))) return arg00; /* Optimize root(x)*root(y) as root(x*y). */ rootfn = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0); arg = fold (build2 (MULT_EXPR, type, arg00, arg10)); arglist = build_tree_list (NULL_TREE, arg); return build_function_call_expr (rootfn, arglist); } /* Optimize expN(x)*expN(y) as expN(x+y). */ if (fcode0 == fcode1 && BUILTIN_EXPONENT_P (fcode0)) { tree expfn = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0); tree arg = build2 (PLUS_EXPR, type, TREE_VALUE (TREE_OPERAND (arg0, 1)), TREE_VALUE (TREE_OPERAND (arg1, 1))); tree arglist = build_tree_list (NULL_TREE, fold (arg)); return build_function_call_expr (expfn, arglist); } /* Optimizations of pow(...)*pow(...). */ if ((fcode0 == BUILT_IN_POW && fcode1 == BUILT_IN_POW) || (fcode0 == BUILT_IN_POWF && fcode1 == BUILT_IN_POWF) || (fcode0 == BUILT_IN_POWL && fcode1 == BUILT_IN_POWL)) { tree arg00 = TREE_VALUE (TREE_OPERAND (arg0, 1)); tree arg01 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg0, 1))); tree arg10 = TREE_VALUE (TREE_OPERAND (arg1, 1)); tree arg11 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg1, 1))); /* Optimize pow(x,y)*pow(z,y) as pow(x*z,y). 
*/ if (operand_equal_p (arg01, arg11, 0)) { tree powfn = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0); tree arg = build2 (MULT_EXPR, type, arg00, arg10); tree arglist = tree_cons (NULL_TREE, fold (arg), build_tree_list (NULL_TREE, arg01)); return build_function_call_expr (powfn, arglist); } /* Optimize pow(x,y)*pow(x,z) as pow(x,y+z). */ if (operand_equal_p (arg00, arg10, 0)) { tree powfn = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0); tree arg = fold (build2 (PLUS_EXPR, type, arg01, arg11)); tree arglist = tree_cons (NULL_TREE, arg00, build_tree_list (NULL_TREE, arg)); return build_function_call_expr (powfn, arglist); } } /* Optimize tan(x)*cos(x) as sin(x). */ if (((fcode0 == BUILT_IN_TAN && fcode1 == BUILT_IN_COS) || (fcode0 == BUILT_IN_TANF && fcode1 == BUILT_IN_COSF) || (fcode0 == BUILT_IN_TANL && fcode1 == BUILT_IN_COSL) || (fcode0 == BUILT_IN_COS && fcode1 == BUILT_IN_TAN) || (fcode0 == BUILT_IN_COSF && fcode1 == BUILT_IN_TANF) || (fcode0 == BUILT_IN_COSL && fcode1 == BUILT_IN_TANL)) && operand_equal_p (TREE_VALUE (TREE_OPERAND (arg0, 1)), TREE_VALUE (TREE_OPERAND (arg1, 1)), 0)) { tree sinfn = mathfn_built_in (type, BUILT_IN_SIN); if (sinfn != NULL_TREE) return build_function_call_expr (sinfn, TREE_OPERAND (arg0, 1)); } /* Optimize x*pow(x,c) as pow(x,c+1). */ if (fcode1 == BUILT_IN_POW || fcode1 == BUILT_IN_POWF || fcode1 == BUILT_IN_POWL) { tree arg10 = TREE_VALUE (TREE_OPERAND (arg1, 1)); tree arg11 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg1, 1))); if (TREE_CODE (arg11) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg11) && operand_equal_p (arg0, arg10, 0)) { tree powfn = TREE_OPERAND (TREE_OPERAND (arg1, 0), 0); REAL_VALUE_TYPE c; tree arg, arglist; c = TREE_REAL_CST (arg11); real_arithmetic (&c, PLUS_EXPR, &c, &dconst1); arg = build_real (type, c); arglist = build_tree_list (NULL_TREE, arg); arglist = tree_cons (NULL_TREE, arg0, arglist); return build_function_call_expr (powfn, arglist); } } /* Optimize pow(x,c)*x as pow(x,c+1). */ if (fcode0 == BUILT_IN_POW || fcode0 == BUILT_IN_POWF || fcode0 == BUILT_IN_POWL) { tree arg00 = TREE_VALUE (TREE_OPERAND (arg0, 1)); tree arg01 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg0, 1))); if (TREE_CODE (arg01) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg01) && operand_equal_p (arg1, arg00, 0)) { tree powfn = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0); REAL_VALUE_TYPE c; tree arg, arglist; c = TREE_REAL_CST (arg01); real_arithmetic (&c, PLUS_EXPR, &c, &dconst1); arg = build_real (type, c); arglist = build_tree_list (NULL_TREE, arg); arglist = tree_cons (NULL_TREE, arg1, arglist); return build_function_call_expr (powfn, arglist); } } /* Optimize x*x as pow(x,2.0), which is expanded as x*x. */ if (! optimize_size && operand_equal_p (arg0, arg1, 0)) { tree powfn = mathfn_built_in (type, BUILT_IN_POW); if (powfn) { tree arg = build_real (type, dconst2); tree arglist = build_tree_list (NULL_TREE, arg); arglist = tree_cons (NULL_TREE, arg0, arglist); return build_function_call_expr (powfn, arglist); } } } } goto associate; case BIT_IOR_EXPR: bit_ior: if (integer_all_onesp (arg1)) return omit_one_operand (type, arg1, arg0); if (integer_zerop (arg1)) return non_lvalue (fold_convert (type, arg0)); if (operand_equal_p (arg0, arg1, 0)) return non_lvalue (fold_convert (type, arg0)); /* ~X | X is -1. */ if (TREE_CODE (arg0) == BIT_NOT_EXPR && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)) { t1 = build_int_2 (-1, -1); TREE_TYPE (t1) = type; force_fit_type (t1, 0); return omit_one_operand (type, t1, arg1); } /* X | ~X is -1. 
*/ if (TREE_CODE (arg1) == BIT_NOT_EXPR && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0)) { t1 = build_int_2 (-1, -1); TREE_TYPE (t1) = type; force_fit_type (t1, 0); return omit_one_operand (type, t1, arg0); } t1 = distribute_bit_expr (code, type, arg0, arg1); if (t1 != NULL_TREE) return t1; /* Convert (or (not arg0) (not arg1)) to (not (and (arg0) (arg1))). This results in more efficient code for machines without a NAND instruction. Combine will canonicalize to the first form which will allow use of NAND instructions provided by the backend if they exist. */ if (TREE_CODE (arg0) == BIT_NOT_EXPR && TREE_CODE (arg1) == BIT_NOT_EXPR) { return fold (build1 (BIT_NOT_EXPR, type, build2 (BIT_AND_EXPR, type, TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0)))); } /* See if this can be simplified into a rotate first. If that is unsuccessful continue in the association code. */ goto bit_rotate; case BIT_XOR_EXPR: if (integer_zerop (arg1)) return non_lvalue (fold_convert (type, arg0)); if (integer_all_onesp (arg1)) return fold (build1 (BIT_NOT_EXPR, type, arg0)); if (operand_equal_p (arg0, arg1, 0)) return omit_one_operand (type, integer_zero_node, arg0); /* ~X ^ X is -1. */ if (TREE_CODE (arg0) == BIT_NOT_EXPR && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)) { t1 = build_int_2 (-1, -1); TREE_TYPE (t1) = type; force_fit_type (t1, 0); return omit_one_operand (type, t1, arg1); } /* X ^ ~X is -1. */ if (TREE_CODE (arg1) == BIT_NOT_EXPR && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0)) { t1 = build_int_2 (-1, -1); TREE_TYPE (t1) = type; force_fit_type (t1, 0); return omit_one_operand (type, t1, arg0); } /* If we are XORing two BIT_AND_EXPR's, both of which are and'ing with a constant, and the two constants have no bits in common, we should treat this as a BIT_IOR_EXPR since this may produce more simplifications. */ if (TREE_CODE (arg0) == BIT_AND_EXPR && TREE_CODE (arg1) == BIT_AND_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST && integer_zerop (const_binop (BIT_AND_EXPR, TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))) { code = BIT_IOR_EXPR; goto bit_ior; } /* See if this can be simplified into a rotate first. If that is unsuccessful continue in the association code. */ goto bit_rotate; case BIT_AND_EXPR: if (integer_all_onesp (arg1)) return non_lvalue (fold_convert (type, arg0)); if (integer_zerop (arg1)) return omit_one_operand (type, arg1, arg0); if (operand_equal_p (arg0, arg1, 0)) return non_lvalue (fold_convert (type, arg0)); /* ~X & X is always zero. */ if (TREE_CODE (arg0) == BIT_NOT_EXPR && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)) return omit_one_operand (type, integer_zero_node, arg1); /* X & ~X is always zero. */ if (TREE_CODE (arg1) == BIT_NOT_EXPR && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0)) return omit_one_operand (type, integer_zero_node, arg0); t1 = distribute_bit_expr (code, type, arg0, arg1); if (t1 != NULL_TREE) return t1; /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */ if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))) { unsigned int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0))); if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT && (~TREE_INT_CST_LOW (arg1) & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0) return fold_convert (type, TREE_OPERAND (arg0, 0)); } /* Convert (and (not arg0) (not arg1)) to (not (or (arg0) (arg1))). 
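The two forms are equivalent by De Morgan's law.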
This results in more efficient code for machines without a NOR instruction. Combine will canonicalize to the first form which will allow use of NOR instructions provided by the backend if they exist. */ if (TREE_CODE (arg0) == BIT_NOT_EXPR && TREE_CODE (arg1) == BIT_NOT_EXPR) { return fold (build1 (BIT_NOT_EXPR, type, build2 (BIT_IOR_EXPR, type, TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0)))); } goto associate; case RDIV_EXPR: /* Don't touch a floating-point divide by zero unless the mode of the constant can represent infinity. */ if (TREE_CODE (arg1) == REAL_CST && !MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (arg1))) && real_zerop (arg1)) return t; /* (-A) / (-B) -> A / B */ if (TREE_CODE (arg0) == NEGATE_EXPR && negate_expr_p (arg1)) return fold (build2 (RDIV_EXPR, type, TREE_OPERAND (arg0, 0), negate_expr (arg1))); if (TREE_CODE (arg1) == NEGATE_EXPR && negate_expr_p (arg0)) return fold (build2 (RDIV_EXPR, type, negate_expr (arg0), TREE_OPERAND (arg1, 0))); /* In IEEE floating point, x/1 is not equivalent to x for snans. */ if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0))) && real_onep (arg1)) return non_lvalue (fold_convert (type, arg0)); /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */ if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0))) && real_minus_onep (arg1)) return non_lvalue (fold_convert (type, negate_expr (arg0))); /* If ARG1 is a constant, we can convert this to a multiply by the reciprocal. This does not have the same rounding properties, so only do this if -funsafe-math-optimizations. We can actually always safely do it if ARG1 is a power of two, but it's hard to tell if it is or not in a portable manner. */ if (TREE_CODE (arg1) == REAL_CST) { if (flag_unsafe_math_optimizations && 0 != (tem = const_binop (code, build_real (type, dconst1), arg1, 0))) return fold (build2 (MULT_EXPR, type, arg0, tem)); /* Find the reciprocal if optimizing and the result is exact. */ if (optimize) { REAL_VALUE_TYPE r; r = TREE_REAL_CST (arg1); if (exact_real_inverse (TYPE_MODE(TREE_TYPE(arg0)), &r)) { tem = build_real (type, r); return fold (build2 (MULT_EXPR, type, arg0, tem)); } } } /* Convert A/B/C to A/(B*C). */ if (flag_unsafe_math_optimizations && TREE_CODE (arg0) == RDIV_EXPR) return fold (build2 (RDIV_EXPR, type, TREE_OPERAND (arg0, 0), fold (build2 (MULT_EXPR, type, TREE_OPERAND (arg0, 1), arg1)))); /* Convert A/(B/C) to (A/B)*C. */ if (flag_unsafe_math_optimizations && TREE_CODE (arg1) == RDIV_EXPR) return fold (build2 (MULT_EXPR, type, fold (build2 (RDIV_EXPR, type, arg0, TREE_OPERAND (arg1, 0))), TREE_OPERAND (arg1, 1))); /* Convert C1/(X*C2) into (C1/C2)/X. */ if (flag_unsafe_math_optimizations && TREE_CODE (arg1) == MULT_EXPR && TREE_CODE (arg0) == REAL_CST && TREE_CODE (TREE_OPERAND (arg1, 1)) == REAL_CST) { tree tem = const_binop (RDIV_EXPR, arg0, TREE_OPERAND (arg1, 1), 0); if (tem) return fold (build2 (RDIV_EXPR, type, tem, TREE_OPERAND (arg1, 0))); } if (flag_unsafe_math_optimizations) { enum built_in_function fcode = builtin_mathfn_code (arg1); /* Optimize x/expN(y) into x*expN(-y). */ if (BUILTIN_EXPONENT_P (fcode)) { tree expfn = TREE_OPERAND (TREE_OPERAND (arg1, 0), 0); tree arg = negate_expr (TREE_VALUE (TREE_OPERAND (arg1, 1))); tree arglist = build_tree_list (NULL_TREE, fold_convert (type, arg)); arg1 = build_function_call_expr (expfn, arglist); return fold (build2 (MULT_EXPR, type, arg0, arg1)); } /* Optimize x/pow(y,z) into x*pow(y,-z). 
*/ if (fcode == BUILT_IN_POW || fcode == BUILT_IN_POWF || fcode == BUILT_IN_POWL) { tree powfn = TREE_OPERAND (TREE_OPERAND (arg1, 0), 0); tree arg10 = TREE_VALUE (TREE_OPERAND (arg1, 1)); tree arg11 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg1, 1))); tree neg11 = fold_convert (type, negate_expr (arg11)); tree arglist = tree_cons(NULL_TREE, arg10, build_tree_list (NULL_TREE, neg11)); arg1 = build_function_call_expr (powfn, arglist); return fold (build2 (MULT_EXPR, type, arg0, arg1)); } } if (flag_unsafe_math_optimizations) { enum built_in_function fcode0 = builtin_mathfn_code (arg0); enum built_in_function fcode1 = builtin_mathfn_code (arg1); /* Optimize sin(x)/cos(x) as tan(x). */ if (((fcode0 == BUILT_IN_SIN && fcode1 == BUILT_IN_COS) || (fcode0 == BUILT_IN_SINF && fcode1 == BUILT_IN_COSF) || (fcode0 == BUILT_IN_SINL && fcode1 == BUILT_IN_COSL)) && operand_equal_p (TREE_VALUE (TREE_OPERAND (arg0, 1)), TREE_VALUE (TREE_OPERAND (arg1, 1)), 0)) { tree tanfn = mathfn_built_in (type, BUILT_IN_TAN); if (tanfn != NULL_TREE) return build_function_call_expr (tanfn, TREE_OPERAND (arg0, 1)); } /* Optimize cos(x)/sin(x) as 1.0/tan(x). */ if (((fcode0 == BUILT_IN_COS && fcode1 == BUILT_IN_SIN) || (fcode0 == BUILT_IN_COSF && fcode1 == BUILT_IN_SINF) || (fcode0 == BUILT_IN_COSL && fcode1 == BUILT_IN_SINL)) && operand_equal_p (TREE_VALUE (TREE_OPERAND (arg0, 1)), TREE_VALUE (TREE_OPERAND (arg1, 1)), 0)) { tree tanfn = mathfn_built_in (type, BUILT_IN_TAN); if (tanfn != NULL_TREE) { tree tmp = TREE_OPERAND (arg0, 1); tmp = build_function_call_expr (tanfn, tmp); return fold (build2 (RDIV_EXPR, type, build_real (type, dconst1), tmp)); } } /* Optimize pow(x,c)/x as pow(x,c-1). */ if (fcode0 == BUILT_IN_POW || fcode0 == BUILT_IN_POWF || fcode0 == BUILT_IN_POWL) { tree arg00 = TREE_VALUE (TREE_OPERAND (arg0, 1)); tree arg01 = TREE_VALUE (TREE_CHAIN (TREE_OPERAND (arg0, 1))); if (TREE_CODE (arg01) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (arg01) && operand_equal_p (arg1, arg00, 0)) { tree powfn = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0); REAL_VALUE_TYPE c; tree arg, arglist; c = TREE_REAL_CST (arg01); real_arithmetic (&c, MINUS_EXPR, &c, &dconst1); arg = build_real (type, c); arglist = build_tree_list (NULL_TREE, arg); arglist = tree_cons (NULL_TREE, arg1, arglist); return build_function_call_expr (powfn, arglist); } } } goto binary; case TRUNC_DIV_EXPR: case ROUND_DIV_EXPR: case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: case EXACT_DIV_EXPR: if (integer_onep (arg1)) return non_lvalue (fold_convert (type, arg0)); if (integer_zerop (arg1)) return t; /* X / -1 is -X. */ if (!TYPE_UNSIGNED (type) && TREE_CODE (arg1) == INTEGER_CST && TREE_INT_CST_LOW (arg1) == (unsigned HOST_WIDE_INT) -1 && TREE_INT_CST_HIGH (arg1) == -1) return fold_convert (type, negate_expr (arg0)); /* If arg0 is a multiple of arg1, then rewrite to the fastest div operation, EXACT_DIV_EXPR. Note that only CEIL_DIV_EXPR and FLOOR_DIV_EXPR are rewritten now. At one time others generated faster code, it's not clear if they do after the last round to changes to the DIV code in expmed.c. 
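For example, (x * 4) / 4 with CEIL_DIV_EXPR or FLOOR_DIV_EXPR can be done as an EXACT_DIV_EXPR, since multiple_of_p shows the division leaves no remainder.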
*/ if ((code == CEIL_DIV_EXPR || code == FLOOR_DIV_EXPR) && multiple_of_p (type, arg0, arg1)) return fold (build2 (EXACT_DIV_EXPR, type, arg0, arg1)); if (TREE_CODE (arg1) == INTEGER_CST && 0 != (tem = extract_muldiv (TREE_OPERAND (t, 0), arg1, code, NULL_TREE))) return fold_convert (type, tem); goto binary; case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case TRUNC_MOD_EXPR: if (integer_onep (arg1)) return omit_one_operand (type, integer_zero_node, arg0); if (integer_zerop (arg1)) return t; /* X % -1 is zero. */ if (!TYPE_UNSIGNED (type) && TREE_CODE (arg1) == INTEGER_CST && TREE_INT_CST_LOW (arg1) == (unsigned HOST_WIDE_INT) -1 && TREE_INT_CST_HIGH (arg1) == -1) return omit_one_operand (type, integer_zero_node, arg0); /* Optimize unsigned TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR, i.e. "X % C" into "X & C2". */ if (code == TRUNC_MOD_EXPR && TYPE_UNSIGNED (type) && integer_pow2p (arg1)) { unsigned HOST_WIDE_INT high, low; tree mask; int l; l = tree_log2 (arg1); if (l >= HOST_BITS_PER_WIDE_INT) { high = ((unsigned HOST_WIDE_INT) 1 << (l - HOST_BITS_PER_WIDE_INT)) - 1; low = -1; } else { high = 0; low = ((unsigned HOST_WIDE_INT) 1 << l) - 1; } mask = build_int_2 (low, high); TREE_TYPE (mask) = type; return fold (build2 (BIT_AND_EXPR, type, fold_convert (type, arg0), mask)); } /* X % -C is the same as X % C (for all rounding moduli). */ if (!TYPE_UNSIGNED (type) && TREE_CODE (arg1) == INTEGER_CST && TREE_INT_CST_HIGH (arg1) < 0 && !flag_trapv /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */ && !sign_bit_p (arg1, arg1)) return fold (build2 (code, type, fold_convert (type, arg0), fold_convert (type, negate_expr (arg1)))); /* X % -Y is the same as X % Y (for all rounding moduli). */ if (!TYPE_UNSIGNED (type) && TREE_CODE (arg1) == NEGATE_EXPR && !flag_trapv) return fold (build2 (code, type, fold_convert (type, arg0), fold_convert (type, TREE_OPERAND (arg1, 0)))); if (TREE_CODE (arg1) == INTEGER_CST && 0 != (tem = extract_muldiv (TREE_OPERAND (t, 0), arg1, code, NULL_TREE))) return fold_convert (type, tem); goto binary; case LROTATE_EXPR: case RROTATE_EXPR: if (integer_all_onesp (arg0)) return omit_one_operand (type, arg0, arg1); goto shift; case RSHIFT_EXPR: /* Optimize -1 >> x for arithmetic right shifts. */ if (integer_all_onesp (arg0) && !TYPE_UNSIGNED (type)) return omit_one_operand (type, arg0, arg1); /* ... fall through ... */ case LSHIFT_EXPR: shift: if (integer_zerop (arg1)) return non_lvalue (fold_convert (type, arg0)); if (integer_zerop (arg0)) return omit_one_operand (type, arg0, arg1); /* Since negative shift count is not well-defined, don't try to compute it in the compiler. */ if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0) return t; /* Rewrite an LROTATE_EXPR by a constant into an RROTATE_EXPR by a new constant. */ if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST) { tree tem = build_int_2 (GET_MODE_BITSIZE (TYPE_MODE (type)), 0); tem = fold_convert (TREE_TYPE (arg1), tem); tem = const_binop (MINUS_EXPR, tem, arg1, 0); return fold (build2 (RROTATE_EXPR, type, arg0, tem)); } /* If we have a rotate of a bit operation with the rotate count and the second operand of the bit operation both constant, permute the two operations. 
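That is, (X & C1) rotated by C2 becomes (X rotated by C2) & (C1 rotated by C2), with the rotate of the constant folded at compile time; the same holds for | and ^.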
*/ if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST && (TREE_CODE (arg0) == BIT_AND_EXPR || TREE_CODE (arg0) == BIT_IOR_EXPR || TREE_CODE (arg0) == BIT_XOR_EXPR) && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) return fold (build2 (TREE_CODE (arg0), type, fold (build2 (code, type, TREE_OPERAND (arg0, 0), arg1)), fold (build2 (code, type, TREE_OPERAND (arg0, 1), arg1)))); /* Two consecutive rotates adding up to the width of the mode can be ignored. */ if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == RROTATE_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST && TREE_INT_CST_HIGH (arg1) == 0 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0 && ((TREE_INT_CST_LOW (arg1) + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))) == (unsigned int) GET_MODE_BITSIZE (TYPE_MODE (type)))) return TREE_OPERAND (arg0, 0); goto binary; case MIN_EXPR: if (operand_equal_p (arg0, arg1, 0)) return omit_one_operand (type, arg0, arg1); if (INTEGRAL_TYPE_P (type) && operand_equal_p (arg1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST)) return omit_one_operand (type, arg1, arg0); goto associate; case MAX_EXPR: if (operand_equal_p (arg0, arg1, 0)) return omit_one_operand (type, arg0, arg1); if (INTEGRAL_TYPE_P (type) && TYPE_MAX_VALUE (type) && operand_equal_p (arg1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST)) return omit_one_operand (type, arg1, arg0); goto associate; case TRUTH_NOT_EXPR: /* The argument to invert_truthvalue must have Boolean type. */ if (TREE_CODE (TREE_TYPE (arg0)) != BOOLEAN_TYPE) arg0 = fold_convert (boolean_type_node, arg0); /* Note that the operand of this must be an int and its values must be 0 or 1. ("true" is a fixed value perhaps depending on the language, but we don't handle values other than 1 correctly yet.) */ tem = invert_truthvalue (arg0); /* Avoid infinite recursion. */ if (TREE_CODE (tem) == TRUTH_NOT_EXPR) { tem = fold_single_bit_test (code, arg0, arg1, type); if (tem) return tem; return t; } return fold_convert (type, tem); case TRUTH_ANDIF_EXPR: /* Note that the operands of this must be ints and their values must be 0 or 1. ("true" is a fixed value perhaps depending on the language.) */ /* If first arg is constant zero, return it. */ if (integer_zerop (arg0)) return fold_convert (type, arg0); case TRUTH_AND_EXPR: /* If either arg is constant true, drop it. */ if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0)) return non_lvalue (fold_convert (type, arg1)); if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1) /* Preserve sequence points. */ && (code != TRUTH_ANDIF_EXPR || ! TREE_SIDE_EFFECTS (arg0))) return non_lvalue (fold_convert (type, arg0)); /* If second arg is constant zero, result is zero, but first arg must be evaluated. */ if (integer_zerop (arg1)) return omit_one_operand (type, arg1, arg0); /* Likewise for first arg, but note that only the TRUTH_AND_EXPR case will be handled here. */ if (integer_zerop (arg0)) return omit_one_operand (type, arg0, arg1); /* !X && X is always false. */ if (TREE_CODE (arg0) == TRUTH_NOT_EXPR && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)) return omit_one_operand (type, integer_zero_node, arg1); /* X && !X is always false. */ if (TREE_CODE (arg1) == TRUTH_NOT_EXPR && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0)) return omit_one_operand (type, integer_zero_node, arg0); truth_andor: /* We only do these simplifications if we are optimizing. */ if (!optimize) return t; /* Check for things like (A || B) && (A || C). We can convert this to A || (B && C). 
Note that either operator can be any of the four truth and/or operations and the transformation will still be valid. Also note that we only care about order for the ANDIF and ORIF operators. If B contains side effects, this might change the truth-value of A. */ if (TREE_CODE (arg0) == TREE_CODE (arg1) && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR || TREE_CODE (arg0) == TRUTH_ORIF_EXPR || TREE_CODE (arg0) == TRUTH_AND_EXPR || TREE_CODE (arg0) == TRUTH_OR_EXPR) && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg0, 1))) { tree a00 = TREE_OPERAND (arg0, 0); tree a01 = TREE_OPERAND (arg0, 1); tree a10 = TREE_OPERAND (arg1, 0); tree a11 = TREE_OPERAND (arg1, 1); int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR || TREE_CODE (arg0) == TRUTH_AND_EXPR) && (code == TRUTH_AND_EXPR || code == TRUTH_OR_EXPR)); if (operand_equal_p (a00, a10, 0)) return fold (build2 (TREE_CODE (arg0), type, a00, fold (build2 (code, type, a01, a11)))); else if (commutative && operand_equal_p (a00, a11, 0)) return fold (build2 (TREE_CODE (arg0), type, a00, fold (build2 (code, type, a01, a10)))); else if (commutative && operand_equal_p (a01, a10, 0)) return fold (build2 (TREE_CODE (arg0), type, a01, fold (build2 (code, type, a00, a11)))); /* This case if tricky because we must either have commutative operators or else A10 must not have side-effects. */ else if ((commutative || ! TREE_SIDE_EFFECTS (a10)) && operand_equal_p (a01, a11, 0)) return fold (build2 (TREE_CODE (arg0), type, fold (build2 (code, type, a00, a10)), a01)); } /* See if we can build a range comparison. */ if (0 != (tem = fold_range_test (t))) return tem; /* Check for the possibility of merging component references. If our lhs is another similar operation, try to merge its rhs with our rhs. Then try to merge our lhs and rhs. */ if (TREE_CODE (arg0) == code && 0 != (tem = fold_truthop (code, type, TREE_OPERAND (arg0, 1), arg1))) return fold (build2 (code, type, TREE_OPERAND (arg0, 0), tem)); if ((tem = fold_truthop (code, type, arg0, arg1)) != 0) return tem; return t; case TRUTH_ORIF_EXPR: /* Note that the operands of this must be ints and their values must be 0 or true. ("true" is a fixed value perhaps depending on the language.) */ /* If first arg is constant true, return it. */ if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0)) return fold_convert (type, arg0); case TRUTH_OR_EXPR: /* If either arg is constant zero, drop it. */ if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0)) return non_lvalue (fold_convert (type, arg1)); if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1) /* Preserve sequence points. */ && (code != TRUTH_ORIF_EXPR || ! TREE_SIDE_EFFECTS (arg0))) return non_lvalue (fold_convert (type, arg0)); /* If second arg is constant true, result is true, but we must evaluate first arg. */ if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1)) return omit_one_operand (type, arg1, arg0); /* Likewise for first arg, but note this only occurs here for TRUTH_OR_EXPR. */ if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0)) return omit_one_operand (type, arg0, arg1); /* !X || X is always true. */ if (TREE_CODE (arg0) == TRUTH_NOT_EXPR && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)) return omit_one_operand (type, integer_one_node, arg1); /* X || !X is always true. */ if (TREE_CODE (arg1) == TRUTH_NOT_EXPR && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0)) return omit_one_operand (type, integer_one_node, arg0); goto truth_andor; case TRUTH_XOR_EXPR: /* If the second arg is constant zero, drop it. 
*/ if (integer_zerop (arg1)) return non_lvalue (fold_convert (type, arg0)); /* If the second arg is constant true, this is a logical inversion. */ if (integer_onep (arg1)) return non_lvalue (fold_convert (type, invert_truthvalue (arg0))); /* Identical arguments cancel to zero. */ if (operand_equal_p (arg0, arg1, 0)) return omit_one_operand (type, integer_zero_node, arg0); /* !X ^ X is always true. */ if (TREE_CODE (arg0) == TRUTH_NOT_EXPR && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)) return omit_one_operand (type, integer_one_node, arg1); /* X ^ !X is always true. */ if (TREE_CODE (arg1) == TRUTH_NOT_EXPR && operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0)) return omit_one_operand (type, integer_one_node, arg0); return t; case EQ_EXPR: case NE_EXPR: case LT_EXPR: case GT_EXPR: case LE_EXPR: case GE_EXPR: /* If one arg is a real or integer constant, put it last. */ if (tree_swap_operands_p (arg0, arg1, true)) return fold (build2 (swap_tree_comparison (code), type, arg1, arg0)); /* If this is an equality comparison of the address of a non-weak object against zero, then we know the result. */ if ((code == EQ_EXPR || code == NE_EXPR) && TREE_CODE (arg0) == ADDR_EXPR && DECL_P (TREE_OPERAND (arg0, 0)) && ! DECL_WEAK (TREE_OPERAND (arg0, 0)) && integer_zerop (arg1)) return constant_boolean_node (code != EQ_EXPR, type); /* If this is an equality comparison of the address of two non-weak, unaliased symbols neither of which are extern (since we do not have access to attributes for externs), then we know the result. */ if ((code == EQ_EXPR || code == NE_EXPR) && TREE_CODE (arg0) == ADDR_EXPR && DECL_P (TREE_OPERAND (arg0, 0)) && ! DECL_WEAK (TREE_OPERAND (arg0, 0)) && ! lookup_attribute ("alias", DECL_ATTRIBUTES (TREE_OPERAND (arg0, 0))) && ! DECL_EXTERNAL (TREE_OPERAND (arg0, 0)) && TREE_CODE (arg1) == ADDR_EXPR && DECL_P (TREE_OPERAND (arg1, 0)) && ! DECL_WEAK (TREE_OPERAND (arg1, 0)) && ! lookup_attribute ("alias", DECL_ATTRIBUTES (TREE_OPERAND (arg1, 0))) && ! DECL_EXTERNAL (TREE_OPERAND (arg1, 0))) return constant_boolean_node (operand_equal_p (arg0, arg1, 0) ? code == EQ_EXPR : code != EQ_EXPR, type); if (FLOAT_TYPE_P (TREE_TYPE (arg0))) { tree targ0 = strip_float_extensions (arg0); tree targ1 = strip_float_extensions (arg1); tree newtype = TREE_TYPE (targ0); if (TYPE_PRECISION (TREE_TYPE (targ1)) > TYPE_PRECISION (newtype)) newtype = TREE_TYPE (targ1); /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */ if (TYPE_PRECISION (newtype) < TYPE_PRECISION (TREE_TYPE (arg0))) return fold (build2 (code, type, fold_convert (newtype, targ0), fold_convert (newtype, targ1))); /* (-a) CMP (-b) -> b CMP a */ if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == NEGATE_EXPR) return fold (build2 (code, type, TREE_OPERAND (arg1, 0), TREE_OPERAND (arg0, 0))); if (TREE_CODE (arg1) == REAL_CST) { REAL_VALUE_TYPE cst; cst = TREE_REAL_CST (arg1); /* (-a) CMP CST -> a swap(CMP) (-CST) */ if (TREE_CODE (arg0) == NEGATE_EXPR) return fold (build2 (swap_tree_comparison (code), type, TREE_OPERAND (arg0, 0), build_real (TREE_TYPE (arg1), REAL_VALUE_NEGATE (cst)))); /* IEEE doesn't distinguish +0 and -0 in comparisons. */ /* a CMP (-0) -> a CMP 0 */ if (REAL_VALUE_MINUS_ZERO (cst)) return fold (build2 (code, type, arg0, build_real (TREE_TYPE (arg1), dconst0))); /* x != NaN is always true, other ops are always false. */ if (REAL_VALUE_ISNAN (cst) && ! HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))) { tem = (code == NE_EXPR) ? 
integer_one_node : integer_zero_node; return omit_one_operand (type, tem, arg0); } /* Fold comparisons against infinity. */ if (REAL_VALUE_ISINF (cst)) { tem = fold_inf_compare (code, type, arg0, arg1); if (tem != NULL_TREE) return tem; } } /* If this is a comparison of a real constant with a PLUS_EXPR or a MINUS_EXPR of a real constant, we can convert it into a comparison with a revised real constant as long as no overflow occurs when unsafe_math_optimizations are enabled. */ if (flag_unsafe_math_optimizations && TREE_CODE (arg1) == REAL_CST && (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR) && TREE_CODE (TREE_OPERAND (arg0, 1)) == REAL_CST && 0 != (tem = const_binop (TREE_CODE (arg0) == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR, arg1, TREE_OPERAND (arg0, 1), 0)) && ! TREE_CONSTANT_OVERFLOW (tem)) return fold (build2 (code, type, TREE_OPERAND (arg0, 0), tem)); /* Likewise, we can simplify a comparison of a real constant with a MINUS_EXPR whose first operand is also a real constant, i.e. (c1 - x) < c2 becomes x > c1-c2. */ if (flag_unsafe_math_optimizations && TREE_CODE (arg1) == REAL_CST && TREE_CODE (arg0) == MINUS_EXPR && TREE_CODE (TREE_OPERAND (arg0, 0)) == REAL_CST && 0 != (tem = const_binop (MINUS_EXPR, TREE_OPERAND (arg0, 0), arg1, 0)) && ! TREE_CONSTANT_OVERFLOW (tem)) return fold (build2 (swap_tree_comparison (code), type, TREE_OPERAND (arg0, 1), tem)); /* Fold comparisons against built-in math functions. */ if (TREE_CODE (arg1) == REAL_CST && flag_unsafe_math_optimizations && ! flag_errno_math) { enum built_in_function fcode = builtin_mathfn_code (arg0); if (fcode != END_BUILTINS) { tem = fold_mathfn_compare (fcode, code, type, arg0, arg1); if (tem != NULL_TREE) return tem; } } } /* Convert foo++ == CONST into ++foo == CONST + INCR. */ if (TREE_CONSTANT (arg1) && (TREE_CODE (arg0) == POSTINCREMENT_EXPR || TREE_CODE (arg0) == POSTDECREMENT_EXPR) /* This optimization is invalid for ordered comparisons if CONST+INCR overflows or if foo+incr might overflow. This optimization is invalid for floating point due to rounding. For pointer types we assume overflow doesn't happen. */ && (POINTER_TYPE_P (TREE_TYPE (arg0)) || (INTEGRAL_TYPE_P (TREE_TYPE (arg0)) && (code == EQ_EXPR || code == NE_EXPR)))) { tree varop, newconst; if (TREE_CODE (arg0) == POSTINCREMENT_EXPR) { newconst = fold (build2 (PLUS_EXPR, TREE_TYPE (arg0), arg1, TREE_OPERAND (arg0, 1))); varop = build2 (PREINCREMENT_EXPR, TREE_TYPE (arg0), TREE_OPERAND (arg0, 0), TREE_OPERAND (arg0, 1)); } else { newconst = fold (build2 (MINUS_EXPR, TREE_TYPE (arg0), arg1, TREE_OPERAND (arg0, 1))); varop = build2 (PREDECREMENT_EXPR, TREE_TYPE (arg0), TREE_OPERAND (arg0, 0), TREE_OPERAND (arg0, 1)); } /* If VAROP is a reference to a bitfield, we must mask the constant by the width of the field. */ if (TREE_CODE (TREE_OPERAND (varop, 0)) == COMPONENT_REF && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (varop, 0), 1)) && host_integerp (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (varop, 0), 1)), 1)) { tree fielddecl = TREE_OPERAND (TREE_OPERAND (varop, 0), 1); HOST_WIDE_INT size = tree_low_cst (DECL_SIZE (fielddecl), 1); tree folded_compare, shift; /* First check whether the comparison would come out always the same. If we don't do that we would change the meaning with the masking. 
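The masking itself is done below by shifting NEWCONST left and then right again by (precision - SIZE) bits, which reduces the constant to a value representable in the SIZE-bit field.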
*/ folded_compare = fold (build2 (code, type, TREE_OPERAND (varop, 0), arg1)); if (integer_zerop (folded_compare) || integer_onep (folded_compare)) return omit_one_operand (type, folded_compare, varop); shift = build_int_2 (TYPE_PRECISION (TREE_TYPE (varop)) - size, 0); shift = fold_convert (TREE_TYPE (varop), shift); newconst = fold (build2 (LSHIFT_EXPR, TREE_TYPE (varop), newconst, shift)); newconst = fold (build2 (RSHIFT_EXPR, TREE_TYPE (varop), newconst, shift)); } return fold (build2 (code, type, varop, newconst)); } /* Change X >= C to X > (C - 1) and X < C to X <= (C - 1) if C > 0. This transformation affects the cases which are handled in later optimizations involving comparisons with non-negative constants. */ if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST && tree_int_cst_sgn (arg1) > 0) { switch (code) { case GE_EXPR: arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0); return fold (build2 (GT_EXPR, type, arg0, arg1)); case LT_EXPR: arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0); return fold (build2 (LE_EXPR, type, arg0, arg1)); default: break; } } /* Comparisons with the highest or lowest possible integer of the specified size will have known values. This is quite similar to fold_relational_hi_lo; however, my attempts to share the code have been nothing but trouble. I give up for now. */ { int width = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (arg1))); if (TREE_CODE (arg1) == INTEGER_CST && ! TREE_CONSTANT_OVERFLOW (arg1) && width <= HOST_BITS_PER_WIDE_INT && (INTEGRAL_TYPE_P (TREE_TYPE (arg1)) || POINTER_TYPE_P (TREE_TYPE (arg1)))) { unsigned HOST_WIDE_INT signed_max; unsigned HOST_WIDE_INT max, min; signed_max = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1; if (TYPE_UNSIGNED (TREE_TYPE (arg1))) { max = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1; min = 0; } else { max = signed_max; min = ((unsigned HOST_WIDE_INT) -1 << (width - 1)); } if (TREE_INT_CST_HIGH (arg1) == 0 && TREE_INT_CST_LOW (arg1) == max) switch (code) { case GT_EXPR: return omit_one_operand (type, integer_zero_node, arg0); case GE_EXPR: return fold (build2 (EQ_EXPR, type, arg0, arg1)); case LE_EXPR: return omit_one_operand (type, integer_one_node, arg0); case LT_EXPR: return fold (build2 (NE_EXPR, type, arg0, arg1)); /* The GE_EXPR and LT_EXPR cases above are not normally reached because of previous transformations. */ default: break; } else if (TREE_INT_CST_HIGH (arg1) == 0 && TREE_INT_CST_LOW (arg1) == max - 1) switch (code) { case GT_EXPR: arg1 = const_binop (PLUS_EXPR, arg1, integer_one_node, 0); return fold (build2 (EQ_EXPR, type, arg0, arg1)); case LE_EXPR: arg1 = const_binop (PLUS_EXPR, arg1, integer_one_node, 0); return fold (build2 (NE_EXPR, type, arg0, arg1)); default: break; } else if (TREE_INT_CST_HIGH (arg1) == (min ? -1 : 0) && TREE_INT_CST_LOW (arg1) == min) switch (code) { case LT_EXPR: return omit_one_operand (type, integer_zero_node, arg0); case LE_EXPR: return fold (build2 (EQ_EXPR, type, arg0, arg1)); case GE_EXPR: return omit_one_operand (type, integer_one_node, arg0); case GT_EXPR: return fold (build2 (NE_EXPR, type, arg0, arg1)); default: break; } else if (TREE_INT_CST_HIGH (arg1) == (min ? 
-1 : 0) && TREE_INT_CST_LOW (arg1) == min + 1) switch (code) { case GE_EXPR: arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0); return fold (build2 (NE_EXPR, type, arg0, arg1)); case LT_EXPR: arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0); return fold (build2 (EQ_EXPR, type, arg0, arg1)); default: break; } else if (!in_gimple_form && TREE_INT_CST_HIGH (arg1) == 0 && TREE_INT_CST_LOW (arg1) == signed_max && TYPE_UNSIGNED (TREE_TYPE (arg1)) /* signed_type does not work on pointer types. */ && INTEGRAL_TYPE_P (TREE_TYPE (arg1))) { /* The following case also applies to X < signed_max+1 and X >= signed_max+1 because previous transformations. */ if (code == LE_EXPR || code == GT_EXPR) { tree st0, st1; st0 = lang_hooks.types.signed_type (TREE_TYPE (arg0)); st1 = lang_hooks.types.signed_type (TREE_TYPE (arg1)); return fold (build2 (code == LE_EXPR ? GE_EXPR: LT_EXPR, type, fold_convert (st0, arg0), fold_convert (st1, integer_zero_node))); } } } } /* If this is an EQ or NE comparison of a constant with a PLUS_EXPR or a MINUS_EXPR of a constant, we can convert it into a comparison with a revised constant as long as no overflow occurs. */ if ((code == EQ_EXPR || code == NE_EXPR) && TREE_CODE (arg1) == INTEGER_CST && (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR) && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST && 0 != (tem = const_binop (TREE_CODE (arg0) == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR, arg1, TREE_OPERAND (arg0, 1), 0)) && ! TREE_CONSTANT_OVERFLOW (tem)) return fold (build2 (code, type, TREE_OPERAND (arg0, 0), tem)); /* Similarly for a NEGATE_EXPR. */ else if ((code == EQ_EXPR || code == NE_EXPR) && TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == INTEGER_CST && 0 != (tem = negate_expr (arg1)) && TREE_CODE (tem) == INTEGER_CST && ! TREE_CONSTANT_OVERFLOW (tem)) return fold (build2 (code, type, TREE_OPERAND (arg0, 0), tem)); /* If we have X - Y == 0, we can convert that to X == Y and similarly for !=. Don't do this for ordered comparisons due to overflow. */ else if ((code == NE_EXPR || code == EQ_EXPR) && integer_zerop (arg1) && TREE_CODE (arg0) == MINUS_EXPR) return fold (build2 (code, type, TREE_OPERAND (arg0, 0), TREE_OPERAND (arg0, 1))); /* If we are widening one operand of an integer comparison, see if the other operand is similarly being widened. Perhaps we can do the comparison in the narrower type. */ else if (TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE && TREE_CODE (arg0) == NOP_EXPR && (tem = get_unwidened (arg0, NULL_TREE)) != arg0 && (code == EQ_EXPR || code == NE_EXPR || TYPE_UNSIGNED (TREE_TYPE (arg0)) == TYPE_UNSIGNED (TREE_TYPE (tem))) && (t1 = get_unwidened (arg1, TREE_TYPE (tem))) != 0 && (TREE_TYPE (t1) == TREE_TYPE (tem) || (TREE_CODE (t1) == INTEGER_CST && int_fits_type_p (t1, TREE_TYPE (tem))))) return fold (build2 (code, type, tem, fold_convert (TREE_TYPE (tem), t1))); /* If this is comparing a constant with a MIN_EXPR or a MAX_EXPR of a constant, we can simplify it. */ else if (TREE_CODE (arg1) == INTEGER_CST && (TREE_CODE (arg0) == MIN_EXPR || TREE_CODE (arg0) == MAX_EXPR) && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) return optimize_minmax_comparison (t); /* If we are comparing an ABS_EXPR with a constant, we can convert all the cases into explicit comparisons, but they may well not be faster than doing the ABS and one comparison. But ABS (X) <= C is a range comparison, which becomes a subtraction and a comparison, and is probably faster. 
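Concretely, ABS (X) <= C becomes X >= -C && X <= C, built below as a TRUTH_ANDIF_EXPR of the two half-range tests.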
*/ else if (code == LE_EXPR && TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == ABS_EXPR && ! TREE_SIDE_EFFECTS (arg0) && (0 != (tem = negate_expr (arg1))) && TREE_CODE (tem) == INTEGER_CST && ! TREE_CONSTANT_OVERFLOW (tem)) return fold (build2 (TRUTH_ANDIF_EXPR, type, build2 (GE_EXPR, type, TREE_OPERAND (arg0, 0), tem), build2 (LE_EXPR, type, TREE_OPERAND (arg0, 0), arg1))); /* If this is an EQ or NE comparison with zero and ARG0 is (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require two operations, but the latter can be done in one less insn on machines that have only two-operand insns or on which a constant cannot be the first operand. */ if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR) && TREE_CODE (arg0) == BIT_AND_EXPR) { tree arg00 = TREE_OPERAND (arg0, 0); tree arg01 = TREE_OPERAND (arg0, 1); if (TREE_CODE (arg00) == LSHIFT_EXPR && integer_onep (TREE_OPERAND (arg00, 0))) return fold (build2 (code, type, build2 (BIT_AND_EXPR, TREE_TYPE (arg0), build2 (RSHIFT_EXPR, TREE_TYPE (arg00), arg01, TREE_OPERAND (arg00, 1)), fold_convert (TREE_TYPE (arg0), integer_one_node)), arg1)); else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0))) return fold (build2 (code, type, build2 (BIT_AND_EXPR, TREE_TYPE (arg0), build2 (RSHIFT_EXPR, TREE_TYPE (arg01), arg00, TREE_OPERAND (arg01, 1)), fold_convert (TREE_TYPE (arg0), integer_one_node)), arg1)); } /* If this is an NE or EQ comparison of zero against the result of a signed MOD operation whose second operand is a power of 2, make the MOD operation unsigned since it is simpler and equivalent. */ if ((code == NE_EXPR || code == EQ_EXPR) && integer_zerop (arg1) && !TYPE_UNSIGNED (TREE_TYPE (arg0)) && (TREE_CODE (arg0) == TRUNC_MOD_EXPR || TREE_CODE (arg0) == CEIL_MOD_EXPR || TREE_CODE (arg0) == FLOOR_MOD_EXPR || TREE_CODE (arg0) == ROUND_MOD_EXPR) && integer_pow2p (TREE_OPERAND (arg0, 1))) { tree newtype = lang_hooks.types.unsigned_type (TREE_TYPE (arg0)); tree newmod = fold (build2 (TREE_CODE (arg0), newtype, fold_convert (newtype, TREE_OPERAND (arg0, 0)), fold_convert (newtype, TREE_OPERAND (arg0, 1)))); return fold (build2 (code, type, newmod, fold_convert (newtype, arg1))); } /* If this is an NE comparison of zero with an AND of one, remove the comparison since the AND will give the correct value. */ if (code == NE_EXPR && integer_zerop (arg1) && TREE_CODE (arg0) == BIT_AND_EXPR && integer_onep (TREE_OPERAND (arg0, 1))) return fold_convert (type, arg0); /* If we have (A & C) == C where C is a power of 2, convert this into (A & C) != 0. Similarly for NE_EXPR. */ if ((code == EQ_EXPR || code == NE_EXPR) && TREE_CODE (arg0) == BIT_AND_EXPR && integer_pow2p (TREE_OPERAND (arg0, 1)) && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0)) return fold (build2 (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type, arg0, integer_zero_node)); /* If we have (A & C) != 0 or (A & C) == 0 and C is a power of 2, then fold the expression into shifts and logical operations. */ tem = fold_single_bit_test (code, arg0, arg1, type); if (tem) return tem; /* If we have (A & C) == D where D & ~C != 0, convert this into 0. Similarly for NE_EXPR. 
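A & C can never have a bit set that is clear in C, so when D has such a bit the result is known: e.g. (a & 0xf) == 0x10 is always 0 and (a & 0xf) != 0x10 is always 1.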
*/ if ((code == EQ_EXPR || code == NE_EXPR) && TREE_CODE (arg0) == BIT_AND_EXPR && TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) { tree dandnotc = fold (build2 (BIT_AND_EXPR, TREE_TYPE (arg0), arg1, build1 (BIT_NOT_EXPR, TREE_TYPE (TREE_OPERAND (arg0, 1)), TREE_OPERAND (arg0, 1)))); tree rslt = code == EQ_EXPR ? integer_zero_node : integer_one_node; if (integer_nonzerop (dandnotc)) return omit_one_operand (type, rslt, arg0); } /* If we have (A | C) == D where C & ~D != 0, convert this into 0. Similarly for NE_EXPR. */ if ((code == EQ_EXPR || code == NE_EXPR) && TREE_CODE (arg0) == BIT_IOR_EXPR && TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) { tree candnotd = fold (build2 (BIT_AND_EXPR, TREE_TYPE (arg0), TREE_OPERAND (arg0, 1), build1 (BIT_NOT_EXPR, TREE_TYPE (arg1), arg1))); tree rslt = code == EQ_EXPR ? integer_zero_node : integer_one_node; if (integer_nonzerop (candnotd)) return omit_one_operand (type, rslt, arg0); } /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0 and similarly for >= into !=. */ if ((code == LT_EXPR || code == GE_EXPR) && TYPE_UNSIGNED (TREE_TYPE (arg0)) && TREE_CODE (arg1) == LSHIFT_EXPR && integer_onep (TREE_OPERAND (arg1, 0))) return build2 (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type, build2 (RSHIFT_EXPR, TREE_TYPE (arg0), arg0, TREE_OPERAND (arg1, 1)), fold_convert (TREE_TYPE (arg0), integer_zero_node)); else if ((code == LT_EXPR || code == GE_EXPR) && TYPE_UNSIGNED (TREE_TYPE (arg0)) && (TREE_CODE (arg1) == NOP_EXPR || TREE_CODE (arg1) == CONVERT_EXPR) && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0))) return build2 (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type, fold_convert (TREE_TYPE (arg0), build2 (RSHIFT_EXPR, TREE_TYPE (arg0), arg0, TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))), fold_convert (TREE_TYPE (arg0), integer_zero_node)); /* Simplify comparison of something with itself. (For IEEE floating-point, we can only do some of these simplifications.) */ if (operand_equal_p (arg0, arg1, 0)) { switch (code) { case EQ_EXPR: if (! FLOAT_TYPE_P (TREE_TYPE (arg0)) || ! HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0)))) return constant_boolean_node (1, type); break; case GE_EXPR: case LE_EXPR: if (! FLOAT_TYPE_P (TREE_TYPE (arg0)) || ! HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0)))) return constant_boolean_node (1, type); return fold (build2 (EQ_EXPR, type, arg0, arg1)); case NE_EXPR: /* For NE, we can only do this simplification if integer or we don't honor IEEE floating point NaNs. */ if (FLOAT_TYPE_P (TREE_TYPE (arg0)) && HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0)))) break; /* ... fall through ... */ case GT_EXPR: case LT_EXPR: return constant_boolean_node (0, type); default: abort (); } } /* If we are comparing an expression that just has comparisons of two integer values, arithmetic expressions of those comparisons, and constants, we can simplify it. There are only three cases to check: the two values can either be equal, the first can be greater, or the second can be greater. Fold the expression for those three values. Since each value must be 0 or 1, we have eight possibilities, each of which corresponds to the constant 0 or 1 or one of the six possible comparisons. This handles common cases like (a > b) == 0 but also handles expressions like ((x > y) - (y > x)) > 0, which supposedly occur in macroized code. 
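For instance, for (a > b) == 0 the three foldings below yield 0, 1 and 1 for the greater, equal and less orderings, giving mask 3, which selects LE_EXPR, i.e. a <= b.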
*/ if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST) { tree cval1 = 0, cval2 = 0; int save_p = 0; if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p) /* Don't handle degenerate cases here; they should already have been handled anyway. */ && cval1 != 0 && cval2 != 0 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2)) && TREE_TYPE (cval1) == TREE_TYPE (cval2) && INTEGRAL_TYPE_P (TREE_TYPE (cval1)) && TYPE_MAX_VALUE (TREE_TYPE (cval1)) && TYPE_MAX_VALUE (TREE_TYPE (cval2)) && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)), TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0)) { tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1)); tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1)); /* We can't just pass T to eval_subst in case cval1 or cval2 was the same as ARG1. */ tree high_result = fold (build2 (code, type, eval_subst (arg0, cval1, maxval, cval2, minval), arg1)); tree equal_result = fold (build2 (code, type, eval_subst (arg0, cval1, maxval, cval2, maxval), arg1)); tree low_result = fold (build2 (code, type, eval_subst (arg0, cval1, minval, cval2, maxval), arg1)); /* All three of these results should be 0 or 1. Confirm they are. Then use those values to select the proper code to use. */ if ((integer_zerop (high_result) || integer_onep (high_result)) && (integer_zerop (equal_result) || integer_onep (equal_result)) && (integer_zerop (low_result) || integer_onep (low_result))) { /* Make a 3-bit mask with the high-order bit being the value for `>', the next for '=', and the low for '<'. */ switch ((integer_onep (high_result) * 4) + (integer_onep (equal_result) * 2) + integer_onep (low_result)) { case 0: /* Always false. */ return omit_one_operand (type, integer_zero_node, arg0); case 1: code = LT_EXPR; break; case 2: code = EQ_EXPR; break; case 3: code = LE_EXPR; break; case 4: code = GT_EXPR; break; case 5: code = NE_EXPR; break; case 6: code = GE_EXPR; break; case 7: /* Always true. */ return omit_one_operand (type, integer_one_node, arg0); } tem = build2 (code, type, cval1, cval2); if (save_p) return save_expr (tem); else return fold (tem); } } } /* If this is a comparison of a field, we may be able to simplify it. */ if (((TREE_CODE (arg0) == COMPONENT_REF && lang_hooks.can_use_bit_fields_p ()) || TREE_CODE (arg0) == BIT_FIELD_REF) && (code == EQ_EXPR || code == NE_EXPR) /* Handle the constant case even without -O to make sure the warnings are given. */ && (optimize || TREE_CODE (arg1) == INTEGER_CST)) { t1 = optimize_bit_field_compare (code, type, arg0, arg1); if (t1) return t1; } /* If this is a comparison of complex values and either or both sides are a COMPLEX_EXPR or COMPLEX_CST, it is best to split up the comparisons and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This may prevent needless evaluations. */ if ((code == EQ_EXPR || code == NE_EXPR) && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE && (TREE_CODE (arg0) == COMPLEX_EXPR || TREE_CODE (arg1) == COMPLEX_EXPR || TREE_CODE (arg0) == COMPLEX_CST || TREE_CODE (arg1) == COMPLEX_CST)) { tree subtype = TREE_TYPE (TREE_TYPE (arg0)); tree real0, imag0, real1, imag1; arg0 = save_expr (arg0); arg1 = save_expr (arg1); real0 = fold (build1 (REALPART_EXPR, subtype, arg0)); imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0)); real1 = fold (build1 (REALPART_EXPR, subtype, arg1)); imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1)); return fold (build2 ((code == EQ_EXPR ? 
TRUTH_ANDIF_EXPR : TRUTH_ORIF_EXPR), type, fold (build2 (code, type, real0, real1)), fold (build2 (code, type, imag0, imag1)))); } /* Optimize comparisons of strlen vs zero to a compare of the first character of the string vs zero. To wit, strlen(ptr) == 0 => *ptr == 0 strlen(ptr) != 0 => *ptr != 0 Other cases should reduce to one of these two (or a constant) due to the return value of strlen being unsigned. */ if ((code == EQ_EXPR || code == NE_EXPR) && integer_zerop (arg1) && TREE_CODE (arg0) == CALL_EXPR) { tree fndecl = get_callee_fndecl (arg0); tree arglist; if (fndecl && DECL_BUILT_IN (fndecl) && DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_STRLEN && (arglist = TREE_OPERAND (arg0, 1)) && TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) == POINTER_TYPE && ! TREE_CHAIN (arglist)) return fold (build2 (code, type, build1 (INDIRECT_REF, char_type_node, TREE_VALUE(arglist)), integer_zero_node)); } /* We can fold X/C1 op C2 where C1 and C2 are integer constants into a single range test. */ if (TREE_CODE (arg0) == TRUNC_DIV_EXPR && TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST && !integer_zerop (TREE_OPERAND (arg0, 1)) && !TREE_OVERFLOW (TREE_OPERAND (arg0, 1)) && !TREE_OVERFLOW (arg1)) { t1 = fold_div_compare (code, type, arg0, arg1); if (t1 != NULL_TREE) return t1; } /* Both ARG0 and ARG1 are known to be constants at this point. */ t1 = fold_relational_const (code, type, arg0, arg1); return (t1 == NULL_TREE ? t : t1); case UNORDERED_EXPR: case ORDERED_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST) { t1 = fold_relational_const (code, type, arg0, arg1); if (t1 != NULL_TREE) return t1; } /* If the first operand is NaN, the result is constant. */ if (TREE_CODE (arg0) == REAL_CST && REAL_VALUE_ISNAN (TREE_REAL_CST (arg0)) && (code != LTGT_EXPR || ! flag_trapping_math)) { t1 = (code == ORDERED_EXPR || code == LTGT_EXPR) ? integer_zero_node : integer_one_node; return omit_one_operand (type, t1, arg1); } /* If the second operand is NaN, the result is constant. */ if (TREE_CODE (arg1) == REAL_CST && REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)) && (code != LTGT_EXPR || ! flag_trapping_math)) { t1 = (code == ORDERED_EXPR || code == LTGT_EXPR) ? integer_zero_node : integer_one_node; return omit_one_operand (type, t1, arg0); } /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */ { tree targ0 = strip_float_extensions (arg0); tree targ1 = strip_float_extensions (arg1); tree newtype = TREE_TYPE (targ0); if (TYPE_PRECISION (TREE_TYPE (targ1)) > TYPE_PRECISION (newtype)) newtype = TREE_TYPE (targ1); if (TYPE_PRECISION (newtype) < TYPE_PRECISION (TREE_TYPE (arg0))) return fold (build2 (code, type, fold_convert (newtype, targ0), fold_convert (newtype, targ1))); } return t; case COND_EXPR: /* Pedantic ANSI C says that a conditional expression is never an lvalue, so all simple results must be passed through pedantic_non_lvalue. */ if (TREE_CODE (arg0) == INTEGER_CST) { tem = TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)); /* Only optimize constant conditions when the selected branch has the same type as the COND_EXPR. This avoids optimizing away "c ? x : throw", where the throw has a void type. */ if (! 
VOID_TYPE_P (TREE_TYPE (tem)) || VOID_TYPE_P (type)) return pedantic_non_lvalue (tem); return t; } if (operand_equal_p (arg1, TREE_OPERAND (t, 2), 0)) return pedantic_omit_one_operand (type, arg1, arg0); /* If we have A op B ? A : C, we may be able to convert this to a simpler expression, depending on the operation and the values of B and C. Signed zeros prevent all of these transformations, for reasons given above each one. Also try swapping the arguments and inverting the conditional. */ if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<' && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0), arg1, TREE_OPERAND (arg0, 1)) && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1)))) { tem = fold_cond_expr_with_comparison (type, arg0, TREE_OPERAND (t, 1), TREE_OPERAND (t, 2)); if (tem) return tem; } if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<' && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (t, 2), TREE_OPERAND (arg0, 1)) && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (TREE_OPERAND (t, 2))))) { tem = invert_truthvalue (arg0); if (TREE_CODE_CLASS (TREE_CODE (tem)) == '<') { tem = fold_cond_expr_with_comparison (type, tem, TREE_OPERAND (t, 2), TREE_OPERAND (t, 1)); if (tem) return tem; } } /* If the second operand is simpler than the third, swap them since that produces better jump optimization results. */ if (tree_swap_operands_p (TREE_OPERAND (t, 1), TREE_OPERAND (t, 2), false)) { /* See if this can be inverted. If it can't, possibly because it was a floating-point inequality comparison, don't do anything. */ tem = invert_truthvalue (arg0); if (TREE_CODE (tem) != TRUTH_NOT_EXPR) return fold (build3 (code, type, tem, TREE_OPERAND (t, 2), TREE_OPERAND (t, 1))); } /* Convert A ? 1 : 0 to simply A. */ if (integer_onep (TREE_OPERAND (t, 1)) && integer_zerop (TREE_OPERAND (t, 2)) /* If we try to convert TREE_OPERAND (t, 0) to our type, the call to fold will try to move the conversion inside a COND, which will recurse. In that case, the COND_EXPR is probably the best choice, so leave it alone. */ && type == TREE_TYPE (arg0)) return pedantic_non_lvalue (arg0); /* Convert A ? 0 : 1 to !A. This prefers the use of NOT_EXPR over COND_EXPR in cases such as floating point comparisons. */ if (integer_zerop (TREE_OPERAND (t, 1)) && integer_onep (TREE_OPERAND (t, 2)) && truth_value_p (TREE_CODE (arg0))) return pedantic_non_lvalue (fold_convert (type, invert_truthvalue (arg0))); /* A < 0 ? <sign bit of A> : 0 is simply (A & <sign bit of A>). */ if (TREE_CODE (arg0) == LT_EXPR && integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (TREE_OPERAND (t, 2)) && (tem = sign_bit_p (TREE_OPERAND (arg0, 0), arg1))) return fold_convert (type, fold (build2 (BIT_AND_EXPR, TREE_TYPE (tem), tem, arg1))); /* (A >> N) & 1 ? (1 << N) : 0 is simply A & (1 << N). A & 1 was already handled above. */ if (TREE_CODE (arg0) == BIT_AND_EXPR && integer_onep (TREE_OPERAND (arg0, 1)) && integer_zerop (TREE_OPERAND (t, 2)) && integer_pow2p (arg1)) { tree tem = TREE_OPERAND (arg0, 0); STRIP_NOPS (tem); if (TREE_CODE (tem) == RSHIFT_EXPR && (unsigned HOST_WIDE_INT) tree_log2 (arg1) == TREE_INT_CST_LOW (TREE_OPERAND (tem, 1))) return fold (build2 (BIT_AND_EXPR, type, TREE_OPERAND (tem, 0), arg1)); } /* A & N ? N : 0 is simply A & N if N is a power of two. This is probably obsolete because the first operand should be a truth value (that's why we have the two cases above), but let's leave it in until we can confirm this for all front-ends.
*/ if (integer_zerop (TREE_OPERAND (t, 2)) && TREE_CODE (arg0) == NE_EXPR && integer_zerop (TREE_OPERAND (arg0, 1)) && integer_pow2p (arg1) && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1), arg1, OEP_ONLY_CONST)) return pedantic_non_lvalue (fold_convert (type, TREE_OPERAND (arg0, 0))); /* Convert A ? B : 0 into A && B if A and B are truth values. */ if (integer_zerop (TREE_OPERAND (t, 2)) && truth_value_p (TREE_CODE (arg0)) && truth_value_p (TREE_CODE (arg1))) return fold (build2 (TRUTH_ANDIF_EXPR, type, arg0, arg1)); /* Convert A ? B : 1 into !A || B if A and B are truth values. */ if (integer_onep (TREE_OPERAND (t, 2)) && truth_value_p (TREE_CODE (arg0)) && truth_value_p (TREE_CODE (arg1))) { /* Only perform transformation if ARG0 is easily inverted. */ tem = invert_truthvalue (arg0); if (TREE_CODE (tem) != TRUTH_NOT_EXPR) return fold (build2 (TRUTH_ORIF_EXPR, type, tem, arg1)); } /* Convert A ? 0 : B into !A && B if A and B are truth values. */ if (integer_zerop (arg1) && truth_value_p (TREE_CODE (arg0)) && truth_value_p (TREE_CODE (TREE_OPERAND (t, 2)))) { /* Only perform transformation if ARG0 is easily inverted. */ tem = invert_truthvalue (arg0); if (TREE_CODE (tem) != TRUTH_NOT_EXPR) return fold (build2 (TRUTH_ANDIF_EXPR, type, tem, TREE_OPERAND (t, 2))); } /* Convert A ? 1 : B into A || B if A and B are truth values. */ if (integer_onep (arg1) && truth_value_p (TREE_CODE (arg0)) && truth_value_p (TREE_CODE (TREE_OPERAND (t, 2)))) return fold (build2 (TRUTH_ORIF_EXPR, type, arg0, TREE_OPERAND (t, 2))); return t; case COMPOUND_EXPR: /* When pedantic, a compound expression can be neither an lvalue nor an integer constant expression. */ if (TREE_SIDE_EFFECTS (arg0) || TREE_CONSTANT (arg1)) return t; /* Don't let (0, 0) be null pointer constant. */ tem = integer_zerop (arg1) ? build1 (NOP_EXPR, type, arg1) : fold_convert (type, arg1); return pedantic_non_lvalue (tem); case COMPLEX_EXPR: if (wins) return build_complex (type, arg0, arg1); return t; case REALPART_EXPR: if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE) return t; else if (TREE_CODE (arg0) == COMPLEX_EXPR) return omit_one_operand (type, TREE_OPERAND (arg0, 0), TREE_OPERAND (arg0, 1)); else if (TREE_CODE (arg0) == COMPLEX_CST) return TREE_REALPART (arg0); else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR) return fold (build2 (TREE_CODE (arg0), type, fold (build1 (REALPART_EXPR, type, TREE_OPERAND (arg0, 0))), fold (build1 (REALPART_EXPR, type, TREE_OPERAND (arg0, 1))))); return t; case IMAGPART_EXPR: if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE) return fold_convert (type, integer_zero_node); else if (TREE_CODE (arg0) == COMPLEX_EXPR) return omit_one_operand (type, TREE_OPERAND (arg0, 1), TREE_OPERAND (arg0, 0)); else if (TREE_CODE (arg0) == COMPLEX_CST) return TREE_IMAGPART (arg0); else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR) return fold (build2 (TREE_CODE (arg0), type, fold (build1 (IMAGPART_EXPR, type, TREE_OPERAND (arg0, 0))), fold (build1 (IMAGPART_EXPR, type, TREE_OPERAND (arg0, 1))))); return t; /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where appropriate. */ case CLEANUP_POINT_EXPR: if (! 
has_cleanups (arg0)) return TREE_OPERAND (t, 0); { enum tree_code code0 = TREE_CODE (arg0); int kind0 = TREE_CODE_CLASS (code0); tree arg00 = TREE_OPERAND (arg0, 0); tree arg01; if (kind0 == '1' || code0 == TRUTH_NOT_EXPR) return fold (build1 (code0, type, fold (build1 (CLEANUP_POINT_EXPR, TREE_TYPE (arg00), arg00)))); if (kind0 == '<' || kind0 == '2' || code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR || code0 == TRUTH_AND_EXPR || code0 == TRUTH_OR_EXPR || code0 == TRUTH_XOR_EXPR) { arg01 = TREE_OPERAND (arg0, 1); if (TREE_CONSTANT (arg00) || ((code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR) && ! has_cleanups (arg00))) return fold (build2 (code0, type, arg00, fold (build1 (CLEANUP_POINT_EXPR, TREE_TYPE (arg01), arg01)))); if (TREE_CONSTANT (arg01)) return fold (build2 (code0, type, fold (build1 (CLEANUP_POINT_EXPR, TREE_TYPE (arg00), arg00)), arg01)); } return t; } case CALL_EXPR: /* Check for a built-in function. */ if (TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == FUNCTION_DECL) && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (t, 0), 0))) { tree tmp = fold_builtin (t); if (tmp) return tmp; } return t; default: return t; } /* switch (code) */ } #ifdef ENABLE_FOLD_CHECKING #undef fold static void fold_checksum_tree (tree, struct md5_ctx *, htab_t); static void fold_check_failed (tree, tree); void print_fold_checksum (tree); /* When --enable-checking=fold, compute a digest of expr before and after actual fold call to see if fold did not accidentally change original expr. */ tree fold (tree expr) { tree ret; struct md5_ctx ctx; unsigned char checksum_before[16], checksum_after[16]; htab_t ht; ht = htab_create (32, htab_hash_pointer, htab_eq_pointer, NULL); md5_init_ctx (&ctx); fold_checksum_tree (expr, &ctx, ht); md5_finish_ctx (&ctx, checksum_before); htab_empty (ht); ret = fold_1 (expr); md5_init_ctx (&ctx); fold_checksum_tree (expr, &ctx, ht); md5_finish_ctx (&ctx, checksum_after); htab_delete (ht); if (memcmp (checksum_before, checksum_after, 16)) fold_check_failed (expr, ret); return ret; } void print_fold_checksum (tree expr) { struct md5_ctx ctx; unsigned char checksum[16], cnt; htab_t ht; ht = htab_create (32, htab_hash_pointer, htab_eq_pointer, NULL); md5_init_ctx (&ctx); fold_checksum_tree (expr, &ctx, ht); md5_finish_ctx (&ctx, checksum); htab_delete (ht); for (cnt = 0; cnt < 16; ++cnt) fprintf (stderr, "%02x", checksum[cnt]); putc ('\n', stderr); } static void fold_check_failed (tree expr ATTRIBUTE_UNUSED, tree ret ATTRIBUTE_UNUSED) { internal_error ("fold check: original tree changed by fold"); } static void fold_checksum_tree (tree expr, struct md5_ctx *ctx, htab_t ht) { void **slot; enum tree_code code; char buf[sizeof (struct tree_decl)]; int i, len; if (sizeof (struct tree_exp) + 5 * sizeof (tree) > sizeof (struct tree_decl) || sizeof (struct tree_type) > sizeof (struct tree_decl)) abort (); if (expr == NULL) return; slot = htab_find_slot (ht, expr, INSERT); if (*slot != NULL) return; *slot = expr; code = TREE_CODE (expr); if (TREE_CODE_CLASS (code) == 'd' && DECL_ASSEMBLER_NAME_SET_P (expr)) { /* Allow DECL_ASSEMBLER_NAME to be modified. */ memcpy (buf, expr, tree_size (expr)); expr = (tree) buf; SET_DECL_ASSEMBLER_NAME (expr, NULL); } else if (TREE_CODE_CLASS (code) == 't' && (TYPE_POINTER_TO (expr) || TYPE_REFERENCE_TO (expr))) { /* Allow TYPE_POINTER_TO and TYPE_REFERENCE_TO to be modified. 
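As with DECL_ASSEMBLER_NAME above, the digest is taken from a local copy of the node with these fields cleared, so a change fold legitimately makes to them is not reported as a checksum mismatch.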
*/ memcpy (buf, expr, tree_size (expr)); expr = (tree) buf; TYPE_POINTER_TO (expr) = NULL; TYPE_REFERENCE_TO (expr) = NULL; } md5_process_bytes (expr, tree_size (expr), ctx); fold_checksum_tree (TREE_TYPE (expr), ctx, ht); if (TREE_CODE_CLASS (code) != 't' && TREE_CODE_CLASS (code) != 'd') fold_checksum_tree (TREE_CHAIN (expr), ctx, ht); len = TREE_CODE_LENGTH (code); switch (TREE_CODE_CLASS (code)) { case 'c': switch (code) { case STRING_CST: md5_process_bytes (TREE_STRING_POINTER (expr), TREE_STRING_LENGTH (expr), ctx); break; case COMPLEX_CST: fold_checksum_tree (TREE_REALPART (expr), ctx, ht); fold_checksum_tree (TREE_IMAGPART (expr), ctx, ht); break; case VECTOR_CST: fold_checksum_tree (TREE_VECTOR_CST_ELTS (expr), ctx, ht); break; default: break; } break; case 'x': switch (code) { case TREE_LIST: fold_checksum_tree (TREE_PURPOSE (expr), ctx, ht); fold_checksum_tree (TREE_VALUE (expr), ctx, ht); break; case TREE_VEC: for (i = 0; i < TREE_VEC_LENGTH (expr); ++i) fold_checksum_tree (TREE_VEC_ELT (expr, i), ctx, ht); break; default: break; } break; case 'e': switch (code) { case GOTO_SUBROUTINE_EXPR: len = 0; break; case WITH_CLEANUP_EXPR: len = 2; break; default: break; } /* Fall through. */ case 'r': case '<': case '1': case '2': case 's': for (i = 0; i < len; ++i) fold_checksum_tree (TREE_OPERAND (expr, i), ctx, ht); break; case 'd': fold_checksum_tree (DECL_SIZE (expr), ctx, ht); fold_checksum_tree (DECL_SIZE_UNIT (expr), ctx, ht); fold_checksum_tree (DECL_NAME (expr), ctx, ht); fold_checksum_tree (DECL_CONTEXT (expr), ctx, ht); fold_checksum_tree (DECL_ARGUMENTS (expr), ctx, ht); fold_checksum_tree (DECL_RESULT_FLD (expr), ctx, ht); fold_checksum_tree (DECL_INITIAL (expr), ctx, ht); fold_checksum_tree (DECL_ABSTRACT_ORIGIN (expr), ctx, ht); fold_checksum_tree (DECL_SECTION_NAME (expr), ctx, ht); fold_checksum_tree (DECL_ATTRIBUTES (expr), ctx, ht); fold_checksum_tree (DECL_VINDEX (expr), ctx, ht); break; case 't': if (TREE_CODE (expr) == ENUMERAL_TYPE) fold_checksum_tree (TYPE_VALUES (expr), ctx, ht); fold_checksum_tree (TYPE_SIZE (expr), ctx, ht); fold_checksum_tree (TYPE_SIZE_UNIT (expr), ctx, ht); fold_checksum_tree (TYPE_ATTRIBUTES (expr), ctx, ht); fold_checksum_tree (TYPE_NAME (expr), ctx, ht); if (INTEGRAL_TYPE_P (expr) || SCALAR_FLOAT_TYPE_P (expr)) { fold_checksum_tree (TYPE_MIN_VALUE (expr), ctx, ht); fold_checksum_tree (TYPE_MAX_VALUE (expr), ctx, ht); } fold_checksum_tree (TYPE_MAIN_VARIANT (expr), ctx, ht); fold_checksum_tree (TYPE_BINFO (expr), ctx, ht); fold_checksum_tree (TYPE_CONTEXT (expr), ctx, ht); break; default: break; } } #endif /* Perform constant folding and related simplification of initializer expression EXPR. This behaves identically to "fold" but ignores potential run-time traps and exceptions that fold must preserve. */ tree fold_initializer (tree expr) { int saved_signaling_nans = flag_signaling_nans; int saved_trapping_math = flag_trapping_math; int saved_trapv = flag_trapv; tree result; flag_signaling_nans = 0; flag_trapping_math = 0; flag_trapv = 0; result = fold (expr); flag_signaling_nans = saved_signaling_nans; flag_trapping_math = saved_trapping_math; flag_trapv = saved_trapv; return result; } /* Determine if first argument is a multiple of second argument. Return 0 if it is not, or we cannot easily determined it to be. 
An example of the sort of thing we care about (at this point; this routine could surely be made more general, and expanded to do what the *_DIV_EXPR's fold cases do now) is discovering that SAVE_EXPR (I) * SAVE_EXPR (J * 8) is a multiple of SAVE_EXPR (J * 8) when we know that the two SAVE_EXPR (J * 8) nodes are the same node. This code also handles discovering that SAVE_EXPR (I) * SAVE_EXPR (J * 8) is a multiple of 8 so we don't have to worry about dealing with a possible remainder. Note that we *look* inside a SAVE_EXPR only to determine how it was calculated; it is not safe for fold to do much of anything else with the internals of a SAVE_EXPR, since it cannot know when it will be evaluated at run time. For example, the latter example above *cannot* be implemented as SAVE_EXPR (I) * J or any variant thereof, since the value of J at evaluation time of the original SAVE_EXPR is not necessarily the same at the time the new expression is evaluated. The only optimization of this sort that would be valid is changing SAVE_EXPR (I) * SAVE_EXPR (SAVE_EXPR (J) * 8) divided by 8 to SAVE_EXPR (I) * SAVE_EXPR (J) (where the same SAVE_EXPR (J) is used in the original and the transformed version). */ static int multiple_of_p (tree type, tree top, tree bottom) { if (operand_equal_p (top, bottom, 0)) return 1; if (TREE_CODE (type) != INTEGER_TYPE) return 0; switch (TREE_CODE (top)) { case MULT_EXPR: return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom) || multiple_of_p (type, TREE_OPERAND (top, 1), bottom)); case PLUS_EXPR: case MINUS_EXPR: return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom) && multiple_of_p (type, TREE_OPERAND (top, 1), bottom)); case LSHIFT_EXPR: if (TREE_CODE (TREE_OPERAND (top, 1)) == INTEGER_CST) { tree op1, t1; op1 = TREE_OPERAND (top, 1); /* const_binop may not detect overflow correctly, so check for it explicitly here. */ if (TYPE_PRECISION (TREE_TYPE (size_one_node)) > TREE_INT_CST_LOW (op1) && TREE_INT_CST_HIGH (op1) == 0 && 0 != (t1 = fold_convert (type, const_binop (LSHIFT_EXPR, size_one_node, op1, 0))) && ! TREE_OVERFLOW (t1)) return multiple_of_p (type, t1, bottom); } return 0; case NOP_EXPR: /* Can't handle conversions from non-integral or wider integral type. */ if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (top, 0))) != INTEGER_TYPE) || (TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (top, 0))))) return 0; /* .. fall through ... */ case SAVE_EXPR: return multiple_of_p (type, TREE_OPERAND (top, 0), bottom); case INTEGER_CST: if (TREE_CODE (bottom) != INTEGER_CST || (TYPE_UNSIGNED (type) && (tree_int_cst_sgn (top) < 0 || tree_int_cst_sgn (bottom) < 0))) return 0; return integer_zerop (const_binop (TRUNC_MOD_EXPR, top, bottom, 0)); default: return 0; } } /* Return true if `t' is known to be non-negative. */ int tree_expr_nonnegative_p (tree t) { switch (TREE_CODE (t)) { case ABS_EXPR: return 1; case INTEGER_CST: return tree_int_cst_sgn (t) >= 0; case REAL_CST: return ! REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)); case PLUS_EXPR: if (FLOAT_TYPE_P (TREE_TYPE (t))) return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)) && tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); /* zero_extend(x) + zero_extend(y) is non-negative if x and y are both unsigned and at least 2 bits shorter than the result. 
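For example, the sum of two zero-extended 8-bit values fits in 9 bits, so in a 32-bit signed result the sign bit can never become set.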
*/ if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE && TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR && TREE_CODE (TREE_OPERAND (t, 1)) == NOP_EXPR) { tree inner1 = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)); tree inner2 = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)); if (TREE_CODE (inner1) == INTEGER_TYPE && TYPE_UNSIGNED (inner1) && TREE_CODE (inner2) == INTEGER_TYPE && TYPE_UNSIGNED (inner2)) { unsigned int prec = MAX (TYPE_PRECISION (inner1), TYPE_PRECISION (inner2)) + 1; return prec < TYPE_PRECISION (TREE_TYPE (t)); } } break; case MULT_EXPR: if (FLOAT_TYPE_P (TREE_TYPE (t))) { /* x * x for floating point x is always non-negative. */ if (operand_equal_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1), 0)) return 1; return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)) && tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); } /* zero_extend(x) * zero_extend(y) is non-negative if x and y are both unsigned and their total bits is shorter than the result. */ if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE && TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR && TREE_CODE (TREE_OPERAND (t, 1)) == NOP_EXPR) { tree inner1 = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)); tree inner2 = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)); if (TREE_CODE (inner1) == INTEGER_TYPE && TYPE_UNSIGNED (inner1) && TREE_CODE (inner2) == INTEGER_TYPE && TYPE_UNSIGNED (inner2)) return TYPE_PRECISION (inner1) + TYPE_PRECISION (inner2) < TYPE_PRECISION (TREE_TYPE (t)); } return 0; case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)) && tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)); case RDIV_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)) && tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); case BIT_AND_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 1)) || tree_expr_nonnegative_p (TREE_OPERAND (t, 0)); case BIT_IOR_EXPR: case BIT_XOR_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)) && tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); case NOP_EXPR: { tree inner_type = TREE_TYPE (TREE_OPERAND (t, 0)); tree outer_type = TREE_TYPE (t); if (TREE_CODE (outer_type) == REAL_TYPE) { if (TREE_CODE (inner_type) == REAL_TYPE) return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)); if (TREE_CODE (inner_type) == INTEGER_TYPE) { if (TYPE_UNSIGNED (inner_type)) return 1; return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)); } } else if (TREE_CODE (outer_type) == INTEGER_TYPE) { if (TREE_CODE (inner_type) == REAL_TYPE) return tree_expr_nonnegative_p (TREE_OPERAND (t,0)); if (TREE_CODE (inner_type) == INTEGER_TYPE) return TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type) && TYPE_UNSIGNED (inner_type); } } break; case COND_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 1)) && tree_expr_nonnegative_p (TREE_OPERAND (t, 2)); case COMPOUND_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); case MIN_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)) && tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); case MAX_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)) || tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); case MODIFY_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); case BIND_EXPR: return tree_expr_nonnegative_p (expr_last (TREE_OPERAND (t, 1))); case SAVE_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)); case NON_LVALUE_EXPR: return 
tree_expr_nonnegative_p (TREE_OPERAND (t, 0)); case FLOAT_EXPR: return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)); case TARGET_EXPR: { tree temp = TARGET_EXPR_SLOT (t); t = TARGET_EXPR_INITIAL (t); /* If the initializer is non-void, then it's a normal expression that will be assigned to the slot. */ if (!VOID_TYPE_P (t)) return tree_expr_nonnegative_p (t); /* Otherwise, the initializer sets the slot in some way. One common way is an assignment statement at the end of the initializer. */ while (1) { if (TREE_CODE (t) == BIND_EXPR) t = expr_last (BIND_EXPR_BODY (t)); else if (TREE_CODE (t) == TRY_FINALLY_EXPR || TREE_CODE (t) == TRY_CATCH_EXPR) t = expr_last (TREE_OPERAND (t, 0)); else if (TREE_CODE (t) == STATEMENT_LIST) t = expr_last (t); else break; } if (TREE_CODE (t) == MODIFY_EXPR && TREE_OPERAND (t, 0) == temp) return tree_expr_nonnegative_p (TREE_OPERAND (t, 1)); return 0; } case CALL_EXPR: { tree fndecl = get_callee_fndecl (t); tree arglist = TREE_OPERAND (t, 1); if (fndecl && DECL_BUILT_IN (fndecl) && DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD) switch (DECL_FUNCTION_CODE (fndecl)) { #define CASE_BUILTIN_F(BUILT_IN_FN) \ case BUILT_IN_FN: case BUILT_IN_FN##F: case BUILT_IN_FN##L: #define CASE_BUILTIN_I(BUILT_IN_FN) \ case BUILT_IN_FN: case BUILT_IN_FN##L: case BUILT_IN_FN##LL: CASE_BUILTIN_F (BUILT_IN_ACOS) CASE_BUILTIN_F (BUILT_IN_ACOSH) CASE_BUILTIN_F (BUILT_IN_CABS) CASE_BUILTIN_F (BUILT_IN_COSH) CASE_BUILTIN_F (BUILT_IN_ERFC) CASE_BUILTIN_F (BUILT_IN_EXP) CASE_BUILTIN_F (BUILT_IN_EXP10) CASE_BUILTIN_F (BUILT_IN_EXP2) CASE_BUILTIN_F (BUILT_IN_FABS) CASE_BUILTIN_F (BUILT_IN_FDIM) CASE_BUILTIN_F (BUILT_IN_FREXP) CASE_BUILTIN_F (BUILT_IN_HYPOT) CASE_BUILTIN_F (BUILT_IN_POW10) CASE_BUILTIN_I (BUILT_IN_FFS) CASE_BUILTIN_I (BUILT_IN_PARITY) CASE_BUILTIN_I (BUILT_IN_POPCOUNT) /* Always true. */ return 1; CASE_BUILTIN_F (BUILT_IN_SQRT) /* sqrt(-0.0) is -0.0. */ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (t)))) return 1; return tree_expr_nonnegative_p (TREE_VALUE (arglist)); CASE_BUILTIN_F (BUILT_IN_ASINH) CASE_BUILTIN_F (BUILT_IN_ATAN) CASE_BUILTIN_F (BUILT_IN_ATANH) CASE_BUILTIN_F (BUILT_IN_CBRT) CASE_BUILTIN_F (BUILT_IN_CEIL) CASE_BUILTIN_F (BUILT_IN_ERF) CASE_BUILTIN_F (BUILT_IN_EXPM1) CASE_BUILTIN_F (BUILT_IN_FLOOR) CASE_BUILTIN_F (BUILT_IN_FMOD) CASE_BUILTIN_F (BUILT_IN_LDEXP) CASE_BUILTIN_F (BUILT_IN_LLRINT) CASE_BUILTIN_F (BUILT_IN_LLROUND) CASE_BUILTIN_F (BUILT_IN_LRINT) CASE_BUILTIN_F (BUILT_IN_LROUND) CASE_BUILTIN_F (BUILT_IN_MODF) CASE_BUILTIN_F (BUILT_IN_NEARBYINT) CASE_BUILTIN_F (BUILT_IN_POW) CASE_BUILTIN_F (BUILT_IN_RINT) CASE_BUILTIN_F (BUILT_IN_ROUND) CASE_BUILTIN_F (BUILT_IN_SIGNBIT) CASE_BUILTIN_F (BUILT_IN_SINH) CASE_BUILTIN_F (BUILT_IN_TANH) CASE_BUILTIN_F (BUILT_IN_TRUNC) /* True if the 1st argument is nonnegative. */ return tree_expr_nonnegative_p (TREE_VALUE (arglist)); CASE_BUILTIN_F (BUILT_IN_FMAX) /* True if the 1st OR 2nd arguments are nonnegative. */ return tree_expr_nonnegative_p (TREE_VALUE (arglist)) || tree_expr_nonnegative_p (TREE_VALUE (TREE_CHAIN (arglist))); CASE_BUILTIN_F (BUILT_IN_FMIN) /* True if the 1st AND 2nd arguments are nonnegative. */ return tree_expr_nonnegative_p (TREE_VALUE (arglist)) && tree_expr_nonnegative_p (TREE_VALUE (TREE_CHAIN (arglist))); CASE_BUILTIN_F (BUILT_IN_COPYSIGN) /* True if the 2nd argument is nonnegative. */ return tree_expr_nonnegative_p (TREE_VALUE (TREE_CHAIN (arglist))); default: break; #undef CASE_BUILTIN_F #undef CASE_BUILTIN_I } } /* ... fall through ... 
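(to the default case below, which still treats truth-valued codes as nonnegative)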
*/ default: if (truth_value_p (TREE_CODE (t))) /* Truth values evaluate to 0 or 1, which is nonnegative. */ return 1; } /* We don't know sign of `t', so be conservative and return false. */ return 0; } /* Return true when T is an address and is known to be nonzero. For floating point we further ensure that T is not denormal. Similar logic is present in nonzero_address in rtlanal.h */ static bool tree_expr_nonzero_p (tree t) { tree type = TREE_TYPE (t); /* Doing something useful for floating point would need more work. */ if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type)) return false; switch (TREE_CODE (t)) { case ABS_EXPR: if (!TYPE_UNSIGNED (type) && !flag_wrapv) return tree_expr_nonzero_p (TREE_OPERAND (t, 0)); case INTEGER_CST: return !integer_zerop (t); case PLUS_EXPR: if (!TYPE_UNSIGNED (type) && !flag_wrapv) { /* With the presence of negative values it is hard to say something. */ if (!tree_expr_nonnegative_p (TREE_OPERAND (t, 0)) || !tree_expr_nonnegative_p (TREE_OPERAND (t, 1))) return false; /* One of operands must be positive and the other non-negative. */ return (tree_expr_nonzero_p (TREE_OPERAND (t, 0)) || tree_expr_nonzero_p (TREE_OPERAND (t, 1))); } break; case MULT_EXPR: if (!TYPE_UNSIGNED (type) && !flag_wrapv) { return (tree_expr_nonzero_p (TREE_OPERAND (t, 0)) && tree_expr_nonzero_p (TREE_OPERAND (t, 1))); } break; case NOP_EXPR: { tree inner_type = TREE_TYPE (TREE_OPERAND (t, 0)); tree outer_type = TREE_TYPE (t); return (TYPE_PRECISION (inner_type) >= TYPE_PRECISION (outer_type) && tree_expr_nonzero_p (TREE_OPERAND (t, 0))); } break; case ADDR_EXPR: /* Weak declarations may link to NULL. */ if (DECL_P (TREE_OPERAND (t, 0))) return !DECL_WEAK (TREE_OPERAND (t, 0)); /* Constants and all other cases are never weak. */ return true; case COND_EXPR: return (tree_expr_nonzero_p (TREE_OPERAND (t, 1)) && tree_expr_nonzero_p (TREE_OPERAND (t, 2))); case MIN_EXPR: return (tree_expr_nonzero_p (TREE_OPERAND (t, 0)) && tree_expr_nonzero_p (TREE_OPERAND (t, 1))); case MAX_EXPR: if (tree_expr_nonzero_p (TREE_OPERAND (t, 0))) { /* When both operands are nonzero, then MAX must be too. */ if (tree_expr_nonzero_p (TREE_OPERAND (t, 1))) return true; /* MAX where operand 0 is positive is positive. */ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0)); } /* MAX where operand 1 is positive is positive. */ else if (tree_expr_nonzero_p (TREE_OPERAND (t, 1)) && tree_expr_nonnegative_p (TREE_OPERAND (t, 1))) return true; break; case COMPOUND_EXPR: case MODIFY_EXPR: case BIND_EXPR: return tree_expr_nonzero_p (TREE_OPERAND (t, 1)); case SAVE_EXPR: case NON_LVALUE_EXPR: return tree_expr_nonzero_p (TREE_OPERAND (t, 0)); case BIT_IOR_EXPR: return tree_expr_nonzero_p (TREE_OPERAND (t, 1)) || tree_expr_nonzero_p (TREE_OPERAND (t, 0)); default: break; } return false; } /* Return true if `r' is known to be non-negative. Only handles constants at the moment. */ int rtl_expr_nonnegative_p (rtx r) { switch (GET_CODE (r)) { case CONST_INT: return INTVAL (r) >= 0; case CONST_DOUBLE: if (GET_MODE (r) == VOIDmode) return CONST_DOUBLE_HIGH (r) >= 0; return 0; case CONST_VECTOR: { int units, i; rtx elt; units = CONST_VECTOR_NUNITS (r); for (i = 0; i < units; ++i) { elt = CONST_VECTOR_ELT (r, i); if (!rtl_expr_nonnegative_p (elt)) return 0; } return 1; } case SYMBOL_REF: case LABEL_REF: /* These are always nonnegative. */ return 1; default: return 0; } } /* See if we are applying CODE, a relational to the highest or lowest possible integer of TYPE. If so, then the result is a compile time constant. 
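For instance, if OP1 is the maximum value of an 8-bit unsigned type, OP0 > 255 folds to 0 and OP0 <= 255 folds to 1 outright, while OP0 >= 255 becomes OP0 == 255 and OP0 < 255 becomes OP0 != 255; comparisons against the type's minimum are handled symmetrically.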
*/ static tree fold_relational_hi_lo (enum tree_code *code_p, const tree type, tree *op0_p, tree *op1_p) { tree op0 = *op0_p; tree op1 = *op1_p; enum tree_code code = *code_p; int width = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (op1))); if (TREE_CODE (op1) == INTEGER_CST && ! TREE_CONSTANT_OVERFLOW (op1) && width <= HOST_BITS_PER_WIDE_INT && (INTEGRAL_TYPE_P (TREE_TYPE (op1)) || POINTER_TYPE_P (TREE_TYPE (op1)))) { unsigned HOST_WIDE_INT signed_max; unsigned HOST_WIDE_INT max, min; signed_max = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1; if (TYPE_UNSIGNED (TREE_TYPE (op1))) { max = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1; min = 0; } else { max = signed_max; min = ((unsigned HOST_WIDE_INT) -1 << (width - 1)); } if (TREE_INT_CST_HIGH (op1) == 0 && TREE_INT_CST_LOW (op1) == max) switch (code) { case GT_EXPR: return omit_one_operand (type, integer_zero_node, op0); case GE_EXPR: *code_p = EQ_EXPR; break; case LE_EXPR: return omit_one_operand (type, integer_one_node, op0); case LT_EXPR: *code_p = NE_EXPR; break; /* The GE_EXPR and LT_EXPR cases above are not normally reached because of previous transformations. */ default: break; } else if (TREE_INT_CST_HIGH (op1) == 0 && TREE_INT_CST_LOW (op1) == max - 1) switch (code) { case GT_EXPR: *code_p = EQ_EXPR; *op1_p = const_binop (PLUS_EXPR, op1, integer_one_node, 0); break; case LE_EXPR: *code_p = NE_EXPR; *op1_p = const_binop (PLUS_EXPR, op1, integer_one_node, 0); break; default: break; } else if (TREE_INT_CST_HIGH (op1) == (min ? -1 : 0) && TREE_INT_CST_LOW (op1) == min) switch (code) { case LT_EXPR: return omit_one_operand (type, integer_zero_node, op0); case LE_EXPR: *code_p = EQ_EXPR; break; case GE_EXPR: return omit_one_operand (type, integer_one_node, op0); case GT_EXPR: *code_p = NE_EXPR; break; default: break; } else if (TREE_INT_CST_HIGH (op1) == (min ? -1 : 0) && TREE_INT_CST_LOW (op1) == min + 1) switch (code) { case GE_EXPR: *code_p = NE_EXPR; *op1_p = const_binop (MINUS_EXPR, op1, integer_one_node, 0); break; case LT_EXPR: *code_p = EQ_EXPR; *op1_p = const_binop (MINUS_EXPR, op1, integer_one_node, 0); break; default: break; } else if (TREE_INT_CST_HIGH (op1) == 0 && TREE_INT_CST_LOW (op1) == signed_max && TYPE_UNSIGNED (TREE_TYPE (op1)) /* signed_type does not work on pointer types. */ && INTEGRAL_TYPE_P (TREE_TYPE (op1))) { /* The following case also applies to X < signed_max+1 and X >= signed_max+1 because previous transformations. */ if (code == LE_EXPR || code == GT_EXPR) { tree st0, st1, exp, retval; st0 = lang_hooks.types.signed_type (TREE_TYPE (op0)); st1 = lang_hooks.types.signed_type (TREE_TYPE (op1)); exp = build2 (code == LE_EXPR ? GE_EXPR: LT_EXPR, type, fold_convert (st0, op0), fold_convert (st1, integer_zero_node)); retval = nondestructive_fold_binary_to_constant (TREE_CODE (exp), TREE_TYPE (exp), TREE_OPERAND (exp, 0), TREE_OPERAND (exp, 1)); /* If we are in gimple form, then returning EXP would create non-gimple expressions. Clearing it is safe and insures we do not allow a non-gimple expression to escape. */ if (in_gimple_form) exp = NULL; return (retval ? retval : exp); } } } return NULL_TREE; } /* Given the components of a binary expression CODE, TYPE, OP0 and OP1, attempt to fold the expression to a constant without modifying TYPE, OP0 or OP1. If the expression could be simplified to a constant, then return the constant. If the expression would not be simplified to a constant, then return NULL_TREE. 
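For illustration (hypothetical operands): folding PLUS_EXPR over the integer constants 2 and 3 yields the constant 5, while folding it over a constant and a VAR_DECL yields NULL_TREE rather than a partially simplified tree.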
Note this is primarily designed to be called after gimplification of the tree structures and when at least one operand is a constant. As a result of those simplifying assumptions this routine is far simpler than the generic fold routine. */ tree nondestructive_fold_binary_to_constant (enum tree_code code, tree type, tree op0, tree op1) { int wins = 1; tree subop0; tree subop1; tree tem; /* If this is a commutative operation, and ARG0 is a constant, move it to ARG1 to reduce the number of tests below. */ if (commutative_tree_code (code) && (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST)) { tem = op0; op0 = op1; op1 = tem; } /* If either operand is a complex type, extract its real component. */ if (TREE_CODE (op0) == COMPLEX_CST) subop0 = TREE_REALPART (op0); else subop0 = op0; if (TREE_CODE (op1) == COMPLEX_CST) subop1 = TREE_REALPART (op1); else subop1 = op1; /* Note if either argument is not a real or integer constant. With a few exceptions, simplification is limited to cases where both arguments are constants. */ if ((TREE_CODE (subop0) != INTEGER_CST && TREE_CODE (subop0) != REAL_CST) || (TREE_CODE (subop1) != INTEGER_CST && TREE_CODE (subop1) != REAL_CST)) wins = 0; switch (code) { case PLUS_EXPR: /* (plus (address) (const_int)) is a constant. */ if (TREE_CODE (op0) == PLUS_EXPR && TREE_CODE (op1) == INTEGER_CST && (TREE_CODE (TREE_OPERAND (op0, 0)) == ADDR_EXPR || (TREE_CODE (TREE_OPERAND (op0, 0)) == NOP_EXPR && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (op0, 0), 0)) == ADDR_EXPR))) && TREE_CODE (TREE_OPERAND (op0, 1)) == INTEGER_CST) { return build2 (PLUS_EXPR, type, TREE_OPERAND (op0, 0), const_binop (PLUS_EXPR, op1, TREE_OPERAND (op0, 1), 0)); } case BIT_XOR_EXPR: binary: if (!wins) return NULL_TREE; /* Both arguments are constants. Simplify. */ tem = const_binop (code, op0, op1, 0); if (tem != NULL_TREE) { /* The return value should always have the same type as the original expression. */ if (TREE_TYPE (tem) != type) tem = fold_convert (type, tem); return tem; } return NULL_TREE; case MINUS_EXPR: /* Fold &x - &x. This can happen from &x.foo - &x. This is unsafe for certain floats even in non-IEEE formats. In IEEE, it is unsafe because it does wrong for NaNs. Also note that operand_equal_p is always false if an operand is volatile. */ if (! FLOAT_TYPE_P (type) && operand_equal_p (op0, op1, 0)) return fold_convert (type, integer_zero_node); goto binary; case MULT_EXPR: case BIT_AND_EXPR: /* Special case multiplication or bitwise AND where one argument is zero. */ if (! FLOAT_TYPE_P (type) && integer_zerop (op1)) return omit_one_operand (type, op1, op0); else if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (op0))) && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0))) && real_zerop (op1)) return omit_one_operand (type, op1, op0); goto binary; case BIT_IOR_EXPR: /* Special case when we know the result will be all ones. */ if (integer_all_onesp (op1)) return omit_one_operand (type, op1, op0); goto binary; case TRUNC_DIV_EXPR: case ROUND_DIV_EXPR: case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: case EXACT_DIV_EXPR: case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR: case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR: case RDIV_EXPR: /* Division by zero is undefined. 
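The checks below therefore refuse to fold such a division: an integer zero divisor, or a real zero divisor when the mode has no infinities, makes the function return NULL_TREE.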
*/ if (integer_zerop (op1)) return NULL_TREE; if (TREE_CODE (op1) == REAL_CST && !MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (op1))) && real_zerop (op1)) return NULL_TREE; goto binary; case MIN_EXPR: if (INTEGRAL_TYPE_P (type) && operand_equal_p (op1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST)) return omit_one_operand (type, op1, op0); goto binary; case MAX_EXPR: if (INTEGRAL_TYPE_P (type) && TYPE_MAX_VALUE (type) && operand_equal_p (op1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST)) return omit_one_operand (type, op1, op0); goto binary; case RSHIFT_EXPR: /* Optimize -1 >> x for arithmetic right shifts. */ if (integer_all_onesp (op0) && ! TYPE_UNSIGNED (type)) return omit_one_operand (type, op0, op1); /* ... fall through ... */ case LSHIFT_EXPR: if (integer_zerop (op0)) return omit_one_operand (type, op0, op1); /* Since negative shift count is not well-defined, don't try to compute it in the compiler. */ if (TREE_CODE (op1) == INTEGER_CST && tree_int_cst_sgn (op1) < 0) return NULL_TREE; goto binary; case LROTATE_EXPR: case RROTATE_EXPR: /* -1 rotated either direction by any amount is still -1. */ if (integer_all_onesp (op0)) return omit_one_operand (type, op0, op1); /* 0 rotated either direction by any amount is still zero. */ if (integer_zerop (op0)) return omit_one_operand (type, op0, op1); goto binary; case COMPLEX_EXPR: if (wins) return build_complex (type, op0, op1); return NULL_TREE; case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: /* If one arg is a real or integer constant, put it last. */ if ((TREE_CODE (op0) == INTEGER_CST && TREE_CODE (op1) != INTEGER_CST) || (TREE_CODE (op0) == REAL_CST && TREE_CODE (op0) != REAL_CST)) { tree temp; temp = op0; op0 = op1; op1 = temp; code = swap_tree_comparison (code); } /* Change X >= C to X > (C - 1) and X < C to X <= (C - 1) if C > 0. This transformation affects the cases which are handled in later optimizations involving comparisons with non-negative constants. */ if (TREE_CODE (op1) == INTEGER_CST && TREE_CODE (op0) != INTEGER_CST && tree_int_cst_sgn (op1) > 0) { switch (code) { case GE_EXPR: code = GT_EXPR; op1 = const_binop (MINUS_EXPR, op1, integer_one_node, 0); break; case LT_EXPR: code = LE_EXPR; op1 = const_binop (MINUS_EXPR, op1, integer_one_node, 0); break; default: break; } } tem = fold_relational_hi_lo (&code, type, &op0, &op1); if (tem) return tem; /* Fall through. */ case ORDERED_EXPR: case UNORDERED_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: if (!wins) return NULL_TREE; return fold_relational_const (code, type, op0, op1); case RANGE_EXPR: /* This could probably be handled. */ return NULL_TREE; case TRUTH_AND_EXPR: /* If second arg is constant zero, result is zero, but first arg must be evaluated. */ if (integer_zerop (op1)) return omit_one_operand (type, op1, op0); /* Likewise for first arg, but note that only the TRUTH_AND_EXPR case will be handled here. */ if (integer_zerop (op0)) return omit_one_operand (type, op0, op1); if (TREE_CODE (op0) == INTEGER_CST && TREE_CODE (op1) == INTEGER_CST) return constant_boolean_node (true, type); return NULL_TREE; case TRUTH_OR_EXPR: /* If second arg is constant true, result is true, but we must evaluate first arg. */ if (TREE_CODE (op1) == INTEGER_CST && ! integer_zerop (op1)) return omit_one_operand (type, op1, op0); /* Likewise for first arg, but note this only occurs here for TRUTH_OR_EXPR. */ if (TREE_CODE (op0) == INTEGER_CST && ! 
integer_zerop (op0)) return omit_one_operand (type, op0, op1); if (TREE_CODE (op0) == INTEGER_CST && TREE_CODE (op1) == INTEGER_CST) return constant_boolean_node (false, type); return NULL_TREE; case TRUTH_XOR_EXPR: if (TREE_CODE (op0) == INTEGER_CST && TREE_CODE (op1) == INTEGER_CST) { int x = ! integer_zerop (op0) ^ ! integer_zerop (op1); return constant_boolean_node (x, type); } return NULL_TREE; default: return NULL_TREE; } } /* Given the components of a unary expression CODE, TYPE and OP0, attempt to fold the expression to a constant without modifying TYPE or OP0. If the expression could be simplified to a constant, then return the constant. If the expression would not be simplified to a constant, then return NULL_TREE. Note this is primarily designed to be called after gimplification of the tree structures and when op0 is a constant. As a result of those simplifying assumptions this routine is far simpler than the generic fold routine. */ tree nondestructive_fold_unary_to_constant (enum tree_code code, tree type, tree op0) { /* Make sure we have a suitable constant argument. */ if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR) { tree subop; if (TREE_CODE (op0) == COMPLEX_CST) subop = TREE_REALPART (op0); else subop = op0; if (TREE_CODE (subop) != INTEGER_CST && TREE_CODE (subop) != REAL_CST) return NULL_TREE; } switch (code) { case NOP_EXPR: case FLOAT_EXPR: case CONVERT_EXPR: case FIX_TRUNC_EXPR: case FIX_FLOOR_EXPR: case FIX_CEIL_EXPR: return fold_convert_const (code, type, op0); case NEGATE_EXPR: if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST) return fold_negate_const (op0, type); else return NULL_TREE; case ABS_EXPR: if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST) return fold_abs_const (op0, type); else return NULL_TREE; case BIT_NOT_EXPR: if (TREE_CODE (op0) == INTEGER_CST) return fold_not_const (op0, type); else return NULL_TREE; case REALPART_EXPR: if (TREE_CODE (op0) == COMPLEX_CST) return TREE_REALPART (op0); else return NULL_TREE; case IMAGPART_EXPR: if (TREE_CODE (op0) == COMPLEX_CST) return TREE_IMAGPART (op0); else return NULL_TREE; case CONJ_EXPR: if (TREE_CODE (op0) == COMPLEX_CST && TREE_CODE (TREE_TYPE (op0)) == COMPLEX_TYPE) return build_complex (type, TREE_REALPART (op0), negate_expr (TREE_IMAGPART (op0))); return NULL_TREE; default: return NULL_TREE; } } /* If EXP represents referencing an element in a constant string (either via pointer arithmetic or array indexing), return the tree representing the value accessed, otherwise return NULL. */ tree fold_read_from_constant_string (tree exp) { if (TREE_CODE (exp) == INDIRECT_REF || TREE_CODE (exp) == ARRAY_REF) { tree exp1 = TREE_OPERAND (exp, 0); tree index; tree string; if (TREE_CODE (exp) == INDIRECT_REF) string = string_constant (exp1, &index); else { tree low_bound = array_ref_low_bound (exp); index = fold_convert (sizetype, TREE_OPERAND (exp, 1)); /* Optimize the special-case of a zero lower bound. We convert the low_bound to sizetype to avoid some problems with constant folding. (E.g. suppose the lower bound is 1, and its mode is QI. Without the conversion,l (ARRAY +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1)) +INDEX), which becomes (ARRAY+255+INDEX). Opps!) */ if (! 
integer_zerop (low_bound)) index = size_diffop (index, fold_convert (sizetype, low_bound)); string = exp1; } if (string && TREE_TYPE (exp) == TREE_TYPE (TREE_TYPE (string)) && TREE_CODE (string) == STRING_CST && TREE_CODE (index) == INTEGER_CST && compare_tree_int (index, TREE_STRING_LENGTH (string)) < 0 && (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (TREE_TYPE (string)))) == MODE_INT) && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (string)))) == 1)) return fold_convert (TREE_TYPE (exp), build_int_2 ((TREE_STRING_POINTER (string) [TREE_INT_CST_LOW (index)]), 0)); } return NULL; } /* Return the tree for neg (ARG0) when ARG0 is known to be either an integer constant or real constant. TYPE is the type of the result. */ static tree fold_negate_const (tree arg0, tree type) { tree t = NULL_TREE; if (TREE_CODE (arg0) == INTEGER_CST) { unsigned HOST_WIDE_INT low; HOST_WIDE_INT high; int overflow = neg_double (TREE_INT_CST_LOW (arg0), TREE_INT_CST_HIGH (arg0), &low, &high); t = build_int_2 (low, high); TREE_TYPE (t) = type; TREE_OVERFLOW (t) = (TREE_OVERFLOW (arg0) | force_fit_type (t, overflow && !TYPE_UNSIGNED (type))); TREE_CONSTANT_OVERFLOW (t) = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0); } else if (TREE_CODE (arg0) == REAL_CST) t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0))); #ifdef ENABLE_CHECKING else abort (); #endif return t; } /* Return the tree for abs (ARG0) when ARG0 is known to be either an integer constant or real constant. TYPE is the type of the result. */ tree fold_abs_const (tree arg0, tree type) { tree t = NULL_TREE; if (TREE_CODE (arg0) == INTEGER_CST) { /* If the value is unsigned, then the absolute value is the same as the ordinary value. */ if (TYPE_UNSIGNED (type)) return arg0; /* Similarly, if the value is non-negative. */ else if (INT_CST_LT (integer_minus_one_node, arg0)) return arg0; /* If the value is negative, then the absolute value is its negation. */ else { unsigned HOST_WIDE_INT low; HOST_WIDE_INT high; int overflow = neg_double (TREE_INT_CST_LOW (arg0), TREE_INT_CST_HIGH (arg0), &low, &high); t = build_int_2 (low, high); TREE_TYPE (t) = type; TREE_OVERFLOW (t) = (TREE_OVERFLOW (arg0) | force_fit_type (t, overflow)); TREE_CONSTANT_OVERFLOW (t) = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0); return t; } } else if (TREE_CODE (arg0) == REAL_CST) { if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0))) return build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0))); else return arg0; } #ifdef ENABLE_CHECKING else abort (); #endif return t; } /* Return the tree for not (ARG0) when ARG0 is known to be an integer constant. TYPE is the type of the result. */ static tree fold_not_const (tree arg0, tree type) { tree t = NULL_TREE; if (TREE_CODE (arg0) == INTEGER_CST) { t = build_int_2 (~ TREE_INT_CST_LOW (arg0), ~ TREE_INT_CST_HIGH (arg0)); TREE_TYPE (t) = type; force_fit_type (t, 0); TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0); TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0); } #ifdef ENABLE_CHECKING else abort (); #endif return t; } /* Given CODE, a relational operator, the target type, TYPE and two constant operands OP0 and OP1, return the result of the relational operation. If the result is not a compile time constant, then return NULL_TREE. */ static tree fold_relational_const (enum tree_code code, tree type, tree op0, tree op1) { int result, invert; /* From here on, the only cases we handle are when the result is known to be a constant. 
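For example (illustrative constants): 2.0 < NaN is not folded when flag_trapping_math is set, since an ordered comparison with a NaN operand may trap at run time; with trapping math disabled it folds to 0, and 2.0 != NaN folds to 1 either way.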
*/ if (TREE_CODE (op0) == REAL_CST && TREE_CODE (op1) == REAL_CST) { /* Handle the cases where either operand is a NaN. */ if (REAL_VALUE_ISNAN (TREE_REAL_CST (op0)) || REAL_VALUE_ISNAN (TREE_REAL_CST (op1))) { switch (code) { case EQ_EXPR: case ORDERED_EXPR: result = 0; break; case NE_EXPR: case UNORDERED_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: result = 1; break; case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case LTGT_EXPR: if (flag_trapping_math) return NULL_TREE; result = 0; break; default: abort (); } return constant_boolean_node (result, type); } /* From here on we're sure there are no NaNs. */ switch (code) { case ORDERED_EXPR: return constant_boolean_node (true, type); case UNORDERED_EXPR: return constant_boolean_node (false, type); case UNLT_EXPR: code = LT_EXPR; break; case UNLE_EXPR: code = LE_EXPR; break; case UNGT_EXPR: code = GT_EXPR; break; case UNGE_EXPR: code = GE_EXPR; break; case UNEQ_EXPR: code = EQ_EXPR; break; case LTGT_EXPR: code = NE_EXPR; break; default: break; } } /* From here on we only handle LT, LE, GT, GE, EQ and NE. To compute GT, swap the arguments and do LT. To compute GE, do LT and invert the result. To compute LE, swap the arguments, do LT and invert the result. To compute NE, do EQ and invert the result. Therefore, the code below must handle only EQ and LT. */ if (code == LE_EXPR || code == GT_EXPR) { tree tem = op0; op0 = op1; op1 = tem; code = swap_tree_comparison (code); } /* Note that it is safe to invert for real values here because we have already handled the one case that it matters. */ invert = 0; if (code == NE_EXPR || code == GE_EXPR) { invert = 1; code = invert_tree_comparison (code, false); } /* Compute a result for LT or EQ if args permit; Otherwise return T. */ if (TREE_CODE (op0) == INTEGER_CST && TREE_CODE (op1) == INTEGER_CST) { if (code == EQ_EXPR) result = tree_int_cst_equal (op0, op1); else if (TYPE_UNSIGNED (TREE_TYPE (op0))) result = INT_CST_LT_UNSIGNED (op0, op1); else result = INT_CST_LT (op0, op1); } else if (code == EQ_EXPR && !TREE_SIDE_EFFECTS (op0) && integer_zerop (op1) && tree_expr_nonzero_p (op0)) result = 0; /* Two real constants can be compared explicitly. */ else if (TREE_CODE (op0) == REAL_CST && TREE_CODE (op1) == REAL_CST) { if (code == EQ_EXPR) result = REAL_VALUES_EQUAL (TREE_REAL_CST (op0), TREE_REAL_CST (op1)); else result = REAL_VALUES_LESS (TREE_REAL_CST (op0), TREE_REAL_CST (op1)); } else return NULL_TREE; if (invert) result ^= 1; return constant_boolean_node (result, type); } /* Build an expression for the address of T. Folds away INDIRECT_REF to avoid confusing the gimplify process. */ tree build_fold_addr_expr_with_type (tree t, tree ptrtype) { if (TREE_CODE (t) == INDIRECT_REF) { t = TREE_OPERAND (t, 0); if (TREE_TYPE (t) != ptrtype) t = build1 (NOP_EXPR, ptrtype, t); } else { tree base = t; while (handled_component_p (base) || TREE_CODE (base) == REALPART_EXPR || TREE_CODE (base) == IMAGPART_EXPR) base = TREE_OPERAND (base, 0); if (DECL_P (base)) TREE_ADDRESSABLE (base) = 1; t = build1 (ADDR_EXPR, ptrtype, t); } return t; } tree build_fold_addr_expr (tree t) { return build_fold_addr_expr_with_type (t, build_pointer_type (TREE_TYPE (t))); } /* Builds an expression for an indirection through T, simplifying some cases. 
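The simplified forms are the ones noted in the body below: *&p becomes p, *(foo *)&fooarray becomes fooarray[0], and *(foo *)fooarrptr becomes (*fooarrptr)[0]; anything else is built as a plain INDIRECT_REF.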
*/ tree build_fold_indirect_ref (tree t) { tree type = TREE_TYPE (TREE_TYPE (t)); tree sub = t; tree subtype; STRIP_NOPS (sub); if (TREE_CODE (sub) == ADDR_EXPR) { tree op = TREE_OPERAND (sub, 0); tree optype = TREE_TYPE (op); /* *&p => p */ if (lang_hooks.types_compatible_p (type, optype)) return op; /* *(foo *)&fooarray => fooarray[0] */ else if (TREE_CODE (optype) == ARRAY_TYPE && lang_hooks.types_compatible_p (type, TREE_TYPE (optype))) return build4 (ARRAY_REF, type, op, size_zero_node, NULL_TREE, NULL_TREE); } /* *(foo *)fooarrptr => (*fooarrptr)[0] */ subtype = TREE_TYPE (sub); if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE && lang_hooks.types_compatible_p (type, TREE_TYPE (TREE_TYPE (subtype)))) { sub = build_fold_indirect_ref (sub); return build4 (ARRAY_REF, type, sub, size_zero_node, NULL_TREE, NULL_TREE); } return build1 (INDIRECT_REF, type, t); } /* Type information for fold-const.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_fold_const_h[] = { { &new_const, 1, sizeof (new_const), >_ggc_mx_tree_node, >_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; const struct ggc_cache_tab gt_ggc_rc_gt_fold_const_h[] = { { &size_htab, 1, sizeof (size_htab), >_ggc_mx_tree_node, >_pch_nx_tree_node, &ggc_marked_p }, LAST_GGC_CACHE_TAB }; const struct ggc_root_tab gt_pch_rc_gt_fold_const_h[] = { { &size_htab, 1, sizeof (size_htab), >_ggc_m_P9tree_node4htab, >_pch_n_P9tree_node4htab }, LAST_GGC_ROOT_TAB }; /* Expands front end tree to back end RTL for GCC. Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file handles the generation of rtl code from tree structure at the level of the function as a whole. It creates the rtl expressions for parameters and auto variables and has full responsibility for allocating stack slots. `expand_function_start' is called at the beginning of a function, before the function body is parsed, and `expand_function_end' is called after parsing the body. Call `assign_stack_local' to allocate a stack slot for a local variable. 
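A representative call, sketched here only as an illustration, is assign_stack_local (DECL_MODE (decl), GET_MODE_SIZE (DECL_MODE (decl)), 0), which hands back a MEM rtx addressing the new slot.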
This is usually done during the RTL generation for the function body, but it can also be done in the reload pass when a pseudo-register does not get a hard register. */ #ifndef LOCAL_ALIGNMENT #define LOCAL_ALIGNMENT(TYPE, ALIGNMENT) ALIGNMENT #endif #ifndef STACK_ALIGNMENT_NEEDED #define STACK_ALIGNMENT_NEEDED 1 #endif #define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT) /* Some systems use __main in a way incompatible with its use in gcc, in these cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to give the same symbol without quotes for an alternative entry point. You must define both, or neither. */ #ifndef NAME__MAIN #define NAME__MAIN "__main" #endif /* Round a value to the lowest integer less than it that is a multiple of the required alignment. Avoid using division in case the value is negative. Assume the alignment is a power of two. */ #define FLOOR_ROUND(VALUE,ALIGN) ((VALUE) & ~((ALIGN) - 1)) /* Similar, but round to the next highest integer that meets the alignment. */ #define CEIL_ROUND(VALUE,ALIGN) (((VALUE) + (ALIGN) - 1) & ~((ALIGN)- 1)) /* NEED_SEPARATE_AP means that we cannot derive ap from the value of fp during rtl generation. If they are different register numbers, this is always true. It may also be true if FIRST_PARM_OFFSET - STARTING_FRAME_OFFSET is not a constant during rtl generation. See fix_lexical_addr for details. */ #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM #define NEED_SEPARATE_AP #endif /* Nonzero if function being compiled doesn't contain any calls (ignoring the prologue and epilogue). This is set prior to local register allocation and is valid for the remaining compiler passes. */ int current_function_is_leaf; /* Nonzero if function being compiled doesn't contain any instructions that can throw an exception. This is set prior to final. */ int current_function_nothrow; /* Nonzero if function being compiled doesn't modify the stack pointer (ignoring the prologue and epilogue). This is only valid after life_analysis has run. */ int current_function_sp_is_unchanging; /* Nonzero if the function being compiled is a leaf function which only uses leaf registers. This is valid after reload (specifically after sched2) and is useful only if the port defines LEAF_REGISTERS. */ int current_function_uses_only_leaf_regs; /* Nonzero once virtual register instantiation has been done. assign_stack_local uses frame_pointer_rtx when this is nonzero. calls.c:emit_library_call_value_1 uses it to set up post-instantiation libcalls. */ int virtuals_instantiated; /* Assign unique numbers to labels generated for profiling, debugging, etc. */ static GTY(()) int funcdef_no; /* These variables hold pointers to functions to create and destroy target specific, per-function data structures. */ struct machine_function * (*init_machine_status) (void); /* The currently compiled function. */ struct function *cfun = 0; /* These arrays record the INSN_UIDs of the prologue and epilogue insns. */ static GTY(()) varray_type prologue; static GTY(()) varray_type epilogue; /* Array of INSN_UIDs to hold the INSN_UIDs for each sibcall epilogue in this function. */ static GTY(()) varray_type sibcall_epilogue; /* In order to evaluate some expressions, such as function calls returning structures in memory, we need to temporarily allocate stack locations. We record each allocated temporary in the following structure. Associated with each temporary slot is a nesting level. When we pop up one level, all temporaries associated with the previous level are freed. 
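As an illustration: a temporary created at level 3 while expanding a subexpression is released as soon as the level drops back to 2, unless it is marked as kept or explicitly preserved.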
Normally, all temporaries are freed after the execution of the statement in which they were created. However, if we are inside a ({...}) grouping, the result may be in a temporary and hence must be preserved. If the result could be in a temporary, we preserve it if we can determine which one it is in. If we cannot determine which temporary may contain the result, all temporaries are preserved. A temporary is preserved by pretending it was allocated at the previous nesting level. Automatic variables are also assigned temporary slots, at the nesting level where they are defined. They are marked a "kept" so that free_temp_slots will not free them. */ struct temp_slot GTY(()) { /* Points to next temporary slot. */ struct temp_slot *next; /* Points to previous temporary slot. */ struct temp_slot *prev; /* The rtx to used to reference the slot. */ rtx slot; /* The rtx used to represent the address if not the address of the slot above. May be an EXPR_LIST if multiple addresses exist. */ rtx address; /* The alignment (in bits) of the slot. */ unsigned int align; /* The size, in units, of the slot. */ HOST_WIDE_INT size; /* The type of the object in the slot, or zero if it doesn't correspond to a type. We use this to determine whether a slot can be reused. It can be reused if objects of the type of the new slot will always conflict with objects of the type of the old slot. */ tree type; /* Nonzero if this temporary is currently in use. */ char in_use; /* Nonzero if this temporary has its address taken. */ char addr_taken; /* Nesting level at which this slot is being used. */ int level; /* Nonzero if this should survive a call to free_temp_slots. */ int keep; /* The offset of the slot from the frame_pointer, including extra space for alignment. This info is for combine_temp_slots. */ HOST_WIDE_INT base_offset; /* The size of the slot, including extra space for alignment. This info is for combine_temp_slots. */ HOST_WIDE_INT full_size; }; /* Forward declarations. */ static rtx assign_stack_local_1 (enum machine_mode, HOST_WIDE_INT, int, struct function *); static struct temp_slot *find_temp_slot_from_address (rtx); static void instantiate_decls (tree, int); static void instantiate_decls_1 (tree, int); static void instantiate_decl (rtx, HOST_WIDE_INT, int); static rtx instantiate_new_reg (rtx, HOST_WIDE_INT *); static int instantiate_virtual_regs_1 (rtx *, rtx, int); static void pad_to_arg_alignment (struct args_size *, int, struct args_size *); static void pad_below (struct args_size *, enum machine_mode, tree); static void reorder_blocks_1 (rtx, tree, varray_type *); static void reorder_fix_fragments (tree); static int all_blocks (tree, tree *); static tree *get_block_vector (tree, int *); extern tree debug_find_var_in_block_tree (tree, tree); /* We always define `record_insns' even if it's not used so that we can always export `prologue_epilogue_contains'. 
*/ static void record_insns (rtx, varray_type *) ATTRIBUTE_UNUSED; static int contains (rtx, varray_type); #ifdef HAVE_return static void emit_return_into_block (basic_block, rtx); #endif static void purge_single_hard_subreg_set (rtx); #if defined(HAVE_epilogue) && defined(INCOMING_RETURN_ADDR_RTX) static rtx keep_stack_depressed (rtx); #endif static void prepare_function_start (tree); static void do_clobber_return_reg (rtx, void *); static void do_use_return_reg (rtx, void *); static void instantiate_virtual_regs_lossage (rtx); static void set_insn_locators (rtx, int) ATTRIBUTE_UNUSED; /* Pointer to chain of `struct function' for containing functions. */ struct function *outer_function_chain; /* Given a function decl for a containing function, return the `struct function' for it. */ struct function * find_function_data (tree decl) { struct function *p; for (p = outer_function_chain; p; p = p->outer) if (p->decl == decl) return p; abort (); } /* Save the current context for compilation of a nested function. This is called from language-specific code. The caller should use the enter_nested langhook to save any language-specific state, since this function knows only about language-independent variables. */ void push_function_context_to (tree context) { struct function *p; if (context) { if (context == current_function_decl) cfun->contains_functions = 1; else { struct function *containing = find_function_data (context); containing->contains_functions = 1; } } if (cfun == 0) init_dummy_function_start (); p = cfun; p->outer = outer_function_chain; outer_function_chain = p; lang_hooks.function.enter_nested (p); cfun = 0; } void push_function_context (void) { push_function_context_to (current_function_decl); } /* Restore the last saved context, at the end of a nested function. This function is called from language-specific code. */ void pop_function_context_from (tree context ATTRIBUTE_UNUSED) { struct function *p = outer_function_chain; cfun = p; outer_function_chain = p->outer; current_function_decl = p->decl; reg_renumber = 0; restore_emit_status (p); lang_hooks.function.leave_nested (p); /* Reset variables that have known state during rtx generation. */ rtx_equal_function_value_matters = 1; virtuals_instantiated = 0; generating_concat_p = 1; } void pop_function_context (void) { pop_function_context_from (current_function_decl); } /* Clear out all parts of the state in F that can safely be discarded after the function has been parsed, but not compiled, to let garbage collection reclaim the memory. */ void free_after_parsing (struct function *f) { /* f->expr->forced_labels is used by code generation. */ /* f->emit->regno_reg_rtx is used by code generation. */ /* f->varasm is used by code generation. */ /* f->eh->eh_return_stub_label is used by code generation. */ lang_hooks.function.final (f); f->stmt = NULL; } /* Clear out all parts of the state in F that can safely be discarded after the function has been compiled, to let garbage collection reclaim the memory. 
*/ void free_after_compilation (struct function *f) { f->eh = NULL; f->expr = NULL; f->emit = NULL; f->varasm = NULL; f->machine = NULL; f->x_avail_temp_slots = NULL; f->x_used_temp_slots = NULL; f->arg_offset_rtx = NULL; f->return_rtx = NULL; f->internal_arg_pointer = NULL; f->x_nonlocal_goto_handler_labels = NULL; f->x_return_label = NULL; f->x_naked_return_label = NULL; f->x_stack_slot_list = NULL; f->x_tail_recursion_reentry = NULL; f->x_arg_pointer_save_area = NULL; f->x_parm_birth_insn = NULL; f->original_arg_vector = NULL; f->original_decl_initial = NULL; f->epilogue_delay_list = NULL; } /* Allocate fixed slots in the stack frame of the current function. */ /* Return size needed for stack frame based on slots so far allocated in function F. This size counts from zero. It is not rounded to PREFERRED_STACK_BOUNDARY; the caller may have to do that. */ HOST_WIDE_INT get_func_frame_size (struct function *f) { #ifdef FRAME_GROWS_DOWNWARD return -f->x_frame_offset; #else return f->x_frame_offset; #endif } /* Return size needed for stack frame based on slots so far allocated. This size counts from zero. It is not rounded to PREFERRED_STACK_BOUNDARY; the caller may have to do that. */ HOST_WIDE_INT get_frame_size (void) { return get_func_frame_size (cfun); } /* Allocate a stack slot of SIZE bytes and return a MEM rtx for it with machine mode MODE. ALIGN controls the amount of alignment for the address of the slot: 0 means according to MODE, -1 means use BIGGEST_ALIGNMENT and round size to multiple of that, -2 means use BITS_PER_UNIT, positive specifies alignment boundary in bits. We do not round to stack_boundary here. FUNCTION specifies the function to allocate in. */ static rtx assign_stack_local_1 (enum machine_mode mode, HOST_WIDE_INT size, int align, struct function *function) { rtx x, addr; int bigend_correction = 0; int alignment; int frame_off, frame_alignment, frame_phase; if (align == 0) { tree type; if (mode == BLKmode) alignment = BIGGEST_ALIGNMENT; else alignment = GET_MODE_ALIGNMENT (mode); /* Allow the target to (possibly) increase the alignment of this stack slot. */ type = lang_hooks.types.type_for_mode (mode, 0); if (type) alignment = LOCAL_ALIGNMENT (type, alignment); alignment /= BITS_PER_UNIT; } else if (align == -1) { alignment = BIGGEST_ALIGNMENT / BITS_PER_UNIT; size = CEIL_ROUND (size, alignment); } else if (align == -2) alignment = 1; /* BITS_PER_UNIT / BITS_PER_UNIT */ else alignment = align / BITS_PER_UNIT; #ifdef FRAME_GROWS_DOWNWARD function->x_frame_offset -= size; #endif /* Ignore alignment we can't do with expected alignment of the boundary. */ if (alignment * BITS_PER_UNIT > PREFERRED_STACK_BOUNDARY) alignment = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT; if (function->stack_alignment_needed < alignment * BITS_PER_UNIT) function->stack_alignment_needed = alignment * BITS_PER_UNIT; /* Calculate how many bytes the start of local variables is off from stack alignment. */ frame_alignment = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT; frame_off = STARTING_FRAME_OFFSET % frame_alignment; frame_phase = frame_off ? frame_alignment - frame_off : 0; /* Round the frame offset to the specified alignment. The default is to always honor requests to align the stack but a port may choose to do its own stack alignment by defining STACK_ALIGNMENT_NEEDED. */ if (STACK_ALIGNMENT_NEEDED || mode != BLKmode || size != 0) { /* We must be careful here, since FRAME_OFFSET might be negative and division with a negative dividend isn't as well defined as we might like. 
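For instance, -12 / 8 may round toward zero or toward negative infinity depending on the host, whereas the bit operations used by FLOOR_ROUND and CEIL_ROUND give -12 & ~7 = -16 and (-12 + 7) & ~7 = -8 unconditionally.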
So we instead assume that ALIGNMENT is a power of two and use logical operations which are unambiguous. */ #ifdef FRAME_GROWS_DOWNWARD function->x_frame_offset = (FLOOR_ROUND (function->x_frame_offset - frame_phase, alignment) + frame_phase); #else function->x_frame_offset = (CEIL_ROUND (function->x_frame_offset - frame_phase, alignment) + frame_phase); #endif } /* On a big-endian machine, if we are allocating more space than we will use, use the least significant bytes of those that are allocated. */ if (BYTES_BIG_ENDIAN && mode != BLKmode) bigend_correction = size - GET_MODE_SIZE (mode); /* If we have already instantiated virtual registers, return the actual address relative to the frame pointer. */ if (function == cfun && virtuals_instantiated) addr = plus_constant (frame_pointer_rtx, trunc_int_for_mode (frame_offset + bigend_correction + STARTING_FRAME_OFFSET, Pmode)); else addr = plus_constant (virtual_stack_vars_rtx, trunc_int_for_mode (function->x_frame_offset + bigend_correction, Pmode)); #ifndef FRAME_GROWS_DOWNWARD function->x_frame_offset += size; #endif x = gen_rtx_MEM (mode, addr); function->x_stack_slot_list = gen_rtx_EXPR_LIST (VOIDmode, x, function->x_stack_slot_list); return x; } /* Wrapper around assign_stack_local_1; assign a local stack slot for the current function. */ rtx assign_stack_local (enum machine_mode mode, HOST_WIDE_INT size, int align) { return assign_stack_local_1 (mode, size, align, cfun); } /* Removes temporary slot TEMP from LIST. */ static void cut_slot_from_list (struct temp_slot *temp, struct temp_slot **list) { if (temp->next) temp->next->prev = temp->prev; if (temp->prev) temp->prev->next = temp->next; else *list = temp->next; temp->prev = temp->next = NULL; } /* Inserts temporary slot TEMP to LIST. */ static void insert_slot_to_list (struct temp_slot *temp, struct temp_slot **list) { temp->next = *list; if (*list) (*list)->prev = temp; temp->prev = NULL; *list = temp; } /* Returns the list of used temp slots at LEVEL. */ static struct temp_slot ** temp_slots_at_level (int level) { level++; if (!used_temp_slots) VARRAY_GENERIC_PTR_INIT (used_temp_slots, 3, "used_temp_slots"); while (level >= (int) VARRAY_ACTIVE_SIZE (used_temp_slots)) VARRAY_PUSH_GENERIC_PTR (used_temp_slots, NULL); return (struct temp_slot **) &VARRAY_GENERIC_PTR (used_temp_slots, level); } /* Returns the maximal temporary slot level. */ static int max_slot_level (void) { if (!used_temp_slots) return -1; return VARRAY_ACTIVE_SIZE (used_temp_slots) - 1; } /* Moves temporary slot TEMP to LEVEL. */ static void move_slot_to_level (struct temp_slot *temp, int level) { cut_slot_from_list (temp, temp_slots_at_level (temp->level)); insert_slot_to_list (temp, temp_slots_at_level (level)); temp->level = level; } /* Make temporary slot TEMP available. */ static void make_slot_available (struct temp_slot *temp) { cut_slot_from_list (temp, temp_slots_at_level (temp->level)); insert_slot_to_list (temp, &avail_temp_slots); temp->in_use = 0; temp->level = -1; } /* Allocate a temporary stack slot and record it for possible later reuse. MODE is the machine mode to be given to the returned rtx. SIZE is the size in units of the space required. We do no rounding here since assign_stack_local will do any required rounding. KEEP is 1 if this slot is to be retained after a call to free_temp_slots. Automatic variables for a block are allocated with this flag. KEEP is 2 if we allocate a longer term temporary, whose lifetime is controlled by CLEANUP_POINT_EXPRs. 
KEEP is 3 if we are to allocate something at an inner level to be treated as a variable in the block (e.g., a SAVE_EXPR). TYPE is the type that will be used for the stack slot. */ rtx assign_stack_temp_for_type (enum machine_mode mode, HOST_WIDE_INT size, int keep, tree type) { unsigned int align; struct temp_slot *p, *best_p = 0, *selected = NULL, **pp; rtx slot; /* If SIZE is -1 it means that somebody tried to allocate a temporary of a variable size. */ if (size == -1) abort (); if (mode == BLKmode) align = BIGGEST_ALIGNMENT; else align = GET_MODE_ALIGNMENT (mode); if (! type) type = lang_hooks.types.type_for_mode (mode, 0); if (type) align = LOCAL_ALIGNMENT (type, align); /* Try to find an available, already-allocated temporary of the proper mode which meets the size and alignment requirements. Choose the smallest one with the closest alignment. */ for (p = avail_temp_slots; p; p = p->next) { if (p->align >= align && p->size >= size && GET_MODE (p->slot) == mode && objects_must_conflict_p (p->type, type) && (best_p == 0 || best_p->size > p->size || (best_p->size == p->size && best_p->align > p->align))) { if (p->align == align && p->size == size) { selected = p; cut_slot_from_list (selected, &avail_temp_slots); best_p = 0; break; } best_p = p; } } /* Make our best, if any, the one to use. */ if (best_p) { selected = best_p; cut_slot_from_list (selected, &avail_temp_slots); /* If there are enough aligned bytes left over, make them into a new temp_slot so that the extra bytes don't get wasted. Do this only for BLKmode slots, so that we can be sure of the alignment. */ if (GET_MODE (best_p->slot) == BLKmode) { int alignment = best_p->align / BITS_PER_UNIT; HOST_WIDE_INT rounded_size = CEIL_ROUND (size, alignment); if (best_p->size - rounded_size >= alignment) { p = ggc_alloc (sizeof (struct temp_slot)); p->in_use = p->addr_taken = 0; p->size = best_p->size - rounded_size; p->base_offset = best_p->base_offset + rounded_size; p->full_size = best_p->full_size - rounded_size; p->slot = gen_rtx_MEM (BLKmode, plus_constant (XEXP (best_p->slot, 0), rounded_size)); p->align = best_p->align; p->address = 0; p->type = best_p->type; insert_slot_to_list (p, &avail_temp_slots); stack_slot_list = gen_rtx_EXPR_LIST (VOIDmode, p->slot, stack_slot_list); best_p->size = rounded_size; best_p->full_size = rounded_size; } } } /* If we still didn't find one, make a new temporary. */ if (selected == 0) { HOST_WIDE_INT frame_offset_old = frame_offset; p = ggc_alloc (sizeof (struct temp_slot)); /* We are passing an explicit alignment request to assign_stack_local. One side effect of that is assign_stack_local will not round SIZE to ensure the frame offset remains suitably aligned. So for requests which depended on the rounding of SIZE, we go ahead and round it now. We also make sure ALIGNMENT is at least BIGGEST_ALIGNMENT. */ if (mode == BLKmode && align < BIGGEST_ALIGNMENT) abort (); p->slot = assign_stack_local (mode, (mode == BLKmode ? CEIL_ROUND (size, (int) align / BITS_PER_UNIT) : size), align); p->align = align; /* The following slot size computation is necessary because we don't know the actual size of the temporary slot until assign_stack_local has performed all the frame alignment and size rounding for the requested temporary. Note that extra space added for alignment can be either above or below this stack slot depending on which way the frame grows. We include the extra space if and only if it is above this slot. 
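For instance, with a downward-growing frame, if frame_offset was -32 before the assign_stack_local call and -48 afterwards, the slot is recorded with size and full_size 16 and base_offset -48, even if only 12 bytes were requested.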
*/ #ifdef FRAME_GROWS_DOWNWARD p->size = frame_offset_old - frame_offset; #else p->size = size; #endif /* Now define the fields used by combine_temp_slots. */ #ifdef FRAME_GROWS_DOWNWARD p->base_offset = frame_offset; p->full_size = frame_offset_old - frame_offset; #else p->base_offset = frame_offset_old; p->full_size = frame_offset - frame_offset_old; #endif p->address = 0; selected = p; } p = selected; p->in_use = 1; p->addr_taken = 0; p->type = type; if (keep == 2) { p->level = target_temp_slot_level; p->keep = 1; } else if (keep == 3) { p->level = var_temp_slot_level; p->keep = 0; } else { p->level = temp_slot_level; p->keep = keep; } pp = temp_slots_at_level (p->level); insert_slot_to_list (p, pp); /* Create a new MEM rtx to avoid clobbering MEM flags of old slots. */ slot = gen_rtx_MEM (mode, XEXP (p->slot, 0)); stack_slot_list = gen_rtx_EXPR_LIST (VOIDmode, slot, stack_slot_list); /* If we know the alias set for the memory that will be used, use it. If there's no TYPE, then we don't know anything about the alias set for the memory. */ set_mem_alias_set (slot, type ? get_alias_set (type) : 0); set_mem_align (slot, align); /* If a type is specified, set the relevant flags. */ if (type != 0) { RTX_UNCHANGING_P (slot) = (lang_hooks.honor_readonly && TYPE_READONLY (type)); MEM_VOLATILE_P (slot) = TYPE_VOLATILE (type); MEM_SET_IN_STRUCT_P (slot, AGGREGATE_TYPE_P (type)); } return slot; } /* Allocate a temporary stack slot and record it for possible later reuse. First three arguments are same as in preceding function. */ rtx assign_stack_temp (enum machine_mode mode, HOST_WIDE_INT size, int keep) { return assign_stack_temp_for_type (mode, size, keep, NULL_TREE); } /* Assign a temporary. If TYPE_OR_DECL is a decl, then we are doing it on behalf of the decl and so that should be used in error messages. In either case, we allocate of the given type. KEEP is as for assign_stack_temp. MEMORY_REQUIRED is 1 if the result must be addressable stack memory; it is 0 if a register is OK. DONT_PROMOTE is 1 if we should not promote values in register to wider modes. */ rtx assign_temp (tree type_or_decl, int keep, int memory_required, int dont_promote ATTRIBUTE_UNUSED) { tree type, decl; enum machine_mode mode; #ifdef PROMOTE_MODE int unsignedp; #endif if (DECL_P (type_or_decl)) decl = type_or_decl, type = TREE_TYPE (decl); else decl = NULL, type = type_or_decl; mode = TYPE_MODE (type); #ifdef PROMOTE_MODE unsignedp = TYPE_UNSIGNED (type); #endif if (mode == BLKmode || memory_required) { HOST_WIDE_INT size = int_size_in_bytes (type); tree size_tree; rtx tmp; /* Zero sized arrays are GNU C extension. Set size to 1 to avoid problems with allocating the stack space. */ if (size == 0) size = 1; /* Unfortunately, we don't yet know how to allocate variable-sized temporaries. However, sometimes we have a fixed upper limit on the size (which is stored in TYPE_ARRAY_MAX_SIZE) and can use that instead. This is the case for Chill variable-sized strings. */ if (size == -1 && TREE_CODE (type) == ARRAY_TYPE && TYPE_ARRAY_MAX_SIZE (type) != NULL_TREE && host_integerp (TYPE_ARRAY_MAX_SIZE (type), 1)) size = tree_low_cst (TYPE_ARRAY_MAX_SIZE (type), 1); /* If we still haven't been able to get a size, see if the language can compute a maximum size. */ if (size == -1 && (size_tree = lang_hooks.types.max_size (type)) != 0 && host_integerp (size_tree, 1)) size = tree_low_cst (size_tree, 1); /* The size of the temporary may be too large to fit into an integer. */ /* ??? 
Not sure this should happen except for user silliness, so limit this to things that aren't compiler-generated temporaries. The rest of the time we'll abort in assign_stack_temp_for_type. */ if (decl && size == -1 && TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST) { error ("%Jsize of variable '%D' is too large", decl, decl); size = 1; } tmp = assign_stack_temp_for_type (mode, size, keep, type); return tmp; } #ifdef PROMOTE_MODE if (! dont_promote) mode = promote_mode (type, mode, &unsignedp, 0); #endif return gen_reg_rtx (mode); } /* Combine temporary stack slots which are adjacent on the stack. This allows for better use of already allocated stack space. This is only done for BLKmode slots because we can be sure that we won't have alignment problems in this case. */ void combine_temp_slots (void) { struct temp_slot *p, *q, *next, *next_q; int num_slots; /* We can't combine slots, because the information about which slot is in which alias set will be lost. */ if (flag_strict_aliasing) return; /* If there are a lot of temp slots, don't do anything unless high levels of optimization. */ if (! flag_expensive_optimizations) for (p = avail_temp_slots, num_slots = 0; p; p = p->next, num_slots++) if (num_slots > 100 || (num_slots > 10 && optimize == 0)) return; for (p = avail_temp_slots; p; p = next) { int delete_p = 0; next = p->next; if (GET_MODE (p->slot) != BLKmode) continue; for (q = p->next; q; q = next_q) { int delete_q = 0; next_q = q->next; if (GET_MODE (q->slot) != BLKmode) continue; if (p->base_offset + p->full_size == q->base_offset) { /* Q comes after P; combine Q into P. */ p->size += q->size; p->full_size += q->full_size; delete_q = 1; } else if (q->base_offset + q->full_size == p->base_offset) { /* P comes after Q; combine P into Q. */ q->size += p->size; q->full_size += p->full_size; delete_p = 1; break; } if (delete_q) cut_slot_from_list (q, &avail_temp_slots); } /* Either delete P or advance past it. */ if (delete_p) cut_slot_from_list (p, &avail_temp_slots); } } /* Find the temp slot corresponding to the object at address X. */ static struct temp_slot * find_temp_slot_from_address (rtx x) { struct temp_slot *p; rtx next; int i; for (i = max_slot_level (); i >= 0; i--) for (p = *temp_slots_at_level (i); p; p = p->next) { if (XEXP (p->slot, 0) == x || p->address == x || (GET_CODE (x) == PLUS && XEXP (x, 0) == virtual_stack_vars_rtx && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= p->base_offset && INTVAL (XEXP (x, 1)) < p->base_offset + p->full_size)) return p; else if (p->address != 0 && GET_CODE (p->address) == EXPR_LIST) for (next = p->address; next; next = XEXP (next, 1)) if (XEXP (next, 0) == x) return p; } /* If we have a sum involving a register, see if it points to a temp slot. */ if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0)) && (p = find_temp_slot_from_address (XEXP (x, 0))) != 0) return p; else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 1)) && (p = find_temp_slot_from_address (XEXP (x, 1))) != 0) return p; return 0; } /* Indicate that NEW is an alternate way of referring to the temp slot that previously was known by OLD. */ void update_temp_slot_address (rtx old, rtx new) { struct temp_slot *p; if (rtx_equal_p (old, new)) return; p = find_temp_slot_from_address (old); /* If we didn't find one, see if both OLD is a PLUS. If so, and NEW is a register, see if one operand of the PLUS is a temporary location. If so, NEW points into it. Otherwise, if both OLD and NEW are a PLUS and if there is a register in common between them. 
If so, try a recursive call on those values. */ if (p == 0) { if (GET_CODE (old) != PLUS) return; if (REG_P (new)) { update_temp_slot_address (XEXP (old, 0), new); update_temp_slot_address (XEXP (old, 1), new); return; } else if (GET_CODE (new) != PLUS) return; if (rtx_equal_p (XEXP (old, 0), XEXP (new, 0))) update_temp_slot_address (XEXP (old, 1), XEXP (new, 1)); else if (rtx_equal_p (XEXP (old, 1), XEXP (new, 0))) update_temp_slot_address (XEXP (old, 0), XEXP (new, 1)); else if (rtx_equal_p (XEXP (old, 0), XEXP (new, 1))) update_temp_slot_address (XEXP (old, 1), XEXP (new, 0)); else if (rtx_equal_p (XEXP (old, 1), XEXP (new, 1))) update_temp_slot_address (XEXP (old, 0), XEXP (new, 0)); return; } /* Otherwise add an alias for the temp's address. */ else if (p->address == 0) p->address = new; else { if (GET_CODE (p->address) != EXPR_LIST) p->address = gen_rtx_EXPR_LIST (VOIDmode, p->address, NULL_RTX); p->address = gen_rtx_EXPR_LIST (VOIDmode, new, p->address); } } /* If X could be a reference to a temporary slot, mark the fact that its address was taken. */ void mark_temp_addr_taken (rtx x) { struct temp_slot *p; if (x == 0) return; /* If X is not in memory or is at a constant address, it cannot be in a temporary slot. */ if (!MEM_P (x) || CONSTANT_P (XEXP (x, 0))) return; p = find_temp_slot_from_address (XEXP (x, 0)); if (p != 0) p->addr_taken = 1; } /* If X could be a reference to a temporary slot, mark that slot as belonging to the level one higher than the current level. If X matched one of our slots, just mark that one. Otherwise, we can't easily predict which it is, so upgrade all of them. Kept slots need not be touched. This is called when an ({...}) construct occurs and a statement returns a value in memory. */ void preserve_temp_slots (rtx x) { struct temp_slot *p = 0, *next; /* If there is no result, we still might have some objects whose addresses were taken, so we need to make sure they stay around. */ if (x == 0) { for (p = *temp_slots_at_level (temp_slot_level); p; p = next) { next = p->next; if (p->addr_taken) move_slot_to_level (p, temp_slot_level - 1); } return; } /* If X is a register that is being used as a pointer, see if we have a temporary slot we know it points to. To be consistent with the code below, we really should preserve all non-kept slots if we can't find a match, but that seems to be much too costly. */ if (REG_P (x) && REG_POINTER (x)) p = find_temp_slot_from_address (x); /* If X is not in memory or is at a constant address, it cannot be in a temporary slot, but it can contain something whose address was taken. */ if (p == 0 && (!MEM_P (x) || CONSTANT_P (XEXP (x, 0)))) { for (p = *temp_slots_at_level (temp_slot_level); p; p = next) { next = p->next; if (p->addr_taken) move_slot_to_level (p, temp_slot_level - 1); } return; } /* First see if we can find a match. */ if (p == 0) p = find_temp_slot_from_address (XEXP (x, 0)); if (p != 0) { /* Move everything at our level whose address was taken to our new level in case we used its address. */ struct temp_slot *q; if (p->level == temp_slot_level) { for (q = *temp_slots_at_level (temp_slot_level); q; q = next) { next = q->next; if (p != q && q->addr_taken) move_slot_to_level (q, temp_slot_level - 1); } move_slot_to_level (p, temp_slot_level - 1); p->addr_taken = 0; } return; } /* Otherwise, preserve all non-kept slots at this level. */ for (p = *temp_slots_at_level (temp_slot_level); p; p = next) { next = p->next; if (!p->keep) move_slot_to_level (p, temp_slot_level - 1); } } /* Free all temporaries used so far. 
This is normally called at the end of generating code for a statement. */ void free_temp_slots (void) { struct temp_slot *p, *next; for (p = *temp_slots_at_level (temp_slot_level); p; p = next) { next = p->next; if (!p->keep) make_slot_available (p); } combine_temp_slots (); } /* Push deeper into the nesting level for stack temporaries. */ void push_temp_slots (void) { temp_slot_level++; } /* Pop a temporary nesting level. All slots in use in the current level are freed. */ void pop_temp_slots (void) { struct temp_slot *p, *next; for (p = *temp_slots_at_level (temp_slot_level); p; p = next) { next = p->next; make_slot_available (p); } combine_temp_slots (); temp_slot_level--; } /* Initialize temporary slots. */ void init_temp_slots (void) { /* We have not allocated any temporaries yet. */ avail_temp_slots = 0; used_temp_slots = 0; temp_slot_level = 0; var_temp_slot_level = 0; target_temp_slot_level = 0; } /* These routines are responsible for converting virtual register references to the actual hard register references once RTL generation is complete. The following four variables are used for communication between the routines. They contain the offsets of the virtual registers from their respective hard registers. */ static int in_arg_offset; static int var_offset; static int dynamic_offset; static int out_arg_offset; static int cfa_offset; /* In most machines, the stack pointer register is equivalent to the bottom of the stack. */ #ifndef STACK_POINTER_OFFSET #define STACK_POINTER_OFFSET 0 #endif /* If not defined, pick an appropriate default for the offset of dynamically allocated memory depending on the value of ACCUMULATE_OUTGOING_ARGS, REG_PARM_STACK_SPACE, and OUTGOING_REG_PARM_STACK_SPACE. */ #ifndef STACK_DYNAMIC_OFFSET /* The bottom of the stack points to the actual arguments. If REG_PARM_STACK_SPACE is defined, this includes the space for the register parameters. However, if OUTGOING_REG_PARM_STACK space is not defined, stack space for register parameters is not pushed by the caller, but rather part of the fixed stack areas and hence not included in `current_function_outgoing_args_size'. Nevertheless, we must allow for it when allocating stack dynamic objects. */ #if defined(REG_PARM_STACK_SPACE) && ! defined(OUTGOING_REG_PARM_STACK_SPACE) #define STACK_DYNAMIC_OFFSET(FNDECL) \ ((ACCUMULATE_OUTGOING_ARGS \ ? (current_function_outgoing_args_size + REG_PARM_STACK_SPACE (FNDECL)) : 0)\ + (STACK_POINTER_OFFSET)) \ #else #define STACK_DYNAMIC_OFFSET(FNDECL) \ ((ACCUMULATE_OUTGOING_ARGS ? current_function_outgoing_args_size : 0) \ + (STACK_POINTER_OFFSET)) #endif #endif /* On most machines, the CFA coincides with the first incoming parm. */ #ifndef ARG_POINTER_CFA_OFFSET #define ARG_POINTER_CFA_OFFSET(FNDECL) FIRST_PARM_OFFSET (FNDECL) #endif /* Convert a SET of a hard subreg to a set of the appropriate hard register. A subroutine of purge_hard_subreg_sets. */ static void purge_single_hard_subreg_set (rtx pattern) { rtx reg = SET_DEST (pattern); enum machine_mode mode = GET_MODE (SET_DEST (pattern)); int offset = 0; if (GET_CODE (reg) == SUBREG && REG_P (SUBREG_REG (reg)) && REGNO (SUBREG_REG (reg)) < FIRST_PSEUDO_REGISTER) { offset = subreg_regno_offset (REGNO (SUBREG_REG (reg)), GET_MODE (SUBREG_REG (reg)), SUBREG_BYTE (reg), GET_MODE (reg)); reg = SUBREG_REG (reg); } if (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER) { reg = gen_rtx_REG (mode, REGNO (reg) + offset); SET_DEST (pattern) = reg; } } /* Eliminate all occurrences of SETs of hard subregs from INSNS. 
The only such SETs that we expect to see are those left in because integrate can't handle sets of parts of a return value register. We don't use alter_subreg because we only want to eliminate subregs of hard registers. */ void purge_hard_subreg_sets (rtx insn) { for (; insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { rtx pattern = PATTERN (insn); switch (GET_CODE (pattern)) { case SET: if (GET_CODE (SET_DEST (pattern)) == SUBREG) purge_single_hard_subreg_set (pattern); break; case PARALLEL: { int j; for (j = XVECLEN (pattern, 0) - 1; j >= 0; j--) { rtx inner_pattern = XVECEXP (pattern, 0, j); if (GET_CODE (inner_pattern) == SET && GET_CODE (SET_DEST (inner_pattern)) == SUBREG) purge_single_hard_subreg_set (inner_pattern); } } break; default: break; } } } } /* Pass through the INSNS of function FNDECL and convert virtual register references to hard register references. */ void instantiate_virtual_regs (void) { rtx insn; /* Compute the offsets to use for this function. */ in_arg_offset = FIRST_PARM_OFFSET (current_function_decl); var_offset = STARTING_FRAME_OFFSET; dynamic_offset = STACK_DYNAMIC_OFFSET (current_function_decl); out_arg_offset = STACK_POINTER_OFFSET; cfa_offset = ARG_POINTER_CFA_OFFSET (current_function_decl); /* Scan all variables and parameters of this function. For each that is in memory, instantiate all virtual registers if the result is a valid address. If not, we do it later. That will handle most uses of virtual regs on many machines. */ instantiate_decls (current_function_decl, 1); /* Initialize recognition, indicating that volatile is OK. */ init_recog (); /* Scan through all the insns, instantiating every virtual register still present. */ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CALL_INSN) { instantiate_virtual_regs_1 (&PATTERN (insn), insn, 1); if (INSN_DELETED_P (insn)) continue; instantiate_virtual_regs_1 (&REG_NOTES (insn), NULL_RTX, 0); /* Instantiate any virtual registers in CALL_INSN_FUNCTION_USAGE. */ if (GET_CODE (insn) == CALL_INSN) instantiate_virtual_regs_1 (&CALL_INSN_FUNCTION_USAGE (insn), NULL_RTX, 0); /* Past this point all ASM statements should match. Verify that to avoid failures later in the compilation process. */ if (asm_noperands (PATTERN (insn)) >= 0 && ! check_asm_operands (PATTERN (insn))) instantiate_virtual_regs_lossage (insn); } /* Now instantiate the remaining register equivalences for debugging info. These will not be valid addresses. */ instantiate_decls (current_function_decl, 0); /* Indicate that, from now on, assign_stack_local should use frame_pointer_rtx. */ virtuals_instantiated = 1; } /* Scan all decls in FNDECL (both variables and parameters) and instantiate all virtual registers in their DECL_RTL's. If VALID_ONLY, do this only if the resulting address is still valid. Otherwise, always do it. */ static void instantiate_decls (tree fndecl, int valid_only) { tree decl; /* Process all parameters of the function. */ for (decl = DECL_ARGUMENTS (fndecl); decl; decl = TREE_CHAIN (decl)) { HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl)); HOST_WIDE_INT size_rtl; instantiate_decl (DECL_RTL (decl), size, valid_only); /* If the parameter was promoted, then the incoming RTL mode may be larger than the declared type size. We must use the larger of the two sizes. 
*/ size_rtl = GET_MODE_SIZE (GET_MODE (DECL_INCOMING_RTL (decl))); size = MAX (size_rtl, size); instantiate_decl (DECL_INCOMING_RTL (decl), size, valid_only); } /* Now process all variables defined in the function or its subblocks. */ instantiate_decls_1 (DECL_INITIAL (fndecl), valid_only); } /* Subroutine of instantiate_decls: Process all decls in the given BLOCK node and all its subblocks. */ static void instantiate_decls_1 (tree let, int valid_only) { tree t; for (t = BLOCK_VARS (let); t; t = TREE_CHAIN (t)) if (DECL_RTL_SET_P (t)) instantiate_decl (DECL_RTL (t), int_size_in_bytes (TREE_TYPE (t)), valid_only); /* Process all subblocks. */ for (t = BLOCK_SUBBLOCKS (let); t; t = TREE_CHAIN (t)) instantiate_decls_1 (t, valid_only); } /* Subroutine of the preceding procedures: Given RTL representing a decl and the size of the object, do any instantiation required. If VALID_ONLY is nonzero, it means that the RTL should only be changed if the new address is valid. */ static void instantiate_decl (rtx x, HOST_WIDE_INT size, int valid_only) { enum machine_mode mode; rtx addr; /* If this is not a MEM, no need to do anything. Similarly if the address is a constant or a register that is not a virtual register. */ if (x == 0 || !MEM_P (x)) return; addr = XEXP (x, 0); if (CONSTANT_P (addr) || (REG_P (addr) && (REGNO (addr) < FIRST_VIRTUAL_REGISTER || REGNO (addr) > LAST_VIRTUAL_REGISTER))) return; /* If we should only do this if the address is valid, copy the address. We need to do this so we can undo any changes that might make the address invalid. This copy is unfortunate, but probably can't be avoided. */ if (valid_only) addr = copy_rtx (addr); instantiate_virtual_regs_1 (&addr, NULL_RTX, 0); if (valid_only && size >= 0) { unsigned HOST_WIDE_INT decl_size = size; /* Now verify that the resulting address is valid for every integer or floating-point mode up to and including SIZE bytes long. We do this since the object might be accessed in any mode and frame addresses are shared. */ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode && GET_MODE_SIZE (mode) <= decl_size; mode = GET_MODE_WIDER_MODE (mode)) if (! memory_address_p (mode, addr)) return; for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode && GET_MODE_SIZE (mode) <= decl_size; mode = GET_MODE_WIDER_MODE (mode)) if (! memory_address_p (mode, addr)) return; } /* Put back the address now that we have updated it and we either know it is valid or we don't care whether it is valid. */ XEXP (x, 0) = addr; } /* Given a piece of RTX and a pointer to a HOST_WIDE_INT, if the RTX is a virtual register, return the equivalent hard register and set the offset indirectly through the pointer. Otherwise, return 0. */ static rtx instantiate_new_reg (rtx x, HOST_WIDE_INT *poffset) { rtx new; HOST_WIDE_INT offset; if (x == virtual_incoming_args_rtx) new = arg_pointer_rtx, offset = in_arg_offset; else if (x == virtual_stack_vars_rtx) new = frame_pointer_rtx, offset = var_offset; else if (x == virtual_stack_dynamic_rtx) new = stack_pointer_rtx, offset = dynamic_offset; else if (x == virtual_outgoing_args_rtx) new = stack_pointer_rtx, offset = out_arg_offset; else if (x == virtual_cfa_rtx) new = arg_pointer_rtx, offset = cfa_offset; else return 0; *poffset = offset; return new; } /* Called when instantiate_virtual_regs has failed to update the instruction. Usually this means that non-matching instruction has been emit, however for asm statements it may be the problem in the constraints. 
*/ static void instantiate_virtual_regs_lossage (rtx insn) { if (asm_noperands (PATTERN (insn)) >= 0) { error_for_asm (insn, "impossible constraint in `asm'"); delete_insn (insn); } else abort (); } /* Given a pointer to a piece of rtx and an optional pointer to the containing object, instantiate any virtual registers present in it. If EXTRA_INSNS, we always do the replacement and generate any extra insns before OBJECT. If it zero, we do nothing if replacement is not valid. Return 1 if we either had nothing to do or if we were able to do the needed replacement. Return 0 otherwise; we only return zero if EXTRA_INSNS is zero. We first try some simple transformations to avoid the creation of extra pseudos. */ static int instantiate_virtual_regs_1 (rtx *loc, rtx object, int extra_insns) { rtx x; RTX_CODE code; rtx new = 0; HOST_WIDE_INT offset = 0; rtx temp; rtx seq; int i, j; const char *fmt; /* Re-start here to avoid recursion in common cases. */ restart: x = *loc; if (x == 0) return 1; /* We may have detected and deleted invalid asm statements. */ if (object && INSN_P (object) && INSN_DELETED_P (object)) return 1; code = GET_CODE (x); /* Check for some special cases. */ switch (code) { case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST: case SYMBOL_REF: case CODE_LABEL: case PC: case CC0: case ASM_INPUT: case ADDR_VEC: case ADDR_DIFF_VEC: case RETURN: return 1; case SET: /* We are allowed to set the virtual registers. This means that the actual register should receive the source minus the appropriate offset. This is used, for example, in the handling of non-local gotos. */ if ((new = instantiate_new_reg (SET_DEST (x), &offset)) != 0) { rtx src = SET_SRC (x); /* We are setting the register, not using it, so the relevant offset is the negative of the offset to use were we using the register. */ offset = - offset; instantiate_virtual_regs_1 (&src, NULL_RTX, 0); /* The only valid sources here are PLUS or REG. Just do the simplest possible thing to handle them. */ if (!REG_P (src) && GET_CODE (src) != PLUS) { instantiate_virtual_regs_lossage (object); return 1; } start_sequence (); if (!REG_P (src)) temp = force_operand (src, NULL_RTX); else temp = src; temp = force_operand (plus_constant (temp, offset), NULL_RTX); seq = get_insns (); end_sequence (); emit_insn_before (seq, object); SET_DEST (x) = new; if (! validate_change (object, &SET_SRC (x), temp, 0) || ! extra_insns) instantiate_virtual_regs_lossage (object); return 1; } instantiate_virtual_regs_1 (&SET_DEST (x), object, extra_insns); loc = &SET_SRC (x); goto restart; case PLUS: /* Handle special case of virtual register plus constant. */ if (CONSTANT_P (XEXP (x, 1))) { rtx old, new_offset; /* Check for (plus (plus VIRT foo) (const_int)) first. */ if (GET_CODE (XEXP (x, 0)) == PLUS) { if ((new = instantiate_new_reg (XEXP (XEXP (x, 0), 0), &offset))) { instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 1), object, extra_insns); new = gen_rtx_PLUS (Pmode, new, XEXP (XEXP (x, 0), 1)); } else { loc = &XEXP (x, 0); goto restart; } } #ifdef POINTERS_EXTEND_UNSIGNED /* If we have (plus (subreg (virtual-reg)) (const_int)), we know we can commute the PLUS and SUBREG because pointers into the frame are well-behaved. 
*/ else if (GET_CODE (XEXP (x, 0)) == SUBREG && GET_MODE (x) == ptr_mode && GET_CODE (XEXP (x, 1)) == CONST_INT && 0 != (new = instantiate_new_reg (SUBREG_REG (XEXP (x, 0)), &offset)) && validate_change (object, loc, plus_constant (gen_lowpart (ptr_mode, new), offset + INTVAL (XEXP (x, 1))), 0)) return 1; #endif else if ((new = instantiate_new_reg (XEXP (x, 0), &offset)) == 0) { /* We know the second operand is a constant. Unless the first operand is a REG (which has been already checked), it needs to be checked. */ if (!REG_P (XEXP (x, 0))) { loc = &XEXP (x, 0); goto restart; } return 1; } new_offset = plus_constant (XEXP (x, 1), offset); /* If the new constant is zero, try to replace the sum with just the register. */ if (new_offset == const0_rtx && validate_change (object, loc, new, 0)) return 1; /* Next try to replace the register and new offset. There are two changes to validate here and we can't assume that in the case of old offset equals new just changing the register will yield a valid insn. In the interests of a little efficiency, however, we only call validate change once (we don't queue up the changes and then call apply_change_group). */ old = XEXP (x, 0); if (offset == 0 ? ! validate_change (object, &XEXP (x, 0), new, 0) : (XEXP (x, 0) = new, ! validate_change (object, &XEXP (x, 1), new_offset, 0))) { if (! extra_insns) { XEXP (x, 0) = old; return 0; } /* Otherwise copy the new constant into a register and replace constant with that register. */ temp = gen_reg_rtx (Pmode); XEXP (x, 0) = new; if (validate_change (object, &XEXP (x, 1), temp, 0)) emit_insn_before (gen_move_insn (temp, new_offset), object); else { /* If that didn't work, replace this expression with a register containing the sum. */ XEXP (x, 0) = old; new = gen_rtx_PLUS (Pmode, new, new_offset); start_sequence (); temp = force_operand (new, NULL_RTX); seq = get_insns (); end_sequence (); emit_insn_before (seq, object); if (! validate_change (object, loc, temp, 0) && ! validate_replace_rtx (x, temp, object)) { instantiate_virtual_regs_lossage (object); return 1; } } } return 1; } /* Fall through to generic two-operand expression case. */ case EXPR_LIST: case CALL: case COMPARE: case MINUS: case MULT: case DIV: case UDIV: case MOD: case UMOD: case AND: case IOR: case XOR: case ROTATERT: case ROTATE: case ASHIFTRT: case LSHIFTRT: case ASHIFT: case NE: case EQ: case GE: case GT: case GEU: case GTU: case LE: case LT: case LEU: case LTU: if (XEXP (x, 1) && ! CONSTANT_P (XEXP (x, 1))) instantiate_virtual_regs_1 (&XEXP (x, 1), object, extra_insns); loc = &XEXP (x, 0); goto restart; case MEM: /* Most cases of MEM that convert to valid addresses have already been handled by our scan of decls. The only special handling we need here is to make a copy of the rtx to ensure it isn't being shared if we have to change it to a pseudo. If the rtx is a simple reference to an address via a virtual register, it can potentially be shared. In such cases, first try to make it a valid address, which can also be shared. Otherwise, copy it and proceed normally. First check for common cases that need no processing. These are usually due to instantiation already being done on a previous instance of a shared rtx. 
*/ temp = XEXP (x, 0); if (CONSTANT_ADDRESS_P (temp) #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM || temp == arg_pointer_rtx #endif #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM || temp == hard_frame_pointer_rtx #endif || temp == frame_pointer_rtx) return 1; if (GET_CODE (temp) == PLUS && CONSTANT_ADDRESS_P (XEXP (temp, 1)) && (XEXP (temp, 0) == frame_pointer_rtx #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM || XEXP (temp, 0) == hard_frame_pointer_rtx #endif #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM || XEXP (temp, 0) == arg_pointer_rtx #endif )) return 1; if (temp == virtual_stack_vars_rtx || temp == virtual_incoming_args_rtx || (GET_CODE (temp) == PLUS && CONSTANT_ADDRESS_P (XEXP (temp, 1)) && (XEXP (temp, 0) == virtual_stack_vars_rtx || XEXP (temp, 0) == virtual_incoming_args_rtx))) { /* This MEM may be shared. If the substitution can be done without the need to generate new pseudos, we want to do it in place so all copies of the shared rtx benefit. The call below will only make substitutions if the resulting address is still valid. Note that we cannot pass X as the object in the recursive call since the insn being processed may not allow all valid addresses. However, if we were not passed on object, we can only modify X without copying it if X will have a valid address. ??? Also note that this can still lose if OBJECT is an insn that has less restrictions on an address that some other insn. In that case, we will modify the shared address. This case doesn't seem very likely, though. One case where this could happen is in the case of a USE or CLOBBER reference, but we take care of that below. */ if (instantiate_virtual_regs_1 (&XEXP (x, 0), object ? object : x, 0)) return 1; /* Otherwise make a copy and process that copy. We copy the entire RTL expression since it might be a PLUS which could also be shared. */ *loc = x = copy_rtx (x); } /* Fall through to generic unary operation case. */ case PREFETCH: case SUBREG: case STRICT_LOW_PART: case NEG: case NOT: case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: case SIGN_EXTEND: case ZERO_EXTEND: case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT: case FIX: case UNSIGNED_FIX: case UNSIGNED_FLOAT: case ABS: case SQRT: case FFS: case CLZ: case CTZ: case POPCOUNT: case PARITY: /* These case either have just one operand or we know that we need not check the rest of the operands. */ loc = &XEXP (x, 0); goto restart; case USE: case CLOBBER: /* If the operand is a MEM, see if the change is a valid MEM. If not, go ahead and make the invalid one, but do it to a copy. For a REG, just make the recursive call, since there's no chance of a problem. */ if ((MEM_P (XEXP (x, 0)) && instantiate_virtual_regs_1 (&XEXP (XEXP (x, 0), 0), XEXP (x, 0), 0)) || (REG_P (XEXP (x, 0)) && instantiate_virtual_regs_1 (&XEXP (x, 0), object, 0))) return 1; XEXP (x, 0) = copy_rtx (XEXP (x, 0)); loc = &XEXP (x, 0); goto restart; case REG: /* Try to replace with a PLUS. If that doesn't work, compute the sum in front of this insn and substitute the temporary. */ if ((new = instantiate_new_reg (x, &offset)) != 0) { temp = plus_constant (new, offset); if (!validate_change (object, loc, temp, 0)) { if (! extra_insns) return 0; start_sequence (); temp = force_operand (temp, NULL_RTX); seq = get_insns (); end_sequence (); emit_insn_before (seq, object); if (! validate_change (object, loc, temp, 0) && ! 
validate_replace_rtx (x, temp, object)) instantiate_virtual_regs_lossage (object); } } return 1; default: break; } /* Scan all subexpressions. */ fmt = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++) if (*fmt == 'e') { if (!instantiate_virtual_regs_1 (&XEXP (x, i), object, extra_insns)) return 0; } else if (*fmt == 'E') for (j = 0; j < XVECLEN (x, i); j++) if (! instantiate_virtual_regs_1 (&XVECEXP (x, i, j), object, extra_insns)) return 0; return 1; } /* Return 1 if EXP is an aggregate type (or a value with aggregate type). This means a type for which function calls must pass an address to the function or get an address back from the function. EXP may be a type node or an expression (whose type is tested). */ int aggregate_value_p (tree exp, tree fntype) { int i, regno, nregs; rtx reg; tree type = (TYPE_P (exp)) ? exp : TREE_TYPE (exp); if (fntype) switch (TREE_CODE (fntype)) { case CALL_EXPR: fntype = get_callee_fndecl (fntype); fntype = fntype ? TREE_TYPE (fntype) : 0; break; case FUNCTION_DECL: fntype = TREE_TYPE (fntype); break; case FUNCTION_TYPE: case METHOD_TYPE: break; case IDENTIFIER_NODE: fntype = 0; break; default: /* We don't expect other rtl types here. */ abort(); } if (TREE_CODE (type) == VOID_TYPE) return 0; if (targetm.calls.return_in_memory (type, fntype)) return 1; /* Types that are TREE_ADDRESSABLE must be constructed in memory, and thus can't be returned in registers. */ if (TREE_ADDRESSABLE (type)) return 1; if (flag_pcc_struct_return && AGGREGATE_TYPE_P (type)) return 1; /* Make sure we have suitable call-clobbered regs to return the value in; if not, we must return it in memory. */ reg = hard_function_value (type, 0, 0); /* If we have something other than a REG (e.g. a PARALLEL), then assume it is OK. */ if (!REG_P (reg)) return 0; regno = REGNO (reg); nregs = hard_regno_nregs[regno][TYPE_MODE (type)]; for (i = 0; i < nregs; i++) if (! call_used_regs[regno + i]) return 1; return 0; } /* Return true if we should assign DECL a pseudo register; false if it should live on the local stack. */ bool use_register_for_decl (tree decl) { /* Honor volatile. */ if (TREE_SIDE_EFFECTS (decl)) return false; /* Honor addressability. */ if (TREE_ADDRESSABLE (decl)) return false; /* Only register-like things go in registers. */ if (DECL_MODE (decl) == BLKmode) return false; /* If -ffloat-store specified, don't put explicit float variables into registers. */ /* ??? This should be checked after DECL_ARTIFICIAL, but tree-ssa propagates values across these stores, and it probably shouldn't. */ if (flag_float_store && FLOAT_TYPE_P (TREE_TYPE (decl))) return false; /* Compiler-generated temporaries can always go in registers. */ if (DECL_ARTIFICIAL (decl)) return true; #ifdef NON_SAVING_SETJMP /* Protect variables not declared "register" from setjmp. */ if (NON_SAVING_SETJMP && current_function_calls_setjmp && !DECL_REGISTER (decl)) return false; #endif return (optimize || DECL_REGISTER (decl)); } /* Structures to communicate between the subroutines of assign_parms. The first holds data persistent across all parameters, the second is cleared out for each parameter. 
struct assign_parm_data_all { CUMULATIVE_ARGS args_so_far; struct args_size stack_args_size; tree function_result_decl; tree orig_fnargs; rtx conversion_insns; HOST_WIDE_INT pretend_args_size; HOST_WIDE_INT extra_pretend_bytes; int reg_parm_stack_space; }; struct assign_parm_data_one { tree nominal_type; tree passed_type; rtx entry_parm; rtx stack_parm; enum machine_mode nominal_mode; enum machine_mode passed_mode; enum machine_mode promoted_mode; struct locate_and_pad_arg_data locate; int partial; BOOL_BITFIELD named_arg : 1; BOOL_BITFIELD last_named : 1; BOOL_BITFIELD passed_pointer : 1; BOOL_BITFIELD on_stack : 1; BOOL_BITFIELD loaded_in_reg : 1; }; /* A subroutine of assign_parms. Initialize ALL. */ static void assign_parms_initialize_all (struct assign_parm_data_all *all) { tree fntype; memset (all, 0, sizeof (*all)); fntype = TREE_TYPE (current_function_decl); #ifdef INIT_CUMULATIVE_INCOMING_ARGS INIT_CUMULATIVE_INCOMING_ARGS (all->args_so_far, fntype, NULL_RTX); #else INIT_CUMULATIVE_ARGS (all->args_so_far, fntype, NULL_RTX, current_function_decl, -1); #endif #ifdef REG_PARM_STACK_SPACE all->reg_parm_stack_space = REG_PARM_STACK_SPACE (current_function_decl); #endif } /* If ARGS contains entries with complex types, split each entry into two entries of the component type. Return a new list if substitutions are needed, else the old list. */ static tree split_complex_args (tree args) { tree p; /* Before allocating memory, check for the common case of no complex arguments. */ for (p = args; p; p = TREE_CHAIN (p)) { tree type = TREE_TYPE (p); if (TREE_CODE (type) == COMPLEX_TYPE && targetm.calls.split_complex_arg (type)) goto found; } return args; found: args = copy_list (args); for (p = args; p; p = TREE_CHAIN (p)) { tree type = TREE_TYPE (p); if (TREE_CODE (type) == COMPLEX_TYPE && targetm.calls.split_complex_arg (type)) { tree decl; tree subtype = TREE_TYPE (type); /* Rewrite the PARM_DECL's type with its component. */ TREE_TYPE (p) = subtype; DECL_ARG_TYPE (p) = TREE_TYPE (DECL_ARG_TYPE (p)); DECL_MODE (p) = VOIDmode; DECL_SIZE (p) = NULL; DECL_SIZE_UNIT (p) = NULL; layout_decl (p, 0); /* Build a second synthetic decl. */ decl = build_decl (PARM_DECL, NULL_TREE, subtype); DECL_ARG_TYPE (decl) = DECL_ARG_TYPE (p); layout_decl (decl, 0); /* Splice it in; skip the new decl. */ TREE_CHAIN (decl) = TREE_CHAIN (p); TREE_CHAIN (p) = decl; p = decl; } } return args; } /* A subroutine of assign_parms. Adjust the parameter list to incorporate the hidden struct return argument, and (ABI willing) complex args. Return the new parameter list. */ static tree assign_parms_augmented_arg_list (struct assign_parm_data_all *all) { tree fndecl = current_function_decl; tree fntype = TREE_TYPE (fndecl); tree fnargs = DECL_ARGUMENTS (fndecl); /* If struct value address is treated as the first argument, make it so. */ if (aggregate_value_p (DECL_RESULT (fndecl), fndecl) && ! current_function_returns_pcc_struct && targetm.calls.struct_value_rtx (TREE_TYPE (fndecl), 1) == 0) { tree type = build_pointer_type (TREE_TYPE (fntype)); tree decl; decl = build_decl (PARM_DECL, NULL_TREE, type); DECL_ARG_TYPE (decl) = type; DECL_ARTIFICIAL (decl) = 1; TREE_CHAIN (decl) = fnargs; fnargs = decl; all->function_result_decl = decl; } all->orig_fnargs = fnargs; /* If the target wants to split complex arguments into scalars, do so. */ if (targetm.calls.split_complex_arg) fnargs = split_complex_args (fnargs); return fnargs; } /* A subroutine of assign_parms. Examine PARM and pull out type and mode data for the parameter. 
Incorporate ABI specifics such as pass-by- reference and type promotion. */ static void assign_parm_find_data_types (struct assign_parm_data_all *all, tree parm, struct assign_parm_data_one *data) { tree nominal_type, passed_type; enum machine_mode nominal_mode, passed_mode, promoted_mode; memset (data, 0, sizeof (*data)); /* Set LAST_NAMED if this is last named arg before last anonymous args. */ if (current_function_stdarg) { tree tem; for (tem = TREE_CHAIN (parm); tem; tem = TREE_CHAIN (tem)) if (DECL_NAME (tem)) break; if (tem == 0) data->last_named = true; } /* Set NAMED_ARG if this arg should be treated as a named arg. For most machines, if this is a varargs/stdarg function, then we treat the last named arg as if it were anonymous too. */ if (targetm.calls.strict_argument_naming (&all->args_so_far)) data->named_arg = 1; else data->named_arg = !data->last_named; nominal_type = TREE_TYPE (parm); passed_type = DECL_ARG_TYPE (parm); /* Look out for errors propagating this far. Also, if the parameter's type is void then its value doesn't matter. */ if (TREE_TYPE (parm) == error_mark_node /* This can happen after weird syntax errors or if an enum type is defined among the parms. */ || TREE_CODE (parm) != PARM_DECL || passed_type == NULL || VOID_TYPE_P (nominal_type)) { nominal_type = passed_type = void_type_node; nominal_mode = passed_mode = promoted_mode = VOIDmode; goto egress; } /* Find mode of arg as it is passed, and mode of arg as it should be during execution of this function. */ passed_mode = TYPE_MODE (passed_type); nominal_mode = TYPE_MODE (nominal_type); /* If the parm is to be passed as a transparent union, use the type of the first field for the tests below. We have already verified that the modes are the same. */ if (DECL_TRANSPARENT_UNION (parm) || (TREE_CODE (passed_type) == UNION_TYPE && TYPE_TRANSPARENT_UNION (passed_type))) passed_type = TREE_TYPE (TYPE_FIELDS (passed_type)); /* See if this arg was passed by invisible reference. It is if it is an object whose size depends on the contents of the object itself or if the machine requires these objects be passed that way. */ if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE (passed_type)) || TREE_ADDRESSABLE (passed_type) || FUNCTION_ARG_PASS_BY_REFERENCE (all->args_so_far, passed_mode, passed_type, data->named_arg)) { passed_type = nominal_type = build_pointer_type (passed_type); data->passed_pointer = true; passed_mode = nominal_mode = Pmode; } /* See if the frontend wants to pass this by invisible reference. */ else if (passed_type != nominal_type && POINTER_TYPE_P (passed_type) && TREE_TYPE (passed_type) == nominal_type) { nominal_type = passed_type; data->passed_pointer = 1; passed_mode = nominal_mode = Pmode; } /* Find mode as it is passed by the ABI. */ promoted_mode = passed_mode; if (targetm.calls.promote_function_args (TREE_TYPE (current_function_decl))) { int unsignedp = TYPE_UNSIGNED (passed_type); promoted_mode = promote_mode (passed_type, promoted_mode, &unsignedp, 1); } egress: data->nominal_type = nominal_type; data->passed_type = passed_type; data->nominal_mode = nominal_mode; data->passed_mode = passed_mode; data->promoted_mode = promoted_mode; } /* A subroutine of assign_parms. Invoke setup_incoming_varargs. 
*/ static void assign_parms_setup_varargs (struct assign_parm_data_all *all, struct assign_parm_data_one *data, bool no_rtl) { int varargs_pretend_bytes = 0; targetm.calls.setup_incoming_varargs (&all->args_so_far, data->promoted_mode, data->passed_type, &varargs_pretend_bytes, no_rtl); /* If the back-end has requested extra stack space, record how much is needed. Do not change pretend_args_size otherwise since it may be nonzero from an earlier partial argument. */ if (varargs_pretend_bytes > 0) all->pretend_args_size = varargs_pretend_bytes; } /* A subroutine of assign_parms. Set DATA->ENTRY_PARM corresponding to the incoming location of the current parameter. */ static void assign_parm_find_entry_rtl (struct assign_parm_data_all *all, struct assign_parm_data_one *data) { HOST_WIDE_INT pretend_bytes = 0; rtx entry_parm; bool in_regs; if (data->promoted_mode == VOIDmode) { data->entry_parm = data->stack_parm = const0_rtx; return; } #ifdef FUNCTION_INCOMING_ARG entry_parm = FUNCTION_INCOMING_ARG (all->args_so_far, data->promoted_mode, data->passed_type, data->named_arg); #else entry_parm = FUNCTION_ARG (all->args_so_far, data->promoted_mode, data->passed_type, data->named_arg); #endif if (entry_parm == 0) data->promoted_mode = data->passed_mode; /* Determine parm's home in the stack, in case it arrives in the stack or we should pretend it did. Compute the stack position and rtx where the argument arrives and its size. There is one complexity here: If this was a parameter that would have been passed in registers, but wasn't only because it is __builtin_va_alist, we want locate_and_pad_parm to treat it as if it came in a register so that REG_PARM_STACK_SPACE isn't skipped. In this case, we call FUNCTION_ARG with NAMED set to 1 instead of 0 as it was the previous time. */ in_regs = entry_parm != 0; #ifdef STACK_PARMS_IN_REG_PARM_AREA in_regs = true; #endif if (!in_regs && !data->named_arg) { if (targetm.calls.pretend_outgoing_varargs_named (&all->args_so_far)) { rtx tem; #ifdef FUNCTION_INCOMING_ARG tem = FUNCTION_INCOMING_ARG (all->args_so_far, data->promoted_mode, data->passed_type, true); #else tem = FUNCTION_ARG (all->args_so_far, data->promoted_mode, data->passed_type, true); #endif in_regs = tem != NULL; } } /* If this parameter was passed both in registers and in the stack, use the copy on the stack. */ if (MUST_PASS_IN_STACK (data->promoted_mode, data->passed_type)) entry_parm = 0; #ifdef FUNCTION_ARG_PARTIAL_NREGS if (entry_parm) { int partial; partial = FUNCTION_ARG_PARTIAL_NREGS (all->args_so_far, data->promoted_mode, data->passed_type, data->named_arg); data->partial = partial; /* The caller might already have allocated stack space for the register parameters. */ if (partial != 0 && all->reg_parm_stack_space == 0) { /* Part of this argument is passed in registers and part is passed on the stack. Ask the prologue code to extend the stack part so that we can recreate the full value. PRETEND_BYTES is the size of the registers we need to store. CURRENT_FUNCTION_PRETEND_ARGS_SIZE is the amount of extra stack space that the prologue should allocate. Internally, gcc assumes that the argument pointer is aligned to STACK_BOUNDARY bits. This is used both for alignment optimizations (see init_emit) and to locate arguments that are aligned to more than PARM_BOUNDARY bits. We must preserve this invariant by rounding CURRENT_FUNCTION_PRETEND_ARGS_SIZE up to a stack boundary. */ /* We assume at most one partial arg, and it must be the first argument on the stack. 
*/ if (all->extra_pretend_bytes || all->pretend_args_size) abort (); pretend_bytes = partial * UNITS_PER_WORD; all->pretend_args_size = CEIL_ROUND (pretend_bytes, STACK_BYTES); /* We want to align relative to the actual stack pointer, so don't include this in the stack size until later. */ all->extra_pretend_bytes = all->pretend_args_size; } } #endif locate_and_pad_parm (data->promoted_mode, data->passed_type, in_regs, entry_parm ? data->partial : 0, current_function_decl, &all->stack_args_size, &data->locate); /* Adjust offsets to include the pretend args. */ pretend_bytes = all->extra_pretend_bytes - pretend_bytes; data->locate.slot_offset.constant += pretend_bytes; data->locate.offset.constant += pretend_bytes; data->entry_parm = entry_parm; } /* A subroutine of assign_parms. If there is actually space on the stack for this parm, count it in stack_args_size and return true. */ static bool assign_parm_is_stack_parm (struct assign_parm_data_all *all, struct assign_parm_data_one *data) { /* Trivially true if we've no incomming register. */ if (data->entry_parm == NULL) ; /* Also true if we're partially in registers and partially not, since we've arranged to drop the entire argument on the stack. */ else if (data->partial != 0) ; /* Also true if the target says that it's passed in both registers and on the stack. */ else if (GET_CODE (data->entry_parm) == PARALLEL && XEXP (XVECEXP (data->entry_parm, 0, 0), 0) == NULL_RTX) ; /* Also true if the target says that there's stack allocated for all register parameters. */ else if (all->reg_parm_stack_space > 0) ; /* Otherwise, no, this parameter has no ABI defined stack slot. */ else return false; all->stack_args_size.constant += data->locate.size.constant; if (data->locate.size.var) ADD_PARM_SIZE (all->stack_args_size, data->locate.size.var); return true; } /* A subroutine of assign_parms. Given that this parameter is allocated stack space by the ABI, find it. */ static void assign_parm_find_stack_rtl (tree parm, struct assign_parm_data_one *data) { rtx offset_rtx, stack_parm; unsigned int align, boundary; /* If we're passing this arg using a reg, make its stack home the aligned stack slot. */ if (data->entry_parm) offset_rtx = ARGS_SIZE_RTX (data->locate.slot_offset); else offset_rtx = ARGS_SIZE_RTX (data->locate.offset); stack_parm = current_function_internal_arg_pointer; if (offset_rtx != const0_rtx) stack_parm = gen_rtx_PLUS (Pmode, stack_parm, offset_rtx); stack_parm = gen_rtx_MEM (data->promoted_mode, stack_parm); set_mem_attributes (stack_parm, parm, 1); boundary = FUNCTION_ARG_BOUNDARY (data->promoted_mode, data->passed_type); align = 0; /* If we're padding upward, we know that the alignment of the slot is FUNCTION_ARG_BOUNDARY. If we're using slot_offset, we're intentionally forcing upward padding. Otherwise we have to come up with a guess at the alignment based on OFFSET_RTX. */ if (data->locate.where_pad == upward || data->entry_parm) align = boundary; else if (GET_CODE (offset_rtx) == CONST_INT) { align = INTVAL (offset_rtx) * BITS_PER_UNIT | boundary; align = align & -align; } if (align > 0) set_mem_align (stack_parm, align); if (data->entry_parm) set_reg_attrs_for_parm (data->entry_parm, stack_parm); data->stack_parm = stack_parm; } /* A subroutine of assign_parms. Adjust DATA->ENTRY_RTL such that it's always valid and contiguous. 
*/ static void assign_parm_adjust_entry_rtl (struct assign_parm_data_one *data) { rtx entry_parm = data->entry_parm; rtx stack_parm = data->stack_parm; /* If this parm was passed part in regs and part in memory, pretend it arrived entirely in memory by pushing the register-part onto the stack. In the special case of a DImode or DFmode that is split, we could put it together in a pseudoreg directly, but for now that's not worth bothering with. */ if (data->partial != 0) { /* Handle calls that pass values in multiple non-contiguous locations. The Irix 6 ABI has examples of this. */ if (GET_CODE (entry_parm) == PARALLEL) emit_group_store (validize_mem (stack_parm), entry_parm, data->passed_type, int_size_in_bytes (data->passed_type)); else move_block_from_reg (REGNO (entry_parm), validize_mem (stack_parm), data->partial); entry_parm = stack_parm; } /* If we didn't decide this parm came in a register, by default it came on the stack. */ else if (entry_parm == NULL) entry_parm = stack_parm; /* When an argument is passed in multiple locations, we can't make use of this information, but we can save some copying if the whole argument is passed in a single register. */ else if (GET_CODE (entry_parm) == PARALLEL && data->nominal_mode != BLKmode && data->passed_mode != BLKmode) { size_t i, len = XVECLEN (entry_parm, 0); for (i = 0; i < len; i++) if (XEXP (XVECEXP (entry_parm, 0, i), 0) != NULL_RTX && REG_P (XEXP (XVECEXP (entry_parm, 0, i), 0)) && (GET_MODE (XEXP (XVECEXP (entry_parm, 0, i), 0)) == data->passed_mode) && INTVAL (XEXP (XVECEXP (entry_parm, 0, i), 1)) == 0) { entry_parm = XEXP (XVECEXP (entry_parm, 0, i), 0); break; } } data->entry_parm = entry_parm; } /* A subroutine of assign_parms. Adjust DATA->STACK_RTL such that it's always valid and properly aligned. */ static void assign_parm_adjust_stack_rtl (struct assign_parm_data_one *data) { rtx stack_parm = data->stack_parm; /* If we can't trust the parm stack slot to be aligned enough for its ultimate type, don't use that slot after entry. We'll make another stack slot, if we need one. */ if (STRICT_ALIGNMENT && stack_parm && GET_MODE_ALIGNMENT (data->nominal_mode) > MEM_ALIGN (stack_parm)) stack_parm = NULL; /* If parm was passed in memory, and we need to convert it on entry, don't store it back in that same slot. */ else if (data->entry_parm == stack_parm && data->nominal_mode != BLKmode && data->nominal_mode != data->passed_mode) stack_parm = NULL; data->stack_parm = stack_parm; } /* A subroutine of assign_parms. Return true if the current parameter should be stored as a BLKmode in the current frame. */ static bool assign_parm_setup_block_p (struct assign_parm_data_one *data) { if (data->nominal_mode == BLKmode) return true; if (GET_CODE (data->entry_parm) == PARALLEL) return true; #ifdef BLOCK_REG_PADDING if (data->locate.where_pad == (BYTES_BIG_ENDIAN ? upward : downward) && GET_MODE_SIZE (data->promoted_mode) < UNITS_PER_WORD) return true; #endif return false; } /* A subroutine of assign_parms. Arrange for the parameter to be present and valid in DATA->STACK_RTL. */ static void assign_parm_setup_block (tree parm, struct assign_parm_data_one *data) { rtx entry_parm = data->entry_parm; rtx stack_parm = data->stack_parm; /* If we've a non-block object that's nevertheless passed in parts, reconstitute it in register operations rather than on the stack. 
*/ if (GET_CODE (entry_parm) == PARALLEL && data->nominal_mode != BLKmode && XVECLEN (entry_parm, 0) > 1 && optimize) { rtx parmreg = gen_reg_rtx (data->nominal_mode); emit_group_store (parmreg, entry_parm, data->nominal_type, int_size_in_bytes (data->nominal_type)); SET_DECL_RTL (parm, parmreg); return; } /* If a BLKmode arrives in registers, copy it to a stack slot. Handle calls that pass values in multiple non-contiguous locations. */ if (REG_P (entry_parm) || GET_CODE (entry_parm) == PARALLEL) { HOST_WIDE_INT size = int_size_in_bytes (data->passed_type); HOST_WIDE_INT size_stored = CEIL_ROUND (size, UNITS_PER_WORD); rtx mem; /* Note that we will be storing an integral number of words. So we have to be careful to ensure that we allocate an integral number of words. We do this below in the assign_stack_local if space was not allocated in the argument list. If it was, this will not work if PARM_BOUNDARY is not a multiple of BITS_PER_WORD. It isn't clear how to fix this if it becomes a problem. Exception is when BLKmode arrives with arguments not conforming to word_mode. */ if (stack_parm == 0) { stack_parm = assign_stack_local (BLKmode, size_stored, 0); data->stack_parm = stack_parm; PUT_MODE (stack_parm, GET_MODE (entry_parm)); set_mem_attributes (stack_parm, parm, 1); } else if (GET_CODE (entry_parm) == PARALLEL) ; else if (size != 0 && PARM_BOUNDARY % BITS_PER_WORD != 0) abort (); mem = validize_mem (stack_parm); /* Handle values in multiple non-contiguous locations. */ if (GET_CODE (entry_parm) == PARALLEL) emit_group_store (mem, entry_parm, data->passed_type, size); else if (size == 0) ; /* If SIZE is that of a mode no bigger than a word, just use that mode's store operation. */ else if (size <= UNITS_PER_WORD) { enum machine_mode mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0); if (mode != BLKmode #ifdef BLOCK_REG_PADDING && (size == UNITS_PER_WORD || (BLOCK_REG_PADDING (mode, data->passed_type, 1) != (BYTES_BIG_ENDIAN ? upward : downward))) #endif ) { rtx reg = gen_rtx_REG (mode, REGNO (entry_parm)); emit_move_insn (change_address (mem, mode, 0), reg); } /* Blocks smaller than a word on a BYTES_BIG_ENDIAN machine must be aligned to the left before storing to memory. Note that the previous test doesn't handle all cases (e.g. SIZE == 3). */ else if (size != UNITS_PER_WORD #ifdef BLOCK_REG_PADDING && (BLOCK_REG_PADDING (mode, data->passed_type, 1) == downward) #else && BYTES_BIG_ENDIAN #endif ) { rtx tem, x; int by = (UNITS_PER_WORD - size) * BITS_PER_UNIT; rtx reg = gen_rtx_REG (word_mode, REGNO (data->entry_parm)); x = expand_shift (LSHIFT_EXPR, word_mode, reg, build_int_2 (by, 0), NULL_RTX, 1); tem = change_address (mem, word_mode, 0); emit_move_insn (tem, x); } else move_block_from_reg (REGNO (data->entry_parm), mem, size_stored / UNITS_PER_WORD); } else move_block_from_reg (REGNO (data->entry_parm), mem, size_stored / UNITS_PER_WORD); } SET_DECL_RTL (parm, stack_parm); } /* A subroutine of assign_parms. Allocate a pseudo to hold the current parameter. Get it there. Perform all ABI specified conversions. */ static void assign_parm_setup_reg (struct assign_parm_data_all *all, tree parm, struct assign_parm_data_one *data) { rtx parmreg; enum machine_mode promoted_nominal_mode; int unsignedp = TYPE_UNSIGNED (TREE_TYPE (parm)); bool did_conversion = false; /* Store the parm in a pseudoregister during the function, but we may need to do it in a wider mode. 
*/ promoted_nominal_mode = promote_mode (data->nominal_type, data->nominal_mode, &unsignedp, 0); parmreg = gen_reg_rtx (promoted_nominal_mode); if (!DECL_ARTIFICIAL (parm)) mark_user_reg (parmreg); /* If this was an item that we received a pointer to, set DECL_RTL appropriately. */ if (data->passed_pointer) { rtx x = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (data->passed_type)), parmreg); set_mem_attributes (x, parm, 1); SET_DECL_RTL (parm, x); } else { SET_DECL_RTL (parm, parmreg); maybe_set_unchanging (DECL_RTL (parm), parm); } /* Copy the value into the register. */ if (data->nominal_mode != data->passed_mode || promoted_nominal_mode != data->promoted_mode) { int save_tree_used; /* ENTRY_PARM has been converted to PROMOTED_MODE, its mode, by the caller. We now have to convert it to NOMINAL_MODE, if different. However, PARMREG may be in a different mode than NOMINAL_MODE if it is being stored promoted. If ENTRY_PARM is a hard register, it might be in a register not valid for operating in its mode (e.g., an odd-numbered register for a DFmode). In that case, moves are the only thing valid, so we can't do a convert from there. This occurs when the calling sequence allow such misaligned usages. In addition, the conversion may involve a call, which could clobber parameters which haven't been copied to pseudo registers yet. Therefore, we must first copy the parm to a pseudo reg here, and save the conversion until after all parameters have been moved. */ rtx tempreg = gen_reg_rtx (GET_MODE (data->entry_parm)); emit_move_insn (tempreg, validize_mem (data->entry_parm)); push_to_sequence (all->conversion_insns); tempreg = convert_to_mode (data->nominal_mode, tempreg, unsignedp); if (GET_CODE (tempreg) == SUBREG && GET_MODE (tempreg) == data->nominal_mode && REG_P (SUBREG_REG (tempreg)) && data->nominal_mode == data->passed_mode && GET_MODE (SUBREG_REG (tempreg)) == GET_MODE (data->entry_parm) && GET_MODE_SIZE (GET_MODE (tempreg)) < GET_MODE_SIZE (GET_MODE (data->entry_parm))) { /* The argument is already sign/zero extended, so note it into the subreg. */ SUBREG_PROMOTED_VAR_P (tempreg) = 1; SUBREG_PROMOTED_UNSIGNED_SET (tempreg, unsignedp); } /* TREE_USED gets set erroneously during expand_assignment. */ save_tree_used = TREE_USED (parm); expand_assignment (parm, make_tree (data->nominal_type, tempreg), 0); TREE_USED (parm) = save_tree_used; all->conversion_insns = get_insns (); end_sequence (); did_conversion = true; } else emit_move_insn (parmreg, validize_mem (data->entry_parm)); /* If we were passed a pointer but the actual value can safely live in a register, put it in one. */ if (data->passed_pointer && TYPE_MODE (TREE_TYPE (parm)) != BLKmode /* If by-reference argument was promoted, demote it. */ && (TYPE_MODE (TREE_TYPE (parm)) != GET_MODE (DECL_RTL (parm)) || use_register_for_decl (parm))) { /* We can't use nominal_mode, because it will have been set to Pmode above. We must use the actual mode of the parm. 
*/ parmreg = gen_reg_rtx (TYPE_MODE (TREE_TYPE (parm))); mark_user_reg (parmreg); if (GET_MODE (parmreg) != GET_MODE (DECL_RTL (parm))) { rtx tempreg = gen_reg_rtx (GET_MODE (DECL_RTL (parm))); int unsigned_p = TYPE_UNSIGNED (TREE_TYPE (parm)); push_to_sequence (all->conversion_insns); emit_move_insn (tempreg, DECL_RTL (parm)); tempreg = convert_to_mode (GET_MODE (parmreg), tempreg, unsigned_p); emit_move_insn (parmreg, tempreg); all->conversion_insns = get_insns(); end_sequence (); did_conversion = true; } else emit_move_insn (parmreg, DECL_RTL (parm)); SET_DECL_RTL (parm, parmreg); /* STACK_PARM is the pointer, not the parm, and PARMREG is now the parm. */ data->stack_parm = NULL; } #ifdef FUNCTION_ARG_CALLEE_COPIES /* If we are passed an arg by reference and it is our responsibility to make a copy, do it now. PASSED_TYPE and PASSED mode now refer to the pointer, not the original argument, so we must recreate them in the call to FUNCTION_ARG_CALLEE_COPIES. */ /* ??? Later add code to handle the case that if the argument isn't modified, don't do the copy. */ else if (data->passed_pointer) { tree type = TREE_TYPE (data->passed_type); if (FUNCTION_ARG_CALLEE_COPIES (all->args_so_far, TYPE_MODE (type), type, data->named_arg) && !TREE_ADDRESSABLE (type)) { rtx copy; /* This sequence may involve a library call perhaps clobbering registers that haven't been copied to pseudos yet. */ push_to_sequence (all->conversion_insns); if (!COMPLETE_TYPE_P (type) || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) { /* This is a variable sized object. */ copy = allocate_dynamic_stack_space (expr_size (parm), NULL_RTX, TYPE_ALIGN (type)); copy = gen_rtx_MEM (BLKmode, copy); } else copy = assign_stack_temp (TYPE_MODE (type), int_size_in_bytes (type), 1); set_mem_attributes (copy, parm, 1); store_expr (parm, copy, 0); emit_move_insn (parmreg, XEXP (copy, 0)); all->conversion_insns = get_insns (); end_sequence (); did_conversion = true; } } #endif /* FUNCTION_ARG_CALLEE_COPIES */ /* Mark the register as eliminable if we did no conversion and it was copied from memory at a fixed offset, and the arg pointer was not copied to a pseudo-reg. If the arg pointer is a pseudo reg or the offset formed an invalid address, such memory-equivalences as we make here would screw up life analysis for it. */ if (data->nominal_mode == data->passed_mode && !did_conversion && data->stack_parm != 0 && MEM_P (data->stack_parm) && data->locate.offset.var == 0 && reg_mentioned_p (virtual_incoming_args_rtx, XEXP (data->stack_parm, 0))) { rtx linsn = get_last_insn (); rtx sinsn, set; /* Mark complex types separately. */ if (GET_CODE (parmreg) == CONCAT) { enum machine_mode submode = GET_MODE_INNER (GET_MODE (parmreg)); int regnor = REGNO (gen_realpart (submode, parmreg)); int regnoi = REGNO (gen_imagpart (submode, parmreg)); rtx stackr = gen_realpart (submode, data->stack_parm); rtx stacki = gen_imagpart (submode, data->stack_parm); /* Scan backwards for the set of the real and imaginary parts. 
*/ for (sinsn = linsn; sinsn != 0; sinsn = prev_nonnote_insn (sinsn)) { set = single_set (sinsn); if (set == 0) continue; if (SET_DEST (set) == regno_reg_rtx [regnoi]) REG_NOTES (sinsn) = gen_rtx_EXPR_LIST (REG_EQUIV, stacki, REG_NOTES (sinsn)); else if (SET_DEST (set) == regno_reg_rtx [regnor]) REG_NOTES (sinsn) = gen_rtx_EXPR_LIST (REG_EQUIV, stackr, REG_NOTES (sinsn)); } } else if ((set = single_set (linsn)) != 0 && SET_DEST (set) == parmreg) REG_NOTES (linsn) = gen_rtx_EXPR_LIST (REG_EQUIV, data->stack_parm, REG_NOTES (linsn)); } /* For pointer data type, suggest pointer register. */ if (POINTER_TYPE_P (TREE_TYPE (parm))) mark_reg_pointer (parmreg, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (parm)))); } /* A subroutine of assign_parms. Allocate stack space to hold the current parameter. Get it there. Perform all ABI specified conversions. */ static void assign_parm_setup_stack (struct assign_parm_data_all *all, tree parm, struct assign_parm_data_one *data) { /* Value must be stored in the stack slot STACK_PARM during function execution. */ if (data->promoted_mode != data->nominal_mode) { /* Conversion is required. */ rtx tempreg = gen_reg_rtx (GET_MODE (data->entry_parm)); emit_move_insn (tempreg, validize_mem (data->entry_parm)); push_to_sequence (all->conversion_insns); data->entry_parm = convert_to_mode (data->nominal_mode, tempreg, TYPE_UNSIGNED (TREE_TYPE (parm))); if (data->stack_parm) /* ??? This may need a big-endian conversion on sparc64. */ data->stack_parm = adjust_address (data->stack_parm, data->nominal_mode, 0); all->conversion_insns = get_insns (); end_sequence (); } if (data->entry_parm != data->stack_parm) { if (data->stack_parm == 0) { data->stack_parm = assign_stack_local (GET_MODE (data->entry_parm), GET_MODE_SIZE (GET_MODE (data->entry_parm)), 0); set_mem_attributes (data->stack_parm, parm, 1); } if (data->promoted_mode != data->nominal_mode) { push_to_sequence (all->conversion_insns); emit_move_insn (validize_mem (data->stack_parm), validize_mem (data->entry_parm)); all->conversion_insns = get_insns (); end_sequence (); } else emit_move_insn (validize_mem (data->stack_parm), validize_mem (data->entry_parm)); } SET_DECL_RTL (parm, data->stack_parm); } /* A subroutine of assign_parms. If the ABI splits complex arguments, then undo the frobbing that we did in assign_parms_augmented_arg_list. */ static void assign_parms_unsplit_complex (tree orig_fnargs, tree fnargs) { tree parm; for (parm = orig_fnargs; parm; parm = TREE_CHAIN (parm)) { if (TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE && targetm.calls.split_complex_arg (TREE_TYPE (parm))) { rtx tmp, real, imag; enum machine_mode inner = GET_MODE_INNER (DECL_MODE (parm)); real = DECL_RTL (fnargs); imag = DECL_RTL (TREE_CHAIN (fnargs)); if (inner != GET_MODE (real)) { real = gen_lowpart_SUBREG (inner, real); imag = gen_lowpart_SUBREG (inner, imag); } tmp = gen_rtx_CONCAT (DECL_MODE (parm), real, imag); SET_DECL_RTL (parm, tmp); real = DECL_INCOMING_RTL (fnargs); imag = DECL_INCOMING_RTL (TREE_CHAIN (fnargs)); if (inner != GET_MODE (real)) { real = gen_lowpart_SUBREG (inner, real); imag = gen_lowpart_SUBREG (inner, imag); } tmp = gen_rtx_CONCAT (DECL_MODE (parm), real, imag); set_decl_incoming_rtl (parm, tmp); fnargs = TREE_CHAIN (fnargs); } else { SET_DECL_RTL (parm, DECL_RTL (fnargs)); set_decl_incoming_rtl (parm, DECL_INCOMING_RTL (fnargs)); /* Set MEM_EXPR to the original decl, i.e. to PARM, instead of the copy of decl, i.e. FNARGS. 
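   (For the complex branch above, the end result is, schematically and with
   hypothetical modes: a parm of type __complex__ double whose halves arrived
   separately gets DECL_RTL (parm) = (concat:DC (reg:DF <real>) (reg:DF <imag>)),
   and DECL_INCOMING_RTL is rebuilt the same way from the two incoming halves.)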
*/ if (DECL_INCOMING_RTL (parm) && MEM_P (DECL_INCOMING_RTL (parm))) set_mem_expr (DECL_INCOMING_RTL (parm), parm); } fnargs = TREE_CHAIN (fnargs); } } /* Assign RTL expressions to the function's parameters. This may involve copying them into registers and using those registers as the DECL_RTL. */ void assign_parms (tree fndecl) { struct assign_parm_data_all all; tree fnargs, parm; rtx internal_arg_pointer; int varargs_setup = 0; /* If the reg that the virtual arg pointer will be translated into is not a fixed reg or is the stack pointer, make a copy of the virtual arg pointer, and address parms via the copy. The frame pointer is considered fixed even though it is not marked as such. The second time through, simply use ap to avoid generating rtx. */ if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM || ! (fixed_regs[ARG_POINTER_REGNUM] || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM))) internal_arg_pointer = copy_to_reg (virtual_incoming_args_rtx); else internal_arg_pointer = virtual_incoming_args_rtx; current_function_internal_arg_pointer = internal_arg_pointer; assign_parms_initialize_all (&all); fnargs = assign_parms_augmented_arg_list (&all); for (parm = fnargs; parm; parm = TREE_CHAIN (parm)) { struct assign_parm_data_one data; /* Extract the type of PARM; adjust it according to ABI. */ assign_parm_find_data_types (&all, parm, &data); /* Early out for errors and void parameters. */ if (data.passed_mode == VOIDmode) { SET_DECL_RTL (parm, const0_rtx); DECL_INCOMING_RTL (parm) = DECL_RTL (parm); continue; } /* Handle stdargs. LAST_NAMED is a slight mis-nomer; it's also true for the unnamed dummy argument following the last named argument. See ABI silliness wrt strict_argument_naming and NAMED_ARG. So we only want to do this when we get to the actual last named argument, which will be the first time LAST_NAMED gets set. */ if (data.last_named && !varargs_setup) { varargs_setup = true; assign_parms_setup_varargs (&all, &data, false); } /* Find out where the parameter arrives in this function. */ assign_parm_find_entry_rtl (&all, &data); /* Find out where stack space for this parameter might be. */ if (assign_parm_is_stack_parm (&all, &data)) { assign_parm_find_stack_rtl (parm, &data); assign_parm_adjust_entry_rtl (&data); } /* Record permanently how this parm was passed. */ set_decl_incoming_rtl (parm, data.entry_parm); /* Update info on where next arg arrives in registers. */ FUNCTION_ARG_ADVANCE (all.args_so_far, data.promoted_mode, data.passed_type, data.named_arg); assign_parm_adjust_stack_rtl (&data); if (assign_parm_setup_block_p (&data)) assign_parm_setup_block (parm, &data); else if (data.passed_pointer || use_register_for_decl (parm)) assign_parm_setup_reg (&all, parm, &data); else assign_parm_setup_stack (&all, parm, &data); } if (targetm.calls.split_complex_arg && fnargs != all.orig_fnargs) assign_parms_unsplit_complex (all.orig_fnargs, fnargs); /* Output all parameter conversion instructions (possibly including calls) now that all parameters have been copied out of hard registers. */ emit_insn (all.conversion_insns); /* If we are receiving a struct value address as the first argument, set up the RTL for the function result. As this might require code to convert the transmitted address to Pmode, we do this here to ensure that possible preliminary conversions of the address have been emitted already. 
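   Concretely (hypothetical example): for `struct big f (void)' where the
   result does not fit in registers, all.function_result_decl is the artificial
   pointer parm created when the argument list was augmented; the code below
   converts the transmitted address to Pmode (in case ptr_mode differs) and
   installs

       SET_DECL_RTL (result, gen_rtx_MEM (DECL_MODE (result), addr));

   as the DECL_RTL of the RESULT_DECL.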
*/ if (all.function_result_decl) { tree result = DECL_RESULT (current_function_decl); rtx addr = DECL_RTL (all.function_result_decl); rtx x; addr = convert_memory_address (Pmode, addr); x = gen_rtx_MEM (DECL_MODE (result), addr); set_mem_attributes (x, result, 1); SET_DECL_RTL (result, x); } /* We have aligned all the args, so add space for the pretend args. */ current_function_pretend_args_size = all.pretend_args_size; all.stack_args_size.constant += all.extra_pretend_bytes; current_function_args_size = all.stack_args_size.constant; /* Adjust function incoming argument size for alignment and minimum length. */ #ifdef REG_PARM_STACK_SPACE current_function_args_size = MAX (current_function_args_size, REG_PARM_STACK_SPACE (fndecl)); #endif current_function_args_size = ((current_function_args_size + STACK_BYTES - 1) / STACK_BYTES) * STACK_BYTES; #ifdef ARGS_GROW_DOWNWARD current_function_arg_offset_rtx = (stack_args_size.var == 0 ? GEN_INT (-all.stack_args_size.constant) : expand_expr (size_diffop (all.stack_args_size.var, size_int (-all.stack_args_size.constant)), NULL_RTX, VOIDmode, 0)); #else current_function_arg_offset_rtx = ARGS_SIZE_RTX (all.stack_args_size); #endif /* See how many bytes, if any, of its args a function should try to pop on return. */ current_function_pops_args = RETURN_POPS_ARGS (fndecl, TREE_TYPE (fndecl), current_function_args_size); /* For stdarg.h function, save info about regs and stack space used by the named args. */ current_function_args_info = all.args_so_far; /* Set the rtx used for the function return value. Put this in its own variable so any optimizers that need this information don't have to include tree.h. Do this here so it gets done when an inlined function gets output. */ current_function_return_rtx = (DECL_RTL_SET_P (DECL_RESULT (fndecl)) ? DECL_RTL (DECL_RESULT (fndecl)) : NULL_RTX); /* If scalar return value was computed in a pseudo-reg, or was a named return value that got dumped to the stack, copy that to the hard return register. */ if (DECL_RTL_SET_P (DECL_RESULT (fndecl))) { tree decl_result = DECL_RESULT (fndecl); rtx decl_rtl = DECL_RTL (decl_result); if (REG_P (decl_rtl) ? REGNO (decl_rtl) >= FIRST_PSEUDO_REGISTER : DECL_REGISTER (decl_result)) { rtx real_decl_rtl; #ifdef FUNCTION_OUTGOING_VALUE real_decl_rtl = FUNCTION_OUTGOING_VALUE (TREE_TYPE (decl_result), fndecl); #else real_decl_rtl = FUNCTION_VALUE (TREE_TYPE (decl_result), fndecl); #endif REG_FUNCTION_VALUE_P (real_decl_rtl) = 1; /* The delay slot scheduler assumes that current_function_return_rtx holds the hard register containing the return value, not a temporary pseudo. */ current_function_return_rtx = real_decl_rtl; } } } /* Indicate whether REGNO is an incoming argument to the current function that was promoted to a wider mode. If so, return the RTX for the register (to get its mode). PMODE and PUNSIGNEDP are set to the mode that REGNO is promoted from and whether the promotion was signed or unsigned. 
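   A typical use (sketch only) is:

       enum machine_mode pmode;
       int punsignedp;
       rtx incoming = promoted_input_arg (regno, &pmode, &punsignedp);
       if (incoming)
         -- the incoming hard register really holds a PMODE value that was
         -- sign- or zero-extended (per PUNSIGNEDP) to GET_MODE (incoming).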
*/ rtx promoted_input_arg (unsigned int regno, enum machine_mode *pmode, int *punsignedp) { tree arg; for (arg = DECL_ARGUMENTS (current_function_decl); arg; arg = TREE_CHAIN (arg)) if (REG_P (DECL_INCOMING_RTL (arg)) && REGNO (DECL_INCOMING_RTL (arg)) == regno && TYPE_MODE (DECL_ARG_TYPE (arg)) == TYPE_MODE (TREE_TYPE (arg))) { enum machine_mode mode = TYPE_MODE (TREE_TYPE (arg)); int unsignedp = TYPE_UNSIGNED (TREE_TYPE (arg)); mode = promote_mode (TREE_TYPE (arg), mode, &unsignedp, 1); if (mode == GET_MODE (DECL_INCOMING_RTL (arg)) && mode != DECL_MODE (arg)) { *pmode = DECL_MODE (arg); *punsignedp = unsignedp; return DECL_INCOMING_RTL (arg); } } return 0; } /* Compute the size and offset from the start of the stacked arguments for a parm passed in mode PASSED_MODE and with type TYPE. INITIAL_OFFSET_PTR points to the current offset into the stacked arguments. The starting offset and size for this parm are returned in LOCATE->OFFSET and LOCATE->SIZE, respectively. When IN_REGS is nonzero, the offset is that of stack slot, which is returned in LOCATE->SLOT_OFFSET. LOCATE->ALIGNMENT_PAD is the amount of padding required from the initial offset ptr to the stack slot. IN_REGS is nonzero if the argument will be passed in registers. It will never be set if REG_PARM_STACK_SPACE is not defined. FNDECL is the function in which the argument was defined. There are two types of rounding that are done. The first, controlled by FUNCTION_ARG_BOUNDARY, forces the offset from the start of the argument list to be aligned to the specific boundary (in bits). This rounding affects the initial and starting offsets, but not the argument size. The second, controlled by FUNCTION_ARG_PADDING and PARM_BOUNDARY, optionally rounds the size of the parm to PARM_BOUNDARY. The initial offset is not affected by this rounding, while the size always is and the starting offset may be. */ /* LOCATE->OFFSET will be negative for ARGS_GROW_DOWNWARD case; INITIAL_OFFSET_PTR is positive because locate_and_pad_parm's callers pass in the total size of args so far as INITIAL_OFFSET_PTR. LOCATE->SIZE is always positive. */ void locate_and_pad_parm (enum machine_mode passed_mode, tree type, int in_regs, int partial, tree fndecl ATTRIBUTE_UNUSED, struct args_size *initial_offset_ptr, struct locate_and_pad_arg_data *locate) { tree sizetree; enum direction where_pad; int boundary; int reg_parm_stack_space = 0; int part_size_in_regs; #ifdef REG_PARM_STACK_SPACE reg_parm_stack_space = REG_PARM_STACK_SPACE (fndecl); /* If we have found a stack parm before we reach the end of the area reserved for registers, skip that area. */ if (! in_regs) { if (reg_parm_stack_space > 0) { if (initial_offset_ptr->var) { initial_offset_ptr->var = size_binop (MAX_EXPR, ARGS_SIZE_TREE (*initial_offset_ptr), ssize_int (reg_parm_stack_space)); initial_offset_ptr->constant = 0; } else if (initial_offset_ptr->constant < reg_parm_stack_space) initial_offset_ptr->constant = reg_parm_stack_space; } } #endif /* REG_PARM_STACK_SPACE */ part_size_in_regs = 0; if (reg_parm_stack_space == 0) part_size_in_regs = ((partial * UNITS_PER_WORD) / (PARM_BOUNDARY / BITS_PER_UNIT) * (PARM_BOUNDARY / BITS_PER_UNIT)); sizetree = type ? 
size_in_bytes (type) : size_int (GET_MODE_SIZE (passed_mode)); where_pad = FUNCTION_ARG_PADDING (passed_mode, type); boundary = FUNCTION_ARG_BOUNDARY (passed_mode, type); locate->where_pad = where_pad; #ifdef ARGS_GROW_DOWNWARD locate->slot_offset.constant = -initial_offset_ptr->constant; if (initial_offset_ptr->var) locate->slot_offset.var = size_binop (MINUS_EXPR, ssize_int (0), initial_offset_ptr->var); { tree s2 = sizetree; if (where_pad != none && (!host_integerp (sizetree, 1) || (tree_low_cst (sizetree, 1) * BITS_PER_UNIT) % PARM_BOUNDARY)) s2 = round_up (s2, PARM_BOUNDARY / BITS_PER_UNIT); SUB_PARM_SIZE (locate->slot_offset, s2); } locate->slot_offset.constant += part_size_in_regs; if (!in_regs #ifdef REG_PARM_STACK_SPACE || REG_PARM_STACK_SPACE (fndecl) > 0 #endif ) pad_to_arg_alignment (&locate->slot_offset, boundary, &locate->alignment_pad); locate->size.constant = (-initial_offset_ptr->constant - locate->slot_offset.constant); if (initial_offset_ptr->var) locate->size.var = size_binop (MINUS_EXPR, size_binop (MINUS_EXPR, ssize_int (0), initial_offset_ptr->var), locate->slot_offset.var); /* Pad_below needs the pre-rounded size to know how much to pad below. */ locate->offset = locate->slot_offset; if (where_pad == downward) pad_below (&locate->offset, passed_mode, sizetree); #else /* !ARGS_GROW_DOWNWARD */ if (!in_regs #ifdef REG_PARM_STACK_SPACE || REG_PARM_STACK_SPACE (fndecl) > 0 #endif ) pad_to_arg_alignment (initial_offset_ptr, boundary, &locate->alignment_pad); locate->slot_offset = *initial_offset_ptr; #ifdef PUSH_ROUNDING if (passed_mode != BLKmode) sizetree = size_int (PUSH_ROUNDING (TREE_INT_CST_LOW (sizetree))); #endif /* Pad_below needs the pre-rounded size to know how much to pad below so this must be done before rounding up. */ locate->offset = locate->slot_offset; if (where_pad == downward) pad_below (&locate->offset, passed_mode, sizetree); if (where_pad != none && (!host_integerp (sizetree, 1) || (tree_low_cst (sizetree, 1) * BITS_PER_UNIT) % PARM_BOUNDARY)) sizetree = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT); ADD_PARM_SIZE (locate->size, sizetree); locate->size.constant -= part_size_in_regs; #endif /* ARGS_GROW_DOWNWARD */ } /* Round the stack offset in *OFFSET_PTR up to a multiple of BOUNDARY. BOUNDARY is measured in bits, but must be a multiple of a storage unit. */ static void pad_to_arg_alignment (struct args_size *offset_ptr, int boundary, struct args_size *alignment_pad) { tree save_var = NULL_TREE; HOST_WIDE_INT save_constant = 0; int boundary_in_bytes = boundary / BITS_PER_UNIT; HOST_WIDE_INT sp_offset = STACK_POINTER_OFFSET; #ifdef SPARC_STACK_BOUNDARY_HACK /* The sparc port has a bug. It sometimes claims a STACK_BOUNDARY higher than the real alignment of %sp. However, when it does this, the alignment of %sp+STACK_POINTER_OFFSET will be STACK_BOUNDARY. This is a temporary hack while the sparc port is fixed. 
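   To make the rounding below concrete (made-up numbers): with
   STACK_POINTER_OFFSET == 4, an incoming constant offset of 10 and a 16-byte
   boundary, the constant-only case computes

       -4 + CEIL_ROUND (10 + 4, 16) == 12

   so that offset + STACK_POINTER_OFFSET is again a multiple of 16; and when
   BOUNDARY exceeds both PARM_BOUNDARY and STACK_BOUNDARY, alignment_pad
   records the 2 bytes of padding that were added.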
*/ if (SPARC_STACK_BOUNDARY_HACK) sp_offset = 0; #endif if (boundary > PARM_BOUNDARY && boundary > STACK_BOUNDARY) { save_var = offset_ptr->var; save_constant = offset_ptr->constant; } alignment_pad->var = NULL_TREE; alignment_pad->constant = 0; if (boundary > BITS_PER_UNIT) { if (offset_ptr->var) { tree sp_offset_tree = ssize_int (sp_offset); tree offset = size_binop (PLUS_EXPR, ARGS_SIZE_TREE (*offset_ptr), sp_offset_tree); #ifdef ARGS_GROW_DOWNWARD tree rounded = round_down (offset, boundary / BITS_PER_UNIT); #else tree rounded = round_up (offset, boundary / BITS_PER_UNIT); #endif offset_ptr->var = size_binop (MINUS_EXPR, rounded, sp_offset_tree); /* ARGS_SIZE_TREE includes constant term. */ offset_ptr->constant = 0; if (boundary > PARM_BOUNDARY && boundary > STACK_BOUNDARY) alignment_pad->var = size_binop (MINUS_EXPR, offset_ptr->var, save_var); } else { offset_ptr->constant = -sp_offset + #ifdef ARGS_GROW_DOWNWARD FLOOR_ROUND (offset_ptr->constant + sp_offset, boundary_in_bytes); #else CEIL_ROUND (offset_ptr->constant + sp_offset, boundary_in_bytes); #endif if (boundary > PARM_BOUNDARY && boundary > STACK_BOUNDARY) alignment_pad->constant = offset_ptr->constant - save_constant; } } } static void pad_below (struct args_size *offset_ptr, enum machine_mode passed_mode, tree sizetree) { if (passed_mode != BLKmode) { if (GET_MODE_BITSIZE (passed_mode) % PARM_BOUNDARY) offset_ptr->constant += (((GET_MODE_BITSIZE (passed_mode) + PARM_BOUNDARY - 1) / PARM_BOUNDARY * PARM_BOUNDARY / BITS_PER_UNIT) - GET_MODE_SIZE (passed_mode)); } else { if (TREE_CODE (sizetree) != INTEGER_CST || (TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY) { /* Round the size up to multiple of PARM_BOUNDARY bits. */ tree s2 = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT); /* Add it in. */ ADD_PARM_SIZE (*offset_ptr, s2); SUB_PARM_SIZE (*offset_ptr, sizetree); } } } /* Walk the tree of blocks describing the binding levels within a function and warn about variables the might be killed by setjmp or vfork. This is done after calling flow_analysis and before global_alloc clobbers the pseudo-regs to hard regs. */ void setjmp_vars_warning (tree block) { tree decl, sub; for (decl = BLOCK_VARS (block); decl; decl = TREE_CHAIN (decl)) { if (TREE_CODE (decl) == VAR_DECL && DECL_RTL_SET_P (decl) && REG_P (DECL_RTL (decl)) && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl)))) warning ("%Jvariable '%D' might be clobbered by `longjmp' or `vfork'", decl, decl); } for (sub = BLOCK_SUBBLOCKS (block); sub; sub = TREE_CHAIN (sub)) setjmp_vars_warning (sub); } /* Do the appropriate part of setjmp_vars_warning but for arguments instead of local variables. */ void setjmp_args_warning (void) { tree decl; for (decl = DECL_ARGUMENTS (current_function_decl); decl; decl = TREE_CHAIN (decl)) if (DECL_RTL (decl) != 0 && REG_P (DECL_RTL (decl)) && regno_clobbered_at_setjmp (REGNO (DECL_RTL (decl)))) warning ("%Jargument '%D' might be clobbered by `longjmp' or `vfork'", decl, decl); } /* Convert a stack slot address ADDR for variable VAR (from a containing function) into an address valid in this function (using a static chain). */ rtx fix_lexical_addr (rtx addr, tree var) { rtx basereg; HOST_WIDE_INT displacement; tree context = decl_function_context (var); struct function *fp; rtx base = 0; /* If this is the present function, we need not do anything. */ if (context == current_function_decl) return addr; fp = find_function_data (context); /* Decode given address as base reg plus displacement. 
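   E.g. (reg FP) decodes to basereg == FP with displacement 0, and
   (plus (reg FP) (const_int 8)) to basereg == FP with displacement 8;
   any other form aborts.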
*/ if (REG_P (addr)) basereg = addr, displacement = 0; else if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT) basereg = XEXP (addr, 0), displacement = INTVAL (XEXP (addr, 1)); else abort (); if (base == 0) abort (); /* Use same offset, relative to appropriate static chain or argument pointer. */ return plus_constant (base, displacement); } /* Identify BLOCKs referenced by more than one NOTE_INSN_BLOCK_{BEG,END}, and create duplicate blocks. */ /* ??? Need an option to either create block fragments or to create abstract origin duplicates of a source block. It really depends on what optimization has been performed. */ void reorder_blocks (void) { tree block = DECL_INITIAL (current_function_decl); varray_type block_stack; if (block == NULL_TREE) return; VARRAY_TREE_INIT (block_stack, 10, "block_stack"); /* Reset the TREE_ASM_WRITTEN bit for all blocks. */ clear_block_marks (block); /* Prune the old trees away, so that they don't get in the way. */ BLOCK_SUBBLOCKS (block) = NULL_TREE; BLOCK_CHAIN (block) = NULL_TREE; /* Recreate the block tree from the note nesting. */ reorder_blocks_1 (get_insns (), block, &block_stack); BLOCK_SUBBLOCKS (block) = blocks_nreverse (BLOCK_SUBBLOCKS (block)); /* Remove deleted blocks from the block fragment chains. */ reorder_fix_fragments (block); } /* Helper function for reorder_blocks. Reset TREE_ASM_WRITTEN. */ void clear_block_marks (tree block) { while (block) { TREE_ASM_WRITTEN (block) = 0; clear_block_marks (BLOCK_SUBBLOCKS (block)); block = BLOCK_CHAIN (block); } } static void reorder_blocks_1 (rtx insns, tree current_block, varray_type *p_block_stack) { rtx insn; for (insn = insns; insn; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == NOTE) { if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG) { tree block = NOTE_BLOCK (insn); /* If we have seen this block before, that means it now spans multiple address regions. Create a new fragment. */ if (TREE_ASM_WRITTEN (block)) { tree new_block = copy_node (block); tree origin; origin = (BLOCK_FRAGMENT_ORIGIN (block) ? BLOCK_FRAGMENT_ORIGIN (block) : block); BLOCK_FRAGMENT_ORIGIN (new_block) = origin; BLOCK_FRAGMENT_CHAIN (new_block) = BLOCK_FRAGMENT_CHAIN (origin); BLOCK_FRAGMENT_CHAIN (origin) = new_block; NOTE_BLOCK (insn) = new_block; block = new_block; } BLOCK_SUBBLOCKS (block) = 0; TREE_ASM_WRITTEN (block) = 1; /* When there's only one block for the entire function, current_block == block and we mustn't do this, it will cause infinite recursion. */ if (block != current_block) { BLOCK_SUPERCONTEXT (block) = current_block; BLOCK_CHAIN (block) = BLOCK_SUBBLOCKS (current_block); BLOCK_SUBBLOCKS (current_block) = block; current_block = block; } VARRAY_PUSH_TREE (*p_block_stack, block); } else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END) { NOTE_BLOCK (insn) = VARRAY_TOP_TREE (*p_block_stack); VARRAY_POP (*p_block_stack); BLOCK_SUBBLOCKS (current_block) = blocks_nreverse (BLOCK_SUBBLOCKS (current_block)); current_block = BLOCK_SUPERCONTEXT (current_block); } } } } /* Rationalize BLOCK_FRAGMENT_ORIGIN. If an origin block no longer appears in the block tree, select one of the fragments to become the new origin block. */ static void reorder_fix_fragments (tree block) { while (block) { tree dup_origin = BLOCK_FRAGMENT_ORIGIN (block); tree new_origin = NULL_TREE; if (dup_origin) { if (! TREE_ASM_WRITTEN (dup_origin)) { new_origin = BLOCK_FRAGMENT_CHAIN (dup_origin); /* Find the first of the remaining fragments. There must be at least one -- the current block. */ while (! 
TREE_ASM_WRITTEN (new_origin)) new_origin = BLOCK_FRAGMENT_CHAIN (new_origin); BLOCK_FRAGMENT_ORIGIN (new_origin) = NULL_TREE; } } else if (! dup_origin) new_origin = block; /* Re-root the rest of the fragments to the new origin. In the case that DUP_ORIGIN was null, that means BLOCK was the origin of a chain of fragments and we want to remove those fragments that didn't make it to the output. */ if (new_origin) { tree *pp = &BLOCK_FRAGMENT_CHAIN (new_origin); tree chain = *pp; while (chain) { if (TREE_ASM_WRITTEN (chain)) { BLOCK_FRAGMENT_ORIGIN (chain) = new_origin; *pp = chain; pp = &BLOCK_FRAGMENT_CHAIN (chain); } chain = BLOCK_FRAGMENT_CHAIN (chain); } *pp = NULL_TREE; } reorder_fix_fragments (BLOCK_SUBBLOCKS (block)); block = BLOCK_CHAIN (block); } } /* Reverse the order of elements in the chain T of blocks, and return the new head of the chain (old last element). */ tree blocks_nreverse (tree t) { tree prev = 0, decl, next; for (decl = t; decl; decl = next) { next = BLOCK_CHAIN (decl); BLOCK_CHAIN (decl) = prev; prev = decl; } return prev; } /* Count the subblocks of the list starting with BLOCK. If VECTOR is non-NULL, list them all into VECTOR, in a depth-first preorder traversal of the block tree. Also clear TREE_ASM_WRITTEN in all blocks. */ static int all_blocks (tree block, tree *vector) { int n_blocks = 0; while (block) { TREE_ASM_WRITTEN (block) = 0; /* Record this block. */ if (vector) vector[n_blocks] = block; ++n_blocks; /* Record the subblocks, and their subblocks... */ n_blocks += all_blocks (BLOCK_SUBBLOCKS (block), vector ? vector + n_blocks : 0); block = BLOCK_CHAIN (block); } return n_blocks; } /* Return a vector containing all the blocks rooted at BLOCK. The number of elements in the vector is stored in N_BLOCKS_P. The vector is dynamically allocated; it is the caller's responsibility to call `free' on the pointer returned. */ static tree * get_block_vector (tree block, int *n_blocks_p) { tree *block_vector; *n_blocks_p = all_blocks (block, NULL); block_vector = xmalloc (*n_blocks_p * sizeof (tree)); all_blocks (block, block_vector); return block_vector; } static GTY(()) int next_block_index = 2; /* Set BLOCK_NUMBER for all the blocks in FN. */ void number_blocks (tree fn) { int i; int n_blocks; tree *block_vector; /* For SDB and XCOFF debugging output, we start numbering the blocks from 1 within each function, rather than keeping a running count. */ #if defined (SDB_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO) if (write_symbols == SDB_DEBUG || write_symbols == XCOFF_DEBUG) next_block_index = 1; #endif block_vector = get_block_vector (DECL_INITIAL (fn), &n_blocks); /* The top-level BLOCK isn't numbered at all. */ for (i = 1; i < n_blocks; ++i) /* We number the blocks from two. */ BLOCK_NUMBER (block_vector[i]) = next_block_index++; free (block_vector); return; } /* If VAR is present in a subblock of BLOCK, return the subblock. */ tree debug_find_var_in_block_tree (tree var, tree block) { tree t; for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t)) if (t == var) return block; for (t = BLOCK_SUBBLOCKS (block); t; t = TREE_CHAIN (t)) { tree ret = debug_find_var_in_block_tree (var, t); if (ret) return ret; } return NULL_TREE; } /* Allocate a function structure for FNDECL and set its contents to the defaults. */ void allocate_struct_function (tree fndecl) { tree result; tree fntype = fndecl ? 
TREE_TYPE (fndecl) : NULL_TREE; cfun = ggc_alloc_cleared (sizeof (struct function)); cfun->stack_alignment_needed = STACK_BOUNDARY; cfun->preferred_stack_boundary = STACK_BOUNDARY; current_function_funcdef_no = funcdef_no++; cfun->function_frequency = FUNCTION_FREQUENCY_NORMAL; init_stmt_for_function (); init_eh_for_function (); lang_hooks.function.init (cfun); if (init_machine_status) cfun->machine = (*init_machine_status) (); if (fndecl == NULL) return; DECL_STRUCT_FUNCTION (fndecl) = cfun; cfun->decl = fndecl; result = DECL_RESULT (fndecl); if (aggregate_value_p (result, fndecl)) { #ifdef PCC_STATIC_STRUCT_RETURN current_function_returns_pcc_struct = 1; #endif current_function_returns_struct = 1; } current_function_returns_pointer = POINTER_TYPE_P (TREE_TYPE (result)); current_function_stdarg = (fntype && TYPE_ARG_TYPES (fntype) != 0 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype))) != void_type_node)); } /* Reset cfun, and other non-struct-function variables to defaults as appropriate for emitting rtl at the start of a function. */ static void prepare_function_start (tree fndecl) { if (fndecl && DECL_STRUCT_FUNCTION (fndecl)) cfun = DECL_STRUCT_FUNCTION (fndecl); else allocate_struct_function (fndecl); init_emit (); init_varasm_status (cfun); init_expr (); cse_not_expected = ! optimize; /* Caller save not needed yet. */ caller_save_needed = 0; /* We haven't done register allocation yet. */ reg_renumber = 0; /* Indicate that we need to distinguish between the return value of the present function and the return value of a function being called. */ rtx_equal_function_value_matters = 1; /* Indicate that we have not instantiated virtual registers yet. */ virtuals_instantiated = 0; /* Indicate that we want CONCATs now. */ generating_concat_p = 1; /* Indicate we have no need of a frame pointer yet. */ frame_pointer_needed = 0; } /* Initialize the rtl expansion mechanism so that we can do simple things like generate sequences. This is used to provide a context during global initialization of some passes. */ void init_dummy_function_start (void) { prepare_function_start (NULL); } /* Generate RTL for the start of the function SUBR (a FUNCTION_DECL tree node) and initialize static variables for generating RTL for the statements of the function. */ void init_function_start (tree subr) { prepare_function_start (subr); /* Prevent ever trying to delete the first instruction of a function. Also tell final how to output a linenum before the function prologue. Note linenums could be missing, e.g. when compiling a Java .class file. */ if (! DECL_IS_BUILTIN (subr)) emit_line_note (DECL_SOURCE_LOCATION (subr)); /* Make sure first insn is a note even if we don't want linenums. This makes sure the first insn will never be deleted. Also, final expects a note to appear there. */ emit_note (NOTE_INSN_DELETED); /* Warn if this value is an aggregate type, regardless of which calling convention we are using for it. */ if (warn_aggregate_return && AGGREGATE_TYPE_P (TREE_TYPE (DECL_RESULT (subr)))) warning ("function returns an aggregate"); } /* Make sure all values used by the optimization passes have sane defaults. */ void init_function_for_compilation (void) { reg_renumber = 0; /* No prologue/epilogue insns yet. */ VARRAY_GROW (prologue, 0); VARRAY_GROW (epilogue, 0); VARRAY_GROW (sibcall_epilogue, 0); } /* Expand a call to __main at the beginning of a possible main function. 
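   On a target without an init section the effect is roughly as if the user
   had written (sketch; __main is the libgcc entry point that runs static
   constructors):

       int main (int argc, char **argv)
       {
         __main ();
         ... user code ...
       }

   Targets that define INIT_SECTION_ASM_OP (and do not define INVOKE__main)
   skip the library call, and FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN targets
   first realign the stack pointer as done below.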
*/ #if defined(INIT_SECTION_ASM_OP) && !defined(INVOKE__main) #undef HAS_INIT_SECTION #define HAS_INIT_SECTION #endif void expand_main_function (void) { #ifdef FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN if (FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN) { int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT; rtx tmp, seq; start_sequence (); /* Forcibly align the stack. */ #ifdef STACK_GROWS_DOWNWARD tmp = expand_simple_binop (Pmode, AND, stack_pointer_rtx, GEN_INT(-align), stack_pointer_rtx, 1, OPTAB_WIDEN); #else tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx, GEN_INT (align - 1), NULL_RTX, 1, OPTAB_WIDEN); tmp = expand_simple_binop (Pmode, AND, tmp, GEN_INT (-align), stack_pointer_rtx, 1, OPTAB_WIDEN); #endif if (tmp != stack_pointer_rtx) emit_move_insn (stack_pointer_rtx, tmp); /* Enlist allocate_dynamic_stack_space to pick up the pieces. */ tmp = force_reg (Pmode, const0_rtx); allocate_dynamic_stack_space (tmp, NULL_RTX, BIGGEST_ALIGNMENT); seq = get_insns (); end_sequence (); for (tmp = get_last_insn (); tmp; tmp = PREV_INSN (tmp)) if (NOTE_P (tmp) && NOTE_LINE_NUMBER (tmp) == NOTE_INSN_FUNCTION_BEG) break; if (tmp) emit_insn_before (seq, tmp); else emit_insn (seq); } #endif #ifndef HAS_INIT_SECTION emit_library_call (init_one_libfunc (NAME__MAIN), LCT_NORMAL, VOIDmode, 0); #endif } /* The PENDING_SIZES represent the sizes of variable-sized types. Create RTL for the various sizes now (using temporary variables), so that we can refer to the sizes from the RTL we are generating for the current function. The PENDING_SIZES are a TREE_LIST. The TREE_VALUE of each node is a SAVE_EXPR. */ void expand_pending_sizes (tree pending_sizes) { tree tem; /* Evaluate now the sizes of any types declared among the arguments. */ for (tem = pending_sizes; tem; tem = TREE_CHAIN (tem)) { expand_expr (TREE_VALUE (tem), const0_rtx, VOIDmode, 0); /* Flush the queue in case this parameter declaration has side-effects. */ emit_queue (); } } /* Start the RTL for a new function, and set variables used for emitting RTL. SUBR is the FUNCTION_DECL node. PARMS_HAVE_CLEANUPS is nonzero if there are cleanups associated with the function's parameters, which must be run at any return statement. */ void expand_function_start (tree subr) { /* Make sure volatile mem refs aren't considered valid operands of arithmetic insns. */ init_recog_no_volatile (); current_function_profile = (profile_flag && ! DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (subr)); current_function_limit_stack = (stack_limit_rtx != NULL_RTX && ! DECL_NO_LIMIT_STACK (subr)); /* Make the label for return statements to jump to. Do not special case machines with special return instructions -- they will be handled later during jump, ifcvt, or epilogue creation. */ return_label = gen_label_rtx (); /* Initialize rtx used to return the value. */ /* Do this before assign_parms so that we copy the struct value address before any library calls that assign parms might generate. */ /* Decide whether to return the value in memory or in a register. */ if (aggregate_value_p (DECL_RESULT (subr), subr)) { /* Returning something that won't go in a register. */ rtx value_address = 0; #ifdef PCC_STATIC_STRUCT_RETURN if (current_function_returns_pcc_struct) { int size = int_size_in_bytes (TREE_TYPE (DECL_RESULT (subr))); value_address = assemble_static_space (size); } else #endif { rtx sv = targetm.calls.struct_value_rtx (TREE_TYPE (subr), 1); /* Expect to be passed the address of a place to store the value. If it is passed as an argument, assign_parms will take care of it. 
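   E.g. if targetm.calls.struct_value_rtx returns a hard register (the
   target's designated struct-value register), the code below copies it into
   a fresh Pmode pseudo and the result decl ends up with DECL_RTL of the form
   (mem (reg <pseudo>)); if the hook returns 0, the address arrives as an
   ordinary (artificial) first argument and assign_parms handles it instead.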
*/ if (sv) { value_address = gen_reg_rtx (Pmode); emit_move_insn (value_address, sv); } } if (value_address) { rtx x = gen_rtx_MEM (DECL_MODE (DECL_RESULT (subr)), value_address); set_mem_attributes (x, DECL_RESULT (subr), 1); SET_DECL_RTL (DECL_RESULT (subr), x); } } else if (DECL_MODE (DECL_RESULT (subr)) == VOIDmode) /* If return mode is void, this decl rtl should not be used. */ SET_DECL_RTL (DECL_RESULT (subr), NULL_RTX); else { /* Compute the return values into a pseudo reg, which we will copy into the true return register after the cleanups are done. */ /* In order to figure out what mode to use for the pseudo, we figure out what the mode of the eventual return register will actually be, and use that. */ rtx hard_reg = hard_function_value (TREE_TYPE (DECL_RESULT (subr)), subr, 1); /* Structures that are returned in registers are not aggregate_value_p, so we may see a PARALLEL or a REG. */ if (REG_P (hard_reg)) SET_DECL_RTL (DECL_RESULT (subr), gen_reg_rtx (GET_MODE (hard_reg))); else if (GET_CODE (hard_reg) == PARALLEL) SET_DECL_RTL (DECL_RESULT (subr), gen_group_rtx (hard_reg)); else abort (); /* Set DECL_REGISTER flag so that expand_function_end will copy the result to the real return register(s). */ DECL_REGISTER (DECL_RESULT (subr)) = 1; } /* Initialize rtx for parameters and local variables. In some cases this requires emitting insns. */ assign_parms (subr); /* If function gets a static chain arg, store it. */ if (cfun->static_chain_decl) { tree parm = cfun->static_chain_decl; rtx local = gen_reg_rtx (Pmode); set_decl_incoming_rtl (parm, static_chain_incoming_rtx); SET_DECL_RTL (parm, local); maybe_set_unchanging (local, parm); mark_reg_pointer (local, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (parm)))); emit_move_insn (local, static_chain_incoming_rtx); } /* If the function receives a non-local goto, then store the bits we need to restore the frame pointer. */ if (cfun->nonlocal_goto_save_area) { tree t_save; rtx r_save; /* ??? We need to do this save early. Unfortunately here is before the frame variable gets declared. Help out... */ expand_var (TREE_OPERAND (cfun->nonlocal_goto_save_area, 0)); t_save = build (ARRAY_REF, ptr_type_node, cfun->nonlocal_goto_save_area, integer_zero_node, NULL_TREE, NULL_TREE); r_save = expand_expr (t_save, NULL_RTX, VOIDmode, EXPAND_WRITE); emit_move_insn (r_save, virtual_stack_vars_rtx); update_nonlocal_goto_save_area (); } /* The following was moved from init_function_start. The move is supposed to make sdb output more accurate. */ /* Indicate the beginning of the function body, as opposed to parm setup. */ emit_note (NOTE_INSN_FUNCTION_BEG); if (GET_CODE (get_last_insn ()) != NOTE) emit_note (NOTE_INSN_DELETED); parm_birth_insn = get_last_insn (); if (current_function_profile) { #ifdef PROFILE_HOOK PROFILE_HOOK (current_function_funcdef_no); #endif } /* After the display initializations is where the tail-recursion label should go, if we end up needing one. Ensure we have a NOTE here since some things (like trampolines) get placed before this. */ tail_recursion_reentry = emit_note (NOTE_INSN_DELETED); /* Evaluate now the sizes of any types declared among the arguments. */ expand_pending_sizes (nreverse (get_pending_sizes ())); /* Make sure there is a line number after the function entry setup code. */ force_next_line_note (); } /* Undo the effects of init_dummy_function_start. */ void expand_dummy_function_end (void) { /* End any sequences that failed to be closed due to syntax errors. 
*/ while (in_sequence_p ()) end_sequence (); /* Outside function body, can't compute type's actual size until next function's body starts. */ free_after_parsing (cfun); free_after_compilation (cfun); cfun = 0; } /* Call DOIT for each hard register used as a return value from the current function. */ void diddle_return_value (void (*doit) (rtx, void *), void *arg) { rtx outgoing = current_function_return_rtx; if (! outgoing) return; if (REG_P (outgoing)) (*doit) (outgoing, arg); else if (GET_CODE (outgoing) == PARALLEL) { int i; for (i = 0; i < XVECLEN (outgoing, 0); i++) { rtx x = XEXP (XVECEXP (outgoing, 0, i), 0); if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER) (*doit) (x, arg); } } } static void do_clobber_return_reg (rtx reg, void *arg ATTRIBUTE_UNUSED) { emit_insn (gen_rtx_CLOBBER (VOIDmode, reg)); } void clobber_return_register (void) { diddle_return_value (do_clobber_return_reg, NULL); /* In case we do use pseudo to return value, clobber it too. */ if (DECL_RTL_SET_P (DECL_RESULT (current_function_decl))) { tree decl_result = DECL_RESULT (current_function_decl); rtx decl_rtl = DECL_RTL (decl_result); if (REG_P (decl_rtl) && REGNO (decl_rtl) >= FIRST_PSEUDO_REGISTER) { do_clobber_return_reg (decl_rtl, NULL); } } } static void do_use_return_reg (rtx reg, void *arg ATTRIBUTE_UNUSED) { emit_insn (gen_rtx_USE (VOIDmode, reg)); } void use_return_register (void) { diddle_return_value (do_use_return_reg, NULL); } /* Possibly warn about unused parameters. */ void do_warn_unused_parameter (tree fn) { tree decl; for (decl = DECL_ARGUMENTS (fn); decl; decl = TREE_CHAIN (decl)) if (!TREE_USED (decl) && TREE_CODE (decl) == PARM_DECL && DECL_NAME (decl) && !DECL_ARTIFICIAL (decl)) warning ("%Junused parameter '%D'", decl, decl); } static GTY(()) rtx initial_trampoline; /* Generate RTL for the end of the current function. */ void expand_function_end (void) { rtx clobber_after; finish_expr_for_function (); /* If arg_pointer_save_area was referenced only from a nested function, we will not have initialized it yet. Do that now. */ if (arg_pointer_save_area && ! cfun->arg_pointer_save_area_init) get_arg_pointer_save_area (cfun); /* If we are doing stack checking and this function makes calls, do a stack probe at the start of the function to ensure we have enough space for another stack frame. */ if (flag_stack_check && ! STACK_CHECK_BUILTIN) { rtx insn, seq; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == CALL_INSN) { start_sequence (); probe_stack_range (STACK_CHECK_PROTECT, GEN_INT (STACK_CHECK_MAX_FRAME_SIZE)); seq = get_insns (); end_sequence (); emit_insn_before (seq, tail_recursion_reentry); break; } } /* Possibly warn about unused parameters. When frontend does unit-at-a-time, the warning is already issued at finalization time. */ if (warn_unused_parameter && !lang_hooks.callgraph.expand_function) do_warn_unused_parameter (current_function_decl); /* End any sequences that failed to be closed due to syntax errors. */ while (in_sequence_p ()) end_sequence (); clear_pending_stack_adjust (); do_pending_stack_adjust (); /* @@@ This is a kludge. We want to ensure that instructions that may trap are not moved into the epilogue by scheduling, because we don't always emit unwind information for the epilogue. However, not all machine descriptions define a blockage insn, so emit an ASM_INPUT to act as one. */ if (flag_non_call_exceptions) emit_insn (gen_rtx_ASM_INPUT (VOIDmode, "")); /* Mark the end of the function body. 
If control reaches this insn, the function can drop through without returning a value. */ emit_note (NOTE_INSN_FUNCTION_END); /* Must mark the last line number note in the function, so that the test coverage code can avoid counting the last line twice. This just tells the code to ignore the immediately following line note, since there already exists a copy of this note somewhere above. This line number note is still needed for debugging though, so we can't delete it. */ if (flag_test_coverage) emit_note (NOTE_INSN_REPEATED_LINE_NUMBER); /* Output a linenumber for the end of the function. SDB depends on this. */ force_next_line_note (); emit_line_note (input_location); /* Before the return label (if any), clobber the return registers so that they are not propagated live to the rest of the function. This can only happen with functions that drop through; if there had been a return statement, there would have either been a return rtx, or a jump to the return label. We delay actual code generation after the current_function_value_rtx is computed. */ clobber_after = get_last_insn (); /* Output the label for the actual return from the function, if one is expected. This happens either because a function epilogue is used instead of a return instruction, or because a return was done with a goto in order to run local cleanups, or because of pcc-style structure returning. */ if (return_label) emit_label (return_label); /* Let except.c know where it should emit the call to unregister the function context for sjlj exceptions. */ if (flag_exceptions && USING_SJLJ_EXCEPTIONS) sjlj_emit_function_exit_after (get_last_insn ()); /* If we had calls to alloca, and this machine needs an accurate stack pointer to exit the function, insert some code to save and restore the stack pointer. */ if (! EXIT_IGNORE_STACK && current_function_calls_alloca) { rtx tem = 0; emit_stack_save (SAVE_FUNCTION, &tem, parm_birth_insn); emit_stack_restore (SAVE_FUNCTION, tem, NULL_RTX); } /* If scalar return value was computed in a pseudo-reg, or was a named return value that got dumped to the stack, copy that to the hard return register. */ if (DECL_RTL_SET_P (DECL_RESULT (current_function_decl))) { tree decl_result = DECL_RESULT (current_function_decl); rtx decl_rtl = DECL_RTL (decl_result); if (REG_P (decl_rtl) ? REGNO (decl_rtl) >= FIRST_PSEUDO_REGISTER : DECL_REGISTER (decl_result)) { rtx real_decl_rtl = current_function_return_rtx; /* This should be set in assign_parms. */ if (! REG_FUNCTION_VALUE_P (real_decl_rtl)) abort (); /* If this is a BLKmode structure being returned in registers, then use the mode computed in expand_return. Note that if decl_rtl is memory, then its mode may have been changed, but that current_function_return_rtx has not. */ if (GET_MODE (real_decl_rtl) == BLKmode) PUT_MODE (real_decl_rtl, GET_MODE (decl_rtl)); /* If a named return value dumped decl_return to memory, then we may need to re-do the PROMOTE_MODE signed/unsigned extension. */ if (GET_MODE (real_decl_rtl) != GET_MODE (decl_rtl)) { int unsignedp = TYPE_UNSIGNED (TREE_TYPE (decl_result)); if (targetm.calls.promote_function_return (TREE_TYPE (current_function_decl))) promote_mode (TREE_TYPE (decl_result), GET_MODE (decl_rtl), &unsignedp, 1); convert_move (real_decl_rtl, decl_rtl, unsignedp); } else if (GET_CODE (real_decl_rtl) == PARALLEL) { /* If expand_function_start has created a PARALLEL for decl_rtl, move the result to the real return registers. Otherwise, do a group load from decl_rtl for a named return. 
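   (Schematic, hypothetical target: a structure returned in two word
   registers has a real_decl_rtl of the form

       (parallel [(expr_list (reg:DI 0) (const_int 0))
                  (expr_list (reg:DI 1) (const_int 8))])

   so emit_group_move copies it piecewise from a like-shaped PARALLEL, while
   emit_group_load assembles the pieces from the in-memory decl_rtl.)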
*/ if (GET_CODE (decl_rtl) == PARALLEL) emit_group_move (real_decl_rtl, decl_rtl); else emit_group_load (real_decl_rtl, decl_rtl, TREE_TYPE (decl_result), int_size_in_bytes (TREE_TYPE (decl_result))); } else emit_move_insn (real_decl_rtl, decl_rtl); } } /* If returning a structure, arrange to return the address of the value in a place where debuggers expect to find it. If returning a structure PCC style, the caller also depends on this value. And current_function_returns_pcc_struct is not necessarily set. */ if (current_function_returns_struct || current_function_returns_pcc_struct) { rtx value_address = XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0); tree type = TREE_TYPE (DECL_RESULT (current_function_decl)); #ifdef FUNCTION_OUTGOING_VALUE rtx outgoing = FUNCTION_OUTGOING_VALUE (build_pointer_type (type), current_function_decl); #else rtx outgoing = FUNCTION_VALUE (build_pointer_type (type), current_function_decl); #endif /* Mark this as a function return value so integrate will delete the assignment and USE below when inlining this function. */ REG_FUNCTION_VALUE_P (outgoing) = 1; /* The address may be ptr_mode and OUTGOING may be Pmode. */ value_address = convert_memory_address (GET_MODE (outgoing), value_address); emit_move_insn (outgoing, value_address); /* Show return register used to hold result (in this case the address of the result. */ current_function_return_rtx = outgoing; } /* If this is an implementation of throw, do what's necessary to communicate between __builtin_eh_return and the epilogue. */ expand_eh_return (); /* Emit the actual code to clobber return register. */ { rtx seq, after; start_sequence (); clobber_return_register (); seq = get_insns (); end_sequence (); after = emit_insn_after (seq, clobber_after); } /* Output the label for the naked return from the function, if one is expected. This is currently used only by __builtin_return. */ if (naked_return_label) emit_label (naked_return_label); /* ??? This should no longer be necessary since stupid is no longer with us, but there are some parts of the compiler (eg reload_combine, and sh mach_dep_reorg) that still try and compute their own lifetime info instead of using the general framework. */ use_return_register (); /* Fix up any gotos that jumped out to the outermost binding level of the function. Must follow emitting RETURN_LABEL. */ /* If you have any cleanups to do at this point, and they need to create temporary variables, then you will lose. */ expand_fixups (get_insns ()); } rtx get_arg_pointer_save_area (struct function *f) { rtx ret = f->x_arg_pointer_save_area; if (! ret) { ret = assign_stack_local_1 (Pmode, GET_MODE_SIZE (Pmode), 0, f); f->x_arg_pointer_save_area = ret; } if (f == cfun && ! f->arg_pointer_save_area_init) { rtx seq; /* Save the arg pointer at the beginning of the function. The generated stack slot may not be a valid memory address, so we have to check it and fix it if necessary. */ start_sequence (); emit_move_insn (validize_mem (ret), virtual_incoming_args_rtx); seq = get_insns (); end_sequence (); push_topmost_sequence (); emit_insn_after (seq, get_insns ()); pop_topmost_sequence (); } return ret; } /* Extend a vector that records the INSN_UIDs of INSNS (a list of one or more insns). 
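   For example, the prologue code below does record_insns (seq, &prologue)
   right after emitting the prologue sequence, so that
   prologue_epilogue_contains can later recognize those insns by their UIDs.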
*/ static void record_insns (rtx insns, varray_type *vecp) { int i, len; rtx tmp; tmp = insns; len = 0; while (tmp != NULL_RTX) { len++; tmp = NEXT_INSN (tmp); } i = VARRAY_SIZE (*vecp); VARRAY_GROW (*vecp, i + len); tmp = insns; while (tmp != NULL_RTX) { VARRAY_INT (*vecp, i) = INSN_UID (tmp); i++; tmp = NEXT_INSN (tmp); } } /* Set the locator of the insn chain starting at INSN to LOC. */ static void set_insn_locators (rtx insn, int loc) { while (insn != NULL_RTX) { if (INSN_P (insn)) INSN_LOCATOR (insn) = loc; insn = NEXT_INSN (insn); } } /* Determine how many INSN_UIDs in VEC are part of INSN. Because we can be running after reorg, SEQUENCE rtl is possible. */ static int contains (rtx insn, varray_type vec) { int i, j; if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) { int count = 0; for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) for (j = VARRAY_SIZE (vec) - 1; j >= 0; --j) if (INSN_UID (XVECEXP (PATTERN (insn), 0, i)) == VARRAY_INT (vec, j)) count++; return count; } else { for (j = VARRAY_SIZE (vec) - 1; j >= 0; --j) if (INSN_UID (insn) == VARRAY_INT (vec, j)) return 1; } return 0; } int prologue_epilogue_contains (rtx insn) { if (contains (insn, prologue)) return 1; if (contains (insn, epilogue)) return 1; return 0; } int sibcall_epilogue_contains (rtx insn) { if (sibcall_epilogue) return contains (insn, sibcall_epilogue); return 0; } #ifdef HAVE_return /* Insert gen_return at the end of block BB. This also means updating block_for_insn appropriately. */ static void emit_return_into_block (basic_block bb, rtx line_note) { emit_jump_insn_after (gen_return (), BB_END (bb)); if (line_note) emit_note_copy_after (line_note, PREV_INSN (BB_END (bb))); } #endif /* HAVE_return */ #if defined(HAVE_epilogue) && defined(INCOMING_RETURN_ADDR_RTX) /* These functions convert the epilogue into a variant that does not modify the stack pointer. This is used in cases where a function returns an object whose size is not known until it is computed. The called function leaves the object on the stack, leaves the stack depressed, and returns a pointer to the object. What we need to do is track all modifications and references to the stack pointer, deleting the modifications and changing the references to point to the location the stack pointer would have pointed to had the modifications taken place. These functions need to be portable so we need to make as few assumptions about the epilogue as we can. However, the epilogue basically contains three things: instructions to reset the stack pointer, instructions to reload registers, possibly including the frame pointer, and an instruction to return to the caller. If we can't be sure of what a relevant epilogue insn is doing, we abort. We also make no attempt to validate the insns we make since if they are invalid, we probably can't do anything valid. The intent is that these routines get "smarter" as more and more machines start to use them and they try operating on different epilogues. We use the following structure to track what the part of the epilogue that we've already processed has done. We keep two copies of the SP equivalence, one for use during the insn we are processing and one for use in the next insn. The difference is because one part of a PARALLEL may adjust SP and the other may use it. */ struct epi_info { rtx sp_equiv_reg; /* REG that SP is set from, perhaps SP. */ HOST_WIDE_INT sp_offset; /* Offset from SP_EQUIV_REG of present SP. */ rtx new_sp_equiv_reg; /* REG to be used at end of insn. 
*/ HOST_WIDE_INT new_sp_offset; /* Offset to be used at end of insn. */ rtx equiv_reg_src; /* If nonzero, the value that SP_EQUIV_REG should be set to once we no longer need its value. */ rtx const_equiv[FIRST_PSEUDO_REGISTER]; /* Any known constant equivalences for registers. */ }; static void handle_epilogue_set (rtx, struct epi_info *); static void update_epilogue_consts (rtx, rtx, void *); static void emit_equiv_load (struct epi_info *); /* Modify INSN, a list of one or more insns that is part of the epilogue, to no modifications to the stack pointer. Return the new list of insns. */ static rtx keep_stack_depressed (rtx insns) { int j; struct epi_info info; rtx insn, next; /* If the epilogue is just a single instruction, it must be OK as is. */ if (NEXT_INSN (insns) == NULL_RTX) return insns; /* Otherwise, start a sequence, initialize the information we have, and process all the insns we were given. */ start_sequence (); info.sp_equiv_reg = stack_pointer_rtx; info.sp_offset = 0; info.equiv_reg_src = 0; for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) info.const_equiv[j] = 0; insn = insns; next = NULL_RTX; while (insn != NULL_RTX) { next = NEXT_INSN (insn); if (!INSN_P (insn)) { add_insn (insn); insn = next; continue; } /* If this insn references the register that SP is equivalent to and we have a pending load to that register, we must force out the load first and then indicate we no longer know what SP's equivalent is. */ if (info.equiv_reg_src != 0 && reg_referenced_p (info.sp_equiv_reg, PATTERN (insn))) { emit_equiv_load (&info); info.sp_equiv_reg = 0; } info.new_sp_equiv_reg = info.sp_equiv_reg; info.new_sp_offset = info.sp_offset; /* If this is a (RETURN) and the return address is on the stack, update the address and change to an indirect jump. */ if (GET_CODE (PATTERN (insn)) == RETURN || (GET_CODE (PATTERN (insn)) == PARALLEL && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == RETURN)) { rtx retaddr = INCOMING_RETURN_ADDR_RTX; rtx base = 0; HOST_WIDE_INT offset = 0; rtx jump_insn, jump_set; /* If the return address is in a register, we can emit the insn unchanged. Otherwise, it must be a MEM and we see what the base register and offset are. In any case, we have to emit any pending load to the equivalent reg of SP, if any. */ if (REG_P (retaddr)) { emit_equiv_load (&info); add_insn (insn); insn = next; continue; } else if (MEM_P (retaddr) && REG_P (XEXP (retaddr, 0))) base = gen_rtx_REG (Pmode, REGNO (XEXP (retaddr, 0))), offset = 0; else if (MEM_P (retaddr) && GET_CODE (XEXP (retaddr, 0)) == PLUS && REG_P (XEXP (XEXP (retaddr, 0), 0)) && GET_CODE (XEXP (XEXP (retaddr, 0), 1)) == CONST_INT) { base = gen_rtx_REG (Pmode, REGNO (XEXP (XEXP (retaddr, 0), 0))); offset = INTVAL (XEXP (XEXP (retaddr, 0), 1)); } else abort (); /* If the base of the location containing the return pointer is SP, we must update it with the replacement address. Otherwise, just build the necessary MEM. */ retaddr = plus_constant (base, offset); if (base == stack_pointer_rtx) retaddr = simplify_replace_rtx (retaddr, stack_pointer_rtx, plus_constant (info.sp_equiv_reg, info.sp_offset)); retaddr = gen_rtx_MEM (Pmode, retaddr); /* If there is a pending load to the equivalent register for SP and we reference that register, we must load our address into a scratch register and then do that load. 
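   As a worked instance of the rewriting above (made-up registers): if
   INCOMING_RETURN_ADDR_RTX is (mem (plus (reg sp) (const_int 4))) and SP is
   currently known to equal (plus (reg fp) (const_int -8)), the return slot
   address is rebuilt as (plus (reg fp) (const_int -4)).  Only when the
   pending value for SP's equivalent register mentions a register appearing
   in that address do we go through the scratch-register search below.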
*/ if (info.equiv_reg_src && reg_overlap_mentioned_p (info.equiv_reg_src, retaddr)) { unsigned int regno; rtx reg; for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (HARD_REGNO_MODE_OK (regno, Pmode) && !fixed_regs[regno] && TEST_HARD_REG_BIT (regs_invalidated_by_call, regno) && !REGNO_REG_SET_P (EXIT_BLOCK_PTR->global_live_at_start, regno) && !refers_to_regno_p (regno, regno + hard_regno_nregs[regno] [Pmode], info.equiv_reg_src, NULL) && info.const_equiv[regno] == 0) break; if (regno == FIRST_PSEUDO_REGISTER) abort (); reg = gen_rtx_REG (Pmode, regno); emit_move_insn (reg, retaddr); retaddr = reg; } emit_equiv_load (&info); jump_insn = emit_jump_insn (gen_indirect_jump (retaddr)); /* Show the SET in the above insn is a RETURN. */ jump_set = single_set (jump_insn); if (jump_set == 0) abort (); else SET_IS_RETURN_P (jump_set) = 1; } /* If SP is not mentioned in the pattern and its equivalent register, if any, is not modified, just emit it. Otherwise, if neither is set, replace the reference to SP and emit the insn. If none of those are true, handle each SET individually. */ else if (!reg_mentioned_p (stack_pointer_rtx, PATTERN (insn)) && (info.sp_equiv_reg == stack_pointer_rtx || !reg_set_p (info.sp_equiv_reg, insn))) add_insn (insn); else if (! reg_set_p (stack_pointer_rtx, insn) && (info.sp_equiv_reg == stack_pointer_rtx || !reg_set_p (info.sp_equiv_reg, insn))) { if (! validate_replace_rtx (stack_pointer_rtx, plus_constant (info.sp_equiv_reg, info.sp_offset), insn)) abort (); add_insn (insn); } else if (GET_CODE (PATTERN (insn)) == SET) handle_epilogue_set (PATTERN (insn), &info); else if (GET_CODE (PATTERN (insn)) == PARALLEL) { for (j = 0; j < XVECLEN (PATTERN (insn), 0); j++) if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET) handle_epilogue_set (XVECEXP (PATTERN (insn), 0, j), &info); } else add_insn (insn); info.sp_equiv_reg = info.new_sp_equiv_reg; info.sp_offset = info.new_sp_offset; /* Now update any constants this insn sets. */ note_stores (PATTERN (insn), update_epilogue_consts, &info); insn = next; } insns = get_insns (); end_sequence (); return insns; } /* SET is a SET from an insn in the epilogue. P is a pointer to the epi_info structure that contains information about what we've seen so far. We process this SET by either updating that data or by emitting one or more insns. */ static void handle_epilogue_set (rtx set, struct epi_info *p) { /* First handle the case where we are setting SP. Record what it is being set from. If unknown, abort. */ if (reg_set_p (stack_pointer_rtx, set)) { if (SET_DEST (set) != stack_pointer_rtx) abort (); if (GET_CODE (SET_SRC (set)) == PLUS) { p->new_sp_equiv_reg = XEXP (SET_SRC (set), 0); if (GET_CODE (XEXP (SET_SRC (set), 1)) == CONST_INT) p->new_sp_offset = INTVAL (XEXP (SET_SRC (set), 1)); else if (REG_P (XEXP (SET_SRC (set), 1)) && REGNO (XEXP (SET_SRC (set), 1)) < FIRST_PSEUDO_REGISTER && p->const_equiv[REGNO (XEXP (SET_SRC (set), 1))] != 0) p->new_sp_offset = INTVAL (p->const_equiv[REGNO (XEXP (SET_SRC (set), 1))]); else abort (); } else p->new_sp_equiv_reg = SET_SRC (set), p->new_sp_offset = 0; /* If we are adjusting SP, we adjust from the old data. */ if (p->new_sp_equiv_reg == stack_pointer_rtx) { p->new_sp_equiv_reg = p->sp_equiv_reg; p->new_sp_offset += p->sp_offset; } if (p->new_sp_equiv_reg == 0 || !REG_P (p->new_sp_equiv_reg)) abort (); return; } /* Next handle the case where we are setting SP's equivalent register. If we already have a value to set it to, abort. 
We could update, but there seems little point in handling that case. Note that we have to allow for the case where we are setting the register set in the previous part of a PARALLEL inside a single insn. But use the old offset for any updates within this insn. We must allow for the case where the register is being set in a different (usually wider) mode than Pmode). */ else if (p->new_sp_equiv_reg != 0 && reg_set_p (p->new_sp_equiv_reg, set)) { if (p->equiv_reg_src != 0 || !REG_P (p->new_sp_equiv_reg) || !REG_P (SET_DEST (set)) || GET_MODE_BITSIZE (GET_MODE (SET_DEST (set))) > BITS_PER_WORD || REGNO (p->new_sp_equiv_reg) != REGNO (SET_DEST (set))) abort (); else p->equiv_reg_src = simplify_replace_rtx (SET_SRC (set), stack_pointer_rtx, plus_constant (p->sp_equiv_reg, p->sp_offset)); } /* Otherwise, replace any references to SP in the insn to its new value and emit the insn. */ else { SET_SRC (set) = simplify_replace_rtx (SET_SRC (set), stack_pointer_rtx, plus_constant (p->sp_equiv_reg, p->sp_offset)); SET_DEST (set) = simplify_replace_rtx (SET_DEST (set), stack_pointer_rtx, plus_constant (p->sp_equiv_reg, p->sp_offset)); emit_insn (set); } } /* Update the tracking information for registers set to constants. */ static void update_epilogue_consts (rtx dest, rtx x, void *data) { struct epi_info *p = (struct epi_info *) data; rtx new; if (!REG_P (dest) || REGNO (dest) >= FIRST_PSEUDO_REGISTER) return; /* If we are either clobbering a register or doing a partial set, show we don't know the value. */ else if (GET_CODE (x) == CLOBBER || ! rtx_equal_p (dest, SET_DEST (x))) p->const_equiv[REGNO (dest)] = 0; /* If we are setting it to a constant, record that constant. */ else if (GET_CODE (SET_SRC (x)) == CONST_INT) p->const_equiv[REGNO (dest)] = SET_SRC (x); /* If this is a binary operation between a register we have been tracking and a constant, see if we can compute a new constant value. */ else if (ARITHMETIC_P (SET_SRC (x)) && REG_P (XEXP (SET_SRC (x), 0)) && REGNO (XEXP (SET_SRC (x), 0)) < FIRST_PSEUDO_REGISTER && p->const_equiv[REGNO (XEXP (SET_SRC (x), 0))] != 0 && GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT && 0 != (new = simplify_binary_operation (GET_CODE (SET_SRC (x)), GET_MODE (dest), p->const_equiv[REGNO (XEXP (SET_SRC (x), 0))], XEXP (SET_SRC (x), 1))) && GET_CODE (new) == CONST_INT) p->const_equiv[REGNO (dest)] = new; /* Otherwise, we can't do anything with this value. */ else p->const_equiv[REGNO (dest)] = 0; } /* Emit an insn to do the load shown in p->equiv_reg_src, if needed. */ static void emit_equiv_load (struct epi_info *p) { if (p->equiv_reg_src != 0) { rtx dest = p->sp_equiv_reg; if (GET_MODE (p->equiv_reg_src) != GET_MODE (dest)) dest = gen_rtx_REG (GET_MODE (p->equiv_reg_src), REGNO (p->sp_equiv_reg)); emit_move_insn (dest, p->equiv_reg_src); p->equiv_reg_src = 0; } } #endif /* Generate the prologue and epilogue RTL if the machine supports it. Thread this into place with notes indicating where the prologue ends and where the epilogue begins. Update the basic block information when possible. 
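In outline: when the target provides a prologue pattern, it is inserted on the single successor edge of the entry block; for the epilogue, if HAVE_return and we are optimizing, we first try to rewrite jumps to an empty exit fall-through block as (conditional) return insns; otherwise the epilogue sequence, possibly massaged by keep_stack_depressed, is inserted on the fall-through edge into the exit block; finally, sibcall epilogues are emitted immediately before sibling call insns.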
*/ void thread_prologue_and_epilogue_insns (rtx f ATTRIBUTE_UNUSED) { int inserted = 0; edge e; #if defined (HAVE_sibcall_epilogue) || defined (HAVE_epilogue) || defined (HAVE_return) || defined (HAVE_prologue) rtx seq; #endif #ifdef HAVE_prologue rtx prologue_end = NULL_RTX; #endif #if defined (HAVE_epilogue) || defined(HAVE_return) rtx epilogue_end = NULL_RTX; #endif #ifdef HAVE_prologue if (HAVE_prologue) { start_sequence (); seq = gen_prologue (); emit_insn (seq); /* Retain a map of the prologue insns. */ record_insns (seq, &prologue); prologue_end = emit_note (NOTE_INSN_PROLOGUE_END); seq = get_insns (); end_sequence (); set_insn_locators (seq, prologue_locator); /* Can't deal with multiple successors of the entry block at the moment. Function should always have at least one entry point. */ if (!ENTRY_BLOCK_PTR->succ || ENTRY_BLOCK_PTR->succ->succ_next) abort (); insert_insn_on_edge (seq, ENTRY_BLOCK_PTR->succ); inserted = 1; } #endif /* If the exit block has no non-fake predecessors, we don't need an epilogue. */ for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) if ((e->flags & EDGE_FAKE) == 0) break; if (e == NULL) goto epilogue_done; #ifdef HAVE_return if (optimize && HAVE_return) { /* If we're allowed to generate a simple return instruction, then by definition we don't need a full epilogue. Examine the block that falls through to EXIT. If it does not contain any code, examine its predecessors and try to emit (conditional) return instructions. */ basic_block last; edge e_next; rtx label; for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) if (e->flags & EDGE_FALLTHRU) break; if (e == NULL) goto epilogue_done; last = e->src; /* Verify that there are no active instructions in the last block. */ label = BB_END (last); while (label && GET_CODE (label) != CODE_LABEL) { if (active_insn_p (label)) break; label = PREV_INSN (label); } if (BB_HEAD (last) == label && GET_CODE (label) == CODE_LABEL) { rtx epilogue_line_note = NULL_RTX; /* Locate the line number associated with the closing brace, if we can find one. */ for (seq = get_last_insn (); seq && ! active_insn_p (seq); seq = PREV_INSN (seq)) if (GET_CODE (seq) == NOTE && NOTE_LINE_NUMBER (seq) > 0) { epilogue_line_note = seq; break; } for (e = last->pred; e; e = e_next) { basic_block bb = e->src; rtx jump; e_next = e->pred_next; if (bb == ENTRY_BLOCK_PTR) continue; jump = BB_END (bb); if ((GET_CODE (jump) != JUMP_INSN) || JUMP_LABEL (jump) != label) continue; /* If we have an unconditional jump, we can replace that with a simple return instruction. */ if (simplejump_p (jump)) { emit_return_into_block (bb, epilogue_line_note); delete_insn (jump); } /* If we have a conditional jump, we can try to replace that with a conditional return instruction. */ else if (condjump_p (jump)) { if (! redirect_jump (jump, 0, 0)) continue; /* If this block has only one successor, it both jumps and falls through to the fallthru block, so we can't delete the edge. */ if (bb->succ->succ_next == NULL) continue; } else continue; /* Fix up the CFG for the successful change we just made. */ redirect_edge_succ (e, EXIT_BLOCK_PTR); } /* Emit a return insn for the exit fallthru block. Whether this is still reachable will be determined later. */ emit_barrier_after (BB_END (last)); emit_return_into_block (last, epilogue_line_note); epilogue_end = BB_END (last); last->succ->flags &= ~EDGE_FALLTHRU; goto epilogue_done; } } #endif /* Find the edge that falls through to EXIT. Other edges may exist due to RETURN instructions, but those don't need epilogues. 
There really shouldn't be a mixture -- either all should have been converted or none, however... */ for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) if (e->flags & EDGE_FALLTHRU) break; if (e == NULL) goto epilogue_done; #ifdef HAVE_epilogue if (HAVE_epilogue) { start_sequence (); epilogue_end = emit_note (NOTE_INSN_EPILOGUE_BEG); seq = gen_epilogue (); #ifdef INCOMING_RETURN_ADDR_RTX /* If this function returns with the stack depressed and we can support it, massage the epilogue to actually do that. */ if (TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE && TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl))) seq = keep_stack_depressed (seq); #endif emit_jump_insn (seq); /* Retain a map of the epilogue insns. */ record_insns (seq, &epilogue); set_insn_locators (seq, epilogue_locator); seq = get_insns (); end_sequence (); insert_insn_on_edge (seq, e); inserted = 1; } else #endif { basic_block cur_bb; if (! next_active_insn (BB_END (e->src))) goto epilogue_done; /* We have a fall-through edge to the exit block, the source is not at the end of the function, and there will be an assembler epilogue at the end of the function. We can't use force_nonfallthru here, because that would try to use return. Inserting a jump 'by hand' is extremely messy, so we take advantage of cfg_layout_finalize using fixup_fallthru_exit_predecessor. */ cfg_layout_initialize (); FOR_EACH_BB (cur_bb) if (cur_bb->index >= 0 && cur_bb->next_bb->index >= 0) cur_bb->rbi->next = cur_bb->next_bb; cfg_layout_finalize (); } epilogue_done: if (inserted) commit_edge_insertions (); #ifdef HAVE_sibcall_epilogue /* Emit sibling epilogues before any sibling call sites. */ for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) { basic_block bb = e->src; rtx insn = BB_END (bb); rtx i; rtx newinsn; if (GET_CODE (insn) != CALL_INSN || ! SIBLING_CALL_P (insn)) continue; start_sequence (); emit_insn (gen_sibcall_epilogue ()); seq = get_insns (); end_sequence (); /* Retain a map of the epilogue insns. Used in life analysis to avoid getting rid of sibcall epilogue insns. Do this before we actually emit the sequence. */ record_insns (seq, &sibcall_epilogue); set_insn_locators (seq, epilogue_locator); i = PREV_INSN (insn); newinsn = emit_insn_before (seq, insn); } #endif #ifdef HAVE_prologue /* This is probably all useless now that we use locators. */ if (prologue_end) { rtx insn, prev; /* GDB handles `break f' by setting a breakpoint on the first line note after the prologue. Which means (1) that if there are line number notes before where we inserted the prologue we should move them, and (2) we should generate a note before the end of the first basic block, if there isn't one already there. ??? This behavior is completely broken when dealing with multiple entry functions. We simply place the note always into first basic block and let alternate entry points to be missed. */ for (insn = prologue_end; insn; insn = prev) { prev = PREV_INSN (insn); if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0) { /* Note that we cannot reorder the first insn in the chain, since rest_of_compilation relies on that remaining constant. */ if (prev == NULL) break; reorder_insns (insn, insn, prologue_end); } } /* Find the last line number note in the first block. */ for (insn = BB_END (ENTRY_BLOCK_PTR->next_bb); insn != prologue_end && insn; insn = PREV_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0) break; /* If we didn't find one, make a copy of the first line number we run across. */ if (! 
insn) { for (insn = next_active_insn (prologue_end); insn; insn = PREV_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0) { emit_note_copy_after (insn, prologue_end); break; } } } #endif #ifdef HAVE_epilogue if (epilogue_end) { rtx insn, next; /* Similarly, move any line notes that appear after the epilogue. There is no need, however, to be quite so anal about the existence of such a note. Also move the NOTE_INSN_FUNCTION_END and (possibly) NOTE_INSN_FUNCTION_BEG notes, as those can be relevant for debug info generation. */ for (insn = epilogue_end; insn; insn = next) { next = NEXT_INSN (insn); if (GET_CODE (insn) == NOTE && (NOTE_LINE_NUMBER (insn) > 0 || NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG || NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_END)) reorder_insns (insn, insn, PREV_INSN (epilogue_end)); } } #endif } /* Reposition the prologue-end and epilogue-begin notes after instruction scheduling and delayed branch scheduling. */ void reposition_prologue_and_epilogue_notes (rtx f ATTRIBUTE_UNUSED) { #if defined (HAVE_prologue) || defined (HAVE_epilogue) rtx insn, last, note; int len; if ((len = VARRAY_SIZE (prologue)) > 0) { last = 0, note = 0; /* Scan from the beginning until we reach the last prologue insn. We apparently can't depend on basic_block_{head,end} after reorg has run. */ for (insn = f; insn; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == NOTE) { if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PROLOGUE_END) note = insn; } else if (contains (insn, prologue)) { last = insn; if (--len == 0) break; } } if (last) { /* Find the prologue-end note if we haven't already, and move it to just after the last prologue insn. */ if (note == 0) { for (note = last; (note = NEXT_INSN (note));) if (GET_CODE (note) == NOTE && NOTE_LINE_NUMBER (note) == NOTE_INSN_PROLOGUE_END) break; } /* Avoid placing note between CODE_LABEL and BASIC_BLOCK note. */ if (GET_CODE (last) == CODE_LABEL) last = NEXT_INSN (last); reorder_insns (note, note, last); } } if ((len = VARRAY_SIZE (epilogue)) > 0) { last = 0, note = 0; /* Scan from the end until we reach the first epilogue insn. We apparently can't depend on basic_block_{head,end} after reorg has run. */ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) { if (GET_CODE (insn) == NOTE) { if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG) note = insn; } else if (contains (insn, epilogue)) { last = insn; if (--len == 0) break; } } if (last) { /* Find the epilogue-begin note if we haven't already, and move it to just before the first epilogue insn. */ if (note == 0) { for (note = insn; (note = PREV_INSN (note));) if (GET_CODE (note) == NOTE && NOTE_LINE_NUMBER (note) == NOTE_INSN_EPILOGUE_BEG) break; } if (PREV_INSN (last) != note) reorder_insns (note, note, PREV_INSN (last)); } } #endif /* HAVE_prologue or HAVE_epilogue */ } /* Called once, at initialization, to initialize function.c. */ void init_function_once (void) { VARRAY_INT_INIT (prologue, 0, "prologue"); VARRAY_INT_INIT (epilogue, 0, "epilogue"); VARRAY_INT_INIT (sibcall_epilogue, 0, "sibcall_epilogue"); } /* Resets insn_block_boundaries array. */ void reset_block_changes (void) { VARRAY_TREE_INIT (cfun->ib_boundaries_block, 100, "ib_boundaries_block"); VARRAY_PUSH_TREE (cfun->ib_boundaries_block, NULL_TREE); } /* Record the boundary for BLOCK. 
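Each call maps every insn UID created since the previous call to the block that was current until now, then pushes BLOCK on top of cfun->ib_boundaries_block as the marker the next call will consume.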
*/ void record_block_change (tree block) { int i, n; tree last_block; if (!block) return; last_block = VARRAY_TOP_TREE (cfun->ib_boundaries_block); VARRAY_POP (cfun->ib_boundaries_block); n = get_max_uid (); for (i = VARRAY_ACTIVE_SIZE (cfun->ib_boundaries_block); i < n; i++) VARRAY_PUSH_TREE (cfun->ib_boundaries_block, last_block); VARRAY_PUSH_TREE (cfun->ib_boundaries_block, block); } /* Finishes record of boundaries. */ void finalize_block_changes (void) { record_block_change (DECL_INITIAL (current_function_decl)); } /* For INSN return the BLOCK it belongs to. */ void check_block_change (rtx insn, tree *block) { unsigned uid = INSN_UID (insn); if (uid >= VARRAY_ACTIVE_SIZE (cfun->ib_boundaries_block)) return; *block = VARRAY_TREE (cfun->ib_boundaries_block, uid); } /* Releases the ib_boundaries_block records. */ void free_block_changes (void) { cfun->ib_boundaries_block = NULL; } /* Returns the name of the current function. */ const char * current_function_name (void) { return lang_hooks.decl_printable_name (cfun->decl, 2); } /* Type information for function.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. 
*/ void gt_ggc_mx_temp_slot (void *x_p) { struct temp_slot * const x = (struct temp_slot *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9temp_slot ((*x).next); gt_ggc_m_9temp_slot ((*x).prev); gt_ggc_m_7rtx_def ((*x).slot); gt_ggc_m_7rtx_def ((*x).address); gt_ggc_m_9tree_node ((*x).type); } } void gt_ggc_m_P9temp_slot15varray_head_tag (void *x_p) { struct varray_head_tag * const x = (struct varray_head_tag *)x_p; if (ggc_test_and_set_mark (x)) { switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { gt_ggc_m_9temp_slot ((*x).data.generic[i10]); } } break; case VARRAY_DATA_CPTR: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { } } break; case VARRAY_DATA_RTX: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { gt_ggc_m_7rtx_def ((*x).data.rtx[i12]); } } break; case VARRAY_DATA_RTVEC: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { gt_ggc_m_9rtvec_def ((*x).data.rtvec[i13]); } } break; case VARRAY_DATA_TREE: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { gt_ggc_m_9tree_node ((*x).data.tree[i14]); } } break; case VARRAY_DATA_BITMAP: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { gt_ggc_m_15bitmap_head_def ((*x).data.bitmap[i15]); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { gt_ggc_m_7rtx_def ((*x).data.const_equiv[i16].rtx); } } break; case VARRAY_DATA_TE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { gt_ggc_m_8elt_list ((*x).data.te[i17]); } } break; case VARRAY_DATA_EDGE: { size_t i18; for (i18 = 0; i18 < (size_t)((*x).num_elements); i18++) { gt_ggc_m_8edge_def ((*x).data.e[i18]); } } break; default: break; } } } void gt_pch_nx_temp_slot (void *x_p) { struct temp_slot * const x = (struct temp_slot *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9temp_slot)) { gt_pch_n_9temp_slot ((*x).next); gt_pch_n_9temp_slot ((*x).prev); gt_pch_n_7rtx_def ((*x).slot); gt_pch_n_7rtx_def ((*x).address); gt_pch_n_9tree_node ((*x).type); } } void gt_pch_n_P9temp_slot15varray_head_tag (void *x_p) { struct varray_head_tag * const x = (struct varray_head_tag *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P9temp_slot15varray_head_tag)) { gt_pch_n_S ((*x).name); switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { gt_pch_n_9temp_slot ((*x).data.generic[i10]); } } break; case VARRAY_DATA_CPTR: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { gt_pch_n_S ((*x).data.cptr[i11]); } } break; case VARRAY_DATA_RTX: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { gt_pch_n_7rtx_def ((*x).data.rtx[i12]); } } break; case VARRAY_DATA_RTVEC: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { gt_pch_n_9rtvec_def ((*x).data.rtvec[i13]); } } break; case VARRAY_DATA_TREE: { size_t 
i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { gt_pch_n_9tree_node ((*x).data.tree[i14]); } } break; case VARRAY_DATA_BITMAP: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { gt_pch_n_15bitmap_head_def ((*x).data.bitmap[i15]); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { gt_pch_n_7rtx_def ((*x).data.const_equiv[i16].rtx); } } break; case VARRAY_DATA_TE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { gt_pch_n_8elt_list ((*x).data.te[i17]); } } break; case VARRAY_DATA_EDGE: { size_t i18; for (i18 = 0; i18 < (size_t)((*x).num_elements); i18++) { gt_pch_n_8edge_def ((*x).data.e[i18]); } } break; default: break; } } } void gt_pch_p_9temp_slot (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct temp_slot * const x ATTRIBUTE_UNUSED = (struct temp_slot *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).prev), cookie); if ((void *)(x) == this_obj) op (&((*x).slot), cookie); if ((void *)(x) == this_obj) op (&((*x).address), cookie); if ((void *)(x) == this_obj) op (&((*x).type), cookie); } void gt_pch_p_P9temp_slot15varray_head_tag (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct varray_head_tag * const x ATTRIBUTE_UNUSED = (struct varray_head_tag *)x_p; if ((void *)(x) == this_obj) op (&((*x).name), cookie); switch ((*x).type) { case VARRAY_DATA_C: break; case VARRAY_DATA_UC: break; case VARRAY_DATA_S: break; case VARRAY_DATA_US: break; case VARRAY_DATA_I: break; case VARRAY_DATA_U: break; case VARRAY_DATA_L: break; case VARRAY_DATA_UL: break; case VARRAY_DATA_HINT: break; case VARRAY_DATA_UHINT: break; case VARRAY_DATA_GENERIC: { size_t i10; for (i10 = 0; i10 < (size_t)((*x).num_elements); i10++) { if ((void *)(x) == this_obj) op (&((*x).data.generic[i10]), cookie); } } break; case VARRAY_DATA_CPTR: { size_t i11; for (i11 = 0; i11 < (size_t)((*x).num_elements); i11++) { if ((void *)(x) == this_obj) op (&((*x).data.cptr[i11]), cookie); } } break; case VARRAY_DATA_RTX: { size_t i12; for (i12 = 0; i12 < (size_t)((*x).num_elements); i12++) { if ((void *)(x) == this_obj) op (&((*x).data.rtx[i12]), cookie); } } break; case VARRAY_DATA_RTVEC: { size_t i13; for (i13 = 0; i13 < (size_t)((*x).num_elements); i13++) { if ((void *)(x) == this_obj) op (&((*x).data.rtvec[i13]), cookie); } } break; case VARRAY_DATA_TREE: { size_t i14; for (i14 = 0; i14 < (size_t)((*x).num_elements); i14++) { if ((void *)(x) == this_obj) op (&((*x).data.tree[i14]), cookie); } } break; case VARRAY_DATA_BITMAP: { size_t i15; for (i15 = 0; i15 < (size_t)((*x).num_elements); i15++) { if ((void *)(x) == this_obj) op (&((*x).data.bitmap[i15]), cookie); } } break; case VARRAY_DATA_CONST_EQUIV: { size_t i16; for (i16 = 0; i16 < (size_t)((*x).num_elements); i16++) { if ((void *)(x) == this_obj) op (&((*x).data.const_equiv[i16].rtx), cookie); } } break; case VARRAY_DATA_TE: { size_t i17; for (i17 = 0; i17 < (size_t)((*x).num_elements); i17++) { if ((void *)(x) == this_obj) op (&((*x).data.te[i17]), cookie); } } break; case VARRAY_DATA_EDGE: { size_t i18; for (i18 = 0; i18 < (size_t)((*x).num_elements); i18++) { if ((void *)(x) == this_obj) op (&((*x).data.e[i18]), cookie); } } break; default: break; } } /* GC roots. 
*/ const struct ggc_root_tab gt_ggc_r_gt_function_h[] = { { &initial_trampoline, 1, sizeof (initial_trampoline), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, { &sibcall_epilogue, 1, sizeof (sibcall_epilogue), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &epilogue, 1, sizeof (epilogue), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, { &prologue, 1, sizeof (prologue), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_function_h[] = { { &next_block_index, 1, sizeof (next_block_index), NULL, NULL }, { &funcdef_no, 1, sizeof (funcdef_no), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Global common subexpression elimination/Partial redundancy elimination and global constant/copy propagation for GNU compiler. Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* TODO - reordering of memory allocation and freeing to be more space efficient - do rough calc of how many regs are needed in each block, and a rough calc of how many regs are available in each class and use that to throttle back the code in cases where RTX_COST is minimal. - a store to the same address as a load does not kill the load if the source of the store is also the destination of the load. Handling this allows more load motion, particularly out of loops. - ability to realloc sbitmap vectors would allow one initial computation of reg_set_in_block with only subsequent additions, rather than recomputing it for each pass */ /* References searched while implementing this. Compilers Principles, Techniques and Tools Aho, Sethi, Ullman Addison-Wesley, 1988 Global Optimization by Suppression of Partial Redundancies E. Morel, C. Renvoise communications of the acm, Vol. 22, Num. 2, Feb. 1979 A Portable Machine-Independent Global Optimizer - Design and Measurements Frederick Chow Stanford Ph.D. thesis, Dec. 1983 A Fast Algorithm for Code Movement Optimization D.M. Dhamdhere SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988 A Solution to a Problem with Morel and Renvoise's Global Optimization by Suppression of Partial Redundancies K-H Drechsler, M.P. Stadel ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988 Practical Adaptation of the Global Optimization Algorithm of Morel and Renvoise D.M. Dhamdhere ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991 Efficiently Computing Static Single Assignment Form and the Control Dependence Graph R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991 Lazy Code Motion J. Knoop, O. Ruthing, B. Steffen ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI What's In a Region? Or Computing Control Dependence Regions in Near-Linear Time for Reducible Flow Control Thomas Ball ACM Letters on Programming Languages and Systems, Vol. 2, Num.
1-4, Mar-Dec 1993 An Efficient Representation for Sparse Sets Preston Briggs, Linda Torczon ACM Letters on Programming Languages and Systems, Vol. 2, Num. 1-4, Mar-Dec 1993 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion K-H Drechsler, M.P. Stadel ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993 Partial Dead Code Elimination J. Knoop, O. Ruthing, B. Steffen ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994 Effective Partial Redundancy Elimination P. Briggs, K.D. Cooper ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994 The Program Structure Tree: Computing Control Regions in Linear Time R. Johnson, D. Pearson, K. Pingali ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994 Optimal Code Motion: Theory and Practice J. Knoop, O. Ruthing, B. Steffen ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994 The power of assignment motion J. Knoop, O. Ruthing, B. Steffen ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI Global code motion / global value numbering C. Click ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI Value Driven Redundancy Elimination L.T. Simpson Rice University Ph.D. thesis, Apr. 1996 Value Numbering L.T. Simpson Massively Scalar Compiler Project, Rice University, Sep. 1996 High Performance Compilers for Parallel Computing Michael Wolfe Addison-Wesley, 1996 Advanced Compiler Design and Implementation Steven Muchnick Morgan Kaufmann, 1997 Building an Optimizing Compiler Robert Morgan Digital Press, 1998 People wishing to speed up the code here should read: Elimination Algorithms for Data Flow Analysis B.G. Ryder, M.C. Paull ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986 How to Analyze Large Programs Efficiently and Informatively D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI People wishing to do something different can find various possibilities in the above papers and elsewhere. */ /* Propagate flow information through back edges and thus enable PRE's moving loop invariant calculations out of loops. Originally this tended to create worse overall code, but several improvements during the development of PRE seem to have made following back edges generally a win. Note much of the loop invariant code motion done here would normally be done by loop.c, which has more heuristics for when to move invariants out of loops. At some point we might need to move some of those heuristics into gcse.c. */ /* We support GCSE via Partial Redundancy Elimination. PRE optimizations are a superset of those done by GCSE. We perform the following steps: 1) Compute basic block information. 2) Compute table of places where registers are set. 3) Perform copy/constant propagation. 4) Perform global cse using lazy code motion if not optimizing for size, or code hoisting if we are. 5) Perform another pass of copy/constant propagation. Two passes of copy/constant propagation are done because the first one enables more GCSE and the second one helps to clean up the copies that GCSE creates. This is needed more for PRE than for Classic because Classic GCSE will try to use an existing register containing the common subexpression rather than create a new one. This is harder to do for PRE because of the code motion (which Classic GCSE doesn't do). Expressions we are interested in GCSE-ing are of the form (set (pseudo-reg) (expression)). Function want_to_gcse_p says what these are. PRE handles moving invariant expressions out of loops (by treating them as partially redundant). 
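A small made-up example of a partial redundancy:

     if (cond)
       x = a + b;
     ...
     y = a + b;

   The second computation of a + b is redundant on the path through the
   THEN arm but not on the path that bypasses it, so it is only partially
   redundant.  PRE inserts a copy of the expression on the path(s) where it
   is missing and funnels every computation through a new pseudo, at which
   point the original computation is fully redundant and is deleted.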
Eventually it would be nice to replace cse.c/gcse.c with SSA (static single assignment) based GVN (global value numbering). L. T. Simpson's paper (Rice University) on value numbering is a useful reference for this. ********************** We used to support multiple passes but there are diminishing returns in doing so. The first pass usually makes 90% of the changes that are doable. A second pass can make a few more changes made possible by the first pass. Experiments show any further passes don't make enough changes to justify the expense. A study of spec92 using an unlimited number of passes: [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83, [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2, [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1 It was found doing copy propagation between each pass enables further substitutions. PRE is quite expensive in complicated functions because the DFA can take a while to converge. Hence we only perform one pass. The parameter max-gcse-passes can be modified if one wants to experiment. ********************** The steps for PRE are: 1) Build the hash table of expressions we wish to GCSE (expr_hash_table). 2) Perform the data flow analysis for PRE. 3) Delete the redundant instructions 4) Insert the required copies [if any] that make the partially redundant instructions fully redundant. 5) For other reaching expressions, insert an instruction to copy the value to a newly created pseudo that will reach the redundant instruction. The deletion is done first so that when we do insertions we know which pseudo reg to use. Various papers have argued that PRE DFA is expensive (O(n^2)) and others argue it is not. The number of iterations for the algorithm to converge is typically 2-4 so I don't view it as that expensive (relatively speaking). PRE GCSE depends heavily on the second CSE pass to clean up the copies we create. To make an expression reach the place where it's redundant, the result of the expression is copied to a new register, and the redundant expression is deleted by replacing it with this new register. Classic GCSE doesn't have this problem as much as it computes the reaching defs of each register in each block and thus can try to use an existing register. ********************** A fair bit of simplicity is created by creating small functions for simple tasks, even when the function is only called in one place. This may measurably slow things down [or may not] by creating more function call overhead than is necessary. The source is laid out so that it's trivial to make the affected functions inline so that one can measure what speed up, if any, can be achieved, and maybe later when things settle things can be rearranged. Help stamp out big monolithic functions! */ /* GCSE global vars. */ /* -dG dump file. */ static FILE *gcse_file; /* Note whether or not we should run jump optimization after gcse. We want to do this for two cases. * If we changed any jumps via cprop. * If we added any labels via edge splitting. */ static int run_jump_opt_after_gcse; /* Bitmaps are normally not included in debugging dumps. However it's useful to be able to print them from GDB. We could create special functions for this, but it's simpler to just allow passing stderr to the dump_foo fns. Since stderr can be a macro, we store a copy here. */ static FILE *debug_stderr; /* An obstack for our working variables. */ static struct obstack gcse_obstack; struct reg_use_gcse {rtx reg_rtx; }; /* Hash table of expressions. 
*/ struct expr { /* The expression (SET_SRC for expressions, PATTERN for assignments). */ rtx expr; /* Index in the available expression bitmaps. */ int bitmap_index; /* Next entry with the same hash. */ struct expr *next_same_hash; /* List of anticipatable occurrences in basic blocks in the function. An "anticipatable occurrence" is one that is the first occurrence in the basic block, the operands are not modified in the basic block prior to the occurrence and the output is not used between the start of the block and the occurrence. */ struct occr *antic_occr; /* List of available occurrence in basic blocks in the function. An "available occurrence" is one that is the last occurrence in the basic block and the operands are not modified by following statements in the basic block [including this insn]. */ struct occr *avail_occr; /* Non-null if the computation is PRE redundant. The value is the newly created pseudo-reg to record a copy of the expression in all the places that reach the redundant copy. */ rtx reaching_reg; }; /* Occurrence of an expression. There is one per basic block. If a pattern appears more than once the last appearance is used [or first for anticipatable expressions]. */ struct occr { /* Next occurrence of this expression. */ struct occr *next; /* The insn that computes the expression. */ rtx insn; /* Nonzero if this [anticipatable] occurrence has been deleted. */ char deleted_p; /* Nonzero if this [available] occurrence has been copied to reaching_reg. */ /* ??? This is mutually exclusive with deleted_p, so they could share the same byte. */ char copied_p; }; /* Expression and copy propagation hash tables. Each hash table is an array of buckets. ??? It is known that if it were an array of entries, structure elements `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is not clear whether in the final analysis a sufficient amount of memory would be saved as the size of the available expression bitmaps would be larger [one could build a mapping table without holes afterwards though]. Someday I'll perform the computation and figure it out. */ struct hash_table { /* The table itself. This is an array of `expr_hash_table_size' elements. */ struct expr **table; /* Size of the hash table, in elements. */ unsigned int size; /* Number of hash table elements. */ unsigned int n_elems; /* Whether the table is expression of copy propagation one. */ int set_p; }; /* Expression hash table. */ static struct hash_table expr_hash_table; /* Copy propagation hash table. */ static struct hash_table set_hash_table; /* Mapping of uids to cuids. Only real insns get cuids. */ static int *uid_cuid; /* Highest UID in UID_CUID. */ static int max_uid; /* Get the cuid of an insn. */ #undef INSN_CUID #ifdef ENABLE_CHECKING #define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)]) #else #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)]) #endif /* Number of cuids. */ static int max_cuid; /* Mapping of cuids to insns. */ static rtx *cuid_insn; /* Get insn from cuid. */ #define CUID_INSN(CUID) (cuid_insn[CUID]) /* Maximum register number in function prior to doing gcse + 1. Registers created during this pass have regno >= max_gcse_regno. This is named with "gcse" to not collide with global of same name. */ static unsigned int max_gcse_regno; /* Table of registers that are modified. For each register, each element is a list of places where the pseudo-reg is set. For simplicity, GCSE is done on sets of pseudo-regs only. 
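For illustration (the register and insn numbers are made up): if pseudo 70 is set by insn 10 and later by insn 42, then after compute_sets

     reg_set_table[70] --> { insn 42 } --> { insn 10 } --> NULL

   with the most recent setting first, because record_one_set links new entries at the head of the list.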
PRE GCSE only requires knowledge of which blocks kill which regs [and thus could use a bitmap instead of the lists `reg_set_table' uses]. `reg_set_table' and could be turned into an array of bitmaps (num-bbs x num-regs) [however perhaps it may be useful to keep the data as is]. One advantage of recording things this way is that `reg_set_table' is fairly sparse with respect to pseudo regs but for hard regs could be fairly dense [relatively speaking]. And recording sets of pseudo-regs in lists speeds up functions like compute_transp since in the case of pseudo-regs we only need to iterate over the number of times a pseudo-reg is set, not over the number of basic blocks [clearly there is a bit of a slow down in the cases where a pseudo is set more than once in a block, however it is believed that the net effect is to speed things up]. This isn't done for hard-regs because recording call-clobbered hard-regs in `reg_set_table' at each function call can consume a fair bit of memory, and iterating over hard-regs stored this way in compute_transp will be more expensive. */ typedef struct reg_set { /* The next setting of this register. */ struct reg_set *next; /* The insn where it was set. */ rtx insn; } reg_set; static reg_set **reg_set_table; /* Size of `reg_set_table'. The table starts out at max_gcse_regno + slop, and is enlarged as necessary. */ static int reg_set_table_size; /* Amount to grow `reg_set_table' by when it's full. */ #define REG_SET_TABLE_SLOP 100 /* This is a list of expressions which are MEMs and will be used by load or store motion. Load motion tracks MEMs which aren't killed by anything except itself. (ie, loads and stores to a single location). We can then allow movement of these MEM refs with a little special allowance. (all stores copy the same value to the reaching reg used for the loads). This means all values used to store into memory must have no side effects so we can re-issue the setter value. Store Motion uses this structure as an expression table to track stores which look interesting, and might be moveable towards the exit block. */ struct ls_expr { struct expr * expr; /* Gcse expression reference for LM. */ rtx pattern; /* Pattern of this mem. */ rtx pattern_regs; /* List of registers mentioned by the mem. */ rtx loads; /* INSN list of loads seen. */ rtx stores; /* INSN list of stores seen. */ struct ls_expr * next; /* Next in the list. */ int invalid; /* Invalid for some reason. */ int index; /* If it maps to a bitmap index. */ unsigned int hash_index; /* Index when in a hash table. */ rtx reaching_reg; /* Register to use when re-writing. */ }; /* Array of implicit set patterns indexed by basic block index. */ static rtx *implicit_sets; /* Head of the list of load/store memory refs. */ static struct ls_expr * pre_ldst_mems = NULL; /* Bitmap containing one bit for each register in the program. Used when performing GCSE to track which registers have been set since the start of the basic block. */ static regset reg_set_bitmap; /* For each block, a bitmap of registers set in the block. This is used by compute_transp. It is computed during hash table computation and not by compute_sets as it includes registers added since the last pass (or between cprop and gcse) and it's currently not easy to realloc sbitmap vectors. */ static sbitmap *reg_set_in_block; /* Array, indexed by basic block number for a list of insns which modify memory within that block. 
*/ static rtx * modify_mem_list; bitmap modify_mem_list_set; /* This array parallels modify_mem_list, but is kept canonicalized. */ static rtx * canon_modify_mem_list; bitmap canon_modify_mem_list_set; /* Various variables for statistics gathering. */ /* Memory used in a pass. This isn't intended to be absolutely precise. Its intent is only to keep an eye on memory usage. */ static int bytes_used; /* GCSE substitutions made. */ static int gcse_subst_count; /* Number of copy instructions created. */ static int gcse_create_count; /* Number of constants propagated. */ static int const_prop_count; /* Number of copys propagated. */ static int copy_prop_count; /* For available exprs */ static sbitmap *ae_kill, *ae_gen; /* Objects of this type are passed around by the null-pointer check removal routines. */ struct null_pointer_info { /* The basic block being processed. */ basic_block current_block; /* The first register to be handled in this pass. */ unsigned int min_reg; /* One greater than the last register to be handled in this pass. */ unsigned int max_reg; sbitmap *nonnull_local; sbitmap *nonnull_killed; }; static void compute_can_copy (void); static void *gmalloc (size_t) ATTRIBUTE_MALLOC; static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC; static void *grealloc (void *, size_t); static void *gcse_alloc (unsigned long); static void alloc_gcse_mem (rtx); static void free_gcse_mem (void); static void alloc_reg_set_mem (int); static void free_reg_set_mem (void); static void record_one_set (int, rtx); static void replace_one_set (int, rtx, rtx); static void record_set_info (rtx, rtx, void *); static void compute_sets (rtx); static void hash_scan_insn (rtx, struct hash_table *, int); static void hash_scan_set (rtx, rtx, struct hash_table *); static void hash_scan_clobber (rtx, rtx, struct hash_table *); static void hash_scan_call (rtx, rtx, struct hash_table *); static int want_to_gcse_p (rtx); static bool can_assign_to_reg_p (rtx); static bool gcse_constant_p (rtx); static int oprs_unchanged_p (rtx, rtx, int); static int oprs_anticipatable_p (rtx, rtx); static int oprs_available_p (rtx, rtx); static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int, struct hash_table *); static void insert_set_in_table (rtx, rtx, struct hash_table *); static unsigned int hash_expr (rtx, enum machine_mode, int *, int); static unsigned int hash_expr_1 (rtx, enum machine_mode, int *); static unsigned int hash_string_1 (const char *); static unsigned int hash_set (int, int); static int expr_equiv_p (rtx, rtx); static void record_last_reg_set_info (rtx, int); static void record_last_mem_set_info (rtx); static void record_last_set_info (rtx, rtx, void *); static void compute_hash_table (struct hash_table *); static void alloc_hash_table (int, struct hash_table *, int); static void free_hash_table (struct hash_table *); static void compute_hash_table_work (struct hash_table *); static void dump_hash_table (FILE *, const char *, struct hash_table *); static struct expr *lookup_expr (rtx, struct hash_table *); static struct expr *lookup_set (unsigned int, struct hash_table *); static struct expr *next_set (unsigned int, struct expr *); static void reset_opr_set_tables (void); static int oprs_not_set_p (rtx, rtx); static void mark_call (rtx); static void mark_set (rtx, rtx); static void mark_clobber (rtx, rtx); static void mark_oprs_set (rtx); static void alloc_cprop_mem (int, int); static void free_cprop_mem (void); static void compute_transp (rtx, int, sbitmap *, int); static void compute_transpout 
(void); static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *, struct hash_table *); static void compute_cprop_data (void); static void find_used_regs (rtx *, void *); static int try_replace_reg (rtx, rtx, rtx); static struct expr *find_avail_set (int, rtx); static int cprop_jump (basic_block, rtx, rtx, rtx, rtx); static void mems_conflict_for_gcse_p (rtx, rtx, void *); static int load_killed_in_block_p (basic_block, int, rtx, int); static void canon_list_insert (rtx, rtx, void *); static int cprop_insn (rtx, int); static int cprop (int); static void find_implicit_sets (void); static int one_cprop_pass (int, int, int); static bool constprop_register (rtx, rtx, rtx, int); static struct expr *find_bypass_set (int, int); static bool reg_killed_on_edge (rtx, edge); static int bypass_block (basic_block, rtx, rtx); static int bypass_conditional_jumps (void); static void alloc_pre_mem (int, int); static void free_pre_mem (void); static void compute_pre_data (void); static int pre_expr_reaches_here_p (basic_block, struct expr *, basic_block); static void insert_insn_end_bb (struct expr *, basic_block, int); static void pre_insert_copy_insn (struct expr *, rtx); static void pre_insert_copies (void); static int pre_delete (void); static int pre_gcse (void); static int one_pre_gcse_pass (int); static void add_label_notes_gcse (rtx, rtx); static void alloc_code_hoist_mem (int, int); static void free_code_hoist_mem (void); static void compute_code_hoist_vbeinout (void); static void compute_code_hoist_data (void); static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *); static void hoist_code (void); static int one_code_hoisting_pass (void); static rtx process_insert_insn (struct expr *); static int pre_edge_insert (struct edge_list *, struct expr **); static int pre_expr_reaches_here_p_work (basic_block, struct expr *, basic_block, char *); static struct ls_expr * ldst_entry (rtx); static void free_ldst_entry (struct ls_expr *); static void free_ldst_mems (void); static void print_ldst_list (FILE *); static struct ls_expr * find_rtx_in_ldst (rtx); static int enumerate_ldsts (void); static inline struct ls_expr * first_ls_expr (void); static inline struct ls_expr * next_ls_expr (struct ls_expr *); static int simple_mem (rtx); static void invalidate_any_buried_refs (rtx); static void compute_ld_motion_mems (void); static void trim_ld_motion_mems (void); static void update_ld_motion_stores (struct expr *); static void reg_set_info (rtx, rtx, void *); static void reg_clear_last_set (rtx, rtx, void *); static bool store_ops_ok (rtx, int *); static rtx extract_mentioned_regs (rtx); static rtx extract_mentioned_regs_helper (rtx, rtx); static void find_moveable_store (rtx, int *, int *); static int compute_store_table (void); static bool load_kills_store (rtx, rtx, int); static bool find_loads (rtx, rtx, int); static bool store_killed_in_insn (rtx, rtx, rtx, int); static bool store_killed_after (rtx, rtx, rtx, basic_block, int *, rtx *); static bool store_killed_before (rtx, rtx, rtx, basic_block, int *); static void build_store_vectors (void); static void insert_insn_start_bb (rtx, basic_block); static int insert_store (struct ls_expr *, edge); static void remove_reachable_equiv_notes (basic_block, struct ls_expr *); static void replace_store_insn (rtx, rtx, basic_block, struct ls_expr *); static void delete_store (struct ls_expr *, basic_block); static void free_store_memory (void); static void store_motion (void); static void free_insn_expr_list_list (rtx *); static void 
clear_modify_mem_tables (void); static void free_modify_mem_tables (void); static rtx gcse_emit_move_after (rtx, rtx, rtx); static void local_cprop_find_used_regs (rtx *, void *); static bool do_local_cprop (rtx, rtx, int, rtx*); static bool adjust_libcall_notes (rtx, rtx, rtx, rtx*); static void local_cprop_pass (int); static bool is_too_expensive (const char *); /* Entry point for global common subexpression elimination. F is the first instruction in the function. */ int gcse_main (rtx f, FILE *file) { int changed, pass; /* Bytes used at start of pass. */ int initial_bytes_used; /* Maximum number of bytes used by a pass. */ int max_pass_bytes; /* Point to release obstack data from for each pass. */ char *gcse_obstack_bottom; /* We do not construct an accurate cfg in functions which call setjmp, so just punt to be safe. */ if (current_function_calls_setjmp) return 0; /* Assume that we do not need to run jump optimizations after gcse. */ run_jump_opt_after_gcse = 0; /* For calling dump_foo fns from gdb. */ debug_stderr = stderr; gcse_file = file; /* Identify the basic block information for this function, including successors and predecessors. */ max_gcse_regno = max_reg_num (); if (file) dump_flow_info (file); /* Return if there's nothing to do, or it is too expensive. */ if (n_basic_blocks <= 1 || is_too_expensive (_("GCSE disabled"))) return 0; gcc_obstack_init (&gcse_obstack); bytes_used = 0; /* We need alias. */ init_alias_analysis (); /* Record where pseudo-registers are set. This data is kept accurate during each pass. ??? We could also record hard-reg information here [since it's unchanging], however it is currently done during hash table computation. It may be tempting to compute MEM set information here too, but MEM sets will be subject to code motion one day and thus we need to compute information about memory sets when we build the hash tables. */ alloc_reg_set_mem (max_gcse_regno); compute_sets (f); pass = 0; initial_bytes_used = bytes_used; max_pass_bytes = 0; gcse_obstack_bottom = gcse_alloc (1); changed = 1; while (changed && pass < MAX_GCSE_PASSES) { changed = 0; if (file) fprintf (file, "GCSE pass %d\n\n", pass + 1); /* Initialize bytes_used to the space for the pred/succ lists, and the reg_set_table data. */ bytes_used = initial_bytes_used; /* Each pass may create new registers, so recalculate each time. */ max_gcse_regno = max_reg_num (); alloc_gcse_mem (f); /* Don't allow constant propagation to modify jumps during this pass. */ changed = one_cprop_pass (pass + 1, 0, 0); if (optimize_size) /* Do nothing. */ ; else { changed |= one_pre_gcse_pass (pass + 1); /* We may have just created new basic blocks. Release and recompute various things which are sized on the number of basic blocks. */ if (changed) { free_modify_mem_tables (); modify_mem_list = gcalloc (last_basic_block, sizeof (rtx)); canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx)); } free_reg_set_mem (); alloc_reg_set_mem (max_reg_num ()); compute_sets (f); run_jump_opt_after_gcse = 1; } if (max_pass_bytes < bytes_used) max_pass_bytes = bytes_used; /* Free up memory, then reallocate for code hoisting. We can not re-use the existing allocated memory because the tables will not have info for the insns or registers created by partial redundancy elimination. 
*/ free_gcse_mem (); /* It does not make sense to run code hoisting unless we are optimizing for code size -- it rarely makes programs faster, and can make them bigger if we did partial redundancy elimination (when optimizing for space, we don't run the partial redundancy algorithms). */ if (optimize_size) { max_gcse_regno = max_reg_num (); alloc_gcse_mem (f); changed |= one_code_hoisting_pass (); free_gcse_mem (); if (max_pass_bytes < bytes_used) max_pass_bytes = bytes_used; } if (file) { fprintf (file, "\n"); fflush (file); } obstack_free (&gcse_obstack, gcse_obstack_bottom); pass++; } /* Do one last pass of copy propagation, including cprop into conditional jumps. */ max_gcse_regno = max_reg_num (); alloc_gcse_mem (f); /* This time, go ahead and allow cprop to alter jumps. */ one_cprop_pass (pass + 1, 1, 0); free_gcse_mem (); if (file) { fprintf (file, "GCSE of %s: %d basic blocks, ", current_function_name (), n_basic_blocks); fprintf (file, "%d pass%s, %d bytes\n\n", pass, pass > 1 ? "es" : "", max_pass_bytes); } obstack_free (&gcse_obstack, NULL); free_reg_set_mem (); /* We are finished with alias. */ end_alias_analysis (); allocate_reg_info (max_reg_num (), FALSE, FALSE); if (!optimize_size && flag_gcse_sm) store_motion (); /* Record where pseudo-registers are set. */ return run_jump_opt_after_gcse; } /* Misc. utilities. */ /* Nonzero for each mode that supports (set (reg) (reg)). This is trivially true for integer and floating point values. It may or may not be true for condition codes. */ static char can_copy[(int) NUM_MACHINE_MODES]; /* Compute which modes support reg/reg copy operations. */ static void compute_can_copy (void) { int i; #ifndef AVOID_CCMODE_COPIES rtx reg, insn; #endif memset (can_copy, 0, NUM_MACHINE_MODES); start_sequence (); for (i = 0; i < NUM_MACHINE_MODES; i++) if (GET_MODE_CLASS (i) == MODE_CC) { #ifdef AVOID_CCMODE_COPIES can_copy[i] = 0; #else reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1); insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg)); if (recog (PATTERN (insn), insn, NULL) >= 0) can_copy[i] = 1; #endif } else can_copy[i] = 1; end_sequence (); } /* Returns whether the mode supports reg/reg copy operations. */ bool can_copy_p (enum machine_mode mode) { static bool can_copy_init_p = false; if (! can_copy_init_p) { compute_can_copy (); can_copy_init_p = true; } return can_copy[mode] != 0; } /* Cover function to xmalloc to record bytes allocated. */ static void * gmalloc (size_t size) { bytes_used += size; return xmalloc (size); } /* Cover function to xcalloc to record bytes allocated. */ static void * gcalloc (size_t nelem, size_t elsize) { bytes_used += nelem * elsize; return xcalloc (nelem, elsize); } /* Cover function to xrealloc. We don't record the additional size since we don't know it. It won't affect memory usage stats much anyway. */ static void * grealloc (void *ptr, size_t size) { return xrealloc (ptr, size); } /* Cover function to obstack_alloc. */ static void * gcse_alloc (unsigned long size) { bytes_used += size; return obstack_alloc (&gcse_obstack, size); } /* Allocate memory for the cuid mapping array, and reg/memory set tracking tables. This is called at the start of each pass. */ static void alloc_gcse_mem (rtx f) { int i; rtx insn; /* Find the largest UID and create a mapping from UIDs to CUIDs. CUIDs are like UIDs except they increase monotonically, have no gaps, and only apply to real insns. 
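For example (UIDs invented for illustration), given the insn stream

     NOTE (uid 3), INSN (uid 7), NOTE (uid 9), INSN (uid 12)

   the loop below produces uid_cuid[3] = 0, uid_cuid[7] = 0, uid_cuid[9] = 1, uid_cuid[12] = 1 and max_cuid = 2, so comparing the INSN_CUIDs of real insns gives their order in the stream with no gaps.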
*/ max_uid = get_max_uid (); uid_cuid = gcalloc (max_uid + 1, sizeof (int)); for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) uid_cuid[INSN_UID (insn)] = i++; else uid_cuid[INSN_UID (insn)] = i; } /* Create a table mapping cuids to insns. */ max_cuid = i; cuid_insn = gcalloc (max_cuid + 1, sizeof (rtx)); for (insn = f, i = 0; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) CUID_INSN (i++) = insn; /* Allocate vars to track sets of regs. */ reg_set_bitmap = BITMAP_XMALLOC (); /* Allocate vars to track sets of regs, memory per block. */ reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno); /* Allocate array to keep a list of insns which modify memory in each basic block. */ modify_mem_list = gcalloc (last_basic_block, sizeof (rtx)); canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx)); modify_mem_list_set = BITMAP_XMALLOC (); canon_modify_mem_list_set = BITMAP_XMALLOC (); } /* Free memory allocated by alloc_gcse_mem. */ static void free_gcse_mem (void) { free (uid_cuid); free (cuid_insn); BITMAP_XFREE (reg_set_bitmap); sbitmap_vector_free (reg_set_in_block); free_modify_mem_tables (); BITMAP_XFREE (modify_mem_list_set); BITMAP_XFREE (canon_modify_mem_list_set); } /* Compute the local properties of each recorded expression. Local properties are those that are defined by the block, irrespective of other blocks. An expression is transparent in a block if its operands are not modified in the block. An expression is computed (locally available) in a block if it is computed at least once and expression would contain the same value if the computation was moved to the end of the block. An expression is locally anticipatable in a block if it is computed at least once and expression would contain the same value if the computation was moved to the beginning of the block. We call this routine for cprop, pre and code hoisting. They all compute basically the same information and thus can easily share this code. TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local properties. If NULL, then it is not necessary to compute or record that particular property. TABLE controls which hash table to look at. If it is set hash table, additionally, TRANSP is computed as ~TRANSP, since this is really cprop's ABSALTERED. */ static void compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc, struct hash_table *table) { unsigned int i; /* Initialize any bitmaps that were passed in. */ if (transp) { if (table->set_p) sbitmap_vector_zero (transp, last_basic_block); else sbitmap_vector_ones (transp, last_basic_block); } if (comp) sbitmap_vector_zero (comp, last_basic_block); if (antloc) sbitmap_vector_zero (antloc, last_basic_block); for (i = 0; i < table->size; i++) { struct expr *expr; for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash) { int indx = expr->bitmap_index; struct occr *occr; /* The expression is transparent in this block if it is not killed. We start by assuming all are transparent [none are killed], and then reset the bits for those that are. */ if (transp) compute_transp (expr->expr, indx, transp, table->set_p); /* The occurrences recorded in antic_occr are exactly those that we want to set to nonzero in ANTLOC. */ if (antloc) for (occr = expr->antic_occr; occr != NULL; occr = occr->next) { SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx); /* While we're scanning the table, this is a good place to initialize this. 
*/ occr->deleted_p = 0; } /* The occurrences recorded in avail_occr are exactly those that we want to set to nonzero in COMP. */ if (comp) for (occr = expr->avail_occr; occr != NULL; occr = occr->next) { SET_BIT (comp[BLOCK_NUM (occr->insn)], indx); /* While we're scanning the table, this is a good place to initialize this. */ occr->copied_p = 0; } /* While we're scanning the table, this is a good place to initialize this. */ expr->reaching_reg = 0; } } } /* Register set information. `reg_set_table' records where each register is set or otherwise modified. */ static struct obstack reg_set_obstack; static void alloc_reg_set_mem (int n_regs) { reg_set_table_size = n_regs + REG_SET_TABLE_SLOP; reg_set_table = gcalloc (reg_set_table_size, sizeof (struct reg_set *)); gcc_obstack_init (&reg_set_obstack); } static void free_reg_set_mem (void) { free (reg_set_table); obstack_free (&reg_set_obstack, NULL); } /* An OLD_INSN that used to set REGNO was replaced by NEW_INSN. Update the corresponding `reg_set_table' entry accordingly. We assume that NEW_INSN is not already recorded in reg_set_table[regno]. */ static void replace_one_set (int regno, rtx old_insn, rtx new_insn) { struct reg_set *reg_info; if (regno >= reg_set_table_size) return; for (reg_info = reg_set_table[regno]; reg_info; reg_info = reg_info->next) if (reg_info->insn == old_insn) { reg_info->insn = new_insn; break; } } /* Record REGNO in the reg_set table. */ static void record_one_set (int regno, rtx insn) { /* Allocate a new reg_set element and link it onto the list. */ struct reg_set *new_reg_info; /* If the table isn't big enough, enlarge it. */ if (regno >= reg_set_table_size) { int new_size = regno + REG_SET_TABLE_SLOP; reg_set_table = grealloc (reg_set_table, new_size * sizeof (struct reg_set *)); memset (reg_set_table + reg_set_table_size, 0, (new_size - reg_set_table_size) * sizeof (struct reg_set *)); reg_set_table_size = new_size; } new_reg_info = obstack_alloc (&reg_set_obstack, sizeof (struct reg_set)); bytes_used += sizeof (struct reg_set); new_reg_info->insn = insn; new_reg_info->next = reg_set_table[regno]; reg_set_table[regno] = new_reg_info; } /* Called from compute_sets via note_stores to handle one SET or CLOBBER in an insn. The DATA is really the instruction in which the SET is occurring. */ static void record_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data) { rtx record_set_insn = (rtx) data; if (REG_P (dest) && REGNO (dest) >= FIRST_PSEUDO_REGISTER) record_one_set (REGNO (dest), record_set_insn); } /* Scan the function and record each set of each pseudo-register. This is called once, at the start of the gcse pass. See the comments for `reg_set_table' for further documentation. */ static void compute_sets (rtx f) { rtx insn; for (insn = f; insn != 0; insn = NEXT_INSN (insn)) if (INSN_P (insn)) note_stores (PATTERN (insn), record_set_info, insn); } /* Hash table support. */ struct reg_avail_info { basic_block last_bb; int first_set; int last_set; }; static struct reg_avail_info *reg_avail_info; static basic_block current_bb; /* See whether X, the source of a set, is something we want to consider for GCSE. */ static int want_to_gcse_p (rtx x) { switch (GET_CODE (x)) { case REG: case SUBREG: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CALL: return 0; default: return can_assign_to_reg_p (x); } } /* Used internally by can_assign_to_reg_p. */ static GTY(()) rtx test_insn; /* Return true if we can assign X to a pseudo register.
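   As an illustrative sketch (the register numbers are invented): want_to_gcse_p
   above rejects a bare (reg:SI 100) or (const_int 4), since copying those into
   yet another pseudo gains nothing, but for a source such as

	(plus:SI (reg:SI 100) (const_int 4))

   it defers to this predicate, which builds the scratch insn

	(set (reg:SI <scratch>) (plus:SI (reg:SI 100) (const_int 4)))

   and asks recog whether the target has a matching pattern, accepting extra
   clobbers only when they do not drag in hard registers.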
*/ static bool can_assign_to_reg_p (rtx x) { int num_clobbers = 0; int icode; /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */ if (general_operand (x, GET_MODE (x))) return 1; else if (GET_MODE (x) == VOIDmode) return 0; /* Otherwise, check if we can make a valid insn from it. First initialize our test insn if we haven't already. */ if (test_insn == 0) { test_insn = make_insn_raw (gen_rtx_SET (VOIDmode, gen_rtx_REG (word_mode, FIRST_PSEUDO_REGISTER * 2), const0_rtx)); NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0; } /* Now make an insn like the one we would make when GCSE'ing and see if valid. */ PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x)); SET_SRC (PATTERN (test_insn)) = x; return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0 && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode))); } /* Return nonzero if the operands of expression X are unchanged from the start of INSN's basic block up to but not including INSN (if AVAIL_P == 0), or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */ static int oprs_unchanged_p (rtx x, rtx insn, int avail_p) { int i, j; enum rtx_code code; const char *fmt; if (x == 0) return 1; code = GET_CODE (x); switch (code) { case REG: { struct reg_avail_info *info = ®_avail_info[REGNO (x)]; if (info->last_bb != current_bb) return 1; if (avail_p) return info->last_set < INSN_CUID (insn); else return info->first_set >= INSN_CUID (insn); } case MEM: if (load_killed_in_block_p (current_bb, INSN_CUID (insn), x, avail_p)) return 0; else return oprs_unchanged_p (XEXP (x, 0), insn, avail_p); case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: case PRE_MODIFY: case POST_MODIFY: return 0; case PC: case CC0: /*FIXME*/ case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case ADDR_VEC: case ADDR_DIFF_VEC: return 1; default: break; } for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) { if (fmt[i] == 'e') { /* If we are about to do the last recursive call needed at this level, change it into iteration. This function is called enough to be worth it. */ if (i == 0) return oprs_unchanged_p (XEXP (x, i), insn, avail_p); else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p)) return 0; } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p)) return 0; } return 1; } /* Used for communication between mems_conflict_for_gcse_p and load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a conflict between two memory references. */ static int gcse_mems_conflict_p; /* Used for communication between mems_conflict_for_gcse_p and load_killed_in_block_p. A memory reference for a load instruction, mems_conflict_for_gcse_p will see if a memory store conflicts with this memory load. */ static rtx gcse_mem_operand; /* DEST is the output of an instruction. If it is a memory reference, and possibly conflicts with the load found in gcse_mem_operand, then set gcse_mems_conflict_p to a nonzero value. */ static void mems_conflict_for_gcse_p (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); /* If DEST is not a MEM, then it will not conflict with the load. Note that function calls are assumed to clobber memory, but are handled elsewhere. */ if (! 
MEM_P (dest)) return; /* If we are setting a MEM in our list of specially recognized MEMs, don't mark as killed this time. */ if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL) { if (!find_rtx_in_ldst (dest)) gcse_mems_conflict_p = 1; return; } if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand, rtx_addr_varies_p)) gcse_mems_conflict_p = 1; } /* Return nonzero if the expression in X (a memory reference) is killed in block BB before or after the insn with the CUID in UID_LIMIT. AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills before UID_LIMIT. To check the entire block, set UID_LIMIT to max_uid + 1 and AVAIL_P to 0. */ static int load_killed_in_block_p (basic_block bb, int uid_limit, rtx x, int avail_p) { rtx list_entry = modify_mem_list[bb->index]; while (list_entry) { rtx setter; /* Ignore entries in the list that do not apply. */ if ((avail_p && INSN_CUID (XEXP (list_entry, 0)) < uid_limit) || (! avail_p && INSN_CUID (XEXP (list_entry, 0)) > uid_limit)) { list_entry = XEXP (list_entry, 1); continue; } setter = XEXP (list_entry, 0); /* If SETTER is a call everything is clobbered. Note that calls to pure functions are never put on the list, so we need not worry about them. */ if (CALL_P (setter)) return 1; /* SETTER must be an INSN of some kind that sets memory. Call note_stores to examine each hunk of memory that is modified. The note_stores interface is pretty limited, so we have to communicate via global variables. Yuk. */ gcse_mem_operand = x; gcse_mems_conflict_p = 0; note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL); if (gcse_mems_conflict_p) return 1; list_entry = XEXP (list_entry, 1); } return 0; } /* Return nonzero if the operands of expression X are unchanged from the start of INSN's basic block up to but not including INSN. */ static int oprs_anticipatable_p (rtx x, rtx insn) { return oprs_unchanged_p (x, insn, 0); } /* Return nonzero if the operands of expression X are unchanged from INSN to the end of INSN's basic block. */ static int oprs_available_p (rtx x, rtx insn) { return oprs_unchanged_p (x, insn, 1); } /* Hash expression X. MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean indicating if a volatile operand is found or if the expression contains something we don't want to insert in the table. HASH_TABLE_SIZE is the current size of the hash table to be probed. ??? One might want to merge this with canon_hash. Later. */ static unsigned int hash_expr (rtx x, enum machine_mode mode, int *do_not_record_p, int hash_table_size) { unsigned int hash; *do_not_record_p = 0; hash = hash_expr_1 (x, mode, do_not_record_p); return hash % hash_table_size; } /* Hash a string. Just add its bytes up. */ static inline unsigned hash_string_1 (const char *ps) { unsigned hash = 0; const unsigned char *p = (const unsigned char *) ps; if (p) while (*p) hash += *p++; return hash; } /* Subroutine of hash_expr to do the actual work. */ static unsigned int hash_expr_1 (rtx x, enum machine_mode mode, int *do_not_record_p) { int i, j; unsigned hash = 0; enum rtx_code code; const char *fmt; if (x == 0) return hash; /* Used to turn recursion into iteration. We can't rely on GCC's tail-recursion elimination since we need to keep accumulating values in HASH. 
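   A small illustration of that pattern (the operands are invented): when hashing
   (mem:SI (reg:SI 100)), the MEM case below adds the MEM contribution to HASH,
   replaces X by the address (reg:SI 100) and jumps back to `repeat', so the REG
   case then adds ((unsigned) REG << 7) + 100 without a second activation record.
   In the generic loop at the bottom, only operands other than XEXP (x, 0) go
   through real recursive calls; the first operand is folded into the same loop
   by the goto.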
*/ repeat: code = GET_CODE (x); switch (code) { case REG: hash += ((unsigned int) REG << 7) + REGNO (x); return hash; case CONST_INT: hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode + (unsigned int) INTVAL (x)); return hash; case CONST_DOUBLE: /* This is like the general case, except that it only counts the integers representing the constant. */ hash += (unsigned int) code + (unsigned int) GET_MODE (x); if (GET_MODE (x) != VOIDmode) for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++) hash += (unsigned int) XWINT (x, i); else hash += ((unsigned int) CONST_DOUBLE_LOW (x) + (unsigned int) CONST_DOUBLE_HIGH (x)); return hash; case CONST_VECTOR: { int units; rtx elt; units = CONST_VECTOR_NUNITS (x); for (i = 0; i < units; ++i) { elt = CONST_VECTOR_ELT (x, i); hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p); } return hash; } /* Assume there is only one rtx object for any given label. */ case LABEL_REF: /* We don't hash on the address of the CODE_LABEL to avoid bootstrap differences and differences between each stage's debugging dumps. */ hash += (((unsigned int) LABEL_REF << 7) + CODE_LABEL_NUMBER (XEXP (x, 0))); return hash; case SYMBOL_REF: { /* Don't hash on the symbol's address to avoid bootstrap differences. Different hash values may cause expressions to be recorded in different orders and thus different registers to be used in the final assembler. This also avoids differences in the dump files between various stages. */ unsigned int h = 0; const unsigned char *p = (const unsigned char *) XSTR (x, 0); while (*p) h += (h << 7) + *p++; /* ??? revisit */ hash += ((unsigned int) SYMBOL_REF << 7) + h; return hash; } case MEM: if (MEM_VOLATILE_P (x)) { *do_not_record_p = 1; return 0; } hash += (unsigned int) MEM; /* We used alias set for hashing, but this is not good, since the alias set may differ in -fprofile-arcs and -fbranch-probabilities compilation causing the profiles to fail to match. */ x = XEXP (x, 0); goto repeat; case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: case PC: case CC0: case CALL: case UNSPEC_VOLATILE: *do_not_record_p = 1; return 0; case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) { *do_not_record_p = 1; return 0; } else { /* We don't want to take the filename and line into account. */ hash += (unsigned) code + (unsigned) GET_MODE (x) + hash_string_1 (ASM_OPERANDS_TEMPLATE (x)) + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x)) + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x); if (ASM_OPERANDS_INPUT_LENGTH (x)) { for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++) { hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i), GET_MODE (ASM_OPERANDS_INPUT (x, i)), do_not_record_p) + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, i))); } hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0)); x = ASM_OPERANDS_INPUT (x, 0); mode = GET_MODE (x); goto repeat; } return hash; } default: break; } hash += (unsigned) code + (unsigned) GET_MODE (x); for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) { if (fmt[i] == 'e') { /* If we are about to do the last recursive call needed at this level, change it into iteration. This function is called enough to be worth it. 
*/ if (i == 0) { x = XEXP (x, i); goto repeat; } hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p); if (*do_not_record_p) return 0; } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) { hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p); if (*do_not_record_p) return 0; } else if (fmt[i] == 's') hash += hash_string_1 (XSTR (x, i)); else if (fmt[i] == 'i') hash += (unsigned int) XINT (x, i); else abort (); } return hash; } /* Hash a set of register REGNO. Sets are hashed on the register that is set. This simplifies the PRE copy propagation code. ??? May need to make things more elaborate. Later, as necessary. */ static unsigned int hash_set (int regno, int hash_table_size) { unsigned int hash; hash = regno; return hash % hash_table_size; } /* Return nonzero if exp1 is equivalent to exp2. ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */ static int expr_equiv_p (rtx x, rtx y) { int i, j; enum rtx_code code; const char *fmt; if (x == y) return 1; if (x == 0 || y == 0) return 0; code = GET_CODE (x); if (code != GET_CODE (y)) return 0; /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */ if (GET_MODE (x) != GET_MODE (y)) return 0; switch (code) { case PC: case CC0: case CONST_INT: return 0; case LABEL_REF: return XEXP (x, 0) == XEXP (y, 0); case SYMBOL_REF: return XSTR (x, 0) == XSTR (y, 0); case REG: return REGNO (x) == REGNO (y); case MEM: /* Can't merge two expressions in different alias sets, since we can decide that the expression is transparent in a block when it isn't, due to it being set with the different alias set. */ if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y)) return 0; /* A volatile mem should not be considered equivalent to any other. */ if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y)) return 0; break; /* For commutative operations, check both orders. */ case PLUS: case MULT: case AND: case IOR: case XOR: case NE: case EQ: return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0)) && expr_equiv_p (XEXP (x, 1), XEXP (y, 1))) || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1)) && expr_equiv_p (XEXP (x, 1), XEXP (y, 0)))); case ASM_OPERANDS: /* We don't use the generic code below because we want to disregard filename and line numbers. */ /* A volatile asm isn't equivalent to any other. */ if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y)) return 0; if (GET_MODE (x) != GET_MODE (y) || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y)) || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x), ASM_OPERANDS_OUTPUT_CONSTRAINT (y)) || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y) || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y)) return 0; if (ASM_OPERANDS_INPUT_LENGTH (x)) { for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i), ASM_OPERANDS_INPUT (y, i)) || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i), ASM_OPERANDS_INPUT_CONSTRAINT (y, i))) return 0; } return 1; default: break; } /* Compare the elements. If any pair of corresponding elements fail to match, return 0 for the whole thing. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { switch (fmt[i]) { case 'e': if (! expr_equiv_p (XEXP (x, i), XEXP (y, i))) return 0; break; case 'E': if (XVECLEN (x, i) != XVECLEN (y, i)) return 0; for (j = 0; j < XVECLEN (x, i); j++) if (! 
expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j))) return 0; break; case 's': if (strcmp (XSTR (x, i), XSTR (y, i))) return 0; break; case 'i': if (XINT (x, i) != XINT (y, i)) return 0; break; case 'w': if (XWINT (x, i) != XWINT (y, i)) return 0; break; case '0': break; default: abort (); } } return 1; } /* Insert expression X in INSN in the hash TABLE. If it is already present, record it as the last occurrence in INSN's basic block. MODE is the mode of the value X is being stored into. It is only used if X is a CONST_INT. ANTIC_P is nonzero if X is an anticipatable expression. AVAIL_P is nonzero if X is an available expression. */ static void insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p, int avail_p, struct hash_table *table) { int found, do_not_record_p; unsigned int hash; struct expr *cur_expr, *last_expr = NULL; struct occr *antic_occr, *avail_occr; struct occr *last_occr = NULL; hash = hash_expr (x, mode, &do_not_record_p, table->size); /* Do not insert expression in table if it contains volatile operands, or if hash_expr determines the expression is something we don't want to or can't handle. */ if (do_not_record_p) return; cur_expr = table->table[hash]; found = 0; while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x))) { /* If the expression isn't found, save a pointer to the end of the list. */ last_expr = cur_expr; cur_expr = cur_expr->next_same_hash; } if (! found) { cur_expr = gcse_alloc (sizeof (struct expr)); bytes_used += sizeof (struct expr); if (table->table[hash] == NULL) /* This is the first pattern that hashed to this index. */ table->table[hash] = cur_expr; else /* Add EXPR to end of this hash chain. */ last_expr->next_same_hash = cur_expr; /* Set the fields of the expr element. */ cur_expr->expr = x; cur_expr->bitmap_index = table->n_elems++; cur_expr->next_same_hash = NULL; cur_expr->antic_occr = NULL; cur_expr->avail_occr = NULL; } /* Now record the occurrence(s). */ if (antic_p) { antic_occr = cur_expr->antic_occr; /* Search for another occurrence in the same basic block. */ while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn)) { /* If an occurrence isn't found, save a pointer to the end of the list. */ last_occr = antic_occr; antic_occr = antic_occr->next; } if (antic_occr) /* Found another instance of the expression in the same basic block. Prefer the currently recorded one. We want the first one in the block and the block is scanned from start to end. */ ; /* nothing to do */ else { /* First occurrence of this expression in this basic block. */ antic_occr = gcse_alloc (sizeof (struct occr)); bytes_used += sizeof (struct occr); /* First occurrence of this expression in any block? */ if (cur_expr->antic_occr == NULL) cur_expr->antic_occr = antic_occr; else last_occr->next = antic_occr; antic_occr->insn = insn; antic_occr->next = NULL; antic_occr->deleted_p = 0; } } if (avail_p) { avail_occr = cur_expr->avail_occr; /* Search for another occurrence in the same basic block. */ while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn)) { /* If an occurrence isn't found, save a pointer to the end of the list. */ last_occr = avail_occr; avail_occr = avail_occr->next; } if (avail_occr) /* Found another instance of the expression in the same basic block. Prefer this occurrence to the currently recorded one. We want the last one in the block and the block is scanned from start to end. */ avail_occr->insn = insn; else { /* First occurrence of this expression in this basic block. 
*/ avail_occr = gcse_alloc (sizeof (struct occr)); bytes_used += sizeof (struct occr); /* First occurrence of this expression in any block? */ if (cur_expr->avail_occr == NULL) cur_expr->avail_occr = avail_occr; else last_occr->next = avail_occr; avail_occr->insn = insn; avail_occr->next = NULL; avail_occr->deleted_p = 0; } } } /* Insert pattern X in INSN in the hash table. X is a SET of a reg to either another reg or a constant. If it is already present, record it as the last occurrence in INSN's basic block. */ static void insert_set_in_table (rtx x, rtx insn, struct hash_table *table) { int found; unsigned int hash; struct expr *cur_expr, *last_expr = NULL; struct occr *cur_occr, *last_occr = NULL; if (GET_CODE (x) != SET || ! REG_P (SET_DEST (x))) abort (); hash = hash_set (REGNO (SET_DEST (x)), table->size); cur_expr = table->table[hash]; found = 0; while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x))) { /* If the expression isn't found, save a pointer to the end of the list. */ last_expr = cur_expr; cur_expr = cur_expr->next_same_hash; } if (! found) { cur_expr = gcse_alloc (sizeof (struct expr)); bytes_used += sizeof (struct expr); if (table->table[hash] == NULL) /* This is the first pattern that hashed to this index. */ table->table[hash] = cur_expr; else /* Add EXPR to end of this hash chain. */ last_expr->next_same_hash = cur_expr; /* Set the fields of the expr element. We must copy X because it can be modified when copy propagation is performed on its operands. */ cur_expr->expr = copy_rtx (x); cur_expr->bitmap_index = table->n_elems++; cur_expr->next_same_hash = NULL; cur_expr->antic_occr = NULL; cur_expr->avail_occr = NULL; } /* Now record the occurrence. */ cur_occr = cur_expr->avail_occr; /* Search for another occurrence in the same basic block. */ while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn)) { /* If an occurrence isn't found, save a pointer to the end of the list. */ last_occr = cur_occr; cur_occr = cur_occr->next; } if (cur_occr) /* Found another instance of the expression in the same basic block. Prefer this occurrence to the currently recorded one. We want the last one in the block and the block is scanned from start to end. */ cur_occr->insn = insn; else { /* First occurrence of this expression in this basic block. */ cur_occr = gcse_alloc (sizeof (struct occr)); bytes_used += sizeof (struct occr); /* First occurrence of this expression in any block? */ if (cur_expr->avail_occr == NULL) cur_expr->avail_occr = cur_occr; else last_occr->next = cur_occr; cur_occr->insn = insn; cur_occr->next = NULL; cur_occr->deleted_p = 0; } } /* Determine whether the rtx X should be treated as a constant for the purposes of GCSE's constant propagation. */ static bool gcse_constant_p (rtx x) { /* Consider a COMPARE of two integers constant. */ if (GET_CODE (x) == COMPARE && GET_CODE (XEXP (x, 0)) == CONST_INT && GET_CODE (XEXP (x, 1)) == CONST_INT) return true; /* Consider a COMPARE of the same registers is a constant if they are not floating point registers. */ if (GET_CODE(x) == COMPARE && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1)) && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1)) && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0))) && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1)))) return true; return CONSTANT_P (x); } /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or expression one). 
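   Two illustrative cases (the register numbers are invented): with table->set_p
   clear (the expression hash table), an insn such as

	(set (reg:SI 105) (plus:SI (reg:SI 100) (const_int 4)))

   records just the PLUS source, keyed by hash_expr, together with its
   anticipatable and available occurrence lists; with table->set_p set (the
   assignment table used for const/copy propagation), a copy such as

	(set (reg:SI 106) (reg:SI 100))

   is recorded whole and keyed by hash_set on the destination register number,
   which is what lookup_set later probes.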
*/ static void hash_scan_set (rtx pat, rtx insn, struct hash_table *table) { rtx src = SET_SRC (pat); rtx dest = SET_DEST (pat); rtx note; if (GET_CODE (src) == CALL) hash_scan_call (src, insn, table); else if (REG_P (dest)) { unsigned int regno = REGNO (dest); rtx tmp; /* If this is a single set and we are doing constant propagation, see if a REG_NOTE shows this equivalent to a constant. */ if (table->set_p && (note = find_reg_equal_equiv_note (insn)) != 0 && gcse_constant_p (XEXP (note, 0))) src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src); /* Only record sets of pseudo-regs in the hash table. */ if (! table->set_p && regno >= FIRST_PSEUDO_REGISTER /* Don't GCSE something if we can't do a reg/reg copy. */ && can_copy_p (GET_MODE (dest)) /* GCSE commonly inserts instruction after the insn. We can't do that easily for EH_REGION notes so disable GCSE on these for now. */ && !find_reg_note (insn, REG_EH_REGION, NULL_RTX) /* Is SET_SRC something we want to gcse? */ && want_to_gcse_p (src) /* Don't CSE a nop. */ && ! set_noop_p (pat) /* Don't GCSE if it has attached REG_EQUIV note. At this point this only function parameters should have REG_EQUIV notes and if the argument slot is used somewhere explicitly, it means address of parameter has been taken, so we should not extend the lifetime of the pseudo. */ && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0 || ! MEM_P (XEXP (note, 0)))) { /* An expression is not anticipatable if its operands are modified before this insn or if this is not the only SET in this insn. */ int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn); /* An expression is not available if its operands are subsequently modified, including this insn. It's also not available if this is a branch, because we can't insert a set after the branch. */ int avail_p = (oprs_available_p (src, insn) && ! JUMP_P (insn)); insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table); } /* Record sets for constant/copy propagation. */ else if (table->set_p && regno >= FIRST_PSEUDO_REGISTER && ((REG_P (src) && REGNO (src) >= FIRST_PSEUDO_REGISTER && can_copy_p (GET_MODE (dest)) && REGNO (src) != regno) || gcse_constant_p (src)) /* A copy is not available if its src or dest is subsequently modified. Here we want to search from INSN+1 on, but oprs_available_p searches from INSN on. */ && (insn == BB_END (BLOCK_FOR_INSN (insn)) || ((tmp = next_nonnote_insn (insn)) != NULL_RTX && oprs_available_p (pat, tmp)))) insert_set_in_table (pat, insn, table); } /* In case of store we want to consider the memory value as available in the REG stored in that memory. This makes it possible to remove redundant loads from due to stores to the same location. */ else if (flag_gcse_las && REG_P (src) && MEM_P (dest)) { unsigned int regno = REGNO (src); /* Do not do this for constant/copy propagation. */ if (! table->set_p /* Only record sets of pseudo-regs in the hash table. */ && regno >= FIRST_PSEUDO_REGISTER /* Don't GCSE something if we can't do a reg/reg copy. */ && can_copy_p (GET_MODE (src)) /* GCSE commonly inserts instruction after the insn. We can't do that easily for EH_REGION notes so disable GCSE on these for now. */ && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX) /* Is SET_DEST something we want to gcse? */ && want_to_gcse_p (dest) /* Don't CSE a nop. */ && ! set_noop_p (pat) /* Don't GCSE if it has attached REG_EQUIV note. 
At this point this only function parameters should have REG_EQUIV notes and if the argument slot is used somewhere explicitly, it means address of parameter has been taken, so we should not extend the lifetime of the pseudo. */ && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0 || ! MEM_P (XEXP (note, 0)))) { /* Stores are never anticipatable. */ int antic_p = 0; /* An expression is not available if its operands are subsequently modified, including this insn. It's also not available if this is a branch, because we can't insert a set after the branch. */ int avail_p = oprs_available_p (dest, insn) && ! JUMP_P (insn); /* Record the memory expression (DEST) in the hash table. */ insert_expr_in_table (dest, GET_MODE (dest), insn, antic_p, avail_p, table); } } } static void hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, struct hash_table *table ATTRIBUTE_UNUSED) { /* Currently nothing to do. */ } static void hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED, struct hash_table *table ATTRIBUTE_UNUSED) { /* Currently nothing to do. */ } /* Process INSN and add hash table entries as appropriate. Only available expressions that set a single pseudo-reg are recorded. Single sets in a PARALLEL could be handled, but it's an extra complication that isn't dealt with right now. The trick is handling the CLOBBERs that are also in the PARALLEL. Later. If SET_P is nonzero, this is for the assignment hash table, otherwise it is for the expression hash table. If IN_LIBCALL_BLOCK nonzero, we are in a libcall block, and should not record any expressions. */ static void hash_scan_insn (rtx insn, struct hash_table *table, int in_libcall_block) { rtx pat = PATTERN (insn); int i; if (in_libcall_block) return; /* Pick out the sets of INSN and for other forms of instructions record what's been modified. */ if (GET_CODE (pat) == SET) hash_scan_set (pat, insn, table); else if (GET_CODE (pat) == PARALLEL) for (i = 0; i < XVECLEN (pat, 0); i++) { rtx x = XVECEXP (pat, 0, i); if (GET_CODE (x) == SET) hash_scan_set (x, insn, table); else if (GET_CODE (x) == CLOBBER) hash_scan_clobber (x, insn, table); else if (GET_CODE (x) == CALL) hash_scan_call (x, insn, table); } else if (GET_CODE (pat) == CLOBBER) hash_scan_clobber (pat, insn, table); else if (GET_CODE (pat) == CALL) hash_scan_call (pat, insn, table); } static void dump_hash_table (FILE *file, const char *name, struct hash_table *table) { int i; /* Flattened out table, so it's printed in proper order. */ struct expr **flat_table; unsigned int *hash_val; struct expr *expr; flat_table = xcalloc (table->n_elems, sizeof (struct expr *)); hash_val = xmalloc (table->n_elems * sizeof (unsigned int)); for (i = 0; i < (int) table->size; i++) for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash) { flat_table[expr->bitmap_index] = expr; hash_val[expr->bitmap_index] = i; } fprintf (file, "%s hash table (%d buckets, %d entries)\n", name, table->size, table->n_elems); for (i = 0; i < (int) table->n_elems; i++) if (flat_table[i] != 0) { expr = flat_table[i]; fprintf (file, "Index %d (hash value %d)\n ", expr->bitmap_index, hash_val[i]); print_rtl (file, expr->expr); fprintf (file, "\n"); } fprintf (file, "\n"); free (flat_table); free (hash_val); } /* Record register first/last/block set information for REGNO in INSN. first_set records the first place in the block where the register is set and is used to compute "anticipatability". 
last_set records the last place in the block where the register is set and is used to compute "availability". last_bb records the block for which first_set and last_set are valid, as a quick test to invalidate them. reg_set_in_block records whether the register is set in the block and is used to compute "transparency". */ static void record_last_reg_set_info (rtx insn, int regno) { struct reg_avail_info *info = &reg_avail_info[regno]; int cuid = INSN_CUID (insn); info->last_set = cuid; if (info->last_bb != current_bb) { info->last_bb = current_bb; info->first_set = cuid; SET_BIT (reg_set_in_block[current_bb->index], regno); } } /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn. Note we store a pair of elements in the list, so they have to be taken off pairwise. */ static void canon_list_insert (rtx dest ATTRIBUTE_UNUSED, rtx unused1 ATTRIBUTE_UNUSED, void * v_insn) { rtx dest_addr, insn; int bb; while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); /* If DEST is not a MEM, then it will not conflict with a load. Note that function calls are assumed to clobber memory, but are handled elsewhere. */ if (! MEM_P (dest)) return; dest_addr = get_addr (XEXP (dest, 0)); dest_addr = canon_rtx (dest_addr); insn = (rtx) v_insn; bb = BLOCK_NUM (insn); canon_modify_mem_list[bb] = alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]); canon_modify_mem_list[bb] = alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]); bitmap_set_bit (canon_modify_mem_list_set, bb); } /* Record memory modification information for INSN. We do not actually care about the memory location(s) that are set, or even how they are set (consider a CALL_INSN). We merely need to record which insns modify memory. */ static void record_last_mem_set_info (rtx insn) { int bb = BLOCK_NUM (insn); /* load_killed_in_block_p will handle the case of calls clobbering everything. */ modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]); bitmap_set_bit (modify_mem_list_set, bb); if (CALL_P (insn)) { /* Note that traversals of this loop (other than for free-ing) will break after encountering a CALL_INSN. So, there's no need to insert a pair of items, as canon_list_insert does. */ canon_modify_mem_list[bb] = alloc_INSN_LIST (insn, canon_modify_mem_list[bb]); bitmap_set_bit (canon_modify_mem_list_set, bb); } else note_stores (PATTERN (insn), canon_list_insert, (void*) insn); } /* Called from compute_hash_table via note_stores to handle one SET or CLOBBER in an insn. DATA is really the instruction in which the SET is taking place. */ static void record_last_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data) { rtx last_set_insn = (rtx) data; if (GET_CODE (dest) == SUBREG) dest = SUBREG_REG (dest); if (REG_P (dest)) record_last_reg_set_info (last_set_insn, REGNO (dest)); else if (MEM_P (dest) /* Ignore pushes, they clobber nothing. */ && ! push_operand (dest, GET_MODE (dest))) record_last_mem_set_info (last_set_insn); } /* Top level function to create an expression or assignment hash table.
Expression entries are placed in the hash table if - they are of the form (set (pseudo-reg) src), - src is something we want to perform GCSE on, - none of the operands are subsequently modified in the block Assignment entries are placed in the hash table if - they are of the form (set (pseudo-reg) src), - src is something we want to perform const/copy propagation on, - none of the operands or target are subsequently modified in the block Currently src must be a pseudo-reg or a const_int. TABLE is the table computed. */ static void compute_hash_table_work (struct hash_table *table) { unsigned int i; /* While we compute the hash table we also compute a bit array of which registers are set in which blocks. ??? This isn't needed during const/copy propagation, but it's cheap to compute. Later. */ sbitmap_vector_zero (reg_set_in_block, last_basic_block); /* re-Cache any INSN_LIST nodes we have allocated. */ clear_modify_mem_tables (); /* Some working arrays used to track first and last set in each block. */ reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info)); for (i = 0; i < max_gcse_regno; ++i) reg_avail_info[i].last_bb = NULL; FOR_EACH_BB (current_bb) { rtx insn; unsigned int regno; int in_libcall_block; /* First pass over the instructions records information used to determine when registers and memory are first and last set. ??? hard-reg reg_set_in_block computation could be moved to compute_sets since they currently don't change. */ for (insn = BB_HEAD (current_bb); insn && insn != NEXT_INSN (BB_END (current_bb)); insn = NEXT_INSN (insn)) { if (! INSN_P (insn)) continue; if (CALL_P (insn)) { bool clobbers_all = false; #ifdef NON_SAVING_SETJMP if (NON_SAVING_SETJMP && find_reg_note (insn, REG_SETJMP, NULL_RTX)) clobbers_all = true; #endif for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (clobbers_all || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) record_last_reg_set_info (insn, regno); mark_call (insn); } note_stores (PATTERN (insn), record_last_set_info, insn); } /* Insert implicit sets in the hash table. */ if (table->set_p && implicit_sets[current_bb->index] != NULL_RTX) hash_scan_set (implicit_sets[current_bb->index], BB_HEAD (current_bb), table); /* The next pass builds the hash table. */ for (insn = BB_HEAD (current_bb), in_libcall_block = 0; insn && insn != NEXT_INSN (BB_END (current_bb)); insn = NEXT_INSN (insn)) if (INSN_P (insn)) { if (find_reg_note (insn, REG_LIBCALL, NULL_RTX)) in_libcall_block = 1; else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX)) in_libcall_block = 0; hash_scan_insn (insn, table, in_libcall_block); if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX)) in_libcall_block = 0; } } free (reg_avail_info); reg_avail_info = NULL; } /* Allocate space for the set/expr hash TABLE. N_INSNS is the number of instructions in the function. It is used to determine the number of buckets to use. SET_P determines whether set or expression table will be created. */ static void alloc_hash_table (int n_insns, struct hash_table *table, int set_p) { int n; table->size = n_insns / 4; if (table->size < 11) table->size = 11; /* Attempt to maintain efficient use of hash table. Making it an odd number is simplest for now. ??? Later take some measurements. */ table->size |= 1; n = table->size * sizeof (struct expr *); table->table = gmalloc (n); table->set_p = set_p; } /* Free things allocated by alloc_hash_table. 
*/ static void free_hash_table (struct hash_table *table) { free (table->table); } /* Compute the hash TABLE for doing copy/const propagation or expression hash table. */ static void compute_hash_table (struct hash_table *table) { /* Initialize count of number of entries in hash table. */ table->n_elems = 0; memset (table->table, 0, table->size * sizeof (struct expr *)); compute_hash_table_work (table); } /* Expression tracking support. */ /* Lookup pattern PAT in the expression TABLE. The result is a pointer to the table entry, or NULL if not found. */ static struct expr * lookup_expr (rtx pat, struct hash_table *table) { int do_not_record_p; unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p, table->size); struct expr *expr; if (do_not_record_p) return NULL; expr = table->table[hash]; while (expr && ! expr_equiv_p (expr->expr, pat)) expr = expr->next_same_hash; return expr; } /* Lookup REGNO in the set TABLE. The result is a pointer to the table entry, or NULL if not found. */ static struct expr * lookup_set (unsigned int regno, struct hash_table *table) { unsigned int hash = hash_set (regno, table->size); struct expr *expr; expr = table->table[hash]; while (expr && REGNO (SET_DEST (expr->expr)) != regno) expr = expr->next_same_hash; return expr; } /* Return the next entry for REGNO in list EXPR. */ static struct expr * next_set (unsigned int regno, struct expr *expr) { do expr = expr->next_same_hash; while (expr && REGNO (SET_DEST (expr->expr)) != regno); return expr; } /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node types may be mixed. */ static void free_insn_expr_list_list (rtx *listp) { rtx list, next; for (list = *listp; list ; list = next) { next = XEXP (list, 1); if (GET_CODE (list) == EXPR_LIST) free_EXPR_LIST_node (list); else free_INSN_LIST_node (list); } *listp = NULL; } /* Clear canon_modify_mem_list and modify_mem_list tables. */ static void clear_modify_mem_tables (void) { int i; EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, free_INSN_LIST_list (modify_mem_list + i)); bitmap_clear (modify_mem_list_set); EXECUTE_IF_SET_IN_BITMAP (canon_modify_mem_list_set, 0, i, free_insn_expr_list_list (canon_modify_mem_list + i)); bitmap_clear (canon_modify_mem_list_set); } /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */ static void free_modify_mem_tables (void) { clear_modify_mem_tables (); free (modify_mem_list); free (canon_modify_mem_list); modify_mem_list = 0; canon_modify_mem_list = 0; } /* Reset tables used to keep track of what's still available [since the start of the block]. */ static void reset_opr_set_tables (void) { /* Maintain a bitmap of which regs have been set since beginning of the block. */ CLEAR_REG_SET (reg_set_bitmap); /* Also keep a record of the last instruction to modify memory. For now this is very trivial, we only record whether any memory location has been modified. */ clear_modify_mem_tables (); } /* Return nonzero if the operands of X are not set before INSN in INSN's basic block. */ static int oprs_not_set_p (rtx x, rtx insn) { int i, j; enum rtx_code code; const char *fmt; if (x == 0) return 1; code = GET_CODE (x); switch (code) { case PC: case CC0: case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case ADDR_VEC: case ADDR_DIFF_VEC: return 1; case MEM: if (load_killed_in_block_p (BLOCK_FOR_INSN (insn), INSN_CUID (insn), x, 0)) return 0; else return oprs_not_set_p (XEXP (x, 0), insn); case REG: return ! 
REGNO_REG_SET_P (reg_set_bitmap, REGNO (x)); default: break; } for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) { if (fmt[i] == 'e') { /* If we are about to do the last recursive call needed at this level, change it into iteration. This function is called enough to be worth it. */ if (i == 0) return oprs_not_set_p (XEXP (x, i), insn); if (! oprs_not_set_p (XEXP (x, i), insn)) return 0; } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) if (! oprs_not_set_p (XVECEXP (x, i, j), insn)) return 0; } return 1; } /* Mark things set by a CALL. */ static void mark_call (rtx insn) { if (! CONST_OR_PURE_CALL_P (insn)) record_last_mem_set_info (insn); } /* Mark things set by a SET. */ static void mark_set (rtx pat, rtx insn) { rtx dest = SET_DEST (pat); while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); if (REG_P (dest)) SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest)); else if (MEM_P (dest)) record_last_mem_set_info (insn); if (GET_CODE (SET_SRC (pat)) == CALL) mark_call (insn); } /* Record things set by a CLOBBER. */ static void mark_clobber (rtx pat, rtx insn) { rtx clob = XEXP (pat, 0); while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART) clob = XEXP (clob, 0); if (REG_P (clob)) SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob)); else record_last_mem_set_info (insn); } /* Record things set by INSN. This data is used by oprs_not_set_p. */ static void mark_oprs_set (rtx insn) { rtx pat = PATTERN (insn); int i; if (GET_CODE (pat) == SET) mark_set (pat, insn); else if (GET_CODE (pat) == PARALLEL) for (i = 0; i < XVECLEN (pat, 0); i++) { rtx x = XVECEXP (pat, 0, i); if (GET_CODE (x) == SET) mark_set (x, insn); else if (GET_CODE (x) == CLOBBER) mark_clobber (x, insn); else if (GET_CODE (x) == CALL) mark_call (insn); } else if (GET_CODE (pat) == CLOBBER) mark_clobber (pat, insn); else if (GET_CODE (pat) == CALL) mark_call (insn); } /* Compute copy/constant propagation working variables. */ /* Local properties of assignments. */ static sbitmap *cprop_pavloc; static sbitmap *cprop_absaltered; /* Global properties of assignments (computed from the local properties). */ static sbitmap *cprop_avin; static sbitmap *cprop_avout; /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of basic blocks. N_SETS is the number of sets. */ static void alloc_cprop_mem (int n_blocks, int n_sets) { cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets); cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets); cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets); cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets); } /* Free vars used by copy/const propagation. */ static void free_cprop_mem (void) { sbitmap_vector_free (cprop_pavloc); sbitmap_vector_free (cprop_absaltered); sbitmap_vector_free (cprop_avin); sbitmap_vector_free (cprop_avout); } /* For each block, compute whether X is transparent. X is either an expression or an assignment [though we don't care which, for this context an assignment is treated as an expression]. For each block where an element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX bit in BMAP. */ static void compute_transp (rtx x, int indx, sbitmap *bmap, int set_p) { int i, j; basic_block bb; enum rtx_code code; reg_set *r; const char *fmt; /* repeat is used to turn tail-recursion into iteration since GCC can't do it when there's no return value. 
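   As an illustration (block and register numbers invented): if X is
   (plus:SI (reg:SI 100) (reg:SI 101)) and reg_set_table records pseudo 100 as
   set in blocks 3 and 7, the REG case resets bit INDX in bmap[3] and bmap[7]
   when SET_P == 0 (the transparency case) and sets it when SET_P == 1 (cprop's
   ABSALTERED). The operand walk at the bottom recurses for (reg:SI 101) and
   reaches (reg:SI 100) by jumping back to `repeat', since it is the last
   operand left at this level.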
*/ repeat: if (x == 0) return; code = GET_CODE (x); switch (code) { case REG: if (set_p) { if (REGNO (x) < FIRST_PSEUDO_REGISTER) { FOR_EACH_BB (bb) if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x))) SET_BIT (bmap[bb->index], indx); } else { for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next) SET_BIT (bmap[BLOCK_NUM (r->insn)], indx); } } else { if (REGNO (x) < FIRST_PSEUDO_REGISTER) { FOR_EACH_BB (bb) if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x))) RESET_BIT (bmap[bb->index], indx); } else { for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next) RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx); } } return; case MEM: FOR_EACH_BB (bb) { rtx list_entry = canon_modify_mem_list[bb->index]; while (list_entry) { rtx dest, dest_addr; if (CALL_P (XEXP (list_entry, 0))) { if (set_p) SET_BIT (bmap[bb->index], indx); else RESET_BIT (bmap[bb->index], indx); break; } /* LIST_ENTRY must be an INSN of some kind that sets memory. Examine each hunk of memory that is modified. */ dest = XEXP (list_entry, 0); list_entry = XEXP (list_entry, 1); dest_addr = XEXP (list_entry, 0); if (canon_true_dependence (dest, GET_MODE (dest), dest_addr, x, rtx_addr_varies_p)) { if (set_p) SET_BIT (bmap[bb->index], indx); else RESET_BIT (bmap[bb->index], indx); break; } list_entry = XEXP (list_entry, 1); } } x = XEXP (x, 0); goto repeat; case PC: case CC0: /*FIXME*/ case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case ADDR_VEC: case ADDR_DIFF_VEC: return; default: break; } for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) { if (fmt[i] == 'e') { /* If we are about to do the last recursive call needed at this level, change it into iteration. This function is called enough to be worth it. */ if (i == 0) { x = XEXP (x, i); goto repeat; } compute_transp (XEXP (x, i), indx, bmap, set_p); } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) compute_transp (XVECEXP (x, i, j), indx, bmap, set_p); } } /* Top level routine to do the dataflow analysis needed by copy/const propagation. */ static void compute_cprop_data (void) { compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table); compute_available (cprop_pavloc, cprop_absaltered, cprop_avout, cprop_avin); } /* Copy/constant propagation. */ /* Maximum number of register uses in an insn that we handle. */ #define MAX_USES 8 /* Table of uses found in an insn. Allocated statically to avoid alloc/free complexity and overhead. */ static struct reg_use_gcse reg_use_table[MAX_USES]; /* Index into `reg_use_table' while building it. */ static int reg_use_count; /* Set up a list of register numbers used in INSN. The found uses are stored in `reg_use_table'. `reg_use_count' is initialized to zero before entry, and contains the number of uses in the table upon exit. ??? If a register appears multiple times we will record it multiple times. This doesn't hurt anything but it will slow things down. */ static void find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED) { int i, j; enum rtx_code code; const char *fmt; rtx x = *xptr; /* repeat is used to turn tail-recursion into iteration since GCC can't do it when there's no return value. */ repeat: if (x == 0) return; code = GET_CODE (x); if (REG_P (x)) { if (reg_use_count == MAX_USES) return; reg_use_table[reg_use_count].reg_rtx = x; reg_use_count++; } /* Recursively scan the operands of this expression. 
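   For example (a hypothetical insn): when cprop_insn applies note_uses to
   (set (reg:SI 110) (plus:SI (reg:SI 100) (reg:SI 101))), this routine is handed
   the PLUS and records reg 100 and reg 101 in reg_use_table; the pure-register
   destination is not a use and is never visited, and collection simply stops
   once MAX_USES entries have been gathered.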
*/ for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) { if (fmt[i] == 'e') { /* If we are about to do the last recursive call needed at this level, change it into iteration. This function is called enough to be worth it. */ if (i == 0) { x = XEXP (x, 0); goto repeat; } find_used_regs (&XEXP (x, i), data); } else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) find_used_regs (&XVECEXP (x, i, j), data); } } /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO. Returns nonzero is successful. */ static int try_replace_reg (rtx from, rtx to, rtx insn) { rtx note = find_reg_equal_equiv_note (insn); rtx src = 0; int success = 0; rtx set = single_set (insn); validate_replace_src_group (from, to, insn); if (num_changes_pending () && apply_change_group ()) success = 1; /* Try to simplify SET_SRC if we have substituted a constant. */ if (success && set && CONSTANT_P (to)) { src = simplify_rtx (SET_SRC (set)); if (src) validate_change (insn, &SET_SRC (set), src, 0); } /* If there is already a NOTE, update the expression in it with our replacement. */ if (note != 0) XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to); if (!success && set && reg_mentioned_p (from, SET_SRC (set))) { /* If above failed and this is a single set, try to simplify the source of the set given our substitution. We could perhaps try this for multiple SETs, but it probably won't buy us anything. */ src = simplify_replace_rtx (SET_SRC (set), from, to); if (!rtx_equal_p (src, SET_SRC (set)) && validate_change (insn, &SET_SRC (set), src, 0)) success = 1; /* If we've failed to do replacement, have a single SET, don't already have a note, and have no special SET, add a REG_EQUAL note to not lose information. */ if (!success && note == 0 && set != 0 && GET_CODE (XEXP (set, 0)) != ZERO_EXTRACT && GET_CODE (XEXP (set, 0)) != SIGN_EXTRACT) note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src)); } /* REG_EQUAL may get simplified into register. We don't allow that. Remove that note. This code ought not to happen, because previous code ought to synthesize reg-reg move, but be on the safe side. */ if (note && REG_P (XEXP (note, 0))) remove_note (insn, note); return success; } /* Find a set of REGNOs that are available on entry to INSN's block. Returns NULL no such set is found. */ static struct expr * find_avail_set (int regno, rtx insn) { /* SET1 contains the last set found that can be returned to the caller for use in a substitution. */ struct expr *set1 = 0; /* Loops are not possible here. To get a loop we would need two sets available at the start of the block containing INSN. ie we would need two sets like this available at the start of the block: (set (reg X) (reg Y)) (set (reg Y) (reg X)) This can not happen since the set of (reg Y) would have killed the set of (reg X) making it unavailable at the start of this block. */ while (1) { rtx src; struct expr *set = lookup_set (regno, &set_hash_table); /* Find a set that is available at the start of the block which contains INSN. */ while (set) { if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index)) break; set = next_set (regno, set); } /* If no available set was found we've reached the end of the (possibly empty) copy chain. */ if (set == 0) break; if (GET_CODE (set->expr) != SET) abort (); src = SET_SRC (set->expr); /* We know the set is available. Now check that SRC is ANTLOC (i.e. none of the source operands have changed since the start of the block). 
If the source operand changed, we may still use it for the next iteration of this loop, but we may not use it for substitutions. */ if (gcse_constant_p (src) || oprs_not_set_p (src, insn)) set1 = set; /* If the source of the set is anything except a register, then we have reached the end of the copy chain. */ if (! REG_P (src)) break; /* Follow the copy chain, ie start another iteration of the loop and see if we have an available copy into SRC. */ regno = REGNO (src); } /* SET1 holds the last set that was available and anticipatable at INSN. */ return set1; } /* Subroutine of cprop_insn that tries to propagate constants into JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL it is the instruction that immediately precedes JUMP, and must be a single SET of a register. FROM is what we will try to replace, SRC is the constant we will try to substitute for it. Returns nonzero if a change was made. */ static int cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src) { rtx new, set_src, note_src; rtx set = pc_set (jump); rtx note = find_reg_equal_equiv_note (jump); if (note) { note_src = XEXP (note, 0); if (GET_CODE (note_src) == EXPR_LIST) note_src = NULL_RTX; } else note_src = NULL_RTX; /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */ set_src = note_src ? note_src : SET_SRC (set); /* First substitute the SETCC condition into the JUMP instruction, then substitute that given values into this expanded JUMP. */ if (setcc != NULL_RTX && !modified_between_p (from, setcc, jump) && !modified_between_p (src, setcc, jump)) { rtx setcc_src; rtx setcc_set = single_set (setcc); rtx setcc_note = find_reg_equal_equiv_note (setcc); setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST) ? XEXP (setcc_note, 0) : SET_SRC (setcc_set); set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set), setcc_src); } else setcc = NULL_RTX; new = simplify_replace_rtx (set_src, from, src); /* If no simplification can be made, then try the next register. */ if (rtx_equal_p (new, SET_SRC (set))) return 0; /* If this is now a no-op delete it, otherwise this must be a valid insn. */ if (new == pc_rtx) delete_insn (jump); else { /* Ensure the value computed inside the jump insn to be equivalent to one computed by setcc. */ if (setcc && modified_in_p (new, setcc)) return 0; if (! validate_change (jump, &SET_SRC (set), new, 0)) { /* When (some) constants are not valid in a comparison, and there are two registers to be replaced by constants before the entire comparison can be folded into a constant, we need to keep intermediate information in REG_EQUAL notes. For targets with separate compare insns, such notes are added by try_replace_reg. When we have a combined compare-and-branch instruction, however, we need to attach a note to the branch itself to make this optimization work. */ if (!rtx_equal_p (new, note_src)) set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new)); return 0; } /* Remove REG_EQUAL note after simplification. */ if (note_src) remove_note (jump, note); /* If this has turned into an unconditional jump, then put a barrier after it so that the unreachable code will be deleted. */ if (GET_CODE (SET_SRC (set)) == LABEL_REF) emit_barrier_after (jump); } #ifdef HAVE_cc0 /* Delete the cc0 setter. 
*/ if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc)))) delete_insn (setcc); #endif run_jump_opt_after_gcse = 1; const_prop_count++; if (gcse_file != NULL) { fprintf (gcse_file, "CONST-PROP: Replacing reg %d in jump_insn %d with constant ", REGNO (from), INSN_UID (jump)); print_rtl (gcse_file, src); fprintf (gcse_file, "\n"); } purge_dead_edges (bb); return 1; } static bool constprop_register (rtx insn, rtx from, rtx to, int alter_jumps) { rtx sset; /* Check for reg or cc0 setting instructions followed by conditional branch instructions first. */ if (alter_jumps && (sset = single_set (insn)) != NULL && NEXT_INSN (insn) && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn))) { rtx dest = SET_DEST (sset); if ((REG_P (dest) || CC0_P (dest)) && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to)) return 1; } /* Handle normal insns next. */ if (GET_CODE (insn) == INSN && try_replace_reg (from, to, insn)) return 1; /* Try to propagate a CONST_INT into a conditional jump. We're pretty specific about what we will handle in this code, we can extend this as necessary over time. Right now the insn in question must look like (set (pc) (if_then_else ...)) */ else if (alter_jumps && any_condjump_p (insn) && onlyjump_p (insn)) return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to); return 0; } /* Perform constant and copy propagation on INSN. The result is nonzero if a change was made. */ static int cprop_insn (rtx insn, int alter_jumps) { struct reg_use_gcse *reg_used; int changed = 0; rtx note; if (!INSN_P (insn)) return 0; reg_use_count = 0; note_uses (&PATTERN (insn), find_used_regs, NULL); note = find_reg_equal_equiv_note (insn); /* We may win even when propagating constants into notes. */ if (note) find_used_regs (&XEXP (note, 0), NULL); for (reg_used = ®_use_table[0]; reg_use_count > 0; reg_used++, reg_use_count--) { unsigned int regno = REGNO (reg_used->reg_rtx); rtx pat, src; struct expr *set; /* Ignore registers created by GCSE. We do this because ... */ if (regno >= max_gcse_regno) continue; /* If the register has already been set in this block, there's nothing we can do. */ if (! oprs_not_set_p (reg_used->reg_rtx, insn)) continue; /* Find an assignment that sets reg_used and is available at the start of the block. */ set = find_avail_set (regno, insn); if (! set) continue; pat = set->expr; /* ??? We might be able to handle PARALLELs. Later. */ if (GET_CODE (pat) != SET) abort (); src = SET_SRC (pat); /* Constant propagation. */ if (gcse_constant_p (src)) { if (constprop_register (insn, reg_used->reg_rtx, src, alter_jumps)) { changed = 1; const_prop_count++; if (gcse_file != NULL) { fprintf (gcse_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno); fprintf (gcse_file, "insn %d with constant ", INSN_UID (insn)); print_rtl (gcse_file, src); fprintf (gcse_file, "\n"); } if (INSN_DELETED_P (insn)) return 1; } } else if (REG_P (src) && REGNO (src) >= FIRST_PSEUDO_REGISTER && REGNO (src) != regno) { if (try_replace_reg (reg_used->reg_rtx, src, insn)) { changed = 1; copy_prop_count++; if (gcse_file != NULL) { fprintf (gcse_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d", regno, INSN_UID (insn)); fprintf (gcse_file, " with reg %d\n", REGNO (src)); } /* The original insn setting reg_used may or may not now be deletable. We leave the deletion to flow. */ /* FIXME: If it turns out that the insn isn't deletable, then we may have unnecessarily extended register lifetimes and made things worse. 
*/ } } } return changed; } /* Like find_used_regs, but avoid recording uses that appear in input-output contexts such as zero_extract or pre_dec. This restricts the cases we consider to those for which local cprop can legitimately make replacements. */ static void local_cprop_find_used_regs (rtx *xptr, void *data) { rtx x = *xptr; if (x == 0) return; switch (GET_CODE (x)) { case ZERO_EXTRACT: case SIGN_EXTRACT: case STRICT_LOW_PART: return; case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: case PRE_MODIFY: case POST_MODIFY: /* Can only legitimately appear this early in the context of stack pushes for function arguments, but handle all of the codes nonetheless. */ return; case SUBREG: /* Setting a subreg of a register larger than word_mode leaves the non-written words unchanged. */ if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD) return; break; default: break; } find_used_regs (xptr, data); } /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall; their REG_EQUAL notes need updating. */ static bool do_local_cprop (rtx x, rtx insn, int alter_jumps, rtx *libcall_sp) { rtx newreg = NULL, newcnst = NULL; /* Rule out USE instructions and ASM statements as we don't want to change the hard registers mentioned. */ if (REG_P (x) && (REGNO (x) >= FIRST_PSEUDO_REGISTER || (GET_CODE (PATTERN (insn)) != USE && asm_noperands (PATTERN (insn)) < 0))) { cselib_val *val = cselib_lookup (x, GET_MODE (x), 0); struct elt_loc_list *l; if (!val) return false; for (l = val->locs; l; l = l->next) { rtx this_rtx = l->loc; rtx note; if (l->in_libcall) continue; if (gcse_constant_p (this_rtx)) newcnst = this_rtx; if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER /* Don't copy propagate if it has a REG_EQUIV note attached. At this point only function parameters should have REG_EQUIV notes, and if the argument slot is used somewhere explicitly, it means the address of the parameter has been taken, so we should not extend the lifetime of the pseudo. */ && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX)) || ! MEM_P (XEXP (note, 0)))) newreg = this_rtx; } if (newcnst && constprop_register (insn, x, newcnst, alter_jumps)) { /* If we find a case where we can't fix the retval REG_EQUAL notes to match the new register, we either have to abandon this replacement or fix delete_trivially_dead_insns to preserve the setting insn, or make it delete the REG_EQUAL note, and fix up all passes that require the REG_EQUAL note there. */ if (!adjust_libcall_notes (x, newcnst, insn, libcall_sp)) abort (); if (gcse_file != NULL) { fprintf (gcse_file, "LOCAL CONST-PROP: Replacing reg %d in ", REGNO (x)); fprintf (gcse_file, "insn %d with constant ", INSN_UID (insn)); print_rtl (gcse_file, newcnst); fprintf (gcse_file, "\n"); } const_prop_count++; return true; } else if (newreg && newreg != x && try_replace_reg (x, newreg, insn)) { adjust_libcall_notes (x, newreg, insn, libcall_sp); if (gcse_file != NULL) { fprintf (gcse_file, "LOCAL COPY-PROP: Replacing reg %d in insn %d", REGNO (x), INSN_UID (insn)); fprintf (gcse_file, " with reg %d\n", REGNO (newreg)); } copy_prop_count++; return true; } } return false; } /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall; their REG_EQUAL notes need updating to reflect that OLDREG has been replaced with NEWVAL in INSN. Return true if all substitutions could be made.
*/ static bool adjust_libcall_notes (rtx oldreg, rtx newval, rtx insn, rtx *libcall_sp) { rtx end; while ((end = *libcall_sp++)) { rtx note = find_reg_equal_equiv_note (end); if (! note) continue; if (REG_P (newval)) { if (reg_set_between_p (newval, PREV_INSN (insn), end)) { do { note = find_reg_equal_equiv_note (end); if (! note) continue; if (reg_mentioned_p (newval, XEXP (note, 0))) return false; } while ((end = *libcall_sp++)); return true; } } XEXP (note, 0) = replace_rtx (XEXP (note, 0), oldreg, newval); insn = end; } return true; } #define MAX_NESTED_LIBCALLS 9 static void local_cprop_pass (int alter_jumps) { rtx insn; struct reg_use_gcse *reg_used; rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp; bool changed = false; cselib_init (false); libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS]; *libcall_sp = 0; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX); if (note) { if (libcall_sp == libcall_stack) abort (); *--libcall_sp = XEXP (note, 0); } note = find_reg_note (insn, REG_RETVAL, NULL_RTX); if (note) libcall_sp++; note = find_reg_equal_equiv_note (insn); do { reg_use_count = 0; note_uses (&PATTERN (insn), local_cprop_find_used_regs, NULL); if (note) local_cprop_find_used_regs (&XEXP (note, 0), NULL); for (reg_used = &reg_use_table[0]; reg_use_count > 0; reg_used++, reg_use_count--) if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps, libcall_sp)) { changed = true; break; } if (INSN_DELETED_P (insn)) break; } while (reg_use_count); } cselib_process_insn (insn); } cselib_finish (); /* Global analysis may get into infinite loops for unreachable blocks. */ if (changed && alter_jumps) { delete_unreachable_blocks (); free_reg_set_mem (); alloc_reg_set_mem (max_reg_num ()); compute_sets (get_insns ()); } } /* Forward propagate copies. This includes copies and constants. Return nonzero if a change was made. */ static int cprop (int alter_jumps) { int changed; basic_block bb; rtx insn; /* Note we start at block 1. */ if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR) { if (gcse_file != NULL) fprintf (gcse_file, "\n"); return 0; } changed = 0; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb) { /* Reset tables used to keep track of what's still valid [since the start of the block]. */ reset_opr_set_tables (); for (insn = BB_HEAD (bb); insn != NULL && insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) if (INSN_P (insn)) { changed |= cprop_insn (insn, alter_jumps); /* Keep track of everything modified by this insn. */ /* ??? Need to be careful w.r.t. mods done to INSN. Don't call mark_oprs_set if we turned the insn into a NOTE. */ if (! NOTE_P (insn)) mark_oprs_set (insn); } } if (gcse_file != NULL) fprintf (gcse_file, "\n"); return changed; } /* Similar to get_condition, only the resulting condition must be valid at JUMP, instead of at EARLIEST. This differs from noce_get_condition in ifcvt.c in that we prefer not to settle for the condition variable in the jump instruction being integral. We prefer to be able to record the value of a user variable, rather than the value of a temporary used in a condition. This could be solved by recording the value of *every* register scanned by canonicalize_condition, but this would require some code reorganization. */ rtx fis_get_condition (rtx jump) { rtx cond, set, tmp, insn, earliest; bool reverse; if (!
any_condjump_p (jump)) return NULL_RTX; set = pc_set (jump); cond = XEXP (SET_SRC (set), 0); /* If this branches to JUMP_LABEL when the condition is false, reverse the condition. */ reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump)); /* Use canonicalize_condition to do the dirty work of manipulating MODE_CC values and COMPARE rtx codes. */ tmp = canonicalize_condition (jump, cond, reverse, &earliest, NULL_RTX, false); if (!tmp) return NULL_RTX; /* Verify that the given condition is valid at JUMP by virtue of not having been modified since EARLIEST. */ for (insn = earliest; insn != jump; insn = NEXT_INSN (insn)) if (INSN_P (insn) && modified_in_p (tmp, insn)) break; if (insn == jump) return tmp; /* The condition was modified. See if we can get a partial result that doesn't follow all the reversals. Perhaps combine can fold them together later. */ tmp = XEXP (tmp, 0); if (!REG_P (tmp) || GET_MODE_CLASS (GET_MODE (tmp)) != MODE_INT) return NULL_RTX; tmp = canonicalize_condition (jump, cond, reverse, &earliest, tmp, false); if (!tmp) return NULL_RTX; /* For sanity's sake, re-validate the new result. */ for (insn = earliest; insn != jump; insn = NEXT_INSN (insn)) if (INSN_P (insn) && modified_in_p (tmp, insn)) return NULL_RTX; return tmp; } /* Check the comparison COND to see if we can safely form an implicit set from it. COND is either an EQ or NE comparison. */ static bool implicit_set_cond_p (rtx cond) { enum machine_mode mode = GET_MODE (XEXP (cond, 0)); rtx cst = XEXP (cond, 1); /* We can't perform this optimization if either operand might be or might contain a signed zero. */ if (HONOR_SIGNED_ZEROS (mode)) { /* It is sufficient to check if CST is or contains a zero. We must handle float, complex, and vector. If any subpart is a zero, then the optimization can't be performed. */ /* ??? The complex and vector checks are not implemented yet. We just always return zero for them. */ if (GET_CODE (cst) == CONST_DOUBLE) { REAL_VALUE_TYPE d; REAL_VALUE_FROM_CONST_DOUBLE (d, cst); if (REAL_VALUES_EQUAL (d, dconst0)) return 0; } else return 0; } return gcse_constant_p (cst); } /* Find the implicit sets of a function. An "implicit set" is a constraint on the value of a variable, implied by a conditional jump. For example, following "if (x == 2)", the then branch may be optimized as though the conditional performed an "explicit set", in this example, "x = 2". This function records the set patterns that are implicit at the start of each basic block. */ static void find_implicit_sets (void) { basic_block bb, dest; unsigned int count; rtx cond, new; count = 0; FOR_EACH_BB (bb) /* Check for more than one successor. */ if (bb->succ && bb->succ->succ_next) { cond = fis_get_condition (BB_END (bb)); if (cond && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE) && REG_P (XEXP (cond, 0)) && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER && implicit_set_cond_p (cond)) { dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest : FALLTHRU_EDGE (bb)->dest; if (dest && ! dest->pred->pred_next && dest != EXIT_BLOCK_PTR) { new = gen_rtx_SET (VOIDmode, XEXP (cond, 0), XEXP (cond, 1)); implicit_sets[dest->index] = new; if (gcse_file) { fprintf(gcse_file, "Implicit set of reg %d in ", REGNO (XEXP (cond, 0))); fprintf(gcse_file, "basic block %d\n", dest->index); } count++; } } } if (gcse_file) fprintf (gcse_file, "Found %d implicit sets\n", count); } /* Perform one copy/constant propagation pass. PASS is the pass count. 
If CPROP_JUMPS is true, perform constant propagation into conditional jumps. If BYPASS_JUMPS is true, perform conditional jump bypassing optimizations. */ static int one_cprop_pass (int pass, int cprop_jumps, int bypass_jumps) { int changed = 0; const_prop_count = 0; copy_prop_count = 0; local_cprop_pass (cprop_jumps); /* Determine implicit sets. */ implicit_sets = xcalloc (last_basic_block, sizeof (rtx)); find_implicit_sets (); alloc_hash_table (max_cuid, &set_hash_table, 1); compute_hash_table (&set_hash_table); /* Free implicit_sets before peak usage. */ free (implicit_sets); implicit_sets = NULL; if (gcse_file) dump_hash_table (gcse_file, "SET", &set_hash_table); if (set_hash_table.n_elems > 0) { alloc_cprop_mem (last_basic_block, set_hash_table.n_elems); compute_cprop_data (); changed = cprop (cprop_jumps); if (bypass_jumps) changed |= bypass_conditional_jumps (); free_cprop_mem (); } free_hash_table (&set_hash_table); if (gcse_file) { fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ", current_function_name (), pass, bytes_used); fprintf (gcse_file, "%d const props, %d copy props\n\n", const_prop_count, copy_prop_count); } /* Global analysis may get into infinite loops for unreachable blocks. */ if (changed && cprop_jumps) delete_unreachable_blocks (); return changed; } /* Bypass conditional jumps. */ /* The value of last_basic_block at the beginning of the jump_bypass pass. The use of redirect_edge_and_branch_force may introduce new basic blocks, but the data flow analysis is only valid for basic block indices less than bypass_last_basic_block. */ static int bypass_last_basic_block; /* Find a set of REGNO to a constant that is available at the end of basic block BB. Returns NULL if no such set is found. Based heavily upon find_avail_set. */ static struct expr * find_bypass_set (int regno, int bb) { struct expr *result = 0; for (;;) { rtx src; struct expr *set = lookup_set (regno, &set_hash_table); while (set) { if (TEST_BIT (cprop_avout[bb], set->bitmap_index)) break; set = next_set (regno, set); } if (set == 0) break; if (GET_CODE (set->expr) != SET) abort (); src = SET_SRC (set->expr); if (gcse_constant_p (src)) result = set; if (! REG_P (src)) break; regno = REGNO (src); } return result; } /* Subroutine of bypass_block that checks whether a pseudo is killed by any of the instructions inserted on an edge. Jump bypassing places condition code setters on CFG edges using insert_insn_on_edge. This function is required to check that our data flow analysis is still valid prior to commit_edge_insertions. */ static bool reg_killed_on_edge (rtx reg, edge e) { rtx insn; for (insn = e->insns.r; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && reg_set_p (reg, insn)) return true; return false; } /* Subroutine of bypass_conditional_jumps that attempts to bypass the given basic block BB which has more than one predecessor. If not NULL, SETCC is the first instruction of BB, which is immediately followed by JUMP_INSN JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB. Returns nonzero if a change was made. During the jump bypassing pass, we may place copies of SETCC instructions on CFG edges. The following routine must be careful to pay attention to these inserted insns when performing its transformations. */ static int bypass_block (basic_block bb, rtx setcc, rtx jump) { rtx insn, note; edge e, enext, edest; int i, change; int may_be_loop_header; insn = (setcc != NULL) ? setcc : jump; /* Determine set of register uses in INSN. 
*/ reg_use_count = 0; note_uses (&PATTERN (insn), find_used_regs, NULL); note = find_reg_equal_equiv_note (insn); if (note) find_used_regs (&XEXP (note, 0), NULL); may_be_loop_header = false; for (e = bb->pred; e; e = e->pred_next) if (e->flags & EDGE_DFS_BACK) { may_be_loop_header = true; break; } change = 0; for (e = bb->pred; e; e = enext) { enext = e->pred_next; if (e->flags & EDGE_COMPLEX) continue; /* We can't redirect edges from new basic blocks. */ if (e->src->index >= bypass_last_basic_block) continue; /* The irreducible loops created by redirecting edges entering the loop from outside would decrease the effectiveness of some of the following optimizations, so prevent this. */ if (may_be_loop_header && !(e->flags & EDGE_DFS_BACK)) continue; for (i = 0; i < reg_use_count; i++) { struct reg_use_gcse *reg_used = &reg_use_table[i]; unsigned int regno = REGNO (reg_used->reg_rtx); basic_block dest, old_dest; struct expr *set; rtx src, new; if (regno >= max_gcse_regno) continue; set = find_bypass_set (regno, e->src->index); if (! set) continue; /* Check the data flow is valid after edge insertions. */ if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e)) continue; src = SET_SRC (pc_set (jump)); if (setcc != NULL) src = simplify_replace_rtx (src, SET_DEST (PATTERN (setcc)), SET_SRC (PATTERN (setcc))); new = simplify_replace_rtx (src, reg_used->reg_rtx, SET_SRC (set->expr)); /* Jump bypassing may have already placed instructions on edges of the CFG. We can't bypass an outgoing edge that has instructions associated with it, as these insns won't get executed if the incoming edge is redirected. */ if (new == pc_rtx) { edest = FALLTHRU_EDGE (bb); dest = edest->insns.r ? NULL : edest->dest; } else if (GET_CODE (new) == LABEL_REF) { dest = BLOCK_FOR_INSN (XEXP (new, 0)); /* Don't bypass edges containing instructions. */ for (edest = bb->succ; edest; edest = edest->succ_next) if (edest->dest == dest && edest->insns.r) { dest = NULL; break; } } else dest = NULL; /* Avoid unification of the edge with other edges from original branch. We would end up emitting the instruction on "both" edges. */ if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))) { edge e2; for (e2 = e->src->succ; e2; e2 = e2->succ_next) if (e2->dest == dest) { dest = NULL; break; } } old_dest = e->dest; if (dest != NULL && dest != old_dest && dest != EXIT_BLOCK_PTR) { redirect_edge_and_branch_force (e, dest); /* Copy the register setter to the redirected edge. Don't copy CC0 setters, as CC0 is dead after jump. */ if (setcc) { rtx pat = PATTERN (setcc); if (!CC0_P (SET_DEST (pat))) insert_insn_on_edge (copy_insn (pat), e); } if (gcse_file != NULL) { fprintf (gcse_file, "JUMP-BYPASS: Proved reg %d in jump_insn %d equals constant ", regno, INSN_UID (jump)); print_rtl (gcse_file, SET_SRC (set->expr)); fprintf (gcse_file, "\nBypass edge from %d->%d to %d\n", e->src->index, old_dest->index, dest->index); } change = 1; break; } } } return change; } /* Find basic blocks with more than one predecessor that only contain a single conditional jump. If the result of the comparison is known at compile-time from any incoming edge, redirect that edge to the appropriate target. Returns nonzero if a change was made. This function is now mis-named, because we also handle indirect jumps. */ static int bypass_conditional_jumps (void) { basic_block bb; int changed; rtx setcc; rtx insn; rtx dest; /* Note we start at block 1.
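   As an added illustration (hypothetical example, not part of the
   original comment), the source-level situation handled by jump
   bypassing is

       x = 0;
       goto join;
       ...
     join:
       if (x == 0) goto target;

   On the incoming edge from the block that sets x to 0 the comparison is
   known to be true, so that edge can be redirected straight to "target"
   and never needs to execute the conditional jump at all.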
*/ if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR) return 0; bypass_last_basic_block = last_basic_block; mark_dfs_back_edges (); changed = 0; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb) { /* Check for more than one predecessor. */ if (bb->pred && bb->pred->pred_next) { setcc = NULL_RTX; for (insn = BB_HEAD (bb); insn != NULL && insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) if (GET_CODE (insn) == INSN) { if (setcc) break; if (GET_CODE (PATTERN (insn)) != SET) break; dest = SET_DEST (PATTERN (insn)); if (REG_P (dest) || CC0_P (dest)) setcc = insn; else break; } else if (JUMP_P (insn)) { if ((any_condjump_p (insn) || computed_jump_p (insn)) && onlyjump_p (insn)) changed |= bypass_block (bb, setcc, insn); break; } else if (INSN_P (insn)) break; } } /* If we bypassed any register setting insns, we inserted a copy on the redirected edge. These need to be committed. */ if (changed) commit_edge_insertions(); return changed; } /* Compute PRE+LCM working variables. */ /* Local properties of expressions. */ /* Nonzero for expressions that are transparent in the block. */ static sbitmap *transp; /* Nonzero for expressions that are transparent at the end of the block. This is only zero for expressions killed by abnormal critical edge created by a calls. */ static sbitmap *transpout; /* Nonzero for expressions that are computed (available) in the block. */ static sbitmap *comp; /* Nonzero for expressions that are locally anticipatable in the block. */ static sbitmap *antloc; /* Nonzero for expressions where this block is an optimal computation point. */ static sbitmap *pre_optimal; /* Nonzero for expressions which are redundant in a particular block. */ static sbitmap *pre_redundant; /* Nonzero for expressions which should be inserted on a specific edge. */ static sbitmap *pre_insert_map; /* Nonzero for expressions which should be deleted in a specific block. */ static sbitmap *pre_delete_map; /* Contains the edge_list returned by pre_edge_lcm. */ static struct edge_list *edge_list; /* Redundant insns. */ static sbitmap pre_redundant_insns; /* Allocate vars used for PRE analysis. */ static void alloc_pre_mem (int n_blocks, int n_exprs) { transp = sbitmap_vector_alloc (n_blocks, n_exprs); comp = sbitmap_vector_alloc (n_blocks, n_exprs); antloc = sbitmap_vector_alloc (n_blocks, n_exprs); pre_optimal = NULL; pre_redundant = NULL; pre_insert_map = NULL; pre_delete_map = NULL; ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs); /* pre_insert and pre_delete are allocated later. */ } /* Free vars used for PRE analysis. */ static void free_pre_mem (void) { sbitmap_vector_free (transp); sbitmap_vector_free (comp); /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */ if (pre_optimal) sbitmap_vector_free (pre_optimal); if (pre_redundant) sbitmap_vector_free (pre_redundant); if (pre_insert_map) sbitmap_vector_free (pre_insert_map); if (pre_delete_map) sbitmap_vector_free (pre_delete_map); transp = comp = NULL; pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL; } /* Top level routine to do the dataflow analysis needed by PRE. */ static void compute_pre_data (void) { sbitmap trapping_expr; basic_block bb; unsigned int ui; compute_local_properties (transp, comp, antloc, &expr_hash_table); sbitmap_vector_zero (ae_kill, last_basic_block); /* Collect expressions which might trap. 
*/ trapping_expr = sbitmap_alloc (expr_hash_table.n_elems); sbitmap_zero (trapping_expr); for (ui = 0; ui < expr_hash_table.size; ui++) { struct expr *e; for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash) if (may_trap_p (e->expr)) SET_BIT (trapping_expr, e->bitmap_index); } /* Compute ae_kill for each basic block using: ~(TRANSP | COMP) */ FOR_EACH_BB (bb) { edge e; /* If the current block is the destination of an abnormal edge, we kill all trapping expressions because we won't be able to properly place the instruction on the edge. So make them neither anticipatable nor transparent. This is fairly conservative. */ for (e = bb->pred; e ; e = e->pred_next) if (e->flags & EDGE_ABNORMAL) { sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr); sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr); break; } sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]); sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]); } edge_list = pre_edge_lcm (gcse_file, expr_hash_table.n_elems, transp, comp, antloc, ae_kill, &pre_insert_map, &pre_delete_map); sbitmap_vector_free (antloc); antloc = NULL; sbitmap_vector_free (ae_kill); ae_kill = NULL; sbitmap_free (trapping_expr); } /* PRE utilities */ /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach block BB. VISITED is a pointer to a working buffer for tracking which BB's have been visited. It is NULL for the top-level call. We treat reaching expressions that go through blocks containing the same reaching expression as "not reaching". E.g. if EXPR is generated in blocks 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block 2 as not reaching. The intent is to improve the probability of finding only one reaching expression and to reduce register lifetimes by picking the closest such expression. */ static int pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited) { edge pred; for (pred = bb->pred; pred != NULL; pred = pred->pred_next) { basic_block pred_bb = pred->src; if (pred->src == ENTRY_BLOCK_PTR /* Has predecessor has already been visited? */ || visited[pred_bb->index]) ;/* Nothing to do. */ /* Does this predecessor generate this expression? */ else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index)) { /* Is this the occurrence we're looking for? Note that there's only one generating occurrence per block so we just need to check the block number. */ if (occr_bb == pred_bb) return 1; visited[pred_bb->index] = 1; } /* Ignore this predecessor if it kills the expression. */ else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index)) visited[pred_bb->index] = 1; /* Neither gen nor kill. */ else { visited[pred_bb->index] = 1; if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited)) return 1; } } /* All paths have been checked. */ return 0; } /* The wrapper for pre_expr_reaches_here_work that ensures that any memory allocated for that function is returned. */ static int pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb) { int rval; char *visited = xcalloc (last_basic_block, 1); rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited); free (visited); return rval; } /* Given an expr, generate RTL which we can insert at the end of a BB, or on an edge. Set the block number of any insns generated to the value of BB. 
*/ static rtx process_insert_insn (struct expr *expr) { rtx reg = expr->reaching_reg; rtx exp = copy_rtx (expr->expr); rtx pat; start_sequence (); /* If the expression is something that's an operand, like a constant, just copy it to a register. */ if (general_operand (exp, GET_MODE (reg))) emit_move_insn (reg, exp); /* Otherwise, make a new insn to compute this expression and make sure the insn will be recognized (this also adds any needed CLOBBERs). Copy the expression to make sure we don't have any sharing issues. */ else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp)))) abort (); pat = get_insns (); end_sequence (); return pat; } /* Add EXPR to the end of basic block BB. This is used by both the PRE and code hoisting. For PRE, we want to verify that the expr is either transparent or locally anticipatable in the target block. This check makes no sense for code hoisting. */ static void insert_insn_end_bb (struct expr *expr, basic_block bb, int pre) { rtx insn = BB_END (bb); rtx new_insn; rtx reg = expr->reaching_reg; int regno = REGNO (reg); rtx pat, pat_end; pat = process_insert_insn (expr); if (pat == NULL_RTX || ! INSN_P (pat)) abort (); pat_end = pat; while (NEXT_INSN (pat_end) != NULL_RTX) pat_end = NEXT_INSN (pat_end); /* If the last insn is a jump, insert EXPR in front [taking care to handle cc0, etc. properly]. Similarly we need to care trapping instructions in presence of non-call exceptions. */ if (JUMP_P (insn) || (GET_CODE (insn) == INSN && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))) { #ifdef HAVE_cc0 rtx note; #endif /* It should always be the case that we can put these instructions anywhere in the basic block with performing PRE optimizations. Check this. */ if (GET_CODE (insn) == INSN && pre && !TEST_BIT (antloc[bb->index], expr->bitmap_index) && !TEST_BIT (transp[bb->index], expr->bitmap_index)) abort (); /* If this is a jump table, then we can't insert stuff here. Since we know the previous real insn must be the tablejump, we insert the new instruction just before the tablejump. */ if (GET_CODE (PATTERN (insn)) == ADDR_VEC || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC) insn = prev_real_insn (insn); #ifdef HAVE_cc0 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts if cc0 isn't set. */ note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX); if (note) insn = XEXP (note, 0); else { rtx maybe_cc0_setter = prev_nonnote_insn (insn); if (maybe_cc0_setter && INSN_P (maybe_cc0_setter) && sets_cc0_p (PATTERN (maybe_cc0_setter))) insn = maybe_cc0_setter; } #endif /* FIXME: What if something in cc0/jump uses value set in new insn? */ new_insn = emit_insn_before (pat, insn); } /* Likewise if the last insn is a call, as will happen in the presence of exception handling. */ else if (CALL_P (insn) && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))) { /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers, we search backward and place the instructions before the first parameter is loaded. Do this for everyone for consistency and a presumption that we'll get better code elsewhere as well. It should always be the case that we can put these instructions anywhere in the basic block with performing PRE optimizations. Check this. */ if (pre && !TEST_BIT (antloc[bb->index], expr->bitmap_index) && !TEST_BIT (transp[bb->index], expr->bitmap_index)) abort (); /* Since different machines initialize their parameter registers in different orders, assume nothing. Collect the set of all parameter registers. 
*/ insn = find_first_parameter_load (insn, BB_HEAD (bb)); /* If we found all the parameter loads, then we want to insert before the first parameter load. If we did not find all the parameter loads, then we might have stopped on the head of the block, which could be a CODE_LABEL. If we inserted before the CODE_LABEL, then we would be putting the insn in the wrong basic block. In that case, put the insn after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */ while (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn)) insn = NEXT_INSN (insn); new_insn = emit_insn_before (pat, insn); } else new_insn = emit_insn_after (pat, insn); while (1) { if (INSN_P (pat)) { add_label_notes_gcse (PATTERN (pat), new_insn); note_stores (PATTERN (pat), record_set_info, pat); } if (pat == pat_end) break; pat = NEXT_INSN (pat); } gcse_create_count++; if (gcse_file) { fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ", bb->index, INSN_UID (new_insn)); fprintf (gcse_file, "copying expression %d to reg %d\n", expr->bitmap_index, regno); } } /* Insert partially redundant expressions on edges in the CFG to make the expressions fully redundant. */ static int pre_edge_insert (struct edge_list *edge_list, struct expr **index_map) { int e, i, j, num_edges, set_size, did_insert = 0; sbitmap *inserted; /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge if it reaches any of the deleted expressions. */ set_size = pre_insert_map[0]->size; num_edges = NUM_EDGES (edge_list); inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems); sbitmap_vector_zero (inserted, num_edges); for (e = 0; e < num_edges; e++) { int indx; basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e); for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS) { SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i]; for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1) if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX) { struct expr *expr = index_map[j]; struct occr *occr; /* Now look at each deleted occurrence of this expression. */ for (occr = expr->antic_occr; occr != NULL; occr = occr->next) { if (! occr->deleted_p) continue; /* Insert this expression on this edge if if it would reach the deleted occurrence in BB. */ if (!TEST_BIT (inserted[e], j)) { rtx insn; edge eg = INDEX_EDGE (edge_list, e); /* We can't insert anything on an abnormal and critical edge, so we insert the insn at the end of the previous block. There are several alternatives detailed in Morgans book P277 (sec 10.5) for handling this situation. This one is easiest for now. */ if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL) insert_insn_end_bb (index_map[j], bb, 0); else { insn = process_insert_insn (index_map[j]); insert_insn_on_edge (insn, eg); } if (gcse_file) { fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ", bb->index, INDEX_EDGE_SUCC_BB (edge_list, e)->index); fprintf (gcse_file, "copy expression %d\n", expr->bitmap_index); } update_ld_motion_stores (expr); SET_BIT (inserted[e], j); did_insert = 1; gcse_create_count++; } } } } } sbitmap_vector_free (inserted); return did_insert; } /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG. Given "old_reg <- expr" (INSN), instead of adding after it reaching_reg <- old_reg it's better to do the following: reaching_reg <- expr old_reg <- reaching_reg because this way copy propagation can discover additional PRE opportunities. But if this fails, we try the old way. When "expr" is a store, i.e. 
given "MEM <- old_reg", instead of adding after it reaching_reg <- old_reg it's better to add it before as follows: reaching_reg <- old_reg MEM <- reaching_reg. */ static void pre_insert_copy_insn (struct expr *expr, rtx insn) { rtx reg = expr->reaching_reg; int regno = REGNO (reg); int indx = expr->bitmap_index; rtx pat = PATTERN (insn); rtx set, new_insn; rtx old_reg; int i; /* This block matches the logic in hash_scan_insn. */ if (GET_CODE (pat) == SET) set = pat; else if (GET_CODE (pat) == PARALLEL) { /* Search through the parallel looking for the set whose source was the expression that we're interested in. */ set = NULL_RTX; for (i = 0; i < XVECLEN (pat, 0); i++) { rtx x = XVECEXP (pat, 0, i); if (GET_CODE (x) == SET && expr_equiv_p (SET_SRC (x), expr->expr)) { set = x; break; } } } else abort (); if (REG_P (SET_DEST (set))) { old_reg = SET_DEST (set); /* Check if we can modify the set destination in the original insn. */ if (validate_change (insn, &SET_DEST (set), reg, 0)) { new_insn = gen_move_insn (old_reg, reg); new_insn = emit_insn_after (new_insn, insn); /* Keep register set table up to date. */ replace_one_set (REGNO (old_reg), insn, new_insn); record_one_set (regno, insn); } else { new_insn = gen_move_insn (reg, old_reg); new_insn = emit_insn_after (new_insn, insn); /* Keep register set table up to date. */ record_one_set (regno, new_insn); } } else /* This is possible only in case of a store to memory. */ { old_reg = SET_SRC (set); new_insn = gen_move_insn (reg, old_reg); /* Check if we can modify the set source in the original insn. */ if (validate_change (insn, &SET_SRC (set), reg, 0)) new_insn = emit_insn_before (new_insn, insn); else new_insn = emit_insn_after (new_insn, insn); /* Keep register set table up to date. */ record_one_set (regno, new_insn); } gcse_create_count++; if (gcse_file) fprintf (gcse_file, "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n", BLOCK_NUM (insn), INSN_UID (new_insn), indx, INSN_UID (insn), regno); } /* Copy available expressions that reach the redundant expression to `reaching_reg'. */ static void pre_insert_copies (void) { unsigned int i, added_copy; struct expr *expr; struct occr *occr; struct occr *avail; /* For each available expression in the table, copy the result to `reaching_reg' if the expression reaches a deleted one. ??? The current algorithm is rather brute force. Need to do some profiling. */ for (i = 0; i < expr_hash_table.size; i++) for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash) { /* If the basic block isn't reachable, PPOUT will be TRUE. However, we don't want to insert a copy here because the expression may not really be redundant. So only insert an insn if the expression was deleted. This test also avoids further processing if the expression wasn't deleted anywhere. */ if (expr->reaching_reg == NULL) continue; /* Set when we add a copy for that expression. */ added_copy = 0; for (occr = expr->antic_occr; occr != NULL; occr = occr->next) { if (! occr->deleted_p) continue; for (avail = expr->avail_occr; avail != NULL; avail = avail->next) { rtx insn = avail->insn; /* No need to handle this one if handled already. */ if (avail->copied_p) continue; /* Don't handle this one if it's a redundant one. */ if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn))) continue; /* Or if the expression doesn't reach the deleted one. */ if (! 
pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn), expr, BLOCK_FOR_INSN (occr->insn))) continue; added_copy = 1; /* Copy the result of avail to reaching_reg. */ pre_insert_copy_insn (expr, insn); avail->copied_p = 1; } } if (added_copy) update_ld_motion_stores (expr); } } /* Emit move from SRC to DEST noting the equivalence with expression computed in INSN. */ static rtx gcse_emit_move_after (rtx src, rtx dest, rtx insn) { rtx new; rtx set = single_set (insn), set2; rtx note; rtx eqv; /* This should never fail since we're creating a reg->reg copy we've verified to be valid. */ new = emit_insn_after (gen_move_insn (dest, src), insn); /* Note the equivalence for local CSE pass. */ set2 = single_set (new); if (!set2 || !rtx_equal_p (SET_DEST (set2), dest)) return new; if ((note = find_reg_equal_equiv_note (insn))) eqv = XEXP (note, 0); else eqv = SET_SRC (set); set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv)); return new; } /* Delete redundant computations. Deletion is done by changing the insn to copy the `reaching_reg' of the expression into the result of the SET. It is left to later passes (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it. Returns nonzero if a change is made. */ static int pre_delete (void) { unsigned int i; int changed; struct expr *expr; struct occr *occr; changed = 0; for (i = 0; i < expr_hash_table.size; i++) for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash) { int indx = expr->bitmap_index; /* We only need to search antic_occr since we require ANTLOC != 0. */ for (occr = expr->antic_occr; occr != NULL; occr = occr->next) { rtx insn = occr->insn; rtx set; basic_block bb = BLOCK_FOR_INSN (insn); /* We only delete insns that have a single_set. */ if (TEST_BIT (pre_delete_map[bb->index], indx) && (set = single_set (insn)) != 0) { /* Create a pseudo-reg to store the result of reaching expressions into. Get the mode for the new pseudo from the mode of the original destination pseudo. */ if (expr->reaching_reg == NULL) expr->reaching_reg = gen_reg_rtx (GET_MODE (SET_DEST (set))); gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn); delete_insn (insn); occr->deleted_p = 1; SET_BIT (pre_redundant_insns, INSN_CUID (insn)); changed = 1; gcse_subst_count++; if (gcse_file) { fprintf (gcse_file, "PRE: redundant insn %d (expression %d) in ", INSN_UID (insn), indx); fprintf (gcse_file, "bb %d, reaching reg is %d\n", bb->index, REGNO (expr->reaching_reg)); } } } } return changed; } /* Perform GCSE optimizations using PRE. This is called by one_pre_gcse_pass after all the dataflow analysis has been done. This is based on the original Morel-Renvoise paper Fred Chow's thesis, and lazy code motion from Knoop, Ruthing and Steffen as described in Advanced Compiler Design and Implementation. ??? A new pseudo reg is created to hold the reaching expression. The nice thing about the classical approach is that it would try to use an existing reg. If the register can't be adequately optimized [i.e. we introduce reload problems], one could add a pass here to propagate the new register through the block. ??? We don't handle single sets in PARALLELs because we're [currently] not able to copy the rest of the parallel when we insert copies to create full redundancies from partial redundancies. However, there's no reason why we can't handle PARALLELs in the cases where there are no partial redundancies. 
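   An added sketch for illustration (hypothetical variables, not part of
   the original comment): the classic partial redundancy removed here is

       if (c)                         if (c)
         x = a + b;                     { t = a + b; x = t; }
       else                           else
         ;                              t = a + b;      <- inserted on edge
       y = a + b;                     y = t;            <- redundancy deleted

   The expression a + b is redundant only on the path through the "then"
   arm; inserting an evaluation on the other edge makes the later
   computation fully redundant, so it can be replaced by a copy from the
   reaching register.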
*/ static int pre_gcse (void) { unsigned int i; int did_insert, changed; struct expr **index_map; struct expr *expr; /* Compute a mapping from expression number (`bitmap_index') to hash table entry. */ index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *)); for (i = 0; i < expr_hash_table.size; i++) for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash) index_map[expr->bitmap_index] = expr; /* Reset bitmap used to track which insns are redundant. */ pre_redundant_insns = sbitmap_alloc (max_cuid); sbitmap_zero (pre_redundant_insns); /* Delete the redundant insns first so that - we know what register to use for the new insns and for the other ones with reaching expressions - we know which insns are redundant when we go to create copies */ changed = pre_delete (); did_insert = pre_edge_insert (edge_list, index_map); /* In other places with reaching expressions, copy the expression to the specially allocated pseudo-reg that reaches the redundant expr. */ pre_insert_copies (); if (did_insert) { commit_edge_insertions (); changed = 1; } free (index_map); sbitmap_free (pre_redundant_insns); return changed; } /* Top level routine to perform one PRE GCSE pass. Return nonzero if a change was made. */ static int one_pre_gcse_pass (int pass) { int changed = 0; gcse_subst_count = 0; gcse_create_count = 0; alloc_hash_table (max_cuid, &expr_hash_table, 0); add_noreturn_fake_exit_edges (); if (flag_gcse_lm) compute_ld_motion_mems (); compute_hash_table (&expr_hash_table); trim_ld_motion_mems (); if (gcse_file) dump_hash_table (gcse_file, "Expression", &expr_hash_table); if (expr_hash_table.n_elems > 0) { alloc_pre_mem (last_basic_block, expr_hash_table.n_elems); compute_pre_data (); changed |= pre_gcse (); free_edge_list (edge_list); free_pre_mem (); } free_ldst_mems (); remove_fake_edges (); free_hash_table (&expr_hash_table); if (gcse_file) { fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ", current_function_name (), pass, bytes_used); fprintf (gcse_file, "%d substs, %d insns created\n", gcse_subst_count, gcse_create_count); } return changed; } /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN. If notes are added to an insn which references a CODE_LABEL, the LABEL_NUSES count is incremented. We have to add REG_LABEL notes, because the following loop optimization pass requires them. */ /* ??? This is very similar to the loop.c add_label_notes function. We could probably share code here. */ /* ??? If there was a jump optimization pass after gcse and before loop, then we would not need to do this here, because jump would add the necessary REG_LABEL notes. */ static void add_label_notes_gcse (rtx x, rtx insn) { enum rtx_code code = GET_CODE (x); int i, j; const char *fmt; if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x)) { /* This code used to ignore labels that referred to dispatch tables to avoid flow generating (slightly) worse code. We no longer ignore such label references (see LABEL_REF handling in mark_jump_label for additional information). */ REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0), REG_NOTES (insn)); if (LABEL_P (XEXP (x, 0))) LABEL_NUSES (XEXP (x, 0))++; return; } for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--) { if (fmt[i] == 'e') add_label_notes_gcse (XEXP (x, i), insn); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) add_label_notes_gcse (XVECEXP (x, i, j), insn); } } /* Compute transparent outgoing information for each block. 
An expression is transparent to an edge unless it is killed by the edge itself. This can only happen with abnormal control flow, when the edge is traversed through a call. This happens with non-local labels and exceptions. This would not be necessary if we split the edge. While this is normally impossible for abnormal critical edges, with some effort it should be possible with exception handling, since we still have control over which handler should be invoked. But due to increased EH table sizes, this may not be worthwhile. */ static void compute_transpout (void) { basic_block bb; unsigned int i; struct expr *expr; sbitmap_vector_ones (transpout, last_basic_block); FOR_EACH_BB (bb) { /* Note that flow inserted a nop a the end of basic blocks that end in call instructions for reasons other than abnormal control flow. */ if (! CALL_P (BB_END (bb))) continue; for (i = 0; i < expr_hash_table.size; i++) for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash) if (MEM_P (expr->expr)) { if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0))) continue; /* ??? Optimally, we would use interprocedural alias analysis to determine if this mem is actually killed by this call. */ RESET_BIT (transpout[bb->index], expr->bitmap_index); } } } /* Code Hoisting variables and subroutines. */ /* Very busy expressions. */ static sbitmap *hoist_vbein; static sbitmap *hoist_vbeout; /* Hoistable expressions. */ static sbitmap *hoist_exprs; /* ??? We could compute post dominators and run this algorithm in reverse to perform tail merging, doing so would probably be more effective than the tail merging code in jump.c. It's unclear if tail merging could be run in parallel with code hoisting. It would be nice. */ /* Allocate vars used for code hoisting analysis. */ static void alloc_code_hoist_mem (int n_blocks, int n_exprs) { antloc = sbitmap_vector_alloc (n_blocks, n_exprs); transp = sbitmap_vector_alloc (n_blocks, n_exprs); comp = sbitmap_vector_alloc (n_blocks, n_exprs); hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs); hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs); hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs); transpout = sbitmap_vector_alloc (n_blocks, n_exprs); } /* Free vars used for code hoisting analysis. */ static void free_code_hoist_mem (void) { sbitmap_vector_free (antloc); sbitmap_vector_free (transp); sbitmap_vector_free (comp); sbitmap_vector_free (hoist_vbein); sbitmap_vector_free (hoist_vbeout); sbitmap_vector_free (hoist_exprs); sbitmap_vector_free (transpout); free_dominance_info (CDI_DOMINATORS); } /* Compute the very busy expressions at entry/exit from each block. An expression is very busy if all paths from a given point compute the expression. */ static void compute_code_hoist_vbeinout (void) { int changed, passes; basic_block bb; sbitmap_vector_zero (hoist_vbeout, last_basic_block); sbitmap_vector_zero (hoist_vbein, last_basic_block); passes = 0; changed = 1; while (changed) { changed = 0; /* We scan the blocks in the reverse order to speed up the convergence. */ FOR_EACH_BB_REVERSE (bb) { changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index], hoist_vbeout[bb->index], transp[bb->index]); if (bb->next_bb != EXIT_BLOCK_PTR) sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index); } passes++; } if (gcse_file) fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes); } /* Top level routine to do the dataflow analysis needed by code hoisting. 
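   An added illustration (hypothetical variables, not from the original
   comment): the transformation this analysis drives is hoisting of very
   busy expressions, e.g.

       if (c)                         t = a * b;
         x = a * b;           =>      if (c)
       else                             x = t;
         y = a * b;                   else
                                        y = t;

   Because a * b is computed on every path leaving the branch, it is
   "very busy" there, and a single evaluation can be hoisted into the
   dominating block, primarily as a code-size optimization.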
*/ static void compute_code_hoist_data (void) { compute_local_properties (transp, comp, antloc, &expr_hash_table); compute_transpout (); compute_code_hoist_vbeinout (); calculate_dominance_info (CDI_DOMINATORS); if (gcse_file) fprintf (gcse_file, "\n"); } /* Determine if the expression identified by EXPR_INDEX would reach BB unimpared if it was placed at the end of EXPR_BB. It's unclear exactly what Muchnick meant by "unimpared". It seems to me that the expression must either be computed or transparent in *every* block in the path(s) from EXPR_BB to BB. Any other definition would allow the expression to be hoisted out of loops, even if the expression wasn't a loop invariant. Contrast this to reachability for PRE where an expression is considered reachable if *any* path reaches instead of *all* paths. */ static int hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited) { edge pred; int visited_allocated_locally = 0; if (visited == NULL) { visited_allocated_locally = 1; visited = xcalloc (last_basic_block, 1); } for (pred = bb->pred; pred != NULL; pred = pred->pred_next) { basic_block pred_bb = pred->src; if (pred->src == ENTRY_BLOCK_PTR) break; else if (pred_bb == expr_bb) continue; else if (visited[pred_bb->index]) continue; /* Does this predecessor generate this expression? */ else if (TEST_BIT (comp[pred_bb->index], expr_index)) break; else if (! TEST_BIT (transp[pred_bb->index], expr_index)) break; /* Not killed. */ else { visited[pred_bb->index] = 1; if (! hoist_expr_reaches_here_p (expr_bb, expr_index, pred_bb, visited)) break; } } if (visited_allocated_locally) free (visited); return (pred == NULL); } /* Actually perform code hoisting. */ static void hoist_code (void) { basic_block bb, dominated; basic_block *domby; unsigned int domby_len; unsigned int i,j; struct expr **index_map; struct expr *expr; sbitmap_vector_zero (hoist_exprs, last_basic_block); /* Compute a mapping from expression number (`bitmap_index') to hash table entry. */ index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *)); for (i = 0; i < expr_hash_table.size; i++) for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash) index_map[expr->bitmap_index] = expr; /* Walk over each basic block looking for potentially hoistable expressions, nothing gets hoisted from the entry block. */ FOR_EACH_BB (bb) { int found = 0; int insn_inserted_p; domby_len = get_dominated_by (CDI_DOMINATORS, bb, &domby); /* Examine each expression that is very busy at the exit of this block. These are the potentially hoistable expressions. */ for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++) { int hoistable = 0; if (TEST_BIT (hoist_vbeout[bb->index], i) && TEST_BIT (transpout[bb->index], i)) { /* We've found a potentially hoistable expression, now we look at every block BB dominates to see if it computes the expression. */ for (j = 0; j < domby_len; j++) { dominated = domby[j]; /* Ignore self dominance. */ if (bb == dominated) continue; /* We've found a dominated block, now see if it computes the busy expression and whether or not moving that expression to the "beginning" of that block is safe. */ if (!TEST_BIT (antloc[dominated->index], i)) continue; /* Note if the expression would reach the dominated block unimpared if it was placed at the end of BB. Keep track of how many times this expression is hoistable from a dominated block into BB. 
*/ if (hoist_expr_reaches_here_p (bb, i, dominated, NULL)) hoistable++; } /* If we found more than one hoistable occurrence of this expression, then note it in the bitmap of expressions to hoist. It makes no sense to hoist things which are computed in only one BB, and doing so tends to pessimize register allocation. One could increase this value to try harder to avoid any possible code expansion due to register allocation issues; however experiments have shown that the vast majority of hoistable expressions are only movable from two successors, so raising this threshold is likely to nullify any benefit we get from code hoisting. */ if (hoistable > 1) { SET_BIT (hoist_exprs[bb->index], i); found = 1; } } } /* If we found nothing to hoist, then quit now. */ if (! found) { free (domby); continue; } /* Loop over all the hoistable expressions. */ for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++) { /* We want to insert the expression into BB only once, so note when we've inserted it. */ insn_inserted_p = 0; /* These tests should be the same as the tests above. */ if (TEST_BIT (hoist_vbeout[bb->index], i)) { /* We've found a potentially hoistable expression, now we look at every block BB dominates to see if it computes the expression. */ for (j = 0; j < domby_len; j++) { dominated = domby[j]; /* Ignore self dominance. */ if (bb == dominated) continue; /* We've found a dominated block, now see if it computes the busy expression and whether or not moving that expression to the "beginning" of that block is safe. */ if (!TEST_BIT (antloc[dominated->index], i)) continue; /* The expression is computed in the dominated block and it would be safe to compute it at the start of the dominated block. Now we have to determine if the expression would reach the dominated block if it was placed at the end of BB. */ if (hoist_expr_reaches_here_p (bb, i, dominated, NULL)) { struct expr *expr = index_map[i]; struct occr *occr = expr->antic_occr; rtx insn; rtx set; /* Find the right occurrence of this expression. */ while (occr && BLOCK_FOR_INSN (occr->insn) != dominated) occr = occr->next; /* Should never happen. */ if (!occr) abort (); insn = occr->insn; set = single_set (insn); if (! set) abort (); /* Create a pseudo-reg to store the result of reaching expressions into. Get the mode for the new pseudo from the mode of the original destination pseudo. */ if (expr->reaching_reg == NULL) expr->reaching_reg = gen_reg_rtx (GET_MODE (SET_DEST (set))); gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn); delete_insn (insn); occr->deleted_p = 1; if (!insn_inserted_p) { insert_insn_end_bb (index_map[i], bb, 0); insn_inserted_p = 1; } } } } } free (domby); } free (index_map); } /* Top level routine to perform one code hoisting (aka unification) pass. Return nonzero if a change was made. */ static int one_code_hoisting_pass (void) { int changed = 0; alloc_hash_table (max_cuid, &expr_hash_table, 0); compute_hash_table (&expr_hash_table); if (gcse_file) dump_hash_table (gcse_file, "Code Hoisting Expressions", &expr_hash_table); if (expr_hash_table.n_elems > 0) { alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems); compute_code_hoist_data (); hoist_code (); free_code_hoist_mem (); } free_hash_table (&expr_hash_table); return changed; } /* Here we provide the things required to do store motion towards the exit. In order for this to be effective, gcse also needed to be taught how to move a load when it is killed only by a store to itself.
int i; float a[10]; void foo(float scale) { for (i=0; i<10; i++) a[i] *= scale; } 'i' is both loaded and stored to in the loop. Normally, gcse cannot move the load out since its live around the loop, and stored at the bottom of the loop. The 'Load Motion' referred to and implemented in this file is an enhancement to gcse which when using edge based lcm, recognizes this situation and allows gcse to move the load out of the loop. Once gcse has hoisted the load, store motion can then push this load towards the exit, and we end up with no loads or stores of 'i' in the loop. */ /* This will search the ldst list for a matching expression. If it doesn't find one, we create one and initialize it. */ static struct ls_expr * ldst_entry (rtx x) { int do_not_record_p = 0; struct ls_expr * ptr; unsigned int hash; hash = hash_expr_1 (x, GET_MODE (x), & do_not_record_p); for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next) if (ptr->hash_index == hash && expr_equiv_p (ptr->pattern, x)) return ptr; ptr = xmalloc (sizeof (struct ls_expr)); ptr->next = pre_ldst_mems; ptr->expr = NULL; ptr->pattern = x; ptr->pattern_regs = NULL_RTX; ptr->loads = NULL_RTX; ptr->stores = NULL_RTX; ptr->reaching_reg = NULL_RTX; ptr->invalid = 0; ptr->index = 0; ptr->hash_index = hash; pre_ldst_mems = ptr; return ptr; } /* Free up an individual ldst entry. */ static void free_ldst_entry (struct ls_expr * ptr) { free_INSN_LIST_list (& ptr->loads); free_INSN_LIST_list (& ptr->stores); free (ptr); } /* Free up all memory associated with the ldst list. */ static void free_ldst_mems (void) { while (pre_ldst_mems) { struct ls_expr * tmp = pre_ldst_mems; pre_ldst_mems = pre_ldst_mems->next; free_ldst_entry (tmp); } pre_ldst_mems = NULL; } /* Dump debugging info about the ldst list. */ static void print_ldst_list (FILE * file) { struct ls_expr * ptr; fprintf (file, "LDST list: \n"); for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr)) { fprintf (file, " Pattern (%3d): ", ptr->index); print_rtl (file, ptr->pattern); fprintf (file, "\n Loads : "); if (ptr->loads) print_rtl (file, ptr->loads); else fprintf (file, "(nil)"); fprintf (file, "\n Stores : "); if (ptr->stores) print_rtl (file, ptr->stores); else fprintf (file, "(nil)"); fprintf (file, "\n\n"); } fprintf (file, "\n"); } /* Returns 1 if X is in the list of ldst only expressions. */ static struct ls_expr * find_rtx_in_ldst (rtx x) { struct ls_expr * ptr; for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next) if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid) return ptr; return NULL; } /* Assign each element of the list of mems a monotonically increasing value. */ static int enumerate_ldsts (void) { struct ls_expr * ptr; int n = 0; for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next) ptr->index = n++; return n; } /* Return first item in the list. */ static inline struct ls_expr * first_ls_expr (void) { return pre_ldst_mems; } /* Return the next item in the list after the specified one. */ static inline struct ls_expr * next_ls_expr (struct ls_expr * ptr) { return ptr->next; } /* Load Motion for loads which only kill themselves. */ /* Return true if x is a simple MEM operation, with no registers or side effects. These are the types of loads we consider for the ld_motion list, otherwise we let the usual aliasing take care of it. */ static int simple_mem (rtx x) { if (! MEM_P (x)) return 0; if (MEM_VOLATILE_P (x)) return 0; if (GET_MODE (x) == BLKmode) return 0; /* If we are handling exceptions, we must be careful with memory references that may trap. 
If we are not, the behavior is undefined, so we may just continue. */ if (flag_non_call_exceptions && may_trap_p (x)) return 0; if (side_effects_p (x)) return 0; /* Do not consider function arguments passed on stack. */ if (reg_mentioned_p (stack_pointer_rtx, x)) return 0; if (flag_float_store && FLOAT_MODE_P (GET_MODE (x))) return 0; return 1; } /* Make sure there isn't a buried reference in this pattern anywhere. If there is, invalidate the entry for it since we're not capable of fixing it up just yet.. We have to be sure we know about ALL loads since the aliasing code will allow all entries in the ld_motion list to not-alias itself. If we miss a load, we will get the wrong value since gcse might common it and we won't know to fix it up. */ static void invalidate_any_buried_refs (rtx x) { const char * fmt; int i, j; struct ls_expr * ptr; /* Invalidate it in the list. */ if (MEM_P (x) && simple_mem (x)) { ptr = ldst_entry (x); ptr->invalid = 1; } /* Recursively process the insn. */ fmt = GET_RTX_FORMAT (GET_CODE (x)); for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) { if (fmt[i] == 'e') invalidate_any_buried_refs (XEXP (x, i)); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) invalidate_any_buried_refs (XVECEXP (x, i, j)); } } /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple being defined as MEM loads and stores to symbols, with no side effects and no registers in the expression. For a MEM destination, we also check that the insn is still valid if we replace the destination with a REG, as is done in update_ld_motion_stores. If there are any uses/defs which don't match this criteria, they are invalidated and trimmed out later. */ static void compute_ld_motion_mems (void) { struct ls_expr * ptr; basic_block bb; rtx insn; pre_ldst_mems = NULL; FOR_EACH_BB (bb) { for (insn = BB_HEAD (bb); insn && insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { if (GET_CODE (PATTERN (insn)) == SET) { rtx src = SET_SRC (PATTERN (insn)); rtx dest = SET_DEST (PATTERN (insn)); /* Check for a simple LOAD... */ if (MEM_P (src) && simple_mem (src)) { ptr = ldst_entry (src); if (REG_P (dest)) ptr->loads = alloc_INSN_LIST (insn, ptr->loads); else ptr->invalid = 1; } else { /* Make sure there isn't a buried load somewhere. */ invalidate_any_buried_refs (src); } /* Check for stores. Don't worry about aliased ones, they will block any movement we might do later. We only care about this exact pattern since those are the only circumstance that we will ignore the aliasing info. */ if (MEM_P (dest) && simple_mem (dest)) { ptr = ldst_entry (dest); if (! MEM_P (src) && GET_CODE (src) != ASM_OPERANDS /* Check for REG manually since want_to_gcse_p returns 0 for all REGs. */ && can_assign_to_reg_p (src)) ptr->stores = alloc_INSN_LIST (insn, ptr->stores); else ptr->invalid = 1; } } else invalidate_any_buried_refs (PATTERN (insn)); } } } } /* Remove any references that have been either invalidated or are not in the expression list for pre gcse. */ static void trim_ld_motion_mems (void) { struct ls_expr * * last = & pre_ldst_mems; struct ls_expr * ptr = pre_ldst_mems; while (ptr != NULL) { struct expr * expr; /* Delete if entry has been made invalid. */ if (! ptr->invalid) { /* Delete if we cannot find this mem in the expression list. 
*/ unsigned int hash = ptr->hash_index % expr_hash_table.size; for (expr = expr_hash_table.table[hash]; expr != NULL; expr = expr->next_same_hash) if (expr_equiv_p (expr->expr, ptr->pattern)) break; } else expr = (struct expr *) 0; if (expr) { /* Set the expression field if we are keeping it. */ ptr->expr = expr; last = & ptr->next; ptr = ptr->next; } else { *last = ptr->next; free_ldst_entry (ptr); ptr = * last; } } /* Show the world what we've found. */ if (gcse_file && pre_ldst_mems != NULL) print_ldst_list (gcse_file); } /* This routine will take an expression which we are replacing with a reaching register, and update any stores that are needed if that expression is in the ld_motion list. Stores are updated by copying their SRC to the reaching register, and then storing the reaching register into the store location. This keeps the correct value in the reaching register for the loads. */ static void update_ld_motion_stores (struct expr * expr) { struct ls_expr * mem_ptr; if ((mem_ptr = find_rtx_in_ldst (expr->expr))) { /* We can try to find just the REACHED stores, but it shouldn't matter to set the reaching reg everywhere... some might be dead and should be eliminated later. */ /* We replace (set mem expr) with (set reg expr) (set mem reg) where reg is the reaching reg used in the load. We checked in compute_ld_motion_mems that we can replace (set mem expr) with (set reg expr) in that insn. */ rtx list = mem_ptr->stores; for ( ; list != NULL_RTX; list = XEXP (list, 1)) { rtx insn = XEXP (list, 0); rtx pat = PATTERN (insn); rtx src = SET_SRC (pat); rtx reg = expr->reaching_reg; rtx copy, new; /* If we've already copied it, continue. */ if (expr->reaching_reg == src) continue; if (gcse_file) { fprintf (gcse_file, "PRE: store updated with reaching reg "); print_rtl (gcse_file, expr->reaching_reg); fprintf (gcse_file, ":\n "); print_inline_rtx (gcse_file, insn, 8); fprintf (gcse_file, "\n"); } copy = gen_move_insn ( reg, copy_rtx (SET_SRC (pat))); new = emit_insn_before (copy, insn); record_one_set (REGNO (reg), new); SET_SRC (pat) = reg; /* un-recognize this pattern since it's probably different now. */ INSN_CODE (insn) = -1; gcse_create_count++; } } } /* Store motion code. */ #define ANTIC_STORE_LIST(x) ((x)->loads) #define AVAIL_STORE_LIST(x) ((x)->stores) #define LAST_AVAIL_CHECK_FAILURE(x) ((x)->reaching_reg) /* This is used to communicate the target bitvector we want to use in the reg_set_info routine when called via the note_stores mechanism. */ static int * regvec; /* And current insn, for the same routine. */ static rtx compute_store_table_current_insn; /* Used in computing the reverse edge graph bit vectors. */ static sbitmap * st_antloc; /* Global holding the number of store expressions we are dealing with. */ static int num_stores; /* Checks to set if we need to mark a register set. Called from note_stores. */ static void reg_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data) { sbitmap bb_reg = data; if (GET_CODE (dest) == SUBREG) dest = SUBREG_REG (dest); if (REG_P (dest)) { regvec[REGNO (dest)] = INSN_UID (compute_store_table_current_insn); if (bb_reg) SET_BIT (bb_reg, REGNO (dest)); } } /* Clear any mark that says that this insn sets dest. Called from note_stores.
*/ static void reg_clear_last_set (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data) { int *dead_vec = data; if (GET_CODE (dest) == SUBREG) dest = SUBREG_REG (dest); if (REG_P (dest) && dead_vec[REGNO (dest)] == INSN_UID (compute_store_table_current_insn)) dead_vec[REGNO (dest)] = 0; } /* Return zero if some of the registers in list X are killed due to set of registers in bitmap REGS_SET. */ static bool store_ops_ok (rtx x, int *regs_set) { rtx reg; for (; x; x = XEXP (x, 1)) { reg = XEXP (x, 0); if (regs_set[REGNO(reg)]) return false; } return true; } /* Returns a list of registers mentioned in X. */ static rtx extract_mentioned_regs (rtx x) { return extract_mentioned_regs_helper (x, NULL_RTX); } /* Helper for extract_mentioned_regs; ACCUM is used to accumulate used registers. */ static rtx extract_mentioned_regs_helper (rtx x, rtx accum) { int i; enum rtx_code code; const char * fmt; /* Repeat is used to turn tail-recursion into iteration. */ repeat: if (x == 0) return accum; code = GET_CODE (x); switch (code) { case REG: return alloc_EXPR_LIST (0, x, accum); case MEM: x = XEXP (x, 0); goto repeat; case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: /* We do not run this function with arguments having side effects. */ abort (); case PC: case CC0: /*FIXME*/ case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case ADDR_VEC: case ADDR_DIFF_VEC: return accum; default: break; } i = GET_RTX_LENGTH (code) - 1; fmt = GET_RTX_FORMAT (code); for (; i >= 0; i--) { if (fmt[i] == 'e') { rtx tem = XEXP (x, i); /* If we are about to do the last recursive call needed at this level, change it into iteration. */ if (i == 0) { x = tem; goto repeat; } accum = extract_mentioned_regs_helper (tem, accum); } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) accum = extract_mentioned_regs_helper (XVECEXP (x, i, j), accum); } } return accum; } /* Determine whether INSN is MEM store pattern that we will consider moving. REGS_SET_BEFORE is bitmap of registers set before (and including) the current insn, REGS_SET_AFTER is bitmap of registers set after (and including) the insn in this basic block. We must be passing through BB from head to end, as we are using this fact to speed things up. The results are stored this way: -- the first anticipatable expression is added into ANTIC_STORE_LIST -- if the processed expression is not anticipatable, NULL_RTX is added there instead, so that we can use it as indicator that no further expression of this type may be anticipatable -- if the expression is available, it is added as head of AVAIL_STORE_LIST; consequently, all of them but this head are dead and may be deleted. -- if the expression is not available, the insn due to that it fails to be available is stored in reaching_reg. The things are complicated a bit by fact that there already may be stores to the same MEM from other blocks; also caller must take care of the necessary cleanup of the temporary markers after end of the basic block. */ static void find_moveable_store (rtx insn, int *regs_set_before, int *regs_set_after) { struct ls_expr * ptr; rtx dest, set, tmp; int check_anticipatable, check_available; basic_block bb = BLOCK_FOR_INSN (insn); set = single_set (insn); if (!set) return; dest = SET_DEST (set); if (! MEM_P (dest) || MEM_VOLATILE_P (dest) || GET_MODE (dest) == BLKmode) return; if (side_effects_p (dest)) return; /* If we are handling exceptions, we must be careful with memory references that may trap. 
If we are not, the behavior is undefined, so we may just continue. */ if (flag_non_call_exceptions && may_trap_p (dest)) return; /* Even if the destination cannot trap, the source may. In this case we'd need to handle updating the REG_EH_REGION note. */ if (find_reg_note (insn, REG_EH_REGION, NULL_RTX)) return; ptr = ldst_entry (dest); if (!ptr->pattern_regs) ptr->pattern_regs = extract_mentioned_regs (dest); /* Do not check for anticipatability if we either found one anticipatable store already, or tested for one and found out that it was killed. */ check_anticipatable = 0; if (!ANTIC_STORE_LIST (ptr)) check_anticipatable = 1; else { tmp = XEXP (ANTIC_STORE_LIST (ptr), 0); if (tmp != NULL_RTX && BLOCK_FOR_INSN (tmp) != bb) check_anticipatable = 1; } if (check_anticipatable) { if (store_killed_before (dest, ptr->pattern_regs, insn, bb, regs_set_before)) tmp = NULL_RTX; else tmp = insn; ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (tmp, ANTIC_STORE_LIST (ptr)); } /* It is not necessary to check whether store is available if we did it successfully before; if we failed before, do not bother to check until we reach the insn that caused us to fail. */ check_available = 0; if (!AVAIL_STORE_LIST (ptr)) check_available = 1; else { tmp = XEXP (AVAIL_STORE_LIST (ptr), 0); if (BLOCK_FOR_INSN (tmp) != bb) check_available = 1; } if (check_available) { /* Check that we have already reached the insn at that the check failed last time. */ if (LAST_AVAIL_CHECK_FAILURE (ptr)) { for (tmp = BB_END (bb); tmp != insn && tmp != LAST_AVAIL_CHECK_FAILURE (ptr); tmp = PREV_INSN (tmp)) continue; if (tmp == insn) check_available = 0; } else check_available = store_killed_after (dest, ptr->pattern_regs, insn, bb, regs_set_after, &LAST_AVAIL_CHECK_FAILURE (ptr)); } if (!check_available) AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn, AVAIL_STORE_LIST (ptr)); } /* Find available and anticipatable stores. */ static int compute_store_table (void) { int ret; basic_block bb; unsigned regno; rtx insn, pat, tmp; int *last_set_in, *already_set; struct ls_expr * ptr, **prev_next_ptr_ptr; max_gcse_regno = max_reg_num (); reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno); sbitmap_vector_zero (reg_set_in_block, last_basic_block); pre_ldst_mems = 0; last_set_in = xcalloc (max_gcse_regno, sizeof (int)); already_set = xmalloc (sizeof (int) * max_gcse_regno); /* Find all the stores we care about. */ FOR_EACH_BB (bb) { /* First compute the registers set in this block. */ regvec = last_set_in; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (! INSN_P (insn)) continue; if (CALL_P (insn)) { bool clobbers_all = false; #ifdef NON_SAVING_SETJMP if (NON_SAVING_SETJMP && find_reg_note (insn, REG_SETJMP, NULL_RTX)) clobbers_all = true; #endif for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (clobbers_all || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) { last_set_in[regno] = INSN_UID (insn); SET_BIT (reg_set_in_block[bb->index], regno); } } pat = PATTERN (insn); compute_store_table_current_insn = insn; note_stores (pat, reg_set_info, reg_set_in_block[bb->index]); } /* Now find the stores. */ memset (already_set, 0, sizeof (int) * max_gcse_regno); regvec = already_set; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (! 
INSN_P (insn)) continue; if (CALL_P (insn)) { bool clobbers_all = false; #ifdef NON_SAVING_SETJMP if (NON_SAVING_SETJMP && find_reg_note (insn, REG_SETJMP, NULL_RTX)) clobbers_all = true; #endif for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (clobbers_all || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) already_set[regno] = 1; } pat = PATTERN (insn); note_stores (pat, reg_set_info, NULL); /* Now that we've marked regs, look for stores. */ find_moveable_store (insn, already_set, last_set_in); /* Unmark regs that are no longer set. */ compute_store_table_current_insn = insn; note_stores (pat, reg_clear_last_set, last_set_in); if (CALL_P (insn)) { bool clobbers_all = false; #ifdef NON_SAVING_SETJMP if (NON_SAVING_SETJMP && find_reg_note (insn, REG_SETJMP, NULL_RTX)) clobbers_all = true; #endif for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if ((clobbers_all || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) && last_set_in[regno] == INSN_UID (insn)) last_set_in[regno] = 0; } } #ifdef ENABLE_CHECKING /* last_set_in should now be all-zero. */ for (regno = 0; regno < max_gcse_regno; regno++) if (last_set_in[regno] != 0) abort (); #endif /* Clear temporary marks. */ for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr)) { LAST_AVAIL_CHECK_FAILURE(ptr) = NULL_RTX; if (ANTIC_STORE_LIST (ptr) && (tmp = XEXP (ANTIC_STORE_LIST (ptr), 0)) == NULL_RTX) ANTIC_STORE_LIST (ptr) = XEXP (ANTIC_STORE_LIST (ptr), 1); } } /* Remove the stores that are not available anywhere, as there will be no opportunity to optimize them. */ for (ptr = pre_ldst_mems, prev_next_ptr_ptr = &pre_ldst_mems; ptr != NULL; ptr = *prev_next_ptr_ptr) { if (!AVAIL_STORE_LIST (ptr)) { *prev_next_ptr_ptr = ptr->next; free_ldst_entry (ptr); } else prev_next_ptr_ptr = &ptr->next; } ret = enumerate_ldsts (); if (gcse_file) { fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n"); print_ldst_list (gcse_file); } free (last_set_in); free (already_set); return ret; } /* Check to see if the load X is aliased with STORE_PATTERN. AFTER is true if we are checking the case when STORE_PATTERN occurs after the X. */ static bool load_kills_store (rtx x, rtx store_pattern, int after) { if (after) return anti_dependence (x, store_pattern); else return true_dependence (store_pattern, GET_MODE (store_pattern), x, rtx_addr_varies_p); } /* Go through the entire insn X, looking for any loads which might alias STORE_PATTERN. Return true if found. AFTER is true if we are checking the case when STORE_PATTERN occurs after the insn X. */ static bool find_loads (rtx x, rtx store_pattern, int after) { const char * fmt; int i, j; int ret = false; if (!x) return false; if (GET_CODE (x) == SET) x = SET_SRC (x); if (MEM_P (x)) { if (load_kills_store (x, store_pattern, after)) return true; } /* Recursively process the insn. */ fmt = GET_RTX_FORMAT (GET_CODE (x)); for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--) { if (fmt[i] == 'e') ret |= find_loads (XEXP (x, i), store_pattern, after); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) ret |= find_loads (XVECEXP (x, i, j), store_pattern, after); } return ret; } /* Check if INSN kills the store pattern X (is aliased with it). AFTER is true if we are checking the case when store X occurs after the insn. Return true if it it does. 
*/ static bool store_killed_in_insn (rtx x, rtx x_regs, rtx insn, int after) { rtx reg, base, note; if (!INSN_P (insn)) return false; if (CALL_P (insn)) { /* A normal or pure call might read from pattern, but a const call will not. */ if (! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn)) return true; /* But even a const call reads its parameters. Check whether the base of some of registers used in mem is stack pointer. */ for (reg = x_regs; reg; reg = XEXP (reg, 1)) { base = find_base_term (XEXP (reg, 0)); if (!base || (GET_CODE (base) == ADDRESS && GET_MODE (base) == Pmode && XEXP (base, 0) == stack_pointer_rtx)) return true; } return false; } if (GET_CODE (PATTERN (insn)) == SET) { rtx pat = PATTERN (insn); rtx dest = SET_DEST (pat); if (GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == ZERO_EXTRACT) dest = XEXP (dest, 0); /* Check for memory stores to aliased objects. */ if (MEM_P (dest) && !expr_equiv_p (dest, x)) { if (after) { if (output_dependence (dest, x)) return true; } else { if (output_dependence (x, dest)) return true; } } if (find_loads (SET_SRC (pat), x, after)) return true; } else if (find_loads (PATTERN (insn), x, after)) return true; /* If this insn has a REG_EQUAL or REG_EQUIV note referencing a memory location aliased with X, then this insn kills X. */ note = find_reg_equal_equiv_note (insn); if (! note) return false; note = XEXP (note, 0); /* However, if the note represents a must alias rather than a may alias relationship, then it does not kill X. */ if (expr_equiv_p (note, x)) return false; /* See if there are any aliased loads in the note. */ return find_loads (note, x, after); } /* Returns true if the expression X is loaded or clobbered on or after INSN within basic block BB. REGS_SET_AFTER is bitmap of registers set in or after the insn. X_REGS is list of registers mentioned in X. If the store is killed, return the last insn in that it occurs in FAIL_INSN. */ static bool store_killed_after (rtx x, rtx x_regs, rtx insn, basic_block bb, int *regs_set_after, rtx *fail_insn) { rtx last = BB_END (bb), act; if (!store_ops_ok (x_regs, regs_set_after)) { /* We do not know where it will happen. */ if (fail_insn) *fail_insn = NULL_RTX; return true; } /* Scan from the end, so that fail_insn is determined correctly. */ for (act = last; act != PREV_INSN (insn); act = PREV_INSN (act)) if (store_killed_in_insn (x, x_regs, act, false)) { if (fail_insn) *fail_insn = act; return true; } return false; } /* Returns true if the expression X is loaded or clobbered on or before INSN within basic block BB. X_REGS is list of registers mentioned in X. REGS_SET_BEFORE is bitmap of registers set before or in this insn. */ static bool store_killed_before (rtx x, rtx x_regs, rtx insn, basic_block bb, int *regs_set_before) { rtx first = BB_HEAD (bb); if (!store_ops_ok (x_regs, regs_set_before)) return true; for ( ; insn != PREV_INSN (first); insn = PREV_INSN (insn)) if (store_killed_in_insn (x, x_regs, insn, true)) return true; return false; } /* Fill in available, anticipatable, transparent and kill vectors in STORE_DATA, based on lists of available and anticipatable stores. */ static void build_store_vectors (void) { basic_block bb; int *regs_set_in_block; rtx insn, st; struct ls_expr * ptr; unsigned regno; /* Build the gen_vector. This is any store in the table which is not killed by aliasing later in its block. 
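A store on AVAIL_STORE_LIST sets its bit in ae_gen for its block (a redundant earlier store in the same block is deleted on the spot), a store on ANTIC_STORE_LIST sets its bit in st_antloc, and the ae_kill and transp vectors are filled in afterwards by checking whether each pattern is killed after the head of each block.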
*/ ae_gen = sbitmap_vector_alloc (last_basic_block, num_stores); sbitmap_vector_zero (ae_gen, last_basic_block); st_antloc = sbitmap_vector_alloc (last_basic_block, num_stores); sbitmap_vector_zero (st_antloc, last_basic_block); for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr)) { for (st = AVAIL_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1)) { insn = XEXP (st, 0); bb = BLOCK_FOR_INSN (insn); /* If we've already seen an available expression in this block, we can delete this one (It occurs earlier in the block). We'll copy the SRC expression to an unused register in case there are any side effects. */ if (TEST_BIT (ae_gen[bb->index], ptr->index)) { rtx r = gen_reg_rtx (GET_MODE (ptr->pattern)); if (gcse_file) fprintf (gcse_file, "Removing redundant store:\n"); replace_store_insn (r, XEXP (st, 0), bb, ptr); continue; } SET_BIT (ae_gen[bb->index], ptr->index); } for (st = ANTIC_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1)) { insn = XEXP (st, 0); bb = BLOCK_FOR_INSN (insn); SET_BIT (st_antloc[bb->index], ptr->index); } } ae_kill = sbitmap_vector_alloc (last_basic_block, num_stores); sbitmap_vector_zero (ae_kill, last_basic_block); transp = sbitmap_vector_alloc (last_basic_block, num_stores); sbitmap_vector_zero (transp, last_basic_block); regs_set_in_block = xmalloc (sizeof (int) * max_gcse_regno); FOR_EACH_BB (bb) { for (regno = 0; regno < max_gcse_regno; regno++) regs_set_in_block[regno] = TEST_BIT (reg_set_in_block[bb->index], regno); for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr)) { if (store_killed_after (ptr->pattern, ptr->pattern_regs, BB_HEAD (bb), bb, regs_set_in_block, NULL)) { /* It should not be necessary to consider the expression killed if it is both anticipatable and available. */ if (!TEST_BIT (st_antloc[bb->index], ptr->index) || !TEST_BIT (ae_gen[bb->index], ptr->index)) SET_BIT (ae_kill[bb->index], ptr->index); } else SET_BIT (transp[bb->index], ptr->index); } } free (regs_set_in_block); if (gcse_file) { dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block); dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block); dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block); dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block); } } /* Insert an instruction at the beginning of a basic block, and update the BB_HEAD if needed. */ static void insert_insn_start_bb (rtx insn, basic_block bb) { /* Insert at start of successor block. */ rtx prev = PREV_INSN (BB_HEAD (bb)); rtx before = BB_HEAD (bb); while (before != 0) { if (! LABEL_P (before) && (! NOTE_P (before) || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK)) break; prev = before; if (prev == BB_END (bb)) break; before = NEXT_INSN (before); } insn = emit_insn_after (insn, prev); if (gcse_file) { fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n", bb->index); print_inline_rtx (gcse_file, insn, 6); fprintf (gcse_file, "\n"); } } /* This routine will insert a store on an edge. EXPR is the ldst entry for the memory reference, and E is the edge to insert it on. Returns nonzero if an edge insertion was performed. */ static int insert_store (struct ls_expr * expr, edge e) { rtx reg, insn; basic_block bb; edge tmp; /* We did all the deleted before this insert, so if we didn't delete a store, then we haven't set the reaching reg yet either. 
*/ if (expr->reaching_reg == NULL_RTX) return 0; if (e->flags & EDGE_FAKE) return 0; reg = expr->reaching_reg; insn = gen_move_insn (copy_rtx (expr->pattern), reg); /* If we are inserting this expression on ALL predecessor edges of a BB, insert it at the start of the BB, and reset the insert bits on the other edges so we don't try to insert it on the other edges. */ bb = e->dest; for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next) if (!(tmp->flags & EDGE_FAKE)) { int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest); if (index == EDGE_INDEX_NO_EDGE) abort (); if (! TEST_BIT (pre_insert_map[index], expr->index)) break; } /* If tmp is NULL, we found an insertion on every edge, blank the insertion vector for these edges, and insert at the start of the BB. */ if (!tmp && bb != EXIT_BLOCK_PTR) { for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next) { int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest); RESET_BIT (pre_insert_map[index], expr->index); } insert_insn_start_bb (insn, bb); return 0; } /* We can't insert on this edge, so we'll insert at the head of the successors block. See Morgan, sec 10.5. */ if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL) { insert_insn_start_bb (insn, bb); return 0; } insert_insn_on_edge (insn, e); if (gcse_file) { fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n", e->src->index, e->dest->index); print_inline_rtx (gcse_file, insn, 6); fprintf (gcse_file, "\n"); } return 1; } /* Remove any REG_EQUAL or REG_EQUIV notes containing a reference to the memory location in SMEXPR set in basic block BB. This could be rather expensive. */ static void remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr) { edge *stack = xmalloc (sizeof (edge) * n_basic_blocks), act; sbitmap visited = sbitmap_alloc (last_basic_block); int stack_top = 0; rtx last, insn, note; rtx mem = smexpr->pattern; sbitmap_zero (visited); act = bb->succ; while (1) { if (!act) { if (!stack_top) { free (stack); sbitmap_free (visited); return; } act = stack[--stack_top]; } bb = act->dest; if (bb == EXIT_BLOCK_PTR || TEST_BIT (visited, bb->index)) { act = act->succ_next; continue; } SET_BIT (visited, bb->index); if (TEST_BIT (st_antloc[bb->index], smexpr->index)) { for (last = ANTIC_STORE_LIST (smexpr); BLOCK_FOR_INSN (XEXP (last, 0)) != bb; last = XEXP (last, 1)) continue; last = XEXP (last, 0); } else last = NEXT_INSN (BB_END (bb)); for (insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { note = find_reg_equal_equiv_note (insn); if (!note || !expr_equiv_p (XEXP (note, 0), mem)) continue; if (gcse_file) fprintf (gcse_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n", INSN_UID (insn)); remove_note (insn, note); } act = act->succ_next; if (bb->succ) { if (act) stack[stack_top++] = act; act = bb->succ; } } } /* This routine will replace a store with a SET to a specified register. 
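The store insn DEL is replaced, at the same position, by a copy of its SET_SRC into REG; the store to memory itself is re-emitted later by insert_store on the edges or block starts selected by the LCM computation. REG_EQUAL and REG_EQUIV notes that duplicate the stored memory and are reached by the new definition are dropped, since they are no longer accurate.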
*/ static void replace_store_insn (rtx reg, rtx del, basic_block bb, struct ls_expr *smexpr) { rtx insn, mem, note, set, ptr; mem = smexpr->pattern; insn = gen_move_insn (reg, SET_SRC (single_set (del))); insn = emit_insn_after (insn, del); if (gcse_file) { fprintf (gcse_file, "STORE_MOTION delete insn in BB %d:\n ", bb->index); print_inline_rtx (gcse_file, del, 6); fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n "); print_inline_rtx (gcse_file, insn, 6); fprintf (gcse_file, "\n"); } for (ptr = ANTIC_STORE_LIST (smexpr); ptr; ptr = XEXP (ptr, 1)) if (XEXP (ptr, 0) == del) { XEXP (ptr, 0) = insn; break; } delete_insn (del); /* Now we must handle REG_EQUAL notes whose contents is equal to the mem; they are no longer accurate provided that they are reached by this definition, so drop them. */ for (; insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) if (INSN_P (insn)) { set = single_set (insn); if (!set) continue; if (expr_equiv_p (SET_DEST (set), mem)) return; note = find_reg_equal_equiv_note (insn); if (!note || !expr_equiv_p (XEXP (note, 0), mem)) continue; if (gcse_file) fprintf (gcse_file, "STORE_MOTION drop REG_EQUAL note at insn %d:\n", INSN_UID (insn)); remove_note (insn, note); } remove_reachable_equiv_notes (bb, smexpr); } /* Delete a store, but copy the value that would have been stored into the reaching_reg for later storing. */ static void delete_store (struct ls_expr * expr, basic_block bb) { rtx reg, i, del; if (expr->reaching_reg == NULL_RTX) expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern)); reg = expr->reaching_reg; for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1)) { del = XEXP (i, 0); if (BLOCK_FOR_INSN (del) == bb) { /* We know there is only one since we deleted redundant ones during the available computation. */ replace_store_insn (reg, del, bb, expr); break; } } } /* Free memory used by store motion. */ static void free_store_memory (void) { free_ldst_mems (); if (ae_gen) sbitmap_vector_free (ae_gen); if (ae_kill) sbitmap_vector_free (ae_kill); if (transp) sbitmap_vector_free (transp); if (st_antloc) sbitmap_vector_free (st_antloc); if (pre_insert_map) sbitmap_vector_free (pre_insert_map); if (pre_delete_map) sbitmap_vector_free (pre_delete_map); if (reg_set_in_block) sbitmap_vector_free (reg_set_in_block); ae_gen = ae_kill = transp = st_antloc = NULL; pre_insert_map = pre_delete_map = reg_set_in_block = NULL; } /* Perform store motion. Much like gcse, except we move expressions the other way by looking at the flowgraph in reverse. */ static void store_motion (void) { basic_block bb; int x; struct ls_expr * ptr; int update_flow = 0; if (gcse_file) { fprintf (gcse_file, "before store motion\n"); print_rtl (gcse_file, get_insns ()); } init_alias_analysis (); /* Find all the available and anticipatable stores. */ num_stores = compute_store_table (); if (num_stores == 0) { sbitmap_vector_free (reg_set_in_block); end_alias_analysis (); return; } /* Now compute kill & transp vectors. */ build_store_vectors (); add_noreturn_fake_exit_edges (); connect_infinite_loops_to_exit (); edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen, st_antloc, ae_kill, &pre_insert_map, &pre_delete_map); /* Now we want to insert the new stores which are going to be needed. 
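For each store expression we first delete the stores in the blocks marked in pre_delete_map, saving the stored value in the reaching register, and then emit a store of that register on every edge marked in pre_insert_map (or at the start of the destination block when all of its incoming edges need one). If any edge insertion was made, commit_edge_insertions materializes the queued insns.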
*/ for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr)) { FOR_EACH_BB (bb) if (TEST_BIT (pre_delete_map[bb->index], ptr->index)) delete_store (ptr, bb); for (x = 0; x < NUM_EDGES (edge_list); x++) if (TEST_BIT (pre_insert_map[x], ptr->index)) update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x)); } if (update_flow) commit_edge_insertions (); free_store_memory (); free_edge_list (edge_list); remove_fake_edges (); end_alias_analysis (); } /* Entry point for jump bypassing optimization pass. */ int bypass_jumps (FILE *file) { int changed; /* We do not construct an accurate cfg in functions which call setjmp, so just punt to be safe. */ if (current_function_calls_setjmp) return 0; /* For calling dump_foo fns from gdb. */ debug_stderr = stderr; gcse_file = file; /* Identify the basic block information for this function, including successors and predecessors. */ max_gcse_regno = max_reg_num (); if (file) dump_flow_info (file); /* Return if there's nothing to do, or it is too expensive. */ if (n_basic_blocks <= 1 || is_too_expensive (_ ("jump bypassing disabled"))) return 0; gcc_obstack_init (&gcse_obstack); bytes_used = 0; /* We need alias. */ init_alias_analysis (); /* Record where pseudo-registers are set. This data is kept accurate during each pass. ??? We could also record hard-reg information here [since it's unchanging], however it is currently done during hash table computation. It may be tempting to compute MEM set information here too, but MEM sets will be subject to code motion one day and thus we need to compute information about memory sets when we build the hash tables. */ alloc_reg_set_mem (max_gcse_regno); compute_sets (get_insns ()); max_gcse_regno = max_reg_num (); alloc_gcse_mem (get_insns ()); changed = one_cprop_pass (1, 1, 1); free_gcse_mem (); if (file) { fprintf (file, "BYPASS of %s: %d basic blocks, ", current_function_name (), n_basic_blocks); fprintf (file, "%d bytes\n\n", bytes_used); } obstack_free (&gcse_obstack, NULL); free_reg_set_mem (); /* We are finished with alias. */ end_alias_analysis (); allocate_reg_info (max_reg_num (), FALSE, FALSE); return changed; } /* Return true if the graph is too expensive to optimize. PASS is the optimization about to be performed. */ static bool is_too_expensive (const char *pass) { /* Trying to perform global optimizations on flow graphs which have a high connectivity will take a long time and is unlikely to be particularly useful. In normal circumstances a cfg should have about twice as many edges as blocks. But we do not want to punish small functions which have a couple switch statements. Rather than simply threshold the number of blocks, uses something with a more graceful degradation. */ if (n_edges > 20000 + n_basic_blocks * 4) { if (warn_disabled_optimization) warning ("%s: %d basic blocks and %d edges/basic block", pass, n_basic_blocks, n_edges / n_basic_blocks); return true; } /* If allocating memory for the cprop bitmap would take up too much storage it's better just to disable the optimization. */ if ((n_basic_blocks * SBITMAP_SET_SIZE (max_reg_num ()) * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY) { if (warn_disabled_optimization) warning ("%s: %d basic blocks and %d registers", pass, n_basic_blocks, max_reg_num ()); return true; } return false; } /* The following code implements gcse after reload, the purpose of this pass is to cleanup redundant loads generated by reload and other optimizations that come after gcse. 
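Typically these are reloads from stack slots introduced by spilling, where the stored value is still available in a hard register at the end of some predecessor blocks and the load can be replaced there by a register copy inserted on the incoming edge.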
It searches for simple inter-block redundancies and tries to eliminate them by adding moves and loads in cold places. */ /* The following structure holds the information about the occurrences of the redundant instructions. */ struct unoccr { struct unoccr *next; edge pred; rtx insn; }; static bool reg_used_on_edge (rtx, edge); static rtx reg_set_between_after_reload_p (rtx, rtx, rtx); static rtx reg_used_between_after_reload_p (rtx, rtx, rtx); static rtx get_avail_load_store_reg (rtx); static bool is_jump_table_basic_block (basic_block); static bool bb_has_well_behaved_predecessors (basic_block); static struct occr* get_bb_avail_insn (basic_block, struct occr *); static void hash_scan_set_after_reload (rtx, rtx, struct hash_table *); static void compute_hash_table_after_reload (struct hash_table *); static void eliminate_partially_redundant_loads (basic_block, rtx, struct expr *); static void gcse_after_reload (void); static struct occr* get_bb_avail_insn (basic_block, struct occr *); void gcse_after_reload_main (rtx, FILE *); /* Check if register REG is used in any insn waiting to be inserted on E. Assumes no such insn can be a CALL_INSN; if so call reg_used_between_p with PREV(insn),NEXT(insn) instead of calling reg_overlap_mentioned_p. */ static bool reg_used_on_edge (rtx reg, edge e) { rtx insn; for (insn = e->insns.r; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn))) return true; return false; } /* Return the insn that sets register REG or clobbers it in between FROM_INSN and TO_INSN (exclusive of those two). Just like reg_set_between but for hard registers and not pseudos. */ static rtx reg_set_between_after_reload_p (rtx reg, rtx from_insn, rtx to_insn) { rtx insn; int regno; if (! REG_P (reg)) abort (); regno = REGNO (reg); /* We are called after register allocation. */ if (regno >= FIRST_PSEUDO_REGISTER) abort (); if (from_insn == to_insn) return NULL_RTX; for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { if (FIND_REG_INC_NOTE (insn, reg) || (CALL_P (insn) && call_used_regs[regno]) || find_reg_fusage (insn, CLOBBER, reg)) return insn; } if (set_of (reg, insn) != NULL_RTX) return insn; } return NULL_RTX; } /* Return the insn that uses register REG in between FROM_INSN and TO_INSN (exclusive of those two). Similar to reg_used_between but for hard registers and not pseudos. */ static rtx reg_used_between_after_reload_p (rtx reg, rtx from_insn, rtx to_insn) { rtx insn; int regno; if (! REG_P (reg)) return to_insn; regno = REGNO (reg); /* We are called after register allocation. */ if (regno >= FIRST_PSEUDO_REGISTER) abort (); if (from_insn == to_insn) return NULL_RTX; for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && (reg_overlap_mentioned_p (reg, PATTERN (insn)) || (CALL_P (insn) && call_used_regs[regno]) || find_reg_fusage (insn, USE, reg) || find_reg_fusage (insn, CLOBBER, reg))) return insn; return NULL_RTX; } /* Return the loaded/stored register of a load/store instruction. */ static rtx get_avail_load_store_reg (rtx insn) { if (REG_P (SET_DEST (PATTERN (insn)))) /* A load. */ return SET_DEST(PATTERN(insn)); if (REG_P (SET_SRC (PATTERN (insn)))) /* A store. */ return SET_SRC (PATTERN (insn)); abort (); } /* Don't handle ABNORMAL edges or jump tables. 
*/ static bool is_jump_table_basic_block (basic_block bb) { rtx insn = BB_END (bb); if (JUMP_TABLE_DATA_P (insn)) return true; return false; } /* Return nonzero if the predecessors of BB are "well behaved". */ static bool bb_has_well_behaved_predecessors (basic_block bb) { edge pred; if (! bb->pred) return false; for (pred = bb->pred; pred != NULL; pred = pred->pred_next) if (((pred->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (pred)) || is_jump_table_basic_block (pred->src)) return false; return true; } /* Search for the occurrences of expression in BB. */ static struct occr* get_bb_avail_insn (basic_block bb, struct occr *occr) { for (; occr != NULL; occr = occr->next) if (BLOCK_FOR_INSN (occr->insn)->index == bb->index) return occr; return NULL; } /* Perform partial GCSE pass after reload, try to eliminate redundant loads created by the reload pass. We try to look for a full or partial redundant loads fed by one or more loads/stores in predecessor BBs, and try adding loads to make them fully redundant. We also check if it's worth adding loads to be able to delete the redundant load. Algorithm: 1. Build available expressions hash table: For each load/store instruction, if the loaded/stored memory didn't change until the end of the basic block add this memory expression to the hash table. 2. Perform Redundancy elimination: For each load instruction do the following: perform partial redundancy elimination, check if it's worth adding loads to make the load fully redundant. If so add loads and register copies and delete the load. Future enhancement: if loaded register is used/defined between load and some store, look for some other free register between load and all its stores, and replace load with a copy from this register to the loaded register. */ /* This handles the case where several stores feed a partially redundant load. It checks if the redundancy elimination is possible and if it's worth it. */ static void eliminate_partially_redundant_loads (basic_block bb, rtx insn, struct expr *expr) { edge pred; rtx avail_insn = NULL_RTX; rtx avail_reg; rtx dest, pat; struct occr *a_occr; struct unoccr *occr, *avail_occrs = NULL; struct unoccr *unoccr, *unavail_occrs = NULL; int npred_ok = 0; gcov_type ok_count = 0; /* Redundant load execution count. */ gcov_type critical_count = 0; /* Execution count of critical edges. */ /* The execution count of the loads to be added to make the load fully redundant. */ gcov_type not_ok_count = 0; basic_block pred_bb; pat = PATTERN (insn); dest = SET_DEST (pat); /* Check that the loaded register is not used, set, or killed from the beginning of the block. */ if (reg_used_between_after_reload_p (dest, PREV_INSN (BB_HEAD (bb)), insn) || reg_set_between_after_reload_p (dest, PREV_INSN (BB_HEAD (bb)), insn)) return; /* Check potential for replacing load with copy for predecessors. */ for (pred = bb->pred; pred; pred = pred->pred_next) { rtx next_pred_bb_end; avail_insn = NULL_RTX; pred_bb = pred->src; next_pred_bb_end = NEXT_INSN (BB_END (pred_bb)); for (a_occr = get_bb_avail_insn (pred_bb, expr->avail_occr); a_occr; a_occr = get_bb_avail_insn (pred_bb, a_occr->next)) { /* Check if the loaded register is not used. */ avail_insn = a_occr->insn; if (! (avail_reg = get_avail_load_store_reg (avail_insn))) abort (); /* Make sure we can generate a move from register avail_reg to dest. */ extract_insn (gen_move_insn (copy_rtx (dest), copy_rtx (avail_reg))); if (! 
constrain_operands (1) || reg_killed_on_edge (avail_reg, pred) || reg_used_on_edge (dest, pred)) { avail_insn = NULL; continue; } if (! reg_set_between_after_reload_p (avail_reg, avail_insn, next_pred_bb_end)) /* AVAIL_INSN remains non-null. */ break; else avail_insn = NULL; } if (avail_insn != NULL_RTX) { npred_ok++; ok_count += pred->count; if (EDGE_CRITICAL_P (pred)) critical_count += pred->count; occr = gmalloc (sizeof (struct unoccr)); occr->insn = avail_insn; occr->pred = pred; occr->next = avail_occrs; avail_occrs = occr; } else { not_ok_count += pred->count; if (EDGE_CRITICAL_P (pred)) critical_count += pred->count; unoccr = gmalloc (sizeof (struct unoccr)); unoccr->insn = NULL_RTX; unoccr->pred = pred; unoccr->next = unavail_occrs; unavail_occrs = unoccr; } } if (npred_ok == 0 /* No load can be replaced by copy. */ || (optimize_size && npred_ok > 1)) /* Prevent exploding the code. */ goto cleanup; /* Check if it's worth applying the partial redundancy elimination. */ if (ok_count < GCSE_AFTER_RELOAD_PARTIAL_FRACTION * not_ok_count) goto cleanup; if (ok_count < GCSE_AFTER_RELOAD_CRITICAL_FRACTION * critical_count) goto cleanup; /* Generate moves to the loaded register from where the memory is available. */ for (occr = avail_occrs; occr; occr = occr->next) { avail_insn = occr->insn; pred = occr->pred; /* Set avail_reg to be the register having the value of the memory. */ avail_reg = get_avail_load_store_reg (avail_insn); if (! avail_reg) abort (); insert_insn_on_edge (gen_move_insn (copy_rtx (dest), copy_rtx (avail_reg)), pred); if (gcse_file) fprintf (gcse_file, "GCSE AFTER reload generating move from %d to %d on \ edge from %d to %d\n", REGNO (avail_reg), REGNO (dest), pred->src->index, pred->dest->index); } /* Regenerate loads where the memory is unavailable. */ for (unoccr = unavail_occrs; unoccr; unoccr = unoccr->next) { pred = unoccr->pred; insert_insn_on_edge (copy_insn (PATTERN (insn)), pred); if (gcse_file) fprintf (gcse_file, "GCSE AFTER reload: generating on edge from %d to %d\ a copy of load:\n", pred->src->index, pred->dest->index); } /* Delete the insn if it is not available in this block and mark it for deletion if it is available. If insn is available it may help discover additional redundancies, so mark it for later deletion.*/ for (a_occr = get_bb_avail_insn (bb, expr->avail_occr); a_occr && (a_occr->insn != insn); a_occr = get_bb_avail_insn (bb, a_occr->next)); if (!a_occr) delete_insn (insn); else a_occr->deleted_p = 1; cleanup: while (unavail_occrs) { struct unoccr *temp = unavail_occrs->next; free (unavail_occrs); unavail_occrs = temp; } while (avail_occrs) { struct unoccr *temp = avail_occrs->next; free (avail_occrs); avail_occrs = temp; } } /* Performing the redundancy elimination as described before. */ static void gcse_after_reload (void) { unsigned int i; rtx insn; basic_block bb; struct expr *expr; struct occr *occr; /* Note we start at block 1. */ if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR) return; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb) { if (! bb_has_well_behaved_predecessors (bb)) continue; /* Do not try this optimization on cold basic blocks. */ if (probably_cold_bb_p (bb)) continue; reset_opr_set_tables (); for (insn = BB_HEAD (bb); insn != NULL && insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { /* Is it a load - of the form (set (reg) (mem))? 
*/ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET && REG_P (SET_DEST (PATTERN (insn))) && MEM_P (SET_SRC (PATTERN (insn)))) { rtx pat = PATTERN (insn); rtx src = SET_SRC (pat); struct expr *expr; if (general_operand (src, GET_MODE (src)) /* Is the expression recorded? */ && (expr = lookup_expr (src, &expr_hash_table)) != NULL /* Are the operands unchanged since the start of the block? */ && oprs_not_set_p (src, insn) && ! MEM_VOLATILE_P (src) && GET_MODE (src) != BLKmode && !(flag_non_call_exceptions && may_trap_p (src)) && !side_effects_p (src)) { /* We now have a load (insn) and an available memory at its BB start (expr). Try to remove the loads if it is redundant. */ eliminate_partially_redundant_loads (bb, insn, expr); } } /* Keep track of everything modified by this insn. */ if (INSN_P (insn)) mark_oprs_set (insn); } } commit_edge_insertions (); /* Go over the expression hash table and delete insns that were marked for later deletion. */ for (i = 0; i < expr_hash_table.size; i++) { for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash) for (occr = expr->avail_occr; occr; occr = occr->next) if (occr->deleted_p) delete_insn (occr->insn); } } /* Scan pattern PAT of INSN and add an entry to the hash TABLE. After reload we are interested in loads/stores only. */ static void hash_scan_set_after_reload (rtx pat, rtx insn, struct hash_table *table) { rtx src = SET_SRC (pat); rtx dest = SET_DEST (pat); if (! MEM_P (src) && ! MEM_P (dest)) return; if (REG_P (dest)) { if (/* Don't GCSE something if we can't do a reg/reg copy. */ can_copy_p (GET_MODE (dest)) /* GCSE commonly inserts instruction after the insn. We can't do that easily for EH_REGION notes so disable GCSE on these for now. */ && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX) /* Is SET_SRC something we want to gcse? */ && general_operand (src, GET_MODE (src)) /* Don't CSE a nop. */ && ! set_noop_p (pat) && ! JUMP_P (insn)) { /* An expression is not available if its operands are subsequently modified, including this insn. */ if (oprs_available_p (src, insn)) insert_expr_in_table (src, GET_MODE (dest), insn, 0, 1, table); } } else if (REG_P (src)) { /* Only record sets of pseudo-regs in the hash table. */ if (/* Don't GCSE something if we can't do a reg/reg copy. */ can_copy_p (GET_MODE (src)) /* GCSE commonly inserts instruction after the insn. We can't do that easily for EH_REGION notes so disable GCSE on these for now. */ && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX) /* Is SET_DEST something we want to gcse? */ && general_operand (dest, GET_MODE (dest)) /* Don't CSE a nop. */ && ! set_noop_p (pat) &&! JUMP_P (insn) && ! (flag_float_store && FLOAT_MODE_P (GET_MODE (dest))) /* Check if the memory expression is killed after insn. */ && ! load_killed_in_block_p (BLOCK_FOR_INSN (insn), INSN_CUID (insn) + 1, dest, 1) && oprs_unchanged_p (XEXP (dest, 0), insn, 1)) { insert_expr_in_table (dest, GET_MODE (dest), insn, 0, 1, table); } } } /* Create hash table of memory expressions available at end of basic blocks. */ static void compute_hash_table_after_reload (struct hash_table *table) { unsigned int i; table->set_p = 0; /* Initialize count of number of entries in hash table. */ table->n_elems = 0; memset ((char *) table->table, 0, table->size * sizeof (struct expr *)); /* While we compute the hash table we also compute a bit array of which registers are set in which blocks. */ sbitmap_vector_zero (reg_set_in_block, last_basic_block); /* Re-cache any INSN_LIST nodes we have allocated. 
*/ clear_modify_mem_tables (); /* Some working arrays used to track first and last set in each block. */ reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info)); for (i = 0; i < max_gcse_regno; ++i) reg_avail_info[i].last_bb = NULL; FOR_EACH_BB (current_bb) { rtx insn; unsigned int regno; /* First pass over the instructions records information used to determine when registers and memory are first and last set. */ for (insn = BB_HEAD (current_bb); insn && insn != NEXT_INSN (BB_END (current_bb)); insn = NEXT_INSN (insn)) { if (! INSN_P (insn)) continue; if (CALL_P (insn)) { bool clobbers_all = false; #ifdef NON_SAVING_SETJMP if (NON_SAVING_SETJMP && find_reg_note (insn, REG_SETJMP, NULL_RTX)) clobbers_all = true; #endif for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (clobbers_all || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)) record_last_reg_set_info (insn, regno); mark_call (insn); } note_stores (PATTERN (insn), record_last_set_info, insn); if (GET_CODE (PATTERN (insn)) == SET) { rtx src, dest; src = SET_SRC (PATTERN (insn)); dest = SET_DEST (PATTERN (insn)); if (MEM_P (src) && auto_inc_p (XEXP (src, 0))) { regno = REGNO (XEXP (XEXP (src, 0), 0)); record_last_reg_set_info (insn, regno); } if (MEM_P (dest) && auto_inc_p (XEXP (dest, 0))) { regno = REGNO (XEXP (XEXP (dest, 0), 0)); record_last_reg_set_info (insn, regno); } } } /* The next pass builds the hash table. */ for (insn = BB_HEAD (current_bb); insn && insn != NEXT_INSN (BB_END (current_bb)); insn = NEXT_INSN (insn)) if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SET) if (! find_reg_note (insn, REG_LIBCALL, NULL_RTX)) hash_scan_set_after_reload (PATTERN (insn), insn, table); } free (reg_avail_info); reg_avail_info = NULL; } /* Main entry point of the GCSE after reload - clean some redundant loads due to spilling. */ void gcse_after_reload_main (rtx f, FILE* file) { gcse_subst_count = 0; gcse_create_count = 0; gcse_file = file; gcc_obstack_init (&gcse_obstack); bytes_used = 0; /* We need alias. */ init_alias_analysis (); max_gcse_regno = max_reg_num (); alloc_reg_set_mem (max_gcse_regno); alloc_gcse_mem (f); alloc_hash_table (max_cuid, &expr_hash_table, 0); compute_hash_table_after_reload (&expr_hash_table); if (gcse_file) dump_hash_table (gcse_file, "Expression", &expr_hash_table); if (expr_hash_table.n_elems > 0) gcse_after_reload (); free_hash_table (&expr_hash_table); free_gcse_mem (); free_reg_set_mem (); /* We are finished with alias. */ end_alias_analysis (); obstack_free (&gcse_obstack, NULL); } /* Type information for gcse.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. 
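The machine-generated table below registers gcse.c's test_insn as a GC root, pairing it with the marking and PCH-noting callbacks for struct rtx_def so that the collector and the precompiled-header writer can find and walk it.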
*/ const struct ggc_root_tab gt_ggc_r_gt_gcse_h[] = { { &test_insn, 1, sizeof (test_insn), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, LAST_GGC_ROOT_TAB }; /* Simple garbage collection for the GNU compiler. Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Generic garbage collection (GC) functions and data, not specific to any particular GC implementation. */ /* The host_hooks data structure. Copyright 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_HOST_HOOKS_H #define GCC_HOST_HOOKS_H struct host_hooks { void (*extra_signals) (void); /* Identify an address that's likely to be free in a subsequent invocation of the compiler. The area should be able to hold SIZE bytes. FD is an open file descriptor if the host would like to probe with mmap. */ void * (*gt_pch_get_address) (size_t size, int fd); /* ADDR is an address returned by gt_pch_get_address. Attempt to allocate SIZE bytes at the same address and load it with the data from FD at OFFSET. Return -1 if we couldn't allocate memory at ADDR, return 0 if the memory is allocated but the data not loaded, return 1 if done. */ int (*gt_pch_use_address) (void *addr, size_t size, int fd, size_t offset); /* Return the alignment required for allocating virtual memory. Usually this is the same as pagesize. */ size_t (*gt_pch_alloc_granularity) (void); /* Whenever you add entries here, make sure you adjust hosthooks-def.h. */ }; /* Each host provides its own. */ extern const struct host_hooks host_hooks; #endif /* GCC_HOST_HOOKS_H */ /* Default macros to initialize the host_hooks data structure. Copyright 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_HOST_HOOKS_DEF_H #define GCC_HOST_HOOKS_DEF_H #define HOST_HOOKS_EXTRA_SIGNALS hook_void_void #if HAVE_MMAP_FILE #define HOST_HOOKS_GT_PCH_GET_ADDRESS mmap_gt_pch_get_address #define HOST_HOOKS_GT_PCH_USE_ADDRESS mmap_gt_pch_use_address #else #define HOST_HOOKS_GT_PCH_GET_ADDRESS default_gt_pch_get_address #define HOST_HOOKS_GT_PCH_USE_ADDRESS default_gt_pch_use_address #endif #define HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY \ default_gt_pch_alloc_granularity extern void* default_gt_pch_get_address (size_t, int); extern int default_gt_pch_use_address (void *, size_t, int, size_t); extern size_t default_gt_pch_alloc_granularity (void); extern void* mmap_gt_pch_get_address (size_t, int); extern int mmap_gt_pch_use_address (void *, size_t, int, size_t); /* The structure is defined in hosthooks.h. */ #define HOST_HOOKS_INITIALIZER { \ HOST_HOOKS_EXTRA_SIGNALS, \ HOST_HOOKS_GT_PCH_GET_ADDRESS, \ HOST_HOOKS_GT_PCH_USE_ADDRESS, \ HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY \ } #endif /* GCC_HOST_HOOKS_DEF_H */ #ifdef HAVE_SYS_RESOURCE_H # include #endif #ifdef HAVE_MMAP_FILE # include # ifdef HAVE_MINCORE /* This is on Solaris. */ # include # endif #endif #ifndef MAP_FAILED # define MAP_FAILED ((void *)-1) #endif /* Avoid #ifdef:s when we can help it. */ #define VALGRIND_DISCARD(x) /* Statistics about the allocation. */ static ggc_statistics *ggc_stats; struct traversal_state; static int ggc_htab_delete (void **, void *); static hashval_t saving_htab_hash (const void *); static int saving_htab_eq (const void *, const void *); static int call_count (void **, void *); static int call_alloc (void **, void *); static int compare_ptr_data (const void *, const void *); static void relocate_ptrs (void *, void *); static void write_pch_globals (const struct ggc_root_tab * const *tab, struct traversal_state *state); static double ggc_rlimit_bound (double); /* Maintain global roots that are preserved during GC. */ /* Process a slot of an htab by deleting it if it has not been marked. */ static int ggc_htab_delete (void **slot, void *info) { const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info; if (! (*r->marked_p) (*slot)) htab_clear_slot (*r->base, slot); else (*r->cb) (*slot); return 1; } /* Iterate through all registered roots and mark each element. */ void ggc_mark_roots (void) { const struct ggc_root_tab *const *rt; const struct ggc_root_tab *rti; const struct ggc_cache_tab *const *ct; const struct ggc_cache_tab *cti; size_t i; for (rt = gt_ggc_deletable_rtab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) memset (rti->base, 0, rti->stride); for (rt = gt_ggc_rtab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) for (i = 0; i < rti->nelt; i++) (*rti->cb)(*(void **)((char *)rti->base + rti->stride * i)); ggc_mark_stringpool (); /* Now scan all hash tables that have objects which are to be deleted if they are not already marked. */ for (ct = gt_ggc_cache_rtab; *ct; ct++) for (cti = *ct; cti->base != NULL; cti++) if (*cti->base) { ggc_set_mark (*cti->base); htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti); ggc_set_mark ((*cti->base)->entries); } } /* Allocate a block of memory, then clear it. 
*/ void * ggc_alloc_cleared_stat (size_t size MEM_STAT_DECL) { void *buf = ggc_alloc_stat (size PASS_MEM_STAT); memset (buf, 0, size); return buf; } /* Resize a block of memory, possibly re-allocating it. */ void * ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL) { void *r; size_t old_size; if (x == NULL) return ggc_alloc_stat (size PASS_MEM_STAT); old_size = ggc_get_size (x); if (size <= old_size) { /* Mark the unwanted memory as unaccessible. We also need to make the "new" size accessible, since ggc_get_size returns the size of the pool, not the size of the individually allocated object, the size which was previously made accessible. Unfortunately, we don't know that previously allocated size. Without that knowledge we have to lose some initialization-tracking for the old parts of the object. An alternative is to mark the whole old_size as reachable, but that would lose tracking of writes after the end of the object (by small offsets). Discard the handle to avoid handle leak. */ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size, old_size - size)); VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size)); return x; } r = ggc_alloc_stat (size PASS_MEM_STAT); /* Since ggc_get_size returns the size of the pool, not the size of the individually allocated object, we'd access parts of the old object that were marked invalid with the memcpy below. We lose a bit of the initialization-tracking since some of it may be uninitialized. */ VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size)); memcpy (r, x, old_size); /* The old object is not supposed to be used anymore. */ ggc_free (x); return r; } /* Like ggc_alloc_cleared, but performs a multiplication. */ void * ggc_calloc (size_t s1, size_t s2) { return ggc_alloc_cleared (s1 * s2); } /* These are for splay_tree_new_ggc. */ void * ggc_splay_alloc (int sz, void *nl) { if (nl != NULL) abort (); return ggc_alloc (sz); } void ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl) { if (nl != NULL) abort (); } /* Print statistics that are independent of the collector in use. */ #ifndef SCALE #define SCALE(x) ((unsigned long) ((x) < 1024*10 \ ? (x) \ : ((x) < 1024*1024*10 \ ? (x) / 1024 \ : (x) / (1024*1024)))) #endif #ifndef LABEL #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M')) #endif void ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED, ggc_statistics *stats) { /* Set the pointer so that during collection we will actually gather the statistics. */ ggc_stats = stats; /* Then do one collection to fill in the statistics. */ ggc_collect (); /* At present, we don't really gather any interesting statistics. */ /* Don't gather statistics any more. */ ggc_stats = NULL; } /* Functions for saving and restoring GCable memory to disk. */ static htab_t saving_htab; struct ptr_data { void *obj; void *note_ptr_cookie; gt_note_pointers note_ptr_fn; gt_handle_reorder reorder_fn; size_t size; void *new_addr; }; #define POINTER_HASH(x) (hashval_t)((long)x >> 3) /* Register an object in the hash table. 
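Each object gets a ptr_data entry recording its address, size and pointer-walking callback. The return value tells the generated walkers whether the object was seen for the first time, so that each object's fields are walked only once.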
*/ int gt_pch_note_object (void *obj, void *note_ptr_cookie, gt_note_pointers note_ptr_fn) { struct ptr_data **slot; if (obj == NULL || obj == (void *) 1) return 0; slot = (struct ptr_data **) htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj), INSERT); if (*slot != NULL) { if ((*slot)->note_ptr_fn != note_ptr_fn || (*slot)->note_ptr_cookie != note_ptr_cookie) abort (); return 0; } *slot = xcalloc (sizeof (struct ptr_data), 1); (*slot)->obj = obj; (*slot)->note_ptr_fn = note_ptr_fn; (*slot)->note_ptr_cookie = note_ptr_cookie; if (note_ptr_fn == gt_pch_p_S) (*slot)->size = strlen (obj) + 1; else (*slot)->size = ggc_get_size (obj); return 1; } /* Register an object in the hash table. */ void gt_pch_note_reorder (void *obj, void *note_ptr_cookie, gt_handle_reorder reorder_fn) { struct ptr_data *data; if (obj == NULL || obj == (void *) 1) return; data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj)); if (data == NULL || data->note_ptr_cookie != note_ptr_cookie) abort (); data->reorder_fn = reorder_fn; } /* Hash and equality functions for saving_htab, callbacks for htab_create. */ static hashval_t saving_htab_hash (const void *p) { return POINTER_HASH (((struct ptr_data *)p)->obj); } static int saving_htab_eq (const void *p1, const void *p2) { return ((struct ptr_data *)p1)->obj == p2; } /* Handy state for the traversal functions. */ struct traversal_state { FILE *f; struct ggc_pch_data *d; size_t count; struct ptr_data **ptrs; size_t ptrs_i; }; /* Callbacks for htab_traverse. */ static int call_count (void **slot, void *state_p) { struct ptr_data *d = (struct ptr_data *)*slot; struct traversal_state *state = (struct traversal_state *)state_p; ggc_pch_count_object (state->d, d->obj, d->size, d->note_ptr_fn == gt_pch_p_S); state->count++; return 1; } static int call_alloc (void **slot, void *state_p) { struct ptr_data *d = (struct ptr_data *)*slot; struct traversal_state *state = (struct traversal_state *)state_p; d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size, d->note_ptr_fn == gt_pch_p_S); state->ptrs[state->ptrs_i++] = d; return 1; } /* Callback for qsort. */ static int compare_ptr_data (const void *p1_p, const void *p2_p) { struct ptr_data *p1 = *(struct ptr_data *const *)p1_p; struct ptr_data *p2 = *(struct ptr_data *const *)p2_p; return (((size_t)p1->new_addr > (size_t)p2->new_addr) - ((size_t)p1->new_addr < (size_t)p2->new_addr)); } /* Callbacks for note_ptr_fn. */ static void relocate_ptrs (void *ptr_p, void *state_p) { void **ptr = (void **)ptr_p; struct traversal_state *state ATTRIBUTE_UNUSED = (struct traversal_state *)state_p; struct ptr_data *result; if (*ptr == NULL || *ptr == (void *)1) return; result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr)); if (result == NULL) abort (); *ptr = result->new_addr; } /* Write out, after relocation, the pointers in TAB. 
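For each root pointer the relocated address recorded in saving_htab is written to the file; null pointers and the special (void *) 1 marker are written out unchanged.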
*/ static void write_pch_globals (const struct ggc_root_tab * const *tab, struct traversal_state *state) { const struct ggc_root_tab *const *rt; const struct ggc_root_tab *rti; size_t i; for (rt = tab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) for (i = 0; i < rti->nelt; i++) { void *ptr = *(void **)((char *)rti->base + rti->stride * i); struct ptr_data *new_ptr; if (ptr == NULL || ptr == (void *)1) { if (fwrite (&ptr, sizeof (void *), 1, state->f) != 1) fatal_error ("can't write PCH file: %m"); } else { new_ptr = htab_find_with_hash (saving_htab, ptr, POINTER_HASH (ptr)); if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f) != 1) fatal_error ("can't write PCH file: %m"); } } } /* Hold the information we need to mmap the file back in. */ struct mmap_info { size_t offset; size_t size; void *preferred_base; }; /* Write out the state of the compiler to F. */ void gt_pch_save (FILE *f) { const struct ggc_root_tab *const *rt; const struct ggc_root_tab *rti; size_t i; struct traversal_state state; char *this_object = NULL; size_t this_object_size = 0; struct mmap_info mmi; const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity(); gt_pch_save_stringpool (); saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free); for (rt = gt_ggc_rtab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) for (i = 0; i < rti->nelt; i++) (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i)); for (rt = gt_pch_cache_rtab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) for (i = 0; i < rti->nelt; i++) (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i)); /* Prepare the objects for writing, determine addresses and such. */ state.f = f; state.d = init_ggc_pch(); state.count = 0; htab_traverse (saving_htab, call_count, &state); mmi.size = ggc_pch_total_size (state.d); /* Try to arrange things so that no relocation is necessary, but don't try very hard. On most platforms, this will always work, and on the rest it's a lot of work to do better. (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and HOST_HOOKS_GT_PCH_USE_ADDRESS.) */ mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f)); ggc_pch_this_base (state.d, mmi.preferred_base); state.ptrs = xmalloc (state.count * sizeof (*state.ptrs)); state.ptrs_i = 0; htab_traverse (saving_htab, call_alloc, &state); qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data); /* Write out all the scalar variables. */ for (rt = gt_pch_scalar_rtab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) if (fwrite (rti->base, rti->stride, 1, f) != 1) fatal_error ("can't write PCH file: %m"); /* Write out all the global pointers, after translation. */ write_pch_globals (gt_ggc_rtab, &state); write_pch_globals (gt_pch_cache_rtab, &state); ggc_pch_prepare_write (state.d, state.f); /* Pad the PCH file so that the mmapped area starts on an allocation granularity (usually page) boundary. */ { long o; o = ftell (state.f) + sizeof (mmi); if (o == -1) fatal_error ("can't get position in PCH file: %m"); mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment; if (mmi.offset == mmap_offset_alignment) mmi.offset = 0; mmi.offset += o; } if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1) fatal_error ("can't write PCH file: %m"); if (mmi.offset != 0 && fseek (state.f, mmi.offset, SEEK_SET) != 0) fatal_error ("can't write padding to PCH file: %m"); /* Actually write out the objects. 
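   Each object is first copied aside into THIS_OBJECT; the pointers inside
   the live object are then rewritten to their PCH addresses (via the
   reorder and note_ptr callbacks with relocate_ptrs), the object is
   written out, and the saved copy is put back so the compiler's in-memory
   data is left untouched.  Strings contain no pointers, so for them the
   restoring copy is skipped.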
*/ for (i = 0; i < state.count; i++) { if (this_object_size < state.ptrs[i]->size) { this_object_size = state.ptrs[i]->size; this_object = xrealloc (this_object, this_object_size); } memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size); if (state.ptrs[i]->reorder_fn != NULL) state.ptrs[i]->reorder_fn (state.ptrs[i]->obj, state.ptrs[i]->note_ptr_cookie, relocate_ptrs, &state); state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj, state.ptrs[i]->note_ptr_cookie, relocate_ptrs, &state); ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj, state.ptrs[i]->new_addr, state.ptrs[i]->size, state.ptrs[i]->note_ptr_fn == gt_pch_p_S); if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S) memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size); } ggc_pch_finish (state.d, state.f); gt_pch_fixup_stringpool (); free (state.ptrs); htab_delete (saving_htab); } /* Read the state of the compiler back in from F. */ void gt_pch_restore (FILE *f) { const struct ggc_root_tab *const *rt; const struct ggc_root_tab *rti; size_t i; struct mmap_info mmi; int result; /* Delete any deletable objects. This makes ggc_pch_read much faster, as it can be sure that no GCable objects remain other than the ones just read in. */ for (rt = gt_ggc_deletable_rtab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) memset (rti->base, 0, rti->stride); /* Read in all the scalar variables. */ for (rt = gt_pch_scalar_rtab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) if (fread (rti->base, rti->stride, 1, f) != 1) fatal_error ("can't read PCH file: %m"); /* Read in all the global pointers, in 6 easy loops. */ for (rt = gt_ggc_rtab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) for (i = 0; i < rti->nelt; i++) if (fread ((char *)rti->base + rti->stride * i, sizeof (void *), 1, f) != 1) fatal_error ("can't read PCH file: %m"); for (rt = gt_pch_cache_rtab; *rt; rt++) for (rti = *rt; rti->base != NULL; rti++) for (i = 0; i < rti->nelt; i++) if (fread ((char *)rti->base + rti->stride * i, sizeof (void *), 1, f) != 1) fatal_error ("can't read PCH file: %m"); if (fread (&mmi, sizeof (mmi), 1, f) != 1) fatal_error ("can't read PCH file: %m"); result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size, fileno (f), mmi.offset); if (result < 0) fatal_error ("had to relocate PCH"); if (result == 0) { if (fseek (f, mmi.offset, SEEK_SET) != 0 || fread (mmi.preferred_base, mmi.size, 1, f) != 1) fatal_error ("can't read PCH file: %m"); } else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0) fatal_error ("can't read PCH file: %m"); ggc_pch_read (f, mmi.preferred_base); gt_pch_restore_stringpool (); } /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present. Select no address whatsoever, and let gt_pch_save choose what it will with malloc, presumably. */ void * default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED, int fd ATTRIBUTE_UNUSED) { return NULL; } /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present. Allocate SIZE bytes with malloc. Return 0 if the address we got is the same as base, indicating that the memory has been allocated but needs to be read in from the file. Return -1 if the address differs, so relocation of the PCH file would be required. */ int default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED, size_t offset ATTRIBUTE_UNUSED) { void *addr = xmalloc (size); return (addr == base) - 1; } /* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY. Return the alignment required for allocating virtual memory.
Usually this is the same as pagesize. */ size_t default_gt_pch_alloc_granularity (void) { return getpagesize(); } #if HAVE_MMAP_FILE /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present. We temporarily allocate SIZE bytes, and let the kernel place the data wherever it will. If it worked, that's our spot, if not we're likely to be in trouble. */ void * mmap_gt_pch_get_address (size_t size, int fd) { void *ret; ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (ret == (void *) MAP_FAILED) ret = NULL; else munmap (ret, size); return ret; } /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present. Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at mapping the data at BASE, -1 if we couldn't. This version assumes that the kernel honors the START operand of mmap even without MAP_FIXED if START through START+SIZE are not currently mapped with something. */ int mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset) { void *addr; /* We're called with size == 0 if we're not planning to load a PCH file at all. This allows the hook to free any static space that we might have allocated at link time. */ if (size == 0) return -1; addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, offset); return addr == base ? 1 : -1; } #endif /* HAVE_MMAP_FILE */ /* Modify the bound based on rlimits. Keep the smallest number found. */ static double ggc_rlimit_bound (double limit) { #if defined(HAVE_GETRLIMIT) struct rlimit rlim; # ifdef RLIMIT_RSS if (getrlimit (RLIMIT_RSS, &rlim) == 0 && rlim.rlim_cur != (rlim_t) RLIM_INFINITY && rlim.rlim_cur < limit) limit = rlim.rlim_cur; # endif # ifdef RLIMIT_DATA if (getrlimit (RLIMIT_DATA, &rlim) == 0 && rlim.rlim_cur != (rlim_t) RLIM_INFINITY && rlim.rlim_cur < limit) limit = rlim.rlim_cur; # endif # ifdef RLIMIT_AS if (getrlimit (RLIMIT_AS, &rlim) == 0 && rlim.rlim_cur != (rlim_t) RLIM_INFINITY && rlim.rlim_cur < limit) limit = rlim.rlim_cur; # endif #endif /* HAVE_GETRLIMIT */ return limit; } /* Heuristic to set a default for GGC_MIN_EXPAND. */ int ggc_min_expand_heuristic (void) { double min_expand = physmem_total(); /* Adjust for rlimits. */ min_expand = ggc_rlimit_bound (min_expand); /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB). */ min_expand /= 1024*1024*1024; min_expand *= 70; min_expand = MIN (min_expand, 70); min_expand += 30; return min_expand; } /* Heuristic to set a default for GGC_MIN_HEAPSIZE. */ int ggc_min_heapsize_heuristic (void) { double min_heap_kbytes = physmem_total(); /* Adjust for rlimits. */ min_heap_kbytes = ggc_rlimit_bound (min_heap_kbytes); min_heap_kbytes /= 1024; /* Convert to Kbytes. */ /* The heuristic is RAM/8, with a lower bound of 4M and an upper bound of 128M (when RAM >= 1GB). */ min_heap_kbytes /= 8; min_heap_kbytes = MAX (min_heap_kbytes, 4 * 1024); min_heap_kbytes = MIN (min_heap_kbytes, 128 * 1024); return min_heap_kbytes; } void init_ggc_heuristics (void) { #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT set_param_value ("ggc-min-expand", ggc_min_expand_heuristic()); set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic()); #endif } #ifdef GATHER_STATISTICS /* Datastructure used to store per-call-site statistics. */ struct loc_descriptor { const char *file; int line; const char *function; int times; size_t allocated; size_t overhead; }; /* Hashtable used for statistics. 
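   Each entry is a struct loc_descriptor keyed by the (file, line, function)
   triple of an allocation call site; ggc_record_overhead below accumulates
   the per-site counts into it.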
*/ static htab_t loc_hash; /* Hash table helpers functions. */ static hashval_t hash_descriptor (const void *p) { const struct loc_descriptor *d = p; return htab_hash_pointer (d->function) | d->line; } static int eq_descriptor (const void *p1, const void *p2) { const struct loc_descriptor *d = p1; const struct loc_descriptor *d2 = p2; return (d->file == d2->file && d->line == d2->line && d->function == d2->function); } /* Return descriptor for given call site, create new one if needed. */ static struct loc_descriptor * loc_descriptor (const char *name, int line, const char *function) { struct loc_descriptor loc; struct loc_descriptor **slot; loc.file = name; loc.line = line; loc.function = function; if (!loc_hash) loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL); slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, 1); if (*slot) return *slot; *slot = xcalloc (sizeof (**slot), 1); (*slot)->file = name; (*slot)->line = line; (*slot)->function = function; return *slot; } /* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION). */ void ggc_record_overhead (size_t allocated, size_t overhead, const char *name, int line, const char *function) { struct loc_descriptor *loc = loc_descriptor (name, line, function); loc->times++; loc->allocated+=allocated; loc->overhead+=overhead; } /* Helper for qsort; sort descriptors by amount of memory consumed. */ static int cmp_statistic (const void *loc1, const void *loc2) { struct loc_descriptor *l1 = *(struct loc_descriptor **) loc1; struct loc_descriptor *l2 = *(struct loc_descriptor **) loc2; return (l1->allocated + l1->overhead) - (l2->allocated + l2->overhead); } /* Collect array of the descriptors from hashtable. */ struct loc_descriptor **loc_array; static int add_statistics (void **slot, void *b) { int *n = (int *)b; loc_array[*n] = (struct loc_descriptor *) *slot; (*n)++; return 1; } /* Dump per-site memory statistics. */ #endif void dump_ggc_loc_statistics (void) { #ifdef GATHER_STATISTICS int nentries = 0; char s[4096]; size_t count, size, overhead; int i; loc_array = xcalloc (sizeof (*loc_array), loc_hash->n_elements); fprintf (stderr, "-------------------------------------------------------\n"); fprintf (stderr, "\n%-60s %10s %10s %10s\n", "source location", "Times", "Allocated", "Overhead"); fprintf (stderr, "-------------------------------------------------------\n"); count = 0; size = 0; overhead = 0; htab_traverse (loc_hash, add_statistics, &nentries); qsort (loc_array, nentries, sizeof (*loc_array), cmp_statistic); for (i = 0; i < nentries; i++) { struct loc_descriptor *d = loc_array[i]; size += d->allocated; count += d->times; overhead += d->overhead; } for (i = 0; i < nentries; i++) { struct loc_descriptor *d = loc_array[i]; if (d->allocated) { const char *s1 = d->file; const char *s2; while ((s2 = strstr (s1, "gcc/"))) s1 = s2 + 4; sprintf (s, "%s:%i (%s)", s1, d->line, d->function); fprintf (stderr, "%-60s %10i %10li %10li:%.3f%%\n", s, d->times, (long)d->allocated, (long)d->overhead, (d->allocated + d->overhead) *100.0 / (size + overhead)); } } fprintf (stderr, "%-60s %10ld %10ld %10ld\n", "Total", (long)count, (long)size, (long)overhead); fprintf (stderr, "-------------------------------------------------------\n"); #endif } /* Allocate registers for pseudo-registers that span basic blocks. Copyright (C) 1987, 1988, 1991, 1994, 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This pass of the compiler performs global register allocation. It assigns hard register numbers to all the pseudo registers that were not handled in local_alloc. Assignments are recorded in the vector reg_renumber, not by changing the rtl code. (Such changes are made by final.) The entry point is the function global_alloc. After allocation is complete, the reload pass is run as a subroutine of this pass, so that when a pseudo reg loses its hard reg due to spilling it is possible to make a second attempt to find a hard reg for it. The reload pass is independent in other respects and it is run even when stupid register allocation is in use. 1. Assign allocation-numbers (allocnos) to the pseudo-registers still needing allocations and to the pseudo-registers currently allocated by local-alloc which may be spilled by reload. Set up tables reg_allocno and allocno_reg to map reg numbers to allocnos and vice versa. max_allocno gets the number of allocnos in use. 2. Allocate a max_allocno by max_allocno conflict bit matrix and clear it. Allocate a max_allocno by FIRST_PSEUDO_REGISTER conflict matrix for conflicts between allocnos and explicit hard register use (which includes use of pseudo-registers allocated by local_alloc). 3. For each basic block walk forward through the block, recording which pseudo-registers and which hardware registers are live. Build one conflict matrix among the pseudo-registers, and another between the pseudo-registers and the hardware registers. Also record the preferred hardware registers for each pseudo-register. 4. Sort a table of the allocnos into order of desirability of the variables. 5. Allocate the variables in that order; each, if possible, into a preferred register, else into another register. */ /* Number of pseudo-registers which are candidates for allocation. */ static int max_allocno; /* Indexed by (pseudo) reg number, gives the allocno, or -1 for pseudo registers which are not to be allocated. */ static int *reg_allocno; struct allocno { int reg; /* Gives the number of consecutive hard registers needed by that pseudo reg. */ int size; /* Number of calls crossed by each allocno. */ int calls_crossed; /* Number of refs to each allocno. */ int n_refs; /* Frequency of uses of each allocno. */ int freq; /* Guess at live length of each allocno. This is actually the max of the live lengths of the regs. */ int live_length; /* Set of hard regs conflicting with allocno N. */ HARD_REG_SET hard_reg_conflicts; /* Set of hard regs preferred by allocno N. This is used to make allocnos go into regs that are copied to or from them, when possible, to reduce register shuffling. */ HARD_REG_SET hard_reg_preferences; /* Similar, but just counts register preferences made in simple copy operations, rather than arithmetic.
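   For example (illustrative only): for an insn (set (reg:SI 70) (reg:SI 3))
   the bit for hard register 3 is set here in the allocno for pseudo 70,
   since giving pseudo 70 hard register 3 makes the copy redundant.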
These are given priority because we can always eliminate an insn by using these, but using a register in the above list won't always eliminate an insn. */ HARD_REG_SET hard_reg_copy_preferences; /* Similar to hard_reg_preferences, but includes bits for subsequent registers when an allocno is multi-word. The above variable is used for allocation while this is used to build reg_someone_prefers, below. */ HARD_REG_SET hard_reg_full_preferences; /* Set of hard registers that some later allocno has a preference for. */ HARD_REG_SET regs_someone_prefers; #ifdef STACK_REGS /* Set to true if allocno can't be allocated in the stack register. */ bool no_stack_reg; #endif }; static struct allocno *allocno; /* A vector of the integers from 0 to max_allocno-1, sorted in allocation order, first-to-be-allocated first. */ static int *allocno_order; /* Indexed by (pseudo) reg number, gives the number of another lower-numbered pseudo reg which can share a hard reg with this pseudo *even if the two pseudos would otherwise appear to conflict*. */ static int *reg_may_share; /* Define the number of bits in each element of `conflicts' and what type that element has. We use the largest integer format on the host machine. */ #define INT_BITS HOST_BITS_PER_WIDE_INT #define INT_TYPE HOST_WIDE_INT /* max_allocno by max_allocno array of bits, recording whether two allocnos conflict (can't go in the same hardware register). `conflicts' is symmetric after the call to mirror_conflicts. */ static INT_TYPE *conflicts; /* Number of ints required to hold max_allocno bits. This is the length of a row in `conflicts'. */ static int allocno_row_words; /* Macro to test a bit in an element of `conflicts'. */ #define CONFLICTP(I, J) \ (conflicts[(I) * allocno_row_words + (unsigned) (J) / INT_BITS] \ & ((INT_TYPE) 1 << ((unsigned) (J) % INT_BITS))) /* For any allocno set in ALLOCNO_SET, set ALLOCNO to that allocno, and execute CODE. */ #define EXECUTE_IF_SET_IN_ALLOCNO_SET(ALLOCNO_SET, ALLOCNO, CODE) \ do { \ int i_; \ int allocno_; \ INT_TYPE *p_ = (ALLOCNO_SET); \ \ for (i_ = allocno_row_words - 1, allocno_ = 0; i_ >= 0; \ i_--, allocno_ += INT_BITS) \ { \ unsigned INT_TYPE word_ = (unsigned INT_TYPE) *p_++; \ \ for ((ALLOCNO) = allocno_; word_; word_ >>= 1, (ALLOCNO)++) \ { \ if (word_ & 1) \ {CODE;} \ } \ } \ } while (0) /* This doesn't work for non-GNU C due to the way CODE is macro expanded. */ #if 0 /* For any allocno that conflicts with IN_ALLOCNO, set OUT_ALLOCNO to the conflicting allocno, and execute CODE. This macro assumes that mirror_conflicts has been run. */ #define EXECUTE_IF_CONFLICT(IN_ALLOCNO, OUT_ALLOCNO, CODE)\ EXECUTE_IF_SET_IN_ALLOCNO_SET (conflicts + (IN_ALLOCNO) * allocno_row_words,\ OUT_ALLOCNO, (CODE)) #endif /* Set of hard regs currently live (during scan of all insns). */ static HARD_REG_SET hard_regs_live; /* Set of registers that global-alloc isn't supposed to use. */ static HARD_REG_SET no_global_alloc_regs; /* Set of registers used so far. */ static HARD_REG_SET regs_used_so_far; /* Number of refs to each hard reg, as used by local alloc. It is zero for a reg that contains global pseudos or is explicitly used. */ static int local_reg_n_refs[FIRST_PSEUDO_REGISTER]; /* Frequency of uses of given hard reg. */ static int local_reg_freq[FIRST_PSEUDO_REGISTER]; /* Guess at live length of each hard reg, as used by local alloc. This is actually the sum of the live lengths of the specific regs.
*/ static int local_reg_live_length[FIRST_PSEUDO_REGISTER]; /* Set to 1 a bit in a vector TABLE of HARD_REG_SETs, for vector element I, and hard register number J. */ #define SET_REGBIT(TABLE, I, J) SET_HARD_REG_BIT (allocno[I].TABLE, J) /* Bit mask for allocnos live at current point in the scan. */ static INT_TYPE *allocnos_live; /* Test, set or clear bit number I in allocnos_live, a bit vector indexed by allocno. */ #define SET_ALLOCNO_LIVE(I) \ (allocnos_live[(unsigned) (I) / INT_BITS] \ |= ((INT_TYPE) 1 << ((unsigned) (I) % INT_BITS))) #define CLEAR_ALLOCNO_LIVE(I) \ (allocnos_live[(unsigned) (I) / INT_BITS] \ &= ~((INT_TYPE) 1 << ((unsigned) (I) % INT_BITS))) /* This is turned off because it doesn't work right for DImode. (And it is only used for DImode, so the other cases are worthless.) The problem is that it isn't true that there is NO possibility of conflict; only that there is no conflict if the two pseudos get the exact same regs. If they were allocated with a partial overlap, there would be a conflict. We can't safely turn off the conflict unless we have another way to prevent the partial overlap. Idea: change hard_reg_conflicts so that instead of recording which hard regs the allocno may not overlap, it records where the allocno may not start. Change both where it is used and where it is updated. Then there is a way to record that (reg:DI 108) may start at 10 but not at 9 or 11. There is still the question of how to record this semi-conflict between two pseudos. */ #if 0 /* Reg pairs for which conflict after the current insn is inhibited by a REG_NO_CONFLICT note. If the table gets full, we ignore any other notes--that is conservative. */ #define NUM_NO_CONFLICT_PAIRS 4 /* Number of pairs in use in this insn. */ int n_no_conflict_pairs; static struct { int allocno1, allocno2;} no_conflict_pairs[NUM_NO_CONFLICT_PAIRS]; #endif /* 0 */ /* Record all regs that are set in any one insn. Communication from mark_reg_{store,clobber} and global_conflicts. */ static rtx *regs_set; static int n_regs_set; /* All registers that can be eliminated. */ static HARD_REG_SET eliminable_regset; static int allocno_compare (const void *, const void *); static void global_conflicts (void); static void mirror_conflicts (void); static void expand_preferences (void); static void prune_preferences (void); static void find_reg_global (int, HARD_REG_SET, int, int, int); static void record_one_conflict (int); static void record_conflicts (int *, int); static void mark_reg_store (rtx, rtx, void *); static void mark_reg_clobber (rtx, rtx, void *); static void mark_reg_conflicts (rtx); static void mark_reg_death (rtx); static void mark_reg_live_nc (int, enum machine_mode); static void set_preference (rtx, rtx); static void dump_conflicts (FILE *); static void reg_becomes_live_global (rtx, rtx, void *); static void reg_dies_global (int, enum machine_mode, struct insn_chain *); static void allocate_bb_info (void); static void free_bb_info_global (void); static void calculate_local_reg_bb_info (void); static void set_up_bb_rts_numbers (void); static int rpost_cmp (const void *, const void *); static bool modify_bb_reg_pav (basic_block, basic_block, bool); static void calculate_reg_pav (void); static void make_accurate_live_analysis (void); /* Perform allocation of pseudo-registers not allocated by local_alloc. FILE is a file to output debugging information on, or zero if such output is not desired. Return value is nonzero if reload failed and we must not do any more for this function. 
*/ int global_alloc (FILE *file) { int retval; #ifdef ELIMINABLE_REGS static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS; #endif int need_fp = (! flag_omit_frame_pointer || (current_function_calls_alloca && EXIT_IGNORE_STACK) || FRAME_POINTER_REQUIRED); size_t i; rtx x; make_accurate_live_analysis (); max_allocno = 0; /* A machine may have certain hard registers that are safe to use only within a basic block. */ CLEAR_HARD_REG_SET (no_global_alloc_regs); /* Build the regset of all eliminable registers and show we can't use those that we already know won't be eliminated. */ #ifdef ELIMINABLE_REGS for (i = 0; i < ARRAY_SIZE (eliminables); i++) { bool cannot_elim = (! CAN_ELIMINATE (eliminables[i].from, eliminables[i].to) || (eliminables[i].to == STACK_POINTER_REGNUM && need_fp)); if (!regs_asm_clobbered[eliminables[i].from]) { SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from); if (cannot_elim) SET_HARD_REG_BIT (no_global_alloc_regs, eliminables[i].from); } else if (cannot_elim) error ("%s cannot be used in asm here", reg_names[eliminables[i].from]); else regs_ever_live[eliminables[i].from] = 1; } #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM if (!regs_asm_clobbered[HARD_FRAME_POINTER_REGNUM]) { SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM); if (need_fp) SET_HARD_REG_BIT (no_global_alloc_regs, HARD_FRAME_POINTER_REGNUM); } else if (need_fp) error ("%s cannot be used in asm here", reg_names[HARD_FRAME_POINTER_REGNUM]); else regs_ever_live[HARD_FRAME_POINTER_REGNUM] = 1; #endif #else if (!regs_asm_clobbered[FRAME_POINTER_REGNUM]) { SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM); if (need_fp) SET_HARD_REG_BIT (no_global_alloc_regs, FRAME_POINTER_REGNUM); } else if (need_fp) error ("%s cannot be used in asm here", reg_names[FRAME_POINTER_REGNUM]); else regs_ever_live[FRAME_POINTER_REGNUM] = 1; #endif /* Track which registers have already been used. Start with registers explicitly in the rtl, then registers allocated by local register allocation. */ CLEAR_HARD_REG_SET (regs_used_so_far); #ifdef LEAF_REGISTERS /* If we are doing the leaf function optimization, and this is a leaf function, it means that the registers that take work to save are those that need a register window. So prefer the ones that can be used in a leaf function. */ { const char *cheap_regs; const char *const leaf_regs = LEAF_REGISTERS; if (only_leaf_regs_used () && leaf_function_p ()) cheap_regs = leaf_regs; else cheap_regs = call_used_regs; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (regs_ever_live[i] || cheap_regs[i]) SET_HARD_REG_BIT (regs_used_so_far, i); } #else /* We consider registers that do not have to be saved over calls as if they were already used since there is no cost in using them. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (regs_ever_live[i] || call_used_regs[i]) SET_HARD_REG_BIT (regs_used_so_far, i); #endif for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++) if (reg_renumber[i] >= 0) SET_HARD_REG_BIT (regs_used_so_far, reg_renumber[i]); /* Establish mappings from register number to allocation number and vice versa. In the process, count the allocnos. */ reg_allocno = xmalloc (max_regno * sizeof (int)); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) reg_allocno[i] = -1; /* Initialize the shared-hard-reg mapping from the list of pairs that may share. 
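   regs_may_share is a chain of register pairs; for each pair the
   higher-numbered pseudo is mapped to the lower-numbered one, so that both
   later receive the same allocno and hence the same hard register.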
*/ reg_may_share = xcalloc (max_regno, sizeof (int)); for (x = regs_may_share; x; x = XEXP (XEXP (x, 1), 1)) { int r1 = REGNO (XEXP (x, 0)); int r2 = REGNO (XEXP (XEXP (x, 1), 0)); if (r1 > r2) reg_may_share[r1] = r2; else reg_may_share[r2] = r1; } for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++) /* Note that reg_live_length[i] < 0 indicates a "constant" reg that we are supposed to refrain from putting in a hard reg. -2 means do make an allocno but don't allocate it. */ if (REG_N_REFS (i) != 0 && REG_LIVE_LENGTH (i) != -1 /* Don't allocate pseudos that cross calls, if this function receives a nonlocal goto. */ && (! current_function_has_nonlocal_label || REG_N_CALLS_CROSSED (i) == 0)) { if (reg_renumber[i] < 0 && reg_may_share[i] && reg_allocno[reg_may_share[i]] >= 0) reg_allocno[i] = reg_allocno[reg_may_share[i]]; else reg_allocno[i] = max_allocno++; if (REG_LIVE_LENGTH (i) == 0) abort (); } else reg_allocno[i] = -1; allocno = xcalloc (max_allocno, sizeof (struct allocno)); for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++) if (reg_allocno[i] >= 0) { int num = reg_allocno[i]; allocno[num].reg = i; allocno[num].size = PSEUDO_REGNO_SIZE (i); allocno[num].calls_crossed += REG_N_CALLS_CROSSED (i); allocno[num].n_refs += REG_N_REFS (i); allocno[num].freq += REG_FREQ (i); if (allocno[num].live_length < REG_LIVE_LENGTH (i)) allocno[num].live_length = REG_LIVE_LENGTH (i); } /* Calculate amount of usage of each hard reg by pseudos allocated by local-alloc. This is to see if we want to override it. */ memset (local_reg_live_length, 0, sizeof local_reg_live_length); memset (local_reg_n_refs, 0, sizeof local_reg_n_refs); memset (local_reg_freq, 0, sizeof local_reg_freq); for (i = FIRST_PSEUDO_REGISTER; i < (size_t) max_regno; i++) if (reg_renumber[i] >= 0) { int regno = reg_renumber[i]; int endregno = regno + hard_regno_nregs[regno][PSEUDO_REGNO_MODE (i)]; int j; for (j = regno; j < endregno; j++) { local_reg_n_refs[j] += REG_N_REFS (i); local_reg_freq[j] += REG_FREQ (i); local_reg_live_length[j] += REG_LIVE_LENGTH (i); } } /* We can't override local-alloc for a reg used not just by local-alloc. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (regs_ever_live[i]) local_reg_n_refs[i] = 0, local_reg_freq[i] = 0; allocno_row_words = (max_allocno + INT_BITS - 1) / INT_BITS; /* We used to use alloca here, but the size of what it would try to allocate would occasionally cause it to exceed the stack limit and cause unpredictable core dumps. Some examples were > 2Mb in size. */ conflicts = xcalloc (max_allocno * allocno_row_words, sizeof (INT_TYPE)); allocnos_live = xmalloc (allocno_row_words * sizeof (INT_TYPE)); /* If there is work to be done (at least one reg to allocate), perform global conflict analysis and allocate the regs. */ if (max_allocno > 0) { /* Scan all the insns and compute the conflicts among allocnos and between allocnos and hard regs. */ global_conflicts (); mirror_conflicts (); /* Eliminate conflicts between pseudos and eliminable registers. If the register is not eliminated, the pseudo won't really be able to live in the eliminable register, so the conflict doesn't matter. If we do eliminate the register, the conflict will no longer exist. So in either case, we can ignore the conflict. Likewise for preferences. 
*/ for (i = 0; i < (size_t) max_allocno; i++) { AND_COMPL_HARD_REG_SET (allocno[i].hard_reg_conflicts, eliminable_regset); AND_COMPL_HARD_REG_SET (allocno[i].hard_reg_copy_preferences, eliminable_regset); AND_COMPL_HARD_REG_SET (allocno[i].hard_reg_preferences, eliminable_regset); } /* Try to expand the preferences by merging them between allocnos. */ expand_preferences (); /* Determine the order to allocate the remaining pseudo registers. */ allocno_order = xmalloc (max_allocno * sizeof (int)); for (i = 0; i < (size_t) max_allocno; i++) allocno_order[i] = i; /* Default the size to 1, since allocno_compare uses it to divide by. Also convert allocno_live_length of zero to -1. A length of zero can occur when all the registers for that allocno have reg_live_length equal to -2. In this case, we want to make an allocno, but not allocate it. So avoid the divide-by-zero and set it to a low priority. */ for (i = 0; i < (size_t) max_allocno; i++) { if (allocno[i].size == 0) allocno[i].size = 1; if (allocno[i].live_length == 0) allocno[i].live_length = -1; } qsort (allocno_order, max_allocno, sizeof (int), allocno_compare); prune_preferences (); if (file) dump_conflicts (file); /* Try allocating them, one by one, in that order, except for parameters marked with reg_live_length[regno] == -2. */ for (i = 0; i < (size_t) max_allocno; i++) if (reg_renumber[allocno[allocno_order[i]].reg] < 0 && REG_LIVE_LENGTH (allocno[allocno_order[i]].reg) >= 0) { /* If we have more than one register class, first try allocating in the class that is cheapest for this pseudo-reg. If that fails, try any reg. */ if (N_REG_CLASSES > 1) { find_reg_global (allocno_order[i], 0, 0, 0, 0); if (reg_renumber[allocno[allocno_order[i]].reg] >= 0) continue; } if (reg_alternate_class (allocno[allocno_order[i]].reg) != NO_REGS) find_reg_global (allocno_order[i], 0, 1, 0, 0); } free (allocno_order); } /* Do the reloads now while the allocno data still exist, so that we can try to assign new hard regs to any pseudo regs that are spilled. */ #if 0 /* We need to eliminate regs even if there is no rtl code, for the sake of debugging information. */ if (n_basic_blocks > 0) #endif { build_insn_chain (get_insns ()); retval = reload (get_insns (), 1); } /* Clean up. */ free (reg_allocno); free (reg_may_share); free (allocno); free (conflicts); free (allocnos_live); return retval; } /* Sort predicate for ordering the allocnos. Returns -1 (1) if *v1 should be allocated before (after) *v2. */ static int allocno_compare (const void *v1p, const void *v2p) { int v1 = *(const int *)v1p, v2 = *(const int *)v2p; /* Note that the quotient will never be bigger than the value of floor_log2 times the maximum number of times a register can occur in one insn (surely less than 100) weighted by the frequency (maximally REG_FREQ_MAX). Multiplying this by 10000/REG_FREQ_MAX can't overflow. */ int pri1 = (((double) (floor_log2 (allocno[v1].n_refs) * allocno[v1].freq) / allocno[v1].live_length) * (10000 / REG_FREQ_MAX) * allocno[v1].size); int pri2 = (((double) (floor_log2 (allocno[v2].n_refs) * allocno[v2].freq) / allocno[v2].live_length) * (10000 / REG_FREQ_MAX) * allocno[v2].size); if (pri2 - pri1) return pri2 - pri1; /* If regs are equally good, sort by allocno, so that the results of qsort leave nothing to chance. */ return v1 - v2; } /* Scan the rtl code and record all conflicts and register preferences in the conflict matrices and preference tables. 
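   This is a single forward sweep over the insns of each basic block:
   allocnos_live and hard_regs_live are seeded from the block's
   global_live_at_start set, and each time a register becomes live it is
   recorded as conflicting with everything already live at that point.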
*/ static void global_conflicts (void) { int i; basic_block b; rtx insn; int *block_start_allocnos; /* Make a vector that mark_reg_{store,clobber} will store in. */ regs_set = xmalloc (max_parallel * sizeof (rtx) * 2); block_start_allocnos = xmalloc (max_allocno * sizeof (int)); FOR_EACH_BB (b) { memset (allocnos_live, 0, allocno_row_words * sizeof (INT_TYPE)); /* Initialize table of registers currently live to the state at the beginning of this basic block. This also marks the conflicts among hard registers and any allocnos that are live. For pseudo-regs, there is only one bit for each one no matter how many hard regs it occupies. This is ok; we know the size from PSEUDO_REGNO_SIZE. For explicit hard regs, we cannot know the size that way since one hard reg can be used with various sizes. Therefore, we must require that all the hard regs implicitly live as part of a multi-word hard reg are explicitly marked in basic_block_live_at_start. */ { regset old = b->global_live_at_start; int ax = 0; REG_SET_TO_HARD_REG_SET (hard_regs_live, old); EXECUTE_IF_SET_IN_REG_SET (old, FIRST_PSEUDO_REGISTER, i, { int a = reg_allocno[i]; if (a >= 0) { SET_ALLOCNO_LIVE (a); block_start_allocnos[ax++] = a; } else if ((a = reg_renumber[i]) >= 0) mark_reg_live_nc (a, PSEUDO_REGNO_MODE (i)); }); /* Record that each allocno now live conflicts with each hard reg now live. It is not necessary to mark any conflicts between pseudos at this point, even for pseudos which are live at the start of the basic block. Given two pseudos X and Y and any point P in the CFG, on any path to point P where X and Y are both live, one of the following conditions must be true: 1. X is live at some instruction on the path that evaluates Y. 2. Y is live at some instruction on the path that evaluates X. 3. Either X or Y is not evaluated on the path to P (i.e. it is used uninitialized) and thus the conflict can be ignored. In cases #1 and #2 the conflict will be recorded when we scan the instruction that makes either X or Y become live. */ record_conflicts (block_start_allocnos, ax); /* Pseudos can't go in stack regs at the start of a basic block that is reached by an abnormal edge. Likewise for call clobbered regs, because caller-save, fixup_abnormal_edges, and possibly the table driven EH machinery are not quite ready to handle such regs live across such edges. */ { edge e; for (e = b->pred; e ; e = e->pred_next) if (e->flags & EDGE_ABNORMAL) break; if (e != NULL) { #ifdef STACK_REGS EXECUTE_IF_SET_IN_ALLOCNO_SET (allocnos_live, ax, { allocno[ax].no_stack_reg = 1; }); for (ax = FIRST_STACK_REG; ax <= LAST_STACK_REG; ax++) record_one_conflict (ax); #endif /* No need to record conflicts for call clobbered regs if we have nonlocal labels around, as we don't ever try to allocate such regs in this case. */ if (! current_function_has_nonlocal_label) for (ax = 0; ax < FIRST_PSEUDO_REGISTER; ax++) if (call_used_regs [ax]) record_one_conflict (ax); } } } insn = BB_HEAD (b); /* Scan the code of this basic block, noting which allocnos and hard regs are born or die. When one is born, record a conflict with all others currently live. */ while (1) { RTX_CODE code = GET_CODE (insn); rtx link; /* Make regs_set an empty set.
*/ n_regs_set = 0; if (code == INSN || code == CALL_INSN || code == JUMP_INSN) { #if 0 int i = 0; for (link = REG_NOTES (insn); link && i < NUM_NO_CONFLICT_PAIRS; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_NO_CONFLICT) { no_conflict_pairs[i].allocno1 = reg_allocno[REGNO (SET_DEST (PATTERN (insn)))]; no_conflict_pairs[i].allocno2 = reg_allocno[REGNO (XEXP (link, 0))]; i++; } #endif /* 0 */ /* Mark any registers clobbered by INSN as live, so they conflict with the inputs. */ note_stores (PATTERN (insn), mark_reg_clobber, NULL); /* Mark any registers dead after INSN as dead now. */ for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_DEAD) mark_reg_death (XEXP (link, 0)); /* Mark any registers set in INSN as live, and mark them as conflicting with all other live regs. Clobbers are processed again, so they conflict with the registers that are set. */ note_stores (PATTERN (insn), mark_reg_store, NULL); #ifdef AUTO_INC_DEC for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_INC) mark_reg_store (XEXP (link, 0), NULL_RTX, NULL); #endif /* If INSN has multiple outputs, then any reg that dies here and is used inside of an output must conflict with the other outputs. It is unsafe to use !single_set here since it will ignore an unused output. Just because an output is unused does not mean the compiler can assume the side effect will not occur. Consider if REG appears in the address of an output and we reload the output. If we allocate REG to the same hard register as an unused output we could set the hard register before the output reload insn. */ if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn)) for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_DEAD) { int used_in_output = 0; int i; rtx reg = XEXP (link, 0); for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) { rtx set = XVECEXP (PATTERN (insn), 0, i); if (GET_CODE (set) == SET && !REG_P (SET_DEST (set)) && !rtx_equal_p (reg, SET_DEST (set)) && reg_overlap_mentioned_p (reg, SET_DEST (set))) used_in_output = 1; } if (used_in_output) mark_reg_conflicts (reg); } /* Mark any registers set in INSN and then never used. */ while (n_regs_set-- > 0) { rtx note = find_regno_note (insn, REG_UNUSED, REGNO (regs_set[n_regs_set])); if (note) mark_reg_death (XEXP (note, 0)); } } if (insn == BB_END (b)) break; insn = NEXT_INSN (insn); } } /* Clean up. */ free (block_start_allocnos); free (regs_set); } /* Expand the preference information by looking for cases where one allocno dies in an insn that sets an allocno. If those two allocnos don't conflict, merge any preferences between those allocnos. */ static void expand_preferences (void) { rtx insn; rtx link; rtx set; /* We only try to handle the most common cases here. Most of the cases where this wins are reg-reg copies. */ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && (set = single_set (insn)) != 0 && REG_P (SET_DEST (set)) && reg_allocno[REGNO (SET_DEST (set))] >= 0) for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_DEAD && REG_P (XEXP (link, 0)) && reg_allocno[REGNO (XEXP (link, 0))] >= 0 && ! 
CONFLICTP (reg_allocno[REGNO (SET_DEST (set))], reg_allocno[REGNO (XEXP (link, 0))])) { int a1 = reg_allocno[REGNO (SET_DEST (set))]; int a2 = reg_allocno[REGNO (XEXP (link, 0))]; if (XEXP (link, 0) == SET_SRC (set)) { IOR_HARD_REG_SET (allocno[a1].hard_reg_copy_preferences, allocno[a2].hard_reg_copy_preferences); IOR_HARD_REG_SET (allocno[a2].hard_reg_copy_preferences, allocno[a1].hard_reg_copy_preferences); } IOR_HARD_REG_SET (allocno[a1].hard_reg_preferences, allocno[a2].hard_reg_preferences); IOR_HARD_REG_SET (allocno[a2].hard_reg_preferences, allocno[a1].hard_reg_preferences); IOR_HARD_REG_SET (allocno[a1].hard_reg_full_preferences, allocno[a2].hard_reg_full_preferences); IOR_HARD_REG_SET (allocno[a2].hard_reg_full_preferences, allocno[a1].hard_reg_full_preferences); } } /* Prune the preferences for global registers to exclude registers that cannot be used. Compute `regs_someone_prefers', which is a bitmask of the hard registers that are preferred by conflicting registers of lower priority. If possible, we will avoid using these registers. */ static void prune_preferences (void) { int i; int num; int *allocno_to_order = xmalloc (max_allocno * sizeof (int)); /* Scan from least important to most important. For each allocno, remove from preferences registers that cannot be used, either because of conflicts or register type. Then compute all registers preferred by each lower-priority register that conflicts. */ for (i = max_allocno - 1; i >= 0; i--) { HARD_REG_SET temp; num = allocno_order[i]; allocno_to_order[num] = i; COPY_HARD_REG_SET (temp, allocno[num].hard_reg_conflicts); if (allocno[num].calls_crossed == 0) IOR_HARD_REG_SET (temp, fixed_reg_set); else IOR_HARD_REG_SET (temp, call_used_reg_set); IOR_COMPL_HARD_REG_SET (temp, reg_class_contents[(int) reg_preferred_class (allocno[num].reg)]); AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_preferences, temp); AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_copy_preferences, temp); AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_full_preferences, temp); } for (i = max_allocno - 1; i >= 0; i--) { /* Merge in the preferences of lower-priority registers (they have already been pruned). If we also prefer some of those registers, don't exclude them unless we are of a smaller size (in which case we want to give the lower-priority allocno the first chance for these registers). */ HARD_REG_SET temp, temp2; int allocno2; num = allocno_order[i]; CLEAR_HARD_REG_SET (temp); CLEAR_HARD_REG_SET (temp2); EXECUTE_IF_SET_IN_ALLOCNO_SET (conflicts + num * allocno_row_words, allocno2, { if (allocno_to_order[allocno2] > i) { if (allocno[allocno2].size <= allocno[num].size) IOR_HARD_REG_SET (temp, allocno[allocno2].hard_reg_full_preferences); else IOR_HARD_REG_SET (temp2, allocno[allocno2].hard_reg_full_preferences); } }); AND_COMPL_HARD_REG_SET (temp, allocno[num].hard_reg_full_preferences); IOR_HARD_REG_SET (temp, temp2); COPY_HARD_REG_SET (allocno[num].regs_someone_prefers, temp); } free (allocno_to_order); } /* Assign a hard register to allocno NUM; look for one that is the beginning of a long enough stretch of hard regs none of which conflicts with ALLOCNO. Registers that the allocno prefers (see hard_reg_copy_preferences and hard_reg_preferences) are used when possible. LOSERS, if nonzero, is a HARD_REG_SET indicating registers that cannot be used for this allocation. If ALT_REGS_P is zero, consider only the preferred class of ALLOCNO's reg. Otherwise ignore that preferred class and use the alternate class.
If ACCEPT_CALL_CLOBBERED is nonzero, accept a call-clobbered hard reg that will have to be saved and restored at calls. RETRYING is nonzero if this is called from retry_global_alloc. If we find one, record it in reg_renumber. If not, do nothing. */ static void find_reg_global (int num, HARD_REG_SET losers, int alt_regs_p, int accept_call_clobbered, int retrying) { int i, best_reg, pass; HARD_REG_SET used, used1, used2; enum reg_class class = (alt_regs_p ? reg_alternate_class (allocno[num].reg) : reg_preferred_class (allocno[num].reg)); enum machine_mode mode = PSEUDO_REGNO_MODE (allocno[num].reg); if (accept_call_clobbered) COPY_HARD_REG_SET (used1, call_fixed_reg_set); else if (allocno[num].calls_crossed == 0) COPY_HARD_REG_SET (used1, fixed_reg_set); else COPY_HARD_REG_SET (used1, call_used_reg_set); /* Some registers should not be allocated in global-alloc. */ IOR_HARD_REG_SET (used1, no_global_alloc_regs); if (losers) IOR_HARD_REG_SET (used1, losers); IOR_COMPL_HARD_REG_SET (used1, reg_class_contents[(int) class]); COPY_HARD_REG_SET (used2, used1); IOR_HARD_REG_SET (used1, allocno[num].hard_reg_conflicts); #ifdef CANNOT_CHANGE_MODE_CLASS cannot_change_mode_set_regs (&used1, mode, allocno[num].reg); #endif /* Try each hard reg to see if it fits. Do this in two passes. In the first pass, skip registers that are preferred by some other pseudo to give it a better chance of getting one of those registers. Only if we can't get a register when excluding those do we take one of them. However, we never allocate a register for the first time in pass 0. */ COPY_HARD_REG_SET (used, used1); IOR_COMPL_HARD_REG_SET (used, regs_used_so_far); IOR_HARD_REG_SET (used, allocno[num].regs_someone_prefers); best_reg = -1; for (i = FIRST_PSEUDO_REGISTER, pass = 0; pass <= 1 && i >= FIRST_PSEUDO_REGISTER; pass++) { if (pass == 1) COPY_HARD_REG_SET (used, used1); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { #ifdef REG_ALLOC_ORDER int regno = reg_alloc_order[i]; #else int regno = i; #endif if (! TEST_HARD_REG_BIT (used, regno) && HARD_REGNO_MODE_OK (regno, mode) && (allocno[num].calls_crossed == 0 || accept_call_clobbered || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))) { int j; int lim = regno + hard_regno_nregs[regno][mode]; for (j = regno + 1; (j < lim && ! TEST_HARD_REG_BIT (used, j)); j++); if (j == lim) { best_reg = regno; break; } #ifndef REG_ALLOC_ORDER i = j; /* Skip starting points we know will lose */ #endif } } } /* See if there is a preferred register with the same class as the register we allocated above. Making this restriction prevents register preferencing from creating worse register allocation. Remove from the preferred registers and conflicting registers. Note that additional conflicts may have been added after `prune_preferences' was called. First do this for those register with copy preferences, then all preferred registers. */ AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_copy_preferences, used); GO_IF_HARD_REG_SUBSET (allocno[num].hard_reg_copy_preferences, reg_class_contents[(int) NO_REGS], no_copy_prefs); if (best_reg >= 0) { for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (allocno[num].hard_reg_copy_preferences, i) && HARD_REGNO_MODE_OK (i, mode) && (allocno[num].calls_crossed == 0 || accept_call_clobbered || ! 
HARD_REGNO_CALL_PART_CLOBBERED (i, mode)) && (REGNO_REG_CLASS (i) == REGNO_REG_CLASS (best_reg) || reg_class_subset_p (REGNO_REG_CLASS (i), REGNO_REG_CLASS (best_reg)) || reg_class_subset_p (REGNO_REG_CLASS (best_reg), REGNO_REG_CLASS (i)))) { int j; int lim = i + hard_regno_nregs[i][mode]; for (j = i + 1; (j < lim && ! TEST_HARD_REG_BIT (used, j) && (REGNO_REG_CLASS (j) == REGNO_REG_CLASS (best_reg + (j - i)) || reg_class_subset_p (REGNO_REG_CLASS (j), REGNO_REG_CLASS (best_reg + (j - i))) || reg_class_subset_p (REGNO_REG_CLASS (best_reg + (j - i)), REGNO_REG_CLASS (j)))); j++); if (j == lim) { best_reg = i; goto no_prefs; } } } no_copy_prefs: AND_COMPL_HARD_REG_SET (allocno[num].hard_reg_preferences, used); GO_IF_HARD_REG_SUBSET (allocno[num].hard_reg_preferences, reg_class_contents[(int) NO_REGS], no_prefs); if (best_reg >= 0) { for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (allocno[num].hard_reg_preferences, i) && HARD_REGNO_MODE_OK (i, mode) && (allocno[num].calls_crossed == 0 || accept_call_clobbered || ! HARD_REGNO_CALL_PART_CLOBBERED (i, mode)) && (REGNO_REG_CLASS (i) == REGNO_REG_CLASS (best_reg) || reg_class_subset_p (REGNO_REG_CLASS (i), REGNO_REG_CLASS (best_reg)) || reg_class_subset_p (REGNO_REG_CLASS (best_reg), REGNO_REG_CLASS (i)))) { int j; int lim = i + hard_regno_nregs[i][mode]; for (j = i + 1; (j < lim && ! TEST_HARD_REG_BIT (used, j) && (REGNO_REG_CLASS (j) == REGNO_REG_CLASS (best_reg + (j - i)) || reg_class_subset_p (REGNO_REG_CLASS (j), REGNO_REG_CLASS (best_reg + (j - i))) || reg_class_subset_p (REGNO_REG_CLASS (best_reg + (j - i)), REGNO_REG_CLASS (j)))); j++); if (j == lim) { best_reg = i; break; } } } no_prefs: /* If we haven't succeeded yet, try with caller-saves. We need not check to see if the current function has nonlocal labels because we don't put any pseudos that are live over calls in registers in that case. */ if (flag_caller_saves && best_reg < 0) { /* Did not find a register. If it would be profitable to allocate a call-clobbered register and save and restore it around calls, do that. */ if (! accept_call_clobbered && allocno[num].calls_crossed != 0 && CALLER_SAVE_PROFITABLE (allocno[num].n_refs, allocno[num].calls_crossed)) { HARD_REG_SET new_losers; if (! losers) CLEAR_HARD_REG_SET (new_losers); else COPY_HARD_REG_SET (new_losers, losers); IOR_HARD_REG_SET(new_losers, losing_caller_save_reg_set); find_reg_global (num, new_losers, alt_regs_p, 1, retrying); if (reg_renumber[allocno[num].reg] >= 0) { caller_save_needed = 1; return; } } } /* If we haven't succeeded yet, see if some hard reg that conflicts with us was utilized poorly by local-alloc. If so, kick out the regs that were put there by local-alloc so we can use it instead. */ if (best_reg < 0 && !retrying /* Let's not bother with multi-reg allocnos. */ && allocno[num].size == 1) { /* Count from the end, to find the least-used ones first. */ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--) { #ifdef REG_ALLOC_ORDER int regno = reg_alloc_order[i]; #else int regno = i; #endif if (local_reg_n_refs[regno] != 0 /* Don't use a reg no good for this pseudo. */ && ! TEST_HARD_REG_BIT (used2, regno) && HARD_REGNO_MODE_OK (regno, mode) /* The code below assumes that we need only a single register, but the check of allocno[num].size above was not enough. Sometimes we need more than one register for a single-word value. */ && hard_regno_nregs[regno][mode] == 1 && (allocno[num].calls_crossed == 0 || accept_call_clobbered || ! 
HARD_REGNO_CALL_PART_CLOBBERED (regno, mode)) #ifdef CANNOT_CHANGE_MODE_CLASS && ! invalid_mode_change_p (regno, REGNO_REG_CLASS (regno), mode) #endif #ifdef STACK_REGS && (!allocno[num].no_stack_reg || regno < FIRST_STACK_REG || regno > LAST_STACK_REG) #endif ) { /* We explicitly evaluate the divide results into temporary variables so as to avoid excess precision problems that occur on an i386-unknown-sysv4.2 (unixware) host. */ double tmp1 = ((double) local_reg_freq[regno] / local_reg_live_length[regno]); double tmp2 = ((double) allocno[num].freq / allocno[num].live_length); if (tmp1 < tmp2) { /* Hard reg REGNO was used less in total by local regs than it would be used by this one allocno! */ int k; for (k = 0; k < max_regno; k++) if (reg_renumber[k] >= 0) { int r = reg_renumber[k]; int endregno = r + hard_regno_nregs[r][PSEUDO_REGNO_MODE (k)]; if (regno >= r && regno < endregno) reg_renumber[k] = -1; } best_reg = regno; break; } } } } /* Did we find a register? */ if (best_reg >= 0) { int lim, j; HARD_REG_SET this_reg; /* Yes. Record it as the hard register of this pseudo-reg. */ reg_renumber[allocno[num].reg] = best_reg; /* Also of any pseudo-regs that share with it. */ if (reg_may_share[allocno[num].reg]) for (j = FIRST_PSEUDO_REGISTER; j < max_regno; j++) if (reg_allocno[j] == num) reg_renumber[j] = best_reg; /* Make a set of the hard regs being allocated. */ CLEAR_HARD_REG_SET (this_reg); lim = best_reg + hard_regno_nregs[best_reg][mode]; for (j = best_reg; j < lim; j++) { SET_HARD_REG_BIT (this_reg, j); SET_HARD_REG_BIT (regs_used_so_far, j); /* This is no longer a reg used just by local regs. */ local_reg_n_refs[j] = 0; local_reg_freq[j] = 0; } /* For each other pseudo-reg conflicting with this one, mark it as conflicting with the hard regs this one occupies. */ lim = num; EXECUTE_IF_SET_IN_ALLOCNO_SET (conflicts + lim * allocno_row_words, j, { IOR_HARD_REG_SET (allocno[j].hard_reg_conflicts, this_reg); }); } } /* Called from `reload' to look for a hard reg to put pseudo reg REGNO in. Perhaps it had previously seemed not worth a hard reg, or perhaps its old hard reg has been commandeered for reloads. FORBIDDEN_REGS indicates certain hard regs that may not be used, even if they do not appear to be allocated. If FORBIDDEN_REGS is zero, no regs are forbidden. */ void retry_global_alloc (int regno, HARD_REG_SET forbidden_regs) { int alloc_no = reg_allocno[regno]; if (alloc_no >= 0) { /* If we have more than one register class, first try allocating in the class that is cheapest for this pseudo-reg. If that fails, try any reg. */ if (N_REG_CLASSES > 1) find_reg_global (alloc_no, forbidden_regs, 0, 0, 1); if (reg_renumber[regno] < 0 && reg_alternate_class (regno) != NO_REGS) find_reg_global (alloc_no, forbidden_regs, 1, 0, 1); /* If we found a register, modify the RTL for the register to show the hard register, and mark that register live. */ if (reg_renumber[regno] >= 0) { REGNO (regno_reg_rtx[regno]) = reg_renumber[regno]; mark_home_live (regno); } } } /* Record a conflict between register REGNO and everything currently live. REGNO must not be a pseudo reg that was allocated by local_alloc; such numbers must be translated through reg_renumber before calling here. */ static void record_one_conflict (int regno) { int j; if (regno < FIRST_PSEUDO_REGISTER) /* When a hard register becomes live, record conflicts with live pseudo regs. 
*/ EXECUTE_IF_SET_IN_ALLOCNO_SET (allocnos_live, j, { SET_HARD_REG_BIT (allocno[j].hard_reg_conflicts, regno); }); else /* When a pseudo-register becomes live, record conflicts first with hard regs, then with other pseudo regs. */ { int ialloc = reg_allocno[regno]; int ialloc_prod = ialloc * allocno_row_words; IOR_HARD_REG_SET (allocno[ialloc].hard_reg_conflicts, hard_regs_live); for (j = allocno_row_words - 1; j >= 0; j--) conflicts[ialloc_prod + j] |= allocnos_live[j]; } } /* Record all allocnos currently live as conflicting with all hard regs currently live. ALLOCNO_VEC is a vector of LEN allocnos, all allocnos that are currently live. Their bits are also flagged in allocnos_live. */ static void record_conflicts (int *allocno_vec, int len) { while (--len >= 0) IOR_HARD_REG_SET (allocno[allocno_vec[len]].hard_reg_conflicts, hard_regs_live); } /* If CONFLICTP (i, j) is true, make sure CONFLICTP (j, i) is also true. */ static void mirror_conflicts (void) { int i, j; int rw = allocno_row_words; int rwb = rw * INT_BITS; INT_TYPE *p = conflicts; INT_TYPE *q0 = conflicts, *q1, *q2; unsigned INT_TYPE mask; for (i = max_allocno - 1, mask = 1; i >= 0; i--, mask <<= 1) { if (! mask) { mask = 1; q0++; } for (j = allocno_row_words - 1, q1 = q0; j >= 0; j--, q1 += rwb) { unsigned INT_TYPE word; for (word = (unsigned INT_TYPE) *p++, q2 = q1; word; word >>= 1, q2 += rw) { if (word & 1) *q2 |= mask; } } } } /* Handle the case where REG is set by the insn being scanned, during the forward scan to accumulate conflicts. Store a 1 in regs_live or allocnos_live for this register, record how many consecutive hardware registers it actually needs, and record a conflict with all other registers already live. Note that even if REG does not remain alive after this insn, we must mark it here as live, to ensure a conflict between REG and any other regs set in this insn that really do live. This is because those other regs could be considered after this. REG might actually be something other than a register; if so, we do nothing. SETTER is 0 if this register was modified by an auto-increment (i.e., a REG_INC note was found for it). */ static void mark_reg_store (rtx reg, rtx setter, void *data ATTRIBUTE_UNUSED) { int regno; if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); if (!REG_P (reg)) return; regs_set[n_regs_set++] = reg; if (setter && GET_CODE (setter) != CLOBBER) set_preference (reg, SET_SRC (setter)); regno = REGNO (reg); /* Either this is one of the max_allocno pseudo regs not allocated, or it is or has a hardware reg. First handle the pseudo-regs. */ if (regno >= FIRST_PSEUDO_REGISTER) { if (reg_allocno[regno] >= 0) { SET_ALLOCNO_LIVE (reg_allocno[regno]); record_one_conflict (regno); } } if (reg_renumber[regno] >= 0) regno = reg_renumber[regno]; /* Handle hardware regs (and pseudos allocated to hard regs). */ if (regno < FIRST_PSEUDO_REGISTER && ! fixed_regs[regno]) { int last = regno + hard_regno_nregs[regno][GET_MODE (reg)]; while (regno < last) { record_one_conflict (regno); SET_HARD_REG_BIT (hard_regs_live, regno); regno++; } } } /* Like mark_reg_store except notice just CLOBBERs; ignore SETs. */ static void mark_reg_clobber (rtx reg, rtx setter, void *data) { if (GET_CODE (setter) == CLOBBER) mark_reg_store (reg, setter, data); } /* Record that REG has conflicts with all the regs currently live. Do not mark REG itself as live.
*/ static void mark_reg_conflicts (rtx reg) { int regno; if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); if (!REG_P (reg)) return; regno = REGNO (reg); /* Either this is one of the max_allocno pseudo regs not allocated, or it is or has a hardware reg. First handle the pseudo-regs. */ if (regno >= FIRST_PSEUDO_REGISTER) { if (reg_allocno[regno] >= 0) record_one_conflict (regno); } if (reg_renumber[regno] >= 0) regno = reg_renumber[regno]; /* Handle hardware regs (and pseudos allocated to hard regs). */ if (regno < FIRST_PSEUDO_REGISTER && ! fixed_regs[regno]) { int last = regno + hard_regno_nregs[regno][GET_MODE (reg)]; while (regno < last) { record_one_conflict (regno); regno++; } } } /* Mark REG as being dead (following the insn being scanned now). Store a 0 in regs_live or allocnos_live for this register. */ static void mark_reg_death (rtx reg) { int regno = REGNO (reg); /* Either this is one of the max_allocno pseudo regs not allocated, or it is a hardware reg. First handle the pseudo-regs. */ if (regno >= FIRST_PSEUDO_REGISTER) { if (reg_allocno[regno] >= 0) CLEAR_ALLOCNO_LIVE (reg_allocno[regno]); } /* For pseudo reg, see if it has been assigned a hardware reg. */ if (reg_renumber[regno] >= 0) regno = reg_renumber[regno]; /* Handle hardware regs (and pseudos allocated to hard regs). */ if (regno < FIRST_PSEUDO_REGISTER && ! fixed_regs[regno]) { /* Pseudo regs already assigned hardware regs are treated almost the same as explicit hardware regs. */ int last = regno + hard_regno_nregs[regno][GET_MODE (reg)]; while (regno < last) { CLEAR_HARD_REG_BIT (hard_regs_live, regno); regno++; } } } /* Mark hard reg REGNO as currently live, assuming machine mode MODE for the value stored in it. MODE determines how many consecutive registers are actually in use. Do not record conflicts; it is assumed that the caller will do that. */ static void mark_reg_live_nc (int regno, enum machine_mode mode) { int last = regno + hard_regno_nregs[regno][mode]; while (regno < last) { SET_HARD_REG_BIT (hard_regs_live, regno); regno++; } } /* Try to set a preference for an allocno to a hard register. We are passed DEST and SRC which are the operands of a SET. It is known that SRC is a register. If SRC or the first operand of SRC is a register, try to set a preference. If one of the two is a hard register and the other is a pseudo-register, mark the preference. Note that we are not as aggressive as local-alloc in trying to tie a pseudo-register to a hard register. */ static void set_preference (rtx dest, rtx src) { unsigned int src_regno, dest_regno; /* Amount to add to the hard regno for SRC, or subtract from that for DEST, to compensate for subregs in SRC or DEST. */ int offset = 0; unsigned int i; int copy = 1; if (GET_RTX_FORMAT (GET_CODE (src))[0] == 'e') src = XEXP (src, 0), copy = 0; /* Get the reg number for both SRC and DEST. If neither is a reg, give up. 
*/ if (REG_P (src)) src_regno = REGNO (src); else if (GET_CODE (src) == SUBREG && REG_P (SUBREG_REG (src))) { src_regno = REGNO (SUBREG_REG (src)); if (REGNO (SUBREG_REG (src)) < FIRST_PSEUDO_REGISTER) offset += subreg_regno_offset (REGNO (SUBREG_REG (src)), GET_MODE (SUBREG_REG (src)), SUBREG_BYTE (src), GET_MODE (src)); else offset += (SUBREG_BYTE (src) / REGMODE_NATURAL_SIZE (GET_MODE (src))); } else return; if (REG_P (dest)) dest_regno = REGNO (dest); else if (GET_CODE (dest) == SUBREG && REG_P (SUBREG_REG (dest))) { dest_regno = REGNO (SUBREG_REG (dest)); if (REGNO (SUBREG_REG (dest)) < FIRST_PSEUDO_REGISTER) offset -= subreg_regno_offset (REGNO (SUBREG_REG (dest)), GET_MODE (SUBREG_REG (dest)), SUBREG_BYTE (dest), GET_MODE (dest)); else offset -= (SUBREG_BYTE (dest) / REGMODE_NATURAL_SIZE (GET_MODE (dest))); } else return; /* Convert either or both to hard reg numbers. */ if (reg_renumber[src_regno] >= 0) src_regno = reg_renumber[src_regno]; if (reg_renumber[dest_regno] >= 0) dest_regno = reg_renumber[dest_regno]; /* Now if one is a hard reg and the other is a global pseudo then give the other a preference. */ if (dest_regno < FIRST_PSEUDO_REGISTER && src_regno >= FIRST_PSEUDO_REGISTER && reg_allocno[src_regno] >= 0) { dest_regno -= offset; if (dest_regno < FIRST_PSEUDO_REGISTER) { if (copy) SET_REGBIT (hard_reg_copy_preferences, reg_allocno[src_regno], dest_regno); SET_REGBIT (hard_reg_preferences, reg_allocno[src_regno], dest_regno); for (i = dest_regno; i < dest_regno + hard_regno_nregs[dest_regno][GET_MODE (dest)]; i++) SET_REGBIT (hard_reg_full_preferences, reg_allocno[src_regno], i); } } if (src_regno < FIRST_PSEUDO_REGISTER && dest_regno >= FIRST_PSEUDO_REGISTER && reg_allocno[dest_regno] >= 0) { src_regno += offset; if (src_regno < FIRST_PSEUDO_REGISTER) { if (copy) SET_REGBIT (hard_reg_copy_preferences, reg_allocno[dest_regno], src_regno); SET_REGBIT (hard_reg_preferences, reg_allocno[dest_regno], src_regno); for (i = src_regno; i < src_regno + hard_regno_nregs[src_regno][GET_MODE (src)]; i++) SET_REGBIT (hard_reg_full_preferences, reg_allocno[dest_regno], i); } } } /* Indicate that hard register number FROM was eliminated and replaced with an offset from hard register number TO. The status of hard registers live at the start of a basic block is updated by replacing a use of FROM with a use of TO. */ void mark_elimination (int from, int to) { basic_block bb; FOR_EACH_BB (bb) { regset r = bb->global_live_at_start; if (REGNO_REG_SET_P (r, from)) { CLEAR_REGNO_REG_SET (r, from); SET_REGNO_REG_SET (r, to); } } } /* Used for communication between the following functions. Holds the current life information. */ static regset live_relevant_regs; /* Record in live_relevant_regs and REGS_SET that register REG became live. This is called via note_stores. */ static void reg_becomes_live_global (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *regs_set) { int regno; if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); if (!REG_P (reg)) return; regno = REGNO (reg); if (regno < FIRST_PSEUDO_REGISTER) { int nregs = hard_regno_nregs[regno][GET_MODE (reg)]; while (nregs-- > 0) { SET_REGNO_REG_SET (live_relevant_regs, regno); if (! fixed_regs[regno]) SET_REGNO_REG_SET ((regset) regs_set, regno); regno++; } } else if (reg_renumber[regno] >= 0) { SET_REGNO_REG_SET (live_relevant_regs, regno); SET_REGNO_REG_SET ((regset) regs_set, regno); } } /* Record in live_relevant_regs that register REGNO died. 
*/ static void reg_dies_global (int regno, enum machine_mode mode, struct insn_chain *chain) { if (regno < FIRST_PSEUDO_REGISTER) { int nregs = hard_regno_nregs[regno][mode]; while (nregs-- > 0) { CLEAR_REGNO_REG_SET (live_relevant_regs, regno); if (! fixed_regs[regno]) SET_REGNO_REG_SET (&chain->dead_or_set, regno); regno++; } } else { CLEAR_REGNO_REG_SET (live_relevant_regs, regno); if (reg_renumber[regno] >= 0) SET_REGNO_REG_SET (&chain->dead_or_set, regno); } } /* Walk the insns of the current function and build reload_insn_chain, and record register life information. */ void build_insn_chain (rtx first) { struct insn_chain **p = &reload_insn_chain; struct insn_chain *prev = 0; basic_block b = ENTRY_BLOCK_PTR->next_bb; regset_head live_relevant_regs_head; live_relevant_regs = INITIALIZE_REG_SET (live_relevant_regs_head); for (; first; first = NEXT_INSN (first)) { struct insn_chain *c; if (first == BB_HEAD (b)) { int i; CLEAR_REG_SET (live_relevant_regs); EXECUTE_IF_SET_IN_BITMAP (b->global_live_at_start, 0, i, { if (i < FIRST_PSEUDO_REGISTER ? ! TEST_HARD_REG_BIT (eliminable_regset, i) : reg_renumber[i] >= 0) SET_REGNO_REG_SET (live_relevant_regs, i); }); } if (GET_CODE (first) != NOTE && GET_CODE (first) != BARRIER) { c = new_insn_chain (); c->prev = prev; prev = c; *p = c; p = &c->next; c->insn = first; c->block = b->index; if (INSN_P (first)) { rtx link; /* Mark the death of everything that dies in this instruction. */ for (link = REG_NOTES (first); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_DEAD && REG_P (XEXP (link, 0))) reg_dies_global (REGNO (XEXP (link, 0)), GET_MODE (XEXP (link, 0)), c); COPY_REG_SET (&c->live_throughout, live_relevant_regs); /* Mark everything born in this instruction as live. */ note_stores (PATTERN (first), reg_becomes_live_global, &c->dead_or_set); } else COPY_REG_SET (&c->live_throughout, live_relevant_regs); if (INSN_P (first)) { rtx link; /* Mark anything that is set in this insn and then unused as dying. */ for (link = REG_NOTES (first); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_UNUSED && REG_P (XEXP (link, 0))) reg_dies_global (REGNO (XEXP (link, 0)), GET_MODE (XEXP (link, 0)), c); } } if (first == BB_END (b)) b = b->next_bb; /* Stop after we pass the end of the last basic block. Verify that no real insns are after the end of the last basic block. We may want to reorganize the loop somewhat since this test should always be the right exit test. Allow an ADDR_VEC or ADDR_DIF_VEC if the previous real insn is a JUMP_INSN. */ if (b == EXIT_BLOCK_PTR) { for (first = NEXT_INSN (first) ; first; first = NEXT_INSN (first)) if (INSN_P (first) && GET_CODE (PATTERN (first)) != USE && ! ((GET_CODE (PATTERN (first)) == ADDR_VEC || GET_CODE (PATTERN (first)) == ADDR_DIFF_VEC) && prev_real_insn (first) != 0 && GET_CODE (prev_real_insn (first)) == JUMP_INSN)) abort (); break; } } FREE_REG_SET (live_relevant_regs); *p = 0; } /* Print debugging trace information if -dg switch is given, showing the information on which the allocation decisions are based. 
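   With -dg the per-function dump produced below looks roughly like this
   (all numbers illustrative):

       ;; 2 regs to allocate: 66 67 (2)
       ;; 66 conflicts: 67 0 1
       ;; 66 preferences: 1
       ;; 67 conflicts: 66

   i.e. one line listing the pseudos still needing a hard register (with
   "+n" appended for pseudos sharing an allocno, and the allocno size in
   parentheses when it is not 1), then for each allocno its conflicting
   pseudos and hard registers and, when present, its hard register
   preferences.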
*/ static void dump_conflicts (FILE *file) { int i; int has_preferences; int nregs; nregs = 0; for (i = 0; i < max_allocno; i++) { if (reg_renumber[allocno[allocno_order[i]].reg] >= 0) continue; nregs++; } fprintf (file, ";; %d regs to allocate:", nregs); for (i = 0; i < max_allocno; i++) { int j; if (reg_renumber[allocno[allocno_order[i]].reg] >= 0) continue; fprintf (file, " %d", allocno[allocno_order[i]].reg); for (j = 0; j < max_regno; j++) if (reg_allocno[j] == allocno_order[i] && j != allocno[allocno_order[i]].reg) fprintf (file, "+%d", j); if (allocno[allocno_order[i]].size != 1) fprintf (file, " (%d)", allocno[allocno_order[i]].size); } fprintf (file, "\n"); for (i = 0; i < max_allocno; i++) { int j; fprintf (file, ";; %d conflicts:", allocno[i].reg); for (j = 0; j < max_allocno; j++) if (CONFLICTP (j, i)) fprintf (file, " %d", allocno[j].reg); for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) if (TEST_HARD_REG_BIT (allocno[i].hard_reg_conflicts, j)) fprintf (file, " %d", j); fprintf (file, "\n"); has_preferences = 0; for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) if (TEST_HARD_REG_BIT (allocno[i].hard_reg_preferences, j)) has_preferences = 1; if (! has_preferences) continue; fprintf (file, ";; %d preferences:", allocno[i].reg); for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) if (TEST_HARD_REG_BIT (allocno[i].hard_reg_preferences, j)) fprintf (file, " %d", j); fprintf (file, "\n"); } fprintf (file, "\n"); } void dump_global_regs (FILE *file) { int i, j; fprintf (file, ";; Register dispositions:\n"); for (i = FIRST_PSEUDO_REGISTER, j = 0; i < max_regno; i++) if (reg_renumber[i] >= 0) { fprintf (file, "%d in %d ", i, reg_renumber[i]); if (++j % 6 == 0) fprintf (file, "\n"); } fprintf (file, "\n\n;; Hard regs used: "); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (regs_ever_live[i]) fprintf (file, " %d", i); fprintf (file, "\n\n"); } /* This page contains code to make live information more accurate. The accurate register liveness at program point P means: o there is a path from P to usage of the register and the register is not redefined or killed on the path. o register at P is partially available, i.e. there is a path from a register definition to the point P and the register is not killed (clobbered) on the path The standard GCC live information means only the first condition. Without the partial availability, there will be more register conflicts and as a consequence worse register allocation. The typical example where the information can be different is a register initialized in the loop at the basic block preceding the loop in CFG. */ /* The following structure contains basic block data flow information used to calculate partial availability of registers. */ struct bb_global_info { /* The basic block reverse post-order number. */ int rts_number; /* Registers correspondingly killed (clobbered) and defined but not killed afterward in the basic block. */ bitmap killed, avloc; /* Registers partially available correspondingly at the start and end of the basic block. */ bitmap pavin, pavout; }; /* Macros for accessing data flow information of basic blocks. */ #define GLOBAL_BB_INFO(BB) ((struct bb_global_info *) (BB)->aux) #define BB_INFO_BY_INDEX(N) GLOBAL_BB_INFO (BASIC_BLOCK(N)) /* The function allocates the info structures of each basic block. It also initialized PAVIN and PAVOUT as if all hard registers were partially available. 
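   The per-block KILLED and AVLOC sets are filled in afterwards by
   mark_reg_change below; for example (register numbers illustrative) a
   block containing a SET of reg 70 followed by a CLOBBER of reg 71 ends
   up with killed = {70, 71} and avloc = {70}, since a clobber kills the
   old value without making a new one available.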
*/ static void allocate_bb_info (void) { int i; basic_block bb; struct bb_global_info *bb_info; bitmap init; alloc_aux_for_blocks (sizeof (struct bb_global_info)); init = BITMAP_XMALLOC (); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) bitmap_set_bit (init, i); FOR_EACH_BB (bb) { bb_info = bb->aux; bb_info->avloc = BITMAP_XMALLOC (); bb_info->killed = BITMAP_XMALLOC (); bb_info->pavin = BITMAP_XMALLOC (); bb_info->pavout = BITMAP_XMALLOC (); bitmap_copy (bb_info->pavin, init); bitmap_copy (bb_info->pavout, init); } BITMAP_XFREE (init); } /* The function frees the allocated info of all basic blocks. */ static void free_bb_info_global (void) { basic_block bb; struct bb_global_info *bb_info; FOR_EACH_BB (bb) { bb_info = GLOBAL_BB_INFO (bb); BITMAP_XFREE (bb_info->pavout); BITMAP_XFREE (bb_info->pavin); BITMAP_XFREE (bb_info->killed); BITMAP_XFREE (bb_info->avloc); } free_aux_for_blocks (); } /* The function modifies local info for register REG being changed in SETTER. DATA is used to pass the current basic block info. */ static void mark_reg_change (rtx reg, rtx setter, void *data) { int regno; basic_block bb = data; struct bb_global_info *bb_info = GLOBAL_BB_INFO (bb); if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); if (!REG_P (reg)) return; regno = REGNO (reg); bitmap_set_bit (bb_info->killed, regno); if (GET_CODE (setter) != CLOBBER) bitmap_set_bit (bb_info->avloc, regno); else bitmap_clear_bit (bb_info->avloc, regno); } /* The function calculates local info for each basic block. */ static void calculate_local_reg_bb_info (void) { basic_block bb; rtx insn, bound; FOR_EACH_BB (bb) { bound = NEXT_INSN (BB_END (bb)); for (insn = BB_HEAD (bb); insn != bound; insn = NEXT_INSN (insn)) if (INSN_P (insn)) note_stores (PATTERN (insn), mark_reg_change, bb); } } /* The function sets up reverse post-order number of each basic block. */ static void set_up_bb_rts_numbers (void) { int i; int *rts_order; rts_order = xmalloc (sizeof (int) * n_basic_blocks); flow_reverse_top_sort_order_compute (rts_order); for (i = 0; i < n_basic_blocks; i++) BB_INFO_BY_INDEX (rts_order [i])->rts_number = i; free (rts_order); } /* Compare function for sorting blocks in reverse postorder. */ static int rpost_cmp (const void *bb1, const void *bb2) { basic_block b1 = *(basic_block *) bb1, b2 = *(basic_block *) bb2; return GLOBAL_BB_INFO (b2)->rts_number - GLOBAL_BB_INFO (b1)->rts_number; } /* The function calculates partial availability of registers. The function calculates partial availability at the end of basic block BB by propagating partial availability at end of predecessor basic block PRED. The function returns true if the partial availability at the end of BB has been changed or if CHANGED_P. We have the following equations: bb.pavin = empty for entry block | union (pavout of predecessors) bb.pavout = union (bb.pavin - b.killed, bb.avloc) */ static bool modify_bb_reg_pav (basic_block bb, basic_block pred, bool changed_p) { struct bb_global_info *bb_info; bitmap bb_pavin, bb_pavout; bb_info = GLOBAL_BB_INFO (bb); bb_pavin = bb_info->pavin; bb_pavout = bb_info->pavout; if (pred->index != ENTRY_BLOCK) bitmap_a_or_b (bb_pavin, bb_pavin, GLOBAL_BB_INFO (pred)->pavout); changed_p |= bitmap_union_of_diff (bb_pavout, bb_info->avloc, bb_pavin, bb_info->killed); return changed_p; } /* The function calculates partial register availability. 
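   It seeds a worklist with every basic block, sorts the list by the
   reverse post-order numbers computed above, and re-queues the successors
   of any block whose PAVOUT changed, until nothing changes any more.
   Worked example (register number illustrative): let B1 fall into a loop
   whose only body block is B2, let B1 set pseudo 70 (so avloc (B1)
   contains 70) and let B2 neither set nor clobber it.  The equations
   above put 70 into pavout (B1), the edge B1->B2 then puts it into
   pavin (B2), and because 70 is not in killed (B2) it also reaches
   pavout (B2); a second pass over B2 changes nothing, so the iteration
   stops with 70 known to be partially available throughout the loop,
   which is exactly the typical case mentioned at the top of this page.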
*/ static void calculate_reg_pav (void) { basic_block bb, succ; edge e; bool changed_p; int i, nel; varray_type bbs, new_bbs, temp; basic_block *bb_array; sbitmap wset; VARRAY_BB_INIT (bbs, n_basic_blocks, "basic blocks"); VARRAY_BB_INIT (new_bbs, n_basic_blocks, "basic blocks for the next iter."); FOR_EACH_BB (bb) { VARRAY_PUSH_BB (bbs, bb); } wset = sbitmap_alloc (n_basic_blocks + 1); while (VARRAY_ACTIVE_SIZE (bbs)) { bb_array = &VARRAY_BB (bbs, 0); nel = VARRAY_ACTIVE_SIZE (bbs); qsort (bb_array, nel, sizeof (basic_block), rpost_cmp); sbitmap_zero (wset); for (i = 0; i < nel; i++) { bb = bb_array [i]; changed_p = 0; for (e = bb->pred; e; e = e->pred_next) changed_p = modify_bb_reg_pav (bb, e->src, changed_p); if (changed_p) for (e = bb->succ; e; e = e->succ_next) { succ = e->dest; if (succ->index != EXIT_BLOCK && !TEST_BIT (wset, succ->index)) { SET_BIT (wset, succ->index); VARRAY_PUSH_BB (new_bbs, succ); } } } temp = bbs; bbs = new_bbs; new_bbs = temp; VARRAY_POP_ALL (new_bbs); } sbitmap_free (wset); } /* The following function makes live information more accurate by modifying global_live_at_start and global_live_at_end of basic blocks. After the function call a register lives at a program point only if it is initialized on a path from CFG entry to the program point. The standard GCC life analysis permits registers to live uninitialized. */ static void make_accurate_live_analysis (void) { basic_block bb; struct bb_global_info *bb_info; max_regno = max_reg_num (); compact_blocks (); allocate_bb_info (); calculate_local_reg_bb_info (); set_up_bb_rts_numbers (); calculate_reg_pav (); FOR_EACH_BB (bb) { bb_info = GLOBAL_BB_INFO (bb); bitmap_a_and_b (bb->global_live_at_start, bb->global_live_at_start, bb_info->pavin); bitmap_a_and_b (bb->global_live_at_end, bb->global_live_at_end, bb_info->pavout); } free_bb_info_global (); } /* Output routines for graphical representation. Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. Contributed by Ulrich Drepper , 1998. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Header file for graph routines. Copyright (C) 1999, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #ifndef GCC_GRAPH_H #define GCC_GRAPH_H extern void print_rtl_graph_with_bb (const char *, const char *, rtx); extern void clean_graph_dump_file (const char *, const char *); extern void finish_graph_dump_file (const char *, const char *); #endif /* ! GCC_GRAPH_H */ static const char *const graph_ext[] = { /* no_graph */ "", /* vcg */ ".vcg", }; static void start_fct (FILE *); static void start_bb (FILE *, int); static void node_data (FILE *, rtx); static void draw_edge (FILE *, int, int, int, int); static void end_fct (FILE *); static void end_bb (FILE *); /* Output text for new basic block. */ static void start_fct (FILE *fp) { switch (graph_dump_format) { case vcg: fprintf (fp, "\ graph: { title: \"%s\"\nfolding: 1\nhidden: 2\nnode: { title: \"%s.0\" }\n", current_function_name (), current_function_name ()); break; case no_graph: break; } } static void start_bb (FILE *fp, int bb) { switch (graph_dump_format) { case vcg: fprintf (fp, "\ graph: {\ntitle: \"%s.BB%d\"\nfolding: 1\ncolor: lightblue\n\ label: \"basic block %d", current_function_name (), bb, bb); break; case no_graph: break; } #if 0 /* FIXME Should this be printed? It makes the graph significantly larger. */ /* Print the live-at-start register list. */ fputc ('\n', fp); EXECUTE_IF_SET_IN_REG_SET (basic_block_live_at_start[bb], 0, i, { fprintf (fp, " %d", i); if (i < FIRST_PSEUDO_REGISTER) fprintf (fp, " [%s]", reg_names[i]); }); #endif switch (graph_dump_format) { case vcg: fputs ("\"\n\n", fp); break; case no_graph: break; } } static void node_data (FILE *fp, rtx tmp_rtx) { if (PREV_INSN (tmp_rtx) == 0) { /* This is the first instruction. Add an edge from the starting block. */ switch (graph_dump_format) { case vcg: fprintf (fp, "\ edge: { sourcename: \"%s.0\" targetname: \"%s.%d\" }\n", current_function_name (), current_function_name (), XINT (tmp_rtx, 0)); break; case no_graph: break; } } switch (graph_dump_format) { case vcg: fprintf (fp, "node: {\n title: \"%s.%d\"\n color: %s\n \ label: \"%s %d\n", current_function_name (), XINT (tmp_rtx, 0), GET_CODE (tmp_rtx) == NOTE ? "lightgrey" : GET_CODE (tmp_rtx) == INSN ? "green" : GET_CODE (tmp_rtx) == JUMP_INSN ? "darkgreen" : GET_CODE (tmp_rtx) == CALL_INSN ? "darkgreen" : GET_CODE (tmp_rtx) == CODE_LABEL ? "\ darkgrey\n shape: ellipse" : "white", GET_RTX_NAME (GET_CODE (tmp_rtx)), XINT (tmp_rtx, 0)); break; case no_graph: break; } /* Print the RTL. 
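   For reference, an ordinary INSN with UID 30 in a function foo comes out
   in the VCG dump roughly as (UID, function name and pattern illustrative):

       node: {
        title: "foo.30"
        color: green
        label: "insn 30
       (set (reg:SI 60) (reg:SI 61))"
       }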
*/ if (GET_CODE (tmp_rtx) == NOTE) { const char *name = ""; if (NOTE_LINE_NUMBER (tmp_rtx) < 0) name = GET_NOTE_INSN_NAME (NOTE_LINE_NUMBER (tmp_rtx)); fprintf (fp, " %s", name); } else if (INSN_P (tmp_rtx)) print_rtl_single (fp, PATTERN (tmp_rtx)); else print_rtl_single (fp, tmp_rtx); switch (graph_dump_format) { case vcg: fputs ("\"\n}\n", fp); break; case no_graph: break; } } static void draw_edge (FILE *fp, int from, int to, int bb_edge, int class) { const char * color; switch (graph_dump_format) { case vcg: color = ""; if (class == 2) color = "color: red "; else if (bb_edge) color = "color: blue "; else if (class == 3) color = "color: green "; fprintf (fp, "edge: { sourcename: \"%s.%d\" targetname: \"%s.%d\" %s", current_function_name (), from, current_function_name (), to, color); if (class) fprintf (fp, "class: %d ", class); fputs ("}\n", fp); break; case no_graph: break; } } static void end_bb (FILE *fp) { switch (graph_dump_format) { case vcg: fputs ("}\n", fp); break; case no_graph: break; } } static void end_fct (FILE *fp) { switch (graph_dump_format) { case vcg: fprintf (fp, "node: { title: \"%s.999999\" label: \"END\" }\n}\n", current_function_name ()); break; case no_graph: break; } } /* Like print_rtl, but also print out live information for the start of each basic block. */ void print_rtl_graph_with_bb (const char *base, const char *suffix, rtx rtx_first) { rtx tmp_rtx; size_t namelen = strlen (base); size_t suffixlen = strlen (suffix); size_t extlen = strlen (graph_ext[graph_dump_format]) + 1; char *buf = alloca (namelen + suffixlen + extlen); FILE *fp; if (basic_block_info == NULL) return; memcpy (buf, base, namelen); memcpy (buf + namelen, suffix, suffixlen); memcpy (buf + namelen + suffixlen, graph_ext[graph_dump_format], extlen); fp = fopen (buf, "a"); if (fp == NULL) return; if (rtx_first == 0) fprintf (fp, "(nil)\n"); else { enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB }; int max_uid = get_max_uid (); int *start = xmalloc (max_uid * sizeof (int)); int *end = xmalloc (max_uid * sizeof (int)); enum bb_state *in_bb_p = xmalloc (max_uid * sizeof (enum bb_state)); basic_block bb; int i; for (i = 0; i < max_uid; ++i) { start[i] = end[i] = -1; in_bb_p[i] = NOT_IN_BB; } FOR_EACH_BB_REVERSE (bb) { rtx x; start[INSN_UID (BB_HEAD (bb))] = bb->index; end[INSN_UID (BB_END (bb))] = bb->index; for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x)) { in_bb_p[INSN_UID (x)] = (in_bb_p[INSN_UID (x)] == NOT_IN_BB) ? IN_ONE_BB : IN_MULTIPLE_BB; if (x == BB_END (bb)) break; } } /* Tell print-rtl that we want graph output. */ dump_for_graph = 1; /* Start new function. */ start_fct (fp); for (tmp_rtx = NEXT_INSN (rtx_first); NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx)) { int edge_printed = 0; rtx next_insn; if (start[INSN_UID (tmp_rtx)] < 0 && end[INSN_UID (tmp_rtx)] < 0) { if (GET_CODE (tmp_rtx) == BARRIER) continue; if (GET_CODE (tmp_rtx) == NOTE && (1 || in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB)) continue; } if ((i = start[INSN_UID (tmp_rtx)]) >= 0) { /* We start a subgraph for each basic block. */ start_bb (fp, i); if (i == 0) draw_edge (fp, 0, INSN_UID (tmp_rtx), 1, 0); } /* Print the data for this node. */ node_data (fp, tmp_rtx); next_insn = next_nonnote_insn (tmp_rtx); if ((i = end[INSN_UID (tmp_rtx)]) >= 0) { edge e; bb = BASIC_BLOCK (i); /* End of the basic block. */ end_bb (fp); /* Now specify the edges to all the successors of this basic block. 
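   In VCG format each such edge is emitted by draw_edge roughly as
   (names and UIDs illustrative)

       edge: { sourcename: "foo.30" targetname: "foo.47" color: blue }

   for an ordinary edge that is not the textual fall-through, with
   "color: red class: 2" instead for an abnormal edge and no color at all
   for the fall-through case.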
*/ for (e = bb->succ; e ; e = e->succ_next) { if (e->dest != EXIT_BLOCK_PTR) { rtx block_head = BB_HEAD (e->dest); draw_edge (fp, INSN_UID (tmp_rtx), INSN_UID (block_head), next_insn != block_head, (e->flags & EDGE_ABNORMAL ? 2 : 0)); if (block_head == next_insn) edge_printed = 1; } else { draw_edge (fp, INSN_UID (tmp_rtx), 999999, next_insn != 0, (e->flags & EDGE_ABNORMAL ? 2 : 0)); if (next_insn == 0) edge_printed = 1; } } } if (!edge_printed) { /* Don't print edges to barriers. */ if (next_insn == 0 || GET_CODE (next_insn) != BARRIER) draw_edge (fp, XINT (tmp_rtx, 0), next_insn ? INSN_UID (next_insn) : 999999, 0, 0); else { /* We draw the remaining edges in class 3. We have to skip over the barrier since these nodes are not printed at all. */ do next_insn = NEXT_INSN (next_insn); while (next_insn && (GET_CODE (next_insn) == NOTE || GET_CODE (next_insn) == BARRIER)); draw_edge (fp, XINT (tmp_rtx, 0), next_insn ? INSN_UID (next_insn) : 999999, 0, 3); } } } dump_for_graph = 0; end_fct (fp); /* Clean up. */ free (start); free (end); free (in_bb_p); } fclose (fp); } /* Similar as clean_dump_file, but this time for graph output files. */ void clean_graph_dump_file (const char *base, const char *suffix) { size_t namelen = strlen (base); size_t suffixlen = strlen (suffix); size_t extlen = strlen (graph_ext[graph_dump_format]) + 1; char *buf = alloca (namelen + extlen + suffixlen); FILE *fp; memcpy (buf, base, namelen); memcpy (buf + namelen, suffix, suffixlen); memcpy (buf + namelen + suffixlen, graph_ext[graph_dump_format], extlen); fp = fopen (buf, "w"); if (fp == NULL) fatal_error ("can't open %s: %m", buf); switch (graph_dump_format) { case vcg: fputs ("graph: {\nport_sharing: no\n", fp); break; case no_graph: abort (); } fclose (fp); } /* Do final work on the graph output file. */ void finish_graph_dump_file (const char *base, const char *suffix) { size_t namelen = strlen (base); size_t suffixlen = strlen (suffix); size_t extlen = strlen (graph_ext[graph_dump_format]) + 1; char *buf = alloca (namelen + suffixlen + extlen); FILE *fp; memcpy (buf, base, namelen); memcpy (buf + namelen, suffix, suffixlen); memcpy (buf + namelen + suffixlen, graph_ext[graph_dump_format], extlen); fp = fopen (buf, "a"); if (fp != NULL) { switch (graph_dump_format) { case vcg: fputs ("}\n", fp); break; case no_graph: abort (); } fclose (fp); } } /* Instruction scheduling pass. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Instruction scheduling pass. This file, along with sched-deps.c, contains the generic parts. The actual entry point is found for the normal instruction scheduling pass is found in sched-rgn.c. 
We compute insn priorities based on data dependencies. Flow analysis only creates a fraction of the data-dependencies we must observe: namely, only those dependencies which the combiner can be expected to use. For this pass, we must therefore create the remaining dependencies we need to observe: register dependencies, memory dependencies, dependencies to keep function calls in order, and the dependence between a conditional branch and the setting of condition codes are all dealt with here. The scheduler first traverses the data flow graph, starting with the last instruction, and proceeding to the first, assigning values to insn_priority as it goes. This sorts the instructions topologically by data dependence. Once priorities have been established, we order the insns using list scheduling. This works as follows: starting with a list of all the ready insns, and sorted according to priority number, we schedule the insn from the end of the list by placing its predecessors in the list according to their priority order. We consider this insn scheduled by setting the pointer to the "end" of the list to point to the previous insn. When an insn has no predecessors, we either queue it until sufficient time has elapsed or add it to the ready list. As the instructions are scheduled or when stalls are introduced, the queue advances and dumps insns into the ready list. When all insns down to the lowest priority have been scheduled, the critical path of the basic block has been made as short as possible. The remaining insns are then scheduled in remaining slots. Function unit conflicts are resolved during forward list scheduling by tracking the time when each insn is committed to the schedule and from that, the time the function units it uses must be free. As insns on the ready list are considered for scheduling, those that would result in a blockage of the already committed insns are queued until no blockage will result. The following list shows the order in which we want to break ties among insns in the ready list: 1. choose insn with the longest path to end of bb, ties broken by 2. choose insn with least contribution to register pressure, ties broken by 3. prefer in-block upon interblock motion, ties broken by 4. prefer useful upon speculative motion, ties broken by 5. choose insn with largest control flow probability, ties broken by 6. choose insn with the least dependences upon the previously scheduled insn, or finally 7 choose the insn which has the most insns dependent on it. 8. choose insn with lowest UID. Memory references complicate matters. Only if we can be certain that memory references are not part of the data dependency graph (via true, anti, or output dependence), can we move operations past memory references. To first approximation, reads can be done independently, while writes introduce dependencies. Better approximations will yield fewer dependencies. Before reload, an extended analysis of interblock data dependences is required for interblock scheduling. This is performed in compute_block_backward_dependences (). Dependencies set up by memory references are treated in exactly the same way as other dependencies, by using LOG_LINKS backward dependences. LOG_LINKS are translated into INSN_DEPEND forward dependences for the purpose of forward list scheduling. Having optimized the critical path, we may have also unduly extended the lifetimes of some registers. 
If an operation requires that constants be loaded into registers, it is certainly desirable to load those constants as early as necessary, but no earlier. I.e., it will not do to load up a bunch of registers at the beginning of a basic block only to use them at the end, if they could be loaded later, since this may result in excessive register utilization. Note that since branches are never in basic blocks, but only end basic blocks, this pass will not move branches. But that is ok, since we can use GNU's delayed branch scheduling pass to take care of this case. Also note that no further optimizations based on algebraic identities are performed, so this pass would be a good one to perform instruction splitting, such as breaking up a multiply instruction into shifts and adds where that is profitable. Given the memory aliasing analysis that this pass should perform, it should be possible to remove redundant stores to memory, and to load values from registers instead of hitting memory. Before reload, speculative insns are moved only if a 'proof' exists that no exception will be caused by this, and if no live registers exist that inhibit the motion (live registers constraints are not represented by data dependence edges). This pass must update information that subsequent passes expect to be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths, reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END. The information in the line number notes is carefully retained by this pass. Notes that refer to the starting and ending of exception regions are also carefully retained by this pass. All other NOTE insns are grouped in their same relative order at the beginning of basic blocks and regions that have been scheduled. */ #ifdef INSN_SCHEDULING /* issue_rate is the number of insns that can be scheduled in the same machine cycle. It can be defined in the config/mach/mach.h file, otherwise we set it to 1. */ static int issue_rate; /* If the following variable value is nonzero, the scheduler inserts bubbles (nop insns). The value of variable affects on scheduler behavior only if automaton pipeline interface with multipass scheduling is used and hook dfa_bubble is defined. */ int insert_schedule_bubbles_p = 0; /* sched-verbose controls the amount of debugging output the scheduler prints. It is controlled by -fsched-verbose=N: N>0 and no -DSR : the output is directed to stderr. N>=10 will direct the printouts to stderr (regardless of -dSR). N=1: same as -dSR. N=2: bb's probabilities, detailed ready list info, unit/insn info. N=3: rtl at abort point, control-flow, regions info. N=5: dependences info. */ static int sched_verbose_param = 0; int sched_verbose = 0; /* Debugging file. All printouts are sent to dump, which is always set, either to stderr, or to the dump listing file (-dRS). */ FILE *sched_dump = 0; /* Highest uid before scheduling. */ static int old_max_uid; /* fix_sched_param() is called from toplev.c upon detection of the -fsched-verbose=N option. */ void fix_sched_param (const char *param, const char *val) { if (!strcmp (param, "verbose")) sched_verbose_param = atoi (val); else warning ("fix_sched_param: unknown param: %s", param); } struct haifa_insn_data *h_i_d; #define LINE_NOTE(INSN) (h_i_d[INSN_UID (INSN)].line_note) #define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick) /* Vector indexed by basic block number giving the starting line-number for each basic block. */ static rtx *line_note_head; /* List of important notes we must keep around. 
This is a pointer to the last element in the list. */ static rtx note_list; /* Queues, etc. */ /* An instruction is ready to be scheduled when all insns preceding it have already been scheduled. It is important to ensure that all insns which use its result will not be executed until its result has been computed. An insn is maintained in one of four structures: (P) the "Pending" set of insns which cannot be scheduled until their dependencies have been satisfied. (Q) the "Queued" set of insns that can be scheduled when sufficient time has passed. (R) the "Ready" list of unscheduled, uncommitted insns. (S) the "Scheduled" list of insns. Initially, all insns are either "Pending" or "Ready" depending on whether their dependencies are satisfied. Insns move from the "Ready" list to the "Scheduled" list as they are committed to the schedule. As this occurs, the insns in the "Pending" list have their dependencies satisfied and move to either the "Ready" list or the "Queued" set depending on whether sufficient time has passed to make them ready. As time passes, insns move from the "Queued" set to the "Ready" list. Insns may move from the "Ready" list to the "Queued" set if they are blocked due to a function unit conflict. The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled insns, i.e., those that are ready, queued, and pending. The "Queued" set (Q) is implemented by the variable `insn_queue'. The "Ready" list (R) is implemented by the variables `ready' and `n_ready'. The "Scheduled" list (S) is the new insn chain built by this pass. The transition (R->S) is implemented in the scheduling loop in `schedule_block' when the best insn to schedule is chosen. The transition (R->Q) is implemented in `queue_insn' when an insn is found to have a function unit conflict with the already committed insns. The transitions (P->R and P->Q) are implemented in `schedule_insn' as insns move from the ready list to the scheduled list. The transition (Q->R) is implemented in 'queue_to_insn' as time passes or stalls are introduced. */ /* Implement a circular buffer to delay instructions until sufficient time has passed. For the old pipeline description interface, INSN_QUEUE_SIZE is a power of two larger than MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c. For the new pipeline description interface, MAX_INSN_QUEUE_INDEX is a power of two minus one which is larger than maximal time of instruction execution computed by genattr.c on the base maximal time of functional unit reservations and getting a result. This is the longest time an insn may be queued. */ #define MAX_INSN_QUEUE_INDEX max_insn_queue_index_macro_value static rtx *insn_queue; static int q_ptr = 0; static int q_size = 0; #define NEXT_Q(X) (((X)+1) & MAX_INSN_QUEUE_INDEX) #define NEXT_Q_AFTER(X, C) (((X)+C) & MAX_INSN_QUEUE_INDEX) /* The following variable defines value for macro MAX_INSN_QUEUE_INDEX. */ static int max_insn_queue_index_macro_value; /* The following variable value refers for all current and future reservations of the processor units. */ state_t curr_state; /* The following variable value is size of memory representing all current and future reservations of the processor units. It is used only by DFA based scheduler. */ static size_t dfa_state_size; /* The following array is used to find the best insn from ready when the automaton pipeline interface is used. */ static char *ready_try; /* Describe the ready list of the scheduler. VEC holds space enough for all insns in the current region. VECLEN says how many exactly. 
FIRST is the index of the element with the highest priority; i.e. the last one in the ready list, since elements are ordered by ascending priority. N_READY determines how many insns are on the ready list. */ struct ready_list { rtx *vec; int veclen; int first; int n_ready; }; static int may_trap_exp (rtx, int); /* Nonzero iff the address is comprised from at most 1 register. */ #define CONST_BASED_ADDRESS_P(x) \ (REG_P (x) \ || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \ || (GET_CODE (x) == LO_SUM)) \ && (CONSTANT_P (XEXP (x, 0)) \ || CONSTANT_P (XEXP (x, 1))))) /* Returns a class that insn with GET_DEST(insn)=x may belong to, as found by analyzing insn's expression. */ static int may_trap_exp (rtx x, int is_store) { enum rtx_code code; if (x == 0) return TRAP_FREE; code = GET_CODE (x); if (is_store) { if (code == MEM && may_trap_p (x)) return TRAP_RISKY; else return TRAP_FREE; } if (code == MEM) { /* The insn uses memory: a volatile load. */ if (MEM_VOLATILE_P (x)) return IRISKY; /* An exception-free load. */ if (!may_trap_p (x)) return IFREE; /* A load with 1 base register, to be further checked. */ if (CONST_BASED_ADDRESS_P (XEXP (x, 0))) return PFREE_CANDIDATE; /* No info on the load, to be further checked. */ return PRISKY_CANDIDATE; } else { const char *fmt; int i, insn_class = TRAP_FREE; /* Neither store nor load, check if it may cause a trap. */ if (may_trap_p (x)) return TRAP_RISKY; /* Recursive step: walk the insn... */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { int tmp_class = may_trap_exp (XEXP (x, i), is_store); insn_class = WORST_CLASS (insn_class, tmp_class); } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) { int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store); insn_class = WORST_CLASS (insn_class, tmp_class); if (insn_class == TRAP_RISKY || insn_class == IRISKY) break; } } if (insn_class == TRAP_RISKY || insn_class == IRISKY) break; } return insn_class; } } /* Classifies insn for the purpose of verifying that it can be moved speculatively, by examining it's patterns, returning: TRAP_RISKY: store, or risky non-load insn (e.g. division by variable). TRAP_FREE: non-load insn. IFREE: load from a globally safe location. IRISKY: volatile load. PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for being either PFREE or PRISKY. */ int haifa_classify_insn (rtx insn) { rtx pat = PATTERN (insn); int tmp_class = TRAP_FREE; int insn_class = TRAP_FREE; enum rtx_code code; if (GET_CODE (pat) == PARALLEL) { int i, len = XVECLEN (pat, 0); for (i = len - 1; i >= 0; i--) { code = GET_CODE (XVECEXP (pat, 0, i)); switch (code) { case CLOBBER: /* Test if it is a 'store'. */ tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1); break; case SET: /* Test if it is a store. */ tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1); if (tmp_class == TRAP_RISKY) break; /* Test if it is a load. */ tmp_class = WORST_CLASS (tmp_class, may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)), 0)); break; case COND_EXEC: case TRAP_IF: tmp_class = TRAP_RISKY; break; default: ; } insn_class = WORST_CLASS (insn_class, tmp_class); if (insn_class == TRAP_RISKY || insn_class == IRISKY) break; } } else { code = GET_CODE (pat); switch (code) { case CLOBBER: /* Test if it is a 'store'. */ tmp_class = may_trap_exp (XEXP (pat, 0), 1); break; case SET: /* Test if it is a store. */ tmp_class = may_trap_exp (SET_DEST (pat), 1); if (tmp_class == TRAP_RISKY) break; /* Test if it is a load. 
*/ tmp_class = WORST_CLASS (tmp_class, may_trap_exp (SET_SRC (pat), 0)); break; case COND_EXEC: case TRAP_IF: tmp_class = TRAP_RISKY; break; default:; } insn_class = tmp_class; } return insn_class; } /* Forward declarations. */ /* The scheduler using only DFA description should never use the following five functions: */ static unsigned int blockage_range (int, rtx); static void clear_units (void); static void schedule_unit (int, rtx, int); static int actual_hazard (int, rtx, int, int); static int potential_hazard (int, rtx, int); static int priority (rtx); static int rank_for_schedule (const void *, const void *); static void swap_sort (rtx *, int); static void queue_insn (rtx, int); static int schedule_insn (rtx, struct ready_list *, int); static int find_set_reg_weight (rtx); static void find_insn_reg_weight (int); static void adjust_priority (rtx); static void advance_one_cycle_haifa (void); /* Notes handling mechanism: ========================= Generally, NOTES are saved before scheduling and restored after scheduling. The scheduler distinguishes between three types of notes: (1) LINE_NUMBER notes, generated and used for debugging. Here, before scheduling a region, a pointer to the LINE_NUMBER note is added to the insn following it (in save_line_notes()), and the note is removed (in rm_line_notes() and unlink_line_notes()). After scheduling the region, this pointer is used for regeneration of the LINE_NUMBER note (in restore_line_notes()). (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes: Before scheduling a region, a pointer to the note is added to the insn that follows or precedes it. (This happens as part of the data dependence computation). After scheduling an insn, the pointer contained in it is used for regenerating the corresponding note (in reemit_notes). (3) All other notes (e.g. INSN_DELETED): Before scheduling a block, these notes are put in a list (in rm_other_notes() and unlink_other_notes ()). After scheduling the block, these notes are inserted at the beginning of the block (in schedule_block()). */ static rtx unlink_other_notes (rtx, rtx); static rtx unlink_line_notes (rtx, rtx); static rtx reemit_notes (rtx, rtx); static rtx *ready_lastpos (struct ready_list *); static void ready_sort (struct ready_list *); static rtx ready_remove_first (struct ready_list *); static void queue_to_ready (struct ready_list *); static int early_queue_to_ready (state_t, struct ready_list *); static void debug_ready_list (struct ready_list *); static rtx move_insn1 (rtx, rtx); static rtx move_insn (rtx, rtx); /* The following functions are used to implement multi-pass scheduling on the first cycle. It is used only for DFA based scheduler. */ static rtx ready_element (struct ready_list *, int); static rtx ready_remove (struct ready_list *, int); static int max_issue (struct ready_list *, int *); static rtx choose_ready (struct ready_list *); #endif /* INSN_SCHEDULING */ /* Point to state used for the current scheduling pass. */ struct sched_info *current_sched_info; #ifndef INSN_SCHEDULING void schedule_insns (FILE *dump_file ATTRIBUTE_UNUSED) { } #else /* Pointer to the last instruction scheduled. Used by rank_for_schedule, so that insns independent of the last scheduled insn will be preferred over dependent instructions. */ static rtx last_scheduled_insn; /* Compute the function units used by INSN. This caches the value returned by function_units_used. 
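   A cached value of zero means "not computed yet", so non-negative unit
   numbers are stored incremented by one and decoded on the way out: for
   example an insn using only unit 2 is cached as 3 and insn_unit returns 2,
   while the negative complement-of-mask encoding described next is stored
   and returned unchanged.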
A function unit is encoded as the unit number if the value is non-negative and the complement of a mask if the value is negative. A function unit index is the non-negative encoding. The scheduler using only DFA description should never use the following function. */ HAIFA_INLINE int insn_unit (rtx insn) { int unit = INSN_UNIT (insn); if (unit == 0) { recog_memoized (insn); /* A USE insn, or something else we don't need to understand. We can't pass these directly to function_units_used because it will trigger a fatal error for unrecognizable insns. */ if (INSN_CODE (insn) < 0) unit = -1; else { unit = function_units_used (insn); /* Increment non-negative values so we can cache zero. */ if (unit >= 0) unit++; } /* We only cache 16 bits of the result, so if the value is out of range, don't cache it. */ if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT || unit >= 0 || (unit & ~((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0) INSN_UNIT (insn) = unit; } return (unit > 0 ? unit - 1 : unit); } /* Compute the blockage range for executing INSN on UNIT. This caches the value returned by the blockage_range_function for the unit. These values are encoded in an int where the upper half gives the minimum value and the lower half gives the maximum value. The scheduler using only DFA description should never use the following function. */ HAIFA_INLINE static unsigned int blockage_range (int unit, rtx insn) { unsigned int blockage = INSN_BLOCKAGE (insn); unsigned int range; if ((int) UNIT_BLOCKED (blockage) != unit + 1) { range = function_units[unit].blockage_range_function (insn); /* We only cache the blockage range for one unit and then only if the values fit. */ if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS) INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range); } else range = BLOCKAGE_RANGE (blockage); return range; } /* A vector indexed by function unit instance giving the last insn to use the unit. The value of the function unit instance index for unit U instance I is (U + I * FUNCTION_UNITS_SIZE). The scheduler using only DFA description should never use the following variable. */ #if FUNCTION_UNITS_SIZE static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY]; #else static rtx unit_last_insn[1]; #endif /* A vector indexed by function unit instance giving the minimum time when the unit will unblock based on the maximum blockage cost. The scheduler using only DFA description should never use the following variable. */ #if FUNCTION_UNITS_SIZE static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY]; #else static int unit_tick[1]; #endif /* A vector indexed by function unit number giving the number of insns that remain to use the unit. The scheduler using only DFA description should never use the following variable. */ #if FUNCTION_UNITS_SIZE static int unit_n_insns[FUNCTION_UNITS_SIZE]; #else static int unit_n_insns[1]; #endif /* Access the unit_last_insn array. Used by the visualization code. The scheduler using only DFA description should never use the following function. */ rtx get_unit_last_insn (int instance) { return unit_last_insn[instance]; } /* Reset the function unit state to the null state. */ static void clear_units (void) { memset (unit_last_insn, 0, sizeof (unit_last_insn)); memset (unit_tick, 0, sizeof (unit_tick)); memset (unit_n_insns, 0, sizeof (unit_n_insns)); } /* Return the issue-delay of an insn. The scheduler using only DFA description should never use the following function. 
*/ HAIFA_INLINE int insn_issue_delay (rtx insn) { int i, delay = 0; int unit = insn_unit (insn); /* Efficiency note: in fact, we are working 'hard' to compute a value that was available in md file, and is not available in function_units[] structure. It would be nice to have this value there, too. */ if (unit >= 0) { if (function_units[unit].blockage_range_function && function_units[unit].blockage_function) delay = function_units[unit].blockage_function (insn, insn); } else for (i = 0, unit = ~unit; unit; i++, unit >>= 1) if ((unit & 1) != 0 && function_units[i].blockage_range_function && function_units[i].blockage_function) delay = MAX (delay, function_units[i].blockage_function (insn, insn)); return delay; } /* Return the actual hazard cost of executing INSN on the unit UNIT, instance INSTANCE at time CLOCK if the previous actual hazard cost was COST. The scheduler using only DFA description should never use the following function. */ HAIFA_INLINE int actual_hazard_this_instance (int unit, int instance, rtx insn, int clock, int cost) { int tick = unit_tick[instance]; /* Issue time of the last issued insn. */ if (tick - clock > cost) { /* The scheduler is operating forward, so unit's last insn is the executing insn and INSN is the candidate insn. We want a more exact measure of the blockage if we execute INSN at CLOCK given when we committed the execution of the unit's last insn. The blockage value is given by either the unit's max blockage constant, blockage range function, or blockage function. Use the most exact form for the given unit. */ if (function_units[unit].blockage_range_function) { if (function_units[unit].blockage_function) tick += (function_units[unit].blockage_function (unit_last_insn[instance], insn) - function_units[unit].max_blockage); else tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn)) - function_units[unit].max_blockage); } if (tick - clock > cost) cost = tick - clock; } return cost; } /* Record INSN as having begun execution on the units encoded by UNIT at time CLOCK. The scheduler using only DFA description should never use the following function. */ static void schedule_unit (int unit, rtx insn, int clock) { int i; if (unit >= 0) { int instance = unit; #if MAX_MULTIPLICITY > 1 /* Find the first free instance of the function unit and use that one. We assume that one is free. */ for (i = function_units[unit].multiplicity - 1; i > 0; i--) { if (!actual_hazard_this_instance (unit, instance, insn, clock, 0)) break; instance += FUNCTION_UNITS_SIZE; } #endif unit_last_insn[instance] = insn; unit_tick[instance] = (clock + function_units[unit].max_blockage); } else for (i = 0, unit = ~unit; unit; i++, unit >>= 1) if ((unit & 1) != 0) schedule_unit (i, insn, clock); } /* Return the actual hazard cost of executing INSN on the units encoded by UNIT at time CLOCK if the previous actual hazard cost was COST. The scheduler using only DFA description should never use the following function. */ static int actual_hazard (int unit, rtx insn, int clock, int cost) { int i; if (unit >= 0) { /* Find the instance of the function unit with the minimum hazard. 
*/ int instance = unit; int best_cost = actual_hazard_this_instance (unit, instance, insn, clock, cost); #if MAX_MULTIPLICITY > 1 int this_cost; if (best_cost > cost) { for (i = function_units[unit].multiplicity - 1; i > 0; i--) { instance += FUNCTION_UNITS_SIZE; this_cost = actual_hazard_this_instance (unit, instance, insn, clock, cost); if (this_cost < best_cost) { best_cost = this_cost; if (this_cost <= cost) break; } } } #endif cost = MAX (cost, best_cost); } else for (i = 0, unit = ~unit; unit; i++, unit >>= 1) if ((unit & 1) != 0) cost = actual_hazard (i, insn, clock, cost); return cost; } /* Return the potential hazard cost of executing an instruction on the units encoded by UNIT if the previous potential hazard cost was COST. An insn with a large blockage time is chosen in preference to one with a smaller time; an insn that uses a unit that is more likely to be used is chosen in preference to one with a unit that is less used. We are trying to minimize a subsequent actual hazard. The scheduler using only DFA description should never use the following function. */ HAIFA_INLINE static int potential_hazard (int unit, rtx insn, int cost) { int i, ncost; unsigned int minb, maxb; if (unit >= 0) { minb = maxb = function_units[unit].max_blockage; if (maxb > 1) { if (function_units[unit].blockage_range_function) { maxb = minb = blockage_range (unit, insn); maxb = MAX_BLOCKAGE_COST (maxb); minb = MIN_BLOCKAGE_COST (minb); } if (maxb > 1) { /* Make the number of instructions left dominate. Make the minimum delay dominate the maximum delay. If all these are the same, use the unit number to add an arbitrary ordering. Other terms can be added. */ ncost = minb * 0x40 + maxb; ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit; if (ncost > cost) cost = ncost; } } } else for (i = 0, unit = ~unit; unit; i++, unit >>= 1) if ((unit & 1) != 0) cost = potential_hazard (i, insn, cost); return cost; } /* Compute cost of executing INSN given the dependence LINK on the insn USED. This is the number of cycles between instruction issue and instruction results. */ int insn_cost (rtx insn, rtx link, rtx used) { int cost = INSN_COST (insn); if (cost < 0) { /* A USE insn, or something else we don't need to understand. We can't pass these directly to result_ready_cost or insn_default_latency because it will trigger a fatal error for unrecognizable insns. */ if (recog_memoized (insn) < 0) { INSN_COST (insn) = 0; return 0; } else { if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) cost = insn_default_latency (insn); else cost = result_ready_cost (insn); if (cost < 0) cost = 0; INSN_COST (insn) = cost; } } /* In this case estimate cost without caring how insn is used. */ if (link == 0 || used == 0) return cost; /* A USE insn should never require the value used to be computed. This allows the computation of a function's result and parameter values to overlap the return and call. */ if (recog_memoized (used) < 0) cost = 0; else { if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { if (INSN_CODE (insn) >= 0) { if (REG_NOTE_KIND (link) == REG_DEP_ANTI) cost = 0; else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT) { cost = (insn_default_latency (insn) - insn_default_latency (used)); if (cost <= 0) cost = 1; } else if (bypass_p (insn)) cost = insn_latency (insn, used); } } if (targetm.sched.adjust_cost) cost = targetm.sched.adjust_cost (used, link, insn, cost); if (cost < 0) cost = 0; } return cost; } /* Compute the priority number for INSN. 
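   This is the length of the longest cost-weighted path from INSN to the
   end of its region along the INSN_DEPEND links: an insn with no
   dependents gets insn_cost (insn, 0, 0), any other insn gets the maximum
   over its dependents of insn_cost (insn, link, next) + priority (next).
   Worked example with illustrative costs: for a dependence chain
   A -> B -> C where insn_cost (C, 0, 0) = 1, insn_cost (B, link, C) = 1
   and insn_cost (A, link, B) = 2, the recursion gives priority (C) = 1,
   priority (B) = 2 and priority (A) = 4.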
*/ static int priority (rtx insn) { rtx link; if (! INSN_P (insn)) return 0; if (! INSN_PRIORITY_KNOWN (insn)) { int this_priority = 0; if (INSN_DEPEND (insn) == 0) this_priority = insn_cost (insn, 0, 0); else { for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1)) { rtx next; int next_priority; next = XEXP (link, 0); /* Critical path is meaningful in block boundaries only. */ if (! (*current_sched_info->contributes_to_priority) (next, insn)) continue; next_priority = insn_cost (insn, link, next) + priority (next); if (next_priority > this_priority) this_priority = next_priority; } } INSN_PRIORITY (insn) = this_priority; INSN_PRIORITY_KNOWN (insn) = 1; } return INSN_PRIORITY (insn); } /* Macros and functions for keeping the priority queue sorted, and dealing with queuing and dequeuing of instructions. */ #define SCHED_SORT(READY, N_READY) \ do { if ((N_READY) == 2) \ swap_sort (READY, N_READY); \ else if ((N_READY) > 2) \ qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \ while (0) /* Returns a positive value if x is preferred; returns a negative value if y is preferred. Should never return 0, since that will make the sort unstable. */ static int rank_for_schedule (const void *x, const void *y) { rtx tmp = *(const rtx *) y; rtx tmp2 = *(const rtx *) x; rtx link; int tmp_class, tmp2_class, depend_count1, depend_count2; int val, priority_val, weight_val, info_val; /* The insn in a schedule group should be issued the first. */ if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2)) return SCHED_GROUP_P (tmp2) ? 1 : -1; /* Prefer insn with higher priority. */ priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp); if (priority_val) return priority_val; /* Prefer an insn with smaller contribution to registers-pressure. */ if (!reload_completed && (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2))) return weight_val; info_val = (*current_sched_info->rank) (tmp, tmp2); if (info_val) return info_val; /* Compare insns based on their relation to the last-scheduled-insn. */ if (last_scheduled_insn) { /* Classify the instructions into three classes: 1) Data dependent on last schedule insn. 2) Anti/Output dependent on last scheduled insn. 3) Independent of last scheduled insn, or has latency of one. Choose the insn from the highest numbered class if different. */ link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn)); if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1) tmp_class = 3; else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */ tmp_class = 1; else tmp_class = 2; link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn)); if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1) tmp2_class = 3; else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */ tmp2_class = 1; else tmp2_class = 2; if ((val = tmp2_class - tmp_class)) return val; } /* Prefer the insn which has more later insns that depend on it. This gives the scheduler more freedom when scheduling later instructions at the expense of added register pressure. */ depend_count1 = 0; for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1)) depend_count1++; depend_count2 = 0; for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1)) depend_count2++; val = depend_count2 - depend_count1; if (val) return val; /* If insns are equally good, sort by SCHED_INSN_LUID (original insn order), so that we make the sort stable. This minimizes instruction movement, thus minimizing sched's effect on debugging and cross-jumping. 
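   (Without this final comparison qsort, which is not a stable sort, could
   order two otherwise-equal insns differently from one run to the next and
   perturb the emitted code for no reason.)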
*/ return SCHED_INSN_LUID (tmp) - SCHED_INSN_LUID (tmp2); } /* Resort the array A in which only element at index N may be out of order. */ HAIFA_INLINE static void swap_sort (rtx *a, int n) { rtx insn = a[n - 1]; int i = n - 2; while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0) { a[i + 1] = a[i]; i -= 1; } a[i + 1] = insn; } /* Add INSN to the insn queue so that it can be executed at least N_CYCLES after the currently executing insn. Preserve insns chain for debugging purposes. */ HAIFA_INLINE static void queue_insn (rtx insn, int n_cycles) { int next_q = NEXT_Q_AFTER (q_ptr, n_cycles); rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]); insn_queue[next_q] = link; q_size += 1; if (sched_verbose >= 2) { fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ", (*current_sched_info->print_insn) (insn, 0)); fprintf (sched_dump, "queued for %d cycles.\n", n_cycles); } } /* Return a pointer to the bottom of the ready list, i.e. the insn with the lowest priority. */ HAIFA_INLINE static rtx * ready_lastpos (struct ready_list *ready) { if (ready->n_ready == 0) abort (); return ready->vec + ready->first - ready->n_ready + 1; } /* Add an element INSN to the ready list so that it ends up with the lowest priority. */ HAIFA_INLINE void ready_add (struct ready_list *ready, rtx insn) { if (ready->first == ready->n_ready) { memmove (ready->vec + ready->veclen - ready->n_ready, ready_lastpos (ready), ready->n_ready * sizeof (rtx)); ready->first = ready->veclen - 1; } ready->vec[ready->first - ready->n_ready] = insn; ready->n_ready++; } /* Remove the element with the highest priority from the ready list and return it. */ HAIFA_INLINE static rtx ready_remove_first (struct ready_list *ready) { rtx t; if (ready->n_ready == 0) abort (); t = ready->vec[ready->first--]; ready->n_ready--; /* If the queue becomes empty, reset it. */ if (ready->n_ready == 0) ready->first = ready->veclen - 1; return t; } /* The following code implements multi-pass scheduling for the first cycle. In other words, we will try to choose ready insn which permits to start maximum number of insns on the same cycle. */ /* Return a pointer to the element INDEX from the ready. INDEX for insn with the highest priority is 0, and the lowest priority has N_READY - 1. */ HAIFA_INLINE static rtx ready_element (struct ready_list *ready, int index) { #ifdef ENABLE_CHECKING if (ready->n_ready == 0 || index >= ready->n_ready) abort (); #endif return ready->vec[ready->first - index]; } /* Remove the element INDEX from the ready list and return it. INDEX for insn with the highest priority is 0, and the lowest priority has N_READY - 1. */ HAIFA_INLINE static rtx ready_remove (struct ready_list *ready, int index) { rtx t; int i; if (index == 0) return ready_remove_first (ready); if (ready->n_ready == 0 || index >= ready->n_ready) abort (); t = ready->vec[ready->first - index]; ready->n_ready--; for (i = index; i < ready->n_ready; i++) ready->vec[ready->first - i] = ready->vec[ready->first - i - 1]; return t; } /* Sort the ready list READY by ascending priority, using the SCHED_SORT macro. */ HAIFA_INLINE static void ready_sort (struct ready_list *ready) { rtx *first = ready_lastpos (ready); SCHED_SORT (first, ready->n_ready); } /* PREV is an insn that is ready to execute. Adjust its priority if that will help shorten or lengthen register lifetimes as appropriate. Also provide a hook for the target to tweek itself. */ HAIFA_INLINE static void adjust_priority (rtx prev) { /* ??? 
There used to be code here to try and estimate how an insn affected register lifetimes, but it did it by looking at REG_DEAD notes, which we removed in schedule_region. Nor did it try to take into account register pressure or anything useful like that. Revisit when we have a machine model to work with and not before. */ if (targetm.sched.adjust_priority) INSN_PRIORITY (prev) = targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev)); } /* Advance time on one cycle. */ HAIFA_INLINE static void advance_one_cycle_haifa (void) { if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { if (targetm.sched.dfa_pre_cycle_insn) state_transition (curr_state, targetm.sched.dfa_pre_cycle_insn ()); state_transition (curr_state, NULL); if (targetm.sched.dfa_post_cycle_insn) state_transition (curr_state, targetm.sched.dfa_post_cycle_insn ()); } } /* Clock at which the previous instruction was issued. */ static int last_clock_var; /* INSN is the "currently executing insn". Launch each insn which was waiting on INSN. READY is the ready list which contains the insns that are ready to fire. CLOCK is the current cycle. The function returns necessary cycle advance after issuing the insn (it is not zero for insns in a schedule group). */ static int schedule_insn (rtx insn, struct ready_list *ready, int clock) { rtx link; int advance = 0; int unit = 0; int premature_issue = 0; if (!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) unit = insn_unit (insn); if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface () && sched_verbose >= 1) { char buf[2048]; print_insn (buf, insn, 0); buf[40] = 0; fprintf (sched_dump, ";;\t%3i--> %-40s:", clock, buf); if (recog_memoized (insn) < 0) fprintf (sched_dump, "nothing"); else print_reservation (sched_dump, insn); fputc ('\n', sched_dump); } else if (sched_verbose >= 2) { fprintf (sched_dump, ";;\t\t--> scheduling insn <<<%d>>> on unit ", INSN_UID (insn)); insn_print_units (insn); fputc ('\n', sched_dump); } if (!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) { if (sched_verbose && unit == -1) visualize_no_unit (insn); if (MAX_BLOCKAGE > 1 || issue_rate > 1 || sched_verbose) schedule_unit (unit, insn, clock); if (INSN_DEPEND (insn) == 0) return 0; } if (INSN_TICK (insn) > clock) { /* 'insn' has been prematurely moved from the queue to the ready list. */ premature_issue = INSN_TICK (insn) - clock; } for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1)) { rtx next = XEXP (link, 0); int cost = insn_cost (insn, link, next); INSN_TICK (next) = MAX (INSN_TICK (next), clock + cost + premature_issue); if ((INSN_DEP_COUNT (next) -= 1) == 0) { int effective_cost = INSN_TICK (next) - clock; if (! (*current_sched_info->new_ready) (next)) continue; if (sched_verbose >= 2) { fprintf (sched_dump, ";;\t\tdependences resolved: insn %s ", (*current_sched_info->print_insn) (next, 0)); if (effective_cost < 1) fprintf (sched_dump, "into ready\n"); else fprintf (sched_dump, "into queue with cost=%d\n", effective_cost); } /* Adjust the priority of NEXT and either put it on the ready list or queue it. 
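   The choice depends only on how far NEXT's tick lies beyond the current
   clock.  A minimal standalone sketch of this resolution step follows
   (guarded out of the build; the arrays and callbacks are hypothetical).  */

#if 0
/* Illustrative sketch, not part of the build.  When the last unresolved
   dependence of a successor is satisfied, the successor becomes ready
   at once if its tick has already been reached, and is otherwise queued
   for the remaining number of cycles.  */
static void
sketch_resolve_dep (int succ, int clock, int dep_cost,
                    int *tick, int *dep_count,
                    void (*make_ready) (int), void (*queue) (int, int))
{
  if (clock + dep_cost > tick[succ])
    tick[succ] = clock + dep_cost;       /* earliest cycle SUCC may issue */

  if (--dep_count[succ] == 0)
    {
      int effective_cost = tick[succ] - clock;

      if (effective_cost < 1)
        make_ready (succ);               /* ready on the current cycle */
      else
        queue (succ, effective_cost);    /* stalls that many cycles */
    }
}
#endif

/* Adjust NEXT's priority, then add it to the ready list or queue it: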
*/ adjust_priority (next); if (effective_cost < 1) ready_add (ready, next); else { queue_insn (next, effective_cost); if (SCHED_GROUP_P (next) && advance < effective_cost) advance = effective_cost; } } } /* Annotate the instruction with issue information -- TImode indicates that the instruction is expected not to be able to issue on the same cycle as the previous insn. A machine may use this information to decide how the instruction should be aligned. */ if (issue_rate > 1 && GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER) { if (reload_completed) PUT_MODE (insn, clock > last_clock_var ? TImode : VOIDmode); last_clock_var = clock; } return advance; } /* Functions for handling of notes. */ /* Delete notes beginning with INSN and put them in the chain of notes ended by NOTE_LIST. Returns the insn following the notes. */ static rtx unlink_other_notes (rtx insn, rtx tail) { rtx prev = PREV_INSN (insn); while (insn != tail && GET_CODE (insn) == NOTE) { rtx next = NEXT_INSN (insn); /* Delete the note from its current position. */ if (prev) NEXT_INSN (prev) = next; if (next) PREV_INSN (next) = prev; /* See sched_analyze to see how these are handled. */ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END) { /* Insert the note at the end of the notes list. */ PREV_INSN (insn) = note_list; if (note_list) NEXT_INSN (note_list) = insn; note_list = insn; } insn = next; } return insn; } /* Delete line notes beginning with INSN. Record line-number notes so they can be reused. Returns the insn following the notes. */ static rtx unlink_line_notes (rtx insn, rtx tail) { rtx prev = PREV_INSN (insn); while (insn != tail && GET_CODE (insn) == NOTE) { rtx next = NEXT_INSN (insn); if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0) { /* Delete the note from its current position. */ if (prev) NEXT_INSN (prev) = next; if (next) PREV_INSN (next) = prev; /* Record line-number notes so they can be reused. */ LINE_NOTE (insn) = insn; } else prev = insn; insn = next; } return insn; } /* Return the head and tail pointers of BB. */ void get_block_head_tail (int b, rtx *headp, rtx *tailp) { /* HEAD and TAIL delimit the basic block being scheduled. */ rtx head = BB_HEAD (BASIC_BLOCK (b)); rtx tail = BB_END (BASIC_BLOCK (b)); /* Don't include any notes or labels at the beginning of the basic block, or notes at the ends of basic blocks. */ while (head != tail) { if (GET_CODE (head) == NOTE) head = NEXT_INSN (head); else if (GET_CODE (tail) == NOTE) tail = PREV_INSN (tail); else if (GET_CODE (head) == CODE_LABEL) head = NEXT_INSN (head); else break; } *headp = head; *tailp = tail; } /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */ int no_real_insns_p (rtx head, rtx tail) { while (head != NEXT_INSN (tail)) { if (GET_CODE (head) != NOTE && GET_CODE (head) != CODE_LABEL) return 0; head = NEXT_INSN (head); } return 1; } /* Delete line notes from one block. Save them so they can be later restored (in restore_line_notes). HEAD and TAIL are the boundaries of the block in which notes should be processed. */ void rm_line_notes (rtx head, rtx tail) { rtx next_tail; rtx insn; next_tail = NEXT_INSN (tail); for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) { rtx prev; /* Farm out notes, and maybe save them in NOTE_LIST. 
This is needed to keep the debugger from getting completely deranged. */ if (GET_CODE (insn) == NOTE) { prev = insn; insn = unlink_line_notes (insn, next_tail); if (prev == tail) abort (); if (prev == head) abort (); if (insn == next_tail) abort (); } } } /* Save line number notes for each insn in block B. HEAD and TAIL are the boundaries of the block in which notes should be processed. */ void save_line_notes (int b, rtx head, rtx tail) { rtx next_tail; /* We must use the true line number for the first insn in the block that was computed and saved at the start of this pass. We can't use the current line number, because scheduling of the previous block may have changed the current line number. */ rtx line = line_note_head[b]; rtx insn; next_tail = NEXT_INSN (tail); for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0) line = insn; else LINE_NOTE (insn) = line; } /* After a block was scheduled, insert line notes into the insns list. HEAD and TAIL are the boundaries of the block in which notes should be processed. */ void restore_line_notes (rtx head, rtx tail) { rtx line, note, prev, new; int added_notes = 0; rtx next_tail, insn; head = head; next_tail = NEXT_INSN (tail); /* Determine the current line-number. We want to know the current line number of the first insn of the block here, in case it is different from the true line number that was saved earlier. If different, then we need a line number note before the first insn of this block. If it happens to be the same, then we don't want to emit another line number note here. */ for (line = head; line; line = PREV_INSN (line)) if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0) break; /* Walk the insns keeping track of the current line-number and inserting the line-number notes as needed. */ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0) line = insn; /* This used to emit line number notes before every non-deleted note. However, this confuses a debugger, because line notes not separated by real instructions all end up at the same address. I can find no use for line number notes before other notes, so none are emitted. */ else if (GET_CODE (insn) != NOTE && INSN_UID (insn) < old_max_uid && (note = LINE_NOTE (insn)) != 0 && note != line && (line == 0 #ifdef USE_MAPPED_LOCATION || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line) #else || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line) || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line) #endif )) { line = note; prev = PREV_INSN (insn); if (LINE_NOTE (note)) { /* Re-use the original line-number note. */ LINE_NOTE (note) = 0; PREV_INSN (note) = prev; NEXT_INSN (prev) = note; PREV_INSN (insn) = note; NEXT_INSN (note) = insn; } else { added_notes++; new = emit_note_after (NOTE_LINE_NUMBER (note), prev); #ifndef USE_MAPPED_LOCATION NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note); #endif } } if (sched_verbose && added_notes) fprintf (sched_dump, ";; added %d line-number notes\n", added_notes); } /* After scheduling the function, delete redundant line notes from the insns list. */ void rm_redundant_line_notes (void) { rtx line = 0; rtx insn = get_insns (); int active_insn = 0; int notes = 0; /* Walk the insns deleting redundant line-number notes. Many of these are already present. The remainder tend to occur at basic block boundaries. 
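   In effect a note survives only if some real insn executes after it and
   its location differs from the location already in effect.  A standalone
   sketch of that rule follows (guarded out of the build; the stream
   encoding and names are hypothetical, and the real pass below walks
   backwards rather than forwards).  */

#if 0
/* Illustrative sketch, not part of the build.  STREAM[i] > 0 is a line
   note carrying that line number, STREAM[i] == 0 is a real insn.
   REDUNDANT[i] is set for every note that can be deleted; the number of
   such notes is returned.  */
static int
sketch_mark_redundant_notes (const int *stream, int n, char *redundant)
{
  int removed = 0, in_effect = -1, pending = -1, pending_line = 0;
  int i;

  for (i = 0; i < n; i++)
    {
      if (stream[i] > 0)                 /* a line-number note */
        {
          if (pending >= 0)
            {
              redundant[pending] = 1;    /* superseded before any insn ran */
              removed++;
            }
          if (stream[i] == in_effect)
            {
              redundant[i] = 1;          /* location did not change */
              removed++;
              pending = -1;
            }
          else
            {
              pending = i;
              pending_line = stream[i];
            }
        }
      else if (pending >= 0)
        {
          in_effect = pending_line;      /* a real insn keeps the note */
          pending = -1;
        }
    }

  if (pending >= 0)
    {
      redundant[pending] = 1;            /* nothing executes after it */
      removed++;
    }
  return removed;
}
#endif

/* Walk the insns backwards, deleting the redundant line-number notes: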
*/ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0) { /* If there are no active insns following, INSN is redundant. */ if (active_insn == 0) { notes++; SET_INSN_DELETED (insn); } /* If the line number is unchanged, LINE is redundant. */ else if (line #ifdef USE_MAPPED_LOCATION && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn) #else && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn) && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn) #endif ) { notes++; SET_INSN_DELETED (line); line = insn; } else line = insn; active_insn = 0; } else if (!((GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED) || (GET_CODE (insn) == INSN && (GET_CODE (PATTERN (insn)) == USE || GET_CODE (PATTERN (insn)) == CLOBBER)))) active_insn++; if (sched_verbose && notes) fprintf (sched_dump, ";; deleted %d line-number notes\n", notes); } /* Delete notes between HEAD and TAIL and put them in the chain of notes ended by NOTE_LIST. */ void rm_other_notes (rtx head, rtx tail) { rtx next_tail; rtx insn; note_list = 0; if (head == tail && (! INSN_P (head))) return; next_tail = NEXT_INSN (tail); for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) { rtx prev; /* Farm out notes, and maybe save them in NOTE_LIST. This is needed to keep the debugger from getting completely deranged. */ if (GET_CODE (insn) == NOTE) { prev = insn; insn = unlink_other_notes (insn, next_tail); if (prev == tail) abort (); if (prev == head) abort (); if (insn == next_tail) abort (); } } } /* Functions for computation of registers live/usage info. */ /* This function looks for a new register being defined. If the destination register is already used by the source, a new register is not needed. */ static int find_set_reg_weight (rtx x) { if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), VOIDmode)) return 1; if (GET_CODE (x) == SET && register_operand (SET_DEST (x), VOIDmode)) { if (REG_P (SET_DEST (x))) { if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x))) return 1; else return 0; } return 1; } return 0; } /* Calculate INSN_REG_WEIGHT for all insns of a block. */ static void find_insn_reg_weight (int b) { rtx insn, next_tail, head, tail; get_block_head_tail (b, &head, &tail); next_tail = NEXT_INSN (tail); for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) { int reg_weight = 0; rtx x; /* Handle register life information. */ if (! INSN_P (insn)) continue; /* Increment weight for each register born here. */ x = PATTERN (insn); reg_weight += find_set_reg_weight (x); if (GET_CODE (x) == PARALLEL) { int j; for (j = XVECLEN (x, 0) - 1; j >= 0; j--) { x = XVECEXP (PATTERN (insn), 0, j); reg_weight += find_set_reg_weight (x); } } /* Decrement weight for each register that dies here. */ for (x = REG_NOTES (insn); x; x = XEXP (x, 1)) { if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED) reg_weight--; } INSN_REG_WEIGHT (insn) = reg_weight; } } /* Scheduling clock, modified in schedule_block() and queue_to_ready (). */ static int clock_var; /* Move insns that became ready to fire from queue to ready list. */ static void queue_to_ready (struct ready_list *ready) { rtx insn; rtx link; q_ptr = NEXT_Q (q_ptr); /* Add all pending insns that can be scheduled without stalls to the ready list. 
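   The queue itself is a ring of per-cycle buckets, so advancing the clock
   only moves Q_PTR and drains one bucket.  A minimal standalone sketch of
   that layout follows (guarded out of the build; the size, counters and
   names are hypothetical).  */

#if 0
/* Illustrative sketch, not part of the build.  An insn stalled for N
   cycles goes into the bucket N slots ahead of the current one; advancing
   the clock moves the ring pointer and empties that bucket into the
   ready list.  */
#define SKETCH_QUEUE_SIZE 8                   /* a power of two */

static int sketch_bucket[SKETCH_QUEUE_SIZE];  /* insns becoming ready then */
static int sketch_q_ptr;

static void
sketch_queue_insn (int n_cycles)
{
  sketch_bucket[(sketch_q_ptr + n_cycles) & (SKETCH_QUEUE_SIZE - 1)]++;
}

/* Advance one cycle; return how many queued insns just became ready.  */
static int
sketch_queue_to_ready (void)
{
  int drained;

  sketch_q_ptr = (sketch_q_ptr + 1) & (SKETCH_QUEUE_SIZE - 1);
  drained = sketch_bucket[sketch_q_ptr];
  sketch_bucket[sketch_q_ptr] = 0;
  return drained;
}
#endif

/* Drain this cycle's bucket into the ready list: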
*/ for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1)) { insn = XEXP (link, 0); q_size -= 1; if (sched_verbose >= 2) fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ", (*current_sched_info->print_insn) (insn, 0)); ready_add (ready, insn); if (sched_verbose >= 2) fprintf (sched_dump, "moving to ready without stalls\n"); } insn_queue[q_ptr] = 0; /* If there are no ready insns, stall until one is ready and add all of the pending insns at that point to the ready list. */ if (ready->n_ready == 0) { int stalls; for (stalls = 1; stalls <= MAX_INSN_QUEUE_INDEX; stalls++) { if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])) { for (; link; link = XEXP (link, 1)) { insn = XEXP (link, 0); q_size -= 1; if (sched_verbose >= 2) fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ", (*current_sched_info->print_insn) (insn, 0)); ready_add (ready, insn); if (sched_verbose >= 2) fprintf (sched_dump, "moving to ready with %d stalls\n", stalls); } insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0; advance_one_cycle_haifa (); break; } advance_one_cycle_haifa (); } if ((!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) && sched_verbose && stalls) visualize_stall_cycles (stalls); q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock_var += stalls; } } /* Used by early_queue_to_ready. Determines whether it is "ok" to prematurely move INSN from the queue to the ready list. Currently, if a target defines the hook 'is_costly_dependence', this function uses the hook to check whether there exist any dependences which are considered costly by the target, between INSN and other insns that have already been scheduled. Dependences are checked up to Y cycles back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows controlling this value. (Other considerations could be taken into account instead (or in addition) depending on user flags and target hooks. */ static bool ok_for_early_queue_removal (rtx insn) { int n_cycles; rtx prev_insn = last_scheduled_insn; if (targetm.sched.is_costly_dependence) { for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--) { for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn)) { rtx dep_link = 0; int dep_cost; if (GET_CODE (prev_insn) != NOTE) { dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn)); if (dep_link) { dep_cost = insn_cost (prev_insn, dep_link, insn) ; if (targetm.sched.is_costly_dependence (prev_insn, insn, dep_link, dep_cost, flag_sched_stalled_insns_dep - n_cycles)) return false; } } if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */ break; } if (!prev_insn) break; prev_insn = PREV_INSN (prev_insn); } } return true; } /* Remove insns from the queue, before they become "ready" with respect to FU latency considerations. */ static int early_queue_to_ready (state_t state, struct ready_list *ready) { rtx insn; rtx link; rtx next_link; rtx prev_link; bool move_to_ready; int cost; state_t temp_state = alloca (dfa_state_size); int stalls; int insns_removed = 0; /* Flag '-fsched-stalled-insns=X' determines the aggressiveness of this function: X == 0: There is no limit on how many queued insns can be removed prematurely. (flag_sched_stalled_insns = -1). X >= 1: Only X queued insns can be removed prematurely in each invocation. (flag_sched_stalled_insns = X). Otherwise: Early queue removal is disabled. (flag_sched_stalled_insns = 0) */ if (! 
flag_sched_stalled_insns) return 0; for (stalls = 0; stalls <= MAX_INSN_QUEUE_INDEX; stalls++) { if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)])) { if (sched_verbose > 6) fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls); prev_link = 0; while (link) { next_link = XEXP (link, 1); insn = XEXP (link, 0); if (insn && sched_verbose > 6) print_rtl_single (sched_dump, insn); memcpy (temp_state, state, dfa_state_size); if (recog_memoized (insn) < 0) /* non-negative to indicate that it's not ready to avoid infinite Q->R->Q->R... */ cost = 0; else cost = state_transition (temp_state, insn); if (sched_verbose >= 6) fprintf (sched_dump, "transition cost = %d\n", cost); move_to_ready = false; if (cost < 0) { move_to_ready = ok_for_early_queue_removal (insn); if (move_to_ready == true) { /* move from Q to R */ q_size -= 1; ready_add (ready, insn); if (prev_link) XEXP (prev_link, 1) = next_link; else insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link; free_INSN_LIST_node (link); if (sched_verbose >= 2) fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n", (*current_sched_info->print_insn) (insn, 0)); insns_removed++; if (insns_removed == flag_sched_stalled_insns) /* Remove only one insn from Q at a time. */ return insns_removed; } } if (move_to_ready == false) prev_link = link; link = next_link; } /* while link */ } /* if link */ } /* for stalls.. */ return insns_removed; } /* Print the ready list for debugging purposes. Callable from debugger. */ static void debug_ready_list (struct ready_list *ready) { rtx *p; int i; if (ready->n_ready == 0) { fprintf (sched_dump, "\n"); return; } p = ready_lastpos (ready); for (i = 0; i < ready->n_ready; i++) fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0)); fprintf (sched_dump, "\n"); } /* move_insn1: Remove INSN from insn chain, and link it after LAST insn. */ static rtx move_insn1 (rtx insn, rtx last) { NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn); PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn); NEXT_INSN (insn) = NEXT_INSN (last); PREV_INSN (NEXT_INSN (last)) = insn; NEXT_INSN (last) = insn; PREV_INSN (insn) = last; return insn; } /* Search INSN for REG_SAVE_NOTE note pairs for NOTE_INSN_{LOOP,EHREGION}_{BEG,END}; and convert them back into NOTEs. The REG_SAVE_NOTE note following first one is contains the saved value for NOTE_BLOCK_NUMBER which is useful for NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction output by the instruction scheduler. Return the new value of LAST. */ static rtx reemit_notes (rtx insn, rtx last) { rtx note, retval; retval = last; for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) { if (REG_NOTE_KIND (note) == REG_SAVE_NOTE) { enum insn_note note_type = INTVAL (XEXP (note, 0)); last = emit_note_before (note_type, last); remove_note (insn, note); note = XEXP (note, 1); if (note_type == NOTE_INSN_EH_REGION_BEG || note_type == NOTE_INSN_EH_REGION_END) NOTE_EH_HANDLER (last) = INTVAL (XEXP (note, 0)); remove_note (insn, note); } } return retval; } /* Move INSN. Reemit notes if needed. Return the last insn emitted by the scheduler, which is the return value from the first call to reemit_notes. */ static rtx move_insn (rtx insn, rtx last) { rtx retval = NULL; move_insn1 (insn, last); /* If this is the first call to reemit_notes, then record its return value. */ if (retval == NULL_RTX) retval = reemit_notes (insn, insn); else reemit_notes (insn, insn); SCHED_GROUP_P (insn) = 0; return retval; } /* The following structure describe an entry of the stack of choices. 
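   used by max_issue below.  (As an aside, move_insn1 above is plain
   doubly-linked-list surgery; a standalone sketch of it follows, guarded
   out of the build, with a hypothetical node type and both neighbours
   assumed to exist.)  */

#if 0
/* Illustrative sketch, not part of the build: unlink NODE from its
   current position and relink it immediately after LAST, the same
   pointer dance move_insn1 performs on the insn chain.  */
struct sketch_node
{
  struct sketch_node *prev, *next;
};

static void
sketch_move_after (struct sketch_node *node, struct sketch_node *last)
{
  /* Unlink NODE from its current position.  */
  node->prev->next = node->next;
  node->next->prev = node->prev;

  /* Relink NODE right after LAST.  */
  node->next = last->next;
  last->next->prev = node;
  last->next = node;
  node->prev = last;
}
#endif

/* An entry of the stack of choices used by max_issue: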
*/ struct choice_entry { /* Ordinal number of the issued insn in the ready queue. */ int index; /* The number of the rest insns whose issues we should try. */ int rest; /* The number of issued essential insns. */ int n; /* State after issuing the insn. */ state_t state; }; /* The following array is used to implement a stack of choices used in function max_issue. */ static struct choice_entry *choice_stack; /* The following variable value is number of essential insns issued on the current cycle. An insn is essential one if it changes the processors state. */ static int cycle_issued_insns; /* The following variable value is maximal number of tries of issuing insns for the first cycle multipass insn scheduling. We define this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not need this constraint if all real insns (with non-negative codes) had reservations because in this case the algorithm complexity is O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions might be incomplete and such insn might occur. For such descriptions, the complexity of algorithm (without the constraint) could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */ static int max_lookahead_tries; /* The following value is value of hook `first_cycle_multipass_dfa_lookahead' at the last call of `max_issue'. */ static int cached_first_cycle_multipass_dfa_lookahead = 0; /* The following value is value of `issue_rate' at the last call of `sched_init'. */ static int cached_issue_rate = 0; /* The following function returns maximal (or close to maximal) number of insns which can be issued on the same cycle and one of which insns is insns with the best rank (the first insn in READY). To make this function tries different samples of ready insns. READY is current queue `ready'. Global array READY_TRY reflects what insns are already issued in this try. INDEX will contain index of the best insn in READY. The following function is used only for first cycle multipass scheduling. */ static int max_issue (struct ready_list *ready, int *index) { int n, i, all, n_ready, best, delay, tries_num; struct choice_entry *top; rtx insn; best = 0; memcpy (choice_stack->state, curr_state, dfa_state_size); top = choice_stack; top->rest = cached_first_cycle_multipass_dfa_lookahead; top->n = 0; n_ready = ready->n_ready; for (all = i = 0; i < n_ready; i++) if (!ready_try [i]) all++; i = 0; tries_num = 0; for (;;) { if (top->rest == 0 || i >= n_ready) { if (top == choice_stack) break; if (best < top - choice_stack && ready_try [0]) { best = top - choice_stack; *index = choice_stack [1].index; if (top->n == issue_rate - cycle_issued_insns || best == all) break; } i = top->index; ready_try [i] = 0; top--; memcpy (curr_state, top->state, dfa_state_size); } else if (!ready_try [i]) { tries_num++; if (tries_num > max_lookahead_tries) break; insn = ready_element (ready, i); delay = state_transition (curr_state, insn); if (delay < 0) { if (state_dead_lock_p (curr_state)) top->rest = 0; else top->rest--; n = top->n; if (memcmp (top->state, curr_state, dfa_state_size) != 0) n++; top++; top->rest = cached_first_cycle_multipass_dfa_lookahead; top->index = i; top->n = n; memcpy (top->state, curr_state, dfa_state_size); ready_try [i] = 1; i = -1; } } i++; } while (top != choice_stack) { ready_try [top->index] = 0; top--; } memcpy (curr_state, choice_stack->state, dfa_state_size); return best; } /* The following function chooses insn from READY and modifies *N_READY and READY. 
The following function is used only for first cycle multipass scheduling. */ static rtx choose_ready (struct ready_list *ready) { int lookahead = 0; if (targetm.sched.first_cycle_multipass_dfa_lookahead) lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead (); if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))) return ready_remove_first (ready); else { /* Try to choose the better insn. */ int index = 0, i; rtx insn; if (cached_first_cycle_multipass_dfa_lookahead != lookahead) { cached_first_cycle_multipass_dfa_lookahead = lookahead; max_lookahead_tries = 100; for (i = 0; i < issue_rate; i++) max_lookahead_tries *= lookahead; } insn = ready_element (ready, 0); if (INSN_CODE (insn) < 0) return ready_remove_first (ready); for (i = 1; i < ready->n_ready; i++) { insn = ready_element (ready, i); ready_try [i] = (INSN_CODE (insn) < 0 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn))); } if (max_issue (ready, &index) == 0) return ready_remove_first (ready); else return ready_remove (ready, index); } } /* Use forward list scheduling to rearrange insns of block B in region RGN, possibly bringing insns from subsequent blocks in the same region. */ void schedule_block (int b, int rgn_n_insns) { struct ready_list ready; int i, first_cycle_insn_p; int can_issue_more; state_t temp_state = NULL; /* It is used for multipass scheduling. */ int sort_p, advance, start_clock_var; /* Head/tail info for this block. */ rtx prev_head = current_sched_info->prev_head; rtx next_tail = current_sched_info->next_tail; rtx head = NEXT_INSN (prev_head); rtx tail = PREV_INSN (next_tail); /* We used to have code to avoid getting parameters moved from hard argument registers into pseudos. However, it was removed when it proved to be of marginal benefit and caused problems because schedule_block and compute_forward_dependences had different notions of what the "head" insn was. */ if (head == tail && (! INSN_P (head))) abort (); /* Debug info. */ if (sched_verbose) { fprintf (sched_dump, ";; ======================================================\n"); fprintf (sched_dump, ";; -- basic block %d from %d to %d -- %s reload\n", b, INSN_UID (head), INSN_UID (tail), (reload_completed ? "after" : "before")); fprintf (sched_dump, ";; ======================================================\n"); fprintf (sched_dump, "\n"); visualize_alloc (); init_block_visualization (); } if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) state_reset (curr_state); else clear_units (); /* Allocate the ready list. */ ready.veclen = rgn_n_insns + 1 + issue_rate; ready.first = ready.veclen - 1; ready.vec = xmalloc (ready.veclen * sizeof (rtx)); ready.n_ready = 0; if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { /* It is used for first cycle multipass scheduling. */ temp_state = alloca (dfa_state_size); ready_try = xcalloc ((rgn_n_insns + 1), sizeof (char)); choice_stack = xmalloc ((rgn_n_insns + 1) * sizeof (struct choice_entry)); for (i = 0; i <= rgn_n_insns; i++) choice_stack[i].state = xmalloc (dfa_state_size); } (*current_sched_info->init_ready_list) (&ready); if (targetm.sched.md_init) targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen); /* We start inserting insns after PREV_HEAD. */ last_scheduled_insn = prev_head; /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the queue. 
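   The READY vector allocated above is organised top-down, with the
   highest-priority insn at VEC[FIRST] and the lowest at
   VEC[FIRST - N_READY + 1].  A standalone sketch of that layout follows
   (guarded out of the build; the names are hypothetical and the
   wrap-around case handled by ready_add's memmove is omitted).  */

#if 0
/* Illustrative sketch, not part of the build.  Additions go in at the
   low end (lowest priority), removals come off the high end (highest
   priority).  */
struct sketch_ready
{
  int *vec;
  int veclen, first, n_ready;
};

static void
sketch_ready_add (struct sketch_ready *r, int insn)
{
  r->vec[r->first - r->n_ready] = insn;   /* becomes the lowest priority */
  r->n_ready++;
}

static int
sketch_ready_remove_first (struct sketch_ready *r)
{
  int insn = r->vec[r->first--];          /* the highest priority */
  r->n_ready--;
  return insn;
}
#endif

/* Initialize INSN_QUEUE; Q_SIZE is the total number of insns in the queue: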
*/ q_ptr = 0; q_size = 0; if (!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) max_insn_queue_index_macro_value = INSN_QUEUE_SIZE - 1; else max_insn_queue_index_macro_value = max_insn_queue_index; insn_queue = alloca ((MAX_INSN_QUEUE_INDEX + 1) * sizeof (rtx)); memset (insn_queue, 0, (MAX_INSN_QUEUE_INDEX + 1) * sizeof (rtx)); last_clock_var = -1; /* Start just before the beginning of time. */ clock_var = -1; advance = 0; sort_p = TRUE; /* Loop until all the insns in BB are scheduled. */ while ((*current_sched_info->schedule_more_p) ()) { do { start_clock_var = clock_var; clock_var++; advance_one_cycle_haifa (); /* Add to the ready list all pending insns that can be issued now. If there are no ready insns, increment clock until one is ready and add all pending insns at that point to the ready list. */ queue_to_ready (&ready); if (ready.n_ready == 0) abort (); if (sched_verbose >= 2) { fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: "); debug_ready_list (&ready); } advance -= clock_var - start_clock_var; } while (advance > 0); if (sort_p) { /* Sort the ready list based on priority. */ ready_sort (&ready); if (sched_verbose >= 2) { fprintf (sched_dump, ";;\t\tReady list after ready_sort: "); debug_ready_list (&ready); } } /* Allow the target to reorder the list, typically for better instruction bundling. */ if (sort_p && targetm.sched.reorder && (ready.n_ready == 0 || !SCHED_GROUP_P (ready_element (&ready, 0)))) can_issue_more = targetm.sched.reorder (sched_dump, sched_verbose, ready_lastpos (&ready), &ready.n_ready, clock_var); else can_issue_more = issue_rate; first_cycle_insn_p = 1; cycle_issued_insns = 0; for (;;) { rtx insn; int cost; bool asm_p = false; if (sched_verbose >= 2) { fprintf (sched_dump, ";;\tReady list (t =%3d): ", clock_var); debug_ready_list (&ready); } if (!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) { if (ready.n_ready == 0 || !can_issue_more || !(*current_sched_info->schedule_more_p) ()) break; insn = ready_remove_first (&ready); cost = actual_hazard (insn_unit (insn), insn, clock_var, 0); } else { if (ready.n_ready == 0 && can_issue_more && reload_completed) { /* Allow scheduling insns directly from the queue in case there's nothing better to do (ready list is empty) but there are still vacant dispatch slots in the current cycle. */ if (sched_verbose >= 6) fprintf(sched_dump,";;\t\tSecond chance\n"); memcpy (temp_state, curr_state, dfa_state_size); if (early_queue_to_ready (temp_state, &ready)) ready_sort (&ready); } if (ready.n_ready == 0 || !can_issue_more || state_dead_lock_p (curr_state) || !(*current_sched_info->schedule_more_p) ()) break; /* Select and remove the insn from the ready list. */ if (sort_p) insn = choose_ready (&ready); else insn = ready_remove_first (&ready); if (targetm.sched.dfa_new_cycle && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, insn, last_clock_var, clock_var, &sort_p)) { ready_add (&ready, insn); break; } sort_p = TRUE; memcpy (temp_state, curr_state, dfa_state_size); if (recog_memoized (insn) < 0) { asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT || asm_noperands (PATTERN (insn)) >= 0); if (!first_cycle_insn_p && asm_p) /* This is asm insn which is tryed to be issued on the cycle not first. Issue it on the next cycle. */ cost = 1; else /* A USE insn, or something else we don't need to understand. We can't pass these directly to state_transition because it will trigger a fatal error for unrecognizable insns. 
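   Taken together with the state_transition call below, this branch boils
   down to a single issue-or-requeue decision per candidate; a standalone
   sketch of that decision follows (guarded out of the build; the encoding
   is hypothetical and the dfa_bubble path is omitted).  */

#if 0
/* Illustrative sketch, not part of the build.  DFA_COST stands for the
   state_transition result: negative means the reservation fits in the
   current cycle.  The return value is the number of cycles to requeue
   the insn for; zero means issue it now.  */
static int
sketch_issue_cost (int recognizable, int is_asm, int first_cycle_p,
                   int dfa_cost)
{
  if (!recognizable)
    /* USE/CLOBBER and the like issue for free; an asm can only start a
       cycle, otherwise it waits one cycle.  */
    return (is_asm && !first_cycle_p) ? 1 : 0;

  if (dfa_cost < 0)
    return 0;          /* fits into the current cycle */
  if (dfa_cost == 0)
    return 1;          /* does not fit: retry on the next cycle */
  return dfa_cost;     /* known stall of DFA_COST cycles */
}
#endif

/* An unrecognizable insn that is not a late asm issues for free: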
*/ cost = 0; } else { cost = state_transition (temp_state, insn); if (targetm.sched.first_cycle_multipass_dfa_lookahead && targetm.sched.dfa_bubble) { if (cost == 0) { int j; rtx bubble; for (j = 0; (bubble = targetm.sched.dfa_bubble (j)) != NULL_RTX; j++) { memcpy (temp_state, curr_state, dfa_state_size); if (state_transition (temp_state, bubble) < 0 && state_transition (temp_state, insn) < 0) break; } if (bubble != NULL_RTX) { if (insert_schedule_bubbles_p) { rtx copy; copy = copy_rtx (PATTERN (bubble)); emit_insn_after (copy, last_scheduled_insn); last_scheduled_insn = NEXT_INSN (last_scheduled_insn); INSN_CODE (last_scheduled_insn) = INSN_CODE (bubble); /* Annotate the same for the first insns scheduling by using mode. */ PUT_MODE (last_scheduled_insn, (clock_var > last_clock_var ? clock_var - last_clock_var : VOIDmode)); last_clock_var = clock_var; if (sched_verbose >= 2) { fprintf (sched_dump, ";;\t\t--> scheduling bubble insn <<<%d>>>:reservation ", INSN_UID (last_scheduled_insn)); if (recog_memoized (last_scheduled_insn) < 0) fprintf (sched_dump, "nothing"); else print_reservation (sched_dump, last_scheduled_insn); fprintf (sched_dump, "\n"); } } cost = -1; } } } if (cost < 0) cost = 0; else if (cost == 0) cost = 1; } } if (cost >= 1) { queue_insn (insn, cost); continue; } if (! (*current_sched_info->can_schedule_ready_p) (insn)) goto next; last_scheduled_insn = move_insn (insn, last_scheduled_insn); if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { if (memcmp (curr_state, temp_state, dfa_state_size) != 0) cycle_issued_insns++; memcpy (curr_state, temp_state, dfa_state_size); } if (targetm.sched.variable_issue) can_issue_more = targetm.sched.variable_issue (sched_dump, sched_verbose, insn, can_issue_more); /* A naked CLOBBER or USE generates no instruction, so do not count them against the issue rate. */ else if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER) can_issue_more--; advance = schedule_insn (insn, &ready, clock_var); /* After issuing an asm insn we should start a new cycle. */ if (advance == 0 && asm_p) advance = 1; if (advance != 0) break; next: first_cycle_insn_p = 0; /* Sort the ready list based on priority. This must be redone here, as schedule_insn may have readied additional insns that will not be sorted correctly. */ if (ready.n_ready > 0) ready_sort (&ready); if (targetm.sched.reorder2 && (ready.n_ready == 0 || !SCHED_GROUP_P (ready_element (&ready, 0)))) { can_issue_more = targetm.sched.reorder2 (sched_dump, sched_verbose, ready.n_ready ? ready_lastpos (&ready) : NULL, &ready.n_ready, clock_var); } } if ((!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) && sched_verbose) /* Debug info. */ visualize_scheduled_insns (clock_var); } if (targetm.sched.md_finish) targetm.sched.md_finish (sched_dump, sched_verbose); /* Debug info. */ if (sched_verbose) { fprintf (sched_dump, ";;\tReady list (final): "); debug_ready_list (&ready); if (!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) print_block_visualization (""); } /* Sanity check -- queue must be empty now. Meaningless if region has multiple bbs. */ if (current_sched_info->queue_must_finish_empty && q_size != 0) abort (); /* Update head/tail boundaries. 
*/ head = NEXT_INSN (prev_head); tail = last_scheduled_insn; if (!reload_completed) { rtx insn, link, next; /* INSN_TICK (minimum clock tick at which the insn becomes ready) may be not correct for the insn in the subsequent blocks of the region. We should use a correct value of `clock_var' or modify INSN_TICK. It is better to keep clock_var value equal to 0 at the start of a basic block. Therefore we modify INSN_TICK here. */ for (insn = head; insn != tail; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1)) { next = XEXP (link, 0); INSN_TICK (next) -= clock_var; } } } /* Restore-other-notes: NOTE_LIST is the end of a chain of notes previously found among the insns. Insert them at the beginning of the insns. */ if (note_list != 0) { rtx note_head = note_list; while (PREV_INSN (note_head)) { note_head = PREV_INSN (note_head); } PREV_INSN (note_head) = PREV_INSN (head); NEXT_INSN (PREV_INSN (head)) = note_head; PREV_INSN (head) = note_list; NEXT_INSN (note_list) = head; head = note_head; } /* Debugging. */ if (sched_verbose) { fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n", clock_var, INSN_UID (head)); fprintf (sched_dump, ";; new tail = %d\n\n", INSN_UID (tail)); visualize_free (); } current_sched_info->head = head; current_sched_info->tail = tail; free (ready.vec); if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { free (ready_try); for (i = 0; i <= rgn_n_insns; i++) free (choice_stack [i].state); free (choice_stack); } } /* Set_priorities: compute priority of each insn in the block. */ int set_priorities (rtx head, rtx tail) { rtx insn; int n_insn; int sched_max_insns_priority = current_sched_info->sched_max_insns_priority; rtx prev_head; prev_head = PREV_INSN (head); if (head == tail && (! INSN_P (head))) return 0; n_insn = 0; sched_max_insns_priority = 0; for (insn = tail; insn != prev_head; insn = PREV_INSN (insn)) { if (GET_CODE (insn) == NOTE) continue; n_insn++; (void) priority (insn); if (INSN_PRIORITY_KNOWN (insn)) sched_max_insns_priority = MAX (sched_max_insns_priority, INSN_PRIORITY (insn)); } sched_max_insns_priority += 1; current_sched_info->sched_max_insns_priority = sched_max_insns_priority; return n_insn; } /* Initialize some global state for the scheduler. DUMP_FILE is to be used for debugging output. */ void sched_init (FILE *dump_file) { int luid; basic_block b; rtx insn; int i; /* Disable speculative loads in their presence if cc0 defined. */ #ifdef HAVE_cc0 flag_schedule_speculative_load = 0; #endif /* Set dump and sched_verbose for the desired debugging output. If no dump-file was specified, but -fsched-verbose=N (any N), print to stderr. For -fsched-verbose=N, N>=10, print everything to stderr. */ sched_verbose = sched_verbose_param; if (sched_verbose_param == 0 && dump_file) sched_verbose = 1; sched_dump = ((sched_verbose_param >= 10 || !dump_file) ? stderr : dump_file); /* Initialize issue_rate. */ if (targetm.sched.issue_rate) issue_rate = targetm.sched.issue_rate (); else issue_rate = 1; if (cached_issue_rate != issue_rate) { cached_issue_rate = issue_rate; /* To invalidate max_lookahead_tries: */ cached_first_cycle_multipass_dfa_lookahead = 0; } /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for pseudos which do not cross calls. 
*/ old_max_uid = get_max_uid () + 1; h_i_d = xcalloc (old_max_uid, sizeof (*h_i_d)); for (i = 0; i < old_max_uid; i++) h_i_d [i].cost = -1; if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { if (targetm.sched.init_dfa_pre_cycle_insn) targetm.sched.init_dfa_pre_cycle_insn (); if (targetm.sched.init_dfa_post_cycle_insn) targetm.sched.init_dfa_post_cycle_insn (); if (targetm.sched.first_cycle_multipass_dfa_lookahead && targetm.sched.init_dfa_bubbles) targetm.sched.init_dfa_bubbles (); dfa_start (); dfa_state_size = state_size (); curr_state = xmalloc (dfa_state_size); } h_i_d[0].luid = 0; luid = 1; FOR_EACH_BB (b) for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn)) { SCHED_INSN_LUID (insn) = luid; /* Increment the next luid, unless this is a note. We don't really need separate IDs for notes and we don't want to schedule differently depending on whether or not there are line-number notes, i.e., depending on whether or not we're generating debugging information. */ if (GET_CODE (insn) != NOTE) ++luid; if (insn == BB_END (b)) break; } init_dependency_caches (luid); init_alias_analysis (); if (write_symbols != NO_DEBUG) { rtx line; line_note_head = xcalloc (last_basic_block, sizeof (rtx)); /* Save-line-note-head: Determine the line-number at the start of each basic block. This must be computed and saved now, because after a basic block's predecessor has been scheduled, it is impossible to accurately determine the correct line number for the first insn of the block. */ FOR_EACH_BB (b) { for (line = BB_HEAD (b); line; line = PREV_INSN (line)) if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0) { line_note_head[b->index] = line; break; } /* Do a forward search as well, since we won't get to see the first notes in a basic block. */ for (line = BB_HEAD (b); line; line = NEXT_INSN (line)) { if (INSN_P (line)) break; if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0) line_note_head[b->index] = line; } } } if ((!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) && sched_verbose) /* Find units used in this function, for visualization. */ init_target_units (); /* ??? Add a NOTE after the last insn of the last basic block. It is not known why this is done. */ insn = BB_END (EXIT_BLOCK_PTR->prev_bb); if (NEXT_INSN (insn) == 0 || (GET_CODE (insn) != NOTE && GET_CODE (insn) != CODE_LABEL /* Don't emit a NOTE if it would end up before a BARRIER. */ && GET_CODE (NEXT_INSN (insn)) != BARRIER)) { emit_note_after (NOTE_INSN_DELETED, BB_END (EXIT_BLOCK_PTR->prev_bb)); /* Make insn to appear outside BB. */ BB_END (EXIT_BLOCK_PTR->prev_bb) = PREV_INSN (BB_END (EXIT_BLOCK_PTR->prev_bb)); } /* Compute INSN_REG_WEIGHT for all blocks. We must do this before removing death notes. */ FOR_EACH_BB_REVERSE (b) find_insn_reg_weight (b->index); if (targetm.sched.md_init_global) targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid); } /* Free global data used during insn scheduling. */ void sched_finish (void) { free (h_i_d); if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { free (curr_state); dfa_finish (); } free_dependency_caches (); end_alias_analysis (); if (write_symbols != NO_DEBUG) free (line_note_head); if (targetm.sched.md_finish_global) targetm.sched.md_finish_global (sched_dump, sched_verbose); } #endif /* INSN_SCHEDULING */ /* General-purpose hooks. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ /* This file contains generic hooks that can be used as defaults for target or language-dependent hook initializers. */ /* Generic hook that does absolutely zappo. */ void hook_void_void (void) { } /* Generic hook that takes no arguments and returns false. */ bool hook_bool_void_false (void) { return false; } /* The same, but formally returning NO_REGS. */ int hook_int_void_no_regs (void) { return NO_REGS; } /* Generic hook that returns 1. */ int hook_int_void_1 (void) { return 1; } /* Generic hook that takes (bool) and returns false. */ bool hook_bool_bool_false (bool a ATTRIBUTE_UNUSED) { return false; } /* Generic hook that takes (FILE *, const char *) and does nothing. */ void hook_void_FILEptr_constcharptr (FILE *a ATTRIBUTE_UNUSED, const char *b ATTRIBUTE_UNUSED) { } /* Used for the TARGET_ASM_CAN_OUTPUT_MI_THUNK hook. */ bool hook_bool_tree_hwi_hwi_tree_false (tree a ATTRIBUTE_UNUSED, HOST_WIDE_INT b ATTRIBUTE_UNUSED, HOST_WIDE_INT c ATTRIBUTE_UNUSED, tree d ATTRIBUTE_UNUSED) { return false; } bool hook_bool_tree_hwi_hwi_tree_true (tree a ATTRIBUTE_UNUSED, HOST_WIDE_INT b ATTRIBUTE_UNUSED, HOST_WIDE_INT c ATTRIBUTE_UNUSED, tree d ATTRIBUTE_UNUSED) { return true; } bool hook_bool_constcharptr_size_t_false (const char *a ATTRIBUTE_UNUSED, size_t b ATTRIBUTE_UNUSED) { return false; } bool default_can_output_mi_thunk_no_vcall (tree a ATTRIBUTE_UNUSED, HOST_WIDE_INT b ATTRIBUTE_UNUSED, HOST_WIDE_INT c, tree d ATTRIBUTE_UNUSED) { return c == 0; } /* ??? Used for comp_type_attributes, which ought to return bool. 
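   More broadly, the trivial hooks in this file exist so that tables of
   target and language callbacks can be filled with safe defaults.  A
   sketch of that use follows (guarded out of the build; the struct below
   is hypothetical and is not the real targetm layout).  */

#if 0
/* Illustrative sketch, not part of the build.  A hypothetical hook table
   initialized with two of the defaults defined in this file.  */
struct sketch_target_hooks
{
  int (*comp_type_attributes) (tree, tree);  /* "are attributes compatible?" */
  void (*file_start) (void);                 /* start-of-file output */
};

static const struct sketch_target_hooks sketch_default_target =
{
  hook_int_tree_tree_1,   /* harmless default: report "compatible" */
  hook_void_void          /* harmless default: emit nothing */
};
#endif

/* ??? Used for comp_type_attributes, which ought to return bool: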
*/ int hook_int_tree_tree_1 (tree a ATTRIBUTE_UNUSED, tree b ATTRIBUTE_UNUSED) { return 1; } int hook_int_rtx_0 (rtx a ATTRIBUTE_UNUSED) { return 0; } int hook_int_size_t_constcharptr_int_0 (size_t a ATTRIBUTE_UNUSED, const char *b ATTRIBUTE_UNUSED, int c ATTRIBUTE_UNUSED) { return 0; } unsigned int hook_uint_uint_constcharptrptr_0 (unsigned int a ATTRIBUTE_UNUSED, const char **b ATTRIBUTE_UNUSED) { return 0; } void hook_void_int (int b ATTRIBUTE_UNUSED) { } void hook_void_tree (tree a ATTRIBUTE_UNUSED) { } void hook_void_charptr (char *a ATTRIBUTE_UNUSED) { } void hook_void_tree_treeptr (tree a ATTRIBUTE_UNUSED, tree *b ATTRIBUTE_UNUSED) { } bool hook_bool_tree_false (tree a ATTRIBUTE_UNUSED) { return false; } bool hook_bool_tree_true (tree a ATTRIBUTE_UNUSED) { return true; } bool hook_bool_tree_tree_false (tree a ATTRIBUTE_UNUSED, tree b ATTRIBUTE_UNUSED) { return false; } bool hook_bool_rtx_false (rtx a ATTRIBUTE_UNUSED) { return false; } bool hook_bool_uintp_uintp_false (unsigned int *a ATTRIBUTE_UNUSED, unsigned int *b ATTRIBUTE_UNUSED) { return false; } bool hook_bool_rtx_int_int_intp_false (rtx a ATTRIBUTE_UNUSED, int b ATTRIBUTE_UNUSED, int c ATTRIBUTE_UNUSED, int *d ATTRIBUTE_UNUSED) { return false; } /* Generic hook that takes an rtx and returns it. */ rtx hook_rtx_rtx_identity (rtx x) { return x; } /* Generic hook that takes an rtx and returns NULL_RTX. */ rtx hook_rtx_rtx_null (rtx x ATTRIBUTE_UNUSED) { return NULL; } /* Generic hook that takes a tree and an int and returns NULL_RTX. */ rtx hook_rtx_tree_int_null (tree a ATTRIBUTE_UNUSED, int b ATTRIBUTE_UNUSED) { return NULL; } /* Generic hook that takes a tree and returns it as is. */ tree hook_tree_tree_identity (tree a) { return a; } /* Generic hook that takes a tree and returns a NULL string. */ const char * hook_constcharptr_tree_null (tree t ATTRIBUTE_UNUSED) { return NULL; } /* If-conversion support. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef HAVE_conditional_execution #define HAVE_conditional_execution 0 #endif #ifndef HAVE_conditional_move #define HAVE_conditional_move 0 #endif #ifndef HAVE_incscc #define HAVE_incscc 0 #endif #ifndef HAVE_decscc #define HAVE_decscc 0 #endif #ifndef HAVE_trap #define HAVE_trap 0 #endif #ifndef HAVE_conditional_trap #define HAVE_conditional_trap 0 #endif #ifndef MAX_CONDITIONAL_EXECUTE #define MAX_CONDITIONAL_EXECUTE (BRANCH_COST + 1) #endif #define NULL_EDGE ((struct edge_def *)NULL) #define NULL_BLOCK ((struct basic_block_def *)NULL) /* # of IF-THEN or IF-THEN-ELSE blocks we looked at */ static int num_possible_if_blocks; /* # of IF-THEN or IF-THEN-ELSE blocks were converted to conditional execution. */ static int num_updated_if_blocks; /* # of changes made which require life information to be updated. 
*/ static int num_true_changes; /* Whether conditional execution changes were made. */ static int cond_exec_changed_p; /* True if life data ok at present. */ static bool life_data_ok; /* Forward references. */ static int count_bb_insns (basic_block); static rtx first_active_insn (basic_block); static rtx last_active_insn (basic_block, int); static basic_block block_fallthru (basic_block); static int cond_exec_process_insns (ce_if_block_t *, rtx, rtx, rtx, rtx, int); static rtx cond_exec_get_condition (rtx); static int cond_exec_process_if_block (ce_if_block_t *, int); static rtx noce_get_condition (rtx, rtx *); static int noce_operand_ok (rtx); static int noce_process_if_block (ce_if_block_t *); static int process_if_block (ce_if_block_t *); static void merge_if_block (ce_if_block_t *); static int find_cond_trap (basic_block, edge, edge); static basic_block find_if_header (basic_block, int); static int block_jumps_and_fallthru_p (basic_block, basic_block); static int find_if_block (ce_if_block_t *); static int find_if_case_1 (basic_block, edge, edge); static int find_if_case_2 (basic_block, edge, edge); static int find_memory (rtx *, void *); static int dead_or_predicable (basic_block, basic_block, basic_block, basic_block, int); static void noce_emit_move_insn (rtx, rtx); static rtx block_has_only_trap (basic_block); static void mark_loop_exit_edges (void); /* Sets EDGE_LOOP_EXIT flag for all loop exits. */ static void mark_loop_exit_edges (void) { struct loops loops; basic_block bb; edge e; flow_loops_find (&loops, LOOP_TREE); free_dominance_info (CDI_DOMINATORS); if (loops.num > 1) { FOR_EACH_BB (bb) { for (e = bb->succ; e; e = e->succ_next) { if (find_common_loop (bb->loop_father, e->dest->loop_father) != bb->loop_father) e->flags |= EDGE_LOOP_EXIT; else e->flags &= ~EDGE_LOOP_EXIT; } } } flow_loops_free (&loops); } /* Count the number of non-jump active insns in BB. */ static int count_bb_insns (basic_block bb) { int count = 0; rtx insn = BB_HEAD (bb); while (1) { if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == INSN) count++; if (insn == BB_END (bb)) break; insn = NEXT_INSN (insn); } return count; } /* Return the first non-jump active insn in the basic block. */ static rtx first_active_insn (basic_block bb) { rtx insn = BB_HEAD (bb); if (GET_CODE (insn) == CODE_LABEL) { if (insn == BB_END (bb)) return NULL_RTX; insn = NEXT_INSN (insn); } while (GET_CODE (insn) == NOTE) { if (insn == BB_END (bb)) return NULL_RTX; insn = NEXT_INSN (insn); } if (GET_CODE (insn) == JUMP_INSN) return NULL_RTX; return insn; } /* Return the last non-jump active (non-jump) insn in the basic block. */ static rtx last_active_insn (basic_block bb, int skip_use_p) { rtx insn = BB_END (bb); rtx head = BB_HEAD (bb); while (GET_CODE (insn) == NOTE || GET_CODE (insn) == JUMP_INSN || (skip_use_p && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE)) { if (insn == head) return NULL_RTX; insn = PREV_INSN (insn); } if (GET_CODE (insn) == CODE_LABEL) return NULL_RTX; return insn; } /* Return the basic block reached by falling though the basic block BB. */ static basic_block block_fallthru (basic_block bb) { edge e; for (e = bb->succ; e != NULL_EDGE && (e->flags & EDGE_FALLTHRU) == 0; e = e->succ_next) ; return (e) ? e->dest : NULL_BLOCK; } /* Go through a bunch of insns, converting them to conditional execution format if possible. Return TRUE if all of the non-note insns were processed. 
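   At the source level, the transformation trades a forward branch for
   straight-line code whose effects are guarded by the test.  A standalone
   sketch of that idea follows (guarded out of the build; the mask form is
   only one branch-free way of expressing a guarded assignment, not what
   the conversion literally emits).  */

#if 0
/* Illustrative sketch, not part of the build: the same selection written
   with a branch and written branch-free.  */
static int
sketch_branchy (int c, int a, int b)
{
  int x;
  if (c)
    x = a;      /* THEN block */
  else
    x = b;      /* ELSE block */
  return x;
}

static int
sketch_if_converted (int c, int a, int b)
{
  int mask = -(c != 0);              /* all ones when C holds, else zero */
  return (a & mask) | (b & ~mask);   /* no branch executed */
}
#endif

/* Predicate every insn between START and END with TEST: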
*/ static int cond_exec_process_insns (ce_if_block_t *ce_info ATTRIBUTE_UNUSED, /* if block information */rtx start, /* first insn to look at */rtx end, /* last insn to look at */rtx test, /* conditional execution test */rtx prob_val, /* probability of branch taken. */int mod_ok) { int must_be_last = FALSE; rtx insn; rtx xtest; rtx pattern; if (!start || !end) return FALSE; for (insn = start; ; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == NOTE) goto insn_done; if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN) abort (); /* Remove USE insns that get in the way. */ if (reload_completed && GET_CODE (PATTERN (insn)) == USE) { /* ??? Ug. Actually unlinking the thing is problematic, given what we'd have to coordinate with our callers. */ SET_INSN_DELETED (insn); goto insn_done; } /* Last insn wasn't last? */ if (must_be_last) return FALSE; if (modified_in_p (test, insn)) { if (!mod_ok) return FALSE; must_be_last = TRUE; } /* Now build the conditional form of the instruction. */ pattern = PATTERN (insn); xtest = copy_rtx (test); /* If this is already a COND_EXEC, rewrite the test to be an AND of the two conditions. */ if (GET_CODE (pattern) == COND_EXEC) { if (GET_MODE (xtest) != GET_MODE (COND_EXEC_TEST (pattern))) return FALSE; xtest = gen_rtx_AND (GET_MODE (xtest), xtest, COND_EXEC_TEST (pattern)); pattern = COND_EXEC_CODE (pattern); } pattern = gen_rtx_COND_EXEC (VOIDmode, xtest, pattern); /* If the machine needs to modify the insn being conditionally executed, say for example to force a constant integer operand into a temp register, do so here. */ #ifdef IFCVT_MODIFY_INSN IFCVT_MODIFY_INSN (ce_info, pattern, insn); if (! pattern) return FALSE; #endif validate_change (insn, &PATTERN (insn), pattern, 1); if (GET_CODE (insn) == CALL_INSN && prob_val) validate_change (insn, ®_NOTES (insn), alloc_EXPR_LIST (REG_BR_PROB, prob_val, REG_NOTES (insn)), 1); insn_done: if (insn == end) break; } return TRUE; } /* Return the condition for a jump. Do not do any special processing. */ static rtx cond_exec_get_condition (rtx jump) { rtx test_if, cond; if (any_condjump_p (jump)) test_if = SET_SRC (pc_set (jump)); else return NULL_RTX; cond = XEXP (test_if, 0); /* If this branches to JUMP_LABEL when the condition is false, reverse the condition. */ if (GET_CODE (XEXP (test_if, 2)) == LABEL_REF && XEXP (XEXP (test_if, 2), 0) == JUMP_LABEL (jump)) { enum rtx_code rev = reversed_comparison_code (cond, jump); if (rev == UNKNOWN) return NULL_RTX; cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0), XEXP (cond, 1)); } return cond; } /* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it to conditional execution. Return TRUE if we were successful at converting the block. */ static int cond_exec_process_if_block (ce_if_block_t * ce_info, /* if block information */int do_multiple_p) { basic_block test_bb = ce_info->test_bb; /* last test block */ basic_block then_bb = ce_info->then_bb; /* THEN */ basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */ rtx test_expr; /* expression in IF_THEN_ELSE that is tested */ rtx then_start; /* first insn in THEN block */ rtx then_end; /* last insn + 1 in THEN block */ rtx else_start = NULL_RTX; /* first insn in ELSE block or NULL */ rtx else_end = NULL_RTX; /* last insn + 1 in ELSE block */ int max; /* max # of insns to convert. 
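   It is doubled below when an ELSE block is present.  Building the test
   for the other arm needs the reversed comparison; for ordinary integer
   comparisons that is a simple table flip, sketched below (guarded out of
   the build; the enum is hypothetical, and the unordered floating-point
   cases that reversed_comparison_code must handle are ignored).  */

#if 0
/* Illustrative sketch, not part of the build.  */
enum sketch_cmp { SK_EQ, SK_NE, SK_LT, SK_GE, SK_GT, SK_LE };

static enum sketch_cmp
sketch_reverse_cmp (enum sketch_cmp code)
{
  switch (code)
    {
    case SK_EQ: return SK_NE;
    case SK_NE: return SK_EQ;
    case SK_LT: return SK_GE;
    case SK_GE: return SK_LT;
    case SK_GT: return SK_LE;
    case SK_LE: return SK_GT;
    }
  return code;   /* not reached */
}
#endif

/* Remaining locals: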
*/ int then_mod_ok; /* whether conditional mods are ok in THEN */ rtx true_expr; /* test for else block insns */ rtx false_expr; /* test for then block insns */ rtx true_prob_val; /* probability of else block */ rtx false_prob_val; /* probability of then block */ int n_insns; enum rtx_code false_code; /* If test is comprised of && or || elements, and we've failed at handling all of them together, just use the last test if it is the special case of && elements without an ELSE block. */ if (!do_multiple_p && ce_info->num_multiple_test_blocks) { if (else_bb || ! ce_info->and_and_p) return FALSE; ce_info->test_bb = test_bb = ce_info->last_test_bb; ce_info->num_multiple_test_blocks = 0; ce_info->num_and_and_blocks = 0; ce_info->num_or_or_blocks = 0; } /* Find the conditional jump to the ELSE or JOIN part, and isolate the test. */ test_expr = cond_exec_get_condition (BB_END (test_bb)); if (! test_expr) return FALSE; /* If the conditional jump is more than just a conditional jump, then we can not do conditional execution conversion on this block. */ if (! onlyjump_p (BB_END (test_bb))) return FALSE; /* Collect the bounds of where we're to search, skipping any labels, jumps and notes at the beginning and end of the block. Then count the total number of insns and see if it is small enough to convert. */ then_start = first_active_insn (then_bb); then_end = last_active_insn (then_bb, TRUE); n_insns = ce_info->num_then_insns = count_bb_insns (then_bb); max = MAX_CONDITIONAL_EXECUTE; if (else_bb) { max *= 2; else_start = first_active_insn (else_bb); else_end = last_active_insn (else_bb, TRUE); n_insns += ce_info->num_else_insns = count_bb_insns (else_bb); } if (n_insns > max) return FALSE; /* Map test_expr/test_jump into the appropriate MD tests to use on the conditionally executed code. */ true_expr = test_expr; false_code = reversed_comparison_code (true_expr, BB_END (test_bb)); if (false_code != UNKNOWN) false_expr = gen_rtx_fmt_ee (false_code, GET_MODE (true_expr), XEXP (true_expr, 0), XEXP (true_expr, 1)); else false_expr = NULL_RTX; #ifdef IFCVT_MODIFY_TESTS /* If the machine description needs to modify the tests, such as setting a conditional execution register from a comparison, it can do so here. */ IFCVT_MODIFY_TESTS (ce_info, true_expr, false_expr); /* See if the conversion failed. */ if (!true_expr || !false_expr) goto fail; #endif true_prob_val = find_reg_note (BB_END (test_bb), REG_BR_PROB, NULL_RTX); if (true_prob_val) { true_prob_val = XEXP (true_prob_val, 0); false_prob_val = GEN_INT (REG_BR_PROB_BASE - INTVAL (true_prob_val)); } else false_prob_val = NULL_RTX; /* If we have && or || tests, do them here. These tests are in the adjacent blocks after the first block containing the test. */ if (ce_info->num_multiple_test_blocks > 0) { basic_block bb = test_bb; basic_block last_test_bb = ce_info->last_test_bb; if (! false_expr) goto fail; do { rtx start, end; rtx t, f; bb = block_fallthru (bb); start = first_active_insn (bb); end = last_active_insn (bb, TRUE); if (start && ! cond_exec_process_insns (ce_info, start, end, false_expr, false_prob_val, FALSE)) goto fail; /* If the conditional jump is more than just a conditional jump, then we can not do conditional execution conversion on this block. */ if (! onlyjump_p (BB_END (bb))) goto fail; /* Find the conditional jump and isolate the test. */ t = cond_exec_get_condition (BB_END (bb)); if (! 
t) goto fail; f = gen_rtx_fmt_ee (reverse_condition (GET_CODE (t)), GET_MODE (t), XEXP (t, 0), XEXP (t, 1)); if (ce_info->and_and_p) { t = gen_rtx_AND (GET_MODE (t), true_expr, t); f = gen_rtx_IOR (GET_MODE (t), false_expr, f); } else { t = gen_rtx_IOR (GET_MODE (t), true_expr, t); f = gen_rtx_AND (GET_MODE (t), false_expr, f); } /* If the machine description needs to modify the tests, such as setting a conditional execution register from a comparison, it can do so here. */ #ifdef IFCVT_MODIFY_MULTIPLE_TESTS IFCVT_MODIFY_MULTIPLE_TESTS (ce_info, bb, t, f); /* See if the conversion failed. */ if (!t || !f) goto fail; #endif true_expr = t; false_expr = f; } while (bb != last_test_bb); } /* For IF-THEN-ELSE blocks, we don't allow modifications of the test on then THEN block. */ then_mod_ok = (else_bb == NULL_BLOCK); /* Go through the THEN and ELSE blocks converting the insns if possible to conditional execution. */ if (then_end && (! false_expr || ! cond_exec_process_insns (ce_info, then_start, then_end, false_expr, false_prob_val, then_mod_ok))) goto fail; if (else_bb && else_end && ! cond_exec_process_insns (ce_info, else_start, else_end, true_expr, true_prob_val, TRUE)) goto fail; /* If we cannot apply the changes, fail. Do not go through the normal fail processing, since apply_change_group will call cancel_changes. */ if (! apply_change_group ()) { #ifdef IFCVT_MODIFY_CANCEL /* Cancel any machine dependent changes. */ IFCVT_MODIFY_CANCEL (ce_info); #endif return FALSE; } #ifdef IFCVT_MODIFY_FINAL /* Do any machine dependent final modifications. */ IFCVT_MODIFY_FINAL (ce_info); #endif /* Conversion succeeded. */ if (dump_file) fprintf (dump_file, "%d insn%s converted to conditional execution.\n", n_insns, (n_insns == 1) ? " was" : "s were"); /* Merge the blocks! */ merge_if_block (ce_info); cond_exec_changed_p = TRUE; return TRUE; fail: #ifdef IFCVT_MODIFY_CANCEL /* Cancel any machine dependent changes. */ IFCVT_MODIFY_CANCEL (ce_info); #endif cancel_changes (0); return FALSE; } /* Used by noce_process_if_block to communicate with its subroutines. The subroutines know that A and B may be evaluated freely. They know that X is a register. They should insert new instructions before cond_earliest. */ struct noce_if_info { basic_block test_bb; rtx insn_a, insn_b; rtx x, a, b; rtx jump, cond, cond_earliest; }; static rtx noce_emit_store_flag (struct noce_if_info *, rtx, int, int); static int noce_try_move (struct noce_if_info *); static int noce_try_store_flag (struct noce_if_info *); static int noce_try_addcc (struct noce_if_info *); static int noce_try_store_flag_constants (struct noce_if_info *); static int noce_try_store_flag_mask (struct noce_if_info *); static rtx noce_emit_cmove (struct noce_if_info *, rtx, enum rtx_code, rtx, rtx, rtx, rtx); static int noce_try_cmove (struct noce_if_info *); static int noce_try_cmove_arith (struct noce_if_info *); static rtx noce_get_alt_condition (struct noce_if_info *, rtx, rtx *); static int noce_try_minmax (struct noce_if_info *); static int noce_try_abs (struct noce_if_info *); static int noce_try_sign_mask (struct noce_if_info *); /* Helper function for noce_try_store_flag*. */ static rtx noce_emit_store_flag (struct noce_if_info *if_info, rtx x, int reversep, int normalize) { rtx cond = if_info->cond; int cond_complex; enum rtx_code code; cond_complex = (! general_operand (XEXP (cond, 0), VOIDmode) || ! general_operand (XEXP (cond, 1), VOIDmode)); /* If earliest == jump, or when the condition is complex, try to build the store_flag insn directly. 
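For example, for an EQ comparison this simply emits (set (reg X) (eq (reg A) (reg B))) and asks recog whether the target accepts such an insn; if it does not, we fall back to emit_store_flag further below when the operands allow it.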
*/ if (cond_complex) cond = XEXP (SET_SRC (pc_set (if_info->jump)), 0); if (reversep) code = reversed_comparison_code (cond, if_info->jump); else code = GET_CODE (cond); if ((if_info->cond_earliest == if_info->jump || cond_complex) && (normalize == 0 || STORE_FLAG_VALUE == normalize)) { rtx tmp; tmp = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0), XEXP (cond, 1)); tmp = gen_rtx_SET (VOIDmode, x, tmp); start_sequence (); tmp = emit_insn (tmp); if (recog_memoized (tmp) >= 0) { tmp = get_insns (); end_sequence (); emit_insn (tmp); if_info->cond_earliest = if_info->jump; return x; } end_sequence (); } /* Don't even try if the comparison operands or the mode of X are weird. */ if (cond_complex || !SCALAR_INT_MODE_P (GET_MODE (x))) return NULL_RTX; return emit_store_flag (x, code, XEXP (cond, 0), XEXP (cond, 1), VOIDmode, (code == LTU || code == LEU || code == GEU || code == GTU), normalize); } /* Emit instruction to move an rtx, possibly into STRICT_LOW_PART. X is the destination/target and Y is the value to copy. */ static void noce_emit_move_insn (rtx x, rtx y) { enum machine_mode outmode, inmode; rtx outer, inner; int bitpos; if (GET_CODE (x) != STRICT_LOW_PART) { emit_move_insn (x, y); return; } outer = XEXP (x, 0); inner = XEXP (outer, 0); outmode = GET_MODE (outer); inmode = GET_MODE (inner); bitpos = SUBREG_BYTE (outer) * BITS_PER_UNIT; store_bit_field (inner, GET_MODE_BITSIZE (outmode), bitpos, outmode, y, GET_MODE_BITSIZE (inmode)); } /* Return sequence of instructions generated by if conversion. This function calls end_sequence() to end the current stream, ensures that the instructions are unshared, recognizable non-jump insns. On failure, this function returns a NULL_RTX. */ static rtx end_ifcvt_sequence (struct noce_if_info *if_info) { rtx insn; rtx seq = get_insns (); set_used_flags (if_info->x); set_used_flags (if_info->cond); unshare_all_rtl_in_chain (seq); end_sequence (); /* Make sure that all of the instructions emitted are recognizable, and that we haven't introduced a new jump instruction. As an exercise for the reader, build a general mechanism that allows proper placement of required clobbers. */ for (insn = seq; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == JUMP_INSN || recog_memoized (insn) == -1) return NULL_RTX; return seq; } /* Convert "if (a != b) x = a; else x = b" into "x = a" and "if (a == b) x = a; else x = b" into "x = b". */ static int noce_try_move (struct noce_if_info *if_info) { rtx cond = if_info->cond; enum rtx_code code = GET_CODE (cond); rtx y, seq; if (code != NE && code != EQ) return FALSE; /* This optimization isn't valid if either A or B could be a NaN or a signed zero. */ if (HONOR_NANS (GET_MODE (if_info->x)) || HONOR_SIGNED_ZEROS (GET_MODE (if_info->x))) return FALSE; /* Check whether the operands of the comparison are A and B in either order. */ if ((rtx_equal_p (if_info->a, XEXP (cond, 0)) && rtx_equal_p (if_info->b, XEXP (cond, 1))) || (rtx_equal_p (if_info->a, XEXP (cond, 1)) && rtx_equal_p (if_info->b, XEXP (cond, 0)))) { y = (code == EQ) ? if_info->a : if_info->b; /* Avoid generating the move if the source is the destination. */ if (! rtx_equal_p (if_info->x, y)) { start_sequence (); noce_emit_move_insn (if_info->x, y); seq = end_ifcvt_sequence (if_info); if (!seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); } return TRUE; } return FALSE; } /* Convert "if (test) x = 1; else x = 0". Only try 0 and STORE_FLAG_VALUE here.
Other combinations will be tried in noce_try_store_flag_constants after noce_try_cmove has had a go at the conversion. */ static int noce_try_store_flag (struct noce_if_info *if_info) { int reversep; rtx target, seq; if (GET_CODE (if_info->b) == CONST_INT && INTVAL (if_info->b) == STORE_FLAG_VALUE && if_info->a == const0_rtx) reversep = 0; else if (if_info->b == const0_rtx && GET_CODE (if_info->a) == CONST_INT && INTVAL (if_info->a) == STORE_FLAG_VALUE && (reversed_comparison_code (if_info->cond, if_info->jump) != UNKNOWN)) reversep = 1; else return FALSE; start_sequence (); target = noce_emit_store_flag (if_info, if_info->x, reversep, 0); if (target) { if (target != if_info->x) noce_emit_move_insn (if_info->x, target); seq = end_ifcvt_sequence (if_info); if (! seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); return TRUE; } else { end_sequence (); return FALSE; } } /* Convert "if (test) x = a; else x = b", for A and B constant. */ static int noce_try_store_flag_constants (struct noce_if_info *if_info) { rtx target, seq; int reversep; HOST_WIDE_INT itrue, ifalse, diff, tmp; int normalize, can_reverse; enum machine_mode mode; if (! no_new_pseudos && GET_CODE (if_info->a) == CONST_INT && GET_CODE (if_info->b) == CONST_INT) { mode = GET_MODE (if_info->x); ifalse = INTVAL (if_info->a); itrue = INTVAL (if_info->b); /* Make sure we can represent the difference between the two values. */ if ((itrue - ifalse > 0) != ((ifalse < 0) != (itrue < 0) ? ifalse < 0 : ifalse < itrue)) return FALSE; diff = trunc_int_for_mode (itrue - ifalse, mode); can_reverse = (reversed_comparison_code (if_info->cond, if_info->jump) != UNKNOWN); reversep = 0; if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE) normalize = 0; else if (ifalse == 0 && exact_log2 (itrue) >= 0 && (STORE_FLAG_VALUE == 1 || BRANCH_COST >= 2)) normalize = 1; else if (itrue == 0 && exact_log2 (ifalse) >= 0 && can_reverse && (STORE_FLAG_VALUE == 1 || BRANCH_COST >= 2)) normalize = 1, reversep = 1; else if (itrue == -1 && (STORE_FLAG_VALUE == -1 || BRANCH_COST >= 2)) normalize = -1; else if (ifalse == -1 && can_reverse && (STORE_FLAG_VALUE == -1 || BRANCH_COST >= 2)) normalize = -1, reversep = 1; else if ((BRANCH_COST >= 2 && STORE_FLAG_VALUE == -1) || BRANCH_COST >= 3) normalize = -1; else return FALSE; if (reversep) { tmp = itrue; itrue = ifalse; ifalse = tmp; diff = trunc_int_for_mode (-diff, mode); } start_sequence (); target = noce_emit_store_flag (if_info, if_info->x, reversep, normalize); if (! target) { end_sequence (); return FALSE; } /* if (test) x = 3; else x = 4; => x = 3 + (test == 0); */ if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE) { target = expand_simple_binop (mode, (diff == STORE_FLAG_VALUE ? PLUS : MINUS), GEN_INT (ifalse), target, if_info->x, 0, OPTAB_WIDEN); } /* if (test) x = 8; else x = 0; => x = (test != 0) << 3; */ else if (ifalse == 0 && (tmp = exact_log2 (itrue)) >= 0) { target = expand_simple_binop (mode, ASHIFT, target, GEN_INT (tmp), if_info->x, 0, OPTAB_WIDEN); } /* if (test) x = -1; else x = b; => x = -(test != 0) | b; */ else if (itrue == -1) { target = expand_simple_binop (mode, IOR, target, GEN_INT (ifalse), if_info->x, 0, OPTAB_WIDEN); } /* if (test) x = a; else x = b; => x = (-(test != 0) & (b - a)) + a; */ else { target = expand_simple_binop (mode, AND, target, GEN_INT (diff), if_info->x, 0, OPTAB_WIDEN); if (target) target = expand_simple_binop (mode, PLUS, target, GEN_INT (ifalse), if_info->x, 0, OPTAB_WIDEN); } if (! 
target) { end_sequence (); return FALSE; } if (target != if_info->x) noce_emit_move_insn (if_info->x, target); seq = end_ifcvt_sequence (if_info); if (!seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); return TRUE; } return FALSE; } /* Convert "if (test) foo++" into "foo += (test != 0)", and similarly for "foo--". */ static int noce_try_addcc (struct noce_if_info *if_info) { rtx target, seq; int subtract, normalize; if (! no_new_pseudos && GET_CODE (if_info->a) == PLUS && rtx_equal_p (XEXP (if_info->a, 0), if_info->b) && (reversed_comparison_code (if_info->cond, if_info->jump) != UNKNOWN)) { rtx cond = if_info->cond; enum rtx_code code = reversed_comparison_code (cond, if_info->jump); /* First try to use addcc pattern. */ if (general_operand (XEXP (cond, 0), VOIDmode) && general_operand (XEXP (cond, 1), VOIDmode)) { start_sequence (); target = emit_conditional_add (if_info->x, code, XEXP (cond, 0), XEXP (cond, 1), VOIDmode, if_info->b, XEXP (if_info->a, 1), GET_MODE (if_info->x), (code == LTU || code == GEU || code == LEU || code == GTU)); if (target) { if (target != if_info->x) noce_emit_move_insn (if_info->x, target); seq = end_ifcvt_sequence (if_info); if (!seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); return TRUE; } end_sequence (); } /* If that fails, construct conditional increment or decrement using setcc. */ if (BRANCH_COST >= 2 && (XEXP (if_info->a, 1) == const1_rtx || XEXP (if_info->a, 1) == constm1_rtx)) { start_sequence (); if (STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1))) subtract = 0, normalize = 0; else if (-STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1))) subtract = 1, normalize = 0; else subtract = 0, normalize = INTVAL (XEXP (if_info->a, 1)); target = noce_emit_store_flag (if_info, gen_reg_rtx (GET_MODE (if_info->x)), 1, normalize); if (target) target = expand_simple_binop (GET_MODE (if_info->x), subtract ? MINUS : PLUS, if_info->b, target, if_info->x, 0, OPTAB_WIDEN); if (target) { if (target != if_info->x) noce_emit_move_insn (if_info->x, target); seq = end_ifcvt_sequence (if_info); if (!seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); return TRUE; } end_sequence (); } } return FALSE; } /* Convert "if (test) x = 0;" to "x &= -(test == 0);" */ static int noce_try_store_flag_mask (struct noce_if_info *if_info) { rtx target, seq; int reversep; reversep = 0; if (! no_new_pseudos && (BRANCH_COST >= 2 || STORE_FLAG_VALUE == -1) && ((if_info->a == const0_rtx && rtx_equal_p (if_info->b, if_info->x)) || ((reversep = (reversed_comparison_code (if_info->cond, if_info->jump) != UNKNOWN)) && if_info->b == const0_rtx && rtx_equal_p (if_info->a, if_info->x)))) { start_sequence (); target = noce_emit_store_flag (if_info, gen_reg_rtx (GET_MODE (if_info->x)), reversep, -1); if (target) target = expand_simple_binop (GET_MODE (if_info->x), AND, if_info->x, target, if_info->x, 0, OPTAB_WIDEN); if (target) { if (target != if_info->x) noce_emit_move_insn (if_info->x, target); seq = end_ifcvt_sequence (if_info); if (!seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); return TRUE; } end_sequence (); } return FALSE; } /* Helper function for noce_try_cmove and noce_try_cmove_arith. */ static rtx noce_emit_cmove (struct noce_if_info *if_info, rtx x, enum rtx_code code, rtx cmp_a, rtx cmp_b, rtx vfalse, rtx vtrue) { /* If earliest == jump, try to build the cmove insn directly. 
This is helpful when combine has created some complex condition (like for alpha's cmovlbs) that we can't hope to regenerate through the normal interface. */ if (if_info->cond_earliest == if_info->jump) { rtx tmp; tmp = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b); tmp = gen_rtx_IF_THEN_ELSE (GET_MODE (x), tmp, vtrue, vfalse); tmp = gen_rtx_SET (VOIDmode, x, tmp); start_sequence (); tmp = emit_insn (tmp); if (recog_memoized (tmp) >= 0) { tmp = get_insns (); end_sequence (); emit_insn (tmp); return x; } end_sequence (); } /* Don't even try if the comparison operands are weird. */ if (! general_operand (cmp_a, GET_MODE (cmp_a)) || ! general_operand (cmp_b, GET_MODE (cmp_b))) return NULL_RTX; #if HAVE_conditional_move return emit_conditional_move (x, code, cmp_a, cmp_b, VOIDmode, vtrue, vfalse, GET_MODE (x), (code == LTU || code == GEU || code == LEU || code == GTU)); #else /* We'll never get here, as noce_process_if_block doesn't call the functions involved. Ifdef code, however, should be discouraged because it leads to typos in the code not selected. However, emit_conditional_move won't exist either. */ return NULL_RTX; #endif } /* Try only simple constants and registers here. More complex cases are handled in noce_try_cmove_arith after noce_try_store_flag_arith has had a go at it. */ static int noce_try_cmove (struct noce_if_info *if_info) { enum rtx_code code; rtx target, seq; if ((CONSTANT_P (if_info->a) || register_operand (if_info->a, VOIDmode)) && (CONSTANT_P (if_info->b) || register_operand (if_info->b, VOIDmode))) { start_sequence (); code = GET_CODE (if_info->cond); target = noce_emit_cmove (if_info, if_info->x, code, XEXP (if_info->cond, 0), XEXP (if_info->cond, 1), if_info->a, if_info->b); if (target) { if (target != if_info->x) noce_emit_move_insn (if_info->x, target); seq = end_ifcvt_sequence (if_info); if (!seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); return TRUE; } else { end_sequence (); return FALSE; } } return FALSE; } /* Try more complex cases involving conditional_move. */ static int noce_try_cmove_arith (struct noce_if_info *if_info) { rtx a = if_info->a; rtx b = if_info->b; rtx x = if_info->x; rtx insn_a, insn_b; rtx tmp, target; int is_mem = 0; enum rtx_code code; /* A conditional move from two memory sources is equivalent to a conditional on their addresses followed by a load. Don't do this early because it'll screw alias analysis. Note that we've already checked for no side effects. */ if (! no_new_pseudos && cse_not_expected && MEM_P (a) && MEM_P (b) && BRANCH_COST >= 5) { a = XEXP (a, 0); b = XEXP (b, 0); x = gen_reg_rtx (Pmode); is_mem = 1; } /* ??? We could handle this if we knew that a load from A or B could not fault. This is also true if we've already loaded from the address along the path from ENTRY. */ else if (may_trap_p (a) || may_trap_p (b)) return FALSE; /* if (test) x = a + b; else x = c - d; => y = a + b; x = c - d; if (test) x = y; */ code = GET_CODE (if_info->cond); insn_a = if_info->insn_a; insn_b = if_info->insn_b; /* Possibly rearrange operands to make things come out more natural. 
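For example, if B is the same register as X, reversing the comparison and swapping A with B lets the value already sitting in X supply one arm of the conditional move directly.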
*/ if (reversed_comparison_code (if_info->cond, if_info->jump) != UNKNOWN) { int reversep = 0; if (rtx_equal_p (b, x)) reversep = 1; else if (general_operand (b, GET_MODE (b))) reversep = 1; if (reversep) { code = reversed_comparison_code (if_info->cond, if_info->jump); tmp = a, a = b, b = tmp; tmp = insn_a, insn_a = insn_b, insn_b = tmp; } } start_sequence (); /* If either operand is complex, load it into a register first. The best way to do this is to copy the original insn. In this way we preserve any clobbers etc that the insn may have had. This is of course not possible in the IS_MEM case. */ if (! general_operand (a, GET_MODE (a))) { rtx set; if (no_new_pseudos) goto end_seq_and_fail; if (is_mem) { tmp = gen_reg_rtx (GET_MODE (a)); tmp = emit_insn (gen_rtx_SET (VOIDmode, tmp, a)); } else if (! insn_a) goto end_seq_and_fail; else { a = gen_reg_rtx (GET_MODE (a)); tmp = copy_rtx (insn_a); set = single_set (tmp); SET_DEST (set) = a; tmp = emit_insn (PATTERN (tmp)); } if (recog_memoized (tmp) < 0) goto end_seq_and_fail; } if (! general_operand (b, GET_MODE (b))) { rtx set; if (no_new_pseudos) goto end_seq_and_fail; if (is_mem) { tmp = gen_reg_rtx (GET_MODE (b)); tmp = emit_insn (gen_rtx_SET (VOIDmode, tmp, b)); } else if (! insn_b) goto end_seq_and_fail; else { b = gen_reg_rtx (GET_MODE (b)); tmp = copy_rtx (insn_b); set = single_set (tmp); SET_DEST (set) = b; tmp = emit_insn (PATTERN (tmp)); } if (recog_memoized (tmp) < 0) goto end_seq_and_fail; } target = noce_emit_cmove (if_info, x, code, XEXP (if_info->cond, 0), XEXP (if_info->cond, 1), a, b); if (! target) goto end_seq_and_fail; /* If we're handling a memory for above, emit the load now. */ if (is_mem) { tmp = gen_rtx_MEM (GET_MODE (if_info->x), target); /* Copy over flags as appropriate. */ if (MEM_VOLATILE_P (if_info->a) || MEM_VOLATILE_P (if_info->b)) MEM_VOLATILE_P (tmp) = 1; if (MEM_IN_STRUCT_P (if_info->a) && MEM_IN_STRUCT_P (if_info->b)) MEM_IN_STRUCT_P (tmp) = 1; if (MEM_SCALAR_P (if_info->a) && MEM_SCALAR_P (if_info->b)) MEM_SCALAR_P (tmp) = 1; if (MEM_ALIAS_SET (if_info->a) == MEM_ALIAS_SET (if_info->b)) set_mem_alias_set (tmp, MEM_ALIAS_SET (if_info->a)); set_mem_align (tmp, MIN (MEM_ALIGN (if_info->a), MEM_ALIGN (if_info->b))); noce_emit_move_insn (if_info->x, tmp); } else if (target != x) noce_emit_move_insn (x, target); tmp = end_ifcvt_sequence (if_info); if (!tmp) return FALSE; emit_insn_before_setloc (tmp, if_info->jump, INSN_LOCATOR (if_info->insn_a)); return TRUE; end_seq_and_fail: end_sequence (); return FALSE; } /* For most cases, the simplified condition we found is the best choice, but this is not the case for the min/max/abs transforms. For these we wish to know that it is A or B in the condition. */ static rtx noce_get_alt_condition (struct noce_if_info *if_info, rtx target, rtx *earliest) { rtx cond, set, insn; int reverse; /* If target is already mentioned in the known condition, return it. */ if (reg_mentioned_p (target, if_info->cond)) { *earliest = if_info->cond_earliest; return if_info->cond; } set = pc_set (if_info->jump); cond = XEXP (SET_SRC (set), 0); reverse = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (if_info->jump); /* If we're looking for a constant, try to make the conditional have that constant in it. There are two reasons why it may not have the constant we want: 1. GCC may have needed to put the constant in a register, because the target can't compare directly against that constant. 
For this case, we look for a SET immediately before the comparison that puts a constant in that register. 2. GCC may have canonicalized the conditional, for example replacing "if x < 4" with "if x <= 3". We can undo that (or make equivalent types of changes) to get the constants we need if they're off by one in the right direction. */ if (GET_CODE (target) == CONST_INT) { enum rtx_code code = GET_CODE (if_info->cond); rtx op_a = XEXP (if_info->cond, 0); rtx op_b = XEXP (if_info->cond, 1); rtx prev_insn; /* First, look to see if we put a constant in a register. */ prev_insn = PREV_INSN (if_info->cond_earliest); if (prev_insn && INSN_P (prev_insn) && GET_CODE (PATTERN (prev_insn)) == SET) { rtx src = find_reg_equal_equiv_note (prev_insn); if (!src) src = SET_SRC (PATTERN (prev_insn)); if (GET_CODE (src) == CONST_INT) { if (rtx_equal_p (op_a, SET_DEST (PATTERN (prev_insn)))) op_a = src; else if (rtx_equal_p (op_b, SET_DEST (PATTERN (prev_insn)))) op_b = src; if (GET_CODE (op_a) == CONST_INT) { rtx tmp = op_a; op_a = op_b; op_b = tmp; code = swap_condition (code); } } } /* Now, look to see if we can get the right constant by adjusting the conditional. */ if (GET_CODE (op_b) == CONST_INT) { HOST_WIDE_INT desired_val = INTVAL (target); HOST_WIDE_INT actual_val = INTVAL (op_b); switch (code) { case LT: if (actual_val == desired_val + 1) { code = LE; op_b = GEN_INT (desired_val); } break; case LE: if (actual_val == desired_val - 1) { code = LT; op_b = GEN_INT (desired_val); } break; case GT: if (actual_val == desired_val - 1) { code = GE; op_b = GEN_INT (desired_val); } break; case GE: if (actual_val == desired_val + 1) { code = GT; op_b = GEN_INT (desired_val); } break; default: break; } } /* If we made any changes, generate a new conditional that is equivalent to what we started with, but has the right constants in it. */ if (code != GET_CODE (if_info->cond) || op_a != XEXP (if_info->cond, 0) || op_b != XEXP (if_info->cond, 1)) { cond = gen_rtx_fmt_ee (code, GET_MODE (cond), op_a, op_b); *earliest = if_info->cond_earliest; return cond; } } cond = canonicalize_condition (if_info->jump, cond, reverse, earliest, target, false); if (! cond || ! reg_mentioned_p (target, cond)) return NULL; /* We almost certainly searched back to a different place. Need to re-verify correct lifetimes. */ /* X may not be mentioned in the range (cond_earliest, jump]. */ for (insn = if_info->jump; insn != *earliest; insn = PREV_INSN (insn)) if (INSN_P (insn) && reg_overlap_mentioned_p (if_info->x, PATTERN (insn))) return NULL; /* A and B may not be modified in the range [cond_earliest, jump). */ for (insn = *earliest; insn != if_info->jump; insn = NEXT_INSN (insn)) if (INSN_P (insn) && (modified_in_p (if_info->a, insn) || modified_in_p (if_info->b, insn))) return NULL; return cond; } /* Convert "if (a < b) x = a; else x = b;" to "x = min(a, b);", etc. */ static int noce_try_minmax (struct noce_if_info *if_info) { rtx cond, earliest, target, seq; enum rtx_code code, op; int unsignedp; /* ??? Can't guarantee that expand_binop won't create pseudos. */ if (no_new_pseudos) return FALSE; /* ??? Reject modes with NaNs or signed zeros since we don't know how they will be resolved with an SMIN/SMAX. It wouldn't be too hard to get the target to tell us... */ if (HONOR_SIGNED_ZEROS (GET_MODE (if_info->x)) || HONOR_NANS (GET_MODE (if_info->x))) return FALSE; cond = noce_get_alt_condition (if_info, if_info->a, &earliest); if (!cond) return FALSE; /* Verify the condition is of the form we expect, and canonicalize the comparison code. 
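For instance, a condition of the form (gt B A) is treated as the equivalent (lt A B) via swap_condition, so that A is effectively the first operand when the SMIN/SMAX/UMIN/UMAX operation is chosen below.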
*/ code = GET_CODE (cond); if (rtx_equal_p (XEXP (cond, 0), if_info->a)) { if (! rtx_equal_p (XEXP (cond, 1), if_info->b)) return FALSE; } else if (rtx_equal_p (XEXP (cond, 1), if_info->a)) { if (! rtx_equal_p (XEXP (cond, 0), if_info->b)) return FALSE; code = swap_condition (code); } else return FALSE; /* Determine what sort of operation this is. Note that the code is for a taken branch, so the code->operation mapping appears backwards. */ switch (code) { case LT: case LE: case UNLT: case UNLE: op = SMAX; unsignedp = 0; break; case GT: case GE: case UNGT: case UNGE: op = SMIN; unsignedp = 0; break; case LTU: case LEU: op = UMAX; unsignedp = 1; break; case GTU: case GEU: op = UMIN; unsignedp = 1; break; default: return FALSE; } start_sequence (); target = expand_simple_binop (GET_MODE (if_info->x), op, if_info->a, if_info->b, if_info->x, unsignedp, OPTAB_WIDEN); if (! target) { end_sequence (); return FALSE; } if (target != if_info->x) noce_emit_move_insn (if_info->x, target); seq = end_ifcvt_sequence (if_info); if (!seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); if_info->cond = cond; if_info->cond_earliest = earliest; return TRUE; } /* Convert "if (a < 0) x = -a; else x = a;" to "x = abs(a);", etc. */ static int noce_try_abs (struct noce_if_info *if_info) { rtx cond, earliest, target, seq, a, b, c; int negate; /* ??? Can't guarantee that expand_binop won't create pseudos. */ if (no_new_pseudos) return FALSE; /* Recognize A and B as constituting an ABS or NABS. */ a = if_info->a; b = if_info->b; if (GET_CODE (a) == NEG && rtx_equal_p (XEXP (a, 0), b)) negate = 0; else if (GET_CODE (b) == NEG && rtx_equal_p (XEXP (b, 0), a)) { c = a; a = b; b = c; negate = 1; } else return FALSE; cond = noce_get_alt_condition (if_info, b, &earliest); if (!cond) return FALSE; /* Verify the condition is of the form we expect. */ if (rtx_equal_p (XEXP (cond, 0), b)) c = XEXP (cond, 1); else if (rtx_equal_p (XEXP (cond, 1), b)) c = XEXP (cond, 0); else return FALSE; /* Verify that C is zero. Search backward through the block for a REG_EQUAL note if necessary. */ if (REG_P (c)) { rtx insn, note = NULL; for (insn = earliest; insn != BB_HEAD (if_info->test_bb); insn = PREV_INSN (insn)) if (INSN_P (insn) && ((note = find_reg_note (insn, REG_EQUAL, c)) || (note = find_reg_note (insn, REG_EQUIV, c)))) break; if (! note) return FALSE; c = XEXP (note, 0); } if (MEM_P (c) && GET_CODE (XEXP (c, 0)) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (XEXP (c, 0))) c = get_pool_constant (XEXP (c, 0)); /* Work around funny ideas get_condition has wrt canonicalization. Note that these rtx constants are known to be CONST_INT, and therefore imply integer comparisons. */ if (c == constm1_rtx && GET_CODE (cond) == GT) ; else if (c == const1_rtx && GET_CODE (cond) == LT) ; else if (c != CONST0_RTX (GET_MODE (b))) return FALSE; /* Determine what sort of operation this is. */ switch (GET_CODE (cond)) { case LT: case LE: case UNLT: case UNLE: negate = !negate; break; case GT: case GE: case UNGT: case UNGE: break; default: return FALSE; } start_sequence (); target = expand_abs_nojump (GET_MODE (if_info->x), b, if_info->x, 1); /* ??? It's a quandary whether cmove would be better here, especially for integers. Perhaps combine will clean things up. */ if (target && negate) target = expand_simple_unop (GET_MODE (target), NEG, target, if_info->x, 0); if (! 
target) { end_sequence (); return FALSE; } if (target != if_info->x) noce_emit_move_insn (if_info->x, target); seq = end_ifcvt_sequence (if_info); if (!seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); if_info->cond = cond; if_info->cond_earliest = earliest; return TRUE; } /* Convert "if (m < 0) x = b; else x = 0;" to "x = (m >> C) & b;". */ static int noce_try_sign_mask (struct noce_if_info *if_info) { rtx cond, t, m, c, seq; enum machine_mode mode; enum rtx_code code; if (no_new_pseudos) return FALSE; cond = if_info->cond; code = GET_CODE (cond); m = XEXP (cond, 0); c = XEXP (cond, 1); t = NULL_RTX; if (if_info->a == const0_rtx) { if ((code == LT && c == const0_rtx) || (code == LE && c == constm1_rtx)) t = if_info->b; } else if (if_info->b == const0_rtx) { if ((code == GE && c == const0_rtx) || (code == GT && c == constm1_rtx)) t = if_info->a; } if (! t || side_effects_p (t)) return FALSE; /* We currently don't handle different modes. */ mode = GET_MODE (t); if (GET_MODE (m) != mode) return FALSE; /* This is only profitable if T is cheap. */ if (rtx_cost (t, SET) >= COSTS_N_INSNS (2)) return FALSE; start_sequence (); /* Use emit_store_flag to generate "m < 0 ? -1 : 0" instead of expanding "(signed) m >> 31" directly. This benefits targets with specialized insns to obtain the signmask, but still uses ashr_optab otherwise. */ m = emit_store_flag (gen_reg_rtx (mode), LT, m, const0_rtx, mode, 0, -1); t = m ? expand_binop (mode, and_optab, m, t, NULL_RTX, 0, OPTAB_DIRECT) : NULL_RTX; if (!t) { end_sequence (); return FALSE; } noce_emit_move_insn (if_info->x, t); seq = end_ifcvt_sequence (if_info); if (!seq) return FALSE; emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a)); return TRUE; } /* Similar to get_condition, only the resulting condition must be valid at JUMP, instead of at EARLIEST. */ static rtx noce_get_condition (rtx jump, rtx *earliest) { rtx cond, set, tmp, insn; bool reverse; if (! any_condjump_p (jump)) return NULL_RTX; set = pc_set (jump); /* If this branches to JUMP_LABEL when the condition is false, reverse the condition. */ reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump)); /* If the condition variable is a register and is MODE_INT, accept it. */ cond = XEXP (SET_SRC (set), 0); tmp = XEXP (cond, 0); if (REG_P (tmp) && GET_MODE_CLASS (GET_MODE (tmp)) == MODE_INT) { *earliest = jump; if (reverse) cond = gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond), tmp, XEXP (cond, 1)); return cond; } /* Otherwise, fall back on canonicalize_condition to do the dirty work of manipulating MODE_CC values and COMPARE rtx codes. */ tmp = canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX, false); if (!tmp) return NULL_RTX; /* We are going to insert code before JUMP, not before EARLIEST. We must therefore be certain that the given condition is valid at JUMP by virtue of not having been modified since. */ for (insn = *earliest; insn != jump; insn = NEXT_INSN (insn)) if (INSN_P (insn) && modified_in_p (tmp, insn)) break; if (insn == jump) return tmp; /* The condition was modified. See if we can get a partial result that doesn't follow all the reversals. Perhaps combine can fold them together later. 
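Concretely, the code below retries canonicalize_condition restricted to the register operand of the partial result; the less-folded condition it returns may still be valid at JUMP even when the fully canonicalized one was not, and it is re-validated against the intervening insns.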
*/ tmp = XEXP (tmp, 0); if (!REG_P (tmp) || GET_MODE_CLASS (GET_MODE (tmp)) != MODE_INT) return NULL_RTX; tmp = canonicalize_condition (jump, cond, reverse, earliest, tmp, false); if (!tmp) return NULL_RTX; /* For sanity's sake, re-validate the new result. */ for (insn = *earliest; insn != jump; insn = NEXT_INSN (insn)) if (INSN_P (insn) && modified_in_p (tmp, insn)) return NULL_RTX; return tmp; } /* Return true if OP is ok for if-then-else processing. */ static int noce_operand_ok (rtx op) { /* We special-case memories, so handle any of them with no address side effects. */ if (MEM_P (op)) return ! side_effects_p (XEXP (op, 0)); if (side_effects_p (op)) return FALSE; return ! may_trap_p (op); } /* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it without using conditional execution. Return TRUE if we were successful at converting the block. */ static int noce_process_if_block (struct ce_if_block * ce_info) { basic_block test_bb = ce_info->test_bb; /* test block */ basic_block then_bb = ce_info->then_bb; /* THEN */ basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */ struct noce_if_info if_info; rtx insn_a, insn_b; rtx set_a, set_b; rtx orig_x, x, a, b; rtx jump, cond; /* We're looking for patterns of the form (1) if (...) x = a; else x = b; (2) x = b; if (...) x = a; (3) if (...) x = a; // as if with an initial x = x. The later patterns require jumps to be more expensive. ??? For future expansion, look for multiple X in such patterns. */ /* If test is comprised of && or || elements, don't handle it unless it is the special case of && elements without an ELSE block. */ if (ce_info->num_multiple_test_blocks) { if (else_bb || ! ce_info->and_and_p) return FALSE; ce_info->test_bb = test_bb = ce_info->last_test_bb; ce_info->num_multiple_test_blocks = 0; ce_info->num_and_and_blocks = 0; ce_info->num_or_or_blocks = 0; } /* If this is not a standard conditional jump, we can't parse it. */ jump = BB_END (test_bb); cond = noce_get_condition (jump, &if_info.cond_earliest); if (! cond) return FALSE; /* If the conditional jump is more than just a conditional jump, then we can not do if-conversion on this block. */ if (! onlyjump_p (jump)) return FALSE; /* We must be comparing objects whose modes imply the size. */ if (GET_MODE (XEXP (cond, 0)) == BLKmode) return FALSE; /* Look for one of the potential sets. */ insn_a = first_active_insn (then_bb); if (! insn_a || insn_a != last_active_insn (then_bb, FALSE) || (set_a = single_set (insn_a)) == NULL_RTX) return FALSE; x = SET_DEST (set_a); a = SET_SRC (set_a); /* Look for the other potential set. Make sure we've got equivalent destinations. */ /* ??? This is overconservative. Storing to two different mems is as easy as conditionally computing the address. Storing to a single mem merely requires a scratch memory to use as one of the destination addresses; often the memory immediately below the stack pointer is available for this. */ set_b = NULL_RTX; if (else_bb) { insn_b = first_active_insn (else_bb); if (! insn_b || insn_b != last_active_insn (else_bb, FALSE) || (set_b = single_set (insn_b)) == NULL_RTX || ! rtx_equal_p (x, SET_DEST (set_b))) return FALSE; } else { insn_b = prev_nonnote_insn (if_info.cond_earliest); /* We're going to be moving the evaluation of B down from above COND_EARLIEST to JUMP. Make sure the relevant data is still intact. */ if (! insn_b || GET_CODE (insn_b) != INSN || (set_b = single_set (insn_b)) == NULL_RTX || ! 
rtx_equal_p (x, SET_DEST (set_b)) || reg_overlap_mentioned_p (x, SET_SRC (set_b)) || modified_between_p (SET_SRC (set_b), PREV_INSN (if_info.cond_earliest), jump) /* Likewise with X. In particular this can happen when noce_get_condition looks farther back in the instruction stream than one might expect. */ || reg_overlap_mentioned_p (x, cond) || reg_overlap_mentioned_p (x, a) || modified_between_p (x, PREV_INSN (if_info.cond_earliest), jump)) insn_b = set_b = NULL_RTX; } /* If x has side effects then only the if-then-else form is safe to convert. But even in that case we would need to restore any notes (such as REG_INC) at then end. That can be tricky if noce_emit_move_insn expands to more than one insn, so disable the optimization entirely for now if there are side effects. */ if (side_effects_p (x)) return FALSE; b = (set_b ? SET_SRC (set_b) : x); /* Only operate on register destinations, and even then avoid extending the lifetime of hard registers on small register class machines. */ orig_x = x; if (!REG_P (x) || (SMALL_REGISTER_CLASSES && REGNO (x) < FIRST_PSEUDO_REGISTER)) { if (no_new_pseudos || GET_MODE (x) == BLKmode) return FALSE; x = gen_reg_rtx (GET_MODE (GET_CODE (x) == STRICT_LOW_PART ? XEXP (x, 0) : x)); } /* Don't operate on sources that may trap or are volatile. */ if (! noce_operand_ok (a) || ! noce_operand_ok (b)) return FALSE; /* Set up the info block for our subroutines. */ if_info.test_bb = test_bb; if_info.cond = cond; if_info.jump = jump; if_info.insn_a = insn_a; if_info.insn_b = insn_b; if_info.x = x; if_info.a = a; if_info.b = b; /* Try optimizations in some approximation of a useful order. */ /* ??? Should first look to see if X is live incoming at all. If it isn't, we don't need anything but an unconditional set. */ /* Look and see if A and B are really the same. Avoid creating silly cmove constructs that no one will fix up later. */ if (rtx_equal_p (a, b)) { /* If we have an INSN_B, we don't have to create any new rtl. Just move the instruction that we already have. If we don't have an INSN_B, that means that A == X, and we've got a noop move. In that case don't do anything and let the code below delete INSN_A. */ if (insn_b && else_bb) { rtx note; if (else_bb && insn_b == BB_END (else_bb)) BB_END (else_bb) = PREV_INSN (insn_b); reorder_insns (insn_b, insn_b, PREV_INSN (jump)); /* If there was a REG_EQUAL note, delete it since it may have been true due to this insn being after a jump. */ if ((note = find_reg_note (insn_b, REG_EQUAL, NULL_RTX)) != 0) remove_note (insn_b, note); insn_b = NULL_RTX; } /* If we have "x = b; if (...) x = a;", and x has side-effects, then x must be executed twice. */ else if (insn_b && side_effects_p (orig_x)) return FALSE; x = orig_x; goto success; } /* Disallow the "if (...) x = a;" form (with an implicit "else x = x;") for most optimizations if writing to x may trap, i.e. it's a memory other than a static var or a stack slot. */ if (! set_b && MEM_P (orig_x) && ! MEM_NOTRAP_P (orig_x) && rtx_addr_can_trap_p (XEXP (orig_x, 0))) { if (HAVE_conditional_move) { if (noce_try_cmove (&if_info)) goto success; if (! HAVE_conditional_execution && noce_try_cmove_arith (&if_info)) goto success; } return FALSE; } if (noce_try_move (&if_info)) goto success; if (noce_try_store_flag (&if_info)) goto success; if (noce_try_minmax (&if_info)) goto success; if (noce_try_abs (&if_info)) goto success; if (HAVE_conditional_move && noce_try_cmove (&if_info)) goto success; if (! 
HAVE_conditional_execution) { if (noce_try_store_flag_constants (&if_info)) goto success; if (noce_try_addcc (&if_info)) goto success; if (noce_try_store_flag_mask (&if_info)) goto success; if (HAVE_conditional_move && noce_try_cmove_arith (&if_info)) goto success; if (noce_try_sign_mask (&if_info)) goto success; } return FALSE; success: /* The original sets may now be killed. */ delete_insn (insn_a); /* Several special cases here: First, we may have reused insn_b above, in which case insn_b is now NULL. Second, we want to delete insn_b if it came from the ELSE block, because follows the now correct write that appears in the TEST block. However, if we got insn_b from the TEST block, it may in fact be loading data needed for the comparison. We'll let life_analysis remove the insn if it's really dead. */ if (insn_b && else_bb) delete_insn (insn_b); /* The new insns will have been inserted immediately before the jump. We should be able to remove the jump with impunity, but the condition itself may have been modified by gcse to be shared across basic blocks. */ delete_insn (jump); /* If we used a temporary, fix it up now. */ if (orig_x != x) { start_sequence (); noce_emit_move_insn (orig_x, x); insn_b = get_insns (); set_used_flags (orig_x); unshare_all_rtl_in_chain (insn_b); end_sequence (); emit_insn_after_setloc (insn_b, BB_END (test_bb), INSN_LOCATOR (insn_a)); } /* Merge the blocks! */ merge_if_block (ce_info); return TRUE; } /* Attempt to convert an IF-THEN or IF-THEN-ELSE block into straight line code. Return true if successful. */ static int process_if_block (struct ce_if_block * ce_info) { if (! reload_completed && noce_process_if_block (ce_info)) return TRUE; if (HAVE_conditional_execution && reload_completed) { /* If we have && and || tests, try to first handle combining the && and || tests into the conditional code, and if that fails, go back and handle it without the && and ||, which at present handles the && case if there was no ELSE block. */ if (cond_exec_process_if_block (ce_info, TRUE)) return TRUE; if (ce_info->num_multiple_test_blocks) { cancel_changes (0); if (cond_exec_process_if_block (ce_info, FALSE)) return TRUE; } } return FALSE; } /* Merge the blocks and mark for local life update. */ static void merge_if_block (struct ce_if_block * ce_info) { basic_block test_bb = ce_info->test_bb; /* last test block */ basic_block then_bb = ce_info->then_bb; /* THEN */ basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */ basic_block join_bb = ce_info->join_bb; /* join block */ basic_block combo_bb; /* All block merging is done into the lower block numbers. */ combo_bb = test_bb; /* Merge any basic blocks to handle && and || subtests. Each of the blocks are on the fallthru path from the predecessor block. */ if (ce_info->num_multiple_test_blocks > 0) { basic_block bb = test_bb; basic_block last_test_bb = ce_info->last_test_bb; basic_block fallthru = block_fallthru (bb); do { bb = fallthru; fallthru = block_fallthru (bb); merge_blocks (combo_bb, bb); num_true_changes++; } while (bb != last_test_bb); } /* Merge TEST block into THEN block. Normally the THEN block won't have a label, but it might if there were || tests. That label's count should be zero, and it normally should be removed. */ if (then_bb) { if (combo_bb->global_live_at_end) COPY_REG_SET (combo_bb->global_live_at_end, then_bb->global_live_at_end); merge_blocks (combo_bb, then_bb); num_true_changes++; } /* The ELSE block, if it existed, had a label. 
That label count will almost always be zero, but odd things can happen when labels get their addresses taken. */ if (else_bb) { merge_blocks (combo_bb, else_bb); num_true_changes++; } /* If there was no join block reported, that means it was not adjacent to the others, and so we cannot merge them. */ if (! join_bb) { rtx last = BB_END (combo_bb); /* The outgoing edge for the current COMBO block should already be correct. Verify this. */ if (combo_bb->succ == NULL_EDGE) { if (find_reg_note (last, REG_NORETURN, NULL)) ; else if (GET_CODE (last) == INSN && GET_CODE (PATTERN (last)) == TRAP_IF && TRAP_CONDITION (PATTERN (last)) == const_true_rtx) ; else abort (); } /* There should still be something at the end of the THEN or ELSE blocks taking us to our final destination. */ else if (GET_CODE (last) == JUMP_INSN) ; else if (combo_bb->succ->dest == EXIT_BLOCK_PTR && GET_CODE (last) == CALL_INSN && SIBLING_CALL_P (last)) ; else if ((combo_bb->succ->flags & EDGE_EH) && can_throw_internal (last)) ; else abort (); } /* The JOIN block may have had quite a number of other predecessors too. Since we've already merged the TEST, THEN and ELSE blocks, we should have only one remaining edge from our if-then-else diamond. If there is more than one remaining edge, it must come from elsewhere. There may be zero incoming edges if the THEN block didn't actually join back up (as with a call to abort). */ else if ((join_bb->pred == NULL || join_bb->pred->pred_next == NULL) && join_bb != EXIT_BLOCK_PTR) { /* We can merge the JOIN. */ if (combo_bb->global_live_at_end) COPY_REG_SET (combo_bb->global_live_at_end, join_bb->global_live_at_end); merge_blocks (combo_bb, join_bb); num_true_changes++; } else { /* We cannot merge the JOIN. */ /* The outgoing edge for the current COMBO block should already be correct. Verify this. */ if (combo_bb->succ->succ_next != NULL_EDGE || combo_bb->succ->dest != join_bb) abort (); /* Remove the jump and cruft from the end of the COMBO block. */ if (join_bb != EXIT_BLOCK_PTR) tidy_fallthru_edge (combo_bb->succ); } num_updated_if_blocks++; } /* Find a block ending in a simple IF condition and try to transform it in some way. When converting a multi-block condition, put the new code in the first such block and delete the rest. Return a pointer to this first block if some transformation was done. Return NULL otherwise. */ static basic_block find_if_header (basic_block test_bb, int pass) { ce_if_block_t ce_info; edge then_edge; edge else_edge; /* The kind of block we're looking for has exactly two successors. */ if ((then_edge = test_bb->succ) == NULL_EDGE || (else_edge = then_edge->succ_next) == NULL_EDGE || else_edge->succ_next != NULL_EDGE) return NULL; /* Neither edge should be abnormal. */ if ((then_edge->flags & EDGE_COMPLEX) || (else_edge->flags & EDGE_COMPLEX)) return NULL; /* Nor exit the loop. */ if ((then_edge->flags & EDGE_LOOP_EXIT) || (else_edge->flags & EDGE_LOOP_EXIT)) return NULL; /* The THEN edge is canonically the one that falls through. */ if (then_edge->flags & EDGE_FALLTHRU) ; else if (else_edge->flags & EDGE_FALLTHRU) { edge e = else_edge; else_edge = then_edge; then_edge = e; } else /* Otherwise this must be a multiway branch of some sort. 
*/ return NULL; memset (&ce_info, '\0', sizeof (ce_info)); ce_info.test_bb = test_bb; ce_info.then_bb = then_edge->dest; ce_info.else_bb = else_edge->dest; ce_info.pass = pass; #ifdef IFCVT_INIT_EXTRA_FIELDS IFCVT_INIT_EXTRA_FIELDS (&ce_info); #endif if (find_if_block (&ce_info)) goto success; if (HAVE_trap && HAVE_conditional_trap && find_cond_trap (test_bb, then_edge, else_edge)) goto success; if (dom_computed[CDI_POST_DOMINATORS] >= DOM_NO_FAST_QUERY && (! HAVE_conditional_execution || reload_completed)) { if (find_if_case_1 (test_bb, then_edge, else_edge)) goto success; if (find_if_case_2 (test_bb, then_edge, else_edge)) goto success; } return NULL; success: if (dump_file) fprintf (dump_file, "Conversion succeeded on pass %d.\n", pass); return ce_info.test_bb; } /* Return true if a block has two edges, one of which falls through to the next block, and the other jumps to a specific block, so that we can tell if the block is part of an && test or an || test. Returns either -1 or the number of non-note, non-jump, non-USE/CLOBBER insns in the block. */ static int block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb) { edge cur_edge; int fallthru_p = FALSE; int jump_p = FALSE; rtx insn; rtx end; int n_insns = 0; if (!cur_bb || !target_bb) return -1; /* If no edges, obviously it doesn't jump or fallthru. */ if (cur_bb->succ == NULL_EDGE) return FALSE; for (cur_edge = cur_bb->succ; cur_edge != NULL_EDGE; cur_edge = cur_edge->succ_next) { if (cur_edge->flags & EDGE_COMPLEX) /* Anything complex isn't what we want. */ return -1; else if (cur_edge->flags & EDGE_FALLTHRU) fallthru_p = TRUE; else if (cur_edge->dest == target_bb) jump_p = TRUE; else return -1; } if ((jump_p & fallthru_p) == 0) return -1; /* Don't allow calls in the block, since this is used to group && and || together for conditional execution support. ??? we should support conditional execution support across calls for IA-64 some day, but for now it makes the code simpler. */ end = BB_END (cur_bb); insn = BB_HEAD (cur_bb); while (insn != NULL_RTX) { if (GET_CODE (insn) == CALL_INSN) return -1; if (INSN_P (insn) && GET_CODE (insn) != JUMP_INSN && GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER) n_insns++; if (insn == end) break; insn = NEXT_INSN (insn); } return n_insns; } /* Determine if a given basic block heads a simple IF-THEN or IF-THEN-ELSE block. If so, we'll try to convert the insns to not require the branch. Return TRUE if we were successful at converting the block. */ static int find_if_block (struct ce_if_block * ce_info) { basic_block test_bb = ce_info->test_bb; basic_block then_bb = ce_info->then_bb; basic_block else_bb = ce_info->else_bb; basic_block join_bb = NULL_BLOCK; edge then_succ = then_bb->succ; edge else_succ = else_bb->succ; int then_predecessors; int else_predecessors; edge cur_edge; basic_block next; ce_info->last_test_bb = test_bb; /* Discover if any fall through predecessors of the current test basic block were && tests (which jump to the else block) or || tests (which jump to the then block). */ if (HAVE_conditional_execution && reload_completed && test_bb->pred != NULL_EDGE && test_bb->pred->pred_next == NULL_EDGE && test_bb->pred->flags == EDGE_FALLTHRU) { basic_block bb = test_bb->pred->src; basic_block target_bb; int max_insns = MAX_CONDITIONAL_EXECUTE; int n_insns; /* Determine if the preceding block is an && or || block. 
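That is, a predecessor that falls through into the test block but jumps to the ELSE block behaves like '&&' (every test must pass to reach THEN), while one that jumps to the THEN block behaves like '||'.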
*/ if ((n_insns = block_jumps_and_fallthru_p (bb, else_bb)) >= 0) { ce_info->and_and_p = TRUE; target_bb = else_bb; } else if ((n_insns = block_jumps_and_fallthru_p (bb, then_bb)) >= 0) { ce_info->and_and_p = FALSE; target_bb = then_bb; } else target_bb = NULL_BLOCK; if (target_bb && n_insns <= max_insns) { int total_insns = 0; int blocks = 0; ce_info->last_test_bb = test_bb; /* Found at least one && or || block, look for more. */ do { ce_info->test_bb = test_bb = bb; total_insns += n_insns; blocks++; if (bb->pred == NULL_EDGE || bb->pred->pred_next != NULL_EDGE) break; bb = bb->pred->src; n_insns = block_jumps_and_fallthru_p (bb, target_bb); } while (n_insns >= 0 && (total_insns + n_insns) <= max_insns); ce_info->num_multiple_test_blocks = blocks; ce_info->num_multiple_test_insns = total_insns; if (ce_info->and_and_p) ce_info->num_and_and_blocks = blocks; else ce_info->num_or_or_blocks = blocks; } } /* Count the number of edges the THEN and ELSE blocks have. */ then_predecessors = 0; for (cur_edge = then_bb->pred; cur_edge != NULL_EDGE; cur_edge = cur_edge->pred_next) { then_predecessors++; if (cur_edge->flags & EDGE_COMPLEX) return FALSE; } else_predecessors = 0; for (cur_edge = else_bb->pred; cur_edge != NULL_EDGE; cur_edge = cur_edge->pred_next) { else_predecessors++; if (cur_edge->flags & EDGE_COMPLEX) return FALSE; } /* The THEN block of an IF-THEN combo must have exactly one predecessor, other than any || blocks which jump to the THEN block. */ if ((then_predecessors - ce_info->num_or_or_blocks) != 1) return FALSE; /* The THEN block of an IF-THEN combo must have zero or one successors. */ if (then_succ != NULL_EDGE && (then_succ->succ_next != NULL_EDGE || (then_succ->flags & EDGE_COMPLEX) || (flow2_completed && tablejump_p (BB_END (then_bb), NULL, NULL)))) return FALSE; /* If the THEN block has no successors, conditional execution can still make a conditional call. Don't do this unless the ELSE block has only one incoming edge -- the CFG manipulation is too ugly otherwise. Check for the last insn of the THEN block being an indirect jump, which is listed as not having any successors, but confuses the rest of the CE code processing. ??? we should fix this in the future. */ if (then_succ == NULL) { if (else_bb->pred->pred_next == NULL_EDGE) { rtx last_insn = BB_END (then_bb); while (last_insn && GET_CODE (last_insn) == NOTE && last_insn != BB_HEAD (then_bb)) last_insn = PREV_INSN (last_insn); if (last_insn && GET_CODE (last_insn) == JUMP_INSN && ! simplejump_p (last_insn)) return FALSE; join_bb = else_bb; else_bb = NULL_BLOCK; } else return FALSE; } /* If the THEN block's successor is the other edge out of the TEST block, then we have an IF-THEN combo without an ELSE. */ else if (then_succ->dest == else_bb) { join_bb = else_bb; else_bb = NULL_BLOCK; } /* If the THEN and ELSE block meet in a subsequent block, and the ELSE has exactly one predecessor and one successor, and the outgoing edge is not complex, then we have an IF-THEN-ELSE combo. */ else if (else_succ != NULL_EDGE && then_succ->dest == else_succ->dest && else_bb->pred->pred_next == NULL_EDGE && else_succ->succ_next == NULL_EDGE && ! (else_succ->flags & EDGE_COMPLEX) && ! (flow2_completed && tablejump_p (BB_END (else_bb), NULL, NULL))) join_bb = else_succ->dest; /* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */ else return FALSE; num_possible_if_blocks++; if (dump_file) { fprintf (dump_file, "\nIF-THEN%s block found, pass %d, start block %d " "[insn %d], then %d [%d]", (else_bb) ? 
"-ELSE" : "", ce_info->pass, test_bb->index, BB_HEAD (test_bb) ? (int)INSN_UID (BB_HEAD (test_bb)) : -1, then_bb->index, BB_HEAD (then_bb) ? (int)INSN_UID (BB_HEAD (then_bb)) : -1); if (else_bb) fprintf (dump_file, ", else %d [%d]", else_bb->index, BB_HEAD (else_bb) ? (int)INSN_UID (BB_HEAD (else_bb)) : -1); fprintf (dump_file, ", join %d [%d]", join_bb->index, BB_HEAD (join_bb) ? (int)INSN_UID (BB_HEAD (join_bb)) : -1); if (ce_info->num_multiple_test_blocks > 0) fprintf (dump_file, ", %d %s block%s last test %d [%d]", ce_info->num_multiple_test_blocks, (ce_info->and_and_p) ? "&&" : "||", (ce_info->num_multiple_test_blocks == 1) ? "" : "s", ce_info->last_test_bb->index, ((BB_HEAD (ce_info->last_test_bb)) ? (int)INSN_UID (BB_HEAD (ce_info->last_test_bb)) : -1)); fputc ('\n', dump_file); } /* Make sure IF, THEN, and ELSE, blocks are adjacent. Actually, we get the first condition for free, since we've already asserted that there's a fallthru edge from IF to THEN. Likewise for the && and || blocks, since we checked the FALLTHRU flag, those are already adjacent to the last IF block. */ /* ??? As an enhancement, move the ELSE block. Have to deal with BLOCK notes, if by no other means than aborting the merge if they exist. Sticky enough I don't want to think about it now. */ next = then_bb; if (else_bb && (next = next->next_bb) != else_bb) return FALSE; if ((next = next->next_bb) != join_bb && join_bb != EXIT_BLOCK_PTR) { if (else_bb) join_bb = NULL; else return FALSE; } /* Do the real work. */ ce_info->else_bb = else_bb; ce_info->join_bb = join_bb; return process_if_block (ce_info); } /* Convert a branch over a trap, or a branch to a trap, into a conditional trap. */ static int find_cond_trap (basic_block test_bb, edge then_edge, edge else_edge) { basic_block then_bb = then_edge->dest; basic_block else_bb = else_edge->dest; basic_block other_bb, trap_bb; rtx trap, jump, cond, cond_earliest, seq; enum rtx_code code; /* Locate the block with the trap instruction. */ /* ??? While we look for no successors, we really ought to allow EH successors. Need to fix merge_if_block for that to work. */ if ((trap = block_has_only_trap (then_bb)) != NULL) trap_bb = then_bb, other_bb = else_bb; else if ((trap = block_has_only_trap (else_bb)) != NULL) trap_bb = else_bb, other_bb = then_bb; else return FALSE; if (dump_file) { fprintf (dump_file, "\nTRAP-IF block found, start %d, trap %d\n", test_bb->index, trap_bb->index); } /* If this is not a standard conditional jump, we can't parse it. */ jump = BB_END (test_bb); cond = noce_get_condition (jump, &cond_earliest); if (! cond) return FALSE; /* If the conditional jump is more than just a conditional jump, then we can not do if-conversion on this block. */ if (! onlyjump_p (jump)) return FALSE; /* We must be comparing objects whose modes imply the size. */ if (GET_MODE (XEXP (cond, 0)) == BLKmode) return FALSE; /* Reverse the comparison code, if necessary. */ code = GET_CODE (cond); if (then_bb == trap_bb) { code = reversed_comparison_code (cond, jump); if (code == UNKNOWN) return FALSE; } /* Attempt to generate the conditional trap. */ seq = gen_cond_trap (code, XEXP (cond, 0), XEXP (cond, 1), TRAP_CODE (PATTERN (trap))); if (seq == NULL) return FALSE; num_true_changes++; /* Emit the new insns before cond_earliest. */ emit_insn_before_setloc (seq, cond_earliest, INSN_LOCATOR (trap)); /* Delete the trap block if possible. */ remove_edge (trap_bb == then_bb ? 
then_edge : else_edge); if (trap_bb->pred == NULL) delete_basic_block (trap_bb); /* If the non-trap block and the test are now adjacent, merge them. Otherwise we must insert a direct branch. */ if (test_bb->next_bb == other_bb) { struct ce_if_block new_ce_info; delete_insn (jump); memset (&new_ce_info, '\0', sizeof (new_ce_info)); new_ce_info.test_bb = test_bb; new_ce_info.then_bb = NULL; new_ce_info.else_bb = NULL; new_ce_info.join_bb = other_bb; merge_if_block (&new_ce_info); } else { rtx lab, newjump; lab = JUMP_LABEL (jump); newjump = emit_jump_insn_after (gen_jump (lab), jump); LABEL_NUSES (lab) += 1; JUMP_LABEL (newjump) = lab; emit_barrier_after (newjump); delete_insn (jump); } return TRUE; } /* Subroutine of find_cond_trap: if BB contains only a trap insn, return it. */ static rtx block_has_only_trap (basic_block bb) { rtx trap; /* We're not the exit block. */ if (bb == EXIT_BLOCK_PTR) return NULL_RTX; /* The block must have no successors. */ if (bb->succ) return NULL_RTX; /* The only instruction in the THEN block must be the trap. */ trap = first_active_insn (bb); if (! (trap == BB_END (bb) && GET_CODE (PATTERN (trap)) == TRAP_IF && TRAP_CONDITION (PATTERN (trap)) == const_true_rtx)) return NULL_RTX; return trap; } /* Look for IF-THEN-ELSE cases in which one of THEN or ELSE is transformable, but not necessarily the other. There need be no JOIN block. Return TRUE if we were successful at converting the block. Cases we'd like to look at: (1) if (test) goto over; // x not live x = a; goto label; over: becomes x = a; if (! test) goto label; (2) if (test) goto E; // x not live x = big(); goto L; E: x = b; goto M; becomes x = b; if (test) goto M; x = big(); goto L; (3) // This one's really only interesting for targets that can do // multiway branching, e.g. IA-64 BBB bundles. For other targets // it results in multiple branches on a cache line, which often // does not sit well with predictors. if (test1) goto E; // predicted not taken x = a; if (test2) goto F; ... E: x = b; J: becomes x = a; if (test1) goto E; if (test2) goto F; Notes: (A) Don't do (2) if the branch is predicted against the block we're eliminating. Do it anyway if we can eliminate a branch; this requires that the sole successor of the eliminated block postdominate the other side of the if. (B) With CE, on (3) we can steal from both sides of the if, creating if (test1) x = a; if (!test1) x = b; if (test1) goto J; if (test2) goto F; ... J: Again, this is most useful if J postdominates. (C) CE substitutes for helpful life information. (D) These heuristics need a lot of work. */ /* Tests for case 1 above. */ static int find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge) { basic_block then_bb = then_edge->dest; basic_block else_bb = else_edge->dest, new_bb; edge then_succ = then_bb->succ; int then_bb_index; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. */ if (flag_reorder_blocks_and_partition && ((BB_END (then_bb) && find_reg_note (BB_END (then_bb), REG_CROSSING_JUMP, NULL_RTX)) || (BB_END (else_bb) && find_reg_note (BB_END (else_bb), REG_CROSSING_JUMP, NULL_RTX)))) return FALSE; /* THEN has one successor. */ if (!then_succ || then_succ->succ_next != NULL) return FALSE; /* THEN does not fall through, but is not strange either. */ if (then_succ->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)) return FALSE; /* THEN has one predecessor. */ if (then_bb->pred->pred_next != NULL) return FALSE; /* THEN must do something. 
*/ if (forwarder_block_p (then_bb)) return FALSE; num_possible_if_blocks++; if (dump_file) fprintf (dump_file, "\nIF-CASE-1 found, start %d, then %d\n", test_bb->index, then_bb->index); /* THEN is small. */ if (count_bb_insns (then_bb) > BRANCH_COST) return FALSE; /* Registers set are dead, or are predicable. */ if (! dead_or_predicable (test_bb, then_bb, else_bb, then_bb->succ->dest, 1)) return FALSE; /* Conversion went ok, including moving the insns and fixing up the jump. Adjust the CFG to match. */ bitmap_operation (test_bb->global_live_at_end, else_bb->global_live_at_start, then_bb->global_live_at_end, BITMAP_IOR); new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb), else_bb); then_bb_index = then_bb->index; delete_basic_block (then_bb); /* Make rest of code believe that the newly created block is the THEN_BB block we removed. */ if (new_bb) { new_bb->index = then_bb_index; BASIC_BLOCK (then_bb_index) = new_bb; } /* We've possibly created jump to next insn, cleanup_cfg will solve that later. */ num_true_changes++; num_updated_if_blocks++; return TRUE; } /* Test for case 2 above. */ static int find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge) { basic_block then_bb = then_edge->dest; basic_block else_bb = else_edge->dest; edge else_succ = else_bb->succ; rtx note; /* If we are partitioning hot/cold basic blocks, we don't want to mess up unconditional or indirect jumps that cross between hot and cold sections. */ if (flag_reorder_blocks_and_partition && ((BB_END (then_bb) && find_reg_note (BB_END (then_bb), REG_CROSSING_JUMP, NULL_RTX)) || (BB_END (else_bb) && find_reg_note (BB_END (else_bb), REG_CROSSING_JUMP, NULL_RTX)))) return FALSE; /* ELSE has one successor. */ if (!else_succ || else_succ->succ_next != NULL) return FALSE; /* ELSE outgoing edge is not complex. */ if (else_succ->flags & EDGE_COMPLEX) return FALSE; /* ELSE has one predecessor. */ if (else_bb->pred->pred_next != NULL) return FALSE; /* THEN is not EXIT. */ if (then_bb->index < 0) return FALSE; /* ELSE is predicted or SUCC(ELSE) postdominates THEN. */ note = find_reg_note (BB_END (test_bb), REG_BR_PROB, NULL_RTX); if (note && INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ; else if (else_succ->dest->index < 0 || dominated_by_p (CDI_POST_DOMINATORS, then_bb, else_succ->dest)) ; else return FALSE; num_possible_if_blocks++; if (dump_file) fprintf (dump_file, "\nIF-CASE-2 found, start %d, else %d\n", test_bb->index, else_bb->index); /* ELSE is small. */ if (count_bb_insns (else_bb) > BRANCH_COST) return FALSE; /* Registers set are dead, or are predicable. */ if (! dead_or_predicable (test_bb, else_bb, then_bb, else_succ->dest, 0)) return FALSE; /* Conversion went ok, including moving the insns and fixing up the jump. Adjust the CFG to match. */ bitmap_operation (test_bb->global_live_at_end, then_bb->global_live_at_start, else_bb->global_live_at_end, BITMAP_IOR); delete_basic_block (else_bb); num_true_changes++; num_updated_if_blocks++; /* ??? We may now fallthru from one of THEN's successors into a join block. Rerun cleanup_cfg? Examine things manually? Wait? */ return TRUE; } /* A subroutine of dead_or_predicable called through for_each_rtx. Return 1 if a memory is found. */ static int find_memory (rtx *px, void *data ATTRIBUTE_UNUSED) { return MEM_P (*px); } /* Used by the code above to perform the actual rtl transformations. Return TRUE if successful. TEST_BB is the block containing the conditional branch. MERGE_BB is the block containing the code to manipulate. 
NEW_DEST is the label TEST_BB should be branching to after the conversion. REVERSEP is true if the sense of the branch should be reversed. */ static int dead_or_predicable (basic_block test_bb, basic_block merge_bb, basic_block other_bb, basic_block new_dest, int reversep) { rtx head, end, jump, earliest = NULL_RTX, old_dest, new_label = NULL_RTX; jump = BB_END (test_bb); /* Find the extent of the real code in the merge block. */ head = BB_HEAD (merge_bb); end = BB_END (merge_bb); if (GET_CODE (head) == CODE_LABEL) head = NEXT_INSN (head); if (GET_CODE (head) == NOTE) { if (head == end) { head = end = NULL_RTX; goto no_body; } head = NEXT_INSN (head); } if (GET_CODE (end) == JUMP_INSN) { if (head == end) { head = end = NULL_RTX; goto no_body; } end = PREV_INSN (end); } /* Disable handling dead code by conditional execution if the machine needs to do anything funny with the tests, etc. */ #ifndef IFCVT_MODIFY_TESTS if (HAVE_conditional_execution) { /* In the conditional execution case, we have things easy. We know the condition is reversible. We don't have to check life info because we're going to conditionally execute the code anyway. All that's left is making sure the insns involved can actually be predicated. */ rtx cond, prob_val; cond = cond_exec_get_condition (jump); if (! cond) return FALSE; prob_val = find_reg_note (jump, REG_BR_PROB, NULL_RTX); if (prob_val) prob_val = XEXP (prob_val, 0); if (reversep) { enum rtx_code rev = reversed_comparison_code (cond, jump); if (rev == UNKNOWN) return FALSE; cond = gen_rtx_fmt_ee (rev, GET_MODE (cond), XEXP (cond, 0), XEXP (cond, 1)); if (prob_val) prob_val = GEN_INT (REG_BR_PROB_BASE - INTVAL (prob_val)); } if (! cond_exec_process_insns ((ce_if_block_t *)0, head, end, cond, prob_val, 0)) goto cancel; earliest = jump; } else #endif { /* In the non-conditional execution case, we have to verify that there are no trapping operations, no calls, no references to memory, and that any registers modified are dead at the branch site. */ rtx insn, cond, prev; regset_head merge_set_head, tmp_head, test_live_head, test_set_head; regset merge_set, tmp, test_live, test_set; struct propagate_block_info *pbi; int i, fail = 0; /* Check for no calls or trapping operations. */ for (insn = head; ; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == CALL_INSN) return FALSE; if (INSN_P (insn)) { if (may_trap_p (PATTERN (insn))) return FALSE; /* ??? Even non-trapping memories such as stack frame references must be avoided. For stores, we collect no lifetime info; for reads, we'd have to assert true_dependence false against every store in the TEST range. */ if (for_each_rtx (&PATTERN (insn), find_memory, NULL)) return FALSE; } if (insn == end) break; } if (! any_condjump_p (jump)) return FALSE; /* Find the extent of the conditional. */ cond = noce_get_condition (jump, &earliest); if (! cond) return FALSE; /* Collect: MERGE_SET = set of registers set in MERGE_BB TEST_LIVE = set of registers live at EARLIEST TEST_SET = set of registers set between EARLIEST and the end of the block. */ tmp = INITIALIZE_REG_SET (tmp_head); merge_set = INITIALIZE_REG_SET (merge_set_head); test_live = INITIALIZE_REG_SET (test_live_head); test_set = INITIALIZE_REG_SET (test_set_head); /* ??? bb->local_set is only valid during calculate_global_regs_live, so we must recompute usage for MERGE_BB. Not so bad, I suppose, since we've already asserted that MERGE_BB is small. 
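(As a rough illustration of the tests a few lines below: if MERGE_BB sets, say, (reg 70) and that register is either live at EARLIEST or set again between EARLIEST and the jump, moving MERGE_BB's insns above the branch would clobber something the test sequence still depends on, so we give up.)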
*/ propagate_block (merge_bb, tmp, merge_set, merge_set, 0); /* For small register class machines, don't lengthen lifetimes of hard registers before reload. */ if (SMALL_REGISTER_CLASSES && ! reload_completed) { EXECUTE_IF_SET_IN_BITMAP (merge_set, 0, i, { if (i < FIRST_PSEUDO_REGISTER && ! fixed_regs[i] && ! global_regs[i]) fail = 1; }); } /* For TEST, we're interested in a range of insns, not a whole block. Moreover, we're interested in the insns live from OTHER_BB. */ COPY_REG_SET (test_live, other_bb->global_live_at_start); pbi = init_propagate_block_info (test_bb, test_live, test_set, test_set, 0); for (insn = jump; ; insn = prev) { prev = propagate_one_insn (pbi, insn); if (insn == earliest) break; } free_propagate_block_info (pbi); /* We can perform the transformation if MERGE_SET & (TEST_SET | TEST_LIVE) and TEST_SET & merge_bb->global_live_at_start are empty. */ bitmap_operation (tmp, test_set, test_live, BITMAP_IOR); bitmap_operation (tmp, tmp, merge_set, BITMAP_AND); EXECUTE_IF_SET_IN_BITMAP(tmp, 0, i, fail = 1); bitmap_operation (tmp, test_set, merge_bb->global_live_at_start, BITMAP_AND); EXECUTE_IF_SET_IN_BITMAP(tmp, 0, i, fail = 1); FREE_REG_SET (tmp); FREE_REG_SET (merge_set); FREE_REG_SET (test_live); FREE_REG_SET (test_set); if (fail) return FALSE; } no_body: /* We don't want to use normal invert_jump or redirect_jump because we don't want to delete_insn called. Also, we want to do our own change group management. */ old_dest = JUMP_LABEL (jump); if (other_bb != new_dest) { new_label = block_label (new_dest); if (reversep ? ! invert_jump_1 (jump, new_label) : ! redirect_jump_1 (jump, new_label)) goto cancel; } if (! apply_change_group ()) return FALSE; if (other_bb != new_dest) { if (old_dest) LABEL_NUSES (old_dest) -= 1; if (new_label) LABEL_NUSES (new_label) += 1; JUMP_LABEL (jump) = new_label; if (reversep) invert_br_probabilities (jump); redirect_edge_succ (BRANCH_EDGE (test_bb), new_dest); if (reversep) { gcov_type count, probability; count = BRANCH_EDGE (test_bb)->count; BRANCH_EDGE (test_bb)->count = FALLTHRU_EDGE (test_bb)->count; FALLTHRU_EDGE (test_bb)->count = count; probability = BRANCH_EDGE (test_bb)->probability; BRANCH_EDGE (test_bb)->probability = FALLTHRU_EDGE (test_bb)->probability; FALLTHRU_EDGE (test_bb)->probability = probability; update_br_prob_note (test_bb); } } /* Move the insns out of MERGE_BB to before the branch. */ if (head != NULL) { if (end == BB_END (merge_bb)) BB_END (merge_bb) = PREV_INSN (head); if (squeeze_notes (&head, &end)) return TRUE; reorder_insns (head, end, PREV_INSN (earliest)); } /* Remove the jump and edge if we can. */ if (other_bb == new_dest) { delete_insn (jump); remove_edge (BRANCH_EDGE (test_bb)); /* ??? Can't merge blocks here, as then_bb is still in use. At minimum, the merge will get done just before bb-reorder. */ } return TRUE; cancel: cancel_changes (0); return FALSE; } /* Main entry point for all if-conversion. */ void if_convert (int x_life_data_ok) { basic_block bb; int pass; num_possible_if_blocks = 0; num_updated_if_blocks = 0; num_true_changes = 0; life_data_ok = (x_life_data_ok != 0); if ((! targetm.cannot_modify_jumps_p ()) && (!flag_reorder_blocks_and_partition || !no_new_pseudos)) mark_loop_exit_edges (); /* Compute postdominators if we think we'll use them. */ if (HAVE_conditional_execution || life_data_ok) calculate_dominance_info (CDI_POST_DOMINATORS); if (life_data_ok) clear_bb_flags (); /* Go through each of the basic blocks looking for things to convert. 
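A sketch of the kind of nest we are after (purely illustrative): if (a) { if (b) x = c; } - once the inner IF-THEN has been converted away, a later pass may find the outer test convertible as well.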
If we have conditional execution, we make multiple passes to allow us to handle IF-THEN{-ELSE} blocks within other IF-THEN{-ELSE} blocks. */ pass = 0; do { cond_exec_changed_p = FALSE; pass++; #ifdef IFCVT_MULTIPLE_DUMPS if (dump_file && pass > 1) fprintf (dump_file, "\n\n========== Pass %d ==========\n", pass); #endif FOR_EACH_BB (bb) { basic_block new_bb; while ((new_bb = find_if_header (bb, pass))) bb = new_bb; } #ifdef IFCVT_MULTIPLE_DUMPS if (dump_file && cond_exec_changed_p) print_rtl_with_bb (dump_file, get_insns ()); #endif } while (cond_exec_changed_p); #ifdef IFCVT_MULTIPLE_DUMPS if (dump_file) fprintf (dump_file, "\n\n========== no more changes\n"); #endif free_dominance_info (CDI_POST_DOMINATORS); if (dump_file) fflush (dump_file); clear_aux_for_blocks (); /* Rebuild life info for basic blocks that require it. */ if (num_true_changes && life_data_ok) { /* If we allocated new pseudos, we must resize the array for sched1. */ if (max_regno < max_reg_num ()) { max_regno = max_reg_num (); allocate_reg_info (max_regno, FALSE, FALSE); } update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE | PROP_KILL_DEAD_CODE); } /* Write the final stats. */ if (dump_file && num_possible_if_blocks > 0) { fprintf (dump_file, "\n%d possible IF blocks searched.\n", num_possible_if_blocks); fprintf (dump_file, "%d IF blocks converted.\n", num_updated_if_blocks); fprintf (dump_file, "%d true changes made.\n\n\n", num_true_changes); } #ifdef ENABLE_CHECKING verify_flow_info (); #endif } /* Procedure integration for GCC. Copyright (C) 1988, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Round to the next highest integer that meets the alignment. */ #define CEIL_ROUND(VALUE,ALIGN) (((VALUE) + (ALIGN) - 1) & ~((ALIGN)- 1)) /* Private type used by {get/has}_func_hard_reg_initial_val. */ typedef struct initial_value_pair GTY(()) { rtx hard_reg; rtx pseudo; } initial_value_pair; typedef struct initial_value_struct GTY(()) { int num_entries; int max_entries; initial_value_pair * GTY ((length ("%h.num_entries"))) entries; } initial_value_struct; static void subst_constants (rtx *, rtx, struct inline_remap *, int); static void set_block_origin_self (tree); static void set_block_abstract_flags (tree, int); static void mark_stores (rtx, rtx, void *); /* Returns the Ith entry in the label_map contained in MAP. If the Ith entry has not yet been set, return a fresh label. This function performs a lazy initialization of label_map, thereby avoiding huge memory explosions when the label_map gets very large. 
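A typical use (illustrative only) is simply lab = get_label_from_map (map, CODE_LABEL_NUMBER (orig)); the first call for a given index allocates the label, and later calls return that same rtx.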
*/ rtx get_label_from_map (struct inline_remap *map, int i) { rtx x = map->label_map[i]; if (x == NULL_RTX) x = map->label_map[i] = gen_label_rtx (); return x; } /* Return false if the function FNDECL cannot be inlined on account of its attributes, true otherwise. */ bool function_attribute_inlinable_p (tree fndecl) { if (targetm.attribute_table) { tree a; for (a = DECL_ATTRIBUTES (fndecl); a; a = TREE_CHAIN (a)) { tree name = TREE_PURPOSE (a); int i; for (i = 0; targetm.attribute_table[i].name != NULL; i++) if (is_attribute_p (targetm.attribute_table[i].name, name)) return targetm.function_attribute_inlinable_p (fndecl); } } return true; } /* Copy NODE (which must be a DECL). The DECL originally was in the FROM_FN, but now it will be in the TO_FN. */ tree copy_decl_for_inlining (tree decl, tree from_fn, tree to_fn) { tree copy; /* Copy the declaration. */ if (TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == RESULT_DECL) { tree type; int invisiref = 0; /* See if the frontend wants to pass this by invisible reference. */ if (TREE_CODE (decl) == PARM_DECL && DECL_ARG_TYPE (decl) != TREE_TYPE (decl) && POINTER_TYPE_P (DECL_ARG_TYPE (decl)) && TREE_TYPE (DECL_ARG_TYPE (decl)) == TREE_TYPE (decl)) { invisiref = 1; type = DECL_ARG_TYPE (decl); } else type = TREE_TYPE (decl); /* For a parameter or result, we must make an equivalent VAR_DECL, not a new PARM_DECL. */ copy = build_decl (VAR_DECL, DECL_NAME (decl), type); if (!invisiref) { TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (decl); TREE_READONLY (copy) = TREE_READONLY (decl); TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (decl); } else { TREE_ADDRESSABLE (copy) = 0; TREE_READONLY (copy) = 1; TREE_THIS_VOLATILE (copy) = 0; } } else { copy = copy_node (decl); /* The COPY is not abstract; it will be generated in TO_FN. */ DECL_ABSTRACT (copy) = 0; lang_hooks.dup_lang_specific_decl (copy); /* TREE_ADDRESSABLE isn't used to indicate that a label's address has been taken; it's for internal bookkeeping in expand_goto_internal. */ if (TREE_CODE (copy) == LABEL_DECL) { TREE_ADDRESSABLE (copy) = 0; DECL_TOO_LATE (copy) = 0; } } /* Set the DECL_ABSTRACT_ORIGIN so the debugging routines know what declaration inspired this copy. */ DECL_ABSTRACT_ORIGIN (copy) = DECL_ORIGIN (decl); /* The new variable/label has no RTL, yet. */ if (!TREE_STATIC (copy) && !DECL_EXTERNAL (copy)) SET_DECL_RTL (copy, NULL_RTX); /* These args would always appear unused, if not for this. */ TREE_USED (copy) = 1; /* Set the context for the new declaration. */ if (!DECL_CONTEXT (decl)) /* Globals stay global. */ ; else if (DECL_CONTEXT (decl) != from_fn) /* Things that weren't in the scope of the function we're inlining from aren't in the scope we're inlining to, either. */ ; else if (TREE_STATIC (decl)) /* Function-scoped static variables should stay in the original function. */ ; else /* Ordinary automatic local variables are now in the scope of the new function. */ DECL_CONTEXT (copy) = to_fn; return copy; } /* Unfortunately, we need a global copy of const_equiv map for communication with a function called from note_stores. Be *very* careful that this is used properly in the presence of recursion. */ varray_type global_const_equiv_varray; /* Create a new copy of an rtx. Recursively copies the operands of the rtx, except for those few rtx codes that are sharable. We always return an rtx that is similar to that incoming rtx, with the exception of possibly changing a REG to a SUBREG or vice versa. No rtl is ever emitted. 
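A rough example: copying (plus:SI (reg:SI 70) (const_int 4)) allocates a fresh PLUS, sends the REG through reg_map (or through the VIRTUAL_* handling below), and shares the CONST_INT unchanged.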
If FOR_LHS is nonzero, if means we are processing something that will be the LHS of a SET. In that case, we copy RTX_UNCHANGING_P even if inlining since we need to be conservative in how it is set for such cases. Handle constants that need to be placed in the constant pool by calling `force_const_mem'. */ rtx copy_rtx_and_substitute (rtx orig, struct inline_remap *map, int for_lhs) { rtx copy, temp; int i, j; RTX_CODE code; enum machine_mode mode; const char *format_ptr; int regno; if (orig == 0) return 0; code = GET_CODE (orig); mode = GET_MODE (orig); switch (code) { case REG: /* If the stack pointer register shows up, it must be part of stack-adjustments (*not* because we eliminated the frame pointer!). Small hard registers are returned as-is. Pseudo-registers go through their `reg_map'. */ regno = REGNO (orig); if (regno <= LAST_VIRTUAL_REGISTER) { /* Some hard registers are also mapped, but others are not translated. */ if (map->reg_map[regno] != 0) return map->reg_map[regno]; /* If this is the virtual frame pointer, make space in current function's stack frame for the stack frame of the inline function. Copy the address of this area into a pseudo. Map virtual_stack_vars_rtx to this pseudo and set up a constant equivalence for it to be the address. This will substitute the address into insns where it can be substituted and use the new pseudo where it can't. */ else if (regno == VIRTUAL_STACK_VARS_REGNUM) { rtx loc, seq; int size = get_func_frame_size (DECL_STRUCT_FUNCTION (map->fndecl)); #ifdef FRAME_GROWS_DOWNWARD int alignment = (DECL_STRUCT_FUNCTION (map->fndecl)->stack_alignment_needed / BITS_PER_UNIT); /* In this case, virtual_stack_vars_rtx points to one byte higher than the top of the frame area. So make sure we allocate a big enough chunk to keep the frame pointer aligned like a real one. */ if (alignment) size = CEIL_ROUND (size, alignment); #endif start_sequence (); loc = assign_stack_temp (BLKmode, size, 1); loc = XEXP (loc, 0); #ifdef FRAME_GROWS_DOWNWARD /* In this case, virtual_stack_vars_rtx points to one byte higher than the top of the frame area. So compute the offset to one byte higher than our substitute frame. */ loc = plus_constant (loc, size); #endif map->reg_map[regno] = temp = force_reg (Pmode, force_operand (loc, NULL_RTX)); #ifdef STACK_BOUNDARY mark_reg_pointer (map->reg_map[regno], STACK_BOUNDARY); #endif SET_CONST_EQUIV_DATA (map, temp, loc, CONST_AGE_PARM); seq = get_insns (); end_sequence (); emit_insn_after (seq, map->insns_at_start); return temp; } else if (regno == VIRTUAL_INCOMING_ARGS_REGNUM) { /* Do the same for a block to contain any arguments referenced in memory. */ rtx loc, seq; int size = DECL_STRUCT_FUNCTION (map->fndecl)->args_size; start_sequence (); loc = assign_stack_temp (BLKmode, size, 1); loc = XEXP (loc, 0); /* When arguments grow downward, the virtual incoming args pointer points to the top of the argument block, so the remapped location better do the same. */ #ifdef ARGS_GROW_DOWNWARD loc = plus_constant (loc, size); #endif map->reg_map[regno] = temp = force_reg (Pmode, force_operand (loc, NULL_RTX)); #ifdef STACK_BOUNDARY mark_reg_pointer (map->reg_map[regno], STACK_BOUNDARY); #endif SET_CONST_EQUIV_DATA (map, temp, loc, CONST_AGE_PARM); seq = get_insns (); end_sequence (); emit_insn_after (seq, map->insns_at_start); return temp; } else if (REG_FUNCTION_VALUE_P (orig)) { if (rtx_equal_function_value_matters) /* This is an ignored return value. 
We must not leave it in with REG_FUNCTION_VALUE_P set, since that would confuse subsequent inlining of the current function into a later function. */ return gen_rtx_REG (GET_MODE (orig), regno); else /* Must be unrolling loops or replicating code if we reach here, so return the register unchanged. */ return orig; } else return orig; abort (); } if (map->reg_map[regno] == NULL) { map->reg_map[regno] = gen_reg_rtx (mode); REG_USERVAR_P (map->reg_map[regno]) = REG_USERVAR_P (orig); REG_LOOP_TEST_P (map->reg_map[regno]) = REG_LOOP_TEST_P (orig); RTX_UNCHANGING_P (map->reg_map[regno]) = RTX_UNCHANGING_P (orig); /* A reg with REG_FUNCTION_VALUE_P true will never reach here. */ if (REG_POINTER (map->x_regno_reg_rtx[regno])) mark_reg_pointer (map->reg_map[regno], map->regno_pointer_align[regno]); } return map->reg_map[regno]; case SUBREG: copy = copy_rtx_and_substitute (SUBREG_REG (orig), map, for_lhs); return simplify_gen_subreg (GET_MODE (orig), copy, GET_MODE (SUBREG_REG (orig)), SUBREG_BYTE (orig)); case USE: case CLOBBER: /* USE and CLOBBER are ordinary, but we convert (use (subreg foo)) to (use foo) if the original insn didn't have a subreg. Removing the subreg distorts the VAX movstrhi pattern by changing the mode of an operand. */ copy = copy_rtx_and_substitute (XEXP (orig, 0), map, code == CLOBBER); if (GET_CODE (copy) == SUBREG && GET_CODE (XEXP (orig, 0)) != SUBREG) copy = SUBREG_REG (copy); return gen_rtx_fmt_e (code, VOIDmode, copy); /* We need to handle "deleted" labels that appear in the DECL_RTL of a LABEL_DECL. */ case NOTE: if (NOTE_LINE_NUMBER (orig) != NOTE_INSN_DELETED_LABEL) break; /* Fall through. */ case CODE_LABEL: LABEL_PRESERVE_P (get_label_from_map (map, CODE_LABEL_NUMBER (orig))) = LABEL_PRESERVE_P (orig); return get_label_from_map (map, CODE_LABEL_NUMBER (orig)); case LABEL_REF: copy = gen_rtx_LABEL_REF (mode, LABEL_REF_NONLOCAL_P (orig) ? XEXP (orig, 0) : get_label_from_map (map, CODE_LABEL_NUMBER (XEXP (orig, 0)))); LABEL_OUTSIDE_LOOP_P (copy) = LABEL_OUTSIDE_LOOP_P (orig); /* The fact that this label was previously nonlocal does not mean it still is, so we must check if it is within the range of this function's labels. */ LABEL_REF_NONLOCAL_P (copy) = (LABEL_REF_NONLOCAL_P (orig) && ! (CODE_LABEL_NUMBER (XEXP (copy, 0)) >= get_first_label_num () && CODE_LABEL_NUMBER (XEXP (copy, 0)) < max_label_num ())); return copy; case PC: case CC0: case CONST_INT: case CONST_VECTOR: return orig; case SYMBOL_REF: /* Symbols which represent the address of a label stored in the constant pool must be modified to point to a constant pool entry for the remapped label. Otherwise, symbols are returned unchanged. */ if (CONSTANT_POOL_ADDRESS_P (orig)) { struct function *f = cfun; rtx constant = get_pool_constant_for_function (f, orig); if (GET_CODE (constant) == LABEL_REF) return XEXP (force_const_mem (GET_MODE (orig), copy_rtx_and_substitute (constant, map, for_lhs)), 0); } return orig; case CONST_DOUBLE: /* We have to make a new copy of this CONST_DOUBLE because don't want to use the old value of CONST_DOUBLE_MEM. Also, this may be a duplicate of a CONST_DOUBLE we have already seen. 
*/ if (GET_MODE_CLASS (GET_MODE (orig)) == MODE_FLOAT) { REAL_VALUE_TYPE d; REAL_VALUE_FROM_CONST_DOUBLE (d, orig); return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (orig)); } else return immed_double_const (CONST_DOUBLE_LOW (orig), CONST_DOUBLE_HIGH (orig), VOIDmode); case CONST: break; case ASM_OPERANDS: /* If a single asm insn contains multiple output operands then it contains multiple ASM_OPERANDS rtx's that share the input and constraint vecs. We must make sure that the copied insn continues to share it. */ if (map->orig_asm_operands_vector == ASM_OPERANDS_INPUT_VEC (orig)) { copy = rtx_alloc (ASM_OPERANDS); RTX_FLAG (copy, volatil) = RTX_FLAG (orig, volatil); PUT_MODE (copy, GET_MODE (orig)); ASM_OPERANDS_TEMPLATE (copy) = ASM_OPERANDS_TEMPLATE (orig); ASM_OPERANDS_OUTPUT_CONSTRAINT (copy) = ASM_OPERANDS_OUTPUT_CONSTRAINT (orig); ASM_OPERANDS_OUTPUT_IDX (copy) = ASM_OPERANDS_OUTPUT_IDX (orig); ASM_OPERANDS_INPUT_VEC (copy) = map->copy_asm_operands_vector; ASM_OPERANDS_INPUT_CONSTRAINT_VEC (copy) = map->copy_asm_constraints_vector; #ifdef USE_MAPPED_LOCATION ASM_OPERANDS_SOURCE_LOCATION (copy) = ASM_OPERANDS_SOURCE_LOCATION (orig); #else ASM_OPERANDS_SOURCE_FILE (copy) = ASM_OPERANDS_SOURCE_FILE (orig); ASM_OPERANDS_SOURCE_LINE (copy) = ASM_OPERANDS_SOURCE_LINE (orig); #endif return copy; } break; case CALL: /* This is given special treatment because the first operand of a CALL is a (MEM ...) which may get forced into a register for cse. This is undesirable if function-address cse isn't wanted or if we won't do cse. */ #ifndef NO_FUNCTION_CSE if (! (optimize && ! flag_no_function_cse)) #endif { rtx copy = gen_rtx_MEM (GET_MODE (XEXP (orig, 0)), copy_rtx_and_substitute (XEXP (XEXP (orig, 0), 0), map, 0)); MEM_COPY_ATTRIBUTES (copy, XEXP (orig, 0)); return gen_rtx_CALL (GET_MODE (orig), copy, copy_rtx_and_substitute (XEXP (orig, 1), map, 0)); } break; #if 0 /* Must be ifdefed out for loop unrolling to work. */ /* ??? Is this for the old or the new unroller? */ case RETURN: abort (); #endif case SET: /* If this is setting fp or ap, it means that we have a nonlocal goto. Adjust the setting by the offset of the area we made. If the nonlocal goto is into the current function, this will result in unnecessarily bad code, but should work. */ if (SET_DEST (orig) == virtual_stack_vars_rtx || SET_DEST (orig) == virtual_incoming_args_rtx) { /* In case a translation hasn't occurred already, make one now. */ rtx equiv_reg; rtx equiv_loc; HOST_WIDE_INT loc_offset; copy_rtx_and_substitute (SET_DEST (orig), map, for_lhs); equiv_reg = map->reg_map[REGNO (SET_DEST (orig))]; equiv_loc = VARRAY_CONST_EQUIV (map->const_equiv_varray, REGNO (equiv_reg)).rtx; loc_offset = REG_P (equiv_loc) ? 
0 : INTVAL (XEXP (equiv_loc, 1)); return gen_rtx_SET (VOIDmode, SET_DEST (orig), force_operand (plus_constant (copy_rtx_and_substitute (SET_SRC (orig), map, 0), - loc_offset), NULL_RTX)); } else return gen_rtx_SET (VOIDmode, copy_rtx_and_substitute (SET_DEST (orig), map, 1), copy_rtx_and_substitute (SET_SRC (orig), map, 0)); break; case MEM: copy = gen_rtx_MEM (mode, copy_rtx_and_substitute (XEXP (orig, 0), map, 0)); MEM_COPY_ATTRIBUTES (copy, orig); return copy; default: break; } copy = rtx_alloc (code); PUT_MODE (copy, mode); RTX_FLAG (copy, in_struct) = RTX_FLAG (orig, in_struct); RTX_FLAG (copy, volatil) = RTX_FLAG (orig, volatil); RTX_FLAG (copy, unchanging) = RTX_FLAG (orig, unchanging); format_ptr = GET_RTX_FORMAT (GET_CODE (copy)); for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++) { switch (*format_ptr++) { case '0': X0ANY (copy, i) = X0ANY (orig, i); break; case 'e': XEXP (copy, i) = copy_rtx_and_substitute (XEXP (orig, i), map, for_lhs); break; case 'u': /* Change any references to old-insns to point to the corresponding copied insns. */ XEXP (copy, i) = map->insn_map[INSN_UID (XEXP (orig, i))]; break; case 'E': XVEC (copy, i) = XVEC (orig, i); if (XVEC (orig, i) != NULL && XVECLEN (orig, i) != 0) { XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i)); for (j = 0; j < XVECLEN (copy, i); j++) XVECEXP (copy, i, j) = copy_rtx_and_substitute (XVECEXP (orig, i, j), map, for_lhs); } break; case 'w': XWINT (copy, i) = XWINT (orig, i); break; case 'i': XINT (copy, i) = XINT (orig, i); break; case 's': XSTR (copy, i) = XSTR (orig, i); break; case 't': XTREE (copy, i) = XTREE (orig, i); break; default: abort (); } } if (code == ASM_OPERANDS && map->orig_asm_operands_vector == 0) { map->orig_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (orig); map->copy_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (copy); map->copy_asm_constraints_vector = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (copy); } return copy; } /* Substitute known constant values into INSN, if that is valid. */ void try_constants (rtx insn, struct inline_remap *map) { int i; map->num_sets = 0; /* First try just updating addresses, then other things. This is important when we have something like the store of a constant into memory and we can update the memory address but the machine does not support a constant source. */ subst_constants (&PATTERN (insn), insn, map, 1); apply_change_group (); subst_constants (&PATTERN (insn), insn, map, 0); apply_change_group (); /* Enforce consistency between the addresses in the regular insn flow and the ones in CALL_INSN_FUNCTION_USAGE lists, if any. */ if (GET_CODE (insn) == CALL_INSN && CALL_INSN_FUNCTION_USAGE (insn)) { subst_constants (&CALL_INSN_FUNCTION_USAGE (insn), insn, map, 1); apply_change_group (); } /* Show we don't know the value of anything stored or clobbered. */ note_stores (PATTERN (insn), mark_stores, NULL); map->last_pc_value = 0; #ifdef HAVE_cc0 map->last_cc0_value = 0; #endif /* Set up any constant equivalences made in this insn. */ for (i = 0; i < map->num_sets; i++) { if (REG_P (map->equiv_sets[i].dest)) { int regno = REGNO (map->equiv_sets[i].dest); MAYBE_EXTEND_CONST_EQUIV_VARRAY (map, regno); if (VARRAY_CONST_EQUIV (map->const_equiv_varray, regno).rtx == 0 /* Following clause is a hack to make case work where GNU C++ reassigns a variable to make cse work right. */ || ! 
rtx_equal_p (VARRAY_CONST_EQUIV (map->const_equiv_varray, regno).rtx, map->equiv_sets[i].equiv)) SET_CONST_EQUIV_DATA (map, map->equiv_sets[i].dest, map->equiv_sets[i].equiv, map->const_age); } else if (map->equiv_sets[i].dest == pc_rtx) map->last_pc_value = map->equiv_sets[i].equiv; #ifdef HAVE_cc0 else if (map->equiv_sets[i].dest == cc0_rtx) map->last_cc0_value = map->equiv_sets[i].equiv; #endif } } /* Substitute known constants for pseudo regs in the contents of LOC, which are part of INSN. If INSN is zero, the substitution should always be done (this is used to update DECL_RTL). These changes are taken out by try_constants if the result is not valid. Note that we are more concerned with determining when the result of a SET is a constant, for further propagation, than actually inserting constants into insns; cse will do the latter task better. This function is also used to adjust address of items previously addressed via the virtual stack variable or virtual incoming arguments registers. If MEMONLY is nonzero, only make changes inside a MEM. */ static void subst_constants (rtx *loc, rtx insn, struct inline_remap *map, int memonly) { rtx x = *loc; int i, j; enum rtx_code code; const char *format_ptr; int num_changes = num_validated_changes (); rtx new = 0; enum machine_mode op0_mode = MAX_MACHINE_MODE; code = GET_CODE (x); switch (code) { case PC: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case CONST: case LABEL_REF: case ADDRESS: return; #ifdef HAVE_cc0 case CC0: if (! memonly) validate_change (insn, loc, map->last_cc0_value, 1); return; #endif case USE: case CLOBBER: /* The only thing we can do with a USE or CLOBBER is possibly do some substitutions in a MEM within it. */ if (MEM_P (XEXP (x, 0))) subst_constants (&XEXP (XEXP (x, 0), 0), insn, map, 0); return; case REG: /* Substitute for parms and known constants. Don't replace hard regs used as user variables with constants. */ if (! memonly) { int regno = REGNO (x); struct const_equiv_data *p; if (! (regno < FIRST_PSEUDO_REGISTER && REG_USERVAR_P (x)) && (size_t) regno < VARRAY_SIZE (map->const_equiv_varray) && (p = &VARRAY_CONST_EQUIV (map->const_equiv_varray, regno), p->rtx != 0) && p->age >= map->const_age) validate_change (insn, loc, p->rtx, 1); } return; case SUBREG: /* SUBREG applied to something other than a reg should be treated as ordinary, since that must be a special hack and we don't know how to treat it specially. Consider for example mulsidi3 in m68k.md. Ordinary SUBREG of a REG needs this special treatment. */ if (! memonly && REG_P (SUBREG_REG (x))) { rtx inner = SUBREG_REG (x); rtx new = 0; /* We can't call subst_constants on &SUBREG_REG (x) because any constant or SUBREG wouldn't be valid inside our SUBEG. Instead, see what is inside, try to form the new SUBREG and see if that is valid. We handle two cases: extracting a full word in an integral mode and extracting the low part. */ subst_constants (&inner, NULL_RTX, map, 0); new = simplify_gen_subreg (GET_MODE (x), inner, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); if (new) validate_change (insn, loc, new, 1); else cancel_changes (num_changes); return; } break; case MEM: subst_constants (&XEXP (x, 0), insn, map, 0); /* If a memory address got spoiled, change it back. */ if (! memonly && insn != 0 && num_validated_changes () != num_changes && ! 
memory_address_p (GET_MODE (x), XEXP (x, 0))) cancel_changes (num_changes); return; case SET: { /* Substitute constants in our source, and in any arguments to a complex (e..g, ZERO_EXTRACT) destination, but not in the destination itself. */ rtx *dest_loc = &SET_DEST (x); rtx dest = *dest_loc; rtx src, tem; enum machine_mode compare_mode = VOIDmode; /* If SET_SRC is a COMPARE which subst_constants would turn into COMPARE of 2 VOIDmode constants, note the mode in which comparison is to be done. */ if (GET_CODE (SET_SRC (x)) == COMPARE) { src = SET_SRC (x); if (GET_MODE_CLASS (GET_MODE (src)) == MODE_CC || CC0_P (dest)) { compare_mode = GET_MODE (XEXP (src, 0)); if (compare_mode == VOIDmode) compare_mode = GET_MODE (XEXP (src, 1)); } } subst_constants (&SET_SRC (x), insn, map, memonly); src = SET_SRC (x); while (GET_CODE (*dest_loc) == ZERO_EXTRACT || GET_CODE (*dest_loc) == SUBREG || GET_CODE (*dest_loc) == STRICT_LOW_PART) { if (GET_CODE (*dest_loc) == ZERO_EXTRACT) { subst_constants (&XEXP (*dest_loc, 1), insn, map, memonly); subst_constants (&XEXP (*dest_loc, 2), insn, map, memonly); } dest_loc = &XEXP (*dest_loc, 0); } /* Do substitute in the address of a destination in memory. */ if (MEM_P (*dest_loc)) subst_constants (&XEXP (*dest_loc, 0), insn, map, 0); /* Check for the case of DEST a SUBREG, both it and the underlying register are less than one word, and the SUBREG has the wider mode. In the case, we are really setting the underlying register to the source converted to the mode of DEST. So indicate that. */ if (GET_CODE (dest) == SUBREG && GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD && GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) <= UNITS_PER_WORD && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) <= GET_MODE_SIZE (GET_MODE (dest))) && (tem = gen_lowpart_if_possible (GET_MODE (SUBREG_REG (dest)), src))) src = tem, dest = SUBREG_REG (dest); /* If storing a recognizable value save it for later recording. */ if ((map->num_sets < MAX_RECOG_OPERANDS) && (CONSTANT_P (src) || (REG_P (src) && (REGNO (src) == VIRTUAL_INCOMING_ARGS_REGNUM || REGNO (src) == VIRTUAL_STACK_VARS_REGNUM)) || (GET_CODE (src) == PLUS && REG_P (XEXP (src, 0)) && (REGNO (XEXP (src, 0)) == VIRTUAL_INCOMING_ARGS_REGNUM || REGNO (XEXP (src, 0)) == VIRTUAL_STACK_VARS_REGNUM) && CONSTANT_P (XEXP (src, 1))) || GET_CODE (src) == COMPARE || CC0_P (dest) || (dest == pc_rtx && (src == pc_rtx || GET_CODE (src) == RETURN || GET_CODE (src) == LABEL_REF)))) { /* Normally, this copy won't do anything. But, if SRC is a COMPARE it will cause us to save the COMPARE with any constants substituted, which is what we want for later. */ rtx src_copy = copy_rtx (src); map->equiv_sets[map->num_sets].equiv = src_copy; map->equiv_sets[map->num_sets++].dest = dest; if (compare_mode != VOIDmode && GET_CODE (src) == COMPARE && (GET_MODE_CLASS (GET_MODE (src)) == MODE_CC || CC0_P (dest)) && GET_MODE (XEXP (src, 0)) == VOIDmode && GET_MODE (XEXP (src, 1)) == VOIDmode) { map->compare_src = src_copy; map->compare_mode = compare_mode; } } } return; default: break; } format_ptr = GET_RTX_FORMAT (code); /* If the first operand is an expression, save its mode for later. 
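(Illustrative: after a CONST_INT has been substituted into something like (neg:SI (reg:SI 70)), the operand no longer carries SImode by itself, and this saved op0_mode is what lets the simplifiers further down fold the operation.)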
*/ if (*format_ptr == 'e') op0_mode = GET_MODE (XEXP (x, 0)); for (i = 0; i < GET_RTX_LENGTH (code); i++) { switch (*format_ptr++) { case '0': break; case 'e': if (XEXP (x, i)) subst_constants (&XEXP (x, i), insn, map, memonly); break; case 'u': case 'i': case 's': case 'w': case 'n': case 't': case 'B': break; case 'E': if (XVEC (x, i) != NULL && XVECLEN (x, i) != 0) for (j = 0; j < XVECLEN (x, i); j++) subst_constants (&XVECEXP (x, i, j), insn, map, memonly); break; default: abort (); } } /* If this is a commutative operation, move a constant to the second operand unless the second operand is already a CONST_INT. */ if (! memonly && (GET_RTX_CLASS (code) == RTX_COMM_ARITH || GET_RTX_CLASS (code) == RTX_COMM_COMPARE) && CONSTANT_P (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) != CONST_INT) { rtx tem = XEXP (x, 0); validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1); validate_change (insn, &XEXP (x, 1), tem, 1); } /* Simplify the expression in case we put in some constants. */ if (! memonly) switch (GET_RTX_CLASS (code)) { case RTX_UNARY: if (op0_mode == MAX_MACHINE_MODE) abort (); new = simplify_unary_operation (code, GET_MODE (x), XEXP (x, 0), op0_mode); break; case RTX_COMPARE: case RTX_COMM_COMPARE: { enum machine_mode op_mode = GET_MODE (XEXP (x, 0)); if (op_mode == VOIDmode) op_mode = GET_MODE (XEXP (x, 1)); new = simplify_relational_operation (code, GET_MODE (x), op_mode, XEXP (x, 0), XEXP (x, 1)); break; } case RTX_BIN_ARITH: case RTX_COMM_ARITH: new = simplify_binary_operation (code, GET_MODE (x), XEXP (x, 0), XEXP (x, 1)); break; case RTX_BITFIELD_OPS: case RTX_TERNARY: if (op0_mode == MAX_MACHINE_MODE) abort (); if (code == IF_THEN_ELSE) { rtx op0 = XEXP (x, 0); if (COMPARISON_P (op0) && GET_MODE (op0) == VOIDmode && ! side_effects_p (op0) && XEXP (op0, 0) == map->compare_src && GET_MODE (XEXP (op0, 1)) == VOIDmode) { /* We have compare of two VOIDmode constants for which we recorded the comparison mode. */ rtx tem = simplify_gen_relational (GET_CODE (op0), GET_MODE (op0), map->compare_mode, XEXP (op0, 0), XEXP (op0, 1)); if (GET_CODE (tem) != CONST_INT) new = simplify_ternary_operation (code, GET_MODE (x), op0_mode, tem, XEXP (x, 1), XEXP (x, 2)); else if (tem == const0_rtx) new = XEXP (x, 2); else new = XEXP (x, 1); } } if (!new) new = simplify_ternary_operation (code, GET_MODE (x), op0_mode, XEXP (x, 0), XEXP (x, 1), XEXP (x, 2)); break; default: break; } if (new) validate_change (insn, loc, new, 1); } /* Show that register modified no longer contain known constants. We are called from note_stores with parts of the new insn. */ static void mark_stores (rtx dest, rtx x ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { int regno = -1; enum machine_mode mode = VOIDmode; /* DEST is always the innermost thing set, except in the case of SUBREGs of hard registers. */ if (REG_P (dest)) regno = REGNO (dest), mode = GET_MODE (dest); else if (GET_CODE (dest) == SUBREG && REG_P (SUBREG_REG (dest))) { regno = REGNO (SUBREG_REG (dest)); if (regno < FIRST_PSEUDO_REGISTER) regno += subreg_regno_offset (REGNO (SUBREG_REG (dest)), GET_MODE (SUBREG_REG (dest)), SUBREG_BYTE (dest), GET_MODE (dest)); mode = GET_MODE (SUBREG_REG (dest)); } if (regno >= 0) { unsigned int uregno = regno; unsigned int last_reg = (uregno >= FIRST_PSEUDO_REGISTER ? uregno : uregno + hard_regno_nregs[uregno][mode] - 1); unsigned int i; /* Ignore virtual stack var or virtual arg register since those are handled separately. 
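For any other register the copied insn stores into - a pseudo, say - the loop below simply clears its slot in global_const_equiv_varray, so a stale equivalence is never carried past the store.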
*/ if (uregno != VIRTUAL_INCOMING_ARGS_REGNUM && uregno != VIRTUAL_STACK_VARS_REGNUM) for (i = uregno; i <= last_reg; i++) if ((size_t) i < VARRAY_SIZE (global_const_equiv_varray)) VARRAY_CONST_EQUIV (global_const_equiv_varray, i).rtx = 0; } } /* Given a pointer to some BLOCK node, if the BLOCK_ABSTRACT_ORIGIN for the given BLOCK node is NULL, set the BLOCK_ABSTRACT_ORIGIN for the node so that it points to the node itself, thus indicating that the node is its own (abstract) origin. Additionally, if the BLOCK_ABSTRACT_ORIGIN for the given node is NULL, recursively descend the decl/block tree which it is the root of, and for each other ..._DECL or BLOCK node contained therein whose DECL_ABSTRACT_ORIGINs or BLOCK_ABSTRACT_ORIGINs are also still NULL, set *their* DECL_ABSTRACT_ORIGIN or BLOCK_ABSTRACT_ORIGIN values to point to themselves. */ static void set_block_origin_self (tree stmt) { if (BLOCK_ABSTRACT_ORIGIN (stmt) == NULL_TREE) { BLOCK_ABSTRACT_ORIGIN (stmt) = stmt; { tree local_decl; for (local_decl = BLOCK_VARS (stmt); local_decl != NULL_TREE; local_decl = TREE_CHAIN (local_decl)) set_decl_origin_self (local_decl); /* Potential recursion. */ } { tree subblock; for (subblock = BLOCK_SUBBLOCKS (stmt); subblock != NULL_TREE; subblock = BLOCK_CHAIN (subblock)) set_block_origin_self (subblock); /* Recurse. */ } } } /* Given a pointer to some ..._DECL node, if the DECL_ABSTRACT_ORIGIN for the given ..._DECL node is NULL, set the DECL_ABSTRACT_ORIGIN for the node to so that it points to the node itself, thus indicating that the node represents its own (abstract) origin. Additionally, if the DECL_ABSTRACT_ORIGIN for the given node is NULL, recursively descend the decl/block tree of which the given node is the root of, and for each other ..._DECL or BLOCK node contained therein whose DECL_ABSTRACT_ORIGINs or BLOCK_ABSTRACT_ORIGINs are also still NULL, set *their* DECL_ABSTRACT_ORIGIN or BLOCK_ABSTRACT_ORIGIN values to point to themselves. */ void set_decl_origin_self (tree decl) { if (DECL_ABSTRACT_ORIGIN (decl) == NULL_TREE) { DECL_ABSTRACT_ORIGIN (decl) = decl; if (TREE_CODE (decl) == FUNCTION_DECL) { tree arg; for (arg = DECL_ARGUMENTS (decl); arg; arg = TREE_CHAIN (arg)) DECL_ABSTRACT_ORIGIN (arg) = arg; if (DECL_INITIAL (decl) != NULL_TREE && DECL_INITIAL (decl) != error_mark_node) set_block_origin_self (DECL_INITIAL (decl)); } } } /* Given a pointer to some BLOCK node, and a boolean value to set the "abstract" flags to, set that value into the BLOCK_ABSTRACT flag for the given block, and for all local decls and all local sub-blocks (recursively) which are contained therein. */ static void set_block_abstract_flags (tree stmt, int setting) { tree local_decl; tree subblock; BLOCK_ABSTRACT (stmt) = setting; for (local_decl = BLOCK_VARS (stmt); local_decl != NULL_TREE; local_decl = TREE_CHAIN (local_decl)) set_decl_abstract_flags (local_decl, setting); for (subblock = BLOCK_SUBBLOCKS (stmt); subblock != NULL_TREE; subblock = BLOCK_CHAIN (subblock)) set_block_abstract_flags (subblock, setting); } /* Given a pointer to some ..._DECL node, and a boolean value to set the "abstract" flags to, set that value into the DECL_ABSTRACT flag for the given decl, and (in the case where the decl is a FUNCTION_DECL) also set the abstract flags for all of the parameters, local vars, local blocks and sub-blocks (recursively) to the same setting. 
*/ void set_decl_abstract_flags (tree decl, int setting) { DECL_ABSTRACT (decl) = setting; if (TREE_CODE (decl) == FUNCTION_DECL) { tree arg; for (arg = DECL_ARGUMENTS (decl); arg; arg = TREE_CHAIN (arg)) DECL_ABSTRACT (arg) = setting; if (DECL_INITIAL (decl) != NULL_TREE && DECL_INITIAL (decl) != error_mark_node) set_block_abstract_flags (DECL_INITIAL (decl), setting); } } /* Functions to keep track of the values hard regs had at the start of the function. */ rtx get_hard_reg_initial_reg (struct function *fun, rtx reg) { struct initial_value_struct *ivs = fun->hard_reg_initial_vals; int i; if (ivs == 0) return NULL_RTX; for (i = 0; i < ivs->num_entries; i++) if (rtx_equal_p (ivs->entries[i].pseudo, reg)) return ivs->entries[i].hard_reg; return NULL_RTX; } rtx has_func_hard_reg_initial_val (struct function *fun, rtx reg) { struct initial_value_struct *ivs = fun->hard_reg_initial_vals; int i; if (ivs == 0) return NULL_RTX; for (i = 0; i < ivs->num_entries; i++) if (rtx_equal_p (ivs->entries[i].hard_reg, reg)) return ivs->entries[i].pseudo; return NULL_RTX; } rtx get_func_hard_reg_initial_val (struct function *fun, rtx reg) { struct initial_value_struct *ivs = fun->hard_reg_initial_vals; rtx rv = has_func_hard_reg_initial_val (fun, reg); if (rv) return rv; if (ivs == 0) { fun->hard_reg_initial_vals = ggc_alloc (sizeof (initial_value_struct)); ivs = fun->hard_reg_initial_vals; ivs->num_entries = 0; ivs->max_entries = 5; ivs->entries = ggc_alloc (5 * sizeof (initial_value_pair)); } if (ivs->num_entries >= ivs->max_entries) { ivs->max_entries += 5; ivs->entries = ggc_realloc (ivs->entries, ivs->max_entries * sizeof (initial_value_pair)); } ivs->entries[ivs->num_entries].hard_reg = reg; ivs->entries[ivs->num_entries].pseudo = gen_reg_rtx (GET_MODE (reg)); return ivs->entries[ivs->num_entries++].pseudo; } rtx get_hard_reg_initial_val (enum machine_mode mode, int regno) { return get_func_hard_reg_initial_val (cfun, gen_rtx_REG (mode, regno)); } rtx has_hard_reg_initial_val (enum machine_mode mode, int regno) { return has_func_hard_reg_initial_val (cfun, gen_rtx_REG (mode, regno)); } void emit_initial_value_sets (void) { struct initial_value_struct *ivs = cfun->hard_reg_initial_vals; int i; rtx seq; if (ivs == 0) return; start_sequence (); for (i = 0; i < ivs->num_entries; i++) emit_move_insn (ivs->entries[i].pseudo, ivs->entries[i].hard_reg); seq = get_insns (); end_sequence (); emit_insn_after (seq, entry_of_function ()); } /* If the backend knows where to allocate pseudos for hard register initial values, register these allocations now. */ void allocate_initial_values (rtx *reg_equiv_memory_loc ATTRIBUTE_UNUSED) { #ifdef ALLOCATE_INITIAL_VALUE struct initial_value_struct *ivs = cfun->hard_reg_initial_vals; int i; if (ivs == 0) return; for (i = 0; i < ivs->num_entries; i++) { int regno = REGNO (ivs->entries[i].pseudo); rtx x = ALLOCATE_INITIAL_VALUE (ivs->entries[i].hard_reg); if (x == NULL_RTX || REG_N_SETS (REGNO (ivs->entries[i].pseudo)) > 1) ; /* Do nothing. */ else if (MEM_P (x)) reg_equiv_memory_loc[regno] = x; else if (REG_P (x)) { reg_renumber[regno] = REGNO (x); /* Poke the regno right into regno_reg_rtx so that even fixed regs are accepted. */ REGNO (ivs->entries[i].pseudo) = REGNO (x); } else abort (); } #endif } /* Type information for integrate.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_initial_value_struct (void *x_p) { struct initial_value_struct * const x = (struct initial_value_struct *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_entries); i0++) { gt_ggc_m_7rtx_def ((*x).entries[i0].hard_reg); gt_ggc_m_7rtx_def ((*x).entries[i0].pseudo); } ggc_mark ((*x).entries); } } } void gt_pch_nx_initial_value_struct (void *x_p) { struct initial_value_struct * const x = (struct initial_value_struct *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_20initial_value_struct)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_entries); i0++) { gt_pch_n_7rtx_def ((*x).entries[i0].hard_reg); gt_pch_n_7rtx_def ((*x).entries[i0].pseudo); } gt_pch_note_object ((*x).entries, x, gt_pch_p_20initial_value_struct); } } } void gt_pch_p_20initial_value_struct (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct initial_value_struct * const x ATTRIBUTE_UNUSED = (struct initial_value_struct *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).num_entries); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0].hard_reg), cookie); if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0].pseudo), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } /* Message translation utilities. Copyright (C) 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef ONE_COMPILATION_UNIT #define LOCALEDIR "/scratch2/smcc-extras/build/gcc-cvs/install/share/locale" #endif #ifdef HAVE_LANGINFO_CODESET #include #endif /* Opening quotation mark for diagnostics. */ const char *open_quote = "'"; /* Closing quotation mark for diagnostics. */ const char *close_quote = "'"; #ifdef ENABLE_NLS /* Initialize the translation library for GCC. This performs the appropriate sequence of calls - setlocale, bindtextdomain, textdomain. LC_CTYPE determines the character set used by the terminal, so it has be set to output messages correctly. 
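(For example, under a UTF-8 locale the code below switches the diagnostic quotes to the Unicode single quotation marks U+2018 and U+2019, while in a plain C locale the ASCII apostrophe is used for both.)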
*/ void gcc_init_libintl (void) { #ifdef HAVE_LC_MESSAGES setlocale (LC_CTYPE, ""); setlocale (LC_MESSAGES, ""); #else setlocale (LC_ALL, ""); #endif (void) bindtextdomain ("gcc", LOCALEDIR); (void) textdomain ("gcc"); /* Opening quotation mark. */ open_quote = _("`"); /* Closing quotation mark. */ close_quote = _("'"); if (!strcmp (open_quote, "`") && !strcmp (close_quote, "'")) { #if defined HAVE_LANGINFO_CODESET const char *encoding; #endif /* Untranslated quotes that it may be possible to replace with U+2018 and U+2019; but otherwise use "'" instead of "`" as opening quote. */ open_quote = "'"; #if defined HAVE_LANGINFO_CODESET encoding = nl_langinfo (CODESET); if (encoding != NULL && (!strcasecmp (encoding, "utf-8") || !strcasecmp (encoding, "utf8"))) { open_quote = "\xe2\x80\x98"; close_quote = "\xe2\x80\x99"; } #endif } } #if defined HAVE_WCHAR_H && defined HAVE_WORKING_MBSTOWCS && defined HAVE_WCSWIDTH #include /* Returns the width in columns of MSGSTR, which came from gettext. This is for indenting subsequent output. */ size_t gcc_gettext_width (const char *msgstr) { size_t nwcs = mbstowcs (0, msgstr, 0); wchar_t *wmsgstr = alloca ((nwcs + 1) * sizeof (wchar_t)); mbstowcs (wmsgstr, msgstr, nwcs + 1); return wcswidth (wmsgstr, nwcs); } #else /* no wcswidth */ /* We don't have any way of knowing how wide the string is. Guess the length of the string. */ size_t gcc_gettext_width (const char *msgstr) { return strlen (msgstr); } #endif #endif /* ENABLE_NLS */ /* Optimize jump instructions, for GNU compiler. Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This is the pathetic reminder of old fame of the jump-optimization pass of the compiler. Now it contains basically set of utility function to operate with jumps. Each CODE_LABEL has a count of the times it is used stored in the LABEL_NUSES internal field, and each JUMP_INSN has one label that it refers to stored in the JUMP_LABEL internal field. With this we can detect labels that become unused because of the deletion of all the jumps that formerly used them. The JUMP_LABEL info is sometimes looked at by later passes. The subroutines redirect_jump and invert_jump are used from other passes as well. */ /* Optimize jump y; x: ... y: jumpif... x? Don't know if it is worth bothering with. */ /* Optimize two cases of conditional jump to conditional jump? This can never delete any instruction or make anything dead, or even change what is live at any point. So perhaps let combiner do it. 
*/ static void init_label_info (rtx); static void mark_all_labels (rtx); static void delete_computation (rtx); static void redirect_exp_1 (rtx *, rtx, rtx, rtx); static int redirect_exp (rtx, rtx, rtx); static void invert_exp_1 (rtx); static int invert_exp (rtx); static int returnjump_p_1 (rtx *, void *); static void delete_prior_computation (rtx, rtx); /* Alternate entry into the jump optimizer. This entry point only rebuilds the JUMP_LABEL field in jumping insns and REG_LABEL notes in non-jumping instructions. */ void rebuild_jump_labels (rtx f) { rtx insn; timevar_push (TV_REBUILD_JUMP); init_label_info (f); mark_all_labels (f); /* Keep track of labels used from static data; we don't track them closely enough to delete them here, so make sure their reference count doesn't drop to zero. */ for (insn = forced_labels; insn; insn = XEXP (insn, 1)) if (GET_CODE (XEXP (insn, 0)) == CODE_LABEL) LABEL_NUSES (XEXP (insn, 0))++; timevar_pop (TV_REBUILD_JUMP); } /* Some old code expects exactly one BARRIER as the NEXT_INSN of a non-fallthru insn. This is not generally true, as multiple barriers may have crept in, or the BARRIER may be separated from the last real insn by one or more NOTEs. This simple pass moves barriers and removes duplicates so that the old code is happy. */ void cleanup_barriers (void) { rtx insn, next, prev; for (insn = get_insns (); insn; insn = next) { next = NEXT_INSN (insn); if (GET_CODE (insn) == BARRIER) { prev = prev_nonnote_insn (insn); if (GET_CODE (prev) == BARRIER) delete_barrier (insn); else if (prev != PREV_INSN (insn)) reorder_insns (insn, insn, prev); } } } void purge_line_number_notes (rtx f) { rtx last_note = 0; rtx insn; /* Delete extraneous line number notes. Note that two consecutive notes for different lines are not really extraneous. There should be some indication where that line belonged, even if it became empty. */ for (insn = f; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == NOTE) { if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG) /* Any previous line note was for the prologue; gdb wants a new note after the prologue even if it is for the same line. */ last_note = NULL_RTX; else if (NOTE_LINE_NUMBER (insn) >= 0) { /* Delete this note if it is identical to previous note. */ if (last_note #ifdef USE_MAPPED_LOCATION && NOTE_SOURCE_LOCATION (insn) == NOTE_SOURCE_LOCATION (last_note) #else && NOTE_SOURCE_FILE (insn) == NOTE_SOURCE_FILE (last_note) && NOTE_LINE_NUMBER (insn) == NOTE_LINE_NUMBER (last_note) #endif ) { delete_related_insns (insn); continue; } last_note = insn; } } } /* Initialize LABEL_NUSES and JUMP_LABEL fields. Delete any REG_LABEL notes whose labels don't occur in the insn any more. Returns the largest INSN_UID found. */ static void init_label_info (rtx f) { rtx insn; for (insn = f; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == CODE_LABEL) LABEL_NUSES (insn) = (LABEL_PRESERVE_P (insn) != 0); else if (GET_CODE (insn) == JUMP_INSN) JUMP_LABEL (insn) = 0; else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN) { rtx note, next; for (note = REG_NOTES (insn); note; note = next) { next = XEXP (note, 1); if (REG_NOTE_KIND (note) == REG_LABEL && ! reg_mentioned_p (XEXP (note, 0), PATTERN (insn))) remove_note (insn, note); } } } /* Mark the label each jump jumps to. Combine consecutive labels, and count uses of labels. */ static void mark_all_labels (rtx f) { rtx insn; for (insn = f; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { mark_jump_label (PATTERN (insn), insn, 0); if (! 
INSN_DELETED_P (insn) && GET_CODE (insn) == JUMP_INSN) { /* When we know the LABEL_REF contained in a REG used in an indirect jump, we'll have a REG_LABEL note so that flow can tell where it's going. */ if (JUMP_LABEL (insn) == 0) { rtx label_note = find_reg_note (insn, REG_LABEL, NULL_RTX); if (label_note) { /* But a LABEL_REF around the REG_LABEL note, so that we can canonicalize it. */ rtx label_ref = gen_rtx_LABEL_REF (VOIDmode, XEXP (label_note, 0)); mark_jump_label (label_ref, insn, 0); XEXP (label_note, 0) = XEXP (label_ref, 0); JUMP_LABEL (insn) = XEXP (label_note, 0); } } } } } /* Move all block-beg, block-end, loop-beg, loop-cont, loop-vtop, loop-end, notes between START and END out before START. START and END may be such notes. Returns the values of the new starting and ending insns, which may be different if the original ones were such notes. Return true if there were only such notes and no real instructions. */ bool squeeze_notes (rtx* startp, rtx* endp) { rtx start = *startp; rtx end = *endp; rtx insn; rtx next; rtx last = NULL; rtx past_end = NEXT_INSN (end); for (insn = start; insn != past_end; insn = next) { next = NEXT_INSN (insn); if (GET_CODE (insn) == NOTE && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_END || NOTE_LINE_NUMBER (insn) == NOTE_INSN_BLOCK_BEG || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP)) { if (insn == start) start = next; else { rtx prev = PREV_INSN (insn); PREV_INSN (insn) = PREV_INSN (start); NEXT_INSN (insn) = start; NEXT_INSN (PREV_INSN (insn)) = insn; PREV_INSN (NEXT_INSN (insn)) = insn; NEXT_INSN (prev) = next; PREV_INSN (next) = prev; } } else last = insn; } /* There were no real instructions. */ if (start == past_end) return true; end = last; *startp = start; *endp = end; return false; } /* Return the label before INSN, or put a new label there. */ rtx get_label_before (rtx insn) { rtx label; /* Find an existing label at this point or make a new one if there is none. */ label = prev_nonnote_insn (insn); if (label == 0 || GET_CODE (label) != CODE_LABEL) { rtx prev = PREV_INSN (insn); label = gen_label_rtx (); emit_label_after (label, prev); LABEL_NUSES (label) = 0; } return label; } /* Return the label after INSN, or put a new label there. */ rtx get_label_after (rtx insn) { rtx label; /* Find an existing label at this point or make a new one if there is none. */ label = next_nonnote_insn (insn); if (label == 0 || GET_CODE (label) != CODE_LABEL) { label = gen_label_rtx (); emit_label_after (label, insn); LABEL_NUSES (label) = 0; } return label; } /* Given a comparison (CODE ARG0 ARG1), inside an insn, INSN, return a code of reversed comparison if it is possible to do so. Otherwise return UNKNOWN. UNKNOWN may be returned in case we are having CC_MODE compare and we don't know whether it's source is floating point or integer comparison. Machine description should define REVERSIBLE_CC_MODE and REVERSE_CONDITION macros to help this function avoid overhead in these cases. */ enum rtx_code reversed_comparison_code_parts (enum rtx_code code, rtx arg0, rtx arg1, rtx insn) { enum machine_mode mode; /* If this is not actually a comparison, we can't reverse it. */ if (GET_RTX_CLASS (code) != RTX_COMPARE && GET_RTX_CLASS (code) != RTX_COMM_COMPARE) return UNKNOWN; mode = GET_MODE (arg0); if (mode == VOIDmode) mode = GET_MODE (arg1); /* First see if machine description supplies us way to reverse the comparison. 
Give it priority over everything else to allow machine description to do tricks. */ if (GET_MODE_CLASS (mode) == MODE_CC && REVERSIBLE_CC_MODE (mode)) { #ifdef REVERSE_CONDITION return REVERSE_CONDITION (code, mode); #endif return reverse_condition (code); } /* Try a few special cases based on the comparison code. */ switch (code) { case GEU: case GTU: case LEU: case LTU: case NE: case EQ: /* It is always safe to reverse EQ and NE, even for the floating point. Similarly the unsigned comparisons are never used for floating point so we can reverse them in the default way. */ return reverse_condition (code); case ORDERED: case UNORDERED: case LTGT: case UNEQ: /* In case we already see unordered comparison, we can be sure to be dealing with floating point so we don't need any more tests. */ return reverse_condition_maybe_unordered (code); case UNLT: case UNLE: case UNGT: case UNGE: /* We don't have safe way to reverse these yet. */ return UNKNOWN; default: break; } if (GET_MODE_CLASS (mode) == MODE_CC || CC0_P (arg0)) { rtx prev; /* Try to search for the comparison to determine the real mode. This code is expensive, but with sane machine description it will be never used, since REVERSIBLE_CC_MODE will return true in all cases. */ if (! insn) return UNKNOWN; for (prev = prev_nonnote_insn (insn); prev != 0 && GET_CODE (prev) != CODE_LABEL; prev = prev_nonnote_insn (prev)) { rtx set = set_of (arg0, prev); if (set && GET_CODE (set) == SET && rtx_equal_p (SET_DEST (set), arg0)) { rtx src = SET_SRC (set); if (GET_CODE (src) == COMPARE) { rtx comparison = src; arg0 = XEXP (src, 0); mode = GET_MODE (arg0); if (mode == VOIDmode) mode = GET_MODE (XEXP (comparison, 1)); break; } /* We can get past reg-reg moves. This may be useful for model of i387 comparisons that first move flag registers around. */ if (REG_P (src)) { arg0 = src; continue; } } /* If register is clobbered in some ununderstandable way, give up. */ if (set) return UNKNOWN; } } /* Test for an integer condition, or a floating-point comparison in which NaNs can be ignored. */ if (GET_CODE (arg0) == CONST_INT || (GET_MODE (arg0) != VOIDmode && GET_MODE_CLASS (mode) != MODE_CC && !HONOR_NANS (mode))) return reverse_condition (code); return UNKNOWN; } /* A wrapper around the previous function to take COMPARISON as rtx expression. This simplifies many callers. */ enum rtx_code reversed_comparison_code (rtx comparison, rtx insn) { if (!COMPARISON_P (comparison)) return UNKNOWN; return reversed_comparison_code_parts (GET_CODE (comparison), XEXP (comparison, 0), XEXP (comparison, 1), insn); } /* Given an rtx-code for a comparison, return the code for the negated comparison. If no such code exists, return UNKNOWN. WATCH OUT! reverse_condition is not safe to use on a jump that might be acting on the results of an IEEE floating point comparison, because of the special treatment of non-signaling nans in comparisons. Use reversed_comparison_code instead. */ enum rtx_code reverse_condition (enum rtx_code code) { switch (code) { case EQ: return NE; case NE: return EQ; case GT: return LE; case GE: return LT; case LT: return GE; case LE: return GT; case GTU: return LEU; case GEU: return LTU; case LTU: return GEU; case LEU: return GTU; case UNORDERED: return ORDERED; case ORDERED: return UNORDERED; case UNLT: case UNLE: case UNGT: case UNGE: case UNEQ: case LTGT: return UNKNOWN; default: abort (); } } /* Similar, but we're allowed to generate unordered comparisons, which makes it safe for IEEE floating-point. 
Of course, we have to recognize that the target will support them too... */ enum rtx_code reverse_condition_maybe_unordered (enum rtx_code code) { switch (code) { case EQ: return NE; case NE: return EQ; case GT: return UNLE; case GE: return UNLT; case LT: return UNGE; case LE: return UNGT; case LTGT: return UNEQ; case UNORDERED: return ORDERED; case ORDERED: return UNORDERED; case UNLT: return GE; case UNLE: return GT; case UNGT: return LE; case UNGE: return LT; case UNEQ: return LTGT; default: abort (); } } /* Similar, but return the code when two operands of a comparison are swapped. This IS safe for IEEE floating-point. */ enum rtx_code swap_condition (enum rtx_code code) { switch (code) { case EQ: case NE: case UNORDERED: case ORDERED: case UNEQ: case LTGT: return code; case GT: return LT; case GE: return LE; case LT: return GT; case LE: return GE; case GTU: return LTU; case GEU: return LEU; case LTU: return GTU; case LEU: return GEU; case UNLT: return UNGT; case UNLE: return UNGE; case UNGT: return UNLT; case UNGE: return UNLE; default: abort (); } } /* Given a comparison CODE, return the corresponding unsigned comparison. If CODE is an equality comparison or already an unsigned comparison, CODE is returned. */ enum rtx_code unsigned_condition (enum rtx_code code) { switch (code) { case EQ: case NE: case GTU: case GEU: case LTU: case LEU: return code; case GT: return GTU; case GE: return GEU; case LT: return LTU; case LE: return LEU; default: abort (); } } /* Similarly, return the signed version of a comparison. */ enum rtx_code signed_condition (enum rtx_code code) { switch (code) { case EQ: case NE: case GT: case GE: case LT: case LE: return code; case GTU: return GT; case GEU: return GE; case LTU: return LT; case LEU: return LE; default: abort (); } } /* Return nonzero if CODE1 is more strict than CODE2, i.e., if the truth of CODE1 implies the truth of CODE2. */ int comparison_dominates_p (enum rtx_code code1, enum rtx_code code2) { /* UNKNOWN comparison codes can happen as a result of trying to revert comparison codes. They can't match anything, so we have to reject them here. */ if (code1 == UNKNOWN || code2 == UNKNOWN) return 0; if (code1 == code2) return 1; switch (code1) { case UNEQ: if (code2 == UNLE || code2 == UNGE) return 1; break; case EQ: if (code2 == LE || code2 == LEU || code2 == GE || code2 == GEU || code2 == ORDERED) return 1; break; case UNLT: if (code2 == UNLE || code2 == NE) return 1; break; case LT: if (code2 == LE || code2 == NE || code2 == ORDERED || code2 == LTGT) return 1; break; case UNGT: if (code2 == UNGE || code2 == NE) return 1; break; case GT: if (code2 == GE || code2 == NE || code2 == ORDERED || code2 == LTGT) return 1; break; case GE: case LE: if (code2 == ORDERED) return 1; break; case LTGT: if (code2 == NE || code2 == ORDERED) return 1; break; case LTU: if (code2 == LEU || code2 == NE) return 1; break; case GTU: if (code2 == GEU || code2 == NE) return 1; break; case UNORDERED: if (code2 == NE || code2 == UNEQ || code2 == UNLE || code2 == UNLT || code2 == UNGE || code2 == UNGT) return 1; break; default: break; } return 0; } /* Return 1 if INSN is an unconditional jump and nothing else. */ int simplejump_p (rtx insn) { return (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == SET && GET_CODE (SET_DEST (PATTERN (insn))) == PC && GET_CODE (SET_SRC (PATTERN (insn))) == LABEL_REF); } /* Return nonzero if INSN is a (possibly) conditional jump and nothing more. 
Use of this function is deprecated, since we need to support combined branch and compare insns. Use any_condjump_p instead whenever possible. */ int condjump_p (rtx insn) { rtx x = PATTERN (insn); if (GET_CODE (x) != SET || GET_CODE (SET_DEST (x)) != PC) return 0; x = SET_SRC (x); if (GET_CODE (x) == LABEL_REF) return 1; else return (GET_CODE (x) == IF_THEN_ELSE && ((GET_CODE (XEXP (x, 2)) == PC && (GET_CODE (XEXP (x, 1)) == LABEL_REF || GET_CODE (XEXP (x, 1)) == RETURN)) || (GET_CODE (XEXP (x, 1)) == PC && (GET_CODE (XEXP (x, 2)) == LABEL_REF || GET_CODE (XEXP (x, 2)) == RETURN)))); return 0; } /* Return nonzero if INSN is a (possibly) conditional jump inside a PARALLEL. Use this function is deprecated, since we need to support combined branch and compare insns. Use any_condjump_p instead whenever possible. */ int condjump_in_parallel_p (rtx insn) { rtx x = PATTERN (insn); if (GET_CODE (x) != PARALLEL) return 0; else x = XVECEXP (x, 0, 0); if (GET_CODE (x) != SET) return 0; if (GET_CODE (SET_DEST (x)) != PC) return 0; if (GET_CODE (SET_SRC (x)) == LABEL_REF) return 1; if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE) return 0; if (XEXP (SET_SRC (x), 2) == pc_rtx && (GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF || GET_CODE (XEXP (SET_SRC (x), 1)) == RETURN)) return 1; if (XEXP (SET_SRC (x), 1) == pc_rtx && (GET_CODE (XEXP (SET_SRC (x), 2)) == LABEL_REF || GET_CODE (XEXP (SET_SRC (x), 2)) == RETURN)) return 1; return 0; } /* Return set of PC, otherwise NULL. */ rtx pc_set (rtx insn) { rtx pat; if (GET_CODE (insn) != JUMP_INSN) return NULL_RTX; pat = PATTERN (insn); /* The set is allowed to appear either as the insn pattern or the first set in a PARALLEL. */ if (GET_CODE (pat) == PARALLEL) pat = XVECEXP (pat, 0, 0); if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == PC) return pat; return NULL_RTX; } /* Return true when insn is an unconditional direct jump, possibly bundled inside a PARALLEL. */ int any_uncondjump_p (rtx insn) { rtx x = pc_set (insn); if (!x) return 0; if (GET_CODE (SET_SRC (x)) != LABEL_REF) return 0; if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX)) return 0; return 1; } /* Return true when insn is a conditional jump. This function works for instructions containing PC sets in PARALLELs. The instruction may have various other effects so before removing the jump you must verify onlyjump_p. Note that unlike condjump_p it returns false for unconditional jumps. */ int any_condjump_p (rtx insn) { rtx x = pc_set (insn); enum rtx_code a, b; if (!x) return 0; if (GET_CODE (SET_SRC (x)) != IF_THEN_ELSE) return 0; a = GET_CODE (XEXP (SET_SRC (x), 1)); b = GET_CODE (XEXP (SET_SRC (x), 2)); return ((b == PC && (a == LABEL_REF || a == RETURN)) || (a == PC && (b == LABEL_REF || b == RETURN))); } /* Return the label of a conditional jump. */ rtx condjump_label (rtx insn) { rtx x = pc_set (insn); if (!x) return NULL_RTX; x = SET_SRC (x); if (GET_CODE (x) == LABEL_REF) return x; if (GET_CODE (x) != IF_THEN_ELSE) return NULL_RTX; if (XEXP (x, 2) == pc_rtx && GET_CODE (XEXP (x, 1)) == LABEL_REF) return XEXP (x, 1); if (XEXP (x, 1) == pc_rtx && GET_CODE (XEXP (x, 2)) == LABEL_REF) return XEXP (x, 2); return NULL_RTX; } /* Return true if INSN is a (possibly conditional) return insn. 
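*/

/* Illustrative sketch (not part of the original sources): a typical,
   hypothetical use of the predicates above is to classify a jump and
   fetch its taken label before rewriting it:

       if (GET_CODE (insn) == JUMP_INSN
           && any_condjump_p (insn)
           && onlyjump_p (insn))
         {
           rtx label = condjump_label (insn);
           ...
         }

   condjump_label returns NULL_RTX when the taken arm is a RETURN
   rather than a label; onlyjump_p is defined a little further below.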
*/ static int returnjump_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED) { rtx x = *loc; return x && (GET_CODE (x) == RETURN || (GET_CODE (x) == SET && SET_IS_RETURN_P (x))); } int returnjump_p (rtx insn) { if (GET_CODE (insn) != JUMP_INSN) return 0; return for_each_rtx (&PATTERN (insn), returnjump_p_1, NULL); } /* Return true if INSN is a jump that only transfers control and nothing more. */ int onlyjump_p (rtx insn) { rtx set; if (GET_CODE (insn) != JUMP_INSN) return 0; set = single_set (insn); if (set == NULL) return 0; if (GET_CODE (SET_DEST (set)) != PC) return 0; if (side_effects_p (SET_SRC (set))) return 0; return 1; } #ifdef HAVE_cc0 /* Return nonzero if X is an RTX that only sets the condition codes and has no side effects. */ int only_sets_cc0_p (rtx x) { if (! x) return 0; if (INSN_P (x)) x = PATTERN (x); return sets_cc0_p (x) == 1 && ! side_effects_p (x); } /* Return 1 if X is an RTX that does nothing but set the condition codes and CLOBBER or USE registers. Return -1 if X does explicitly set the condition codes, but also does other things. */ int sets_cc0_p (rtx x) { if (! x) return 0; if (INSN_P (x)) x = PATTERN (x); if (GET_CODE (x) == SET && SET_DEST (x) == cc0_rtx) return 1; if (GET_CODE (x) == PARALLEL) { int i; int sets_cc0 = 0; int other_things = 0; for (i = XVECLEN (x, 0) - 1; i >= 0; i--) { if (GET_CODE (XVECEXP (x, 0, i)) == SET && SET_DEST (XVECEXP (x, 0, i)) == cc0_rtx) sets_cc0 = 1; else if (GET_CODE (XVECEXP (x, 0, i)) == SET) other_things = 1; } return ! sets_cc0 ? 0 : other_things ? -1 : 1; } return 0; } #endif /* Follow any unconditional jump at LABEL; return the ultimate label reached by any such chain of jumps. Return null if the chain ultimately leads to a return instruction. If LABEL is not followed by a jump, return LABEL. If the chain loops or we can't find end, return LABEL, since that tells caller to avoid changing the insn. If RELOAD_COMPLETED is 0, we do not chain across a NOTE_INSN_LOOP_BEG or a USE or CLOBBER. */ rtx follow_jumps (rtx label) { rtx insn; rtx next; rtx value = label; int depth; for (depth = 0; (depth < 10 && (insn = next_active_insn (value)) != 0 && GET_CODE (insn) == JUMP_INSN && ((JUMP_LABEL (insn) != 0 && any_uncondjump_p (insn) && onlyjump_p (insn)) || GET_CODE (PATTERN (insn)) == RETURN) && (next = NEXT_INSN (insn)) && GET_CODE (next) == BARRIER); depth++) { /* Don't chain through the insn that jumps into a loop from outside the loop, since that would create multiple loop entry jumps and prevent loop optimization. */ rtx tem; if (!reload_completed) for (tem = value; tem != insn; tem = NEXT_INSN (tem)) if (GET_CODE (tem) == NOTE && (NOTE_LINE_NUMBER (tem) == NOTE_INSN_LOOP_BEG /* ??? Optional. Disables some optimizations, but makes gcov output more accurate with -O. */ || (flag_test_coverage && NOTE_LINE_NUMBER (tem) > 0))) return value; /* If we have found a cycle, make the insn jump to itself. */ if (JUMP_LABEL (insn) == label) return label; tem = next_active_insn (JUMP_LABEL (insn)); if (tem && (GET_CODE (PATTERN (tem)) == ADDR_VEC || GET_CODE (PATTERN (tem)) == ADDR_DIFF_VEC)) break; value = JUMP_LABEL (insn); } if (depth == 10) return label; return value; } /* Find all CODE_LABELs referred to in X, and increment their use counts. If INSN is a JUMP_INSN and there is at least one CODE_LABEL referenced in INSN, then store one of them in JUMP_LABEL (INSN). If INSN is an INSN or a CALL_INSN and there is at least one CODE_LABEL referenced in INSN, add a REG_LABEL note containing that label to INSN. 
Also, when there are consecutive labels, canonicalize on the last of them. Note that two labels separated by a loop-beginning note must be kept distinct if we have not yet done loop-optimization, because the gap between them is where loop-optimize will want to move invariant code to. CROSS_JUMP tells us that loop-optimization is done with. */ void mark_jump_label (rtx x, rtx insn, int in_mem) { RTX_CODE code = GET_CODE (x); int i; const char *fmt; switch (code) { case PC: case CC0: case REG: case CONST_INT: case CONST_DOUBLE: case CLOBBER: case CALL: return; case MEM: in_mem = 1; break; case SYMBOL_REF: if (!in_mem) return; /* If this is a constant-pool reference, see if it is a label. */ if (CONSTANT_POOL_ADDRESS_P (x)) mark_jump_label (get_pool_constant (x), insn, in_mem); break; case LABEL_REF: { rtx label = XEXP (x, 0); /* Ignore remaining references to unreachable labels that have been deleted. */ if (GET_CODE (label) == NOTE && NOTE_LINE_NUMBER (label) == NOTE_INSN_DELETED_LABEL) break; if (GET_CODE (label) != CODE_LABEL) abort (); /* Ignore references to labels of containing functions. */ if (LABEL_REF_NONLOCAL_P (x)) break; XEXP (x, 0) = label; if (! insn || ! INSN_DELETED_P (insn)) ++LABEL_NUSES (label); if (insn) { if (GET_CODE (insn) == JUMP_INSN) JUMP_LABEL (insn) = label; else { /* Add a REG_LABEL note for LABEL unless there already is one. All uses of a label, except for labels that are the targets of jumps, must have a REG_LABEL note. */ if (! find_reg_note (insn, REG_LABEL, label)) REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, label, REG_NOTES (insn)); } } return; } /* Do walk the labels in a vector, but not the first operand of an ADDR_DIFF_VEC. Don't set the JUMP_LABEL of a vector. */ case ADDR_VEC: case ADDR_DIFF_VEC: if (! INSN_DELETED_P (insn)) { int eltnum = code == ADDR_DIFF_VEC ? 1 : 0; for (i = 0; i < XVECLEN (x, eltnum); i++) mark_jump_label (XVECEXP (x, eltnum, i), NULL_RTX, in_mem); } return; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') mark_jump_label (XEXP (x, i), insn, in_mem); else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) mark_jump_label (XVECEXP (x, i, j), insn, in_mem); } } } /* If all INSN does is set the pc, delete it, and delete the insn that set the condition codes for it if that's what the previous thing was. */ void delete_jump (rtx insn) { rtx set = single_set (insn); if (set && GET_CODE (SET_DEST (set)) == PC) delete_computation (insn); } /* Verify INSN is a BARRIER and delete it. */ void delete_barrier (rtx insn) { if (GET_CODE (insn) != BARRIER) abort (); delete_insn (insn); } /* Recursively delete prior insns that compute the value (used only by INSN which the caller is deleting) stored in the register mentioned by NOTE which is a REG_DEAD note associated with INSN. */ static void delete_prior_computation (rtx note, rtx insn) { rtx our_prev; rtx reg = XEXP (note, 0); for (our_prev = prev_nonnote_insn (insn); our_prev && (GET_CODE (our_prev) == INSN || GET_CODE (our_prev) == CALL_INSN); our_prev = prev_nonnote_insn (our_prev)) { rtx pat = PATTERN (our_prev); /* If we reach a CALL which is not calling a const function or the callee pops the arguments, then give up. */ if (GET_CODE (our_prev) == CALL_INSN && (! CONST_OR_PURE_CALL_P (our_prev) || GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != CALL)) break; /* If we reach a SEQUENCE, it is too complex to try to do anything with it, so give up. 
We can be run during and after reorg, so SEQUENCE rtl can legitimately show up here. */ if (GET_CODE (pat) == SEQUENCE) break; if (GET_CODE (pat) == USE && GET_CODE (XEXP (pat, 0)) == INSN) /* reorg creates USEs that look like this. We leave them alone because reorg needs them for its own purposes. */ break; if (reg_set_p (reg, pat)) { if (side_effects_p (pat) && GET_CODE (our_prev) != CALL_INSN) break; if (GET_CODE (pat) == PARALLEL) { /* If we find a SET of something else, we can't delete the insn. */ int i; for (i = 0; i < XVECLEN (pat, 0); i++) { rtx part = XVECEXP (pat, 0, i); if (GET_CODE (part) == SET && SET_DEST (part) != reg) break; } if (i == XVECLEN (pat, 0)) delete_computation (our_prev); } else if (GET_CODE (pat) == SET && REG_P (SET_DEST (pat))) { int dest_regno = REGNO (SET_DEST (pat)); int dest_endregno = (dest_regno + (dest_regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[dest_regno] [GET_MODE (SET_DEST (pat))] : 1)); int regno = REGNO (reg); int endregno = (regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (reg)] : 1)); if (dest_regno >= regno && dest_endregno <= endregno) delete_computation (our_prev); /* We may have a multi-word hard register and some, but not all, of the words of the register are needed in subsequent insns. Write REG_UNUSED notes for those parts that were not needed. */ else if (dest_regno <= regno && dest_endregno >= endregno) { int i; REG_NOTES (our_prev) = gen_rtx_EXPR_LIST (REG_UNUSED, reg, REG_NOTES (our_prev)); for (i = dest_regno; i < dest_endregno; i++) if (! find_regno_note (our_prev, REG_UNUSED, i)) break; if (i == dest_endregno) delete_computation (our_prev); } } break; } /* If PAT references the register that dies here, it is an additional use. Hence any prior SET isn't dead. However, this insn becomes the new place for the REG_DEAD note. */ if (reg_overlap_mentioned_p (reg, pat)) { XEXP (note, 1) = REG_NOTES (our_prev); REG_NOTES (our_prev) = note; break; } } } /* Delete INSN and recursively delete insns that compute values used only by INSN. This uses the REG_DEAD notes computed during flow analysis. If we are running before flow.c, we need do nothing since flow.c will delete dead code. We also can't know if the registers being used are dead or not at this point. Otherwise, look at all our REG_DEAD notes. If a previous insn does nothing other than set a register that dies in this insn, we can delete that insn as well. On machines with CC0, if CC0 is used in this insn, we may be able to delete the insn that set it. */ static void delete_computation (rtx insn) { rtx note, next; #ifdef HAVE_cc0 if (reg_referenced_p (cc0_rtx, PATTERN (insn))) { rtx prev = prev_nonnote_insn (insn); /* We assume that at this stage CC's are always set explicitly and always immediately before the jump that will use them. So if the previous insn exists to set the CC's, delete it (unless it performs auto-increments, etc.). */ if (prev && GET_CODE (prev) == INSN && sets_cc0_p (PATTERN (prev))) { if (sets_cc0_p (PATTERN (prev)) > 0 && ! side_effects_p (PATTERN (prev))) delete_computation (prev); else /* Otherwise, show that cc0 won't be used. */ REG_NOTES (prev) = gen_rtx_EXPR_LIST (REG_UNUSED, cc0_rtx, REG_NOTES (prev)); } } #endif for (note = REG_NOTES (insn); note; note = next) { next = XEXP (note, 1); if (REG_NOTE_KIND (note) != REG_DEAD /* Verify that the REG_NOTE is legitimate. 
*/ || !REG_P (XEXP (note, 0))) continue; delete_prior_computation (note, insn); } delete_related_insns (insn); } /* Delete insn INSN from the chain of insns and update label ref counts and delete insns now unreachable. Returns the first insn after INSN that was not deleted. Usage of this instruction is deprecated. Use delete_insn instead and subsequent cfg_cleanup pass to delete unreachable code if needed. */ rtx delete_related_insns (rtx insn) { int was_code_label = (GET_CODE (insn) == CODE_LABEL); rtx note; rtx next = NEXT_INSN (insn), prev = PREV_INSN (insn); while (next && INSN_DELETED_P (next)) next = NEXT_INSN (next); /* This insn is already deleted => return first following nondeleted. */ if (INSN_DELETED_P (insn)) return next; delete_insn (insn); /* If instruction is followed by a barrier, delete the barrier too. */ if (next != 0 && GET_CODE (next) == BARRIER) delete_insn (next); /* If deleting a jump, decrement the count of the label, and delete the label if it is now unused. */ if (GET_CODE (insn) == JUMP_INSN && JUMP_LABEL (insn)) { rtx lab = JUMP_LABEL (insn), lab_next; if (LABEL_NUSES (lab) == 0) { /* This can delete NEXT or PREV, either directly if NEXT is JUMP_LABEL (INSN), or indirectly through more levels of jumps. */ delete_related_insns (lab); /* I feel a little doubtful about this loop, but I see no clean and sure alternative way to find the first insn after INSN that is not now deleted. I hope this works. */ while (next && INSN_DELETED_P (next)) next = NEXT_INSN (next); return next; } else if (tablejump_p (insn, NULL, &lab_next)) { /* If we're deleting the tablejump, delete the dispatch table. We may not be able to kill the label immediately preceding just yet, as it might be referenced in code leading up to the tablejump. */ delete_related_insns (lab_next); } } /* Likewise if we're deleting a dispatch table. */ if (GET_CODE (insn) == JUMP_INSN && (GET_CODE (PATTERN (insn)) == ADDR_VEC || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) { rtx pat = PATTERN (insn); int i, diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC; int len = XVECLEN (pat, diff_vec_p); for (i = 0; i < len; i++) if (LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0)) == 0) delete_related_insns (XEXP (XVECEXP (pat, diff_vec_p, i), 0)); while (next && INSN_DELETED_P (next)) next = NEXT_INSN (next); return next; } /* Likewise for an ordinary INSN / CALL_INSN with a REG_LABEL note. */ if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN) for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_LABEL /* This could also be a NOTE_INSN_DELETED_LABEL note. */ && GET_CODE (XEXP (note, 0)) == CODE_LABEL) if (LABEL_NUSES (XEXP (note, 0)) == 0) delete_related_insns (XEXP (note, 0)); while (prev && (INSN_DELETED_P (prev) || GET_CODE (prev) == NOTE)) prev = PREV_INSN (prev); /* If INSN was a label and a dispatch table follows it, delete the dispatch table. The tablejump must have gone already. It isn't useful to fall through into a table. */ if (was_code_label && NEXT_INSN (insn) != 0 && GET_CODE (NEXT_INSN (insn)) == JUMP_INSN && (GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_VEC || GET_CODE (PATTERN (NEXT_INSN (insn))) == ADDR_DIFF_VEC)) next = delete_related_insns (NEXT_INSN (insn)); /* If INSN was a label, delete insns following it if now unreachable. 
*/ if (was_code_label && prev && GET_CODE (prev) == BARRIER) { enum rtx_code code; while (next) { code = GET_CODE (next); if (code == NOTE && NOTE_LINE_NUMBER (next) != NOTE_INSN_FUNCTION_END) next = NEXT_INSN (next); /* Keep going past other deleted labels to delete what follows. */ else if (code == CODE_LABEL && INSN_DELETED_P (next)) next = NEXT_INSN (next); else if (code == BARRIER || INSN_P (next)) /* Note: if this deletes a jump, it can cause more deletion of unreachable code, after a different label. As long as the value from this recursive call is correct, this invocation functions correctly. */ next = delete_related_insns (next); else break; } } return next; } /* Delete a range of insns from FROM to TO, inclusive. This is for the sake of peephole optimization, so assume that whatever these insns do will still be done by a new peephole insn that will replace them. */ void delete_for_peephole (rtx from, rtx to) { rtx insn = from; while (1) { rtx next = NEXT_INSN (insn); rtx prev = PREV_INSN (insn); if (GET_CODE (insn) != NOTE) { INSN_DELETED_P (insn) = 1; /* Patch this insn out of the chain. */ /* We don't do this all at once, because we must preserve all NOTEs. */ if (prev) NEXT_INSN (prev) = next; if (next) PREV_INSN (next) = prev; } if (insn == to) break; insn = next; } /* Note that if TO is an unconditional jump we *do not* delete the BARRIER that follows, since the peephole that replaces this sequence is also an unconditional jump in that case. */ } /* Throughout LOC, redirect OLABEL to NLABEL. Treat null OLABEL or NLABEL as a return. Accrue modifications into the change group. */ static void redirect_exp_1 (rtx *loc, rtx olabel, rtx nlabel, rtx insn) { rtx x = *loc; RTX_CODE code = GET_CODE (x); int i; const char *fmt; if (code == LABEL_REF) { if (XEXP (x, 0) == olabel) { rtx n; if (nlabel) n = gen_rtx_LABEL_REF (VOIDmode, nlabel); else n = gen_rtx_RETURN (VOIDmode); validate_change (insn, loc, n, 1); return; } } else if (code == RETURN && olabel == 0) { x = gen_rtx_LABEL_REF (VOIDmode, nlabel); if (loc == &PATTERN (insn)) x = gen_rtx_SET (VOIDmode, pc_rtx, x); validate_change (insn, loc, x, 1); return; } if (code == SET && nlabel == 0 && SET_DEST (x) == pc_rtx && GET_CODE (SET_SRC (x)) == LABEL_REF && XEXP (SET_SRC (x), 0) == olabel) { validate_change (insn, loc, gen_rtx_RETURN (VOIDmode), 1); return; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') redirect_exp_1 (&XEXP (x, i), olabel, nlabel, insn); else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) redirect_exp_1 (&XVECEXP (x, i, j), olabel, nlabel, insn); } } } /* Similar, but apply the change group and report success or failure. */ static int redirect_exp (rtx olabel, rtx nlabel, rtx insn) { rtx *loc; if (GET_CODE (PATTERN (insn)) == PARALLEL) loc = &XVECEXP (PATTERN (insn), 0, 0); else loc = &PATTERN (insn); redirect_exp_1 (loc, olabel, nlabel, insn); if (num_validated_changes () == 0) return 0; return apply_change_group (); } /* Make JUMP go to NLABEL instead of where it jumps now. Accrue the modifications into the change group. Return false if we did not see how to do that. */ int redirect_jump_1 (rtx jump, rtx nlabel) { int ochanges = num_validated_changes (); rtx *loc; if (GET_CODE (PATTERN (jump)) == PARALLEL) loc = &XVECEXP (PATTERN (jump), 0, 0); else loc = &PATTERN (jump); redirect_exp_1 (loc, JUMP_LABEL (jump), nlabel, jump); return num_validated_changes () > ochanges; } /* Make JUMP go to NLABEL instead of where it jumps now. 
If the old jump target label is unused as a result, it and the code following it may be deleted. If NLABEL is zero, we are to turn the jump into a (possibly conditional) RETURN insn. The return value will be 1 if the change was made, 0 if it wasn't (this can only occur for NLABEL == 0). */ int redirect_jump (rtx jump, rtx nlabel, int delete_unused) { rtx olabel = JUMP_LABEL (jump); rtx note; if (nlabel == olabel) return 1; if (! redirect_exp (olabel, nlabel, jump)) return 0; JUMP_LABEL (jump) = nlabel; if (nlabel) ++LABEL_NUSES (nlabel); /* Update labels in any REG_EQUAL note. */ if ((note = find_reg_note (jump, REG_EQUAL, NULL_RTX)) != NULL_RTX) { if (nlabel && olabel) { rtx dest = XEXP (note, 0); if (GET_CODE (dest) == IF_THEN_ELSE) { if (GET_CODE (XEXP (dest, 1)) == LABEL_REF && XEXP (XEXP (dest, 1), 0) == olabel) XEXP (XEXP (dest, 1), 0) = nlabel; if (GET_CODE (XEXP (dest, 2)) == LABEL_REF && XEXP (XEXP (dest, 2), 0) == olabel) XEXP (XEXP (dest, 2), 0) = nlabel; } else remove_note (jump, note); } else remove_note (jump, note); } /* If we're eliding the jump over exception cleanups at the end of a function, move the function end note so that -Wreturn-type works. */ if (olabel && nlabel && NEXT_INSN (olabel) && GET_CODE (NEXT_INSN (olabel)) == NOTE && NOTE_LINE_NUMBER (NEXT_INSN (olabel)) == NOTE_INSN_FUNCTION_END) emit_note_after (NOTE_INSN_FUNCTION_END, nlabel); if (olabel && --LABEL_NUSES (olabel) == 0 && delete_unused /* Undefined labels will remain outside the insn stream. */ && INSN_UID (olabel)) delete_related_insns (olabel); return 1; } /* Invert the jump condition of rtx X contained in jump insn, INSN. Accrue the modifications into the change group. */ static void invert_exp_1 (rtx insn) { RTX_CODE code; rtx x = pc_set (insn); if (!x) abort (); x = SET_SRC (x); code = GET_CODE (x); if (code == IF_THEN_ELSE) { rtx comp = XEXP (x, 0); rtx tem; enum rtx_code reversed_code; /* We can do this in two ways: The preferable way, which can only be done if this is not an integer comparison, is to reverse the comparison code. Otherwise, swap the THEN-part and ELSE-part of the IF_THEN_ELSE. If we can't do either, fail. */ reversed_code = reversed_comparison_code (comp, insn); if (reversed_code != UNKNOWN) { validate_change (insn, &XEXP (x, 0), gen_rtx_fmt_ee (reversed_code, GET_MODE (comp), XEXP (comp, 0), XEXP (comp, 1)), 1); return; } tem = XEXP (x, 1); validate_change (insn, &XEXP (x, 1), XEXP (x, 2), 1); validate_change (insn, &XEXP (x, 2), tem, 1); } else abort (); } /* Invert the jump condition of conditional jump insn, INSN. Return 1 if we can do so, 0 if we cannot find a way to do so that matches a pattern. */ static int invert_exp (rtx insn) { invert_exp_1 (insn); if (num_validated_changes () == 0) return 0; return apply_change_group (); } /* Invert the condition of the jump JUMP, and make it jump to label NLABEL instead of where it jumps now. Accrue changes into the change group. Return false if we didn't see how to perform the inversion and redirection. */ int invert_jump_1 (rtx jump, rtx nlabel) { int ochanges; ochanges = num_validated_changes (); invert_exp_1 (jump); if (num_validated_changes () == ochanges) return 0; return redirect_jump_1 (jump, nlabel); } /* Invert the condition of the jump JUMP, and make it jump to label NLABEL instead of where it jumps now. Return true if successful. */ int invert_jump (rtx jump, rtx nlabel, int delete_unused) { /* We have to either invert the condition and change the label or do neither. Either operation could fail. 
We first try to invert the jump. If that succeeds, we try changing the label. If that fails, we invert the jump back to what it was. */ if (! invert_exp (jump)) return 0; if (redirect_jump (jump, nlabel, delete_unused)) { /* Remove REG_EQUAL note if we have one. */ rtx note = find_reg_note (jump, REG_EQUAL, NULL_RTX); if (note) remove_note (jump, note); invert_br_probabilities (jump); return 1; } if (! invert_exp (jump)) /* This should just be putting it back the way it was. */ abort (); return 0; } /* Like rtx_equal_p except that it considers two REGs as equal if they renumber to the same value and considers two commutative operations to be the same if the order of the operands has been reversed. ??? Addition is not commutative on the PA due to the weird implicit space register selection rules for memory addresses. Therefore, we don't consider a + b == b + a. We could/should make this test a little tighter. Possibly only disabling it on the PA via some backend macro or only disabling this case when the PLUS is inside a MEM. */ int rtx_renumbered_equal_p (rtx x, rtx y) { int i; enum rtx_code code = GET_CODE (x); const char *fmt; if (x == y) return 1; if ((code == REG || (code == SUBREG && REG_P (SUBREG_REG (x)))) && (REG_P (y) || (GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y))))) { int reg_x = -1, reg_y = -1; int byte_x = 0, byte_y = 0; if (GET_MODE (x) != GET_MODE (y)) return 0; /* If we haven't done any renumbering, don't make any assumptions. */ if (reg_renumber == 0) return rtx_equal_p (x, y); if (code == SUBREG) { reg_x = REGNO (SUBREG_REG (x)); byte_x = SUBREG_BYTE (x); if (reg_renumber[reg_x] >= 0) { reg_x = subreg_regno_offset (reg_renumber[reg_x], GET_MODE (SUBREG_REG (x)), byte_x, GET_MODE (x)); byte_x = 0; } } else { reg_x = REGNO (x); if (reg_renumber[reg_x] >= 0) reg_x = reg_renumber[reg_x]; } if (GET_CODE (y) == SUBREG) { reg_y = REGNO (SUBREG_REG (y)); byte_y = SUBREG_BYTE (y); if (reg_renumber[reg_y] >= 0) { reg_y = subreg_regno_offset (reg_renumber[reg_y], GET_MODE (SUBREG_REG (y)), byte_y, GET_MODE (y)); byte_y = 0; } } else { reg_y = REGNO (y); if (reg_renumber[reg_y] >= 0) reg_y = reg_renumber[reg_y]; } return reg_x >= 0 && reg_x == reg_y && byte_x == byte_y; } /* Now we have disposed of all the cases in which different rtx codes can match. */ if (code != GET_CODE (y)) return 0; switch (code) { case PC: case CC0: case ADDR_VEC: case ADDR_DIFF_VEC: case CONST_INT: return 0; case LABEL_REF: /* We can't assume nonlocal labels have their following insns yet. */ if (LABEL_REF_NONLOCAL_P (x) || LABEL_REF_NONLOCAL_P (y)) return XEXP (x, 0) == XEXP (y, 0); /* Two label-refs are equivalent if they point at labels in the same position in the instruction stream. */ return (next_real_insn (XEXP (x, 0)) == next_real_insn (XEXP (y, 0))); case SYMBOL_REF: return XSTR (x, 0) == XSTR (y, 0); case CODE_LABEL: /* If we didn't match EQ equality above, they aren't the same. */ return 0; default: break; } /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */ if (GET_MODE (x) != GET_MODE (y)) return 0; /* For commutative operations, the RTX match if the operand match in any order. Also handle the simple binary and unary cases without a loop. ??? Don't consider PLUS a commutative operator; see comments above. 
*/ if (COMMUTATIVE_P (x) && code != PLUS) return ((rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0)) && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 1))) || (rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 1)) && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 0)))); else if (NON_COMMUTATIVE_P (x)) return (rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0)) && rtx_renumbered_equal_p (XEXP (x, 1), XEXP (y, 1))); else if (UNARY_P (x)) return rtx_renumbered_equal_p (XEXP (x, 0), XEXP (y, 0)); /* Compare the elements. If any pair of corresponding elements fail to match, return 0 for the whole things. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { int j; switch (fmt[i]) { case 'w': if (XWINT (x, i) != XWINT (y, i)) return 0; break; case 'i': if (XINT (x, i) != XINT (y, i)) return 0; break; case 't': if (XTREE (x, i) != XTREE (y, i)) return 0; break; case 's': if (strcmp (XSTR (x, i), XSTR (y, i))) return 0; break; case 'e': if (! rtx_renumbered_equal_p (XEXP (x, i), XEXP (y, i))) return 0; break; case 'u': if (XEXP (x, i) != XEXP (y, i)) return 0; /* Fall through. */ case '0': break; case 'E': if (XVECLEN (x, i) != XVECLEN (y, i)) return 0; for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (!rtx_renumbered_equal_p (XVECEXP (x, i, j), XVECEXP (y, i, j))) return 0; break; default: abort (); } } return 1; } /* If X is a hard register or equivalent to one or a subregister of one, return the hard register number. If X is a pseudo register that was not assigned a hard register, return the pseudo register number. Otherwise, return -1. Any rtx is valid for X. */ int true_regnum (rtx x) { if (REG_P (x)) { if (REGNO (x) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (x)] >= 0) return reg_renumber[REGNO (x)]; return REGNO (x); } if (GET_CODE (x) == SUBREG) { int base = true_regnum (SUBREG_REG (x)); if (base >= 0 && base < FIRST_PSEUDO_REGISTER) return base + subreg_regno_offset (REGNO (SUBREG_REG (x)), GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x), GET_MODE (x)); } return -1; } /* Return regno of the register REG and handle subregs too. */ unsigned int reg_or_subregno (rtx reg) { if (REG_P (reg)) return REGNO (reg); if (GET_CODE (reg) == SUBREG) return REGNO (SUBREG_REG (reg)); abort (); } /* Default language-specific hooks. Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Alexandre Oliva This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Do nothing; in many cases the default hook. */ void lhd_do_nothing (void) { } /* Do nothing (tree). */ void lhd_do_nothing_t (tree t ATTRIBUTE_UNUSED) { } /* Do nothing (int). */ void lhd_do_nothing_i (int i ATTRIBUTE_UNUSED) { } /* Do nothing (int, int, int). Return NULL_TREE. */ tree lhd_do_nothing_iii_return_null_tree (int i ATTRIBUTE_UNUSED, int j ATTRIBUTE_UNUSED, int k ATTRIBUTE_UNUSED) { return NULL_TREE; } /* Do nothing (function). 
*/ void lhd_do_nothing_f (struct function *f ATTRIBUTE_UNUSED) { } /* Do nothing (return the tree node passed). */ tree lhd_return_tree (tree t) { return t; } /* Do nothing (return NULL_TREE). */ tree lhd_return_null_tree_v (void) { return NULL_TREE; } /* Do nothing (return NULL_TREE). */ tree lhd_return_null_tree (tree t ATTRIBUTE_UNUSED) { return NULL_TREE; } /* The default post options hook. */ bool lhd_post_options (const char **pfilename ATTRIBUTE_UNUSED) { return false; } /* Called from by print-tree.c. */ void lhd_print_tree_nothing (FILE *file ATTRIBUTE_UNUSED, tree node ATTRIBUTE_UNUSED, int indent ATTRIBUTE_UNUSED) { } /* Called from safe_from_p. */ int lhd_safe_from_p (rtx x ATTRIBUTE_UNUSED, tree exp ATTRIBUTE_UNUSED) { return 1; } /* Called from unsafe_for_reeval. */ int lhd_unsafe_for_reeval (tree t ATTRIBUTE_UNUSED) { return -1; } /* Called from staticp. */ int lhd_staticp (tree exp ATTRIBUTE_UNUSED) { return 0; } /* Called from check_global_declarations. */ bool lhd_warn_unused_global_decl (tree decl) { /* This is what used to exist in check_global_declarations. Probably not many of these actually apply to non-C languages. */ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INLINE (decl)) return false; if (TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)) return false; if (DECL_IN_SYSTEM_HEADER (decl)) return false; return true; } /* Set the DECL_ASSEMBLER_NAME for DECL. */ void lhd_set_decl_assembler_name (tree decl) { /* The language-independent code should never use the DECL_ASSEMBLER_NAME for lots of DECLs. Only FUNCTION_DECLs and VAR_DECLs for variables with static storage duration need a real DECL_ASSEMBLER_NAME. */ if (TREE_CODE (decl) == FUNCTION_DECL || (TREE_CODE (decl) == VAR_DECL && (TREE_STATIC (decl) || DECL_EXTERNAL (decl) || TREE_PUBLIC (decl)))) { /* By default, assume the name to use in assembly code is the same as that used in the source language. (That's correct for C, and GCC used to set DECL_ASSEMBLER_NAME to the same value as DECL_NAME in build_decl, so this choice provides backwards compatibility with existing front-ends. Can't use just the variable's own name for a variable whose scope is less than the whole compilation. Concatenate a distinguishing number. If the decl is at block scope, the number assigned is the DECL_UID; if the decl is at file scope, the number is the DECL_UID of the surrounding TRANSLATION_UNIT_DECL, except for the T_U_D with UID 0. Those (the file-scope internal-linkage declarations from the first input file) get no suffix, which is consistent with what has historically been done for file-scope declarations with internal linkage. */ if (TREE_PUBLIC (decl) || DECL_CONTEXT (decl) == NULL_TREE || (TREE_CODE (DECL_CONTEXT (decl)) == TRANSLATION_UNIT_DECL && DECL_UID (DECL_CONTEXT (decl)) == 0)) SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl)); else { const char *name = IDENTIFIER_POINTER (DECL_NAME (decl)); char *label; unsigned int uid; if (TREE_CODE (DECL_CONTEXT (decl)) == TRANSLATION_UNIT_DECL) uid = DECL_UID (DECL_CONTEXT (decl)); else uid = DECL_UID (decl); ASM_FORMAT_PRIVATE_NAME (label, name, uid); SET_DECL_ASSEMBLER_NAME (decl, get_identifier (label)); } } else /* Nobody should ever be asking for the DECL_ASSEMBLER_NAME of these DECLs -- unless they're in language-dependent code, in which case set_decl_assembler_name hook should handle things. */ abort (); } /* By default we always allow bit-field based optimizations. 
*/ bool lhd_can_use_bit_fields_p (void) { return true; } /* Provide a default routine to clear the binding stack. This is used by languages that don't need to do anything special. */ void lhd_clear_binding_stack (void) { while (! lang_hooks.decls.global_bindings_p ()) lang_hooks.decls.poplevel (0, 0, 0); } /* Type promotion for variable arguments. */ tree lhd_type_promotes_to (tree type ATTRIBUTE_UNUSED) { abort (); } /* Registration of machine- or os-specific builtin types. */ void lhd_register_builtin_type (tree type ATTRIBUTE_UNUSED, const char* name ATTRIBUTE_UNUSED) { } /* Invalid use of an incomplete type. */ void lhd_incomplete_type_error (tree value ATTRIBUTE_UNUSED, tree type) { if (TREE_CODE (type) == ERROR_MARK) return; abort (); } /* Provide a default routine for alias sets that always returns -1. This is used by languages that don't need to do anything special. */ HOST_WIDE_INT lhd_get_alias_set (tree t ATTRIBUTE_UNUSED) { return -1; } /* Provide a hook routine for alias sets that always returns 0. This is used by languages that haven't deal with alias sets yet. */ HOST_WIDE_INT hook_get_alias_set_0 (tree t ATTRIBUTE_UNUSED) { return 0; } /* This is the default expand_expr function. */ rtx lhd_expand_expr (tree t ATTRIBUTE_UNUSED, rtx r ATTRIBUTE_UNUSED, enum machine_mode mm ATTRIBUTE_UNUSED, int em ATTRIBUTE_UNUSED, rtx *a ATTRIBUTE_UNUSED) { abort (); } /* The default language-specific function for expanding a decl. After the language-independent cases are handled, this function will be called. If this function is not defined, it is assumed that declarations other than those for variables and labels do not require any RTL generation. */ int lhd_expand_decl (tree t ATTRIBUTE_UNUSED) { return 0; } /* This is the default decl_printable_name function. */ const char * lhd_decl_printable_name (tree decl, int verbosity ATTRIBUTE_UNUSED) { return IDENTIFIER_POINTER (DECL_NAME (decl)); } /* This compares two types for equivalence ("compatible" in C-based languages). This routine should only return 1 if it is sure. It should not be used in contexts where erroneously returning 0 causes problems. */ int lhd_types_compatible_p (tree x, tree y) { return TYPE_MAIN_VARIANT (x) == TYPE_MAIN_VARIANT (y); } /* lang_hooks.tree_inlining.walk_subtrees is called by walk_tree() after handling common cases, but before walking code-specific sub-trees. If this hook is overridden for a language, it should handle language-specific tree codes, as well as language-specific information associated to common tree codes. If a tree node is completely handled within this function, it should set *SUBTREES to 0, so that generic handling isn't attempted. For language-specific tree codes, generic handling would abort(), so make sure it is set properly. Both SUBTREES and *SUBTREES is guaranteed to be nonzero when the function is called. */ tree lhd_tree_inlining_walk_subtrees (tree *tp ATTRIBUTE_UNUSED, int *subtrees ATTRIBUTE_UNUSED, walk_tree_fn func ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED, void *htab ATTRIBUTE_UNUSED) { return NULL_TREE; } /* lang_hooks.tree_inlining.cannot_inline_tree_fn is called to determine whether there are language-specific reasons for not inlining a given function. 
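
   (An illustrative aside, not part of the original comment: front ends
   that need different behaviour usually override this default through
   langhooks-def.h, roughly:

       #undef  LANG_HOOKS_TREE_INLINING_CANNOT_INLINE_TREE_FN
       #define LANG_HOOKS_TREE_INLINING_CANNOT_INLINE_TREE_FN my_cannot_inline_fn

   where my_cannot_inline_fn is a hypothetical replacement and the
   macro name is assumed to follow the usual langhooks-def.h naming.)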
*/ int lhd_tree_inlining_cannot_inline_tree_fn (tree *fnp) { if (flag_really_no_inline && lookup_attribute ("always_inline", DECL_ATTRIBUTES (*fnp)) == NULL) return 1; return 0; } /* lang_hooks.tree_inlining.disregard_inline_limits is called to determine whether a function should be considered for inlining even if it would exceed inlining limits. */ int lhd_tree_inlining_disregard_inline_limits (tree fn) { if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn)) != NULL) return 1; return 0; } /* lang_hooks.tree_inlining.add_pending_fn_decls is called before starting to inline a function, to push any language-specific functions that should not be inlined into the current function, into VAFNP. PFN is the top of varray, and should be returned if no functions are pushed into VAFNP. The top of the varray should be returned. */ tree lhd_tree_inlining_add_pending_fn_decls (void *vafnp ATTRIBUTE_UNUSED, tree pfn) { return pfn; } /* lang_hooks.tree_inlining.auto_var_in_fn_p is called to determine whether VT is an automatic variable defined in function FT. */ int lhd_tree_inlining_auto_var_in_fn_p (tree var, tree fn) { return (DECL_P (var) && DECL_CONTEXT (var) == fn && (((TREE_CODE (var) == VAR_DECL || TREE_CODE (var) == PARM_DECL) && ! TREE_STATIC (var)) || TREE_CODE (var) == LABEL_DECL || TREE_CODE (var) == RESULT_DECL)); } /* lang_hooks.tree_inlining.copy_res_decl_for_inlining should return a declaration for the result RES of function FN to be inlined into CALLER. NDP points to an integer that should be set in case a new declaration wasn't created (presumably because RES was of aggregate type, such that a TARGET_EXPR is used for the result). TEXPS is a pointer to a varray with the stack of TARGET_EXPRs seen while inlining functions into caller; the top of TEXPS is supposed to match RES. */ tree lhd_tree_inlining_copy_res_decl_for_inlining (tree res, tree fn, tree caller, void *dm ATTRIBUTE_UNUSED, int *ndp ATTRIBUTE_UNUSED, tree return_slot_addr ATTRIBUTE_UNUSED) { if (return_slot_addr) return build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (return_slot_addr)), return_slot_addr); else return copy_decl_for_inlining (res, fn, caller); } /* lang_hooks.tree_inlining.anon_aggr_type_p determines whether T is a type node representing an anonymous aggregate (union, struct, etc), i.e., one whose members are in the same scope as the union itself. */ int lhd_tree_inlining_anon_aggr_type_p (tree t ATTRIBUTE_UNUSED) { return 0; } /* lang_hooks.tree_inlining.start_inlining and end_inlining perform any language-specific bookkeeping necessary for processing FN. start_inlining returns nonzero if inlining should proceed, zero if not. For instance, the C++ version keeps track of template instantiations to avoid infinite recursion. */ int lhd_tree_inlining_start_inlining (tree fn ATTRIBUTE_UNUSED) { return 1; } void lhd_tree_inlining_end_inlining (tree fn ATTRIBUTE_UNUSED) { } /* lang_hooks.tree_inlining.convert_parm_for_inlining performs any language-specific conversion before assigning VALUE to PARM. */ tree lhd_tree_inlining_convert_parm_for_inlining (tree parm ATTRIBUTE_UNUSED, tree value, tree fndecl ATTRIBUTE_UNUSED, int argnum ATTRIBUTE_UNUSED) { return value; } /* lang_hooks.tree_dump.dump_tree: Dump language-specific parts of tree nodes. Returns nonzero if it does not want the usual dumping of the second argument. */ bool lhd_tree_dump_dump_tree (void *di ATTRIBUTE_UNUSED, tree t ATTRIBUTE_UNUSED) { return false; } /* lang_hooks.tree_dump.type_qual: Determine type qualifiers in a language-specific way. 
*/ int lhd_tree_dump_type_quals (tree t) { return TYPE_QUALS (t); } /* lang_hooks.expr_size: Determine the size of the value of an expression T in a language-specific way. Returns a tree for the size in bytes. */ tree lhd_expr_size (tree exp) { if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd' && DECL_SIZE_UNIT (exp) != 0) return DECL_SIZE_UNIT (exp); else return size_in_bytes (TREE_TYPE (exp)); } /* lang_hooks.gimplify_expr re-writes *EXPR_P into GIMPLE form. */ int lhd_gimplify_expr (tree *expr_p ATTRIBUTE_UNUSED, tree *pre_p ATTRIBUTE_UNUSED, tree *post_p ATTRIBUTE_UNUSED) { return GS_UNHANDLED; } /* lang_hooks.tree_size: Determine the size of a tree with code C, which is a language-specific tree code in category 'x'. The default expects never to be called. */ size_t lhd_tree_size (enum tree_code c ATTRIBUTE_UNUSED) { abort (); return 0; } /* Return true if decl, which is a function decl, may be called by a sibcall. */ bool lhd_decl_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED) { return true; } /* lang_hooks.decls.final_write_globals: perform final processing on global variables. */ void write_global_declarations (void) { /* Really define vars that have had only a tentative definition. Really output inline functions that must actually be callable and have not been output so far. */ tree globals = lang_hooks.decls.getdecls (); int len = list_length (globals); tree *vec = xmalloc (sizeof (tree) * len); int i; tree decl; /* Process the decls in reverse order--earliest first. Put them into VEC from back to front, then take out from front. */ for (i = 0, decl = globals; i < len; i++, decl = TREE_CHAIN (decl)) vec[len - i - 1] = decl; wrapup_global_declarations (vec, len); check_global_declarations (vec, len); /* Clean up. */ free (vec); } /* Called to perform language-specific initialization of CTX. */ void lhd_initialize_diagnostics (struct diagnostic_context *ctx ATTRIBUTE_UNUSED) { } /* The default function to print out name of current function that caused an error. */ void lhd_print_error_function (diagnostic_context *context, const char *file) { if (diagnostic_last_function_changed (context)) { const char *old_prefix = context->printer->prefix; char *new_prefix = file ? file_name_as_prefix (file) : NULL; pp_set_prefix (context->printer, new_prefix); if (current_function_decl == NULL) pp_printf (context->printer, "At top level:"); else { if (TREE_CODE (TREE_TYPE (current_function_decl)) == METHOD_TYPE) pp_printf (context->printer, "In member function `%s':", lang_hooks.decl_printable_name (current_function_decl, 2)); else pp_printf (context->printer, "In function `%s':", lang_hooks.decl_printable_name (current_function_decl, 2)); } diagnostic_set_last_function (context); pp_flush (context->printer); context->printer->prefix = old_prefix; free ((char*) new_prefix); } } tree lhd_callgraph_analyze_expr (tree *tp ATTRIBUTE_UNUSED, int *walk_subtrees ATTRIBUTE_UNUSED, tree decl ATTRIBUTE_UNUSED) { return NULL; } tree lhd_make_node (enum tree_code code) { return make_node (code); } /* Generic partial redundancy elimination with lazy code motion support. Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* These routines are meant to be used by various optimization passes which can be modeled as lazy code motion problems. Including, but not limited to: * Traditional partial redundancy elimination. * Placement of caller/callee register save/restores. * Load/store motion. * Copy motion. * Conversion of flat register files to a stacked register model. * Dead load/store elimination. These routines accept as input: * Basic block information (number of blocks, lists of predecessors and successors). Note that the granularity does not need to be basic blocks; the nodes could be statements or functions. * Bitmaps of local properties (computed, transparent and anticipatable expressions). The output of these routines is a bitmap of redundant computations and a bitmap of optimal placement points. */ /* We want target macros for the mode switching code to be able to refer to instruction attribute values. */ /* Edge based LCM routines. */ static void compute_antinout_edge (sbitmap *, sbitmap *, sbitmap *, sbitmap *); static void compute_earliest (struct edge_list *, int, sbitmap *, sbitmap *, sbitmap *, sbitmap *, sbitmap *); static void compute_laterin (struct edge_list *, sbitmap *, sbitmap *, sbitmap *, sbitmap *); static void compute_insert_delete (struct edge_list *edge_list, sbitmap *, sbitmap *, sbitmap *, sbitmap *, sbitmap *); /* Edge based LCM routines on a reverse flowgraph. */ static void compute_farthest (struct edge_list *, int, sbitmap *, sbitmap *, sbitmap*, sbitmap *, sbitmap *); static void compute_nearerout (struct edge_list *, sbitmap *, sbitmap *, sbitmap *, sbitmap *); static void compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *, sbitmap *, sbitmap *, sbitmap *, sbitmap *); /* Edge based lcm routines. */ /* Compute expression anticipatability at entrance and exit of each block. This is done based on the flow graph, and not on the pred-succ lists. Other than that, it's pretty much identical to compute_antinout. */ static void compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, sbitmap *antout) { basic_block bb; edge e; basic_block *worklist, *qin, *qout, *qend; unsigned int qlen; /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is bounded by the number of basic blocks. */ qin = qout = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks); /* We want a maximal solution, so make an optimistic initialization of ANTIN. */ sbitmap_vector_ones (antin, last_basic_block); /* Put every block on the worklist; this is necessary because of the optimistic initialization of ANTIN above. */ FOR_EACH_BB_REVERSE (bb) { *qin++ = bb; bb->aux = bb; } qin = worklist; qend = &worklist[n_basic_blocks]; qlen = n_basic_blocks; /* Mark blocks which are predecessors of the exit block so that we can easily identify them below. */ for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) e->src->aux = EXIT_BLOCK_PTR; /* Iterate until the worklist is empty. */ while (qlen) { /* Take the first entry off the worklist.
*/ bb = *qout++; qlen--; if (qout >= qend) qout = worklist; if (bb->aux == EXIT_BLOCK_PTR) /* Do not clear the aux field for blocks which are predecessors of the EXIT block. That way we never add then to the worklist again. */ sbitmap_zero (antout[bb->index]); else { /* Clear the aux field of this block so that it can be added to the worklist again if necessary. */ bb->aux = NULL; sbitmap_intersection_of_succs (antout[bb->index], antin, bb->index); } if (sbitmap_a_or_b_and_c_cg (antin[bb->index], antloc[bb->index], transp[bb->index], antout[bb->index])) /* If the in state of this block changed, then we need to add the predecessors of this block to the worklist if they are not already on the worklist. */ for (e = bb->pred; e; e = e->pred_next) if (!e->src->aux && e->src != ENTRY_BLOCK_PTR) { *qin++ = e->src; e->src->aux = e; qlen++; if (qin >= qend) qin = worklist; } } clear_aux_for_edges (); clear_aux_for_blocks (); free (worklist); } /* Compute the earliest vector for edge based lcm. */ static void compute_earliest (struct edge_list *edge_list, int n_exprs, sbitmap *antin, sbitmap *antout, sbitmap *avout, sbitmap *kill, sbitmap *earliest) { sbitmap difference, temp_bitmap; int x, num_edges; basic_block pred, succ; num_edges = NUM_EDGES (edge_list); difference = sbitmap_alloc (n_exprs); temp_bitmap = sbitmap_alloc (n_exprs); for (x = 0; x < num_edges; x++) { pred = INDEX_EDGE_PRED_BB (edge_list, x); succ = INDEX_EDGE_SUCC_BB (edge_list, x); if (pred == ENTRY_BLOCK_PTR) sbitmap_copy (earliest[x], antin[succ->index]); else { if (succ == EXIT_BLOCK_PTR) sbitmap_zero (earliest[x]); else { sbitmap_difference (difference, antin[succ->index], avout[pred->index]); sbitmap_not (temp_bitmap, antout[pred->index]); sbitmap_a_and_b_or_c (earliest[x], difference, kill[pred->index], temp_bitmap); } } } sbitmap_free (temp_bitmap); sbitmap_free (difference); } /* later(p,s) is dependent on the calculation of laterin(p). laterin(p) is dependent on the calculation of later(p2,p). laterin(ENTRY) is defined as all 0's later(ENTRY, succs(ENTRY)) are defined using laterin(ENTRY) laterin(succs(ENTRY)) is defined by later(ENTRY, succs(ENTRY)). If we progress in this manner, starting with all basic blocks in the work list, anytime we change later(bb), we need to add succs(bb) to the worklist if they are not already on the worklist. Boundary conditions: We prime the worklist all the normal basic blocks. The ENTRY block can never be added to the worklist since it is never the successor of any block. We explicitly prevent the EXIT block from being added to the worklist. We optimistically initialize LATER. That is the only time this routine will compute LATER for an edge out of the entry block since the entry block is never on the worklist. Thus, LATERIN is neither used nor computed for the ENTRY block. Since the EXIT block is never added to the worklist, we will neither use nor compute LATERIN for the exit block. Edges which reach the EXIT block are handled in the normal fashion inside the loop. However, the insertion/deletion computation needs LATERIN(EXIT), so we have to compute it. */ static void compute_laterin (struct edge_list *edge_list, sbitmap *earliest, sbitmap *antloc, sbitmap *later, sbitmap *laterin) { int num_edges, i; edge e; basic_block *worklist, *qin, *qout, *qend, bb; unsigned int qlen; num_edges = NUM_EDGES (edge_list); /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is bounded by the number of basic blocks. 
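(For orientation, a sketch of the equations iterated below, restated in bitmap terms rather than quoted from the LCM literature: LATERIN[bb] is the AND over all incoming edges E of LATER[E], and LATER[E] = EARLIEST[E] | (LATERIN[src(E)] & ~ANTLOC[src(E)]).)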
*/ qin = qout = worklist = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1)); /* Initialize a mapping from each edge to its index. */ for (i = 0; i < num_edges; i++) INDEX_EDGE (edge_list, i)->aux = (void *) (size_t) i; /* We want a maximal solution, so initially consider LATER true for all edges. This allows propagation through a loop since the incoming loop edge will have LATER set, so if all the other incoming edges to the loop are set, then LATERIN will be set for the head of the loop. If the optimistic setting of LATER on that edge was incorrect (for example the expression is ANTLOC in a block within the loop) then this algorithm will detect it when we process the block at the head of the optimistic edge. That will requeue the affected blocks. */ sbitmap_vector_ones (later, num_edges); /* Note that even though we want an optimistic setting of LATER, we do not want to be overly optimistic. Consider an outgoing edge from the entry block. That edge should always have a LATER value the same as EARLIEST for that edge. */ for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) sbitmap_copy (later[(size_t) e->aux], earliest[(size_t) e->aux]); /* Add all the blocks to the worklist. This prevents an early exit from the loop given our optimistic initialization of LATER above. */ FOR_EACH_BB (bb) { *qin++ = bb; bb->aux = bb; } qin = worklist; /* Note that we do not use the last allocated element for our queue, as EXIT_BLOCK is never inserted into it. In fact the above allocation of n_basic_blocks + 1 elements is not necessary. */ qend = &worklist[n_basic_blocks]; qlen = n_basic_blocks; /* Iterate until the worklist is empty. */ while (qlen) { /* Take the first entry off the worklist. */ bb = *qout++; bb->aux = NULL; qlen--; if (qout >= qend) qout = worklist; /* Compute the intersection of LATERIN for each incoming edge to B. */ sbitmap_ones (laterin[bb->index]); for (e = bb->pred; e != NULL; e = e->pred_next) sbitmap_a_and_b (laterin[bb->index], laterin[bb->index], later[(size_t)e->aux]); /* Calculate LATER for all outgoing edges. */ for (e = bb->succ; e != NULL; e = e->succ_next) if (sbitmap_union_of_diff_cg (later[(size_t) e->aux], earliest[(size_t) e->aux], laterin[e->src->index], antloc[e->src->index]) /* If LATER for an outgoing edge was changed, then we need to add the target of the outgoing edge to the worklist. */ && e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0) { *qin++ = e->dest; e->dest->aux = e; qlen++; if (qin >= qend) qin = worklist; } } /* Computation of insertion and deletion points requires computing LATERIN for the EXIT block. We allocated an extra entry in the LATERIN array for just this purpose. */ sbitmap_ones (laterin[last_basic_block]); for (e = EXIT_BLOCK_PTR->pred; e != NULL; e = e->pred_next) sbitmap_a_and_b (laterin[last_basic_block], laterin[last_basic_block], later[(size_t) e->aux]); clear_aux_for_edges (); free (worklist); } /* Compute the insertion and deletion points for edge based LCM. 
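In the same bitmap terms, a sketch of what the function below computes: DELETE[bb] = ANTLOC[bb] & ~LATERIN[bb] for every block, and INSERT[E] = LATER[E] & ~LATERIN[succ(E)] for every edge, where an edge whose successor is the EXIT block uses the extra LATERIN element at index last_basic_block that compute_laterin filled in for exactly this purpose.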
*/ static void compute_insert_delete (struct edge_list *edge_list, sbitmap *antloc, sbitmap *later, sbitmap *laterin, sbitmap *insert, sbitmap *delete) { int x; basic_block bb; FOR_EACH_BB (bb) sbitmap_difference (delete[bb->index], antloc[bb->index], laterin[bb->index]); for (x = 0; x < NUM_EDGES (edge_list); x++) { basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x); if (b == EXIT_BLOCK_PTR) sbitmap_difference (insert[x], later[x], laterin[last_basic_block]); else sbitmap_difference (insert[x], later[x], laterin[b->index]); } } /* Given local properties TRANSP, ANTLOC, AVOUT, KILL return the insert and delete vectors for edge based LCM. Returns an edgelist which is used to map the insert vector to what edge an expression should be inserted on. */ struct edge_list * pre_edge_lcm (FILE *file ATTRIBUTE_UNUSED, int n_exprs, sbitmap *transp, sbitmap *avloc, sbitmap *antloc, sbitmap *kill, sbitmap **insert, sbitmap **delete) { sbitmap *antin, *antout, *earliest; sbitmap *avin, *avout; sbitmap *later, *laterin; struct edge_list *edge_list; int num_edges; edge_list = create_edge_list (); num_edges = NUM_EDGES (edge_list); #ifdef LCM_DEBUG_INFO if (file) { fprintf (file, "Edge List:\n"); verify_edge_list (file, edge_list); print_edge_list (file, edge_list); dump_sbitmap_vector (file, "transp", "", transp, last_basic_block); dump_sbitmap_vector (file, "antloc", "", antloc, last_basic_block); dump_sbitmap_vector (file, "avloc", "", avloc, last_basic_block); dump_sbitmap_vector (file, "kill", "", kill, last_basic_block); } #endif /* Compute global availability. */ avin = sbitmap_vector_alloc (last_basic_block, n_exprs); avout = sbitmap_vector_alloc (last_basic_block, n_exprs); compute_available (avloc, kill, avout, avin); sbitmap_vector_free (avin); /* Compute global anticipatability. */ antin = sbitmap_vector_alloc (last_basic_block, n_exprs); antout = sbitmap_vector_alloc (last_basic_block, n_exprs); compute_antinout_edge (antloc, transp, antin, antout); #ifdef LCM_DEBUG_INFO if (file) { dump_sbitmap_vector (file, "antin", "", antin, last_basic_block); dump_sbitmap_vector (file, "antout", "", antout, last_basic_block); } #endif /* Compute earliestness. */ earliest = sbitmap_vector_alloc (num_edges, n_exprs); compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest); #ifdef LCM_DEBUG_INFO if (file) dump_sbitmap_vector (file, "earliest", "", earliest, num_edges); #endif sbitmap_vector_free (antout); sbitmap_vector_free (antin); sbitmap_vector_free (avout); later = sbitmap_vector_alloc (num_edges, n_exprs); /* Allocate an extra element for the exit block in the laterin vector. */ laterin = sbitmap_vector_alloc (last_basic_block + 1, n_exprs); compute_laterin (edge_list, earliest, antloc, later, laterin); #ifdef LCM_DEBUG_INFO if (file) { dump_sbitmap_vector (file, "laterin", "", laterin, last_basic_block + 1); dump_sbitmap_vector (file, "later", "", later, num_edges); } #endif sbitmap_vector_free (earliest); *insert = sbitmap_vector_alloc (num_edges, n_exprs); *delete = sbitmap_vector_alloc (last_basic_block, n_exprs); compute_insert_delete (edge_list, antloc, later, laterin, *insert, *delete); sbitmap_vector_free (laterin); sbitmap_vector_free (later); #ifdef LCM_DEBUG_INFO if (file) { dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges); dump_sbitmap_vector (file, "pre_delete_map", "", *delete, last_basic_block); } #endif return edge_list; } /* Compute the AVIN and AVOUT vectors from the AVLOC and KILL vectors. 
The AVIN/AVOUT solution is found by iterating over a worklist until a fixed point is reached. */ void compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout, sbitmap *avin) { edge e; basic_block *worklist, *qin, *qout, *qend, bb; unsigned int qlen; /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is bounded by the number of basic blocks. */ qin = qout = worklist = xmalloc (sizeof (basic_block) * n_basic_blocks); /* We want a maximal solution. */ sbitmap_vector_ones (avout, last_basic_block); /* Put every block on the worklist; this is necessary because of the optimistic initialization of AVOUT above. */ FOR_EACH_BB (bb) { *qin++ = bb; bb->aux = bb; } qin = worklist; qend = &worklist[n_basic_blocks]; qlen = n_basic_blocks; /* Mark blocks which are successors of the entry block so that we can easily identify them below. */ for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) e->dest->aux = ENTRY_BLOCK_PTR; /* Iterate until the worklist is empty. */ while (qlen) { /* Take the first entry off the worklist. */ bb = *qout++; qlen--; if (qout >= qend) qout = worklist; /* If one of the predecessor blocks is the ENTRY block, then the intersection of avouts is the null set. We can identify such blocks by the special value in the AUX field in the block structure. */ if (bb->aux == ENTRY_BLOCK_PTR) /* Do not clear the aux field for blocks which are successors of the ENTRY block. That way we never add them to the worklist again. */ sbitmap_zero (avin[bb->index]); else { /* Clear the aux field of this block so that it can be added to the worklist again if necessary. */ bb->aux = NULL; sbitmap_intersection_of_preds (avin[bb->index], avout, bb->index); } if (sbitmap_union_of_diff_cg (avout[bb->index], avloc[bb->index], avin[bb->index], kill[bb->index])) /* If the out state of this block changed, then we need to add the successors of this block to the worklist if they are not already on the worklist. */ for (e = bb->succ; e; e = e->succ_next) if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR) { *qin++ = e->dest; e->dest->aux = e; qlen++; if (qin >= qend) qin = worklist; } } clear_aux_for_edges (); clear_aux_for_blocks (); free (worklist); } /* Compute the farthest vector for edge based lcm. */ static void compute_farthest (struct edge_list *edge_list, int n_exprs, sbitmap *st_avout, sbitmap *st_avin, sbitmap *st_antin, sbitmap *kill, sbitmap *farthest) { sbitmap difference, temp_bitmap; int x, num_edges; basic_block pred, succ; num_edges = NUM_EDGES (edge_list); difference = sbitmap_alloc (n_exprs); temp_bitmap = sbitmap_alloc (n_exprs); for (x = 0; x < num_edges; x++) { pred = INDEX_EDGE_PRED_BB (edge_list, x); succ = INDEX_EDGE_SUCC_BB (edge_list, x); if (succ == EXIT_BLOCK_PTR) sbitmap_copy (farthest[x], st_avout[pred->index]); else { if (pred == ENTRY_BLOCK_PTR) sbitmap_zero (farthest[x]); else { sbitmap_difference (difference, st_avout[pred->index], st_antin[succ->index]); sbitmap_not (temp_bitmap, st_avin[succ->index]); sbitmap_a_and_b_or_c (farthest[x], difference, kill[succ->index], temp_bitmap); } } } sbitmap_free (temp_bitmap); sbitmap_free (difference); } /* Compute nearer and nearerout vectors for edge based lcm. This is the mirror of compute_laterin; additional comments on the implementation can be found before compute_laterin.
*/ static void compute_nearerout (struct edge_list *edge_list, sbitmap *farthest, sbitmap *st_avloc, sbitmap *nearer, sbitmap *nearerout) { int num_edges, i; edge e; basic_block *worklist, *tos, bb; num_edges = NUM_EDGES (edge_list); /* Allocate a worklist array/queue. Entries are only added to the list if they were not already on the list. So the size is bounded by the number of basic blocks. */ tos = worklist = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1)); /* Initialize NEARER for each edge and build a mapping from an edge to its index. */ for (i = 0; i < num_edges; i++) INDEX_EDGE (edge_list, i)->aux = (void *) (size_t) i; /* We want a maximal solution. */ sbitmap_vector_ones (nearer, num_edges); /* Note that even though we want an optimistic setting of NEARER, we do not want to be overly optimistic. Consider an incoming edge to the exit block. That edge should always have a NEARER value the same as FARTHEST for that edge. */ for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) sbitmap_copy (nearer[(size_t)e->aux], farthest[(size_t)e->aux]); /* Add all the blocks to the worklist. This prevents an early exit from the loop given our optimistic initialization of NEARER. */ FOR_EACH_BB (bb) { *tos++ = bb; bb->aux = bb; } /* Iterate until the worklist is empty. */ while (tos != worklist) { /* Take the first entry off the worklist. */ bb = *--tos; bb->aux = NULL; /* Compute the intersection of NEARER for each outgoing edge from B. */ sbitmap_ones (nearerout[bb->index]); for (e = bb->succ; e != NULL; e = e->succ_next) sbitmap_a_and_b (nearerout[bb->index], nearerout[bb->index], nearer[(size_t) e->aux]); /* Calculate NEARER for all incoming edges. */ for (e = bb->pred; e != NULL; e = e->pred_next) if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux], farthest[(size_t) e->aux], nearerout[e->dest->index], st_avloc[e->dest->index]) /* If NEARER for an incoming edge was changed, then we need to add the source of the incoming edge to the worklist. */ && e->src != ENTRY_BLOCK_PTR && e->src->aux == 0) { *tos++ = e->src; e->src->aux = e; } } /* Computation of insertion and deletion points requires computing NEAREROUT for the ENTRY block. We allocated an extra entry in the NEAREROUT array for just this purpose. */ sbitmap_ones (nearerout[last_basic_block]); for (e = ENTRY_BLOCK_PTR->succ; e != NULL; e = e->succ_next) sbitmap_a_and_b (nearerout[last_basic_block], nearerout[last_basic_block], nearer[(size_t) e->aux]); clear_aux_for_edges (); free (tos); } /* Compute the insertion and deletion points for edge based LCM. */ static void compute_rev_insert_delete (struct edge_list *edge_list, sbitmap *st_avloc, sbitmap *nearer, sbitmap *nearerout, sbitmap *insert, sbitmap *delete) { int x; basic_block bb; FOR_EACH_BB (bb) sbitmap_difference (delete[bb->index], st_avloc[bb->index], nearerout[bb->index]); for (x = 0; x < NUM_EDGES (edge_list); x++) { basic_block b = INDEX_EDGE_PRED_BB (edge_list, x); if (b == ENTRY_BLOCK_PTR) sbitmap_difference (insert[x], nearer[x], nearerout[last_basic_block]); else sbitmap_difference (insert[x], nearer[x], nearerout[b->index]); } } /* Given local properties TRANSP, ST_AVLOC, ST_ANTLOC, KILL return the insert and delete vectors for edge based reverse LCM. Returns an edgelist which is used to map the insert vector to what edge an expression should be inserted on. 
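As an illustration only -- the caller shown here, the local-property bitmaps it is assumed to have computed, and the two helper functions are hypothetical, not taken from this file -- a pass built on this interface looks roughly like:

     edge_list = pre_edge_rev_lcm (file, n_exprs, transp, st_avloc,
                                   st_antloc, kill, &insert, &delete);
     for (x = 0; x < NUM_EDGES (edge_list); x++)
       for (i = 0; i < n_exprs; i++)
         if (TEST_BIT (insert[x], i))
           insert_store_on_edge (i, INDEX_EDGE (edge_list, x));
     FOR_EACH_BB (bb)
       for (i = 0; i < n_exprs; i++)
         if (TEST_BIT (delete[bb->index], i))
           delete_redundant_store (i, bb);
     sbitmap_vector_free (insert);
     sbitmap_vector_free (delete);
     free_edge_list (edge_list);

   The first argument is the pass's dump file or NULL; insert_store_on_edge and delete_redundant_store stand in for whatever transformation the pass actually performs.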
*/ struct edge_list * pre_edge_rev_lcm (FILE *file ATTRIBUTE_UNUSED, int n_exprs, sbitmap *transp, sbitmap *st_avloc, sbitmap *st_antloc, sbitmap *kill, sbitmap **insert, sbitmap **delete) { sbitmap *st_antin, *st_antout; sbitmap *st_avout, *st_avin, *farthest; sbitmap *nearer, *nearerout; struct edge_list *edge_list; int num_edges; edge_list = create_edge_list (); num_edges = NUM_EDGES (edge_list); st_antin = sbitmap_vector_alloc (last_basic_block, n_exprs); st_antout = sbitmap_vector_alloc (last_basic_block, n_exprs); sbitmap_vector_zero (st_antin, last_basic_block); sbitmap_vector_zero (st_antout, last_basic_block); compute_antinout_edge (st_antloc, transp, st_antin, st_antout); /* Compute global anticipatability. */ st_avout = sbitmap_vector_alloc (last_basic_block, n_exprs); st_avin = sbitmap_vector_alloc (last_basic_block, n_exprs); compute_available (st_avloc, kill, st_avout, st_avin); #ifdef LCM_DEBUG_INFO if (file) { fprintf (file, "Edge List:\n"); verify_edge_list (file, edge_list); print_edge_list (file, edge_list); dump_sbitmap_vector (file, "transp", "", transp, last_basic_block); dump_sbitmap_vector (file, "st_avloc", "", st_avloc, last_basic_block); dump_sbitmap_vector (file, "st_antloc", "", st_antloc, last_basic_block); dump_sbitmap_vector (file, "st_antin", "", st_antin, last_basic_block); dump_sbitmap_vector (file, "st_antout", "", st_antout, last_basic_block); dump_sbitmap_vector (file, "st_kill", "", kill, last_basic_block); } #endif #ifdef LCM_DEBUG_INFO if (file) { dump_sbitmap_vector (file, "st_avout", "", st_avout, last_basic_block); dump_sbitmap_vector (file, "st_avin", "", st_avin, last_basic_block); } #endif /* Compute farthestness. */ farthest = sbitmap_vector_alloc (num_edges, n_exprs); compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin, kill, farthest); #ifdef LCM_DEBUG_INFO if (file) dump_sbitmap_vector (file, "farthest", "", farthest, num_edges); #endif sbitmap_vector_free (st_antin); sbitmap_vector_free (st_antout); sbitmap_vector_free (st_avin); sbitmap_vector_free (st_avout); nearer = sbitmap_vector_alloc (num_edges, n_exprs); /* Allocate an extra element for the entry block. */ nearerout = sbitmap_vector_alloc (last_basic_block + 1, n_exprs); compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout); #ifdef LCM_DEBUG_INFO if (file) { dump_sbitmap_vector (file, "nearerout", "", nearerout, last_basic_block + 1); dump_sbitmap_vector (file, "nearer", "", nearer, num_edges); } #endif sbitmap_vector_free (farthest); *insert = sbitmap_vector_alloc (num_edges, n_exprs); *delete = sbitmap_vector_alloc (last_basic_block, n_exprs); compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout, *insert, *delete); sbitmap_vector_free (nearerout); sbitmap_vector_free (nearer); #ifdef LCM_DEBUG_INFO if (file) { dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges); dump_sbitmap_vector (file, "pre_delete_map", "", *delete, last_basic_block); } #endif return edge_list; } /* Mode switching: The algorithm for setting the modes consists of scanning the insn list and finding all the insns which require a specific mode. Each insn gets a unique struct seginfo element. These structures are inserted into a list for each basic block. For each entity, there is an array of bb_info over the flow graph basic blocks (local var 'bb_info'), and contains a list of all insns within that basic block, in the order they are encountered. 
For each entity, any basic block WITHOUT any insns requiring a specific mode are given a single entry, without a mode. (Each basic block in the flow graph must have at least one entry in the segment table.) The LCM algorithm is then run over the flow graph to determine where to place the sets to the highest-priority value in respect of first the first insn in any one block. Any adjustments required to the transparency vectors are made, then the next iteration starts for the next-lower priority mode, till for each entity all modes are exhausted. More details are located in the code for optimize_mode_switching(). */ /* This structure contains the information for each insn which requires either single or double mode to be set. MODE is the mode this insn must be executed in. INSN_PTR is the insn to be executed (may be the note that marks the beginning of a basic block). BBNUM is the flow graph basic block this insn occurs in. NEXT is the next insn in the same basic block. */ struct seginfo { int mode; rtx insn_ptr; int bbnum; struct seginfo *next; HARD_REG_SET regs_live; }; struct bb_lcm_info { struct seginfo *seginfo; int computing; }; /* These bitmaps are used for the LCM algorithm. */ #ifdef OPTIMIZE_MODE_SWITCHING static sbitmap *antic; static sbitmap *transp; static sbitmap *comp; static sbitmap *delete; static sbitmap *insert; static struct seginfo * new_seginfo (int, rtx, int, HARD_REG_SET); static void add_seginfo (struct bb_lcm_info *, struct seginfo *); static void reg_dies_lcm (rtx, HARD_REG_SET); static void reg_becomes_live_lcm (rtx, rtx, void *); static void make_preds_opaque (basic_block, int); #endif #ifdef OPTIMIZE_MODE_SWITCHING /* This function will allocate a new BBINFO structure, initialized with the MODE, INSN, and basic block BB parameters. */ static struct seginfo * new_seginfo (int mode, rtx insn, int bb, HARD_REG_SET regs_live) { struct seginfo *ptr; ptr = xmalloc (sizeof (struct seginfo)); ptr->mode = mode; ptr->insn_ptr = insn; ptr->bbnum = bb; ptr->next = NULL; COPY_HARD_REG_SET (ptr->regs_live, regs_live); return ptr; } /* Add a seginfo element to the end of a list. HEAD is a pointer to the list beginning. INFO is the structure to be linked in. */ static void add_seginfo (struct bb_lcm_info *head, struct seginfo *info) { struct seginfo *ptr; if (head->seginfo == NULL) head->seginfo = info; else { ptr = head->seginfo; while (ptr->next != NULL) ptr = ptr->next; ptr->next = info; } } /* Make all predecessors of basic block B opaque, recursively, till we hit some that are already non-transparent, or an edge where aux is set; that denotes that a mode set is to be done on that edge. J is the bit number in the bitmaps that corresponds to the entity that we are currently handling mode-switching for. */ static void make_preds_opaque (basic_block b, int j) { edge e; for (e = b->pred; e; e = e->pred_next) { basic_block pb = e->src; if (e->aux || ! TEST_BIT (transp[pb->index], j)) continue; RESET_BIT (transp[pb->index], j); make_preds_opaque (pb, j); } } /* Record in LIVE that register REG died. */ static void reg_dies_lcm (rtx reg, HARD_REG_SET live) { int regno, nregs; if (!REG_P (reg)) return; regno = REGNO (reg); if (regno < FIRST_PSEUDO_REGISTER) for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0; nregs--) CLEAR_HARD_REG_BIT (live, regno + nregs); } /* Record in LIVE that register REG became live. This is called via note_stores. 
*/ static void reg_becomes_live_lcm (rtx reg, rtx setter ATTRIBUTE_UNUSED, void *live) { int regno, nregs; if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); if (!REG_P (reg)) return; regno = REGNO (reg); if (regno < FIRST_PSEUDO_REGISTER) for (nregs = hard_regno_nregs[regno][GET_MODE (reg)] - 1; nregs >= 0; nregs--) SET_HARD_REG_BIT (* (HARD_REG_SET *) live, regno + nregs); } /* Make sure if MODE_ENTRY is defined the MODE_EXIT is defined and vice versa. */ #if defined (MODE_ENTRY) != defined (MODE_EXIT) #error "Both MODE_ENTRY and MODE_EXIT must be defined" #endif /* Find all insns that need a particular mode setting, and insert the necessary mode switches. Return true if we did work. */ int optimize_mode_switching (FILE *file) { rtx insn; int e; basic_block bb; int need_commit = 0; sbitmap *kill; struct edge_list *edge_list; static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING; #define N_ENTITIES ARRAY_SIZE (num_modes) int entity_map[N_ENTITIES]; struct bb_lcm_info *bb_info[N_ENTITIES]; int i, j; int n_entities; int max_num_modes = 0; bool emited = false; basic_block post_entry ATTRIBUTE_UNUSED, pre_exit ATTRIBUTE_UNUSED; clear_bb_flags (); for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--) if (OPTIMIZE_MODE_SWITCHING (e)) { int entry_exit_extra = 0; /* Create the list of segments within each basic block. If NORMAL_MODE is defined, allow for two extra blocks split from the entry and exit block. */ #if defined (MODE_ENTRY) && defined (MODE_EXIT) entry_exit_extra = 2; #endif bb_info[n_entities] = xcalloc (last_basic_block + entry_exit_extra, sizeof **bb_info); entity_map[n_entities++] = e; if (num_modes[e] > max_num_modes) max_num_modes = num_modes[e]; } if (! n_entities) return 0; #if defined (MODE_ENTRY) && defined (MODE_EXIT) { /* Split the edge from the entry block and the fallthrough edge to the exit block, so that we can note that there NORMAL_MODE is supplied / required. */ edge eg; post_entry = split_edge (ENTRY_BLOCK_PTR->succ); /* The only non-call predecessor at this stage is a block with a fallthrough edge; there can be at most one, but there could be none at all, e.g. when exit is called. */ for (pre_exit = 0, eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next) if (eg->flags & EDGE_FALLTHRU) { regset live_at_end = eg->src->global_live_at_end; if (pre_exit) abort (); pre_exit = split_edge (eg); COPY_REG_SET (pre_exit->global_live_at_start, live_at_end); COPY_REG_SET (pre_exit->global_live_at_end, live_at_end); } } #endif /* Create the bitmap vectors. */ antic = sbitmap_vector_alloc (last_basic_block, n_entities); transp = sbitmap_vector_alloc (last_basic_block, n_entities); comp = sbitmap_vector_alloc (last_basic_block, n_entities); sbitmap_vector_ones (transp, last_basic_block); for (j = n_entities - 1; j >= 0; j--) { int e = entity_map[j]; int no_mode = num_modes[e]; struct bb_lcm_info *info = bb_info[j]; /* Determine what the first use (if any) need for a mode of entity E is. This will be the mode that is anticipatable for this block. Also compute the initial transparency settings. 
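(MODE_NEEDED (E, INSN) is the target-supplied macro that reports which mode, if any, INSN requires for entity E; each change in the required mode within a block gets its own seginfo entry, and any requirement at all clears the block's bit in TRANSP. The REG_DEAD/REG_UNUSED note walking and the note_stores call keep LIVE_NOW current, so every seginfo records the hard registers live at its insn.)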
*/ FOR_EACH_BB (bb) { struct seginfo *ptr; int last_mode = no_mode; HARD_REG_SET live_now; REG_SET_TO_HARD_REG_SET (live_now, bb->global_live_at_start); for (insn = BB_HEAD (bb); insn != NULL && insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { int mode = MODE_NEEDED (e, insn); rtx link; if (mode != no_mode && mode != last_mode) { last_mode = mode; ptr = new_seginfo (mode, insn, bb->index, live_now); add_seginfo (info + bb->index, ptr); RESET_BIT (transp[bb->index], j); } #ifdef MODE_AFTER last_mode = MODE_AFTER (last_mode, insn); #endif /* Update LIVE_NOW. */ for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_DEAD) reg_dies_lcm (XEXP (link, 0), live_now); note_stores (PATTERN (insn), reg_becomes_live_lcm, &live_now); for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_UNUSED) reg_dies_lcm (XEXP (link, 0), live_now); } } info[bb->index].computing = last_mode; /* Check for blocks without ANY mode requirements. */ if (last_mode == no_mode) { ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now); add_seginfo (info + bb->index, ptr); } } #if defined (MODE_ENTRY) && defined (MODE_EXIT) { int mode = MODE_ENTRY (e); if (mode != no_mode) { bb = post_entry; /* By always making this nontransparent, we save an extra check in make_preds_opaque. We also need this to avoid confusing pre_edge_lcm when antic is cleared but transp and comp are set. */ RESET_BIT (transp[bb->index], j); /* Insert a fake computing definition of MODE into entry blocks which compute no mode. This represents the mode on entry. */ info[bb->index].computing = mode; if (pre_exit) info[pre_exit->index].seginfo->mode = MODE_EXIT (e); } } #endif /* NORMAL_MODE */ } kill = sbitmap_vector_alloc (last_basic_block, n_entities); for (i = 0; i < max_num_modes; i++) { int current_mode[N_ENTITIES]; /* Set the anticipatable and computing arrays. */ sbitmap_vector_zero (antic, last_basic_block); sbitmap_vector_zero (comp, last_basic_block); for (j = n_entities - 1; j >= 0; j--) { int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i); struct bb_lcm_info *info = bb_info[j]; FOR_EACH_BB (bb) { if (info[bb->index].seginfo->mode == m) SET_BIT (antic[bb->index], j); if (info[bb->index].computing == m) SET_BIT (comp[bb->index], j); } } /* Calculate the optimal locations for the placement mode switches to modes with priority I. */ FOR_EACH_BB (bb) sbitmap_not (kill[bb->index], transp[bb->index]); edge_list = pre_edge_lcm (file, 1, transp, comp, antic, kill, &insert, &delete); for (j = n_entities - 1; j >= 0; j--) { /* Insert all mode sets that have been inserted by lcm. */ int no_mode = num_modes[entity_map[j]]; /* Wherever we have moved a mode setting upwards in the flow graph, the blocks between the new setting site and the now redundant computation ceases to be transparent for any lower-priority mode of the same entity. First set the aux field of each insertion site edge non-transparent, then propagate the new non-transparency from the redundant computation upwards till we hit an insertion site or an already non-transparent block. */ for (e = NUM_EDGES (edge_list) - 1; e >= 0; e--) { edge eg = INDEX_EDGE (edge_list, e); int mode; basic_block src_bb; HARD_REG_SET live_at_edge; rtx mode_set; eg->aux = 0; if (! 
TEST_BIT (insert[e], j)) continue; eg->aux = (void *)1; mode = current_mode[j]; src_bb = eg->src; REG_SET_TO_HARD_REG_SET (live_at_edge, src_bb->global_live_at_end); start_sequence (); EMIT_MODE_SET (entity_map[j], mode, live_at_edge); mode_set = get_insns (); end_sequence (); /* Do not bother to insert empty sequence. */ if (mode_set == NULL_RTX) continue; /* If this is an abnormal edge, we'll insert at the end of the previous block. */ if (eg->flags & EDGE_ABNORMAL) { emited = true; if (GET_CODE (BB_END (src_bb)) == JUMP_INSN) emit_insn_before (mode_set, BB_END (src_bb)); /* It doesn't make sense to switch to normal mode after a CALL_INSN, so we're going to abort if we find one. The cases in which a CALL_INSN may have an abnormal edge are sibcalls and EH edges. In the case of sibcalls, the dest basic-block is the EXIT_BLOCK, that runs in normal mode; it is assumed that a sibcall insn requires normal mode itself, so no mode switch would be required after the call (it wouldn't make sense, anyway). In the case of EH edges, EH entry points also start in normal mode, so a similar reasoning applies. */ else if (GET_CODE (BB_END (src_bb)) == INSN) emit_insn_after (mode_set, BB_END (src_bb)); else abort (); bb_info[j][src_bb->index].computing = mode; RESET_BIT (transp[src_bb->index], j); } else { need_commit = 1; insert_insn_on_edge (mode_set, eg); } } FOR_EACH_BB_REVERSE (bb) if (TEST_BIT (delete[bb->index], j)) { make_preds_opaque (bb, j); /* Cancel the 'deleted' mode set. */ bb_info[j][bb->index].seginfo->mode = no_mode; } } clear_aux_for_edges (); free_edge_list (edge_list); } /* Now output the remaining mode sets in all the segments. */ for (j = n_entities - 1; j >= 0; j--) { int no_mode = num_modes[entity_map[j]]; FOR_EACH_BB_REVERSE (bb) { struct seginfo *ptr, *next; for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next) { next = ptr->next; if (ptr->mode != no_mode) { rtx mode_set; start_sequence (); EMIT_MODE_SET (entity_map[j], ptr->mode, ptr->regs_live); mode_set = get_insns (); end_sequence (); /* Do not bother to insert empty sequence. */ if (mode_set == NULL_RTX) continue; emited = true; if (GET_CODE (ptr->insn_ptr) == NOTE && (NOTE_LINE_NUMBER (ptr->insn_ptr) == NOTE_INSN_BASIC_BLOCK)) emit_insn_after (mode_set, ptr->insn_ptr); else emit_insn_before (mode_set, ptr->insn_ptr); } free (ptr); } } free (bb_info[j]); } /* Finished. Free up all the things we've allocated. */ sbitmap_vector_free (kill); sbitmap_vector_free (antic); sbitmap_vector_free (transp); sbitmap_vector_free (comp); sbitmap_vector_free (delete); sbitmap_vector_free (insert); if (need_commit) commit_edge_insertions (); #if defined (MODE_ENTRY) && defined (MODE_EXIT) cleanup_cfg (CLEANUP_NO_INSN_DEL); #else if (!need_commit && !emited) return 0; #endif max_regno = max_reg_num (); allocate_reg_info (max_regno, FALSE, FALSE); update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES, (PROP_DEATH_NOTES | PROP_KILL_DEAD_CODE | PROP_SCAN_DEAD_CODE)); return 1; } #endif /* OPTIMIZE_MODE_SWITCHING */ /* List management for the GCC expander. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static void free_list (rtx *, rtx *); /* Functions for maintaining cache-able lists of EXPR_LIST and INSN_LISTs. */ /* An INSN_LIST containing all INSN_LISTs allocated but currently unused. */ static GTY ((deletable)) rtx unused_insn_list; /* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused. */ static GTY ((deletable)) rtx unused_expr_list; /* This function will free an entire list of either EXPR_LIST or INSN_LIST nodes. This is to be used only on lists that consist exclusively of nodes of one type only. This is only called by free_EXPR_LIST_list and free_INSN_LIST_list. */ static void free_list (rtx *listp, rtx *unused_listp) { rtx link, prev_link; prev_link = *listp; link = XEXP (prev_link, 1); while (link) { prev_link = link; link = XEXP (link, 1); } XEXP (prev_link, 1) = *unused_listp; *unused_listp = *listp; *listp = 0; } /* This call is used in place of a gen_rtx_INSN_LIST. If there is a cached node available, we'll use it, otherwise a call to gen_rtx_INSN_LIST is made. */ rtx alloc_INSN_LIST (rtx val, rtx next) { rtx r; if (unused_insn_list) { r = unused_insn_list; unused_insn_list = XEXP (r, 1); XEXP (r, 0) = val; XEXP (r, 1) = next; PUT_REG_NOTE_KIND (r, VOIDmode); } else r = gen_rtx_INSN_LIST (VOIDmode, val, next); return r; } /* This call is used in place of a gen_rtx_EXPR_LIST. If there is a cached node available, we'll use it, otherwise a call to gen_rtx_EXPR_LIST is made. */ rtx alloc_EXPR_LIST (int kind, rtx val, rtx next) { rtx r; if (unused_expr_list) { r = unused_expr_list; unused_expr_list = XEXP (r, 1); XEXP (r, 0) = val; XEXP (r, 1) = next; PUT_REG_NOTE_KIND (r, kind); } else r = gen_rtx_EXPR_LIST (kind, val, next); return r; } /* This function will free up an entire list of EXPR_LIST nodes. */ void free_EXPR_LIST_list (rtx *listp) { if (*listp == 0) return; free_list (listp, &unused_expr_list); } /* This function will free up an entire list of INSN_LIST nodes. */ void free_INSN_LIST_list (rtx *listp) { if (*listp == 0) return; free_list (listp, &unused_insn_list); } /* This function will free up an individual EXPR_LIST node. */ void free_EXPR_LIST_node (rtx ptr) { XEXP (ptr, 1) = unused_expr_list; unused_expr_list = ptr; } /* This function will free up an individual INSN_LIST node. */ void free_INSN_LIST_node (rtx ptr) { XEXP (ptr, 1) = unused_insn_list; unused_insn_list = ptr; } /* Type information for lists.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_rd_gt_lists_h[] = { { &unused_expr_list, 1, sizeof (unused_expr_list), NULL, NULL }, { &unused_insn_list, 1, sizeof (unused_insn_list), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Allocate registers within a basic block, for GNU compiler. Copyright (C) 1987, 1988, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Allocation of hard register numbers to pseudo registers is done in two passes. In this pass we consider only regs that are born and die once within one basic block. We do this one basic block at a time. Then the next pass allocates the registers that remain. Two passes are used because this pass uses methods that work only on linear code, but that do a better job than the general methods used in global_alloc, and more quickly too. The assignments made are recorded in the vector reg_renumber whose space is allocated here. The rtl code itself is not altered. We assign each instruction in the basic block a number which is its order from the beginning of the block. Then we can represent the lifetime of a pseudo register with a pair of numbers, and check for conflicts easily. We can record the availability of hard registers with a HARD_REG_SET for each instruction. The HARD_REG_SET contains 0 or 1 for each hard reg. To avoid register shuffling, we tie registers together when one dies by being copied into another, or dies in an instruction that does arithmetic to produce another. The tied registers are allocated as one. Registers with different reg class preferences can never be tied unless the class preferred by one is a subclass of the one preferred by the other. Tying is represented with "quantity numbers". A non-tied register is given a new quantity number. Tied registers have the same quantity number. We have provision to exempt registers, even when they are contained within the block, that can be tied to others that are not contained in it. This is so that global_alloc could process them both and tie them then. But this is currently disabled since tying in global_alloc is not yet implemented. */ /* Pseudos allocated here can be reallocated by global.c if the hard register is used as a spill register. Currently we don't allocate such pseudos here if their preferred class is likely to be used by spills. */ /* Next quantity number available for allocation. */ static int next_qty; /* Information we maintain about each quantity. */ struct qty { /* The number of refs to quantity Q. */ int n_refs; /* The frequency of uses of quantity Q. */ int freq; /* Insn number (counting from head of basic block) where quantity Q was born. -1 if birth has not been recorded. 
*/ int birth; /* Insn number (counting from head of basic block) where given quantity died. Due to the way tying is done, and the fact that we consider in this pass only regs that die but once, a quantity can die only once. Each quantity's life span is a set of consecutive insns. -1 if death has not been recorded. */ int death; /* Number of words needed to hold the data in given quantity. This depends on its machine mode. It is used for these purposes: 1. It is used in computing the relative importance of qtys, which determines the order in which we look for regs for them. 2. It is used in rules that prevent tying several registers of different sizes in a way that is geometrically impossible (see combine_regs). */ int size; /* Number of times a reg tied to given qty lives across a CALL_INSN. */ int n_calls_crossed; /* The register number of one pseudo register whose reg_qty value is Q. This register should be the head of the chain maintained in reg_next_in_qty. */ int first_reg; /* Reg class contained in (smaller than) the preferred classes of all the pseudo regs that are tied in given quantity. This is the preferred class for allocating that quantity. */ enum reg_class min_class; /* Register class within which we allocate given qty if we can't get its preferred class. */ enum reg_class alternate_class; /* This holds the mode of the registers that are tied to given qty, or VOIDmode if registers with differing modes are tied together. */ enum machine_mode mode; /* the hard reg number chosen for given quantity, or -1 if none was found. */ short phys_reg; }; static struct qty *qty; /* These fields are kept separately to speedup their clearing. */ /* We maintain two hard register sets that indicate suggested hard registers for each quantity. The first, phys_copy_sugg, contains hard registers that are tied to the quantity by a simple copy. The second contains all hard registers that are tied to the quantity via an arithmetic operation. The former register set is given priority for allocation. This tends to eliminate copy insns. */ /* Element Q is a set of hard registers that are suggested for quantity Q by copy insns. */ static HARD_REG_SET *qty_phys_copy_sugg; /* Element Q is a set of hard registers that are suggested for quantity Q by arithmetic insns. */ static HARD_REG_SET *qty_phys_sugg; /* Element Q is the number of suggested registers in qty_phys_copy_sugg. */ static short *qty_phys_num_copy_sugg; /* Element Q is the number of suggested registers in qty_phys_sugg. */ static short *qty_phys_num_sugg; /* If (REG N) has been assigned a quantity number, is a register number of another register assigned the same quantity number, or -1 for the end of the chain. qty->first_reg point to the head of this chain. */ static int *reg_next_in_qty; /* reg_qty[N] (where N is a pseudo reg number) is the qty number of that reg if it is >= 0, of -1 if this register cannot be allocated by local-alloc, or -2 if not known yet. Note that if we see a use or death of pseudo register N with reg_qty[N] == -2, register N must be local to the current block. If it were used in more than one block, we would have reg_qty[N] == -1. This relies on the fact that if reg_basic_block[N] is >= 0, register N will not appear in any other block. We save a considerable number of tests by exploiting this. If N is < FIRST_PSEUDO_REGISTER, reg_qty[N] is undefined and should not be referenced. */ static int *reg_qty; /* The offset (in words) of register N within its quantity. 
This can be nonzero if register N is SImode, and has been tied to a subreg of a DImode register. */ static char *reg_offset_local; /* Vector of substitutions of register numbers, used to map pseudo regs into hardware regs. This is set up as a result of register allocation. Element N is the hard reg assigned to pseudo reg N, or is -1 if no hard reg was assigned. If N is a hard reg number, element N is N. */ short *reg_renumber; /* Set of hard registers live at the current point in the scan of the instructions in a basic block. */ static HARD_REG_SET regs_live; /* Each set of hard registers indicates registers live at a particular point in the basic block. For N even, regs_live_at[N] says which hard registers are needed *after* insn N/2 (i.e., they may not conflict with the outputs of insn N/2 or the inputs of insn N/2 + 1. If an object is to conflict with the inputs of insn J but not the outputs of insn J + 1, we say it is born at index J*2 - 1. Similarly, if it is to conflict with the outputs of insn J but not the inputs of insn J + 1, it is said to die at index J*2 + 1. */ static HARD_REG_SET *regs_live_at; /* Communicate local vars `insn_number' and `insn' from `block_alloc' to `reg_is_set', `wipe_dead_reg', and `alloc_qty'. */ static int this_insn_number; static rtx this_insn; struct equivalence { /* Set when an attempt should be made to replace a register with the associated src_p entry. */ char replace; /* Set when a REG_EQUIV note is found or created. Use to keep track of what memory accesses might be created later, e.g. by reload. */ rtx replacement; rtx *src_p; /* Loop depth is used to recognize equivalences which appear to be present within the same loop (or in an inner loop). */ int loop_depth; /* The list of each instruction which initializes this register. */ rtx init_insns; }; /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence structure for that register. */ static struct equivalence *reg_equiv; /* Nonzero if we recorded an equivalence for a LABEL_REF. */ static int recorded_label_ref; static void alloc_qty (int, enum machine_mode, int, int); static void validate_equiv_mem_from_store (rtx, rtx, void *); static int validate_equiv_mem (rtx, rtx, rtx); static int equiv_init_varies_p (rtx); static int equiv_init_movable_p (rtx, int); static int contains_replace_regs (rtx); static int memref_referenced_p (rtx, rtx); static int memref_used_between_p (rtx, rtx, rtx); static void update_equiv_regs (void); static void no_equiv (rtx, rtx, void *); static void block_alloc (int); static int qty_sugg_compare (int, int); static int qty_sugg_compare_1 (const void *, const void *); static int qty_compare (int, int); static int qty_compare_1 (const void *, const void *); static int combine_regs (rtx, rtx, int, int, rtx, int); static int reg_meets_class_p (int, enum reg_class); static void update_qty_class (int, int); static void reg_is_set (rtx, rtx, void *); static void reg_is_born (rtx, int); static void wipe_dead_reg (rtx, int); static int find_free_reg (enum reg_class, enum machine_mode, int, int, int, int, int); static void mark_life (int, enum machine_mode, int); static void post_mark_life (int, enum machine_mode, int, int, int); static int no_conflict_p (rtx, rtx, rtx); static int requires_inout (const char *); /* Allocate a new quantity (new within current basic block) for register number REGNO which is born at index BIRTH within the block. MODE and SIZE are info on reg REGNO. 
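As a purely illustrative example (the register numbers are invented): if pseudo 100 gets a quantity here and later dies in a copy into pseudo 101, the two pseudos can be given the same quantity number (see combine_regs), so a single hard register serves both and no shuffling move is needed; a pseudo that cannot be tied to anything simply keeps the fresh quantity allocated by this function.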
*/ static void alloc_qty (int regno, enum machine_mode mode, int size, int birth) { int qtyno = next_qty++; reg_qty[regno] = qtyno; reg_offset_local[regno] = 0; reg_next_in_qty[regno] = -1; qty[qtyno].first_reg = regno; qty[qtyno].size = size; qty[qtyno].mode = mode; qty[qtyno].birth = birth; qty[qtyno].n_calls_crossed = REG_N_CALLS_CROSSED (regno); qty[qtyno].min_class = reg_preferred_class (regno); qty[qtyno].alternate_class = reg_alternate_class (regno); qty[qtyno].n_refs = REG_N_REFS (regno); qty[qtyno].freq = REG_FREQ (regno); } /* Main entry point of this file. */ int local_alloc (void) { int i; int max_qty; basic_block b; /* We need to keep track of whether or not we recorded a LABEL_REF so that we know if the jump optimizer needs to be rerun. */ recorded_label_ref = 0; /* Leaf functions and non-leaf functions have different needs. If defined, let the machine say what kind of ordering we should use. */ #ifdef ORDER_REGS_FOR_LOCAL_ALLOC ORDER_REGS_FOR_LOCAL_ALLOC; #endif /* Promote REG_EQUAL notes to REG_EQUIV notes and adjust status of affected registers. */ if (optimize) update_equiv_regs (); /* This sets the maximum number of quantities we can have. Quantity numbers start at zero and we can have one for each pseudo. */ max_qty = (max_regno - FIRST_PSEUDO_REGISTER); /* Allocate vectors of temporary data. See the declarations of these variables, above, for what they mean. */ qty = xmalloc (max_qty * sizeof (struct qty)); qty_phys_copy_sugg = xmalloc (max_qty * sizeof (HARD_REG_SET)); qty_phys_num_copy_sugg = xmalloc (max_qty * sizeof (short)); qty_phys_sugg = xmalloc (max_qty * sizeof (HARD_REG_SET)); qty_phys_num_sugg = xmalloc (max_qty * sizeof (short)); reg_qty = xmalloc (max_regno * sizeof (int)); reg_offset_local = xmalloc (max_regno * sizeof (char)); reg_next_in_qty = xmalloc (max_regno * sizeof (int)); /* Determine which pseudo-registers can be allocated by local-alloc. In general, these are the registers used only in a single block and which only die once. We need not be concerned with which block actually uses the register since we will never see it outside that block. */ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) { if (REG_BASIC_BLOCK (i) >= 0 && REG_N_DEATHS (i) == 1) reg_qty[i] = -2; else reg_qty[i] = -1; } /* Force loop below to initialize entire quantity array. */ next_qty = max_qty; /* Allocate each block's local registers, block by block. */ FOR_EACH_BB (b) { /* NEXT_QTY indicates which elements of the `qty_...' vectors might need to be initialized because they were used for the previous block; it is set to the entire array before block 0. Initialize those, with explicit loop if there are few, else with bzero and bcopy. Do not initialize vectors that are explicit set by `alloc_qty'. */ if (next_qty < 6) { for (i = 0; i < next_qty; i++) { CLEAR_HARD_REG_SET (qty_phys_copy_sugg[i]); qty_phys_num_copy_sugg[i] = 0; CLEAR_HARD_REG_SET (qty_phys_sugg[i]); qty_phys_num_sugg[i] = 0; } } else { #define CLEAR(vector) \ memset ((vector), 0, (sizeof (*(vector))) * next_qty); CLEAR (qty_phys_copy_sugg); CLEAR (qty_phys_num_copy_sugg); CLEAR (qty_phys_sugg); CLEAR (qty_phys_num_sugg); } next_qty = 0; block_alloc (b->index); } free (qty); free (qty_phys_copy_sugg); free (qty_phys_num_copy_sugg); free (qty_phys_sugg); free (qty_phys_num_sugg); free (reg_qty); free (reg_offset_local); free (reg_next_in_qty); return recorded_label_ref; } /* Used for communication between the following two functions: contains a MEM that we wish to ensure remains unchanged. 
*/ static rtx equiv_mem; /* Set nonzero if EQUIV_MEM is modified. */ static int equiv_mem_modified; /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified. Called via note_stores. */ static void validate_equiv_mem_from_store (rtx dest, rtx set ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { if ((REG_P (dest) && reg_overlap_mentioned_p (dest, equiv_mem)) || (MEM_P (dest) && true_dependence (dest, VOIDmode, equiv_mem, rtx_varies_p))) equiv_mem_modified = 1; } /* Verify that no store between START and the death of REG invalidates MEMREF. MEMREF is invalidated by modifying a register used in MEMREF, by storing into an overlapping memory location, or with a non-const CALL_INSN. Return 1 if MEMREF remains valid. */ static int validate_equiv_mem (rtx start, rtx reg, rtx memref) { rtx insn; rtx note; equiv_mem = memref; equiv_mem_modified = 0; /* If the memory reference has side effects or is volatile, it isn't a valid equivalence. */ if (side_effects_p (memref)) return 0; for (insn = start; insn && ! equiv_mem_modified; insn = NEXT_INSN (insn)) { if (! INSN_P (insn)) continue; if (find_reg_note (insn, REG_DEAD, reg)) return 1; if (GET_CODE (insn) == CALL_INSN && ! RTX_UNCHANGING_P (memref) && ! CONST_OR_PURE_CALL_P (insn)) return 0; note_stores (PATTERN (insn), validate_equiv_mem_from_store, NULL); /* If a register mentioned in MEMREF is modified via an auto-increment, we lose the equivalence. Do the same if one dies; although we could extend the life, it doesn't seem worth the trouble. */ for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) if ((REG_NOTE_KIND (note) == REG_INC || REG_NOTE_KIND (note) == REG_DEAD) && REG_P (XEXP (note, 0)) && reg_overlap_mentioned_p (XEXP (note, 0), memref)) return 0; } return 0; } /* Returns zero if X is known to be invariant. */ static int equiv_init_varies_p (rtx x) { RTX_CODE code = GET_CODE (x); int i; const char *fmt; switch (code) { case MEM: return ! RTX_UNCHANGING_P (x) || equiv_init_varies_p (XEXP (x, 0)); case QUEUED: return 1; case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: return 0; case REG: return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0); case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 1; /* Fall through. */ default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') { if (equiv_init_varies_p (XEXP (x, i))) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (equiv_init_varies_p (XVECEXP (x, i, j))) return 1; } return 0; } /* Returns nonzero if X (used to initialize register REGNO) is movable. X is only movable if the registers it uses have equivalent initializations which appear to be within the same loop (or in an inner loop) and movable or if they are not candidates for local_alloc and don't vary. */ static int equiv_init_movable_p (rtx x, int regno) { int i, j; const char *fmt; enum rtx_code code = GET_CODE (x); switch (code) { case SET: return equiv_init_movable_p (SET_SRC (x), regno); case CC0: case CLOBBER: return 0; case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC: case PRE_MODIFY: case POST_MODIFY: return 0; case REG: return (reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth && reg_equiv[REGNO (x)].replace) || (REG_BASIC_BLOCK (REGNO (x)) < 0 && ! rtx_varies_p (x, 0)); case UNSPEC_VOLATILE: return 0; case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 0; /* Fall through. 
*/ default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) switch (fmt[i]) { case 'e': if (! equiv_init_movable_p (XEXP (x, i), regno)) return 0; break; case 'E': for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (! equiv_init_movable_p (XVECEXP (x, i, j), regno)) return 0; break; } return 1; } /* TRUE if X uses any registers for which reg_equiv[REGNO].replace is true. */ static int contains_replace_regs (rtx x) { int i, j; const char *fmt; enum rtx_code code = GET_CODE (x); switch (code) { case CONST_INT: case CONST: case LABEL_REF: case SYMBOL_REF: case CONST_DOUBLE: case CONST_VECTOR: case PC: case CC0: case HIGH: return 0; case REG: return reg_equiv[REGNO (x)].replace; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) switch (fmt[i]) { case 'e': if (contains_replace_regs (XEXP (x, i))) return 1; break; case 'E': for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (contains_replace_regs (XVECEXP (x, i, j))) return 1; break; } return 0; } /* TRUE if X references a memory location that would be affected by a store to MEMREF. */ static int memref_referenced_p (rtx memref, rtx x) { int i, j; const char *fmt; enum rtx_code code = GET_CODE (x); switch (code) { case CONST_INT: case CONST: case LABEL_REF: case SYMBOL_REF: case CONST_DOUBLE: case CONST_VECTOR: case PC: case CC0: case HIGH: case LO_SUM: return 0; case REG: return (reg_equiv[REGNO (x)].replacement && memref_referenced_p (memref, reg_equiv[REGNO (x)].replacement)); case MEM: if (true_dependence (memref, VOIDmode, x, rtx_varies_p)) return 1; break; case SET: /* If we are setting a MEM, it doesn't count (its address does), but any other SET_DEST that has a MEM in it is referencing the MEM. */ if (MEM_P (SET_DEST (x))) { if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0))) return 1; } else if (memref_referenced_p (memref, SET_DEST (x))) return 1; return memref_referenced_p (memref, SET_SRC (x)); default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) switch (fmt[i]) { case 'e': if (memref_referenced_p (memref, XEXP (x, i))) return 1; break; case 'E': for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (memref_referenced_p (memref, XVECEXP (x, i, j))) return 1; break; } return 0; } /* TRUE if some insn in the range (START, END] references a memory location that would be affected by a store to MEMREF. */ static int memref_used_between_p (rtx memref, rtx start, rtx end) { rtx insn; for (insn = NEXT_INSN (start); insn != NEXT_INSN (end); insn = NEXT_INSN (insn)) if (INSN_P (insn) && memref_referenced_p (memref, PATTERN (insn))) return 1; return 0; } /* Find registers that are equivalent to a single value throughout the compilation (either because they can be referenced in memory or are set once from a single constant). Lower their priority for a register. If such a register is only referenced once, try substituting its value into the using insn. If it succeeds, we can eliminate the register completely. */ static void update_equiv_regs (void) { rtx insn; basic_block bb; int loop_depth; regset_head cleared_regs; int clear_regnos = 0; reg_equiv = xcalloc (max_regno, sizeof *reg_equiv); INIT_REG_SET (&cleared_regs); init_alias_analysis (); /* Scan the insns and find which registers have equivalences. Do this in a separate scan of the insns because (due to -fcse-follow-jumps) a register can be set below its use. 
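As a rough example of the equivalences being looked for: a pseudo that is set exactly once, say (set (reg 100) (const_int 42)), has the setting insn recorded in reg_equiv[100].init_insns and its REG_EQUAL note promoted to a REG_EQUIV note, so that reload may later substitute the constant for the register if no hard register can be found for it.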
*/ FOR_EACH_BB (bb) { loop_depth = bb->loop_depth; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { rtx note; rtx set; rtx dest, src; int regno; if (! INSN_P (insn)) continue; for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_INC) no_equiv (XEXP (note, 0), note, NULL); set = single_set (insn); /* If this insn contains more (or less) than a single SET, only mark all destinations as having no known equivalence. */ if (set == 0) { note_stores (PATTERN (insn), no_equiv, NULL); continue; } else if (GET_CODE (PATTERN (insn)) == PARALLEL) { int i; for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) { rtx part = XVECEXP (PATTERN (insn), 0, i); if (part != set) note_stores (part, no_equiv, NULL); } } dest = SET_DEST (set); src = SET_SRC (set); /* If this sets a MEM to the contents of a REG that is only used in a single basic block, see if the register is always equivalent to that memory location and if moving the store from INSN to the insn that set REG is safe. If so, put a REG_EQUIV note on the initializing insn. Don't add a REG_EQUIV note if the insn already has one. The existing REG_EQUIV is likely more useful than the one we are adding. If one of the regs in the address has reg_equiv[REGNO].replace set, then we can't add this REG_EQUIV note. The reg_equiv[REGNO].replace optimization may move the set of this register immediately before insn, which puts it after reg_equiv[REGNO].init_insns, and hence the mention in the REG_EQUIV note would be to an uninitialized pseudo. */ /* ????? This test isn't good enough; we might see a MEM with a use of a pseudo register before we see its setting insn that will cause reg_equiv[].replace for that pseudo to be set. Equivalences to MEMs should be made in another pass, after the reg_equiv[].replace information has been gathered. */ if (MEM_P (dest) && REG_P (src) && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER && REG_BASIC_BLOCK (regno) >= 0 && REG_N_SETS (regno) == 1 && reg_equiv[regno].init_insns != 0 && reg_equiv[regno].init_insns != const0_rtx && ! find_reg_note (XEXP (reg_equiv[regno].init_insns, 0), REG_EQUIV, NULL_RTX) && ! contains_replace_regs (XEXP (dest, 0))) { rtx init_insn = XEXP (reg_equiv[regno].init_insns, 0); if (validate_equiv_mem (init_insn, src, dest) && ! memref_used_between_p (dest, init_insn, insn)) REG_NOTES (init_insn) = gen_rtx_EXPR_LIST (REG_EQUIV, dest, REG_NOTES (init_insn)); } /* We only handle the case of a pseudo register being set once, or always to the same value. */ /* ??? The mn10200 port breaks if we add equivalences for values that need an ADDRESS_REGS register and set them equivalent to a MEM of a pseudo. The actual problem is in the over-conservative handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in calculate_needs, but we traditionally work around this problem here by rejecting equivalences when the destination is in a register that's likely spilled. This is fragile, of course, since the preferred class of a pseudo depends on all instructions that set or use it. */ if (!REG_P (dest) || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER || reg_equiv[regno].init_insns == const0_rtx || (CLASS_LIKELY_SPILLED_P (reg_preferred_class (regno)) && MEM_P (src))) { /* This might be setting a SUBREG of a pseudo, a pseudo that is also set somewhere else to a constant. 
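For instance, one insn may do (set (subreg:HI (reg:SI 100) 0) ...) while another does (set (reg:SI 100) (const_int 0)); no single equivalence is safe for such a register, so every destination of this SET is marked below with no_equiv.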
*/ note_stores (set, no_equiv, NULL); continue; } note = find_reg_note (insn, REG_EQUAL, NULL_RTX); /* cse sometimes generates function invariants, but doesn't put a REG_EQUAL note on the insn. Since this note would be redundant, there's no point creating it earlier than here. */ if (! note && ! rtx_varies_p (src, 0)) note = set_unique_reg_note (insn, REG_EQUAL, src); /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST since it represents a function call */ if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST) note = NULL_RTX; if (REG_N_SETS (regno) != 1 && (! note || rtx_varies_p (XEXP (note, 0), 0) || (reg_equiv[regno].replacement && ! rtx_equal_p (XEXP (note, 0), reg_equiv[regno].replacement)))) { no_equiv (dest, set, NULL); continue; } /* Record this insn as initializing this register. */ reg_equiv[regno].init_insns = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns); /* If this register is known to be equal to a constant, record that it is always equivalent to the constant. */ if (note && ! rtx_varies_p (XEXP (note, 0), 0)) PUT_MODE (note, (enum machine_mode) REG_EQUIV); /* If this insn introduces a "constant" register, decrease the priority of that register. Record this insn if the register is only used once more and the equivalence value is the same as our source. The latter condition is checked for two reasons: First, it is an indication that it may be more efficient to actually emit the insn as written (if no registers are available, reload will substitute the equivalence). Secondly, it avoids problems with any registers dying in this insn whose death notes would be missed. If we don't have a REG_EQUIV note, see if this insn is loading a register used only in one basic block from a MEM. If so, and the MEM remains unchanged for the life of the register, add a REG_EQUIV note. */ note = find_reg_note (insn, REG_EQUIV, NULL_RTX); if (note == 0 && REG_BASIC_BLOCK (regno) >= 0 && MEM_P (SET_SRC (set)) && validate_equiv_mem (insn, dest, SET_SRC (set))) REG_NOTES (insn) = note = gen_rtx_EXPR_LIST (REG_EQUIV, SET_SRC (set), REG_NOTES (insn)); if (note) { int regno = REGNO (dest); /* Record whether or not we created a REG_EQUIV note for a LABEL_REF. We might end up substituting the LABEL_REF for uses of the pseudo here or later. That kind of transformation may turn an indirect jump into a direct jump, in which case we must rerun the jump optimizer to ensure that the JUMP_LABEL fields are valid. */ if (GET_CODE (XEXP (note, 0)) == LABEL_REF || (GET_CODE (XEXP (note, 0)) == CONST && GET_CODE (XEXP (XEXP (note, 0), 0)) == PLUS && (GET_CODE (XEXP (XEXP (XEXP (note, 0), 0), 0)) == LABEL_REF))) recorded_label_ref = 1; reg_equiv[regno].replacement = XEXP (note, 0); reg_equiv[regno].src_p = &SET_SRC (set); reg_equiv[regno].loop_depth = loop_depth; /* Don't mess with things live during setjmp. */ if (REG_LIVE_LENGTH (regno) >= 0 && optimize) { /* Note that the statement below does not affect the priority in local-alloc! */ REG_LIVE_LENGTH (regno) *= 2; /* If the register is referenced exactly twice, meaning it is set once and used once, indicate that the reference may be replaced by the equivalence we computed above. Do this even if the register is only used in one block so that dependencies can be handled where the last register is used in a different block (i.e. HIGH / LO_SUM sequences) and to reduce the number of registers alive across calls. */ if (REG_N_REFS (regno) == 2 && (rtx_equal_p (XEXP (note, 0), src) || ! 
equiv_init_varies_p (src)) && GET_CODE (insn) == INSN && equiv_init_movable_p (PATTERN (insn), regno)) reg_equiv[regno].replace = 1; } } } } /* Now scan all regs killed in an insn to see if any of them are registers only used that once. If so, see if we can replace the reference with the equivalent from. If we can, delete the initializing reference and this register will go away. If we can't replace the reference, and the initializing reference is within the same loop (or in an inner loop), then move the register initialization just before the use, so that they are in the same basic block. */ FOR_EACH_BB_REVERSE (bb) { loop_depth = bb->loop_depth; for (insn = BB_END (bb); insn != PREV_INSN (BB_HEAD (bb)); insn = PREV_INSN (insn)) { rtx link; if (! INSN_P (insn)) continue; for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) { if (REG_NOTE_KIND (link) == REG_DEAD /* Make sure this insn still refers to the register. */ && reg_mentioned_p (XEXP (link, 0), PATTERN (insn))) { int regno = REGNO (XEXP (link, 0)); rtx equiv_insn; if (! reg_equiv[regno].replace || reg_equiv[regno].loop_depth < loop_depth) continue; /* reg_equiv[REGNO].replace gets set only when REG_N_REFS[REGNO] is 2, i.e. the register is set once and used once. (If it were only set, but not used, flow would have deleted the setting insns.) Hence there can only be one insn in reg_equiv[REGNO].init_insns. */ if (reg_equiv[regno].init_insns == NULL_RTX || XEXP (reg_equiv[regno].init_insns, 1) != NULL_RTX) abort (); equiv_insn = XEXP (reg_equiv[regno].init_insns, 0); /* We may not move instructions that can throw, since that changes basic block boundaries and we are not prepared to adjust the CFG to match. */ if (can_throw_internal (equiv_insn)) continue; if (asm_noperands (PATTERN (equiv_insn)) < 0 && validate_replace_rtx (regno_reg_rtx[regno], *(reg_equiv[regno].src_p), insn)) { rtx equiv_link; rtx last_link; rtx note; /* Find the last note. */ for (last_link = link; XEXP (last_link, 1); last_link = XEXP (last_link, 1)) ; /* Append the REG_DEAD notes from equiv_insn. */ equiv_link = REG_NOTES (equiv_insn); while (equiv_link) { note = equiv_link; equiv_link = XEXP (equiv_link, 1); if (REG_NOTE_KIND (note) == REG_DEAD) { remove_note (equiv_insn, note); XEXP (last_link, 1) = note; XEXP (note, 1) = NULL_RTX; last_link = note; } } remove_death (regno, insn); REG_N_REFS (regno) = 0; REG_FREQ (regno) = 0; delete_insn (equiv_insn); reg_equiv[regno].init_insns = XEXP (reg_equiv[regno].init_insns, 1); } /* Move the initialization of the register to just before INSN. Update the flow information. */ else if (PREV_INSN (insn) != equiv_insn) { rtx new_insn; new_insn = emit_insn_before (PATTERN (equiv_insn), insn); REG_NOTES (new_insn) = REG_NOTES (equiv_insn); REG_NOTES (equiv_insn) = 0; /* Make sure this insn is recognized before reload begins, otherwise eliminate_regs_in_insn will abort. */ INSN_CODE (new_insn) = INSN_CODE (equiv_insn); delete_insn (equiv_insn); XEXP (reg_equiv[regno].init_insns, 0) = new_insn; REG_BASIC_BLOCK (regno) = bb->index; REG_N_CALLS_CROSSED (regno) = 0; REG_LIVE_LENGTH (regno) = 2; if (insn == BB_HEAD (bb)) BB_HEAD (bb) = PREV_INSN (insn); /* Remember to clear REGNO from all basic block's live info. */ SET_REGNO_REG_SET (&cleared_regs, regno); clear_regnos++; } } } } } /* Clear all dead REGNOs from all basic block's live info. 
*/ if (clear_regnos) { int j; if (clear_regnos > 8) { FOR_EACH_BB (bb) { AND_COMPL_REG_SET (bb->global_live_at_start, &cleared_regs); AND_COMPL_REG_SET (bb->global_live_at_end, &cleared_regs); } } else EXECUTE_IF_SET_IN_REG_SET (&cleared_regs, 0, j, { FOR_EACH_BB (bb) { CLEAR_REGNO_REG_SET (bb->global_live_at_start, j); CLEAR_REGNO_REG_SET (bb->global_live_at_end, j); } }); } /* Clean up. */ end_alias_analysis (); CLEAR_REG_SET (&cleared_regs); free (reg_equiv); } /* Mark REG as having no known equivalence. Some instructions might have been processed before and furnished with REG_EQUIV notes for this register; these notes will have to be removed. STORE is the piece of RTL that does the non-constant / conflicting assignment - a SET, CLOBBER or REG_INC note. It is currently not used, but needs to be there because this function is called from note_stores. */ static void no_equiv (rtx reg, rtx store ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { int regno; rtx list; if (!REG_P (reg)) return; regno = REGNO (reg); list = reg_equiv[regno].init_insns; if (list == const0_rtx) return; for (; list; list = XEXP (list, 1)) { rtx insn = XEXP (list, 0); remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX)); } reg_equiv[regno].init_insns = const0_rtx; reg_equiv[regno].replacement = NULL_RTX; } /* Allocate hard regs to the pseudo regs used only within block number B. Only the pseudos that die but once can be handled. */ static void block_alloc (int b) { int i, q; rtx insn; rtx note, hard_reg; int insn_number = 0; int insn_count = 0; int max_uid = get_max_uid (); int *qty_order; int no_conflict_combined_regno = -1; /* Count the instructions in the basic block. */ insn = BB_END (BASIC_BLOCK (b)); while (1) { if (GET_CODE (insn) != NOTE) if (++insn_count > max_uid) abort (); if (insn == BB_HEAD (BASIC_BLOCK (b))) break; insn = PREV_INSN (insn); } /* +2 to leave room for a post_mark_life at the last insn and for the birth of a CLOBBER in the first insn. */ regs_live_at = xcalloc ((2 * insn_count + 2), sizeof (HARD_REG_SET)); /* Initialize table of hardware registers currently live. */ REG_SET_TO_HARD_REG_SET (regs_live, BASIC_BLOCK (b)->global_live_at_start); /* This loop scans the instructions of the basic block and assigns quantities to registers. It computes which registers to tie. */ insn = BB_HEAD (BASIC_BLOCK (b)); while (1) { if (GET_CODE (insn) != NOTE) insn_number++; if (INSN_P (insn)) { rtx link, set; int win = 0; rtx r0, r1 = NULL_RTX; int combined_regno = -1; int i; this_insn_number = insn_number; this_insn = insn; extract_insn (insn); which_alternative = -1; /* Is this insn suitable for tying two registers? If so, try doing that. Suitable insns are those with at least two operands and where operand 0 is an output that is a register that is not earlyclobber. We can tie operand 0 with some operand that dies in this insn. First look for operands that are required to be in the same register as operand 0. If we find such, only try tying that operand or one that can be put into that operand if the operation is commutative. If we don't find an operand that is required to be in the same register as operand 0, we can tie with any operand. Subregs in place of regs are also ok. If tying is done, WIN is set nonzero. */ if (optimize && recog_data.n_operands > 1 && recog_data.constraints[0][0] == '=' && recog_data.constraints[0][1] != '&') { /* If non-negative, is an operand that must match operand 0. */ int must_match_0 = -1; /* Counts number of alternatives that require a match with operand 0. 
*/ int n_matching_alts = 0; for (i = 1; i < recog_data.n_operands; i++) { const char *p = recog_data.constraints[i]; int this_match = requires_inout (p); n_matching_alts += this_match; if (this_match == recog_data.n_alternatives) must_match_0 = i; } r0 = recog_data.operand[0]; for (i = 1; i < recog_data.n_operands; i++) { /* Skip this operand if we found an operand that must match operand 0 and this operand isn't it and can't be made to be it by commutativity. */ if (must_match_0 >= 0 && i != must_match_0 && ! (i == must_match_0 + 1 && recog_data.constraints[i-1][0] == '%') && ! (i == must_match_0 - 1 && recog_data.constraints[i][0] == '%')) continue; /* Likewise if each alternative has some operand that must match operand zero. In that case, skip any operand that doesn't list operand 0 since we know that the operand always conflicts with operand 0. We ignore commutativity in this case to keep things simple. */ if (n_matching_alts == recog_data.n_alternatives && 0 == requires_inout (recog_data.constraints[i])) continue; r1 = recog_data.operand[i]; /* If the operand is an address, find a register in it. There may be more than one register, but we only try one of them. */ if (recog_data.constraints[i][0] == 'p' || EXTRA_ADDRESS_CONSTRAINT (recog_data.constraints[i][0], recog_data.constraints[i])) while (GET_CODE (r1) == PLUS || GET_CODE (r1) == MULT) r1 = XEXP (r1, 0); /* Avoid making a call-saved register unnecessarily clobbered. */ hard_reg = get_hard_reg_initial_reg (cfun, r1); if (hard_reg != NULL_RTX) { if (REG_P (hard_reg) && IN_RANGE (REGNO (hard_reg), 0, FIRST_PSEUDO_REGISTER - 1) && ! call_used_regs[REGNO (hard_reg)]) continue; } if (REG_P (r0) || GET_CODE (r0) == SUBREG) { /* We have two priorities for hard register preferences. If we have a move insn or an insn whose first input can only be in the same register as the output, give priority to an equivalence found from that insn. */ int may_save_copy = (r1 == recog_data.operand[i] && must_match_0 >= 0); if (REG_P (r1) || GET_CODE (r1) == SUBREG) win = combine_regs (r1, r0, may_save_copy, insn_number, insn, 0); } if (win) break; } } /* Recognize an insn sequence with an ultimate result which can safely overlap one of the inputs. The sequence begins with a CLOBBER of its result, and ends with an insn that copies the result to itself and has a REG_EQUAL note for an equivalent formula. That note indicates what the inputs are. The result and the input can overlap if each insn in the sequence either doesn't mention the input or has a REG_NO_CONFLICT note to inhibit the conflict. We do the combining test at the CLOBBER so that the destination register won't have had a quantity number assigned, since that would prevent combining. */ if (optimize && GET_CODE (PATTERN (insn)) == CLOBBER && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0)) && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0 && XEXP (link, 0) != 0 && GET_CODE (XEXP (link, 0)) == INSN && (set = single_set (XEXP (link, 0))) != 0 && SET_DEST (set) == r0 && SET_SRC (set) == r0 && (note = find_reg_note (XEXP (link, 0), REG_EQUAL, NULL_RTX)) != 0) { if (r1 = XEXP (note, 0), REG_P (r1) /* Check that we have such a sequence. 
*/ && no_conflict_p (insn, r0, r1)) win = combine_regs (r1, r0, 1, insn_number, insn, 1); else if (GET_RTX_FORMAT (GET_CODE (XEXP (note, 0)))[0] == 'e' && (r1 = XEXP (XEXP (note, 0), 0), REG_P (r1) || GET_CODE (r1) == SUBREG) && no_conflict_p (insn, r0, r1)) win = combine_regs (r1, r0, 0, insn_number, insn, 1); /* Here we care if the operation to be computed is commutative. */ else if (COMMUTATIVE_P (XEXP (note, 0)) && (r1 = XEXP (XEXP (note, 0), 1), (REG_P (r1) || GET_CODE (r1) == SUBREG)) && no_conflict_p (insn, r0, r1)) win = combine_regs (r1, r0, 0, insn_number, insn, 1); /* If we did combine something, show the register number in question so that we know to ignore its death. */ if (win) no_conflict_combined_regno = REGNO (r1); } /* If registers were just tied, set COMBINED_REGNO to the number of the register used in this insn that was tied to the register set in this insn. This register's qty should not be "killed". */ if (win) { while (GET_CODE (r1) == SUBREG) r1 = SUBREG_REG (r1); combined_regno = REGNO (r1); } /* Mark the death of everything that dies in this instruction, except for anything that was just combined. */ for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_DEAD && REG_P (XEXP (link, 0)) && combined_regno != (int) REGNO (XEXP (link, 0)) && (no_conflict_combined_regno != (int) REGNO (XEXP (link, 0)) || ! find_reg_note (insn, REG_NO_CONFLICT, XEXP (link, 0)))) wipe_dead_reg (XEXP (link, 0), 0); /* Allocate qty numbers for all registers local to this block that are born (set) in this instruction. A pseudo that already has a qty is not changed. */ note_stores (PATTERN (insn), reg_is_set, NULL); /* If anything is set in this insn and then unused, mark it as dying after this insn, so it will conflict with our outputs. This can't match with something that combined, and it doesn't matter if it did. Do this after the calls to reg_is_set since these die after, not during, the current insn. */ for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_UNUSED && REG_P (XEXP (link, 0))) wipe_dead_reg (XEXP (link, 0), 1); /* If this is an insn that has a REG_RETVAL note pointing at a CLOBBER insn, we have reached the end of a REG_NO_CONFLICT block, so clear any register number that combined within it. */ if ((note = find_reg_note (insn, REG_RETVAL, NULL_RTX)) != 0 && GET_CODE (XEXP (note, 0)) == INSN && GET_CODE (PATTERN (XEXP (note, 0))) == CLOBBER) no_conflict_combined_regno = -1; } /* Set the registers live after INSN_NUMBER. Note that we never record the registers live before the block's first insn, since no pseudos we care about are live before that insn. */ IOR_HARD_REG_SET (regs_live_at[2 * insn_number], regs_live); IOR_HARD_REG_SET (regs_live_at[2 * insn_number + 1], regs_live); if (insn == BB_END (BASIC_BLOCK (b))) break; insn = NEXT_INSN (insn); } /* Now every register that is local to this basic block should have been given a quantity, or else -1 meaning ignore it. Every quantity should have a known birth and death. Order the qtys so we assign them registers in order of the number of suggested registers they need so we allocate those with the most restrictive needs first. */ qty_order = xmalloc (next_qty * sizeof (int)); for (i = 0; i < next_qty; i++) qty_order[i] = i; #define EXCHANGE(I1, I2) \ { i = qty_order[I1]; qty_order[I1] = qty_order[I2]; qty_order[I2] = i; } switch (next_qty) { case 3: /* Make qty_order[2] be the one to allocate last. 
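Together with the fall-through comparison of elements 0 and 1 in the CASE 2 code, the two exchanges below form a three-element sorting network, so very small blocks avoid a call to qsort.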
*/ if (qty_sugg_compare (0, 1) > 0) EXCHANGE (0, 1); if (qty_sugg_compare (1, 2) > 0) EXCHANGE (2, 1); /* ... Fall through ... */ case 2: /* Put the best one to allocate in qty_order[0]. */ if (qty_sugg_compare (0, 1) > 0) EXCHANGE (0, 1); /* ... Fall through ... */ case 1: case 0: /* Nothing to do here. */ break; default: qsort (qty_order, next_qty, sizeof (int), qty_sugg_compare_1); } /* Try to put each quantity in a suggested physical register, if it has one. This may cause registers to be allocated that otherwise wouldn't be, but this seems acceptable in local allocation (unlike global allocation). */ for (i = 0; i < next_qty; i++) { q = qty_order[i]; if (qty_phys_num_sugg[q] != 0 || qty_phys_num_copy_sugg[q] != 0) qty[q].phys_reg = find_free_reg (qty[q].min_class, qty[q].mode, q, 0, 1, qty[q].birth, qty[q].death); else qty[q].phys_reg = -1; } /* Order the qtys so we assign them registers in order of decreasing length of life. Normally call qsort, but if we have only a very small number of quantities, sort them ourselves. */ for (i = 0; i < next_qty; i++) qty_order[i] = i; #define EXCHANGE(I1, I2) \ { i = qty_order[I1]; qty_order[I1] = qty_order[I2]; qty_order[I2] = i; } switch (next_qty) { case 3: /* Make qty_order[2] be the one to allocate last. */ if (qty_compare (0, 1) > 0) EXCHANGE (0, 1); if (qty_compare (1, 2) > 0) EXCHANGE (2, 1); /* ... Fall through ... */ case 2: /* Put the best one to allocate in qty_order[0]. */ if (qty_compare (0, 1) > 0) EXCHANGE (0, 1); /* ... Fall through ... */ case 1: case 0: /* Nothing to do here. */ break; default: qsort (qty_order, next_qty, sizeof (int), qty_compare_1); } /* Now for each qty that is not a hardware register, look for a hardware register to put it in. First try the register class that is cheapest for this qty, if there is more than one class. */ for (i = 0; i < next_qty; i++) { q = qty_order[i]; if (qty[q].phys_reg < 0) { #ifdef INSN_SCHEDULING /* These values represent the adjusted lifetime of a qty so that it conflicts with qtys which appear near the start/end of this qty's lifetime. The purpose behind extending the lifetime of this qty is to discourage the register allocator from creating false dependencies. The adjustment value is chosen to indicate that this qty conflicts with all the qtys in the instructions immediately before and after the lifetime of this qty. Experiments have shown that higher values tend to hurt overall code performance. If allocation using the extended lifetime fails we will try again with the qty's unadjusted lifetime. */ int fake_birth = MAX (0, qty[q].birth - 2 + qty[q].birth % 2); int fake_death = MIN (insn_number * 2 + 1, qty[q].death + 2 - qty[q].death % 2); #endif if (N_REG_CLASSES > 1) { #ifdef INSN_SCHEDULING /* We try to avoid using hard registers allocated to qtys which are born immediately after this qty or die immediately before this qty. This optimization is only appropriate when we will run a scheduling pass after reload and we are not optimizing for code size. */ if (flag_schedule_insns_after_reload && !optimize_size && !SMALL_REGISTER_CLASSES) { qty[q].phys_reg = find_free_reg (qty[q].min_class, qty[q].mode, q, 0, 0, fake_birth, fake_death); if (qty[q].phys_reg >= 0) continue; } #endif qty[q].phys_reg = find_free_reg (qty[q].min_class, qty[q].mode, q, 0, 0, qty[q].birth, qty[q].death); if (qty[q].phys_reg >= 0) continue; } #ifdef INSN_SCHEDULING /* Similarly, avoid false dependencies. 
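As above, the allocation is first attempted with the padded lifetime: for example a qty with birth 7 and death 12 is tried as if it lived from index 6 to index 14 (the padding is capped at the ends of the block), so that qtys born or dying in the neighbouring insns tend to get different hard registers; if that fails, the unpadded lifetime is tried just below.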
*/ if (flag_schedule_insns_after_reload && !optimize_size && !SMALL_REGISTER_CLASSES && qty[q].alternate_class != NO_REGS) qty[q].phys_reg = find_free_reg (qty[q].alternate_class, qty[q].mode, q, 0, 0, fake_birth, fake_death); #endif if (qty[q].alternate_class != NO_REGS) qty[q].phys_reg = find_free_reg (qty[q].alternate_class, qty[q].mode, q, 0, 0, qty[q].birth, qty[q].death); } } /* Now propagate the register assignments to the pseudo regs belonging to the qtys. */ for (q = 0; q < next_qty; q++) if (qty[q].phys_reg >= 0) { for (i = qty[q].first_reg; i >= 0; i = reg_next_in_qty[i]) reg_renumber[i] = qty[q].phys_reg + reg_offset_local[i]; } /* Clean up. */ free (regs_live_at); free (qty_order); } /* Compare two quantities' priority for getting real registers. We give shorter-lived quantities higher priority. Quantities with more references are also preferred, as are quantities that require multiple registers. This is the identical prioritization as done by global-alloc. We used to give preference to registers with *longer* lives, but using the same algorithm in both local- and global-alloc can speed up execution of some programs by as much as a factor of three! */ /* Note that the quotient will never be bigger than the value of floor_log2 times the maximum number of times a register can occur in one insn (surely less than 100) weighted by frequency (max REG_FREQ_MAX). Multiplying this by 10000/REG_FREQ_MAX can't overflow. QTY_CMP_PRI is also used by qty_sugg_compare. */ #define QTY_CMP_PRI(q) \ ((int) (((double) (floor_log2 (qty[q].n_refs) * qty[q].freq * qty[q].size) \ / (qty[q].death - qty[q].birth)) * (10000 / REG_FREQ_MAX))) static int qty_compare (int q1, int q2) { return QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1); } static int qty_compare_1 (const void *q1p, const void *q2p) { int q1 = *(const int *) q1p, q2 = *(const int *) q2p; int tem = QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1); if (tem != 0) return tem; /* If qtys are equally good, sort by qty number, so that the results of qsort leave nothing to chance. */ return q1 - q2; } /* Compare two quantities' priority for getting real registers. This version is called for quantities that have suggested hard registers. First priority goes to quantities that have copy preferences, then to those that have normal preferences. Within those groups, quantities with the lower number of preferences have the highest priority. Of those, we use the same algorithm as above. */ #define QTY_CMP_SUGG(q) \ (qty_phys_num_copy_sugg[q] \ ? qty_phys_num_copy_sugg[q] \ : qty_phys_num_sugg[q] * FIRST_PSEUDO_REGISTER) static int qty_sugg_compare (int q1, int q2) { int tem = QTY_CMP_SUGG (q1) - QTY_CMP_SUGG (q2); if (tem != 0) return tem; return QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1); } static int qty_sugg_compare_1 (const void *q1p, const void *q2p) { int q1 = *(const int *) q1p, q2 = *(const int *) q2p; int tem = QTY_CMP_SUGG (q1) - QTY_CMP_SUGG (q2); if (tem != 0) return tem; tem = QTY_CMP_PRI (q2) - QTY_CMP_PRI (q1); if (tem != 0) return tem; /* If qtys are equally good, sort by qty number, so that the results of qsort leave nothing to chance. */ return q1 - q2; } #undef QTY_CMP_SUGG #undef QTY_CMP_PRI /* Attempt to combine the two registers (rtx's) USEDREG and SETREG. Returns 1 if have done so, or 0 if cannot. Combining registers means marking them as having the same quantity and adjusting the offsets within the quantity if either of them is a SUBREG. 
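The offsets are kept in reg_offset_local, so that when the quantity finally receives a hard register each pseudo in it is renumbered to that register plus its own offset (see the propagation loop at the end of block_alloc).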
We don't actually combine a hard reg with a pseudo; instead we just record the hard reg as the suggestion for the pseudo's quantity. If we really combined them, we could lose if the pseudo lives across an insn that clobbers the hard reg (eg, movstr). ALREADY_DEAD is nonzero if USEDREG is known to be dead even though there is no REG_DEAD note on INSN. This occurs during the processing of REG_NO_CONFLICT blocks. MAY_SAVE_COPY is nonzero if this insn is simply copying USEDREG to SETREG or if the input and output must share a register. In that case, we record a hard reg suggestion in QTY_PHYS_COPY_SUGG. There are elaborate checks for the validity of combining. */ static int combine_regs (rtx usedreg, rtx setreg, int may_save_copy, int insn_number, rtx insn, int already_dead) { int ureg, sreg; int offset = 0; int usize, ssize; int sqty; /* Determine the numbers and sizes of registers being used. If a subreg is present that does not change the entire register, don't consider this a copy insn. */ while (GET_CODE (usedreg) == SUBREG) { rtx subreg = SUBREG_REG (usedreg); if (REG_P (subreg)) { if (GET_MODE_SIZE (GET_MODE (subreg)) > UNITS_PER_WORD) may_save_copy = 0; if (REGNO (subreg) < FIRST_PSEUDO_REGISTER) offset += subreg_regno_offset (REGNO (subreg), GET_MODE (subreg), SUBREG_BYTE (usedreg), GET_MODE (usedreg)); else offset += (SUBREG_BYTE (usedreg) / REGMODE_NATURAL_SIZE (GET_MODE (usedreg))); } usedreg = subreg; } if (!REG_P (usedreg)) return 0; ureg = REGNO (usedreg); if (ureg < FIRST_PSEUDO_REGISTER) usize = hard_regno_nregs[ureg][GET_MODE (usedreg)]; else usize = ((GET_MODE_SIZE (GET_MODE (usedreg)) + (REGMODE_NATURAL_SIZE (GET_MODE (usedreg)) - 1)) / REGMODE_NATURAL_SIZE (GET_MODE (usedreg))); while (GET_CODE (setreg) == SUBREG) { rtx subreg = SUBREG_REG (setreg); if (REG_P (subreg)) { if (GET_MODE_SIZE (GET_MODE (subreg)) > UNITS_PER_WORD) may_save_copy = 0; if (REGNO (subreg) < FIRST_PSEUDO_REGISTER) offset -= subreg_regno_offset (REGNO (subreg), GET_MODE (subreg), SUBREG_BYTE (setreg), GET_MODE (setreg)); else offset -= (SUBREG_BYTE (setreg) / REGMODE_NATURAL_SIZE (GET_MODE (setreg))); } setreg = subreg; } if (!REG_P (setreg)) return 0; sreg = REGNO (setreg); if (sreg < FIRST_PSEUDO_REGISTER) ssize = hard_regno_nregs[sreg][GET_MODE (setreg)]; else ssize = ((GET_MODE_SIZE (GET_MODE (setreg)) + (REGMODE_NATURAL_SIZE (GET_MODE (setreg)) - 1)) / REGMODE_NATURAL_SIZE (GET_MODE (setreg))); /* If UREG is a pseudo-register that hasn't already been assigned a quantity number, it means that it is not local to this block or dies more than once. In either event, we can't do anything with it. */ if ((ureg >= FIRST_PSEUDO_REGISTER && reg_qty[ureg] < 0) /* Do not combine registers unless one fits within the other. */ || (offset > 0 && usize + offset > ssize) || (offset < 0 && usize + offset < ssize) /* Do not combine with a smaller already-assigned object if that smaller object is already combined with something bigger. */ || (ssize > usize && ureg >= FIRST_PSEUDO_REGISTER && usize < qty[reg_qty[ureg]].size) /* Can't combine if SREG is not a register we can allocate. */ || (sreg >= FIRST_PSEUDO_REGISTER && reg_qty[sreg] == -1) /* Don't combine with a pseudo mentioned in a REG_NO_CONFLICT note. These have already been taken care of. This probably wouldn't combine anyway, but don't take any chances. */ || (ureg >= FIRST_PSEUDO_REGISTER && find_reg_note (insn, REG_NO_CONFLICT, usedreg)) /* Don't tie something to itself. 
In most cases it would make no difference, but it would screw up if the reg being tied to itself also dies in this insn. */ || ureg == sreg /* Don't try to connect two different hardware registers. */ || (ureg < FIRST_PSEUDO_REGISTER && sreg < FIRST_PSEUDO_REGISTER) /* Don't connect two different machine modes if they have different implications as to which registers may be used. */ || !MODES_TIEABLE_P (GET_MODE (usedreg), GET_MODE (setreg))) return 0; /* Now, if UREG is a hard reg and SREG is a pseudo, record the hard reg in qty_phys_sugg for the pseudo instead of tying them. Return "failure" so that the lifespan of UREG is terminated here; that way the two lifespans will be disjoint and nothing will prevent the pseudo reg from being given this hard reg. */ if (ureg < FIRST_PSEUDO_REGISTER) { /* Allocate a quantity number so we have a place to put our suggestions. */ if (reg_qty[sreg] == -2) reg_is_born (setreg, 2 * insn_number); if (reg_qty[sreg] >= 0) { if (may_save_copy && ! TEST_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[sreg]], ureg)) { SET_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[sreg]], ureg); qty_phys_num_copy_sugg[reg_qty[sreg]]++; } else if (! TEST_HARD_REG_BIT (qty_phys_sugg[reg_qty[sreg]], ureg)) { SET_HARD_REG_BIT (qty_phys_sugg[reg_qty[sreg]], ureg); qty_phys_num_sugg[reg_qty[sreg]]++; } } return 0; } /* Similarly for SREG a hard register and UREG a pseudo register. */ if (sreg < FIRST_PSEUDO_REGISTER) { if (may_save_copy && ! TEST_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[ureg]], sreg)) { SET_HARD_REG_BIT (qty_phys_copy_sugg[reg_qty[ureg]], sreg); qty_phys_num_copy_sugg[reg_qty[ureg]]++; } else if (! TEST_HARD_REG_BIT (qty_phys_sugg[reg_qty[ureg]], sreg)) { SET_HARD_REG_BIT (qty_phys_sugg[reg_qty[ureg]], sreg); qty_phys_num_sugg[reg_qty[ureg]]++; } return 0; } /* At this point we know that SREG and UREG are both pseudos. Do nothing if SREG already has a quantity or is a register that we don't allocate. */ if (reg_qty[sreg] >= -1 /* If we are not going to let any regs live across calls, don't tie a call-crossing reg to a non-call-crossing reg. */ || (current_function_has_nonlocal_label && ((REG_N_CALLS_CROSSED (ureg) > 0) != (REG_N_CALLS_CROSSED (sreg) > 0)))) return 0; /* We don't already know about SREG, so tie it to UREG if this is the last use of UREG, provided the classes they want are compatible. */ if ((already_dead || find_regno_note (insn, REG_DEAD, ureg)) && reg_meets_class_p (sreg, qty[reg_qty[ureg]].min_class)) { /* Add SREG to UREG's quantity. */ sqty = reg_qty[ureg]; reg_qty[sreg] = sqty; reg_offset_local[sreg] = reg_offset_local[ureg] + offset; reg_next_in_qty[sreg] = qty[sqty].first_reg; qty[sqty].first_reg = sreg; /* If SREG's reg class is smaller, set qty[SQTY].min_class. */ update_qty_class (sqty, sreg); /* Update info about quantity SQTY. */ qty[sqty].n_calls_crossed += REG_N_CALLS_CROSSED (sreg); qty[sqty].n_refs += REG_N_REFS (sreg); qty[sqty].freq += REG_FREQ (sreg); if (usize < ssize) { int i; for (i = qty[sqty].first_reg; i >= 0; i = reg_next_in_qty[i]) reg_offset_local[i] -= offset; qty[sqty].size = ssize; qty[sqty].mode = GET_MODE (setreg); } } else return 0; return 1; } /* Return 1 if the preferred class of REG allows it to be tied to a quantity or register whose class is CLASS. True if REG's reg class either contains or is contained in CLASS. 
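As a purely illustrative example, on a target with an x86-like class hierarchy a pseudo whose preferred class is AREG could be tied to a quantity whose class is GENERAL_REGS, and vice versa, since one class is a subset of the other; a pseudo preferring FLOAT_REGS could not, since neither class contains the other.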
*/ static int reg_meets_class_p (int reg, enum reg_class class) { enum reg_class rclass = reg_preferred_class (reg); return (reg_class_subset_p (rclass, class) || reg_class_subset_p (class, rclass)); } /* Update the class of QTYNO assuming that REG is being tied to it. */ static void update_qty_class (int qtyno, int reg) { enum reg_class rclass = reg_preferred_class (reg); if (reg_class_subset_p (rclass, qty[qtyno].min_class)) qty[qtyno].min_class = rclass; rclass = reg_alternate_class (reg); if (reg_class_subset_p (rclass, qty[qtyno].alternate_class)) qty[qtyno].alternate_class = rclass; } /* Handle something which alters the value of an rtx REG. REG is whatever is set or clobbered. SETTER is the rtx that is modifying the register. If it is not really a register, we do nothing. The file-global variables `this_insn' and `this_insn_number' carry info from `block_alloc'. */ static void reg_is_set (rtx reg, rtx setter, void *data ATTRIBUTE_UNUSED) { /* Note that note_stores will only pass us a SUBREG if it is a SUBREG of a hard register. These may actually not exist any more. */ if (GET_CODE (reg) != SUBREG && !REG_P (reg)) return; /* Mark this register as being born. If it is used in a CLOBBER, mark it as being born halfway between the previous insn and this insn so that it conflicts with our inputs but not the outputs of the previous insn. */ reg_is_born (reg, 2 * this_insn_number - (GET_CODE (setter) == CLOBBER)); } /* Handle beginning of the life of register REG. BIRTH is the index at which this is happening. */ static void reg_is_born (rtx reg, int birth) { int regno; if (GET_CODE (reg) == SUBREG) { regno = REGNO (SUBREG_REG (reg)); if (regno < FIRST_PSEUDO_REGISTER) regno = subreg_hard_regno (reg, 1); } else regno = REGNO (reg); if (regno < FIRST_PSEUDO_REGISTER) { mark_life (regno, GET_MODE (reg), 1); /* If the register was to have been born earlier that the present insn, mark it as live where it is actually born. */ if (birth < 2 * this_insn_number) post_mark_life (regno, GET_MODE (reg), 1, birth, 2 * this_insn_number); } else { if (reg_qty[regno] == -2) alloc_qty (regno, GET_MODE (reg), PSEUDO_REGNO_SIZE (regno), birth); /* If this register has a quantity number, show that it isn't dead. */ if (reg_qty[regno] >= 0) qty[reg_qty[regno]].death = -1; } } /* Record the death of REG in the current insn. If OUTPUT_P is nonzero, REG is an output that is dying (i.e., it is never used), otherwise it is an input (the normal case). If OUTPUT_P is 1, then we extend the life past the end of this insn. */ static void wipe_dead_reg (rtx reg, int output_p) { int regno = REGNO (reg); /* If this insn has multiple results, and the dead reg is used in one of the results, extend its life to after this insn, so it won't get allocated together with any other result of this insn. It is unsafe to use !single_set here since it will ignore an unused output. Just because an output is unused does not mean the compiler can assume the side effect will not occur. Consider if REG appears in the address of an output and we reload the output. If we allocate REG to the same hard register as an unused output we could set the hard register before the output reload insn. 
*/ if (GET_CODE (PATTERN (this_insn)) == PARALLEL && multiple_sets (this_insn)) { int i; for (i = XVECLEN (PATTERN (this_insn), 0) - 1; i >= 0; i--) { rtx set = XVECEXP (PATTERN (this_insn), 0, i); if (GET_CODE (set) == SET && !REG_P (SET_DEST (set)) && !rtx_equal_p (reg, SET_DEST (set)) && reg_overlap_mentioned_p (reg, SET_DEST (set))) output_p = 1; } } /* If this register is used in an auto-increment address, then extend its life to after this insn, so that it won't get allocated together with the result of this insn. */ if (! output_p && find_regno_note (this_insn, REG_INC, regno)) output_p = 1; if (regno < FIRST_PSEUDO_REGISTER) { mark_life (regno, GET_MODE (reg), 0); /* If a hard register is dying as an output, mark it as in use at the beginning of this insn (the above statement would cause this not to happen). */ if (output_p) post_mark_life (regno, GET_MODE (reg), 1, 2 * this_insn_number, 2 * this_insn_number + 1); } else if (reg_qty[regno] >= 0) qty[reg_qty[regno]].death = 2 * this_insn_number + output_p; } /* Find a block of SIZE words of hard regs in reg_class CLASS that can hold something of machine-mode MODE (but actually we test only the first of the block for holding MODE) and still free between insn BORN_INDEX and insn DEAD_INDEX, and return the number of the first of them. Return -1 if such a block cannot be found. If QTYNO crosses calls, insist on a register preserved by calls, unless ACCEPT_CALL_CLOBBERED is nonzero. If JUST_TRY_SUGGESTED is nonzero, only try to see if the suggested register is available. If not, return -1. */ static int find_free_reg (enum reg_class class, enum machine_mode mode, int qtyno, int accept_call_clobbered, int just_try_suggested, int born_index, int dead_index) { int i, ins; HARD_REG_SET first_used, used; #ifdef ELIMINABLE_REGS static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS; #endif /* Validate our parameters. */ if (born_index < 0 || born_index > dead_index) abort (); /* Don't let a pseudo live in a reg across a function call if we might get a nonlocal goto. */ if (current_function_has_nonlocal_label && qty[qtyno].n_calls_crossed > 0) return -1; if (accept_call_clobbered) COPY_HARD_REG_SET (used, call_fixed_reg_set); else if (qty[qtyno].n_calls_crossed == 0) COPY_HARD_REG_SET (used, fixed_reg_set); else COPY_HARD_REG_SET (used, call_used_reg_set); if (accept_call_clobbered) IOR_HARD_REG_SET (used, losing_caller_save_reg_set); for (ins = born_index; ins < dead_index; ins++) IOR_HARD_REG_SET (used, regs_live_at[ins]); IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) class]); /* Don't use the frame pointer reg in local-alloc even if we may omit the frame pointer, because if we do that and then we need a frame pointer, reload won't know how to move the pseudo to another hard reg. It can move only regs made by global-alloc. This is true of any register that can be eliminated. */ #ifdef ELIMINABLE_REGS for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++) SET_HARD_REG_BIT (used, eliminables[i].from); #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM /* If FRAME_POINTER_REGNUM is not a real register, then protect the one that it might be eliminated into. 
*/ SET_HARD_REG_BIT (used, HARD_FRAME_POINTER_REGNUM); #endif #else SET_HARD_REG_BIT (used, FRAME_POINTER_REGNUM); #endif #ifdef CANNOT_CHANGE_MODE_CLASS cannot_change_mode_set_regs (&used, mode, qty[qtyno].first_reg); #endif /* Normally, the registers that can be used for the first register in a multi-register quantity are the same as those that can be used for subsequent registers. However, if just trying suggested registers, restrict our consideration to them. If there are copy-suggested register, try them. Otherwise, try the arithmetic-suggested registers. */ COPY_HARD_REG_SET (first_used, used); if (just_try_suggested) { if (qty_phys_num_copy_sugg[qtyno] != 0) IOR_COMPL_HARD_REG_SET (first_used, qty_phys_copy_sugg[qtyno]); else IOR_COMPL_HARD_REG_SET (first_used, qty_phys_sugg[qtyno]); } /* If all registers are excluded, we can't do anything. */ GO_IF_HARD_REG_SUBSET (reg_class_contents[(int) ALL_REGS], first_used, fail); /* If at least one would be suitable, test each hard reg. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { #ifdef REG_ALLOC_ORDER int regno = reg_alloc_order[i]; #else int regno = i; #endif if (! TEST_HARD_REG_BIT (first_used, regno) && HARD_REGNO_MODE_OK (regno, mode) && (qty[qtyno].n_calls_crossed == 0 || accept_call_clobbered || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))) { int j; int size1 = hard_regno_nregs[regno][mode]; for (j = 1; j < size1 && ! TEST_HARD_REG_BIT (used, regno + j); j++); if (j == size1) { /* Mark that this register is in use between its birth and death insns. */ post_mark_life (regno, mode, 1, born_index, dead_index); return regno; } #ifndef REG_ALLOC_ORDER /* Skip starting points we know will lose. */ i += j; #endif } } fail: /* If we are just trying suggested register, we have just tried copy- suggested registers, and there are arithmetic-suggested registers, try them. */ /* If it would be profitable to allocate a call-clobbered register and save and restore it around calls, do that. */ if (just_try_suggested && qty_phys_num_copy_sugg[qtyno] != 0 && qty_phys_num_sugg[qtyno] != 0) { /* Don't try the copy-suggested regs again. */ qty_phys_num_copy_sugg[qtyno] = 0; return find_free_reg (class, mode, qtyno, accept_call_clobbered, 1, born_index, dead_index); } /* We need not check to see if the current function has nonlocal labels because we don't put any pseudos that are live over calls in registers in that case. */ if (! accept_call_clobbered && flag_caller_saves && ! just_try_suggested && qty[qtyno].n_calls_crossed != 0 && CALLER_SAVE_PROFITABLE (qty[qtyno].n_refs, qty[qtyno].n_calls_crossed)) { i = find_free_reg (class, mode, qtyno, 1, 0, born_index, dead_index); if (i >= 0) caller_save_needed = 1; return i; } return -1; } /* Mark that REGNO with machine-mode MODE is live starting from the current insn (if LIFE is nonzero) or dead starting at the current insn (if LIFE is zero). */ static void mark_life (int regno, enum machine_mode mode, int life) { int j = hard_regno_nregs[regno][mode]; if (life) while (--j >= 0) SET_HARD_REG_BIT (regs_live, regno + j); else while (--j >= 0) CLEAR_HARD_REG_BIT (regs_live, regno + j); } /* Mark register number REGNO (with machine-mode MODE) as live (if LIFE is nonzero) or dead (if LIFE is zero) from insn number BIRTH (inclusive) to insn number DEATH (exclusive). 
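BIRTH and DEATH index regs_live_at, where insn numbers are doubled: index 2*N corresponds to insn N itself and the odd indices to the gaps between insns (which is why a CLOBBER is born at 2*N - 1 and an unused output dies at 2*N + 1). For example, a register whose quantity is born at insn 3 and dies as an input of insn 5 has birth index 6 and death index 10, and is therefore marked in regs_live_at[6] through regs_live_at[9].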
*/ static void post_mark_life (int regno, enum machine_mode mode, int life, int birth, int death) { int j = hard_regno_nregs[regno][mode]; HARD_REG_SET this_reg; CLEAR_HARD_REG_SET (this_reg); while (--j >= 0) SET_HARD_REG_BIT (this_reg, regno + j); if (life) while (birth < death) { IOR_HARD_REG_SET (regs_live_at[birth], this_reg); birth++; } else while (birth < death) { AND_COMPL_HARD_REG_SET (regs_live_at[birth], this_reg); birth++; } } /* INSN is the CLOBBER insn that starts a REG_NO_NOCONFLICT block, R0 is the register being clobbered, and R1 is a register being used in the equivalent expression. If R1 dies in the block and has a REG_NO_CONFLICT note on every insn in which it is used, return 1. Otherwise, return 0. */ static int no_conflict_p (rtx insn, rtx r0 ATTRIBUTE_UNUSED, rtx r1) { int ok = 0; rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX); rtx p, last; /* If R1 is a hard register, return 0 since we handle this case when we scan the insns that actually use it. */ if (note == 0 || (REG_P (r1) && REGNO (r1) < FIRST_PSEUDO_REGISTER) || (GET_CODE (r1) == SUBREG && REG_P (SUBREG_REG (r1)) && REGNO (SUBREG_REG (r1)) < FIRST_PSEUDO_REGISTER)) return 0; last = XEXP (note, 0); for (p = NEXT_INSN (insn); p && p != last; p = NEXT_INSN (p)) if (INSN_P (p)) { if (find_reg_note (p, REG_DEAD, r1)) ok = 1; /* There must be a REG_NO_CONFLICT note on every insn, otherwise some earlier optimization pass has inserted instructions into the sequence, and it is not safe to perform this optimization. Note that emit_no_conflict_block always ensures that this is true when these sequences are created. */ if (! find_reg_note (p, REG_NO_CONFLICT, r1)) return 0; } return ok; } /* Return the number of alternatives for which the constraint string P indicates that the operand must be equal to operand 0 and that no register is acceptable. */ static int requires_inout (const char *p) { char c; int found_zero = 0; int reg_allowed = 0; int num_matching_alts = 0; int len; for ( ; (c = *p); p += len) { len = CONSTRAINT_LEN (c, p); switch (c) { case '=': case '+': case '?': case '#': case '&': case '!': case '*': case '%': case 'm': case '<': case '>': case 'V': case 'o': case 'E': case 'F': case 'G': case 'H': case 's': case 'i': case 'n': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'X': /* These don't say anything we care about. */ break; case ',': if (found_zero && ! reg_allowed) num_matching_alts++; found_zero = reg_allowed = 0; break; case '0': found_zero = 1; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': /* Skip the balance of the matching constraint. */ do p++; while (ISDIGIT (*p)); len = 0; break; default: if (REG_CLASS_FROM_CONSTRAINT (c, p) == NO_REGS && !EXTRA_ADDRESS_CONSTRAINT (c, p)) break; /* Fall through. */ case 'p': case 'g': case 'r': reg_allowed = 1; break; } } if (found_zero && ! reg_allowed) num_matching_alts++; return num_matching_alts; } void dump_local_alloc (FILE *file) { int i; for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) if (reg_renumber[i] != -1) fprintf (file, ";; Register %d in %d.\n", i, reg_renumber[i]); } /* Perform various loop optimizations, including strength reduction. Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This is the loop optimization pass of the compiler. It finds invariant computations within loops and moves them to the beginning of the loop. Then it identifies basic and general induction variables. Basic induction variables (BIVs) are a pseudo registers which are set within a loop only by incrementing or decrementing its value. General induction variables (GIVs) are pseudo registers with a value which is a linear function of a basic induction variable. BIVs are recognized by `basic_induction_var'; GIVs by `general_induction_var'. Once induction variables are identified, strength reduction is applied to the general induction variables, and induction variable elimination is applied to the basic induction variables. It also finds cases where a register is set within the loop by zero-extending a narrower value and changes these to zero the entire register once before the loop and merely copy the low part within the loop. Most of the complexity is in heuristics to decide when it is worth while to do these things. */ /* Loop optimization definitions for GCC Copyright (C) 1991, 1995, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_LOOP_H #define GCC_LOOP_H /* Flags passed to loop_optimize. */ #define LOOP_UNROLL 1 #define LOOP_PREFETCH 2 #define LOOP_AUTO_UNROLL 4 /* Get the loop info pointer of a loop. */ #define LOOP_INFO(LOOP) ((struct loop_info *) (LOOP)->aux) /* Get a pointer to the loop movables structure. */ #define LOOP_MOVABLES(LOOP) (&LOOP_INFO (LOOP)->movables) /* Get a pointer to the loop registers structure. */ #define LOOP_REGS(LOOP) (&LOOP_INFO (LOOP)->regs) /* Get a pointer to the loop induction variables structure. */ #define LOOP_IVS(LOOP) (&LOOP_INFO (LOOP)->ivs) /* Get the luid of an insn. Catch the error of trying to reference the LUID of an insn added during loop, since these don't have LUIDs. */ #define LOOP_INSN_LUID(INSN) \ (INSN_UID (INSN) < max_uid_for_loop ? uid_luid[INSN_UID (INSN)] \ : (abort (), -1)) #define REGNO_FIRST_LUID(REGNO) \ (REGNO_FIRST_UID (REGNO) < max_uid_for_loop \ ? uid_luid[REGNO_FIRST_UID (REGNO)] \ : 0) #define REGNO_LAST_LUID(REGNO) \ (REGNO_LAST_UID (REGNO) < max_uid_for_loop \ ? 
uid_luid[REGNO_LAST_UID (REGNO)] \ : INT_MAX) /* A "basic induction variable" or biv is a pseudo reg that is set (within this loop) only by incrementing or decrementing it. */ /* A "general induction variable" or giv is a pseudo reg whose value is a linear function of a biv. */ /* Bivs are recognized by `basic_induction_var'; Givs by `general_induction_var'. */ /* An enum for the two different types of givs, those that are used as memory addresses and those that are calculated into registers. */ enum g_types { DEST_ADDR, DEST_REG }; /* A `struct induction' is created for every instruction that sets an induction variable (either a biv or a giv). */ struct induction { rtx insn; /* The insn that sets a biv or giv */ rtx new_reg; /* New register, containing strength reduced version of this giv. */ rtx src_reg; /* Biv from which this giv is computed. (If this is a biv, then this is the biv.) */ enum g_types giv_type; /* Indicate whether DEST_ADDR or DEST_REG */ rtx dest_reg; /* Destination register for insn: this is the register which was the biv or giv. For a biv, this equals src_reg. For a DEST_ADDR type giv, this is 0. */ rtx *location; /* Place in the insn where this giv occurs. If GIV_TYPE is DEST_REG, this is 0. */ /* For a biv, this is the place where add_val was found. */ enum machine_mode mode; /* The mode of this biv or giv */ rtx mem; /* For DEST_ADDR, the memory object. */ rtx mult_val; /* Multiplicative factor for src_reg. */ rtx add_val; /* Additive constant for that product. */ int benefit; /* Gain from eliminating this insn. */ rtx final_value; /* If the giv is used outside the loop, and its final value could be calculated, it is put here, and the giv is made replaceable. Set the giv to this value before the loop. */ unsigned combined_with; /* The number of givs this giv has been combined with. If nonzero, this giv cannot combine with any other giv. */ unsigned replaceable : 1; /* 1 if we can substitute the strength-reduced variable for the original variable. 0 means they must be kept separate and the new one must be copied into the old pseudo reg each time the old one is set. */ unsigned not_replaceable : 1; /* Used to prevent duplicating work. This is 1 if we know that the giv definitely can not be made replaceable, in which case we don't bother checking the variable again even if further info is available. Both this and the above can be zero. */ unsigned ignore : 1; /* 1 prohibits further processing of giv */ unsigned always_computable : 1;/* 1 if this value is computable every iteration. */ unsigned always_executed : 1; /* 1 if this set occurs each iteration. */ unsigned maybe_multiple : 1; /* Only used for a biv and 1 if this biv update may be done multiple times per iteration. */ unsigned cant_derive : 1; /* For giv's, 1 if this giv cannot derive another giv. This occurs in many cases where a giv's lifetime spans an update to a biv. */ unsigned maybe_dead : 1; /* 1 if this giv might be dead. In that case, we won't use it to eliminate a biv, it would probably lose. */ unsigned auto_inc_opt : 1; /* 1 if this giv had its increment output next to it to try to form an auto-inc address. */ unsigned unrolled : 1; /* 1 if new register has been allocated and initialized in unrolled loop. */ unsigned shared : 1; unsigned no_const_addval : 1; /* 1 if add_val does not contain a const. */ int lifetime; /* Length of life of this giv */ rtx derive_adjustment; /* If nonzero, is an adjustment to be subtracted from add_val when this giv derives another. 
This occurs when the giv spans a biv update by incrementation. */ rtx ext_dependent; /* If nonzero, is a sign or zero extension of a biv on which this giv is dependent. */ struct induction *next_iv; /* For givs, links together all givs that are based on the same biv. For bivs, links together all biv entries that refer to the same biv register. */ struct induction *same; /* For givs, if the giv has been combined with another giv, this points to the base giv. The base giv will have COMBINED_WITH nonzero. For bivs, if the biv has the same LOCATION as another biv, this points to the base biv. */ HOST_WIDE_INT const_adjust; /* Used by loop unrolling, when an address giv is split, and a constant is eliminated from the address, the -constant is stored here for later use. */ struct induction *same_insn; /* If there are multiple identical givs in the same insn, then all but one have this field set, and they all point to the giv that doesn't have this field set. */ rtx last_use; /* For a giv made from a biv increment, this is a substitute for the lifetime information. */ }; /* A `struct iv_class' is created for each biv. */ struct iv_class { unsigned int regno; /* Pseudo reg which is the biv. */ int biv_count; /* Number of insns setting this reg. */ struct induction *biv; /* List of all insns that set this reg. */ int giv_count; /* Number of DEST_REG givs computed from this biv. The resulting count is only used in check_dbra_loop. */ struct induction *giv; /* List of all insns that compute a giv from this reg. */ int total_benefit; /* Sum of BENEFITs of all those givs. */ rtx initial_value; /* Value of reg at loop start. */ rtx initial_test; /* Test performed on BIV before loop. */ rtx final_value; /* Value of reg at loop end, if known. */ struct iv_class *next; /* Links all class structures together. */ rtx init_insn; /* insn which initializes biv, 0 if none. */ rtx init_set; /* SET of INIT_INSN, if any. */ unsigned incremented : 1; /* 1 if somewhere incremented/decremented */ unsigned eliminable : 1; /* 1 if plausible candidate for elimination. */ unsigned nonneg : 1; /* 1 if we added a REG_NONNEG note for this. */ unsigned reversed : 1; /* 1 if we reversed the loop that this biv controls. */ unsigned all_reduced : 1; /* 1 if all givs using this biv have been reduced. */ }; /* Definitions used by the basic induction variable discovery code. */ enum iv_mode { UNKNOWN_INDUCT, BASIC_INDUCT, NOT_BASIC_INDUCT, GENERAL_INDUCT }; /* A `struct iv' is created for every register. */ struct iv { enum iv_mode type; union { struct iv_class *class; struct induction *info; } iv; }; #define REG_IV_TYPE(ivs, n) ivs->regs[n].type #define REG_IV_INFO(ivs, n) ivs->regs[n].iv.info #define REG_IV_CLASS(ivs, n) ivs->regs[n].iv.class struct loop_ivs { /* Indexed by register number, contains pointer to `struct iv' if register is an induction variable. */ struct iv *regs; /* Size of regs array. */ unsigned int n_regs; /* The head of a list which links together (via the next field) every iv class for the current loop. */ struct iv_class *list; }; typedef struct loop_mem_info { rtx mem; /* The MEM itself. */ rtx reg; /* Corresponding pseudo, if any. */ int optimize; /* Nonzero if we can optimize access to this MEM. */ } loop_mem_info; struct loop_reg { /* Number of times the reg is set during the loop being scanned.
During code motion, a negative value indicates a reg that has been made a candidate; in particular -2 means that it is an candidate that we know is equal to a constant and -1 means that it is a candidate not known equal to a constant. After code motion, regs moved have 0 (which is accurate now) while the failed candidates have the original number of times set. Therefore, at all times, == 0 indicates an invariant register; < 0 a conditionally invariant one. */ int set_in_loop; /* Original value of set_in_loop; same except that this value is not set negative for a reg whose sets have been made candidates and not set to 0 for a reg that is moved. */ int n_times_set; /* Contains the insn in which a register was used if it was used exactly once; contains const0_rtx if it was used more than once. */ rtx single_usage; /* Nonzero indicates that the register cannot be moved or strength reduced. */ char may_not_optimize; /* Nonzero means reg N has already been moved out of one loop. This reduces the desire to move it out of another. */ char moved_once; }; struct loop_regs { int num; /* Number of regs used in table. */ int size; /* Size of table. */ struct loop_reg *array; /* Register usage info. array. */ int multiple_uses; /* Nonzero if a reg has multiple uses. */ }; struct loop_movables { /* Head of movable chain. */ struct movable *head; /* Last movable in chain. */ struct movable *last; }; /* Information pertaining to a loop. */ struct loop_info { /* Nonzero if there is a subroutine call in the current loop. */ int has_call; /* Nonzero if there is a libcall in the current loop. */ int has_libcall; /* Nonzero if there is a non constant call in the current loop. */ int has_nonconst_call; /* Nonzero if there is a prefetch instruction in the current loop. */ int has_prefetch; /* Nonzero if there is a volatile memory reference in the current loop. */ int has_volatile; /* Nonzero if there is a tablejump in the current loop. */ int has_tablejump; /* Nonzero if there are ways to leave the loop other than falling off the end. */ int has_multiple_exit_targets; /* Nonzero if there is an indirect jump in the current function. */ int has_indirect_jump; /* Whether loop unrolling has emitted copies of the loop body so that the main loop needs no exit tests. */ int preconditioned; /* Register or constant initial loop value. */ rtx initial_value; /* Register or constant value used for comparison test. */ rtx comparison_value; /* Register or constant approximate final value. */ rtx final_value; /* Register or constant initial loop value with term common to final_value removed. */ rtx initial_equiv_value; /* Register or constant final loop value with term common to initial_value removed. */ rtx final_equiv_value; /* Register corresponding to iteration variable. */ rtx iteration_var; /* Constant loop increment. */ rtx increment; enum rtx_code comparison_code; /* Holds the number of loop iterations. It is zero if the number could not be calculated. Must be unsigned since the number of iterations can be as high as 2^wordsize - 1. For loops with a wider iterator, this number will be zero if the number of loop iterations is too large for an unsigned integer to hold. */ unsigned HOST_WIDE_INT n_iterations; /* The number of times the loop body was unrolled. */ unsigned int unroll_number; int used_count_register; /* The loop iterator induction variable. */ struct iv_class *iv; /* List of MEMs that are stored in this loop. 
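(Kept as an EXPR_LIST of the stored MEM rtx's themselves; the entries are gathered by note_addr_stored as prescan_loop walks the loop body. Interpretive note added for orientation, not part of the original comment.)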
*/ rtx store_mems; /* Array of MEMs that are used (read or written) in this loop, but cannot be aliased by anything in this loop, except perhaps themselves. In other words, if mems[i] is altered during the loop, it is altered by an expression that is rtx_equal_p to it. */ loop_mem_info *mems; /* The index of the next available slot in MEMS. */ int mems_idx; /* The number of elements allocated in MEMS. */ int mems_allocated; /* Nonzero if we don't know what MEMs were changed in the current loop. This happens if the loop contains a call (in which case `has_call' will also be set) or if we store into more than NUM_STORES MEMs. */ int unknown_address_altered; /* The above doesn't count any readonly memory locations that are stored. This does. */ int unknown_constant_address_altered; /* Count of memory write instructions discovered in the loop. */ int num_mem_sets; /* The insn where the first of these was found. */ rtx first_loop_store_insn; /* The chain of movable insns in loop. */ struct loop_movables movables; /* The registers used the in loop. */ struct loop_regs regs; /* The induction variable information in loop. */ struct loop_ivs ivs; /* Nonzero if call is in pre_header extended basic block. */ int pre_header_has_call; }; /* Variables declared in loop.c, but also needed in unroll.c. */ extern int *uid_luid; extern int max_uid_for_loop; extern unsigned int max_reg_before_loop; extern struct loop **uid_loop; extern FILE *loop_dump_stream; /* Forward declarations for non-static functions declared in loop.c and unroll.c. */ extern int loop_invariant_p (const struct loop *, rtx); extern rtx get_condition (rtx jump, rtx *earliest, int allow_cc_mode); extern rtx get_condition_for_loop (const struct loop *, rtx); extern void loop_iv_add_mult_hoist (const struct loop *, rtx, rtx, rtx, rtx); extern void loop_iv_add_mult_sink (const struct loop *, rtx, rtx, rtx, rtx); extern void loop_iv_add_mult_emit_before (const struct loop *, rtx, rtx, rtx, rtx, basic_block, rtx); extern rtx express_from (struct induction *, struct induction *); extern rtx extend_value_for_giv (struct induction *, rtx); extern void unroll_loop (struct loop *, int, int); extern rtx biv_total_increment (const struct iv_class *); extern unsigned HOST_WIDE_INT loop_iterations (struct loop *); extern int precondition_loop_p (const struct loop *, rtx *, rtx *, rtx *, enum machine_mode *mode); extern rtx final_biv_value (const struct loop *, struct iv_class *); extern rtx final_giv_value (const struct loop *, struct induction *); extern void emit_unrolled_add (rtx, rtx, rtx); extern int back_branch_in_range_p (const struct loop *, rtx); extern int loop_insn_first_p (rtx, rtx); typedef rtx (*loop_insn_callback) (struct loop *, rtx, int, int); extern void for_each_insn_in_loop (struct loop *, loop_insn_callback); extern rtx loop_insn_emit_before (const struct loop *, basic_block, rtx, rtx); extern rtx loop_insn_sink (const struct loop *, rtx); extern rtx loop_insn_hoist (const struct loop *, rtx); /* Forward declarations for non-static functions declared in doloop.c. */ extern bool doloop_optimize (struct loop *); #endif /* GCC_LOOP_H */ /* Not really meaningful values, but at least something. */ #ifndef SIMULTANEOUS_PREFETCHES #define SIMULTANEOUS_PREFETCHES 3 #endif #ifndef PREFETCH_BLOCK #define PREFETCH_BLOCK 32 #endif #ifndef HAVE_prefetch #define HAVE_prefetch 0 #define CODE_FOR_prefetch 0 #define gen_prefetch(a,b,c) (abort(), NULL_RTX) #endif /* Give up the prefetch optimizations once we exceed a given threshold. 
It is unlikely that we would be able to optimize something in a loop with so many detected prefetches. */ #define MAX_PREFETCHES 100 /* The number of prefetch blocks that are beneficial to fetch at once before a loop with a known (and low) iteration count. */ #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6 /* For very tiny loops it is not worthwhile to prefetch even before the loop, since it is likely that the data are already in the cache. */ #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2 /* Parameterize some prefetch heuristics so they can be turned on and off easily for performance testing on new architectures. These can be defined in target-dependent files. */ /* Prefetch is worthwhile only when loads/stores are dense. */ #ifndef PREFETCH_ONLY_DENSE_MEM #define PREFETCH_ONLY_DENSE_MEM 1 #endif /* Define what we mean by "dense" loads and stores: this value divided by 256 is the minimum fraction of memory references that are worth prefetching (with the default of 220, roughly 86%). */ #ifndef PREFETCH_DENSE_MEM #define PREFETCH_DENSE_MEM 220 #endif /* Do not prefetch for a loop whose iteration count is known to be low. */ #ifndef PREFETCH_NO_LOW_LOOPCNT #define PREFETCH_NO_LOW_LOOPCNT 1 #endif /* Define what we mean by a "low" iteration count. */ #ifndef PREFETCH_LOW_LOOPCNT #define PREFETCH_LOW_LOOPCNT 32 #endif /* Do not prefetch for a loop that contains a function call; such a loop is probably not an internal loop. */ #ifndef PREFETCH_NO_CALL #define PREFETCH_NO_CALL 1 #endif /* Do not prefetch accesses with an extreme stride. */ #ifndef PREFETCH_NO_EXTREME_STRIDE #define PREFETCH_NO_EXTREME_STRIDE 1 #endif /* Define what we mean by an "extreme" stride. */ #ifndef PREFETCH_EXTREME_STRIDE #define PREFETCH_EXTREME_STRIDE 4096 #endif /* Define a limit to how far apart indices can be and still be merged into a single prefetch. */ #ifndef PREFETCH_EXTREME_DIFFERENCE #define PREFETCH_EXTREME_DIFFERENCE 4096 #endif /* Issue prefetch instructions before the loop to fetch data to be used in the first few loop iterations. */ #ifndef PREFETCH_BEFORE_LOOP #define PREFETCH_BEFORE_LOOP 1 #endif /* Do not handle reversed order prefetches (negative stride). */ #ifndef PREFETCH_NO_REVERSE_ORDER #define PREFETCH_NO_REVERSE_ORDER 1 #endif /* Prefetch even if the GIV is in conditional code. */ #ifndef PREFETCH_CONDITIONAL #define PREFETCH_CONDITIONAL 1 #endif #define LOOP_REG_LIFETIME(LOOP, REGNO) \ ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO))) #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \ ((REGNO_LAST_LUID (REGNO) > LOOP_INSN_LUID ((LOOP)->end) \ || REGNO_FIRST_LUID (REGNO) < LOOP_INSN_LUID ((LOOP)->start))) #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \ ((REGNO) < FIRST_PSEUDO_REGISTER \ ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1) /* Vector mapping INSN_UIDs to luids. The luids are like uids but always increase monotonically. We use them to see whether a jump comes from outside a given loop. */ int *uid_luid; /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop number the insn is contained in. */ struct loop **uid_loop; /* 1 + largest uid of any insn. */ int max_uid_for_loop; /* Number of loops detected in current function. Used as index to the next few tables. */ static int max_loop_num; /* Bound on pseudo register number before loop optimization. A pseudo has valid regscan info if its number is < max_reg_before_loop. */ unsigned int max_reg_before_loop; /* The value to pass to the next call of reg_scan_update.
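(Interpretive note, not part of the original comment: this is the highest register number for which register-scan information is already current; scan_loop below passes it to reg_scan_update and then refreshes it from max_reg_num.)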
*/ static int loop_max_reg; /* During the analysis of a loop, a chain of `struct movable's is made to record all the movable insns found. Then the entire chain can be scanned to decide which to move. */ struct movable { rtx insn; /* A movable insn */ rtx set_src; /* The expression this reg is set from. */ rtx set_dest; /* The destination of this SET. */ rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST of any registers used within the LIBCALL. */ int consec; /* Number of consecutive following insns that must be moved with this one. */ unsigned int regno; /* The register it sets */ short lifetime; /* lifetime of that register; may be adjusted when matching movables that load the same value are found. */ short savings; /* Number of insns we can move for this reg, including other movables that force this or match this one. */ ENUM_BITFIELD(machine_mode) savemode : 8; /* Nonzero means it is a mode for a low part that we should avoid changing when clearing the rest of the reg. */ unsigned int cond : 1; /* 1 if only conditionally movable */ unsigned int force : 1; /* 1 means MUST move this insn */ unsigned int global : 1; /* 1 means reg is live outside this loop */ /* If PARTIAL is 1, GLOBAL means something different: that the reg is live outside the range from where it is set to the following label. */ unsigned int done : 1; /* 1 inhibits further processing of this */ unsigned int partial : 1; /* 1 means this reg is used for zero-extending. In particular, moving it does not make it invariant. */ unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to load SRC, rather than copying INSN. */ unsigned int move_insn_first:1;/* Same as above, if this is necessary for the first insn of a consecutive sets group. */ unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */ unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace the original insn with a copy from that pseudo, rather than deleting it. */ struct movable *match; /* First entry for same value */ struct movable *forces; /* An insn that must be moved if this is */ struct movable *next; }; FILE *loop_dump_stream; /* Forward declarations. 
*/ static void invalidate_loops_containing_label (rtx); static void find_and_verify_loops (rtx, struct loops *); static void mark_loop_jump (rtx, struct loop *); static void prescan_loop (struct loop *); static int reg_in_basic_block_p (rtx, rtx); static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx); static int labels_in_range_p (rtx, int); static void count_one_set (struct loop_regs *, rtx, rtx, rtx *); static void note_addr_stored (rtx, rtx, void *); static void note_set_pseudo_multiple_uses (rtx, rtx, void *); static int loop_reg_used_before_p (const struct loop *, rtx, rtx); static rtx find_regs_nested (rtx, rtx); static void scan_loop (struct loop*, int); #if 0 static void replace_call_address (rtx, rtx, rtx); #endif static rtx skip_consec_insns (rtx, int); static int libcall_benefit (rtx); static rtx libcall_other_reg (rtx, rtx); static void record_excess_regs (rtx, rtx, rtx *); static void ignore_some_movables (struct loop_movables *); static void force_movables (struct loop_movables *); static void combine_movables (struct loop_movables *, struct loop_regs *); static int num_unmoved_movables (const struct loop *); static int regs_match_p (rtx, rtx, struct loop_movables *); static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *, struct loop_regs *); static void add_label_notes_loop (rtx, rtx); static void move_movables (struct loop *loop, struct loop_movables *, int, int); static void loop_movables_add (struct loop_movables *, struct movable *); static void loop_movables_free (struct loop_movables *); static int count_nonfixed_reads (const struct loop *, rtx); static void loop_bivs_find (struct loop *); static void loop_bivs_init_find (struct loop *); static void loop_bivs_check (struct loop *); static void loop_givs_find (struct loop *); static void loop_givs_check (struct loop *); static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int); static int loop_giv_reduce_benefit (struct loop *, struct iv_class *, struct induction *, rtx); static void loop_givs_dead_check (struct loop *, struct iv_class *); static void loop_givs_reduce (struct loop *, struct iv_class *); static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *); static void loop_ivs_free (struct loop *); static void strength_reduce (struct loop *, int); static void find_single_use_in_loop (struct loop_regs *, rtx, rtx); static int valid_initial_value_p (rtx, rtx, int, rtx); static void find_mem_givs (const struct loop *, rtx, rtx, int, int); static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx, rtx, rtx *, int, int); static void check_final_value (const struct loop *, struct induction *); static void loop_ivs_dump (const struct loop *, FILE *, int); static void loop_iv_class_dump (const struct iv_class *, FILE *, int); static void loop_biv_dump (const struct induction *, FILE *, int); static void loop_giv_dump (const struct induction *, FILE *, int); static void record_giv (const struct loop *, struct induction *, rtx, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, int, rtx *); static void update_giv_derive (const struct loop *, rtx); static void check_ext_dependent_givs (const struct loop *, struct iv_class *); static int basic_induction_var (const struct loop *, rtx, enum machine_mode, rtx, rtx, rtx *, rtx *, rtx **); static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *); static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *, rtx *, rtx *, int, int *, enum machine_mode); static int consec_sets_giv 
(const struct loop *, int, rtx, rtx, rtx, rtx *, rtx *, rtx *, rtx *); static int check_dbra_loop (struct loop *, int); static rtx express_from_1 (rtx, rtx, rtx); static rtx combine_givs_p (struct induction *, struct induction *); static int cmp_combine_givs_stats (const void *, const void *); static void combine_givs (struct loop_regs *, struct iv_class *); static int product_cheap_p (rtx, rtx); static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int, int, int); static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx, struct iv_class *, int, basic_block, rtx); static int last_use_this_basic_block (rtx, rtx); static void record_initial (rtx, rtx, void *); static void update_reg_last_use (rtx, rtx); static rtx next_insn_in_loop (const struct loop *, rtx); static void loop_regs_scan (const struct loop *, int); static int count_insns_in_loop (const struct loop *); static int find_mem_in_note_1 (rtx *, void *); static rtx find_mem_in_note (rtx); static void load_mems (const struct loop *); static int insert_loop_mem (rtx *, void *); static int replace_loop_mem (rtx *, void *); static void replace_loop_mems (rtx, rtx, rtx, int); static int replace_loop_reg (rtx *, void *); static void replace_loop_regs (rtx insn, rtx, rtx); static void note_reg_stored (rtx, rtx, void *); static void try_copy_prop (const struct loop *, rtx, unsigned int); static void try_swap_copy_prop (const struct loop *, rtx, unsigned int); static rtx check_insn_for_givs (struct loop *, rtx, int, int); static rtx check_insn_for_bivs (struct loop *, rtx, int, int); static rtx gen_add_mult (rtx, rtx, rtx, rtx); static void loop_regs_update (const struct loop *, rtx); static int iv_add_mult_cost (rtx, rtx, rtx, rtx); static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx); static rtx loop_call_insn_emit_before (const struct loop *, basic_block, rtx, rtx); static rtx loop_call_insn_hoist (const struct loop *, rtx); static rtx loop_insn_sink_or_swim (const struct loop *, rtx); static void loop_dump_aux (const struct loop *, FILE *, int); static void loop_delete_insns (rtx, rtx); static HOST_WIDE_INT remove_constant_addition (rtx *); static rtx gen_load_of_final_value (rtx, rtx); void debug_ivs (const struct loop *); void debug_iv_class (const struct iv_class *); void debug_biv (const struct induction *); void debug_giv (const struct induction *); void debug_loop (const struct loop *); void debug_loops (const struct loops *); typedef struct loop_replace_args { rtx match; rtx replacement; rtx insn; } loop_replace_args; /* Nonzero iff INSN is between START and END, inclusive. */ #define INSN_IN_RANGE_P(INSN, START, END) \ (INSN_UID (INSN) < max_uid_for_loop \ && LOOP_INSN_LUID (INSN) >= LOOP_INSN_LUID (START) \ && LOOP_INSN_LUID (INSN) <= LOOP_INSN_LUID (END)) /* Indirect_jump_in_function is computed once per function. */ static int indirect_jump_in_function; static int indirect_jump_in_function_p (rtx); static int compute_luids (rtx, rtx, int); static int biv_elimination_giv_has_0_offset (struct induction *, struct induction *, rtx); /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to copy the value of the strength reduced giv to its original register. */ static int cost_per_copy; /* Cost of using a register, to normalize the benefits of a giv. 
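(Interpretive note, not part of the original comment: init_loop below derives this from the target's address_cost for a plain register address.)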
*/ static int reg_address_cost; void init_loop (void) { rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1); reg_address_cost = address_cost (reg, SImode); cost_per_copy = COSTS_N_INSNS (1); } /* Compute the mapping from uids to luids. LUIDs are numbers assigned to insns, like uids, except that luids increase monotonically through the code. Start at insn START and stop just before END. Assign LUIDs starting with PREV_LUID + 1. Return the last assigned LUID + 1. */ static int compute_luids (rtx start, rtx end, int prev_luid) { int i; rtx insn; for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn)) { if (INSN_UID (insn) >= max_uid_for_loop) continue; /* Don't assign luids to line-number NOTEs, so that the distance in luids between two insns is not affected by -g. */ if (GET_CODE (insn) != NOTE || NOTE_LINE_NUMBER (insn) <= 0) uid_luid[INSN_UID (insn)] = ++i; else /* Give a line number note the same luid as preceding insn. */ uid_luid[INSN_UID (insn)] = i; } return i + 1; } /* Entry point of this file. Perform loop optimization on the current function. F is the first insn of the function and DUMPFILE is a stream for output of a trace of actions taken (or 0 if none should be output). */ void loop_optimize (rtx f, FILE *dumpfile, int flags) { rtx insn; int i; struct loops loops_data; struct loops *loops = &loops_data; struct loop_info *loops_info; loop_dump_stream = dumpfile; init_recog_no_volatile (); max_reg_before_loop = max_reg_num (); loop_max_reg = max_reg_before_loop; regs_may_share = 0; /* Count the number of loops. */ max_loop_num = 0; for (insn = f; insn; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) max_loop_num++; } /* Don't waste time if no loops. */ if (max_loop_num == 0) return; loops->num = max_loop_num; /* Get size to use for tables indexed by uids. Leave some space for labels allocated by find_and_verify_loops. */ max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32; uid_luid = xcalloc (max_uid_for_loop, sizeof (int)); uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *)); /* Allocate storage for array of loops. */ loops->array = xcalloc (loops->num, sizeof (struct loop)); /* Find and process each loop. First, find them, and record them in order of their beginnings. */ find_and_verify_loops (f, loops); /* Allocate and initialize auxiliary loop information. */ loops_info = xcalloc (loops->num, sizeof (struct loop_info)); for (i = 0; i < (int) loops->num; i++) loops->array[i].aux = loops_info + i; /* Now find all register lifetimes. This must be done after find_and_verify_loops, because it might reorder the insns in the function. */ reg_scan (f, max_reg_before_loop, 1); /* This must occur after reg_scan so that registers created by gcse will have entries in the register tables. We could have added a call to reg_scan after gcse_main in toplev.c, but moving this call to init_alias_analysis is more efficient. */ init_alias_analysis (); /* See if we went too far. Note that get_max_uid already returns one more that the maximum uid of all insn. */ if (get_max_uid () > max_uid_for_loop) abort (); /* Now reset it to the actual size we need. See above. */ max_uid_for_loop = get_max_uid (); /* find_and_verify_loops has already called compute_luids, but it might have rearranged code afterwards, so we need to recompute the luids now. */ compute_luids (f, NULL_RTX, 0); /* Don't leave gaps in uid_luid for insns that have been deleted. 
It is possible that the first or last insn using some register has been deleted by cross-jumping. Make sure that uid_luid for that former insn's uid points to the general area where that insn used to be. */ for (i = 0; i < max_uid_for_loop; i++) { uid_luid[0] = uid_luid[i]; if (uid_luid[0] != 0) break; } for (i = 0; i < max_uid_for_loop; i++) if (uid_luid[i] == 0) uid_luid[i] = uid_luid[i - 1]; /* Determine if the function has indirect jump. On some systems this prevents low overhead loop instructions from being used. */ indirect_jump_in_function = indirect_jump_in_function_p (f); /* Now scan the loops, last ones first, since this means inner ones are done before outer ones. */ for (i = max_loop_num - 1; i >= 0; i--) { struct loop *loop = &loops->array[i]; if (! loop->invalid && loop->end) { scan_loop (loop, flags); ggc_collect (); } } end_alias_analysis (); /* Clean up. */ for (i = 0; i < (int) loops->num; i++) free (loops_info[i].mems); free (uid_luid); free (uid_loop); free (loops_info); free (loops->array); } /* Returns the next insn, in execution order, after INSN. START and END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop, respectively. LOOP->TOP, if non-NULL, is the top of the loop in the insn-stream; it is used with loops that are entered near the bottom. */ static rtx next_insn_in_loop (const struct loop *loop, rtx insn) { insn = NEXT_INSN (insn); if (insn == loop->end) { if (loop->top) /* Go to the top of the loop, and continue there. */ insn = loop->top; else /* We're done. */ insn = NULL_RTX; } if (insn == loop->scan_start) /* We're done. */ insn = NULL_RTX; return insn; } /* Find any register references hidden inside X and add them to the dependency list DEPS. This is used to look inside CLOBBER (MEM when checking whether a PARALLEL can be pulled out of a loop. */ static rtx find_regs_nested (rtx deps, rtx x) { enum rtx_code code = GET_CODE (x); if (code == REG) deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps); else { const char *fmt = GET_RTX_FORMAT (code); int i, j; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') deps = find_regs_nested (deps, XEXP (x, i)); else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) deps = find_regs_nested (deps, XVECEXP (x, i, j)); } } return deps; } /* Optimize one loop described by LOOP. */ /* ??? Could also move memory writes out of loops if the destination address is invariant, the source is invariant, the memory write is not volatile, and if we can prove that no read inside the loop can read this address before the write occurs. If there is a read of this address after the write, then we can also mark the memory read as invariant. */ static void scan_loop (struct loop *loop, int flags) { struct loop_info *loop_info = LOOP_INFO (loop); struct loop_regs *regs = LOOP_REGS (loop); int i; rtx loop_start = loop->start; rtx loop_end = loop->end; rtx p; /* 1 if we are scanning insns that could be executed zero times. */ int maybe_never = 0; /* 1 if we are scanning insns that might never be executed due to a subroutine call which might exit before they are reached. */ int call_passed = 0; /* Number of insns in the loop. */ int insn_count; int tem; rtx temp, update_start, update_end; /* The SET from an insn, if it is the only SET in the insn. */ rtx set, set1; /* Chain describing insns movable in current loop. */ struct loop_movables *movables = LOOP_MOVABLES (loop); /* Ratio of extra register life span we can justify for saving an instruction. 
More if loop doesn't call subroutines since in that case saving an insn makes more difference and more registers are available. */ int threshold; /* Nonzero if we are scanning instructions in a sub-loop. */ int loop_depth = 0; int in_libcall; loop->top = 0; movables->head = 0; movables->last = 0; /* Determine whether this loop starts with a jump down to a test at the end. This will occur for a small number of loops with a test that is too complex to duplicate in front of the loop. We search for the first insn or label in the loop, skipping NOTEs. However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG (because we might have a loop executed only once that contains a loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END (in case we have a degenerate loop). Note that if we mistakenly think that a loop is entered at the top when, in fact, it is entered at the exit test, the only effect will be slightly poorer optimization. Making the opposite error can generate incorrect code. Since very few loops now start with a jump to the exit test, the code here to detect that case is very conservative. */ for (p = NEXT_INSN (loop_start); p != loop_end && GET_CODE (p) != CODE_LABEL && ! INSN_P (p) && (GET_CODE (p) != NOTE || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END)); p = NEXT_INSN (p)) ; loop->scan_start = p; /* If loop end is the end of the current function, then emit a NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy note insn. This is the position we use when sinking insns out of the loop. */ if (NEXT_INSN (loop->end) != 0) loop->sink = NEXT_INSN (loop->end); else loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end); /* Set up variables describing this loop. */ prescan_loop (loop); threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs); /* If loop has a jump before the first label, the true entry is the target of that jump. Start scan from there. But record in LOOP->TOP the place where the end-test jumps back to so we can scan that after the end of the loop. */ if (GET_CODE (p) == JUMP_INSN /* Loop entry must be unconditional jump (and not a RETURN) */ && any_uncondjump_p (p) && JUMP_LABEL (p) != 0 /* Check to see whether the jump actually jumps out of the loop (meaning it's no loop). This case can happen for things like do {..} while (0). If this label was generated previously by loop, we can't tell anything about it and have to reject the loop. */ && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end)) { loop->top = next_label (loop->scan_start); loop->scan_start = JUMP_LABEL (p); } /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid as required by loop_reg_used_before_p. So skip such loops. (This test may never be true, but it's best to play it safe.) Also, skip loops where we do not start scanning at a label. This test also rejects loops starting with a JUMP_INSN that failed the test above. */ if (INSN_UID (loop->scan_start) >= max_uid_for_loop || GET_CODE (loop->scan_start) != CODE_LABEL) { if (loop_dump_stream) fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n", INSN_UID (loop_start), INSN_UID (loop_end)); return; } /* Allocate extra space for REGs that might be created by load_mems. We allocate a little extra slop as well, in the hopes that we won't have to reallocate the regs array. 
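(The 16 added to mems_idx in the call below is that slop; it is a heuristic cushion rather than a hard bound. Interpretive note, not part of the original comment.)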
*/ loop_regs_scan (loop, loop_info->mems_idx + 16); insn_count = count_insns_in_loop (loop); if (loop_dump_stream) { fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n", INSN_UID (loop_start), INSN_UID (loop_end), insn_count); if (loop->cont) fprintf (loop_dump_stream, "Continue at insn %d.\n", INSN_UID (loop->cont)); } /* Scan through the loop finding insns that are safe to move. Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that this reg will be considered invariant for subsequent insns. We consider whether subsequent insns use the reg in deciding whether it is worth actually moving. MAYBE_NEVER is nonzero if we have passed a conditional jump insn and therefore it is possible that the insns we are scanning would never be executed. At such times, we must make sure that it is safe to execute the insn once instead of zero times. When MAYBE_NEVER is 0, all insns will be executed at least once so that is not a problem. */ for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start); p != NULL_RTX; p = next_insn_in_loop (loop, p)) { if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX)) in_libcall--; if (GET_CODE (p) == INSN) { temp = find_reg_note (p, REG_LIBCALL, NULL_RTX); if (temp) in_libcall++; if (! in_libcall && (set = single_set (p)) && REG_P (SET_DEST (set)) #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && SET_DEST (set) != pic_offset_table_rtx #endif && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize) { int tem1 = 0; int tem2 = 0; int move_insn = 0; int insert_temp = 0; rtx src = SET_SRC (set); rtx dependencies = 0; /* Figure out what to use as a source of this insn. If a REG_EQUIV note is given or if a REG_EQUAL note with a constant operand is specified, use it as the source and mark that we should move this insn by calling emit_move_insn rather that duplicating the insn. Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note is present. */ temp = find_reg_note (p, REG_EQUIV, NULL_RTX); if (temp) src = XEXP (temp, 0), move_insn = 1; else { temp = find_reg_note (p, REG_EQUAL, NULL_RTX); if (temp && CONSTANT_P (XEXP (temp, 0))) src = XEXP (temp, 0), move_insn = 1; if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX)) { src = XEXP (temp, 0); /* A libcall block can use regs that don't appear in the equivalent expression. To move the libcall, we must move those regs too. */ dependencies = libcall_other_reg (p, src); } } /* For parallels, add any possible uses to the dependencies, as we can't move the insn without resolving them first. MEMs inside CLOBBERs may also reference registers; these count as implicit uses. */ if (GET_CODE (PATTERN (p)) == PARALLEL) { for (i = 0; i < XVECLEN (PATTERN (p), 0); i++) { rtx x = XVECEXP (PATTERN (p), 0, i); if (GET_CODE (x) == USE) dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies); else if (GET_CODE (x) == CLOBBER && MEM_P (XEXP (x, 0))) dependencies = find_regs_nested (dependencies, XEXP (XEXP (x, 0), 0)); } } if (/* The register is used in basic blocks other than the one where it is set (meaning that something after this point in the loop might depend on its value before the set). */ ! reg_in_basic_block_p (p, SET_DEST (set)) /* And the set is not guaranteed to be executed once the loop starts, or the value before the set is needed before the set occurs... ??? Note we have quadratic behavior here, mitigated by the fact that the previous test will often fail for large loops. 
Rather than re-scanning the entire loop each time for register usage, we should build tables of the register usage and use them here instead. */ && (maybe_never || loop_reg_used_before_p (loop, set, p))) /* It is unsafe to move the set. However, it may be OK to move the source into a new pseudo, and substitute a reg-to-reg copy for the original insn. This code used to consider it OK to move a set of a variable which was not created by the user and not used in an exit test. That behavior is incorrect and was removed. */ insert_temp = 1; /* Don't try to optimize a MODE_CC set with a constant source. It probably will be combined with a conditional jump. */ if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC && CONSTANT_P (src)) ; /* Don't try to optimize a register that was made by loop-optimization for an inner loop. We don't know its life-span, so we can't compute the benefit. */ else if (REGNO (SET_DEST (set)) >= max_reg_before_loop) ; /* Don't move the source and add a reg-to-reg copy: - with -Os (this certainly increases size), - if the mode doesn't support copy operations (obviously), - if the source is already a reg (the motion will gain nothing), - if the source is a legitimate constant (likewise). */ else if (insert_temp && (optimize_size || ! can_copy_p (GET_MODE (SET_SRC (set))) || REG_P (SET_SRC (set)) || (CONSTANT_P (SET_SRC (set)) && LEGITIMATE_CONSTANT_P (SET_SRC (set))))) ; else if ((tem = loop_invariant_p (loop, src)) && (dependencies == 0 || (tem2 = loop_invariant_p (loop, dependencies)) != 0) && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1 || (tem1 = consec_sets_invariant_p (loop, SET_DEST (set), regs->array[REGNO (SET_DEST (set))].set_in_loop, p))) /* If the insn can cause a trap (such as divide by zero), can't move it unless it's guaranteed to be executed once loop is entered. Even a function call might prevent the trap insn from being reached (since it might exit!) */ && ! ((maybe_never || call_passed) && may_trap_p (src))) { struct movable *m; int regno = REGNO (SET_DEST (set)); /* A potential lossage is where we have a case where two insns can be combined as long as they are both in the loop, but we move one of them outside the loop. For large loops, this can lose. The most common case of this is the address of a function being called. Therefore, if this register is marked as being used exactly once if we are in a loop with calls (a "large loop"), see if we can replace the usage of this register with the source of this SET. If we can, delete this insn. Don't do this if P has a REG_RETVAL note or if we have SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */ if (loop_info->has_call && regs->array[regno].single_usage != 0 && regs->array[regno].single_usage != const0_rtx && REGNO_FIRST_UID (regno) == INSN_UID (p) && (REGNO_LAST_UID (regno) == INSN_UID (regs->array[regno].single_usage)) && regs->array[regno].set_in_loop == 1 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS && ! side_effects_p (SET_SRC (set)) && ! find_reg_note (p, REG_RETVAL, NULL_RTX) && (! SMALL_REGISTER_CLASSES || (! (REG_P (SET_SRC (set)) && (REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))) && regno >= FIRST_PSEUDO_REGISTER /* This test is not redundant; SET_SRC (set) might be a call-clobbered register and the life of REGNO might span a call. */ && ! 
modified_between_p (SET_SRC (set), p, regs->array[regno].single_usage) && no_labels_between_p (p, regs->array[regno].single_usage) && validate_replace_rtx (SET_DEST (set), SET_SRC (set), regs->array[regno].single_usage)) { /* Replace any usage in a REG_EQUAL note. Must copy the new source, so that we don't get rtx sharing between the SET_SOURCE and REG_NOTES of insn p. */ REG_NOTES (regs->array[regno].single_usage) = (replace_rtx (REG_NOTES (regs->array[regno].single_usage), SET_DEST (set), copy_rtx (SET_SRC (set)))); delete_insn (p); for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++) regs->array[regno+i].set_in_loop = 0; continue; } m = xmalloc (sizeof (struct movable)); m->next = 0; m->insn = p; m->set_src = src; m->dependencies = dependencies; m->set_dest = SET_DEST (set); m->force = 0; m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1; m->done = 0; m->forces = 0; m->partial = 0; m->move_insn = move_insn; m->move_insn_first = 0; m->insert_temp = insert_temp; m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0); m->savemode = VOIDmode; m->regno = regno; /* Set M->cond if either loop_invariant_p or consec_sets_invariant_p returned 2 (only conditionally invariant). */ m->cond = ((tem | tem1 | tem2) > 1); m->global = LOOP_REG_GLOBAL_P (loop, regno); m->match = 0; m->lifetime = LOOP_REG_LIFETIME (loop, regno); m->savings = regs->array[regno].n_times_set; if (find_reg_note (p, REG_RETVAL, NULL_RTX)) m->savings += libcall_benefit (p); for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++) regs->array[regno+i].set_in_loop = move_insn ? -2 : -1; /* Add M to the end of the chain MOVABLES. */ loop_movables_add (movables, m); if (m->consec > 0) { /* It is possible for the first instruction to have a REG_EQUAL note but a non-invariant SET_SRC, so we must remember the status of the first instruction in case the last instruction doesn't have a REG_EQUAL note. */ m->move_insn_first = m->move_insn; /* Skip this insn, not checking REG_LIBCALL notes. */ p = next_nonnote_insn (p); /* Skip the consecutive insns, if there are any. */ p = skip_consec_insns (p, m->consec); /* Back up to the last insn of the consecutive group. */ p = prev_nonnote_insn (p); /* We must now reset m->move_insn, m->is_equiv, and possibly m->set_src to correspond to the effects of all the insns. */ temp = find_reg_note (p, REG_EQUIV, NULL_RTX); if (temp) m->set_src = XEXP (temp, 0), m->move_insn = 1; else { temp = find_reg_note (p, REG_EQUAL, NULL_RTX); if (temp && CONSTANT_P (XEXP (temp, 0))) m->set_src = XEXP (temp, 0), m->move_insn = 1; else m->move_insn = 0; } m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0); } } /* If this register is always set within a STRICT_LOW_PART or set to zero, then its high bytes are constant. So clear them outside the loop and within the loop just load the low bytes. We must check that the machine has an instruction to do so. Also, if the value loaded into the register depends on the same register, this cannot be done. 
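As an illustration (a sketch of the typical RTL shape, not taken from the original comment): a pair such as (set (reg:SI 70) (const_int 0)) followed by (set (strict_low_part (subreg:HI (reg:SI 70) 0)) (mem:HI ...)) inside the loop can have the clearing insn hoisted ahead of the loop, leaving only the narrow load of the low part inside it; the test below looks for exactly this zero-set and STRICT_LOW_PART pattern.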
*/ else if (SET_SRC (set) == const0_rtx && GET_CODE (NEXT_INSN (p)) == INSN && (set1 = single_set (NEXT_INSN (p))) && GET_CODE (set1) == SET && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART) && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG) && (SUBREG_REG (XEXP (SET_DEST (set1), 0)) == SET_DEST (set)) && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1))) { int regno = REGNO (SET_DEST (set)); if (regs->array[regno].set_in_loop == 2) { struct movable *m; m = xmalloc (sizeof (struct movable)); m->next = 0; m->insn = p; m->set_dest = SET_DEST (set); m->dependencies = 0; m->force = 0; m->consec = 0; m->done = 0; m->forces = 0; m->move_insn = 0; m->move_insn_first = 0; m->insert_temp = insert_temp; m->partial = 1; /* If the insn may not be executed on some cycles, we can't clear the whole reg; clear just high part. Not even if the reg is used only within this loop. Consider this: while (1) while (s != t) { if (foo ()) x = *s; use (x); } Clearing x before the inner loop could clobber a value being saved from the last time around the outer loop. However, if the reg is not used outside this loop and all uses of the register are in the same basic block as the store, there is no problem. If this insn was made by loop, we don't know its LOOP_INSN_LUID and hence must make a conservative assumption. */ m->global = (INSN_UID (p) >= max_uid_for_loop || LOOP_REG_GLOBAL_P (loop, regno) || (labels_in_range_p (p, REGNO_FIRST_LUID (regno)))); if (maybe_never && m->global) m->savemode = GET_MODE (SET_SRC (set1)); else m->savemode = VOIDmode; m->regno = regno; m->cond = 0; m->match = 0; m->lifetime = LOOP_REG_LIFETIME (loop, regno); m->savings = 1; for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++) regs->array[regno+i].set_in_loop = -1; /* Add M to the end of the chain MOVABLES. */ loop_movables_add (movables, m); } } } } /* Past a call insn, we get to insns which might not be executed because the call might exit. This matters for insns that trap. Constant and pure call insns always return, so they don't count. */ else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p)) call_passed = 1; /* Past a label or a jump, we get to insns for which we can't count on whether or how many times they will be executed during each iteration. Therefore, we can only move out sets of trivial variables (those not used after the loop). */ /* Similar code appears twice in strength_reduce. */ else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN) /* If we enter the loop in the middle, and scan around to the beginning, don't set maybe_never for that. This must be an unconditional jump, otherwise the code at the top of the loop might never be executed. Unconditional jumps are followed by a barrier then the loop_end. */ && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top && NEXT_INSN (NEXT_INSN (p)) == loop_end && any_uncondjump_p (p))) maybe_never = 1; else if (GET_CODE (p) == NOTE) { /* At the virtual top of a converted loop, insns are again known to be executed: logically, the loop begins here even though the exit code has been duplicated. */ if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0) maybe_never = call_passed = 0; else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) loop_depth++; else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END) loop_depth--; } } /* If one movable subsumes another, ignore that other. */ ignore_some_movables (movables); /* For each movable insn, see if the reg that it loads leads when it dies right into another conditionally movable insn. 
If so, record that the second insn "forces" the first one, since the second can be moved only if the first is. */ force_movables (movables); /* See if there are multiple movable insns that load the same value. If there are, make all but the first point at the first one through the `match' field, and add the priorities of them all together as the priority of the first. */ combine_movables (movables, regs); /* Now consider each movable insn to decide whether it is worth moving. Store 0 in regs->array[I].set_in_loop for each reg I that is moved. For machines with few registers this increases code size, so do not move moveables when optimizing for code size on such machines. (The 18 below is the value for i386.) */ if (!optimize_size || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call)) { move_movables (loop, movables, threshold, insn_count); /* Recalculate regs->array if move_movables has created new registers. */ if (max_reg_num () > regs->num) { loop_regs_scan (loop, 0); for (update_start = loop_start; PREV_INSN (update_start) && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL; update_start = PREV_INSN (update_start)) ; update_end = NEXT_INSN (loop_end); reg_scan_update (update_start, update_end, loop_max_reg); loop_max_reg = max_reg_num (); } } /* Now candidates that still are negative are those not moved. Change regs->array[I].set_in_loop to indicate that those are not actually invariant. */ for (i = 0; i < regs->num; i++) if (regs->array[i].set_in_loop < 0) regs->array[i].set_in_loop = regs->array[i].n_times_set; /* Now that we've moved some things out of the loop, we might be able to hoist even more memory references. */ load_mems (loop); /* Recalculate regs->array if load_mems has created new registers. */ if (max_reg_num () > regs->num) loop_regs_scan (loop, 0); for (update_start = loop_start; PREV_INSN (update_start) && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL; update_start = PREV_INSN (update_start)) ; update_end = NEXT_INSN (loop_end); reg_scan_update (update_start, update_end, loop_max_reg); loop_max_reg = max_reg_num (); if (flag_strength_reduce) { if (update_end && GET_CODE (update_end) == CODE_LABEL) /* Ensure our label doesn't go away. */ LABEL_NUSES (update_end)++; strength_reduce (loop, flags); reg_scan_update (update_start, update_end, loop_max_reg); loop_max_reg = max_reg_num (); if (update_end && GET_CODE (update_end) == CODE_LABEL && --LABEL_NUSES (update_end) == 0) delete_related_insns (update_end); } /* The movable information is required for strength reduction. */ loop_movables_free (movables); free (regs->array); regs->array = 0; regs->num = 0; } /* Add elements to *OUTPUT to record all the pseudo-regs mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */ static void record_excess_regs (rtx in_this, rtx not_in_this, rtx *output) { enum rtx_code code; const char *fmt; int i; code = GET_CODE (in_this); switch (code) { case PC: case CC0: case CONST_INT: case CONST_DOUBLE: case CONST: case SYMBOL_REF: case LABEL_REF: return; case REG: if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER && ! 
reg_mentioned_p (in_this, not_in_this)) *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output); return; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { int j; switch (fmt[i]) { case 'E': for (j = 0; j < XVECLEN (in_this, i); j++) record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output); break; case 'e': record_excess_regs (XEXP (in_this, i), not_in_this, output); break; } } } /* Check what regs are referred to in the libcall block ending with INSN, aside from those mentioned in the equivalent value. If there are none, return 0. If there are one or more, return an EXPR_LIST containing all of them. */ static rtx libcall_other_reg (rtx insn, rtx equiv) { rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX); rtx p = XEXP (note, 0); rtx output = 0; /* First, find all the regs used in the libcall block that are not mentioned as inputs to the result. */ while (p != insn) { if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN || GET_CODE (p) == CALL_INSN) record_excess_regs (PATTERN (p), equiv, &output); p = NEXT_INSN (p); } return output; } /* Return 1 if all uses of REG are between INSN and the end of the basic block. */ static int reg_in_basic_block_p (rtx insn, rtx reg) { int regno = REGNO (reg); rtx p; if (REGNO_FIRST_UID (regno) != INSN_UID (insn)) return 0; /* Search this basic block for the already recorded last use of the reg. */ for (p = insn; p; p = NEXT_INSN (p)) { switch (GET_CODE (p)) { case NOTE: break; case INSN: case CALL_INSN: /* Ordinary insn: if this is the last use, we win. */ if (REGNO_LAST_UID (regno) == INSN_UID (p)) return 1; break; case JUMP_INSN: /* Jump insn: if this is the last use, we win. */ if (REGNO_LAST_UID (regno) == INSN_UID (p)) return 1; /* Otherwise, it's the end of the basic block, so we lose. */ return 0; case CODE_LABEL: case BARRIER: /* It's the end of the basic block, so we lose. */ return 0; default: break; } } /* The "last use" that was recorded can't be found after the first use. This can happen when the last use was deleted while processing an inner loop, this inner loop was then completely unrolled, and the outer loop is always exited after the inner loop, so that everything after the first use becomes a single basic block. */ return 1; } /* Compute the benefit of eliminating the insns in the block whose last insn is LAST. This may be a group of insns used to compute a value directly or can contain a library call. */ static int libcall_benefit (rtx last) { rtx insn; int benefit = 0; for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0); insn != last; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == CALL_INSN) benefit += 10; /* Assume at least this many insns in a library routine. */ else if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER) benefit++; } return benefit; } /* Skip COUNT insns from INSN, counting library calls as 1 insn. */ static rtx skip_consec_insns (rtx insn, int count) { for (; count > 0; count--) { rtx temp; /* If first insn of libcall sequence, skip to end. */ /* Do this at start of loop, since INSN is guaranteed to be an insn here. */ if (GET_CODE (insn) != NOTE && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX))) insn = XEXP (temp, 0); do insn = NEXT_INSN (insn); while (GET_CODE (insn) == NOTE); } return insn; } /* Ignore any movable whose insn falls within a libcall which is part of another movable. 
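(Interpretive note: a libcall block is the span of insns from the one carrying a REG_LIBCALL note to the matching insn carrying the REG_RETVAL note; moving one of the inner insns on its own would tear that block apart, so such movables are marked done here.)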
We make use of the fact that the movable for the libcall value was made later and so appears later on the chain. */ static void ignore_some_movables (struct loop_movables *movables) { struct movable *m, *m1; for (m = movables->head; m; m = m->next) { /* Is this a movable for the value of a libcall? */ rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX); if (note) { rtx insn; /* Check for earlier movables inside that range, and mark them invalid. We cannot use LUIDs here because insns created by loop.c for prior loops don't have LUIDs. Rather than reject all such insns from movables, we just explicitly check each insn in the libcall (since invariant libcalls aren't that common). */ for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn)) for (m1 = movables->head; m1 != m; m1 = m1->next) if (m1->insn == insn) m1->done = 1; } } } /* For each movable insn, see if the reg that it loads leads when it dies right into another conditionally movable insn. If so, record that the second insn "forces" the first one, since the second can be moved only if the first is. */ static void force_movables (struct loop_movables *movables) { struct movable *m, *m1; for (m1 = movables->head; m1; m1 = m1->next) /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */ if (!m1->partial && !m1->done) { int regno = m1->regno; for (m = m1->next; m; m = m->next) /* ??? Could this be a bug? What if CSE caused the register of M1 to be used after this insn? Since CSE does not update regno_last_uid, this insn M->insn might not be where it dies. But very likely this doesn't matter; what matters is that M's reg is computed from M1's reg. */ if (INSN_UID (m->insn) == REGNO_LAST_UID (regno) && !m->done) break; if (m != 0 && m->set_src == m1->set_dest /* If m->consec, m->set_src isn't valid. */ && m->consec == 0) m = 0; /* Increase the priority of the moving the first insn since it permits the second to be moved as well. Likewise for insns already forced by the first insn. */ if (m != 0) { struct movable *m2; m->forces = m1; for (m2 = m1; m2; m2 = m2->forces) { m2->lifetime += m->lifetime; m2->savings += m->savings; } } } } /* Find invariant expressions that are equal and can be combined into one register. */ static void combine_movables (struct loop_movables *movables, struct loop_regs *regs) { struct movable *m; char *matched_regs = xmalloc (regs->num); enum machine_mode mode; /* Regs that are set more than once are not allowed to match or be matched. I'm no longer sure why not. */ /* Only pseudo registers are allowed to match or be matched, since move_movables does not validate the change. */ /* Perhaps testing m->consec_sets would be more appropriate here? */ for (m = movables->head; m; m = m->next) if (m->match == 0 && regs->array[m->regno].n_times_set == 1 && m->regno >= FIRST_PSEUDO_REGISTER && !m->insert_temp && !m->partial) { struct movable *m1; int regno = m->regno; memset (matched_regs, 0, regs->num); matched_regs[regno] = 1; /* We want later insns to match the first one. Don't make the first one match any later ones. So start this loop at m->next. */ for (m1 = m->next; m1; m1 = m1->next) if (m != m1 && m1->match == 0 && !m1->insert_temp && regs->array[m1->regno].n_times_set == 1 && m1->regno >= FIRST_PSEUDO_REGISTER /* A reg used outside the loop mustn't be eliminated. */ && !m1->global /* A reg used for zero-extending mustn't be eliminated. 
*/ && !m1->partial && (matched_regs[m1->regno] || ( /* Can combine regs with different modes loaded from the same constant only if the modes are the same or if both are integer modes with M wider or the same width as M1. The check for integer is redundant, but safe, since the only case of differing destination modes with equal sources is when both sources are VOIDmode, i.e., CONST_INT. */ (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest) || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT && (GET_MODE_BITSIZE (GET_MODE (m->set_dest)) >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest))))) /* See if the source of M1 says it matches M. */ && ((REG_P (m1->set_src) && matched_regs[REGNO (m1->set_src)]) || rtx_equal_for_loop_p (m->set_src, m1->set_src, movables, regs)))) && ((m->dependencies == m1->dependencies) || rtx_equal_p (m->dependencies, m1->dependencies))) { m->lifetime += m1->lifetime; m->savings += m1->savings; m1->done = 1; m1->match = m; matched_regs[m1->regno] = 1; } } /* Now combine the regs used for zero-extension. This can be done for those not marked `global' provided their lives don't overlap. */ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) { struct movable *m0 = 0; /* Combine all the registers for extension from mode MODE. Don't combine any that are used outside this loop. */ for (m = movables->head; m; m = m->next) if (m->partial && ! m->global && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn))))) { struct movable *m1; int first = REGNO_FIRST_LUID (m->regno); int last = REGNO_LAST_LUID (m->regno); if (m0 == 0) { /* First one: don't check for overlap, just record it. */ m0 = m; continue; } /* Make sure they extend to the same mode. (Almost always true.) */ if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest)) continue; /* We already have one: check for overlap with those already combined together. */ for (m1 = movables->head; m1 != m; m1 = m1->next) if (m1 == m0 || (m1->partial && m1->match == m0)) if (! (REGNO_FIRST_LUID (m1->regno) > last || REGNO_LAST_LUID (m1->regno) < first)) goto overlap; /* No overlap: we can combine this with the others. */ m0->lifetime += m->lifetime; m0->savings += m->savings; m->done = 1; m->match = m0; overlap: ; } } /* Clean up. */ free (matched_regs); } /* Returns the number of movable instructions in LOOP that were not moved outside the loop. */ static int num_unmoved_movables (const struct loop *loop) { int num = 0; struct movable *m; for (m = LOOP_MOVABLES (loop)->head; m; m = m->next) if (!m->done) ++num; return num; } /* Return 1 if regs X and Y will become the same if moved. */ static int regs_match_p (rtx x, rtx y, struct loop_movables *movables) { unsigned int xn = REGNO (x); unsigned int yn = REGNO (y); struct movable *mx, *my; for (mx = movables->head; mx; mx = mx->next) if (mx->regno == xn) break; for (my = movables->head; my; my = my->next) if (my->regno == yn) break; return (mx && my && ((mx->match == my->match && mx->match != 0) || mx->match == my || mx == my->match)); } /* Return 1 if X and Y are identical-looking rtx's. This is the Lisp function EQUAL for rtx arguments. If two registers are matching movables or a movable register and an equivalent constant, consider them equal. 
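For example (illustrative only, not from the original comment): if pseudos 101 and 102 are movables that load the same invariant value and have been matched, then (plus:SI (reg:SI 101) (reg:SI 103)) and (plus:SI (reg:SI 102) (reg:SI 103)) compare equal here, as do (reg:SI 101) and the constant it is known to load, even though plain rtx_equal_p would reject both pairs.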
*/ static int rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables, struct loop_regs *regs) { int i; int j; struct movable *m; enum rtx_code code; const char *fmt; if (x == y) return 1; if (x == 0 || y == 0) return 0; code = GET_CODE (x); /* If we have a register and a constant, they may sometimes be equal. */ if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2 && CONSTANT_P (y)) { for (m = movables->head; m; m = m->next) if (m->move_insn && m->regno == REGNO (x) && rtx_equal_p (m->set_src, y)) return 1; } else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2 && CONSTANT_P (x)) { for (m = movables->head; m; m = m->next) if (m->move_insn && m->regno == REGNO (y) && rtx_equal_p (m->set_src, x)) return 1; } /* Otherwise, rtx's of different codes cannot be equal. */ if (code != GET_CODE (y)) return 0; /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. (REG:SI x) and (REG:HI x) are NOT equivalent. */ if (GET_MODE (x) != GET_MODE (y)) return 0; /* These three types of rtx's can be compared nonrecursively. */ if (code == REG) return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables)); if (code == LABEL_REF) return XEXP (x, 0) == XEXP (y, 0); if (code == SYMBOL_REF) return XSTR (x, 0) == XSTR (y, 0); /* Compare the elements. If any pair of corresponding elements fail to match, return 0 for the whole things. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { switch (fmt[i]) { case 'w': if (XWINT (x, i) != XWINT (y, i)) return 0; break; case 'i': if (XINT (x, i) != XINT (y, i)) return 0; break; case 'E': /* Two vectors must have the same length. */ if (XVECLEN (x, i) != XVECLEN (y, i)) return 0; /* And the corresponding elements must match. */ for (j = 0; j < XVECLEN (x, i); j++) if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables, regs) == 0) return 0; break; case 'e': if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs) == 0) return 0; break; case 's': if (strcmp (XSTR (x, i), XSTR (y, i))) return 0; break; case 'u': /* These are just backpointers, so they don't matter. */ break; case '0': break; /* It is believed that rtx's at this level will never contain anything but integers and other rtx's, except for within LABEL_REFs and SYMBOL_REFs. */ default: abort (); } } return 1; } /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL references is incremented once for each added note. */ static void add_label_notes_loop (rtx x, rtx insns) { enum rtx_code code = GET_CODE (x); int i, j; const char *fmt; rtx insn; if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x)) { /* This code used to ignore labels that referred to dispatch tables to avoid flow generating (slightly) worse code. We no longer ignore such label references (see LABEL_REF handling in mark_jump_label for additional information). */ for (insn = insns; insn; insn = NEXT_INSN (insn)) if (reg_mentioned_p (XEXP (x, 0), insn)) { REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0), REG_NOTES (insn)); if (LABEL_P (XEXP (x, 0))) LABEL_NUSES (XEXP (x, 0))++; } } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') add_label_notes_loop (XEXP (x, i), insns); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) add_label_notes_loop (XVECEXP (x, i, j), insns); } } /* Scan MOVABLES, and move the insns that deserve to be moved. 
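(Whether an individual movable is actually moved depends on the safety and desirability tests below; THRESHOLD scales how the estimated savings and register lifetime are weighed against INSN_COUNT, the size of the loop body in insns.)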
If two matching movables are combined, replace one reg with the other throughout. */ static void move_movables (struct loop *loop, struct loop_movables *movables, int threshold, int insn_count) { struct loop_regs *regs = LOOP_REGS (loop); int nregs = regs->num; rtx new_start = 0; struct movable *m; rtx p; rtx loop_start = loop->start; rtx loop_end = loop->end; /* Map of pseudo-register replacements to handle combining when we move several insns that load the same value into different pseudo-registers. */ rtx *reg_map = xcalloc (nregs, sizeof (rtx)); char *already_moved = xcalloc (nregs, sizeof (char)); for (m = movables->head; m; m = m->next) { /* Describe this movable insn. */ if (loop_dump_stream) { fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ", INSN_UID (m->insn), m->regno, m->lifetime); if (m->consec > 0) fprintf (loop_dump_stream, "consec %d, ", m->consec); if (m->cond) fprintf (loop_dump_stream, "cond "); if (m->force) fprintf (loop_dump_stream, "force "); if (m->global) fprintf (loop_dump_stream, "global "); if (m->done) fprintf (loop_dump_stream, "done "); if (m->move_insn) fprintf (loop_dump_stream, "move-insn "); if (m->match) fprintf (loop_dump_stream, "matches %d ", INSN_UID (m->match->insn)); if (m->forces) fprintf (loop_dump_stream, "forces %d ", INSN_UID (m->forces->insn)); } /* Ignore the insn if it's already done (it matched something else). Otherwise, see if it is now safe to move. */ if (!m->done && (! m->cond || (1 == loop_invariant_p (loop, m->set_src) && (m->dependencies == 0 || 1 == loop_invariant_p (loop, m->dependencies)) && (m->consec == 0 || 1 == consec_sets_invariant_p (loop, m->set_dest, m->consec + 1, m->insn)))) && (! m->forces || m->forces->done)) { int regno; rtx p; int savings = m->savings; /* We have an insn that is safe to move. Compute its desirability. */ p = m->insn; regno = m->regno; if (loop_dump_stream) fprintf (loop_dump_stream, "savings %d ", savings); if (regs->array[regno].moved_once && loop_dump_stream) fprintf (loop_dump_stream, "halved since already moved "); /* An insn MUST be moved if we already moved something else which is safe only if this one is moved too: that is, if already_moved[REGNO] is nonzero. */ /* An insn is desirable to move if the new lifetime of the register is no more than THRESHOLD times the old lifetime. If it's not desirable, it means the loop is so big that moving won't speed things up much, and it is liable to make register usage worse. */ /* It is also desirable to move if it can be moved at no extra cost because something else was already moved. */ if (already_moved[regno] || flag_move_all_movables || (threshold * savings * m->lifetime) >= (regs->array[regno].moved_once ? insn_count * 2 : insn_count) || (m->forces && m->forces->done && regs->array[m->forces->regno].n_times_set == 1)) { int count; struct movable *m1; rtx first = NULL_RTX; rtx newreg = NULL_RTX; if (m->insert_temp) newreg = gen_reg_rtx (GET_MODE (m->set_dest)); /* Now move the insns that set the reg. */ if (m->partial && m->match) { rtx newpat, i1; rtx r1, r2; /* Find the end of this chain of matching regs. Thus, we load each reg in the chain from that one reg. And that reg is loaded with 0 directly, since it has ->match == 0. */ for (m1 = m; m1->match; m1 = m1->match); newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)), SET_DEST (PATTERN (m1->insn))); i1 = loop_insn_hoist (loop, newpat); /* Mark the moved, invariant reg as being allowed to share a hard reg with the other matching invariant. 
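The pair is recorded on the regs_may_share list just below so later passes know the two pseudos may be assigned the same hard register.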
*/ REG_NOTES (i1) = REG_NOTES (m->insn); r1 = SET_DEST (PATTERN (m->insn)); r2 = SET_DEST (PATTERN (m1->insn)); regs_may_share = gen_rtx_EXPR_LIST (VOIDmode, r1, gen_rtx_EXPR_LIST (VOIDmode, r2, regs_may_share)); delete_insn (m->insn); if (new_start == 0) new_start = i1; if (loop_dump_stream) fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1)); } /* If we are to re-generate the item being moved with a new move insn, first delete what we have and then emit the move insn before the loop. */ else if (m->move_insn) { rtx i1, temp, seq; for (count = m->consec; count >= 0; count--) { /* If this is the first insn of a library call sequence, something is very wrong. */ if (GET_CODE (p) != NOTE && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) abort (); /* If this is the last insn of a libcall sequence, then delete every insn in the sequence except the last. The last insn is handled in the normal manner. */ if (GET_CODE (p) != NOTE && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX))) { temp = XEXP (temp, 0); while (temp != p) temp = delete_insn (temp); } temp = p; p = delete_insn (p); /* simplify_giv_expr expects that it can walk the insns at m->insn forwards and see this old sequence we are tossing here. delete_insn does preserve the next pointers, but when we skip over a NOTE we must fix it up. Otherwise that code walks into the non-deleted insn stream. */ while (p && GET_CODE (p) == NOTE) p = NEXT_INSN (temp) = NEXT_INSN (p); if (m->insert_temp) { /* Replace the original insn with a move from our newly created temp. */ start_sequence (); emit_move_insn (m->set_dest, newreg); seq = get_insns (); end_sequence (); emit_insn_before (seq, p); } } start_sequence (); emit_move_insn (m->insert_temp ? newreg : m->set_dest, m->set_src); seq = get_insns (); end_sequence (); add_label_notes_loop (m->set_src, seq); i1 = loop_insn_hoist (loop, seq); if (! find_reg_note (i1, REG_EQUAL, NULL_RTX)) set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV : REG_EQUAL, m->set_src); if (loop_dump_stream) fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1)); /* The more regs we move, the less we like moving them. */ threshold -= 3; } else { for (count = m->consec; count >= 0; count--) { rtx i1, temp; /* If first insn of libcall sequence, skip to end. */ /* Do this at start of loop, since p is guaranteed to be an insn here. */ if (GET_CODE (p) != NOTE && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) p = XEXP (temp, 0); /* If last insn of libcall sequence, move all insns except the last before the loop. The last insn is handled in the normal manner. */ if (GET_CODE (p) != NOTE && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX))) { rtx fn_address = 0; rtx fn_reg = 0; rtx fn_address_insn = 0; first = 0; for (temp = XEXP (temp, 0); temp != p; temp = NEXT_INSN (temp)) { rtx body; rtx n; rtx next; if (GET_CODE (temp) == NOTE) continue; body = PATTERN (temp); /* Find the next insn after TEMP, not counting USE or NOTE insns. */ for (next = NEXT_INSN (temp); next != p; next = NEXT_INSN (next)) if (! (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == USE) && GET_CODE (next) != NOTE) break; /* If that is the call, this may be the insn that loads the function address. Extract the function address from the insn that loads it into a register. If this insn was cse'd, we get incorrect code. So emit a new move insn that copies the function address into the register that the call insn will use. flow.c will delete any redundant stores that we have created. 
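(Emitting the copy unconditionally is safe: if the register already held the correct address the extra store is simply dead and goes away, and if cse had disturbed the original load, the copy restores the address the hoisted call needs.)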
*/ if (GET_CODE (next) == CALL_INSN && GET_CODE (body) == SET && REG_P (SET_DEST (body)) && (n = find_reg_note (temp, REG_EQUAL, NULL_RTX))) { fn_reg = SET_SRC (body); if (!REG_P (fn_reg)) fn_reg = SET_DEST (body); fn_address = XEXP (n, 0); fn_address_insn = temp; } /* We have the call insn. If it uses the register we suspect it might, load it with the correct address directly. */ if (GET_CODE (temp) == CALL_INSN && fn_address != 0 && reg_referenced_p (fn_reg, body)) loop_insn_emit_after (loop, 0, fn_address_insn, gen_move_insn (fn_reg, fn_address)); if (GET_CODE (temp) == CALL_INSN) { i1 = loop_call_insn_hoist (loop, body); /* Because the USAGE information potentially contains objects other than hard registers we need to copy it. */ if (CALL_INSN_FUNCTION_USAGE (temp)) CALL_INSN_FUNCTION_USAGE (i1) = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp)); } else i1 = loop_insn_hoist (loop, body); if (first == 0) first = i1; if (temp == fn_address_insn) fn_address_insn = i1; REG_NOTES (i1) = REG_NOTES (temp); REG_NOTES (temp) = NULL; delete_insn (temp); } if (new_start == 0) new_start = first; } if (m->savemode != VOIDmode) { /* P sets REG to zero; but we should clear only the bits that are not covered by the mode m->savemode. */ rtx reg = m->set_dest; rtx sequence; rtx tem; start_sequence (); tem = expand_simple_binop (GET_MODE (reg), AND, reg, GEN_INT ((((HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (m->savemode))) - 1), reg, 1, OPTAB_LIB_WIDEN); if (tem == 0) abort (); if (tem != reg) emit_move_insn (reg, tem); sequence = get_insns (); end_sequence (); i1 = loop_insn_hoist (loop, sequence); } else if (GET_CODE (p) == CALL_INSN) { i1 = loop_call_insn_hoist (loop, PATTERN (p)); /* Because the USAGE information potentially contains objects other than hard registers we need to copy it. */ if (CALL_INSN_FUNCTION_USAGE (p)) CALL_INSN_FUNCTION_USAGE (i1) = copy_rtx (CALL_INSN_FUNCTION_USAGE (p)); } else if (count == m->consec && m->move_insn_first) { rtx seq; /* The SET_SRC might not be invariant, so we must use the REG_EQUAL note. */ start_sequence (); emit_move_insn (m->insert_temp ? newreg : m->set_dest, m->set_src); seq = get_insns (); end_sequence (); add_label_notes_loop (m->set_src, seq); i1 = loop_insn_hoist (loop, seq); if (! find_reg_note (i1, REG_EQUAL, NULL_RTX)) set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV : REG_EQUAL, m->set_src); } else if (m->insert_temp) { rtx *reg_map2 = xcalloc (REGNO (newreg), sizeof(rtx)); reg_map2 [m->regno] = newreg; i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p))); replace_regs (i1, reg_map2, REGNO (newreg), 1); free (reg_map2); } else i1 = loop_insn_hoist (loop, PATTERN (p)); if (REG_NOTES (i1) == 0) { REG_NOTES (i1) = REG_NOTES (p); REG_NOTES (p) = NULL; /* If there is a REG_EQUAL note present whose value is not loop invariant, then delete it, since it may cause problems with later optimization passes. It is possible for cse to create such notes like this as a result of record_jump_cond. */ if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX)) && ! loop_invariant_p (loop, XEXP (temp, 0))) remove_note (i1, temp); } if (new_start == 0) new_start = i1; if (loop_dump_stream) fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1)); /* If library call, now fix the REG_NOTES that contain insn pointers, namely REG_LIBCALL on FIRST and REG_RETVAL on I1. 
*/ if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))) { XEXP (temp, 0) = first; temp = find_reg_note (first, REG_LIBCALL, NULL_RTX); XEXP (temp, 0) = i1; } temp = p; delete_insn (p); p = NEXT_INSN (p); /* simplify_giv_expr expects that it can walk the insns at m->insn forwards and see this old sequence we are tossing here. delete_insn does preserve the next pointers, but when we skip over a NOTE we must fix it up. Otherwise that code walks into the non-deleted insn stream. */ while (p && GET_CODE (p) == NOTE) p = NEXT_INSN (temp) = NEXT_INSN (p); if (m->insert_temp) { rtx seq; /* Replace the original insn with a move from our newly created temp. */ start_sequence (); emit_move_insn (m->set_dest, newreg); seq = get_insns (); end_sequence (); emit_insn_before (seq, p); } } /* The more regs we move, the less we like moving them. */ threshold -= 3; } m->done = 1; if (!m->insert_temp) { /* Any other movable that loads the same register MUST be moved. */ already_moved[regno] = 1; /* This reg has been moved out of one loop. */ regs->array[regno].moved_once = 1; /* The reg set here is now invariant. */ if (! m->partial) { int i; for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++) regs->array[regno+i].set_in_loop = 0; } /* Change the length-of-life info for the register to say it lives at least the full length of this loop. This will help guide optimizations in outer loops. */ if (REGNO_FIRST_LUID (regno) > LOOP_INSN_LUID (loop_start)) /* This is the old insn before all the moved insns. We can't use the moved insn because it is out of range in uid_luid. Only the old insns have luids. */ REGNO_FIRST_UID (regno) = INSN_UID (loop_start); if (REGNO_LAST_LUID (regno) < LOOP_INSN_LUID (loop_end)) REGNO_LAST_UID (regno) = INSN_UID (loop_end); } /* Combine with this moved insn any other matching movables. */ if (! m->partial) for (m1 = movables->head; m1; m1 = m1->next) if (m1->match == m) { rtx temp; /* Schedule the reg loaded by M1 for replacement so that shares the reg of M. If the modes differ (only possible in restricted circumstances, make a SUBREG. Note this assumes that the target dependent files treat REG and SUBREG equally, including within GO_IF_LEGITIMATE_ADDRESS and in all the predicates since we never verify that replacing the original register with a SUBREG results in a recognizable insn. */ if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)) reg_map[m1->regno] = m->set_dest; else reg_map[m1->regno] = gen_lowpart_common (GET_MODE (m1->set_dest), m->set_dest); /* Get rid of the matching insn and prevent further processing of it. */ m1->done = 1; /* If library call, delete all insns. */ if ((temp = find_reg_note (m1->insn, REG_RETVAL, NULL_RTX))) delete_insn_chain (XEXP (temp, 0), m1->insn); else delete_insn (m1->insn); /* Any other movable that loads the same register MUST be moved. */ already_moved[m1->regno] = 1; /* The reg merged here is now invariant, if the reg it matches is invariant. */ if (! m->partial) { int i; for (i = 0; i < LOOP_REGNO_NREGS (regno, m1->set_dest); i++) regs->array[m1->regno+i].set_in_loop = 0; } } } else if (loop_dump_stream) fprintf (loop_dump_stream, "not desirable"); } else if (loop_dump_stream && !m->match) fprintf (loop_dump_stream, "not safe"); if (loop_dump_stream) fprintf (loop_dump_stream, "\n"); } if (new_start == 0) new_start = loop_start; /* Go through all the instructions in the loop, making all the register substitutions scheduled in REG_MAP. 
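For example, if pseudo 105 matched pseudo 98 (hypothetical numbers), reg_map[105] holds the register of 98, or a low-part SUBREG of it when the destination modes differ, and every remaining use of 105 in the loop body is rewritten; INSN_CODE is reset so the modified patterns are re-recognized.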
*/ for (p = new_start; p != loop_end; p = NEXT_INSN (p)) if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN || GET_CODE (p) == CALL_INSN) { replace_regs (PATTERN (p), reg_map, nregs, 0); replace_regs (REG_NOTES (p), reg_map, nregs, 0); INSN_CODE (p) = -1; } /* Clean up. */ free (reg_map); free (already_moved); } static void loop_movables_add (struct loop_movables *movables, struct movable *m) { if (movables->head == 0) movables->head = m; else movables->last->next = m; movables->last = m; } static void loop_movables_free (struct loop_movables *movables) { struct movable *m; struct movable *m_next; for (m = movables->head; m; m = m_next) { m_next = m->next; free (m); } } #if 0 /* Scan X and replace the address of any MEM in it with ADDR. REG is the address that MEM should have before the replacement. */ static void replace_call_address (rtx x, rtx reg, rtx addr) { enum rtx_code code; int i; const char *fmt; if (x == 0) return; code = GET_CODE (x); switch (code) { case PC: case CC0: case CONST_INT: case CONST_DOUBLE: case CONST: case SYMBOL_REF: case LABEL_REF: case REG: return; case SET: /* Short cut for very common case. */ replace_call_address (XEXP (x, 1), reg, addr); return; case CALL: /* Short cut for very common case. */ replace_call_address (XEXP (x, 0), reg, addr); return; case MEM: /* If this MEM uses a reg other than the one we expected, something is wrong. */ if (XEXP (x, 0) != reg) abort (); XEXP (x, 0) = addr; return; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') replace_call_address (XEXP (x, i), reg, addr); else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) replace_call_address (XVECEXP (x, i, j), reg, addr); } } } #endif /* Return the number of memory refs to addresses that vary in the rtx X. */ static int count_nonfixed_reads (const struct loop *loop, rtx x) { enum rtx_code code; int i; const char *fmt; int value; if (x == 0) return 0; code = GET_CODE (x); switch (code) { case PC: case CC0: case CONST_INT: case CONST_DOUBLE: case CONST: case SYMBOL_REF: case LABEL_REF: case REG: return 0; case MEM: return ((loop_invariant_p (loop, XEXP (x, 0)) != 1) + count_nonfixed_reads (loop, XEXP (x, 0))); default: break; } value = 0; fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') value += count_nonfixed_reads (loop, XEXP (x, i)); if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) value += count_nonfixed_reads (loop, XVECEXP (x, i, j)); } } return value; } /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed', `has_call', `has_nonconst_call', `has_volatile', `has_tablejump', `unknown_address_altered', `unknown_constant_address_altered', and `num_mem_sets' in LOOP. Also, fill in the array `mems' and the list `store_mems' in LOOP. */ static void prescan_loop (struct loop *loop) { int level = 1; rtx insn; struct loop_info *loop_info = LOOP_INFO (loop); rtx start = loop->start; rtx end = loop->end; /* The label after END. Jumping here is just like falling off the end of the loop. We use next_nonnote_insn instead of next_label as a hedge against the (pathological) case where some actual insn might end up between the two. 
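A jump to this label is treated below just like falling off the end, so by itself it does not make the loop appear to have multiple exit targets.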
*/ rtx exit_target = next_nonnote_insn (end); loop_info->has_indirect_jump = indirect_jump_in_function; loop_info->pre_header_has_call = 0; loop_info->has_call = 0; loop_info->has_nonconst_call = 0; loop_info->has_prefetch = 0; loop_info->has_volatile = 0; loop_info->has_tablejump = 0; loop_info->has_multiple_exit_targets = 0; loop->level = 1; loop_info->unknown_address_altered = 0; loop_info->unknown_constant_address_altered = 0; loop_info->store_mems = NULL_RTX; loop_info->first_loop_store_insn = NULL_RTX; loop_info->mems_idx = 0; loop_info->num_mem_sets = 0; /* If loop opts run twice, this was set on 1st pass for 2nd. */ loop_info->preconditioned = NOTE_PRECONDITIONED (end); for (insn = start; insn && GET_CODE (insn) != CODE_LABEL; insn = PREV_INSN (insn)) { if (GET_CODE (insn) == CALL_INSN) { loop_info->pre_header_has_call = 1; break; } } for (insn = NEXT_INSN (start); insn != NEXT_INSN (end); insn = NEXT_INSN (insn)) { switch (GET_CODE (insn)) { case NOTE: if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) { ++level; /* Count number of loops contained in this one. */ loop->level++; } else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END) --level; break; case CALL_INSN: if (! CONST_OR_PURE_CALL_P (insn)) { loop_info->unknown_address_altered = 1; loop_info->has_nonconst_call = 1; } else if (pure_call_p (insn)) loop_info->has_nonconst_call = 1; loop_info->has_call = 1; if (can_throw_internal (insn)) loop_info->has_multiple_exit_targets = 1; /* Calls initializing constant objects have CLOBBER of MEM /u in the attached FUNCTION_USAGE expression list, not accounted for by the code above. We should note these to avoid missing dependencies in later references. */ { rtx fusage_entry; for (fusage_entry = CALL_INSN_FUNCTION_USAGE (insn); fusage_entry; fusage_entry = XEXP (fusage_entry, 1)) { rtx fusage = XEXP (fusage_entry, 0); if (GET_CODE (fusage) == CLOBBER && MEM_P (XEXP (fusage, 0)) && RTX_UNCHANGING_P (XEXP (fusage, 0))) { note_stores (fusage, note_addr_stored, loop_info); if (! loop_info->first_loop_store_insn && loop_info->store_mems) loop_info->first_loop_store_insn = insn; } } } break; case JUMP_INSN: if (! loop_info->has_multiple_exit_targets) { rtx set = pc_set (insn); if (set) { rtx src = SET_SRC (set); rtx label1, label2; if (GET_CODE (src) == IF_THEN_ELSE) { label1 = XEXP (src, 1); label2 = XEXP (src, 2); } else { label1 = src; label2 = NULL_RTX; } do { if (label1 && label1 != pc_rtx) { if (GET_CODE (label1) != LABEL_REF) { /* Something tricky. */ loop_info->has_multiple_exit_targets = 1; break; } else if (XEXP (label1, 0) != exit_target && LABEL_OUTSIDE_LOOP_P (label1)) { /* A jump outside the current loop. */ loop_info->has_multiple_exit_targets = 1; break; } } label1 = label2; label2 = NULL_RTX; } while (label1); } else { /* A return, or something tricky. */ loop_info->has_multiple_exit_targets = 1; } } /* Fall through. */ case INSN: if (volatile_refs_p (PATTERN (insn))) loop_info->has_volatile = 1; if (GET_CODE (insn) == JUMP_INSN && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC || GET_CODE (PATTERN (insn)) == ADDR_VEC)) loop_info->has_tablejump = 1; note_stores (PATTERN (insn), note_addr_stored, loop_info); if (! loop_info->first_loop_store_insn && loop_info->store_mems) loop_info->first_loop_store_insn = insn; if (flag_non_call_exceptions && can_throw_internal (insn)) loop_info->has_multiple_exit_targets = 1; break; default: break; } } /* Now, rescan the loop, setting up the LOOP_MEMS array. */ if (/* An exception thrown by a called function might land us anywhere. */ ! 
loop_info->has_nonconst_call /* We don't want loads for MEMs moved to a location before the one at which their stack memory becomes allocated. (Note that this is not a problem for malloc, etc., since those require actual function calls. */ && ! current_function_calls_alloca /* There are ways to leave the loop other than falling off the end. */ && ! loop_info->has_multiple_exit_targets) for (insn = NEXT_INSN (start); insn != NEXT_INSN (end); insn = NEXT_INSN (insn)) for_each_rtx (&insn, insert_loop_mem, loop_info); /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so that loop_invariant_p and load_mems can use true_dependence to determine what is really clobbered. */ if (loop_info->unknown_address_altered) { rtx mem = gen_rtx_MEM (BLKmode, const0_rtx); loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems); } if (loop_info->unknown_constant_address_altered) { rtx mem = gen_rtx_MEM (BLKmode, const0_rtx); RTX_UNCHANGING_P (mem) = 1; loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems); } } /* Invalidate all loops containing LABEL. */ static void invalidate_loops_containing_label (rtx label) { struct loop *loop; for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer) loop->invalid = 1; } /* Scan the function looking for loops. Record the start and end of each loop. Also mark as invalid loops any loops that contain a setjmp or are branched to from outside the loop. */ static void find_and_verify_loops (rtx f, struct loops *loops) { rtx insn; rtx label; int num_loops; struct loop *current_loop; struct loop *next_loop; struct loop *loop; num_loops = loops->num; compute_luids (f, NULL_RTX, 0); /* If there are jumps to undefined labels, treat them as jumps out of any/all loops. This also avoids writing past end of tables when there are no loops. */ uid_loop[0] = NULL; /* Find boundaries of loops, mark which loops are contained within loops, and invalidate loops that have setjmp. */ num_loops = 0; current_loop = NULL; for (insn = f; insn; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == NOTE) switch (NOTE_LINE_NUMBER (insn)) { case NOTE_INSN_LOOP_BEG: next_loop = loops->array + num_loops; next_loop->num = num_loops; num_loops++; next_loop->start = insn; next_loop->outer = current_loop; current_loop = next_loop; break; case NOTE_INSN_LOOP_CONT: current_loop->cont = insn; break; case NOTE_INSN_LOOP_VTOP: current_loop->vtop = insn; break; case NOTE_INSN_LOOP_END: if (! current_loop) abort (); current_loop->end = insn; current_loop = current_loop->outer; break; default: break; } if (GET_CODE (insn) == CALL_INSN && find_reg_note (insn, REG_SETJMP, NULL)) { /* In this case, we must invalidate our current loop and any enclosing loop. */ for (loop = current_loop; loop; loop = loop->outer) { loop->invalid = 1; if (loop_dump_stream) fprintf (loop_dump_stream, "\nLoop at %d ignored due to setjmp.\n", INSN_UID (loop->start)); } } /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the enclosing loop, but this doesn't matter. */ uid_loop[INSN_UID (insn)] = current_loop; } /* Any loop containing a label used in an initializer must be invalidated, because it can be jumped into from anywhere. */ for (label = forced_labels; label; label = XEXP (label, 1)) invalidate_loops_containing_label (XEXP (label, 0)); /* Any loop containing a label used for an exception handler must be invalidated, because it can be jumped into from anywhere. */ for_each_eh_label (invalidate_loops_containing_label); /* Now scan all insn's in the function. 
If any JUMP_INSN branches into a loop that it is not contained within, that loop is marked invalid. If any INSN or CALL_INSN uses a label's address, then the loop containing that label is marked invalid, because it could be jumped into from anywhere. Also look for blocks of code ending in an unconditional branch that exits the loop. If such a block is surrounded by a conditional branch around the block, move the block elsewhere (see below) and invert the jump to point to the code block. This may eliminate a label in our loop and will simplify processing by both us and a possible second cse pass. */ for (insn = f; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { struct loop *this_loop = uid_loop[INSN_UID (insn)]; if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN) { rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX); if (note) invalidate_loops_containing_label (XEXP (note, 0)); } if (GET_CODE (insn) != JUMP_INSN) continue; mark_loop_jump (PATTERN (insn), this_loop); /* See if this is an unconditional branch outside the loop. */ if (this_loop && (GET_CODE (PATTERN (insn)) == RETURN || (any_uncondjump_p (insn) && onlyjump_p (insn) && (uid_loop[INSN_UID (JUMP_LABEL (insn))] != this_loop))) && get_max_uid () < max_uid_for_loop) { rtx p; rtx our_next = next_real_insn (insn); rtx last_insn_to_move = NEXT_INSN (insn); struct loop *dest_loop; struct loop *outer_loop = NULL; /* Go backwards until we reach the start of the loop, a label, or a JUMP_INSN. */ for (p = PREV_INSN (insn); GET_CODE (p) != CODE_LABEL && ! (GET_CODE (p) == NOTE && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) && GET_CODE (p) != JUMP_INSN; p = PREV_INSN (p)) ; /* Check for the case where we have a jump to an inner nested loop, and do not perform the optimization in that case. */ if (JUMP_LABEL (insn)) { dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))]; if (dest_loop) { for (outer_loop = dest_loop; outer_loop; outer_loop = outer_loop->outer) if (outer_loop == this_loop) break; } } /* Make sure that the target of P is within the current loop. */ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop) outer_loop = this_loop; /* If we stopped on a JUMP_INSN to the next insn after INSN, we have a block of code to try to move. We look backward and then forward from the target of INSN to find a BARRIER at the same loop depth as the target. If we find such a BARRIER, we make a new label for the start of the block, invert the jump in P and point it to that label, and move the block of code to the spot we found. */ if (! outer_loop && GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) != 0 /* Just ignore jumps to labels that were never emitted. These always indicate compilation errors. */ && INSN_UID (JUMP_LABEL (p)) != 0 && any_condjump_p (p) && onlyjump_p (p) && next_real_insn (JUMP_LABEL (p)) == our_next /* If it's not safe to move the sequence, then we mustn't try. */ && insns_safe_to_move_p (p, NEXT_INSN (insn), &last_insn_to_move)) { rtx target = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn (); struct loop *target_loop = uid_loop[INSN_UID (target)]; rtx loc, loc2; rtx tmp; /* Search for possible garbage past the conditional jumps and look for the last barrier. */ for (tmp = last_insn_to_move; tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp)) if (GET_CODE (tmp) == BARRIER) last_insn_to_move = tmp; for (loc = target; loc; loc = PREV_INSN (loc)) if (GET_CODE (loc) == BARRIER /* Don't move things inside a tablejump. 
*/ && ((loc2 = next_nonnote_insn (loc)) == 0 || GET_CODE (loc2) != CODE_LABEL || (loc2 = next_nonnote_insn (loc2)) == 0 || GET_CODE (loc2) != JUMP_INSN || (GET_CODE (PATTERN (loc2)) != ADDR_VEC && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC)) && uid_loop[INSN_UID (loc)] == target_loop) break; if (loc == 0) for (loc = target; loc; loc = NEXT_INSN (loc)) if (GET_CODE (loc) == BARRIER /* Don't move things inside a tablejump. */ && ((loc2 = next_nonnote_insn (loc)) == 0 || GET_CODE (loc2) != CODE_LABEL || (loc2 = next_nonnote_insn (loc2)) == 0 || GET_CODE (loc2) != JUMP_INSN || (GET_CODE (PATTERN (loc2)) != ADDR_VEC && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC)) && uid_loop[INSN_UID (loc)] == target_loop) break; if (loc) { rtx cond_label = JUMP_LABEL (p); rtx new_label = get_label_after (p); /* Ensure our label doesn't go away. */ LABEL_NUSES (cond_label)++; /* Verify that uid_loop is large enough and that we can invert P. */ if (invert_jump (p, new_label, 1)) { rtx q, r; /* If no suitable BARRIER was found, create a suitable one before TARGET. Since TARGET is a fall through path, we'll need to insert a jump around our block and add a BARRIER before TARGET. This creates an extra unconditional jump outside the loop. However, the benefits of removing rarely executed instructions from inside the loop usually outweighs the cost of the extra unconditional jump outside the loop. */ if (loc == 0) { rtx temp; temp = gen_jump (JUMP_LABEL (insn)); temp = emit_jump_insn_before (temp, target); JUMP_LABEL (temp) = JUMP_LABEL (insn); LABEL_NUSES (JUMP_LABEL (insn))++; loc = emit_barrier_before (target); } /* Include the BARRIER after INSN and copy the block after LOC. */ if (squeeze_notes (&new_label, &last_insn_to_move)) abort (); reorder_insns (new_label, last_insn_to_move, loc); /* All those insns are now in TARGET_LOOP. */ for (q = new_label; q != NEXT_INSN (last_insn_to_move); q = NEXT_INSN (q)) uid_loop[INSN_UID (q)] = target_loop; /* The label jumped to by INSN is no longer a loop exit. Unless INSN does not have a label (e.g., it is a RETURN insn), search loop->exit_labels to find its label_ref, and remove it. Also turn off LABEL_OUTSIDE_LOOP_P bit. */ if (JUMP_LABEL (insn)) { for (q = 0, r = this_loop->exit_labels; r; q = r, r = LABEL_NEXTREF (r)) if (XEXP (r, 0) == JUMP_LABEL (insn)) { LABEL_OUTSIDE_LOOP_P (r) = 0; if (q) LABEL_NEXTREF (q) = LABEL_NEXTREF (r); else this_loop->exit_labels = LABEL_NEXTREF (r); break; } for (loop = this_loop; loop && loop != target_loop; loop = loop->outer) loop->exit_count--; /* If we didn't find it, then something is wrong. */ if (! r) abort (); } /* P is now a jump outside the loop, so it must be put in loop->exit_labels, and marked as such. The easiest way to do this is to just call mark_loop_jump again for P. */ mark_loop_jump (PATTERN (p), this_loop); /* If INSN now jumps to the insn after it, delete INSN. */ if (JUMP_LABEL (insn) != 0 && (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))) delete_related_insns (insn); } /* Continue the loop after where the conditional branch used to jump, since the only branch insn in the block (if it still remains) is an inter-loop branch and hence needs no processing. */ insn = NEXT_INSN (cond_label); if (--LABEL_NUSES (cond_label) == 0) delete_related_insns (cond_label); /* This loop will be continued with NEXT_INSN (insn). */ insn = PREV_INSN (insn); } } } } } /* If any label in X jumps to a loop different from LOOP_NUM and any of the loops it is contained in, mark the target loop invalid. 
For speed, we assume that X is part of a pattern of a JUMP_INSN. */ static void mark_loop_jump (rtx x, struct loop *loop) { struct loop *dest_loop; struct loop *outer_loop; int i; switch (GET_CODE (x)) { case PC: case USE: case CLOBBER: case REG: case MEM: case CONST_INT: case CONST_DOUBLE: case RETURN: return; case CONST: /* There could be a label reference in here. */ mark_loop_jump (XEXP (x, 0), loop); return; case PLUS: case MINUS: case MULT: mark_loop_jump (XEXP (x, 0), loop); mark_loop_jump (XEXP (x, 1), loop); return; case LO_SUM: /* This may refer to a LABEL_REF or SYMBOL_REF. */ mark_loop_jump (XEXP (x, 1), loop); return; case SIGN_EXTEND: case ZERO_EXTEND: mark_loop_jump (XEXP (x, 0), loop); return; case LABEL_REF: dest_loop = uid_loop[INSN_UID (XEXP (x, 0))]; /* Link together all labels that branch outside the loop. This is used by final_[bg]iv_value and the loop unrolling code. Also mark this LABEL_REF so we know that this branch should predict false. */ /* A check to make sure the label is not in an inner nested loop, since this does not count as a loop exit. */ if (dest_loop) { for (outer_loop = dest_loop; outer_loop; outer_loop = outer_loop->outer) if (outer_loop == loop) break; } else outer_loop = NULL; if (loop && ! outer_loop) { LABEL_OUTSIDE_LOOP_P (x) = 1; LABEL_NEXTREF (x) = loop->exit_labels; loop->exit_labels = x; for (outer_loop = loop; outer_loop && outer_loop != dest_loop; outer_loop = outer_loop->outer) outer_loop->exit_count++; } /* If this is inside a loop, but not in the current loop or one enclosed by it, it invalidates at least one loop. */ if (! dest_loop) return; /* We must invalidate every nested loop containing the target of this label, except those that also contain the jump insn. */ for (; dest_loop; dest_loop = dest_loop->outer) { /* Stop when we reach a loop that also contains the jump insn. */ for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer) if (dest_loop == outer_loop) return; /* If we get here, we know we need to invalidate a loop. */ if (loop_dump_stream && ! dest_loop->invalid) fprintf (loop_dump_stream, "\nLoop at %d ignored due to multiple entry points.\n", INSN_UID (dest_loop->start)); dest_loop->invalid = 1; } return; case SET: /* If this is not setting pc, ignore. */ if (SET_DEST (x) == pc_rtx) mark_loop_jump (SET_SRC (x), loop); return; case IF_THEN_ELSE: mark_loop_jump (XEXP (x, 1), loop); mark_loop_jump (XEXP (x, 2), loop); return; case PARALLEL: case ADDR_VEC: for (i = 0; i < XVECLEN (x, 0); i++) mark_loop_jump (XVECEXP (x, 0, i), loop); return; case ADDR_DIFF_VEC: for (i = 0; i < XVECLEN (x, 1); i++) mark_loop_jump (XVECEXP (x, 1, i), loop); return; default: /* Strictly speaking this is not a jump into the loop, only a possible jump out of the loop. However, we have no way to link the destination of this jump onto the list of exit labels. To be safe we mark this loop and any containing loops as invalid. */ if (loop) { for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer) { if (loop_dump_stream && ! outer_loop->invalid) fprintf (loop_dump_stream, "\nLoop at %d ignored due to unknown exit jump.\n", INSN_UID (outer_loop->start)); outer_loop->invalid = 1; } } return; } } /* Return nonzero if there is a label in the range from insn INSN to and including the insn whose luid is END INSN must have an assigned luid (i.e., it must not have been previously created by loop.c). 
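*/

/* Editorial sketch, not part of loop.c: the containment walk used by
   mark_loop_jump above when a label reference targets another loop, reduced
   to plain parent pointers.  Every loop around TARGET that does not also
   enclose SOURCE is entered from outside by the jump and must be
   invalidated.  The type and names are hypothetical.  */
#if 0
struct toy_loop
{
  struct toy_loop *outer;
  int invalid;
};

static void
invalidate_entered_loops_example (struct toy_loop *source,
                                  struct toy_loop *target)
{
  struct toy_loop *dest, *out;

  for (dest = target; dest; dest = dest->outer)
    {
      /* Stop at the first loop that also contains the jump insn;
         it and everything enclosing it is entered normally.  */
      for (out = source; out; out = out->outer)
        if (out == dest)
          return;
      /* Otherwise this loop is entered from outside: invalidate it.  */
      dest->invalid = 1;
    }
}
#endif

/*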
*/ static int labels_in_range_p (rtx insn, int end) { while (insn && LOOP_INSN_LUID (insn) <= end) { if (GET_CODE (insn) == CODE_LABEL) return 1; insn = NEXT_INSN (insn); } return 0; } /* Record that a memory reference X is being set. */ static void note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { struct loop_info *loop_info = data; if (x == 0 || !MEM_P (x)) return; /* Count number of memory writes. This affects heuristics in strength_reduce. */ loop_info->num_mem_sets++; /* BLKmode MEM means all memory is clobbered. */ if (GET_MODE (x) == BLKmode) { if (RTX_UNCHANGING_P (x)) loop_info->unknown_constant_address_altered = 1; else loop_info->unknown_address_altered = 1; return; } loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x, loop_info->store_mems); } /* X is a value modified by an INSN that references a biv inside a loop exit test (ie, X is somehow related to the value of the biv). If X is a pseudo that is used more than once, then the biv is (effectively) used more than once. DATA is a pointer to a loop_regs structure. */ static void note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data) { struct loop_regs *regs = (struct loop_regs *) data; if (x == 0) return; while (GET_CODE (x) == STRICT_LOW_PART || GET_CODE (x) == SIGN_EXTRACT || GET_CODE (x) == ZERO_EXTRACT || GET_CODE (x) == SUBREG) x = XEXP (x, 0); if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER) return; /* If we do not have usage information, or if we know the register is used more than once, note that fact for check_dbra_loop. */ if (REGNO (x) >= max_reg_before_loop || ! regs->array[REGNO (x)].single_usage || regs->array[REGNO (x)].single_usage == const0_rtx) regs->multiple_uses = 1; } /* Return nonzero if the rtx X is invariant over the current loop. The value is 2 if we refer to something only conditionally invariant. A memory ref is invariant if it is not volatile and does not conflict with anything stored in `loop_info->store_mems'. */ int loop_invariant_p (const struct loop *loop, rtx x) { struct loop_info *loop_info = LOOP_INFO (loop); struct loop_regs *regs = LOOP_REGS (loop); int i; enum rtx_code code; const char *fmt; int conditional = 0; rtx mem_list_entry; if (x == 0) return 1; code = GET_CODE (x); switch (code) { case CONST_INT: case CONST_DOUBLE: case SYMBOL_REF: case CONST: return 1; case LABEL_REF: /* A LABEL_REF is normally invariant, however, if we are unrolling loops, and this label is inside the loop, then it isn't invariant. This is because each unrolled copy of the loop body will have a copy of this label. If this was invariant, then an insn loading the address of this label into a register might get moved outside the loop, and then each loop body would end up using the same label. We don't know the loop bounds here though, so just fail for all labels. */ if (flag_old_unroll_loops) return 0; else return 1; case PC: case CC0: case UNSPEC_VOLATILE: return 0; case REG: /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid since the reg might be set by initialization within the loop. */ if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx || x == arg_pointer_rtx || x == pic_offset_table_rtx) && ! current_function_has_nonlocal_goto) return 1; if (LOOP_INFO (loop)->has_call && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)]) return 0; /* Out-of-range regs can occur when we are called from unrolling. These registers created by the unroller are set in the loop, hence are never invariant. 
Other out-of-range regs can be generated by load_mems; those that are written to in the loop are not invariant, while those that are not written to are invariant. It would be easy for load_mems to set n_times_set correctly for these registers, however, there is no easy way to distinguish them from registers created by the unroller. */ if (REGNO (x) >= (unsigned) regs->num) return 0; if (regs->array[REGNO (x)].set_in_loop < 0) return 2; return regs->array[REGNO (x)].set_in_loop == 0; case MEM: /* Volatile memory references must be rejected. Do this before checking for read-only items, so that volatile read-only items will be rejected also. */ if (MEM_VOLATILE_P (x)) return 0; /* See if there is any dependence between a store and this load. */ mem_list_entry = loop_info->store_mems; while (mem_list_entry) { if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode, x, rtx_varies_p)) return 0; mem_list_entry = XEXP (mem_list_entry, 1); } /* It's not invalidated by a store in memory but we must still verify the address is invariant. */ break; case ASM_OPERANDS: /* Don't mess with insns declared volatile. */ if (MEM_VOLATILE_P (x)) return 0; break; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { int tem = loop_invariant_p (loop, XEXP (x, i)); if (tem == 0) return 0; if (tem == 2) conditional = 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) { int tem = loop_invariant_p (loop, XVECEXP (x, i, j)); if (tem == 0) return 0; if (tem == 2) conditional = 1; } } } return 1 + conditional; } /* Return nonzero if all the insns in the loop that set REG are INSN and the immediately following insns, and if each of those insns sets REG in an invariant way (not counting uses of REG in them). The value is 2 if some of these insns are only conditionally invariant. We assume that INSN itself is the first set of REG and that its source is invariant. */ static int consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets, rtx insn) { struct loop_regs *regs = LOOP_REGS (loop); rtx p = insn; unsigned int regno = REGNO (reg); rtx temp; /* Number of sets we have to insist on finding after INSN. */ int count = n_sets - 1; int old = regs->array[regno].set_in_loop; int value = 0; int this; /* If N_SETS hit the limit, we can't rely on its value. */ if (n_sets == 127) return 0; regs->array[regno].set_in_loop = 0; while (count > 0) { enum rtx_code code; rtx set; p = NEXT_INSN (p); code = GET_CODE (p); /* If library call, skip to end of it. */ if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) p = XEXP (temp, 0); this = 0; if (code == INSN && (set = single_set (p)) && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) == regno) { this = loop_invariant_p (loop, SET_SRC (set)); if (this != 0) value |= this; else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))) { /* If this is a libcall, then any invariant REG_EQUAL note is OK. If this is an ordinary insn, then only CONSTANT_P REG_EQUAL notes are OK. */ this = (CONSTANT_P (XEXP (temp, 0)) || (find_reg_note (p, REG_RETVAL, NULL_RTX) && loop_invariant_p (loop, XEXP (temp, 0)))); if (this != 0) value |= this; } } if (this != 0) count--; else if (code != NOTE) { regs->array[regno].set_in_loop = old; return 0; } } regs->array[regno].set_in_loop = old; /* If loop_invariant_p ever returned 2, we return 2. */ return 1 + (value & 2); } /* Look at all uses (not sets) of registers in X. 
For each, if it is the single use, set USAGE[REGNO] to INSN; if there was a previous use in a different insn, set USAGE[REGNO] to const0_rtx. */ static void find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x) { enum rtx_code code = GET_CODE (x); const char *fmt = GET_RTX_FORMAT (code); int i, j; if (code == REG) regs->array[REGNO (x)].single_usage = (regs->array[REGNO (x)].single_usage != 0 && regs->array[REGNO (x)].single_usage != insn) ? const0_rtx : insn; else if (code == SET) { /* Don't count SET_DEST if it is a REG; otherwise count things in SET_DEST because if a register is partially modified, it won't show up as a potential movable so we don't care how USAGE is set for it. */ if (!REG_P (SET_DEST (x))) find_single_use_in_loop (regs, insn, SET_DEST (x)); find_single_use_in_loop (regs, insn, SET_SRC (x)); } else for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e' && XEXP (x, i) != 0) find_single_use_in_loop (regs, insn, XEXP (x, i)); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) find_single_use_in_loop (regs, insn, XVECEXP (x, i, j)); } } /* Count and record any set in X which is contained in INSN. Update REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set in X. */ static void count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set) { if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0))) /* Don't move a reg that has an explicit clobber. It's not worth the pain to try to do it correctly. */ regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1; if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER) { rtx dest = SET_DEST (x); while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); if (REG_P (dest)) { int i; int regno = REGNO (dest); for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++) { /* If this is the first setting of this reg in current basic block, and it was set before, it must be set in two basic blocks, so it cannot be moved out of the loop. */ if (regs->array[regno].set_in_loop > 0 && last_set[regno] == 0) regs->array[regno+i].may_not_optimize = 1; /* If this is not first setting in current basic block, see if reg was used in between previous one and this. If so, neither one can be moved. */ if (last_set[regno] != 0 && reg_used_between_p (dest, last_set[regno], insn)) regs->array[regno+i].may_not_optimize = 1; if (regs->array[regno+i].set_in_loop < 127) ++regs->array[regno+i].set_in_loop; last_set[regno+i] = insn; } } } } /* Given a loop that is bounded by LOOP->START and LOOP->END and that is entered at LOOP->SCAN_START, return 1 if the register set in SET contained in insn INSN is used by any insn that precedes INSN in cyclic order starting from the loop entry point. We don't want to use LOOP_INSN_LUID here because if we restrict INSN to those that have a valid LOOP_INSN_LUID, it means we cannot move an invariant out from an inner loop past two loops. */ static int loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn) { rtx reg = SET_DEST (set); rtx p; /* Scan forward checking for register usage. If we hit INSN, we are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */ for (p = loop->scan_start; p != insn; p = NEXT_INSN (p)) { if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p))) return 1; if (p == loop->end) p = loop->start; } return 0; } /* Information we collect about arrays that we might want to prefetch. 
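Each entry describes one candidate prefetch stream: where it starts, how far it advances per iteration, how many bytes of each stride the loop actually touches, and how many prefetch insns will be issued inside and ahead of the loop; these fields feed the density and ahead-distance decisions in emit_prefetch_instructions below.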
*/ struct prefetch_info { struct iv_class *class; /* Class this prefetch is based on. */ struct induction *giv; /* GIV this prefetch is based on. */ rtx base_address; /* Start prefetching from this address plus index. */ HOST_WIDE_INT index; HOST_WIDE_INT stride; /* Prefetch stride in bytes in each iteration. */ unsigned int bytes_accessed; /* Sum of sizes of all accesses to this prefetch area in one iteration. */ unsigned int total_bytes; /* Total bytes loop will access in this block. This is set only for loops with known iteration counts and is 0xffffffff otherwise. */ int prefetch_in_loop; /* Number of prefetch insns in loop. */ int prefetch_before_loop; /* Number of prefetch insns before loop. */ unsigned int write : 1; /* 1 for read/write prefetches. */ }; /* Data used by check_store function. */ struct check_store_data { rtx mem_address; int mem_write; }; static void check_store (rtx, rtx, void *); static void emit_prefetch_instructions (struct loop *); static int rtx_equal_for_prefetch_p (rtx, rtx); /* Set mem_write when mem_address is found. Used as callback to note_stores. */ static void check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data) { struct check_store_data *d = (struct check_store_data *) data; if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0))) d->mem_write = 1; } /* Like rtx_equal_p, but attempts to swap commutative operands. This is important to get some addresses combined. Later more sophisticated transformations can be added when necessary. ??? Same trick with swapping operand is done at several other places. It can be nice to develop some common way to handle this. */ static int rtx_equal_for_prefetch_p (rtx x, rtx y) { int i; int j; enum rtx_code code = GET_CODE (x); const char *fmt; if (x == y) return 1; if (code != GET_CODE (y)) return 0; if (COMMUTATIVE_ARITH_P (x)) { return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0)) && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1))) || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1)) && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0)))); } /* Compare the elements. If any pair of corresponding elements fails to match, return 0 for the whole thing. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { switch (fmt[i]) { case 'w': if (XWINT (x, i) != XWINT (y, i)) return 0; break; case 'i': if (XINT (x, i) != XINT (y, i)) return 0; break; case 'E': /* Two vectors must have the same length. */ if (XVECLEN (x, i) != XVECLEN (y, i)) return 0; /* And the corresponding elements must match. */ for (j = 0; j < XVECLEN (x, i); j++) if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0) return 0; break; case 'e': if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0) return 0; break; case 's': if (strcmp (XSTR (x, i), XSTR (y, i))) return 0; break; case 'u': /* These are just backpointers, so they don't matter. */ break; case '0': break; /* It is believed that rtx's at this level will never contain anything but integers and other rtx's, except for within LABEL_REFs and SYMBOL_REFs. */ default: abort (); } } return 1; } /* Remove constant addition value from the expression X (when present) and return it. */ static HOST_WIDE_INT remove_constant_addition (rtx *x) { HOST_WIDE_INT addval = 0; rtx exp = *x; /* Avoid clobbering a shared CONST expression. 
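For a (const (plus (symbol_ref X) (const_int 12))) argument, for instance, 12 is returned and the caller's pointer is redirected at the symbol_ref, leaving the shared CONST itself untouched; a bare (plus (reg) (const_int 8)) handled further down is instead rewritten in place and yields 8 with just the reg left behind.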
*/ if (GET_CODE (exp) == CONST) { if (GET_CODE (XEXP (exp, 0)) == PLUS && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT) { *x = XEXP (XEXP (exp, 0), 0); return INTVAL (XEXP (XEXP (exp, 0), 1)); } return 0; } if (GET_CODE (exp) == CONST_INT) { addval = INTVAL (exp); *x = const0_rtx; } /* For plus expression recurse on ourself. */ else if (GET_CODE (exp) == PLUS) { addval += remove_constant_addition (&XEXP (exp, 0)); addval += remove_constant_addition (&XEXP (exp, 1)); /* In case our parameter was constant, remove extra zero from the expression. */ if (XEXP (exp, 0) == const0_rtx) *x = XEXP (exp, 1); else if (XEXP (exp, 1) == const0_rtx) *x = XEXP (exp, 0); } return addval; } /* Attempt to identify accesses to arrays that are most likely to cause cache misses, and emit prefetch instructions a few prefetch blocks forward. To detect the arrays we use the GIV information that was collected by the strength reduction pass. The prefetch instructions are generated after the GIV information is done and before the strength reduction process. The new GIVs are injected into the strength reduction tables, so the prefetch addresses are optimized as well. GIVs are split into base address, stride, and constant addition values. GIVs with the same address, stride and close addition values are combined into a single prefetch. Also writes to GIVs are detected, so that prefetch for write instructions can be used for the block we write to, on machines that support write prefetches. Several heuristics are used to determine when to prefetch. They are controlled by defined symbols that can be overridden for each target. */ static void emit_prefetch_instructions (struct loop *loop) { int num_prefetches = 0; int num_real_prefetches = 0; int num_real_write_prefetches = 0; int num_prefetches_before = 0; int num_write_prefetches_before = 0; int ahead = 0; int i; struct iv_class *bl; struct induction *iv; struct prefetch_info info[MAX_PREFETCHES]; struct loop_ivs *ivs = LOOP_IVS (loop); if (!HAVE_prefetch) return; /* Consider only loops w/o calls. When a call is done, the loop is probably slow enough to read the memory. */ if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call) { if (loop_dump_stream) fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n"); return; } /* Don't prefetch in loops known to have few iterations. */ if (PREFETCH_NO_LOW_LOOPCNT && LOOP_INFO (loop)->n_iterations && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT) { if (loop_dump_stream) fprintf (loop_dump_stream, "Prefetch: ignoring loop: not enough iterations.\n"); return; } /* Search all induction variables and pick those interesting for the prefetch machinery. */ for (bl = ivs->list; bl; bl = bl->next) { struct induction *biv = bl->biv, *biv1; int basestride = 0; biv1 = biv; /* Expect all BIVs to be executed in each iteration. This makes our analysis more conservative. */ while (biv1) { /* Discard non-constant additions that we can't handle well yet, and BIVs that are executed multiple times; such BIVs ought to be handled in the nested loop. We accept not_every_iteration BIVs, since these only result in larger strides and make our heuristics more conservative. 
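For instance, a BIV stepped by two separate constant increments of 4 in each iteration accumulates a basestride of 8 in the walk below.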
*/ if (GET_CODE (biv->add_val) != CONST_INT) { if (loop_dump_stream) { fprintf (loop_dump_stream, "Prefetch: ignoring biv %d: non-constant addition at insn %d:", REGNO (biv->src_reg), INSN_UID (biv->insn)); print_rtl (loop_dump_stream, biv->add_val); fprintf (loop_dump_stream, "\n"); } break; } if (biv->maybe_multiple) { if (loop_dump_stream) { fprintf (loop_dump_stream, "Prefetch: ignoring biv %d: maybe_multiple at insn %i:", REGNO (biv->src_reg), INSN_UID (biv->insn)); print_rtl (loop_dump_stream, biv->add_val); fprintf (loop_dump_stream, "\n"); } break; } basestride += INTVAL (biv1->add_val); biv1 = biv1->next_iv; } if (biv1 || !basestride) continue; for (iv = bl->giv; iv; iv = iv->next_iv) { rtx address; rtx temp; HOST_WIDE_INT index = 0; int add = 1; HOST_WIDE_INT stride = 0; int stride_sign = 1; struct check_store_data d; const char *ignore_reason = NULL; int size = GET_MODE_SIZE (GET_MODE (iv)); /* See whether an induction variable is interesting to us and if not, report the reason. */ if (iv->giv_type != DEST_ADDR) ignore_reason = "giv is not a destination address"; /* We are interested only in constant stride memory references in order to be able to compute density easily. */ else if (GET_CODE (iv->mult_val) != CONST_INT) ignore_reason = "stride is not constant"; else { stride = INTVAL (iv->mult_val) * basestride; if (stride < 0) { stride = -stride; stride_sign = -1; } /* On some targets, reversed order prefetches are not worthwhile. */ if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0) ignore_reason = "reversed order stride"; /* Prefetch of accesses with an extreme stride might not be worthwhile, either. */ else if (PREFETCH_NO_EXTREME_STRIDE && stride > PREFETCH_EXTREME_STRIDE) ignore_reason = "extreme stride"; /* Ignore GIVs with varying add values; we can't predict the value for the next iteration. */ else if (!loop_invariant_p (loop, iv->add_val)) ignore_reason = "giv has varying add value"; /* Ignore GIVs in the nested loops; they ought to have been handled already. */ else if (iv->maybe_multiple) ignore_reason = "giv is in nested loop"; } if (ignore_reason != NULL) { if (loop_dump_stream) fprintf (loop_dump_stream, "Prefetch: ignoring giv at %d: %s.\n", INSN_UID (iv->insn), ignore_reason); continue; } /* Determine the pointer to the basic array we are examining. It is the sum of the BIV's initial value and the GIV's add_val. */ address = copy_rtx (iv->add_val); temp = copy_rtx (bl->initial_value); address = simplify_gen_binary (PLUS, Pmode, temp, address); index = remove_constant_addition (&address); d.mem_write = 0; d.mem_address = *iv->location; /* When the GIV is not always executed, we might be better off by not dirtying the cache pages. */ if (PREFETCH_CONDITIONAL || iv->always_executed) note_stores (PATTERN (iv->insn), check_store, &d); else { if (loop_dump_stream) fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n", INSN_UID (iv->insn), "in conditional code."); continue; } /* Attempt to find another prefetch to the same array and see if we can merge this one. */ for (i = 0; i < num_prefetches; i++) if (rtx_equal_for_prefetch_p (address, info[i].base_address) && stride == info[i].stride) { /* In case both access same array (same location just with small difference in constant indexes), merge the prefetches. Just do the later and the earlier will get prefetched from previous iteration. The artificial threshold should not be too small, but also not bigger than small portion of memory usually traversed by single loop. 
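Whichever of the two constant offsets is smaller is dropped: when the new access is the later one it replaces the recorded index, and in either case its size and write flag are folded into the existing entry.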
*/ if (index >= info[i].index && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE) { info[i].write |= d.mem_write; info[i].bytes_accessed += size; info[i].index = index; info[i].giv = iv; info[i].class = bl; info[num_prefetches].base_address = address; add = 0; break; } if (index < info[i].index && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE) { info[i].write |= d.mem_write; info[i].bytes_accessed += size; add = 0; break; } } /* Merging failed. */ if (add) { info[num_prefetches].giv = iv; info[num_prefetches].class = bl; info[num_prefetches].index = index; info[num_prefetches].stride = stride; info[num_prefetches].base_address = address; info[num_prefetches].write = d.mem_write; info[num_prefetches].bytes_accessed = size; num_prefetches++; if (num_prefetches >= MAX_PREFETCHES) { if (loop_dump_stream) fprintf (loop_dump_stream, "Maximal number of prefetches exceeded.\n"); return; } } } } for (i = 0; i < num_prefetches; i++) { int density; /* Attempt to calculate the total number of bytes fetched by all iterations of the loop. Avoid overflow. */ if (LOOP_INFO (loop)->n_iterations && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride) >= LOOP_INFO (loop)->n_iterations)) info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations; else info[i].total_bytes = 0xffffffff; density = info[i].bytes_accessed * 100 / info[i].stride; /* Prefetch might be worthwhile only when the loads/stores are dense. */ if (PREFETCH_ONLY_DENSE_MEM) if (density * 256 > PREFETCH_DENSE_MEM * 100 && (info[i].total_bytes / PREFETCH_BLOCK >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN)) { info[i].prefetch_before_loop = 1; info[i].prefetch_in_loop = (info[i].total_bytes / PREFETCH_BLOCK > PREFETCH_BLOCKS_BEFORE_LOOP_MAX); } else { info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0; if (loop_dump_stream) fprintf (loop_dump_stream, "Prefetch: ignoring giv at %d: %d%% density is too low.\n", INSN_UID (info[i].giv->insn), density); } else info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1; /* Find how many prefetch instructions we'll use within the loop. */ if (info[i].prefetch_in_loop != 0) { info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1) / PREFETCH_BLOCK); num_real_prefetches += info[i].prefetch_in_loop; if (info[i].write) num_real_write_prefetches += info[i].prefetch_in_loop; } } /* Determine how many iterations ahead to prefetch within the loop, based on how many prefetches we currently expect to do within the loop. */ if (num_real_prefetches != 0) { if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0) { if (loop_dump_stream) fprintf (loop_dump_stream, "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n", SIMULTANEOUS_PREFETCHES, num_real_prefetches); num_real_prefetches = 0, num_real_write_prefetches = 0; } } /* We'll also use AHEAD to determine how many prefetch instructions to emit before a loop, so don't leave it zero. */ if (ahead == 0) ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX; for (i = 0; i < num_prefetches; i++) { /* Update if we've decided not to prefetch anything within the loop. */ if (num_real_prefetches == 0) info[i].prefetch_in_loop = 0; /* Find how many prefetch instructions we'll use before the loop. 
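The count is total_bytes / PREFETCH_BLOCK clamped to AHEAD, so a loop that only touches a handful of cache blocks gets exactly that many pre-loop prefetches, while larger loops get at most AHEAD of them.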
*/ if (info[i].prefetch_before_loop != 0) { int n = info[i].total_bytes / PREFETCH_BLOCK; if (n > ahead) n = ahead; info[i].prefetch_before_loop = n; num_prefetches_before += n; if (info[i].write) num_write_prefetches_before += n; } if (loop_dump_stream) { if (info[i].prefetch_in_loop == 0 && info[i].prefetch_before_loop == 0) continue; fprintf (loop_dump_stream, "Prefetch insn: %d", INSN_UID (info[i].giv->insn)); fprintf (loop_dump_stream, "; in loop: %d; before: %d; %s\n", info[i].prefetch_in_loop, info[i].prefetch_before_loop, info[i].write ? "read/write" : "read only"); fprintf (loop_dump_stream, " density: %d%%; bytes_accessed: %u; total_bytes: %u\n", (int) (info[i].bytes_accessed * 100 / info[i].stride), info[i].bytes_accessed, info[i].total_bytes); fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ", info[i].index, info[i].stride); print_rtl (loop_dump_stream, info[i].base_address); fprintf (loop_dump_stream, "\n"); } } if (num_real_prefetches + num_prefetches_before > 0) { /* Record that this loop uses prefetch instructions. */ LOOP_INFO (loop)->has_prefetch = 1; if (loop_dump_stream) { fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n", num_real_prefetches, num_real_write_prefetches); fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n", num_prefetches_before, num_write_prefetches_before); } } for (i = 0; i < num_prefetches; i++) { int y; for (y = 0; y < info[i].prefetch_in_loop; y++) { rtx loc = copy_rtx (*info[i].giv->location); rtx insn; int bytes_ahead = PREFETCH_BLOCK * (ahead + y); rtx before_insn = info[i].giv->insn; rtx prev_insn = PREV_INSN (info[i].giv->insn); rtx seq; /* We can save some effort by offsetting the address on architectures with offsettable memory references. */ if (offsettable_address_p (0, VOIDmode, loc)) loc = plus_constant (loc, bytes_ahead); else { rtx reg = gen_reg_rtx (Pmode); loop_iv_add_mult_emit_before (loop, loc, const1_rtx, GEN_INT (bytes_ahead), reg, 0, before_insn); loc = reg; } start_sequence (); /* Make sure the address operand is valid for prefetch. */ if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate) (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode)) loc = force_reg (Pmode, loc); emit_insn (gen_prefetch (loc, GEN_INT (info[i].write), GEN_INT (3))); seq = get_insns (); end_sequence (); emit_insn_before (seq, before_insn); /* Check all insns emitted and record the new GIV information. */ insn = NEXT_INSN (prev_insn); while (insn != before_insn) { insn = check_insn_for_givs (loop, insn, info[i].giv->always_executed, info[i].giv->maybe_multiple); insn = NEXT_INSN (insn); } } if (PREFETCH_BEFORE_LOOP) { /* Emit insns before the loop to fetch the first cache lines or, if we're not prefetching within the loop, everything we expect to need. */ for (y = 0; y < info[i].prefetch_before_loop; y++) { rtx reg = gen_reg_rtx (Pmode); rtx loop_start = loop->start; rtx init_val = info[i].class->initial_value; rtx add_val = simplify_gen_binary (PLUS, Pmode, info[i].giv->add_val, GEN_INT (y * PREFETCH_BLOCK)); /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a non-constant INIT_VAL to have the same mode as REG, which in this case we know to be Pmode. 
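When INIT_VAL is a non-constant whose mode is not Pmode, the code below converts it with convert_to_mode and emits that conversion ahead of the loop; constants are left alone, since they are acceptable as-is.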
*/ if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val)) { rtx seq; start_sequence (); init_val = convert_to_mode (Pmode, init_val, 0); seq = get_insns (); end_sequence (); loop_insn_emit_before (loop, 0, loop_start, seq); } loop_iv_add_mult_emit_before (loop, init_val, info[i].giv->mult_val, add_val, reg, 0, loop_start); emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write), GEN_INT (3)), loop_start); } } } return; } /* Communication with routines called via `note_stores'. */ static rtx note_insn; /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */ static rtx addr_placeholder; /* ??? Unfinished optimizations, and possible future optimizations, for the strength reduction code. */ /* ??? The interaction of biv elimination, and recognition of 'constant' bivs, may cause problems. */ /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause performance problems. Perhaps don't eliminate things that can be combined with an addressing mode. Find all givs that have the same biv, mult_val, and add_val; then for each giv, check to see if its only use dies in a following memory address. If so, generate a new memory address and check to see if it is valid. If it is valid, then store the modified memory address, otherwise, mark the giv as not done so that it will get its own iv. */ /* ??? Could try to optimize branches when it is known that a biv is always positive. */ /* ??? When replace a biv in a compare insn, we should replace with closest giv so that an optimized branch can still be recognized by the combiner, e.g. the VAX acb insn. */ /* ??? Many of the checks involving uid_luid could be simplified if regscan was rerun in loop_optimize whenever a register was added or moved. Also, some of the optimizations could be a little less conservative. */ /* Scan the loop body and call FNCALL for each insn. In the addition to the LOOP and INSN parameters pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the callback. NOT_EVERY_ITERATION is 1 if current insn is not known to be executed at least once for every loop iteration except for the last one. MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every loop iteration. */ void for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall) { int not_every_iteration = 0; int maybe_multiple = 0; int past_loop_latch = 0; int loop_depth = 0; rtx p; /* If loop_scan_start points to the loop exit test, we have to be wary of subversive use of gotos inside expression statements. */ if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start)) maybe_multiple = back_branch_in_range_p (loop, loop->scan_start); /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */ for (p = next_insn_in_loop (loop, loop->scan_start); p != NULL_RTX; p = next_insn_in_loop (loop, p)) { p = fncall (loop, p, not_every_iteration, maybe_multiple); /* Past CODE_LABEL, we get to insns that may be executed multiple times. The only way we can be sure that they can't is if every jump insn between here and the end of the loop either returns, exits the loop, is a jump to a location that is still behind the label, or is a jump to the loop start. 
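Concretely, the scan below walks forward from the label and sets MAYBE_MULTIPLE as soon as it meets a jump it cannot prove harmless, the typical case being a conditional branch whose target lies at or before the label; a return, a branch to the scan start, or a forward branch past the label is tolerated.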
*/ if (GET_CODE (p) == CODE_LABEL) { rtx insn = p; maybe_multiple = 0; while (1) { insn = NEXT_INSN (insn); if (insn == loop->scan_start) break; if (insn == loop->end) { if (loop->top != 0) insn = loop->top; else break; if (insn == loop->scan_start) break; } if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) != RETURN && (!any_condjump_p (insn) || (JUMP_LABEL (insn) != 0 && JUMP_LABEL (insn) != loop->scan_start && !loop_insn_first_p (p, JUMP_LABEL (insn))))) { maybe_multiple = 1; break; } } } /* Past a jump, we get to insns for which we can't count on whether they will be executed during each iteration. */ /* This code appears twice in strength_reduce. There is also similar code in scan_loop. */ if (GET_CODE (p) == JUMP_INSN /* If we enter the loop in the middle, and scan around to the beginning, don't set not_every_iteration for that. This can be any kind of jump, since we want to know if insns will be executed if the loop is executed. */ && !(JUMP_LABEL (p) == loop->top && ((NEXT_INSN (NEXT_INSN (p)) == loop->end && any_uncondjump_p (p)) || (NEXT_INSN (p) == loop->end && any_condjump_p (p))))) { rtx label = 0; /* If this is a jump outside the loop, then it also doesn't matter. Check to see if the target of this branch is on the loop->exits_labels list. */ for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label)) if (XEXP (label, 0) == JUMP_LABEL (p)) break; if (!label) not_every_iteration = 1; } else if (GET_CODE (p) == NOTE) { /* At the virtual top of a converted loop, insns are again known to be executed each iteration: logically, the loop begins here even though the exit code has been duplicated. Insns are also again known to be executed each iteration at the LOOP_CONT note. */ if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT) && loop_depth == 0) not_every_iteration = 0; else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) loop_depth++; else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END) loop_depth--; } /* Note if we pass a loop latch. If we do, then we can not clear NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in a loop since a jump before the last CODE_LABEL may have started a new loop iteration. Note that LOOP_TOP is only set for rotated loops and we need this check for all loops, so compare against the CODE_LABEL which immediately follows LOOP_START. */ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == NEXT_INSN (loop->start)) past_loop_latch = 1; /* Unlike in the code motion pass where MAYBE_NEVER indicates that an insn may never be executed, NOT_EVERY_ITERATION indicates whether or not an insn is known to be executed each iteration of the loop, whether or not any iterations are known to occur. Therefore, if we have just passed a label and have no more labels between here and the test insn of the loop, and we have not passed a jump to the top of the loop, then we know these insns will be executed each iteration. */ if (not_every_iteration && !past_loop_latch && GET_CODE (p) == CODE_LABEL && no_labels_between_p (p, loop->end) && loop_insn_first_p (p, loop->cont)) not_every_iteration = 0; } } static void loop_bivs_find (struct loop *loop) { struct loop_regs *regs = LOOP_REGS (loop); struct loop_ivs *ivs = LOOP_IVS (loop); /* Temporary list pointers for traversing ivs->list. */ struct iv_class *bl, **backbl; ivs->list = 0; for_each_insn_in_loop (loop, check_insn_for_bivs); /* Scan ivs->list to remove all regs that proved not to be bivs. Make a sanity check against regs->n_times_set. 
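A candidate is dropped here for one of three reasons, each named in the dump output below: its IV type was demoted because the register was also set in some other way (e.g. through a SUBREG), the recorded number of sets disagrees with the number of biv increments we found, or the register was never actually incremented.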
*/ for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next) { if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT /* Above happens if register modified by subreg, etc. */ /* Make sure it is not recognized as a basic induction var: */ || regs->array[bl->regno].n_times_set != bl->biv_count /* If never incremented, it is invariant that we decided not to move. So leave it alone. */ || ! bl->incremented) { if (loop_dump_stream) fprintf (loop_dump_stream, "Biv %d: discarded, %s\n", bl->regno, (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT ? "not induction variable" : (! bl->incremented ? "never incremented" : "count error"))); REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT; *backbl = bl->next; } else { backbl = &bl->next; if (loop_dump_stream) fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno); } } } /* Determine how BIVS are initialized by looking through pre-header extended basic block. */ static void loop_bivs_init_find (struct loop *loop) { struct loop_ivs *ivs = LOOP_IVS (loop); /* Temporary list pointers for traversing ivs->list. */ struct iv_class *bl; int call_seen; rtx p; /* Find initial value for each biv by searching backwards from loop_start, halting at first label. Also record any test condition. */ call_seen = 0; for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p)) { rtx test; note_insn = p; if (GET_CODE (p) == CALL_INSN) call_seen = 1; if (INSN_P (p)) note_stores (PATTERN (p), record_initial, ivs); /* Record any test of a biv that branches around the loop if no store between it and the start of loop. We only care about tests with constants and registers and only certain of those. */ if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) != 0 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end) && (test = get_condition_for_loop (loop, p)) != 0 && REG_P (XEXP (test, 0)) && REGNO (XEXP (test, 0)) < max_reg_before_loop && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start) && bl->init_insn == 0) { /* If an NE test, we have an initial value! */ if (GET_CODE (test) == NE) { bl->init_insn = p; bl->init_set = gen_rtx_SET (VOIDmode, XEXP (test, 0), XEXP (test, 1)); } else bl->initial_test = test; } } } /* Look at the each biv and see if we can say anything better about its initial value from any initializing insns set up above. (This is done in two passes to avoid missing SETs in a PARALLEL.) */ static void loop_bivs_check (struct loop *loop) { struct loop_ivs *ivs = LOOP_IVS (loop); /* Temporary list pointers for traversing ivs->list. */ struct iv_class *bl; struct iv_class **backbl; for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next) { rtx src; rtx note; if (! bl->init_insn) continue; /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value is a constant, use the value of that. 
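In other words, when the init insn carries a REG_EQUAL or REG_EQUIV note whose value is constant, that constant becomes the candidate initial value; otherwise the raw SET_SRC of the init insn is used, and either way the choice must still pass the mode check and valid_initial_value_p below before it replaces the biv's default initial value of itself.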
*/ if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL && CONSTANT_P (XEXP (note, 0))) || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL && CONSTANT_P (XEXP (note, 0)))) src = XEXP (note, 0); else src = SET_SRC (bl->init_set); if (loop_dump_stream) fprintf (loop_dump_stream, "Biv %d: initialized at insn %d: initial value ", bl->regno, INSN_UID (bl->init_insn)); if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno]) || GET_MODE (src) == VOIDmode) && valid_initial_value_p (src, bl->init_insn, LOOP_INFO (loop)->pre_header_has_call, loop->start)) { bl->initial_value = src; if (loop_dump_stream) { print_simple_rtl (loop_dump_stream, src); fputc ('\n', loop_dump_stream); } } /* If we can't make it a giv, let biv keep initial value of "itself". */ else if (loop_dump_stream) fprintf (loop_dump_stream, "is complex\n"); } } /* Search the loop for general induction variables. */ static void loop_givs_find (struct loop* loop) { for_each_insn_in_loop (loop, check_insn_for_givs); } /* For each giv for which we still don't know whether or not it is replaceable, check to see if it is replaceable because its final value can be calculated. */ static void loop_givs_check (struct loop *loop) { struct loop_ivs *ivs = LOOP_IVS (loop); struct iv_class *bl; for (bl = ivs->list; bl; bl = bl->next) { struct induction *v; for (v = bl->giv; v; v = v->next_iv) if (! v->replaceable && ! v->not_replaceable) check_final_value (loop, v); } } /* Return nonzero if it is possible to eliminate the biv BL provided all givs are reduced. This is possible if either the reg is not used outside the loop, or we can compute what its final value will be. */ static int loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl, int threshold, int insn_count) { /* For architectures with a decrement_and_branch_until_zero insn, don't do this if we put a REG_NONNEG note on the endtest for this biv. */ #ifdef HAVE_decrement_and_branch_until_zero if (bl->nonneg) { if (loop_dump_stream) fprintf (loop_dump_stream, "Cannot eliminate nonneg biv %d.\n", bl->regno); return 0; } #endif /* Check that biv is used outside loop or if it has a final value. Compare against bl->init_insn rather than loop->start. We aren't concerned with any uses of the biv between init_insn and loop->start since these won't be affected by the value of the biv elsewhere in the function, so long as init_insn doesn't use the biv itself. */ if ((REGNO_LAST_LUID (bl->regno) < LOOP_INSN_LUID (loop->end) && bl->init_insn && INSN_UID (bl->init_insn) < max_uid_for_loop && REGNO_FIRST_LUID (bl->regno) >= LOOP_INSN_LUID (bl->init_insn) && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set))) || (bl->final_value = final_biv_value (loop, bl))) return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count); if (loop_dump_stream) { fprintf (loop_dump_stream, "Cannot eliminate biv %d.\n", bl->regno); fprintf (loop_dump_stream, "First use: insn %d, last use: insn %d.\n", REGNO_FIRST_UID (bl->regno), REGNO_LAST_UID (bl->regno)); } return 0; } /* Reduce each giv of BL that we have decided to reduce. */ static void loop_givs_reduce (struct loop *loop, struct iv_class *bl) { struct induction *v; for (v = bl->giv; v; v = v->next_iv) { struct induction *tv; if (! v->ignore && v->same == 0) { int auto_inc_opt = 0; /* If the code for derived givs immediately below has already allocated a new_reg, we must keep it. */ if (! 
v->new_reg) v->new_reg = gen_reg_rtx (v->mode); #ifdef AUTO_INC_DEC /* If the target has auto-increment addressing modes, and this is an address giv, then try to put the increment immediately after its use, so that flow can create an auto-increment addressing mode. */ /* Don't do this for loops entered at the bottom, to avoid this invalid transformation: jmp L; -> jmp L; TOP: TOP: use giv use giv L: inc giv inc biv L: test biv test giv cbr TOP cbr TOP */ if (v->giv_type == DEST_ADDR && bl->biv_count == 1 && bl->biv->always_executed && ! bl->biv->maybe_multiple /* We don't handle reversed biv's because bl->biv->insn does not have a valid LOOP_INSN_LUID. */ && ! bl->reversed && v->always_executed && ! v->maybe_multiple && INSN_UID (v->insn) < max_uid_for_loop && !loop->top) { /* If other giv's have been combined with this one, then this will work only if all uses of the other giv's occur before this giv's insn. This is difficult to check. We simplify this by looking for the common case where there is one DEST_REG giv, and this giv's insn is the last use of the dest_reg of that DEST_REG giv. If the increment occurs after the address giv, then we can perform the optimization. (Otherwise, the increment would have to go before other_giv, and we would not be able to combine it with the address giv to get an auto-inc address.) */ if (v->combined_with) { struct induction *other_giv = 0; for (tv = bl->giv; tv; tv = tv->next_iv) if (tv->same == v) { if (other_giv) break; else other_giv = tv; } if (! tv && other_giv && REGNO (other_giv->dest_reg) < max_reg_before_loop && (REGNO_LAST_UID (REGNO (other_giv->dest_reg)) == INSN_UID (v->insn)) && LOOP_INSN_LUID (v->insn) < LOOP_INSN_LUID (bl->biv->insn)) auto_inc_opt = 1; } /* Check for case where increment is before the address giv. Do this test in "loop order". */ else if ((LOOP_INSN_LUID (v->insn) > LOOP_INSN_LUID (bl->biv->insn) && (LOOP_INSN_LUID (v->insn) < LOOP_INSN_LUID (loop->scan_start) || (LOOP_INSN_LUID (bl->biv->insn) > LOOP_INSN_LUID (loop->scan_start)))) || (LOOP_INSN_LUID (v->insn) < LOOP_INSN_LUID (loop->scan_start) && (LOOP_INSN_LUID (loop->scan_start) < LOOP_INSN_LUID (bl->biv->insn)))) auto_inc_opt = -1; else auto_inc_opt = 1; #ifdef HAVE_cc0 { rtx prev; /* We can't put an insn immediately after one setting cc0, or immediately before one using cc0. */ if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn))) || (auto_inc_opt == -1 && (prev = prev_nonnote_insn (v->insn)) != 0 && INSN_P (prev) && sets_cc0_p (PATTERN (prev)))) auto_inc_opt = 0; } #endif if (auto_inc_opt) v->auto_inc_opt = 1; } #endif /* For each place where the biv is incremented, add an insn to increment the new, reduced reg for the giv. */ for (tv = bl->biv; tv; tv = tv->next_iv) { rtx insert_before; /* Skip if location is the same as a previous one. */ if (tv->same) continue; if (! auto_inc_opt) insert_before = NEXT_INSN (tv->insn); else if (auto_inc_opt == 1) insert_before = NEXT_INSN (v->insn); else insert_before = v->insn; if (tv->mult_val == const1_rtx) loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val, v->new_reg, v->new_reg, 0, insert_before); else /* tv->mult_val == const0_rtx */ /* A multiply is acceptable here since this is presumed to be seldom executed. */ loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val, v->add_val, v->new_reg, 0, insert_before); } /* Add code at loop start to initialize giv's reduced reg. 
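The hoisted computation below initializes the reduced register to the giv's value on entry to the first iteration, formed from the (suitably extended) biv initial value times MULT_VAL plus ADD_VAL; the per-increment adds emitted just above then keep it in step with the biv from there on.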
*/ loop_iv_add_mult_hoist (loop, extend_value_for_giv (v, bl->initial_value), v->mult_val, v->add_val, v->new_reg); } } } /* Check for givs whose first use is their definition and whose last use is the definition of another giv. If so, it is likely dead and should not be used to derive another giv nor to eliminate a biv. */ static void loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl) { struct induction *v; for (v = bl->giv; v; v = v->next_iv) { if (v->ignore || (v->same && v->same->ignore)) continue; if (v->giv_type == DEST_REG && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn)) { struct induction *v1; for (v1 = bl->giv; v1; v1 = v1->next_iv) if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn)) v->maybe_dead = 1; } } } static void loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map) { struct induction *v; for (v = bl->giv; v; v = v->next_iv) { if (v->same && v->same->ignore) v->ignore = 1; if (v->ignore) continue; /* Update expression if this was combined, in case other giv was replaced. */ if (v->same) v->new_reg = replace_rtx (v->new_reg, v->same->dest_reg, v->same->new_reg); /* See if this register is known to be a pointer to something. If so, see if we can find the alignment. First see if there is a destination register that is a pointer. If so, this shares the alignment too. Next see if we can deduce anything from the computational information. If not, and this is a DEST_ADDR giv, at least we know that it's a pointer, though we don't know the alignment. */ if (REG_P (v->new_reg) && v->giv_type == DEST_REG && REG_POINTER (v->dest_reg)) mark_reg_pointer (v->new_reg, REGNO_POINTER_ALIGN (REGNO (v->dest_reg))); else if (REG_P (v->new_reg) && REG_POINTER (v->src_reg)) { unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg)); if (align == 0 || GET_CODE (v->add_val) != CONST_INT || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0) align = 0; mark_reg_pointer (v->new_reg, align); } else if (REG_P (v->new_reg) && REG_P (v->add_val) && REG_POINTER (v->add_val)) { unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val)); if (align == 0 || GET_CODE (v->mult_val) != CONST_INT || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0) align = 0; mark_reg_pointer (v->new_reg, align); } else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR) mark_reg_pointer (v->new_reg, 0); if (v->giv_type == DEST_ADDR) /* Store reduced reg as the address in the memref where we found this giv. */ validate_change (v->insn, v->location, v->new_reg, 0); else if (v->replaceable) { reg_map[REGNO (v->dest_reg)] = v->new_reg; } else { rtx original_insn = v->insn; rtx note; /* Not replaceable; emit an insn to set the original giv reg from the reduced giv, same as above. */ v->insn = loop_insn_emit_after (loop, 0, original_insn, gen_move_insn (v->dest_reg, v->new_reg)); /* The original insn may have a REG_EQUAL note. This note is now incorrect and may result in invalid substitutions later. The original insn is dead, but may be part of a libcall sequence, which doesn't seem worth the bother of handling. */ note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX); if (note) remove_note (original_insn, note); } /* When a loop is reversed, givs which depend on the reversed biv, and which are live outside the loop, must be set to their correct final value. This insn is only needed if the giv is not replaceable. The correct final value is the same as the value that the giv starts the reversed loop with. */ if (bl->reversed && ! 
v->replaceable) loop_iv_add_mult_sink (loop, extend_value_for_giv (v, bl->initial_value), v->mult_val, v->add_val, v->dest_reg); else if (v->final_value) loop_insn_sink_or_swim (loop, gen_load_of_final_value (v->dest_reg, v->final_value)); if (loop_dump_stream) { fprintf (loop_dump_stream, "giv at %d reduced to ", INSN_UID (v->insn)); print_simple_rtl (loop_dump_stream, v->new_reg); fprintf (loop_dump_stream, "\n"); } } } static int loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl, struct induction *v, rtx test_reg) { int add_cost; int benefit; benefit = v->benefit; PUT_MODE (test_reg, v->mode); add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val, test_reg, test_reg); /* Reduce benefit if not replaceable, since we will insert a move-insn to replace the insn that calculates this giv. Don't do this unless the giv is a user variable, since it will often be marked non-replaceable because of the duplication of the exit code outside the loop. In such a case, the copies we insert are dead and will be deleted. So they don't have a cost. Similar situations exist. */ /* ??? The new final_[bg]iv_value code does a much better job of finding replaceable giv's, and hence this code may no longer be necessary. */ if (! v->replaceable && ! bl->eliminable && REG_USERVAR_P (v->dest_reg)) benefit -= cost_per_copy; /* Decrease the benefit to count the add-insns that we will insert to increment the reduced reg for the giv. ??? This can overestimate the run-time cost of the additional insns, e.g. if there are multiple basic blocks that increment the biv, but only one of these blocks is executed during each iteration. There is no good way to detect cases like this with the current structure of the loop optimizer. This code is more accurate for determining code size than run-time benefits. */ benefit -= add_cost * bl->biv_count; /* Decide whether to strength-reduce this giv or to leave the code unchanged (recompute it from the biv each time it is used). This decision can be made independently for each giv. */ #ifdef AUTO_INC_DEC /* Attempt to guess whether autoincrement will handle some of the new add insns; if so, increase BENEFIT (undo the subtraction of add_cost that was done above). */ if (v->giv_type == DEST_ADDR /* Increasing the benefit is risky, since this is only a guess. Avoid increasing register pressure in cases where there would be no other benefit from reducing this giv. */ && benefit > 0 && GET_CODE (v->mult_val) == CONST_INT) { int size = GET_MODE_SIZE (GET_MODE (v->mem)); if (HAVE_POST_INCREMENT && INTVAL (v->mult_val) == size) benefit += add_cost * bl->biv_count; else if (HAVE_PRE_INCREMENT && INTVAL (v->mult_val) == size) benefit += add_cost * bl->biv_count; else if (HAVE_POST_DECREMENT && -INTVAL (v->mult_val) == size) benefit += add_cost * bl->biv_count; else if (HAVE_PRE_DECREMENT && -INTVAL (v->mult_val) == size) benefit += add_cost * bl->biv_count; } #endif return benefit; } /* Free IV structures for LOOP. 
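This releases the per-register iv array and then walks every iv_class on the list, freeing each of its biv and giv induction records before freeing the class itself.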
*/ static void loop_ivs_free (struct loop *loop) { struct loop_ivs *ivs = LOOP_IVS (loop); struct iv_class *iv = ivs->list; free (ivs->regs); while (iv) { struct iv_class *next = iv->next; struct induction *induction; struct induction *next_induction; for (induction = iv->biv; induction; induction = next_induction) { next_induction = induction->next_iv; free (induction); } for (induction = iv->giv; induction; induction = next_induction) { next_induction = induction->next_iv; free (induction); } free (iv); iv = next; } } /* Perform strength reduction and induction variable elimination. Pseudo registers created during this function will be beyond the last valid index in several tables including REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a problem here, because the added registers cannot be givs outside of their loop, and hence will never be reconsidered. But scan_loop must check regnos to make sure they are in bounds. */ static void strength_reduce (struct loop *loop, int flags) { struct loop_info *loop_info = LOOP_INFO (loop); struct loop_regs *regs = LOOP_REGS (loop); struct loop_ivs *ivs = LOOP_IVS (loop); rtx p; /* Temporary list pointer for traversing ivs->list. */ struct iv_class *bl; /* Ratio of extra register life span we can justify for saving an instruction. More if loop doesn't call subroutines since in that case saving an insn makes more difference and more registers are available. */ /* ??? could set this to last value of threshold in move_movables */ int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs); /* Map of pseudo-register replacements. */ rtx *reg_map = NULL; int reg_map_size; int unrolled_insn_copies = 0; rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1); int insn_count = count_insns_in_loop (loop); addr_placeholder = gen_reg_rtx (Pmode); ivs->n_regs = max_reg_before_loop; ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv)); /* Find all BIVs in loop. */ loop_bivs_find (loop); /* Exit if there are no bivs. */ if (! ivs->list) { /* Can still unroll the loop anyways, but indicate that there is no strength reduction info available. */ if (flags & LOOP_UNROLL) unroll_loop (loop, insn_count, 0); loop_ivs_free (loop); return; } /* Determine how BIVS are initialized by looking through pre-header extended basic block. */ loop_bivs_init_find (loop); /* Look at the each biv and see if we can say anything better about its initial value from any initializing insns set up above. */ loop_bivs_check (loop); /* Search the loop for general induction variables. */ loop_givs_find (loop); /* Try to calculate and save the number of loop iterations. This is set to zero if the actual number can not be calculated. This must be called after all giv's have been identified, since otherwise it may fail if the iteration variable is a giv. */ loop_iterations (loop); #ifdef HAVE_prefetch if (flags & LOOP_PREFETCH) emit_prefetch_instructions (loop); #endif /* Now for each giv for which we still don't know whether or not it is replaceable, check to see if it is replaceable because its final value can be calculated. This must be done after loop_iterations is called, so that final_giv_value will work correctly. */ loop_givs_check (loop); /* Try to prove that the loop counter variable (if any) is always nonnegative; if so, record that fact with a REG_NONNEG note so that "decrement and branch until zero" insn can be used. */ check_dbra_loop (loop, insn_count); /* Create reg_map to hold substitutions for replaceable giv regs. 
Some givs might have been made from biv increments, so look at ivs->reg_iv_type for a suitable size. */ reg_map_size = ivs->n_regs; reg_map = xcalloc (reg_map_size, sizeof (rtx)); /* Examine each iv class for feasibility of strength reduction/induction variable elimination. */ for (bl = ivs->list; bl; bl = bl->next) { struct induction *v; int benefit; /* Test whether it will be possible to eliminate this biv provided all givs are reduced. */ bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count); /* This will be true at the end, if all givs which depend on this biv have been strength reduced. We can't (currently) eliminate the biv unless this is so. */ bl->all_reduced = 1; /* Check each extension dependent giv in this class to see if its root biv is safe from wrapping in the interior mode. */ check_ext_dependent_givs (loop, bl); /* Combine all giv's for this iv_class. */ combine_givs (regs, bl); for (v = bl->giv; v; v = v->next_iv) { struct induction *tv; if (v->ignore || v->same) continue; benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg); /* If an insn is not to be strength reduced, then set its ignore flag, and clear bl->all_reduced. */ /* A giv that depends on a reversed biv must be reduced if it is used after the loop exit, otherwise, it would have the wrong value after the loop exit. To make it simple, just reduce all of such giv's whether or not we know they are used after the loop exit. */ if (! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count && ! bl->reversed) { if (loop_dump_stream) fprintf (loop_dump_stream, "giv of insn %d not worth while, %d vs %d.\n", INSN_UID (v->insn), v->lifetime * threshold * benefit, insn_count); v->ignore = 1; bl->all_reduced = 0; } else { /* Check that we can increment the reduced giv without a multiply insn. If not, reject it. */ for (tv = bl->biv; tv; tv = tv->next_iv) if (tv->mult_val == const1_rtx && ! product_cheap_p (tv->add_val, v->mult_val)) { if (loop_dump_stream) fprintf (loop_dump_stream, "giv of insn %d: would need a multiply.\n", INSN_UID (v->insn)); v->ignore = 1; bl->all_reduced = 0; break; } } } /* Check for givs whose first use is their definition and whose last use is the definition of another giv. If so, it is likely dead and should not be used to derive another giv nor to eliminate a biv. */ loop_givs_dead_check (loop, bl); /* Reduce each giv that we decided to reduce. */ loop_givs_reduce (loop, bl); /* Rescan all givs. If a giv is the same as a giv not reduced, mark it as not reduced. For each giv register that can be reduced now: if replaceable, substitute reduced reg wherever the old giv occurs; else add new move insn "giv_reg = reduced_reg". */ loop_givs_rescan (loop, bl, reg_map); /* All the givs based on the biv bl have been reduced if they merit it. */ /* For each giv not marked as maybe dead that has been combined with a second giv, clear any "maybe dead" mark on that second giv. v->new_reg will either be or refer to the register of the giv it combined with. Doing this clearing avoids problems in biv elimination where a giv's new_reg is a complex value that can't be put in the insn but the giv combined with (with a reg as new_reg) is marked maybe_dead. Since the register will be used in either case, we'd prefer it be used from the simpler giv. */ for (v = bl->giv; v; v = v->next_iv) if (! v->maybe_dead && v->same) v->same->maybe_dead = 0; /* Try to eliminate the biv, if it is a candidate. This won't work if ! 
bl->all_reduced, since the givs we planned to use might not have been reduced. We have to be careful that we didn't initially think we could eliminate this biv because of a giv that we now think may be dead and shouldn't be used as a biv replacement. Also, there is the possibility that we may have a giv that looks like it can be used to eliminate a biv, but the resulting insn isn't valid. This can happen, for example, on the 88k, where a JUMP_INSN can compare a register only with zero. Attempts to replace it with a compare with a constant will fail. Note that in cases where this call fails, we may have replaced some of the occurrences of the biv with a giv, but no harm was done in doing so in the rare cases where it can occur. */ if (bl->all_reduced == 1 && bl->eliminable && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count)) { /* ?? If we created a new test to bypass the loop entirely, or otherwise drop straight in, based on this test, then we might want to rewrite it also. This way some later pass has more hope of removing the initialization of this biv entirely. */ /* If final_value != 0, then the biv may be used after loop end and we must emit an insn to set it just in case. Reversed bivs already have an insn after the loop setting their value, so we don't need another one. We can't calculate the proper final value for such a biv here anyways. */ if (bl->final_value && ! bl->reversed) loop_insn_sink_or_swim (loop, gen_load_of_final_value (bl->biv->dest_reg, bl->final_value)); if (loop_dump_stream) fprintf (loop_dump_stream, "Reg %d: biv eliminated\n", bl->regno); } /* See above note wrt final_value. But since we couldn't eliminate the biv, we must set the value after the loop instead of before. */ else if (bl->final_value && ! bl->reversed) loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg, bl->final_value)); } /* Go through all the instructions in the loop, making all the register substitutions scheduled in REG_MAP. */ for (p = loop->start; p != loop->end; p = NEXT_INSN (p)) if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN || GET_CODE (p) == CALL_INSN) { replace_regs (PATTERN (p), reg_map, reg_map_size, 0); replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0); INSN_CODE (p) = -1; } if (loop_info->n_iterations > 0) { /* When we completely unroll a loop we will likely not need the increment of the loop BIV and we will not need the conditional branch at the end of the loop. */ unrolled_insn_copies = insn_count - 2; #ifdef HAVE_cc0 /* When we completely unroll a loop on a HAVE_cc0 machine we will not need the comparison before the conditional branch at the end of the loop. */ unrolled_insn_copies -= 1; #endif /* We'll need one copy for each loop iteration. */ unrolled_insn_copies *= loop_info->n_iterations; /* A little slop to account for the ability to remove initialization code, better CSE, and other secondary benefits of completely unrolling some loops. */ unrolled_insn_copies -= 1; /* Clamp the value. */ if (unrolled_insn_copies < 0) unrolled_insn_copies = 0; } /* Unroll loops from within strength reduction so that we can use the induction variable information that strength_reduce has already collected. Always unroll loops that would be as small or smaller unrolled than when rolled. 
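For example (leaving aside the HAVE_cc0 adjustment), a 10-insn loop known to run 3 times is estimated above at (10 - 2) * 3 - 1 = 23 unrolled copies, more than the rolled 10 insns, so automatic unrolling is declined; the same loop known to run once is estimated at 7 copies and is unrolled.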
*/ if ((flags & LOOP_UNROLL) || ((flags & LOOP_AUTO_UNROLL) && loop_info->n_iterations > 0 && unrolled_insn_copies <= insn_count)) unroll_loop (loop, insn_count, 1); if (loop_dump_stream) fprintf (loop_dump_stream, "\n"); loop_ivs_free (loop); if (reg_map) free (reg_map); } /*Record all basic induction variables calculated in the insn. */ static rtx check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration, int maybe_multiple) { struct loop_ivs *ivs = LOOP_IVS (loop); rtx set; rtx dest_reg; rtx inc_val; rtx mult_val; rtx *location; if (GET_CODE (p) == INSN && (set = single_set (p)) && REG_P (SET_DEST (set))) { dest_reg = SET_DEST (set); if (REGNO (dest_reg) < max_reg_before_loop && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT) { if (basic_induction_var (loop, SET_SRC (set), GET_MODE (SET_SRC (set)), dest_reg, p, &inc_val, &mult_val, &location)) { /* It is a possible basic induction variable. Create and initialize an induction structure for it. */ struct induction *v = xmalloc (sizeof (struct induction)); record_biv (loop, v, p, dest_reg, inc_val, mult_val, location, not_every_iteration, maybe_multiple); REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT; } else if (REGNO (dest_reg) < ivs->n_regs) REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT; } } return p; } /* Record all givs calculated in the insn. A register is a giv if: it is only set once, it is a function of a biv and a constant (or invariant), and it is not a biv. */ static rtx check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration, int maybe_multiple) { struct loop_regs *regs = LOOP_REGS (loop); rtx set; /* Look for a general induction variable in a register. */ if (GET_CODE (p) == INSN && (set = single_set (p)) && REG_P (SET_DEST (set)) && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize) { rtx src_reg; rtx dest_reg; rtx add_val; rtx mult_val; rtx ext_val; int benefit; rtx regnote = 0; rtx last_consec_insn; dest_reg = SET_DEST (set); if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER) return p; if (/* SET_SRC is a giv. */ (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val, &mult_val, &ext_val, 0, &benefit, VOIDmode) /* Equivalent expression is a giv. */ || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX)) && general_induction_var (loop, XEXP (regnote, 0), &src_reg, &add_val, &mult_val, &ext_val, 0, &benefit, VOIDmode))) /* Don't try to handle any regs made by loop optimization. We have nothing on them in regno_first_uid, etc. */ && REGNO (dest_reg) < max_reg_before_loop /* Don't recognize a BASIC_INDUCT_VAR here. */ && dest_reg != src_reg /* This must be the only place where the register is set. */ && (regs->array[REGNO (dest_reg)].n_times_set == 1 /* or all sets must be consecutive and make a giv. */ || (benefit = consec_sets_giv (loop, benefit, p, src_reg, dest_reg, &add_val, &mult_val, &ext_val, &last_consec_insn)))) { struct induction *v = xmalloc (sizeof (struct induction)); /* If this is a library call, increase benefit. */ if (find_reg_note (p, REG_RETVAL, NULL_RTX)) benefit += libcall_benefit (p); /* Skip the consecutive insns, if there are any. */ if (regs->array[REGNO (dest_reg)].n_times_set != 1) p = last_consec_insn; record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val, ext_val, benefit, DEST_REG, not_every_iteration, maybe_multiple, (rtx*) 0); } } /* Look for givs which are memory addresses. 
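For instance, an address such as (mem (plus (reg biv) (const_int 8))) inside the pattern is recorded by find_mem_givs as a DEST_ADDR giv, while the register case handled above records DEST_REG givs whose value is biv * mult_val + add_val.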
*/ if (GET_CODE (p) == INSN) find_mem_givs (loop, PATTERN (p), p, not_every_iteration, maybe_multiple); /* Update the status of whether giv can derive other givs. This can change when we pass a label or an insn that updates a biv. */ if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN || GET_CODE (p) == CODE_LABEL) update_giv_derive (loop, p); return p; } /* Return 1 if X is a valid source for an initial value (or as value being compared against in an initial test). X must be either a register or constant and must not be clobbered between the current insn and the start of the loop. INSN is the insn containing X. */ static int valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start) { if (CONSTANT_P (x)) return 1; /* Only consider pseudos we know about initialized in insns whose luids we know. */ if (!REG_P (x) || REGNO (x) >= max_reg_before_loop) return 0; /* Don't use call-clobbered registers across a call which clobbers it. On some machines, don't use any hard registers at all. */ if (REGNO (x) < FIRST_PSEUDO_REGISTER && (SMALL_REGISTER_CLASSES || (call_used_regs[REGNO (x)] && call_seen))) return 0; /* Don't use registers that have been clobbered before the start of the loop. */ if (reg_set_between_p (x, insn, loop_start)) return 0; return 1; } /* Scan X for memory refs and check each memory address as a possible giv. INSN is the insn whose pattern X comes from. NOT_EVERY_ITERATION is 1 if the insn might not be executed during every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed more than once in each loop iteration. */ static void find_mem_givs (const struct loop *loop, rtx x, rtx insn, int not_every_iteration, int maybe_multiple) { int i, j; enum rtx_code code; const char *fmt; if (x == 0) return; code = GET_CODE (x); switch (code) { case REG: case CONST_INT: case CONST: case CONST_DOUBLE: case SYMBOL_REF: case LABEL_REF: case PC: case CC0: case ADDR_VEC: case ADDR_DIFF_VEC: case USE: case CLOBBER: return; case MEM: { rtx src_reg; rtx add_val; rtx mult_val; rtx ext_val; int benefit; /* This code used to disable creating GIVs with mult_val == 1 and add_val == 0. However, this leads to lost optimizations when it comes time to combine a set of related DEST_ADDR GIVs, since this one would not be seen. */ if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val, &mult_val, &ext_val, 1, &benefit, GET_MODE (x))) { /* Found one; record it. */ struct induction *v = xmalloc (sizeof (struct induction)); record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val, add_val, ext_val, benefit, DEST_ADDR, not_every_iteration, maybe_multiple, &XEXP (x, 0)); v->mem = x; } } return; default: break; } /* Recursively scan the subexpressions for other mem refs. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration, maybe_multiple); else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration, maybe_multiple); } /* Fill in the data about one biv update. V is the `struct induction' in which we record the biv. (It is allocated by the caller, with alloca.) INSN is the insn that sets it. DEST_REG is the biv's reg. MULT_VAL is const1_rtx if the biv is being incremented here, in which case INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is being set to INC_VAL. 
NOT_EVERY_ITERATION is nonzero if this biv update is not know to be executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update can be executed more than once per iteration. If MAYBE_MULTIPLE and NOT_EVERY_ITERATION are both zero, we know that the biv update is executed exactly once per iteration. */ static void record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg, rtx inc_val, rtx mult_val, rtx *location, int not_every_iteration, int maybe_multiple) { struct loop_ivs *ivs = LOOP_IVS (loop); struct iv_class *bl; v->insn = insn; v->src_reg = dest_reg; v->dest_reg = dest_reg; v->mult_val = mult_val; v->add_val = inc_val; v->ext_dependent = NULL_RTX; v->location = location; v->mode = GET_MODE (dest_reg); v->always_computable = ! not_every_iteration; v->always_executed = ! not_every_iteration; v->maybe_multiple = maybe_multiple; v->same = 0; /* Add this to the reg's iv_class, creating a class if this is the first incrementation of the reg. */ bl = REG_IV_CLASS (ivs, REGNO (dest_reg)); if (bl == 0) { /* Create and initialize new iv_class. */ bl = xmalloc (sizeof (struct iv_class)); bl->regno = REGNO (dest_reg); bl->biv = 0; bl->giv = 0; bl->biv_count = 0; bl->giv_count = 0; /* Set initial value to the reg itself. */ bl->initial_value = dest_reg; bl->final_value = 0; /* We haven't seen the initializing insn yet. */ bl->init_insn = 0; bl->init_set = 0; bl->initial_test = 0; bl->incremented = 0; bl->eliminable = 0; bl->nonneg = 0; bl->reversed = 0; bl->total_benefit = 0; /* Add this class to ivs->list. */ bl->next = ivs->list; ivs->list = bl; /* Put it in the array of biv register classes. */ REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl; } else { /* Check if location is the same as a previous one. */ struct induction *induction; for (induction = bl->biv; induction; induction = induction->next_iv) if (location == induction->location) { v->same = induction; break; } } /* Update IV_CLASS entry for this biv. */ v->next_iv = bl->biv; bl->biv = v; bl->biv_count++; if (mult_val == const1_rtx) bl->incremented = 1; if (loop_dump_stream) loop_biv_dump (v, loop_dump_stream, 0); } /* Fill in the data about one giv. V is the `struct induction' in which we record the giv. (It is allocated by the caller, with alloca.) INSN is the insn that sets it. BENEFIT estimates the savings from deleting this insn. TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed into a register or is used as a memory address. SRC_REG is the biv reg which the giv is computed from. DEST_REG is the giv's reg (if the giv is stored in a reg). MULT_VAL and ADD_VAL are the coefficients used to compute the giv. LOCATION points to the place where this giv's value appears in INSN. */ static void record_giv (const struct loop *loop, struct induction *v, rtx insn, rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val, rtx ext_val, int benefit, enum g_types type, int not_every_iteration, int maybe_multiple, rtx *location) { struct loop_ivs *ivs = LOOP_IVS (loop); struct induction *b; struct iv_class *bl; rtx set = single_set (insn); rtx temp; /* Attempt to prove constantness of the values. Don't let simplify_rtx undo the MULT canonicalization that we performed earlier. */ temp = simplify_rtx (add_val); if (temp && ! 
(GET_CODE (add_val) == MULT && GET_CODE (temp) == ASHIFT)) add_val = temp; v->insn = insn; v->src_reg = src_reg; v->giv_type = type; v->dest_reg = dest_reg; v->mult_val = mult_val; v->add_val = add_val; v->ext_dependent = ext_val; v->benefit = benefit; v->location = location; v->cant_derive = 0; v->combined_with = 0; v->maybe_multiple = maybe_multiple; v->maybe_dead = 0; v->derive_adjustment = 0; v->same = 0; v->ignore = 0; v->new_reg = 0; v->final_value = 0; v->same_insn = 0; v->auto_inc_opt = 0; v->unrolled = 0; v->shared = 0; /* The v->always_computable field is used in update_giv_derive, to determine whether a giv can be used to derive another giv. For a DEST_REG giv, INSN computes a new value for the giv, so its value isn't computable if INSN insn't executed every iteration. However, for a DEST_ADDR giv, INSN merely uses the value of the giv; it does not compute a new value. Hence the value is always computable regardless of whether INSN is executed each iteration. */ if (type == DEST_ADDR) v->always_computable = 1; else v->always_computable = ! not_every_iteration; v->always_executed = ! not_every_iteration; if (type == DEST_ADDR) { v->mode = GET_MODE (*location); v->lifetime = 1; } else /* type == DEST_REG */ { v->mode = GET_MODE (SET_DEST (set)); v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg)); /* If the lifetime is zero, it means that this register is really a dead store. So mark this as a giv that can be ignored. This will not prevent the biv from being eliminated. */ if (v->lifetime == 0) v->ignore = 1; REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT; REG_IV_INFO (ivs, REGNO (dest_reg)) = v; } /* Add the giv to the class of givs computed from one biv. */ bl = REG_IV_CLASS (ivs, REGNO (src_reg)); if (bl) { v->next_iv = bl->giv; bl->giv = v; /* Don't count DEST_ADDR. This is supposed to count the number of insns that calculate givs. */ if (type == DEST_REG) bl->giv_count++; bl->total_benefit += benefit; } else /* Fatal error, biv missing for this giv? */ abort (); if (type == DEST_ADDR) { v->replaceable = 1; v->not_replaceable = 0; } else { /* The giv can be replaced outright by the reduced register only if all of the following conditions are true: - the insn that sets the giv is always executed on any iteration on which the giv is used at all (there are two ways to deduce this: either the insn is executed on every iteration, or all uses follow that insn in the same basic block), - the giv is not used outside the loop - no assignments to the biv occur during the giv's lifetime. */ if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn) /* Previous line always fails if INSN was moved by loop opt. */ && REGNO_LAST_LUID (REGNO (dest_reg)) < LOOP_INSN_LUID (loop->end) && (! not_every_iteration || last_use_this_basic_block (dest_reg, insn))) { /* Now check that there are no assignments to the biv within the giv's lifetime. This requires two separate checks. */ /* Check each biv update, and fail if any are between the first and last use of the giv. If this loop contains an inner loop that was unrolled, then the insn modifying the biv may have been emitted by the loop unrolling code, and hence does not have a valid luid. Just mark the biv as not replaceable in this case. It is not very useful as a biv, because it is used in two different loops. It is very unlikely that we would be able to optimize the giv using this biv anyways. 
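(It is the giv's replaceable flag that is cleared in that situation, exactly as in the other failure paths below; the biv itself is left untouched.)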
*/ v->replaceable = 1; v->not_replaceable = 0; for (b = bl->biv; b; b = b->next_iv) { if (INSN_UID (b->insn) >= max_uid_for_loop || ((LOOP_INSN_LUID (b->insn) >= REGNO_FIRST_LUID (REGNO (dest_reg))) && (LOOP_INSN_LUID (b->insn) <= REGNO_LAST_LUID (REGNO (dest_reg))))) { v->replaceable = 0; v->not_replaceable = 1; break; } } /* If there are any backwards branches that go from after the biv update to before it, then this giv is not replaceable. */ if (v->replaceable) for (b = bl->biv; b; b = b->next_iv) if (back_branch_in_range_p (loop, b->insn)) { v->replaceable = 0; v->not_replaceable = 1; break; } } else { /* May still be replaceable, we don't have enough info here to decide. */ v->replaceable = 0; v->not_replaceable = 0; } } /* Record whether the add_val contains a const_int, for later use by combine_givs. */ { rtx tem = add_val; v->no_const_addval = 1; if (tem == const0_rtx) ; else if (CONSTANT_P (add_val)) v->no_const_addval = 0; if (GET_CODE (tem) == PLUS) { while (1) { if (GET_CODE (XEXP (tem, 0)) == PLUS) tem = XEXP (tem, 0); else if (GET_CODE (XEXP (tem, 1)) == PLUS) tem = XEXP (tem, 1); else break; } if (CONSTANT_P (XEXP (tem, 1))) v->no_const_addval = 0; } } if (loop_dump_stream) loop_giv_dump (v, loop_dump_stream, 0); } /* All this does is determine whether a giv can be made replaceable because its final value can be calculated. This code can not be part of record_giv above, because final_giv_value requires that the number of loop iterations be known, and that can not be accurately calculated until after all givs have been identified. */ static void check_final_value (const struct loop *loop, struct induction *v) { rtx final_value = 0; /* DEST_ADDR givs will never reach here, because they are always marked replaceable above in record_giv. */ /* The giv can be replaced outright by the reduced register only if all of the following conditions are true: - the insn that sets the giv is always executed on any iteration on which the giv is used at all (there are two ways to deduce this: either the insn is executed on every iteration, or all uses follow that insn in the same basic block), - its final value can be calculated (this condition is different than the one above in record_giv) - it's not used before the it's set - no assignments to the biv occur during the giv's lifetime. */ #if 0 /* This is only called now when replaceable is known to be false. */ /* Clear replaceable, so that it won't confuse final_giv_value. */ v->replaceable = 0; #endif if ((final_value = final_giv_value (loop, v)) && (v->always_executed || last_use_this_basic_block (v->dest_reg, v->insn))) { int biv_increment_seen = 0, before_giv_insn = 0; rtx p = v->insn; rtx last_giv_use; v->replaceable = 1; v->not_replaceable = 0; /* When trying to determine whether or not a biv increment occurs during the lifetime of the giv, we can ignore uses of the variable outside the loop because final_value is true. Hence we can not use regno_last_uid and regno_first_uid as above in record_giv. */ /* Search the loop to determine whether any assignments to the biv occur during the giv's lifetime. Start with the insn that sets the giv, and search around the loop until we come back to that insn again. Also fail if there is a jump within the giv's lifetime that jumps to somewhere outside the lifetime but still within the loop. This catches spaghetti code where the execution order is not linear, and hence the above test fails. 
Here we assume that the giv lifetime does not extend from one iteration of the loop to the next, so as to make the test easier. Since the lifetime isn't known yet, this requires two loops. See also record_giv above. */ last_giv_use = v->insn; while (1) { p = NEXT_INSN (p); if (p == loop->end) { before_giv_insn = 1; p = NEXT_INSN (loop->start); } if (p == v->insn) break; if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN || GET_CODE (p) == CALL_INSN) { /* It is possible for the BIV increment to use the GIV if we have a cycle. Thus we must be sure to check each insn for both BIV and GIV uses, and we must check for BIV uses first. */ if (! biv_increment_seen && reg_set_p (v->src_reg, PATTERN (p))) biv_increment_seen = 1; if (reg_mentioned_p (v->dest_reg, PATTERN (p))) { if (biv_increment_seen || before_giv_insn) { v->replaceable = 0; v->not_replaceable = 1; break; } last_giv_use = p; } } } /* Now that the lifetime of the giv is known, check for branches from within the lifetime to outside the lifetime if it is still replaceable. */ if (v->replaceable) { p = v->insn; while (1) { p = NEXT_INSN (p); if (p == loop->end) p = NEXT_INSN (loop->start); if (p == last_giv_use) break; if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) && LABEL_NAME (JUMP_LABEL (p)) && ((loop_insn_first_p (JUMP_LABEL (p), v->insn) && loop_insn_first_p (loop->start, JUMP_LABEL (p))) || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p)) && loop_insn_first_p (JUMP_LABEL (p), loop->end)))) { v->replaceable = 0; v->not_replaceable = 1; if (loop_dump_stream) fprintf (loop_dump_stream, "Found branch outside giv lifetime.\n"); break; } } } /* If it is replaceable, then save the final value. */ if (v->replaceable) v->final_value = final_value; } if (loop_dump_stream && v->replaceable) fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n", INSN_UID (v->insn), REGNO (v->dest_reg)); } /* Update the status of whether a giv can derive other givs. We need to do something special if there is or may be an update to the biv between the time the giv is defined and the time it is used to derive another giv. In addition, a giv that is only conditionally set is not allowed to derive another giv once a label has been passed. The cases we look at are when a label or an update to a biv is passed. */ static void update_giv_derive (const struct loop *loop, rtx p) { struct loop_ivs *ivs = LOOP_IVS (loop); struct iv_class *bl; struct induction *biv, *giv; rtx tem; int dummy; /* Search all IV classes, then all bivs, and finally all givs. There are three cases we are concerned with. First we have the situation of a giv that is only updated conditionally. In that case, it may not derive any givs after a label is passed. The second case is when a biv update occurs, or may occur, after the definition of a giv. For certain biv updates (see below) that are known to occur between the giv definition and use, we can adjust the giv definition. For others, or when the biv update is conditional, we must prevent the giv from deriving any other givs. There are two sub-cases within this case. If this is a label, we are concerned with any biv update that is done conditionally, since it may be done after the giv is defined followed by a branch here (actually, we need to pass both a jump and a label, but this extra tracking doesn't seem worth it). If this is a jump, we are concerned about any biv update that may be executed multiple times. 
We are actually only concerned about backward jumps, but it is probably not worth performing the test on the jump again here. If this is a biv update, we must adjust the giv status to show that a subsequent biv update was performed. If this adjustment cannot be done, the giv cannot derive further givs. */ for (bl = ivs->list; bl; bl = bl->next) for (biv = bl->biv; biv; biv = biv->next_iv) if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN || biv->insn == p) { /* Skip if location is the same as a previous one. */ if (biv->same) continue; for (giv = bl->giv; giv; giv = giv->next_iv) { /* If cant_derive is already true, there is no point in checking all of these conditions again. */ if (giv->cant_derive) continue; /* If this giv is conditionally set and we have passed a label, it cannot derive anything. */ if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable) giv->cant_derive = 1; /* Skip givs that have mult_val == 0, since they are really invariants. Also skip those that are replaceable, since we know their lifetime doesn't contain any biv update. */ else if (giv->mult_val == const0_rtx || giv->replaceable) continue; /* The only way we can allow this giv to derive another is if this is a biv increment and we can form the product of biv->add_val and giv->mult_val. In this case, we will be able to compute a compensation. */ else if (biv->insn == p) { rtx ext_val_dummy; tem = 0; if (biv->mult_val == const1_rtx) tem = simplify_giv_expr (loop, gen_rtx_MULT (giv->mode, biv->add_val, giv->mult_val), &ext_val_dummy, &dummy); if (tem && giv->derive_adjustment) tem = simplify_giv_expr (loop, gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment), &ext_val_dummy, &dummy); if (tem) giv->derive_adjustment = tem; else giv->cant_derive = 1; } else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable) || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple)) giv->cant_derive = 1; } } } /* Check whether an insn is an increment legitimate for a basic induction var. X is the source of insn P, or a part of it. MODE is the mode in which X should be interpreted. DEST_REG is the putative biv, also the destination of the insn. We accept patterns of these forms: REG = REG + INVARIANT (includes REG = REG - CONSTANT) REG = INVARIANT + REG If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX, store the additive term into *INC_VAL, and store the place where we found the additive term into *LOCATION. If X is an assignment of an invariant into DEST_REG, we set *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL. We also want to detect a BIV when it corresponds to a variable whose mode was promoted. In that case, an increment of the variable may be a PLUS that adds a SUBREG of that variable to an invariant and then sign- or zero-extends the result of the PLUS into the variable. Most GIVs in such cases will be in the promoted mode, since that is probably the natural computation mode (and almost certainly the mode used for addresses) on the machine. So we view the pseudo-reg containing the variable as the BIV, as if it were simply incremented. Note that treating the entire pseudo as a BIV will result in making simple increments to any GIVs based on it. However, if the variable overflows in its declared mode but not its promoted mode, the result will be incorrect. This is acceptable if the variable is signed, since overflows in such cases are undefined, but not if it is unsigned, since those overflows are defined. So we only check for SIGN_EXTEND and not ZERO_EXTEND.
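As a purely illustrative example (not from the original comment): in a loop such as for (i = 0; i < n; i += 4) a[i] = 0; the increment i += 4 matches the REG = REG + INVARIANT form above, so i is accepted as a biv with *MULT_VAL set to CONST1_RTX and *INC_VAL set to (const_int 4).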
If we cannot find a biv, we return 0. */ static int basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode, rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val, rtx **location) { enum rtx_code code; rtx *argp, arg; rtx insn, set = 0, last, inc; code = GET_CODE (x); *location = NULL; switch (code) { case PLUS: if (rtx_equal_p (XEXP (x, 0), dest_reg) || (GET_CODE (XEXP (x, 0)) == SUBREG && SUBREG_PROMOTED_VAR_P (XEXP (x, 0)) && SUBREG_REG (XEXP (x, 0)) == dest_reg)) { argp = &XEXP (x, 1); } else if (rtx_equal_p (XEXP (x, 1), dest_reg) || (GET_CODE (XEXP (x, 1)) == SUBREG && SUBREG_PROMOTED_VAR_P (XEXP (x, 1)) && SUBREG_REG (XEXP (x, 1)) == dest_reg)) { argp = &XEXP (x, 0); } else return 0; arg = *argp; if (loop_invariant_p (loop, arg) != 1) return 0; /* convert_modes can emit new instructions, e.g. when arg is a loop invariant MEM and dest_reg has a different mode. These instructions would be emitted after the end of the function and then *inc_val would be an uninitialized pseudo. Detect this and bail in this case. Other alternatives to solve this can be introducing a convert_modes variant which is allowed to fail but not allowed to emit new instructions, emit these instructions before loop start and let it be garbage collected if *inc_val is never used or saving the *inc_val initialization sequence generated here and when *inc_val is going to be actually used, emit it at some suitable place. */ last = get_last_insn (); inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0); if (get_last_insn () != last) { delete_insns_since (last); return 0; } *inc_val = inc; *mult_val = const1_rtx; *location = argp; return 1; case SUBREG: /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will handle addition of promoted variables. ??? The comment at the start of this function is wrong: promoted variable increments don't look like it says they do. */ return basic_induction_var (loop, SUBREG_REG (x), GET_MODE (SUBREG_REG (x)), dest_reg, p, inc_val, mult_val, location); case REG: /* If this register is assigned in a previous insn, look at its source, but don't go outside the loop or past a label. */ /* If this sets a register to itself, we would repeat any previous biv increment if we applied this strategy blindly. */ if (rtx_equal_p (dest_reg, x)) return 0; insn = p; while (1) { rtx dest; do { insn = PREV_INSN (insn); } while (insn && GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG); if (!insn) break; set = single_set (insn); if (set == 0) break; dest = SET_DEST (set); if (dest == x || (GET_CODE (dest) == SUBREG && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD) && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT) && SUBREG_REG (dest) == x)) return basic_induction_var (loop, SET_SRC (set), (GET_MODE (SET_SRC (set)) == VOIDmode ? GET_MODE (x) : GET_MODE (SET_SRC (set))), dest_reg, insn, inc_val, mult_val, location); while (GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); if (dest == x) break; } /* Fall through. */ /* Can accept constant setting of biv only when inside the innermost loop. Otherwise, a biv of an inner loop may be incorrectly recognized as a biv of the outer loop, causing code to be moved INTO the inner loop. */ case MEM: if (loop_invariant_p (loop, x) != 1) return 0; case CONST_INT: case SYMBOL_REF: case CONST: /* convert_modes aborts if we try to convert to or from CCmode, so just exclude that case.
It is very unlikely that a condition code value would be a useful iterator anyways. convert_modes aborts if we try to convert a float mode to non-float or vice versa too. */ if (loop->level == 1 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg)) && GET_MODE_CLASS (mode) != MODE_CC) { /* Possible bug here? Perhaps we don't know the mode of X. */ last = get_last_insn (); inc = convert_modes (GET_MODE (dest_reg), mode, x, 0); if (get_last_insn () != last) { delete_insns_since (last); return 0; } *inc_val = inc; *mult_val = const0_rtx; return 1; } else return 0; case SIGN_EXTEND: /* Ignore this BIV if signed arithmetic overflow is defined. */ if (flag_wrapv) return 0; return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)), dest_reg, p, inc_val, mult_val, location); case ASHIFTRT: /* Similar, since this can be a sign extension. */ for (insn = PREV_INSN (p); (insn && GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG); insn = PREV_INSN (insn)) ; if (insn) set = single_set (insn); if (! rtx_equal_p (dest_reg, XEXP (x, 0)) && set && SET_DEST (set) == XEXP (x, 0) && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0 && GET_CODE (SET_SRC (set)) == ASHIFT && XEXP (x, 1) == XEXP (SET_SRC (set), 1)) return basic_induction_var (loop, XEXP (SET_SRC (set), 0), GET_MODE (XEXP (x, 0)), dest_reg, insn, inc_val, mult_val, location); return 0; default: return 0; } } /* A general induction variable (giv) is any quantity that is a linear function of a basic induction variable, i.e. giv = biv * mult_val + add_val. The coefficients can be any loop invariant quantity. A giv need not be computed directly from the biv; it can be computed by way of other givs. */ /* Determine whether X computes a giv. If it does, return a nonzero value which is the benefit from eliminating the computation of X; set *SRC_REG to the register of the biv that it is computed from; set *ADD_VAL and *MULT_VAL to the coefficients, such that the value of X is biv * mult + add; */ static int general_induction_var (const struct loop *loop, rtx x, rtx *src_reg, rtx *add_val, rtx *mult_val, rtx *ext_val, int is_addr, int *pbenefit, enum machine_mode addr_mode) { struct loop_ivs *ivs = LOOP_IVS (loop); rtx orig_x = x; /* If this is an invariant, forget it, it isn't a giv. */ if (loop_invariant_p (loop, x) == 1) return 0; *pbenefit = 0; *ext_val = NULL_RTX; x = simplify_giv_expr (loop, x, ext_val, pbenefit); if (x == 0) return 0; switch (GET_CODE (x)) { case USE: case CONST_INT: /* Since this is now an invariant and wasn't before, it must be a giv with MULT_VAL == 0. It doesn't matter which BIV we associate this with. */ *src_reg = ivs->list->biv->dest_reg; *mult_val = const0_rtx; *add_val = x; break; case REG: /* This is equivalent to a BIV. */ *src_reg = x; *mult_val = const1_rtx; *add_val = const0_rtx; break; case PLUS: /* Either (plus (biv) (invar)) or (plus (mult (biv) (invar_1)) (invar_2)). */ if (GET_CODE (XEXP (x, 0)) == MULT) { *src_reg = XEXP (XEXP (x, 0), 0); *mult_val = XEXP (XEXP (x, 0), 1); } else { *src_reg = XEXP (x, 0); *mult_val = const1_rtx; } *add_val = XEXP (x, 1); break; case MULT: /* ADD_VAL is zero. */ *src_reg = XEXP (x, 0); *mult_val = XEXP (x, 1); *add_val = const0_rtx; break; default: abort (); } /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be unless they are CONST_INT). 
*/ if (GET_CODE (*add_val) == USE) *add_val = XEXP (*add_val, 0); if (GET_CODE (*mult_val) == USE) *mult_val = XEXP (*mult_val, 0); if (is_addr) *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost; else *pbenefit += rtx_cost (orig_x, SET); /* Always return true if this is a giv so it will be detected as such, even if the benefit is zero or negative. This allows elimination of bivs that might otherwise not be eliminated. */ return 1; } /* Given an expression, X, try to form it as a linear function of a biv. We will canonicalize it to be of the form (plus (mult (BIV) (invar_1)) (invar_2)) with possible degeneracies. The invariant expressions must each be of a form that can be used as a machine operand. We surround them with a USE rtx (a hack, but localized and certainly unambiguous!) if not a CONST_INT for simplicity in this routine; it is the caller's responsibility to strip them. If no such canonicalization is possible (i.e., two biv's are used or an expression that is neither invariant nor a biv or giv), this routine returns 0. For a nonzero return, the result will have a code of CONST_INT, USE, REG (for a BIV), PLUS, or MULT. No other codes will occur. *BENEFIT will be incremented by the benefit of any sub-giv encountered. */ static rtx sge_plus (enum machine_mode, rtx, rtx); static rtx sge_plus_constant (rtx, rtx); static rtx simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit) { struct loop_ivs *ivs = LOOP_IVS (loop); struct loop_regs *regs = LOOP_REGS (loop); enum machine_mode mode = GET_MODE (x); rtx arg0, arg1; rtx tem; /* If this is not an integer mode, or if we cannot do arithmetic in this mode, this can't be a giv. */ if (mode != VOIDmode && (GET_MODE_CLASS (mode) != MODE_INT || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)) return NULL_RTX; switch (GET_CODE (x)) { case PLUS: arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit); arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit); if (arg0 == 0 || arg1 == 0) return NULL_RTX; /* Put constant last, CONST_INT last if both constant. */ if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT) && ! ((GET_CODE (arg0) == USE && GET_CODE (arg1) == USE) || GET_CODE (arg1) == CONST_INT)) tem = arg0, arg0 = arg1, arg1 = tem; /* Handle addition of zero, then addition of an invariant. */ if (arg1 == const0_rtx) return arg0; else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE) switch (GET_CODE (arg0)) { case CONST_INT: case USE: /* Adding two invariants must result in an invariant, so enclose addition operation inside a USE and return it. */ if (GET_CODE (arg0) == USE) arg0 = XEXP (arg0, 0); if (GET_CODE (arg1) == USE) arg1 = XEXP (arg1, 0); if (GET_CODE (arg0) == CONST_INT) tem = arg0, arg0 = arg1, arg1 = tem; if (GET_CODE (arg1) == CONST_INT) tem = sge_plus_constant (arg0, arg1); else tem = sge_plus (mode, arg0, arg1); if (GET_CODE (tem) != CONST_INT) tem = gen_rtx_USE (mode, tem); return tem; case REG: case MULT: /* biv + invar or mult + invar. Return sum. */ return gen_rtx_PLUS (mode, arg0, arg1); case PLUS: /* (a + invar_1) + invar_2. Associate. */ return simplify_giv_expr (loop, gen_rtx_PLUS (mode, XEXP (arg0, 0), gen_rtx_PLUS (mode, XEXP (arg0, 1), arg1)), ext_val, benefit); default: abort (); } /* Each argument must be either REG, PLUS, or MULT. Convert REG to MULT to reduce cases.
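A worked illustration (not from the original comment): simplifying i + i*4 first rewrites the bare REG as (mult i 1); the operands are then MULT + MULT on the same biv, which is distributed below into (mult i (plus 1 4)), i.e. the giv i*5.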
*/ if (REG_P (arg0)) arg0 = gen_rtx_MULT (mode, arg0, const1_rtx); if (REG_P (arg1)) arg1 = gen_rtx_MULT (mode, arg1, const1_rtx); /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT. Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT. Recurse to associate the second PLUS. */ if (GET_CODE (arg1) == MULT) tem = arg0, arg0 = arg1, arg1 = tem; if (GET_CODE (arg1) == PLUS) return simplify_giv_expr (loop, gen_rtx_PLUS (mode, gen_rtx_PLUS (mode, arg0, XEXP (arg1, 0)), XEXP (arg1, 1)), ext_val, benefit); /* Now must have MULT + MULT. Distribute if same biv, else not giv. */ if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT) return NULL_RTX; if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0))) return NULL_RTX; return simplify_giv_expr (loop, gen_rtx_MULT (mode, XEXP (arg0, 0), gen_rtx_PLUS (mode, XEXP (arg0, 1), XEXP (arg1, 1))), ext_val, benefit); case MINUS: /* Handle "a - b" as "a + b * (-1)". */ return simplify_giv_expr (loop, gen_rtx_PLUS (mode, XEXP (x, 0), gen_rtx_MULT (mode, XEXP (x, 1), constm1_rtx)), ext_val, benefit); case MULT: arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit); arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit); if (arg0 == 0 || arg1 == 0) return NULL_RTX; /* Put constant last, CONST_INT last if both constant. */ if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT) && GET_CODE (arg1) != CONST_INT) tem = arg0, arg0 = arg1, arg1 = tem; /* If second argument is not now constant, not giv. */ if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT) return NULL_RTX; /* Handle multiply by 0 or 1. */ if (arg1 == const0_rtx) return const0_rtx; else if (arg1 == const1_rtx) return arg0; switch (GET_CODE (arg0)) { case REG: /* biv * invar. Done. */ return gen_rtx_MULT (mode, arg0, arg1); case CONST_INT: /* Product of two constants. */ return GEN_INT (INTVAL (arg0) * INTVAL (arg1)); case USE: /* invar * invar is a giv, but attempt to simplify it somehow. */ if (GET_CODE (arg1) != CONST_INT) return NULL_RTX; arg0 = XEXP (arg0, 0); if (GET_CODE (arg0) == MULT) { /* (invar_0 * invar_1) * invar_2. Associate. */ return simplify_giv_expr (loop, gen_rtx_MULT (mode, XEXP (arg0, 0), gen_rtx_MULT (mode, XEXP (arg0, 1), arg1)), ext_val, benefit); } /* Propagate the MULT expressions to the innermost nodes. */ else if (GET_CODE (arg0) == PLUS) { /* (invar_0 + invar_1) * invar_2. Distribute. */ return simplify_giv_expr (loop, gen_rtx_PLUS (mode, gen_rtx_MULT (mode, XEXP (arg0, 0), arg1), gen_rtx_MULT (mode, XEXP (arg0, 1), arg1)), ext_val, benefit); } return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1)); case MULT: /* (a * invar_1) * invar_2. Associate. */ return simplify_giv_expr (loop, gen_rtx_MULT (mode, XEXP (arg0, 0), gen_rtx_MULT (mode, XEXP (arg0, 1), arg1)), ext_val, benefit); case PLUS: /* (a + invar_1) * invar_2. Distribute. */ return simplify_giv_expr (loop, gen_rtx_PLUS (mode, gen_rtx_MULT (mode, XEXP (arg0, 0), arg1), gen_rtx_MULT (mode, XEXP (arg0, 1), arg1)), ext_val, benefit); default: abort (); } case ASHIFT: /* Shift by constant is multiply by power of two. */ if (GET_CODE (XEXP (x, 1)) != CONST_INT) return 0; return simplify_giv_expr (loop, gen_rtx_MULT (mode, XEXP (x, 0), GEN_INT ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1)))), ext_val, benefit); case NEG: /* "-a" is "a * (-1)" */ return simplify_giv_expr (loop, gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx), ext_val, benefit); case NOT: /* "~a" is "-a - 1". Silly, but easy.
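(For instance, purely as illustration: with a = i*2 + 3, ~a becomes -(i*2 + 3) - 1 = i*(-2) - 4, still a linear function of the biv and therefore still a giv.)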
*/ return simplify_giv_expr (loop, gen_rtx_MINUS (mode, gen_rtx_NEG (mode, XEXP (x, 0)), const1_rtx), ext_val, benefit); case USE: /* Already in proper form for invariant. */ return x; case SIGN_EXTEND: case ZERO_EXTEND: case TRUNCATE: /* Conditionally recognize extensions of simple IVs. After we've computed loop traversal counts and verified the range of the source IV, we'll reevaluate this as a GIV. */ if (*ext_val == NULL_RTX) { arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit); if (arg0 && *ext_val == NULL_RTX && REG_P (arg0)) { *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0); return arg0; } } goto do_default; case REG: /* If this is a new register, we can't deal with it. */ if (REGNO (x) >= max_reg_before_loop) return 0; /* Check for biv or giv. */ switch (REG_IV_TYPE (ivs, REGNO (x))) { case BASIC_INDUCT: return x; case GENERAL_INDUCT: { struct induction *v = REG_IV_INFO (ivs, REGNO (x)); /* Form expression from giv and add benefit. Ensure this giv can derive another and subtract any needed adjustment if so. */ /* Increasing the benefit here is risky. The only case in which it is arguably correct is if this is the only use of V. In other cases, this will artificially inflate the benefit of the current giv, and lead to suboptimal code. Thus, it is disabled, since potentially not reducing an only marginally beneficial giv is less harmful than reducing many givs that are not really beneficial. */ { rtx single_use = regs->array[REGNO (x)].single_usage; if (single_use && single_use != const0_rtx) *benefit += v->benefit; } if (v->cant_derive) return 0; tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg, v->mult_val), v->add_val); if (v->derive_adjustment) tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment); arg0 = simplify_giv_expr (loop, tem, ext_val, benefit); if (*ext_val) { if (!v->ext_dependent) return arg0; } else { *ext_val = v->ext_dependent; return arg0; } return 0; } default: do_default: /* If it isn't an induction variable, and it is invariant, we may be able to simplify things further by looking through the bits we just moved outside the loop. */ if (loop_invariant_p (loop, x) == 1) { struct movable *m; struct loop_movables *movables = LOOP_MOVABLES (loop); for (m = movables->head; m; m = m->next) if (rtx_equal_p (x, m->set_dest)) { /* Ok, we found a match. Substitute and simplify. */ /* If we match another movable, we must use that, as this one is going away. */ if (m->match) return simplify_giv_expr (loop, m->match->set_dest, ext_val, benefit); /* If consec is nonzero, this is a member of a group of instructions that were moved together. We handle this case only to the point of seeking to the last insn and looking for a REG_EQUAL. Fail if we don't find one. */ if (m->consec != 0) { int i = m->consec; tem = m->insn; do { tem = NEXT_INSN (tem); } while (--i > 0); tem = find_reg_note (tem, REG_EQUAL, NULL_RTX); if (tem) tem = XEXP (tem, 0); } else { tem = single_set (m->insn); if (tem) tem = SET_SRC (tem); } if (tem) { /* What we are most interested in is pointer arithmetic on invariants -- only take patterns we may be able to do something with. 
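Illustrative examples only: invariants such as base + 16, or a symbol plus a constant offset like &array[4], are the shapes accepted below, since they can be folded into the giv's add_val; anything else is not worth pursuing here.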
*/ if (GET_CODE (tem) == PLUS || GET_CODE (tem) == MULT || GET_CODE (tem) == ASHIFT || GET_CODE (tem) == CONST_INT || GET_CODE (tem) == SYMBOL_REF) { tem = simplify_giv_expr (loop, tem, ext_val, benefit); if (tem) return tem; } else if (GET_CODE (tem) == CONST && GET_CODE (XEXP (tem, 0)) == PLUS && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT) { tem = simplify_giv_expr (loop, XEXP (tem, 0), ext_val, benefit); if (tem) return tem; } } break; } } break; } /* Fall through to general case. */ default: /* If invariant, return as USE (unless CONST_INT). Otherwise, not giv. */ if (GET_CODE (x) == USE) x = XEXP (x, 0); if (loop_invariant_p (loop, x) == 1) { if (GET_CODE (x) == CONST_INT) return x; if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) x = XEXP (x, 0); return gen_rtx_USE (mode, x); } else return 0; } } /* This routine folds invariants such that there is only ever one CONST_INT in the summation. It is only used by simplify_giv_expr. */ static rtx sge_plus_constant (rtx x, rtx c) { if (GET_CODE (x) == CONST_INT) return GEN_INT (INTVAL (x) + INTVAL (c)); else if (GET_CODE (x) != PLUS) return gen_rtx_PLUS (GET_MODE (x), x, c); else if (GET_CODE (XEXP (x, 1)) == CONST_INT) { return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c))); } else if (GET_CODE (XEXP (x, 0)) == PLUS || GET_CODE (XEXP (x, 1)) != PLUS) { return gen_rtx_PLUS (GET_MODE (x), sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1)); } else { return gen_rtx_PLUS (GET_MODE (x), sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0)); } } static rtx sge_plus (enum machine_mode mode, rtx x, rtx y) { while (GET_CODE (y) == PLUS) { rtx a = XEXP (y, 0); if (GET_CODE (a) == CONST_INT) x = sge_plus_constant (x, a); else x = gen_rtx_PLUS (mode, x, a); y = XEXP (y, 1); } if (GET_CODE (y) == CONST_INT) x = sge_plus_constant (x, y); else x = gen_rtx_PLUS (mode, x, y); return x; } /* Help detect a giv that is calculated by several consecutive insns; for example, giv = biv * M giv = giv + A The caller has already identified the first insn P as having a giv as dest; we check that all other insns that set the same register follow immediately after P, that they alter nothing else, and that the result of the last is still a giv. The value is 0 if the reg set in P is not really a giv. Otherwise, the value is the amount gained by eliminating all the consecutive insns that compute the value. FIRST_BENEFIT is the amount gained by eliminating the first insn, P. SRC_REG is the reg of the biv; DEST_REG is the reg of the giv. The coefficients of the ultimate giv value are stored in *MULT_VAL and *ADD_VAL. */ static int consec_sets_giv (const struct loop *loop, int first_benefit, rtx p, rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val, rtx *ext_val, rtx *last_consec_insn) { struct loop_ivs *ivs = LOOP_IVS (loop); struct loop_regs *regs = LOOP_REGS (loop); int count; enum rtx_code code; int benefit; rtx temp; rtx set; /* Indicate that this is a giv so that we can update the value produced in each insn of the multi-insn sequence. This induction structure will be used only by the call to general_induction_var below, so we can allocate it on our stack. If this is a giv, our caller will replace the induct var entry with a new induction structure. 
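(A hypothetical source-level illustration: on a target without a scaled addressing mode, the address of a[i] may be computed as r = i * 4; r = r + base; both insns set r, and together they still define the single giv r = base + i*4, which is what this routine verifies.)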
*/ struct induction *v; if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT) return 0; v = alloca (sizeof (struct induction)); v->src_reg = src_reg; v->mult_val = *mult_val; v->add_val = *add_val; v->benefit = first_benefit; v->cant_derive = 0; v->derive_adjustment = 0; v->ext_dependent = NULL_RTX; REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT; REG_IV_INFO (ivs, REGNO (dest_reg)) = v; count = regs->array[REGNO (dest_reg)].n_times_set - 1; while (count > 0) { p = NEXT_INSN (p); code = GET_CODE (p); /* If libcall, skip to end of call sequence. */ if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) p = XEXP (temp, 0); if (code == INSN && (set = single_set (p)) && REG_P (SET_DEST (set)) && SET_DEST (set) == dest_reg && (general_induction_var (loop, SET_SRC (set), &src_reg, add_val, mult_val, ext_val, 0, &benefit, VOIDmode) /* Giv created by equivalent expression. */ || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)) && general_induction_var (loop, XEXP (temp, 0), &src_reg, add_val, mult_val, ext_val, 0, &benefit, VOIDmode))) && src_reg == v->src_reg) { if (find_reg_note (p, REG_RETVAL, NULL_RTX)) benefit += libcall_benefit (p); count--; v->mult_val = *mult_val; v->add_val = *add_val; v->benefit += benefit; } else if (code != NOTE) { /* Allow insns that set something other than this giv to a constant. Such insns are needed on machines which cannot include long constants and should not disqualify a giv. */ if (code == INSN && (set = single_set (p)) && SET_DEST (set) != dest_reg && CONSTANT_P (SET_SRC (set))) continue; REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT; return 0; } } REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT; *last_consec_insn = p; return v->benefit; } /* Return an rtx, if any, that expresses giv G2 as a function of the register represented by G1. If no such expression can be found, or it is clear that it cannot possibly be a valid address, 0 is returned. To perform the computation, we note that G1 = x * v + a and G2 = y * v + b where `v' is the biv. So G2 = (y/x) * G1 + (b - a*y/x). Note that MULT = y/x. Update: A and B are now allowed to be additive expressions such that B contains all variables in A. That is, computing B-A will not require subtracting variables. */ static rtx express_from_1 (rtx a, rtx b, rtx mult) { /* If MULT is zero, then A*MULT is zero, and our expression is B. */ if (mult == const0_rtx) return b; /* If MULT is not 1, we cannot handle A with non-constants, since we would then be required to subtract multiples of the registers in A. This is theoretically possible, and may even apply to some Fortran constructs, but it is a lot of work and we do not attempt it here. */ if (mult != const1_rtx && GET_CODE (a) != CONST_INT) return NULL_RTX; /* In general these structures are sorted top to bottom (down the PLUS chain), but not left to right across the PLUS. If B is a higher order giv than A, we can strip one level and recurse. If A is higher order, we'll eventually bail out, but won't know that until the end. If they are the same, we'll strip one level around this loop. */ while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS) { rtx ra, rb, oa, ob, tmp; ra = XEXP (a, 0), oa = XEXP (a, 1); if (GET_CODE (ra) == PLUS) tmp = ra, ra = oa, oa = tmp; rb = XEXP (b, 0), ob = XEXP (b, 1); if (GET_CODE (rb) == PLUS) tmp = rb, rb = ob, ob = tmp; if (rtx_equal_p (ra, rb)) /* We matched: remove one reg completely. */ a = oa, b = ob; else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob)) /* An alternate match.
*/ a = oa, b = rb; else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb)) /* An alternate match. */ a = ra, b = ob; else { /* Indicates an extra register in B. Strip one level from B and recurse, hoping B was the higher order expression. */ ob = express_from_1 (a, ob, mult); if (ob == NULL_RTX) return NULL_RTX; return gen_rtx_PLUS (GET_MODE (b), rb, ob); } } /* Here we are at the last level of A, go through the cases hoping to get rid of everything but a constant. */ if (GET_CODE (a) == PLUS) { rtx ra, oa; ra = XEXP (a, 0), oa = XEXP (a, 1); if (rtx_equal_p (oa, b)) oa = ra; else if (!rtx_equal_p (ra, b)) return NULL_RTX; if (GET_CODE (oa) != CONST_INT) return NULL_RTX; return GEN_INT (-INTVAL (oa) * INTVAL (mult)); } else if (GET_CODE (a) == CONST_INT) { return plus_constant (b, -INTVAL (a) * INTVAL (mult)); } else if (CONSTANT_P (a)) { enum machine_mode mode_a = GET_MODE (a); enum machine_mode mode_b = GET_MODE (b); enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b; return simplify_gen_binary (MINUS, mode, b, a); } else if (GET_CODE (b) == PLUS) { if (rtx_equal_p (a, XEXP (b, 0))) return XEXP (b, 1); else if (rtx_equal_p (a, XEXP (b, 1))) return XEXP (b, 0); else return NULL_RTX; } else if (rtx_equal_p (a, b)) return const0_rtx; return NULL_RTX; } rtx express_from (struct induction *g1, struct induction *g2) { rtx mult, add; /* The value that G1 will be multiplied by must be a constant integer. Also, the only chance we have of getting a valid address is if b*c/a (see above for notation) is also an integer. */ if (GET_CODE (g1->mult_val) == CONST_INT && GET_CODE (g2->mult_val) == CONST_INT) { if (g1->mult_val == const0_rtx || (g1->mult_val == constm1_rtx && INTVAL (g2->mult_val) == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)) || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0) return NULL_RTX; mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val)); } else if (rtx_equal_p (g1->mult_val, g2->mult_val)) mult = const1_rtx; else { /* ??? Find out if the one is a multiple of the other? */ return NULL_RTX; } add = express_from_1 (g1->add_val, g2->add_val, mult); if (add == NULL_RTX) { /* Failed. If we've got a multiplication factor between G1 and G2, scale G1's addend and try again. */ if (INTVAL (mult) > 1) { rtx g1_add_val = g1->add_val; if (GET_CODE (g1_add_val) == MULT && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT) { HOST_WIDE_INT m; m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1)); g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), XEXP (g1_add_val, 0), GEN_INT (m)); } else { g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val, mult); } add = express_from_1 (g1_add_val, g2->add_val, const1_rtx); } } if (add == NULL_RTX) return NULL_RTX; /* Form simplified final result. */ if (mult == const0_rtx) return add; else if (mult == const1_rtx) mult = g1->dest_reg; else mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult); if (add == const0_rtx) return mult; else { if (GET_CODE (add) == PLUS && CONSTANT_P (XEXP (add, 1))) { rtx tem = XEXP (add, 1); mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0)); add = tem; } return gen_rtx_PLUS (g2->mode, mult, add); } } /* Return an rtx, if any, that expresses giv G2 as a function of the register represented by G1. This indicates that G2 should be combined with G1 and that G2 can use (either directly or via an address expression) a register used to represent G1. */ static rtx combine_givs_p (struct induction *g1, struct induction *g2) { rtx comb, ret; /* With the introduction of ext dependent givs, we must care for modes. 
G2 must not use a wider mode than G1. */ if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode)) return NULL_RTX; ret = comb = express_from (g1, g2); if (comb == NULL_RTX) return NULL_RTX; if (g1->mode != g2->mode) ret = gen_lowpart (g2->mode, comb); /* If these givs are identical, they can be combined. We use the results of express_from because the addends are not in a canonical form, so rtx_equal_p is a weaker test. */ /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the combination to be the other way round. */ if (comb == g1->dest_reg && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR)) { return ret; } /* If G2 can be expressed as a function of G1 and that function is valid as an address and no more expensive than using a register for G2, the expression of G2 in terms of G1 can be used. */ if (ret != NULL_RTX && g2->giv_type == DEST_ADDR && memory_address_p (GET_MODE (g2->mem), ret)) return ret; return NULL_RTX; } /* Check each extension dependent giv in this class to see if its root biv is safe from wrapping in the interior mode, which would make the giv illegal. */ static void check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl) { struct loop_info *loop_info = LOOP_INFO (loop); int ze_ok = 0, se_ok = 0, info_ok = 0; enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg); HOST_WIDE_INT start_val; unsigned HOST_WIDE_INT u_end_val = 0; unsigned HOST_WIDE_INT u_start_val = 0; rtx incr = pc_rtx; struct induction *v; /* Make sure the iteration data is available. We must have constants in order to be certain of no overflow. */ if (loop_info->n_iterations > 0 && bl->initial_value && GET_CODE (bl->initial_value) == CONST_INT && (incr = biv_total_increment (bl)) && GET_CODE (incr) == CONST_INT /* Make sure the host can represent the arithmetic. */ && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode)) { unsigned HOST_WIDE_INT abs_incr, total_incr; HOST_WIDE_INT s_end_val; int neg_incr; info_ok = 1; start_val = INTVAL (bl->initial_value); u_start_val = start_val; neg_incr = 0, abs_incr = INTVAL (incr); if (INTVAL (incr) < 0) neg_incr = 1, abs_incr = -abs_incr; total_incr = abs_incr * loop_info->n_iterations; /* Check for host arithmetic overflow. */ if (total_incr / loop_info->n_iterations == abs_incr) { unsigned HOST_WIDE_INT u_max; HOST_WIDE_INT s_max; u_end_val = start_val + (neg_incr ? -total_incr : total_incr); s_end_val = u_end_val; u_max = GET_MODE_MASK (biv_mode); s_max = u_max >> 1; /* Check zero extension of biv ok. */ if (start_val >= 0 /* Check for host arithmetic overflow. */ && (neg_incr ? u_end_val < u_start_val : u_end_val > u_start_val) /* Check for target arithmetic overflow. */ && (neg_incr ? 1 /* taken care of with host overflow */ : u_end_val <= u_max)) { ze_ok = 1; } /* Check sign extension of biv ok. */ /* ??? While it is true that overflow with signed and pointer arithmetic is undefined, I fear too many programmers don't keep this fact in mind -- myself included on occasion. So leave alone with the signed overflow optimizations. */ if (start_val >= -s_max - 1 /* Check for host arithmetic overflow. */ && (neg_incr ? s_end_val < start_val : s_end_val > start_val) /* Check for target arithmetic overflow. */ && (neg_incr ? s_end_val >= -s_max - 1 : s_end_val <= s_max)) { se_ok = 1; } } } /* If we know the BIV is compared at run-time against an invariant value, and the increment is +/- 1, we may also be able to prove that the BIV cannot overflow. 
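For example, speaking loosely: a biv stepped by 1 whose only exit test is i < limit, with limit loop-invariant, can never be stepped past limit inside the loop; the code below records this as both se_ok and ze_ok for the +1/LT and -1/GT cases, and as ze_ok only for the unsigned +1/LTU and -1/GTU tests.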
*/ else if (bl->biv->src_reg == loop_info->iteration_var && loop_info->comparison_value && loop_invariant_p (loop, loop_info->comparison_value) && (incr = biv_total_increment (bl)) && GET_CODE (incr) == CONST_INT) { /* If the increment is +1, and the exit test is a <, the BIV cannot overflow. (For <=, we have the problematic case that the comparison value might be the maximum value of the range.) */ if (INTVAL (incr) == 1) { if (loop_info->comparison_code == LT) se_ok = ze_ok = 1; else if (loop_info->comparison_code == LTU) ze_ok = 1; } /* Likewise for increment -1 and exit test >. */ if (INTVAL (incr) == -1) { if (loop_info->comparison_code == GT) se_ok = ze_ok = 1; else if (loop_info->comparison_code == GTU) ze_ok = 1; } } /* Invalidate givs that fail the tests. */ for (v = bl->giv; v; v = v->next_iv) if (v->ext_dependent) { enum rtx_code code = GET_CODE (v->ext_dependent); int ok = 0; switch (code) { case SIGN_EXTEND: ok = se_ok; break; case ZERO_EXTEND: ok = ze_ok; break; case TRUNCATE: /* We don't know whether this value is being used as either signed or unsigned, so to safely truncate we must satisfy both. The initial check here verifies the BIV itself; once that is successful we may check its range wrt the derived GIV. This works only if we were able to determine constant start and end values above. */ if (se_ok && ze_ok && info_ok) { enum machine_mode outer_mode = GET_MODE (v->ext_dependent); unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1; /* We know from the above that both endpoints are nonnegative, and that there is no wrapping. Verify that both endpoints are within the (signed) range of the outer mode. */ if (u_start_val <= max && u_end_val <= max) ok = 1; } break; default: abort (); } if (ok) { if (loop_dump_stream) { fprintf (loop_dump_stream, "Verified ext dependent giv at %d of reg %d\n", INSN_UID (v->insn), bl->regno); } } else { if (loop_dump_stream) { const char *why; if (info_ok) why = "biv iteration values overflowed"; else { if (incr == pc_rtx) incr = biv_total_increment (bl); if (incr == const1_rtx) why = "biv iteration info incomplete; incr by 1"; else why = "biv iteration info incomplete"; } fprintf (loop_dump_stream, "Failed ext dependent giv at %d, %s\n", INSN_UID (v->insn), why); } v->ignore = 1; bl->all_reduced = 0; } } } /* Generate a version of VALUE in a mode appropriate for initializing V. */ rtx extend_value_for_giv (struct induction *v, rtx value) { rtx ext_dep = v->ext_dependent; if (! ext_dep) return value; /* Recall that check_ext_dependent_givs verified that the known bounds of a biv did not overflow or wrap with respect to the extension for the giv. Therefore, constants need no additional adjustment. */ if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode) return value; /* Otherwise, we must adjust the value to compensate for the differing modes of the biv and the giv. */ return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value); } struct combine_givs_stats { int giv_number; int total_benefit; }; static int cmp_combine_givs_stats (const void *xp, const void *yp) { const struct combine_givs_stats * const x = (const struct combine_givs_stats *) xp; const struct combine_givs_stats * const y = (const struct combine_givs_stats *) yp; int d; d = y->total_benefit - x->total_benefit; /* Stabilize the sort. */ if (!d) d = x->giv_number - y->giv_number; return d; } /* Check all pairs of givs for iv_class BL and see if any can be combined with any other. 
If so, point SAME to the giv combined with and set NEW_REG to be an expression (in terms of the other giv's DEST_REG) equivalent to the giv. Also, update BENEFIT and related fields for cost/benefit analysis. */ static void combine_givs (struct loop_regs *regs, struct iv_class *bl) { /* Additional benefit to add for being combined multiple times. */ const int extra_benefit = 3; struct induction *g1, *g2, **giv_array; int i, j, k, giv_count; struct combine_givs_stats *stats; rtx *can_combine; /* Count givs, because bl->giv_count is incorrect here. */ giv_count = 0; for (g1 = bl->giv; g1; g1 = g1->next_iv) if (!g1->ignore) giv_count++; giv_array = alloca (giv_count * sizeof (struct induction *)); i = 0; for (g1 = bl->giv; g1; g1 = g1->next_iv) if (!g1->ignore) giv_array[i++] = g1; stats = xcalloc (giv_count, sizeof (*stats)); can_combine = xcalloc (giv_count, giv_count * sizeof (rtx)); for (i = 0; i < giv_count; i++) { int this_benefit; rtx single_use; g1 = giv_array[i]; stats[i].giv_number = i; /* If a DEST_REG GIV is used only once, do not allow it to combine with anything, for in doing so we will gain nothing that cannot be had by simply letting the GIV with which we would have combined to be reduced on its own. The losage shows up in particular with DEST_ADDR targets on hosts with reg+reg addressing, though it can be seen elsewhere as well. */ if (g1->giv_type == DEST_REG && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage) && single_use != const0_rtx) continue; this_benefit = g1->benefit; /* Add an additional weight for zero addends. */ if (g1->no_const_addval) this_benefit += 1; for (j = 0; j < giv_count; j++) { rtx this_combine; g2 = giv_array[j]; if (g1 != g2 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX) { can_combine[i * giv_count + j] = this_combine; this_benefit += g2->benefit + extra_benefit; } } stats[i].total_benefit = this_benefit; } /* Iterate, combining until we can't. */ restart: qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats); if (loop_dump_stream) { fprintf (loop_dump_stream, "Sorted combine statistics:\n"); for (k = 0; k < giv_count; k++) { g1 = giv_array[stats[k].giv_number]; if (!g1->combined_with && !g1->same) fprintf (loop_dump_stream, " {%d, %d}", INSN_UID (giv_array[stats[k].giv_number]->insn), stats[k].total_benefit); } putc ('\n', loop_dump_stream); } for (k = 0; k < giv_count; k++) { int g1_add_benefit = 0; i = stats[k].giv_number; g1 = giv_array[i]; /* If it has already been combined, skip. */ if (g1->combined_with || g1->same) continue; for (j = 0; j < giv_count; j++) { g2 = giv_array[j]; if (g1 != g2 && can_combine[i * giv_count + j] /* If it has already been combined, skip. */ && ! g2->same && ! g2->combined_with) { int l; g2->new_reg = can_combine[i * giv_count + j]; g2->same = g1; /* For destination, we now may replace by mem expression instead of register. This changes the costs considerably, so add the compensation. */ if (g2->giv_type == DEST_ADDR) g2->benefit = (g2->benefit + reg_address_cost - address_cost (g2->new_reg, GET_MODE (g2->mem))); g1->combined_with++; g1->lifetime += g2->lifetime; g1_add_benefit += g2->benefit; /* ??? The new final_[bg]iv_value code does a much better job of finding replaceable giv's, and hence this code may no longer be necessary. */ if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg)) g1_add_benefit -= cost_per_copy; /* To help optimize the next set of combinations, remove this giv from the benefits of other potential mates. 
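(Numerically, as an illustration: if g2's benefit is 4 and extra_benefit is 3, every remaining candidate that could also have combined with g2 has its total_benefit reduced by 7, since g2 is no longer available to it.)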
*/ for (l = 0; l < giv_count; ++l) { int m = stats[l].giv_number; if (can_combine[m * giv_count + j]) stats[l].total_benefit -= g2->benefit + extra_benefit; } if (loop_dump_stream) fprintf (loop_dump_stream, "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n", INSN_UID (g2->insn), INSN_UID (g1->insn), g1->benefit, g1_add_benefit, g1->lifetime); } } /* To help optimize the next set of combinations, remove this giv from the benefits of other potential mates. */ if (g1->combined_with) { for (j = 0; j < giv_count; ++j) { int m = stats[j].giv_number; if (can_combine[m * giv_count + i]) stats[j].total_benefit -= g1->benefit + extra_benefit; } g1->benefit += g1_add_benefit; /* We've finished with this giv, and everything it touched. Restart the combination so that proper weights for the rest of the givs are properly taken into account. */ /* ??? Ideally we would compact the arrays at this point, so as to not cover old ground. But sanely compacting can_combine is tricky. */ goto restart; } } /* Clean up. */ free (stats); free (can_combine); } /* Generate sequence for REG = B * M + A. B is the initial value of the basic induction variable, M a multiplicative constant, A an additive constant and REG the destination register. */ static rtx gen_add_mult (rtx b, rtx m, rtx a, rtx reg) { rtx seq; rtx result; start_sequence (); /* Use unsigned arithmetic. */ result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1); if (reg != result) emit_move_insn (reg, result); seq = get_insns (); end_sequence (); return seq; } /* Update registers created in insn sequence SEQ. */ static void loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq) { rtx insn; /* Update register info for alias analysis. */ insn = seq; while (insn != NULL_RTX) { rtx set = single_set (insn); if (set && REG_P (SET_DEST (set))) record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0); insn = NEXT_INSN (insn); } } /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B is the initial value of the basic induction variable, M a multiplicative constant, A an additive constant and REG the destination register. */ void loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg, basic_block before_bb, rtx before_insn) { rtx seq; if (! before_insn) { loop_iv_add_mult_hoist (loop, b, m, a, reg); return; } /* Use copy_rtx to prevent unexpected sharing of these rtx. */ seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg); /* Increase the lifetime of any invariants moved further in code. */ update_reg_last_use (a, before_insn); update_reg_last_use (b, before_insn); update_reg_last_use (m, before_insn); /* It is possible that the expansion created lots of new registers. Iterate over the sequence we just created and record them all. We must do this before inserting the sequence. */ loop_regs_update (loop, seq); loop_insn_emit_before (loop, before_bb, before_insn, seq); } /* Emit insns after the loop (at the loop's sink) to set REG = B * M + A. B is the initial value of the basic induction variable, M a multiplicative constant, A an additive constant and REG the destination register. */ void loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg) { rtx seq; /* Use copy_rtx to prevent unexpected sharing of these rtx. */ seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg); /* Increase the lifetime of any invariants moved further in code. ???? Is this really necessary?
*/ update_reg_last_use (a, loop->sink); update_reg_last_use (b, loop->sink); update_reg_last_use (m, loop->sink); /* It is possible that the expansion created lots of new registers. Iterate over the sequence we just created and record them all. We must do this before inserting the sequence. */ loop_regs_update (loop, seq); loop_insn_sink (loop, seq); } /* Emit insns in the loop pre-header to set REG = B * M + A. B is the initial value of the basic induction variable, M a multiplicative constant, A an additive constant and REG the destination register. */ void loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg) { rtx seq; /* Use copy_rtx to prevent unexpected sharing of these rtx. */ seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg); /* It is possible that the expansion created lots of new registers. Iterate over the sequence we just created and record them all. We must do this before inserting the sequence. */ loop_regs_update (loop, seq); loop_insn_hoist (loop, seq); } /* Similar to gen_add_mult, but compute cost rather than generating sequence. */ static int iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg) { int cost = 0; rtx last, result; start_sequence (); result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1); if (reg != result) emit_move_insn (reg, result); last = get_last_insn (); while (last) { rtx t = single_set (last); if (t) cost += rtx_cost (SET_SRC (t), SET); last = PREV_INSN (last); } end_sequence (); return cost; } /* Test whether A * B can be computed without an actual multiply insn. Value is 1 if so. ??? This function stinks because it generates a ton of wasted RTL ??? and as a result fragments GC memory to no end. There are other ??? places in the compiler which are invoked a lot and do the same ??? thing, generate wasted RTL just to see if something is possible. */ static int product_cheap_p (rtx a, rtx b) { rtx tmp; int win, n_insns; /* If only one is constant, make it B. */ if (GET_CODE (a) == CONST_INT) tmp = a, a = b, b = tmp; /* If first constant, both constant, so don't need multiply. */ if (GET_CODE (a) == CONST_INT) return 1; /* If second not constant, neither is constant, so would need multiply. */ if (GET_CODE (b) != CONST_INT) return 0; /* One operand is constant, so might not need multiply insn. Generate the code for the multiply and see if a call or multiply, or long sequence of insns is generated. */ start_sequence (); expand_mult (GET_MODE (a), a, b, NULL_RTX, 1); tmp = get_insns (); end_sequence (); win = 1; if (INSN_P (tmp)) { n_insns = 0; while (tmp != NULL_RTX) { rtx next = NEXT_INSN (tmp); if (++n_insns > 3 || GET_CODE (tmp) != INSN || (GET_CODE (PATTERN (tmp)) == SET && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT) || (GET_CODE (PATTERN (tmp)) == PARALLEL && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT)) { win = 0; break; } tmp = next; } } else if (GET_CODE (tmp) == SET && GET_CODE (SET_SRC (tmp)) == MULT) win = 0; else if (GET_CODE (tmp) == PARALLEL && GET_CODE (XVECEXP (tmp, 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT) win = 0; return win; } /* Check to see if loop can be terminated by a "decrement and branch until zero" instruction. If so, add a REG_NONNEG note to the branch insn. Also try reversing an increment loop to a decrement loop to see if the optimization can be performed. Value is nonzero if optimization was performed.
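An illustrative source-level sketch (not from the original comment): when i is used only to count iterations, for (i = 0; i < n; i++) body (); can be rewritten as for (i = n; i > 0; i--) body ();, so that the exit test becomes a comparison against zero, or a single decrement-and-branch instruction where the target provides one.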
*/ /* This is useful even if the architecture doesn't have such an insn, because it might change a loop which increments from 0 to n to a loop which decrements from n to 0. A loop that decrements to zero is usually faster than one that increments from zero. */ /* ??? This could be rewritten to use some of the loop unrolling procedures, such as approx_final_value, biv_total_increment, loop_iterations, and final_[bg]iv_value. */ static int check_dbra_loop (struct loop *loop, int insn_count) { struct loop_info *loop_info = LOOP_INFO (loop); struct loop_regs *regs = LOOP_REGS (loop); struct loop_ivs *ivs = LOOP_IVS (loop); struct iv_class *bl; rtx reg; enum machine_mode mode; rtx jump_label; rtx final_value; rtx start_value; rtx new_add_val; rtx comparison; rtx before_comparison; rtx p; rtx jump; rtx first_compare; int compare_and_branch; rtx loop_start = loop->start; rtx loop_end = loop->end; /* If last insn is a conditional branch, and the insn before tests a register value, try to optimize it. Otherwise, we can't do anything. */ jump = PREV_INSN (loop_end); comparison = get_condition_for_loop (loop, jump); if (comparison == 0) return 0; if (!onlyjump_p (jump)) return 0; /* Try to compute whether the compare/branch at the loop end is one or two instructions. */ get_condition (jump, &first_compare, false); if (first_compare == jump) compare_and_branch = 1; else if (first_compare == prev_nonnote_insn (jump)) compare_and_branch = 2; else return 0; { /* If more than one condition is present to control the loop, then do not proceed, as this function does not know how to rewrite loop tests with more than one condition. Look backwards from the first insn in the last comparison sequence and see if we've got another comparison sequence. */ rtx jump1; if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont) if (GET_CODE (jump1) == JUMP_INSN) return 0; } /* Check all of the bivs to see if the compare uses one of them. Skip biv's set more than once because we can't guarantee that it will be zero on the last iteration. Also skip if the biv is used between its update and the test insn. */ for (bl = ivs->list; bl; bl = bl->next) { if (bl->biv_count == 1 && ! bl->biv->maybe_multiple && bl->biv->dest_reg == XEXP (comparison, 0) && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn, first_compare)) break; } /* Try swapping the comparison to identify a suitable biv. */ if (!bl) for (bl = ivs->list; bl; bl = bl->next) if (bl->biv_count == 1 && ! bl->biv->maybe_multiple && bl->biv->dest_reg == XEXP (comparison, 1) && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn, first_compare)) { comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode, XEXP (comparison, 1), XEXP (comparison, 0)); break; } if (! bl) return 0; /* Look for the case where the basic induction variable is always nonnegative, and equals zero on the last iteration. In this case, add a reg_note REG_NONNEG, which allows the m68k DBRA instruction to be used.
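A hypothetical example: a countdown loop whose ending test is count >= 0 (i.e. count > -1) after count is decremented by 1, with count starting positive, has a biv that is never negative while the branch is taken, which is the property the REG_NONNEG note records and what a dbra-style decrement-and-branch instruction exploits.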
*/ if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx) || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx)) && GET_CODE (bl->biv->add_val) == CONST_INT && INTVAL (bl->biv->add_val) < 0) { /* Initial value must be greater than 0, init_val % -dec_value == 0 to ensure that it equals zero on the last iteration */ if (GET_CODE (bl->initial_value) == CONST_INT && INTVAL (bl->initial_value) > 0 && (INTVAL (bl->initial_value) % (-INTVAL (bl->biv->add_val))) == 0) { /* Register always nonnegative, add REG_NOTE to branch. */ if (! find_reg_note (jump, REG_NONNEG, NULL_RTX)) REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg, REG_NOTES (jump)); bl->nonneg = 1; return 1; } /* If the decrement is 1 and the value was tested as >= 0 before the loop, then we can safely optimize. */ for (p = loop_start; p; p = PREV_INSN (p)) { if (GET_CODE (p) == CODE_LABEL) break; if (GET_CODE (p) != JUMP_INSN) continue; before_comparison = get_condition_for_loop (loop, p); if (before_comparison && XEXP (before_comparison, 0) == bl->biv->dest_reg && (GET_CODE (before_comparison) == LT || GET_CODE (before_comparison) == LTU) && XEXP (before_comparison, 1) == const0_rtx && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start) && INTVAL (bl->biv->add_val) == -1) { if (! find_reg_note (jump, REG_NONNEG, NULL_RTX)) REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg, REG_NOTES (jump)); bl->nonneg = 1; return 1; } } } else if (GET_CODE (bl->biv->add_val) == CONST_INT && INTVAL (bl->biv->add_val) > 0) { /* Try to change inc to dec, so can apply above optimization. */ /* Can do this if: all registers modified are induction variables or invariant, all memory references have non-overlapping addresses (obviously true if only one write) allow 2 insns for the compare/jump at the end of the loop. */ /* Also, we must avoid any instructions which use both the reversed biv and another biv. Such instructions will fail if the loop is reversed. We meet this condition by requiring that either no_use_except_counting is true, or else that there is only one biv. */ int num_nonfixed_reads = 0; /* 1 if the iteration var is used only to count iterations. */ int no_use_except_counting = 0; /* 1 if the loop has no memory store, or it has a single memory store which is reversible. */ int reversible_mem_store = 1; if (bl->giv_count == 0 && !loop->exit_count && !loop_info->has_multiple_exit_targets) { rtx bivreg = regno_reg_rtx[bl->regno]; struct iv_class *blt; /* If there are no givs for this biv, and the only exit is the fall through at the end of the loop, then see if perhaps there are no uses except to count. */ no_use_except_counting = 1; for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) if (INSN_P (p)) { rtx set = single_set (p); if (set && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) == bl->regno) /* An insn that sets the biv is okay. */ ; else if (!reg_mentioned_p (bivreg, PATTERN (p))) /* An insn that doesn't mention the biv is okay. */ ; else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end)) || p == prev_nonnote_insn (loop_end)) { /* If either of these insns uses the biv and sets a pseudo that has more than one usage, then the biv has uses other than counting since it's used to derive a value that is used more than one time. 
*/ note_stores (PATTERN (p), note_set_pseudo_multiple_uses, regs); if (regs->multiple_uses) { no_use_except_counting = 0; break; } } else { no_use_except_counting = 0; break; } } /* A biv has uses besides counting if it is used to set another biv. */ for (blt = ivs->list; blt; blt = blt->next) if (blt->init_set && reg_mentioned_p (bivreg, SET_SRC (blt->init_set))) { no_use_except_counting = 0; break; } } if (no_use_except_counting) /* No need to worry about MEMs. */ ; else if (loop_info->num_mem_sets <= 1) { for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) if (INSN_P (p)) num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p)); /* If the loop has a single store, and the destination address is invariant, then we can't reverse the loop, because this address might then have the wrong value at loop exit. This would work if the source was invariant also; however, in that case, the insn should have been moved out of the loop. */ if (loop_info->num_mem_sets == 1) { struct induction *v; /* If we could prove that each of the memory locations written to was different, then we could reverse the store -- but we don't presently have any way of knowing that. */ reversible_mem_store = 0; /* If the store depends on a register that is set after the store, it depends on the initial value, and is thus not reversible. */ for (v = bl->giv; reversible_mem_store && v; v = v->next_iv) { if (v->giv_type == DEST_REG && reg_mentioned_p (v->dest_reg, PATTERN (loop_info->first_loop_store_insn)) && loop_insn_first_p (loop_info->first_loop_store_insn, v->insn)) reversible_mem_store = 0; } } } else return 0; /* This code only acts for innermost loops. Also it simplifies the memory address check by only reversing loops with zero or one memory access. Two memory accesses could involve parts of the same array, and that can't be reversed. If the biv is used only for counting, then we don't need to worry about all these things. */ if ((num_nonfixed_reads <= 1 && ! loop_info->has_nonconst_call && ! loop_info->has_prefetch && ! loop_info->has_volatile && reversible_mem_store && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets + num_unmoved_movables (loop) + compare_and_branch == insn_count) && (bl == ivs->list && bl->next == 0)) || (no_use_except_counting && ! loop_info->has_prefetch)) { rtx tem; /* Loop can be reversed. */ if (loop_dump_stream) fprintf (loop_dump_stream, "Can reverse loop\n"); /* Now check other conditions: The increment must be a constant, as must the initial value, and the comparison code must be LT. This test can probably be improved since +/- 1 in the constant can be obtained by changing LT to LE and vice versa; this is confusing.
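A worked illustration of the normalization performed below, with hypothetical numbers: for i starting at 3 with exit test i < 10 and step 2, the initial value is subtracted from the comparison value (giving 7) and the result rounded up to a multiple of the step (8); the reversed loop then counts 8, 6, 4, 2 down toward 0, four iterations either way.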
*/ if (comparison /* for constants, LE gets turned into LT */ && (GET_CODE (comparison) == LT || (GET_CODE (comparison) == LE && no_use_except_counting) || GET_CODE (comparison) == LTU)) { HOST_WIDE_INT add_val, add_adjust, comparison_val = 0; rtx initial_value, comparison_value; int nonneg = 0; enum rtx_code cmp_code; int comparison_const_width; unsigned HOST_WIDE_INT comparison_sign_mask; add_val = INTVAL (bl->biv->add_val); comparison_value = XEXP (comparison, 1); if (GET_MODE (comparison_value) == VOIDmode) comparison_const_width = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0))); else comparison_const_width = GET_MODE_BITSIZE (GET_MODE (comparison_value)); if (comparison_const_width > HOST_BITS_PER_WIDE_INT) comparison_const_width = HOST_BITS_PER_WIDE_INT; comparison_sign_mask = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1); /* If the comparison value is not a loop invariant, then we can not reverse this loop. ??? If the insns which initialize the comparison value as a whole compute an invariant result, then we could move them out of the loop and proceed with loop reversal. */ if (! loop_invariant_p (loop, comparison_value)) return 0; if (GET_CODE (comparison_value) == CONST_INT) comparison_val = INTVAL (comparison_value); initial_value = bl->initial_value; /* Normalize the initial value if it is an integer and has no other use except as a counter. This will allow a few more loops to be reversed. */ if (no_use_except_counting && GET_CODE (comparison_value) == CONST_INT && GET_CODE (initial_value) == CONST_INT) { comparison_val = comparison_val - INTVAL (bl->initial_value); /* The code below requires comparison_val to be a multiple of add_val in order to do the loop reversal, so round up comparison_val to a multiple of add_val. Since comparison_value is constant, we know that the current comparison code is LT. */ comparison_val = comparison_val + add_val - 1; comparison_val -= (unsigned HOST_WIDE_INT) comparison_val % add_val; /* We postpone overflow checks for COMPARISON_VAL here; even if there is an overflow, we might still be able to reverse the loop, if converting the loop exit test to NE is possible. */ initial_value = const0_rtx; } /* First check if we can do a vanilla loop reversal. */ if (initial_value == const0_rtx /* If we have a decrement_and_branch_on_count, prefer the NE test, since this will allow that instruction to be generated. Note that we must use a vanilla loop reversal if the biv is used to calculate a giv or has a non-counting use. */ #if ! defined (HAVE_decrement_and_branch_until_zero) \ && defined (HAVE_decrement_and_branch_on_count) && (! (add_val == 1 && loop->vtop && (bl->biv_count == 0 || no_use_except_counting))) #endif && GET_CODE (comparison_value) == CONST_INT /* Now do postponed overflow checks on COMPARISON_VAL. */ && ! (((comparison_val - add_val) ^ INTVAL (comparison_value)) & comparison_sign_mask)) { /* Register will always be nonnegative, with value 0 on last iteration */ add_adjust = add_val; nonneg = 1; cmp_code = GE; } else if (add_val == 1 && loop->vtop && (bl->biv_count == 0 || no_use_except_counting)) { add_adjust = 0; cmp_code = NE; } else return 0; if (GET_CODE (comparison) == LE) add_adjust -= add_val; /* If the initial value is not zero, or if the comparison value is not an exact multiple of the increment, then we can not reverse this loop. */ if (initial_value == const0_rtx && GET_CODE (comparison_value) == CONST_INT) { if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0) return 0; } else { if (! 
no_use_except_counting || add_val != 1) return 0; } final_value = comparison_value; /* Reset these in case we normalized the initial value and comparison value above. */ if (GET_CODE (comparison_value) == CONST_INT && GET_CODE (initial_value) == CONST_INT) { comparison_value = GEN_INT (comparison_val); final_value = GEN_INT (comparison_val + INTVAL (bl->initial_value)); } bl->initial_value = initial_value; /* Save some info needed to produce the new insns. */ reg = bl->biv->dest_reg; mode = GET_MODE (reg); jump_label = condjump_label (PREV_INSN (loop_end)); new_add_val = GEN_INT (-INTVAL (bl->biv->add_val)); /* Set start_value; if this is not a CONST_INT, we need to generate a SUB. Initialize biv to start_value before loop start. The old initializing insn will be deleted as a dead store by flow.c. */ if (initial_value == const0_rtx && GET_CODE (comparison_value) == CONST_INT) { start_value = gen_int_mode (comparison_val - add_adjust, mode); loop_insn_hoist (loop, gen_move_insn (reg, start_value)); } else if (GET_CODE (initial_value) == CONST_INT) { rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust); rtx add_insn = gen_add3_insn (reg, comparison_value, offset); if (add_insn == 0) return 0; start_value = gen_rtx_PLUS (mode, comparison_value, offset); loop_insn_hoist (loop, add_insn); if (GET_CODE (comparison) == LE) final_value = gen_rtx_PLUS (mode, comparison_value, GEN_INT (add_val)); } else if (! add_adjust) { rtx sub_insn = gen_sub3_insn (reg, comparison_value, initial_value); if (sub_insn == 0) return 0; start_value = gen_rtx_MINUS (mode, comparison_value, initial_value); loop_insn_hoist (loop, sub_insn); } else /* We could handle the other cases too, but it'll be better to have a testcase first. */ return 0; /* We may not have a single insn which can increment a reg, so create a sequence to hold all the insns from expand_inc. */ start_sequence (); expand_inc (reg, new_add_val); tem = get_insns (); end_sequence (); p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem); delete_insn (bl->biv->insn); /* Update biv info to reflect its new status. */ bl->biv->insn = p; bl->initial_value = start_value; bl->biv->add_val = new_add_val; /* Update loop info. */ loop_info->initial_value = reg; loop_info->initial_equiv_value = reg; loop_info->final_value = const0_rtx; loop_info->final_equiv_value = const0_rtx; loop_info->comparison_value = const0_rtx; loop_info->comparison_code = cmp_code; loop_info->increment = new_add_val; /* Inc LABEL_NUSES so that delete_insn will not delete the label. */ LABEL_NUSES (XEXP (jump_label, 0))++; /* Emit an insn after the end of the loop to set the biv's proper exit value if it is used anywhere outside the loop. */ if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare)) || ! bl->init_insn || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn)) loop_insn_sink (loop, gen_load_of_final_value (reg, final_value)); /* Delete compare/branch at end of loop. */ delete_related_insns (PREV_INSN (loop_end)); if (compare_and_branch == 2) delete_related_insns (first_compare); /* Add new compare/branch insn at end of loop. */ start_sequence (); emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX, mode, 0, XEXP (jump_label, 0)); tem = get_insns (); end_sequence (); emit_jump_insn_before (tem, loop_end); for (tem = PREV_INSN (loop_end); tem && GET_CODE (tem) != JUMP_INSN; tem = PREV_INSN (tem)) ; if (tem) JUMP_LABEL (tem) = XEXP (jump_label, 0); if (nonneg) { if (tem) { /* Increment of LABEL_NUSES done above. 
*/ /* Register is now always nonnegative, so add REG_NONNEG note to the branch. */ REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg, REG_NOTES (tem)); } bl->nonneg = 1; } /* No insn may reference both the reversed and another biv or it will fail (see comment near the top of the loop reversal code). Earlier on, we have verified that the biv has no use except counting, or it is the only biv in this function. However, the code that computes no_use_except_counting does not verify reg notes. It's possible to have an insn that references another biv, and has a REG_EQUAL note with an expression based on the reversed biv. To avoid this case, remove all REG_EQUAL notes based on the reversed biv here. */ for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) if (INSN_P (p)) { rtx *pnote; rtx set = single_set (p); /* If this is a set of a GIV based on the reversed biv, any REG_EQUAL notes should still be correct. */ if (! set || !REG_P (SET_DEST (set)) || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg) for (pnote = &REG_NOTES (p); *pnote;) { if (REG_NOTE_KIND (*pnote) == REG_EQUAL && reg_mentioned_p (regno_reg_rtx[bl->regno], XEXP (*pnote, 0))) *pnote = XEXP (*pnote, 1); else pnote = &XEXP (*pnote, 1); } } /* Mark that this biv has been reversed. Each giv which depends on this biv, and which is also live past the end of the loop will have to be fixed up. */ bl->reversed = 1; if (loop_dump_stream) { fprintf (loop_dump_stream, "Reversed loop"); if (bl->nonneg) fprintf (loop_dump_stream, " and added reg_nonneg\n"); else fprintf (loop_dump_stream, "\n"); } return 1; } } } return 0; } /* Verify whether the biv BL appears to be eliminable, based on the insns in the loop that refer to it. If ELIMINATE_P is nonzero, actually do the elimination. THRESHOLD and INSN_COUNT are from loop_optimize and are used to determine whether invariant insns should be placed inside or at the start of the loop. */ static int maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl, int eliminate_p, int threshold, int insn_count) { struct loop_ivs *ivs = LOOP_IVS (loop); rtx reg = bl->biv->dest_reg; rtx p; /* Scan all insns in the loop, stopping if we find one that uses the biv in a way that we cannot eliminate. */ for (p = loop->start; p != loop->end; p = NEXT_INSN (p)) { enum rtx_code code = GET_CODE (p); basic_block where_bb = 0; rtx where_insn = threshold >= insn_count ? 0 : p; rtx note; /* If this is a libcall that sets a giv, skip ahead to its end. */ if (INSN_P (p)) { note = find_reg_note (p, REG_LIBCALL, NULL_RTX); if (note) { rtx last = XEXP (note, 0); rtx set = single_set (last); if (set && REG_P (SET_DEST (set))) { unsigned int regno = REGNO (SET_DEST (set)); if (regno < ivs->n_regs && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg) p = last; } } } /* Closely examine the insn if the biv is mentioned. */ if ((code == INSN || code == JUMP_INSN || code == CALL_INSN) && reg_mentioned_p (reg, PATTERN (p)) && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl, eliminate_p, where_bb, where_insn)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Cannot eliminate biv %d: biv used in insn %d.\n", bl->regno, INSN_UID (p)); break; } /* If we are eliminating, kill REG_EQUAL notes mentioning the biv.
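For instance, a note of the form (expr_list:REG_EQUAL (plus (reg 60) (const_int 4)) ...) where (reg 60) is the biv would describe a stale value once the biv has been eliminated, so such a note is removed (the register number and offset are invented for the example).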
*/ if (eliminate_p && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX && reg_mentioned_p (reg, XEXP (note, 0))) remove_note (p, note); } if (p == loop->end) { if (loop_dump_stream) fprintf (loop_dump_stream, "biv %d %s eliminated.\n", bl->regno, eliminate_p ? "was" : "can be"); return 1; } return 0; } /* INSN and REFERENCE are instructions in the same insn chain. Return nonzero if INSN is first. */ int loop_insn_first_p (rtx insn, rtx reference) { rtx p, q; for (p = insn, q = reference;;) { /* Start with test for not first so that INSN == REFERENCE yields not first. */ if (q == insn || ! p) return 0; if (p == reference || ! q) return 1; /* Either of P or Q might be a NOTE. Notes have the same LUID as the previous insn, hence the <= comparison below does not work if P is a note. */ if (INSN_UID (p) < max_uid_for_loop && INSN_UID (q) < max_uid_for_loop && GET_CODE (p) != NOTE) return LOOP_INSN_LUID (p) <= LOOP_INSN_LUID (q); if (INSN_UID (p) >= max_uid_for_loop || GET_CODE (p) == NOTE) p = NEXT_INSN (p); if (INSN_UID (q) >= max_uid_for_loop) q = NEXT_INSN (q); } } /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if the offset that we have to take into account due to auto-increment / div derivation is zero. */ static int biv_elimination_giv_has_0_offset (struct induction *biv, struct induction *giv, rtx insn) { /* If the giv V had the auto-inc address optimization applied to it, and INSN occurs between the giv insn and the biv insn, then we'd have to adjust the value used here. This is rare, so we don't bother to make this possible. */ if (giv->auto_inc_opt && ((loop_insn_first_p (giv->insn, insn) && loop_insn_first_p (insn, biv->insn)) || (loop_insn_first_p (biv->insn, insn) && loop_insn_first_p (insn, giv->insn)))) return 0; return 1; } /* If BL appears in X (part of the pattern of INSN), see if we can eliminate its use. If so, return 1. If not, return 0. If BIV does not appear in X, return 1. If ELIMINATE_P is nonzero, actually do the elimination. WHERE_INSN/WHERE_BB indicate where extra insns should be added. Depending on how many items have been moved out of the loop, it will either be before INSN (when WHERE_INSN is nonzero) or at the start of the loop (when WHERE_INSN is zero). */ static int maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn, struct iv_class *bl, int eliminate_p, basic_block where_bb, rtx where_insn) { enum rtx_code code = GET_CODE (x); rtx reg = bl->biv->dest_reg; enum machine_mode mode = GET_MODE (reg); struct induction *v; rtx arg, tem; #ifdef HAVE_cc0 rtx new; #endif int arg_operand; const char *fmt; int i, j; switch (code) { case REG: /* If we haven't already been able to do something with this BIV, we can't eliminate it. */ if (x == reg) return 0; return 1; case SET: /* If this sets the BIV, it is not a problem. */ if (SET_DEST (x) == reg) return 1; /* If this is an insn that defines a giv, it is also ok because it will go away when the giv is reduced. */ for (v = bl->giv; v; v = v->next_iv) if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg) return 1; #ifdef HAVE_cc0 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg) { /* Can replace with any giv that was reduced and that has (MULT_VAL != 0) and (ADD_VAL == 0). Require a constant for MULT_VAL, so we know it's nonzero. ??? We disable this optimization to avoid potential overflows. */ for (v = bl->giv; v; v = v->next_iv) if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx && v->add_val == const0_rtx && ! v->ignore && ! 
v->maybe_dead && v->always_computable && v->mode == mode && 0) { if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) continue; if (! eliminate_p) return 1; /* If the giv has the opposite direction of change, then reverse the comparison. */ if (INTVAL (v->mult_val) < 0) new = gen_rtx_COMPARE (GET_MODE (v->new_reg), const0_rtx, v->new_reg); else new = v->new_reg; /* We can probably test that giv's reduced reg. */ if (validate_change (insn, &SET_SRC (x), new, 0)) return 1; } /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0); replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL). Require a constant for MULT_VAL, so we know it's nonzero. ??? Do this only if ADD_VAL is a pointer to avoid a potential overflow problem. */ for (v = bl->giv; v; v = v->next_iv) if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx && ! v->ignore && ! v->maybe_dead && v->always_computable && v->mode == mode && (GET_CODE (v->add_val) == SYMBOL_REF || GET_CODE (v->add_val) == LABEL_REF || GET_CODE (v->add_val) == CONST || (REG_P (v->add_val) && REG_POINTER (v->add_val)))) { if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) continue; if (! eliminate_p) return 1; /* If the giv has the opposite direction of change, then reverse the comparison. */ if (INTVAL (v->mult_val) < 0) new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val), v->new_reg); else new = gen_rtx_COMPARE (VOIDmode, v->new_reg, copy_rtx (v->add_val)); /* Replace biv with the giv's reduced register. */ update_reg_last_use (v->add_val, insn); if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0)) return 1; /* Insn doesn't support that constant or invariant. Copy it into a register (it will be a loop invariant.) */ tem = gen_reg_rtx (GET_MODE (v->new_reg)); loop_insn_emit_before (loop, 0, where_insn, gen_move_insn (tem, copy_rtx (v->add_val))); /* Substitute the new register for its invariant value in the compare expression. */ XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem; if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0)) return 1; } } #endif break; case COMPARE: case EQ: case NE: case GT: case GE: case GTU: case GEU: case LT: case LE: case LTU: case LEU: /* See if either argument is the biv. */ if (XEXP (x, 0) == reg) arg = XEXP (x, 1), arg_operand = 1; else if (XEXP (x, 1) == reg) arg = XEXP (x, 0), arg_operand = 0; else break; if (CONSTANT_P (arg)) { /* First try to replace with any giv that has constant positive mult_val and constant add_val. We might be able to support negative mult_val, but it seems complex to do it in general. */ for (v = bl->giv; v; v = v->next_iv) if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0 && (GET_CODE (v->add_val) == SYMBOL_REF || GET_CODE (v->add_val) == LABEL_REF || GET_CODE (v->add_val) == CONST || (REG_P (v->add_val) && REG_POINTER (v->add_val))) && ! v->ignore && ! v->maybe_dead && v->always_computable && v->mode == mode) { if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) continue; /* Don't eliminate if the linear combination that makes up the giv overflows when it is applied to ARG. */ if (GET_CODE (arg) == CONST_INT) { rtx add_val; if (GET_CODE (v->add_val) == CONST_INT) add_val = v->add_val; else add_val = const0_rtx; if (const_mult_add_overflow_p (arg, v->mult_val, add_val, mode, 1)) continue; } if (! eliminate_p) return 1; /* Replace biv with the giv's reduced reg. 
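For example (constants invented for illustration), if the giv satisfies giv = 4 * biv + 8, i.e. mult_val == 4 and add_val == 8, then a test (lt (reg biv) (const_int 10)) can be rewritten as (lt (reg giv) (const_int 48)), because for a positive mult_val both sides of the comparison scale the same way.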
*/ validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1); /* If all constants are actually constant integers and the derived constant can be directly placed in the COMPARE, do so. */ if (GET_CODE (arg) == CONST_INT && GET_CODE (v->add_val) == CONST_INT) { tem = expand_mult_add (arg, NULL_RTX, v->mult_val, v->add_val, mode, 1); } else { /* Otherwise, load it into a register. */ tem = gen_reg_rtx (mode); loop_iv_add_mult_emit_before (loop, arg, v->mult_val, v->add_val, tem, where_bb, where_insn); } validate_change (insn, &XEXP (x, arg_operand), tem, 1); if (apply_change_group ()) return 1; } /* Look for giv with positive constant mult_val and nonconst add_val. Insert insns to calculate new compare value. ??? Turn this off due to possible overflow. */ for (v = bl->giv; v; v = v->next_iv) if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0 && ! v->ignore && ! v->maybe_dead && v->always_computable && v->mode == mode && 0) { rtx tem; if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) continue; if (! eliminate_p) return 1; tem = gen_reg_rtx (mode); /* Replace biv with giv's reduced register. */ validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1); /* Compute value to compare against. */ loop_iv_add_mult_emit_before (loop, arg, v->mult_val, v->add_val, tem, where_bb, where_insn); /* Use it in this insn. */ validate_change (insn, &XEXP (x, arg_operand), tem, 1); if (apply_change_group ()) return 1; } } else if (REG_P (arg) || MEM_P (arg)) { if (loop_invariant_p (loop, arg) == 1) { /* Look for giv with constant positive mult_val and nonconst add_val. Insert insns to compute new compare value. ??? Turn this off due to possible overflow. */ for (v = bl->giv; v; v = v->next_iv) if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0 && ! v->ignore && ! v->maybe_dead && v->always_computable && v->mode == mode && 0) { rtx tem; if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) continue; if (! eliminate_p) return 1; tem = gen_reg_rtx (mode); /* Replace biv with giv's reduced register. */ validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1); /* Compute value to compare against. */ loop_iv_add_mult_emit_before (loop, arg, v->mult_val, v->add_val, tem, where_bb, where_insn); validate_change (insn, &XEXP (x, arg_operand), tem, 1); if (apply_change_group ()) return 1; } } /* This code has problems. Basically, you can't know when seeing if we will eliminate BL, whether a particular giv of ARG will be reduced. If it isn't going to be reduced, we can't eliminate BL. We can try forcing it to be reduced, but that can generate poor code. The problem is that the benefit of reducing TV, below should be increased if BL can actually be eliminated, but this means we might have to do a topological sort of the order in which we try to process biv. It doesn't seem worthwhile to do this sort of thing now. */ #if 0 /* Otherwise the reg compared with had better be a biv. */ if (!REG_P (arg) || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT) return 0; /* Look for a pair of givs, one for each biv, with identical coefficients. */ for (v = bl->giv; v; v = v->next_iv) { struct induction *tv; if (v->ignore || v->maybe_dead || v->mode != mode) continue; for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv; tv = tv->next_iv) if (! tv->ignore && ! tv->maybe_dead && rtx_equal_p (tv->mult_val, v->mult_val) && rtx_equal_p (tv->add_val, v->add_val) && tv->mode == mode) { if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) continue; if (! 
eliminate_p) return 1; /* Replace biv with its giv's reduced reg. */ XEXP (x, 1 - arg_operand) = v->new_reg; /* Replace other operand with the other giv's reduced reg. */ XEXP (x, arg_operand) = tv->new_reg; return 1; } } #endif } /* If we get here, the biv can't be eliminated. */ return 0; case MEM: /* If this address is a DEST_ADDR giv, it doesn't matter if the biv is used in it, since it will be replaced. */ for (v = bl->giv; v; v = v->next_iv) if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0)) return 1; break; default: break; } /* See if any subexpression fails elimination. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { switch (fmt[i]) { case 'e': if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl, eliminate_p, where_bb, where_insn)) return 0; break; case 'E': for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl, eliminate_p, where_bb, where_insn)) return 0; break; } } return 1; } /* Return nonzero if the last use of REG is in an insn following INSN in the same basic block. */ static int last_use_this_basic_block (rtx reg, rtx insn) { rtx n; for (n = insn; n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN; n = NEXT_INSN (n)) { if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n)) return 1; } return 0; } /* Called via `note_stores' to record the initial value of a biv. Here we just record the location of the set and process it later. */ static void record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED) { struct loop_ivs *ivs = (struct loop_ivs *) data; struct iv_class *bl; if (!REG_P (dest) || REGNO (dest) >= ivs->n_regs || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT) return; bl = REG_IV_CLASS (ivs, REGNO (dest)); /* If this is the first set found, record it. */ if (bl->init_insn == 0) { bl->init_insn = note_insn; bl->init_set = set; } } /* If any of the registers in X are "old" and currently have a last use earlier than INSN, update them to have a last use of INSN. Their actual last use will be the previous insn but it will not have a valid uid_luid so we can't use it. X must be a source expression only. */ static void update_reg_last_use (rtx x, rtx insn) { /* Check for the case where INSN does not have a valid luid. In this case, there is no need to modify the regno_last_uid, as this can only happen when code is inserted after the loop_end to set a pseudo's final value, and hence this insn will never be the last use of x. ???? This comment is not correct. See for example loop_givs_reduce. This may insert an insn before another new insn. */ if (REG_P (x) && REGNO (x) < max_reg_before_loop && INSN_UID (insn) < max_uid_for_loop && REGNO_LAST_LUID (REGNO (x)) < LOOP_INSN_LUID (insn)) { REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn); } else { int i, j; const char *fmt = GET_RTX_FORMAT (GET_CODE (x)); for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) { if (fmt[i] == 'e') update_reg_last_use (XEXP (x, i), insn); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) update_reg_last_use (XVECEXP (x, i, j), insn); } } } /* Given an insn INSN and condition COND, return the condition in a canonical form to simplify testing by callers. Specifically: (1) The code will always be a comparison operation (EQ, NE, GT, etc.). (2) Both operands will be machine operands; (cc0) will have been replaced. (3) If an operand is a constant, it will be the second operand. (4) (LE x const) will be replaced with (LT x ) and similarly for GE, GEU, and LEU. 
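For example, (le (reg 60) (const_int 9)) is rewritten as (lt (reg 60) (const_int 10)), provided the incremented constant is still representable in the operand's mode (the register number is invented for illustration).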
If the condition cannot be understood, or is an inequality floating-point comparison which needs to be reversed, 0 will be returned. If REVERSE is nonzero, then reverse the condition prior to canonizing it. If EARLIEST is nonzero, it is a pointer to a place where the earliest insn used in locating the condition was found. If a replacement test of the condition is desired, it should be placed in front of that insn and we will be sure that the inputs are still valid. If WANT_REG is nonzero, we wish the condition to be relative to that register, if possible. Therefore, do not canonicalize the condition further. If ALLOW_CC_MODE is nonzero, allow the condition returned to be a compare to a CC mode register. */ rtx canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest, rtx want_reg, int allow_cc_mode) { enum rtx_code code; rtx prev = insn; rtx set; rtx tem; rtx op0, op1; int reverse_code = 0; enum machine_mode mode; code = GET_CODE (cond); mode = GET_MODE (cond); op0 = XEXP (cond, 0); op1 = XEXP (cond, 1); if (reverse) code = reversed_comparison_code (cond, insn); if (code == UNKNOWN) return 0; if (earliest) *earliest = insn; /* If we are comparing a register with zero, see if the register is set in the previous insn to a COMPARE or a comparison operation. Perform the same tests as a function of STORE_FLAG_VALUE as find_comparison_args in cse.c */ while ((GET_RTX_CLASS (code) == RTX_COMPARE || GET_RTX_CLASS (code) == RTX_COMM_COMPARE) && op1 == CONST0_RTX (GET_MODE (op0)) && op0 != want_reg) { /* Set nonzero when we find something of interest. */ rtx x = 0; #ifdef HAVE_cc0 /* If comparison with cc0, import actual comparison from compare insn. */ if (op0 == cc0_rtx) { if ((prev = prev_nonnote_insn (prev)) == 0 || GET_CODE (prev) != INSN || (set = single_set (prev)) == 0 || SET_DEST (set) != cc0_rtx) return 0; op0 = SET_SRC (set); op1 = CONST0_RTX (GET_MODE (op0)); if (earliest) *earliest = prev; } #endif /* If this is a COMPARE, pick up the two things being compared. */ if (GET_CODE (op0) == COMPARE) { op1 = XEXP (op0, 1); op0 = XEXP (op0, 0); continue; } else if (!REG_P (op0)) break; /* Go back to the previous insn. Stop if it is not an INSN. We also stop if it isn't a single set or if it has a REG_INC note because we don't want to bother dealing with it. */ if ((prev = prev_nonnote_insn (prev)) == 0 || GET_CODE (prev) != INSN || FIND_REG_INC_NOTE (prev, NULL_RTX)) break; set = set_of (op0, prev); if (set && (GET_CODE (set) != SET || !rtx_equal_p (SET_DEST (set), op0))) break; /* If this is setting OP0, get what it sets it to if it looks relevant. */ if (set) { enum machine_mode inner_mode = GET_MODE (SET_DEST (set)); #ifdef FLOAT_STORE_FLAG_VALUE REAL_VALUE_TYPE fsfv; #endif /* ??? We may not combine comparisons done in a CCmode with comparisons not done in a CCmode. This is to aid targets like Alpha that have an IEEE compliant EQ instruction, and a non-IEEE compliant BEQ instruction. The use of CCmode is actually artificial, simply to prevent the combination, but should not affect other platforms. However, we must allow VOIDmode comparisons to match either CCmode or non-CCmode comparison, because some ports have modeless comparisons inside branch patterns. ??? This mode check should perhaps look more like the mode check in simplify_comparison in combine. 
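As a concrete illustration of this backwards walk (register numbers invented): if the condition being canonicalized is (ne (reg 62) (const_int 0)) and the previous insn is (set (reg 62) (gt (reg 60) (reg 61))), the embedded comparison (gt (reg 60) (reg 61)) is picked up and canonicalized in its place.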
*/ if ((GET_CODE (SET_SRC (set)) == COMPARE || (((code == NE || (code == LT && GET_MODE_CLASS (inner_mode) == MODE_INT && (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT) && (STORE_FLAG_VALUE & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (inner_mode) - 1)))) #ifdef FLOAT_STORE_FLAG_VALUE || (code == LT && GET_MODE_CLASS (inner_mode) == MODE_FLOAT && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode), REAL_VALUE_NEGATIVE (fsfv))) #endif )) && COMPARISON_P (SET_SRC (set)))) && (((GET_MODE_CLASS (mode) == MODE_CC) == (GET_MODE_CLASS (inner_mode) == MODE_CC)) || mode == VOIDmode || inner_mode == VOIDmode)) x = SET_SRC (set); else if (((code == EQ || (code == GE && (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT) && GET_MODE_CLASS (inner_mode) == MODE_INT && (STORE_FLAG_VALUE & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (inner_mode) - 1)))) #ifdef FLOAT_STORE_FLAG_VALUE || (code == GE && GET_MODE_CLASS (inner_mode) == MODE_FLOAT && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode), REAL_VALUE_NEGATIVE (fsfv))) #endif )) && COMPARISON_P (SET_SRC (set)) && (((GET_MODE_CLASS (mode) == MODE_CC) == (GET_MODE_CLASS (inner_mode) == MODE_CC)) || mode == VOIDmode || inner_mode == VOIDmode)) { reverse_code = 1; x = SET_SRC (set); } else break; } else if (reg_set_p (op0, prev)) /* If this sets OP0, but not directly, we have to give up. */ break; if (x) { if (COMPARISON_P (x)) code = GET_CODE (x); if (reverse_code) { code = reversed_comparison_code (x, prev); if (code == UNKNOWN) return 0; reverse_code = 0; } op0 = XEXP (x, 0), op1 = XEXP (x, 1); if (earliest) *earliest = prev; } } /* If constant is first, put it last. */ if (CONSTANT_P (op0)) code = swap_condition (code), tem = op0, op0 = op1, op1 = tem; /* If OP0 is the result of a comparison, we weren't able to find what was really being compared, so fail. */ if (!allow_cc_mode && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC) return 0; /* Canonicalize any ordered comparison with integers involving equality if we can do computations in the relevant mode and we do not overflow. */ if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC && GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT) { HOST_WIDE_INT const_val = INTVAL (op1); unsigned HOST_WIDE_INT uconst_val = const_val; unsigned HOST_WIDE_INT max_val = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0)); switch (code) { case LE: if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1) code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0)); break; /* When cross-compiling, const_val might be sign-extended from BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */ case GE: if ((HOST_WIDE_INT) (const_val & max_val) != (((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))) code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0)); break; case LEU: if (uconst_val < max_val) code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0)); break; case GEU: if (uconst_val != 0) code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0)); break; default: break; } } /* Never return CC0; return zero instead. */ if (CC0_P (op0)) return 0; return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); } /* Given a jump insn JUMP, return the condition that will cause it to branch to its JUMP_LABEL. If the condition cannot be understood, or is an inequality floating-point comparison which needs to be reversed, 0 will be returned. If EARLIEST is nonzero, it is a pointer to a place where the earliest insn used in locating the condition was found. 
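(For example, given a jump whose pattern is (set (pc) (if_then_else (lt (reg 60) (reg 61)) (label_ref 23) (pc))), the condition returned is (lt (reg 60) (reg 61)); had the label_ref appeared in the else arm instead, the comparison would be reversed first. Register and label numbers are invented for illustration.)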
If a replacement test of the condition is desired, it should be placed in front of that insn and we will be sure that the inputs are still valid. If ALLOW_CC_MODE is nonzero, allow the condition returned to be a compare CC mode register. */ rtx get_condition (rtx jump, rtx *earliest, int allow_cc_mode) { rtx cond; int reverse; rtx set; /* If this is not a standard conditional jump, we can't parse it. */ if (GET_CODE (jump) != JUMP_INSN || ! any_condjump_p (jump)) return 0; set = pc_set (jump); cond = XEXP (SET_SRC (set), 0); /* If this branches to JUMP_LABEL when the condition is false, reverse the condition. */ reverse = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump); return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX, allow_cc_mode); } /* Similar to above routine, except that we also put an invariant last unless both operands are invariants. */ rtx get_condition_for_loop (const struct loop *loop, rtx x) { rtx comparison = get_condition (x, (rtx*) 0, false); if (comparison == 0 || ! loop_invariant_p (loop, XEXP (comparison, 0)) || loop_invariant_p (loop, XEXP (comparison, 1))) return comparison; return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode, XEXP (comparison, 1), XEXP (comparison, 0)); } /* Scan the function and determine whether it has indirect (computed) jumps. This is taken mostly from flow.c; similar code exists elsewhere in the compiler. It may be useful to put this into rtlanal.c. */ static int indirect_jump_in_function_p (rtx start) { rtx insn; for (insn = start; insn; insn = NEXT_INSN (insn)) if (computed_jump_p (insn)) return 1; return 0; } /* Add MEM to the LOOP_MEMS array, if appropriate. See the documentation for LOOP_MEMS for the definition of `appropriate'. This function is called from prescan_loop via for_each_rtx. */ static int insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED) { struct loop_info *loop_info = data; int i; rtx m = *mem; if (m == NULL_RTX) return 0; switch (GET_CODE (m)) { case MEM: break; case CLOBBER: /* We're not interested in MEMs that are only clobbered. */ return -1; case CONST_DOUBLE: /* We're not interested in the MEM associated with a CONST_DOUBLE, so there's no need to traverse into this. */ return -1; case EXPR_LIST: /* We're not interested in any MEMs that only appear in notes. */ return -1; default: /* This is not a MEM. */ return 0; } /* See if we've already seen this MEM. */ for (i = 0; i < loop_info->mems_idx; ++i) if (rtx_equal_p (m, loop_info->mems[i].mem)) { if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem)) loop_info->mems[i].mem = m; if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem)) /* The modes of the two memory accesses are different. If this happens, something tricky is going on, and we just don't optimize accesses to this MEM. */ loop_info->mems[i].optimize = 0; return 0; } /* Resize the array, if necessary. */ if (loop_info->mems_idx == loop_info->mems_allocated) { if (loop_info->mems_allocated != 0) loop_info->mems_allocated *= 2; else loop_info->mems_allocated = 32; loop_info->mems = xrealloc (loop_info->mems, loop_info->mems_allocated * sizeof (loop_mem_info)); } /* Actually insert the MEM. */ loop_info->mems[loop_info->mems_idx].mem = m; /* We can't hoist this MEM out of the loop if it's a BLKmode MEM because we can't put it in a register. We still store it in the table, though, so that if we see the same address later, but in a non-BLK mode, we'll not think we can optimize it at that point. 
*/ loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode); loop_info->mems[loop_info->mems_idx].reg = NULL_RTX; ++loop_info->mems_idx; return 0; } /* Allocate REGS->ARRAY or reallocate it if it is too small. Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each register that is modified by an insn between FROM and TO. If the value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or more, stop incrementing it, to avoid overflow. Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which register I is used, if it is only used once. Otherwise, it is set to 0 (for no uses) or const0_rtx for more than one use. This parameter may be zero, in which case this processing is not done. Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not optimize register I. */ static void loop_regs_scan (const struct loop *loop, int extra_size) { struct loop_regs *regs = LOOP_REGS (loop); int old_nregs; /* last_set[n] is nonzero iff reg n has been set in the current basic block. In that case, it is the insn that last set reg n. */ rtx *last_set; rtx insn; int i; old_nregs = regs->num; regs->num = max_reg_num (); /* Grow the regs array if not allocated or too small. */ if (regs->num >= regs->size) { regs->size = regs->num + extra_size; regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array)); /* Zero the new elements. */ memset (regs->array + old_nregs, 0, (regs->size - old_nregs) * sizeof (*regs->array)); } /* Clear previously scanned fields but do not clear n_times_set. */ for (i = 0; i < old_nregs; i++) { regs->array[i].set_in_loop = 0; regs->array[i].may_not_optimize = 0; regs->array[i].single_usage = NULL_RTX; } last_set = xcalloc (regs->num, sizeof (rtx)); /* Scan the loop, recording register usage. */ for (insn = loop->top ? loop->top : loop->start; insn != loop->end; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { /* Record registers that have exactly one use. */ find_single_use_in_loop (regs, insn, PATTERN (insn)); /* Include uses in REG_EQUAL notes. */ if (REG_NOTES (insn)) find_single_use_in_loop (regs, insn, REG_NOTES (insn)); if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == CLOBBER) count_one_set (regs, insn, PATTERN (insn), last_set); else if (GET_CODE (PATTERN (insn)) == PARALLEL) { int i; for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i), last_set); } } if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN) memset (last_set, 0, regs->num * sizeof (rtx)); /* Invalidate all registers used for function argument passing. We check rtx_varies_p for the same reason as below, to allow optimizing PIC calculations. */ if (GET_CODE (insn) == CALL_INSN) { rtx link; for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) { rtx op, reg; if (GET_CODE (op = XEXP (link, 0)) == USE && REG_P (reg = XEXP (op, 0)) && rtx_varies_p (reg, 1)) regs->array[REGNO (reg)].may_not_optimize = 1; } } } /* Invalidate all hard registers clobbered by calls. With one exception: a call-clobbered PIC register is still function-invariant for our purposes, since we can hoist any PIC calculations out of the loop. Thus the call to rtx_varies_p. 
*/ if (LOOP_INFO (loop)->has_call) for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i) && rtx_varies_p (regno_reg_rtx[i], 1)) { regs->array[i].may_not_optimize = 1; regs->array[i].set_in_loop = 1; } #ifdef AVOID_CCMODE_COPIES /* Don't try to move insns which set CC registers if we should not create CCmode register copies. */ for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--) if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC) regs->array[i].may_not_optimize = 1; #endif /* Set regs->array[I].n_times_set for the new registers. */ for (i = old_nregs; i < regs->num; i++) regs->array[i].n_times_set = regs->array[i].set_in_loop; free (last_set); } /* Returns the number of real INSNs in the LOOP. */ static int count_insns_in_loop (const struct loop *loop) { int count = 0; rtx insn; for (insn = loop->top ? loop->top : loop->start; insn != loop->end; insn = NEXT_INSN (insn)) if (INSN_P (insn)) ++count; return count; } /* Move MEMs into registers for the duration of the loop. */ static void load_mems (const struct loop *loop) { struct loop_info *loop_info = LOOP_INFO (loop); struct loop_regs *regs = LOOP_REGS (loop); int maybe_never = 0; int i; rtx p, prev_ebb_head; rtx label = NULL_RTX; rtx end_label; /* Nonzero if the next instruction may never be executed. */ int next_maybe_never = 0; unsigned int last_max_reg = max_reg_num (); if (loop_info->mems_idx == 0) return; /* We cannot use next_label here because it skips over normal insns. */ end_label = next_nonnote_insn (loop->end); if (end_label && GET_CODE (end_label) != CODE_LABEL) end_label = NULL_RTX; /* Check to see if it's possible that some instructions in the loop are never executed. Also check if there is a goto out of the loop other than right after the end of the loop. */ for (p = next_insn_in_loop (loop, loop->scan_start); p != NULL_RTX; p = next_insn_in_loop (loop, p)) { if (GET_CODE (p) == CODE_LABEL) maybe_never = 1; else if (GET_CODE (p) == JUMP_INSN /* If we enter the loop in the middle, and scan around to the beginning, don't set maybe_never for that. This must be an unconditional jump, otherwise the code at the top of the loop might never be executed. Unconditional jumps are followed a by barrier then loop end. */ && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top && NEXT_INSN (NEXT_INSN (p)) == loop->end && any_uncondjump_p (p))) { /* If this is a jump outside of the loop but not right after the end of the loop, we would have to emit new fixup sequences for each such label. */ if (/* If we can't tell where control might go when this JUMP_INSN is executed, we must be conservative. */ !JUMP_LABEL (p) || (JUMP_LABEL (p) != end_label && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop || LOOP_INSN_LUID (JUMP_LABEL (p)) < LOOP_INSN_LUID (loop->start) || LOOP_INSN_LUID (JUMP_LABEL (p)) > LOOP_INSN_LUID (loop->end)))) return; if (!any_condjump_p (p)) /* Something complicated. */ maybe_never = 1; else /* If there are any more instructions in the loop, they might not be reached. */ next_maybe_never = 1; } else if (next_maybe_never) maybe_never = 1; } /* Find start of the extended basic block that enters the loop. */ for (p = loop->start; PREV_INSN (p) && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p)) ; prev_ebb_head = p; cselib_init (true); /* Build table of mems that get set to constant values before the loop. */ for (; p != loop->start; p = NEXT_INSN (p)) cselib_process_insn (p); /* Actually move the MEMs. 
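Schematically (symbol name and register numbers invented for the example): a loop that keeps accessing an invariant address such as (mem (symbol_ref "x")) gets a shadow pseudo (reg 80); the pseudo is loaded from the MEM (or from a known constant equivalent) in the pre-header, every reference to the MEM inside the loop is replaced by (reg 80), and, if the MEM is also written in the loop, its final value is stored back just after the loop.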
*/ for (i = 0; i < loop_info->mems_idx; ++i) { regset_head load_copies; regset_head store_copies; int written = 0; rtx reg; rtx mem = loop_info->mems[i].mem; rtx mem_list_entry; if (MEM_VOLATILE_P (mem) || loop_invariant_p (loop, XEXP (mem, 0)) != 1) /* There's no telling whether or not MEM is modified. */ loop_info->mems[i].optimize = 0; /* Go through the MEMs written to in the loop to see if this one is aliased by one of them. */ mem_list_entry = loop_info->store_mems; while (mem_list_entry) { if (rtx_equal_p (mem, XEXP (mem_list_entry, 0))) written = 1; else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode, mem, rtx_varies_p)) { /* MEM is indeed aliased by this store. */ loop_info->mems[i].optimize = 0; break; } mem_list_entry = XEXP (mem_list_entry, 1); } if (flag_float_store && written && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT) loop_info->mems[i].optimize = 0; /* If this MEM is written to, we must be sure that there are no reads from another MEM that aliases this one. */ if (loop_info->mems[i].optimize && written) { int j; for (j = 0; j < loop_info->mems_idx; ++j) { if (j == i) continue; else if (true_dependence (mem, VOIDmode, loop_info->mems[j].mem, rtx_varies_p)) { /* It's not safe to hoist loop_info->mems[i] out of the loop because writes to it might not be seen by reads from loop_info->mems[j]. */ loop_info->mems[i].optimize = 0; break; } } } if (maybe_never && may_trap_p (mem)) /* We can't access the MEM outside the loop; it might cause a trap that wouldn't have happened otherwise. */ loop_info->mems[i].optimize = 0; if (!loop_info->mems[i].optimize) /* We thought we were going to lift this MEM out of the loop, but later discovered that we could not. */ continue; INIT_REG_SET (&load_copies); INIT_REG_SET (&store_copies); /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in order to keep scan_loop from moving stores to this MEM out of the loop just because this REG is neither a user-variable nor used in the loop test. */ reg = gen_reg_rtx (GET_MODE (mem)); REG_USERVAR_P (reg) = 1; loop_info->mems[i].reg = reg; /* Now, replace all references to the MEM with the corresponding pseudos. */ maybe_never = 0; for (p = next_insn_in_loop (loop, loop->scan_start); p != NULL_RTX; p = next_insn_in_loop (loop, p)) { if (INSN_P (p)) { rtx set; set = single_set (p); /* See if this copies the mem into a register that isn't modified afterwards. We'll try to do copy propagation a little further on. */ if (set /* @@@ This test is _way_ too conservative. */ && ! maybe_never && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER && REGNO (SET_DEST (set)) < last_max_reg && regs->array[REGNO (SET_DEST (set))].n_times_set == 1 && rtx_equal_p (SET_SRC (set), mem)) SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set))); /* See if this copies the mem from a register that isn't modified afterwards. We'll try to remove the redundant copy later on by doing a little register renaming and copy propagation. This will help to untangle things for the BIV detection code. */ if (set && ! maybe_never && REG_P (SET_SRC (set)) && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER && REGNO (SET_SRC (set)) < last_max_reg && regs->array[REGNO (SET_SRC (set))].n_times_set == 1 && rtx_equal_p (SET_DEST (set), mem)) SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set))); /* If this is a call which uses / clobbers this memory location, we must not change the interface here. 
*/ if (GET_CODE (p) == CALL_INSN && reg_mentioned_p (loop_info->mems[i].mem, CALL_INSN_FUNCTION_USAGE (p))) { cancel_changes (0); loop_info->mems[i].optimize = 0; break; } else /* Replace the memory reference with the shadow register. */ replace_loop_mems (p, loop_info->mems[i].mem, loop_info->mems[i].reg, written); } if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN) maybe_never = 1; } if (! loop_info->mems[i].optimize) ; /* We found we couldn't do the replacement, so do nothing. */ else if (! apply_change_group ()) /* We couldn't replace all occurrences of the MEM. */ loop_info->mems[i].optimize = 0; else { /* Load the memory immediately before LOOP->START, which is the NOTE_LOOP_BEG. */ cselib_val *e = cselib_lookup (mem, VOIDmode, 0); rtx set; rtx best = mem; int j; struct elt_loc_list *const_equiv = 0; if (e) { struct elt_loc_list *equiv; struct elt_loc_list *best_equiv = 0; for (equiv = e->locs; equiv; equiv = equiv->next) { if (CONSTANT_P (equiv->loc)) const_equiv = equiv; else if (REG_P (equiv->loc) /* Extending hard register lifetimes causes crash on SRC targets. Doing so on non-SRC is probably also not good idea, since we most probably have pseudoregister equivalence as well. */ && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER) best_equiv = equiv; } /* Use the constant equivalence if that is cheap enough. */ if (! best_equiv) best_equiv = const_equiv; else if (const_equiv && (rtx_cost (const_equiv->loc, SET) <= rtx_cost (best_equiv->loc, SET))) { best_equiv = const_equiv; const_equiv = 0; } /* If best_equiv is nonzero, we know that MEM is set to a constant or register before the loop. We will use this knowledge to initialize the shadow register with that constant or reg rather than by loading from MEM. */ if (best_equiv) best = copy_rtx (best_equiv->loc); } set = gen_move_insn (reg, best); set = loop_insn_hoist (loop, set); if (REG_P (best)) { for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p)) if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p)) { REGNO_LAST_UID (REGNO (best)) = INSN_UID (set); break; } } if (const_equiv) set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc)); if (written) { if (label == NULL_RTX) { label = gen_label_rtx (); emit_label_after (label, loop->end); } /* Store the memory immediately after END, which is the NOTE_LOOP_END. */ set = gen_move_insn (copy_rtx (mem), reg); loop_insn_emit_after (loop, 0, label, set); } if (loop_dump_stream) { fprintf (loop_dump_stream, "Hoisted regno %d %s from ", REGNO (reg), (written ? "r/w" : "r/o")); print_rtl (loop_dump_stream, mem); fputc ('\n', loop_dump_stream); } /* Attempt a bit of copy propagation. This helps untangle the data flow, and enables {basic,general}_induction_var to find more bivs/givs. */ EXECUTE_IF_SET_IN_REG_SET (&load_copies, FIRST_PSEUDO_REGISTER, j, { try_copy_prop (loop, reg, j); }); CLEAR_REG_SET (&load_copies); EXECUTE_IF_SET_IN_REG_SET (&store_copies, FIRST_PSEUDO_REGISTER, j, { try_swap_copy_prop (loop, reg, j); }); CLEAR_REG_SET (&store_copies); } } /* Now, we need to replace all references to the previous exit label with the new one. */ if (label != NULL_RTX && end_label != NULL_RTX) for (p = loop->start; p != loop->end; p = NEXT_INSN (p)) if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label) redirect_jump (p, label, false); cselib_finish (); } /* For communication between note_reg_stored and its caller. */ struct note_reg_stored_arg { int set_seen; rtx reg; }; /* Called via note_stores, record in SET_SEEN whether X, which is written, is equal to ARG. 
*/ static void note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg) { struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg; if (t->reg == x) t->set_seen = 1; } /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT. There must be exactly one insn that sets this pseudo; it will be deleted if all replacements succeed and we can prove that the register is not used after the loop. */ static void try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno) { /* This is the reg that we are copying from. */ rtx reg_rtx = regno_reg_rtx[regno]; rtx init_insn = 0; rtx insn; /* These help keep track of whether we replaced all uses of the reg. */ int replaced_last = 0; int store_is_first = 0; for (insn = next_insn_in_loop (loop, loop->scan_start); insn != NULL_RTX; insn = next_insn_in_loop (loop, insn)) { rtx set; /* Only substitute within one extended basic block from the initializing insn. */ if (GET_CODE (insn) == CODE_LABEL && init_insn) break; if (! INSN_P (insn)) continue; /* Is this the initializing insn? */ set = single_set (insn); if (set && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) == regno) { if (init_insn) abort (); init_insn = insn; if (REGNO_FIRST_UID (regno) == INSN_UID (insn)) store_is_first = 1; } /* Only substitute after seeing the initializing insn. */ if (init_insn && insn != init_insn) { struct note_reg_stored_arg arg; replace_loop_regs (insn, reg_rtx, replacement); if (REGNO_LAST_UID (regno) == INSN_UID (insn)) replaced_last = 1; /* Stop replacing when REPLACEMENT is modified. */ arg.reg = replacement; arg.set_seen = 0; note_stores (PATTERN (insn), note_reg_stored, &arg); if (arg.set_seen) { rtx note = find_reg_note (insn, REG_EQUAL, NULL); /* It is possible that we've turned previously valid REG_EQUAL to invalid, as we change the REGNO to REPLACEMENT and unlike REGNO, REPLACEMENT is modified, we get different meaning. */ if (note && reg_mentioned_p (replacement, XEXP (note, 0))) remove_note (insn, note); break; } } } if (! init_insn) abort (); if (apply_change_group ()) { if (loop_dump_stream) fprintf (loop_dump_stream, " Replaced reg %d", regno); if (store_is_first && replaced_last) { rtx first; rtx retval_note; /* Assume we're just deleting INIT_INSN. */ first = init_insn; /* Look for REG_RETVAL note. If we're deleting the end of the libcall sequence, the whole sequence can go. */ retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX); /* If we found a REG_RETVAL note, find the first instruction in the sequence. */ if (retval_note) first = XEXP (retval_note, 0); /* Delete the instructions. */ loop_delete_insns (first, init_insn); } if (loop_dump_stream) fprintf (loop_dump_stream, ".\n"); } } /* Replace all the instructions from FIRST up to and including LAST with NOTE_INSN_DELETED notes. */ static void loop_delete_insns (rtx first, rtx last) { while (1) { if (loop_dump_stream) fprintf (loop_dump_stream, ", deleting init_insn (%d)", INSN_UID (first)); delete_insn (first); /* If this was the LAST instructions we're supposed to delete, we're done. */ if (first == last) break; first = NEXT_INSN (first); } } /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within loop LOOP if the order of the sets of these registers can be swapped. There must be exactly one insn within the loop that sets this pseudo followed immediately by a move insn that sets REPLACEMENT with REGNO. 
*/ static void try_swap_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno) { rtx insn; rtx set = NULL_RTX; unsigned int new_regno; new_regno = REGNO (replacement); for (insn = next_insn_in_loop (loop, loop->scan_start); insn != NULL_RTX; insn = next_insn_in_loop (loop, insn)) { /* Search for the insn that copies REGNO to NEW_REGNO? */ if (INSN_P (insn) && (set = single_set (insn)) && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) == new_regno && REG_P (SET_SRC (set)) && REGNO (SET_SRC (set)) == regno) break; } if (insn != NULL_RTX) { rtx prev_insn; rtx prev_set; /* Some DEF-USE info would come in handy here to make this function more general. For now, just check the previous insn which is the most likely candidate for setting REGNO. */ prev_insn = PREV_INSN (insn); if (INSN_P (insn) && (prev_set = single_set (prev_insn)) && REG_P (SET_DEST (prev_set)) && REGNO (SET_DEST (prev_set)) == regno) { /* We have: (set (reg regno) (expr)) (set (reg new_regno) (reg regno)) so try converting this to: (set (reg new_regno) (expr)) (set (reg regno) (reg new_regno)) The former construct is often generated when a global variable used for an induction variable is shadowed by a register (NEW_REGNO). The latter construct improves the chances of GIV replacement and BIV elimination. */ validate_change (prev_insn, &SET_DEST (prev_set), replacement, 1); validate_change (insn, &SET_DEST (set), SET_SRC (set), 1); validate_change (insn, &SET_SRC (set), replacement, 1); if (apply_change_group ()) { if (loop_dump_stream) fprintf (loop_dump_stream, " Swapped set of reg %d at %d with reg %d at %d.\n", regno, INSN_UID (insn), new_regno, INSN_UID (prev_insn)); /* Update first use of REGNO. */ if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn)) REGNO_FIRST_UID (regno) = INSN_UID (insn); /* Now perform copy propagation to hopefully remove all uses of REGNO within the loop. */ try_copy_prop (loop, replacement, regno); } } } } /* Worker function for find_mem_in_note, called via for_each_rtx. */ static int find_mem_in_note_1 (rtx *x, void *data) { if (*x != NULL_RTX && MEM_P (*x)) { rtx *res = (rtx *) data; *res = *x; return 1; } return 0; } /* Returns the first MEM found in NOTE by depth-first search. */ static rtx find_mem_in_note (rtx note) { if (note && for_each_rtx (&note, find_mem_in_note_1, &note)) return note; return NULL_RTX; } /* Replace MEM with its associated pseudo register. This function is called from load_mems via for_each_rtx. DATA is actually a pointer to a structure describing the instruction currently being scanned and the MEM we are currently replacing. */ static int replace_loop_mem (rtx *mem, void *data) { loop_replace_args *args = (loop_replace_args *) data; rtx m = *mem; if (m == NULL_RTX) return 0; switch (GET_CODE (m)) { case MEM: break; case CONST_DOUBLE: /* We're not interested in the MEM associated with a CONST_DOUBLE, so there's no need to traverse into one. */ return -1; default: /* This is not a MEM. */ return 0; } if (!rtx_equal_p (args->match, m)) /* This is not the MEM we are currently replacing. */ return 0; /* Actually replace the MEM. */ validate_change (args->insn, mem, args->replacement, 1); return 0; } static void replace_loop_mems (rtx insn, rtx mem, rtx reg, int written) { loop_replace_args args; args.insn = insn; args.match = mem; args.replacement = reg; for_each_rtx (&insn, replace_loop_mem, &args); /* If we hoist a mem write out of the loop, then REG_EQUAL notes referring to the mem are no longer valid.
*/ if (written) { rtx note, sub; rtx *link; for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1)) { if (REG_NOTE_KIND (note) == REG_EQUAL && (sub = find_mem_in_note (note)) && true_dependence (mem, VOIDmode, sub, rtx_varies_p)) { /* Remove the note. */ validate_change (NULL_RTX, link, XEXP (note, 1), 1); break; } } } } /* Replace one register with another. Called through for_each_rtx; PX points to the rtx being scanned. DATA is actually a pointer to a structure of arguments. */ static int replace_loop_reg (rtx *px, void *data) { rtx x = *px; loop_replace_args *args = (loop_replace_args *) data; if (x == NULL_RTX) return 0; if (x == args->match) validate_change (args->insn, px, args->replacement, 1); return 0; } static void replace_loop_regs (rtx insn, rtx reg, rtx replacement) { loop_replace_args args; args.insn = insn; args.match = reg; args.replacement = replacement; for_each_rtx (&insn, replace_loop_reg, &args); } /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB (ignored in the interim). */ static rtx loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED, basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn, rtx pattern) { return emit_insn_after (pattern, where_insn); } /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN in basic block WHERE_BB (ignored in the interim) within the loop otherwise hoist PATTERN into the loop pre-header. */ rtx loop_insn_emit_before (const struct loop *loop, basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn, rtx pattern) { if (! where_insn) return loop_insn_hoist (loop, pattern); return emit_insn_before (pattern, where_insn); } /* Emit call insn for PATTERN before WHERE_INSN in basic block WHERE_BB (ignored in the interim) within the loop. */ static rtx loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED, basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn, rtx pattern) { return emit_call_insn_before (pattern, where_insn); } /* Hoist insn for PATTERN into the loop pre-header. */ rtx loop_insn_hoist (const struct loop *loop, rtx pattern) { return loop_insn_emit_before (loop, 0, loop->start, pattern); } /* Hoist call insn for PATTERN into the loop pre-header. */ static rtx loop_call_insn_hoist (const struct loop *loop, rtx pattern) { return loop_call_insn_emit_before (loop, 0, loop->start, pattern); } /* Sink insn for PATTERN after the loop end. */ rtx loop_insn_sink (const struct loop *loop, rtx pattern) { return loop_insn_emit_before (loop, 0, loop->sink, pattern); } /* bl->final_value can be either general_operand or PLUS of general_operand and constant. Emit sequence of instructions to load it into REG. */ static rtx gen_load_of_final_value (rtx reg, rtx final_value) { rtx seq; start_sequence (); final_value = force_operand (final_value, reg); if (final_value != reg) emit_move_insn (reg, final_value); seq = get_insns (); end_sequence (); return seq; } /* If the loop has multiple exits, emit insn for PATTERN before the loop to ensure that it will always be executed no matter how the loop exits. Otherwise, emit the insn for PATTERN after the loop, since this is slightly more efficient. */ static rtx loop_insn_sink_or_swim (const struct loop *loop, rtx pattern) { if (loop->exit_count) return loop_insn_hoist (loop, pattern); else return loop_insn_sink (loop, pattern); } static void loop_ivs_dump (const struct loop *loop, FILE *file, int verbose) { struct iv_class *bl; int iv_num = 0; if (! loop || !
file) return; for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next) iv_num++; fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num); for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next) { loop_iv_class_dump (bl, file, verbose); fputc ('\n', file); } } static void loop_iv_class_dump (const struct iv_class *bl, FILE *file, int verbose ATTRIBUTE_UNUSED) { struct induction *v; rtx incr; int i; if (! bl || ! file) return; fprintf (file, "IV class for reg %d, benefit %d\n", bl->regno, bl->total_benefit); fprintf (file, " Init insn %d", INSN_UID (bl->init_insn)); if (bl->initial_value) { fprintf (file, ", init val: "); print_simple_rtl (file, bl->initial_value); } if (bl->initial_test) { fprintf (file, ", init test: "); print_simple_rtl (file, bl->initial_test); } fputc ('\n', file); if (bl->final_value) { fprintf (file, " Final val: "); print_simple_rtl (file, bl->final_value); fputc ('\n', file); } if ((incr = biv_total_increment (bl))) { fprintf (file, " Total increment: "); print_simple_rtl (file, incr); fputc ('\n', file); } /* List the increments. */ for (i = 0, v = bl->biv; v; v = v->next_iv, i++) { fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn)); print_simple_rtl (file, v->add_val); fputc ('\n', file); } /* List the givs. */ for (i = 0, v = bl->giv; v; v = v->next_iv, i++) { fprintf (file, " Giv%d: insn %d, benefit %d, ", i, INSN_UID (v->insn), v->benefit); if (v->giv_type == DEST_ADDR) print_simple_rtl (file, v->mem); else print_simple_rtl (file, single_set (v->insn)); fputc ('\n', file); } } static void loop_biv_dump (const struct induction *v, FILE *file, int verbose) { if (! v || ! file) return; fprintf (file, "Biv %d: insn %d", REGNO (v->dest_reg), INSN_UID (v->insn)); fprintf (file, " const "); print_simple_rtl (file, v->add_val); if (verbose && v->final_value) { fputc ('\n', file); fprintf (file, " final "); print_simple_rtl (file, v->final_value); } fputc ('\n', file); } static void loop_giv_dump (const struct induction *v, FILE *file, int verbose) { if (! v || ! file) return; if (v->giv_type == DEST_REG) fprintf (file, "Giv %d: insn %d", REGNO (v->dest_reg), INSN_UID (v->insn)); else fprintf (file, "Dest address: insn %d", INSN_UID (v->insn)); fprintf (file, " src reg %d benefit %d", REGNO (v->src_reg), v->benefit); fprintf (file, " lifetime %d", v->lifetime); if (v->replaceable) fprintf (file, " replaceable"); if (v->no_const_addval) fprintf (file, " ncav"); if (v->ext_dependent) { switch (GET_CODE (v->ext_dependent)) { case SIGN_EXTEND: fprintf (file, " ext se"); break; case ZERO_EXTEND: fprintf (file, " ext ze"); break; case TRUNCATE: fprintf (file, " ext tr"); break; default: abort (); } } fputc ('\n', file); fprintf (file, " mult "); print_simple_rtl (file, v->mult_val); fputc ('\n', file); fprintf (file, " add "); print_simple_rtl (file, v->add_val); if (verbose && v->final_value) { fputc ('\n', file); fprintf (file, " final "); print_simple_rtl (file, v->final_value); } fputc ('\n', file); } void debug_ivs (const struct loop *loop) { loop_ivs_dump (loop, stderr, 1); } void debug_iv_class (const struct iv_class *bl) { loop_iv_class_dump (bl, stderr, 1); } void debug_biv (const struct induction *v) { loop_biv_dump (v, stderr, 1); } void debug_giv (const struct induction *v) { loop_giv_dump (v, stderr, 1); } #define LOOP_BLOCK_NUM_1(INSN) \ ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1) /* The notes do not have an assigned block, so look at the next insn. */ #define LOOP_BLOCK_NUM(INSN) \ ((INSN) ? (GET_CODE (INSN) == NOTE \ ? 
LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \ : LOOP_BLOCK_NUM_1 (INSN)) \ : -1) #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1) static void loop_dump_aux (const struct loop *loop, FILE *file, int verbose ATTRIBUTE_UNUSED) { rtx label; if (! loop || ! file) return; /* Print diagnostics to compare our concept of a loop with what the loop notes say. */ if (! PREV_INSN (BB_HEAD (loop->first)) || GET_CODE (PREV_INSN (BB_HEAD (loop->first))) != NOTE || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first))) != NOTE_INSN_LOOP_BEG) fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n", INSN_UID (PREV_INSN (BB_HEAD (loop->first)))); if (! NEXT_INSN (BB_END (loop->last)) || GET_CODE (NEXT_INSN (BB_END (loop->last))) != NOTE || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last))) != NOTE_INSN_LOOP_END) fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n", INSN_UID (NEXT_INSN (BB_END (loop->last)))); if (loop->start) { fprintf (file, ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n", LOOP_BLOCK_NUM (loop->start), LOOP_INSN_UID (loop->start), LOOP_BLOCK_NUM (loop->cont), LOOP_INSN_UID (loop->cont), LOOP_BLOCK_NUM (loop->cont), LOOP_INSN_UID (loop->cont), LOOP_BLOCK_NUM (loop->vtop), LOOP_INSN_UID (loop->vtop), LOOP_BLOCK_NUM (loop->end), LOOP_INSN_UID (loop->end)); fprintf (file, ";; top %d (%d), scan start %d (%d)\n", LOOP_BLOCK_NUM (loop->top), LOOP_INSN_UID (loop->top), LOOP_BLOCK_NUM (loop->scan_start), LOOP_INSN_UID (loop->scan_start)); fprintf (file, ";; exit_count %d", loop->exit_count); if (loop->exit_count) { fputs (", labels:", file); for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label)) { fprintf (file, " %d ", LOOP_INSN_UID (XEXP (label, 0))); } } fputs ("\n", file); /* This can happen when a marked loop appears as two nested loops, say from while (a || b) {}. The inner loop won't match the loop markers but the outer one will. */ if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index) fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n"); } } /* Call this function from the debugger to dump LOOP. */ void debug_loop (const struct loop *loop) { flow_loop_dump (loop, stderr, loop_dump_aux, 1); } /* Call this function from the debugger to dump LOOPS. */ void debug_loops (const struct loops *loops) { flow_loops_dump (loops, stderr, loop_dump_aux, 1); } /* Swing Modulo Scheduling implementation. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Ayal Zaks and Mustafa Hagog This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef INSN_SCHEDULING /* This file contains the implementation of the Swing Modulo Scheduler, described in the following references: [1] J. Llosa, A. Gonzalez, E. Ayguade, M. Valero., and J. Eckhardt. Lifetime--sensitive modulo scheduling in a production environment. IEEE Trans. on Comps., 50(3), March 2001 [2] J. Llosa, A. Gonzalez, E. Ayguade, and M. Valero. 
Swing Modulo Scheduling: A Lifetime Sensitive Approach. PACT '96 , pages 80-87, October 1996 (Boston - Massachusetts - USA). The basic structure is: 1. Build a data-dependence graph (DDG) for each loop. 2. Use the DDG to order the insns of a loop (not in topological order necessarily, but rather) trying to place each insn after all its predecessors _or_ after all its successors. 3. Compute MII: a lower bound on the number of cycles to schedule the loop. 4. Use the ordering to perform list-scheduling of the loop: 1. Set II = MII. We will try to schedule the loop within II cycles. 2. Try to schedule the insns one by one according to the ordering. For each insn compute an interval of cycles by considering already- scheduled preds and succs (and associated latencies); try to place the insn in the cycles of this window checking for potential resource conflicts (using the DFA interface). Note: this is different from the cycle-scheduling of schedule_insns; here the insns are not scheduled monotonically top-down (nor bottom- up). 3. If failed in scheduling all insns - bump II++ and try again, unless II reaches an upper bound MaxII, in which case report failure. 5. If we succeeded in scheduling the loop within II cycles, we now generate prolog and epilog, decrease the counter of the loop, and perform modulo variable expansion for live ranges that span more than II cycles (i.e. use register copies to prevent a def from overwriting itself before reaching the use). */ /* This page defines partial-schedule structures and functions for modulo scheduling. */ typedef struct partial_schedule *partial_schedule_ptr; typedef struct ps_insn *ps_insn_ptr; /* The minimum (absolute) cycle that a node of ps was scheduled in. */ #define PS_MIN_CYCLE(ps) (((partial_schedule_ptr)(ps))->min_cycle) /* The maximum (absolute) cycle that a node of ps was scheduled in. */ #define PS_MAX_CYCLE(ps) (((partial_schedule_ptr)(ps))->max_cycle) /* Perform signed modulo, always returning a non-negative value. */ #define SMODULO(x,y) ((x) % (y) < 0 ? ((x) % (y) + (y)) : (x) % (y)) /* The number of different iterations the nodes in ps span, assuming the stage boundaries are placed efficiently. */ #define PS_STAGE_COUNT(ps) ((PS_MAX_CYCLE (ps) - PS_MIN_CYCLE (ps) \ + 1 + (ps)->ii - 1) / (ps)->ii) #define CFG_HOOKS cfg_layout_rtl_cfg_hooks /* A single instruction in the partial schedule. */ struct ps_insn { /* The corresponding DDG_NODE. */ ddg_node_ptr node; /* The (absolute) cycle in which the PS instruction is scheduled. Same as SCHED_TIME (node). */ int cycle; /* The next/prev PS_INSN in the same row. */ ps_insn_ptr next_in_row, prev_in_row; /* The number of nodes in the same row that come after this node. */ int row_rest_count; }; /* Holds the partial schedule as an array of II rows. Each entry of the array points to a linked list of PS_INSNs, which represents the instructions that are scheduled for that row. */ struct partial_schedule { int ii; /* Number of rows in the partial schedule. */ int history; /* Threshold for conflict checking using DFA. 
*/ /* rows[i] points to linked list of insns scheduled in row i (0 <= i < ii). */ ps_insn_ptr *rows; /* The earliest absolute cycle of an insn in the partial schedule. */ int min_cycle; /* The latest absolute cycle of an insn in the partial schedule. */ int max_cycle; ddg_ptr g; /* The DDG of the insns in the partial schedule. */ }; /* issue_rate is the number of insns that can be scheduled in the same machine cycle (as in haifa-sched.c); it is set from the target hook in sms_schedule. */ static int issue_rate; /* For printing statistics. */ static FILE *stats_file; /* Forward declarations. */ static int sms_order_nodes (ddg_ptr, int, int *); static partial_schedule_ptr sms_schedule_by_order (ddg_ptr, int, int, int *, FILE *); partial_schedule_ptr create_partial_schedule (int ii, ddg_ptr, int history); void free_partial_schedule (partial_schedule_ptr); void reset_partial_schedule (partial_schedule_ptr, int new_ii); void print_partial_schedule (partial_schedule_ptr, FILE *); ps_insn_ptr ps_add_node_check_conflicts (partial_schedule_ptr, ddg_node_ptr, int); void rotate_partial_schedule (partial_schedule_ptr, int); #define SCHED_ASAP(x) (((node_sched_params_ptr)(x)->aux.info)->asap) #define SCHED_TIME(x) (((node_sched_params_ptr)(x)->aux.info)->time) #define SCHED_FIRST_REG_MOVE(x) \ (((node_sched_params_ptr)(x)->aux.info)->first_reg_move) #define SCHED_NREG_MOVES(x) \ (((node_sched_params_ptr)(x)->aux.info)->nreg_moves) #define SCHED_ROW(x) (((node_sched_params_ptr)(x)->aux.info)->row) #define SCHED_STAGE(x) (((node_sched_params_ptr)(x)->aux.info)->stage) #define SCHED_COLUMN(x) (((node_sched_params_ptr)(x)->aux.info)->column) /* The scheduling parameters held for each node. */ typedef struct node_sched_params { int asap; /* A lower-bound on the absolute scheduling cycle. */ int time; /* The absolute scheduling cycle (time >= asap). */ /* The following field (first_reg_move) is a pointer to the first register-move instruction added to handle the modulo-variable-expansion of the register defined by this node. This register-move copies the original register defined by the node. */ rtx first_reg_move; /* The number of register-move instructions added, immediately preceding first_reg_move. */ int nreg_moves; int row; /* Holds time % ii. */ int stage; /* Holds time / ii. */ /* The column of a node inside the ps. If nodes u, v are on the same row, u will precede v if column (u) < column (v). */ int column; } *node_sched_params_ptr; /* The following three functions are copied from the current scheduler code in order to use sched_analyze() for computing the dependencies. They are used when initializing the sched_info structure. */ static const char * sms_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED) { static char tmp[80]; sprintf (tmp, "i%4d", INSN_UID (insn)); return tmp; } static int contributes_to_priority_modulo (rtx next, rtx insn) { return BLOCK_NUM (next) == BLOCK_NUM (insn); } static void compute_jump_reg_dependencies_modulo (rtx insn ATTRIBUTE_UNUSED, regset cond_exec ATTRIBUTE_UNUSED, regset used ATTRIBUTE_UNUSED, regset set ATTRIBUTE_UNUSED) { } static struct sched_info sms_sched_info = { NULL, NULL, NULL, NULL, NULL, sms_print_insn, contributes_to_priority_modulo, compute_jump_reg_dependencies_modulo, NULL, NULL, NULL, NULL, 0, 0, 0 }; /* Return the register decremented and tested or zero if it is not a decrement and branch jump insn (similar to doloop_condition_get). */ static rtx doloop_register_get (rtx insn, rtx *comp) { rtx pattern, cmp, inc, reg, condition; if (GET_CODE (insn) != JUMP_INSN) return NULL_RTX; pattern = PATTERN (insn); /* The canonical doloop pattern we expect is: (parallel [(set (pc) (if_then_else (condition) (label_ref (label)) (pc))) (set (reg) (plus (reg) (const_int -1))) (additional clobbers and uses)]) where condition is further restricted to be (ne (reg) (const_int 1)). */ if (GET_CODE (pattern) != PARALLEL) return NULL_RTX; cmp = XVECEXP (pattern, 0, 0); inc = XVECEXP (pattern, 0, 1); /* Return the compare rtx. */ *comp = cmp; /* Check for (set (reg) (something)). */ if (GET_CODE (inc) != SET || ! REG_P (SET_DEST (inc))) return NULL_RTX; /* Extract loop counter register. */ reg = SET_DEST (inc); /* Check if something = (plus (reg) (const_int -1)). */ if (GET_CODE (SET_SRC (inc)) != PLUS || XEXP (SET_SRC (inc), 0) != reg || XEXP (SET_SRC (inc), 1) != constm1_rtx) return NULL_RTX; /* Check for (set (pc) (if_then_else (condition) (label_ref (label)) (pc))). 
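   For instance (editor's illustration, not from the original sources; the register and label numbers are made up), a concrete insn matching the canonical pattern above could look like (parallel [(set (pc) (if_then_else (ne (reg:SI 65) (const_int 1)) (label_ref 23) (pc))) (set (reg:SI 65) (plus:SI (reg:SI 65) (const_int -1)))]), in which case this function returns (reg:SI 65).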
*/ if (GET_CODE (cmp) != SET || SET_DEST (cmp) != pc_rtx || GET_CODE (SET_SRC (cmp)) != IF_THEN_ELSE || GET_CODE (XEXP (SET_SRC (cmp), 1)) != LABEL_REF || XEXP (SET_SRC (cmp), 2) != pc_rtx) return NULL_RTX; /* Extract loop termination condition. */ condition = XEXP (SET_SRC (cmp), 0); /* Check if condition = (ne (reg) (const_int 1)), which is more restrictive than the check in doloop_condition_get: if ((GET_CODE (condition) != GE && GET_CODE (condition) != NE) || GET_CODE (XEXP (condition, 1)) != CONST_INT). */ if (GET_CODE (condition) != NE || XEXP (condition, 1) != const1_rtx) return NULL_RTX; if (XEXP (condition, 0) == reg) return reg; return NULL_RTX; } /* Check if COUNT_REG is set to a constant in the PRE_HEADER block, so that the number of iterations is a compile-time constant. If so, return the rtx that sets COUNT_REG to a constant, and set COUNT to this constant. Otherwise return 0. */ static rtx const_iteration_count (rtx count_reg, basic_block pre_header, HOST_WIDEST_INT * count) { rtx insn; rtx head, tail; get_block_head_tail (pre_header->index, &head, &tail); for (insn = tail; insn != PREV_INSN (head); insn = PREV_INSN (insn)) if (INSN_P (insn) && single_set (insn) && rtx_equal_p (count_reg, SET_DEST (single_set (insn)))) { rtx pat = single_set (insn); if (GET_CODE (SET_SRC (pat)) == CONST_INT) { *count = INTVAL (SET_SRC (pat)); return insn; } return NULL_RTX; } return NULL_RTX; } /* A very simple resource-based lower bound on the initiation interval. ??? Improve the accuracy of this bound by considering the utilization of various units. */ static int res_MII (ddg_ptr g) { return (g->num_nodes / issue_rate); } /* Points to the array that contains the sched data for each node. */ static node_sched_params_ptr node_sched_params; /* Allocate sched_params for each node and initialize it. Assumes that the aux field of each node contain the asap bound (computed earlier), and copies it into the sched_params field. */ static void set_node_sched_params (ddg_ptr g) { int i; /* Allocate for each node in the DDG a place to hold the "sched_data". */ /* Initialize ASAP/ALAP/HIGHT to zero. */ node_sched_params = (node_sched_params_ptr) xcalloc (g->num_nodes, sizeof (struct node_sched_params)); /* Set the pointer of the general data of the node to point to the appropriate sched_params structure. */ for (i = 0; i < g->num_nodes; i++) { /* Watch out for aliasing problems? */ node_sched_params[i].asap = g->nodes[i].aux.count; g->nodes[i].aux.info = &node_sched_params[i]; } } static void print_node_sched_params (FILE * dump_file, int num_nodes) { int i; for (i = 0; i < num_nodes; i++) { node_sched_params_ptr nsp = &node_sched_params[i]; rtx reg_move = nsp->first_reg_move; int j; fprintf (dump_file, "Node %d:\n", i); fprintf (dump_file, " asap = %d:\n", nsp->asap); fprintf (dump_file, " time = %d:\n", nsp->time); fprintf (dump_file, " nreg_moves = %d:\n", nsp->nreg_moves); for (j = 0; j < nsp->nreg_moves; j++) { fprintf (dump_file, " reg_move = "); print_rtl_single (dump_file, reg_move); reg_move = PREV_INSN (reg_move); } } } /* Calculate an upper bound for II. SMS should not schedule the loop if it requires more cycles than this bound. Currently set to the sum of the longest latency edge for each node. Reset based on experiments. 
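   (A small editorial illustration follows; it is not part of the original sources.)  */

#if 0
/* Editor's sketch: the bound computed by calculate_maxii below is simply the
   sum, over all nodes, of each node's longest outgoing-edge latency.  For
   three nodes whose longest out-edge latencies are 2, 1 and 3 the sum is
   2 + 1 + 3 = 6, which the caller in sms_schedule then scales by
   SMS_MAX_II_FACTOR / 100.  The toy function below mirrors that computation
   on a plain array; its name and signature are invented for this example.  */
static int
toy_maxii (int num_nodes, const int *longest_out_edge_latency)
{
  int i, maxii = 0;

  /* Each node contributes its single longest outgoing-edge latency.  */
  for (i = 0; i < num_nodes; i++)
    maxii += longest_out_edge_latency[i];
  return maxii;
}
#endif

/* End of editorial aside.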
*/ static int calculate_maxii (ddg_ptr g) { int i; int maxii = 0; for (i = 0; i < g->num_nodes; i++) { ddg_node_ptr u = &g->nodes[i]; ddg_edge_ptr e; int max_edge_latency = 0; for (e = u->out; e; e = e->next_out) max_edge_latency = MAX (max_edge_latency, e->latency); maxii += max_edge_latency; } return maxii; } /* Given the partial schedule, generate register moves when the length of the register live range is more than ii; the number of moves is determined according to the following equation: SCHED_TIME (use) - SCHED_TIME (def) { 1 broken loop-carried nreg_moves = ----------------------------------- - { dependence. ii { 0 if not. This handles the modulo-variable-expansions (mve's) needed for the ps. */ static void generate_reg_moves (partial_schedule_ptr ps) { ddg_ptr g = ps->g; int ii = ps->ii; int i; for (i = 0; i < g->num_nodes; i++) { ddg_node_ptr u = &g->nodes[i]; ddg_edge_ptr e; int nreg_moves = 0, i_reg_move; sbitmap *uses_of_defs; rtx last_reg_move; rtx prev_reg, old_reg; /* Compute the number of reg_moves needed for u, by looking at life ranges started at u (excluding self-loops). */ for (e = u->out; e; e = e->next_out) if (e->type == TRUE_DEP && e->dest != e->src) { int nreg_moves4e = (SCHED_TIME (e->dest) - SCHED_TIME (e->src)) / ii; /* If dest precedes src in the schedule of the kernel, then dest will read before src writes and we can save one reg_copy. */ if (SCHED_ROW (e->dest) == SCHED_ROW (e->src) && SCHED_COLUMN (e->dest) < SCHED_COLUMN (e->src)) nreg_moves4e--; nreg_moves = MAX (nreg_moves, nreg_moves4e); } if (nreg_moves == 0) continue; /* Every use of the register defined by node may require a different copy of this register, depending on the time the use is scheduled. Set a bitmap vector, telling which nodes use each copy of this register. */ uses_of_defs = sbitmap_vector_alloc (nreg_moves, g->num_nodes); sbitmap_vector_zero (uses_of_defs, nreg_moves); for (e = u->out; e; e = e->next_out) if (e->type == TRUE_DEP && e->dest != e->src) { int dest_copy = (SCHED_TIME (e->dest) - SCHED_TIME (e->src)) / ii; if (SCHED_ROW (e->dest) == SCHED_ROW (e->src) && SCHED_COLUMN (e->dest) < SCHED_COLUMN (e->src)) dest_copy--; if (dest_copy) SET_BIT (uses_of_defs[dest_copy - 1], e->dest->cuid); } /* Now generate the reg_moves, attaching relevant uses to them. */ SCHED_NREG_MOVES (u) = nreg_moves; old_reg = prev_reg = copy_rtx (SET_DEST (single_set (u->insn))); last_reg_move = u->insn; for (i_reg_move = 0; i_reg_move < nreg_moves; i_reg_move++) { int i_use; rtx new_reg = gen_reg_rtx (GET_MODE (prev_reg)); rtx reg_move = gen_move_insn (new_reg, prev_reg); add_insn_before (reg_move, last_reg_move); last_reg_move = reg_move; if (!SCHED_FIRST_REG_MOVE (u)) SCHED_FIRST_REG_MOVE (u) = reg_move; EXECUTE_IF_SET_IN_SBITMAP (uses_of_defs[i_reg_move], 0, i_use, replace_rtx (g->nodes[i_use].insn, old_reg, new_reg)); prev_reg = new_reg; } } } /* Bump the SCHED_TIMEs of all nodes to start from zero. Set the values of SCHED_ROW and SCHED_STAGE. */ static void normalize_sched_times (partial_schedule_ptr ps) { int i; ddg_ptr g = ps->g; int amount = PS_MIN_CYCLE (ps); int ii = ps->ii; for (i = 0; i < g->num_nodes; i++) { ddg_node_ptr u = &g->nodes[i]; int normalized_time = SCHED_TIME (u) - amount; if (normalized_time < 0) abort (); SCHED_TIME (u) = normalized_time; SCHED_ROW (u) = normalized_time % ii; SCHED_STAGE (u) = normalized_time / ii; } } /* Set SCHED_COLUMN of each node according to its position in PS. 
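   (A small editorial illustration of the reg-move count used above follows;
   it is not part of the original sources.)  */

#if 0
/* Editor's sketch of the nreg_moves computation in generate_reg_moves above:
   with ii = 4, a def scheduled at cycle 1 and a use scheduled at cycle 9,
   the live range spans (9 - 1) / 4 = 2 kernel iterations, so two register
   copies are generated; one copy is saved when the use is placed in the same
   row as the def but in an earlier column, because it then reads before the
   def writes.  The name and signature below are invented for this example.  */
static int
toy_nreg_moves (int ii, int def_time, int use_time, int use_reads_before_def)
{
  /* Number of ii-cycle iterations spanned by the def-use live range.  */
  int moves = (use_time - def_time) / ii;

  if (use_reads_before_def)
    moves--;
  return moves;
}
#endif

/* End of editorial aside.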
*/ static void set_columns_for_ps (partial_schedule_ptr ps) { int row; for (row = 0; row < ps->ii; row++) { ps_insn_ptr cur_insn = ps->rows[row]; int column = 0; for (; cur_insn; cur_insn = cur_insn->next_in_row) SCHED_COLUMN (cur_insn->node) = column++; } } /* Permute the insns according to their order in PS, from row 0 to row ii-1, and position them right before LAST. This schedules the insns of the loop kernel. */ static void permute_partial_schedule (partial_schedule_ptr ps, rtx last) { int ii = ps->ii; int row; ps_insn_ptr ps_ij; for (row = 0; row < ii ; row++) for (ps_ij = ps->rows[row]; ps_ij; ps_ij = ps_ij->next_in_row) if (PREV_INSN (last) != ps_ij->node->insn) reorder_insns_nobb (ps_ij->node->first_note, ps_ij->node->insn, PREV_INSN (last)); } /* Used to generate the prologue & epilogue. Duplicate the subset of nodes whose stages are between FROM_STAGE and TO_STAGE (inclusive of both), together with a prefix/suffix of their reg_moves. */ static void duplicate_insns_of_cycles (partial_schedule_ptr ps, int from_stage, int to_stage, int for_prolog) { int row; ps_insn_ptr ps_ij; for (row = 0; row < ps->ii; row++) for (ps_ij = ps->rows[row]; ps_ij; ps_ij = ps_ij->next_in_row) { ddg_node_ptr u_node = ps_ij->node; int j, i_reg_moves; rtx reg_move = NULL_RTX; if (for_prolog) { /* SCHED_STAGE (u_node) >= from_stage == 0. Generate increasing number of reg_moves starting with the second occurrence of u_node, which is generated if its SCHED_STAGE <= to_stage. */ i_reg_moves = to_stage - SCHED_STAGE (u_node); i_reg_moves = MAX (i_reg_moves, 0); i_reg_moves = MIN (i_reg_moves, SCHED_NREG_MOVES (u_node)); /* The reg_moves start from the *first* reg_move backwards. */ if (i_reg_moves) { reg_move = SCHED_FIRST_REG_MOVE (u_node); for (j = 1; j < i_reg_moves; j++) reg_move = PREV_INSN (reg_move); } } else /* It's for the epilog. */ { /* SCHED_STAGE (u_node) <= to_stage. Generate all reg_moves, starting to decrease one stage after u_node no longer occurs; that is, generate all reg_moves until SCHED_STAGE (u_node) == from_stage - 1. */ i_reg_moves = SCHED_NREG_MOVES (u_node) - (from_stage - SCHED_STAGE (u_node) - 1); i_reg_moves = MAX (i_reg_moves, 0); i_reg_moves = MIN (i_reg_moves, SCHED_NREG_MOVES (u_node)); /* The reg_moves start from the *last* reg_move forwards. */ if (i_reg_moves) { reg_move = SCHED_FIRST_REG_MOVE (u_node); for (j = 1; j < SCHED_NREG_MOVES (u_node); j++) reg_move = PREV_INSN (reg_move); } } for (j = 0; j < i_reg_moves; j++, reg_move = NEXT_INSN (reg_move)) emit_insn (copy_rtx (PATTERN (reg_move))); if (SCHED_STAGE (u_node) >= from_stage && SCHED_STAGE (u_node) <= to_stage) duplicate_insn_chain (u_node->first_note, u_node->insn); } } /* Generate the instructions (including reg_moves) for prolog & epilog. */ static void generate_prolog_epilog (partial_schedule_ptr ps, rtx orig_loop_beg, rtx orig_loop_end, int unknown_count) { int i; int last_stage = PS_STAGE_COUNT (ps) - 1; edge e; rtx c_reg = NULL_RTX; rtx cmp = NULL_RTX; rtx precond_jump = NULL_RTX; rtx precond_exit_label = NULL_RTX; rtx precond_exit_label_insn = NULL_RTX; rtx last_epilog_insn = NULL_RTX; rtx loop_exit_label = NULL_RTX; rtx loop_exit_label_insn = NULL_RTX; rtx orig_loop_bct = NULL_RTX; /* Loop header edge. */ e = ps->g->bb->pred; if (e->src == ps->g->bb) e = e->pred_next; /* Generate the prolog, inserting its insns on the loop-entry edge. */ start_sequence (); /* This is the place where we want to insert the precondition. 
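   (Editor's note, not from the original sources: the NOTE_INSN_DELETED emitted just below only marks this spot.  Once the epilog has been generated, a comparison of the loop count register against PS_STAGE_COUNT and a conditional jump to the original copy of the loop are placed here, and the placeholder note is deleted.)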
*/ if (unknown_count) precond_jump = emit_note (NOTE_INSN_DELETED); for (i = 0; i < last_stage; i++) duplicate_insns_of_cycles (ps, 0, i, 1); /* No need to call insert_insn_on_edge; we prepared the sequence. */ e->insns.r = get_insns (); end_sequence (); /* Generate the epilog, inserting its insns on the loop-exit edge. */ start_sequence (); for (i = 0; i < last_stage; i++) duplicate_insns_of_cycles (ps, i + 1, last_stage, 0); last_epilog_insn = emit_note (NOTE_INSN_DELETED); /* Emit the label where to put the original loop code. */ if (unknown_count) { rtx label, cond; precond_exit_label = gen_label_rtx (); precond_exit_label_insn = emit_label (precond_exit_label); /* Put the original loop code. */ reorder_insns_nobb (orig_loop_beg, orig_loop_end, precond_exit_label_insn); /* Change the label of the BCT to be the PRECOND_EXIT_LABEL. */ orig_loop_bct = get_last_insn (); c_reg = doloop_register_get (orig_loop_bct, &cmp); label = XEXP (SET_SRC (cmp), 1); cond = XEXP (SET_SRC (cmp), 0); if (! c_reg || GET_CODE (cond) != NE) abort (); XEXP (label, 0) = precond_exit_label; JUMP_LABEL (orig_loop_bct) = precond_exit_label_insn; LABEL_NUSES (precond_exit_label_insn)++; /* Generate the loop exit label. */ loop_exit_label = gen_label_rtx (); loop_exit_label_insn = emit_label (loop_exit_label); } e = ps->g->bb->succ; if (e->dest == ps->g->bb) e = e->succ_next; e->insns.r = get_insns (); end_sequence (); commit_edge_insertions (); if (unknown_count) { rtx precond_insns, epilog_jump, insert_after_insn; basic_block loop_exit_bb = BLOCK_FOR_INSN (loop_exit_label_insn); basic_block epilog_bb = BLOCK_FOR_INSN (last_epilog_insn); basic_block precond_bb = BLOCK_FOR_INSN (precond_jump); basic_block orig_loop_bb = BLOCK_FOR_INSN (precond_exit_label_insn); edge epilog_exit_edge = epilog_bb->succ; /* Do loop preconditioning to take care of cases were the loop count is less than the stage count. Update the CFG properly. */ insert_after_insn = precond_jump; start_sequence (); c_reg = doloop_register_get (ps->g->closing_branch->insn, &cmp); emit_cmp_and_jump_insns (c_reg, GEN_INT (PS_STAGE_COUNT (ps)), LT, NULL, GET_MODE (c_reg), 1, precond_exit_label); precond_insns = get_insns (); precond_jump = get_last_insn (); end_sequence (); reorder_insns (precond_insns, precond_jump, insert_after_insn); /* Generate a subtract instruction at the beginning of the prolog to adjust the loop count by STAGE_COUNT. */ emit_insn_after (gen_sub2_insn (c_reg, GEN_INT (PS_STAGE_COUNT (ps) - 1)), precond_jump); update_bb_for_insn (precond_bb); delete_insn (insert_after_insn); /* Update label info for the precondition jump. */ JUMP_LABEL (precond_jump) = precond_exit_label_insn; LABEL_NUSES (precond_exit_label_insn)++; /* Update the CFG. */ split_block (precond_bb, precond_jump); make_edge (precond_bb, orig_loop_bb, 0); /* Add a jump at end of the epilog to the LOOP_EXIT_LABEL to jump over the original loop copy and update the CFG. */ epilog_jump = emit_jump_insn_after (gen_jump (loop_exit_label), last_epilog_insn); delete_insn (last_epilog_insn); JUMP_LABEL (epilog_jump) = loop_exit_label_insn; LABEL_NUSES (loop_exit_label_insn)++; redirect_edge_succ (epilog_exit_edge, loop_exit_bb); epilog_exit_edge->flags &= ~EDGE_FALLTHRU; emit_barrier_after (BB_END (epilog_bb)); } } /* Return the line note insn preceding INSN, for debugging. Taken from emit-rtl.c. 
*/ static rtx find_line_note_modulo (rtx insn) { for (; insn; insn = PREV_INSN (insn)) if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0) break; return insn; } /* Main entry point, perform SMS scheduling on the loops of the function that consist of single basic blocks. */ void sms_schedule (FILE *dump_file) { static int passes = 0; rtx insn; ddg_ptr *g_arr, g; basic_block bb, pre_header = NULL; int * node_order; int maxii; int i; partial_schedule_ptr ps; int max_bb_index = last_basic_block; struct df *df; /* SMS uses the DFA interface. */ if (! targetm.sched.use_dfa_pipeline_interface || ! (*targetm.sched.use_dfa_pipeline_interface) ()) return; stats_file = dump_file; /* Initialize issue_rate. */ if (targetm.sched.issue_rate) { int temp = reload_completed; reload_completed = 1; issue_rate = (*targetm.sched.issue_rate) (); reload_completed = temp; } else issue_rate = 1; /* Initialize the scheduler. */ current_sched_info = &sms_sched_info; sched_init (NULL); /* Init Data Flow analysis, to be used in interloop dep calculation. */ df = df_init (); df_analyze (df, 0, DF_ALL); /* Allocate memory to hold the DDG array. */ g_arr = xcalloc (max_bb_index, sizeof (ddg_ptr)); /* Build DDGs for all the relevant loops and hold them in G_ARR indexed by the loop BB index. */ FOR_EACH_BB (bb) { rtx head, tail; rtx count_reg, comp; edge e, pre_header_edge; if (bb->index < 0) continue; /* Check if bb has two successors, one being itself. */ e = bb->succ; if (!e || !e->succ_next || e->succ_next->succ_next) continue; if (e->dest != bb && e->succ_next->dest != bb) continue; if ((e->flags & EDGE_COMPLEX) || (e->succ_next->flags & EDGE_COMPLEX)) continue; /* Check if bb has two predecessors, one being itself. */ /* In view of above tests, suffices to check e->pred_next->pred_next? */ e = bb->pred; if (!e || !e->pred_next || e->pred_next->pred_next) continue; if (e->src != bb && e->pred_next->src != bb) continue; if ((e->flags & EDGE_COMPLEX) || (e->pred_next->flags & EDGE_COMPLEX)) continue; /* For debugging. */ if (passes++ > MAX_SMS_LOOP_NUMBER && MAX_SMS_LOOP_NUMBER != -1) { if (dump_file) fprintf (dump_file, "SMS reached MAX_PASSES... \n"); break; } get_block_head_tail (bb->index, &head, &tail); pre_header_edge = bb->pred; if (bb->pred->src != bb) pre_header_edge = bb->pred->pred_next; /* Perfrom SMS only on loops that their average count is above threshold. */ if (bb->count < pre_header_edge->count * SMS_LOOP_AVERAGE_COUNT_THRESHOLD) { if (stats_file) { rtx line_note = find_line_note_modulo (tail); if (line_note) { expanded_location xloc; NOTE_EXPANDED_LOCATION (xloc, line_note); fprintf (stats_file, "SMS bb %s %d (file, line)\n", xloc.file, xloc.line); } fprintf (stats_file, "SMS single-bb-loop\n"); if (profile_info && flag_branch_probabilities) { fprintf (stats_file, "SMS loop-count "); fprintf (stats_file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) bb->count); fprintf (stats_file, "\n"); fprintf (stats_file, "SMS preheader-count "); fprintf (stats_file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) pre_header_edge->count); fprintf (stats_file, "\n"); fprintf (stats_file, "SMS profile-sum-max "); fprintf (stats_file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) profile_info->sum_max); fprintf (stats_file, "\n"); } } continue; } /* Make sure this is a doloop. */ if ( !(count_reg = doloop_register_get (tail, &comp))) continue; e = bb->pred; if (e->src == bb) pre_header = e->pred_next->src; else pre_header = e->src; /* Don't handle BBs with calls or barriers, or !single_set insns. 
*/ for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn)) if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == BARRIER || (INSN_P (insn) && GET_CODE (insn) != JUMP_INSN && !single_set (insn) && GET_CODE (PATTERN (insn)) != USE)) break; if (insn != NEXT_INSN (tail)) { if (stats_file) { if (GET_CODE (insn) == CALL_INSN) fprintf (stats_file, "SMS loop-with-call\n"); else if (GET_CODE (insn) == BARRIER) fprintf (stats_file, "SMS loop-with-barrier\n"); else fprintf (stats_file, "SMS loop-with-not-single-set\n"); print_rtl_single (stats_file, insn); } continue; } if (! (g = create_ddg (bb, df, 0))) { if (stats_file) fprintf (stats_file, "SMS doloop\n"); continue; } g_arr[bb->index] = g; } /* Release Data Flow analysis data structures. */ df_finish (df); /* Go over the built DDGs and perfrom SMS for each one of them. */ for (i = 0; i < max_bb_index; i++) { rtx head, tail; rtx count_reg, count_init, comp; edge pre_header_edge; int mii, rec_mii; int stage_count = 0; HOST_WIDEST_INT loop_count = 0; if (! (g = g_arr[i])) continue; if (dump_file) print_ddg (dump_file, g); get_block_head_tail (g->bb->index, &head, &tail); pre_header_edge = g->bb->pred; if (g->bb->pred->src != g->bb) pre_header_edge = g->bb->pred->pred_next; if (stats_file) { rtx line_note = find_line_note_modulo (tail); if (line_note) { expanded_location xloc; NOTE_EXPANDED_LOCATION (xloc, line_note); fprintf (stats_file, "SMS bb %s %d (file, line)\n", xloc.file, xloc.line); } fprintf (stats_file, "SMS single-bb-loop\n"); if (profile_info && flag_branch_probabilities) { fprintf (stats_file, "SMS loop-count "); fprintf (stats_file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) bb->count); fprintf (stats_file, "\n"); fprintf (stats_file, "SMS preheader-count "); fprintf (stats_file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) pre_header_edge->count); fprintf (stats_file, "\n"); fprintf (stats_file, "SMS profile-sum-max "); fprintf (stats_file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) profile_info->sum_max); fprintf (stats_file, "\n"); } fprintf (stats_file, "SMS doloop\n"); fprintf (stats_file, "SMS built-ddg %d\n", g->num_nodes); fprintf (stats_file, "SMS num-loads %d\n", g->num_loads); fprintf (stats_file, "SMS num-stores %d\n", g->num_stores); } /* Make sure this is a doloop. */ if ( !(count_reg = doloop_register_get (tail, &comp))) abort (); /* This should be NULL_RTX if the count is unknown at compile time. */ count_init = const_iteration_count (count_reg, pre_header, &loop_count); if (stats_file && count_init) { fprintf (stats_file, "SMS const-doloop "); fprintf (stats_file, HOST_WIDEST_INT_PRINT_DEC, loop_count); fprintf (stats_file, "\n"); } node_order = (int *) xmalloc (sizeof (int) * g->num_nodes); mii = 1; /* Need to pass some estimate of mii. */ rec_mii = sms_order_nodes (g, mii, node_order); mii = MAX (res_MII (g), rec_mii); maxii = (calculate_maxii (g) * SMS_MAX_II_FACTOR) / 100; if (stats_file) fprintf (stats_file, "SMS iis %d %d %d (rec_mii, mii, maxii)\n", rec_mii, mii, maxii); /* After sms_order_nodes and before sms_schedule_by_order, to copy over ASAP. */ set_node_sched_params (g); ps = sms_schedule_by_order (g, mii, maxii, node_order, dump_file); if (ps) stage_count = PS_STAGE_COUNT (ps); if (stage_count == 0 || (count_init && (stage_count > loop_count))) { if (dump_file) fprintf (dump_file, "SMS failed... 
\n"); if (stats_file) fprintf (stats_file, "SMS sched-failed %d\n", stage_count); } else { rtx orig_loop_beg = NULL_RTX; rtx orig_loop_end = NULL_RTX; if (stats_file) { fprintf (stats_file, "SMS succeeded %d %d (with ii, sc)\n", ps->ii, stage_count); print_partial_schedule (ps, dump_file); fprintf (dump_file, "SMS Branch (%d) will later be scheduled at cycle %d.\n", g->closing_branch->cuid, PS_MIN_CYCLE (ps) - 1); } /* Save the original loop if we want to do loop preconditioning in case the BCT count is not known. */ if (! count_init) { int i; start_sequence (); /* Copy the original loop code before modifying it - so we can use it later. */ for (i = 0; i < ps->g->num_nodes; i++) duplicate_insn_chain (ps->g->nodes[i].first_note, ps->g->nodes[i].insn); orig_loop_beg = get_insns (); orig_loop_end = get_last_insn (); end_sequence (); } /* Set the stage boundaries. If the DDG is built with closing_branch_deps, the closing_branch was scheduled and should appear in the last (ii-1) row. Otherwise, we are free to schedule the branch, and we let nodes that were scheduled at the first PS_MIN_CYCLE cycle appear in the first row; this should reduce stage_count to minimum. */ normalize_sched_times (ps); rotate_partial_schedule (ps, PS_MIN_CYCLE (ps)); set_columns_for_ps (ps); permute_partial_schedule (ps, g->closing_branch->first_note); generate_reg_moves (ps); if (dump_file) print_node_sched_params (dump_file, g->num_nodes); /* Set new iteration count of loop kernel. */ if (count_init) SET_SRC (single_set (count_init)) = GEN_INT (loop_count - stage_count + 1); /* Generate prolog and epilog. */ generate_prolog_epilog (ps, orig_loop_beg, orig_loop_end, count_init ? 0 : 1); } free_partial_schedule (ps); free (node_sched_params); free (node_order); free_ddg (g); } /* Release scheduler data, needed until now because of DFA. */ sched_finish (); } /* The SMS scheduling algorithm itself ----------------------------------- Input: 'O' an ordered list of insns of a loop. Output: A scheduling of the loop - kernel, prolog, and epilogue. 'Q' is the empty Set 'PS' is the partial schedule; it holds the currently scheduled nodes with their cycle/slot. 'PSP' previously scheduled predecessors. 'PSS' previously scheduled successors. 't(u)' the cycle where u is scheduled. 'l(u)' is the latency of u. 'd(v,u)' is the dependence distance from v to u. 'ASAP(u)' the earliest time at which u could be scheduled as computed in the node ordering phase. 'check_hardware_resources_conflicts(u, PS, c)' run a trace around cycle/slot through DFA model to check resource conflicts involving instruction u at cycle c given the partial schedule PS. 'add_to_partial_schedule_at_time(u, PS, c)' Add the node/instruction u to the partial schedule PS at time c. 'calculate_register_pressure(PS)' Given a schedule of instructions, calculate the register pressure it implies. One implementation could be the maximum number of overlapping live ranges. 'maxRP' The maximum allowed register pressure, it is usually derived from the number registers available in the hardware. 1. II = MII. 2. PS = empty list 3. for each node u in O in pre-computed order 4. if (PSP(u) != Q && PSS(u) == Q) then 5. Early_start(u) = max ( t(v) + l(v) - d(v,u)*II ) over all every v in PSP(u). 6. start = Early_start; end = Early_start + II - 1; step = 1 11. else if (PSP(u) == Q && PSS(u) != Q) then 12. Late_start(u) = min ( t(v) - l(v) + d(v,u)*II ) over all every v in PSS(u). 13. start = Late_start; end = Late_start - II + 1; step = -1 14. 
else if (PSP(u) != Q && PSS(u) != Q) then 15. Early_start(u) = max ( t(v) + l(v) - d(v,u)*II ) over all every v in PSP(u). 16. Late_start(u) = min ( t(v) - l(v) + d(v,u)*II ) over all every v in PSS(u). 17. start = Early_start; 18. end = min(Early_start + II - 1 , Late_start); 19. step = 1 20. else "if (PSP(u) == Q && PSS(u) == Q)" 21. start = ASAP(u); end = start + II - 1; step = 1 22. endif 23. success = false 24. for (c = start ; c != end ; c += step) 25. if check_hardware_resources_conflicts(u, PS, c) then 26. add_to_partial_schedule_at_time(u, PS, c) 27. success = true 28. break 29. endif 30. endfor 31. if (success == false) then 32. II = II + 1 33. if (II > maxII) then 34. finish - failed to schedule 35. endif 36. goto 2. 37. endif 38. endfor 39. if (calculate_register_pressure(PS) > maxRP) then 40. goto 32. 41. endif 42. compute epilogue & prologue 43. finish - succeeded to schedule */ /* A limit on the number of cycles that resource conflicts can span. ??? Should be provided by DFA, and be dependent on the type of insn scheduled. Currently set to 0 to save compile time. */ #define DFA_HISTORY SMS_DFA_HISTORY static partial_schedule_ptr sms_schedule_by_order (ddg_ptr g, int mii, int maxii, int *nodes_order, FILE *dump_file) { int ii = mii; int i, c, success; int try_again_with_larger_ii = true; int num_nodes = g->num_nodes; ddg_edge_ptr e; int start, end, step; /* Place together into one struct? */ sbitmap sched_nodes = sbitmap_alloc (num_nodes); sbitmap psp = sbitmap_alloc (num_nodes); sbitmap pss = sbitmap_alloc (num_nodes); partial_schedule_ptr ps = create_partial_schedule (ii, g, DFA_HISTORY); while (try_again_with_larger_ii && ii < maxii) { if (dump_file) fprintf(dump_file, "Starting with ii=%d\n", ii); try_again_with_larger_ii = false; sbitmap_zero (sched_nodes); for (i = 0; i < num_nodes; i++) { int u = nodes_order[i]; ddg_node_ptr u_node = &g->nodes[u]; sbitmap u_node_preds = NODE_PREDECESSORS (u_node); sbitmap u_node_succs = NODE_SUCCESSORS (u_node); int psp_not_empty; int pss_not_empty; rtx insn = u_node->insn; if (!INSN_P (insn)) continue; if (GET_CODE (insn) == JUMP_INSN) /* Closing branch handled later. */ continue; /* 1. compute sched window for u (start, end, step). 
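   For example (editor's illustration, not from the original sources): with ii = 3, a single already-scheduled predecessor v with SCHED_TIME (v) = 5, latency 2 and distance 1 gives Early_start = 5 + 2 - 1*3 = 4, so u is tried at cycles 4, 5 and 6, i.e. the window [Early_start, Early_start + ii) scanned with step 1.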
*/ sbitmap_zero (psp); sbitmap_zero (pss); psp_not_empty = sbitmap_a_and_b_cg (psp, u_node_preds, sched_nodes); pss_not_empty = sbitmap_a_and_b_cg (pss, u_node_succs, sched_nodes); if (psp_not_empty && !pss_not_empty) { int early_start = 0; end = INT_MAX; for (e = u_node->in; e != 0; e = e->next_in) { ddg_node_ptr v_node = e->src; if (TEST_BIT (sched_nodes, v_node->cuid)) { early_start = MAX (early_start, SCHED_TIME (v_node) + e->latency - (e->distance * ii)); if (e->data_type == MEM_DEP) end = MIN (end, SCHED_TIME (v_node) + ii - 1); } } start = early_start; end = MIN (end, early_start + ii); step = 1; } else if (!psp_not_empty && pss_not_empty) { int late_start = INT_MAX; end = INT_MIN; for (e = u_node->out; e != 0; e = e->next_out) { ddg_node_ptr v_node = e->dest; if (TEST_BIT (sched_nodes, v_node->cuid)) { late_start = MIN (late_start, SCHED_TIME (v_node) - e->latency + (e->distance * ii)); if (e->data_type == MEM_DEP) end = MAX (end, SCHED_TIME (v_node) - ii + 1); } } start = late_start; end = MAX (end, late_start - ii); step = -1; } else if (psp_not_empty && pss_not_empty) { int early_start = 0; int late_start = INT_MAX; start = INT_MIN; end = INT_MAX; for (e = u_node->in; e != 0; e = e->next_in) { ddg_node_ptr v_node = e->src; if (TEST_BIT (sched_nodes, v_node->cuid)) { early_start = MAX (early_start, SCHED_TIME (v_node) + e->latency - (e->distance * ii)); if (e->data_type == MEM_DEP) end = MIN (end, SCHED_TIME (v_node) + ii - 1); } } for (e = u_node->out; e != 0; e = e->next_out) { ddg_node_ptr v_node = e->dest; if (TEST_BIT (sched_nodes, v_node->cuid)) { late_start = MIN (late_start, SCHED_TIME (v_node) - e->latency + (e->distance * ii)); if (e->data_type == MEM_DEP) start = MAX (start, SCHED_TIME (v_node) - ii + 1); } } start = MAX (start, early_start); end = MIN (end, MIN (early_start + ii, late_start + 1)); step = 1; } else /* psp is empty && pss is empty. */ { start = SCHED_ASAP (u_node); end = start + ii; step = 1; } /* 2. Try scheduling u in window. */ if (dump_file) fprintf(dump_file, "Trying to schedule node %d in (%d .. %d) step %d\n", u, start, end, step); success = 0; if ((step > 0 && start < end) || (step < 0 && start > end)) for (c = start; c != end; c += step) { ps_insn_ptr psi = ps_add_node_check_conflicts (ps, u_node, c); if (psi) { SCHED_TIME (u_node) = c; SET_BIT (sched_nodes, u); success = 1; if (dump_file) fprintf(dump_file, "Schedule in %d\n", c); break; } } if (!success) { /* ??? Try backtracking instead of immediately ii++? */ ii++; try_again_with_larger_ii = true; reset_partial_schedule (ps, ii); break; } /* ??? If (success), check register pressure estimates. */ } /* Continue with next node. */ } /* While try_again_with_larger_ii. */ sbitmap_free (sched_nodes); sbitmap_free (psp); sbitmap_free (pss); if (ii >= maxii) { free_partial_schedule (ps); ps = NULL; } return ps; } /* This page implements the algorithm for ordering the nodes of a DDG for modulo scheduling, activated through the "int sms_order_nodes (ddg_ptr, int mii, int * result)" API. 
*/ #define ORDER_PARAMS(x) ((struct node_order_params *) (x)->aux.info) #define ASAP(x) (ORDER_PARAMS ((x))->asap) #define ALAP(x) (ORDER_PARAMS ((x))->alap) #define HEIGHT(x) (ORDER_PARAMS ((x))->height) #define MOB(x) (ALAP ((x)) - ASAP ((x))) #define DEPTH(x) (ASAP ((x))) typedef struct node_order_params * nopa; static void order_nodes_of_sccs (ddg_all_sccs_ptr, int * result); static int order_nodes_in_scc (ddg_ptr, sbitmap, sbitmap, int*, int); static nopa calculate_order_params (ddg_ptr, int mii); static int find_max_asap (ddg_ptr, sbitmap); static int find_max_hv_min_mob (ddg_ptr, sbitmap); static int find_max_dv_min_mob (ddg_ptr, sbitmap); enum sms_direction {BOTTOMUP, TOPDOWN}; struct node_order_params { int asap; int alap; int height; }; /* Check if NODE_ORDER contains a permutation of 0 .. NUM_NODES-1. */ static void check_nodes_order (int *node_order, int num_nodes) { int i; sbitmap tmp = sbitmap_alloc (num_nodes); sbitmap_zero (tmp); for (i = 0; i < num_nodes; i++) { int u = node_order[i]; if (u >= num_nodes || u < 0 || TEST_BIT (tmp, u)) abort (); SET_BIT (tmp, u); } sbitmap_free (tmp); } /* Order the nodes of G for scheduling and pass the result in NODE_ORDER. Also set aux.count of each node to ASAP. Return the recMII for the given DDG. */ static int sms_order_nodes (ddg_ptr g, int mii, int * node_order) { int i; int rec_mii = 0; ddg_all_sccs_ptr sccs = create_ddg_all_sccs (g); nopa nops = calculate_order_params (g, mii); order_nodes_of_sccs (sccs, node_order); if (sccs->num_sccs > 0) /* First SCC has the largest recurrence_length. */ rec_mii = sccs->sccs[0]->recurrence_length; /* Save ASAP before destroying node_order_params. */ for (i = 0; i < g->num_nodes; i++) { ddg_node_ptr v = &g->nodes[i]; v->aux.count = ASAP (v); } free (nops); free_ddg_all_sccs (sccs); check_nodes_order (node_order, g->num_nodes); return rec_mii; } static void order_nodes_of_sccs (ddg_all_sccs_ptr all_sccs, int * node_order) { int i, pos = 0; ddg_ptr g = all_sccs->ddg; int num_nodes = g->num_nodes; sbitmap prev_sccs = sbitmap_alloc (num_nodes); sbitmap on_path = sbitmap_alloc (num_nodes); sbitmap tmp = sbitmap_alloc (num_nodes); sbitmap ones = sbitmap_alloc (num_nodes); sbitmap_zero (prev_sccs); sbitmap_ones (ones); /* Perfrom the node ordering starting from the SCC with the highest recMII. For each SCC order the nodes according to their ASAP/ALAP/HEIGHT etc. */ for (i = 0; i < all_sccs->num_sccs; i++) { ddg_scc_ptr scc = all_sccs->sccs[i]; /* Add nodes on paths from previous SCCs to the current SCC. */ find_nodes_on_paths (on_path, g, prev_sccs, scc->nodes); sbitmap_a_or_b (tmp, scc->nodes, on_path); /* Add nodes on paths from the current SCC to previous SCCs. */ find_nodes_on_paths (on_path, g, scc->nodes, prev_sccs); sbitmap_a_or_b (tmp, tmp, on_path); /* Remove nodes of previous SCCs from current extended SCC. */ sbitmap_difference (tmp, tmp, prev_sccs); pos = order_nodes_in_scc (g, prev_sccs, tmp, node_order, pos); /* Above call to order_nodes_in_scc updated prev_sccs |= tmp. */ } /* Handle the remaining nodes that do not belong to any scc. Each call to order_nodes_in_scc handles a single connected component. */ while (pos < g->num_nodes) { sbitmap_difference (tmp, ones, prev_sccs); pos = order_nodes_in_scc (g, prev_sccs, tmp, node_order, pos); } sbitmap_free (prev_sccs); sbitmap_free (on_path); sbitmap_free (tmp); sbitmap_free (ones); } /* MII is needed if we consider backarcs (that do not close recursive cycles). 
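   (A small editorial illustration follows; it is not part of the original sources.)  */

#if 0
/* Editor's sketch of the ASAP computation performed by the first loop of
   calculate_order_params below, for a graph whose nodes are numbered in
   topological order and whose edges all have distance 0.  For a three-node
   chain with unit latencies this yields ASAP = {0, 1, 2}; combined with ALAP
   it gives each node's mobility MOB = ALAP - ASAP, which is 0 exactly on the
   critical path.  The name and the flat edge-array representation are
   invented for this example.  */
static void
toy_asap (int num_nodes, int num_edges,
          const int *edge_src, const int *edge_dest, const int *edge_latency,
          int *asap)
{
  int i, e;

  /* Nodes are assumed to be numbered in topological order, so every
     incoming edge's source is already final when node i is reached.  */
  for (i = 0; i < num_nodes; i++)
    {
      asap[i] = 0;
      for (e = 0; e < num_edges; e++)
        if (edge_dest[e] == i && asap[edge_src[e]] + edge_latency[e] > asap[i])
          asap[i] = asap[edge_src[e]] + edge_latency[e];
    }
}
#endif

/* End of editorial aside.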
*/ static struct node_order_params * calculate_order_params (ddg_ptr g, int mii ATTRIBUTE_UNUSED) { int u; int max_asap; int num_nodes = g->num_nodes; ddg_edge_ptr e; /* Allocate a place to hold ordering params for each node in the DDG. */ nopa node_order_params_arr; /* Initialize of ASAP/ALAP/HEIGHT to zero. */ node_order_params_arr = (nopa) xcalloc (num_nodes, sizeof (struct node_order_params)); /* Set the aux pointer of each node to point to its order_params structure. */ for (u = 0; u < num_nodes; u++) g->nodes[u].aux.info = &node_order_params_arr[u]; /* Disregarding a backarc from each recursive cycle to obtain a DAG, calculate ASAP, ALAP, mobility, distance, and height for each node in the dependence (direct acyclic) graph. */ /* We assume that the nodes in the array are in topological order. */ max_asap = 0; for (u = 0; u < num_nodes; u++) { ddg_node_ptr u_node = &g->nodes[u]; ASAP (u_node) = 0; for (e = u_node->in; e; e = e->next_in) if (e->distance == 0) ASAP (u_node) = MAX (ASAP (u_node), ASAP (e->src) + e->latency); max_asap = MAX (max_asap, ASAP (u_node)); } for (u = num_nodes - 1; u > -1; u--) { ddg_node_ptr u_node = &g->nodes[u]; ALAP (u_node) = max_asap; HEIGHT (u_node) = 0; for (e = u_node->out; e; e = e->next_out) if (e->distance == 0) { ALAP (u_node) = MIN (ALAP (u_node), ALAP (e->dest) - e->latency); HEIGHT (u_node) = MAX (HEIGHT (u_node), HEIGHT (e->dest) + e->latency); } } return node_order_params_arr; } static int find_max_asap (ddg_ptr g, sbitmap nodes) { int u; int max_asap = -1; int result = -1; EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, { ddg_node_ptr u_node = &g->nodes[u]; if (max_asap < ASAP (u_node)) { max_asap = ASAP (u_node); result = u; } }); return result; } static int find_max_hv_min_mob (ddg_ptr g, sbitmap nodes) { int u; int max_hv = -1; int min_mob = INT_MAX; int result = -1; EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, { ddg_node_ptr u_node = &g->nodes[u]; if (max_hv < HEIGHT (u_node)) { max_hv = HEIGHT (u_node); min_mob = MOB (u_node); result = u; } else if ((max_hv == HEIGHT (u_node)) && (min_mob > MOB (u_node))) { min_mob = MOB (u_node); result = u; } }); return result; } static int find_max_dv_min_mob (ddg_ptr g, sbitmap nodes) { int u; int max_dv = -1; int min_mob = INT_MAX; int result = -1; EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, { ddg_node_ptr u_node = &g->nodes[u]; if (max_dv < DEPTH (u_node)) { max_dv = DEPTH (u_node); min_mob = MOB (u_node); result = u; } else if ((max_dv == DEPTH (u_node)) && (min_mob > MOB (u_node))) { min_mob = MOB (u_node); result = u; } }); return result; } /* Places the nodes of SCC into the NODE_ORDER array starting at position POS, according to the SMS ordering algorithm. NODES_ORDERED (in&out parameter) holds the bitset of all nodes in the NODE_ORDER array, starting from position zero. 
*/ static int order_nodes_in_scc (ddg_ptr g, sbitmap nodes_ordered, sbitmap scc, int * node_order, int pos) { enum sms_direction dir; int num_nodes = g->num_nodes; sbitmap workset = sbitmap_alloc (num_nodes); sbitmap tmp = sbitmap_alloc (num_nodes); sbitmap zero_bitmap = sbitmap_alloc (num_nodes); sbitmap predecessors = sbitmap_alloc (num_nodes); sbitmap successors = sbitmap_alloc (num_nodes); sbitmap_zero (predecessors); find_predecessors (predecessors, g, nodes_ordered); sbitmap_zero (successors); find_successors (successors, g, nodes_ordered); sbitmap_zero (tmp); if (sbitmap_a_and_b_cg (tmp, predecessors, scc)) { sbitmap_copy (workset, tmp); dir = BOTTOMUP; } else if (sbitmap_a_and_b_cg (tmp, successors, scc)) { sbitmap_copy (workset, tmp); dir = TOPDOWN; } else { int u; sbitmap_zero (workset); if ((u = find_max_asap (g, scc)) >= 0) SET_BIT (workset, u); dir = BOTTOMUP; } sbitmap_zero (zero_bitmap); while (!sbitmap_equal (workset, zero_bitmap)) { int v; ddg_node_ptr v_node; sbitmap v_node_preds; sbitmap v_node_succs; if (dir == TOPDOWN) { while (!sbitmap_equal (workset, zero_bitmap)) { v = find_max_hv_min_mob (g, workset); v_node = &g->nodes[v]; node_order[pos++] = v; v_node_succs = NODE_SUCCESSORS (v_node); sbitmap_a_and_b (tmp, v_node_succs, scc); /* Don't consider the already ordered successors again. */ sbitmap_difference (tmp, tmp, nodes_ordered); sbitmap_a_or_b (workset, workset, tmp); RESET_BIT (workset, v); SET_BIT (nodes_ordered, v); } dir = BOTTOMUP; sbitmap_zero (predecessors); find_predecessors (predecessors, g, nodes_ordered); sbitmap_a_and_b (workset, predecessors, scc); } else { while (!sbitmap_equal (workset, zero_bitmap)) { v = find_max_dv_min_mob (g, workset); v_node = &g->nodes[v]; node_order[pos++] = v; v_node_preds = NODE_PREDECESSORS (v_node); sbitmap_a_and_b (tmp, v_node_preds, scc); /* Don't consider the already ordered predecessors again. */ sbitmap_difference (tmp, tmp, nodes_ordered); sbitmap_a_or_b (workset, workset, tmp); RESET_BIT (workset, v); SET_BIT (nodes_ordered, v); } dir = TOPDOWN; sbitmap_zero (successors); find_successors (successors, g, nodes_ordered); sbitmap_a_and_b (workset, successors, scc); } } sbitmap_free (tmp); sbitmap_free (workset); sbitmap_free (zero_bitmap); sbitmap_free (predecessors); sbitmap_free (successors); return pos; } /* This page contains functions for manipulating partial-schedules during modulo scheduling. */ /* Create a partial schedule and allocate a memory to hold II rows. */ partial_schedule_ptr create_partial_schedule (int ii, ddg_ptr g, int history) { partial_schedule_ptr ps = (partial_schedule_ptr) xmalloc (sizeof (struct partial_schedule)); ps->rows = (ps_insn_ptr *) xcalloc (ii, sizeof (ps_insn_ptr)); ps->ii = ii; ps->history = history; ps->min_cycle = INT_MAX; ps->max_cycle = INT_MIN; ps->g = g; return ps; } /* Free the PS_INSNs in rows array of the given partial schedule. ??? Consider caching the PS_INSN's. */ static void free_ps_insns (partial_schedule_ptr ps) { int i; for (i = 0; i < ps->ii; i++) { while (ps->rows[i]) { ps_insn_ptr ps_insn = ps->rows[i]->next_in_row; free (ps->rows[i]); ps->rows[i] = ps_insn; } ps->rows[i] = NULL; } } /* Free all the memory allocated to the partial schedule. */ void free_partial_schedule (partial_schedule_ptr ps) { if (!ps) return; free_ps_insns (ps); free (ps->rows); free (ps); } /* Clear the rows array with its PS_INSNs, and create a new one with NEW_II rows. 
*/ void reset_partial_schedule (partial_schedule_ptr ps, int new_ii) { if (!ps) return; free_ps_insns (ps); if (new_ii == ps->ii) return; ps->rows = (ps_insn_ptr *) xrealloc (ps->rows, new_ii * sizeof (ps_insn_ptr)); memset (ps->rows, 0, new_ii * sizeof (ps_insn_ptr)); ps->ii = new_ii; ps->min_cycle = INT_MAX; ps->max_cycle = INT_MIN; } /* Prints the partial schedule as an ii rows array, for each rows print the ids of the insns in it. */ void print_partial_schedule (partial_schedule_ptr ps, FILE *dump) { int i; for (i = 0; i < ps->ii; i++) { ps_insn_ptr ps_i = ps->rows[i]; fprintf (dump, "\n[CYCLE %d ]: ", i); while (ps_i) { fprintf (dump, "%d, ", INSN_UID (ps_i->node->insn)); ps_i = ps_i->next_in_row; } } } /* Creates an object of PS_INSN and initializes it to the given parameters. */ static ps_insn_ptr create_ps_insn (ddg_node_ptr node, int rest_count, int cycle) { ps_insn_ptr ps_i = xmalloc (sizeof (struct ps_insn)); ps_i->node = node; ps_i->next_in_row = NULL; ps_i->prev_in_row = NULL; ps_i->row_rest_count = rest_count; ps_i->cycle = cycle; return ps_i; } /* Removes the given PS_INSN from the partial schedule. Returns false if the node is not found in the partial schedule, else returns true. */ static int remove_node_from_ps (partial_schedule_ptr ps, ps_insn_ptr ps_i) { int row; if (!ps || !ps_i) return false; row = SMODULO (ps_i->cycle, ps->ii); if (! ps_i->prev_in_row) { if (ps_i != ps->rows[row]) return false; ps->rows[row] = ps_i->next_in_row; if (ps->rows[row]) ps->rows[row]->prev_in_row = NULL; } else { ps_i->prev_in_row->next_in_row = ps_i->next_in_row; if (ps_i->next_in_row) ps_i->next_in_row->prev_in_row = ps_i->prev_in_row; } free (ps_i); return true; } /* Advances the PS_INSN one column in its current row; returns false in failure and true in success. */ static int ps_insn_advance_column (partial_schedule_ptr ps, ps_insn_ptr ps_i) { ps_insn_ptr prev, next; int row; if (!ps || !ps_i) return false; row = SMODULO (ps_i->cycle, ps->ii); if (! ps_i->next_in_row) return false; /* Check if next_in_row is dependent on ps_i, both having same sched times (typically ANTI_DEP). If so, ps_i cannot skip over it. */ if (ps_i->cycle == ps_i->next_in_row->cycle) { ddg_edge_ptr e; ddg_node_ptr next_node = ps_i->next_in_row->node; for (e = ps_i->node->out; e; e = e->next_out) if (e->dest == next_node) return false; } /* Advace PS_I over its next_in_row in the doubly linked list. */ prev = ps_i->prev_in_row; next = ps_i->next_in_row; if (ps_i == ps->rows[row]) ps->rows[row] = next; ps_i->next_in_row = next->next_in_row; if (next->next_in_row) next->next_in_row->prev_in_row = ps_i; next->next_in_row = ps_i; ps_i->prev_in_row = next; next->prev_in_row = prev; if (prev) prev->next_in_row = next; return true; } /* Inserts a DDG_NODE to the given partial schedule at the given cycle. Returns 0 if this is not possible and a PS_INSN otherwise. */ static ps_insn_ptr add_node_to_ps (partial_schedule_ptr ps, ddg_node_ptr node, int cycle) { ps_insn_ptr ps_i, next_ps_i, advance_after; int rest_count = 1; int row = SMODULO (cycle, ps->ii); ddg_edge_ptr e; if (ps->rows[row] && ps->rows[row]->row_rest_count >= issue_rate) return NULL; if (ps->rows[row]) rest_count += ps->rows[row]->row_rest_count; ps_i = create_ps_insn (node, rest_count, cycle); ps_i->next_in_row = ps->rows[row]; ps_i->prev_in_row = NULL; if (ps_i->next_in_row) ps_i->next_in_row->prev_in_row = ps_i; ps->rows[row] = ps_i; /* Check if n is dependent on an insn already in row, having same cycle (typically ANTI_DEP). If so, n must skip over it. 
*/ advance_after = NULL; for (next_ps_i = ps_i->next_in_row; next_ps_i; next_ps_i = next_ps_i->next_in_row) if (next_ps_i->cycle == cycle) for (e = node->in; e; e = e->next_in) if (e->src == next_ps_i->node) advance_after = next_ps_i; if (advance_after) while (ps_i->prev_in_row != advance_after) if (!ps_insn_advance_column (ps, ps_i)) { remove_node_from_ps (ps, ps_i); return NULL; } return ps_i; } /* Advance time one cycle. Assumes DFA is being used. */ static void advance_one_cycle_modulo (void) { if (targetm.sched.use_dfa_pipeline_interface && (*targetm.sched.use_dfa_pipeline_interface) ()) { if (targetm.sched.dfa_pre_cycle_insn) state_transition (curr_state, (*targetm.sched.dfa_pre_cycle_insn) ()); state_transition (curr_state, NULL); if (targetm.sched.dfa_post_cycle_insn) state_transition (curr_state, (*targetm.sched.dfa_post_cycle_insn) ()); } } /* Checks if PS has resource conflicts according to DFA, starting from FROM cycle to TO cycle; returns true if there are conflicts and false if there are no conflicts. Assumes DFA is being used. */ static int ps_has_conflicts (partial_schedule_ptr ps, int from, int to) { int cycle; if (! targetm.sched.use_dfa_pipeline_interface || ! (*targetm.sched.use_dfa_pipeline_interface) ()) return true; state_reset (curr_state); for (cycle = from; cycle <= to; cycle++) { ps_insn_ptr crr_insn; /* Holds the remaining issue slots in the current row. */ int can_issue_more = issue_rate; /* Walk through the DFA for the current row. */ for (crr_insn = ps->rows[SMODULO (cycle, ps->ii)]; crr_insn; crr_insn = crr_insn->next_in_row) { rtx insn = crr_insn->node->insn; if (!INSN_P (insn)) continue; /* Check if there is room for the current insn. */ if (!can_issue_more || state_dead_lock_p (curr_state)) return true; /* Update the DFA state and return with failure if the DFA found recource conflicts. */ if (state_transition (curr_state, insn) >= 0) return true; if (targetm.sched.variable_issue) can_issue_more = (*targetm.sched.variable_issue) (sched_dump, sched_verbose, insn, can_issue_more); /* A naked CLOBBER or USE generates no instruction, so don't let them consume issue slots. */ else if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER) can_issue_more--; } /* Advance the DFA to the next cycle. */ advance_one_cycle_modulo (); } return false; } /* Checks if the given node causes resource conflicts when added to PS at cycle C. If not the node is added to PS and returned; otherwise zero is returned. */ ps_insn_ptr ps_add_node_check_conflicts (partial_schedule_ptr ps, ddg_node_ptr n, int c) { int has_conflicts = 0; ps_insn_ptr ps_i; /* First add the node to the PS, if this succeeds check for conflicts, trying different issue slots in the same row. */ if (! (ps_i = add_node_to_ps (ps, n, c))) return NULL; /* Failed to insert the node at the given cycle. */ has_conflicts = ps_has_conflicts (ps, c, c) || (ps->history > 0 && ps_has_conflicts (ps, c - ps->history, c + ps->history)); /* Try different issue slots to find one that the given node can be scheduled in without conflicts. */ while (has_conflicts) { if (! ps_insn_advance_column (ps, ps_i)) break; has_conflicts = ps_has_conflicts (ps, c, c) || (ps->history > 0 && ps_has_conflicts (ps, c - ps->history, c + ps->history)); } if (has_conflicts) { remove_node_from_ps (ps, ps_i); return NULL; } ps->min_cycle = MIN (ps->min_cycle, c); ps->max_cycle = MAX (ps->max_cycle, c); return ps_i; } /* Rotate the rows of PS such that insns scheduled at time START_CYCLE will appear in row 0. 
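   For example (editor's illustration, not from the original sources): with ii = 4 and START_CYCLE = 6, backward_rotates = SMODULO (6, 4) = 2, so the insns that were in row 2 end up in row 0.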
Updates max/min_cycles. */ void rotate_partial_schedule (partial_schedule_ptr ps, int start_cycle) { int i, row, backward_rotates; int last_row = ps->ii - 1; if (start_cycle == 0) return; backward_rotates = SMODULO (start_cycle, ps->ii); /* Revisit later and optimize this into a single loop. */ for (i = 0; i < backward_rotates; i++) { ps_insn_ptr first_row = ps->rows[0]; for (row = 0; row < last_row; row++) ps->rows[row] = ps->rows[row+1]; ps->rows[last_row] = first_row; } ps->max_cycle -= start_cycle; ps->min_cycle -= start_cycle; } #endif /* INSN_SCHEDULING*/ /* Expand the basic unary and binary arithmetic operations, for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Include insn-config.h before expr.h so that HAVE_conditional_move is properly defined. */ /* Each optab contains info on how this target machine can perform a particular operation for all sizes and kinds of operands. The operation to be performed is often specified by passing one of these optabs as an argument. See expr.h for documentation of these optabs. */ optab optab_table[OTI_MAX]; rtx libfunc_table[LTI_MAX]; /* Tables of patterns for converting one mode to another. */ convert_optab convert_optab_table[CONVERT_OPTAB_MAX]; /* Contains the optab used for each rtx code. */ optab code_to_optab[NUM_RTX_CODE + 1]; /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...) gives the gen_function to make a branch to test that condition. */ rtxfun bcc_gen_fctn[NUM_RTX_CODE]; /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...) gives the insn code to make a store-condition insn to test that condition. */ enum insn_code setcc_gen_code[NUM_RTX_CODE]; #ifdef HAVE_conditional_move /* Indexed by the machine mode, gives the insn code to make a conditional move insn. This is not indexed by the rtx-code like bcc_gen_fctn and setcc_gen_code to cut down on the number of named patterns. Consider a day when a lot more rtx codes are conditional (eg: for the ARM). */ enum insn_code movcc_gen_code[NUM_MACHINE_MODES]; #endif /* The insn generating function can not take an rtx_code argument. TRAP_RTX is used as an rtx argument. Its code is replaced with the code to be used in the trap insn and all other fields are ignored. 
*/ static GTY(()) rtx trap_rtx; static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx); static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int, int); static int expand_cmplxdiv_straight (rtx, rtx, rtx, rtx, rtx, rtx, enum machine_mode, int, enum optab_methods, enum mode_class, optab); static int expand_cmplxdiv_wide (rtx, rtx, rtx, rtx, rtx, rtx, enum machine_mode, int, enum optab_methods, enum mode_class, optab); static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx, enum machine_mode *, int *, enum can_compare_purpose); static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int, int *); static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int); static optab new_optab (void); static convert_optab new_convert_optab (void); static inline optab init_optab (enum rtx_code); static inline optab init_optabv (enum rtx_code); static inline convert_optab init_convert_optab (enum rtx_code); static void init_libfuncs (optab, int, int, const char *, int); static void init_integral_libfuncs (optab, const char *, int); static void init_floating_libfuncs (optab, const char *, int); static void init_interclass_conv_libfuncs (convert_optab, const char *, enum mode_class, enum mode_class); static void init_intraclass_conv_libfuncs (convert_optab, const char *, enum mode_class, bool); static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode, enum rtx_code, int, rtx); static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *, enum machine_mode *, int *); static rtx expand_vector_binop (enum machine_mode, optab, rtx, rtx, rtx, int, enum optab_methods); static rtx expand_vector_unop (enum machine_mode, optab, rtx, rtx, int); static rtx widen_clz (enum machine_mode, rtx, rtx); static rtx expand_parity (enum machine_mode, rtx, rtx); #ifndef HAVE_conditional_trap #define HAVE_conditional_trap 0 #define gen_conditional_trap(a,b) (abort (), NULL_RTX) #endif /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to the result of operation CODE applied to OP0 (and OP1 if it is a binary operation). If the last insn does not set TARGET, don't do anything, but return 1. If a previous insn sets TARGET and TARGET is one of OP0 or OP1, don't add the REG_EQUAL note but return 0. Our caller can then try again, ensuring that TARGET is not one of the operands. */ static int add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1) { rtx last_insn, insn, set; rtx note; if (! insns || ! INSN_P (insns) || NEXT_INSN (insns) == NULL_RTX) abort (); if (GET_RTX_CLASS (code) != RTX_COMM_ARITH && GET_RTX_CLASS (code) != RTX_BIN_ARITH && GET_RTX_CLASS (code) != RTX_COMM_COMPARE && GET_RTX_CLASS (code) != RTX_COMPARE && GET_RTX_CLASS (code) != RTX_UNARY) return 1; if (GET_CODE (target) == ZERO_EXTRACT) return 1; for (last_insn = insns; NEXT_INSN (last_insn) != NULL_RTX; last_insn = NEXT_INSN (last_insn)) ; set = single_set (last_insn); if (set == NULL_RTX) return 1; if (! rtx_equal_p (SET_DEST (set), target) /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */ && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target))) return 1; /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET besides the last insn. 
*/ if (reg_overlap_mentioned_p (target, op0) || (op1 && reg_overlap_mentioned_p (target, op1))) { insn = PREV_INSN (last_insn); while (insn != NULL_RTX) { if (reg_set_p (target, insn)) return 0; insn = PREV_INSN (insn); } } if (GET_RTX_CLASS (code) == RTX_UNARY) note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0)); else note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1)); set_unique_reg_note (last_insn, REG_EQUAL, note); return 1; } /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need not actually do a sign-extend or zero-extend, but can leave the higher-order bits of the result rtx undefined, for example, in the case of logical operations, but not right shifts. */ static rtx widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode, int unsignedp, int no_extend) { rtx result; /* If we don't have to extend and this is a constant, return it. */ if (no_extend && GET_MODE (op) == VOIDmode) return op; /* If we must extend do so. If OP is a SUBREG for a promoted object, also extend since it will be more efficient to do so unless the signedness of a promoted object differs from our extension. */ if (! no_extend || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op) && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp)) return convert_modes (mode, oldmode, op, unsignedp); /* If MODE is no wider than a single word, we return a paradoxical SUBREG. */ if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0); /* Otherwise, get an object of MODE, clobber it, and set the low-order part to OP. */ result = gen_reg_rtx (mode); emit_insn (gen_rtx_CLOBBER (VOIDmode, result)); emit_move_insn (gen_lowpart (GET_MODE (op), result), op); return result; } /* Generate code to perform a straightforward complex divide. */ static int expand_cmplxdiv_straight (rtx real0, rtx real1, rtx imag0, rtx imag1, rtx realr, rtx imagr, enum machine_mode submode, int unsignedp, enum optab_methods methods, enum mode_class class, optab binoptab) { rtx divisor; rtx real_t, imag_t; rtx temp1, temp2; rtx res; optab this_add_optab = add_optab; optab this_sub_optab = sub_optab; optab this_neg_optab = neg_optab; optab this_mul_optab = smul_optab; if (binoptab == sdivv_optab) { this_add_optab = addv_optab; this_sub_optab = subv_optab; this_neg_optab = negv_optab; this_mul_optab = smulv_optab; } /* Don't fetch these from memory more than once. */ real0 = force_reg (submode, real0); real1 = force_reg (submode, real1); if (imag0 != 0) imag0 = force_reg (submode, imag0); imag1 = force_reg (submode, imag1); /* Divisor: c*c + d*d. */ temp1 = expand_binop (submode, this_mul_optab, real1, real1, NULL_RTX, unsignedp, methods); temp2 = expand_binop (submode, this_mul_optab, imag1, imag1, NULL_RTX, unsignedp, methods); if (temp1 == 0 || temp2 == 0) return 0; divisor = expand_binop (submode, this_add_optab, temp1, temp2, NULL_RTX, unsignedp, methods); if (divisor == 0) return 0; if (imag0 == 0) { /* Mathematically, ((a)(c-id))/divisor. */ /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */ /* Calculate the dividend. 
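   For reference, the full "straight" method this routine open-codes, shown as a
   self-contained sketch over doubles (plain C, illustrative names only; the
   routine itself works on rtl values in SUBMODE):

     struct cplx { double re, im; };

     // (a + ib) / (c + id) = ((ac + bd) + i (bc - ad)) / (cc + dd)
     static struct cplx cdiv_straight (struct cplx x, struct cplx y)
     {
       struct cplx r;
       double divisor = y.re * y.re + y.im * y.im;
       r.re = (x.re * y.re + x.im * y.im) / divisor;
       r.im = (x.im * y.re - x.re * y.im) / divisor;
       return r;
     }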
*/ real_t = expand_binop (submode, this_mul_optab, real0, real1, NULL_RTX, unsignedp, methods); imag_t = expand_binop (submode, this_mul_optab, real0, imag1, NULL_RTX, unsignedp, methods); if (real_t == 0 || imag_t == 0) return 0; imag_t = expand_unop (submode, this_neg_optab, imag_t, NULL_RTX, unsignedp); } else { /* Mathematically, ((a+ib)(c-id))/divider. */ /* Calculate the dividend. */ temp1 = expand_binop (submode, this_mul_optab, real0, real1, NULL_RTX, unsignedp, methods); temp2 = expand_binop (submode, this_mul_optab, imag0, imag1, NULL_RTX, unsignedp, methods); if (temp1 == 0 || temp2 == 0) return 0; real_t = expand_binop (submode, this_add_optab, temp1, temp2, NULL_RTX, unsignedp, methods); temp1 = expand_binop (submode, this_mul_optab, imag0, real1, NULL_RTX, unsignedp, methods); temp2 = expand_binop (submode, this_mul_optab, real0, imag1, NULL_RTX, unsignedp, methods); if (temp1 == 0 || temp2 == 0) return 0; imag_t = expand_binop (submode, this_sub_optab, temp1, temp2, NULL_RTX, unsignedp, methods); if (real_t == 0 || imag_t == 0) return 0; } if (class == MODE_COMPLEX_FLOAT) res = expand_binop (submode, binoptab, real_t, divisor, realr, unsignedp, methods); else res = expand_divmod (0, TRUNC_DIV_EXPR, submode, real_t, divisor, realr, unsignedp); if (res == 0) return 0; if (res != realr) emit_move_insn (realr, res); if (class == MODE_COMPLEX_FLOAT) res = expand_binop (submode, binoptab, imag_t, divisor, imagr, unsignedp, methods); else res = expand_divmod (0, TRUNC_DIV_EXPR, submode, imag_t, divisor, imagr, unsignedp); if (res == 0) return 0; if (res != imagr) emit_move_insn (imagr, res); return 1; } /* Generate code to perform a wide-input-range-acceptable complex divide. */ static int expand_cmplxdiv_wide (rtx real0, rtx real1, rtx imag0, rtx imag1, rtx realr, rtx imagr, enum machine_mode submode, int unsignedp, enum optab_methods methods, enum mode_class class, optab binoptab) { rtx ratio, divisor; rtx real_t, imag_t; rtx temp1, temp2, lab1, lab2; enum machine_mode mode; rtx res; optab this_add_optab = add_optab; optab this_sub_optab = sub_optab; optab this_neg_optab = neg_optab; optab this_mul_optab = smul_optab; if (binoptab == sdivv_optab) { this_add_optab = addv_optab; this_sub_optab = subv_optab; this_neg_optab = negv_optab; this_mul_optab = smulv_optab; } /* Don't fetch these from memory more than once. */ real0 = force_reg (submode, real0); real1 = force_reg (submode, real1); if (imag0 != 0) imag0 = force_reg (submode, imag0); imag1 = force_reg (submode, imag1); /* XXX What's an "unsigned" complex number? */ if (unsignedp) { temp1 = real1; temp2 = imag1; } else { temp1 = expand_abs (submode, real1, NULL_RTX, unsignedp, 1); temp2 = expand_abs (submode, imag1, NULL_RTX, unsignedp, 1); } if (temp1 == 0 || temp2 == 0) return 0; mode = GET_MODE (temp1); lab1 = gen_label_rtx (); emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX, mode, unsignedp, lab1); /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */ if (class == MODE_COMPLEX_FLOAT) ratio = expand_binop (submode, binoptab, imag1, real1, NULL_RTX, unsignedp, methods); else ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode, imag1, real1, NULL_RTX, unsignedp); if (ratio == 0) return 0; /* Calculate divisor. */ temp1 = expand_binop (submode, this_mul_optab, imag1, ratio, NULL_RTX, unsignedp, methods); if (temp1 == 0) return 0; divisor = expand_binop (submode, this_add_optab, temp1, real1, NULL_RTX, unsignedp, methods); if (divisor == 0) return 0; /* Calculate dividend. 
*/ if (imag0 == 0) { real_t = real0; /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */ imag_t = expand_binop (submode, this_mul_optab, real0, ratio, NULL_RTX, unsignedp, methods); if (imag_t == 0) return 0; imag_t = expand_unop (submode, this_neg_optab, imag_t, NULL_RTX, unsignedp); if (real_t == 0 || imag_t == 0) return 0; } else { /* Compute (a+ib)/(c+id) as (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */ temp1 = expand_binop (submode, this_mul_optab, imag0, ratio, NULL_RTX, unsignedp, methods); if (temp1 == 0) return 0; real_t = expand_binop (submode, this_add_optab, temp1, real0, NULL_RTX, unsignedp, methods); temp1 = expand_binop (submode, this_mul_optab, real0, ratio, NULL_RTX, unsignedp, methods); if (temp1 == 0) return 0; imag_t = expand_binop (submode, this_sub_optab, imag0, temp1, NULL_RTX, unsignedp, methods); if (real_t == 0 || imag_t == 0) return 0; } if (class == MODE_COMPLEX_FLOAT) res = expand_binop (submode, binoptab, real_t, divisor, realr, unsignedp, methods); else res = expand_divmod (0, TRUNC_DIV_EXPR, submode, real_t, divisor, realr, unsignedp); if (res == 0) return 0; if (res != realr) emit_move_insn (realr, res); if (class == MODE_COMPLEX_FLOAT) res = expand_binop (submode, binoptab, imag_t, divisor, imagr, unsignedp, methods); else res = expand_divmod (0, TRUNC_DIV_EXPR, submode, imag_t, divisor, imagr, unsignedp); if (res == 0) return 0; if (res != imagr) emit_move_insn (imagr, res); lab2 = gen_label_rtx (); emit_jump_insn (gen_jump (lab2)); emit_barrier (); emit_label (lab1); /* |d| > |c|; use ratio c/d to scale dividend and divisor. */ if (class == MODE_COMPLEX_FLOAT) ratio = expand_binop (submode, binoptab, real1, imag1, NULL_RTX, unsignedp, methods); else ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode, real1, imag1, NULL_RTX, unsignedp); if (ratio == 0) return 0; /* Calculate divisor. */ temp1 = expand_binop (submode, this_mul_optab, real1, ratio, NULL_RTX, unsignedp, methods); if (temp1 == 0) return 0; divisor = expand_binop (submode, this_add_optab, temp1, imag1, NULL_RTX, unsignedp, methods); if (divisor == 0) return 0; /* Calculate dividend. */ if (imag0 == 0) { /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */ real_t = expand_binop (submode, this_mul_optab, real0, ratio, NULL_RTX, unsignedp, methods); imag_t = expand_unop (submode, this_neg_optab, real0, NULL_RTX, unsignedp); if (real_t == 0 || imag_t == 0) return 0; } else { /* Compute (a+ib)/(c+id) as (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). 
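   Both branches of this routine follow the same scaling trick; as a
   self-contained reference over doubles (plain C99, illustrative names, fabs
   from <math.h>; the rtl above reaches the same two formulas):

     #include <math.h>

     struct cplx { double re, im; };

     static struct cplx cdiv_wide (struct cplx x, struct cplx y)
     {
       struct cplx r;
       if (fabs (y.re) >= fabs (y.im))
         {
           // |c| >= |d|: scale dividend and divisor by the ratio d/c.
           double ratio = y.im / y.re;
           double divisor = y.re + y.im * ratio;
           r.re = (x.re + x.im * ratio) / divisor;
           r.im = (x.im - x.re * ratio) / divisor;
         }
       else
         {
           // |d| > |c|: scale dividend and divisor by the ratio c/d.
           double ratio = y.re / y.im;
           double divisor = y.re * ratio + y.im;
           r.re = (x.re * ratio + x.im) / divisor;
           r.im = (x.im * ratio - x.re) / divisor;
         }
       return r;
     }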
*/ temp1 = expand_binop (submode, this_mul_optab, real0, ratio, NULL_RTX, unsignedp, methods); if (temp1 == 0) return 0; real_t = expand_binop (submode, this_add_optab, temp1, imag0, NULL_RTX, unsignedp, methods); temp1 = expand_binop (submode, this_mul_optab, imag0, ratio, NULL_RTX, unsignedp, methods); if (temp1 == 0) return 0; imag_t = expand_binop (submode, this_sub_optab, temp1, real0, NULL_RTX, unsignedp, methods); if (real_t == 0 || imag_t == 0) return 0; } if (class == MODE_COMPLEX_FLOAT) res = expand_binop (submode, binoptab, real_t, divisor, realr, unsignedp, methods); else res = expand_divmod (0, TRUNC_DIV_EXPR, submode, real_t, divisor, realr, unsignedp); if (res == 0) return 0; if (res != realr) emit_move_insn (realr, res); if (class == MODE_COMPLEX_FLOAT) res = expand_binop (submode, binoptab, imag_t, divisor, imagr, unsignedp, methods); else res = expand_divmod (0, TRUNC_DIV_EXPR, submode, imag_t, divisor, imagr, unsignedp); if (res == 0) return 0; if (res != imagr) emit_move_insn (imagr, res); emit_label (lab2); return 1; } /* Wrapper around expand_binop which takes an rtx code to specify the operation to perform, not an optab pointer. All other arguments are the same. */ rtx expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1, rtx target, int unsignedp, enum optab_methods methods) { optab binop = code_to_optab[(int) code]; if (binop == 0) abort (); return expand_binop (mode, binop, op0, op1, target, unsignedp, methods); } /* Generate code to perform an operation specified by BINOPTAB on operands OP0 and OP1, with result having machine-mode MODE. UNSIGNEDP is for the case where we have to widen the operands to perform the operation. It says to use zero-extension. If TARGET is nonzero, the value is generated there, if it is convenient to do so. In all cases an rtx is returned for the locus of the value; this may or may not be TARGET. */ rtx expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1, rtx target, int unsignedp, enum optab_methods methods) { enum optab_methods next_methods = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN ? OPTAB_WIDEN : methods); enum mode_class class; enum machine_mode wider_mode; rtx temp; int commutative_op = 0; int shift_op = (binoptab->code == ASHIFT || binoptab->code == ASHIFTRT || binoptab->code == LSHIFTRT || binoptab->code == ROTATE || binoptab->code == ROTATERT); rtx entry_last = get_last_insn (); rtx last; class = GET_MODE_CLASS (mode); op0 = protect_from_queue (op0, 0); op1 = protect_from_queue (op1, 0); if (target) target = protect_from_queue (target, 1); if (flag_force_mem) { /* Load duplicate non-volatile operands once. */ if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0)) { op0 = force_not_mem (op0); op1 = op0; } else { op0 = force_not_mem (op0); op1 = force_not_mem (op1); } } /* If subtracting an integer constant, convert this into an addition of the negated constant. */ if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT) { op1 = negate_rtx (mode, op1); binoptab = add_optab; } /* If we are inside an appropriately-short loop and one operand is an expensive constant, force it into a register. */ if (CONSTANT_P (op0) && preserve_subexpressions_p () && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1)) op0 = force_reg (mode, op0); if (CONSTANT_P (op1) && preserve_subexpressions_p () && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1)) op1 = force_reg (mode, op1); /* Record where to delete back to if we backtrack. 
*/ last = get_last_insn (); /* If operation is commutative, try to make the first operand a register. Even better, try to make it the same as the target. Also try to make the last operand a constant. */ if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH || binoptab == smul_widen_optab || binoptab == umul_widen_optab || binoptab == smul_highpart_optab || binoptab == umul_highpart_optab) { commutative_op = 1; if (((target == 0 || REG_P (target)) ? ((REG_P (op1) && !REG_P (op0)) || target == op1) : rtx_equal_p (op1, target)) || GET_CODE (op0) == CONST_INT) { temp = op1; op1 = op0; op0 = temp; } } /* If we can do it with a three-operand insn, do so. */ if (methods != OPTAB_MUST_WIDEN && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { int icode = (int) binoptab->handlers[(int) mode].insn_code; enum machine_mode mode0 = insn_data[icode].operand[1].mode; enum machine_mode mode1 = insn_data[icode].operand[2].mode; rtx pat; rtx xop0 = op0, xop1 = op1; if (target) temp = target; else temp = gen_reg_rtx (mode); /* If it is a commutative operator and the modes would match if we would swap the operands, we can save the conversions. */ if (commutative_op) { if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0) { rtx tmp; tmp = op0; op0 = op1; op1 = tmp; tmp = xop0; xop0 = xop1; xop1 = tmp; } } /* In case the insn wants input operands in modes different from those of the actual operands, convert the operands. It would seem that we don't need to convert CONST_INTs, but we do, so that they're properly zero-extended, sign-extended or truncated for their mode. */ if (GET_MODE (op0) != mode0 && mode0 != VOIDmode) xop0 = convert_modes (mode0, GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : mode, xop0, unsignedp); if (GET_MODE (op1) != mode1 && mode1 != VOIDmode) xop1 = convert_modes (mode1, GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : mode, xop1, unsignedp); /* Now, if insn's predicates don't allow our operands, put them into pseudo regs. */ if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0) && mode0 != VOIDmode) xop0 = copy_to_mode_reg (mode0, xop0); if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1) && mode1 != VOIDmode) xop1 = copy_to_mode_reg (mode1, xop1); if (! (*insn_data[icode].operand[0].predicate) (temp, mode)) temp = gen_reg_rtx (mode); pat = GEN_FCN (icode) (temp, xop0, xop1); if (pat) { /* If PAT is composed of more than one insn, try to add an appropriate REG_EQUAL note to it. If we can't because TEMP conflicts with an operand, call ourselves again, this time without a target. */ if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1)) { delete_insns_since (last); return expand_binop (mode, binoptab, op0, op1, NULL_RTX, unsignedp, methods); } emit_insn (pat); return temp; } else delete_insns_since (last); } /* If this is a multiply, see if we can do a widening operation that takes operands of this mode and makes a wider mode. */ if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode && (((unsignedp ? umul_widen_optab : smul_widen_optab) ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code) != CODE_FOR_nothing)) { temp = expand_binop (GET_MODE_WIDER_MODE (mode), unsignedp ? 
umul_widen_optab : smul_widen_optab, op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT); if (temp != 0) { if (GET_MODE_CLASS (mode) == MODE_INT) return gen_lowpart (mode, temp); else return convert_to_mode (mode, temp, unsignedp); } } /* Look for a wider mode of the same class for which we think we can open-code the operation. Check for a widening multiply at the wider mode as well. */ if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) && methods != OPTAB_DIRECT && methods != OPTAB_LIB) for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing || (binoptab == smul_optab && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode && (((unsignedp ? umul_widen_optab : smul_widen_optab) ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code) != CODE_FOR_nothing))) { rtx xop0 = op0, xop1 = op1; int no_extend = 0; /* For certain integer operations, we need not actually extend the narrow operands, as long as we will truncate the results to the same narrowness. */ if ((binoptab == ior_optab || binoptab == and_optab || binoptab == xor_optab || binoptab == add_optab || binoptab == sub_optab || binoptab == smul_optab || binoptab == ashl_optab) && class == MODE_INT) no_extend = 1; xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend); /* The second operand of a shift must always be extended. */ xop1 = widen_operand (xop1, wider_mode, mode, unsignedp, no_extend && binoptab != ashl_optab); temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX, unsignedp, OPTAB_DIRECT); if (temp) { if (class != MODE_INT) { if (target == 0) target = gen_reg_rtx (mode); convert_move (target, temp, 0); return target; } else return gen_lowpart (mode, temp); } else delete_insns_since (last); } } /* These can be done a word at a time. */ if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab) && class == MODE_INT && GET_MODE_SIZE (mode) > UNITS_PER_WORD && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing) { int i; rtx insns; rtx equiv_value; /* If TARGET is the same as one of the operands, the REG_EQUAL note won't be accurate, so use a new target. */ if (target == 0 || target == op0 || target == op1) target = gen_reg_rtx (mode); start_sequence (); /* Do the actual arithmetic. */ for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++) { rtx target_piece = operand_subword (target, i, 1, mode); rtx x = expand_binop (word_mode, binoptab, operand_subword_force (op0, i, mode), operand_subword_force (op1, i, mode), target_piece, unsignedp, next_methods); if (x == 0) break; if (target_piece != x) emit_move_insn (target_piece, x); } insns = get_insns (); end_sequence (); if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD) { if (binoptab->code != UNKNOWN) equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, copy_rtx (op0), copy_rtx (op1)); else equiv_value = 0; emit_no_conflict_block (insns, target, op0, op1, equiv_value); return target; } } /* Synthesize double word shifts from single word shifts. 
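   A self-contained sketch of the transformation below, for the simplest case
   only: a logical left shift of a two-word value held as explicit 32-bit halves
   (plain C99, illustrative names).  The rtl code additionally handles right
   shifts, arithmetic sign-fill and WORDS_BIG_ENDIAN:

     #include <stdint.h>

     struct dword { uint32_t lo, hi; };   // low word first

     // Shift left by COUNT, 0 <= COUNT < 64.
     static struct dword dshl (struct dword x, unsigned int count)
     {
       struct dword r;
       if (count >= 32)
         {
           // Everything comes from the low word; the low word becomes zero.
           r.hi = x.lo << (count - 32);
           r.lo = 0;
         }
       else if (count == 0)
         r = x;
       else
         {
           // Bits shifted out of the low word carry into the high word.
           r.hi = (x.hi << count) | (x.lo >> (32 - count));
           r.lo = x.lo << count;
         }
       return r;
     }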
*/ if ((binoptab == lshr_optab || binoptab == ashl_optab || binoptab == ashr_optab) && class == MODE_INT && GET_CODE (op1) == CONST_INT && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing) { rtx insns, inter, equiv_value; rtx into_target, outof_target; rtx into_input, outof_input; int shift_count, left_shift, outof_word; /* If TARGET is the same as one of the operands, the REG_EQUAL note won't be accurate, so use a new target. */ if (target == 0 || target == op0 || target == op1) target = gen_reg_rtx (mode); start_sequence (); shift_count = INTVAL (op1); /* OUTOF_* is the word we are shifting bits away from, and INTO_* is the word that we are shifting bits towards, thus they differ depending on the direction of the shift and WORDS_BIG_ENDIAN. */ left_shift = binoptab == ashl_optab; outof_word = left_shift ^ ! WORDS_BIG_ENDIAN; outof_target = operand_subword (target, outof_word, 1, mode); into_target = operand_subword (target, 1 - outof_word, 1, mode); outof_input = operand_subword_force (op0, outof_word, mode); into_input = operand_subword_force (op0, 1 - outof_word, mode); if (shift_count >= BITS_PER_WORD) { inter = expand_binop (word_mode, binoptab, outof_input, GEN_INT (shift_count - BITS_PER_WORD), into_target, unsignedp, next_methods); if (inter != 0 && inter != into_target) emit_move_insn (into_target, inter); /* For a signed right shift, we must fill the word we are shifting out of with copies of the sign bit. Otherwise it is zeroed. */ if (inter != 0 && binoptab != ashr_optab) inter = CONST0_RTX (word_mode); else if (inter != 0) inter = expand_binop (word_mode, binoptab, outof_input, GEN_INT (BITS_PER_WORD - 1), outof_target, unsignedp, next_methods); if (inter != 0 && inter != outof_target) emit_move_insn (outof_target, inter); } else { rtx carries; optab reverse_unsigned_shift, unsigned_shift; /* For a shift of less then BITS_PER_WORD, to compute the carry, we must do a logical shift in the opposite direction of the desired shift. */ reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab); /* For a shift of less than BITS_PER_WORD, to compute the word shifted towards, we need to unsigned shift the orig value of that word. */ unsigned_shift = (left_shift ? ashl_optab : lshr_optab); carries = expand_binop (word_mode, reverse_unsigned_shift, outof_input, GEN_INT (BITS_PER_WORD - shift_count), 0, unsignedp, next_methods); if (carries == 0) inter = 0; else inter = expand_binop (word_mode, unsigned_shift, into_input, op1, 0, unsignedp, next_methods); if (inter != 0) inter = expand_binop (word_mode, ior_optab, carries, inter, into_target, unsignedp, next_methods); if (inter != 0 && inter != into_target) emit_move_insn (into_target, inter); if (inter != 0) inter = expand_binop (word_mode, binoptab, outof_input, op1, outof_target, unsignedp, next_methods); if (inter != 0 && inter != outof_target) emit_move_insn (outof_target, inter); } insns = get_insns (); end_sequence (); if (inter != 0) { if (binoptab->code != UNKNOWN) equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1); else equiv_value = 0; emit_no_conflict_block (insns, target, op0, op1, equiv_value); return target; } } /* Synthesize double word rotates from single word shifts. 
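   Again as a self-contained sketch on explicit 32-bit halves (plain C99,
   illustrative names; the rtl below also covers WORDS_BIG_ENDIAN and reduces to
   a plain word swap when the count equals BITS_PER_WORD):

     #include <stdint.h>

     struct dword { uint32_t lo, hi; };   // low word first

     // Rotate left by COUNT, 0 < COUNT < 64.
     static struct dword drotl (struct dword x, unsigned int count)
     {
       struct dword r;
       if (count == 32)
         {
           // Just a word swap.
           r.lo = x.hi;
           r.hi = x.lo;
         }
       else if (count < 32)
         {
           r.hi = (x.hi << count) | (x.lo >> (32 - count));
           r.lo = (x.lo << count) | (x.hi >> (32 - count));
         }
       else
         {
           unsigned int c = count - 32;   // swap the words, then rotate by c
           r.hi = (x.lo << c) | (x.hi >> (32 - c));
           r.lo = (x.hi << c) | (x.lo >> (32 - c));
         }
       return r;
     }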
*/ if ((binoptab == rotl_optab || binoptab == rotr_optab) && class == MODE_INT && GET_CODE (op1) == CONST_INT && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing) { rtx insns, equiv_value; rtx into_target, outof_target; rtx into_input, outof_input; rtx inter; int shift_count, left_shift, outof_word; /* If TARGET is the same as one of the operands, the REG_EQUAL note won't be accurate, so use a new target. Do this also if target is not a REG, first because having a register instead may open optimization opportunities, and second because if target and op0 happen to be MEMs designating the same location, we would risk clobbering it too early in the code sequence we generate below. */ if (target == 0 || target == op0 || target == op1 || ! REG_P (target)) target = gen_reg_rtx (mode); start_sequence (); shift_count = INTVAL (op1); /* OUTOF_* is the word we are shifting bits away from, and INTO_* is the word that we are shifting bits towards, thus they differ depending on the direction of the shift and WORDS_BIG_ENDIAN. */ left_shift = (binoptab == rotl_optab); outof_word = left_shift ^ ! WORDS_BIG_ENDIAN; outof_target = operand_subword (target, outof_word, 1, mode); into_target = operand_subword (target, 1 - outof_word, 1, mode); outof_input = operand_subword_force (op0, outof_word, mode); into_input = operand_subword_force (op0, 1 - outof_word, mode); if (shift_count == BITS_PER_WORD) { /* This is just a word swap. */ emit_move_insn (outof_target, into_input); emit_move_insn (into_target, outof_input); inter = const0_rtx; } else { rtx into_temp1, into_temp2, outof_temp1, outof_temp2; rtx first_shift_count, second_shift_count; optab reverse_unsigned_shift, unsigned_shift; reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD) ? lshr_optab : ashl_optab); unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD) ? 
ashl_optab : lshr_optab); if (shift_count > BITS_PER_WORD) { first_shift_count = GEN_INT (shift_count - BITS_PER_WORD); second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count); } else { first_shift_count = GEN_INT (BITS_PER_WORD - shift_count); second_shift_count = GEN_INT (shift_count); } into_temp1 = expand_binop (word_mode, unsigned_shift, outof_input, first_shift_count, NULL_RTX, unsignedp, next_methods); into_temp2 = expand_binop (word_mode, reverse_unsigned_shift, into_input, second_shift_count, NULL_RTX, unsignedp, next_methods); if (into_temp1 != 0 && into_temp2 != 0) inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2, into_target, unsignedp, next_methods); else inter = 0; if (inter != 0 && inter != into_target) emit_move_insn (into_target, inter); outof_temp1 = expand_binop (word_mode, unsigned_shift, into_input, first_shift_count, NULL_RTX, unsignedp, next_methods); outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift, outof_input, second_shift_count, NULL_RTX, unsignedp, next_methods); if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0) inter = expand_binop (word_mode, ior_optab, outof_temp1, outof_temp2, outof_target, unsignedp, next_methods); if (inter != 0 && inter != outof_target) emit_move_insn (outof_target, inter); } insns = get_insns (); end_sequence (); if (inter != 0) { if (binoptab->code != UNKNOWN) equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1); else equiv_value = 0; /* We can't make this a no conflict block if this is a word swap, because the word swap case fails if the input and output values are in the same register. */ if (shift_count != BITS_PER_WORD) emit_no_conflict_block (insns, target, op0, op1, equiv_value); else emit_insn (insns); return target; } } /* These can be done a word at a time by propagating carries. */ if ((binoptab == add_optab || binoptab == sub_optab) && class == MODE_INT && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing) { unsigned int i; optab otheroptab = binoptab == add_optab ? sub_optab : add_optab; const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; rtx carry_in = NULL_RTX, carry_out = NULL_RTX; rtx xop0, xop1, xtarget; /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG value is one of those, use it. Otherwise, use 1 since it is the one easiest to get. */ #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1 int normalizep = STORE_FLAG_VALUE; #else int normalizep = 1; #endif /* Prepare the operands. */ xop0 = force_reg (mode, op0); xop1 = force_reg (mode, op1); xtarget = gen_reg_rtx (mode); if (target == 0 || !REG_P (target)) target = xtarget; /* Indicate for flow that the entire target reg is being set. */ if (REG_P (target)) emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget)); /* Do the actual arithmetic. */ for (i = 0; i < nwords; i++) { int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i); rtx target_piece = operand_subword (xtarget, index, 1, mode); rtx op0_piece = operand_subword_force (xop0, index, mode); rtx op1_piece = operand_subword_force (xop1, index, mode); rtx x; /* Main add/subtract of the input operands. */ x = expand_binop (word_mode, binoptab, op0_piece, op1_piece, target_piece, unsignedp, next_methods); if (x == 0) break; if (i + 1 < nwords) { /* Store carry from main add/subtract. */ carry_out = gen_reg_rtx (word_mode); carry_out = emit_store_flag_force (carry_out, (binoptab == add_optab ? 
LT : GT), x, op0_piece, word_mode, 1, normalizep); } if (i > 0) { rtx newx; /* Add/subtract previous carry to main result. */ newx = expand_binop (word_mode, normalizep == 1 ? binoptab : otheroptab, x, carry_in, NULL_RTX, 1, next_methods); if (i + 1 < nwords) { /* Get out carry from adding/subtracting carry in. */ rtx carry_tmp = gen_reg_rtx (word_mode); carry_tmp = emit_store_flag_force (carry_tmp, (binoptab == add_optab ? LT : GT), newx, x, word_mode, 1, normalizep); /* Logical-ior the two poss. carry together. */ carry_out = expand_binop (word_mode, ior_optab, carry_out, carry_tmp, carry_out, 0, next_methods); if (carry_out == 0) break; } emit_move_insn (target_piece, newx); } carry_in = carry_out; } if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD) { if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing || ! rtx_equal_p (target, xtarget)) { rtx temp = emit_move_insn (target, xtarget); set_unique_reg_note (temp, REG_EQUAL, gen_rtx_fmt_ee (binoptab->code, mode, copy_rtx (xop0), copy_rtx (xop1))); } else target = xtarget; return target; } else delete_insns_since (last); } /* If we want to multiply two two-word values and have normal and widening multiplies of single-word values, we can do this with three smaller multiplications. Note that we do not make a REG_NO_CONFLICT block here because we are not operating on one word at a time. The multiplication proceeds as follows: _______________________ [__op0_high_|__op0_low__] _______________________ * [__op1_high_|__op1_low__] _______________________________________________ _______________________ (1) [__op0_low__*__op1_low__] _______________________ (2a) [__op0_low__*__op1_high_] _______________________ (2b) [__op0_high_*__op1_low__] _______________________ (3) [__op0_high_*__op1_high_] This gives a 4-word result. Since we are only interested in the lower 2 words, partial result (3) and the upper words of (2a) and (2b) don't need to be calculated. Hence (2a) and (2b) can be calculated using non-widening multiplication. (1), however, needs to be calculated with an unsigned widening multiplication. If this operation is not directly supported we try using a signed widening multiplication and adjust the result. This adjustment works as follows: If both operands are positive then no adjustment is needed. If the operands have different signs, for example op0_low < 0 and op1_low >= 0, the instruction treats the most significant bit of op0_low as a sign bit instead of a bit with significance 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low with 2**BITS_PER_WORD - op0_low, and two's complements the result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to the result. Similarly, if both operands are negative, we need to add (op0_low + op1_low) * 2**BITS_PER_WORD. We use a trick to adjust quickly. We logically shift op0_low right (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to op0_high (op1_high) before it is used to calculate 2b (2a). If no logical shift exists, we do an arithmetic right shift and subtract the 0 or -1. */ if (binoptab == smul_optab && class == MODE_INT && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing && ((umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) || (smul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing))) { int low = (WORDS_BIG_ENDIAN ? 1 : 0); int high = (WORDS_BIG_ENDIAN ? 
0 : 1); rtx op0_high = operand_subword_force (op0, high, mode); rtx op0_low = operand_subword_force (op0, low, mode); rtx op1_high = operand_subword_force (op1, high, mode); rtx op1_low = operand_subword_force (op1, low, mode); rtx product = 0; rtx op0_xhigh = NULL_RTX; rtx op1_xhigh = NULL_RTX; /* If the target is the same as one of the inputs, don't use it. This prevents problems with the REG_EQUAL note. */ if (target == op0 || target == op1 || (target != 0 && !REG_P (target))) target = 0; /* Multiply the two lower words to get a double-word product. If unsigned widening multiplication is available, use that; otherwise use the signed form and compensate. */ if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { product = expand_binop (mode, umul_widen_optab, op0_low, op1_low, target, 1, OPTAB_DIRECT); /* If we didn't succeed, delete everything we did so far. */ if (product == 0) delete_insns_since (last); else op0_xhigh = op0_high, op1_xhigh = op1_high; } if (product == 0 && smul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { rtx wordm1 = GEN_INT (BITS_PER_WORD - 1); product = expand_binop (mode, smul_widen_optab, op0_low, op1_low, target, 1, OPTAB_DIRECT); op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1, NULL_RTX, 1, next_methods); if (op0_xhigh) op0_xhigh = expand_binop (word_mode, add_optab, op0_high, op0_xhigh, op0_xhigh, 0, next_methods); else { op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1, NULL_RTX, 0, next_methods); if (op0_xhigh) op0_xhigh = expand_binop (word_mode, sub_optab, op0_high, op0_xhigh, op0_xhigh, 0, next_methods); } op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1, NULL_RTX, 1, next_methods); if (op1_xhigh) op1_xhigh = expand_binop (word_mode, add_optab, op1_high, op1_xhigh, op1_xhigh, 0, next_methods); else { op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1, NULL_RTX, 0, next_methods); if (op1_xhigh) op1_xhigh = expand_binop (word_mode, sub_optab, op1_high, op1_xhigh, op1_xhigh, 0, next_methods); } } /* If we have been able to directly compute the product of the low-order words of the operands and perform any required adjustments of the operands, we proceed by trying two more multiplications and then computing the appropriate sum. We have checked above that the required addition is provided. Full-word addition will normally always succeed, especially if it is provided at all, so we don't worry about its failure. The multiplication may well fail, however, so we do handle that. 
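   In integer terms, the computation performed here is the following sketch
   (self-contained C99, illustrative names; this is the easy case where the
   widening multiply of the low words is available unsigned, so no sign
   compensation is needed):

     #include <stdint.h>

     // Low 64 bits of a 64 x 64 multiply built from one widening
     // 32 x 32 -> 64 multiply plus the two non-widening cross products,
     // i.e. the partial results labelled (1), (2a) and (2b) above.
     static uint64_t dmul_low (uint32_t a_lo, uint32_t a_hi,
                               uint32_t b_lo, uint32_t b_hi)
     {
       uint64_t product = (uint64_t) a_lo * b_lo;        // (1), widening
       uint32_t high = (uint32_t) (product >> 32);
       high += a_lo * b_hi;                              // (2a), low word only
       high += a_hi * b_lo;                              // (2b), low word only
       return ((uint64_t) high << 32) | (uint32_t) product;
     }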
*/ if (product && op0_xhigh && op1_xhigh) { rtx product_high = operand_subword (product, high, 1, mode); rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh, NULL_RTX, 0, OPTAB_DIRECT); if (!REG_P (product_high)) product_high = force_reg (word_mode, product_high); if (temp != 0) temp = expand_binop (word_mode, add_optab, temp, product_high, product_high, 0, next_methods); if (temp != 0 && temp != product_high) emit_move_insn (product_high, temp); if (temp != 0) temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh, NULL_RTX, 0, OPTAB_DIRECT); if (temp != 0) temp = expand_binop (word_mode, add_optab, temp, product_high, product_high, 0, next_methods); if (temp != 0 && temp != product_high) emit_move_insn (product_high, temp); emit_move_insn (operand_subword (product, high, 1, mode), product_high); if (temp != 0) { if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { temp = emit_move_insn (product, product); set_unique_reg_note (temp, REG_EQUAL, gen_rtx_fmt_ee (MULT, mode, copy_rtx (op0), copy_rtx (op1))); } return product; } } /* If we get here, we couldn't do it for some reason even though we originally thought we could. Delete anything we've emitted in trying to do it. */ delete_insns_since (last); } /* Open-code the vector operations if we have no hardware support for them. */ if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT) return expand_vector_binop (mode, binoptab, op0, op1, target, unsignedp, methods); /* We need to open-code the complex type operations: '+, -, * and /' */ /* At this point we allow operations between two similar complex numbers, and also if one of the operands is not a complex number but rather of MODE_FLOAT or MODE_INT. However, the caller must make sure that the MODE of the non-complex operand matches the SUBMODE of the complex operand. */ if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT) { rtx real0 = 0, imag0 = 0; rtx real1 = 0, imag1 = 0; rtx realr, imagr, res; rtx seq, result; int ok = 0; /* Find the correct mode for the real and imaginary parts. */ enum machine_mode submode = GET_MODE_INNER (mode); if (submode == BLKmode) abort (); start_sequence (); if (GET_MODE (op0) == mode) { real0 = gen_realpart (submode, op0); imag0 = gen_imagpart (submode, op0); } else real0 = op0; if (GET_MODE (op1) == mode) { real1 = gen_realpart (submode, op1); imag1 = gen_imagpart (submode, op1); } else real1 = op1; if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0)) abort (); result = gen_reg_rtx (mode); realr = gen_realpart (submode, result); imagr = gen_imagpart (submode, result); switch (binoptab->code) { case PLUS: /* (a+ib) + (c+id) = (a+c) + i(b+d) */ case MINUS: /* (a+ib) - (c+id) = (a-c) + i(b-d) */ res = expand_binop (submode, binoptab, real0, real1, realr, unsignedp, methods); if (res == 0) break; else if (res != realr) emit_move_insn (realr, res); if (imag0 != 0 && imag1 != 0) res = expand_binop (submode, binoptab, imag0, imag1, imagr, unsignedp, methods); else if (imag0 != 0) res = imag0; else if (binoptab->code == MINUS) res = expand_unop (submode, binoptab == subv_optab ? negv_optab : neg_optab, imag1, imagr, unsignedp); else res = imag1; if (res == 0) break; else if (res != imagr) emit_move_insn (imagr, res); ok = 1; break; case MULT: /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */ if (imag0 != 0 && imag1 != 0) { rtx temp1, temp2; /* Don't fetch these from memory more than once. 
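   As a self-contained reference for this MULT case, over doubles (illustrative
   names only, and without the squaring shortcut used below):

     struct cplx { double re, im; };

     static struct cplx cmul (struct cplx x, struct cplx y)
     {
       struct cplx r;
       r.re = x.re * y.re - x.im * y.im;   // ac - bd
       r.im = x.re * y.im + x.im * y.re;   // ad + cb
       return r;
     }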
*/ real0 = force_reg (submode, real0); real1 = force_reg (submode, real1); imag0 = force_reg (submode, imag0); imag1 = force_reg (submode, imag1); temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX, unsignedp, methods); temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX, unsignedp, methods); if (temp1 == 0 || temp2 == 0) break; res = (expand_binop (submode, binoptab == smulv_optab ? subv_optab : sub_optab, temp1, temp2, realr, unsignedp, methods)); if (res == 0) break; else if (res != realr) emit_move_insn (realr, res); temp1 = expand_binop (submode, binoptab, real0, imag1, NULL_RTX, unsignedp, methods); /* Avoid expanding redundant multiplication for the common case of squaring a complex number. */ if (rtx_equal_p (real0, real1) && rtx_equal_p (imag0, imag1)) temp2 = temp1; else temp2 = expand_binop (submode, binoptab, real1, imag0, NULL_RTX, unsignedp, methods); if (temp1 == 0 || temp2 == 0) break; res = (expand_binop (submode, binoptab == smulv_optab ? addv_optab : add_optab, temp1, temp2, imagr, unsignedp, methods)); if (res == 0) break; else if (res != imagr) emit_move_insn (imagr, res); ok = 1; } else { /* Don't fetch these from memory more than once. */ real0 = force_reg (submode, real0); real1 = force_reg (submode, real1); res = expand_binop (submode, binoptab, real0, real1, realr, unsignedp, methods); if (res == 0) break; else if (res != realr) emit_move_insn (realr, res); if (imag0 != 0) res = expand_binop (submode, binoptab, real1, imag0, imagr, unsignedp, methods); else res = expand_binop (submode, binoptab, real0, imag1, imagr, unsignedp, methods); if (res == 0) break; else if (res != imagr) emit_move_insn (imagr, res); ok = 1; } break; case DIV: /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */ if (imag1 == 0) { /* (a+ib) / (c+i0) = (a/c) + i(b/c) */ /* Don't fetch these from memory more than once. */ real1 = force_reg (submode, real1); /* Simply divide the real and imaginary parts by `c' */ if (class == MODE_COMPLEX_FLOAT) res = expand_binop (submode, binoptab, real0, real1, realr, unsignedp, methods); else res = expand_divmod (0, TRUNC_DIV_EXPR, submode, real0, real1, realr, unsignedp); if (res == 0) break; else if (res != realr) emit_move_insn (realr, res); if (class == MODE_COMPLEX_FLOAT) res = expand_binop (submode, binoptab, imag0, real1, imagr, unsignedp, methods); else res = expand_divmod (0, TRUNC_DIV_EXPR, submode, imag0, real1, imagr, unsignedp); if (res == 0) break; else if (res != imagr) emit_move_insn (imagr, res); ok = 1; } else { switch (flag_complex_divide_method) { case 0: ok = expand_cmplxdiv_straight (real0, real1, imag0, imag1, realr, imagr, submode, unsignedp, methods, class, binoptab); break; case 1: ok = expand_cmplxdiv_wide (real0, real1, imag0, imag1, realr, imagr, submode, unsignedp, methods, class, binoptab); break; default: abort (); } } break; default: abort (); } seq = get_insns (); end_sequence (); if (ok) { rtx equiv = gen_rtx_fmt_ee (binoptab->code, mode, copy_rtx (op0), copy_rtx (op1)); emit_no_conflict_block (seq, result, op0, op1, equiv); return result; } } /* It can't be open-coded in this mode. Use a library call if one is available and caller says that's ok. */ if (binoptab->handlers[(int) mode].libfunc && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN)) { rtx insns; rtx op1x = op1; enum machine_mode op1_mode = mode; rtx value; start_sequence (); if (shift_op) { op1_mode = word_mode; /* Specify unsigned here, since negative shift counts are meaningless. 
*/ op1x = convert_to_mode (word_mode, op1, 1); } if (GET_MODE (op0) != VOIDmode && GET_MODE (op0) != mode) op0 = convert_to_mode (mode, op0, unsignedp); /* Pass 1 for NO_QUEUE so we don't lose any increments if the libcall is cse'd or moved. */ value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc, NULL_RTX, LCT_CONST, mode, 2, op0, mode, op1x, op1_mode); insns = get_insns (); end_sequence (); target = gen_reg_rtx (mode); emit_libcall_block (insns, target, value, gen_rtx_fmt_ee (binoptab->code, mode, op0, op1)); return target; } delete_insns_since (last); /* It can't be done in this mode. Can we do it in a wider mode? */ if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN || methods == OPTAB_MUST_WIDEN)) { /* Caller says, don't even try. */ delete_insns_since (entry_last); return 0; } /* Compute the value of METHODS to pass to recursive calls. Don't allow widening to be tried recursively. */ methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT); /* Look for a wider mode of the same class for which it appears we can do the operation. */ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) { for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if ((binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) || (methods == OPTAB_LIB && binoptab->handlers[(int) wider_mode].libfunc)) { rtx xop0 = op0, xop1 = op1; int no_extend = 0; /* For certain integer operations, we need not actually extend the narrow operands, as long as we will truncate the results to the same narrowness. */ if ((binoptab == ior_optab || binoptab == and_optab || binoptab == xor_optab || binoptab == add_optab || binoptab == sub_optab || binoptab == smul_optab || binoptab == ashl_optab) && class == MODE_INT) no_extend = 1; xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend); /* The second operand of a shift must always be extended. */ xop1 = widen_operand (xop1, wider_mode, mode, unsignedp, no_extend && binoptab != ashl_optab); temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX, unsignedp, methods); if (temp) { if (class != MODE_INT) { if (target == 0) target = gen_reg_rtx (mode); convert_move (target, temp, 0); return target; } else return gen_lowpart (mode, temp); } else delete_insns_since (last); } } } delete_insns_since (entry_last); return 0; } /* Like expand_binop, but for open-coding vectors binops. */ static rtx expand_vector_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1, rtx target, int unsignedp, enum optab_methods methods) { enum machine_mode submode, tmode; int size, elts, subsize, subbitsize, i; rtx t, a, b, res, seq; enum mode_class class; class = GET_MODE_CLASS (mode); size = GET_MODE_SIZE (mode); submode = GET_MODE_INNER (mode); /* Search for the widest vector mode with the same inner mode that is still narrower than MODE and that allows to open-code this operator. Note, if we find such a mode and the handler later decides it can't do the expansion, we'll be called recursively with the narrower mode. 
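   The effect being open-coded here, stripped of the subreg and bit-field
   mechanics, is simply an element-wise loop; as a self-contained C99 sketch
   with illustrative names and a fixed element count:

     #include <stdint.h>

     enum { NELTS = 4 };

     // Fallback for a vector add when no whole-vector pattern exists.
     static void vadd_open_coded (const int32_t a[NELTS],
                                  const int32_t b[NELTS],
                                  int32_t out[NELTS])
     {
       int i;
       for (i = 0; i < NELTS; i++)
         out[i] = a[i] + b[i];
     }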
*/ for (tmode = GET_CLASS_NARROWEST_MODE (class); GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode); tmode = GET_MODE_WIDER_MODE (tmode)) { if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode) && binoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing) submode = tmode; } switch (binoptab->code) { case AND: case IOR: case XOR: tmode = int_mode_for_mode (mode); if (tmode != BLKmode) submode = tmode; case PLUS: case MINUS: case MULT: case DIV: subsize = GET_MODE_SIZE (submode); subbitsize = GET_MODE_BITSIZE (submode); elts = size / subsize; /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode, but that we operate on more than one element at a time. */ if (subsize == GET_MODE_UNIT_SIZE (mode) && methods == OPTAB_DIRECT) return 0; start_sequence (); /* Errors can leave us with a const0_rtx as operand. */ if (GET_MODE (op0) != mode) op0 = copy_to_mode_reg (mode, op0); if (GET_MODE (op1) != mode) op1 = copy_to_mode_reg (mode, op1); if (!target) target = gen_reg_rtx (mode); for (i = 0; i < elts; ++i) { /* If this is part of a register, and not the first item in the word, we can't store using a SUBREG - that would clobber previous results. And storing with a SUBREG is only possible for the least significant part, hence we can't do it for big endian (unless we want to permute the evaluation order. */ if (REG_P (target) && (BYTES_BIG_ENDIAN ? subsize < UNITS_PER_WORD : ((i * subsize) % UNITS_PER_WORD) != 0)) t = NULL_RTX; else t = simplify_gen_subreg (submode, target, mode, i * subsize); if (CONSTANT_P (op0)) a = simplify_gen_subreg (submode, op0, mode, i * subsize); else a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp, NULL_RTX, submode, submode, size); if (CONSTANT_P (op1)) b = simplify_gen_subreg (submode, op1, mode, i * subsize); else b = extract_bit_field (op1, subbitsize, i * subbitsize, unsignedp, NULL_RTX, submode, submode, size); if (binoptab->code == DIV) { if (class == MODE_VECTOR_FLOAT) res = expand_binop (submode, binoptab, a, b, t, unsignedp, methods); else res = expand_divmod (0, TRUNC_DIV_EXPR, submode, a, b, t, unsignedp); } else res = expand_binop (submode, binoptab, a, b, t, unsignedp, methods); if (res == 0) break; if (t) emit_move_insn (t, res); else store_bit_field (target, subbitsize, i * subbitsize, submode, res, size); } break; default: abort (); } seq = get_insns (); end_sequence (); emit_insn (seq); return target; } /* Like expand_unop but for open-coding vector unops. */ static rtx expand_vector_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target, int unsignedp) { enum machine_mode submode, tmode; int size, elts, subsize, subbitsize, i; rtx t, a, res, seq; size = GET_MODE_SIZE (mode); submode = GET_MODE_INNER (mode); /* Search for the widest vector mode with the same inner mode that is still narrower than MODE and that allows to open-code this operator. Note, if we find such a mode and the handler later decides it can't do the expansion, we'll be called recursively with the narrower mode. */ for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode)); GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode); tmode = GET_MODE_WIDER_MODE (tmode)) { if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode) && unoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing) submode = tmode; } /* If there is no negate operation, try doing a subtract from zero. */ if (unoptab == neg_optab && GET_MODE_CLASS (submode) == MODE_INT /* Avoid infinite recursion when an error has left us with the wrong mode. 
*/ && GET_MODE (op0) == mode) { rtx temp; temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0, target, unsignedp, OPTAB_DIRECT); if (temp) return temp; } if (unoptab == one_cmpl_optab) { tmode = int_mode_for_mode (mode); if (tmode != BLKmode) submode = tmode; } subsize = GET_MODE_SIZE (submode); subbitsize = GET_MODE_BITSIZE (submode); elts = size / subsize; /* Errors can leave us with a const0_rtx as operand. */ if (GET_MODE (op0) != mode) op0 = copy_to_mode_reg (mode, op0); if (!target) target = gen_reg_rtx (mode); start_sequence (); for (i = 0; i < elts; ++i) { /* If this is part of a register, and not the first item in the word, we can't store using a SUBREG - that would clobber previous results. And storing with a SUBREG is only possible for the least significant part, hence we can't do it for big endian (unless we want to permute the evaluation order. */ if (REG_P (target) && (BYTES_BIG_ENDIAN ? subsize < UNITS_PER_WORD : ((i * subsize) % UNITS_PER_WORD) != 0)) t = NULL_RTX; else t = simplify_gen_subreg (submode, target, mode, i * subsize); if (CONSTANT_P (op0)) a = simplify_gen_subreg (submode, op0, mode, i * subsize); else a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp, t, submode, submode, size); res = expand_unop (submode, unoptab, a, t, unsignedp); if (t) emit_move_insn (t, res); else store_bit_field (target, subbitsize, i * subbitsize, submode, res, size); } seq = get_insns (); end_sequence (); emit_insn (seq); return target; } /* Expand a binary operator which has both signed and unsigned forms. UOPTAB is the optab for unsigned operations, and SOPTAB is for signed operations. If we widen unsigned operands, we may use a signed wider operation instead of an unsigned wider operation, since the result would be the same. */ rtx sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab, rtx op0, rtx op1, rtx target, int unsignedp, enum optab_methods methods) { rtx temp; optab direct_optab = unsignedp ? uoptab : soptab; struct optab wide_soptab; /* Do it without widening, if possible. */ temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_DIRECT); if (temp || methods == OPTAB_DIRECT) return temp; /* Try widening to a signed int. Make a fake signed optab that hides any signed insn for direct use. */ wide_soptab = *soptab; wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing; wide_soptab.handlers[(int) mode].libfunc = 0; temp = expand_binop (mode, &wide_soptab, op0, op1, target, unsignedp, OPTAB_WIDEN); /* For unsigned operands, try widening to an unsigned int. */ if (temp == 0 && unsignedp) temp = expand_binop (mode, uoptab, op0, op1, target, unsignedp, OPTAB_WIDEN); if (temp || methods == OPTAB_WIDEN) return temp; /* Use the right width lib call if that exists. */ temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB); if (temp || methods == OPTAB_LIB) return temp; /* Must widen and use a lib call, use either signed or unsigned. */ temp = expand_binop (mode, &wide_soptab, op0, op1, target, unsignedp, methods); if (temp != 0) return temp; if (unsignedp) return expand_binop (mode, uoptab, op0, op1, target, unsignedp, methods); return 0; } /* Generate code to perform an operation specified by UNOPPTAB on operand OP0, with two results to TARG0 and TARG1. We assume that the order of the operands for the instruction is TARG0, TARG1, OP0. Either TARG0 or TARG1 may be zero, but what that means is that the result is not actually wanted. We will generate it into a dummy pseudo-reg and discard it. 
They may not both be zero. Returns 1 if this operation can be performed; 0 if not. */ int expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1, int unsignedp) { enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1); enum mode_class class; enum machine_mode wider_mode; rtx entry_last = get_last_insn (); rtx last; class = GET_MODE_CLASS (mode); op0 = protect_from_queue (op0, 0); if (flag_force_mem) { op0 = force_not_mem (op0); } if (targ0) targ0 = protect_from_queue (targ0, 1); else targ0 = gen_reg_rtx (mode); if (targ1) targ1 = protect_from_queue (targ1, 1); else targ1 = gen_reg_rtx (mode); /* Record where to go back to if we fail. */ last = get_last_insn (); if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { int icode = (int) unoptab->handlers[(int) mode].insn_code; enum machine_mode mode0 = insn_data[icode].operand[2].mode; rtx pat; rtx xop0 = op0; if (GET_MODE (xop0) != VOIDmode && GET_MODE (xop0) != mode0) xop0 = convert_to_mode (mode0, xop0, unsignedp); /* Now, if insn doesn't accept these operands, put them into pseudos. */ if (! (*insn_data[icode].operand[2].predicate) (xop0, mode0)) xop0 = copy_to_mode_reg (mode0, xop0); /* We could handle this, but we should always be called with a pseudo for our targets and all insns should take them as outputs. */ if (! (*insn_data[icode].operand[0].predicate) (targ0, mode) || ! (*insn_data[icode].operand[1].predicate) (targ1, mode)) abort (); pat = GEN_FCN (icode) (targ0, targ1, xop0); if (pat) { emit_insn (pat); return 1; } else delete_insns_since (last); } /* It can't be done in this mode. Can we do it in a wider mode? */ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) { for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) { rtx t0 = gen_reg_rtx (wider_mode); rtx t1 = gen_reg_rtx (wider_mode); rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp); if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp)) { convert_move (targ0, t0, unsignedp); convert_move (targ1, t1, unsignedp); return 1; } else delete_insns_since (last); } } } delete_insns_since (entry_last); return 0; } /* Generate code to perform an operation specified by BINOPTAB on operands OP0 and OP1, with two results to TARG1 and TARG2. We assume that the order of the operands for the instruction is TARG0, OP0, OP1, TARG1, which would fit a pattern like [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))]. Either TARG0 or TARG1 may be zero, but what that means is that the result is not actually wanted. We will generate it into a dummy pseudo-reg and discard it. They may not both be zero. Returns 1 if this operation can be performed; 0 if not. */ int expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1, int unsignedp) { enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1); enum mode_class class; enum machine_mode wider_mode; rtx entry_last = get_last_insn (); rtx last; class = GET_MODE_CLASS (mode); op0 = protect_from_queue (op0, 0); op1 = protect_from_queue (op1, 0); if (flag_force_mem) { op0 = force_not_mem (op0); op1 = force_not_mem (op1); } /* If we are inside an appropriately-short loop and one operand is an expensive constant, force it into a register. 
*/ if (CONSTANT_P (op0) && preserve_subexpressions_p () && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1)) op0 = force_reg (mode, op0); if (CONSTANT_P (op1) && preserve_subexpressions_p () && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1)) op1 = force_reg (mode, op1); if (targ0) targ0 = protect_from_queue (targ0, 1); else targ0 = gen_reg_rtx (mode); if (targ1) targ1 = protect_from_queue (targ1, 1); else targ1 = gen_reg_rtx (mode); /* Record where to go back to if we fail. */ last = get_last_insn (); if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { int icode = (int) binoptab->handlers[(int) mode].insn_code; enum machine_mode mode0 = insn_data[icode].operand[1].mode; enum machine_mode mode1 = insn_data[icode].operand[2].mode; rtx pat; rtx xop0 = op0, xop1 = op1; /* In case the insn wants input operands in modes different from those of the actual operands, convert the operands. It would seem that we don't need to convert CONST_INTs, but we do, so that they're properly zero-extended, sign-extended or truncated for their mode. */ if (GET_MODE (op0) != mode0 && mode0 != VOIDmode) xop0 = convert_modes (mode0, GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : mode, xop0, unsignedp); if (GET_MODE (op1) != mode1 && mode1 != VOIDmode) xop1 = convert_modes (mode1, GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : mode, xop1, unsignedp); /* Now, if insn doesn't accept these operands, put them into pseudos. */ if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)) xop0 = copy_to_mode_reg (mode0, xop0); if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)) xop1 = copy_to_mode_reg (mode1, xop1); /* We could handle this, but we should always be called with a pseudo for our targets and all insns should take them as outputs. */ if (! (*insn_data[icode].operand[0].predicate) (targ0, mode) || ! (*insn_data[icode].operand[3].predicate) (targ1, mode)) abort (); pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1); if (pat) { emit_insn (pat); return 1; } else delete_insns_since (last); } /* It can't be done in this mode. Can we do it in a wider mode? */ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) { for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) { rtx t0 = gen_reg_rtx (wider_mode); rtx t1 = gen_reg_rtx (wider_mode); rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp); rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp); if (expand_twoval_binop (binoptab, cop0, cop1, t0, t1, unsignedp)) { convert_move (targ0, t0, unsignedp); convert_move (targ1, t1, unsignedp); return 1; } else delete_insns_since (last); } } } delete_insns_since (entry_last); return 0; } /* Wrapper around expand_unop which takes an rtx code to specify the operation to perform, not an optab pointer. All other arguments are the same. */ rtx expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0, rtx target, int unsignedp) { optab unop = code_to_optab[(int) code]; if (unop == 0) abort (); return expand_unop (mode, unop, op0, target, unsignedp); } /* Try calculating (clz:narrow x) as (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). 
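   As an editorial illustration only (not part of this file, and kept out
   of compilation with #if 0; the helper names below are hypothetical),
   the same identity in plain C for an 8-bit value counted through a
   32-bit clz:  */

#if 0  /* Illustrative sketch only; never compiled.  */
static unsigned int
sketch_clz32 (unsigned long x)
{
  /* Leading-zero count of the low 32 bits of X; returns 32 when X is zero.  */
  unsigned int n = 0;
  unsigned long bit = 0x80000000ul;
  while (bit != 0 && (x & bit) == 0)
    n++, bit >>= 1;
  return n;
}

static unsigned int
sketch_clz8 (unsigned char x)
{
  /* clz in the narrow mode = clz of the zero-extended value in the wide
     mode, minus the difference in widths -- the identity quoted above.  */
  return sketch_clz32 ((unsigned long) x) - (32 - 8);
}
#endif

/* The RTL implementation of the same widening follows.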
*/ static rtx widen_clz (enum machine_mode mode, rtx op0, rtx target) { enum mode_class class = GET_MODE_CLASS (mode); if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) { enum machine_mode wider_mode; for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if (clz_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) { rtx xop0, temp, last; last = get_last_insn (); if (target == 0) target = gen_reg_rtx (mode); xop0 = widen_operand (op0, wider_mode, mode, true, false); temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true); if (temp != 0) temp = expand_binop (wider_mode, sub_optab, temp, GEN_INT (GET_MODE_BITSIZE (wider_mode) - GET_MODE_BITSIZE (mode)), target, true, OPTAB_DIRECT); if (temp == 0) delete_insns_since (last); return temp; } } } return 0; } /* Try calculating (parity x) as (and (popcount x) 1), where popcount can also be done in a wider mode. */ static rtx expand_parity (enum machine_mode mode, rtx op0, rtx target) { enum mode_class class = GET_MODE_CLASS (mode); if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) { enum machine_mode wider_mode; for (wider_mode = mode; wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if (popcount_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) { rtx xop0, temp, last; last = get_last_insn (); if (target == 0) target = gen_reg_rtx (mode); xop0 = widen_operand (op0, wider_mode, mode, true, false); temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX, true); if (temp != 0) temp = expand_binop (wider_mode, and_optab, temp, const1_rtx, target, true, OPTAB_DIRECT); if (temp == 0) delete_insns_since (last); return temp; } } } return 0; } /* Generate code to perform an operation specified by UNOPTAB on operand OP0, with result having machine-mode MODE. UNSIGNEDP is for the case where we have to widen the operands to perform the operation. It says to use zero-extension. If TARGET is nonzero, the value is generated there, if it is convenient to do so. In all cases an rtx is returned for the locus of the value; this may or may not be TARGET. */ rtx expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target, int unsignedp) { enum mode_class class; enum machine_mode wider_mode; rtx temp; rtx last = get_last_insn (); rtx pat; class = GET_MODE_CLASS (mode); op0 = protect_from_queue (op0, 0); if (flag_force_mem) { op0 = force_not_mem (op0); } if (target) target = protect_from_queue (target, 1); if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { int icode = (int) unoptab->handlers[(int) mode].insn_code; enum machine_mode mode0 = insn_data[icode].operand[1].mode; rtx xop0 = op0; if (target) temp = target; else temp = gen_reg_rtx (mode); if (GET_MODE (xop0) != VOIDmode && GET_MODE (xop0) != mode0) xop0 = convert_to_mode (mode0, xop0, unsignedp); /* Now, if insn doesn't accept our operand, put it into a pseudo. */ if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)) xop0 = copy_to_mode_reg (mode0, xop0); if (! (*insn_data[icode].operand[0].predicate) (temp, mode)) temp = gen_reg_rtx (mode); pat = GEN_FCN (icode) (temp, xop0); if (pat) { if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX)) { delete_insns_since (last); return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp); } emit_insn (pat); return temp; } else delete_insns_since (last); } /* It can't be done in this mode. 
Can we open-code it in a wider mode? */ /* Widening clz needs special treatment. */ if (unoptab == clz_optab) { temp = widen_clz (mode, op0, target); if (temp) return temp; else goto try_libcall; } if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) { rtx xop0 = op0; /* For certain operations, we need not actually extend the narrow operand, as long as we will truncate the results to the same narrowness. */ xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, (unoptab == neg_optab || unoptab == one_cmpl_optab) && class == MODE_INT); temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX, unsignedp); if (temp) { if (class != MODE_INT) { if (target == 0) target = gen_reg_rtx (mode); convert_move (target, temp, 0); return target; } else return gen_lowpart (mode, temp); } else delete_insns_since (last); } } /* These can be done a word at a time. */ if (unoptab == one_cmpl_optab && class == MODE_INT && GET_MODE_SIZE (mode) > UNITS_PER_WORD && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing) { int i; rtx insns; if (target == 0 || target == op0) target = gen_reg_rtx (mode); start_sequence (); /* Do the actual arithmetic. */ for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++) { rtx target_piece = operand_subword (target, i, 1, mode); rtx x = expand_unop (word_mode, unoptab, operand_subword_force (op0, i, mode), target_piece, unsignedp); if (target_piece != x) emit_move_insn (target_piece, x); } insns = get_insns (); end_sequence (); emit_no_conflict_block (insns, target, op0, NULL_RTX, gen_rtx_fmt_e (unoptab->code, mode, copy_rtx (op0))); return target; } /* Open-code the complex negation operation. */ else if (unoptab->code == NEG && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)) { rtx target_piece; rtx x; rtx seq; /* Find the correct mode for the real and imaginary parts. */ enum machine_mode submode = GET_MODE_INNER (mode); if (submode == BLKmode) abort (); if (target == 0) target = gen_reg_rtx (mode); start_sequence (); target_piece = gen_imagpart (submode, target); x = expand_unop (submode, unoptab, gen_imagpart (submode, op0), target_piece, unsignedp); if (target_piece != x) emit_move_insn (target_piece, x); target_piece = gen_realpart (submode, target); x = expand_unop (submode, unoptab, gen_realpart (submode, op0), target_piece, unsignedp); if (target_piece != x) emit_move_insn (target_piece, x); seq = get_insns (); end_sequence (); emit_no_conflict_block (seq, target, op0, 0, gen_rtx_fmt_e (unoptab->code, mode, copy_rtx (op0))); return target; } /* Try negating floating point values by flipping the sign bit. */ if (unoptab->code == NEG && class == MODE_FLOAT && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT) { const struct real_format *fmt = REAL_MODE_FORMAT (mode); enum machine_mode imode = int_mode_for_mode (mode); int bitpos = (fmt != 0) ? fmt->signbit : -1; if (imode != BLKmode && bitpos >= 0 && fmt->has_signed_zero) { HOST_WIDE_INT hi, lo; rtx last = get_last_insn (); /* Handle targets with different FP word orders. 
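   (Editorial aside: in plain C the sign-flip trick being set up here is,
   for example on the 32-bit integer image u of an IEEE single-precision
   operand, simply  u ^= 0x80000000u;  -- an XOR with a one-bit mask at
   the sign position, which is what the xor_optab expansion with
   immed_double_const performs below.  The word-order adjustment described
   above follows first.)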
*/ if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN) { int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; int word = nwords - (bitpos / BITS_PER_WORD) - 1; bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD; } if (bitpos < HOST_BITS_PER_WIDE_INT) { hi = 0; lo = (HOST_WIDE_INT) 1 << bitpos; } else { hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT); lo = 0; } temp = expand_binop (imode, xor_optab, gen_lowpart (imode, op0), immed_double_const (lo, hi, imode), NULL_RTX, 1, OPTAB_LIB_WIDEN); if (temp != 0) { rtx insn; if (target == 0) target = gen_reg_rtx (mode); insn = emit_move_insn (target, gen_lowpart (mode, temp)); set_unique_reg_note (insn, REG_EQUAL, gen_rtx_fmt_e (NEG, mode, copy_rtx (op0))); return target; } delete_insns_since (last); } } /* Try calculating parity (x) as popcount (x) % 2. */ if (unoptab == parity_optab) { temp = expand_parity (mode, op0, target); if (temp) return temp; } /* If there is no negation pattern, try subtracting from zero. */ if (unoptab == neg_optab && class == MODE_INT) { temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0, target, unsignedp, OPTAB_DIRECT); if (temp) return temp; } try_libcall: /* Now try a library call in this mode. */ if (unoptab->handlers[(int) mode].libfunc) { rtx insns; rtx value; enum machine_mode outmode = mode; /* All of these functions return small values. Thus we choose to have them return something that isn't a double-word. */ if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab || unoptab == popcount_optab || unoptab == parity_optab) outmode = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node))); start_sequence (); /* Pass 1 for NO_QUEUE so we don't lose any increments if the libcall is cse'd or moved. */ value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc, NULL_RTX, LCT_CONST, outmode, 1, op0, mode); insns = get_insns (); end_sequence (); target = gen_reg_rtx (outmode); emit_libcall_block (insns, target, value, gen_rtx_fmt_e (unoptab->code, mode, op0)); return target; } if (class == MODE_VECTOR_FLOAT || class == MODE_VECTOR_INT) return expand_vector_unop (mode, unoptab, op0, target, unsignedp); /* It can't be done in this mode. Can we do it in a wider mode? */ if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT) { for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if ((unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) || unoptab->handlers[(int) wider_mode].libfunc) { rtx xop0 = op0; /* For certain operations, we need not actually extend the narrow operand, as long as we will truncate the results to the same narrowness. */ xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, (unoptab == neg_optab || unoptab == one_cmpl_optab) && class == MODE_INT); temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX, unsignedp); /* If we are generating clz using wider mode, adjust the result. */ if (unoptab == clz_optab && temp != 0) temp = expand_binop (wider_mode, sub_optab, temp, GEN_INT (GET_MODE_BITSIZE (wider_mode) - GET_MODE_BITSIZE (mode)), target, true, OPTAB_DIRECT); if (temp) { if (class != MODE_INT) { if (target == 0) target = gen_reg_rtx (mode); convert_move (target, temp, 0); return target; } else return gen_lowpart (mode, temp); } else delete_insns_since (last); } } } /* If there is no negate operation, try doing a subtract from zero. The US Software GOFAST library needs this. 
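   (Editorial aside: in plain C terms the fallback below simply computes
   -x as 0 - x, using whichever subtraction insn or library routine is
   available.)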
*/ if (unoptab->code == NEG) { rtx temp; temp = expand_binop (mode, unoptab == negv_optab ? subv_optab : sub_optab, CONST0_RTX (mode), op0, target, unsignedp, OPTAB_LIB_WIDEN); if (temp) return temp; } return 0; } /* Emit code to compute the absolute value of OP0, with result to TARGET if convenient. (TARGET may be 0.) The return value says where the result actually is to be found. MODE is the mode of the operand; the mode of the result is different but can be deduced from MODE. */ rtx expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target, int result_unsignedp) { rtx temp; if (! flag_trapv) result_unsignedp = 1; /* First try to do it with a special abs instruction. */ temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab, op0, target, 0); if (temp != 0) return temp; /* For floating point modes, try clearing the sign bit. */ if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT) { const struct real_format *fmt = REAL_MODE_FORMAT (mode); enum machine_mode imode = int_mode_for_mode (mode); int bitpos = (fmt != 0) ? fmt->signbit : -1; if (imode != BLKmode && bitpos >= 0) { HOST_WIDE_INT hi, lo; rtx last = get_last_insn (); /* Handle targets with different FP word orders. */ if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN) { int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD; int word = nwords - (bitpos / BITS_PER_WORD) - 1; bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD; } if (bitpos < HOST_BITS_PER_WIDE_INT) { hi = 0; lo = (HOST_WIDE_INT) 1 << bitpos; } else { hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT); lo = 0; } temp = expand_binop (imode, and_optab, gen_lowpart (imode, op0), immed_double_const (~lo, ~hi, imode), NULL_RTX, 1, OPTAB_LIB_WIDEN); if (temp != 0) { rtx insn; if (target == 0) target = gen_reg_rtx (mode); insn = emit_move_insn (target, gen_lowpart (mode, temp)); set_unique_reg_note (insn, REG_EQUAL, gen_rtx_fmt_e (ABS, mode, copy_rtx (op0))); return target; } delete_insns_since (last); } } /* If we have a MAX insn, we can do this as MAX (x, -x). */ if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { rtx last = get_last_insn (); temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0); if (temp != 0) temp = expand_binop (mode, smax_optab, op0, temp, target, 0, OPTAB_WIDEN); if (temp != 0) return temp; delete_insns_since (last); } /* If this machine has expensive jumps, we can do integer absolute value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)), where W is the width of MODE. */ if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2) { rtx extended = expand_shift (RSHIFT_EXPR, mode, op0, size_int (GET_MODE_BITSIZE (mode) - 1), NULL_RTX, 0); temp = expand_binop (mode, xor_optab, extended, op0, target, 0, OPTAB_LIB_WIDEN); if (temp != 0) temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab, temp, extended, target, 0, OPTAB_LIB_WIDEN); if (temp != 0) return temp; } return NULL_RTX; } rtx expand_abs (enum machine_mode mode, rtx op0, rtx target, int result_unsignedp, int safe) { rtx temp, op1; if (! flag_trapv) result_unsignedp = 1; temp = expand_abs_nojump (mode, op0, target, result_unsignedp); if (temp != 0) return temp; /* If that does not win, use conditional jump and negate. */ /* It is safe to use the target if it is the same as the source if this is also a pseudo register */ if (op0 == target && REG_P (op0) && REGNO (op0) >= FIRST_PSEUDO_REGISTER) safe = 1; op1 = gen_label_rtx (); if (target == 0 || ! 
safe || GET_MODE (target) != mode || (MEM_P (target) && MEM_VOLATILE_P (target)) || (REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)) target = gen_reg_rtx (mode); emit_move_insn (target, op0); NO_DEFER_POP; /* If this mode is an integer too wide to compare properly, compare word by word. Rely on CSE to optimize constant cases. */ if (GET_MODE_CLASS (mode) == MODE_INT && ! can_compare_p (GE, mode, ccp_jump)) do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx, NULL_RTX, op1); else do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode, NULL_RTX, NULL_RTX, op1); op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab, target, target, 0); if (op0 != target) emit_move_insn (target, op0); emit_label (op1); OK_DEFER_POP; return target; } /* Emit code to compute the absolute value of OP0, with result to TARGET if convenient. (TARGET may be 0.) The return value says where the result actually is to be found. MODE is the mode of the operand; the mode of the result is different but can be deduced from MODE. UNSIGNEDP is relevant for complex integer modes. */ rtx expand_complex_abs (enum machine_mode mode, rtx op0, rtx target, int unsignedp) { enum mode_class class = GET_MODE_CLASS (mode); enum machine_mode wider_mode; rtx temp; rtx entry_last = get_last_insn (); rtx last; rtx pat; optab this_abs_optab; /* Find the correct mode for the real and imaginary parts. */ enum machine_mode submode = GET_MODE_INNER (mode); if (submode == BLKmode) abort (); op0 = protect_from_queue (op0, 0); if (flag_force_mem) { op0 = force_not_mem (op0); } last = get_last_insn (); if (target) target = protect_from_queue (target, 1); this_abs_optab = ! unsignedp && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT) ? absv_optab : abs_optab; if (this_abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { int icode = (int) this_abs_optab->handlers[(int) mode].insn_code; enum machine_mode mode0 = insn_data[icode].operand[1].mode; rtx xop0 = op0; if (target) temp = target; else temp = gen_reg_rtx (submode); if (GET_MODE (xop0) != VOIDmode && GET_MODE (xop0) != mode0) xop0 = convert_to_mode (mode0, xop0, unsignedp); /* Now, if insn doesn't accept our operand, put it into a pseudo. */ if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)) xop0 = copy_to_mode_reg (mode0, xop0); if (! (*insn_data[icode].operand[0].predicate) (temp, submode)) temp = gen_reg_rtx (submode); pat = GEN_FCN (icode) (temp, xop0); if (pat) { if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && ! add_equal_note (pat, temp, this_abs_optab->code, xop0, NULL_RTX)) { delete_insns_since (last); return expand_unop (mode, this_abs_optab, op0, NULL_RTX, unsignedp); } emit_insn (pat); return temp; } else delete_insns_since (last); } /* It can't be done in this mode. Can we open-code it in a wider mode? */ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if (this_abs_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) { rtx xop0 = op0; xop0 = convert_modes (wider_mode, mode, xop0, unsignedp); temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp); if (temp) { if (class != MODE_COMPLEX_INT) { if (target == 0) target = gen_reg_rtx (submode); convert_move (target, temp, 0); return target; } else return gen_lowpart (submode, temp); } else delete_insns_since (last); } } /* Open-code the complex absolute-value operation if we can open-code sqrt. Otherwise it's not worth while. 
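   (Editorial aside: the open-coded sequence below evaluates, in plain C
   terms,  sqrt (re * re + im * im)  -- the Euclidean modulus -- using
   expand_mult for the squares, expand_binop for the sum, and expand_unop
   on sqrt_optab for the root.)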
*/ if (sqrt_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing && ! flag_trapv) { rtx real, imag, total; real = gen_realpart (submode, op0); imag = gen_imagpart (submode, op0); /* Square both parts. */ real = expand_mult (submode, real, real, NULL_RTX, 0); imag = expand_mult (submode, imag, imag, NULL_RTX, 0); /* Sum the parts. */ total = expand_binop (submode, add_optab, real, imag, NULL_RTX, 0, OPTAB_LIB_WIDEN); /* Get sqrt in TARGET. Set TARGET to where the result is. */ target = expand_unop (submode, sqrt_optab, total, target, 0); if (target == 0) delete_insns_since (last); else return target; } /* Now try a library call in this mode. */ if (this_abs_optab->handlers[(int) mode].libfunc) { rtx insns; rtx value; start_sequence (); /* Pass 1 for NO_QUEUE so we don't lose any increments if the libcall is cse'd or moved. */ value = emit_library_call_value (abs_optab->handlers[(int) mode].libfunc, NULL_RTX, LCT_CONST, submode, 1, op0, mode); insns = get_insns (); end_sequence (); target = gen_reg_rtx (submode); emit_libcall_block (insns, target, value, gen_rtx_fmt_e (this_abs_optab->code, mode, op0)); return target; } /* It can't be done in this mode. Can we do it in a wider mode? */ for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { if ((this_abs_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing) || this_abs_optab->handlers[(int) wider_mode].libfunc) { rtx xop0 = op0; xop0 = convert_modes (wider_mode, mode, xop0, unsignedp); temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp); if (temp) { if (class != MODE_COMPLEX_INT) { if (target == 0) target = gen_reg_rtx (submode); convert_move (target, temp, 0); return target; } else return gen_lowpart (submode, temp); } else delete_insns_since (last); } } delete_insns_since (entry_last); return 0; } /* Generate an instruction whose insn-code is INSN_CODE, with two operands: an output TARGET and an input OP0. TARGET *must* be nonzero, and the output is always stored there. CODE is an rtx code such that (CODE OP0) is an rtx that describes the value that is stored into TARGET. */ void emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code) { rtx temp; enum machine_mode mode0 = insn_data[icode].operand[1].mode; rtx pat; temp = target = protect_from_queue (target, 1); op0 = protect_from_queue (op0, 0); /* Sign and zero extension from memory is often done specially on RISC machines, so forcing into a register here can pessimize code. */ if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND) op0 = force_not_mem (op0); /* Now, if insn does not accept our operands, put them into pseudos. */ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); if (! (*insn_data[icode].operand[0].predicate) (temp, GET_MODE (temp)) || (flag_force_mem && MEM_P (temp))) temp = gen_reg_rtx (GET_MODE (temp)); pat = GEN_FCN (icode) (temp, op0); if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN) add_equal_note (pat, temp, code, op0, NULL_RTX); emit_insn (pat); if (temp != target) emit_move_insn (target, temp); } /* Emit code to perform a series of operations on a multi-word quantity, one word at a time. Such a block is preceded by a CLOBBER of the output, consists of multiple insns, each setting one word of the output, and followed by a SET copying the output to itself. 
Each of the insns setting words of the output receives a REG_NO_CONFLICT note indicating that it doesn't conflict with the (also multi-word) inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL notes. INSNS is a block of code generated to perform the operation, not including the CLOBBER and final copy. All insns that compute intermediate values are first emitted, followed by the block as described above. TARGET, OP0, and OP1 are the output and inputs of the operations, respectively. OP1 may be zero for a unary operation. EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note on the last insn. If TARGET is not a register, INSNS is simply emitted with no special processing. Likewise if anything in INSNS is not an INSN or if there is a libcall block inside INSNS. The final insn emitted is returned. */ rtx emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv) { rtx prev, next, first, last, insn; if (!REG_P (target) || reload_in_progress) return emit_insn (insns); else for (insn = insns; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) != INSN || find_reg_note (insn, REG_LIBCALL, NULL_RTX)) return emit_insn (insns); /* First emit all insns that do not store into words of the output and remove these from the list. */ for (insn = insns; insn; insn = next) { rtx set = 0, note; int i; next = NEXT_INSN (insn); /* Some ports (cris) create a libcall regions at their own. We must avoid any potential nesting of LIBCALLs. */ if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL) remove_note (insn, note); if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL) remove_note (insn, note); if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE || GET_CODE (PATTERN (insn)) == CLOBBER) set = PATTERN (insn); else if (GET_CODE (PATTERN (insn)) == PARALLEL) { for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET) { set = XVECEXP (PATTERN (insn), 0, i); break; } } if (set == 0) abort (); if (! reg_overlap_mentioned_p (target, SET_DEST (set))) { if (PREV_INSN (insn)) NEXT_INSN (PREV_INSN (insn)) = next; else insns = next; if (next) PREV_INSN (next) = PREV_INSN (insn); add_insn (insn); } } prev = get_last_insn (); /* Now write the CLOBBER of the output, followed by the setting of each of the words, followed by the final copy. */ if (target != op0 && target != op1) emit_insn (gen_rtx_CLOBBER (VOIDmode, target)); for (insn = insns; insn; insn = next) { next = NEXT_INSN (insn); add_insn (insn); if (op1 && REG_P (op1)) REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1, REG_NOTES (insn)); if (op0 && REG_P (op0)) REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0, REG_NOTES (insn)); } if (mov_optab->handlers[(int) GET_MODE (target)].insn_code != CODE_FOR_nothing) { last = emit_move_insn (target, target); if (equiv) set_unique_reg_note (last, REG_EQUAL, equiv); } else { last = get_last_insn (); /* Remove any existing REG_EQUAL note from "last", or else it will be mistaken for a note referring to the full contents of the alleged libcall value when found together with the REG_RETVAL note added below. An existing note can come from an insn expansion at "last". */ remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX)); } if (prev == 0) first = get_insns (); else first = NEXT_INSN (prev); /* Encapsulate the block so it gets manipulated as a unit. 
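   (The REG_LIBCALL note attached to the first insn points at the last
   insn, and the REG_RETVAL note on the last insn points back at the
   first; the two gen_rtx_INSN_LIST calls just below create exactly that
   pair.)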
*/ REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last, REG_NOTES (first)); REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last)); return last; } /* Emit code to make a call to a constant function or a library call. INSNS is a list containing all insns emitted in the call. These insns leave the result in RESULT. Our job is to copy RESULT to TARGET, which is logically equivalent to EQUIV. We first emit any insns that set a pseudo on the assumption that these are loading constants into registers; doing so allows them to be safely cse'ed between blocks. Then we emit all the other insns in the block, followed by an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL note with an operand of EQUIV. Moving assignments to pseudos outside of the block is done to improve the generated code, but is not required to generate correct code, hence being unable to move an assignment is not grounds for not making a libcall block. There are two reasons why it is safe to leave these insns inside the block: First, we know that these pseudos cannot be used in generated RTL outside the block since they are created for temporary purposes within the block. Second, CSE will not record the values of anything set inside a libcall block, so we know they must be dead at the end of the block. Except for the first group of insns (the ones setting pseudos), the block is delimited by REG_RETVAL and REG_LIBCALL notes. */ void emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv) { rtx final_dest = target; rtx prev, next, first, last, insn; /* If this is a reg with REG_USERVAR_P set, then it could possibly turn into a MEM later. Protect the libcall block from this change. */ if (! REG_P (target) || REG_USERVAR_P (target)) target = gen_reg_rtx (GET_MODE (target)); /* If we're using non-call exceptions, a libcall corresponding to an operation that may trap may also trap. */ if (flag_non_call_exceptions && may_trap_p (equiv)) { for (insn = insns; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == CALL_INSN) { rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (note != 0 && INTVAL (XEXP (note, 0)) <= 0) remove_note (insn, note); } } else /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION reg note to indicate that this call cannot throw or execute a nonlocal goto (unless there is already a REG_EH_REGION note, in which case we update it). */ for (insn = insns; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == CALL_INSN) { rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (note != 0) XEXP (note, 0) = constm1_rtx; else REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (insn)); } /* First emit all insns that set pseudos. Remove them from the list as we go. Avoid insns that set pseudos which were referenced in previous insns. These can be generated by move_by_pieces, for example, to update an address. Similarly, avoid insns that reference things set in previous insns. */ for (insn = insns; insn; insn = next) { rtx set = single_set (insn); rtx note; /* Some ports (cris) create libcall regions of their own. We must avoid any potential nesting of LIBCALLs. */ if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL) remove_note (insn, note); if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL) remove_note (insn, note); next = NEXT_INSN (insn); if (set != 0 && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER && (insn == insns || ((! INSN_P(insns) || !
reg_mentioned_p (SET_DEST (set), PATTERN (insns))) && ! reg_used_between_p (SET_DEST (set), insns, insn) && ! modified_in_p (SET_SRC (set), insns) && ! modified_between_p (SET_SRC (set), insns, insn)))) { if (PREV_INSN (insn)) NEXT_INSN (PREV_INSN (insn)) = next; else insns = next; if (next) PREV_INSN (next) = PREV_INSN (insn); add_insn (insn); } /* Some ports use a loop to copy large arguments onto the stack. Don't move anything outside such a loop. */ if (GET_CODE (insn) == CODE_LABEL) break; } prev = get_last_insn (); /* Write the remaining insns followed by the final copy. */ for (insn = insns; insn; insn = next) { next = NEXT_INSN (insn); add_insn (insn); } last = emit_move_insn (target, result); if (mov_optab->handlers[(int) GET_MODE (target)].insn_code != CODE_FOR_nothing) set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv)); else { /* Remove any existing REG_EQUAL note from "last", or else it will be mistaken for a note referring to the full contents of the libcall value when found together with the REG_RETVAL note added below. An existing note can come from an insn expansion at "last". */ remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX)); } if (final_dest != target) emit_move_insn (final_dest, target); if (prev == 0) first = get_insns (); else first = NEXT_INSN (prev); /* Encapsulate the block so it gets manipulated as a unit. */ if (!flag_non_call_exceptions || !may_trap_p (equiv)) { /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the encapsulated region would not be in one basic block, i.e. when there is a control_flow_insn_p insn between FIRST and LAST. */ bool attach_libcall_retval_notes = true; next = NEXT_INSN (last); for (insn = first; insn != next; insn = NEXT_INSN (insn)) if (control_flow_insn_p (insn)) { attach_libcall_retval_notes = false; break; } if (attach_libcall_retval_notes) { REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last, REG_NOTES (first)); REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last)); } } } /* Generate code to store zero in X. */ void emit_clr_insn (rtx x) { emit_move_insn (x, const0_rtx); } /* Generate code to store 1 in X assuming it contains zero beforehand. */ void emit_0_to_1_insn (rtx x) { emit_move_insn (x, const1_rtx); } /* Nonzero if we can perform a comparison of mode MODE straightforwardly. PURPOSE describes how this comparison will be used. CODE is the rtx comparison code we will be using. ??? Actually, CODE is slightly weaker than that. A target is still required to implement all of the normal bcc operations, but not required to implement all (or any) of the unordered bcc operations. */ int can_compare_p (enum rtx_code code, enum machine_mode mode, enum can_compare_purpose purpose) { do { if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) { if (purpose == ccp_jump) return bcc_gen_fctn[(int) code] != NULL; else if (purpose == ccp_store_flag) return setcc_gen_code[(int) code] != CODE_FOR_nothing; else /* There's only one cmov entry point, and it's allowed to fail. 
*/ return 1; } if (purpose == ccp_jump && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) return 1; if (purpose == ccp_cmov && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) return 1; if (purpose == ccp_store_flag && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing) return 1; mode = GET_MODE_WIDER_MODE (mode); } while (mode != VOIDmode); return 0; } /* This function is called when we are going to emit a compare instruction that compares the values found in *PX and *PY, using the rtl operator COMPARISON. *PMODE is the mode of the inputs (in case they are const_int). *PUNSIGNEDP nonzero says that the operands are unsigned; this matters if they need to be widened. If they have mode BLKmode, then SIZE specifies the size of both operands. This function performs all the setup necessary so that the caller only has to emit a single comparison insn. This setup can involve doing a BLKmode comparison or emitting a library call to perform the comparison if no insn is available to handle it. The values which are passed in through pointers can be modified; the caller should perform the comparison on the modified values. */ static void prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size, enum machine_mode *pmode, int *punsignedp, enum can_compare_purpose purpose) { enum machine_mode mode = *pmode; rtx x = *px, y = *py; int unsignedp = *punsignedp; enum mode_class class; class = GET_MODE_CLASS (mode); /* They could both be VOIDmode if both args are immediate constants, but we should fold that at an earlier stage. With no special code here, this will call abort, reminding the programmer to implement such folding. */ if (mode != BLKmode && flag_force_mem) { /* Load duplicate non-volatile operands once. */ if (rtx_equal_p (x, y) && ! volatile_refs_p (x)) { x = force_not_mem (x); y = x; } else { x = force_not_mem (x); y = force_not_mem (y); } } /* If we are inside an appropriately-short loop and one operand is an expensive constant, force it into a register. */ if (CONSTANT_P (x) && preserve_subexpressions_p () && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1)) x = force_reg (mode, x); if (CONSTANT_P (y) && preserve_subexpressions_p () && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1)) y = force_reg (mode, y); #ifdef HAVE_cc0 /* Abort if we have a non-canonical comparison. The RTL documentation states that canonical comparisons are required only for targets which have cc0. */ if (CONSTANT_P (x) && ! CONSTANT_P (y)) abort (); #endif /* Don't let both operands fail to indicate the mode. */ if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode) x = force_reg (mode, x); /* Handle all BLKmode compares. */ if (mode == BLKmode) { enum machine_mode cmp_mode, result_mode; enum insn_code cmp_code; tree length_type; rtx libfunc; rtx result; rtx opalign = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT); if (size == 0) abort (); emit_queue (); x = protect_from_queue (x, 0); y = protect_from_queue (y, 0); size = protect_from_queue (size, 0); /* Try to use a memory block compare insn - either cmpstr or cmpmem will do. */ for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); cmp_mode != VOIDmode; cmp_mode = GET_MODE_WIDER_MODE (cmp_mode)) { cmp_code = cmpmem_optab[cmp_mode]; if (cmp_code == CODE_FOR_nothing) cmp_code = cmpstr_optab[cmp_mode]; if (cmp_code == CODE_FOR_nothing) continue; /* Must make sure the size fits the insn's mode. 
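   (For example, a cmpstr or cmpmem pattern whose length operand is QImode
   can only encode block sizes below 256, so a 300-byte constant-size
   compare must fall through to a wider pattern or to the memcmp call
   below.)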
*/ if ((GET_CODE (size) == CONST_INT && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))) || (GET_MODE_BITSIZE (GET_MODE (size)) > GET_MODE_BITSIZE (cmp_mode))) continue; result_mode = insn_data[cmp_code].operand[0].mode; result = gen_reg_rtx (result_mode); size = convert_to_mode (cmp_mode, size, 1); emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign)); *px = result; *py = const0_rtx; *pmode = result_mode; return; } /* Otherwise call a library function, memcmp. */ libfunc = memcmp_libfunc; length_type = sizetype; result_mode = TYPE_MODE (integer_type_node); cmp_mode = TYPE_MODE (length_type); size = convert_to_mode (TYPE_MODE (length_type), size, TYPE_UNSIGNED (length_type)); result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK, result_mode, 3, XEXP (x, 0), Pmode, XEXP (y, 0), Pmode, size, cmp_mode); *px = result; *py = const0_rtx; *pmode = result_mode; return; } /* Don't allow operands to the compare to trap, as that can put the compare and branch in different basic blocks. */ if (flag_non_call_exceptions) { if (may_trap_p (x)) x = force_reg (mode, x); if (may_trap_p (y)) y = force_reg (mode, y); } *px = x; *py = y; if (can_compare_p (*pcomparison, mode, purpose)) return; /* Handle a lib call just for the mode we are using. */ if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT) { rtx libfunc = cmp_optab->handlers[(int) mode].libfunc; rtx result; /* If we want unsigned, and this mode has a distinct unsigned comparison routine, use that. */ if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc) libfunc = ucmp_optab->handlers[(int) mode].libfunc; result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK, word_mode, 2, x, mode, y, mode); /* Integer comparison returns a result that must be compared against 1, so that even if we do an unsigned compare afterward, there is still a value that can represent the result "less than". */ *px = result; *py = const1_rtx; *pmode = word_mode; return; } if (class == MODE_FLOAT) prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp); else abort (); } /* Before emitting an insn with code ICODE, make sure that X, which is going to be used for operand OPNUM of the insn, is converted from mode MODE to WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and that it is accepted by the operand predicate. Return the new value. */ rtx prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode, enum machine_mode wider_mode, int unsignedp) { x = protect_from_queue (x, 0); if (mode != wider_mode) x = convert_modes (wider_mode, mode, x, unsignedp); if (! (*insn_data[icode].operand[opnum].predicate) (x, insn_data[icode].operand[opnum].mode)) { if (no_new_pseudos) return NULL_RTX; x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x); } return x; } /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know we can do the comparison. The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may be NULL_RTX which indicates that only a comparison is to be generated. */ static void emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode, enum rtx_code comparison, int unsignedp, rtx label) { rtx test = gen_rtx_fmt_ee (comparison, mode, x, y); enum mode_class class = GET_MODE_CLASS (mode); enum machine_mode wider_mode = mode; /* Try combined insns first. 
*/ do { enum insn_code icode; PUT_MODE (test, wider_mode); if (label) { icode = cbranch_optab->handlers[(int) wider_mode].insn_code; if (icode != CODE_FOR_nothing && (*insn_data[icode].operand[0].predicate) (test, wider_mode)) { x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp); y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp); emit_jump_insn (GEN_FCN (icode) (test, x, y, label)); return; } } /* Handle some compares against zero. */ icode = (int) tst_optab->handlers[(int) wider_mode].insn_code; if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing) { x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp); emit_insn (GEN_FCN (icode) (x)); if (label) emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label)); return; } /* Handle compares for which there is a directly suitable insn. */ icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code; if (icode != CODE_FOR_nothing) { x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp); y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp); emit_insn (GEN_FCN (icode) (x, y)); if (label) emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label)); return; } if (class != MODE_INT && class != MODE_FLOAT && class != MODE_COMPLEX_FLOAT) break; wider_mode = GET_MODE_WIDER_MODE (wider_mode); } while (wider_mode != VOIDmode); abort (); } /* Generate code to compare X with Y so that the condition codes are set and to jump to LABEL if the condition is true. If X is a constant and Y is not a constant, then the comparison is swapped to ensure that the comparison RTL has the canonical form. UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select the proper branch condition code. If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y. MODE is the mode of the inputs (in case they are const_int). COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will be passed unchanged to emit_cmp_insn, then potentially converted into an unsigned variant based on UNSIGNEDP to select a proper jump instruction. */ void emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size, enum machine_mode mode, int unsignedp, rtx label) { rtx op0 = x, op1 = y; /* Swap operands and condition to ensure canonical RTL. */ if (swap_commutative_operands_p (x, y)) { /* If we're not emitting a branch, this means some caller is out of sync. */ if (! label) abort (); op0 = y, op1 = x; comparison = swap_condition (comparison); } #ifdef HAVE_cc0 /* If OP0 is still a constant, then both X and Y must be constants. Force X into a register to avoid aborting in emit_cmp_insn due to non-canonical RTL. */ if (CONSTANT_P (op0)) op0 = force_reg (mode, op0); #endif emit_queue (); if (unsignedp) comparison = unsigned_condition (comparison); prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp, ccp_jump); emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label); } /* Like emit_cmp_and_jump_insns, but generate only the comparison. */ void emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size, enum machine_mode mode, int unsignedp) { emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0); } /* Emit a library call comparison between floating point X and Y. COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). 
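   As an editorial illustration only (hypothetical names, kept out of
   compilation with #if 0): the rewrite performed here has the shape of
   replacing a source-level float comparison with an integer comparison
   of a libcall result against zero.  */

#if 0  /* Illustrative sketch only; never compiled.  The real libfunc is
          whatever code_to_optab[comparison] supplies for the mode.  */
static int
sketch_fcmp (double x, double y)
{
  /* A comparison libcall conventionally returns a small integer whose
     relation to zero encodes the ordering.  */
  if (x < y)
    return -1;
  if (x > y)
    return 1;
  return 0;
}

static int
sketch_float_less_than (double x, double y)
{
  /* The caller's (x < y) becomes a call followed by a compare against
     zero, which is what *px, *py and *pcomparison are rewritten to say.  */
  return sketch_fcmp (x, y) < 0;
}
#endif

/* The function itself follows.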
*/ static void prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison, enum machine_mode *pmode, int *punsignedp) { enum rtx_code comparison = *pcomparison; enum rtx_code swapped = swap_condition (comparison); rtx x = protect_from_queue (*px, 0); rtx y = protect_from_queue (*py, 0); enum machine_mode orig_mode = GET_MODE (x); enum machine_mode mode; rtx value, target, insns, equiv; rtx libfunc = 0; for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) { if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc)) break; if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc)) { rtx tmp; tmp = x; x = y; y = tmp; comparison = swapped; break; } } if (mode == VOIDmode) abort (); if (mode != orig_mode) { x = convert_to_mode (mode, x, 0); y = convert_to_mode (mode, y, 0); } /* Attach a REG_EQUAL note describing the semantics of the libcall to the RTL. This allows the RTL optimizers to delete the libcall if the condition can be determined at compile-time. */ if (comparison == UNORDERED) { rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x); equiv = simplify_gen_relational (NE, word_mode, mode, y, y); equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode, temp, const_true_rtx, equiv); } else { equiv = simplify_gen_relational (comparison, word_mode, mode, x, y); if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)) { rtx true_rtx, false_rtx; switch (comparison) { case EQ: true_rtx = const0_rtx; false_rtx = const_true_rtx; break; case NE: true_rtx = const_true_rtx; false_rtx = const0_rtx; break; case GT: true_rtx = const1_rtx; false_rtx = const0_rtx; break; case GE: true_rtx = const0_rtx; false_rtx = constm1_rtx; break; case LT: true_rtx = constm1_rtx; false_rtx = const0_rtx; break; case LE: true_rtx = const0_rtx; false_rtx = const1_rtx; break; default: abort (); } equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode, equiv, true_rtx, false_rtx); } } start_sequence (); value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, word_mode, 2, x, mode, y, mode); insns = get_insns (); end_sequence (); target = gen_reg_rtx (word_mode); emit_libcall_block (insns, target, value, equiv); if (comparison == UNORDERED || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)) comparison = NE; *px = target; *py = const0_rtx; *pmode = word_mode; *pcomparison = comparison; *punsignedp = 0; } /* Generate code to indirectly jump to a location given in the rtx LOC. */ void emit_indirect_jump (rtx loc) { if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate) (loc, Pmode))) loc = copy_to_mode_reg (Pmode, loc); emit_jump_insn (gen_indirect_jump (loc)); emit_barrier (); } #ifdef HAVE_conditional_move /* Emit a conditional move instruction if the machine supports one for that condition and machine mode. OP0 and OP1 are the operands that should be compared using CODE. CMODE is the mode to use should they be constants. If it is VOIDmode, they cannot both be constants. OP2 should be stored in TARGET if the comparison is true, otherwise OP3 should be stored there. MODE is the mode to use should they be constants. If it is VOIDmode, they cannot both be constants. The result is either TARGET (perhaps modified) or NULL_RTX if the operation is not supported.
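   As an editorial illustration only (hypothetical, never compiled): the
   source-level operation a movcc pattern implements, shown with LT as the
   comparison code.  */

#if 0  /* Illustrative sketch only.  */
static int
sketch_conditional_move (int op0, int op1, int op2, int op3)
{
  /* TARGET receives OP2 when the comparison holds and OP3 otherwise,
     with no branch emitted on machines that provide the pattern.  */
  return (op0 < op1) ? op2 : op3;
}
#endif

/* The RTL-level implementation follows.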
*/ rtx emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1, enum machine_mode cmode, rtx op2, rtx op3, enum machine_mode mode, int unsignedp) { rtx tem, subtarget, comparison, insn; enum insn_code icode; enum rtx_code reversed; /* If one operand is constant, make it the second one. Only do this if the other operand is not constant as well. */ if (swap_commutative_operands_p (op0, op1)) { tem = op0; op0 = op1; op1 = tem; code = swap_condition (code); } /* get_condition will prefer to generate LT and GT even if the old comparison was against zero, so undo that canonicalization here since comparisons against zero are cheaper. */ if (code == LT && op1 == const1_rtx) code = LE, op1 = const0_rtx; else if (code == GT && op1 == constm1_rtx) code = GE, op1 = const0_rtx; if (cmode == VOIDmode) cmode = GET_MODE (op0); if (swap_commutative_operands_p (op2, op3) && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL)) != UNKNOWN)) { tem = op2; op2 = op3; op3 = tem; code = reversed; } if (mode == VOIDmode) mode = GET_MODE (op2); icode = movcc_gen_code[mode]; if (icode == CODE_FOR_nothing) return 0; if (flag_force_mem) { op2 = force_not_mem (op2); op3 = force_not_mem (op3); } if (target) target = protect_from_queue (target, 1); else target = gen_reg_rtx (mode); subtarget = target; emit_queue (); op2 = protect_from_queue (op2, 0); op3 = protect_from_queue (op3, 0); /* If the insn doesn't accept these operands, put them in pseudos. */ if (! (*insn_data[icode].operand[0].predicate) (subtarget, insn_data[icode].operand[0].mode)) subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode); if (! (*insn_data[icode].operand[2].predicate) (op2, insn_data[icode].operand[2].mode)) op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2); if (! (*insn_data[icode].operand[3].predicate) (op3, insn_data[icode].operand[3].mode)) op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3); /* Everything should now be in the suitable form, so emit the compare insn and then the conditional move. */ comparison = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX); /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */ /* We can get const0_rtx or const_true_rtx in some circumstances. Just return NULL and let the caller figure out how best to deal with this situation. */ if (GET_CODE (comparison) != code) return NULL_RTX; insn = GEN_FCN (icode) (subtarget, comparison, op2, op3); /* If that failed, then give up. */ if (insn == 0) return 0; emit_insn (insn); if (subtarget != target) convert_move (target, subtarget, 0); return target; } /* Return nonzero if a conditional move of mode MODE is supported. This function is for combine so it can tell whether an insn that looks like a conditional move is actually supported by the hardware. If we guess wrong we lose a bit on optimization, but that's it. */ /* ??? sparc64 supports conditionally moving integers values based on fp comparisons, and vice versa. How do we handle them? */ int can_conditionally_move_p (enum machine_mode mode) { if (movcc_gen_code[mode] != CODE_FOR_nothing) return 1; return 0; } #endif /* HAVE_conditional_move */ /* Emit a conditional addition instruction if the machine supports one for that condition and machine mode. OP0 and OP1 are the operands that should be compared using CODE. CMODE is the mode to use should they be constants. If it is VOIDmode, they cannot both be constants. OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3 should be stored there. 
MODE is the mode to use should they be constants. If it is VOIDmode, they cannot both be constants. The result is either TARGET (perhaps modified) or NULL_RTX if the operation is not supported. */ rtx emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1, enum machine_mode cmode, rtx op2, rtx op3, enum machine_mode mode, int unsignedp) { rtx tem, subtarget, comparison, insn; enum insn_code icode; enum rtx_code reversed; /* If one operand is constant, make it the second one. Only do this if the other operand is not constant as well. */ if (swap_commutative_operands_p (op0, op1)) { tem = op0; op0 = op1; op1 = tem; code = swap_condition (code); } /* get_condition will prefer to generate LT and GT even if the old comparison was against zero, so undo that canonicalization here since comparisons against zero are cheaper. */ if (code == LT && op1 == const1_rtx) code = LE, op1 = const0_rtx; else if (code == GT && op1 == constm1_rtx) code = GE, op1 = const0_rtx; if (cmode == VOIDmode) cmode = GET_MODE (op0); if (swap_commutative_operands_p (op2, op3) && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL)) != UNKNOWN)) { tem = op2; op2 = op3; op3 = tem; code = reversed; } if (mode == VOIDmode) mode = GET_MODE (op2); icode = addcc_optab->handlers[(int) mode].insn_code; if (icode == CODE_FOR_nothing) return 0; if (flag_force_mem) { op2 = force_not_mem (op2); op3 = force_not_mem (op3); } if (target) target = protect_from_queue (target, 1); else target = gen_reg_rtx (mode); subtarget = target; emit_queue (); op2 = protect_from_queue (op2, 0); op3 = protect_from_queue (op3, 0); /* If the insn doesn't accept these operands, put them in pseudos. */ if (! (*insn_data[icode].operand[0].predicate) (subtarget, insn_data[icode].operand[0].mode)) subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode); if (! (*insn_data[icode].operand[2].predicate) (op2, insn_data[icode].operand[2].mode)) op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2); if (! (*insn_data[icode].operand[3].predicate) (op3, insn_data[icode].operand[3].mode)) op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3); /* Everything should now be in the suitable form, so emit the compare insn and then the conditional move. */ comparison = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX); /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */ /* We can get const0_rtx or const_true_rtx in some circumstances. Just return NULL and let the caller figure out how best to deal with this situation. */ if (GET_CODE (comparison) != code) return NULL_RTX; insn = GEN_FCN (icode) (subtarget, comparison, op2, op3); /* If that failed, then give up. */ if (insn == 0) return 0; emit_insn (insn); if (subtarget != target) convert_move (target, subtarget, 0); return target; } /* These functions attempt to generate an insn body, rather than emitting the insn, but if the gen function already emits them, we make no attempt to turn them back into naked patterns. They do not protect from queued increments, because they may be used 1) in protect_from_queue itself and 2) in other passes where there is no queue. */ /* Generate and return an insn body to add Y to X. */ rtx gen_add2_insn (rtx x, rtx y) { int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code; if (! ((*insn_data[icode].operand[0].predicate) (x, insn_data[icode].operand[0].mode)) || ! ((*insn_data[icode].operand[1].predicate) (x, insn_data[icode].operand[1].mode)) || ! 
((*insn_data[icode].operand[2].predicate) (y, insn_data[icode].operand[2].mode))) abort (); return (GEN_FCN (icode) (x, x, y)); } /* Generate and return an insn body to add r1 and c, storing the result in r0. */ rtx gen_add3_insn (rtx r0, rtx r1, rtx c) { int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code; if (icode == CODE_FOR_nothing || ! ((*insn_data[icode].operand[0].predicate) (r0, insn_data[icode].operand[0].mode)) || ! ((*insn_data[icode].operand[1].predicate) (r1, insn_data[icode].operand[1].mode)) || ! ((*insn_data[icode].operand[2].predicate) (c, insn_data[icode].operand[2].mode))) return NULL_RTX; return (GEN_FCN (icode) (r0, r1, c)); } int have_add2_insn (rtx x, rtx y) { int icode; if (GET_MODE (x) == VOIDmode) abort (); icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code; if (icode == CODE_FOR_nothing) return 0; if (! ((*insn_data[icode].operand[0].predicate) (x, insn_data[icode].operand[0].mode)) || ! ((*insn_data[icode].operand[1].predicate) (x, insn_data[icode].operand[1].mode)) || ! ((*insn_data[icode].operand[2].predicate) (y, insn_data[icode].operand[2].mode))) return 0; return 1; } /* Generate and return an insn body to subtract Y from X. */ rtx gen_sub2_insn (rtx x, rtx y) { int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code; if (! ((*insn_data[icode].operand[0].predicate) (x, insn_data[icode].operand[0].mode)) || ! ((*insn_data[icode].operand[1].predicate) (x, insn_data[icode].operand[1].mode)) || ! ((*insn_data[icode].operand[2].predicate) (y, insn_data[icode].operand[2].mode))) abort (); return (GEN_FCN (icode) (x, x, y)); } /* Generate and return an insn body to subtract r1 and c, storing the result in r0. */ rtx gen_sub3_insn (rtx r0, rtx r1, rtx c) { int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code; if (icode == CODE_FOR_nothing || ! ((*insn_data[icode].operand[0].predicate) (r0, insn_data[icode].operand[0].mode)) || ! ((*insn_data[icode].operand[1].predicate) (r1, insn_data[icode].operand[1].mode)) || ! ((*insn_data[icode].operand[2].predicate) (c, insn_data[icode].operand[2].mode))) return NULL_RTX; return (GEN_FCN (icode) (r0, r1, c)); } int have_sub2_insn (rtx x, rtx y) { int icode; if (GET_MODE (x) == VOIDmode) abort (); icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code; if (icode == CODE_FOR_nothing) return 0; if (! ((*insn_data[icode].operand[0].predicate) (x, insn_data[icode].operand[0].mode)) || ! ((*insn_data[icode].operand[1].predicate) (x, insn_data[icode].operand[1].mode)) || ! ((*insn_data[icode].operand[2].predicate) (y, insn_data[icode].operand[2].mode))) return 0; return 1; } /* Generate the body of an instruction to copy Y into X. It may be a list of insns, if one insn isn't enough. */ rtx gen_move_insn (rtx x, rtx y) { rtx seq; start_sequence (); emit_move_insn_1 (x, y); seq = get_insns (); end_sequence (); return seq; } /* Return the insn code used to extend FROM_MODE to TO_MODE. UNSIGNEDP specifies zero-extension instead of sign-extension. If no such operation exists, CODE_FOR_nothing will be returned. */ enum insn_code can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode, int unsignedp) { convert_optab tab; #ifdef HAVE_ptr_extend if (unsignedp < 0) return CODE_FOR_ptr_extend; #endif tab = unsignedp ? zext_optab : sext_optab; return tab->handlers[to_mode][from_mode].insn_code; } /* Generate the body of an insn to extend Y (with mode MFROM) into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. 
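*/

/* Illustrative sketch, not part of GCC: can_extend_p above picks the signed
   or unsigned conversion table and then indexes it by (to_mode, from_mode).
   The miniature tables, the mode enum and the *_sketch names below are
   invented for this example; the block is kept under #if 0 so it never takes
   part in the build.  */
#if 0
#include <stdio.h>

enum mode_sketch { QI, HI, SI, DI, NUM_MODES };
#define CODE_FOR_NOTHING (-1)

static int sext_table[NUM_MODES][NUM_MODES];
static int zext_table[NUM_MODES][NUM_MODES];

static void
init_tables (void)
{
  int to, from;

  for (to = 0; to < NUM_MODES; to++)
    for (from = 0; from < NUM_MODES; from++)
      sext_table[to][from] = zext_table[to][from] = CODE_FOR_NOTHING;

  /* Pretend the target can only widen SI to DI.  */
  sext_table[DI][SI] = 1;
  zext_table[DI][SI] = 2;
}

/* Analogue of can_extend_p: choose a table, then index it by both modes.  */
static int
can_extend_sketch (enum mode_sketch to, enum mode_sketch from, int unsignedp)
{
  int (*tab)[NUM_MODES] = unsignedp ? zext_table : sext_table;
  return tab[to][from];
}

int
main (void)
{
  init_tables ();
  printf ("DI<-SI unsigned: %d\n", can_extend_sketch (DI, SI, 1));
  printf ("DI<-QI signed:   %d\n", can_extend_sketch (DI, QI, 0));
  return 0;
}
#endif

/* End of sketch.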
*/ rtx gen_extend_insn (rtx x, rtx y, enum machine_mode mto, enum machine_mode mfrom, int unsignedp) { enum insn_code icode = can_extend_p (mto, mfrom, unsignedp); return GEN_FCN (icode) (x, y); } /* can_fix_p and can_float_p say whether the target machine can directly convert a given fixed point type to a given floating point type, or vice versa. The returned value is the CODE_FOR_... value to use, or CODE_FOR_nothing if these modes cannot be directly converted. *TRUNCP_PTR is set to 1 if it is necessary to output an explicit FTRUNC insn before the fix insn; otherwise 0. */ static enum insn_code can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode, int unsignedp, int *truncp_ptr) { convert_optab tab; enum insn_code icode; tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab; icode = tab->handlers[fixmode][fltmode].insn_code; if (icode != CODE_FOR_nothing) { *truncp_ptr = 0; return icode; } /* FIXME: This requires a port to define both FIX and FTRUNC pattern for this to work. We need to rework the fix* and ftrunc* patterns and documentation. */ tab = unsignedp ? ufix_optab : sfix_optab; icode = tab->handlers[fixmode][fltmode].insn_code; if (icode != CODE_FOR_nothing && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing) { *truncp_ptr = 1; return icode; } *truncp_ptr = 0; return CODE_FOR_nothing; } static enum insn_code can_float_p (enum machine_mode fltmode, enum machine_mode fixmode, int unsignedp) { convert_optab tab; tab = unsignedp ? ufloat_optab : sfloat_optab; return tab->handlers[fltmode][fixmode].insn_code; } /* Generate code to convert FROM to floating point and store in TO. FROM must be fixed point and not VOIDmode. UNSIGNEDP nonzero means regard FROM as unsigned. Normally this is done by correcting the final value if it is negative. */ void expand_float (rtx to, rtx from, int unsignedp) { enum insn_code icode; rtx target = to; enum machine_mode fmode, imode; /* Crash now, because we won't be able to decide which mode to use. */ if (GET_MODE (from) == VOIDmode) abort (); /* Look for an insn to do the conversion. Do it in the specified modes if possible; otherwise convert either input, output or both to wider mode. If the integer mode is wider than the mode of FROM, we can do the conversion signed even if the input is unsigned. */ for (fmode = GET_MODE (to); fmode != VOIDmode; fmode = GET_MODE_WIDER_MODE (fmode)) for (imode = GET_MODE (from); imode != VOIDmode; imode = GET_MODE_WIDER_MODE (imode)) { int doing_unsigned = unsignedp; if (fmode != GET_MODE (to) && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from))) continue; icode = can_float_p (fmode, imode, unsignedp); if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp) icode = can_float_p (fmode, imode, 0), doing_unsigned = 0; if (icode != CODE_FOR_nothing) { to = protect_from_queue (to, 1); from = protect_from_queue (from, 0); if (imode != GET_MODE (from)) from = convert_to_mode (imode, from, unsignedp); if (fmode != GET_MODE (to)) target = gen_reg_rtx (fmode); emit_unop_insn (icode, target, from, doing_unsigned ? UNSIGNED_FLOAT : FLOAT); if (target != to) convert_move (to, target, 0); return; } } /* Unsigned integer, and no way to convert directly. Convert as signed, then conditionally adjust the result. 
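*/

/* Illustrative sketch, not part of GCC: the code below converts an unsigned
   integer by first converting it as signed and then correcting the result,
   either by adding 2^bitwidth afterwards or by pre-halving the value (while
   or-ing the shifted-out bit back in so rounding still sees it) and doubling
   the float.  This is the same arithmetic done on uint64_t/double with
   ordinary C operators, not the RTL expansion itself; the function names are
   made up.  Guarded with #if 0 so it never takes part in the build.  */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Variant 1: convert as signed, then add 2^64 if the sign bit was set.
   Note the second rounding step; avoiding it is exactly why the code below
   prefers a wider floating mode when one is available.  */
static double
u64_to_double_offset (uint64_t x)
{
  double d = (double) (int64_t) x;

  if ((int64_t) x < 0)
    d += 18446744073709551616.0;      /* 2^64 */
  return d;
}

/* Variant 2: halve the value into the signed range, keeping the lost bit
   as a sticky bit, convert, and double the (exact) result.  */
static double
u64_to_double_halve (uint64_t x)
{
  if ((int64_t) x >= 0)
    return (double) (int64_t) x;
  return 2.0 * (double) (int64_t) ((x >> 1) | (x & 1));
}

int
main (void)
{
  uint64_t x = 0x8000000000100000ull;  /* 2^63 + 2^20 */

  printf ("%.0f\n%.0f\n", u64_to_double_offset (x), u64_to_double_halve (x));
  return 0;
}
#endif

/* End of sketch.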
*/ if (unsignedp) { rtx label = gen_label_rtx (); rtx temp; REAL_VALUE_TYPE offset; emit_queue (); to = protect_from_queue (to, 1); from = protect_from_queue (from, 0); if (flag_force_mem) from = force_not_mem (from); /* Look for a usable floating mode FMODE wider than the source and at least as wide as the target. Using FMODE will avoid rounding woes with unsigned values greater than the signed maximum value. */ for (fmode = GET_MODE (to); fmode != VOIDmode; fmode = GET_MODE_WIDER_MODE (fmode)) if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode) && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing) break; if (fmode == VOIDmode) { /* There is no such mode. Pretend the target is wide enough. */ fmode = GET_MODE (to); /* Avoid double-rounding when TO is narrower than FROM. */ if ((significand_size (fmode) + 1) < GET_MODE_BITSIZE (GET_MODE (from))) { rtx temp1; rtx neglabel = gen_label_rtx (); /* Don't use TARGET if it isn't a register, is a hard register, or is the wrong mode. */ if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER || GET_MODE (target) != fmode) target = gen_reg_rtx (fmode); imode = GET_MODE (from); do_pending_stack_adjust (); /* Test whether the sign bit is set. */ emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode, 0, neglabel); /* The sign bit is not set. Convert as signed. */ expand_float (target, from, 0); emit_jump_insn (gen_jump (label)); emit_barrier (); /* The sign bit is set. Convert to a usable (positive signed) value by shifting right one bit, while remembering if a nonzero bit was shifted out; i.e., compute (from & 1) | (from >> 1). */ emit_label (neglabel); temp = expand_binop (imode, and_optab, from, const1_rtx, NULL_RTX, 1, OPTAB_LIB_WIDEN); temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node, NULL_RTX, 1); temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1, OPTAB_LIB_WIDEN); expand_float (target, temp, 0); /* Multiply by 2 to undo the shift above. */ temp = expand_binop (fmode, add_optab, target, target, target, 0, OPTAB_LIB_WIDEN); if (temp != target) emit_move_insn (target, temp); do_pending_stack_adjust (); emit_label (label); goto done; } } /* If we are about to do some arithmetic to correct for an unsigned operand, do it in a pseudo-register. */ if (GET_MODE (to) != fmode || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER) target = gen_reg_rtx (fmode); /* Convert as signed integer to floating. */ expand_float (target, from, 0); /* If FROM is negative (and therefore TO is negative), correct its value by 2**bitwidth. */ do_pending_stack_adjust (); emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from), 0, label); real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from))); temp = expand_binop (fmode, add_optab, target, CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode), target, 0, OPTAB_LIB_WIDEN); if (temp != target) emit_move_insn (target, temp); do_pending_stack_adjust (); emit_label (label); goto done; } /* No hardware instruction available; call a library routine. */ { rtx libfunc; rtx insns; rtx value; convert_optab tab = unsignedp ? 
ufloat_optab : sfloat_optab; to = protect_from_queue (to, 1); from = protect_from_queue (from, 0); if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode)) from = convert_to_mode (SImode, from, unsignedp); if (flag_force_mem) from = force_not_mem (from); libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc; if (!libfunc) abort (); start_sequence (); value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, GET_MODE (to), 1, from, GET_MODE (from)); insns = get_insns (); end_sequence (); emit_libcall_block (insns, target, value, gen_rtx_FLOAT (GET_MODE (to), from)); } done: /* Copy result to requested destination if we have been computing in a temp location. */ if (target != to) { if (GET_MODE (target) == GET_MODE (to)) emit_move_insn (to, target); else convert_move (to, target, 0); } } /* Generate code to convert FROM to fixed point and store in TO. FROM must be floating point. */ void expand_fix (rtx to, rtx from, int unsignedp) { enum insn_code icode; rtx target = to; enum machine_mode fmode, imode; int must_trunc = 0; /* We first try to find a pair of modes, one real and one integer, at least as wide as FROM and TO, respectively, in which we can open-code this conversion. If the integer mode is wider than the mode of TO, we can do the conversion either signed or unsigned. */ for (fmode = GET_MODE (from); fmode != VOIDmode; fmode = GET_MODE_WIDER_MODE (fmode)) for (imode = GET_MODE (to); imode != VOIDmode; imode = GET_MODE_WIDER_MODE (imode)) { int doing_unsigned = unsignedp; icode = can_fix_p (imode, fmode, unsignedp, &must_trunc); if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp) icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0; if (icode != CODE_FOR_nothing) { to = protect_from_queue (to, 1); from = protect_from_queue (from, 0); if (fmode != GET_MODE (from)) from = convert_to_mode (fmode, from, 0); if (must_trunc) { rtx temp = gen_reg_rtx (GET_MODE (from)); from = expand_unop (GET_MODE (from), ftrunc_optab, from, temp, 0); } if (imode != GET_MODE (to)) target = gen_reg_rtx (imode); emit_unop_insn (icode, target, from, doing_unsigned ? UNSIGNED_FIX : FIX); if (target != to) convert_move (to, target, unsignedp); return; } } /* For an unsigned conversion, there is one more way to do it. If we have a signed conversion, we generate code that compares the real value to the largest representable positive number. If if is smaller, the conversion is done normally. Otherwise, subtract one plus the highest signed number, convert, and add it back. We only need to check all real modes, since we know we didn't find anything with a wider integer mode. This code used to extend FP value into mode wider than the destination. This is not needed. Consider, for instance conversion from SFmode into DImode. The hot path trought the code is dealing with inputs smaller than 2^63 and doing just the conversion, so there is no bits to lose. In the other path we know the value is positive in the range 2^63..2^64-1 inclusive. (as for other imput overflow happens and result is undefined) So we know that the most important bit set in mantissa corresponds to 2^63. The subtraction of 2^63 should not generate any rounding as it simply clears out that bit. The rest is trivial. 
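*/

/* Illustrative sketch, not part of GCC: the comment above describes the
   unsigned fix as "compare against 2^(N-1), subtract it if needed, convert
   as signed, then put the top bit back with XOR".  This is that scheme for
   double -> uint64_t in plain C; the input is assumed to lie in [0, 2^64)
   and the function name is made up.  Guarded with #if 0 so it never takes
   part in the build.  */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t
double_to_u64 (double d)
{
  const double two63 = 9223372036854775808.0;   /* 2^63 */

  if (d < two63)
    return (uint64_t) (int64_t) d;      /* already fits the signed range */

  /* Subtracting 2^63 only clears the leading mantissa bit, so no rounding
     occurs; the XOR restores that bit on the integer side.  */
  return (uint64_t) (int64_t) (d - two63) ^ ((uint64_t) 1 << 63);
}

int
main (void)
{
  printf ("%llu\n",
          (unsigned long long) double_to_u64 (18446744073709549568.0));
  return 0;
}
#endif

/* End of sketch.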
*/ if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT) for (fmode = GET_MODE (from); fmode != VOIDmode; fmode = GET_MODE_WIDER_MODE (fmode)) if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)) { int bitsize; REAL_VALUE_TYPE offset; rtx limit, lab1, lab2, insn; bitsize = GET_MODE_BITSIZE (GET_MODE (to)); real_2expN (&offset, bitsize - 1); limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode); lab1 = gen_label_rtx (); lab2 = gen_label_rtx (); emit_queue (); to = protect_from_queue (to, 1); from = protect_from_queue (from, 0); if (flag_force_mem) from = force_not_mem (from); if (fmode != GET_MODE (from)) from = convert_to_mode (fmode, from, 0); /* See if we need to do the subtraction. */ do_pending_stack_adjust (); emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from), 0, lab1); /* If not, do the signed "fix" and branch around fixup code. */ expand_fix (to, from, 0); emit_jump_insn (gen_jump (lab2)); emit_barrier (); /* Otherwise, subtract 2**(N-1), convert to signed number, then add 2**(N-1). Do the addition using XOR since this will often generate better code. */ emit_label (lab1); target = expand_binop (GET_MODE (from), sub_optab, from, limit, NULL_RTX, 0, OPTAB_LIB_WIDEN); expand_fix (to, target, 0); target = expand_binop (GET_MODE (to), xor_optab, to, gen_int_mode ((HOST_WIDE_INT) 1 << (bitsize - 1), GET_MODE (to)), to, 1, OPTAB_LIB_WIDEN); if (target != to) emit_move_insn (to, target); emit_label (lab2); if (mov_optab->handlers[(int) GET_MODE (to)].insn_code != CODE_FOR_nothing) { /* Make a place for a REG_NOTE and add it. */ insn = emit_move_insn (to, to); set_unique_reg_note (insn, REG_EQUAL, gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to), copy_rtx (from))); } return; } /* We can't do it with an insn, so use a library call. But first ensure that the mode of TO is at least as wide as SImode, since those are the only library calls we know about. */ if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode)) { target = gen_reg_rtx (SImode); expand_fix (target, from, unsignedp); } else { rtx insns; rtx value; rtx libfunc; convert_optab tab = unsignedp ? ufix_optab : sfix_optab; libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc; if (!libfunc) abort (); to = protect_from_queue (to, 1); from = protect_from_queue (from, 0); if (flag_force_mem) from = force_not_mem (from); start_sequence (); value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, GET_MODE (to), 1, from, GET_MODE (from)); insns = get_insns (); end_sequence (); emit_libcall_block (insns, target, value, gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX, GET_MODE (to), from)); } if (target != to) { if (GET_MODE (to) == GET_MODE (target)) emit_move_insn (to, target); else convert_move (to, target, 0); } } /* Report whether we have an instruction to perform the operation specified by CODE on operands of mode MODE. */ int have_insn_for (enum rtx_code code, enum machine_mode mode) { return (code_to_optab[(int) code] != 0 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code != CODE_FOR_nothing)); } /* Create a blank optab. 
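*/

/* Illustrative sketch, not part of GCC: have_insn_for above and the
   new_optab/init_optab constructors that follow form a two-level lookup --
   every table slot starts out as CODE_FOR_nothing, and init_optab records
   the table in code_to_optab so an operation code can be mapped back to it.
   The struct, sizes and *_sketch names here are invented; guarded with #if 0
   so it never takes part in the build.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define NUM_MODES 8
#define NUM_CODES 16
#define CODE_FOR_NOTHING (-1)

struct optab_sketch
{
  int code;                       /* operation code, or -1 if unknown */
  int insn_code[NUM_MODES];       /* one handler slot per mode */
};

static struct optab_sketch *code_to_optab_sketch[NUM_CODES];

/* Analogue of new_optab: allocate a table with every slot empty.  */
static struct optab_sketch *
new_optab_sketch (void)
{
  int i;
  struct optab_sketch *op = malloc (sizeof *op);

  if (!op)
    abort ();
  op->code = -1;
  for (i = 0; i < NUM_MODES; i++)
    op->insn_code[i] = CODE_FOR_NOTHING;
  return op;
}

/* Analogue of init_optab: remember the code and register the table.  */
static struct optab_sketch *
init_optab_sketch (int code)
{
  struct optab_sketch *op = new_optab_sketch ();

  op->code = code;
  code_to_optab_sketch[code] = op;
  return op;
}

/* Analogue of have_insn_for: code -> table -> per-mode handler.  */
static int
have_insn_for_sketch (int code, int mode)
{
  return code_to_optab_sketch[code] != NULL
         && code_to_optab_sketch[code]->insn_code[mode] != CODE_FOR_NOTHING;
}

int
main (void)
{
  struct optab_sketch *add = init_optab_sketch (3);

  add->insn_code[2] = 42;         /* pretend mode 2 has an add pattern */
  printf ("%d %d\n", have_insn_for_sketch (3, 2), have_insn_for_sketch (3, 5));
  return 0;
}
#endif

/* End of sketch.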
*/ static optab new_optab (void) { int i; optab op = ggc_alloc (sizeof (struct optab)); for (i = 0; i < NUM_MACHINE_MODES; i++) { op->handlers[i].insn_code = CODE_FOR_nothing; op->handlers[i].libfunc = 0; } return op; } static convert_optab new_convert_optab (void) { int i, j; convert_optab op = ggc_alloc (sizeof (struct convert_optab)); for (i = 0; i < NUM_MACHINE_MODES; i++) for (j = 0; j < NUM_MACHINE_MODES; j++) { op->handlers[i][j].insn_code = CODE_FOR_nothing; op->handlers[i][j].libfunc = 0; } return op; } /* Same, but fill in its code as CODE, and write it into the code_to_optab table. */ static inline optab init_optab (enum rtx_code code) { optab op = new_optab (); op->code = code; code_to_optab[(int) code] = op; return op; } /* Same, but fill in its code as CODE, and do _not_ write it into the code_to_optab table. */ static inline optab init_optabv (enum rtx_code code) { optab op = new_optab (); op->code = code; return op; } /* Conversion optabs never go in the code_to_optab table. */ static inline convert_optab init_convert_optab (enum rtx_code code) { convert_optab op = new_convert_optab (); op->code = code; return op; } /* Initialize the libfunc fields of an entire group of entries in some optab. Each entry is set equal to a string consisting of a leading pair of underscores followed by a generic operation name followed by a mode name (downshifted to lowercase) followed by a single character representing the number of operands for the given operation (which is usually one of the characters '2', '3', or '4'). OPTABLE is the table in which libfunc fields are to be initialized. FIRST_MODE is the first machine mode index in the given optab to initialize. LAST_MODE is the last machine mode index in the given optab to initialize. OPNAME is the generic (string) name of the operation. SUFFIX is the character which specifies the number of operands for the given generic operation. */ static void init_libfuncs (optab optable, int first_mode, int last_mode, const char *opname, int suffix) { int mode; unsigned opname_len = strlen (opname); for (mode = first_mode; (int) mode <= (int) last_mode; mode = (enum machine_mode) ((int) mode + 1)) { const char *mname = GET_MODE_NAME (mode); unsigned mname_len = strlen (mname); char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1); char *p; const char *q; p = libfunc_name; *p++ = '_'; *p++ = '_'; for (q = opname; *q; ) *p++ = *q++; for (q = mname; *q; q++) *p++ = TOLOWER (*q); *p++ = suffix; *p = '\0'; optable->handlers[(int) mode].libfunc = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name)); } } /* Initialize the libfunc fields of an entire group of entries in some optab which correspond to all integer mode operations. The parameters have the same meaning as similarly named ones for the `init_libfuncs' routine. (See above). */ static void init_integral_libfuncs (optab optable, const char *opname, int suffix) { int maxsize = 2*BITS_PER_WORD; if (maxsize < LONG_LONG_TYPE_SIZE) maxsize = LONG_LONG_TYPE_SIZE; init_libfuncs (optable, word_mode, mode_for_size (maxsize, MODE_INT, 0), opname, suffix); } /* Initialize the libfunc fields of an entire group of entries in some optab which correspond to all real mode operations. The parameters have the same meaning as similarly named ones for the `init_libfuncs' routine. (See above). 
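*/

/* Illustrative sketch, not part of GCC: init_libfuncs above forms names as
   two underscores, the operation name, the mode name in lower case, and a
   digit giving the operand count -- e.g. "__addsi3".  This rebuilds one such
   name into a caller-supplied buffer (assumed large enough); the helper name
   is made up and the example names merely follow the scheme documented
   above.  Guarded with #if 0 so it never takes part in the build.  */
#if 0
#include <stdio.h>
#include <ctype.h>

/* BUF must be large enough for "__" + OPNAME + MODE_NAME + SUFFIX + NUL.  */
static void
build_libfunc_name (char *buf, const char *opname,
                    const char *mode_name, char suffix)
{
  char *p = buf + sprintf (buf, "__%s", opname);

  for (; *mode_name; mode_name++)
    *p++ = (char) tolower ((unsigned char) *mode_name);
  *p++ = suffix;
  *p = '\0';
}

int
main (void)
{
  char name[32];

  build_libfunc_name (name, "add", "SI", '3');
  printf ("%s\n", name);                  /* __addsi3 */
  build_libfunc_name (name, "mul", "DI", '3');
  printf ("%s\n", name);                  /* __muldi3 */
  return 0;
}
#endif

/* End of sketch.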
*/ static void init_floating_libfuncs (optab optable, const char *opname, int suffix) { init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix); } /* Initialize the libfunc fields of an entire group of entries of an inter-mode-class conversion optab. The string formation rules are similar to the ones for init_libfuncs, above, but instead of having a mode name and an operand count these functions have two mode names and no operand count. */ static void init_interclass_conv_libfuncs (convert_optab tab, const char *opname, enum mode_class from_class, enum mode_class to_class) { enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class); enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class); size_t opname_len = strlen (opname); size_t max_mname_len = 0; enum machine_mode fmode, tmode; const char *fname, *tname; const char *q; char *libfunc_name, *suffix; char *p; for (fmode = first_from_mode; fmode != VOIDmode; fmode = GET_MODE_WIDER_MODE (fmode)) max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode))); for (tmode = first_to_mode; tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode))); libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1); libfunc_name[0] = '_'; libfunc_name[1] = '_'; memcpy (&libfunc_name[2], opname, opname_len); suffix = libfunc_name + opname_len + 2; for (fmode = first_from_mode; fmode != VOIDmode; fmode = GET_MODE_WIDER_MODE (fmode)) for (tmode = first_to_mode; tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) { fname = GET_MODE_NAME (fmode); tname = GET_MODE_NAME (tmode); p = suffix; for (q = fname; *q; p++, q++) *p = TOLOWER (*q); for (q = tname; *q; p++, q++) *p = TOLOWER (*q); *p = '\0'; tab->handlers[tmode][fmode].libfunc = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name)); } } /* Initialize the libfunc fields of an entire group of entries of an intra-mode-class conversion optab. The string formation rules are similar to the ones for init_libfunc, above. WIDENING says whether the optab goes from narrow to wide modes or vice versa. These functions have two mode names _and_ an operand count. */ static void init_intraclass_conv_libfuncs (convert_optab tab, const char *opname, enum mode_class class, bool widening) { enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class); size_t opname_len = strlen (opname); size_t max_mname_len = 0; enum machine_mode nmode, wmode; const char *nname, *wname; const char *q; char *libfunc_name, *suffix; char *p; for (nmode = first_mode; nmode != VOIDmode; nmode = GET_MODE_WIDER_MODE (nmode)) max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode))); libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1); libfunc_name[0] = '_'; libfunc_name[1] = '_'; memcpy (&libfunc_name[2], opname, opname_len); suffix = libfunc_name + opname_len + 2; for (nmode = first_mode; nmode != VOIDmode; nmode = GET_MODE_WIDER_MODE (nmode)) for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode; wmode = GET_MODE_WIDER_MODE (wmode)) { nname = GET_MODE_NAME (nmode); wname = GET_MODE_NAME (wmode); p = suffix; for (q = widening ? nname : wname; *q; p++, q++) *p = TOLOWER (*q); for (q = widening ? wname : nname; *q; p++, q++) *p = TOLOWER (*q); *p++ = '2'; *p = '\0'; tab->handlers[widening ? wmode : nmode] [widening ? 
nmode : wmode].libfunc = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name)); } } rtx init_one_libfunc (const char *name) { rtx symbol; /* Create a FUNCTION_DECL that can be passed to targetm.encode_section_info. */ /* ??? We don't have any type information except for this is a function. Pretend this is "int foo()". */ tree decl = build_decl (FUNCTION_DECL, get_identifier (name), build_function_type (integer_type_node, NULL_TREE)); DECL_ARTIFICIAL (decl) = 1; DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; symbol = XEXP (DECL_RTL (decl), 0); /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with are the flags assigned by targetm.encode_section_info. */ SYMBOL_REF_DECL (symbol) = 0; return symbol; } /* Call this to reset the function entry for one optab (OPTABLE) in mode MODE to NAME, which should be either 0 or a string constant. */ void set_optab_libfunc (optab optable, enum machine_mode mode, const char *name) { if (name) optable->handlers[mode].libfunc = init_one_libfunc (name); else optable->handlers[mode].libfunc = 0; } /* Call this to reset the function entry for one conversion optab (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be either 0 or a string constant. */ void set_conv_libfunc (convert_optab optable, enum machine_mode tmode, enum machine_mode fmode, const char *name) { if (name) optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name); else optable->handlers[tmode][fmode].libfunc = 0; } /* Call this once to initialize the contents of the optabs appropriately for the current target machine. */ void init_optabs (void) { unsigned int i; /* Start by initializing all tables to contain CODE_FOR_nothing. */ for (i = 0; i < NUM_RTX_CODE; i++) setcc_gen_code[i] = CODE_FOR_nothing; #ifdef HAVE_conditional_move for (i = 0; i < NUM_MACHINE_MODES; i++) movcc_gen_code[i] = CODE_FOR_nothing; #endif add_optab = init_optab (PLUS); addv_optab = init_optabv (PLUS); sub_optab = init_optab (MINUS); subv_optab = init_optabv (MINUS); smul_optab = init_optab (MULT); smulv_optab = init_optabv (MULT); smul_highpart_optab = init_optab (UNKNOWN); umul_highpart_optab = init_optab (UNKNOWN); smul_widen_optab = init_optab (UNKNOWN); umul_widen_optab = init_optab (UNKNOWN); sdiv_optab = init_optab (DIV); sdivv_optab = init_optabv (DIV); sdivmod_optab = init_optab (UNKNOWN); udiv_optab = init_optab (UDIV); udivmod_optab = init_optab (UNKNOWN); smod_optab = init_optab (MOD); umod_optab = init_optab (UMOD); fmod_optab = init_optab (UNKNOWN); drem_optab = init_optab (UNKNOWN); ftrunc_optab = init_optab (UNKNOWN); and_optab = init_optab (AND); ior_optab = init_optab (IOR); xor_optab = init_optab (XOR); ashl_optab = init_optab (ASHIFT); ashr_optab = init_optab (ASHIFTRT); lshr_optab = init_optab (LSHIFTRT); rotl_optab = init_optab (ROTATE); rotr_optab = init_optab (ROTATERT); smin_optab = init_optab (SMIN); smax_optab = init_optab (SMAX); umin_optab = init_optab (UMIN); umax_optab = init_optab (UMAX); pow_optab = init_optab (UNKNOWN); atan2_optab = init_optab (UNKNOWN); /* These three have codes assigned exclusively for the sake of have_insn_for. 
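*/

/* Illustrative sketch, not part of GCC: the conversion variants above glue
   two lowercased mode names onto the operation, adding a trailing '2' only
   for the intra-class (widen/narrow) case, giving names such as
   "__floatsidf" or "__extendsfdf2".  The helpers below are invented and the
   caller must supply a large enough buffer.  Guarded with #if 0 so it never
   takes part in the build.  */
#if 0
#include <stdio.h>
#include <ctype.h>

/* Append SRC to *P in lower case, advancing *P.  */
static void
append_lower (char **p, const char *src)
{
  for (; *src; src++)
    *(*p)++ = (char) tolower ((unsigned char) *src);
}

/* Build "__<op><from><to>" with an optional trailing operand count.  */
static void
build_conv_libfunc_name (char *buf, const char *opname,
                         const char *from_mode, const char *to_mode,
                         int add_operand_count)
{
  char *p = buf + sprintf (buf, "__%s", opname);

  append_lower (&p, from_mode);
  append_lower (&p, to_mode);
  if (add_operand_count)
    *p++ = '2';
  *p = '\0';
}

int
main (void)
{
  char name[64];

  build_conv_libfunc_name (name, "float", "SI", "DF", 0);
  printf ("%s\n", name);                  /* __floatsidf */
  build_conv_libfunc_name (name, "extend", "SF", "DF", 1);
  printf ("%s\n", name);                  /* __extendsfdf2 */
  return 0;
}
#endif

/* End of sketch.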
*/ mov_optab = init_optab (SET); movstrict_optab = init_optab (STRICT_LOW_PART); cmp_optab = init_optab (COMPARE); ucmp_optab = init_optab (UNKNOWN); tst_optab = init_optab (UNKNOWN); eq_optab = init_optab (EQ); ne_optab = init_optab (NE); gt_optab = init_optab (GT); ge_optab = init_optab (GE); lt_optab = init_optab (LT); le_optab = init_optab (LE); unord_optab = init_optab (UNORDERED); neg_optab = init_optab (NEG); negv_optab = init_optabv (NEG); abs_optab = init_optab (ABS); absv_optab = init_optabv (ABS); addcc_optab = init_optab (UNKNOWN); one_cmpl_optab = init_optab (NOT); ffs_optab = init_optab (FFS); clz_optab = init_optab (CLZ); ctz_optab = init_optab (CTZ); popcount_optab = init_optab (POPCOUNT); parity_optab = init_optab (PARITY); sqrt_optab = init_optab (SQRT); floor_optab = init_optab (UNKNOWN); ceil_optab = init_optab (UNKNOWN); round_optab = init_optab (UNKNOWN); btrunc_optab = init_optab (UNKNOWN); nearbyint_optab = init_optab (UNKNOWN); sincos_optab = init_optab (UNKNOWN); sin_optab = init_optab (UNKNOWN); asin_optab = init_optab (UNKNOWN); cos_optab = init_optab (UNKNOWN); acos_optab = init_optab (UNKNOWN); exp_optab = init_optab (UNKNOWN); exp10_optab = init_optab (UNKNOWN); exp2_optab = init_optab (UNKNOWN); expm1_optab = init_optab (UNKNOWN); logb_optab = init_optab (UNKNOWN); ilogb_optab = init_optab (UNKNOWN); log_optab = init_optab (UNKNOWN); log10_optab = init_optab (UNKNOWN); log2_optab = init_optab (UNKNOWN); log1p_optab = init_optab (UNKNOWN); tan_optab = init_optab (UNKNOWN); atan_optab = init_optab (UNKNOWN); strlen_optab = init_optab (UNKNOWN); cbranch_optab = init_optab (UNKNOWN); cmov_optab = init_optab (UNKNOWN); cstore_optab = init_optab (UNKNOWN); push_optab = init_optab (UNKNOWN); vec_extract_optab = init_optab (UNKNOWN); vec_set_optab = init_optab (UNKNOWN); vec_init_optab = init_optab (UNKNOWN); /* Conversions. */ sext_optab = init_convert_optab (SIGN_EXTEND); zext_optab = init_convert_optab (ZERO_EXTEND); trunc_optab = init_convert_optab (TRUNCATE); sfix_optab = init_convert_optab (FIX); ufix_optab = init_convert_optab (UNSIGNED_FIX); sfixtrunc_optab = init_convert_optab (UNKNOWN); ufixtrunc_optab = init_convert_optab (UNKNOWN); sfloat_optab = init_convert_optab (FLOAT); ufloat_optab = init_convert_optab (UNSIGNED_FLOAT); for (i = 0; i < NUM_MACHINE_MODES; i++) { movstr_optab[i] = CODE_FOR_nothing; clrstr_optab[i] = CODE_FOR_nothing; cmpstr_optab[i] = CODE_FOR_nothing; cmpmem_optab[i] = CODE_FOR_nothing; #ifdef HAVE_SECONDARY_RELOADS reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing; #endif } /* Fill in the optabs with the insns we support. */ init_all_optabs (); /* Initialize the optabs with the names of the library functions. 
*/ init_integral_libfuncs (add_optab, "add", '3'); init_floating_libfuncs (add_optab, "add", '3'); init_integral_libfuncs (addv_optab, "addv", '3'); init_floating_libfuncs (addv_optab, "add", '3'); init_integral_libfuncs (sub_optab, "sub", '3'); init_floating_libfuncs (sub_optab, "sub", '3'); init_integral_libfuncs (subv_optab, "subv", '3'); init_floating_libfuncs (subv_optab, "sub", '3'); init_integral_libfuncs (smul_optab, "mul", '3'); init_floating_libfuncs (smul_optab, "mul", '3'); init_integral_libfuncs (smulv_optab, "mulv", '3'); init_floating_libfuncs (smulv_optab, "mul", '3'); init_integral_libfuncs (sdiv_optab, "div", '3'); init_floating_libfuncs (sdiv_optab, "div", '3'); init_integral_libfuncs (sdivv_optab, "divv", '3'); init_integral_libfuncs (udiv_optab, "udiv", '3'); init_integral_libfuncs (sdivmod_optab, "divmod", '4'); init_integral_libfuncs (udivmod_optab, "udivmod", '4'); init_integral_libfuncs (smod_optab, "mod", '3'); init_integral_libfuncs (umod_optab, "umod", '3'); init_floating_libfuncs (ftrunc_optab, "ftrunc", '2'); init_integral_libfuncs (and_optab, "and", '3'); init_integral_libfuncs (ior_optab, "ior", '3'); init_integral_libfuncs (xor_optab, "xor", '3'); init_integral_libfuncs (ashl_optab, "ashl", '3'); init_integral_libfuncs (ashr_optab, "ashr", '3'); init_integral_libfuncs (lshr_optab, "lshr", '3'); init_integral_libfuncs (smin_optab, "min", '3'); init_floating_libfuncs (smin_optab, "min", '3'); init_integral_libfuncs (smax_optab, "max", '3'); init_floating_libfuncs (smax_optab, "max", '3'); init_integral_libfuncs (umin_optab, "umin", '3'); init_integral_libfuncs (umax_optab, "umax", '3'); init_integral_libfuncs (neg_optab, "neg", '2'); init_floating_libfuncs (neg_optab, "neg", '2'); init_integral_libfuncs (negv_optab, "negv", '2'); init_floating_libfuncs (negv_optab, "neg", '2'); init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2'); init_integral_libfuncs (ffs_optab, "ffs", '2'); init_integral_libfuncs (clz_optab, "clz", '2'); init_integral_libfuncs (ctz_optab, "ctz", '2'); init_integral_libfuncs (popcount_optab, "popcount", '2'); init_integral_libfuncs (parity_optab, "parity", '2'); /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */ init_integral_libfuncs (cmp_optab, "cmp", '2'); init_integral_libfuncs (ucmp_optab, "ucmp", '2'); init_floating_libfuncs (cmp_optab, "cmp", '2'); /* EQ etc are floating point only. */ init_floating_libfuncs (eq_optab, "eq", '2'); init_floating_libfuncs (ne_optab, "ne", '2'); init_floating_libfuncs (gt_optab, "gt", '2'); init_floating_libfuncs (ge_optab, "ge", '2'); init_floating_libfuncs (lt_optab, "lt", '2'); init_floating_libfuncs (le_optab, "le", '2'); init_floating_libfuncs (unord_optab, "unord", '2'); /* Conversions. */ init_interclass_conv_libfuncs (sfloat_optab, "float", MODE_INT, MODE_FLOAT); init_interclass_conv_libfuncs (sfix_optab, "fix", MODE_FLOAT, MODE_INT); init_interclass_conv_libfuncs (ufix_optab, "fixuns", MODE_FLOAT, MODE_INT); /* sext_optab is also used for FLOAT_EXTEND. */ init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true); init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false); /* Use cabs for double complex abs, since systems generally have cabs. Don't define any libcall for float complex, so that cabs will be used. */ if (complex_double_type_node) abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc = init_one_libfunc ("cabs"); /* The ffs function operates on `int'. 
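*/

/* Illustrative sketch, not part of GCC: ffs_optab is wired just below to a
   library routine named "ffs", which returns the 1-based index of the least
   significant set bit of an int (0 for zero).  This is a portable fallback
   with the same contract, written for illustration only -- it is neither the
   C library's ffs nor libgcc's implementation.  Guarded with #if 0 so it
   never takes part in the build.  */
#if 0
#include <stdio.h>

static int
ffs_sketch (int x)
{
  unsigned int u = (unsigned int) x;
  int pos = 1;

  if (u == 0)
    return 0;
  while ((u & 1) == 0)
    {
      u >>= 1;
      pos++;
    }
  return pos;
}

int
main (void)
{
  printf ("%d %d %d\n", ffs_sketch (0), ffs_sketch (1), ffs_sketch (0x50));
  return 0;
}
#endif

/* End of sketch.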
*/ ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc = init_one_libfunc ("ffs"); abort_libfunc = init_one_libfunc ("abort"); memcpy_libfunc = init_one_libfunc ("memcpy"); memmove_libfunc = init_one_libfunc ("memmove"); memcmp_libfunc = init_one_libfunc ("memcmp"); memset_libfunc = init_one_libfunc ("memset"); setbits_libfunc = init_one_libfunc ("__setbits"); unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS ? "_Unwind_SjLj_Resume" : "_Unwind_Resume"); #ifndef DONT_USE_BUILTIN_SETJMP setjmp_libfunc = init_one_libfunc ("__builtin_setjmp"); longjmp_libfunc = init_one_libfunc ("__builtin_longjmp"); #else setjmp_libfunc = init_one_libfunc ("setjmp"); longjmp_libfunc = init_one_libfunc ("longjmp"); #endif unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register"); unwind_sjlj_unregister_libfunc = init_one_libfunc ("_Unwind_SjLj_Unregister"); /* For function entry/exit instrumentation. */ profile_function_entry_libfunc = init_one_libfunc ("__cyg_profile_func_enter"); profile_function_exit_libfunc = init_one_libfunc ("__cyg_profile_func_exit"); gcov_flush_libfunc = init_one_libfunc ("__gcov_flush"); if (HAVE_conditional_trap) trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX); /* Allow the target to add more libcalls or rename some, etc. */ targetm.init_libfuncs (); } /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition CODE. Return 0 on failure. */ rtx gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1, rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED) { enum machine_mode mode = GET_MODE (op1); enum insn_code icode; rtx insn; if (!HAVE_conditional_trap) return 0; if (mode == VOIDmode) return 0; icode = cmp_optab->handlers[(int) mode].insn_code; if (icode == CODE_FOR_nothing) return 0; start_sequence (); op1 = prepare_operand (icode, op1, 0, mode, mode, 0); op2 = prepare_operand (icode, op2, 1, mode, mode, 0); if (!op1 || !op2) { end_sequence (); return 0; } emit_insn (GEN_FCN (icode) (op1, op2)); PUT_CODE (trap_rtx, code); insn = gen_conditional_trap (trap_rtx, tcode); if (insn) { emit_insn (insn); insn = get_insns (); } end_sequence (); return insn; } /* Type information for optabs.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_optabs_h[] = { { &trap_rtx, 1, sizeof (trap_rtx), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, LAST_GGC_ROOT_TAB }; /* Command line option handling. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Neil Booth. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Value of the -G xx switch, and whether it was passed or not. */ unsigned HOST_WIDE_INT g_switch_value; bool g_switch_set; /* True if we should exit after parsing options. */ bool exit_after_options; /* Print various extra warnings. -W/-Wextra. */ bool extra_warnings; /* True to warn about any objects definitions whose size is larger than N bytes. Also want about function definitions whose returned values are larger than N bytes, where N is `larger_than_size'. */ bool warn_larger_than; HOST_WIDE_INT larger_than_size; /* Nonzero means warn about constructs which might not be strict-aliasing safe. */ int warn_strict_aliasing; /* Hack for cooperation between set_Wunused and set_Wextra. */ static bool maybe_warn_unused_parameter; /* Type(s) of debugging information we are producing (if any). See flags.h for the definitions of the different possible types of debugging information. */ enum debug_info_type write_symbols = NO_DEBUG; /* Level of debugging information we are producing. See flags.h for the definitions of the different possible levels. */ enum debug_info_level debug_info_level = DINFO_LEVEL_NONE; /* Nonzero means use GNU-only extensions in the generated symbolic debugging information. Currently, this only has an effect when write_symbols is set to DBX_DEBUG, XCOFF_DEBUG, or DWARF_DEBUG. */ bool use_gnu_debug_info_extensions; /* Columns of --help display. */ static unsigned int columns = 80; /* What to print when a switch has no documentation. */ static const char undocumented_msg[] = N_("This switch lacks documentation"); /* Used for bookkeeping on whether user set these flags so -fprofile-use/-fprofile-generate does not use them. */ static bool profile_arc_flag_set, flag_profile_values_set; static bool flag_unroll_loops_set, flag_tracer_set; static bool flag_value_profile_transformations_set; static bool flag_peel_loops_set, flag_branch_probabilities_set; /* Input file names. */ const char **in_fnames; unsigned num_in_fnames; unsigned cur_in_fname; static size_t find_opt (const char *, int); static int common_handle_option (size_t scode, const char *arg, int value); static void handle_param (const char *); static void set_Wextra (int); static unsigned int handle_option (const char **argv, unsigned int lang_mask); static char *write_langs (unsigned int lang_mask); static void complain_wrong_lang (const char *, const struct cl_option *, unsigned int lang_mask); static void handle_options (unsigned int, const char **, unsigned int); static void wrap_help (const char *help, const char *item, unsigned int); static void print_help (void); static void print_param_help (void); static void print_filtered_help (unsigned int flag); static unsigned int print_switch (const char *text, unsigned int indent); static void set_debug_level (enum debug_info_type type, int extended, const char *arg); /* Perform a binary search to find which option the command-line INPUT matches. Returns its index in the option array, and N_OPTS (cl_options_count) on failure. This routine is quite subtle. 
A normal binary search is not good enough because some options can be suffixed with an argument, and multiple sub-matches can occur, e.g. input of "-pedantic" matching the initial substring of "-pedantic-errors". A more complicated example is -gstabs. It should match "-g" with an argument of "stabs". Suppose, however, that the number and list of switches are such that the binary search tests "-gen-decls" before having tested "-g". This doesn't match, and as "-gen-decls" is less than "-gstabs", it will become the lower bound of the binary search range, and "-g" will never be seen. To resolve this issue, opts.sh makes "-gen-decls" point, via the back_chain member, to "-g" so that failed searches that end between "-gen-decls" and the lexicographically subsequent switch know to go back and see if "-g" causes a match (which it does in this example). This search is done in such a way that the longest match for the front end in question wins. If there is no match for the current front end, the longest match for a different front end is returned (or N_OPTS if none) and the caller emits an error message. */ static size_t find_opt (const char *input, int lang_mask) { size_t mn, mx, md, opt_len; size_t match_wrong_lang; int comp; mn = 0; mx = cl_options_count; /* Find mn such this lexicographical inequality holds: cl_options[mn] <= input < cl_options[mn + 1]. */ while (mx - mn > 1) { md = (mn + mx) / 2; opt_len = cl_options[md].opt_len; comp = strncmp (input, cl_options[md].opt_text + 1, opt_len); if (comp < 0) mx = md; else mn = md; } /* This is the switch that is the best match but for a different front end, or cl_options_count if there is no match at all. */ match_wrong_lang = cl_options_count; /* Backtrace the chain of possible matches, returning the longest one, if any, that fits best. With current GCC switches, this loop executes at most twice. */ do { const struct cl_option *opt = &cl_options[mn]; /* Is this switch a prefix of the input? */ if (!strncmp (input, opt->opt_text + 1, opt->opt_len)) { /* If language is OK, and the match is exact or the switch takes a joined argument, return it. */ if ((opt->flags & lang_mask) && (input[opt->opt_len] == '\0' || (opt->flags & CL_JOINED))) return mn; /* If we haven't remembered a prior match, remember this one. Any prior match is necessarily better. */ if (match_wrong_lang == cl_options_count) match_wrong_lang = mn; } /* Try the next possibility. This is cl_options_count if there are no more. */ mn = opt->back_chain; } while (mn != cl_options_count); /* Return the best wrong match, or cl_options_count if none. */ return match_wrong_lang; } /* If ARG is a non-negative integer made up solely of digits, return its value, otherwise return -1. */ static int integral_argument (const char *arg) { const char *p = arg; while (*p && ISDIGIT (*p)) p++; if (*p == '\0') return atoi (arg); return -1; } /* Return a malloced slash-separated list of languages in MASK. */ static char * write_langs (unsigned int mask) { unsigned int n = 0, len = 0; const char *lang_name; char *result; for (n = 0; (lang_name = lang_names[n]) != 0; n++) if (mask & (1U << n)) len += strlen (lang_name) + 1; result = xmalloc (len); len = 0; for (n = 0; (lang_name = lang_names[n]) != 0; n++) if (mask & (1U << n)) { if (len) result[len++] = '/'; strcpy (result + len, lang_name); len += strlen (lang_name); } result[len] = 0; return result; } /* Complain that switch OPT_INDEX does not apply to this front end. 
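*/

/* Illustrative sketch, not part of GCC: find_opt above binary-searches a
   sorted option table for the greatest entry that is lexicographically <=
   the input, then follows back_chain links so that a shorter prefix such as
   "-g" can still match after "-gen-decls" has been passed over.  The toy
   table and *_sketch names are invented, and the language-mask and
   joined-argument checks are omitted.  Guarded with #if 0 so it never takes
   part in the build.  */
#if 0
#include <stdio.h>
#include <string.h>

struct opt_sketch { const char *text; int back_chain; };

/* Entries must be sorted; back_chain points at the longest shorter entry
   that is a prefix of this one, or -1.  */
static const struct opt_sketch opts[] = {
  { "g",         -1 },
  { "gen-decls",  0 },      /* chains back to "g" */
  { "gstabs",     0 },      /* chains back to "g" */
};
#define N_OPTS ((int) (sizeof opts / sizeof opts[0]))

static int
find_opt_sketch (const char *input)
{
  int mn = 0, mx = N_OPTS;

  /* Narrow MN to the greatest entry that is <= INPUT.  */
  while (mx - mn > 1)
    {
      int md = (mn + mx) / 2;

      if (strncmp (input, opts[md].text, strlen (opts[md].text)) < 0)
        mx = md;
      else
        mn = md;
    }

  /* Walk the back chain until some candidate is a prefix of INPUT.  */
  for (; mn >= 0; mn = opts[mn].back_chain)
    if (strncmp (input, opts[mn].text, strlen (opts[mn].text)) == 0)
      return mn;
  return -1;
}

int
main (void)
{
  printf ("%d\n", find_opt_sketch ("gstabs"));  /* 2: exact match */
  printf ("%d\n", find_opt_sketch ("ggdb"));    /* 0: falls back to "g" */
  return 0;
}
#endif

/* End of sketch.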
*/ static void complain_wrong_lang (const char *text, const struct cl_option *option, unsigned int lang_mask) { char *ok_langs, *bad_lang; ok_langs = write_langs (option->flags); bad_lang = write_langs (lang_mask); /* Eventually this should become a hard error IMO. */ warning ("command line option \"%s\" is valid for %s but not for %s", text, ok_langs, bad_lang); free (ok_langs); free (bad_lang); } /* Handle the switch beginning at ARGV for the language indicated by LANG_MASK. Returns the number of switches consumed. */ static unsigned int handle_option (const char **argv, unsigned int lang_mask) { size_t opt_index; const char *opt, *arg = 0; char *dup = 0; int value = 1; unsigned int result = 0; const struct cl_option *option; opt = argv[0]; /* Drop the "no-" from negative switches. */ if ((opt[1] == 'W' || opt[1] == 'f') && opt[2] == 'n' && opt[3] == 'o' && opt[4] == '-') { size_t len = strlen (opt) - 3; dup = xmalloc (len + 1); dup[0] = '-'; dup[1] = opt[1]; memcpy (dup + 2, opt + 5, len - 2 + 1); opt = dup; value = 0; } opt_index = find_opt (opt + 1, lang_mask | CL_COMMON); if (opt_index == cl_options_count) goto done; option = &cl_options[opt_index]; /* Reject negative form of switches that don't take negatives as unrecognized. */ if (!value && (option->flags & CL_REJECT_NEGATIVE)) goto done; /* We've recognized this switch. */ result = 1; /* Sort out any argument the switch takes. */ if (option->flags & CL_JOINED) { /* Have arg point to the original switch. This is because some code, such as disable_builtin_function, expects its argument to be persistent until the program exits. */ arg = argv[0] + cl_options[opt_index].opt_len + 1; if (!value) arg += strlen ("no-"); if (*arg == '\0' && !(option->flags & CL_MISSING_OK)) { if (option->flags & CL_SEPARATE) { arg = argv[1]; result = 2; } else /* Missing argument. */ arg = NULL; } } else if (option->flags & CL_SEPARATE) { arg = argv[1]; result = 2; } /* Now we've swallowed any potential argument, complain if this is a switch for a different front end. */ if (!(option->flags & (lang_mask | CL_COMMON))) { complain_wrong_lang (argv[0], option, lang_mask); goto done; } if (arg == NULL && (option->flags & (CL_JOINED | CL_SEPARATE))) { if (!lang_hooks.missing_argument (opt, opt_index)) error ("missing argument to \"%s\"", opt); goto done; } /* If the switch takes an integer, convert it. */ if (arg && (option->flags & CL_UINTEGER)) { value = integral_argument (arg); if (value == -1) { error ("argument to \"%s\" should be a non-negative integer", option->opt_text); goto done; } } if (option->flag_var) { if (option->has_set_value) { if (value) *option->flag_var = option->set_value; else *option->flag_var = !option->set_value; } else *option->flag_var = value; } if (option->flags & lang_mask) if (lang_hooks.handle_option (opt_index, arg, value) == 0) result = 0; if (result && (option->flags & CL_COMMON)) if (common_handle_option (opt_index, arg, value) == 0) result = 0; done: if (dup) free (dup); return result; } /* Decode and handle the vector of command line options. LANG_MASK contains has a single bit set representing the current language. */ static void handle_options (unsigned int argc, const char **argv, unsigned int lang_mask) { unsigned int n, i; for (i = 1; i < argc; i += n) { const char *opt = argv[i]; /* Interpret "-" or a non-switch as a file name. 
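*/

/* Illustrative sketch, not part of GCC: handle_option above recognizes a
   negative switch by rewriting "-Wno-foo" or "-fno-foo" into its positive
   spelling before the table lookup, remembering value 0.  This does just
   that rewrite; the function name is invented and the CL_* flag handling is
   left out.  Guarded with #if 0 so it never takes part in the build.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a malloced positive form of OPT and set *VALUE accordingly.  */
static char *
positive_form (const char *opt, int *value)
{
  char *dup;

  *value = 1;
  if ((opt[1] == 'W' || opt[1] == 'f')
      && opt[2] == 'n' && opt[3] == 'o' && opt[4] == '-')
    {
      size_t len = strlen (opt) - 3;          /* length without "no-" */

      dup = malloc (len + 1);
      dup[0] = '-';
      dup[1] = opt[1];
      strcpy (dup + 2, opt + 5);
      *value = 0;
      return dup;
    }
  dup = malloc (strlen (opt) + 1);
  strcpy (dup, opt);
  return dup;
}

int
main (void)
{
  int value;
  char *p = positive_form ("-fno-inline", &value);

  printf ("%s %d\n", p, value);               /* -finline 0 */
  free (p);
  return 0;
}
#endif

/* End of sketch.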
*/ if (opt[0] != '-' || opt[1] == '\0') { if (main_input_filename == NULL) main_input_filename = opt; add_input_filename (opt); n = 1; continue; } n = handle_option (argv + i, lang_mask); if (!n) { n = 1; error ("unrecognized command line option \"%s\"", opt); } } } /* Handle FILENAME from the command line. */ void add_input_filename (const char *filename) { num_in_fnames++; in_fnames = xrealloc (in_fnames, num_in_fnames * sizeof (in_fnames[0])); in_fnames[num_in_fnames - 1] = filename; } /* Parse command line options and set default flag values. Do minimal options processing. */ void decode_options (unsigned int argc, const char **argv) { unsigned int i, lang_mask; /* Perform language-specific options initialization. */ lang_mask = lang_hooks.init_options (argc, argv); lang_hooks.initialize_diagnostics (global_dc); /* Scan to see what optimization level has been specified. That will determine the default value of many flags. */ for (i = 1; i < argc; i++) { if (!strcmp (argv[i], "-O")) { optimize = 1; optimize_size = 0; } else if (argv[i][0] == '-' && argv[i][1] == 'O') { /* Handle -Os, -O2, -O3, -O69, ... */ const char *p = &argv[i][2]; if ((p[0] == 's') && (p[1] == 0)) { optimize_size = 1; /* Optimizing for size forces optimize to be 2. */ optimize = 2; } else { const int optimize_val = read_integral_parameter (p, p - 2, -1); if (optimize_val != -1) { optimize = optimize_val; optimize_size = 0; } } } } if (!optimize) { flag_merge_constants = 0; } if (optimize >= 1) { flag_defer_pop = 1; flag_thread_jumps = 1; #ifdef DELAY_SLOTS flag_delayed_branch = 1; #endif #ifdef CAN_DEBUG_WITHOUT_FP flag_omit_frame_pointer = 1; #endif flag_guess_branch_prob = 1; flag_cprop_registers = 1; flag_loop_optimize = 1; flag_if_conversion = 1; flag_if_conversion2 = 1; flag_tree_ccp = 1; flag_tree_dce = 1; flag_tree_dom = 1; flag_tree_dse = 1; flag_tree_pre = 1; flag_tree_ter = 1; flag_tree_live_range_split = 1; flag_tree_sra = 1; flag_tree_copyrename = 1; flag_tree_fre = 1; if (!optimize_size) { /* Loop header copying usually increases size of the code. This used not to be true, since quite often it is possible to verify that the condition is satisfied in the first iteration and therefore to eliminate it. Jump threading handles these cases now. */ flag_tree_ch = 1; } } if (optimize >= 2) { flag_crossjumping = 1; flag_optimize_sibling_calls = 1; flag_cse_follow_jumps = 1; flag_cse_skip_blocks = 1; flag_gcse = 1; flag_expensive_optimizations = 1; flag_strength_reduce = 1; flag_rerun_cse_after_loop = 1; flag_rerun_loop_opt = 1; flag_caller_saves = 1; flag_force_mem = 1; flag_peephole2 = 1; #ifdef INSN_SCHEDULING flag_schedule_insns = 1; flag_schedule_insns_after_reload = 1; #endif flag_regmove = 1; flag_strict_aliasing = 1; flag_delete_null_pointer_checks = 1; flag_reorder_blocks = 1; flag_reorder_functions = 1; flag_unit_at_a_time = 1; } if (optimize >= 3) { flag_inline_functions = 1; flag_unswitch_loops = 1; flag_gcse_after_reload = 1; } if (optimize < 2 || optimize_size) { align_loops = 1; align_jumps = 1; align_labels = 1; align_functions = 1; /* Don't reorder blocks when optimizing for size because extra jump insns may be created; also barrier may create extra padding. More correctly we should have a block reordering mode that tried to minimize the combined size of all the jumps. This would more or less automatically remove extra jumps, but would also try to use more short jumps instead of long jumps. 
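*/

/* Illustrative sketch, not part of GCC: decode_options above prescans the
   command line for -O, -Os and -O<n> so the optimization level is known
   before the remaining flags get their defaults; plain -O means level 1 and
   -Os forces level 2 with optimize_size set.  The helper below is invented
   and uses a simplified numeric parse.  Guarded with #if 0 so it never takes
   part in the build.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

static void
scan_optimize_level (int argc, const char **argv,
                     int *optimize, int *optimize_size)
{
  int i;

  *optimize = 0;
  *optimize_size = 0;
  for (i = 1; i < argc; i++)
    {
      const char *p = argv[i];

      if (strcmp (p, "-O") == 0)
        {
          *optimize = 1;
          *optimize_size = 0;
        }
      else if (p[0] == '-' && p[1] == 'O')
        {
          if (p[2] == 's' && p[3] == '\0')
            {
              *optimize_size = 1;
              *optimize = 2;            /* optimizing for size implies -O2 */
            }
          else if (isdigit ((unsigned char) p[2]))
            {
              *optimize = atoi (p + 2);
              *optimize_size = 0;
            }
        }
    }
}

int
main (void)
{
  const char *argv[] = { "cc1", "-O2", "-Os", "foo.c" };
  int optimize, optimize_size;

  scan_optimize_level (4, argv, &optimize, &optimize_size);
  printf ("optimize=%d size=%d\n", optimize, optimize_size);   /* 2, 1 */
  return 0;
}
#endif

/* End of sketch.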
*/ flag_reorder_blocks = 0; flag_reorder_blocks_and_partition = 0; } if (optimize_size) { /* Inlining of very small functions usually reduces total size. */ set_param_value ("max-inline-insns-single", 5); set_param_value ("max-inline-insns-auto", 5); set_param_value ("max-inline-insns-rtl", 10); flag_inline_functions = 1; } /* Initialize whether `char' is signed. */ flag_signed_char = DEFAULT_SIGNED_CHAR; /* Set this to a special "uninitialized" value. The actual default is set after target options have been processed. */ flag_short_enums = 2; /* Initialize target_flags before OPTIMIZATION_OPTIONS so the latter can modify it. */ target_flags = 0; set_target_switch (""); /* Unwind tables are always present in an ABI-conformant IA-64 object file, so the default should be ON. */ #ifdef IA64_UNWIND_INFO flag_unwind_tables = IA64_UNWIND_INFO; #endif #ifdef OPTIMIZATION_OPTIONS /* Allow default optimizations to be specified on a per-machine basis. */ OPTIMIZATION_OPTIONS (optimize, optimize_size); #endif handle_options (argc, argv, lang_mask); if (flag_pie) flag_pic = flag_pie; if (flag_pic && !flag_pie) flag_shlib = 1; if (flag_no_inline == 2) flag_no_inline = 0; else flag_really_no_inline = flag_no_inline; /* Set flag_no_inline before the post_options () hook. The C front ends use it to determine tree inlining defaults. FIXME: such code should be lang-independent when all front ends use tree inlining, in which case it, and this condition, should be moved to the top of process_options() instead. */ if (optimize == 0) { /* Inlining does not work if not optimizing, so force it not to be done. */ flag_no_inline = 1; warn_inline = 0; /* The c_decode_option function and decode_option hook set this to `2' if -Wall is used, so we can avoid giving out lots of errors for people who don't realize what -Wall does. */ if (warn_uninitialized == 1) warning ("-Wuninitialized is not supported without -O"); } if (flag_really_no_inline == 2) flag_really_no_inline = flag_no_inline; /* The optimization to partition hot and cold basic blocks into separate sections of the .o and executable files does not work (currently) with exception handling. If flag_exceptions is turned on we need to turn off the partitioning optimization. */ if (flag_exceptions && flag_reorder_blocks_and_partition) { warning ("-freorder-blocks-and-partition does not work with exceptions"); flag_reorder_blocks_and_partition = 0; flag_reorder_blocks = 1; } } /* Handle target- and language-independent options. Return zero to generate an "unknown option" message. Only options that need extra handling need to be listed here; if you simply want VALUE assigned to a variable, it happens automatically. */ static int common_handle_option (size_t scode, const char *arg, int value) { enum opt_code code = (enum opt_code) scode; switch (code) { case OPT__help: print_help (); exit_after_options = true; break; case OPT__param: handle_param (arg); break; case OPT__target_help: display_target_options (); exit_after_options = true; break; case OPT__version: print_version (stderr, ""); exit_after_options = true; break; case OPT_G: g_switch_value = value; g_switch_set = true; break; case OPT_O: case OPT_Os: /* Currently handled in a prescan. */ break; case OPT_W: /* For backward compatibility, -W is the same as -Wextra. 
*/ set_Wextra (value); break; case OPT_Wextra: set_Wextra (value); break; case OPT_Wlarger_than_: larger_than_size = value; warn_larger_than = value != -1; break; case OPT_Wstrict_aliasing: case OPT_Wstrict_aliasing_: warn_strict_aliasing = value; break; case OPT_Wunused: set_Wunused (value); break; case OPT_aux_info: case OPT_aux_info_: aux_info_file_name = arg; flag_gen_aux_info = 1; break; case OPT_auxbase: aux_base_name = arg; break; case OPT_auxbase_strip: { char *tmp = xstrdup (arg); strip_off_ending (tmp, strlen (tmp)); if (tmp[0]) aux_base_name = tmp; } break; case OPT_d: decode_d_option (arg); break; case OPT_dumpbase: dump_base_name = arg; break; case OPT_falign_functions_: align_functions = value; break; case OPT_falign_jumps_: align_jumps = value; break; case OPT_falign_labels_: align_labels = value; break; case OPT_falign_loops_: align_loops = value; break; case OPT_fbranch_probabilities: flag_branch_probabilities_set = true; break; case OPT_fcall_used_: fix_register (arg, 0, 1); break; case OPT_fcall_saved_: fix_register (arg, 0, 0); break; case OPT_fdiagnostics_show_location_: if (!strcmp (arg, "once")) diagnostic_prefixing_rule (global_dc) = DIAGNOSTICS_SHOW_PREFIX_ONCE; else if (!strcmp (arg, "every-line")) diagnostic_prefixing_rule (global_dc) = DIAGNOSTICS_SHOW_PREFIX_EVERY_LINE; else return 0; break; case OPT_fdump_: if (!dump_switch_p (arg)) return 0; break; case OPT_ffast_math: set_fast_math_flags (value); break; case OPT_ffixed_: fix_register (arg, 1, 1); break; case OPT_finline_limit_: case OPT_finline_limit_eq: set_param_value ("max-inline-insns-single", value / 2); set_param_value ("max-inline-insns-auto", value / 2); set_param_value ("max-inline-insns-rtl", value); break; case OPT_fmessage_length_: pp_set_line_maximum_length (global_dc->printer, value); break; case OPT_fpeel_loops: flag_peel_loops_set = true; break; case OPT_fprofile_arcs: profile_arc_flag_set = true; break; case OPT_fprofile_use: if (!flag_branch_probabilities_set) flag_branch_probabilities = value; if (!flag_profile_values_set) flag_profile_values = value; if (!flag_unroll_loops_set) flag_unroll_loops = value; if (!flag_peel_loops_set) flag_peel_loops = value; if (!flag_tracer_set) flag_tracer = value; if (!flag_value_profile_transformations_set) flag_value_profile_transformations = value; break; case OPT_fprofile_generate: if (!profile_arc_flag_set) profile_arc_flag = value; if (!flag_profile_values_set) flag_profile_values = value; if (!flag_value_profile_transformations_set) flag_value_profile_transformations = value; break; case OPT_fprofile_values: flag_profile_values_set = true; break; case OPT_fvpt: flag_value_profile_transformations_set = value; break; case OPT_frandom_seed: /* The real switch is -fno-random-seed. */ if (value) return 0; flag_random_seed = NULL; break; case OPT_frandom_seed_: flag_random_seed = arg; break; case OPT_fsched_verbose_: #ifdef INSN_SCHEDULING fix_sched_param ("verbose", arg); break; #else return 0; #endif case OPT_fsched_stalled_insns_: flag_sched_stalled_insns = value; if (flag_sched_stalled_insns == 0) flag_sched_stalled_insns = -1; break; case OPT_fsched_stalled_insns_dep_: flag_sched_stalled_insns_dep = value; break; case OPT_fstack_limit: /* The real switch is -fno-stack-limit. 
*/ if (value) return 0; stack_limit_rtx = NULL_RTX; break; case OPT_fstack_limit_register_: { int reg = decode_reg_name (arg); if (reg < 0) error ("unrecognized register name \"%s\"", arg); else stack_limit_rtx = gen_rtx_REG (Pmode, reg); } break; case OPT_fstack_limit_symbol_: stack_limit_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (arg)); break; case OPT_ftls_model_: if (!strcmp (arg, "global-dynamic")) flag_tls_default = TLS_MODEL_GLOBAL_DYNAMIC; else if (!strcmp (arg, "local-dynamic")) flag_tls_default = TLS_MODEL_LOCAL_DYNAMIC; else if (!strcmp (arg, "initial-exec")) flag_tls_default = TLS_MODEL_INITIAL_EXEC; else if (!strcmp (arg, "local-exec")) flag_tls_default = TLS_MODEL_LOCAL_EXEC; else warning ("unknown tls-model \"%s\"", arg); break; case OPT_ftracer: flag_tracer_set = true; break; case OPT_ftree_points_to_: if (!strcmp (arg, "andersen")) #ifdef HAVE_BANSHEE flag_tree_points_to = PTA_ANDERSEN; #else warning ("Andersen's PTA not available - libbanshee not compiled."); #endif else if (!strcmp (arg, "none")) flag_tree_points_to = PTA_NONE; else { warning ("`%s`: unknown points-to analysis algorithm", arg); return 0; } break; case OPT_funroll_loops: flag_unroll_loops_set = true; break; case OPT_g: set_debug_level (NO_DEBUG, DEFAULT_GDB_EXTENSIONS, arg); break; case OPT_gcoff: set_debug_level (SDB_DEBUG, false, arg); break; case OPT_gdwarf_2: set_debug_level (DWARF2_DEBUG, false, arg); break; case OPT_ggdb: set_debug_level (NO_DEBUG, 2, arg); break; case OPT_gstabs: case OPT_gstabs_: set_debug_level (DBX_DEBUG, code == OPT_gstabs_, arg); break; case OPT_gvms: set_debug_level (VMS_DEBUG, false, arg); break; case OPT_gxcoff: case OPT_gxcoff_: set_debug_level (XCOFF_DEBUG, code == OPT_gxcoff_, arg); break; case OPT_m: set_target_switch (arg); break; case OPT_o: asm_file_name = arg; break; case OPT_pedantic_errors: flag_pedantic_errors = pedantic = 1; break; default: /* If the flag was handled in a standard way, assume the lack of processing here is intentional. */ if (cl_options[scode].flag_var) break; abort (); } return 1; } /* Handle --param NAME=VALUE. */ static void handle_param (const char *carg) { char *equal, *arg; int value; arg = xstrdup (carg); equal = strchr (arg, '='); if (!equal) error ("%s: --param arguments should be of the form NAME=VALUE", arg); else { value = integral_argument (equal + 1); if (value == -1) error ("invalid --param value `%s'", equal + 1); else { *equal = '\0'; set_param_value (arg, value); } } free (arg); } /* Handle -W and -Wextra. */ static void set_Wextra (int setting) { extra_warnings = setting; warn_unused_value = setting; warn_unused_parameter = (setting && maybe_warn_unused_parameter); /* We save the value of warn_uninitialized, since if they put -Wuninitialized on the command line, we need to generate a warning about not using it without also specifying -O. */ if (setting == 0) warn_uninitialized = 0; else if (warn_uninitialized != 1) warn_uninitialized = 2; } /* Initialize unused warning flags. */ void set_Wunused (int setting) { warn_unused_function = setting; warn_unused_label = setting; /* Unused function parameter warnings are reported when either ``-Wextra -Wunused'' or ``-Wunused-parameter'' is specified. Thus, if -Wextra has already been seen, set warn_unused_parameter; otherwise set maybe_warn_extra_parameter, which will be picked up by set_Wextra. 
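*/

/* Illustrative sketch, not part of GCC: handle_param above splits a --param
   argument at '=' and accepts the value only if it is a string of digits
   (integral_argument).  The callback stands in for set_param_value, the
   *_sketch names are invented, and error reporting is reduced to a return
   code.  Guarded with #if 0 so it never takes part in the build.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

/* Accept only strings made entirely of digits; return -1 otherwise.  */
static int
integral_argument_sketch (const char *arg)
{
  const char *p = arg;

  while (*p && isdigit ((unsigned char) *p))
    p++;
  return *p == '\0' ? atoi (arg) : -1;
}

/* Split "NAME=VALUE" and hand the pair to SET_PARAM; 0 if malformed.  */
static int
handle_param_sketch (const char *carg, void (*set_param) (const char *, int))
{
  char *arg = malloc (strlen (carg) + 1);
  char *equal;
  int value, ok = 0;

  strcpy (arg, carg);
  equal = strchr (arg, '=');
  if (equal && (value = integral_argument_sketch (equal + 1)) != -1)
    {
      *equal = '\0';
      set_param (arg, value);
      ok = 1;
    }
  free (arg);
  return ok;
}

static void
print_param (const char *name, int value)
{
  printf ("%s = %d\n", name, value);
}

int
main (void)
{
  handle_param_sketch ("max-inline-insns-single=5", print_param);
  return 0;
}
#endif

/* End of sketch.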
*/ maybe_warn_unused_parameter = setting; warn_unused_parameter = (setting && extra_warnings); warn_unused_variable = setting; warn_unused_value = setting; } /* The following routines are useful in setting all the flags that -ffast-math and -fno-fast-math imply. */ void set_fast_math_flags (int set) { flag_trapping_math = !set; flag_unsafe_math_optimizations = set; flag_finite_math_only = set; flag_errno_math = !set; if (set) { flag_signaling_nans = 0; flag_rounding_math = 0; } } /* Return true iff flags are set as if -ffast-math. */ bool fast_math_flags_set_p (void) { return (!flag_trapping_math && flag_unsafe_math_optimizations && flag_finite_math_only && !flag_errno_math); } /* Handle a debug output -g switch. EXTENDED is true or false to support extended output (2 is special and means "-ggdb" was given). */ static void set_debug_level (enum debug_info_type type, int extended, const char *arg) { static bool type_explicit; use_gnu_debug_info_extensions = extended; if (type == NO_DEBUG) { if (write_symbols == NO_DEBUG) { write_symbols = PREFERRED_DEBUGGING_TYPE; if (extended == 2) { #ifdef DWARF2_DEBUGGING_INFO write_symbols = DWARF2_DEBUG; #elif defined DBX_DEBUGGING_INFO write_symbols = DBX_DEBUG; #endif } if (write_symbols == NO_DEBUG) warning ("target system does not support debug output"); } } else { /* Does it conflict with an already selected type? */ if (type_explicit && write_symbols != NO_DEBUG && type != write_symbols) error ("debug format \"%s\" conflicts with prior selection", debug_type_names[type]); write_symbols = type; type_explicit = true; } /* A debug flag without a level defaults to level 2. */ if (*arg == '\0') { if (!debug_info_level) debug_info_level = 2; } else { debug_info_level = integral_argument (arg); if (debug_info_level == (unsigned int) -1) error ("unrecognised debug output level \"%s\"", arg); else if (debug_info_level > 3) error ("debug output level %s is too high", arg); } } /* Output --help text. */ static void print_help (void) { size_t i; const char *p; GET_ENVIRONMENT (p, "COLUMNS"); if (p) { int value = atoi (p); if (value > 0) columns = value; } puts (_("The following options are language-independent:\n")); print_filtered_help (CL_COMMON); print_param_help (); for (i = 0; lang_names[i]; i++) { printf (_("The %s front end recognizes the following options:\n\n"), lang_names[i]); print_filtered_help (1U << i); } display_target_options (); } /* Print the help for --param. */ static void print_param_help (void) { size_t i; puts (_("The --param option recognizes the following as parameters:\n")); for (i = 0; i < LAST_PARAM; i++) { const char *help = compiler_params[i].help; const char *param = compiler_params[i].option; if (help == NULL || *help == '\0') help = undocumented_msg; /* Get the translation. */ help = _(help); wrap_help (help, param, strlen (param)); } putchar ('\n'); } /* Print help for a specific front-end, etc. */ static void print_filtered_help (unsigned int flag) { unsigned int i, len, filter, indent = 0; bool duplicates = false; const char *help, *opt, *tab; static char *printed; if (flag == CL_COMMON) { filter = flag; if (!printed) printed = xmalloc (cl_options_count); memset (printed, 0, cl_options_count); } else { /* Don't print COMMON options twice. */ filter = flag | CL_COMMON; for (i = 0; i < cl_options_count; i++) { if ((cl_options[i].flags & filter) != flag) continue; /* Skip help for internal switches. 
*/ if (cl_options[i].flags & CL_UNDOCUMENTED) continue; /* Skip switches that have already been printed, mark them to be listed later. */ if (printed[i]) { duplicates = true; indent = print_switch (cl_options[i].opt_text, indent); } } if (duplicates) { putchar ('\n'); putchar ('\n'); } } for (i = 0; i < cl_options_count; i++) { if ((cl_options[i].flags & filter) != flag) continue; /* Skip help for internal switches. */ if (cl_options[i].flags & CL_UNDOCUMENTED) continue; /* Skip switches that have already been printed. */ if (printed[i]) continue; printed[i] = true; help = cl_options[i].help; if (!help) help = undocumented_msg; /* Get the translation. */ help = _(help); tab = strchr (help, '\t'); if (tab) { len = tab - help; opt = help; help = tab + 1; } else { opt = cl_options[i].opt_text; len = strlen (opt); } wrap_help (help, opt, len); } putchar ('\n'); } /* Output ITEM, of length ITEM_WIDTH, in the left column, followed by word-wrapped HELP in a second column. */ static unsigned int print_switch (const char *text, unsigned int indent) { unsigned int len = strlen (text) + 1; /* trailing comma */ if (indent) { putchar (','); if (indent + len > columns) { putchar ('\n'); putchar (' '); indent = 1; } } else putchar (' '); putchar (' '); fputs (text, stdout); return indent + len + 1; } /* Output ITEM, of length ITEM_WIDTH, in the left column, followed by word-wrapped HELP in a second column. */ static void wrap_help (const char *help, const char *item, unsigned int item_width) { unsigned int col_width = 27; unsigned int remaining, room, len; remaining = strlen (help); do { room = columns - 3 - MAX (col_width, item_width); if (room > columns) room = 0; len = remaining; if (room < len) { unsigned int i; for (i = 0; help[i]; i++) { if (i >= room && len != remaining) break; if (help[i] == ' ') len = i; else if ((help[i] == '-' || help[i] == '/') && help[i + 1] != ' ' && i > 0 && ISALPHA (help[i - 1])) len = i + 1; } } printf( " %-*.*s %.*s\n", col_width, item_width, item, len, help); item_width = 0; while (help[len] == ' ') len++; help += len; remaining -= len; } while (remaining); } /* params.c - Run-time parameters. Copyright (C) 2001, 2003 Free Software Foundation, Inc. Written by Mark Mitchell . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* An array containing the compiler parameters and their current values. */ param_info *compiler_params; /* The number of entries in the table. */ static size_t num_compiler_params; /* Add the N PARAMS to the current list of compiler parameters. */ void add_params (const param_info params[], size_t n) { /* Allocate enough space for the new parameters. */ compiler_params = xrealloc (compiler_params, (num_compiler_params + n) * sizeof (param_info)); /* Copy them into the table. */ memcpy (compiler_params + num_compiler_params, params, n * sizeof (param_info)); /* Keep track of how many parameters we have. 
*/ num_compiler_params += n; } /* Set the VALUE associated with the parameter given by NAME. */ void set_param_value (const char *name, int value) { size_t i; /* Make sure nobody tries to set a parameter to an invalid value. */ if (value == INVALID_PARAM_VAL) abort (); /* Scan the parameter table to find a matching entry. */ for (i = 0; i < num_compiler_params; ++i) if (strcmp (compiler_params[i].option, name) == 0) { compiler_params[i].value = value; return; } /* If we didn't find this parameter, issue an error message. */ error ("invalid parameter `%s'", name); } /* Perform simple optimizations to clean up the result of reload. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static int reload_cse_noop_set_p (rtx); static void reload_cse_simplify (rtx, rtx); static void reload_cse_regs_1 (rtx); static int reload_cse_simplify_set (rtx, rtx); static int reload_cse_simplify_operands (rtx, rtx); static void reload_combine (void); static void reload_combine_note_use (rtx *, rtx); static void reload_combine_note_store (rtx, rtx, void *); static void reload_cse_move2add (rtx); static void move2add_note_store (rtx, rtx, void *); /* Call cse / combine like post-reload optimization phases. FIRST is the first instruction. */ void reload_cse_regs (rtx first ATTRIBUTE_UNUSED) { reload_cse_regs_1 (first); reload_combine (); reload_cse_move2add (first); if (flag_expensive_optimizations) reload_cse_regs_1 (first); } /* See whether a single set SET is a noop. */ static int reload_cse_noop_set_p (rtx set) { if (cselib_reg_set_mode (SET_DEST (set)) != GET_MODE (SET_DEST (set))) return 0; return rtx_equal_for_cselib_p (SET_DEST (set), SET_SRC (set)); } /* Try to simplify INSN. */ static void reload_cse_simplify (rtx insn, rtx testreg) { rtx body = PATTERN (insn); if (GET_CODE (body) == SET) { int count = 0; /* Simplify even if we may think it is a no-op. We may think a memory load of a value smaller than WORD_SIZE is redundant because we haven't taken into account possible implicit extension. reload_cse_simplify_set() will bring this out, so it's safer to simplify before we delete. */ count += reload_cse_simplify_set (body, insn); if (!count && reload_cse_noop_set_p (body)) { rtx value = SET_DEST (body); if (REG_P (value) && ! REG_FUNCTION_VALUE_P (value)) value = 0; delete_insn_and_edges (insn); return; } if (count > 0) apply_change_group (); else reload_cse_simplify_operands (insn, testreg); } else if (GET_CODE (body) == PARALLEL) { int i; int count = 0; rtx value = NULL_RTX; /* If every action in a PARALLEL is a noop, we can delete the entire PARALLEL. */ for (i = XVECLEN (body, 0) - 1; i >= 0; --i) { rtx part = XVECEXP (body, 0, i); if (GET_CODE (part) == SET) { if (! 
reload_cse_noop_set_p (part)) break; if (REG_P (SET_DEST (part)) && REG_FUNCTION_VALUE_P (SET_DEST (part))) { if (value) break; value = SET_DEST (part); } } else if (GET_CODE (part) != CLOBBER) break; } if (i < 0) { delete_insn_and_edges (insn); /* We're done with this insn. */ return; } /* It's not a no-op, but we can try to simplify it. */ for (i = XVECLEN (body, 0) - 1; i >= 0; --i) if (GET_CODE (XVECEXP (body, 0, i)) == SET) count += reload_cse_simplify_set (XVECEXP (body, 0, i), insn); if (count > 0) apply_change_group (); else reload_cse_simplify_operands (insn, testreg); } } /* Do a very simple CSE pass over the hard registers. This function detects no-op moves where we happened to assign two different pseudo-registers to the same hard register, and then copied one to the other. Reload will generate a useless instruction copying a register to itself. This function also detects cases where we load a value from memory into two different registers, and (if memory is more expensive than registers) changes it to simply copy the first register into the second register. Another optimization is performed that scans the operands of each instruction to see whether the value is already available in a hard register. It then replaces the operand with the hard register if possible, much like an optional reload would. */ static void reload_cse_regs_1 (rtx first) { rtx insn; rtx testreg = gen_rtx_REG (VOIDmode, -1); cselib_init (true); init_alias_analysis (); for (insn = first; insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) reload_cse_simplify (insn, testreg); cselib_process_insn (insn); } /* Clean up. */ end_alias_analysis (); cselib_finish (); } /* Try to simplify a single SET instruction. SET is the set pattern. INSN is the instruction it came from. This function only handles one case: if we set a register to a value which is not a register, we try to find that value in some other register and change the set into a register copy. */ static int reload_cse_simplify_set (rtx set, rtx insn) { int did_change = 0; int dreg; rtx src; enum reg_class dclass; int old_cost; cselib_val *val; struct elt_loc_list *l; #ifdef LOAD_EXTEND_OP enum rtx_code extend_op = NIL; #endif dreg = true_regnum (SET_DEST (set)); if (dreg < 0) return 0; src = SET_SRC (set); if (side_effects_p (src) || true_regnum (src) >= 0) return 0; dclass = REGNO_REG_CLASS (dreg); #ifdef LOAD_EXTEND_OP /* When replacing a memory with a register, we need to honor assumptions that combine made wrt the contents of sign bits. We'll do this by generating an extend instruction instead of a reg->reg copy. Thus the destination must be a register that we can widen. */ if (MEM_P (src) && GET_MODE_BITSIZE (GET_MODE (src)) < BITS_PER_WORD && (extend_op = LOAD_EXTEND_OP (GET_MODE (src))) != NIL && !REG_P (SET_DEST (set))) return 0; #endif val = cselib_lookup (src, GET_MODE (SET_DEST (set)), 0); if (! val) return 0; /* If memory loads are cheaper than register copies, don't change them. */ if (MEM_P (src)) old_cost = MEMORY_MOVE_COST (GET_MODE (src), dclass, 1); else if (REG_P (src)) old_cost = REGISTER_MOVE_COST (GET_MODE (src), REGNO_REG_CLASS (REGNO (src)), dclass); else old_cost = rtx_cost (src, SET); for (l = val->locs; l; l = l->next) { rtx this_rtx = l->loc; int this_cost; if (CONSTANT_P (this_rtx) && ! references_value_p (this_rtx, 0)) { #ifdef LOAD_EXTEND_OP if (extend_op != NIL) { HOST_WIDE_INT this_val; /* ??? I'm lazy and don't wish to handle CONST_DOUBLE. Other constants, such as SYMBOL_REF, cannot be extended. 
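Below, a CONST_INT candidate is adjusted to account for the implicit extension: for ZERO_EXTEND it is masked with GET_MODE_MASK of the memory mode (e.g. 0xff for a QImode load), and for SIGN_EXTEND it must already equal its sign-extended value.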
*/ if (GET_CODE (this_rtx) != CONST_INT) continue; this_val = INTVAL (this_rtx); switch (extend_op) { case ZERO_EXTEND: this_val &= GET_MODE_MASK (GET_MODE (src)); break; case SIGN_EXTEND: /* ??? In theory we're already extended. */ if (this_val == trunc_int_for_mode (this_val, GET_MODE (src))) break; default: abort (); } this_rtx = GEN_INT (this_val); } #endif this_cost = rtx_cost (this_rtx, SET); } else if (REG_P (this_rtx)) { #ifdef LOAD_EXTEND_OP if (extend_op != NIL) { this_rtx = gen_rtx_fmt_e (extend_op, word_mode, this_rtx); this_cost = rtx_cost (this_rtx, SET); } else #endif this_cost = REGISTER_MOVE_COST (GET_MODE (this_rtx), REGNO_REG_CLASS (REGNO (this_rtx)), dclass); } else continue; /* If equal costs, prefer registers over anything else. That tends to lead to smaller instructions on some machines. */ if (this_cost < old_cost || (this_cost == old_cost && REG_P (this_rtx) && !REG_P (SET_SRC (set)))) { #ifdef LOAD_EXTEND_OP if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (set))) < BITS_PER_WORD && extend_op != NIL #ifdef CANNOT_CHANGE_MODE_CLASS && !CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)), word_mode, REGNO_REG_CLASS (REGNO (SET_DEST (set)))) #endif ) { rtx wide_dest = gen_rtx_REG (word_mode, REGNO (SET_DEST (set))); ORIGINAL_REGNO (wide_dest) = ORIGINAL_REGNO (SET_DEST (set)); validate_change (insn, &SET_DEST (set), wide_dest, 1); } #endif validate_change (insn, &SET_SRC (set), copy_rtx (this_rtx), 1); old_cost = this_cost, did_change = 1; } } return did_change; } /* Try to replace operands in INSN with equivalent values that are already in registers. This can be viewed as optional reloading. For each non-register operand in the insn, see if any hard regs are known to be equivalent to that operand. Record the alternatives which can accept these hard registers. Among all alternatives, select the ones which are better or equal to the one currently matching, where "better" is in terms of '?' and '!' constraints. Among the remaining alternatives, select the one which replaces most operands with hard registers. */ static int reload_cse_simplify_operands (rtx insn, rtx testreg) { int i, j; /* For each operand, all registers that are equivalent to it. */ HARD_REG_SET equiv_regs[MAX_RECOG_OPERANDS]; const char *constraints[MAX_RECOG_OPERANDS]; /* Vector recording how bad an alternative is. */ int *alternative_reject; /* Vector recording how many registers can be introduced by choosing this alternative. */ int *alternative_nregs; /* Array of vectors recording, for each operand and each alternative, which hard register to substitute, or -1 if the operand should be left as it is. */ int *op_alt_regno[MAX_RECOG_OPERANDS]; /* Array of alternatives, sorted in order of decreasing desirability. */ int *alternative_order; extract_insn (insn); if (recog_data.n_alternatives == 0 || recog_data.n_operands == 0) return 0; /* Figure out which alternative currently matches. */ if (! constrain_operands (1)) fatal_insn_not_found (insn); alternative_reject = alloca (recog_data.n_alternatives * sizeof (int)); alternative_nregs = alloca (recog_data.n_alternatives * sizeof (int)); alternative_order = alloca (recog_data.n_alternatives * sizeof (int)); memset (alternative_reject, 0, recog_data.n_alternatives * sizeof (int)); memset (alternative_nregs, 0, recog_data.n_alternatives * sizeof (int)); /* For each operand, find out which regs are equivalent. 
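That is, collect in equiv_regs[i] every hard register that cselib currently knows to hold the same value as operand I. */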
*/ for (i = 0; i < recog_data.n_operands; i++) { cselib_val *v; struct elt_loc_list *l; rtx op; enum machine_mode mode; CLEAR_HARD_REG_SET (equiv_regs[i]); /* cselib blows up on CODE_LABELs. Trying to fix that doesn't seem right, so avoid the problem here. Likewise if we have a constant and the insn pattern doesn't tell us the mode we need. */ if (GET_CODE (recog_data.operand[i]) == CODE_LABEL || (CONSTANT_P (recog_data.operand[i]) && recog_data.operand_mode[i] == VOIDmode)) continue; op = recog_data.operand[i]; mode = GET_MODE (op); #ifdef LOAD_EXTEND_OP if (MEM_P (op) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD && LOAD_EXTEND_OP (mode) != NIL) { rtx set = single_set (insn); /* We might have multiple sets, some of which do implicit extension. Punt on this for now. */ if (! set) continue; /* If the destination is a also MEM or a STRICT_LOW_PART, no extension applies. Also, if there is an explicit extension, we don't have to worry about an implicit one. */ else if (MEM_P (SET_DEST (set)) || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART || GET_CODE (SET_SRC (set)) == ZERO_EXTEND || GET_CODE (SET_SRC (set)) == SIGN_EXTEND) ; /* Continue ordinary processing. */ #ifdef CANNOT_CHANGE_MODE_CLASS /* If the register cannot change mode to word_mode, it follows that it cannot have been used in word_mode. */ else if (REG_P (SET_DEST (set)) && CANNOT_CHANGE_MODE_CLASS (GET_MODE (SET_DEST (set)), word_mode, REGNO_REG_CLASS (REGNO (SET_DEST (set))))) ; /* Continue ordinary processing. */ #endif /* If this is a straight load, make the extension explicit. */ else if (REG_P (SET_DEST (set)) && recog_data.n_operands == 2 && SET_SRC (set) == op && SET_DEST (set) == recog_data.operand[1-i]) { validate_change (insn, recog_data.operand_loc[i], gen_rtx_fmt_e (LOAD_EXTEND_OP (mode), word_mode, op), 1); validate_change (insn, recog_data.operand_loc[1-i], gen_rtx_REG (word_mode, REGNO (SET_DEST (set))), 1); if (! apply_change_group ()) return 0; return reload_cse_simplify_operands (insn, testreg); } else /* ??? There might be arithmetic operations with memory that are safe to optimize, but is it worth the trouble? */ continue; } #endif /* LOAD_EXTEND_OP */ v = cselib_lookup (op, recog_data.operand_mode[i], 0); if (! v) continue; for (l = v->locs; l; l = l->next) if (REG_P (l->loc)) SET_HARD_REG_BIT (equiv_regs[i], REGNO (l->loc)); } for (i = 0; i < recog_data.n_operands; i++) { enum machine_mode mode; int regno; const char *p; op_alt_regno[i] = alloca (recog_data.n_alternatives * sizeof (int)); for (j = 0; j < recog_data.n_alternatives; j++) op_alt_regno[i][j] = -1; p = constraints[i] = recog_data.constraints[i]; mode = recog_data.operand_mode[i]; /* Add the reject values for each alternative given by the constraints for this operand. */ j = 0; while (*p != '\0') { char c = *p++; if (c == ',') j++; else if (c == '?') alternative_reject[j] += 3; else if (c == '!') alternative_reject[j] += 300; } /* We won't change operands which are already registers. We also don't want to modify output operands. */ regno = true_regnum (recog_data.operand[i]); if (regno >= 0 || constraints[i][0] == '=' || constraints[i][0] == '+') continue; for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) { int class = (int) NO_REGS; if (! TEST_HARD_REG_BIT (equiv_regs[i], regno)) continue; REGNO (testreg) = regno; PUT_MODE (testreg, mode); /* We found a register equal to this operand. Now look for all alternatives that can accept this register and have not been assigned a register they can use yet. 
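Alternatives are the comma-separated sections of the constraint string, so J below is advanced at each ',' and the constraint letters in between determine which register class alternative J can accept.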
*/ j = 0; p = constraints[i]; for (;;) { char c = *p; switch (c) { case '=': case '+': case '?': case '#': case '&': case '!': case '*': case '%': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'm': case '<': case '>': case 'V': case 'o': case 'E': case 'F': case 'G': case 'H': case 's': case 'i': case 'n': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'p': case 'X': /* These don't say anything we care about. */ break; case 'g': case 'r': class = reg_class_subunion[(int) class][(int) GENERAL_REGS]; break; default: class = (reg_class_subunion [(int) class] [(int) REG_CLASS_FROM_CONSTRAINT ((unsigned char) c, p)]); break; case ',': case '\0': /* See if REGNO fits this alternative, and set it up as the replacement register if we don't have one for this alternative yet and the operand being replaced is not a cheap CONST_INT. */ if (op_alt_regno[i][j] == -1 && reg_fits_class_p (testreg, class, 0, mode) && (GET_CODE (recog_data.operand[i]) != CONST_INT || (rtx_cost (recog_data.operand[i], SET) > rtx_cost (testreg, SET)))) { alternative_nregs[j]++; op_alt_regno[i][j] = regno; } j++; break; } p += CONSTRAINT_LEN (c, p); if (c == '\0') break; } } } /* Record all alternatives which are better or equal to the currently matching one in the alternative_order array. */ for (i = j = 0; i < recog_data.n_alternatives; i++) if (alternative_reject[i] <= alternative_reject[which_alternative]) alternative_order[j++] = i; recog_data.n_alternatives = j; /* Sort it. Given a small number of alternatives, a dumb algorithm won't hurt too much. */ for (i = 0; i < recog_data.n_alternatives - 1; i++) { int best = i; int best_reject = alternative_reject[alternative_order[i]]; int best_nregs = alternative_nregs[alternative_order[i]]; int tmp; for (j = i + 1; j < recog_data.n_alternatives; j++) { int this_reject = alternative_reject[alternative_order[j]]; int this_nregs = alternative_nregs[alternative_order[j]]; if (this_reject < best_reject || (this_reject == best_reject && this_nregs < best_nregs)) { best = j; best_reject = this_reject; best_nregs = this_nregs; } } tmp = alternative_order[best]; alternative_order[best] = alternative_order[i]; alternative_order[i] = tmp; } /* Substitute the operands as determined by op_alt_regno for the best alternative. */ j = alternative_order[0]; for (i = 0; i < recog_data.n_operands; i++) { enum machine_mode mode = recog_data.operand_mode[i]; if (op_alt_regno[i][j] == -1) continue; validate_change (insn, recog_data.operand_loc[i], gen_rtx_REG (mode, op_alt_regno[i][j]), 1); } for (i = recog_data.n_dups - 1; i >= 0; i--) { int op = recog_data.dup_num[i]; enum machine_mode mode = recog_data.operand_mode[op]; if (op_alt_regno[op][j] == -1) continue; validate_change (insn, recog_data.dup_loc[i], gen_rtx_REG (mode, op_alt_regno[op][j]), 1); } return apply_change_group (); } /* If reload couldn't use reg+reg+offset addressing, try to use reg+reg addressing now. This code might also be useful when reload gave up on reg+reg addressing because of clashes between the return register and INDEX_REG_CLASS. */ /* The maximum number of uses of a register we can keep track of to replace them with reg+reg addressing. */ #define RELOAD_COMBINE_MAX_USES 6 /* INSN is the insn where a register has ben used, and USEP points to the location of the register within the rtl. */ struct reg_use_postreload { rtx insn, *usep; }; /* If the register is used in some unknown fashion, USE_INDEX is negative. 
If it is dead, USE_INDEX is RELOAD_COMBINE_MAX_USES, and STORE_RUID indicates where it becomes live again. Otherwise, USE_INDEX is the index of the last encountered use of the register (which is first among these we have seen since we scan backwards), OFFSET contains the constant offset that is added to the register in all encountered uses, and USE_RUID indicates the first encountered, i.e. last, of these uses. STORE_RUID is always meaningful if we only want to use a value in a register in a different place: it denotes the next insn in the insn stream (i.e. the last encountered) that sets or clobbers the register. */ static struct { struct reg_use_postreload reg_use[RELOAD_COMBINE_MAX_USES]; int use_index; rtx offset; int store_ruid; int use_ruid; } reg_state[FIRST_PSEUDO_REGISTER]; /* Reverse linear uid. This is increased in reload_combine while scanning the instructions from last to first. It is used to set last_label_ruid and the store_ruid / use_ruid fields in reg_state. */ static int reload_combine_ruid; #define LABEL_LIVE(LABEL) \ (label_live[CODE_LABEL_NUMBER (LABEL) - min_labelno]) static void reload_combine (void) { rtx insn, set; int first_index_reg = -1; int last_index_reg = 0; int i; basic_block bb; unsigned int r; int last_label_ruid; int min_labelno, n_labels; HARD_REG_SET ever_live_at_start, *label_live; /* If reg+reg can be used in offsetable memory addresses, the main chunk of reload has already used it where appropriate, so there is no use in trying to generate it now. */ if (double_reg_address_ok && INDEX_REG_CLASS != NO_REGS) return; /* To avoid wasting too much time later searching for an index register, determine the minimum and maximum index register numbers. */ for (r = 0; r < FIRST_PSEUDO_REGISTER; r++) if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], r)) { if (first_index_reg == -1) first_index_reg = r; last_index_reg = r; } /* If no index register is available, we can quit now. */ if (first_index_reg == -1) return; /* Set up LABEL_LIVE and EVER_LIVE_AT_START. The register lifetime information is a bit fuzzy immediately after reload, but it's still good enough to determine which registers are live at a jump destination. */ min_labelno = get_first_label_num (); n_labels = max_label_num () - min_labelno; label_live = xmalloc (n_labels * sizeof (HARD_REG_SET)); CLEAR_HARD_REG_SET (ever_live_at_start); FOR_EACH_BB_REVERSE (bb) { insn = BB_HEAD (bb); if (GET_CODE (insn) == CODE_LABEL) { HARD_REG_SET live; REG_SET_TO_HARD_REG_SET (live, bb->global_live_at_start); compute_use_by_pseudos (&live, bb->global_live_at_start); COPY_HARD_REG_SET (LABEL_LIVE (insn), live); IOR_HARD_REG_SET (ever_live_at_start, live); } } /* Initialize last_label_ruid, reload_combine_ruid and reg_state. */ last_label_ruid = reload_combine_ruid = 0; for (r = 0; r < FIRST_PSEUDO_REGISTER; r++) { reg_state[r].store_ruid = reload_combine_ruid; if (fixed_regs[r]) reg_state[r].use_index = -1; else reg_state[r].use_index = RELOAD_COMBINE_MAX_USES; } for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) { rtx note; /* We cannot do our optimization across labels. Invalidating all the use information we have would be costly, so we just note where the label is and then later disable any optimization that would cross it. */ if (GET_CODE (insn) == CODE_LABEL) last_label_ruid = reload_combine_ruid; else if (GET_CODE (insn) == BARRIER) for (r = 0; r < FIRST_PSEUDO_REGISTER; r++) if (! fixed_regs[r]) reg_state[r].use_index = RELOAD_COMBINE_MAX_USES; if (! 
INSN_P (insn)) continue; reload_combine_ruid++; /* Look for (set (REGX) (CONST_INT)) (set (REGX) (PLUS (REGX) (REGY))) ... ... (MEM (REGX)) ... and convert it to (set (REGZ) (CONST_INT)) ... ... (MEM (PLUS (REGZ) (REGY)))... . First, check that we have (set (REGX) (PLUS (REGX) (REGY))) and that we know all uses of REGX before it dies. Also, explicitly check that REGX != REGY; our life information does not yet show whether REGY changes in this insn. */ set = single_set (insn); if (set != NULL_RTX && REG_P (SET_DEST (set)) && (hard_regno_nregs[REGNO (SET_DEST (set))] [GET_MODE (SET_DEST (set))] == 1) && GET_CODE (SET_SRC (set)) == PLUS && REG_P (XEXP (SET_SRC (set), 1)) && rtx_equal_p (XEXP (SET_SRC (set), 0), SET_DEST (set)) && !rtx_equal_p (XEXP (SET_SRC (set), 1), SET_DEST (set)) && last_label_ruid < reg_state[REGNO (SET_DEST (set))].use_ruid) { rtx reg = SET_DEST (set); rtx plus = SET_SRC (set); rtx base = XEXP (plus, 1); rtx prev = prev_nonnote_insn (insn); rtx prev_set = prev ? single_set (prev) : NULL_RTX; unsigned int regno = REGNO (reg); rtx const_reg = NULL_RTX; rtx reg_sum = NULL_RTX; /* Now, we need an index register. We'll set index_reg to this index register, const_reg to the register that is to be loaded with the constant (denoted as REGZ in the substitution illustration above), and reg_sum to the register-register that we want to use to substitute uses of REG (typically in MEMs) with. First check REG and BASE for being index registers; we can use them even if they are not dead. */ if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], regno) || TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], REGNO (base))) { const_reg = reg; reg_sum = plus; } else { /* Otherwise, look for a free index register. Since we have checked above that neither REG nor BASE are index registers, if we find anything at all, it will be different from these two registers. */ for (i = first_index_reg; i <= last_index_reg; i++) { if (TEST_HARD_REG_BIT (reg_class_contents[INDEX_REG_CLASS], i) && reg_state[i].use_index == RELOAD_COMBINE_MAX_USES && reg_state[i].store_ruid <= reg_state[regno].use_ruid && hard_regno_nregs[i][GET_MODE (reg)] == 1) { rtx index_reg = gen_rtx_REG (GET_MODE (reg), i); const_reg = index_reg; reg_sum = gen_rtx_PLUS (GET_MODE (reg), index_reg, base); break; } } } /* Check that PREV_SET is indeed (set (REGX) (CONST_INT)) and that (REGY), i.e. BASE, is not clobbered before the last use we'll create. */ if (prev_set != 0 && GET_CODE (SET_SRC (prev_set)) == CONST_INT && rtx_equal_p (SET_DEST (prev_set), reg) && reg_state[regno].use_index >= 0 && (reg_state[REGNO (base)].store_ruid <= reg_state[regno].use_ruid) && reg_sum != 0) { int i; /* Change destination register and, if necessary, the constant value in PREV, the constant loading instruction. */ validate_change (prev, &SET_DEST (prev_set), const_reg, 1); if (reg_state[regno].offset != const0_rtx) validate_change (prev, &SET_SRC (prev_set), GEN_INT (INTVAL (SET_SRC (prev_set)) + INTVAL (reg_state[regno].offset)), 1); /* Now for every use of REG that we have recorded, replace REG with REG_SUM. */ for (i = reg_state[regno].use_index; i < RELOAD_COMBINE_MAX_USES; i++) validate_change (reg_state[regno].reg_use[i].insn, reg_state[regno].reg_use[i].usep, /* Each change must have its own replacement. */ copy_rtx (reg_sum), 1); if (apply_change_group ()) { rtx *np; /* Delete the reg-reg addition. */ delete_insn (insn); if (reg_state[regno].offset != const0_rtx) /* Previous REG_EQUIV / REG_EQUAL notes for PREV are now invalid. 
*/ for (np = &REG_NOTES (prev); *np;) { if (REG_NOTE_KIND (*np) == REG_EQUAL || REG_NOTE_KIND (*np) == REG_EQUIV) *np = XEXP (*np, 1); else np = &XEXP (*np, 1); } reg_state[regno].use_index = RELOAD_COMBINE_MAX_USES; reg_state[REGNO (const_reg)].store_ruid = reload_combine_ruid; continue; } } } note_stores (PATTERN (insn), reload_combine_note_store, NULL); if (GET_CODE (insn) == CALL_INSN) { rtx link; for (r = 0; r < FIRST_PSEUDO_REGISTER; r++) if (call_used_regs[r]) { reg_state[r].use_index = RELOAD_COMBINE_MAX_USES; reg_state[r].store_ruid = reload_combine_ruid; } for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) { rtx usage_rtx = XEXP (XEXP (link, 0), 0); if (REG_P (usage_rtx)) { unsigned int i; unsigned int start_reg = REGNO (usage_rtx); unsigned int num_regs = hard_regno_nregs[start_reg][GET_MODE (usage_rtx)]; unsigned int end_reg = start_reg + num_regs - 1; for (i = start_reg; i <= end_reg; i++) if (GET_CODE (XEXP (link, 0)) == CLOBBER) { reg_state[i].use_index = RELOAD_COMBINE_MAX_USES; reg_state[i].store_ruid = reload_combine_ruid; } else reg_state[i].use_index = -1; } } } else if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) != RETURN) { /* Non-spill registers might be used at the call destination in some unknown fashion, so we have to mark the unknown use. */ HARD_REG_SET *live; if ((condjump_p (insn) || condjump_in_parallel_p (insn)) && JUMP_LABEL (insn)) live = &LABEL_LIVE (JUMP_LABEL (insn)); else live = &ever_live_at_start; for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; --i) if (TEST_HARD_REG_BIT (*live, i)) reg_state[i].use_index = -1; } reload_combine_note_use (&PATTERN (insn), insn); for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) { if (REG_NOTE_KIND (note) == REG_INC && REG_P (XEXP (note, 0))) { int regno = REGNO (XEXP (note, 0)); reg_state[regno].store_ruid = reload_combine_ruid; reg_state[regno].use_index = -1; } } } free (label_live); } /* Check if DST is a register or a subreg of a register; if it is, update reg_state[regno].store_ruid and reg_state[regno].use_index accordingly. Called via note_stores from reload_combine. */ static void reload_combine_note_store (rtx dst, rtx set, void *data ATTRIBUTE_UNUSED) { int regno = 0; int i; enum machine_mode mode = GET_MODE (dst); if (GET_CODE (dst) == SUBREG) { regno = subreg_regno_offset (REGNO (SUBREG_REG (dst)), GET_MODE (SUBREG_REG (dst)), SUBREG_BYTE (dst), GET_MODE (dst)); dst = SUBREG_REG (dst); } if (!REG_P (dst)) return; regno += REGNO (dst); /* note_stores might have stripped a STRICT_LOW_PART, so we have to be careful with registers / register parts that are not full words. Similarly for ZERO_EXTRACT and SIGN_EXTRACT. */ if (GET_CODE (set) != SET || GET_CODE (SET_DEST (set)) == ZERO_EXTRACT || GET_CODE (SET_DEST (set)) == SIGN_EXTRACT || GET_CODE (SET_DEST (set)) == STRICT_LOW_PART) { for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--) { reg_state[i].use_index = -1; reg_state[i].store_ruid = reload_combine_ruid; } } else { for (i = hard_regno_nregs[regno][mode] - 1 + regno; i >= regno; i--) { reg_state[i].store_ruid = reload_combine_ruid; reg_state[i].use_index = RELOAD_COMBINE_MAX_USES; } } } /* XP points to a piece of rtl that has to be checked for any uses of registers. *XP is the pattern of INSN, or a part of it. Called from reload_combine, and recursively by itself. */ static void reload_combine_note_use (rtx *xp, rtx insn) { rtx x = *xp; enum rtx_code code = x->code; const char *fmt; int i, j; rtx offset = const0_rtx; /* For the REG case below.
*/ switch (code) { case SET: if (REG_P (SET_DEST (x))) { reload_combine_note_use (&SET_SRC (x), insn); return; } break; case USE: /* If this is the USE of a return value, we can't change it. */ if (REG_P (XEXP (x, 0)) && REG_FUNCTION_VALUE_P (XEXP (x, 0))) { /* Mark the return register as used in an unknown fashion. */ rtx reg = XEXP (x, 0); int regno = REGNO (reg); int nregs = hard_regno_nregs[regno][GET_MODE (reg)]; while (--nregs >= 0) reg_state[regno + nregs].use_index = -1; return; } break; case CLOBBER: if (REG_P (SET_DEST (x))) { /* No spurious CLOBBERs of pseudo registers may remain. */ if (REGNO (SET_DEST (x)) >= FIRST_PSEUDO_REGISTER) abort (); return; } break; case PLUS: /* We are interested in (plus (reg) (const_int)) . */ if (!REG_P (XEXP (x, 0)) || GET_CODE (XEXP (x, 1)) != CONST_INT) break; offset = XEXP (x, 1); x = XEXP (x, 0); /* Fall through. */ case REG: { int regno = REGNO (x); int use_index; int nregs; /* No spurious USEs of pseudo registers may remain. */ if (regno >= FIRST_PSEUDO_REGISTER) abort (); nregs = hard_regno_nregs[regno][GET_MODE (x)]; /* We can't substitute into multi-hard-reg uses. */ if (nregs > 1) { while (--nregs >= 0) reg_state[regno + nregs].use_index = -1; return; } /* If this register is already used in some unknown fashion, we can't do anything. If we decrement the index from zero to -1, we can't store more uses, so this register becomes used in an unknown fashion. */ use_index = --reg_state[regno].use_index; if (use_index < 0) return; if (use_index != RELOAD_COMBINE_MAX_USES - 1) { /* We have found another use for a register that is already used later. Check if the offsets match; if not, mark the register as used in an unknown fashion. */ if (! rtx_equal_p (offset, reg_state[regno].offset)) { reg_state[regno].use_index = -1; return; } } else { /* This is the first use of this register we have seen since we marked it as dead. */ reg_state[regno].offset = offset; reg_state[regno].use_ruid = reload_combine_ruid; } reg_state[regno].reg_use[use_index].insn = insn; reg_state[regno].reg_use[use_index].usep = xp; return; } default: break; } /* Recursively process the components of X. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') reload_combine_note_use (&XEXP (x, i), insn); else if (fmt[i] == 'E') { for (j = XVECLEN (x, i) - 1; j >= 0; j--) reload_combine_note_use (&XVECEXP (x, i, j), insn); } } } /* See if we can reduce the cost of a constant by replacing a move with an add. We track situations in which a register is set to a constant or to a register plus a constant. */ /* We cannot do our optimization across labels. Invalidating all the information about register contents we have would be costly, so we use move2add_last_label_luid to note where the label is and then later disable any optimization that would cross it. reg_offset_postreload[n] / reg_base_reg[n] / reg_mode[n] are only valid if reg_set_luid[n] is greater than move2add_last_label_luid. */ static int reg_set_luid[FIRST_PSEUDO_REGISTER]; /* If reg_base_reg[n] is negative, register n has been set to reg_offset_postreload[n] in mode reg_mode[n] . If reg_base_reg[n] is non-negative, register n has been set to the sum of reg_offset_postreload[n] and the value of register reg_base_reg[n] before reg_set_luid[n], calculated in mode reg_mode[n] . 
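For example, after (set (reg 1) (const_int 10)) we have reg_base_reg[1] == -1 and reg_offset_postreload[1] == 10; a following (set (reg 2) (plus (reg 1) (const_int 4))) in the same mode is then recorded as the constant 14, i.e. reg_base_reg[2] == -1 and reg_offset_postreload[2] == 14.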
*/ static HOST_WIDE_INT reg_offset_postreload[FIRST_PSEUDO_REGISTER]; static int reg_base_reg[FIRST_PSEUDO_REGISTER]; static enum machine_mode reg_mode[FIRST_PSEUDO_REGISTER]; /* move2add_luid is linearly increased while scanning the instructions from first to last. It is used to set reg_set_luid in reload_cse_move2add and move2add_note_store. */ static int move2add_luid; /* move2add_last_label_luid is set whenever a label is found. Labels invalidate all previously collected reg_offset_postreload data. */ static int move2add_last_label_luid; /* ??? We don't know how zero / sign extension is handled, hence we can't go from a narrower to a wider mode. */ #define MODES_OK_FOR_MOVE2ADD(OUTMODE, INMODE) \ (GET_MODE_SIZE (OUTMODE) == GET_MODE_SIZE (INMODE) \ || (GET_MODE_SIZE (OUTMODE) <= GET_MODE_SIZE (INMODE) \ && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (OUTMODE), \ GET_MODE_BITSIZE (INMODE)))) static void reload_cse_move2add (rtx first) { int i; rtx insn; for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--) reg_set_luid[i] = 0; move2add_last_label_luid = 0; move2add_luid = 2; for (insn = first; insn; insn = NEXT_INSN (insn), move2add_luid++) { rtx pat, note; if (GET_CODE (insn) == CODE_LABEL) { move2add_last_label_luid = move2add_luid; /* We're going to increment move2add_luid twice after a label, so that we can use move2add_last_label_luid + 1 as the luid for constants. */ move2add_luid++; continue; } if (! INSN_P (insn)) continue; pat = PATTERN (insn); /* For simplicity, we only perform this optimization on straightforward SETs. */ if (GET_CODE (pat) == SET && REG_P (SET_DEST (pat))) { rtx reg = SET_DEST (pat); int regno = REGNO (reg); rtx src = SET_SRC (pat); /* Check if we have valid information on the contents of this register in the mode of REG. */ if (reg_set_luid[regno] > move2add_last_label_luid && MODES_OK_FOR_MOVE2ADD (GET_MODE (reg), reg_mode[regno])) { /* Try to transform (set (REGX) (CONST_INT A)) ... (set (REGX) (CONST_INT B)) to (set (REGX) (CONST_INT A)) ... (set (REGX) (plus (REGX) (CONST_INT B-A))) or (set (REGX) (CONST_INT A)) ... (set (STRICT_LOW_PART (REGX)) (CONST_INT B)) */ if (GET_CODE (src) == CONST_INT && reg_base_reg[regno] < 0) { rtx new_src = GEN_INT (trunc_int_for_mode (INTVAL (src) - reg_offset_postreload[regno], GET_MODE (reg))); /* (set (reg) (plus (reg) (const_int 0))) is not canonical; use (set (reg) (reg)) instead. We don't delete this insn, nor do we convert it into a note, to avoid losing register notes or the return value flag. jump2 already knows how to get rid of no-op moves. */ if (new_src == const0_rtx) { /* If the constants are different, this is a truncation, that, if turned into (set (reg) (reg)), would be discarded. Maybe we should try a truncMN pattern? 
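(NEW_SRC can be const0_rtx with A != B only when A and B agree in the low GET_MODE_BITSIZE (GET_MODE (reg)) bits and differ merely as host-wide integers.)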
*/ if (INTVAL (src) == reg_offset_postreload [regno]) validate_change (insn, &SET_SRC (pat), reg, 0); } else if (rtx_cost (new_src, PLUS) < rtx_cost (src, SET) && have_add2_insn (reg, new_src)) { rtx tem = gen_rtx_PLUS (GET_MODE (reg), reg, new_src); validate_change (insn, &SET_SRC (pat), tem, 0); } else { enum machine_mode narrow_mode; for (narrow_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); narrow_mode != GET_MODE (reg); narrow_mode = GET_MODE_WIDER_MODE (narrow_mode)) { if (have_insn_for (STRICT_LOW_PART, narrow_mode) && ((reg_offset_postreload[regno] & ~GET_MODE_MASK (narrow_mode)) == (INTVAL (src) & ~GET_MODE_MASK (narrow_mode)))) { rtx narrow_reg = gen_rtx_REG (narrow_mode, REGNO (reg)); rtx narrow_src = GEN_INT (trunc_int_for_mode (INTVAL (src), narrow_mode)); rtx new_set = gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, narrow_reg), narrow_src); if (validate_change (insn, &PATTERN (insn), new_set, 0)) break; } } } reg_set_luid[regno] = move2add_luid; reg_mode[regno] = GET_MODE (reg); reg_offset_postreload[regno] = INTVAL (src); continue; } /* Try to transform (set (REGX) (REGY)) (set (REGX) (PLUS (REGX) (CONST_INT A))) ... (set (REGX) (REGY)) (set (REGX) (PLUS (REGX) (CONST_INT B))) to (set (REGX) (REGY)) (set (REGX) (PLUS (REGX) (CONST_INT A))) ... (set (REGX) (plus (REGX) (CONST_INT B-A))) */ else if (REG_P (src) && reg_set_luid[regno] == reg_set_luid[REGNO (src)] && reg_base_reg[regno] == reg_base_reg[REGNO (src)] && MODES_OK_FOR_MOVE2ADD (GET_MODE (reg), reg_mode[REGNO (src)])) { rtx next = next_nonnote_insn (insn); rtx set = NULL_RTX; if (next) set = single_set (next); if (set && SET_DEST (set) == reg && GET_CODE (SET_SRC (set)) == PLUS && XEXP (SET_SRC (set), 0) == reg && GET_CODE (XEXP (SET_SRC (set), 1)) == CONST_INT) { rtx src3 = XEXP (SET_SRC (set), 1); HOST_WIDE_INT added_offset = INTVAL (src3); HOST_WIDE_INT base_offset = reg_offset_postreload[REGNO (src)]; HOST_WIDE_INT regno_offset = reg_offset_postreload[regno]; rtx new_src = GEN_INT (trunc_int_for_mode (added_offset + base_offset - regno_offset, GET_MODE (reg))); int success = 0; if (new_src == const0_rtx) /* See above why we create (set (reg) (reg)) here. */ success = validate_change (next, &SET_SRC (set), reg, 0); else if ((rtx_cost (new_src, PLUS) < COSTS_N_INSNS (1) + rtx_cost (src3, SET)) && have_add2_insn (reg, new_src)) { rtx newpat = gen_rtx_SET (VOIDmode, reg, gen_rtx_PLUS (GET_MODE (reg), reg, new_src)); success = validate_change (next, &PATTERN (next), newpat, 0); } if (success) delete_insn (insn); insn = next; reg_mode[regno] = GET_MODE (reg); reg_offset_postreload[regno] = trunc_int_for_mode (added_offset + base_offset, GET_MODE (reg)); continue; } } } } for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) { if (REG_NOTE_KIND (note) == REG_INC && REG_P (XEXP (note, 0))) { /* Reset the information about this register. */ int regno = REGNO (XEXP (note, 0)); if (regno < FIRST_PSEUDO_REGISTER) reg_set_luid[regno] = 0; } } note_stores (PATTERN (insn), move2add_note_store, NULL); /* If INSN is a conditional branch, we try to extract an implicit set out of it. */ if (any_condjump_p (insn) && onlyjump_p (insn)) { rtx cnd = fis_get_condition (insn); if (cnd != NULL_RTX && GET_CODE (cnd) == NE && REG_P (XEXP (cnd, 0)) /* The following two checks, which are also in move2add_note_store, are intended to reduce the number of calls to gen_rtx_SET to avoid memory allocation if possible. 
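Since the condition is (ne (reg X) (const_int C)), the fall-through code that the linear scan continues into is reached only when reg X equals C, so recording the implicit (set (reg X) (const_int C)) is safe; the CODE_LABEL at the branch target resets move2add_last_label_luid before the taken path is scanned.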
*/ && SCALAR_INT_MODE_P (GET_MODE (XEXP (cnd, 0))) && hard_regno_nregs[REGNO (XEXP (cnd, 0))][GET_MODE (XEXP (cnd, 0))] == 1 && GET_CODE (XEXP (cnd, 1)) == CONST_INT) { rtx implicit_set = gen_rtx_SET (VOIDmode, XEXP (cnd, 0), XEXP (cnd, 1)); move2add_note_store (SET_DEST (implicit_set), implicit_set, 0); } } /* If this is a CALL_INSN, all call used registers are stored with unknown values. */ if (GET_CODE (insn) == CALL_INSN) { for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--) { if (call_used_regs[i]) /* Reset the information about this register. */ reg_set_luid[i] = 0; } } } } /* SET is a SET or CLOBBER that sets DST. Update reg_set_luid, reg_offset_postreload and reg_base_reg accordingly. Called from reload_cse_move2add via note_stores. */ static void move2add_note_store (rtx dst, rtx set, void *data ATTRIBUTE_UNUSED) { unsigned int regno = 0; unsigned int i; enum machine_mode mode = GET_MODE (dst); if (GET_CODE (dst) == SUBREG) { regno = subreg_regno_offset (REGNO (SUBREG_REG (dst)), GET_MODE (SUBREG_REG (dst)), SUBREG_BYTE (dst), GET_MODE (dst)); dst = SUBREG_REG (dst); } /* Some targets do argument pushes without adding REG_INC notes. */ if (MEM_P (dst)) { dst = XEXP (dst, 0); if (GET_CODE (dst) == PRE_INC || GET_CODE (dst) == POST_INC || GET_CODE (dst) == PRE_DEC || GET_CODE (dst) == POST_DEC) reg_set_luid[REGNO (XEXP (dst, 0))] = 0; return; } if (!REG_P (dst)) return; regno += REGNO (dst); if (SCALAR_INT_MODE_P (mode) && hard_regno_nregs[regno][mode] == 1 && GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT && GET_CODE (SET_DEST (set)) != SIGN_EXTRACT && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART) { rtx src = SET_SRC (set); rtx base_reg; HOST_WIDE_INT offset; int base_regno; /* This may be different from mode, if SET_DEST (set) is a SUBREG. */ enum machine_mode dst_mode = GET_MODE (dst); switch (GET_CODE (src)) { case PLUS: if (REG_P (XEXP (src, 0))) { base_reg = XEXP (src, 0); if (GET_CODE (XEXP (src, 1)) == CONST_INT) offset = INTVAL (XEXP (src, 1)); else if (REG_P (XEXP (src, 1)) && (reg_set_luid[REGNO (XEXP (src, 1))] > move2add_last_label_luid) && (MODES_OK_FOR_MOVE2ADD (dst_mode, reg_mode[REGNO (XEXP (src, 1))]))) { if (reg_base_reg[REGNO (XEXP (src, 1))] < 0) offset = reg_offset_postreload[REGNO (XEXP (src, 1))]; /* Maybe the first register is known to be a constant. */ else if (reg_set_luid[REGNO (base_reg)] > move2add_last_label_luid && (MODES_OK_FOR_MOVE2ADD (dst_mode, reg_mode[REGNO (XEXP (src, 1))])) && reg_base_reg[REGNO (base_reg)] < 0) { offset = reg_offset_postreload[REGNO (base_reg)]; base_reg = XEXP (src, 1); } else goto invalidate; } else goto invalidate; break; } goto invalidate; case REG: base_reg = src; offset = 0; break; case CONST_INT: /* Start tracking the register as a constant. */ reg_base_reg[regno] = -1; reg_offset_postreload[regno] = INTVAL (SET_SRC (set)); /* We assign the same luid to all registers set to constants. */ reg_set_luid[regno] = move2add_last_label_luid + 1; reg_mode[regno] = mode; return; default: invalidate: /* Invalidate the contents of the register. */ reg_set_luid[regno] = 0; return; } base_regno = REGNO (base_reg); /* If information about the base register is not valid, set it up as a new base register, pretending its value is known starting from the current insn. */ if (reg_set_luid[base_regno] <= move2add_last_label_luid) { reg_base_reg[base_regno] = base_regno; reg_offset_postreload[base_regno] = 0; reg_set_luid[base_regno] = move2add_luid; reg_mode[base_regno] = mode; } else if (! 
MODES_OK_FOR_MOVE2ADD (dst_mode, reg_mode[base_regno])) goto invalidate; reg_mode[regno] = mode; /* Copy base information from our base register. */ reg_set_luid[regno] = reg_set_luid[base_regno]; reg_base_reg[regno] = reg_base_reg[base_regno]; /* Compute the sum of the offsets or constants. */ reg_offset_postreload[regno] = trunc_int_for_mode (offset + reg_offset_postreload[base_regno], dst_mode); } else { unsigned int endregno = regno + hard_regno_nregs[regno][mode]; for (i = regno; i < endregno; i++) /* Reset the information about this register. */ reg_set_luid[i] = 0; } } /* Branch prediction routines for the GNU compiler. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* References: [1] "Branch Prediction for Free" Ball and Larus; PLDI '93. [2] "Static Branch Frequency and Program Profile Analysis" Wu and Larus; MICRO-27. [3] "Corpus-based Static Branch Prediction" Calder, Grunwald, Lindsay, Martin, Mozer, and Zorn; PLDI '95. */ /* Definitions for simple data type for positive real numbers. Copyright (C) 2002, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_SREAL_H #define GCC_SREAL_H /* SREAL_PART_BITS has to be an even number. */ #if (HOST_BITS_PER_WIDE_INT / 2) % 2 == 1 #define SREAL_PART_BITS (HOST_BITS_PER_WIDE_INT / 2 - 1) #else #define SREAL_PART_BITS (HOST_BITS_PER_WIDE_INT / 2) #endif #define uhwi unsigned HOST_WIDE_INT #define MAX_HOST_WIDE_INT (((uhwi) 1 << (HOST_BITS_PER_WIDE_INT - 1)) - 1) #define SREAL_MIN_SIG ((uhwi) 1 << (SREAL_PART_BITS - 1)) #define SREAL_MAX_SIG (((uhwi) 1 << SREAL_PART_BITS) - 1) #define SREAL_MAX_EXP (INT_MAX / 4) #if SREAL_PART_BITS < 32 #define SREAL_BITS (SREAL_PART_BITS * 2) #else #define SREAL_BITS SREAL_PART_BITS #endif /* Structure for holding a simple real number. */ typedef struct sreal { #if SREAL_PART_BITS < 32 unsigned HOST_WIDE_INT sig_lo; /* Significant (lower part). */ unsigned HOST_WIDE_INT sig_hi; /* Significant (higher part). */ #else unsigned HOST_WIDE_INT sig; /* Significant. */ #endif signed int exp; /* Exponent. 
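The number represented is sig * 2^exp; when SREAL_PART_BITS < 32 the significand is split into sig_hi and sig_lo.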
*/ } sreal; extern void dump_sreal (FILE *, sreal *); extern sreal *sreal_init (sreal *, unsigned HOST_WIDE_INT, signed int); extern HOST_WIDE_INT sreal_to_int (sreal *); extern int sreal_compare (sreal *, sreal *); extern sreal *sreal_add (sreal *, sreal *, sreal *); extern sreal *sreal_sub (sreal *, sreal *, sreal *); extern sreal *sreal_mul (sreal *, sreal *, sreal *); extern sreal *sreal_div (sreal *, sreal *, sreal *); #endif /* real constants: 0, 1, 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE, 1/REG_BR_PROB_BASE, 0.5, BB_FREQ_MAX. */ static sreal real_zero, real_one, real_almost_one, real_br_prob_base, real_inv_br_prob_base, real_one_half, real_bb_freq_max; /* Random guesstimation given names. */ #define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 10 - 1) #define PROB_EVEN (REG_BR_PROB_BASE / 2) #define PROB_VERY_LIKELY (REG_BR_PROB_BASE - PROB_VERY_UNLIKELY) #define PROB_ALWAYS (REG_BR_PROB_BASE) static void combine_predictions_for_insn (rtx, basic_block); static void dump_prediction (FILE *, enum br_predictor, int, basic_block, int); static void estimate_loops_at_level (struct loop *loop); static void propagate_freq (struct loop *); static void estimate_bb_frequencies (struct loops *); static int counts_to_freqs (void); static void process_note_predictions (basic_block, int *); static void process_note_prediction (basic_block, int *, int, int); static bool last_basic_block_p (basic_block); static void compute_function_frequency (void); static void choose_function_section (void); static bool can_predict_insn_p (rtx); /* Information we hold about each branch predictor. Filled using information from predict.def. */ struct predictor_info { const char *const name; /* Name used in the debugging dumps. */ const int hitrate; /* Expected hitrate used by predict_insn_def call. */ const int flags; }; /* Use given predictor without Dempster-Shaffer theory if it matches using first_match heuristics. */ #define PRED_FLAG_FIRST_MATCH 1 /* Recompute hitrate in percent to our representation. */ #define HITRATE(VAL) ((int) ((VAL) * REG_BR_PROB_BASE + 50) / 100) #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) {NAME, HITRATE, FLAGS}, static const struct predictor_info predictor_info[]= { /* Definitions for the branch prediction routines in the GNU compiler. Copyright (C) 2001, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Before including this file, you should define a macro: DEF_PREDICTOR (ENUM, NAME, HITRATE) This macro will be called once for each predictor. The ENUM will be of type `enum predictor', and will enumerate all supported predictors. The order of DEF_PREDICTOR calls is important, as in the first match combining heuristics, the predictor appearing first in this file will win. NAME is used in the debugging output to determine predictor type. 
HITRATE is the probability that edge predicted by predictor as taken will be really taken (so it should be always above REG_BR_PROB_BASE / 2). */ /* A value used as final outcome of all heuristics. */ DEF_PREDICTOR (PRED_COMBINED, "combined", PROB_ALWAYS, 0) /* An outcome estimated by Dempster-Shaffer theory. */ DEF_PREDICTOR (PRED_DS_THEORY, "DS theory", PROB_ALWAYS, 0) /* An combined heuristics using probability determined by first matching heuristics from this list. */ DEF_PREDICTOR (PRED_FIRST_MATCH, "first match", PROB_ALWAYS, 0) /* Heuristic applying when no heuristic below applies. */ DEF_PREDICTOR (PRED_NO_PREDICTION, "no prediction", PROB_ALWAYS, 0) /* Mark unconditional jump as taken. */ DEF_PREDICTOR (PRED_UNCONDITIONAL, "unconditional jump", PROB_ALWAYS, PRED_FLAG_FIRST_MATCH) /* Use number of loop iterations determined by loop unroller to set probability. We don't want to use Dempster-Shaffer theory here, as the predictions is exact. */ DEF_PREDICTOR (PRED_LOOP_ITERATIONS, "loop iterations", PROB_ALWAYS, PRED_FLAG_FIRST_MATCH) /* Hints dropped by user via __builtin_expect feature. */ DEF_PREDICTOR (PRED_BUILTIN_EXPECT, "__builtin_expect", PROB_VERY_LIKELY, PRED_FLAG_FIRST_MATCH) /* Branch containing goto is probably not taken. */ DEF_PREDICTOR (PRED_CONTINUE, "continue", HITRATE (56), 0) /* Branch to basic block containing call marked by noreturn attribute. */ DEF_PREDICTOR (PRED_NORETURN, "noreturn call", HITRATE (99), PRED_FLAG_FIRST_MATCH) /* Loopback edge is taken. */ DEF_PREDICTOR (PRED_LOOP_BRANCH, "loop branch", HITRATE (89), PRED_FLAG_FIRST_MATCH) /* Edge causing loop to terminate is probably not taken. */ DEF_PREDICTOR (PRED_LOOP_EXIT, "loop exit", HITRATE (90), PRED_FLAG_FIRST_MATCH) /* Condition emitted by preconditiong code to ensure that variable setting number of iterations is greater than initial value of iterator. */ DEF_PREDICTOR (PRED_LOOP_CONDITION, "loop condition", PROB_VERY_LIKELY, 0) /* Preconditioning makes linear list of branches. */ DEF_PREDICTOR (PRED_LOOP_PRECONDITIONING, "loop preconditioning", PROB_VERY_LIKELY, 0) /* Copied condition for the first iteration of loop is probably true. */ DEF_PREDICTOR (PRED_LOOP_HEADER, "loop header", HITRATE (64), 0) /* Pointers are usually not NULL. */ DEF_PREDICTOR (PRED_POINTER, "pointer", HITRATE (81), 0) DEF_PREDICTOR (PRED_TREE_POINTER, "pointer (on trees)", HITRATE (81), 0) /* NE is probable, EQ not etc... */ DEF_PREDICTOR (PRED_OPCODE_POSITIVE, "opcode values positive", HITRATE (79), 0) DEF_PREDICTOR (PRED_OPCODE_NONEQUAL, "opcode values nonequal", HITRATE (71), 0) DEF_PREDICTOR (PRED_FPOPCODE, "fp_opcode", HITRATE (90), 0) DEF_PREDICTOR (PRED_TREE_OPCODE_POSITIVE, "opcode values positive (on trees)", HITRATE (79), 0) DEF_PREDICTOR (PRED_TREE_OPCODE_NONEQUAL, "opcode values nonequal (on trees)", HITRATE (71), 0) DEF_PREDICTOR (PRED_TREE_FPOPCODE, "fp_opcode (on trees)", HITRATE (90), 0) /* Branch guarding call is probably taken. */ DEF_PREDICTOR (PRED_CALL, "call", HITRATE (70), 0) /* Branch causing function to terminate is probably not taken. */ DEF_PREDICTOR (PRED_EARLY_RETURN, "early return", HITRATE (67), 0) /* Branch containing goto is probably not taken. */ DEF_PREDICTOR (PRED_GOTO, "goto", HITRATE (70), 0) /* Branch ending with return constant is probably not taken. */ DEF_PREDICTOR (PRED_CONST_RETURN, "const return", HITRATE (95), 0) /* Branch ending with return negative constant is probably not taken. 
*/ DEF_PREDICTOR (PRED_NEGATIVE_RETURN, "negative return", HITRATE (96), 0) /* Branch ending with return; is probably not taken */ DEF_PREDICTOR (PRED_NULL_RETURN, "null return", HITRATE (90), 0) /* Branches to a mudflap bounds check are extremely unlikely. */ DEF_PREDICTOR (PRED_MUDFLAP, "mudflap check", HITRATE (99), 0) /* Upper bound on predictors. */ {NULL, 0, 0} }; #undef DEF_PREDICTOR /* Return true in case BB can be CPU intensive and should be optimized for maximal performance. */ bool maybe_hot_bb_p (basic_block bb) { if (profile_info && flag_branch_probabilities && (bb->count < profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION))) return false; if (bb->frequency < BB_FREQ_MAX / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)) return false; return true; } /* Return true in case BB is cold and should be optimized for size. */ bool probably_cold_bb_p (basic_block bb) { if (profile_info && flag_branch_probabilities && (bb->count < profile_info->sum_max / PARAM_VALUE (HOT_BB_COUNT_FRACTION))) return true; if (bb->frequency < BB_FREQ_MAX / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)) return true; return false; } /* Return true in case BB is probably never executed. */ bool probably_never_executed_bb_p (basic_block bb) { if (profile_info && flag_branch_probabilities) return ((bb->count + profile_info->runs / 2) / profile_info->runs) == 0; return false; } /* Return true if the one of outgoing edges is already predicted by PREDICTOR. */ bool rtl_predicted_by_p (basic_block bb, enum br_predictor predictor) { rtx note; if (!INSN_P (BB_END (bb))) return false; for (note = REG_NOTES (BB_END (bb)); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_BR_PRED && INTVAL (XEXP (XEXP (note, 0), 0)) == (int)predictor) return true; return false; } /* Return true if the one of outgoing edges is already predicted by PREDICTOR. */ bool tree_predicted_by_p (basic_block bb, enum br_predictor predictor) { struct edge_prediction *i = bb_ann (bb)->predictions; for (i = bb_ann (bb)->predictions; i; i = i->next) if (i->predictor == predictor) return true; return false; } void predict_insn (rtx insn, enum br_predictor predictor, int probability) { if (!any_condjump_p (insn)) abort (); if (!flag_guess_branch_prob) return; REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_BR_PRED, gen_rtx_CONCAT (VOIDmode, GEN_INT ((int) predictor), GEN_INT ((int) probability)), REG_NOTES (insn)); } /* Predict insn by given predictor. */ void predict_insn_def (rtx insn, enum br_predictor predictor, enum prediction taken) { int probability = predictor_info[(int) predictor].hitrate; if (taken != TAKEN) probability = REG_BR_PROB_BASE - probability; predict_insn (insn, predictor, probability); } /* Predict edge E with given probability if possible. */ void rtl_predict_edge (edge e, enum br_predictor predictor, int probability) { rtx last_insn; last_insn = BB_END (e->src); /* We can store the branch prediction information only about conditional jumps. */ if (!any_condjump_p (last_insn)) return; /* We always store probability of branching. */ if (e->flags & EDGE_FALLTHRU) probability = REG_BR_PROB_BASE - probability; predict_insn (last_insn, predictor, probability); } /* Predict edge E with the given PROBABILITY. 
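On the tree level the prediction is not attached as an insn note; instead a ggc-allocated struct edge_prediction is pushed onto the predictions list of the source block's annotation, and combine_predictions_for_bb later folds that list into the probabilities of the block's two outgoing edges.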
*/ void tree_predict_edge (edge e, enum br_predictor predictor, int probability) { struct edge_prediction *i = ggc_alloc (sizeof (struct edge_prediction)); i->next = bb_ann (e->src)->predictions; bb_ann (e->src)->predictions = i; i->probability = probability; i->predictor = predictor; i->edge = e; } /* Return true when we can store prediction on insn INSN. At the moment we represent predictions only on conditional jumps, not at computed jump or other complicated cases. */ static bool can_predict_insn_p (rtx insn) { return (GET_CODE (insn) == JUMP_INSN && any_condjump_p (insn) && BLOCK_FOR_INSN (insn)->succ->succ_next); } /* Predict edge E by given predictor if possible. */ void predict_edge_def (edge e, enum br_predictor predictor, enum prediction taken) { int probability = predictor_info[(int) predictor].hitrate; if (taken != TAKEN) probability = REG_BR_PROB_BASE - probability; predict_edge (e, predictor, probability); } /* Invert all branch predictions or probability notes in the INSN. This needs to be done each time we invert the condition used by the jump. */ void invert_br_probabilities (rtx insn) { rtx note; for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_BR_PROB) XEXP (note, 0) = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (note, 0))); else if (REG_NOTE_KIND (note) == REG_BR_PRED) XEXP (XEXP (note, 0), 1) = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (XEXP (note, 0), 1))); } /* Dump information about the branch prediction to the output file. */ static void dump_prediction (FILE *file, enum br_predictor predictor, int probability, basic_block bb, int used) { edge e = bb->succ; if (!file) return; while (e && (e->flags & EDGE_FALLTHRU)) e = e->succ_next; fprintf (file, " %s heuristics%s: %.1f%%", predictor_info[predictor].name, used ? "" : " (ignored)", probability * 100.0 / REG_BR_PROB_BASE); if (bb->count) { fprintf (file, " exec "); fprintf (file, HOST_WIDEST_INT_PRINT_DEC, bb->count); if (e) { fprintf (file, " hit "); fprintf (file, HOST_WIDEST_INT_PRINT_DEC, e->count); fprintf (file, " (%.1f%%)", e->count * 100.0 / bb->count); } } fprintf (file, "\n"); } /* Combine all REG_BR_PRED notes into single probability and attach REG_BR_PROB note if not already present. Remove now useless REG_BR_PRED notes. */ static void combine_predictions_for_insn (rtx insn, basic_block bb) { rtx prob_note = find_reg_note (insn, REG_BR_PROB, 0); rtx *pnote = ®_NOTES (insn); rtx note; int best_probability = PROB_EVEN; int best_predictor = END_PREDICTORS; int combined_probability = REG_BR_PROB_BASE / 2; int d; bool first_match = false; bool found = false; if (dump_file) fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn), bb->index); /* We implement "first match" heuristics and use probability guessed by predictor with smallest index. */ for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_BR_PRED) { int predictor = INTVAL (XEXP (XEXP (note, 0), 0)); int probability = INTVAL (XEXP (XEXP (note, 0), 1)); found = true; if (best_predictor > predictor) best_probability = probability, best_predictor = predictor; d = (combined_probability * probability + (REG_BR_PROB_BASE - combined_probability) * (REG_BR_PROB_BASE - probability)); /* Use FP math to avoid overflows of 32bit integers. */ if (d == 0) /* If one probability is 0% and one 100%, avoid division by zero. 
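The update above is the two-hypothesis Dempster-Shafer combination: with d = c * p + (BASE - c) * (BASE - p), the new combined probability is c * p * BASE / d. A rough worked example, assuming the usual REG_BR_PROB_BASE of 10000: starting from the even prior c = 5000, a predictor with p = 9000 gives d = 50000000 and a new c of 9000; a second agreeing predictor with p = 9000 then gives d = 82000000 and c of about 9878, so independent agreeing heuristics reinforce one another, while a disagreeing one pulls the estimate back toward even.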
*/ combined_probability = REG_BR_PROB_BASE / 2; else combined_probability = (((double) combined_probability) * probability * REG_BR_PROB_BASE / d + 0.5); } /* Decide which heuristic to use. In case we didn't match anything, use no_prediction heuristic, in case we did match, use either first match or Dempster-Shaffer theory depending on the flags. */ if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH) first_match = true; if (!found) dump_prediction (dump_file, PRED_NO_PREDICTION, combined_probability, bb, true); else { dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, bb, !first_match); dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, bb, first_match); } if (first_match) combined_probability = best_probability; dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true); while (*pnote) { if (REG_NOTE_KIND (*pnote) == REG_BR_PRED) { int predictor = INTVAL (XEXP (XEXP (*pnote, 0), 0)); int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1)); dump_prediction (dump_file, predictor, probability, bb, !first_match || best_predictor == predictor); *pnote = XEXP (*pnote, 1); } else pnote = &XEXP (*pnote, 1); } if (!prob_note) { REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (combined_probability), REG_NOTES (insn)); /* Save the prediction into CFG in case we are seeing non-degenerated conditional jump. */ if (bb->succ->succ_next) { BRANCH_EDGE (bb)->probability = combined_probability; FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - combined_probability; } } } /* Combine predictions into single probability and store them into CFG. Remove now useless prediction entries. */ static void combine_predictions_for_bb (FILE *file, basic_block bb) { int best_probability = PROB_EVEN; int best_predictor = END_PREDICTORS; int combined_probability = REG_BR_PROB_BASE / 2; int d; bool first_match = false; bool found = false; struct edge_prediction *pred; int nedges = 0; edge e, first = NULL, second = NULL; for (e = bb->succ; e; e = e->succ_next) if (!(e->flags & (EDGE_EH | EDGE_FAKE))) { nedges ++; if (first && !second) second = e; if (!first) first = e; } /* When there is no successor or only one choice, prediction is easy. We are lazy for now and predict only basic blocks with two outgoing edges. It is possible to predict generic case too, but we have to ignore first match heuristics and do more involved combining. Implement this later. */ if (nedges != 2) { for (e = bb->succ; e; e = e->succ_next) if (!(e->flags & (EDGE_EH | EDGE_FAKE))) e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; else e->probability = 0; bb_ann (bb)->predictions = NULL; if (file) fprintf (file, "%i edges in bb %i predicted to even probabilities\n", nedges, bb->index); return; } if (file) fprintf (file, "Predictions for bb %i\n", bb->index); /* We implement "first match" heuristics and use probability guessed by predictor with smallest index. */ for (pred = bb_ann (bb)->predictions; pred; pred = pred->next) { int predictor = pred->predictor; int probability = pred->probability; if (pred->edge != first) probability = REG_BR_PROB_BASE - probability; found = true; if (best_predictor > predictor) best_probability = probability, best_predictor = predictor; d = (combined_probability * probability + (REG_BR_PROB_BASE - combined_probability) * (REG_BR_PROB_BASE - probability)); /* Use FP math to avoid overflows of 32bit integers. */ if (d == 0) /* If one probability is 0% and one 100%, avoid division by zero. 
*/ combined_probability = REG_BR_PROB_BASE / 2; else combined_probability = (((double) combined_probability) * probability * REG_BR_PROB_BASE / d + 0.5); } /* Decide which heuristic to use. In case we didn't match anything, use no_prediction heuristic, in case we did match, use either first match or Dempster-Shaffer theory depending on the flags. */ if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH) first_match = true; if (!found) dump_prediction (file, PRED_NO_PREDICTION, combined_probability, bb, true); else { dump_prediction (file, PRED_DS_THEORY, combined_probability, bb, !first_match); dump_prediction (file, PRED_FIRST_MATCH, best_probability, bb, first_match); } if (first_match) combined_probability = best_probability; dump_prediction (file, PRED_COMBINED, combined_probability, bb, true); for (pred = bb_ann (bb)->predictions; pred; pred = pred->next) { int predictor = pred->predictor; int probability = pred->probability; if (pred->edge != bb->succ) probability = REG_BR_PROB_BASE - probability; dump_prediction (file, predictor, probability, bb, !first_match || best_predictor == predictor); } bb_ann (bb)->predictions = NULL; first->probability = combined_probability; second->probability = REG_BR_PROB_BASE - combined_probability; } /* Predict edge probabilities by exploiting loop structure. When SIMPLELOOPS is set, attempt to count number of iterations by analyzing RTL. */ static void predict_loops (struct loops *loops_info, bool simpleloops) { unsigned i; /* Try to predict out blocks in a loop that are not part of a natural loop. */ for (i = 1; i < loops_info->num; i++) { basic_block bb, *bbs; unsigned j; int exits; struct loop *loop = loops_info->parray[i]; struct niter_desc desc; unsigned HOST_WIDE_INT niter; flow_loop_scan (loop, LOOP_EXIT_EDGES); exits = loop->num_exits; if (simpleloops) { iv_analysis_loop_init (loop); find_simple_exit (loop, &desc); if (desc.simple_p && desc.const_iter) { int prob; niter = desc.niter + 1; if (niter == 0) /* We might overflow here. */ niter = desc.niter; prob = (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + niter /2) / niter); /* Branch prediction algorithm gives 0 frequency for everything after the end of loop for loop having 0 probability to finish. */ if (prob == REG_BR_PROB_BASE) prob = REG_BR_PROB_BASE - 1; predict_edge (desc.in_edge, PRED_LOOP_ITERATIONS, prob); } } bbs = get_loop_body (loop); for (j = 0; j < loop->num_nodes; j++) { int header_found = 0; edge e; bb = bbs[j]; /* Bypass loop heuristics on continue statement. These statements construct loops via "non-loop" constructs in the source language and are better to be handled separately. */ if ((simpleloops && !can_predict_insn_p (BB_END (bb))) || predicted_by_p (bb, PRED_CONTINUE)) continue; /* Loop branch heuristics - predict an edge back to a loop's head as taken. */ for (e = bb->succ; e; e = e->succ_next) if (e->dest == loop->header && e->src == loop->latch) { header_found = 1; predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN); } /* Loop exit heuristics - predict an edge exiting the loop if the conditional has no loop header successors as not taken. */ if (!header_found) for (e = bb->succ; e; e = e->succ_next) if (e->dest->index < 0 || !flow_bb_inside_loop_p (loop, e->dest)) predict_edge (e, PRED_LOOP_EXIT, (REG_BR_PROB_BASE - predictor_info [(int) PRED_LOOP_EXIT].hitrate) / exits); } /* Free basic blocks from get_loop_body. */ free (bbs); } } /* Statically estimate the probability that a branch will be taken and produce estimated profile. 
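The pass runs in stages: predict_loops marks loop branches and exits (using iv analysis to obtain exact iteration counts where it can), the per-edge and per-opcode heuristics below attach REG_BR_PRED notes to conditional jumps, combine_predictions_for_insn merges those notes into one REG_BR_PROB note and a pair of edge probabilities per jump, and estimate_bb_frequencies finally turns the edge probabilities into block frequencies.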
When profile feedback is present never executed portions of function gets estimated. */ void estimate_probability (struct loops *loops_info) { basic_block bb; connect_infinite_loops_to_exit (); calculate_dominance_info (CDI_DOMINATORS); calculate_dominance_info (CDI_POST_DOMINATORS); predict_loops (loops_info, true); iv_analysis_done (); /* Attempt to predict conditional jumps using a number of heuristics. */ FOR_EACH_BB (bb) { rtx last_insn = BB_END (bb); rtx cond, earliest; edge e; if (! can_predict_insn_p (last_insn)) continue; for (e = bb->succ; e; e = e->succ_next) { /* Predict early returns to be probable, as we've already taken care for error returns and other are often used for fast paths trought function. */ if ((e->dest == EXIT_BLOCK_PTR || (e->dest->succ && !e->dest->succ->succ_next && e->dest->succ->dest == EXIT_BLOCK_PTR)) && !predicted_by_p (bb, PRED_NULL_RETURN) && !predicted_by_p (bb, PRED_CONST_RETURN) && !predicted_by_p (bb, PRED_NEGATIVE_RETURN) && !last_basic_block_p (e->dest)) predict_edge_def (e, PRED_EARLY_RETURN, TAKEN); /* Look for block we are guarding (ie we dominate it, but it doesn't postdominate us). */ if (e->dest != EXIT_BLOCK_PTR && e->dest != bb && dominated_by_p (CDI_DOMINATORS, e->dest, e->src) && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest)) { rtx insn; /* The call heuristic claims that a guarded function call is improbable. This is because such calls are often used to signal exceptional situations such as printing error messages. */ for (insn = BB_HEAD (e->dest); insn != NEXT_INSN (BB_END (e->dest)); insn = NEXT_INSN (insn)) if (GET_CODE (insn) == CALL_INSN /* Constant and pure calls are hardly used to signalize something exceptional. */ && ! CONST_OR_PURE_CALL_P (insn)) { predict_edge_def (e, PRED_CALL, NOT_TAKEN); break; } } } cond = get_condition (last_insn, &earliest, false); if (! cond) continue; /* Try "pointer heuristic." A comparison ptr == 0 is predicted as false. Similarly, a comparison ptr1 == ptr2 is predicted as false. */ if (COMPARISON_P (cond) && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0))) || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1))))) { if (GET_CODE (cond) == EQ) predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN); else if (GET_CODE (cond) == NE) predict_insn_def (last_insn, PRED_POINTER, TAKEN); } else /* Try "opcode heuristic." EQ tests are usually false and NE tests are usually true. Also, most quantities are positive, so we can make the appropriate guesses about signed comparisons against zero. */ switch (GET_CODE (cond)) { case CONST_INT: /* Unconditional branch. */ predict_insn_def (last_insn, PRED_UNCONDITIONAL, cond == const0_rtx ? NOT_TAKEN : TAKEN); break; case EQ: case UNEQ: /* Floating point comparisons appears to behave in a very unpredictable way because of special role of = tests in FP code. */ if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0)))) ; /* Comparisons with 0 are often used for booleans and there is nothing useful to predict about them. */ else if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 0) == const0_rtx) ; else predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN); break; case NE: case LTGT: /* Floating point comparisons appears to behave in a very unpredictable way because of special role of = tests in FP code. */ if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0)))) ; /* Comparisons with 0 are often used for booleans and there is nothing useful to predict about them. 
*/ else if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 0) == const0_rtx) ; else predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN); break; case ORDERED: predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN); break; case UNORDERED: predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN); break; case LE: case LT: if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx || XEXP (cond, 1) == constm1_rtx) predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN); break; case GE: case GT: if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx || XEXP (cond, 1) == constm1_rtx) predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN); break; default: break; } } /* Attach the combined probability to each conditional jump. */ FOR_EACH_BB (bb) if (GET_CODE (BB_END (bb)) == JUMP_INSN && any_condjump_p (BB_END (bb)) && bb->succ->succ_next != NULL) combine_predictions_for_insn (BB_END (bb), bb); remove_fake_edges (); /* Fill in the probability values in flowgraph based on the REG_BR_PROB notes. */ FOR_EACH_BB (bb) { rtx last_insn = BB_END (bb); if (!can_predict_insn_p (last_insn)) { /* We can predict only conditional jumps at the moment. Expect each edge to be equally probable. ?? In the future we want to make abnormal edges improbable. */ int nedges = 0; edge e; for (e = bb->succ; e; e = e->succ_next) { nedges++; if (e->probability != 0) break; } if (!e) for (e = bb->succ; e; e = e->succ_next) e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges; } } estimate_bb_frequencies (loops_info); free_dominance_info (CDI_POST_DOMINATORS); } /* Predict using opcode of the last statement in basic block. */ static void tree_predict_by_opcode (basic_block bb) { tree stmt = last_stmt (bb); edge then_edge; tree cond; tree op0; tree type; if (!stmt || TREE_CODE (stmt) != COND_EXPR) return; for (then_edge = bb->succ; then_edge; then_edge = then_edge->succ_next) if (then_edge->flags & EDGE_TRUE_VALUE) break; cond = TREE_OPERAND (stmt, 0); if (TREE_CODE_CLASS (TREE_CODE (cond)) != '<') return; op0 = TREE_OPERAND (cond, 0); type = TREE_TYPE (op0); /* Try "pointer heuristic." A comparison ptr == 0 is predicted as false. Similarly, a comparison ptr1 == ptr2 is predicted as false. */ if (POINTER_TYPE_P (type)) { if (TREE_CODE (cond) == EQ_EXPR) predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN); else if (TREE_CODE (cond) == NE_EXPR) predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN); } else /* Try "opcode heuristic." EQ tests are usually false and NE tests are usually true. Also, most quantities are positive, so we can make the appropriate guesses about signed comparisons against zero. */ switch (TREE_CODE (cond)) { case EQ_EXPR: case UNEQ_EXPR: /* Floating point comparisons appears to behave in a very unpredictable way because of special role of = tests in FP code. */ if (FLOAT_TYPE_P (type)) ; /* Comparisons with 0 are often used for booleans and there is nothing useful to predict about them. */ else if (integer_zerop (op0) || integer_zerop (TREE_OPERAND (cond, 1))) ; else predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN); break; case NE_EXPR: case LTGT_EXPR: /* Floating point comparisons appears to behave in a very unpredictable way because of special role of = tests in FP code. */ if (FLOAT_TYPE_P (type)) ; /* Comparisons with 0 are often used for booleans and there is nothing useful to predict about them. 
*/ else if (integer_zerop (op0) || integer_zerop (TREE_OPERAND (cond, 1))) ; else predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN); break; case ORDERED_EXPR: predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN); break; case UNORDERED_EXPR: predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN); break; case LE_EXPR: case LT_EXPR: if (integer_zerop (TREE_OPERAND (cond, 1)) || integer_onep (TREE_OPERAND (cond, 1)) || integer_all_onesp (TREE_OPERAND (cond, 1)) || real_zerop (TREE_OPERAND (cond, 1)) || real_onep (TREE_OPERAND (cond, 1)) || real_minus_onep (TREE_OPERAND (cond, 1))) predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN); break; case GE_EXPR: case GT_EXPR: if (integer_zerop (TREE_OPERAND (cond, 1)) || integer_onep (TREE_OPERAND (cond, 1)) || integer_all_onesp (TREE_OPERAND (cond, 1)) || real_zerop (TREE_OPERAND (cond, 1)) || real_onep (TREE_OPERAND (cond, 1)) || real_minus_onep (TREE_OPERAND (cond, 1))) predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN); break; default: break; } } /* Predict branch probabilities and estimate profile of the tree CFG. */ static void tree_estimate_probability (void) { basic_block bb; struct loops loops_info; flow_loops_find (&loops_info, LOOP_TREE); if (dump_file && (dump_flags & TDF_DETAILS)) flow_loops_dump (&loops_info, dump_file, NULL, 0); connect_infinite_loops_to_exit (); calculate_dominance_info (CDI_DOMINATORS); calculate_dominance_info (CDI_POST_DOMINATORS); predict_loops (&loops_info, false); FOR_EACH_BB (bb) { edge e; for (e = bb->succ; e; e = e->succ_next) { /* Predict early returns to be probable, as we've already taken care for error returns and other are often used for fast paths trought function. */ if ((e->dest == EXIT_BLOCK_PTR || (e->dest->succ && !e->dest->succ->succ_next && e->dest->succ->dest == EXIT_BLOCK_PTR)) && !predicted_by_p (bb, PRED_NULL_RETURN) && !predicted_by_p (bb, PRED_CONST_RETURN) && !predicted_by_p (bb, PRED_NEGATIVE_RETURN) && !last_basic_block_p (e->dest)) predict_edge_def (e, PRED_EARLY_RETURN, TAKEN); /* Look for block we are guarding (ie we dominate it, but it doesn't postdominate us). */ if (e->dest != EXIT_BLOCK_PTR && e->dest != bb && dominated_by_p (CDI_DOMINATORS, e->dest, e->src) && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest)) { block_stmt_iterator bi; /* The call heuristic claims that a guarded function call is improbable. This is because such calls are often used to signal exceptional situations such as printing error messages. */ for (bi = bsi_start (e->dest); !bsi_end_p (bi); bsi_next (&bi)) { tree stmt = bsi_stmt (bi); if ((TREE_CODE (stmt) == CALL_EXPR || (TREE_CODE (stmt) == MODIFY_EXPR && TREE_CODE (TREE_OPERAND (stmt, 1)) == CALL_EXPR)) /* Constant and pure calls are hardly used to signalize something exceptional. */ && TREE_SIDE_EFFECTS (stmt)) { predict_edge_def (e, PRED_CALL, NOT_TAKEN); break; } } } } tree_predict_by_opcode (bb); } FOR_EACH_BB (bb) combine_predictions_for_bb (dump_file, bb); estimate_bb_frequencies (&loops_info); free_dominance_info (CDI_POST_DOMINATORS); remove_fake_edges (); flow_loops_free (&loops_info); if (dump_file && (dump_flags & TDF_DETAILS)) dump_tree_cfg (dump_file, dump_flags); } /* __builtin_expect dropped tokens into the insn stream describing expected values of registers. Generate branch probabilities based off these values. 
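A rough sketch of the intent, under the usual expansion of the builtin: for source like

	if (__builtin_expect (x, 0))
	  handle_error ();

(handle_error being just an illustrative name) the first argument is evaluated into a pseudo, say r70, and the note records (eq r70 (const_int 0)), meaning the value is expected to be zero. The loop below remembers that register, and on reaching the conditional jump it canonicalizes the jump condition against it, substitutes the expected constant, simplifies the now constant comparison down to const_true_rtx or const0_rtx, and emits a PRED_BUILTIN_EXPECT hint accordingly.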
*/ void expected_value_to_br_prob (void) { rtx insn, cond, ev = NULL_RTX, ev_reg = NULL_RTX; for (insn = get_insns (); insn ; insn = NEXT_INSN (insn)) { switch (GET_CODE (insn)) { case NOTE: /* Look for expected value notes. */ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EXPECTED_VALUE) { ev = NOTE_EXPECTED_VALUE (insn); ev_reg = XEXP (ev, 0); delete_insn (insn); } continue; case CODE_LABEL: /* Never propagate across labels. */ ev = NULL_RTX; continue; case JUMP_INSN: /* Look for simple conditional branches. If we haven't got an expected value yet, no point going further. */ if (GET_CODE (insn) != JUMP_INSN || ev == NULL_RTX || ! any_condjump_p (insn)) continue; break; default: /* Look for insns that clobber the EV register. */ if (ev && reg_set_p (ev_reg, insn)) ev = NULL_RTX; continue; } /* Collect the branch condition, hopefully relative to EV_REG. */ /* ??? At present we'll miss things like (expected_value (eq r70 0)) (set r71 -1) (set r80 (lt r70 r71)) (set pc (if_then_else (ne r80 0) ...)) as canonicalize_condition will render this to us as (lt r70, r71) Could use cselib to try and reduce this further. */ cond = XEXP (SET_SRC (pc_set (insn)), 0); cond = canonicalize_condition (insn, cond, 0, NULL, ev_reg, false); if (! cond || XEXP (cond, 0) != ev_reg || GET_CODE (XEXP (cond, 1)) != CONST_INT) continue; /* Substitute and simplify. Given that the expression we're building involves two constants, we should wind up with either true or false. */ cond = gen_rtx_fmt_ee (GET_CODE (cond), VOIDmode, XEXP (ev, 1), XEXP (cond, 1)); cond = simplify_rtx (cond); /* Turn the condition into a scaled branch probability. */ if (cond != const_true_rtx && cond != const0_rtx) abort (); predict_insn_def (insn, PRED_BUILTIN_EXPECT, cond == const_true_rtx ? TAKEN : NOT_TAKEN); } } /* Check whether this is the last basic block of function. Commonly there is one extra common cleanup block. */ static bool last_basic_block_p (basic_block bb) { if (bb == EXIT_BLOCK_PTR) return false; return (bb->next_bb == EXIT_BLOCK_PTR || (bb->next_bb->next_bb == EXIT_BLOCK_PTR && bb->succ && !bb->succ->succ_next && bb->succ->dest->next_bb == EXIT_BLOCK_PTR)); } /* Sets branch probabilities according to PREDiction and FLAGS. HEADS[bb->index] should be index of basic block in that we need to alter branch predictions (i.e. the first of our dominators such that we do not post-dominate it) (but we fill this information on demand, so -1 may be there in case this was not needed yet). */ static void process_note_prediction (basic_block bb, int *heads, int pred, int flags) { edge e; int y; bool taken; taken = flags & IS_TAKEN; if (heads[bb->index] < 0) { /* This is first time we need this field in heads array; so find first dominator that we do not post-dominate (we are using already known members of heads array). */ basic_block ai = bb; basic_block next_ai = get_immediate_dominator (CDI_DOMINATORS, bb); int head; while (heads[next_ai->index] < 0) { if (!dominated_by_p (CDI_POST_DOMINATORS, next_ai, bb)) break; heads[next_ai->index] = ai->index; ai = next_ai; next_ai = get_immediate_dominator (CDI_DOMINATORS, next_ai); } if (!dominated_by_p (CDI_POST_DOMINATORS, next_ai, bb)) head = next_ai->index; else head = heads[next_ai->index]; while (next_ai != bb) { next_ai = ai; if (heads[ai->index] == ENTRY_BLOCK) ai = ENTRY_BLOCK_PTR; else ai = BASIC_BLOCK (heads[ai->index]); heads[next_ai->index] = head; } } y = heads[bb->index]; /* Now find the edge that leads to our branch and aply the prediction. 
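That is, among the successors of the dominator block recorded in heads[bb->index], predict the edge whose destination BB post-dominates, since control that takes that edge must eventually reach the block where the NOTE_INSN_PREDICTION was found.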
*/ if (y == last_basic_block || !can_predict_insn_p (BB_END (BASIC_BLOCK (y)))) return; for (e = BASIC_BLOCK (y)->succ; e; e = e->succ_next) if (e->dest->index >= 0 && dominated_by_p (CDI_POST_DOMINATORS, e->dest, bb)) predict_edge_def (e, pred, taken); } /* Gathers NOTE_INSN_PREDICTIONs in given basic block and turns them into branch probabilities. For description of heads array, see process_note_prediction. */ static void process_note_predictions (basic_block bb, int *heads) { rtx insn; edge e; /* Additionally, we check here for blocks with no successors. */ int contained_noreturn_call = 0; int was_bb_head = 0; int noreturn_block = 1; for (insn = BB_END (bb); insn; was_bb_head |= (insn == BB_HEAD (bb)), insn = PREV_INSN (insn)) { if (GET_CODE (insn) != NOTE) { if (was_bb_head) break; else { /* Noreturn calls cause program to exit, therefore they are always predicted as not taken. */ if (GET_CODE (insn) == CALL_INSN && find_reg_note (insn, REG_NORETURN, NULL)) contained_noreturn_call = 1; continue; } } if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PREDICTION) { int alg = (int) NOTE_PREDICTION_ALG (insn); /* Process single prediction note. */ process_note_prediction (bb, heads, alg, (int) NOTE_PREDICTION_FLAGS (insn)); delete_insn (insn); } } for (e = bb->succ; e; e = e->succ_next) if (!(e->flags & EDGE_FAKE)) noreturn_block = 0; if (contained_noreturn_call) { /* This block ended from other reasons than because of return. If it is because of noreturn call, this should certainly not be taken. Otherwise it is probably some error recovery. */ process_note_prediction (bb, heads, PRED_NORETURN, NOT_TAKEN); } } /* Gathers NOTE_INSN_PREDICTIONs and turns them into branch probabilities. */ void note_prediction_to_br_prob (void) { basic_block bb; int *heads; /* To enable handling of noreturn blocks. */ add_noreturn_fake_exit_edges (); connect_infinite_loops_to_exit (); calculate_dominance_info (CDI_POST_DOMINATORS); calculate_dominance_info (CDI_DOMINATORS); heads = xmalloc (sizeof (int) * last_basic_block); memset (heads, -1, sizeof (int) * last_basic_block); heads[ENTRY_BLOCK_PTR->next_bb->index] = last_basic_block; /* Process all prediction notes. */ FOR_EACH_BB (bb) process_note_predictions (bb, heads); free_dominance_info (CDI_POST_DOMINATORS); free_dominance_info (CDI_DOMINATORS); free (heads); remove_fake_edges (); } /* This is used to carry information about basic blocks. It is attached to the AUX field of the standard CFG block. */ typedef struct predict_block_info_def { /* Estimated frequency of execution of basic_block. */ sreal frequency; /* To keep queue of basic blocks to process. */ basic_block next; /* True if block needs to be visited in propagate_freq. */ unsigned int tovisit:1; /* Number of predecessors we need to visit first. */ int npredecessors; } *predict_block_info; /* Similar information for edges. */ typedef struct edge_info_def { /* In case edge is an loopback edge, the probability edge will be reached in case header is. Estimated number of iterations of the loop can be then computed as 1 / (1 - back_edge_prob). */ sreal back_edge_prob; /* True if the edge is an loopback edge in the natural loop. */ unsigned int back_edge:1; } *edge_info; #define PREDICT_BLOCK_INFO(B) ((predict_block_info) (B)->aux) #define PRED_EDGE_INFO(E) ((edge_info) (E)->aux) /* Helper function for estimate_bb_frequencies. Propagate the frequencies for LOOP. 
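Blocks inside LOOP are visited once all of their non-back predecessors have been visited; the header starts at frequency 1, every other block receives the sum over its incoming non-back edges of the source frequency times the edge probability divided by REG_BR_PROB_BASE, and blocks that are re-entered through marked back edges have that sum divided by (1 - cyclic_probability). Because back_edge_prob approximates the chance of another iteration, this is the usual 1 / (1 - back_edge_prob) estimate of the iteration count; for instance a back edge taken with probability 0.9 makes the loop body roughly ten times as frequent as the code entering it.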
*/ static void propagate_freq (struct loop *loop) { basic_block head = loop->header; basic_block bb; basic_block last; edge e; basic_block nextbb; /* For each basic block we need to visit count number of his predecessors we need to visit first. */ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { if (PREDICT_BLOCK_INFO (bb)->tovisit) { int count = 0; for (e = bb->pred; e; e = e->pred_next) if (PREDICT_BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) count++; else if (PREDICT_BLOCK_INFO (e->src)->tovisit && dump_file && !PRED_EDGE_INFO (e)->back_edge) fprintf (dump_file, "Irreducible region hit, ignoring edge to %i->%i\n", e->src->index, bb->index); PREDICT_BLOCK_INFO (bb)->npredecessors = count; } } memcpy (&PREDICT_BLOCK_INFO (head)->frequency, &real_one, sizeof (real_one)); last = head; for (bb = head; bb; bb = nextbb) { sreal cyclic_probability, frequency; memcpy (&cyclic_probability, &real_zero, sizeof (real_zero)); memcpy (&frequency, &real_zero, sizeof (real_zero)); nextbb = PREDICT_BLOCK_INFO (bb)->next; PREDICT_BLOCK_INFO (bb)->next = NULL; /* Compute frequency of basic block. */ if (bb != head) { #ifdef ENABLE_CHECKING for (e = bb->pred; e; e = e->pred_next) if (PREDICT_BLOCK_INFO (e->src)->tovisit && !(e->flags & EDGE_DFS_BACK)) abort (); #endif for (e = bb->pred; e; e = e->pred_next) if (PRED_EDGE_INFO (e)->back_edge) { sreal_add (&cyclic_probability, &cyclic_probability, &PRED_EDGE_INFO (e)->back_edge_prob); } else if (!(e->flags & EDGE_DFS_BACK)) { sreal tmp; /* frequency += (e->probability * PREDICT_BLOCK_INFO (e->src)->frequency / REG_BR_PROB_BASE); */ sreal_init (&tmp, e->probability, 0); sreal_mul (&tmp, &tmp, &PREDICT_BLOCK_INFO (e->src)->frequency); sreal_mul (&tmp, &tmp, &real_inv_br_prob_base); sreal_add (&frequency, &frequency, &tmp); } if (sreal_compare (&cyclic_probability, &real_zero) == 0) { memcpy (&PREDICT_BLOCK_INFO (bb)->frequency, &frequency, sizeof (frequency)); } else { if (sreal_compare (&cyclic_probability, &real_almost_one) > 0) { memcpy (&cyclic_probability, &real_almost_one, sizeof (real_almost_one)); } /* PREDICT_BLOCK_INFO (bb)->frequency = frequency / (1 - cyclic_probability) */ sreal_sub (&cyclic_probability, &real_one, &cyclic_probability); sreal_div (&PREDICT_BLOCK_INFO (bb)->frequency, &frequency, &cyclic_probability); } } PREDICT_BLOCK_INFO (bb)->tovisit = 0; /* Compute back edge frequencies. */ for (e = bb->succ; e; e = e->succ_next) if (e->dest == head) { sreal tmp; /* PRED_EDGE_INFO (e)->back_edge_prob = ((e->probability * PREDICT_BLOCK_INFO (bb)->frequency) / REG_BR_PROB_BASE); */ sreal_init (&tmp, e->probability, 0); sreal_mul (&tmp, &tmp, &PREDICT_BLOCK_INFO (bb)->frequency); sreal_mul (&PRED_EDGE_INFO (e)->back_edge_prob, &tmp, &real_inv_br_prob_base); } /* Propagate to successor blocks. */ for (e = bb->succ; e; e = e->succ_next) if (!(e->flags & EDGE_DFS_BACK) && PREDICT_BLOCK_INFO (e->dest)->npredecessors) { PREDICT_BLOCK_INFO (e->dest)->npredecessors--; if (!PREDICT_BLOCK_INFO (e->dest)->npredecessors) { if (!nextbb) nextbb = e->dest; else PREDICT_BLOCK_INFO (last)->next = e->dest; last = e->dest; } } } } /* Estimate probabilities of loopback edges in loops at same nest level. */ static void estimate_loops_at_level (struct loop *first_loop) { struct loop *loop; for (loop = first_loop; loop; loop = loop->next) { edge e; basic_block *bbs; unsigned i; estimate_loops_at_level (loop->inner); if (loop->latch->succ) /* Do not do this for dummy function loop. */ { /* Find current loop back edge and mark it. 
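Inner loops are processed first, so by the time an outer loop is propagated the back_edge_prob of its inner latch edges already reflects the estimated iteration counts; the back_edge mark set here is what propagate_freq uses to accumulate cyclic_probability.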
*/ e = loop_latch_edge (loop); PRED_EDGE_INFO (e)->back_edge = 1; } bbs = get_loop_body (loop); for (i = 0; i < loop->num_nodes; i++) PREDICT_BLOCK_INFO (bbs[i])->tovisit = 1; free (bbs); propagate_freq (loop); } } /* Convert counts measured by profile driven feedback to frequencies. Return nonzero iff there was any nonzero execution count. */ static int counts_to_freqs (void) { gcov_type count_max, true_count_max = 0; basic_block bb; FOR_EACH_BB (bb) true_count_max = MAX (bb->count, true_count_max); count_max = MAX (true_count_max, 1); FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max; return true_count_max; } /* Return true if function is likely to be expensive, so there is no point to optimize performance of prologue, epilogue or do inlining at the expense of code size growth. THRESHOLD is the limit of number of instructions function can execute at average to be still considered not expensive. */ bool expensive_function_p (int threshold) { unsigned int sum = 0; basic_block bb; unsigned int limit; /* We can not compute accurately for large thresholds due to scaled frequencies. */ if (threshold > BB_FREQ_MAX) abort (); /* Frequencies are out of range. This either means that function contains internal loop executing more than BB_FREQ_MAX times or profile feedback is available and function has not been executed at all. */ if (ENTRY_BLOCK_PTR->frequency == 0) return true; /* Maximally BB_FREQ_MAX^2 so overflow won't happen. */ limit = ENTRY_BLOCK_PTR->frequency * threshold; FOR_EACH_BB (bb) { rtx insn; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) if (active_insn_p (insn)) { sum += bb->frequency; if (sum > limit) return true; } } return false; } /* Estimate basic blocks frequency by given branch probabilities. */ static void estimate_bb_frequencies (struct loops *loops) { basic_block bb; sreal freq_max; if (!flag_branch_probabilities || !counts_to_freqs ()) { static int real_values_initialized = 0; if (!real_values_initialized) { real_values_initialized = 1; sreal_init (&real_zero, 0, 0); sreal_init (&real_one, 1, 0); sreal_init (&real_br_prob_base, REG_BR_PROB_BASE, 0); sreal_init (&real_bb_freq_max, BB_FREQ_MAX, 0); sreal_init (&real_one_half, 1, -1); sreal_div (&real_inv_br_prob_base, &real_one, &real_br_prob_base); sreal_sub (&real_almost_one, &real_one, &real_inv_br_prob_base); } mark_dfs_back_edges (); ENTRY_BLOCK_PTR->succ->probability = REG_BR_PROB_BASE; /* Set up block info for each basic block. */ alloc_aux_for_blocks (sizeof (struct predict_block_info_def)); alloc_aux_for_edges (sizeof (struct edge_info_def)); FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; PREDICT_BLOCK_INFO (bb)->tovisit = 0; for (e = bb->succ; e; e = e->succ_next) { sreal_init (&PRED_EDGE_INFO (e)->back_edge_prob, e->probability, 0); sreal_mul (&PRED_EDGE_INFO (e)->back_edge_prob, &PRED_EDGE_INFO (e)->back_edge_prob, &real_inv_br_prob_base); } } /* First compute probabilities locally for each loop from innermost to outermost to examine probabilities for back edges. 
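The propagated sreal frequencies are only relative (the root of the loop tree starts from 1), so afterwards they are rescaled so that the most frequent block gets BB_FREQ_MAX: freq_max is reused to hold BB_FREQ_MAX divided by the largest frequency, each block is multiplied by it, and one half is added before converting to an integer so the result is rounded rather than truncated.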
*/ estimate_loops_at_level (loops->tree_root); memcpy (&freq_max, &real_zero, sizeof (real_zero)); FOR_EACH_BB (bb) if (sreal_compare (&freq_max, &PREDICT_BLOCK_INFO (bb)->frequency) < 0) memcpy (&freq_max, &PREDICT_BLOCK_INFO (bb)->frequency, sizeof (freq_max)); sreal_div (&freq_max, &real_bb_freq_max, &freq_max); FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { sreal tmp; sreal_mul (&tmp, &PREDICT_BLOCK_INFO (bb)->frequency, &freq_max); sreal_add (&tmp, &tmp, &real_one_half); bb->frequency = sreal_to_int (&tmp); } free_aux_for_blocks (); free_aux_for_edges (); } compute_function_frequency (); if (flag_reorder_functions) choose_function_section (); } /* Decide whether function is hot, cold or unlikely executed. */ static void compute_function_frequency (void) { basic_block bb; if (!profile_info || !flag_branch_probabilities) return; cfun->function_frequency = FUNCTION_FREQUENCY_UNLIKELY_EXECUTED; FOR_EACH_BB (bb) { if (maybe_hot_bb_p (bb)) { cfun->function_frequency = FUNCTION_FREQUENCY_HOT; return; } if (!probably_never_executed_bb_p (bb)) cfun->function_frequency = FUNCTION_FREQUENCY_NORMAL; } } /* Choose appropriate section for the function. */ static void choose_function_section (void) { if (DECL_SECTION_NAME (current_function_decl) || !targetm.have_named_sections /* Theoretically we can split the gnu.linkonce text section too, but this requires more work as the frequency needs to match for all generated objects so we need to merge the frequency of all instances. For now just never set frequency for these. */ || DECL_ONE_ONLY (current_function_decl)) return; if (cfun->function_frequency == FUNCTION_FREQUENCY_HOT) DECL_SECTION_NAME (current_function_decl) = build_string (strlen (HOT_TEXT_SECTION_NAME), HOT_TEXT_SECTION_NAME); if (cfun->function_frequency == FUNCTION_FREQUENCY_UNLIKELY_EXECUTED) DECL_SECTION_NAME (current_function_decl) = build_string (strlen (UNLIKELY_EXECUTED_TEXT_SECTION_NAME), UNLIKELY_EXECUTED_TEXT_SECTION_NAME); } struct tree_opt_pass pass_profile = { "profile", /* name */ NULL, /* gate */ tree_estimate_probability, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_BRANCH_PROB, /* tv_id */ PROP_cfg, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_ggc_collect | TODO_verify_ssa /* todo_flags_finish */ }; /* Print RTL for GCC. Copyright (C) 1987, 1988, 1992, 1997, 1998, 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* We don't want the tree code checking code for the access to the DECL_NAME to be included in the gen* programs. */ #undef ENABLE_TREE_CHECKING static FILE *outfile; static int sawclose = 0; static int indent; static void print_rtx (rtx); /* String printed at beginning of each RTL when it is dumped. 
This string is set to ASM_COMMENT_START when the RTL is dumped in the assembly output file. */ const char *print_rtx_head = ""; /* Nonzero means suppress output of instruction numbers and line number notes in debugging dumps. This must be defined here so that programs like gencodes can be linked. */ int flag_dump_unnumbered = 0; /* Nonzero means use simplified format without flags, modes, etc. */ int flag_simple = 0; /* Nonzero if we are dumping graphical description. */ int dump_for_graph; void print_mem_expr (FILE *outfile, tree expr) { if (TREE_CODE (expr) == COMPONENT_REF) { if (TREE_OPERAND (expr, 0)) print_mem_expr (outfile, TREE_OPERAND (expr, 0)); else fputs (" ", outfile); if (DECL_NAME (TREE_OPERAND (expr, 1))) fprintf (outfile, ".%s", IDENTIFIER_POINTER (DECL_NAME (TREE_OPERAND (expr, 1)))); } else if (TREE_CODE (expr) == INDIRECT_REF) { fputs (" (*", outfile); print_mem_expr (outfile, TREE_OPERAND (expr, 0)); fputs (")", outfile); } else if (DECL_NAME (expr)) fprintf (outfile, " %s", IDENTIFIER_POINTER (DECL_NAME (expr))); else if (TREE_CODE (expr) == RESULT_DECL) fputs (" ", outfile); else fputs (" ", outfile); } /* Print IN_RTX onto OUTFILE. This is the recursive part of printing. */ static void print_rtx (rtx in_rtx) { int i = 0; int j; const char *format_ptr; int is_insn; if (sawclose) { if (flag_simple) fputc (' ', outfile); else fprintf (outfile, "\n%s%*s", print_rtx_head, indent * 2, ""); sawclose = 0; } if (in_rtx == 0) { fputs ("(nil)", outfile); sawclose = 1; return; } else if (GET_CODE (in_rtx) > NUM_RTX_CODE) { fprintf (outfile, "(??? bad code %d\n)", GET_CODE (in_rtx)); sawclose = 1; return; } is_insn = INSN_P (in_rtx); /* When printing in VCG format we write INSNs, NOTE, LABEL, and BARRIER in separate nodes and therefore have to handle them special here. */ if (dump_for_graph && (is_insn || GET_CODE (in_rtx) == NOTE || GET_CODE (in_rtx) == CODE_LABEL || GET_CODE (in_rtx) == BARRIER)) { i = 3; indent = 0; } else { /* Print name of expression code. */ if (flag_simple && GET_CODE (in_rtx) == CONST_INT) fputc ('(', outfile); else fprintf (outfile, "(%s", GET_RTX_NAME (GET_CODE (in_rtx))); if (! flag_simple) { if (RTX_FLAG (in_rtx, in_struct)) fputs ("/s", outfile); if (RTX_FLAG (in_rtx, volatil)) fputs ("/v", outfile); if (RTX_FLAG (in_rtx, unchanging)) fputs ("/u", outfile); if (RTX_FLAG (in_rtx, frame_related)) fputs ("/f", outfile); if (RTX_FLAG (in_rtx, jump)) fputs ("/j", outfile); if (RTX_FLAG (in_rtx, call)) fputs ("/c", outfile); if (RTX_FLAG (in_rtx, return_val)) fputs ("/i", outfile); if (GET_MODE (in_rtx) != VOIDmode) { /* Print REG_NOTE names for EXPR_LIST and INSN_LIST. */ if (GET_CODE (in_rtx) == EXPR_LIST || GET_CODE (in_rtx) == INSN_LIST) fprintf (outfile, ":%s", GET_REG_NOTE_NAME (GET_MODE (in_rtx))); else fprintf (outfile, ":%s", GET_MODE_NAME (GET_MODE (in_rtx))); } } } #ifndef GENERATOR_FILE if (GET_CODE (in_rtx) == CONST_DOUBLE && FLOAT_MODE_P (GET_MODE (in_rtx))) i = 5; #endif /* Get the format string and skip the first elements if we have handled them already. */ format_ptr = GET_RTX_FORMAT (GET_CODE (in_rtx)) + i; for (; i < GET_RTX_LENGTH (GET_CODE (in_rtx)); i++) switch (*format_ptr++) { const char *str; case 'T': str = XTMPL (in_rtx, i); goto string; case 'S': case 's': str = XSTR (in_rtx, i); string: if (str == 0) fputs (dump_for_graph ? 
" \\\"\\\"" : " \"\"", outfile); else { if (dump_for_graph) fprintf (outfile, " (\\\"%s\\\")", str); else fprintf (outfile, " (\"%s\")", str); } sawclose = 1; break; /* 0 indicates a field for internal use that should not be printed. An exception is the third field of a NOTE, where it indicates that the field has several different valid contents. */ case '0': if (i == 1 && REG_P (in_rtx)) { if (REGNO (in_rtx) != ORIGINAL_REGNO (in_rtx)) fprintf (outfile, " [%d]", ORIGINAL_REGNO (in_rtx)); } #ifndef GENERATOR_FILE else if (i == 1 && GET_CODE (in_rtx) == SYMBOL_REF) { int flags = SYMBOL_REF_FLAGS (in_rtx); if (flags) fprintf (outfile, " [flags 0x%x]", flags); } else if (i == 2 && GET_CODE (in_rtx) == SYMBOL_REF) { tree decl = SYMBOL_REF_DECL (in_rtx); if (decl) print_node_brief (outfile, "", decl, 0); } #endif else if (i == 4 && GET_CODE (in_rtx) == NOTE) { switch (NOTE_LINE_NUMBER (in_rtx)) { case NOTE_INSN_EH_REGION_BEG: case NOTE_INSN_EH_REGION_END: if (flag_dump_unnumbered) fprintf (outfile, " #"); else fprintf (outfile, " %d", NOTE_EH_HANDLER (in_rtx)); sawclose = 1; break; case NOTE_INSN_BLOCK_BEG: case NOTE_INSN_BLOCK_END: fprintf (outfile, " "); if (flag_dump_unnumbered) fprintf (outfile, "#"); else fprintf (outfile, HOST_PTR_PRINTF, (char *) NOTE_BLOCK (in_rtx)); sawclose = 1; break; case NOTE_INSN_BASIC_BLOCK: { basic_block bb = NOTE_BASIC_BLOCK (in_rtx); if (bb != 0) fprintf (outfile, " [bb %d]", bb->index); break; } case NOTE_INSN_EXPECTED_VALUE: indent += 2; if (!sawclose) fprintf (outfile, " "); print_rtx (NOTE_EXPECTED_VALUE (in_rtx)); indent -= 2; break; case NOTE_INSN_DELETED_LABEL: { const char *label = NOTE_DELETED_LABEL_NAME (in_rtx); if (label) fprintf (outfile, " (\"%s\")", label); else fprintf (outfile, " \"\""); } break; case NOTE_INSN_PREDICTION: if (NOTE_PREDICTION (in_rtx)) fprintf (outfile, " [ %d %d ] ", (int)NOTE_PREDICTION_ALG (in_rtx), (int) NOTE_PREDICTION_FLAGS (in_rtx)); else fprintf (outfile, " [ ERROR ]"); break; case NOTE_INSN_UNLIKELY_EXECUTED_CODE: { basic_block bb = NOTE_BASIC_BLOCK (in_rtx); if (bb != 0) fprintf (outfile, " [bb %d]", bb->index); break; } case NOTE_INSN_VAR_LOCATION: fprintf (outfile, " ("); print_mem_expr (outfile, NOTE_VAR_LOCATION_DECL (in_rtx)); fprintf (outfile, " "); print_rtx (NOTE_VAR_LOCATION_LOC (in_rtx)); fprintf (outfile, ")"); break; default: { const char * const str = X0STR (in_rtx, i); if (NOTE_LINE_NUMBER (in_rtx) < 0) ; else if (str == 0) fputs (dump_for_graph ? " \\\"\\\"" : " \"\"", outfile); else { if (dump_for_graph) fprintf (outfile, " (\\\"%s\\\")", str); else fprintf (outfile, " (\"%s\")", str); } break; } } } break; case 'e': do_e: indent += 2; if (!sawclose) fprintf (outfile, " "); print_rtx (XEXP (in_rtx, i)); indent -= 2; break; case 'E': case 'V': indent += 2; if (sawclose) { fprintf (outfile, "\n%s%*s", print_rtx_head, indent * 2, ""); sawclose = 0; } fputs (" [", outfile); if (NULL != XVEC (in_rtx, i)) { indent += 2; if (XVECLEN (in_rtx, i)) sawclose = 1; for (j = 0; j < XVECLEN (in_rtx, i); j++) print_rtx (XVECEXP (in_rtx, i, j)); indent -= 2; } if (sawclose) fprintf (outfile, "\n%s%*s", print_rtx_head, indent * 2, ""); fputs ("]", outfile); sawclose = 1; indent -= 2; break; case 'w': if (! flag_simple) fprintf (outfile, " "); fprintf (outfile, HOST_WIDE_INT_PRINT_DEC, XWINT (in_rtx, i)); if (! flag_simple) fprintf (outfile, " [" HOST_WIDE_INT_PRINT_HEX "]", XWINT (in_rtx, i)); break; case 'i': if (i == 4 && INSN_P (in_rtx)) { #ifndef GENERATOR_FILE /* Pretty-print insn locators. 
Ignore scoping as it is mostly redundant with line number information and do not print anything when there is no location information available. */ if (INSN_LOCATOR (in_rtx) && insn_file (in_rtx)) fprintf(outfile, " %s:%i", insn_file (in_rtx), insn_line (in_rtx)); #endif } else if (i == 6 && GET_CODE (in_rtx) == NOTE) { /* This field is only used for NOTE_INSN_DELETED_LABEL, and other times often contains garbage from INSN->NOTE death. */ if (NOTE_LINE_NUMBER (in_rtx) == NOTE_INSN_DELETED_LABEL) fprintf (outfile, " %d", XINT (in_rtx, i)); } else { int value = XINT (in_rtx, i); const char *name; #ifndef GENERATOR_FILE if (REG_P (in_rtx) && value < FIRST_PSEUDO_REGISTER) fprintf (outfile, " %d %s", REGNO (in_rtx), reg_names[REGNO (in_rtx)]); else if (REG_P (in_rtx) && value <= LAST_VIRTUAL_REGISTER) { if (value == VIRTUAL_INCOMING_ARGS_REGNUM) fprintf (outfile, " %d virtual-incoming-args", value); else if (value == VIRTUAL_STACK_VARS_REGNUM) fprintf (outfile, " %d virtual-stack-vars", value); else if (value == VIRTUAL_STACK_DYNAMIC_REGNUM) fprintf (outfile, " %d virtual-stack-dynamic", value); else if (value == VIRTUAL_OUTGOING_ARGS_REGNUM) fprintf (outfile, " %d virtual-outgoing-args", value); else if (value == VIRTUAL_CFA_REGNUM) fprintf (outfile, " %d virtual-cfa", value); else fprintf (outfile, " %d virtual-reg-%d", value, value-FIRST_VIRTUAL_REGISTER); } else #endif if (flag_dump_unnumbered && (is_insn || GET_CODE (in_rtx) == NOTE)) fputc ('#', outfile); else fprintf (outfile, " %d", value); if (REG_P (in_rtx) && REG_ATTRS (in_rtx)) { fputs (" [", outfile); if (ORIGINAL_REGNO (in_rtx) != REGNO (in_rtx)) fprintf (outfile, "orig:%i", ORIGINAL_REGNO (in_rtx)); if (REG_EXPR (in_rtx)) print_mem_expr (outfile, REG_EXPR (in_rtx)); if (REG_OFFSET (in_rtx)) fprintf (outfile, "+" HOST_WIDE_INT_PRINT_DEC, REG_OFFSET (in_rtx)); fputs (" ]", outfile); } if (is_insn && &INSN_CODE (in_rtx) == &XINT (in_rtx, i) && XINT (in_rtx, i) >= 0 && (name = get_insn_name (XINT (in_rtx, i))) != NULL) fprintf (outfile, " {%s}", name); sawclose = 0; } break; /* Print NOTE_INSN names rather than integer codes. */ case 'n': if (XINT (in_rtx, i) >= (int) NOTE_INSN_BIAS && XINT (in_rtx, i) < (int) NOTE_INSN_MAX) fprintf (outfile, " %s", GET_NOTE_INSN_NAME (XINT (in_rtx, i))); else fprintf (outfile, " %d", XINT (in_rtx, i)); sawclose = 0; break; case 'u': if (XEXP (in_rtx, i) != NULL) { rtx sub = XEXP (in_rtx, i); enum rtx_code subc = GET_CODE (sub); if (GET_CODE (in_rtx) == LABEL_REF) { if (subc == NOTE && NOTE_LINE_NUMBER (sub) == NOTE_INSN_DELETED_LABEL) { if (flag_dump_unnumbered) fprintf (outfile, " [# deleted]"); else fprintf (outfile, " [%d deleted]", INSN_UID (sub)); sawclose = 0; break; } if (subc != CODE_LABEL) goto do_e; } if (flag_dump_unnumbered) fputs (" #", outfile); else fprintf (outfile, " %d", INSN_UID (sub)); } else fputs (" 0", outfile); sawclose = 0; break; case 'b': if (XBITMAP (in_rtx, i) == NULL) fputs (" {null}", outfile); else bitmap_print (outfile, XBITMAP (in_rtx, i), " {", "}"); sawclose = 0; break; case 't': fprintf (outfile, " " HOST_PTR_PRINTF, (void *) XTREE (in_rtx, i)); break; case '*': fputs (" Unknown", outfile); sawclose = 0; break; case 'B': if (XBBDEF (in_rtx, i)) fprintf (outfile, " %i", XBBDEF (in_rtx, i)->index); break; default: fprintf (stderr, "switch format wrong in rtl.print_rtx(). 
format was: %c.\n", format_ptr[-1]); abort (); } switch (GET_CODE (in_rtx)) { #ifndef GENERATOR_FILE case MEM: fprintf (outfile, " [" HOST_WIDE_INT_PRINT_DEC, MEM_ALIAS_SET (in_rtx)); if (MEM_EXPR (in_rtx)) print_mem_expr (outfile, MEM_EXPR (in_rtx)); if (MEM_OFFSET (in_rtx)) fprintf (outfile, "+" HOST_WIDE_INT_PRINT_DEC, INTVAL (MEM_OFFSET (in_rtx))); if (MEM_SIZE (in_rtx)) fprintf (outfile, " S" HOST_WIDE_INT_PRINT_DEC, INTVAL (MEM_SIZE (in_rtx))); if (MEM_ALIGN (in_rtx) != 1) fprintf (outfile, " A%u", MEM_ALIGN (in_rtx)); fputc (']', outfile); break; case CONST_DOUBLE: if (FLOAT_MODE_P (GET_MODE (in_rtx))) { char s[60]; real_to_decimal (s, CONST_DOUBLE_REAL_VALUE (in_rtx), sizeof (s), 0, 1); fprintf (outfile, " %s", s); real_to_hexadecimal (s, CONST_DOUBLE_REAL_VALUE (in_rtx), sizeof (s), 0, 1); fprintf (outfile, " [%s]", s); } break; #endif case CODE_LABEL: fprintf (outfile, " [%d uses]", LABEL_NUSES (in_rtx)); switch (LABEL_KIND (in_rtx)) { case LABEL_NORMAL: break; case LABEL_STATIC_ENTRY: fputs (" [entry]", outfile); break; case LABEL_GLOBAL_ENTRY: fputs (" [global entry]", outfile); break; case LABEL_WEAK_ENTRY: fputs (" [weak entry]", outfile); break; default: abort(); } break; default: break; } if (dump_for_graph && (is_insn || GET_CODE (in_rtx) == NOTE || GET_CODE (in_rtx) == CODE_LABEL || GET_CODE (in_rtx) == BARRIER)) sawclose = 0; else { fputc (')', outfile); sawclose = 1; } } /* Print an rtx on the current line of FILE. Initially indent IND characters. */ void print_inline_rtx (FILE *outf, rtx x, int ind) { int oldsaw = sawclose; int oldindent = indent; sawclose = 0; indent = ind; outfile = outf; print_rtx (x); sawclose = oldsaw; indent = oldindent; } /* Call this function from the debugger to see what X looks like. */ void debug_rtx (rtx x) { outfile = stderr; sawclose = 0; print_rtx (x); fprintf (stderr, "\n"); } /* Count of rtx's to print with debug_rtx_list. This global exists because gdb user defined commands have no arguments. */ int debug_rtx_count = 0; /* 0 is treated as equivalent to 1 */ /* Call this function to print list from X on. N is a count of the rtx's to print. Positive values print from the specified rtx on. Negative values print a window around the rtx. EG: -5 prints 2 rtx's on either side (in addition to the specified rtx). */ void debug_rtx_list (rtx x, int n) { int i,count; rtx insn; count = n == 0 ? 1 : n < 0 ? -n : n; /* If we are printing a window, back up to the start. */ if (n < 0) for (i = count / 2; i > 0; i--) { if (PREV_INSN (x) == 0) break; x = PREV_INSN (x); } for (i = count, insn = x; i > 0 && insn != 0; i--, insn = NEXT_INSN (insn)) { debug_rtx (insn); fprintf (stderr, "\n"); } } /* Call this function to print an rtx list from START to END inclusive. */ void debug_rtx_range (rtx start, rtx end) { while (1) { debug_rtx (start); fprintf (stderr, "\n"); if (!start || start == end) break; start = NEXT_INSN (start); } } /* Call this function to search an rtx list to find one with insn uid UID, and then call debug_rtx_list to print it, using DEBUG_RTX_COUNT. The found insn is returned to enable further debugging analysis. */ rtx debug_rtx_find (rtx x, int uid) { while (x != 0 && INSN_UID (x) != uid) x = NEXT_INSN (x); if (x != 0) { debug_rtx_list (x, debug_rtx_count); return x; } else { fprintf (stderr, "insn uid %d not found\n", uid); return 0; } } /* External entry point for printing a chain of insns starting with RTX_FIRST onto file OUTF. A blank line separates insns. If RTX_FIRST is not an insn, then it alone is printed, with no newline. 
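print_rtl_single below shares the same machinery but prints a single rtx and returns nonzero only when something was actually written (line-number notes are skipped under flag_dump_unnumbered), while print_simple_rtl temporarily sets flag_simple to produce the abbreviated format without flags, modes, etc.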
*/ void print_rtl (FILE *outf, rtx rtx_first) { rtx tmp_rtx; outfile = outf; sawclose = 0; if (rtx_first == 0) { fputs (print_rtx_head, outf); fputs ("(nil)\n", outf); } else switch (GET_CODE (rtx_first)) { case INSN: case JUMP_INSN: case CALL_INSN: case NOTE: case CODE_LABEL: case BARRIER: for (tmp_rtx = rtx_first; tmp_rtx != 0; tmp_rtx = NEXT_INSN (tmp_rtx)) if (! flag_dump_unnumbered || GET_CODE (tmp_rtx) != NOTE || NOTE_LINE_NUMBER (tmp_rtx) < 0) { fputs (print_rtx_head, outfile); print_rtx (tmp_rtx); fprintf (outfile, "\n"); } break; default: fputs (print_rtx_head, outfile); print_rtx (rtx_first); } } /* Like print_rtx, except specify a file. */ /* Return nonzero if we actually printed anything. */ int print_rtl_single (FILE *outf, rtx x) { outfile = outf; sawclose = 0; if (! flag_dump_unnumbered || GET_CODE (x) != NOTE || NOTE_LINE_NUMBER (x) < 0) { fputs (print_rtx_head, outfile); print_rtx (x); putc ('\n', outf); return 1; } return 0; } /* Like print_rtl except without all the detail; for example, if RTX is a CONST_INT then print in decimal format. */ void print_simple_rtl (FILE *outf, rtx x) { flag_simple = 1; print_rtl (outf, x); flag_simple = 0; } /* Prints out tree in human readable form - GCC Copyright (C) 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Define the hash table of nodes already seen. Such nodes are not repeated; brief cross-references are used. */ #define PT_HASH_SIZE 37 struct bucket { tree node; struct bucket *next; }; static struct bucket **seen_table; /* Print the node NODE on standard error, for debugging. Most nodes referred to by this one are printed recursively down to a depth of six. */ void debug_tree (tree node) { seen_table = xcalloc (PT_HASH_SIZE, sizeof (struct bucket *)); print_node (stderr, "", node, 0); free (seen_table); seen_table = 0; putc ('\n', stderr); } /* Print a node in brief fashion, with just the code, address and name. */ void print_node_brief (FILE *file, const char *prefix, tree node, int indent) { char class; if (node == 0) return; class = TREE_CODE_CLASS (TREE_CODE (node)); /* Always print the slot this node is in, and its code, address and name if any. 
*/ if (indent > 0) fprintf (file, " "); fprintf (file, "%s <%s " HOST_PTR_PRINTF, prefix, tree_code_name[(int) TREE_CODE (node)], (char *) node); if (class == 'd') { if (DECL_NAME (node)) fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (node))); } else if (class == 't') { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) fprintf (file, " %s", IDENTIFIER_POINTER (TYPE_NAME (node))); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (node)))); } } if (TREE_CODE (node) == IDENTIFIER_NODE) fprintf (file, " %s", IDENTIFIER_POINTER (node)); /* We might as well always print the value of an integer or real. */ if (TREE_CODE (node) == INTEGER_CST) { if (TREE_CONSTANT_OVERFLOW (node)) fprintf (file, " overflow"); fprintf (file, " "); if (TREE_INT_CST_HIGH (node) == 0) fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, TREE_INT_CST_LOW (node)); else if (TREE_INT_CST_HIGH (node) == -1 && TREE_INT_CST_LOW (node) != 0) fprintf (file, "-" HOST_WIDE_INT_PRINT_UNSIGNED, -TREE_INT_CST_LOW (node)); else fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX, TREE_INT_CST_HIGH (node), TREE_INT_CST_LOW (node)); } if (TREE_CODE (node) == REAL_CST) { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) fprintf (file, " overflow"); d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) fprintf (file, " Inf"); else if (REAL_VALUE_ISNAN (d)) fprintf (file, " Nan"); else { char string[60]; real_to_decimal (string, &d, sizeof (string), 0, 1); fprintf (file, " %s", string); } } fprintf (file, ">"); } void indent_to (FILE *file, int column) { int i; /* Since this is the long way, indent to desired column. */ if (column > 0) fprintf (file, "\n"); for (i = 0; i < column; i++) fprintf (file, " "); } /* Print the node NODE in full on file FILE, preceded by PREFIX, starting in column INDENT. */ void print_node (FILE *file, const char *prefix, tree node, int indent) { int hash; struct bucket *b; enum machine_mode mode; char class; int len; int first_rtl; int i; expanded_location xloc; if (node == 0) return; class = TREE_CODE_CLASS (TREE_CODE (node)); /* Don't get too deep in nesting. If the user wants to see deeper, it is easy to use the address of a lowest-level node as an argument in another call to debug_tree. */ if (indent > 24) { print_node_brief (file, prefix, node, indent); return; } if (indent > 8 && (class == 't' || class == 'd')) { print_node_brief (file, prefix, node, indent); return; } /* It is unsafe to look at any other fields of an ERROR_MARK node. */ if (TREE_CODE (node) == ERROR_MARK) { print_node_brief (file, prefix, node, indent); return; } hash = ((unsigned long) node) % PT_HASH_SIZE; /* If node is in the table, just mention its address. */ for (b = seen_table[hash]; b; b = b->next) if (b->node == node) { print_node_brief (file, prefix, node, indent); return; } /* Add this node to the table. */ b = xmalloc (sizeof (struct bucket)); b->node = node; b->next = seen_table[hash]; seen_table[hash] = b; /* Indent to the specified column, since this is the long form. */ indent_to (file, indent); /* Print the slot this node is in, and its code, and address. */ fprintf (file, "%s <%s " HOST_PTR_PRINTF, prefix, tree_code_name[(int) TREE_CODE (node)], (void *) node); /* Print the name, if any. 
*/ if (class == 'd') { if (DECL_NAME (node)) fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (node))); } else if (class == 't') { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) fprintf (file, " %s", IDENTIFIER_POINTER (TYPE_NAME (node))); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) fprintf (file, " %s", IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (node)))); } } if (TREE_CODE (node) == IDENTIFIER_NODE) fprintf (file, " %s", IDENTIFIER_POINTER (node)); if (TREE_CODE (node) == INTEGER_CST) { if (indent <= 4) print_node_brief (file, "type", TREE_TYPE (node), indent + 4); } else { print_node (file, "type", TREE_TYPE (node), indent + 4); if (TREE_TYPE (node)) indent_to (file, indent + 3); } if (!TYPE_P (node) && TREE_SIDE_EFFECTS (node)) fputs (" side-effects", file); if (TYPE_P (node) ? TYPE_READONLY (node) : TREE_READONLY (node)) fputs (" readonly", file); if (!TYPE_P (node) && TREE_CONSTANT (node)) fputs (" constant", file); if (TREE_INVARIANT (node)) fputs (" invariant", file); if (TREE_ADDRESSABLE (node)) fputs (" addressable", file); if (TREE_THIS_VOLATILE (node)) fputs (" volatile", file); if (TREE_ASM_WRITTEN (node)) fputs (" asm_written", file); if (TREE_USED (node)) fputs (" used", file); if (TREE_NOTHROW (node)) fputs (TYPE_P (node) ? " align-ok" : " nothrow", file); if (TREE_PUBLIC (node)) fputs (" public", file); if (TREE_PRIVATE (node)) fputs (" private", file); if (TREE_PROTECTED (node)) fputs (" protected", file); if (TREE_STATIC (node)) fputs (" static", file); if (TREE_DEPRECATED (node)) fputs (" deprecated", file); if (TREE_VISITED (node)) fputs (" visited", file); if (TREE_LANG_FLAG_0 (node)) fputs (" tree_0", file); if (TREE_LANG_FLAG_1 (node)) fputs (" tree_1", file); if (TREE_LANG_FLAG_2 (node)) fputs (" tree_2", file); if (TREE_LANG_FLAG_3 (node)) fputs (" tree_3", file); if (TREE_LANG_FLAG_4 (node)) fputs (" tree_4", file); if (TREE_LANG_FLAG_5 (node)) fputs (" tree_5", file); if (TREE_LANG_FLAG_6 (node)) fputs (" tree_6", file); /* DECL_ nodes have additional attributes. */ switch (TREE_CODE_CLASS (TREE_CODE (node))) { case 'd': mode = DECL_MODE (node); if (DECL_UNSIGNED (node)) fputs (" unsigned", file); if (DECL_IGNORED_P (node)) fputs (" ignored", file); if (DECL_ABSTRACT (node)) fputs (" abstract", file); if (DECL_IN_SYSTEM_HEADER (node)) fputs (" in_system_header", file); if (DECL_COMMON (node)) fputs (" common", file); if (DECL_EXTERNAL (node)) fputs (" external", file); if (DECL_WEAK (node)) fputs (" weak", file); if (DECL_REGISTER (node) && TREE_CODE (node) != FIELD_DECL && TREE_CODE (node) != FUNCTION_DECL && TREE_CODE (node) != LABEL_DECL) fputs (" regdecl", file); if (DECL_NONLOCAL (node)) fputs (" nonlocal", file); if (TREE_CODE (node) == TYPE_DECL && TYPE_DECL_SUPPRESS_DEBUG (node)) fputs (" suppress-debug", file); if (TREE_CODE (node) == FUNCTION_DECL && DECL_INLINE (node)) fputs (DECL_DECLARED_INLINE_P (node) ? 
" inline" : " autoinline", file); if (TREE_CODE (node) == FUNCTION_DECL && DECL_BUILT_IN (node)) fputs (" built-in", file); if (TREE_CODE (node) == FUNCTION_DECL && DECL_NO_STATIC_CHAIN (node)) fputs (" no-static-chain", file); if (TREE_CODE (node) == FIELD_DECL && DECL_PACKED (node)) fputs (" packed", file); if (TREE_CODE (node) == FIELD_DECL && DECL_BIT_FIELD (node)) fputs (" bit-field", file); if (TREE_CODE (node) == FIELD_DECL && DECL_NONADDRESSABLE_P (node)) fputs (" nonaddressable", file); if (TREE_CODE (node) == LABEL_DECL && DECL_TOO_LATE (node)) fputs (" too-late", file); if (TREE_CODE (node) == LABEL_DECL && DECL_ERROR_ISSUED (node)) fputs (" error-issued", file); if (TREE_CODE (node) == VAR_DECL && DECL_IN_TEXT_SECTION (node)) fputs (" in-text-section", file); if (TREE_CODE (node) == VAR_DECL && DECL_THREAD_LOCAL (node)) fputs (" thread-local", file); if (TREE_CODE (node) == PARM_DECL && DECL_TRANSPARENT_UNION (node)) fputs (" transparent-union", file); if (DECL_VIRTUAL_P (node)) fputs (" virtual", file); if (DECL_DEFER_OUTPUT (node)) fputs (" defer-output", file); if (DECL_LANG_FLAG_0 (node)) fputs (" decl_0", file); if (DECL_LANG_FLAG_1 (node)) fputs (" decl_1", file); if (DECL_LANG_FLAG_2 (node)) fputs (" decl_2", file); if (DECL_LANG_FLAG_3 (node)) fputs (" decl_3", file); if (DECL_LANG_FLAG_4 (node)) fputs (" decl_4", file); if (DECL_LANG_FLAG_5 (node)) fputs (" decl_5", file); if (DECL_LANG_FLAG_6 (node)) fputs (" decl_6", file); if (DECL_LANG_FLAG_7 (node)) fputs (" decl_7", file); fprintf (file, " %s", GET_MODE_NAME (mode)); xloc = expand_location (DECL_SOURCE_LOCATION (node)); fprintf (file, " file %s line %d", xloc.file, xloc.line); print_node (file, "size", DECL_SIZE (node), indent + 4); print_node (file, "unit size", DECL_SIZE_UNIT (node), indent + 4); if (TREE_CODE (node) != FUNCTION_DECL || DECL_INLINE (node) || DECL_BUILT_IN (node)) indent_to (file, indent + 3); if (TREE_CODE (node) != FUNCTION_DECL) { if (DECL_USER_ALIGN (node)) fprintf (file, " user"); fprintf (file, " align %d", DECL_ALIGN (node)); if (TREE_CODE (node) == FIELD_DECL) fprintf (file, " offset_align " HOST_WIDE_INT_PRINT_UNSIGNED, DECL_OFFSET_ALIGN (node)); } else if (DECL_BUILT_IN (node)) { if (DECL_BUILT_IN_CLASS (node) == BUILT_IN_MD) fprintf (file, " built-in BUILT_IN_MD %d", DECL_FUNCTION_CODE (node)); else fprintf (file, " built-in %s:%s", built_in_class_names[(int) DECL_BUILT_IN_CLASS (node)], built_in_names[(int) DECL_FUNCTION_CODE (node)]); } if (DECL_POINTER_ALIAS_SET_KNOWN_P (node)) fprintf (file, " alias set " HOST_WIDE_INT_PRINT_DEC, DECL_POINTER_ALIAS_SET (node)); if (TREE_CODE (node) == FIELD_DECL) { print_node (file, "offset", DECL_FIELD_OFFSET (node), indent + 4); print_node (file, "bit offset", DECL_FIELD_BIT_OFFSET (node), indent + 4); } print_node_brief (file, "context", DECL_CONTEXT (node), indent + 4); print_node_brief (file, "attributes", DECL_ATTRIBUTES (node), indent + 4); print_node_brief (file, "abstract_origin", DECL_ABSTRACT_ORIGIN (node), indent + 4); print_node (file, "arguments", DECL_ARGUMENTS (node), indent + 4); print_node (file, "result", DECL_RESULT_FLD (node), indent + 4); print_node_brief (file, "initial", DECL_INITIAL (node), indent + 4); lang_hooks.print_decl (file, node, indent); if (DECL_RTL_SET_P (node)) { indent_to (file, indent + 4); print_rtl (file, DECL_RTL (node)); } if (TREE_CODE (node) == PARM_DECL) { print_node (file, "arg-type", DECL_ARG_TYPE (node), indent + 4); print_node (file, "arg-type-as-written", DECL_ARG_TYPE_AS_WRITTEN (node), indent + 
4); if (DECL_INCOMING_RTL (node) != 0) { indent_to (file, indent + 4); fprintf (file, "incoming-rtl "); print_rtl (file, DECL_INCOMING_RTL (node)); } } else if (TREE_CODE (node) == FUNCTION_DECL && DECL_STRUCT_FUNCTION (node) != 0) { indent_to (file, indent + 4); fprintf (file, "saved-insns " HOST_PTR_PRINTF, (void *) DECL_STRUCT_FUNCTION (node)); } /* Print the decl chain only if decl is at second level. */ if (indent == 4) print_node (file, "chain", TREE_CHAIN (node), indent + 4); else print_node_brief (file, "chain", TREE_CHAIN (node), indent + 4); break; case 't': if (TYPE_UNSIGNED (node)) fputs (" unsigned", file); /* The no-force-blk flag is used for different things in different types. */ if ((TREE_CODE (node) == RECORD_TYPE || TREE_CODE (node) == UNION_TYPE || TREE_CODE (node) == QUAL_UNION_TYPE) && TYPE_NO_FORCE_BLK (node)) fputs (" no-force-blk", file); else if (TREE_CODE (node) == INTEGER_TYPE && TYPE_IS_SIZETYPE (node)) fputs (" sizetype", file); else if (TREE_CODE (node) == FUNCTION_TYPE && TYPE_RETURNS_STACK_DEPRESSED (node)) fputs (" returns-stack-depressed", file); if (TYPE_STRING_FLAG (node)) fputs (" string-flag", file); if (TYPE_NEEDS_CONSTRUCTING (node)) fputs (" needs-constructing", file); /* The transparent-union flag is used for different things in different nodes. */ if (TREE_CODE (node) == UNION_TYPE && TYPE_TRANSPARENT_UNION (node)) fputs (" transparent-union", file); else if (TREE_CODE (node) == ARRAY_TYPE && TYPE_NONALIASED_COMPONENT (node)) fputs (" nonaliased-component", file); if (TYPE_PACKED (node)) fputs (" packed", file); if (TYPE_RESTRICT (node)) fputs (" restrict", file); if (TYPE_LANG_FLAG_0 (node)) fputs (" type_0", file); if (TYPE_LANG_FLAG_1 (node)) fputs (" type_1", file); if (TYPE_LANG_FLAG_2 (node)) fputs (" type_2", file); if (TYPE_LANG_FLAG_3 (node)) fputs (" type_3", file); if (TYPE_LANG_FLAG_4 (node)) fputs (" type_4", file); if (TYPE_LANG_FLAG_5 (node)) fputs (" type_5", file); if (TYPE_LANG_FLAG_6 (node)) fputs (" type_6", file); mode = TYPE_MODE (node); fprintf (file, " %s", GET_MODE_NAME (mode)); print_node (file, "size", TYPE_SIZE (node), indent + 4); print_node (file, "unit size", TYPE_SIZE_UNIT (node), indent + 4); indent_to (file, indent + 3); if (TYPE_USER_ALIGN (node)) fprintf (file, " user"); fprintf (file, " align %d symtab %d alias set " HOST_WIDE_INT_PRINT_DEC, TYPE_ALIGN (node), TYPE_SYMTAB_ADDRESS (node), TYPE_ALIAS_SET (node)); print_node (file, "attributes", TYPE_ATTRIBUTES (node), indent + 4); if (INTEGRAL_TYPE_P (node) || TREE_CODE (node) == REAL_TYPE) { fprintf (file, " precision %d", TYPE_PRECISION (node)); print_node_brief (file, "min", TYPE_MIN_VALUE (node), indent + 4); print_node_brief (file, "max", TYPE_MAX_VALUE (node), indent + 4); } if (TREE_CODE (node) == ENUMERAL_TYPE) print_node (file, "values", TYPE_VALUES (node), indent + 4); else if (TREE_CODE (node) == ARRAY_TYPE || TREE_CODE (node) == SET_TYPE) print_node (file, "domain", TYPE_DOMAIN (node), indent + 4); else if (TREE_CODE (node) == RECORD_TYPE || TREE_CODE (node) == UNION_TYPE || TREE_CODE (node) == QUAL_UNION_TYPE) print_node (file, "fields", TYPE_FIELDS (node), indent + 4); else if (TREE_CODE (node) == FUNCTION_TYPE || TREE_CODE (node) == METHOD_TYPE) { if (TYPE_METHOD_BASETYPE (node)) print_node_brief (file, "method basetype", TYPE_METHOD_BASETYPE (node), indent + 4); print_node (file, "arg-types", TYPE_ARG_TYPES (node), indent + 4); } else if (TREE_CODE (node) == OFFSET_TYPE) print_node_brief (file, "basetype", TYPE_OFFSET_BASETYPE (node), indent + 
4); if (TYPE_CONTEXT (node)) print_node_brief (file, "context", TYPE_CONTEXT (node), indent + 4); lang_hooks.print_type (file, node, indent); if (TYPE_POINTER_TO (node) || TREE_CHAIN (node)) indent_to (file, indent + 3); print_node_brief (file, "pointer_to_this", TYPE_POINTER_TO (node), indent + 4); print_node_brief (file, "reference_to_this", TYPE_REFERENCE_TO (node), indent + 4); print_node_brief (file, "chain", TREE_CHAIN (node), indent + 4); break; case 'e': case '<': case '1': case '2': case 'r': case 's': if (TREE_CODE (node) == BIT_FIELD_REF && BIT_FIELD_REF_UNSIGNED (node)) fputs (" unsigned", file); if (TREE_CODE (node) == BIND_EXPR) { print_node (file, "vars", TREE_OPERAND (node, 0), indent + 4); print_node (file, "body", TREE_OPERAND (node, 1), indent + 4); print_node (file, "block", TREE_OPERAND (node, 2), indent + 4); break; } len = TREE_CODE_LENGTH (TREE_CODE (node)); /* Some nodes contain rtx's, not trees, after a certain point. Print the rtx's as rtx's. */ first_rtl = first_rtl_op (TREE_CODE (node)); for (i = 0; i < len; i++) { if (i >= first_rtl) { indent_to (file, indent + 4); fprintf (file, "rtl %d ", i); if (TREE_OPERAND (node, i)) print_rtl (file, (rtx) TREE_OPERAND (node, i)); else fprintf (file, "(nil)"); fprintf (file, "\n"); } else { char temp[10]; sprintf (temp, "arg %d", i); print_node (file, temp, TREE_OPERAND (node, i), indent + 4); } } print_node (file, "chain", TREE_CHAIN (node), indent + 4); break; case 'c': case 'x': switch (TREE_CODE (node)) { case INTEGER_CST: if (TREE_CONSTANT_OVERFLOW (node)) fprintf (file, " overflow"); fprintf (file, " "); if (TREE_INT_CST_HIGH (node) == 0) fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, TREE_INT_CST_LOW (node)); else if (TREE_INT_CST_HIGH (node) == -1 && TREE_INT_CST_LOW (node) != 0) fprintf (file, "-" HOST_WIDE_INT_PRINT_UNSIGNED, -TREE_INT_CST_LOW (node)); else fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX, TREE_INT_CST_HIGH (node), TREE_INT_CST_LOW (node)); break; case REAL_CST: { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) fprintf (file, " overflow"); d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) fprintf (file, " Inf"); else if (REAL_VALUE_ISNAN (d)) fprintf (file, " Nan"); else { char string[64]; real_to_decimal (string, &d, sizeof (string), 0, 1); fprintf (file, " %s", string); } } break; case VECTOR_CST: { tree vals = TREE_VECTOR_CST_ELTS (node); char buf[10]; tree link; int i; i = 0; for (link = vals; link; link = TREE_CHAIN (link), ++i) { sprintf (buf, "elt%d: ", i); print_node (file, buf, TREE_VALUE (link), indent + 4); } } break; case COMPLEX_CST: print_node (file, "real", TREE_REALPART (node), indent + 4); print_node (file, "imag", TREE_IMAGPART (node), indent + 4); break; case STRING_CST: { const char *p = TREE_STRING_POINTER (node); int i = TREE_STRING_LENGTH (node); fputs (" \"", file); while (--i >= 0) { char ch = *p++; if (ch >= ' ' && ch < 127) putc (ch, file); else fprintf(file, "\\%03o", ch & 0xFF); } fputc ('\"', file); } /* Print the chain at second level. 
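That is, TREE_CHAIN is printed in full only for nodes at the second level of a debug_tree dump (each recursive call adds 4 to INDENT, so indent == 4 means a direct child of the node debug_tree was called on); deeper nodes get just a brief cross-reference so the output stays bounded.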
*/ if (indent == 4) print_node (file, "chain", TREE_CHAIN (node), indent + 4); else print_node_brief (file, "chain", TREE_CHAIN (node), indent + 4); break; case IDENTIFIER_NODE: lang_hooks.print_identifier (file, node, indent); break; case TREE_LIST: print_node (file, "purpose", TREE_PURPOSE (node), indent + 4); print_node (file, "value", TREE_VALUE (node), indent + 4); print_node (file, "chain", TREE_CHAIN (node), indent + 4); break; case TREE_VEC: len = TREE_VEC_LENGTH (node); for (i = 0; i < len; i++) if (TREE_VEC_ELT (node, i)) { char temp[10]; sprintf (temp, "elt %d", i); indent_to (file, indent + 4); print_node_brief (file, temp, TREE_VEC_ELT (node, i), 0); } break; case BLOCK: print_node (file, "vars", BLOCK_VARS (node), indent + 4); print_node (file, "supercontext", BLOCK_SUPERCONTEXT (node), indent + 4); print_node (file, "subblocks", BLOCK_SUBBLOCKS (node), indent + 4); print_node (file, "chain", BLOCK_CHAIN (node), indent + 4); print_node (file, "abstract_origin", BLOCK_ABSTRACT_ORIGIN (node), indent + 4); break; default: if (TREE_CODE_CLASS (TREE_CODE (node)) == 'x') lang_hooks.print_xnode (file, node, indent); break; } break; } if (EXPR_HAS_LOCATION (node)) { expanded_location xloc = expand_location (EXPR_LOCATION (node)); indent_to (file, indent+4); fprintf (file, "%s:%d", xloc.file, xloc.line); } fprintf (file, ">"); } /* Transformations based on profile information for values. Copyright (C) 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Definitions for transformations based on profile information for values. Copyright (C) 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_VALUE_PROF_H #define GCC_VALUE_PROF_H /* Supported histogram types. */ enum hist_type { HIST_TYPE_INTERVAL, /* Measures histogram of values inside a specified interval. */ HIST_TYPE_POW2, /* Histogram of power of 2 values. */ HIST_TYPE_SINGLE_VALUE, /* Tries to identify the value that is (almost) always constant. */ HIST_TYPE_CONST_DELTA /* Tries to identify the (almost) always constant difference between two evaluations of a value. 
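It is allocated four counters by rtl_find_values_to_profile below, but none of the RTL transformations in this file request or consume it yet.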
*/ }; #define COUNTER_FOR_HIST_TYPE(TYPE) ((int) (TYPE) + GCOV_FIRST_VALUE_COUNTER) #define HIST_TYPE_FOR_COUNTER(COUNTER) \ ((enum hist_type) ((COUNTER) - GCOV_FIRST_VALUE_COUNTER)) /* The value to measure. */ /* The void *'s are either rtx or tree, depending on which IR is in use. */ struct histogram_value { void * value; /* The value to profile. */ enum machine_mode mode; /* And its mode. */ void * seq; /* Insns required to count the profiled value. */ void * insn; /* Insn before that to measure. */ enum hist_type type; /* Type of information to measure. */ unsigned n_counters; /* Number of required counters. */ union { struct { int int_start; /* First value in interval. */ int steps; /* Number of values in it. */ int may_be_less; /* May the value be below? */ int may_be_more; /* Or above. */ } intvl; /* Interval histogram data. */ struct { int may_be_other; /* If the value may be non-positive or not 2^k. */ } pow2; /* Power of 2 histogram data. */ } hdata; /* Profiled information specific data. */ }; /* Hooks registration. */ extern void rtl_register_value_prof_hooks (void); extern void tree_register_value_prof_hooks (void); /* IR-independent entry points. */ extern void find_values_to_profile (unsigned *, struct histogram_value **); extern void free_profiled_values (unsigned, struct histogram_value *); extern bool value_profile_transformations (void); /* External declarations for edge-based profiling. */ struct profile_hooks { /* Insert code to increment an edge count. */ void (*gen_edge_profiler) (int, edge); /* Insert code to increment the interval histogram counter. */ void (*gen_interval_profiler) (struct histogram_value *, unsigned, unsigned); /* Insert code to increment the power of two histogram counter. */ void (*gen_pow2_profiler) (struct histogram_value *, unsigned, unsigned); /* Insert code to find the most common value. */ void (*gen_one_value_profiler) (struct histogram_value *, unsigned, unsigned); /* Insert code to find the most common value of a difference between two evaluations of an expression. */ void (*gen_const_delta_profiler) (struct histogram_value *, unsigned, unsigned); FILE * (*profile_dump_file) (void); }; /* In profile.c. */ extern void init_branch_prob (void); extern void branch_prob (void); extern void end_branch_prob (void); extern void tree_register_profile_hooks (void); extern void rtl_register_profile_hooks (void); /* In tree-profile.c. */ extern struct profile_hooks tree_profile_hooks; /* In rtl-profile.c. */ extern struct profile_hooks rtl_profile_hooks; #endif /* GCC_VALUE_PROF_H */ static struct value_prof_hooks *value_prof_hooks; /* In this file value profile based optimizations will be placed (none are here just now, but they are hopefully coming soon). Every such optimization should add its requirements for profiled values to insn_values_to_profile function. This function is called from branch_prob in profile.c and the requested values are instrumented by it in the first compilation with -fprofile-arcs. The optimization may then read the gathered data in the second compilation with -fbranch-probabilities. The measured data is appended as REG_VALUE_PROFILE note to the instrumented insn. The argument to the note consists of an EXPR_LIST where its members have the following meaning (from the first to the last): -- type of information gathered (HIST_TYPE*) -- the expression that is profiled -- list of counters starting from the first one. 
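For example, for a HIST_TYPE_SINGLE_VALUE histogram the counters following the profiled expression are three: the most common value observed, the number of times it was observed, and the total number of evaluations; divmod_fixed_value_transform below walks the EXPR_LIST in exactly that order.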
*/ static void insn_divmod_values_to_profile (rtx, unsigned *, struct histogram_value **); static void insn_values_to_profile (rtx, unsigned *, struct histogram_value **); static rtx gen_divmod_fixed_value (enum machine_mode, enum rtx_code, rtx, rtx, rtx, gcov_type, int); static rtx gen_mod_pow2 (enum machine_mode, enum rtx_code, rtx, rtx, rtx, int); static rtx gen_mod_subtract (enum machine_mode, enum rtx_code, rtx, rtx, rtx, int, int, int); static bool divmod_fixed_value_transform (rtx insn); static bool mod_pow2_value_transform (rtx); static bool mod_subtract_transform (rtx); /* Release the list of VALUES of length N_VALUES for that we want to measure histograms. */ void free_profiled_values (unsigned n_values ATTRIBUTE_UNUSED, struct histogram_value *values) { free (values); } /* Find values inside INSN for that we want to measure histograms for division/modulo optimization. */ static void insn_divmod_values_to_profile (rtx insn, unsigned *n_values, struct histogram_value **values) { rtx set, set_src, op1, op2; enum machine_mode mode; if (!INSN_P (insn)) return; set = single_set (insn); if (!set) return; mode = GET_MODE (SET_DEST (set)); if (!INTEGRAL_MODE_P (mode)) return; set_src = SET_SRC (set); switch (GET_CODE (set_src)) { case DIV: case MOD: case UDIV: case UMOD: op1 = XEXP (set_src, 0); op2 = XEXP (set_src, 1); if (side_effects_p (op2)) return; /* Check for a special case where the divisor is power of 2. */ if ((GET_CODE (set_src) == UMOD) && !CONSTANT_P (op2)) { *values = xrealloc (*values, (*n_values + 1) * sizeof (struct histogram_value)); (*values)[*n_values].value = op2; (*values)[*n_values].seq = NULL_RTX; (*values)[*n_values].mode = mode; (*values)[*n_values].insn = insn; (*values)[*n_values].type = HIST_TYPE_POW2; (*values)[*n_values].hdata.pow2.may_be_other = 1; (*n_values)++; } /* Check whether the divisor is not in fact a constant. */ if (!CONSTANT_P (op2)) { *values = xrealloc (*values, (*n_values + 1) * sizeof (struct histogram_value)); (*values)[*n_values].value = op2; (*values)[*n_values].mode = mode; (*values)[*n_values].seq = NULL_RTX; (*values)[*n_values].insn = insn; (*values)[*n_values].type = HIST_TYPE_SINGLE_VALUE; (*n_values)++; } /* For mod, check whether it is not often a noop (or replaceable by a few subtractions). */ if (GET_CODE (set_src) == UMOD && !side_effects_p (op1)) { rtx tmp; *values = xrealloc (*values, (*n_values + 1) * sizeof (struct histogram_value)); start_sequence (); tmp = simplify_gen_binary (DIV, mode, copy_rtx (op1), copy_rtx (op2)); (*values)[*n_values].value = force_operand (tmp, NULL_RTX); (*values)[*n_values].seq = get_insns (); end_sequence (); (*values)[*n_values].mode = mode; (*values)[*n_values].insn = insn; (*values)[*n_values].type = HIST_TYPE_INTERVAL; (*values)[*n_values].hdata.intvl.int_start = 0; (*values)[*n_values].hdata.intvl.steps = 2; (*values)[*n_values].hdata.intvl.may_be_less = 1; (*values)[*n_values].hdata.intvl.may_be_more = 1; (*n_values)++; } return; default: return; } } /* Find values inside INSN for that we want to measure histograms and adds them to list VALUES (increasing the record of its length in N_VALUES). */ static void insn_values_to_profile (rtx insn, unsigned *n_values, struct histogram_value **values) { if (flag_value_profile_transformations) insn_divmod_values_to_profile (insn, n_values, values); } /* Find list of values for that we want to measure histograms. 
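The number of counters a histogram needs depends on its type: the interval histogram requested above for UMOD (int_start 0, steps 2, with may_be_less and may_be_more set) needs 2 + 1 + 1 = 4 counters, a power-of-2 histogram needs one counter per bit of the mode plus one for other values, and a single-value histogram always needs 3.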
*/ static void rtl_find_values_to_profile (unsigned *n_values, struct histogram_value **values) { rtx insn; unsigned i; life_analysis (NULL, PROP_DEATH_NOTES); *n_values = 0; *values = NULL; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) insn_values_to_profile (insn, n_values, values); for (i = 0; i < *n_values; i++) { switch ((*values)[i].type) { case HIST_TYPE_INTERVAL: if (dump_file) fprintf (dump_file, "Interval counter for insn %d, range %d -- %d.\n", INSN_UID ((rtx)(*values)[i].insn), (*values)[i].hdata.intvl.int_start, ((*values)[i].hdata.intvl.int_start + (*values)[i].hdata.intvl.steps - 1)); (*values)[i].n_counters = (*values)[i].hdata.intvl.steps + ((*values)[i].hdata.intvl.may_be_less ? 1 : 0) + ((*values)[i].hdata.intvl.may_be_more ? 1 : 0); break; case HIST_TYPE_POW2: if (dump_file) fprintf (dump_file, "Pow2 counter for insn %d.\n", INSN_UID ((rtx)(*values)[i].insn)); (*values)[i].n_counters = GET_MODE_BITSIZE ((*values)[i].mode) + ((*values)[i].hdata.pow2.may_be_other ? 1 : 0); break; case HIST_TYPE_SINGLE_VALUE: if (dump_file) fprintf (dump_file, "Single value counter for insn %d.\n", INSN_UID ((rtx)(*values)[i].insn)); (*values)[i].n_counters = 3; break; case HIST_TYPE_CONST_DELTA: if (dump_file) fprintf (dump_file, "Constant delta counter for insn %d.\n", INSN_UID ((rtx)(*values)[i].insn)); (*values)[i].n_counters = 4; break; default: abort (); } } allocate_reg_info (max_reg_num (), FALSE, FALSE); } /* Main entry point. Finds REG_VALUE_PROFILE notes from profiler and uses them to identify and exploit properties of values that are hard to analyze statically. We do following transformations: 1) x = a / b; where b is almost always a constant N is transformed to if (b == N) x = a / N; else x = a / b; Analogically with % 2) x = a % b where b is almost always a power of 2 and the division is unsigned TODO -- handle signed case as well if ((b & (b - 1)) == 0) x = a & (b - 1); else x = x % b; Note that when b = 0, no error will occur and x = a; this is correct, as result of such operation is undefined. 3) x = a % b where a is almost always less then b and the division is unsigned TODO -- handle signed case as well x = a; if (x >= b) x %= b; 4) x = a % b where a is almost always less then 2 * b and the division is unsigned TODO -- handle signed case as well x = a; if (x >= b) x -= b; if (x >= b) x %= b; It would be possible to continue analogically for K * b for other small K's, but it is probably not useful. TODO: There are other useful cases that could be handled by a similar mechanism, for example: for (i = 0; i < n; i++) ... transform to (for constant N): if (n == N) for (i = 0; i < N; i++) ... else for (i = 0; i < n; i++) ... making unroller happy. Since this may grow the code significantly, we would have to be very careful here. */ static bool rtl_value_profile_transformations (void) { rtx insn, next; int changed = false; for (insn = get_insns (); insn; insn = next) { next = NEXT_INSN (insn); if (!INSN_P (insn)) continue; /* Scan for insn carrying a histogram. */ if (!find_reg_note (insn, REG_VALUE_PROFILE, 0)) continue; /* Ignore cold areas -- we are growing a code. 
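(Blocks rejected by maybe_hot_bb_p are skipped because each transformation below duplicates the division or modulo, so expanding it in rarely executed code would only cost size.)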
*/ if (!maybe_hot_bb_p (BLOCK_FOR_INSN (insn))) continue; if (dump_file) { fprintf (dump_file, "Trying transformations on insn %d\n", INSN_UID (insn)); print_rtl_single (dump_file, insn); } /* Transformations: */ if (flag_value_profile_transformations && (mod_subtract_transform (insn) || divmod_fixed_value_transform (insn) || mod_pow2_value_transform (insn))) changed = true; } if (changed) { commit_edge_insertions (); allocate_reg_info (max_reg_num (), FALSE, FALSE); } return changed; } /* Generate code for transformation 1 (with MODE and OPERATION, operands OP1 and OP2, whose value is expected to be VALUE, result TARGET and probability of taking the optimal path PROB). */ static rtx gen_divmod_fixed_value (enum machine_mode mode, enum rtx_code operation, rtx target, rtx op1, rtx op2, gcov_type value, int prob) { rtx tmp, tmp1, jump; rtx neq_label = gen_label_rtx (); rtx end_label = gen_label_rtx (); rtx sequence; start_sequence (); if (!REG_P (op2)) { tmp = gen_reg_rtx (mode); emit_move_insn (tmp, copy_rtx (op2)); } else tmp = op2; do_compare_rtx_and_jump (tmp, GEN_INT (value), NE, 0, mode, NULL_RTX, NULL_RTX, neq_label); /* Add branch probability to jump we just created. */ jump = get_last_insn (); REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (REG_BR_PROB_BASE - prob), REG_NOTES (jump)); tmp1 = simplify_gen_binary (operation, mode, copy_rtx (op1), GEN_INT (value)); tmp1 = force_operand (tmp1, target); if (tmp1 != target) emit_move_insn (copy_rtx (target), copy_rtx (tmp1)); emit_jump_insn (gen_jump (end_label)); emit_barrier (); emit_label (neq_label); tmp1 = simplify_gen_binary (operation, mode, copy_rtx (op1), copy_rtx (tmp)); tmp1 = force_operand (tmp1, target); if (tmp1 != target) emit_move_insn (copy_rtx (target), copy_rtx (tmp1)); emit_label (end_label); sequence = get_insns (); end_sequence (); rebuild_jump_labels (sequence); return sequence; } /* Do transform 1) on INSN if applicable. */ static bool divmod_fixed_value_transform (rtx insn) { rtx set, set_src, set_dest, op1, op2, value, histogram; enum rtx_code code; enum machine_mode mode; gcov_type val, count, all; edge e; int prob; set = single_set (insn); if (!set) return false; set_src = SET_SRC (set); set_dest = SET_DEST (set); code = GET_CODE (set_src); mode = GET_MODE (set_dest); if (code != DIV && code != MOD && code != UDIV && code != UMOD) return false; op1 = XEXP (set_src, false); op2 = XEXP (set_src, 1); for (histogram = REG_NOTES (insn); histogram; histogram = XEXP (histogram, 1)) if (REG_NOTE_KIND (histogram) == REG_VALUE_PROFILE && XEXP (XEXP (histogram, 0), 0) == GEN_INT (HIST_TYPE_SINGLE_VALUE)) break; if (!histogram) return false; histogram = XEXP (XEXP (histogram, 0), 1); value = XEXP (histogram, 0); histogram = XEXP (histogram, 1); val = INTVAL (XEXP (histogram, 0)); histogram = XEXP (histogram, 1); count = INTVAL (XEXP (histogram, 0)); histogram = XEXP (histogram, 1); all = INTVAL (XEXP (histogram, 0)); /* We require that count is at least half of all; this means that for the transformation to fire the value must be constant at least 50% of time (and 75% gives the guarantee of usage). */ if (!rtx_equal_p (op2, value) || 2 * count < all) return false; if (dump_file) fprintf (dump_file, "Div/mod by constant transformation on insn %d\n", INSN_UID (insn)); /* Compute probability of taking the optimal path. 
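This is count / all scaled to REG_BR_PROB_BASE and rounded to the nearest integer; e.g. if the divisor matched the candidate constant in 900 of 1000 recorded evaluations, prob becomes nine tenths of REG_BR_PROB_BASE, and REG_BR_PROB_BASE - prob is attached to the fallback jump in gen_divmod_fixed_value.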
*/ prob = (count * REG_BR_PROB_BASE + all / 2) / all; e = split_block (BLOCK_FOR_INSN (insn), PREV_INSN (insn)); delete_insn (insn); insert_insn_on_edge ( gen_divmod_fixed_value (mode, code, set_dest, op1, op2, val, prob), e); return true; } /* Generate code for transformation 2 (with MODE and OPERATION, operands OP1 and OP2, result TARGET and probability of taking the optimal path PROB). */ static rtx gen_mod_pow2 (enum machine_mode mode, enum rtx_code operation, rtx target, rtx op1, rtx op2, int prob) { rtx tmp, tmp1, tmp2, tmp3, jump; rtx neq_label = gen_label_rtx (); rtx end_label = gen_label_rtx (); rtx sequence; start_sequence (); if (!REG_P (op2)) { tmp = gen_reg_rtx (mode); emit_move_insn (tmp, copy_rtx (op2)); } else tmp = op2; tmp1 = expand_simple_binop (mode, PLUS, tmp, constm1_rtx, NULL_RTX, 0, OPTAB_WIDEN); tmp2 = expand_simple_binop (mode, AND, tmp, tmp1, NULL_RTX, 0, OPTAB_WIDEN); do_compare_rtx_and_jump (tmp2, const0_rtx, NE, 0, mode, NULL_RTX, NULL_RTX, neq_label); /* Add branch probability to jump we just created. */ jump = get_last_insn (); REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (REG_BR_PROB_BASE - prob), REG_NOTES (jump)); tmp3 = expand_simple_binop (mode, AND, op1, tmp1, target, 0, OPTAB_WIDEN); if (tmp3 != target) emit_move_insn (copy_rtx (target), tmp3); emit_jump_insn (gen_jump (end_label)); emit_barrier (); emit_label (neq_label); tmp1 = simplify_gen_binary (operation, mode, copy_rtx (op1), copy_rtx (tmp)); tmp1 = force_operand (tmp1, target); if (tmp1 != target) emit_move_insn (target, tmp1); emit_label (end_label); sequence = get_insns (); end_sequence (); rebuild_jump_labels (sequence); return sequence; } /* Do transform 2) on INSN if applicable. */ static bool mod_pow2_value_transform (rtx insn) { rtx set, set_src, set_dest, op1, op2, value, histogram; enum rtx_code code; enum machine_mode mode; gcov_type wrong_values, count; edge e; int i, all, prob; set = single_set (insn); if (!set) return false; set_src = SET_SRC (set); set_dest = SET_DEST (set); code = GET_CODE (set_src); mode = GET_MODE (set_dest); if (code != UMOD) return false; op1 = XEXP (set_src, 0); op2 = XEXP (set_src, 1); for (histogram = REG_NOTES (insn); histogram; histogram = XEXP (histogram, 1)) if (REG_NOTE_KIND (histogram) == REG_VALUE_PROFILE && XEXP (XEXP (histogram, 0), 0) == GEN_INT (HIST_TYPE_POW2)) break; if (!histogram) return false; histogram = XEXP (XEXP (histogram, 0), 1); value = XEXP (histogram, 0); histogram = XEXP (histogram, 1); wrong_values =INTVAL (XEXP (histogram, 0)); histogram = XEXP (histogram, 1); count = 0; for (i = 0; i < GET_MODE_BITSIZE (mode); i++) { count += INTVAL (XEXP (histogram, 0)); histogram = XEXP (histogram, 1); } if (!rtx_equal_p (op2, value)) return false; /* We require that we hit a power of two at least half of all evaluations. */ if (count < wrong_values) return false; if (dump_file) fprintf (dump_file, "Mod power of 2 transformation on insn %d\n", INSN_UID (insn)); /* Compute probability of taking the optimal path. */ all = count + wrong_values; prob = (count * REG_BR_PROB_BASE + all / 2) / all; e = split_block (BLOCK_FOR_INSN (insn), PREV_INSN (insn)); delete_insn (insn); insert_insn_on_edge ( gen_mod_pow2 (mode, code, set_dest, op1, op2, prob), e); return true; } /* Generate code for transformations 3 and 4 (with MODE and OPERATION, operands OP1 and OP2, result TARGET, at most SUB subtractions, and probability of taking the optimal path(s) PROB1 and PROB2). 
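Roughly, for SUB == 1 the emitted sequence behaves like target = op1; if (target >= op2) { target -= op2; if (target >= op2) target %= op2; } which is transformation 4 above; with SUB == 0 the subtraction step is omitted, giving transformation 3. PROB1 and PROB2 end up as REG_BR_PROB notes on the conditional jumps.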
*/ static rtx gen_mod_subtract (enum machine_mode mode, enum rtx_code operation, rtx target, rtx op1, rtx op2, int sub, int prob1, int prob2) { rtx tmp, tmp1, jump; rtx end_label = gen_label_rtx (); rtx sequence; int i; start_sequence (); if (!REG_P (op2)) { tmp = gen_reg_rtx (mode); emit_move_insn (tmp, copy_rtx (op2)); } else tmp = op2; emit_move_insn (target, copy_rtx (op1)); do_compare_rtx_and_jump (target, tmp, LTU, 0, mode, NULL_RTX, NULL_RTX, end_label); /* Add branch probability to jump we just created. */ jump = get_last_insn (); REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob1), REG_NOTES (jump)); for (i = 0; i < sub; i++) { tmp1 = expand_simple_binop (mode, MINUS, target, tmp, target, 0, OPTAB_WIDEN); if (tmp1 != target) emit_move_insn (target, tmp1); do_compare_rtx_and_jump (target, tmp, LTU, 0, mode, NULL_RTX, NULL_RTX, end_label); /* Add branch probability to jump we just created. */ jump = get_last_insn (); REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob2), REG_NOTES (jump)); } tmp1 = simplify_gen_binary (operation, mode, copy_rtx (target), copy_rtx (tmp)); tmp1 = force_operand (tmp1, target); if (tmp1 != target) emit_move_insn (target, tmp1); emit_label (end_label); sequence = get_insns (); end_sequence (); rebuild_jump_labels (sequence); return sequence; } /* Do transforms 3) and 4) on INSN if applicable. */ static bool mod_subtract_transform (rtx insn) { rtx set, set_src, set_dest, op1, op2, value, histogram; enum rtx_code code; enum machine_mode mode; gcov_type wrong_values, counts[2], count, all; edge e; int i, prob1, prob2; set = single_set (insn); if (!set) return false; set_src = SET_SRC (set); set_dest = SET_DEST (set); code = GET_CODE (set_src); mode = GET_MODE (set_dest); if (code != UMOD) return false; op1 = XEXP (set_src, 0); op2 = XEXP (set_src, 1); for (histogram = REG_NOTES (insn); histogram; histogram = XEXP (histogram, 1)) if (REG_NOTE_KIND (histogram) == REG_VALUE_PROFILE && XEXP (XEXP (histogram, 0), 0) == GEN_INT (HIST_TYPE_INTERVAL)) break; if (!histogram) return false; histogram = XEXP (XEXP (histogram, 0), 1); value = XEXP (histogram, 0); histogram = XEXP (histogram, 1); all = 0; for (i = 0; i < 2; i++) { counts[i] = INTVAL (XEXP (histogram, 0)); all += counts[i]; histogram = XEXP (histogram, 1); } wrong_values = INTVAL (XEXP (histogram, 0)); histogram = XEXP (histogram, 1); wrong_values += INTVAL (XEXP (histogram, 0)); all += wrong_values; /* We require that we use just subtractions in at least 50% of all evaluations. */ count = 0; for (i = 0; i < 2; i++) { count += counts[i]; if (count * 2 >= all) break; } if (i == 2) return false; if (dump_file) fprintf (dump_file, "Mod subtract transformation on insn %d\n", INSN_UID (insn)); /* Compute probability of taking the optimal path(s). */ prob1 = (counts[0] * REG_BR_PROB_BASE + all / 2) / all; prob2 = (counts[1] * REG_BR_PROB_BASE + all / 2) / all; e = split_block (BLOCK_FOR_INSN (insn), PREV_INSN (insn)); delete_insn (insn); insert_insn_on_edge ( gen_mod_subtract (mode, code, set_dest, op1, op2, i, prob1, prob2), e); return true; } /* Connection to the outside world. */ /* Struct for IR-dependent hooks. */ struct value_prof_hooks { /* Find list of values for which we want to measure histograms. */ void (*find_values_to_profile) (unsigned *, struct histogram_value **); /* Identify and exploit properties of values that are hard to analyze statically. See value-prof.c for more detail. 
*/ bool (*value_profile_transformations) (void); }; /* Hooks for RTL-based versions (the only ones that currently work). */ static struct value_prof_hooks rtl_value_prof_hooks = { rtl_find_values_to_profile, rtl_value_profile_transformations }; void rtl_register_value_prof_hooks (void) { value_prof_hooks = &rtl_value_prof_hooks; if (ir_type ()) abort (); } /* Tree-based versions are stubs for now. */ static void tree_find_values_to_profile (unsigned *n_values, struct histogram_value **values) { (void)n_values; (void)values; abort (); } static bool tree_value_profile_transformations (void) { abort (); } static struct value_prof_hooks tree_value_prof_hooks = { tree_find_values_to_profile, tree_value_profile_transformations }; void tree_register_value_prof_hooks (void) { value_prof_hooks = &tree_value_prof_hooks; if (!ir_type ()) abort (); } /* IR-independent entry points. */ void find_values_to_profile (unsigned *n_values, struct histogram_value **values) { (value_prof_hooks->find_values_to_profile) (n_values, values); } bool value_profile_transformations (void) { return (value_prof_hooks->value_profile_transformations) (); } /* Variable tracking routines for the GNU compiler. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains the variable tracking pass. It computes where variables are located (which registers or where in memory) at each position in instruction stream and emits notes describing the locations. Debug information (DWARF2 location lists) is finally generated from these notes. With this debug information, it is possible to show variables even when debugging optimized code. How does the variable tracking pass work? First, it scans RTL code for uses, stores and clobbers (register/memory references in instructions), for call insns and for stack adjustments separately for each basic block and saves them to an array of micro operations. The micro operations of one instruction are ordered so that pre-modifying stack adjustment < use < use with no var < call insn < < set < clobber < post-modifying stack adjustment Then, a forward dataflow analysis is performed to find out how locations of variables change through code and to propagate the variable locations along control flow graph. The IN set for basic block BB is computed as a union of OUT sets of BB's predecessors, the OUT set for BB is copied from the IN set for BB and is changed according to micro operations in BB. The IN and OUT sets for basic blocks consist of a current stack adjustment (used for adjusting offset of variables addressed using stack pointer), the table of structures describing the locations of parts of a variable and for each physical register a linked list for each physical register. The linked list is a list of variable parts stored in the register, i.e. 
it is a list of triplets (reg, decl, offset) where decl is REG_EXPR (reg) and offset is REG_OFFSET (reg). The linked list is used for effective deleting appropriate variable parts when we set or clobber the register. There may be more than one variable part in a register. The linked lists should be pretty short so it is a good data structure here. For example in the following code, register allocator may assign same register to variables A and B, and both of them are stored in the same register in CODE: if (cond) set A; else set B; CODE; if (cond) use A; else use B; Finally, the NOTE_INSN_VAR_LOCATION notes describing the variable locations are emitted to appropriate positions in RTL code. Each such a note describes the location of one variable at the point in instruction stream where the note is. There is no need to emit a note for each variable before each instruction, we only emit these notes where the location of variable changes (this means that we also emit notes for changes between the OUT set of the previous block and the IN set of the current block). The notes consist of two parts: 1. the declaration (from REG_EXPR or MEM_EXPR) 2. the location of a variable - it is either a simple register/memory reference (for simple variables, for example int), or a parallel of register/memory references (for a large variables which consist of several parts, for example long long). */ /* Type of micro operation. */ enum micro_operation_type { MO_USE, /* Use location (REG or MEM). */ MO_USE_NO_VAR,/* Use location which is not associated with a variable or the variable is not trackable. */ MO_SET, /* Set location. */ MO_CLOBBER, /* Clobber location. */ MO_CALL, /* Call insn. */ MO_ADJUST /* Adjust stack pointer. */ }; /* Where shall the note be emitted? BEFORE or AFTER the instruction. */ enum emit_note_where { EMIT_NOTE_BEFORE_INSN, EMIT_NOTE_AFTER_INSN }; /* Structure holding information about micro operation. */ typedef struct micro_operation_def { /* Type of micro operation. */ enum micro_operation_type type; union { /* Location. */ rtx loc; /* Stack adjustment. */ HOST_WIDE_INT adjust; } u; /* The instruction which the micro operation is in. */ rtx insn; } micro_operation; /* Structure for passing some other parameters to function emit_note_insn_var_location. */ typedef struct emit_note_data_def { /* The instruction which the note will be emitted before/after. */ rtx insn; /* Where the note will be emitted (before/after insn)? */ enum emit_note_where where; } emit_note_data; /* Description of location of a part of a variable. The content of a physical register is described by a chain of these structures. The chains are pretty short (usually 1 or 2 elements) and thus chain is the best data structure. */ typedef struct attrs_def { /* Pointer to next member of the list. */ struct attrs_def *next; /* The rtx of register. */ rtx loc; /* The declaration corresponding to LOC. */ tree decl; /* Offset from start of DECL. */ HOST_WIDE_INT offset; } *attrs; /* Structure holding the IN or OUT set for a basic block. */ typedef struct dataflow_set_def { /* Adjustment of stack offset. */ HOST_WIDE_INT stack_adjust; /* Attributes for registers (lists of attrs). */ attrs regs[FIRST_PSEUDO_REGISTER]; /* Variable locations. */ htab_t vars; } dataflow_set; /* The structure (one for each basic block) containing the information needed for variable tracking. */ typedef struct variable_tracking_info_def { /* Number of micro operations stored in the MOS array. */ int n_mos; /* The array of micro operations. 
*/ micro_operation *mos; /* The IN and OUT set for dataflow analysis. */ dataflow_set in; dataflow_set out; /* Has the block been visited in DFS? */ bool visited; } *variable_tracking_info; /* Structure for chaining the locations. */ typedef struct location_chain_def { /* Next element in the chain. */ struct location_chain_def *next; /* The location (REG or MEM). */ rtx loc; } *location_chain; /* Structure describing one part of variable. */ typedef struct variable_part_def { /* Chain of locations of the part. */ location_chain loc_chain; /* Location which was last emitted to location list. */ rtx cur_loc; /* The offset in the variable. */ HOST_WIDE_INT offset; } variable_part; /* Maximum number of location parts. */ #define MAX_VAR_PARTS 16 /* Structure describing where the variable is located. */ typedef struct variable_def { /* The declaration of the variable. */ tree decl; /* Reference count. */ int refcount; /* Number of variable parts. */ int n_var_parts; /* The variable parts. */ variable_part var_part[MAX_VAR_PARTS]; } *variable; /* Hash function for DECL for VARIABLE_HTAB. */ #define VARIABLE_HASH_VAL(decl) ((size_t) (decl)) /* Pointer to the BB's information specific to variable tracking pass. */ #define VTI(BB) ((variable_tracking_info) (BB)->aux) /* Alloc pool for struct attrs_def. */ static alloc_pool attrs_pool; /* Alloc pool for struct variable_def. */ static alloc_pool var_pool; /* Alloc pool for struct location_chain_def. */ static alloc_pool loc_chain_pool; /* Changed variables, notes will be emitted for them. */ static htab_t changed_variables; /* Shall notes be emitted? */ static bool emit_notes; /* Fake variable for stack pointer. */ tree frame_base_decl; /* Stack adjust caused by function prologue. */ static HOST_WIDE_INT frame_stack_adjust; /* Local function prototypes. 
*/ static void stack_adjust_offset_pre_post (rtx, HOST_WIDE_INT *, HOST_WIDE_INT *); static void insn_stack_adjust_offset_pre_post (rtx, HOST_WIDE_INT *, HOST_WIDE_INT *); static void bb_stack_adjust_offset (basic_block); static HOST_WIDE_INT prologue_stack_adjust (void); static bool vt_stack_adjustments (void); static rtx adjust_stack_reference (rtx, HOST_WIDE_INT); static hashval_t variable_htab_hash (const void *); static int variable_htab_eq (const void *, const void *); static void variable_htab_free (void *); static void init_attrs_list_set (attrs *); static void attrs_list_clear (attrs *); static attrs attrs_list_member (attrs, tree, HOST_WIDE_INT); static void attrs_list_insert (attrs *, tree, HOST_WIDE_INT, rtx); static void attrs_list_copy (attrs *, attrs); static void attrs_list_union (attrs *, attrs); static void vars_clear (htab_t); static variable unshare_variable (dataflow_set *set, variable var); static int vars_copy_1 (void **, void *); static void vars_copy (htab_t, htab_t); static void var_reg_delete_and_set (dataflow_set *, rtx); static void var_reg_delete (dataflow_set *, rtx); static void var_regno_delete (dataflow_set *, int); static void var_mem_delete_and_set (dataflow_set *, rtx); static void var_mem_delete (dataflow_set *, rtx); static void dataflow_set_init (dataflow_set *, int); static void dataflow_set_clear (dataflow_set *); static void dataflow_set_copy_vt (dataflow_set *, dataflow_set *); static int variable_union_info_cmp_pos (const void *, const void *); static int variable_union (void **, void *); static void dataflow_set_union (dataflow_set *, dataflow_set *); static bool variable_part_different_p (variable_part *, variable_part *); static bool variable_different_p (variable, variable, bool); static int dataflow_set_different_1 (void **, void *); static int dataflow_set_different_2 (void **, void *); static bool dataflow_set_different (dataflow_set *, dataflow_set *); static void dataflow_set_destroy (dataflow_set *); static bool contains_symbol_ref (rtx); static bool track_expr_p (tree); static int count_uses (rtx *, void *); static void count_uses_1 (rtx *, void *); static void count_stores (rtx, rtx, void *); static int add_uses (rtx *, void *); static void add_uses_1 (rtx *, void *); static void add_stores (rtx, rtx, void *); static bool compute_bb_dataflow (basic_block); static void vt_find_locations (void); static void dump_attrs_list (attrs); static int dump_variable_vt (void **, void *); static void dump_vars (htab_t); static void dump_dataflow_set (dataflow_set *); static void dump_dataflow_sets (void); static void variable_was_changed (variable, htab_t); static void set_frame_base_location (dataflow_set *, rtx); static void set_variable_part (dataflow_set *, rtx, tree, HOST_WIDE_INT); static void delete_variable_part (dataflow_set *, rtx, tree, HOST_WIDE_INT); static int emit_note_insn_var_location (void **, void *); static void emit_notes_for_changes (rtx, enum emit_note_where); static int emit_notes_for_differences_1 (void **, void *); static int emit_notes_for_differences_2 (void **, void *); static void emit_notes_for_differences (rtx, dataflow_set *, dataflow_set *); static void emit_notes_in_bb (basic_block); static void vt_emit_notes (void); static bool vt_get_decl_and_offset (rtx, tree *, HOST_WIDE_INT *); static void vt_add_function_parameters (void); static void vt_initialize (void); static void vt_finalize (void); /* Given a SET, calculate the amount of stack adjustment it contains PRE- and POST-modifying stack pointer. 
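For example, (set (reg sp) (plus (reg sp) (const_int -16))) adds 16 to *POST, while a push expressed as (set (mem (pre_dec (reg sp))) ...) adds the size of the stored mode to *PRE.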
This function is similar to stack_adjust_offset. */ static void stack_adjust_offset_pre_post (rtx pattern, HOST_WIDE_INT *pre, HOST_WIDE_INT *post) { rtx src = SET_SRC (pattern); rtx dest = SET_DEST (pattern); enum rtx_code code; if (dest == stack_pointer_rtx) { /* (set (reg sp) (plus (reg sp) (const_int))) */ code = GET_CODE (src); if (! (code == PLUS || code == MINUS) || XEXP (src, 0) != stack_pointer_rtx || GET_CODE (XEXP (src, 1)) != CONST_INT) return; if (code == MINUS) *post += INTVAL (XEXP (src, 1)); else *post -= INTVAL (XEXP (src, 1)); } else if (MEM_P (dest)) { /* (set (mem (pre_dec (reg sp))) (foo)) */ src = XEXP (dest, 0); code = GET_CODE (src); switch (code) { case PRE_MODIFY: case POST_MODIFY: if (XEXP (src, 0) == stack_pointer_rtx) { rtx val = XEXP (XEXP (src, 1), 1); /* We handle only adjustments by constant amount. */ if (GET_CODE (XEXP (src, 1)) != PLUS || GET_CODE (val) != CONST_INT) abort (); if (code == PRE_MODIFY) *pre -= INTVAL (val); else *post -= INTVAL (val); break; } return; case PRE_DEC: if (XEXP (src, 0) == stack_pointer_rtx) { *pre += GET_MODE_SIZE (GET_MODE (dest)); break; } return; case POST_DEC: if (XEXP (src, 0) == stack_pointer_rtx) { *post += GET_MODE_SIZE (GET_MODE (dest)); break; } return; case PRE_INC: if (XEXP (src, 0) == stack_pointer_rtx) { *pre -= GET_MODE_SIZE (GET_MODE (dest)); break; } return; case POST_INC: if (XEXP (src, 0) == stack_pointer_rtx) { *post -= GET_MODE_SIZE (GET_MODE (dest)); break; } return; default: return; } } } /* Given an INSN, calculate the amount of stack adjustment it contains PRE- and POST-modifying stack pointer. */ static void insn_stack_adjust_offset_pre_post (rtx insn, HOST_WIDE_INT *pre, HOST_WIDE_INT *post) { *pre = 0; *post = 0; if (GET_CODE (PATTERN (insn)) == SET) stack_adjust_offset_pre_post (PATTERN (insn), pre, post); else if (GET_CODE (PATTERN (insn)) == PARALLEL || GET_CODE (PATTERN (insn)) == SEQUENCE) { int i; /* There may be stack adjustments inside compound insns. Search for them. */ for ( i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET) stack_adjust_offset_pre_post (XVECEXP (PATTERN (insn), 0, i), pre, post); } } /* Compute stack adjustment in basic block BB. */ static void bb_stack_adjust_offset (basic_block bb) { HOST_WIDE_INT offset; int i; offset = VTI (bb)->in.stack_adjust; for (i = 0; i < VTI (bb)->n_mos; i++) { if (VTI (bb)->mos[i].type == MO_ADJUST) offset += VTI (bb)->mos[i].u.adjust; else if (VTI (bb)->mos[i].type != MO_CALL) { if (MEM_P (VTI (bb)->mos[i].u.loc)) { VTI (bb)->mos[i].u.loc = adjust_stack_reference (VTI (bb)->mos[i].u.loc, -offset); } } } VTI (bb)->out.stack_adjust = offset; } /* Compute stack adjustment caused by function prologue. */ static HOST_WIDE_INT prologue_stack_adjust (void) { HOST_WIDE_INT offset = 0; basic_block bb = ENTRY_BLOCK_PTR->next_bb; rtx insn; rtx end; if (!BB_END (bb)) return 0; end = NEXT_INSN (BB_END (bb)); for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_PROLOGUE_END) break; if (INSN_P (insn)) { HOST_WIDE_INT tmp; insn_stack_adjust_offset_pre_post (insn, &tmp, &tmp); offset += tmp; } } return offset; } /* Compute stack adjustments for all blocks by traversing DFS tree. Return true when the adjustments on all incoming edges are consistent. Heavily borrowed from flow_depth_first_order_compute. */ static bool vt_stack_adjustments (void) { edge *stack; int sp; /* Initialize entry block. 
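Its OUT adjustment starts at frame_stack_adjust, the adjustment already performed by the function prologue, so the DFS below propagates stack offsets that are valid for the function body.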
*/ VTI (ENTRY_BLOCK_PTR)->visited = true; VTI (ENTRY_BLOCK_PTR)->out.stack_adjust = frame_stack_adjust; /* Allocate stack for back-tracking up CFG. */ stack = xmalloc ((n_basic_blocks + 1) * sizeof (edge)); sp = 0; /* Push the first edge on to the stack. */ stack[sp++] = ENTRY_BLOCK_PTR->succ; while (sp) { edge e; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ e = stack[sp - 1]; src = e->src; dest = e->dest; /* Check if the edge destination has been visited yet. */ if (!VTI (dest)->visited) { VTI (dest)->visited = true; VTI (dest)->in.stack_adjust = VTI (src)->out.stack_adjust; bb_stack_adjust_offset (dest); if (dest->succ) /* Since the DEST node has been visited for the first time, check its successors. */ stack[sp++] = dest->succ; } else { /* Check whether the adjustments on the edges are the same. */ if (VTI (dest)->in.stack_adjust != VTI (src)->out.stack_adjust) { free (stack); return false; } if (e->succ_next) /* Go to the next edge. */ stack[sp - 1] = e->succ_next; else /* Return to previous level if there are no more edges. */ sp--; } } free (stack); return true; } /* Adjust stack reference MEM by ADJUSTMENT bytes and return the new rtx. */ static rtx adjust_stack_reference (rtx mem, HOST_WIDE_INT adjustment) { rtx adjusted_mem; rtx tmp; if (adjustment == 0) return mem; adjusted_mem = copy_rtx (mem); XEXP (adjusted_mem, 0) = replace_rtx (XEXP (adjusted_mem, 0), stack_pointer_rtx, gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (adjustment))); tmp = simplify_rtx (XEXP (adjusted_mem, 0)); if (tmp) XEXP (adjusted_mem, 0) = tmp; return adjusted_mem; } /* The hash function for variable_htab, computes the hash value from the declaration of variable X. */ static hashval_t variable_htab_hash (const void *x) { const variable v = (const variable) x; return (VARIABLE_HASH_VAL (v->decl)); } /* Compare the declaration of variable X with declaration Y. */ static int variable_htab_eq (const void *x, const void *y) { const variable v = (const variable) x; const tree decl = (const tree) y; return (VARIABLE_HASH_VAL (v->decl) == VARIABLE_HASH_VAL (decl)); } /* Free the element of VARIABLE_HTAB (its type is struct variable_def). */ static void variable_htab_free (void *elem) { int i; variable var = (variable) elem; location_chain node, next; #ifdef ENABLE_CHECKING if (var->refcount <= 0) abort (); #endif var->refcount--; if (var->refcount > 0) return; for (i = 0; i < var->n_var_parts; i++) { for (node = var->var_part[i].loc_chain; node; node = next) { next = node->next; pool_free (loc_chain_pool, node); } var->var_part[i].loc_chain = NULL; } pool_free (var_pool, var); } /* Initialize the set (array) SET of attrs to empty lists. */ static void init_attrs_list_set (attrs *set) { int i; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) set[i] = NULL; } /* Make the list *LISTP empty. */ static void attrs_list_clear (attrs *listp) { attrs list, next; for (list = *listp; list; list = next) { next = list->next; pool_free (attrs_pool, list); } *listp = NULL; } /* Return true if the pair of DECL and OFFSET is the member of the LIST. */ static attrs attrs_list_member (attrs list, tree decl, HOST_WIDE_INT offset) { for (; list; list = list->next) if (list->decl == decl && list->offset == offset) return list; return NULL; } /* Insert the triplet DECL, OFFSET, LOC to the list *LISTP. 
*/ static void attrs_list_insert (attrs *listp, tree decl, HOST_WIDE_INT offset, rtx loc) { attrs list; list = pool_alloc (attrs_pool); list->loc = loc; list->decl = decl; list->offset = offset; list->next = *listp; *listp = list; } /* Copy all nodes from SRC and create a list *DSTP of the copies. */ static void attrs_list_copy (attrs *dstp, attrs src) { attrs n; attrs_list_clear (dstp); for (; src; src = src->next) { n = pool_alloc (attrs_pool); n->loc = src->loc; n->decl = src->decl; n->offset = src->offset; n->next = *dstp; *dstp = n; } } /* Add all nodes from SRC which are not in *DSTP to *DSTP. */ static void attrs_list_union (attrs *dstp, attrs src) { for (; src; src = src->next) { if (!attrs_list_member (*dstp, src->decl, src->offset)) attrs_list_insert (dstp, src->decl, src->offset, src->loc); } } /* Delete all variables from hash table VARS. */ static void vars_clear (htab_t vars) { htab_empty (vars); } /* Return a copy of a variable VAR and insert it to dataflow set SET. */ static variable unshare_variable (dataflow_set *set, variable var) { void **slot; variable new_var; int i; new_var = pool_alloc (var_pool); new_var->decl = var->decl; new_var->refcount = 1; var->refcount--; new_var->n_var_parts = var->n_var_parts; for (i = 0; i < var->n_var_parts; i++) { location_chain node; location_chain *nextp; new_var->var_part[i].offset = var->var_part[i].offset; nextp = &new_var->var_part[i].loc_chain; for (node = var->var_part[i].loc_chain; node; node = node->next) { location_chain new_lc; new_lc = pool_alloc (loc_chain_pool); new_lc->next = NULL; new_lc->loc = node->loc; *nextp = new_lc; nextp = &new_lc->next; } /* We are at the basic block boundary when copying variable description so set the CUR_LOC to be the first element of the chain. */ if (new_var->var_part[i].loc_chain) new_var->var_part[i].cur_loc = new_var->var_part[i].loc_chain->loc; else new_var->var_part[i].cur_loc = NULL; } slot = htab_find_slot_with_hash (set->vars, new_var->decl, VARIABLE_HASH_VAL (new_var->decl), INSERT); *slot = new_var; return new_var; } /* Add a variable from *SLOT to hash table DATA and increase its reference count. */ static int vars_copy_1 (void **slot, void *data) { htab_t dst = (htab_t) data; variable src, *dstp; src = *(variable *) slot; src->refcount++; dstp = (variable *) htab_find_slot_with_hash (dst, src->decl, VARIABLE_HASH_VAL (src->decl), INSERT); *dstp = src; /* Continue traversing the hash table. */ return 1; } /* Copy all variables from hash table SRC to hash table DST. */ static void vars_copy (htab_t dst, htab_t src) { vars_clear (dst); htab_traverse (src, vars_copy_1, dst); } /* Delete current content of register LOC in dataflow set SET and set the register to contain REG_EXPR (LOC), REG_OFFSET (LOC). */ static void var_reg_delete_and_set (dataflow_set *set, rtx loc) { tree decl = REG_EXPR (loc); HOST_WIDE_INT offset = REG_OFFSET (loc); attrs node, next; attrs *nextp; nextp = &set->regs[REGNO (loc)]; for (node = *nextp; node; node = next) { next = node->next; if (node->decl != decl || node->offset != offset) { delete_variable_part (set, node->loc, node->decl, node->offset); pool_free (attrs_pool, node); *nextp = next; } else { node->loc = loc; nextp = &node->next; } } if (set->regs[REGNO (loc)] == NULL) attrs_list_insert (&set->regs[REGNO (loc)], decl, offset, loc); set_variable_part (set, loc, decl, offset); } /* Delete current content of register LOC in dataflow set SET. 
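Every variable part recorded as living in the register is removed from the variable table via delete_variable_part, and the register's attribute list is cleared.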
*/ static void var_reg_delete (dataflow_set *set, rtx loc) { attrs *reg = &set->regs[REGNO (loc)]; attrs node, next; for (node = *reg; node; node = next) { next = node->next; delete_variable_part (set, node->loc, node->decl, node->offset); pool_free (attrs_pool, node); } *reg = NULL; } /* Delete content of register with number REGNO in dataflow set SET. */ static void var_regno_delete (dataflow_set *set, int regno) { attrs *reg = &set->regs[regno]; attrs node, next; for (node = *reg; node; node = next) { next = node->next; delete_variable_part (set, node->loc, node->decl, node->offset); pool_free (attrs_pool, node); } *reg = NULL; } /* Delete and set the location part of variable MEM_EXPR (LOC) in dataflow set SET to LOC. Adjust the address first if it is stack pointer based. */ static void var_mem_delete_and_set (dataflow_set *set, rtx loc) { tree decl = MEM_EXPR (loc); HOST_WIDE_INT offset = MEM_OFFSET (loc) ? INTVAL (MEM_OFFSET (loc)) : 0; set_variable_part (set, loc, decl, offset); } /* Delete the location part LOC from dataflow set SET. Adjust the address first if it is stack pointer based. */ static void var_mem_delete (dataflow_set *set, rtx loc) { tree decl = MEM_EXPR (loc); HOST_WIDE_INT offset = MEM_OFFSET (loc) ? INTVAL (MEM_OFFSET (loc)) : 0; delete_variable_part (set, loc, decl, offset); } /* Initialize dataflow set SET to be empty. VARS_SIZE is the initial size of hash table VARS. */ static void dataflow_set_init (dataflow_set *set, int vars_size) { init_attrs_list_set (set->regs); set->vars = htab_create (vars_size, variable_htab_hash, variable_htab_eq, variable_htab_free); set->stack_adjust = 0; } /* Delete the contents of dataflow set SET. */ static void dataflow_set_clear (dataflow_set *set) { int i; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) attrs_list_clear (&set->regs[i]); vars_clear (set->vars); } /* Copy the contents of dataflow set SRC to DST. */ static void dataflow_set_copy_vt (dataflow_set *dst, dataflow_set *src) { int i; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) attrs_list_copy (&dst->regs[i], src->regs[i]); vars_copy (dst->vars, src->vars); dst->stack_adjust = src->stack_adjust; } /* Information for merging lists of locations for a given offset of variable. */ struct variable_union_info { /* Node of the location chain. */ location_chain lc; /* The sum of positions in the input chains. */ int pos; /* The position in the chains of SRC and DST dataflow sets. */ int pos_src; int pos_dst; }; /* Compare function for qsort, order the structures by POS element. */ static int variable_union_info_cmp_pos (const void *n1, const void *n2) { const struct variable_union_info *i1 = n1; const struct variable_union_info *i2 = n2; if (i1->pos != i2->pos) return i1->pos - i2->pos; return (i1->pos_dst - i2->pos_dst); } /* Compute union of location parts of variable *SLOT and the same variable from hash table DATA. Compute "sorted" union of the location chains for common offsets, i.e. the locations of a variable part are sorted by a priority where the priority is the sum of the positions in the 2 chains (if a location is only in one list the position in the second list is defined to be larger than the length of the chains). When we are updating the location parts the newest location is in the beginning of the chain, so when we do the described "sorted" union we keep the newest locations in the beginning. 
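For example, when the chain in DST is (a b) and the chain in SRC is (b c), the position sums are 1+0 for b, 0+(2+2) for a and (2+2)+1 for c, so the union comes out as (b a c): the location which is newest in SRC and still present in DST moves to the front, while locations known from only one of the chains follow.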
*/ static int variable_union (void **slot, void *data) { variable src, dst, *dstp; dataflow_set *set = (dataflow_set *) data; int i, j, k; src = *(variable *) slot; dstp = (variable *) htab_find_slot_with_hash (set->vars, src->decl, VARIABLE_HASH_VAL (src->decl), INSERT); if (!*dstp) { src->refcount++; /* If CUR_LOC of some variable part is not the first element of the location chain we are going to change it so we have to make a copy of the variable. */ for (k = 0; k < src->n_var_parts; k++) { if (src->var_part[k].loc_chain) { #ifdef ENABLE_CHECKING if (src->var_part[k].cur_loc == NULL) abort (); #endif if (src->var_part[k].cur_loc != src->var_part[k].loc_chain->loc) break; } #ifdef ENABLE_CHECKING else { if (src->var_part[k].cur_loc != NULL) abort (); } #endif } if (k < src->n_var_parts) unshare_variable (set, src); else *dstp = src; /* Continue traversing the hash table. */ return 1; } else dst = *dstp; #ifdef ENABLE_CHECKING if (src->n_var_parts == 0) abort (); #endif /* Count the number of location parts, result is K. */ for (i = 0, j = 0, k = 0; i < src->n_var_parts && j < dst->n_var_parts; k++) { if (src->var_part[i].offset == dst->var_part[j].offset) { i++; j++; } else if (src->var_part[i].offset < dst->var_part[j].offset) i++; else j++; } k += src->n_var_parts - i; k += dst->n_var_parts - j; #ifdef ENABLE_CHECKING /* We track only variables whose size is <= MAX_VAR_PARTS bytes thus there are at most MAX_VAR_PARTS different offsets. */ if (k > MAX_VAR_PARTS) abort (); #endif if (dst->refcount > 1 && dst->n_var_parts != k) dst = unshare_variable (set, dst); i = src->n_var_parts - 1; j = dst->n_var_parts - 1; dst->n_var_parts = k; for (k--; k >= 0; k--) { location_chain node, node2; if (i >= 0 && j >= 0 && src->var_part[i].offset == dst->var_part[j].offset) { /* Compute the "sorted" union of the chains, i.e. the locations which are in both chains go first, they are sorted by the sum of positions in the chains. */ int dst_l, src_l; int ii, jj, n; struct variable_union_info *vui; /* If DST is shared compare the location chains. If they are different we will modify the chain in DST with high probability so make a copy of DST. */ if (dst->refcount > 1) { for (node = src->var_part[i].loc_chain, node2 = dst->var_part[j].loc_chain; node && node2; node = node->next, node2 = node2->next) { if (!((REG_P (node2->loc) && REG_P (node->loc) && REGNO (node2->loc) == REGNO (node->loc)) || rtx_equal_p (node2->loc, node->loc))) break; } if (node || node2) dst = unshare_variable (set, dst); } src_l = 0; for (node = src->var_part[i].loc_chain; node; node = node->next) src_l++; dst_l = 0; for (node = dst->var_part[j].loc_chain; node; node = node->next) dst_l++; vui = xcalloc (src_l + dst_l, sizeof (struct variable_union_info)); /* Fill in the locations from DST. */ for (node = dst->var_part[j].loc_chain, jj = 0; node; node = node->next, jj++) { vui[jj].lc = node; vui[jj].pos_dst = jj; /* Value larger than a sum of 2 valid positions. */ vui[jj].pos_src = src_l + dst_l; } /* Fill in the locations from SRC. */ n = dst_l; for (node = src->var_part[i].loc_chain, ii = 0; node; node = node->next, ii++) { /* Find location from NODE. */ for (jj = 0; jj < dst_l; jj++) { if ((REG_P (vui[jj].lc->loc) && REG_P (node->loc) && REGNO (vui[jj].lc->loc) == REGNO (node->loc)) || rtx_equal_p (vui[jj].lc->loc, node->loc)) { vui[jj].pos_src = ii; break; } } if (jj >= dst_l) /* The location has not been found. */ { location_chain new_node; /* Copy the location from SRC. 
*/ new_node = pool_alloc (loc_chain_pool); new_node->loc = node->loc; vui[n].lc = new_node; vui[n].pos_src = ii; vui[n].pos_dst = src_l + dst_l; n++; } } for (ii = 0; ii < src_l + dst_l; ii++) vui[ii].pos = vui[ii].pos_src + vui[ii].pos_dst; qsort (vui, n, sizeof (struct variable_union_info), variable_union_info_cmp_pos); /* Reconnect the nodes in sorted order. */ for (ii = 1; ii < n; ii++) vui[ii - 1].lc->next = vui[ii].lc; vui[n - 1].lc->next = NULL; dst->var_part[k].loc_chain = vui[0].lc; dst->var_part[k].offset = dst->var_part[j].offset; free (vui); i--; j--; } else if ((i >= 0 && j >= 0 && src->var_part[i].offset < dst->var_part[j].offset) || i < 0) { dst->var_part[k] = dst->var_part[j]; j--; } else if ((i >= 0 && j >= 0 && src->var_part[i].offset > dst->var_part[j].offset) || j < 0) { location_chain *nextp; /* Copy the chain from SRC. */ nextp = &dst->var_part[k].loc_chain; for (node = src->var_part[i].loc_chain; node; node = node->next) { location_chain new_lc; new_lc = pool_alloc (loc_chain_pool); new_lc->next = NULL; new_lc->loc = node->loc; *nextp = new_lc; nextp = &new_lc->next; } dst->var_part[k].offset = src->var_part[i].offset; i--; } /* We are at the basic block boundary when computing union so set the CUR_LOC to be the first element of the chain. */ if (dst->var_part[k].loc_chain) dst->var_part[k].cur_loc = dst->var_part[k].loc_chain->loc; else dst->var_part[k].cur_loc = NULL; } /* Continue traversing the hash table. */ return 1; } /* Compute union of dataflow sets SRC and DST and store it to DST. */ static void dataflow_set_union (dataflow_set *dst, dataflow_set *src) { int i; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) attrs_list_union (&dst->regs[i], src->regs[i]); htab_traverse (src->vars, variable_union, dst); } /* Flag whether two dataflow sets being compared contain different data. */ static bool dataflow_set_different_value; static bool variable_part_different_p (variable_part *vp1, variable_part *vp2) { location_chain lc1, lc2; for (lc1 = vp1->loc_chain; lc1; lc1 = lc1->next) { for (lc2 = vp2->loc_chain; lc2; lc2 = lc2->next) { if (REG_P (lc1->loc) && REG_P (lc2->loc)) { if (REGNO (lc1->loc) == REGNO (lc2->loc)) break; } if (rtx_equal_p (lc1->loc, lc2->loc)) break; } if (!lc2) return true; } return false; } /* Return true if variables VAR1 and VAR2 are different. If COMPARE_CURRENT_LOCATION is true compare also the cur_loc of each variable part. */ static bool variable_different_p (variable var1, variable var2, bool compare_current_location) { int i; if (var1 == var2) return false; if (var1->n_var_parts != var2->n_var_parts) return true; for (i = 0; i < var1->n_var_parts; i++) { if (var1->var_part[i].offset != var2->var_part[i].offset) return true; if (compare_current_location) { if (!((REG_P (var1->var_part[i].cur_loc) && REG_P (var2->var_part[i].cur_loc) && (REGNO (var1->var_part[i].cur_loc) == REGNO (var2->var_part[i].cur_loc))) || rtx_equal_p (var1->var_part[i].cur_loc, var2->var_part[i].cur_loc))) return true; } if (variable_part_different_p (&var1->var_part[i], &var2->var_part[i])) return true; if (variable_part_different_p (&var2->var_part[i], &var1->var_part[i])) return true; } return false; } /* Compare variable *SLOT with the same variable in hash table DATA and set DATAFLOW_SET_DIFFERENT_VALUE if they are different. 
*/ static int dataflow_set_different_1 (void **slot, void *data) { htab_t htab = (htab_t) data; variable var1, var2; var1 = *(variable *) slot; var2 = htab_find_with_hash (htab, var1->decl, VARIABLE_HASH_VAL (var1->decl)); if (!var2) { dataflow_set_different_value = true; /* Stop traversing the hash table. */ return 0; } if (variable_different_p (var1, var2, false)) { dataflow_set_different_value = true; /* Stop traversing the hash table. */ return 0; } /* Continue traversing the hash table. */ return 1; } /* Compare variable *SLOT with the same variable in hash table DATA and set DATAFLOW_SET_DIFFERENT_VALUE if they are different. */ static int dataflow_set_different_2 (void **slot, void *data) { htab_t htab = (htab_t) data; variable var1, var2; var1 = *(variable *) slot; var2 = htab_find_with_hash (htab, var1->decl, VARIABLE_HASH_VAL (var1->decl)); if (!var2) { dataflow_set_different_value = true; /* Stop traversing the hash table. */ return 0; } #ifdef ENABLE_CHECKING /* If both variables are defined they have been already checked for equivalence. */ if (variable_different_p (var1, var2, false)) abort (); #endif /* Continue traversing the hash table. */ return 1; } /* Return true if dataflow sets OLD_SET and NEW_SET differ. */ static bool dataflow_set_different (dataflow_set *old_set, dataflow_set *new_set) { dataflow_set_different_value = false; htab_traverse (old_set->vars, dataflow_set_different_1, new_set->vars); if (!dataflow_set_different_value) { /* We have compared the variables which are in both hash tables so now only check whether there are some variables in NEW_SET->VARS which are not in OLD_SET->VARS. */ htab_traverse (new_set->vars, dataflow_set_different_2, old_set->vars); } return dataflow_set_different_value; } /* Free the contents of dataflow set SET. */ static void dataflow_set_destroy (dataflow_set *set) { int i; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) attrs_list_clear (&set->regs[i]); htab_delete (set->vars); set->vars = NULL; } /* Return true if RTL X contains a SYMBOL_REF. */ static bool contains_symbol_ref (rtx x) { const char *fmt; RTX_CODE code; int i; if (!x) return false; code = GET_CODE (x); if (code == SYMBOL_REF) return true; fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { if (contains_symbol_ref (XEXP (x, i))) return true; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (contains_symbol_ref (XVECEXP (x, i, j))) return true; } } return false; } /* Shall EXPR be tracked? */ static bool track_expr_p (tree expr) { rtx decl_rtl; /* If EXPR is not a parameter or a variable do not track it. */ if (TREE_CODE (expr) != VAR_DECL && TREE_CODE (expr) != PARM_DECL) return 0; /* It also must have a name... */ if (!DECL_NAME (expr)) return 0; /* ... and a RTL assigned to it. */ decl_rtl = DECL_RTL_IF_SET (expr); if (!decl_rtl) return 0; /* Do not track EXPR if it should be ignored for debugging purposes. */ if (DECL_IGNORED_P (expr)) return 0; /* Do not track global variables until we are able to emit correct location list for them. */ if (TREE_STATIC (expr)) return 0; /* When the EXPR is a DECL for alias of some variable (see example) the TREE_STATIC flag is not used. Disable tracking all DECLs whose DECL_RTL contains SYMBOL_REF. 
Example: extern char **_dl_argv_internal __attribute__ ((alias ("_dl_argv"))); char **_dl_argv; */ if (MEM_P (decl_rtl) && contains_symbol_ref (XEXP (decl_rtl, 0))) return 0; /* If RTX is a memory it should not be very large (because it would be an array or struct). */ if (MEM_P (decl_rtl)) { /* Do not track structures and arrays. */ if (GET_MODE (decl_rtl) == BLKmode) return 0; if (MEM_SIZE (decl_rtl) && INTVAL (MEM_SIZE (decl_rtl)) > MAX_VAR_PARTS) return 0; } return 1; } /* Count uses (register and memory references) LOC which will be tracked. INSN is instruction which the LOC is part of. */ static int count_uses (rtx *loc, void *insn) { basic_block bb = BLOCK_FOR_INSN ((rtx) insn); if (REG_P (*loc)) { #ifdef ENABLE_CHECKING if (REGNO (*loc) >= FIRST_PSEUDO_REGISTER) abort (); #endif VTI (bb)->n_mos++; } else if (MEM_P (*loc) && MEM_EXPR (*loc) && track_expr_p (MEM_EXPR (*loc))) { VTI (bb)->n_mos++; } return 0; } /* Helper function for finding all uses of REG/MEM in X in insn INSN. */ static void count_uses_1 (rtx *x, void *insn) { for_each_rtx (x, count_uses, insn); } /* Count stores (register and memory references) LOC which will be tracked. INSN is instruction which the LOC is part of. */ static void count_stores (rtx loc, rtx expr ATTRIBUTE_UNUSED, void *insn) { count_uses (&loc, insn); } /* Add uses (register and memory references) LOC which will be tracked to VTI (bb)->mos. INSN is instruction which the LOC is part of. */ static int add_uses (rtx *loc, void *insn) { if (REG_P (*loc)) { basic_block bb = BLOCK_FOR_INSN ((rtx) insn); micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++; mo->type = ((REG_EXPR (*loc) && track_expr_p (REG_EXPR (*loc))) ? MO_USE : MO_USE_NO_VAR); mo->u.loc = *loc; mo->insn = (rtx) insn; } else if (MEM_P (*loc) && MEM_EXPR (*loc) && track_expr_p (MEM_EXPR (*loc))) { basic_block bb = BLOCK_FOR_INSN ((rtx) insn); micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++; mo->type = MO_USE; mo->u.loc = *loc; mo->insn = (rtx) insn; } return 0; } /* Helper function for finding all uses of REG/MEM in X in insn INSN. */ static void add_uses_1 (rtx *x, void *insn) { for_each_rtx (x, add_uses, insn); } /* Add stores (register and memory references) LOC which will be tracked to VTI (bb)->mos. EXPR is the RTL expression containing the store. INSN is instruction which the LOC is part of. */ static void add_stores (rtx loc, rtx expr, void *insn) { if (REG_P (loc)) { basic_block bb = BLOCK_FOR_INSN ((rtx) insn); micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++; mo->type = ((GET_CODE (expr) != CLOBBER && REG_EXPR (loc) && track_expr_p (REG_EXPR (loc))) ? MO_SET : MO_CLOBBER); mo->u.loc = loc; mo->insn = (rtx) insn; } else if (MEM_P (loc) && MEM_EXPR (loc) && track_expr_p (MEM_EXPR (loc))) { basic_block bb = BLOCK_FOR_INSN ((rtx) insn); micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++; mo->type = GET_CODE (expr) == CLOBBER ? MO_CLOBBER : MO_SET; mo->u.loc = loc; mo->insn = (rtx) insn; } } /* Compute the changes of variable locations in the basic block BB. 
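The OUT set is recomputed from scratch: it starts as a copy of the IN set and is then updated by replaying the block's micro operations (MO_CALL, MO_USE, MO_SET, MO_USE_NO_VAR, MO_CLOBBER and MO_ADJUST) in order. Return true if the resulting OUT set differs from the previous one.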
*/ static bool compute_bb_dataflow (basic_block bb) { int i, n, r; bool changed; dataflow_set old_out; dataflow_set *in = &VTI (bb)->in; dataflow_set *out = &VTI (bb)->out; dataflow_set_init (&old_out, htab_elements (VTI (bb)->out.vars) + 3); dataflow_set_copy_vt (&old_out, out); dataflow_set_copy_vt (out, in); n = VTI (bb)->n_mos; for (i = 0; i < n; i++) { switch (VTI (bb)->mos[i].type) { case MO_CALL: for (r = 0; r < FIRST_PSEUDO_REGISTER; r++) if (TEST_HARD_REG_BIT (call_used_reg_set, r)) var_regno_delete (out, r); break; case MO_USE: case MO_SET: { rtx loc = VTI (bb)->mos[i].u.loc; if (REG_P (loc)) var_reg_delete_and_set (out, loc); else if (MEM_P (loc)) var_mem_delete_and_set (out, loc); } break; case MO_USE_NO_VAR: case MO_CLOBBER: { rtx loc = VTI (bb)->mos[i].u.loc; if (REG_P (loc)) var_reg_delete (out, loc); else if (MEM_P (loc)) var_mem_delete (out, loc); } break; case MO_ADJUST: { rtx base; out->stack_adjust += VTI (bb)->mos[i].u.adjust; base = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, out->stack_adjust)); set_frame_base_location (out, base); } break; } } changed = dataflow_set_different (&old_out, out); dataflow_set_destroy (&old_out); return changed; } /* Find the locations of variables in the whole function. */ static void vt_find_locations (void) { fibheap_t worklist, pending, fibheap_swap; sbitmap visited, in_worklist, in_pending, sbitmap_swap; basic_block bb; edge e; int *bb_order; int *rc_order; int i; /* Compute reverse completion order of depth first search of the CFG so that the data-flow runs faster. */ rc_order = xmalloc (n_basic_blocks * sizeof (int)); bb_order = xmalloc (last_basic_block * sizeof (int)); flow_depth_first_order_compute (NULL, rc_order); for (i = 0; i < n_basic_blocks; i++) bb_order[rc_order[i]] = i; free (rc_order); worklist = fibheap_new (); pending = fibheap_new (); visited = sbitmap_alloc (last_basic_block); in_worklist = sbitmap_alloc (last_basic_block); in_pending = sbitmap_alloc (last_basic_block); sbitmap_zero (in_worklist); sbitmap_zero (in_pending); FOR_EACH_BB (bb) { fibheap_insert (pending, bb_order[bb->index], bb); SET_BIT (in_pending, bb->index); } while (!fibheap_empty (pending)) { fibheap_swap = pending; pending = worklist; worklist = fibheap_swap; sbitmap_swap = in_pending; in_pending = in_worklist; in_worklist = sbitmap_swap; sbitmap_zero (visited); while (!fibheap_empty (worklist)) { bb = fibheap_extract_min (worklist); RESET_BIT (in_worklist, bb->index); if (!TEST_BIT (visited, bb->index)) { bool changed; SET_BIT (visited, bb->index); /* Calculate the IN set as union of predecessor OUT sets. */ dataflow_set_clear (&VTI (bb)->in); for (e = bb->pred; e; e = e->pred_next) { dataflow_set_union (&VTI (bb)->in, &VTI (e->src)->out); } changed = compute_bb_dataflow (bb); if (changed) { for (e = bb->succ; e; e = e->succ_next) { if (e->dest == EXIT_BLOCK_PTR) continue; if (e->dest == bb) continue; if (TEST_BIT (visited, e->dest->index)) { if (!TEST_BIT (in_pending, e->dest->index)) { /* Send E->DEST to next round. */ SET_BIT (in_pending, e->dest->index); fibheap_insert (pending, bb_order[e->dest->index], e->dest); } } else if (!TEST_BIT (in_worklist, e->dest->index)) { /* Add E->DEST to current round. */ SET_BIT (in_worklist, e->dest->index); fibheap_insert (worklist, bb_order[e->dest->index], e->dest); } } } } } } free (bb_order); fibheap_delete (worklist); fibheap_delete (pending); sbitmap_free (visited); sbitmap_free (in_worklist); sbitmap_free (in_pending); } /* Print the content of the LIST to dump file. 
*/ static void dump_attrs_list (attrs list) { for (; list; list = list->next) { print_mem_expr (dump_file, list->decl); fprintf (dump_file, "+"); fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC, list->offset); } fprintf (dump_file, "\n"); } /* Print the information about variable *SLOT to dump file. */ static int dump_variable_vt (void **slot, void *data ATTRIBUTE_UNUSED) { variable var = *(variable *) slot; int i; location_chain node; fprintf (dump_file, " name: %s\n", IDENTIFIER_POINTER (DECL_NAME (var->decl))); for (i = 0; i < var->n_var_parts; i++) { fprintf (dump_file, " offset %ld\n", (long) var->var_part[i].offset); for (node = var->var_part[i].loc_chain; node; node = node->next) { fprintf (dump_file, " "); print_rtl_single (dump_file, node->loc); } } /* Continue traversing the hash table. */ return 1; } /* Print the information about variables from hash table VARS to dump file. */ static void dump_vars (htab_t vars) { if (htab_elements (vars) > 0) { fprintf (dump_file, "Variables:\n"); htab_traverse (vars, dump_variable_vt, NULL); } } /* Print the dataflow set SET to dump file. */ static void dump_dataflow_set (dataflow_set *set) { int i; fprintf (dump_file, "Stack adjustment: "); fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC, set->stack_adjust); fprintf (dump_file, "\n"); for (i = 1; i < FIRST_PSEUDO_REGISTER; i++) { if (set->regs[i]) { fprintf (dump_file, "Reg %d:", i); dump_attrs_list (set->regs[i]); } } dump_vars (set->vars); fprintf (dump_file, "\n"); } /* Print the IN and OUT sets for each basic block to dump file. */ static void dump_dataflow_sets (void) { basic_block bb; FOR_EACH_BB (bb) { fprintf (dump_file, "\nBasic block %d:\n", bb->index); fprintf (dump_file, "IN:\n"); dump_dataflow_set (&VTI (bb)->in); fprintf (dump_file, "OUT:\n"); dump_dataflow_set (&VTI (bb)->out); } } /* Add variable VAR to the hash table of changed variables and if it has no locations delete it from hash table HTAB. */ static void variable_was_changed (variable var, htab_t htab) { hashval_t hash = VARIABLE_HASH_VAL (var->decl); if (emit_notes) { variable *slot; slot = (variable *) htab_find_slot_with_hash (changed_variables, var->decl, hash, INSERT); if (htab && var->n_var_parts == 0) { variable empty_var; void **old; empty_var = pool_alloc (var_pool); empty_var->decl = var->decl; empty_var->refcount = 1; empty_var->n_var_parts = 0; *slot = empty_var; old = htab_find_slot_with_hash (htab, var->decl, hash, NO_INSERT); if (old) htab_clear_slot (htab, old); } else { *slot = var; } } else { #ifdef ENABLE_CHECKING if (!htab) abort (); #endif if (var->n_var_parts == 0) { void **slot = htab_find_slot_with_hash (htab, var->decl, hash, NO_INSERT); if (slot) htab_clear_slot (htab, slot); } } } /* Set the location of frame_base_decl to LOC in dataflow set SET. This function expects that frame_base_decl has already one location for offset 0 in the variable table. */ static void set_frame_base_location (dataflow_set *set, rtx loc) { variable var; var = htab_find_with_hash (set->vars, frame_base_decl, VARIABLE_HASH_VAL (frame_base_decl)); #ifdef ENABLE_CHECKING if (!var) abort (); if (var->n_var_parts != 1) abort (); if (var->var_part[0].offset != 0) abort (); if (!var->var_part[0].loc_chain) abort (); #endif /* If frame_base_decl is shared unshare it first. */ if (var->refcount > 1) var = unshare_variable (set, var); var->var_part[0].loc_chain->loc = loc; var->var_part[0].cur_loc = loc; variable_was_changed (var, set->vars); } /* Set the part of variable's location in the dataflow set SET. 
The variable part is specified by variable's declaration DECL and offset OFFSET and the part's location by LOC. */ static void set_variable_part (dataflow_set *set, rtx loc, tree decl, HOST_WIDE_INT offset) { int pos, low, high; location_chain node, next; location_chain *nextp; variable var; void **slot; slot = htab_find_slot_with_hash (set->vars, decl, VARIABLE_HASH_VAL (decl), INSERT); if (!*slot) { /* Create new variable information. */ var = pool_alloc (var_pool); var->decl = decl; var->refcount = 1; var->n_var_parts = 1; var->var_part[0].offset = offset; var->var_part[0].loc_chain = NULL; var->var_part[0].cur_loc = NULL; *slot = var; pos = 0; } else { var = (variable) *slot; /* Find the location part. */ low = 0; high = var->n_var_parts; while (low != high) { pos = (low + high) / 2; if (var->var_part[pos].offset < offset) low = pos + 1; else high = pos; } pos = low; if (pos < var->n_var_parts && var->var_part[pos].offset == offset) { node = var->var_part[pos].loc_chain; if (node && ((REG_P (node->loc) && REG_P (loc) && REGNO (node->loc) == REGNO (loc)) || rtx_equal_p (node->loc, loc))) { /* LOC is in the beginning of the chain so we have nothing to do. */ return; } else { /* We have to make a copy of a shared variable. */ if (var->refcount > 1) var = unshare_variable (set, var); } } else { /* We have not found the location part, new one will be created. */ /* We have to make a copy of the shared variable. */ if (var->refcount > 1) var = unshare_variable (set, var); #ifdef ENABLE_CHECKING /* We track only variables whose size is <= MAX_VAR_PARTS bytes thus there are at most MAX_VAR_PARTS different offsets. */ if (var->n_var_parts >= MAX_VAR_PARTS) abort (); #endif /* We have to move the elements of array starting at index low to the next position. */ for (high = var->n_var_parts; high > low; high--) var->var_part[high] = var->var_part[high - 1]; var->n_var_parts++; var->var_part[pos].offset = offset; var->var_part[pos].loc_chain = NULL; var->var_part[pos].cur_loc = NULL; } } /* Delete the location from the list. */ nextp = &var->var_part[pos].loc_chain; for (node = var->var_part[pos].loc_chain; node; node = next) { next = node->next; if ((REG_P (node->loc) && REG_P (loc) && REGNO (node->loc) == REGNO (loc)) || rtx_equal_p (node->loc, loc)) { pool_free (loc_chain_pool, node); *nextp = next; break; } else nextp = &node->next; } /* Add the location to the beginning. */ node = pool_alloc (loc_chain_pool); node->loc = loc; node->next = var->var_part[pos].loc_chain; var->var_part[pos].loc_chain = node; /* If no location was emitted do so. */ if (var->var_part[pos].cur_loc == NULL) { var->var_part[pos].cur_loc = loc; variable_was_changed (var, set->vars); } } /* Delete the part of variable's location from dataflow set SET. The variable part is specified by variable's declaration DECL and offset OFFSET and the part's location by LOC. */ static void delete_variable_part (dataflow_set *set, rtx loc, tree decl, HOST_WIDE_INT offset) { int pos, low, high; void **slot; slot = htab_find_slot_with_hash (set->vars, decl, VARIABLE_HASH_VAL (decl), NO_INSERT); if (slot) { variable var = (variable) *slot; /* Find the location part. 
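A binary search by OFFSET is used here; the VAR_PART array is kept sorted by offset, exactly as in set_variable_part above.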
*/ low = 0; high = var->n_var_parts; while (low != high) { pos = (low + high) / 2; if (var->var_part[pos].offset < offset) low = pos + 1; else high = pos; } pos = low; if (pos < var->n_var_parts && var->var_part[pos].offset == offset) { location_chain node, next; location_chain *nextp; bool changed; if (var->refcount > 1) { /* If the variable contains the location part we have to make a copy of the variable. */ for (node = var->var_part[pos].loc_chain; node; node = node->next) { if ((REG_P (node->loc) && REG_P (loc) && REGNO (node->loc) == REGNO (loc)) || rtx_equal_p (node->loc, loc)) { var = unshare_variable (set, var); break; } } } /* Delete the location part. */ nextp = &var->var_part[pos].loc_chain; for (node = *nextp; node; node = next) { next = node->next; if ((REG_P (node->loc) && REG_P (loc) && REGNO (node->loc) == REGNO (loc)) || rtx_equal_p (node->loc, loc)) { pool_free (loc_chain_pool, node); *nextp = next; break; } else nextp = &node->next; } /* If we have deleted the location which was last emitted we have to emit new location so add the variable to set of changed variables. */ if (var->var_part[pos].cur_loc && ((REG_P (loc) && REG_P (var->var_part[pos].cur_loc) && REGNO (loc) == REGNO (var->var_part[pos].cur_loc)) || rtx_equal_p (loc, var->var_part[pos].cur_loc))) { changed = true; if (var->var_part[pos].loc_chain) var->var_part[pos].cur_loc = var->var_part[pos].loc_chain->loc; } else changed = false; if (var->var_part[pos].loc_chain == NULL) { var->n_var_parts--; while (pos < var->n_var_parts) { var->var_part[pos] = var->var_part[pos + 1]; pos++; } } if (changed) variable_was_changed (var, set->vars); } } } /* Emit the NOTE_INSN_VAR_LOCATION for variable *VARP. DATA contains additional parameters: WHERE specifies whether the note shall be emitted before of after instruction INSN. 
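If the variable's location description does not cover its whole declared size, a note with a NULL location is emitted; a variable with a single part gets an EXPR_LIST of location and offset, and one with several parts gets a PARALLEL of such EXPR_LISTs.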
*/ static int emit_note_insn_var_location (void **varp, void *data) { variable var = *(variable *) varp; rtx insn = ((emit_note_data *)data)->insn; enum emit_note_where where = ((emit_note_data *)data)->where; rtx note; int i; bool complete; HOST_WIDE_INT last_limit; tree type_size_unit; #ifdef ENABLE_CHECKING if (!var->decl) abort (); #endif complete = true; last_limit = 0; for (i = 0; i < var->n_var_parts; i++) { if (last_limit < var->var_part[i].offset) { complete = false; break; } last_limit = (var->var_part[i].offset + GET_MODE_SIZE (GET_MODE (var->var_part[i].loc_chain->loc))); } type_size_unit = TYPE_SIZE_UNIT (TREE_TYPE (var->decl)); if ((unsigned HOST_WIDE_INT) last_limit < TREE_INT_CST_LOW (type_size_unit)) complete = false; if (where == EMIT_NOTE_AFTER_INSN) note = emit_note_after (NOTE_INSN_VAR_LOCATION, insn); else note = emit_note_before (NOTE_INSN_VAR_LOCATION, insn); if (!complete) { NOTE_VAR_LOCATION (note) = gen_rtx_VAR_LOCATION (VOIDmode, var->decl, NULL_RTX); } else if (var->n_var_parts == 1) { rtx expr_list = gen_rtx_EXPR_LIST (VOIDmode, var->var_part[0].loc_chain->loc, GEN_INT (var->var_part[0].offset)); NOTE_VAR_LOCATION (note) = gen_rtx_VAR_LOCATION (VOIDmode, var->decl, expr_list); } else if (var->n_var_parts) { rtx argp[MAX_VAR_PARTS]; rtx parallel; for (i = 0; i < var->n_var_parts; i++) argp[i] = gen_rtx_EXPR_LIST (VOIDmode, var->var_part[i].loc_chain->loc, GEN_INT (var->var_part[i].offset)); parallel = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (var->n_var_parts, argp)); NOTE_VAR_LOCATION (note) = gen_rtx_VAR_LOCATION (VOIDmode, var->decl, parallel); } htab_clear_slot (changed_variables, varp); /* When there are no location parts the variable has been already removed from hash table and a new empty variable was created. Free the empty variable. */ if (var->n_var_parts == 0) { pool_free (var_pool, var); } /* Continue traversing the hash table. */ return 1; } /* Emit NOTE_INSN_VAR_LOCATION note for each variable from a chain CHANGED_VARIABLES and delete this chain. WHERE specifies whether the notes shall be emitted before of after instruction INSN. */ static void emit_notes_for_changes (rtx insn, enum emit_note_where where) { emit_note_data data; data.insn = insn; data.where = where; htab_traverse (changed_variables, emit_note_insn_var_location, &data); } /* Add variable *SLOT to the chain CHANGED_VARIABLES if it differs from the same variable in hash table DATA or is not there at all. */ static int emit_notes_for_differences_1 (void **slot, void *data) { htab_t new_vars = (htab_t) data; variable old_var, new_var; old_var = *(variable *) slot; new_var = htab_find_with_hash (new_vars, old_var->decl, VARIABLE_HASH_VAL (old_var->decl)); if (!new_var) { /* Variable has disappeared. */ variable empty_var; empty_var = pool_alloc (var_pool); empty_var->decl = old_var->decl; empty_var->refcount = 1; empty_var->n_var_parts = 0; variable_was_changed (empty_var, NULL); } else if (variable_different_p (old_var, new_var, true)) { variable_was_changed (new_var, NULL); } /* Continue traversing the hash table. */ return 1; } /* Add variable *SLOT to the chain CHANGED_VARIABLES if it is not in hash table DATA. */ static int emit_notes_for_differences_2 (void **slot, void *data) { htab_t old_vars = (htab_t) data; variable old_var, new_var; new_var = *(variable *) slot; old_var = htab_find_with_hash (old_vars, new_var->decl, VARIABLE_HASH_VAL (new_var->decl)); if (!old_var) { /* Variable has appeared. */ variable_was_changed (new_var, NULL); } /* Continue traversing the hash table. 
*/ return 1; } /* Emit notes before INSN for differences between dataflow sets OLD_SET and NEW_SET. */ static void emit_notes_for_differences (rtx insn, dataflow_set *old_set, dataflow_set *new_set) { htab_traverse (old_set->vars, emit_notes_for_differences_1, new_set->vars); htab_traverse (new_set->vars, emit_notes_for_differences_2, old_set->vars); emit_notes_for_changes (insn, EMIT_NOTE_BEFORE_INSN); } /* Emit the notes for changes of location parts in the basic block BB. */ static void emit_notes_in_bb (basic_block bb) { int i; dataflow_set set; dataflow_set_init (&set, htab_elements (VTI (bb)->in.vars) + 3); dataflow_set_copy_vt (&set, &VTI (bb)->in); for (i = 0; i < VTI (bb)->n_mos; i++) { rtx insn = VTI (bb)->mos[i].insn; switch (VTI (bb)->mos[i].type) { case MO_CALL: { int r; for (r = 0; r < FIRST_PSEUDO_REGISTER; r++) if (TEST_HARD_REG_BIT (call_used_reg_set, r)) { var_regno_delete (&set, r); } emit_notes_for_changes (insn, EMIT_NOTE_AFTER_INSN); } break; case MO_USE: case MO_SET: { rtx loc = VTI (bb)->mos[i].u.loc; if (REG_P (loc)) var_reg_delete_and_set (&set, loc); else var_mem_delete_and_set (&set, loc); if (VTI (bb)->mos[i].type == MO_USE) emit_notes_for_changes (insn, EMIT_NOTE_BEFORE_INSN); else emit_notes_for_changes (insn, EMIT_NOTE_AFTER_INSN); } break; case MO_USE_NO_VAR: case MO_CLOBBER: { rtx loc = VTI (bb)->mos[i].u.loc; if (REG_P (loc)) var_reg_delete (&set, loc); else var_mem_delete (&set, loc); if (VTI (bb)->mos[i].type == MO_USE_NO_VAR) emit_notes_for_changes (insn, EMIT_NOTE_BEFORE_INSN); else emit_notes_for_changes (insn, EMIT_NOTE_AFTER_INSN); } break; case MO_ADJUST: { rtx base; set.stack_adjust += VTI (bb)->mos[i].u.adjust; base = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, set.stack_adjust)); set_frame_base_location (&set, base); emit_notes_for_changes (insn, EMIT_NOTE_AFTER_INSN); } break; } } dataflow_set_destroy (&set); } /* Emit notes for the whole function. */ static void vt_emit_notes (void) { basic_block bb; dataflow_set *last_out; dataflow_set empty; #ifdef ENABLE_CHECKING if (htab_elements (changed_variables)) abort (); #endif /* Enable emitting notes by functions (mainly by set_variable_part and delete_variable_part). */ emit_notes = true; dataflow_set_init (&empty, 7); last_out = &empty; FOR_EACH_BB (bb) { /* Emit the notes for changes of variable locations between two subsequent basic blocks. */ emit_notes_for_differences (BB_HEAD (bb), last_out, &VTI (bb)->in); /* Emit the notes for the changes in the basic block itself. */ emit_notes_in_bb (bb); last_out = &VTI (bb)->out; } dataflow_set_destroy (&empty); emit_notes = false; } /* If there is a declaration and offset associated with register/memory RTL assign declaration to *DECLP and offset to *OFFSETP, and return true. */ static bool vt_get_decl_and_offset (rtx rtl, tree *declp, HOST_WIDE_INT *offsetp) { if (REG_P (rtl)) { if (REG_ATTRS (rtl)) { *declp = REG_EXPR (rtl); *offsetp = REG_OFFSET (rtl); return true; } } else if (MEM_P (rtl)) { if (MEM_ATTRS (rtl)) { *declp = MEM_EXPR (rtl); *offsetp = MEM_OFFSET (rtl) ? INTVAL (MEM_OFFSET (rtl)) : 0; return true; } } return false; } /* Insert function parameters to IN and OUT sets of ENTRY_BLOCK.
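Each named PARM_DECL whose incoming RTL is a hard register or a memory reference gets that incoming location recorded in the OUT set of the entry block, so the dataflow starts with every parameter located where the calling convention passed it.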
*/ static void vt_add_function_parameters (void) { tree parm; for (parm = DECL_ARGUMENTS (current_function_decl); parm; parm = TREE_CHAIN (parm)) { rtx decl_rtl = DECL_RTL_IF_SET (parm); rtx incoming = DECL_INCOMING_RTL (parm); tree decl; HOST_WIDE_INT offset; dataflow_set *out; if (TREE_CODE (parm) != PARM_DECL) continue; if (!DECL_NAME (parm)) continue; if (!decl_rtl || !incoming) continue; if (GET_MODE (decl_rtl) == BLKmode || GET_MODE (incoming) == BLKmode) continue; if (!vt_get_decl_and_offset (incoming, &decl, &offset)) if (!vt_get_decl_and_offset (decl_rtl, &decl, &offset)) continue; if (!decl) continue; #ifdef ENABLE_CHECKING if (parm != decl) abort (); #endif incoming = eliminate_regs (incoming, 0, NULL_RTX); out = &VTI (ENTRY_BLOCK_PTR)->out; if (REG_P (incoming)) { #ifdef ENABLE_CHECKING if (REGNO (incoming) >= FIRST_PSEUDO_REGISTER) abort (); #endif attrs_list_insert (&out->regs[REGNO (incoming)], parm, offset, incoming); set_variable_part (out, incoming, parm, offset); } else if (MEM_P (incoming)) { set_variable_part (out, incoming, parm, offset); } } } /* Allocate and initialize the data structures for variable tracking and parse the RTL to get the micro operations. */ static void vt_initialize (void) { basic_block bb; alloc_aux_for_blocks (sizeof (struct variable_tracking_info_def)); FOR_EACH_BB (bb) { rtx insn; HOST_WIDE_INT pre, post; /* Count the number of micro operations. */ VTI (bb)->n_mos = 0; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { if (!frame_pointer_needed) { insn_stack_adjust_offset_pre_post (insn, &pre, &post); if (pre) VTI (bb)->n_mos++; if (post) VTI (bb)->n_mos++; } note_uses (&PATTERN (insn), count_uses_1, insn); note_stores (PATTERN (insn), count_stores, insn); if (GET_CODE (insn) == CALL_INSN) VTI (bb)->n_mos++; } } /* Add the micro-operations to the array. */ VTI (bb)->mos = xmalloc (VTI (bb)->n_mos * sizeof (struct micro_operation_def)); VTI (bb)->n_mos = 0; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { int n1, n2; if (!frame_pointer_needed) { insn_stack_adjust_offset_pre_post (insn, &pre, &post); if (pre) { micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++; mo->type = MO_ADJUST; mo->u.adjust = pre; mo->insn = insn; } } n1 = VTI (bb)->n_mos; note_uses (&PATTERN (insn), add_uses_1, insn); n2 = VTI (bb)->n_mos - 1; /* Order the MO_USEs to be before MO_USE_NO_VARs. */ while (n1 < n2) { while (n1 < n2 && VTI (bb)->mos[n1].type == MO_USE) n1++; while (n1 < n2 && VTI (bb)->mos[n2].type == MO_USE_NO_VAR) n2--; if (n1 < n2) { micro_operation sw; sw = VTI (bb)->mos[n1]; VTI (bb)->mos[n1] = VTI (bb)->mos[n2]; VTI (bb)->mos[n2] = sw; } } if (GET_CODE (insn) == CALL_INSN) { micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++; mo->type = MO_CALL; mo->insn = insn; } n1 = VTI (bb)->n_mos; note_stores (PATTERN (insn), add_stores, insn); n2 = VTI (bb)->n_mos - 1; /* Order the MO_SETs to be before MO_CLOBBERs. */ while (n1 < n2) { while (n1 < n2 && VTI (bb)->mos[n1].type == MO_SET) n1++; while (n1 < n2 && VTI (bb)->mos[n2].type == MO_CLOBBER) n2--; if (n1 < n2) { micro_operation sw; sw = VTI (bb)->mos[n1]; VTI (bb)->mos[n1] = VTI (bb)->mos[n2]; VTI (bb)->mos[n2] = sw; } } if (!frame_pointer_needed && post) { micro_operation *mo = VTI (bb)->mos + VTI (bb)->n_mos++; mo->type = MO_ADJUST; mo->u.adjust = post; mo->insn = insn; } } } } /* Init the IN and OUT sets. 
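Both sets start out empty for every block, including the entry and exit blocks (hence FOR_ALL_BB rather than FOR_EACH_BB); the entry block's OUT set is subsequently seeded by vt_add_function_parameters and, when the frame pointer is omitted, with the fake FRAME_BASE_DECL variable below.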
*/ FOR_ALL_BB (bb) { VTI (bb)->visited = false; dataflow_set_init (&VTI (bb)->in, 7); dataflow_set_init (&VTI (bb)->out, 7); } attrs_pool = create_alloc_pool ("attrs_def pool", sizeof (struct attrs_def), 1024); var_pool = create_alloc_pool ("variable_def pool", sizeof (struct variable_def), 64); loc_chain_pool = create_alloc_pool ("location_chain_def pool", sizeof (struct location_chain_def), 1024); changed_variables = htab_create (10, variable_htab_hash, variable_htab_eq, NULL); vt_add_function_parameters (); if (!frame_pointer_needed) { rtx base; /* Create fake variable for tracking stack pointer changes. */ frame_base_decl = make_node (VAR_DECL); DECL_NAME (frame_base_decl) = get_identifier ("___frame_base_decl"); TREE_TYPE (frame_base_decl) = char_type_node; DECL_ARTIFICIAL (frame_base_decl) = 1; /* Set its initial "location". */ frame_stack_adjust = -prologue_stack_adjust (); base = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, frame_stack_adjust)); set_variable_part (&VTI (ENTRY_BLOCK_PTR)->out, base, frame_base_decl, 0); } else { frame_base_decl = NULL; } } /* Free the data structures needed for variable tracking. */ static void vt_finalize (void) { basic_block bb; FOR_EACH_BB (bb) { free (VTI (bb)->mos); } FOR_ALL_BB (bb) { dataflow_set_destroy (&VTI (bb)->in); dataflow_set_destroy (&VTI (bb)->out); } free_aux_for_blocks (); free_alloc_pool (attrs_pool); free_alloc_pool (var_pool); free_alloc_pool (loc_chain_pool); htab_delete (changed_variables); } /* The entry point to variable tracking pass. */ void variable_tracking_main (void) { if (n_basic_blocks > 500 && n_edges / n_basic_blocks >= 20) return; mark_dfs_back_edges (); vt_initialize (); if (!frame_pointer_needed) { if (!vt_stack_adjustments ()) { vt_finalize (); return; } } vt_find_locations (); vt_emit_notes (); if (dump_file) { dump_dataflow_sets (); dump_flow_info (dump_file); } vt_finalize (); } /* Calculate branch probabilities, and basic block execution counts. Copyright (C) 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by James E. Wilson, UC Berkeley/Cygnus Support; based on some ideas from Dain Samples of UC Berkeley. Further mangling by Bob Manson, Cygnus Support. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Generate basic block profile instrumentation and auxiliary files. Profile generation is optimized, so that not all arcs in the basic block graph need instrumenting. First, the BB graph is closed with one entry (function start), and one exit (function exit). Any ABNORMAL_EDGE cannot be instrumented (because there is no control path to place the code). We close the graph by inserting fake EDGE_FAKE edges to the EXIT_BLOCK, from the sources of abnormal edges that do not go to the exit_block. We ignore such abnormal edges. 
Naturally these fake edges are never directly traversed, and so *cannot* be directly instrumented. Some other graph massaging is done. To optimize the instrumentation we generate the BB minimal span tree, only edges that are not on the span tree (plus the entry point) need instrumenting. From that information all other edge counts can be deduced. By construction all fake edges must be on the spanning tree. We also attempt to place EDGE_CRITICAL edges on the spanning tree. The auxiliary files generated are .gcno (at compile time) and .gcda (at run time). The format is described in full in gcov-io.h. */ /* ??? Register allocation should use basic block execution counts to give preference to the most commonly executed blocks. */ /* ??? Should calculate branch probabilities before instrumenting code, since then we can use arc counts to help decide which arcs to instrument. */ /* Hooks for profiling. */ static struct profile_hooks* profile_hooks; /* File for profiling debug output. */ static inline FILE* profile_dump_file (void) { return profile_hooks->profile_dump_file (); } /* Additional information about the edges we need. */ struct edge_info { unsigned int count_valid : 1; /* Is on the spanning tree. */ unsigned int on_tree : 1; /* Pretend this edge does not exist (it is abnormal and we've inserted a fake to compensate). */ unsigned int ignore : 1; }; struct bb_prof_info { unsigned int count_valid : 1; /* Number of successor and predecessor edges. */ gcov_type succ_count; gcov_type pred_count; }; #define PROF_EDGE_INFO(e) ((struct edge_info *) (e)->aux) #define PROF_BB_INFO(b) ((struct bb_prof_info *) (b)->aux) /* Counter summary from the last set of coverage counts read. */ const struct gcov_ctr_summary *profile_info; /* Collect statistics on the performance of this pass for the entire source file. */ static int total_num_blocks; static int total_num_edges; static int total_num_edges_ignored; static int total_num_edges_instrumented; static int total_num_blocks_created; static int total_num_passes; static int total_num_times_called; static int total_hist_br_prob[20]; static int total_num_never_executed; static int total_num_branches; /* Forward declarations. */ static void find_spanning_tree (struct edge_list *); static unsigned instrument_edges (struct edge_list *); static void instrument_values (unsigned, struct histogram_value *); static void compute_branch_probabilities (void); static void compute_value_histograms (unsigned, struct histogram_value *); static gcov_type * get_exec_counts (void); static basic_block find_group (basic_block); static void union_groups (basic_block, basic_block); /* Add edge instrumentation code to the entire insn chain. F is the first insn of the chain. NUM_BLOCKS is the number of basic blocks found in F. */ static unsigned instrument_edges (struct edge_list *el) { unsigned num_instr_edges = 0; int num_edges = NUM_EDGES (el); basic_block bb; remove_fake_edges (); FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; for (e = bb->succ; e; e = e->succ_next) { struct edge_info *inf = PROF_EDGE_INFO (e); if (!inf->ignore && !inf->on_tree) { if (e->flags & EDGE_ABNORMAL) abort (); if (dump_file) fprintf (dump_file, "Edge %d to %d instrumented%s\n", e->src->index, e->dest->index, EDGE_CRITICAL_P (e) ? 
" (and split)" : ""); (profile_hooks->gen_edge_profiler) (num_instr_edges++, e); } } } total_num_blocks_created += num_edges; if (dump_file) fprintf (dump_file, "%d edges instrumented\n", num_instr_edges); return num_instr_edges; } /* Add code to measure histograms list of VALUES of length N_VALUES. */ static void instrument_values (unsigned n_values, struct histogram_value *values) { unsigned i, t; /* Emit code to generate the histograms before the insns. */ for (i = 0; i < n_values; i++) { switch (values[i].type) { case HIST_TYPE_INTERVAL: t = GCOV_COUNTER_V_INTERVAL; break; case HIST_TYPE_POW2: t = GCOV_COUNTER_V_POW2; break; case HIST_TYPE_SINGLE_VALUE: t = GCOV_COUNTER_V_SINGLE; break; case HIST_TYPE_CONST_DELTA: t = GCOV_COUNTER_V_DELTA; break; default: abort (); } if (!coverage_counter_alloc (t, values[i].n_counters)) continue; switch (values[i].type) { case HIST_TYPE_INTERVAL: (profile_hooks->gen_interval_profiler) (values + i, t, 0); break; case HIST_TYPE_POW2: (profile_hooks->gen_pow2_profiler) (values + i, t, 0); break; case HIST_TYPE_SINGLE_VALUE: (profile_hooks->gen_one_value_profiler) (values + i, t, 0); break; case HIST_TYPE_CONST_DELTA: (profile_hooks->gen_const_delta_profiler) (values + i, t, 0); break; default: abort (); } } } /* Computes hybrid profile for all matching entries in da_file. */ static gcov_type * get_exec_counts (void) { unsigned num_edges = 0; basic_block bb; gcov_type *counts; /* Count the edges to be (possibly) instrumented. */ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; for (e = bb->succ; e; e = e->succ_next) if (!PROF_EDGE_INFO (e)->ignore && !PROF_EDGE_INFO (e)->on_tree) num_edges++; } counts = get_coverage_counts (GCOV_COUNTER_ARCS, num_edges, &profile_info); if (!counts) return NULL; if (dump_file && profile_info) fprintf(dump_file, "Merged %u profiles with maximal count %u.\n", profile_info->runs, (unsigned) profile_info->sum_max); return counts; } /* Compute the branch probabilities for the various branches. Annotate them accordingly. */ static void compute_branch_probabilities (void) { basic_block bb; int i; int num_edges = 0; int changes; int passes; int hist_br_prob[20]; int num_never_executed; int num_branches; gcov_type *exec_counts = get_exec_counts (); int exec_counts_pos = 0; /* Very simple sanity checks so we catch bugs in our profiling code. */ if (profile_info) { if (profile_info->run_max * profile_info->runs < profile_info->sum_max) { error ("corrupted profile info: run_max * runs < sum_max"); exec_counts = NULL; } if (profile_info->sum_all < profile_info->sum_max) { error ("corrupted profile info: sum_all is smaller than sum_max"); exec_counts = NULL; } } /* Attach extra info block to each bb. */ alloc_aux_for_blocks (sizeof (struct bb_prof_info)); FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; for (e = bb->succ; e; e = e->succ_next) if (!PROF_EDGE_INFO (e)->ignore) PROF_BB_INFO (bb)->succ_count++; for (e = bb->pred; e; e = e->pred_next) if (!PROF_EDGE_INFO (e)->ignore) PROF_BB_INFO (bb)->pred_count++; } /* Avoid predicting entry on exit nodes. */ PROF_BB_INFO (EXIT_BLOCK_PTR)->succ_count = 2; PROF_BB_INFO (ENTRY_BLOCK_PTR)->pred_count = 2; /* For each edge not on the spanning tree, set its execution count from the .da file. */ /* The first count in the .da file is the number of times that the function was entered. This is the exec_count for block zero. 
*/ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; for (e = bb->succ; e; e = e->succ_next) if (!PROF_EDGE_INFO (e)->ignore && !PROF_EDGE_INFO (e)->on_tree) { num_edges++; if (exec_counts) { e->count = exec_counts[exec_counts_pos++]; if (e->count > profile_info->sum_max) { error ("corrupted profile info: edge from %i to %i exceeds maximal count", bb->index, e->dest->index); } } else e->count = 0; PROF_EDGE_INFO (e)->count_valid = 1; PROF_BB_INFO (bb)->succ_count--; PROF_BB_INFO (e->dest)->pred_count--; if (dump_file) { fprintf (dump_file, "\nRead edge from %i to %i, count:", bb->index, e->dest->index); fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) e->count); } } } if (dump_file) fprintf (dump_file, "\n%d edge counts read\n", num_edges); /* For every block in the file, - if every exit/entrance edge has a known count, then set the block count - if the block count is known, and every exit/entrance edge but one has a known execution count, then set the count of the remaining edge As edge counts are set, decrement the succ/pred count, but don't delete the edge, that way we can easily tell when all edges are known, or only one edge is unknown. */ /* The order that the basic blocks are iterated through is important. Since the code that finds spanning trees starts with block 0, low numbered edges are put on the spanning tree in preference to high numbered edges. Hence, most instrumented edges are at the end. Graph solving works much faster if we propagate numbers from the end to the start. This takes an average of slightly more than 3 passes. */ changes = 1; passes = 0; while (changes) { passes++; changes = 0; FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR, NULL, prev_bb) { struct bb_prof_info *bi = PROF_BB_INFO (bb); if (! bi->count_valid) { if (bi->succ_count == 0) { edge e; gcov_type total = 0; for (e = bb->succ; e; e = e->succ_next) total += e->count; bb->count = total; bi->count_valid = 1; changes = 1; } else if (bi->pred_count == 0) { edge e; gcov_type total = 0; for (e = bb->pred; e; e = e->pred_next) total += e->count; bb->count = total; bi->count_valid = 1; changes = 1; } } if (bi->count_valid) { if (bi->succ_count == 1) { edge e; gcov_type total = 0; /* One of the counts will be invalid, but it is zero, so adding it in also doesn't hurt. */ for (e = bb->succ; e; e = e->succ_next) total += e->count; /* Seedgeh for the invalid edge, and set its count. */ for (e = bb->succ; e; e = e->succ_next) if (! PROF_EDGE_INFO (e)->count_valid && ! PROF_EDGE_INFO (e)->ignore) break; /* Calculate count for remaining edge by conservation. */ total = bb->count - total; if (! e) abort (); PROF_EDGE_INFO (e)->count_valid = 1; e->count = total; bi->succ_count--; PROF_BB_INFO (e->dest)->pred_count--; changes = 1; } if (bi->pred_count == 1) { edge e; gcov_type total = 0; /* One of the counts will be invalid, but it is zero, so adding it in also doesn't hurt. */ for (e = bb->pred; e; e = e->pred_next) total += e->count; /* Search for the invalid edge, and set its count. */ for (e = bb->pred; e; e = e->pred_next) if (!PROF_EDGE_INFO (e)->count_valid && !PROF_EDGE_INFO (e)->ignore) break; /* Calculate count for remaining edge by conservation. */ total = bb->count - total + e->count; if (! 
e) abort (); PROF_EDGE_INFO (e)->count_valid = 1; e->count = total; bi->pred_count--; PROF_BB_INFO (e->src)->succ_count--; changes = 1; } } } } if (dump_file) dump_flow_info (dump_file); total_num_passes += passes; if (dump_file) fprintf (dump_file, "Graph solving took %d passes.\n\n", passes); /* If the graph has been correctly solved, every block will have a succ and pred count of zero. */ FOR_EACH_BB (bb) { if (PROF_BB_INFO (bb)->succ_count || PROF_BB_INFO (bb)->pred_count) abort (); } /* For every edge, calculate its branch probability and add a reg_note to the branch insn to indicate this. */ for (i = 0; i < 20; i++) hist_br_prob[i] = 0; num_never_executed = 0; num_branches = 0; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) { edge e; rtx note; if (bb->count < 0) { error ("corrupted profile info: number of iterations for basic block %d thought to be %i", bb->index, (int)bb->count); bb->count = 0; } for (e = bb->succ; e; e = e->succ_next) { /* Function may return twice in the cased the called function is setjmp or calls fork, but we can't represent this by extra edge from the entry, since extra edge from the exit is already present. We get negative frequency from the entry point. */ if ((e->count < 0 && e->dest == EXIT_BLOCK_PTR) || (e->count > bb->count && e->dest != EXIT_BLOCK_PTR)) { if (block_ends_with_call_p (bb)) e->count = e->count < 0 ? 0 : bb->count; } if (e->count < 0 || e->count > bb->count) { error ("corrupted profile info: number of executions for edge %d-%d thought to be %i", e->src->index, e->dest->index, (int)e->count); e->count = bb->count / 2; } } if (bb->count) { for (e = bb->succ; e; e = e->succ_next) e->probability = (e->count * REG_BR_PROB_BASE + bb->count / 2) / bb->count; if (bb->index >= 0 && block_ends_with_condjump_p (bb) && bb->succ->succ_next) { int prob; edge e; int index; /* Find the branch edge. It is possible that we do have fake edges here. */ for (e = bb->succ; e->flags & (EDGE_FAKE | EDGE_FALLTHRU); e = e->succ_next) continue; /* Loop body has been intentionally left blank. */ prob = e->probability; index = prob * 20 / REG_BR_PROB_BASE; if (index == 20) index = 19; hist_br_prob[index]++; /* Do this for RTL only. */ if (!ir_type ()) { note = find_reg_note (BB_END (bb), REG_BR_PROB, 0); /* There may be already note put by some other pass, such as builtin_expect expander. */ if (note) XEXP (note, 0) = GEN_INT (prob); else REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob), REG_NOTES (BB_END (bb))); } num_branches++; } } /* Otherwise distribute the probabilities evenly so we get sane sum. Use simple heuristics that if there are normal edges, give all abnormals frequency of 0, otherwise distribute the frequency over abnormals (this is the case of noreturn calls). 
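For example, a never-executed block with two normal successors gives each of them probability REG_BR_PROB_BASE / 2 while any abnormal or fake successor gets probability 0; a never-executed block ending in a noreturn call has only fake or abnormal successors, so the probability is split evenly among those instead.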
*/ else { int total = 0; for (e = bb->succ; e; e = e->succ_next) if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) total ++; if (total) { for (e = bb->succ; e; e = e->succ_next) if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE))) e->probability = REG_BR_PROB_BASE / total; else e->probability = 0; } else { for (e = bb->succ; e; e = e->succ_next) total ++; for (e = bb->succ; e; e = e->succ_next) e->probability = REG_BR_PROB_BASE / total; } if (bb->index >= 0 && block_ends_with_condjump_p (bb) && bb->succ->succ_next) num_branches++, num_never_executed++; } } if (dump_file) { fprintf (dump_file, "%d branches\n", num_branches); fprintf (dump_file, "%d branches never executed\n", num_never_executed); if (num_branches) for (i = 0; i < 10; i++) fprintf (dump_file, "%d%% branches in range %d-%d%%\n", (hist_br_prob[i] + hist_br_prob[19-i]) * 100 / num_branches, 5 * i, 5 * i + 5); total_num_branches += num_branches; total_num_never_executed += num_never_executed; for (i = 0; i < 20; i++) total_hist_br_prob[i] += hist_br_prob[i]; fputc ('\n', dump_file); fputc ('\n', dump_file); } free_aux_for_blocks (); } /* Load value histograms for N_VALUES values, whose descriptions are stored in the VALUES array, from the .da file. */ static void compute_value_histograms (unsigned n_values, struct histogram_value *values) { unsigned i, j, t, any; unsigned n_histogram_counters[GCOV_N_VALUE_COUNTERS]; gcov_type *histogram_counts[GCOV_N_VALUE_COUNTERS]; gcov_type *act_count[GCOV_N_VALUE_COUNTERS]; gcov_type *aact_count; for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++) n_histogram_counters[t] = 0; for (i = 0; i < n_values; i++) n_histogram_counters[(int) (values[i].type)] += values[i].n_counters; any = 0; for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++) { if (!n_histogram_counters[t]) { histogram_counts[t] = NULL; continue; } histogram_counts[t] = get_coverage_counts (COUNTER_FOR_HIST_TYPE (t), n_histogram_counters[t], NULL); if (histogram_counts[t]) any = 1; act_count[t] = histogram_counts[t]; } if (!any) return; for (i = 0; i < n_values; i++) { rtx hist_list = NULL_RTX; t = (int) (values[i].type); /* FIXME: make this work for trees. */ if (!ir_type ()) { aact_count = act_count[t]; act_count[t] += values[i].n_counters; for (j = values[i].n_counters; j > 0; j--) hist_list = alloc_EXPR_LIST (0, GEN_INT (aact_count[j - 1]), hist_list); hist_list = alloc_EXPR_LIST (0, copy_rtx ((rtx)values[i].value), hist_list); hist_list = alloc_EXPR_LIST (0, GEN_INT (values[i].type), hist_list); REG_NOTES ((rtx)values[i].insn) = alloc_EXPR_LIST (REG_VALUE_PROFILE, hist_list, REG_NOTES ((rtx)values[i].insn)); } } for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++) if (histogram_counts[t]) free (histogram_counts[t]); } /* Instrument and/or analyze program behavior based on the program flow graph. In either case, this function builds a flow graph for the function being compiled. The flow graph is stored in BB_GRAPH. When FLAG_PROFILE_ARCS is nonzero, this function instruments the edges in the flow graph that are needed to reconstruct the dynamic behavior of the flow graph. When FLAG_BRANCH_PROBABILITIES is nonzero, this function reads auxiliary information from a data file containing edge count information from previous executions of the function being compiled. In this case, the flow graph is annotated with actual execution counts, which are later propagated into the rtl for optimization purposes. Main entry point of this file.
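As an editorial sketch of how the entry points in this file fit together (the real caller is elsewhere in the compiler and is not shown here): init_branch_prob () is called once before any function is compiled, branch_prob () once per function to instrument edges and/or read back the counts, and end_branch_prob () once at the end to print the accumulated statistics; roughly: init_branch_prob (); for each function { branch_prob (); } end_branch_prob ();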
*/ void branch_prob (void) { basic_block bb; unsigned i; unsigned num_edges, ignored_edges; unsigned num_instrumented; struct edge_list *el; unsigned n_values = 0; struct histogram_value *values = NULL; total_num_times_called++; flow_call_edges_add (NULL); add_noreturn_fake_exit_edges (); /* We can't handle cyclic regions constructed using abnormal edges. To avoid these we replace every source of abnormal edge by a fake edge from entry node and every destination by fake edge to exit. This keeps graph acyclic and our calculation exact for all normal edges except for exit and entrance ones. We also add fake exit edges for each call and asm statement in the basic, since it may not return. */ FOR_EACH_BB (bb) { int need_exit_edge = 0, need_entry_edge = 0; int have_exit_edge = 0, have_entry_edge = 0; edge e; /* Functions returning multiple times are not handled by extra edges. Instead we simply allow negative counts on edges from exit to the block past call and corresponding probabilities. We can't go with the extra edges because that would result in flowgraph that needs to have fake edges outside the spanning tree. */ for (e = bb->succ; e; e = e->succ_next) { if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL)) && e->dest != EXIT_BLOCK_PTR) need_exit_edge = 1; if (e->dest == EXIT_BLOCK_PTR) have_exit_edge = 1; } for (e = bb->pred; e; e = e->pred_next) { if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL)) && e->src != ENTRY_BLOCK_PTR) need_entry_edge = 1; if (e->src == ENTRY_BLOCK_PTR) have_entry_edge = 1; } if (need_exit_edge && !have_exit_edge) { if (dump_file) fprintf (dump_file, "Adding fake exit edge to bb %i\n", bb->index); make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); } if (need_entry_edge && !have_entry_edge) { if (dump_file) fprintf (dump_file, "Adding fake entry edge to bb %i\n", bb->index); make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FAKE); } } el = create_edge_list (); num_edges = NUM_EDGES (el); alloc_aux_for_edges (sizeof (struct edge_info)); /* The basic blocks are expected to be numbered sequentially. */ compact_blocks (); ignored_edges = 0; for (i = 0 ; i < num_edges ; i++) { edge e = INDEX_EDGE (el, i); e->count = 0; /* Mark edges we've replaced by fake edges above as ignored. */ if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL)) && e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR) { PROF_EDGE_INFO (e)->ignore = 1; ignored_edges++; } } #ifdef ENABLE_CHECKING verify_flow_info (); #endif /* Create spanning tree from basic block graph, mark each edge that is on the spanning tree. We insert as many abnormal and critical edges as possible to minimize number of edge splits necessary. */ find_spanning_tree (el); /* Fake edges that are not on the tree will not be instrumented, so mark them ignored. */ for (num_instrumented = i = 0; i < num_edges; i++) { edge e = INDEX_EDGE (el, i); struct edge_info *inf = PROF_EDGE_INFO (e); if (inf->ignore || inf->on_tree) /*NOP*/; else if (e->flags & EDGE_FAKE) { inf->ignore = 1; ignored_edges++; } else num_instrumented++; } total_num_blocks += n_basic_blocks + 2; if (dump_file) fprintf (dump_file, "%d basic blocks\n", n_basic_blocks); total_num_edges += num_edges; if (dump_file) fprintf (dump_file, "%d edges\n", num_edges); total_num_edges_ignored += ignored_edges; if (dump_file) fprintf (dump_file, "%d ignored edges\n", ignored_edges); /* Write the data from which gcov can reconstruct the basic block graph. 
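In outline (an editorial summary of the writes that follow, not an additional format specification): a GCOV_TAG_BLOCKS record with one flag word, currently always 0, per block; one GCOV_TAG_ARCS record per block listing, for each non-ignored successor edge, the gcov index of the destination block plus flag bits such as GCOV_ARC_ON_TREE, GCOV_ARC_FAKE and GCOV_ARC_FALLTHROUGH; and, for RTL only, GCOV_TAG_LINES records pairing each block's gcov index with the source lines found in its line-number notes.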
*/ /* Basic block flags */ if (coverage_begin_output ()) { gcov_position_t offset; offset = gcov_write_tag (GCOV_TAG_BLOCKS); for (i = 0; i != (unsigned) (n_basic_blocks + 2); i++) gcov_write_unsigned (0); gcov_write_length (offset); } /* Keep all basic block indexes nonnegative in the gcov output. Index 0 is used for entry block, last index is for exit block. */ ENTRY_BLOCK_PTR->index = -1; EXIT_BLOCK_PTR->index = last_basic_block; #define BB_TO_GCOV_INDEX(bb) ((bb)->index + 1) /* Arcs */ if (coverage_begin_output ()) { gcov_position_t offset; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { edge e; offset = gcov_write_tag (GCOV_TAG_ARCS); gcov_write_unsigned (BB_TO_GCOV_INDEX (bb)); for (e = bb->succ; e; e = e->succ_next) { struct edge_info *i = PROF_EDGE_INFO (e); if (!i->ignore) { unsigned flag_bits = 0; if (i->on_tree) flag_bits |= GCOV_ARC_ON_TREE; if (e->flags & EDGE_FAKE) flag_bits |= GCOV_ARC_FAKE; if (e->flags & EDGE_FALLTHRU) flag_bits |= GCOV_ARC_FALLTHROUGH; gcov_write_unsigned (BB_TO_GCOV_INDEX (e->dest)); gcov_write_unsigned (flag_bits); } } gcov_write_length (offset); } } /* Line numbers. */ /* FIXME: make this work for trees. (Line numbers are in location_t objects, but aren't always attached to the obvious tree...) */ if (coverage_begin_output () && !ir_type ()) { char const *prev_file_name = NULL; gcov_position_t offset; FOR_EACH_BB (bb) { rtx insn = BB_HEAD (bb); int ignore_next_note = 0; offset = 0; /* We are looking for line number notes. Search backward before basic block to find correct ones. */ insn = prev_nonnote_insn (insn); if (!insn) insn = get_insns (); else insn = NEXT_INSN (insn); while (insn != BB_END (bb)) { if (GET_CODE (insn) == NOTE) { /* Must ignore the line number notes that immediately follow the end of an inline function to avoid counting it twice. There is a note before the call, and one after the call. */ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_REPEATED_LINE_NUMBER) ignore_next_note = 1; else if (NOTE_LINE_NUMBER (insn) <= 0) /*NOP*/; else if (ignore_next_note) ignore_next_note = 0; else { expanded_location s; if (!offset) { offset = gcov_write_tag (GCOV_TAG_LINES); gcov_write_unsigned (BB_TO_GCOV_INDEX (bb)); } NOTE_EXPANDED_LOCATION (s, insn); /* If this is a new source file, then output the file's name to the .bb file. */ if (!prev_file_name || strcmp (s.file, prev_file_name)) { prev_file_name = s.file; gcov_write_unsigned (0); gcov_write_string (prev_file_name); } gcov_write_unsigned (s.line); } } insn = NEXT_INSN (insn); } if (offset) { /* A file of NULL indicates the end of run. */ gcov_write_unsigned (0); gcov_write_string (NULL); gcov_write_length (offset); } } } ENTRY_BLOCK_PTR->index = ENTRY_BLOCK; EXIT_BLOCK_PTR->index = EXIT_BLOCK; #undef BB_TO_GCOV_INDEX if (flag_profile_values) find_values_to_profile (&n_values, &values); if (flag_branch_probabilities) { compute_branch_probabilities (); if (flag_profile_values) compute_value_histograms (n_values, values); } /* For each edge not on the spanning tree, add counting code. */ if (profile_arc_flag && coverage_counter_alloc (GCOV_COUNTER_ARCS, num_instrumented)) { unsigned n_instrumented = instrument_edges (el); if (n_instrumented != num_instrumented) abort (); if (flag_profile_values) instrument_values (n_values, values); /* Commit changes done by instrumentation. 
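(Editorial note: the counter-update code produced by instrument_edges and instrument_values above was only queued on the CFG edges; the calls below flush those insertions, splitting edges where necessary, and on the RTL path also grow the register information arrays for any pseudos the counters introduced.)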
*/ if (ir_type ()) bsi_commit_edge_inserts ((int *)NULL); else { commit_edge_insertions_watch_calls (); allocate_reg_info (max_reg_num (), FALSE, FALSE); } } remove_fake_edges (); free_aux_for_edges (); if (!ir_type ()) { /* Re-merge split basic blocks and the mess introduced by insert_insn_on_edge. */ cleanup_cfg (profile_arc_flag ? CLEANUP_EXPENSIVE : 0); if (profile_dump_file()) dump_flow_info (profile_dump_file()); } free_edge_list (el); } /* Union find algorithm implementation for the basic blocks using aux fields. */ static basic_block find_group (basic_block bb) { basic_block group = bb, bb1; while ((basic_block) group->aux != group) group = (basic_block) group->aux; /* Compress path. */ while ((basic_block) bb->aux != group) { bb1 = (basic_block) bb->aux; bb->aux = (void *) group; bb = bb1; } return group; } static void union_groups (basic_block bb1, basic_block bb2) { basic_block bb1g = find_group (bb1); basic_block bb2g = find_group (bb2); /* ??? I don't have a place for the rank field. OK. Lets go w/o it, this code is unlikely going to be performance problem anyway. */ if (bb1g == bb2g) abort (); bb1g->aux = bb2g; } /* This function searches all of the edges in the program flow graph, and puts as many bad edges as possible onto the spanning tree. Bad edges include abnormals edges, which can't be instrumented at the moment. Since it is possible for fake edges to form a cycle, we will have to develop some better way in the future. Also put critical edges to the tree, since they are more expensive to instrument. */ static void find_spanning_tree (struct edge_list *el) { int i; int num_edges = NUM_EDGES (el); basic_block bb; /* We use aux field for standard union-find algorithm. */ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) bb->aux = bb; /* Add fake edge exit to entry we can't instrument. */ union_groups (EXIT_BLOCK_PTR, ENTRY_BLOCK_PTR); /* First add all abnormal edges to the tree unless they form a cycle. Also add all edges to EXIT_BLOCK_PTR to avoid inserting profiling code behind setting return value from function. */ for (i = 0; i < num_edges; i++) { edge e = INDEX_EDGE (el, i); if (((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_FAKE)) || e->dest == EXIT_BLOCK_PTR) && !PROF_EDGE_INFO (e)->ignore && (find_group (e->src) != find_group (e->dest))) { if (dump_file) fprintf (dump_file, "Abnormal edge %d to %d put to tree\n", e->src->index, e->dest->index); PROF_EDGE_INFO (e)->on_tree = 1; union_groups (e->src, e->dest); } } /* Now insert all critical edges to the tree unless they form a cycle. */ for (i = 0; i < num_edges; i++) { edge e = INDEX_EDGE (el, i); if (EDGE_CRITICAL_P (e) && !PROF_EDGE_INFO (e)->ignore && find_group (e->src) != find_group (e->dest)) { if (dump_file) fprintf (dump_file, "Critical edge %d to %d put to tree\n", e->src->index, e->dest->index); PROF_EDGE_INFO (e)->on_tree = 1; union_groups (e->src, e->dest); } } /* And now the rest. */ for (i = 0; i < num_edges; i++) { edge e = INDEX_EDGE (el, i); if (!PROF_EDGE_INFO (e)->ignore && find_group (e->src) != find_group (e->dest)) { if (dump_file) fprintf (dump_file, "Normal edge %d to %d put to tree\n", e->src->index, e->dest->index); PROF_EDGE_INFO (e)->on_tree = 1; union_groups (e->src, e->dest); } } FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) bb->aux = NULL; } /* Perform file-level initialization for branch-prob processing. 
*/ void init_branch_prob (void) { int i; total_num_blocks = 0; total_num_edges = 0; total_num_edges_ignored = 0; total_num_edges_instrumented = 0; total_num_blocks_created = 0; total_num_passes = 0; total_num_times_called = 0; total_num_branches = 0; total_num_never_executed = 0; for (i = 0; i < 20; i++) total_hist_br_prob[i] = 0; } /* Performs file-level cleanup after branch-prob processing is completed. */ void end_branch_prob (void) { if (dump_file) { fprintf (dump_file, "\n"); fprintf (dump_file, "Total number of blocks: %d\n", total_num_blocks); fprintf (dump_file, "Total number of edges: %d\n", total_num_edges); fprintf (dump_file, "Total number of ignored edges: %d\n", total_num_edges_ignored); fprintf (dump_file, "Total number of instrumented edges: %d\n", total_num_edges_instrumented); fprintf (dump_file, "Total number of blocks created: %d\n", total_num_blocks_created); fprintf (dump_file, "Total number of graph solution passes: %d\n", total_num_passes); if (total_num_times_called != 0) fprintf (dump_file, "Average number of graph solution passes: %d\n", (total_num_passes + (total_num_times_called >> 1)) / total_num_times_called); fprintf (dump_file, "Total number of branches: %d\n", total_num_branches); fprintf (dump_file, "Total number of branches never executed: %d\n", total_num_never_executed); if (total_num_branches) { int i; for (i = 0; i < 10; i++) fprintf (dump_file, "%d%% branches in range %d-%d%%\n", (total_hist_br_prob[i] + total_hist_br_prob[19-i]) * 100 / total_num_branches, 5*i, 5*i+5); } } } /* Set up hooks to enable tree-based profiling. */ void tree_register_profile_hooks (void) { profile_hooks = &tree_profile_hooks; if (!ir_type ()) abort (); } /* Set up hooks to enable RTL-based profiling. */ void rtl_register_profile_hooks (void) { profile_hooks = &rtl_profile_hooks; if (ir_type ()) abort (); } /* Graph coloring register allocator Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Matz and Daniel Berlin. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/ #ifndef GCC_RA_H #define GCC_RA_H /* Double linked list to implement the per-type lists of webs and moves. */ struct dlist { struct dlist *prev; struct dlist *next; union { struct web *web; struct move *move; } value; }; /* Simple helper macros for ease of misuse. */ #define DLIST_WEB(l) ((l)->value.web) #define DLIST_MOVE(l) ((l)->value.move) /* Classification of a given node (i.e. what state it's in). */ enum node_type { INITIAL = 0, FREE, PRECOLORED, SIMPLIFY, SIMPLIFY_SPILL, SIMPLIFY_FAT, FREEZE, SPILL, SELECT, SPILLED, COALESCED, COLORED, LAST_NODE_TYPE }; /* A list of conflict bitmaps, factorized on the exact part of the source, which conflicts with the DEFs, whose ID are noted in the bitmap. This is used while building web-parts with conflicts. */ struct tagged_conflict { struct tagged_conflict *next; bitmap conflicts; /* If the part of source identified by size S, byteoffset O conflicts, then size_word == S | (O << 16). */ unsigned int size_word; }; /* Such a structure is allocated initially for each def and use. In the process of building the interference graph web parts are connected together, if they have common instructions and reference the same register. That way live ranges are build (by connecting defs and uses) and implicitly complete webs (by connecting web parts in common uses). */ struct web_part { /* The def or use for this web part. */ struct ref *ref; /* The uplink implementing the disjoint set. */ struct web_part *uplink; /* Here dynamic information associated with each def/use is saved. This all is only valid for root web parts (uplink==NULL). That's the information we need to merge, if web parts are unioned. */ /* Number of spanned insns containing any deaths. */ unsigned int spanned_deaths; /* The list of bitmaps of DEF ID's with which this part conflicts. */ struct tagged_conflict *sub_conflicts; /* If there's any call_insn, while this part is live. */ unsigned int crosses_call : 1; }; /* Web structure used to store info about connected live ranges. This represents the nodes of the interference graph, which gets colored. It can also hold subwebs, which are contained in webs and represent subregs. */ struct web { /* Unique web ID. */ unsigned int id; /* Register number of the live range's variable. */ unsigned int regno; /* How many insns containing deaths do we span? */ unsigned int span_deaths; /* Spill_temp indicates if this web was part of a web spilled in the last iteration, or or reasons why this shouldn't be spilled again. 1 spill web, can't be spilled. 2 big spill web (live over some deaths). Discouraged, but not impossible to spill again. 3 short web (spans no deaths), can't be spilled. */ unsigned int spill_temp; /* When coalescing we might change spill_temp. If breaking aliases we need to restore it. */ unsigned int orig_spill_temp; /* Cost of spilling. */ unsigned HOST_WIDE_INT spill_cost; unsigned HOST_WIDE_INT orig_spill_cost; /* How many webs are aliased to us? */ unsigned int num_aliased; /* The color we got. This is a hardreg number. */ int color; /* 1 + the color this web got in the last pass. If it hadn't got a color, or we are in the first pass, or this web is a new one, this is zero. */ int old_color; /* Now follow some flags characterizing the web. */ /* Nonzero, if we should use usable_regs for this web, instead of preferred_class() or alternate_class(). */ unsigned int use_my_regs:1; /* Nonzero if we selected this web as possible spill candidate in select_spill(). 
*/ unsigned int was_spilled:1; /* We need to distinguish also webs which are targets of coalescing (all x with some y, so that x==alias(y)), but the alias field is only set for sources of coalescing. This flag is set for all webs involved in coalescing in some way. */ unsigned int is_coalesced:1; /* Nonzero, if this web (or subweb) doesn't correspond with any of the current functions actual use of reg rtx. Happens e.g. with conflicts to a web, of which only a part was still undefined at the point of that conflict. In this case we construct a subweb representing these yet undefined bits to have a target for the conflict. Suppose e.g. this sequence: (set (reg:DI x) ...) (set (reg:SI y) ...) (set (subreg:SI (reg:DI x) 0) ...) (use (reg:DI x)) Here x only partly conflicts with y. Namely only (subreg:SI (reg:DI x) 1) conflicts with it, but this rtx doesn't show up in the program. For such things an "artificial" subweb is built, and this flag is true for them. */ unsigned int artificial:1; /* Nonzero if we span a call_insn. */ unsigned int crosses_call:1; /* Wether the web is involved in a move insn. */ unsigned int move_related:1; /* 1 when this web (or parts thereof) are live over an abnormal edge. */ unsigned int live_over_abnormal:1; /* Nonzero if this web is used in subregs where the mode change was illegal for hardregs in CLASS_CANNOT_CHANGE_MODE. */ unsigned int mode_changed:1; /* Nonzero if some references of this web, where in subreg context, but the actual subreg is already stripped (i.e. we don't know the outer mode of the actual reference). */ unsigned int subreg_stripped:1; /* Nonzero, when this web stems from the last pass of the allocator, and all info is still valid (i.e. it wasn't spilled). */ unsigned int old_web:1; /* Used in rewrite_program2() to remember webs, which are already marked for (re)loading. */ unsigned int in_load:1; /* If in_load is != 0, then this is nonzero, if only one use was seen since insertion in loadlist. Zero if more uses currently need a reload. Used to differentiate between inserting register loads or directly substituting the stackref. */ unsigned int one_load:1; /* When using rewrite_program2() this flag gets set if some insns were inserted on behalf of this web. IR spilling might ignore some deaths up to the def, so no code might be emitted and we need not to spill such a web again. */ unsigned int changed:1; /* With interference region spilling it's sometimes the case, that a bb border is also an IR border for webs, which were targets of moves, which are already removed due to coalescing. All webs, which are a destination of such a removed move, have this flag set. */ unsigned int target_of_spilled_move:1; /* For optimistic coalescing we need to be able to break aliases, which includes restoring conflicts to those before coalescing. This flag is set, if we have a list of conflicts before coalescing. It's needed because that list is lazily constructed only when actually needed. */ unsigned int have_orig_conflicts:1; /* Current state of the node. */ ENUM_BITFIELD(node_type) type:5; /* A regclass, combined from preferred and alternate class, or calculated from usable_regs. Used only for debugging, and to determine add_hardregs. */ ENUM_BITFIELD(reg_class) regclass:10; /* Additional consecutive hardregs needed for this web. */ int add_hardregs; /* Number of conflicts currently. */ int num_conflicts; /* Numbers of uses and defs, which belong to this web. 
*/ unsigned int num_uses; unsigned int num_defs; /* The (reg:M a) or (subreg:M1 (reg:M2 a) x) rtx which this web is based on. This is used to distinguish subreg webs from it's reg parents, and to get hold of the mode. */ rtx orig_x; /* If this web is a subweb, this point to the super web. Otherwise it's NULL. */ struct web *parent_web; /* If this web is a subweb, but not the last one, this points to the next subweb of the same super web. Otherwise it's NULL. */ struct web *subreg_next; /* The set of webs (or subwebs), this web conflicts with. */ struct conflict_link *conflict_list; /* If have_orig_conflicts is set this contains a copy of conflict_list, as it was right after building the interference graph. It's used for incremental i-graph building and for breaking coalescings again. */ struct conflict_link *orig_conflict_list; /* Bitmap of all conflicts which don't count this pass, because of non-intersecting hardregs of the conflicting webs. See also record_conflict(). */ bitmap useless_conflicts; /* Different sets of hard registers, for all usable registers, ... */ HARD_REG_SET usable_regs; /* ... the same before coalescing, ... */ HARD_REG_SET orig_usable_regs; /* ... colors of all already colored neighbors (used when biased coloring is active), and ... */ HARD_REG_SET bias_colors; /* ... colors of PRECOLORED webs this web is connected to by a move. */ HARD_REG_SET prefer_colors; /* Number of usable colors in usable_regs. */ int num_freedom; /* After successful coloring the graph each web gets a new reg rtx, with which the original uses and defs are replaced. This is it. */ rtx reg_rtx; /* While spilling this is the rtx of the home of spilled webs. It can be a mem ref (a stack slot), or a pseudo register. */ rtx stack_slot; /* Used in rewrite_program2() to remember the using insn last seen for webs needing (re)loads. */ rtx last_use_insn; /* If this web is rematerializable, this contains the RTL pattern usable as source for that. Otherwise it's NULL. */ rtx pattern; /* All the defs and uses. There are num_defs, resp. num_uses elements. */ struct ref **defs; /* [0..num_defs-1] */ struct ref **uses; /* [0..num_uses-1] */ /* The web to which this web is aliased (coalesced). If NULL, this web is not coalesced into some other (but might still be a target for other webs). */ struct web *alias; /* With iterated coalescing this is a list of active moves this web is involved in. */ struct move_list *moves; /* The list implementation. */ struct dlist *dlink; /* While building webs, out of web-parts, this holds a (partial) list of all refs for this web seen so far. */ struct df_link *temp_refs; }; /* For implementing a single linked list. */ struct web_link { struct web_link *next; struct web *web; }; /* A subconflict is part of a conflict edge to track precisely, which parts of two webs conflict, in case not all of both webs do. */ struct sub_conflict { /* The next partial conflict. For one such list the parent-web of all the S webs, resp. the parent of all the T webs are constant. */ struct sub_conflict *next; struct web *s; struct web *t; }; /* This represents an edge in the conflict graph. */ struct conflict_link { struct conflict_link *next; /* The web we conflict with (the Target of the edge). */ struct web *t; /* If not the complete source web and T conflict, this points to the list of parts which really conflict. */ struct sub_conflict *sub; }; /* For iterated coalescing the moves can be in these states. 
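(An editorial gloss following the George/Appel iterated-coalescing terminology cited in the toplevel file, not a statement from the original comment: WORKLIST holds moves still to be considered, ACTIVE moves are not yet ready for coalescing, FROZEN moves have been given up on, CONSTRAINED moves have an interfering source and target, and MV_COALESCED marks moves that were coalesced away.)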
*/ enum move_type { WORKLIST, MV_COALESCED, CONSTRAINED, FROZEN, ACTIVE, LAST_MOVE_TYPE }; /* Structure of a move we are considering coalescing. */ struct move { rtx insn; struct web *source_web; struct web *target_web; enum move_type type; struct dlist *dlink; }; /* List of moves. */ struct move_list { struct move_list *next; struct move *move; }; /* To have fast access to the defs and uses per insn, we have one such structure per insn. The difference to the normal df.c structures is, that it doesn't contain any NULL refs, which df.c produces in case an insn was modified and it only contains refs to pseudo regs, or to hardregs which matter for allocation, i.e. those not in never_use_colors. */ struct ra_insn_info { unsigned int num_defs, num_uses; struct ref **defs, **uses; }; /* The above structures are stored in this array, indexed by UID... */ extern struct ra_insn_info *insn_df; /* ... and the size of that array, as we add insn after setting it up. */ extern int insn_df_max_uid; /* The interference graph. */ extern sbitmap igraph; /* And how to access it. I and J are web indices. If the bit igraph_index(I, J) is set, then they conflict. Note, that if only parts of webs conflict, then also only those parts are noted in the I-graph (i.e. the parent webs not). */ #define igraph_index(i, j) ((i) < (j) ? ((j)*((j)-1)/2)+(i) : ((i)*((i)-1)/2)+(j)) /* This is the bitmap of all (even partly) conflicting super webs. If bit I*num_webs+J or J*num_webs+I is set, then I and J (both being super web indices) conflict, maybe only partially. Note the asymmetry. */ extern sbitmap sup_igraph; /* After the first pass, and when interference region spilling is activated, bit I is set, when the insn with UID I contains some refs to pseudos which die at the insn. */ extern sbitmap insns_with_deaths; /* The size of that sbitmap. */ extern int death_insns_max_uid; /* All the web-parts. There are exactly as many web-parts as there are register refs in the insn stream. */ extern struct web_part *web_parts; /* The number of all webs, including subwebs. */ extern unsigned int num_webs; /* The number of just the subwebs. */ extern unsigned int num_subwebs; /* The number of all webs, including subwebs. */ extern unsigned int num_allwebs; /* For easy access when given a web ID: id2web[W->id] == W. */ extern struct web **id2web; /* For each hardreg, the web which represents it. */ extern struct web *hardreg2web[FIRST_PSEUDO_REGISTER]; /* Given the ID of a df_ref, which represent a DEF, def2web[ID] is the web, to which this def belongs. */ extern struct web **def2web; /* The same as def2web, just for uses. */ extern struct web **use2web; /* The list of all recognized and coalescable move insns. */ extern struct move_list *wl_moves; /* Some parts of the compiler which we run after colorizing clean reg_renumber[], so we need another place for the colors. This is copied to reg_renumber[] just before returning to toplev. */ extern short *ra_reg_renumber; /* The size of that array. Some passes after coloring might have created new pseudos, which will get no color. */ extern int ra_max_regno; /* The dataflow structure of the current function, while regalloc runs. */ extern struct df *df; /* For each basic block B we have a bitmap of DF_REF_ID's of uses, which backward reach the end of B. */ extern bitmap *live_at_end; /* One pass is: collecting registers refs, building I-graph, spilling. And this is how often we already ran that for the current function. 
*/ extern int ra_pass; /* The maximum pseudo regno, just before register allocation starts. While regalloc runs all pseudos with a larger number represent potentially stack slots or hardregs. I call them stackwebs or stackpseudos. */ extern unsigned int max_normal_pseudo; /* One of the fixed colors. It must be < FIRST_PSEUDO_REGISTER, because we sometimes want to check the color against a HARD_REG_SET. It must be >= 0, because negative values mean "no color". This color is used for the above stackwebs, when they can't be colored. I.e. normally they would be spilled, but they already represent stackslots. So they are colored with an invalid color. It has the property that even webs which conflict can have that color at the same time. I.e. a stackweb with that color really represents a stackslot. */ extern int an_unusable_color; /* While building the I-graph, every time insn UID is looked at, number_seen[UID] is incremented. For debugging. */ extern int *number_seen; /* The different lists on which a web can be (based on the type). */ extern struct dlist *web_lists[(int) LAST_NODE_TYPE]; #define WEBS(type) (web_lists[(int)(type)]) /* The largest DF_REF_ID of defs resp. uses, as it was in the last pass. In the first pass this is zero. Used to distinguish new from old references. */ extern unsigned int last_def_id; extern unsigned int last_use_id; /* Similar for UIDs and number of webs. */ extern int last_max_uid; extern unsigned int last_num_webs; /* If I is the ID of an old use, and last_check_uses[I] is set, then we must reevaluate it's flow while building the new I-graph. */ extern sbitmap last_check_uses; /* If nonzero, record_conflict() saves the current conflict list of webs in orig_conflict_list, when not already done so, and the conflict list is going to be changed. It is set, after initially building the I-graph. I.e. new conflicts due to coalescing trigger that copying. */ extern unsigned int remember_conflicts; /* The maximum UID right before calling regalloc(). Used to detect any instructions inserted by the allocator. */ extern int orig_max_uid; /* A HARD_REG_SET of those color, which can't be used for coalescing. Includes e.g. fixed_regs. */ extern HARD_REG_SET never_use_colors; /* For each class C this is reg_class_contents[C] \ never_use_colors. */ extern HARD_REG_SET usable_regs[N_REG_CLASSES]; /* For each class C the count of hardregs in usable_regs[C]. */ extern unsigned int num_free_regs[N_REG_CLASSES]; /* For each mode M the hardregs, which are MODE_OK for M, and have enough space behind them to hold an M value. Additionally if reg R is OK for mode M, but it needs two hardregs, then R+1 will also be set here, even if R+1 itself is not OK for M. I.e. this represent the possible resources which could be taken away be a value in mode M. */ extern HARD_REG_SET hardregs_for_mode[NUM_MACHINE_MODES]; /* The set of hardregs, for which _any_ mode change is invalid. */ extern HARD_REG_SET invalid_mode_change_regs; /* For 0 <= I <= 255, the number of bits set in I. Used to calculate the number of set bits in a HARD_REG_SET. */ extern unsigned char byte2bitcount[256]; /* Expressive helper macros. */ #define ID2WEB(I) id2web[I] #define NUM_REGS(W) (((W)->type == PRECOLORED) ? 1 : (W)->num_freedom) #define SUBWEB_P(W) (GET_CODE ((W)->orig_x) == SUBREG) /* Constant usable as debug area to ra_debug_msg. 
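A hypothetical use (editorial example only, the names web_id and cost are made up): ra_debug_msg (DUMP_COSTS, "web %d: spill cost %u\n", web_id, cost); the message reaches the dump file only when the corresponding DUMP_xxx bit below is set in debug_new_regalloc.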
*/ #define DUMP_COSTS 0x0001 #define DUMP_WEBS 0x0002 #define DUMP_IGRAPH 0x0004 #define DUMP_PROCESS 0x0008 #define DUMP_COLORIZE 0x0010 #define DUMP_ASM 0x0020 #define DUMP_CONSTRAINTS 0x0040 #define DUMP_RESULTS 0x0080 #define DUMP_DF 0x0100 #define DUMP_RTL 0x0200 #define DUMP_FINAL_RTL 0x0400 #define DUMP_REGCLASS 0x0800 #define DUMP_SM 0x1000 #define DUMP_LAST_FLOW 0x2000 #define DUMP_LAST_RTL 0x4000 #define DUMP_REBUILD 0x8000 #define DUMP_IGRAPH_M 0x10000 #define DUMP_VALIDIFY 0x20000 #define DUMP_EVER ((unsigned int)-1) #define DUMP_NEARLY_EVER (DUMP_EVER - DUMP_COSTS - DUMP_IGRAPH_M) /* All the wanted debug levels as ORing of the various DUMP_xxx constants. */ extern unsigned int debug_new_regalloc; /* Nonzero means we want biased coloring. */ extern int flag_ra_biased; /* Nonzero if we want to use improved (and slow) spilling. This includes also interference region spilling (see below). */ extern int flag_ra_improved_spilling; /* Nonzero for using interference region spilling. Zero for improved Chaintin style spilling (only at deaths). */ extern int flag_ra_ir_spilling; /* Nonzero if we use optimistic coalescing, zero for iterated coalescing. */ extern int flag_ra_optimistic_coalescing; /* Nonzero if we want to break aliases of spilled webs. Forced to nonzero, when flag_ra_optimistic_coalescing is. */ extern int flag_ra_break_aliases; /* Nonzero if we want to merge the spill costs of webs which are coalesced. */ extern int flag_ra_merge_spill_costs; /* Nonzero if we want to spill at every use, instead of at deaths, or interference region borders. */ extern int flag_ra_spill_every_use; /* Nonzero to output all notes in the debug dumps. */ extern int flag_ra_dump_notes; extern void * ra_alloc (size_t); extern void * ra_calloc (size_t); extern int hard_regs_count (HARD_REG_SET); extern rtx ra_emit_move_insn (rtx, rtx); extern void ra_debug_msg (unsigned int, const char *, ...) ATTRIBUTE_PRINTF_2; extern int hard_regs_intersect_p (HARD_REG_SET *, HARD_REG_SET *); extern unsigned int rtx_to_bits (rtx); extern struct web * find_subweb (struct web *, rtx); extern struct web * find_subweb_2 (struct web *, unsigned int); extern struct web * find_web_for_subweb_1 (struct web *); #define find_web_for_subweb(w) (((w)->parent_web) \ ? 
find_web_for_subweb_1 ((w)->parent_web) \ : (w)) extern void ra_build_realloc (struct df *); extern void ra_build_free (void); extern void ra_build_free_all (struct df *); extern void ra_colorize_init (void); extern void ra_colorize_free_all (void); extern void ra_rewrite_init (void); extern void ra_print_rtx (FILE *, rtx, int); extern void ra_print_rtx_top (FILE *, rtx, int); extern void ra_debug_rtx (rtx); extern void ra_debug_insns (rtx, int); extern void ra_debug_bbi (int); extern void ra_print_rtl_with_bb (FILE *, rtx); extern void dump_igraph (struct df *); extern void dump_igraph_machine (void); extern void dump_constraints (void); extern void dump_cost (unsigned int); extern void dump_graph_cost (unsigned int, const char *); extern void dump_ra (struct df *); extern void dump_number_seen (void); extern void dump_static_insn_cost (FILE *, const char *, const char *); extern void dump_web_conflicts (struct web *); extern void dump_web_insns (struct web*); extern int web_conflicts_p (struct web *, struct web *); extern void debug_hard_reg_set (HARD_REG_SET); extern void remove_list (struct dlist *, struct dlist **); extern struct dlist * pop_list (struct dlist **); extern void record_conflict (struct web *, struct web *); extern int memref_is_stack_slot (rtx); extern void build_i_graph (struct df *); extern void put_web (struct web *, enum node_type); extern void remove_web_from_list (struct web *); extern void reset_lists (void); extern struct web * alias (struct web *); extern void merge_moves (struct web *, struct web *); extern void ra_colorize_graph (struct df *); extern void actual_spill (void); extern void emit_colors (struct df *); extern void delete_moves (void); extern void setup_renumber (int); extern void remove_suspicious_death_notes (void); #endif /* GCC_RA_H */ /* This is the toplevel file of a graph coloring register allocator. It is able to act like a George & Appel allocator, i.e. with iterative coalescing plus spill coalescing/propagation. And it can act as a traditional Briggs allocator, although with optimistic coalescing. Additionally it has a custom pass, which tries to reduce the overall cost of the colored graph. We support two modes of spilling: spill-everywhere, which is extremely fast, and interference region spilling, which reduces spill code to a large extent, but is slower. Helpful documents: Briggs, P., Cooper, K. D., and Torczon, L. 1994. Improvements to graph coloring register allocation. ACM Trans. Program. Lang. Syst. 16, 3 (May), 428-455. Bergner, P., Dahl, P., Engebretsen, D., and O'Keefe, M. 1997. Spill code minimization via interference region spilling. In Proc. ACM SIGPLAN '97 Conf. on Prog. Language Design and Implementation. ACM, 287-295. George, L., Appel, A.W. 1996. Iterated register coalescing. ACM Trans. Program. Lang. Syst. 18, 3 (May), 300-324. */ /* This file contains the main entry point (reg_alloc), some helper routines used by more than one file of the register allocator, and the toplevel driver procedure (one_pass). */ /* Things, one might do somewhen: * Lattice based rematerialization * create definitions of ever-life regs at the beginning of the insn chain * insert loads as soon, stores as late as possible * insert spill insns as outward as possible (either looptree, or LCM) * reuse stack-slots * delete coalesced insns. Partly done. The rest can only go, when we get rid of reload. 
* don't destroy coalescing information completely when spilling * use the constraints from asms */ static struct obstack ra_obstack; static void create_insn_info (struct df *); static void free_insn_info (void); static void alloc_mem (struct df *); static void free_mem (struct df *); static void free_all_mem (struct df *df); static int one_pass (struct df *, int); static void check_df (struct df *); static void init_ra (void); void reg_alloc (void); /* These global variables are "internal" to the register allocator. They are all documented at their declarations in ra.h. */ /* Somewhen we want to get rid of one of those sbitmaps. (for now I need the sup_igraph to note if there is any conflict between parts of webs at all. I can't use igraph for this, as there only the real conflicts are noted.) This is only used to prevent coalescing two conflicting webs, were only parts of them are in conflict. */ sbitmap igraph; sbitmap sup_igraph; /* Note the insns not inserted by the allocator, where we detected any deaths of pseudos. It is used to detect closeness of defs and uses. In the first pass this is empty (we could initialize it from REG_DEAD notes), in the other passes it is left from the pass before. */ sbitmap insns_with_deaths; int death_insns_max_uid; struct web_part *web_parts; unsigned int num_webs; unsigned int num_subwebs; unsigned int num_allwebs; struct web **id2web; struct web *hardreg2web[FIRST_PSEUDO_REGISTER]; struct web **def2web; struct web **use2web; struct move_list *wl_moves; int ra_max_regno; short *ra_reg_renumber; struct df *df; bitmap *live_at_end; int ra_pass; unsigned int max_normal_pseudo; int an_unusable_color; /* The different lists on which a web can be (based on the type). */ struct dlist *web_lists[(int) LAST_NODE_TYPE]; unsigned int last_def_id; unsigned int last_use_id; unsigned int last_num_webs; int last_max_uid; sbitmap last_check_uses; unsigned int remember_conflicts; int orig_max_uid; HARD_REG_SET never_use_colors; HARD_REG_SET usable_regs[N_REG_CLASSES]; unsigned int num_free_regs[N_REG_CLASSES]; HARD_REG_SET hardregs_for_mode[NUM_MACHINE_MODES]; HARD_REG_SET invalid_mode_change_regs; unsigned char byte2bitcount[256]; unsigned int debug_new_regalloc = -1; int flag_ra_biased = 0; int flag_ra_improved_spilling = 0; int flag_ra_ir_spilling = 0; int flag_ra_optimistic_coalescing = 0; int flag_ra_break_aliases = 0; int flag_ra_merge_spill_costs = 0; int flag_ra_spill_every_use = 0; int flag_ra_dump_notes = 0; /* Fast allocation of small objects, which live until the allocator is done. Allocate an object of SIZE bytes. */ void * ra_alloc (size_t size) { return obstack_alloc (&ra_obstack, size); } /* Like ra_alloc(), but clear the returned memory. */ void * ra_calloc (size_t size) { void *p = obstack_alloc (&ra_obstack, size); memset (p, 0, size); return p; } /* Returns the number of hardregs in HARD_REG_SET RS. */ int hard_regs_count (HARD_REG_SET rs) { int count = 0; #ifdef HARD_REG_SET while (rs) { unsigned char byte = rs & 0xFF; rs >>= 8; /* Avoid memory access, if nothing is set. */ if (byte) count += byte2bitcount[byte]; } #else unsigned int ofs; for (ofs = 0; ofs < HARD_REG_SET_LONGS; ofs++) { HARD_REG_ELT_TYPE elt = rs[ofs]; while (elt) { unsigned char byte = elt & 0xFF; elt >>= 8; if (byte) count += byte2bitcount[byte]; } } #endif return count; } /* Basically like emit_move_insn (i.e. validifies constants and such), but also handle MODE_CC moves (but then the operands must already be basically valid. 
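) For example (an editorial note, not part of the original comment): a move between two CCmode registers is emitted directly via gen_move_insn, because emit_move_insn would try to legitimize the operands, which is meaningless for condition-code values; moves in all other modes are passed to emit_move_insn unchanged.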
*/ rtx ra_emit_move_insn (rtx x, rtx y) { enum machine_mode mode = GET_MODE (x); if (GET_MODE_CLASS (mode) == MODE_CC) return emit_insn (gen_move_insn (x, y)); else return emit_move_insn (x, y); } int insn_df_max_uid; struct ra_insn_info *insn_df; static struct ref **refs_for_insn_df; /* Create the insn_df structure for each insn to have fast access to all valid defs and uses in an insn. */ static void create_insn_info (struct df *df) { rtx insn; struct ref **act_refs; insn_df_max_uid = get_max_uid (); insn_df = xcalloc (insn_df_max_uid, sizeof (insn_df[0])); refs_for_insn_df = xcalloc (df->def_id + df->use_id, sizeof (struct ref *)); act_refs = refs_for_insn_df; /* We create those things backwards to mimic the order in which the insns are visited in rewrite_program2() and live_in(). */ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) { int uid = INSN_UID (insn); unsigned int n; struct df_link *link; if (!INSN_P (insn)) continue; for (n = 0, link = DF_INSN_DEFS (df, insn); link; link = link->next) if (link->ref && (DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER || !TEST_HARD_REG_BIT (never_use_colors, DF_REF_REGNO (link->ref)))) { if (n == 0) insn_df[uid].defs = act_refs; insn_df[uid].defs[n++] = link->ref; } act_refs += n; insn_df[uid].num_defs = n; for (n = 0, link = DF_INSN_USES (df, insn); link; link = link->next) if (link->ref && (DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER || !TEST_HARD_REG_BIT (never_use_colors, DF_REF_REGNO (link->ref)))) { if (n == 0) insn_df[uid].uses = act_refs; insn_df[uid].uses[n++] = link->ref; } act_refs += n; insn_df[uid].num_uses = n; } if (refs_for_insn_df + (df->def_id + df->use_id) < act_refs) abort (); } /* Free the insn_df structures. */ static void free_insn_info (void) { free (refs_for_insn_df); refs_for_insn_df = NULL; free (insn_df); insn_df = NULL; insn_df_max_uid = 0; } /* Search WEB for a subweb, which represents REG. REG needs to be a SUBREG, and the inner reg of it needs to be the one which is represented by WEB. Returns the matching subweb or NULL. */ struct web * find_subweb (struct web *web, rtx reg) { struct web *w; if (GET_CODE (reg) != SUBREG) abort (); for (w = web->subreg_next; w; w = w->subreg_next) if (GET_MODE (w->orig_x) == GET_MODE (reg) && SUBREG_BYTE (w->orig_x) == SUBREG_BYTE (reg)) return w; return NULL; } /* Similar to find_subweb(), but matches according to SIZE_WORD, a collection of the needed size and offset (in bytes). */ struct web * find_subweb_2 (struct web *web, unsigned int size_word) { struct web *w = web; if (size_word == GET_MODE_SIZE (GET_MODE (web->orig_x))) /* size_word == size means BYTE_BEGIN(size_word) == 0. */ return web; for (w = web->subreg_next; w; w = w->subreg_next) { unsigned int bl = rtx_to_bits (w->orig_x); if (size_word == bl) return w; } return NULL; } /* Returns the superweb for SUBWEB. */ struct web * find_web_for_subweb_1 (struct web *subweb) { while (subweb->parent_web) subweb = subweb->parent_web; return subweb; } /* Determine if two hard register sets intersect. Return 1 if they do. */ int hard_regs_intersect_p (HARD_REG_SET *a, HARD_REG_SET *b) { HARD_REG_SET c; COPY_HARD_REG_SET (c, *a); AND_HARD_REG_SET (c, *b); GO_IF_HARD_REG_SUBSET (c, reg_class_contents[(int) NO_REGS], lose); return 1; lose: return 0; } /* Allocate and initialize the memory necessary for one pass of the register allocator. 
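(Editorial inference from the code below, not part of the original comment: live_at_end is allocated with last_basic_block + 2 bitmaps and the pointer is then advanced by two, presumably so the two artificial blocks can be addressed at live_at_end[-2] and live_at_end[-1] while ordinary blocks use their nonnegative index; free_all_mem moves the pointer back before freeing.)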
*/ static void alloc_mem (struct df *df) { int i; ra_build_realloc (df); if (!live_at_end) { live_at_end = xmalloc ((last_basic_block + 2) * sizeof (bitmap)); for (i = 0; i < last_basic_block + 2; i++) live_at_end[i] = BITMAP_XMALLOC (); live_at_end += 2; } create_insn_info (df); } /* Free the memory which isn't necessary for the next pass. */ static void free_mem (struct df *df ATTRIBUTE_UNUSED) { free_insn_info (); ra_build_free (); } /* Free all memory allocated for the register allocator. Used, when it's done. */ static void free_all_mem (struct df *df) { unsigned int i; live_at_end -= 2; for (i = 0; i < (unsigned)last_basic_block + 2; i++) BITMAP_XFREE (live_at_end[i]); free (live_at_end); ra_colorize_free_all (); ra_build_free_all (df); obstack_free (&ra_obstack, NULL); } static long ticks_build; static long ticks_rebuild; /* Perform one pass of allocation. Returns nonzero, if some spill code was added, i.e. if the allocator needs to rerun. */ static int one_pass (struct df *df, int rebuild) { long ticks = clock (); int something_spilled; remember_conflicts = 0; /* Build the complete interference graph, or if this is not the first pass, rebuild it incrementally. */ build_i_graph (df); /* From now on, if we create new conflicts, we need to remember the initial list of conflicts per web. */ remember_conflicts = 1; if (!rebuild) dump_igraph_machine (); /* Colorize the I-graph. This results in either a list of spilled_webs, in which case we need to run the spill phase, and rerun the allocator, or that list is empty, meaning we are done. */ ra_colorize_graph (df); last_max_uid = get_max_uid (); /* actual_spill() might change WEBS(SPILLED) and even empty it, so we need to remember it's state. */ something_spilled = !!WEBS(SPILLED); /* Add spill code if necessary. */ if (something_spilled) actual_spill (); ticks = clock () - ticks; if (rebuild) ticks_rebuild += ticks; else ticks_build += ticks; return something_spilled; } /* Initialize various arrays for the register allocator. */ static void init_ra (void) { int i; HARD_REG_SET rs; #ifdef ELIMINABLE_REGS static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS; unsigned int j; #endif int need_fp = (! flag_omit_frame_pointer || (current_function_calls_alloca && EXIT_IGNORE_STACK) || FRAME_POINTER_REQUIRED); ra_colorize_init (); /* We can't ever use any of the fixed regs. */ COPY_HARD_REG_SET (never_use_colors, fixed_reg_set); /* Additionally don't even try to use hardregs, which we already know are not eliminable. This includes also either the hard framepointer or all regs which are eliminable into the stack pointer, if need_fp is set. */ #ifdef ELIMINABLE_REGS for (j = 0; j < ARRAY_SIZE (eliminables); j++) { if (! CAN_ELIMINATE (eliminables[j].from, eliminables[j].to) || (eliminables[j].to == STACK_POINTER_REGNUM && need_fp)) for (i = hard_regno_nregs[eliminables[j].from][Pmode]; i--;) SET_HARD_REG_BIT (never_use_colors, eliminables[j].from + i); } #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM if (need_fp) for (i = hard_regno_nregs[HARD_FRAME_POINTER_REGNUM][Pmode]; i--;) SET_HARD_REG_BIT (never_use_colors, HARD_FRAME_POINTER_REGNUM + i); #endif #else if (need_fp) for (i = hard_regno_nregs[FRAME_POINTER_REGNUM][Pmode]; i--;) SET_HARD_REG_BIT (never_use_colors, FRAME_POINTER_REGNUM + i); #endif /* Stack and argument pointer are also rather useless to us. 
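(Further down this function the byte2bitcount table is also filled in; as an editorial worked example, byte2bitcount[0x0b] ends up as 3, and hard_regs_count earlier in this file sums these entries one byte at a time to count the bits of a HARD_REG_SET.)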
*/ for (i = hard_regno_nregs[STACK_POINTER_REGNUM][Pmode]; i--;) SET_HARD_REG_BIT (never_use_colors, STACK_POINTER_REGNUM + i); for (i = hard_regno_nregs[ARG_POINTER_REGNUM][Pmode]; i--;) SET_HARD_REG_BIT (never_use_colors, ARG_POINTER_REGNUM + i); for (i = 0; i < 256; i++) { unsigned char byte = ((unsigned) i) & 0xFF; unsigned char count = 0; while (byte) { if (byte & 1) count++; byte >>= 1; } byte2bitcount[i] = count; } for (i = 0; i < N_REG_CLASSES; i++) { int size; COPY_HARD_REG_SET (rs, reg_class_contents[i]); AND_COMPL_HARD_REG_SET (rs, never_use_colors); size = hard_regs_count (rs); num_free_regs[i] = size; COPY_HARD_REG_SET (usable_regs[i], rs); } /* Setup hardregs_for_mode[]. We are not interested only in the beginning of a multi-reg, but in all the hardregs involved. Maybe HARD_REGNO_MODE_OK() only ok's for beginnings. */ for (i = 0; i < NUM_MACHINE_MODES; i++) { int reg, size; CLEAR_HARD_REG_SET (rs); for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++) if (HARD_REGNO_MODE_OK (reg, i) /* Ignore VOIDmode and similar things. */ && (size = hard_regno_nregs[reg][i]) != 0 && (reg + size) <= FIRST_PSEUDO_REGISTER) { while (size--) SET_HARD_REG_BIT (rs, reg + size); } COPY_HARD_REG_SET (hardregs_for_mode[i], rs); } CLEAR_HARD_REG_SET (invalid_mode_change_regs); #ifdef CANNOT_CHANGE_MODE_CLASS if (0) for (i = 0; i < NUM_MACHINE_MODES; i++) { enum machine_mode from = (enum machine_mode) i; enum machine_mode to; for (to = VOIDmode; to < MAX_MACHINE_MODE; ++to) { int r; for (r = 0; r < FIRST_PSEUDO_REGISTER; r++) if (REG_CANNOT_CHANGE_MODE_P (from, to, r)) SET_HARD_REG_BIT (invalid_mode_change_regs, r); } } #endif for (an_unusable_color = 0; an_unusable_color < FIRST_PSEUDO_REGISTER; an_unusable_color++) if (TEST_HARD_REG_BIT (never_use_colors, an_unusable_color)) break; if (an_unusable_color == FIRST_PSEUDO_REGISTER) abort (); orig_max_uid = get_max_uid (); compute_bb_for_insn (); ra_reg_renumber = NULL; insns_with_deaths = sbitmap_alloc (orig_max_uid); death_insns_max_uid = orig_max_uid; sbitmap_ones (insns_with_deaths); gcc_obstack_init (&ra_obstack); } /* Check the consistency of DF. This aborts if it violates some invariances we expect. */ static void check_df (struct df *df) { struct df_link *link; rtx insn; int regno; unsigned int ui; bitmap b = BITMAP_XMALLOC (); bitmap empty_defs = BITMAP_XMALLOC (); bitmap empty_uses = BITMAP_XMALLOC (); /* Collect all the IDs of NULL references in the ID->REF arrays, as df.c leaves them when updating the df structure. */ for (ui = 0; ui < df->def_id; ui++) if (!df->defs[ui]) bitmap_set_bit (empty_defs, ui); for (ui = 0; ui < df->use_id; ui++) if (!df->uses[ui]) bitmap_set_bit (empty_uses, ui); /* For each insn we check if the chain of references contain each ref only once, doesn't contain NULL refs, or refs whose ID is invalid (it df->refs[id] element is NULL). */ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { bitmap_clear (b); for (link = DF_INSN_DEFS (df, insn); link; link = link->next) if (!link->ref || bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)) || bitmap_bit_p (b, DF_REF_ID (link->ref))) abort (); else bitmap_set_bit (b, DF_REF_ID (link->ref)); bitmap_clear (b); for (link = DF_INSN_USES (df, insn); link; link = link->next) if (!link->ref || bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)) || bitmap_bit_p (b, DF_REF_ID (link->ref))) abort (); else bitmap_set_bit (b, DF_REF_ID (link->ref)); } /* Now the same for the chains per register number. 
*/ for (regno = 0; regno < max_reg_num (); regno++) { bitmap_clear (b); for (link = df->regs[regno].defs; link; link = link->next) if (!link->ref || bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)) || bitmap_bit_p (b, DF_REF_ID (link->ref))) abort (); else bitmap_set_bit (b, DF_REF_ID (link->ref)); bitmap_clear (b); for (link = df->regs[regno].uses; link; link = link->next) if (!link->ref || bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)) || bitmap_bit_p (b, DF_REF_ID (link->ref))) abort (); else bitmap_set_bit (b, DF_REF_ID (link->ref)); } BITMAP_XFREE (empty_uses); BITMAP_XFREE (empty_defs); BITMAP_XFREE (b); } /* Main register allocator entry point. */ void reg_alloc (void) { int changed; FILE *ra_dump_file = dump_file; rtx last = get_last_insn (); if (! INSN_P (last)) last = prev_real_insn (last); /* If this is an empty function we shouldn't do all the following, but instead just setup what's necessary, and return. */ /* We currently rely on the existence of the return value USE as one of the last insns. Add it if it's not there anymore. */ if (last) { edge e; for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) { basic_block bb = e->src; last = BB_END (bb); if (!INSN_P (last) || GET_CODE (PATTERN (last)) != USE) { rtx insns; start_sequence (); use_return_register (); insns = get_insns (); end_sequence (); emit_insn_after (insns, last); } } } /* Setup debugging levels. */ switch (0) { /* Some useful presets of the debug level, I often use. */ case 0: debug_new_regalloc = DUMP_EVER; break; case 1: debug_new_regalloc = DUMP_COSTS; break; case 2: debug_new_regalloc = DUMP_IGRAPH_M; break; case 3: debug_new_regalloc = DUMP_COLORIZE + DUMP_COSTS; break; case 4: debug_new_regalloc = DUMP_COLORIZE + DUMP_COSTS + DUMP_WEBS; break; case 5: debug_new_regalloc = DUMP_FINAL_RTL + DUMP_COSTS + DUMP_CONSTRAINTS; break; case 6: debug_new_regalloc = DUMP_VALIDIFY; break; } if (!dump_file) debug_new_regalloc = 0; /* Run regclass first, so we know the preferred and alternate classes for each pseudo. Deactivate emitting of debug info, if it's not explicitly requested. */ if ((debug_new_regalloc & DUMP_REGCLASS) == 0) dump_file = NULL; regclass (get_insns (), max_reg_num (), dump_file); dump_file = ra_dump_file; /* We don't use those NOTEs, and as we anyway change all registers, they only make problems later. */ count_or_remove_death_notes (NULL, 1); /* Initialize the different global arrays and regsets. */ init_ra (); /* And some global variables. */ ra_pass = 0; no_new_pseudos = 0; max_normal_pseudo = (unsigned) max_reg_num (); ra_rewrite_init (); last_def_id = 0; last_use_id = 0; last_num_webs = 0; last_max_uid = 0; last_check_uses = NULL; live_at_end = NULL; WEBS(INITIAL) = NULL; WEBS(FREE) = NULL; memset (hardreg2web, 0, sizeof (hardreg2web)); ticks_build = ticks_rebuild = 0; /* The default is to use optimistic coalescing with interference region spilling, without biased coloring. */ flag_ra_biased = 0; flag_ra_spill_every_use = 0; flag_ra_improved_spilling = 1; flag_ra_ir_spilling = 1; flag_ra_break_aliases = 0; flag_ra_optimistic_coalescing = 1; flag_ra_merge_spill_costs = 1; if (flag_ra_optimistic_coalescing) flag_ra_break_aliases = 1; flag_ra_dump_notes = 0; /* Allocate the global df structure. */ df = df_init (); /* This is the main loop, calling one_pass as long as there are still some spilled webs. 
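In outline (an editorial summary of the loop below, not additional behaviour): do { df_analyze (...); alloc_mem (df); changed = one_pass (df, ra_pass > 1); if (!changed) { emit_colors (df); setup_renumber (0); delete_moves (); } else { allocate_reg_info (...); reg_scan_update (...); regclass (...); } reset_lists (); free_mem (df); } while (changed);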
*/ do { ra_debug_msg (DUMP_NEARLY_EVER, "RegAlloc Pass %d\n\n", ra_pass); if (ra_pass++ > 40) internal_error ("Didn't find a coloring.\n"); /* First collect all the register refs and put them into chains per insn, and per regno. In later passes only update that info from the new and modified insns. */ df_analyze (df, (ra_pass == 1) ? 0 : (bitmap) -1, DF_HARD_REGS | DF_RD_CHAIN | DF_RU_CHAIN | DF_FOR_REGALLOC); if ((debug_new_regalloc & DUMP_DF) != 0) { rtx insn; df_dump (df, DF_HARD_REGS, dump_file); for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) df_insn_debug_regno (df, insn, dump_file); } check_df (df); /* Now allocate the memory needed for this pass, or (if it's not the first pass), reallocate only additional memory. */ alloc_mem (df); /* Build and colorize the interference graph, and possibly emit spill insns. This also might delete certain move insns. */ changed = one_pass (df, ra_pass > 1); /* If that produced no changes, the graph was colorizable. */ if (!changed) { /* Change the insns to refer to the new pseudos (one per web). */ emit_colors (df); /* Already setup a preliminary reg_renumber[] array, but don't free our own version. reg_renumber[] will again be destroyed later. We right now need it in dump_constraints() for constrain_operands(1) whose subproc sometimes reference it (because we are checking strictly, i.e. as if after reload). */ setup_renumber (0); /* Delete some more of the coalesced moves. */ delete_moves (); dump_constraints (); } else { /* If there were changes, this means spill code was added, therefore repeat some things, including some initialization of global data structures. */ if ((debug_new_regalloc & DUMP_REGCLASS) == 0) dump_file = NULL; /* We have new pseudos (the stackwebs). */ allocate_reg_info (max_reg_num (), FALSE, FALSE); /* And new insns. */ compute_bb_for_insn (); /* Some of them might be dead. */ delete_trivially_dead_insns (get_insns (), max_reg_num ()); /* Those new pseudos need to have their REFS count set. */ reg_scan_update (get_insns (), NULL, max_regno); max_regno = max_reg_num (); /* And they need useful classes too. */ regclass (get_insns (), max_reg_num (), dump_file); dump_file = ra_dump_file; /* Remember the number of defs and uses, so we can distinguish new from old refs in the next pass. */ last_def_id = df->def_id; last_use_id = df->use_id; } /* Output the graph, and possibly the current insn sequence. */ dump_ra (df); if (changed && (debug_new_regalloc & DUMP_RTL) != 0) { ra_print_rtl_with_bb (dump_file, get_insns ()); fflush (dump_file); } /* Reset the web lists. */ reset_lists (); free_mem (df); } while (changed); /* We are done with allocation, free all memory and output some debug info. */ free_all_mem (df); df_finish (df); if ((debug_new_regalloc & DUMP_RESULTS) == 0) dump_cost (DUMP_COSTS); ra_debug_msg (DUMP_COSTS, "ticks for build-phase: %ld\n", ticks_build); ra_debug_msg (DUMP_COSTS, "ticks for rebuild-phase: %ld\n", ticks_rebuild); if ((debug_new_regalloc & (DUMP_FINAL_RTL | DUMP_RTL)) != 0) ra_print_rtl_with_bb (dump_file, get_insns ()); /* We might have new pseudos, so allocate the info arrays for them. */ if ((debug_new_regalloc & DUMP_SM) == 0) dump_file = NULL; no_new_pseudos = 0; allocate_reg_info (max_reg_num (), FALSE, FALSE); no_new_pseudos = 1; dump_file = ra_dump_file; /* Some spill insns could've been inserted after trapping calls, i.e. at the end of a basic block, which really ends at that call. Fixup that breakages by adjusting basic block boundaries. 
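   A sketch of the situation (informal, relying on the usual behavior of
   fixup_abnormal_edges ()): when a block ends in a call that may throw,
   and a spill insn was emitted textually after that call, the spill insn
   now sits beyond the point where the block really ends; it has to be
   moved onto the non-exceptional fallthrough edge before the flow graph
   is cleaned up below.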
*/ fixup_abnormal_edges (); /* Cleanup the flow graph. */ if ((debug_new_regalloc & DUMP_LAST_FLOW) == 0) dump_file = NULL; life_analysis (dump_file, PROP_DEATH_NOTES | PROP_LOG_LINKS | PROP_REG_INFO); cleanup_cfg (CLEANUP_EXPENSIVE); recompute_reg_usage (get_insns (), TRUE); if (dump_file) dump_flow_info (dump_file); dump_file = ra_dump_file; /* update_equiv_regs() can't be called after register allocation. It might delete some pseudos, and insert other insns setting up those pseudos in different places. This of course screws up the allocation because that may destroy a hardreg for another pseudo. XXX we probably should do something like that on our own. I.e. creating REG_EQUIV notes. */ /*update_equiv_regs ();*/ /* Setup the reg_renumber[] array for reload. */ setup_renumber (1); sbitmap_free (insns_with_deaths); /* Remove REG_DEAD notes which are incorrectly set. See the docu of that function. */ remove_suspicious_death_notes (); if ((debug_new_regalloc & DUMP_LAST_RTL) != 0) ra_print_rtl_with_bb (dump_file, get_insns ()); dump_static_insn_cost (dump_file, "after allocation/spilling, before reload", NULL); /* Allocate the reg_equiv_memory_loc array for reload. */ VARRAY_GROW (reg_equiv_memory_loc_varray, max_regno); reg_equiv_memory_loc = &VARRAY_RTX (reg_equiv_memory_loc_varray, 0); /* And possibly initialize it. */ allocate_initial_values (reg_equiv_memory_loc); /* And one last regclass pass just before reload. */ regclass (get_insns (), max_reg_num (), dump_file); } /* vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: */ /* Graph coloring register allocator Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Matz and Daniel Berlin This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is part of the graph coloring register allocator. It deals with building the interference graph. When rebuilding the graph for a function after spilling, we rebuild only those parts needed, i.e. it works incrementally. The first part (the functions called from build_web_parts_and_conflicts() ) constructs a web_part for each pseudo register reference in the insn stream, then goes backward from each use, until it reaches defs for that pseudo. While going back it remember seen defs for other registers as conflicts. By connecting the uses and defs, which reach each other, webs (or live ranges) are built conceptually. The second part (make_webs() and children) deals with converting that structure to the nodes and edges, on which our interference graph is built. For each root web part constructed above, an instance of struct web is created. For all subregs of pseudos, which matter for allocation, a subweb of the corresponding super web is built. Finally all the conflicts noted in the first part (as bitmaps) are transformed into real edges. 
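   An informal example (pseudo RTL, purely for illustration):

     insn 1:  (set (reg 100) ...)
     insn 2:  (set (reg 101) ...)
     insn 3:  (set (reg 102) (plus (reg 100) (reg 101)))
     insn 4:  (set (reg 100) ...)
     insn 5:  ... (reg 100) ...

   Walking backward from the use of reg 100 in insn 3 we pass the def of
   reg 101 in insn 2 (remembered as a conflict) and stop at the def in
   insn 1, so that def and that use form one web.  The use in insn 5
   already stops at the def in insn 4, giving a second, independent web
   for the same pseudo.  This conceptual walk is what the two parts just
   described implement.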
As part of that process the webs are also classified (their spill cost is calculated, and if they are spillable at all, and if not, for what reason; or if they are rematerializable), and move insns are collected, which are potentially coalescable. The top-level entry of this file (build_i_graph) puts it all together, and leaves us with a complete interference graph, which just has to be colored. */ struct curr_use; static unsigned HOST_WIDE_INT rtx_to_undefined (rtx); static bitmap find_sub_conflicts (struct web_part *, unsigned int); static bitmap get_sub_conflicts (struct web_part *, unsigned int); static unsigned int undef_to_size_word (rtx, unsigned HOST_WIDE_INT *); static bitmap undef_to_bitmap (struct web_part *, unsigned HOST_WIDE_INT *); static struct web_part * find_web_part_1 (struct web_part *); static struct web_part * union_web_part_roots (struct web_part *, struct web_part *); static int defuse_overlap_p_1 (rtx, struct curr_use *); static int live_out_1 (struct df *, struct curr_use *, rtx); static int live_out (struct df *, struct curr_use *, rtx); static rtx live_in_edge ( struct df *, struct curr_use *, edge); static void live_in (struct df *, struct curr_use *, rtx); static int copy_insn_p (rtx, rtx *, rtx *); static void remember_move (rtx); static void handle_asm_insn (struct df *, rtx); static void prune_hardregs_for_mode (HARD_REG_SET *, enum machine_mode); static void init_one_web_common (struct web *, rtx); static void init_one_web (struct web *, rtx); static void reinit_one_web (struct web *, rtx); static struct web * add_subweb (struct web *, rtx); static struct web * add_subweb_2 (struct web *, unsigned int); static void init_web_parts (struct df *); static void copy_conflict_list (struct web *); static void add_conflict_edge (struct web *, struct web *); static void build_inverse_webs (struct web *); static void copy_web (struct web *, struct web_link **); static void compare_and_free_webs (struct web_link **); static void init_webs_defs_uses (void); static unsigned int parts_to_webs_1 (struct df *, struct web_link **, struct df_link *); static void parts_to_webs (struct df *); static void reset_conflicts (void); #if 0 static void check_conflict_numbers (void) #endif static void conflicts_between_webs (struct df *); static void remember_web_was_spilled (struct web *); static void detect_spill_temps (void); static int contains_pseudo (rtx); static int want_to_remat (rtx x); static void detect_remat_webs (void); static void determine_web_costs (void); static void detect_webs_set_in_cond_jump (void); static void make_webs (struct df *); static void moves_to_webs (struct df *); static void connect_rmw_web_parts (struct df *); static void update_regnos_mentioned (void); static void livethrough_conflicts_bb (basic_block); static void init_bb_info (void); static void free_bb_info_ra (void); static void build_web_parts_and_conflicts (struct df *); /* A sbitmap of DF_REF_IDs of uses, which are live over an abnormal edge. */ static sbitmap live_over_abnormal; /* To cache if we already saw a certain edge while analyzing one use, we use a tick count incremented per use. */ static unsigned int visited_pass; /* A sbitmap of UIDs of move insns, which we already analyzed. */ static sbitmap move_handled; /* One such structed is allocated per insn, and traces for the currently analyzed use, which web part belongs to it, and how many bytes of it were still undefined when that insn was reached. 
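   The undefined member is a bitmask with one bit per byte of the register
   (see rtx_to_undefined () below).  For example, assuming 8 byte DImode
   and 4 byte SImode, a use of (reg:DI 100) starts out with undefined ==
   0xff, and a use of (subreg:SI (reg:DI 100) 4) with 0xf0; bits get
   cleared as defs covering those bytes are found.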
*/ struct visit_trace { struct web_part *wp; unsigned HOST_WIDE_INT undefined; }; /* Indexed by UID. */ static struct visit_trace *visit_trace; /* Per basic block we have one such structure, used to speed up the backtracing of uses. */ struct ra_bb_info { /* The value of visited_pass, as the first insn of this BB was reached the last time. If this equals the current visited_pass, then undefined is valid. Otherwise not. */ unsigned int pass; /* The still undefined bytes at that time. The use to which this is relative is the current use. */ unsigned HOST_WIDE_INT undefined; /* Bit regno is set, if that regno is mentioned in this BB as a def, or the source of a copy insn. In these cases we can not skip the whole block if we reach it from the end. */ bitmap regnos_mentioned; /* If a use reaches the end of a BB, and that BB doesn't mention its regno, we skip the block, and remember the ID of that use as living throughout the whole block. */ bitmap live_throughout; /* The content of the aux field before placing a pointer to this structure there. */ void *old_aux; }; /* We need a fast way to describe a certain part of a register. Therefore we put together the size and offset (in bytes) in one integer. */ #define BL_TO_WORD(b, l) ((((b) & 0xFFFF) << 16) | ((l) & 0xFFFF)) #define BYTE_BEGIN(i) (((unsigned int)(i) >> 16) & 0xFFFF) #define BYTE_LENGTH(i) ((unsigned int)(i) & 0xFFFF) /* For a REG or SUBREG expression X return the size/offset pair as an integer. */ unsigned int rtx_to_bits (rtx x) { unsigned int len, beg; len = GET_MODE_SIZE (GET_MODE (x)); beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0; return BL_TO_WORD (beg, len); } /* X is a REG or SUBREG rtx. Return the bytes it touches as a bitmask. */ static unsigned HOST_WIDE_INT rtx_to_undefined (rtx x) { unsigned int len, beg; unsigned HOST_WIDE_INT ret; len = GET_MODE_SIZE (GET_MODE (x)); beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0; ret = ~ ((unsigned HOST_WIDE_INT) 0); ret = (~(ret << len)) << beg; return ret; } /* We remember if we've analyzed an insn for being a move insn, and if yes between which operands. */ struct copy_p_cache { int seen; rtx source; rtx target; }; /* On demand cache, for if insns are copy insns, and if yes, what source/target they have. */ static struct copy_p_cache * copy_cache; int *number_seen; /* For INSN, return nonzero, if it's a move insn, we consider to coalesce later, and place the operands in *SOURCE and *TARGET, if they are not NULL. */ static int copy_insn_p (rtx insn, rtx *source, rtx *target) { rtx d, s; unsigned int d_regno, s_regno; int uid = INSN_UID (insn); if (!INSN_P (insn)) abort (); /* First look, if we already saw this insn. */ if (copy_cache[uid].seen) { /* And if we saw it, if it's actually a copy insn. */ if (copy_cache[uid].seen == 1) { if (source) *source = copy_cache[uid].source; if (target) *target = copy_cache[uid].target; return 1; } return 0; } /* Mark it as seen, but not being a copy insn. */ copy_cache[uid].seen = 2; insn = single_set (insn); if (!insn) return 0; d = SET_DEST (insn); s = SET_SRC (insn); /* We recognize moves between subreg's as copy insns. This is used to avoid conflicts of those subwebs. But they are currently _not_ used for coalescing (the check for this is in remember_move() below). 
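   For example (illustrative only)

     (set (subreg:SI (reg:DI 100) 0) (subreg:SI (reg:DI 101) 0))

   is recognized as a copy here, which is what lets us avoid needless
   conflicts between the subwebs involved, but remember_move () skips it
   for coalescing, because source and target are not bare REGs.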
*/ while (GET_CODE (d) == STRICT_LOW_PART) d = XEXP (d, 0); if (!REG_P (d) && (GET_CODE (d) != SUBREG || !REG_P (SUBREG_REG (d)))) return 0; while (GET_CODE (s) == STRICT_LOW_PART) s = XEXP (s, 0); if (!REG_P (s) && (GET_CODE (s) != SUBREG || !REG_P (SUBREG_REG (s)))) return 0; s_regno = (unsigned) REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s); d_regno = (unsigned) REGNO (GET_CODE (d) == SUBREG ? SUBREG_REG (d) : d); /* Copies between hardregs are useless for us, as not coalesable anyway. */ if ((s_regno < FIRST_PSEUDO_REGISTER && d_regno < FIRST_PSEUDO_REGISTER) || s_regno >= max_normal_pseudo || d_regno >= max_normal_pseudo) return 0; if (source) *source = s; if (target) *target = d; /* Still mark it as seen, but as a copy insn this time. */ copy_cache[uid].seen = 1; copy_cache[uid].source = s; copy_cache[uid].target = d; return 1; } /* We build webs, as we process the conflicts. For each use we go upward the insn stream, noting any defs as potentially conflicting with the current use. We stop at defs of the current regno. The conflicts are only potentially, because we may never reach a def, so this is an undefined use, which conflicts with nothing. */ /* Given a web part WP, and the location of a reg part SIZE_WORD return the conflict bitmap for that reg part, or NULL if it doesn't exist yet in WP. */ static bitmap find_sub_conflicts (struct web_part *wp, unsigned int size_word) { struct tagged_conflict *cl; cl = wp->sub_conflicts; for (; cl; cl = cl->next) if (cl->size_word == size_word) return cl->conflicts; return NULL; } /* Similar to find_sub_conflicts(), but creates that bitmap, if it doesn't exist. I.e. this never returns NULL. */ static bitmap get_sub_conflicts (struct web_part *wp, unsigned int size_word) { bitmap b = find_sub_conflicts (wp, size_word); if (!b) { struct tagged_conflict *cl = ra_alloc (sizeof *cl); cl->conflicts = BITMAP_XMALLOC (); cl->size_word = size_word; cl->next = wp->sub_conflicts; wp->sub_conflicts = cl; b = cl->conflicts; } return b; } /* Helper table for undef_to_size_word() below for small values of UNDEFINED. Offsets and lengths are byte based. */ static struct undef_table_s { unsigned int new_undef; /* size | (byte << 16) */ unsigned int size_word; } const undef_table [] = { { 0, BL_TO_WORD (0, 0)}, /* 0 */ { 0, BL_TO_WORD (0, 1)}, { 0, BL_TO_WORD (1, 1)}, { 0, BL_TO_WORD (0, 2)}, { 0, BL_TO_WORD (2, 1)}, /* 4 */ { 1, BL_TO_WORD (2, 1)}, { 2, BL_TO_WORD (2, 1)}, { 3, BL_TO_WORD (2, 1)}, { 0, BL_TO_WORD (3, 1)}, /* 8 */ { 1, BL_TO_WORD (3, 1)}, { 2, BL_TO_WORD (3, 1)}, { 3, BL_TO_WORD (3, 1)}, { 0, BL_TO_WORD (2, 2)}, /* 12 */ { 1, BL_TO_WORD (2, 2)}, { 2, BL_TO_WORD (2, 2)}, { 0, BL_TO_WORD (0, 4)}}; /* Interpret *UNDEFINED as bitmask where each bit corresponds to a byte. A set bit means an undefined byte. Factor all undefined bytes into groups, and return a size/ofs pair of consecutive undefined bytes, but according to certain borders. Clear out those bits corresponding to bytes overlaid by that size/ofs pair. REG is only used for the mode, to detect if it's a floating mode or not. For example: *UNDEFINED size+ofs new *UNDEFINED 1111 4+0 0 1100 2+2 0 1101 2+2 1 0001 1+0 0 10101 1+4 101 */ static unsigned int undef_to_size_word (rtx reg, unsigned HOST_WIDE_INT *undefined) { /* When only the lower four bits are possibly set, we use a fast lookup table. */ if (*undefined <= 15) { struct undef_table_s u; u = undef_table[*undefined]; *undefined = u.new_undef; return u.size_word; } /* Otherwise we handle certain cases directly. 
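   A worked example of the cases below: *UNDEFINED == 0x0ff0 (bytes 4 to
   11 undefined) returns the part at byte 8 of length 4 and leaves 0xf0,
   so the following call peels off bytes 4 to 7.  Values not matched by
   the switch fall through to the generic code, which handles the lowest
   nonzero four-byte group by the lookup table, e.g. 0xf00000 yields the
   part at byte 20 of length 4 and leaves 0.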
*/ if (*undefined <= 0xffff) switch ((int) *undefined) { case 0x00f0 : *undefined = 0; return BL_TO_WORD (4, 4); case 0x00ff : *undefined = 0; return BL_TO_WORD (0, 8); case 0x0f00 : *undefined = 0; return BL_TO_WORD (8, 4); case 0x0ff0 : *undefined = 0xf0; return BL_TO_WORD (8, 4); case 0x0fff : if (INTEGRAL_MODE_P (GET_MODE (reg))) { *undefined = 0xff; return BL_TO_WORD (8, 4); } else { *undefined = 0; return BL_TO_WORD (0, 12); /* XFmode */ } case 0xf000 : *undefined = 0; return BL_TO_WORD (12, 4); case 0xff00 : *undefined = 0; return BL_TO_WORD (8, 8); case 0xfff0 : *undefined = 0xf0; return BL_TO_WORD (8, 8); case 0xffff : *undefined = 0; return BL_TO_WORD (0, 16); } /* And if nothing matched fall back to the general solution. For now unknown undefined bytes are converted to sequences of maximal length 4 bytes. We could make this larger if necessary. */ { unsigned HOST_WIDE_INT u = *undefined; int word; struct undef_table_s tab; for (word = 0; (u & 15) == 0; word += 4) u >>= 4; u = u & 15; tab = undef_table[u]; u = tab.new_undef; u = (*undefined & ~((unsigned HOST_WIDE_INT)15 << word)) | (u << word); *undefined = u; /* Size remains the same, only the begin is moved up move bytes. */ return tab.size_word + BL_TO_WORD (word, 0); } } /* Put the above three functions together. For a set of undefined bytes as bitmap *UNDEFINED, look for (create if necessary) and return the corresponding conflict bitmap. Change *UNDEFINED to remove the bytes covered by the part for that bitmap. */ static bitmap undef_to_bitmap (struct web_part *wp, unsigned HOST_WIDE_INT *undefined) { unsigned int size_word = undef_to_size_word (DF_REF_REAL_REG (wp->ref), undefined); return get_sub_conflicts (wp, size_word); } /* Returns the root of the web part P is a member of. Additionally it compresses the path. P may not be NULL. */ static struct web_part * find_web_part_1 (struct web_part *p) { struct web_part *r = p; struct web_part *p_next; while (r->uplink) r = r->uplink; for (; p != r; p = p_next) { p_next = p->uplink; p->uplink = r; } return r; } /* Fast macro for the common case (WP either being the root itself, or the end of an already compressed path. */ #define find_web_part(wp) ((! (wp)->uplink) ? (wp) \ : (! (wp)->uplink->uplink) ? (wp)->uplink : find_web_part_1 (wp)) /* Unions together the parts R1 resp. R2 is a root of. All dynamic information associated with the parts (number of spanned insns and so on) is also merged. The root of the resulting (possibly larger) web part is returned. */ static struct web_part * union_web_part_roots (struct web_part *r1, struct web_part *r2) { if (r1 != r2) { /* The new root is the smaller (pointerwise) of both. This is crucial to make the construction of webs from web parts work (so, when scanning all parts, we see the roots before all its children). Additionally this ensures, that if the web has a def at all, than the root is a def (because all def parts are before use parts in the web_parts[] array), or put another way, as soon, as the root of a web part is not a def, this is an uninitialized web part. The way we construct the I-graph ensures, that if a web is initialized, then the first part we find (besides trivial 1 item parts) has a def. */ if (r1 > r2) { struct web_part *h = r1; r1 = r2; r2 = h; } r2->uplink = r1; num_webs--; /* Now we merge the dynamic information of R1 and R2. 
*/ r1->spanned_deaths += r2->spanned_deaths; if (!r1->sub_conflicts) r1->sub_conflicts = r2->sub_conflicts; else if (r2->sub_conflicts) /* We need to merge the conflict bitmaps from R2 into R1. */ { struct tagged_conflict *cl1, *cl2; /* First those from R2, which are also contained in R1. We union the bitmaps, and free those from R2, resetting them to 0. */ for (cl1 = r1->sub_conflicts; cl1; cl1 = cl1->next) for (cl2 = r2->sub_conflicts; cl2; cl2 = cl2->next) if (cl1->size_word == cl2->size_word) { bitmap_operation (cl1->conflicts, cl1->conflicts, cl2->conflicts, BITMAP_IOR); BITMAP_XFREE (cl2->conflicts); cl2->conflicts = NULL; } /* Now the conflict lists from R2 which weren't in R1. We simply copy the entries from R2 into R1' list. */ for (cl2 = r2->sub_conflicts; cl2;) { struct tagged_conflict *cl_next = cl2->next; if (cl2->conflicts) { cl2->next = r1->sub_conflicts; r1->sub_conflicts = cl2; } cl2 = cl_next; } } r2->sub_conflicts = NULL; r1->crosses_call |= r2->crosses_call; } return r1; } /* Convenience macro, that is capable of unioning also non-roots. */ #define union_web_parts(p1, p2) \ ((p1 == p2) ? find_web_part (p1) \ : union_web_part_roots (find_web_part (p1), find_web_part (p2))) /* Remember that we've handled a given move, so we don't reprocess it. */ static void remember_move (rtx insn) { if (!TEST_BIT (move_handled, INSN_UID (insn))) { rtx s, d; SET_BIT (move_handled, INSN_UID (insn)); if (copy_insn_p (insn, &s, &d)) { /* Some sanity test for the copy insn. */ struct df_link *slink = DF_INSN_USES (df, insn); struct df_link *link = DF_INSN_DEFS (df, insn); if (!link || !link->ref || !slink || !slink->ref) abort (); /* The following (link->next != 0) happens when a hardreg is used in wider mode (REG:DI %eax). Then df.* creates a def/use for each hardreg contained therein. We only allow hardregs here. */ if (link->next && DF_REF_REGNO (link->next->ref) >= FIRST_PSEUDO_REGISTER) abort (); } else abort (); /* XXX for now we don't remember move insns involving any subregs. Those would be difficult to coalesce (we would need to implement handling of all the subwebs in the allocator, including that such subwebs could be source and target of coalescing). */ if (REG_P (s) && REG_P (d)) { struct move *m = ra_calloc (sizeof (struct move)); struct move_list *ml; m->insn = insn; ml = ra_alloc (sizeof (struct move_list)); ml->move = m; ml->next = wl_moves; wl_moves = ml; } } } /* This describes the USE currently looked at in the main-loop in build_web_parts_and_conflicts(). */ struct curr_use { struct web_part *wp; /* This has a 1-bit for each byte in the USE, which is still undefined. */ unsigned HOST_WIDE_INT undefined; /* For easy access. */ unsigned int regno; rtx x; /* If some bits of this USE are live over an abnormal edge. */ unsigned int live_over_abnormal; }; /* Returns nonzero iff rtx DEF and USE have bits in common (but see below). It is only called with DEF and USE being (reg:M a) or (subreg:M1 (reg:M2 a) x) rtx's. Furthermore if it's a subreg rtx M1 is at least one word wide, and a is a multi-word pseudo. If DEF or USE are hardregs, they are in word_mode, so we don't need to check for further hardregs which would result from wider references. We are never called with paradoxical subregs. This returns: 0 for no common bits, 1 if DEF and USE exactly cover the same bytes, 2 if the DEF only covers a part of the bits of USE 3 if the DEF covers more than the bits of the USE, and 4 if both are SUBREG's of different size, but have bytes in common. 
-1 is a special case, for when DEF and USE refer to the same regno, but have for other reasons no bits in common (can only happen with subregs referring to different words, or to words which already were defined for this USE). Furthermore it modifies use->undefined to clear the bits which get defined by DEF (only for cases with partial overlap). I.e. if bit 1 is set for the result != -1, the USE was completely covered, otherwise a test is needed to track the already defined bytes. */ static int defuse_overlap_p_1 (rtx def, struct curr_use *use) { int mode = 0; if (def == use->x) return 1; if (!def) return 0; if (GET_CODE (def) == SUBREG) { if (REGNO (SUBREG_REG (def)) != use->regno) return 0; mode |= 1; } else if (REGNO (def) != use->regno) return 0; if (GET_CODE (use->x) == SUBREG) mode |= 2; switch (mode) { case 0: /* REG, REG */ return 1; case 1: /* SUBREG, REG */ { unsigned HOST_WIDE_INT old_u = use->undefined; use->undefined &= ~ rtx_to_undefined (def); return (old_u != use->undefined) ? 2 : -1; } case 2: /* REG, SUBREG */ return 3; case 3: /* SUBREG, SUBREG */ if (GET_MODE_SIZE (GET_MODE (def)) == GET_MODE_SIZE (GET_MODE (use->x))) /* If the size of both things is the same, the subreg's overlap if they refer to the same word. */ if (SUBREG_BYTE (def) == SUBREG_BYTE (use->x)) return 1; /* Now the more difficult part: the same regno is referred, but the sizes of the references or the words differ. E.g. (subreg:SI (reg:CDI a) 0) and (subreg:DI (reg:CDI a) 2) do not overlap, whereas the latter overlaps with (subreg:SI (reg:CDI a) 3). */ { unsigned HOST_WIDE_INT old_u; int b1, e1, b2, e2; unsigned int bl1, bl2; bl1 = rtx_to_bits (def); bl2 = rtx_to_bits (use->x); b1 = BYTE_BEGIN (bl1); b2 = BYTE_BEGIN (bl2); e1 = b1 + BYTE_LENGTH (bl1) - 1; e2 = b2 + BYTE_LENGTH (bl2) - 1; if (b1 > e2 || b2 > e1) return -1; old_u = use->undefined; use->undefined &= ~ rtx_to_undefined (def); return (old_u != use->undefined) ? 4 : -1; } default: abort (); } } /* Macro for the common case of either def and use having the same rtx, or based on different regnos. */ #define defuse_overlap_p(def, use) \ ((def) == (use)->x ? 1 : \ (REGNO (GET_CODE (def) == SUBREG \ ? SUBREG_REG (def) : def) != use->regno \ ? 0 : defuse_overlap_p_1 (def, use))) /* The use USE flows into INSN (backwards). Determine INSNs effect on it, and return nonzero, if (parts of) that USE are also live before it. This also notes conflicts between the USE and all DEFS in that insn, and modifies the undefined bits of USE in case parts of it were set in this insn. */ static int live_out_1 (struct df *df ATTRIBUTE_UNUSED, struct curr_use *use, rtx insn) { int defined = 0; int uid = INSN_UID (insn); struct web_part *wp = use->wp; /* Mark, that this insn needs this webpart live. */ visit_trace[uid].wp = wp; visit_trace[uid].undefined = use->undefined; if (INSN_P (insn)) { unsigned int source_regno = ~0; unsigned int regno = use->regno; unsigned HOST_WIDE_INT orig_undef = use->undefined; unsigned HOST_WIDE_INT final_undef = use->undefined; rtx s = NULL; unsigned int n, num_defs = insn_df[uid].num_defs; struct ref **defs = insn_df[uid].defs; /* We want to access the root webpart. */ wp = find_web_part (wp); if (GET_CODE (insn) == CALL_INSN) wp->crosses_call = 1; else if (copy_insn_p (insn, &s, NULL)) source_regno = REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s); /* Look at all DEFS in this insn. 
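   To make that concrete with a made-up case: if the USE is (reg:DI 100)
   with undefined == 0xff and one DEF of this insn is
   (subreg:SI (reg:DI 100) 0), defuse_overlap_p () returns 2, the low four
   bytes are cleared leaving 0xf0, and only the upper half of the USE is
   searched for further DEFs; a DEF which is (reg:DI 100) itself would
   return 1 and end the traversal completely.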
*/ for (n = 0; n < num_defs; n++) { struct ref *ref = defs[n]; int lap; /* Reset the undefined bits for each iteration, in case this insn has more than one set, and one of them sets this regno. But still the original undefined part conflicts with the other sets. */ use->undefined = orig_undef; if ((lap = defuse_overlap_p (DF_REF_REG (ref), use)) != 0) { if (lap == -1) /* Same regnos but non-overlapping or already defined bits, so ignore this DEF, or better said make the yet undefined part and this DEF conflicting. */ { unsigned HOST_WIDE_INT undef; undef = use->undefined; while (undef) bitmap_set_bit (undef_to_bitmap (wp, &undef), DF_REF_ID (ref)); continue; } if ((lap & 1) != 0) /* The current DEF completely covers the USE, so we can stop traversing the code looking for further DEFs. */ defined = 1; else /* We have a partial overlap. */ { final_undef &= use->undefined; if (final_undef == 0) /* Now the USE is completely defined, which means, that we can stop looking for former DEFs. */ defined = 1; /* If this is a partial overlap, which left some bits in USE undefined, we normally would need to create conflicts between that undefined part and the part of this DEF which overlapped with some of the formerly undefined bits. We don't need to do this, because both parts of this DEF (that which overlaps, and that which doesn't) are written together in this one DEF, and can not be colored in a way which would conflict with the USE. This is only true for partial overlap, because only then the DEF and USE have bits in common, which makes the DEF move, if the USE moves, making them aligned. If they have no bits in common (lap == -1), they are really independent. Therefore we there made a conflict above. */ } /* This is at least a partial overlap, so we need to union the web parts. */ wp = union_web_parts (wp, &web_parts[DF_REF_ID (ref)]); } else { /* The DEF and the USE don't overlap at all, different regnos. I.e. make conflicts between the undefined bits, and that DEF. */ unsigned HOST_WIDE_INT undef = use->undefined; if (regno == source_regno) /* This triggers only, when this was a copy insn and the source is at least a part of the USE currently looked at. In this case only the bits of the USE conflict with the DEF, which are not covered by the source of this copy insn, and which are still undefined. I.e. in the best case (the whole reg being the source), _no_ conflicts between that USE and this DEF (the target of the move) are created by this insn (though they might be by others). This is a super case of the normal copy insn only between full regs. */ { undef &= ~ rtx_to_undefined (s); } if (undef) { /*struct web_part *cwp; cwp = find_web_part (&web_parts[DF_REF_ID (ref)]);*/ /* TODO: somehow instead of noting the ID of the LINK use an ID nearer to the root webpart of that LINK. We can't use the root itself, because we later use the ID to look at the form (reg or subreg, and if yes, which subreg) of this conflict. This means, that we need to remember in the root an ID for each form, and maintaining this, when merging web parts. This makes the bitmaps smaller. */ do bitmap_set_bit (undef_to_bitmap (wp, &undef), DF_REF_ID (ref)); while (undef); } } } if (defined) use->undefined = 0; else { /* If this insn doesn't completely define the USE, increment also it's spanned deaths count (if this insn contains a death). 
*/ if (uid >= death_insns_max_uid) abort (); if (TEST_BIT (insns_with_deaths, uid)) wp->spanned_deaths++; use->undefined = final_undef; } } return !defined; } /* Same as live_out_1() (actually calls it), but caches some information. E.g. if we reached this INSN with the current regno already, and the current undefined bits are a subset of those as we came here, we simply connect the web parts of the USE, and the one cached for this INSN, and additionally return zero, indicating we don't need to traverse this path any longer (all effect were already seen, as we first reached this insn). */ static inline int live_out (struct df *df, struct curr_use *use, rtx insn) { unsigned int uid = INSN_UID (insn); if (visit_trace[uid].wp && DF_REF_REGNO (visit_trace[uid].wp->ref) == use->regno && (use->undefined & ~visit_trace[uid].undefined) == 0) { union_web_parts (visit_trace[uid].wp, use->wp); /* Don't search any further, as we already were here with this regno. */ return 0; } else return live_out_1 (df, use, insn); } /* The current USE reached a basic block head. The edge E is one of the predecessors edges. This evaluates the effect of the predecessor block onto the USE, and returns the next insn, which should be looked at. This either is the last insn of that pred. block, or the first one. The latter happens, when the pred. block has no possible effect on the USE, except for conflicts. In that case, it's remembered, that the USE is live over that whole block, and it's skipped. Otherwise we simply continue with the last insn of the block. This also determines the effects of abnormal edges, and remembers which uses are live at the end of that basic block. */ static rtx live_in_edge (struct df *df, struct curr_use *use, edge e) { struct ra_bb_info *info_pred; rtx next_insn; /* Call used hard regs die over an exception edge, ergo they don't reach the predecessor block, so ignore such uses. And also don't set the live_over_abnormal flag for them. */ if ((e->flags & EDGE_EH) && use->regno < FIRST_PSEUDO_REGISTER && call_used_regs[use->regno]) return NULL_RTX; if (e->flags & EDGE_ABNORMAL) use->live_over_abnormal = 1; bitmap_set_bit (live_at_end[e->src->index], DF_REF_ID (use->wp->ref)); info_pred = (struct ra_bb_info *) e->src->aux; next_insn = BB_END (e->src); /* If the last insn of the pred. block doesn't completely define the current use, we need to check the block. */ if (live_out (df, use, next_insn)) { /* If the current regno isn't mentioned anywhere in the whole block, and the complete use is still undefined... */ if (!bitmap_bit_p (info_pred->regnos_mentioned, use->regno) && (rtx_to_undefined (use->x) & ~use->undefined) == 0) { /* ...we can hop over the whole block and defer conflict creation to later. */ bitmap_set_bit (info_pred->live_throughout, DF_REF_ID (use->wp->ref)); next_insn = BB_HEAD (e->src); } return next_insn; } else return NULL_RTX; } /* USE flows into the end of the insns preceding INSN. Determine their effects (in live_out()) and possibly loop over the preceding INSN, or call itself recursively on a basic block border. When a topleve call of this function returns the USE is completely analyzed. I.e. its def-use chain (at least) is built, possibly connected with other def-use chains, and all defs during that chain are noted. */ static void live_in (struct df *df, struct curr_use *use, rtx insn) { unsigned int loc_vpass = visited_pass; /* Note, that, even _if_ we are called with use->wp a root-part, this might become non-root in the for() loop below (due to live_out() unioning it). 
So beware, not to change use->wp in a way, for which only root-webs are allowed. */ while (1) { int uid = INSN_UID (insn); basic_block bb = BLOCK_FOR_INSN (insn); number_seen[uid]++; /* We want to be as fast as possible, so explicitly write this loop. */ for (insn = PREV_INSN (insn); insn && !INSN_P (insn); insn = PREV_INSN (insn)) ; if (!insn) return; if (bb != BLOCK_FOR_INSN (insn)) { edge e; unsigned HOST_WIDE_INT undef = use->undefined; struct ra_bb_info *info = (struct ra_bb_info *) bb->aux; if ((e = bb->pred) == NULL) return; /* We now check, if we already traversed the predecessors of this block for the current pass and the current set of undefined bits. If yes, we don't need to check the predecessors again. So, conceptually this information is tagged to the first insn of a basic block. */ if (info->pass == loc_vpass && (undef & ~info->undefined) == 0) return; info->pass = loc_vpass; info->undefined = undef; /* All but the last predecessor are handled recursively. */ for (; e->pred_next; e = e->pred_next) { insn = live_in_edge (df, use, e); if (insn) live_in (df, use, insn); use->undefined = undef; } insn = live_in_edge (df, use, e); if (!insn) return; } else if (!live_out (df, use, insn)) return; } } /* Determine all regnos which are mentioned in a basic block, in an interesting way. Interesting here means either in a def, or as the source of a move insn. We only look at insns added since the last pass. */ static void update_regnos_mentioned (void) { int last_uid = last_max_uid; rtx insn; basic_block bb; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { /* Don't look at old insns. */ if (INSN_UID (insn) < last_uid) { /* XXX We should also remember moves over iterations (we already save the cache, but not the movelist). */ if (copy_insn_p (insn, NULL, NULL)) remember_move (insn); } else if ((bb = BLOCK_FOR_INSN (insn)) != NULL) { rtx source; struct ra_bb_info *info = (struct ra_bb_info *) bb->aux; bitmap mentioned = info->regnos_mentioned; struct df_link *link; if (copy_insn_p (insn, &source, NULL)) { remember_move (insn); bitmap_set_bit (mentioned, REGNO (GET_CODE (source) == SUBREG ? SUBREG_REG (source) : source)); } for (link = DF_INSN_DEFS (df, insn); link; link = link->next) if (link->ref) bitmap_set_bit (mentioned, DF_REF_REGNO (link->ref)); } } } /* Handle the uses which reach a block end, but were deferred due to it's regno not being mentioned in that block. This adds the remaining conflicts and updates also the crosses_call and spanned_deaths members. */ static void livethrough_conflicts_bb (basic_block bb) { struct ra_bb_info *info = (struct ra_bb_info *) bb->aux; rtx insn; bitmap all_defs; int first, use_id; unsigned int deaths = 0; unsigned int contains_call = 0; /* If there are no deferred uses, just return. */ if ((first = bitmap_first_set_bit (info->live_throughout)) < 0) return; /* First collect the IDs of all defs, count the number of death containing insns, and if there's some call_insn here. */ all_defs = BITMAP_XMALLOC (); for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { unsigned int n; struct ra_insn_info info; info = insn_df[INSN_UID (insn)]; for (n = 0; n < info.num_defs; n++) bitmap_set_bit (all_defs, DF_REF_ID (info.defs[n])); if (TEST_BIT (insns_with_deaths, INSN_UID (insn))) deaths++; if (GET_CODE (insn) == CALL_INSN) contains_call = 1; } if (insn == BB_END (bb)) break; } /* And now, if we have found anything, make all live_through uses conflict with all defs, and update their other members. 
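   Suppose, purely as an illustration, this block consists of three insns
   defining regs 101, 102 and 103, one of them a call: every use deferred
   into live_throughout then gets all three def IDs IORed into its
   conflict bitmap, its spanned_deaths member grows by the number of
   death-containing insns counted above, and crosses_call gets set, just
   as if the backward walk had stepped through the block insn by insn.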
*/ if (deaths > 0 || contains_call || bitmap_first_set_bit (all_defs) >= 0) EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id, { struct web_part *wp = &web_parts[df->def_id + use_id]; unsigned int bl = rtx_to_bits (DF_REF_REG (wp->ref)); bitmap conflicts; wp = find_web_part (wp); wp->spanned_deaths += deaths; wp->crosses_call |= contains_call; conflicts = get_sub_conflicts (wp, bl); bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR); }); BITMAP_XFREE (all_defs); } /* Allocate the per basic block info for traversing the insn stream for building live ranges. */ static void init_bb_info (void) { basic_block bb; FOR_ALL_BB (bb) { struct ra_bb_info *info = xcalloc (1, sizeof *info); info->regnos_mentioned = BITMAP_XMALLOC (); info->live_throughout = BITMAP_XMALLOC (); info->old_aux = bb->aux; bb->aux = (void *) info; } } /* Free that per basic block info. */ static void free_bb_info_ra (void) { basic_block bb; FOR_ALL_BB (bb) { struct ra_bb_info *info = (struct ra_bb_info *) bb->aux; BITMAP_XFREE (info->regnos_mentioned); BITMAP_XFREE (info->live_throughout); bb->aux = info->old_aux; free (info); } } /* Toplevel function for the first part of this file. Connect web parts, thereby implicitly building webs, and remember their conflicts. */ static void build_web_parts_and_conflicts (struct df *df) { struct df_link *link; struct curr_use use; basic_block bb; number_seen = xcalloc (get_max_uid (), sizeof (int)); visit_trace = xcalloc (get_max_uid (), sizeof (visit_trace[0])); update_regnos_mentioned (); /* Here's the main loop. It goes through all insn's, connects web parts along the way, notes conflicts between webparts, and remembers move instructions. */ visited_pass = 0; for (use.regno = 0; use.regno < (unsigned int)max_regno; use.regno++) if (use.regno >= FIRST_PSEUDO_REGISTER || !fixed_regs[use.regno]) for (link = df->regs[use.regno].uses; link; link = link->next) if (link->ref) { struct ref *ref = link->ref; rtx insn = DF_REF_INSN (ref); /* Only recheck marked or new uses, or uses from hardregs. */ if (use.regno >= FIRST_PSEUDO_REGISTER && DF_REF_ID (ref) < last_use_id && !TEST_BIT (last_check_uses, DF_REF_ID (ref))) continue; use.wp = &web_parts[df->def_id + DF_REF_ID (ref)]; use.x = DF_REF_REG (ref); use.live_over_abnormal = 0; use.undefined = rtx_to_undefined (use.x); visited_pass++; live_in (df, &use, insn); if (use.live_over_abnormal) SET_BIT (live_over_abnormal, DF_REF_ID (ref)); } dump_number_seen (); FOR_ALL_BB (bb) { struct ra_bb_info *info = (struct ra_bb_info *) bb->aux; livethrough_conflicts_bb (bb); bitmap_zero (info->live_throughout); info->pass = 0; } free (visit_trace); free (number_seen); } /* Here we look per insn, for DF references being in uses _and_ defs. This means, in the RTL a (REG xx) expression was seen as a read/modify/write, as happens for (set (subreg:SI (reg:DI xx)) (...)) e.g. Our code has created two webs for this, as it should. Unfortunately, as the REG reference is only one time in the RTL we can't color both webs different (arguably this also would be wrong for a real read-mod-write instruction), so we must reconnect such webs. */ static void connect_rmw_web_parts (struct df *df) { unsigned int i; for (i = 0; i < df->use_id; i++) { struct web_part *wp1 = &web_parts[df->def_id + i]; rtx reg; struct df_link *link; if (!wp1->ref) continue; /* If it's an uninitialized web, we don't want to connect it to others, as the read cycle in read-mod-write had probably no effect. 
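   The test below relies on the ordering guaranteed by
   union_web_part_roots (): def parts occupy web_parts[0] up to
   web_parts[df->def_id - 1], use parts come after them, and the root of a
   union is always the pointerwise smallest member, so a root lying at or
   above &web_parts[df->def_id] means the web contains no def at all,
   i.e. it is uninitialized.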
*/ if (find_web_part (wp1) >= &web_parts[df->def_id]) continue; reg = DF_REF_REAL_REG (wp1->ref); link = DF_INSN_DEFS (df, DF_REF_INSN (wp1->ref)); for (; link; link = link->next) if (reg == DF_REF_REAL_REG (link->ref)) { struct web_part *wp2 = &web_parts[DF_REF_ID (link->ref)]; union_web_parts (wp1, wp2); } } } /* Deletes all hardregs from *S which are not allowed for MODE. */ static void prune_hardregs_for_mode (HARD_REG_SET *s, enum machine_mode mode) { AND_HARD_REG_SET (*s, hardregs_for_mode[(int) mode]); } /* Initialize the members of a web, which are deducible from REG. */ static void init_one_web_common (struct web *web, rtx reg) { if (!REG_P (reg)) abort (); /* web->id isn't initialized here. */ web->regno = REGNO (reg); web->orig_x = reg; if (!web->dlink) { web->dlink = ra_calloc (sizeof (struct dlist)); DLIST_WEB (web->dlink) = web; } /* XXX the former (superunion) doesn't constrain the graph enough. E.g. on x86 QImode _requires_ QI_REGS, but as alternate class usually GENERAL_REGS is given. So the graph is not constrained enough, thinking it has more freedom then it really has, which leads to repeated spill tryings. OTOH the latter (only using preferred class) is too constrained, as normally (e.g. with all SImode pseudos), they can be allocated also in the alternate class. What we really want, are the _exact_ hard regs allowed, not just a class. Later. */ /*web->regclass = reg_class_superunion [reg_preferred_class (web->regno)] [reg_alternate_class (web->regno)];*/ /*web->regclass = reg_preferred_class (web->regno);*/ web->regclass = reg_class_subunion [reg_preferred_class (web->regno)] [reg_alternate_class (web->regno)]; web->regclass = reg_preferred_class (web->regno); if (web->regno < FIRST_PSEUDO_REGISTER) { web->color = web->regno; put_web (web, PRECOLORED); web->num_conflicts = UINT_MAX; web->add_hardregs = 0; CLEAR_HARD_REG_SET (web->usable_regs); SET_HARD_REG_BIT (web->usable_regs, web->regno); web->num_freedom = 1; } else { HARD_REG_SET alternate; web->color = -1; put_web (web, INITIAL); /* add_hardregs is wrong in multi-length classes, e.g. using a DFmode pseudo on x86 can result in class FLOAT_INT_REGS, where, if it finally is allocated to GENERAL_REGS it needs two, if allocated to FLOAT_REGS only one hardreg. XXX */ web->add_hardregs = CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1; web->num_conflicts = 0 * web->add_hardregs; COPY_HARD_REG_SET (web->usable_regs, reg_class_contents[reg_preferred_class (web->regno)]); COPY_HARD_REG_SET (alternate, reg_class_contents[reg_alternate_class (web->regno)]); IOR_HARD_REG_SET (web->usable_regs, alternate); /*IOR_HARD_REG_SET (web->usable_regs, reg_class_contents[reg_alternate_class (web->regno)]);*/ AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors); prune_hardregs_for_mode (&web->usable_regs, PSEUDO_REGNO_MODE (web->regno)); #ifdef CANNOT_CHANGE_MODE_CLASS if (web->mode_changed) AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs); #endif web->num_freedom = hard_regs_count (web->usable_regs); web->num_freedom -= web->add_hardregs; if (!web->num_freedom) abort(); } COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs); } /* Initializes WEBs members from REG or zero them. */ static void init_one_web (struct web *web, rtx reg) { memset (web, 0, sizeof (struct web)); init_one_web_common (web, reg); web->useless_conflicts = BITMAP_XMALLOC (); } /* WEB is an old web, meaning it came from the last pass, and got a color. 
We want to remember some of it's info, so zero only some members. */ static void reinit_one_web (struct web *web, rtx reg) { web->old_color = web->color + 1; init_one_web_common (web, reg); web->span_deaths = 0; web->spill_temp = 0; web->orig_spill_temp = 0; web->use_my_regs = 0; web->spill_cost = 0; web->was_spilled = 0; web->is_coalesced = 0; web->artificial = 0; web->live_over_abnormal = 0; web->mode_changed = 0; web->subreg_stripped = 0; web->move_related = 0; web->in_load = 0; web->target_of_spilled_move = 0; web->num_aliased = 0; if (web->type == PRECOLORED) { web->num_defs = 0; web->num_uses = 0; web->orig_spill_cost = 0; } CLEAR_HARD_REG_SET (web->bias_colors); CLEAR_HARD_REG_SET (web->prefer_colors); web->reg_rtx = NULL; web->stack_slot = NULL; web->pattern = NULL; web->alias = NULL; if (web->moves) abort (); if (!web->useless_conflicts) abort (); } /* Insert and returns a subweb corresponding to REG into WEB (which becomes its super web). It must not exist already. */ static struct web * add_subweb (struct web *web, rtx reg) { struct web *w; if (GET_CODE (reg) != SUBREG) abort (); w = xmalloc (sizeof (struct web)); /* Copy most content from parent-web. */ *w = *web; /* And initialize the private stuff. */ w->orig_x = reg; w->add_hardregs = CLASS_MAX_NREGS (web->regclass, GET_MODE (reg)) - 1; w->num_conflicts = 0 * w->add_hardregs; w->num_defs = 0; w->num_uses = 0; w->dlink = NULL; w->parent_web = web; w->subreg_next = web->subreg_next; web->subreg_next = w; return w; } /* Similar to add_subweb(), but instead of relying on a given SUBREG, we have just a size and an offset of the subpart of the REG rtx. In difference to add_subweb() this marks the new subweb as artificial. */ static struct web * add_subweb_2 (struct web *web, unsigned int size_word) { /* To get a correct mode for the to be produced subreg, we don't want to simply do a mode_for_size() for the mode_class of the whole web. Suppose we deal with a CDImode web, but search for a 8 byte part. Now mode_for_size() would only search in the class MODE_COMPLEX_INT and would find CSImode which probably is not what we want. Instead we want DImode, which is in a completely other class. For this to work we instead first search the already existing subwebs, and take _their_ modeclasses as base for a search for ourself. */ rtx ref_rtx = (web->subreg_next ? web->subreg_next : web)->orig_x; unsigned int size = BYTE_LENGTH (size_word) * BITS_PER_UNIT; enum machine_mode mode; mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0); if (mode == BLKmode) mode = mode_for_size (size, MODE_INT, 0); if (mode == BLKmode) abort (); web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x, BYTE_BEGIN (size_word))); web->artificial = 1; return web; } /* Initialize all the web parts we are going to need. */ static void init_web_parts (struct df *df) { int regno; unsigned int no; num_webs = 0; for (no = 0; no < df->def_id; no++) { if (df->defs[no]) { if (no < last_def_id && web_parts[no].ref != df->defs[no]) abort (); web_parts[no].ref = df->defs[no]; /* Uplink might be set from the last iteration. */ if (!web_parts[no].uplink) num_webs++; } else /* The last iteration might have left .ref set, while df_analyze() removed that ref (due to a removed copy insn) from the df->defs[] array. As we don't check for that in realloc_web_parts() we do that here. 
*/ web_parts[no].ref = NULL; } for (no = 0; no < df->use_id; no++) { if (df->uses[no]) { if (no < last_use_id && web_parts[no + df->def_id].ref != df->uses[no]) abort (); web_parts[no + df->def_id].ref = df->uses[no]; if (!web_parts[no + df->def_id].uplink) num_webs++; } else web_parts[no + df->def_id].ref = NULL; } /* We want to have only one web for each precolored register. */ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) { struct web_part *r1 = NULL; struct df_link *link; /* Here once was a test, if there is any DEF at all, and only then to merge all the parts. This was incorrect, we really also want to have only one web-part for hardregs, even if there is no explicit DEF. */ /* Link together all defs... */ for (link = df->regs[regno].defs; link; link = link->next) if (link->ref) { struct web_part *r2 = &web_parts[DF_REF_ID (link->ref)]; if (!r1) r1 = r2; else r1 = union_web_parts (r1, r2); } /* ... and all uses. */ for (link = df->regs[regno].uses; link; link = link->next) if (link->ref) { struct web_part *r2 = &web_parts[df->def_id + DF_REF_ID (link->ref)]; if (!r1) r1 = r2; else r1 = union_web_parts (r1, r2); } } } /* In case we want to remember the conflict list of a WEB, before adding new conflicts, we copy it here to orig_conflict_list. */ static void copy_conflict_list (struct web *web) { struct conflict_link *cl; if (web->orig_conflict_list || web->have_orig_conflicts) abort (); web->have_orig_conflicts = 1; for (cl = web->conflict_list; cl; cl = cl->next) { struct conflict_link *ncl; ncl = ra_alloc (sizeof *ncl); ncl->t = cl->t; ncl->sub = NULL; ncl->next = web->orig_conflict_list; web->orig_conflict_list = ncl; if (cl->sub) { struct sub_conflict *sl, *nsl; for (sl = cl->sub; sl; sl = sl->next) { nsl = ra_alloc (sizeof *nsl); nsl->s = sl->s; nsl->t = sl->t; nsl->next = ncl->sub; ncl->sub = nsl; } } } } /* Possibly add an edge from web FROM to TO marking a conflict between those two. This is one half of marking a complete conflict, which notes in FROM, that TO is a conflict. Adding TO to FROM's conflicts might make other conflicts superfluous, because the current TO overlaps some web already being in conflict with FROM. In this case the smaller webs are deleted from the conflict list. Likewise if TO is overlapped by a web already in the list, it isn't added at all. Note, that this can only happen, if SUBREG webs are involved. */ static void add_conflict_edge (struct web *from, struct web *to) { if (from->type != PRECOLORED) { struct web *pfrom = find_web_for_subweb (from); struct web *pto = find_web_for_subweb (to); struct sub_conflict *sl; struct conflict_link *cl = pfrom->conflict_list; int may_delete = 1; /* This can happen when subwebs of one web conflict with each other. In live_out_1() we created such conflicts between yet undefined webparts and defs of parts which didn't overlap with the undefined bits. Then later they nevertheless could have merged into one web, and then we land here. */ if (pfrom == pto) return; if (remember_conflicts && !pfrom->have_orig_conflicts) copy_conflict_list (pfrom); if (!TEST_BIT (sup_igraph, (pfrom->id * num_webs + pto->id))) { cl = ra_alloc (sizeof (*cl)); cl->t = pto; cl->sub = NULL; cl->next = pfrom->conflict_list; pfrom->conflict_list = cl; if (pto->type != SELECT && pto->type != COALESCED) pfrom->num_conflicts += 1 + pto->add_hardregs; SET_BIT (sup_igraph, (pfrom->id * num_webs + pto->id)); may_delete = 0; } else /* We don't need to test for cl==NULL, because at this point a cl with cl->t==pto is guaranteed to exist. 
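   (The sup_igraph bit tested above is set exactly when such a
   conflict_link is pushed onto pfrom's list, so seeing the bit set
   implies the entry must be on the list.)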
*/ while (cl->t != pto) cl = cl->next; if (pfrom != from || pto != to) { /* This is a subconflict which should be added. If we inserted cl in this invocation, we really need to add this subconflict. If we did _not_ add it here, we only add the subconflict, if cl already had subconflicts, because otherwise this indicated, that the whole webs already conflict, which means we are not interested in this subconflict. */ if (!may_delete || cl->sub != NULL) { sl = ra_alloc (sizeof (*sl)); sl->s = from; sl->t = to; sl->next = cl->sub; cl->sub = sl; } } else /* pfrom == from && pto == to means, that we are not interested anymore in the subconflict list for this pair, because anyway the whole webs conflict. */ cl->sub = NULL; } } /* Record a conflict between two webs, if we haven't recorded it already. */ void record_conflict (struct web *web1, struct web *web2) { unsigned int id1 = web1->id, id2 = web2->id; unsigned int index = igraph_index (id1, id2); /* Trivial non-conflict or already recorded conflict. */ if (web1 == web2 || TEST_BIT (igraph, index)) return; if (id1 == id2) abort (); /* As fixed_regs are no targets for allocation, conflicts with them are pointless. */ if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno]) || (web2->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web2->regno])) return; /* Conflicts with hardregs, which are not even a candidate for this pseudo are also pointless. */ if ((web1->type == PRECOLORED && ! TEST_HARD_REG_BIT (web2->usable_regs, web1->regno)) || (web2->type == PRECOLORED && ! TEST_HARD_REG_BIT (web1->usable_regs, web2->regno))) return; /* Similar if the set of possible hardregs don't intersect. This iteration those conflicts are useless (and would make num_conflicts wrong, because num_freedom is calculated from the set of possible hardregs). But in presence of spilling and incremental building of the graph we need to note all uses of webs conflicting with the spilled ones. Because the set of possible hardregs can change in the next round for spilled webs, we possibly have then conflicts with webs which would be excluded now (because then hardregs intersect). But we actually need to check those uses, and to get hold of them, we need to remember also webs conflicting with this one, although not conflicting in this round because of non-intersecting hardregs. */ if (web1->type != PRECOLORED && web2->type != PRECOLORED && ! hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs)) { struct web *p1 = find_web_for_subweb (web1); struct web *p2 = find_web_for_subweb (web2); /* We expect these to be rare enough to justify bitmaps. And because we have only a special use for it, we note only the superwebs. */ bitmap_set_bit (p1->useless_conflicts, p2->id); bitmap_set_bit (p2->useless_conflicts, p1->id); return; } SET_BIT (igraph, index); add_conflict_edge (web1, web2); add_conflict_edge (web2, web1); } /* For each web W this produces the missing subwebs Wx, such that it's possible to exactly specify (W-Wy) for all already existing subwebs Wy. */ static void build_inverse_webs (struct web *web) { struct web *sweb = web->subreg_next; unsigned HOST_WIDE_INT undef; undef = rtx_to_undefined (web->orig_x); for (; sweb; sweb = sweb->subreg_next) /* Only create inverses of non-artificial webs. 
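   As an example (illustrative, assuming 4 byte SImode): if the super web
   is (reg:DI 100) and its only real subweb is (subreg:SI (reg:DI 100) 0),
   the remaining undefined bits are 0xf0, undef_to_size_word () turns them
   into the part at byte 4 of length 4, and an artificial SImode subweb at
   byte 4 is added, so that (W - Wy) can be referred to exactly.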
*/ if (!sweb->artificial) { unsigned HOST_WIDE_INT bits; bits = undef & ~ rtx_to_undefined (sweb->orig_x); while (bits) { unsigned int size_word = undef_to_size_word (web->orig_x, &bits); if (!find_subweb_2 (web, size_word)) add_subweb_2 (web, size_word); } } } /* Copies the content of WEB to a new one, and link it into WL. Used for consistency checking. */ static void copy_web (struct web *web, struct web_link **wl) { struct web *cweb = xmalloc (sizeof *cweb); struct web_link *link = ra_alloc (sizeof *link); link->next = *wl; *wl = link; link->web = cweb; *cweb = *web; } /* Given a list of webs LINK, compare the content of the webs therein with the global webs of the same ID. For consistency checking. */ static void compare_and_free_webs (struct web_link **link) { struct web_link *wl; for (wl = *link; wl; wl = wl->next) { struct web *web1 = wl->web; struct web *web2 = ID2WEB (web1->id); if (web1->regno != web2->regno || web1->mode_changed != web2->mode_changed || !rtx_equal_p (web1->orig_x, web2->orig_x) || web1->type != web2->type /* Only compare num_defs/num_uses with non-hardreg webs. E.g. the number of uses of the framepointer changes due to inserting spill code. */ || (web1->type != PRECOLORED && (web1->num_uses != web2->num_uses || web1->num_defs != web2->num_defs)) /* Similarly, if the framepointer was unreferenced originally but we added spills, these fields may not match. */ || (web1->type != PRECOLORED && web1->crosses_call != web2->crosses_call) || (web1->type != PRECOLORED && web1->live_over_abnormal != web2->live_over_abnormal)) abort (); if (web1->type != PRECOLORED) { unsigned int i; for (i = 0; i < web1->num_defs; i++) if (web1->defs[i] != web2->defs[i]) abort (); for (i = 0; i < web1->num_uses; i++) if (web1->uses[i] != web2->uses[i]) abort (); } if (web1->type == PRECOLORED) { if (web1->defs) free (web1->defs); if (web1->uses) free (web1->uses); } free (web1); } *link = NULL; } /* Setup and fill uses[] and defs[] arrays of the webs. */ static void init_webs_defs_uses (void) { struct dlist *d; for (d = WEBS(INITIAL); d; d = d->next) { struct web *web = DLIST_WEB (d); unsigned int def_i, use_i; struct df_link *link; if (web->old_web) continue; if (web->type == PRECOLORED) { web->num_defs = web->num_uses = 0; continue; } if (web->num_defs) web->defs = xmalloc (web->num_defs * sizeof (web->defs[0])); if (web->num_uses) web->uses = xmalloc (web->num_uses * sizeof (web->uses[0])); def_i = use_i = 0; for (link = web->temp_refs; link; link = link->next) { if (DF_REF_REG_DEF_P (link->ref)) web->defs[def_i++] = link->ref; else web->uses[use_i++] = link->ref; } web->temp_refs = NULL; if (def_i != web->num_defs || use_i != web->num_uses) abort (); } } /* Called by parts_to_webs(). This creates (or recreates) the webs (and subwebs) from web parts, gives them IDs (only to super webs), and sets up use2web and def2web arrays. */ static unsigned int parts_to_webs_1 (struct df *df, struct web_link **copy_webs, struct df_link *all_refs) { unsigned int i; unsigned int webnum; unsigned int def_id = df->def_id; unsigned int use_id = df->use_id; struct web_part *wp_first_use = &web_parts[def_id]; /* For each root web part: create and initialize a new web, setup def2web[] and use2web[] for all defs and uses, and id2web for all new webs. */ webnum = 0; for (i = 0; i < def_id + use_id; i++) { struct web *subweb, *web = 0; /* Initialize web to silence warnings. 
*/ struct web_part *wp = &web_parts[i]; struct ref *ref = wp->ref; unsigned int ref_id; rtx reg; if (!ref) continue; ref_id = i; if (i >= def_id) ref_id -= def_id; all_refs[i].ref = ref; reg = DF_REF_REG (ref); if (! wp->uplink) { /* If we have a web part root, create a new web. */ unsigned int newid = ~(unsigned)0; unsigned int old_web = 0; /* In the first pass, there are no old webs, so unconditionally allocate a new one. */ if (ra_pass == 1) { web = xmalloc (sizeof (struct web)); newid = last_num_webs++; init_one_web (web, GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg); } /* Otherwise, we look for an old web. */ else { /* Remember, that use2web == def2web + def_id. Ergo is def2web[i] == use2web[i - def_id] for i >= def_id. So we only need to look into def2web[] array. Try to look at the web, which formerly belonged to this def (or use). */ web = def2web[i]; /* Or which belonged to this hardreg. */ if (!web && DF_REF_REGNO (ref) < FIRST_PSEUDO_REGISTER) web = hardreg2web[DF_REF_REGNO (ref)]; if (web) { /* If we found one, reuse it. */ web = find_web_for_subweb (web); remove_list (web->dlink, &WEBS(INITIAL)); old_web = 1; copy_web (web, copy_webs); } else { /* Otherwise use a new one. First from the free list. */ if (WEBS(FREE)) web = DLIST_WEB (pop_list (&WEBS(FREE))); else { /* Else allocate a new one. */ web = xmalloc (sizeof (struct web)); newid = last_num_webs++; } } /* The id is zeroed in init_one_web(). */ if (newid == ~(unsigned)0) newid = web->id; if (old_web) reinit_one_web (web, GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg); else init_one_web (web, GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg); web->old_web = (old_web && web->type != PRECOLORED) ? 1 : 0; } web->span_deaths = wp->spanned_deaths; web->crosses_call = wp->crosses_call; web->id = newid; web->temp_refs = NULL; webnum++; if (web->regno < FIRST_PSEUDO_REGISTER && !hardreg2web[web->regno]) hardreg2web[web->regno] = web; else if (web->regno < FIRST_PSEUDO_REGISTER && hardreg2web[web->regno] != web) abort (); } /* If this reference already had a web assigned, we are done. This test better is equivalent to the web being an old web. Otherwise something is screwed. (This is tested) */ if (def2web[i] != NULL) { web = def2web[i]; web = find_web_for_subweb (web); /* But if this ref includes a mode change, or was a use live over an abnormal call, set appropriate flags in the web. */ if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0 && web->regno >= FIRST_PSEUDO_REGISTER) web->mode_changed = 1; if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0 && web->regno >= FIRST_PSEUDO_REGISTER) web->subreg_stripped = 1; if (i >= def_id && TEST_BIT (live_over_abnormal, ref_id)) web->live_over_abnormal = 1; /* And check, that it's not a newly allocated web. This would be an inconsistency. */ if (!web->old_web || web->type == PRECOLORED) abort (); continue; } /* In case this was no web part root, we need to initialize WEB from the ref2web array belonging to the root. */ if (wp->uplink) { struct web_part *rwp = find_web_part (wp); unsigned int j = DF_REF_ID (rwp->ref); if (rwp < wp_first_use) web = def2web[j]; else web = use2web[j]; web = find_web_for_subweb (web); } /* Remember all references for a web in a single linked list. */ all_refs[i].next = web->temp_refs; web->temp_refs = &all_refs[i]; /* And the test, that if def2web[i] was NULL above, that we are _not_ an old web. */ if (web->old_web && web->type != PRECOLORED) abort (); /* Possible create a subweb, if this ref was a subreg. 
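   For illustration (numbers invented): a ref like (subreg:SI (reg:DI 70) 4), i.e. the high word of a DImode pseudo, gets its own subweb hanging off the reg 70 superweb, and def2web[] resp. use2web[] for this ref will then point at that subweb rather than at the superweb.  A plain (reg:DI 70) ref keeps pointing at the superweb itself.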
*/ if (GET_CODE (reg) == SUBREG) { subweb = find_subweb (web, reg); if (!subweb) { subweb = add_subweb (web, reg); if (web->old_web) abort (); } } else subweb = web; /* And look, if the ref involves an invalid mode change. */ if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0 && web->regno >= FIRST_PSEUDO_REGISTER) web->mode_changed = 1; if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0 && web->regno >= FIRST_PSEUDO_REGISTER) web->subreg_stripped = 1; /* Setup def2web, or use2web, and increment num_defs or num_uses. */ if (i < def_id) { /* Some sanity checks. */ if (ra_pass > 1) { struct web *compare = def2web[i]; if (i < last_def_id) { if (web->old_web && compare != subweb) abort (); } if (!web->old_web && compare) abort (); if (compare && compare != subweb) abort (); } def2web[i] = subweb; web->num_defs++; } else { if (ra_pass > 1) { struct web *compare = use2web[ref_id]; if (ref_id < last_use_id) { if (web->old_web && compare != subweb) abort (); } if (!web->old_web && compare) abort (); if (compare && compare != subweb) abort (); } use2web[ref_id] = subweb; web->num_uses++; if (TEST_BIT (live_over_abnormal, ref_id)) web->live_over_abnormal = 1; } } /* We better now have exactly as many webs as we had web part roots. */ if (webnum != num_webs) abort (); return webnum; } /* This builds full webs out of web parts, without relating them to each other (i.e. without creating the conflict edges). */ static void parts_to_webs (struct df *df) { unsigned int i; unsigned int webnum; struct web_link *copy_webs = NULL; struct dlist *d; struct df_link *all_refs; num_subwebs = 0; /* First build webs and ordinary subwebs. */ all_refs = xcalloc (df->def_id + df->use_id, sizeof (all_refs[0])); webnum = parts_to_webs_1 (df, &copy_webs, all_refs); /* Setup the webs for hardregs which are still missing (weren't mentioned in the code). */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (!hardreg2web[i]) { struct web *web = xmalloc (sizeof (struct web)); init_one_web (web, gen_rtx_REG (reg_raw_mode[i], i)); web->id = last_num_webs++; hardreg2web[web->regno] = web; } num_webs = last_num_webs; /* Now create all artificial subwebs, i.e. those, which do not correspond to a real subreg in the current function's RTL, but which nevertheless are a target of a conflict. XXX we need to merge this loop with the one above, which means, we need a way to later override the artificiality. Beware: currently add_subweb_2() relies on the existence of normal subwebs for deducing a sane mode to use for the artificial subwebs. */ for (i = 0; i < df->def_id + df->use_id; i++) { struct web_part *wp = &web_parts[i]; struct tagged_conflict *cl; struct web *web; if (wp->uplink || !wp->ref) { if (wp->sub_conflicts) abort (); continue; } web = def2web[i]; web = find_web_for_subweb (web); for (cl = wp->sub_conflicts; cl; cl = cl->next) if (!find_subweb_2 (web, cl->size_word)) add_subweb_2 (web, cl->size_word); } /* And now create artificial subwebs needed for representing the inverse of some subwebs. This also gives IDs to all subwebs. */ webnum = last_num_webs; for (d = WEBS(INITIAL); d; d = d->next) { struct web *web = DLIST_WEB (d); if (web->subreg_next) { struct web *sweb; build_inverse_webs (web); for (sweb = web->subreg_next; sweb; sweb = sweb->subreg_next) sweb->id = webnum++; } } /* Now that everyone has an ID, we can setup the id2web array.
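   To recap the ID layout built up to here, roughly: the superwebs (pseudos and the hardreg webs) got the IDs 0 .. last_num_webs-1, and the loop above handed out the IDs last_num_webs .. webnum-1 to the subwebs.  So for some web with, say, id 5 and one subweb, ID2WEB (5) will be the superweb and ID2WEB (5)->subreg_next->id will be some id >= last_num_webs; exactly this mapping is recorded below.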
*/ id2web = xcalloc (webnum, sizeof (id2web[0])); for (d = WEBS(INITIAL); d; d = d->next) { struct web *web = DLIST_WEB (d); ID2WEB (web->id) = web; for (web = web->subreg_next; web; web = web->subreg_next) ID2WEB (web->id) = web; } num_subwebs = webnum - last_num_webs; num_allwebs = num_webs + num_subwebs; num_webs += num_subwebs; /* Allocate and clear the conflict graph bitmaps. */ igraph = sbitmap_alloc (num_webs * num_webs / 2); sup_igraph = sbitmap_alloc (num_webs * num_webs); sbitmap_zero (igraph); sbitmap_zero (sup_igraph); /* Distribute the references to their webs. */ init_webs_defs_uses (); /* And do some sanity checks, that the old webs and those recreated from the web parts really are the same. */ compare_and_free_webs (&copy_webs); free (all_refs); } /* This deletes all conflicts to and from webs which need to be renewed in this pass of the allocator, i.e. those which were spilled in the last pass. Furthermore it also rebuilds the bitmaps for the remaining conflicts. */ static void reset_conflicts (void) { unsigned int i; bitmap newwebs = BITMAP_XMALLOC (); for (i = 0; i < num_webs - num_subwebs; i++) { struct web *web = ID2WEB (i); /* Hardreg webs and non-old webs are new webs (which need rebuilding). */ if (web->type == PRECOLORED || !web->old_web) bitmap_set_bit (newwebs, web->id); } for (i = 0; i < num_webs - num_subwebs; i++) { struct web *web = ID2WEB (i); struct conflict_link *cl; struct conflict_link **pcl; pcl = &(web->conflict_list); /* First restore the conflict list to be like it was before coalescing. */ if (web->have_orig_conflicts) { web->conflict_list = web->orig_conflict_list; web->orig_conflict_list = NULL; } if (web->orig_conflict_list) abort (); /* New non-precolored webs have no conflict list. */ if (web->type != PRECOLORED && !web->old_web) { *pcl = NULL; /* Useless conflicts will be rebuilt completely. But check for cleanliness, as the web might have come from the free list. */ if (bitmap_first_set_bit (web->useless_conflicts) >= 0) abort (); } else { /* Useless conflicts with new webs will be rebuilt if they are still there. */ bitmap_operation (web->useless_conflicts, web->useless_conflicts, newwebs, BITMAP_AND_COMPL); /* Go through all conflicts, and retain those to old webs. */ for (cl = web->conflict_list; cl; cl = cl->next) { if (cl->t->old_web || cl->t->type == PRECOLORED) { *pcl = cl; pcl = &(cl->next); /* Also restore the entries in the igraph bitmaps. */ web->num_conflicts += 1 + cl->t->add_hardregs; SET_BIT (sup_igraph, (web->id * num_webs + cl->t->id)); /* No subconflicts mean full webs conflict. */ if (!cl->sub) SET_BIT (igraph, igraph_index (web->id, cl->t->id)); else /* Else only the parts in cl->sub must be in the bitmap. */ { struct sub_conflict *sl; for (sl = cl->sub; sl; sl = sl->next) SET_BIT (igraph, igraph_index (sl->s->id, sl->t->id)); } } } *pcl = NULL; } web->have_orig_conflicts = 0; } BITMAP_XFREE (newwebs); } /* For each web check its num_conflicts member against that number, as calculated from scratch from all neighbors. */ #if 0 static void check_conflict_numbers (void) { unsigned int i; for (i = 0; i < num_webs; i++) { struct web *web = ID2WEB (i); int new_conf = 0 * web->add_hardregs; struct conflict_link *cl; for (cl = web->conflict_list; cl; cl = cl->next) if (cl->t->type != SELECT && cl->t->type != COALESCED) new_conf += 1 + cl->t->add_hardregs; if (web->type != PRECOLORED && new_conf != web->num_conflicts) abort (); } } #endif /* Convert the conflicts between web parts to conflicts between full webs.
This can't be done in parts_to_webs(), because for recording conflicts between webs we need to know their final usable_regs set, which is used to discard non-conflicts (between webs having no hard reg in common). But this is set for spill temporaries only after the webs itself are built. Until then the usable_regs set is based on the pseudo regno used in this web, which may contain far less registers than later determined. This would result in us loosing conflicts (due to record_conflict() thinking that a web can only be allocated to the current usable_regs, whereas later this is extended) leading to colorings, where some regs which in reality conflict get the same color. */ static void conflicts_between_webs (struct df *df) { unsigned int i; #ifdef STACK_REGS struct dlist *d; #endif bitmap ignore_defs = BITMAP_XMALLOC (); unsigned int have_ignored; unsigned int *pass_cache = xcalloc (num_webs, sizeof (int)); unsigned int pass = 0; if (ra_pass > 1) reset_conflicts (); /* It is possible, that in the conflict bitmaps still some defs I are noted, which have web_parts[I].ref being NULL. This can happen, when from the last iteration the conflict bitmap for this part wasn't deleted, but a conflicting move insn was removed. It's DEF is still in the conflict bitmap, but it doesn't exist anymore in df->defs. To not have to check it in the tight loop below, we instead remember the ID's of them in a bitmap, and loop only over IDs which are not in it. */ for (i = 0; i < df->def_id; i++) if (web_parts[i].ref == NULL) bitmap_set_bit (ignore_defs, i); have_ignored = (bitmap_first_set_bit (ignore_defs) >= 0); /* Now record all conflicts between webs. Note that we only check the conflict bitmaps of all defs. Conflict bitmaps are only in webpart roots. If they are in uses, those uses are roots, which means, that this is an uninitialized web, whose conflicts don't matter. Nevertheless for hardregs we also need to check uses. E.g. hardregs used for argument passing have no DEF in the RTL, but if they have uses, they indeed conflict with all DEFs they overlap. */ for (i = 0; i < df->def_id + df->use_id; i++) { struct tagged_conflict *cl = web_parts[i].sub_conflicts; struct web *supweb1; if (!cl || (i >= df->def_id && DF_REF_REGNO (web_parts[i].ref) >= FIRST_PSEUDO_REGISTER)) continue; supweb1 = def2web[i]; supweb1 = find_web_for_subweb (supweb1); for (; cl; cl = cl->next) if (cl->conflicts) { int j; struct web *web1 = find_subweb_2 (supweb1, cl->size_word); if (have_ignored) bitmap_operation (cl->conflicts, cl->conflicts, ignore_defs, BITMAP_AND_COMPL); /* We reduce the number of calls to record_conflict() with this pass thing. record_conflict() itself also has some early-out optimizations, but here we can use the special properties of the loop (constant web1) to reduce that even more. We once used an sbitmap of already handled web indices, but sbitmaps are slow to clear and bitmaps are slow to set/test. The current approach needs more memory, but locality is large. */ pass++; /* Note, that there are only defs in the conflicts bitset. */ EXECUTE_IF_SET_IN_BITMAP ( cl->conflicts, 0, j, { struct web *web2 = def2web[j]; unsigned int id2 = web2->id; if (pass_cache[id2] != pass) { pass_cache[id2] = pass; record_conflict (web1, web2); } }); } } free (pass_cache); BITMAP_XFREE (ignore_defs); #ifdef STACK_REGS /* Pseudos can't go in stack regs if they are live at the beginning of a block that is reached by an abnormal edge. 
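   On i386, for instance, the stack registers are the eight x87 regs st(0) .. st(7) (FIRST_STACK_REG .. LAST_STACK_REG); the loop below simply records a conflict between every live_over_abnormal web and each of those hardreg webs, so such webs can never end up on the FP stack.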
*/ for (d = WEBS(INITIAL); d; d = d->next) { struct web *web = DLIST_WEB (d); int j; if (web->live_over_abnormal) for (j = FIRST_STACK_REG; j <= LAST_STACK_REG; j++) record_conflict (web, hardreg2web[j]); } #endif } /* Remember that a web was spilled, and change some characteristics accordingly. */ static void remember_web_was_spilled (struct web *web) { int i; unsigned int found_size = 0; int adjust; web->spill_temp = 1; /* From now on don't use reg_pref/alt_class (regno) anymore for this web, but instead usable_regs. We can't use spill_temp for this, as it might get reset later, when we are coalesced to a non-spill-temp. In that case we still want to use usable_regs. */ web->use_my_regs = 1; /* We don't constrain spill temporaries in any way for now. It's wrong sometimes to have the same constraints or preferences as the original pseudo, esp. if they were very narrow. (E.g. there once was a reg wanting class AREG (only one register) without alternative class. As long, as also the spill-temps for this pseudo had the same constraints it was spilled over and over. Ideally we want some constraints also on spill-temps: Because they are not only loaded/stored, but also worked with, any constraints from insn alternatives needs applying. Currently this is dealt with by reload, as many other things, but at some time we want to integrate that functionality into the allocator. */ if (web->regno >= max_normal_pseudo) { COPY_HARD_REG_SET (web->usable_regs, reg_class_contents[reg_preferred_class (web->regno)]); IOR_HARD_REG_SET (web->usable_regs, reg_class_contents[reg_alternate_class (web->regno)]); } else COPY_HARD_REG_SET (web->usable_regs, reg_class_contents[(int) GENERAL_REGS]); AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors); prune_hardregs_for_mode (&web->usable_regs, PSEUDO_REGNO_MODE (web->regno)); #ifdef CANNOT_CHANGE_MODE_CLASS if (web->mode_changed) AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs); #endif web->num_freedom = hard_regs_count (web->usable_regs); if (!web->num_freedom) abort(); COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs); /* Now look for a class, which is subset of our constraints, to setup add_hardregs, and regclass for debug output. */ web->regclass = NO_REGS; for (i = (int) ALL_REGS - 1; i > 0; i--) { unsigned int size; HARD_REG_SET test; COPY_HARD_REG_SET (test, reg_class_contents[i]); AND_COMPL_HARD_REG_SET (test, never_use_colors); GO_IF_HARD_REG_SUBSET (test, web->usable_regs, found); continue; found: /* Measure the actual number of bits which really are overlapping the target regset, not just the reg_class_size. */ size = hard_regs_count (test); if (found_size < size) { web->regclass = (enum reg_class) i; found_size = size; } } adjust = 0 * web->add_hardregs; web->add_hardregs = CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1; web->num_freedom -= web->add_hardregs; if (!web->num_freedom) abort(); adjust -= 0 * web->add_hardregs; web->num_conflicts -= adjust; } /* Look at each web, if it is used as spill web. Or better said, if it will be spillable in this pass. */ static void detect_spill_temps (void) { struct dlist *d; bitmap already = BITMAP_XMALLOC (); /* Detect webs used for spill temporaries. */ for (d = WEBS(INITIAL); d; d = d->next) { struct web *web = DLIST_WEB (d); /* Below only the detection of spill temporaries. We never spill precolored webs, so those can't be spill temporaries. The code above (remember_web_was_spilled) can't currently cope with hardregs anyway. 
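   As a reading aid, deduced from the assignments below and from how combine() merges the values, the spill_temp encoding is roughly: 0 = an ordinary spillable web, 1 = a real spill temporary, 2 = a web involving spill insns which may nevertheless be spilled again if needed, 3 = a short web (e.g. no uses, or spanning no deaths) which also shouldn't be spilled.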
*/ if (web->regno < FIRST_PSEUDO_REGISTER) continue; /* Uninitialized webs can't be spill-temporaries. */ if (web->num_defs == 0) continue; /* A web with only defs and no uses can't be spilled. Nevertheless it must get a color, as it takes away a register from all webs live at these defs. So we make it a short web. */ if (web->num_uses == 0) web->spill_temp = 3; /* A web which was spilled last time, but for which no insns were emitted (can happen with IR spilling ignoring sometimes all deaths). */ else if (web->changed) web->spill_temp = 1; /* A spill temporary has one def, one or more uses, all uses are in one insn, and either the def or use insn was inserted by the allocator. */ /* XXX not correct currently. There might also be spill temps involving more than one def. Usually that's an additional clobber in the using instruction. We might also constrain ourself to that, instead of like currently marking all webs involving any spill insns at all. */ else { unsigned int i; int spill_involved = 0; for (i = 0; i < web->num_uses && !spill_involved; i++) if (DF_REF_INSN_UID (web->uses[i]) >= orig_max_uid) spill_involved = 1; for (i = 0; i < web->num_defs && !spill_involved; i++) if (DF_REF_INSN_UID (web->defs[i]) >= orig_max_uid) spill_involved = 1; if (spill_involved/* && ra_pass > 2*/) { int num_deaths = web->span_deaths; /* Mark webs involving at least one spill insn as spill temps. */ remember_web_was_spilled (web); /* Search for insns which define and use the web in question at the same time, i.e. look for rmw insns. If these insns are also deaths of other webs they might have been counted as such into web->span_deaths. But because of the rmw nature of this insn it is no point where a load/reload could be placed successfully (it would still conflict with the dead web), so reduce the number of spanned deaths by those insns. Note that sometimes such deaths are _not_ counted, so negative values can result. */ bitmap_zero (already); for (i = 0; i < web->num_defs; i++) { rtx insn = web->defs[i]->insn; if (TEST_BIT (insns_with_deaths, INSN_UID (insn)) && !bitmap_bit_p (already, INSN_UID (insn))) { unsigned int j; bitmap_set_bit (already, INSN_UID (insn)); /* Only decrement it once for each insn. */ for (j = 0; j < web->num_uses; j++) if (web->uses[j]->insn == insn) { num_deaths--; break; } } } /* But mark them specially if they could possibly be spilled, either because they cross some deaths (without the above mentioned ones) or calls. */ if (web->crosses_call || num_deaths > 0) web->spill_temp = 1 * 2; } /* A web spanning no deaths can't be spilled either. No loads would be created for it, ergo no defs. So the insns wouldn't change making the graph not easier to color. Make this also a short web. Don't do this if it crosses calls, as these are also points of reloads. */ else if (web->span_deaths == 0 && !web->crosses_call) web->spill_temp = 3; } web->orig_spill_temp = web->spill_temp; } BITMAP_XFREE (already); } /* Returns nonzero if the rtx MEM refers somehow to a stack location. */ int memref_is_stack_slot (rtx mem) { rtx ad = XEXP (mem, 0); rtx x; if (GET_CODE (ad) != PLUS || GET_CODE (XEXP (ad, 1)) != CONST_INT) return 0; x = XEXP (ad, 0); if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]) || x == stack_pointer_rtx) return 1; return 0; } /* Returns nonzero, if rtx X somewhere contains any pseudo register. 
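   For example (pseudo RTL, numbers invented): contains_pseudo ((plus:SI (reg:SI 70) (const_int 4))) returns 1, assuming 70 >= FIRST_PSEUDO_REGISTER, whereas the same expression with a hard register instead of reg 70 returns 0.  detect_remat_webs() below uses this to reject rematerialization patterns whose value depends on other pseudos.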
*/ static int contains_pseudo (rtx x) { const char *fmt; int i; if (GET_CODE (x) == SUBREG) x = SUBREG_REG (x); if (REG_P (x)) { if (REGNO (x) >= FIRST_PSEUDO_REGISTER) return 1; else return 0; } fmt = GET_RTX_FORMAT (GET_CODE (x)); for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) if (fmt[i] == 'e') { if (contains_pseudo (XEXP (x, i))) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (contains_pseudo (XVECEXP (x, i, j))) return 1; } return 0; } /* Returns nonzero, if we are able to rematerialize something with value X. If it's not a general operand, we test if we can produce a valid insn which set a pseudo to that value, and that insn doesn't clobber anything. */ static GTY(()) rtx remat_test_insn; static int want_to_remat (rtx x) { int num_clobbers = 0; int icode; /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */ if (general_operand (x, GET_MODE (x))) return 1; /* Otherwise, check if we can make a valid insn from it. First initialize our test insn if we haven't already. */ if (remat_test_insn == 0) { remat_test_insn = make_insn_raw (gen_rtx_SET (VOIDmode, gen_rtx_REG (word_mode, FIRST_PSEUDO_REGISTER * 2), const0_rtx)); NEXT_INSN (remat_test_insn) = PREV_INSN (remat_test_insn) = 0; } /* Now make an insn like the one we would make when rematerializing the value X and see if valid. */ PUT_MODE (SET_DEST (PATTERN (remat_test_insn)), GET_MODE (x)); SET_SRC (PATTERN (remat_test_insn)) = x; /* XXX For now we don't allow any clobbers to be added, not just no hardreg clobbers. */ return ((icode = recog (PATTERN (remat_test_insn), remat_test_insn, &num_clobbers)) >= 0 && (num_clobbers == 0 /*|| ! added_clobbers_hard_reg_p (icode)*/)); } /* Look at all webs, if they perhaps are rematerializable. They are, if all their defs are simple sets to the same value, and that value is simple enough, and want_to_remat() holds for it. */ static void detect_remat_webs (void) { struct dlist *d; for (d = WEBS(INITIAL); d; d = d->next) { struct web *web = DLIST_WEB (d); unsigned int i; rtx pat = NULL_RTX; /* Hardregs and useless webs aren't spilled -> no remat necessary. Defless webs obviously also can't be rematerialized. */ if (web->regno < FIRST_PSEUDO_REGISTER || !web->num_defs || !web->num_uses) continue; for (i = 0; i < web->num_defs; i++) { rtx insn; rtx set = single_set (insn = DF_REF_INSN (web->defs[i])); rtx src; if (!set) break; src = SET_SRC (set); /* When only subregs of the web are set it isn't easily rematerializable. */ if (!rtx_equal_p (SET_DEST (set), web->orig_x)) break; /* If we already have a pattern it must be equal to the current. */ if (pat && !rtx_equal_p (pat, src)) break; /* Don't do the expensive checks multiple times. */ if (pat) continue; /* For now we allow only constant sources. */ if ((CONSTANT_P (src) /* If the whole thing is stable already, it is a source for remat, no matter how complicated (probably all needed resources for it are live everywhere, and don't take additional register resources). */ /* XXX Currently we can't use patterns which contain pseudos, _even_ if they are stable. The code simply isn't prepared for that. All those operands can't be spilled (or the dependent remat webs are not remat anymore), so they would be oldwebs in the next iteration. But currently oldwebs can't have their references changed. The incremental machinery barfs on that. */ || (!rtx_unstable_p (src) && !contains_pseudo (src)) /* Additionally also memrefs to stack-slots are useful, when we created them ourself. 
They might not have set their unchanging flag set, but nevertheless they are stable across the livetime in question. */ || (MEM_P (src) && INSN_UID (insn) >= orig_max_uid && memref_is_stack_slot (src))) /* And we must be able to construct an insn without side-effects to actually load that value into a reg. */ && want_to_remat (src)) pat = src; else break; } if (pat && i == web->num_defs) web->pattern = pat; } } /* Determine the spill costs of all webs. */ static void determine_web_costs (void) { struct dlist *d; for (d = WEBS(INITIAL); d; d = d->next) { unsigned int i, num_loads; int load_cost, store_cost; unsigned HOST_WIDE_INT w; struct web *web = DLIST_WEB (d); if (web->type == PRECOLORED) continue; /* Get costs for one load/store. Note that we offset them by 1, because some patterns have a zero rtx_cost(), but we of course still need the actual load/store insns. With zero all those webs would be the same, no matter how often and where they are used. */ if (web->pattern) { /* This web is rematerializable. Beware, we set store_cost to zero optimistically assuming, that we indeed don't emit any stores in the spill-code addition. This might be wrong if at the point of the load not all needed resources are available, in which case we emit a stack-based load, for which we in turn need the according stores. */ load_cost = 1 + rtx_cost (web->pattern, 0); store_cost = 0; } else { load_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 1); store_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 0); } /* We create only loads at deaths, whose number is in span_deaths. */ num_loads = MIN (web->span_deaths, web->num_uses); for (w = 0, i = 0; i < web->num_uses; i++) w += DF_REF_BB (web->uses[i])->frequency + 1; if (num_loads < web->num_uses) w = (w * num_loads + web->num_uses - 1) / web->num_uses; web->spill_cost = w * load_cost; if (store_cost) { for (w = 0, i = 0; i < web->num_defs; i++) w += DF_REF_BB (web->defs[i])->frequency + 1; web->spill_cost += w * store_cost; } web->orig_spill_cost = web->spill_cost; } } /* Detect webs which are set in a conditional jump insn (possibly a decrement-and-branch type of insn), and mark them not to be spillable. The stores for them would need to be placed on edges, which destroys the CFG. (Somewhen we want to deal with that XXX) */ static void detect_webs_set_in_cond_jump (void) { basic_block bb; FOR_EACH_BB (bb) if (GET_CODE (BB_END (bb)) == JUMP_INSN) { struct df_link *link; for (link = DF_INSN_DEFS (df, BB_END (bb)); link; link = link->next) if (link->ref && DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER) { struct web *web = def2web[DF_REF_ID (link->ref)]; web->orig_spill_temp = web->spill_temp = 3; } } } /* Second top-level function of this file. Converts the connected web parts to full webs. This means, it allocates all webs, and initializes all fields, including detecting spill temporaries. It does not distribute moves to their corresponding webs, though. */ static void make_webs (struct df *df) { /* First build all the webs itself. They are not related with others yet. */ parts_to_webs (df); /* Now detect spill temporaries to initialize their usable_regs set. */ detect_spill_temps (); detect_webs_set_in_cond_jump (); /* And finally relate them to each other, meaning to record all possible conflicts between webs (see the comment there). */ conflicts_between_webs (df); detect_remat_webs (); determine_web_costs (); } /* Distribute moves to the corresponding webs. 
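   For a simple copy like (set (reg:SI 70) (reg:SI 71)) (numbers invented) the insn's single def ref yields the target web (for reg 70) and its single use ref the source web (for reg 71).  Moves between hardregs in a wide mode can produce several def/use refs; as explained below, the web with the smallest hardreg number is chosen in that case.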
*/ static void moves_to_webs (struct df *df) { struct df_link *link; struct move_list *ml; /* Distribute all moves to their corresponding webs, making sure, each move is in a web maximally one time (happens on some strange insns). */ for (ml = wl_moves; ml; ml = ml->next) { struct move *m = ml->move; struct web *web; struct move_list *newml; if (!m) continue; m->type = WORKLIST; m->dlink = NULL; /* Multiple defs/uses can happen in moves involving hard-regs in a wider mode. For those df.* creates use/def references for each real hard-reg involved. For coalescing we are interested in the smallest numbered hard-reg. */ for (link = DF_INSN_DEFS (df, m->insn); link; link = link->next) if (link->ref) { web = def2web[DF_REF_ID (link->ref)]; web = find_web_for_subweb (web); if (!m->target_web || web->regno < m->target_web->regno) m->target_web = web; } for (link = DF_INSN_USES (df, m->insn); link; link = link->next) if (link->ref) { web = use2web[DF_REF_ID (link->ref)]; web = find_web_for_subweb (web); if (!m->source_web || web->regno < m->source_web->regno) m->source_web = web; } if (m->source_web && m->target_web /* If the usable_regs don't intersect we can't coalesce the two webs anyway, as this is no simple copy insn (it might even need an intermediate stack temp to execute this "copy" insn). */ && hard_regs_intersect_p (&m->source_web->usable_regs, &m->target_web->usable_regs)) { if (!flag_ra_optimistic_coalescing) { struct move_list *test = m->source_web->moves; for (; test && test->move != m; test = test->next); if (! test) { newml = ra_alloc (sizeof (struct move_list)); newml->move = m; newml->next = m->source_web->moves; m->source_web->moves = newml; } test = m->target_web->moves; for (; test && test->move != m; test = test->next); if (! test) { newml = ra_alloc (sizeof (struct move_list)); newml->move = m; newml->next = m->target_web->moves; m->target_web->moves = newml; } } } else /* Delete this move. */ ml->move = NULL; } } /* Handle tricky asm insns. Supposed to create conflicts to hardregs which aren't allowed in the constraints. Doesn't actually do that, as it might confuse and constrain the allocator too much. */ static void handle_asm_insn (struct df *df, rtx insn) { const char *constraints[MAX_RECOG_OPERANDS]; enum machine_mode operand_mode[MAX_RECOG_OPERANDS]; int i, noperands, in_output; HARD_REG_SET clobbered, allowed, conflict; rtx pat; if (! INSN_P (insn) || (noperands = asm_noperands (PATTERN (insn))) < 0) return; pat = PATTERN (insn); CLEAR_HARD_REG_SET (clobbered); if (GET_CODE (pat) == PARALLEL) for (i = 0; i < XVECLEN (pat, 0); i++) { rtx t = XVECEXP (pat, 0, i); if (GET_CODE (t) == CLOBBER && REG_P (XEXP (t, 0)) && REGNO (XEXP (t, 0)) < FIRST_PSEUDO_REGISTER) SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0))); } decode_asm_operands (pat, recog_data.operand, recog_data.operand_loc, constraints, operand_mode); in_output = 1; for (i = 0; i < noperands; i++) { const char *p = constraints[i]; int cls = (int) NO_REGS; struct df_link *link; rtx reg; struct web *web; int nothing_allowed = 1; reg = recog_data.operand[i]; /* Look, if the constraints apply to a pseudo reg, and not to e.g. a mem. */ while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == STRICT_LOW_PART) reg = XEXP (reg, 0); if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER) continue; /* Search the web corresponding to this operand. We depend on that decode_asm_operands() places the output operands before the input operands. 
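   E.g. for something like asm ("" : "=r" (a) : "r" (b)) the "=r" operand is first searched among the insn's defs; once a reg operand is no longer found there, in_output is cleared and the remaining (input) operands are searched among the uses instead.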
*/ while (1) { if (in_output) link = df->insns[INSN_UID (insn)].defs; else link = df->insns[INSN_UID (insn)].uses; while (link && link->ref && DF_REF_REAL_REG (link->ref) != reg) link = link->next; if (!link || !link->ref) { if (in_output) in_output = 0; else abort (); } else break; } if (in_output) web = def2web[DF_REF_ID (link->ref)]; else web = use2web[DF_REF_ID (link->ref)]; reg = DF_REF_REG (link->ref); /* Find the constraints, noting the allowed hardregs in allowed. */ CLEAR_HARD_REG_SET (allowed); while (1) { char c = *p; if (c == '\0' || c == ',' || c == '#') { /* End of one alternative - mark the regs in the current class, and reset the class. */ p++; IOR_HARD_REG_SET (allowed, reg_class_contents[cls]); if (cls != NO_REGS) nothing_allowed = 0; cls = NO_REGS; if (c == '#') do { c = *p++; } while (c != '\0' && c != ','); if (c == '\0') break; continue; } switch (c) { case '=': case '+': case '*': case '%': case '?': case '!': case '0': case '1': case '2': case '3': case '4': case 'm': case '<': case '>': case 'V': case 'o': case '&': case 'E': case 'F': case 's': case 'i': case 'n': case 'X': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': break; case 'p': cls = (int) reg_class_subunion[cls][(int) BASE_REG_CLASS]; nothing_allowed = 0; break; case 'g': case 'r': cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS]; nothing_allowed = 0; break; default: cls = (int) reg_class_subunion[cls][(int) REG_CLASS_FROM_CONSTRAINT (c, p)]; } p += CONSTRAINT_LEN (c, p); } /* Now make conflicts between this web, and all hardregs, which are not allowed by the constraints. */ if (nothing_allowed) { /* If we had no real constraints nothing was explicitly allowed, so we allow the whole class (i.e. we make no additional conflicts). */ CLEAR_HARD_REG_SET (conflict); } else { COPY_HARD_REG_SET (conflict, usable_regs [reg_preferred_class (web->regno)]); IOR_HARD_REG_SET (conflict, usable_regs [reg_alternate_class (web->regno)]); AND_COMPL_HARD_REG_SET (conflict, allowed); /* We can't yet establish these conflicts. Reload must go first (or better said, we must implement some functionality of reload). E.g. if some operands must match, and they need the same color we don't see yet, that they do not conflict (because they match). For us it looks like two normal references with different DEFs, so they conflict, and as they both need the same color, the graph becomes uncolorable. */ #if 0 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++) if (TEST_HARD_REG_BIT (conflict, c)) record_conflict (web, hardreg2web[c]); #endif } if (dump_file) { int c; ra_debug_msg (DUMP_ASM, " ASM constrain Web %d conflicts with:", web->id); for (c = 0; c < FIRST_PSEUDO_REGISTER; c++) if (TEST_HARD_REG_BIT (conflict, c)) ra_debug_msg (DUMP_ASM, " %d", c); ra_debug_msg (DUMP_ASM, "\n"); } } } /* The real toplevel function in this file. Build (or rebuilds) the complete interference graph with webs and conflicts. */ void build_i_graph (struct df *df) { rtx insn; init_web_parts (df); sbitmap_zero (move_handled); wl_moves = NULL; build_web_parts_and_conflicts (df); /* For read-modify-write instructions we may have created two webs. Reconnect them here. (s.a.) */ connect_rmw_web_parts (df); /* The webs are conceptually complete now, but still scattered around as connected web parts. Collect all information and build the webs including all conflicts between webs (instead web parts). */ make_webs (df); moves_to_webs (df); /* Look for additional constraints given by asms. 
*/ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) handle_asm_insn (df, insn); } /* Allocates or reallocates most memory for the interference graph and associated structures. If it reallocates memory (meaning, this is not the first pass), this also changes some structures to reflect the additional entries in various arrays, and the higher number of defs and uses. */ void ra_build_realloc (struct df *df) { struct web_part *last_web_parts = web_parts; struct web **last_def2web = def2web; struct web **last_use2web = use2web; sbitmap last_live_over_abnormal = live_over_abnormal; unsigned int i; struct dlist *d; move_handled = sbitmap_alloc (get_max_uid () ); web_parts = xcalloc (df->def_id + df->use_id, sizeof web_parts[0]); def2web = xcalloc (df->def_id + df->use_id, sizeof def2web[0]); use2web = &def2web[df->def_id]; live_over_abnormal = sbitmap_alloc (df->use_id); sbitmap_zero (live_over_abnormal); /* First go through all old defs and uses. */ for (i = 0; i < last_def_id + last_use_id; i++) { /* And relocate them to the new array. This is made ugly by the fact, that defs and uses are placed consecutively into one array. */ struct web_part *dest = &web_parts[i < last_def_id ? i : (df->def_id + i - last_def_id)]; struct web_part *up; *dest = last_web_parts[i]; up = dest->uplink; dest->uplink = NULL; /* Also relocate the uplink to point into the new array. */ if (up && up->ref) { unsigned int id = DF_REF_ID (up->ref); if (up < &last_web_parts[last_def_id]) { if (df->defs[id]) dest->uplink = &web_parts[DF_REF_ID (up->ref)]; } else if (df->uses[id]) dest->uplink = &web_parts[df->def_id + DF_REF_ID (up->ref)]; } } /* Also set up the def2web and use2web arrays, from the last pass. Remember also the state of live_over_abnormal. */ for (i = 0; i < last_def_id; i++) { struct web *web = last_def2web[i]; if (web) { web = find_web_for_subweb (web); if (web->type != FREE && web->type != PRECOLORED) def2web[i] = last_def2web[i]; } } for (i = 0; i < last_use_id; i++) { struct web *web = last_use2web[i]; if (web) { web = find_web_for_subweb (web); if (web->type != FREE && web->type != PRECOLORED) use2web[i] = last_use2web[i]; } if (TEST_BIT (last_live_over_abnormal, i)) SET_BIT (live_over_abnormal, i); } /* We don't have any subwebs for now. Somewhen we might want to remember them too, instead of recreating all of them every time. The problem is, that which subwebs we need, depends also on what other webs and subwebs exist, and which conflicts are there. OTOH it should be no problem, if we had some more subwebs than strictly needed. Later. */ for (d = WEBS(FREE); d; d = d->next) { struct web *web = DLIST_WEB (d); struct web *wnext; for (web = web->subreg_next; web; web = wnext) { wnext = web->subreg_next; free (web); } DLIST_WEB (d)->subreg_next = NULL; } /* The uses we anyway are going to check, are not yet live over an abnormal edge. In fact, they might actually not anymore, due to added loads. */ if (last_check_uses) sbitmap_difference (live_over_abnormal, live_over_abnormal, last_check_uses); if (last_def_id || last_use_id) { sbitmap_free (last_live_over_abnormal); free (last_web_parts); free (last_def2web); } if (!last_max_uid) { /* Setup copy cache, for copy_insn_p (). */ copy_cache = xcalloc (get_max_uid (), sizeof (copy_cache[0])); init_bb_info (); } else { copy_cache = xrealloc (copy_cache, get_max_uid () * sizeof (copy_cache[0])); memset (&copy_cache[last_max_uid], 0, (get_max_uid () - last_max_uid) * sizeof (copy_cache[0])); } } /* Free up/clear some memory, only needed for one pass.
*/ void ra_build_free (void) { struct dlist *d; unsigned int i; /* Clear the moves associated with a web (we also need to look into subwebs here). */ for (i = 0; i < num_webs; i++) { struct web *web = ID2WEB (i); if (!web) abort (); if (i >= num_webs - num_subwebs && (web->conflict_list || web->orig_conflict_list)) abort (); web->moves = NULL; } /* All webs in the free list have no defs or uses anymore. */ for (d = WEBS(FREE); d; d = d->next) { struct web *web = DLIST_WEB (d); if (web->defs) free (web->defs); web->defs = NULL; if (web->uses) free (web->uses); web->uses = NULL; /* We can't free the subwebs here, as they are referenced from def2web[], and possibly needed in the next ra_build_realloc(). We free them there (or in free_all_mem()). */ } /* Free all conflict bitmaps from web parts. Note that we clear _all_ these conflicts, and don't rebuild them next time for uses which aren't rechecked. This means that those conflict bitmaps only contain the incremental information. The cumulative one is still contained in the edges of the I-graph, i.e. in conflict_list (or orig_conflict_list) of the webs. */ for (i = 0; i < df->def_id + df->use_id; i++) { struct tagged_conflict *cl; for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next) { if (cl->conflicts) BITMAP_XFREE (cl->conflicts); } web_parts[i].sub_conflicts = NULL; } wl_moves = NULL; free (id2web); free (move_handled); sbitmap_free (sup_igraph); sbitmap_free (igraph); } /* Free all memory for the interference graph structures. */ void ra_build_free_all (struct df *df) { unsigned int i; free_bb_info_ra (); free (copy_cache); copy_cache = NULL; for (i = 0; i < df->def_id + df->use_id; i++) { struct tagged_conflict *cl; for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next) { if (cl->conflicts) BITMAP_XFREE (cl->conflicts); } web_parts[i].sub_conflicts = NULL; } sbitmap_free (live_over_abnormal); free (web_parts); web_parts = NULL; if (last_check_uses) sbitmap_free (last_check_uses); last_check_uses = NULL; free (def2web); use2web = NULL; def2web = NULL; } /* Type information for ra-build.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_ra_build_h[] = { { &remat_test_insn, 1, sizeof (remat_test_insn), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, LAST_GGC_ROOT_TAB }; /* vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: */ /* Graph coloring register allocator Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc. Contributed by Michael Matz and Daniel Berlin. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is part of the graph coloring register allocator. It contains the graph colorizer. Given an interference graph as set up in ra-build.c the toplevel function in this file (ra_colorize_graph) colorizes the graph, leaving a list of colored, coalesced and spilled nodes. The algorithm used is a merge of George & Appels iterative coalescing and optimistic coalescing, switchable at runtime. The current default is "optimistic coalescing +", which is based on the normal Briggs/Cooper framework. We can also use biased coloring. Most of the structure here follows the different papers. Additionally there is a custom step to locally improve the overall spill cost of the colored graph (recolor_spills). */ static void push_list (struct dlist *, struct dlist **); static void push_list_end (struct dlist *, struct dlist **); static void free_dlist (struct dlist **); static void put_web_at_end (struct web *, enum node_type); static void put_move (struct move *, enum move_type); static void build_worklists (struct df *); static void enable_move (struct web *); static void decrement_degree (struct web *, int); static void simplify (void); static void remove_move_1 (struct web *, struct move *); static void remove_move (struct web *, struct move *); static void add_worklist (struct web *); static int ok (struct web *, struct web *); static int conservative (struct web *, struct web *); static inline unsigned int simplify_p (enum node_type); static void combine (struct web *, struct web *); static void coalesce (void); static void freeze_moves (struct web *); static void freeze (void); static void select_spill (void); static int color_usable_p (int, HARD_REG_SET, HARD_REG_SET, enum machine_mode); int get_free_reg (HARD_REG_SET, HARD_REG_SET, enum machine_mode); static int get_biased_reg (HARD_REG_SET, HARD_REG_SET, HARD_REG_SET, HARD_REG_SET, enum machine_mode); static int count_long_blocks (HARD_REG_SET, int); static char * hardregset_to_string (HARD_REG_SET); static void calculate_dont_begin (struct web *, HARD_REG_SET *); static void colorize_one_web (struct web *, int); static void assign_colors (void); static void try_recolor_web (struct web *); static void insert_coalesced_conflicts (void); static int comp_webs_maxcost (const void *, const void *); static void recolor_spills (void); static void check_colors (void); static void restore_conflicts_from_coalesce (struct web *); static void break_coalesced_spills (void); static void unalias_web (struct web *); static void break_aliases_to_web (struct web *); static void break_precolored_alias (struct web *); static void init_web_pairs (void); static void add_web_pair_cost (struct web *, struct web *, unsigned HOST_WIDE_INT, unsigned int); static int comp_web_pairs (const void *, const void *); static void sort_and_combine_web_pairs (int); static void aggressive_coalesce (void); static void extended_coalesce_2 (void); static void check_uncoalesced_moves (void); static struct dlist *mv_worklist, *mv_coalesced, *mv_constrained; static struct dlist *mv_frozen, *mv_active; /* Push a node onto the front of the list. 
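   These dlist helpers implement the per-type worklists.  Schematically, typical usage elsewhere in this file is: d = pop_list (&WEBS(SIMPLIFY)); web = DLIST_WEB (d); put_web (web, SELECT); i.e. take a web off one worklist and file it under another type.  push_list() itself only links the node in; the web's type member is maintained by put_web() and put_web_at_end().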
*/ static void push_list (struct dlist *x, struct dlist **list) { if (x->next || x->prev) abort (); x->next = *list; if (*list) (*list)->prev = x; *list = x; } static void push_list_end (struct dlist *x, struct dlist **list) { if (x->prev || x->next) abort (); if (!*list) { *list = x; return; } while ((*list)->next) list = &((*list)->next); x->prev = *list; (*list)->next = x; } /* Remove a node from the list. */ void remove_list (struct dlist *x, struct dlist **list) { struct dlist *y = x->prev; if (y) y->next = x->next; else *list = x->next; y = x->next; if (y) y->prev = x->prev; x->next = x->prev = NULL; } /* Pop the front of the list. */ struct dlist * pop_list (struct dlist **list) { struct dlist *r = *list; if (r) remove_list (r, list); return r; } /* Free the given double linked list. */ static void free_dlist (struct dlist **list) { *list = NULL; } /* The web WEB should get the given new TYPE. Put it onto the appropriate list. Inline, because it's called with constant TYPE every time. */ void put_web (struct web *web, enum node_type type) { switch (type) { case INITIAL: case FREE: case FREEZE: case SPILL: case SPILLED: case COALESCED: case COLORED: case SELECT: push_list (web->dlink, &WEBS(type)); break; case PRECOLORED: push_list (web->dlink, &WEBS(INITIAL)); break; case SIMPLIFY: if (web->spill_temp) push_list (web->dlink, &WEBS(type = SIMPLIFY_SPILL)); else if (web->add_hardregs) push_list (web->dlink, &WEBS(type = SIMPLIFY_FAT)); else push_list (web->dlink, &WEBS(SIMPLIFY)); break; default: abort (); } web->type = type; } /* After we are done with the whole pass of coloring/spilling, we reset the lists of webs, in preparation of the next pass. The spilled webs become free, colored webs go to the initial list, coalesced webs become free or initial, according to what type of web they are coalesced to. */ void reset_lists (void) { struct dlist *d; unsigned int i; if (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_SPILL) || WEBS(SIMPLIFY_FAT) || WEBS(FREEZE) || WEBS(SPILL) || WEBS(SELECT)) abort (); while ((d = pop_list (&WEBS(COALESCED))) != NULL) { struct web *web = DLIST_WEB (d); struct web *aweb = alias (web); /* Note, how alias() becomes invalid through the two put_web()'s below. It might set the type of a web to FREE (from COALESCED), which itself is a target of aliasing (i.e. in the middle of an alias chain). We can handle this by checking also for type == FREE. Note nevertheless, that alias() is invalid henceforth. */ if (aweb->type == SPILLED || aweb->type == FREE) put_web (web, FREE); else put_web (web, INITIAL); } while ((d = pop_list (&WEBS(SPILLED))) != NULL) put_web (DLIST_WEB (d), FREE); while ((d = pop_list (&WEBS(COLORED))) != NULL) put_web (DLIST_WEB (d), INITIAL); /* All free webs have no conflicts anymore. */ for (d = WEBS(FREE); d; d = d->next) { struct web *web = DLIST_WEB (d); BITMAP_XFREE (web->useless_conflicts); web->useless_conflicts = NULL; } /* Sanity check, that we only have free, initial or precolored webs. */ for (i = 0; i < num_webs; i++) { struct web *web = ID2WEB (i); if (web->type != INITIAL && web->type != FREE && web->type != PRECOLORED) abort (); } free_dlist (&mv_worklist); free_dlist (&mv_coalesced); free_dlist (&mv_constrained); free_dlist (&mv_frozen); free_dlist (&mv_active); } /* Similar to put_web(), but add the web to the end of the appropriate list. Additionally TYPE may not be SIMPLIFY. 
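   Presumably because put_web() dispatches SIMPLIFY further into the SIMPLIFY_SPILL, SIMPLIFY_FAT and plain SIMPLIFY sub-lists depending on spill_temp and add_hardregs, and that dispatch is not replicated here.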
*/ static void put_web_at_end (struct web *web, enum node_type type) { if (type == PRECOLORED) type = INITIAL; else if (type == SIMPLIFY) abort (); push_list_end (web->dlink, &WEBS(type)); web->type = type; } /* Unlink WEB from the list it's currently on (which corresponds to its current type). */ void remove_web_from_list (struct web *web) { if (web->type == PRECOLORED) remove_list (web->dlink, &WEBS(INITIAL)); else remove_list (web->dlink, &WEBS(web->type)); } /* Give MOVE the TYPE, and link it into the correct list. */ static inline void put_move (struct move *move, enum move_type type) { switch (type) { case WORKLIST: push_list (move->dlink, &mv_worklist); break; case MV_COALESCED: push_list (move->dlink, &mv_coalesced); break; case CONSTRAINED: push_list (move->dlink, &mv_constrained); break; case FROZEN: push_list (move->dlink, &mv_frozen); break; case ACTIVE: push_list (move->dlink, &mv_active); break; default: abort (); } move->type = type; } /* Build the worklists we are going to process. */ static void build_worklists (struct df *df ATTRIBUTE_UNUSED) { struct dlist *d, *d_next; struct move_list *ml; /* If we are not the first pass, put all stackwebs (which are still backed by a new pseudo, but conceptually can stand for a stackslot, i.e. it doesn't really matter if they get a color or not), on the SELECT stack first, those with lowest cost first. This way they will be colored last, so do not constrain the coloring of the normal webs. But still those with the highest count are colored before, i.e. get a color more probable. The use of stackregs is a pure optimization, and all would work, if we used real stackslots from the begin. */ if (ra_pass > 1) { unsigned int i, num, max_num; struct web **order2web; max_num = num_webs - num_subwebs; order2web = xmalloc (max_num * sizeof (order2web[0])); for (i = 0, num = 0; i < max_num; i++) if (id2web[i]->regno >= max_normal_pseudo) order2web[num++] = id2web[i]; if (num) { qsort (order2web, num, sizeof (order2web[0]), comp_webs_maxcost); for (i = num - 1;; i--) { struct web *web = order2web[i]; struct conflict_link *wl; remove_list (web->dlink, &WEBS(INITIAL)); put_web (web, SELECT); for (wl = web->conflict_list; wl; wl = wl->next) { struct web *pweb = wl->t; pweb->num_conflicts -= 1 + web->add_hardregs; } if (i == 0) break; } } free (order2web); } /* For all remaining initial webs, classify them. */ for (d = WEBS(INITIAL); d; d = d_next) { struct web *web = DLIST_WEB (d); d_next = d->next; if (web->type == PRECOLORED) continue; remove_list (d, &WEBS(INITIAL)); if (web->num_conflicts >= NUM_REGS (web)) put_web (web, SPILL); else if (web->moves) put_web (web, FREEZE); else put_web (web, SIMPLIFY); } /* And put all moves on the worklist for iterated coalescing. Note, that if iterated coalescing is off, then wl_moves doesn't contain any moves. */ for (ml = wl_moves; ml; ml = ml->next) if (ml->move) { struct move *m = ml->move; d = ra_calloc (sizeof (struct dlist)); DLIST_MOVE (d) = m; m->dlink = d; put_move (m, WORKLIST); } } /* Enable the active moves, in which WEB takes part, to be processed. */ static void enable_move (struct web *web) { struct move_list *ml; for (ml = web->moves; ml; ml = ml->next) if (ml->move->type == ACTIVE) { remove_list (ml->move->dlink, &mv_active); put_move (ml->move, WORKLIST); } } /* Decrement the degree of node WEB by the amount DEC. Possibly change the type of WEB, if the number of conflicts is now smaller than its freedom. 
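   A small worked example (numbers invented): suppose NUM_REGS (WEB) is 6 and web->num_conflicts is 6, i.e. the web is not trivially colorable.  When a conflicting single-register neighbor gets selected, decrement_degree (web, 1) leaves num_conflicts at 5 < 6: the moves of the web and of its neighbors are enabled again, and the web migrates from its current list (typically SPILL) to FREEZE if it still has moves, or to SIMPLIFY otherwise.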
*/ static void decrement_degree (struct web *web, int dec) { int before = web->num_conflicts; web->num_conflicts -= dec; if (web->num_conflicts < NUM_REGS (web) && before >= NUM_REGS (web)) { struct conflict_link *a; enable_move (web); for (a = web->conflict_list; a; a = a->next) { struct web *aweb = a->t; if (aweb->type != SELECT && aweb->type != COALESCED) enable_move (aweb); } if (web->type != FREEZE) { remove_web_from_list (web); if (web->moves) put_web (web, FREEZE); else put_web (web, SIMPLIFY); } } } /* Repeatedly simplify the nodes on the simplify worklists. */ static void simplify (void) { struct dlist *d; struct web *web; struct conflict_link *wl; while (1) { /* We try hard to color all the webs resulting from spills first. Without that on register starved machines (x86 e.g) with some live DImode pseudos, -fPIC, and an asm requiring %edx, it might be, that we do rounds over rounds, because the conflict graph says, we can simplify those short webs, but later due to irregularities we can't color those pseudos. So we have to spill them, which in later rounds leads to other spills. */ d = pop_list (&WEBS(SIMPLIFY)); if (!d) d = pop_list (&WEBS(SIMPLIFY_FAT)); if (!d) d = pop_list (&WEBS(SIMPLIFY_SPILL)); if (!d) break; web = DLIST_WEB (d); ra_debug_msg (DUMP_PROCESS, " simplifying web %3d, conflicts = %d\n", web->id, web->num_conflicts); put_web (web, SELECT); for (wl = web->conflict_list; wl; wl = wl->next) { struct web *pweb = wl->t; if (pweb->type != SELECT && pweb->type != COALESCED) { decrement_degree (pweb, 1 + web->add_hardregs); } } } } /* Helper function to remove a move from the movelist of the web. */ static void remove_move_1 (struct web *web, struct move *move) { struct move_list *ml = web->moves; if (!ml) return; if (ml->move == move) { web->moves = ml->next; return; } for (; ml->next && ml->next->move != move; ml = ml->next) ; if (!ml->next) return; ml->next = ml->next->next; } /* Remove a move from the movelist of the web. Actually this is just a wrapper around remove_move_1(), making sure, the removed move really is not in the list anymore. */ static void remove_move (struct web *web, struct move *move) { struct move_list *ml; remove_move_1 (web, move); for (ml = web->moves; ml; ml = ml->next) if (ml->move == move) abort (); } /* Merge the moves for the two webs into the first web's movelist. */ void merge_moves (struct web *u, struct web *v) { regset seen; struct move_list *ml, *ml_next; seen = BITMAP_XMALLOC (); for (ml = u->moves; ml; ml = ml->next) bitmap_set_bit (seen, INSN_UID (ml->move->insn)); for (ml = v->moves; ml; ml = ml_next) { ml_next = ml->next; if (! bitmap_bit_p (seen, INSN_UID (ml->move->insn))) { ml->next = u->moves; u->moves = ml; } } BITMAP_XFREE (seen); v->moves = NULL; } /* Add a web to the simplify worklist, from the freeze worklist. */ static void add_worklist (struct web *web) { if (web->type != PRECOLORED && !web->moves && web->num_conflicts < NUM_REGS (web)) { remove_list (web->dlink, &WEBS(FREEZE)); put_web (web, SIMPLIFY); } } /* Precolored node coalescing heuristic. */ static int ok (struct web *target, struct web *source) { struct conflict_link *wl; int i; int color = source->color; int size; /* Normally one would think, the next test wouldn't be needed. We try to coalesce S and T, and S has already a color, and we checked when processing the insns, that both have the same mode. So naively we could conclude, that of course that mode was valid for this color. Hah. But there is sparc. Before reload there are copy insns (e.g. 
the ones copying arguments to locals) which happily refer to colors in invalid modes. We can't coalesce those things. */ if (! HARD_REGNO_MODE_OK (source->color, GET_MODE (target->orig_x))) return 0; /* Sanity for funny modes. */ size = hard_regno_nregs[color][GET_MODE (target->orig_x)]; if (!size) return 0; /* We can't coalesce target with a precolored register which isn't in usable_regs. */ for (i = size; i--;) if (TEST_HARD_REG_BIT (never_use_colors, color + i) || !TEST_HARD_REG_BIT (target->usable_regs, color + i) /* Before usually calling ok() at all, we already test, if the candidates conflict in sup_igraph. But when wide webs are coalesced to hardregs, we only test the hardweb coalesced into. This is only the begin color. When actually coalescing both, it will also take the following size colors, i.e. their webs. We nowhere checked if the candidate possibly conflicts with one of _those_, which is possible with partial conflicts, so we simply do it here (this does one bit-test more than necessary, the first color). Note, that if X is precolored bit [X*num_webs + Y] can't be set (see add_conflict_edge()). */ || TEST_BIT (sup_igraph, target->id * num_webs + hardreg2web[color + i]->id)) return 0; for (wl = target->conflict_list; wl; wl = wl->next) { struct web *pweb = wl->t; if (pweb->type == SELECT || pweb->type == COALESCED) continue; /* Coalescing target (T) and source (S) is o.k, if for all conflicts C of T it is true, that: 1) C will be colored, or 2) C is a hardreg (precolored), or 3) C already conflicts with S too, or 4) a web which contains C conflicts already with S. XXX: we handle here only the special case of 4), that C is a subreg, and the containing thing is the reg itself, i.e. we dont handle the situation, were T conflicts with (subreg:SI x 1), and S conflicts with (subreg:DI x 0), which would be allowed also, as the S-conflict overlaps the T-conflict. So, we first test the whole web for any of these conditions, and continue with the next C, if 1, 2 or 3 is true. */ if (pweb->num_conflicts < NUM_REGS (pweb) || pweb->type == PRECOLORED || TEST_BIT (igraph, igraph_index (source->id, pweb->id)) ) continue; /* This is reached, if not one of 1, 2 or 3 was true. In the case C has no subwebs, 4 can't be true either, so we can't coalesce S and T. */ if (wl->sub == NULL) return 0; else { /* The main webs do _not_ conflict, only some parts of both. This means, that 4 is possibly true, so we need to check this too. For this we go through all sub conflicts between T and C, and see if the target part of C already conflicts with S. When this is not the case we disallow coalescing. */ struct sub_conflict *sl; for (sl = wl->sub; sl; sl = sl->next) { if (!TEST_BIT (igraph, igraph_index (source->id, sl->t->id))) return 0; } } } return 1; } /* Non-precolored node coalescing heuristic. */ static int conservative (struct web *target, struct web *source) { unsigned int k; unsigned int loop; regset seen; struct conflict_link *wl; unsigned int num_regs = NUM_REGS (target); /* XXX */ /* k counts the resulting conflict weight, if target and source would be merged, and all low-degree neighbors would be removed. */ k = 0 * MAX (target->add_hardregs, source->add_hardregs); seen = BITMAP_XMALLOC (); for (loop = 0; loop < 2; loop++) for (wl = ((loop == 0) ? target : source)->conflict_list; wl; wl = wl->next) { struct web *pweb = wl->t; if (pweb->type != SELECT && pweb->type != COALESCED && pweb->num_conflicts >= NUM_REGS (pweb) && ! 
REGNO_REG_SET_P (seen, pweb->id)) { SET_REGNO_REG_SET (seen, pweb->id); k += 1 + pweb->add_hardregs; } } BITMAP_XFREE (seen); if (k >= num_regs) return 0; return 1; } /* If the web is coalesced, return it's alias. Otherwise, return what was passed in. */ struct web * alias (struct web *web) { while (web->type == COALESCED) web = web->alias; return web; } /* Returns nonzero, if the TYPE belongs to one of those representing SIMPLIFY types. */ static inline unsigned int simplify_p (enum node_type type) { return type == SIMPLIFY || type == SIMPLIFY_SPILL || type == SIMPLIFY_FAT; } /* Actually combine two webs, that can be coalesced. */ static void combine (struct web *u, struct web *v) { int i; struct conflict_link *wl; if (u == v || v->type == COALESCED) abort (); if ((u->regno >= max_normal_pseudo) != (v->regno >= max_normal_pseudo)) abort (); remove_web_from_list (v); put_web (v, COALESCED); v->alias = u; u->is_coalesced = 1; v->is_coalesced = 1; u->num_aliased += 1 + v->num_aliased; if (flag_ra_merge_spill_costs && u->type != PRECOLORED) u->spill_cost += v->spill_cost; /*u->spill_cost = MAX (u->spill_cost, v->spill_cost);*/ merge_moves (u, v); /* combine add_hardregs's of U and V. */ for (wl = v->conflict_list; wl; wl = wl->next) { struct web *pweb = wl->t; /* We don't strictly need to move conflicts between webs which are already coalesced or selected, if we do iterated coalescing, or better if we need not to be able to break aliases again. I.e. normally we would use the condition (pweb->type != SELECT && pweb->type != COALESCED). But for now we simply merge all conflicts. It doesn't take that much time. */ if (1) { struct web *web = u; int nregs = 1 + v->add_hardregs; if (u->type == PRECOLORED) nregs = hard_regno_nregs[u->color][GET_MODE (v->orig_x)]; /* For precolored U's we need to make conflicts between V's neighbors and as many hardregs from U as V needed if it gets color U. For now we approximate this by V->add_hardregs, which could be too much in multi-length classes. We should really count how many hardregs are needed for V with color U. When U isn't precolored this loop breaks out after one iteration. */ for (i = 0; i < nregs; i++) { if (u->type == PRECOLORED) web = hardreg2web[i + u->color]; if (wl->sub == NULL) record_conflict (web, pweb); else { struct sub_conflict *sl; /* So, between V and PWEB there are sub_conflicts. We need to relocate those conflicts to be between WEB (== U when it wasn't precolored) and PWEB. In the case only a part of V conflicted with (part of) PWEB we nevertheless make the new conflict between the whole U and the (part of) PWEB. Later we might try to find in U the correct subpart corresponding (by size and offset) to the part of V (sl->s) which was the source of the conflict. */ for (sl = wl->sub; sl; sl = sl->next) { /* Beware: sl->s is no subweb of web (== U) but of V. We try to search a corresponding subpart of U. If we found none we let it conflict with the whole U. Note that find_subweb() only looks for mode and subreg_byte of the REG rtx but not for the pseudo reg number (otherwise it would be guaranteed to _not_ find any subpart). */ struct web *sweb = NULL; if (SUBWEB_P (sl->s)) sweb = find_subweb (web, sl->s->orig_x); if (!sweb) sweb = web; record_conflict (sweb, sl->t); } } if (u->type != PRECOLORED) break; } if (pweb->type != SELECT && pweb->type != COALESCED) decrement_degree (pweb, 1 + v->add_hardregs); } } /* Now merge the usable_regs together. 
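As an illustration with made-up hard registers: if U may live in
{r0,r1,r2,r3} and V only in {r2,r3,r4}, the merged web is restricted to
the intersection {r2,r3}.  Its num_freedom drops accordingly, so it may
be moved onto the SPILL list below although neither web alone was a
spill candidate; an empty intersection would mean the move could never
have been coalesced, hence the abort().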
*/ /* XXX That merging might normally make it necessary to adjust add_hardregs, which also means to adjust neighbors. This can result in making some more webs trivially colorable, (or the opposite, if this increases our add_hardregs). Because we intersect the usable_regs it should only be possible to decrease add_hardregs. So a conservative solution for now is to simply don't change it. */ u->use_my_regs = 1; AND_HARD_REG_SET (u->usable_regs, v->usable_regs); u->regclass = reg_class_subunion[u->regclass][v->regclass]; /* Count number of possible hardregs. This might make U a spillweb, but that could also happen, if U and V together had too many conflicts. */ u->num_freedom = hard_regs_count (u->usable_regs); u->num_freedom -= u->add_hardregs; /* The next would mean an invalid coalesced move (both webs have no possible hardreg in common), so abort. */ if (!u->num_freedom) abort(); if (u->num_conflicts >= NUM_REGS (u) && (u->type == FREEZE || simplify_p (u->type))) { remove_web_from_list (u); put_web (u, SPILL); } /* We want the most relaxed combination of spill_temp state. I.e. if any was no spilltemp or a spilltemp2, the result is so too, otherwise if any is short, the result is too. It remains, when both are normal spilltemps. */ if (v->spill_temp == 0) u->spill_temp = 0; else if (v->spill_temp == 2 && u->spill_temp != 0) u->spill_temp = 2; else if (v->spill_temp == 3 && u->spill_temp == 1) u->spill_temp = 3; } /* Attempt to coalesce the first thing on the move worklist. This is used only for iterated coalescing. */ static void coalesce (void) { struct dlist *d = pop_list (&mv_worklist); struct move *m = DLIST_MOVE (d); struct web *source = alias (m->source_web); struct web *target = alias (m->target_web); if (target->type == PRECOLORED) { struct web *h = source; source = target; target = h; } if (source == target) { remove_move (source, m); put_move (m, MV_COALESCED); add_worklist (source); } else if (target->type == PRECOLORED || TEST_BIT (sup_igraph, source->id * num_webs + target->id) || TEST_BIT (sup_igraph, target->id * num_webs + source->id)) { remove_move (source, m); remove_move (target, m); put_move (m, CONSTRAINED); add_worklist (source); add_worklist (target); } else if ((source->type == PRECOLORED && ok (target, source)) || (source->type != PRECOLORED && conservative (target, source))) { remove_move (source, m); remove_move (target, m); put_move (m, MV_COALESCED); combine (source, target); add_worklist (source); } else put_move (m, ACTIVE); } /* Freeze the moves associated with the web. Used for iterated coalescing. */ static void freeze_moves (struct web *web) { struct move_list *ml, *ml_next; for (ml = web->moves; ml; ml = ml_next) { struct move *m = ml->move; struct web *src, *dest; ml_next = ml->next; if (m->type == ACTIVE) remove_list (m->dlink, &mv_active); else remove_list (m->dlink, &mv_worklist); put_move (m, FROZEN); remove_move (web, m); src = alias (m->source_web); dest = alias (m->target_web); src = (src == web) ? dest : src; remove_move (src, m); /* XXX GA use the original v, instead of alias(v) */ if (!src->moves && src->num_conflicts < NUM_REGS (src)) { remove_list (src->dlink, &WEBS(FREEZE)); put_web (src, SIMPLIFY); } } } /* Freeze the first thing on the freeze worklist (only for iterated coalescing). */ static void freeze (void) { struct dlist *d = pop_list (&WEBS(FREEZE)); put_web (DLIST_WEB (d), SIMPLIFY); freeze_moves (DLIST_WEB (d)); } /* The current spill heuristic. Returns a number for a WEB. Webs with higher numbers are selected later. 
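A small worked example for the default heuristic defined below (all
numbers made up): assume flag_ra_break_aliases is set and a web has
spill_cost 10, num_conflicts 4, num_aliased 1 and span_deaths 3.  Then

  divisor = 1 + 1 + 4               = 6
  ret     = ((10 << 8) + 6 - 1) / 6 = 427
  ret    -= 3  (since span_deaths < ret)

giving 424.  Webs with many conflicts or many spanned deaths thus get
smaller numbers, i.e. are selected for spilling earlier.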
*/ static unsigned HOST_WIDE_INT (*spill_heuristic) (struct web *); static unsigned HOST_WIDE_INT default_spill_heuristic (struct web *); /* Our default heuristic is similar to spill_cost / num_conflicts. Just scaled for integer arithmetic, and it favors coalesced webs, and webs which span more insns with deaths. */ static unsigned HOST_WIDE_INT default_spill_heuristic (struct web *web) { unsigned HOST_WIDE_INT ret; unsigned int divisor = 1; /* Make coalesce targets cheaper to spill, because they will be broken up again into smaller parts. */ if (flag_ra_break_aliases) divisor += web->num_aliased; divisor += web->num_conflicts; ret = ((web->spill_cost << 8) + divisor - 1) / divisor; /* It is better to spill webs that span more insns (deaths in our case) than other webs with the otherwise same spill_cost. So make them a little bit cheaper. Remember that spill_cost is unsigned. */ if (web->span_deaths < ret) ret -= web->span_deaths; return ret; } /* Select the cheapest spill to be potentially spilled (we don't *actually* spill until we need to). */ static void select_spill (void) { unsigned HOST_WIDE_INT best = (unsigned HOST_WIDE_INT) -1; struct dlist *bestd = NULL; unsigned HOST_WIDE_INT best2 = (unsigned HOST_WIDE_INT) -1; struct dlist *bestd2 = NULL; struct dlist *d; for (d = WEBS(SPILL); d; d = d->next) { struct web *w = DLIST_WEB (d); unsigned HOST_WIDE_INT cost = spill_heuristic (w); if ((!w->spill_temp) && cost < best) { best = cost; bestd = d; } /* Specially marked spill temps can be spilled. Also coalesce targets can. Eventually they will be broken up later in the colorizing process, so if we have nothing better take that. */ else if ((w->spill_temp == 2 || w->is_coalesced) && cost < best2) { best2 = cost; bestd2 = d; } } if (!bestd) { bestd = bestd2; best = best2; } if (!bestd) abort (); /* Note the potential spill. */ DLIST_WEB (bestd)->was_spilled = 1; remove_list (bestd, &WEBS(SPILL)); put_web (DLIST_WEB (bestd), SIMPLIFY); freeze_moves (DLIST_WEB (bestd)); ra_debug_msg (DUMP_PROCESS, " potential spill web %3d, conflicts = %d\n", DLIST_WEB (bestd)->id, DLIST_WEB (bestd)->num_conflicts); } /* Given a set of forbidden colors to begin at, and a set of still free colors, and MODE, returns nonzero of color C is still usable. */ static int color_usable_p (int c, HARD_REG_SET dont_begin_colors, HARD_REG_SET free_colors, enum machine_mode mode) { if (!TEST_HARD_REG_BIT (dont_begin_colors, c) && TEST_HARD_REG_BIT (free_colors, c) && HARD_REGNO_MODE_OK (c, mode)) { int i, size; size = hard_regno_nregs[c][mode]; for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++); if (i == size) return 1; } return 0; } /* I don't want to clutter up the actual code with ifdef's. */ #ifdef REG_ALLOC_ORDER #define INV_REG_ALLOC_ORDER(c) inv_reg_alloc_order[c] #else #define INV_REG_ALLOC_ORDER(c) c #endif /* Searches in FREE_COLORS for a block of hardregs of the right length for MODE, which doesn't begin at a hardreg mentioned in DONT_BEGIN_COLORS. If it needs more than one hardreg it prefers blocks beginning at an even hardreg, and only gives an odd begin reg if no other block could be found. 
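For example (hypothetical register file, HARD_REGNO_MODE_OK assumed to
hold everywhere): if MODE needs two hardregs and FREE_COLORS contains
hardregs 1, 2, 3, 6 and 7, the candidate begin regs are 1, 2 and 6.
The even candidates 2 and 6 are preferred, and among them the one
coming first in the allocation order wins; the odd begin 1 would only
be returned if no even-aligned block existed at all.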
*/ int get_free_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET free_colors, enum machine_mode mode) { int c; int last_resort_reg = -1; int pref_reg = -1; int pref_reg_order = INT_MAX; int last_resort_reg_order = INT_MAX; for (c = 0; c < FIRST_PSEUDO_REGISTER; c++) if (!TEST_HARD_REG_BIT (dont_begin_colors, c) && TEST_HARD_REG_BIT (free_colors, c) && HARD_REGNO_MODE_OK (c, mode)) { int i, size; size = hard_regno_nregs[c][mode]; for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++); if (i != size) { c += i; continue; } if (i == size) { if (size < 2 || (c & 1) == 0) { if (INV_REG_ALLOC_ORDER (c) < pref_reg_order) { pref_reg = c; pref_reg_order = INV_REG_ALLOC_ORDER (c); } } else if (INV_REG_ALLOC_ORDER (c) < last_resort_reg_order) { last_resort_reg = c; last_resort_reg_order = INV_REG_ALLOC_ORDER (c); } } else c += i; } return pref_reg >= 0 ? pref_reg : last_resort_reg; } /* Similar to get_free_reg(), but first search in colors provided by BIAS _and_ PREFER_COLORS, then in BIAS alone, then in PREFER_COLORS alone, and only then for any free color. If flag_ra_biased is zero only do the last two steps. */ static int get_biased_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET bias, HARD_REG_SET prefer_colors, HARD_REG_SET free_colors, enum machine_mode mode) { int c = -1; HARD_REG_SET s; if (flag_ra_biased) { COPY_HARD_REG_SET (s, dont_begin_colors); IOR_COMPL_HARD_REG_SET (s, bias); IOR_COMPL_HARD_REG_SET (s, prefer_colors); c = get_free_reg (s, free_colors, mode); if (c >= 0) return c; COPY_HARD_REG_SET (s, dont_begin_colors); IOR_COMPL_HARD_REG_SET (s, bias); c = get_free_reg (s, free_colors, mode); if (c >= 0) return c; } COPY_HARD_REG_SET (s, dont_begin_colors); IOR_COMPL_HARD_REG_SET (s, prefer_colors); c = get_free_reg (s, free_colors, mode); if (c >= 0) return c; c = get_free_reg (dont_begin_colors, free_colors, mode); return c; } /* Counts the number of non-overlapping bitblocks of length LEN in FREE_COLORS. */ static int count_long_blocks (HARD_REG_SET free_colors, int len) { int i, j; int count = 0; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { if (!TEST_HARD_REG_BIT (free_colors, i)) continue; for (j = 1; j < len; j++) if (!TEST_HARD_REG_BIT (free_colors, i + j)) break; /* Bits [i .. i+j-1] are free. */ if (j == len) count++; i += j - 1; } return count; } /* Given a hardreg set S, return a string representing it. Either as 0/1 string, or as hex value depending on the implementation of hardreg sets. Note that this string is statically allocated. */ static char * hardregset_to_string (HARD_REG_SET s) { static char string[/*FIRST_PSEUDO_REGISTER + 30*/1024]; #if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDE_INT sprintf (string, HOST_WIDE_INT_PRINT_HEX, s); #else char *c = string; int i,j; c += sprintf (c, "{ "); for (i = 0;i < HARD_REG_SET_LONGS; i++) { for (j = 0; j < HOST_BITS_PER_WIDE_INT; j++) c += sprintf (c, "%s", ( 1 << j) & s[i] ? "1" : "0"); c += sprintf (c, "%s", i ? ", " : ""); } c += sprintf (c, " }"); #endif return string; } /* For WEB, look at its already colored neighbors, and calculate the set of hardregs which is not allowed as color for WEB. Place that set int *RESULT. Note that the set of forbidden begin colors is not the same as all colors taken up by neighbors. E.g. suppose two DImode webs, but only the lo-part from one conflicts with the hipart from the other, and suppose the other gets colors 2 and 3 (it needs two SImode hardregs). Now the first can take also color 1 or 2, although in those cases there's a partial overlap. 
Only 3 can't be used as begin color. */ static void calculate_dont_begin (struct web *web, HARD_REG_SET *result) { struct conflict_link *wl; HARD_REG_SET dont_begin; /* The bits set in dont_begin correspond to the hardregs, at which WEB may not begin. This differs from the set of _all_ hardregs which are taken by WEB's conflicts in the presence of wide webs, where only some parts conflict with others. */ CLEAR_HARD_REG_SET (dont_begin); for (wl = web->conflict_list; wl; wl = wl->next) { struct web *w; struct web *ptarget = alias (wl->t); struct sub_conflict *sl = wl->sub; w = sl ? sl->t : wl->t; while (w) { if (ptarget->type == COLORED || ptarget->type == PRECOLORED) { struct web *source = (sl) ? sl->s : web; unsigned int tsize = hard_regno_nregs[ptarget->color] [GET_MODE (w->orig_x)]; /* ssize is only a first guess for the size. */ unsigned int ssize = hard_regno_nregs[ptarget->color][GET_MODE (source->orig_x)]; unsigned int tofs = 0; unsigned int sofs = 0; /* C1 and C2 can become negative, so unsigned would be wrong. */ int c1, c2; if (SUBWEB_P (w) && GET_MODE_SIZE (GET_MODE (w->orig_x)) >= UNITS_PER_WORD) tofs = (SUBREG_BYTE (w->orig_x) / UNITS_PER_WORD); if (SUBWEB_P (source) && GET_MODE_SIZE (GET_MODE (source->orig_x)) >= UNITS_PER_WORD) sofs = (SUBREG_BYTE (source->orig_x) / UNITS_PER_WORD); c1 = ptarget->color + tofs - sofs - ssize + 1; c2 = ptarget->color + tofs + tsize - 1 - sofs; if (c2 >= 0) { if (c1 < 0) c1 = 0; /* Because ssize was only guessed above, which influenced our begin color (c1), we need adjustment, if for that color another size would be needed. This is done by moving c1 to a place, where the last of sources hardregs does not overlap the first of targets colors. */ while (c1 + sofs + hard_regno_nregs[c1][GET_MODE (source->orig_x)] - 1 < ptarget->color + tofs) c1++; while (c1 > 0 && c1 + sofs + hard_regno_nregs[c1][GET_MODE (source->orig_x)] - 1 > ptarget->color + tofs) c1--; for (; c1 <= c2; c1++) SET_HARD_REG_BIT (dont_begin, c1); } } /* The next if() only gets true, if there was no wl->sub at all, in which case we are only making one go through this loop with W being a whole web. */ if (!sl) break; sl = sl->next; w = sl ? sl->t : NULL; } } COPY_HARD_REG_SET (*result, dont_begin); } /* Try to assign a color to WEB. If HARD if nonzero, we try many tricks to get it one color, including respilling already colored neighbors. We also trie very hard, to not constrain the uncolored non-spill neighbors, which need more hardregs than we. Consider a situation, 2 hardregs free for us (0 and 1), and one of our neighbors needs 2 hardregs, and only conflicts with us. There are 3 hardregs at all. Now a simple minded method might choose 1 as color for us. Then our neighbor has two free colors (0 and 2) as it should, but they are not consecutive, so coloring it later would fail. This leads to nasty problems on register starved machines, so we try to avoid this. */ static void colorize_one_web (struct web *web, int hard) { struct conflict_link *wl; HARD_REG_SET colors, dont_begin; int c = -1; int bestc = -1; int neighbor_needs= 0; struct web *fats_parent = NULL; int num_fat = 0; int long_blocks = 0; int best_long_blocks = -1; HARD_REG_SET fat_colors; HARD_REG_SET bias; CLEAR_HARD_REG_SET (fat_colors); if (web->regno >= max_normal_pseudo) hard = 0; /* First we want to know the colors at which we can't begin. */ calculate_dont_begin (web, &dont_begin); CLEAR_HARD_REG_SET (bias); /* Now setup the set of colors used by our neighbors neighbors, and search the biggest noncolored neighbor. 
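The point of the bias, on a tiny hypothetical example: let T be an
uncolored neighbor of ours and Y an already colored neighbor of T which
does not conflict with us.  Y's color is lost for T anyway, so if we
can reuse that same color ourselves (it will show up in the bias set
built below), we constrain T by nothing it had not already lost,
instead of burning one of the colors T could still take.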
*/ neighbor_needs = web->add_hardregs + 1; for (wl = web->conflict_list; wl; wl = wl->next) { struct web *w; struct web *ptarget = alias (wl->t); struct sub_conflict *sl = wl->sub; IOR_HARD_REG_SET (bias, ptarget->bias_colors); w = sl ? sl->t : wl->t; if (ptarget->type != COLORED && ptarget->type != PRECOLORED && !ptarget->was_spilled) while (w) { if (find_web_for_subweb (w)->type != COALESCED && w->add_hardregs >= neighbor_needs) { neighbor_needs = w->add_hardregs; fats_parent = ptarget; num_fat++; } if (!sl) break; sl = sl->next; w = sl ? sl->t : NULL; } } ra_debug_msg (DUMP_COLORIZE, "colorize web %d [don't begin at %s]", web->id, hardregset_to_string (dont_begin)); /* If there are some fat neighbors, remember their usable regs, and how many blocks are free in it for that neighbor. */ if (num_fat) { COPY_HARD_REG_SET (fat_colors, fats_parent->usable_regs); long_blocks = count_long_blocks (fat_colors, neighbor_needs + 1); } /* We break out, if we found a color which doesn't constrain neighbors, or if we can't find any colors. */ while (1) { HARD_REG_SET call_clobbered; /* Here we choose a hard-reg for the current web. For non spill temporaries we first search in the hardregs for it's preferred class, then, if we found nothing appropriate, in those of the alternate class. For spill temporaries we only search in usable_regs of this web (which is probably larger than that of the preferred or alternate class). All searches first try to find a non-call-clobbered hard-reg. XXX this should be more finegraned... First look into preferred non-callclobbered hardregs, then _if_ the web crosses calls, in alternate non-cc hardregs, and only _then_ also in preferred cc hardregs (and alternate ones). Currently we don't track the number of calls crossed for webs. We should. */ if (web->use_my_regs) { COPY_HARD_REG_SET (colors, web->usable_regs); AND_HARD_REG_SET (colors, usable_regs[reg_preferred_class (web->regno)]); } else COPY_HARD_REG_SET (colors, usable_regs[reg_preferred_class (web->regno)]); #ifdef CANNOT_CHANGE_MODE_CLASS if (web->mode_changed) AND_COMPL_HARD_REG_SET (colors, invalid_mode_change_regs); #endif COPY_HARD_REG_SET (call_clobbered, colors); AND_HARD_REG_SET (call_clobbered, call_used_reg_set); /* If this web got a color in the last pass, try to give it the same color again. This will to much better colorization down the line, as we spilled for a certain coloring last time. 
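(As can be seen from the use just below, old_color stores that color
biased by one, so zero means the web had no color in the previous
pass.)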
*/ if (web->old_color) { c = web->old_color - 1; if (!color_usable_p (c, dont_begin, colors, PSEUDO_REGNO_MODE (web->regno))) c = -1; } else c = -1; if (c < 0) c = get_biased_reg (dont_begin, bias, web->prefer_colors, call_clobbered, PSEUDO_REGNO_MODE (web->regno)); if (c < 0) c = get_biased_reg (dont_begin, bias, web->prefer_colors, colors, PSEUDO_REGNO_MODE (web->regno)); if (c < 0) { if (web->use_my_regs) IOR_HARD_REG_SET (colors, web->usable_regs); else IOR_HARD_REG_SET (colors, usable_regs [reg_alternate_class (web->regno)]); #ifdef CANNOT_CHANGE_MODE_CLASS if (web->mode_changed) AND_COMPL_HARD_REG_SET (colors, invalid_mode_change_regs); #endif COPY_HARD_REG_SET (call_clobbered, colors); AND_HARD_REG_SET (call_clobbered, call_used_reg_set); c = get_biased_reg (dont_begin, bias, web->prefer_colors, call_clobbered, PSEUDO_REGNO_MODE (web->regno)); if (c < 0) c = get_biased_reg (dont_begin, bias, web->prefer_colors, colors, PSEUDO_REGNO_MODE (web->regno)); } if (c < 0) break; if (bestc < 0) bestc = c; /* If one of the yet uncolored neighbors, which is not a potential spill needs a block of hardregs be sure, not to destroy such a block by coloring one reg in the middle. */ if (num_fat) { int i; int new_long; HARD_REG_SET colors1; COPY_HARD_REG_SET (colors1, fat_colors); for (i = 0; i < 1 + web->add_hardregs; i++) CLEAR_HARD_REG_BIT (colors1, c + i); new_long = count_long_blocks (colors1, neighbor_needs + 1); /* If we changed the number of long blocks, and it's now smaller than needed, we try to avoid this color. */ if (long_blocks != new_long && new_long < num_fat) { if (new_long > best_long_blocks) { best_long_blocks = new_long; bestc = c; } SET_HARD_REG_BIT (dont_begin, c); ra_debug_msg (DUMP_COLORIZE, " avoid %d", c); } else /* We found a color which doesn't destroy a block. */ break; } /* If we havee no fat neighbors, the current color won't become "better", so we've found it. */ else break; } ra_debug_msg (DUMP_COLORIZE, " --> got %d", c < 0 ? bestc : c); if (bestc >= 0 && c < 0 && !web->was_spilled) { /* This is a non-potential-spill web, which got a color, which did destroy a hardreg block for one of it's neighbors. We color this web anyway and hope for the best for the neighbor, if we are a spill temp. */ if (1 || web->spill_temp) c = bestc; ra_debug_msg (DUMP_COLORIZE, " [constrains neighbors]"); } ra_debug_msg (DUMP_COLORIZE, "\n"); if (c < 0) { /* Guard against a simplified node being spilled. */ /* Don't abort. This can happen, when e.g. enough registers are available in colors, but they are not consecutive. This is a very serious issue if this web is a short live one, because even if we spill this one here, the situation won't become better in the next iteration. It probably will have the same conflicts, those will have the same colors, and we would come here again, for all parts, in which this one gets split by the spill. This can result in endless iteration spilling the same register again and again. That's why we try to find a neighbor, which spans more instructions that ourself, and got a color, and try to spill _that_. if (DLIST_WEB (d)->was_spilled < 0) abort (); */ if (hard && (!web->was_spilled || web->spill_temp)) { unsigned int loop; struct web *try = NULL; struct web *candidates[8]; ra_debug_msg (DUMP_COLORIZE, " *** %d spilled, although %s ***\n", web->id, web->spill_temp ? 
"spilltemp" : "non-spill"); /* We make multiple passes over our conflicts, first trying to spill those webs, which only got a color by chance, but were potential spill ones, and if that isn't enough, in a second pass also to spill normal colored webs. If we still didn't find a candidate, but we are a spill-temp, we make a third pass and include also webs, which were targets for coalescing, and spill those. */ memset (candidates, 0, sizeof candidates); #define set_cand(i, w) \ do { \ if (!candidates[(i)] \ || (candidates[(i)]->spill_cost < (w)->spill_cost)) \ candidates[(i)] = (w); \ } while (0) for (wl = web->conflict_list; wl; wl = wl->next) { struct web *w = wl->t; struct web *aw = alias (w); /* If we are a spill-temp, we also look at webs coalesced to precolored ones. Otherwise we only look at webs which themselves were colored, or coalesced to one. */ if (aw->type == PRECOLORED && w != aw && web->spill_temp && flag_ra_optimistic_coalescing) { if (!w->spill_temp) set_cand (4, w); else if (web->spill_temp == 2 && w->spill_temp == 2 && w->spill_cost < web->spill_cost) set_cand (5, w); else if (web->spill_temp != 2 && (w->spill_temp == 2 || w->spill_cost < web->spill_cost)) set_cand (6, w); continue; } if (aw->type != COLORED) continue; if (w->type == COLORED && !w->spill_temp && !w->is_coalesced && w->was_spilled) { if (w->spill_cost < web->spill_cost) set_cand (0, w); else if (web->spill_temp) set_cand (1, w); } if (w->type == COLORED && !w->spill_temp && !w->is_coalesced && !w->was_spilled) { if (w->spill_cost < web->spill_cost) set_cand (2, w); else if (web->spill_temp && web->spill_temp != 2) set_cand (3, w); } if (web->spill_temp) { if (w->type == COLORED && w->spill_temp == 2 && !w->is_coalesced && (w->spill_cost < web->spill_cost || web->spill_temp != 2)) set_cand (4, w); if (!aw->spill_temp) set_cand (5, aw); if (aw->spill_temp == 2 && (aw->spill_cost < web->spill_cost || web->spill_temp != 2)) set_cand (6, aw); /* For boehm-gc/misc.c. If we are a difficult spilltemp, also coalesced neighbors are a chance, _even_ if they too are spilltemps. At least their coalescing can be broken up, which may be reset usable_regs, and makes it easier colorable. */ if (web->spill_temp != 2 && aw->is_coalesced && flag_ra_optimistic_coalescing) set_cand (7, aw); } } for (loop = 0; try == NULL && loop < 8; loop++) if (candidates[loop]) try = candidates[loop]; #undef set_cand if (try) { int old_c = try->color; if (try->type == COALESCED) { if (alias (try)->type != PRECOLORED) abort (); ra_debug_msg (DUMP_COLORIZE, " breaking alias %d -> %d\n", try->id, alias (try)->id); break_precolored_alias (try); colorize_one_web (web, hard); } else { remove_list (try->dlink, &WEBS(COLORED)); put_web (try, SPILLED); /* Now try to colorize us again. Can recursively make other webs also spill, until there are no more unspilled neighbors. */ ra_debug_msg (DUMP_COLORIZE, " trying to spill %d\n", try->id); colorize_one_web (web, hard); if (web->type != COLORED) { /* We tried recursively to spill all already colored neighbors, but we are still uncolorable. So it made no sense to spill those neighbors. Recolor them. 
*/ remove_list (try->dlink, &WEBS(SPILLED)); put_web (try, COLORED); try->color = old_c; ra_debug_msg (DUMP_COLORIZE, " spilling %d was useless\n", try->id); } else { ra_debug_msg (DUMP_COLORIZE, " to spill %d was a good idea\n", try->id); remove_list (try->dlink, &WEBS(SPILLED)); if (try->was_spilled) colorize_one_web (try, 0); else colorize_one_web (try, hard - 1); } } } else /* No more chances to get a color, so give up hope and spill us. */ put_web (web, SPILLED); } else put_web (web, SPILLED); } else { put_web (web, COLORED); web->color = c; if (flag_ra_biased) { int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)]; for (wl = web->conflict_list; wl; wl = wl->next) { struct web *ptarget = alias (wl->t); int i; for (i = 0; i < nregs; i++) SET_HARD_REG_BIT (ptarget->bias_colors, c + i); } } } if (web->regno >= max_normal_pseudo && web->type == SPILLED) { web->color = an_unusable_color; remove_list (web->dlink, &WEBS(SPILLED)); put_web (web, COLORED); } if (web->type == SPILLED && flag_ra_optimistic_coalescing && web->is_coalesced) { ra_debug_msg (DUMP_COLORIZE, "breaking aliases to web %d:", web->id); restore_conflicts_from_coalesce (web); break_aliases_to_web (web); insert_coalesced_conflicts (); ra_debug_msg (DUMP_COLORIZE, "\n"); remove_list (web->dlink, &WEBS(SPILLED)); put_web (web, SELECT); web->color = -1; } } /* Assign the colors to all nodes on the select stack. And update the colors of coalesced webs. */ static void assign_colors (void) { struct dlist *d; while (WEBS(SELECT)) { d = pop_list (&WEBS(SELECT)); colorize_one_web (DLIST_WEB (d), 1); } for (d = WEBS(COALESCED); d; d = d->next) { struct web *a = alias (DLIST_WEB (d)); DLIST_WEB (d)->color = a->color; } } /* WEB is a spilled web. Look if we can improve the cost of the graph, by coloring WEB, even if we then need to spill some of it's neighbors. For this we calculate the cost for each color C, that results when we _would_ give WEB color C (i.e. the cost of the then spilled neighbors). If the lowest cost among them is smaller than the spillcost of WEB, we do that recoloring, and instead spill the neighbors. This can sometime help, when due to irregularities in register file, and due to multi word pseudos, the colorization is suboptimal. But be aware, that currently this pass is quite slow. */ static void try_recolor_web (struct web *web) { struct conflict_link *wl; unsigned HOST_WIDE_INT *cost_neighbors; unsigned int *min_color; int newcol, c; HARD_REG_SET precolored_neighbors, spill_temps; HARD_REG_SET possible_begin, wide_seen; cost_neighbors = xcalloc (FIRST_PSEUDO_REGISTER, sizeof (cost_neighbors[0])); /* For each hard-regs count the number of preceding hardregs, which would overlap this color, if used in WEB's mode. 
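A made-up example: if WEB needs two hardregs and hardregs 0..5 are all
usable for it (and valid for its mode), the loop below sets
possible_begin to {0,...,4} and min_color[0..5] to 1, 1, 2, 3, 4, 5.
That is, min_color[r] - 1 is the lowest possible begin color whose
block would cover hardreg r, with zero still meaning that no possible
begin covers r at all.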
*/ min_color = xcalloc (FIRST_PSEUDO_REGISTER, sizeof (int)); CLEAR_HARD_REG_SET (possible_begin); for (c = 0; c < FIRST_PSEUDO_REGISTER; c++) { int i, nregs; if (!HARD_REGNO_MODE_OK (c, GET_MODE (web->orig_x))) continue; nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)]; for (i = 0; i < nregs; i++) if (!TEST_HARD_REG_BIT (web->usable_regs, c + i)) break; if (i < nregs || nregs == 0) continue; SET_HARD_REG_BIT (possible_begin, c); for (; nregs--;) if (!min_color[c + nregs]) min_color[c + nregs] = 1 + c; } CLEAR_HARD_REG_SET (precolored_neighbors); CLEAR_HARD_REG_SET (spill_temps); CLEAR_HARD_REG_SET (wide_seen); for (wl = web->conflict_list; wl; wl = wl->next) { HARD_REG_SET dont_begin; struct web *web2 = alias (wl->t); struct conflict_link *nn; int c1, c2; int wide_p = 0; if (wl->t->type == COALESCED || web2->type != COLORED) { if (web2->type == PRECOLORED) { c1 = min_color[web2->color]; c1 = (c1 == 0) ? web2->color : (c1 - 1); c2 = web2->color; for (; c1 <= c2; c1++) SET_HARD_REG_BIT (precolored_neighbors, c1); } continue; } /* Mark colors for which some wide webs are involved. For those the independent sets are not simply one-node graphs, so they can't be recolored independent from their neighborhood. This means, that our cost calculation can be incorrect (assuming it can avoid spilling a web because it thinks some colors are available, although it's neighbors which itself need recoloring might take away exactly those colors). */ if (web2->add_hardregs) wide_p = 1; for (nn = web2->conflict_list; nn && !wide_p; nn = nn->next) if (alias (nn->t)->add_hardregs) wide_p = 1; calculate_dont_begin (web2, &dont_begin); c1 = min_color[web2->color]; /* Note that min_color[] contains 1-based values (zero means undef). */ c1 = c1 == 0 ? web2->color : (c1 - 1); c2 = web2->color + hard_regno_nregs[web2->color][GET_MODE (web2->orig_x)] - 1; for (; c1 <= c2; c1++) if (TEST_HARD_REG_BIT (possible_begin, c1)) { int nregs; HARD_REG_SET colors; nregs = hard_regno_nregs[c1][GET_MODE (web->orig_x)]; COPY_HARD_REG_SET (colors, web2->usable_regs); for (; nregs--;) CLEAR_HARD_REG_BIT (colors, c1 + nregs); if (wide_p) SET_HARD_REG_BIT (wide_seen, c1); if (get_free_reg (dont_begin, colors, GET_MODE (web2->orig_x)) < 0) { if (web2->spill_temp) SET_HARD_REG_BIT (spill_temps, c1); else cost_neighbors[c1] += web2->spill_cost; } } } newcol = -1; for (c = 0; c < FIRST_PSEUDO_REGISTER; c++) if (TEST_HARD_REG_BIT (possible_begin, c) && !TEST_HARD_REG_BIT (precolored_neighbors, c) && !TEST_HARD_REG_BIT (spill_temps, c) && (newcol == -1 || cost_neighbors[c] < cost_neighbors[newcol])) newcol = c; if (newcol >= 0 && cost_neighbors[newcol] < web->spill_cost) { int nregs = hard_regno_nregs[newcol][GET_MODE (web->orig_x)]; unsigned HOST_WIDE_INT cost = 0; int *old_colors; struct conflict_link *wl_next; ra_debug_msg (DUMP_COLORIZE, "try to set web %d to color %d\n", web->id, newcol); remove_list (web->dlink, &WEBS(SPILLED)); put_web (web, COLORED); web->color = newcol; old_colors = xcalloc (num_webs, sizeof (int)); for (wl = web->conflict_list; wl; wl = wl_next) { struct web *web2 = alias (wl->t); /* If web2 is a coalesce-target, and will become spilled below in colorize_one_web(), and the current conflict wl between web and web2 was only the result of that coalescing this conflict will be deleted, making wl invalid. So save the next conflict right now. Note that if web2 has indeed such state, then wl->next can not be deleted in this iteration. 
*/ wl_next = wl->next; if (web2->type == COLORED) { int nregs2 = hard_regno_nregs[web2->color][GET_MODE (web2->orig_x)]; if (web->color >= web2->color + nregs2 || web2->color >= web->color + nregs) continue; old_colors[web2->id] = web2->color + 1; web2->color = -1; remove_list (web2->dlink, &WEBS(COLORED)); web2->type = SELECT; /* Allow webs to be spilled. */ if (web2->spill_temp == 0 || web2->spill_temp == 2) web2->was_spilled = 1; colorize_one_web (web2, 1); if (web2->type == SPILLED) cost += web2->spill_cost; } } /* The actual cost may be smaller than the guessed one, because partial conflicts could result in some conflicting webs getting a color, where we assumed it must be spilled. See the comment above what happens, when wide webs are involved, and why in that case there might actually be some webs spilled although thought to be colorable. */ if (cost > cost_neighbors[newcol] && nregs == 1 && !TEST_HARD_REG_BIT (wide_seen, newcol)) abort (); /* But if the new spill-cost is higher than our own, then really loose. Respill us and recolor neighbors as before. */ if (cost > web->spill_cost) { ra_debug_msg (DUMP_COLORIZE, "reset coloring of web %d, too expensive\n", web->id); remove_list (web->dlink, &WEBS(COLORED)); web->color = -1; put_web (web, SPILLED); for (wl = web->conflict_list; wl; wl = wl->next) { struct web *web2 = alias (wl->t); if (old_colors[web2->id]) { if (web2->type == SPILLED) { remove_list (web2->dlink, &WEBS(SPILLED)); web2->color = old_colors[web2->id] - 1; put_web (web2, COLORED); } else if (web2->type == COLORED) web2->color = old_colors[web2->id] - 1; else if (web2->type == SELECT) /* This means, that WEB2 once was a part of a coalesced web, which got spilled in the above colorize_one_web() call, and whose parts then got split and put back onto the SELECT stack. As the cause for that splitting (the coloring of WEB) was worthless, we should again coalesce the parts, as they were before. For now we simply leave them SELECTed, for our caller to take care. */ ; else abort (); } } } free (old_colors); } free (min_color); free (cost_neighbors); } /* This ensures that all conflicts of coalesced webs are seen from the webs coalesced into. combine() only adds the conflicts which at the time of combining were not already SELECTed or COALESCED to not destroy num_conflicts. Here we add all remaining conflicts and thereby destroy num_conflicts. This should be used when num_conflicts isn't used anymore, e.g. on a completely colored graph. */ static void insert_coalesced_conflicts (void) { struct dlist *d; for (d = WEBS(COALESCED); 0 && d; d = d->next) { struct web *web = DLIST_WEB (d); struct web *aweb = alias (web); struct conflict_link *wl; for (wl = web->conflict_list; wl; wl = wl->next) { struct web *tweb = aweb; int i; int nregs = 1 + web->add_hardregs; if (aweb->type == PRECOLORED) nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)]; for (i = 0; i < nregs; i++) { if (aweb->type == PRECOLORED) tweb = hardreg2web[i + aweb->color]; /* There might be some conflict edges laying around where the usable_regs don't intersect. This can happen when first some webs were coalesced and conflicts propagated, then some combining narrowed usable_regs and further coalescing ignored those conflicts. Now there are some edges to COALESCED webs but not to it's alias. So abort only when they really should conflict. 
*/ if ((!(tweb->type == PRECOLORED || TEST_BIT (sup_igraph, tweb->id * num_webs + wl->t->id)) || !(wl->t->type == PRECOLORED || TEST_BIT (sup_igraph, wl->t->id * num_webs + tweb->id))) && hard_regs_intersect_p (&tweb->usable_regs, &wl->t->usable_regs)) abort (); /*if (wl->sub == NULL) record_conflict (tweb, wl->t); else { struct sub_conflict *sl; for (sl = wl->sub; sl; sl = sl->next) record_conflict (tweb, sl->t); }*/ if (aweb->type != PRECOLORED) break; } } } } /* A function suitable to pass to qsort(). Compare the spill costs of webs W1 and W2. When used by qsort, this would order webs with largest cost first. */ static int comp_webs_maxcost (const void *w1, const void *w2) { struct web *web1 = *(struct web **)w1; struct web *web2 = *(struct web **)w2; if (web1->spill_cost > web2->spill_cost) return -1; else if (web1->spill_cost < web2->spill_cost) return 1; else return 0; } /* This tries to recolor all spilled webs. See try_recolor_web() how this is done. This just calls it for each spilled web. */ static void recolor_spills (void) { unsigned int i, num; struct web **order2web; num = num_webs - num_subwebs; order2web = xmalloc (num * sizeof (order2web[0])); for (i = 0; i < num; i++) { order2web[i] = id2web[i]; /* If we aren't breaking aliases, combine() wasn't merging the spill_costs. So do that here to have sane measures. */ if (!flag_ra_merge_spill_costs && id2web[i]->type == COALESCED) alias (id2web[i])->spill_cost += id2web[i]->spill_cost; } qsort (order2web, num, sizeof (order2web[0]), comp_webs_maxcost); insert_coalesced_conflicts (); dump_graph_cost (DUMP_COSTS, "before spill-recolor"); for (i = 0; i < num; i++) { struct web *web = order2web[i]; if (web->type == SPILLED) try_recolor_web (web); } /* It might have been decided in try_recolor_web() (in colorize_one_web()) that a coalesced web should be spilled, so it was put on the select stack. Those webs need recoloring again, and all remaining coalesced webs might need their color updated, so simply call assign_colors() again. */ assign_colors (); free (order2web); } /* This checks the current color assignment for obvious errors, like two conflicting webs overlapping in colors, or the used colors not being in usable regs. */ static void check_colors (void) { unsigned int i; for (i = 0; i < num_webs - num_subwebs; i++) { struct web *web = id2web[i]; struct web *aweb = alias (web); struct conflict_link *wl; int nregs, c; if (aweb->type == SPILLED || web->regno >= max_normal_pseudo) continue; else if (aweb->type == COLORED) nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)]; else if (aweb->type == PRECOLORED) nregs = 1; else abort (); /* The color must be valid for the original usable_regs. */ for (c = 0; c < nregs; c++) if (!TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c)) abort (); /* Search the original (pre-coalesce) conflict list. In the current one some imprecise conflicts may be noted (due to combine() or insert_coalesced_conflicts() relocating partial conflicts) making it look like some wide webs are in conflict and having the same color. */ wl = (web->have_orig_conflicts ? 
web->orig_conflict_list : web->conflict_list); for (; wl; wl = wl->next) if (wl->t->regno >= max_normal_pseudo) continue; else if (!wl->sub) { struct web *web2 = alias (wl->t); int nregs2; if (web2->type == COLORED) nregs2 = hard_regno_nregs[web2->color][GET_MODE (web2->orig_x)]; else if (web2->type == PRECOLORED) nregs2 = 1; else continue; if (aweb->color >= web2->color + nregs2 || web2->color >= aweb->color + nregs) continue; abort (); } else { struct sub_conflict *sl; int scol = aweb->color; int tcol = alias (wl->t)->color; if (alias (wl->t)->type == SPILLED) continue; for (sl = wl->sub; sl; sl = sl->next) { int ssize = hard_regno_nregs[scol][GET_MODE (sl->s->orig_x)]; int tsize = hard_regno_nregs[tcol][GET_MODE (sl->t->orig_x)]; int sofs = 0, tofs = 0; if (SUBWEB_P (sl->t) && GET_MODE_SIZE (GET_MODE (sl->t->orig_x)) >= UNITS_PER_WORD) tofs = (SUBREG_BYTE (sl->t->orig_x) / UNITS_PER_WORD); if (SUBWEB_P (sl->s) && GET_MODE_SIZE (GET_MODE (sl->s->orig_x)) >= UNITS_PER_WORD) sofs = (SUBREG_BYTE (sl->s->orig_x) / UNITS_PER_WORD); if ((tcol + tofs >= scol + sofs + ssize) || (scol + sofs >= tcol + tofs + tsize)) continue; abort (); } } } } /* WEB was a coalesced web. Make it unaliased again, and put it back onto SELECT stack. */ static void unalias_web (struct web *web) { web->alias = NULL; web->is_coalesced = 0; web->color = -1; /* Well, initially everything was spilled, so it isn't incorrect, that also the individual parts can be spilled. XXX this isn't entirely correct, as we also relaxed the spill_temp flag in combine(), which might have made components spill, although they were a short or spilltemp web. */ web->was_spilled = 1; remove_list (web->dlink, &WEBS(COALESCED)); /* Spilltemps must be colored right now (i.e. as early as possible), other webs can be deferred to the end (the code building the stack assumed that in this stage only one web was colored). */ if (web->spill_temp && web->spill_temp != 2) put_web (web, SELECT); else put_web_at_end (web, SELECT); } /* WEB is a _target_ for coalescing which got spilled. Break all aliases to WEB, and restore some of its member to the state they were before coalescing. Due to the suboptimal structure of the interference graph we need to go through all coalesced webs. Somewhen we'll change this to be more sane. */ static void break_aliases_to_web (struct web *web) { struct dlist *d, *d_next; if (web->type != SPILLED) abort (); for (d = WEBS(COALESCED); d; d = d_next) { struct web *other = DLIST_WEB (d); d_next = d->next; /* Beware: Don't use alias() here. We really want to check only one level of aliasing, i.e. only break up webs directly aliased to WEB, not also those aliased through other webs. */ if (other->alias == web) { unalias_web (other); ra_debug_msg (DUMP_COLORIZE, " %d", other->id); } } web->spill_temp = web->orig_spill_temp; web->spill_cost = web->orig_spill_cost; /* Beware: The following possibly widens usable_regs again. While it was narrower there might have been some conflicts added which got ignored because of non-intersecting hardregsets. All those conflicts would now matter again. Fortunately we only add conflicts when coalescing, which is also the time of narrowing. And we remove all those added conflicts again now that we unalias this web. Therefore this is safe to do. */ COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs); web->is_coalesced = 0; web->num_aliased = 0; web->was_spilled = 1; /* Reset is_coalesced flag for webs which itself are target of coalescing. It was cleared above if it was coalesced to WEB. 
*/ for (d = WEBS(COALESCED); d; d = d->next) DLIST_WEB (d)->alias->is_coalesced = 1; } /* WEB is a web coalesced into a precolored one. Break that alias, making WEB SELECTed again. Also restores the conflicts which resulted from initially coalescing both. */ static void break_precolored_alias (struct web *web) { struct web *pre = web->alias; struct conflict_link *wl; unsigned int c = pre->color; unsigned int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)]; if (pre->type != PRECOLORED) abort (); unalias_web (web); /* Now we need to look at each conflict X of WEB, if it conflicts with [PRE, PRE+nregs), and remove such conflicts, of X has not other conflicts, which are coalesced into those precolored webs. */ for (wl = web->conflict_list; wl; wl = wl->next) { struct web *x = wl->t; struct web *y; unsigned int i; struct conflict_link *wl2; struct conflict_link **pcl; HARD_REG_SET regs; if (!x->have_orig_conflicts) continue; /* First look at which colors can not go away, due to other coalesces still existing. */ CLEAR_HARD_REG_SET (regs); for (i = 0; i < nregs; i++) SET_HARD_REG_BIT (regs, c + i); for (wl2 = x->conflict_list; wl2; wl2 = wl2->next) if (wl2->t->type == COALESCED && alias (wl2->t)->type == PRECOLORED) CLEAR_HARD_REG_BIT (regs, alias (wl2->t)->color); /* Now also remove the colors of those conflicts which already were there before coalescing at all. */ for (wl2 = x->orig_conflict_list; wl2; wl2 = wl2->next) if (wl2->t->type == PRECOLORED) CLEAR_HARD_REG_BIT (regs, wl2->t->color); /* The colors now still set are those for which WEB was the last cause, i.e. those which can be removed. */ y = NULL; for (i = 0; i < nregs; i++) if (TEST_HARD_REG_BIT (regs, c + i)) { struct web *sub; y = hardreg2web[c + i]; RESET_BIT (sup_igraph, x->id * num_webs + y->id); RESET_BIT (sup_igraph, y->id * num_webs + x->id); RESET_BIT (igraph, igraph_index (x->id, y->id)); for (sub = x->subreg_next; sub; sub = sub->subreg_next) RESET_BIT (igraph, igraph_index (sub->id, y->id)); } if (!y) continue; pcl = &(x->conflict_list); while (*pcl) { struct web *y = (*pcl)->t; if (y->type != PRECOLORED || !TEST_HARD_REG_BIT (regs, y->color)) pcl = &((*pcl)->next); else *pcl = (*pcl)->next; } } } /* WEB is a spilled web which was target for coalescing. Delete all interference edges which were added due to that coalescing, and break up the coalescing. */ static void restore_conflicts_from_coalesce (struct web *web) { struct conflict_link **pcl; struct conflict_link *wl; pcl = &(web->conflict_list); /* No original conflict list means no conflict was added at all after building the graph. So neither we nor any neighbors have conflicts due to this coalescing. */ if (!web->have_orig_conflicts) return; while (*pcl) { struct web *other = (*pcl)->t; for (wl = web->orig_conflict_list; wl; wl = wl->next) if (wl->t == other) break; if (wl) { /* We found this conflict also in the original list, so this was no new conflict. */ pcl = &((*pcl)->next); } else { /* This is a new conflict, so delete it from us and the neighbor. 
*/ struct conflict_link **opcl; struct conflict_link *owl; struct sub_conflict *sl; wl = *pcl; *pcl = wl->next; if (!other->have_orig_conflicts && other->type != PRECOLORED) abort (); for (owl = other->orig_conflict_list; owl; owl = owl->next) if (owl->t == web) break; if (owl) abort (); opcl = &(other->conflict_list); while (*opcl) { if ((*opcl)->t == web) { owl = *opcl; *opcl = owl->next; break; } else { opcl = &((*opcl)->next); } } if (!owl && other->type != PRECOLORED) abort (); /* wl and owl contain the edge data to be deleted. */ RESET_BIT (sup_igraph, web->id * num_webs + other->id); RESET_BIT (sup_igraph, other->id * num_webs + web->id); RESET_BIT (igraph, igraph_index (web->id, other->id)); for (sl = wl->sub; sl; sl = sl->next) RESET_BIT (igraph, igraph_index (sl->s->id, sl->t->id)); if (other->type != PRECOLORED) { for (sl = owl->sub; sl; sl = sl->next) RESET_BIT (igraph, igraph_index (sl->s->id, sl->t->id)); } } } /* We must restore usable_regs because record_conflict will use it. */ COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs); /* We might have deleted some conflicts above, which really are still there (diamond pattern coalescing). This is because we don't reference count interference edges but some of them were the result of different coalesces. */ for (wl = web->conflict_list; wl; wl = wl->next) if (wl->t->type == COALESCED) { struct web *tweb; for (tweb = wl->t->alias; tweb; tweb = tweb->alias) { if (wl->sub == NULL) record_conflict (web, tweb); else { struct sub_conflict *sl; for (sl = wl->sub; sl; sl = sl->next) { struct web *sweb = NULL; if (SUBWEB_P (sl->t)) sweb = find_subweb (tweb, sl->t->orig_x); if (!sweb) sweb = tweb; record_conflict (sl->s, sweb); } } if (tweb->type != COALESCED) break; } } } /* Repeatedly break aliases for spilled webs, which were target for coalescing, and recolorize the resulting parts. Do this as long as there are any spilled coalesce targets. */ static void break_coalesced_spills (void) { int changed = 0; while (1) { struct dlist *d; struct web *web; for (d = WEBS(SPILLED); d; d = d->next) if (DLIST_WEB (d)->is_coalesced) break; if (!d) break; changed = 1; web = DLIST_WEB (d); ra_debug_msg (DUMP_COLORIZE, "breaking aliases to web %d:", web->id); restore_conflicts_from_coalesce (web); break_aliases_to_web (web); /* WEB was a spilled web and isn't anymore. Everything coalesced to WEB is now SELECTed and might potentially get a color. If those other webs were itself targets of coalescing it might be that there are still some conflicts from aliased webs missing, because they were added in combine() right into the now SELECTed web. So we need to add those missing conflicts here. */ insert_coalesced_conflicts (); ra_debug_msg (DUMP_COLORIZE, "\n"); remove_list (d, &WEBS(SPILLED)); put_web (web, SELECT); web->color = -1; while (WEBS(SELECT)) { d = pop_list (&WEBS(SELECT)); colorize_one_web (DLIST_WEB (d), 1); } } if (changed) { struct dlist *d; for (d = WEBS(COALESCED); d; d = d->next) { struct web *a = alias (DLIST_WEB (d)); DLIST_WEB (d)->color = a->color; } } dump_graph_cost (DUMP_COSTS, "after alias-breaking"); } /* A structure for fast hashing of a pair of webs. Used to cumulate savings (from removing copy insns) for coalesced webs. All the pairs are also put into a single linked list. */ struct web_pair { struct web_pair *next_hash; struct web_pair *next_list; struct web *smaller; struct web *larger; unsigned int conflicts; unsigned HOST_WIDE_INT cost; }; /* The actual hash table. 
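Keys are unordered pairs of webs: add_web_pair_cost() below stores the
web with the smaller id in 'smaller' and hashes the pair as
(smaller->id * num_webs + larger->id) % WEB_PAIR_HASH_SIZE.  With a
made-up num_webs of 100, the pairs (7,3) and (3,7) both land in bucket
(3 * 100 + 7) % 8192 == 307, and repeated additions for the same pair
just accumulate their cost and conflict counts in that one entry.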
*/ #define WEB_PAIR_HASH_SIZE 8192 static struct web_pair *web_pair_hash[WEB_PAIR_HASH_SIZE]; static struct web_pair *web_pair_list; static unsigned int num_web_pairs; /* Clear the hash table of web pairs. */ static void init_web_pairs (void) { memset (web_pair_hash, 0, sizeof web_pair_hash); num_web_pairs = 0; web_pair_list = NULL; } /* Given two webs connected by a move with cost COST which together have CONFLICTS conflicts, add that pair to the hash table, or if already in, cumulate the costs and conflict number. */ static void add_web_pair_cost (struct web *web1, struct web *web2, unsigned HOST_WIDE_INT cost, unsigned int conflicts) { unsigned int hash; struct web_pair *p; if (web1->id > web2->id) { struct web *h = web1; web1 = web2; web2 = h; } hash = (web1->id * num_webs + web2->id) % WEB_PAIR_HASH_SIZE; for (p = web_pair_hash[hash]; p; p = p->next_hash) if (p->smaller == web1 && p->larger == web2) { p->cost += cost; p->conflicts += conflicts; return; } p = ra_alloc (sizeof *p); p->next_hash = web_pair_hash[hash]; p->next_list = web_pair_list; p->smaller = web1; p->larger = web2; p->conflicts = conflicts; p->cost = cost; web_pair_hash[hash] = p; web_pair_list = p; num_web_pairs++; } /* Suitable to be passed to qsort(). Sort web pairs so, that those with more conflicts and higher cost (which actually is a saving when the moves are removed) come first. */ static int comp_web_pairs (const void *w1, const void *w2) { struct web_pair *p1 = *(struct web_pair **)w1; struct web_pair *p2 = *(struct web_pair **)w2; if (p1->conflicts > p2->conflicts) return -1; else if (p1->conflicts < p2->conflicts) return 1; else if (p1->cost > p2->cost) return -1; else if (p1->cost < p2->cost) return 1; else return 0; } /* Given the list of web pairs, begin to combine them from the one with the most savings. */ static void sort_and_combine_web_pairs (int for_move) { unsigned int i; struct web_pair **sorted; struct web_pair *p; if (!num_web_pairs) return; sorted = xmalloc (num_web_pairs * sizeof (sorted[0])); for (p = web_pair_list, i = 0; p; p = p->next_list) sorted[i++] = p; if (i != num_web_pairs) abort (); qsort (sorted, num_web_pairs, sizeof (sorted[0]), comp_web_pairs); /* After combining one pair, we actually should adjust the savings of the other pairs, if they are connected to one of the just coalesced pair. Later. */ for (i = 0; i < num_web_pairs; i++) { struct web *w1, *w2; p = sorted[i]; w1 = alias (p->smaller); w2 = alias (p->larger); if (!for_move && (w1->type == PRECOLORED || w2->type == PRECOLORED)) continue; else if (w2->type == PRECOLORED) { struct web *h = w1; w1 = w2; w2 = h; } if (w1 != w2 && !TEST_BIT (sup_igraph, w1->id * num_webs + w2->id) && !TEST_BIT (sup_igraph, w2->id * num_webs + w1->id) && w2->type != PRECOLORED && hard_regs_intersect_p (&w1->usable_regs, &w2->usable_regs)) { if (w1->type != PRECOLORED || (w1->type == PRECOLORED && ok (w2, w1))) combine (w1, w2); else if (w1->type == PRECOLORED) SET_HARD_REG_BIT (w2->prefer_colors, w1->color); } } free (sorted); } /* Greedily coalesce all moves possible. Begin with the web pair giving the most saving if coalesced. 
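The 'saving' recorded per move below is the execution frequency of the
basic block containing the copy insn; several moves between the same
pair of webs accumulate in a single web_pair entry.  So, for instance
(frequencies made up), a copy inside a hot loop with frequency 1000 is
handled before a cold copy with frequency 1, and
sort_and_combine_web_pairs() re-checks for each pair that the possibly
already merged webs still do not conflict and still have intersecting
usable_regs before calling combine().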
*/ static void aggressive_coalesce (void) { struct dlist *d; struct move *m; init_web_pairs (); while ((d = pop_list (&mv_worklist)) != NULL) if ((m = DLIST_MOVE (d))) { struct web *s = alias (m->source_web); struct web *t = alias (m->target_web); if (t->type == PRECOLORED) { struct web *h = s; s = t; t = h; } if (s != t && t->type != PRECOLORED && !TEST_BIT (sup_igraph, s->id * num_webs + t->id) && !TEST_BIT (sup_igraph, t->id * num_webs + s->id)) { if ((s->type == PRECOLORED && ok (t, s)) || s->type != PRECOLORED) { put_move (m, MV_COALESCED); add_web_pair_cost (s, t, BLOCK_FOR_INSN (m->insn)->frequency, 0); } else if (s->type == PRECOLORED) /* It is !ok(t, s). But later when coloring the graph it might be possible to take that color. So we remember the preferred color to try that first. */ { put_move (m, CONSTRAINED); SET_HARD_REG_BIT (t->prefer_colors, s->color); } } else { put_move (m, CONSTRAINED); } } sort_and_combine_web_pairs (1); } /* This is the difference between optimistic coalescing and optimistic coalescing+. Extended coalesce tries to coalesce also non-conflicting nodes, not related by a move. The criteria here is, the one web must be a source, the other a destination of the same insn. This actually makes sense, as (because they are in the same insn) they share many of their neighbors, and if they are coalesced, reduce the number of conflicts of those neighbors by one. For this we sort the candidate pairs again according to savings (and this time also conflict number). This is also a comparatively slow operation, as we need to go through all insns, and for each insn, through all defs and uses. */ static void extended_coalesce_2 (void) { rtx insn; struct ra_insn_info info; unsigned int n; init_web_pairs (); for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && (info = insn_df[INSN_UID (insn)]).num_defs) for (n = 0; n < info.num_defs; n++) { struct web *dest = def2web[DF_REF_ID (info.defs[n])]; dest = alias (find_web_for_subweb (dest)); if (dest->type != PRECOLORED && dest->regno < max_normal_pseudo) { unsigned int n2; for (n2 = 0; n2 < info.num_uses; n2++) { struct web *source = use2web[DF_REF_ID (info.uses[n2])]; source = alias (find_web_for_subweb (source)); if (source->type != PRECOLORED && source != dest && source->regno < max_normal_pseudo /* Coalesced webs end up using the same REG rtx in emit_colors(). So we can only coalesce something of equal modes. */ && GET_MODE (source->orig_x) == GET_MODE (dest->orig_x) && !TEST_BIT (sup_igraph, dest->id * num_webs + source->id) && !TEST_BIT (sup_igraph, source->id * num_webs + dest->id) && hard_regs_intersect_p (&source->usable_regs, &dest->usable_regs)) add_web_pair_cost (dest, source, BLOCK_FOR_INSN (insn)->frequency, dest->num_conflicts + source->num_conflicts); } } } sort_and_combine_web_pairs (0); } /* Check if we forgot to coalesce some moves. */ static void check_uncoalesced_moves (void) { struct move_list *ml; struct move *m; for (ml = wl_moves; ml; ml = ml->next) if ((m = ml->move)) { struct web *s = alias (m->source_web); struct web *t = alias (m->target_web); if (t->type == PRECOLORED) { struct web *h = s; s = t; t = h; } if (s != t && m->type != CONSTRAINED /* Following can happen when a move was coalesced, but later broken up again. Then s!=t, but m is still MV_COALESCED. 
*/ && m->type != MV_COALESCED && t->type != PRECOLORED && ((s->type == PRECOLORED && ok (t, s)) || s->type != PRECOLORED) && !TEST_BIT (sup_igraph, s->id * num_webs + t->id) && !TEST_BIT (sup_igraph, t->id * num_webs + s->id)) abort (); } } /* The toplevel function in this file. Precondition is, that the interference graph is built completely by ra-build.c. This produces a list of spilled, colored and coalesced nodes. */ void ra_colorize_graph (struct df *df) { if (dump_file) dump_igraph (df); build_worklists (df); /* With optimistic coalescing we coalesce everything we can. */ if (flag_ra_optimistic_coalescing) { aggressive_coalesce (); extended_coalesce_2 (); } /* Now build the select stack. */ do { simplify (); if (mv_worklist) coalesce (); else if (WEBS(FREEZE)) freeze (); else if (WEBS(SPILL)) select_spill (); } while (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_FAT) || WEBS(SIMPLIFY_SPILL) || mv_worklist || WEBS(FREEZE) || WEBS(SPILL)); if (flag_ra_optimistic_coalescing) check_uncoalesced_moves (); /* Actually colorize the webs from the select stack. */ assign_colors (); check_colors (); dump_graph_cost (DUMP_COSTS, "initially"); if (flag_ra_break_aliases) break_coalesced_spills (); check_colors (); /* And try to improve the cost by recoloring spilled webs. */ recolor_spills (); dump_graph_cost (DUMP_COSTS, "after spill-recolor"); check_colors (); } /* Initialize this module. */ void ra_colorize_init (void) { /* FIXME: Choose spill heuristic for platform if we have one */ spill_heuristic = default_spill_heuristic; } /* Free all memory. (Note that we don't need to free any per pass memory). */ void ra_colorize_free_all (void) { struct dlist *d; while ((d = pop_list (&WEBS(FREE))) != NULL) put_web (DLIST_WEB (d), INITIAL); while ((d = pop_list (&WEBS(INITIAL))) != NULL) { struct web *web = DLIST_WEB (d); struct web *wnext; web->orig_conflict_list = NULL; web->conflict_list = NULL; for (web = web->subreg_next; web; web = wnext) { wnext = web->subreg_next; free (web); } free (DLIST_WEB (d)); } } /* vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: */ /* Graph coloring register allocator Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc. Contributed by Michael Matz and Daniel Berlin . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains various dumping and debug functions for the graph coloring register allocator. */ static void ra_print_rtx_1op (FILE *, rtx); static void ra_print_rtx_2op (FILE *, rtx); static void ra_print_rtx_3op (FILE *, rtx); static void ra_print_rtx_object (FILE *, rtx); /* The hardregs as names, for debugging. */ static const char *const reg_class_names1[] = REG_CLASS_NAMES; /* Print a message to the dump file, if debug_new_regalloc and LEVEL have any bits in common. */ void ra_debug_msg (unsigned int level, const char *format, ...) 
{ va_list ap; va_start (ap, format); if ((debug_new_regalloc & level) != 0 && dump_file != NULL) vfprintf (dump_file, format, ap); va_end (ap); } /* The following ra_print_xxx() functions print RTL expressions in concise infix form. If the mode can be seen from context it's left out. Most operators are represented by their graphical characters, e.g. LE as "<=". Unknown constructs are currently printed with print_inline_rtx(), which disrupts the nice layout. Currently only the inline asm things are written this way. */ /* Print rtx X, which is a one operand rtx (op:mode (Y)), as "op(Y)" to FILE. */ static void ra_print_rtx_1op (FILE *file, rtx x) { enum rtx_code code = GET_CODE (x); rtx op0 = XEXP (x, 0); switch (code) { case NEG: case NOT: fputs ((code == NEG) ? "-(" : "~(", file); ra_print_rtx (file, op0, 0); fputs (")", file); break; case HIGH: fputs ("hi(", file); ra_print_rtx (file, op0, 0); fputs (")", file); break; default: fprintf (file, "%s", GET_RTX_NAME (code)); if (GET_MODE (x) != VOIDmode) fprintf (file, ":%s(", GET_MODE_NAME (GET_MODE (x))); else fputs ("(", file); ra_print_rtx (file, op0, 0); fputs (")", file); break; } } /* Print rtx X, which is a two operand rtx (op:mode (Y) (Z)) as "(Y op Z)", if the operand is know, or as "op(Y, Z)", if not, to FILE. */ static void ra_print_rtx_2op (FILE *file, rtx x) { int infix = 1; const char *opname = "shitop"; enum rtx_code code = GET_CODE (x); rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); switch (code) { /* class '2' */ case COMPARE: opname = "?"; break; case MINUS: opname = "-"; break; case DIV: opname = "/"; break; case UDIV: opname = "u/"; break; case MOD: opname = "%"; break; case UMOD: opname = "u%"; break; case ASHIFT: opname = "<<"; break; case ASHIFTRT: opname = "a>>"; break; case LSHIFTRT: opname = "l>>"; break; /* class 'c' */ case PLUS: opname = "+"; break; case MULT: opname = "*"; break; case AND: opname = "&"; break; case IOR: opname = "|"; break; case XOR: opname = "^"; break; /* class '=' */ case NE: opname = "!="; break; case EQ: opname = "=="; break; case LTGT: opname = "<>"; break; /* class '<' */ case GE: opname = "s>="; break; case GT: opname = "s>"; break; case LE: opname = "s<="; break; case LT: opname = "s<"; break; case GEU: opname = "u>="; break; case GTU: opname = "u>"; break; case LEU: opname = "u<="; break; case LTU: opname = "u<"; break; default: infix = 0; opname = GET_RTX_NAME (code); break; } if (infix) { fputs ("(", file); ra_print_rtx (file, op0, 0); fprintf (file, " %s ", opname); ra_print_rtx (file, op1, 0); fputs (")", file); } else { fprintf (file, "%s(", opname); ra_print_rtx (file, op0, 0); fputs (", ", file); ra_print_rtx (file, op1, 0); fputs (")", file); } } /* Print rtx X, which a three operand rtx to FILE. I.e. X is either an IF_THEN_ELSE, or a bitmap operation. */ static void ra_print_rtx_3op (FILE *file, rtx x) { enum rtx_code code = GET_CODE (x); rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); rtx op2 = XEXP (x, 2); if (code == IF_THEN_ELSE) { ra_print_rtx (file, op0, 0); fputs (" ? ", file); ra_print_rtx (file, op1, 0); fputs (" : ", file); ra_print_rtx (file, op2, 0); } else { /* Bitmap-operation */ fprintf (file, "%s:%s(", GET_RTX_NAME (code), GET_MODE_NAME (GET_MODE (x))); ra_print_rtx (file, op0, 0); fputs (", ", file); ra_print_rtx (file, op1, 0); fputs (", ", file); ra_print_rtx (file, op2, 0); fputs (")", file); } } /* Print rtx X, which represents an object (class 'o', 'C', or some constructs of class 'x' (e.g. subreg)), to FILE. 
(reg XX) rtl is represented as "pXX", of XX was a pseudo, as "name" it name is the nonnull hardreg name, or as "hXX", if XX is a hardreg, whose name is NULL, or empty. */ static void ra_print_rtx_object (FILE *file, rtx x) { enum rtx_code code = GET_CODE (x); enum machine_mode mode = GET_MODE (x); switch (code) { case CONST_INT: fprintf (file, HOST_WIDE_INT_PRINT_DEC, XWINT (x, 0)); break; case CONST_DOUBLE: { int i, num = 0; const char *fmt = GET_RTX_FORMAT (code); fputs ("dbl(", file); for (i = 0; i < GET_RTX_LENGTH (code); i++) { if (num) fputs (", ", file); if (fmt[i] == 'e' && XEXP (x, i)) /* The MEM or other stuff */ { ra_print_rtx (file, XEXP (x, i), 0); num++; } else if (fmt[i] == 'w') { fprintf (file, HOST_WIDE_INT_PRINT_HEX, XWINT (x, i)); num++; } } break; } case CONST_STRING: fprintf (file, "\"%s\"", XSTR (x, 0)); break; case CONST: fputs ("const(", file); ra_print_rtx (file, XEXP (x, 0), 0); fputs (")", file); break; case PC: fputs ("pc", file); break; case REG: { int regno = REGNO (x); if (regno < FIRST_PSEUDO_REGISTER) { int i, nregs = hard_regno_nregs[regno][mode]; if (nregs > 1) fputs ("[", file); for (i = 0; i < nregs; i++) { if (i) fputs (", ", file); if (reg_names[regno+i] && *reg_names[regno + i]) fprintf (file, "%s", reg_names[regno + i]); else fprintf (file, "h%d", regno + i); } if (nregs > 1) fputs ("]", file); } else fprintf (file, "p%d", regno); break; } case SUBREG: { rtx sub = SUBREG_REG (x); int ofs = SUBREG_BYTE (x); if (REG_P (sub) && REGNO (sub) < FIRST_PSEUDO_REGISTER) { int regno = REGNO (sub); int i, nregs = hard_regno_nregs[regno][mode]; regno += subreg_regno_offset (regno, GET_MODE (sub), ofs, mode); if (nregs > 1) fputs ("[", file); for (i = 0; i < nregs; i++) { if (i) fputs (", ", file); if (reg_names[regno+i]) fprintf (file, "%s", reg_names[regno + i]); else fprintf (file, "h%d", regno + i); } if (nregs > 1) fputs ("]", file); } else { ra_print_rtx (file, sub, 0); fprintf (file, ":[%s+%d]", GET_MODE_NAME (mode), ofs); } break; } case SCRATCH: fputs ("scratch", file); break; case CONCAT: ra_print_rtx_2op (file, x); break; case HIGH: ra_print_rtx_1op (file, x); break; case LO_SUM: fputs ("(", file); ra_print_rtx (file, XEXP (x, 0), 0); fputs (" + lo(", file); ra_print_rtx (file, XEXP (x, 1), 0); fputs ("))", file); break; case MEM: fputs ("[", file); ra_print_rtx (file, XEXP (x, 0), 0); fprintf (file, "]:%s", GET_MODE_NAME (GET_MODE (x))); /* XXX print alias set too ?? */ break; case LABEL_REF: { rtx sub = XEXP (x, 0); if (GET_CODE (sub) == NOTE && NOTE_LINE_NUMBER (sub) == NOTE_INSN_DELETED_LABEL) fprintf (file, "(deleted uid=%d)", INSN_UID (sub)); else if (GET_CODE (sub) == CODE_LABEL) fprintf (file, "L%d", CODE_LABEL_NUMBER (sub)); else fprintf (file, "(nonlabel uid=%d)", INSN_UID (sub)); } break; case SYMBOL_REF: fprintf (file, "sym(\"%s\")", XSTR (x, 0)); break; case CC0: fputs ("cc0", file); break; default: print_inline_rtx (file, x, 0); break; } } /* Print a general rtx X to FILE in nice infix form. If WITH_PN is set, and X is one of the toplevel constructs (insns, notes, labels or barriers), then print also the UIDs of the preceding and following insn. */ void ra_print_rtx (FILE *file, rtx x, int with_pn) { enum rtx_code code; int unhandled = 0; if (!x) return; code = GET_CODE (x); /* First handle the insn like constructs. */ if (INSN_P (x) || code == NOTE || code == CODE_LABEL || code == BARRIER) { if (INSN_P (x)) fputs (" ", file); /* Non-insns are prefixed by a ';'. 
*/ if (code == BARRIER) fputs ("; ", file); else if (code == NOTE) /* But notes are indented very far right. */ fprintf (file, "\t\t\t\t\t; "); else if (code == CODE_LABEL) /* And labels have their Lxx name first, before the actual UID. */ { fprintf (file, "L%d:\t; ", CODE_LABEL_NUMBER (x)); if (LABEL_NAME (x)) fprintf (file, "(%s) ", LABEL_NAME (x)); switch (LABEL_KIND (x)) { case LABEL_NORMAL: break; case LABEL_STATIC_ENTRY: fputs (" (entry)", file); break; case LABEL_GLOBAL_ENTRY: fputs (" (global entry)", file); break; case LABEL_WEAK_ENTRY: fputs (" (weak entry)", file); break; default: abort(); } fprintf (file, " [%d uses] uid=(", LABEL_NUSES (x)); } fprintf (file, "%d", INSN_UID (x)); if (with_pn) fprintf (file, " %d %d", PREV_INSN (x) ? INSN_UID (PREV_INSN (x)) : 0, NEXT_INSN (x) ? INSN_UID (NEXT_INSN (x)) : 0); if (code == BARRIER) fputs (" -------- barrier ---------", file); else if (code == CODE_LABEL) fputs (")", file); else if (code == NOTE) { int ln = NOTE_LINE_NUMBER (x); if (ln >= (int) NOTE_INSN_BIAS && ln < (int) NOTE_INSN_MAX) fprintf (file, " %s", GET_NOTE_INSN_NAME (ln)); else { expanded_location s; NOTE_EXPANDED_LOCATION (s, x); fprintf (file, " line %d", s.line); if (s.file != NULL) fprintf (file, ":%s", s.file); } } else { fprintf (file, "\t"); ra_print_rtx (file, PATTERN (x), 0); } return; } switch (code) { /* Top-level stuff. */ case PARALLEL: { int j; for (j = 0; j < XVECLEN (x, 0); j++) { if (j) fputs ("\t;; ", file); ra_print_rtx (file, XVECEXP (x, 0, j), 0); } break; } case UNSPEC: case UNSPEC_VOLATILE: { int j; fprintf (file, "unspec%s(%d", (code == UNSPEC) ? "" : "_vol", XINT (x, 1)); for (j = 0; j < XVECLEN (x, 0); j++) { fputs (", ", file); ra_print_rtx (file, XVECEXP (x, 0, j), 0); } fputs (")", file); break; } case SET: if (GET_CODE (SET_DEST (x)) == PC) { if (GET_CODE (SET_SRC (x)) == IF_THEN_ELSE && GET_CODE (XEXP (SET_SRC(x), 2)) == PC) { fputs ("if ", file); ra_print_rtx (file, XEXP (SET_SRC (x), 0), 0); fputs (" jump ", file); ra_print_rtx (file, XEXP (SET_SRC (x), 1), 0); } else { fputs ("jump ", file); ra_print_rtx (file, SET_SRC (x), 0); } } else { ra_print_rtx (file, SET_DEST (x), 0); fputs (" <= ", file); ra_print_rtx (file, SET_SRC (x), 0); } break; case USE: fputs ("use <= ", file); ra_print_rtx (file, XEXP (x, 0), 0); break; case CLOBBER: ra_print_rtx (file, XEXP (x, 0), 0); fputs (" <= clobber", file); break; case CALL: fputs ("call ", file); ra_print_rtx (file, XEXP (x, 0), 0); /* Address */ fputs (" numargs=", file); ra_print_rtx (file, XEXP (x, 1), 0); /* Num arguments */ break; case RETURN: fputs ("return", file); break; case TRAP_IF: fputs ("if (", file); ra_print_rtx (file, XEXP (x, 0), 0); fputs (") trap ", file); ra_print_rtx (file, XEXP (x, 1), 0); break; case RESX: fprintf (file, "resx from region %d", XINT (x, 0)); break; /* Different things of class 'x' */ case SUBREG: ra_print_rtx_object (file, x); break; case STRICT_LOW_PART: fputs ("low(", file); ra_print_rtx (file, XEXP (x, 0), 0); fputs (")", file); break; default: unhandled = 1; break; } if (!unhandled) return; switch (GET_RTX_CLASS (code)) { case RTX_UNARY: ra_print_rtx_1op (file, x); break; case RTX_BIN_ARITH: case RTX_COMM_ARITH: case RTX_COMPARE: case RTX_COMM_COMPARE: ra_print_rtx_2op (file, x); break; case RTX_TERNARY: case RTX_BITFIELD_OPS: ra_print_rtx_3op (file, x); break; case RTX_OBJ: case RTX_CONST_OBJ: ra_print_rtx_object (file, x); break; default: print_inline_rtx (file, x, 0); break; } } /* This only calls ra_print_rtx(), but emits a final newline. 
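   For illustration (the exact spelling depends on the target): an insn
   whose pattern is (set (reg:SI 60) (plus:SI (reg:SI 60) (const_int 1)))
   comes out of ra_print_rtx() roughly as

       p60 <= (p60 + 1)

   i.e. pseudo 60 is printed as "p60" by ra_print_rtx_object(), the PLUS
   infix by ra_print_rtx_2op(), and the SET as "dest <= src".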
*/ void ra_print_rtx_top (FILE *file, rtx x, int with_pn) { ra_print_rtx (file, x, with_pn); fprintf (file, "\n"); } /* Callable from gdb. This prints rtx X onto stderr. */ void ra_debug_rtx (rtx x) { ra_print_rtx_top (stderr, x, 1); } /* This prints the content of basic block with index BBI. The first and last insn are emitted with UIDs of prev and next insns. */ void ra_debug_bbi (int bbi) { basic_block bb = BASIC_BLOCK (bbi); rtx insn; for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn)) { ra_print_rtx_top (stderr, insn, (insn == BB_HEAD (bb) || insn == BB_END (bb))); fprintf (stderr, "\n"); if (insn == BB_END (bb)) break; } } /* Beginning from INSN, emit NUM insns (if NUM is non-negative) or emit a window of NUM insns around INSN, to stderr. */ void ra_debug_insns (rtx insn, int num) { int i, count = (num == 0 ? 1 : num < 0 ? -num : num); if (num < 0) for (i = count / 2; i > 0 && PREV_INSN (insn); i--) insn = PREV_INSN (insn); for (i = count; i > 0 && insn; insn = NEXT_INSN (insn), i--) { if (GET_CODE (insn) == CODE_LABEL) fprintf (stderr, "\n"); ra_print_rtx_top (stderr, insn, (i == count || i == 1)); } } /* Beginning with INSN, emit the whole insn chain into FILE. This also outputs comments when basic blocks start or end and omits some notes, if flag_ra_dump_notes is zero. */ void ra_print_rtl_with_bb (FILE *file, rtx insn) { basic_block last_bb, bb; unsigned int num = 0; if (!insn) fputs ("nil", file); last_bb = NULL; for (; insn; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == BARRIER) bb = NULL; else bb = BLOCK_FOR_INSN (insn); if (bb != last_bb) { if (last_bb) fprintf (file, ";; End of basic block %d\n", last_bb->index); if (bb) fprintf (file, ";; Begin of basic block %d\n", bb->index); last_bb = bb; } if (GET_CODE (insn) == CODE_LABEL) fputc ('\n', file); if (GET_CODE (insn) == NOTE) { /* Ignore basic block and maybe other notes not referencing deleted things. */ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK && (flag_ra_dump_notes || NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED || NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)) { ra_print_rtx_top (file, insn, (num == 0 || !NEXT_INSN (insn))); num++; } } else { ra_print_rtx_top (file, insn, (num == 0 || !NEXT_INSN (insn))); num++; } } } /* Count how many insns were seen how often, while building the interference graph, and prints the findings. */ void dump_number_seen (void) { #define N 17 int num[N]; int i; for (i = 0; i < N; i++) num[i] = 0; for (i = 0; i < get_max_uid (); i++) if (number_seen[i] < N - 1) num[number_seen[i]]++; else num[N - 1]++; for (i = 0; i < N - 1; i++) if (num[i]) ra_debug_msg (DUMP_PROCESS, "%d insns seen %d times\n", num[i], i); if (num[N - 1]) ra_debug_msg (DUMP_PROCESS, "%d insns seen %d and more times\n", num[i], N - 1); ra_debug_msg (DUMP_PROCESS, "from overall %d insns\n", get_max_uid ()); #undef N } /* Dump the interference graph, the move list and the webs. 
*/ void dump_igraph (struct df *df ATTRIBUTE_UNUSED) { struct move_list *ml; unsigned int def1, def2; int num = 0; int num2; unsigned int i; if (!dump_file || (debug_new_regalloc & (DUMP_IGRAPH | DUMP_WEBS)) == 0) return; ra_debug_msg (DUMP_IGRAPH, "conflicts:\n "); for (def1 = 0; def1 < num_webs; def1++) { int num1 = num; num2 = 0; for (def2 = 0; def2 < num_webs; def2++) if (def1 != def2 && TEST_BIT (igraph, igraph_index (def1, def2))) { if (num1 == num) { if (SUBWEB_P (ID2WEB (def1))) ra_debug_msg (DUMP_IGRAPH, "%d (SUBREG %d, %d) with ", def1, ID2WEB (def1)->regno, SUBREG_BYTE (ID2WEB (def1)->orig_x)); else ra_debug_msg (DUMP_IGRAPH, "%d (REG %d) with ", def1, ID2WEB (def1)->regno); } if ((num2 % 9) == 8) ra_debug_msg (DUMP_IGRAPH, "\n "); num++; num2++; if (SUBWEB_P (ID2WEB (def2))) ra_debug_msg (DUMP_IGRAPH, "%d(%d,%d) ", def2, ID2WEB (def2)->regno, SUBREG_BYTE (ID2WEB (def2)->orig_x)); else ra_debug_msg (DUMP_IGRAPH, "%d(%d) ", def2, ID2WEB (def2)->regno); } if (num1 != num) ra_debug_msg (DUMP_IGRAPH, "\n "); } ra_debug_msg (DUMP_IGRAPH, "\n"); for (ml = wl_moves; ml; ml = ml->next) if (ml->move) { ra_debug_msg (DUMP_IGRAPH, "move: insn %d: Web %d <-- Web %d\n", INSN_UID (ml->move->insn), ml->move->target_web->id, ml->move->source_web->id); } ra_debug_msg (DUMP_WEBS, "\nWebs:\n"); for (i = 0; i < num_webs; i++) { struct web *web = ID2WEB (i); ra_debug_msg (DUMP_WEBS, " %4d : regno %3d", i, web->regno); if (SUBWEB_P (web)) { ra_debug_msg (DUMP_WEBS, " sub %d", SUBREG_BYTE (web->orig_x)); ra_debug_msg (DUMP_WEBS, " par %d", find_web_for_subweb (web)->id); } ra_debug_msg (DUMP_WEBS, " +%d (span %d, cost " HOST_WIDE_INT_PRINT_DEC ") (%s)", web->add_hardregs, web->span_deaths, web->spill_cost, reg_class_names1[web->regclass]); if (web->spill_temp == 1) ra_debug_msg (DUMP_WEBS, " (spilltemp)"); else if (web->spill_temp == 2) ra_debug_msg (DUMP_WEBS, " (spilltem2)"); else if (web->spill_temp == 3) ra_debug_msg (DUMP_WEBS, " (short)"); if (web->type == PRECOLORED) ra_debug_msg (DUMP_WEBS, " (precolored, color=%d)", web->color); else if (find_web_for_subweb (web)->num_uses == 0) ra_debug_msg (DUMP_WEBS, " dead"); if (web->crosses_call) ra_debug_msg (DUMP_WEBS, " xcall"); if (web->regno >= max_normal_pseudo) ra_debug_msg (DUMP_WEBS, " stack"); ra_debug_msg (DUMP_WEBS, "\n"); } } /* Dump the interference graph and webs in a format easily parsable by programs. Used to emit real world interference graph to my custom graph colorizer. */ void dump_igraph_machine (void) { unsigned int i; if (!dump_file || (debug_new_regalloc & DUMP_IGRAPH_M) == 0) return; ra_debug_msg (DUMP_IGRAPH_M, "g %d %d\n", num_webs - num_subwebs, FIRST_PSEUDO_REGISTER); for (i = 0; i < num_webs - num_subwebs; i++) { struct web *web = ID2WEB (i); struct conflict_link *cl; int flags = 0; int numc = 0; int col = 0; flags = web->spill_temp & 0xF; flags |= ((web->type == PRECOLORED) ? 
1 : 0) << 4; flags |= (web->add_hardregs & 0xF) << 5; for (cl = web->conflict_list; cl; cl = cl->next) if (cl->t->id < web->id) numc++; ra_debug_msg (DUMP_IGRAPH_M, "n %d %d %d %d %d %d %d\n", web->id, web->color, flags, (unsigned int)web->spill_cost, web->num_defs, web->num_uses, numc); if (web->type != PRECOLORED) { ra_debug_msg (DUMP_IGRAPH_M, "s %d", web->id); while (1) { unsigned int u = 0; int n; for (n = 0; n < 32 && col < FIRST_PSEUDO_REGISTER; n++, col++) if (TEST_HARD_REG_BIT (web->usable_regs, col)) u |= 1 << n; ra_debug_msg (DUMP_IGRAPH_M, " %u", u); if (col >= FIRST_PSEUDO_REGISTER) break; } ra_debug_msg (DUMP_IGRAPH_M, "\n"); } if (numc) { ra_debug_msg (DUMP_IGRAPH_M, "c %d", web->id); for (cl = web->conflict_list; cl; cl = cl->next) { if (cl->t->id < web->id) ra_debug_msg (DUMP_IGRAPH_M, " %d", cl->t->id); } ra_debug_msg (DUMP_IGRAPH_M, "\n"); } } ra_debug_msg (DUMP_IGRAPH_M, "e\n"); } /* This runs after colorization and changing the insn stream. It temporarily replaces all pseudo registers with their colors, and emits information, if the resulting insns are strictly valid. */ void dump_constraints (void) { rtx insn; int i; if (!dump_file || (debug_new_regalloc & DUMP_CONSTRAINTS) == 0) return; for (i = FIRST_PSEUDO_REGISTER; i < ra_max_regno; i++) if (regno_reg_rtx[i] && REG_P (regno_reg_rtx[i])) REGNO (regno_reg_rtx[i]) = ra_reg_renumber[i] >= 0 ? ra_reg_renumber[i] : i; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { int code; int uid = INSN_UID (insn); int o; /* Don't simply force rerecognition, as combine might left us with some unrecognizable ones, which later leads to aborts in regclass, if we now destroy the remembered INSN_CODE(). */ /*INSN_CODE (insn) = -1;*/ code = recog_memoized (insn); if (code < 0) { ra_debug_msg (DUMP_CONSTRAINTS, "%d: asm insn or not recognizable.\n", uid); continue; } ra_debug_msg (DUMP_CONSTRAINTS, "%d: code %d {%s}, %d operands, constraints: ", uid, code, insn_data[code].name, recog_data.n_operands); extract_insn (insn); /*preprocess_constraints ();*/ for (o = 0; o < recog_data.n_operands; o++) { ra_debug_msg (DUMP_CONSTRAINTS, "%d:%s ", o, recog_data.constraints[o]); } if (constrain_operands (1)) ra_debug_msg (DUMP_CONSTRAINTS, "matches strictly alternative %d", which_alternative); else ra_debug_msg (DUMP_CONSTRAINTS, "doesn't match strictly"); ra_debug_msg (DUMP_CONSTRAINTS, "\n"); } for (i = FIRST_PSEUDO_REGISTER; i < ra_max_regno; i++) if (regno_reg_rtx[i] && REG_P (regno_reg_rtx[i])) REGNO (regno_reg_rtx[i]) = i; } /* This counts and emits the cumulated cost of all spilled webs, preceded by a custom message MSG, with debug level LEVEL. */ void dump_graph_cost (unsigned int level, const char *msg) { unsigned int i; unsigned HOST_WIDE_INT cost; if (!dump_file || (debug_new_regalloc & level) == 0) return; cost = 0; for (i = 0; i < num_webs; i++) { struct web *web = id2web[i]; if (alias (web)->type == SPILLED) cost += web->orig_spill_cost; } ra_debug_msg (level, " spill cost of graph (%s) = " HOST_WIDE_INT_PRINT_UNSIGNED "\n", msg ? msg : "", cost); } /* Dump the color assignment per web, the coalesced and spilled webs. 
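   For reference, the machine readable graph emitted by
   dump_igraph_machine() above consists of one-letter records taken
   straight from its fprintf calls.  A tiny made-up example with two
   webs and eight hard registers could look like:

       g 2 8
       n 0 3 16 42 1 2 0
       n 1 -1 0 10 1 1 1
       s 1 255
       c 1 0
       e

   "g" gives the number of webs and FIRST_PSEUDO_REGISTER, each "n" line
   describes one web (id, color, flags, spill cost, defs, uses, number
   of conflicts to lower ids), "s" lists 32-bit words of usable hard
   registers for non-precolored webs, "c" lists the conflicting lower
   ids, and "e" terminates the graph.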
*/ void dump_ra (struct df *df ATTRIBUTE_UNUSED) { struct web *web; struct dlist *d; if (!dump_file || (debug_new_regalloc & DUMP_RESULTS) == 0) return; ra_debug_msg (DUMP_RESULTS, "\nColored:\n"); for (d = WEBS(COLORED); d; d = d->next) { web = DLIST_WEB (d); ra_debug_msg (DUMP_RESULTS, " %4d : color %d\n", web->id, web->color); } ra_debug_msg (DUMP_RESULTS, "\nCoalesced:\n"); for (d = WEBS(COALESCED); d; d = d->next) { web = DLIST_WEB (d); ra_debug_msg (DUMP_RESULTS, " %4d : to web %d, color %d\n", web->id, alias (web)->id, web->color); } ra_debug_msg (DUMP_RESULTS, "\nSpilled:\n"); for (d = WEBS(SPILLED); d; d = d->next) { web = DLIST_WEB (d); ra_debug_msg (DUMP_RESULTS, " %4d\n", web->id); } ra_debug_msg (DUMP_RESULTS, "\n"); dump_cost (DUMP_RESULTS); } /* Calculate and dump the cumulated costs of certain types of insns (loads, stores and copies). */ void dump_static_insn_cost (FILE *file, const char *message, const char *prefix) { struct cost { unsigned HOST_WIDE_INT cost; unsigned int count; }; basic_block bb; struct cost load, store, regcopy, selfcopy, overall; memset (&load, 0, sizeof(load)); memset (&store, 0, sizeof(store)); memset (®copy, 0, sizeof(regcopy)); memset (&selfcopy, 0, sizeof(selfcopy)); memset (&overall, 0, sizeof(overall)); if (!file) return; FOR_EACH_BB (bb) { unsigned HOST_WIDE_INT block_cost = bb->frequency; rtx insn, set; for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn)) { /* Yes, yes. We don't calculate the costs precisely. Only for "simple enough" insns. Those containing single sets only. */ if (INSN_P (insn) && ((set = single_set (insn)) != NULL)) { rtx src = SET_SRC (set); rtx dest = SET_DEST (set); struct cost *pcost = NULL; overall.cost += block_cost; overall.count++; if (rtx_equal_p (src, dest)) pcost = &selfcopy; else if (GET_CODE (src) == GET_CODE (dest) && ((REG_P (src)) || (GET_CODE (src) == SUBREG && REG_P (SUBREG_REG (src)) && REG_P (SUBREG_REG (dest))))) /* XXX is dest guaranteed to be a subreg? */ pcost = ®copy; else { if (GET_CODE (src) == SUBREG) src = SUBREG_REG (src); if (GET_CODE (dest) == SUBREG) dest = SUBREG_REG (dest); if (MEM_P (src) && !MEM_P (dest) && memref_is_stack_slot (src)) pcost = &load; else if (!MEM_P (src) && MEM_P (dest) && memref_is_stack_slot (dest)) pcost = &store; } if (pcost) { pcost->cost += block_cost; pcost->count++; } } if (insn == BB_END (bb)) break; } } if (!prefix) prefix = ""; fprintf (file, "static insn cost %s\n", message ? message : ""); fprintf (file, " %soverall:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n", prefix, overall.count, overall.cost); fprintf (file, " %sloads:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n", prefix, load.count, load.cost); fprintf (file, " %sstores:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n", prefix, store.count, store.cost); fprintf (file, " %sregcopy:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n", prefix, regcopy.count, regcopy.cost); fprintf (file, " %sselfcpy:\tnum=%6d\tcost=% 8" HOST_WIDE_INT_PRINT "d\n", prefix, selfcopy.count, selfcopy.cost); } /* Returns nonzero, if WEB1 and WEB2 have some possible hardregs in common. */ int web_conflicts_p (struct web *web1, struct web *web2) { if (web1->type == PRECOLORED && web2->type == PRECOLORED) return 0; if (web1->type == PRECOLORED) return TEST_HARD_REG_BIT (web2->usable_regs, web1->regno); if (web2->type == PRECOLORED) return TEST_HARD_REG_BIT (web1->usable_regs, web2->regno); return hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs); } /* Dump all uids of insns in which WEB is mentioned. 
*/ void dump_web_insns (struct web *web) { unsigned int i; ra_debug_msg (DUMP_EVER, "Web: %i(%i)+%i class: %s freedom: %i degree %i\n", web->id, web->regno, web->add_hardregs, reg_class_names1[web->regclass], web->num_freedom, web->num_conflicts); ra_debug_msg (DUMP_EVER, " def insns:"); for (i = 0; i < web->num_defs; ++i) { ra_debug_msg (DUMP_EVER, " %d ", INSN_UID (web->defs[i]->insn)); } ra_debug_msg (DUMP_EVER, "\n use insns:"); for (i = 0; i < web->num_uses; ++i) { ra_debug_msg (DUMP_EVER, " %d ", INSN_UID (web->uses[i]->insn)); } ra_debug_msg (DUMP_EVER, "\n"); } /* Dump conflicts for web WEB. */ void dump_web_conflicts (struct web *web) { int num = 0; unsigned int def2; ra_debug_msg (DUMP_EVER, "Web: %i(%i)+%i class: %s freedom: %i degree %i\n", web->id, web->regno, web->add_hardregs, reg_class_names1[web->regclass], web->num_freedom, web->num_conflicts); for (def2 = 0; def2 < num_webs; def2++) if (TEST_BIT (igraph, igraph_index (web->id, def2)) && web->id != def2) { if ((num % 9) == 5) ra_debug_msg (DUMP_EVER, "\n "); num++; ra_debug_msg (DUMP_EVER, " %d(%d)", def2, id2web[def2]->regno); if (id2web[def2]->add_hardregs) ra_debug_msg (DUMP_EVER, "+%d", id2web[def2]->add_hardregs); if (web_conflicts_p (web, id2web[def2])) ra_debug_msg (DUMP_EVER, "/x"); if (id2web[def2]->type == SELECT) ra_debug_msg (DUMP_EVER, "/s"); if (id2web[def2]->type == COALESCED) ra_debug_msg (DUMP_EVER,"/c/%d", alias (id2web[def2])->id); } ra_debug_msg (DUMP_EVER, "\n"); { struct conflict_link *wl; num = 0; ra_debug_msg (DUMP_EVER, "By conflicts: "); for (wl = web->conflict_list; wl; wl = wl->next) { struct web* w = wl->t; if ((num % 9) == 8) ra_debug_msg (DUMP_EVER, "\n "); num++; ra_debug_msg (DUMP_EVER, "%d(%d)%s ", w->id, w->regno, web_conflicts_p (web, w) ? "+" : ""); } ra_debug_msg (DUMP_EVER, "\n"); } } /* Output HARD_REG_SET to stderr. */ void debug_hard_reg_set (HARD_REG_SET set) { int i; for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i) { if (TEST_HARD_REG_BIT (set, i)) { fprintf (stderr, "%s ", reg_names[i]); } } fprintf (stderr, "\n"); } /* vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: */ /* Graph coloring register allocator Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Matz and Daniel Berlin . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is part of the graph coloring register allocator, and contains the functions to change the insn stream. I.e. it adds spill code, rewrites insns to use the new registers after coloring and deletes coalesced moves. 
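   The basic transformation is the classic one: every web that ended up
   SPILLED gets a stack slot, every def of it is followed by a store to
   that slot, and (with flag_ra_spill_every_use) every use is preceded
   by a load from it.  Schematically, for a spilled pseudo p with slot
   [slot_p] (a sketch in the dump notation above, not actual RTL):

       before                 after
       p <= a + b             p <= a + b
                              [slot_p] <= p      store after the def
       ...                    ...
       c <= p + 1             p <= [slot_p]      load before the use
                              c <= p + 1

   The more involved variants below (insert_stores, rewrite_program2)
   only try to avoid redundant stores and loads; they don't change this
   basic scheme.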
*/ struct rewrite_info; struct rtx_list; static void spill_coalescing (sbitmap, sbitmap); static unsigned HOST_WIDE_INT spill_prop_savings (struct web *, sbitmap); static void spill_prop_insert (struct web *, sbitmap, sbitmap); static int spill_propagation (sbitmap, sbitmap, sbitmap); static void spill_coalprop (void); static void allocate_spill_web (struct web *); static void choose_spill_colors (void); static void rewrite_program (bitmap); static void remember_slot (struct rtx_list **, rtx); static int slots_overlap_p (rtx, rtx); static void delete_overlapping_slots (struct rtx_list **, rtx); static int slot_member_p (struct rtx_list *, rtx); static void insert_stores (bitmap); static int spill_same_color_p (struct web *, struct web *); static bool is_partly_live_1 (sbitmap, struct web *); static void update_spill_colors (HARD_REG_SET *, struct web *, int); static int spill_is_free (HARD_REG_SET *, struct web *); static void emit_loads (struct rewrite_info *, int, rtx); static void reloads_to_loads (struct rewrite_info *, struct ref **, unsigned int, struct web **); static void rewrite_program2 (bitmap); static void mark_refs_for_checking (struct web *, bitmap); static void detect_web_parts_to_rebuild (void); static void delete_useless_defs (void); static void detect_non_changed_webs (void); static void reset_changed_flag (void); /* For tracking some statistics, we count the number (and cost) of deleted move insns. */ static unsigned int deleted_move_insns; static unsigned HOST_WIDE_INT deleted_move_cost; /* This is the spill coalescing phase. In SPILLED the IDs of all already spilled webs are noted. In COALESCED the IDs of webs still to check for coalescing. This tries to coalesce two webs, which were spilled, are connected by a move, and don't conflict. Greatly reduces memory shuffling. */ static void spill_coalescing (sbitmap coalesce, sbitmap spilled) { struct move_list *ml; struct move *m; for (ml = wl_moves; ml; ml = ml->next) if ((m = ml->move) != NULL) { struct web *s = alias (m->source_web); struct web *t = alias (m->target_web); if ((TEST_BIT (spilled, s->id) && TEST_BIT (coalesce, t->id)) || (TEST_BIT (spilled, t->id) && TEST_BIT (coalesce, s->id))) { struct conflict_link *wl; if (TEST_BIT (sup_igraph, s->id * num_webs + t->id) || TEST_BIT (sup_igraph, t->id * num_webs + s->id) || s->pattern || t->pattern) continue; deleted_move_insns++; deleted_move_cost += BLOCK_FOR_INSN (m->insn)->frequency + 1; PUT_CODE (m->insn, NOTE); NOTE_LINE_NUMBER (m->insn) = NOTE_INSN_DELETED; df_insn_modify (df, BLOCK_FOR_INSN (m->insn), m->insn); m->target_web->target_of_spilled_move = 1; if (s == t) /* May be, already coalesced due to a former move. */ continue; /* Merge the nodes S and T in the I-graph. Beware: the merging of conflicts relies on the fact, that in the conflict list of T all of it's conflicts are noted. This is currently not the case if T would be the target of a coalesced web, because then (in combine () above) only those conflicts were noted in T from the web which was coalesced into T, which at the time of combine() were not already on the SELECT stack or were itself coalesced to something other. 
*/ if (t->type != SPILLED || s->type != SPILLED) abort (); remove_list (t->dlink, &WEBS(SPILLED)); put_web (t, COALESCED); t->alias = s; s->is_coalesced = 1; t->is_coalesced = 1; merge_moves (s, t); for (wl = t->conflict_list; wl; wl = wl->next) { struct web *pweb = wl->t; if (wl->sub == NULL) record_conflict (s, pweb); else { struct sub_conflict *sl; for (sl = wl->sub; sl; sl = sl->next) { struct web *sweb = NULL; if (SUBWEB_P (sl->s)) sweb = find_subweb (s, sl->s->orig_x); if (!sweb) sweb = s; record_conflict (sweb, sl->t); } } /* No decrement_degree here, because we already have colored the graph, and don't want to insert pweb into any other list. */ pweb->num_conflicts -= 1 + t->add_hardregs; } } } } /* Returns the probable saving of coalescing WEB with webs from SPILLED, in terms of removed move insn cost. */ static unsigned HOST_WIDE_INT spill_prop_savings (struct web *web, sbitmap spilled) { unsigned HOST_WIDE_INT savings = 0; struct move_list *ml; struct move *m; unsigned int cost; if (web->pattern) return 0; cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 1); cost += 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 0); for (ml = wl_moves; ml; ml = ml->next) if ((m = ml->move) != NULL) { struct web *s = alias (m->source_web); struct web *t = alias (m->target_web); if (s != web) { struct web *h = s; s = t; t = h; } if (s != web || !TEST_BIT (spilled, t->id) || t->pattern || TEST_BIT (sup_igraph, s->id * num_webs + t->id) || TEST_BIT (sup_igraph, t->id * num_webs + s->id)) continue; savings += BLOCK_FOR_INSN (m->insn)->frequency * cost; } return savings; } /* This add all IDs of colored webs, which are connected to WEB by a move to LIST and PROCESSED. */ static void spill_prop_insert (struct web *web, sbitmap list, sbitmap processed) { struct move_list *ml; struct move *m; for (ml = wl_moves; ml; ml = ml->next) if ((m = ml->move) != NULL) { struct web *s = alias (m->source_web); struct web *t = alias (m->target_web); if (s != web) { struct web *h = s; s = t; t = h; } if (s != web || t->type != COLORED || TEST_BIT (processed, t->id)) continue; SET_BIT (list, t->id); SET_BIT (processed, t->id); } } /* The spill propagation pass. If we have to spilled webs, the first connected through a move to a colored one, and the second also connected to that colored one, and this colored web is only used to connect both spilled webs, it might be worthwhile to spill that colored one. This is the case, if the cost of the removed copy insns (all three webs could be placed into the same stack slot) is higher than the spill cost of the web. TO_PROP are the webs we try to propagate from (i.e. spilled ones), SPILLED the set of all spilled webs so far and PROCESSED the set of all webs processed so far, so we don't do work twice. */ static int spill_propagation (sbitmap to_prop, sbitmap spilled, sbitmap processed) { int id; int again = 0; sbitmap list = sbitmap_alloc (num_webs); sbitmap_zero (list); /* First insert colored move neighbors into the candidate list. */ EXECUTE_IF_SET_IN_SBITMAP (to_prop, 0, id, { spill_prop_insert (ID2WEB (id), list, processed); }); sbitmap_zero (to_prop); /* For all candidates, see, if the savings are higher than it's spill cost. */ while ((id = sbitmap_first_set_bit (list)) >= 0) { struct web *web = ID2WEB (id); RESET_BIT (list, id); if (spill_prop_savings (web, spilled) >= web->spill_cost) { /* If so, we found a new spilled web. 
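   (A rough example of the trade-off tested just above: suppose webs A
   and B are already spilled and each is connected by a move to a
   colored web C, both moves sitting in blocks with frequency 10, and
   MEMORY_MOVE_COST is 2 in either direction.  The per-move cost in
   spill_prop_savings() is then (1+2)+(1+2) = 6, so the savings for C
   come to 10*6 + 10*6 = 120.  If C's own spill_cost is, say, 50, the
   test succeeds: C is spilled too and all three webs can share one
   stack slot.)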
Insert it's colored move neighbors again, and mark, that we need to repeat the whole mainloop of spillprog/coalescing again. */ remove_web_from_list (web); web->color = -1; put_web (web, SPILLED); SET_BIT (spilled, id); SET_BIT (to_prop, id); spill_prop_insert (web, list, processed); again = 1; } } sbitmap_free (list); return again; } /* The main phase to improve spill costs. This repeatedly runs spill coalescing and spill propagation, until nothing changes. */ static void spill_coalprop (void) { sbitmap spilled, processed, to_prop; struct dlist *d; int again; spilled = sbitmap_alloc (num_webs); processed = sbitmap_alloc (num_webs); to_prop = sbitmap_alloc (num_webs); sbitmap_zero (spilled); for (d = WEBS(SPILLED); d; d = d->next) SET_BIT (spilled, DLIST_WEB (d)->id); sbitmap_copy (to_prop, spilled); sbitmap_zero (processed); do { spill_coalescing (to_prop, spilled); /* XXX Currently (with optimistic coalescing) spill_propagation() doesn't give better code, sometimes it gives worse (but not by much) code. I believe this is because of slightly wrong cost measurements. Anyway right now it isn't worth the time it takes, so deactivate it for now. */ again = 0 && spill_propagation (to_prop, spilled, processed); } while (again); sbitmap_free (to_prop); sbitmap_free (processed); sbitmap_free (spilled); } /* Allocate a spill slot for a WEB. Currently we spill to pseudo registers, to be able to track also webs for "stack slots", and also to possibly colorize them. These pseudos are sometimes handled in a special way, where we know, that they also can represent MEM references. */ static void allocate_spill_web (struct web *web) { int regno = web->regno; rtx slot; if (web->stack_slot) return; slot = gen_reg_rtx (PSEUDO_REGNO_MODE (regno)); web->stack_slot = slot; } /* This chooses a color for all SPILLED webs for interference region spilling. The heuristic isn't good in any way. */ static void choose_spill_colors (void) { struct dlist *d; unsigned HOST_WIDE_INT *costs = xmalloc (FIRST_PSEUDO_REGISTER * sizeof (costs[0])); for (d = WEBS(SPILLED); d; d = d->next) { struct web *web = DLIST_WEB (d); struct conflict_link *wl; int bestc, c; HARD_REG_SET avail; memset (costs, 0, FIRST_PSEUDO_REGISTER * sizeof (costs[0])); for (wl = web->conflict_list; wl; wl = wl->next) { struct web *pweb = wl->t; if (pweb->type == COLORED || pweb->type == PRECOLORED) costs[pweb->color] += pweb->spill_cost; } COPY_HARD_REG_SET (avail, web->usable_regs); if (web->crosses_call) { /* Add an arbitrary constant cost to colors not usable by call-crossing webs without saves/loads. */ for (c = 0; c < FIRST_PSEUDO_REGISTER; c++) if (TEST_HARD_REG_BIT (call_used_reg_set, c)) costs[c] += 1000; } bestc = -1; for (c = 0; c < FIRST_PSEUDO_REGISTER; c++) if ((bestc < 0 || costs[bestc] > costs[c]) && TEST_HARD_REG_BIT (avail, c) && HARD_REGNO_MODE_OK (c, PSEUDO_REGNO_MODE (web->regno))) { int i, size; size = hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)]; for (i = 1; i < size && TEST_HARD_REG_BIT (avail, c + i); i++); if (i == size) bestc = c; } web->color = bestc; ra_debug_msg (DUMP_PROCESS, "choosing color %d for spilled web %d\n", bestc, web->id); } free (costs); } /* For statistics sake we count the number and cost of all new loads, stores and emitted rematerializations. 
*/ static unsigned int emitted_spill_loads; static unsigned int emitted_spill_stores; static unsigned int emitted_remat; static unsigned HOST_WIDE_INT spill_load_cost; static unsigned HOST_WIDE_INT spill_store_cost; static unsigned HOST_WIDE_INT spill_remat_cost; /* In rewrite_program2() we detect if some def us useless, in the sense, that the pseudo set is not live anymore at that point. The REF_IDs of such defs are noted here. */ static bitmap useless_defs; /* This is the simple and fast version of rewriting the program to include spill code. It spills at every insn containing spilled defs or uses. Loads are added only if flag_ra_spill_every_use is nonzero, otherwise only stores will be added. This doesn't support rematerialization. NEW_DEATHS is filled with uids for insns, which probably contain deaths. */ static void rewrite_program (bitmap new_deaths) { unsigned int i; struct dlist *d; bitmap b = BITMAP_XMALLOC (); /* We walk over all webs, over all uses/defs. For all webs, we need to look at spilled webs, and webs coalesced to spilled ones, in case their alias isn't broken up, or they got spill coalesced. */ for (i = 0; i < 2; i++) for (d = (i == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next) { struct web *web = DLIST_WEB (d); struct web *aweb = alias (web); unsigned int j; rtx slot; /* Is trivially true for spilled webs, but not for coalesced ones. */ if (aweb->type != SPILLED) continue; /* First add loads before every use, if we have to. */ if (flag_ra_spill_every_use) { bitmap_clear (b); allocate_spill_web (aweb); slot = aweb->stack_slot; for (j = 0; j < web->num_uses; j++) { rtx insns, target, source; rtx insn = DF_REF_INSN (web->uses[j]); rtx prev = PREV_INSN (insn); basic_block bb = BLOCK_FOR_INSN (insn); /* Happens when spill_coalescing() deletes move insns. */ if (!INSN_P (insn)) continue; /* Check that we didn't already added a load for this web and insn. Happens, when the an insn uses the same web multiple times. */ if (bitmap_bit_p (b, INSN_UID (insn))) continue; bitmap_set_bit (b, INSN_UID (insn)); target = DF_REF_REG (web->uses[j]); source = slot; start_sequence (); if (GET_CODE (target) == SUBREG) source = simplify_gen_subreg (GET_MODE (target), source, GET_MODE (source), SUBREG_BYTE (target)); ra_emit_move_insn (target, source); insns = get_insns (); end_sequence (); emit_insn_before (insns, insn); if (BB_HEAD (bb) == insn) BB_HEAD (bb) = NEXT_INSN (prev); for (insn = PREV_INSN (insn); insn != prev; insn = PREV_INSN (insn)) { set_block_for_insn (insn, bb); df_insn_modify (df, bb, insn); } emitted_spill_loads++; spill_load_cost += bb->frequency + 1; } } /* Now emit the stores after each def. If any uses were loaded from stackslots (compared to rematerialized or not reloaded due to IR spilling), aweb->stack_slot will be set. If not, we don't need to emit any stack stores. */ slot = aweb->stack_slot; bitmap_clear (b); if (slot) for (j = 0; j < web->num_defs; j++) { rtx insns, source, dest; rtx insn = DF_REF_INSN (web->defs[j]); rtx following = NEXT_INSN (insn); basic_block bb = BLOCK_FOR_INSN (insn); /* Happens when spill_coalescing() deletes move insns. 
*/ if (!INSN_P (insn)) continue; if (bitmap_bit_p (b, INSN_UID (insn))) continue; bitmap_set_bit (b, INSN_UID (insn)); start_sequence (); source = DF_REF_REG (web->defs[j]); dest = slot; if (GET_CODE (source) == SUBREG) dest = simplify_gen_subreg (GET_MODE (source), dest, GET_MODE (dest), SUBREG_BYTE (source)); ra_emit_move_insn (dest, source); insns = get_insns (); end_sequence (); if (insns) { emit_insn_after (insns, insn); if (BB_END (bb) == insn) BB_END (bb) = PREV_INSN (following); for (insn = insns; insn != following; insn = NEXT_INSN (insn)) { set_block_for_insn (insn, bb); df_insn_modify (df, bb, insn); } } else df_insn_modify (df, bb, insn); emitted_spill_stores++; spill_store_cost += bb->frequency + 1; /* XXX we should set new_deaths for all inserted stores whose pseudo dies here. Note, that this isn't the case for _all_ stores. */ /* I.e. the next is wrong, and might cause some spilltemps to be categorized as spilltemp2's (i.e. live over a death), although they aren't. This might make them spill again, which causes endlessness in the case, this insn is in fact _no_ death. */ bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following))); } } BITMAP_XFREE (b); } /* A simple list of rtx's. */ struct rtx_list { struct rtx_list *next; rtx x; }; /* Adds X to *LIST. */ static void remember_slot (struct rtx_list **list, rtx x) { struct rtx_list *l; /* PRE: X is not already in LIST. */ l = ra_alloc (sizeof (*l)); l->next = *list; l->x = x; *list = l; } /* Given two rtx' S1 and S2, either being REGs or MEMs (or SUBREGs thereof), return nonzero, if they overlap. REGs and MEMs don't overlap, and if they are MEMs they must have an easy address (plus (basereg) (const_inst x)), otherwise they overlap. */ static int slots_overlap_p (rtx s1, rtx s2) { rtx base1, base2; HOST_WIDE_INT ofs1 = 0, ofs2 = 0; int size1 = GET_MODE_SIZE (GET_MODE (s1)); int size2 = GET_MODE_SIZE (GET_MODE (s2)); if (GET_CODE (s1) == SUBREG) ofs1 = SUBREG_BYTE (s1), s1 = SUBREG_REG (s1); if (GET_CODE (s2) == SUBREG) ofs2 = SUBREG_BYTE (s2), s2 = SUBREG_REG (s2); if (s1 == s2) return 1; if (GET_CODE (s1) != GET_CODE (s2)) return 0; if (REG_P (s1) && REG_P (s2)) { if (REGNO (s1) != REGNO (s2)) return 0; if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1) return 0; return 1; } if (!MEM_P (s1) || GET_CODE (s2) != MEM) abort (); s1 = XEXP (s1, 0); s2 = XEXP (s2, 0); if (GET_CODE (s1) != PLUS || !REG_P (XEXP (s1, 0)) || GET_CODE (XEXP (s1, 1)) != CONST_INT) return 1; if (GET_CODE (s2) != PLUS || !REG_P (XEXP (s2, 0)) || GET_CODE (XEXP (s2, 1)) != CONST_INT) return 1; base1 = XEXP (s1, 0); base2 = XEXP (s2, 0); if (!rtx_equal_p (base1, base2)) return 1; ofs1 += INTVAL (XEXP (s1, 1)); ofs2 += INTVAL (XEXP (s2, 1)); if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1) return 0; return 1; } /* This deletes from *LIST all rtx's which overlap with X in the sense of slots_overlap_p(). */ static void delete_overlapping_slots (struct rtx_list **list, rtx x) { while (*list) { if (slots_overlap_p ((*list)->x, x)) *list = (*list)->next; else list = &((*list)->next); } } /* Returns nonzero, of X is member of LIST. */ static int slot_member_p (struct rtx_list *list, rtx x) { for (;list; list = list->next) if (rtx_equal_p (list->x, x)) return 1; return 0; } /* A more sophisticated (and slower) method of adding the stores, than rewrite_program(). This goes backward the insn stream, adding stores as it goes, but only if it hasn't just added a store to the same location. NEW_DEATHS is a bitmap filled with uids of insns containing deaths. 
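   The "same location" bookkeeping relies on slots_overlap_p() above,
   whose core, once the bases compare equal, is an ordinary interval
   overlap check on (offset, size) pairs.  A standalone sketch of just
   that check:

       // nonzero if the byte ranges [o1, o1+s1) and [o2, o2+s2) intersect
       static int byte_ranges_overlap (int o1, int s1, int o2, int s2)
       {
         return !(o1 >= o2 + s2 || o2 >= o1 + s1);
       }

   E.g. a 4-byte access at offset 0 and a 2-byte access at offset 4 do
   not overlap, while two 4-byte accesses at offsets 0 and 2 do.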
*/ static void insert_stores (bitmap new_deaths) { rtx insn; rtx last_slot = NULL_RTX; struct rtx_list *slots = NULL; /* We go simply backwards over basic block borders. */ for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) { int uid = INSN_UID (insn); /* If we reach a basic block border, which has more than one outgoing edge, we simply forget all already emitted stores. */ if (GET_CODE (insn) == BARRIER || JUMP_P (insn) || can_throw_internal (insn)) { last_slot = NULL_RTX; slots = NULL; } if (!INSN_P (insn)) continue; /* If this insn was not just added in this pass. */ if (uid < insn_df_max_uid) { unsigned int n; rtx following = NEXT_INSN (insn); basic_block bb = BLOCK_FOR_INSN (insn); struct ra_insn_info info; info = insn_df[uid]; for (n = 0; n < info.num_defs; n++) { struct web *web = def2web[DF_REF_ID (info.defs[n])]; struct web *aweb = alias (find_web_for_subweb (web)); rtx slot, source; if (aweb->type != SPILLED || !aweb->stack_slot) continue; slot = aweb->stack_slot; source = DF_REF_REG (info.defs[n]); /* adjust_address() might generate code. */ start_sequence (); if (GET_CODE (source) == SUBREG) slot = simplify_gen_subreg (GET_MODE (source), slot, GET_MODE (slot), SUBREG_BYTE (source)); /* If we have no info about emitted stores, or it didn't contain the location we intend to use soon, then add the store. */ if ((!last_slot || !rtx_equal_p (slot, last_slot)) && ! slot_member_p (slots, slot)) { rtx insns, ni; last_slot = slot; remember_slot (&slots, slot); ra_emit_move_insn (slot, source); insns = get_insns (); end_sequence (); if (insns) { emit_insn_after (insns, insn); if (BB_END (bb) == insn) BB_END (bb) = PREV_INSN (following); for (ni = insns; ni != following; ni = NEXT_INSN (ni)) { set_block_for_insn (ni, bb); df_insn_modify (df, bb, ni); } } else df_insn_modify (df, bb, insn); emitted_spill_stores++; spill_store_cost += bb->frequency + 1; bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following))); } else { /* Otherwise ignore insns from adjust_address() above. */ end_sequence (); } } } /* If we look at a load generated by the allocator, forget the last emitted slot, and additionally clear all slots overlapping it's source (after all, we need it again). */ /* XXX If we emit the stack-ref directly into the using insn the following needs a change, because that is no new insn. Preferably we would add some notes to the insn, what stackslots are needed for it. */ if (uid >= last_max_uid) { rtx set = single_set (insn); last_slot = NULL_RTX; /* If this was no simple set, give up, and forget everything. */ if (!set) slots = NULL; else { if (1 || MEM_P (SET_SRC (set))) delete_overlapping_slots (&slots, SET_SRC (set)); } } } } /* Returns 1 if both colored webs have some hardregs in common, even if they are not the same width. */ static int spill_same_color_p (struct web *web1, struct web *web2) { int c1, size1, c2, size2; if ((c1 = alias (web1)->color) < 0 || c1 == an_unusable_color) return 0; if ((c2 = alias (web2)->color) < 0 || c2 == an_unusable_color) return 0; size1 = web1->type == PRECOLORED ? 1 : hard_regno_nregs[c1][PSEUDO_REGNO_MODE (web1->regno)]; size2 = web2->type == PRECOLORED ? 1 : hard_regno_nregs[c2][PSEUDO_REGNO_MODE (web2->regno)]; if (c1 >= c2 + size2 || c2 >= c1 + size1) return 0; return 1; } /* Given the set of live web IDs LIVE, returns nonzero, if any of WEBs subwebs (or WEB itself) is live. 
*/ static bool is_partly_live_1 (sbitmap live, struct web *web) { do if (TEST_BIT (live, web->id)) return 1; while ((web = web->subreg_next)); return 0; } /* Fast version in case WEB has no subwebs. */ #define is_partly_live(live, web) ((!web->subreg_next) \ ? TEST_BIT (live, web->id) \ : is_partly_live_1 (live, web)) /* Change the set of currently IN_USE colors according to WEB's color. Either add those colors to the hardreg set (if ADD is nonzero), or remove them. */ static void update_spill_colors (HARD_REG_SET *in_use, struct web *web, int add) { int c, size; if ((c = alias (find_web_for_subweb (web))->color) < 0 || c == an_unusable_color) return; size = hard_regno_nregs[c][GET_MODE (web->orig_x)]; if (SUBWEB_P (web)) { c += subreg_regno_offset (c, GET_MODE (SUBREG_REG (web->orig_x)), SUBREG_BYTE (web->orig_x), GET_MODE (web->orig_x)); } else if (web->type == PRECOLORED) size = 1; if (add) for (; size--;) SET_HARD_REG_BIT (*in_use, c + size); else for (; size--;) CLEAR_HARD_REG_BIT (*in_use, c + size); } /* Given a set of hardregs currently IN_USE and the color C of WEB, return -1 if WEB has no color, 1 of it has the unusable color, 0 if one of it's used hardregs are in use, and 1 otherwise. Generally, if WEB can't be left colorized return 1. */ static int spill_is_free (HARD_REG_SET *in_use, struct web *web) { int c, size; if ((c = alias (web)->color) < 0) return -1; if (c == an_unusable_color) return 1; size = web->type == PRECOLORED ? 1 : hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)]; for (; size--;) if (TEST_HARD_REG_BIT (*in_use, c + size)) return 0; return 1; } /* Structure for passing between rewrite_program2() and emit_loads(). */ struct rewrite_info { /* The web IDs which currently would need a reload. These are currently live spilled webs, whose color was still free. */ bitmap need_reload; /* We need a scratch bitmap, but don't want to allocate one a zillion times. */ bitmap scratch; /* Web IDs of currently live webs. This are the precise IDs, not just those of the superwebs. If only on part is live, only that ID is placed here. */ sbitmap live; /* An array of webs, which currently need a load added. They will be emitted when seeing the first death. */ struct web **needed_loads; /* The current number of entries in needed_loads. */ int nl_size; /* The number of bits set in need_reload. */ int num_reloads; /* The current set of hardregs not available. */ HARD_REG_SET colors_in_use; /* Nonzero, if we just added some spill temps to need_reload or needed_loads. In this case we don't wait for the next death to emit their loads. */ int any_spilltemps_spilled; /* Nonzero, if we currently need to emit the loads. E.g. when we saw an insn containing deaths. */ int need_load; }; /* The needed_loads list of RI contains some webs for which we add the actual load insns here. They are added just before their use last seen. NL_FIRST_RELOAD is the index of the first load which is a converted reload, all other entries are normal loads. LAST_BLOCK_INSN is the last insn of the current basic block. */ static void emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn) { int j; for (j = ri->nl_size; j;) { struct web *web = ri->needed_loads[--j]; struct web *supweb; struct web *aweb; rtx ni, slot, reg; rtx before = NULL_RTX, after = NULL_RTX; basic_block bb; /* When spilltemps were spilled for the last insns, their loads already are emitted, which is noted by setting needed_loads[] for it to 0. 
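   For orientation, the way the caller (rewrite_program2() below) drives
   this loop is roughly:

       walk each basic block backwards, insn by insn:
         a live spilled web whose color is still free here
           -> record it in need_reload        (a deferred "reload")
         a live spilled web whose color is taken
           -> push it on needed_loads[]       (a real load is needed)
         when a death makes a color unavailable, convert the affected
         reloads into loads (reloads_to_loads); at insns containing
         deaths (and at calls) the queued loads are emitted here, just
         before each web's recorded last use.

   This is only a prose sketch of the interplay; the details are in
   rewrite_program2() and reloads_to_loads() below.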
*/ if (!web) continue; supweb = find_web_for_subweb (web); if (supweb->regno >= max_normal_pseudo) abort (); /* Check for web being a spilltemp, if we only want to load spilltemps. Also remember, that we emitted that load, which we don't need to do when we have a death, because then all of needed_loads[] is emptied. */ if (!ri->need_load) { if (!supweb->spill_temp) continue; else ri->needed_loads[j] = 0; } web->in_load = 0; /* The adding of reloads doesn't depend on liveness. */ if (j < nl_first_reload && !TEST_BIT (ri->live, web->id)) continue; aweb = alias (supweb); aweb->changed = 1; start_sequence (); if (supweb->pattern) { /* XXX If we later allow non-constant sources for rematerialization we must also disallow coalescing _to_ rematerialized webs (at least then disallow spilling them, which we already ensure when flag_ra_break_aliases), or not take the pattern but a stackslot. */ if (aweb != supweb) abort (); slot = copy_rtx (supweb->pattern); reg = copy_rtx (supweb->orig_x); /* Sanity check. orig_x should be a REG rtx, which should be shared over all RTL, so copy_rtx should have no effect. */ if (reg != supweb->orig_x) abort (); } else { allocate_spill_web (aweb); slot = aweb->stack_slot; /* If we don't copy the RTL there might be some SUBREG rtx shared in the next iteration although being in different webs, which leads to wrong code. */ reg = copy_rtx (web->orig_x); if (GET_CODE (reg) == SUBREG) /*slot = adjust_address (slot, GET_MODE (reg), SUBREG_BYTE (reg));*/ slot = simplify_gen_subreg (GET_MODE (reg), slot, GET_MODE (slot), SUBREG_BYTE (reg)); } ra_emit_move_insn (reg, slot); ni = get_insns (); end_sequence (); before = web->last_use_insn; web->last_use_insn = NULL_RTX; if (!before) { if (JUMP_P (last_block_insn)) before = last_block_insn; else after = last_block_insn; } if (after) { rtx foll = NEXT_INSN (after); bb = BLOCK_FOR_INSN (after); emit_insn_after (ni, after); if (BB_END (bb) == after) BB_END (bb) = PREV_INSN (foll); for (ni = NEXT_INSN (after); ni != foll; ni = NEXT_INSN (ni)) { set_block_for_insn (ni, bb); df_insn_modify (df, bb, ni); } } else { rtx prev = PREV_INSN (before); bb = BLOCK_FOR_INSN (before); emit_insn_before (ni, before); if (BB_HEAD (bb) == before) BB_HEAD (bb) = NEXT_INSN (prev); for (; ni != before; ni = NEXT_INSN (ni)) { set_block_for_insn (ni, bb); df_insn_modify (df, bb, ni); } } if (supweb->pattern) { emitted_remat++; spill_remat_cost += bb->frequency + 1; } else { emitted_spill_loads++; spill_load_cost += bb->frequency + 1; } RESET_BIT (ri->live, web->id); /* In the special case documented above only emit the reloads and one load. */ if (ri->need_load == 2 && j < nl_first_reload) break; } if (ri->need_load) ri->nl_size = j; } /* Given a set of reloads in RI, an array of NUM_REFS references (either uses or defs) in REFS, and REF2WEB to translate ref IDs to webs (either use2web or def2web) convert some reloads to loads. This looks at the webs referenced, and how they change the set of available colors. Now put all still live webs, which needed reloads, and whose colors isn't free anymore, on the needed_loads list. */ static void reloads_to_loads (struct rewrite_info *ri, struct ref **refs, unsigned int num_refs, struct web **ref2web) { unsigned int n; int num_reloads = ri->num_reloads; for (n = 0; n < num_refs && num_reloads; n++) { struct web *web = ref2web[DF_REF_ID (refs[n])]; struct web *supweb = find_web_for_subweb (web); int is_death; int j; /* Only emit reloads when entering their interference region. 
A use of a spilled web never opens an interference region, independent of it's color. */ if (alias (supweb)->type == SPILLED) continue; if (supweb->type == PRECOLORED && TEST_HARD_REG_BIT (never_use_colors, supweb->color)) continue; /* Note, that if web (and supweb) are DEFs, we already cleared the corresponding bits in live. I.e. is_death becomes true, which is what we want. */ is_death = !TEST_BIT (ri->live, supweb->id); is_death &= !TEST_BIT (ri->live, web->id); if (is_death) { int old_num_r = num_reloads; bitmap_clear (ri->scratch); EXECUTE_IF_SET_IN_BITMAP (ri->need_reload, 0, j, { struct web *web2 = ID2WEB (j); struct web *aweb2 = alias (find_web_for_subweb (web2)); if (spill_is_free (&(ri->colors_in_use), aweb2) == 0) abort (); if (spill_same_color_p (supweb, aweb2) /* && interfere (web, web2) */) { if (!web2->in_load) { ri->needed_loads[ri->nl_size++] = web2; web2->in_load = 1; } bitmap_set_bit (ri->scratch, j); num_reloads--; } }); if (num_reloads != old_num_r) bitmap_operation (ri->need_reload, ri->need_reload, ri->scratch, BITMAP_AND_COMPL); } } ri->num_reloads = num_reloads; } /* This adds loads for spilled webs to the program. It uses a kind of interference region spilling. If flag_ra_ir_spilling is zero it only uses improved chaitin spilling (adding loads only at insns containing deaths). */ static void rewrite_program2 (bitmap new_deaths) { basic_block bb = NULL; int nl_first_reload; struct rewrite_info ri; rtx insn; ri.needed_loads = xmalloc (num_webs * sizeof (struct web *)); ri.need_reload = BITMAP_XMALLOC (); ri.scratch = BITMAP_XMALLOC (); ri.live = sbitmap_alloc (num_webs); ri.nl_size = 0; ri.num_reloads = 0; for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) { basic_block last_bb = NULL; rtx last_block_insn; int i, j; if (!INSN_P (insn)) insn = prev_real_insn (insn); while (insn && !(bb = BLOCK_FOR_INSN (insn))) insn = prev_real_insn (insn); if (!insn) break; i = bb->index + 2; last_block_insn = insn; sbitmap_zero (ri.live); CLEAR_HARD_REG_SET (ri.colors_in_use); EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j, { struct web *web = use2web[j]; struct web *aweb = alias (find_web_for_subweb (web)); /* A web is only live at end, if it isn't spilled. If we wouldn't check this, the last uses of spilled web per basic block wouldn't be detected as deaths, although they are in the final code. This would lead to cumulating many loads without need, only increasing register pressure. */ /* XXX do add also spilled webs which got a color for IR spilling. Remember to not add to colors_in_use in that case. */ if (aweb->type != SPILLED /*|| aweb->color >= 0*/) { SET_BIT (ri.live, web->id); if (aweb->type != SPILLED) update_spill_colors (&(ri.colors_in_use), web, 1); } }); bitmap_clear (ri.need_reload); ri.num_reloads = 0; ri.any_spilltemps_spilled = 0; if (flag_ra_ir_spilling) { struct dlist *d; int pass; /* XXX If we don't add spilled nodes into live above, the following becomes an empty loop. */ for (pass = 0; pass < 2; pass++) for (d = (pass) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next) { struct web *web = DLIST_WEB (d); struct web *aweb = alias (web); if (aweb->type != SPILLED) continue; if (is_partly_live (ri.live, web) && spill_is_free (&(ri.colors_in_use), web) > 0) { ri.num_reloads++; bitmap_set_bit (ri.need_reload, web->id); /* Last using insn is somewhere in another block. 
*/ web->last_use_insn = NULL_RTX; } } } last_bb = bb; for (; insn; insn = PREV_INSN (insn)) { struct ra_insn_info info; unsigned int n; memset (&info, 0, sizeof info); if (INSN_P (insn) && BLOCK_FOR_INSN (insn) != last_bb) { int index = BLOCK_FOR_INSN (insn)->index + 2; EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j, { struct web *web = use2web[j]; struct web *aweb = alias (find_web_for_subweb (web)); if (aweb->type != SPILLED) { SET_BIT (ri.live, web->id); update_spill_colors (&(ri.colors_in_use), web, 1); } }); bitmap_clear (ri.scratch); EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j, { struct web *web2 = ID2WEB (j); struct web *supweb2 = find_web_for_subweb (web2); struct web *aweb2 = alias (supweb2); if (spill_is_free (&(ri.colors_in_use), aweb2) <= 0) { if (!web2->in_load) { ri.needed_loads[ri.nl_size++] = web2; web2->in_load = 1; } bitmap_set_bit (ri.scratch, j); ri.num_reloads--; } }); bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch, BITMAP_AND_COMPL); last_bb = BLOCK_FOR_INSN (insn); last_block_insn = insn; if (!INSN_P (last_block_insn)) last_block_insn = prev_real_insn (last_block_insn); } ri.need_load = 0; if (INSN_P (insn)) info = insn_df[INSN_UID (insn)]; if (INSN_P (insn)) for (n = 0; n < info.num_defs; n++) { struct ref *ref = info.defs[n]; struct web *web = def2web[DF_REF_ID (ref)]; struct web *supweb = find_web_for_subweb (web); int is_non_def = 0; unsigned int n2; supweb = find_web_for_subweb (web); /* Webs which are defined here, but also used in the same insn are rmw webs, or this use isn't a death because of looping constructs. In neither case makes this def available it's resources. Reloads for it are still needed, it's still live and it's colors don't become free. */ for (n2 = 0; n2 < info.num_uses; n2++) { struct web *web2 = use2web[DF_REF_ID (info.uses[n2])]; if (supweb == find_web_for_subweb (web2)) { is_non_def = 1; break; } } if (is_non_def) continue; if (!is_partly_live (ri.live, supweb)) bitmap_set_bit (useless_defs, DF_REF_ID (ref)); RESET_BIT (ri.live, web->id); if (bitmap_bit_p (ri.need_reload, web->id)) { ri.num_reloads--; bitmap_clear_bit (ri.need_reload, web->id); } if (web != supweb) { /* XXX subwebs aren't precisely tracked here. We have everything we need (inverse webs), but the code isn't yet written. We need to make all completely overlapping web parts non-live here. */ /* If by luck now the whole web isn't live anymore, no reloads for it are needed. */ if (!is_partly_live (ri.live, supweb) && bitmap_bit_p (ri.need_reload, supweb->id)) { ri.num_reloads--; bitmap_clear_bit (ri.need_reload, supweb->id); } } else { struct web *sweb; /* If the whole web is defined here, no parts of it are live anymore and no reloads are needed for them. */ for (sweb = supweb->subreg_next; sweb; sweb = sweb->subreg_next) { RESET_BIT (ri.live, sweb->id); if (bitmap_bit_p (ri.need_reload, sweb->id)) { ri.num_reloads--; bitmap_clear_bit (ri.need_reload, sweb->id); } } } if (alias (supweb)->type != SPILLED) update_spill_colors (&(ri.colors_in_use), web, 0); } nl_first_reload = ri.nl_size; /* CALL_INSNs are not really deaths, but still more registers are free after a call, than before. XXX Note, that sometimes reload barfs when we emit insns between a call and the insn which copies the return register into a pseudo. 
*/ if (GET_CODE (insn) == CALL_INSN) ri.need_load = 1; else if (INSN_P (insn)) for (n = 0; n < info.num_uses; n++) { struct web *web = use2web[DF_REF_ID (info.uses[n])]; struct web *supweb = find_web_for_subweb (web); int is_death; if (supweb->type == PRECOLORED && TEST_HARD_REG_BIT (never_use_colors, supweb->color)) continue; is_death = !TEST_BIT (ri.live, supweb->id); is_death &= !TEST_BIT (ri.live, web->id); if (is_death) { ri.need_load = 1; bitmap_set_bit (new_deaths, INSN_UID (insn)); break; } } if (INSN_P (insn) && ri.num_reloads) { int old_num_reloads = ri.num_reloads; reloads_to_loads (&ri, info.uses, info.num_uses, use2web); /* If this insn sets a pseudo, which isn't used later (i.e. wasn't live before) it is a dead store. We need to emit all reloads which have the same color as this def. We don't need to check for non-liveness here to detect the deadness (it anyway is too late, as we already cleared the liveness in the first loop over the defs), because if it _would_ be live here, no reload could have that color, as they would already have been converted to a load. */ if (ri.num_reloads) reloads_to_loads (&ri, info.defs, info.num_defs, def2web); if (ri.num_reloads != old_num_reloads && !ri.need_load) ri.need_load = 1; } if (ri.nl_size && (ri.need_load || ri.any_spilltemps_spilled)) emit_loads (&ri, nl_first_reload, last_block_insn); if (INSN_P (insn) && flag_ra_ir_spilling) for (n = 0; n < info.num_uses; n++) { struct web *web = use2web[DF_REF_ID (info.uses[n])]; struct web *aweb = alias (find_web_for_subweb (web)); if (aweb->type != SPILLED) update_spill_colors (&(ri.colors_in_use), web, 1); } ri.any_spilltemps_spilled = 0; if (INSN_P (insn)) for (n = 0; n < info.num_uses; n++) { struct web *web = use2web[DF_REF_ID (info.uses[n])]; struct web *supweb = find_web_for_subweb (web); struct web *aweb = alias (supweb); SET_BIT (ri.live, web->id); if (aweb->type != SPILLED) continue; if (supweb->spill_temp) ri.any_spilltemps_spilled = 1; web->last_use_insn = insn; if (!web->in_load) { if (spill_is_free (&(ri.colors_in_use), aweb) <= 0 || !flag_ra_ir_spilling) { ri.needed_loads[ri.nl_size++] = web; web->in_load = 1; web->one_load = 1; } else if (!bitmap_bit_p (ri.need_reload, web->id)) { bitmap_set_bit (ri.need_reload, web->id); ri.num_reloads++; web->one_load = 1; } else web->one_load = 0; } else web->one_load = 0; } if (GET_CODE (insn) == CODE_LABEL) break; } nl_first_reload = ri.nl_size; if (ri.num_reloads) { int in_ir = 0; edge e; int num = 0; HARD_REG_SET cum_colors, colors; CLEAR_HARD_REG_SET (cum_colors); for (e = bb->pred; e && num < 5; e = e->pred_next, num++) { int j; CLEAR_HARD_REG_SET (colors); EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j, { struct web *web = use2web[j]; struct web *aweb = alias (find_web_for_subweb (web)); if (aweb->type != SPILLED) update_spill_colors (&colors, web, 1); }); IOR_HARD_REG_SET (cum_colors, colors); } if (num == 5) in_ir = 1; bitmap_clear (ri.scratch); EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j, { struct web *web2 = ID2WEB (j); struct web *supweb2 = find_web_for_subweb (web2); struct web *aweb2 = alias (supweb2); /* block entry is IR boundary for aweb2? Currently more some tries for good conditions. 
*/ if (((ra_pass > 0 || supweb2->target_of_spilled_move) && (1 || in_ir || spill_is_free (&cum_colors, aweb2) <= 0)) || (ra_pass == 1 && (in_ir || spill_is_free (&cum_colors, aweb2) <= 0))) { if (!web2->in_load) { ri.needed_loads[ri.nl_size++] = web2; web2->in_load = 1; } bitmap_set_bit (ri.scratch, j); ri.num_reloads--; } }); bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch, BITMAP_AND_COMPL); } ri.need_load = 1; emit_loads (&ri, nl_first_reload, last_block_insn); if (ri.nl_size != 0 /*|| ri.num_reloads != 0*/) abort (); if (!insn) break; } free (ri.needed_loads); sbitmap_free (ri.live); BITMAP_XFREE (ri.scratch); BITMAP_XFREE (ri.need_reload); } /* WEBS is a web conflicting with a spilled one. Prepare it to be able to rescan it in the next pass. Mark all it's uses for checking, and clear the some members of their web parts (of defs and uses). Notably don't clear the uplink. We don't change the layout of this web, just it's conflicts. Also remember all IDs of its uses in USES_AS_BITMAP. */ static void mark_refs_for_checking (struct web *web, bitmap uses_as_bitmap) { unsigned int i; for (i = 0; i < web->num_uses; i++) { unsigned int id = DF_REF_ID (web->uses[i]); SET_BIT (last_check_uses, id); bitmap_set_bit (uses_as_bitmap, id); web_parts[df->def_id + id].spanned_deaths = 0; web_parts[df->def_id + id].crosses_call = 0; } for (i = 0; i < web->num_defs; i++) { unsigned int id = DF_REF_ID (web->defs[i]); web_parts[id].spanned_deaths = 0; web_parts[id].crosses_call = 0; } } /* The last step of the spill phase is to set up the structures for incrementally rebuilding the interference graph. We break up the web part structure of all spilled webs, mark their uses for rechecking, look at their neighbors, and clean up some global information, we will rebuild. */ static void detect_web_parts_to_rebuild (void) { bitmap uses_as_bitmap; unsigned int i, pass; struct dlist *d; sbitmap already_webs = sbitmap_alloc (num_webs); uses_as_bitmap = BITMAP_XMALLOC (); if (last_check_uses) sbitmap_free (last_check_uses); last_check_uses = sbitmap_alloc (df->use_id); sbitmap_zero (last_check_uses); sbitmap_zero (already_webs); /* We need to recheck all uses of all webs involved in spilling (and the uses added by spill insns, but those are not analyzed yet). Those are the spilled webs themselves, webs coalesced to spilled ones, and webs conflicting with any of them. */ for (pass = 0; pass < 2; pass++) for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next) { struct web *web = DLIST_WEB (d); struct conflict_link *wl; unsigned int j; /* This check is only needed for coalesced nodes, but hey. */ if (alias (web)->type != SPILLED) continue; /* For the spilled web itself we also need to clear it's uplink, to be able to rebuild smaller webs. After all spilling has split the web. */ for (i = 0; i < web->num_uses; i++) { unsigned int id = DF_REF_ID (web->uses[i]); SET_BIT (last_check_uses, id); bitmap_set_bit (uses_as_bitmap, id); web_parts[df->def_id + id].uplink = NULL; web_parts[df->def_id + id].spanned_deaths = 0; web_parts[df->def_id + id].crosses_call = 0; } for (i = 0; i < web->num_defs; i++) { unsigned int id = DF_REF_ID (web->defs[i]); web_parts[id].uplink = NULL; web_parts[id].spanned_deaths = 0; web_parts[id].crosses_call = 0; } /* Now look at all neighbors of this spilled web. 
*/ if (web->have_orig_conflicts) wl = web->orig_conflict_list; else wl = web->conflict_list; for (; wl; wl = wl->next) { if (TEST_BIT (already_webs, wl->t->id)) continue; SET_BIT (already_webs, wl->t->id); mark_refs_for_checking (wl->t, uses_as_bitmap); } EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j, { struct web *web2 = ID2WEB (j); if (TEST_BIT (already_webs, web2->id)) continue; SET_BIT (already_webs, web2->id); mark_refs_for_checking (web2, uses_as_bitmap); }); } /* We also recheck unconditionally all uses of any hardregs. This means we _can_ delete all these uses from the live_at_end[] bitmaps. And because we sometimes delete insn referring to hardregs (when they became useless because they setup a rematerializable pseudo, which then was rematerialized), some of those uses will go away with the next df_analyze(). This means we even _must_ delete those uses from the live_at_end[] bitmaps. For simplicity we simply delete all of them. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (!fixed_regs[i]) { struct df_link *link; for (link = df->regs[i].uses; link; link = link->next) if (link->ref) bitmap_set_bit (uses_as_bitmap, DF_REF_ID (link->ref)); } /* The information in live_at_end[] will be rebuild for all uses we recheck, so clear it here (the uses of spilled webs, might indeed not become member of it again). */ live_at_end -= 2; for (i = 0; i < (unsigned int) last_basic_block + 2; i++) bitmap_operation (live_at_end[i], live_at_end[i], uses_as_bitmap, BITMAP_AND_COMPL); live_at_end += 2; if (dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0) { ra_debug_msg (DUMP_REBUILD, "need to check these uses:\n"); dump_sbitmap_file (dump_file, last_check_uses); } sbitmap_free (already_webs); BITMAP_XFREE (uses_as_bitmap); } /* Statistics about deleted insns, which are useless now. */ static unsigned int deleted_def_insns; static unsigned HOST_WIDE_INT deleted_def_cost; /* In rewrite_program2() we noticed, when a certain insn set a pseudo which wasn't live. Try to delete all those insns. */ static void delete_useless_defs (void) { unsigned int i; /* If the insn only sets the def without any sideeffect (besides clobbers or uses), we can delete it. single_set() also tests for INSN_P(insn). */ EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i, { rtx insn = DF_REF_INSN (df->defs[i]); rtx set = single_set (insn); struct web *web = find_web_for_subweb (def2web[i]); if (set && web->type == SPILLED && web->stack_slot == NULL) { deleted_def_insns++; deleted_def_cost += BLOCK_FOR_INSN (insn)->frequency + 1; PUT_CODE (insn, NOTE); NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED; df_insn_modify (df, BLOCK_FOR_INSN (insn), insn); } }); } /* Look for spilled webs, on whose behalf no insns were emitted. We inversify (sp?) the changed flag of the webs, so after this function a nonzero changed flag means, that this web was not spillable (at least in this pass). */ static void detect_non_changed_webs (void) { struct dlist *d, *d_next; for (d = WEBS(SPILLED); d; d = d_next) { struct web *web = DLIST_WEB (d); d_next = d->next; if (!web->changed) { ra_debug_msg (DUMP_PROCESS, "no insns emitted for spilled web %d\n", web->id); remove_web_from_list (web); put_web (web, COLORED); web->changed = 1; } else web->changed = 0; /* From now on web->changed is used as the opposite flag. I.e. colored webs, which have changed set were formerly spilled webs for which no insns were emitted. */ } } /* Before spilling we clear the changed flags for all spilled webs. 
*/ static void reset_changed_flag (void) { struct dlist *d; for (d = WEBS(SPILLED); d; d = d->next) DLIST_WEB(d)->changed = 0; } /* The toplevel function for this file. Given a colorized graph, and lists of spilled, coalesced and colored webs, we add some spill code. This also sets up the structures for incrementally building the interference graph in the next pass. */ void actual_spill (void) { int i; bitmap new_deaths = BITMAP_XMALLOC (); reset_changed_flag (); spill_coalprop (); choose_spill_colors (); useless_defs = BITMAP_XMALLOC (); if (flag_ra_improved_spilling) rewrite_program2 (new_deaths); else rewrite_program (new_deaths); insert_stores (new_deaths); delete_useless_defs (); BITMAP_XFREE (useless_defs); sbitmap_free (insns_with_deaths); insns_with_deaths = sbitmap_alloc (get_max_uid ()); death_insns_max_uid = get_max_uid (); sbitmap_zero (insns_with_deaths); EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i, { SET_BIT (insns_with_deaths, i);}); detect_non_changed_webs (); detect_web_parts_to_rebuild (); BITMAP_XFREE (new_deaths); } /* A bitmap of pseudo reg numbers which are coalesced directly to a hardreg. Set in emit_colors(), used and freed in remove_suspicious_death_notes(). */ static bitmap regnos_coalesced_to_hardregs; /* Create new pseudos for each web we colored, change insns to use those pseudos and set up ra_reg_renumber. */ void emit_colors (struct df *df) { unsigned int i; int si; struct web *web; int old_max_regno = max_reg_num (); regset old_regs; basic_block bb; /* This bitmap is freed in remove_suspicious_death_notes(), which is also the user of it. */ regnos_coalesced_to_hardregs = BITMAP_XMALLOC (); /* First create the (REG xx) rtx's for all webs, as we need to know the number, to make sure, flow has enough memory for them in the various tables. */ for (i = 0; i < num_webs - num_subwebs; i++) { web = ID2WEB (i); if (web->type != COLORED && web->type != COALESCED) continue; if (web->type == COALESCED && alias (web)->type == COLORED) continue; if (web->reg_rtx || web->regno < FIRST_PSEUDO_REGISTER) abort (); if (web->regno >= max_normal_pseudo) { rtx place; if (web->color == an_unusable_color) { unsigned int inherent_size = PSEUDO_REGNO_BYTES (web->regno); unsigned int total_size = MAX (inherent_size, 0); place = assign_stack_local (PSEUDO_REGNO_MODE (web->regno), total_size, inherent_size == total_size ? 0 : -1); RTX_UNCHANGING_P (place) = RTX_UNCHANGING_P (regno_reg_rtx[web->regno]); set_mem_alias_set (place, new_alias_set ()); } else { place = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno)); } web->reg_rtx = place; } else { /* Special case for i386 'fix_truncdi_nomemory' insn. We must choose mode from insns not from PSEUDO_REGNO_MODE. Actual only for clobbered register. */ if (web->num_uses == 0 && web->num_defs == 1) web->reg_rtx = gen_reg_rtx (GET_MODE (DF_REF_REAL_REG (web->defs[0]))); else web->reg_rtx = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno)); /* Remember the different parts directly coalesced to a hardreg. */ if (web->type == COALESCED) bitmap_set_bit (regnos_coalesced_to_hardregs, REGNO (web->reg_rtx)); } } ra_max_regno = max_regno = max_reg_num (); allocate_reg_info (max_regno, FALSE, FALSE); ra_reg_renumber = xmalloc (max_regno * sizeof (short)); for (si = 0; si < max_regno; si++) ra_reg_renumber[si] = -1; /* Then go through all references, and replace them by a new pseudoreg for each web. All uses. 
*/ /* XXX Beware: The order of replacements (first uses, then defs) matters only for read-mod-write insns, where the RTL expression for the REG is shared between def and use. For normal rmw insns we connected all such webs, i.e. both the use and the def (which are the same memory) there get the same new pseudo-reg, so order would not matter. _However_ we did not connect webs, were the read cycle was an uninitialized read. If we now would first replace the def reference and then the use ref, we would initialize it with a REG rtx, which gets never initialized, and yet more wrong, which would overwrite the definition of the other REG rtx. So we must replace the defs last. */ for (i = 0; i < df->use_id; i++) if (df->uses[i]) { regset rs = DF_REF_BB (df->uses[i])->global_live_at_start; rtx regrtx; web = use2web[i]; web = find_web_for_subweb (web); if (web->type != COLORED && web->type != COALESCED) continue; regrtx = alias (web)->reg_rtx; if (!regrtx) regrtx = web->reg_rtx; *DF_REF_REAL_LOC (df->uses[i]) = regrtx; if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx)) { /*CLEAR_REGNO_REG_SET (rs, web->regno);*/ SET_REGNO_REG_SET (rs, REGNO (regrtx)); } } /* And all defs. */ for (i = 0; i < df->def_id; i++) { regset rs; rtx regrtx; if (!df->defs[i]) continue; rs = DF_REF_BB (df->defs[i])->global_live_at_start; web = def2web[i]; web = find_web_for_subweb (web); if (web->type != COLORED && web->type != COALESCED) continue; regrtx = alias (web)->reg_rtx; if (!regrtx) regrtx = web->reg_rtx; *DF_REF_REAL_LOC (df->defs[i]) = regrtx; if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx)) { /* Don't simply clear the current regno, as it might be replaced by two webs. */ /*CLEAR_REGNO_REG_SET (rs, web->regno);*/ SET_REGNO_REG_SET (rs, REGNO (regrtx)); } } /* And now set up the ra_reg_renumber array for reload with all the new pseudo-regs. */ for (i = 0; i < num_webs - num_subwebs; i++) { web = ID2WEB (i); if (web->reg_rtx && REG_P (web->reg_rtx)) { int r = REGNO (web->reg_rtx); ra_reg_renumber[r] = web->color; ra_debug_msg (DUMP_COLORIZE, "Renumber pseudo %d (== web %d) to %d\n", r, web->id, ra_reg_renumber[r]); } } old_regs = BITMAP_XMALLOC (); for (si = FIRST_PSEUDO_REGISTER; si < old_max_regno; si++) SET_REGNO_REG_SET (old_regs, si); FOR_EACH_BB (bb) { AND_COMPL_REG_SET (bb->global_live_at_start, old_regs); AND_COMPL_REG_SET (bb->global_live_at_end, old_regs); } BITMAP_XFREE (old_regs); } /* Delete some coalesced moves from the insn stream. */ void delete_moves (void) { struct move_list *ml; struct web *s, *t; /* XXX Beware: We normally would test here each copy insn, if source and target got the same color (either by coalescing or by pure luck), and then delete it. This will currently not work. One problem is, that we don't color the regs ourself, but instead defer to reload. So the colorization is only a kind of suggestion, which reload doesn't have to follow. For webs which are coalesced to a normal colored web, we only have one new pseudo, so in this case we indeed can delete copy insns involving those (because even if reload colors them different from our suggestion, it still has to color them the same, as only one pseudo exists). But for webs coalesced to precolored ones, we have not a single pseudo, but instead one for each coalesced web. This means, that we can't delete copy insns, where source and target are webs coalesced to precolored ones, because then the connection between both webs is destroyed. 
Note that this not only means copy insns, where one side is the precolored one itself, but also those between webs which are coalesced to one color. Also, because of reload, we can't delete copy insns which involve any precolored web at all. These often also have special meaning (e.g. copying a return value of a call to a pseudo, or copying a pseudo to the return register), and the deletion would confuse reload into thinking the pseudo isn't needed. One of these days reload will go away and we can do everything we want. In effect, because of the later reload, we can't base our deletion on the colors themselves, but instead need to base it on the newly created pseudos. */ for (ml = wl_moves; ml; ml = ml->next) /* The real condition we would ideally use is: s->color == t->color. Additionally: s->type != PRECOLORED && t->type != PRECOLORED, in case we want to prevent deletion of "special" copies. */ if (ml->move && (s = alias (ml->move->source_web))->reg_rtx == (t = alias (ml->move->target_web))->reg_rtx && s->type != PRECOLORED && t->type != PRECOLORED) { basic_block bb = BLOCK_FOR_INSN (ml->move->insn); df_insn_delete (df, bb, ml->move->insn); deleted_move_insns++; deleted_move_cost += bb->frequency + 1; } } /* Due to reasons documented elsewhere we create different pseudos for all webs coalesced to hardregs. For these parts life_analysis() might have added REG_DEAD notes without considering that only this part, but not the whole coalesced web, dies. The RTL is correct; there is no coalescing yet. But if later reload's alter_reg() substitutes the hardreg into the REG rtx, it looks like that particular hardreg dies here, although (due to coalescing) it is still live. This might make different places of reload think it can use that hardreg for reload regs, accidentally overwriting it. So we need to remove those REG_DEAD notes. (Or better, teach life_analysis() and reload about our coalescing, but that comes later.) Bah. */ void remove_suspicious_death_notes (void) { rtx insn; for (insn = get_insns(); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { rtx *pnote = &REG_NOTES (insn); while (*pnote) { rtx note = *pnote; if ((REG_NOTE_KIND (note) == REG_DEAD || REG_NOTE_KIND (note) == REG_UNUSED) && (REG_P (XEXP (note, 0)) && bitmap_bit_p (regnos_coalesced_to_hardregs, REGNO (XEXP (note, 0))))) *pnote = XEXP (note, 1); else pnote = &XEXP (*pnote, 1); } } BITMAP_XFREE (regnos_coalesced_to_hardregs); regnos_coalesced_to_hardregs = NULL; } /* Allocate space for max_reg_num() pseudo registers, and fill reg_renumber[] from ra_reg_renumber[]. If FREE_IT is nonzero, also free ra_reg_renumber and reset ra_max_regno. */ void setup_renumber (int free_it) { int i; max_regno = max_reg_num (); allocate_reg_info (max_regno, FALSE, TRUE); for (i = 0; i < max_regno; i++) { reg_renumber[i] = (i < ra_max_regno) ? ra_reg_renumber[i] : -1; } if (free_it) { free (ra_reg_renumber); ra_reg_renumber = NULL; ra_max_regno = 0; } } /* Dump the costs and savings due to spilling, i.e. of added spill insns and removed moves or useless defs. 
*/ void dump_cost (unsigned int level) { ra_debug_msg (level, "Instructions for spilling\n added:\n"); ra_debug_msg (level, " loads =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n", emitted_spill_loads, spill_load_cost); ra_debug_msg (level, " stores=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n", emitted_spill_stores, spill_store_cost); ra_debug_msg (level, " remat =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n", emitted_remat, spill_remat_cost); ra_debug_msg (level, " removed:\n moves =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n", deleted_move_insns, deleted_move_cost); ra_debug_msg (level, " others=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n", deleted_def_insns, deleted_def_cost); } /* Initialization of the rewrite phase. */ void ra_rewrite_init (void) { emitted_spill_loads = 0; emitted_spill_stores = 0; emitted_remat = 0; spill_load_cost = 0; spill_store_cost = 0; spill_remat_cost = 0; deleted_move_insns = 0; deleted_move_cost = 0; deleted_def_insns = 0; deleted_def_cost = 0; } /* vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: */ /* real.c - software floating point emulation. Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Stephen L. Moshier (moshier@world.std.com). Re-written by Richard Henderson This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The floating point model used internally is not exactly IEEE 754 compliant, and close to the description in the ISO C99 standard, section 5.2.4.2.2 Characteristics of floating types. Specifically x = s * b^e * \sum_{k=1}^p f_k * b^{-k} where s = sign (+- 1) b = base or radix, here always 2 e = exponent p = precision (the number of base-b digits in the significand) f_k = the digits of the significand. We differ from typical IEEE 754 encodings in that the entire significand is fractional. Normalized significands are in the range [0.5, 1.0). A requirement of the model is that P be larger than the largest supported target floating-point type by at least 2 bits. This gives us proper rounding when we truncate to the target type. In addition, E must be large enough to hold the smallest supported denormal number in a normalized form. Both of these requirements are easily satisfied. The largest target significand is 113 bits; we store at least 160. The smallest denormal number fits in 17 exponent bits; we store 27. Note that the decimal string conversion routines are sensitive to rounding errors. Since the raw arithmetic routines do not themselves have guard digits or rounding, the computation of 10**exp can accumulate more than a few digits of error. The previous incarnation of real.c successfully used a 144-bit fraction; given the current layout of REAL_VALUE_TYPE we're forced to expand to at least 160 bits. Target floating point models that use base 16 instead of base 2 (i.e. 
IBM 370), are handled during round_for_format, in which we canonicalize the exponent to be a multiple of 4 (log2(16)), and adjust the significand to match. */ /* Used to classify two numbers simultaneously. */ #define CLASS2(A, B) ((A) << 2 | (B)) #if HOST_BITS_PER_LONG != 64 && HOST_BITS_PER_LONG != 32 #error "Some constant folding done by hand to avoid shift count warnings" #endif static void get_zero (REAL_VALUE_TYPE *, int); static void get_canonical_qnan (REAL_VALUE_TYPE *, int); static void get_canonical_snan (REAL_VALUE_TYPE *, int); static void get_inf (REAL_VALUE_TYPE *, int); static bool sticky_rshift_significand (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, unsigned int); static void rshift_significand (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, unsigned int); static void lshift_significand (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, unsigned int); static void lshift_significand_1 (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); static bool add_significands (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); static bool sub_significands (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, int); static void neg_significand (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); static int cmp_significands (const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); static int cmp_significand_0 (const REAL_VALUE_TYPE *); static void set_significand_bit (REAL_VALUE_TYPE *, unsigned int); static void clear_significand_bit (REAL_VALUE_TYPE *, unsigned int); static bool test_significand_bit (REAL_VALUE_TYPE *, unsigned int); static void clear_significand_below (REAL_VALUE_TYPE *, unsigned int); static bool div_significands (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); static void normalize_val (REAL_VALUE_TYPE *); static bool do_add (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, int); static bool do_multiply (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); static bool do_divide (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); static int do_compare (const REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *, int); static void do_fix_trunc (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); static unsigned long rtd_divmod (REAL_VALUE_TYPE *, REAL_VALUE_TYPE *); static const REAL_VALUE_TYPE * ten_to_ptwo (int); static const REAL_VALUE_TYPE * ten_to_mptwo (int); static const REAL_VALUE_TYPE * real_digit (int); static void times_pten (REAL_VALUE_TYPE *, int); static void round_for_format (const struct real_format *, REAL_VALUE_TYPE *); /* Initialize R with a positive zero. */ static inline void get_zero (REAL_VALUE_TYPE *r, int sign) { memset (r, 0, sizeof (*r)); r->sign = sign; } /* Initialize R with the canonical quiet NaN. */ static inline void get_canonical_qnan (REAL_VALUE_TYPE *r, int sign) { memset (r, 0, sizeof (*r)); r->class = rvc_nan; r->sign = sign; r->canonical = 1; } static inline void get_canonical_snan (REAL_VALUE_TYPE *r, int sign) { memset (r, 0, sizeof (*r)); r->class = rvc_nan; r->sign = sign; r->signalling = 1; r->canonical = 1; } static inline void get_inf (REAL_VALUE_TYPE *r, int sign) { memset (r, 0, sizeof (*r)); r->class = rvc_inf; r->sign = sign; } /* Right-shift the significand of A by N bits; put the result in the significand of R. If any one bits are shifted out, return true. 
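   The short sketch below is added purely for illustration and is not part
   of the original source; it shows the same "sticky" idea on a single
   word, assuming 0 < N < the number of bits in an unsigned long: the bits
   shifted out at the bottom are not discarded but folded into one flag,
   so a later rounding step still knows the result was inexact.  */
#if 0
static unsigned long
sticky_rshift_word_sketch (unsigned long x, unsigned int n, int *sticky)
{
  /* Any one bits below position N are lost by the shift; remember
     whether there were any.  */
  *sticky = (x & (((unsigned long) 1 << n) - 1)) != 0;
  return x >> n;
}
#endif
/* sticky_rshift_significand, described above, follows.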
*/ static bool sticky_rshift_significand (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, unsigned int n) { unsigned long sticky = 0; unsigned int i, ofs = 0; if (n >= HOST_BITS_PER_LONG) { for (i = 0, ofs = n / HOST_BITS_PER_LONG; i < ofs; ++i) sticky |= a->sig[i]; n &= HOST_BITS_PER_LONG - 1; } if (n != 0) { sticky |= a->sig[ofs] & (((unsigned long)1 << n) - 1); for (i = 0; i < SIGSZ; ++i) { r->sig[i] = (((ofs + i >= SIGSZ ? 0 : a->sig[ofs + i]) >> n) | ((ofs + i + 1 >= SIGSZ ? 0 : a->sig[ofs + i + 1]) << (HOST_BITS_PER_LONG - n))); } } else { for (i = 0; ofs + i < SIGSZ; ++i) r->sig[i] = a->sig[ofs + i]; for (; i < SIGSZ; ++i) r->sig[i] = 0; } return sticky != 0; } /* Right-shift the significand of A by N bits; put the result in the significand of R. */ static void rshift_significand (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, unsigned int n) { unsigned int i, ofs = n / HOST_BITS_PER_LONG; n &= HOST_BITS_PER_LONG - 1; if (n != 0) { for (i = 0; i < SIGSZ; ++i) { r->sig[i] = (((ofs + i >= SIGSZ ? 0 : a->sig[ofs + i]) >> n) | ((ofs + i + 1 >= SIGSZ ? 0 : a->sig[ofs + i + 1]) << (HOST_BITS_PER_LONG - n))); } } else { for (i = 0; ofs + i < SIGSZ; ++i) r->sig[i] = a->sig[ofs + i]; for (; i < SIGSZ; ++i) r->sig[i] = 0; } } /* Left-shift the significand of A by N bits; put the result in the significand of R. */ static void lshift_significand (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, unsigned int n) { unsigned int i, ofs = n / HOST_BITS_PER_LONG; n &= HOST_BITS_PER_LONG - 1; if (n == 0) { for (i = 0; ofs + i < SIGSZ; ++i) r->sig[SIGSZ-1-i] = a->sig[SIGSZ-1-i-ofs]; for (; i < SIGSZ; ++i) r->sig[SIGSZ-1-i] = 0; } else for (i = 0; i < SIGSZ; ++i) { r->sig[SIGSZ-1-i] = (((ofs + i >= SIGSZ ? 0 : a->sig[SIGSZ-1-i-ofs]) << n) | ((ofs + i + 1 >= SIGSZ ? 0 : a->sig[SIGSZ-1-i-ofs-1]) >> (HOST_BITS_PER_LONG - n))); } } /* Likewise, but N is specialized to 1. */ static inline void lshift_significand_1 (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a) { unsigned int i; for (i = SIGSZ - 1; i > 0; --i) r->sig[i] = (a->sig[i] << 1) | (a->sig[i-1] >> (HOST_BITS_PER_LONG - 1)); r->sig[0] = a->sig[0] << 1; } /* Add the significands of A and B, placing the result in R. Return true if there was carry out of the most significant word. */ static inline bool add_significands (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b) { bool carry = false; int i; for (i = 0; i < SIGSZ; ++i) { unsigned long ai = a->sig[i]; unsigned long ri = ai + b->sig[i]; if (carry) { carry = ri < ai; carry |= ++ri == 0; } else carry = ri < ai; r->sig[i] = ri; } return carry; } /* Subtract the significands of A and B, placing the result in R. CARRY is true if there's a borrow incoming to the least significant word. Return true if there was borrow out of the most significant word. */ static inline bool sub_significands (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b, int carry) { int i; for (i = 0; i < SIGSZ; ++i) { unsigned long ai = a->sig[i]; unsigned long ri = ai - b->sig[i]; if (carry) { carry = ri > ai; carry |= ~--ri == 0; } else carry = ri > ai; r->sig[i] = ri; } return carry; } /* Negate the significand A, placing the result in R. */ static inline void neg_significand (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a) { bool carry = true; int i; for (i = 0; i < SIGSZ; ++i) { unsigned long ri, ai = a->sig[i]; if (carry) { if (ai) { ri = -ai; carry = false; } else ri = ai; } else ri = ~ai; r->sig[i] = ri; } } /* Compare significands. Return tri-state vs zero. 
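   The note and sketch below are added for illustration and are not part of
   the original source.  The carry handling in add_significands above is
   the usual multi-word pattern: a word carried iff its raw sum is smaller
   than one of the addends, and folding in an incoming carry can itself
   wrap the word back to zero.  Reduced to two words it reads:  */
#if 0
static void
two_word_add_sketch (unsigned long a0, unsigned long a1,
                     unsigned long b0, unsigned long b1,
                     unsigned long r[2], int *carry_out)
{
  unsigned long r0 = a0 + b0;
  int carry = r0 < a0;          /* Did the low word overflow?  */
  unsigned long r1 = a1 + b1;
  int c1 = r1 < a1;             /* Did the high word overflow by itself?  */

  if (carry)
    c1 |= ++r1 == 0;            /* Propagating the carry may wrap too.  */
  r[0] = r0;
  r[1] = r1;
  *carry_out = c1;
}
#endif
/* The comparison helper described above follows.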
*/ static inline int cmp_significands (const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b) { int i; for (i = SIGSZ - 1; i >= 0; --i) { unsigned long ai = a->sig[i]; unsigned long bi = b->sig[i]; if (ai > bi) return 1; if (ai < bi) return -1; } return 0; } /* Return true if A is nonzero. */ static inline int cmp_significand_0 (const REAL_VALUE_TYPE *a) { int i; for (i = SIGSZ - 1; i >= 0; --i) if (a->sig[i]) return 1; return 0; } /* Set bit N of the significand of R. */ static inline void set_significand_bit (REAL_VALUE_TYPE *r, unsigned int n) { r->sig[n / HOST_BITS_PER_LONG] |= (unsigned long)1 << (n % HOST_BITS_PER_LONG); } /* Clear bit N of the significand of R. */ static inline void clear_significand_bit (REAL_VALUE_TYPE *r, unsigned int n) { r->sig[n / HOST_BITS_PER_LONG] &= ~((unsigned long)1 << (n % HOST_BITS_PER_LONG)); } /* Test bit N of the significand of R. */ static inline bool test_significand_bit (REAL_VALUE_TYPE *r, unsigned int n) { /* ??? Compiler bug here if we return this expression directly. The conversion to bool strips the "&1" and we wind up testing e.g. 2 != 0 -> true. Seen in gcc version 3.2 20020520. */ int t = (r->sig[n / HOST_BITS_PER_LONG] >> (n % HOST_BITS_PER_LONG)) & 1; return t; } /* Clear bits 0..N-1 of the significand of R. */ static void clear_significand_below (REAL_VALUE_TYPE *r, unsigned int n) { int i, w = n / HOST_BITS_PER_LONG; for (i = 0; i < w; ++i) r->sig[i] = 0; r->sig[w] &= ~(((unsigned long)1 << (n % HOST_BITS_PER_LONG)) - 1); } /* Divide the significands of A and B, placing the result in R. Return true if the division was inexact. */ static inline bool div_significands (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b) { REAL_VALUE_TYPE u; int i, bit = SIGNIFICAND_BITS - 1; unsigned long msb, inexact; u = *a; memset (r->sig, 0, sizeof (r->sig)); msb = 0; goto start; do { msb = u.sig[SIGSZ-1] & SIG_MSB; lshift_significand_1 (&u, &u); start: if (msb || cmp_significands (&u, b) >= 0) { sub_significands (&u, &u, b, 0); set_significand_bit (r, bit); } } while (--bit >= 0); for (i = 0, inexact = 0; i < SIGSZ; i++) inexact |= u.sig[i]; return inexact != 0; } /* Adjust the exponent and significand of R such that the most significant bit is set. We underflow to zero and overflow to infinity here, without denormals. (The intermediate representation exponent is large enough to handle target denormals normalized.) */ static void normalize_val (REAL_VALUE_TYPE *r) { int shift = 0, exp; int i, j; /* Find the first word that is nonzero. */ for (i = SIGSZ - 1; i >= 0; i--) if (r->sig[i] == 0) shift += HOST_BITS_PER_LONG; else break; /* Zero significand flushes to zero. */ if (i < 0) { r->class = rvc_zero; SET_REAL_EXP (r, 0); return; } /* Find the first bit that is nonzero. */ for (j = 0; ; j++) if (r->sig[i] & ((unsigned long)1 << (HOST_BITS_PER_LONG - 1 - j))) break; shift += j; if (shift > 0) { exp = REAL_EXP (r) - shift; if (exp > MAX_EXP) get_inf (r, r->sign); else if (exp < -MAX_EXP) get_zero (r, r->sign); else { SET_REAL_EXP (r, exp); lshift_significand (r, r, shift); } } } /* Calculate R = A + (SUBTRACT_P ? -B : B). Return true if the result may be inexact due to a loss of precision. */ static bool do_add (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b, int subtract_p) { int dexp, sign, exp; REAL_VALUE_TYPE t; bool inexact = false; /* Determine if we need to add or subtract. 
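   For example: computing (-2) - (+3) enters with a->sign = 1, b->sign = 0
   and SUBTRACT_P = 1; the two assignments below then give sign = 1 and
   subtract_p = (1 ^ 0) ^ 1 = 0, i.e. the magnitudes 2 and 3 are added and
   the result carries the minus sign, giving -5.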
*/ sign = a->sign; subtract_p = (sign ^ b->sign) ^ subtract_p; switch (CLASS2 (a->class, b->class)) { case CLASS2 (rvc_zero, rvc_zero): /* -0 + -0 = -0, -0 - +0 = -0; all other cases yield +0. */ get_zero (r, sign & !subtract_p); return false; case CLASS2 (rvc_zero, rvc_normal): case CLASS2 (rvc_zero, rvc_inf): case CLASS2 (rvc_zero, rvc_nan): /* 0 + ANY = ANY. */ case CLASS2 (rvc_normal, rvc_nan): case CLASS2 (rvc_inf, rvc_nan): case CLASS2 (rvc_nan, rvc_nan): /* ANY + NaN = NaN. */ case CLASS2 (rvc_normal, rvc_inf): /* R + Inf = Inf. */ *r = *b; r->sign = sign ^ subtract_p; return false; case CLASS2 (rvc_normal, rvc_zero): case CLASS2 (rvc_inf, rvc_zero): case CLASS2 (rvc_nan, rvc_zero): /* ANY + 0 = ANY. */ case CLASS2 (rvc_nan, rvc_normal): case CLASS2 (rvc_nan, rvc_inf): /* NaN + ANY = NaN. */ case CLASS2 (rvc_inf, rvc_normal): /* Inf + R = Inf. */ *r = *a; return false; case CLASS2 (rvc_inf, rvc_inf): if (subtract_p) /* Inf - Inf = NaN. */ get_canonical_qnan (r, 0); else /* Inf + Inf = Inf. */ *r = *a; return false; case CLASS2 (rvc_normal, rvc_normal): break; default: abort (); } /* Swap the arguments such that A has the larger exponent. */ dexp = REAL_EXP (a) - REAL_EXP (b); if (dexp < 0) { const REAL_VALUE_TYPE *t; t = a, a = b, b = t; dexp = -dexp; sign ^= subtract_p; } exp = REAL_EXP (a); /* If the exponents are not identical, we need to shift the significand of B down. */ if (dexp > 0) { /* If the exponents are too far apart, the significands do not overlap, which makes the subtraction a noop. */ if (dexp >= SIGNIFICAND_BITS) { *r = *a; r->sign = sign; return true; } inexact |= sticky_rshift_significand (&t, b, dexp); b = &t; } if (subtract_p) { if (sub_significands (r, a, b, inexact)) { /* We got a borrow out of the subtraction. That means that A and B had the same exponent, and B had the larger significand. We need to swap the sign and negate the significand. */ sign ^= 1; neg_significand (r, r); } } else { if (add_significands (r, a, b)) { /* We got carry out of the addition. This means we need to shift the significand back down one bit and increase the exponent. */ inexact |= sticky_rshift_significand (r, r, 1); r->sig[SIGSZ-1] |= SIG_MSB; if (++exp > MAX_EXP) { get_inf (r, sign); return true; } } } r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, exp); /* Re-normalize the result. */ normalize_val (r); /* Special case: if the subtraction results in zero, the result is positive. */ if (r->class == rvc_zero) r->sign = 0; else r->sig[0] |= inexact; return inexact; } /* Calculate R = A * B. Return true if the result may be inexact. */ static bool do_multiply (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b) { REAL_VALUE_TYPE u, t, *rr; unsigned int i, j, k; int sign = a->sign ^ b->sign; bool inexact = false; switch (CLASS2 (a->class, b->class)) { case CLASS2 (rvc_zero, rvc_zero): case CLASS2 (rvc_zero, rvc_normal): case CLASS2 (rvc_normal, rvc_zero): /* +-0 * ANY = 0 with appropriate sign. */ get_zero (r, sign); return false; case CLASS2 (rvc_zero, rvc_nan): case CLASS2 (rvc_normal, rvc_nan): case CLASS2 (rvc_inf, rvc_nan): case CLASS2 (rvc_nan, rvc_nan): /* ANY * NaN = NaN. */ *r = *b; r->sign = sign; return false; case CLASS2 (rvc_nan, rvc_zero): case CLASS2 (rvc_nan, rvc_normal): case CLASS2 (rvc_nan, rvc_inf): /* NaN * ANY = NaN. 
*/ *r = *a; r->sign = sign; return false; case CLASS2 (rvc_zero, rvc_inf): case CLASS2 (rvc_inf, rvc_zero): /* 0 * Inf = NaN */ get_canonical_qnan (r, sign); return false; case CLASS2 (rvc_inf, rvc_inf): case CLASS2 (rvc_normal, rvc_inf): case CLASS2 (rvc_inf, rvc_normal): /* Inf * Inf = Inf, R * Inf = Inf */ get_inf (r, sign); return false; case CLASS2 (rvc_normal, rvc_normal): break; default: abort (); } if (r == a || r == b) rr = &t; else rr = r; get_zero (rr, 0); /* Collect all the partial products. Since we don't have sure access to a widening multiply, we split each long into two half-words. Consider the long-hand form of a four half-word multiplication: A B C D * E F G H -------------- DE DF DG DH CE CF CG CH BE BF BG BH AE AF AG AH We construct partial products of the widened half-word products that are known to not overlap, e.g. DF+DH. Each such partial product is given its proper exponent, which allows us to sum them and obtain the finished product. */ for (i = 0; i < SIGSZ * 2; ++i) { unsigned long ai = a->sig[i / 2]; if (i & 1) ai >>= HOST_BITS_PER_LONG / 2; else ai &= ((unsigned long)1 << (HOST_BITS_PER_LONG / 2)) - 1; if (ai == 0) continue; for (j = 0; j < 2; ++j) { int exp = (REAL_EXP (a) - (2*SIGSZ-1-i)*(HOST_BITS_PER_LONG/2) + (REAL_EXP (b) - (1-j)*(HOST_BITS_PER_LONG/2))); if (exp > MAX_EXP) { get_inf (r, sign); return true; } if (exp < -MAX_EXP) { /* Would underflow to zero, which we shouldn't bother adding. */ inexact = true; continue; } memset (&u, 0, sizeof (u)); u.class = rvc_normal; SET_REAL_EXP (&u, exp); for (k = j; k < SIGSZ * 2; k += 2) { unsigned long bi = b->sig[k / 2]; if (k & 1) bi >>= HOST_BITS_PER_LONG / 2; else bi &= ((unsigned long)1 << (HOST_BITS_PER_LONG / 2)) - 1; u.sig[k / 2] = ai * bi; } normalize_val (&u); inexact |= do_add (rr, rr, &u, 0); } } rr->sign = sign; if (rr != r) *r = t; return inexact; } /* Calculate R = A / B. Return true if the result may be inexact. */ static bool do_divide (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b) { int exp, sign = a->sign ^ b->sign; REAL_VALUE_TYPE t, *rr; bool inexact; switch (CLASS2 (a->class, b->class)) { case CLASS2 (rvc_zero, rvc_zero): /* 0 / 0 = NaN. */ case CLASS2 (rvc_inf, rvc_inf): /* Inf / Inf = NaN. */ get_canonical_qnan (r, sign); return false; case CLASS2 (rvc_zero, rvc_normal): case CLASS2 (rvc_zero, rvc_inf): /* 0 / ANY = 0. */ case CLASS2 (rvc_normal, rvc_inf): /* R / Inf = 0. */ get_zero (r, sign); return false; case CLASS2 (rvc_normal, rvc_zero): /* R / 0 = Inf. */ case CLASS2 (rvc_inf, rvc_zero): /* Inf / 0 = Inf. */ get_inf (r, sign); return false; case CLASS2 (rvc_zero, rvc_nan): case CLASS2 (rvc_normal, rvc_nan): case CLASS2 (rvc_inf, rvc_nan): case CLASS2 (rvc_nan, rvc_nan): /* ANY / NaN = NaN. */ *r = *b; r->sign = sign; return false; case CLASS2 (rvc_nan, rvc_zero): case CLASS2 (rvc_nan, rvc_normal): case CLASS2 (rvc_nan, rvc_inf): /* NaN / ANY = NaN. */ *r = *a; r->sign = sign; return false; case CLASS2 (rvc_inf, rvc_normal): /* Inf / R = Inf. */ get_inf (r, sign); return false; case CLASS2 (rvc_normal, rvc_normal): break; default: abort (); } if (r == a || r == b) rr = &t; else rr = r; /* Make sure all fields in the result are initialized. */ get_zero (rr, 0); rr->class = rvc_normal; rr->sign = sign; exp = REAL_EXP (a) - REAL_EXP (b) + 1; if (exp > MAX_EXP) { get_inf (r, sign); return true; } if (exp < -MAX_EXP) { get_zero (r, sign); return true; } SET_REAL_EXP (rr, exp); inexact = div_significands (rr, a, b); /* Re-normalize the result. 
*/ normalize_val (rr); rr->sig[0] |= inexact; if (rr != r) *r = t; return inexact; } /* Return a tri-state comparison of A vs B. Return NAN_RESULT if one of the two operands is a NaN. */ static int do_compare (const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b, int nan_result) { int ret; switch (CLASS2 (a->class, b->class)) { case CLASS2 (rvc_zero, rvc_zero): /* Sign of zero doesn't matter for compares. */ return 0; case CLASS2 (rvc_inf, rvc_zero): case CLASS2 (rvc_inf, rvc_normal): case CLASS2 (rvc_normal, rvc_zero): return (a->sign ? -1 : 1); case CLASS2 (rvc_inf, rvc_inf): return -a->sign - -b->sign; case CLASS2 (rvc_zero, rvc_normal): case CLASS2 (rvc_zero, rvc_inf): case CLASS2 (rvc_normal, rvc_inf): return (b->sign ? 1 : -1); case CLASS2 (rvc_zero, rvc_nan): case CLASS2 (rvc_normal, rvc_nan): case CLASS2 (rvc_inf, rvc_nan): case CLASS2 (rvc_nan, rvc_nan): case CLASS2 (rvc_nan, rvc_zero): case CLASS2 (rvc_nan, rvc_normal): case CLASS2 (rvc_nan, rvc_inf): return nan_result; case CLASS2 (rvc_normal, rvc_normal): break; default: abort (); } if (a->sign != b->sign) return -a->sign - -b->sign; if (REAL_EXP (a) > REAL_EXP (b)) ret = 1; else if (REAL_EXP (a) < REAL_EXP (b)) ret = -1; else ret = cmp_significands (a, b); return (a->sign ? -ret : ret); } /* Return A truncated to an integral value toward zero. */ static void do_fix_trunc (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *a) { *r = *a; switch (r->class) { case rvc_zero: case rvc_inf: case rvc_nan: break; case rvc_normal: if (REAL_EXP (r) <= 0) get_zero (r, r->sign); else if (REAL_EXP (r) < SIGNIFICAND_BITS) clear_significand_below (r, SIGNIFICAND_BITS - REAL_EXP (r)); break; default: abort (); } } /* Perform the binary or unary operation described by CODE. For a unary operation, leave OP1 NULL. */ void real_arithmetic (REAL_VALUE_TYPE *r, int icode, const REAL_VALUE_TYPE *op0, const REAL_VALUE_TYPE *op1) { enum tree_code code = icode; switch (code) { case PLUS_EXPR: do_add (r, op0, op1, 0); break; case MINUS_EXPR: do_add (r, op0, op1, 1); break; case MULT_EXPR: do_multiply (r, op0, op1); break; case RDIV_EXPR: do_divide (r, op0, op1); break; case MIN_EXPR: if (op1->class == rvc_nan) *r = *op1; else if (do_compare (op0, op1, -1) < 0) *r = *op0; else *r = *op1; break; case MAX_EXPR: if (op1->class == rvc_nan) *r = *op1; else if (do_compare (op0, op1, 1) < 0) *r = *op1; else *r = *op0; break; case NEGATE_EXPR: *r = *op0; r->sign ^= 1; break; case ABS_EXPR: *r = *op0; r->sign = 0; break; case FIX_TRUNC_EXPR: do_fix_trunc (r, op0); break; default: abort (); } } /* Legacy. Similar, but return the result directly. 
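   A possible use of real_arithmetic, sketched below for illustration only
   (this sketch is not part of the original file; SFmode merely stands in
   for whatever target mode a caller would actually want):  */
#if 0
static void
real_arithmetic_usage_sketch (void)
{
  REAL_VALUE_TYPE a, b, sum;

  real_from_string (&a, "1.5");         /* Defined later in this file.  */
  real_from_string (&b, "2.25");
  real_arithmetic (&sum, PLUS_EXPR, &a, &b);
  real_convert (&sum, SFmode, &sum);    /* Round to a target format.  */
}
#endif
/* real_arithmetic2, the legacy wrapper described above: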
*/ REAL_VALUE_TYPE real_arithmetic2 (int icode, const REAL_VALUE_TYPE *op0, const REAL_VALUE_TYPE *op1) { REAL_VALUE_TYPE r; real_arithmetic (&r, icode, op0, op1); return r; } bool real_compare (int icode, const REAL_VALUE_TYPE *op0, const REAL_VALUE_TYPE *op1) { enum tree_code code = icode; switch (code) { case LT_EXPR: return do_compare (op0, op1, 1) < 0; case LE_EXPR: return do_compare (op0, op1, 1) <= 0; case GT_EXPR: return do_compare (op0, op1, -1) > 0; case GE_EXPR: return do_compare (op0, op1, -1) >= 0; case EQ_EXPR: return do_compare (op0, op1, -1) == 0; case NE_EXPR: return do_compare (op0, op1, -1) != 0; case UNORDERED_EXPR: return op0->class == rvc_nan || op1->class == rvc_nan; case ORDERED_EXPR: return op0->class != rvc_nan && op1->class != rvc_nan; case UNLT_EXPR: return do_compare (op0, op1, -1) < 0; case UNLE_EXPR: return do_compare (op0, op1, -1) <= 0; case UNGT_EXPR: return do_compare (op0, op1, 1) > 0; case UNGE_EXPR: return do_compare (op0, op1, 1) >= 0; case UNEQ_EXPR: return do_compare (op0, op1, 0) == 0; case LTGT_EXPR: return do_compare (op0, op1, 0) != 0; default: abort (); } } /* Return floor log2(R). */ int real_exponent (const REAL_VALUE_TYPE *r) { switch (r->class) { case rvc_zero: return 0; case rvc_inf: case rvc_nan: return (unsigned int)-1 >> 1; case rvc_normal: return REAL_EXP (r); default: abort (); } } /* R = OP0 * 2**EXP. */ void real_ldexp (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *op0, int exp) { *r = *op0; switch (r->class) { case rvc_zero: case rvc_inf: case rvc_nan: break; case rvc_normal: exp += REAL_EXP (op0); if (exp > MAX_EXP) get_inf (r, r->sign); else if (exp < -MAX_EXP) get_zero (r, r->sign); else SET_REAL_EXP (r, exp); break; default: abort (); } } /* Determine whether a floating-point value X is infinite. */ bool real_isinf (const REAL_VALUE_TYPE *r) { return (r->class == rvc_inf); } /* Determine whether a floating-point value X is a NaN. */ bool real_isnan (const REAL_VALUE_TYPE *r) { return (r->class == rvc_nan); } /* Determine whether a floating-point value X is negative. */ bool real_isneg (const REAL_VALUE_TYPE *r) { return r->sign; } /* Determine whether a floating-point value X is minus zero. */ bool real_isnegzero (const REAL_VALUE_TYPE *r) { return r->sign && r->class == rvc_zero; } /* Compare two floating-point objects for bitwise identity. */ bool real_identical (const REAL_VALUE_TYPE *a, const REAL_VALUE_TYPE *b) { int i; if (a->class != b->class) return false; if (a->sign != b->sign) return false; switch (a->class) { case rvc_zero: case rvc_inf: return true; case rvc_normal: if (REAL_EXP (a) != REAL_EXP (b)) return false; break; case rvc_nan: if (a->signalling != b->signalling) return false; /* The significand is ignored for canonical NaNs. */ if (a->canonical || b->canonical) return a->canonical == b->canonical; break; default: abort (); } for (i = 0; i < SIGSZ; ++i) if (a->sig[i] != b->sig[i]) return false; return true; } /* Try to change R into its exact multiplicative inverse in machine mode MODE. Return true if successful. */ bool exact_real_inverse (enum machine_mode mode, REAL_VALUE_TYPE *r) { const REAL_VALUE_TYPE *one = real_digit (1); REAL_VALUE_TYPE u; int i; if (r->class != rvc_normal) return false; /* Check for a power of two: all significand bits zero except the MSB. */ for (i = 0; i < SIGSZ-1; ++i) if (r->sig[i] != 0) return false; if (r->sig[SIGSZ-1] != SIG_MSB) return false; /* Find the inverse and truncate to the required mode. 
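   For instance, R = 4.0 passes the power-of-two check above, its inverse
   0.25 is again a power of two and survives the conversion, so R is
   replaced by 0.25 and true is returned; R = 3.0 is rejected by the check
   above, since 1/3 has no exact binary representation.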
*/ do_divide (&u, one, r); real_convert (&u, mode, &u); /* The rounding may have overflowed. */ if (u.class != rvc_normal) return false; for (i = 0; i < SIGSZ-1; ++i) if (u.sig[i] != 0) return false; if (u.sig[SIGSZ-1] != SIG_MSB) return false; *r = u; return true; } /* Render R as an integer. */ HOST_WIDE_INT real_to_integer (const REAL_VALUE_TYPE *r) { unsigned HOST_WIDE_INT i; switch (r->class) { case rvc_zero: underflow: return 0; case rvc_inf: case rvc_nan: overflow: i = (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1); if (!r->sign) i--; return i; case rvc_normal: if (REAL_EXP (r) <= 0) goto underflow; /* Only force overflow for unsigned overflow. Signed overflow is undefined, so it doesn't matter what we return, and some callers expect to be able to use this routine for both signed and unsigned conversions. */ if (REAL_EXP (r) > HOST_BITS_PER_WIDE_INT) goto overflow; if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG) i = r->sig[SIGSZ-1]; else if (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG) { i = r->sig[SIGSZ-1]; i = i << (HOST_BITS_PER_LONG - 1) << 1; i |= r->sig[SIGSZ-2]; } else abort (); i >>= HOST_BITS_PER_WIDE_INT - REAL_EXP (r); if (r->sign) i = -i; return i; default: abort (); } } /* Likewise, but to an integer pair, HI+LOW. */ void real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, const REAL_VALUE_TYPE *r) { REAL_VALUE_TYPE t; HOST_WIDE_INT low, high; int exp; switch (r->class) { case rvc_zero: underflow: low = high = 0; break; case rvc_inf: case rvc_nan: overflow: high = (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1); if (r->sign) low = 0; else { high--; low = -1; } break; case rvc_normal: exp = REAL_EXP (r); if (exp <= 0) goto underflow; /* Only force overflow for unsigned overflow. Signed overflow is undefined, so it doesn't matter what we return, and some callers expect to be able to use this routine for both signed and unsigned conversions. */ if (exp > 2*HOST_BITS_PER_WIDE_INT) goto overflow; rshift_significand (&t, r, 2*HOST_BITS_PER_WIDE_INT - exp); if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG) { high = t.sig[SIGSZ-1]; low = t.sig[SIGSZ-2]; } else if (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG) { high = t.sig[SIGSZ-1]; high = high << (HOST_BITS_PER_LONG - 1) << 1; high |= t.sig[SIGSZ-2]; low = t.sig[SIGSZ-3]; low = low << (HOST_BITS_PER_LONG - 1) << 1; low |= t.sig[SIGSZ-4]; } else abort (); if (r->sign) { if (low == 0) high = -high; else low = -low, high = ~high; } break; default: abort (); } *plow = low; *phigh = high; } /* A subroutine of real_to_decimal. Compute the quotient and remainder of NUM / DEN. Return the quotient and place the remainder in NUM. It is expected that NUM / DEN are close enough that the quotient is small. */ static unsigned long rtd_divmod (REAL_VALUE_TYPE *num, REAL_VALUE_TYPE *den) { unsigned long q, msb; int expn = REAL_EXP (num), expd = REAL_EXP (den); if (expn < expd) return 0; q = msb = 0; goto start; do { msb = num->sig[SIGSZ-1] & SIG_MSB; q <<= 1; lshift_significand_1 (num, num); start: if (msb || cmp_significands (num, den) >= 0) { sub_significands (num, num, den, 0); q |= 1; } } while (--expn >= expd); SET_REAL_EXP (num, expd); normalize_val (num); return q; } /* Render R as a decimal floating point constant. Emit DIGITS significant digits in the result, bounded by BUF_SIZE. If DIGITS is 0, choose the maximum for the representation. If CROP_TRAILING_ZEROS, strip trailing zeros. 
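   (The sketch below is added purely as an illustration; it is not part of
   the original source.)  rtd_divmod above, like div_significands, is a
   restoring shift-and-subtract division: one quotient bit is produced per
   iteration while the remainder is kept in NUM.  On plain unsigned longs,
   and assuming DEN is nonzero and small enough that the running remainder
   cannot wrap, the same scheme reads:  */
#if 0
static unsigned long
restoring_divmod_sketch (unsigned long num, unsigned long den,
                         unsigned long *rem)
{
  unsigned long q = 0, r = 0;
  int bit;

  for (bit = (int) (sizeof (unsigned long) * 8) - 1; bit >= 0; --bit)
    {
      /* Bring down the next dividend bit.  */
      r = (r << 1) | ((num >> bit) & 1);
      q <<= 1;
      if (r >= den)
        {
          r -= den;             /* This quotient bit is 1.  */
          q |= 1;
        }
    }
  *rem = r;
  return q;
}
#endif
/* real_to_decimal, described above, follows.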
*/ #define M_LOG10_2 0.30102999566398119521 void real_to_decimal (char *str, const REAL_VALUE_TYPE *r_orig, size_t buf_size, size_t digits, int crop_trailing_zeros) { const REAL_VALUE_TYPE *one, *ten; REAL_VALUE_TYPE r, pten, u, v; int dec_exp, cmp_one, digit; size_t max_digits; char *p, *first, *last; bool sign; r = *r_orig; switch (r.class) { case rvc_zero: strcpy (str, (r.sign ? "-0.0" : "0.0")); return; case rvc_normal: break; case rvc_inf: strcpy (str, (r.sign ? "-Inf" : "+Inf")); return; case rvc_nan: /* ??? Print the significand as well, if not canonical? */ strcpy (str, (r.sign ? "-NaN" : "+NaN")); return; default: abort (); } /* Bound the number of digits printed by the size of the representation. */ max_digits = SIGNIFICAND_BITS * M_LOG10_2; if (digits == 0 || digits > max_digits) digits = max_digits; /* Estimate the decimal exponent, and compute the length of the string it will print as. Be conservative and add one to account for possible overflow or rounding error. */ dec_exp = REAL_EXP (&r) * M_LOG10_2; for (max_digits = 1; dec_exp ; max_digits++) dec_exp /= 10; /* Bound the number of digits printed by the size of the output buffer. */ max_digits = buf_size - 1 - 1 - 2 - max_digits - 1; if (max_digits > buf_size) abort (); if (digits > max_digits) digits = max_digits; one = real_digit (1); ten = ten_to_ptwo (0); sign = r.sign; r.sign = 0; dec_exp = 0; pten = *one; cmp_one = do_compare (&r, one, 0); if (cmp_one > 0) { int m; /* Number is greater than one. Convert significand to an integer and strip trailing decimal zeros. */ u = r; SET_REAL_EXP (&u, SIGNIFICAND_BITS - 1); /* Largest M, such that 10**2**M fits within SIGNIFICAND_BITS. */ m = floor_log2 (max_digits); /* Iterate over the bits of the possible powers of 10 that might be present in U and eliminate them. That is, if we find that 10**2**M divides U evenly, keep the division and increase DEC_EXP by 2**M. */ do { REAL_VALUE_TYPE t; do_divide (&t, &u, ten_to_ptwo (m)); do_fix_trunc (&v, &t); if (cmp_significands (&v, &t) == 0) { u = t; dec_exp += 1 << m; } } while (--m >= 0); /* Revert the scaling to integer that we performed earlier. */ SET_REAL_EXP (&u, REAL_EXP (&u) + REAL_EXP (&r) - (SIGNIFICAND_BITS - 1)); r = u; /* Find power of 10. Do this by dividing out 10**2**M when this is larger than the current remainder. Fill PTEN with the power of 10 that we compute. */ if (REAL_EXP (&r) > 0) { m = floor_log2 ((int)(REAL_EXP (&r) * M_LOG10_2)) + 1; do { const REAL_VALUE_TYPE *ptentwo = ten_to_ptwo (m); if (do_compare (&u, ptentwo, 0) >= 0) { do_divide (&u, &u, ptentwo); do_multiply (&pten, &pten, ptentwo); dec_exp += 1 << m; } } while (--m >= 0); } else /* We managed to divide off enough tens in the above reduction loop that we've now got a negative exponent. Fall into the less-than-one code to compute the proper value for PTEN. */ cmp_one = -1; } if (cmp_one < 0) { int m; /* Number is less than one. Pad significand with leading decimal zeros. */ v = r; while (1) { /* Stop if we'd shift bits off the bottom. */ if (v.sig[0] & 7) break; do_multiply (&u, &v, ten); /* Stop if we're now >= 1. */ if (REAL_EXP (&u) > 0) break; v = u; dec_exp -= 1; } r = v; /* Find power of 10. Do this by multiplying in P=10**2**M when the current remainder is smaller than 1/P. Fill PTEN with the power of 10 that we compute. 
*/ m = floor_log2 ((int)(-REAL_EXP (&r) * M_LOG10_2)) + 1; do { const REAL_VALUE_TYPE *ptentwo = ten_to_ptwo (m); const REAL_VALUE_TYPE *ptenmtwo = ten_to_mptwo (m); if (do_compare (&v, ptenmtwo, 0) <= 0) { do_multiply (&v, &v, ptentwo); do_multiply (&pten, &pten, ptentwo); dec_exp -= 1 << m; } } while (--m >= 0); /* Invert the positive power of 10 that we've collected so far. */ do_divide (&pten, one, &pten); } p = str; if (sign) *p++ = '-'; first = p++; /* At this point, PTEN should contain the nearest power of 10 smaller than R, such that this division produces the first digit. Using a divide-step primitive that returns the complete integral remainder avoids the rounding error that would be produced if we were to use do_divide here and then simply multiply by 10 for each subsequent digit. */ digit = rtd_divmod (&r, &pten); /* Be prepared for error in that division via underflow ... */ if (digit == 0 && cmp_significand_0 (&r)) { /* Multiply by 10 and try again. */ do_multiply (&r, &r, ten); digit = rtd_divmod (&r, &pten); dec_exp -= 1; if (digit == 0) abort (); } /* ... or overflow. */ if (digit == 10) { *p++ = '1'; if (--digits > 0) *p++ = '0'; dec_exp += 1; } else if (digit > 10) abort (); else *p++ = digit + '0'; /* Generate subsequent digits. */ while (--digits > 0) { do_multiply (&r, &r, ten); digit = rtd_divmod (&r, &pten); *p++ = digit + '0'; } last = p; /* Generate one more digit with which to do rounding. */ do_multiply (&r, &r, ten); digit = rtd_divmod (&r, &pten); /* Round the result. */ if (digit == 5) { /* Round to nearest. If R is nonzero there are additional nonzero digits to be extracted. */ if (cmp_significand_0 (&r)) digit++; /* Round to even. */ else if ((p[-1] - '0') & 1) digit++; } if (digit > 5) { while (p > first) { digit = *--p; if (digit == '9') *p = '0'; else { *p = digit + 1; break; } } /* Carry out of the first digit. This means we had all 9's and now have all 0's. "Prepend" a 1 by overwriting the first 0. */ if (p == first) { first[1] = '1'; dec_exp++; } } /* Insert the decimal point. */ first[0] = first[1]; first[1] = '.'; /* If requested, drop trailing zeros. Never crop past "1.0". */ if (crop_trailing_zeros) while (last > first + 3 && last[-1] == '0') last--; /* Append the exponent. */ sprintf (last, "e%+d", dec_exp); } /* Render R as a hexadecimal floating point constant. Emit DIGITS significant digits in the result, bounded by BUF_SIZE. If DIGITS is 0, choose the maximum for the representation. If CROP_TRAILING_ZEROS, strip trailing zeros. */ void real_to_hexadecimal (char *str, const REAL_VALUE_TYPE *r, size_t buf_size, size_t digits, int crop_trailing_zeros) { int i, j, exp = REAL_EXP (r); char *p, *first; char exp_buf[16]; size_t max_digits; switch (r->class) { case rvc_zero: exp = 0; break; case rvc_normal: break; case rvc_inf: strcpy (str, (r->sign ? "-Inf" : "+Inf")); return; case rvc_nan: /* ??? Print the significand as well, if not canonical? */ strcpy (str, (r->sign ? "-NaN" : "+NaN")); return; default: abort (); } if (digits == 0) digits = SIGNIFICAND_BITS / 4; /* Bound the number of digits printed by the size of the output buffer. 
*/ sprintf (exp_buf, "p%+d", exp); max_digits = buf_size - strlen (exp_buf) - r->sign - 4 - 1; if (max_digits > buf_size) abort (); if (digits > max_digits) digits = max_digits; p = str; if (r->sign) *p++ = '-'; *p++ = '0'; *p++ = 'x'; *p++ = '0'; *p++ = '.'; first = p; for (i = SIGSZ - 1; i >= 0; --i) for (j = HOST_BITS_PER_LONG - 4; j >= 0; j -= 4) { *p++ = "0123456789abcdef"[(r->sig[i] >> j) & 15]; if (--digits == 0) goto out; } out: if (crop_trailing_zeros) while (p > first + 1 && p[-1] == '0') p--; sprintf (p, "p%+d", exp); } /* Initialize R from a decimal or hexadecimal string. The string is assumed to have been syntax checked already. */ void real_from_string (REAL_VALUE_TYPE *r, const char *str) { int exp = 0; bool sign = false; get_zero (r, 0); if (*str == '-') { sign = true; str++; } else if (*str == '+') str++; if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) { /* Hexadecimal floating point. */ int pos = SIGNIFICAND_BITS - 4, d; str += 2; while (*str == '0') str++; while (1) { d = hex_value (*str); if (d == _hex_bad) break; if (pos >= 0) { r->sig[pos / HOST_BITS_PER_LONG] |= (unsigned long) d << (pos % HOST_BITS_PER_LONG); pos -= 4; } exp += 4; str++; } if (*str == '.') { str++; if (pos == SIGNIFICAND_BITS - 4) { while (*str == '0') str++, exp -= 4; } while (1) { d = hex_value (*str); if (d == _hex_bad) break; if (pos >= 0) { r->sig[pos / HOST_BITS_PER_LONG] |= (unsigned long) d << (pos % HOST_BITS_PER_LONG); pos -= 4; } str++; } } if (*str == 'p' || *str == 'P') { bool exp_neg = false; str++; if (*str == '-') { exp_neg = true; str++; } else if (*str == '+') str++; d = 0; while (ISDIGIT (*str)) { d *= 10; d += *str - '0'; if (d > MAX_EXP) { /* Overflowed the exponent. */ if (exp_neg) goto underflow; else goto overflow; } str++; } if (exp_neg) d = -d; exp += d; } r->class = rvc_normal; SET_REAL_EXP (r, exp); normalize_val (r); } else { /* Decimal floating point. */ const REAL_VALUE_TYPE *ten = ten_to_ptwo (0); int d; while (*str == '0') str++; while (ISDIGIT (*str)) { d = *str++ - '0'; do_multiply (r, r, ten); if (d) do_add (r, r, real_digit (d), 0); } if (*str == '.') { str++; if (r->class == rvc_zero) { while (*str == '0') str++, exp--; } while (ISDIGIT (*str)) { d = *str++ - '0'; do_multiply (r, r, ten); if (d) do_add (r, r, real_digit (d), 0); exp--; } } if (*str == 'e' || *str == 'E') { bool exp_neg = false; str++; if (*str == '-') { exp_neg = true; str++; } else if (*str == '+') str++; d = 0; while (ISDIGIT (*str)) { d *= 10; d += *str - '0'; if (d > MAX_EXP) { /* Overflowed the exponent. */ if (exp_neg) goto underflow; else goto overflow; } str++; } if (exp_neg) d = -d; exp += d; } if (exp) times_pten (r, exp); } r->sign = sign; return; underflow: get_zero (r, sign); return; overflow: get_inf (r, sign); return; } /* Legacy. Similar, but return the result directly. */ REAL_VALUE_TYPE real_from_string2 (const char *s, enum machine_mode mode) { REAL_VALUE_TYPE r; real_from_string (&r, s); if (mode != VOIDmode) real_convert (&r, mode, &r); return r; } /* Initialize R from the integer pair HIGH+LOW. 
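   (Sketch added for illustration only; it is not in the original source.)
   The decimal branch of real_from_string above follows the familiar
   pattern: accumulate the digits as if parsing an integer, count one
   negative decimal exponent per digit after the point, and apply the
   power of ten at the end (times_pten).  On a plain double the same idea
   reads:  */
#if 0
static double
decimal_parse_sketch (const char *str)
{
  double val = 0.0;
  int exp = 0;

  for (; *str >= '0' && *str <= '9'; str++)
    val = val * 10.0 + (*str - '0');
  if (*str == '.')
    for (str++; *str >= '0' && *str <= '9'; str++)
      {
        val = val * 10.0 + (*str - '0');
        exp--;                  /* One more digit after the point.  */
      }
  for (; exp < 0; exp++)
    val /= 10.0;                /* Apply the decimal scaling 10**exp.  */
  return val;
}
#endif
/* real_from_integer, described above, follows.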
*/ void real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode, unsigned HOST_WIDE_INT low, HOST_WIDE_INT high, int unsigned_p) { if (low == 0 && high == 0) get_zero (r, 0); else { r->class = rvc_normal; r->sign = high < 0 && !unsigned_p; SET_REAL_EXP (r, 2 * HOST_BITS_PER_WIDE_INT); if (r->sign) { high = ~high; if (low == 0) high += 1; else low = -low; } if (HOST_BITS_PER_LONG == HOST_BITS_PER_WIDE_INT) { r->sig[SIGSZ-1] = high; r->sig[SIGSZ-2] = low; memset (r->sig, 0, sizeof(long)*(SIGSZ-2)); } else if (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT) { r->sig[SIGSZ-1] = high >> (HOST_BITS_PER_LONG - 1) >> 1; r->sig[SIGSZ-2] = high; r->sig[SIGSZ-3] = low >> (HOST_BITS_PER_LONG - 1) >> 1; r->sig[SIGSZ-4] = low; if (SIGSZ > 4) memset (r->sig, 0, sizeof(long)*(SIGSZ-4)); } else abort (); normalize_val (r); } if (mode != VOIDmode) real_convert (r, mode, r); } /* Returns 10**2**N. */ static const REAL_VALUE_TYPE * ten_to_ptwo (int n) { static REAL_VALUE_TYPE tens[EXP_BITS]; if (n < 0 || n >= EXP_BITS) abort (); if (tens[n].class == rvc_zero) { if (n < (HOST_BITS_PER_WIDE_INT == 64 ? 5 : 4)) { HOST_WIDE_INT t = 10; int i; for (i = 0; i < n; ++i) t *= t; real_from_integer (&tens[n], VOIDmode, t, 0, 1); } else { const REAL_VALUE_TYPE *t = ten_to_ptwo (n - 1); do_multiply (&tens[n], t, t); } } return &tens[n]; } /* Returns 10**(-2**N). */ static const REAL_VALUE_TYPE * ten_to_mptwo (int n) { static REAL_VALUE_TYPE tens[EXP_BITS]; if (n < 0 || n >= EXP_BITS) abort (); if (tens[n].class == rvc_zero) do_divide (&tens[n], real_digit (1), ten_to_ptwo (n)); return &tens[n]; } /* Returns N. */ static const REAL_VALUE_TYPE * real_digit (int n) { static REAL_VALUE_TYPE num[10]; if (n < 0 || n > 9) abort (); if (n > 0 && num[n].class == rvc_zero) real_from_integer (&num[n], VOIDmode, n, 0, 1); return &num[n]; } /* Multiply R by 10**EXP. */ static void times_pten (REAL_VALUE_TYPE *r, int exp) { REAL_VALUE_TYPE pten, *rr; bool negative = (exp < 0); int i; if (negative) { exp = -exp; pten = *real_digit (1); rr = &pten; } else rr = r; for (i = 0; exp > 0; ++i, exp >>= 1) if (exp & 1) do_multiply (rr, rr, ten_to_ptwo (i)); if (negative) do_divide (r, r, &pten); } /* Fills R with +Inf. */ void real_inf (REAL_VALUE_TYPE *r) { get_inf (r, 0); } /* Fills R with a NaN whose significand is described by STR. If QUIET, we force a QNaN, else we force an SNaN. The string, if not empty, is parsed as a number and placed in the significand. Return true if the string was successfully parsed. */ bool real_nan (REAL_VALUE_TYPE *r, const char *str, int quiet, enum machine_mode mode) { const struct real_format *fmt; fmt = REAL_MODE_FORMAT (mode); if (fmt == NULL) abort (); if (*str == 0) { if (quiet) get_canonical_qnan (r, 0); else get_canonical_snan (r, 0); } else { int base = 10, d; bool neg = false; memset (r, 0, sizeof (*r)); r->class = rvc_nan; /* Parse akin to strtol into the significand of R. */ while (ISSPACE (*str)) str++; if (*str == '-') str++, neg = true; else if (*str == '+') str++; if (*str == '0') { if (*++str == 'x') str++, base = 16; else base = 8; } while ((d = hex_value (*str)) < base) { REAL_VALUE_TYPE u; switch (base) { case 8: lshift_significand (r, r, 3); break; case 16: lshift_significand (r, r, 4); break; case 10: lshift_significand_1 (&u, r); lshift_significand (r, r, 3); add_significands (r, r, &u); break; default: abort (); } get_zero (&u, 0); u.sig[0] = d; add_significands (r, r, &u); str++; } /* Must have consumed the entire string for success. 
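   (Note that the base 10 case above forms r * 10 as r * 8 + r * 2 with two
   shifts and an add, since the raw significand operations used here are
   just shifts and additions.)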
*/ if (*str != 0) return false; /* Shift the significand into place such that the bits are in the most significant bits for the format. */ lshift_significand (r, r, SIGNIFICAND_BITS - fmt->pnan); /* Our MSB is always unset for NaNs. */ r->sig[SIGSZ-1] &= ~SIG_MSB; /* Force quiet or signalling NaN. */ r->signalling = !quiet; } return true; } /* Fills R with the largest finite value representable in mode MODE. If SIGN is nonzero, R is set to the most negative finite value. */ void real_maxval (REAL_VALUE_TYPE *r, int sign, enum machine_mode mode) { const struct real_format *fmt; int np2; fmt = REAL_MODE_FORMAT (mode); if (fmt == NULL) abort (); r->class = rvc_normal; r->sign = sign; r->signalling = 0; r->canonical = 0; SET_REAL_EXP (r, fmt->emax * fmt->log2_b); np2 = SIGNIFICAND_BITS - fmt->p * fmt->log2_b; memset (r->sig, -1, SIGSZ * sizeof (unsigned long)); clear_significand_below (r, np2); } /* Fills R with 2**N. */ void real_2expN (REAL_VALUE_TYPE *r, int n) { memset (r, 0, sizeof (*r)); n++; if (n > MAX_EXP) r->class = rvc_inf; else if (n < -MAX_EXP) ; else { r->class = rvc_normal; SET_REAL_EXP (r, n); r->sig[SIGSZ-1] = SIG_MSB; } } static void round_for_format (const struct real_format *fmt, REAL_VALUE_TYPE *r) { int p2, np2, i, w; unsigned long sticky; bool guard, lsb; int emin2m1, emax2; p2 = fmt->p * fmt->log2_b; emin2m1 = (fmt->emin - 1) * fmt->log2_b; emax2 = fmt->emax * fmt->log2_b; np2 = SIGNIFICAND_BITS - p2; switch (r->class) { underflow: get_zero (r, r->sign); case rvc_zero: if (!fmt->has_signed_zero) r->sign = 0; return; overflow: get_inf (r, r->sign); case rvc_inf: return; case rvc_nan: clear_significand_below (r, np2); return; case rvc_normal: break; default: abort (); } /* If we're not base2, normalize the exponent to a multiple of the true base. */ if (fmt->log2_b != 1) { int shift = REAL_EXP (r) & (fmt->log2_b - 1); if (shift) { shift = fmt->log2_b - shift; r->sig[0] |= sticky_rshift_significand (r, r, shift); SET_REAL_EXP (r, REAL_EXP (r) + shift); } } /* Check the range of the exponent. If we're out of range, either underflow or overflow. */ if (REAL_EXP (r) > emax2) goto overflow; else if (REAL_EXP (r) <= emin2m1) { int diff; if (!fmt->has_denorm) { /* Don't underflow completely until we've had a chance to round. */ if (REAL_EXP (r) < emin2m1) goto underflow; } else { diff = emin2m1 - REAL_EXP (r) + 1; if (diff > p2) goto underflow; /* De-normalize the significand. */ r->sig[0] |= sticky_rshift_significand (r, r, diff); SET_REAL_EXP (r, REAL_EXP (r) + diff); } } /* There are P2 true significand bits, followed by one guard bit, followed by one sticky bit, followed by stuff. Fold nonzero stuff into the sticky bit. */ sticky = 0; for (i = 0, w = (np2 - 1) / HOST_BITS_PER_LONG; i < w; ++i) sticky |= r->sig[i]; sticky |= r->sig[w] & (((unsigned long)1 << ((np2 - 1) % HOST_BITS_PER_LONG)) - 1); guard = test_significand_bit (r, np2 - 1); lsb = test_significand_bit (r, np2); /* Round to even. */ if (guard && (sticky || lsb)) { REAL_VALUE_TYPE u; get_zero (&u, 0); set_significand_bit (&u, np2); if (add_significands (r, r, &u)) { /* Overflow. Means the significand had been all ones, and is now all zeros. Need to increase the exponent, and possibly re-normalize it. 
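   For example, rounding the significand 0.1111 up by one ulp yields
   1.0000, which is re-expressed as 0.1000 with the exponent raised by one;
   setting the MSB and bumping the exponent below does exactly that.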
*/ SET_REAL_EXP (r, REAL_EXP (r) + 1); if (REAL_EXP (r) > emax2) goto overflow; r->sig[SIGSZ-1] = SIG_MSB; if (fmt->log2_b != 1) { int shift = REAL_EXP (r) & (fmt->log2_b - 1); if (shift) { shift = fmt->log2_b - shift; rshift_significand (r, r, shift); SET_REAL_EXP (r, REAL_EXP (r) + shift); if (REAL_EXP (r) > emax2) goto overflow; } } } } /* Catch underflow that we deferred until after rounding. */ if (REAL_EXP (r) <= emin2m1) goto underflow; /* Clear out trailing garbage. */ clear_significand_below (r, np2); } /* Extend or truncate to a new mode. */ void real_convert (REAL_VALUE_TYPE *r, enum machine_mode mode, const REAL_VALUE_TYPE *a) { const struct real_format *fmt; fmt = REAL_MODE_FORMAT (mode); if (fmt == NULL) abort (); *r = *a; round_for_format (fmt, r); /* round_for_format de-normalizes denormals. Undo just that part. */ if (r->class == rvc_normal) normalize_val (r); } /* Legacy. Likewise, except return the struct directly. */ REAL_VALUE_TYPE real_value_truncate (enum machine_mode mode, REAL_VALUE_TYPE a) { REAL_VALUE_TYPE r; real_convert (&r, mode, &a); return r; } /* Return true if truncating to MODE is exact. */ bool exact_real_truncate (enum machine_mode mode, const REAL_VALUE_TYPE *a) { REAL_VALUE_TYPE t; real_convert (&t, mode, a); return real_identical (&t, a); } /* Write R to the given target format. Place the words of the result in target word order in BUF. There are always 32 bits in each long, no matter the size of the host long. Legacy: return word 0 for implementing REAL_VALUE_TO_TARGET_SINGLE. */ long real_to_target_fmt (long *buf, const REAL_VALUE_TYPE *r_orig, const struct real_format *fmt) { REAL_VALUE_TYPE r; long buf1; r = *r_orig; round_for_format (fmt, &r); if (!buf) buf = &buf1; (*fmt->encode) (fmt, buf, &r); return *buf; } /* Similar, but look up the format from MODE. */ long real_to_target (long *buf, const REAL_VALUE_TYPE *r, enum machine_mode mode) { const struct real_format *fmt; fmt = REAL_MODE_FORMAT (mode); if (fmt == NULL) abort (); return real_to_target_fmt (buf, r, fmt); } /* Read R from the given target format. Read the words of the result in target word order in BUF. There are always 32 bits in each long, no matter the size of the host long. */ void real_from_target_fmt (REAL_VALUE_TYPE *r, const long *buf, const struct real_format *fmt) { (*fmt->decode) (fmt, r, buf); } /* Similar, but look up the format from MODE. */ void real_from_target (REAL_VALUE_TYPE *r, const long *buf, enum machine_mode mode) { const struct real_format *fmt; fmt = REAL_MODE_FORMAT (mode); if (fmt == NULL) abort (); (*fmt->decode) (fmt, r, buf); } /* Return the number of bits in the significand for MODE. */ /* ??? Legacy. Should get access to real_format directly. */ int significand_size (enum machine_mode mode) { const struct real_format *fmt; fmt = REAL_MODE_FORMAT (mode); if (fmt == NULL) return 0; return fmt->p * fmt->log2_b; } /* Return a hash value for the given real value. */ /* ??? The "unsigned int" return value is intended to be hashval_t, but I didn't want to pull hashtab.h into real.h. 
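   (An illustrative sketch follows; real_hash itself comes after it.)  */

/* Hypothetical helper, not called from anywhere: real_to_target and
   real_from_target above exchange bit images through an array of longs that
   each carry 32 bits, whatever the width of the host long.  Encoding rounds
   the value to MODE's format, so a decode of the resulting image compares
   equal to the original exactly when the value survives the trip, much as
   exact_real_truncate checks via real_convert.  Four words cover every
   format in this file (at most 128 bits of image).  */

static int
example_target_roundtrip (const REAL_VALUE_TYPE *r, enum machine_mode mode)
{
  long words[4];
  REAL_VALUE_TYPE back;

  /* Round R to MODE and store its target bit image, 32 bits per word.  */
  real_to_target (words, r, mode);

  /* Rebuild a REAL_VALUE_TYPE from that image.  */
  real_from_target (&back, words, mode);

  return real_identical (r, &back);
}

/* Return a hash value for the given real value.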
*/ unsigned int real_hash (const REAL_VALUE_TYPE *r) { unsigned int h; size_t i; h = r->class | (r->sign << 2); switch (r->class) { case rvc_zero: case rvc_inf: return h; case rvc_normal: h |= REAL_EXP (r) << 3; break; case rvc_nan: if (r->signalling) h ^= (unsigned int)-1; if (r->canonical) return h; break; default: abort (); } if (sizeof(unsigned long) > sizeof(unsigned int)) for (i = 0; i < SIGSZ; ++i) { unsigned long s = r->sig[i]; h ^= s ^ (s >> (HOST_BITS_PER_LONG / 2)); } else for (i = 0; i < SIGSZ; ++i) h ^= r->sig[i]; return h; } /* IEEE single-precision format. */ static void encode_ieee_single (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_ieee_single (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_ieee_single (const struct real_format *fmt, long *buf, const REAL_VALUE_TYPE *r) { unsigned long image, sig, exp; unsigned long sign = r->sign; bool denormal = (r->sig[SIGSZ-1] & SIG_MSB) == 0; image = sign << 31; sig = (r->sig[SIGSZ-1] >> (HOST_BITS_PER_LONG - 24)) & 0x7fffff; switch (r->class) { case rvc_zero: break; case rvc_inf: if (fmt->has_inf) image |= 255 << 23; else image |= 0x7fffffff; break; case rvc_nan: if (fmt->has_nans) { if (r->canonical) sig = 0; if (r->signalling == fmt->qnan_msb_set) sig &= ~(1 << 22); else sig |= 1 << 22; /* We overload qnan_msb_set here: it's only clear for mips_ieee_single, which wants all mantissa bits but the quiet/signalling one set in canonical NaNs (at least Quiet ones). */ if (r->canonical && !fmt->qnan_msb_set) sig |= (1 << 22) - 1; else if (sig == 0) sig = 1 << 21; image |= 255 << 23; image |= sig; } else image |= 0x7fffffff; break; case rvc_normal: /* Recall that IEEE numbers are interpreted as 1.F x 2**exp, whereas the intermediate representation is 0.F x 2**exp. Which means we're off by one. */ if (denormal) exp = 0; else exp = REAL_EXP (r) + 127 - 1; image |= exp << 23; image |= sig; break; default: abort (); } buf[0] = image; } static void decode_ieee_single (const struct real_format *fmt, REAL_VALUE_TYPE *r, const long *buf) { unsigned long image = buf[0] & 0xffffffff; bool sign = (image >> 31) & 1; int exp = (image >> 23) & 0xff; memset (r, 0, sizeof (*r)); image <<= HOST_BITS_PER_LONG - 24; image &= ~SIG_MSB; if (exp == 0) { if (image && fmt->has_denorm) { r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, -126); r->sig[SIGSZ-1] = image << 1; normalize_val (r); } else if (fmt->has_signed_zero) r->sign = sign; } else if (exp == 255 && (fmt->has_nans || fmt->has_inf)) { if (image) { r->class = rvc_nan; r->sign = sign; r->signalling = (((image >> (HOST_BITS_PER_LONG - 2)) & 1) ^ fmt->qnan_msb_set); r->sig[SIGSZ-1] = image; } else { r->class = rvc_inf; r->sign = sign; } } else { r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, exp - 127 + 1); r->sig[SIGSZ-1] = image | SIG_MSB; } } const struct real_format ieee_single_format = { encode_ieee_single, decode_ieee_single, 2, 1, 24, 24, -125, 128, 31, true, true, true, true, true }; const struct real_format mips_single_format = { encode_ieee_single, decode_ieee_single, 2, 1, 24, 24, -125, 128, 31, true, true, true, true, false }; /* IEEE double-precision format. 
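   (An illustrative sketch of the single-precision layout comes first.)  */

/* Hypothetical helper, unused by the surrounding code: the image that
   encode_ieee_single above builds for a normal number, written out with
   plain integer arithmetic.  SIGN is 0 or 1, EXP is the exponent of the
   internal 0.F x 2**exp form, and FRAC24 is the full 24-bit significand
   with the hidden bit in bit 23.  */

static unsigned long
example_ieee_single_image (int sign, int exp, unsigned long frac24)
{
  unsigned long image = (unsigned long) sign << 31;

  /* The internal 0.F x 2**exp equals IEEE's 1.F x 2**(exp-1), hence the
     extra -1 on top of the usual bias of 127.  */
  image |= (unsigned long) (exp + 127 - 1) << 23;

  /* Only the 23 explicit fraction bits are stored; the leading 1 of a
     normal number stays implicit.  */
  image |= frac24 & 0x7fffff;

  return image;
}

/* IEEE double-precision format.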
*/ static void encode_ieee_double (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_ieee_double (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_ieee_double (const struct real_format *fmt, long *buf, const REAL_VALUE_TYPE *r) { unsigned long image_lo, image_hi, sig_lo, sig_hi, exp; bool denormal = (r->sig[SIGSZ-1] & SIG_MSB) == 0; image_hi = r->sign << 31; image_lo = 0; if (HOST_BITS_PER_LONG == 64) { sig_hi = r->sig[SIGSZ-1]; sig_lo = (sig_hi >> (64 - 53)) & 0xffffffff; sig_hi = (sig_hi >> (64 - 53 + 1) >> 31) & 0xfffff; } else { sig_hi = r->sig[SIGSZ-1]; sig_lo = r->sig[SIGSZ-2]; sig_lo = (sig_hi << 21) | (sig_lo >> 11); sig_hi = (sig_hi >> 11) & 0xfffff; } switch (r->class) { case rvc_zero: break; case rvc_inf: if (fmt->has_inf) image_hi |= 2047 << 20; else { image_hi |= 0x7fffffff; image_lo = 0xffffffff; } break; case rvc_nan: if (fmt->has_nans) { if (r->canonical) sig_hi = sig_lo = 0; if (r->signalling == fmt->qnan_msb_set) sig_hi &= ~(1 << 19); else sig_hi |= 1 << 19; /* We overload qnan_msb_set here: it's only clear for mips_ieee_single, which wants all mantissa bits but the quiet/signalling one set in canonical NaNs (at least Quiet ones). */ if (r->canonical && !fmt->qnan_msb_set) { sig_hi |= (1 << 19) - 1; sig_lo = 0xffffffff; } else if (sig_hi == 0 && sig_lo == 0) sig_hi = 1 << 18; image_hi |= 2047 << 20; image_hi |= sig_hi; image_lo = sig_lo; } else { image_hi |= 0x7fffffff; image_lo = 0xffffffff; } break; case rvc_normal: /* Recall that IEEE numbers are interpreted as 1.F x 2**exp, whereas the intermediate representation is 0.F x 2**exp. Which means we're off by one. */ if (denormal) exp = 0; else exp = REAL_EXP (r) + 1023 - 1; image_hi |= exp << 20; image_hi |= sig_hi; image_lo = sig_lo; break; default: abort (); } if (FLOAT_WORDS_BIG_ENDIAN) buf[0] = image_hi, buf[1] = image_lo; else buf[0] = image_lo, buf[1] = image_hi; } static void decode_ieee_double (const struct real_format *fmt, REAL_VALUE_TYPE *r, const long *buf) { unsigned long image_hi, image_lo; bool sign; int exp; if (FLOAT_WORDS_BIG_ENDIAN) image_hi = buf[0], image_lo = buf[1]; else image_lo = buf[0], image_hi = buf[1]; image_lo &= 0xffffffff; image_hi &= 0xffffffff; sign = (image_hi >> 31) & 1; exp = (image_hi >> 20) & 0x7ff; memset (r, 0, sizeof (*r)); image_hi <<= 32 - 21; image_hi |= image_lo >> 21; image_hi &= 0x7fffffff; image_lo <<= 32 - 21; if (exp == 0) { if ((image_hi || image_lo) && fmt->has_denorm) { r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, -1022); if (HOST_BITS_PER_LONG == 32) { image_hi = (image_hi << 1) | (image_lo >> 31); image_lo <<= 1; r->sig[SIGSZ-1] = image_hi; r->sig[SIGSZ-2] = image_lo; } else { image_hi = (image_hi << 31 << 2) | (image_lo << 1); r->sig[SIGSZ-1] = image_hi; } normalize_val (r); } else if (fmt->has_signed_zero) r->sign = sign; } else if (exp == 2047 && (fmt->has_nans || fmt->has_inf)) { if (image_hi || image_lo) { r->class = rvc_nan; r->sign = sign; r->signalling = ((image_hi >> 30) & 1) ^ fmt->qnan_msb_set; if (HOST_BITS_PER_LONG == 32) { r->sig[SIGSZ-1] = image_hi; r->sig[SIGSZ-2] = image_lo; } else r->sig[SIGSZ-1] = (image_hi << 31 << 1) | image_lo; } else { r->class = rvc_inf; r->sign = sign; } } else { r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, exp - 1023 + 1); if (HOST_BITS_PER_LONG == 32) { r->sig[SIGSZ-1] = image_hi | SIG_MSB; r->sig[SIGSZ-2] = image_lo; } else r->sig[SIGSZ-1] = (image_hi << 31 << 1) | image_lo | SIG_MSB; } } const struct real_format ieee_double_format = { 
encode_ieee_double, decode_ieee_double, 2, 1, 53, 53, -1021, 1024, 63, true, true, true, true, true }; const struct real_format mips_double_format = { encode_ieee_double, decode_ieee_double, 2, 1, 53, 53, -1021, 1024, 63, true, true, true, true, false }; /* IEEE extended real format. This comes in three flavors: Intel's as a 12 byte image, Intel's as a 16 byte image, and Motorola's. Intel 12- and 16-byte images may be big- or little endian; Motorola's is always big endian. */ /* Helper subroutine which converts from the internal format to the 12-byte little-endian Intel format. Functions below adjust this for the other possible formats. */ static void encode_ieee_extended (const struct real_format *fmt, long *buf, const REAL_VALUE_TYPE *r) { unsigned long image_hi, sig_hi, sig_lo; bool denormal = (r->sig[SIGSZ-1] & SIG_MSB) == 0; image_hi = r->sign << 15; sig_hi = sig_lo = 0; switch (r->class) { case rvc_zero: break; case rvc_inf: if (fmt->has_inf) { image_hi |= 32767; /* Intel requires the explicit integer bit to be set, otherwise it considers the value a "pseudo-infinity". Motorola docs say it doesn't care. */ sig_hi = 0x80000000; } else { image_hi |= 32767; sig_lo = sig_hi = 0xffffffff; } break; case rvc_nan: if (fmt->has_nans) { image_hi |= 32767; if (HOST_BITS_PER_LONG == 32) { sig_hi = r->sig[SIGSZ-1]; sig_lo = r->sig[SIGSZ-2]; } else { sig_lo = r->sig[SIGSZ-1]; sig_hi = sig_lo >> 31 >> 1; sig_lo &= 0xffffffff; } if (r->signalling == fmt->qnan_msb_set) sig_hi &= ~(1 << 30); else sig_hi |= 1 << 30; if ((sig_hi & 0x7fffffff) == 0 && sig_lo == 0) sig_hi = 1 << 29; /* Intel requires the explicit integer bit to be set, otherwise it considers the value a "pseudo-nan". Motorola docs say it doesn't care. */ sig_hi |= 0x80000000; } else { image_hi |= 32767; sig_lo = sig_hi = 0xffffffff; } break; case rvc_normal: { int exp = REAL_EXP (r); /* Recall that IEEE numbers are interpreted as 1.F x 2**exp, whereas the intermediate representation is 0.F x 2**exp. Which means we're off by one. Except for Motorola, which consider exp=0 and explicit integer bit set to continue to be normalized. In theory this discrepancy has been taken care of by the difference in fmt->emin in round_for_format. */ if (denormal) exp = 0; else { exp += 16383 - 1; if (exp < 0) abort (); } image_hi |= exp; if (HOST_BITS_PER_LONG == 32) { sig_hi = r->sig[SIGSZ-1]; sig_lo = r->sig[SIGSZ-2]; } else { sig_lo = r->sig[SIGSZ-1]; sig_hi = sig_lo >> 31 >> 1; sig_lo &= 0xffffffff; } } break; default: abort (); } buf[0] = sig_lo, buf[1] = sig_hi, buf[2] = image_hi; } /* Convert from the internal format to the 12-byte Motorola format for an IEEE extended real. */ static void encode_ieee_extended_motorola (const struct real_format *fmt, long *buf, const REAL_VALUE_TYPE *r) { long intermed[3]; encode_ieee_extended (fmt, intermed, r); /* Motorola chips are assumed always to be big-endian. Also, the padding in a Motorola extended real goes between the exponent and the mantissa. At this point the mantissa is entirely within elements 0 and 1 of intermed, and the exponent entirely within element 2, so all we have to do is swap the order around, and shift element 2 left 16 bits. */ buf[0] = intermed[2] << 16; buf[1] = intermed[1]; buf[2] = intermed[0]; } /* Convert from the internal format to the 12-byte Intel format for an IEEE extended real. 
*/ static void encode_ieee_extended_intel_96 (const struct real_format *fmt, long *buf, const REAL_VALUE_TYPE *r) { if (FLOAT_WORDS_BIG_ENDIAN) { /* All the padding in an Intel-format extended real goes at the high end, which in this case is after the mantissa, not the exponent. Therefore we must shift everything down 16 bits. */ long intermed[3]; encode_ieee_extended (fmt, intermed, r); buf[0] = ((intermed[2] << 16) | ((unsigned long)(intermed[1] & 0xFFFF0000) >> 16)); buf[1] = ((intermed[1] << 16) | ((unsigned long)(intermed[0] & 0xFFFF0000) >> 16)); buf[2] = (intermed[0] << 16); } else /* encode_ieee_extended produces what we want directly. */ encode_ieee_extended (fmt, buf, r); } /* Convert from the internal format to the 16-byte Intel format for an IEEE extended real. */ static void encode_ieee_extended_intel_128 (const struct real_format *fmt, long *buf, const REAL_VALUE_TYPE *r) { /* All the padding in an Intel-format extended real goes at the high end. */ encode_ieee_extended_intel_96 (fmt, buf, r); buf[3] = 0; } /* As above, we have a helper function which converts from 12-byte little-endian Intel format to internal format. Functions below adjust for the other possible formats. */ static void decode_ieee_extended (const struct real_format *fmt, REAL_VALUE_TYPE *r, const long *buf) { unsigned long image_hi, sig_hi, sig_lo; bool sign; int exp; sig_lo = buf[0], sig_hi = buf[1], image_hi = buf[2]; sig_lo &= 0xffffffff; sig_hi &= 0xffffffff; image_hi &= 0xffffffff; sign = (image_hi >> 15) & 1; exp = image_hi & 0x7fff; memset (r, 0, sizeof (*r)); if (exp == 0) { if ((sig_hi || sig_lo) && fmt->has_denorm) { r->class = rvc_normal; r->sign = sign; /* When the IEEE format contains a hidden bit, we know that it's zero at this point, and so shift up the significand and decrease the exponent to match. In this case, Motorola defines the explicit integer bit to be valid, so we don't know whether the msb is set or not. */ SET_REAL_EXP (r, fmt->emin); if (HOST_BITS_PER_LONG == 32) { r->sig[SIGSZ-1] = sig_hi; r->sig[SIGSZ-2] = sig_lo; } else r->sig[SIGSZ-1] = (sig_hi << 31 << 1) | sig_lo; normalize_val (r); } else if (fmt->has_signed_zero) r->sign = sign; } else if (exp == 32767 && (fmt->has_nans || fmt->has_inf)) { /* See above re "pseudo-infinities" and "pseudo-nans". Short summary is that the MSB will likely always be set, and that we don't care about it. */ sig_hi &= 0x7fffffff; if (sig_hi || sig_lo) { r->class = rvc_nan; r->sign = sign; r->signalling = ((sig_hi >> 30) & 1) ^ fmt->qnan_msb_set; if (HOST_BITS_PER_LONG == 32) { r->sig[SIGSZ-1] = sig_hi; r->sig[SIGSZ-2] = sig_lo; } else r->sig[SIGSZ-1] = (sig_hi << 31 << 1) | sig_lo; } else { r->class = rvc_inf; r->sign = sign; } } else { r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, exp - 16383 + 1); if (HOST_BITS_PER_LONG == 32) { r->sig[SIGSZ-1] = sig_hi; r->sig[SIGSZ-2] = sig_lo; } else r->sig[SIGSZ-1] = (sig_hi << 31 << 1) | sig_lo; } } /* Convert from the internal format to the 12-byte Motorola format for an IEEE extended real. */ static void decode_ieee_extended_motorola (const struct real_format *fmt, REAL_VALUE_TYPE *r, const long *buf) { long intermed[3]; /* Motorola chips are assumed always to be big-endian. Also, the padding in a Motorola extended real goes between the exponent and the mantissa; remove it. */ intermed[0] = buf[2]; intermed[1] = buf[1]; intermed[2] = (unsigned long)buf[0] >> 16; decode_ieee_extended (fmt, r, intermed); } /* Convert from the internal format to the 12-byte Intel format for an IEEE extended real. 
*/ static void decode_ieee_extended_intel_96 (const struct real_format *fmt, REAL_VALUE_TYPE *r, const long *buf) { if (FLOAT_WORDS_BIG_ENDIAN) { /* All the padding in an Intel-format extended real goes at the high end, which in this case is after the mantissa, not the exponent. Therefore we must shift everything up 16 bits. */ long intermed[3]; intermed[0] = (((unsigned long)buf[2] >> 16) | (buf[1] << 16)); intermed[1] = (((unsigned long)buf[1] >> 16) | (buf[0] << 16)); intermed[2] = ((unsigned long)buf[0] >> 16); decode_ieee_extended (fmt, r, intermed); } else /* decode_ieee_extended produces what we want directly. */ decode_ieee_extended (fmt, r, buf); } /* Convert from the internal format to the 16-byte Intel format for an IEEE extended real. */ static void decode_ieee_extended_intel_128 (const struct real_format *fmt, REAL_VALUE_TYPE *r, const long *buf) { /* All the padding in an Intel-format extended real goes at the high end. */ decode_ieee_extended_intel_96 (fmt, r, buf); } const struct real_format ieee_extended_motorola_format = { encode_ieee_extended_motorola, decode_ieee_extended_motorola, 2, 1, 64, 64, -16382, 16384, 95, true, true, true, true, true }; const struct real_format ieee_extended_intel_96_format = { encode_ieee_extended_intel_96, decode_ieee_extended_intel_96, 2, 1, 64, 64, -16381, 16384, 79, true, true, true, true, true }; const struct real_format ieee_extended_intel_128_format = { encode_ieee_extended_intel_128, decode_ieee_extended_intel_128, 2, 1, 64, 64, -16381, 16384, 79, true, true, true, true, true }; /* The following caters to i386 systems that set the rounding precision to 53 bits instead of 64, e.g. FreeBSD. */ const struct real_format ieee_extended_intel_96_round_53_format = { encode_ieee_extended_intel_96, decode_ieee_extended_intel_96, 2, 1, 53, 53, -16381, 16384, 79, true, true, true, true, true }; /* IBM 128-bit extended precision format: a pair of IEEE double precision numbers whose sum is equal to the extended precision value. The number with greater magnitude is first. This format has the same magnitude range as an IEEE double precision value, but effectively 106 bits of significand precision. Infinity and NaN are represented by their IEEE double precision value stored in the first number, the second number is ignored. Zeroes, Infinities, and NaNs are set in both doubles due to precedent. */ static void encode_ibm_extended (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_ibm_extended (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_ibm_extended (const struct real_format *fmt, long *buf, const REAL_VALUE_TYPE *r) { REAL_VALUE_TYPE u, normr, v; const struct real_format *base_fmt; base_fmt = fmt->qnan_msb_set ? &ieee_double_format : &mips_double_format; /* Renormlize R before doing any arithmetic on it. */ normr = *r; if (normr.class == rvc_normal) normalize_val (&normr); /* u = IEEE double precision portion of significand. */ u = normr; round_for_format (base_fmt, &u); encode_ieee_double (base_fmt, &buf[0], &u); if (u.class == rvc_normal) { do_add (&v, &normr, &u, 1); /* Call round_for_format since we might need to denormalize. */ round_for_format (base_fmt, &v); encode_ieee_double (base_fmt, &buf[2], &v); } else { /* Inf, NaN, 0 are all representable as doubles, so the least-significant part can be 0.0. 
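   For a finite value such as 1 + 2**-60 the pair is (1.0, 2**-60): the
   first double carries the value rounded to 53 bits and the second the
   remainder computed by the do_add call above.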
*/ buf[2] = 0; buf[3] = 0; } } static void decode_ibm_extended (const struct real_format *fmt ATTRIBUTE_UNUSED, REAL_VALUE_TYPE *r, const long *buf) { REAL_VALUE_TYPE u, v; const struct real_format *base_fmt; base_fmt = fmt->qnan_msb_set ? &ieee_double_format : &mips_double_format; decode_ieee_double (base_fmt, &u, &buf[0]); if (u.class != rvc_zero && u.class != rvc_inf && u.class != rvc_nan) { decode_ieee_double (base_fmt, &v, &buf[2]); do_add (r, &u, &v, 0); } else *r = u; } const struct real_format ibm_extended_format = { encode_ibm_extended, decode_ibm_extended, 2, 1, 53 + 53, 53, -1021 + 53, 1024, -1, true, true, true, true, true }; const struct real_format mips_extended_format = { encode_ibm_extended, decode_ibm_extended, 2, 1, 53 + 53, 53, -1021 + 53, 1024, -1, true, true, true, true, false }; /* IEEE quad precision format. */ static void encode_ieee_quad (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_ieee_quad (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_ieee_quad (const struct real_format *fmt, long *buf, const REAL_VALUE_TYPE *r) { unsigned long image3, image2, image1, image0, exp; bool denormal = (r->sig[SIGSZ-1] & SIG_MSB) == 0; REAL_VALUE_TYPE u; image3 = r->sign << 31; image2 = 0; image1 = 0; image0 = 0; rshift_significand (&u, r, SIGNIFICAND_BITS - 113); switch (r->class) { case rvc_zero: break; case rvc_inf: if (fmt->has_inf) image3 |= 32767 << 16; else { image3 |= 0x7fffffff; image2 = 0xffffffff; image1 = 0xffffffff; image0 = 0xffffffff; } break; case rvc_nan: if (fmt->has_nans) { image3 |= 32767 << 16; if (r->canonical) { /* Don't use bits from the significand. The initialization above is right. */ } else if (HOST_BITS_PER_LONG == 32) { image0 = u.sig[0]; image1 = u.sig[1]; image2 = u.sig[2]; image3 |= u.sig[3] & 0xffff; } else { image0 = u.sig[0]; image1 = image0 >> 31 >> 1; image2 = u.sig[1]; image3 |= (image2 >> 31 >> 1) & 0xffff; image0 &= 0xffffffff; image2 &= 0xffffffff; } if (r->signalling == fmt->qnan_msb_set) image3 &= ~0x8000; else image3 |= 0x8000; /* We overload qnan_msb_set here: it's only clear for mips_ieee_single, which wants all mantissa bits but the quiet/signalling one set in canonical NaNs (at least Quiet ones). */ if (r->canonical && !fmt->qnan_msb_set) { image3 |= 0x7fff; image2 = image1 = image0 = 0xffffffff; } else if (((image3 & 0xffff) | image2 | image1 | image0) == 0) image3 |= 0x4000; } else { image3 |= 0x7fffffff; image2 = 0xffffffff; image1 = 0xffffffff; image0 = 0xffffffff; } break; case rvc_normal: /* Recall that IEEE numbers are interpreted as 1.F x 2**exp, whereas the intermediate representation is 0.F x 2**exp. Which means we're off by one. 
*/ if (denormal) exp = 0; else exp = REAL_EXP (r) + 16383 - 1; image3 |= exp << 16; if (HOST_BITS_PER_LONG == 32) { image0 = u.sig[0]; image1 = u.sig[1]; image2 = u.sig[2]; image3 |= u.sig[3] & 0xffff; } else { image0 = u.sig[0]; image1 = image0 >> 31 >> 1; image2 = u.sig[1]; image3 |= (image2 >> 31 >> 1) & 0xffff; image0 &= 0xffffffff; image2 &= 0xffffffff; } break; default: abort (); } if (FLOAT_WORDS_BIG_ENDIAN) { buf[0] = image3; buf[1] = image2; buf[2] = image1; buf[3] = image0; } else { buf[0] = image0; buf[1] = image1; buf[2] = image2; buf[3] = image3; } } static void decode_ieee_quad (const struct real_format *fmt, REAL_VALUE_TYPE *r, const long *buf) { unsigned long image3, image2, image1, image0; bool sign; int exp; if (FLOAT_WORDS_BIG_ENDIAN) { image3 = buf[0]; image2 = buf[1]; image1 = buf[2]; image0 = buf[3]; } else { image0 = buf[0]; image1 = buf[1]; image2 = buf[2]; image3 = buf[3]; } image0 &= 0xffffffff; image1 &= 0xffffffff; image2 &= 0xffffffff; sign = (image3 >> 31) & 1; exp = (image3 >> 16) & 0x7fff; image3 &= 0xffff; memset (r, 0, sizeof (*r)); if (exp == 0) { if ((image3 | image2 | image1 | image0) && fmt->has_denorm) { r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, -16382 + (SIGNIFICAND_BITS - 112)); if (HOST_BITS_PER_LONG == 32) { r->sig[0] = image0; r->sig[1] = image1; r->sig[2] = image2; r->sig[3] = image3; } else { r->sig[0] = (image1 << 31 << 1) | image0; r->sig[1] = (image3 << 31 << 1) | image2; } normalize_val (r); } else if (fmt->has_signed_zero) r->sign = sign; } else if (exp == 32767 && (fmt->has_nans || fmt->has_inf)) { if (image3 | image2 | image1 | image0) { r->class = rvc_nan; r->sign = sign; r->signalling = ((image3 >> 15) & 1) ^ fmt->qnan_msb_set; if (HOST_BITS_PER_LONG == 32) { r->sig[0] = image0; r->sig[1] = image1; r->sig[2] = image2; r->sig[3] = image3; } else { r->sig[0] = (image1 << 31 << 1) | image0; r->sig[1] = (image3 << 31 << 1) | image2; } lshift_significand (r, r, SIGNIFICAND_BITS - 113); } else { r->class = rvc_inf; r->sign = sign; } } else { r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, exp - 16383 + 1); if (HOST_BITS_PER_LONG == 32) { r->sig[0] = image0; r->sig[1] = image1; r->sig[2] = image2; r->sig[3] = image3; } else { r->sig[0] = (image1 << 31 << 1) | image0; r->sig[1] = (image3 << 31 << 1) | image2; } lshift_significand (r, r, SIGNIFICAND_BITS - 113); r->sig[SIGSZ-1] |= SIG_MSB; } } const struct real_format ieee_quad_format = { encode_ieee_quad, decode_ieee_quad, 2, 1, 113, 113, -16381, 16384, 127, true, true, true, true, true }; const struct real_format mips_quad_format = { encode_ieee_quad, decode_ieee_quad, 2, 1, 113, 113, -16381, 16384, 127, true, true, true, true, false }; /* Descriptions of VAX floating point formats can be found beginning at http://h71000.www7.hp.com/doc/73FINAL/4515/4515pro_013.html#f_floating_point_format The thing to remember is that they're almost IEEE, except for word order, exponent bias, and the lack of infinities, nans, and denormals. We don't implement the H_floating format here, simply because neither the VAX or Alpha ports use it. 
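   (An illustrative sketch precedes the VAX routines themselves.)  */

/* Hypothetical helper, not used below: the VAX F image exactly as
   encode_vax_f assembles it for a normal number.  SIGN is 0 or 1, EXP the
   internal exponent, SIG24 the 24-bit significand with its leading bit in
   bit 23.  */

static unsigned long
example_vax_f_image (int sign, int exp, unsigned long sig24)
{
  unsigned long sig = sig24 & 0x7fffff;   /* The leading bit is hidden.  */
  unsigned long image;

  /* PDP-11 word order puts the low-order half of the fraction in the
     upper 16 bits of the 32-bit image...  */
  image = (sig << 16) & 0xffff0000;

  /* ...while the lower 16 bits hold the sign, the excess-128 exponent
     and the high-order seven fraction bits.  */
  image |= (unsigned long) sign << 15;
  image |= (unsigned long) (exp + 128) << 7;
  image |= sig >> 16;

  return image;
}

/* The VAX F, D and G encode/decode routines follow.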
*/ static void encode_vax_f (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_vax_f (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_vax_d (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_vax_d (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_vax_g (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_vax_g (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_vax_f (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, const REAL_VALUE_TYPE *r) { unsigned long sign, exp, sig, image; sign = r->sign << 15; switch (r->class) { case rvc_zero: image = 0; break; case rvc_inf: case rvc_nan: image = 0xffff7fff | sign; break; case rvc_normal: sig = (r->sig[SIGSZ-1] >> (HOST_BITS_PER_LONG - 24)) & 0x7fffff; exp = REAL_EXP (r) + 128; image = (sig << 16) & 0xffff0000; image |= sign; image |= exp << 7; image |= sig >> 16; break; default: abort (); } buf[0] = image; } static void decode_vax_f (const struct real_format *fmt ATTRIBUTE_UNUSED, REAL_VALUE_TYPE *r, const long *buf) { unsigned long image = buf[0] & 0xffffffff; int exp = (image >> 7) & 0xff; memset (r, 0, sizeof (*r)); if (exp != 0) { r->class = rvc_normal; r->sign = (image >> 15) & 1; SET_REAL_EXP (r, exp - 128); image = ((image & 0x7f) << 16) | ((image >> 16) & 0xffff); r->sig[SIGSZ-1] = (image << (HOST_BITS_PER_LONG - 24)) | SIG_MSB; } } static void encode_vax_d (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, const REAL_VALUE_TYPE *r) { unsigned long image0, image1, sign = r->sign << 15; switch (r->class) { case rvc_zero: image0 = image1 = 0; break; case rvc_inf: case rvc_nan: image0 = 0xffff7fff | sign; image1 = 0xffffffff; break; case rvc_normal: /* Extract the significand into straight hi:lo. */ if (HOST_BITS_PER_LONG == 64) { image0 = r->sig[SIGSZ-1]; image1 = (image0 >> (64 - 56)) & 0xffffffff; image0 = (image0 >> (64 - 56 + 1) >> 31) & 0x7fffff; } else { image0 = r->sig[SIGSZ-1]; image1 = r->sig[SIGSZ-2]; image1 = (image0 << 24) | (image1 >> 8); image0 = (image0 >> 8) & 0xffffff; } /* Rearrange the half-words of the significand to match the external format. */ image0 = ((image0 << 16) | (image0 >> 16)) & 0xffff007f; image1 = ((image1 << 16) | (image1 >> 16)) & 0xffffffff; /* Add the sign and exponent. */ image0 |= sign; image0 |= (REAL_EXP (r) + 128) << 7; break; default: abort (); } if (FLOAT_WORDS_BIG_ENDIAN) buf[0] = image1, buf[1] = image0; else buf[0] = image0, buf[1] = image1; } static void decode_vax_d (const struct real_format *fmt ATTRIBUTE_UNUSED, REAL_VALUE_TYPE *r, const long *buf) { unsigned long image0, image1; int exp; if (FLOAT_WORDS_BIG_ENDIAN) image1 = buf[0], image0 = buf[1]; else image0 = buf[0], image1 = buf[1]; image0 &= 0xffffffff; image1 &= 0xffffffff; exp = (image0 >> 7) & 0xff; memset (r, 0, sizeof (*r)); if (exp != 0) { r->class = rvc_normal; r->sign = (image0 >> 15) & 1; SET_REAL_EXP (r, exp - 128); /* Rearrange the half-words of the external format into proper ascending order. 
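   For instance, a first external word of 0xBBBB4055 (fraction low
   half-word 0xBBBB in the upper half; sign, excess-128 exponent and the
   top seven fraction bits in the lower half) is rearranged below to
   0x0055BBBB.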
*/ image0 = ((image0 & 0x7f) << 16) | ((image0 >> 16) & 0xffff); image1 = ((image1 & 0xffff) << 16) | ((image1 >> 16) & 0xffff); if (HOST_BITS_PER_LONG == 64) { image0 = (image0 << 31 << 1) | image1; image0 <<= 64 - 56; image0 |= SIG_MSB; r->sig[SIGSZ-1] = image0; } else { r->sig[SIGSZ-1] = image0; r->sig[SIGSZ-2] = image1; lshift_significand (r, r, 2*HOST_BITS_PER_LONG - 56); r->sig[SIGSZ-1] |= SIG_MSB; } } } static void encode_vax_g (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, const REAL_VALUE_TYPE *r) { unsigned long image0, image1, sign = r->sign << 15; switch (r->class) { case rvc_zero: image0 = image1 = 0; break; case rvc_inf: case rvc_nan: image0 = 0xffff7fff | sign; image1 = 0xffffffff; break; case rvc_normal: /* Extract the significand into straight hi:lo. */ if (HOST_BITS_PER_LONG == 64) { image0 = r->sig[SIGSZ-1]; image1 = (image0 >> (64 - 53)) & 0xffffffff; image0 = (image0 >> (64 - 53 + 1) >> 31) & 0xfffff; } else { image0 = r->sig[SIGSZ-1]; image1 = r->sig[SIGSZ-2]; image1 = (image0 << 21) | (image1 >> 11); image0 = (image0 >> 11) & 0xfffff; } /* Rearrange the half-words of the significand to match the external format. */ image0 = ((image0 << 16) | (image0 >> 16)) & 0xffff000f; image1 = ((image1 << 16) | (image1 >> 16)) & 0xffffffff; /* Add the sign and exponent. */ image0 |= sign; image0 |= (REAL_EXP (r) + 1024) << 4; break; default: abort (); } if (FLOAT_WORDS_BIG_ENDIAN) buf[0] = image1, buf[1] = image0; else buf[0] = image0, buf[1] = image1; } static void decode_vax_g (const struct real_format *fmt ATTRIBUTE_UNUSED, REAL_VALUE_TYPE *r, const long *buf) { unsigned long image0, image1; int exp; if (FLOAT_WORDS_BIG_ENDIAN) image1 = buf[0], image0 = buf[1]; else image0 = buf[0], image1 = buf[1]; image0 &= 0xffffffff; image1 &= 0xffffffff; exp = (image0 >> 4) & 0x7ff; memset (r, 0, sizeof (*r)); if (exp != 0) { r->class = rvc_normal; r->sign = (image0 >> 15) & 1; SET_REAL_EXP (r, exp - 1024); /* Rearrange the half-words of the external format into proper ascending order. */ image0 = ((image0 & 0xf) << 16) | ((image0 >> 16) & 0xffff); image1 = ((image1 & 0xffff) << 16) | ((image1 >> 16) & 0xffff); if (HOST_BITS_PER_LONG == 64) { image0 = (image0 << 31 << 1) | image1; image0 <<= 64 - 53; image0 |= SIG_MSB; r->sig[SIGSZ-1] = image0; } else { r->sig[SIGSZ-1] = image0; r->sig[SIGSZ-2] = image1; lshift_significand (r, r, 64 - 53); r->sig[SIGSZ-1] |= SIG_MSB; } } } const struct real_format vax_f_format = { encode_vax_f, decode_vax_f, 2, 1, 24, 24, -127, 127, 15, false, false, false, false, false }; const struct real_format vax_d_format = { encode_vax_d, decode_vax_d, 2, 1, 56, 56, -127, 127, 15, false, false, false, false, false }; const struct real_format vax_g_format = { encode_vax_g, decode_vax_g, 2, 1, 53, 53, -1023, 1023, 15, false, false, false, false, false }; /* A good reference for these can be found in chapter 9 of "ESA/390 Principles of Operation", IBM document number SA22-7201-01. 
An on-line version can be found here: http://publibz.boulder.ibm.com/cgi-bin/bookmgr_OS390/BOOKS/DZ9AR001/9.1?DT=19930923083613 */ static void encode_i370_single (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_i370_single (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_i370_double (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_i370_double (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_i370_single (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, const REAL_VALUE_TYPE *r) { unsigned long sign, exp, sig, image; sign = r->sign << 31; switch (r->class) { case rvc_zero: image = 0; break; case rvc_inf: case rvc_nan: image = 0x7fffffff | sign; break; case rvc_normal: sig = (r->sig[SIGSZ-1] >> (HOST_BITS_PER_LONG - 24)) & 0xffffff; exp = ((REAL_EXP (r) / 4) + 64) << 24; image = sign | exp | sig; break; default: abort (); } buf[0] = image; } static void decode_i370_single (const struct real_format *fmt ATTRIBUTE_UNUSED, REAL_VALUE_TYPE *r, const long *buf) { unsigned long sign, sig, image = buf[0]; int exp; sign = (image >> 31) & 1; exp = (image >> 24) & 0x7f; sig = image & 0xffffff; memset (r, 0, sizeof (*r)); if (exp || sig) { r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, (exp - 64) * 4); r->sig[SIGSZ-1] = sig << (HOST_BITS_PER_LONG - 24); normalize_val (r); } } static void encode_i370_double (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, const REAL_VALUE_TYPE *r) { unsigned long sign, exp, image_hi, image_lo; sign = r->sign << 31; switch (r->class) { case rvc_zero: image_hi = image_lo = 0; break; case rvc_inf: case rvc_nan: image_hi = 0x7fffffff | sign; image_lo = 0xffffffff; break; case rvc_normal: if (HOST_BITS_PER_LONG == 64) { image_hi = r->sig[SIGSZ-1]; image_lo = (image_hi >> (64 - 56)) & 0xffffffff; image_hi = (image_hi >> (64 - 56 + 1) >> 31) & 0xffffff; } else { image_hi = r->sig[SIGSZ-1]; image_lo = r->sig[SIGSZ-2]; image_lo = (image_lo >> 8) | (image_hi << 24); image_hi >>= 8; } exp = ((REAL_EXP (r) / 4) + 64) << 24; image_hi |= sign | exp; break; default: abort (); } if (FLOAT_WORDS_BIG_ENDIAN) buf[0] = image_hi, buf[1] = image_lo; else buf[0] = image_lo, buf[1] = image_hi; } static void decode_i370_double (const struct real_format *fmt ATTRIBUTE_UNUSED, REAL_VALUE_TYPE *r, const long *buf) { unsigned long sign, image_hi, image_lo; int exp; if (FLOAT_WORDS_BIG_ENDIAN) image_hi = buf[0], image_lo = buf[1]; else image_lo = buf[0], image_hi = buf[1]; sign = (image_hi >> 31) & 1; exp = (image_hi >> 24) & 0x7f; image_hi &= 0xffffff; image_lo &= 0xffffffff; memset (r, 0, sizeof (*r)); if (exp || image_hi || image_lo) { r->class = rvc_normal; r->sign = sign; SET_REAL_EXP (r, (exp - 64) * 4 + (SIGNIFICAND_BITS - 56)); if (HOST_BITS_PER_LONG == 32) { r->sig[0] = image_lo; r->sig[1] = image_hi; } else r->sig[0] = image_lo | (image_hi << 31 << 1); normalize_val (r); } } const struct real_format i370_single_format = { encode_i370_single, decode_i370_single, 16, 4, 6, 6, -64, 63, 31, false, false, false, /* ??? The encoding does allow for "unnormals". */ false, /* ??? The encoding does allow for "unnormals". */ false }; const struct real_format i370_double_format = { encode_i370_double, decode_i370_double, 16, 4, 14, 14, -64, 63, 63, false, false, false, /* ??? The encoding does allow for "unnormals". */ false, /* ??? The encoding does allow for "unnormals". 
*/ false }; /* The "twos-complement" c4x format is officially defined as x = s(~s).f * 2**e This is rather misleading. One must remember that F is signed. A better description would be x = -1**s * ((s + 1 + .f) * 2**e So if we have a (4 bit) fraction of .1000 with a sign bit of 1, that's -1 * (1+1+(-.5)) == -1.5. I think. The constructions here are taken from Tables 5-1 and 5-2 of the TMS320C4x User's Guide wherein step-by-step instructions for conversion from IEEE are presented. That's close enough to our internal representation so as to make things easy. See http://www-s.ti.com/sc/psheets/spru063c/spru063c.pdf */ static void encode_c4x_single (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_c4x_single (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_c4x_extended (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_c4x_extended (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_c4x_single (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, const REAL_VALUE_TYPE *r) { unsigned long image, exp, sig; switch (r->class) { case rvc_zero: exp = -128; sig = 0; break; case rvc_inf: case rvc_nan: exp = 127; sig = 0x800000 - r->sign; break; case rvc_normal: exp = REAL_EXP (r) - 1; sig = (r->sig[SIGSZ-1] >> (HOST_BITS_PER_LONG - 24)) & 0x7fffff; if (r->sign) { if (sig) sig = -sig; else exp--; sig |= 0x800000; } break; default: abort (); } image = ((exp & 0xff) << 24) | (sig & 0xffffff); buf[0] = image; } static void decode_c4x_single (const struct real_format *fmt ATTRIBUTE_UNUSED, REAL_VALUE_TYPE *r, const long *buf) { unsigned long image = buf[0]; unsigned long sig; int exp, sf; exp = (((image >> 24) & 0xff) ^ 0x80) - 0x80; sf = ((image & 0xffffff) ^ 0x800000) - 0x800000; memset (r, 0, sizeof (*r)); if (exp != -128) { r->class = rvc_normal; sig = sf & 0x7fffff; if (sf < 0) { r->sign = 1; if (sig) sig = -sig; else exp++; } sig = (sig << (HOST_BITS_PER_LONG - 24)) | SIG_MSB; SET_REAL_EXP (r, exp + 1); r->sig[SIGSZ-1] = sig; } } static void encode_c4x_extended (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, const REAL_VALUE_TYPE *r) { unsigned long exp, sig; switch (r->class) { case rvc_zero: exp = -128; sig = 0; break; case rvc_inf: case rvc_nan: exp = 127; sig = 0x80000000 - r->sign; break; case rvc_normal: exp = REAL_EXP (r) - 1; sig = r->sig[SIGSZ-1]; if (HOST_BITS_PER_LONG == 64) sig = sig >> 1 >> 31; sig &= 0x7fffffff; if (r->sign) { if (sig) sig = -sig; else exp--; sig |= 0x80000000; } break; default: abort (); } exp = (exp & 0xff) << 24; sig &= 0xffffffff; if (FLOAT_WORDS_BIG_ENDIAN) buf[0] = exp, buf[1] = sig; else buf[0] = sig, buf[0] = exp; } static void decode_c4x_extended (const struct real_format *fmt ATTRIBUTE_UNUSED, REAL_VALUE_TYPE *r, const long *buf) { unsigned long sig; int exp, sf; if (FLOAT_WORDS_BIG_ENDIAN) exp = buf[0], sf = buf[1]; else sf = buf[0], exp = buf[1]; exp = (((exp >> 24) & 0xff) & 0x80) - 0x80; sf = ((sf & 0xffffffff) ^ 0x80000000) - 0x80000000; memset (r, 0, sizeof (*r)); if (exp != -128) { r->class = rvc_normal; sig = sf & 0x7fffffff; if (sf < 0) { r->sign = 1; if (sig) sig = -sig; else exp++; } if (HOST_BITS_PER_LONG == 64) sig = sig << 1 << 31; sig |= SIG_MSB; SET_REAL_EXP (r, exp + 1); r->sig[SIGSZ-1] = sig; } } const struct real_format c4x_single_format = { encode_c4x_single, decode_c4x_single, 2, 1, 24, 24, -126, 128, -1, false, false, false, false, false }; const struct real_format c4x_extended_format = { 
encode_c4x_extended, decode_c4x_extended, 2, 1, 32, 32, -126, 128, -1, false, false, false, false, false }; /* A synthetic "format" for internal arithmetic. It's the size of the internal significand minus the two bits needed for proper rounding. The encode and decode routines exist only to satisfy our paranoia harness. */ static void encode_internal (const struct real_format *fmt, long *, const REAL_VALUE_TYPE *); static void decode_internal (const struct real_format *, REAL_VALUE_TYPE *, const long *); static void encode_internal (const struct real_format *fmt ATTRIBUTE_UNUSED, long *buf, const REAL_VALUE_TYPE *r) { memcpy (buf, r, sizeof (*r)); } static void decode_internal (const struct real_format *fmt ATTRIBUTE_UNUSED, REAL_VALUE_TYPE *r, const long *buf) { memcpy (r, buf, sizeof (*r)); } const struct real_format real_internal_format = { encode_internal, decode_internal, 2, 1, SIGNIFICAND_BITS - 2, SIGNIFICAND_BITS - 2, -MAX_EXP, MAX_EXP, -1, true, true, false, true, true }; /* Calculate the square root of X in mode MODE, and store the result in R. Return TRUE if the operation does not raise an exception. For details see "High Precision Division and Square Root", Alan H. Karp and Peter Markstein, HP Lab Report 93-93-42, June 1993. http://www.hpl.hp.com/techreports/93/HPL-93-42.pdf. */ bool real_sqrt (REAL_VALUE_TYPE *r, enum machine_mode mode, const REAL_VALUE_TYPE *x) { static REAL_VALUE_TYPE halfthree; static bool init = false; REAL_VALUE_TYPE h, t, i; int iter, exp; /* sqrt(-0.0) is -0.0. */ if (real_isnegzero (x)) { *r = *x; return false; } /* Negative arguments return NaN. */ if (real_isneg (x)) { get_canonical_qnan (r, 0); return false; } /* Infinity and NaN return themselves. */ if (real_isinf (x) || real_isnan (x)) { *r = *x; return false; } if (!init) { do_add (&halfthree, &dconst1, &dconsthalf, 0); init = true; } /* Initial guess for reciprocal sqrt, i. */ exp = real_exponent (x); real_ldexp (&i, &dconst1, -exp/2); /* Newton's iteration for reciprocal sqrt, i. */ for (iter = 0; iter < 16; iter++) { /* i(n+1) = i(n) * (1.5 - 0.5*i(n)*i(n)*x). */ do_multiply (&t, x, &i); do_multiply (&h, &t, &i); do_multiply (&t, &h, &dconsthalf); do_add (&h, &halfthree, &t, 1); do_multiply (&t, &i, &h); /* Check for early convergence. */ if (iter >= 6 && real_identical (&i, &t)) break; /* ??? Unroll loop to avoid copying. */ i = t; } /* Final iteration: r = i*x + 0.5*i*x*(1.0 - i*(i*x)). */ do_multiply (&t, x, &i); do_multiply (&h, &t, &i); do_add (&i, &dconst1, &h, 1); do_multiply (&h, &t, &i); do_multiply (&i, &dconsthalf, &h); do_add (&h, &t, &i, 0); /* ??? We need a Tuckerman test to get the last bit. */ real_convert (r, mode, &h); return true; } /* Calculate X raised to the integer exponent N in mode MODE and store the result in R. Return true if the result may be inexact due to loss of precision. The algorithm is the classic "left-to-right binary method" described in section 4.6.3 of Donald Knuth's "Seminumerical Algorithms", "The Art of Computer Programming", Volume 2. */ bool real_powi (REAL_VALUE_TYPE *r, enum machine_mode mode, const REAL_VALUE_TYPE *x, HOST_WIDE_INT n) { unsigned HOST_WIDE_INT bit; REAL_VALUE_TYPE t; bool inexact = false; bool init = false; bool neg; int i; if (n == 0) { *r = dconst1; return false; } else if (n < 0) { /* Don't worry about overflow, from now on n is unsigned. 
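   (As an example of the binary method run by the loop below: n = 13 is
   1101 in binary, so after the leading 1 bit seeds t = x the remaining
   bits 1, 0, 1 give t = t*t*x, then t = t*t, then t = t*t*x, producing
   x**13 with three squarings and two extra multiplications.)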
*/ neg = true; n = -n; } else neg = false; t = *x; bit = (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1); for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++) { if (init) { inexact |= do_multiply (&t, &t, &t); if (n & bit) inexact |= do_multiply (&t, &t, x); } else if (n & bit) init = true; bit >>= 1; } if (neg) inexact |= do_divide (&t, &dconst1, &t); real_convert (r, mode, &t); return inexact; } /* Round X to the nearest integer not larger in absolute value, i.e. towards zero, placing the result in R in mode MODE. */ void real_trunc (REAL_VALUE_TYPE *r, enum machine_mode mode, const REAL_VALUE_TYPE *x) { do_fix_trunc (r, x); if (mode != VOIDmode) real_convert (r, mode, r); } /* Round X to the largest integer not greater in value, i.e. round down, placing the result in R in mode MODE. */ void real_floor (REAL_VALUE_TYPE *r, enum machine_mode mode, const REAL_VALUE_TYPE *x) { REAL_VALUE_TYPE t; do_fix_trunc (&t, x); if (! real_identical (&t, x) && x->sign) do_add (&t, &t, &dconstm1, 0); if (mode != VOIDmode) real_convert (r, mode, &t); } /* Round X to the smallest integer not less then argument, i.e. round up, placing the result in R in mode MODE. */ void real_ceil (REAL_VALUE_TYPE *r, enum machine_mode mode, const REAL_VALUE_TYPE *x) { REAL_VALUE_TYPE t; do_fix_trunc (&t, x); if (! real_identical (&t, x) && ! x->sign) do_add (&t, &t, &dconst1, 0); if (mode != VOIDmode) real_convert (r, mode, &t); } /* Round X to the nearest integer, but round halfway cases away from zero. */ void real_round (REAL_VALUE_TYPE *r, enum machine_mode mode, const REAL_VALUE_TYPE *x) { do_add (r, x, &dconsthalf, x->sign); do_fix_trunc (r, r); if (mode != VOIDmode) real_convert (r, mode, r); } /* Set the sign of R to the sign of X. */ void real_copysign (REAL_VALUE_TYPE *r, const REAL_VALUE_TYPE *x) { r->sign = x->sign; } /* Subroutines used by or related to instruction recognition. Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef STACK_PUSH_CODE #ifdef STACK_GROWS_DOWNWARD #define STACK_PUSH_CODE PRE_DEC #else #define STACK_PUSH_CODE PRE_INC #endif #endif #ifndef STACK_POP_CODE #ifdef STACK_GROWS_DOWNWARD #define STACK_POP_CODE POST_INC #else #define STACK_POP_CODE POST_DEC #endif #endif static void validate_replace_rtx_1 (rtx *, rtx, rtx, rtx); static rtx *find_single_use_1 (rtx, rtx *); static void validate_replace_src_1 (rtx *, void *); static rtx split_insn (rtx); /* Nonzero means allow operands to be volatile. This should be 0 if you are generating rtl, such as if you are calling the functions in optabs.c and expmed.c (most of the time). This should be 1 if all valid insns need to be recognized, such as in regclass.c and final.c and reload.c. init_recog and init_recog_no_volatile are responsible for setting this. 
*/ int volatile_ok; struct recog_data recog_data; /* Contains a vector of operand_alternative structures for every operand. Set up by preprocess_constraints. */ struct operand_alternative recog_op_alt[MAX_RECOG_OPERANDS][MAX_RECOG_ALTERNATIVES]; /* On return from `constrain_operands', indicate which alternative was satisfied. */ int which_alternative; /* Nonzero after end of reload pass. Set to 1 or 0 by toplev.c. Controls the significance of (SUBREG (MEM)). */ int reload_completed; /* Nonzero after thread_prologue_and_epilogue_insns has run. */ int epilogue_completed; /* Initialize data used by the function `recog'. This must be called once in the compilation of a function before any insn recognition may be done in the function. */ void init_recog_no_volatile (void) { volatile_ok = 0; } void init_recog (void) { volatile_ok = 1; } /* Try recognizing the instruction INSN, and return the code number that results. Remember the code so that repeated calls do not need to spend the time for actual rerecognition. This function is the normal interface to instruction recognition. The automatically-generated function `recog' is normally called through this one. (The only exception is in combine.c.) */ int recog_memoized_1 (rtx insn) { if (INSN_CODE (insn) < 0) INSN_CODE (insn) = recog (PATTERN (insn), insn, 0); return INSN_CODE (insn); } /* Check that X is an insn-body for an `asm' with operands and that the operands mentioned in it are legitimate. */ int check_asm_operands (rtx x) { int noperands; rtx *operands; const char **constraints; int i; /* Post-reload, be more strict with things. */ if (reload_completed) { /* ??? Doh! We've not got the wrapping insn. Cook one up. */ extract_insn (make_insn_raw (x)); constrain_operands (1); return which_alternative >= 0; } noperands = asm_noperands (x); if (noperands < 0) return 0; if (noperands == 0) return 1; operands = alloca (noperands * sizeof (rtx)); constraints = alloca (noperands * sizeof (char *)); decode_asm_operands (x, operands, NULL, constraints, NULL); for (i = 0; i < noperands; i++) { const char *c = constraints[i]; if (c[0] == '%') c++; if (ISDIGIT ((unsigned char) c[0]) && c[1] == '\0') c = constraints[c[0] - '0']; if (! asm_operand_ok (operands[i], c)) return 0; } return 1; } /* Static data for the next two routines. */ typedef struct change_t { rtx object; int old_code; rtx *loc; rtx old; } change_t; static change_t *changes; static int changes_allocated; static int num_changes = 0; /* Validate a proposed change to OBJECT. LOC is the location in the rtl at which NEW will be placed. If OBJECT is zero, no validation is done, the change is simply made. Two types of objects are supported: If OBJECT is a MEM, memory_address_p will be called with the address and mode as parameters. If OBJECT is an INSN, CALL_INSN, or JUMP_INSN, the insn will be re-recognized with the change in place. IN_GROUP is nonzero if this is part of a group of changes that must be performed as a group. In that case, the changes will be stored. The function `apply_change_group' will validate and apply the changes. If IN_GROUP is zero, this is a single change. Try to recognize the insn or validate the memory reference with the change applied. If the result is not valid for the machine, suppress the change and return zero. Otherwise, perform the change and return 1. 
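   (An illustrative usage sketch follows; validate_change itself comes
   after it.)  */

/* Hypothetical helper, not called from anywhere, sketching the grouped
   usage described above with the declarations from recog.h: both
   replacements are queued with IN_GROUP nonzero, so nothing is committed
   until apply_change_group re-recognizes INSN with both changes in place;
   if the result is not valid, every queued change is backed out and 0 is
   returned.  */

static int
example_swap_operands (rtx insn, rtx *loc0, rtx *loc1)
{
  rtx op0 = *loc0, op1 = *loc1;

  /* Queue the two replacements as a single group.  */
  validate_change (insn, loc0, op1, 1);
  validate_change (insn, loc1, op0, 1);

  /* Validate the group: apply both changes or cancel both.  */
  return apply_change_group ();
}

/* Validate a proposed change to OBJECT, as described above.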
*/ int validate_change (rtx object, rtx *loc, rtx new, int in_group) { rtx old = *loc; if (old == new || rtx_equal_p (old, new)) return 1; if (in_group == 0 && num_changes != 0) abort (); *loc = new; /* Save the information describing this change. */ if (num_changes >= changes_allocated) { if (changes_allocated == 0) /* This value allows for repeated substitutions inside complex indexed addresses, or changes in up to 5 insns. */ changes_allocated = MAX_RECOG_OPERANDS * 5; else changes_allocated *= 2; changes = xrealloc (changes, sizeof (change_t) * changes_allocated); } changes[num_changes].object = object; changes[num_changes].loc = loc; changes[num_changes].old = old; if (object && !MEM_P (object)) { /* Set INSN_CODE to force rerecognition of insn. Save old code in case invalid. */ changes[num_changes].old_code = INSN_CODE (object); INSN_CODE (object) = -1; } num_changes++; /* If we are making a group of changes, return 1. Otherwise, validate the change group we made. */ if (in_group) return 1; else return apply_change_group (); } /* This subroutine of apply_change_group verifies whether the changes to INSN were valid; i.e. whether INSN can still be recognized. */ int insn_invalid_p (rtx insn) { rtx pat = PATTERN (insn); int num_clobbers = 0; /* If we are before reload and the pattern is a SET, see if we can add clobbers. */ int icode = recog (pat, insn, (GET_CODE (pat) == SET && ! reload_completed && ! reload_in_progress) ? &num_clobbers : 0); int is_asm = icode < 0 && asm_noperands (PATTERN (insn)) >= 0; /* If this is an asm and the operand aren't legal, then fail. Likewise if this is not an asm and the insn wasn't recognized. */ if ((is_asm && ! check_asm_operands (PATTERN (insn))) || (!is_asm && icode < 0)) return 1; /* If we have to add CLOBBERs, fail if we have to add ones that reference hard registers since our callers can't know if they are live or not. Otherwise, add them. */ if (num_clobbers > 0) { rtx newpat; if (added_clobbers_hard_reg_p (icode)) return 1; newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_clobbers + 1)); XVECEXP (newpat, 0, 0) = pat; add_clobbers (newpat, icode); PATTERN (insn) = pat = newpat; } /* After reload, verify that all constraints are satisfied. */ if (reload_completed) { extract_insn (insn); if (! constrain_operands (1)) return 1; } INSN_CODE (insn) = icode; return 0; } /* Return number of changes made and not validated yet. */ int num_changes_pending (void) { return num_changes; } /* Apply a group of changes previously issued with `validate_change'. Return 1 if all changes are valid, zero otherwise. */ int apply_change_group (void) { int i; rtx last_validated = NULL_RTX; /* The changes have been applied and all INSN_CODEs have been reset to force rerecognition. The changes are valid if we aren't given an object, or if we are given a MEM and it still is a valid address, or if this is in insn and it is recognized. In the latter case, if reload has completed, we also require that the operands meet the constraints for the insn. */ for (i = 0; i < num_changes; i++) { rtx object = changes[i].object; /* If there is no object to test or if it is the same as the one we already tested, ignore it. */ if (object == 0 || object == last_validated) continue; if (MEM_P (object)) { if (! memory_address_p (GET_MODE (object), XEXP (object, 0))) break; } else if (insn_invalid_p (object)) { rtx pat = PATTERN (object); /* Perhaps we couldn't recognize the insn because there were extra CLOBBERs at the end. 
If so, try to re-recognize without the last CLOBBER (later iterations will cause each of them to be eliminated, in turn). But don't do this if we have an ASM_OPERAND. */ if (GET_CODE (pat) == PARALLEL && GET_CODE (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1)) == CLOBBER && asm_noperands (PATTERN (object)) < 0) { rtx newpat; if (XVECLEN (pat, 0) == 2) newpat = XVECEXP (pat, 0, 0); else { int j; newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (XVECLEN (pat, 0) - 1)); for (j = 0; j < XVECLEN (newpat, 0); j++) XVECEXP (newpat, 0, j) = XVECEXP (pat, 0, j); } /* Add a new change to this group to replace the pattern with this new pattern. Then consider this change as having succeeded. The change we added will cause the entire call to fail if things remain invalid. Note that this can lose if a later change than the one we are processing specified &XVECEXP (PATTERN (object), 0, X) but this shouldn't occur. */ validate_change (object, &PATTERN (object), newpat, 1); continue; } else if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) /* If this insn is a CLOBBER or USE, it is always valid, but is never recognized. */ continue; else break; } last_validated = object; } if (i == num_changes) { basic_block bb; for (i = 0; i < num_changes; i++) if (changes[i].object && INSN_P (changes[i].object) && (bb = BLOCK_FOR_INSN (changes[i].object))) bb->flags |= BB_DIRTY; num_changes = 0; return 1; } else { cancel_changes (0); return 0; } } /* Return the number of changes so far in the current group. */ int num_validated_changes (void) { return num_changes; } /* Retract the changes numbered NUM and up. */ void cancel_changes (int num) { int i; /* Back out all the changes. Do this in the opposite order in which they were made. */ for (i = num_changes - 1; i >= num; i--) { *changes[i].loc = changes[i].old; if (changes[i].object && !MEM_P (changes[i].object)) INSN_CODE (changes[i].object) = changes[i].old_code; } num_changes = num; } /* Replace every occurrence of FROM in X with TO. Mark each change with validate_change passing OBJECT. */ static void validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx object) { int i, j; const char *fmt; rtx x = *loc; enum rtx_code code; enum machine_mode op0_mode = VOIDmode; int prev_changes = num_changes; rtx new; if (!x) return; code = GET_CODE (x); fmt = GET_RTX_FORMAT (code); if (fmt[0] == 'e') op0_mode = GET_MODE (XEXP (x, 0)); /* X matches FROM if it is the same rtx or they are both referring to the same register in the same mode. Avoid calling rtx_equal_p unless the operands look similar. */ if (x == from || (REG_P (x) && REG_P (from) && GET_MODE (x) == GET_MODE (from) && REGNO (x) == REGNO (from)) || (GET_CODE (x) == GET_CODE (from) && GET_MODE (x) == GET_MODE (from) && rtx_equal_p (x, from))) { validate_change (object, loc, to, 1); return; } /* Call ourself recursively to perform the replacements. We must not replace inside already replaced expression, otherwise we get infinite recursion for replacements like (reg X)->(subreg (reg X)) done by regmove, so we must special case shared ASM_OPERANDS. */ if (GET_CODE (x) == PARALLEL) { for (j = XVECLEN (x, 0) - 1; j >= 0; j--) { if (j && GET_CODE (XVECEXP (x, 0, j)) == SET && GET_CODE (SET_SRC (XVECEXP (x, 0, j))) == ASM_OPERANDS) { /* Verify that operands are really shared. 
*/ if (ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, 0))) != ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, j)))) abort (); validate_replace_rtx_1 (&SET_DEST (XVECEXP (x, 0, j)), from, to, object); } else validate_replace_rtx_1 (&XVECEXP (x, 0, j), from, to, object); } } else for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') validate_replace_rtx_1 (&XEXP (x, i), from, to, object); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) validate_replace_rtx_1 (&XVECEXP (x, i, j), from, to, object); } /* If we didn't substitute, there is nothing more to do. */ if (num_changes == prev_changes) return; /* Allow substituted expression to have different mode. This is used by regmove to change mode of pseudo register. */ if (fmt[0] == 'e' && GET_MODE (XEXP (x, 0)) != VOIDmode) op0_mode = GET_MODE (XEXP (x, 0)); /* Do changes needed to keep rtx consistent. Don't do any other simplifications, as it is not our job. */ if (SWAPPABLE_OPERANDS_P (x) && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))) { validate_change (object, loc, gen_rtx_fmt_ee (COMMUTATIVE_ARITH_P (x) ? code : swap_condition (code), GET_MODE (x), XEXP (x, 1), XEXP (x, 0)), 1); x = *loc; code = GET_CODE (x); } switch (code) { case PLUS: /* If we have a PLUS whose second operand is now a CONST_INT, use simplify_gen_binary to try to simplify it. ??? We may want later to remove this, once simplification is separated from this function. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && XEXP (x, 1) == to) validate_change (object, loc, simplify_gen_binary (PLUS, GET_MODE (x), XEXP (x, 0), XEXP (x, 1)), 1); break; case MINUS: if (GET_CODE (XEXP (x, 1)) == CONST_INT || GET_CODE (XEXP (x, 1)) == CONST_DOUBLE) validate_change (object, loc, simplify_gen_binary (PLUS, GET_MODE (x), XEXP (x, 0), simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1), GET_MODE (x))), 1); break; case ZERO_EXTEND: case SIGN_EXTEND: if (GET_MODE (XEXP (x, 0)) == VOIDmode) { new = simplify_gen_unary (code, GET_MODE (x), XEXP (x, 0), op0_mode); /* If any of the above failed, substitute in something that we know won't be recognized. */ if (!new) new = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); validate_change (object, loc, new, 1); } break; case SUBREG: /* All subregs possible to simplify should be simplified. */ new = simplify_subreg (GET_MODE (x), SUBREG_REG (x), op0_mode, SUBREG_BYTE (x)); /* Subregs of VOIDmode operands are incorrect. */ if (!new && GET_MODE (SUBREG_REG (x)) == VOIDmode) new = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); if (new) validate_change (object, loc, new, 1); break; case ZERO_EXTRACT: case SIGN_EXTRACT: /* If we are replacing a register with memory, try to change the memory to be the mode required for memory in extract operations (this isn't likely to be an insertion operation; if it was, nothing bad will happen, we might just fail in some cases). 
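   As a purely illustrative example: on a little-endian target whose
   extzv pattern operates on QImode memory, an expression such as

     (zero_extract:SI (mem:SI addr) (const_int 3) (const_int 10))

   would be narrowed by the code below to roughly

     (zero_extract:SI (mem:QI (plus addr (const_int 1)))
                      (const_int 3) (const_int 2))

   since bit position 10 lies in byte 10/8 = 1 and at bit 10%8 = 2
   within that byte.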
*/ if (MEM_P (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) == CONST_INT && GET_CODE (XEXP (x, 2)) == CONST_INT && !mode_dependent_address_p (XEXP (XEXP (x, 0), 0)) && !MEM_VOLATILE_P (XEXP (x, 0))) { enum machine_mode wanted_mode = VOIDmode; enum machine_mode is_mode = GET_MODE (XEXP (x, 0)); int pos = INTVAL (XEXP (x, 2)); if (GET_CODE (x) == ZERO_EXTRACT) { enum machine_mode new_mode = mode_for_extraction (EP_extzv, 1); if (new_mode != MAX_MACHINE_MODE) wanted_mode = new_mode; } else if (GET_CODE (x) == SIGN_EXTRACT) { enum machine_mode new_mode = mode_for_extraction (EP_extv, 1); if (new_mode != MAX_MACHINE_MODE) wanted_mode = new_mode; } /* If we have a narrower mode, we can do something. */ if (wanted_mode != VOIDmode && GET_MODE_SIZE (wanted_mode) < GET_MODE_SIZE (is_mode)) { int offset = pos / BITS_PER_UNIT; rtx newmem; /* If the bytes and bits are counted differently, we must adjust the offset. */ if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN) offset = (GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (wanted_mode) - offset); pos %= GET_MODE_BITSIZE (wanted_mode); newmem = adjust_address_nv (XEXP (x, 0), wanted_mode, offset); validate_change (object, &XEXP (x, 2), GEN_INT (pos), 1); validate_change (object, &XEXP (x, 0), newmem, 1); } } break; default: break; } } /* Try replacing every occurrence of FROM in subexpression LOC of INSN with TO. After all changes have been made, validate by seeing if INSN is still valid. */ int validate_replace_rtx_subexp (rtx from, rtx to, rtx insn, rtx *loc) { validate_replace_rtx_1 (loc, from, to, insn); return apply_change_group (); } /* Try replacing every occurrence of FROM in INSN with TO. After all changes have been made, validate by seeing if INSN is still valid. */ int validate_replace_rtx (rtx from, rtx to, rtx insn) { validate_replace_rtx_1 (&PATTERN (insn), from, to, insn); return apply_change_group (); } /* Try replacing every occurrence of FROM in INSN with TO. */ void validate_replace_rtx_group (rtx from, rtx to, rtx insn) { validate_replace_rtx_1 (&PATTERN (insn), from, to, insn); } /* Function called by note_uses to replace used subexpressions. */ struct validate_replace_src_data { rtx from; /* Old RTX */ rtx to; /* New RTX */ rtx insn; /* Insn in which substitution is occurring. */ }; static void validate_replace_src_1 (rtx *x, void *data) { struct validate_replace_src_data *d = (struct validate_replace_src_data *) data; validate_replace_rtx_1 (x, d->from, d->to, d->insn); } /* Try replacing every occurrence of FROM in INSN with TO, avoiding SET_DESTs. */ void validate_replace_src_group (rtx from, rtx to, rtx insn) { struct validate_replace_src_data d; d.from = from; d.to = to; d.insn = insn; note_uses (&PATTERN (insn), validate_replace_src_1, &d); } #ifdef HAVE_cc0 /* Return 1 if the insn using CC0 set by INSN does not contain any ordered tests applied to the condition codes. EQ and NE tests do not count. */ int next_insn_tests_no_inequality (rtx insn) { rtx next = next_cc0_user (insn); /* If there is no next insn, we have to take the conservative choice. */ if (next == 0) return 0; return ((GET_CODE (next) == JUMP_INSN || GET_CODE (next) == INSN || GET_CODE (next) == CALL_INSN) && ! inequality_comparisons_p (PATTERN (next))); } #endif /* This is used by find_single_use to locate an rtx that contains exactly one use of DEST, which is typically either a REG or CC0. It returns a pointer to the innermost rtx expression containing DEST. Appearances of DEST that are being used to totally replace it are not counted. 
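   For example (purely illustrative), with DEST = (reg 100) and a
   pattern of the form

     (set (reg 100) (plus:SI (reg 100) (const_int 4)))

   the SET_DEST occurrence totally replaces the register and is not
   counted, so the one remaining use is the (reg 100) inside the PLUS
   and a pointer to the location holding that PLUS is returned.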
*/ static rtx * find_single_use_1 (rtx dest, rtx *loc) { rtx x = *loc; enum rtx_code code = GET_CODE (x); rtx *result = 0; rtx *this_result; int i; const char *fmt; switch (code) { case CONST_INT: case CONST: case LABEL_REF: case SYMBOL_REF: case CONST_DOUBLE: case CONST_VECTOR: case CLOBBER: return 0; case SET: /* If the destination is anything other than CC0, PC, a REG or a SUBREG of a REG that occupies all of the REG, the insn uses DEST if it is mentioned in the destination or the source. Otherwise, we need just check the source. */ if (GET_CODE (SET_DEST (x)) != CC0 && GET_CODE (SET_DEST (x)) != PC && !REG_P (SET_DEST (x)) && ! (GET_CODE (SET_DEST (x)) == SUBREG && REG_P (SUBREG_REG (SET_DEST (x))) && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x)))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))) break; return find_single_use_1 (dest, &SET_SRC (x)); case MEM: case SUBREG: return find_single_use_1 (dest, &XEXP (x, 0)); default: break; } /* If it wasn't one of the common cases above, check each expression and vector of this code. Look for a unique usage of DEST. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { if (dest == XEXP (x, i) || (REG_P (dest) && REG_P (XEXP (x, i)) && REGNO (dest) == REGNO (XEXP (x, i)))) this_result = loc; else this_result = find_single_use_1 (dest, &XEXP (x, i)); if (result == 0) result = this_result; else if (this_result) /* Duplicate usage. */ return 0; } else if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) { if (XVECEXP (x, i, j) == dest || (REG_P (dest) && REG_P (XVECEXP (x, i, j)) && REGNO (XVECEXP (x, i, j)) == REGNO (dest))) this_result = loc; else this_result = find_single_use_1 (dest, &XVECEXP (x, i, j)); if (result == 0) result = this_result; else if (this_result) return 0; } } } return result; } /* See if DEST, produced in INSN, is used only a single time in the sequel. If so, return a pointer to the innermost rtx expression in which it is used. If PLOC is nonzero, *PLOC is set to the insn containing the single use. This routine will return usually zero either before flow is called (because there will be no LOG_LINKS notes) or after reload (because the REG_DEAD note can't be trusted). If DEST is cc0_rtx, we look only at the next insn. In that case, we don't care about REG_DEAD notes or LOG_LINKS. Otherwise, we find the single use by finding an insn that has a LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is only referenced once in that insn, we know that it must be the first and last insn referencing DEST. */ rtx * find_single_use (rtx dest, rtx insn, rtx *ploc) { rtx next; rtx *result; rtx link; #ifdef HAVE_cc0 if (dest == cc0_rtx) { next = NEXT_INSN (insn); if (next == 0 || (GET_CODE (next) != INSN && GET_CODE (next) != JUMP_INSN)) return 0; result = find_single_use_1 (dest, &PATTERN (next)); if (result && ploc) *ploc = next; return result; } #endif if (reload_completed || reload_in_progress || !REG_P (dest)) return 0; for (next = next_nonnote_insn (insn); next != 0 && GET_CODE (next) != CODE_LABEL; next = next_nonnote_insn (next)) if (INSN_P (next) && dead_or_set_p (next, dest)) { for (link = LOG_LINKS (next); link; link = XEXP (link, 1)) if (XEXP (link, 0) == insn) break; if (link) { result = find_single_use_1 (dest, &PATTERN (next)); if (ploc) *ploc = next; return result; } } return 0; } /* Return 1 if OP is a valid general operand for machine mode MODE. 
This is either a register reference, a memory reference, or a constant. In the case of a memory reference, the address is checked for general validity for the target machine. Register and memory references must have mode MODE in order to be valid, but some constants have no machine mode and are valid for any mode. If MODE is VOIDmode, OP is checked for validity for whatever mode it has. The main use of this function is as a predicate in match_operand expressions in the machine description. For an explanation of this function's behavior for registers of class NO_REGS, see the comment for `register_operand'. */ int general_operand (rtx op, enum machine_mode mode) { enum rtx_code code = GET_CODE (op); if (mode == VOIDmode) mode = GET_MODE (op); /* Don't accept CONST_INT or anything similar if the caller wants something floating. */ if (GET_MODE (op) == VOIDmode && mode != VOIDmode && GET_MODE_CLASS (mode) != MODE_INT && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT) return 0; if (GET_CODE (op) == CONST_INT && mode != VOIDmode && trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op)) return 0; if (CONSTANT_P (op)) return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode || mode == VOIDmode) #ifdef LEGITIMATE_PIC_OPERAND_P && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) #endif && LEGITIMATE_CONSTANT_P (op)); /* Except for certain constants with VOIDmode, already checked for, OP's mode must match MODE if MODE specifies a mode. */ if (GET_MODE (op) != mode) return 0; if (code == SUBREG) { rtx sub = SUBREG_REG (op); #ifdef INSN_SCHEDULING /* On machines that have insn scheduling, we want all memory reference to be explicit, so outlaw paradoxical SUBREGs. */ if (MEM_P (sub) && GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (sub))) return 0; #endif /* Avoid memories with nonzero SUBREG_BYTE, as offsetting the memory may result in incorrect reference. We should simplify all valid subregs of MEM anyway. But allow this after reload because we might be called from cleanup_subreg_operands. ??? This is a kludge. */ if (!reload_completed && SUBREG_BYTE (op) != 0 && MEM_P (sub)) return 0; /* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally create such rtl, and we must reject it. */ if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT && GET_MODE_SIZE (GET_MODE (op)) > GET_MODE_SIZE (GET_MODE (sub))) return 0; op = sub; code = GET_CODE (op); } if (code == REG) /* A register whose class is NO_REGS is not a general operand. */ return (REGNO (op) >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (REGNO (op)) != NO_REGS); if (code == MEM) { rtx y = XEXP (op, 0); if (! volatile_ok && MEM_VOLATILE_P (op)) return 0; /* Use the mem's mode, since it will be reloaded thus. */ mode = GET_MODE (op); GO_IF_LEGITIMATE_ADDRESS (mode, y, win); } return 0; win: return 1; } /* Return 1 if OP is a valid memory address for a memory reference of mode MODE. The main use of this function is as a predicate in match_operand expressions in the machine description. */ int address_operand (rtx op, enum machine_mode mode) { return memory_address_p (mode, op); } /* Return 1 if OP is a register reference of mode MODE. If MODE is VOIDmode, accept a register in any mode. The main use of this function is as a predicate in match_operand expressions in the machine description. As a special exception, registers whose class is NO_REGS are not accepted by `register_operand'. The reason for this change is to allow the representation of special architecture artifacts (such as a condition code register) without extending the rtl definitions. 
Since registers of class NO_REGS cannot be used as registers in any case where register classes are examined, it is most consistent to keep this function from accepting them. */ int register_operand (rtx op, enum machine_mode mode) { if (GET_MODE (op) != mode && mode != VOIDmode) return 0; if (GET_CODE (op) == SUBREG) { rtx sub = SUBREG_REG (op); /* Before reload, we can allow (SUBREG (MEM...)) as a register operand because it is guaranteed to be reloaded into one. Just make sure the MEM is valid in itself. (Ideally, (SUBREG (MEM)...) should not exist after reload, but currently it does result from (SUBREG (REG)...) where the reg went on the stack.) */ if (! reload_completed && MEM_P (sub)) return general_operand (op, mode); #ifdef CANNOT_CHANGE_MODE_CLASS if (REG_P (sub) && REGNO (sub) < FIRST_PSEUDO_REGISTER && REG_CANNOT_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode) && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_INT && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_FLOAT) return 0; #endif /* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally create such rtl, and we must reject it. */ if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT && GET_MODE_SIZE (GET_MODE (op)) > GET_MODE_SIZE (GET_MODE (sub))) return 0; op = sub; } /* We don't consider registers whose class is NO_REGS to be a register operand. */ return (REG_P (op) && (REGNO (op) >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (REGNO (op)) != NO_REGS)); } /* Return 1 for a register in Pmode; ignore the tested mode. */ int pmode_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return register_operand (op, Pmode); } /* Return 1 if OP should match a MATCH_SCRATCH, i.e., if it is a SCRATCH or a hard register. */ int scratch_operand (rtx op, enum machine_mode mode) { if (GET_MODE (op) != mode && mode != VOIDmode) return 0; return (GET_CODE (op) == SCRATCH || (REG_P (op) && REGNO (op) < FIRST_PSEUDO_REGISTER)); } /* Return 1 if OP is a valid immediate operand for mode MODE. The main use of this function is as a predicate in match_operand expressions in the machine description. */ int immediate_operand (rtx op, enum machine_mode mode) { /* Don't accept CONST_INT or anything similar if the caller wants something floating. */ if (GET_MODE (op) == VOIDmode && mode != VOIDmode && GET_MODE_CLASS (mode) != MODE_INT && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT) return 0; if (GET_CODE (op) == CONST_INT && mode != VOIDmode && trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op)) return 0; return (CONSTANT_P (op) && (GET_MODE (op) == mode || mode == VOIDmode || GET_MODE (op) == VOIDmode) #ifdef LEGITIMATE_PIC_OPERAND_P && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) #endif && LEGITIMATE_CONSTANT_P (op)); } /* Returns 1 if OP is an operand that is a CONST_INT. */ int const_int_operand (rtx op, enum machine_mode mode) { if (GET_CODE (op) != CONST_INT) return 0; if (mode != VOIDmode && trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op)) return 0; return 1; } /* Returns 1 if OP is an operand that is a constant integer or constant floating-point number. */ int const_double_operand (rtx op, enum machine_mode mode) { /* Don't accept CONST_INT or anything similar if the caller wants something floating. 
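   For example, (const_int 0) carries no machine mode of its own, so it
   is rejected here when MODE is a floating mode such as DFmode.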
*/ if (GET_MODE (op) == VOIDmode && mode != VOIDmode && GET_MODE_CLASS (mode) != MODE_INT && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT) return 0; return ((GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT) && (mode == VOIDmode || GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)); } /* Return 1 if OP is a general operand that is not an immediate operand. */ int nonimmediate_operand (rtx op, enum machine_mode mode) { return (general_operand (op, mode) && ! CONSTANT_P (op)); } /* Return 1 if OP is a register reference or immediate value of mode MODE. */ int nonmemory_operand (rtx op, enum machine_mode mode) { if (CONSTANT_P (op)) { /* Don't accept CONST_INT or anything similar if the caller wants something floating. */ if (GET_MODE (op) == VOIDmode && mode != VOIDmode && GET_MODE_CLASS (mode) != MODE_INT && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT) return 0; if (GET_CODE (op) == CONST_INT && mode != VOIDmode && trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op)) return 0; return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode || mode == VOIDmode) #ifdef LEGITIMATE_PIC_OPERAND_P && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) #endif && LEGITIMATE_CONSTANT_P (op)); } if (GET_MODE (op) != mode && mode != VOIDmode) return 0; if (GET_CODE (op) == SUBREG) { /* Before reload, we can allow (SUBREG (MEM...)) as a register operand because it is guaranteed to be reloaded into one. Just make sure the MEM is valid in itself. (Ideally, (SUBREG (MEM)...) should not exist after reload, but currently it does result from (SUBREG (REG)...) where the reg went on the stack.) */ if (! reload_completed && MEM_P (SUBREG_REG (op))) return general_operand (op, mode); op = SUBREG_REG (op); } /* We don't consider registers whose class is NO_REGS to be a register operand. */ return (REG_P (op) && (REGNO (op) >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (REGNO (op)) != NO_REGS)); } /* Return 1 if OP is a valid operand that stands for pushing a value of mode MODE onto the stack. The main use of this function is as a predicate in match_operand expressions in the machine description. */ int push_operand (rtx op, enum machine_mode mode) { unsigned int rounded_size = GET_MODE_SIZE (mode); #ifdef PUSH_ROUNDING rounded_size = PUSH_ROUNDING (rounded_size); #endif if (!MEM_P (op)) return 0; if (mode != VOIDmode && GET_MODE (op) != mode) return 0; op = XEXP (op, 0); if (rounded_size == GET_MODE_SIZE (mode)) { if (GET_CODE (op) != STACK_PUSH_CODE) return 0; } else { if (GET_CODE (op) != PRE_MODIFY || GET_CODE (XEXP (op, 1)) != PLUS || XEXP (XEXP (op, 1), 0) != XEXP (op, 0) || GET_CODE (XEXP (XEXP (op, 1), 1)) != CONST_INT #ifdef STACK_GROWS_DOWNWARD || INTVAL (XEXP (XEXP (op, 1), 1)) != - (int) rounded_size #else || INTVAL (XEXP (XEXP (op, 1), 1)) != (int) rounded_size #endif ) return 0; } return XEXP (op, 0) == stack_pointer_rtx; } /* Return 1 if OP is a valid operand that stands for popping a value of mode MODE off the stack. The main use of this function is as a predicate in match_operand expressions in the machine description. */ int pop_operand (rtx op, enum machine_mode mode) { if (!MEM_P (op)) return 0; if (mode != VOIDmode && GET_MODE (op) != mode) return 0; op = XEXP (op, 0); if (GET_CODE (op) != STACK_POP_CODE) return 0; return XEXP (op, 0) == stack_pointer_rtx; } /* Return 1 if ADDR is a valid memory address for mode MODE. 
*/ int memory_address_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx addr) { GO_IF_LEGITIMATE_ADDRESS (mode, addr, win); return 0; win: return 1; } /* Return 1 if OP is a valid memory reference with mode MODE, including a valid address. The main use of this function is as a predicate in match_operand expressions in the machine description. */ int memory_operand (rtx op, enum machine_mode mode) { rtx inner; if (! reload_completed) /* Note that no SUBREG is a memory operand before end of reload pass, because (SUBREG (MEM...)) forces reloading into a register. */ return MEM_P (op) && general_operand (op, mode); if (mode != VOIDmode && GET_MODE (op) != mode) return 0; inner = op; if (GET_CODE (inner) == SUBREG) inner = SUBREG_REG (inner); return (MEM_P (inner) && general_operand (op, mode)); } /* Return 1 if OP is a valid indirect memory reference with mode MODE; that is, a memory reference whose address is a general_operand. */ int indirect_operand (rtx op, enum machine_mode mode) { /* Before reload, a SUBREG isn't in memory (see memory_operand, above). */ if (! reload_completed && GET_CODE (op) == SUBREG && MEM_P (SUBREG_REG (op))) { int offset = SUBREG_BYTE (op); rtx inner = SUBREG_REG (op); if (mode != VOIDmode && GET_MODE (op) != mode) return 0; /* The only way that we can have a general_operand as the resulting address is if OFFSET is zero and the address already is an operand or if the address is (plus Y (const_int -OFFSET)) and Y is an operand. */ return ((offset == 0 && general_operand (XEXP (inner, 0), Pmode)) || (GET_CODE (XEXP (inner, 0)) == PLUS && GET_CODE (XEXP (XEXP (inner, 0), 1)) == CONST_INT && INTVAL (XEXP (XEXP (inner, 0), 1)) == -offset && general_operand (XEXP (XEXP (inner, 0), 0), Pmode))); } return (MEM_P (op) && memory_operand (op, mode) && general_operand (XEXP (op, 0), Pmode)); } /* Return 1 if this is a comparison operator. This allows the use of MATCH_OPERATOR to recognize all the branch insns. */ int comparison_operator (rtx op, enum machine_mode mode) { return ((mode == VOIDmode || GET_MODE (op) == mode) && COMPARISON_P (op)); } /* If BODY is an insn body that uses ASM_OPERANDS, return the number of operands (both input and output) in the insn. Otherwise return -1. */ int asm_noperands (rtx body) { switch (GET_CODE (body)) { case ASM_OPERANDS: /* No output operands: return number of input operands. */ return ASM_OPERANDS_INPUT_LENGTH (body); case SET: if (GET_CODE (SET_SRC (body)) == ASM_OPERANDS) /* Single output operand: BODY is (set OUTPUT (asm_operands ...)). */ return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body)) + 1; else return -1; case PARALLEL: if (GET_CODE (XVECEXP (body, 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS) { /* Multiple output operands, or 1 output plus some clobbers: body is [(set OUTPUT (asm_operands ...))... (clobber (reg ...))...]. */ int i; int n_sets; /* Count backwards through CLOBBERs to determine number of SETs. */ for (i = XVECLEN (body, 0); i > 0; i--) { if (GET_CODE (XVECEXP (body, 0, i - 1)) == SET) break; if (GET_CODE (XVECEXP (body, 0, i - 1)) != CLOBBER) return -1; } /* N_SETS is now number of output operands. */ n_sets = i; /* Verify that all the SETs we have came from a single original asm_operands insn (so that invalid combinations are blocked). 
*/ for (i = 0; i < n_sets; i++) { rtx elt = XVECEXP (body, 0, i); if (GET_CODE (elt) != SET) return -1; if (GET_CODE (SET_SRC (elt)) != ASM_OPERANDS) return -1; /* If these ASM_OPERANDS rtx's came from different original insns then they aren't allowed together. */ if (ASM_OPERANDS_INPUT_VEC (SET_SRC (elt)) != ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (body, 0, 0)))) return -1; } return (ASM_OPERANDS_INPUT_LENGTH (SET_SRC (XVECEXP (body, 0, 0))) + n_sets); } else if (GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS) { /* 0 outputs, but some clobbers: body is [(asm_operands ...) (clobber (reg ...))...]. */ int i; /* Make sure all the other parallel things really are clobbers. */ for (i = XVECLEN (body, 0) - 1; i > 0; i--) if (GET_CODE (XVECEXP (body, 0, i)) != CLOBBER) return -1; return ASM_OPERANDS_INPUT_LENGTH (XVECEXP (body, 0, 0)); } else return -1; default: return -1; } } /* Assuming BODY is an insn body that uses ASM_OPERANDS, copy its operands (both input and output) into the vector OPERANDS, the locations of the operands within the insn into the vector OPERAND_LOCS, and the constraints for the operands into CONSTRAINTS. Write the modes of the operands into MODES. Return the assembler-template. If MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0, we don't store that info. */ const char * decode_asm_operands (rtx body, rtx *operands, rtx **operand_locs, const char **constraints, enum machine_mode *modes) { int i; int noperands; const char *template = 0; if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS) { rtx asmop = SET_SRC (body); /* Single output operand: BODY is (set OUTPUT (asm_operands ....)). */ noperands = ASM_OPERANDS_INPUT_LENGTH (asmop) + 1; for (i = 1; i < noperands; i++) { if (operand_locs) operand_locs[i] = &ASM_OPERANDS_INPUT (asmop, i - 1); if (operands) operands[i] = ASM_OPERANDS_INPUT (asmop, i - 1); if (constraints) constraints[i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i - 1); if (modes) modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i - 1); } /* The output is in the SET. Its constraint is in the ASM_OPERANDS itself. */ if (operands) operands[0] = SET_DEST (body); if (operand_locs) operand_locs[0] = &SET_DEST (body); if (constraints) constraints[0] = ASM_OPERANDS_OUTPUT_CONSTRAINT (asmop); if (modes) modes[0] = GET_MODE (SET_DEST (body)); template = ASM_OPERANDS_TEMPLATE (asmop); } else if (GET_CODE (body) == ASM_OPERANDS) { rtx asmop = body; /* No output operands: BODY is (asm_operands ....). */ noperands = ASM_OPERANDS_INPUT_LENGTH (asmop); /* The input operands are found in the 1st element vector. */ /* Constraints for inputs are in the 2nd element vector. */ for (i = 0; i < noperands; i++) { if (operand_locs) operand_locs[i] = &ASM_OPERANDS_INPUT (asmop, i); if (operands) operands[i] = ASM_OPERANDS_INPUT (asmop, i); if (constraints) constraints[i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i); if (modes) modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i); } template = ASM_OPERANDS_TEMPLATE (asmop); } else if (GET_CODE (body) == PARALLEL && GET_CODE (XVECEXP (body, 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS) { rtx asmop = SET_SRC (XVECEXP (body, 0, 0)); int nparallel = XVECLEN (body, 0); /* Includes CLOBBERs. */ int nin = ASM_OPERANDS_INPUT_LENGTH (asmop); int nout = 0; /* Does not include CLOBBERs. */ /* At least one output, plus some CLOBBERs. */ /* The outputs are in the SETs. Their constraints are in the ASM_OPERANDS itself. 
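   As a rough sketch of the usual representation (not a dump from a real
   compilation), an asm with two register outputs and shared inputs
   looks like

     (parallel [(set (reg A) (asm_operands "template" "=r" 0 ...))
                (set (reg B) (asm_operands "template" "=r" 1 ...))
                (clobber ...)])

   so the first loop below reads each output operand and its constraint
   out of the individual SETs, and the second loop appends the inputs
   recorded in the shared ASM_OPERANDS vectors.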
*/ for (i = 0; i < nparallel; i++) { if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER) break; /* Past last SET */ if (operands) operands[i] = SET_DEST (XVECEXP (body, 0, i)); if (operand_locs) operand_locs[i] = &SET_DEST (XVECEXP (body, 0, i)); if (constraints) constraints[i] = XSTR (SET_SRC (XVECEXP (body, 0, i)), 1); if (modes) modes[i] = GET_MODE (SET_DEST (XVECEXP (body, 0, i))); nout++; } for (i = 0; i < nin; i++) { if (operand_locs) operand_locs[i + nout] = &ASM_OPERANDS_INPUT (asmop, i); if (operands) operands[i + nout] = ASM_OPERANDS_INPUT (asmop, i); if (constraints) constraints[i + nout] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i); if (modes) modes[i + nout] = ASM_OPERANDS_INPUT_MODE (asmop, i); } template = ASM_OPERANDS_TEMPLATE (asmop); } else if (GET_CODE (body) == PARALLEL && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS) { /* No outputs, but some CLOBBERs. */ rtx asmop = XVECEXP (body, 0, 0); int nin = ASM_OPERANDS_INPUT_LENGTH (asmop); for (i = 0; i < nin; i++) { if (operand_locs) operand_locs[i] = &ASM_OPERANDS_INPUT (asmop, i); if (operands) operands[i] = ASM_OPERANDS_INPUT (asmop, i); if (constraints) constraints[i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i); if (modes) modes[i] = ASM_OPERANDS_INPUT_MODE (asmop, i); } template = ASM_OPERANDS_TEMPLATE (asmop); } return template; } /* Check if an asm_operand matches its constraints. Return > 0 if ok, = 0 if bad, < 0 if inconclusive. */ int asm_operand_ok (rtx op, const char *constraint) { int result = 0; /* Use constrain_operands after reload. */ if (reload_completed) abort (); while (*constraint) { char c = *constraint; int len; switch (c) { case ',': constraint++; continue; case '=': case '+': case '*': case '%': case '!': case '#': case '&': case '?': break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': /* For best results, our caller should have given us the proper matching constraint, but we can't actually fail the check if they didn't. Indicate that results are inconclusive. */ do constraint++; while (ISDIGIT (*constraint)); if (! result) result = -1; continue; case 'p': if (address_operand (op, VOIDmode)) result = 1; break; case 'm': case 'V': /* non-offsettable */ if (memory_operand (op, VOIDmode)) result = 1; break; case 'o': /* offsettable */ if (offsettable_nonstrict_memref_p (op)) result = 1; break; case '<': /* ??? Before flow, auto inc/dec insns are not supposed to exist, excepting those that expand_call created. Further, on some machines which do not have generalized auto inc/dec, an inc/dec is not a memory_operand. Match any memory and hope things are resolved after reload. */ if (MEM_P (op) && (1 || GET_CODE (XEXP (op, 0)) == PRE_DEC || GET_CODE (XEXP (op, 0)) == POST_DEC)) result = 1; break; case '>': if (MEM_P (op) && (1 || GET_CODE (XEXP (op, 0)) == PRE_INC || GET_CODE (XEXP (op, 0)) == POST_INC)) result = 1; break; case 'E': case 'F': if (GET_CODE (op) == CONST_DOUBLE || (GET_CODE (op) == CONST_VECTOR && GET_MODE_CLASS (GET_MODE (op)) == MODE_VECTOR_FLOAT)) result = 1; break; case 'G': if (GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, 'G', constraint)) result = 1; break; case 'H': if (GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, 'H', constraint)) result = 1; break; case 's': if (GET_CODE (op) == CONST_INT || (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)) break; /* Fall through. */ case 'i': if (CONSTANT_P (op) #ifdef LEGITIMATE_PIC_OPERAND_P && (! 
flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) #endif ) result = 1; break; case 'n': if (GET_CODE (op) == CONST_INT || (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)) result = 1; break; case 'I': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'I', constraint)) result = 1; break; case 'J': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'J', constraint)) result = 1; break; case 'K': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'K', constraint)) result = 1; break; case 'L': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'L', constraint)) result = 1; break; case 'M': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'M', constraint)) result = 1; break; case 'N': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'N', constraint)) result = 1; break; case 'O': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'O', constraint)) result = 1; break; case 'P': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'P', constraint)) result = 1; break; case 'X': result = 1; break; case 'g': if (general_operand (op, VOIDmode)) result = 1; break; default: /* For all other letters, we first check for a register class, otherwise it is an EXTRA_CONSTRAINT. */ if (REG_CLASS_FROM_CONSTRAINT (c, constraint) != NO_REGS) { case 'r': if (GET_MODE (op) == BLKmode) break; if (register_operand (op, VOIDmode)) result = 1; } #ifdef EXTRA_CONSTRAINT_STR else if (EXTRA_CONSTRAINT_STR (op, c, constraint)) result = 1; else if (EXTRA_MEMORY_CONSTRAINT (c, constraint) /* Every memory operand can be reloaded to fit. */ && memory_operand (op, VOIDmode)) result = 1; else if (EXTRA_ADDRESS_CONSTRAINT (c, constraint) /* Every address operand can be reloaded to fit. */ && address_operand (op, VOIDmode)) result = 1; #endif break; } len = CONSTRAINT_LEN (c, constraint); do constraint++; while (--len && *constraint); if (len) return 0; } return result; } /* Given an rtx *P, if it is a sum containing an integer constant term, return the location (type rtx *) of the pointer to that constant term. Otherwise, return a null pointer. */ rtx * find_constant_term_loc (rtx *p) { rtx *tem; enum rtx_code code = GET_CODE (*p); /* If *P IS such a constant term, P is its location. */ if (code == CONST_INT || code == SYMBOL_REF || code == LABEL_REF || code == CONST) return p; /* Otherwise, if not a sum, it has no constant term. */ if (GET_CODE (*p) != PLUS) return 0; /* If one of the summands is constant, return its location. */ if (XEXP (*p, 0) && CONSTANT_P (XEXP (*p, 0)) && XEXP (*p, 1) && CONSTANT_P (XEXP (*p, 1))) return p; /* Otherwise, check each summand for containing a constant term. */ if (XEXP (*p, 0) != 0) { tem = find_constant_term_loc (&XEXP (*p, 0)); if (tem != 0) return tem; } if (XEXP (*p, 1) != 0) { tem = find_constant_term_loc (&XEXP (*p, 1)); if (tem != 0) return tem; } return 0; } /* Return 1 if OP is a memory reference whose address contains no side effects and remains valid after the addition of a positive integer less than the size of the object being referenced. We assume that the original address is valid and do not check it. This uses strict_memory_address_p as a subroutine, so don't use it before reload. 
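   For example (illustrative only; the exact limits are target
   dependent), (mem:SI (plus (reg) (const_int 8))) is normally
   offsettable, since (plus (reg) (const_int 11)) is still a valid
   address, whereas (mem:SI (post_inc (reg))) is not, because its
   address has a side effect and its meaning depends on the access size.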
*/ int offsettable_memref_p (rtx op) { return ((MEM_P (op)) && offsettable_address_p (1, GET_MODE (op), XEXP (op, 0))); } /* Similar, but don't require a strictly valid mem ref: consider pseudo-regs valid as index or base regs. */ int offsettable_nonstrict_memref_p (rtx op) { return ((MEM_P (op)) && offsettable_address_p (0, GET_MODE (op), XEXP (op, 0))); } /* Return 1 if Y is a memory address which contains no side effects and would remain valid after the addition of a positive integer less than the size of that mode. We assume that the original address is valid and do not check it. We do check that it is valid for narrower modes. If STRICTP is nonzero, we require a strictly valid address, for the sake of use in reload.c. */ int offsettable_address_p (int strictp, enum machine_mode mode, rtx y) { enum rtx_code ycode = GET_CODE (y); rtx z; rtx y1 = y; rtx *y2; int (*addressp) (enum machine_mode, rtx) = (strictp ? strict_memory_address_p : memory_address_p); unsigned int mode_sz = GET_MODE_SIZE (mode); if (CONSTANT_ADDRESS_P (y)) return 1; /* Adjusting an offsettable address involves changing to a narrower mode. Make sure that's OK. */ if (mode_dependent_address_p (y)) return 0; /* ??? How much offset does an offsettable BLKmode reference need? Clearly that depends on the situation in which it's being used. However, the current situation in which we test 0xffffffff is less than ideal. Caveat user. */ if (mode_sz == 0) mode_sz = BIGGEST_ALIGNMENT / BITS_PER_UNIT; /* If the expression contains a constant term, see if it remains valid when max possible offset is added. */ if ((ycode == PLUS) && (y2 = find_constant_term_loc (&y1))) { int good; y1 = *y2; *y2 = plus_constant (*y2, mode_sz - 1); /* Use QImode because an odd displacement may be automatically invalid for any wider mode. But it should be valid for a single byte. */ good = (*addressp) (QImode, y); /* In any case, restore old contents of memory. */ *y2 = y1; return good; } if (GET_RTX_CLASS (ycode) == RTX_AUTOINC) return 0; /* The offset added here is chosen as the maximum offset that any instruction could need to add when operating on something of the specified mode. We assume that if Y and Y+c are valid addresses then so is Y+d for all 0 < d <= c. */ z = plus_constant (y, mode_sz - 1); /* Use QImode because an odd displacement may be automatically invalid for any wider mode. But it should be valid for a single byte. */ return (*addressp) (QImode, z); } /* Return 1 if ADDR is an address-value whose effect depends on the mode of the memory reference it is used in; autoincrement addressing is the typical example, because the amount of the increment depends on the mode. */ int mode_dependent_address_p (rtx addr) { GO_IF_MODE_DEPENDENT_ADDRESS (addr, win); return 0; /* Label `win' might (not) be used via GO_IF_MODE_DEPENDENT_ADDRESS. */ win: ATTRIBUTE_UNUSED_LABEL return 1; } /* Do cached extract_insn: if INSN was already extracted into recog_data, reuse that information instead of extracting it again. */ void extract_insn_cached (rtx insn) { if (recog_data.insn == insn && INSN_CODE (insn) >= 0) return; extract_insn (insn); recog_data.insn = insn; } /* Do cached extract_insn, constrain_operands and complain about failures. Used by insn_attrtab. */ void extract_constrain_insn_cached (rtx insn) { extract_insn_cached (insn); if (which_alternative == -1 && !constrain_operands (reload_completed)) fatal_insn_not_found (insn); } /* Do cached constrain_operands and complain about failures. */ int constrain_operands_cached (int strict) { if (which_alternative == -1) return constrain_operands (strict); else return 1; } /* Analyze INSN and fill in recog_data.
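   On return, recog_data.operand[], recog_data.operand_loc[],
   recog_data.constraints[] and recog_data.operand_mode[] describe the
   operands, and recog_data.n_operands, n_alternatives and n_dups give
   their counts, as used by the constraint-checking code below.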
*/ void extract_insn (rtx insn) { int i; int icode; int noperands; rtx body = PATTERN (insn); recog_data.insn = NULL; recog_data.n_operands = 0; recog_data.n_alternatives = 0; recog_data.n_dups = 0; which_alternative = -1; switch (GET_CODE (body)) { case USE: case CLOBBER: case ASM_INPUT: case ADDR_VEC: case ADDR_DIFF_VEC: return; case SET: if (GET_CODE (SET_SRC (body)) == ASM_OPERANDS) goto asm_insn; else goto normal_insn; case PARALLEL: if ((GET_CODE (XVECEXP (body, 0, 0)) == SET && GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS) || GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS) goto asm_insn; else goto normal_insn; case ASM_OPERANDS: asm_insn: recog_data.n_operands = noperands = asm_noperands (body); if (noperands >= 0) { /* This insn is an `asm' with operands. */ /* expand_asm_operands makes sure there aren't too many operands. */ if (noperands > MAX_RECOG_OPERANDS) abort (); /* Now get the operand values and constraints out of the insn. */ decode_asm_operands (body, recog_data.operand, recog_data.operand_loc, recog_data.constraints, recog_data.operand_mode); if (noperands > 0) { const char *p = recog_data.constraints[0]; recog_data.n_alternatives = 1; while (*p) recog_data.n_alternatives += (*p++ == ','); } break; } fatal_insn_not_found (insn); default: normal_insn: /* Ordinary insn: recognize it, get the operands via insn_extract and get the constraints. */ icode = recog_memoized (insn); if (icode < 0) fatal_insn_not_found (insn); recog_data.n_operands = noperands = insn_data[icode].n_operands; recog_data.n_alternatives = insn_data[icode].n_alternatives; recog_data.n_dups = insn_data[icode].n_dups; insn_extract (insn); for (i = 0; i < noperands; i++) { recog_data.constraints[i] = insn_data[icode].operand[i].constraint; recog_data.operand_mode[i] = insn_data[icode].operand[i].mode; /* VOIDmode match_operands gets mode from their real operand. */ if (recog_data.operand_mode[i] == VOIDmode) recog_data.operand_mode[i] = GET_MODE (recog_data.operand[i]); } } for (i = 0; i < noperands; i++) recog_data.operand_type[i] = (recog_data.constraints[i][0] == '=' ? OP_OUT : recog_data.constraints[i][0] == '+' ? OP_INOUT : OP_IN); if (recog_data.n_alternatives > MAX_RECOG_ALTERNATIVES) abort (); } /* After calling extract_insn, you can use this function to extract some information from the constraint strings into a more usable form. The collected data is stored in recog_op_alt. */ void preprocess_constraints (void) { int i; for (i = 0; i < recog_data.n_operands; i++) memset (recog_op_alt[i], 0, (recog_data.n_alternatives * sizeof (struct operand_alternative))); for (i = 0; i < recog_data.n_operands; i++) { int j; struct operand_alternative *op_alt; const char *p = recog_data.constraints[i]; op_alt = recog_op_alt[i]; for (j = 0; j < recog_data.n_alternatives; j++) { op_alt[j].class = NO_REGS; op_alt[j].constraint = p; op_alt[j].matches = -1; op_alt[j].matched = -1; if (*p == '\0' || *p == ',') { op_alt[j].anything_ok = 1; continue; } for (;;) { char c = *p; if (c == '#') do c = *++p; while (c != ',' && c != '\0'); if (c == ',' || c == '\0') { p++; break; } switch (c) { case '=': case '+': case '*': case '%': case 'E': case 'F': case 'G': case 'H': case 's': case 'i': case 'n': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': /* These don't say anything we care about. 
*/ break; case '?': op_alt[j].reject += 6; break; case '!': op_alt[j].reject += 600; break; case '&': op_alt[j].earlyclobber = 1; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { char *end; op_alt[j].matches = strtoul (p, &end, 10); recog_op_alt[op_alt[j].matches][j].matched = i; p = end; } continue; case 'm': op_alt[j].memory_ok = 1; break; case '<': op_alt[j].decmem_ok = 1; break; case '>': op_alt[j].incmem_ok = 1; break; case 'V': op_alt[j].nonoffmem_ok = 1; break; case 'o': op_alt[j].offmem_ok = 1; break; case 'X': op_alt[j].anything_ok = 1; break; case 'p': op_alt[j].is_address = 1; op_alt[j].class = reg_class_subunion[(int) op_alt[j].class] [(int) MODE_BASE_REG_CLASS (VOIDmode)]; break; case 'g': case 'r': op_alt[j].class = reg_class_subunion[(int) op_alt[j].class][(int) GENERAL_REGS]; break; default: if (EXTRA_MEMORY_CONSTRAINT (c, p)) { op_alt[j].memory_ok = 1; break; } if (EXTRA_ADDRESS_CONSTRAINT (c, p)) { op_alt[j].is_address = 1; op_alt[j].class = (reg_class_subunion [(int) op_alt[j].class] [(int) MODE_BASE_REG_CLASS (VOIDmode)]); break; } op_alt[j].class = (reg_class_subunion [(int) op_alt[j].class] [(int) REG_CLASS_FROM_CONSTRAINT ((unsigned char) c, p)]); break; } p += CONSTRAINT_LEN (c, p); } } } } /* Check the operands of an insn against the insn's operand constraints and return 1 if they are valid. The information about the insn's operands, constraints, operand modes etc. is obtained from the global variables set up by extract_insn. WHICH_ALTERNATIVE is set to a number which indicates which alternative of constraints was matched: 0 for the first alternative, 1 for the next, etc. In addition, when two operands are required to match and it happens that the output operand is (reg) while the input operand is --(reg) or ++(reg) (a pre-inc or pre-dec), make the output operand look like the input. This is because the output operand is the one the template will print. This is used in final, just before printing the assembler code and by the routines that determine an insn's attribute. If STRICT is a positive nonzero value, it means that we have been called after reload has been completed. In that case, we must do all checks strictly. If it is zero, it means that we have been called before reload has completed. In that case, we first try to see if we can find an alternative that matches strictly. If not, we try again, this time assuming that reload will fix up the insn. This provides a "best guess" for the alternative and is used to compute attributes of insns prior to reload. A negative value of STRICT is used for this internal call. */ struct funny_match { int this, other; }; int constrain_operands (int strict) { const char *constraints[MAX_RECOG_OPERANDS]; int matching_operands[MAX_RECOG_OPERANDS]; int earlyclobber[MAX_RECOG_OPERANDS]; int c; struct funny_match funny_match[MAX_RECOG_OPERANDS]; int funny_match_index; which_alternative = 0; if (recog_data.n_operands == 0 || recog_data.n_alternatives == 0) return 1; for (c = 0; c < recog_data.n_operands; c++) { constraints[c] = recog_data.constraints[c]; matching_operands[c] = -1; } do { int opno; int lose = 0; funny_match_index = 0; for (opno = 0; opno < recog_data.n_operands; opno++) { rtx op = recog_data.operand[opno]; enum machine_mode mode = GET_MODE (op); const char *p = constraints[opno]; int offset = 0; int win = 0; int val; int len; earlyclobber[opno] = 0; /* A unary operator may be accepted by the predicate, but it is irrelevant for matching constraints. 
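   For example, an operand such as (sign_extend:SI (reg:HI 3)) that was
   accepted by the operand's predicate is checked against 'r' and the
   other constraint letters as the inner (reg:HI 3).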
*/ if (UNARY_P (op)) op = XEXP (op, 0); if (GET_CODE (op) == SUBREG) { if (REG_P (SUBREG_REG (op)) && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER) offset = subreg_regno_offset (REGNO (SUBREG_REG (op)), GET_MODE (SUBREG_REG (op)), SUBREG_BYTE (op), GET_MODE (op)); op = SUBREG_REG (op); } /* An empty constraint or empty alternative allows anything which matched the pattern. */ if (*p == 0 || *p == ',') win = 1; do switch (c = *p, len = CONSTRAINT_LEN (c, p), c) { case '\0': len = 0; break; case ',': c = '\0'; break; case '?': case '!': case '*': case '%': case '=': case '+': break; case '#': /* Ignore rest of this alternative as far as constraint checking is concerned. */ do p++; while (*p && *p != ','); len = 0; break; case '&': earlyclobber[opno] = 1; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { /* This operand must be the same as a previous one. This kind of constraint is used for instructions such as add when they take only two operands. Note that the lower-numbered operand is passed first. If we are not testing strictly, assume that this constraint will be satisfied. */ char *end; int match; match = strtoul (p, &end, 10); p = end; if (strict < 0) val = 1; else { rtx op1 = recog_data.operand[match]; rtx op2 = recog_data.operand[opno]; /* A unary operator may be accepted by the predicate, but it is irrelevant for matching constraints. */ if (UNARY_P (op1)) op1 = XEXP (op1, 0); if (UNARY_P (op2)) op2 = XEXP (op2, 0); val = operands_match_p (op1, op2); } matching_operands[opno] = match; matching_operands[match] = opno; if (val != 0) win = 1; /* If output is *x and input is *--x, arrange later to change the output to *--x as well, since the output op is the one that will be printed. */ if (val == 2 && strict > 0) { funny_match[funny_match_index].this = opno; funny_match[funny_match_index++].other = match; } } len = 0; break; case 'p': /* p is used for address_operands. When we are called by gen_reload, no one will have checked that the address is strictly valid, i.e., that all pseudos requiring hard regs have gotten them. */ if (strict <= 0 || (strict_memory_address_p (recog_data.operand_mode[opno], op))) win = 1; break; /* No need to check general_operand again; it was done in insn-recog.c. */ case 'g': /* Anything goes unless it is a REG and really has a hard reg but the hard reg is not in the class GENERAL_REGS. */ if (strict < 0 || GENERAL_REGS == ALL_REGS || !REG_P (op) || (reload_in_progress && REGNO (op) >= FIRST_PSEUDO_REGISTER) || reg_fits_class_p (op, GENERAL_REGS, offset, mode)) win = 1; break; case 'X': /* This is used for a MATCH_SCRATCH in the cases when we don't actually need anything. So anything goes any time. */ win = 1; break; case 'm': /* Memory operands must be valid, to the extent required by STRICT. */ if (MEM_P (op)) { if (strict > 0 && !strict_memory_address_p (GET_MODE (op), XEXP (op, 0))) break; if (strict == 0 && !memory_address_p (GET_MODE (op), XEXP (op, 0))) break; win = 1; } /* Before reload, accept what reload can turn into mem. 
*/ else if (strict < 0 && CONSTANT_P (op)) win = 1; /* During reload, accept a pseudo */ else if (reload_in_progress && REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER) win = 1; break; case '<': if (MEM_P (op) && (GET_CODE (XEXP (op, 0)) == PRE_DEC || GET_CODE (XEXP (op, 0)) == POST_DEC)) win = 1; break; case '>': if (MEM_P (op) && (GET_CODE (XEXP (op, 0)) == PRE_INC || GET_CODE (XEXP (op, 0)) == POST_INC)) win = 1; break; case 'E': case 'F': if (GET_CODE (op) == CONST_DOUBLE || (GET_CODE (op) == CONST_VECTOR && GET_MODE_CLASS (GET_MODE (op)) == MODE_VECTOR_FLOAT)) win = 1; break; case 'G': case 'H': if (GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, c, p)) win = 1; break; case 's': if (GET_CODE (op) == CONST_INT || (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)) break; case 'i': if (CONSTANT_P (op)) win = 1; break; case 'n': if (GET_CODE (op) == CONST_INT || (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)) win = 1; break; case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), c, p)) win = 1; break; case 'V': if (MEM_P (op) && ((strict > 0 && ! offsettable_memref_p (op)) || (strict < 0 && !(CONSTANT_P (op) || MEM_P (op))) || (reload_in_progress && !(REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)))) win = 1; break; case 'o': if ((strict > 0 && offsettable_memref_p (op)) || (strict == 0 && offsettable_nonstrict_memref_p (op)) /* Before reload, accept what reload can handle. */ || (strict < 0 && (CONSTANT_P (op) || MEM_P (op))) /* During reload, accept a pseudo */ || (reload_in_progress && REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)) win = 1; break; default: { enum reg_class class; class = (c == 'r' ? GENERAL_REGS : REG_CLASS_FROM_CONSTRAINT (c, p)); if (class != NO_REGS) { if (strict < 0 || (strict == 0 && REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER) || (strict == 0 && GET_CODE (op) == SCRATCH) || (REG_P (op) && reg_fits_class_p (op, class, offset, mode))) win = 1; } #ifdef EXTRA_CONSTRAINT_STR else if (EXTRA_CONSTRAINT_STR (op, c, p)) win = 1; else if (EXTRA_MEMORY_CONSTRAINT (c, p) /* Every memory operand can be reloaded to fit. */ && ((strict < 0 && MEM_P (op)) /* Before reload, accept what reload can turn into mem. */ || (strict < 0 && CONSTANT_P (op)) /* During reload, accept a pseudo */ || (reload_in_progress && REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER))) win = 1; else if (EXTRA_ADDRESS_CONSTRAINT (c, p) /* Every address operand can be reloaded to fit. */ && strict < 0) win = 1; #endif break; } } while (p += len, c); constraints[opno] = p; /* If this operand did not win somehow, this alternative loses. */ if (! win) lose = 1; } /* This alternative won; the operands are ok. Change whichever operands this alternative says to change. */ if (! lose) { int opno, eopno; /* See if any earlyclobber operand conflicts with some other operand. */ if (strict > 0) for (eopno = 0; eopno < recog_data.n_operands; eopno++) /* Ignore earlyclobber operands now in memory, because we would often report failure when we have two memory operands, one of which was formerly a REG. */ if (earlyclobber[eopno] && REG_P (recog_data.operand[eopno])) for (opno = 0; opno < recog_data.n_operands; opno++) if ((MEM_P (recog_data.operand[opno]) || recog_data.operand_type[opno] != OP_OUT) && opno != eopno /* Ignore things like match_operator operands. */ && *recog_data.constraints[opno] != 0 && ! 
(matching_operands[opno] == eopno && operands_match_p (recog_data.operand[opno], recog_data.operand[eopno])) && ! safe_from_earlyclobber (recog_data.operand[opno], recog_data.operand[eopno])) lose = 1; if (! lose) { while (--funny_match_index >= 0) { recog_data.operand[funny_match[funny_match_index].other] = recog_data.operand[funny_match[funny_match_index].this]; } return 1; } } which_alternative++; } while (which_alternative < recog_data.n_alternatives); which_alternative = -1; /* If we are about to reject this, but we are not to test strictly, try a very loose test. Only return failure if it fails also. */ if (strict == 0) return constrain_operands (-1); else return 0; } /* Return 1 iff OPERAND (assumed to be a REG rtx) is a hard reg in class CLASS when its regno is offset by OFFSET and changed to mode MODE. If REG occupies multiple hard regs, all of them must be in CLASS. */ int reg_fits_class_p (rtx operand, enum reg_class class, int offset, enum machine_mode mode) { int regno = REGNO (operand); if (regno < FIRST_PSEUDO_REGISTER && TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno + offset)) { int sr; regno += offset; for (sr = hard_regno_nregs[regno][mode] - 1; sr > 0; sr--) if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno + sr)) break; return sr == 0; } return 0; } /* Split single instruction. Helper function for split_all_insns and split_all_insns_noflow. Return last insn in the sequence if successful, or NULL if unsuccessful. */ static rtx split_insn (rtx insn) { /* Split insns here to get max fine-grain parallelism. */ rtx first = PREV_INSN (insn); rtx last = try_split (PATTERN (insn), insn, 1); if (last == insn) return NULL_RTX; /* try_split returns the NOTE that INSN became. */ SET_INSN_DELETED (insn); /* ??? Coddle to md files that generate subregs in post-reload splitters instead of computing the proper hard register. */ if (reload_completed && first != last) { first = NEXT_INSN (first); for (;;) { if (INSN_P (first)) cleanup_subreg_operands (first); if (first == last) break; first = NEXT_INSN (first); } } return last; } /* Split all insns in the function. If UPD_LIFE, update life info after. */ void split_all_insns (int upd_life) { sbitmap blocks; bool changed; basic_block bb; blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (blocks); changed = false; FOR_EACH_BB_REVERSE (bb) { rtx insn, next; bool finish = false; for (insn = BB_HEAD (bb); !finish ; insn = next) { /* Can't use `next_real_insn' because that might go across CODE_LABELS and short-out basic blocks. */ next = NEXT_INSN (insn); finish = (insn == BB_END (bb)); if (INSN_P (insn)) { rtx set = single_set (insn); /* Don't split no-op move insns. These should silently disappear later in final. Splitting such insns would break the code that handles REG_NO_CONFLICT blocks. */ if (set && set_noop_p (set)) { /* Nops get in the way while scheduling, so delete them now if register allocation has already been done. It is too risky to try to do this before register allocation, and there are unlikely to be very many nops then anyways. */ if (reload_completed) { /* If the no-op set has a REG_UNUSED note, we need to update liveness information. */ if (find_reg_note (insn, REG_UNUSED, NULL_RTX)) { SET_BIT (blocks, bb->index); changed = true; } /* ??? Is life info affected by deleting edges? */ delete_insn_and_edges (insn); } } else { rtx last = split_insn (insn); if (last) { /* The split sequence may include barrier, but the BB boundary we are interested in will be set to previous one. 
*/ while (GET_CODE (last) == BARRIER) last = PREV_INSN (last); SET_BIT (blocks, bb->index); changed = true; } } } } } if (changed) { int old_last_basic_block = last_basic_block; find_many_sub_basic_blocks (blocks); if (old_last_basic_block != last_basic_block && upd_life) blocks = sbitmap_resize (blocks, last_basic_block, 1); } if (changed && upd_life) update_life_info (blocks, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES); #ifdef ENABLE_CHECKING verify_flow_info (); #endif sbitmap_free (blocks); } /* Same as split_all_insns, but do not expect CFG to be available. Used by machine dependent reorg passes. */ void split_all_insns_noflow (void) { rtx next, insn; for (insn = get_insns (); insn; insn = next) { next = NEXT_INSN (insn); if (INSN_P (insn)) { /* Don't split no-op move insns. These should silently disappear later in final. Splitting such insns would break the code that handles REG_NO_CONFLICT blocks. */ rtx set = single_set (insn); if (set && set_noop_p (set)) { /* Nops get in the way while scheduling, so delete them now if register allocation has already been done. It is too risky to try to do this before register allocation, and there are unlikely to be very many nops then anyways. ??? Should we use delete_insn when the CFG isn't valid? */ if (reload_completed) delete_insn_and_edges (insn); } else split_insn (insn); } } } #ifdef HAVE_peephole2 struct peep2_insn_data { rtx insn; regset live_before; }; static struct peep2_insn_data peep2_insn_data[MAX_INSNS_PER_PEEP2 + 1]; static int peep2_current; /* A non-insn marker indicating the last insn of the block. The live_before regset for this element is correct, indicating global_live_at_end for the block. */ #define PEEP2_EOB pc_rtx /* Return the Nth non-note insn after `current', or return NULL_RTX if it does not exist. Used by the recognizer to find the next insn to match in a multi-insn pattern. */ rtx peep2_next_insn (int n) { if (n >= MAX_INSNS_PER_PEEP2 + 1) abort (); n += peep2_current; if (n >= MAX_INSNS_PER_PEEP2 + 1) n -= MAX_INSNS_PER_PEEP2 + 1; if (peep2_insn_data[n].insn == PEEP2_EOB) return NULL_RTX; return peep2_insn_data[n].insn; } /* Return true if REGNO is dead before the Nth non-note insn after `current'. */ int peep2_regno_dead_p (int ofs, int regno) { if (ofs >= MAX_INSNS_PER_PEEP2 + 1) abort (); ofs += peep2_current; if (ofs >= MAX_INSNS_PER_PEEP2 + 1) ofs -= MAX_INSNS_PER_PEEP2 + 1; if (peep2_insn_data[ofs].insn == NULL_RTX) abort (); return ! REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno); } /* Similarly for a REG. */ int peep2_reg_dead_p (int ofs, rtx reg) { int regno, n; if (ofs >= MAX_INSNS_PER_PEEP2 + 1) abort (); ofs += peep2_current; if (ofs >= MAX_INSNS_PER_PEEP2 + 1) ofs -= MAX_INSNS_PER_PEEP2 + 1; if (peep2_insn_data[ofs].insn == NULL_RTX) abort (); regno = REGNO (reg); n = hard_regno_nregs[regno][GET_MODE (reg)]; while (--n >= 0) if (REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno + n)) return 0; return 1; } /* Try to find a hard register of mode MODE, matching the register class in CLASS_STR, which is available at the beginning of insn CURRENT_INSN and remains available until the end of LAST_INSN. LAST_INSN may be NULL_RTX, in which case the only condition is that the register must be available before CURRENT_INSN. Registers that already have bits set in REG_SET will not be considered. If an appropriate register is available, it will be returned and the corresponding bit(s) in REG_SET will be set; otherwise, NULL_RTX is returned. 
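   A hypothetical use from a target's define_peephole2 preparation
   statement (not a call taken from this file) might look like

     HARD_REG_SET used;
     rtx scratch;
     CLEAR_HARD_REG_SET (used);
     scratch = peep2_find_free_register (0, 1, "r", SImode, &used);
     if (scratch == NULL_RTX)
       FAIL;

   which asks for a general register that is free from the first matched
   insn through the second.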
*/ rtx peep2_find_free_register (int from, int to, const char *class_str, enum machine_mode mode, HARD_REG_SET *reg_set) { static int search_ofs; enum reg_class class; HARD_REG_SET live; int i; if (from >= MAX_INSNS_PER_PEEP2 + 1 || to >= MAX_INSNS_PER_PEEP2 + 1) abort (); from += peep2_current; if (from >= MAX_INSNS_PER_PEEP2 + 1) from -= MAX_INSNS_PER_PEEP2 + 1; to += peep2_current; if (to >= MAX_INSNS_PER_PEEP2 + 1) to -= MAX_INSNS_PER_PEEP2 + 1; if (peep2_insn_data[from].insn == NULL_RTX) abort (); REG_SET_TO_HARD_REG_SET (live, peep2_insn_data[from].live_before); while (from != to) { HARD_REG_SET this_live; if (++from >= MAX_INSNS_PER_PEEP2 + 1) from = 0; if (peep2_insn_data[from].insn == NULL_RTX) abort (); REG_SET_TO_HARD_REG_SET (this_live, peep2_insn_data[from].live_before); IOR_HARD_REG_SET (live, this_live); } class = (class_str[0] == 'r' ? GENERAL_REGS : REG_CLASS_FROM_CONSTRAINT (class_str[0], class_str)); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { int raw_regno, regno, success, j; /* Distribute the free registers as much as possible. */ raw_regno = search_ofs + i; if (raw_regno >= FIRST_PSEUDO_REGISTER) raw_regno -= FIRST_PSEUDO_REGISTER; #ifdef REG_ALLOC_ORDER regno = reg_alloc_order[raw_regno]; #else regno = raw_regno; #endif /* Don't allocate fixed registers. */ if (fixed_regs[regno]) continue; /* Make sure the register is of the right class. */ if (! TEST_HARD_REG_BIT (reg_class_contents[class], regno)) continue; /* And can support the mode we need. */ if (! HARD_REGNO_MODE_OK (regno, mode)) continue; /* And that we don't create an extra save/restore. */ if (! call_used_regs[regno] && ! regs_ever_live[regno]) continue; /* And we don't clobber traceback for noreturn functions. */ if ((regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM) && (! reload_completed || frame_pointer_needed)) continue; success = 1; for (j = hard_regno_nregs[regno][mode] - 1; j >= 0; j--) { if (TEST_HARD_REG_BIT (*reg_set, regno + j) || TEST_HARD_REG_BIT (live, regno + j)) { success = 0; break; } } if (success) { for (j = hard_regno_nregs[regno][mode] - 1; j >= 0; j--) SET_HARD_REG_BIT (*reg_set, regno + j); /* Start the next search with the next register. */ if (++raw_regno >= FIRST_PSEUDO_REGISTER) raw_regno = 0; search_ofs = raw_regno; return gen_rtx_REG (mode, regno); } } search_ofs = 0; return NULL_RTX; } /* Perform the peephole2 optimization pass. */ void peephole2_optimize (FILE *dump_file ATTRIBUTE_UNUSED) { regset_head rs_heads[MAX_INSNS_PER_PEEP2 + 2]; rtx insn, prev; regset live; int i; basic_block bb; #ifdef HAVE_conditional_execution sbitmap blocks; bool changed; #endif bool do_cleanup_cfg = false; bool do_rebuild_jump_labels = false; /* Initialize the regsets we're going to use. */ for (i = 0; i < MAX_INSNS_PER_PEEP2 + 1; ++i) peep2_insn_data[i].live_before = INITIALIZE_REG_SET (rs_heads[i]); live = INITIALIZE_REG_SET (rs_heads[i]); #ifdef HAVE_conditional_execution blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (blocks); changed = false; #else count_or_remove_death_notes (NULL, 1); #endif FOR_EACH_BB_REVERSE (bb) { struct propagate_block_info *pbi; /* Indicate that all slots except the last holds invalid data. */ for (i = 0; i < MAX_INSNS_PER_PEEP2; ++i) peep2_insn_data[i].insn = NULL_RTX; /* Indicate that the last slot contains live_after data. */ peep2_insn_data[MAX_INSNS_PER_PEEP2].insn = PEEP2_EOB; peep2_current = MAX_INSNS_PER_PEEP2; /* Start up propagation. 
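The slots set up above form a circular window of MAX_INSNS_PER_PEEP2 + 1 entries; offsets relative to the current position are folded into it with a wrap-around add, the same arithmetic used by peep2_next_insn and friends.  A standalone sketch of that indexing (hypothetical constants and names, not GCC code):

      #include <assert.h>
      #include <stdio.h>

      #define WINDOW 5   // stand-in for MAX_INSNS_PER_PEEP2 + 1

      // Map an offset relative to CURRENT into the circular window.
      static int window_index (int current, int offset)
      {
        assert (offset < WINDOW);
        int i = current + offset;
        if (i >= WINDOW)
          i -= WINDOW;
        return i;
      }

      int main (void)
      {
        // With current = 3, offsets 0..4 visit slots 3 4 0 1 2.
        for (int ofs = 0; ofs < WINDOW; ofs++)
          printf ("%d ", window_index (3, ofs));
        printf ("\n");
        return 0;
      }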
*/ COPY_REG_SET (live, bb->global_live_at_end); COPY_REG_SET (peep2_insn_data[MAX_INSNS_PER_PEEP2].live_before, live); #ifdef HAVE_conditional_execution pbi = init_propagate_block_info (bb, live, NULL, NULL, 0); #else pbi = init_propagate_block_info (bb, live, NULL, NULL, PROP_DEATH_NOTES); #endif for (insn = BB_END (bb); ; insn = prev) { prev = PREV_INSN (insn); if (INSN_P (insn)) { rtx try, before_try, x; int match_len; rtx note; bool was_call = false; /* Record this insn. */ if (--peep2_current < 0) peep2_current = MAX_INSNS_PER_PEEP2; peep2_insn_data[peep2_current].insn = insn; propagate_one_insn (pbi, insn); COPY_REG_SET (peep2_insn_data[peep2_current].live_before, live); /* Match the peephole. */ try = peephole2_insns (PATTERN (insn), insn, &match_len); if (try != NULL) { /* If we are splitting a CALL_INSN, look for the CALL_INSN in SEQ and copy our CALL_INSN_FUNCTION_USAGE and other cfg-related call notes. */ for (i = 0; i <= match_len; ++i) { int j; rtx old_insn, new_insn, note; j = i + peep2_current; if (j >= MAX_INSNS_PER_PEEP2 + 1) j -= MAX_INSNS_PER_PEEP2 + 1; old_insn = peep2_insn_data[j].insn; if (GET_CODE (old_insn) != CALL_INSN) continue; was_call = true; new_insn = try; while (new_insn != NULL_RTX) { if (GET_CODE (new_insn) == CALL_INSN) break; new_insn = NEXT_INSN (new_insn); } if (new_insn == NULL_RTX) abort (); CALL_INSN_FUNCTION_USAGE (new_insn) = CALL_INSN_FUNCTION_USAGE (old_insn); for (note = REG_NOTES (old_insn); note; note = XEXP (note, 1)) switch (REG_NOTE_KIND (note)) { case REG_NORETURN: case REG_SETJMP: case REG_ALWAYS_RETURN: REG_NOTES (new_insn) = gen_rtx_EXPR_LIST (REG_NOTE_KIND (note), XEXP (note, 0), REG_NOTES (new_insn)); default: /* Discard all other reg notes. */ break; } /* Croak if there is another call in the sequence. */ while (++i <= match_len) { j = i + peep2_current; if (j >= MAX_INSNS_PER_PEEP2 + 1) j -= MAX_INSNS_PER_PEEP2 + 1; old_insn = peep2_insn_data[j].insn; if (GET_CODE (old_insn) == CALL_INSN) abort (); } break; } i = match_len + peep2_current; if (i >= MAX_INSNS_PER_PEEP2 + 1) i -= MAX_INSNS_PER_PEEP2 + 1; note = find_reg_note (peep2_insn_data[i].insn, REG_EH_REGION, NULL_RTX); /* Replace the old sequence with the new. */ try = emit_insn_after_setloc (try, peep2_insn_data[i].insn, INSN_LOCATOR (peep2_insn_data[i].insn)); before_try = PREV_INSN (insn); delete_insn_chain (insn, peep2_insn_data[i].insn); /* Re-insert the EH_REGION notes. */ if (note || (was_call && nonlocal_goto_handler_labels)) { edge eh_edge; for (eh_edge = bb->succ; eh_edge ; eh_edge = eh_edge->succ_next) if (eh_edge->flags & (EDGE_EH | EDGE_ABNORMAL_CALL)) break; for (x = try ; x != before_try ; x = PREV_INSN (x)) if (GET_CODE (x) == CALL_INSN || (flag_non_call_exceptions && may_trap_p (PATTERN (x)) && !find_reg_note (x, REG_EH_REGION, NULL))) { if (note) REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0), REG_NOTES (x)); if (x != BB_END (bb) && eh_edge) { edge nfte, nehe; int flags; nfte = split_block (bb, x); flags = (eh_edge->flags & (EDGE_EH | EDGE_ABNORMAL)); if (GET_CODE (x) == CALL_INSN) flags |= EDGE_ABNORMAL_CALL; nehe = make_edge (nfte->src, eh_edge->dest, flags); nehe->probability = eh_edge->probability; nfte->probability = REG_BR_PROB_BASE - nehe->probability; do_cleanup_cfg |= purge_dead_edges (nfte->dest); #ifdef HAVE_conditional_execution SET_BIT (blocks, nfte->dest->index); changed = true; #endif bb = nfte->src; eh_edge = nehe; } } /* Converting possibly trapping insn to non-trapping is possible. Zap dummy outgoing edges. 
*/ do_cleanup_cfg |= purge_dead_edges (bb); } #ifdef HAVE_conditional_execution /* With conditional execution, we cannot back up the live information so easily, since the conditional death data structures are not so self-contained. So record that we've made a modification to this block and update life information at the end. */ SET_BIT (blocks, bb->index); changed = true; for (i = 0; i < MAX_INSNS_PER_PEEP2 + 1; ++i) peep2_insn_data[i].insn = NULL_RTX; peep2_insn_data[peep2_current].insn = PEEP2_EOB; #else /* Back up lifetime information past the end of the newly created sequence. */ if (++i >= MAX_INSNS_PER_PEEP2 + 1) i = 0; COPY_REG_SET (live, peep2_insn_data[i].live_before); /* Update life information for the new sequence. */ x = try; do { if (INSN_P (x)) { if (--i < 0) i = MAX_INSNS_PER_PEEP2; peep2_insn_data[i].insn = x; propagate_one_insn (pbi, x); COPY_REG_SET (peep2_insn_data[i].live_before, live); } x = PREV_INSN (x); } while (x != prev); /* ??? Should verify that LIVE now matches what we had before the new sequence. */ peep2_current = i; #endif /* If we generated a jump instruction, it won't have JUMP_LABEL set. Recompute after we're done. */ for (x = try; x != before_try; x = PREV_INSN (x)) if (GET_CODE (x) == JUMP_INSN) { do_rebuild_jump_labels = true; break; } } } if (insn == BB_HEAD (bb)) break; } free_propagate_block_info (pbi); } for (i = 0; i < MAX_INSNS_PER_PEEP2 + 1; ++i) FREE_REG_SET (peep2_insn_data[i].live_before); FREE_REG_SET (live); if (do_rebuild_jump_labels) rebuild_jump_labels (get_insns ()); /* If we eliminated EH edges, we may be able to merge blocks. Further, we've changed global life since exception handlers are no longer reachable. */ if (do_cleanup_cfg) { cleanup_cfg (0); update_life_info (0, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES); } #ifdef HAVE_conditional_execution else { count_or_remove_death_notes (blocks, 1); update_life_info (blocks, UPDATE_LIFE_LOCAL, PROP_DEATH_NOTES); } sbitmap_free (blocks); #endif } #endif /* HAVE_peephole2 */ /* Common predicates for use with define_bypass. */ /* True if the dependency between OUT_INSN and IN_INSN is on the store data not the address operand(s) of the store. IN_INSN must be single_set. OUT_INSN must be either a single_set or a PARALLEL with SETs inside. */ int store_data_bypass_p (rtx out_insn, rtx in_insn) { rtx out_set, in_set; in_set = single_set (in_insn); if (! in_set) abort (); if (!MEM_P (SET_DEST (in_set))) return false; out_set = single_set (out_insn); if (out_set) { if (reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set))) return false; } else { rtx out_pat; int i; out_pat = PATTERN (out_insn); if (GET_CODE (out_pat) != PARALLEL) abort (); for (i = 0; i < XVECLEN (out_pat, 0); i++) { rtx exp = XVECEXP (out_pat, 0, i); if (GET_CODE (exp) == CLOBBER) continue; if (GET_CODE (exp) != SET) abort (); if (reg_mentioned_p (SET_DEST (exp), SET_DEST (in_set))) return false; } } return true; } /* True if the dependency between OUT_INSN and IN_INSN is in the IF_THEN_ELSE condition, and not the THEN or ELSE branch. OUT_INSN may be either a single or multiple set; IN_INSN should be single_set for truth, but for convenience of insn categorization may be any JUMP or CALL insn. */ int if_test_bypass_p (rtx out_insn, rtx in_insn) { rtx out_set, in_set; in_set = single_set (in_insn); if (! 
in_set) { if (GET_CODE (in_insn) == JUMP_INSN || GET_CODE (in_insn) == CALL_INSN) return false; abort (); } if (GET_CODE (SET_SRC (in_set)) != IF_THEN_ELSE) return false; in_set = SET_SRC (in_set); out_set = single_set (out_insn); if (out_set) { if (reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 1)) || reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 2))) return false; } else { rtx out_pat; int i; out_pat = PATTERN (out_insn); if (GET_CODE (out_pat) != PARALLEL) abort (); for (i = 0; i < XVECLEN (out_pat, 0); i++) { rtx exp = XVECEXP (out_pat, 0, i); if (GET_CODE (exp) == CLOBBER) continue; if (GET_CODE (exp) != SET) abort (); if (reg_mentioned_p (SET_DEST (exp), XEXP (in_set, 1)) || reg_mentioned_p (SET_DEST (exp), XEXP (in_set, 2))) return false; } } return true; } /* Register to Stack convert for GNU compiler. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This pass converts stack-like registers from the "flat register file" model that gcc uses, to a stack convention that the 387 uses. * The form of the input: On input, the function consists of insns that have had their registers fully allocated to a set of "virtual" registers. Note that the word "virtual" is used differently here than elsewhere in gcc: for each virtual stack reg, there is a hard reg, but the mapping between them is not known until this pass is run. On output, hard register numbers have been substituted, and various pop and exchange insns have been emitted. The hard register numbers and the virtual register numbers completely overlap - before this pass, all stack register numbers are virtual, and afterward they are all hard. The virtual registers can be manipulated normally by gcc, and their semantics are the same as for normal registers. After the hard register numbers are substituted, the semantics of an insn containing stack-like regs are not the same as for an insn with normal regs: for instance, it is not safe to delete an insn that appears to be a no-op move. In general, no insn containing hard regs should be changed after this pass is done. * The form of the output: After this pass, hard register numbers represent the distance from the current top of stack to the desired register. A reference to FIRST_STACK_REG references the top of stack, FIRST_STACK_REG + 1 represents the register just below that, and so forth. Also, REG_DEAD notes indicate whether or not a stack register should be popped. A "swap" insn looks like a parallel of two patterns, where each pattern is a SET: one sets A to B, the other B to A. A "push" or "load" insn is a SET whose SET_DEST is FIRST_STACK_REG and whose SET_SRC is REG or MEM. Any other SET_DEST, such as PLUS, will replace the existing stack top, not push a new value.
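(Purely as an illustrative aside, and not code used by this pass: the top-relative numbering described above can be modelled with a small array plus a top index, and the lookup below mirrors what get_hard_regnum computes later in this file.  All names are hypothetical.)

      #include <stdio.h>

      #define STACK_SIZE 8

      struct sim_stack { int top; int reg[STACK_SIZE]; };   // reg[top] is the top of stack

      // Return the top-relative position of virtual register REGNO, or -1
      // if it is not on the stack: 0 is the top, 1 the slot below it, ...
      static int top_relative (const struct sim_stack *s, int regno)
      {
        for (int i = s->top; i >= 0; i--)
          if (s->reg[i] == regno)
            return s->top - i;
        return -1;
      }

      int main (void)
      {
        struct sim_stack s = { .top = 2, .reg = { 11, 9, 14 } };   // 14 is on top
        printf ("reg 9 is %%st(%d)\n", top_relative (&s, 9));      // reg 9 is %st(1)
        return 0;
      }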
A store insn is a SET whose SET_DEST is REG or MEM, and whose SET_SRC is FIRST_STACK_REG. The case where the SET_SRC and SET_DEST are both FIRST_STACK_REG appears ambiguous. As a special case, the presence of a REG_DEAD note for FIRST_STACK_REG differentiates between a load insn and a pop. If a REG_DEAD is present, the insn represents a "pop" that discards the top of the register stack. If there is no REG_DEAD note, then the insn represents a "dup" or a push of the current top of stack onto the stack. * Methodology: Existing REG_DEAD and REG_UNUSED notes for stack registers are deleted and recreated from scratch. REG_DEAD is never created for a SET_DEST, only REG_UNUSED. * asm_operands: There are several rules on the usage of stack-like regs in asm_operands insns. These rules apply only to the operands that are stack-like regs: 1. Given a set of input regs that die in an asm_operands, it is necessary to know which are implicitly popped by the asm, and which must be explicitly popped by gcc. An input reg that is implicitly popped by the asm must be explicitly clobbered, unless it is constrained to match an output operand. 2. For any input reg that is implicitly popped by an asm, it is necessary to know how to adjust the stack to compensate for the pop. If any non-popped input is closer to the top of the reg-stack than the implicitly popped reg, it would not be possible to know what the stack looked like - it's not clear how the rest of the stack "slides up". All implicitly popped input regs must be closer to the top of the reg-stack than any input that is not implicitly popped. 3. It is possible that if an input dies in an insn, reload might use the input reg for an output reload. Consider this example: asm ("foo" : "=t" (a) : "f" (b)); This asm says that input B is not popped by the asm, and that the asm pushes a result onto the reg-stack, i.e., the stack is one deeper after the asm than it was before. But, it is possible that reload will think that it can use the same reg for both the input and the output, if input B dies in this insn. If any input operand uses the "f" constraint, all output reg constraints must use the "&" earlyclobber. The asm above would be written as asm ("foo" : "=&t" (a) : "f" (b)); 4. Some operands need to be in particular places on the stack. All output operands fall in this category - there is no other way to know which regs the outputs appear in unless the user indicates this in the constraints. Output operands must specifically indicate which reg an output appears in after an asm. "=f" is not allowed: the operand constraints must select a class with a single reg. 5. Output operands may not be "inserted" between existing stack regs. Since no 387 opcode uses a read/write operand, all output operands are dead before the asm_operands, and are pushed by the asm_operands. It makes no sense to push anywhere but the top of the reg-stack. Output operands must start at the top of the reg-stack: output operands may not "skip" a reg. 6. Some asm statements may need extra stack space for internal calculations. This can be guaranteed by clobbering stack registers unrelated to the inputs and outputs. Here are a couple of reasonable asms to want to write. This asm takes one input, which is internally popped, and produces two outputs. asm ("fsincos" : "=t" (cos), "=u" (sin) : "0" (inp)); This asm takes two inputs, which are popped by the fyl2xp1 opcode, and replaces them with one output. The user must code the "st(1)" clobber for reg-stack.c to know that fyl2xp1 pops both inputs.
asm ("fyl2xp1" : "=t" (result) : "0" (x), "u" (y) : "st(1)"); */ /* We use this array to cache info about insns, because otherwise we spend too much time in stack_regs_mentioned_p. Indexed by insn UIDs. A value of zero is uninitialized, one indicates the insn uses stack registers, two indicates the insn does not use stack registers. */ static GTY(()) varray_type stack_regs_mentioned_data; #ifdef STACK_REGS #define REG_STACK_SIZE (LAST_STACK_REG - FIRST_STACK_REG + 1) /* This is the basic stack record. TOP is an index into REG[] such that REG[TOP] is the top of stack. If TOP is -1 the stack is empty. If TOP is -2, REG[] is not yet initialized. Stack initialization consists of placing each live reg in array `reg' and setting `top' appropriately. REG_SET indicates which registers are live. */ typedef struct stack_def { int top; /* index to top stack element */ HARD_REG_SET reg_set; /* set of live registers */ unsigned char reg[REG_STACK_SIZE];/* register - stack mapping */ } *stack; /* This is used to carry information about basic blocks. It is attached to the AUX field of the standard CFG block. */ typedef struct r2s_block_info_def { struct stack_def stack_in; /* Input stack configuration. */ struct stack_def stack_out; /* Output stack configuration. */ HARD_REG_SET out_reg_set; /* Stack regs live on output. */ int done; /* True if block already converted. */ int predecessors; /* Number of predecessors that needs to be visited. */ } *r2s_block_info; #define R2S_BLOCK_INFO(B) ((r2s_block_info) (B)->aux) /* Passed to change_stack to indicate where to emit insns. */ enum emit_where { EMIT_AFTER, EMIT_BEFORE }; /* The block we're currently working on. */ static basic_block current_block; /* This is the register file for all register after conversion. */ static rtx FP_mode_reg[LAST_STACK_REG+1-FIRST_STACK_REG][(int) MAX_MACHINE_MODE]; #define FP_MODE_REG(regno,mode) \ (FP_mode_reg[(regno)-FIRST_STACK_REG][(int) (mode)]) /* Used to initialize uninitialized registers. */ static rtx not_a_num; /* Forward declarations */ static int stack_regs_mentioned_p (rtx pat); static void straighten_stack (rtx, stack); static void pop_stack (stack, int); static rtx *get_true_reg (rtx *); static int check_asm_stack_operands (rtx); static int get_asm_operand_n_inputs (rtx); static rtx stack_result (tree); static void replace_reg (rtx *, int); static void remove_regno_note (rtx, enum reg_note, unsigned int); static int get_hard_regnum (stack, rtx); static rtx emit_pop_insn (rtx, stack, rtx, enum emit_where); static void emit_swap_insn (rtx, stack, rtx); static void swap_to_top(rtx, stack, rtx, rtx); static bool move_for_stack_reg (rtx, stack, rtx); static int swap_rtx_condition_1 (rtx); static int swap_rtx_condition (rtx); static void compare_for_stack_reg (rtx, stack, rtx); static bool subst_stack_regs_pat (rtx, stack, rtx); static void subst_asm_stack_regs (rtx, stack); static bool subst_stack_regs (rtx, stack); static void change_stack (rtx, stack, stack, enum emit_where); static int convert_regs_entry (void); static void convert_regs_exit (void); static int convert_regs_1 (FILE *, basic_block); static int convert_regs_2 (FILE *, basic_block); static int convert_regs (FILE *); static void print_stack (FILE *, stack); static rtx next_flags_user (rtx); static void record_label_references (rtx, rtx); static bool compensate_edge (edge, FILE *); /* Return nonzero if any stack register is mentioned somewhere within PAT. 
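The walk is a plain depth-first scan of the expression; as a self-contained illustration with a hypothetical node type (the real code walks rtx objects via GET_RTX_FORMAT), the shape is:

      #include <stdio.h>

      struct node { int is_stack_reg; int n_kids; struct node *kid[4]; };

      // Return 1 as soon as any node in the tree is a stack register.
      static int mentions_stack_reg (const struct node *n)
      {
        if (n->is_stack_reg)
          return 1;
        for (int i = 0; i < n->n_kids; i++)
          if (mentions_stack_reg (n->kid[i]))
            return 1;
        return 0;
      }

      int main (void)
      {
        struct node leaf = { 1, 0, { 0 } };
        struct node root = { 0, 1, { &leaf } };
        printf ("%d\n", mentions_stack_reg (&root));   // prints 1
        return 0;
      }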
*/ static int stack_regs_mentioned_p (rtx pat) { const char *fmt; int i; if (STACK_REG_P (pat)) return 1; fmt = GET_RTX_FORMAT (GET_CODE (pat)); for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--) { if (fmt[i] == 'E') { int j; for (j = XVECLEN (pat, i) - 1; j >= 0; j--) if (stack_regs_mentioned_p (XVECEXP (pat, i, j))) return 1; } else if (fmt[i] == 'e' && stack_regs_mentioned_p (XEXP (pat, i))) return 1; } return 0; } /* Return nonzero if INSN mentions stacked registers, else return zero. */ int stack_regs_mentioned (rtx insn) { unsigned int uid, max; int test; if (! INSN_P (insn) || !stack_regs_mentioned_data) return 0; uid = INSN_UID (insn); max = VARRAY_SIZE (stack_regs_mentioned_data); if (uid >= max) { /* Allocate some extra size to avoid too many reallocs, but do not grow too quickly. */ max = uid + uid / 20; VARRAY_GROW (stack_regs_mentioned_data, max); } test = VARRAY_CHAR (stack_regs_mentioned_data, uid); if (test == 0) { /* This insn has yet to be examined. Do so now. */ test = stack_regs_mentioned_p (PATTERN (insn)) ? 1 : 2; VARRAY_CHAR (stack_regs_mentioned_data, uid) = test; } return test == 1; } static rtx ix86_flags_rtx; static rtx next_flags_user (rtx insn) { /* Search forward looking for the first use of this value. Stop at block boundaries. */ while (insn != BB_END (current_block)) { insn = NEXT_INSN (insn); if (INSN_P (insn) && reg_mentioned_p (ix86_flags_rtx, PATTERN (insn))) return insn; if (GET_CODE (insn) == CALL_INSN) return NULL_RTX; } return NULL_RTX; } /* Reorganize the stack into ascending numbers, after this insn. */ static void straighten_stack (rtx insn, stack regstack) { struct stack_def temp_stack; int top; /* If there is only a single register on the stack, then the stack is already in increasing order and no reorganization is needed. Similarly if the stack is empty. */ if (regstack->top <= 0) return; COPY_HARD_REG_SET (temp_stack.reg_set, regstack->reg_set); for (top = temp_stack.top = regstack->top; top >= 0; top--) temp_stack.reg[top] = FIRST_STACK_REG + temp_stack.top - top; change_stack (insn, regstack, &temp_stack, EMIT_AFTER); } /* Pop a register from the stack. */ static void pop_stack (stack regstack, int regno) { int top = regstack->top; CLEAR_HARD_REG_BIT (regstack->reg_set, regno); regstack->top--; /* If regno was not at the top of stack then adjust stack. */ if (regstack->reg [top] != regno) { int i; for (i = regstack->top; i >= 0; i--) if (regstack->reg [i] == regno) { int j; for (j = i; j < top; j++) regstack->reg [j] = regstack->reg [j + 1]; break; } } } /* Convert register usage from "flat" register file usage to a "stack register file. FILE is the dump file, if used. Construct a CFG and run life analysis. Then convert each insn one by one. Run a last cleanup_cfg pass, if optimizing, to eliminate code duplication created when the converter inserts pop insns on the edges. */ bool reg_to_stack (FILE *file) { basic_block bb; int i; int max_uid; /* Clean up previous run. */ stack_regs_mentioned_data = 0; /* See if there is something to do. Flow analysis is quite expensive so we might save some compilation time. */ for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++) if (regs_ever_live[i]) break; if (i > LAST_STACK_REG) return false; /* Ok, floating point instructions exist. If not optimizing, build the CFG and run life analysis. Also need to rebuild life when superblock scheduling is done as it don't update liveness yet. 
*/ if (!optimize || (flag_sched2_use_superblocks && flag_schedule_insns_after_reload)) { count_or_remove_death_notes (NULL, 1); life_analysis (file, PROP_DEATH_NOTES); } mark_dfs_back_edges (); /* Set up block info for each basic block. */ alloc_aux_for_blocks (sizeof (struct r2s_block_info_def)); FOR_EACH_BB_REVERSE (bb) { edge e; for (e = bb->pred; e; e = e->pred_next) if (!(e->flags & EDGE_DFS_BACK) && e->src != ENTRY_BLOCK_PTR) R2S_BLOCK_INFO (bb)->predecessors++; } /* Create the replacement registers up front. */ for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++) { enum machine_mode mode; for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) FP_MODE_REG (i, mode) = gen_rtx_REG (mode, i); for (mode = GET_CLASS_NARROWEST_MODE (MODE_COMPLEX_FLOAT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) FP_MODE_REG (i, mode) = gen_rtx_REG (mode, i); } ix86_flags_rtx = gen_rtx_REG (CCmode, FLAGS_REG); /* A QNaN for initializing uninitialized variables. ??? We can't load from constant memory in PIC mode, because we're inserting these instructions before the prologue and the PIC register hasn't been set up. In that case, fall back on zero, which we can get from `ldz'. */ if (flag_pic) not_a_num = CONST0_RTX (SFmode); else { not_a_num = gen_lowpart (SFmode, GEN_INT (0x7fc00000)); not_a_num = force_const_mem (SFmode, not_a_num); } /* Allocate a cache for stack_regs_mentioned. */ max_uid = get_max_uid (); VARRAY_CHAR_INIT (stack_regs_mentioned_data, max_uid + 1, "stack_regs_mentioned cache"); convert_regs (file); free_aux_for_blocks (); return true; } /* Check PAT, which is in INSN, for LABEL_REFs. Add INSN to the label's chain of references, and note which insn contains each reference. */ static void record_label_references (rtx insn, rtx pat) { enum rtx_code code = GET_CODE (pat); int i; const char *fmt; if (code == LABEL_REF) { rtx label = XEXP (pat, 0); rtx ref; if (GET_CODE (label) != CODE_LABEL) abort (); /* If this is an undefined label, LABEL_REFS (label) contains garbage. */ if (INSN_UID (label) == 0) return; /* Don't make a duplicate in the code_label's chain. */ for (ref = LABEL_REFS (label); ref && ref != label; ref = LABEL_NEXTREF (ref)) if (CONTAINING_INSN (ref) == insn) return; CONTAINING_INSN (pat) = insn; LABEL_NEXTREF (pat) = LABEL_REFS (label); LABEL_REFS (label) = pat; return; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') record_label_references (insn, XEXP (pat, i)); if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (pat, i); j++) record_label_references (insn, XVECEXP (pat, i, j)); } } } /* Return a pointer to the REG expression within PAT. If PAT is not a REG, possible enclosed by a conversion rtx, return the inner part of PAT that stopped the search. */ static rtx * get_true_reg (rtx *pat) { for (;;) switch (GET_CODE (*pat)) { case SUBREG: /* Eliminate FP subregister accesses in favor of the actual FP register in use. */ { rtx subreg; if (FP_REG_P (subreg = SUBREG_REG (*pat))) { int regno_off = subreg_regno_offset (REGNO (subreg), GET_MODE (subreg), SUBREG_BYTE (*pat), GET_MODE (*pat)); *pat = FP_MODE_REG (REGNO (subreg) + regno_off, GET_MODE (subreg)); default: return pat; } } case FLOAT: case FIX: case FLOAT_EXTEND: pat = & XEXP (*pat, 0); break; case FLOAT_TRUNCATE: if (!flag_unsafe_math_optimizations) return pat; pat = & XEXP (*pat, 0); break; } } /* Set if we find any malformed asms in a block. 
*/ static bool any_malformed_asm; /* There are many rules that an asm statement for stack-like regs must follow. Those rules are explained at the top of this file: the rule numbers below refer to that explanation. */ static int check_asm_stack_operands (rtx insn) { int i; int n_clobbers; int malformed_asm = 0; rtx body = PATTERN (insn); char reg_used_as_output[FIRST_PSEUDO_REGISTER]; char implicitly_dies[FIRST_PSEUDO_REGISTER]; int alt; rtx *clobber_reg = 0; int n_inputs, n_outputs; /* Find out what the constraints require. If no constraint alternative matches, this asm is malformed. */ extract_insn (insn); constrain_operands (1); alt = which_alternative; preprocess_constraints (); n_inputs = get_asm_operand_n_inputs (body); n_outputs = recog_data.n_operands - n_inputs; if (alt < 0) { malformed_asm = 1; /* Avoid further trouble with this insn. */ PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx); return 0; } /* Strip SUBREGs here to make the following code simpler. */ for (i = 0; i < recog_data.n_operands; i++) if (GET_CODE (recog_data.operand[i]) == SUBREG && REG_P (SUBREG_REG (recog_data.operand[i]))) recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]); /* Set up CLOBBER_REG. */ n_clobbers = 0; if (GET_CODE (body) == PARALLEL) { clobber_reg = alloca (XVECLEN (body, 0) * sizeof (rtx)); for (i = 0; i < XVECLEN (body, 0); i++) if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER) { rtx clobber = XVECEXP (body, 0, i); rtx reg = XEXP (clobber, 0); if (GET_CODE (reg) == SUBREG && REG_P (SUBREG_REG (reg))) reg = SUBREG_REG (reg); if (STACK_REG_P (reg)) { clobber_reg[n_clobbers] = reg; n_clobbers++; } } } /* Enforce rule #4: Output operands must specifically indicate which reg an output appears in after an asm. "=f" is not allowed: the operand constraints must select a class with a single reg. Also enforce rule #5: Output operands must start at the top of the reg-stack: output operands may not "skip" a reg. */ memset (reg_used_as_output, 0, sizeof (reg_used_as_output)); for (i = 0; i < n_outputs; i++) if (STACK_REG_P (recog_data.operand[i])) { if (reg_class_size[(int) recog_op_alt[i][alt].class] != 1) { error_for_asm (insn, "output constraint %d must specify a single register", i); malformed_asm = 1; } else { int j; for (j = 0; j < n_clobbers; j++) if (REGNO (recog_data.operand[i]) == REGNO (clobber_reg[j])) { error_for_asm (insn, "output constraint %d cannot be specified together with \"%s\" clobber", i, reg_names [REGNO (clobber_reg[j])]); malformed_asm = 1; break; } if (j == n_clobbers) reg_used_as_output[REGNO (recog_data.operand[i])] = 1; } } /* Search for first non-popped reg. */ for (i = FIRST_STACK_REG; i < LAST_STACK_REG + 1; i++) if (! reg_used_as_output[i]) break; /* If there are any other popped regs, that's an error. */ for (; i < LAST_STACK_REG + 1; i++) if (reg_used_as_output[i]) break; if (i != LAST_STACK_REG + 1) { error_for_asm (insn, "output regs must be grouped at top of stack"); malformed_asm = 1; } /* Enforce rule #2: All implicitly popped input regs must be closer to the top of the reg-stack than any input that is not implicitly popped. */ memset (implicitly_dies, 0, sizeof (implicitly_dies)); for (i = n_outputs; i < n_outputs + n_inputs; i++) if (STACK_REG_P (recog_data.operand[i])) { /* An input reg is implicitly popped if it is tied to an output, or if there is a CLOBBER for it. 
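Both this rule and rule #5 reduce to the same shape of check: walk the per-register flags from FIRST_STACK_REG, skip the leading group of set flags, and fail if any flag is set after the first clear one.  A hypothetical standalone version of that test (not the code used below):

      #include <stdio.h>

      // Return 1 if every set entry of FLAGS[0..n-1] forms a contiguous
      // prefix, i.e. no set entry appears after the first clear one.
      static int grouped_at_top (const char *flags, int n)
      {
        int i = 0;
        while (i < n && flags[i])
          i++;                       // skip the leading group
        for (; i < n; i++)
          if (flags[i])
            return 0;                // a straggler below the group
        return 1;
      }

      int main (void)
      {
        char ok[4]  = { 1, 1, 0, 0 };
        char bad[4] = { 1, 0, 1, 0 };
        printf ("%d %d\n", grouped_at_top (ok, 4), grouped_at_top (bad, 4));   // 1 0
        return 0;
      }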
*/ int j; for (j = 0; j < n_clobbers; j++) if (operands_match_p (clobber_reg[j], recog_data.operand[i])) break; if (j < n_clobbers || recog_op_alt[i][alt].matches >= 0) implicitly_dies[REGNO (recog_data.operand[i])] = 1; } /* Search for first non-popped reg. */ for (i = FIRST_STACK_REG; i < LAST_STACK_REG + 1; i++) if (! implicitly_dies[i]) break; /* If there are any other popped regs, that's an error. */ for (; i < LAST_STACK_REG + 1; i++) if (implicitly_dies[i]) break; if (i != LAST_STACK_REG + 1) { error_for_asm (insn, "implicitly popped regs must be grouped at top of stack"); malformed_asm = 1; } /* Enforce rule #3: If any input operand uses the "f" constraint, all output constraints must use the "&" earlyclobber. ??? Detect this more deterministically by having constrain_asm_operands record any earlyclobber. */ for (i = n_outputs; i < n_outputs + n_inputs; i++) if (recog_op_alt[i][alt].matches == -1) { int j; for (j = 0; j < n_outputs; j++) if (operands_match_p (recog_data.operand[j], recog_data.operand[i])) { error_for_asm (insn, "output operand %d must use `&' constraint", j); malformed_asm = 1; } } if (malformed_asm) { /* Avoid further trouble with this insn. */ PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx); any_malformed_asm = true; return 0; } return 1; } /* Calculate the number of inputs and outputs in BODY, an asm_operands. N_OPERANDS is the total number of operands, and N_INPUTS and N_OUTPUTS are pointers to ints into which the results are placed. */ static int get_asm_operand_n_inputs (rtx body) { if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS) return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body)); else if (GET_CODE (body) == ASM_OPERANDS) return ASM_OPERANDS_INPUT_LENGTH (body); else if (GET_CODE (body) == PARALLEL && GET_CODE (XVECEXP (body, 0, 0)) == SET) return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (XVECEXP (body, 0, 0))); else if (GET_CODE (body) == PARALLEL && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS) return ASM_OPERANDS_INPUT_LENGTH (XVECEXP (body, 0, 0)); abort (); } /* If current function returns its result in an fp stack register, return the REG. Otherwise, return 0. */ static rtx stack_result (tree decl) { rtx result; /* If the value is supposed to be returned in memory, then clearly it is not returned in a stack register. */ if (aggregate_value_p (DECL_RESULT (decl), decl)) return 0; result = DECL_RTL_IF_SET (DECL_RESULT (decl)); if (result != 0) { #ifdef FUNCTION_OUTGOING_VALUE result = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (decl)), decl); #else result = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (decl)), decl); #endif } return result != 0 && STACK_REG_P (result) ? result : 0; } /* * This section deals with stack register substitution, and forms the second * pass over the RTL. */ /* Replace REG, which is a pointer to a stack reg RTX, with an RTX for the desired hard REGNO. */ static void replace_reg (rtx *reg, int regno) { if (regno < FIRST_STACK_REG || regno > LAST_STACK_REG || ! STACK_REG_P (*reg)) abort (); switch (GET_MODE_CLASS (GET_MODE (*reg))) { default: abort (); case MODE_FLOAT: case MODE_COMPLEX_FLOAT:; } *reg = FP_MODE_REG (regno, GET_MODE (*reg)); } /* Remove a note of type NOTE, which must be found, for register number REGNO from INSN. Remove only one such note. 
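The removal below uses the classic pointer-to-the-previous-link idiom, so the head of the list needs no special case.  A self-contained sketch with a hypothetical node type:

      #include <stdio.h>
      #include <stdlib.h>

      struct note { int kind; struct note *next; };

      // Unlink and free the first note whose kind matches KIND.
      // Returns 1 if a note was removed, 0 if none matched.
      static int remove_first (struct note **link, int kind)
      {
        for (; *link; link = &(*link)->next)
          if ((*link)->kind == kind)
            {
              struct note *dead = *link;
              *link = dead->next;    // splice the node out through the handle
              free (dead);
              return 1;
            }
        return 0;
      }

      int main (void)
      {
        struct note *head = malloc (sizeof *head);
        head->kind = 7;
        head->next = NULL;
        printf ("%d\n", remove_first (&head, 7));   // prints 1; list is now empty
        return 0;
      }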
*/ static void remove_regno_note (rtx insn, enum reg_note note, unsigned int regno) { rtx *note_link, this; note_link = &REG_NOTES (insn); for (this = *note_link; this; this = XEXP (this, 1)) if (REG_NOTE_KIND (this) == note && REG_P (XEXP (this, 0)) && REGNO (XEXP (this, 0)) == regno) { *note_link = XEXP (this, 1); return; } else note_link = &XEXP (this, 1); abort (); } /* Find the hard register number of virtual register REG in REGSTACK. The hard register number is relative to the top of the stack. -1 is returned if the register is not found. */ static int get_hard_regnum (stack regstack, rtx reg) { int i; if (! STACK_REG_P (reg)) abort (); for (i = regstack->top; i >= 0; i--) if (regstack->reg[i] == REGNO (reg)) break; return i >= 0 ? (FIRST_STACK_REG + regstack->top - i) : -1; } /* Emit an insn to pop virtual register REG before or after INSN. REGSTACK is the stack state after INSN and is updated to reflect this pop. WHEN is either emit_insn_before or emit_insn_after. A pop insn is represented as a SET whose destination is the register to be popped and source is the top of stack. A death note for the top of stack causes the movdf pattern to pop. */ static rtx emit_pop_insn (rtx insn, stack regstack, rtx reg, enum emit_where where) { rtx pop_insn, pop_rtx; int hard_regno; /* For complex types take care to pop both halves. These may survive in CLOBBER and USE expressions. */ if (COMPLEX_MODE_P (GET_MODE (reg))) { rtx reg1 = FP_MODE_REG (REGNO (reg), DFmode); rtx reg2 = FP_MODE_REG (REGNO (reg) + 1, DFmode); pop_insn = NULL_RTX; if (get_hard_regnum (regstack, reg1) >= 0) pop_insn = emit_pop_insn (insn, regstack, reg1, where); if (get_hard_regnum (regstack, reg2) >= 0) pop_insn = emit_pop_insn (insn, regstack, reg2, where); if (!pop_insn) abort (); return pop_insn; } hard_regno = get_hard_regnum (regstack, reg); if (hard_regno < FIRST_STACK_REG) abort (); pop_rtx = gen_rtx_SET (VOIDmode, FP_MODE_REG (hard_regno, DFmode), FP_MODE_REG (FIRST_STACK_REG, DFmode)); if (where == EMIT_AFTER) pop_insn = emit_insn_after (pop_rtx, insn); else pop_insn = emit_insn_before (pop_rtx, insn); REG_NOTES (pop_insn) = gen_rtx_EXPR_LIST (REG_DEAD, FP_MODE_REG (FIRST_STACK_REG, DFmode), REG_NOTES (pop_insn)); regstack->reg[regstack->top - (hard_regno - FIRST_STACK_REG)] = regstack->reg[regstack->top]; regstack->top -= 1; CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (reg)); return pop_insn; } /* Emit an insn before or after INSN to swap virtual register REG with the top of stack. REGSTACK is the stack state before the swap, and is updated to reflect the swap. A swap insn is represented as a PARALLEL of two patterns: each pattern moves one reg to the other. If REG is already at the top of the stack, no insn is emitted. */ static void emit_swap_insn (rtx insn, stack regstack, rtx reg) { int hard_regno; rtx swap_rtx; int tmp, other_reg; /* swap regno temps */ rtx i1; /* the stack-reg insn prior to INSN */ rtx i1set = NULL_RTX; /* the SET rtx within I1 */ hard_regno = get_hard_regnum (regstack, reg); if (hard_regno < FIRST_STACK_REG) abort (); if (hard_regno == FIRST_STACK_REG) return; other_reg = regstack->top - (hard_regno - FIRST_STACK_REG); tmp = regstack->reg[other_reg]; regstack->reg[other_reg] = regstack->reg[regstack->top]; regstack->reg[regstack->top] = tmp; /* Find the previous insn involving stack regs, but don't pass a block boundary.
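In isolation, such a bounded backward scan looks like the following hypothetical sketch (array indices stand in for the insn chain, and the limit plays the role of the block boundary):

      #include <stdio.h>

      // Walk backwards from POS looking for the nearest element equal to
      // KEY, but never past LIMIT.  Returns the index found, or -1.
      static int prev_matching (const int *a, int pos, int limit, int key)
      {
        for (int i = pos - 1; i > limit; i--)
          if (a[i] == key)
            return i;
        return -1;
      }

      int main (void)
      {
        int insns[6] = { 0, 7, 0, 0, 7, 0 };
        printf ("%d\n", prev_matching (insns, 5, 0, 7));   // 4
        printf ("%d\n", prev_matching (insns, 3, 2, 7));   // -1: boundary reached first
        return 0;
      }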
*/ i1 = NULL; if (current_block && insn != BB_HEAD (current_block)) { rtx tmp = PREV_INSN (insn); rtx limit = PREV_INSN (BB_HEAD (current_block)); while (tmp != limit) { if (GET_CODE (tmp) == CODE_LABEL || GET_CODE (tmp) == CALL_INSN || NOTE_INSN_BASIC_BLOCK_P (tmp) || (GET_CODE (tmp) == INSN && stack_regs_mentioned (tmp))) { i1 = tmp; break; } tmp = PREV_INSN (tmp); } } if (i1 != NULL_RTX && (i1set = single_set (i1)) != NULL_RTX) { rtx i1src = *get_true_reg (&SET_SRC (i1set)); rtx i1dest = *get_true_reg (&SET_DEST (i1set)); /* If the previous register stack push was from the reg we are to swap with, omit the swap. */ if (REG_P (i1dest) && REGNO (i1dest) == FIRST_STACK_REG && REG_P (i1src) && REGNO (i1src) == (unsigned) hard_regno - 1 && find_regno_note (i1, REG_DEAD, FIRST_STACK_REG) == NULL_RTX) return; /* If the previous insn wrote to the reg we are to swap with, omit the swap. */ if (REG_P (i1dest) && REGNO (i1dest) == (unsigned) hard_regno && REG_P (i1src) && REGNO (i1src) == FIRST_STACK_REG && find_regno_note (i1, REG_DEAD, FIRST_STACK_REG) == NULL_RTX) return; } swap_rtx = gen_swapxf (FP_MODE_REG (hard_regno, XFmode), FP_MODE_REG (FIRST_STACK_REG, XFmode)); if (i1) emit_insn_after (swap_rtx, i1); else if (current_block) emit_insn_before (swap_rtx, BB_HEAD (current_block)); else emit_insn_before (swap_rtx, insn); } /* Emit an insns before INSN to swap virtual register SRC1 with the top of stack and virtual register SRC2 with second stack slot. REGSTACK is the stack state before the swaps, and is updated to reflect the swaps. A swap insn is represented as a PARALLEL of two patterns: each pattern moves one reg to the other. If SRC1 and/or SRC2 are already at the right place, no swap insn is emitted. */ static void swap_to_top (rtx insn, stack regstack, rtx src1, rtx src2) { struct stack_def temp_stack; int regno, j, k, temp; temp_stack = *regstack; /* Place operand 1 at the top of stack. */ regno = get_hard_regnum (&temp_stack, src1); if (regno < 0) abort (); if (regno != FIRST_STACK_REG) { k = temp_stack.top - (regno - FIRST_STACK_REG); j = temp_stack.top; temp = temp_stack.reg[k]; temp_stack.reg[k] = temp_stack.reg[j]; temp_stack.reg[j] = temp; } /* Place operand 2 next on the stack. */ regno = get_hard_regnum (&temp_stack, src2); if (regno < 0) abort (); if (regno != FIRST_STACK_REG + 1) { k = temp_stack.top - (regno - FIRST_STACK_REG); j = temp_stack.top - 1; temp = temp_stack.reg[k]; temp_stack.reg[k] = temp_stack.reg[j]; temp_stack.reg[j] = temp; } change_stack (insn, regstack, &temp_stack, EMIT_BEFORE); } /* Handle a move to or from a stack register in PAT, which is in INSN. REGSTACK is the current stack. Return whether a control flow insn was deleted in the process. */ static bool move_for_stack_reg (rtx insn, stack regstack, rtx pat) { rtx *psrc = get_true_reg (&SET_SRC (pat)); rtx *pdest = get_true_reg (&SET_DEST (pat)); rtx src, dest; rtx note; bool control_flow_insn_deleted = false; src = *psrc; dest = *pdest; if (STACK_REG_P (src) && STACK_REG_P (dest)) { /* Write from one stack reg to another. If SRC dies here, then just change the register mapping and delete the insn. */ note = find_regno_note (insn, REG_DEAD, REGNO (src)); if (note) { int i; /* If this is a no-op move, there must not be a REG_DEAD note. */ if (REGNO (src) == REGNO (dest)) abort (); for (i = regstack->top; i >= 0; i--) if (regstack->reg[i] == REGNO (src)) break; /* The source must be live, and the dest must be dead. 
*/ if (i < 0 || get_hard_regnum (regstack, dest) >= FIRST_STACK_REG) abort (); /* It is possible that the dest is unused after this insn. If so, just pop the src. */ if (find_regno_note (insn, REG_UNUSED, REGNO (dest))) emit_pop_insn (insn, regstack, src, EMIT_AFTER); else { regstack->reg[i] = REGNO (dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest)); CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (src)); } control_flow_insn_deleted |= control_flow_insn_p (insn); delete_insn (insn); return control_flow_insn_deleted; } /* The source reg does not die. */ /* If this appears to be a no-op move, delete it, or else it will confuse the machine description output patterns. But if it is REG_UNUSED, we must pop the reg now, as per-insn processing for REG_UNUSED will not work for deleted insns. */ if (REGNO (src) == REGNO (dest)) { if (find_regno_note (insn, REG_UNUSED, REGNO (dest))) emit_pop_insn (insn, regstack, dest, EMIT_AFTER); control_flow_insn_deleted |= control_flow_insn_p (insn); delete_insn (insn); return control_flow_insn_deleted; } /* The destination ought to be dead. */ if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG) abort (); replace_reg (psrc, get_hard_regnum (regstack, src)); regstack->reg[++regstack->top] = REGNO (dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest)); replace_reg (pdest, FIRST_STACK_REG); } else if (STACK_REG_P (src)) { /* Save from a stack reg to MEM, or possibly integer reg. Since only top of stack may be saved, emit an exchange first if needs be. */ emit_swap_insn (insn, regstack, src); note = find_regno_note (insn, REG_DEAD, REGNO (src)); if (note) { replace_reg (&XEXP (note, 0), FIRST_STACK_REG); regstack->top--; CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (src)); } else if ((GET_MODE (src) == XFmode) && regstack->top < REG_STACK_SIZE - 1) { /* A 387 cannot write an XFmode value to a MEM without clobbering the source reg. The output code can handle this by reading back the value from the MEM. But it is more efficient to use a temp register if one is available. Push the source value here if the register stack is not full, and then write the value to memory via a pop. */ rtx push_rtx, push_insn; rtx top_stack_reg = FP_MODE_REG (FIRST_STACK_REG, GET_MODE (src)); push_rtx = gen_movxf (top_stack_reg, top_stack_reg); push_insn = emit_insn_before (push_rtx, insn); REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, top_stack_reg, REG_NOTES (insn)); } replace_reg (psrc, FIRST_STACK_REG); } else if (STACK_REG_P (dest)) { /* Load from MEM, or possibly integer REG or constant, into the stack regs. The actual target is always the top of the stack. The stack mapping is changed to reflect that DEST is now at top of stack. */ /* The destination ought to be dead. */ if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG) abort (); if (regstack->top >= REG_STACK_SIZE) abort (); regstack->reg[++regstack->top] = REGNO (dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest)); replace_reg (pdest, FIRST_STACK_REG); } else abort (); return control_flow_insn_deleted; } /* Swap the condition on a branch, if there is one. Return true if we found a condition to swap. False if the condition was not used as such. 
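As a standalone illustration of the recursive rewrite performed below (hypothetical expression type, covering only a tiny subset of the comparison codes):

      #include <stdio.h>

      enum cmp { CMP_LT, CMP_GT, CMP_LE, CMP_GE, CMP_OTHER };

      struct expr { enum cmp code; int n_kids; struct expr *kid[2]; };

      static enum cmp swapped (enum cmp c)
      {
        switch (c)
          {
          case CMP_LT: return CMP_GT;
          case CMP_GT: return CMP_LT;
          case CMP_LE: return CMP_GE;
          case CMP_GE: return CMP_LE;
          default:     return c;
          }
      }

      // Swap the sense of every comparison found; return 1 if at least one
      // comparison was rewritten, 0 otherwise.
      static int swap_conditions (struct expr *e)
      {
        if (e->code != CMP_OTHER)
          {
            e->code = swapped (e->code);
            return 1;
          }
        int r = 0;
        for (int i = 0; i < e->n_kids; i++)
          r |= swap_conditions (e->kid[i]);
        return r;
      }

      int main (void)
      {
        struct expr leaf = { CMP_LT, 0, { 0, 0 } };
        struct expr root = { CMP_OTHER, 1, { &leaf, 0 } };
        printf ("%d %d\n", swap_conditions (&root), leaf.code == CMP_GT);   // 1 1
        return 0;
      }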
*/ static int swap_rtx_condition_1 (rtx pat) { const char *fmt; int i, r = 0; if (COMPARISON_P (pat)) { PUT_CODE (pat, swap_condition (GET_CODE (pat))); r = 1; } else { fmt = GET_RTX_FORMAT (GET_CODE (pat)); for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--) { if (fmt[i] == 'E') { int j; for (j = XVECLEN (pat, i) - 1; j >= 0; j--) r |= swap_rtx_condition_1 (XVECEXP (pat, i, j)); } else if (fmt[i] == 'e') r |= swap_rtx_condition_1 (XEXP (pat, i)); } } return r; } static int swap_rtx_condition (rtx insn) { rtx pat = PATTERN (insn); /* We're looking for a single set to cc0 or an HImode temporary. */ if (GET_CODE (pat) == SET && REG_P (SET_DEST (pat)) && REGNO (SET_DEST (pat)) == FLAGS_REG) { insn = next_flags_user (insn); if (insn == NULL_RTX) return 0; pat = PATTERN (insn); } /* See if this is, or ends in, a fnstsw, aka unspec 9. If so, we're not doing anything with the cc value right now. We may be able to search for one though. */ if (GET_CODE (pat) == SET && GET_CODE (SET_SRC (pat)) == UNSPEC && XINT (SET_SRC (pat), 1) == UNSPEC_FNSTSW) { rtx dest = SET_DEST (pat); /* Search forward looking for the first use of this value. Stop at block boundaries. */ while (insn != BB_END (current_block)) { insn = NEXT_INSN (insn); if (INSN_P (insn) && reg_mentioned_p (dest, insn)) break; if (GET_CODE (insn) == CALL_INSN) return 0; } /* So we've found the insn using this value. If it is anything other than sahf, aka unspec 10, or the value does not die (meaning we'd have to search further), then we must give up. */ pat = PATTERN (insn); if (GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != UNSPEC || XINT (SET_SRC (pat), 1) != UNSPEC_SAHF || ! dead_or_set_p (insn, dest)) return 0; /* Now we are prepared to handle this as a normal cc0 setter. */ insn = next_flags_user (insn); if (insn == NULL_RTX) return 0; pat = PATTERN (insn); } if (swap_rtx_condition_1 (pat)) { int fail = 0; INSN_CODE (insn) = -1; if (recog_memoized (insn) == -1) fail = 1; /* In case the flags don't die here, recurse to try fix following user too. */ else if (! dead_or_set_p (insn, ix86_flags_rtx)) { insn = next_flags_user (insn); if (!insn || !swap_rtx_condition (insn)) fail = 1; } if (fail) { swap_rtx_condition_1 (pat); return 0; } return 1; } return 0; } /* Handle a comparison. Special care needs to be taken to avoid causing comparisons that a 387 cannot do correctly, such as EQ. Also, a pop insn may need to be emitted. The 387 does have an `fcompp' insn that can pop two regs, but it is sometimes too expensive to do this - a `fcomp' followed by a `fstpl %st(0)' may be easier to set up. */ static void compare_for_stack_reg (rtx insn, stack regstack, rtx pat_src) { rtx *src1, *src2; rtx src1_note, src2_note; rtx flags_user; src1 = get_true_reg (&XEXP (pat_src, 0)); src2 = get_true_reg (&XEXP (pat_src, 1)); flags_user = next_flags_user (insn); /* ??? If fxch turns out to be cheaper than fstp, give priority to registers that die in this insn - move those to stack top first. */ if ((! STACK_REG_P (*src1) || (STACK_REG_P (*src2) && get_hard_regnum (regstack, *src2) == FIRST_STACK_REG)) && swap_rtx_condition (insn)) { rtx temp; temp = XEXP (pat_src, 0); XEXP (pat_src, 0) = XEXP (pat_src, 1); XEXP (pat_src, 1) = temp; src1 = get_true_reg (&XEXP (pat_src, 0)); src2 = get_true_reg (&XEXP (pat_src, 1)); INSN_CODE (insn) = -1; } /* We will fix any death note later. 
*/ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); if (STACK_REG_P (*src2)) src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2)); else src2_note = NULL_RTX; emit_swap_insn (insn, regstack, *src1); replace_reg (src1, FIRST_STACK_REG); if (STACK_REG_P (*src2)) replace_reg (src2, get_hard_regnum (regstack, *src2)); if (src1_note) { pop_stack (regstack, REGNO (XEXP (src1_note, 0))); replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG); } /* If the second operand dies, handle that. But if the operands are the same stack register, don't bother, because only one death is needed, and it was just handled. */ if (src2_note && ! (STACK_REG_P (*src1) && STACK_REG_P (*src2) && REGNO (*src1) == REGNO (*src2))) { /* As a special case, two regs may die in this insn if src2 is next to top of stack and the top of stack also dies. Since we have already popped src1, "next to top of stack" is really at top (FIRST_STACK_REG) now. */ if (get_hard_regnum (regstack, XEXP (src2_note, 0)) == FIRST_STACK_REG && src1_note) { pop_stack (regstack, REGNO (XEXP (src2_note, 0))); replace_reg (&XEXP (src2_note, 0), FIRST_STACK_REG + 1); } else { /* The 386 can only represent death of the first operand in the case handled above. In all other cases, emit a separate pop and remove the death note from here. */ /* link_cc0_insns (insn); */ remove_regno_note (insn, REG_DEAD, REGNO (XEXP (src2_note, 0))); emit_pop_insn (insn, regstack, XEXP (src2_note, 0), EMIT_AFTER); } } } /* Substitute new registers in PAT, which is part of INSN. REGSTACK is the current register layout. Return whether a control flow insn was deleted in the process. */ static bool subst_stack_regs_pat (rtx insn, stack regstack, rtx pat) { rtx *dest, *src; bool control_flow_insn_deleted = false; switch (GET_CODE (pat)) { case USE: /* Deaths in USE insns can happen in non optimizing compilation. Handle them by popping the dying register. */ src = get_true_reg (&XEXP (pat, 0)); if (STACK_REG_P (*src) && find_regno_note (insn, REG_DEAD, REGNO (*src))) { emit_pop_insn (insn, regstack, *src, EMIT_AFTER); return control_flow_insn_deleted; } /* ??? Uninitialized USE should not happen. */ else if (get_hard_regnum (regstack, *src) == -1) abort (); break; case CLOBBER: { rtx note; dest = get_true_reg (&XEXP (pat, 0)); if (STACK_REG_P (*dest)) { note = find_reg_note (insn, REG_DEAD, *dest); if (pat != PATTERN (insn)) { /* The fix_truncdi_1 pattern wants to be able to allocate it's own scratch register. It does this by clobbering an fp reg so that it is assured of an empty reg-stack register. If the register is live, kill it now. Remove the DEAD/UNUSED note so we don't try to kill it later too. */ if (note) emit_pop_insn (insn, regstack, *dest, EMIT_BEFORE); else { note = find_reg_note (insn, REG_UNUSED, *dest); if (!note) abort (); } remove_note (insn, note); replace_reg (dest, FIRST_STACK_REG + 1); } else { /* A top-level clobber with no REG_DEAD, and no hard-regnum indicates an uninitialized value. Because reload removed all other clobbers, this must be due to a function returning without a value. Load up a NaN. */ if (! note && get_hard_regnum (regstack, *dest) == -1) { pat = gen_rtx_SET (VOIDmode, FP_MODE_REG (REGNO (*dest), SFmode), not_a_num); PATTERN (insn) = pat; control_flow_insn_deleted |= move_for_stack_reg (insn, regstack, pat); } if (! 
note && COMPLEX_MODE_P (GET_MODE (*dest)) && get_hard_regnum (regstack, FP_MODE_REG (REGNO (*dest), DFmode)) == -1) { pat = gen_rtx_SET (VOIDmode, FP_MODE_REG (REGNO (*dest) + 1, SFmode), not_a_num); PATTERN (insn) = pat; control_flow_insn_deleted |= move_for_stack_reg (insn, regstack, pat); } } } break; } case SET: { rtx *src1 = (rtx *) 0, *src2; rtx src1_note, src2_note; rtx pat_src; dest = get_true_reg (&SET_DEST (pat)); src = get_true_reg (&SET_SRC (pat)); pat_src = SET_SRC (pat); /* See if this is a `movM' pattern, and handle elsewhere if so. */ if (STACK_REG_P (*src) || (STACK_REG_P (*dest) && (REG_P (*src) || MEM_P (*src) || GET_CODE (*src) == CONST_DOUBLE))) { control_flow_insn_deleted |= move_for_stack_reg (insn, regstack, pat); break; } switch (GET_CODE (pat_src)) { case COMPARE: compare_for_stack_reg (insn, regstack, pat_src); break; case CALL: { int count; for (count = hard_regno_nregs[REGNO (*dest)][GET_MODE (*dest)]; --count >= 0;) { regstack->reg[++regstack->top] = REGNO (*dest) + count; SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest) + count); } } replace_reg (dest, FIRST_STACK_REG); break; case REG: /* This is a `tstM2' case. */ if (*dest != cc0_rtx) abort (); src1 = src; /* Fall through. */ case FLOAT_TRUNCATE: case SQRT: case ABS: case NEG: /* These insns only operate on the top of the stack. DEST might be cc0_rtx if we're processing a tstM pattern. Also, it's possible that the tstM case results in a REG_DEAD note on the source. */ if (src1 == 0) src1 = get_true_reg (&XEXP (pat_src, 0)); emit_swap_insn (insn, regstack, *src1); src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); if (STACK_REG_P (*dest)) replace_reg (dest, FIRST_STACK_REG); if (src1_note) { replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG); regstack->top--; CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (*src1)); } replace_reg (src1, FIRST_STACK_REG); break; case MINUS: case DIV: /* On i386, reversed forms of subM3 and divM3 exist for MODE_FLOAT, so the same code that works for addM3 and mulM3 can be used. */ case MULT: case PLUS: /* These insns can accept the top of stack as a destination from a stack reg or mem, or can use the top of stack as a source and some other stack register (possibly top of stack) as a destination. */ src1 = get_true_reg (&XEXP (pat_src, 0)); src2 = get_true_reg (&XEXP (pat_src, 1)); /* We will fix any death note later. */ if (STACK_REG_P (*src1)) src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); else src1_note = NULL_RTX; if (STACK_REG_P (*src2)) src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2)); else src2_note = NULL_RTX; /* If either operand is not a stack register, then the dest must be top of stack. */ if (! STACK_REG_P (*src1) || ! STACK_REG_P (*src2)) emit_swap_insn (insn, regstack, *dest); else { /* Both operands are REG. If neither operand is already at the top of stack, choose to make the one that is the dest the new top of stack. 
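The decision itself is tiny; a hypothetical standalone restatement (depths are top-relative positions, 0 meaning the value already sits in the top-of-stack register):

      #include <stdio.h>

      // A two-operand 387 arithmetic insn needs one operand in the
      // top-of-stack register, so an exchange is required exactly when
      // neither source is already there.
      static int needs_exchange (int src1_depth, int src2_depth)
      {
        return src1_depth != 0 && src2_depth != 0;
      }

      int main (void)
      {
        printf ("%d\n", needs_exchange (2, 3));   // 1: neither operand on top
        printf ("%d\n", needs_exchange (0, 3));   // 0: first operand already on top
        return 0;
      }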
*/ int src1_hard_regnum, src2_hard_regnum; src1_hard_regnum = get_hard_regnum (regstack, *src1); src2_hard_regnum = get_hard_regnum (regstack, *src2); if (src1_hard_regnum == -1 || src2_hard_regnum == -1) abort (); if (src1_hard_regnum != FIRST_STACK_REG && src2_hard_regnum != FIRST_STACK_REG) emit_swap_insn (insn, regstack, *dest); } if (STACK_REG_P (*src1)) replace_reg (src1, get_hard_regnum (regstack, *src1)); if (STACK_REG_P (*src2)) replace_reg (src2, get_hard_regnum (regstack, *src2)); if (src1_note) { rtx src1_reg = XEXP (src1_note, 0); /* If the register that dies is at the top of stack, then the destination is somewhere else - merely substitute it. But if the reg that dies is not at top of stack, then move the top of stack to the dead reg, as though we had done the insn and then a store-with-pop. */ if (REGNO (src1_reg) == regstack->reg[regstack->top]) { SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, get_hard_regnum (regstack, *dest)); } else { int regno = get_hard_regnum (regstack, src1_reg); SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, regno); regstack->reg[regstack->top - (regno - FIRST_STACK_REG)] = regstack->reg[regstack->top]; } CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (XEXP (src1_note, 0))); replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG); regstack->top--; } else if (src2_note) { rtx src2_reg = XEXP (src2_note, 0); if (REGNO (src2_reg) == regstack->reg[regstack->top]) { SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, get_hard_regnum (regstack, *dest)); } else { int regno = get_hard_regnum (regstack, src2_reg); SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, regno); regstack->reg[regstack->top - (regno - FIRST_STACK_REG)] = regstack->reg[regstack->top]; } CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (XEXP (src2_note, 0))); replace_reg (&XEXP (src2_note, 0), FIRST_STACK_REG); regstack->top--; } else { SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, get_hard_regnum (regstack, *dest)); } /* Keep operand 1 matching with destination. */ if (COMMUTATIVE_ARITH_P (pat_src) && REG_P (*src1) && REG_P (*src2) && REGNO (*src1) != REGNO (*dest)) { int tmp = REGNO (*src1); replace_reg (src1, REGNO (*src2)); replace_reg (src2, tmp); } break; case UNSPEC: switch (XINT (pat_src, 1)) { case UNSPEC_SIN: case UNSPEC_COS: case UNSPEC_FRNDINT: case UNSPEC_F2XM1: /* These insns only operate on the top of the stack. */ src1 = get_true_reg (&XVECEXP (pat_src, 0, 0)); emit_swap_insn (insn, regstack, *src1); /* Input should never die, it is replaced with output. */ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); if (src1_note) abort(); if (STACK_REG_P (*dest)) replace_reg (dest, FIRST_STACK_REG); replace_reg (src1, FIRST_STACK_REG); break; case UNSPEC_FPATAN: case UNSPEC_FYL2X: case UNSPEC_FYL2XP1: /* These insns operate on the top two stack slots. */ src1 = get_true_reg (&XVECEXP (pat_src, 0, 0)); src2 = get_true_reg (&XVECEXP (pat_src, 0, 1)); src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2)); swap_to_top (insn, regstack, *src1, *src2); replace_reg (src1, FIRST_STACK_REG); replace_reg (src2, FIRST_STACK_REG + 1); if (src1_note) replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG); if (src2_note) replace_reg (&XEXP (src2_note, 0), FIRST_STACK_REG + 1); /* Pop both input operands from the stack. 
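The bookkeeping amounts to shrinking the simulated stack by two and pushing the result in its place, so the net depth drops by one.  A hypothetical standalone sketch:

      #include <stdio.h>

      #define STACK_SIZE 8
      struct sim_stack { int top; int reg[STACK_SIZE]; };

      // Consume the two values on top of the stack and push the (virtual)
      // result register in their place.
      static void pop2_push1 (struct sim_stack *s, int result_reg)
      {
        s->top -= 2;                      // both inputs are gone
        s->reg[++s->top] = result_reg;    // the result occupies the new top
      }

      int main (void)
      {
        struct sim_stack s = { .top = 2, .reg = { 4, 6, 9 } };   // 9 and 6 are the inputs
        pop2_push1 (&s, 12);
        printf ("top=%d reg=%d\n", s.top, s.reg[s.top]);         // top=1 reg=12
        return 0;
      }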
*/ CLEAR_HARD_REG_BIT (regstack->reg_set, regstack->reg[regstack->top]); CLEAR_HARD_REG_BIT (regstack->reg_set, regstack->reg[regstack->top - 1]); regstack->top -= 2; /* Push the result back onto the stack. */ regstack->reg[++regstack->top] = REGNO (*dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, FIRST_STACK_REG); break; case UNSPEC_FSCALE_FRACT: case UNSPEC_FPREM_F: case UNSPEC_FPREM1_F: /* These insns operate on the top two stack slots. first part of double input, double output insn. */ src1 = get_true_reg (&XVECEXP (pat_src, 0, 0)); src2 = get_true_reg (&XVECEXP (pat_src, 0, 1)); src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2)); /* Inputs should never die, they are replaced with outputs. */ if ((src1_note) || (src2_note)) abort(); swap_to_top (insn, regstack, *src1, *src2); /* Push the result back onto stack. Empty stack slot will be filled in second part of insn. */ if (STACK_REG_P (*dest)) { regstack->reg[regstack->top] = REGNO (*dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, FIRST_STACK_REG); } replace_reg (src1, FIRST_STACK_REG); replace_reg (src2, FIRST_STACK_REG + 1); break; case UNSPEC_FSCALE_EXP: case UNSPEC_FPREM_U: case UNSPEC_FPREM1_U: /* These insns operate on the top two stack slots./ second part of double input, double output insn. */ src1 = get_true_reg (&XVECEXP (pat_src, 0, 0)); src2 = get_true_reg (&XVECEXP (pat_src, 0, 1)); src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2)); /* Inputs should never die, they are replaced with outputs. */ if ((src1_note) || (src2_note)) abort(); swap_to_top (insn, regstack, *src1, *src2); /* Push the result back onto stack. Fill empty slot from first part of insn and fix top of stack pointer. */ if (STACK_REG_P (*dest)) { regstack->reg[regstack->top - 1] = REGNO (*dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, FIRST_STACK_REG + 1); } replace_reg (src1, FIRST_STACK_REG); replace_reg (src2, FIRST_STACK_REG + 1); break; case UNSPEC_SINCOS_COS: case UNSPEC_TAN_ONE: case UNSPEC_XTRACT_FRACT: /* These insns operate on the top two stack slots, first part of one input, double output insn. */ src1 = get_true_reg (&XVECEXP (pat_src, 0, 0)); emit_swap_insn (insn, regstack, *src1); /* Input should never die, it is replaced with output. */ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); if (src1_note) abort(); /* Push the result back onto stack. Empty stack slot will be filled in second part of insn. */ if (STACK_REG_P (*dest)) { regstack->reg[regstack->top + 1] = REGNO (*dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, FIRST_STACK_REG); } replace_reg (src1, FIRST_STACK_REG); break; case UNSPEC_SINCOS_SIN: case UNSPEC_TAN_TAN: case UNSPEC_XTRACT_EXP: /* These insns operate on the top two stack slots, second part of one input, double output insn. */ src1 = get_true_reg (&XVECEXP (pat_src, 0, 0)); emit_swap_insn (insn, regstack, *src1); /* Input should never die, it is replaced with output. */ src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); if (src1_note) abort(); /* Push the result back onto stack. Fill empty slot from first part of insn and fix top of stack pointer. 
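A plausible reading, offered only as illustration: these UNSPECs appear to be the second half of two-result instructions such as fsincos, fptan and fxtract; the matching first half above left the extra slot unclaimed, and this half installs its destination there and advances the top-of-stack index.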
*/ if (STACK_REG_P (*dest)) { regstack->reg[regstack->top] = REGNO (*dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, FIRST_STACK_REG + 1); regstack->top++; } replace_reg (src1, FIRST_STACK_REG); break; case UNSPEC_SAHF: /* (unspec [(unspec [(compare)] UNSPEC_FNSTSW)] UNSPEC_SAHF) The combination matches the PPRO fcomi instruction. */ pat_src = XVECEXP (pat_src, 0, 0); if (GET_CODE (pat_src) != UNSPEC || XINT (pat_src, 1) != UNSPEC_FNSTSW) abort (); /* Fall through. */ case UNSPEC_FNSTSW: /* Combined fcomp+fnstsw generated for doing well with CSE. When optimizing this would have been broken up before now. */ pat_src = XVECEXP (pat_src, 0, 0); if (GET_CODE (pat_src) != COMPARE) abort (); compare_for_stack_reg (insn, regstack, pat_src); break; default: abort (); } break; case IF_THEN_ELSE: /* This insn requires the top of stack to be the destination. */ src1 = get_true_reg (&XEXP (pat_src, 1)); src2 = get_true_reg (&XEXP (pat_src, 2)); src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1)); src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2)); /* If the comparison operator is an FP comparison operator, it is handled correctly by compare_for_stack_reg () who will move the destination to the top of stack. But if the comparison operator is not an FP comparison operator, we have to handle it here. */ if (get_hard_regnum (regstack, *dest) >= FIRST_STACK_REG && REGNO (*dest) != regstack->reg[regstack->top]) { /* In case one of operands is the top of stack and the operands dies, it is safe to make it the destination operand by reversing the direction of cmove and avoid fxch. */ if ((REGNO (*src1) == regstack->reg[regstack->top] && src1_note) || (REGNO (*src2) == regstack->reg[regstack->top] && src2_note)) { int idx1 = (get_hard_regnum (regstack, *src1) - FIRST_STACK_REG); int idx2 = (get_hard_regnum (regstack, *src2) - FIRST_STACK_REG); /* Make reg-stack believe that the operands are already swapped on the stack */ regstack->reg[regstack->top - idx1] = REGNO (*src2); regstack->reg[regstack->top - idx2] = REGNO (*src1); /* Reverse condition to compensate the operand swap. i386 do have comparison always reversible. */ PUT_CODE (XEXP (pat_src, 0), reversed_comparison_code (XEXP (pat_src, 0), insn)); } else emit_swap_insn (insn, regstack, *dest); } { rtx src_note [3]; int i; src_note[0] = 0; src_note[1] = src1_note; src_note[2] = src2_note; if (STACK_REG_P (*src1)) replace_reg (src1, get_hard_regnum (regstack, *src1)); if (STACK_REG_P (*src2)) replace_reg (src2, get_hard_regnum (regstack, *src2)); for (i = 1; i <= 2; i++) if (src_note [i]) { int regno = REGNO (XEXP (src_note[i], 0)); /* If the register that dies is not at the top of stack, then move the top of stack to the dead reg */ if (regno != regstack->reg[regstack->top]) { remove_regno_note (insn, REG_DEAD, regno); emit_pop_insn (insn, regstack, XEXP (src_note[i], 0), EMIT_AFTER); } else /* Top of stack never dies, as it is the destination. */ abort (); } } /* Make dest the top of stack. Add dest to regstack if not present. */ if (get_hard_regnum (regstack, *dest) < FIRST_STACK_REG) regstack->reg[++regstack->top] = REGNO (*dest); SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest)); replace_reg (dest, FIRST_STACK_REG); break; default: abort (); } break; } default: break; } return control_flow_insn_deleted; } /* Substitute hard regnums for any stack regs in INSN, which has N_INPUTS inputs and N_OUTPUTS outputs. REGSTACK is the stack info before the insn, and is updated with changes made here. 
There are several requirements and assumptions about the use of stack-like regs in asm statements. These rules are enforced by record_asm_stack_regs; see comments there for details. Any asm_operands left in the RTL at this point may be assume to meet the requirements, since record_asm_stack_regs removes any problem asm. */ static void subst_asm_stack_regs (rtx insn, stack regstack) { rtx body = PATTERN (insn); int alt; rtx *note_reg; /* Array of note contents */ rtx **note_loc; /* Address of REG field of each note */ enum reg_note *note_kind; /* The type of each note */ rtx *clobber_reg = 0; rtx **clobber_loc = 0; struct stack_def temp_stack; int n_notes; int n_clobbers; rtx note; int i; int n_inputs, n_outputs; if (! check_asm_stack_operands (insn)) return; /* Find out what the constraints required. If no constraint alternative matches, that is a compiler bug: we should have caught such an insn in check_asm_stack_operands. */ extract_insn (insn); constrain_operands (1); alt = which_alternative; preprocess_constraints (); n_inputs = get_asm_operand_n_inputs (body); n_outputs = recog_data.n_operands - n_inputs; if (alt < 0) abort (); /* Strip SUBREGs here to make the following code simpler. */ for (i = 0; i < recog_data.n_operands; i++) if (GET_CODE (recog_data.operand[i]) == SUBREG && REG_P (SUBREG_REG (recog_data.operand[i]))) { recog_data.operand_loc[i] = & SUBREG_REG (recog_data.operand[i]); recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]); } /* Set up NOTE_REG, NOTE_LOC and NOTE_KIND. */ for (i = 0, note = REG_NOTES (insn); note; note = XEXP (note, 1)) i++; note_reg = alloca (i * sizeof (rtx)); note_loc = alloca (i * sizeof (rtx *)); note_kind = alloca (i * sizeof (enum reg_note)); n_notes = 0; for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) { rtx reg = XEXP (note, 0); rtx *loc = & XEXP (note, 0); if (GET_CODE (reg) == SUBREG && REG_P (SUBREG_REG (reg))) { loc = & SUBREG_REG (reg); reg = SUBREG_REG (reg); } if (STACK_REG_P (reg) && (REG_NOTE_KIND (note) == REG_DEAD || REG_NOTE_KIND (note) == REG_UNUSED)) { note_reg[n_notes] = reg; note_loc[n_notes] = loc; note_kind[n_notes] = REG_NOTE_KIND (note); n_notes++; } } /* Set up CLOBBER_REG and CLOBBER_LOC. */ n_clobbers = 0; if (GET_CODE (body) == PARALLEL) { clobber_reg = alloca (XVECLEN (body, 0) * sizeof (rtx)); clobber_loc = alloca (XVECLEN (body, 0) * sizeof (rtx *)); for (i = 0; i < XVECLEN (body, 0); i++) if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER) { rtx clobber = XVECEXP (body, 0, i); rtx reg = XEXP (clobber, 0); rtx *loc = & XEXP (clobber, 0); if (GET_CODE (reg) == SUBREG && REG_P (SUBREG_REG (reg))) { loc = & SUBREG_REG (reg); reg = SUBREG_REG (reg); } if (STACK_REG_P (reg)) { clobber_reg[n_clobbers] = reg; clobber_loc[n_clobbers] = loc; n_clobbers++; } } } temp_stack = *regstack; /* Put the input regs into the desired place in TEMP_STACK. */ for (i = n_outputs; i < n_outputs + n_inputs; i++) if (STACK_REG_P (recog_data.operand[i]) && reg_class_subset_p (recog_op_alt[i][alt].class, FLOAT_REGS) && recog_op_alt[i][alt].class != FLOAT_REGS) { /* If an operand needs to be in a particular reg in FLOAT_REGS, the constraint was either 't' or 'u'. Since these constraints are for single register classes, and reload guaranteed that operand[i] is already in that class, we can just use REGNO (recog_data.operand[i]) to know which actual reg this operand needs to be in. 
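For illustration, the classic form of such an asm (a hypothetical user example, not something emitted by the compiler itself) is

     asm ("fsincos" : "=t" (cosx), "=u" (sinx) : "0" (x));

where "t" pins the operand to %st(0) and "u" to %st(1), so the REGNO recorded by reload already names the exact hard register.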
*/ int regno = get_hard_regnum (&temp_stack, recog_data.operand[i]); if (regno < 0) abort (); if ((unsigned int) regno != REGNO (recog_data.operand[i])) { /* recog_data.operand[i] is not in the right place. Find it and swap it with whatever is already in I's place. K is where recog_data.operand[i] is now. J is where it should be. */ int j, k, temp; k = temp_stack.top - (regno - FIRST_STACK_REG); j = (temp_stack.top - (REGNO (recog_data.operand[i]) - FIRST_STACK_REG)); temp = temp_stack.reg[k]; temp_stack.reg[k] = temp_stack.reg[j]; temp_stack.reg[j] = temp; } } /* Emit insns before INSN to make sure the reg-stack is in the right order. */ change_stack (insn, regstack, &temp_stack, EMIT_BEFORE); /* Make the needed input register substitutions. Do death notes and clobbers too, because these are for inputs, not outputs. */ for (i = n_outputs; i < n_outputs + n_inputs; i++) if (STACK_REG_P (recog_data.operand[i])) { int regnum = get_hard_regnum (regstack, recog_data.operand[i]); if (regnum < 0) abort (); replace_reg (recog_data.operand_loc[i], regnum); } for (i = 0; i < n_notes; i++) if (note_kind[i] == REG_DEAD) { int regnum = get_hard_regnum (regstack, note_reg[i]); if (regnum < 0) abort (); replace_reg (note_loc[i], regnum); } for (i = 0; i < n_clobbers; i++) { /* It's OK for a CLOBBER to reference a reg that is not live. Don't try to replace it in that case. */ int regnum = get_hard_regnum (regstack, clobber_reg[i]); if (regnum >= 0) { /* Sigh - clobbers always have QImode. But replace_reg knows that these regs can't be MODE_INT and will abort. Just put the right reg there without calling replace_reg. */ *clobber_loc[i] = FP_MODE_REG (regnum, DFmode); } } /* Now remove from REGSTACK any inputs that the asm implicitly popped. */ for (i = n_outputs; i < n_outputs + n_inputs; i++) if (STACK_REG_P (recog_data.operand[i])) { /* An input reg is implicitly popped if it is tied to an output, or if there is a CLOBBER for it. */ int j; for (j = 0; j < n_clobbers; j++) if (operands_match_p (clobber_reg[j], recog_data.operand[i])) break; if (j < n_clobbers || recog_op_alt[i][alt].matches >= 0) { /* recog_data.operand[i] might not be at the top of stack. But that's OK, because all we need to do is pop the right number of regs off of the top of the reg-stack. record_asm_stack_regs guaranteed that all implicitly popped regs were grouped at the top of the reg-stack. */ CLEAR_HARD_REG_BIT (regstack->reg_set, regstack->reg[regstack->top]); regstack->top--; } } /* Now add to REGSTACK any outputs that the asm implicitly pushed. Note that there isn't any need to substitute register numbers. ??? Explain why this is true. */ for (i = LAST_STACK_REG; i >= FIRST_STACK_REG; i--) { /* See if there is an output for this hard reg. */ int j; for (j = 0; j < n_outputs; j++) if (STACK_REG_P (recog_data.operand[j]) && REGNO (recog_data.operand[j]) == (unsigned) i) { regstack->reg[++regstack->top] = i; SET_HARD_REG_BIT (regstack->reg_set, i); break; } } /* Now emit a pop insn for any REG_UNUSED output, or any REG_DEAD input that the asm didn't implicitly pop. If the asm didn't implicitly pop an input reg, that reg will still be live. Note that we can't use find_regno_note here: the register numbers in the death notes have already been substituted. 
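A hypothetical case: if the sine output of the fsincos asm above is never used, it carries a REG_UNUSED note and the first loop below emits an explicit pop right after the asm so the dead value does not stay on the 387 stack; the second loop does the same for dead inputs the asm did not implicitly pop.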
*/ for (i = 0; i < n_outputs; i++) if (STACK_REG_P (recog_data.operand[i])) { int j; for (j = 0; j < n_notes; j++) if (REGNO (recog_data.operand[i]) == REGNO (note_reg[j]) && note_kind[j] == REG_UNUSED) { insn = emit_pop_insn (insn, regstack, recog_data.operand[i], EMIT_AFTER); break; } } for (i = n_outputs; i < n_outputs + n_inputs; i++) if (STACK_REG_P (recog_data.operand[i])) { int j; for (j = 0; j < n_notes; j++) if (REGNO (recog_data.operand[i]) == REGNO (note_reg[j]) && note_kind[j] == REG_DEAD && TEST_HARD_REG_BIT (regstack->reg_set, REGNO (recog_data.operand[i]))) { insn = emit_pop_insn (insn, regstack, recog_data.operand[i], EMIT_AFTER); break; } } } /* Substitute stack hard reg numbers for stack virtual registers in INSN. Non-stack register numbers are not changed. REGSTACK is the current stack content. Insns may be emitted as needed to arrange the stack for the 387 based on the contents of the insn. Return whether a control flow insn was deleted in the process. */ static bool subst_stack_regs (rtx insn, stack regstack) { rtx *note_link, note; bool control_flow_insn_deleted = false; int i; if (GET_CODE (insn) == CALL_INSN) { int top = regstack->top; /* If there are any floating point parameters to be passed in registers for this call, make sure they are in the right order. */ if (top >= 0) { straighten_stack (PREV_INSN (insn), regstack); /* Now mark the arguments as dead after the call. */ while (regstack->top >= 0) { CLEAR_HARD_REG_BIT (regstack->reg_set, FIRST_STACK_REG + regstack->top); regstack->top--; } } } /* Do the actual substitution if any stack regs are mentioned. Since we only record whether entire insn mentions stack regs, and subst_stack_regs_pat only works for patterns that contain stack regs, we must check each pattern in a parallel here. A call_value_pop could fail otherwise. */ if (stack_regs_mentioned (insn)) { int n_operands = asm_noperands (PATTERN (insn)); if (n_operands >= 0) { /* This insn is an `asm' with operands. Decode the operands, decide how many are inputs, and do register substitution. Any REG_UNUSED notes will be handled by subst_asm_stack_regs. */ subst_asm_stack_regs (insn, regstack); return control_flow_insn_deleted; } if (GET_CODE (PATTERN (insn)) == PARALLEL) for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) { if (stack_regs_mentioned_p (XVECEXP (PATTERN (insn), 0, i))) { if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER) XVECEXP (PATTERN (insn), 0, i) = shallow_copy_rtx (XVECEXP (PATTERN (insn), 0, i)); control_flow_insn_deleted |= subst_stack_regs_pat (insn, regstack, XVECEXP (PATTERN (insn), 0, i)); } } else control_flow_insn_deleted |= subst_stack_regs_pat (insn, regstack, PATTERN (insn)); } /* subst_stack_regs_pat may have deleted a no-op insn. If so, any REG_UNUSED will already have been dealt with, so just return. */ if (GET_CODE (insn) == NOTE || INSN_DELETED_P (insn)) return control_flow_insn_deleted; /* If there is a REG_UNUSED note on a stack register on this insn, the indicated reg must be popped. The REG_UNUSED note is removed, since the form of the newly emitted pop insn references the reg, making it no longer `unused'. */ note_link = &REG_NOTES (insn); for (note = *note_link; note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_UNUSED && STACK_REG_P (XEXP (note, 0))) { *note_link = XEXP (note, 1); insn = emit_pop_insn (insn, regstack, XEXP (note, 0), EMIT_AFTER); } else note_link = &XEXP (note, 1); return control_flow_insn_deleted; } /* Change the organization of the stack so that it fits a new basic block.
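An illustrative trace with hypothetical register numbers: if this block ends with the order [r9 r12 r13], r13 on top, and the already-processed target block expects [r12 r9 r13], nothing needs popping and a couple of fxch-style swaps emitted below make the two orders agree.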
Some registers might have to be popped, but there can never be a register live in the new block that is not now live. Insert any needed insns before or after INSN, as indicated by WHERE. OLD is the original stack layout, and NEW is the desired form. OLD is updated to reflect the code emitted, ie, it will be the same as NEW upon return. This function will not preserve block_end[]. But that information is no longer needed once this has executed. */ static void change_stack (rtx insn, stack old, stack new, enum emit_where where) { int reg; int update_end = 0; /* We will be inserting new insns "backwards". If we are to insert after INSN, find the next insn, and insert before it. */ if (where == EMIT_AFTER) { if (current_block && BB_END (current_block) == insn) update_end = 1; insn = NEXT_INSN (insn); } /* Pop any registers that are not needed in the new block. */ for (reg = old->top; reg >= 0; reg--) if (! TEST_HARD_REG_BIT (new->reg_set, old->reg[reg])) emit_pop_insn (insn, old, FP_MODE_REG (old->reg[reg], DFmode), EMIT_BEFORE); if (new->top == -2) { /* If the new block has never been processed, then it can inherit the old stack order. */ new->top = old->top; memcpy (new->reg, old->reg, sizeof (new->reg)); } else { /* This block has been entered before, and we must match the previously selected stack order. */ /* By now, the only difference should be the order of the stack, not their depth or liveliness. */ GO_IF_HARD_REG_EQUAL (old->reg_set, new->reg_set, win); abort (); win: if (old->top != new->top) abort (); /* If the stack is not empty (new->top != -1), loop here emitting swaps until the stack is correct. The worst case number of swaps emitted is N + 2, where N is the depth of the stack. In some cases, the reg at the top of stack may be correct, but swapped anyway in order to fix other regs. But since we never swap any other reg away from its correct slot, this algorithm will converge. */ if (new->top != -1) do { /* Swap the reg at top of stack into the position it is supposed to be in, until the correct top of stack appears. */ while (old->reg[old->top] != new->reg[new->top]) { for (reg = new->top; reg >= 0; reg--) if (new->reg[reg] == old->reg[old->top]) break; if (reg == -1) abort (); emit_swap_insn (insn, old, FP_MODE_REG (old->reg[reg], DFmode)); } /* See if any regs remain incorrect. If so, bring an incorrect reg to the top of stack, and let the while loop above fix it. */ for (reg = new->top; reg >= 0; reg--) if (new->reg[reg] != old->reg[reg]) { emit_swap_insn (insn, old, FP_MODE_REG (old->reg[reg], DFmode)); break; } } while (reg >= 0); /* At this point there must be no differences. */ for (reg = old->top; reg >= 0; reg--) if (old->reg[reg] != new->reg[reg]) abort (); } if (update_end) BB_END (current_block) = PREV_INSN (insn); } /* Print stack configuration. */ static void print_stack (FILE *file, stack s) { if (! file) return; if (s->top == -2) fprintf (file, "uninitialized\n"); else if (s->top == -1) fprintf (file, "empty\n"); else { int i; fputs ("[ ", file); for (i = 0; i <= s->top; ++i) fprintf (file, "%d ", s->reg[i]); fputs ("]\n", file); } } /* This function was doing life analysis. We now let the regular live code do it's job, so we only need to check some extra invariants that reg-stack expects. Primary among these being that all registers are initialized before use. The function returns true when code was emitted to CFG edges and commit_edge_insertions needs to be called. 
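One concrete way this arises, given as illustration: a function that stores to a floating point variable on only some paths can reach a use with a stack register live on entry that nothing ever pushed; such registers are loaded with a QNaN below so that later pushes and pops stay balanced.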
*/ static int convert_regs_entry (void) { int inserted = 0; edge e; basic_block block; FOR_EACH_BB_REVERSE (block) { r2s_block_info bi = R2S_BLOCK_INFO (block); int reg; /* Set current register status at last instruction `uninitialized'. */ bi->stack_in.top = -2; /* Copy live_at_end and live_at_start into temporaries. */ for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; reg++) { if (REGNO_REG_SET_P (block->global_live_at_end, reg)) SET_HARD_REG_BIT (bi->out_reg_set, reg); if (REGNO_REG_SET_P (block->global_live_at_start, reg)) SET_HARD_REG_BIT (bi->stack_in.reg_set, reg); } } /* Load something into each stack register live at function entry. Such live registers can be caused by uninitialized variables or functions not returning values on all paths. In order to keep the push/pop code happy, and to not scrog the register stack, we must put something in these registers. Use a QNaN. Note that we are inserting converted code here. This code is never seen by the convert_regs pass. */ for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next) { basic_block block = e->dest; r2s_block_info bi = R2S_BLOCK_INFO (block); int reg, top = -1; for (reg = LAST_STACK_REG; reg >= FIRST_STACK_REG; --reg) if (TEST_HARD_REG_BIT (bi->stack_in.reg_set, reg)) { rtx init; bi->stack_in.reg[++top] = reg; init = gen_rtx_SET (VOIDmode, FP_MODE_REG (FIRST_STACK_REG, SFmode), not_a_num); insert_insn_on_edge (init, e); inserted = 1; } bi->stack_in.top = top; } return inserted; } /* Construct the desired stack for function exit. This will either be `empty', or the function return value at top-of-stack. */ static void convert_regs_exit (void) { int value_reg_low, value_reg_high; stack output_stack; rtx retvalue; retvalue = stack_result (current_function_decl); value_reg_low = value_reg_high = -1; if (retvalue) { value_reg_low = REGNO (retvalue); value_reg_high = value_reg_low + hard_regno_nregs[value_reg_low][GET_MODE (retvalue)] - 1; } output_stack = &R2S_BLOCK_INFO (EXIT_BLOCK_PTR)->stack_in; if (value_reg_low == -1) output_stack->top = -1; else { int reg; output_stack->top = value_reg_high - value_reg_low; for (reg = value_reg_low; reg <= value_reg_high; ++reg) { output_stack->reg[value_reg_high - reg] = reg; SET_HARD_REG_BIT (output_stack->reg_set, reg); } } } /* Adjust the stack of this block on exit to match the stack of the target block, or copy stack info into the stack of the successor of the successor hasn't been processed yet. */ static bool compensate_edge (edge e, FILE *file) { basic_block block = e->src, target = e->dest; r2s_block_info bi = R2S_BLOCK_INFO (block); struct stack_def regstack, tmpstack; stack target_stack = &R2S_BLOCK_INFO (target)->stack_in; int reg; current_block = block; regstack = bi->stack_out; if (file) fprintf (file, "Edge %d->%d: ", block->index, target->index); if (target_stack->top == -2) { /* The target block hasn't had a stack order selected. We need merely ensure that no pops are needed. */ for (reg = regstack.top; reg >= 0; --reg) if (!TEST_HARD_REG_BIT (target_stack->reg_set, regstack.reg[reg])) break; if (reg == -1) { if (file) fprintf (file, "new block; copying stack position\n"); /* change_stack kills values in regstack. 
*/ tmpstack = regstack; change_stack (BB_END (block), &tmpstack, target_stack, EMIT_AFTER); return false; } if (file) fprintf (file, "new block; pops needed\n"); } else { if (target_stack->top == regstack.top) { for (reg = target_stack->top; reg >= 0; --reg) if (target_stack->reg[reg] != regstack.reg[reg]) break; if (reg == -1) { if (file) fprintf (file, "no changes needed\n"); return false; } } if (file) { fprintf (file, "correcting stack to "); print_stack (file, target_stack); } } /* Care for non-call EH edges specially. The normal return path have values in registers. These will be popped en masse by the unwind library. */ if ((e->flags & (EDGE_EH | EDGE_ABNORMAL_CALL)) == EDGE_EH) target_stack->top = -1; /* Other calls may appear to have values live in st(0), but the abnormal return path will not have actually loaded the values. */ else if (e->flags & EDGE_ABNORMAL_CALL) { /* Assert that the lifetimes are as we expect -- one value live at st(0) on the end of the source block, and no values live at the beginning of the destination block. */ HARD_REG_SET tmp; CLEAR_HARD_REG_SET (tmp); GO_IF_HARD_REG_EQUAL (target_stack->reg_set, tmp, eh1); abort (); eh1: /* We are sure that there is st(0) live, otherwise we won't compensate. For complex return values, we may have st(1) live as well. */ SET_HARD_REG_BIT (tmp, FIRST_STACK_REG); if (TEST_HARD_REG_BIT (regstack.reg_set, FIRST_STACK_REG + 1)) SET_HARD_REG_BIT (tmp, FIRST_STACK_REG + 1); GO_IF_HARD_REG_EQUAL (regstack.reg_set, tmp, eh2); abort (); eh2: target_stack->top = -1; } /* It is better to output directly to the end of the block instead of to the edge, because emit_swap can do minimal insn scheduling. We can do this when there is only one edge out, and it is not abnormal. */ else if (block->succ->succ_next == NULL && !(e->flags & EDGE_ABNORMAL)) { /* change_stack kills values in regstack. */ tmpstack = regstack; change_stack (BB_END (block), &tmpstack, target_stack, (GET_CODE (BB_END (block)) == JUMP_INSN ? EMIT_BEFORE : EMIT_AFTER)); } else { rtx seq, after; /* We don't support abnormal edges. Global takes care to avoid any live register across them, so we should never have to insert instructions on such edges. */ if (e->flags & EDGE_ABNORMAL) abort (); current_block = NULL; start_sequence (); /* ??? change_stack needs some point to emit insns after. */ after = emit_note (NOTE_INSN_DELETED); tmpstack = regstack; change_stack (after, &tmpstack, target_stack, EMIT_BEFORE); seq = get_insns (); end_sequence (); insert_insn_on_edge (seq, e); return true; } return false; } /* Convert stack register references in one block. */ static int convert_regs_1 (FILE *file, basic_block block) { struct stack_def regstack; r2s_block_info bi = R2S_BLOCK_INFO (block); int deleted, inserted, reg; rtx insn, next; edge e, beste = NULL; bool control_flow_insn_deleted = false; inserted = 0; deleted = 0; any_malformed_asm = false; /* Find the edge we will copy stack from. It should be the most frequent one as it will get cheapest after compensation code is generated, if multiple such exists, take one with largest count, prefer critical one (as splitting critical edges is more expensive), or one with lowest index, to avoid random changes with different orders of the edges. */ for (e = block->pred; e ; e = e->pred_next) { if (e->flags & EDGE_DFS_BACK) ; else if (! 
beste) beste = e; else if (EDGE_FREQUENCY (beste) < EDGE_FREQUENCY (e)) beste = e; else if (EDGE_FREQUENCY (beste) > EDGE_FREQUENCY (e)) ; else if (beste->count < e->count) beste = e; else if (beste->count > e->count) ; else if ((EDGE_CRITICAL_P (e) != 0) != (EDGE_CRITICAL_P (beste) != 0)) { if (EDGE_CRITICAL_P (e)) beste = e; } else if (e->src->index < beste->src->index) beste = e; } /* Initialize stack at block entry. */ if (bi->stack_in.top == -2) { if (beste) inserted |= compensate_edge (beste, file); else { /* No predecessors. Create an arbitrary input stack. */ int reg; bi->stack_in.top = -1; for (reg = LAST_STACK_REG; reg >= FIRST_STACK_REG; --reg) if (TEST_HARD_REG_BIT (bi->stack_in.reg_set, reg)) bi->stack_in.reg[++bi->stack_in.top] = reg; } } else /* Entry blocks do have stack already initialized. */ beste = NULL; current_block = block; if (file) { fprintf (file, "\nBasic block %d\nInput stack: ", block->index); print_stack (file, &bi->stack_in); } /* Process all insns in this block. Keep track of NEXT so that we don't process insns emitted while substituting in INSN. */ next = BB_HEAD (block); regstack = bi->stack_in; do { insn = next; next = NEXT_INSN (insn); /* Ensure we have not missed a block boundary. */ if (next == NULL) abort (); if (insn == BB_END (block)) next = NULL; /* Don't bother processing unless there is a stack reg mentioned or if it's a CALL_INSN. */ if (stack_regs_mentioned (insn) || GET_CODE (insn) == CALL_INSN) { if (file) { fprintf (file, " insn %d input stack: ", INSN_UID (insn)); print_stack (file, &regstack); } control_flow_insn_deleted |= subst_stack_regs (insn, &regstack); } } while (next); if (file) { fprintf (file, "Expected live registers ["); for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; ++reg) if (TEST_HARD_REG_BIT (bi->out_reg_set, reg)) fprintf (file, " %d", reg); fprintf (file, " ]\nOutput stack: "); print_stack (file, &regstack); } insn = BB_END (block); if (GET_CODE (insn) == JUMP_INSN) insn = PREV_INSN (insn); /* If the function is declared to return a value, but it returns one in only some cases, some registers might come live here. Emit necessary moves for them. */ for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; ++reg) { if (TEST_HARD_REG_BIT (bi->out_reg_set, reg) && ! TEST_HARD_REG_BIT (regstack.reg_set, reg)) { rtx set; if (file) { fprintf (file, "Emitting insn initializing reg %d\n", reg); } set = gen_rtx_SET (VOIDmode, FP_MODE_REG (reg, SFmode), not_a_num); insn = emit_insn_after (set, insn); control_flow_insn_deleted |= subst_stack_regs (insn, &regstack); } } /* Amongst the insns possibly deleted during the substitution process above, might have been the only trapping insn in the block. We purge the now possibly dead EH edges here to avoid an ICE from fixup_abnormal_edges, called at the end of convert_regs. The order in which we process the blocks ensures that we never delete an already processed edge. Note that, at this point, the CFG may have been damaged by the emission of instructions after an abnormal call, which moves the basic block end (and is the reason why we call fixup_abnormal_edges later). So we must be sure that the trapping insn has been deleted before trying to purge dead edges, otherwise we risk purging valid edges. ??? We are normally supposed not to delete trapping insns, so we pretend that the insns deleted above don't actually trap. It would have been better to detect this earlier and avoid creating the EH edge in the first place, still, but we don't have enough information at that time.
*/ if (control_flow_insn_deleted) purge_dead_edges (block); /* Something failed if the stack lives don't match. If we had malformed asms, we zapped the instruction itself, but that didn't produce the same pattern of register kills as before. */ GO_IF_HARD_REG_EQUAL (regstack.reg_set, bi->out_reg_set, win); if (!any_malformed_asm) abort (); win: bi->stack_out = regstack; /* Compensate the back edges, as those wasn't visited yet. */ for (e = block->succ; e ; e = e->succ_next) { if (e->flags & EDGE_DFS_BACK || (e->dest == EXIT_BLOCK_PTR)) { if (!R2S_BLOCK_INFO (e->dest)->done && e->dest != block) abort (); inserted |= compensate_edge (e, file); } } for (e = block->pred; e ; e = e->pred_next) { if (e != beste && !(e->flags & EDGE_DFS_BACK) && e->src != ENTRY_BLOCK_PTR) { if (!R2S_BLOCK_INFO (e->src)->done) abort (); inserted |= compensate_edge (e, file); } } return inserted; } /* Convert registers in all blocks reachable from BLOCK. */ static int convert_regs_2 (FILE *file, basic_block block) { basic_block *stack, *sp; int inserted; /* We process the blocks in a top-down manner, in a way such that one block is only processed after all its predecessors. The number of predecessors of every block has already been computed. */ stack = xmalloc (sizeof (*stack) * n_basic_blocks); sp = stack; *sp++ = block; inserted = 0; do { edge e; block = *--sp; /* Processing BLOCK is achieved by convert_regs_1, which may purge some dead EH outgoing edge after the deletion of the trapping insn inside the block. Since the number of predecessors of BLOCK's successors was computed based on the initial edge set, we check the necessity to process some of these successors before such an edge deletion may happen. However, there is a pitfall: if BLOCK is the only predecessor of a successor and the edge between them happens to be deleted, the successor becomes unreachable and should not be processed. The problem is that there is no way to preventively detect this case so we stack the successor in all cases and hand over the task of fixing up the discrepancy to convert_regs_1. */ for (e = block->succ; e ; e = e->succ_next) if (! (e->flags & EDGE_DFS_BACK)) { R2S_BLOCK_INFO (e->dest)->predecessors--; if (!R2S_BLOCK_INFO (e->dest)->predecessors) *sp++ = e->dest; } inserted |= convert_regs_1 (file, block); R2S_BLOCK_INFO (block)->done = 1; } while (sp != stack); return inserted; } /* Traverse all basic blocks in a function, converting the register references in each insn from the "flat" register file that gcc uses, to the stack-like registers the 387 uses. */ static int convert_regs (FILE *file) { int inserted; basic_block b; edge e; /* Initialize uninitialized registers on function entry. */ inserted = convert_regs_entry (); /* Construct the desired stack for function exit. */ convert_regs_exit (); R2S_BLOCK_INFO (EXIT_BLOCK_PTR)->done = 1; /* ??? Future: process inner loops first, and give them arbitrary initial stacks which emit_swap_insn can modify. This ought to prevent double fxch that often appears at the head of a loop. */ /* Process all blocks reachable from all entry points. */ for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next) inserted |= convert_regs_2 (file, e->dest); /* ??? Process all unreachable blocks. Though there's no excuse for keeping these even when not optimizing. */ FOR_EACH_BB (b) { r2s_block_info bi = R2S_BLOCK_INFO (b); if (! 
bi->done) inserted |= convert_regs_2 (file, b); } clear_aux_for_blocks (); fixup_abnormal_edges (); if (inserted) commit_edge_insertions (); if (file) fputc ('\n', file); return inserted; } #endif /* STACK_REGS */ /* Type information for reg-stack.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_reg_stack_h[] = { { &stack_regs_mentioned_data, 1, sizeof (stack_regs_mentioned_data), &gt_ggc_mx_varray_head_tag, &gt_pch_nx_varray_head_tag }, LAST_GGC_ROOT_TAB }; /* Compute register class preferences for pseudo-registers. Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1996 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains two passes of the compiler: reg_scan and reg_class. It also defines some tables of information about the hardware registers and a function init_reg_sets to initialize the tables. */ static void init_reg_sets_1 (void); static void init_reg_autoinc (void); /* If we have auto-increment or auto-decrement and we can have secondary reloads, we are not allowed to use classes requiring secondary reloads for pseudos auto-incremented since reload can't handle it. */ #ifdef AUTO_INC_DEC #if defined(SECONDARY_INPUT_RELOAD_CLASS) || defined(SECONDARY_OUTPUT_RELOAD_CLASS) #define FORBIDDEN_INC_DEC_CLASSES #endif #endif /* Register tables used by many passes. */ /* Indexed by hard register number, contains 1 for registers that are fixed use (stack pointer, pc, frame pointer, etc.). These are the registers that cannot be used to allocate a pseudo reg for general use. */ char fixed_regs[FIRST_PSEUDO_REGISTER]; /* Same info as a HARD_REG_SET. */ HARD_REG_SET fixed_reg_set; /* Data for initializing the above. */ static const char initial_fixed_regs[] = FIXED_REGISTERS; /* Indexed by hard register number, contains 1 for registers that are fixed use or are clobbered by function calls. These are the registers that cannot be used to allocate a pseudo reg whose life crosses calls unless we are able to save/restore them across the calls.
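As an illustration, assuming the usual IA-32 calling convention: %eax, %ecx and %edx end up marked call-used here, while %ebx, %esi and %edi do not, so only the latter are attractive for pseudos whose lifetime crosses a call.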
*/ char call_used_regs[FIRST_PSEUDO_REGISTER]; /* Same info as a HARD_REG_SET. */ HARD_REG_SET call_used_reg_set; /* HARD_REG_SET of registers we want to avoid caller saving. */ HARD_REG_SET losing_caller_save_reg_set; /* Data for initializing the above. */ static const char initial_call_used_regs[] = CALL_USED_REGISTERS; /* This is much like call_used_regs, except it doesn't have to be a superset of FIXED_REGISTERS. This vector indicates what is really call clobbered, and is used when defining regs_invalidated_by_call. */ #ifdef CALL_REALLY_USED_REGISTERS char call_really_used_regs[] = CALL_REALLY_USED_REGISTERS; #endif /* Indexed by hard register number, contains 1 for registers that are fixed use or call used registers that cannot hold quantities across calls even if we are willing to save and restore them. call fixed registers are a subset of call used registers. */ char call_fixed_regs[FIRST_PSEUDO_REGISTER]; /* The same info as a HARD_REG_SET. */ HARD_REG_SET call_fixed_reg_set; /* Number of non-fixed registers. */ int n_non_fixed_regs; /* Indexed by hard register number, contains 1 for registers that are being used for global register decls. These must be exempt from ordinary flow analysis and are also considered fixed. */ char global_regs[FIRST_PSEUDO_REGISTER]; /* Contains 1 for registers that are set or clobbered by calls. */ /* ??? Ideally, this would be just call_used_regs plus global_regs, but for someone's bright idea to have call_used_regs strictly include fixed_regs. Which leaves us guessing as to the set of fixed_regs that are actually preserved. We know for sure that those associated with the local stack frame are safe, but scant others. */ HARD_REG_SET regs_invalidated_by_call; /* Table of register numbers in the order in which to try to use them. */ #ifdef REG_ALLOC_ORDER int reg_alloc_order[FIRST_PSEUDO_REGISTER] = REG_ALLOC_ORDER; /* The inverse of reg_alloc_order. */ int inv_reg_alloc_order[FIRST_PSEUDO_REGISTER]; #endif /* For each reg class, a HARD_REG_SET saying which registers are in it. */ HARD_REG_SET reg_class_contents[N_REG_CLASSES]; /* The same information, but as an array of unsigned ints. We copy from these unsigned ints to the table above. We do this so the tm.h files do not have to be aware of the wordsize for machines with <= 64 regs. Note that we hard-code 32 here, not HOST_BITS_PER_INT. */ #define N_REG_INTS \ ((FIRST_PSEUDO_REGISTER + (32 - 1)) / 32) static const unsigned int_reg_class_contents[N_REG_CLASSES][N_REG_INTS] = REG_CLASS_CONTENTS; /* For each reg class, number of regs it contains. */ unsigned int reg_class_size[N_REG_CLASSES]; /* For each reg class, table listing all the containing classes. */ enum reg_class reg_class_superclasses[N_REG_CLASSES][N_REG_CLASSES]; /* For each reg class, table listing all the classes contained in it. */ enum reg_class reg_class_subclasses[N_REG_CLASSES][N_REG_CLASSES]; /* For each pair of reg classes, a largest reg class contained in their union. */ enum reg_class reg_class_subunion[N_REG_CLASSES][N_REG_CLASSES]; /* For each pair of reg classes, the smallest reg class containing their union. */ enum reg_class reg_class_superunion[N_REG_CLASSES][N_REG_CLASSES]; /* Array containing all of the register names. */ const char * reg_names[] = REGISTER_NAMES; /* For each hard register, the widest mode object that it can contain. This will be a MODE_INT mode if the register can hold integers. Otherwise it will be a MODE_FLOAT or a MODE_CC mode, whichever is valid for the register. 
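As an illustration, assuming a 32-bit x86 target: the general registers would get SImode here, since DImode needs two of them, while the 387 stack registers would get their widest float mode, as they cannot hold integers directly.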
*/ enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER]; /* 1 if class does contain register of given mode. */ static char contains_reg_of_mode [N_REG_CLASSES] [MAX_MACHINE_MODE]; /* Maximum cost of moving from a register in one class to a register in another class. Based on REGISTER_MOVE_COST. */ static int move_cost[MAX_MACHINE_MODE][N_REG_CLASSES][N_REG_CLASSES]; /* Similar, but here we don't have to move if the first index is a subset of the second so in that case the cost is zero. */ static int may_move_in_cost[MAX_MACHINE_MODE][N_REG_CLASSES][N_REG_CLASSES]; /* Similar, but here we don't have to move if the first index is a superset of the second so in that case the cost is zero. */ static int may_move_out_cost[MAX_MACHINE_MODE][N_REG_CLASSES][N_REG_CLASSES]; #ifdef FORBIDDEN_INC_DEC_CLASSES /* These are the classes that regs which are auto-incremented or decremented cannot be put in. */ static int forbidden_inc_dec_class[N_REG_CLASSES]; /* Indexed by n, is nonzero if (REG n) is used in an auto-inc or auto-dec context. */ static char *in_inc_dec; #endif /* FORBIDDEN_INC_DEC_CLASSES */ #ifdef CANNOT_CHANGE_MODE_CLASS /* All registers that have been subreged. Indexed by regno * MAX_MACHINE_MODE + mode. */ bitmap_head subregs_of_mode; #endif /* Sample MEM values for use by memory_move_secondary_cost. */ static GTY(()) rtx top_of_stack[MAX_MACHINE_MODE]; /* Linked list of reg_info structures allocated for reg_n_info array. Grouping all of the allocated structures together in one lump means only one call to bzero to clear them, rather than n smaller calls. */ struct reg_info_data { struct reg_info_data *next; /* next set of reg_info structures */ size_t min_index; /* minimum index # */ size_t max_index; /* maximum index # */ char used_p; /* nonzero if this has been used previously */ reg_info data[1]; /* beginning of the reg_info data */ }; static struct reg_info_data *reg_info_head; /* No more global register variables may be declared; true once regclass has been initialized. */ static int no_global_reg_vars = 0; /* Specify number of hard registers given machine mode occupy. */ unsigned char hard_regno_nregs[FIRST_PSEUDO_REGISTER][MAX_MACHINE_MODE]; /* Function called only once to initialize the above data on reg usage. Once this is done, various switches may override. */ void init_reg_sets (void) { int i, j; /* First copy the register information from the initial int form into the regsets. */ for (i = 0; i < N_REG_CLASSES; i++) { CLEAR_HARD_REG_SET (reg_class_contents[i]); /* Note that we hard-code 32 here, not HOST_BITS_PER_INT. */ for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) if (int_reg_class_contents[i][j / 32] & ((unsigned) 1 << (j % 32))) SET_HARD_REG_BIT (reg_class_contents[i], j); } /* Sanity check: make sure the target macros FIXED_REGISTERS and CALL_USED_REGISTERS had the right number of initializers. */ if (sizeof fixed_regs != sizeof initial_fixed_regs || sizeof call_used_regs != sizeof initial_call_used_regs) abort(); memcpy (fixed_regs, initial_fixed_regs, sizeof fixed_regs); memcpy (call_used_regs, initial_call_used_regs, sizeof call_used_regs); memset (global_regs, 0, sizeof global_regs); /* Do any additional initialization regsets may need. */ INIT_ONCE_REG_SET (); #ifdef REG_ALLOC_ORDER for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) inv_reg_alloc_order[reg_alloc_order[i]] = i; #endif } /* After switches have been processed, which perhaps alter `fixed_regs' and `call_used_regs', convert them to HARD_REG_SETs. 
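For instance, assuming the usual option handling: a switch such as -ffixed-ebx updates fixed_regs through fix_register below, and only here is that change folded into fixed_reg_set, regs_invalidated_by_call and the move cost tables.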
*/ static void init_reg_sets_1 (void) { unsigned int i, j; unsigned int /* enum machine_mode */ m; char allocatable_regs_of_mode [MAX_MACHINE_MODE]; /* This macro allows the fixed or call-used registers and the register classes to depend on target flags. */ #ifdef CONDITIONAL_REGISTER_USAGE CONDITIONAL_REGISTER_USAGE; #endif /* Compute number of hard regs in each class. */ memset (reg_class_size, 0, sizeof reg_class_size); for (i = 0; i < N_REG_CLASSES; i++) for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) if (TEST_HARD_REG_BIT (reg_class_contents[i], j)) reg_class_size[i]++; /* Initialize the table of subunions. reg_class_subunion[I][J] gets the largest-numbered reg-class that is contained in the union of classes I and J. */ for (i = 0; i < N_REG_CLASSES; i++) { for (j = 0; j < N_REG_CLASSES; j++) { HARD_REG_SET c; int k; COPY_HARD_REG_SET (c, reg_class_contents[i]); IOR_HARD_REG_SET (c, reg_class_contents[j]); for (k = 0; k < N_REG_CLASSES; k++) { GO_IF_HARD_REG_SUBSET (reg_class_contents[k], c, subclass1); continue; subclass1: /* Keep the largest subclass. */ /* SPEE 900308 */ GO_IF_HARD_REG_SUBSET (reg_class_contents[k], reg_class_contents[(int) reg_class_subunion[i][j]], subclass2); reg_class_subunion[i][j] = (enum reg_class) k; subclass2: ; } } } /* Initialize the table of superunions. reg_class_superunion[I][J] gets the smallest-numbered reg-class containing the union of classes I and J. */ for (i = 0; i < N_REG_CLASSES; i++) { for (j = 0; j < N_REG_CLASSES; j++) { HARD_REG_SET c; int k; COPY_HARD_REG_SET (c, reg_class_contents[i]); IOR_HARD_REG_SET (c, reg_class_contents[j]); for (k = 0; k < N_REG_CLASSES; k++) GO_IF_HARD_REG_SUBSET (c, reg_class_contents[k], superclass); superclass: reg_class_superunion[i][j] = (enum reg_class) k; } } /* Initialize the tables of subclasses and superclasses of each reg class. First clear the whole table, then add the elements as they are found. */ for (i = 0; i < N_REG_CLASSES; i++) { for (j = 0; j < N_REG_CLASSES; j++) { reg_class_superclasses[i][j] = LIM_REG_CLASSES; reg_class_subclasses[i][j] = LIM_REG_CLASSES; } } for (i = 0; i < N_REG_CLASSES; i++) { if (i == (int) NO_REGS) continue; for (j = i + 1; j < N_REG_CLASSES; j++) { enum reg_class *p; GO_IF_HARD_REG_SUBSET (reg_class_contents[i], reg_class_contents[j], subclass); continue; subclass: /* Reg class I is a subclass of J. Add J to the table of superclasses of I. */ p = &reg_class_superclasses[i][0]; while (*p != LIM_REG_CLASSES) p++; *p = (enum reg_class) j; /* Add I to the table of subclasses of J. */ p = &reg_class_subclasses[j][0]; while (*p != LIM_REG_CLASSES) p++; *p = (enum reg_class) i; } } /* Initialize "constant" tables. */ CLEAR_HARD_REG_SET (fixed_reg_set); CLEAR_HARD_REG_SET (call_used_reg_set); CLEAR_HARD_REG_SET (call_fixed_reg_set); CLEAR_HARD_REG_SET (regs_invalidated_by_call); memcpy (call_fixed_regs, fixed_regs, sizeof call_fixed_regs); n_non_fixed_regs = 0; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { if (fixed_regs[i]) SET_HARD_REG_BIT (fixed_reg_set, i); else n_non_fixed_regs++; if (call_used_regs[i]) SET_HARD_REG_BIT (call_used_reg_set, i); if (call_fixed_regs[i]) SET_HARD_REG_BIT (call_fixed_reg_set, i); if (CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (i))) SET_HARD_REG_BIT (losing_caller_save_reg_set, i); /* There are a couple of fixed registers that we know are safe to exclude from being clobbered by calls: The frame pointer is always preserved across calls. The arg pointer is if it is fixed.
The stack pointer usually is, unless RETURN_POPS_ARGS, in which case an explicit CLOBBER will be present. If we are generating PIC code, the PIC offset table register is preserved across calls, though the target can override that. */ if (i == STACK_POINTER_REGNUM || i == FRAME_POINTER_REGNUM) ; #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM else if (i == HARD_FRAME_POINTER_REGNUM) ; #endif #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM else if (i == ARG_POINTER_REGNUM && fixed_regs[i]) ; #endif #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED else if (i == (unsigned) PIC_OFFSET_TABLE_REGNUM && fixed_regs[i]) ; #endif else if (0 #ifdef CALL_REALLY_USED_REGISTERS || call_really_used_regs[i] #else || call_used_regs[i] #endif || global_regs[i]) SET_HARD_REG_BIT (regs_invalidated_by_call, i); } memset (contains_reg_of_mode, 0, sizeof (contains_reg_of_mode)); memset (allocatable_regs_of_mode, 0, sizeof (allocatable_regs_of_mode)); for (m = 0; m < (unsigned int) MAX_MACHINE_MODE; m++) for (i = 0; i < N_REG_CLASSES; i++) if ((unsigned) CLASS_MAX_NREGS (i, m) <= reg_class_size[i]) for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) if (!fixed_regs [j] && TEST_HARD_REG_BIT (reg_class_contents[i], j) && HARD_REGNO_MODE_OK (j, m)) { contains_reg_of_mode [i][m] = 1; allocatable_regs_of_mode [m] = 1; break; } /* Initialize the move cost table. Find every subset of each class and take the maximum cost of moving any subset to any other. */ for (m = 0; m < (unsigned int) MAX_MACHINE_MODE; m++) if (allocatable_regs_of_mode [m]) { for (i = 0; i < N_REG_CLASSES; i++) if (contains_reg_of_mode [i][m]) for (j = 0; j < N_REG_CLASSES; j++) { int cost; enum reg_class *p1, *p2; if (!contains_reg_of_mode [j][m]) { move_cost[m][i][j] = 65536; may_move_in_cost[m][i][j] = 65536; may_move_out_cost[m][i][j] = 65536; } else { cost = REGISTER_MOVE_COST (m, i, j); for (p2 = &reg_class_subclasses[j][0]; *p2 != LIM_REG_CLASSES; p2++) if (*p2 != i && contains_reg_of_mode [*p2][m]) cost = MAX (cost, move_cost [m][i][*p2]); for (p1 = &reg_class_subclasses[i][0]; *p1 != LIM_REG_CLASSES; p1++) if (*p1 != j && contains_reg_of_mode [*p1][m]) cost = MAX (cost, move_cost [m][*p1][j]); move_cost[m][i][j] = cost; if (reg_class_subset_p (i, j)) may_move_in_cost[m][i][j] = 0; else may_move_in_cost[m][i][j] = cost; if (reg_class_subset_p (j, i)) may_move_out_cost[m][i][j] = 0; else may_move_out_cost[m][i][j] = cost; } } else for (j = 0; j < N_REG_CLASSES; j++) { move_cost[m][i][j] = 65536; may_move_in_cost[m][i][j] = 65536; may_move_out_cost[m][i][j] = 65536; } } } /* Compute the table of register modes. These values are used to record death information for individual registers (as opposed to a multi-register mode). */ void init_reg_modes_once (void) { int i, j; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) for (j = 0; j < MAX_MACHINE_MODE; j++) hard_regno_nregs[i][j] = HARD_REGNO_NREGS(i, (enum machine_mode)j); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { reg_raw_mode[i] = choose_hard_reg_mode (i, 1, false); /* If we couldn't find a valid mode, just use the previous mode. ??? One situation in which we need to do this is on the mips where HARD_REGNO_NREGS (fpreg, [SD]Fmode) returns 2. Ideally we'd like to use DF mode for the even registers and VOIDmode for the odd (for the cpu models where the odd ones are inaccessible). */ if (reg_raw_mode[i] == VOIDmode) reg_raw_mode[i] = i == 0 ? word_mode : reg_raw_mode[i-1]; } } /* Finish initializing the register sets and initialize the register modes.
*/ void init_regs (void) { /* This finishes what was started by init_reg_sets, but couldn't be done until after register usage was specified. */ init_reg_sets_1 (); init_reg_autoinc (); } /* Initialize some fake stack-frame MEM references for use in memory_move_secondary_cost. */ void init_fake_stack_mems (void) { #ifdef HAVE_SECONDARY_RELOADS { int i; for (i = 0; i < MAX_MACHINE_MODE; i++) top_of_stack[i] = gen_rtx_MEM (i, stack_pointer_rtx); } #endif } #ifdef HAVE_SECONDARY_RELOADS /* Compute extra cost of moving registers to/from memory due to reloads. Only needed if secondary reloads are required for memory moves. */ int memory_move_secondary_cost (enum machine_mode mode, enum reg_class class, int in) { enum reg_class altclass; int partial_cost = 0; /* We need a memory reference to feed to SECONDARY... macros. */ /* mem may be unused even if the SECONDARY_ macros are defined. */ rtx mem ATTRIBUTE_UNUSED = top_of_stack[(int) mode]; if (in) { #ifdef SECONDARY_INPUT_RELOAD_CLASS altclass = SECONDARY_INPUT_RELOAD_CLASS (class, mode, mem); #else altclass = NO_REGS; #endif } else { #ifdef SECONDARY_OUTPUT_RELOAD_CLASS altclass = SECONDARY_OUTPUT_RELOAD_CLASS (class, mode, mem); #else altclass = NO_REGS; #endif } if (altclass == NO_REGS) return 0; if (in) partial_cost = REGISTER_MOVE_COST (mode, altclass, class); else partial_cost = REGISTER_MOVE_COST (mode, class, altclass); if (class == altclass) /* This isn't simply a copy-to-temporary situation. Can't guess what it is, so MEMORY_MOVE_COST really ought not to be calling here in that case. I'm tempted to put in an abort here, but returning this will probably only give poor estimates, which is what we would've had before this code anyways. */ return partial_cost; /* Check if the secondary reload register will also need a secondary reload. */ return memory_move_secondary_cost (mode, altclass, in) + partial_cost; } #endif /* Return a machine mode that is legitimate for hard reg REGNO and large enough to save nregs. If we can't find one, return VOIDmode. If CALL_SAVED is true, only consider modes that are call saved. */ enum machine_mode choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED, unsigned int nregs, bool call_saved) { unsigned int /* enum machine_mode */ m; enum machine_mode found_mode = VOIDmode, mode; /* We first look for the largest integer mode that can be validly held in REGNO. If none, we look for the largest floating-point mode. If we still didn't find a valid mode, try CCmode. */ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) if ((unsigned) hard_regno_nregs[regno][mode] == nregs && HARD_REGNO_MODE_OK (regno, mode) && (! call_saved || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))) found_mode = mode; if (found_mode != VOIDmode) return found_mode; for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) if ((unsigned) hard_regno_nregs[regno][mode] == nregs && HARD_REGNO_MODE_OK (regno, mode) && (! call_saved || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))) found_mode = mode; if (found_mode != VOIDmode) return found_mode; for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_FLOAT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) if ((unsigned) hard_regno_nregs[regno][mode] == nregs && HARD_REGNO_MODE_OK (regno, mode) && (! call_saved || ! 
HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))) found_mode = mode; if (found_mode != VOIDmode) return found_mode; for (mode = GET_CLASS_NARROWEST_MODE (MODE_VECTOR_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) if ((unsigned) hard_regno_nregs[regno][mode] == nregs && HARD_REGNO_MODE_OK (regno, mode) && (! call_saved || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))) found_mode = mode; if (found_mode != VOIDmode) return found_mode; /* Iterate over all of the CCmodes. */ for (m = (unsigned int) CCmode; m < (unsigned int) NUM_MACHINE_MODES; ++m) { mode = (enum machine_mode) m; if ((unsigned) hard_regno_nregs[regno][mode] == nregs && HARD_REGNO_MODE_OK (regno, mode) && (! call_saved || ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))) return mode; } /* We can't find a mode valid for this register. */ return VOIDmode; } /* Specify the usage characteristics of the register named NAME. It should be a fixed register if FIXED and a call-used register if CALL_USED. */ void fix_register (const char *name, int fixed, int call_used) { int i; /* Decode the name and update the primary form of the register info. */ if ((i = decode_reg_name (name)) >= 0) { if ((i == STACK_POINTER_REGNUM #ifdef HARD_FRAME_POINTER_REGNUM || i == HARD_FRAME_POINTER_REGNUM #else || i == FRAME_POINTER_REGNUM #endif ) && (fixed == 0 || call_used == 0)) { static const char * const what_option[2][2] = { { "call-saved", "call-used" }, { "no-such-option", "fixed" }}; error ("can't use '%s' as a %s register", name, what_option[fixed][call_used]); } else { fixed_regs[i] = fixed; call_used_regs[i] = call_used; #ifdef CALL_REALLY_USED_REGISTERS if (fixed == 0) call_really_used_regs[i] = call_used; #endif } } else { warning ("unknown register name: %s", name); } } /* Mark register number I as global. */ void globalize_reg (int i) { if (fixed_regs[i] == 0 && no_global_reg_vars) error ("global register variable follows a function definition"); if (global_regs[i]) { warning ("register used for two global register variables"); return; } if (call_used_regs[i] && ! fixed_regs[i]) warning ("call-clobbered register used for global register variable"); global_regs[i] = 1; /* If already fixed, nothing else to do. */ if (fixed_regs[i]) return; fixed_regs[i] = call_used_regs[i] = call_fixed_regs[i] = 1; n_non_fixed_regs--; SET_HARD_REG_BIT (fixed_reg_set, i); SET_HARD_REG_BIT (call_used_reg_set, i); SET_HARD_REG_BIT (call_fixed_reg_set, i); SET_HARD_REG_BIT (regs_invalidated_by_call, i); } /* Now the data and code for the `regclass' pass, which happens just before local-alloc. */ /* The `costs' struct records the cost of using a hard register of each class and of using memory for each pseudo. We use this data to set up register class preferences. */ struct costs { int cost[N_REG_CLASSES]; int mem_cost; }; /* Structure used to record preferences of given pseudo. */ struct reg_pref { /* (enum reg_class) prefclass is the preferred class. */ char prefclass; /* altclass is a register class that we should use for allocating pseudo if no register in the preferred class is available. If no register in this class is available, memory is preferred. It might appear to be more general to have a bitmask of classes here, but since it is recommended that there be a class corresponding to the union of most major pair of classes, that generality is not required. */ char altclass; }; /* Record the cost of each class for each pseudo. */ static struct costs *costs; /* Initialized once, and used to initialize cost values for each insn. 
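Roughly, and only as an orientation: regclass_init below seeds every field of init_cost with the same large value (10000 for memory and for each class), so no class looks cheap until the per-insn scans accumulate real operand costs on top of it.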
*/ static struct costs init_cost; /* Record preferences of each pseudo. This is available after `regclass' is run. */ static struct reg_pref *reg_pref; /* Allocated buffers for reg_pref. */ static struct reg_pref *reg_pref_buffer; /* Frequency of executions of current insn. */ static int frequency; static rtx scan_one_insn (rtx, int); static void record_operand_costs (rtx, struct costs *, struct reg_pref *); static void dump_regclass (FILE *); static void record_reg_classes (int, int, rtx *, enum machine_mode *, const char **, rtx, struct costs *, struct reg_pref *); static int compute_copy_cost (rtx, enum machine_mode, enum reg_class, int); static void record_address_regs (rtx, enum reg_class, int); #ifdef FORBIDDEN_INC_DEC_CLASSES static int auto_inc_dec_reg_p (rtx, enum machine_mode); #endif static void reg_scan_mark_refs (rtx, rtx, int, unsigned int); /* Return the reg_class in which pseudo reg number REGNO is best allocated. This function is sometimes called before the info has been computed. When that happens, just return GENERAL_REGS, which is innocuous. */ enum reg_class reg_preferred_class (int regno) { if (reg_pref == 0) return GENERAL_REGS; return (enum reg_class) reg_pref[regno].prefclass; } enum reg_class reg_alternate_class (int regno) { if (reg_pref == 0) return ALL_REGS; return (enum reg_class) reg_pref[regno].altclass; } /* Initialize some global data for this pass. */ void regclass_init (void) { int i; init_cost.mem_cost = 10000; for (i = 0; i < N_REG_CLASSES; i++) init_cost.cost[i] = 10000; /* This prevents dump_flow_info from losing if called before regclass is run. */ reg_pref = NULL; /* No more global register variables may be declared. */ no_global_reg_vars = 1; } /* Dump register costs. */ static void dump_regclass (FILE *dump) { static const char *const reg_class_names2[] = REG_CLASS_NAMES; int i; for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) { int /* enum reg_class */ class; if (REG_N_REFS (i)) { fprintf (dump, " Register %i costs:", i); for (class = 0; class < (int) N_REG_CLASSES; class++) if (contains_reg_of_mode [(enum reg_class) class][PSEUDO_REGNO_MODE (i)] #ifdef FORBIDDEN_INC_DEC_CLASSES && (!in_inc_dec[i] || !forbidden_inc_dec_class[(enum reg_class) class]) #endif #ifdef CANNOT_CHANGE_MODE_CLASS && ! invalid_mode_change_p (i, (enum reg_class) class, PSEUDO_REGNO_MODE (i)) #endif ) fprintf (dump, " %s:%i", reg_class_names2[class], costs[i].cost[(enum reg_class) class]); fprintf (dump, " MEM:%i\n", costs[i].mem_cost); } } } /* Calculate the costs of insn operands. */ static void record_operand_costs (rtx insn, struct costs *op_costs, struct reg_pref *reg_pref) { const char *constraints[MAX_RECOG_OPERANDS]; enum machine_mode modes[MAX_RECOG_OPERANDS]; int i; for (i = 0; i < recog_data.n_operands; i++) { constraints[i] = recog_data.constraints[i]; modes[i] = recog_data.operand_mode[i]; } /* If we get here, we are set up to record the costs of all the operands for this insn. Start by initializing the costs. Then handle any address registers. Finally record the desired classes for any pseudos, doing it twice if some pair of operands are commutative. 
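For example, in a commutative pattern such as m68k's addsi3, where operand 1 carries the '%' modifier, the second loop below costs the insn again with the constraint strings of operands 1 and 2 swapped, so the cheaper ordering of the two inputs is what gets recorded.  (An illustrative reading of the loop, assuming the usual meaning of '%'.)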
*/ for (i = 0; i < recog_data.n_operands; i++) { op_costs[i] = init_cost; if (GET_CODE (recog_data.operand[i]) == SUBREG) recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]); if (MEM_P (recog_data.operand[i])) record_address_regs (XEXP (recog_data.operand[i], 0), MODE_BASE_REG_CLASS (modes[i]), frequency * 2); else if (constraints[i][0] == 'p' || EXTRA_ADDRESS_CONSTRAINT (constraints[i][0], constraints[i])) record_address_regs (recog_data.operand[i], MODE_BASE_REG_CLASS (modes[i]), frequency * 2); } /* Check for commutative in a separate loop so everything will have been initialized. We must do this even if one operand is a constant--see addsi3 in m68k.md. */ for (i = 0; i < (int) recog_data.n_operands - 1; i++) if (constraints[i][0] == '%') { const char *xconstraints[MAX_RECOG_OPERANDS]; int j; /* Handle commutative operands by swapping the constraints. We assume the modes are the same. */ for (j = 0; j < recog_data.n_operands; j++) xconstraints[j] = constraints[j]; xconstraints[i] = constraints[i+1]; xconstraints[i+1] = constraints[i]; record_reg_classes (recog_data.n_alternatives, recog_data.n_operands, recog_data.operand, modes, xconstraints, insn, op_costs, reg_pref); } record_reg_classes (recog_data.n_alternatives, recog_data.n_operands, recog_data.operand, modes, constraints, insn, op_costs, reg_pref); } /* Subroutine of regclass, processes one insn INSN. Scan it and record each time it would save code to put a certain register in a certain class. PASS, when nonzero, inhibits some optimizations which need only be done once. Return the last insn processed, so that the scan can be continued from there. */ static rtx scan_one_insn (rtx insn, int pass) { enum rtx_code pat_code; rtx set, note; int i, j; struct costs op_costs[MAX_RECOG_OPERANDS]; if (!INSN_P (insn)) return insn; pat_code = GET_CODE (PATTERN (insn)); if (pat_code == USE || pat_code == CLOBBER || pat_code == ASM_INPUT || pat_code == ADDR_VEC || pat_code == ADDR_DIFF_VEC) return insn; set = single_set (insn); extract_insn (insn); /* If this insn loads a parameter from its stack slot, then it represents a savings, rather than a cost, if the parameter is stored in memory. Record this fact. */ if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set)) && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0 && MEM_P (XEXP (note, 0))) { costs[REGNO (SET_DEST (set))].mem_cost -= (MEMORY_MOVE_COST (GET_MODE (SET_DEST (set)), GENERAL_REGS, 1) * frequency); record_address_regs (XEXP (SET_SRC (set), 0), MODE_BASE_REG_CLASS (VOIDmode), frequency * 2); return insn; } /* Improve handling of two-address insns such as (set X (ashift CONST Y)) where CONST must be made to match X. Change it into two insns: (set X CONST) (set X (ashift X Y)). If we left this for reloading, it would probably get three insns because X and Y might go in the same place. This prevents X and Y from receiving the same hard reg. We can only do this if the modes of operands 0 and 1 (which might not be the same) are tieable and we only need do this during our first pass. */ if (pass == 0 && optimize && recog_data.n_operands >= 3 && recog_data.constraints[1][0] == '0' && recog_data.constraints[1][1] == 0 && CONSTANT_P (recog_data.operand[1]) && ! rtx_equal_p (recog_data.operand[0], recog_data.operand[1]) && ! 
rtx_equal_p (recog_data.operand[0], recog_data.operand[2]) && REG_P (recog_data.operand[0]) && MODES_TIEABLE_P (GET_MODE (recog_data.operand[0]), recog_data.operand_mode[1])) { rtx previnsn = prev_real_insn (insn); rtx dest = gen_lowpart (recog_data.operand_mode[1], recog_data.operand[0]); rtx newinsn = emit_insn_before (gen_move_insn (dest, recog_data.operand[1]), insn); /* If this insn was the start of a basic block, include the new insn in that block. We need not check for code_label here; while a basic block can start with a code_label, INSN could not be at the beginning of that block. */ if (previnsn == 0 || GET_CODE (previnsn) == JUMP_INSN) { basic_block b; FOR_EACH_BB (b) if (insn == BB_HEAD (b)) BB_HEAD (b) = newinsn; } /* This makes one more setting of new insns's dest. */ REG_N_SETS (REGNO (recog_data.operand[0]))++; REG_N_REFS (REGNO (recog_data.operand[0]))++; REG_FREQ (REGNO (recog_data.operand[0])) += frequency; *recog_data.operand_loc[1] = recog_data.operand[0]; REG_N_REFS (REGNO (recog_data.operand[0]))++; REG_FREQ (REGNO (recog_data.operand[0])) += frequency; for (i = recog_data.n_dups - 1; i >= 0; i--) if (recog_data.dup_num[i] == 1) { *recog_data.dup_loc[i] = recog_data.operand[0]; REG_N_REFS (REGNO (recog_data.operand[0]))++; REG_FREQ (REGNO (recog_data.operand[0])) += frequency; } return PREV_INSN (newinsn); } record_operand_costs (insn, op_costs, reg_pref); /* Now add the cost for each operand to the total costs for its register. */ for (i = 0; i < recog_data.n_operands; i++) if (REG_P (recog_data.operand[i]) && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER) { int regno = REGNO (recog_data.operand[i]); struct costs *p = &costs[regno], *q = &op_costs[i]; p->mem_cost += q->mem_cost * frequency; for (j = 0; j < N_REG_CLASSES; j++) p->cost[j] += q->cost[j] * frequency; } return insn; } /* Initialize information about which register classes can be used for pseudos that are auto-incremented or auto-decremented. */ static void init_reg_autoinc (void) { #ifdef FORBIDDEN_INC_DEC_CLASSES int i; for (i = 0; i < N_REG_CLASSES; i++) { rtx r = gen_rtx_raw_REG (VOIDmode, 0); enum machine_mode m; int j; for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) if (TEST_HARD_REG_BIT (reg_class_contents[i], j)) { REGNO (r) = j; for (m = VOIDmode; (int) m < (int) MAX_MACHINE_MODE; m = (enum machine_mode) ((int) m + 1)) if (HARD_REGNO_MODE_OK (j, m)) { PUT_MODE (r, m); /* If a register is not directly suitable for an auto-increment or decrement addressing mode and requires secondary reloads, disallow its class from being used in such addresses. */ if ((0 #ifdef SECONDARY_RELOAD_CLASS || (SECONDARY_RELOAD_CLASS (MODE_BASE_REG_CLASS (VOIDmode), m, r) != NO_REGS) #else #ifdef SECONDARY_INPUT_RELOAD_CLASS || (SECONDARY_INPUT_RELOAD_CLASS (MODE_BASE_REG_CLASS (VOIDmode), m, r) != NO_REGS) #endif #ifdef SECONDARY_OUTPUT_RELOAD_CLASS || (SECONDARY_OUTPUT_RELOAD_CLASS (MODE_BASE_REG_CLASS (VOIDmode), m, r) != NO_REGS) #endif #endif ) && ! auto_inc_dec_reg_p (r, m)) forbidden_inc_dec_class[i] = 1; } } } #endif /* FORBIDDEN_INC_DEC_CLASSES */ } /* This is a pass of the compiler that scans all instructions and calculates the preferred class for each pseudo-register. This information can be accessed later by calling `reg_preferred_class'. This pass comes just before local register allocation. 
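Loosely, for every pseudo P the pass accumulates, over all insns mentioning P and weighted by execution frequency, the cost of keeping P in each register class and in memory; the cheapest class becomes reg_pref[P].prefclass, and the widest class whose registers still beat memory becomes reg_pref[P].altclass.  Later passes read these back through reg_preferred_class and reg_alternate_class.  (This paragraph is only a summary of the loops below.)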
*/ void regclass (rtx f, int nregs, FILE *dump) { rtx insn; int i; int pass; init_recog (); costs = xmalloc (nregs * sizeof (struct costs)); #ifdef FORBIDDEN_INC_DEC_CLASSES in_inc_dec = xmalloc (nregs); #endif /* FORBIDDEN_INC_DEC_CLASSES */ /* Normally we scan the insns once and determine the best class to use for each register. However, if -fexpensive_optimizations are on, we do so twice, the second time using the tentative best classes to guide the selection. */ for (pass = 0; pass <= flag_expensive_optimizations; pass++) { basic_block bb; if (dump) fprintf (dump, "\n\nPass %i\n\n",pass); /* Zero out our accumulation of the cost of each class for each reg. */ memset (costs, 0, nregs * sizeof (struct costs)); #ifdef FORBIDDEN_INC_DEC_CLASSES memset (in_inc_dec, 0, nregs); #endif /* Scan the instructions and record each time it would save code to put a certain register in a certain class. */ if (!optimize) { frequency = REG_FREQ_MAX; for (insn = f; insn; insn = NEXT_INSN (insn)) insn = scan_one_insn (insn, pass); } else FOR_EACH_BB (bb) { /* Show that an insn inside a loop is likely to be executed three times more than insns outside a loop. This is much more aggressive than the assumptions made elsewhere and is being tried as an experiment. */ frequency = REG_FREQ_FROM_BB (bb); for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) { insn = scan_one_insn (insn, pass); if (insn == BB_END (bb)) break; } } /* Now for each register look at how desirable each class is and find which class is preferred. Store that in `prefclass'. Record in `altclass' the largest register class any of whose registers is better than memory. */ if (pass == 0) reg_pref = reg_pref_buffer; if (dump) { dump_regclass (dump); fprintf (dump,"\n"); } for (i = FIRST_PSEUDO_REGISTER; i < nregs; i++) { int best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1; enum reg_class best = ALL_REGS, alt = NO_REGS; /* This is an enum reg_class, but we call it an int to save lots of casts. */ int class; struct costs *p = &costs[i]; /* In non-optimizing compilation REG_N_REFS is not initialized yet. */ if (optimize && !REG_N_REFS (i) && !REG_N_SETS (i)) continue; for (class = (int) ALL_REGS - 1; class > 0; class--) { /* Ignore classes that are too small for this operand or invalid for an operand that was auto-incremented. */ if (!contains_reg_of_mode [class][PSEUDO_REGNO_MODE (i)] #ifdef FORBIDDEN_INC_DEC_CLASSES || (in_inc_dec[i] && forbidden_inc_dec_class[class]) #endif #ifdef CANNOT_CHANGE_MODE_CLASS || invalid_mode_change_p (i, (enum reg_class) class, PSEUDO_REGNO_MODE (i)) #endif ) ; else if (p->cost[class] < best_cost) { best_cost = p->cost[class]; best = (enum reg_class) class; } else if (p->cost[class] == best_cost) best = reg_class_subunion[(int) best][class]; } /* Record the alternate register class; i.e., a class for which every register in it is better than using memory. If adding a class would make a smaller class (i.e., no union of just those classes exists), skip that class. The major unions of classes should be provided as a register class. Don't do this if we will be doing it again later. */ if ((pass == 1 || dump) || ! flag_expensive_optimizations) for (class = 0; class < N_REG_CLASSES; class++) if (p->cost[class] < p->mem_cost && (reg_class_size[(int) reg_class_subunion[(int) alt][class]] > reg_class_size[(int) alt]) #ifdef FORBIDDEN_INC_DEC_CLASSES && ! (in_inc_dec[i] && forbidden_inc_dec_class[class]) #endif #ifdef CANNOT_CHANGE_MODE_CLASS && ! 
invalid_mode_change_p (i, (enum reg_class) class, PSEUDO_REGNO_MODE (i)) #endif ) alt = reg_class_subunion[(int) alt][class]; /* If we don't add any classes, nothing to try. */ if (alt == best) alt = NO_REGS; if (dump && (reg_pref[i].prefclass != (int) best || reg_pref[i].altclass != (int) alt)) { static const char *const reg_class_names2[] = REG_CLASS_NAMES; fprintf (dump, " Register %i", i); if (alt == ALL_REGS || best == ALL_REGS) fprintf (dump, " pref %s\n", reg_class_names2[(int) best]); else if (alt == NO_REGS) fprintf (dump, " pref %s or none\n", reg_class_names2[(int) best]); else fprintf (dump, " pref %s, else %s\n", reg_class_names2[(int) best], reg_class_names2[(int) alt]); } /* We cast to (int) because (char) hits bugs in some compilers. */ reg_pref[i].prefclass = (int) best; reg_pref[i].altclass = (int) alt; } } #ifdef FORBIDDEN_INC_DEC_CLASSES free (in_inc_dec); #endif free (costs); } /* Record the cost of using memory or registers of various classes for the operands in INSN. N_ALTS is the number of alternatives. N_OPS is the number of operands. OPS is an array of the operands. MODES are the modes of the operands, in case any are VOIDmode. CONSTRAINTS are the constraints to use for the operands. This array is modified by this procedure. This procedure works alternative by alternative. For each alternative we assume that we will be able to allocate all pseudos to their ideal register class and calculate the cost of using that alternative. Then we compute for each operand that is a pseudo-register, the cost of having the pseudo allocated to each register class and using it in that alternative. To this cost is added the cost of the alternative. The cost of each class for this insn is its lowest cost among all the alternatives. */ static void record_reg_classes (int n_alts, int n_ops, rtx *ops, enum machine_mode *modes, const char **constraints, rtx insn, struct costs *op_costs, struct reg_pref *reg_pref) { int alt; int i, j; rtx set; /* Process each alternative, each time minimizing an operand's cost with the cost for each operand in that alternative. */ for (alt = 0; alt < n_alts; alt++) { struct costs this_op_costs[MAX_RECOG_OPERANDS]; int alt_fail = 0; int alt_cost = 0; enum reg_class classes[MAX_RECOG_OPERANDS]; int allows_mem[MAX_RECOG_OPERANDS]; int class; for (i = 0; i < n_ops; i++) { const char *p = constraints[i]; rtx op = ops[i]; enum machine_mode mode = modes[i]; int allows_addr = 0; int win = 0; unsigned char c; /* Initially show we know nothing about the register class. */ classes[i] = NO_REGS; allows_mem[i] = 0; /* If this operand has no constraints at all, we can conclude nothing about it since anything is valid. */ if (*p == 0) { if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER) memset (&this_op_costs[i], 0, sizeof this_op_costs[i]); continue; } /* If this alternative is only relevant when this operand matches a previous operand, we do different things depending on whether this operand is a pseudo-reg or not. We must process any modifiers for the operand before we can make this test. */ while (*p == '%' || *p == '=' || *p == '+' || *p == '&') p++; if (p[0] >= '0' && p[0] <= '0' + i && (p[1] == ',' || p[1] == 0)) { /* Copy class and whether memory is allowed from the matching alternative. Then perform any needed cost computations and/or adjustments. */ j = p[0] - '0'; classes[i] = classes[j]; allows_mem[i] = allows_mem[j]; if (!REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER) { /* If this matches the other operand, we have no added cost and we win. 
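For instance, when operand 1's constraint is simply "0" it must end up in the same place as operand 0; if the two rtxes are already rtx_equal_p nothing more is needed, otherwise the copy cost computed just below is charged to this alternative.  (An illustrative case of the matching test above.)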
*/ if (rtx_equal_p (ops[j], op)) win = 1; /* If we can put the other operand into a register, add to the cost of this alternative the cost to copy this operand to the register used for the other operand. */ else if (classes[j] != NO_REGS) alt_cost += compute_copy_cost (op, mode, classes[j], 1), win = 1; } else if (!REG_P (ops[j]) || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER) { /* This op is a pseudo but the one it matches is not. */ /* If we can't put the other operand into a register, this alternative can't be used. */ if (classes[j] == NO_REGS) alt_fail = 1; /* Otherwise, add to the cost of this alternative the cost to copy the other operand to the register used for this operand. */ else alt_cost += compute_copy_cost (ops[j], mode, classes[j], 1); } else { /* The costs of this operand are not the same as the other operand since move costs are not symmetric. Moreover, if we cannot tie them, this alternative needs to do a copy, which is one instruction. */ struct costs *pp = &this_op_costs[i]; for (class = 0; class < N_REG_CLASSES; class++) pp->cost[class] = ((recog_data.operand_type[i] != OP_OUT ? may_move_in_cost[mode][class][(int) classes[i]] : 0) + (recog_data.operand_type[i] != OP_IN ? may_move_out_cost[mode][(int) classes[i]][class] : 0)); /* If the alternative actually allows memory, make things a bit cheaper since we won't need an extra insn to load it. */ pp->mem_cost = ((recog_data.operand_type[i] != OP_IN ? MEMORY_MOVE_COST (mode, classes[i], 0) : 0) + (recog_data.operand_type[i] != OP_OUT ? MEMORY_MOVE_COST (mode, classes[i], 1) : 0) - allows_mem[i]); /* If we have assigned a class to this register in our first pass, add a cost to this alternative corresponding to what we would add if this register were not in the appropriate class. */ if (reg_pref) alt_cost += (may_move_in_cost[mode] [(unsigned char) reg_pref[REGNO (op)].prefclass] [(int) classes[i]]); if (REGNO (ops[i]) != REGNO (ops[j]) && ! find_reg_note (insn, REG_DEAD, op)) alt_cost += 2; /* This is in place of ordinary cost computation for this operand, so skip to the end of the alternative (should be just one character). */ while (*p && *p++ != ',') ; constraints[i] = p; continue; } } /* Scan all the constraint letters. See if the operand matches any of the constraints. Collect the valid register classes and see if this operand accepts memory. */ while ((c = *p)) { switch (c) { case ',': break; case '*': /* Ignore the next letter for this pass. */ c = *++p; break; case '?': alt_cost += 2; case '!': case '#': case '&': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': break; case 'p': allows_addr = 1; win = address_operand (op, GET_MODE (op)); /* We know this operand is an address, so we want it to be allocated to a register that can be the base of an address, ie BASE_REG_CLASS. */ classes[i] = reg_class_subunion[(int) classes[i]] [(int) MODE_BASE_REG_CLASS (VOIDmode)]; break; case 'm': case 'o': case 'V': /* It doesn't seem worth distinguishing between offsettable and non-offsettable addresses here. 
*/ allows_mem[i] = 1; if (MEM_P (op)) win = 1; break; case '<': if (MEM_P (op) && (GET_CODE (XEXP (op, 0)) == PRE_DEC || GET_CODE (XEXP (op, 0)) == POST_DEC)) win = 1; break; case '>': if (MEM_P (op) && (GET_CODE (XEXP (op, 0)) == PRE_INC || GET_CODE (XEXP (op, 0)) == POST_INC)) win = 1; break; case 'E': case 'F': if (GET_CODE (op) == CONST_DOUBLE || (GET_CODE (op) == CONST_VECTOR && (GET_MODE_CLASS (GET_MODE (op)) == MODE_VECTOR_FLOAT))) win = 1; break; case 'G': case 'H': if (GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, c, p)) win = 1; break; case 's': if (GET_CODE (op) == CONST_INT || (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)) break; case 'i': if (CONSTANT_P (op) #ifdef LEGITIMATE_PIC_OPERAND_P && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) #endif ) win = 1; break; case 'n': if (GET_CODE (op) == CONST_INT || (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)) win = 1; break; case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), c, p)) win = 1; break; case 'X': win = 1; break; case 'g': if (MEM_P (op) || (CONSTANT_P (op) #ifdef LEGITIMATE_PIC_OPERAND_P && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)) #endif )) win = 1; allows_mem[i] = 1; case 'r': classes[i] = reg_class_subunion[(int) classes[i]][(int) GENERAL_REGS]; break; default: if (REG_CLASS_FROM_CONSTRAINT (c, p) != NO_REGS) classes[i] = reg_class_subunion[(int) classes[i]] [(int) REG_CLASS_FROM_CONSTRAINT (c, p)]; #ifdef EXTRA_CONSTRAINT_STR else if (EXTRA_CONSTRAINT_STR (op, c, p)) win = 1; if (EXTRA_MEMORY_CONSTRAINT (c, p)) { /* Every MEM can be reloaded to fit. */ allows_mem[i] = 1; if (MEM_P (op)) win = 1; } if (EXTRA_ADDRESS_CONSTRAINT (c, p)) { /* Every address can be reloaded to fit. */ allows_addr = 1; if (address_operand (op, GET_MODE (op))) win = 1; /* We know this operand is an address, so we want it to be allocated to a register that can be the base of an address, ie BASE_REG_CLASS. */ classes[i] = reg_class_subunion[(int) classes[i]] [(int) MODE_BASE_REG_CLASS (VOIDmode)]; } #endif break; } p += CONSTRAINT_LEN (c, p); if (c == ',') break; } constraints[i] = p; /* How we account for this operand now depends on whether it is a pseudo register or not. If it is, we first check if any register classes are valid. If not, we ignore this alternative, since we want to assume that all pseudos get allocated for register preferencing. If some register class is valid, compute the costs of moving the pseudo into that class. */ if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER) { if (classes[i] == NO_REGS) { /* We must always fail if the operand is a REG, but we did not find a suitable class. Otherwise we may perform an uninitialized read from this_op_costs after the `continue' statement below. */ alt_fail = 1; } else { struct costs *pp = &this_op_costs[i]; for (class = 0; class < N_REG_CLASSES; class++) pp->cost[class] = ((recog_data.operand_type[i] != OP_OUT ? may_move_in_cost[mode][class][(int) classes[i]] : 0) + (recog_data.operand_type[i] != OP_IN ? may_move_out_cost[mode][(int) classes[i]][class] : 0)); /* If the alternative actually allows memory, make things a bit cheaper since we won't need an extra insn to load it. */ pp->mem_cost = ((recog_data.operand_type[i] != OP_IN ? MEMORY_MOVE_COST (mode, classes[i], 0) : 0) + (recog_data.operand_type[i] != OP_OUT ? 
MEMORY_MOVE_COST (mode, classes[i], 1) : 0) - allows_mem[i]); /* If we have assigned a class to this register in our first pass, add a cost to this alternative corresponding to what we would add if this register were not in the appropriate class. */ if (reg_pref) alt_cost += (may_move_in_cost[mode] [(unsigned char) reg_pref[REGNO (op)].prefclass] [(int) classes[i]]); } } /* Otherwise, if this alternative wins, either because we have already determined that or if we have a hard register of the proper class, there is no cost for this alternative. */ else if (win || (REG_P (op) && reg_fits_class_p (op, classes[i], 0, GET_MODE (op)))) ; /* If registers are valid, the cost of this alternative includes copying the object to and/or from a register. */ else if (classes[i] != NO_REGS) { if (recog_data.operand_type[i] != OP_OUT) alt_cost += compute_copy_cost (op, mode, classes[i], 1); if (recog_data.operand_type[i] != OP_IN) alt_cost += compute_copy_cost (op, mode, classes[i], 0); } /* The only other way this alternative can be used is if this is a constant that could be placed into memory. */ else if (CONSTANT_P (op) && (allows_addr || allows_mem[i])) alt_cost += MEMORY_MOVE_COST (mode, classes[i], 1); else alt_fail = 1; } if (alt_fail) continue; /* Finally, update the costs with the information we've calculated about this alternative. */ for (i = 0; i < n_ops; i++) if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER) { struct costs *pp = &op_costs[i], *qq = &this_op_costs[i]; int scale = 1 + (recog_data.operand_type[i] == OP_INOUT); pp->mem_cost = MIN (pp->mem_cost, (qq->mem_cost + alt_cost) * scale); for (class = 0; class < N_REG_CLASSES; class++) pp->cost[class] = MIN (pp->cost[class], (qq->cost[class] + alt_cost) * scale); } } /* If this insn is a single set copying operand 1 to operand 0 and one operand is a pseudo with the other a hard reg or a pseudo that prefers a register that is in its own register class then we may want to adjust the cost of that register class to -1. Avoid the adjustment if the source does not die to avoid stressing of register allocator by preferrencing two colliding registers into single class. Also avoid the adjustment if a copy between registers of the class is expensive (ten times the cost of a default copy is considered arbitrarily expensive). This avoids losing when the preferred class is very expensive as the source of a copy instruction. */ if ((set = single_set (insn)) != 0 && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set) && REG_P (ops[0]) && REG_P (ops[1]) && find_regno_note (insn, REG_DEAD, REGNO (ops[1]))) for (i = 0; i <= 1; i++) if (REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER) { unsigned int regno = REGNO (ops[!i]); enum machine_mode mode = GET_MODE (ops[!i]); int class; unsigned int nr; if (regno >= FIRST_PSEUDO_REGISTER && reg_pref != 0) { enum reg_class pref = reg_pref[regno].prefclass; if ((reg_class_size[(unsigned char) pref] == (unsigned) CLASS_MAX_NREGS (pref, mode)) && REGISTER_MOVE_COST (mode, pref, pref) < 10 * 2) op_costs[i].cost[(unsigned char) pref] = -1; } else if (regno < FIRST_PSEUDO_REGISTER) for (class = 0; class < N_REG_CLASSES; class++) if (TEST_HARD_REG_BIT (reg_class_contents[class], regno) && reg_class_size[class] == (unsigned) CLASS_MAX_NREGS (class, mode)) { if (reg_class_size[class] == 1) op_costs[i].cost[class] = -1; else { for (nr = 0; nr < (unsigned) hard_regno_nregs[regno][mode]; nr++) { if (! 
TEST_HARD_REG_BIT (reg_class_contents[class], regno + nr)) break; } if (nr == (unsigned) hard_regno_nregs[regno][mode]) op_costs[i].cost[class] = -1; } } } } /* Compute the cost of loading X into (if TO_P is nonzero) or from (if TO_P is zero) a register of class CLASS in mode MODE. X must not be a pseudo. */ static int compute_copy_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED, enum reg_class class, int to_p ATTRIBUTE_UNUSED) { #ifdef HAVE_SECONDARY_RELOADS enum reg_class secondary_class = NO_REGS; #endif /* If X is a SCRATCH, there is actually nothing to move since we are assuming optimal allocation. */ if (GET_CODE (x) == SCRATCH) return 0; /* Get the class we will actually use for a reload. */ class = PREFERRED_RELOAD_CLASS (x, class); #ifdef HAVE_SECONDARY_RELOADS /* If we need a secondary reload (we assume here that we are using the secondary reload as an intermediate, not a scratch register), the cost is that to load the input into the intermediate register, then to copy them. We use a special value of TO_P to avoid recursion. */ #ifdef SECONDARY_INPUT_RELOAD_CLASS if (to_p == 1) secondary_class = SECONDARY_INPUT_RELOAD_CLASS (class, mode, x); #endif #ifdef SECONDARY_OUTPUT_RELOAD_CLASS if (! to_p) secondary_class = SECONDARY_OUTPUT_RELOAD_CLASS (class, mode, x); #endif if (secondary_class != NO_REGS) return (move_cost[mode][(int) secondary_class][(int) class] + compute_copy_cost (x, mode, secondary_class, 2)); #endif /* HAVE_SECONDARY_RELOADS */ /* For memory, use the memory move cost, for (hard) registers, use the cost to move between the register classes, and use 2 for everything else (constants). */ if (MEM_P (x) || class == NO_REGS) return MEMORY_MOVE_COST (mode, class, to_p); else if (REG_P (x)) return move_cost[mode][(int) REGNO_REG_CLASS (REGNO (x))][(int) class]; else /* If this is a constant, we may eventually want to call rtx_cost here. */ return COSTS_N_INSNS (1); } /* Record the pseudo registers we must reload into hard registers in a subexpression of a memory address, X. CLASS is the class that the register needs to be in and is either BASE_REG_CLASS or INDEX_REG_CLASS. SCALE is twice the amount to multiply the cost by (it is twice so we can represent half-cost adjustments). */ static void record_address_regs (rtx x, enum reg_class class, int scale) { enum rtx_code code = GET_CODE (x); switch (code) { case CONST_INT: case CONST: case CC0: case PC: case SYMBOL_REF: case LABEL_REF: return; case PLUS: /* When we have an address that is a sum, we must determine whether registers are "base" or "index" regs. If there is a sum of two registers, we must choose one to be the "base". Luckily, we can use the REG_POINTER to make a good choice most of the time. We only need to do this on machines that can have two registers in an address and where the base and index register classes are different. ??? This code used to set REGNO_POINTER_FLAG in some cases, but that seems bogus since it should only be set when we are sure the register is being used as a pointer. */ { rtx arg0 = XEXP (x, 0); rtx arg1 = XEXP (x, 1); enum rtx_code code0 = GET_CODE (arg0); enum rtx_code code1 = GET_CODE (arg1); /* Look inside subregs. */ if (code0 == SUBREG) arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0); if (code1 == SUBREG) arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1); /* If this machine only allows one register per address, it must be in the first operand. 
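On such a target an address like (plus (reg 65) (const_int 4)) has only one slot where a register can appear, so only arg0 needs to be costed.  (The address and register number are illustrative only.)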
*/ if (MAX_REGS_PER_ADDRESS == 1) record_address_regs (arg0, class, scale); /* If index and base registers are the same on this machine, just record registers in any non-constant operands. We assume here, as well as in the tests below, that all addresses are in canonical form. */ else if (INDEX_REG_CLASS == MODE_BASE_REG_CLASS (VOIDmode)) { record_address_regs (arg0, class, scale); if (! CONSTANT_P (arg1)) record_address_regs (arg1, class, scale); } /* If the second operand is a constant integer, it doesn't change what class the first operand must be. */ else if (code1 == CONST_INT || code1 == CONST_DOUBLE) record_address_regs (arg0, class, scale); /* If the second operand is a symbolic constant, the first operand must be an index register. */ else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF) record_address_regs (arg0, INDEX_REG_CLASS, scale); /* If both operands are registers but one is already a hard register of index or base class, give the other the class that the hard register is not. */ #ifdef REG_OK_FOR_BASE_P else if (code0 == REG && code1 == REG && REGNO (arg0) < FIRST_PSEUDO_REGISTER && (REG_OK_FOR_BASE_P (arg0) || REG_OK_FOR_INDEX_P (arg0))) record_address_regs (arg1, REG_OK_FOR_BASE_P (arg0) ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (VOIDmode), scale); else if (code0 == REG && code1 == REG && REGNO (arg1) < FIRST_PSEUDO_REGISTER && (REG_OK_FOR_BASE_P (arg1) || REG_OK_FOR_INDEX_P (arg1))) record_address_regs (arg0, REG_OK_FOR_BASE_P (arg1) ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (VOIDmode), scale); #endif /* If one operand is known to be a pointer, it must be the base with the other operand the index. Likewise if the other operand is a MULT. */ else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT) { record_address_regs (arg0, MODE_BASE_REG_CLASS (VOIDmode), scale); record_address_regs (arg1, INDEX_REG_CLASS, scale); } else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT) { record_address_regs (arg0, INDEX_REG_CLASS, scale); record_address_regs (arg1, MODE_BASE_REG_CLASS (VOIDmode), scale); } /* Otherwise, count equal chances that each might be a base or index register. This case should be rare. */ else { record_address_regs (arg0, MODE_BASE_REG_CLASS (VOIDmode), scale / 2); record_address_regs (arg0, INDEX_REG_CLASS, scale / 2); record_address_regs (arg1, MODE_BASE_REG_CLASS (VOIDmode), scale / 2); record_address_regs (arg1, INDEX_REG_CLASS, scale / 2); } } break; /* Double the importance of a pseudo register that is incremented or decremented, since it would take two extra insns if it ends up in the wrong place. */ case POST_MODIFY: case PRE_MODIFY: record_address_regs (XEXP (x, 0), MODE_BASE_REG_CLASS (VOIDmode), 2 * scale); if (REG_P (XEXP (XEXP (x, 1), 1))) record_address_regs (XEXP (XEXP (x, 1), 1), INDEX_REG_CLASS, 2 * scale); break; case POST_INC: case PRE_INC: case POST_DEC: case PRE_DEC: /* Double the importance of a pseudo register that is incremented or decremented, since it would take two extra insns if it ends up in the wrong place. If the operand is a pseudo, show it is being used in an INC_DEC context. 
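For example, an address such as (post_inc (reg 70)) both marks reg 70 in in_inc_dec (when FORBIDDEN_INC_DEC_CLASSES is defined) and records it at twice the normal scale, since leaving it in an unsuitable register would later require a separate add.  (The pseudo-register number is illustrative only.)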
*/ #ifdef FORBIDDEN_INC_DEC_CLASSES if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) >= FIRST_PSEUDO_REGISTER) in_inc_dec[REGNO (XEXP (x, 0))] = 1; #endif record_address_regs (XEXP (x, 0), class, 2 * scale); break; case REG: { struct costs *pp = &costs[REGNO (x)]; int i; pp->mem_cost += (MEMORY_MOVE_COST (Pmode, class, 1) * scale) / 2; for (i = 0; i < N_REG_CLASSES; i++) pp->cost[i] += (may_move_in_cost[Pmode][i][(int) class] * scale) / 2; } break; default: { const char *fmt = GET_RTX_FORMAT (code); int i; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') record_address_regs (XEXP (x, i), class, scale); } } } #ifdef FORBIDDEN_INC_DEC_CLASSES /* Return 1 if REG is valid as an auto-increment memory reference to an object of MODE. */ static int auto_inc_dec_reg_p (rtx reg, enum machine_mode mode) { if (HAVE_POST_INCREMENT && memory_address_p (mode, gen_rtx_POST_INC (Pmode, reg))) return 1; if (HAVE_POST_DECREMENT && memory_address_p (mode, gen_rtx_POST_DEC (Pmode, reg))) return 1; if (HAVE_PRE_INCREMENT && memory_address_p (mode, gen_rtx_PRE_INC (Pmode, reg))) return 1; if (HAVE_PRE_DECREMENT && memory_address_p (mode, gen_rtx_PRE_DEC (Pmode, reg))) return 1; return 0; } #endif static short *renumber; static size_t regno_allocated; static unsigned int reg_n_max; /* Allocate enough space to hold NUM_REGS registers for the tables used for reg_scan and flow_analysis that are indexed by the register number. If NEW_P is nonzero, initialize all of the registers, otherwise only initialize the new registers allocated. The same table is kept from function to function, only reallocating it when we need more room. If RENUMBER_P is nonzero, allocate the reg_renumber array also. */ void allocate_reg_info (size_t num_regs, int new_p, int renumber_p) { size_t size_info; size_t size_renumber; size_t min = (new_p) ? 0 : reg_n_max; struct reg_info_data *reg_data; if (num_regs > regno_allocated) { size_t old_allocated = regno_allocated; regno_allocated = num_regs + (num_regs / 20); /* Add some slop space. */ size_renumber = regno_allocated * sizeof (short); if (!reg_n_info) { VARRAY_REG_INIT (reg_n_info, regno_allocated, "reg_n_info"); renumber = xmalloc (size_renumber); reg_pref_buffer = xmalloc (regno_allocated * sizeof (struct reg_pref)); } else { VARRAY_GROW (reg_n_info, regno_allocated); if (new_p) /* If we're zapping everything, no need to realloc. */ { free ((char *) renumber); free ((char *) reg_pref); renumber = xmalloc (size_renumber); reg_pref_buffer = xmalloc (regno_allocated * sizeof (struct reg_pref)); } else { renumber = xrealloc (renumber, size_renumber); reg_pref_buffer = xrealloc (reg_pref_buffer, regno_allocated * sizeof (struct reg_pref)); } } size_info = (regno_allocated - old_allocated) * sizeof (reg_info) + sizeof (struct reg_info_data) - sizeof (reg_info); reg_data = xcalloc (size_info, 1); reg_data->min_index = old_allocated; reg_data->max_index = regno_allocated - 1; reg_data->next = reg_info_head; reg_info_head = reg_data; } reg_n_max = num_regs; if (min < num_regs) { /* Loop through each of the segments allocated for the actual reg_info pages, and set up the pointers, zero the pages, etc. 
 */ for (reg_data = reg_info_head; reg_data && reg_data->max_index >= min; reg_data = reg_data->next) { size_t min_index = reg_data->min_index; size_t max_index = reg_data->max_index; size_t max = MIN (max_index, num_regs); size_t local_min = min - min_index; size_t i; if (reg_data->min_index > num_regs) continue; if (min < min_index) local_min = 0; if (!reg_data->used_p) /* page just allocated with calloc */ reg_data->used_p = 1; /* no need to zero */ else memset (&reg_data->data[local_min], 0, sizeof (reg_info) * (max - min_index - local_min + 1)); for (i = min_index+local_min; i <= max; i++) { VARRAY_REG (reg_n_info, i) = &reg_data->data[i-min_index]; REG_BASIC_BLOCK (i) = REG_BLOCK_UNKNOWN; renumber[i] = -1; reg_pref_buffer[i].prefclass = (char) NO_REGS; reg_pref_buffer[i].altclass = (char) NO_REGS; } } } /* If {pref,alt}class have already been allocated, update the pointers to the newly realloced ones. */ if (reg_pref) reg_pref = reg_pref_buffer; if (renumber_p) reg_renumber = renumber; /* Tell the regset code about the new number of registers. */ MAX_REGNO_REG_SET (num_regs, new_p, renumber_p); } /* Free up the space allocated by allocate_reg_info. */ void free_reg_info (void) { if (reg_n_info) { struct reg_info_data *reg_data; struct reg_info_data *reg_next; VARRAY_FREE (reg_n_info); for (reg_data = reg_info_head; reg_data; reg_data = reg_next) { reg_next = reg_data->next; free ((char *) reg_data); } free (reg_pref_buffer); reg_pref_buffer = (struct reg_pref *) 0; reg_info_head = (struct reg_info_data *) 0; renumber = (short *) 0; } regno_allocated = 0; reg_n_max = 0; } /* This is the `regscan' pass of the compiler, run just before cse and again just before loop. It finds the first and last use of each pseudo-register and records them in the vectors regno_first_uid, regno_last_uid and counts the number of sets in the vector reg_n_sets. REPEAT is nonzero the second time this is called. */ /* Maximum number of parallel sets and clobbers in any insn in this fn. Always at least 3, since the combiner could put that many together and we want this to remain correct for all the remaining passes. This corresponds to the maximum number of times note_stores will call a function for any insn. */ int max_parallel; /* Used as a temporary to record the largest number of registers in PARALLEL in a SET_DEST. This is added to max_parallel. */ static int max_set_parallel; void reg_scan (rtx f, unsigned int nregs, int repeat ATTRIBUTE_UNUSED) { rtx insn; timevar_push (TV_REG_SCAN); allocate_reg_info (nregs, TRUE, FALSE); max_parallel = 3; max_set_parallel = 0; for (insn = f; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { rtx pat = PATTERN (insn); if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > max_parallel) max_parallel = XVECLEN (pat, 0); reg_scan_mark_refs (pat, insn, 0, 0); if (REG_NOTES (insn)) reg_scan_mark_refs (REG_NOTES (insn), insn, 1, 0); } max_parallel += max_set_parallel; timevar_pop (TV_REG_SCAN); } /* Update 'regscan' information by looking at the insns from FIRST to LAST. Some new REGs have been created, and any REG with number greater than OLD_MAX_REGNO is such a REG. We only update information for those.
*/ void reg_scan_update (rtx first, rtx last, unsigned int old_max_regno) { rtx insn; allocate_reg_info (max_reg_num (), FALSE, FALSE); for (insn = first; insn != last; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { rtx pat = PATTERN (insn); if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > max_parallel) max_parallel = XVECLEN (pat, 0); reg_scan_mark_refs (pat, insn, 0, old_max_regno); if (REG_NOTES (insn)) reg_scan_mark_refs (REG_NOTES (insn), insn, 1, old_max_regno); } } /* X is the expression to scan. INSN is the insn it appears in. NOTE_FLAG is nonzero if X is from INSN's notes rather than its body. We should only record information for REGs with numbers greater than or equal to MIN_REGNO. */ static void reg_scan_mark_refs (rtx x, rtx insn, int note_flag, unsigned int min_regno) { enum rtx_code code; rtx dest; rtx note; if (!x) return; code = GET_CODE (x); switch (code) { case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CC0: case PC: case SYMBOL_REF: case LABEL_REF: case ADDR_VEC: case ADDR_DIFF_VEC: return; case REG: { unsigned int regno = REGNO (x); if (regno >= min_regno) { REGNO_LAST_NOTE_UID (regno) = INSN_UID (insn); if (!note_flag) REGNO_LAST_UID (regno) = INSN_UID (insn); if (REGNO_FIRST_UID (regno) == 0) REGNO_FIRST_UID (regno) = INSN_UID (insn); /* If we are called by reg_scan_update() (indicated by min_regno being set), we also need to update the reference count. */ if (min_regno) REG_N_REFS (regno)++; } } break; case EXPR_LIST: if (XEXP (x, 0)) reg_scan_mark_refs (XEXP (x, 0), insn, note_flag, min_regno); if (XEXP (x, 1)) reg_scan_mark_refs (XEXP (x, 1), insn, note_flag, min_regno); break; case INSN_LIST: if (XEXP (x, 1)) reg_scan_mark_refs (XEXP (x, 1), insn, note_flag, min_regno); break; case CLOBBER: { rtx reg = XEXP (x, 0); if (REG_P (reg) && REGNO (reg) >= min_regno) { REG_N_SETS (REGNO (reg))++; REG_N_REFS (REGNO (reg))++; } else if (MEM_P (reg)) reg_scan_mark_refs (XEXP (reg, 0), insn, note_flag, min_regno); } break; case SET: /* Count a set of the destination if it is a register. */ for (dest = SET_DEST (x); GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == ZERO_EXTEND; dest = XEXP (dest, 0)) ; /* For a PARALLEL, record the number of things (less the usual one for a SET) that are set. */ if (GET_CODE (dest) == PARALLEL) max_set_parallel = MAX (max_set_parallel, XVECLEN (dest, 0) - 1); if (REG_P (dest) && REGNO (dest) >= min_regno) { REG_N_SETS (REGNO (dest))++; REG_N_REFS (REGNO (dest))++; } /* If this is setting a pseudo from another pseudo or the sum of a pseudo and a constant integer and the other pseudo is known to be a pointer, set the destination to be a pointer as well. Likewise if it is setting the destination from an address or from a value equivalent to an address or to the sum of an address and something else. But don't do any of this if the pseudo corresponds to a user variable since it should have already been set as a pointer based on the type. */ if (REG_P (SET_DEST (x)) && REGNO (SET_DEST (x)) >= FIRST_PSEUDO_REGISTER && REGNO (SET_DEST (x)) >= min_regno /* If the destination pseudo is set more than once, then other sets might not be to a pointer value (consider access to a union in two threads of control in the presence of global optimizations). So only set REG_POINTER on the destination pseudo if this is the only set of that pseudo. */ && REG_N_SETS (REGNO (SET_DEST (x))) == 1 && ! REG_USERVAR_P (SET_DEST (x)) && ! 
REG_POINTER (SET_DEST (x)) && ((REG_P (SET_SRC (x)) && REG_POINTER (SET_SRC (x))) || ((GET_CODE (SET_SRC (x)) == PLUS || GET_CODE (SET_SRC (x)) == LO_SUM) && GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT && REG_P (XEXP (SET_SRC (x), 0)) && REG_POINTER (XEXP (SET_SRC (x), 0))) || GET_CODE (SET_SRC (x)) == CONST || GET_CODE (SET_SRC (x)) == SYMBOL_REF || GET_CODE (SET_SRC (x)) == LABEL_REF || (GET_CODE (SET_SRC (x)) == HIGH && (GET_CODE (XEXP (SET_SRC (x), 0)) == CONST || GET_CODE (XEXP (SET_SRC (x), 0)) == SYMBOL_REF || GET_CODE (XEXP (SET_SRC (x), 0)) == LABEL_REF)) || ((GET_CODE (SET_SRC (x)) == PLUS || GET_CODE (SET_SRC (x)) == LO_SUM) && (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST || GET_CODE (XEXP (SET_SRC (x), 1)) == SYMBOL_REF || GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF)) || ((note = find_reg_note (insn, REG_EQUAL, 0)) != 0 && (GET_CODE (XEXP (note, 0)) == CONST || GET_CODE (XEXP (note, 0)) == SYMBOL_REF || GET_CODE (XEXP (note, 0)) == LABEL_REF)))) REG_POINTER (SET_DEST (x)) = 1; /* If this is setting a register from a register or from a simple conversion of a register, propagate REG_EXPR. */ if (REG_P (dest)) { rtx src = SET_SRC (x); while (GET_CODE (src) == SIGN_EXTEND || GET_CODE (src) == ZERO_EXTEND || GET_CODE (src) == TRUNCATE || (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))) src = XEXP (src, 0); if (!REG_ATTRS (dest) && REG_P (src)) REG_ATTRS (dest) = REG_ATTRS (src); if (!REG_ATTRS (dest) && MEM_P (src)) set_reg_attrs_from_mem (dest, src); } /* ... fall through ... */ default: { const char *fmt = GET_RTX_FORMAT (code); int i; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') reg_scan_mark_refs (XEXP (x, i), insn, note_flag, min_regno); else if (fmt[i] == 'E' && XVEC (x, i) != 0) { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) reg_scan_mark_refs (XVECEXP (x, i, j), insn, note_flag, min_regno); } } } } } /* Return nonzero if C1 is a subset of C2, i.e., if every register in C1 is also in C2. */ int reg_class_subset_p (enum reg_class c1, enum reg_class c2) { if (c1 == c2) return 1; if (c2 == ALL_REGS) win: return 1; GO_IF_HARD_REG_SUBSET (reg_class_contents[(int) c1], reg_class_contents[(int) c2], win); return 0; } /* Return nonzero if there is a register that is in both C1 and C2. */ int reg_classes_intersect_p (enum reg_class c1, enum reg_class c2) { HARD_REG_SET c; if (c1 == c2) return 1; if (c1 == ALL_REGS || c2 == ALL_REGS) return 1; COPY_HARD_REG_SET (c, reg_class_contents[(int) c1]); AND_HARD_REG_SET (c, reg_class_contents[(int) c2]); GO_IF_HARD_REG_SUBSET (c, reg_class_contents[(int) NO_REGS], lose); return 1; lose: return 0; } /* Release any memory allocated by register sets. */ void regset_release_memory (void) { bitmap_release_memory (); } #ifdef CANNOT_CHANGE_MODE_CLASS /* Set bits in *USED which correspond to registers which can't change their mode from FROM to any mode in which REGNO was encountered. */ void cannot_change_mode_set_regs (HARD_REG_SET *used, enum machine_mode from, unsigned int regno) { enum machine_mode to; int n, i; int start = regno * MAX_MACHINE_MODE; EXECUTE_IF_SET_IN_BITMAP (&subregs_of_mode, start, n, if (n >= MAX_MACHINE_MODE + start) return; to = n - start; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (! TEST_HARD_REG_BIT (*used, i) && REG_CANNOT_CHANGE_MODE_P (i, from, to)) SET_HARD_REG_BIT (*used, i); ); } /* Return 1 if REGNO has had an invalid mode change in CLASS from FROM mode. 
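That is, some SUBREG of pseudo REGNO was seen in a mode TO for which CANNOT_CHANGE_MODE_CLASS (FROM, TO, CLASS) holds, as recorded in the subregs_of_mode bitmap scanned below.  (A restatement of the test that follows.)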
 */ bool invalid_mode_change_p (unsigned int regno, enum reg_class class, enum machine_mode from_mode) { enum machine_mode to_mode; int n; int start = regno * MAX_MACHINE_MODE; EXECUTE_IF_SET_IN_BITMAP (&subregs_of_mode, start, n, if (n >= MAX_MACHINE_MODE + start) return 0; to_mode = n - start; if (CANNOT_CHANGE_MODE_CLASS (from_mode, to_mode, class)) return 1; ); return 0; } #endif /* CANNOT_CHANGE_MODE_CLASS */ /* Type information for regclass.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_regclass_h[] = { { &top_of_stack[0], 1 * (MAX_MACHINE_MODE), sizeof (top_of_stack[0]), &gt_ggc_mx_rtx_def, &gt_pch_nx_rtx_def }, LAST_GGC_ROOT_TAB }; /* Move registers around to reduce number of move instructions needed. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This module looks for cases where matching constraints would force an instruction to need a reload, and this reload would be a register to register move. It then attempts to change the registers used by the instruction to avoid the move instruction. */ /* Turn STACK_GROWS_DOWNWARD into a boolean.
*/ #ifdef STACK_GROWS_DOWNWARD #undef STACK_GROWS_DOWNWARD #define STACK_GROWS_DOWNWARD 1 #else #define STACK_GROWS_DOWNWARD 0 #endif static int perhaps_ends_bb_p (rtx); static int optimize_reg_copy_1 (rtx, rtx, rtx); static void optimize_reg_copy_2 (rtx, rtx, rtx); static void optimize_reg_copy_3 (rtx, rtx, rtx); static void copy_src_to_dest (rtx, rtx, rtx, int); static int *regmove_bb_head; struct match { int with[MAX_RECOG_OPERANDS]; enum { READ, WRITE, READWRITE } use[MAX_RECOG_OPERANDS]; int commutative[MAX_RECOG_OPERANDS]; int early_clobber[MAX_RECOG_OPERANDS]; }; static rtx discover_flags_reg (void); static void mark_flags_life_zones (rtx); static void flags_set_1 (rtx, rtx, void *); static int try_auto_increment (rtx, rtx, rtx, rtx, HOST_WIDE_INT, int); static int find_matches (rtx, struct match *); static void replace_in_call_usage (rtx *, unsigned int, rtx, rtx); static int fixup_match_1 (rtx, rtx, rtx, rtx, rtx, int, int, int, FILE *); static int reg_is_remote_constant_p (rtx, rtx, rtx); static int stable_and_no_regs_but_for_p (rtx, rtx, rtx); static int regclass_compatible_p (int, int); static int replacement_quality (rtx); static int fixup_match_2 (rtx, rtx, rtx, rtx, FILE *); /* Return nonzero if registers with CLASS1 and CLASS2 can be merged without causing too much register allocation problems. */ static int regclass_compatible_p (int class0, int class1) { return (class0 == class1 || (reg_class_subset_p (class0, class1) && ! CLASS_LIKELY_SPILLED_P (class0)) || (reg_class_subset_p (class1, class0) && ! CLASS_LIKELY_SPILLED_P (class1))); } /* INC_INSN is an instruction that adds INCREMENT to REG. Try to fold INC_INSN as a post/pre in/decrement into INSN. Iff INC_INSN_SET is nonzero, inc_insn has a destination different from src. Return nonzero for success. */ static int try_auto_increment (rtx insn, rtx inc_insn, rtx inc_insn_set, rtx reg, HOST_WIDE_INT increment, int pre) { enum rtx_code inc_code; rtx pset = single_set (insn); if (pset) { /* Can't use the size of SET_SRC, we might have something like (sign_extend:SI (mem:QI ... */ rtx use = find_use_as_address (pset, reg, 0); if (use != 0 && use != (rtx) (size_t) 1) { int size = GET_MODE_SIZE (GET_MODE (use)); if (0 || (HAVE_POST_INCREMENT && pre == 0 && (inc_code = POST_INC, increment == size)) || (HAVE_PRE_INCREMENT && pre == 1 && (inc_code = PRE_INC, increment == size)) || (HAVE_POST_DECREMENT && pre == 0 && (inc_code = POST_DEC, increment == -size)) || (HAVE_PRE_DECREMENT && pre == 1 && (inc_code = PRE_DEC, increment == -size)) ) { if (inc_insn_set) validate_change (inc_insn, &SET_SRC (inc_insn_set), XEXP (SET_SRC (inc_insn_set), 0), 1); validate_change (insn, &XEXP (use, 0), gen_rtx_fmt_e (inc_code, Pmode, reg), 1); if (apply_change_group ()) { /* If there is a REG_DEAD note on this insn, we must change this not to REG_UNUSED meaning that the register is set, but the value is dead. Failure to do so will result in a sched1 abort -- when it recomputes lifetime information, the number of REG_DEAD notes will have changed. */ rtx note = find_reg_note (insn, REG_DEAD, reg); if (note) PUT_MODE (note, REG_UNUSED); REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_INC, reg, REG_NOTES (insn)); if (! inc_insn_set) delete_insn (inc_insn); return 1; } } } } return 0; } /* Determine if the pattern generated by add_optab has a clobber, such as might be issued for a flags hard register. To make the code elsewhere simpler, we handle cc0 in this same framework. Return the register if one was discovered. Return NULL_RTX if if no flags were found. 
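For example, if the generated add pattern is a (parallel [(set ...) (clobber (reg:CC N))]) for some hard register N, that clobbered register is returned; a plain single (set ...) means no flags register is involved.  (An illustrative pattern shape, mirroring the checks in the function body.)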
Return pc_rtx if we got confused. */ static rtx discover_flags_reg (void) { rtx tmp; tmp = gen_rtx_REG (word_mode, 10000); tmp = gen_add3_insn (tmp, tmp, const2_rtx); /* If we get something that isn't a simple set, or a [(set ..) (clobber ..)], this whole function will go wrong. */ if (GET_CODE (tmp) == SET) return NULL_RTX; else if (GET_CODE (tmp) == PARALLEL) { int found; if (XVECLEN (tmp, 0) != 2) return pc_rtx; tmp = XVECEXP (tmp, 0, 1); if (GET_CODE (tmp) != CLOBBER) return pc_rtx; tmp = XEXP (tmp, 0); /* Don't do anything foolish if the md wanted to clobber a scratch or something. We only care about hard regs. Moreover we don't like the notion of subregs of hard regs. */ if (GET_CODE (tmp) == SUBREG && REG_P (SUBREG_REG (tmp)) && REGNO (SUBREG_REG (tmp)) < FIRST_PSEUDO_REGISTER) return pc_rtx; found = (REG_P (tmp) && REGNO (tmp) < FIRST_PSEUDO_REGISTER); return (found ? tmp : NULL_RTX); } return pc_rtx; } /* It is a tedious task identifying when the flags register is live and when it is safe to optimize. Since we process the instruction stream multiple times, locate and record these live zones by marking the mode of the instructions -- QImode is used on the instruction at which the flags becomes live. HImode is used within the range (exclusive) that the flags are live. Thus the user of the flags is not marked. All other instructions are cleared to VOIDmode. */ /* Used to communicate with flags_set_1. */ static rtx flags_set_1_rtx; static int flags_set_1_set; static void mark_flags_life_zones (rtx flags) { int flags_regno; int flags_nregs; basic_block block; #ifdef HAVE_cc0 /* If we found a flags register on a cc0 host, bail. */ if (flags == NULL_RTX) flags = cc0_rtx; else if (flags != cc0_rtx) flags = pc_rtx; #endif /* Simple cases first: if no flags, clear all modes. If confusing, mark the entire function as being in a flags shadow. */ if (flags == NULL_RTX || flags == pc_rtx) { enum machine_mode mode = (flags ? HImode : VOIDmode); rtx insn; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) PUT_MODE (insn, mode); return; } #ifdef HAVE_cc0 flags_regno = -1; flags_nregs = 1; #else flags_regno = REGNO (flags); flags_nregs = hard_regno_nregs[flags_regno][GET_MODE (flags)]; #endif flags_set_1_rtx = flags; /* Process each basic block. */ FOR_EACH_BB_REVERSE (block) { rtx insn, end; int live; insn = BB_HEAD (block); end = BB_END (block); /* Look out for the (unlikely) case of flags being live across basic block boundaries. */ live = 0; #ifndef HAVE_cc0 { int i; for (i = 0; i < flags_nregs; ++i) live |= REGNO_REG_SET_P (block->global_live_at_start, flags_regno + i); } #endif while (1) { /* Process liveness in reverse order of importance -- alive, death, birth. This lets more important info overwrite the mode of lesser info. */ if (INSN_P (insn)) { #ifdef HAVE_cc0 /* In the cc0 case, death is not marked in reg notes, but is instead the mere use of cc0 when it is alive. */ if (live && reg_mentioned_p (cc0_rtx, PATTERN (insn))) live = 0; #else /* In the hard reg case, we watch death notes. */ if (live && find_regno_note (insn, REG_DEAD, flags_regno)) live = 0; #endif PUT_MODE (insn, (live ? HImode : VOIDmode)); /* In either case, birth is denoted simply by its presence as the destination of a set. */ flags_set_1_set = 0; note_stores (PATTERN (insn), flags_set_1, NULL); if (flags_set_1_set) { live = 1; PUT_MODE (insn, QImode); } } else PUT_MODE (insn, (live ? 
HImode : VOIDmode)); if (insn == end) break; insn = NEXT_INSN (insn); } } } /* A subroutine of mark_flags_life_zones, called through note_stores. */ static void flags_set_1 (rtx x, rtx pat, void *data ATTRIBUTE_UNUSED) { if (GET_CODE (pat) == SET && reg_overlap_mentioned_p (x, flags_set_1_rtx)) flags_set_1_set = 1; } static int *regno_src_regno; /* Indicate how good a choice REG (which appears as a source) is to replace a destination register with. The higher the returned value, the better the choice. The main objective is to avoid using a register that is a candidate for tying to a hard register, since the output might in turn be a candidate to be tied to a different hard register. */ static int replacement_quality (rtx reg) { int src_regno; /* Bad if this isn't a register at all. */ if (!REG_P (reg)) return 0; /* If this register is not meant to get a hard register, it is a poor choice. */ if (REG_LIVE_LENGTH (REGNO (reg)) < 0) return 0; src_regno = regno_src_regno[REGNO (reg)]; /* If it was not copied from another register, it is fine. */ if (src_regno < 0) return 3; /* Copied from a hard register? */ if (src_regno < FIRST_PSEUDO_REGISTER) return 1; /* Copied from a pseudo register - not as bad as from a hard register, yet still cumbersome, since the register live length will be lengthened when the registers get tied. */ return 2; } /* Return 1 if INSN might end a basic block. */ static int perhaps_ends_bb_p (rtx insn) { switch (GET_CODE (insn)) { case CODE_LABEL: case JUMP_INSN: /* These always end a basic block. */ return 1; case CALL_INSN: /* A CALL_INSN might be the last insn of a basic block, if it is inside an EH region or if there are nonlocal gotos. Note that this test is very conservative. */ if (nonlocal_goto_handler_labels) return 1; /* Fall through. */ default: return can_throw_internal (insn); } } /* INSN is a copy from SRC to DEST, both registers, and SRC does not die in INSN. Search forward to see if SRC dies before either it or DEST is modified, but don't scan past the end of a basic block. If so, we can replace SRC with DEST and let SRC die in INSN. This will reduce the number of registers live in that range and may enable DEST to be tied to SRC, thus often saving one register in addition to a register-register copy. */ static int optimize_reg_copy_1 (rtx insn, rtx dest, rtx src) { rtx p, q; rtx note; rtx dest_death = 0; int sregno = REGNO (src); int dregno = REGNO (dest); /* We don't want to mess with hard regs if register classes are small. */ if (sregno == dregno || (SMALL_REGISTER_CLASSES && (sregno < FIRST_PSEUDO_REGISTER || dregno < FIRST_PSEUDO_REGISTER)) /* We don't see all updates to SP if they are in an auto-inc memory reference, so we must disallow this optimization on them. */ || sregno == STACK_POINTER_REGNUM || dregno == STACK_POINTER_REGNUM) return 0; for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p)) { /* ??? We can't scan past the end of a basic block without updating the register lifetime info (REG_DEAD/basic_block_live_at_start). */ if (perhaps_ends_bb_p (p)) break; else if (! INSN_P (p)) continue; if (reg_set_p (src, p) || reg_set_p (dest, p) /* If SRC is an asm-declared register, it must not be replaced in any asm. Unfortunately, the REG_EXPR tree for the asm variable may be absent in the SRC rtx, so we can't check the actual register declaration easily (the asm operand will have it, though). To avoid complicating the test for a rare case, we just don't perform register replacement for a hard reg mentioned in an asm. 
*/ || (sregno < FIRST_PSEUDO_REGISTER && asm_noperands (PATTERN (p)) >= 0 && reg_overlap_mentioned_p (src, PATTERN (p))) /* Don't change a USE of a register. */ || (GET_CODE (PATTERN (p)) == USE && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0)))) break; /* See if all of SRC dies in P. This test is slightly more conservative than it needs to be. */ if ((note = find_regno_note (p, REG_DEAD, sregno)) != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (src)) { int failed = 0; int d_length = 0; int s_length = 0; int d_n_calls = 0; int s_n_calls = 0; /* We can do the optimization. Scan forward from INSN again, replacing regs as we go. Set FAILED if a replacement can't be done. In that case, we can't move the death note for SRC. This should be rare. */ /* Set to stop at next insn. */ for (q = next_real_insn (insn); q != next_real_insn (p); q = next_real_insn (q)) { if (reg_overlap_mentioned_p (src, PATTERN (q))) { /* If SRC is a hard register, we might miss some overlapping registers with validate_replace_rtx, so we would have to undo it. We can't if DEST is present in the insn, so fail in that combination of cases. */ if (sregno < FIRST_PSEUDO_REGISTER && reg_mentioned_p (dest, PATTERN (q))) failed = 1; /* Replace all uses and make sure that the register isn't still present. */ else if (validate_replace_rtx (src, dest, q) && (sregno >= FIRST_PSEUDO_REGISTER || ! reg_overlap_mentioned_p (src, PATTERN (q)))) ; else { validate_replace_rtx (dest, src, q); failed = 1; } } /* For SREGNO, count the total number of insns scanned. For DREGNO, count the total number of insns scanned after passing the death note for DREGNO. */ s_length++; if (dest_death) d_length++; /* If the insn in which SRC dies is a CALL_INSN, don't count it as a call that has been crossed. Otherwise, count it. */ if (q != p && GET_CODE (q) == CALL_INSN) { /* Similarly, total calls for SREGNO, total calls beyond the death note for DREGNO. */ s_n_calls++; if (dest_death) d_n_calls++; } /* If DEST dies here, remove the death note and save it for later. Make sure ALL of DEST dies here; again, this is overly conservative. */ if (dest_death == 0 && (dest_death = find_regno_note (q, REG_DEAD, dregno)) != 0) { if (GET_MODE (XEXP (dest_death, 0)) != GET_MODE (dest)) failed = 1, dest_death = 0; else remove_note (q, dest_death); } } if (! failed) { /* These counters need to be updated if and only if we are going to move the REG_DEAD note. */ if (sregno >= FIRST_PSEUDO_REGISTER) { if (REG_LIVE_LENGTH (sregno) >= 0) { REG_LIVE_LENGTH (sregno) -= s_length; /* REG_LIVE_LENGTH is only an approximation after combine if sched is not run, so make sure that we still have a reasonable value. */ if (REG_LIVE_LENGTH (sregno) < 2) REG_LIVE_LENGTH (sregno) = 2; } REG_N_CALLS_CROSSED (sregno) -= s_n_calls; } /* Move death note of SRC from P to INSN. */ remove_note (p, note); XEXP (note, 1) = REG_NOTES (insn); REG_NOTES (insn) = note; } /* DEST is also dead if INSN has a REG_UNUSED note for DEST. */ if (! dest_death && (dest_death = find_regno_note (insn, REG_UNUSED, dregno))) { PUT_REG_NOTE_KIND (dest_death, REG_DEAD); remove_note (insn, dest_death); } /* Put death note of DEST on P if we saw it die. */ if (dest_death) { XEXP (dest_death, 1) = REG_NOTES (p); REG_NOTES (p) = dest_death; if (dregno >= FIRST_PSEUDO_REGISTER) { /* If and only if we are moving the death note for DREGNO, then we need to update its counters. */ if (REG_LIVE_LENGTH (dregno) >= 0) REG_LIVE_LENGTH (dregno) += d_length; REG_N_CALLS_CROSSED (dregno) += d_n_calls; } } return ! 
failed; } /* If SRC is a hard register which is set or killed in some other way, we can't do this optimization. */ else if (sregno < FIRST_PSEUDO_REGISTER && dead_or_set_p (p, src)) break; } return 0; } /* INSN is a copy of SRC to DEST, in which SRC dies. See if we now have a sequence of insns that modify DEST followed by an insn that sets SRC to DEST in which DEST dies, with no prior modification of DEST. (There is no need to check if the insns in between actually modify DEST. We should not have cases where DEST is not modified, but the optimization is safe if no such modification is detected.) In that case, we can replace all uses of DEST, starting with INSN and ending with the set of SRC to DEST, with SRC. We do not do this optimization if a CALL_INSN is crossed unless SRC already crosses a call or if DEST dies before the copy back to SRC. It is assumed that DEST and SRC are pseudos; it is too complicated to do this for hard registers since the substitutions we may make might fail. */ static void optimize_reg_copy_2 (rtx insn, rtx dest, rtx src) { rtx p, q; rtx set; int sregno = REGNO (src); int dregno = REGNO (dest); for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p)) { /* ??? We can't scan past the end of a basic block without updating the register lifetime info (REG_DEAD/basic_block_live_at_start). */ if (perhaps_ends_bb_p (p)) break; else if (! INSN_P (p)) continue; set = single_set (p); if (set && SET_SRC (set) == dest && SET_DEST (set) == src && find_reg_note (p, REG_DEAD, dest)) { /* We can do the optimization. Scan forward from INSN again, replacing regs as we go. */ /* Set to stop at next insn. */ for (q = insn; q != NEXT_INSN (p); q = NEXT_INSN (q)) if (INSN_P (q)) { if (reg_mentioned_p (dest, PATTERN (q))) PATTERN (q) = replace_rtx (PATTERN (q), dest, src); if (GET_CODE (q) == CALL_INSN) { REG_N_CALLS_CROSSED (dregno)--; REG_N_CALLS_CROSSED (sregno)++; } } remove_note (p, find_reg_note (p, REG_DEAD, dest)); REG_N_DEATHS (dregno)--; remove_note (insn, find_reg_note (insn, REG_DEAD, src)); REG_N_DEATHS (sregno)--; return; } if (reg_set_p (src, p) || find_reg_note (p, REG_DEAD, dest) || (GET_CODE (p) == CALL_INSN && REG_N_CALLS_CROSSED (sregno) == 0)) break; } } /* INSN is a ZERO_EXTEND or SIGN_EXTEND of SRC to DEST. Look if SRC dies there, and if it is only set once, by loading it from memory. If so, try to incorporate the zero/sign extension into the memory read, change SRC to the mode of DEST, and alter the remaining accesses to use the appropriate SUBREG. This allows SRC and DEST to be tied later. */ static void optimize_reg_copy_3 (rtx insn, rtx dest, rtx src) { rtx src_reg = XEXP (src, 0); int src_no = REGNO (src_reg); int dst_no = REGNO (dest); rtx p, set, subreg; enum machine_mode old_mode; if (src_no < FIRST_PSEUDO_REGISTER || dst_no < FIRST_PSEUDO_REGISTER || ! find_reg_note (insn, REG_DEAD, src_reg) || REG_N_DEATHS (src_no) != 1 || REG_N_SETS (src_no) != 1) return; for (p = PREV_INSN (insn); p && ! reg_set_p (src_reg, p); p = PREV_INSN (p)) /* ??? We can't scan past the end of a basic block without updating the register lifetime info (REG_DEAD/basic_block_live_at_start). */ if (perhaps_ends_bb_p (p)) break; if (! p) return; if (! (set = single_set (p)) || !MEM_P (SET_SRC (set)) /* If there's a REG_EQUIV note, this must be an insn that loads an argument. Prefer keeping the note over doing this optimization. 
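(Such argument-loading insns carry a REG_EQUIV note saying the register is equivalent to its incoming stack slot for the whole function, which lets reload use that slot directly instead of allocating a spill slot; keeping that property is usually worth more than folding the extension here.)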
*/ || find_reg_note (p, REG_EQUIV, NULL_RTX) || SET_DEST (set) != src_reg) return; /* Be conservative: although this optimization is also valid for volatile memory references, that could cause trouble in later passes. */ if (MEM_VOLATILE_P (SET_SRC (set))) return; /* Do not use a SUBREG to truncate from one mode to another if truncation is not a nop. */ if (GET_MODE_BITSIZE (GET_MODE (src_reg)) <= GET_MODE_BITSIZE (GET_MODE (src)) && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (src)), GET_MODE_BITSIZE (GET_MODE (src_reg)))) return; old_mode = GET_MODE (src_reg); PUT_MODE (src_reg, GET_MODE (src)); XEXP (src, 0) = SET_SRC (set); /* Include this change in the group so that it's easily undone if one of the changes in the group is invalid. */ validate_change (p, &SET_SRC (set), src, 1); /* Now walk forward making additional replacements. We want to be able to undo all the changes if a later substitution fails. */ subreg = gen_lowpart_SUBREG (old_mode, src_reg); while (p = NEXT_INSN (p), p != insn) { if (! INSN_P (p)) continue; /* Make a tentative change. */ validate_replace_rtx_group (src_reg, subreg, p); } validate_replace_rtx_group (src, src_reg, insn); /* Now see if all the changes are valid. */ if (! apply_change_group ()) { /* One or more changes were no good. Back out everything. */ PUT_MODE (src_reg, old_mode); XEXP (src, 0) = src_reg; } else { rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX); if (note) remove_note (p, note); } } /* If we were not able to update the users of src to use dest directly, try instead moving the value to dest directly before the operation. */ static void copy_src_to_dest (rtx insn, rtx src, rtx dest, int old_max_uid) { rtx seq; rtx link; rtx next; rtx set; rtx move_insn; rtx *p_insn_notes; rtx *p_move_notes; int src_regno; int dest_regno; int bb; int insn_uid; int move_uid; /* A REG_LIVE_LENGTH of -1 indicates the register is equivalent to a constant or memory location and is used infrequently; a REG_LIVE_LENGTH of -2 marks a parameter that is not allocated a register when there is no frame pointer. For now, we just reject them, rather than incrementing the live length. */ if (REG_P (src) && REG_LIVE_LENGTH (REGNO (src)) > 0 && REG_P (dest) && !RTX_UNCHANGING_P (dest) && REG_LIVE_LENGTH (REGNO (dest)) > 0 && (set = single_set (insn)) != NULL_RTX && !reg_mentioned_p (dest, SET_SRC (set)) && GET_MODE (src) == GET_MODE (dest)) { int old_num_regs = reg_rtx_no; /* Generate the src->dest move. */ start_sequence (); emit_move_insn (dest, src); seq = get_insns (); end_sequence (); /* If this sequence uses new registers, we may not use it. */ if (old_num_regs != reg_rtx_no || ! validate_replace_rtx (src, dest, insn)) { /* We have to restore reg_rtx_no to its old value, lest recompute_reg_usage will try to compute the usage of the new regs, yet reg_n_info is not valid for them. */ reg_rtx_no = old_num_regs; return; } emit_insn_before (seq, insn); move_insn = PREV_INSN (insn); p_move_notes = &REG_NOTES (move_insn); p_insn_notes = &REG_NOTES (insn); /* Move any notes mentioning src to the move instruction. */ for (link = REG_NOTES (insn); link != NULL_RTX; link = next) { next = XEXP (link, 1); if (XEXP (link, 0) == src) { *p_move_notes = link; p_move_notes = &XEXP (link, 1); } else { *p_insn_notes = link; p_insn_notes = &XEXP (link, 1); } } *p_move_notes = NULL_RTX; *p_insn_notes = NULL_RTX; /* Is the insn the head of a basic block? If so extend it.
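(The new move was emitted immediately before INSN, so if INSN was recorded in regmove_bb_head as the first insn of its block, the code below makes the move the block head instead.)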
*/ insn_uid = INSN_UID (insn); move_uid = INSN_UID (move_insn); if (insn_uid < old_max_uid) { bb = regmove_bb_head[insn_uid]; if (bb >= 0) { BB_HEAD (BASIC_BLOCK (bb)) = move_insn; regmove_bb_head[insn_uid] = -1; } } /* Update the various register tables. */ dest_regno = REGNO (dest); REG_N_SETS (dest_regno) ++; REG_LIVE_LENGTH (dest_regno)++; if (REGNO_FIRST_UID (dest_regno) == insn_uid) REGNO_FIRST_UID (dest_regno) = move_uid; src_regno = REGNO (src); if (! find_reg_note (move_insn, REG_DEAD, src)) REG_LIVE_LENGTH (src_regno)++; if (REGNO_FIRST_UID (src_regno) == insn_uid) REGNO_FIRST_UID (src_regno) = move_uid; if (REGNO_LAST_UID (src_regno) == insn_uid) REGNO_LAST_UID (src_regno) = move_uid; if (REGNO_LAST_NOTE_UID (src_regno) == insn_uid) REGNO_LAST_NOTE_UID (src_regno) = move_uid; } } /* Return whether REG is set in only one location, and is set to a constant, but is set in a different basic block from INSN (an instructions which uses REG). In this case REG is equivalent to a constant, and we don't want to break that equivalence, because that may increase register pressure and make reload harder. If REG is set in the same basic block as INSN, we don't worry about it, because we'll probably need a register anyhow (??? but what if REG is used in a different basic block as well as this one?). FIRST is the first insn in the function. */ static int reg_is_remote_constant_p (rtx reg, rtx insn, rtx first) { rtx p; if (REG_N_SETS (REGNO (reg)) != 1) return 0; /* Look for the set. */ for (p = LOG_LINKS (insn); p; p = XEXP (p, 1)) { rtx s; if (REG_NOTE_KIND (p) != 0) continue; s = single_set (XEXP (p, 0)); if (s != 0 && REG_P (SET_DEST (s)) && REGNO (SET_DEST (s)) == REGNO (reg)) { /* The register is set in the same basic block. */ return 0; } } for (p = first; p && p != insn; p = NEXT_INSN (p)) { rtx s; if (! INSN_P (p)) continue; s = single_set (p); if (s != 0 && REG_P (SET_DEST (s)) && REGNO (SET_DEST (s)) == REGNO (reg)) { /* This is the instruction which sets REG. If there is a REG_EQUAL note, then REG is equivalent to a constant. */ if (find_reg_note (p, REG_EQUAL, NULL_RTX)) return 1; return 0; } } return 0; } /* INSN is adding a CONST_INT to a REG. We search backwards looking for another add immediate instruction with the same source and dest registers, and if we find one, we change INSN to an increment, and return 1. If no changes are made, we return 0. This changes (set (reg100) (plus reg1 offset1)) ... (set (reg100) (plus reg1 offset2)) to (set (reg100) (plus reg1 offset1)) ... (set (reg100) (plus reg100 offset2-offset1)) */ /* ??? What does this comment mean? */ /* cse disrupts preincrement / postdecrement sequences when it finds a hard register as ultimate source, like the frame pointer. */ static int fixup_match_2 (rtx insn, rtx dst, rtx src, rtx offset, FILE *regmove_dump_file) { rtx p, dst_death = 0; int length, num_calls = 0; /* If SRC dies in INSN, we'd have to move the death note. This is considered to be very unlikely, so we just skip the optimization in this case. */ if (find_regno_note (insn, REG_DEAD, REGNO (src))) return 0; /* Scan backward to find the first instruction that sets DST. */ for (length = 0, p = PREV_INSN (insn); p; p = PREV_INSN (p)) { rtx pset; /* ??? We can't scan past the end of a basic block without updating the register lifetime info (REG_DEAD/basic_block_live_at_start). */ if (perhaps_ends_bb_p (p)) break; else if (! INSN_P (p)) continue; if (find_regno_note (p, REG_DEAD, REGNO (dst))) dst_death = p; if (! 
dst_death) length++; pset = single_set (p); if (pset && SET_DEST (pset) == dst && GET_CODE (SET_SRC (pset)) == PLUS && XEXP (SET_SRC (pset), 0) == src && GET_CODE (XEXP (SET_SRC (pset), 1)) == CONST_INT) { HOST_WIDE_INT newconst = INTVAL (offset) - INTVAL (XEXP (SET_SRC (pset), 1)); rtx add = gen_add3_insn (dst, dst, GEN_INT (newconst)); if (add && validate_change (insn, &PATTERN (insn), add, 0)) { /* Remove the death note for DST from DST_DEATH. */ if (dst_death) { remove_death (REGNO (dst), dst_death); REG_LIVE_LENGTH (REGNO (dst)) += length; REG_N_CALLS_CROSSED (REGNO (dst)) += num_calls; } if (regmove_dump_file) fprintf (regmove_dump_file, "Fixed operand of insn %d.\n", INSN_UID (insn)); #ifdef AUTO_INC_DEC for (p = PREV_INSN (insn); p; p = PREV_INSN (p)) { if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN) break; if (! INSN_P (p)) continue; if (reg_overlap_mentioned_p (dst, PATTERN (p))) { if (try_auto_increment (p, insn, 0, dst, newconst, 0)) return 1; break; } } for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p)) { if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN) break; if (! INSN_P (p)) continue; if (reg_overlap_mentioned_p (dst, PATTERN (p))) { try_auto_increment (p, insn, 0, dst, newconst, 1); break; } } #endif return 1; } } if (reg_set_p (dst, PATTERN (p))) break; /* If we have passed a call instruction, and the pseudo-reg SRC is not already live across a call, then don't perform the optimization. */ /* reg_set_p is overly conservative for CALL_INSNS, thinks that all hard regs are clobbered. Thus, we only use it for src for non-call insns. */ if (GET_CODE (p) == CALL_INSN) { if (! dst_death) num_calls++; if (REG_N_CALLS_CROSSED (REGNO (src)) == 0) break; if (call_used_regs [REGNO (dst)] || find_reg_fusage (p, CLOBBER, dst)) break; } else if (reg_set_p (src, PATTERN (p))) break; } return 0; } /* Main entry for the register move optimization. F is the first instruction. NREGS is one plus the highest pseudo-reg number used in the instruction. REGMOVE_DUMP_FILE is a stream for output of a trace of actions taken (or 0 if none should be output). */ void regmove_optimize (rtx f, int nregs, FILE *regmove_dump_file) { int old_max_uid = get_max_uid (); rtx insn; struct match match; int pass; int i; rtx copy_src, copy_dst; basic_block bb; /* ??? Hack. Regmove doesn't examine the CFG, and gets mightily confused by non-call exceptions ending blocks. */ if (flag_non_call_exceptions) return; /* Find out where a potential flags register is live, and so that we can suppress some optimizations in those zones. */ mark_flags_life_zones (discover_flags_reg ()); regno_src_regno = xmalloc (sizeof *regno_src_regno * nregs); for (i = nregs; --i >= 0; ) regno_src_regno[i] = -1; regmove_bb_head = xmalloc (sizeof (int) * (old_max_uid + 1)); for (i = old_max_uid; i >= 0; i--) regmove_bb_head[i] = -1; FOR_EACH_BB (bb) regmove_bb_head[INSN_UID (BB_HEAD (bb))] = bb->index; /* A forward/backward pass. Replace output operands with input operands. */ for (pass = 0; pass <= 2; pass++) { if (! flag_regmove && pass >= flag_expensive_optimizations) goto done; if (regmove_dump_file) fprintf (regmove_dump_file, "Starting %s pass...\n", pass ? "backward" : "forward"); for (insn = pass ? get_last_insn () : f; insn; insn = pass ? PREV_INSN (insn) : NEXT_INSN (insn)) { rtx set; int op_no, match_no; set = single_set (insn); if (! set) continue; if (flag_expensive_optimizations && ! 
pass && (GET_CODE (SET_SRC (set)) == SIGN_EXTEND || GET_CODE (SET_SRC (set)) == ZERO_EXTEND) && REG_P (XEXP (SET_SRC (set), 0)) && REG_P (SET_DEST (set))) optimize_reg_copy_3 (insn, SET_DEST (set), SET_SRC (set)); if (flag_expensive_optimizations && ! pass && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set))) { /* If this is a register-register copy where SRC is not dead, see if we can optimize it. If this optimization succeeds, it will become a copy where SRC is dead. */ if ((find_reg_note (insn, REG_DEAD, SET_SRC (set)) || optimize_reg_copy_1 (insn, SET_DEST (set), SET_SRC (set))) && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER) { /* Similarly for a pseudo-pseudo copy when SRC is dead. */ if (REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER) optimize_reg_copy_2 (insn, SET_DEST (set), SET_SRC (set)); if (regno_src_regno[REGNO (SET_DEST (set))] < 0 && SET_SRC (set) != SET_DEST (set)) { int srcregno = REGNO (SET_SRC (set)); if (regno_src_regno[srcregno] >= 0) srcregno = regno_src_regno[srcregno]; regno_src_regno[REGNO (SET_DEST (set))] = srcregno; } } } if (! flag_regmove) continue; if (! find_matches (insn, &match)) continue; /* Now scan through the operands looking for a source operand which is supposed to match the destination operand. Then scan forward for an instruction which uses the dest operand. If it dies there, then replace the dest in both operands with the source operand. */ for (op_no = 0; op_no < recog_data.n_operands; op_no++) { rtx src, dst, src_subreg; enum reg_class src_class, dst_class; match_no = match.with[op_no]; /* Nothing to do if the two operands aren't supposed to match. */ if (match_no < 0) continue; src = recog_data.operand[op_no]; dst = recog_data.operand[match_no]; if (!REG_P (src)) continue; src_subreg = src; if (GET_CODE (dst) == SUBREG && GET_MODE_SIZE (GET_MODE (dst)) >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dst)))) { src_subreg = gen_rtx_SUBREG (GET_MODE (SUBREG_REG (dst)), src, SUBREG_BYTE (dst)); dst = SUBREG_REG (dst); } if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER) continue; if (REGNO (src) < FIRST_PSEUDO_REGISTER) { if (match.commutative[op_no] < op_no) regno_src_regno[REGNO (dst)] = REGNO (src); continue; } if (REG_LIVE_LENGTH (REGNO (src)) < 0) continue; /* op_no/src must be a read-only operand, and match_operand/dst must be a write-only operand. */ if (match.use[op_no] != READ || match.use[match_no] != WRITE) continue; if (match.early_clobber[match_no] && count_occurrences (PATTERN (insn), src, 0) > 1) continue; /* Make sure match_operand is the destination. */ if (recog_data.operand[match_no] != SET_DEST (set)) continue; /* If the operands already match, then there is nothing to do. */ if (operands_match_p (src, dst)) continue; /* But in the commutative case, we might find a better match. */ if (match.commutative[op_no] >= 0) { rtx comm = recog_data.operand[match.commutative[op_no]]; if (operands_match_p (comm, dst) && (replacement_quality (comm) >= replacement_quality (src))) continue; } src_class = reg_preferred_class (REGNO (src)); dst_class = reg_preferred_class (REGNO (dst)); if (! regclass_compatible_p (src_class, dst_class)) continue; if (GET_MODE (src) != GET_MODE (dst)) continue; if (fixup_match_1 (insn, set, src, src_subreg, dst, pass, op_no, match_no, regmove_dump_file)) break; } } } /* A backward pass. Replace input operands with output operands. 
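For illustration, with hypothetical pseudo registers: if an insn computes 'r300 = r100 + r200', its constraints require operand 0 to match operand 1, and r100 dies here, we scan backward for the insn that set r100, say 'r100 = mem'; when every check passes we rewrite that setter as 'r300 = mem' and this insn as 'r300 = r300 + r200', removing the need for a copy to satisfy the matching constraint.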
*/ if (regmove_dump_file) fprintf (regmove_dump_file, "Starting backward pass...\n"); for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) { if (INSN_P (insn)) { int op_no, match_no; int success = 0; if (! find_matches (insn, &match)) continue; /* Now scan through the operands looking for a destination operand which is supposed to match a source operand. Then scan backward for an instruction which sets the source operand. If safe, then replace the source operand with the dest operand in both instructions. */ copy_src = NULL_RTX; copy_dst = NULL_RTX; for (op_no = 0; op_no < recog_data.n_operands; op_no++) { rtx set, p, src, dst; rtx src_note, dst_note; int num_calls = 0; enum reg_class src_class, dst_class; int length; match_no = match.with[op_no]; /* Nothing to do if the two operands aren't supposed to match. */ if (match_no < 0) continue; dst = recog_data.operand[match_no]; src = recog_data.operand[op_no]; if (!REG_P (src)) continue; if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER || REG_LIVE_LENGTH (REGNO (dst)) < 0 || RTX_UNCHANGING_P (dst) || GET_MODE (src) != GET_MODE (dst)) continue; /* If the operands already match, then there is nothing to do. */ if (operands_match_p (src, dst)) continue; if (match.commutative[op_no] >= 0) { rtx comm = recog_data.operand[match.commutative[op_no]]; if (operands_match_p (comm, dst)) continue; } set = single_set (insn); if (! set) continue; /* Note that single_set ignores parts of a parallel set for which one of the destinations is REG_UNUSED. We can't handle that here, since we can wind up rewriting things such that a single register is set twice within a single parallel. */ if (reg_set_p (src, insn)) continue; /* match_no/dst must be a write-only operand, and operand_operand/src must be a read-only operand. */ if (match.use[op_no] != READ || match.use[match_no] != WRITE) continue; if (match.early_clobber[match_no] && count_occurrences (PATTERN (insn), src, 0) > 1) continue; /* Make sure match_no is the destination. */ if (recog_data.operand[match_no] != SET_DEST (set)) continue; if (REGNO (src) < FIRST_PSEUDO_REGISTER) { if (GET_CODE (SET_SRC (set)) == PLUS && GET_CODE (XEXP (SET_SRC (set), 1)) == CONST_INT && XEXP (SET_SRC (set), 0) == src && fixup_match_2 (insn, dst, src, XEXP (SET_SRC (set), 1), regmove_dump_file)) break; continue; } src_class = reg_preferred_class (REGNO (src)); dst_class = reg_preferred_class (REGNO (dst)); if (! (src_note = find_reg_note (insn, REG_DEAD, src))) { /* We used to force the copy here like in other cases, but it produces worse code, as it eliminates no copy instructions and the copy emitted will be produced by reload anyway. On patterns with multiple alternatives, there may be better solution available. In particular this change produced slower code for numeric i387 programs. */ continue; } if (! regclass_compatible_p (src_class, dst_class)) { if (!copy_src) { copy_src = src; copy_dst = dst; } continue; } /* Can not modify an earlier insn to set dst if this insn uses an old value in the source. */ if (reg_overlap_mentioned_p (dst, SET_SRC (set))) { if (!copy_src) { copy_src = src; copy_dst = dst; } continue; } /* If src is set once in a different basic block, and is set equal to a constant, then do not use it for this optimization, as this would make it no longer equivalent to a constant. 
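(Rewriting the remote setter to target this operand would leave a value that is live across blocks but no longer simply equal to the constant, so reload could not just re-load the constant if the register has to be spilled, and register pressure would tend to rise.)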
*/ if (reg_is_remote_constant_p (src, insn, f)) { if (!copy_src) { copy_src = src; copy_dst = dst; } continue; } if (regmove_dump_file) fprintf (regmove_dump_file, "Could fix operand %d of insn %d matching operand %d.\n", op_no, INSN_UID (insn), match_no); /* Scan backward to find the first instruction that uses the input operand. If the operand is set here, then replace it in both instructions with match_no. */ for (length = 0, p = PREV_INSN (insn); p; p = PREV_INSN (p)) { rtx pset; /* ??? We can't scan past the end of a basic block without updating the register lifetime info (REG_DEAD/basic_block_live_at_start). */ if (perhaps_ends_bb_p (p)) break; else if (! INSN_P (p)) continue; length++; /* ??? See if all of SRC is set in P. This test is much more conservative than it needs to be. */ pset = single_set (p); if (pset && SET_DEST (pset) == src) { /* We use validate_replace_rtx, in case there are multiple identical source operands. All of them have to be changed at the same time. */ if (validate_replace_rtx (src, dst, insn)) { if (validate_change (p, &SET_DEST (pset), dst, 0)) success = 1; else { /* Change all source operands back. This modifies the dst as a side-effect. */ validate_replace_rtx (dst, src, insn); /* Now make sure the dst is right. */ validate_change (insn, recog_data.operand_loc[match_no], dst, 0); } } break; } if (reg_overlap_mentioned_p (src, PATTERN (p)) || reg_overlap_mentioned_p (dst, PATTERN (p))) break; /* If we have passed a call instruction, and the pseudo-reg DST is not already live across a call, then don't perform the optimization. */ if (GET_CODE (p) == CALL_INSN) { num_calls++; if (REG_N_CALLS_CROSSED (REGNO (dst)) == 0) break; } } if (success) { int dstno, srcno; /* Remove the death note for SRC from INSN. */ remove_note (insn, src_note); /* Move the death note for SRC to P if it is used there. */ if (reg_overlap_mentioned_p (src, PATTERN (p))) { XEXP (src_note, 1) = REG_NOTES (p); REG_NOTES (p) = src_note; } /* If there is a REG_DEAD note for DST on P, then remove it, because DST is now set there. */ if ((dst_note = find_reg_note (p, REG_DEAD, dst))) remove_note (p, dst_note); dstno = REGNO (dst); srcno = REGNO (src); REG_N_SETS (dstno)++; REG_N_SETS (srcno)--; REG_N_CALLS_CROSSED (dstno) += num_calls; REG_N_CALLS_CROSSED (srcno) -= num_calls; REG_LIVE_LENGTH (dstno) += length; if (REG_LIVE_LENGTH (srcno) >= 0) { REG_LIVE_LENGTH (srcno) -= length; /* REG_LIVE_LENGTH is only an approximation after combine if sched is not run, so make sure that we still have a reasonable value. */ if (REG_LIVE_LENGTH (srcno) < 2) REG_LIVE_LENGTH (srcno) = 2; } if (regmove_dump_file) fprintf (regmove_dump_file, "Fixed operand %d of insn %d matching operand %d.\n", op_no, INSN_UID (insn), match_no); break; } } /* If we weren't able to replace any of the alternatives, try an alternative approach of copying the source to the destination. */ if (!success && copy_src != NULL_RTX) copy_src_to_dest (insn, copy_src, copy_dst, old_max_uid); } } /* In fixup_match_1, some insns may have been inserted after basic block ends. Fix that here. */ FOR_EACH_BB (bb) { rtx end = BB_END (bb); rtx new = end; rtx next = NEXT_INSN (new); while (next != 0 && INSN_UID (next) >= old_max_uid && (bb->next_bb == EXIT_BLOCK_PTR || BB_HEAD (bb->next_bb) != next)) new = next, next = NEXT_INSN (new); BB_END (bb) = new; } done: /* Clean up. */ free (regno_src_regno); free (regmove_bb_head); } /* Returns nonzero if INSN's pattern has matching constraints for any operand. 
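(A matching constraint is a digit in an operand's constraint string; for example, a hypothetical two-address pattern whose operand 0 has constraint "=r" and whose operand 1 has constraint "0" requires operand 1 to end up in the same register as operand 0, and such pairs are exactly what this pass tries to satisfy without an extra copy.)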
Returns 0 if INSN can't be recognized, or if the alternative can't be determined. Initialize the info in MATCHP based on the constraints. */ static int find_matches (rtx insn, struct match *matchp) { int likely_spilled[MAX_RECOG_OPERANDS]; int op_no; int any_matches = 0; extract_insn (insn); if (! constrain_operands (0)) return 0; /* Must initialize this before main loop, because the code for the commutative case may set matches for operands other than the current one. */ for (op_no = recog_data.n_operands; --op_no >= 0; ) matchp->with[op_no] = matchp->commutative[op_no] = -1; for (op_no = 0; op_no < recog_data.n_operands; op_no++) { const char *p; char c; int i = 0; p = recog_data.constraints[op_no]; likely_spilled[op_no] = 0; matchp->use[op_no] = READ; matchp->early_clobber[op_no] = 0; if (*p == '=') matchp->use[op_no] = WRITE; else if (*p == '+') matchp->use[op_no] = READWRITE; for (;*p && i < which_alternative; p++) if (*p == ',') i++; while ((c = *p) != '\0' && c != ',') { switch (c) { case '=': break; case '+': break; case '&': matchp->early_clobber[op_no] = 1; break; case '%': matchp->commutative[op_no] = op_no + 1; matchp->commutative[op_no + 1] = op_no; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { char *end; unsigned long match_ul = strtoul (p, &end, 10); int match = match_ul; p = end; if (match < op_no && likely_spilled[match]) continue; matchp->with[op_no] = match; any_matches = 1; if (matchp->commutative[op_no] >= 0) matchp->with[matchp->commutative[op_no]] = match; } continue; case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'h': case 'j': case 'k': case 'l': case 'p': case 'q': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case 'A': case 'B': case 'C': case 'D': case 'W': case 'Y': case 'Z': if (CLASS_LIKELY_SPILLED_P (REG_CLASS_FROM_CONSTRAINT ((unsigned char) c, p) )) likely_spilled[op_no] = 1; break; } p += CONSTRAINT_LEN (c, p); } } return any_matches; } /* Try to replace all occurrences of DST_REG with SRC in LOC, that is assumed to be in INSN. */ static void replace_in_call_usage (rtx *loc, unsigned int dst_reg, rtx src, rtx insn) { rtx x = *loc; enum rtx_code code; const char *fmt; int i, j; if (! x) return; code = GET_CODE (x); if (code == REG) { if (REGNO (x) != dst_reg) return; validate_change (insn, loc, src, 1); return; } /* Process each of our operands recursively. */ fmt = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++) if (*fmt == 'e') replace_in_call_usage (&XEXP (x, i), dst_reg, src, insn); else if (*fmt == 'E') for (j = 0; j < XVECLEN (x, i); j++) replace_in_call_usage (& XVECEXP (x, i, j), dst_reg, src, insn); } /* Try to replace output operand DST in SET, with input operand SRC. SET is the only set in INSN. INSN has just been recognized and constrained. SRC is operand number OPERAND_NUMBER in INSN. DST is operand number MATCH_NUMBER in INSN. If BACKWARD is nonzero, we have been called in a backward pass. Return nonzero for success. */ static int fixup_match_1 (rtx insn, rtx set, rtx src, rtx src_subreg, rtx dst, int backward, int operand_number, int match_number, FILE *regmove_dump_file) { rtx p; rtx post_inc = 0, post_inc_set = 0, search_end = 0; int success = 0; int num_calls = 0, s_num_calls = 0; enum rtx_code code = NOTE; HOST_WIDE_INT insn_const = 0, newconst = 0; rtx overlap = 0; /* need to move insn ? 
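(set to the first insn found to mention SRC before any death note for SRC; if the optimization then succeeds, INSN itself is moved down, to just before the insn where DST dies, so that the two lifetimes no longer conflict)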
*/ rtx src_note = find_reg_note (insn, REG_DEAD, src), dst_note = NULL_RTX; int length, s_length; /* If SRC is marked as unchanging, we may not change it. ??? Maybe we could get better code by removing the unchanging bit instead, and changing it back if we don't succeed? */ if (RTX_UNCHANGING_P (src)) return 0; if (! src_note) { /* Look for (set (regX) (op regA constX)) (set (regY) (op regA constY)) and change that to (set (regA) (op regA constX)). (set (regY) (op regA constY-constX)). This works for add and shift operations, if regA is dead after or set by the second insn. */ code = GET_CODE (SET_SRC (set)); if ((code == PLUS || code == LSHIFTRT || code == ASHIFT || code == ASHIFTRT) && XEXP (SET_SRC (set), 0) == src && GET_CODE (XEXP (SET_SRC (set), 1)) == CONST_INT) insn_const = INTVAL (XEXP (SET_SRC (set), 1)); else if (! stable_and_no_regs_but_for_p (SET_SRC (set), src, dst)) return 0; else /* We might find a src_note while scanning. */ code = NOTE; } if (regmove_dump_file) fprintf (regmove_dump_file, "Could fix operand %d of insn %d matching operand %d.\n", operand_number, INSN_UID (insn), match_number); /* If SRC is equivalent to a constant set in a different basic block, then do not use it for this optimization. We want the equivalence so that if we have to reload this register, we can reload the constant, rather than extending the lifespan of the register. */ if (reg_is_remote_constant_p (src, insn, get_insns ())) return 0; /* Scan forward to find the next instruction that uses the output operand. If the operand dies here, then replace it in both instructions with operand_number. */ for (length = s_length = 0, p = NEXT_INSN (insn); p; p = NEXT_INSN (p)) { if (GET_CODE (p) == CALL_INSN) replace_in_call_usage (& CALL_INSN_FUNCTION_USAGE (p), REGNO (dst), src, p); /* ??? We can't scan past the end of a basic block without updating the register lifetime info (REG_DEAD/basic_block_live_at_start). */ if (perhaps_ends_bb_p (p)) break; else if (! INSN_P (p)) continue; length++; if (src_note) s_length++; if (reg_set_p (src, p) || reg_set_p (dst, p) || (GET_CODE (PATTERN (p)) == USE && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0)))) break; /* See if all of DST dies in P. This test is slightly more conservative than it needs to be. */ if ((dst_note = find_regno_note (p, REG_DEAD, REGNO (dst))) && (GET_MODE (XEXP (dst_note, 0)) == GET_MODE (dst))) { /* If we would be moving INSN, check that we won't move it into the shadow of a live flags register. */ /* ??? We only try to move it in front of P, although we could move it anywhere between OVERLAP and P. */ if (overlap && GET_MODE (PREV_INSN (p)) != VOIDmode) break; if (! src_note) { rtx q; rtx set2 = NULL_RTX; /* If an optimization is done, the value of SRC while P is executed will be changed. Check that this is OK. */ if (reg_overlap_mentioned_p (src, PATTERN (p))) break; for (q = p; q; q = NEXT_INSN (q)) { /* ??? We can't scan past the end of a basic block without updating the register lifetime info (REG_DEAD/basic_block_live_at_start). */ if (perhaps_ends_bb_p (q)) { q = 0; break; } else if (! INSN_P (q)) continue; else if (reg_overlap_mentioned_p (src, PATTERN (q)) || reg_set_p (src, q)) break; } if (q) set2 = single_set (q); if (! q || ! set2 || GET_CODE (SET_SRC (set2)) != code || XEXP (SET_SRC (set2), 0) != src || GET_CODE (XEXP (SET_SRC (set2), 1)) != CONST_INT || (SET_DEST (set2) != src && !
find_reg_note (q, REG_DEAD, src))) { /* If this is a PLUS, we can still save a register by doing src += insn_const; P; src -= insn_const; . This also gives opportunities for subsequent optimizations in the backward pass, so do it there. */ if (code == PLUS && backward /* Don't do this if we can likely tie DST to SET_DEST of P later; we can't do this tying here if we got a hard register. */ && ! (dst_note && ! REG_N_CALLS_CROSSED (REGNO (dst)) && single_set (p) && REG_P (SET_DEST (single_set (p))) && (REGNO (SET_DEST (single_set (p))) < FIRST_PSEUDO_REGISTER)) /* We may only emit an insn directly after P if we are not in the shadow of a live flags register. */ && GET_MODE (p) == VOIDmode) { search_end = q; q = insn; set2 = set; newconst = -insn_const; code = MINUS; } else break; } else { newconst = INTVAL (XEXP (SET_SRC (set2), 1)) - insn_const; /* Reject out of range shifts. */ if (code != PLUS && (newconst < 0 || ((unsigned HOST_WIDE_INT) newconst >= (GET_MODE_BITSIZE (GET_MODE (SET_SRC (set2))))))) break; if (code == PLUS) { post_inc = q; if (SET_DEST (set2) != src) post_inc_set = set2; } } /* We use 1 as last argument to validate_change so that all changes are accepted or rejected together by apply_change_group when it is called by validate_replace_rtx . */ validate_change (q, &XEXP (SET_SRC (set2), 1), GEN_INT (newconst), 1); } validate_change (insn, recog_data.operand_loc[match_number], src, 1); if (validate_replace_rtx (dst, src_subreg, p)) success = 1; break; } if (reg_overlap_mentioned_p (dst, PATTERN (p))) break; if (! src_note && reg_overlap_mentioned_p (src, PATTERN (p))) { /* INSN was already checked to be movable wrt. the registers that it sets / uses when we found no REG_DEAD note for src on it, but it still might clobber the flags register. We'll have to check that we won't insert it into the shadow of a live flags register when we finally know where we are to move it. */ overlap = p; src_note = find_reg_note (p, REG_DEAD, src); } /* If we have passed a call instruction, and the pseudo-reg SRC is not already live across a call, then don't perform the optimization. */ if (GET_CODE (p) == CALL_INSN) { if (REG_N_CALLS_CROSSED (REGNO (src)) == 0) break; num_calls++; if (src_note) s_num_calls++; } } if (! success) return 0; /* Remove the death note for DST from P. */ remove_note (p, dst_note); if (code == MINUS) { post_inc = emit_insn_after (copy_rtx (PATTERN (insn)), p); if ((HAVE_PRE_INCREMENT || HAVE_PRE_DECREMENT) && search_end && try_auto_increment (search_end, post_inc, 0, src, newconst, 1)) post_inc = 0; validate_change (insn, &XEXP (SET_SRC (set), 1), GEN_INT (insn_const), 0); REG_N_SETS (REGNO (src))++; REG_LIVE_LENGTH (REGNO (src))++; } if (overlap) { /* The lifetime of src and dest overlap, but we can change this by moving insn. */ rtx pat = PATTERN (insn); if (src_note) remove_note (overlap, src_note); if ((HAVE_POST_INCREMENT || HAVE_POST_DECREMENT) && code == PLUS && try_auto_increment (overlap, insn, 0, src, insn_const, 0)) insn = overlap; else { rtx notes = REG_NOTES (insn); emit_insn_after_with_line_notes (pat, PREV_INSN (p), insn); delete_insn (insn); /* emit_insn_after_with_line_notes has no return value, so search for the new insn. */ insn = p; while (! INSN_P (insn) || PATTERN (insn) != pat) insn = PREV_INSN (insn); REG_NOTES (insn) = notes; } } /* Sometimes we'd generate src = const; src += n; if so, replace the instruction that set src in the first place. */ if (! 
overlap && (code == PLUS || code == MINUS)) { rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX); rtx q, set2 = NULL_RTX; int num_calls2 = 0, s_length2 = 0; if (note && CONSTANT_P (XEXP (note, 0))) { for (q = PREV_INSN (insn); q; q = PREV_INSN (q)) { /* ??? We can't scan past the end of a basic block without updating the register lifetime info (REG_DEAD/basic_block_live_at_start). */ if (perhaps_ends_bb_p (q)) { q = 0; break; } else if (! INSN_P (q)) continue; s_length2++; if (reg_set_p (src, q)) { set2 = single_set (q); break; } if (reg_overlap_mentioned_p (src, PATTERN (q))) { q = 0; break; } if (GET_CODE (p) == CALL_INSN) num_calls2++; } if (q && set2 && SET_DEST (set2) == src && CONSTANT_P (SET_SRC (set2)) && validate_change (insn, &SET_SRC (set), XEXP (note, 0), 0)) { delete_insn (q); REG_N_SETS (REGNO (src))--; REG_N_CALLS_CROSSED (REGNO (src)) -= num_calls2; REG_LIVE_LENGTH (REGNO (src)) -= s_length2; insn_const = 0; } } } if ((HAVE_PRE_INCREMENT || HAVE_PRE_DECREMENT) && (code == PLUS || code == MINUS) && insn_const && try_auto_increment (p, insn, 0, src, insn_const, 1)) insn = p; else if ((HAVE_POST_INCREMENT || HAVE_POST_DECREMENT) && post_inc && try_auto_increment (p, post_inc, post_inc_set, src, newconst, 0)) post_inc = 0; /* If post_inc still prevails, try to find an insn where it can be used as a pre-in/decrement. If code is MINUS, this was already tried. */ if (post_inc && code == PLUS /* Check that newconst is likely to be usable in a pre-in/decrement before starting the search. */ && ((HAVE_PRE_INCREMENT && newconst > 0 && newconst <= MOVE_MAX) || (HAVE_PRE_DECREMENT && newconst < 0 && newconst >= -MOVE_MAX)) && exact_log2 (newconst)) { rtx q, inc_dest; inc_dest = post_inc_set ? SET_DEST (post_inc_set) : src; for (q = post_inc; (q = NEXT_INSN (q)); ) { /* ??? We can't scan past the end of a basic block without updating the register lifetime info (REG_DEAD/basic_block_live_at_start). */ if (perhaps_ends_bb_p (q)) break; else if (! INSN_P (q)) continue; else if (src != inc_dest && (reg_overlap_mentioned_p (src, PATTERN (q)) || reg_set_p (src, q))) break; else if (reg_set_p (inc_dest, q)) break; else if (reg_overlap_mentioned_p (inc_dest, PATTERN (q))) { try_auto_increment (q, post_inc, post_inc_set, inc_dest, newconst, 1); break; } } } /* Move the death note for DST to INSN if it is used there. */ if (reg_overlap_mentioned_p (dst, PATTERN (insn))) { XEXP (dst_note, 1) = REG_NOTES (insn); REG_NOTES (insn) = dst_note; } if (src_note) { /* Move the death note for SRC from INSN to P. */ if (! overlap) remove_note (insn, src_note); XEXP (src_note, 1) = REG_NOTES (p); REG_NOTES (p) = src_note; REG_N_CALLS_CROSSED (REGNO (src)) += s_num_calls; } REG_N_SETS (REGNO (src))++; REG_N_SETS (REGNO (dst))--; REG_N_CALLS_CROSSED (REGNO (dst)) -= num_calls; REG_LIVE_LENGTH (REGNO (src)) += s_length; if (REG_LIVE_LENGTH (REGNO (dst)) >= 0) { REG_LIVE_LENGTH (REGNO (dst)) -= length; /* REG_LIVE_LENGTH is only an approximation after combine if sched is not run, so make sure that we still have a reasonable value. */ if (REG_LIVE_LENGTH (REGNO (dst)) < 2) REG_LIVE_LENGTH (REGNO (dst)) = 2; } if (regmove_dump_file) fprintf (regmove_dump_file, "Fixed operand %d of insn %d matching operand %d.\n", operand_number, INSN_UID (insn), match_number); return 1; } /* Return nonzero if X is stable and mentions no registers but for mentioning SRC or mentioning / changing DST . If in doubt, presume it is unstable. 
The rationale is that we want to check if we can move an insn easily while just paying attention to SRC and DST. A register is considered stable if it has the RTX_UNCHANGING_P bit set, but that would still leave the burden to update REG_DEAD / REG_UNUSED notes, so we don't want any registers but SRC and DST. */ static int stable_and_no_regs_but_for_p (rtx x, rtx src, rtx dst) { RTX_CODE code = GET_CODE (x); switch (GET_RTX_CLASS (code)) { case RTX_UNARY: case RTX_BIN_ARITH: case RTX_COMM_ARITH: case RTX_COMPARE: case RTX_COMM_COMPARE: case RTX_TERNARY: case RTX_BITFIELD_OPS: { int i; const char *fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e' && ! stable_and_no_regs_but_for_p (XEXP (x, i), src, dst)) return 0; return 1; } case RTX_OBJ: if (code == REG) return x == src || x == dst; /* If this is a MEM, look inside - there might be a register hidden in the address of an unchanging MEM. */ if (code == MEM && ! stable_and_no_regs_but_for_p (XEXP (x, 0), src, dst)) return 0; /* Fall through. */ default: return ! rtx_unstable_p (x); } } /* Track stack adjustments and stack memory references. Attempt to reduce the number of stack adjustments by back-propagating across the memory references. This is intended primarily for use with targets that do not define ACCUMULATE_OUTGOING_ARGS. It is of significantly more value to targets that define PREFERRED_STACK_BOUNDARY more aligned than STACK_BOUNDARY (e.g. x86), or if not all registers can be pushed (e.g. x86 fp regs) which would ordinarily have to be implemented as a sub/mov pair due to restrictions in calls.c. Propagation stops when any of the insns that need adjusting are (a) no longer valid because we've exceeded their range, (b) a non-trivial push instruction, or (c) a call instruction. Restriction B is based on the assumption that push instructions are smaller or faster. If a port really wants to remove all pushes, it should have defined ACCUMULATE_OUTGOING_ARGS. The one exception that is made is for an add immediately followed by a push. */ /* This structure records stack memory references between stack adjusting instructions. */ struct csa_memlist { HOST_WIDE_INT sp_offset; rtx insn, *mem; struct csa_memlist *next; }; static int stack_memref_p (rtx); static rtx single_set_for_csa (rtx); static void free_csa_memlist (struct csa_memlist *); static struct csa_memlist *record_one_stack_memref (rtx, rtx *, struct csa_memlist *); static int try_apply_stack_adjustment (rtx, struct csa_memlist *, HOST_WIDE_INT, HOST_WIDE_INT); static void combine_stack_adjustments_for_block (basic_block); static int record_stack_memrefs (rtx *, void *); /* Main entry point for stack adjustment combination. */ void combine_stack_adjustments (void) { basic_block bb; FOR_EACH_BB (bb) combine_stack_adjustments_for_block (bb); } /* Recognize a MEM of the form (sp) or (plus sp const). */ static int stack_memref_p (rtx x) { if (!MEM_P (x)) return 0; x = XEXP (x, 0); if (x == stack_pointer_rtx) return 1; if (GET_CODE (x) == PLUS && XEXP (x, 0) == stack_pointer_rtx && GET_CODE (XEXP (x, 1)) == CONST_INT) return 1; return 0; } /* Recognize either normal single_set or the hack in i386.md for tying fp and sp adjustments. 
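(The i386.md hack is, roughly, a PARALLEL whose first element is the interesting SET and whose remaining elements are only CLOBBERs, USEs, or no-op sets of the form (set (reg A) (reg A)); as a purely illustrative, hypothetical example, [(set (reg sp) (plus (reg sp) (const_int -4))) (set (reg bp) (reg bp))] is treated as if it were just the stack-pointer SET.)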
*/ static rtx single_set_for_csa (rtx insn) { int i; rtx tmp = single_set (insn); if (tmp) return tmp; if (GET_CODE (insn) != INSN || GET_CODE (PATTERN (insn)) != PARALLEL) return NULL_RTX; tmp = PATTERN (insn); if (GET_CODE (XVECEXP (tmp, 0, 0)) != SET) return NULL_RTX; for (i = 1; i < XVECLEN (tmp, 0); ++i) { rtx this = XVECEXP (tmp, 0, i); /* The special case is allowing a no-op set. */ if (GET_CODE (this) == SET && SET_SRC (this) == SET_DEST (this)) ; else if (GET_CODE (this) != CLOBBER && GET_CODE (this) != USE) return NULL_RTX; } return XVECEXP (tmp, 0, 0); } /* Free the list of csa_memlist nodes. */ static void free_csa_memlist (struct csa_memlist *memlist) { struct csa_memlist *next; for (; memlist ; memlist = next) { next = memlist->next; free (memlist); } } /* Create a new csa_memlist node from the given memory reference. It is already known that the memory is stack_memref_p. */ static struct csa_memlist * record_one_stack_memref (rtx insn, rtx *mem, struct csa_memlist *next_memlist) { struct csa_memlist *ml; ml = xmalloc (sizeof (*ml)); if (XEXP (*mem, 0) == stack_pointer_rtx) ml->sp_offset = 0; else ml->sp_offset = INTVAL (XEXP (XEXP (*mem, 0), 1)); ml->insn = insn; ml->mem = mem; ml->next = next_memlist; return ml; } /* Attempt to apply ADJUST to the stack adjusting insn INSN, as well as each of the memories in MEMLIST. Return true on success. */ static int try_apply_stack_adjustment (rtx insn, struct csa_memlist *memlist, HOST_WIDE_INT new_adjust, HOST_WIDE_INT delta) { struct csa_memlist *ml; rtx set; set = single_set_for_csa (insn); validate_change (insn, &XEXP (SET_SRC (set), 1), GEN_INT (new_adjust), 1); for (ml = memlist; ml ; ml = ml->next) validate_change (ml->insn, ml->mem, replace_equiv_address_nv (*ml->mem, plus_constant (stack_pointer_rtx, ml->sp_offset - delta)), 1); if (apply_change_group ()) { /* Succeeded. Update our knowledge of the memory references. */ for (ml = memlist; ml ; ml = ml->next) ml->sp_offset -= delta; return 1; } else return 0; } /* Called via for_each_rtx and used to record all stack memory references in the insn and discard all other stack pointer references. */ struct record_stack_memrefs_data { rtx insn; struct csa_memlist *memlist; }; static int record_stack_memrefs (rtx *xp, void *data) { rtx x = *xp; struct record_stack_memrefs_data *d = (struct record_stack_memrefs_data *) data; if (!x) return 0; switch (GET_CODE (x)) { case MEM: if (!reg_mentioned_p (stack_pointer_rtx, x)) return -1; /* We are not able to handle correctly all possible memrefs containing stack pointer, so this check is necessary. */ if (stack_memref_p (x)) { d->memlist = record_one_stack_memref (d->insn, xp, d->memlist); return -1; } return 1; case REG: /* ??? We want be able to handle non-memory stack pointer references later. For now just discard all insns referring to stack pointer outside mem expressions. We would probably want to teach validate_replace to simplify expressions first. We can't just compare with STACK_POINTER_RTX because the reference to the stack pointer might be in some other mode. In particular, an explicit clobber in an asm statement will result in a QImode clobber. */ if (REGNO (x) == STACK_POINTER_REGNUM) return 1; break; default: break; } return 0; } /* Subroutine of combine_stack_adjustments, called for each basic block. 
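For illustration on a downward-growing stack, with made-up offsets: the sequence 'sp = sp - 8; mem[sp + 4] = x; sp = sp - 12' can become 'sp = sp - 20; mem[sp + 16] = x', folding the second allocation into the first adjustment and rewriting the recorded stack memory reference; that is the kind of rewrite attempted below.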
*/ static void combine_stack_adjustments_for_block (basic_block bb) { HOST_WIDE_INT last_sp_adjust = 0; rtx last_sp_set = NULL_RTX; struct csa_memlist *memlist = NULL; rtx insn, next, set; struct record_stack_memrefs_data data; bool end_of_block = false; for (insn = BB_HEAD (bb); !end_of_block ; insn = next) { end_of_block = insn == BB_END (bb); next = NEXT_INSN (insn); if (! INSN_P (insn)) continue; set = single_set_for_csa (insn); if (set) { rtx dest = SET_DEST (set); rtx src = SET_SRC (set); /* Find constant additions to the stack pointer. */ if (dest == stack_pointer_rtx && GET_CODE (src) == PLUS && XEXP (src, 0) == stack_pointer_rtx && GET_CODE (XEXP (src, 1)) == CONST_INT) { HOST_WIDE_INT this_adjust = INTVAL (XEXP (src, 1)); /* If we've not seen an adjustment previously, record it now and continue. */ if (! last_sp_set) { last_sp_set = insn; last_sp_adjust = this_adjust; continue; } /* If not all recorded memrefs can be adjusted, or the adjustment is now too large for a constant addition, we cannot merge the two stack adjustments. Also we need to be careful to not move stack pointer such that we create stack accesses outside the allocated area. We can combine an allocation into the first insn, or a deallocation into the second insn. We can not combine an allocation followed by a deallocation. The only somewhat frequent occurrence of the later is when a function allocates a stack frame but does not use it. For this case, we would need to analyze rtl stream to be sure that allocated area is really unused. This means not only checking the memory references, but also all registers or global memory references possibly containing a stack frame address. Perhaps the best way to address this problem is to teach gcc not to allocate stack for objects never used. */ /* Combine an allocation into the first instruction. */ if (STACK_GROWS_DOWNWARD ? this_adjust <= 0 : this_adjust >= 0) { if (try_apply_stack_adjustment (last_sp_set, memlist, last_sp_adjust + this_adjust, this_adjust)) { /* It worked! */ delete_insn (insn); last_sp_adjust += this_adjust; continue; } } /* Otherwise we have a deallocation. Do not combine with a previous allocation. Combine into the second insn. */ else if (STACK_GROWS_DOWNWARD ? last_sp_adjust >= 0 : last_sp_adjust <= 0) { if (try_apply_stack_adjustment (insn, memlist, last_sp_adjust + this_adjust, -last_sp_adjust)) { /* It worked! */ delete_insn (last_sp_set); last_sp_set = insn; last_sp_adjust += this_adjust; free_csa_memlist (memlist); memlist = NULL; continue; } } /* Combination failed. Restart processing from here. If deallocation+allocation conspired to cancel, we can delete the old deallocation insn. */ if (last_sp_set && last_sp_adjust == 0) delete_insn (insn); free_csa_memlist (memlist); memlist = NULL; last_sp_set = insn; last_sp_adjust = this_adjust; continue; } /* Find a predecrement of exactly the previous adjustment and turn it into a direct store. Obviously we can't do this if there were any intervening uses of the stack pointer. */ if (memlist == NULL && MEM_P (dest) && ((GET_CODE (XEXP (dest, 0)) == PRE_DEC && (last_sp_adjust == (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (dest)))) || (GET_CODE (XEXP (dest, 0)) == PRE_MODIFY && GET_CODE (XEXP (XEXP (dest, 0), 1)) == PLUS && XEXP (XEXP (XEXP (dest, 0), 1), 0) == stack_pointer_rtx && (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1)) == CONST_INT) && (INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1)) == -last_sp_adjust))) && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx && ! 
reg_mentioned_p (stack_pointer_rtx, src) && memory_address_p (GET_MODE (dest), stack_pointer_rtx) && validate_change (insn, &SET_DEST (set), replace_equiv_address (dest, stack_pointer_rtx), 0)) { delete_insn (last_sp_set); free_csa_memlist (memlist); memlist = NULL; last_sp_set = NULL_RTX; last_sp_adjust = 0; continue; } } data.insn = insn; data.memlist = memlist; if (GET_CODE (insn) != CALL_INSN && last_sp_set && !for_each_rtx (&PATTERN (insn), record_stack_memrefs, &data)) { memlist = data.memlist; continue; } memlist = data.memlist; /* Otherwise, we were not able to process the instruction. Do not continue collecting data across such a one. */ if (last_sp_set && (GET_CODE (insn) == CALL_INSN || reg_mentioned_p (stack_pointer_rtx, PATTERN (insn)))) { if (last_sp_set && last_sp_adjust == 0) delete_insn (last_sp_set); free_csa_memlist (memlist); memlist = NULL; last_sp_set = NULL_RTX; last_sp_adjust = 0; } } if (last_sp_set && last_sp_adjust == 0) delete_insn (last_sp_set); } /* Register renaming for the GNU compiler. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define REG_OK_STRICT #ifdef ONE_COMPILATION_UNIT #undef REG_OK_FOR_INDEX_P #undef REG_OK_FOR_BASE_P #undef GO_IF_LEGITIMATE_ADDRESS #define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_STRICT_P (X) #define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P (X) #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ do { \ if (legitimate_address_p ((MODE), (X), 1)) \ goto ADDR; \ } while (0) #endif #ifndef REG_MODE_OK_FOR_BASE_P #define REG_MODE_OK_FOR_BASE_P(REGNO, MODE) REG_OK_FOR_BASE_P (REGNO) #endif static const char *const reg_class_names3[] = REG_CLASS_NAMES; struct du_chain { struct du_chain *next_chain; struct du_chain *next_use; rtx insn; rtx *loc; ENUM_BITFIELD(reg_class) class : 16; unsigned int need_caller_save_reg:1; unsigned int earlyclobber:1; }; enum scan_actions { terminate_all_read, terminate_overlapping_read, terminate_write, terminate_dead, mark_read, mark_write }; static const char * const scan_actions_name[] = { "terminate_all_read", "terminate_overlapping_read", "terminate_write", "terminate_dead", "mark_read", "mark_write" }; static struct obstack rename_obstack; static void do_replace (struct du_chain *, int); static void scan_rtx_reg (rtx, rtx *, enum reg_class, enum scan_actions, enum op_type, int); static void scan_rtx_address (rtx, rtx *, enum reg_class, enum scan_actions, enum machine_mode); static void scan_rtx (rtx, rtx *, enum reg_class, enum scan_actions, enum op_type, int); static struct du_chain *build_def_use (basic_block); static void dump_def_use_chain (struct du_chain *); static void note_sets (rtx, rtx, void *); static void clear_dead_regs (HARD_REG_SET *, enum machine_mode, rtx); static void merge_overlapping_regs (basic_block, HARD_REG_SET *, struct du_chain *); /* Called through note_stores from update_life. 
Find sets of registers, and record them in *DATA (which is actually a HARD_REG_SET *). */ static void note_sets (rtx x, rtx set ATTRIBUTE_UNUSED, void *data) { HARD_REG_SET *pset = (HARD_REG_SET *) data; unsigned int regno; int nregs; if (!REG_P (x)) return; regno = REGNO (x); nregs = hard_regno_nregs[regno][GET_MODE (x)]; /* There must not be pseudos at this point. */ if (regno + nregs > FIRST_PSEUDO_REGISTER) abort (); while (nregs-- > 0) SET_HARD_REG_BIT (*pset, regno + nregs); } /* Clear all registers from *PSET for which a note of kind KIND can be found in the list NOTES. */ static void clear_dead_regs (HARD_REG_SET *pset, enum machine_mode kind, rtx notes) { rtx note; for (note = notes; note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == kind && REG_P (XEXP (note, 0))) { rtx reg = XEXP (note, 0); unsigned int regno = REGNO (reg); int nregs = hard_regno_nregs[regno][GET_MODE (reg)]; /* There must not be pseudos at this point. */ if (regno + nregs > FIRST_PSEUDO_REGISTER) abort (); while (nregs-- > 0) CLEAR_HARD_REG_BIT (*pset, regno + nregs); } } /* For a def-use chain CHAIN in basic block B, find which registers overlap its lifetime and set the corresponding bits in *PSET. */ static void merge_overlapping_regs (basic_block b, HARD_REG_SET *pset, struct du_chain *chain) { struct du_chain *t = chain; rtx insn; HARD_REG_SET live; REG_SET_TO_HARD_REG_SET (live, b->global_live_at_start); insn = BB_HEAD (b); while (t) { /* Search forward until the next reference to the register to be renamed. */ while (insn != t->insn) { if (INSN_P (insn)) { clear_dead_regs (&live, REG_DEAD, REG_NOTES (insn)); note_stores (PATTERN (insn), note_sets, (void *) &live); /* Only record currently live regs if we are inside the reg's live range. */ if (t != chain) IOR_HARD_REG_SET (*pset, live); clear_dead_regs (&live, REG_UNUSED, REG_NOTES (insn)); } insn = NEXT_INSN (insn); } IOR_HARD_REG_SET (*pset, live); /* For the last reference, also merge in all registers set in the same insn. @@@ We only have take earlyclobbered sets into account. */ if (! t->next_use) note_stores (PATTERN (insn), note_sets, (void *) pset); t = t->next_use; } } /* Perform register renaming on the current function. */ void regrename_optimize (void) { int tick[FIRST_PSEUDO_REGISTER]; int this_tick = 0; basic_block bb; char *first_obj; memset (tick, 0, sizeof tick); gcc_obstack_init (&rename_obstack); first_obj = obstack_alloc (&rename_obstack, 0); FOR_EACH_BB (bb) { struct du_chain *all_chains = 0; HARD_REG_SET unavailable; HARD_REG_SET regs_seen; CLEAR_HARD_REG_SET (unavailable); if (dump_file) fprintf (dump_file, "\nBasic block %d:\n", bb->index); all_chains = build_def_use (bb); if (dump_file) dump_def_use_chain (all_chains); CLEAR_HARD_REG_SET (unavailable); /* Don't clobber traceback for noreturn functions. */ if (frame_pointer_needed) { int i; for (i = hard_regno_nregs[FRAME_POINTER_REGNUM][Pmode]; i--;) SET_HARD_REG_BIT (unavailable, FRAME_POINTER_REGNUM + i); #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM for (i = hard_regno_nregs[HARD_FRAME_POINTER_REGNUM][Pmode]; i--;) SET_HARD_REG_BIT (unavailable, HARD_FRAME_POINTER_REGNUM + i); #endif } CLEAR_HARD_REG_SET (regs_seen); while (all_chains) { int new_reg, best_new_reg; int n_uses; struct du_chain *this = all_chains; struct du_chain *tmp, *last; HARD_REG_SET this_unavailable; int reg = REGNO (*this->loc); int i; all_chains = this->next_chain; best_new_reg = reg; #if 0 /* This just disables optimization opportunities. 
*/ /* Only rename once we've seen the reg more than once. */ if (! TEST_HARD_REG_BIT (regs_seen, reg)) { SET_HARD_REG_BIT (regs_seen, reg); continue; } #endif if (fixed_regs[reg] || global_regs[reg] #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM || (frame_pointer_needed && reg == HARD_FRAME_POINTER_REGNUM) #else || (frame_pointer_needed && reg == FRAME_POINTER_REGNUM) #endif ) continue; COPY_HARD_REG_SET (this_unavailable, unavailable); /* Find last entry on chain (which has the need_caller_save bit), count number of uses, and narrow the set of registers we can use for renaming. */ n_uses = 0; for (last = this; last->next_use; last = last->next_use) { n_uses++; IOR_COMPL_HARD_REG_SET (this_unavailable, reg_class_contents[last->class]); } if (n_uses < 1) continue; IOR_COMPL_HARD_REG_SET (this_unavailable, reg_class_contents[last->class]); if (this->need_caller_save_reg) IOR_HARD_REG_SET (this_unavailable, call_used_reg_set); merge_overlapping_regs (bb, &this_unavailable, this); /* Now potential_regs is a reasonable approximation, let's have a closer look at each register still in there. */ for (new_reg = 0; new_reg < FIRST_PSEUDO_REGISTER; new_reg++) { int nregs = hard_regno_nregs[new_reg][GET_MODE (*this->loc)]; for (i = nregs - 1; i >= 0; --i) if (TEST_HARD_REG_BIT (this_unavailable, new_reg + i) || fixed_regs[new_reg + i] || global_regs[new_reg + i] /* Can't use regs which aren't saved by the prologue. */ || (! regs_ever_live[new_reg + i] && ! call_used_regs[new_reg + i]) #ifdef LEAF_REGISTERS /* We can't use a non-leaf register if we're in a leaf function. */ || (current_function_is_leaf && !LEAF_REGISTERS[new_reg + i]) #endif #ifdef HARD_REGNO_RENAME_OK || ! HARD_REGNO_RENAME_OK (reg + i, new_reg + i) #endif ) break; if (i >= 0) continue; /* See whether it accepts all modes that occur in definition and uses. */ for (tmp = this; tmp; tmp = tmp->next_use) if (! HARD_REGNO_MODE_OK (new_reg, GET_MODE (*tmp->loc)) || (tmp->need_caller_save_reg && ! (HARD_REGNO_CALL_PART_CLOBBERED (reg, GET_MODE (*tmp->loc))) && (HARD_REGNO_CALL_PART_CLOBBERED (new_reg, GET_MODE (*tmp->loc))))) break; if (! 
tmp) { if (tick[best_new_reg] > tick[new_reg]) best_new_reg = new_reg; } } if (dump_file) { fprintf (dump_file, "Register %s in insn %d", reg_names[reg], INSN_UID (last->insn)); if (last->need_caller_save_reg) fprintf (dump_file, " crosses a call"); } if (best_new_reg == reg) { tick[reg] = ++this_tick; if (dump_file) fprintf (dump_file, "; no available better choice\n"); continue; } do_replace (this, best_new_reg); tick[best_new_reg] = ++this_tick; regs_ever_live[best_new_reg] = 1; if (dump_file) fprintf (dump_file, ", renamed as %s\n", reg_names[best_new_reg]); } obstack_free (&rename_obstack, first_obj); } obstack_free (&rename_obstack, NULL); if (dump_file) fputc ('\n', dump_file); count_or_remove_death_notes (NULL, 1); update_life_info (NULL, UPDATE_LIFE_LOCAL, PROP_DEATH_NOTES); } static void do_replace (struct du_chain *chain, int reg) { while (chain) { unsigned int regno = ORIGINAL_REGNO (*chain->loc); struct reg_attrs * attr = REG_ATTRS (*chain->loc); *chain->loc = gen_raw_REG (GET_MODE (*chain->loc), reg); if (regno >= FIRST_PSEUDO_REGISTER) ORIGINAL_REGNO (*chain->loc) = regno; REG_ATTRS (*chain->loc) = attr; chain = chain->next_use; } } static struct du_chain *open_chains; static struct du_chain *closed_chains; static void scan_rtx_reg (rtx insn, rtx *loc, enum reg_class class, enum scan_actions action, enum op_type type, int earlyclobber) { struct du_chain **p; rtx x = *loc; enum machine_mode mode = GET_MODE (x); int this_regno = REGNO (x); int this_nregs = hard_regno_nregs[this_regno][mode]; if (action == mark_write) { if (type == OP_OUT) { struct du_chain *this = obstack_alloc (&rename_obstack, sizeof (struct du_chain)); this->next_use = 0; this->next_chain = open_chains; this->loc = loc; this->insn = insn; this->class = class; this->need_caller_save_reg = 0; this->earlyclobber = earlyclobber; open_chains = this; } return; } if ((type == OP_OUT && action != terminate_write) || (type != OP_OUT && action == terminate_write)) return; for (p = &open_chains; *p;) { struct du_chain *this = *p; /* Check if the chain has been terminated if it has then skip to the next chain. This can happen when we've already appended the location to the chain in Step 3, but are trying to hide in-out operands from terminate_write in Step 5. */ if (*this->loc == cc0_rtx) p = &this->next_chain; else { int regno = REGNO (*this->loc); int nregs = hard_regno_nregs[regno][GET_MODE (*this->loc)]; int exact_match = (regno == this_regno && nregs == this_nregs); if (regno + nregs <= this_regno || this_regno + this_nregs <= regno) { p = &this->next_chain; continue; } if (action == mark_read) { if (! exact_match) abort (); /* ??? Class NO_REGS can happen if the md file makes use of EXTRA_CONSTRAINTS to match registers. Which is arguably wrong, but there we are. Since we know not what this may be replaced with, terminate the chain. */ if (class != NO_REGS) { this = obstack_alloc (&rename_obstack, sizeof (struct du_chain)); this->next_use = 0; this->next_chain = (*p)->next_chain; this->loc = loc; this->insn = insn; this->class = class; this->need_caller_save_reg = 0; while (*p) p = &(*p)->next_use; *p = this; return; } } if (action != terminate_overlapping_read || ! exact_match) { struct du_chain *next = this->next_chain; /* Whether the terminated chain can be used for renaming depends on the action and this being an exact match. In either case, we remove this element from open_chains. 
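For illustration (register numbers are arbitrary): a terminate_dead or terminate_write action that exactly matches an open chain for (reg:SI 1) moves that chain to closed_chains, where it remains a candidate for renaming; a write to (reg:SI 1) that only partially overlaps an open chain for (reg:DI 0) merely discards the chain, since we cannot rename part of a multi-register value.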
*/ if ((action == terminate_dead || action == terminate_write) && exact_match) { this->next_chain = closed_chains; closed_chains = this; if (dump_file) fprintf (dump_file, "Closing chain %s at insn %d (%s)\n", reg_names[REGNO (*this->loc)], INSN_UID (insn), scan_actions_name[(int) action]); } else { if (dump_file) fprintf (dump_file, "Discarding chain %s at insn %d (%s)\n", reg_names[REGNO (*this->loc)], INSN_UID (insn), scan_actions_name[(int) action]); } *p = next; } else p = &this->next_chain; } } } /* Adapted from find_reloads_address_1. CLASS is INDEX_REG_CLASS or BASE_REG_CLASS depending on how the register is being considered. */ static void scan_rtx_address (rtx insn, rtx *loc, enum reg_class class, enum scan_actions action, enum machine_mode mode) { rtx x = *loc; RTX_CODE code = GET_CODE (x); const char *fmt; int i, j; if (action == mark_write) return; switch (code) { case PLUS: { rtx orig_op0 = XEXP (x, 0); rtx orig_op1 = XEXP (x, 1); RTX_CODE code0 = GET_CODE (orig_op0); RTX_CODE code1 = GET_CODE (orig_op1); rtx op0 = orig_op0; rtx op1 = orig_op1; rtx *locI = NULL; rtx *locB = NULL; if (GET_CODE (op0) == SUBREG) { op0 = SUBREG_REG (op0); code0 = GET_CODE (op0); } if (GET_CODE (op1) == SUBREG) { op1 = SUBREG_REG (op1); code1 = GET_CODE (op1); } if (code0 == MULT || code0 == SIGN_EXTEND || code0 == TRUNCATE || code0 == ZERO_EXTEND || code1 == MEM) { locI = &XEXP (x, 0); locB = &XEXP (x, 1); } else if (code1 == MULT || code1 == SIGN_EXTEND || code1 == TRUNCATE || code1 == ZERO_EXTEND || code0 == MEM) { locI = &XEXP (x, 1); locB = &XEXP (x, 0); } else if (code0 == CONST_INT || code0 == CONST || code0 == SYMBOL_REF || code0 == LABEL_REF) locB = &XEXP (x, 1); else if (code1 == CONST_INT || code1 == CONST || code1 == SYMBOL_REF || code1 == LABEL_REF) locB = &XEXP (x, 0); else if (code0 == REG && code1 == REG) { int index_op; if (REG_OK_FOR_INDEX_P (op0) && REG_MODE_OK_FOR_BASE_P (op1, mode)) index_op = 0; else if (REG_OK_FOR_INDEX_P (op1) && REG_MODE_OK_FOR_BASE_P (op0, mode)) index_op = 1; else if (REG_MODE_OK_FOR_BASE_P (op1, mode)) index_op = 0; else if (REG_MODE_OK_FOR_BASE_P (op0, mode)) index_op = 1; else if (REG_OK_FOR_INDEX_P (op1)) index_op = 1; else index_op = 0; locI = &XEXP (x, index_op); locB = &XEXP (x, !index_op); } else if (code0 == REG) { locI = &XEXP (x, 0); locB = &XEXP (x, 1); } else if (code1 == REG) { locI = &XEXP (x, 1); locB = &XEXP (x, 0); } if (locI) scan_rtx_address (insn, locI, INDEX_REG_CLASS, action, mode); if (locB) scan_rtx_address (insn, locB, MODE_BASE_REG_CLASS (mode), action, mode); return; } case POST_INC: case POST_DEC: case POST_MODIFY: case PRE_INC: case PRE_DEC: case PRE_MODIFY: #ifndef AUTO_INC_DEC /* If the target doesn't claim to handle autoinc, this must be something special, like a stack push. Kill this chain. 
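A push, for instance, is represented as a store through (pre_dec:SI (reg:SI sp)): the stack pointer is read and modified as a side effect, so it is not safe to rename any register involved, and the chain is terminated below.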
*/ action = terminate_all_read; #endif break; case MEM: scan_rtx_address (insn, &XEXP (x, 0), MODE_BASE_REG_CLASS (GET_MODE (x)), action, GET_MODE (x)); return; case REG: scan_rtx_reg (insn, loc, class, action, OP_IN, 0); return; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') scan_rtx_address (insn, &XEXP (x, i), class, action, mode); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) scan_rtx_address (insn, &XVECEXP (x, i, j), class, action, mode); } } static void scan_rtx (rtx insn, rtx *loc, enum reg_class class, enum scan_actions action, enum op_type type, int earlyclobber) { const char *fmt; rtx x = *loc; enum rtx_code code = GET_CODE (x); int i, j; code = GET_CODE (x); switch (code) { case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case CC0: case PC: return; case REG: scan_rtx_reg (insn, loc, class, action, type, earlyclobber); return; case MEM: scan_rtx_address (insn, &XEXP (x, 0), MODE_BASE_REG_CLASS (GET_MODE (x)), action, GET_MODE (x)); return; case SET: scan_rtx (insn, &SET_SRC (x), class, action, OP_IN, 0); scan_rtx (insn, &SET_DEST (x), class, action, OP_OUT, 0); return; case STRICT_LOW_PART: scan_rtx (insn, &XEXP (x, 0), class, action, OP_INOUT, earlyclobber); return; case ZERO_EXTRACT: case SIGN_EXTRACT: scan_rtx (insn, &XEXP (x, 0), class, action, type == OP_IN ? OP_IN : OP_INOUT, earlyclobber); scan_rtx (insn, &XEXP (x, 1), class, action, OP_IN, 0); scan_rtx (insn, &XEXP (x, 2), class, action, OP_IN, 0); return; case POST_INC: case PRE_INC: case POST_DEC: case PRE_DEC: case POST_MODIFY: case PRE_MODIFY: /* Should only happen inside MEM. */ abort (); case CLOBBER: scan_rtx (insn, &SET_DEST (x), class, action, OP_OUT, 1); return; case EXPR_LIST: scan_rtx (insn, &XEXP (x, 0), class, action, type, 0); if (XEXP (x, 1)) scan_rtx (insn, &XEXP (x, 1), class, action, type, 0); return; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') scan_rtx (insn, &XEXP (x, i), class, action, type, 0); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) scan_rtx (insn, &XVECEXP (x, i, j), class, action, type, 0); } } /* Build def/use chain. */ static struct du_chain * build_def_use (basic_block bb) { rtx insn; open_chains = closed_chains = NULL; for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { int n_ops; rtx note; rtx old_operands[MAX_RECOG_OPERANDS]; rtx old_dups[MAX_DUP_OPERANDS]; int i, icode; int alt; int predicated; /* Process the insn, determining its effect on the def-use chains. We perform the following steps with the register references in the insn: (1) Any read that overlaps an open chain, but doesn't exactly match, causes that chain to be closed. We can't deal with overlaps yet. (2) Any read outside an operand causes any chain it overlaps with to be closed, since we can't replace it. (3) Any read inside an operand is added if there's already an open chain for it. (4) For any REG_DEAD note we find, close open chains that overlap it. (5) For any write we find, close open chains that overlap it. (6) For any write we find in an operand, make a new chain. (7) For any REG_UNUSED, close any chains we just opened. */ icode = recog_memoized (insn); extract_insn (insn); if (! 
constrain_operands (1)) fatal_insn_not_found (insn); preprocess_constraints (); alt = which_alternative; n_ops = recog_data.n_operands; /* Simplify the code below by rewriting things to reflect matching constraints. Also promote OP_OUT to OP_INOUT in predicated instructions. */ predicated = GET_CODE (PATTERN (insn)) == COND_EXEC; for (i = 0; i < n_ops; ++i) { int matches = recog_op_alt[i][alt].matches; if (matches >= 0) recog_op_alt[i][alt].class = recog_op_alt[matches][alt].class; if (matches >= 0 || recog_op_alt[i][alt].matched >= 0 || (predicated && recog_data.operand_type[i] == OP_OUT)) recog_data.operand_type[i] = OP_INOUT; } /* Step 1: Close chains for which we have overlapping reads. */ for (i = 0; i < n_ops; i++) scan_rtx (insn, recog_data.operand_loc[i], NO_REGS, terminate_overlapping_read, recog_data.operand_type[i], 0); /* Step 2: Close chains for which we have reads outside operands. We do this by munging all operands into CC0, and closing everything remaining. */ for (i = 0; i < n_ops; i++) { old_operands[i] = recog_data.operand[i]; /* Don't squash match_operator or match_parallel here, since we don't know that all of the contained registers are reachable by proper operands. */ if (recog_data.constraints[i][0] == '\0') continue; *recog_data.operand_loc[i] = cc0_rtx; } for (i = 0; i < recog_data.n_dups; i++) { int dup_num = recog_data.dup_num[i]; old_dups[i] = *recog_data.dup_loc[i]; *recog_data.dup_loc[i] = cc0_rtx; /* For match_dup of match_operator or match_parallel, share them, so that we don't miss changes in the dup. */ if (icode >= 0 && insn_data[icode].operand[dup_num].eliminable == 0) old_dups[i] = recog_data.operand[dup_num]; } scan_rtx (insn, &PATTERN (insn), NO_REGS, terminate_all_read, OP_IN, 0); for (i = 0; i < recog_data.n_dups; i++) *recog_data.dup_loc[i] = old_dups[i]; for (i = 0; i < n_ops; i++) *recog_data.operand_loc[i] = old_operands[i]; /* Step 2B: Can't rename function call argument registers. */ if (GET_CODE (insn) == CALL_INSN && CALL_INSN_FUNCTION_USAGE (insn)) scan_rtx (insn, &CALL_INSN_FUNCTION_USAGE (insn), NO_REGS, terminate_all_read, OP_IN, 0); /* Step 2C: Can't rename asm operands that were originally hard registers. */ if (asm_noperands (PATTERN (insn)) > 0) for (i = 0; i < n_ops; i++) { rtx *loc = recog_data.operand_loc[i]; rtx op = *loc; if (REG_P (op) && REGNO (op) == ORIGINAL_REGNO (op) && (recog_data.operand_type[i] == OP_IN || recog_data.operand_type[i] == OP_INOUT)) scan_rtx (insn, loc, NO_REGS, terminate_all_read, OP_IN, 0); } /* Step 3: Append to chains for reads inside operands. */ for (i = 0; i < n_ops + recog_data.n_dups; i++) { int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops]; rtx *loc = (i < n_ops ? recog_data.operand_loc[opn] : recog_data.dup_loc[i - n_ops]); enum reg_class class = recog_op_alt[opn][alt].class; enum op_type type = recog_data.operand_type[opn]; /* Don't scan match_operand here, since we've no reg class information to pass down. Any operands that we could substitute in will be represented elsewhere. */ if (recog_data.constraints[opn][0] == '\0') continue; if (recog_op_alt[opn][alt].is_address) scan_rtx_address (insn, loc, class, mark_read, VOIDmode); else scan_rtx (insn, loc, class, mark_read, type, 0); } /* Step 4: Close chains for registers that die here. Also record updates for REG_INC notes. 
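A REG_DEAD note closes the chain for that register, making it available as a renaming candidate, while a REG_INC note means the register is implicitly read and written by an auto-increment address, so it is appended to its chain as an OP_INOUT read in ALL_REGS rather than closed.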
*/ for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) { if (REG_NOTE_KIND (note) == REG_DEAD) scan_rtx (insn, &XEXP (note, 0), NO_REGS, terminate_dead, OP_IN, 0); else if (REG_NOTE_KIND (note) == REG_INC) scan_rtx (insn, &XEXP (note, 0), ALL_REGS, mark_read, OP_INOUT, 0); } /* Step 4B: If this is a call, any chain live at this point requires a caller-saved reg. */ if (GET_CODE (insn) == CALL_INSN) { struct du_chain *p; for (p = open_chains; p; p = p->next_chain) p->need_caller_save_reg = 1; } /* Step 5: Close open chains that overlap writes. Similar to step 2, we hide in-out operands, since we do not want to close these chains. */ for (i = 0; i < n_ops; i++) { old_operands[i] = recog_data.operand[i]; if (recog_data.operand_type[i] == OP_INOUT) *recog_data.operand_loc[i] = cc0_rtx; } for (i = 0; i < recog_data.n_dups; i++) { int opn = recog_data.dup_num[i]; old_dups[i] = *recog_data.dup_loc[i]; if (recog_data.operand_type[opn] == OP_INOUT) *recog_data.dup_loc[i] = cc0_rtx; } scan_rtx (insn, &PATTERN (insn), NO_REGS, terminate_write, OP_IN, 0); for (i = 0; i < recog_data.n_dups; i++) *recog_data.dup_loc[i] = old_dups[i]; for (i = 0; i < n_ops; i++) *recog_data.operand_loc[i] = old_operands[i]; /* Step 6: Begin new chains for writes inside operands. */ /* ??? Many targets have output constraints on the SET_DEST of a call insn, which is stupid, since these are certainly ABI defined hard registers. Don't change calls at all. Similarly take special care for asm statement that originally referenced hard registers. */ if (asm_noperands (PATTERN (insn)) > 0) { for (i = 0; i < n_ops; i++) if (recog_data.operand_type[i] == OP_OUT) { rtx *loc = recog_data.operand_loc[i]; rtx op = *loc; enum reg_class class = recog_op_alt[i][alt].class; if (REG_P (op) && REGNO (op) == ORIGINAL_REGNO (op)) continue; scan_rtx (insn, loc, class, mark_write, OP_OUT, recog_op_alt[i][alt].earlyclobber); } } else if (GET_CODE (insn) != CALL_INSN) for (i = 0; i < n_ops + recog_data.n_dups; i++) { int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops]; rtx *loc = (i < n_ops ? recog_data.operand_loc[opn] : recog_data.dup_loc[i - n_ops]); enum reg_class class = recog_op_alt[opn][alt].class; if (recog_data.operand_type[opn] == OP_OUT) scan_rtx (insn, loc, class, mark_write, OP_OUT, recog_op_alt[opn][alt].earlyclobber); } /* Step 7: Close chains for registers that were never really used here. */ for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_UNUSED) scan_rtx (insn, &XEXP (note, 0), NO_REGS, terminate_dead, OP_IN, 0); } if (insn == BB_END (bb)) break; } /* Since we close every chain when we find a REG_DEAD note, anything that is still open lives past the basic block, so it can't be renamed. */ return closed_chains; } /* Dump all def/use chains in CHAINS to DUMP_FILE. They are printed in reverse order as that's how we build them. */ static void dump_def_use_chain (struct du_chain *chains) { while (chains) { struct du_chain *this = chains; int r = REGNO (*this->loc); int nregs = hard_regno_nregs[r][GET_MODE (*this->loc)]; fprintf (dump_file, "Register %s (%d):", reg_names[r], nregs); while (this) { fprintf (dump_file, " %d [%s]", INSN_UID (this->insn), reg_class_names3[this->class]); this = this->next_use; } fprintf (dump_file, "\n"); chains = chains->next_chain; } } /* The following code does forward propagation of hard register copies. The object is to eliminate as many dependencies as possible, so that we have the most scheduling freedom. 
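For illustration (register numbers are arbitrary): given (set (reg:SI 2) (reg:SI 1)) followed by (set (reg:SI 3) (plus:SI (reg:SI 2) (const_int 4))), the addition can be rewritten to use (reg:SI 1) directly; it then no longer depends on the copy, and the copy itself may become dead and be removed by the later life update.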
As a side effect, we also clean up some silly register allocation decisions made by reload. This code may be obsoleted by a new register allocator. */ /* For each register, we have a list of registers that contain the same value. The OLDEST_REGNO field points to the head of the list, and the NEXT_REGNO field runs through the list. The MODE field indicates what mode the data is known to be in; this field is VOIDmode when the register is not known to contain valid data. */ struct value_data_entry { enum machine_mode mode; unsigned int oldest_regno; unsigned int next_regno; }; struct value_data { struct value_data_entry e[FIRST_PSEUDO_REGISTER]; unsigned int max_value_regs; }; static void kill_value_regno (unsigned, struct value_data *); static void kill_value (rtx, struct value_data *); static void set_value_regno (unsigned, enum machine_mode, struct value_data *); static void init_value_data (struct value_data *); static void kill_clobbered_value (rtx, rtx, void *); static void kill_set_value (rtx, rtx, void *); static int kill_autoinc_value (rtx *, void *); static void copy_value (rtx, rtx, struct value_data *); static bool mode_change_ok (enum machine_mode, enum machine_mode, unsigned int); static rtx maybe_mode_change (enum machine_mode, enum machine_mode, enum machine_mode, unsigned int, unsigned int); static rtx find_oldest_value_reg (enum reg_class, rtx, struct value_data *); static bool replace_oldest_value_reg (rtx *, enum reg_class, rtx, struct value_data *); static bool replace_oldest_value_addr (rtx *, enum reg_class, enum machine_mode, rtx, struct value_data *); static bool replace_oldest_value_mem (rtx, rtx, struct value_data *); static bool copyprop_hardreg_forward_1 (basic_block, struct value_data *); extern void debug_value_data (struct value_data *); #ifdef ENABLE_CHECKING static void validate_value_data (struct value_data *); #endif /* Kill register REGNO. This involves removing it from any value lists, and resetting the value mode to VOIDmode. */ static void kill_value_regno (unsigned int regno, struct value_data *vd) { unsigned int i, next; if (vd->e[regno].oldest_regno != regno) { for (i = vd->e[regno].oldest_regno; vd->e[i].next_regno != regno; i = vd->e[i].next_regno) continue; vd->e[i].next_regno = vd->e[regno].next_regno; } else if ((next = vd->e[regno].next_regno) != INVALID_REGNUM) { for (i = next; i != INVALID_REGNUM; i = vd->e[i].next_regno) vd->e[i].oldest_regno = next; } vd->e[regno].mode = VOIDmode; vd->e[regno].oldest_regno = regno; vd->e[regno].next_regno = INVALID_REGNUM; #ifdef ENABLE_CHECKING validate_value_data (vd); #endif } /* Kill X. This is a convenience function for kill_value_regno so that we mind the mode the register is in. */ static void kill_value (rtx x, struct value_data *vd) { /* SUBREGS are supposed to have been eliminated by now. But some ports, e.g. i386 sse, use them to smuggle vector type information through to instruction selection. Each such SUBREG should simplify, so if we get a NULL we've done something wrong elsewhere. */ if (GET_CODE (x) == SUBREG) x = simplify_subreg (GET_MODE (x), SUBREG_REG (x), GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); if (REG_P (x)) { unsigned int regno = REGNO (x); unsigned int n = hard_regno_nregs[regno][GET_MODE (x)]; unsigned int i, j; /* Kill the value we're told to kill. */ for (i = 0; i < n; ++i) kill_value_regno (regno + i, vd); /* Kill everything that overlapped what we're told to kill. 
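For example, if a DImode value was recorded for register 0 and therefore spans hard registers 0 and 1, then killing register 1 alone must also invalidate that DImode entry; the loop below scans back at most max_value_regs registers to catch such wider values.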
*/ if (regno < vd->max_value_regs) j = 0; else j = regno - vd->max_value_regs; for (; j < regno; ++j) { if (vd->e[j].mode == VOIDmode) continue; n = hard_regno_nregs[j][vd->e[j].mode]; if (j + n > regno) for (i = 0; i < n; ++i) kill_value_regno (j + i, vd); } } } /* Remember that REGNO is valid in MODE. */ static void set_value_regno (unsigned int regno, enum machine_mode mode, struct value_data *vd) { unsigned int nregs; vd->e[regno].mode = mode; nregs = hard_regno_nregs[regno][mode]; if (nregs > vd->max_value_regs) vd->max_value_regs = nregs; } /* Initialize VD such that there are no known relationships between regs. */ static void init_value_data (struct value_data *vd) { int i; for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i) { vd->e[i].mode = VOIDmode; vd->e[i].oldest_regno = i; vd->e[i].next_regno = INVALID_REGNUM; } vd->max_value_regs = 0; } /* Called through note_stores. If X is clobbered, kill its value. */ static void kill_clobbered_value (rtx x, rtx set, void *data) { struct value_data *vd = data; if (GET_CODE (set) == CLOBBER) kill_value (x, vd); } /* Called through note_stores. If X is set, not clobbered, kill its current value and install it as the root of its own value list. */ static void kill_set_value (rtx x, rtx set, void *data) { struct value_data *vd = data; if (GET_CODE (set) != CLOBBER) { kill_value (x, vd); if (REG_P (x)) set_value_regno (REGNO (x), GET_MODE (x), vd); } } /* Called through for_each_rtx. Kill any register used as the base of an auto-increment expression, and install that register as the root of its own value list. */ static int kill_autoinc_value (rtx *px, void *data) { rtx x = *px; struct value_data *vd = data; if (GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC) { x = XEXP (x, 0); kill_value (x, vd); set_value_regno (REGNO (x), Pmode, vd); return -1; } return 0; } /* Assert that SRC has been copied to DEST. Adjust the data structures to reflect that SRC contains an older copy of the shared value. */ static void copy_value (rtx dest, rtx src, struct value_data *vd) { unsigned int dr = REGNO (dest); unsigned int sr = REGNO (src); unsigned int dn, sn; unsigned int i; /* ??? At present, it's possible to see noop sets. It'd be nice if this were cleaned up beforehand... */ if (sr == dr) return; /* Do not propagate copies to the stack pointer, as that can leave memory accesses with no scheduling dependency on the stack update. */ if (dr == STACK_POINTER_REGNUM) return; /* Likewise with the frame pointer, if we're using one. */ if (frame_pointer_needed && dr == HARD_FRAME_POINTER_REGNUM) return; /* If SRC and DEST overlap, don't record anything. */ dn = hard_regno_nregs[dr][GET_MODE (dest)]; sn = hard_regno_nregs[sr][GET_MODE (dest)]; if ((dr > sr && dr < sr + sn) || (sr > dr && sr < dr + dn)) return; /* If SRC had no assigned mode (i.e. we didn't know it was live) assign it now and assume the value came from an input argument or somesuch. */ if (vd->e[sr].mode == VOIDmode) set_value_regno (sr, vd->e[dr].mode, vd); /* If we are narrowing the input to a smaller number of hard regs, and it is in big endian, we are really extracting a high part. Since we generally associate a low part of a value with the value itself, we must not do the same for the high part. Note we can still get low parts for the same mode combination through a two-step copy involving differently sized hard regs. Assume hard regs fr* are 32 bits each, while r* are 64 bits each: (set (reg:DI r0) (reg:DI fr0)) (set (reg:SI fr2) (reg:SI r0)) loads the low part of (reg:DI fr0) - i.e.
fr1 - into fr2, while: (set (reg:SI fr2) (reg:SI fr0)) loads the high part of (reg:DI fr0) into fr2. We can't properly represent the latter case in our tables, so don't record anything then. */ else if (sn < (unsigned int) hard_regno_nregs[sr][vd->e[sr].mode] && (GET_MODE_SIZE (vd->e[sr].mode) > UNITS_PER_WORD ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)) return; /* If SRC had been assigned a mode narrower than the copy, we can't link DEST into the chain, because not all of the pieces of the copy came from oldest_regno. */ else if (sn > (unsigned int) hard_regno_nregs[sr][vd->e[sr].mode]) return; /* Link DR at the end of the value chain used by SR. */ vd->e[dr].oldest_regno = vd->e[sr].oldest_regno; for (i = sr; vd->e[i].next_regno != INVALID_REGNUM; i = vd->e[i].next_regno) continue; vd->e[i].next_regno = dr; #ifdef ENABLE_CHECKING validate_value_data (vd); #endif } /* Return true if a mode change from ORIG to NEW is allowed for REGNO. */ static bool mode_change_ok (enum machine_mode orig_mode, enum machine_mode new_mode, unsigned int regno ATTRIBUTE_UNUSED) { if (GET_MODE_SIZE (orig_mode) < GET_MODE_SIZE (new_mode)) return false; #ifdef CANNOT_CHANGE_MODE_CLASS return !REG_CANNOT_CHANGE_MODE_P (regno, orig_mode, new_mode); #endif return true; } /* Register REGNO was originally set in ORIG_MODE. It - or a copy of it - was copied in COPY_MODE to COPY_REGNO, and then COPY_REGNO was accessed in NEW_MODE. Return a NEW_MODE rtx for REGNO if that's OK, otherwise return NULL_RTX. */ static rtx maybe_mode_change (enum machine_mode orig_mode, enum machine_mode copy_mode, enum machine_mode new_mode, unsigned int regno, unsigned int copy_regno ATTRIBUTE_UNUSED) { if (orig_mode == new_mode) return gen_rtx_raw_REG (new_mode, regno); else if (mode_change_ok (orig_mode, new_mode, regno)) { int copy_nregs = hard_regno_nregs[copy_regno][copy_mode]; int use_nregs = hard_regno_nregs[copy_regno][new_mode]; int copy_offset = GET_MODE_SIZE (copy_mode) / copy_nregs * (copy_nregs - use_nregs); int offset = GET_MODE_SIZE (orig_mode) - GET_MODE_SIZE (new_mode) - copy_offset; int byteoffset = offset % UNITS_PER_WORD; int wordoffset = offset - byteoffset; offset = ((WORDS_BIG_ENDIAN ? wordoffset : 0) + (BYTES_BIG_ENDIAN ? byteoffset : 0)); return gen_rtx_raw_REG (new_mode, regno + subreg_regno_offset (regno, orig_mode, offset, new_mode)); } return NULL_RTX; } /* Find the oldest copy of the value contained in REGNO that is in register class CLASS and has mode MODE. If found, return an rtx of that oldest register, otherwise return NULL. */ static rtx find_oldest_value_reg (enum reg_class class, rtx reg, struct value_data *vd) { unsigned int regno = REGNO (reg); enum machine_mode mode = GET_MODE (reg); unsigned int i; /* If we are accessing REG in some mode other that what we set it in, make sure that the replacement is valid. In particular, consider (set (reg:DI r11) (...)) (set (reg:SI r9) (reg:SI r11)) (set (reg:SI r10) (...)) (set (...) (reg:DI r9)) Replacing r9 with r11 is invalid. 
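(Invalid because the DImode read of r9 covers two hard registers, while the equivalence with r11 was established only for the SImode low part, so the wider access would also depend on data that was never copied. The mode check below rejects exactly this case.)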
*/ if (mode != vd->e[regno].mode) { if (hard_regno_nregs[regno][mode] > hard_regno_nregs[regno][vd->e[regno].mode]) return NULL_RTX; } for (i = vd->e[regno].oldest_regno; i != regno; i = vd->e[i].next_regno) { enum machine_mode oldmode = vd->e[i].mode; rtx new; unsigned int last; for (last = i; last < i + hard_regno_nregs[i][mode]; last++) if (!TEST_HARD_REG_BIT (reg_class_contents[class], last)) return NULL_RTX; new = maybe_mode_change (oldmode, vd->e[regno].mode, mode, i, regno); if (new) { ORIGINAL_REGNO (new) = ORIGINAL_REGNO (reg); REG_ATTRS (new) = REG_ATTRS (reg); return new; } } return NULL_RTX; } /* If possible, replace the register at *LOC with the oldest register in register class CLASS. Return true if successfully replaced. */ static bool replace_oldest_value_reg (rtx *loc, enum reg_class class, rtx insn, struct value_data *vd) { rtx new = find_oldest_value_reg (class, *loc, vd); if (new) { if (dump_file) fprintf (dump_file, "insn %u: replaced reg %u with %u\n", INSN_UID (insn), REGNO (*loc), REGNO (new)); *loc = new; return true; } return false; } /* Similar to replace_oldest_value_reg, but *LOC contains an address. Adapted from find_reloads_address_1. CLASS is INDEX_REG_CLASS or BASE_REG_CLASS depending on how the register is being considered. */ static bool replace_oldest_value_addr (rtx *loc, enum reg_class class, enum machine_mode mode, rtx insn, struct value_data *vd) { rtx x = *loc; RTX_CODE code = GET_CODE (x); const char *fmt; int i, j; bool changed = false; switch (code) { case PLUS: { rtx orig_op0 = XEXP (x, 0); rtx orig_op1 = XEXP (x, 1); RTX_CODE code0 = GET_CODE (orig_op0); RTX_CODE code1 = GET_CODE (orig_op1); rtx op0 = orig_op0; rtx op1 = orig_op1; rtx *locI = NULL; rtx *locB = NULL; if (GET_CODE (op0) == SUBREG) { op0 = SUBREG_REG (op0); code0 = GET_CODE (op0); } if (GET_CODE (op1) == SUBREG) { op1 = SUBREG_REG (op1); code1 = GET_CODE (op1); } if (code0 == MULT || code0 == SIGN_EXTEND || code0 == TRUNCATE || code0 == ZERO_EXTEND || code1 == MEM) { locI = &XEXP (x, 0); locB = &XEXP (x, 1); } else if (code1 == MULT || code1 == SIGN_EXTEND || code1 == TRUNCATE || code1 == ZERO_EXTEND || code0 == MEM) { locI = &XEXP (x, 1); locB = &XEXP (x, 0); } else if (code0 == CONST_INT || code0 == CONST || code0 == SYMBOL_REF || code0 == LABEL_REF) locB = &XEXP (x, 1); else if (code1 == CONST_INT || code1 == CONST || code1 == SYMBOL_REF || code1 == LABEL_REF) locB = &XEXP (x, 0); else if (code0 == REG && code1 == REG) { int index_op; if (REG_OK_FOR_INDEX_P (op0) && REG_MODE_OK_FOR_BASE_P (op1, mode)) index_op = 0; else if (REG_OK_FOR_INDEX_P (op1) && REG_MODE_OK_FOR_BASE_P (op0, mode)) index_op = 1; else if (REG_MODE_OK_FOR_BASE_P (op1, mode)) index_op = 0; else if (REG_MODE_OK_FOR_BASE_P (op0, mode)) index_op = 1; else if (REG_OK_FOR_INDEX_P (op1)) index_op = 1; else index_op = 0; locI = &XEXP (x, index_op); locB = &XEXP (x, !index_op); } else if (code0 == REG) { locI = &XEXP (x, 0); locB = &XEXP (x, 1); } else if (code1 == REG) { locI = &XEXP (x, 1); locB = &XEXP (x, 0); } if (locI) changed |= replace_oldest_value_addr (locI, INDEX_REG_CLASS, mode, insn, vd); if (locB) changed |= replace_oldest_value_addr (locB, MODE_BASE_REG_CLASS (mode), mode, insn, vd); return changed; } case POST_INC: case POST_DEC: case POST_MODIFY: case PRE_INC: case PRE_DEC: case PRE_MODIFY: return false; case MEM: return replace_oldest_value_mem (x, insn, vd); case REG: return replace_oldest_value_reg (loc, class, insn, vd); default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH 
(code) - 1; i >= 0; i--) { if (fmt[i] == 'e') changed |= replace_oldest_value_addr (&XEXP (x, i), class, mode, insn, vd); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) changed |= replace_oldest_value_addr (&XVECEXP (x, i, j), class, mode, insn, vd); } return changed; } /* Similar to replace_oldest_value_reg, but X contains a memory. */ static bool replace_oldest_value_mem (rtx x, rtx insn, struct value_data *vd) { return replace_oldest_value_addr (&XEXP (x, 0), MODE_BASE_REG_CLASS (GET_MODE (x)), GET_MODE (x), insn, vd); } /* Perform the forward copy propagation on basic block BB. */ static bool copyprop_hardreg_forward_1 (basic_block bb, struct value_data *vd) { bool changed = false; rtx insn; for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) { int n_ops, i, alt, predicated; bool is_asm; rtx set; if (! INSN_P (insn)) { if (insn == BB_END (bb)) break; else continue; } set = single_set (insn); extract_insn (insn); if (! constrain_operands (1)) fatal_insn_not_found (insn); preprocess_constraints (); alt = which_alternative; n_ops = recog_data.n_operands; is_asm = asm_noperands (PATTERN (insn)) >= 0; /* Simplify the code below by rewriting things to reflect matching constraints. Also promote OP_OUT to OP_INOUT in predicated instructions. */ predicated = GET_CODE (PATTERN (insn)) == COND_EXEC; for (i = 0; i < n_ops; ++i) { int matches = recog_op_alt[i][alt].matches; if (matches >= 0) recog_op_alt[i][alt].class = recog_op_alt[matches][alt].class; if (matches >= 0 || recog_op_alt[i][alt].matched >= 0 || (predicated && recog_data.operand_type[i] == OP_OUT)) recog_data.operand_type[i] = OP_INOUT; } /* For each earlyclobber operand, zap the value data. */ for (i = 0; i < n_ops; i++) if (recog_op_alt[i][alt].earlyclobber) kill_value (recog_data.operand[i], vd); /* Within asms, a clobber cannot overlap inputs or outputs. I wouldn't think this were true for regular insns, but scan_rtx treats them like that... */ note_stores (PATTERN (insn), kill_clobbered_value, vd); /* Kill all auto-incremented values. */ /* ??? REG_INC is useless, since stack pushes aren't done that way. */ for_each_rtx (&PATTERN (insn), kill_autoinc_value, vd); /* Kill all early-clobbered operands. */ for (i = 0; i < n_ops; i++) if (recog_op_alt[i][alt].earlyclobber) kill_value (recog_data.operand[i], vd); /* Special-case plain move instructions, since we may well be able to do the move from a different register class. */ if (set && REG_P (SET_SRC (set))) { rtx src = SET_SRC (set); unsigned int regno = REGNO (src); enum machine_mode mode = GET_MODE (src); unsigned int i; rtx new; /* If we are accessing SRC in some mode other that what we set it in, make sure that the replacement is valid. */ if (mode != vd->e[regno].mode) { if (hard_regno_nregs[regno][mode] > hard_regno_nregs[regno][vd->e[regno].mode]) goto no_move_special_case; } /* If the destination is also a register, try to find a source register in the same class. */ if (REG_P (SET_DEST (set))) { new = find_oldest_value_reg (REGNO_REG_CLASS (regno), src, vd); if (new && validate_change (insn, &SET_SRC (set), new, 0)) { if (dump_file) fprintf (dump_file, "insn %u: replaced reg %u with %u\n", INSN_UID (insn), regno, REGNO (new)); changed = true; goto did_replacement; } } /* Otherwise, try all valid registers and see if its valid. 
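That is, walk the whole value chain of the source register; for each older entry, maybe_mode_change constructs a candidate hard register in the mode of this access, and validate_change verifies that the rewritten insn still matches its pattern before the replacement is committed.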
*/ for (i = vd->e[regno].oldest_regno; i != regno; i = vd->e[i].next_regno) { new = maybe_mode_change (vd->e[i].mode, vd->e[regno].mode, mode, i, regno); if (new != NULL_RTX) { if (validate_change (insn, &SET_SRC (set), new, 0)) { ORIGINAL_REGNO (new) = ORIGINAL_REGNO (src); REG_ATTRS (new) = REG_ATTRS (src); if (dump_file) fprintf (dump_file, "insn %u: replaced reg %u with %u\n", INSN_UID (insn), regno, REGNO (new)); changed = true; goto did_replacement; } } } } no_move_special_case: /* For each input operand, replace a hard register with the eldest live copy that's in an appropriate register class. */ for (i = 0; i < n_ops; i++) { bool replaced = false; /* Don't scan match_operand here, since we've no reg class information to pass down. Any operands that we could substitute in will be represented elsewhere. */ if (recog_data.constraints[i][0] == '\0') continue; /* Don't replace in asms intentionally referencing hard regs. */ if (is_asm && REG_P (recog_data.operand[i]) && (REGNO (recog_data.operand[i]) == ORIGINAL_REGNO (recog_data.operand[i]))) continue; if (recog_data.operand_type[i] == OP_IN) { if (recog_op_alt[i][alt].is_address) replaced = replace_oldest_value_addr (recog_data.operand_loc[i], recog_op_alt[i][alt].class, VOIDmode, insn, vd); else if (REG_P (recog_data.operand[i])) replaced = replace_oldest_value_reg (recog_data.operand_loc[i], recog_op_alt[i][alt].class, insn, vd); else if (MEM_P (recog_data.operand[i])) replaced = replace_oldest_value_mem (recog_data.operand[i], insn, vd); } else if (MEM_P (recog_data.operand[i])) replaced = replace_oldest_value_mem (recog_data.operand[i], insn, vd); /* If we performed any replacement, update match_dups. */ if (replaced) { int j; rtx new; changed = true; new = *recog_data.operand_loc[i]; recog_data.operand[i] = new; for (j = 0; j < recog_data.n_dups; j++) if (recog_data.dup_num[j] == i) *recog_data.dup_loc[j] = new; } } did_replacement: /* Clobber call-clobbered registers. */ if (GET_CODE (insn) == CALL_INSN) for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) kill_value_regno (i, vd); /* Notice stores. */ note_stores (PATTERN (insn), kill_set_value, vd); /* Notice copies. */ if (set && REG_P (SET_DEST (set)) && REG_P (SET_SRC (set))) copy_value (SET_DEST (set), SET_SRC (set), vd); if (insn == BB_END (bb)) break; } return changed; } /* Main entry point for the forward copy propagation optimization. */ void copyprop_hardreg_forward (void) { struct value_data *all_vd; bool need_refresh; basic_block bb, bbp = 0; need_refresh = false; all_vd = xmalloc (sizeof (struct value_data) * last_basic_block); FOR_EACH_BB (bb) { /* If a block has a single predecessor, that we've already processed, begin with the value data that was live at the end of the predecessor block. */ /* ??? Ought to use more intelligent queuing of blocks. */ if (bb->pred) for (bbp = bb; bbp && bbp != bb->pred->src; bbp = bbp->prev_bb); if (bb->pred && ! bb->pred->pred_next && ! (bb->pred->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) && bb->pred->src != ENTRY_BLOCK_PTR && bbp) all_vd[bb->index] = all_vd[bb->pred->src->index]; else init_value_data (all_vd + bb->index); if (copyprop_hardreg_forward_1 (bb, all_vd + bb->index)) need_refresh = true; } if (need_refresh) { if (dump_file) fputs ("\n\n", dump_file); /* ??? Irritatingly, delete_noop_moves does not take a set of blocks to scan, so we have to do a life update with no initial set of blocks Just In Case. 
*/ delete_noop_moves (); update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE | PROP_KILL_DEAD_CODE); } free (all_vd); } /* Dump the value chain data to stderr. */ void debug_value_data (struct value_data *vd) { HARD_REG_SET set; unsigned int i, j; CLEAR_HARD_REG_SET (set); for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i) if (vd->e[i].oldest_regno == i) { if (vd->e[i].mode == VOIDmode) { if (vd->e[i].next_regno != INVALID_REGNUM) fprintf (stderr, "[%u] Bad next_regno for empty chain (%u)\n", i, vd->e[i].next_regno); continue; } SET_HARD_REG_BIT (set, i); fprintf (stderr, "[%u %s] ", i, GET_MODE_NAME (vd->e[i].mode)); for (j = vd->e[i].next_regno; j != INVALID_REGNUM; j = vd->e[j].next_regno) { if (TEST_HARD_REG_BIT (set, j)) { fprintf (stderr, "[%u] Loop in regno chain\n", j); return; } if (vd->e[j].oldest_regno != i) { fprintf (stderr, "[%u] Bad oldest_regno (%u)\n", j, vd->e[j].oldest_regno); return; } SET_HARD_REG_BIT (set, j); fprintf (stderr, "[%u %s] ", j, GET_MODE_NAME (vd->e[j].mode)); } fputc ('\n', stderr); } for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i) if (! TEST_HARD_REG_BIT (set, i) && (vd->e[i].mode != VOIDmode || vd->e[i].oldest_regno != i || vd->e[i].next_regno != INVALID_REGNUM)) fprintf (stderr, "[%u] Non-empty reg in chain (%s %u %i)\n", i, GET_MODE_NAME (vd->e[i].mode), vd->e[i].oldest_regno, vd->e[i].next_regno); } #ifdef ENABLE_CHECKING static void validate_value_data (struct value_data *vd) { HARD_REG_SET set; unsigned int i, j; CLEAR_HARD_REG_SET (set); for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i) if (vd->e[i].oldest_regno == i) { if (vd->e[i].mode == VOIDmode) { if (vd->e[i].next_regno != INVALID_REGNUM) internal_error ("validate_value_data: [%u] Bad next_regno for empty chain (%u)", i, vd->e[i].next_regno); continue; } SET_HARD_REG_BIT (set, i); for (j = vd->e[i].next_regno; j != INVALID_REGNUM; j = vd->e[j].next_regno) { if (TEST_HARD_REG_BIT (set, j)) internal_error ("validate_value_data: Loop in regno chain (%u)", j); if (vd->e[j].oldest_regno != i) internal_error ("validate_value_data: [%u] Bad oldest_regno (%u)", j, vd->e[j].oldest_regno); SET_HARD_REG_BIT (set, j); } } for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i) if (! TEST_HARD_REG_BIT (set, i) && (vd->e[i].mode != VOIDmode || vd->e[i].oldest_regno != i || vd->e[i].next_regno != INVALID_REGNUM)) internal_error ("validate_value_data: [%u] Non-empty reg in chain (%s %u %i)", i, GET_MODE_NAME (vd->e[i].mode), vd->e[i].oldest_regno, vd->e[i].next_regno); } #endif #ifdef ONE_COMPILATION_UNIT #undef REG_OK_FOR_INDEX_P #undef REG_OK_FOR_BASE_P #undef GO_IF_LEGITIMATE_ADDRESS #define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_NONSTRICT_P (X) #define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_NONSTRICT_P (X) #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ do { \ if (legitimate_address_p ((MODE), (X), 0)) \ goto ADDR; \ } while (0) #endif /* Search an insn for pseudo regs that must be in hard regs and are not. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains subroutines used only from the file reload1.c. It knows how to scan one insn for operands and values that need to be copied into registers to make valid code. It also finds other operands and values which are valid but for which equivalent values in registers exist and ought to be used instead. Before processing the first insn of the function, call `init_reload'. init_reload actually has to be called earlier anyway. To scan an insn, call `find_reloads'. This does two things: 1. sets up tables describing which values must be reloaded for this insn, and what kind of hard regs they must be reloaded into; 2. optionally record the locations where those values appear in the data, so they can be replaced properly later. This is done only if the second arg to `find_reloads' is nonzero. The third arg to `find_reloads' specifies the number of levels of indirect addressing supported by the machine. If it is zero, indirect addressing is not valid. If it is one, (MEM (REG n)) is valid even if (REG n) did not get a hard register; if it is two, (MEM (MEM (REG n))) is also valid even if (REG n) did not get a hard register, and similarly for higher values. Then you must choose the hard regs to reload those pseudo regs into, and generate appropriate load insns before this insn and perhaps also store insns after this insn. Set up the array `reload_reg_rtx' to contain the REG rtx's for the registers you used. In some cases `find_reloads' will return a nonzero value in `reload_reg_rtx' for certain reloads. Then that tells you which register to use, so you do not need to allocate one. But you still do need to add extra instructions to copy the value into and out of that register. Finally you must call `subst_reloads' to substitute the reload reg rtx's into the locations already recorded. NOTE SIDE EFFECTS: find_reloads can alter the operands of the instruction it is called on. 1. Two operands of any sort may be interchanged, if they are in a commutative instruction. This happens only if find_reloads thinks the instruction will compile better that way. 2. Pseudo-registers that are equivalent to constants are replaced with those constants if they are not in hard registers. 1 happens every time find_reloads is called. 2 happens only when REPLACE is 1, which is only when actually doing the reloads, not when just counting them. Using a reload register for several reloads in one insn: When an insn has reloads, it is considered as having three parts: the input reloads, the insn itself after reloading, and the output reloads. Reloads of values used in memory addresses are often needed for only one part. When this is so, reload_when_needed records which part needs the reload. Two reloads for different parts of the insn can share the same reload register. When a reload is used for addresses in multiple parts, or when it is an ordinary operand, it is classified as RELOAD_OTHER, and cannot share a register with any other reload. 
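For example (illustrative), a reload needed only to compute the address of an input operand is RELOAD_FOR_INPUT_ADDRESS and is live only before the insn proper, so it can share a reload register with a RELOAD_FOR_OUTPUT_ADDRESS reload that is live only afterwards, whereas an ordinary operand reloaded as RELOAD_OTHER is live across the whole insn and gets a register of its own.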
*/ #define REG_OK_STRICT #ifdef ONE_COMPILATION_UNIT #undef REG_OK_FOR_INDEX_P #undef REG_OK_FOR_BASE_P #undef GO_IF_LEGITIMATE_ADDRESS #define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_STRICT_P (X) #define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P (X) #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ do { \ if (legitimate_address_p ((MODE), (X), 1)) \ goto ADDR; \ } while (0) #endif #ifndef REGNO_MODE_OK_FOR_BASE_P #define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) REGNO_OK_FOR_BASE_P (REGNO) #endif #ifndef REG_MODE_OK_FOR_BASE_P #define REG_MODE_OK_FOR_BASE_P(REGNO, MODE) REG_OK_FOR_BASE_P (REGNO) #endif /* True if X is a constant that can be forced into the constant pool. */ #define CONST_POOL_OK_P(X) \ (CONSTANT_P (X) \ && GET_CODE (X) != HIGH \ && !targetm.cannot_force_const_mem (X)) /* All reloads of the current insn are recorded here. See reload.h for comments. */ int n_reloads; struct reload rld[MAX_RELOADS]; /* All the "earlyclobber" operands of the current insn are recorded here. */ int n_earlyclobbers; rtx reload_earlyclobbers[MAX_RECOG_OPERANDS]; int reload_n_operands; /* Replacing reloads. If `replace_reloads' is nonzero, then as each reload is recorded an entry is made for it in the table `replacements'. Then later `subst_reloads' can look through that table and perform all the replacements needed. */ /* Nonzero means record the places to replace. */ static int replace_reloads; /* Each replacement is recorded with a structure like this. */ struct replacement { rtx *where; /* Location to store in */ rtx *subreg_loc; /* Location of SUBREG if WHERE is inside a SUBREG; 0 otherwise. */ int what; /* which reload this is for */ enum machine_mode mode; /* mode it must have */ }; static struct replacement replacements[MAX_RECOG_OPERANDS * ((MAX_REGS_PER_ADDRESS * 2) + 1)]; /* Number of replacements currently recorded. */ static int n_replacements; /* Used to track what is modified by an operand. */ struct decomposition { int reg_flag; /* Nonzero if referencing a register. */ int safe; /* Nonzero if this can't conflict with anything. */ rtx base; /* Base address for MEM. */ HOST_WIDE_INT start; /* Starting offset or register number. */ HOST_WIDE_INT end; /* Ending offset or register number. */ }; #ifdef SECONDARY_MEMORY_NEEDED /* Save MEMs needed to copy from one class of registers to another. One MEM is used per mode, but normally only one or two modes are ever used. We keep two versions, before and after register elimination. The one after register elimination is record separately for each operand. This is done in case the address is not valid to be sure that we separately reload each. */ static rtx secondary_memlocs[NUM_MACHINE_MODES]; static rtx secondary_memlocs_elim[NUM_MACHINE_MODES][MAX_RECOG_OPERANDS]; static int secondary_memlocs_elim_used = 0; #endif /* The instruction we are doing reloads for; so we can test whether a register dies in it. */ static rtx this_insn; /* Nonzero if this instruction is a user-specified asm with operands. */ static int this_insn_is_asm; /* If hard_regs_live_known is nonzero, we can tell which hard regs are currently live, at least enough to succeed in choosing dummy reloads. */ static int hard_regs_live_known; /* Indexed by hard reg number, element is nonnegative if hard reg has been spilled. This vector is passed to `find_reloads' as an argument and is not changed here. */ static short *static_reload_reg_p; /* Set to 1 in subst_reg_equivs if it changes anything. 
*/ static int subst_reg_equivs_changed; /* On return from push_reload, holds the reload-number for the OUT operand, which can be different for that from the input operand. */ static int output_reloadnum; /* Compare two RTX's. */ #define MATCHES(x, y) \ (x == y || (x != 0 && (REG_P (x) \ ? REG_P (y) && REGNO (x) == REGNO (y) \ : rtx_equal_p (x, y) && ! side_effects_p (x)))) /* Indicates if two reloads purposes are for similar enough things that we can merge their reloads. */ #define MERGABLE_RELOADS(when1, when2, op1, op2) \ ((when1) == RELOAD_OTHER || (when2) == RELOAD_OTHER \ || ((when1) == (when2) && (op1) == (op2)) \ || ((when1) == RELOAD_FOR_INPUT && (when2) == RELOAD_FOR_INPUT) \ || ((when1) == RELOAD_FOR_OPERAND_ADDRESS \ && (when2) == RELOAD_FOR_OPERAND_ADDRESS) \ || ((when1) == RELOAD_FOR_OTHER_ADDRESS \ && (when2) == RELOAD_FOR_OTHER_ADDRESS)) /* Nonzero if these two reload purposes produce RELOAD_OTHER when merged. */ #define MERGE_TO_OTHER(when1, when2, op1, op2) \ ((when1) != (when2) \ || ! ((op1) == (op2) \ || (when1) == RELOAD_FOR_INPUT \ || (when1) == RELOAD_FOR_OPERAND_ADDRESS \ || (when1) == RELOAD_FOR_OTHER_ADDRESS)) /* If we are going to reload an address, compute the reload type to use. */ #define ADDR_TYPE(type) \ ((type) == RELOAD_FOR_INPUT_ADDRESS \ ? RELOAD_FOR_INPADDR_ADDRESS \ : ((type) == RELOAD_FOR_OUTPUT_ADDRESS \ ? RELOAD_FOR_OUTADDR_ADDRESS \ : (type))) #ifdef HAVE_SECONDARY_RELOADS static int push_secondary_reload (int, rtx, int, int, enum reg_class, enum machine_mode, enum reload_type, enum insn_code *); #endif static enum reg_class find_valid_class (enum machine_mode, int, unsigned int); static int reload_inner_reg_of_subreg (rtx, enum machine_mode, int); static void push_replacement (rtx *, int, enum machine_mode); static void dup_replacements (rtx *, rtx *); static void combine_reloads (void); static int find_reusable_reload (rtx *, rtx, enum reg_class, enum reload_type, int, int); static rtx find_dummy_reload (rtx, rtx, rtx *, rtx *, enum machine_mode, enum machine_mode, enum reg_class, int, int); static int hard_reg_set_here_p (unsigned int, unsigned int, rtx); static struct decomposition decompose (rtx); static int immune_p (rtx, rtx, struct decomposition); static int alternative_allows_memconst (const char *, int); static rtx find_reloads_toplev (rtx, int, enum reload_type, int, int, rtx, int *); static rtx make_memloc (rtx, int); static int maybe_memory_address_p (enum machine_mode, rtx, rtx *); static int find_reloads_address (enum machine_mode, rtx *, rtx, rtx *, int, enum reload_type, int, rtx); static rtx subst_reg_equivs (rtx, rtx); static rtx subst_indexed_address (rtx); static void update_auto_inc_notes (rtx, int, int); static int find_reloads_address_1 (enum machine_mode, rtx, int, rtx *, int, enum reload_type,int, rtx); static void find_reloads_address_part (rtx, rtx *, enum reg_class, enum machine_mode, int, enum reload_type, int); static rtx find_reloads_subreg_address (rtx, int, int, enum reload_type, int, rtx); static void copy_replacements_1 (rtx *, rtx *, int); static int find_inc_amount (rtx, rtx); #ifdef HAVE_SECONDARY_RELOADS /* Determine if any secondary reloads are needed for loading (if IN_P is nonzero) or storing (if IN_P is zero) X to or from a reload register of register class RELOAD_CLASS in mode RELOAD_MODE. If secondary reloads are needed, push them. Return the reload number of the secondary reload we made, or -1 if we didn't need one. *PICODE is set to the insn_code to use if we do need a secondary reload. 
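A typical use (illustrative, machine dependent): a target where some values cannot be moved directly into a floating-point register and must pass through a general register first; SECONDARY_INPUT_RELOAD_CLASS then returns GENERAL_REGS for that case, and a secondary reload is pushed for the intermediate register here, possibly together with a tertiary reload for a scratch register.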
*/ static int push_secondary_reload (int in_p, rtx x, int opnum, int optional, enum reg_class reload_class, enum machine_mode reload_mode, enum reload_type type, enum insn_code *picode) { enum reg_class class = NO_REGS; enum machine_mode mode = reload_mode; enum insn_code icode = CODE_FOR_nothing; enum reg_class t_class = NO_REGS; enum machine_mode t_mode = VOIDmode; enum insn_code t_icode = CODE_FOR_nothing; enum reload_type secondary_type; int s_reload, t_reload = -1; if (type == RELOAD_FOR_INPUT_ADDRESS || type == RELOAD_FOR_OUTPUT_ADDRESS || type == RELOAD_FOR_INPADDR_ADDRESS || type == RELOAD_FOR_OUTADDR_ADDRESS) secondary_type = type; else secondary_type = in_p ? RELOAD_FOR_INPUT_ADDRESS : RELOAD_FOR_OUTPUT_ADDRESS; *picode = CODE_FOR_nothing; /* If X is a paradoxical SUBREG, use the inner value to determine both the mode and object being reloaded. */ if (GET_CODE (x) == SUBREG && (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))) { x = SUBREG_REG (x); reload_mode = GET_MODE (x); } /* If X is a pseudo-register that has an equivalent MEM (actually, if it is still a pseudo-register by now, it *must* have an equivalent MEM but we don't want to assume that), use that equivalent when seeing if a secondary reload is needed since whether or not a reload is needed might be sensitive to the form of the MEM. */ if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER && reg_equiv_mem[REGNO (x)] != 0) x = reg_equiv_mem[REGNO (x)]; #ifdef SECONDARY_INPUT_RELOAD_CLASS if (in_p) class = SECONDARY_INPUT_RELOAD_CLASS (reload_class, reload_mode, x); #endif #ifdef SECONDARY_OUTPUT_RELOAD_CLASS if (! in_p) class = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class, reload_mode, x); #endif /* If we don't need any secondary registers, done. */ if (class == NO_REGS) return -1; /* Get a possible insn to use. If the predicate doesn't accept X, don't use the insn. */ icode = (in_p ? reload_in_optab[(int) reload_mode] : reload_out_optab[(int) reload_mode]); if (icode != CODE_FOR_nothing && insn_data[(int) icode].operand[in_p].predicate && (! (insn_data[(int) icode].operand[in_p].predicate) (x, reload_mode))) icode = CODE_FOR_nothing; /* If we will be using an insn, see if it can directly handle the reload register we will be using. If it can, the secondary reload is for a scratch register. If it can't, we will use the secondary reload for an intermediate register and require a tertiary reload for the scratch register. */ if (icode != CODE_FOR_nothing) { /* If IN_P is nonzero, the reload register will be the output in operand 0. If IN_P is zero, the reload register will be the input in operand 1. Outputs should have an initial "=", which we must skip. */ enum reg_class insn_class; if (insn_data[(int) icode].operand[!in_p].constraint[0] == 0) insn_class = ALL_REGS; else { const char *insn_constraint = &insn_data[(int) icode].operand[!in_p].constraint[in_p]; char insn_letter = *insn_constraint; insn_class = (insn_letter == 'r' ? GENERAL_REGS : REG_CLASS_FROM_CONSTRAINT ((unsigned char) insn_letter, insn_constraint)); if (insn_class == NO_REGS) abort (); if (in_p && insn_data[(int) icode].operand[!in_p].constraint[0] != '=') abort (); } /* The scratch register's constraint must start with "=&". 
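That is, operand 2 of the reload pattern must be a written, earlyclobbered scratch register, which guarantees it will not be assigned a register that overlaps any input of the pattern.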
*/ if (insn_data[(int) icode].operand[2].constraint[0] != '=' || insn_data[(int) icode].operand[2].constraint[1] != '&') abort (); if (reg_class_subset_p (reload_class, insn_class)) mode = insn_data[(int) icode].operand[2].mode; else { const char *t_constraint = &insn_data[(int) icode].operand[2].constraint[2]; char t_letter = *t_constraint; class = insn_class; t_mode = insn_data[(int) icode].operand[2].mode; t_class = (t_letter == 'r' ? GENERAL_REGS : REG_CLASS_FROM_CONSTRAINT ((unsigned char) t_letter, t_constraint)); t_icode = icode; icode = CODE_FOR_nothing; } } /* This case isn't valid, so fail. Reload is allowed to use the same register for RELOAD_FOR_INPUT_ADDRESS and RELOAD_FOR_INPUT reloads, but in the case of a secondary register, we actually need two different registers for correct code. We fail here to prevent the possibility of silently generating incorrect code later. The convention is that secondary input reloads are valid only if the secondary_class is different from class. If you have such a case, you can not use secondary reloads, you must work around the problem some other way. Allow this when a reload_in/out pattern is being used. I.e. assume that the generated code handles this case. */ if (in_p && class == reload_class && icode == CODE_FOR_nothing && t_icode == CODE_FOR_nothing) abort (); /* If we need a tertiary reload, see if we have one we can reuse or else make a new one. */ if (t_class != NO_REGS) { for (t_reload = 0; t_reload < n_reloads; t_reload++) if (rld[t_reload].secondary_p && (reg_class_subset_p (t_class, rld[t_reload].class) || reg_class_subset_p (rld[t_reload].class, t_class)) && ((in_p && rld[t_reload].inmode == t_mode) || (! in_p && rld[t_reload].outmode == t_mode)) && ((in_p && (rld[t_reload].secondary_in_icode == CODE_FOR_nothing)) || (! in_p &&(rld[t_reload].secondary_out_icode == CODE_FOR_nothing))) && (reg_class_size[(int) t_class] == 1 || SMALL_REGISTER_CLASSES) && MERGABLE_RELOADS (secondary_type, rld[t_reload].when_needed, opnum, rld[t_reload].opnum)) { if (in_p) rld[t_reload].inmode = t_mode; if (! in_p) rld[t_reload].outmode = t_mode; if (reg_class_subset_p (t_class, rld[t_reload].class)) rld[t_reload].class = t_class; rld[t_reload].opnum = MIN (rld[t_reload].opnum, opnum); rld[t_reload].optional &= optional; rld[t_reload].secondary_p = 1; if (MERGE_TO_OTHER (secondary_type, rld[t_reload].when_needed, opnum, rld[t_reload].opnum)) rld[t_reload].when_needed = RELOAD_OTHER; } if (t_reload == n_reloads) { /* We need to make a new tertiary reload for this register class. */ rld[t_reload].in = rld[t_reload].out = 0; rld[t_reload].class = t_class; rld[t_reload].inmode = in_p ? t_mode : VOIDmode; rld[t_reload].outmode = ! in_p ? t_mode : VOIDmode; rld[t_reload].reg_rtx = 0; rld[t_reload].optional = optional; rld[t_reload].inc = 0; /* Maybe we could combine these, but it seems too tricky. */ rld[t_reload].nocombine = 1; rld[t_reload].in_reg = 0; rld[t_reload].out_reg = 0; rld[t_reload].opnum = opnum; rld[t_reload].when_needed = secondary_type; rld[t_reload].secondary_in_reload = -1; rld[t_reload].secondary_out_reload = -1; rld[t_reload].secondary_in_icode = CODE_FOR_nothing; rld[t_reload].secondary_out_icode = CODE_FOR_nothing; rld[t_reload].secondary_p = 1; n_reloads++; } } /* See if we can reuse an existing secondary reload. 
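Reuse requires, among other checks, a compatible register class, the same mode in the direction being reloaded, the same tertiary reload and insn codes, and purposes that MERGABLE_RELOADS accepts; otherwise a new secondary reload is created below.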
*/ for (s_reload = 0; s_reload < n_reloads; s_reload++) if (rld[s_reload].secondary_p && (reg_class_subset_p (class, rld[s_reload].class) || reg_class_subset_p (rld[s_reload].class, class)) && ((in_p && rld[s_reload].inmode == mode) || (! in_p && rld[s_reload].outmode == mode)) && ((in_p && rld[s_reload].secondary_in_reload == t_reload) || (! in_p && rld[s_reload].secondary_out_reload == t_reload)) && ((in_p && rld[s_reload].secondary_in_icode == t_icode) || (! in_p && rld[s_reload].secondary_out_icode == t_icode)) && (reg_class_size[(int) class] == 1 || SMALL_REGISTER_CLASSES) && MERGABLE_RELOADS (secondary_type, rld[s_reload].when_needed, opnum, rld[s_reload].opnum)) { if (in_p) rld[s_reload].inmode = mode; if (! in_p) rld[s_reload].outmode = mode; if (reg_class_subset_p (class, rld[s_reload].class)) rld[s_reload].class = class; rld[s_reload].opnum = MIN (rld[s_reload].opnum, opnum); rld[s_reload].optional &= optional; rld[s_reload].secondary_p = 1; if (MERGE_TO_OTHER (secondary_type, rld[s_reload].when_needed, opnum, rld[s_reload].opnum)) rld[s_reload].when_needed = RELOAD_OTHER; } if (s_reload == n_reloads) { #ifdef SECONDARY_MEMORY_NEEDED /* If we need a memory location to copy between the two reload regs, set it up now. Note that we do the input case before making the reload and the output case after. This is due to the way reloads are output. */ if (in_p && icode == CODE_FOR_nothing && SECONDARY_MEMORY_NEEDED (class, reload_class, mode)) { get_secondary_mem (x, reload_mode, opnum, type); /* We may have just added new reloads. Make sure we add the new reload at the end. */ s_reload = n_reloads; } #endif /* We need to make a new secondary reload for this register class. */ rld[s_reload].in = rld[s_reload].out = 0; rld[s_reload].class = class; rld[s_reload].inmode = in_p ? mode : VOIDmode; rld[s_reload].outmode = ! in_p ? mode : VOIDmode; rld[s_reload].reg_rtx = 0; rld[s_reload].optional = optional; rld[s_reload].inc = 0; /* Maybe we could combine these, but it seems too tricky. */ rld[s_reload].nocombine = 1; rld[s_reload].in_reg = 0; rld[s_reload].out_reg = 0; rld[s_reload].opnum = opnum; rld[s_reload].when_needed = secondary_type; rld[s_reload].secondary_in_reload = in_p ? t_reload : -1; rld[s_reload].secondary_out_reload = ! in_p ? t_reload : -1; rld[s_reload].secondary_in_icode = in_p ? t_icode : CODE_FOR_nothing; rld[s_reload].secondary_out_icode = ! in_p ? t_icode : CODE_FOR_nothing; rld[s_reload].secondary_p = 1; n_reloads++; #ifdef SECONDARY_MEMORY_NEEDED if (! in_p && icode == CODE_FOR_nothing && SECONDARY_MEMORY_NEEDED (reload_class, class, mode)) get_secondary_mem (x, mode, opnum, type); #endif } *picode = icode; return s_reload; } #endif /* HAVE_SECONDARY_RELOADS */ #ifdef SECONDARY_MEMORY_NEEDED /* Return a memory location that will be used to copy X in mode MODE. If we haven't already made a location for this mode in this insn, call find_reloads_address on the location being returned. */ rtx get_secondary_mem (rtx x ATTRIBUTE_UNUSED, enum machine_mode mode, int opnum, enum reload_type type) { rtx loc; int mem_valid; /* By default, if MODE is narrower than a word, widen it to a word. This is required because most machines that require these memory locations do not support short load and stores from all registers (e.g., FP registers). 
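   For example, on a 32-bit target a QImode secondary memory location would be widened to SImode here, so the copy through memory can use full-word loads and stores regardless of the register class involved.  (Illustrative; SECONDARY_MEMORY_NEEDED_MODE may override this choice on some targets.)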
*/ #ifdef SECONDARY_MEMORY_NEEDED_MODE mode = SECONDARY_MEMORY_NEEDED_MODE (mode); #else if (GET_MODE_BITSIZE (mode) < BITS_PER_WORD && INTEGRAL_MODE_P (mode)) mode = mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0); #endif /* If we already have made a MEM for this operand in MODE, return it. */ if (secondary_memlocs_elim[(int) mode][opnum] != 0) return secondary_memlocs_elim[(int) mode][opnum]; /* If this is the first time we've tried to get a MEM for this mode, allocate a new one. `something_changed' in reload will get set by noticing that the frame size has changed. */ if (secondary_memlocs[(int) mode] == 0) { #ifdef SECONDARY_MEMORY_NEEDED_RTX secondary_memlocs[(int) mode] = SECONDARY_MEMORY_NEEDED_RTX (mode); #else secondary_memlocs[(int) mode] = assign_stack_local (mode, GET_MODE_SIZE (mode), 0); #endif } /* Get a version of the address doing any eliminations needed. If that didn't give us a new MEM, make a new one if it isn't valid. */ loc = eliminate_regs (secondary_memlocs[(int) mode], VOIDmode, NULL_RTX); mem_valid = strict_memory_address_p (mode, XEXP (loc, 0)); if (! mem_valid && loc == secondary_memlocs[(int) mode]) loc = copy_rtx (loc); /* The only time the call below will do anything is if the stack offset is too large. In that case IND_LEVELS doesn't matter, so we can just pass a zero. Adjust the type to be the address of the corresponding object. If the address was valid, save the eliminated address. If it wasn't valid, we need to make a reload each time, so don't save it. */ if (! mem_valid) { type = (type == RELOAD_FOR_INPUT ? RELOAD_FOR_INPUT_ADDRESS : type == RELOAD_FOR_OUTPUT ? RELOAD_FOR_OUTPUT_ADDRESS : RELOAD_OTHER); find_reloads_address (mode, &loc, XEXP (loc, 0), &XEXP (loc, 0), opnum, type, 0, 0); } secondary_memlocs_elim[(int) mode][opnum] = loc; if (secondary_memlocs_elim_used <= (int)mode) secondary_memlocs_elim_used = (int)mode + 1; return loc; } /* Clear any secondary memory locations we've made. */ void clear_secondary_mem (void) { memset (secondary_memlocs, 0, sizeof secondary_memlocs); } #endif /* SECONDARY_MEMORY_NEEDED */ /* Find the largest class for which every register number plus N is valid in M1 (if in range) and is cheap to move into REGNO. Abort if no such class exists. */ static enum reg_class find_valid_class (enum machine_mode m1 ATTRIBUTE_UNUSED, int n, unsigned int dest_regno ATTRIBUTE_UNUSED) { int best_cost = -1; int class; int regno; enum reg_class best_class = NO_REGS; enum reg_class dest_class ATTRIBUTE_UNUSED = REGNO_REG_CLASS (dest_regno); unsigned int best_size = 0; int cost; for (class = 1; class < N_REG_CLASSES; class++) { int bad = 0; for (regno = 0; regno < FIRST_PSEUDO_REGISTER && ! bad; regno++) if (TEST_HARD_REG_BIT (reg_class_contents[class], regno) && TEST_HARD_REG_BIT (reg_class_contents[class], regno + n) && ! HARD_REGNO_MODE_OK (regno + n, m1)) bad = 1; if (bad) continue; cost = REGISTER_MOVE_COST (m1, class, dest_class); if ((reg_class_size[class] > best_size && (best_cost < 0 || best_cost >= cost)) || best_cost > cost) { best_class = class; best_size = reg_class_size[class]; best_cost = REGISTER_MOVE_COST (m1, class, dest_class); } } if (best_size == 0) abort (); return best_class; } /* Return the number of a previously made reload that can be combined with a new one, or n_reloads if none of the existing reloads can be used. OUT, CLASS, TYPE and OPNUM are the same arguments as passed to push_reload, they determine the kind of the new reload that we try to combine. 
P_IN points to the corresponding value of IN, which can be modified by this function. DONT_SHARE is nonzero if we can't share any input-only reload for IN. */ static int find_reusable_reload (rtx *p_in, rtx out, enum reg_class class, enum reload_type type, int opnum, int dont_share) { rtx in = *p_in; int i; /* We can't merge two reloads if the output of either one is earlyclobbered. */ if (earlyclobber_operand_p (out)) return n_reloads; /* We can use an existing reload if the class is right and at least one of IN and OUT is a match and the other is at worst neutral. (A zero compared against anything is neutral.) If SMALL_REGISTER_CLASSES, don't use existing reloads unless they are for the same thing since that can cause us to need more reload registers than we otherwise would. */ for (i = 0; i < n_reloads; i++) if ((reg_class_subset_p (class, rld[i].class) || reg_class_subset_p (rld[i].class, class)) /* If the existing reload has a register, it must fit our class. */ && (rld[i].reg_rtx == 0 || TEST_HARD_REG_BIT (reg_class_contents[(int) class], true_regnum (rld[i].reg_rtx))) && ((in != 0 && MATCHES (rld[i].in, in) && ! dont_share && (out == 0 || rld[i].out == 0 || MATCHES (rld[i].out, out))) || (out != 0 && MATCHES (rld[i].out, out) && (in == 0 || rld[i].in == 0 || MATCHES (rld[i].in, in)))) && (rld[i].out == 0 || ! earlyclobber_operand_p (rld[i].out)) && (reg_class_size[(int) class] == 1 || SMALL_REGISTER_CLASSES) && MERGABLE_RELOADS (type, rld[i].when_needed, opnum, rld[i].opnum)) return i; /* Reloading a plain reg for input can match a reload to postincrement that reg, since the postincrement's value is the right value. Likewise, it can match a preincrement reload, since we regard the preincrementation as happening before any ref in this insn to that register. */ for (i = 0; i < n_reloads; i++) if ((reg_class_subset_p (class, rld[i].class) || reg_class_subset_p (rld[i].class, class)) /* If the existing reload has a register, it must fit our class. */ && (rld[i].reg_rtx == 0 || TEST_HARD_REG_BIT (reg_class_contents[(int) class], true_regnum (rld[i].reg_rtx))) && out == 0 && rld[i].out == 0 && rld[i].in != 0 && ((REG_P (in) && GET_RTX_CLASS (GET_CODE (rld[i].in)) == RTX_AUTOINC && MATCHES (XEXP (rld[i].in, 0), in)) || (REG_P (rld[i].in) && GET_RTX_CLASS (GET_CODE (in)) == RTX_AUTOINC && MATCHES (XEXP (in, 0), rld[i].in))) && (rld[i].out == 0 || ! earlyclobber_operand_p (rld[i].out)) && (reg_class_size[(int) class] == 1 || SMALL_REGISTER_CLASSES) && MERGABLE_RELOADS (type, rld[i].when_needed, opnum, rld[i].opnum)) { /* Make sure reload_in ultimately has the increment, not the plain register. */ if (REG_P (in)) *p_in = rld[i].in; return i; } return n_reloads; } /* Return nonzero if X is a SUBREG which will require reloading of its SUBREG_REG expression. */ static int reload_inner_reg_of_subreg (rtx x, enum machine_mode mode, int output) { rtx inner; /* Only SUBREGs are problematical. */ if (GET_CODE (x) != SUBREG) return 0; inner = SUBREG_REG (x); /* If INNER is a constant or PLUS, then INNER must be reloaded. */ if (CONSTANT_P (inner) || GET_CODE (inner) == PLUS) return 1; /* If INNER is not a hard register, then INNER will not need to be reloaded. */ if (!REG_P (inner) || REGNO (inner) >= FIRST_PSEUDO_REGISTER) return 0; /* If INNER is not ok for MODE, then INNER will need reloading. */ if (! 
HARD_REGNO_MODE_OK (subreg_regno (x), mode)) return 1; /* If the outer part is a word or smaller, INNER larger than a word and the number of regs for INNER is not the same as the number of words in INNER, then INNER will need reloading. */ return (GET_MODE_SIZE (mode) <= UNITS_PER_WORD && output && GET_MODE_SIZE (GET_MODE (inner)) > UNITS_PER_WORD && ((GET_MODE_SIZE (GET_MODE (inner)) / UNITS_PER_WORD) != (int) hard_regno_nregs[REGNO (inner)][GET_MODE (inner)])); } /* Return nonzero if IN can be reloaded into REGNO with mode MODE without requiring an extra reload register. The caller has already found that IN contains some reference to REGNO, so check that we can produce the new value in a single step. E.g. if we have (set (reg r13) (plus (reg r13) (const int 1))), and there is an instruction that adds one to a register, this should succeed. However, if we have something like (set (reg r13) (plus (reg r13) (const int 999))), and the constant 999 needs to be loaded into a register first, we need a separate reload register. Such PLUS reloads are generated by find_reload_address_part. The out-of-range PLUS expressions are usually introduced in the instruction patterns by register elimination and substituting pseudos without a home by their function-invariant equivalences. */ static int can_reload_into (rtx in, int regno, enum machine_mode mode) { rtx dst, test_insn; int r = 0; struct recog_data save_recog_data; /* For matching constraints, we often get notional input reloads where we want to use the original register as the reload register. I.e. technically this is a non-optional input-output reload, but IN is already a valid register, and has been chosen as the reload register. Speed this up, since it trivially works. */ if (REG_P (in)) return 1; /* To test MEMs properly, we'd have to take into account all the reloads that are already scheduled, which can become quite complicated. And since we've already handled address reloads for this MEM, it should always succeed anyway. */ if (MEM_P (in)) return 1; /* If we can make a simple SET insn that does the job, everything should be fine. */ dst = gen_rtx_REG (mode, regno); test_insn = make_insn_raw (gen_rtx_SET (VOIDmode, dst, in)); save_recog_data = recog_data; if (recog_memoized (test_insn) >= 0) { extract_insn (test_insn); r = constrain_operands (1); } recog_data = save_recog_data; return r; } /* Record one reload that needs to be performed. IN is an rtx saying where the data are to be found before this instruction. OUT says where they must be stored after the instruction. (IN is zero for data not read, and OUT is zero for data not written.) INLOC and OUTLOC point to the places in the instructions where IN and OUT were found. If IN and OUT are both nonzero, it means the same register must be used to reload both IN and OUT. CLASS is a register class required for the reloaded data. INMODE is the machine mode that the instruction requires for the reg that replaces IN and OUTMODE is likewise for OUT. If IN is zero, then OUT's location and mode should be passed as INLOC and INMODE. STRICT_LOW is the 1 if there is a containing STRICT_LOW_PART rtx. OPTIONAL nonzero means this reload does not need to be performed: it can be discarded if that is more convenient. OPNUM and TYPE say what the purpose of this reload is. The return value is the reload-number for this reload. If both IN and OUT are nonzero, in some rare cases we might want to make two separate reloads. (Actually we never do this now.) 
Therefore, the reload-number for OUT is stored in output_reloadnum when we return; the return value applies to IN. Usually (presently always), when IN and OUT are nonzero, the two reload-numbers are equal, but the caller should be careful to distinguish them. */ int push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, enum reg_class class, enum machine_mode inmode, enum machine_mode outmode, int strict_low, int optional, int opnum, enum reload_type type) { int i; int dont_share = 0; int dont_remove_subreg = 0; rtx *in_subreg_loc = 0, *out_subreg_loc = 0; int secondary_in_reload = -1, secondary_out_reload = -1; enum insn_code secondary_in_icode = CODE_FOR_nothing; enum insn_code secondary_out_icode = CODE_FOR_nothing; /* INMODE and/or OUTMODE could be VOIDmode if no mode has been specified for the operand. In that case, use the operand's mode as the mode to reload. */ if (inmode == VOIDmode && in != 0) inmode = GET_MODE (in); if (outmode == VOIDmode && out != 0) outmode = GET_MODE (out); /* If IN is a pseudo register everywhere-equivalent to a constant, and it is not in a hard register, reload straight from the constant, since we want to get rid of such pseudo registers. Often this is done earlier, but not always in find_reloads_address. */ if (in != 0 && REG_P (in)) { int regno = REGNO (in); if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0 && reg_equiv_constant[regno] != 0) in = reg_equiv_constant[regno]; } /* Likewise for OUT. Of course, OUT will never be equivalent to an actual constant, but it might be equivalent to a memory location (in the case of a parameter). */ if (out != 0 && REG_P (out)) { int regno = REGNO (out); if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0 && reg_equiv_constant[regno] != 0) out = reg_equiv_constant[regno]; } /* If we have a read-write operand with an address side-effect, change either IN or OUT so the side-effect happens only once. */ if (in != 0 && out != 0 && MEM_P (in) && rtx_equal_p (in, out)) switch (GET_CODE (XEXP (in, 0))) { case POST_INC: case POST_DEC: case POST_MODIFY: in = replace_equiv_address_nv (in, XEXP (XEXP (in, 0), 0)); break; case PRE_INC: case PRE_DEC: case PRE_MODIFY: out = replace_equiv_address_nv (out, XEXP (XEXP (out, 0), 0)); break; default: break; } /* If we are reloading a (SUBREG constant ...), really reload just the inside expression in its own mode. Similarly for (SUBREG (PLUS ...)). If we have (SUBREG:M1 (MEM:M2 ...) ...) (or an inner REG that is still a pseudo and hence will become a MEM) with M1 wider than M2 and the register is a pseudo, also reload the inside expression. For machines that extend byte loads, do this for any SUBREG of a pseudo where both M1 and M2 are a word or smaller, M1 is wider than M2, and M2 is an integral mode that gets extended when loaded. Similar issue for (SUBREG:M1 (REG:M2 ...) ...) for a hard register R where either M1 is not valid for R or M2 is wider than a word but we only need one word to store an M2-sized quantity in R. (However, if OUT is nonzero, we need to reload the reg *and* the subreg, so do nothing here, and let following statement handle it.) Note that the case of (SUBREG (CONST_INT...)...) is handled elsewhere; we can't handle it here because CONST_INT does not indicate a mode. Similarly, we must reload the inside expression if we have a STRICT_LOW_PART (presumably, in == out in this case). Also reload the inner expression if it does not require a secondary reload but the SUBREG does.
Finally, reload the inner expression if it is a register that is in the class whose registers cannot be referenced in a different size and M1 is not the same size as M2. If subreg_lowpart_p is false, we cannot reload just the inside since we might end up with the wrong register class. But if it is inside a STRICT_LOW_PART, we have no choice, so we hope we do get the right register class there. */ if (in != 0 && GET_CODE (in) == SUBREG && (subreg_lowpart_p (in) || strict_low) #ifdef CANNOT_CHANGE_MODE_CLASS && !CANNOT_CHANGE_MODE_CLASS (GET_MODE (SUBREG_REG (in)), inmode, class) #endif && (CONSTANT_P (SUBREG_REG (in)) || GET_CODE (SUBREG_REG (in)) == PLUS || strict_low || (((REG_P (SUBREG_REG (in)) && REGNO (SUBREG_REG (in)) >= FIRST_PSEUDO_REGISTER) || MEM_P (SUBREG_REG (in))) && ((GET_MODE_SIZE (inmode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))) #ifdef LOAD_EXTEND_OP || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) <= UNITS_PER_WORD) && (GET_MODE_SIZE (inmode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))) && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (in))) && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (in))) != NIL) #endif #ifdef WORD_REGISTER_OPERATIONS || ((GET_MODE_SIZE (inmode) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))) && ((GET_MODE_SIZE (inmode) - 1) / UNITS_PER_WORD == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) - 1) / UNITS_PER_WORD))) #endif )) || (REG_P (SUBREG_REG (in)) && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER /* The case where out is nonzero is handled differently in the following statement. */ && (out == 0 || subreg_lowpart_p (in)) && ((GET_MODE_SIZE (inmode) <= UNITS_PER_WORD && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) > UNITS_PER_WORD) && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) / UNITS_PER_WORD) != (int) hard_regno_nregs[REGNO (SUBREG_REG (in))] [GET_MODE (SUBREG_REG (in))])) || ! HARD_REGNO_MODE_OK (subreg_regno (in), inmode))) #ifdef SECONDARY_INPUT_RELOAD_CLASS || (SECONDARY_INPUT_RELOAD_CLASS (class, inmode, in) != NO_REGS && (SECONDARY_INPUT_RELOAD_CLASS (class, GET_MODE (SUBREG_REG (in)), SUBREG_REG (in)) == NO_REGS)) #endif #ifdef CANNOT_CHANGE_MODE_CLASS || (REG_P (SUBREG_REG (in)) && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER && REG_CANNOT_CHANGE_MODE_P (REGNO (SUBREG_REG (in)), GET_MODE (SUBREG_REG (in)), inmode)) #endif )) { in_subreg_loc = inloc; inloc = &SUBREG_REG (in); in = *inloc; #if ! defined (LOAD_EXTEND_OP) && ! defined (WORD_REGISTER_OPERATIONS) if (MEM_P (in)) /* This is supposed to happen only for paradoxical subregs made by combine.c. (SUBREG (MEM)) isn't supposed to occur other ways. */ if (GET_MODE_SIZE (GET_MODE (in)) > GET_MODE_SIZE (inmode)) abort (); #endif inmode = GET_MODE (in); } /* Similar issue for (SUBREG:M1 (REG:M2 ...) ...) for a hard register R where either M1 is not valid for R or M2 is wider than a word but we only need one word to store an M2-sized quantity in R. However, we must reload the inner reg *as well as* the subreg in that case. */ /* Similar issue for (SUBREG constant ...) if it was not handled by the code above. This can happen if SUBREG_BYTE != 0. 
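   (Added note: when this applies, the code below pushes a separate RELOAD_OTHER input reload for SUBREG_REG (in) itself, using find_valid_class to pick a class in which the inner hard register, offset by the subreg, is valid in INMODE; dont_remove_subreg is then set so the outer subreg is kept and both pieces get reloaded.)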
*/ if (in != 0 && reload_inner_reg_of_subreg (in, inmode, 0)) { enum reg_class in_class = class; if (REG_P (SUBREG_REG (in))) in_class = find_valid_class (inmode, subreg_regno_offset (REGNO (SUBREG_REG (in)), GET_MODE (SUBREG_REG (in)), SUBREG_BYTE (in), GET_MODE (in)), REGNO (SUBREG_REG (in))); /* This relies on the fact that emit_reload_insns outputs the instructions for input reloads of type RELOAD_OTHER in the same order as the reloads. Thus if the outer reload is also of type RELOAD_OTHER, we are guaranteed that this inner reload will be output before the outer reload. */ push_reload (SUBREG_REG (in), NULL_RTX, &SUBREG_REG (in), (rtx *) 0, in_class, VOIDmode, VOIDmode, 0, 0, opnum, type); dont_remove_subreg = 1; } /* Similarly for paradoxical and problematical SUBREGs on the output. Note that there is no reason we need worry about the previous value of SUBREG_REG (out); even if wider than out, storing in a subreg is entitled to clobber it all (except in the case of STRICT_LOW_PART, and in that case the constraint should label it input-output.) */ if (out != 0 && GET_CODE (out) == SUBREG && (subreg_lowpart_p (out) || strict_low) #ifdef CANNOT_CHANGE_MODE_CLASS && !CANNOT_CHANGE_MODE_CLASS (GET_MODE (SUBREG_REG (out)), outmode, class) #endif && (CONSTANT_P (SUBREG_REG (out)) || strict_low || (((REG_P (SUBREG_REG (out)) && REGNO (SUBREG_REG (out)) >= FIRST_PSEUDO_REGISTER) || MEM_P (SUBREG_REG (out))) && ((GET_MODE_SIZE (outmode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))) #ifdef WORD_REGISTER_OPERATIONS || ((GET_MODE_SIZE (outmode) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))) && ((GET_MODE_SIZE (outmode) - 1) / UNITS_PER_WORD == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) - 1) / UNITS_PER_WORD))) #endif )) || (REG_P (SUBREG_REG (out)) && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER && ((GET_MODE_SIZE (outmode) <= UNITS_PER_WORD && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) > UNITS_PER_WORD) && ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) / UNITS_PER_WORD) != (int) hard_regno_nregs[REGNO (SUBREG_REG (out))] [GET_MODE (SUBREG_REG (out))])) || ! HARD_REGNO_MODE_OK (subreg_regno (out), outmode))) #ifdef SECONDARY_OUTPUT_RELOAD_CLASS || (SECONDARY_OUTPUT_RELOAD_CLASS (class, outmode, out) != NO_REGS && (SECONDARY_OUTPUT_RELOAD_CLASS (class, GET_MODE (SUBREG_REG (out)), SUBREG_REG (out)) == NO_REGS)) #endif #ifdef CANNOT_CHANGE_MODE_CLASS || (REG_P (SUBREG_REG (out)) && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER && REG_CANNOT_CHANGE_MODE_P (REGNO (SUBREG_REG (out)), GET_MODE (SUBREG_REG (out)), outmode)) #endif )) { out_subreg_loc = outloc; outloc = &SUBREG_REG (out); out = *outloc; #if ! defined (LOAD_EXTEND_OP) && ! defined (WORD_REGISTER_OPERATIONS) if (MEM_P (out) && GET_MODE_SIZE (GET_MODE (out)) > GET_MODE_SIZE (outmode)) abort (); #endif outmode = GET_MODE (out); } /* Similar issue for (SUBREG:M1 (REG:M2 ...) ...) for a hard register R where either M1 is not valid for R or M2 is wider than a word but we only need one word to store an M2-sized quantity in R. However, we must reload the inner reg *as well as* the subreg in that case. In this case, the inner reg is an in-out reload. */ if (out != 0 && reload_inner_reg_of_subreg (out, outmode, 1)) { /* This relies on the fact that emit_reload_insns outputs the instructions for output reloads of type RELOAD_OTHER in reverse order of the reloads. Thus if the outer reload is also of type RELOAD_OTHER, we are guaranteed that this inner reload will be output after the outer reload. 
*/ dont_remove_subreg = 1; push_reload (SUBREG_REG (out), SUBREG_REG (out), &SUBREG_REG (out), &SUBREG_REG (out), find_valid_class (outmode, subreg_regno_offset (REGNO (SUBREG_REG (out)), GET_MODE (SUBREG_REG (out)), SUBREG_BYTE (out), GET_MODE (out)), REGNO (SUBREG_REG (out))), VOIDmode, VOIDmode, 0, 0, opnum, RELOAD_OTHER); } /* If IN appears in OUT, we can't share any input-only reload for IN. */ if (in != 0 && out != 0 && MEM_P (out) && (REG_P (in) || MEM_P (in)) && reg_overlap_mentioned_for_reload_p (in, XEXP (out, 0))) dont_share = 1; /* If IN is a SUBREG of a hard register, make a new REG. This simplifies some of the cases below. */ if (in != 0 && GET_CODE (in) == SUBREG && REG_P (SUBREG_REG (in)) && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER && ! dont_remove_subreg) in = gen_rtx_REG (GET_MODE (in), subreg_regno (in)); /* Similarly for OUT. */ if (out != 0 && GET_CODE (out) == SUBREG && REG_P (SUBREG_REG (out)) && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER && ! dont_remove_subreg) out = gen_rtx_REG (GET_MODE (out), subreg_regno (out)); /* Narrow down the class of register wanted if that is desirable on this machine for efficiency. */ if (in != 0) class = PREFERRED_RELOAD_CLASS (in, class); /* Output reloads may need analogous treatment, different in detail. */ #ifdef PREFERRED_OUTPUT_RELOAD_CLASS if (out != 0) class = PREFERRED_OUTPUT_RELOAD_CLASS (out, class); #endif /* Make sure we use a class that can handle the actual pseudo inside any subreg. For example, on the 386, QImode regs can appear within SImode subregs. Although GENERAL_REGS can handle SImode, QImode needs a smaller class. */ #ifdef LIMIT_RELOAD_CLASS if (in_subreg_loc) class = LIMIT_RELOAD_CLASS (inmode, class); else if (in != 0 && GET_CODE (in) == SUBREG) class = LIMIT_RELOAD_CLASS (GET_MODE (SUBREG_REG (in)), class); if (out_subreg_loc) class = LIMIT_RELOAD_CLASS (outmode, class); if (out != 0 && GET_CODE (out) == SUBREG) class = LIMIT_RELOAD_CLASS (GET_MODE (SUBREG_REG (out)), class); #endif /* Verify that this class is at least possible for the mode that is specified. */ if (this_insn_is_asm) { enum machine_mode mode; if (GET_MODE_SIZE (inmode) > GET_MODE_SIZE (outmode)) mode = inmode; else mode = outmode; if (mode == VOIDmode) { error_for_asm (this_insn, "cannot reload integer constant operand in `asm'"); mode = word_mode; if (in != 0) inmode = word_mode; if (out != 0) outmode = word_mode; } for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (HARD_REGNO_MODE_OK (i, mode) && TEST_HARD_REG_BIT (reg_class_contents[(int) class], i)) { int nregs = hard_regno_nregs[i][mode]; int j; for (j = 1; j < nregs; j++) if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class], i + j)) break; if (j == nregs) break; } if (i == FIRST_PSEUDO_REGISTER) { error_for_asm (this_insn, "impossible register constraint in `asm'"); class = ALL_REGS; } } /* Optional output reloads are always OK even if we have no register class, since the function of these reloads is only to have spill_reg_store etc. set, so that the storing insn can be deleted later. */ if (class == NO_REGS && (optional == 0 || type != RELOAD_FOR_OUTPUT)) abort (); i = find_reusable_reload (&in, out, class, type, opnum, dont_share); if (i == n_reloads) { /* See if we need a secondary reload register to move between CLASS and IN or CLASS and OUT. Get the icode and push any required reloads needed for each of them if so. 
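   (Added note: typically SECONDARY_INPUT_RELOAD_CLASS and SECONDARY_OUTPUT_RELOAD_CLASS return NO_REGS here and push_secondary_reload pushes nothing; only targets that cannot copy directly between CLASS and this operand, for example moves between general and floating-point registers that must go through an intermediate register or through memory, get extra reloads at this point.)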
*/ #ifdef SECONDARY_INPUT_RELOAD_CLASS if (in != 0) secondary_in_reload = push_secondary_reload (1, in, opnum, optional, class, inmode, type, &secondary_in_icode); #endif #ifdef SECONDARY_OUTPUT_RELOAD_CLASS if (out != 0 && GET_CODE (out) != SCRATCH) secondary_out_reload = push_secondary_reload (0, out, opnum, optional, class, outmode, type, &secondary_out_icode); #endif /* We found no existing reload suitable for re-use. So add an additional reload. */ #ifdef SECONDARY_MEMORY_NEEDED /* If a memory location is needed for the copy, make one. */ if (in != 0 && (REG_P (in) || GET_CODE (in) == SUBREG) && reg_or_subregno (in) < FIRST_PSEUDO_REGISTER && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (reg_or_subregno (in)), class, inmode)) get_secondary_mem (in, inmode, opnum, type); #endif i = n_reloads; rld[i].in = in; rld[i].out = out; rld[i].class = class; rld[i].inmode = inmode; rld[i].outmode = outmode; rld[i].reg_rtx = 0; rld[i].optional = optional; rld[i].inc = 0; rld[i].nocombine = 0; rld[i].in_reg = inloc ? *inloc : 0; rld[i].out_reg = outloc ? *outloc : 0; rld[i].opnum = opnum; rld[i].when_needed = type; rld[i].secondary_in_reload = secondary_in_reload; rld[i].secondary_out_reload = secondary_out_reload; rld[i].secondary_in_icode = secondary_in_icode; rld[i].secondary_out_icode = secondary_out_icode; rld[i].secondary_p = 0; n_reloads++; #ifdef SECONDARY_MEMORY_NEEDED if (out != 0 && (REG_P (out) || GET_CODE (out) == SUBREG) && reg_or_subregno (out) < FIRST_PSEUDO_REGISTER && SECONDARY_MEMORY_NEEDED (class, REGNO_REG_CLASS (reg_or_subregno (out)), outmode)) get_secondary_mem (out, outmode, opnum, type); #endif } else { /* We are reusing an existing reload, but we may have additional information for it. For example, we may now have both IN and OUT while the old one may have just one of them. */ /* The modes can be different. If they are, we want to reload in the larger mode, so that the value is valid for both modes. */ if (inmode != VOIDmode && GET_MODE_SIZE (inmode) > GET_MODE_SIZE (rld[i].inmode)) rld[i].inmode = inmode; if (outmode != VOIDmode && GET_MODE_SIZE (outmode) > GET_MODE_SIZE (rld[i].outmode)) rld[i].outmode = outmode; if (in != 0) { rtx in_reg = inloc ? *inloc : 0; /* If we merge reloads for two distinct rtl expressions that are identical in content, there might be duplicate address reloads. Remove the extra set now, so that if we later find that we can inherit this reload, we can get rid of the address reloads altogether. Do not do this if both reloads are optional since the result would be an optional reload which could potentially leave unresolved address replacements. It is not sufficient to call transfer_replacements since choose_reload_regs will remove the replacements for address reloads of inherited reloads which results in the same problem. */ if (rld[i].in != in && rtx_equal_p (in, rld[i].in) && ! (rld[i].optional && optional)) { /* We must keep the address reload with the lower operand number alive. */ if (opnum > rld[i].opnum) { remove_address_replacements (in); in = rld[i].in; in_reg = rld[i].in_reg; } else remove_address_replacements (rld[i].in); } rld[i].in = in; rld[i].in_reg = in_reg; } if (out != 0) { rld[i].out = out; rld[i].out_reg = outloc ? 
*outloc : 0; } if (reg_class_subset_p (class, rld[i].class)) rld[i].class = class; rld[i].optional &= optional; if (MERGE_TO_OTHER (type, rld[i].when_needed, opnum, rld[i].opnum)) rld[i].when_needed = RELOAD_OTHER; rld[i].opnum = MIN (rld[i].opnum, opnum); } /* If the ostensible rtx being reloaded differs from the rtx found in the location to substitute, this reload is not safe to combine because we cannot reliably tell whether it appears in the insn. */ if (in != 0 && in != *inloc) rld[i].nocombine = 1; #if 0 /* This was replaced by changes in find_reloads_address_1 and the new function inc_for_reload, which go with a new meaning of reload_inc. */ /* If this is an IN/OUT reload in an insn that sets the CC, it must be for an autoincrement. It doesn't work to store the incremented value after the insn because that would clobber the CC. So we must do the increment of the value reloaded from, increment it, store it back, then decrement again. */ if (out != 0 && sets_cc0_p (PATTERN (this_insn))) { out = 0; rld[i].out = 0; rld[i].inc = find_inc_amount (PATTERN (this_insn), in); /* If we did not find a nonzero amount-to-increment-by, that contradicts the belief that IN is being incremented in an address in this insn. */ if (rld[i].inc == 0) abort (); } #endif /* If we will replace IN and OUT with the reload-reg, record where they are located so that substitution need not do a tree walk. */ if (replace_reloads) { if (inloc != 0) { struct replacement *r = &replacements[n_replacements++]; r->what = i; r->subreg_loc = in_subreg_loc; r->where = inloc; r->mode = inmode; } if (outloc != 0 && outloc != inloc) { struct replacement *r = &replacements[n_replacements++]; r->what = i; r->where = outloc; r->subreg_loc = out_subreg_loc; r->mode = outmode; } } /* If this reload is just being introduced and it has both an incoming quantity and an outgoing quantity that are supposed to be made to match, see if either one of the two can serve as the place to reload into. If one of them is acceptable, set rld[i].reg_rtx to that one. */ if (in != 0 && out != 0 && in != out && rld[i].reg_rtx == 0) { rld[i].reg_rtx = find_dummy_reload (in, out, inloc, outloc, inmode, outmode, rld[i].class, i, earlyclobber_operand_p (out)); /* If the outgoing register already contains the same value as the incoming one, we can dispense with loading it. The easiest way to tell the caller that is to give a phony value for the incoming operand (same as outgoing one). */ if (rld[i].reg_rtx == out && (REG_P (in) || CONSTANT_P (in)) && 0 != find_equiv_reg (in, this_insn, 0, REGNO (out), static_reload_reg_p, i, inmode)) rld[i].in = out; } /* If this is an input reload and the operand contains a register that dies in this insn and is used nowhere else, see if it is the right class to be used for this reload. Use it if so. (This occurs most commonly in the case of paradoxical SUBREGs and in-out reloads). We cannot do this if it is also an output reload that mentions the register unless the output is a SUBREG that clobbers an entire register. Note that the operand might be one of the spill regs, if it is a pseudo reg and we are in a block where spilling has not taken place. But if there is no spilling in this block, that is OK. An explicitly used hard reg cannot be a spill reg. 
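   For instance, if IN is (plus:SI (reg:SI 3) (const_int 4)), hard reg 3 dies in this insn and is used nowhere else in the pattern, belongs to CLASS, and an add-immediate instruction can compute the sum directly into reg 3 (see can_reload_into above), then reg 3 itself is chosen as the reload register and no separate spill register is needed.  (Illustrative rtl only; the loop below performs the actual checks on the REG_DEAD notes.)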
*/ if (rld[i].reg_rtx == 0 && in != 0) { rtx note; int regno; enum machine_mode rel_mode = inmode; if (out && GET_MODE_SIZE (outmode) > GET_MODE_SIZE (inmode)) rel_mode = outmode; for (note = REG_NOTES (this_insn); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_DEAD && REG_P (XEXP (note, 0)) && (regno = REGNO (XEXP (note, 0))) < FIRST_PSEUDO_REGISTER && reg_mentioned_p (XEXP (note, 0), in) && ! refers_to_regno_for_reload_p (regno, (regno + hard_regno_nregs[regno] [rel_mode]), PATTERN (this_insn), inloc) /* If this is also an output reload, IN cannot be used as the reload register if it is set in this insn unless IN is also OUT. */ && (out == 0 || in == out || ! hard_reg_set_here_p (regno, (regno + hard_regno_nregs[regno] [rel_mode]), PATTERN (this_insn))) /* ??? Why is this code so different from the previous? Is there any simple coherent way to describe the two together? What's going on here. */ && (in != out || (GET_CODE (in) == SUBREG && (((GET_MODE_SIZE (GET_MODE (in)) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))) /* Make sure the operand fits in the reg that dies. */ && (GET_MODE_SIZE (rel_mode) <= GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))) && HARD_REGNO_MODE_OK (regno, inmode) && HARD_REGNO_MODE_OK (regno, outmode)) { unsigned int offs; unsigned int nregs = MAX (hard_regno_nregs[regno][inmode], hard_regno_nregs[regno][outmode]); for (offs = 0; offs < nregs; offs++) if (fixed_regs[regno + offs] || ! TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno + offs)) break; if (offs == nregs && (! (refers_to_regno_for_reload_p (regno, (regno + hard_regno_nregs[regno][inmode]), in, (rtx *)0)) || can_reload_into (in, regno, inmode))) { rld[i].reg_rtx = gen_rtx_REG (rel_mode, regno); break; } } } if (out) output_reloadnum = i; return i; } /* Record an additional place we must replace a value for which we have already recorded a reload. RELOADNUM is the value returned by push_reload when the reload was recorded. This is used in insn patterns that use match_dup. */ static void push_replacement (rtx *loc, int reloadnum, enum machine_mode mode) { if (replace_reloads) { struct replacement *r = &replacements[n_replacements++]; r->what = reloadnum; r->where = loc; r->subreg_loc = 0; r->mode = mode; } } /* Duplicate any replacement we have recorded to apply at location ORIG_LOC to also be performed at DUP_LOC. This is used in insn patterns that use match_dup. */ static void dup_replacements (rtx *dup_loc, rtx *orig_loc) { int i, n = n_replacements; for (i = 0; i < n; i++) { struct replacement *r = &replacements[i]; if (r->where == orig_loc) push_replacement (dup_loc, r->what, r->mode); } } /* Transfer all replacements that used to be in reload FROM to be in reload TO. */ void transfer_replacements (int to, int from) { int i; for (i = 0; i < n_replacements; i++) if (replacements[i].what == from) replacements[i].what = to; } /* IN_RTX is the value loaded by a reload that we now decided to inherit, or a subpart of it. If we have any replacements registered for IN_RTX, cancel the reloads that were supposed to load them. Return nonzero if we canceled any reloads. 
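   (Added note: the function below marks each reload whose replacements are mentioned only inside IN_RTX, drops those replacement records, and then recursively cancels the corresponding address reloads; a reload whose replacements are still needed elsewhere is left alone.)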
*/ int remove_address_replacements (rtx in_rtx) { int i, j; char reload_flags[MAX_RELOADS]; int something_changed = 0; memset (reload_flags, 0, sizeof reload_flags); for (i = 0, j = 0; i < n_replacements; i++) { if (loc_mentioned_in_p (replacements[i].where, in_rtx)) reload_flags[replacements[i].what] |= 1; else { replacements[j++] = replacements[i]; reload_flags[replacements[i].what] |= 2; } } /* Note that the following store must be done before the recursive calls. */ n_replacements = j; for (i = n_reloads - 1; i >= 0; i--) { if (reload_flags[i] == 1) { deallocate_reload_reg (i); remove_address_replacements (rld[i].in); rld[i].in = 0; something_changed = 1; } } return something_changed; } /* If there is only one output reload, and it is not for an earlyclobber operand, try to combine it with a (logically unrelated) input reload to reduce the number of reload registers needed. This is safe if the input reload does not appear in the value being output-reloaded, because this implies it is not needed any more once the original insn completes. If that doesn't work, see if we can use any of the registers that die in this insn as a reload register. We can if it is of the right class and does not appear in the value being output-reloaded. */ static void combine_reloads (void) { int i; int output_reload = -1; int secondary_out = -1; rtx note; /* Find the output reload; return unless there is exactly one and that one is mandatory. */ for (i = 0; i < n_reloads; i++) if (rld[i].out != 0) { if (output_reload >= 0) return; output_reload = i; } if (output_reload < 0 || rld[output_reload].optional) return; /* An input-output reload isn't combinable. */ if (rld[output_reload].in != 0) return; /* If this reload is for an earlyclobber operand, we can't do anything. */ if (earlyclobber_operand_p (rld[output_reload].out)) return; /* If there is a reload for part of the address of this operand, we would need to change it to RELOAD_FOR_OTHER_ADDRESS. But that would extend its life to the point where doing this combine would not lower the number of spill registers needed. */ for (i = 0; i < n_reloads; i++) if ((rld[i].when_needed == RELOAD_FOR_OUTPUT_ADDRESS || rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS) && rld[i].opnum == rld[output_reload].opnum) return; /* Check each input reload; can we combine it? */ for (i = 0; i < n_reloads; i++) if (rld[i].in && ! rld[i].optional && ! rld[i].nocombine /* Life span of this reload must not extend past main insn. */ && rld[i].when_needed != RELOAD_FOR_OUTPUT_ADDRESS && rld[i].when_needed != RELOAD_FOR_OUTADDR_ADDRESS && rld[i].when_needed != RELOAD_OTHER && (CLASS_MAX_NREGS (rld[i].class, rld[i].inmode) == CLASS_MAX_NREGS (rld[output_reload].class, rld[output_reload].outmode)) && rld[i].inc == 0 && rld[i].reg_rtx == 0 #ifdef SECONDARY_MEMORY_NEEDED /* Don't combine two reloads with different secondary memory locations. */ && (secondary_memlocs_elim[(int) rld[output_reload].outmode][rld[i].opnum] == 0 || secondary_memlocs_elim[(int) rld[output_reload].outmode][rld[output_reload].opnum] == 0 || rtx_equal_p (secondary_memlocs_elim[(int) rld[output_reload].outmode][rld[i].opnum], secondary_memlocs_elim[(int) rld[output_reload].outmode][rld[output_reload].opnum])) #endif && (SMALL_REGISTER_CLASSES ?
(rld[i].class == rld[output_reload].class) : (reg_class_subset_p (rld[i].class, rld[output_reload].class) || reg_class_subset_p (rld[output_reload].class, rld[i].class))) && (MATCHES (rld[i].in, rld[output_reload].out) /* Args reversed because the first arg seems to be the one that we imagine being modified while the second is the one that might be affected. */ || (! reg_overlap_mentioned_for_reload_p (rld[output_reload].out, rld[i].in) /* However, if the input is a register that appears inside the output, then we also can't share. Imagine (set (mem (reg 69)) (plus (reg 69) ...)). If the same reload reg is used for both reg 69 and the result to be stored in memory, then that result will clobber the address of the memory ref. */ && ! (REG_P (rld[i].in) && reg_overlap_mentioned_for_reload_p (rld[i].in, rld[output_reload].out)))) && ! reload_inner_reg_of_subreg (rld[i].in, rld[i].inmode, rld[i].when_needed != RELOAD_FOR_INPUT) && (reg_class_size[(int) rld[i].class] || SMALL_REGISTER_CLASSES) /* We will allow making things slightly worse by combining an input and an output, but no worse than that. */ && (rld[i].when_needed == RELOAD_FOR_INPUT || rld[i].when_needed == RELOAD_FOR_OUTPUT)) { int j; /* We have found a reload to combine with! */ rld[i].out = rld[output_reload].out; rld[i].out_reg = rld[output_reload].out_reg; rld[i].outmode = rld[output_reload].outmode; /* Mark the old output reload as inoperative. */ rld[output_reload].out = 0; /* The combined reload is needed for the entire insn. */ rld[i].when_needed = RELOAD_OTHER; /* If the output reload had a secondary reload, copy it. */ if (rld[output_reload].secondary_out_reload != -1) { rld[i].secondary_out_reload = rld[output_reload].secondary_out_reload; rld[i].secondary_out_icode = rld[output_reload].secondary_out_icode; } #ifdef SECONDARY_MEMORY_NEEDED /* Copy any secondary MEM. */ if (secondary_memlocs_elim[(int) rld[output_reload].outmode][rld[output_reload].opnum] != 0) secondary_memlocs_elim[(int) rld[output_reload].outmode][rld[i].opnum] = secondary_memlocs_elim[(int) rld[output_reload].outmode][rld[output_reload].opnum]; #endif /* If required, minimize the register class. */ if (reg_class_subset_p (rld[output_reload].class, rld[i].class)) rld[i].class = rld[output_reload].class; /* Transfer all replacements from the old reload to the combined. */ for (j = 0; j < n_replacements; j++) if (replacements[j].what == output_reload) replacements[j].what = i; return; } /* If this insn has only one operand that is modified or written (assumed to be the first), it must be the one corresponding to this reload. It is safe to use anything that dies in this insn for that output provided that it does not occur in the output (we already know it isn't an earlyclobber. If this is an asm insn, give up. */ if (INSN_CODE (this_insn) == -1) return; for (i = 1; i < insn_data[INSN_CODE (this_insn)].n_operands; i++) if (insn_data[INSN_CODE (this_insn)].operand[i].constraint[0] == '=' || insn_data[INSN_CODE (this_insn)].operand[i].constraint[0] == '+') return; /* See if some hard register that dies in this insn and is not used in the output is the right class. Only works if the register we pick up can fully hold our output reload. */ for (note = REG_NOTES (this_insn); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_DEAD && REG_P (XEXP (note, 0)) && ! 
reg_overlap_mentioned_for_reload_p (XEXP (note, 0), rld[output_reload].out) && REGNO (XEXP (note, 0)) < FIRST_PSEUDO_REGISTER && HARD_REGNO_MODE_OK (REGNO (XEXP (note, 0)), rld[output_reload].outmode) && TEST_HARD_REG_BIT (reg_class_contents[(int) rld[output_reload].class], REGNO (XEXP (note, 0))) && (hard_regno_nregs[REGNO (XEXP (note, 0))][rld[output_reload].outmode] <= hard_regno_nregs[REGNO (XEXP (note, 0))][GET_MODE (XEXP (note, 0))]) /* Ensure that a secondary or tertiary reload for this output won't want this register. */ && ((secondary_out = rld[output_reload].secondary_out_reload) == -1 || (! (TEST_HARD_REG_BIT (reg_class_contents[(int) rld[secondary_out].class], REGNO (XEXP (note, 0)))) && ((secondary_out = rld[secondary_out].secondary_out_reload) == -1 || ! (TEST_HARD_REG_BIT (reg_class_contents[(int) rld[secondary_out].class], REGNO (XEXP (note, 0))))))) && ! fixed_regs[REGNO (XEXP (note, 0))]) { rld[output_reload].reg_rtx = gen_rtx_REG (rld[output_reload].outmode, REGNO (XEXP (note, 0))); return; } } /* Try to find a reload register for an in-out reload (expressions IN and OUT). See if one of IN and OUT is a register that may be used; this is desirable since a spill-register won't be needed. If so, return the register rtx that proves acceptable. INLOC and OUTLOC are locations where IN and OUT appear in the insn. CLASS is the register class required for the reload. If FOR_REAL is >= 0, it is the number of the reload, and in some cases when it can be discovered that OUT doesn't need to be computed, clear out rld[FOR_REAL].out. If FOR_REAL is -1, this should not be done, because this call is just to see if a register can be found, not to find and install it. EARLYCLOBBER is nonzero if OUT is an earlyclobber operand. This puts an additional constraint on being able to use IN for OUT since IN must not appear elsewhere in the insn (it is assumed that IN itself is safe from the earlyclobber). */ static rtx find_dummy_reload (rtx real_in, rtx real_out, rtx *inloc, rtx *outloc, enum machine_mode inmode, enum machine_mode outmode, enum reg_class class, int for_real, int earlyclobber) { rtx in = real_in; rtx out = real_out; int in_offset = 0; int out_offset = 0; rtx value = 0; /* If operands exceed a word, we can't use either of them unless they have the same size. */ if (GET_MODE_SIZE (outmode) != GET_MODE_SIZE (inmode) && (GET_MODE_SIZE (outmode) > UNITS_PER_WORD || GET_MODE_SIZE (inmode) > UNITS_PER_WORD)) return 0; /* Note that {in,out}_offset are needed only when 'in' or 'out' respectively refers to a hard register. */ /* Find the inside of any subregs. */ while (GET_CODE (out) == SUBREG) { if (REG_P (SUBREG_REG (out)) && REGNO (SUBREG_REG (out)) < FIRST_PSEUDO_REGISTER) out_offset += subreg_regno_offset (REGNO (SUBREG_REG (out)), GET_MODE (SUBREG_REG (out)), SUBREG_BYTE (out), GET_MODE (out)); out = SUBREG_REG (out); } while (GET_CODE (in) == SUBREG) { if (REG_P (SUBREG_REG (in)) && REGNO (SUBREG_REG (in)) < FIRST_PSEUDO_REGISTER) in_offset += subreg_regno_offset (REGNO (SUBREG_REG (in)), GET_MODE (SUBREG_REG (in)), SUBREG_BYTE (in), GET_MODE (in)); in = SUBREG_REG (in); } /* Narrow down the reg class, the same way push_reload will; otherwise we might find a dummy now, but push_reload won't. */ class = PREFERRED_RELOAD_CLASS (in, class); /* See if OUT will do. 
*/ if (REG_P (out) && REGNO (out) < FIRST_PSEUDO_REGISTER) { unsigned int regno = REGNO (out) + out_offset; unsigned int nwords = hard_regno_nregs[regno][outmode]; rtx saved_rtx; /* When we consider whether the insn uses OUT, ignore references within IN. They don't prevent us from copying IN into OUT, because those refs would move into the insn that reloads IN. However, we only ignore IN in its role as this reload. If the insn uses IN elsewhere and it contains OUT, that counts. We can't be sure it's the "same" operand so it might not go through this reload. */ saved_rtx = *inloc; *inloc = const0_rtx; if (regno < FIRST_PSEUDO_REGISTER && HARD_REGNO_MODE_OK (regno, outmode) && ! refers_to_regno_for_reload_p (regno, regno + nwords, PATTERN (this_insn), outloc)) { unsigned int i; for (i = 0; i < nwords; i++) if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno + i)) break; if (i == nwords) { if (REG_P (real_out)) value = real_out; else value = gen_rtx_REG (outmode, regno); } } *inloc = saved_rtx; } /* Consider using IN if OUT was not acceptable or if OUT dies in this insn (like the quotient in a divmod insn). We can't use IN unless it is dies in this insn, which means we must know accurately which hard regs are live. Also, the result can't go in IN if IN is used within OUT, or if OUT is an earlyclobber and IN appears elsewhere in the insn. */ if (hard_regs_live_known && REG_P (in) && REGNO (in) < FIRST_PSEUDO_REGISTER && (value == 0 || find_reg_note (this_insn, REG_UNUSED, real_out)) && find_reg_note (this_insn, REG_DEAD, real_in) && !fixed_regs[REGNO (in)] && HARD_REGNO_MODE_OK (REGNO (in), /* The only case where out and real_out might have different modes is where real_out is a subreg, and in that case, out has a real mode. */ (GET_MODE (out) != VOIDmode ? GET_MODE (out) : outmode))) { unsigned int regno = REGNO (in) + in_offset; unsigned int nwords = hard_regno_nregs[regno][inmode]; if (! refers_to_regno_for_reload_p (regno, regno + nwords, out, (rtx*) 0) && ! hard_reg_set_here_p (regno, regno + nwords, PATTERN (this_insn)) && (! earlyclobber || ! refers_to_regno_for_reload_p (regno, regno + nwords, PATTERN (this_insn), inloc))) { unsigned int i; for (i = 0; i < nwords; i++) if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class], regno + i)) break; if (i == nwords) { /* If we were going to use OUT as the reload reg and changed our mind, it means OUT is a dummy that dies here. So don't bother copying value to it. */ if (for_real >= 0 && value == real_out) rld[for_real].out = 0; if (REG_P (real_in)) value = real_in; else value = gen_rtx_REG (inmode, regno); } } } return value; } /* This page contains subroutines used mainly for determining whether the IN or an OUT of a reload can serve as the reload register. */ /* Return 1 if X is an operand of an insn that is being earlyclobbered. */ int earlyclobber_operand_p (rtx x) { int i; for (i = 0; i < n_earlyclobbers; i++) if (reload_earlyclobbers[i] == x) return 1; return 0; } /* Return 1 if expression X alters a hard reg in the range from BEG_REGNO (inclusive) to END_REGNO (exclusive), either explicitly or in the guise of a pseudo-reg allocated to REGNO. X should be the body of an instruction. */ static int hard_reg_set_here_p (unsigned int beg_regno, unsigned int end_regno, rtx x) { if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER) { rtx op0 = SET_DEST (x); while (GET_CODE (op0) == SUBREG) op0 = SUBREG_REG (op0); if (REG_P (op0)) { unsigned int r = REGNO (op0); /* See if this reg overlaps range under consideration. 
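   That is, whether the hard registers [r, r + nregs) occupied by OP0 intersect the half-open range [BEG_REGNO, END_REGNO).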
*/ if (r < end_regno && r + hard_regno_nregs[r][GET_MODE (op0)] > beg_regno) return 1; } } else if (GET_CODE (x) == PARALLEL) { int i = XVECLEN (x, 0) - 1; for (; i >= 0; i--) if (hard_reg_set_here_p (beg_regno, end_regno, XVECEXP (x, 0, i))) return 1; } return 0; } /* Return 1 if ADDR is a valid memory address for mode MODE, and check that each pseudo reg has the proper kind of hard reg. */ int strict_memory_address_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx addr) { GO_IF_LEGITIMATE_ADDRESS (mode, addr, win); return 0; win: return 1; } /* Like rtx_equal_p except that it allows a REG and a SUBREG to match if they are the same hard reg, and has special hacks for autoincrement and autodecrement. This is specifically intended for find_reloads to use in determining whether two operands match. X is the operand whose number is the lower of the two. The value is 2 if Y contains a pre-increment that matches a non-incrementing address in X. */ /* ??? To be completely correct, we should arrange to pass for X the output operand and for Y the input operand. For now, we assume that the output operand has the lower number because that is natural in (SET output (... input ...)). */ int operands_match_p (rtx x, rtx y) { int i; RTX_CODE code = GET_CODE (x); const char *fmt; int success_2; if (x == y) return 1; if ((code == REG || (code == SUBREG && REG_P (SUBREG_REG (x)))) && (REG_P (y) || (GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y))))) { int j; if (code == SUBREG) { i = REGNO (SUBREG_REG (x)); if (i >= FIRST_PSEUDO_REGISTER) goto slow; i += subreg_regno_offset (REGNO (SUBREG_REG (x)), GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x), GET_MODE (x)); } else i = REGNO (x); if (GET_CODE (y) == SUBREG) { j = REGNO (SUBREG_REG (y)); if (j >= FIRST_PSEUDO_REGISTER) goto slow; j += subreg_regno_offset (REGNO (SUBREG_REG (y)), GET_MODE (SUBREG_REG (y)), SUBREG_BYTE (y), GET_MODE (y)); } else j = REGNO (y); /* On a WORDS_BIG_ENDIAN machine, point to the last register of a multiple hard register group, so that for example (reg:DI 0) and (reg:SI 1) will be considered the same register. */ if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD && i < FIRST_PSEUDO_REGISTER) i += hard_regno_nregs[i][GET_MODE (x)] - 1; if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (GET_MODE (y)) > UNITS_PER_WORD && j < FIRST_PSEUDO_REGISTER) j += hard_regno_nregs[j][GET_MODE (y)] - 1; return i == j; } /* If two operands must match, because they are really a single operand of an assembler insn, then two postincrements are invalid because the assembler insn would increment only once. On the other hand, a postincrement matches ordinary indexing if the postincrement is the output operand. */ if (code == POST_DEC || code == POST_INC || code == POST_MODIFY) return operands_match_p (XEXP (x, 0), y); /* Two preincrements are invalid because the assembler insn would increment only once. On the other hand, a preincrement matches ordinary indexing if the preincrement is the input operand. In this case, return 2, since some callers need to do special things when this happens. */ if (GET_CODE (y) == PRE_DEC || GET_CODE (y) == PRE_INC || GET_CODE (y) == PRE_MODIFY) return operands_match_p (x, XEXP (y, 0)) ? 2 : 0; slow: /* Now we have disposed of all the cases in which different rtx codes can match. */ if (code != GET_CODE (y)) return 0; if (code == LABEL_REF) return XEXP (x, 0) == XEXP (y, 0); if (code == SYMBOL_REF) return XSTR (x, 0) == XSTR (y, 0); /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. 
*/ if (GET_MODE (x) != GET_MODE (y)) return 0; /* Compare the elements. If any pair of corresponding elements fail to match, return 0 for the whole things. */ success_2 = 0; fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { int val, j; switch (fmt[i]) { case 'w': if (XWINT (x, i) != XWINT (y, i)) return 0; break; case 'i': if (XINT (x, i) != XINT (y, i)) return 0; break; case 'e': val = operands_match_p (XEXP (x, i), XEXP (y, i)); if (val == 0) return 0; /* If any subexpression returns 2, we should return 2 if we are successful. */ if (val == 2) success_2 = 1; break; case '0': break; case 'E': if (XVECLEN (x, i) != XVECLEN (y, i)) return 0; for (j = XVECLEN (x, i) - 1; j >= 0; --j) { val = operands_match_p (XVECEXP (x, i, j), XVECEXP (y, i, j)); if (val == 0) return 0; if (val == 2) success_2 = 1; } break; /* It is believed that rtx's at this level will never contain anything but integers and other rtx's, except for within LABEL_REFs and SYMBOL_REFs. */ default: abort (); } } return 1 + success_2; } /* Describe the range of registers or memory referenced by X. If X is a register, set REG_FLAG and put the first register number into START and the last plus one into END. If X is a memory reference, put a base address into BASE and a range of integer offsets into START and END. If X is pushing on the stack, we can assume it causes no trouble, so we set the SAFE field. */ static struct decomposition decompose (rtx x) { struct decomposition val; int all_const = 0; memset (&val, 0, sizeof (val)); if (MEM_P (x)) { rtx base = NULL_RTX, offset = 0; rtx addr = XEXP (x, 0); if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC) { val.base = XEXP (addr, 0); val.start = -GET_MODE_SIZE (GET_MODE (x)); val.end = GET_MODE_SIZE (GET_MODE (x)); val.safe = REGNO (val.base) == STACK_POINTER_REGNUM; return val; } if (GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY) { if (GET_CODE (XEXP (addr, 1)) == PLUS && XEXP (addr, 0) == XEXP (XEXP (addr, 1), 0) && CONSTANT_P (XEXP (XEXP (addr, 1), 1))) { val.base = XEXP (addr, 0); val.start = -INTVAL (XEXP (XEXP (addr, 1), 1)); val.end = INTVAL (XEXP (XEXP (addr, 1), 1)); val.safe = REGNO (val.base) == STACK_POINTER_REGNUM; return val; } } if (GET_CODE (addr) == CONST) { addr = XEXP (addr, 0); all_const = 1; } if (GET_CODE (addr) == PLUS) { if (CONSTANT_P (XEXP (addr, 0))) { base = XEXP (addr, 1); offset = XEXP (addr, 0); } else if (CONSTANT_P (XEXP (addr, 1))) { base = XEXP (addr, 0); offset = XEXP (addr, 1); } } if (offset == 0) { base = addr; offset = const0_rtx; } if (GET_CODE (offset) == CONST) offset = XEXP (offset, 0); if (GET_CODE (offset) == PLUS) { if (GET_CODE (XEXP (offset, 0)) == CONST_INT) { base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 1)); offset = XEXP (offset, 0); } else if (GET_CODE (XEXP (offset, 1)) == CONST_INT) { base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 0)); offset = XEXP (offset, 1); } else { base = gen_rtx_PLUS (GET_MODE (base), base, offset); offset = const0_rtx; } } else if (GET_CODE (offset) != CONST_INT) { base = gen_rtx_PLUS (GET_MODE (base), base, offset); offset = const0_rtx; } if (all_const && GET_CODE (base) == PLUS) base = gen_rtx_CONST (GET_MODE (base), base); if (GET_CODE (offset) != CONST_INT) abort (); val.start = INTVAL (offset); val.end = val.start + GET_MODE_SIZE (GET_MODE (x)); val.base = base; return val; } else if (REG_P (x)) { val.reg_flag = 1; val.start = true_regnum (x); if (val.start 
< 0) { /* A pseudo with no hard reg. */ val.start = REGNO (x); val.end = val.start + 1; } else /* A hard reg. */ val.end = val.start + hard_regno_nregs[val.start][GET_MODE (x)]; } else if (GET_CODE (x) == SUBREG) { if (!REG_P (SUBREG_REG (x))) /* This could be more precise, but it's good enough. */ return decompose (SUBREG_REG (x)); val.reg_flag = 1; val.start = true_regnum (x); if (val.start < 0) return decompose (SUBREG_REG (x)); else /* A hard reg. */ val.end = val.start + hard_regno_nregs[val.start][GET_MODE (x)]; } else if (CONSTANT_P (x) /* This hasn't been assigned yet, so it can't conflict yet. */ || GET_CODE (x) == SCRATCH) val.safe = 1; else abort (); return val; } /* Return 1 if altering Y will not modify the value of X. Y is also described by YDATA, which should be decompose (Y). */ static int immune_p (rtx x, rtx y, struct decomposition ydata) { struct decomposition xdata; if (ydata.reg_flag) return !refers_to_regno_for_reload_p (ydata.start, ydata.end, x, (rtx*) 0); if (ydata.safe) return 1; if (!MEM_P (y)) abort (); /* If Y is memory and X is not, Y can't affect X. */ if (!MEM_P (x)) return 1; xdata = decompose (x); if (! rtx_equal_p (xdata.base, ydata.base)) { /* If bases are distinct symbolic constants, there is no overlap. */ if (CONSTANT_P (xdata.base) && CONSTANT_P (ydata.base)) return 1; /* Constants and stack slots never overlap. */ if (CONSTANT_P (xdata.base) && (ydata.base == frame_pointer_rtx || ydata.base == hard_frame_pointer_rtx || ydata.base == stack_pointer_rtx)) return 1; if (CONSTANT_P (ydata.base) && (xdata.base == frame_pointer_rtx || xdata.base == hard_frame_pointer_rtx || xdata.base == stack_pointer_rtx)) return 1; /* If either base is variable, we don't know anything. */ return 0; } return (xdata.start >= ydata.end || ydata.start >= xdata.end); } /* Similar, but calls decompose. */ int safe_from_earlyclobber (rtx op, rtx clobber) { struct decomposition early_data; early_data = decompose (clobber); return immune_p (op, clobber, early_data); } /* Main entry point of this file: search the body of INSN for values that need reloading and record them with push_reload. REPLACE nonzero means record also where the values occur so that subst_reloads can be used. IND_LEVELS says how many levels of indirection are supported by this machine; a value of zero means that a memory reference is not a valid memory address. LIVE_KNOWN says we have valid information about which hard regs are live at each point in the program; this is true when we are called from global_alloc but false when stupid register allocation has been done. RELOAD_REG_P if nonzero is a vector indexed by hard reg number which is nonnegative if the reg has been commandeered for reloading into. It is copied into STATIC_RELOAD_REG_P and referenced from there by various subroutines. Return TRUE if some operands need to be changed, because of swapping commutative operands, reg_equiv_address substitution, or whatever. */ int find_reloads (rtx insn, int replace, int ind_levels, int live_known, short *reload_reg_p) { int insn_code_number; int i, j; int noperands; /* These start out as the constraints for the insn and they are chewed up as we consider alternatives. */ char *constraints[MAX_RECOG_OPERANDS]; /* These are the preferred classes for an operand, or NO_REGS if it isn't a register. */ enum reg_class preferred_class[MAX_RECOG_OPERANDS]; char pref_or_nothing[MAX_RECOG_OPERANDS]; /* Nonzero for a MEM operand whose entire address needs a reload. 
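Each entry is set from the value returned by find_reloads_address below; the constraint-matching code (for instance the '<', '>' and 'o' cases) consults it, since an address that was reloaded as a whole is now just a simple register indirect.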
*/ int address_reloaded[MAX_RECOG_OPERANDS]; /* Nonzero for an address operand that needs to be completely reloaded. */ int address_operand_reloaded[MAX_RECOG_OPERANDS]; /* Value of enum reload_type to use for operand. */ enum reload_type operand_type[MAX_RECOG_OPERANDS]; /* Value of enum reload_type to use within address of operand. */ enum reload_type address_type[MAX_RECOG_OPERANDS]; /* Save the usage of each operand. */ enum reload_usage { RELOAD_READ, RELOAD_READ_WRITE, RELOAD_WRITE } modified[MAX_RECOG_OPERANDS]; int no_input_reloads = 0, no_output_reloads = 0; int n_alternatives; int this_alternative[MAX_RECOG_OPERANDS]; char this_alternative_match_win[MAX_RECOG_OPERANDS]; char this_alternative_win[MAX_RECOG_OPERANDS]; char this_alternative_offmemok[MAX_RECOG_OPERANDS]; char this_alternative_earlyclobber[MAX_RECOG_OPERANDS]; int this_alternative_matches[MAX_RECOG_OPERANDS]; int swapped; int goal_alternative[MAX_RECOG_OPERANDS]; int this_alternative_number; int goal_alternative_number = 0; int operand_reloadnum[MAX_RECOG_OPERANDS]; int goal_alternative_matches[MAX_RECOG_OPERANDS]; int goal_alternative_matched[MAX_RECOG_OPERANDS]; char goal_alternative_match_win[MAX_RECOG_OPERANDS]; char goal_alternative_win[MAX_RECOG_OPERANDS]; char goal_alternative_offmemok[MAX_RECOG_OPERANDS]; char goal_alternative_earlyclobber[MAX_RECOG_OPERANDS]; int goal_alternative_swapped; int best; int commutative; char operands_match[MAX_RECOG_OPERANDS][MAX_RECOG_OPERANDS]; rtx substed_operand[MAX_RECOG_OPERANDS]; rtx body = PATTERN (insn); rtx set = single_set (insn); int goal_earlyclobber = 0, this_earlyclobber; enum machine_mode operand_mode[MAX_RECOG_OPERANDS]; int retval = 0; this_insn = insn; n_reloads = 0; n_replacements = 0; n_earlyclobbers = 0; replace_reloads = replace; hard_regs_live_known = live_known; static_reload_reg_p = reload_reg_p; /* JUMP_INSNs and CALL_INSNs are not allowed to have any output reloads; neither are insns that SET cc0. Insns that use CC0 are not allowed to have any input reloads. */ if (GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CALL_INSN) no_output_reloads = 1; #ifdef HAVE_cc0 if (reg_referenced_p (cc0_rtx, PATTERN (insn))) no_input_reloads = 1; if (reg_set_p (cc0_rtx, PATTERN (insn))) no_output_reloads = 1; #endif #ifdef SECONDARY_MEMORY_NEEDED /* The eliminated forms of any secondary memory locations are per-insn, so clear them out here. */ if (secondary_memlocs_elim_used) { memset (secondary_memlocs_elim, 0, sizeof (secondary_memlocs_elim[0]) * secondary_memlocs_elim_used); secondary_memlocs_elim_used = 0; } #endif /* Dispose quickly of (set (reg..) (reg..)) if both have hard regs and it is cheap to move between them. If it is not, there may not be an insn to do the copy, so we may need a reload. */ if (GET_CODE (body) == SET && REG_P (SET_DEST (body)) && REGNO (SET_DEST (body)) < FIRST_PSEUDO_REGISTER && REG_P (SET_SRC (body)) && REGNO (SET_SRC (body)) < FIRST_PSEUDO_REGISTER && REGISTER_MOVE_COST (GET_MODE (SET_SRC (body)), REGNO_REG_CLASS (REGNO (SET_SRC (body))), REGNO_REG_CLASS (REGNO (SET_DEST (body)))) == 2) return 0; extract_insn (insn); noperands = reload_n_operands = recog_data.n_operands; n_alternatives = recog_data.n_alternatives; /* Just return "no reloads" if insn has no operands with constraints. 
*/ if (noperands == 0 || n_alternatives == 0) return 0; insn_code_number = INSN_CODE (insn); this_insn_is_asm = insn_code_number < 0; memcpy (operand_mode, recog_data.operand_mode, noperands * sizeof (enum machine_mode)); memcpy (constraints, recog_data.constraints, noperands * sizeof (char *)); commutative = -1; /* If we will need to know, later, whether some pair of operands are the same, we must compare them now and save the result. Reloading the base and index registers will clobber them and afterward they will fail to match. */ for (i = 0; i < noperands; i++) { char *p; int c; substed_operand[i] = recog_data.operand[i]; p = constraints[i]; modified[i] = RELOAD_READ; /* Scan this operand's constraint to see if it is an output operand, an in-out operand, is commutative, or should match another. */ while ((c = *p)) { p += CONSTRAINT_LEN (c, p); switch (c) { case '=': modified[i] = RELOAD_WRITE; break; case '+': modified[i] = RELOAD_READ_WRITE; break; case '%': { /* The last operand should not be marked commutative. */ if (i == noperands - 1) abort (); /* We currently only support one commutative pair of operands. Some existing asm code currently uses more than one pair. Previously, that would usually work, but sometimes it would crash the compiler. We continue supporting that case as well as we can by silently ignoring all but the first pair. In the future we may handle it correctly. */ if (commutative < 0) commutative = i; else if (!this_insn_is_asm) abort (); } break; /* Use of ISDIGIT is tempting here, but it may get expensive because of locale support we don't want. */ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { c = strtoul (p - 1, &p, 10); operands_match[c][i] = operands_match_p (recog_data.operand[c], recog_data.operand[i]); /* An operand may not match itself. */ if (c == i) abort (); /* If C can be commuted with C+1, and C might need to match I, then C+1 might also need to match I. */ if (commutative >= 0) { if (c == commutative || c == commutative + 1) { int other = c + (c == commutative ? 1 : -1); operands_match[other][i] = operands_match_p (recog_data.operand[other], recog_data.operand[i]); } if (i == commutative || i == commutative + 1) { int other = i + (i == commutative ? 1 : -1); operands_match[c][other] = operands_match_p (recog_data.operand[c], recog_data.operand[other]); } /* Note that C is supposed to be less than I. No need to consider altering both C and I because in that case we would alter one into the other. */ } } } } } /* Examine each operand that is a memory reference or memory address and reload parts of the addresses into index registers. Also here any references to pseudo regs that didn't get hard regs but are equivalent to constants get replaced in the insn itself with those constants. Nobody will ever see them again. Finally, set up the preferred classes of each operand. */ for (i = 0; i < noperands; i++) { RTX_CODE code = GET_CODE (recog_data.operand[i]); address_reloaded[i] = 0; address_operand_reloaded[i] = 0; operand_type[i] = (modified[i] == RELOAD_READ ? RELOAD_FOR_INPUT : modified[i] == RELOAD_WRITE ? RELOAD_FOR_OUTPUT : RELOAD_OTHER); address_type[i] = (modified[i] == RELOAD_READ ? RELOAD_FOR_INPUT_ADDRESS : modified[i] == RELOAD_WRITE ? RELOAD_FOR_OUTPUT_ADDRESS : RELOAD_OTHER); if (*constraints[i] == 0) /* Ignore things like match_operator operands. 
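Such operands have an empty constraint string, which is what the test above detects; they carry no constraints of their own, so nothing is recorded for them in this loop.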
*/ ; else if (constraints[i][0] == 'p' || EXTRA_ADDRESS_CONSTRAINT (constraints[i][0], constraints[i])) { address_operand_reloaded[i] = find_reloads_address (recog_data.operand_mode[i], (rtx*) 0, recog_data.operand[i], recog_data.operand_loc[i], i, operand_type[i], ind_levels, insn); /* If we now have a simple operand where we used to have a PLUS or MULT, re-recognize and try again. */ if ((OBJECT_P (*recog_data.operand_loc[i]) || GET_CODE (*recog_data.operand_loc[i]) == SUBREG) && (GET_CODE (recog_data.operand[i]) == MULT || GET_CODE (recog_data.operand[i]) == PLUS)) { INSN_CODE (insn) = -1; retval = find_reloads (insn, replace, ind_levels, live_known, reload_reg_p); return retval; } recog_data.operand[i] = *recog_data.operand_loc[i]; substed_operand[i] = recog_data.operand[i]; /* Address operands are reloaded in their existing mode, no matter what is specified in the machine description. */ operand_mode[i] = GET_MODE (recog_data.operand[i]); } else if (code == MEM) { address_reloaded[i] = find_reloads_address (GET_MODE (recog_data.operand[i]), recog_data.operand_loc[i], XEXP (recog_data.operand[i], 0), &XEXP (recog_data.operand[i], 0), i, address_type[i], ind_levels, insn); recog_data.operand[i] = *recog_data.operand_loc[i]; substed_operand[i] = recog_data.operand[i]; } else if (code == SUBREG) { rtx reg = SUBREG_REG (recog_data.operand[i]); rtx op = find_reloads_toplev (recog_data.operand[i], i, address_type[i], ind_levels, set != 0 && &SET_DEST (set) == recog_data.operand_loc[i], insn, &address_reloaded[i]); /* If we made a MEM to load (a part of) the stackslot of a pseudo that didn't get a hard register, emit a USE with a REG_EQUAL note in front so that we might inherit a previous, possibly wider reload. */ if (replace && MEM_P (op) && REG_P (reg) && (GET_MODE_SIZE (GET_MODE (reg)) >= GET_MODE_SIZE (GET_MODE (op)))) set_unique_reg_note (emit_insn_before (gen_rtx_USE (VOIDmode, reg), insn), REG_EQUAL, reg_equiv_memory_loc[REGNO (reg)]); substed_operand[i] = recog_data.operand[i] = op; } else if (code == PLUS || GET_RTX_CLASS (code) == RTX_UNARY) /* We can get a PLUS as an "operand" as a result of register elimination. See eliminate_regs and gen_reload. We handle a unary operator by reloading the operand. */ substed_operand[i] = recog_data.operand[i] = find_reloads_toplev (recog_data.operand[i], i, address_type[i], ind_levels, 0, insn, &address_reloaded[i]); else if (code == REG) { /* This is equivalent to calling find_reloads_toplev. The code is duplicated for speed. When we find a pseudo always equivalent to a constant, we replace it by the constant. We must be sure, however, that we don't try to replace it in the insn in which it is being set. */ int regno = REGNO (recog_data.operand[i]); if (reg_equiv_constant[regno] != 0 && (set == 0 || &SET_DEST (set) != recog_data.operand_loc[i])) { /* Record the existing mode so that the check if constants are allowed will work when operand_mode isn't specified. */ if (operand_mode[i] == VOIDmode) operand_mode[i] = GET_MODE (recog_data.operand[i]); substed_operand[i] = recog_data.operand[i] = reg_equiv_constant[regno]; } if (reg_equiv_memory_loc[regno] != 0 && (reg_equiv_address[regno] != 0 || num_not_at_initial_offset)) /* We need not give a valid is_set_dest argument since the case of a constant equivalence was checked above. 
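At this point the pseudo is known to be equivalent to a memory location rather than to a constant, so find_reloads_toplev will substitute that memory location and, if necessary, reload its address.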
*/ substed_operand[i] = recog_data.operand[i] = find_reloads_toplev (recog_data.operand[i], i, address_type[i], ind_levels, 0, insn, &address_reloaded[i]); } /* If the operand is still a register (we didn't replace it with an equivalent), get the preferred class to reload it into. */ code = GET_CODE (recog_data.operand[i]); preferred_class[i] = ((code == REG && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER) ? reg_preferred_class (REGNO (recog_data.operand[i])) : NO_REGS); pref_or_nothing[i] = (code == REG && REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER && reg_alternate_class (REGNO (recog_data.operand[i])) == NO_REGS); } /* If this is simply a copy from operand 1 to operand 0, merge the preferred classes for the operands. */ if (set != 0 && noperands >= 2 && recog_data.operand[0] == SET_DEST (set) && recog_data.operand[1] == SET_SRC (set)) { preferred_class[0] = preferred_class[1] = reg_class_subunion[(int) preferred_class[0]][(int) preferred_class[1]]; pref_or_nothing[0] |= pref_or_nothing[1]; pref_or_nothing[1] |= pref_or_nothing[0]; } /* Now see what we need for pseudo-regs that didn't get hard regs or got the wrong kind of hard reg. For this, we must consider all the operands together against the register constraints. */ best = MAX_RECOG_OPERANDS * 2 + 600; swapped = 0; goal_alternative_swapped = 0; try_swapped: /* The constraints are made of several alternatives. Each operand's constraint looks like foo,bar,... with commas separating the alternatives. The first alternatives for all operands go together, the second alternatives go together, etc. First loop over alternatives. */ for (this_alternative_number = 0; this_alternative_number < n_alternatives; this_alternative_number++) { /* Loop over operands for one constraint alternative. */ /* LOSERS counts those that don't fit this alternative and would require loading. */ int losers = 0; /* BAD is set to 1 if it some operand can't fit this alternative even after reloading. */ int bad = 0; /* REJECT is a count of how undesirable this alternative says it is if any reloading is required. If the alternative matches exactly then REJECT is ignored, but otherwise it gets this much counted against it in addition to the reloading needed. Each ? counts three times here since we want the disparaging caused by a bad register class to only count 1/3 as much. */ int reject = 0; this_earlyclobber = 0; for (i = 0; i < noperands; i++) { char *p = constraints[i]; char *end; int len; int win = 0; int did_match = 0; /* 0 => this operand can be reloaded somehow for this alternative. */ int badop = 1; /* 0 => this operand can be reloaded if the alternative allows regs. */ int winreg = 0; int c; int m; rtx operand = recog_data.operand[i]; int offset = 0; /* Nonzero means this is a MEM that must be reloaded into a reg regardless of what the constraint says. */ int force_reload = 0; int offmemok = 0; /* Nonzero if a constant forced into memory would be OK for this operand. */ int constmemok = 0; int earlyclobber = 0; /* If the predicate accepts a unary operator, it means that we need to reload the operand, but do not do this for match_operator and friends. */ if (UNARY_P (operand) && *p != 0) operand = XEXP (operand, 0); /* If the operand is a SUBREG, extract the REG or MEM (or maybe even a constant) within. (Constants can occur as a result of reg_equiv_constant.) */ while (GET_CODE (operand) == SUBREG) { /* Offset only matters when operand is a REG and it is a hard reg. 
This is because it is passed to reg_fits_class_p if it is a REG and all pseudos return 0 from that function. */ if (REG_P (SUBREG_REG (operand)) && REGNO (SUBREG_REG (operand)) < FIRST_PSEUDO_REGISTER) { if (!subreg_offset_representable_p (REGNO (SUBREG_REG (operand)), GET_MODE (SUBREG_REG (operand)), SUBREG_BYTE (operand), GET_MODE (operand))) force_reload = 1; offset += subreg_regno_offset (REGNO (SUBREG_REG (operand)), GET_MODE (SUBREG_REG (operand)), SUBREG_BYTE (operand), GET_MODE (operand)); } operand = SUBREG_REG (operand); /* Force reload if this is a constant or PLUS or if there may be a problem accessing OPERAND in the outer mode. */ if (CONSTANT_P (operand) || GET_CODE (operand) == PLUS /* We must force a reload of paradoxical SUBREGs of a MEM because the alignment of the inner value may not be enough to do the outer reference. On big-endian machines, it may also reference outside the object. On machines that extend byte operations and we have a SUBREG where both the inner and outer modes are no wider than a word and the inner mode is narrower, is integral, and gets extended when loaded from memory, combine.c has made assumptions about the behavior of the machine in such register access. If the data is, in fact, in memory we must always load using the size assumed to be in the register and let the insn do the different-sized accesses. This is doubly true if WORD_REGISTER_OPERATIONS. In this case eliminate_regs has left non-paradoxical subregs for push_reload to see. Make sure it does by forcing the reload. ??? When is it right at this stage to have a subreg of a mem that is _not_ to be handled specially? IMO those should have been reduced to just a mem. */ || ((MEM_P (operand) || (REG_P (operand) && REGNO (operand) >= FIRST_PSEUDO_REGISTER)) #ifndef WORD_REGISTER_OPERATIONS && (((GET_MODE_BITSIZE (GET_MODE (operand)) < BIGGEST_ALIGNMENT) && (GET_MODE_SIZE (operand_mode[i]) > GET_MODE_SIZE (GET_MODE (operand)))) || BYTES_BIG_ENDIAN #ifdef LOAD_EXTEND_OP || (GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD && (GET_MODE_SIZE (GET_MODE (operand)) <= UNITS_PER_WORD) && (GET_MODE_SIZE (operand_mode[i]) > GET_MODE_SIZE (GET_MODE (operand))) && INTEGRAL_MODE_P (GET_MODE (operand)) && LOAD_EXTEND_OP (GET_MODE (operand)) != NIL) #endif ) #endif ) ) force_reload = 1; } this_alternative[i] = (int) NO_REGS; this_alternative_win[i] = 0; this_alternative_match_win[i] = 0; this_alternative_offmemok[i] = 0; this_alternative_earlyclobber[i] = 0; this_alternative_matches[i] = -1; /* An empty constraint or empty alternative allows anything which matched the pattern. */ if (*p == 0 || *p == ',') win = 1, badop = 0; /* Scan this alternative's specs for this operand; set WIN if the operand fits any letter in this alternative. Otherwise, clear BADOP if this operand could fit some letter after reloads, or set WINREG if this operand could fit after reloads provided the constraint allows some registers. */ do switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c) { case '\0': len = 0; break; case ',': c = '\0'; break; case '=': case '+': case '*': break; case '%': /* We only support one commutative marker, the first one. We already set commutative above. */ break; case '?': reject += 6; break; case '!': reject = 600; break; case '#': /* Ignore rest of this alternative as far as reloading is concerned. 
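The loop below simply advances to the next ',', so every constraint letter that follows '#' in this alternative is skipped.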
*/ do p++; while (*p && *p != ','); len = 0; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': m = strtoul (p, &end, 10); p = end; len = 0; this_alternative_matches[i] = m; /* We are supposed to match a previous operand. If we do, we win if that one did. If we do not, count both of the operands as losers. (This is too conservative, since most of the time only a single reload insn will be needed to make the two operands win. As a result, this alternative may be rejected when it is actually desirable.) */ if ((swapped && (m != commutative || i != commutative + 1)) /* If we are matching as if two operands were swapped, also pretend that operands_match had been computed with swapped. But if I is the second of those and C is the first, don't exchange them, because operands_match is valid only on one side of its diagonal. */ ? (operands_match [(m == commutative || m == commutative + 1) ? 2 * commutative + 1 - m : m] [(i == commutative || i == commutative + 1) ? 2 * commutative + 1 - i : i]) : operands_match[m][i]) { /* If we are matching a non-offsettable address where an offsettable address was expected, then we must reject this combination, because we can't reload it. */ if (this_alternative_offmemok[m] && MEM_P (recog_data.operand[m]) && this_alternative[m] == (int) NO_REGS && ! this_alternative_win[m]) bad = 1; did_match = this_alternative_win[m]; } else { /* Operands don't match. */ rtx value; /* Retroactively mark the operand we had to match as a loser, if it wasn't already. */ if (this_alternative_win[m]) losers++; this_alternative_win[m] = 0; if (this_alternative[m] == (int) NO_REGS) bad = 1; /* But count the pair only once in the total badness of this alternative, if the pair can be a dummy reload. */ value = find_dummy_reload (recog_data.operand[i], recog_data.operand[m], recog_data.operand_loc[i], recog_data.operand_loc[m], operand_mode[i], operand_mode[m], this_alternative[m], -1, this_alternative_earlyclobber[m]); if (value != 0) losers--; } /* This can be fixed with reloads if the operand we are supposed to match can be fixed with reloads. */ badop = 0; this_alternative[i] = this_alternative[m]; /* If we have to reload this operand and some previous operand also had to match the same thing as this operand, we don't know how to do that. So reject this alternative. */ if (! did_match || force_reload) for (j = 0; j < i; j++) if (this_alternative_matches[j] == this_alternative_matches[i]) badop = 1; break; case 'p': /* All necessary reloads for an address_operand were handled in find_reloads_address. */ this_alternative[i] = (int) MODE_BASE_REG_CLASS (VOIDmode); win = 1; badop = 0; break; case 'm': if (force_reload) break; if (MEM_P (operand) || (REG_P (operand) && REGNO (operand) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (operand)] < 0)) win = 1; if (CONST_POOL_OK_P (operand)) badop = 0; constmemok = 1; break; case '<': if (MEM_P (operand) && ! address_reloaded[i] && (GET_CODE (XEXP (operand, 0)) == PRE_DEC || GET_CODE (XEXP (operand, 0)) == POST_DEC)) win = 1; break; case '>': if (MEM_P (operand) && ! address_reloaded[i] && (GET_CODE (XEXP (operand, 0)) == PRE_INC || GET_CODE (XEXP (operand, 0)) == POST_INC)) win = 1; break; /* Memory operand whose address is not offsettable. */ case 'V': if (force_reload) break; if (MEM_P (operand) && ! (ind_levels ? offsettable_memref_p (operand) : offsettable_nonstrict_memref_p (operand)) /* Certain mem addresses will become offsettable after they themselves are reloaded. 
This is important; we don't want our own handling of unoffsettables to override the handling of reg_equiv_address. */ && !(REG_P (XEXP (operand, 0)) && (ind_levels == 0 || reg_equiv_address[REGNO (XEXP (operand, 0))] != 0))) win = 1; break; /* Memory operand whose address is offsettable. */ case 'o': if (force_reload) break; if ((MEM_P (operand) /* If IND_LEVELS, find_reloads_address won't reload a pseudo that didn't get a hard reg, so we have to reject that case. */ && ((ind_levels ? offsettable_memref_p (operand) : offsettable_nonstrict_memref_p (operand)) /* A reloaded address is offsettable because it is now just a simple register indirect. */ || address_reloaded[i])) || (REG_P (operand) && REGNO (operand) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (operand)] < 0 /* If reg_equiv_address is nonzero, we will be loading it into a register; hence it will be offsettable, but we cannot say that reg_equiv_mem is offsettable without checking. */ && ((reg_equiv_mem[REGNO (operand)] != 0 && offsettable_memref_p (reg_equiv_mem[REGNO (operand)])) || (reg_equiv_address[REGNO (operand)] != 0)))) win = 1; if (CONST_POOL_OK_P (operand) || MEM_P (operand)) badop = 0; constmemok = 1; offmemok = 1; break; case '&': /* Output operand that is stored before the need for the input operands (and their index registers) is over. */ earlyclobber = 1, this_earlyclobber = 1; break; case 'E': case 'F': if (GET_CODE (operand) == CONST_DOUBLE || (GET_CODE (operand) == CONST_VECTOR && (GET_MODE_CLASS (GET_MODE (operand)) == MODE_VECTOR_FLOAT))) win = 1; break; case 'G': case 'H': if (GET_CODE (operand) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (operand, c, p)) win = 1; break; case 's': if (GET_CODE (operand) == CONST_INT || (GET_CODE (operand) == CONST_DOUBLE && GET_MODE (operand) == VOIDmode)) break; case 'i': if (CONSTANT_P (operand) #ifdef LEGITIMATE_PIC_OPERAND_P && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (operand)) #endif ) win = 1; break; case 'n': if (GET_CODE (operand) == CONST_INT || (GET_CODE (operand) == CONST_DOUBLE && GET_MODE (operand) == VOIDmode)) win = 1; break; case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': if (GET_CODE (operand) == CONST_INT && CONST_OK_FOR_CONSTRAINT_P (INTVAL (operand), c, p)) win = 1; break; case 'X': win = 1; break; case 'g': if (! force_reload /* A PLUS is never a valid operand, but reload can make it from a register when eliminating registers. */ && GET_CODE (operand) != PLUS /* A SCRATCH is not a valid operand. */ && GET_CODE (operand) != SCRATCH #ifdef LEGITIMATE_PIC_OPERAND_P && (! CONSTANT_P (operand) || ! flag_pic || LEGITIMATE_PIC_OPERAND_P (operand)) #endif && (GENERAL_REGS == ALL_REGS || !REG_P (operand) || (REGNO (operand) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (operand)] < 0))) win = 1; /* Drop through into 'r' case. */ case 'r': this_alternative[i] = (int) reg_class_subunion[this_alternative[i]][(int) GENERAL_REGS]; goto reg; default: if (REG_CLASS_FROM_CONSTRAINT (c, p) == NO_REGS) { #ifdef EXTRA_CONSTRAINT_STR if (EXTRA_MEMORY_CONSTRAINT (c, p)) { if (force_reload) break; if (EXTRA_CONSTRAINT_STR (operand, c, p)) win = 1; /* If the address was already reloaded, we win as well. */ else if (MEM_P (operand) && address_reloaded[i]) win = 1; /* Likewise if the address will be reloaded because reg_equiv_address is nonzero. For reg_equiv_mem we have to check. 
*/ else if (REG_P (operand) && REGNO (operand) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (operand)] < 0 && ((reg_equiv_mem[REGNO (operand)] != 0 && EXTRA_CONSTRAINT_STR (reg_equiv_mem[REGNO (operand)], c, p)) || (reg_equiv_address[REGNO (operand)] != 0))) win = 1; /* If we didn't already win, we can reload constants via force_const_mem, and other MEMs by reloading the address like for 'o'. */ if (CONST_POOL_OK_P (operand) || MEM_P (operand)) badop = 0; constmemok = 1; offmemok = 1; break; } if (EXTRA_ADDRESS_CONSTRAINT (c, p)) { if (EXTRA_CONSTRAINT_STR (operand, c, p)) win = 1; /* If we didn't already win, we can reload the address into a base register. */ this_alternative[i] = (int) MODE_BASE_REG_CLASS (VOIDmode); badop = 0; break; } if (EXTRA_CONSTRAINT_STR (operand, c, p)) win = 1; #endif break; } this_alternative[i] = (int) (reg_class_subunion [this_alternative[i]] [(int) REG_CLASS_FROM_CONSTRAINT (c, p)]); reg: if (GET_MODE (operand) == BLKmode) break; winreg = 1; if (REG_P (operand) && reg_fits_class_p (operand, this_alternative[i], offset, GET_MODE (recog_data.operand[i]))) win = 1; break; } while ((p += len), c); constraints[i] = p; /* If this operand could be handled with a reg, and some reg is allowed, then this operand can be handled. */ if (winreg && this_alternative[i] != (int) NO_REGS) badop = 0; /* Record which operands fit this alternative. */ this_alternative_earlyclobber[i] = earlyclobber; if (win && ! force_reload) this_alternative_win[i] = 1; else if (did_match && ! force_reload) this_alternative_match_win[i] = 1; else { int const_to_mem = 0; this_alternative_offmemok[i] = offmemok; losers++; if (badop) bad = 1; /* Alternative loses if it has no regs for a reg operand. */ if (REG_P (operand) && this_alternative[i] == (int) NO_REGS && this_alternative_matches[i] < 0) bad = 1; /* If this is a constant that is reloaded into the desired class by copying it to memory first, count that as another reload. This is consistent with other code and is required to avoid choosing another alternative when the constant is moved into memory by this function on an early reload pass. Note that the test here is precisely the same as in the code below that calls force_const_mem. */ if (CONST_POOL_OK_P (operand) && ((PREFERRED_RELOAD_CLASS (operand, (enum reg_class) this_alternative[i]) == NO_REGS) || no_input_reloads) && operand_mode[i] != VOIDmode) { const_to_mem = 1; if (this_alternative[i] != (int) NO_REGS) losers++; } /* If we can't reload this value at all, reject this alternative. Note that we could also lose due to LIMIT_RELOAD_RELOAD_CLASS, but we don't check that here. */ if (! CONSTANT_P (operand) && (enum reg_class) this_alternative[i] != NO_REGS && (PREFERRED_RELOAD_CLASS (operand, (enum reg_class) this_alternative[i]) == NO_REGS)) bad = 1; /* Alternative loses if it requires a type of reload not permitted for this insn. We can always reload SCRATCH and objects with a REG_UNUSED note. */ else if (GET_CODE (operand) != SCRATCH && modified[i] != RELOAD_READ && no_output_reloads && ! find_reg_note (insn, REG_UNUSED, operand)) bad = 1; else if (modified[i] != RELOAD_WRITE && no_input_reloads && ! const_to_mem) bad = 1; /* We prefer to reload pseudos over reloading other things, since such reloads may be able to be eliminated later. If we are reloading a SCRATCH, we won't be generating any insns, just using a register, so it is also preferred. So bump REJECT in other cases. 
Don't do this in the case where we are forcing a constant into memory and it will then win since we don't want to have a different alternative match then. */ if (! (REG_P (operand) && REGNO (operand) >= FIRST_PSEUDO_REGISTER) && GET_CODE (operand) != SCRATCH && ! (const_to_mem && constmemok)) reject += 2; /* Input reloads can be inherited more often than output reloads can be removed, so penalize output reloads. */ if (operand_type[i] != RELOAD_FOR_INPUT && GET_CODE (operand) != SCRATCH) reject++; } /* If this operand is a pseudo register that didn't get a hard reg and this alternative accepts some register, see if the class that we want is a subset of the preferred class for this register. If not, but it intersects that class, use the preferred class instead. If it does not intersect the preferred class, show that usage of this alternative should be discouraged; it will be discouraged more still if the register is `preferred or nothing'. We do this because it increases the chance of reusing our spill register in a later insn and avoiding a pair of memory stores and loads. Don't bother with this if this alternative will accept this operand. Don't do this for a multiword operand, since it is only a small win and has the risk of requiring more spill registers, which could cause a large loss. Don't do this if the preferred class has only one register because we might otherwise exhaust the class. */ if (! win && ! did_match && this_alternative[i] != (int) NO_REGS && GET_MODE_SIZE (operand_mode[i]) <= UNITS_PER_WORD && reg_class_size[(int) preferred_class[i]] > 1) { if (! reg_class_subset_p (this_alternative[i], preferred_class[i])) { /* Since we don't have a way of forming the intersection, we just do something special if the preferred class is a subset of the class we have; that's the most common case anyway. */ if (reg_class_subset_p (preferred_class[i], this_alternative[i])) this_alternative[i] = (int) preferred_class[i]; else reject += (2 + 2 * pref_or_nothing[i]); } } } /* Now see if any output operands that are marked "earlyclobber" in this alternative conflict with any input operands or any memory addresses. */ for (i = 0; i < noperands; i++) if (this_alternative_earlyclobber[i] && (this_alternative_win[i] || this_alternative_match_win[i])) { struct decomposition early_data; early_data = decompose (recog_data.operand[i]); if (modified[i] == RELOAD_READ) abort (); if (this_alternative[i] == NO_REGS) { this_alternative_earlyclobber[i] = 0; if (this_insn_is_asm) error_for_asm (this_insn, "`&' constraint used with no register class"); else abort (); } for (j = 0; j < noperands; j++) /* Is this an input operand or a memory ref? */ if ((MEM_P (recog_data.operand[j]) || modified[j] != RELOAD_WRITE) && j != i /* Ignore things like match_operator operands. */ && *recog_data.constraints[j] != 0 /* Don't count an input operand that is constrained to match the early clobber operand. */ && ! (this_alternative_matches[j] == i && rtx_equal_p (recog_data.operand[i], recog_data.operand[j])) /* Is it altered by storing the earlyclobber operand? */ && !immune_p (recog_data.operand[j], recog_data.operand[i], early_data)) { /* If the output is in a single-reg class, it's costly to reload it, so reload the input instead. 
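Marking the conflicting input operand as a loser below makes it the one that gets reloaded, instead of tying up the only register of the output's class.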
*/ if (reg_class_size[this_alternative[i]] == 1 && (REG_P (recog_data.operand[j]) || GET_CODE (recog_data.operand[j]) == SUBREG)) { losers++; this_alternative_win[j] = 0; this_alternative_match_win[j] = 0; } else break; } /* If an earlyclobber operand conflicts with something, it must be reloaded, so request this and count the cost. */ if (j != noperands) { losers++; this_alternative_win[i] = 0; this_alternative_match_win[j] = 0; for (j = 0; j < noperands; j++) if (this_alternative_matches[j] == i && this_alternative_match_win[j]) { this_alternative_win[j] = 0; this_alternative_match_win[j] = 0; losers++; } } } /* If one alternative accepts all the operands, no reload required, choose that alternative; don't consider the remaining ones. */ if (losers == 0) { /* Unswap these so that they are never swapped at `finish'. */ if (commutative >= 0) { recog_data.operand[commutative] = substed_operand[commutative]; recog_data.operand[commutative + 1] = substed_operand[commutative + 1]; } for (i = 0; i < noperands; i++) { goal_alternative_win[i] = this_alternative_win[i]; goal_alternative_match_win[i] = this_alternative_match_win[i]; goal_alternative[i] = this_alternative[i]; goal_alternative_offmemok[i] = this_alternative_offmemok[i]; goal_alternative_matches[i] = this_alternative_matches[i]; goal_alternative_earlyclobber[i] = this_alternative_earlyclobber[i]; } goal_alternative_number = this_alternative_number; goal_alternative_swapped = swapped; goal_earlyclobber = this_earlyclobber; goto finish; } /* REJECT, set by the ! and ? constraint characters and when a register would be reloaded into a non-preferred class, discourages the use of this alternative for a reload goal. REJECT is incremented by six for each ? and two for each non-preferred class. */ losers = losers * 6 + reject; /* If this alternative can be made to work by reloading, and it needs less reloading than the others checked so far, record it as the chosen goal for reloading. */ if (! bad && best > losers) { for (i = 0; i < noperands; i++) { goal_alternative[i] = this_alternative[i]; goal_alternative_win[i] = this_alternative_win[i]; goal_alternative_match_win[i] = this_alternative_match_win[i]; goal_alternative_offmemok[i] = this_alternative_offmemok[i]; goal_alternative_matches[i] = this_alternative_matches[i]; goal_alternative_earlyclobber[i] = this_alternative_earlyclobber[i]; } goal_alternative_swapped = swapped; best = losers; goal_alternative_number = this_alternative_number; goal_earlyclobber = this_earlyclobber; } } /* If insn is commutative (it's safe to exchange a certain pair of operands) then we need to try each alternative twice, the second time matching those two operands as if we had exchanged them. To do this, really exchange them in operands. If we have just tried the alternatives the second time, return operands to normal and drop through. */ if (commutative >= 0) { swapped = !swapped; if (swapped) { enum reg_class tclass; int t; recog_data.operand[commutative] = substed_operand[commutative + 1]; recog_data.operand[commutative + 1] = substed_operand[commutative]; /* Swap the duplicates too. 
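MATCH_DUPs refer to operands by number, so after exchanging the two commutative operands the rtl stored at each duplicate location must be refreshed from the swapped operand array.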
*/ for (i = 0; i < recog_data.n_dups; i++) if (recog_data.dup_num[i] == commutative || recog_data.dup_num[i] == commutative + 1) *recog_data.dup_loc[i] = recog_data.operand[(int) recog_data.dup_num[i]]; tclass = preferred_class[commutative]; preferred_class[commutative] = preferred_class[commutative + 1]; preferred_class[commutative + 1] = tclass; t = pref_or_nothing[commutative]; pref_or_nothing[commutative] = pref_or_nothing[commutative + 1]; pref_or_nothing[commutative + 1] = t; memcpy (constraints, recog_data.constraints, noperands * sizeof (char *)); goto try_swapped; } else { recog_data.operand[commutative] = substed_operand[commutative]; recog_data.operand[commutative + 1] = substed_operand[commutative + 1]; /* Unswap the duplicates too. */ for (i = 0; i < recog_data.n_dups; i++) if (recog_data.dup_num[i] == commutative || recog_data.dup_num[i] == commutative + 1) *recog_data.dup_loc[i] = recog_data.operand[(int) recog_data.dup_num[i]]; } } /* The operands don't meet the constraints. goal_alternative describes the alternative that we could reach by reloading the fewest operands. Reload so as to fit it. */ if (best == MAX_RECOG_OPERANDS * 2 + 600) { /* No alternative works with reloads?? */ if (insn_code_number >= 0) fatal_insn ("unable to generate reloads for:", insn); error_for_asm (insn, "inconsistent operand constraints in an `asm'"); /* Avoid further trouble with this insn. */ PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx); n_reloads = 0; return 0; } /* Jump to `finish' from above if all operands are valid already. In that case, goal_alternative_win is all 1. */ finish: /* Right now, for any pair of operands I and J that are required to match, with I < J, goal_alternative_matches[J] is I. Set up goal_alternative_matched as the inverse function: goal_alternative_matched[I] = J. */ for (i = 0; i < noperands; i++) goal_alternative_matched[i] = -1; for (i = 0; i < noperands; i++) if (! goal_alternative_win[i] && goal_alternative_matches[i] >= 0) goal_alternative_matched[goal_alternative_matches[i]] = i; for (i = 0; i < noperands; i++) goal_alternative_win[i] |= goal_alternative_match_win[i]; /* If the best alternative is with operands 1 and 2 swapped, consider them swapped before reporting the reloads. Update the operand numbers of any reloads already pushed. */ if (goal_alternative_swapped) { rtx tem; tem = substed_operand[commutative]; substed_operand[commutative] = substed_operand[commutative + 1]; substed_operand[commutative + 1] = tem; tem = recog_data.operand[commutative]; recog_data.operand[commutative] = recog_data.operand[commutative + 1]; recog_data.operand[commutative + 1] = tem; tem = *recog_data.operand_loc[commutative]; *recog_data.operand_loc[commutative] = *recog_data.operand_loc[commutative + 1]; *recog_data.operand_loc[commutative + 1] = tem; for (i = 0; i < n_reloads; i++) { if (rld[i].opnum == commutative) rld[i].opnum = commutative + 1; else if (rld[i].opnum == commutative + 1) rld[i].opnum = commutative; } } for (i = 0; i < noperands; i++) { operand_reloadnum[i] = -1; /* If this is an earlyclobber operand, we need to widen the scope. The reload must remain valid from the start of the insn being reloaded until after the operand is stored into its destination. We approximate this with RELOAD_OTHER even though we know that we do not conflict with RELOAD_FOR_INPUT_ADDRESS reloads. One special case that is worth checking is when we have an output that is earlyclobber but isn't used past the insn (typically a SCRATCH). 
In this case, we only need have the reload live through the insn itself, but not for any of our input or output reloads. But we must not accidentally narrow the scope of an existing RELOAD_OTHER reload - leave these alone. In any case, anything needed to address this operand can remain however they were previously categorized. */ if (goal_alternative_earlyclobber[i] && operand_type[i] != RELOAD_OTHER) operand_type[i] = (find_reg_note (insn, REG_UNUSED, recog_data.operand[i]) ? RELOAD_FOR_INSN : RELOAD_OTHER); } /* Any constants that aren't allowed and can't be reloaded into registers are here changed into memory references. */ for (i = 0; i < noperands; i++) if (! goal_alternative_win[i] && CONST_POOL_OK_P (recog_data.operand[i]) && ((PREFERRED_RELOAD_CLASS (recog_data.operand[i], (enum reg_class) goal_alternative[i]) == NO_REGS) || no_input_reloads) && operand_mode[i] != VOIDmode) { substed_operand[i] = recog_data.operand[i] = find_reloads_toplev (force_const_mem (operand_mode[i], recog_data.operand[i]), i, address_type[i], ind_levels, 0, insn, NULL); if (alternative_allows_memconst (recog_data.constraints[i], goal_alternative_number)) goal_alternative_win[i] = 1; } /* Record the values of the earlyclobber operands for the caller. */ if (goal_earlyclobber) for (i = 0; i < noperands; i++) if (goal_alternative_earlyclobber[i]) reload_earlyclobbers[n_earlyclobbers++] = recog_data.operand[i]; /* Now record reloads for all the operands that need them. */ for (i = 0; i < noperands; i++) if (! goal_alternative_win[i]) { /* Operands that match previous ones have already been handled. */ if (goal_alternative_matches[i] >= 0) ; /* Handle an operand with a nonoffsettable address appearing where an offsettable address will do by reloading the address into a base register. ??? We can also do this when the operand is a register and reg_equiv_mem is not offsettable, but this is a bit tricky, so we don't bother with it. It may not be worth doing. */ else if (goal_alternative_matched[i] == -1 && goal_alternative_offmemok[i] && MEM_P (recog_data.operand[i])) { operand_reloadnum[i] = push_reload (XEXP (recog_data.operand[i], 0), NULL_RTX, &XEXP (recog_data.operand[i], 0), (rtx*) 0, MODE_BASE_REG_CLASS (VOIDmode), GET_MODE (XEXP (recog_data.operand[i], 0)), VOIDmode, 0, 0, i, RELOAD_FOR_INPUT); rld[operand_reloadnum[i]].inc = GET_MODE_SIZE (GET_MODE (recog_data.operand[i])); /* If this operand is an output, we will have made any reloads for its address as RELOAD_FOR_OUTPUT_ADDRESS, but now we are treating part of the operand as an input, so we must change these to RELOAD_FOR_INPUT_ADDRESS. */ if (modified[i] == RELOAD_WRITE) { for (j = 0; j < n_reloads; j++) { if (rld[j].opnum == i) { if (rld[j].when_needed == RELOAD_FOR_OUTPUT_ADDRESS) rld[j].when_needed = RELOAD_FOR_INPUT_ADDRESS; else if (rld[j].when_needed == RELOAD_FOR_OUTADDR_ADDRESS) rld[j].when_needed = RELOAD_FOR_INPADDR_ADDRESS; } } } } else if (goal_alternative_matched[i] == -1) { operand_reloadnum[i] = push_reload ((modified[i] != RELOAD_WRITE ? recog_data.operand[i] : 0), (modified[i] != RELOAD_READ ? recog_data.operand[i] : 0), (modified[i] != RELOAD_WRITE ? recog_data.operand_loc[i] : 0), (modified[i] != RELOAD_READ ? recog_data.operand_loc[i] : 0), (enum reg_class) goal_alternative[i], (modified[i] == RELOAD_WRITE ? VOIDmode : operand_mode[i]), (modified[i] == RELOAD_READ ? VOIDmode : operand_mode[i]), (insn_code_number < 0 ? 
0 : insn_data[insn_code_number].operand[i].strict_low), 0, i, operand_type[i]); } /* In a matching pair of operands, one must be input only and the other must be output only. Pass the input operand as IN and the other as OUT. */ else if (modified[i] == RELOAD_READ && modified[goal_alternative_matched[i]] == RELOAD_WRITE) { operand_reloadnum[i] = push_reload (recog_data.operand[i], recog_data.operand[goal_alternative_matched[i]], recog_data.operand_loc[i], recog_data.operand_loc[goal_alternative_matched[i]], (enum reg_class) goal_alternative[i], operand_mode[i], operand_mode[goal_alternative_matched[i]], 0, 0, i, RELOAD_OTHER); operand_reloadnum[goal_alternative_matched[i]] = output_reloadnum; } else if (modified[i] == RELOAD_WRITE && modified[goal_alternative_matched[i]] == RELOAD_READ) { operand_reloadnum[goal_alternative_matched[i]] = push_reload (recog_data.operand[goal_alternative_matched[i]], recog_data.operand[i], recog_data.operand_loc[goal_alternative_matched[i]], recog_data.operand_loc[i], (enum reg_class) goal_alternative[i], operand_mode[goal_alternative_matched[i]], operand_mode[i], 0, 0, i, RELOAD_OTHER); operand_reloadnum[i] = output_reloadnum; } else if (insn_code_number >= 0) abort (); else { error_for_asm (insn, "inconsistent operand constraints in an `asm'"); /* Avoid further trouble with this insn. */ PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx); n_reloads = 0; return 0; } } else if (goal_alternative_matched[i] < 0 && goal_alternative_matches[i] < 0 && !address_operand_reloaded[i] && optimize) { /* For each non-matching operand that's a MEM or a pseudo-register that didn't get a hard register, make an optional reload. This may get done even if the insn needs no reloads otherwise. */ rtx operand = recog_data.operand[i]; while (GET_CODE (operand) == SUBREG) operand = SUBREG_REG (operand); if ((MEM_P (operand) || (REG_P (operand) && REGNO (operand) >= FIRST_PSEUDO_REGISTER)) /* If this is only for an output, the optional reload would not actually cause us to use a register now, just note that something is stored here. */ && ((enum reg_class) goal_alternative[i] != NO_REGS || modified[i] == RELOAD_WRITE) && ! no_input_reloads /* An optional output reload might allow to delete INSN later. We mustn't make in-out reloads on insns that are not permitted output reloads. If this is an asm, we can't delete it; we must not even call push_reload for an optional output reload in this case, because we can't be sure that the constraint allows a register, and push_reload verifies the constraints for asms. */ && (modified[i] == RELOAD_READ || (! no_output_reloads && ! this_insn_is_asm))) operand_reloadnum[i] = push_reload ((modified[i] != RELOAD_WRITE ? recog_data.operand[i] : 0), (modified[i] != RELOAD_READ ? recog_data.operand[i] : 0), (modified[i] != RELOAD_WRITE ? recog_data.operand_loc[i] : 0), (modified[i] != RELOAD_READ ? recog_data.operand_loc[i] : 0), (enum reg_class) goal_alternative[i], (modified[i] == RELOAD_WRITE ? VOIDmode : operand_mode[i]), (modified[i] == RELOAD_READ ? VOIDmode : operand_mode[i]), (insn_code_number < 0 ? 0 : insn_data[insn_code_number].operand[i].strict_low), 1, i, operand_type[i]); /* If a memory reference remains (either as a MEM or a pseudo that did not get a hard register), yet we can't make an optional reload, check if this is actually a pseudo register reference; we then need to emit a USE and/or a CLOBBER so that reload inheritance will do the right thing. 
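A read of the pseudo is recorded with a USE emitted before the insn and a write with a CLOBBER emitted after it; the USE is given QImode below so that it can be recognized and deleted once reload has finished.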
*/ else if (replace && (MEM_P (operand) || (REG_P (operand) && REGNO (operand) >= FIRST_PSEUDO_REGISTER && reg_renumber [REGNO (operand)] < 0))) { operand = *recog_data.operand_loc[i]; while (GET_CODE (operand) == SUBREG) operand = SUBREG_REG (operand); if (REG_P (operand)) { if (modified[i] != RELOAD_WRITE) /* We mark the USE with QImode so that we recognize it as one that can be safely deleted at the end of reload. */ PUT_MODE (emit_insn_before (gen_rtx_USE (VOIDmode, operand), insn), QImode); if (modified[i] != RELOAD_READ) emit_insn_after (gen_rtx_CLOBBER (VOIDmode, operand), insn); } } } else if (goal_alternative_matches[i] >= 0 && goal_alternative_win[goal_alternative_matches[i]] && modified[i] == RELOAD_READ && modified[goal_alternative_matches[i]] == RELOAD_WRITE && ! no_input_reloads && ! no_output_reloads && optimize) { /* Similarly, make an optional reload for a pair of matching objects that are in MEM or a pseudo that didn't get a hard reg. */ rtx operand = recog_data.operand[i]; while (GET_CODE (operand) == SUBREG) operand = SUBREG_REG (operand); if ((MEM_P (operand) || (REG_P (operand) && REGNO (operand) >= FIRST_PSEUDO_REGISTER)) && ((enum reg_class) goal_alternative[goal_alternative_matches[i]] != NO_REGS)) operand_reloadnum[i] = operand_reloadnum[goal_alternative_matches[i]] = push_reload (recog_data.operand[goal_alternative_matches[i]], recog_data.operand[i], recog_data.operand_loc[goal_alternative_matches[i]], recog_data.operand_loc[i], (enum reg_class) goal_alternative[goal_alternative_matches[i]], operand_mode[goal_alternative_matches[i]], operand_mode[i], 0, 1, goal_alternative_matches[i], RELOAD_OTHER); } /* Perform whatever substitutions on the operands we are supposed to make due to commutativity or replacement of registers with equivalent constants or memory slots. */ for (i = 0; i < noperands; i++) { /* We only do this on the last pass through reload, because it is possible for some data (like reg_equiv_address) to be changed during later passes. Moreover, we loose the opportunity to get a useful reload_{in,out}_reg when we do these replacements. */ if (replace) { rtx substitution = substed_operand[i]; *recog_data.operand_loc[i] = substitution; /* If we're replacing an operand with a LABEL_REF, we need to make sure that there's a REG_LABEL note attached to this instruction. */ if (GET_CODE (insn) != JUMP_INSN && GET_CODE (substitution) == LABEL_REF && !find_reg_note (insn, REG_LABEL, XEXP (substitution, 0))) REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (substitution, 0), REG_NOTES (insn)); } else retval |= (substed_operand[i] != *recog_data.operand_loc[i]); } /* If this insn pattern contains any MATCH_DUP's, make sure that they will be substituted if the operands they match are substituted. Also do now any substitutions we already did on the operands. Don't do this if we aren't making replacements because we might be propagating things allocated by frame pointer elimination into places it doesn't expect. */ if (insn_code_number >= 0 && replace) for (i = insn_data[insn_code_number].n_dups - 1; i >= 0; i--) { int opno = recog_data.dup_num[i]; *recog_data.dup_loc[i] = *recog_data.operand_loc[opno]; dup_replacements (recog_data.dup_loc[i], recog_data.operand_loc[opno]); } #if 0 /* This loses because reloading of prior insns can invalidate the equivalence (or at least find_equiv_reg isn't smart enough to find it any more), causing this insn to need more reload regs than it needed before. It may be too late to make the reload regs available. 
Now this optimization is done safely in choose_reload_regs. */ /* For each reload of a reg into some other class of reg, search for an existing equivalent reg (same value now) in the right class. We can use it as long as we don't need to change its contents. */ for (i = 0; i < n_reloads; i++) if (rld[i].reg_rtx == 0 && rld[i].in != 0 && REG_P (rld[i].in) && rld[i].out == 0) { rld[i].reg_rtx = find_equiv_reg (rld[i].in, insn, rld[i].class, -1, static_reload_reg_p, 0, rld[i].inmode); /* Prevent generation of insn to load the value because the one we found already has the value. */ if (rld[i].reg_rtx) rld[i].in = rld[i].reg_rtx; } #endif /* Perhaps an output reload can be combined with another to reduce needs by one. */ if (!goal_earlyclobber) combine_reloads (); /* If we have a pair of reloads for parts of an address, they are reloading the same object, the operands themselves were not reloaded, and they are for two operands that are supposed to match, merge the reloads and change the type of the surviving reload to RELOAD_FOR_OPERAND_ADDRESS. */ for (i = 0; i < n_reloads; i++) { int k; for (j = i + 1; j < n_reloads; j++) if ((rld[i].when_needed == RELOAD_FOR_INPUT_ADDRESS || rld[i].when_needed == RELOAD_FOR_OUTPUT_ADDRESS || rld[i].when_needed == RELOAD_FOR_INPADDR_ADDRESS || rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS) && (rld[j].when_needed == RELOAD_FOR_INPUT_ADDRESS || rld[j].when_needed == RELOAD_FOR_OUTPUT_ADDRESS || rld[j].when_needed == RELOAD_FOR_INPADDR_ADDRESS || rld[j].when_needed == RELOAD_FOR_OUTADDR_ADDRESS) && rtx_equal_p (rld[i].in, rld[j].in) && (operand_reloadnum[rld[i].opnum] < 0 || rld[operand_reloadnum[rld[i].opnum]].optional) && (operand_reloadnum[rld[j].opnum] < 0 || rld[operand_reloadnum[rld[j].opnum]].optional) && (goal_alternative_matches[rld[i].opnum] == rld[j].opnum || (goal_alternative_matches[rld[j].opnum] == rld[i].opnum))) { for (k = 0; k < n_replacements; k++) if (replacements[k].what == j) replacements[k].what = i; if (rld[i].when_needed == RELOAD_FOR_INPADDR_ADDRESS || rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS) rld[i].when_needed = RELOAD_FOR_OPADDR_ADDR; else rld[i].when_needed = RELOAD_FOR_OPERAND_ADDRESS; rld[j].in = 0; } } /* Scan all the reloads and update their type. If a reload is for the address of an operand and we didn't reload that operand, change the type. Similarly, change the operand number of a reload when two operands match. If a reload is optional, treat it as though the operand isn't reloaded. ??? This latter case is somewhat odd because if we do the optional reload, it means the object is hanging around. Thus we need only do the address reload if the optional reload was NOT done. Change secondary reloads to be the address type of their operand, not the normal type. If an operand's reload is now RELOAD_OTHER, change any RELOAD_FOR_INPUT_ADDRESS reloads of that operand to RELOAD_FOR_OTHER_ADDRESS. */ for (i = 0; i < n_reloads; i++) { if (rld[i].secondary_p && rld[i].when_needed == operand_type[rld[i].opnum]) rld[i].when_needed = address_type[rld[i].opnum]; if ((rld[i].when_needed == RELOAD_FOR_INPUT_ADDRESS || rld[i].when_needed == RELOAD_FOR_OUTPUT_ADDRESS || rld[i].when_needed == RELOAD_FOR_INPADDR_ADDRESS || rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS) && (operand_reloadnum[rld[i].opnum] < 0 || rld[operand_reloadnum[rld[i].opnum]].optional)) { /* If we have a secondary reload to go along with this reload, change its type to RELOAD_FOR_OPADDR_ADDR. 
*/ if ((rld[i].when_needed == RELOAD_FOR_INPUT_ADDRESS || rld[i].when_needed == RELOAD_FOR_INPADDR_ADDRESS) && rld[i].secondary_in_reload != -1) { int secondary_in_reload = rld[i].secondary_in_reload; rld[secondary_in_reload].when_needed = RELOAD_FOR_OPADDR_ADDR; /* If there's a tertiary reload we have to change it also. */ if (secondary_in_reload > 0 && rld[secondary_in_reload].secondary_in_reload != -1) rld[rld[secondary_in_reload].secondary_in_reload].when_needed = RELOAD_FOR_OPADDR_ADDR; } if ((rld[i].when_needed == RELOAD_FOR_OUTPUT_ADDRESS || rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS) && rld[i].secondary_out_reload != -1) { int secondary_out_reload = rld[i].secondary_out_reload; rld[secondary_out_reload].when_needed = RELOAD_FOR_OPADDR_ADDR; /* If there's a tertiary reload we have to change it also. */ if (secondary_out_reload && rld[secondary_out_reload].secondary_out_reload != -1) rld[rld[secondary_out_reload].secondary_out_reload].when_needed = RELOAD_FOR_OPADDR_ADDR; } if (rld[i].when_needed == RELOAD_FOR_INPADDR_ADDRESS || rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS) rld[i].when_needed = RELOAD_FOR_OPADDR_ADDR; else rld[i].when_needed = RELOAD_FOR_OPERAND_ADDRESS; } if ((rld[i].when_needed == RELOAD_FOR_INPUT_ADDRESS || rld[i].when_needed == RELOAD_FOR_INPADDR_ADDRESS) && operand_reloadnum[rld[i].opnum] >= 0 && (rld[operand_reloadnum[rld[i].opnum]].when_needed == RELOAD_OTHER)) rld[i].when_needed = RELOAD_FOR_OTHER_ADDRESS; if (goal_alternative_matches[rld[i].opnum] >= 0) rld[i].opnum = goal_alternative_matches[rld[i].opnum]; } /* Scan all the reloads, and check for RELOAD_FOR_OPERAND_ADDRESS reloads. If we have more than one, then convert all RELOAD_FOR_OPADDR_ADDR reloads to RELOAD_FOR_OPERAND_ADDRESS reloads. choose_reload_regs assumes that RELOAD_FOR_OPADDR_ADDR reloads never conflict with RELOAD_FOR_OPERAND_ADDRESS reloads. This is true for a single pair of RELOAD_FOR_OPADDR_ADDR/RELOAD_FOR_OPERAND_ADDRESS reloads. However, if there is more than one RELOAD_FOR_OPERAND_ADDRESS reload, then a RELOAD_FOR_OPADDR_ADDR reload conflicts with all RELOAD_FOR_OPERAND_ADDRESS reloads other than the one that uses it. This is complicated by the fact that a single operand can have more than one RELOAD_FOR_OPERAND_ADDRESS reload. It is very difficult to fix choose_reload_regs without affecting code quality, and cases that actually fail are extremely rare, so it turns out to be better to fix the problem here by not generating cases that choose_reload_regs will fail for. */ /* There is a similar problem with RELOAD_FOR_INPUT_ADDRESS / RELOAD_FOR_OUTPUT_ADDRESS when there is more than one of a kind for a single operand. We can reduce the register pressure by exploiting that a RELOAD_FOR_X_ADDR_ADDR that precedes all RELOAD_FOR_X_ADDRESS reloads does not conflict with any of them, if it is only used for the first of the RELOAD_FOR_X_ADDRESS reloads. */ { int first_op_addr_num = -2; int first_inpaddr_num[MAX_RECOG_OPERANDS]; int first_outpaddr_num[MAX_RECOG_OPERANDS]; int need_change = 0; /* We use last_op_addr_reload and the contents of the above arrays first as flags - -2 means no instance encountered, -1 means exactly one instance encountered. If more than one instance has been encountered, we store the reload number of the first reload of the kind in question; reload numbers are known to be non-negative. 
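For example, starting from -2, the first matching reload pre-increments the counter to -1, which is still negative, so nothing is recorded; a second one brings it to 0, the ++count >= 0 test in the loop below succeeds, the reload number is stored and need_change is set.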
*/ for (i = 0; i < noperands; i++) first_inpaddr_num[i] = first_outpaddr_num[i] = -2; for (i = n_reloads - 1; i >= 0; i--) { switch (rld[i].when_needed) { case RELOAD_FOR_OPERAND_ADDRESS: if (++first_op_addr_num >= 0) { first_op_addr_num = i; need_change = 1; } break; case RELOAD_FOR_INPUT_ADDRESS: if (++first_inpaddr_num[rld[i].opnum] >= 0) { first_inpaddr_num[rld[i].opnum] = i; need_change = 1; } break; case RELOAD_FOR_OUTPUT_ADDRESS: if (++first_outpaddr_num[rld[i].opnum] >= 0) { first_outpaddr_num[rld[i].opnum] = i; need_change = 1; } break; default: break; } } if (need_change) { for (i = 0; i < n_reloads; i++) { int first_num; enum reload_type type; switch (rld[i].when_needed) { case RELOAD_FOR_OPADDR_ADDR: first_num = first_op_addr_num; type = RELOAD_FOR_OPERAND_ADDRESS; break; case RELOAD_FOR_INPADDR_ADDRESS: first_num = first_inpaddr_num[rld[i].opnum]; type = RELOAD_FOR_INPUT_ADDRESS; break; case RELOAD_FOR_OUTADDR_ADDRESS: first_num = first_outpaddr_num[rld[i].opnum]; type = RELOAD_FOR_OUTPUT_ADDRESS; break; default: continue; } if (first_num < 0) continue; else if (i > first_num) rld[i].when_needed = type; else { /* Check if the only TYPE reload that uses reload I is reload FIRST_NUM. */ for (j = n_reloads - 1; j > first_num; j--) { if (rld[j].when_needed == type && (rld[i].secondary_p ? rld[j].secondary_in_reload == i : reg_mentioned_p (rld[i].in, rld[j].in))) { rld[i].when_needed = type; break; } } } } } } /* See if we have any reloads that are now allowed to be merged because we've changed when the reload is needed to RELOAD_FOR_OPERAND_ADDRESS or RELOAD_FOR_OTHER_ADDRESS. Only check for the most common cases. */ for (i = 0; i < n_reloads; i++) if (rld[i].in != 0 && rld[i].out == 0 && (rld[i].when_needed == RELOAD_FOR_OPERAND_ADDRESS || rld[i].when_needed == RELOAD_FOR_OPADDR_ADDR || rld[i].when_needed == RELOAD_FOR_OTHER_ADDRESS)) for (j = 0; j < n_reloads; j++) if (i != j && rld[j].in != 0 && rld[j].out == 0 && rld[j].when_needed == rld[i].when_needed && MATCHES (rld[i].in, rld[j].in) && rld[i].class == rld[j].class && !rld[i].nocombine && !rld[j].nocombine && rld[i].reg_rtx == rld[j].reg_rtx) { rld[i].opnum = MIN (rld[i].opnum, rld[j].opnum); transfer_replacements (i, j); rld[j].in = 0; } #ifdef HAVE_cc0 /* If we made any reloads for addresses, see if they violate a "no input reloads" requirement for this insn. But loads that we do after the insn (such as for output addresses) are fine. */ if (no_input_reloads) for (i = 0; i < n_reloads; i++) if (rld[i].in != 0 && rld[i].when_needed != RELOAD_FOR_OUTADDR_ADDRESS && rld[i].when_needed != RELOAD_FOR_OUTPUT_ADDRESS) abort (); #endif /* Compute reload_mode and reload_nregs. */ for (i = 0; i < n_reloads; i++) { rld[i].mode = (rld[i].inmode == VOIDmode || (GET_MODE_SIZE (rld[i].outmode) > GET_MODE_SIZE (rld[i].inmode))) ? rld[i].outmode : rld[i].inmode; rld[i].nregs = CLASS_MAX_NREGS (rld[i].class, rld[i].mode); } /* Special case a simple move with an input reload and a destination of a hard reg, if the hard reg is ok, use it. */ for (i = 0; i < n_reloads; i++) if (rld[i].when_needed == RELOAD_FOR_INPUT && GET_CODE (PATTERN (insn)) == SET && REG_P (SET_DEST (PATTERN (insn))) && SET_SRC (PATTERN (insn)) == rld[i].in) { rtx dest = SET_DEST (PATTERN (insn)); unsigned int regno = REGNO (dest); if (regno < FIRST_PSEUDO_REGISTER && TEST_HARD_REG_BIT (reg_class_contents[rld[i].class], regno) && HARD_REGNO_MODE_OK (regno, rld[i].mode)) { int nr = hard_regno_nregs[regno][rld[i].mode]; int ok = 1, nri; for (nri = 1; nri < nr; nri ++) if (! 
TEST_HARD_REG_BIT (reg_class_contents[rld[i].class], regno + nri)) ok = 0; if (ok) rld[i].reg_rtx = dest; } } return retval; } /* Return 1 if alternative number ALTNUM in constraint-string CONSTRAINT accepts a memory operand with constant address. */ static int alternative_allows_memconst (const char *constraint, int altnum) { int c; /* Skip alternatives before the one requested. */ while (altnum > 0) { while (*constraint++ != ','); altnum--; } /* Scan the requested alternative for 'm' or 'o'. If one of them is present, this alternative accepts memory constants. */ for (; (c = *constraint) && c != ',' && c != '#'; constraint += CONSTRAINT_LEN (c, constraint)) if (c == 'm' || c == 'o' || EXTRA_MEMORY_CONSTRAINT (c, constraint)) return 1; return 0; } /* Scan X for memory references and scan the addresses for reloading. Also checks for references to "constant" regs that we want to eliminate and replaces them with the values they stand for. We may alter X destructively if it contains a reference to such. If X is just a constant reg, we return the equivalent value instead of X. IND_LEVELS says how many levels of indirect addressing this machine supports. OPNUM and TYPE identify the purpose of the reload. IS_SET_DEST is true if X is the destination of a SET, which is not appropriate to be replaced by a constant. INSN, if nonzero, is the insn in which we do the reload. It is used to determine if we may generate output reloads, and where to put USEs for pseudos that we have to replace with stack slots. ADDRESS_RELOADED. If nonzero, is a pointer to where we put the result of find_reloads_address. */ static rtx find_reloads_toplev (rtx x, int opnum, enum reload_type type, int ind_levels, int is_set_dest, rtx insn, int *address_reloaded) { RTX_CODE code = GET_CODE (x); const char *fmt = GET_RTX_FORMAT (code); int i; int copied; if (code == REG) { /* This code is duplicated for speed in find_reloads. */ int regno = REGNO (x); if (reg_equiv_constant[regno] != 0 && !is_set_dest) x = reg_equiv_constant[regno]; #if 0 /* This creates (subreg (mem...)) which would cause an unnecessary reload of the mem. */ else if (reg_equiv_mem[regno] != 0) x = reg_equiv_mem[regno]; #endif else if (reg_equiv_memory_loc[regno] && (reg_equiv_address[regno] != 0 || num_not_at_initial_offset)) { rtx mem = make_memloc (x, regno); if (reg_equiv_address[regno] || ! rtx_equal_p (mem, reg_equiv_mem[regno])) { /* If this is not a toplevel operand, find_reloads doesn't see this substitution. We have to emit a USE of the pseudo so that delete_output_reload can see it. */ if (replace_reloads && recog_data.operand[opnum] != x) /* We mark the USE with QImode so that we recognize it as one that can be safely deleted at the end of reload. */ PUT_MODE (emit_insn_before (gen_rtx_USE (VOIDmode, x), insn), QImode); x = mem; i = find_reloads_address (GET_MODE (x), &x, XEXP (x, 0), &XEXP (x, 0), opnum, type, ind_levels, insn); if (address_reloaded) *address_reloaded = i; } } return x; } if (code == MEM) { rtx tem = x; i = find_reloads_address (GET_MODE (x), &tem, XEXP (x, 0), &XEXP (x, 0), opnum, type, ind_levels, insn); if (address_reloaded) *address_reloaded = i; return tem; } if (code == SUBREG && REG_P (SUBREG_REG (x))) { /* Check for SUBREG containing a REG that's equivalent to a constant. If the constant has a known value, truncate it right now. Similarly if we are extracting a single-word of a multi-word constant. If the constant is symbolic, allow it to be substituted normally. push_reload will strip the subreg later. 
If the constant is VOIDmode, abort because we will lose the mode of the register (this should never happen because one of the cases above should handle it). */ int regno = REGNO (SUBREG_REG (x)); rtx tem; if (subreg_lowpart_p (x) && regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0 && reg_equiv_constant[regno] != 0 && (tem = gen_lowpart_common (GET_MODE (x), reg_equiv_constant[regno])) != 0) return tem; if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0 && reg_equiv_constant[regno] != 0) { tem = simplify_gen_subreg (GET_MODE (x), reg_equiv_constant[regno], GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); if (!tem) abort (); return tem; } /* If the subreg contains a reg that will be converted to a mem, convert the subreg to a narrower memref now. Otherwise, we would get (subreg (mem ...) ...), which would force reload of the mem. We also need to do this if there is an equivalent MEM that is not offsettable. In that case, alter_subreg would produce an invalid address on big-endian machines. For machines that extend byte loads, we must not reload using a wider mode if we have a paradoxical SUBREG. find_reloads will force a reload in that case. So we should not do anything here. */ else if (regno >= FIRST_PSEUDO_REGISTER #ifdef LOAD_EXTEND_OP && (GET_MODE_SIZE (GET_MODE (x)) <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) #endif && (reg_equiv_address[regno] != 0 || (reg_equiv_mem[regno] != 0 && (! strict_memory_address_p (GET_MODE (x), XEXP (reg_equiv_mem[regno], 0)) || ! offsettable_memref_p (reg_equiv_mem[regno]) || num_not_at_initial_offset)))) x = find_reloads_subreg_address (x, 1, opnum, type, ind_levels, insn); } for (copied = 0, i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { rtx new_part = find_reloads_toplev (XEXP (x, i), opnum, type, ind_levels, is_set_dest, insn, address_reloaded); /* If we have replaced a reg with it's equivalent memory loc - that can still be handled here e.g. if it's in a paradoxical subreg - we must make the change in a copy, rather than using a destructive change. This way, find_reloads can still elect not to do the change. */ if (new_part != XEXP (x, i) && ! CONSTANT_P (new_part) && ! copied) { x = shallow_copy_rtx (x); copied = 1; } XEXP (x, i) = new_part; } } return x; } /* Return a mem ref for the memory equivalent of reg REGNO. This mem ref is not shared with anything. */ static rtx make_memloc (rtx ad, int regno) { /* We must rerun eliminate_regs, in case the elimination offsets have changed. */ rtx tem = XEXP (eliminate_regs (reg_equiv_memory_loc[regno], 0, NULL_RTX), 0); /* If TEM might contain a pseudo, we must copy it to avoid modifying it when we do the substitution for the reload. */ if (rtx_varies_p (tem, 0)) tem = copy_rtx (tem); tem = replace_equiv_address_nv (reg_equiv_memory_loc[regno], tem); tem = adjust_address_nv (tem, GET_MODE (ad), 0); /* Copy the result if it's still the same as the equivalence, to avoid modifying it when we do the substitution for the reload. */ if (tem == reg_equiv_memory_loc[regno]) tem = copy_rtx (tem); return tem; } /* Returns true if AD could be turned into a valid memory reference to mode MODE by reloading the part pointed to by PART into a register. 
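For instance (a made-up address), if AD is (plus (reg fp) (symbol_ref x)) and PART points at the symbol_ref, the function temporarily replaces the symbol_ref with a fresh pseudo register of the same mode and asks memory_address_p whether (plus (reg fp) (reg NEW)) would be a legitimate address for MODE, restoring the original contents of PART before returning.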
*/ static int maybe_memory_address_p (enum machine_mode mode, rtx ad, rtx *part) { int retv; rtx tem = *part; rtx reg = gen_rtx_REG (GET_MODE (tem), max_reg_num ()); *part = reg; retv = memory_address_p (mode, ad); *part = tem; return retv; } /* Record all reloads needed for handling memory address AD which appears in *LOC in a memory reference to mode MODE which itself is found in location *MEMREFLOC. Note that we take shortcuts assuming that no multi-reg machine mode occurs as part of an address. OPNUM and TYPE specify the purpose of this reload. IND_LEVELS says how many levels of indirect addressing this machine supports. INSN, if nonzero, is the insn in which we do the reload. It is used to determine if we may generate output reloads, and where to put USEs for pseudos that we have to replace with stack slots. Value is nonzero if this address is reloaded or replaced as a whole. This is interesting to the caller if the address is an autoincrement. Note that there is no verification that the address will be valid after this routine does its work. Instead, we rely on the fact that the address was valid when reload started. So we need only undo things that reload could have broken. These are wrong register types, pseudos not allocated to a hard register, and frame pointer elimination. */ static int find_reloads_address (enum machine_mode mode, rtx *memrefloc, rtx ad, rtx *loc, int opnum, enum reload_type type, int ind_levels, rtx insn) { int regno; int removed_and = 0; rtx tem; /* If the address is a register, see if it is a legitimate address and reload if not. We first handle the cases where we need not reload or where we must reload in a non-standard way. */ if (REG_P (ad)) { regno = REGNO (ad); /* If the register is equivalent to an invariant expression, substitute the invariant, and eliminate any eliminable register references. */ tem = reg_equiv_constant[regno]; if (tem != 0 && (tem = eliminate_regs (tem, mode, insn)) && strict_memory_address_p (mode, tem)) { *loc = ad = tem; return 0; } tem = reg_equiv_memory_loc[regno]; if (tem != 0) { if (reg_equiv_address[regno] != 0 || num_not_at_initial_offset) { tem = make_memloc (ad, regno); if (! strict_memory_address_p (GET_MODE (tem), XEXP (tem, 0))) { find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0), &XEXP (tem, 0), opnum, ADDR_TYPE (type), ind_levels, insn); } /* We can avoid a reload if the register's equivalent memory expression is valid as an indirect memory address. But not all addresses are valid in a mem used as an indirect address: only reg or reg+constant. */ if (ind_levels > 0 && strict_memory_address_p (mode, tem) && (REG_P (XEXP (tem, 0)) || (GET_CODE (XEXP (tem, 0)) == PLUS && REG_P (XEXP (XEXP (tem, 0), 0)) && CONSTANT_P (XEXP (XEXP (tem, 0), 1))))) { /* TEM is not the same as what we'll be replacing the pseudo with after reload, put a USE in front of INSN in the final reload pass. */ if (replace_reloads && num_not_at_initial_offset && ! rtx_equal_p (tem, reg_equiv_mem[regno])) { *loc = tem; /* We mark the USE with QImode so that we recognize it as one that can be safely deleted at the end of reload. */ PUT_MODE (emit_insn_before (gen_rtx_USE (VOIDmode, ad), insn), QImode); /* This doesn't really count as replacing the address as a whole, since it is still a memory access. */ } return 0; } ad = tem; } } /* The only remaining case where we can avoid a reload is if this is a hard register that is valid as a base register and which is not the subject of a CLOBBER in this insn. 
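For example (hard register number made up): an address that is simply (reg 6), where register 6 is a hard register satisfying REGNO_MODE_OK_FOR_BASE_P for MODE and is not clobbered by this insn, needs no reload at all; a register that fails these tests falls through to the push_reload call below, which reloads it into MODE_BASE_REG_CLASS (mode).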
*/ else if (regno < FIRST_PSEUDO_REGISTER && REGNO_MODE_OK_FOR_BASE_P (regno, mode) && ! regno_clobbered_p (regno, this_insn, mode, 0)) return 0; /* If we do not have one of the cases above, we must do the reload. */ push_reload (ad, NULL_RTX, loc, (rtx*) 0, MODE_BASE_REG_CLASS (mode), GET_MODE (ad), VOIDmode, 0, 0, opnum, type); return 1; } if (strict_memory_address_p (mode, ad)) { /* The address appears valid, so reloads are not needed. But the address may contain an eliminable register. This can happen because a machine with indirect addressing may consider a pseudo register by itself a valid address even when it has failed to get a hard reg. So do a tree-walk to find and eliminate all such regs. */ /* But first quickly dispose of a common case. */ if (GET_CODE (ad) == PLUS && GET_CODE (XEXP (ad, 1)) == CONST_INT && REG_P (XEXP (ad, 0)) && reg_equiv_constant[REGNO (XEXP (ad, 0))] == 0) return 0; subst_reg_equivs_changed = 0; *loc = subst_reg_equivs (ad, insn); if (! subst_reg_equivs_changed) return 0; /* Check result for validity after substitution. */ if (strict_memory_address_p (mode, ad)) return 0; } #ifdef LEGITIMIZE_RELOAD_ADDRESS do { if (memrefloc) { LEGITIMIZE_RELOAD_ADDRESS (ad, GET_MODE (*memrefloc), opnum, type, ind_levels, win); } break; win: *memrefloc = copy_rtx (*memrefloc); XEXP (*memrefloc, 0) = ad; move_replacements (&ad, &XEXP (*memrefloc, 0)); return 1; } while (0); #endif /* The address is not valid. We have to figure out why. First see if we have an outer AND and remove it if so. Then analyze what's inside. */ if (GET_CODE (ad) == AND) { removed_and = 1; loc = &XEXP (ad, 0); ad = *loc; } /* One possibility for why the address is invalid is that it is itself a MEM. This can happen when the frame pointer is being eliminated, a pseudo is not allocated to a hard register, and the offset between the frame and stack pointers is not its initial value. In that case the pseudo will have been replaced by a MEM referring to the stack pointer. */ if (MEM_P (ad)) { /* First ensure that the address in this MEM is valid. Then, unless indirect addresses are valid, reload the MEM into a register. */ tem = ad; find_reloads_address (GET_MODE (ad), &tem, XEXP (ad, 0), &XEXP (ad, 0), opnum, ADDR_TYPE (type), ind_levels == 0 ? 0 : ind_levels - 1, insn); /* If tem was changed, then we must create a new memory reference to hold it and store it back into memrefloc. */ if (tem != ad && memrefloc) { *memrefloc = copy_rtx (*memrefloc); copy_replacements (tem, XEXP (*memrefloc, 0)); loc = &XEXP (*memrefloc, 0); if (removed_and) loc = &XEXP (*loc, 0); } /* Check similar cases as for indirect addresses as above except that we can allow pseudos and a MEM since they should have been taken care of above. */ if (ind_levels == 0 || (GET_CODE (XEXP (tem, 0)) == SYMBOL_REF && ! indirect_symref_ok) || MEM_P (XEXP (tem, 0)) || ! (REG_P (XEXP (tem, 0)) || (GET_CODE (XEXP (tem, 0)) == PLUS && REG_P (XEXP (XEXP (tem, 0), 0)) && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT))) { /* Must use TEM here, not AD, since it is the one that will have any subexpressions reloaded, if needed. */ push_reload (tem, NULL_RTX, loc, (rtx*) 0, MODE_BASE_REG_CLASS (mode), GET_MODE (tem), VOIDmode, 0, 0, opnum, type); return ! removed_and; } else return 0; } /* If we have address of a stack slot but it's not valid because the displacement is too large, compute the sum in a register. 
Handle all base registers here, not just fp/ap/sp, because on some targets (namely SH) we can also get too large displacements from big-endian corrections. */ else if (GET_CODE (ad) == PLUS && REG_P (XEXP (ad, 0)) && REGNO (XEXP (ad, 0)) < FIRST_PSEUDO_REGISTER && REG_MODE_OK_FOR_BASE_P (XEXP (ad, 0), mode) && GET_CODE (XEXP (ad, 1)) == CONST_INT) { /* Unshare the MEM rtx so we can safely alter it. */ if (memrefloc) { *memrefloc = copy_rtx (*memrefloc); loc = &XEXP (*memrefloc, 0); if (removed_and) loc = &XEXP (*loc, 0); } if (double_reg_address_ok) { /* Unshare the sum as well. */ *loc = ad = copy_rtx (ad); /* Reload the displacement into an index reg. We assume the frame pointer or arg pointer is a base reg. */ find_reloads_address_part (XEXP (ad, 1), &XEXP (ad, 1), INDEX_REG_CLASS, GET_MODE (ad), opnum, type, ind_levels); return 0; } else { /* If the sum of two regs is not necessarily valid, reload the sum into a base reg. That will at least work. */ find_reloads_address_part (ad, loc, MODE_BASE_REG_CLASS (mode), Pmode, opnum, type, ind_levels); } return ! removed_and; } /* If we have an indexed stack slot, there are three possible reasons why it might be invalid: The index might need to be reloaded, the address might have been made by frame pointer elimination and hence have a constant out of range, or both reasons might apply. We can easily check for an index needing reload, but even if that is the case, we might also have an invalid constant. To avoid making the conservative assumption and requiring two reloads, we see if this address is valid when not interpreted strictly. If it is, the only problem is that the index needs a reload and find_reloads_address_1 will take care of it. Handle all base registers here, not just fp/ap/sp, because on some targets (namely SPARC) we can also get invalid addresses from preventive subreg big-endian corrections made by find_reloads_toplev. If we decide to do something, it must be that `double_reg_address_ok' is true. We generate a reload of the base register + constant and rework the sum so that the reload register will be added to the index. This is safe because we know the address isn't shared. We check for the base register as both the first and second operand of the innermost PLUS. */ else if (GET_CODE (ad) == PLUS && GET_CODE (XEXP (ad, 1)) == CONST_INT && GET_CODE (XEXP (ad, 0)) == PLUS && REG_P (XEXP (XEXP (ad, 0), 0)) && REGNO (XEXP (XEXP (ad, 0), 0)) < FIRST_PSEUDO_REGISTER && (REG_MODE_OK_FOR_BASE_P (XEXP (XEXP (ad, 0), 0), mode) || XEXP (XEXP (ad, 0), 0) == frame_pointer_rtx #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM || XEXP (XEXP (ad, 0), 0) == hard_frame_pointer_rtx #endif #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM || XEXP (XEXP (ad, 0), 0) == arg_pointer_rtx #endif || XEXP (XEXP (ad, 0), 0) == stack_pointer_rtx) && ! 
maybe_memory_address_p (mode, ad, &XEXP (XEXP (ad, 0), 1))) { *loc = ad = gen_rtx_PLUS (GET_MODE (ad), plus_constant (XEXP (XEXP (ad, 0), 0), INTVAL (XEXP (ad, 1))), XEXP (XEXP (ad, 0), 1)); find_reloads_address_part (XEXP (ad, 0), &XEXP (ad, 0), MODE_BASE_REG_CLASS (mode), GET_MODE (ad), opnum, type, ind_levels); find_reloads_address_1 (mode, XEXP (ad, 1), 1, &XEXP (ad, 1), opnum, type, 0, insn); return 0; } else if (GET_CODE (ad) == PLUS && GET_CODE (XEXP (ad, 1)) == CONST_INT && GET_CODE (XEXP (ad, 0)) == PLUS && REG_P (XEXP (XEXP (ad, 0), 1)) && REGNO (XEXP (XEXP (ad, 0), 1)) < FIRST_PSEUDO_REGISTER && (REG_MODE_OK_FOR_BASE_P (XEXP (XEXP (ad, 0), 1), mode) || XEXP (XEXP (ad, 0), 1) == frame_pointer_rtx #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM || XEXP (XEXP (ad, 0), 1) == hard_frame_pointer_rtx #endif #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM || XEXP (XEXP (ad, 0), 1) == arg_pointer_rtx #endif || XEXP (XEXP (ad, 0), 1) == stack_pointer_rtx) && ! maybe_memory_address_p (mode, ad, &XEXP (XEXP (ad, 0), 0))) { *loc = ad = gen_rtx_PLUS (GET_MODE (ad), XEXP (XEXP (ad, 0), 0), plus_constant (XEXP (XEXP (ad, 0), 1), INTVAL (XEXP (ad, 1)))); find_reloads_address_part (XEXP (ad, 1), &XEXP (ad, 1), MODE_BASE_REG_CLASS (mode), GET_MODE (ad), opnum, type, ind_levels); find_reloads_address_1 (mode, XEXP (ad, 0), 1, &XEXP (ad, 0), opnum, type, 0, insn); return 0; } /* See if address becomes valid when an eliminable register in a sum is replaced. */ tem = ad; if (GET_CODE (ad) == PLUS) tem = subst_indexed_address (ad); if (tem != ad && strict_memory_address_p (mode, tem)) { /* Ok, we win that way. Replace any additional eliminable registers. */ subst_reg_equivs_changed = 0; tem = subst_reg_equivs (tem, insn); /* Make sure that didn't make the address invalid again. */ if (! subst_reg_equivs_changed || strict_memory_address_p (mode, tem)) { *loc = tem; return 0; } } /* If constants aren't valid addresses, reload the constant address into a register. */ if (CONSTANT_P (ad) && ! strict_memory_address_p (mode, ad)) { /* If AD is an address in the constant pool, the MEM rtx may be shared. Unshare it so we can safely alter it. */ if (memrefloc && GET_CODE (ad) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (ad)) { *memrefloc = copy_rtx (*memrefloc); loc = &XEXP (*memrefloc, 0); if (removed_and) loc = &XEXP (*loc, 0); } find_reloads_address_part (ad, loc, MODE_BASE_REG_CLASS (mode), Pmode, opnum, type, ind_levels); return ! removed_and; } return find_reloads_address_1 (mode, ad, 0, loc, opnum, type, ind_levels, insn); } /* Find all pseudo regs appearing in AD that are eliminable in favor of equivalent values and do not have hard regs; replace them by their equivalents. INSN, if nonzero, is the insn in which we do the reload. We put USEs in front of it for pseudos that we have to replace with stack slots. */ static rtx subst_reg_equivs (rtx ad, rtx insn) { RTX_CODE code = GET_CODE (ad); int i; const char *fmt; switch (code) { case HIGH: case CONST_INT: case CONST: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case PC: case CC0: return ad; case REG: { int regno = REGNO (ad); if (reg_equiv_constant[regno] != 0) { subst_reg_equivs_changed = 1; return reg_equiv_constant[regno]; } if (reg_equiv_memory_loc[regno] && num_not_at_initial_offset) { rtx mem = make_memloc (ad, regno); if (! rtx_equal_p (mem, reg_equiv_mem[regno])) { subst_reg_equivs_changed = 1; /* We mark the USE with QImode so that we recognize it as one that can be safely deleted at the end of reload. 
*/ PUT_MODE (emit_insn_before (gen_rtx_USE (VOIDmode, ad), insn), QImode); return mem; } } } return ad; case PLUS: /* Quickly dispose of a common case. */ if (XEXP (ad, 0) == frame_pointer_rtx && GET_CODE (XEXP (ad, 1)) == CONST_INT) return ad; break; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') XEXP (ad, i) = subst_reg_equivs (XEXP (ad, i), insn); return ad; } /* Compute the sum of X and Y, making canonicalizations assumed in an address, namely: sum constant integers, surround the sum of two constants with a CONST, put the constant as the second operand, and group the constant on the outermost sum. This routine assumes both inputs are already in canonical form. */ rtx form_sum (rtx x, rtx y) { rtx tem; enum machine_mode mode = GET_MODE (x); if (mode == VOIDmode) mode = GET_MODE (y); if (mode == VOIDmode) mode = Pmode; if (GET_CODE (x) == CONST_INT) return plus_constant (y, INTVAL (x)); else if (GET_CODE (y) == CONST_INT) return plus_constant (x, INTVAL (y)); else if (CONSTANT_P (x)) tem = x, x = y, y = tem; if (GET_CODE (x) == PLUS && CONSTANT_P (XEXP (x, 1))) return form_sum (XEXP (x, 0), form_sum (XEXP (x, 1), y)); /* Note that if the operands of Y are specified in the opposite order in the recursive calls below, infinite recursion will occur. */ if (GET_CODE (y) == PLUS && CONSTANT_P (XEXP (y, 1))) return form_sum (form_sum (x, XEXP (y, 0)), XEXP (y, 1)); /* If both constant, encapsulate sum. Otherwise, just form sum. A constant will have been placed second. */ if (CONSTANT_P (x) && CONSTANT_P (y)) { if (GET_CODE (x) == CONST) x = XEXP (x, 0); if (GET_CODE (y) == CONST) y = XEXP (y, 0); return gen_rtx_CONST (VOIDmode, gen_rtx_PLUS (mode, x, y)); } return gen_rtx_PLUS (mode, x, y); } /* If ADDR is a sum containing a pseudo register that should be replaced with a constant (from reg_equiv_constant), return the result of doing so, and also apply the associative law so that the result is more likely to be a valid address. (But it is not guaranteed to be one.) Note that at most one register is replaced, even if more are replaceable. Also, we try to put the result into a canonical form so it is more likely to be a valid address. In all other cases, return ADDR. */ static rtx subst_indexed_address (rtx addr) { rtx op0 = 0, op1 = 0, op2 = 0; rtx tem; int regno; if (GET_CODE (addr) == PLUS) { /* Try to find a register to replace. */ op0 = XEXP (addr, 0), op1 = XEXP (addr, 1), op2 = 0; if (REG_P (op0) && (regno = REGNO (op0)) >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0 && reg_equiv_constant[regno] != 0) op0 = reg_equiv_constant[regno]; else if (REG_P (op1) && (regno = REGNO (op1)) >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] < 0 && reg_equiv_constant[regno] != 0) op1 = reg_equiv_constant[regno]; else if (GET_CODE (op0) == PLUS && (tem = subst_indexed_address (op0)) != op0) op0 = tem; else if (GET_CODE (op1) == PLUS && (tem = subst_indexed_address (op1)) != op1) op1 = tem; else return addr; /* Pick out up to three things to add. */ if (GET_CODE (op1) == PLUS) op2 = XEXP (op1, 1), op1 = XEXP (op1, 0); else if (GET_CODE (op0) == PLUS) op2 = op1, op1 = XEXP (op0, 1), op0 = XEXP (op0, 0); /* Compute the sum. */ if (op2 != 0) op1 = form_sum (op1, op2); if (op1 != 0) op0 = form_sum (op0, op1); return op0; } return addr; } /* Update the REG_INC notes for an insn. It updates all REG_INC notes for the instruction which refer to REGNO the to refer to the reload number. INSN is the insn for which any REG_INC notes need updating. 
REGNO is the register number which has been reloaded. RELOADNUM is the reload number. */ static void update_auto_inc_notes (rtx insn ATTRIBUTE_UNUSED, int regno ATTRIBUTE_UNUSED, int reloadnum ATTRIBUTE_UNUSED) { #ifdef AUTO_INC_DEC rtx link; for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_INC && (int) REGNO (XEXP (link, 0)) == regno) push_replacement (&XEXP (link, 0), reloadnum, VOIDmode); #endif } /* Record the pseudo registers we must reload into hard registers in a subexpression of a would-be memory address, X referring to a value in mode MODE. (This function is not called if the address we find is strictly valid.) CONTEXT = 1 means we are considering regs as index regs, = 0 means we are considering them as base regs. OPNUM and TYPE specify the purpose of any reloads made. IND_LEVELS says how many levels of indirect addressing are supported at this point in the address. INSN, if nonzero, is the insn in which we do the reload. It is used to determine if we may generate output reloads. We return nonzero if X, as a whole, is reloaded or replaced. */ /* Note that we take shortcuts assuming that no multi-reg machine mode occurs as part of an address. Also, this is not fully machine-customizable; it works for machines such as VAXen and 68000's and 32000's, but other possible machines could have addressing modes that this does not handle right. */ static int find_reloads_address_1 (enum machine_mode mode, rtx x, int context, rtx *loc, int opnum, enum reload_type type, int ind_levels, rtx insn) { RTX_CODE code = GET_CODE (x); switch (code) { case PLUS: { rtx orig_op0 = XEXP (x, 0); rtx orig_op1 = XEXP (x, 1); RTX_CODE code0 = GET_CODE (orig_op0); RTX_CODE code1 = GET_CODE (orig_op1); rtx op0 = orig_op0; rtx op1 = orig_op1; if (GET_CODE (op0) == SUBREG) { op0 = SUBREG_REG (op0); code0 = GET_CODE (op0); if (code0 == REG && REGNO (op0) < FIRST_PSEUDO_REGISTER) op0 = gen_rtx_REG (word_mode, (REGNO (op0) + subreg_regno_offset (REGNO (SUBREG_REG (orig_op0)), GET_MODE (SUBREG_REG (orig_op0)), SUBREG_BYTE (orig_op0), GET_MODE (orig_op0)))); } if (GET_CODE (op1) == SUBREG) { op1 = SUBREG_REG (op1); code1 = GET_CODE (op1); if (code1 == REG && REGNO (op1) < FIRST_PSEUDO_REGISTER) /* ??? Why is this given op1's mode and above for ??? op0 SUBREGs we use word_mode? */ op1 = gen_rtx_REG (GET_MODE (op1), (REGNO (op1) + subreg_regno_offset (REGNO (SUBREG_REG (orig_op1)), GET_MODE (SUBREG_REG (orig_op1)), SUBREG_BYTE (orig_op1), GET_MODE (orig_op1)))); } /* Plus in the index register may be created only as a result of register remateralization for expression like &localvar*4. Reload it. It may be possible to combine the displacement on the outer level, but it is probably not worthwhile to do so. */ if (context) { find_reloads_address (GET_MODE (x), loc, XEXP (x, 0), &XEXP (x, 0), opnum, ADDR_TYPE (type), ind_levels, insn); push_reload (*loc, NULL_RTX, loc, (rtx*) 0, (context ? 
INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), VOIDmode, 0, 0, opnum, type); return 1; } if (code0 == MULT || code0 == SIGN_EXTEND || code0 == TRUNCATE || code0 == ZERO_EXTEND || code1 == MEM) { find_reloads_address_1 (mode, orig_op0, 1, &XEXP (x, 0), opnum, type, ind_levels, insn); find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum, type, ind_levels, insn); } else if (code1 == MULT || code1 == SIGN_EXTEND || code1 == TRUNCATE || code1 == ZERO_EXTEND || code0 == MEM) { find_reloads_address_1 (mode, orig_op0, 0, &XEXP (x, 0), opnum, type, ind_levels, insn); find_reloads_address_1 (mode, orig_op1, 1, &XEXP (x, 1), opnum, type, ind_levels, insn); } else if (code0 == CONST_INT || code0 == CONST || code0 == SYMBOL_REF || code0 == LABEL_REF) find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum, type, ind_levels, insn); else if (code1 == CONST_INT || code1 == CONST || code1 == SYMBOL_REF || code1 == LABEL_REF) find_reloads_address_1 (mode, orig_op0, 0, &XEXP (x, 0), opnum, type, ind_levels, insn); else if (code0 == REG && code1 == REG) { if (REG_OK_FOR_INDEX_P (op0) && REG_MODE_OK_FOR_BASE_P (op1, mode)) return 0; else if (REG_OK_FOR_INDEX_P (op1) && REG_MODE_OK_FOR_BASE_P (op0, mode)) return 0; else if (REG_MODE_OK_FOR_BASE_P (op1, mode)) find_reloads_address_1 (mode, orig_op0, 1, &XEXP (x, 0), opnum, type, ind_levels, insn); else if (REG_MODE_OK_FOR_BASE_P (op0, mode)) find_reloads_address_1 (mode, orig_op1, 1, &XEXP (x, 1), opnum, type, ind_levels, insn); else if (REG_OK_FOR_INDEX_P (op1)) find_reloads_address_1 (mode, orig_op0, 0, &XEXP (x, 0), opnum, type, ind_levels, insn); else if (REG_OK_FOR_INDEX_P (op0)) find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum, type, ind_levels, insn); else { find_reloads_address_1 (mode, orig_op0, 1, &XEXP (x, 0), opnum, type, ind_levels, insn); find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum, type, ind_levels, insn); } } else if (code0 == REG) { find_reloads_address_1 (mode, orig_op0, 1, &XEXP (x, 0), opnum, type, ind_levels, insn); find_reloads_address_1 (mode, orig_op1, 0, &XEXP (x, 1), opnum, type, ind_levels, insn); } else if (code1 == REG) { find_reloads_address_1 (mode, orig_op1, 1, &XEXP (x, 1), opnum, type, ind_levels, insn); find_reloads_address_1 (mode, orig_op0, 0, &XEXP (x, 0), opnum, type, ind_levels, insn); } } return 0; case POST_MODIFY: case PRE_MODIFY: { rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); if (GET_CODE (op1) != PLUS && GET_CODE (op1) != MINUS) return 0; /* Currently, we only support {PRE,POST}_MODIFY constructs where a base register is {inc,dec}remented by the contents of another register or by a constant value. Thus, these operands must match. */ if (op0 != XEXP (op1, 0)) abort (); /* Require index register (or constant). Let's just handle the register case in the meantime... If the target allows auto-modify by a constant then we could try replacing a pseudo register with its equivalent constant where applicable. */ if (REG_P (XEXP (op1, 1))) if (!REGNO_OK_FOR_INDEX_P (REGNO (XEXP (op1, 1)))) find_reloads_address_1 (mode, XEXP (op1, 1), 1, &XEXP (op1, 1), opnum, type, ind_levels, insn); if (REG_P (XEXP (op1, 0))) { int regno = REGNO (XEXP (op1, 0)); int reloadnum; /* A register that is incremented cannot be constant! */ if (regno >= FIRST_PSEUDO_REGISTER && reg_equiv_constant[regno] != 0) abort (); /* Handle a register that is equivalent to a memory location which cannot be addressed directly. 
*/ if (reg_equiv_memory_loc[regno] != 0 && (reg_equiv_address[regno] != 0 || num_not_at_initial_offset)) { rtx tem = make_memloc (XEXP (x, 0), regno); if (reg_equiv_address[regno] || ! rtx_equal_p (tem, reg_equiv_mem[regno])) { /* First reload the memory location's address. We can't use ADDR_TYPE (type) here, because we need to write back the value after reading it, hence we actually need two registers. */ find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0), &XEXP (tem, 0), opnum, RELOAD_OTHER, ind_levels, insn); /* Then reload the memory location into a base register. */ reloadnum = push_reload (tem, tem, &XEXP (x, 0), &XEXP (op1, 0), MODE_BASE_REG_CLASS (mode), GET_MODE (x), GET_MODE (x), 0, 0, opnum, RELOAD_OTHER); update_auto_inc_notes (this_insn, regno, reloadnum); return 0; } } if (reg_renumber[regno] >= 0) regno = reg_renumber[regno]; /* We require a base register here... */ if (!REGNO_MODE_OK_FOR_BASE_P (regno, GET_MODE (x))) { reloadnum = push_reload (XEXP (op1, 0), XEXP (x, 0), &XEXP (op1, 0), &XEXP (x, 0), MODE_BASE_REG_CLASS (mode), GET_MODE (x), GET_MODE (x), 0, 0, opnum, RELOAD_OTHER); update_auto_inc_notes (this_insn, regno, reloadnum); return 0; } } else abort (); } return 0; case POST_INC: case POST_DEC: case PRE_INC: case PRE_DEC: if (REG_P (XEXP (x, 0))) { int regno = REGNO (XEXP (x, 0)); int value = 0; rtx x_orig = x; /* A register that is incremented cannot be constant! */ if (regno >= FIRST_PSEUDO_REGISTER && reg_equiv_constant[regno] != 0) abort (); /* Handle a register that is equivalent to a memory location which cannot be addressed directly. */ if (reg_equiv_memory_loc[regno] != 0 && (reg_equiv_address[regno] != 0 || num_not_at_initial_offset)) { rtx tem = make_memloc (XEXP (x, 0), regno); if (reg_equiv_address[regno] || ! rtx_equal_p (tem, reg_equiv_mem[regno])) { /* First reload the memory location's address. We can't use ADDR_TYPE (type) here, because we need to write back the value after reading it, hence we actually need two registers. */ find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0), &XEXP (tem, 0), opnum, type, ind_levels, insn); /* Put this inside a new increment-expression. */ x = gen_rtx_fmt_e (GET_CODE (x), GET_MODE (x), tem); /* Proceed to reload that, as if it contained a register. */ } } /* If we have a hard register that is ok as an index, don't make a reload. If an autoincrement of a nice register isn't "valid", it must be that no autoincrement is "valid". If that is true and something made an autoincrement anyway, this must be a special context where one is allowed. (For example, a "push" instruction.) We can't improve this address, so leave it alone. */ /* Otherwise, reload the autoincrement into a suitable hard reg and record how much to increment by. */ if (reg_renumber[regno] >= 0) regno = reg_renumber[regno]; if ((regno >= FIRST_PSEUDO_REGISTER || !(context ? REGNO_OK_FOR_INDEX_P (regno) : REGNO_MODE_OK_FOR_BASE_P (regno, mode)))) { int reloadnum; /* If we can output the register afterwards, do so, this saves the extra update. We can do so if we have an INSN - i.e. no JUMP_INSN nor CALL_INSN - and it does not set CC0. But don't do this if we cannot directly address the memory location, since this will make it harder to reuse address reloads, and increases register pressure. Also don't do this if we can probably update x directly. */ rtx equiv = (MEM_P (XEXP (x, 0)) ? 
XEXP (x, 0) : reg_equiv_mem[regno]); int icode = (int) add_optab->handlers[(int) Pmode].insn_code; if (insn && GET_CODE (insn) == INSN && equiv && memory_operand (equiv, GET_MODE (equiv)) #ifdef HAVE_cc0 && ! sets_cc0_p (PATTERN (insn)) #endif && ! (icode != CODE_FOR_nothing && ((*insn_data[icode].operand[0].predicate) (equiv, Pmode)) && ((*insn_data[icode].operand[1].predicate) (equiv, Pmode)))) { /* We use the original pseudo for loc, so that emit_reload_insns() knows which pseudo this reload refers to and updates the pseudo rtx, not its equivalent memory location, as well as the corresponding entry in reg_last_reload_reg. */ loc = &XEXP (x_orig, 0); x = XEXP (x, 0); reloadnum = push_reload (x, x, loc, loc, (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), GET_MODE (x), 0, 0, opnum, RELOAD_OTHER); } else { reloadnum = push_reload (x, NULL_RTX, loc, (rtx*) 0, (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), GET_MODE (x), 0, 0, opnum, type); rld[reloadnum].inc = find_inc_amount (PATTERN (this_insn), XEXP (x_orig, 0)); value = 1; } update_auto_inc_notes (this_insn, REGNO (XEXP (x_orig, 0)), reloadnum); } return value; } else if (MEM_P (XEXP (x, 0))) { /* This is probably the result of a substitution, by eliminate_regs, of an equivalent address for a pseudo that was not allocated to a hard register. Verify that the specified address is valid and reload it into a register. */ /* Variable `tem' might or might not be used in FIND_REG_INC_NOTE. */ rtx tem ATTRIBUTE_UNUSED = XEXP (x, 0); rtx link; int reloadnum; /* Since we know we are going to reload this item, don't decrement for the indirection level. Note that this is actually conservative: it would be slightly more efficient to use the value of SPILL_INDIRECT_LEVELS from reload1.c here. */ /* We can't use ADDR_TYPE (type) here, because we need to write back the value after reading it, hence we actually need two registers. */ find_reloads_address (GET_MODE (x), &XEXP (x, 0), XEXP (XEXP (x, 0), 0), &XEXP (XEXP (x, 0), 0), opnum, type, ind_levels, insn); reloadnum = push_reload (x, NULL_RTX, loc, (rtx*) 0, (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), VOIDmode, 0, 0, opnum, type); rld[reloadnum].inc = find_inc_amount (PATTERN (this_insn), XEXP (x, 0)); link = FIND_REG_INC_NOTE (this_insn, tem); if (link != 0) push_replacement (&XEXP (link, 0), reloadnum, VOIDmode); return 1; } return 0; case MEM: /* This is probably the result of a substitution, by eliminate_regs, of an equivalent address for a pseudo that was not allocated to a hard register. Verify that the specified address is valid and reload it into a register. Since we know we are going to reload this item, don't decrement for the indirection level. Note that this is actually conservative: it would be slightly more efficient to use the value of SPILL_INDIRECT_LEVELS from reload1.c here. */ find_reloads_address (GET_MODE (x), loc, XEXP (x, 0), &XEXP (x, 0), opnum, ADDR_TYPE (type), ind_levels, insn); push_reload (*loc, NULL_RTX, loc, (rtx*) 0, (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), VOIDmode, 0, 0, opnum, type); return 1; case REG: { int regno = REGNO (x); if (reg_equiv_constant[regno] != 0) { find_reloads_address_part (reg_equiv_constant[regno], loc, (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), opnum, type, ind_levels); return 1; } #if 0 /* This might screw code in reload1.c to delete prior output-reload that feeds this insn. 
*/ if (reg_equiv_mem[regno] != 0) { push_reload (reg_equiv_mem[regno], NULL_RTX, loc, (rtx*) 0, (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), VOIDmode, 0, 0, opnum, type); return 1; } #endif if (reg_equiv_memory_loc[regno] && (reg_equiv_address[regno] != 0 || num_not_at_initial_offset)) { rtx tem = make_memloc (x, regno); if (reg_equiv_address[regno] != 0 || ! rtx_equal_p (tem, reg_equiv_mem[regno])) { x = tem; find_reloads_address (GET_MODE (x), &x, XEXP (x, 0), &XEXP (x, 0), opnum, ADDR_TYPE (type), ind_levels, insn); } } if (reg_renumber[regno] >= 0) regno = reg_renumber[regno]; if ((regno >= FIRST_PSEUDO_REGISTER || !(context ? REGNO_OK_FOR_INDEX_P (regno) : REGNO_MODE_OK_FOR_BASE_P (regno, mode)))) { push_reload (x, NULL_RTX, loc, (rtx*) 0, (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), VOIDmode, 0, 0, opnum, type); return 1; } /* If a register appearing in an address is the subject of a CLOBBER in this insn, reload it into some other register to be safe. The CLOBBER is supposed to make the register unavailable from before this insn to after it. */ if (regno_clobbered_p (regno, this_insn, GET_MODE (x), 0)) { push_reload (x, NULL_RTX, loc, (rtx*) 0, (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), VOIDmode, 0, 0, opnum, type); return 1; } } return 0; case SUBREG: if (REG_P (SUBREG_REG (x))) { /* If this is a SUBREG of a hard register and the resulting register is of the wrong class, reload the whole SUBREG. This avoids needless copies if SUBREG_REG is multi-word. */ if (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER) { int regno ATTRIBUTE_UNUSED = subreg_regno (x); if (! (context ? REGNO_OK_FOR_INDEX_P (regno) : REGNO_MODE_OK_FOR_BASE_P (regno, mode))) { push_reload (x, NULL_RTX, loc, (rtx*) 0, (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)), GET_MODE (x), VOIDmode, 0, 0, opnum, type); return 1; } } /* If this is a SUBREG of a pseudo-register, and the pseudo-register is larger than the class size, then reload the whole SUBREG. */ else { enum reg_class class = (context ? INDEX_REG_CLASS : MODE_BASE_REG_CLASS (mode)); if ((unsigned) CLASS_MAX_NREGS (class, GET_MODE (SUBREG_REG (x))) > reg_class_size[class]) { x = find_reloads_subreg_address (x, 0, opnum, type, ind_levels, insn); push_reload (x, NULL_RTX, loc, (rtx*) 0, class, GET_MODE (x), VOIDmode, 0, 0, opnum, type); return 1; } } } break; default: break; } { const char *fmt = GET_RTX_FORMAT (code); int i; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') find_reloads_address_1 (mode, XEXP (x, i), context, &XEXP (x, i), opnum, type, ind_levels, insn); } } return 0; } /* X, which is found at *LOC, is a part of an address that needs to be reloaded into a register of class CLASS. If X is a constant, or if X is a PLUS that contains a constant, check that the constant is a legitimate operand and that we are supposed to be able to load it into the register. If not, force the constant into memory and reload the MEM instead. MODE is the mode to use, in case X is an integer constant. OPNUM and TYPE describe the purpose of any reloads made. IND_LEVELS says how many levels of indirect addressing this machine supports. */ static void find_reloads_address_part (rtx x, rtx *loc, enum reg_class class, enum machine_mode mode, int opnum, enum reload_type type, int ind_levels) { if (CONSTANT_P (x) && (! 
LEGITIMATE_CONSTANT_P (x) || PREFERRED_RELOAD_CLASS (x, class) == NO_REGS)) { rtx tem; tem = x = force_const_mem (mode, x); find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0), opnum, type, ind_levels, 0); } else if (GET_CODE (x) == PLUS && CONSTANT_P (XEXP (x, 1)) && (! LEGITIMATE_CONSTANT_P (XEXP (x, 1)) || PREFERRED_RELOAD_CLASS (XEXP (x, 1), class) == NO_REGS)) { rtx tem; tem = force_const_mem (GET_MODE (x), XEXP (x, 1)); x = gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), tem); find_reloads_address (mode, &tem, XEXP (tem, 0), &XEXP (tem, 0), opnum, type, ind_levels, 0); } push_reload (x, NULL_RTX, loc, (rtx*) 0, class, mode, VOIDmode, 0, 0, opnum, type); } /* X, a subreg of a pseudo, is a part of an address that needs to be reloaded. If the pseudo is equivalent to a memory location that cannot be directly addressed, make the necessary address reloads. If address reloads have been necessary, or if the address is changed by register elimination, return the rtx of the memory location; otherwise, return X. If FORCE_REPLACE is nonzero, unconditionally replace the subreg with the memory location. OPNUM and TYPE identify the purpose of the reload. IND_LEVELS says how many levels of indirect addressing are supported at this point in the address. INSN, if nonzero, is the insn in which we do the reload. It is used to determine where to put USEs for pseudos that we have to replace with stack slots. */ static rtx find_reloads_subreg_address (rtx x, int force_replace, int opnum, enum reload_type type, int ind_levels, rtx insn) { int regno = REGNO (SUBREG_REG (x)); if (reg_equiv_memory_loc[regno]) { /* If the address is not directly addressable, or if the address is not offsettable, then it must be replaced. */ if (! force_replace && (reg_equiv_address[regno] || ! offsettable_memref_p (reg_equiv_mem[regno]))) force_replace = 1; if (force_replace || num_not_at_initial_offset) { rtx tem = make_memloc (SUBREG_REG (x), regno); /* If the address changes because of register elimination, then it must be replaced. */ if (force_replace || ! rtx_equal_p (tem, reg_equiv_mem[regno])) { unsigned outer_size = GET_MODE_SIZE (GET_MODE (x)); unsigned inner_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))); int offset; /* For big-endian paradoxical subregs, SUBREG_BYTE does not hold the correct (negative) byte offset. */ if (BYTES_BIG_ENDIAN && outer_size > inner_size) offset = inner_size - outer_size; else offset = SUBREG_BYTE (x); XEXP (tem, 0) = plus_constant (XEXP (tem, 0), offset); PUT_MODE (tem, GET_MODE (x)); /* If this was a paradoxical subreg that we replaced, the resulting memory must be sufficiently aligned to allow us to widen the mode of the memory. */ if (outer_size > inner_size && STRICT_ALIGNMENT) { rtx base; base = XEXP (tem, 0); if (GET_CODE (base) == PLUS) { if (GET_CODE (XEXP (base, 1)) == CONST_INT && INTVAL (XEXP (base, 1)) % outer_size != 0) return x; base = XEXP (base, 0); } if (!REG_P (base) || (REGNO_POINTER_ALIGN (REGNO (base)) < outer_size * BITS_PER_UNIT)) return x; } find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0), &XEXP (tem, 0), opnum, ADDR_TYPE (type), ind_levels, insn); /* If this is not a toplevel operand, find_reloads doesn't see this substitution. We have to emit a USE of the pseudo so that delete_output_reload can see it. */ if (replace_reloads && recog_data.operand[opnum] != x) /* We mark the USE with QImode so that we recognize it as one that can be safely deleted at the end of reload. 
*/ PUT_MODE (emit_insn_before (gen_rtx_USE (VOIDmode, SUBREG_REG (x)), insn), QImode); x = tem; } } } return x; } /* Substitute into the current INSN the registers into which we have reloaded the things that need reloading. The array `replacements' contains the locations of all pointers that must be changed and says what to replace them with. Return the rtx that X translates into; usually X, but modified. */ void subst_reloads (rtx insn) { int i; for (i = 0; i < n_replacements; i++) { struct replacement *r = &replacements[i]; rtx reloadreg = rld[r->what].reg_rtx; if (reloadreg) { #ifdef ENABLE_CHECKING /* Internal consistency test. Check that we don't modify anything in the equivalence arrays. Whenever something from those arrays needs to be reloaded, it must be unshared before being substituted into; the equivalence must not be modified. Otherwise, if the equivalence is used after that, it will have been modified, and the thing substituted (probably a register) is likely overwritten and not a usable equivalence. */ int check_regno; for (check_regno = 0; check_regno < max_regno; check_regno++) { #define CHECK_MODF(ARRAY) \ if (ARRAY[check_regno] \ && loc_mentioned_in_p (r->where, \ ARRAY[check_regno])) \ abort () CHECK_MODF (reg_equiv_constant); CHECK_MODF (reg_equiv_memory_loc); CHECK_MODF (reg_equiv_address); CHECK_MODF (reg_equiv_mem); #undef CHECK_MODF } #endif /* ENABLE_CHECKING */ /* If we're replacing a LABEL_REF with a register, add a REG_LABEL note to indicate to flow which label this register refers to. */ if (GET_CODE (*r->where) == LABEL_REF && GET_CODE (insn) == JUMP_INSN) REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (*r->where, 0), REG_NOTES (insn)); /* Encapsulate RELOADREG so its machine mode matches what used to be there. Note that gen_lowpart_common will do the wrong thing if RELOADREG is multi-word. RELOADREG will always be a REG here. */ if (GET_MODE (reloadreg) != r->mode && r->mode != VOIDmode) reloadreg = reload_adjust_reg_for_mode (reloadreg, r->mode); /* If we are putting this into a SUBREG and RELOADREG is a SUBREG, we would be making nested SUBREGs, so we have to fix this up. Note that r->where == &SUBREG_REG (*r->subreg_loc). */ if (r->subreg_loc != 0 && GET_CODE (reloadreg) == SUBREG) { if (GET_MODE (*r->subreg_loc) == GET_MODE (SUBREG_REG (reloadreg))) *r->subreg_loc = SUBREG_REG (reloadreg); else { int final_offset = SUBREG_BYTE (*r->subreg_loc) + SUBREG_BYTE (reloadreg); /* When working with SUBREGs the rule is that the byte offset must be a multiple of the SUBREG's mode. */ final_offset = (final_offset / GET_MODE_SIZE (GET_MODE (*r->subreg_loc))); final_offset = (final_offset * GET_MODE_SIZE (GET_MODE (*r->subreg_loc))); *r->where = SUBREG_REG (reloadreg); SUBREG_BYTE (*r->subreg_loc) = final_offset; } } else *r->where = reloadreg; } /* If reload got no reg and isn't optional, something's wrong. */ else if (! rld[r->what].optional) abort (); } } /* Make a copy of any replacements being done into X and move those copies to locations in Y, a copy of X. */ void copy_replacements (rtx x, rtx y) { /* We can't support X being a SUBREG because we might then need to know its location if something inside it was replaced. 
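(Illustrative sketch: copy_replacements_1 below walks X and Y in parallel, so a pending replacement aimed at, say, the second operand of a PLUS inside X gets a duplicate entry recorded that aims at the second operand of the corresponding PLUS inside Y.)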
*/ if (GET_CODE (x) == SUBREG) abort (); copy_replacements_1 (&x, &y, n_replacements); } static void copy_replacements_1 (rtx *px, rtx *py, int orig_replacements) { int i, j; rtx x, y; struct replacement *r; enum rtx_code code; const char *fmt; for (j = 0; j < orig_replacements; j++) { if (replacements[j].subreg_loc == px) { r = &replacements[n_replacements++]; r->where = replacements[j].where; r->subreg_loc = py; r->what = replacements[j].what; r->mode = replacements[j].mode; } else if (replacements[j].where == px) { r = &replacements[n_replacements++]; r->where = py; r->subreg_loc = 0; r->what = replacements[j].what; r->mode = replacements[j].mode; } } x = *px; y = *py; code = GET_CODE (x); fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') copy_replacements_1 (&XEXP (x, i), &XEXP (y, i), orig_replacements); else if (fmt[i] == 'E') for (j = XVECLEN (x, i); --j >= 0; ) copy_replacements_1 (&XVECEXP (x, i, j), &XVECEXP (y, i, j), orig_replacements); } } /* Change any replacements being done to *X to be done to *Y. */ void move_replacements (rtx *x, rtx *y) { int i; for (i = 0; i < n_replacements; i++) if (replacements[i].subreg_loc == x) replacements[i].subreg_loc = y; else if (replacements[i].where == x) { replacements[i].where = y; replacements[i].subreg_loc = 0; } } /* If LOC was scheduled to be replaced by something, return the replacement. Otherwise, return *LOC. */ rtx find_replacement (rtx *loc) { struct replacement *r; for (r = &replacements[0]; r < &replacements[n_replacements]; r++) { rtx reloadreg = rld[r->what].reg_rtx; if (reloadreg && r->where == loc) { if (r->mode != VOIDmode && GET_MODE (reloadreg) != r->mode) reloadreg = gen_rtx_REG (r->mode, REGNO (reloadreg)); return reloadreg; } else if (reloadreg && r->subreg_loc == loc) { /* RELOADREG must be either a REG or a SUBREG. ??? Is it actually still ever a SUBREG? If so, why? */ if (REG_P (reloadreg)) return gen_rtx_REG (GET_MODE (*loc), (REGNO (reloadreg) + subreg_regno_offset (REGNO (SUBREG_REG (*loc)), GET_MODE (SUBREG_REG (*loc)), SUBREG_BYTE (*loc), GET_MODE (*loc)))); else if (GET_MODE (reloadreg) == GET_MODE (*loc)) return reloadreg; else { int final_offset = SUBREG_BYTE (reloadreg) + SUBREG_BYTE (*loc); /* When working with SUBREGs the rule is that the byte offset must be a multiple of the SUBREG's mode. */ final_offset = (final_offset / GET_MODE_SIZE (GET_MODE (*loc))); final_offset = (final_offset * GET_MODE_SIZE (GET_MODE (*loc))); return gen_rtx_SUBREG (GET_MODE (*loc), SUBREG_REG (reloadreg), final_offset); } } } /* If *LOC is a PLUS, MINUS, or MULT, see if a replacement is scheduled for what's inside and make a new rtl if so. */ if (GET_CODE (*loc) == PLUS || GET_CODE (*loc) == MINUS || GET_CODE (*loc) == MULT) { rtx x = find_replacement (&XEXP (*loc, 0)); rtx y = find_replacement (&XEXP (*loc, 1)); if (x != XEXP (*loc, 0) || y != XEXP (*loc, 1)) return gen_rtx_fmt_ee (GET_CODE (*loc), GET_MODE (*loc), x, y); } return *loc; } /* Return nonzero if register in range [REGNO, ENDREGNO) appears either explicitly or implicitly in X other than being stored into (except for earlyclobber operands). References contained within the substructure at LOC do not count. LOC may be zero, meaning don't ignore anything. This is similar to refers_to_regno_p in rtlanal.c except that we look at equivalences for pseudos that didn't get hard registers. 
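For example, with a hypothetical pseudo 100 that received no hard register but is equivalent to (mem (plus (reg fp) (const_int -8))), asking whether the frame pointer is referenced in (set (reg 3) (reg 100)) answers nonzero: the pseudo is scanned through its memory equivalence, which refers_to_regno_p would not do.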
*/ int refers_to_regno_for_reload_p (unsigned int regno, unsigned int endregno, rtx x, rtx *loc) { int i; unsigned int r; RTX_CODE code; const char *fmt; if (x == 0) return 0; repeat: code = GET_CODE (x); switch (code) { case REG: r = REGNO (x); /* If this is a pseudo, a hard register must not have been allocated. X must therefore either be a constant or be in memory. */ if (r >= FIRST_PSEUDO_REGISTER) { if (reg_equiv_memory_loc[r]) return refers_to_regno_for_reload_p (regno, endregno, reg_equiv_memory_loc[r], (rtx*) 0); if (reg_equiv_constant[r]) return 0; abort (); } return (endregno > r && regno < r + (r < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[r][GET_MODE (x)] : 1)); case SUBREG: /* If this is a SUBREG of a hard reg, we can see exactly which registers are being modified. Otherwise, handle normally. */ if (REG_P (SUBREG_REG (x)) && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER) { unsigned int inner_regno = subreg_regno (x); unsigned int inner_endregno = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[inner_regno][GET_MODE (x)] : 1); return endregno > inner_regno && regno < inner_endregno; } break; case CLOBBER: case SET: if (&SET_DEST (x) != loc /* Note setting a SUBREG counts as referring to the REG it is in for a pseudo but not for hard registers since we can treat each word individually. */ && ((GET_CODE (SET_DEST (x)) == SUBREG && loc != &SUBREG_REG (SET_DEST (x)) && REG_P (SUBREG_REG (SET_DEST (x))) && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER && refers_to_regno_for_reload_p (regno, endregno, SUBREG_REG (SET_DEST (x)), loc)) /* If the output is an earlyclobber operand, this is a conflict. */ || ((!REG_P (SET_DEST (x)) || earlyclobber_operand_p (SET_DEST (x))) && refers_to_regno_for_reload_p (regno, endregno, SET_DEST (x), loc)))) return 1; if (code == CLOBBER || loc == &SET_SRC (x)) return 0; x = SET_SRC (x); goto repeat; default: break; } /* X does not match, so try its subexpressions. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e' && loc != &XEXP (x, i)) { if (i == 0) { x = XEXP (x, 0); goto repeat; } else if (refers_to_regno_for_reload_p (regno, endregno, XEXP (x, i), loc)) return 1; } else if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (loc != &XVECEXP (x, i, j) && refers_to_regno_for_reload_p (regno, endregno, XVECEXP (x, i, j), loc)) return 1; } } return 0; } /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG, we check if any register number in X conflicts with the relevant register numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN contains a MEM (we don't bother checking for memory addresses that can't conflict because we expect this to be a rare case. This function is similar to reg_overlap_mentioned_p in rtlanal.c except that we look at equivalences for pseudos that didn't get hard registers. */ int reg_overlap_mentioned_for_reload_p (rtx x, rtx in) { int regno, endregno; /* Overly conservative. */ if (GET_CODE (x) == STRICT_LOW_PART || GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC) x = XEXP (x, 0); /* If either argument is a constant, then modifying X can not affect IN. 
*/ if (CONSTANT_P (x) || CONSTANT_P (in)) return 0; else if (GET_CODE (x) == SUBREG) { regno = REGNO (SUBREG_REG (x)); if (regno < FIRST_PSEUDO_REGISTER) regno += subreg_regno_offset (REGNO (SUBREG_REG (x)), GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x), GET_MODE (x)); } else if (REG_P (x)) { regno = REGNO (x); /* If this is a pseudo, it must not have been assigned a hard register. Therefore, it must either be in memory or be a constant. */ if (regno >= FIRST_PSEUDO_REGISTER) { if (reg_equiv_memory_loc[regno]) return refers_to_mem_for_reload_p (in); else if (reg_equiv_constant[regno]) return 0; abort (); } } else if (MEM_P (x)) return refers_to_mem_for_reload_p (in); else if (GET_CODE (x) == SCRATCH || GET_CODE (x) == PC || GET_CODE (x) == CC0) return reg_mentioned_p (x, in); else if (GET_CODE (x) == PLUS) { /* We actually want to know if X is mentioned somewhere inside IN. We must not say that (plus (sp) (const_int 124)) is in (plus (sp) (const_int 64)), since that can lead to incorrect reload allocation when spuriously changing a RELOAD_FOR_OUTPUT_ADDRESS into a RELOAD_OTHER on behalf of another RELOAD_OTHER. */ while (MEM_P (in)) in = XEXP (in, 0); if (REG_P (in)) return 0; else if (GET_CODE (in) == PLUS) return (reg_overlap_mentioned_for_reload_p (x, XEXP (in, 0)) || reg_overlap_mentioned_for_reload_p (x, XEXP (in, 1))); else return (reg_overlap_mentioned_for_reload_p (XEXP (x, 0), in) || reg_overlap_mentioned_for_reload_p (XEXP (x, 1), in)); } else abort (); endregno = regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (x)] : 1); return refers_to_regno_for_reload_p (regno, endregno, in, (rtx*) 0); } /* Return nonzero if anything in X contains a MEM. Look also for pseudo registers. */ int refers_to_mem_for_reload_p (rtx x) { const char *fmt; int i; if (MEM_P (x)) return 1; if (REG_P (x)) return (REGNO (x) >= FIRST_PSEUDO_REGISTER && reg_equiv_memory_loc[REGNO (x)]); fmt = GET_RTX_FORMAT (GET_CODE (x)); for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) if (fmt[i] == 'e' && (MEM_P (XEXP (x, i)) || refers_to_mem_for_reload_p (XEXP (x, i)))) return 1; return 0; } /* Check the insns before INSN to see if there is a suitable register containing the same value as GOAL. If OTHER is -1, look for a register in class CLASS. Otherwise, just see if register number OTHER shares GOAL's value. Return an rtx for the register found, or zero if none is found. If RELOAD_REG_P is (short *)1, we reject any hard reg that appears in reload_reg_rtx because such a hard reg is also needed coming into this insn. If RELOAD_REG_P is any other nonzero value, it is a vector indexed by hard reg number and we reject any hard reg whose element in the vector is nonnegative as well as any that appears in reload_reg_rtx. If GOAL is zero, then GOALREG is a register number; we look for an equivalent for that register. MODE is the machine mode of the value we want an equivalence for. If GOAL is nonzero and not VOIDmode, then it must have mode MODE. This function is used by jump.c as well as in the reload pass. If GOAL is the sum of the stack pointer and a constant, we treat it as if it were a constant except that sp is required to be unchanging. 
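For example, a GOAL of (plus (reg sp) (const_int 16)) can be satisfied by a register that an earlier insn loaded from that same sum, but only if no intervening insn pushes, pops or otherwise adjusts the stack pointer; that is what the need_stable_sp flag below enforces.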
*/ rtx find_equiv_reg (rtx goal, rtx insn, enum reg_class class, int other, short *reload_reg_p, int goalreg, enum machine_mode mode) { rtx p = insn; rtx goaltry, valtry, value, where; rtx pat; int regno = -1; int valueno; int goal_mem = 0; int goal_const = 0; int goal_mem_addr_varies = 0; int need_stable_sp = 0; int nregs; int valuenregs; int num = 0; if (goal == 0) regno = goalreg; else if (REG_P (goal)) regno = REGNO (goal); else if (MEM_P (goal)) { enum rtx_code code = GET_CODE (XEXP (goal, 0)); if (MEM_VOLATILE_P (goal)) return 0; if (flag_float_store && GET_MODE_CLASS (GET_MODE (goal)) == MODE_FLOAT) return 0; /* An address with side effects must be reexecuted. */ switch (code) { case POST_INC: case PRE_INC: case POST_DEC: case PRE_DEC: case POST_MODIFY: case PRE_MODIFY: return 0; default: break; } goal_mem = 1; } else if (CONSTANT_P (goal)) goal_const = 1; else if (GET_CODE (goal) == PLUS && XEXP (goal, 0) == stack_pointer_rtx && CONSTANT_P (XEXP (goal, 1))) goal_const = need_stable_sp = 1; else if (GET_CODE (goal) == PLUS && XEXP (goal, 0) == frame_pointer_rtx && CONSTANT_P (XEXP (goal, 1))) goal_const = 1; else return 0; num = 0; /* Scan insns back from INSN, looking for one that copies a value into or out of GOAL. Stop and give up if we reach a label. */ while (1) { p = PREV_INSN (p); num++; if (p == 0 || GET_CODE (p) == CODE_LABEL || num > PARAM_VALUE (PARAM_MAX_RELOAD_SEARCH_INSNS)) return 0; if (GET_CODE (p) == INSN /* If we don't want spill regs ... */ && (! (reload_reg_p != 0 && reload_reg_p != (short *) (HOST_WIDE_INT) 1) /* ... then ignore insns introduced by reload; they aren't useful and can cause results in reload_as_needed to be different from what they were when calculating the need for spills. If we notice an input-reload insn here, we will reject it below, but it might hide a usable equivalent. That makes bad code. It may even abort: perhaps no reg was spilled for this insn because it was assumed we would find that equivalent. */ || INSN_UID (p) < reload_first_uid)) { rtx tem; pat = single_set (p); /* First check for something that sets some reg equal to GOAL. */ if (pat != 0 && ((regno >= 0 && true_regnum (SET_SRC (pat)) == regno && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0) || (regno >= 0 && true_regnum (SET_DEST (pat)) == regno && (valueno = true_regnum (valtry = SET_SRC (pat))) >= 0) || (goal_const && rtx_equal_p (SET_SRC (pat), goal) /* When looking for stack pointer + const, make sure we don't use a stack adjust. */ && !reg_overlap_mentioned_for_reload_p (SET_DEST (pat), goal) && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0) || (goal_mem && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0 && rtx_renumbered_equal_p (goal, SET_SRC (pat))) || (goal_mem && (valueno = true_regnum (valtry = SET_SRC (pat))) >= 0 && rtx_renumbered_equal_p (goal, SET_DEST (pat))) /* If we are looking for a constant, and something equivalent to that constant was copied into a reg, we can use that reg. 
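Typically this comes from a REG_EQUIV note: if an earlier insn loaded a CONST_DOUBLE one of whose words equals the CONST_INT we are looking for, the matching subword of that insn's destination register can serve as the equivalent, which is what the operand_subword tests below implement.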
*/ || (goal_const && REG_NOTES (p) != 0 && (tem = find_reg_note (p, REG_EQUIV, NULL_RTX)) && ((rtx_equal_p (XEXP (tem, 0), goal) && (valueno = true_regnum (valtry = SET_DEST (pat))) >= 0) || (REG_P (SET_DEST (pat)) && GET_CODE (XEXP (tem, 0)) == CONST_DOUBLE && (GET_MODE_CLASS (GET_MODE (XEXP (tem, 0))) == MODE_FLOAT) && GET_CODE (goal) == CONST_INT && 0 != (goaltry = operand_subword (XEXP (tem, 0), 0, 0, VOIDmode)) && rtx_equal_p (goal, goaltry) && (valtry = operand_subword (SET_DEST (pat), 0, 0, VOIDmode)) && (valueno = true_regnum (valtry)) >= 0))) || (goal_const && (tem = find_reg_note (p, REG_EQUIV, NULL_RTX)) && REG_P (SET_DEST (pat)) && GET_CODE (XEXP (tem, 0)) == CONST_DOUBLE && (GET_MODE_CLASS (GET_MODE (XEXP (tem, 0))) == MODE_FLOAT) && GET_CODE (goal) == CONST_INT && 0 != (goaltry = operand_subword (XEXP (tem, 0), 1, 0, VOIDmode)) && rtx_equal_p (goal, goaltry) && (valtry = operand_subword (SET_DEST (pat), 1, 0, VOIDmode)) && (valueno = true_regnum (valtry)) >= 0))) { if (other >= 0) { if (valueno != other) continue; } else if ((unsigned) valueno >= FIRST_PSEUDO_REGISTER) continue; else { int i; for (i = hard_regno_nregs[valueno][mode] - 1; i >= 0; i--) if (! TEST_HARD_REG_BIT (reg_class_contents[(int) class], valueno + i)) break; if (i >= 0) continue; } value = valtry; where = p; break; } } } /* We found a previous insn copying GOAL into a suitable other reg VALUE (or copying VALUE into GOAL, if GOAL is also a register). Now verify that VALUE is really valid. */ /* VALUENO is the register number of VALUE; a hard register. */ /* Don't try to re-use something that is killed in this insn. We want to be able to trust REG_UNUSED notes. */ if (REG_NOTES (where) != 0 && find_reg_note (where, REG_UNUSED, value)) return 0; /* If we propose to get the value from the stack pointer or if GOAL is a MEM based on the stack pointer, we need a stable SP. */ if (valueno == STACK_POINTER_REGNUM || regno == STACK_POINTER_REGNUM || (goal_mem && reg_overlap_mentioned_for_reload_p (stack_pointer_rtx, goal))) need_stable_sp = 1; /* Reject VALUE if the copy-insn moved the wrong sort of datum. */ if (GET_MODE (value) != mode) return 0; /* Reject VALUE if it was loaded from GOAL and is also a register that appears in the address of GOAL. */ if (goal_mem && value == SET_DEST (single_set (where)) && refers_to_regno_for_reload_p (valueno, (valueno + hard_regno_nregs[valueno][mode]), goal, (rtx*) 0)) return 0; /* Reject registers that overlap GOAL. */ if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER) nregs = hard_regno_nregs[regno][mode]; else nregs = 1; valuenregs = hard_regno_nregs[valueno][mode]; if (!goal_mem && !goal_const && regno + nregs > valueno && regno < valueno + valuenregs) return 0; /* Reject VALUE if it is one of the regs reserved for reloads. Reload1 knows how to reuse them anyway, and it would get confused if we allocated one without its knowledge. (Now that insns introduced by reload are ignored above, this case shouldn't happen, but I'm not positive.) */ if (reload_reg_p != 0 && reload_reg_p != (short *) (HOST_WIDE_INT) 1) { int i; for (i = 0; i < valuenregs; ++i) if (reload_reg_p[valueno + i] >= 0) return 0; } /* Reject VALUE if it is a register being used for an input reload even if it is not one of those reserved. 
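Such a register is about to be overwritten by the input-reload moves emitted in front of INSN, so whatever equivalent value it holds at the moment cannot be relied upon when INSN executes.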
*/ if (reload_reg_p != 0) { int i; for (i = 0; i < n_reloads; i++) if (rld[i].reg_rtx != 0 && rld[i].in) { int regno1 = REGNO (rld[i].reg_rtx); int nregs1 = hard_regno_nregs[regno1] [GET_MODE (rld[i].reg_rtx)]; if (regno1 < valueno + valuenregs && regno1 + nregs1 > valueno) return 0; } } if (goal_mem) /* We must treat frame pointer as varying here, since it can vary--in a nonlocal goto as generated by expand_goto. */ goal_mem_addr_varies = !CONSTANT_ADDRESS_P (XEXP (goal, 0)); /* Now verify that the values of GOAL and VALUE remain unaltered until INSN is reached. */ p = insn; while (1) { p = PREV_INSN (p); if (p == where) return value; /* Don't trust the conversion past a function call if either of the two is in a call-clobbered register, or memory. */ if (GET_CODE (p) == CALL_INSN) { int i; if (goal_mem || need_stable_sp) return 0; if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER) for (i = 0; i < nregs; ++i) if (call_used_regs[regno + i]) return 0; if (valueno >= 0 && valueno < FIRST_PSEUDO_REGISTER) for (i = 0; i < valuenregs; ++i) if (call_used_regs[valueno + i]) return 0; #ifdef NON_SAVING_SETJMP if (NON_SAVING_SETJMP && find_reg_note (p, REG_SETJMP, NULL)) return 0; #endif } if (INSN_P (p)) { pat = PATTERN (p); /* Watch out for unspec_volatile, and volatile asms. */ if (volatile_insn_p (pat)) return 0; /* If this insn P stores in either GOAL or VALUE, return 0. If GOAL is a memory ref and this insn writes memory, return 0. If GOAL is a memory ref and its address is not constant, and this insn P changes a register used in GOAL, return 0. */ if (GET_CODE (pat) == COND_EXEC) pat = COND_EXEC_CODE (pat); if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER) { rtx dest = SET_DEST (pat); while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); if (REG_P (dest)) { int xregno = REGNO (dest); int xnregs; if (REGNO (dest) < FIRST_PSEUDO_REGISTER) xnregs = hard_regno_nregs[xregno][GET_MODE (dest)]; else xnregs = 1; if (xregno < regno + nregs && xregno + xnregs > regno) return 0; if (xregno < valueno + valuenregs && xregno + xnregs > valueno) return 0; if (goal_mem_addr_varies && reg_overlap_mentioned_for_reload_p (dest, goal)) return 0; if (xregno == STACK_POINTER_REGNUM && need_stable_sp) return 0; } else if (goal_mem && MEM_P (dest) && ! push_operand (dest, GET_MODE (dest))) return 0; else if (MEM_P (dest) && regno >= FIRST_PSEUDO_REGISTER && reg_equiv_memory_loc[regno] != 0) return 0; else if (need_stable_sp && push_operand (dest, GET_MODE (dest))) return 0; } else if (GET_CODE (pat) == PARALLEL) { int i; for (i = XVECLEN (pat, 0) - 1; i >= 0; i--) { rtx v1 = XVECEXP (pat, 0, i); if (GET_CODE (v1) == COND_EXEC) v1 = COND_EXEC_CODE (v1); if (GET_CODE (v1) == SET || GET_CODE (v1) == CLOBBER) { rtx dest = SET_DEST (v1); while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); if (REG_P (dest)) { int xregno = REGNO (dest); int xnregs; if (REGNO (dest) < FIRST_PSEUDO_REGISTER) xnregs = hard_regno_nregs[xregno][GET_MODE (dest)]; else xnregs = 1; if (xregno < regno + nregs && xregno + xnregs > regno) return 0; if (xregno < valueno + valuenregs && xregno + xnregs > valueno) return 0; if (goal_mem_addr_varies && reg_overlap_mentioned_for_reload_p (dest, goal)) return 0; if (xregno == STACK_POINTER_REGNUM && need_stable_sp) return 0; } else if (goal_mem && MEM_P (dest) && ! 
push_operand (dest, GET_MODE (dest))) return 0; else if (MEM_P (dest) && regno >= FIRST_PSEUDO_REGISTER && reg_equiv_memory_loc[regno] != 0) return 0; else if (need_stable_sp && push_operand (dest, GET_MODE (dest))) return 0; } } } if (GET_CODE (p) == CALL_INSN && CALL_INSN_FUNCTION_USAGE (p)) { rtx link; for (link = CALL_INSN_FUNCTION_USAGE (p); XEXP (link, 1) != 0; link = XEXP (link, 1)) { pat = XEXP (link, 0); if (GET_CODE (pat) == CLOBBER) { rtx dest = SET_DEST (pat); if (REG_P (dest)) { int xregno = REGNO (dest); int xnregs = hard_regno_nregs[xregno][GET_MODE (dest)]; if (xregno < regno + nregs && xregno + xnregs > regno) return 0; else if (xregno < valueno + valuenregs && xregno + xnregs > valueno) return 0; else if (goal_mem_addr_varies && reg_overlap_mentioned_for_reload_p (dest, goal)) return 0; } else if (goal_mem && MEM_P (dest) && ! push_operand (dest, GET_MODE (dest))) return 0; else if (need_stable_sp && push_operand (dest, GET_MODE (dest))) return 0; } } } #ifdef AUTO_INC_DEC /* If this insn auto-increments or auto-decrements either regno or valueno, return 0 now. If GOAL is a memory ref and its address is not constant, and this insn P increments a register used in GOAL, return 0. */ { rtx link; for (link = REG_NOTES (p); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_INC && REG_P (XEXP (link, 0))) { int incno = REGNO (XEXP (link, 0)); if (incno < regno + nregs && incno >= regno) return 0; if (incno < valueno + valuenregs && incno >= valueno) return 0; if (goal_mem_addr_varies && reg_overlap_mentioned_for_reload_p (XEXP (link, 0), goal)) return 0; } } #endif } } } /* Find a place where INCED appears in an increment or decrement operator within X, and return the amount INCED is incremented or decremented by. The value is always positive. */ static int find_inc_amount (rtx x, rtx inced) { enum rtx_code code = GET_CODE (x); const char *fmt; int i; if (code == MEM) { rtx addr = XEXP (x, 0); if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == PRE_INC || GET_CODE (addr) == POST_INC) && XEXP (addr, 0) == inced) return GET_MODE_SIZE (GET_MODE (x)); else if ((GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY) && GET_CODE (XEXP (addr, 1)) == PLUS && XEXP (addr, 0) == XEXP (XEXP (addr, 1), 0) && XEXP (addr, 0) == inced && GET_CODE (XEXP (XEXP (addr, 1), 1)) == CONST_INT) { i = INTVAL (XEXP (XEXP (addr, 1), 1)); return i < 0 ? -i : i; } } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { int tem = find_inc_amount (XEXP (x, i), inced); if (tem != 0) return tem; } if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) { int tem = find_inc_amount (XVECEXP (x, i, j), inced); if (tem != 0) return tem; } } } return 0; } /* Return 1 if register REGNO is the subject of a clobber in insn INSN. If SETS is nonzero, also consider SETs. 
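REGNO is checked together with the hard_regno_nregs[REGNO][MODE] - 1 hard registers that follow it, so a clobber (or, with SETS, a set) of any register in that range counts.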
*/ int regno_clobbered_p (unsigned int regno, rtx insn, enum machine_mode mode, int sets) { unsigned int nregs = hard_regno_nregs[regno][mode]; unsigned int endregno = regno + nregs; if ((GET_CODE (PATTERN (insn)) == CLOBBER || (sets && GET_CODE (PATTERN (insn)) == SET)) && REG_P (XEXP (PATTERN (insn), 0))) { unsigned int test = REGNO (XEXP (PATTERN (insn), 0)); return test >= regno && test < endregno; } if (GET_CODE (PATTERN (insn)) == PARALLEL) { int i = XVECLEN (PATTERN (insn), 0) - 1; for (; i >= 0; i--) { rtx elt = XVECEXP (PATTERN (insn), 0, i); if ((GET_CODE (elt) == CLOBBER || (sets && GET_CODE (PATTERN (insn)) == SET)) && REG_P (XEXP (elt, 0))) { unsigned int test = REGNO (XEXP (elt, 0)); if (test >= regno && test < endregno) return 1; } } } return 0; } /* Find the low part, with mode MODE, of a hard regno RELOADREG. */ rtx reload_adjust_reg_for_mode (rtx reloadreg, enum machine_mode mode) { int regno; if (GET_MODE (reloadreg) == mode) return reloadreg; regno = REGNO (reloadreg); if (WORDS_BIG_ENDIAN) regno += (int) hard_regno_nregs[regno][GET_MODE (reloadreg)] - (int) hard_regno_nregs[regno][mode]; return gen_rtx_REG (mode, regno); } static const char *const reload_when_needed_name[] = { "RELOAD_FOR_INPUT", "RELOAD_FOR_OUTPUT", "RELOAD_FOR_INSN", "RELOAD_FOR_INPUT_ADDRESS", "RELOAD_FOR_INPADDR_ADDRESS", "RELOAD_FOR_OUTPUT_ADDRESS", "RELOAD_FOR_OUTADDR_ADDRESS", "RELOAD_FOR_OPERAND_ADDRESS", "RELOAD_FOR_OPADDR_ADDR", "RELOAD_OTHER", "RELOAD_FOR_OTHER_ADDRESS" }; static const char * const reg_class_names4[] = REG_CLASS_NAMES; /* These functions are used to print the variables set by 'find_reloads' */ void debug_reload_to_stream (FILE *f) { int r; const char *prefix; if (! f) f = stderr; for (r = 0; r < n_reloads; r++) { fprintf (f, "Reload %d: ", r); if (rld[r].in != 0) { fprintf (f, "reload_in (%s) = ", GET_MODE_NAME (rld[r].inmode)); print_inline_rtx (f, rld[r].in, 24); fprintf (f, "\n\t"); } if (rld[r].out != 0) { fprintf (f, "reload_out (%s) = ", GET_MODE_NAME (rld[r].outmode)); print_inline_rtx (f, rld[r].out, 24); fprintf (f, "\n\t"); } fprintf (f, "%s, ", reg_class_names4[(int) rld[r].class]); fprintf (f, "%s (opnum = %d)", reload_when_needed_name[(int) rld[r].when_needed], rld[r].opnum); if (rld[r].optional) fprintf (f, ", optional"); if (rld[r].nongroup) fprintf (f, ", nongroup"); if (rld[r].inc != 0) fprintf (f, ", inc by %d", rld[r].inc); if (rld[r].nocombine) fprintf (f, ", can't combine"); if (rld[r].secondary_p) fprintf (f, ", secondary_reload_p"); if (rld[r].in_reg != 0) { fprintf (f, "\n\treload_in_reg: "); print_inline_rtx (f, rld[r].in_reg, 24); } if (rld[r].out_reg != 0) { fprintf (f, "\n\treload_out_reg: "); print_inline_rtx (f, rld[r].out_reg, 24); } if (rld[r].reg_rtx != 0) { fprintf (f, "\n\treload_reg_rtx: "); print_inline_rtx (f, rld[r].reg_rtx, 24); } prefix = "\n\t"; if (rld[r].secondary_in_reload != -1) { fprintf (f, "%ssecondary_in_reload = %d", prefix, rld[r].secondary_in_reload); prefix = ", "; } if (rld[r].secondary_out_reload != -1) fprintf (f, "%ssecondary_out_reload = %d\n", prefix, rld[r].secondary_out_reload); prefix = "\n\t"; if (rld[r].secondary_in_icode != CODE_FOR_nothing) { fprintf (f, "%ssecondary_in_icode = %s", prefix, insn_data[rld[r].secondary_in_icode].name); prefix = ", "; } if (rld[r].secondary_out_icode != CODE_FOR_nothing) fprintf (f, "%ssecondary_out_icode = %s", prefix, insn_data[rld[r].secondary_out_icode].name); fprintf (f, "\n"); } } void debug_reload (void) { debug_reload_to_stream (stderr); } #ifdef ONE_COMPILATION_UNIT 
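/* The reload code above was presumably compiled with the strict (REG_OK_STRICT style) versions of these address-checking macros; for the single compilation unit build we appear to switch back to the nonstrict forms here for the source files that follow. */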
#undef REG_OK_FOR_INDEX_P #undef REG_OK_FOR_BASE_P #undef GO_IF_LEGITIMATE_ADDRESS #define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_INDEX_NONSTRICT_P (X) #define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_NONSTRICT_P (X) #define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \ do { \ if (legitimate_address_p ((MODE), (X), 0)) \ goto ADDR; \ } while (0) #endif /* Reload pseudo regs into hard regs for insns that require hard regs. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains the reload pass of the compiler, which is run after register allocation has been done. It checks that each insn is valid (operands required to be in registers really are in registers of the proper class) and fixes up invalid ones by copying values temporarily into registers for the insns that need them. The results of register allocation are described by the vector reg_renumber; the insns still contain pseudo regs, but reg_renumber can be used to find which hard reg, if any, a pseudo reg is in. The technique we always use is to free up a few hard regs that are called ``reload regs'', and for each place where a pseudo reg must be in a hard reg, copy it temporarily into one of the reload regs. Reload regs are allocated locally for every instruction that needs reloads. When there are pseudos which are allocated to a register that has been chosen as a reload reg, such pseudos must be ``spilled''. This means that they go to other hard regs, or to stack slots if no other available hard regs can be found. Spilling can invalidate more insns, requiring additional need for reloads, so we must keep checking until the process stabilizes. For machines with different classes of registers, we must keep track of the register class needed for each reload, and make sure that we allocate enough reload registers of each class. The file reload.c contains the code that checks one insn for validity and reports the reloads that it needs. This file is in charge of scanning the entire rtl code, accumulating the reload needs, spilling, assigning reload registers to use for fixing up each insn, and generating the new insns to copy values into the reload registers. */ /* During reload_as_needed, element N contains a REG rtx for the hard reg into which reg N has been reloaded (perhaps for a previous insn). */ static rtx *reg_last_reload_reg; /* Elt N nonzero if reg_last_reload_reg[N] has been set in this insn for an output reload that stores into reg N. */ static char *reg_has_output_reload; /* Indicates which hard regs are reload-registers for an output reload in the current insn. */ static HARD_REG_SET reg_is_output_reload; /* Element N is the constant value to which pseudo reg N is equivalent, or zero if pseudo reg N is not equivalent to a constant. 
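Such an equivalence is recorded from a REG_EQUIV note; a pseudo that is set exactly once from, say, (const_int 42) and never modified afterwards is the typical case.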
find_reloads looks at this in order to replace pseudo reg N with the constant it stands for. */ rtx *reg_equiv_constant; /* Element N is a memory location to which pseudo reg N is equivalent, prior to any register elimination (such as frame pointer to stack pointer). Depending on whether or not it is a valid address, this value is transferred to either reg_equiv_address or reg_equiv_mem. */ rtx *reg_equiv_memory_loc; /* We allocate reg_equiv_memory_loc inside a varray so that the garbage collector can keep track of what is inside. */ varray_type reg_equiv_memory_loc_varray; /* Element N is the address of stack slot to which pseudo reg N is equivalent. This is used when the address is not valid as a memory address (because its displacement is too big for the machine.) */ rtx *reg_equiv_address; /* Element N is the memory slot to which pseudo reg N is equivalent, or zero if pseudo reg N is not equivalent to a memory slot. */ rtx *reg_equiv_mem; /* Widest width in which each pseudo reg is referred to (via subreg). */ static unsigned int *reg_max_ref_width; /* Element N is the list of insns that initialized reg N from its equivalent constant or memory slot. */ static rtx *reg_equiv_init; /* Vector to remember old contents of reg_renumber before spilling. */ static short *reg_old_renumber; /* During reload_as_needed, element N contains the last pseudo regno reloaded into hard register N. If that pseudo reg occupied more than one register, reg_reloaded_contents points to that pseudo for each spill register in use; all of these must remain set for an inheritance to occur. */ static int reg_reloaded_contents[FIRST_PSEUDO_REGISTER]; /* During reload_as_needed, element N contains the insn for which hard register N was last used. Its contents are significant only when reg_reloaded_valid is set for this register. */ static rtx reg_reloaded_insn[FIRST_PSEUDO_REGISTER]; /* Indicate if reg_reloaded_insn / reg_reloaded_contents is valid. */ static HARD_REG_SET reg_reloaded_valid; /* Indicate if the register was dead at the end of the reload. This is only valid if reg_reloaded_contents is set and valid. */ static HARD_REG_SET reg_reloaded_dead; /* Indicate whether the register's current value is one that is not safe to retain across a call, even for registers that are normally call-saved. */ static HARD_REG_SET reg_reloaded_call_part_clobbered; /* Number of spill-regs so far; number of valid elements of spill_regs. */ static int n_spills; /* In parallel with spill_regs, contains REG rtx's for those regs. Holds the last rtx used for any given reg, or 0 if it has never been used for spilling yet. This rtx is reused, provided it has the proper mode. */ static rtx spill_reg_rtx[FIRST_PSEUDO_REGISTER]; /* In parallel with spill_regs, contains nonzero for a spill reg that was stored after the last time it was used. The precise value is the insn generated to do the store. */ static rtx spill_reg_store[FIRST_PSEUDO_REGISTER]; /* This is the register that was stored with spill_reg_store. This is a copy of reload_out / reload_out_reg when the value was stored; if reload_out is a MEM, spill_reg_stored_to will be set to reload_out_reg. */ static rtx spill_reg_stored_to[FIRST_PSEUDO_REGISTER]; /* This table is the inverse mapping of spill_regs: indexed by hard reg number, it contains the position of that reg in spill_regs, or -1 for something that is not in spill_regs. ?!? This is no longer accurate. 
*/ static short spill_reg_order[FIRST_PSEUDO_REGISTER]; /* This reg set indicates registers that can't be used as spill registers for the currently processed insn. These are the hard registers which are live during the insn, but not allocated to pseudos, as well as fixed registers. */ static HARD_REG_SET bad_spill_regs; /* These are the hard registers that can't be used as spill register for any insn. This includes registers used for user variables and registers that we can't eliminate. A register that appears in this set also can't be used to retry register allocation. */ static HARD_REG_SET bad_spill_regs_global; /* Describes order of use of registers for reloading of spilled pseudo-registers. `n_spills' is the number of elements that are actually valid; new ones are added at the end. Both spill_regs and spill_reg_order are used on two occasions: once during find_reload_regs, where they keep track of the spill registers for a single insn, but also during reload_as_needed where they show all the registers ever used by reload. For the latter case, the information is calculated during finish_spills. */ static short spill_regs[FIRST_PSEUDO_REGISTER]; /* This vector of reg sets indicates, for each pseudo, which hard registers may not be used for retrying global allocation because the register was formerly spilled from one of them. If we allowed reallocating a pseudo to a register that it was already allocated to, reload might not terminate. */ static HARD_REG_SET *pseudo_previous_regs; /* This vector of reg sets indicates, for each pseudo, which hard registers may not be used for retrying global allocation because they are used as spill registers during one of the insns in which the pseudo is live. */ static HARD_REG_SET *pseudo_forbidden_regs; /* All hard regs that have been used as spill registers for any insn are marked in this set. */ static HARD_REG_SET used_spill_regs; /* Index of last register assigned as a spill register. We allocate in a round-robin fashion. */ static int last_spill_reg; /* Nonzero if indirect addressing is supported on the machine; this means that spilling (REG n) does not require reloading it into a register in order to do (MEM (REG n)) or (MEM (PLUS (REG n) (CONST_INT c))). The value indicates the level of indirect addressing supported, e.g., two means that (MEM (MEM (REG n))) is also valid if (REG n) does not get a hard register. */ static char spill_indirect_levels; /* Nonzero if indirect addressing is supported when the innermost MEM is of the form (MEM (SYMBOL_REF sym)). It is assumed that the level to which these are valid is the same as spill_indirect_levels, above. */ char indirect_symref_ok; /* Nonzero if an address (plus (reg frame_pointer) (reg ...)) is valid. */ char double_reg_address_ok; /* Record the stack slot for each spilled hard register. */ static rtx spill_stack_slot[FIRST_PSEUDO_REGISTER]; /* Width allocated so far for that stack slot. */ static unsigned int spill_stack_slot_width[FIRST_PSEUDO_REGISTER]; /* Record which pseudos needed to be spilled. */ static regset_head spilled_pseudos; /* Used for communication between order_regs_for_reload and count_pseudo. Used to avoid counting one pseudo twice. */ static regset_head pseudos_counted; /* First uid used by insns created by reload in this function. Used in find_equiv_reg. */ int reload_first_uid; /* Flag set by local-alloc or global-alloc if anything is live in a call-clobbered reg across calls. */ int caller_save_needed; /* Set to 1 while reload_as_needed is operating. 
Required by some machines to handle any generated moves differently. */ int reload_in_progress = 0; /* These arrays record the insn_code of insns that may be needed to perform input and output reloads of special objects. They provide a place to pass a scratch register. */ enum insn_code reload_in_optab[NUM_MACHINE_MODES]; enum insn_code reload_out_optab[NUM_MACHINE_MODES]; /* This obstack is used for allocation of rtl during register elimination. The allocated storage can be freed once find_reloads has processed the insn. */ struct obstack reload_obstack; /* Points to the beginning of the reload_obstack. All insn_chain structures are allocated first. */ char *reload_startobj; /* The point after all insn_chain structures. Used to quickly deallocate memory allocated in copy_reloads during calculate_needs_all_insns. */ char *reload_firstobj; /* This points before all local rtl generated by register elimination. Used to quickly free all memory after processing one insn. */ static char *reload_insn_firstobj; /* List of insn_chain instructions, one for every insn that reload needs to examine. */ struct insn_chain *reload_insn_chain; /* List of all insns needing reloads. */ static struct insn_chain *insns_need_reload; /* This structure is used to record information about register eliminations. Each array entry describes one possible way of eliminating a register in favor of another. If there is more than one way of eliminating a particular register, the most preferred should be specified first. */ struct elim_table { int from; /* Register number to be eliminated. */ int to; /* Register number used as replacement. */ HOST_WIDE_INT initial_offset; /* Initial difference between values. */ int can_eliminate; /* Nonzero if this elimination can be done. */ int can_eliminate_previous; /* Value of CAN_ELIMINATE in previous scan over insns made by reload. */ HOST_WIDE_INT offset; /* Current offset between the two regs. */ HOST_WIDE_INT previous_offset;/* Offset at end of previous insn. */ int ref_outside_mem; /* "to" has been referenced outside a MEM. */ rtx from_rtx; /* REG rtx for the register to be eliminated. We cannot simply compare the number since we might then spuriously replace a hard register corresponding to a pseudo assigned to the reg to be eliminated. */ rtx to_rtx; /* REG rtx for the replacement. */ }; static struct elim_table *reg_eliminate = 0; /* This is an intermediate structure to initialize the table. It has exactly the members provided by ELIMINABLE_REGS. */ static const struct elim_table_1 { const int from; const int to; } reg_eliminate_1[] = /* If a set of eliminable registers was specified, define the table from it. Otherwise, default to the normal case of the frame pointer being replaced by the stack pointer. */ #ifdef ELIMINABLE_REGS ELIMINABLE_REGS; #else {{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}; #endif #define NUM_ELIMINABLE_REGS ARRAY_SIZE (reg_eliminate_1) /* Record the number of pending eliminations that have an offset not equal to their initial offset. If nonzero, we use a new copy of each replacement result in any insns encountered. */ int num_not_at_initial_offset; /* Count the number of registers that we may be able to eliminate. */ static int num_eliminable; /* And the number of registers that are equivalent to a constant that can be eliminated to frame_pointer / arg_pointer + constant. */ static int num_eliminable_invariants; /* For each label, we record the offset of each elimination. 
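Concretely, offsets_at[L - first_label_num][E] holds the offset of elimination E at label L, and offsets_known_at[L - first_label_num] records whether that entry has been filled in yet; both arrays are declared below.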
If we reach a label by more than one path and an offset differs, we cannot do the elimination. This information is indexed by the difference of the number of the label and the first label number. We can't offset the pointer itself as this can cause problems on machines with segmented memory. The first table is an array of flags that records whether we have yet encountered a label and the second table is an array of arrays, one entry in the latter array for each elimination. */ static int first_label_num; static char *offsets_known_at; static HOST_WIDE_INT (*offsets_at)[NUM_ELIMINABLE_REGS]; /* Number of labels in the current function. */ static int num_labels; static void replace_pseudos_in (rtx *, enum machine_mode, rtx); static void maybe_fix_stack_asms (void); static void copy_reloads (struct insn_chain *); static void calculate_needs_all_insns (int); static int find_reg_rl1 (struct insn_chain *, int); static void find_reload_regs (struct insn_chain *); static void select_reload_regs (void); static void delete_caller_save_insns (void); static void spill_failure (rtx, enum reg_class); static void count_spilled_pseudo (int, int, int); static void delete_dead_insn (rtx); static void alter_reg (int, int); static void set_label_offsets (rtx, rtx, int); static void check_eliminable_occurrences (rtx); static void elimination_effects (rtx, enum machine_mode); static int eliminate_regs_in_insn (rtx, int); static void update_eliminable_offsets (void); static void mark_not_eliminable (rtx, rtx, void *); static void set_initial_elim_offsets (void); static void verify_initial_elim_offsets (void); static void set_initial_label_offsets (void); static void set_offsets_for_label (rtx); static void init_elim_table (void); static void update_eliminables (HARD_REG_SET *); static void spill_hard_reg (unsigned int, int); static int finish_spills (int); static void ior_hard_reg_set (HARD_REG_SET *, HARD_REG_SET *); static void scan_paradoxical_subregs (rtx); static void count_pseudo (int); static void order_regs_for_reload (struct insn_chain *); static void reload_as_needed (int); static void forget_old_reloads_1 (rtx, rtx, void *); static int reload_reg_class_lower (const void *, const void *); static void mark_reload_reg_in_use (unsigned int, int, enum reload_type, enum machine_mode); static void clear_reload_reg_in_use (unsigned int, int, enum reload_type, enum machine_mode); static int reload_reg_free_p (unsigned int, int, enum reload_type); static int reload_reg_free_for_value_p (int, int, int, enum reload_type, rtx, rtx, int, int); static int free_for_value_p (int, enum machine_mode, int, enum reload_type, rtx, rtx, int, int); static int function_invariant_p (rtx); static int reload_reg_reaches_end_p (unsigned int, int, enum reload_type); static int allocate_reload_reg (struct insn_chain *, int, int); static int conflicts_with_override (rtx); static void failed_reload (rtx, int); static int set_reload_reg (int, int); static void choose_reload_regs_init (struct insn_chain *, rtx *); static void choose_reload_regs (struct insn_chain *); static void merge_assigned_reloads (rtx); static void emit_input_reload_insns (struct insn_chain *, struct reload *, rtx, int); static void emit_output_reload_insns (struct insn_chain *, struct reload *, int); static void do_input_reload (struct insn_chain *, struct reload *, int); static void do_output_reload (struct insn_chain *, struct reload *, int); static bool inherit_piecemeal_p (int, int); static void emit_reload_insns (struct insn_chain *); static void 
delete_output_reload (rtx, int, int); static void delete_address_reloads (rtx, rtx); static void delete_address_reloads_1 (rtx, rtx, rtx); static rtx inc_for_reload (rtx, rtx, rtx, int); #ifdef AUTO_INC_DEC static void add_auto_inc_notes (rtx, rtx); #endif static void copy_eh_notes (rtx, rtx); /* Initialize the reload pass once per compilation. */ void init_reload (void) { int i; /* Often (MEM (REG n)) is still valid even if (REG n) is put on the stack. Set spill_indirect_levels to the number of levels such addressing is permitted, zero if it is not permitted at all. */ rtx tem = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, LAST_VIRTUAL_REGISTER + 1), GEN_INT (4))); spill_indirect_levels = 0; while (memory_address_p (QImode, tem)) { spill_indirect_levels++; tem = gen_rtx_MEM (Pmode, tem); } /* See if indirect addressing is valid for (MEM (SYMBOL_REF ...)). */ tem = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (Pmode, "foo")); indirect_symref_ok = memory_address_p (QImode, tem); /* See if reg+reg is a valid (and offsettable) address. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { tem = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM), gen_rtx_REG (Pmode, i)); /* This way, we make sure that reg+reg is an offsettable address. */ tem = plus_constant (tem, 4); if (memory_address_p (QImode, tem)) { double_reg_address_ok = 1; break; } } /* Initialize obstack for our rtl allocation. */ gcc_obstack_init (&reload_obstack); reload_startobj = obstack_alloc (&reload_obstack, 0); INIT_REG_SET (&spilled_pseudos); INIT_REG_SET (&pseudos_counted); VARRAY_RTX_INIT (reg_equiv_memory_loc_varray, 0, "reg_equiv_memory_loc"); } /* List of insn chains that are currently unused. */ static struct insn_chain *unused_insn_chains = 0; /* Allocate an empty insn_chain structure. */ struct insn_chain * new_insn_chain (void) { struct insn_chain *c; if (unused_insn_chains == 0) { c = obstack_alloc (&reload_obstack, sizeof (struct insn_chain)); INIT_REG_SET (&c->live_throughout); INIT_REG_SET (&c->dead_or_set); } else { c = unused_insn_chains; unused_insn_chains = c->next; } c->is_caller_save_insn = 0; c->need_operand_change = 0; c->need_reload = 0; c->need_elim = 0; return c; } /* Small utility function to set all regs in hard reg set TO which are allocated to pseudos in regset FROM. */ void compute_use_by_pseudos (HARD_REG_SET *to, regset from) { unsigned int regno; EXECUTE_IF_SET_IN_REG_SET (from, FIRST_PSEUDO_REGISTER, regno, { int r = reg_renumber[regno]; int nregs; if (r < 0) { /* reload_combine uses the information from BASIC_BLOCK->global_live_at_start, which might still contain registers that have not actually been allocated since they have an equivalence. */ if (! reload_completed) abort (); } else { nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (regno)]; while (nregs-- > 0) SET_HARD_REG_BIT (*to, r + nregs); } }); } /* Replace all pseudos found in LOC with their corresponding equivalences. */ static void replace_pseudos_in (rtx *loc, enum machine_mode mem_mode, rtx usage) { rtx x = *loc; enum rtx_code code; const char *fmt; int i, j; if (! 
x) return; code = GET_CODE (x); if (code == REG) { unsigned int regno = REGNO (x); if (regno < FIRST_PSEUDO_REGISTER) return; x = eliminate_regs (x, mem_mode, usage); if (x != *loc) { *loc = x; replace_pseudos_in (loc, mem_mode, usage); return; } if (reg_equiv_constant[regno]) *loc = reg_equiv_constant[regno]; else if (reg_equiv_mem[regno]) *loc = reg_equiv_mem[regno]; else if (reg_equiv_address[regno]) *loc = gen_rtx_MEM (GET_MODE (x), reg_equiv_address[regno]); else if (!REG_P (regno_reg_rtx[regno]) || REGNO (regno_reg_rtx[regno]) != regno) *loc = regno_reg_rtx[regno]; else abort (); return; } else if (code == MEM) { replace_pseudos_in (& XEXP (x, 0), GET_MODE (x), usage); return; } /* Process each of our operands recursively. */ fmt = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++) if (*fmt == 'e') replace_pseudos_in (&XEXP (x, i), mem_mode, usage); else if (*fmt == 'E') for (j = 0; j < XVECLEN (x, i); j++) replace_pseudos_in (& XVECEXP (x, i, j), mem_mode, usage); } /* Global variables used by reload and its subroutines. */ /* Set during calculate_needs if an insn needs register elimination. */ static int something_needs_elimination; /* Set during calculate_needs if an insn needs an operand changed. */ int something_needs_operands_changed; /* Nonzero means we couldn't get enough spill regs. */ static int failure; /* Main entry point for the reload pass. FIRST is the first insn of the function being compiled. GLOBAL nonzero means we were called from global_alloc and should attempt to reallocate any pseudoregs that we displace from hard regs we will use for reloads. If GLOBAL is zero, we do not have enough information to do that, so any pseudo reg that is spilled must go to the stack. Return value is nonzero if reload failed and we must not do any more for this function. */ int reload (rtx first, int global) { int i; rtx insn; struct elim_table *ep; basic_block bb; /* Make sure even insns with volatile mem refs are recognizable. */ init_recog (); failure = 0; reload_firstobj = obstack_alloc (&reload_obstack, 0); /* Make sure that the last insn in the chain is not something that needs reloading. */ emit_note (NOTE_INSN_DELETED); /* Enable find_equiv_reg to distinguish insns made by reload. */ reload_first_uid = get_max_uid (); #ifdef SECONDARY_MEMORY_NEEDED /* Initialize the secondary memory table. */ clear_secondary_mem (); #endif /* We don't have a stack slot for any spill reg yet. */ memset (spill_stack_slot, 0, sizeof spill_stack_slot); memset (spill_stack_slot_width, 0, sizeof spill_stack_slot_width); /* Initialize the save area information for caller-save, in case some are needed. */ init_save_areas (); /* Compute which hard registers are now in use as homes for pseudo registers. This is done here rather than (eg) in global_alloc because this point is reached even if not optimizing. */ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) mark_home_live (i); /* A function that receives a nonlocal goto must save all call-saved registers. */ if (current_function_has_nonlocal_label) for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (! call_used_regs[i] && ! fixed_regs[i] && ! LOCAL_REGNO (i)) regs_ever_live[i] = 1; #ifdef NON_SAVING_SETJMP /* A function that calls setjmp should save and restore all the call-saved registers on a system where longjmp clobbers them. */ if (NON_SAVING_SETJMP && current_function_calls_setjmp) { for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (! 
call_used_regs[i]) regs_ever_live[i] = 1; } #endif /* Find all the pseudo registers that didn't get hard regs but do have known equivalent constants or memory slots. These include parameters (known equivalent to parameter slots) and cse'd or loop-moved constant memory addresses. Record constant equivalents in reg_equiv_constant so they will be substituted by find_reloads. Record memory equivalents in reg_mem_equiv so they can be substituted eventually by altering the REG-rtx's. */ reg_equiv_constant = xcalloc (max_regno, sizeof (rtx)); reg_equiv_mem = xcalloc (max_regno, sizeof (rtx)); reg_equiv_init = xcalloc (max_regno, sizeof (rtx)); reg_equiv_address = xcalloc (max_regno, sizeof (rtx)); reg_max_ref_width = xcalloc (max_regno, sizeof (int)); reg_old_renumber = xcalloc (max_regno, sizeof (short)); memcpy (reg_old_renumber, reg_renumber, max_regno * sizeof (short)); pseudo_forbidden_regs = xmalloc (max_regno * sizeof (HARD_REG_SET)); pseudo_previous_regs = xcalloc (max_regno, sizeof (HARD_REG_SET)); CLEAR_HARD_REG_SET (bad_spill_regs_global); /* Look for REG_EQUIV notes; record what each pseudo is equivalent to. Also find all paradoxical subregs and find largest such for each pseudo. */ num_eliminable_invariants = 0; for (insn = first; insn; insn = NEXT_INSN (insn)) { rtx set = single_set (insn); /* We may introduce USEs that we want to remove at the end, so we'll mark them with QImode. Make sure there are no previously-marked insns left by say regmove. */ if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE && GET_MODE (insn) != VOIDmode) PUT_MODE (insn, VOIDmode); if (set != 0 && REG_P (SET_DEST (set))) { rtx note = find_reg_note (insn, REG_EQUIV, NULL_RTX); if (note #ifdef LEGITIMATE_PIC_OPERAND_P && (! function_invariant_p (XEXP (note, 0)) || ! flag_pic /* A function invariant is often CONSTANT_P but may include a register. We promise to only pass CONSTANT_P objects to LEGITIMATE_PIC_OPERAND_P. */ || (CONSTANT_P (XEXP (note, 0)) && LEGITIMATE_PIC_OPERAND_P (XEXP (note, 0)))) #endif ) { rtx x = XEXP (note, 0); i = REGNO (SET_DEST (set)); if (i > LAST_VIRTUAL_REGISTER) { /* It can happen that a REG_EQUIV note contains a MEM that is not a legitimate memory operand. As later stages of reload assume that all addresses found in the reg_equiv_* arrays were originally legitimate, we ignore such REG_EQUIV notes. */ if (memory_operand (x, VOIDmode)) { /* Always unshare the equivalence, so we can substitute into this insn without touching the equivalence. */ reg_equiv_memory_loc[i] = copy_rtx (x); } else if (function_invariant_p (x)) { if (GET_CODE (x) == PLUS) { /* This is PLUS of frame pointer and a constant, and might be shared. Unshare it. */ reg_equiv_constant[i] = copy_rtx (x); num_eliminable_invariants++; } else if (x == frame_pointer_rtx || x == arg_pointer_rtx) { reg_equiv_constant[i] = x; num_eliminable_invariants++; } else if (LEGITIMATE_CONSTANT_P (x)) reg_equiv_constant[i] = x; else { reg_equiv_memory_loc[i] = force_const_mem (GET_MODE (SET_DEST (set)), x); if (!reg_equiv_memory_loc[i]) continue; } } else continue; /* If this register is being made equivalent to a MEM and the MEM is not SET_SRC, the equivalencing insn is one with the MEM as a SET_DEST and it occurs later. So don't mark this insn now. */ if (!MEM_P (x) || rtx_equal_p (SET_SRC (set), x)) reg_equiv_init[i] = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[i]); } } } /* If this insn is setting a MEM from a register equivalent to it, this is the equivalencing insn. 
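It is recorded in reg_equiv_init so that, if the pseudo ends up without a hard register and all of its uses are rewritten to use the memory location directly, the then-redundant store can be deleted later.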
*/ else if (set && MEM_P (SET_DEST (set)) && REG_P (SET_SRC (set)) && reg_equiv_memory_loc[REGNO (SET_SRC (set))] && rtx_equal_p (SET_DEST (set), reg_equiv_memory_loc[REGNO (SET_SRC (set))])) reg_equiv_init[REGNO (SET_SRC (set))] = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init[REGNO (SET_SRC (set))]); if (INSN_P (insn)) scan_paradoxical_subregs (PATTERN (insn)); } init_elim_table (); first_label_num = get_first_label_num (); num_labels = max_label_num () - first_label_num; /* Allocate the tables used to store offset information at labels. */ /* We used to use alloca here, but the size of what it would try to allocate would occasionally cause it to exceed the stack limit and cause a core dump. */ offsets_known_at = xmalloc (num_labels); offsets_at = xmalloc (num_labels * NUM_ELIMINABLE_REGS * sizeof (HOST_WIDE_INT)); /* Alter each pseudo-reg rtx to contain its hard reg number. Assign stack slots to the pseudos that lack hard regs or equivalents. Do not touch virtual registers. */ for (i = LAST_VIRTUAL_REGISTER + 1; i < max_regno; i++) alter_reg (i, -1); /* If we have some registers we think can be eliminated, scan all insns to see if there is an insn that sets one of these registers to something other than itself plus a constant. If so, the register cannot be eliminated. Doing this scan here eliminates an extra pass through the main reload loop in the most common case where register elimination cannot be done. */ for (insn = first; insn && num_eliminable; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CALL_INSN) note_stores (PATTERN (insn), mark_not_eliminable, NULL); maybe_fix_stack_asms (); insns_need_reload = 0; something_needs_elimination = 0; /* Initialize to -1, which means take the first spill register. */ last_spill_reg = -1; /* Spill any hard regs that we know we can't eliminate. */ CLEAR_HARD_REG_SET (used_spill_regs); /* There can be multiple ways to eliminate a register; they should be listed adjacently. Elimination for any register fails only if all possible ways fail. */ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ) { int from = ep->from; int can_eliminate = 0; do { can_eliminate |= ep->can_eliminate; ep++; } while (ep < &reg_eliminate[NUM_ELIMINABLE_REGS] && ep->from == from); if (! can_eliminate) spill_hard_reg (from, 1); } #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM if (frame_pointer_needed) spill_hard_reg (HARD_FRAME_POINTER_REGNUM, 1); #endif finish_spills (global); /* From now on, we may need to generate moves differently. We may also allow modifications of insns which cause them to not be recognized. Any such modifications will be cleaned up during reload itself. */ reload_in_progress = 1; /* This loop scans the entire function each go-round and repeats until one repetition spills no additional hard regs. */ for (;;) { int something_changed; int did_spill; HOST_WIDE_INT starting_frame_size; /* Round size of stack frame to stack_alignment_needed. This must be done here because the stack size may be a part of the offset computation for register elimination, and there might have been new stack slots created in the last iteration of this loop. 
*/ if (cfun->stack_alignment_needed) assign_stack_local (BLKmode, 0, cfun->stack_alignment_needed); starting_frame_size = get_frame_size (); set_initial_elim_offsets (); set_initial_label_offsets (); /* For each pseudo register that has an equivalent location defined, try to eliminate any eliminable registers (such as the frame pointer) assuming initial offsets for the replacement register, which is the normal case. If the resulting location is directly addressable, substitute the MEM we just got directly for the old REG. If it is not addressable but is a constant or the sum of a hard reg and constant, it is probably not addressable because the constant is out of range, in that case record the address; we will generate hairy code to compute the address in a register each time it is needed. Similarly if it is a hard register, but one that is not valid as an address register. If the location is not addressable, but does not have one of the above forms, assign a stack slot. We have to do this to avoid the potential of producing lots of reloads if, e.g., a location involves a pseudo that didn't get a hard register and has an equivalent memory location that also involves a pseudo that didn't get a hard register. Perhaps at some point we will improve reload_when_needed handling so this problem goes away. But that's very hairy. */ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) if (reg_renumber[i] < 0 && reg_equiv_memory_loc[i]) { rtx x = eliminate_regs (reg_equiv_memory_loc[i], 0, NULL_RTX); if (strict_memory_address_p (GET_MODE (regno_reg_rtx[i]), XEXP (x, 0))) reg_equiv_mem[i] = x, reg_equiv_address[i] = 0; else if (CONSTANT_P (XEXP (x, 0)) || (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER) || (GET_CODE (XEXP (x, 0)) == PLUS && REG_P (XEXP (XEXP (x, 0), 0)) && (REGNO (XEXP (XEXP (x, 0), 0)) < FIRST_PSEUDO_REGISTER) && CONSTANT_P (XEXP (XEXP (x, 0), 1)))) reg_equiv_address[i] = XEXP (x, 0), reg_equiv_mem[i] = 0; else { /* Make a new stack slot. Then indicate that something changed so we go back and recompute offsets for eliminable registers because the allocation of memory below might change some offset. reg_equiv_{mem,address} will be set up for this pseudo on the next pass around the loop. */ reg_equiv_memory_loc[i] = 0; reg_equiv_init[i] = 0; alter_reg (i, -1); } } if (caller_save_needed) setup_save_areas (); /* If we allocated another stack slot, redo elimination bookkeeping. */ if (starting_frame_size != get_frame_size ()) continue; if (caller_save_needed) { save_call_clobbered_regs (); /* That might have allocated new insn_chain structures. */ reload_firstobj = obstack_alloc (&reload_obstack, 0); } calculate_needs_all_insns (global); CLEAR_REG_SET (&spilled_pseudos); did_spill = 0; something_changed = 0; /* If we allocated any new memory locations, make another pass since it might have changed elimination offsets. */ if (starting_frame_size != get_frame_size ()) something_changed = 1; { HARD_REG_SET to_spill; CLEAR_HARD_REG_SET (to_spill); update_eliminables (&to_spill); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (to_spill, i)) { spill_hard_reg (i, 1); did_spill = 1; /* Regardless of the state of spills, if we previously had a register that we thought we could eliminate, but now can not eliminate, we must run another pass. Consider pseudos which have an entry in reg_equiv_* which reference an eliminable register. 
We must make another pass to update reg_equiv_* so that we do not substitute in the old value from when we thought the elimination could be performed. */ something_changed = 1; } } select_reload_regs (); if (failure) goto failed; if (insns_need_reload != 0 || did_spill) something_changed |= finish_spills (global); if (! something_changed) break; if (caller_save_needed) delete_caller_save_insns (); obstack_free (&reload_obstack, reload_firstobj); } /* If global-alloc was run, notify it of any register eliminations we have done. */ if (global) for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->can_eliminate) mark_elimination (ep->from, ep->to); /* If a pseudo has no hard reg, delete the insns that made the equivalence. If that insn didn't set the register (i.e., it copied the register to memory), just delete that insn instead of the equivalencing insn plus anything now dead. If we call delete_dead_insn on that insn, we may delete the insn that actually sets the register if the register dies there and that is incorrect. */ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) { if (reg_renumber[i] < 0 && reg_equiv_init[i] != 0) { rtx list; for (list = reg_equiv_init[i]; list; list = XEXP (list, 1)) { rtx equiv_insn = XEXP (list, 0); /* If we already deleted the insn or if it may trap, we can't delete it. The latter case shouldn't happen, but can if an insn has a variable address, gets a REG_EH_REGION note added to it, and then gets converted into a load from a constant address. */ if (GET_CODE (equiv_insn) == NOTE || can_throw_internal (equiv_insn)) ; else if (reg_set_p (regno_reg_rtx[i], PATTERN (equiv_insn))) delete_dead_insn (equiv_insn); else SET_INSN_DELETED (equiv_insn); } } } /* Use the reload registers where necessary by generating move instructions to move the must-be-register values into or out of the reload registers. */ if (insns_need_reload != 0 || something_needs_elimination || something_needs_operands_changed) { HOST_WIDE_INT old_frame_size = get_frame_size (); reload_as_needed (global); if (old_frame_size != get_frame_size ()) abort (); if (num_eliminable) verify_initial_elim_offsets (); } /* If we were able to eliminate the frame pointer, show that it is no longer live at the start of any basic block. If it is live by virtue of being in a pseudo, that pseudo will be marked live and hence the frame pointer will be known to be live via that pseudo. */ if (! frame_pointer_needed) FOR_EACH_BB (bb) CLEAR_REGNO_REG_SET (bb->global_live_at_start, HARD_FRAME_POINTER_REGNUM); /* Come here (with failure set nonzero) if we can't get enough spill regs and we decide not to abort about it. */ failed: CLEAR_REG_SET (&spilled_pseudos); reload_in_progress = 0; /* Now eliminate all pseudo regs by modifying them into their equivalent memory references. The REG-rtx's for the pseudos are modified in place, so all insns that used to refer to them now refer to memory. For a reg that has a reg_equiv_address, all those insns were changed by reloading so that no insns refer to it any longer; but the DECL_RTL of a variable decl may refer to it, and if so this causes the debugging info to mention the variable. 
*/ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) { rtx addr = 0; if (reg_equiv_mem[i]) addr = XEXP (reg_equiv_mem[i], 0); if (reg_equiv_address[i]) addr = reg_equiv_address[i]; if (addr) { if (reg_renumber[i] < 0) { rtx reg = regno_reg_rtx[i]; REG_USERVAR_P (reg) = 0; PUT_CODE (reg, MEM); XEXP (reg, 0) = addr; if (reg_equiv_memory_loc[i]) MEM_COPY_ATTRIBUTES (reg, reg_equiv_memory_loc[i]); else { RTX_UNCHANGING_P (reg) = MEM_IN_STRUCT_P (reg) = MEM_SCALAR_P (reg) = 0; MEM_ATTRS (reg) = 0; } } else if (reg_equiv_mem[i]) XEXP (reg_equiv_mem[i], 0) = addr; } } /* We must set reload_completed now since the cleanup_subreg_operands call below will re-recognize each insn and reload may have generated insns which are only valid during and after reload. */ reload_completed = 1; /* Make a pass over all the insns and delete all USEs which we inserted only to tag a REG_EQUAL note on them. Remove all REG_DEAD and REG_UNUSED notes. Delete all CLOBBER insns, except those that refer to the return value and the special mem:BLK CLOBBERs added to prevent the scheduler from misarranging variable-array code, and simplify (subreg (reg)) operands. Also remove all REG_RETVAL and REG_LIBCALL notes since they are no longer useful or accurate. Strip and regenerate REG_INC notes that may have been moved around. */ for (insn = first; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { rtx *pnote; if (GET_CODE (insn) == CALL_INSN) replace_pseudos_in (& CALL_INSN_FUNCTION_USAGE (insn), VOIDmode, CALL_INSN_FUNCTION_USAGE (insn)); if ((GET_CODE (PATTERN (insn)) == USE /* We mark with QImode USEs introduced by reload itself. */ && (GET_MODE (insn) == QImode || find_reg_note (insn, REG_EQUAL, NULL_RTX))) || (GET_CODE (PATTERN (insn)) == CLOBBER && (!MEM_P (XEXP (PATTERN (insn), 0)) || GET_MODE (XEXP (PATTERN (insn), 0)) != BLKmode || (GET_CODE (XEXP (XEXP (PATTERN (insn), 0), 0)) != SCRATCH && XEXP (XEXP (PATTERN (insn), 0), 0) != stack_pointer_rtx)) && (!REG_P (XEXP (PATTERN (insn), 0)) || ! REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0))))) { delete_insn (insn); continue; } /* Some CLOBBERs may survive until here and still reference unassigned pseudos with const equivalent, which may in turn cause ICE in later passes if the reference remains in place. */ if (GET_CODE (PATTERN (insn)) == CLOBBER) replace_pseudos_in (& XEXP (PATTERN (insn), 0), VOIDmode, PATTERN (insn)); pnote = &REG_NOTES (insn); while (*pnote != 0) { if (REG_NOTE_KIND (*pnote) == REG_DEAD || REG_NOTE_KIND (*pnote) == REG_UNUSED || REG_NOTE_KIND (*pnote) == REG_INC || REG_NOTE_KIND (*pnote) == REG_RETVAL || REG_NOTE_KIND (*pnote) == REG_LIBCALL) *pnote = XEXP (*pnote, 1); else pnote = &XEXP (*pnote, 1); } #ifdef AUTO_INC_DEC add_auto_inc_notes (insn, PATTERN (insn)); #endif /* And simplify (subreg (reg)) if it appears as an operand. */ cleanup_subreg_operands (insn); } /* If we are doing stack checking, give a warning if this function's frame size is larger than we expect. */ if (flag_stack_check && ! STACK_CHECK_BUILTIN) { HOST_WIDE_INT size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE; static int verbose_warned = 0; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (regs_ever_live[i] && ! fixed_regs[i] && call_used_regs[i]) size += UNITS_PER_WORD; if (size > STACK_CHECK_MAX_FRAME_SIZE) { warning ("frame size too large for reliable stack checking"); if (! verbose_warned) { warning ("try reducing the number of local variables"); verbose_warned = 1; } } } /* Indicate that we no longer have known memory locations or constants. 
*/ if (reg_equiv_constant) free (reg_equiv_constant); reg_equiv_constant = 0; VARRAY_GROW (reg_equiv_memory_loc_varray, 0); reg_equiv_memory_loc = 0; if (offsets_known_at) free (offsets_known_at); if (offsets_at) free (offsets_at); free (reg_equiv_mem); free (reg_equiv_init); free (reg_equiv_address); free (reg_max_ref_width); free (reg_old_renumber); free (pseudo_previous_regs); free (pseudo_forbidden_regs); CLEAR_HARD_REG_SET (used_spill_regs); for (i = 0; i < n_spills; i++) SET_HARD_REG_BIT (used_spill_regs, spill_regs[i]); /* Free all the insn_chain structures at once. */ obstack_free (&reload_obstack, reload_startobj); unused_insn_chains = 0; fixup_abnormal_edges (); /* Replacing pseudos with their memory equivalents might have created shared rtx. Subsequent passes would get confused by this, so unshare everything here. */ unshare_all_rtl_again (first); #ifdef STACK_BOUNDARY /* init_emit has set the alignment of the hard frame pointer to STACK_BOUNDARY. It is very likely no longer valid if the hard frame pointer was used for register allocation. */ if (!frame_pointer_needed) REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = BITS_PER_UNIT; #endif return failure; } /* Yet another special case. Unfortunately, reg-stack forces people to write incorrect clobbers in asm statements. These clobbers must not cause the register to appear in bad_spill_regs, otherwise we'll call fatal_insn later. We clear the corresponding regnos in the live register sets to avoid this. The whole thing is rather sick, I'm afraid. */ static void maybe_fix_stack_asms (void) { #ifdef STACK_REGS const char *constraints[MAX_RECOG_OPERANDS]; enum machine_mode operand_mode[MAX_RECOG_OPERANDS]; struct insn_chain *chain; for (chain = reload_insn_chain; chain != 0; chain = chain->next) { int i, noperands; HARD_REG_SET clobbered, allowed; rtx pat; if (! INSN_P (chain->insn) || (noperands = asm_noperands (PATTERN (chain->insn))) < 0) continue; pat = PATTERN (chain->insn); if (GET_CODE (pat) != PARALLEL) continue; CLEAR_HARD_REG_SET (clobbered); CLEAR_HARD_REG_SET (allowed); /* First, make a mask of all stack regs that are clobbered. */ for (i = 0; i < XVECLEN (pat, 0); i++) { rtx t = XVECEXP (pat, 0, i); if (GET_CODE (t) == CLOBBER && STACK_REG_P (XEXP (t, 0))) SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0))); } /* Get the operand values and constraints out of the insn. */ decode_asm_operands (pat, recog_data.operand, recog_data.operand_loc, constraints, operand_mode); /* For every operand, see what registers are allowed. */ for (i = 0; i < noperands; i++) { const char *p = constraints[i]; /* For every alternative, we compute the class of registers allowed for reloading in CLS, and merge its contents into the reg set ALLOWED. */ int cls = (int) NO_REGS; for (;;) { char c = *p; if (c == '\0' || c == ',' || c == '#') { /* End of one alternative - mark the regs in the current class, and reset the class. 
*/ IOR_HARD_REG_SET (allowed, reg_class_contents[cls]); cls = NO_REGS; p++; if (c == '#') do { c = *p++; } while (c != '\0' && c != ','); if (c == '\0') break; continue; } switch (c) { case '=': case '+': case '*': case '%': case '?': case '!': case '0': case '1': case '2': case '3': case '4': case 'm': case '<': case '>': case 'V': case 'o': case '&': case 'E': case 'F': case 's': case 'i': case 'n': case 'X': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': break; case 'p': cls = (int) reg_class_subunion[cls] [(int) MODE_BASE_REG_CLASS (VOIDmode)]; break; case 'g': case 'r': cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS]; break; default: if (EXTRA_ADDRESS_CONSTRAINT (c, p)) cls = (int) reg_class_subunion[cls] [(int) MODE_BASE_REG_CLASS (VOIDmode)]; else cls = (int) reg_class_subunion[cls] [(int) REG_CLASS_FROM_CONSTRAINT (c, p)]; } p += CONSTRAINT_LEN (c, p); } } /* Those of the registers which are clobbered, but allowed by the constraints, must be usable as reload registers. So clear them out of the life information. */ AND_HARD_REG_SET (allowed, clobbered); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (allowed, i)) { CLEAR_REGNO_REG_SET (&chain->live_throughout, i); CLEAR_REGNO_REG_SET (&chain->dead_or_set, i); } } #endif } /* Copy the global variables n_reloads and rld into the corresponding elts of CHAIN. */ static void copy_reloads (struct insn_chain *chain) { chain->n_reloads = n_reloads; chain->rld = obstack_alloc (&reload_obstack, n_reloads * sizeof (struct reload)); memcpy (chain->rld, rld, n_reloads * sizeof (struct reload)); reload_insn_firstobj = obstack_alloc (&reload_obstack, 0); } /* Walk the chain of insns, and determine for each whether it needs reloads and/or eliminations. Build the corresponding insns_need_reload list, and set something_needs_elimination as appropriate. */ static void calculate_needs_all_insns (int global) { struct insn_chain **pprev_reload = &insns_need_reload; struct insn_chain *chain, *next = 0; something_needs_elimination = 0; reload_insn_firstobj = obstack_alloc (&reload_obstack, 0); for (chain = reload_insn_chain; chain != 0; chain = next) { rtx insn = chain->insn; next = chain->next; /* Clear out the shortcuts. */ chain->n_reloads = 0; chain->need_elim = 0; chain->need_reload = 0; chain->need_operand_change = 0; /* If this is a label, a JUMP_INSN, or has REG_NOTES (which might include REG_LABEL), we need to see what effects this has on the known offsets at labels. */ if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN || (INSN_P (insn) && REG_NOTES (insn) != 0)) set_label_offsets (insn, insn, 0); if (INSN_P (insn)) { rtx old_body = PATTERN (insn); int old_code = INSN_CODE (insn); rtx old_notes = REG_NOTES (insn); int did_elimination = 0; int operands_changed = 0; rtx set = single_set (insn); /* Skip insns that only set an equivalence. */ if (set && REG_P (SET_DEST (set)) && reg_renumber[REGNO (SET_DEST (set))] < 0 && reg_equiv_constant[REGNO (SET_DEST (set))]) continue; /* If needed, eliminate any eliminable registers. */ if (num_eliminable || num_eliminable_invariants) did_elimination = eliminate_regs_in_insn (insn, 0); /* Analyze the instruction. */ operands_changed = find_reloads (insn, 0, spill_indirect_levels, global, spill_reg_order); /* If a no-op set needs more than one reload, this is likely to be something that needs input address reloads. We can't get rid of this cleanly later, and it is of no use anyway, so discard it now. 
We only do this when expensive_optimizations is enabled, since this complements reload inheritance / output reload deletion, and it can make debugging harder. */ if (flag_expensive_optimizations && n_reloads > 1) { rtx set = single_set (insn); if (set && SET_SRC (set) == SET_DEST (set) && REG_P (SET_SRC (set)) && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER) { delete_insn (insn); /* Delete it from the reload chain. */ if (chain->prev) chain->prev->next = next; else reload_insn_chain = next; if (next) next->prev = chain->prev; chain->next = unused_insn_chains; unused_insn_chains = chain; continue; } } if (num_eliminable) update_eliminable_offsets (); /* Remember for later shortcuts which insns had any reloads or register eliminations. */ chain->need_elim = did_elimination; chain->need_reload = n_reloads > 0; chain->need_operand_change = operands_changed; /* Discard any register replacements done. */ if (did_elimination) { obstack_free (&reload_obstack, reload_insn_firstobj); PATTERN (insn) = old_body; INSN_CODE (insn) = old_code; REG_NOTES (insn) = old_notes; something_needs_elimination = 1; } something_needs_operands_changed |= operands_changed; if (n_reloads != 0) { copy_reloads (chain); *pprev_reload = chain; pprev_reload = &chain->next_need_reload; } } } *pprev_reload = 0; } /* Comparison function for qsort to decide which of two reloads should be handled first. *P1 and *P2 are the reload numbers. */ static int reload_reg_class_lower (const void *r1p, const void *r2p) { int r1 = *(const short *) r1p, r2 = *(const short *) r2p; int t; /* Consider required reloads before optional ones. */ t = rld[r1].optional - rld[r2].optional; if (t != 0) return t; /* Count all solitary classes before non-solitary ones. */ t = ((reg_class_size[(int) rld[r2].class] == 1) - (reg_class_size[(int) rld[r1].class] == 1)); if (t != 0) return t; /* Aside from solitaires, consider all multi-reg groups first. */ t = rld[r2].nregs - rld[r1].nregs; if (t != 0) return t; /* Consider reloads in order of increasing reg-class number. */ t = (int) rld[r1].class - (int) rld[r2].class; if (t != 0) return t; /* If reloads are equally urgent, sort by reload number, so that the results of qsort leave nothing to chance. */ return r1 - r2; } /* The cost of spilling each hard reg. */ static int spill_cost_rl1[FIRST_PSEUDO_REGISTER]; /* When spilling multiple hard registers, we use SPILL_COST for the first spilled hard reg and SPILL_ADD_COST for subsequent regs. SPILL_ADD_COST only the first hard reg for a multi-reg pseudo. */ static int spill_add_cost[FIRST_PSEUDO_REGISTER]; /* Update the spill cost arrays, considering that pseudo REG is live. */ static void count_pseudo (int reg) { int freq = REG_FREQ (reg); int r = reg_renumber[reg]; int nregs; if (REGNO_REG_SET_P (&pseudos_counted, reg) || REGNO_REG_SET_P (&spilled_pseudos, reg)) return; SET_REGNO_REG_SET (&pseudos_counted, reg); if (r < 0) abort (); spill_add_cost[r] += freq; nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (reg)]; while (nregs-- > 0) spill_cost_rl1[r + nregs] += freq; } /* Calculate the SPILL_COST and SPILL_ADD_COST arrays and determine the contents of BAD_SPILL_REGS for the insn described by CHAIN. 
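*/

/* Editorial illustration, not part of the original reload code: a sketch of how SPILL_COST and SPILL_ADD_COST combine when find_reg_rl1 below evaluates a candidate hard register.  The first register of the candidate window is charged its full spill cost; each additional register of a multi-register window only adds its SPILL_ADD_COST.  The array names and the register count are stand-ins, not the real reload data; the block is excluded from compilation.  */
#if 0
#define TOY_NREGS 64                    /* stand-in for FIRST_PSEUDO_REGISTER */

static int toy_spill_cost[TOY_NREGS];     /* cost of spilling starting here */
static int toy_spill_add_cost[TOY_NREGS]; /* extra cost per additional reg  */

/* Cost of using the window [regno, regno + nregs) as a reload register.  */
static int
toy_candidate_cost (int regno, int nregs)
{
  int cost = toy_spill_cost[regno];
  int j;
  for (j = 1; j < nregs; j++)
    cost += toy_spill_add_cost[regno + j];
  return cost;
}
#endif

/*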
*/ static void order_regs_for_reload (struct insn_chain *chain) { int i; HARD_REG_SET used_by_pseudos; HARD_REG_SET used_by_pseudos2; COPY_HARD_REG_SET (bad_spill_regs, fixed_reg_set); memset (spill_cost_rl1, 0, sizeof spill_cost_rl1); memset (spill_add_cost, 0, sizeof spill_add_cost); /* Count number of uses of each hard reg by pseudo regs allocated to it and then order them by decreasing use. First exclude hard registers that are live in or across this insn. */ REG_SET_TO_HARD_REG_SET (used_by_pseudos, &chain->live_throughout); REG_SET_TO_HARD_REG_SET (used_by_pseudos2, &chain->dead_or_set); IOR_HARD_REG_SET (bad_spill_regs, used_by_pseudos); IOR_HARD_REG_SET (bad_spill_regs, used_by_pseudos2); /* Now find out which pseudos are allocated to it, and update hard_reg_n_uses. */ CLEAR_REG_SET (&pseudos_counted); EXECUTE_IF_SET_IN_REG_SET (&chain->live_throughout, FIRST_PSEUDO_REGISTER, i, { count_pseudo (i); }); EXECUTE_IF_SET_IN_REG_SET (&chain->dead_or_set, FIRST_PSEUDO_REGISTER, i, { count_pseudo (i); }); CLEAR_REG_SET (&pseudos_counted); } /* Vector of reload-numbers showing the order in which the reloads should be processed. */ static short reload_order[MAX_RELOADS]; /* This is used to keep track of the spill regs used in one insn. */ static HARD_REG_SET used_spill_regs_local; /* We decided to spill hard register SPILLED, which has a size of SPILLED_NREGS. Determine how pseudo REG, which is live during the insn, is affected. We will add it to SPILLED_PSEUDOS if necessary, and we will update SPILL_COST/SPILL_ADD_COST. */ static void count_spilled_pseudo (int spilled, int spilled_nregs, int reg) { int r = reg_renumber[reg]; int nregs = hard_regno_nregs[r][PSEUDO_REGNO_MODE (reg)]; if (REGNO_REG_SET_P (&spilled_pseudos, reg) || spilled + spilled_nregs <= r || r + nregs <= spilled) return; SET_REGNO_REG_SET (&spilled_pseudos, reg); spill_add_cost[r] -= REG_FREQ (reg); while (nregs-- > 0) spill_cost_rl1[r + nregs] -= REG_FREQ (reg); } /* Find reload register to use for reload number ORDER. */ static int find_reg_rl1 (struct insn_chain *chain, int order) { int rnum = reload_order[order]; struct reload *rl = rld + rnum; int best_cost = INT_MAX; int best_reg = -1; unsigned int i, j; int k; HARD_REG_SET not_usable; HARD_REG_SET used_by_other_reload; COPY_HARD_REG_SET (not_usable, bad_spill_regs); IOR_HARD_REG_SET (not_usable, bad_spill_regs_global); IOR_COMPL_HARD_REG_SET (not_usable, reg_class_contents[rl->class]); CLEAR_HARD_REG_SET (used_by_other_reload); for (k = 0; k < order; k++) { int other = reload_order[k]; if (rld[other].regno >= 0 && reloads_conflict (other, rnum)) for (j = 0; j < rld[other].nregs; j++) SET_HARD_REG_BIT (used_by_other_reload, rld[other].regno + j); } for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { unsigned int regno = i; if (! TEST_HARD_REG_BIT (not_usable, regno) && ! TEST_HARD_REG_BIT (used_by_other_reload, regno) && HARD_REGNO_MODE_OK (regno, rl->mode)) { int this_cost = spill_cost_rl1[regno]; int ok = 1; unsigned int this_nregs = hard_regno_nregs[regno][rl->mode]; for (j = 1; j < this_nregs; j++) { this_cost += spill_add_cost[regno + j]; if ((TEST_HARD_REG_BIT (not_usable, regno + j)) || TEST_HARD_REG_BIT (used_by_other_reload, regno + j)) ok = 0; } if (! ok) continue; if (rl->in && REG_P (rl->in) && REGNO (rl->in) == regno) this_cost--; if (rl->out && REG_P (rl->out) && REGNO (rl->out) == regno) this_cost--; if (this_cost < best_cost /* Among registers with equal cost, prefer caller-saved ones, or use REG_ALLOC_ORDER if it is defined. 
*/ || (this_cost == best_cost #ifdef REG_ALLOC_ORDER && (inv_reg_alloc_order[regno] < inv_reg_alloc_order[best_reg]) #else && call_used_regs[regno] && ! call_used_regs[best_reg] #endif )) { best_reg = regno; best_cost = this_cost; } } } if (best_reg == -1) return 0; if (dump_file) fprintf (dump_file, "Using reg %d for reload %d\n", best_reg, rnum); rl->nregs = hard_regno_nregs[best_reg][rl->mode]; rl->regno = best_reg; EXECUTE_IF_SET_IN_REG_SET (&chain->live_throughout, FIRST_PSEUDO_REGISTER, j, { count_spilled_pseudo (best_reg, rl->nregs, j); }); EXECUTE_IF_SET_IN_REG_SET (&chain->dead_or_set, FIRST_PSEUDO_REGISTER, j, { count_spilled_pseudo (best_reg, rl->nregs, j); }); for (i = 0; i < rl->nregs; i++) { if (spill_cost_rl1[best_reg + i] != 0 || spill_add_cost[best_reg + i] != 0) abort (); SET_HARD_REG_BIT (used_spill_regs_local, best_reg + i); } return 1; } /* Find more reload regs to satisfy the remaining need of an insn, which is given by CHAIN. Do it by ascending class number, since otherwise a reg might be spilled for a big class and might fail to count for a smaller class even though it belongs to that class. */ static void find_reload_regs (struct insn_chain *chain) { int i; /* In order to be certain of getting the registers we need, we must sort the reloads into order of increasing register class. Then our grabbing of reload registers will parallel the process that provided the reload registers. */ for (i = 0; i < chain->n_reloads; i++) { /* Show whether this reload already has a hard reg. */ if (chain->rld[i].reg_rtx) { int regno = REGNO (chain->rld[i].reg_rtx); chain->rld[i].regno = regno; chain->rld[i].nregs = hard_regno_nregs[regno][GET_MODE (chain->rld[i].reg_rtx)]; } else chain->rld[i].regno = -1; reload_order[i] = i; } n_reloads = chain->n_reloads; memcpy (rld, chain->rld, n_reloads * sizeof (struct reload)); CLEAR_HARD_REG_SET (used_spill_regs_local); if (dump_file) fprintf (dump_file, "Spilling for insn %d.\n", INSN_UID (chain->insn)); qsort (reload_order, n_reloads, sizeof (short), reload_reg_class_lower); /* Compute the order of preference for hard registers to spill. */ order_regs_for_reload (chain); for (i = 0; i < n_reloads; i++) { int r = reload_order[i]; /* Ignore reloads that got marked inoperative. */ if ((rld[r].out != 0 || rld[r].in != 0 || rld[r].secondary_p) && ! rld[r].optional && rld[r].regno == -1) if (! find_reg_rl1 (chain, i)) { spill_failure (chain->insn, rld[r].class); failure = 1; return; } } COPY_HARD_REG_SET (chain->used_spill_regs, used_spill_regs_local); IOR_HARD_REG_SET (used_spill_regs, used_spill_regs_local); memcpy (chain->rld, rld, n_reloads * sizeof (struct reload)); } static void select_reload_regs (void) { struct insn_chain *chain; /* Try to satisfy the needs for each insn. */ for (chain = insns_need_reload; chain != 0; chain = chain->next_need_reload) find_reload_regs (chain); } /* Delete all insns that were inserted by emit_caller_save_insns during this iteration. */ static void delete_caller_save_insns (void) { struct insn_chain *c = reload_insn_chain; while (c != 0) { while (c != 0 && c->is_caller_save_insn) { struct insn_chain *next = c->next; rtx insn = c->insn; if (c == reload_insn_chain) reload_insn_chain = next; delete_insn (insn); if (next) next->prev = c->prev; if (c->prev) c->prev->next = next; c->next = unused_insn_chains; unused_insn_chains = c; c = next; } if (c != 0) c = c->next; } } /* Handle the failure to find a register to spill. INSN should be one of the insns which needed this particular spill reg. 
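*/

/* Editorial illustration, not part of the original reload code: the multi-key comparator pattern that reload_reg_class_lower uses above when find_reload_regs sorts reload_order with qsort.  Each key is tried in turn, and the final tie-break on the element index keeps the result deterministic even though qsort itself is not stable.  The struct, its fields, and the array sizes below are invented for the example; the block is excluded from compilation.  */
#if 0
#include <stdlib.h>

struct toy_reload { int optional; int class_size; int nregs; int class_no; };
static struct toy_reload toy_rld[8];
static short toy_order[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

static int
toy_reload_cmp (const void *ap, const void *bp)
{
  int a = *(const short *) ap, b = *(const short *) bp;
  int t;

  /* Required reloads before optional ones.  */
  t = toy_rld[a].optional - toy_rld[b].optional;
  if (t) return t;
  /* Singleton register classes first.  */
  t = (toy_rld[b].class_size == 1) - (toy_rld[a].class_size == 1);
  if (t) return t;
  /* Larger multi-reg groups first, then smaller class numbers.  */
  t = toy_rld[b].nregs - toy_rld[a].nregs;
  if (t) return t;
  t = toy_rld[a].class_no - toy_rld[b].class_no;
  if (t) return t;
  /* Deterministic tie-break by reload number.  */
  return a - b;
}

/* Usage: qsort (toy_order, 8, sizeof (short), toy_reload_cmp);  */
#endif

/*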
*/ static void spill_failure (rtx insn, enum reg_class class) { static const char *const reg_class_names[] = REG_CLASS_NAMES; if (asm_noperands (PATTERN (insn)) >= 0) error_for_asm (insn, "can't find a register in class `%s' while reloading `asm'", reg_class_names[class]); else { error ("unable to find a register to spill in class `%s'", reg_class_names[class]); fatal_insn ("this is the insn:", insn); } } /* Delete an unneeded INSN and any previous insns whose sole purpose is loading data that is dead in INSN. */ static void delete_dead_insn (rtx insn) { rtx prev = prev_real_insn (insn); rtx prev_dest; /* If the previous insn sets a register that dies in our insn, delete it too. */ if (prev && GET_CODE (PATTERN (prev)) == SET && (prev_dest = SET_DEST (PATTERN (prev)), REG_P (prev_dest)) && reg_mentioned_p (prev_dest, PATTERN (insn)) && find_regno_note (insn, REG_DEAD, REGNO (prev_dest)) && ! side_effects_p (SET_SRC (PATTERN (prev)))) delete_dead_insn (prev); SET_INSN_DELETED (insn); } /* Modify the home of pseudo-reg I. The new home is present in reg_renumber[I]. FROM_REG may be the hard reg that the pseudo-reg is being spilled from; or it may be -1, meaning there is none or it is not relevant. This is used so that all pseudos spilled from a given hard reg can share one stack slot. */ static void alter_reg (int i, int from_reg) { /* When outputting an inline function, this can happen for a reg that isn't actually used. */ if (regno_reg_rtx[i] == 0) return; /* If the reg got changed to a MEM at rtl-generation time, ignore it. */ if (!REG_P (regno_reg_rtx[i])) return; /* Modify the reg-rtx to contain the new hard reg number or else to contain its pseudo reg number. */ REGNO (regno_reg_rtx[i]) = reg_renumber[i] >= 0 ? reg_renumber[i] : i; /* If we have a pseudo that is needed but has no hard reg or equivalent, allocate a stack slot for it. */ if (reg_renumber[i] < 0 && REG_N_REFS (i) > 0 && reg_equiv_constant[i] == 0 && reg_equiv_memory_loc[i] == 0) { rtx x; unsigned int inherent_size = PSEUDO_REGNO_BYTES (i); unsigned int total_size = MAX (inherent_size, reg_max_ref_width[i]); int adjust = 0; /* Each pseudo reg has an inherent size which comes from its own mode, and a total size which provides room for paradoxical subregs which refer to the pseudo reg in wider modes. We can use a slot already allocated if it provides both enough inherent space and enough total space. Otherwise, we allocate a new slot, making sure that it has no less inherent space, and no less total space, than the previous slot. */ if (from_reg == -1) { /* No known place to spill from => no slot to reuse. */ x = assign_stack_local (GET_MODE (regno_reg_rtx[i]), total_size, inherent_size == total_size ? 0 : -1); if (BYTES_BIG_ENDIAN) /* Cancel the big-endian correction done in assign_stack_local. Get the address of the beginning of the slot. This is so we can do a big-endian correction unconditionally below. */ adjust = inherent_size - total_size; RTX_UNCHANGING_P (x) = RTX_UNCHANGING_P (regno_reg_rtx[i]); /* Nothing can alias this slot except this pseudo. */ set_mem_alias_set (x, new_alias_set ()); } /* Reuse a stack slot if possible. */ else if (spill_stack_slot[from_reg] != 0 && spill_stack_slot_width[from_reg] >= total_size && (GET_MODE_SIZE (GET_MODE (spill_stack_slot[from_reg])) >= inherent_size)) x = spill_stack_slot[from_reg]; /* Allocate a bigger slot. */ else { /* Compute maximum size needed, both for inherent size and for total size. 
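*/

/* Editorial illustration, not part of the original reload code: the slot-reuse test used by alter_reg above.  A previously allocated spill slot can be shared when it offers at least the pseudo's total size (room for its widest paradoxical subreg) and its mode is at least as wide as the pseudo's inherent size.  The helper name and parameters are invented for the example; the block is excluded from compilation.  */
#if 0
/* INHERENT plays the role of PSEUDO_REGNO_BYTES (i); TOTAL also covers
   reg_max_ref_width[i]; SLOT_WIDTH and SLOT_MODE_SIZE describe the slot
   recorded for the hard reg we are spilling from.  */
static int
toy_can_reuse_slot (unsigned int slot_width, unsigned int slot_mode_size,
                    unsigned int inherent, unsigned int total)
{
  return slot_width >= total && slot_mode_size >= inherent;
}
#endif

/*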
*/ enum machine_mode mode = GET_MODE (regno_reg_rtx[i]); rtx stack_slot; if (spill_stack_slot[from_reg]) { if (GET_MODE_SIZE (GET_MODE (spill_stack_slot[from_reg])) > inherent_size) mode = GET_MODE (spill_stack_slot[from_reg]); if (spill_stack_slot_width[from_reg] > total_size) total_size = spill_stack_slot_width[from_reg]; } /* Make a slot with that size. */ x = assign_stack_local (mode, total_size, inherent_size == total_size ? 0 : -1); stack_slot = x; /* All pseudos mapped to this slot can alias each other. */ if (spill_stack_slot[from_reg]) set_mem_alias_set (x, MEM_ALIAS_SET (spill_stack_slot[from_reg])); else set_mem_alias_set (x, new_alias_set ()); if (BYTES_BIG_ENDIAN) { /* Cancel the big-endian correction done in assign_stack_local. Get the address of the beginning of the slot. This is so we can do a big-endian correction unconditionally below. */ adjust = GET_MODE_SIZE (mode) - total_size; if (adjust) stack_slot = adjust_address_nv (x, mode_for_size (total_size * BITS_PER_UNIT, MODE_INT, 1), adjust); } spill_stack_slot[from_reg] = stack_slot; spill_stack_slot_width[from_reg] = total_size; } /* On a big endian machine, the "address" of the slot is the address of the low part that fits its inherent mode. */ if (BYTES_BIG_ENDIAN && inherent_size < total_size) adjust += (total_size - inherent_size); /* If we have any adjustment to make, or if the stack slot is the wrong mode, make a new stack slot. */ x = adjust_address_nv (x, GET_MODE (regno_reg_rtx[i]), adjust); /* If we have a decl for the original register, set it for the memory. If this is a shared MEM, make a copy. */ if (REG_EXPR (regno_reg_rtx[i]) && TREE_CODE_CLASS (TREE_CODE (REG_EXPR (regno_reg_rtx[i]))) == 'd') { rtx decl = DECL_RTL_IF_SET (REG_EXPR (regno_reg_rtx[i])); /* We can do this only for the DECLs home pseudo, not for any copies of it, since otherwise when the stack slot is reused, nonoverlapping_memrefs_p might think they cannot overlap. */ if (decl && REG_P (decl) && REGNO (decl) == (unsigned) i) { if (from_reg != -1 && spill_stack_slot[from_reg] == x) x = copy_rtx (x); set_mem_attrs_from_reg (x, regno_reg_rtx[i]); } } /* Save the stack slot for later. */ reg_equiv_memory_loc[i] = x; } } /* Mark the slots in regs_ever_live for the hard regs used by pseudo-reg number REGNO. */ void mark_home_live (int regno) { int i, lim; i = reg_renumber[regno]; if (i < 0) return; lim = i + hard_regno_nregs[i][PSEUDO_REGNO_MODE (regno)]; while (i < lim) regs_ever_live[i++] = 1; } /* This function handles the tracking of elimination offsets around branches. X is a piece of RTL being scanned. INSN is the insn that it came from, if any. INITIAL_P is nonzero if we are to set the offset to be the initial offset and zero if we are setting the offset of the label to be the current offset. */ static void set_label_offsets (rtx x, rtx insn, int initial_p) { enum rtx_code code = GET_CODE (x); rtx tem; unsigned int i; struct elim_table *p; switch (code) { case LABEL_REF: if (LABEL_REF_NONLOCAL_P (x)) return; x = XEXP (x, 0); /* ... fall through ... */ case CODE_LABEL: /* If we know nothing about this label, set the desired offsets. Note that this sets the offset at a label to be the offset before a label if we don't know anything about the label. This is not correct for the label after a BARRIER, but is the best guess we can make. If we guessed wrong, we will suppress an elimination that might have been possible had we been able to guess correctly. */ if (! 
offsets_known_at[CODE_LABEL_NUMBER (x) - first_label_num]) { for (i = 0; i < NUM_ELIMINABLE_REGS; i++) offsets_at[CODE_LABEL_NUMBER (x) - first_label_num][i] = (initial_p ? reg_eliminate[i].initial_offset : reg_eliminate[i].offset); offsets_known_at[CODE_LABEL_NUMBER (x) - first_label_num] = 1; } /* Otherwise, if this is the definition of a label and it is preceded by a BARRIER, set our offsets to the known offset of that label. */ else if (x == insn && (tem = prev_nonnote_insn (insn)) != 0 && GET_CODE (tem) == BARRIER) set_offsets_for_label (insn); else /* If neither of the above cases is true, compare each offset with those previously recorded and suppress any eliminations where the offsets disagree. */ for (i = 0; i < NUM_ELIMINABLE_REGS; i++) if (offsets_at[CODE_LABEL_NUMBER (x) - first_label_num][i] != (initial_p ? reg_eliminate[i].initial_offset : reg_eliminate[i].offset)) reg_eliminate[i].can_eliminate = 0; return; case JUMP_INSN: set_label_offsets (PATTERN (insn), insn, initial_p); /* ... fall through ... */ case INSN: case CALL_INSN: /* Any labels mentioned in REG_LABEL notes can be branched to indirectly and hence must have all eliminations at their initial offsets. */ for (tem = REG_NOTES (x); tem; tem = XEXP (tem, 1)) if (REG_NOTE_KIND (tem) == REG_LABEL) set_label_offsets (XEXP (tem, 0), insn, 1); return; case PARALLEL: case ADDR_VEC: case ADDR_DIFF_VEC: /* Each of the labels in the parallel or address vector must be at their initial offsets. We want the first field for PARALLEL and ADDR_VEC and the second field for ADDR_DIFF_VEC. */ for (i = 0; i < (unsigned) XVECLEN (x, code == ADDR_DIFF_VEC); i++) set_label_offsets (XVECEXP (x, code == ADDR_DIFF_VEC, i), insn, initial_p); return; case SET: /* We only care about setting PC. If the source is not RETURN, IF_THEN_ELSE, or a label, disable any eliminations not at their initial offsets. Similarly if any arm of the IF_THEN_ELSE isn't one of those possibilities. For branches to a label, call ourselves recursively. Note that this can disable elimination unnecessarily when we have a non-local goto since it will look like a non-constant jump to someplace in the current function. This isn't a significant problem since such jumps will normally be when all elimination pairs are back to their initial offsets. */ if (SET_DEST (x) != pc_rtx) return; switch (GET_CODE (SET_SRC (x))) { case PC: case RETURN: return; case LABEL_REF: set_label_offsets (XEXP (SET_SRC (x), 0), insn, initial_p); return; case IF_THEN_ELSE: tem = XEXP (SET_SRC (x), 1); if (GET_CODE (tem) == LABEL_REF) set_label_offsets (XEXP (tem, 0), insn, initial_p); else if (GET_CODE (tem) != PC && GET_CODE (tem) != RETURN) break; tem = XEXP (SET_SRC (x), 2); if (GET_CODE (tem) == LABEL_REF) set_label_offsets (XEXP (tem, 0), insn, initial_p); else if (GET_CODE (tem) != PC && GET_CODE (tem) != RETURN) break; return; default: break; } /* If we reach here, all eliminations must be at their initial offset because we are doing a jump to a variable address. */ for (p = reg_eliminate; p < ®_eliminate[NUM_ELIMINABLE_REGS]; p++) if (p->offset != p->initial_offset) p->can_eliminate = 0; break; default: break; } } /* Scan X and replace any eliminable registers (such as fp) with a replacement (such as sp), plus an offset. MEM_MODE is the mode of an enclosing MEM. We need this to know how much to adjust a register for, e.g., PRE_DEC. Also, if we are inside a MEM, we are allowed to replace a sum of a register and the constant zero with the register, which we cannot do outside a MEM. 
In addition, we need to record the fact that a register is referenced outside a MEM. If INSN is an insn, it is the insn containing X. If we replace a REG in a SET_DEST with an equivalent MEM and INSN is nonzero, write a CLOBBER of the pseudo after INSN so find_equiv_regs will know that the REG is being modified. Alternatively, INSN may be a note (an EXPR_LIST or INSN_LIST). That's used when we eliminate in expressions stored in notes. This means, do not set ref_outside_mem even if the reference is outside of MEMs. REG_EQUIV_MEM and REG_EQUIV_ADDRESS contain address that have had replacements done assuming all offsets are at their initial values. If they are not, or if REG_EQUIV_ADDRESS is nonzero for a pseudo we encounter, return the actual location so that find_reloads will do the proper thing. */ rtx eliminate_regs (rtx x, enum machine_mode mem_mode, rtx insn) { enum rtx_code code = GET_CODE (x); struct elim_table *ep; int regno; rtx new; int i, j; const char *fmt; int copied = 0; if (! current_function_decl) return x; switch (code) { case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST: case SYMBOL_REF: case CODE_LABEL: case PC: case CC0: case ASM_INPUT: case ADDR_VEC: case ADDR_DIFF_VEC: case RETURN: return x; case REG: regno = REGNO (x); /* First handle the case where we encounter a bare register that is eliminable. Replace it with a PLUS. */ if (regno < FIRST_PSEUDO_REGISTER) { for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->from_rtx == x && ep->can_eliminate) return plus_constant (ep->to_rtx, ep->previous_offset); } else if (reg_renumber && reg_renumber[regno] < 0 && reg_equiv_constant && reg_equiv_constant[regno] && ! CONSTANT_P (reg_equiv_constant[regno])) return eliminate_regs (copy_rtx (reg_equiv_constant[regno]), mem_mode, insn); return x; /* You might think handling MINUS in a manner similar to PLUS is a good idea. It is not. It has been tried multiple times and every time the change has had to have been reverted. Other parts of reload know a PLUS is special (gen_reload for example) and require special code to handle code a reloaded PLUS operand. Also consider backends where the flags register is clobbered by a MINUS, but we can emit a PLUS that does not clobber flags (IA-32, lea instruction comes to mind). If we try to reload a MINUS, we may kill the flags register that was holding a useful value. So, please before trying to handle MINUS, consider reload as a whole instead of this little section as well as the backend issues. */ case PLUS: /* If this is the sum of an eliminable register and a constant, rework the sum. */ if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER && CONSTANT_P (XEXP (x, 1))) { for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->from_rtx == XEXP (x, 0) && ep->can_eliminate) { /* The only time we want to replace a PLUS with a REG (this occurs when the constant operand of the PLUS is the negative of the offset) is when we are inside a MEM. We won't want to do so at other times because that would change the structure of the insn in a way that reload can't handle. We special-case the commonest situation in eliminate_regs_in_insn, so just replace a PLUS with a PLUS here, unless inside a MEM. 
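*/

/* Editorial illustration, not part of the original reload code: the constant folding performed by the PLUS case just below.  Replacing the eliminable register FROM by TO turns (plus FROM (const_int C)) into (plus TO (const_int C + previous_offset)); inside a MEM, a folded constant of zero lets the bare TO register be used instead.  The helper below only demonstrates that arithmetic; its name and the FROM/TO placeholders are invented, and the block is excluded from compilation.  */
#if 0
#include <stdio.h>

/* Show both outcomes of eliminating FROM in (plus FROM (const_int C))
   when the current elimination offset is OFFSET.  */
static void
toy_show_address_elimination (long c, long offset)
{
  if (c + offset == 0)
    printf ("(mem (plus FROM %ld)) -> (mem TO)\n", c);
  else
    printf ("(plus FROM %ld) -> (plus TO %ld)\n", c, c + offset);
}
#endif

/*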
*/ if (mem_mode != 0 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == - ep->previous_offset) return ep->to_rtx; else return gen_rtx_PLUS (Pmode, ep->to_rtx, plus_constant (XEXP (x, 1), ep->previous_offset)); } /* If the register is not eliminable, we are done since the other operand is a constant. */ return x; } /* If this is part of an address, we want to bring any constant to the outermost PLUS. We will do this by doing register replacement in our operands and seeing if a constant shows up in one of them. Note that there is no risk of modifying the structure of the insn, since we only get called for its operands, thus we are either modifying the address inside a MEM, or something like an address operand of a load-address insn. */ { rtx new0 = eliminate_regs (XEXP (x, 0), mem_mode, insn); rtx new1 = eliminate_regs (XEXP (x, 1), mem_mode, insn); if (reg_renumber && (new0 != XEXP (x, 0) || new1 != XEXP (x, 1))) { /* If one side is a PLUS and the other side is a pseudo that didn't get a hard register but has a reg_equiv_constant, we must replace the constant here since it may no longer be in the position of any operand. */ if (GET_CODE (new0) == PLUS && REG_P (new1) && REGNO (new1) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (new1)] < 0 && reg_equiv_constant != 0 && reg_equiv_constant[REGNO (new1)] != 0) new1 = reg_equiv_constant[REGNO (new1)]; else if (GET_CODE (new1) == PLUS && REG_P (new0) && REGNO (new0) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (new0)] < 0 && reg_equiv_constant[REGNO (new0)] != 0) new0 = reg_equiv_constant[REGNO (new0)]; new = form_sum (new0, new1); /* As above, if we are not inside a MEM we do not want to turn a PLUS into something else. We might try to do so here for an addition of 0 if we aren't optimizing. */ if (! mem_mode && GET_CODE (new) != PLUS) return gen_rtx_PLUS (GET_MODE (x), new, const0_rtx); else return new; } } return x; case MULT: /* If this is the product of an eliminable register and a constant, apply the distribute law and move the constant out so that we have (plus (mult ..) ..). This is needed in order to keep load-address insns valid. This case is pathological. We ignore the possibility of overflow here. */ if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER && GET_CODE (XEXP (x, 1)) == CONST_INT) for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->from_rtx == XEXP (x, 0) && ep->can_eliminate) { if (! mem_mode /* Refs inside notes don't count for this purpose. */ && ! (insn != 0 && (GET_CODE (insn) == EXPR_LIST || GET_CODE (insn) == INSN_LIST))) ep->ref_outside_mem = 1; return plus_constant (gen_rtx_MULT (Pmode, ep->to_rtx, XEXP (x, 1)), ep->previous_offset * INTVAL (XEXP (x, 1))); } /* ... fall through ... */ case CALL: case COMPARE: /* See comments before PLUS about handling MINUS. */ case MINUS: case DIV: case UDIV: case MOD: case UMOD: case AND: case IOR: case XOR: case ROTATERT: case ROTATE: case ASHIFTRT: case LSHIFTRT: case ASHIFT: case NE: case EQ: case GE: case GT: case GEU: case GTU: case LE: case LT: case LEU: case LTU: { rtx new0 = eliminate_regs (XEXP (x, 0), mem_mode, insn); rtx new1 = XEXP (x, 1) ? eliminate_regs (XEXP (x, 1), mem_mode, insn) : 0; if (new0 != XEXP (x, 0) || new1 != XEXP (x, 1)) return gen_rtx_fmt_ee (code, GET_MODE (x), new0, new1); } return x; case EXPR_LIST: /* If we have something in XEXP (x, 0), the usual case, eliminate it. 
*/ if (XEXP (x, 0)) { new = eliminate_regs (XEXP (x, 0), mem_mode, insn); if (new != XEXP (x, 0)) { /* If this is a REG_DEAD note, it is not valid anymore. Using the eliminated version could result in creating a REG_DEAD note for the stack or frame pointer. */ if (GET_MODE (x) == REG_DEAD) return (XEXP (x, 1) ? eliminate_regs (XEXP (x, 1), mem_mode, insn) : NULL_RTX); x = gen_rtx_EXPR_LIST (REG_NOTE_KIND (x), new, XEXP (x, 1)); } } /* ... fall through ... */ case INSN_LIST: /* Now do eliminations in the rest of the chain. If this was an EXPR_LIST, this might result in allocating more memory than is strictly needed, but it simplifies the code. */ if (XEXP (x, 1)) { new = eliminate_regs (XEXP (x, 1), mem_mode, insn); if (new != XEXP (x, 1)) return gen_rtx_fmt_ee (GET_CODE (x), GET_MODE (x), XEXP (x, 0), new); } return x; case PRE_INC: case POST_INC: case PRE_DEC: case POST_DEC: case STRICT_LOW_PART: case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND: case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT: case FIX: case UNSIGNED_FIX: case UNSIGNED_FLOAT: case ABS: case SQRT: case FFS: case CLZ: case CTZ: case POPCOUNT: case PARITY: new = eliminate_regs (XEXP (x, 0), mem_mode, insn); if (new != XEXP (x, 0)) return gen_rtx_fmt_e (code, GET_MODE (x), new); return x; case SUBREG: /* Similar to above processing, but preserve SUBREG_BYTE. Convert (subreg (mem)) to (mem) if not paradoxical. Also, if we have a non-paradoxical (subreg (pseudo)) and the pseudo didn't get a hard reg, we must replace this with the eliminated version of the memory location because push_reload may do the replacement in certain circumstances. */ if (REG_P (SUBREG_REG (x)) && (GET_MODE_SIZE (GET_MODE (x)) <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) && reg_equiv_memory_loc != 0 && reg_equiv_memory_loc[REGNO (SUBREG_REG (x))] != 0) { new = SUBREG_REG (x); } else new = eliminate_regs (SUBREG_REG (x), mem_mode, insn); if (new != SUBREG_REG (x)) { int x_size = GET_MODE_SIZE (GET_MODE (x)); int new_size = GET_MODE_SIZE (GET_MODE (new)); if (MEM_P (new) && ((x_size < new_size #ifdef WORD_REGISTER_OPERATIONS /* On these machines, combine can create rtl of the form (set (subreg:m1 (reg:m2 R) 0) ...) where m1 < m2, and expects something interesting to happen to the entire word. Moreover, it will use the (reg:m2 R) later, expecting all bits to be preserved. So if the number of words is the same, preserve the subreg so that push_reload can see it. */ && ! ((x_size - 1) / UNITS_PER_WORD == (new_size -1 ) / UNITS_PER_WORD) #endif ) || x_size == new_size) ) return adjust_address_nv (new, GET_MODE (x), SUBREG_BYTE (x)); else return gen_rtx_SUBREG (GET_MODE (x), new, SUBREG_BYTE (x)); } return x; case MEM: /* Our only special processing is to pass the mode of the MEM to our recursive call and copy the flags. While we are here, handle this case more efficiently. */ return replace_equiv_address_nv (x, eliminate_regs (XEXP (x, 0), GET_MODE (x), insn)); case USE: /* Handle insn_list USE that a call to a pure function may generate. */ new = eliminate_regs (XEXP (x, 0), 0, insn); if (new != XEXP (x, 0)) return gen_rtx_USE (GET_MODE (x), new); return x; case CLOBBER: case ASM_OPERANDS: case SET: abort (); default: break; } /* Process each of our operands recursively. If any have changed, make a copy of the rtx. */ fmt = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++) { if (*fmt == 'e') { new = eliminate_regs (XEXP (x, i), mem_mode, insn); if (new != XEXP (x, i) && ! 
copied) { rtx new_x = rtx_alloc (code); memcpy (new_x, x, RTX_SIZE (code)); x = new_x; copied = 1; } XEXP (x, i) = new; } else if (*fmt == 'E') { int copied_vec = 0; for (j = 0; j < XVECLEN (x, i); j++) { new = eliminate_regs (XVECEXP (x, i, j), mem_mode, insn); if (new != XVECEXP (x, i, j) && ! copied_vec) { rtvec new_v = gen_rtvec_v (XVECLEN (x, i), XVEC (x, i)->elem); if (! copied) { rtx new_x = rtx_alloc (code); memcpy (new_x, x, RTX_SIZE (code)); x = new_x; copied = 1; } XVEC (x, i) = new_v; copied_vec = 1; } XVECEXP (x, i, j) = new; } } } return x; } /* Scan rtx X for modifications of elimination target registers. Update the table of eliminables to reflect the changed state. MEM_MODE is the mode of an enclosing MEM rtx, or VOIDmode if not within a MEM. */ static void elimination_effects (rtx x, enum machine_mode mem_mode) { enum rtx_code code = GET_CODE (x); struct elim_table *ep; int regno; int i, j; const char *fmt; switch (code) { case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST: case SYMBOL_REF: case CODE_LABEL: case PC: case CC0: case ASM_INPUT: case ADDR_VEC: case ADDR_DIFF_VEC: case RETURN: return; case REG: regno = REGNO (x); /* First handle the case where we encounter a bare register that is eliminable. Replace it with a PLUS. */ if (regno < FIRST_PSEUDO_REGISTER) { for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->from_rtx == x && ep->can_eliminate) { if (! mem_mode) ep->ref_outside_mem = 1; return; } } else if (reg_renumber[regno] < 0 && reg_equiv_constant && reg_equiv_constant[regno] && ! function_invariant_p (reg_equiv_constant[regno])) elimination_effects (reg_equiv_constant[regno], mem_mode); return; case PRE_INC: case POST_INC: case PRE_DEC: case POST_DEC: case POST_MODIFY: case PRE_MODIFY: for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->to_rtx == XEXP (x, 0)) { int size = GET_MODE_SIZE (mem_mode); /* If more bytes than MEM_MODE are pushed, account for them. */ #ifdef PUSH_ROUNDING if (ep->to_rtx == stack_pointer_rtx) size = PUSH_ROUNDING (size); #endif if (code == PRE_DEC || code == POST_DEC) ep->offset += size; else if (code == PRE_INC || code == POST_INC) ep->offset -= size; else if ((code == PRE_MODIFY || code == POST_MODIFY) && GET_CODE (XEXP (x, 1)) == PLUS && XEXP (x, 0) == XEXP (XEXP (x, 1), 0) && CONSTANT_P (XEXP (XEXP (x, 1), 1))) ep->offset -= INTVAL (XEXP (XEXP (x, 1), 1)); } /* These two aren't unary operators. */ if (code == POST_MODIFY || code == PRE_MODIFY) break; /* Fall through to generic unary operation case. */ case STRICT_LOW_PART: case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND: case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT: case FIX: case UNSIGNED_FIX: case UNSIGNED_FLOAT: case ABS: case SQRT: case FFS: case CLZ: case CTZ: case POPCOUNT: case PARITY: elimination_effects (XEXP (x, 0), mem_mode); return; case SUBREG: if (REG_P (SUBREG_REG (x)) && (GET_MODE_SIZE (GET_MODE (x)) <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) && reg_equiv_memory_loc != 0 && reg_equiv_memory_loc[REGNO (SUBREG_REG (x))] != 0) return; elimination_effects (SUBREG_REG (x), mem_mode); return; case USE: /* If using a register that is the source of an eliminate we still think can be performed, note it cannot be performed since we don't know how this register is used. 
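*/

/* Editorial illustration, not part of the original reload code: how the auto-increment cases earlier in elimination_effects track the running offset when the TO register of an elimination is auto-modified.  A push through (pre_dec sp) moves the register down by the (possibly PUSH_ROUNDING-adjusted) size, so the elimination offset grows; an increment shrinks it.  The enum and helper below are invented stand-ins and the block is excluded from compilation.  */
#if 0
enum toy_autoinc { TOY_PRE_DEC, TOY_POST_DEC, TOY_PRE_INC, TOY_POST_INC };

static long
toy_adjust_elim_offset (long offset, enum toy_autoinc code, long size)
{
  if (code == TOY_PRE_DEC || code == TOY_POST_DEC)
    return offset + size;   /* register moved down: offset grows  */
  else
    return offset - size;   /* register moved up: offset shrinks  */
}
#endif

/*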
*/ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->from_rtx == XEXP (x, 0)) ep->can_eliminate = 0; elimination_effects (XEXP (x, 0), mem_mode); return; case CLOBBER: /* If clobbering a register that is the replacement register for an elimination we still think can be performed, note that it cannot be performed. Otherwise, we need not be concerned about it. */ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->to_rtx == XEXP (x, 0)) ep->can_eliminate = 0; elimination_effects (XEXP (x, 0), mem_mode); return; case SET: /* Check for setting a register that we know about. */ if (REG_P (SET_DEST (x))) { /* See if this is setting the replacement register for an elimination. If DEST is the hard frame pointer, we do nothing because we assume that all assignments to the frame pointer are for non-local gotos and are being done at a time when they are valid and do not disturb anything else. Some machines want to eliminate a fake argument pointer (or even a fake frame pointer) with either the real frame or the stack pointer. Assignments to the hard frame pointer must not prevent this elimination. */ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->to_rtx == SET_DEST (x) && SET_DEST (x) != hard_frame_pointer_rtx) { /* If it is being incremented, adjust the offset. Otherwise, this elimination can't be done. */ rtx src = SET_SRC (x); if (GET_CODE (src) == PLUS && XEXP (src, 0) == SET_DEST (x) && GET_CODE (XEXP (src, 1)) == CONST_INT) ep->offset -= INTVAL (XEXP (src, 1)); else ep->can_eliminate = 0; } } elimination_effects (SET_DEST (x), 0); elimination_effects (SET_SRC (x), 0); return; case MEM: /* Our only special processing is to pass the mode of the MEM to our recursive call. */ elimination_effects (XEXP (x, 0), GET_MODE (x)); return; default: break; } fmt = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++) { if (*fmt == 'e') elimination_effects (XEXP (x, i), mem_mode); else if (*fmt == 'E') for (j = 0; j < XVECLEN (x, i); j++) elimination_effects (XVECEXP (x, i, j), mem_mode); } } /* Descend through rtx X and verify that no references to eliminable registers remain. If any do remain, mark the involved register as not eliminable. */ static void check_eliminable_occurrences (rtx x) { const char *fmt; int i; enum rtx_code code; if (x == 0) return; code = GET_CODE (x); if (code == REG && REGNO (x) < FIRST_PSEUDO_REGISTER) { struct elim_table *ep; for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->from_rtx == x) ep->can_eliminate = 0; return; } fmt = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++) { if (*fmt == 'e') check_eliminable_occurrences (XEXP (x, i)); else if (*fmt == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) check_eliminable_occurrences (XVECEXP (x, i, j)); } } } /* Scan INSN and eliminate all eliminable registers in it. If REPLACE is nonzero, do the replacement destructively. Also delete the insn as dead if it is setting an eliminable register. If REPLACE is zero, do all our allocations in reload_obstack. If no eliminations were done and this insn doesn't require any elimination processing (these are not identical conditions: it might be updating sp, but not referencing fp; this needs to be seen during reload_as_needed so that the offset between fp and sp can be taken into consideration), zero is returned. Otherwise, 1 is returned. 
*/ static int eliminate_regs_in_insn (rtx insn, int replace) { int icode = recog_memoized (insn); rtx old_body = PATTERN (insn); int insn_is_asm = asm_noperands (old_body) >= 0; rtx old_set = single_set (insn); rtx new_body; int val = 0; int i; rtx substed_operand[MAX_RECOG_OPERANDS]; rtx orig_operand[MAX_RECOG_OPERANDS]; struct elim_table *ep; rtx plus_src; if (! insn_is_asm && icode < 0) { if (GET_CODE (PATTERN (insn)) == USE || GET_CODE (PATTERN (insn)) == CLOBBER || GET_CODE (PATTERN (insn)) == ADDR_VEC || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC || GET_CODE (PATTERN (insn)) == ASM_INPUT) return 0; abort (); } if (old_set != 0 && REG_P (SET_DEST (old_set)) && REGNO (SET_DEST (old_set)) < FIRST_PSEUDO_REGISTER) { /* Check for setting an eliminable register. */ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->from_rtx == SET_DEST (old_set) && ep->can_eliminate) { #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM /* If this is setting the frame pointer register to the hardware frame pointer register and this is an elimination that will be done (tested above), this insn is really adjusting the frame pointer downward to compensate for the adjustment done before a nonlocal goto. */ if (ep->from == FRAME_POINTER_REGNUM && ep->to == HARD_FRAME_POINTER_REGNUM) { rtx base = SET_SRC (old_set); rtx base_insn = insn; HOST_WIDE_INT offset = 0; while (base != ep->to_rtx) { rtx prev_insn, prev_set; if (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) == CONST_INT) { offset += INTVAL (XEXP (base, 1)); base = XEXP (base, 0); } else if ((prev_insn = prev_nonnote_insn (base_insn)) != 0 && (prev_set = single_set (prev_insn)) != 0 && rtx_equal_p (SET_DEST (prev_set), base)) { base = SET_SRC (prev_set); base_insn = prev_insn; } else break; } if (base == ep->to_rtx) { rtx src = plus_constant (ep->to_rtx, offset - ep->offset); new_body = old_body; if (! replace) { new_body = copy_insn (old_body); if (REG_NOTES (insn)) REG_NOTES (insn) = copy_insn_1 (REG_NOTES (insn)); } PATTERN (insn) = new_body; old_set = single_set (insn); /* First see if this insn remains valid when we make the change. If not, keep the INSN_CODE the same and let reload fix it up. */ validate_change (insn, &SET_SRC (old_set), src, 1); validate_change (insn, &SET_DEST (old_set), ep->to_rtx, 1); if (! apply_change_group ()) { SET_SRC (old_set) = src; SET_DEST (old_set) = ep->to_rtx; } val = 1; goto done; } } #endif /* In this case this insn isn't serving a useful purpose. We will delete it in reload_as_needed once we know that this elimination is, in fact, being done. If REPLACE isn't set, we can't delete this insn, but needn't process it since it won't be used unless something changes. */ if (replace) { delete_dead_insn (insn); return 1; } val = 1; goto done; } } /* We allow one special case which happens to work on all machines we currently support: a single set with the source or a REG_EQUAL note being a PLUS of an eliminable register and a constant. */ plus_src = 0; if (old_set && REG_P (SET_DEST (old_set))) { /* First see if the source is of the form (plus (reg) CST). */ if (GET_CODE (SET_SRC (old_set)) == PLUS && REG_P (XEXP (SET_SRC (old_set), 0)) && GET_CODE (XEXP (SET_SRC (old_set), 1)) == CONST_INT && REGNO (XEXP (SET_SRC (old_set), 0)) < FIRST_PSEUDO_REGISTER) plus_src = SET_SRC (old_set); else if (REG_P (SET_SRC (old_set))) { /* Otherwise, see if we have a REG_EQUAL note of the form (plus (reg) CST). 
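*/

/* Editorial illustration, not part of the original reload code: the rewrite applied to the special case introduced above, a single set whose source (or REG_EQUAL note) is a PLUS of an eliminable register and a constant.  After folding in the elimination offset, a zero displacement lets the insn become a plain register copy and be re-recognized (often as a load-address); otherwise the PLUS is kept with the TO register and the folded constant.  The helper and the DEST/FROM/TO placeholders below are invented, and the block is excluded from compilation.  */
#if 0
#include <stdio.h>

static void
toy_show_set_plus_elimination (long cst, long elim_offset)
{
  long folded = cst + elim_offset;
  if (folded == 0)
    printf ("(set DEST (plus FROM %ld)) -> (set DEST TO)\n", cst);
  else
    printf ("(set DEST (plus FROM %ld)) -> (set DEST (plus TO %ld))\n",
            cst, folded);
}
#endif

/*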
*/ rtx links; for (links = REG_NOTES (insn); links; links = XEXP (links, 1)) { if (REG_NOTE_KIND (links) == REG_EQUAL && GET_CODE (XEXP (links, 0)) == PLUS && REG_P (XEXP (XEXP (links, 0), 0)) && GET_CODE (XEXP (XEXP (links, 0), 1)) == CONST_INT && REGNO (XEXP (XEXP (links, 0), 0)) < FIRST_PSEUDO_REGISTER) { plus_src = XEXP (links, 0); break; } } } } if (plus_src) { rtx reg = XEXP (plus_src, 0); HOST_WIDE_INT offset = INTVAL (XEXP (plus_src, 1)); for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->from_rtx == reg && ep->can_eliminate) { offset += ep->offset; if (offset == 0) { int num_clobbers; /* We assume here that if we need a PARALLEL with CLOBBERs for this assignment, we can do with the MATCH_SCRATCHes that add_clobbers allocates. There's not much we can do if that doesn't work. */ PATTERN (insn) = gen_rtx_SET (VOIDmode, SET_DEST (old_set), ep->to_rtx); num_clobbers = 0; INSN_CODE (insn) = recog (PATTERN (insn), insn, &num_clobbers); if (num_clobbers) { rtvec vec = rtvec_alloc (num_clobbers + 1); vec->elem[0] = PATTERN (insn); PATTERN (insn) = gen_rtx_PARALLEL (VOIDmode, vec); add_clobbers (PATTERN (insn), INSN_CODE (insn)); } if (INSN_CODE (insn) < 0) abort (); } /* If we have a nonzero offset, and the source is already a simple REG, the following transformation would increase the cost of the insn by replacing a simple REG with (plus (reg sp) CST). So try only when plus_src comes from old_set proper, not REG_NOTES. */ else if (SET_SRC (old_set) == plus_src) { new_body = old_body; if (! replace) { new_body = copy_insn (old_body); if (REG_NOTES (insn)) REG_NOTES (insn) = copy_insn_1 (REG_NOTES (insn)); } PATTERN (insn) = new_body; old_set = single_set (insn); XEXP (SET_SRC (old_set), 0) = ep->to_rtx; XEXP (SET_SRC (old_set), 1) = GEN_INT (offset); } else break; val = 1; /* This can't have an effect on elimination offsets, so skip right to the end. */ goto done; } } /* Determine the effects of this insn on elimination offsets. */ elimination_effects (old_body, 0); /* Eliminate all eliminable registers occurring in operands that can be handled by reload. */ extract_insn (insn); for (i = 0; i < recog_data.n_operands; i++) { orig_operand[i] = recog_data.operand[i]; substed_operand[i] = recog_data.operand[i]; /* For an asm statement, every operand is eliminable. */ if (insn_is_asm || insn_data[icode].operand[i].eliminable) { /* Check for setting a register that we know about. */ if (recog_data.operand_type[i] != OP_IN && REG_P (orig_operand[i])) { /* If we are assigning to a register that can be eliminated, it must be as part of a PARALLEL, since the code above handles single SETs. We must indicate that we can no longer eliminate this reg. */ for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->from_rtx == orig_operand[i]) ep->can_eliminate = 0; } substed_operand[i] = eliminate_regs (recog_data.operand[i], 0, replace ? insn : NULL_RTX); if (substed_operand[i] != orig_operand[i]) val = 1; /* Terminate the search in check_eliminable_occurrences at this point. */ *recog_data.operand_loc[i] = 0; /* If an output operand changed from a REG to a MEM and INSN is an insn, write a CLOBBER insn. 
*/ if (recog_data.operand_type[i] != OP_IN && REG_P (orig_operand[i]) && MEM_P (substed_operand[i]) && replace) emit_insn_after (gen_rtx_CLOBBER (VOIDmode, orig_operand[i]), insn); } } for (i = 0; i < recog_data.n_dups; i++) *recog_data.dup_loc[i] = *recog_data.operand_loc[(int) recog_data.dup_num[i]]; /* If any eliminable remain, they aren't eliminable anymore. */ check_eliminable_occurrences (old_body); /* Substitute the operands; the new values are in the substed_operand array. */ for (i = 0; i < recog_data.n_operands; i++) *recog_data.operand_loc[i] = substed_operand[i]; for (i = 0; i < recog_data.n_dups; i++) *recog_data.dup_loc[i] = substed_operand[(int) recog_data.dup_num[i]]; /* If we are replacing a body that was a (set X (plus Y Z)), try to re-recognize the insn. We do this in case we had a simple addition but now can do this as a load-address. This saves an insn in this common case. If re-recognition fails, the old insn code number will still be used, and some register operands may have changed into PLUS expressions. These will be handled by find_reloads by loading them into a register again. */ if (val) { /* If we aren't replacing things permanently and we changed something, make another copy to ensure that all the RTL is new. Otherwise things can go wrong if find_reload swaps commutative operands and one is inside RTL that has been copied while the other is not. */ new_body = old_body; if (! replace) { new_body = copy_insn (old_body); if (REG_NOTES (insn)) REG_NOTES (insn) = copy_insn_1 (REG_NOTES (insn)); } PATTERN (insn) = new_body; /* If we had a move insn but now we don't, rerecognize it. This will cause spurious re-recognition if the old move had a PARALLEL since the new one still will, but we can't call single_set without having put NEW_BODY into the insn and the re-recognition won't hurt in this rare case. */ /* ??? Why this huge if statement - why don't we just rerecognize the thing always? */ if (! insn_is_asm && old_set != 0 && ((REG_P (SET_SRC (old_set)) && (GET_CODE (new_body) != SET || !REG_P (SET_SRC (new_body)))) /* If this was a load from or store to memory, compare the MEM in recog_data.operand to the one in the insn. If they are not equal, then rerecognize the insn. */ || (old_set != 0 && ((MEM_P (SET_SRC (old_set)) && SET_SRC (old_set) != recog_data.operand[1]) || (MEM_P (SET_DEST (old_set)) && SET_DEST (old_set) != recog_data.operand[0]))) /* If this was an add insn before, rerecognize. */ || GET_CODE (SET_SRC (old_set)) == PLUS)) { int new_icode = recog (PATTERN (insn), insn, 0); if (new_icode < 0) INSN_CODE (insn) = icode; } } /* Restore the old body. If there were any changes to it, we made a copy of it while the changes were still in place, so we'll correctly return a modified insn below. */ if (! replace) { /* Restore the old body. */ for (i = 0; i < recog_data.n_operands; i++) *recog_data.operand_loc[i] = orig_operand[i]; for (i = 0; i < recog_data.n_dups; i++) *recog_data.dup_loc[i] = orig_operand[(int) recog_data.dup_num[i]]; } /* Update all elimination pairs to reflect the status after the current insn. The changes we make were determined by the earlier call to elimination_effects. We also detect cases where register elimination cannot be done, namely, if a register would be both changed and referenced outside a MEM in the resulting insn since such an insn is often undefined and, even if not, we cannot know what meaning will be given to it. 
Note that it is valid to have a register used in an address in an insn that changes it (presumably with a pre- or post-increment or decrement). If anything changes, return nonzero. */ for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) { if (ep->previous_offset != ep->offset && ep->ref_outside_mem) ep->can_eliminate = 0; ep->ref_outside_mem = 0; if (ep->previous_offset != ep->offset) val = 1; } done: /* If we changed something, perform elimination in REG_NOTES. This is needed even when REPLACE is zero because a REG_DEAD note might refer to a register that we eliminate and could cause a different number of spill registers to be needed in the final reload pass than in the pre-passes. */ if (val && REG_NOTES (insn) != 0) REG_NOTES (insn) = eliminate_regs (REG_NOTES (insn), 0, REG_NOTES (insn)); return val; } /* Loop through all elimination pairs. Recalculate the number not at initial offset. Compute the maximum offset (minimum offset if the stack does not grow downward) for each elimination pair. */ static void update_eliminable_offsets (void) { struct elim_table *ep; num_not_at_initial_offset = 0; for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) { ep->previous_offset = ep->offset; if (ep->can_eliminate && ep->offset != ep->initial_offset) num_not_at_initial_offset++; } } /* Given X, a SET or CLOBBER of DEST, if DEST is the target of a register replacement we currently believe is valid, mark it as not eliminable if X modifies DEST in any way other than by adding a constant integer to it. If DEST is the frame pointer, we do nothing because we assume that all assignments to the hard frame pointer are nonlocal gotos and are being done at a time when they are valid and do not disturb anything else. Some machines want to eliminate a fake argument pointer with either the frame or stack pointer. Assignments to the hard frame pointer must not prevent this elimination. Called via note_stores from reload before starting its passes to scan the insns of the function. */ static void mark_not_eliminable (rtx dest, rtx x, void *data ATTRIBUTE_UNUSED) { unsigned int i; /* A SUBREG of a hard register here is just changing its mode. We should not see a SUBREG of an eliminable hard register, but check just in case. */ if (GET_CODE (dest) == SUBREG) dest = SUBREG_REG (dest); if (dest == hard_frame_pointer_rtx) return; for (i = 0; i < NUM_ELIMINABLE_REGS; i++) if (reg_eliminate[i].can_eliminate && dest == reg_eliminate[i].to_rtx && (GET_CODE (x) != SET || GET_CODE (SET_SRC (x)) != PLUS || XEXP (SET_SRC (x), 0) != dest || GET_CODE (XEXP (SET_SRC (x), 1)) != CONST_INT)) { reg_eliminate[i].can_eliminate_previous = reg_eliminate[i].can_eliminate = 0; num_eliminable--; } } /* Verify that the initial elimination offsets did not change since the last call to set_initial_elim_offsets. This is used to catch cases where something illegal happened during reload_as_needed that could cause incorrect code to be generated if we did not check for it. */ static void verify_initial_elim_offsets (void) { HOST_WIDE_INT t; #ifdef ELIMINABLE_REGS struct elim_table *ep; for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) { INITIAL_ELIMINATION_OFFSET (ep->from, ep->to, t); if (t != ep->initial_offset) abort (); } #else INITIAL_FRAME_POINTER_OFFSET (t); if (t != reg_eliminate[0].initial_offset) abort (); #endif } /* Reset all offsets on eliminable registers to their initial values. 
*/ static void set_initial_elim_offsets (void) { struct elim_table *ep = reg_eliminate; #ifdef ELIMINABLE_REGS for (; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) { INITIAL_ELIMINATION_OFFSET (ep->from, ep->to, ep->initial_offset); ep->previous_offset = ep->offset = ep->initial_offset; } #else INITIAL_FRAME_POINTER_OFFSET (ep->initial_offset); ep->previous_offset = ep->offset = ep->initial_offset; #endif num_not_at_initial_offset = 0; } /* Initialize the known label offsets. Set a known offset for each forced label to be at the initial offset of each elimination. We do this because we assume that all computed jumps occur from a location where each elimination is at its initial offset. For all other labels, show that we don't know the offsets. */ static void set_initial_label_offsets (void) { rtx x; memset (offsets_known_at, 0, num_labels); for (x = forced_labels; x; x = XEXP (x, 1)) if (XEXP (x, 0)) set_label_offsets (XEXP (x, 0), NULL_RTX, 1); } /* Set all elimination offsets to the known values for the code label given by INSN. */ static void set_offsets_for_label (rtx insn) { unsigned int i; int label_nr = CODE_LABEL_NUMBER (insn); struct elim_table *ep; num_not_at_initial_offset = 0; for (i = 0, ep = reg_eliminate; i < NUM_ELIMINABLE_REGS; ep++, i++) { ep->offset = ep->previous_offset = offsets_at[label_nr - first_label_num][i]; if (ep->can_eliminate && ep->offset != ep->initial_offset) num_not_at_initial_offset++; } } /* See if anything that happened changes which eliminations are valid. For example, on the SPARC, whether or not the frame pointer can be eliminated can depend on what registers have been used. We need not check some conditions again (such as flag_omit_frame_pointer) since they can't have changed. */ static void update_eliminables (HARD_REG_SET *pset) { int previous_frame_pointer_needed = frame_pointer_needed; struct elim_table *ep; for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) if ((ep->from == HARD_FRAME_POINTER_REGNUM && FRAME_POINTER_REQUIRED) #ifdef ELIMINABLE_REGS || ! CAN_ELIMINATE (ep->from, ep->to) #endif ) ep->can_eliminate = 0; /* Look for the case where we have discovered that we can't replace register A with register B and that means that we will now be trying to replace register A with register C. This means we can no longer replace register C with register B and we need to disable such an elimination, if it exists. This occurs often with A == ap, B == sp, and C == fp. */ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) { struct elim_table *op; int new_to = -1; if (! ep->can_eliminate && ep->can_eliminate_previous) { /* Find the current elimination for ep->from, if there is a new one. */ for (op = reg_eliminate; op < &reg_eliminate[NUM_ELIMINABLE_REGS]; op++) if (op->from == ep->from && op->can_eliminate) { new_to = op->to; break; } /* See if there is an elimination of NEW_TO -> EP->TO. If so, disable it. */ for (op = reg_eliminate; op < &reg_eliminate[NUM_ELIMINABLE_REGS]; op++) if (op->from == new_to && op->to == ep->to) op->can_eliminate = 0; } } /* See if any registers that we thought we could eliminate the previous time are no longer eliminable. If so, something has changed and we must spill the register. Also, recompute the number of eliminable registers and see if the frame pointer is needed; it is if there is no elimination of the frame pointer that we can perform.
*/ frame_pointer_needed = 1; for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) { if (ep->can_eliminate && ep->from == FRAME_POINTER_REGNUM && ep->to != HARD_FRAME_POINTER_REGNUM) frame_pointer_needed = 0; if (! ep->can_eliminate && ep->can_eliminate_previous) { ep->can_eliminate_previous = 0; SET_HARD_REG_BIT (*pset, ep->from); num_eliminable--; } } /* If we didn't need a frame pointer last time, but we do now, spill the hard frame pointer. */ if (frame_pointer_needed && ! previous_frame_pointer_needed) SET_HARD_REG_BIT (*pset, HARD_FRAME_POINTER_REGNUM); } /* Initialize the table of registers to eliminate. */ static void init_elim_table (void) { struct elim_table *ep; #ifdef ELIMINABLE_REGS const struct elim_table_1 *ep1; #endif if (!reg_eliminate) reg_eliminate = xcalloc (sizeof (struct elim_table), NUM_ELIMINABLE_REGS); /* Does this function require a frame pointer? */ frame_pointer_needed = (! flag_omit_frame_pointer /* ?? If EXIT_IGNORE_STACK is set, we will not save and restore sp for alloca. So we can't eliminate the frame pointer in that case. At some point, we should improve this by emitting the sp-adjusting insns for this case. */ || (current_function_calls_alloca && EXIT_IGNORE_STACK) || FRAME_POINTER_REQUIRED); num_eliminable = 0; #ifdef ELIMINABLE_REGS for (ep = reg_eliminate, ep1 = reg_eliminate_1; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++, ep1++) { ep->from = ep1->from; ep->to = ep1->to; ep->can_eliminate = ep->can_eliminate_previous = (CAN_ELIMINATE (ep->from, ep->to) && ! (ep->to == STACK_POINTER_REGNUM && frame_pointer_needed)); } #else reg_eliminate[0].from = reg_eliminate_1[0].from; reg_eliminate[0].to = reg_eliminate_1[0].to; reg_eliminate[0].can_eliminate = reg_eliminate[0].can_eliminate_previous = ! frame_pointer_needed; #endif /* Count the number of eliminable registers and build the FROM and TO REG rtx's. Note that code in gen_rtx_REG will cause, e.g., gen_rtx_REG (Pmode, STACK_POINTER_REGNUM) to equal stack_pointer_rtx. We depend on this. */ for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++) { num_eliminable += ep->can_eliminate; ep->from_rtx = gen_rtx_REG (Pmode, ep->from); ep->to_rtx = gen_rtx_REG (Pmode, ep->to); } } /* Kick all pseudos out of hard register REGNO. If CANT_ELIMINATE is nonzero, it means that we are doing this spill because we found we can't eliminate some register. In this case, no pseudos are allowed to be in the register, even if they are only in a block that doesn't require spill registers, unlike the case when we are spilling this hard reg to produce another spill register. Every pseudo that was assigned to this register, or to anything that overlaps it, is recorded in spilled_pseudos. */ static void spill_hard_reg (unsigned int regno, int cant_eliminate) { int i; if (cant_eliminate) { SET_HARD_REG_BIT (bad_spill_regs_global, regno); regs_ever_live[regno] = 1; } /* Spill every pseudo reg that was allocated to this reg or to something that overlaps this reg. */ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) if (reg_renumber[i] >= 0 && (unsigned int) reg_renumber[i] <= regno && ((unsigned int) reg_renumber[i] + hard_regno_nregs[(unsigned int) reg_renumber[i]] [PSEUDO_REGNO_MODE (i)] > regno)) SET_REGNO_REG_SET (&spilled_pseudos, i); } /* I'm getting weird preprocessor errors if I use IOR_HARD_REG_SET from within EXECUTE_IF_SET_IN_REG_SET. Hence this awkwardness.
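(Presumably the macro's multi-statement expansion does not survive being substituted into the statement that EXECUTE_IF_SET_IN_REG_SET wraps around its body, whereas an ordinary function call keeps the expansion out of that context entirely; this is a guess, the exact error was not recorded here.)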
*/ static void ior_hard_reg_set (HARD_REG_SET *set1, HARD_REG_SET *set2) { IOR_HARD_REG_SET (*set1, *set2); } /* After find_reload_regs has been run for all insn that need reloads, and/or spill_hard_regs was called, this function is used to actually spill pseudo registers and try to reallocate them. It also sets up the spill_regs array for use by choose_reload_regs. */ static int finish_spills (int global) { struct insn_chain *chain; int something_changed = 0; int i; /* Build the spill_regs array for the function. */ /* If there are some registers still to eliminate and one of the spill regs wasn't ever used before, additional stack space may have to be allocated to store this register. Thus, we may have changed the offset between the stack and frame pointers, so mark that something has changed. One might think that we need only set VAL to 1 if this is a call-used register. However, the set of registers that must be saved by the prologue is not identical to the call-used set. For example, the register used by the call insn for the return PC is a call-used register, but must be saved by the prologue. */ n_spills = 0; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (TEST_HARD_REG_BIT (used_spill_regs, i)) { spill_reg_order[i] = n_spills; spill_regs[n_spills++] = i; if (num_eliminable && ! regs_ever_live[i]) something_changed = 1; regs_ever_live[i] = 1; } else spill_reg_order[i] = -1; EXECUTE_IF_SET_IN_REG_SET (&spilled_pseudos, FIRST_PSEUDO_REGISTER, i, { /* Record the current hard register the pseudo is allocated to in pseudo_previous_regs so we avoid reallocating it to the same hard reg in a later pass. */ if (reg_renumber[i] < 0) abort (); SET_HARD_REG_BIT (pseudo_previous_regs[i], reg_renumber[i]); /* Mark it as no longer having a hard register home. */ reg_renumber[i] = -1; /* We will need to scan everything again. */ something_changed = 1; }); /* Retry global register allocation if possible. */ if (global) { memset (pseudo_forbidden_regs, 0, max_regno * sizeof (HARD_REG_SET)); /* For every insn that needs reloads, set the registers used as spill regs in pseudo_forbidden_regs for every pseudo live across the insn. */ for (chain = insns_need_reload; chain; chain = chain->next_need_reload) { EXECUTE_IF_SET_IN_REG_SET (&chain->live_throughout, FIRST_PSEUDO_REGISTER, i, { ior_hard_reg_set (pseudo_forbidden_regs + i, &chain->used_spill_regs); }); EXECUTE_IF_SET_IN_REG_SET (&chain->dead_or_set, FIRST_PSEUDO_REGISTER, i, { ior_hard_reg_set (pseudo_forbidden_regs + i, &chain->used_spill_regs); }); } /* Retry allocating the spilled pseudos. For each reg, merge the various reg sets that indicate which hard regs can't be used, and call retry_global_alloc. We change spill_pseudos here to only contain pseudos that did not get a new hard register. */ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) if (reg_old_renumber[i] != reg_renumber[i]) { HARD_REG_SET forbidden; COPY_HARD_REG_SET (forbidden, bad_spill_regs_global); IOR_HARD_REG_SET (forbidden, pseudo_forbidden_regs[i]); IOR_HARD_REG_SET (forbidden, pseudo_previous_regs[i]); retry_global_alloc (i, forbidden); if (reg_renumber[i] >= 0) CLEAR_REGNO_REG_SET (&spilled_pseudos, i); } } /* Fix up the register information in the insn chain. This involves deleting those of the spilled pseudos which did not get a new hard register home from the live_{before,after} sets. 
*/ for (chain = reload_insn_chain; chain; chain = chain->next) { HARD_REG_SET used_by_pseudos; HARD_REG_SET used_by_pseudos2; AND_COMPL_REG_SET (&chain->live_throughout, &spilled_pseudos); AND_COMPL_REG_SET (&chain->dead_or_set, &spilled_pseudos); /* Mark any unallocated hard regs as available for spills. That makes inheritance work somewhat better. */ if (chain->need_reload) { REG_SET_TO_HARD_REG_SET (used_by_pseudos, &chain->live_throughout); REG_SET_TO_HARD_REG_SET (used_by_pseudos2, &chain->dead_or_set); IOR_HARD_REG_SET (used_by_pseudos, used_by_pseudos2); /* Save the old value for the sanity test below. */ COPY_HARD_REG_SET (used_by_pseudos2, chain->used_spill_regs); compute_use_by_pseudos (&used_by_pseudos, &chain->live_throughout); compute_use_by_pseudos (&used_by_pseudos, &chain->dead_or_set); COMPL_HARD_REG_SET (chain->used_spill_regs, used_by_pseudos); AND_HARD_REG_SET (chain->used_spill_regs, used_spill_regs); /* Make sure we only enlarge the set. */ GO_IF_HARD_REG_SUBSET (used_by_pseudos2, chain->used_spill_regs, ok); abort (); ok:; } } /* Let alter_reg modify the reg rtx's for the modified pseudos. */ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) { int regno = reg_renumber[i]; if (reg_old_renumber[i] == regno) continue; alter_reg (i, reg_old_renumber[i]); reg_old_renumber[i] = regno; if (dump_file) { if (regno == -1) fprintf (dump_file, " Register %d now on stack.\n\n", i); else fprintf (dump_file, " Register %d now in %d.\n\n", i, reg_renumber[i]); } } return something_changed; } /* Find all paradoxical subregs within X and update reg_max_ref_width. */ static void scan_paradoxical_subregs (rtx x) { int i; const char *fmt; enum rtx_code code = GET_CODE (x); switch (code) { case REG: case CONST_INT: case CONST: case SYMBOL_REF: case LABEL_REF: case CONST_DOUBLE: case CONST_VECTOR: /* shouldn't happen, but just in case. */ case CC0: case PC: case USE: case CLOBBER: return; case SUBREG: if (REG_P (SUBREG_REG (x)) && GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) reg_max_ref_width[REGNO (SUBREG_REG (x))] = GET_MODE_SIZE (GET_MODE (x)); return; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') scan_paradoxical_subregs (XEXP (x, i)); else if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) scan_paradoxical_subregs (XVECEXP (x, i, j)); } } } /* Reload pseudo-registers into hard regs around each insn as needed. Additional register load insns are output before the insn that needs it and perhaps store insns after insns that modify the reloaded pseudo reg. reg_last_reload_reg and reg_reloaded_contents keep track of which registers are already available in reload registers. We update these for the reloads that we perform, as the insns are scanned. */ static void reload_as_needed (int live_known) { struct insn_chain *chain; #if defined (AUTO_INC_DEC) int i; #endif rtx x; memset (spill_reg_rtx, 0, sizeof spill_reg_rtx); memset (spill_reg_store, 0, sizeof spill_reg_store); reg_last_reload_reg = xcalloc (max_regno, sizeof (rtx)); reg_has_output_reload = xmalloc (max_regno); CLEAR_HARD_REG_SET (reg_reloaded_valid); CLEAR_HARD_REG_SET (reg_reloaded_call_part_clobbered); set_initial_elim_offsets (); for (chain = reload_insn_chain; chain; chain = chain->next) { rtx prev = 0; rtx insn = chain->insn; rtx old_next = NEXT_INSN (insn); /* If we pass a label, copy the offsets from the label information into the current offsets of each elimination. 
*/ if (GET_CODE (insn) == CODE_LABEL) set_offsets_for_label (insn); else if (INSN_P (insn)) { rtx oldpat = copy_rtx (PATTERN (insn)); /* If this is a USE and CLOBBER of a MEM, ensure that any references to eliminable registers have been removed. */ if ((GET_CODE (PATTERN (insn)) == USE || GET_CODE (PATTERN (insn)) == CLOBBER) && MEM_P (XEXP (PATTERN (insn), 0))) XEXP (XEXP (PATTERN (insn), 0), 0) = eliminate_regs (XEXP (XEXP (PATTERN (insn), 0), 0), GET_MODE (XEXP (PATTERN (insn), 0)), NULL_RTX); /* If we need to do register elimination processing, do so. This might delete the insn, in which case we are done. */ if ((num_eliminable || num_eliminable_invariants) && chain->need_elim) { eliminate_regs_in_insn (insn, 1); if (GET_CODE (insn) == NOTE) { update_eliminable_offsets (); continue; } } /* If need_elim is nonzero but need_reload is zero, one might think that we could simply set n_reloads to 0. However, find_reloads could have done some manipulation of the insn (such as swapping commutative operands), and these manipulations are lost during the first pass for every insn that needs register elimination. So the actions of find_reloads must be redone here. */ if (! chain->need_elim && ! chain->need_reload && ! chain->need_operand_change) n_reloads = 0; /* First find the pseudo regs that must be reloaded for this insn. This info is returned in the tables reload_... (see reload.h). Also modify the body of INSN by substituting RELOAD rtx's for those pseudo regs. */ else { memset (reg_has_output_reload, 0, max_regno); CLEAR_HARD_REG_SET (reg_is_output_reload); find_reloads (insn, 1, spill_indirect_levels, live_known, spill_reg_order); } if (n_reloads > 0) { rtx next = NEXT_INSN (insn); rtx p; prev = PREV_INSN (insn); /* Now compute which reload regs to reload them into. Perhaps reusing reload regs from previous insns, or else output load insns to reload them. Maybe output store insns too. Record the choices of reload reg in reload_reg_rtx. */ choose_reload_regs (chain); /* Merge any reloads that we didn't combine for fear of increasing the number of spill registers needed but now discover can be safely merged. */ if (SMALL_REGISTER_CLASSES) merge_assigned_reloads (insn); /* Generate the insns to reload operands into or out of their reload regs. */ emit_reload_insns (chain); /* Substitute the chosen reload regs from reload_reg_rtx into the insn's body (or perhaps into the bodies of other load and store insn that we just made for reloading and that we moved the structure into). */ subst_reloads (insn); /* If this was an ASM, make sure that all the reload insns we have generated are valid. If not, give an error and delete them. */ if (asm_noperands (PATTERN (insn)) >= 0) for (p = NEXT_INSN (prev); p != next; p = NEXT_INSN (p)) if (p != insn && INSN_P (p) && GET_CODE (PATTERN (p)) != USE && (recog_memoized (p) < 0 || (extract_insn (p), ! constrain_operands (1)))) { error_for_asm (insn, "`asm' operand requires impossible reload"); delete_insn (p); } } if (num_eliminable && chain->need_elim) update_eliminable_offsets (); /* Any previously reloaded spilled pseudo reg, stored in this insn, is no longer validly lying around to save a future reload. Note that this does not detect pseudos that were reloaded for this insn in order to be stored in (obeying register constraints). That is correct; such reload registers ARE still valid. */ note_stores (oldpat, forget_old_reloads_1, NULL); /* There may have been CLOBBER insns placed after INSN. So scan between INSN and NEXT and use them to forget old reloads. 
*/ for (x = NEXT_INSN (insn); x != old_next; x = NEXT_INSN (x)) if (GET_CODE (x) == INSN && GET_CODE (PATTERN (x)) == CLOBBER) note_stores (PATTERN (x), forget_old_reloads_1, NULL); #ifdef AUTO_INC_DEC /* Likewise for regs altered by auto-increment in this insn. REG_INC notes have been changed by reloading: find_reloads_address_1 records substitutions for them, which have been performed by subst_reloads above. */ for (i = n_reloads - 1; i >= 0; i--) { rtx in_reg = rld[i].in_reg; if (in_reg) { enum rtx_code code = GET_CODE (in_reg); /* PRE_INC / PRE_DEC will have the reload register ending up with the same value as the stack slot, but that doesn't hold true for POST_INC / POST_DEC. Either we have to convert the memory access to a true POST_INC / POST_DEC, or we can't use the reload register for inheritance. */ if ((code == POST_INC || code == POST_DEC) && TEST_HARD_REG_BIT (reg_reloaded_valid, REGNO (rld[i].reg_rtx)) /* Make sure it is the inc/dec pseudo, and not some other (e.g. output operand) pseudo. */ && ((unsigned) reg_reloaded_contents[REGNO (rld[i].reg_rtx)] == REGNO (XEXP (in_reg, 0)))) { rtx reload_reg = rld[i].reg_rtx; enum machine_mode mode = GET_MODE (reload_reg); int n = 0; rtx p; for (p = PREV_INSN (old_next); p != prev; p = PREV_INSN (p)) { /* We really want to ignore REG_INC notes here, so use PATTERN (p) as argument to reg_set_p . */ if (reg_set_p (reload_reg, PATTERN (p))) break; n = count_occurrences (PATTERN (p), reload_reg, 0); if (! n) continue; if (n == 1) { n = validate_replace_rtx (reload_reg, gen_rtx_fmt_e (code, mode, reload_reg), p); /* We must also verify that the constraints are met after the replacement. */ extract_insn (p); if (n) n = constrain_operands (1); else break; /* If the constraints were not met, then undo the replacement. */ if (!n) { validate_replace_rtx (gen_rtx_fmt_e (code, mode, reload_reg), reload_reg, p); break; } } break; } if (n == 1) { REG_NOTES (p) = gen_rtx_EXPR_LIST (REG_INC, reload_reg, REG_NOTES (p)); /* Mark this as having an output reload so that the REG_INC processing code below won't invalidate the reload for inheritance. */ SET_HARD_REG_BIT (reg_is_output_reload, REGNO (reload_reg)); reg_has_output_reload[REGNO (XEXP (in_reg, 0))] = 1; } else forget_old_reloads_1 (XEXP (in_reg, 0), NULL_RTX, NULL); } else if ((code == PRE_INC || code == PRE_DEC) && TEST_HARD_REG_BIT (reg_reloaded_valid, REGNO (rld[i].reg_rtx)) /* Make sure it is the inc/dec pseudo, and not some other (e.g. output operand) pseudo. */ && ((unsigned) reg_reloaded_contents[REGNO (rld[i].reg_rtx)] == REGNO (XEXP (in_reg, 0)))) { SET_HARD_REG_BIT (reg_is_output_reload, REGNO (rld[i].reg_rtx)); reg_has_output_reload[REGNO (XEXP (in_reg, 0))] = 1; } } } /* If a pseudo that got a hard register is auto-incremented, we must purge records of copying it into pseudos without hard registers. */ for (x = REG_NOTES (insn); x; x = XEXP (x, 1)) if (REG_NOTE_KIND (x) == REG_INC) { /* See if this pseudo reg was reloaded in this insn. If so, its last-reload info is still valid because it is based on this insn's reload. */ for (i = 0; i < n_reloads; i++) if (rld[i].out == XEXP (x, 0)) break; if (i == n_reloads) forget_old_reloads_1 (XEXP (x, 0), NULL_RTX, NULL); } #endif } /* A reload reg's contents are unknown after a label. */ if (GET_CODE (insn) == CODE_LABEL) CLEAR_HARD_REG_SET (reg_reloaded_valid); /* Don't assume a reload reg is still good after a call insn if it is a call-used reg, or if it contains a value that will be partially clobbered by the call. 
*/ else if (GET_CODE (insn) == CALL_INSN) { AND_COMPL_HARD_REG_SET (reg_reloaded_valid, call_used_reg_set); AND_COMPL_HARD_REG_SET (reg_reloaded_valid, reg_reloaded_call_part_clobbered); } } /* Clean up. */ free (reg_last_reload_reg); free (reg_has_output_reload); } /* Discard all record of any value reloaded from X, or reloaded in X from someplace else; unless X is an output reload reg of the current insn. X may be a hard reg (the reload reg) or it may be a pseudo reg that was reloaded from. */ static void forget_old_reloads_1 (rtx x, rtx ignored ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { unsigned int regno; unsigned int nr; /* note_stores does give us subregs of hard regs, subreg_regno_offset will abort if it is not a hard reg. */ while (GET_CODE (x) == SUBREG) { /* We ignore the subreg offset when calculating the regno, because we are using the entire underlying hard register below. */ x = SUBREG_REG (x); } if (!REG_P (x)) return; regno = REGNO (x); if (regno >= FIRST_PSEUDO_REGISTER) nr = 1; else { unsigned int i; nr = hard_regno_nregs[regno][GET_MODE (x)]; /* Storing into a spilled-reg invalidates its contents. This can happen if a block-local pseudo is allocated to that reg and it wasn't spilled because this block's total need is 0. Then some insn might have an optional reload and use this reg. */ for (i = 0; i < nr; i++) /* But don't do this if the reg actually serves as an output reload reg in the current instruction. */ if (n_reloads == 0 || ! TEST_HARD_REG_BIT (reg_is_output_reload, regno + i)) { CLEAR_HARD_REG_BIT (reg_reloaded_valid, regno + i); CLEAR_HARD_REG_BIT (reg_reloaded_call_part_clobbered, regno + i); spill_reg_store[regno + i] = 0; } } /* Since value of X has changed, forget any value previously copied from it. */ while (nr-- > 0) /* But don't forget a copy if this is the output reload that establishes the copy's validity. */ if (n_reloads == 0 || reg_has_output_reload[regno + nr] == 0) reg_last_reload_reg[regno + nr] = 0; } /* The following HARD_REG_SETs indicate when each hard register is used for a reload of various parts of the current insn. */ /* If reg is unavailable for all reloads. */ static HARD_REG_SET reload_reg_unavailable; /* If reg is in use as a reload reg for a RELOAD_OTHER reload. */ static HARD_REG_SET reload_reg_used; /* If reg is in use for a RELOAD_FOR_INPUT_ADDRESS reload for operand I. */ static HARD_REG_SET reload_reg_used_in_input_addr[MAX_RECOG_OPERANDS]; /* If reg is in use for a RELOAD_FOR_INPADDR_ADDRESS reload for operand I. */ static HARD_REG_SET reload_reg_used_in_inpaddr_addr[MAX_RECOG_OPERANDS]; /* If reg is in use for a RELOAD_FOR_OUTPUT_ADDRESS reload for operand I. */ static HARD_REG_SET reload_reg_used_in_output_addr[MAX_RECOG_OPERANDS]; /* If reg is in use for a RELOAD_FOR_OUTADDR_ADDRESS reload for operand I. */ static HARD_REG_SET reload_reg_used_in_outaddr_addr[MAX_RECOG_OPERANDS]; /* If reg is in use for a RELOAD_FOR_INPUT reload for operand I. */ static HARD_REG_SET reload_reg_used_in_input[MAX_RECOG_OPERANDS]; /* If reg is in use for a RELOAD_FOR_OUTPUT reload for operand I. */ static HARD_REG_SET reload_reg_used_in_output[MAX_RECOG_OPERANDS]; /* If reg is in use for a RELOAD_FOR_OPERAND_ADDRESS reload. */ static HARD_REG_SET reload_reg_used_in_op_addr; /* If reg is in use for a RELOAD_FOR_OPADDR_ADDR reload. */ static HARD_REG_SET reload_reg_used_in_op_addr_reload; /* If reg is in use for a RELOAD_FOR_INSN reload. 
*/ static HARD_REG_SET reload_reg_used_in_insn; /* If reg is in use for a RELOAD_FOR_OTHER_ADDRESS reload. */ static HARD_REG_SET reload_reg_used_in_other_addr; /* If reg is in use as a reload reg for any sort of reload. */ static HARD_REG_SET reload_reg_used_at_all; /* If reg is use as an inherited reload. We just mark the first register in the group. */ static HARD_REG_SET reload_reg_used_for_inherit; /* Records which hard regs are used in any way, either as explicit use or by being allocated to a pseudo during any point of the current insn. */ static HARD_REG_SET reg_used_in_insn; /* Mark reg REGNO as in use for a reload of the sort spec'd by OPNUM and TYPE. MODE is used to indicate how many consecutive regs are actually used. */ static void mark_reload_reg_in_use (unsigned int regno, int opnum, enum reload_type type, enum machine_mode mode) { unsigned int nregs = hard_regno_nregs[regno][mode]; unsigned int i; for (i = regno; i < nregs + regno; i++) { switch (type) { case RELOAD_OTHER: SET_HARD_REG_BIT (reload_reg_used, i); break; case RELOAD_FOR_INPUT_ADDRESS: SET_HARD_REG_BIT (reload_reg_used_in_input_addr[opnum], i); break; case RELOAD_FOR_INPADDR_ADDRESS: SET_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[opnum], i); break; case RELOAD_FOR_OUTPUT_ADDRESS: SET_HARD_REG_BIT (reload_reg_used_in_output_addr[opnum], i); break; case RELOAD_FOR_OUTADDR_ADDRESS: SET_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[opnum], i); break; case RELOAD_FOR_OPERAND_ADDRESS: SET_HARD_REG_BIT (reload_reg_used_in_op_addr, i); break; case RELOAD_FOR_OPADDR_ADDR: SET_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, i); break; case RELOAD_FOR_OTHER_ADDRESS: SET_HARD_REG_BIT (reload_reg_used_in_other_addr, i); break; case RELOAD_FOR_INPUT: SET_HARD_REG_BIT (reload_reg_used_in_input[opnum], i); break; case RELOAD_FOR_OUTPUT: SET_HARD_REG_BIT (reload_reg_used_in_output[opnum], i); break; case RELOAD_FOR_INSN: SET_HARD_REG_BIT (reload_reg_used_in_insn, i); break; } SET_HARD_REG_BIT (reload_reg_used_at_all, i); } } /* Similarly, but show REGNO is no longer in use for a reload. */ static void clear_reload_reg_in_use (unsigned int regno, int opnum, enum reload_type type, enum machine_mode mode) { unsigned int nregs = hard_regno_nregs[regno][mode]; unsigned int start_regno, end_regno, r; int i; /* A complication is that for some reload types, inheritance might allow multiple reloads of the same types to share a reload register. We set check_opnum if we have to check only reloads with the same operand number, and check_any if we have to check all reloads. 
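An illustrative sketch of the interval trimming performed further down (the register numbers are invented for the example): suppose hard regs 4 and 5 are being freed for a RELOAD_FOR_OPADDR_ADDR reload, so check_any is set, and another pending reload of that type still holds reg 5 as its reload register. Then
    start_regno = 4, end_regno = 6          freeing regs 4 and 5
    conflict_start = 5, conflict_end = 6    the other reload still owns reg 5
and since conflict_start > start_regno and conflict_start < end_regno, end_regno is trimmed to 5, so only reg 4 is actually cleared from the used set.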
*/ int check_opnum = 0; int check_any = 0; HARD_REG_SET *used_in_set; switch (type) { case RELOAD_OTHER: used_in_set = &reload_reg_used; break; case RELOAD_FOR_INPUT_ADDRESS: used_in_set = &reload_reg_used_in_input_addr[opnum]; break; case RELOAD_FOR_INPADDR_ADDRESS: check_opnum = 1; used_in_set = &reload_reg_used_in_inpaddr_addr[opnum]; break; case RELOAD_FOR_OUTPUT_ADDRESS: used_in_set = &reload_reg_used_in_output_addr[opnum]; break; case RELOAD_FOR_OUTADDR_ADDRESS: check_opnum = 1; used_in_set = &reload_reg_used_in_outaddr_addr[opnum]; break; case RELOAD_FOR_OPERAND_ADDRESS: used_in_set = &reload_reg_used_in_op_addr; break; case RELOAD_FOR_OPADDR_ADDR: check_any = 1; used_in_set = &reload_reg_used_in_op_addr_reload; break; case RELOAD_FOR_OTHER_ADDRESS: used_in_set = &reload_reg_used_in_other_addr; check_any = 1; break; case RELOAD_FOR_INPUT: used_in_set = &reload_reg_used_in_input[opnum]; break; case RELOAD_FOR_OUTPUT: used_in_set = &reload_reg_used_in_output[opnum]; break; case RELOAD_FOR_INSN: used_in_set = &reload_reg_used_in_insn; break; default: abort (); } /* We resolve conflicts with remaining reloads of the same type by excluding the intervals of reload registers by them from the interval of freed reload registers. Since we only keep track of one set of interval bounds, we might have to exclude somewhat more than what would be necessary if we used a HARD_REG_SET here. But this should only happen very infrequently, so there should be no reason to worry about it. */ start_regno = regno; end_regno = regno + nregs; if (check_opnum || check_any) { for (i = n_reloads - 1; i >= 0; i--) { if (rld[i].when_needed == type && (check_any || rld[i].opnum == opnum) && rld[i].reg_rtx) { unsigned int conflict_start = true_regnum (rld[i].reg_rtx); unsigned int conflict_end = (conflict_start + hard_regno_nregs[conflict_start][rld[i].mode]); /* If there is an overlap with the first to-be-freed register, adjust the interval start. */ if (conflict_start <= start_regno && conflict_end > start_regno) start_regno = conflict_end; /* Otherwise, if there is a conflict with one of the other to-be-freed registers, adjust the interval end. */ if (conflict_start > start_regno && conflict_start < end_regno) end_regno = conflict_start; } } } for (r = start_regno; r < end_regno; r++) CLEAR_HARD_REG_BIT (*used_in_set, r); } /* 1 if reg REGNO is free as a reload reg for a reload of the sort specified by OPNUM and TYPE. */ static int reload_reg_free_p (unsigned int regno, int opnum, enum reload_type type) { int i; /* In use for a RELOAD_OTHER means it's not available for anything. */ if (TEST_HARD_REG_BIT (reload_reg_used, regno) || TEST_HARD_REG_BIT (reload_reg_unavailable, regno)) return 0; switch (type) { case RELOAD_OTHER: /* In use for anything means we can't use it for RELOAD_OTHER. 
*/ if (TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno) || TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno) || TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno) || TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)) return 0; for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)) return 0; return 1; case RELOAD_FOR_INPUT: if (TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno) || TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)) return 0; if (TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno)) return 0; /* If it is used for some other input, can't use it. */ for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)) return 0; /* If it is used in a later operand's address, can't use it. */ for (i = opnum + 1; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno)) return 0; return 1; case RELOAD_FOR_INPUT_ADDRESS: /* Can't use a register if it is used for an input address for this operand or used as an input in an earlier one. */ if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[opnum], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[opnum], regno)) return 0; for (i = 0; i < opnum; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)) return 0; return 1; case RELOAD_FOR_INPADDR_ADDRESS: /* Can't use a register if it is used for an input address for this operand or used as an input in an earlier one. */ if (TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[opnum], regno)) return 0; for (i = 0; i < opnum; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)) return 0; return 1; case RELOAD_FOR_OUTPUT_ADDRESS: /* Can't use a register if it is used for an output address for this operand or used as an output in this or a later operand. Note that multiple output operands are emitted in reverse order, so the conflicting ones are those with lower indices. */ if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[opnum], regno)) return 0; for (i = 0; i <= opnum; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)) return 0; return 1; case RELOAD_FOR_OUTADDR_ADDRESS: /* Can't use a register if it is used for an output address for this operand or used as an output in this or a later operand. Note that multiple output operands are emitted in reverse order, so the conflicting ones are those with lower indices. */ if (TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[opnum], regno)) return 0; for (i = 0; i <= opnum; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)) return 0; return 1; case RELOAD_FOR_OPERAND_ADDRESS: for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)) return 0; return (! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno) && ! 
TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)); case RELOAD_FOR_OPADDR_ADDR: for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)) return 0; return (!TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno)); case RELOAD_FOR_OUTPUT: /* This cannot share a register with RELOAD_FOR_INSN reloads, other outputs, or an operand address for this or an earlier output. Note that multiple output operands are emitted in reverse order, so the conflicting ones are those with higher indices. */ if (TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno)) return 0; for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)) return 0; for (i = opnum; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno)) return 0; return 1; case RELOAD_FOR_INSN: for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)) return 0; return (! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno) && ! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno)); case RELOAD_FOR_OTHER_ADDRESS: return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno); } abort (); } /* Return 1 if the value in reload reg REGNO, as used by a reload needed for the part of the insn specified by OPNUM and TYPE, is still available in REGNO at the end of the insn. We can assume that the reload reg was already tested for availability at the time it is needed, and we should not check this again, in case the reg has already been marked in use. */ static int reload_reg_reaches_end_p (unsigned int regno, int opnum, enum reload_type type) { int i; switch (type) { case RELOAD_OTHER: /* Since a RELOAD_OTHER reload claims the reg for the entire insn, its value must reach the end. */ return 1; /* If this use is for part of the insn, its value reaches if no subsequent part uses the same register. Just like the above function, don't try to do this with lots of fallthroughs. */ case RELOAD_FOR_OTHER_ADDRESS: /* Here we check for everything else, since these don't conflict with anything else and everything comes later. */ for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)) return 0; return (! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno) && ! TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno) && ! TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno) && ! TEST_HARD_REG_BIT (reload_reg_used, regno)); case RELOAD_FOR_INPUT_ADDRESS: case RELOAD_FOR_INPADDR_ADDRESS: /* Similar, except that we check only for this and subsequent inputs and the address of only subsequent inputs and we do not need to check for RELOAD_OTHER objects since they are known not to conflict. 
*/ for (i = opnum; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)) return 0; for (i = opnum + 1; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno)) return 0; for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)) return 0; if (TEST_HARD_REG_BIT (reload_reg_used_in_op_addr_reload, regno)) return 0; return (!TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno) && !TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno) && !TEST_HARD_REG_BIT (reload_reg_used, regno)); case RELOAD_FOR_INPUT: /* Similar to input address, except we start at the next operand for both input and input address and we do not check for RELOAD_FOR_OPERAND_ADDRESS and RELOAD_FOR_INSN since these would conflict. */ for (i = opnum + 1; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_input_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_inpaddr_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_input[i], regno)) return 0; /* ... fall through ... */ case RELOAD_FOR_OPERAND_ADDRESS: /* Check outputs and their addresses. */ for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)) return 0; return (!TEST_HARD_REG_BIT (reload_reg_used, regno)); case RELOAD_FOR_OPADDR_ADDR: for (i = 0; i < reload_n_operands; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_output[i], regno)) return 0; return (!TEST_HARD_REG_BIT (reload_reg_used_in_op_addr, regno) && !TEST_HARD_REG_BIT (reload_reg_used_in_insn, regno) && !TEST_HARD_REG_BIT (reload_reg_used, regno)); case RELOAD_FOR_INSN: /* These conflict with other outputs with RELOAD_OTHER. So we need only check for output addresses. */ opnum = reload_n_operands; /* ... fall through ... */ case RELOAD_FOR_OUTPUT: case RELOAD_FOR_OUTPUT_ADDRESS: case RELOAD_FOR_OUTADDR_ADDRESS: /* We already know these can't conflict with a later output. So the only thing to check are later output addresses. Note that multiple output operands are emitted in reverse order, so the conflicting ones are those with lower indices. */ for (i = 0; i < opnum; i++) if (TEST_HARD_REG_BIT (reload_reg_used_in_output_addr[i], regno) || TEST_HARD_REG_BIT (reload_reg_used_in_outaddr_addr[i], regno)) return 0; return 1; } abort (); } /* Return 1 if the reloads denoted by R1 and R2 cannot share a register. Return 0 otherwise. This function uses the same algorithm as reload_reg_free_p above. */ int reloads_conflict (int r1, int r2) { enum reload_type r1_type = rld[r1].when_needed; enum reload_type r2_type = rld[r2].when_needed; int r1_opnum = rld[r1].opnum; int r2_opnum = rld[r2].opnum; /* RELOAD_OTHER conflicts with everything. */ if (r2_type == RELOAD_OTHER) return 1; /* Otherwise, check conflicts differently for each type. 
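For instance (an illustrative case, spelled out from the switch below): a RELOAD_FOR_INPUT_ADDRESS reload for operand 2 conflicts with another RELOAD_FOR_INPUT_ADDRESS reload for the same operand and with the RELOAD_FOR_INPUT reloads of operands 0 and 1, whose input values are already live while operand 2's address is being computed, but not with the RELOAD_FOR_INPUT reload of operand 2 itself or of any later operand.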
*/ switch (r1_type) { case RELOAD_FOR_INPUT: return (r2_type == RELOAD_FOR_INSN || r2_type == RELOAD_FOR_OPERAND_ADDRESS || r2_type == RELOAD_FOR_OPADDR_ADDR || r2_type == RELOAD_FOR_INPUT || ((r2_type == RELOAD_FOR_INPUT_ADDRESS || r2_type == RELOAD_FOR_INPADDR_ADDRESS) && r2_opnum > r1_opnum)); case RELOAD_FOR_INPUT_ADDRESS: return ((r2_type == RELOAD_FOR_INPUT_ADDRESS && r1_opnum == r2_opnum) || (r2_type == RELOAD_FOR_INPUT && r2_opnum < r1_opnum)); case RELOAD_FOR_INPADDR_ADDRESS: return ((r2_type == RELOAD_FOR_INPADDR_ADDRESS && r1_opnum == r2_opnum) || (r2_type == RELOAD_FOR_INPUT && r2_opnum < r1_opnum)); case RELOAD_FOR_OUTPUT_ADDRESS: return ((r2_type == RELOAD_FOR_OUTPUT_ADDRESS && r2_opnum == r1_opnum) || (r2_type == RELOAD_FOR_OUTPUT && r2_opnum <= r1_opnum)); case RELOAD_FOR_OUTADDR_ADDRESS: return ((r2_type == RELOAD_FOR_OUTADDR_ADDRESS && r2_opnum == r1_opnum) || (r2_type == RELOAD_FOR_OUTPUT && r2_opnum <= r1_opnum)); case RELOAD_FOR_OPERAND_ADDRESS: return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_INSN || r2_type == RELOAD_FOR_OPERAND_ADDRESS); case RELOAD_FOR_OPADDR_ADDR: return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_OPADDR_ADDR); case RELOAD_FOR_OUTPUT: return (r2_type == RELOAD_FOR_INSN || r2_type == RELOAD_FOR_OUTPUT || ((r2_type == RELOAD_FOR_OUTPUT_ADDRESS || r2_type == RELOAD_FOR_OUTADDR_ADDRESS) && r2_opnum >= r1_opnum)); case RELOAD_FOR_INSN: return (r2_type == RELOAD_FOR_INPUT || r2_type == RELOAD_FOR_OUTPUT || r2_type == RELOAD_FOR_INSN || r2_type == RELOAD_FOR_OPERAND_ADDRESS); case RELOAD_FOR_OTHER_ADDRESS: return r2_type == RELOAD_FOR_OTHER_ADDRESS; case RELOAD_OTHER: return 1; default: abort (); } } /* Indexed by reload number, 1 if incoming value inherited from previous insns. */ char reload_inherited[MAX_RELOADS]; /* For an inherited reload, this is the insn the reload was inherited from, if we know it. Otherwise, this is 0. */ rtx reload_inheritance_insn[MAX_RELOADS]; /* If nonzero, this is a place to get the value of the reload, rather than using reload_in. */ rtx reload_override_in[MAX_RELOADS]; /* For each reload, the hard register number of the register used, or -1 if we did not need a register for this reload. */ int reload_spill_index[MAX_RELOADS]; /* Subroutine of free_for_value_p, used to check a single register. START_REGNO is the starting regno of the full reload register (possibly comprising multiple hard registers) that we are considering. */ static int reload_reg_free_for_value_p (int start_regno, int regno, int opnum, enum reload_type type, rtx value, rtx out, int reloadnum, int ignore_address_reloads) { int time1; /* Set if we see an input reload that must not share its reload register with any new earlyclobber, but might otherwise share the reload register with an output or input-output reload. */ int check_earlyclobber = 0; int i; int copy = 0; if (TEST_HARD_REG_BIT (reload_reg_unavailable, regno)) return 0; if (out == const0_rtx) { copy = 1; out = NULL_RTX; } /* We use some pseudo 'time' value to check if the lifetimes of the new register use would overlap with the one of a previous reload that is not read-only or uses a different value. The 'time' used doesn't have to be linear in any shape or form, just monotonic. Some reload types use different 'buckets' for each operand. So there are MAX_RECOG_OPERANDS different time values for each such reload type. 
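To make the buckets concrete (illustrative numbers only, read off the formulas in the switch statements below): with two input operands, the address-address, address and input reloads of operand 0 get times 2, 3 and 4, those of operand 1 get 6, 7 and 8, operand-address reloads land just past MAX_RECOG_OPERANDS * 4, and the output-side reloads come later still, so reloads that belong to different operands always fall into disjoint buckets.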
We compute TIME1 as the time when the register for the prospective new reload ceases to be live, and TIME2 for each existing reload as the time when that the reload register of that reload becomes live. Where there is little to be gained by exact lifetime calculations, we just make conservative assumptions, i.e. a longer lifetime; this is done in the 'default:' cases. */ switch (type) { case RELOAD_FOR_OTHER_ADDRESS: /* RELOAD_FOR_OTHER_ADDRESS conflicts with RELOAD_OTHER reloads. */ time1 = copy ? 0 : 1; break; case RELOAD_OTHER: time1 = copy ? 1 : MAX_RECOG_OPERANDS * 5 + 5; break; /* For each input, we may have a sequence of RELOAD_FOR_INPADDR_ADDRESS, RELOAD_FOR_INPUT_ADDRESS and RELOAD_FOR_INPUT. By adding 0 / 1 / 2 , respectively, to the time values for these, we get distinct time values. To get distinct time values for each operand, we have to multiply opnum by at least three. We round that up to four because multiply by four is often cheaper. */ case RELOAD_FOR_INPADDR_ADDRESS: time1 = opnum * 4 + 2; break; case RELOAD_FOR_INPUT_ADDRESS: time1 = opnum * 4 + 3; break; case RELOAD_FOR_INPUT: /* All RELOAD_FOR_INPUT reloads remain live till the instruction executes (inclusive). */ time1 = copy ? opnum * 4 + 4 : MAX_RECOG_OPERANDS * 4 + 3; break; case RELOAD_FOR_OPADDR_ADDR: /* opnum * 4 + 4 <= (MAX_RECOG_OPERANDS - 1) * 4 + 4 == MAX_RECOG_OPERANDS * 4 */ time1 = MAX_RECOG_OPERANDS * 4 + 1; break; case RELOAD_FOR_OPERAND_ADDRESS: /* RELOAD_FOR_OPERAND_ADDRESS reloads are live even while the insn is executed. */ time1 = copy ? MAX_RECOG_OPERANDS * 4 + 2 : MAX_RECOG_OPERANDS * 4 + 3; break; case RELOAD_FOR_OUTADDR_ADDRESS: time1 = MAX_RECOG_OPERANDS * 4 + 4 + opnum; break; case RELOAD_FOR_OUTPUT_ADDRESS: time1 = MAX_RECOG_OPERANDS * 4 + 5 + opnum; break; default: time1 = MAX_RECOG_OPERANDS * 5 + 5; } for (i = 0; i < n_reloads; i++) { rtx reg = rld[i].reg_rtx; if (reg && REG_P (reg) && ((unsigned) regno - true_regnum (reg) <= hard_regno_nregs[REGNO (reg)][GET_MODE (reg)] - (unsigned) 1) && i != reloadnum) { rtx other_input = rld[i].in; /* If the other reload loads the same input value, that will not cause a conflict only if it's loading it into the same register. */ if (true_regnum (reg) != start_regno) other_input = NULL_RTX; if (! other_input || ! rtx_equal_p (other_input, value) || rld[i].out || out) { int time2; switch (rld[i].when_needed) { case RELOAD_FOR_OTHER_ADDRESS: time2 = 0; break; case RELOAD_FOR_INPADDR_ADDRESS: /* find_reloads makes sure that a RELOAD_FOR_{INP,OP,OUT}ADDR_ADDRESS reload is only used by at most one - the first - RELOAD_FOR_{INPUT,OPERAND,OUTPUT}_ADDRESS . If the address reload is inherited, the address address reload goes away, so we can ignore this conflict. */ if (type == RELOAD_FOR_INPUT_ADDRESS && reloadnum == i + 1 && ignore_address_reloads /* Unless the RELOAD_FOR_INPUT is an auto_inc expression. Then the address address is still needed to store back the new address. */ && ! rld[reloadnum].out) continue; /* Likewise, if a RELOAD_FOR_INPUT can inherit a value, its RELOAD_FOR_INPUT_ADDRESS / RELOAD_FOR_INPADDR_ADDRESS reloads go away. */ if (type == RELOAD_FOR_INPUT && opnum == rld[i].opnum && ignore_address_reloads /* Unless we are reloading an auto_inc expression. */ && ! rld[reloadnum].out) continue; time2 = rld[i].opnum * 4 + 2; break; case RELOAD_FOR_INPUT_ADDRESS: if (type == RELOAD_FOR_INPUT && opnum == rld[i].opnum && ignore_address_reloads && ! 
rld[reloadnum].out) continue; time2 = rld[i].opnum * 4 + 3; break; case RELOAD_FOR_INPUT: time2 = rld[i].opnum * 4 + 4; check_earlyclobber = 1; break; /* rld[i].opnum * 4 + 4 <= (MAX_RECOG_OPERAND - 1) * 4 + 4 == MAX_RECOG_OPERAND * 4 */ case RELOAD_FOR_OPADDR_ADDR: if (type == RELOAD_FOR_OPERAND_ADDRESS && reloadnum == i + 1 && ignore_address_reloads && ! rld[reloadnum].out) continue; time2 = MAX_RECOG_OPERANDS * 4 + 1; break; case RELOAD_FOR_OPERAND_ADDRESS: time2 = MAX_RECOG_OPERANDS * 4 + 2; check_earlyclobber = 1; break; case RELOAD_FOR_INSN: time2 = MAX_RECOG_OPERANDS * 4 + 3; break; case RELOAD_FOR_OUTPUT: /* All RELOAD_FOR_OUTPUT reloads become live just after the instruction is executed. */ time2 = MAX_RECOG_OPERANDS * 4 + 4; break; /* The first RELOAD_FOR_OUTADDR_ADDRESS reload conflicts with the RELOAD_FOR_OUTPUT reloads, so assign it the same time value. */ case RELOAD_FOR_OUTADDR_ADDRESS: if (type == RELOAD_FOR_OUTPUT_ADDRESS && reloadnum == i + 1 && ignore_address_reloads && ! rld[reloadnum].out) continue; time2 = MAX_RECOG_OPERANDS * 4 + 4 + rld[i].opnum; break; case RELOAD_FOR_OUTPUT_ADDRESS: time2 = MAX_RECOG_OPERANDS * 4 + 5 + rld[i].opnum; break; case RELOAD_OTHER: /* If there is no conflict in the input part, handle this like an output reload. */ if (! rld[i].in || rtx_equal_p (other_input, value)) { time2 = MAX_RECOG_OPERANDS * 4 + 4; /* Earlyclobbered outputs must conflict with inputs. */ if (earlyclobber_operand_p (rld[i].out)) time2 = MAX_RECOG_OPERANDS * 4 + 3; break; } time2 = 1; /* RELOAD_OTHER might be live beyond instruction execution, but this is not obvious when we set time2 = 1. So check here if there might be a problem with the new reload clobbering the register used by the RELOAD_OTHER. */ if (out) return 0; break; default: return 0; } if ((time1 >= time2 && (! rld[i].in || rld[i].out || ! rtx_equal_p (other_input, value))) || (out && rld[reloadnum].out_reg && time2 >= MAX_RECOG_OPERANDS * 4 + 3)) return 0; } } } /* Earlyclobbered outputs must conflict with inputs. */ if (check_earlyclobber && out && earlyclobber_operand_p (out)) return 0; return 1; } /* Return 1 if the value in reload reg REGNO, as used by a reload needed for the part of the insn specified by OPNUM and TYPE, may be used to load VALUE into it. MODE is the mode in which the register is used, this is needed to determine how many hard regs to test. Other read-only reloads with the same value do not conflict unless OUT is nonzero and these other reloads have to live while output reloads live. If OUT is CONST0_RTX, this is a special case: it means that the test should not be for using register REGNO as reload register, but for copying from register REGNO into the reload register. RELOADNUM is the number of the reload we want to load this value for; a reload does not conflict with itself. When IGNORE_ADDRESS_RELOADS is set, we can not have conflicts with reloads that load an address for the very reload we are considering. The caller has to make sure that there is no conflict with the return register. */ static int free_for_value_p (int regno, enum machine_mode mode, int opnum, enum reload_type type, rtx value, rtx out, int reloadnum, int ignore_address_reloads) { int nregs = hard_regno_nregs[regno][mode]; while (nregs-- > 0) if (! reload_reg_free_for_value_p (regno, regno + nregs, opnum, type, value, out, reloadnum, ignore_address_reloads)) return 0; return 1; } /* Return nonzero if the rtx X is invariant over the current function. */ /* ??? 
Actually, the places where we use this expect exactly what * is tested here, and not everything that is function invariant. In * particular, the frame pointer and arg pointer are special cased; * pic_offset_table_rtx is not, and this will cause aborts when we * go to spill these things to memory. */ static int function_invariant_p (rtx x) { if (CONSTANT_P (x)) return 1; if (x == frame_pointer_rtx || x == arg_pointer_rtx) return 1; if (GET_CODE (x) == PLUS && (XEXP (x, 0) == frame_pointer_rtx || XEXP (x, 0) == arg_pointer_rtx) && CONSTANT_P (XEXP (x, 1))) return 1; return 0; } /* Determine whether the reload reg X overlaps any rtx'es used for overriding inheritance. Return nonzero if so. */ static int conflicts_with_override (rtx x) { int i; for (i = 0; i < n_reloads; i++) if (reload_override_in[i] && reg_overlap_mentioned_p (x, reload_override_in[i])) return 1; return 0; } /* Give an error message saying we failed to find a reload for INSN, and clear out reload R. */ static void failed_reload (rtx insn, int r) { if (asm_noperands (PATTERN (insn)) < 0) /* It's the compiler's fault. */ fatal_insn ("could not find a spill register", insn); /* It's the user's fault; the operand's mode and constraint don't match. Disable this reload so we don't crash in final. */ error_for_asm (insn, "`asm' operand constraint incompatible with operand size"); rld[r].in = 0; rld[r].out = 0; rld[r].reg_rtx = 0; rld[r].optional = 1; rld[r].secondary_p = 1; } /* I is the index in SPILL_REG_RTX of the reload register we are to allocate for reload R. If it's valid, get an rtx for it. Return nonzero if successful. */ static int set_reload_reg (int i, int r) { int regno; rtx reg = spill_reg_rtx[i]; if (reg == 0 || GET_MODE (reg) != rld[r].mode) spill_reg_rtx[i] = reg = gen_rtx_REG (rld[r].mode, spill_regs[i]); regno = true_regnum (reg); /* Detect when the reload reg can't hold the reload mode. This used to be one `if', but Sequent compiler can't handle that. */ if (HARD_REGNO_MODE_OK (regno, rld[r].mode)) { enum machine_mode test_mode = VOIDmode; if (rld[r].in) test_mode = GET_MODE (rld[r].in); /* If rld[r].in has VOIDmode, it means we will load it in whatever mode the reload reg has: to wit, rld[r].mode. We have already tested that for validity. */ /* Aside from that, we need to test that the expressions to reload from or into have modes which are valid for this reload register. Otherwise the reload insns would be invalid. */ if (! (rld[r].in != 0 && test_mode != VOIDmode && ! HARD_REGNO_MODE_OK (regno, test_mode))) if (! (rld[r].out != 0 && ! HARD_REGNO_MODE_OK (regno, GET_MODE (rld[r].out)))) { /* The reg is OK. */ last_spill_reg = i; /* Mark as in use for this insn the reload regs we use for this. */ mark_reload_reg_in_use (spill_regs[i], rld[r].opnum, rld[r].when_needed, rld[r].mode); rld[r].reg_rtx = reg; reload_spill_index[r] = spill_regs[i]; return 1; } } return 0; } /* Find a spill register to use as a reload register for reload R. LAST_RELOAD is nonzero if this is the last reload for the insn being processed. Set rld[R].reg_rtx to the register allocated. We return 1 if successful, or 0 if we couldn't find a spill reg and we didn't change anything. */ static int allocate_reload_reg (struct insn_chain *chain ATTRIBUTE_UNUSED, int r, int last_reload) { int i, pass, count; /* If we put this reload ahead, thinking it is a group, then insist on finding a group. Otherwise we can grab a reg that some other reload needs. (That can happen when we have a 68000 DATA_OR_FP_REG which is a group of data regs or one fp reg.) 
We need not be so restrictive if there are no more reloads for this insn. ??? Really it would be nicer to have smarter handling for that kind of reg class, where a problem like this is normal. Perhaps those classes should be avoided for reloading by use of more alternatives. */ int force_group = rld[r].nregs > 1 && ! last_reload; /* If we want a single register and haven't yet found one, take any reg in the right class and not in use. If we want a consecutive group, here is where we look for it. We use two passes so we can first look for reload regs to reuse, which are already in use for other reloads in this insn, and only then use additional registers. I think that maximizing reuse is needed to make sure we don't run out of reload regs. Suppose we have three reloads, and reloads A and B can share regs. These need two regs. Suppose A and B are given different regs. That leaves none for C. */ for (pass = 0; pass < 2; pass++) { /* I is the index in spill_regs. We advance it round-robin between insns to use all spill regs equally, so that inherited reloads have a chance of leapfrogging each other. */ i = last_spill_reg; for (count = 0; count < n_spills; count++) { int class = (int) rld[r].class; int regnum; i++; if (i >= n_spills) i -= n_spills; regnum = spill_regs[i]; if ((reload_reg_free_p (regnum, rld[r].opnum, rld[r].when_needed) || (rld[r].in /* We check reload_reg_used to make sure we don't clobber the return register. */ && ! TEST_HARD_REG_BIT (reload_reg_used, regnum) && free_for_value_p (regnum, rld[r].mode, rld[r].opnum, rld[r].when_needed, rld[r].in, rld[r].out, r, 1))) && TEST_HARD_REG_BIT (reg_class_contents[class], regnum) && HARD_REGNO_MODE_OK (regnum, rld[r].mode) /* Look first for regs to share, then for unshared. But don't share regs used for inherited reloads; they are the ones we want to preserve. */ && (pass || (TEST_HARD_REG_BIT (reload_reg_used_at_all, regnum) && ! TEST_HARD_REG_BIT (reload_reg_used_for_inherit, regnum)))) { int nr = hard_regno_nregs[regnum][rld[r].mode]; /* Avoid the problem where spilling a GENERAL_OR_FP_REG (on 68000) got us two FP regs. If NR is 1, we would reject both of them. */ if (force_group) nr = rld[r].nregs; /* If we need only one reg, we have already won. */ if (nr == 1) { /* But reject a single reg if we demand a group. */ if (force_group) continue; break; } /* Otherwise check that as many consecutive regs as we need are available here. */ while (nr > 1) { int regno = regnum + nr - 1; if (!(TEST_HARD_REG_BIT (reg_class_contents[class], regno) && spill_reg_order[regno] >= 0 && reload_reg_free_p (regno, rld[r].opnum, rld[r].when_needed))) break; nr--; } if (nr == 1) break; } } /* If we found something on pass 1, omit pass 2. */ if (count < n_spills) break; } /* We should have found a spill register by now. */ if (count >= n_spills) return 0; /* I is the index in SPILL_REG_RTX of the reload register we are to allocate. Get an rtx for it and find its register number. */ return set_reload_reg (i, r); } /* Initialize all the tables needed to allocate reload registers. CHAIN is the insn currently being processed; SAVE_RELOAD_REG_RTX is the array we use to restore the reg_rtx field for every reload. 
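As the loop in choose_reload_regs below shows, this is called once per attempt of the inheritance loop, so that a pass made with inheritance enabled can be discarded and retried without inheritance, starting again from the reg_rtx values that were saved before the first attempt.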
*/ static void choose_reload_regs_init (struct insn_chain *chain, rtx *save_reload_reg_rtx) { int i; for (i = 0; i < n_reloads; i++) rld[i].reg_rtx = save_reload_reg_rtx[i]; memset (reload_inherited, 0, MAX_RELOADS); memset (reload_inheritance_insn, 0, MAX_RELOADS * sizeof (rtx)); memset (reload_override_in, 0, MAX_RELOADS * sizeof (rtx)); CLEAR_HARD_REG_SET (reload_reg_used); CLEAR_HARD_REG_SET (reload_reg_used_at_all); CLEAR_HARD_REG_SET (reload_reg_used_in_op_addr); CLEAR_HARD_REG_SET (reload_reg_used_in_op_addr_reload); CLEAR_HARD_REG_SET (reload_reg_used_in_insn); CLEAR_HARD_REG_SET (reload_reg_used_in_other_addr); CLEAR_HARD_REG_SET (reg_used_in_insn); { HARD_REG_SET tmp; REG_SET_TO_HARD_REG_SET (tmp, &chain->live_throughout); IOR_HARD_REG_SET (reg_used_in_insn, tmp); REG_SET_TO_HARD_REG_SET (tmp, &chain->dead_or_set); IOR_HARD_REG_SET (reg_used_in_insn, tmp); compute_use_by_pseudos (&reg_used_in_insn, &chain->live_throughout); compute_use_by_pseudos (&reg_used_in_insn, &chain->dead_or_set); } for (i = 0; i < reload_n_operands; i++) { CLEAR_HARD_REG_SET (reload_reg_used_in_output[i]); CLEAR_HARD_REG_SET (reload_reg_used_in_input[i]); CLEAR_HARD_REG_SET (reload_reg_used_in_input_addr[i]); CLEAR_HARD_REG_SET (reload_reg_used_in_inpaddr_addr[i]); CLEAR_HARD_REG_SET (reload_reg_used_in_output_addr[i]); CLEAR_HARD_REG_SET (reload_reg_used_in_outaddr_addr[i]); } COMPL_HARD_REG_SET (reload_reg_unavailable, chain->used_spill_regs); CLEAR_HARD_REG_SET (reload_reg_used_for_inherit); for (i = 0; i < n_reloads; i++) /* If we have already decided to use a certain register, don't use it in another way. */ if (rld[i].reg_rtx) mark_reload_reg_in_use (REGNO (rld[i].reg_rtx), rld[i].opnum, rld[i].when_needed, rld[i].mode); } /* Assign hard reg targets for the pseudo-registers we must reload into hard regs for this insn. Also output the instructions to copy them in and out of the hard regs. For machines with register classes, we are responsible for finding a reload reg in the proper class. */ static void choose_reload_regs (struct insn_chain *chain) { rtx insn = chain->insn; int i, j; unsigned int max_group_size = 1; enum reg_class group_class = NO_REGS; int pass, win, inheritance; rtx save_reload_reg_rtx[MAX_RELOADS]; /* In order to be certain of getting the registers we need, we must sort the reloads into order of increasing register class. Then our grabbing of reload registers will parallel the process that provided the reload registers. Also note whether any of the reloads wants a consecutive group of regs. If so, record the maximum size of the group desired and what register class contains all the groups needed by this insn. */ for (j = 0; j < n_reloads; j++) { reload_order[j] = j; reload_spill_index[j] = -1; if (rld[j].nregs > 1) { max_group_size = MAX (rld[j].nregs, max_group_size); group_class = reg_class_superunion[(int) rld[j].class][(int) group_class]; } save_reload_reg_rtx[j] = rld[j].reg_rtx; } if (n_reloads > 1) qsort (reload_order, n_reloads, sizeof (short), reload_reg_class_lower); /* If -O, try first with inheritance, then turning it off. If not -O, don't do inheritance. Using inheritance when not optimizing leads to paradoxes with fp on the 68k: fp numbers (not NaNs) fail to be equal to themselves because one side of the comparison might be inherited. */ win = 0; for (inheritance = optimize > 0; inheritance >= 0; inheritance--) { choose_reload_regs_init (chain, save_reload_reg_rtx); /* Process the reloads in order of preference just found.
Beyond this point, subregs can be found in reload_reg_rtx. This used to look for an existing reloaded home for all of the reloads, and only then perform any new reloads. But that could lose if the reloads were done out of reg-class order because a later reload with a looser constraint might have an old home in a register needed by an earlier reload with a tighter constraint. To solve this, we make two passes over the reloads, in the order described above. In the first pass we try to inherit a reload from a previous insn. If there is a later reload that needs a class that is a proper subset of the class being processed, we must also allocate a spill register during the first pass. Then make a second pass over the reloads to allocate any reloads that haven't been given registers yet. */ for (j = 0; j < n_reloads; j++) { int r = reload_order[j]; rtx search_equiv = NULL_RTX; /* Ignore reloads that got marked inoperative. */ if (rld[r].out == 0 && rld[r].in == 0 && ! rld[r].secondary_p) continue; /* If find_reloads chose to use reload_in or reload_out as a reload register, we don't need to chose one. Otherwise, try even if it found one since we might save an insn if we find the value lying around. Try also when reload_in is a pseudo without a hard reg. */ if (rld[r].in != 0 && rld[r].reg_rtx != 0 && (rtx_equal_p (rld[r].in, rld[r].reg_rtx) || (rtx_equal_p (rld[r].out, rld[r].reg_rtx) && !MEM_P (rld[r].in) && true_regnum (rld[r].in) < FIRST_PSEUDO_REGISTER))) continue; #if 0 /* No longer needed for correct operation. It might give better code, or might not; worth an experiment? */ /* If this is an optional reload, we can't inherit from earlier insns until we are sure that any non-optional reloads have been allocated. The following code takes advantage of the fact that optional reloads are at the end of reload_order. */ if (rld[r].optional != 0) for (i = 0; i < j; i++) if ((rld[reload_order[i]].out != 0 || rld[reload_order[i]].in != 0 || rld[reload_order[i]].secondary_p) && ! rld[reload_order[i]].optional && rld[reload_order[i]].reg_rtx == 0) allocate_reload_reg (chain, reload_order[i], 0); #endif /* First see if this pseudo is already available as reloaded for a previous insn. We cannot try to inherit for reloads that are smaller than the maximum number of registers needed for groups unless the register we would allocate cannot be used for the groups. We could check here to see if this is a secondary reload for an object that is already in a register of the desired class. This would avoid the need for the secondary reload register. But this is complex because we can't easily determine what objects might want to be loaded via this reload. So let a register be allocated here. In `emit_reload_insns' we suppress one of the loads in the case described above. 
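   The inheritance test that follows is long, but stripped of its corner
   cases it asks one question: is there a hard register that still holds the
   value this reload wants?  A minimal stand-alone sketch of that lookup,
   with invented tables playing the roles of reg_last_reload_reg,
   reg_reloaded_contents and reg_reloaded_valid (and none of the mode, class,
   cost or group checks that the real code performs):

       static int last_reload_reg[10000];    // pseudo -> hard reg, or -1
       static int hard_reg_contents[64];     // hard reg -> pseudo, or -1
       static int hard_reg_valid[64];        // nonzero while still intact

       // Both tables are assumed to start out filled with -1 / 0.
       int
       inheritable_reg (int pseudo)
       {
         int hard = last_reload_reg[pseudo];
         if (hard < 0)
           return -1;                        // never reloaded before
         if (!hard_reg_valid[hard] || hard_reg_contents[hard] != pseudo)
           return -1;                        // clobbered since that reload
         return hard;                        // still holds the value
       }

   Everything below is the real version of this idea, hardened against
   subregs, auto-increment addresses, groups and secondary reloads.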
*/ if (inheritance) { int byte = 0; int regno = -1; enum machine_mode mode = VOIDmode; if (rld[r].in == 0) ; else if (REG_P (rld[r].in)) { regno = REGNO (rld[r].in); mode = GET_MODE (rld[r].in); } else if (REG_P (rld[r].in_reg)) { regno = REGNO (rld[r].in_reg); mode = GET_MODE (rld[r].in_reg); } else if (GET_CODE (rld[r].in_reg) == SUBREG && REG_P (SUBREG_REG (rld[r].in_reg))) { byte = SUBREG_BYTE (rld[r].in_reg); regno = REGNO (SUBREG_REG (rld[r].in_reg)); if (regno < FIRST_PSEUDO_REGISTER) regno = subreg_regno (rld[r].in_reg); mode = GET_MODE (rld[r].in_reg); } #ifdef AUTO_INC_DEC else if ((GET_CODE (rld[r].in_reg) == PRE_INC || GET_CODE (rld[r].in_reg) == PRE_DEC || GET_CODE (rld[r].in_reg) == POST_INC || GET_CODE (rld[r].in_reg) == POST_DEC) && REG_P (XEXP (rld[r].in_reg, 0))) { regno = REGNO (XEXP (rld[r].in_reg, 0)); mode = GET_MODE (XEXP (rld[r].in_reg, 0)); rld[r].out = rld[r].in; } #endif #if 0 /* This won't work, since REGNO can be a pseudo reg number. Also, it takes much more hair to keep track of all the things that can invalidate an inherited reload of part of a pseudoreg. */ else if (GET_CODE (rld[r].in) == SUBREG && REG_P (SUBREG_REG (rld[r].in))) regno = subreg_regno (rld[r].in); #endif if (regno >= 0 && reg_last_reload_reg[regno] != 0) { enum reg_class class = rld[r].class, last_class; rtx last_reg = reg_last_reload_reg[regno]; enum machine_mode need_mode; i = REGNO (last_reg); i += subreg_regno_offset (i, GET_MODE (last_reg), byte, mode); last_class = REGNO_REG_CLASS (i); if (byte == 0) need_mode = mode; else need_mode = smallest_mode_for_size (GET_MODE_SIZE (mode) + byte, GET_MODE_CLASS (mode)); if ( #ifdef CANNOT_CHANGE_MODE_CLASS (!REG_CANNOT_CHANGE_MODE_P (i, GET_MODE (last_reg), need_mode) && #endif (GET_MODE_SIZE (GET_MODE (last_reg)) >= GET_MODE_SIZE (need_mode)) #ifdef CANNOT_CHANGE_MODE_CLASS ) #endif && reg_reloaded_contents[i] == regno && TEST_HARD_REG_BIT (reg_reloaded_valid, i) && HARD_REGNO_MODE_OK (i, rld[r].mode) && (TEST_HARD_REG_BIT (reg_class_contents[(int) class], i) /* Even if we can't use this register as a reload register, we might use it for reload_override_in, if copying it to the desired class is cheap enough. */ || ((REGISTER_MOVE_COST (mode, last_class, class) < MEMORY_MOVE_COST (mode, class, 1)) #ifdef SECONDARY_INPUT_RELOAD_CLASS && (SECONDARY_INPUT_RELOAD_CLASS (class, mode, last_reg) == NO_REGS) #endif #ifdef SECONDARY_MEMORY_NEEDED && ! SECONDARY_MEMORY_NEEDED (last_class, class, mode) #endif )) && (rld[r].nregs == max_group_size || ! TEST_HARD_REG_BIT (reg_class_contents[(int) group_class], i)) && free_for_value_p (i, rld[r].mode, rld[r].opnum, rld[r].when_needed, rld[r].in, const0_rtx, r, 1)) { /* If a group is needed, verify that all the subsequent registers still have their values intact. */ int nr = hard_regno_nregs[i][rld[r].mode]; int k; for (k = 1; k < nr; k++) if (reg_reloaded_contents[i + k] != regno || ! TEST_HARD_REG_BIT (reg_reloaded_valid, i + k)) break; if (k == nr) { int i1; int bad_for_class; last_reg = (GET_MODE (last_reg) == mode ? last_reg : gen_rtx_REG (mode, i)); bad_for_class = 0; for (k = 0; k < nr; k++) bad_for_class |= ! TEST_HARD_REG_BIT (reg_class_contents[(int) rld[r].class], i+k); /* We found a register that contains the value we need. If this register is the same as an `earlyclobber' operand of the current insn, just mark it as a place to reload from since we can't use it as the reload register itself. 
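   In outline, the decision below is a single predicate: a register that
   already holds the value may become the reload register itself only when
   none of a short list of objections applies; otherwise it is still
   remembered in reload_override_in as a plain source to copy from.  A
   stand-alone sketch of that predicate, with every parameter name invented
   (the real code also consults free_for_value_p and keeps a register that
   find_reloads already chose for reload_out):

       int
       usable_as_reload_reg_p (int overlaps_earlyclobber,
                               int clobbers_live_pseudo,
                               int is_needed_frame_pointer,
                               int narrower_than_needed,
                               int wrong_class)
       {
         return !overlaps_earlyclobber
                && !clobbers_live_pseudo
                && !is_needed_frame_pointer
                && !narrower_than_needed
                && !wrong_class;
       }

   When the predicate fails, the register is only recorded as a place to
   reload from, exactly as the comment above says.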
*/ for (i1 = 0; i1 < n_earlyclobbers; i1++) if (reg_overlap_mentioned_for_reload_p (reg_last_reload_reg[regno], reload_earlyclobbers[i1])) break; if (i1 != n_earlyclobbers || ! (free_for_value_p (i, rld[r].mode, rld[r].opnum, rld[r].when_needed, rld[r].in, rld[r].out, r, 1)) /* Don't use it if we'd clobber a pseudo reg. */ || (TEST_HARD_REG_BIT (reg_used_in_insn, i) && rld[r].out && ! TEST_HARD_REG_BIT (reg_reloaded_dead, i)) /* Don't clobber the frame pointer. */ || (i == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed && rld[r].out) /* Don't really use the inherited spill reg if we need it wider than we've got it. */ || (GET_MODE_SIZE (rld[r].mode) > GET_MODE_SIZE (mode)) || bad_for_class /* If find_reloads chose reload_out as reload register, stay with it - that leaves the inherited register for subsequent reloads. */ || (rld[r].out && rld[r].reg_rtx && rtx_equal_p (rld[r].out, rld[r].reg_rtx))) { if (! rld[r].optional) { reload_override_in[r] = last_reg; reload_inheritance_insn[r] = reg_reloaded_insn[i]; } } else { int k; /* We can use this as a reload reg. */ /* Mark the register as in use for this part of the insn. */ mark_reload_reg_in_use (i, rld[r].opnum, rld[r].when_needed, rld[r].mode); rld[r].reg_rtx = last_reg; reload_inherited[r] = 1; reload_inheritance_insn[r] = reg_reloaded_insn[i]; reload_spill_index[r] = i; for (k = 0; k < nr; k++) SET_HARD_REG_BIT (reload_reg_used_for_inherit, i + k); } } } } } /* Here's another way to see if the value is already lying around. */ if (inheritance && rld[r].in != 0 && ! reload_inherited[r] && rld[r].out == 0 && (CONSTANT_P (rld[r].in) || GET_CODE (rld[r].in) == PLUS || REG_P (rld[r].in) || MEM_P (rld[r].in)) && (rld[r].nregs == max_group_size || ! reg_classes_intersect_p (rld[r].class, group_class))) search_equiv = rld[r].in; /* If this is an output reload from a simple move insn, look if an equivalence for the input is available. */ else if (inheritance && rld[r].in == 0 && rld[r].out != 0) { rtx set = single_set (insn); if (set && rtx_equal_p (rld[r].out, SET_DEST (set)) && CONSTANT_P (SET_SRC (set))) search_equiv = SET_SRC (set); } if (search_equiv) { rtx equiv = find_equiv_reg (search_equiv, insn, rld[r].class, -1, NULL, 0, rld[r].mode); int regno = 0; if (equiv != 0) { if (REG_P (equiv)) regno = REGNO (equiv); else if (GET_CODE (equiv) == SUBREG) { /* This must be a SUBREG of a hard register. Make a new REG since this might be used in an address and not all machines support SUBREGs there. */ regno = subreg_regno (equiv); equiv = gen_rtx_REG (rld[r].mode, regno); } else abort (); } /* If we found a spill reg, reject it unless it is free and of the desired class. */ if (equiv != 0) { int regs_used = 0; int bad_for_class = 0; int max_regno = regno + rld[r].nregs; for (i = regno; i < max_regno; i++) { regs_used |= TEST_HARD_REG_BIT (reload_reg_used_at_all, i); bad_for_class |= ! TEST_HARD_REG_BIT (reg_class_contents[(int) rld[r].class], i); } if ((regs_used && ! free_for_value_p (regno, rld[r].mode, rld[r].opnum, rld[r].when_needed, rld[r].in, rld[r].out, r, 1)) || bad_for_class) equiv = 0; } if (equiv != 0 && ! HARD_REGNO_MODE_OK (regno, rld[r].mode)) equiv = 0; /* We found a register that contains the value we need. If this register is the same as an `earlyclobber' operand of the current insn, just mark it as a place to reload from since we can't use it as the reload register itself. */ if (equiv != 0) for (i = 0; i < n_earlyclobbers; i++) if (reg_overlap_mentioned_for_reload_p (equiv, reload_earlyclobbers[i])) { if (! 
rld[r].optional) reload_override_in[r] = equiv; equiv = 0; break; } /* If the equiv register we have found is explicitly clobbered in the current insn, it depends on the reload type if we can use it, use it for reload_override_in, or not at all. In particular, we then can't use EQUIV for a RELOAD_FOR_OUTPUT_ADDRESS reload. */ if (equiv != 0) { if (regno_clobbered_p (regno, insn, rld[r].mode, 0)) switch (rld[r].when_needed) { case RELOAD_FOR_OTHER_ADDRESS: case RELOAD_FOR_INPADDR_ADDRESS: case RELOAD_FOR_INPUT_ADDRESS: case RELOAD_FOR_OPADDR_ADDR: break; case RELOAD_OTHER: case RELOAD_FOR_INPUT: case RELOAD_FOR_OPERAND_ADDRESS: if (! rld[r].optional) reload_override_in[r] = equiv; /* Fall through. */ default: equiv = 0; break; } else if (regno_clobbered_p (regno, insn, rld[r].mode, 1)) switch (rld[r].when_needed) { case RELOAD_FOR_OTHER_ADDRESS: case RELOAD_FOR_INPADDR_ADDRESS: case RELOAD_FOR_INPUT_ADDRESS: case RELOAD_FOR_OPADDR_ADDR: case RELOAD_FOR_OPERAND_ADDRESS: case RELOAD_FOR_INPUT: break; case RELOAD_OTHER: if (! rld[r].optional) reload_override_in[r] = equiv; /* Fall through. */ default: equiv = 0; break; } } /* If we found an equivalent reg, say no code need be generated to load it, and use it as our reload reg. */ if (equiv != 0 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed)) { int nr = hard_regno_nregs[regno][rld[r].mode]; int k; rld[r].reg_rtx = equiv; reload_inherited[r] = 1; /* If reg_reloaded_valid is not set for this register, there might be a stale spill_reg_store lying around. We must clear it, since otherwise emit_reload_insns might delete the store. */ if (! TEST_HARD_REG_BIT (reg_reloaded_valid, regno)) spill_reg_store[regno] = NULL_RTX; /* If any of the hard registers in EQUIV are spill registers, mark them as in use for this insn. */ for (k = 0; k < nr; k++) { i = spill_reg_order[regno + k]; if (i >= 0) { mark_reload_reg_in_use (regno, rld[r].opnum, rld[r].when_needed, rld[r].mode); SET_HARD_REG_BIT (reload_reg_used_for_inherit, regno + k); } } } } /* If we found a register to use already, or if this is an optional reload, we are done. */ if (rld[r].reg_rtx != 0 || rld[r].optional != 0) continue; #if 0 /* No longer needed for correct operation. Might or might not give better code on the average. Want to experiment? */ /* See if there is a later reload that has a class different from our class that intersects our class or that requires less register than our reload. If so, we must allocate a register to this reload now, since that reload might inherit a previous reload and take the only available register in our class. Don't do this for optional reloads since they will force all previous reloads to be allocated. Also don't do this for reloads that have been turned off. */ for (i = j + 1; i < n_reloads; i++) { int s = reload_order[i]; if ((rld[s].in == 0 && rld[s].out == 0 && ! rld[s].secondary_p) || rld[s].optional) continue; if ((rld[s].class != rld[r].class && reg_classes_intersect_p (rld[r].class, rld[s].class)) || rld[s].nregs < rld[r].nregs) break; } if (i == n_reloads) continue; allocate_reload_reg (chain, r, j == n_reloads - 1); #endif } /* Now allocate reload registers for anything non-optional that didn't get one yet. */ for (j = 0; j < n_reloads; j++) { int r = reload_order[j]; /* Ignore reloads that got marked inoperative. */ if (rld[r].out == 0 && rld[r].in == 0 && ! rld[r].secondary_p) continue; /* Skip reloads that already have a register allocated or are optional. */ if (rld[r].reg_rtx != 0 || rld[r].optional) continue; if (! 
allocate_reload_reg (chain, r, j == n_reloads - 1)) break; } /* If that loop got all the way, we have won. */ if (j == n_reloads) { win = 1; break; } /* Loop around and try without any inheritance. */ } if (! win) { /* First undo everything done by the failed attempt to allocate with inheritance. */ choose_reload_regs_init (chain, save_reload_reg_rtx); /* Some sanity tests to verify that the reloads found in the first pass are identical to the ones we have now. */ if (chain->n_reloads != n_reloads) abort (); for (i = 0; i < n_reloads; i++) { if (chain->rld[i].regno < 0 || chain->rld[i].reg_rtx != 0) continue; if (chain->rld[i].when_needed != rld[i].when_needed) abort (); for (j = 0; j < n_spills; j++) if (spill_regs[j] == chain->rld[i].regno) if (! set_reload_reg (j, i)) failed_reload (chain->insn, i); } } /* If we thought we could inherit a reload, because it seemed that nothing else wanted the same reload register earlier in the insn, verify that assumption, now that all reloads have been assigned. Likewise for reloads where reload_override_in has been set. */ /* If doing expensive optimizations, do one preliminary pass that doesn't cancel any inheritance, but removes reloads that have been needed only for reloads that we know can be inherited. */ for (pass = flag_expensive_optimizations; pass >= 0; pass--) { for (j = 0; j < n_reloads; j++) { int r = reload_order[j]; rtx check_reg; if (reload_inherited[r] && rld[r].reg_rtx) check_reg = rld[r].reg_rtx; else if (reload_override_in[r] && (REG_P (reload_override_in[r]) || GET_CODE (reload_override_in[r]) == SUBREG)) check_reg = reload_override_in[r]; else continue; if (! free_for_value_p (true_regnum (check_reg), rld[r].mode, rld[r].opnum, rld[r].when_needed, rld[r].in, (reload_inherited[r] ? rld[r].out : const0_rtx), r, 1)) { if (pass) continue; reload_inherited[r] = 0; reload_override_in[r] = 0; } /* If we can inherit a RELOAD_FOR_INPUT, or can use a reload_override_in, then we do not need its related RELOAD_FOR_INPUT_ADDRESS / RELOAD_FOR_INPADDR_ADDRESS reloads; likewise for other reload types. We handle this by removing a reload when its only replacement is mentioned in reload_in of the reload we are going to inherit. A special case are auto_inc expressions; even if the input is inherited, we still need the address for the output. We can recognize them because they have RELOAD_OUT set to RELOAD_IN. If we succeeded removing some reload and we are doing a preliminary pass just to remove such reloads, make another pass, since the removal of one reload might allow us to inherit another one. */ else if (rld[r].in && rld[r].out != rld[r].in && remove_address_replacements (rld[r].in) && pass) pass = 2; } } /* Now that reload_override_in is known valid, actually override reload_in. */ for (j = 0; j < n_reloads; j++) if (reload_override_in[j]) rld[j].in = reload_override_in[j]; /* If this reload won't be done because it has been canceled or is optional and not inherited, clear reload_reg_rtx so other routines (such as subst_reloads) don't get confused. */ for (j = 0; j < n_reloads; j++) if (rld[j].reg_rtx != 0 && ((rld[j].optional && ! reload_inherited[j]) || (rld[j].in == 0 && rld[j].out == 0 && ! rld[j].secondary_p))) { int regno = true_regnum (rld[j].reg_rtx); if (spill_reg_order[regno] >= 0) clear_reload_reg_in_use (regno, rld[j].opnum, rld[j].when_needed, rld[j].mode); rld[j].reg_rtx = 0; reload_spill_index[j] = -1; } /* Record which pseudos and which spill regs have output reloads. 
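   (A side note on the verification loop above: with expensive optimizations
   enabled it behaves like a repeat-until-stable scan, because removing one
   address reload can make yet another reload inheritable.  A minimal
   stand-alone sketch of that fixed-point idea, where try_remove stands in
   for remove_address_replacements and inherited[] stands in for the real
   free_for_value_p checks:

       void
       prune_address_reloads (int n, const int *inherited,
                              int (*try_remove) (int))
       {
         int changed = 1;
         while (changed)
           {
             changed = 0;
             for (int j = 0; j < n; j++)
               if (inherited[j] && try_remove (j))
                 changed = 1;      // one removal may enable another
           }
       }

   The real code expresses the same rescanning by bumping its pass counter
   back up whenever a removal succeeds.)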
*/ for (j = 0; j < n_reloads; j++) { int r = reload_order[j]; i = reload_spill_index[r]; /* I is nonneg if this reload uses a register. If rld[r].reg_rtx is 0, this is an optional reload that we opted to ignore. */ if (rld[r].out_reg != 0 && REG_P (rld[r].out_reg) && rld[r].reg_rtx != 0) { int nregno = REGNO (rld[r].out_reg); int nr = 1; if (nregno < FIRST_PSEUDO_REGISTER) nr = hard_regno_nregs[nregno][rld[r].mode]; while (--nr >= 0) reg_has_output_reload[nregno + nr] = 1; if (i >= 0) { nr = hard_regno_nregs[i][rld[r].mode]; while (--nr >= 0) SET_HARD_REG_BIT (reg_is_output_reload, i + nr); } if (rld[r].when_needed != RELOAD_OTHER && rld[r].when_needed != RELOAD_FOR_OUTPUT && rld[r].when_needed != RELOAD_FOR_INSN) abort (); } } } /* Deallocate the reload register for reload R. This is called from remove_address_replacements. */ void deallocate_reload_reg (int r) { int regno; if (! rld[r].reg_rtx) return; regno = true_regnum (rld[r].reg_rtx); rld[r].reg_rtx = 0; if (spill_reg_order[regno] >= 0) clear_reload_reg_in_use (regno, rld[r].opnum, rld[r].when_needed, rld[r].mode); reload_spill_index[r] = -1; } /* If SMALL_REGISTER_CLASSES is nonzero, we may not have merged two reloads of the same item for fear that we might not have enough reload registers. However, normally they will get the same reload register and hence actually need not be loaded twice. Here we check for the most common case of this phenomenon: when we have a number of reloads for the same object, each of which were allocated the same reload_reg_rtx, that reload_reg_rtx is not used for any other reload, and is not modified in the insn itself. If we find such, merge all the reloads and set the resulting reload to RELOAD_OTHER. This will not increase the number of spill registers needed and will prevent redundant code. */ static void merge_assigned_reloads (rtx insn) { int i, j; /* Scan all the reloads looking for ones that only load values and are not already RELOAD_OTHER and ones whose reload_reg_rtx are assigned and not modified by INSN. */ for (i = 0; i < n_reloads; i++) { int conflicting_input = 0; int max_input_address_opnum = -1; int min_conflicting_input_opnum = MAX_RECOG_OPERANDS; if (rld[i].in == 0 || rld[i].when_needed == RELOAD_OTHER || rld[i].out != 0 || rld[i].reg_rtx == 0 || reg_set_p (rld[i].reg_rtx, insn)) continue; /* Look at all other reloads. Ensure that the only use of this reload_reg_rtx is in a reload that just loads the same value as we do. Note that any secondary reloads must be of the identical class since the values, modes, and result registers are the same, so we need not do anything with any secondary reloads. */ for (j = 0; j < n_reloads; j++) { if (i == j || rld[j].reg_rtx == 0 || ! reg_overlap_mentioned_p (rld[j].reg_rtx, rld[i].reg_rtx)) continue; if (rld[j].when_needed == RELOAD_FOR_INPUT_ADDRESS && rld[j].opnum > max_input_address_opnum) max_input_address_opnum = rld[j].opnum; /* If the reload regs aren't exactly the same (e.g, different modes) or if the values are different, we can't merge this reload. But if it is an input reload, we might still merge RELOAD_FOR_INPUT_ADDRESS and RELOAD_FOR_OTHER_ADDRESS reloads. */ if (! rtx_equal_p (rld[i].reg_rtx, rld[j].reg_rtx) || rld[j].out != 0 || rld[j].in == 0 || ! 
rtx_equal_p (rld[i].in, rld[j].in)) { if (rld[j].when_needed != RELOAD_FOR_INPUT || ((rld[i].when_needed != RELOAD_FOR_INPUT_ADDRESS || rld[i].opnum > rld[j].opnum) && rld[i].when_needed != RELOAD_FOR_OTHER_ADDRESS)) break; conflicting_input = 1; if (min_conflicting_input_opnum > rld[j].opnum) min_conflicting_input_opnum = rld[j].opnum; } } /* If all is OK, merge the reloads. Only set this to RELOAD_OTHER if we, in fact, found any matching reloads. */ if (j == n_reloads && max_input_address_opnum <= min_conflicting_input_opnum) { for (j = 0; j < n_reloads; j++) if (i != j && rld[j].reg_rtx != 0 && rtx_equal_p (rld[i].reg_rtx, rld[j].reg_rtx) && (! conflicting_input || rld[j].when_needed == RELOAD_FOR_INPUT_ADDRESS || rld[j].when_needed == RELOAD_FOR_OTHER_ADDRESS)) { rld[i].when_needed = RELOAD_OTHER; rld[j].in = 0; reload_spill_index[j] = -1; transfer_replacements (i, j); } /* If this is now RELOAD_OTHER, look for any reloads that load parts of this operand and set them to RELOAD_FOR_OTHER_ADDRESS if they were for inputs, RELOAD_OTHER for outputs. Note that this test is equivalent to looking for reloads for this operand number. */ /* We must take special care when there are two or more reloads to be merged and a RELOAD_FOR_OUTPUT_ADDRESS reload that loads the same value or a part of it; we must not change its type if there is a conflicting input. */ if (rld[i].when_needed == RELOAD_OTHER) for (j = 0; j < n_reloads; j++) if (rld[j].in != 0 && rld[j].when_needed != RELOAD_OTHER && rld[j].when_needed != RELOAD_FOR_OTHER_ADDRESS && (! conflicting_input || rld[j].when_needed == RELOAD_FOR_INPUT_ADDRESS || rld[j].when_needed == RELOAD_FOR_INPADDR_ADDRESS) && reg_overlap_mentioned_for_reload_p (rld[j].in, rld[i].in)) { int k; rld[j].when_needed = ((rld[j].when_needed == RELOAD_FOR_INPUT_ADDRESS || rld[j].when_needed == RELOAD_FOR_INPADDR_ADDRESS) ? RELOAD_FOR_OTHER_ADDRESS : RELOAD_OTHER); /* Check to see if we accidentally converted two reloads that use the same reload register with different inputs to the same type. If so, the resulting code won't work, so abort. */ if (rld[j].reg_rtx) for (k = 0; k < j; k++) if (rld[k].in != 0 && rld[k].reg_rtx != 0 && rld[k].when_needed == rld[j].when_needed && rtx_equal_p (rld[k].reg_rtx, rld[j].reg_rtx) && ! rtx_equal_p (rld[k].in, rld[j].in)) abort (); } } } } /* These arrays are filled by emit_reload_insns and its subroutines. */ static rtx input_reload_insns[MAX_RECOG_OPERANDS]; static rtx other_input_address_reload_insns = 0; static rtx other_input_reload_insns = 0; static rtx input_address_reload_insns[MAX_RECOG_OPERANDS]; static rtx inpaddr_address_reload_insns[MAX_RECOG_OPERANDS]; static rtx output_reload_insns[MAX_RECOG_OPERANDS]; static rtx output_address_reload_insns[MAX_RECOG_OPERANDS]; static rtx outaddr_address_reload_insns[MAX_RECOG_OPERANDS]; static rtx operand_reload_insns = 0; static rtx other_operand_reload_insns = 0; static rtx other_output_reload_insns[MAX_RECOG_OPERANDS]; /* Values to be put in spill_reg_store are put here first. */ static rtx new_spill_reg_store[FIRST_PSEUDO_REGISTER]; static HARD_REG_SET reg_reloaded_died; /* Generate insns to perform reload RL, which is for the insn in CHAIN and has the number J. OLD contains the value to be used as input. 
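   (One remark before this function: the merging rule at the heart of
   merge_assigned_reloads above can be pictured without any rtx detail.
   struct toy_reload and mergeable_p below are invented for the sketch; the
   real test uses rtx_equal_p on the registers and values and has special
   cases for RELOAD_FOR_INPUT_ADDRESS and RELOAD_FOR_OTHER_ADDRESS reloads.

       struct toy_reload { int reg; int in; int out; };

       // Reloads A and B may be merged when they load the same value into
       // the same reload register and neither of them stores anything back.
       int
       mergeable_p (const struct toy_reload *a, const struct toy_reload *b)
       {
         return a->reg == b->reg
                && a->in == b->in
                && a->out == 0
                && b->out == 0;
       }

   When every other reload sharing the register passes this test, the whole
   set collapses into one RELOAD_OTHER reload, which is what keeps the demand
   for spill registers down.)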
*/ static void emit_input_reload_insns (struct insn_chain *chain, struct reload *rl, rtx old, int j) { rtx insn = chain->insn; rtx reloadreg = rl->reg_rtx; rtx oldequiv_reg = 0; rtx oldequiv = 0; int special = 0; enum machine_mode mode; rtx *where; /* Determine the mode to reload in. This is very tricky because we have three to choose from. There is the mode the insn operand wants (rl->inmode). There is the mode of the reload register RELOADREG. There is the intrinsic mode of the operand, which we could find by stripping some SUBREGs. It turns out that RELOADREG's mode is irrelevant: we can change that arbitrarily. Consider (SUBREG:SI foo:QI) as an operand that must be SImode; then the reload reg may not support QImode moves, so use SImode. If foo is in memory due to spilling a pseudo reg, this is safe, because the QImode value is in the least significant part of a slot big enough for a SImode. If foo is some other sort of memory reference, then it is impossible to reload this case, so previous passes had better make sure this never happens. Then consider a one-word union which has SImode and one of its members is a float, being fetched as (SUBREG:SF union:SI). We must fetch that as SFmode because we could be loading into a float-only register. In this case OLD's mode is correct. Consider an immediate integer: it has VOIDmode. Here we need to get a mode from something else. In some cases, there is a fourth mode, the operand's containing mode. If the insn specifies a containing mode for this operand, it overrides all others. I am not sure whether the algorithm here is always right, but it does the right things in those cases. */ mode = GET_MODE (old); if (mode == VOIDmode) mode = rl->inmode; #ifdef SECONDARY_INPUT_RELOAD_CLASS /* If we need a secondary register for this operation, see if the value is already in a register in that class. Don't do this if the secondary register will be used as a scratch register. */ if (rl->secondary_in_reload >= 0 && rl->secondary_in_icode == CODE_FOR_nothing && optimize) oldequiv = find_equiv_reg (old, insn, rld[rl->secondary_in_reload].class, -1, NULL, 0, mode); #endif /* If reloading from memory, see if there is a register that already holds the same value. If so, reload from there. We can pass 0 as the reload_reg_p argument because any other reload has either already been emitted, in which case find_equiv_reg will see the reload-insn, or has yet to be emitted, in which case it doesn't matter because we will use this equiv reg right away. */ if (oldequiv == 0 && optimize && (MEM_P (old) || (REG_P (old) && REGNO (old) >= FIRST_PSEUDO_REGISTER && reg_renumber[REGNO (old)] < 0))) oldequiv = find_equiv_reg (old, insn, ALL_REGS, -1, NULL, 0, mode); if (oldequiv) { unsigned int regno = true_regnum (oldequiv); /* Don't use OLDEQUIV if any other reload changes it at an earlier stage of this insn or at this stage. */ if (! free_for_value_p (regno, rl->mode, rl->opnum, rl->when_needed, rl->in, const0_rtx, j, 0)) oldequiv = 0; /* If it is no cheaper to copy from OLDEQUIV into the reload register than it would be to move from memory, don't use it. Likewise, if we need a secondary register or memory. 
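   In isolation that test is just a cost comparison; a stand-alone sketch,
   with invented parameter names standing in for REGISTER_MOVE_COST,
   MEMORY_MOVE_COST and the SECONDARY_INPUT_RELOAD_CLASS /
   SECONDARY_MEMORY_NEEDED checks:

       int
       worth_copying_from_p (int reg_move_cost, int mem_move_cost,
                             int needs_secondary_reload,
                             int needs_secondary_memory)
       {
         return reg_move_cost < mem_move_cost
                && !needs_secondary_reload
                && !needs_secondary_memory;
       }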
*/ if (oldequiv != 0 && (((enum reg_class) REGNO_REG_CLASS (regno) != rl->class && (REGISTER_MOVE_COST (mode, REGNO_REG_CLASS (regno), rl->class) >= MEMORY_MOVE_COST (mode, rl->class, 1))) #ifdef SECONDARY_INPUT_RELOAD_CLASS || (SECONDARY_INPUT_RELOAD_CLASS (rl->class, mode, oldequiv) != NO_REGS) #endif #ifdef SECONDARY_MEMORY_NEEDED || SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (regno), rl->class, mode) #endif )) oldequiv = 0; } /* delete_output_reload is only invoked properly if old contains the original pseudo register. Since this is replaced with a hard reg when RELOAD_OVERRIDE_IN is set, see if we can find the pseudo in RELOAD_IN_REG. */ if (oldequiv == 0 && reload_override_in[j] && REG_P (rl->in_reg)) { oldequiv = old; old = rl->in_reg; } if (oldequiv == 0) oldequiv = old; else if (REG_P (oldequiv)) oldequiv_reg = oldequiv; else if (GET_CODE (oldequiv) == SUBREG) oldequiv_reg = SUBREG_REG (oldequiv); /* If we are reloading from a register that was recently stored in with an output-reload, see if we can prove there was actually no need to store the old value in it. */ if (optimize && REG_P (oldequiv) && REGNO (oldequiv) < FIRST_PSEUDO_REGISTER && spill_reg_store[REGNO (oldequiv)] && REG_P (old) && (dead_or_set_p (insn, spill_reg_stored_to[REGNO (oldequiv)]) || rtx_equal_p (spill_reg_stored_to[REGNO (oldequiv)], rl->out_reg))) delete_output_reload (insn, j, REGNO (oldequiv)); /* Encapsulate both RELOADREG and OLDEQUIV into that mode, then load RELOADREG from OLDEQUIV. Note that we cannot use gen_lowpart_common since it can do the wrong thing when RELOADREG has a multi-word mode. Note that RELOADREG must always be a REG here. */ if (GET_MODE (reloadreg) != mode) reloadreg = reload_adjust_reg_for_mode (reloadreg, mode); while (GET_CODE (oldequiv) == SUBREG && GET_MODE (oldequiv) != mode) oldequiv = SUBREG_REG (oldequiv); if (GET_MODE (oldequiv) != VOIDmode && mode != GET_MODE (oldequiv)) oldequiv = gen_lowpart_SUBREG (mode, oldequiv); /* Switch to the right place to emit the reload insns. */ switch (rl->when_needed) { case RELOAD_OTHER: where = &other_input_reload_insns; break; case RELOAD_FOR_INPUT: where = &input_reload_insns[rl->opnum]; break; case RELOAD_FOR_INPUT_ADDRESS: where = &input_address_reload_insns[rl->opnum]; break; case RELOAD_FOR_INPADDR_ADDRESS: where = &inpaddr_address_reload_insns[rl->opnum]; break; case RELOAD_FOR_OUTPUT_ADDRESS: where = &output_address_reload_insns[rl->opnum]; break; case RELOAD_FOR_OUTADDR_ADDRESS: where = &outaddr_address_reload_insns[rl->opnum]; break; case RELOAD_FOR_OPERAND_ADDRESS: where = &operand_reload_insns; break; case RELOAD_FOR_OPADDR_ADDR: where = &other_operand_reload_insns; break; case RELOAD_FOR_OTHER_ADDRESS: where = &other_input_address_reload_insns; break; default: abort (); } push_to_sequence (*where); /* Auto-increment addresses must be reloaded in a special way. */ if (rl->out && ! rl->out_reg) { /* We are not going to bother supporting the case where a incremented register can't be copied directly from OLDEQUIV since this seems highly unlikely. */ if (rl->secondary_in_reload >= 0) abort (); if (reload_inherited[j]) oldequiv = reloadreg; old = XEXP (rl->in_reg, 0); if (optimize && REG_P (oldequiv) && REGNO (oldequiv) < FIRST_PSEUDO_REGISTER && spill_reg_store[REGNO (oldequiv)] && REG_P (old) && (dead_or_set_p (insn, spill_reg_stored_to[REGNO (oldequiv)]) || rtx_equal_p (spill_reg_stored_to[REGNO (oldequiv)], old))) delete_output_reload (insn, j, REGNO (oldequiv)); /* Prevent normal processing of this reload. 
*/ special = 1; /* Output a special code sequence for this case. */ new_spill_reg_store[REGNO (reloadreg)] = inc_for_reload (reloadreg, oldequiv, rl->out, rl->inc); } /* If we are reloading a pseudo-register that was set by the previous insn, see if we can get rid of that pseudo-register entirely by redirecting the previous insn into our reload register. */ else if (optimize && REG_P (old) && REGNO (old) >= FIRST_PSEUDO_REGISTER && dead_or_set_p (insn, old) /* This is unsafe if some other reload uses the same reg first. */ && ! conflicts_with_override (reloadreg) && free_for_value_p (REGNO (reloadreg), rl->mode, rl->opnum, rl->when_needed, old, rl->out, j, 0)) { rtx temp = PREV_INSN (insn); while (temp && GET_CODE (temp) == NOTE) temp = PREV_INSN (temp); if (temp && GET_CODE (temp) == INSN && GET_CODE (PATTERN (temp)) == SET && SET_DEST (PATTERN (temp)) == old /* Make sure we can access insn_operand_constraint. */ && asm_noperands (PATTERN (temp)) < 0 /* This is unsafe if operand occurs more than once in current insn. Perhaps some occurrences aren't reloaded. */ && count_occurrences (PATTERN (insn), old, 0) == 1) { rtx old = SET_DEST (PATTERN (temp)); /* Store into the reload register instead of the pseudo. */ SET_DEST (PATTERN (temp)) = reloadreg; /* Verify that resulting insn is valid. */ extract_insn (temp); if (constrain_operands (1)) { /* If the previous insn is an output reload, the source is a reload register, and its spill_reg_store entry will contain the previous destination. This is now invalid. */ if (REG_P (SET_SRC (PATTERN (temp))) && REGNO (SET_SRC (PATTERN (temp))) < FIRST_PSEUDO_REGISTER) { spill_reg_store[REGNO (SET_SRC (PATTERN (temp)))] = 0; spill_reg_stored_to[REGNO (SET_SRC (PATTERN (temp)))] = 0; } /* If these are the only uses of the pseudo reg, pretend for GDB it lives in the reload reg we used. */ if (REG_N_DEATHS (REGNO (old)) == 1 && REG_N_SETS (REGNO (old)) == 1) { reg_renumber[REGNO (old)] = REGNO (rl->reg_rtx); alter_reg (REGNO (old), -1); } special = 1; } else { SET_DEST (PATTERN (temp)) = old; } } } /* We can't do that, so output an insn to load RELOADREG. */ #ifdef SECONDARY_INPUT_RELOAD_CLASS /* If we have a secondary reload, pick up the secondary register and icode, if any. If OLDEQUIV and OLD are different or if this is an in-out reload, recompute whether or not we still need a secondary register and what the icode should be. If we still need a secondary register and the class or icode is different, go back to reloading from OLD if using OLDEQUIV means that we got the wrong type of register. We cannot have different class or icode due to an in-out reload because we don't make such reloads when both the input and output need secondary reload registers. */ if (! special && rl->secondary_in_reload >= 0) { rtx second_reload_reg = 0; int secondary_reload = rl->secondary_in_reload; rtx real_oldequiv = oldequiv; rtx real_old = old; rtx tmp; enum insn_code icode; /* If OLDEQUIV is a pseudo with a MEM, get the real MEM and similarly for OLD. See comments in get_secondary_reload in reload.c. */ /* If it is a pseudo that cannot be replaced with its equivalent MEM, we must fall back to reload_in, which will have all the necessary substitutions registered. Likewise for a pseudo that can't be replaced with its equivalent constant. Take extra care for subregs of such pseudos. Note that we cannot use reg_equiv_mem in this case because it is not in the right mode. 
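   Reduced to a predicate, the choice of REAL_OLDEQUIV and REAL_OLD made
   below is roughly the following sketch; the parameter names are invented,
   and in the real code they correspond to reg_equiv_mem being nonnull,
   num_not_at_initial_offset being zero and the operand not being a SUBREG.

       int
       can_use_equiv_mem_p (int have_equiv_mem, int offsets_have_shifted,
                            int operand_is_subreg)
       {
         return have_equiv_mem && !offsets_have_shifted && !operand_is_subreg;
       }

   When the predicate fails we hand the secondary reload machinery reload_in
   instead, since that form already carries the needed substitutions.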
*/ tmp = oldequiv; if (GET_CODE (tmp) == SUBREG) tmp = SUBREG_REG (tmp); if (REG_P (tmp) && REGNO (tmp) >= FIRST_PSEUDO_REGISTER && (reg_equiv_memory_loc[REGNO (tmp)] != 0 || reg_equiv_constant[REGNO (tmp)] != 0)) { if (! reg_equiv_mem[REGNO (tmp)] || num_not_at_initial_offset || GET_CODE (oldequiv) == SUBREG) real_oldequiv = rl->in; else real_oldequiv = reg_equiv_mem[REGNO (tmp)]; } tmp = old; if (GET_CODE (tmp) == SUBREG) tmp = SUBREG_REG (tmp); if (REG_P (tmp) && REGNO (tmp) >= FIRST_PSEUDO_REGISTER && (reg_equiv_memory_loc[REGNO (tmp)] != 0 || reg_equiv_constant[REGNO (tmp)] != 0)) { if (! reg_equiv_mem[REGNO (tmp)] || num_not_at_initial_offset || GET_CODE (old) == SUBREG) real_old = rl->in; else real_old = reg_equiv_mem[REGNO (tmp)]; } second_reload_reg = rld[secondary_reload].reg_rtx; icode = rl->secondary_in_icode; if ((old != oldequiv && ! rtx_equal_p (old, oldequiv)) || (rl->in != 0 && rl->out != 0)) { enum reg_class new_class = SECONDARY_INPUT_RELOAD_CLASS (rl->class, mode, real_oldequiv); if (new_class == NO_REGS) second_reload_reg = 0; else { enum insn_code new_icode; enum machine_mode new_mode; if (! TEST_HARD_REG_BIT (reg_class_contents[(int) new_class], REGNO (second_reload_reg))) oldequiv = old, real_oldequiv = real_old; else { new_icode = reload_in_optab[(int) mode]; if (new_icode != CODE_FOR_nothing && ((insn_data[(int) new_icode].operand[0].predicate && ! ((*insn_data[(int) new_icode].operand[0].predicate) (reloadreg, mode))) || (insn_data[(int) new_icode].operand[1].predicate && ! ((*insn_data[(int) new_icode].operand[1].predicate) (real_oldequiv, mode))))) new_icode = CODE_FOR_nothing; if (new_icode == CODE_FOR_nothing) new_mode = mode; else new_mode = insn_data[(int) new_icode].operand[2].mode; if (GET_MODE (second_reload_reg) != new_mode) { if (!HARD_REGNO_MODE_OK (REGNO (second_reload_reg), new_mode)) oldequiv = old, real_oldequiv = real_old; else second_reload_reg = reload_adjust_reg_for_mode (second_reload_reg, new_mode); } } } } /* If we still need a secondary reload register, check to see if it is being used as a scratch or intermediate register and generate code appropriately. If we need a scratch register, use REAL_OLDEQUIV since the form of the insn may depend on the actual address if it is a MEM. */ if (second_reload_reg) { if (icode != CODE_FOR_nothing) { emit_insn (GEN_FCN (icode) (reloadreg, real_oldequiv, second_reload_reg)); special = 1; } else { /* See if we need a scratch register to load the intermediate register (a tertiary reload). */ enum insn_code tertiary_icode = rld[secondary_reload].secondary_in_icode; if (tertiary_icode != CODE_FOR_nothing) { rtx third_reload_reg = rld[rld[secondary_reload].secondary_in_reload].reg_rtx; emit_insn ((GEN_FCN (tertiary_icode) (second_reload_reg, real_oldequiv, third_reload_reg))); } else gen_reload (second_reload_reg, real_oldequiv, rl->opnum, rl->when_needed); oldequiv = second_reload_reg; } } } #endif if (! special && ! 
rtx_equal_p (reloadreg, oldequiv)) { rtx real_oldequiv = oldequiv; if ((REG_P (oldequiv) && REGNO (oldequiv) >= FIRST_PSEUDO_REGISTER && (reg_equiv_memory_loc[REGNO (oldequiv)] != 0 || reg_equiv_constant[REGNO (oldequiv)] != 0)) || (GET_CODE (oldequiv) == SUBREG && REG_P (SUBREG_REG (oldequiv)) && (REGNO (SUBREG_REG (oldequiv)) >= FIRST_PSEUDO_REGISTER) && ((reg_equiv_memory_loc [REGNO (SUBREG_REG (oldequiv))] != 0) || (reg_equiv_constant [REGNO (SUBREG_REG (oldequiv))] != 0))) || (CONSTANT_P (oldequiv) && (PREFERRED_RELOAD_CLASS (oldequiv, REGNO_REG_CLASS (REGNO (reloadreg))) == NO_REGS))) real_oldequiv = rl->in; gen_reload (reloadreg, real_oldequiv, rl->opnum, rl->when_needed); } if (flag_non_call_exceptions) copy_eh_notes (insn, get_insns ()); /* End this sequence. */ *where = get_insns (); end_sequence (); /* Update reload_override_in so that delete_address_reloads_1 can see the actual register usage. */ if (oldequiv_reg) reload_override_in[j] = oldequiv; } /* Generate insns to for the output reload RL, which is for the insn described by CHAIN and has the number J. */ static void emit_output_reload_insns (struct insn_chain *chain, struct reload *rl, int j) { rtx reloadreg = rl->reg_rtx; rtx insn = chain->insn; int special = 0; rtx old = rl->out; enum machine_mode mode = GET_MODE (old); rtx p; if (rl->when_needed == RELOAD_OTHER) start_sequence (); else push_to_sequence (output_reload_insns[rl->opnum]); /* Determine the mode to reload in. See comments above (for input reloading). */ if (mode == VOIDmode) { /* VOIDmode should never happen for an output. */ if (asm_noperands (PATTERN (insn)) < 0) /* It's the compiler's fault. */ fatal_insn ("VOIDmode on an output", insn); error_for_asm (insn, "output operand is constant in `asm'"); /* Prevent crash--use something we know is valid. */ mode = word_mode; old = gen_rtx_REG (mode, REGNO (reloadreg)); } if (GET_MODE (reloadreg) != mode) reloadreg = reload_adjust_reg_for_mode (reloadreg, mode); #ifdef SECONDARY_OUTPUT_RELOAD_CLASS /* If we need two reload regs, set RELOADREG to the intermediate one, since it will be stored into OLD. We might need a secondary register only for an input reload, so check again here. */ if (rl->secondary_out_reload >= 0) { rtx real_old = old; if (REG_P (old) && REGNO (old) >= FIRST_PSEUDO_REGISTER && reg_equiv_mem[REGNO (old)] != 0) real_old = reg_equiv_mem[REGNO (old)]; if ((SECONDARY_OUTPUT_RELOAD_CLASS (rl->class, mode, real_old) != NO_REGS)) { rtx second_reloadreg = reloadreg; reloadreg = rld[rl->secondary_out_reload].reg_rtx; /* See if RELOADREG is to be used as a scratch register or as an intermediate register. */ if (rl->secondary_out_icode != CODE_FOR_nothing) { emit_insn ((GEN_FCN (rl->secondary_out_icode) (real_old, second_reloadreg, reloadreg))); special = 1; } else { /* See if we need both a scratch and intermediate reload register. */ int secondary_reload = rl->secondary_out_reload; enum insn_code tertiary_icode = rld[secondary_reload].secondary_out_icode; if (GET_MODE (reloadreg) != mode) reloadreg = reload_adjust_reg_for_mode (reloadreg, mode); if (tertiary_icode != CODE_FOR_nothing) { rtx third_reloadreg = rld[rld[secondary_reload].secondary_out_reload].reg_rtx; rtx tem; /* Copy primary reload reg to secondary reload reg. (Note that these have been swapped above, then secondary reload reg to OLD using our insn.) */ /* If REAL_OLD is a paradoxical SUBREG, remove it and try to put the opposite SUBREG on RELOADREG. 
*/ if (GET_CODE (real_old) == SUBREG && (GET_MODE_SIZE (GET_MODE (real_old)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (real_old)))) && 0 != (tem = gen_lowpart_common (GET_MODE (SUBREG_REG (real_old)), reloadreg))) real_old = SUBREG_REG (real_old), reloadreg = tem; gen_reload (reloadreg, second_reloadreg, rl->opnum, rl->when_needed); emit_insn ((GEN_FCN (tertiary_icode) (real_old, reloadreg, third_reloadreg))); special = 1; } else /* Copy between the reload regs here and then to OUT later. */ gen_reload (reloadreg, second_reloadreg, rl->opnum, rl->when_needed); } } } #endif /* Output the last reload insn. */ if (! special) { rtx set; /* Don't output the last reload if OLD is not the dest of INSN and is in the src and is clobbered by INSN. */ if (! flag_expensive_optimizations || !REG_P (old) || !(set = single_set (insn)) || rtx_equal_p (old, SET_DEST (set)) || !reg_mentioned_p (old, SET_SRC (set)) || !regno_clobbered_p (REGNO (old), insn, rl->mode, 0)) gen_reload (old, reloadreg, rl->opnum, rl->when_needed); } /* Look at all insns we emitted, just to be safe. */ for (p = get_insns (); p; p = NEXT_INSN (p)) if (INSN_P (p)) { rtx pat = PATTERN (p); /* If this output reload doesn't come from a spill reg, clear any memory of reloaded copies of the pseudo reg. If this output reload comes from a spill reg, reg_has_output_reload will make this do nothing. */ note_stores (pat, forget_old_reloads_1, NULL); if (reg_mentioned_p (rl->reg_rtx, pat)) { rtx set = single_set (insn); if (reload_spill_index[j] < 0 && set && SET_SRC (set) == rl->reg_rtx) { int src = REGNO (SET_SRC (set)); reload_spill_index[j] = src; SET_HARD_REG_BIT (reg_is_output_reload, src); if (find_regno_note (insn, REG_DEAD, src)) SET_HARD_REG_BIT (reg_reloaded_died, src); } if (REGNO (rl->reg_rtx) < FIRST_PSEUDO_REGISTER) { int s = rl->secondary_out_reload; set = single_set (p); /* If this reload copies only to the secondary reload register, the secondary reload does the actual store. */ if (s >= 0 && set == NULL_RTX) /* We can't tell what function the secondary reload has and where the actual store to the pseudo is made; leave new_spill_reg_store alone. */ ; else if (s >= 0 && SET_SRC (set) == rl->reg_rtx && SET_DEST (set) == rld[s].reg_rtx) { /* Usually the next instruction will be the secondary reload insn; if we can confirm that it is, setting new_spill_reg_store to that insn will allow an extra optimization. */ rtx s_reg = rld[s].reg_rtx; rtx next = NEXT_INSN (p); rld[s].out = rl->out; rld[s].out_reg = rl->out_reg; set = single_set (next); if (set && SET_SRC (set) == s_reg && ! new_spill_reg_store[REGNO (s_reg)]) { SET_HARD_REG_BIT (reg_is_output_reload, REGNO (s_reg)); new_spill_reg_store[REGNO (s_reg)] = next; } } else new_spill_reg_store[REGNO (rl->reg_rtx)] = p; } } } if (rl->when_needed == RELOAD_OTHER) { emit_insn (other_output_reload_insns[rl->opnum]); other_output_reload_insns[rl->opnum] = get_insns (); } else output_reload_insns[rl->opnum] = get_insns (); if (flag_non_call_exceptions) copy_eh_notes (insn, get_insns ()); end_sequence (); } /* Do input reloading for reload RL, which is for the insn described by CHAIN and has the number J. */ static void do_input_reload (struct insn_chain *chain, struct reload *rl, int j) { rtx insn = chain->insn; rtx old = (rl->in && MEM_P (rl->in) ? rl->in_reg : rl->in); if (old != 0 /* AUTO_INC reloads need to be handled even if inherited. We got an AUTO_INC reload if reload_out is set but reload_out_reg isn't. */ && (! reload_inherited[j] || (rl->out && ! rl->out_reg)) && ! 
rtx_equal_p (rl->reg_rtx, old) && rl->reg_rtx != 0) emit_input_reload_insns (chain, rld + j, old, j); /* When inheriting a wider reload, we have a MEM in rl->in, e.g. inheriting a SImode output reload for (mem:HI (plus:SI (reg:SI 14 fp) (const_int 10))) */ if (optimize && reload_inherited[j] && rl->in && MEM_P (rl->in) && MEM_P (rl->in_reg) && reload_spill_index[j] >= 0 && TEST_HARD_REG_BIT (reg_reloaded_valid, reload_spill_index[j])) rl->in = regno_reg_rtx[reg_reloaded_contents[reload_spill_index[j]]]; /* If we are reloading a register that was recently stored in with an output-reload, see if we can prove there was actually no need to store the old value in it. */ if (optimize && (reload_inherited[j] || reload_override_in[j]) && rl->reg_rtx && REG_P (rl->reg_rtx) && spill_reg_store[REGNO (rl->reg_rtx)] != 0 #if 0 /* There doesn't seem to be any reason to restrict this to pseudos and doing so loses in the case where we are copying from a register of the wrong class. */ && (REGNO (spill_reg_stored_to[REGNO (rl->reg_rtx)]) >= FIRST_PSEUDO_REGISTER) #endif /* The insn might have already some references to stackslots replaced by MEMs, while reload_out_reg still names the original pseudo. */ && (dead_or_set_p (insn, spill_reg_stored_to[REGNO (rl->reg_rtx)]) || rtx_equal_p (spill_reg_stored_to[REGNO (rl->reg_rtx)], rl->out_reg))) delete_output_reload (insn, j, REGNO (rl->reg_rtx)); } /* Do output reloading for reload RL, which is for the insn described by CHAIN and has the number J. ??? At some point we need to support handling output reloads of JUMP_INSNs or insns that set cc0. */ static void do_output_reload (struct insn_chain *chain, struct reload *rl, int j) { rtx note, old; rtx insn = chain->insn; /* If this is an output reload that stores something that is not loaded in this same reload, see if we can eliminate a previous store. */ rtx pseudo = rl->out_reg; if (pseudo && optimize && REG_P (pseudo) && ! rtx_equal_p (rl->in_reg, pseudo) && REGNO (pseudo) >= FIRST_PSEUDO_REGISTER && reg_last_reload_reg[REGNO (pseudo)]) { int pseudo_no = REGNO (pseudo); int last_regno = REGNO (reg_last_reload_reg[pseudo_no]); /* We don't need to test full validity of last_regno for inherit here; we only want to know if the store actually matches the pseudo. */ if (TEST_HARD_REG_BIT (reg_reloaded_valid, last_regno) && reg_reloaded_contents[last_regno] == pseudo_no && spill_reg_store[last_regno] && rtx_equal_p (pseudo, spill_reg_stored_to[last_regno])) delete_output_reload (insn, j, last_regno); } old = rl->out_reg; if (old == 0 || rl->reg_rtx == old || rl->reg_rtx == 0) return; /* An output operand that dies right away does need a reload, but need not be copied from it. Show the new location in the REG_UNUSED note. */ if ((REG_P (old) || GET_CODE (old) == SCRATCH) && (note = find_reg_note (insn, REG_UNUSED, old)) != 0) { XEXP (note, 0) = rl->reg_rtx; return; } /* Likewise for a SUBREG of an operand that dies. */ else if (GET_CODE (old) == SUBREG && REG_P (SUBREG_REG (old)) && 0 != (note = find_reg_note (insn, REG_UNUSED, SUBREG_REG (old)))) { XEXP (note, 0) = gen_lowpart_common (GET_MODE (old), rl->reg_rtx); return; } else if (GET_CODE (old) == SCRATCH) /* If we aren't optimizing, there won't be a REG_UNUSED note, but we don't want to make an output reload. */ return; /* If is a JUMP_INSN, we can't support output reloads yet. */ if (GET_CODE (insn) == JUMP_INSN) abort (); emit_output_reload_insns (chain, rld + j, j); } /* Reload number R reloads from or to a group of hard registers starting at register REGNO. 
Return true if it can be treated for inheritance purposes like a group of reloads, each one reloading a single hard register. The caller has already checked that the spill register and REGNO use the same number of registers to store the reload value. */ static bool inherit_piecemeal_p (int r ATTRIBUTE_UNUSED, int regno ATTRIBUTE_UNUSED) { #ifdef CANNOT_CHANGE_MODE_CLASS return (!REG_CANNOT_CHANGE_MODE_P (reload_spill_index[r], GET_MODE (rld[r].reg_rtx), reg_raw_mode[reload_spill_index[r]]) && !REG_CANNOT_CHANGE_MODE_P (regno, GET_MODE (rld[r].reg_rtx), reg_raw_mode[regno])); #else return true; #endif } /* Output insns to reload values in and out of the chosen reload regs. */ static void emit_reload_insns (struct insn_chain *chain) { rtx insn = chain->insn; int j; CLEAR_HARD_REG_SET (reg_reloaded_died); for (j = 0; j < reload_n_operands; j++) input_reload_insns[j] = input_address_reload_insns[j] = inpaddr_address_reload_insns[j] = output_reload_insns[j] = output_address_reload_insns[j] = outaddr_address_reload_insns[j] = other_output_reload_insns[j] = 0; other_input_address_reload_insns = 0; other_input_reload_insns = 0; operand_reload_insns = 0; other_operand_reload_insns = 0; /* Dump reloads into the dump file. */ if (dump_file) { fprintf (dump_file, "\nReloads for insn # %d\n", INSN_UID (insn)); debug_reload_to_stream (dump_file); } /* Now output the instructions to copy the data into and out of the reload registers. Do these in the order that the reloads were reported, since reloads of base and index registers precede reloads of operands and the operands may need the base and index registers reloaded. */ for (j = 0; j < n_reloads; j++) { if (rld[j].reg_rtx && REGNO (rld[j].reg_rtx) < FIRST_PSEUDO_REGISTER) new_spill_reg_store[REGNO (rld[j].reg_rtx)] = 0; do_input_reload (chain, rld + j, j); do_output_reload (chain, rld + j, j); } /* Now write all the insns we made for reloads in the order expected by the allocation functions. Prior to the insn being reloaded, we write the following reloads: RELOAD_FOR_OTHER_ADDRESS reloads for input addresses. RELOAD_OTHER reloads. For each operand, any RELOAD_FOR_INPADDR_ADDRESS reloads followed by any RELOAD_FOR_INPUT_ADDRESS reloads followed by the RELOAD_FOR_INPUT reload for the operand. RELOAD_FOR_OPADDR_ADDRS reloads. RELOAD_FOR_OPERAND_ADDRESS reloads. After the insn being reloaded, we write the following: For each operand, any RELOAD_FOR_OUTADDR_ADDRESS reloads followed by any RELOAD_FOR_OUTPUT_ADDRESS reload followed by the RELOAD_FOR_OUTPUT reload, followed by any RELOAD_OTHER output reloads for the operand. The RELOAD_OTHER output reloads are output in descending order by reload number. 
*/ emit_insn_before_sameloc (other_input_address_reload_insns, insn); emit_insn_before_sameloc (other_input_reload_insns, insn); for (j = 0; j < reload_n_operands; j++) { emit_insn_before_sameloc (inpaddr_address_reload_insns[j], insn); emit_insn_before_sameloc (input_address_reload_insns[j], insn); emit_insn_before_sameloc (input_reload_insns[j], insn); } emit_insn_before_sameloc (other_operand_reload_insns, insn); emit_insn_before_sameloc (operand_reload_insns, insn); for (j = 0; j < reload_n_operands; j++) { rtx x = emit_insn_after_sameloc (outaddr_address_reload_insns[j], insn); x = emit_insn_after_sameloc (output_address_reload_insns[j], x); x = emit_insn_after_sameloc (output_reload_insns[j], x); emit_insn_after_sameloc (other_output_reload_insns[j], x); } /* For all the spill regs newly reloaded in this instruction, record what they were reloaded from, so subsequent instructions can inherit the reloads. Update spill_reg_store for the reloads of this insn. Copy the elements that were updated in the loop above. */ for (j = 0; j < n_reloads; j++) { int r = reload_order[j]; int i = reload_spill_index[r]; /* If this is a non-inherited input reload from a pseudo, we must clear any memory of a previous store to the same pseudo. Only do something if there will not be an output reload for the pseudo being reloaded. */ if (rld[r].in_reg != 0 && ! (reload_inherited[r] || reload_override_in[r])) { rtx reg = rld[r].in_reg; if (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); if (REG_P (reg) && REGNO (reg) >= FIRST_PSEUDO_REGISTER && ! reg_has_output_reload[REGNO (reg)]) { int nregno = REGNO (reg); if (reg_last_reload_reg[nregno]) { int last_regno = REGNO (reg_last_reload_reg[nregno]); if (reg_reloaded_contents[last_regno] == nregno) spill_reg_store[last_regno] = 0; } } } /* I is nonneg if this reload used a register. If rld[r].reg_rtx is 0, this is an optional reload that we opted to ignore. */ if (i >= 0 && rld[r].reg_rtx != 0) { int nr = hard_regno_nregs[i][GET_MODE (rld[r].reg_rtx)]; int k; int part_reaches_end = 0; int all_reaches_end = 1; /* For a multi register reload, we need to check if all or part of the value lives to the end. */ for (k = 0; k < nr; k++) { if (reload_reg_reaches_end_p (i + k, rld[r].opnum, rld[r].when_needed)) part_reaches_end = 1; else all_reaches_end = 0; } /* Ignore reloads that don't reach the end of the insn in entirety. */ if (all_reaches_end) { /* First, clear out memory of what used to be in this spill reg. If consecutive registers are used, clear them all. */ for (k = 0; k < nr; k++) { CLEAR_HARD_REG_BIT (reg_reloaded_valid, i + k); CLEAR_HARD_REG_BIT (reg_reloaded_call_part_clobbered, i + k); } /* Maybe the spill reg contains a copy of reload_out. */ if (rld[r].out != 0 && (REG_P (rld[r].out) #ifdef AUTO_INC_DEC || ! rld[r].out_reg #endif || REG_P (rld[r].out_reg))) { rtx out = (REG_P (rld[r].out) ? rld[r].out : rld[r].out_reg ? rld[r].out_reg /* AUTO_INC */ : XEXP (rld[r].in_reg, 0)); int nregno = REGNO (out); int nnr = (nregno >= FIRST_PSEUDO_REGISTER ? 1 : hard_regno_nregs[nregno] [GET_MODE (rld[r].reg_rtx)]); bool piecemeal; spill_reg_store[i] = new_spill_reg_store[i]; spill_reg_stored_to[i] = out; reg_last_reload_reg[nregno] = rld[r].reg_rtx; piecemeal = (nregno < FIRST_PSEUDO_REGISTER && nr == nnr && inherit_piecemeal_p (r, nregno)); /* If NREGNO is a hard register, it may occupy more than one register. If it does, say what is in the rest of the registers assuming that both registers agree on how many words the object takes. 
If not, invalidate the subsequent registers. */ if (nregno < FIRST_PSEUDO_REGISTER) for (k = 1; k < nnr; k++) reg_last_reload_reg[nregno + k] = (piecemeal ? regno_reg_rtx[REGNO (rld[r].reg_rtx) + k] : 0); /* Now do the inverse operation. */ for (k = 0; k < nr; k++) { CLEAR_HARD_REG_BIT (reg_reloaded_dead, i + k); reg_reloaded_contents[i + k] = (nregno >= FIRST_PSEUDO_REGISTER || !piecemeal ? nregno : nregno + k); reg_reloaded_insn[i + k] = insn; SET_HARD_REG_BIT (reg_reloaded_valid, i + k); if (HARD_REGNO_CALL_PART_CLOBBERED (i + k, GET_MODE (out))) SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered, i + k); } } /* Maybe the spill reg contains a copy of reload_in. Only do something if there will not be an output reload for the register being reloaded. */ else if (rld[r].out_reg == 0 && rld[r].in != 0 && ((REG_P (rld[r].in) && REGNO (rld[r].in) >= FIRST_PSEUDO_REGISTER && ! reg_has_output_reload[REGNO (rld[r].in)]) || (REG_P (rld[r].in_reg) && ! reg_has_output_reload[REGNO (rld[r].in_reg)])) && ! reg_set_p (rld[r].reg_rtx, PATTERN (insn))) { int nregno; int nnr; rtx in; bool piecemeal; if (REG_P (rld[r].in) && REGNO (rld[r].in) >= FIRST_PSEUDO_REGISTER) in = rld[r].in; else if (REG_P (rld[r].in_reg)) in = rld[r].in_reg; else in = XEXP (rld[r].in_reg, 0); nregno = REGNO (in); nnr = (nregno >= FIRST_PSEUDO_REGISTER ? 1 : hard_regno_nregs[nregno] [GET_MODE (rld[r].reg_rtx)]); reg_last_reload_reg[nregno] = rld[r].reg_rtx; piecemeal = (nregno < FIRST_PSEUDO_REGISTER && nr == nnr && inherit_piecemeal_p (r, nregno)); if (nregno < FIRST_PSEUDO_REGISTER) for (k = 1; k < nnr; k++) reg_last_reload_reg[nregno + k] = (piecemeal ? regno_reg_rtx[REGNO (rld[r].reg_rtx) + k] : 0); /* Unless we inherited this reload, show we haven't recently done a store. Previous stores of inherited auto_inc expressions also have to be discarded. */ if (! reload_inherited[r] || (rld[r].out && ! rld[r].out_reg)) spill_reg_store[i] = 0; for (k = 0; k < nr; k++) { CLEAR_HARD_REG_BIT (reg_reloaded_dead, i + k); reg_reloaded_contents[i + k] = (nregno >= FIRST_PSEUDO_REGISTER || !piecemeal ? nregno : nregno + k); reg_reloaded_insn[i + k] = insn; SET_HARD_REG_BIT (reg_reloaded_valid, i + k); if (HARD_REGNO_CALL_PART_CLOBBERED (i + k, GET_MODE (in))) SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered, i + k); } } } /* However, if part of the reload reaches the end, then we must invalidate the old info for the part that survives to the end. */ else if (part_reaches_end) { for (k = 0; k < nr; k++) if (reload_reg_reaches_end_p (i + k, rld[r].opnum, rld[r].when_needed)) CLEAR_HARD_REG_BIT (reg_reloaded_valid, i + k); } } /* The following if-statement was #if 0'd in 1.34 (or before...). It's reenabled in 1.35 because supposedly nothing else deals with this problem. */ /* If a register gets output-reloaded from a non-spill register, that invalidates any previous reloaded copy of it. But forget_old_reloads_1 won't get to see it, because it thinks only about the original insn. So invalidate it here. */ if (i < 0 && rld[r].out != 0 && (REG_P (rld[r].out) || (MEM_P (rld[r].out) && REG_P (rld[r].out_reg)))) { rtx out = (REG_P (rld[r].out) ? rld[r].out : rld[r].out_reg); int nregno = REGNO (out); if (nregno >= FIRST_PSEUDO_REGISTER) { rtx src_reg, store_insn = NULL_RTX; reg_last_reload_reg[nregno] = 0; /* If we can find a hard register that is stored, record the storing insn so that we may delete this insn with delete_output_reload. */ src_reg = rld[r].reg_rtx; /* If this is an optional reload, try to find the source reg from an input reload. 
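   That search can be sketched on its own; reload_in[], reload_reg[] and
   stored_value are invented stand-ins for the rld[].in / rld[].reg_rtx
   fields and for SET_SRC of the insn's pattern:

       // Return the register of the input reload that loaded the value this
       // insn stores, or -1 when no such reload exists.
       int
       source_reload_reg (int n, const int *reload_in, const int *reload_reg,
                          int stored_value)
       {
         for (int k = 0; k < n; k++)
           if (reload_in[k] == stored_value)
             return reload_reg[k];
         return -1;
       }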
*/ if (! src_reg) { rtx set = single_set (insn); if (set && SET_DEST (set) == rld[r].out) { int k; src_reg = SET_SRC (set); store_insn = insn; for (k = 0; k < n_reloads; k++) { if (rld[k].in == src_reg) { src_reg = rld[k].reg_rtx; break; } } } } else store_insn = new_spill_reg_store[REGNO (src_reg)]; if (src_reg && REG_P (src_reg) && REGNO (src_reg) < FIRST_PSEUDO_REGISTER) { int src_regno = REGNO (src_reg); int nr = hard_regno_nregs[src_regno][rld[r].mode]; /* The place where to find a death note varies with PRESERVE_DEATH_INFO_REGNO_P . The condition is not necessarily checked exactly in the code that moves notes, so just check both locations. */ rtx note = find_regno_note (insn, REG_DEAD, src_regno); if (! note && store_insn) note = find_regno_note (store_insn, REG_DEAD, src_regno); while (nr-- > 0) { spill_reg_store[src_regno + nr] = store_insn; spill_reg_stored_to[src_regno + nr] = out; reg_reloaded_contents[src_regno + nr] = nregno; reg_reloaded_insn[src_regno + nr] = store_insn; CLEAR_HARD_REG_BIT (reg_reloaded_dead, src_regno + nr); SET_HARD_REG_BIT (reg_reloaded_valid, src_regno + nr); if (HARD_REGNO_CALL_PART_CLOBBERED (src_regno + nr, GET_MODE (src_reg))) SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered, src_regno + nr); SET_HARD_REG_BIT (reg_is_output_reload, src_regno + nr); if (note) SET_HARD_REG_BIT (reg_reloaded_died, src_regno); else CLEAR_HARD_REG_BIT (reg_reloaded_died, src_regno); } reg_last_reload_reg[nregno] = src_reg; /* We have to set reg_has_output_reload here, or else forget_old_reloads_1 will clear reg_last_reload_reg right away. */ reg_has_output_reload[nregno] = 1; } } else { int num_regs = hard_regno_nregs[nregno][GET_MODE (rld[r].out)]; while (num_regs-- > 0) reg_last_reload_reg[nregno + num_regs] = 0; } } } IOR_HARD_REG_SET (reg_reloaded_dead, reg_reloaded_died); } /* Emit code to perform a reload from IN (which may be a reload register) to OUT (which may also be a reload register). IN or OUT is from operand OPNUM with reload type TYPE. Returns first insn emitted. */ rtx gen_reload (rtx out, rtx in, int opnum, enum reload_type type) { rtx last = get_last_insn (); rtx tem; /* If IN is a paradoxical SUBREG, remove it and try to put the opposite SUBREG on OUT. Likewise for a paradoxical SUBREG on OUT. */ if (GET_CODE (in) == SUBREG && (GET_MODE_SIZE (GET_MODE (in)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))) && (tem = gen_lowpart_common (GET_MODE (SUBREG_REG (in)), out)) != 0) in = SUBREG_REG (in), out = tem; else if (GET_CODE (out) == SUBREG && (GET_MODE_SIZE (GET_MODE (out)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))) && (tem = gen_lowpart_common (GET_MODE (SUBREG_REG (out)), in)) != 0) out = SUBREG_REG (out), in = tem; /* How to do this reload can get quite tricky. Normally, we are being asked to reload a simple operand, such as a MEM, a constant, or a pseudo register that didn't get a hard register. In that case we can just call emit_move_insn. We can also be asked to reload a PLUS that adds a register or a MEM to another register, constant or MEM. This can occur during frame pointer elimination and while reloading addresses. This case is handled by trying to emit a single insn to perform the add. If it is not valid, we use a two insn sequence. Finally, we could be called to handle an 'o' constraint by putting an address into a register. In that case, we first try to do this with a named pattern of "reload_load_address". If no such pattern exists, we just emit a SET insn and hope for the best (it will normally be valid on machines that use 'o'). 
This entire process is made complex because reload will never process the insns we generate here and so we must ensure that they will fit their constraints and also by the fact that parts of IN might be being reloaded separately and replaced with spill registers. Because of this, we are, in some sense, just guessing the right approach here. The one listed above seems to work. ??? At some point, this whole thing needs to be rethought. */ if (GET_CODE (in) == PLUS && (REG_P (XEXP (in, 0)) || GET_CODE (XEXP (in, 0)) == SUBREG || MEM_P (XEXP (in, 0))) && (REG_P (XEXP (in, 1)) || GET_CODE (XEXP (in, 1)) == SUBREG || CONSTANT_P (XEXP (in, 1)) || MEM_P (XEXP (in, 1)))) { /* We need to compute the sum of a register or a MEM and another register, constant, or MEM, and put it into the reload register. The best possible way of doing this is if the machine has a three-operand ADD insn that accepts the required operands. The simplest approach is to try to generate such an insn and see if it is recognized and matches its constraints. If so, it can be used. It might be better not to actually emit the insn unless it is valid, but we need to pass the insn as an operand to `recog' and `extract_insn' and it is simpler to emit and then delete the insn if not valid than to dummy things up. */ rtx op0, op1, tem, insn; int code; op0 = find_replacement (&XEXP (in, 0)); op1 = find_replacement (&XEXP (in, 1)); /* Since constraint checking is strict, commutativity won't be checked, so we need to do that here to avoid spurious failure if the add instruction is two-address and the second operand of the add is the same as the reload reg, which is frequently the case. If the insn would be A = B + A, rearrange it so it will be A = A + B as constrain_operands expects. */ if (REG_P (XEXP (in, 1)) && REGNO (out) == REGNO (XEXP (in, 1))) tem = op0, op0 = op1, op1 = tem; if (op0 != XEXP (in, 0) || op1 != XEXP (in, 1)) in = gen_rtx_PLUS (GET_MODE (in), op0, op1); insn = emit_insn (gen_rtx_SET (VOIDmode, out, in)); code = recog_memoized (insn); if (code >= 0) { extract_insn (insn); /* We want constrain operands to treat this insn strictly in its validity determination, i.e., the way it would after reload has completed. */ if (constrain_operands (1)) return insn; } delete_insns_since (last); /* If that failed, we must use a conservative two-insn sequence. Use a move to copy one operand into the reload register. Prefer to reload a constant, MEM or pseudo since the move patterns can handle an arbitrary operand. If OP1 is not a constant, MEM or pseudo and OP1 is not a valid operand for an add instruction, then reload OP1. After reloading one of the operands into the reload register, add the reload register to the output register. If there is another way to do this for a specific machine, a DEFINE_PEEPHOLE should be specified that recognizes the sequence we emit below. */ code = (int) add_optab->handlers[(int) GET_MODE (out)].insn_code; if (CONSTANT_P (op1) || MEM_P (op1) || GET_CODE (op1) == SUBREG || (REG_P (op1) && REGNO (op1) >= FIRST_PSEUDO_REGISTER) || (code != CODE_FOR_nothing && ! ((*insn_data[code].operand[2].predicate) (op1, insn_data[code].operand[2].mode)))) tem = op0, op0 = op1, op1 = tem; gen_reload (out, op0, opnum, type); /* If OP0 and OP1 are the same, we can use OUT for OP1. This fixes a problem on the 32K where the stack pointer cannot be used as an operand of an add insn. 
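   As an illustrative worked instance (not from the original sources; the
   register numbers and modes are made up): suppose IN is
   (plus:SI (reg:SI 6) (const_int 8)), OUT is (reg:SI 2), and no
   three-operand add pattern is recognized.  The operands are swapped so
   that the constant is moved first, and the conservative two-insn
   fallback becomes

      (set (reg:SI 2) (const_int 8))
      (set (reg:SI 2) (plus:SI (reg:SI 2) (reg:SI 6)))

   If instead IN is (plus:SI (reg:SI 6) (reg:SI 6)), the first move
   already leaves that addend in OUT, so OUT itself is substituted for
   the second operand and the add becomes
   (set (reg:SI 2) (plus:SI (reg:SI 2) (reg:SI 2))); this is what keeps
   the sequence legal on machines where a register such as the stack
   pointer cannot be used as an add operand.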
*/ if (rtx_equal_p (op0, op1)) op1 = out; insn = emit_insn (gen_add2_insn (out, op1)); /* If that failed, copy the address register to the reload register. Then add the constant to the reload register. */ code = recog_memoized (insn); if (code >= 0) { extract_insn (insn); /* We want constrain operands to treat this insn strictly in its validity determination, i.e., the way it would after reload has completed. */ if (constrain_operands (1)) { /* Add a REG_EQUIV note so that find_equiv_reg can find it. */ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUIV, in, REG_NOTES (insn)); return insn; } } delete_insns_since (last); gen_reload (out, op1, opnum, type); insn = emit_insn (gen_add2_insn (out, op0)); REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUIV, in, REG_NOTES (insn)); } #ifdef SECONDARY_MEMORY_NEEDED /* If we need a memory location to do the move, do it that way. */ else if ((REG_P (in) || GET_CODE (in) == SUBREG) && reg_or_subregno (in) < FIRST_PSEUDO_REGISTER && (REG_P (out) || GET_CODE (out) == SUBREG) && reg_or_subregno (out) < FIRST_PSEUDO_REGISTER && SECONDARY_MEMORY_NEEDED (REGNO_REG_CLASS (reg_or_subregno (in)), REGNO_REG_CLASS (reg_or_subregno (out)), GET_MODE (out))) { /* Get the memory to use and rewrite both registers to its mode. */ rtx loc = get_secondary_mem (in, GET_MODE (out), opnum, type); if (GET_MODE (loc) != GET_MODE (out)) out = gen_rtx_REG (GET_MODE (loc), REGNO (out)); if (GET_MODE (loc) != GET_MODE (in)) in = gen_rtx_REG (GET_MODE (loc), REGNO (in)); gen_reload (loc, in, opnum, type); gen_reload (out, loc, opnum, type); } #endif /* If IN is a simple operand, use gen_move_insn. */ else if (OBJECT_P (in) || GET_CODE (in) == SUBREG) emit_insn (gen_move_insn (out, in)); #ifdef HAVE_reload_load_address else if (HAVE_reload_load_address) emit_insn (gen_reload_load_address (out, in)); #endif /* Otherwise, just write (set OUT IN) and hope for the best. */ else emit_insn (gen_rtx_SET (VOIDmode, out, in)); /* Return the first insn emitted. We can not just return get_last_insn, because there may have been multiple instructions emitted. Also note that gen_move_insn may emit more than one insn itself, so we can not assume that there is one insn emitted per emit_insn_before call. */ return last ? NEXT_INSN (last) : get_insns (); } /* Delete a previously made output-reload whose result we now believe is not needed. First we double-check. INSN is the insn now being processed. LAST_RELOAD_REG is the hard register number for which we want to delete the last output reload. J is the reload-number that originally used REG. The caller has made certain that reload J doesn't use REG any longer for input. */ static void delete_output_reload (rtx insn, int j, int last_reload_reg) { rtx output_reload_insn = spill_reg_store[last_reload_reg]; rtx reg = spill_reg_stored_to[last_reload_reg]; int k; int n_occurrences; int n_inherited = 0; rtx i1; rtx substed; /* It is possible that this reload has been only used to set another reload we eliminated earlier and thus deleted this instruction too. */ if (INSN_DELETED_P (output_reload_insn)) return; /* Get the raw pseudo-register referred to. */ while (GET_CODE (reg) == SUBREG) reg = SUBREG_REG (reg); substed = reg_equiv_memory_loc[REGNO (reg)]; /* This is unsafe if the operand occurs more often in the current insn than it is inherited. */ for (k = n_reloads - 1; k >= 0; k--) { rtx reg2 = rld[k].in; if (! reg2) continue; if (MEM_P (reg2) || reload_override_in[k]) reg2 = rld[k].in_reg; #ifdef AUTO_INC_DEC if (rld[k].out && ! 
rld[k].out_reg) reg2 = XEXP (rld[k].in_reg, 0); #endif while (GET_CODE (reg2) == SUBREG) reg2 = SUBREG_REG (reg2); if (rtx_equal_p (reg2, reg)) { if (reload_inherited[k] || reload_override_in[k] || k == j) { n_inherited++; reg2 = rld[k].out_reg; if (! reg2) continue; while (GET_CODE (reg2) == SUBREG) reg2 = XEXP (reg2, 0); if (rtx_equal_p (reg2, reg)) n_inherited++; } else return; } } n_occurrences = count_occurrences (PATTERN (insn), reg, 0); if (substed) n_occurrences += count_occurrences (PATTERN (insn), eliminate_regs (substed, 0, NULL_RTX), 0); if (n_occurrences > n_inherited) return; /* If the pseudo-reg we are reloading is no longer referenced anywhere between the store into it and here, and no jumps or labels intervene, then the value can get here through the reload reg alone. Otherwise, give up--return. */ for (i1 = NEXT_INSN (output_reload_insn); i1 != insn; i1 = NEXT_INSN (i1)) { if (GET_CODE (i1) == CODE_LABEL || GET_CODE (i1) == JUMP_INSN) return; if ((GET_CODE (i1) == INSN || GET_CODE (i1) == CALL_INSN) && reg_mentioned_p (reg, PATTERN (i1))) { /* If this is USE in front of INSN, we only have to check that there are no more references than accounted for by inheritance. */ while (GET_CODE (i1) == INSN && GET_CODE (PATTERN (i1)) == USE) { n_occurrences += rtx_equal_p (reg, XEXP (PATTERN (i1), 0)) != 0; i1 = NEXT_INSN (i1); } if (n_occurrences <= n_inherited && i1 == insn) break; return; } } /* We will be deleting the insn. Remove the spill reg information. */ for (k = hard_regno_nregs[last_reload_reg][GET_MODE (reg)]; k-- > 0; ) { spill_reg_store[last_reload_reg + k] = 0; spill_reg_stored_to[last_reload_reg + k] = 0; } /* The caller has already checked that REG dies or is set in INSN. It has also checked that we are optimizing, and thus some inaccuracies in the debugging information are acceptable. So we could just delete output_reload_insn. But in some cases we can improve the debugging information without sacrificing optimization - maybe even improving the code: See if the pseudo reg has been completely replaced with reload regs. If so, delete the store insn and forget we had a stack slot for the pseudo. */ if (rld[j].out != rld[j].in && REG_N_DEATHS (REGNO (reg)) == 1 && REG_N_SETS (REGNO (reg)) == 1 && REG_BASIC_BLOCK (REGNO (reg)) >= 0 && find_regno_note (insn, REG_DEAD, REGNO (reg))) { rtx i2; /* We know that it was used only between here and the beginning of the current basic block. (We also know that the last use before INSN was the output reload we are thinking of deleting, but never mind that.) Search that range; see if any ref remains. */ for (i2 = PREV_INSN (insn); i2; i2 = PREV_INSN (i2)) { rtx set = single_set (i2); /* Uses which just store in the pseudo don't count, since if they are the only uses, they are dead. */ if (set != 0 && SET_DEST (set) == reg) continue; if (GET_CODE (i2) == CODE_LABEL || GET_CODE (i2) == JUMP_INSN) break; if ((GET_CODE (i2) == INSN || GET_CODE (i2) == CALL_INSN) && reg_mentioned_p (reg, PATTERN (i2))) { /* Some other ref remains; just delete the output reload we know to be dead. */ delete_address_reloads (output_reload_insn, insn); delete_insn (output_reload_insn); return; } } /* Delete the now-dead stores into this pseudo. Note that this loop also takes care of deleting output_reload_insn. 
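   An illustrative scenario (the pseudo and register numbers here are
   hypothetical, not taken from the surrounding code): pseudo 210 is set
   exactly once and dies exactly once, both inside this basic block, and
   its only setter is the output reload store recorded as
   output_reload_insn.  Since the backward scan above found no other
   surviving reference to the pseudo, every store into it is dead, so the
   loop below removes those stores (output_reload_insn among them),
   reg_renumber for the pseudo is pointed at the reload register's hard
   regno purely so the debugging information still shows a home for it,
   and alter_reg then forgets the stack slot it had been given.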
*/ for (i2 = PREV_INSN (insn); i2; i2 = PREV_INSN (i2)) { rtx set = single_set (i2); if (set != 0 && SET_DEST (set) == reg) { delete_address_reloads (i2, insn); delete_insn (i2); } if (GET_CODE (i2) == CODE_LABEL || GET_CODE (i2) == JUMP_INSN) break; } /* For the debugging info, say the pseudo lives in this reload reg. */ reg_renumber[REGNO (reg)] = REGNO (rld[j].reg_rtx); alter_reg (REGNO (reg), -1); } else { delete_address_reloads (output_reload_insn, insn); delete_insn (output_reload_insn); } } /* We are going to delete DEAD_INSN. Recursively delete loads of reload registers used in DEAD_INSN that are not used till CURRENT_INSN. CURRENT_INSN is being reloaded, so we have to check its reloads too. */ static void delete_address_reloads (rtx dead_insn, rtx current_insn) { rtx set = single_set (dead_insn); rtx set2, dst, prev, next; if (set) { rtx dst = SET_DEST (set); if (MEM_P (dst)) delete_address_reloads_1 (dead_insn, XEXP (dst, 0), current_insn); } /* If we deleted the store from a reloaded post_{in,de}c expression, we can delete the matching adds. */ prev = PREV_INSN (dead_insn); next = NEXT_INSN (dead_insn); if (! prev || ! next) return; set = single_set (next); set2 = single_set (prev); if (! set || ! set2 || GET_CODE (SET_SRC (set)) != PLUS || GET_CODE (SET_SRC (set2)) != PLUS || GET_CODE (XEXP (SET_SRC (set), 1)) != CONST_INT || GET_CODE (XEXP (SET_SRC (set2), 1)) != CONST_INT) return; dst = SET_DEST (set); if (! rtx_equal_p (dst, SET_DEST (set2)) || ! rtx_equal_p (dst, XEXP (SET_SRC (set), 0)) || ! rtx_equal_p (dst, XEXP (SET_SRC (set2), 0)) || (INTVAL (XEXP (SET_SRC (set), 1)) != -INTVAL (XEXP (SET_SRC (set2), 1)))) return; delete_related_insns (prev); delete_related_insns (next); } /* Subfunction of delete_address_reloads: process registers found in X. */ static void delete_address_reloads_1 (rtx dead_insn, rtx x, rtx current_insn) { rtx prev, set, dst, i2; int i, j; enum rtx_code code = GET_CODE (x); if (code != REG) { const char *fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') delete_address_reloads_1 (dead_insn, XEXP (x, i), current_insn); else if (fmt[i] == 'E') { for (j = XVECLEN (x, i) - 1; j >= 0; j--) delete_address_reloads_1 (dead_insn, XVECEXP (x, i, j), current_insn); } } return; } if (spill_reg_order[REGNO (x)] < 0) return; /* Scan backwards for the insn that sets x. This might be a way back due to inheritance. */ for (prev = PREV_INSN (dead_insn); prev; prev = PREV_INSN (prev)) { code = GET_CODE (prev); if (code == CODE_LABEL || code == JUMP_INSN) return; if (!INSN_P (prev)) continue; if (reg_set_p (x, PATTERN (prev))) break; if (reg_referenced_p (x, PATTERN (prev))) return; } if (! prev || INSN_UID (prev) < reload_first_uid) return; /* Check that PREV only sets the reload register. */ set = single_set (prev); if (! set) return; dst = SET_DEST (set); if (!REG_P (dst) || ! rtx_equal_p (dst, x)) return; if (! reg_set_p (dst, PATTERN (dead_insn))) { /* Check if DST was used in a later insn - it might have been inherited. */ for (i2 = NEXT_INSN (dead_insn); i2; i2 = NEXT_INSN (i2)) { if (GET_CODE (i2) == CODE_LABEL) break; if (! INSN_P (i2)) continue; if (reg_referenced_p (dst, PATTERN (i2))) { /* If there is a reference to the register in the current insn, it might be loaded in a non-inherited reload. If no other reload uses it, that means the register is set before referenced. 
*/ if (i2 == current_insn) { for (j = n_reloads - 1; j >= 0; j--) if ((rld[j].reg_rtx == dst && reload_inherited[j]) || reload_override_in[j] == dst) return; for (j = n_reloads - 1; j >= 0; j--) if (rld[j].in && rld[j].reg_rtx == dst) break; if (j >= 0) break; } return; } if (GET_CODE (i2) == JUMP_INSN) break; /* If DST is still live at CURRENT_INSN, check if it is used for any reload. Note that even if CURRENT_INSN sets DST, we still have to check the reloads. */ if (i2 == current_insn) { for (j = n_reloads - 1; j >= 0; j--) if ((rld[j].reg_rtx == dst && reload_inherited[j]) || reload_override_in[j] == dst) return; /* ??? We can't finish the loop here, because dst might be allocated to a pseudo in this block if no reload in this block needs any of the classes containing DST - see spill_hard_reg. There is no easy way to tell this, so we have to scan till the end of the basic block. */ } if (reg_set_p (dst, PATTERN (i2))) break; } } delete_address_reloads_1 (prev, SET_SRC (set), current_insn); reg_reloaded_contents[REGNO (dst)] = -1; delete_insn (prev); } /* Output reload-insns to reload VALUE into RELOADREG. VALUE is an autoincrement or autodecrement RTX whose operand is a register or memory location; so reloading involves incrementing that location. IN is either identical to VALUE, or some cheaper place to reload from. INC_AMOUNT is the number to increment or decrement by (always positive). This cannot be deduced from VALUE. Return the instruction that stores into RELOADREG. */ static rtx inc_for_reload (rtx reloadreg, rtx in, rtx value, int inc_amount) { /* REG or MEM to be copied and incremented. */ rtx incloc = XEXP (value, 0); /* Nonzero if increment after copying. */ int post = (GET_CODE (value) == POST_DEC || GET_CODE (value) == POST_INC); rtx last; rtx inc; rtx add_insn; int code; rtx store; rtx real_in = in == value ? XEXP (in, 0) : in; /* No hard register is equivalent to this register after inc/dec operation. If REG_LAST_RELOAD_REG were nonzero, we could inc/dec that register as well (maybe even using it for the source), but I'm not sure it's worth worrying about. */ if (REG_P (incloc)) reg_last_reload_reg[REGNO (incloc)] = 0; if (GET_CODE (value) == PRE_DEC || GET_CODE (value) == POST_DEC) inc_amount = -inc_amount; inc = GEN_INT (inc_amount); /* If this is post-increment, first copy the location to the reload reg. */ if (post && real_in != reloadreg) emit_insn (gen_move_insn (reloadreg, real_in)); if (in == value) { /* See if we can directly increment INCLOC. Use a method similar to that in gen_reload. */ last = get_last_insn (); add_insn = emit_insn (gen_rtx_SET (VOIDmode, incloc, gen_rtx_PLUS (GET_MODE (incloc), incloc, inc))); code = recog_memoized (add_insn); if (code >= 0) { extract_insn (add_insn); if (constrain_operands (1)) { /* If this is a pre-increment and we have incremented the value where it lives, copy the incremented value to RELOADREG to be used as an address. */ if (! post) emit_insn (gen_move_insn (reloadreg, incloc)); return add_insn; } } delete_insns_since (last); } /* If couldn't do the increment directly, must increment in RELOADREG. The way we do this depends on whether this is pre- or post-increment. For pre-increment, copy INCLOC to the reload register, increment it there, then save back. */ if (! post) { if (in != reloadreg) emit_insn (gen_move_insn (reloadreg, real_in)); emit_insn (gen_add2_insn (reloadreg, inc)); store = emit_insn (gen_move_insn (incloc, reloadreg)); } else { /* Postincrement. 
Because this might be a jump insn or a compare, and because RELOADREG may not be available after the insn in an input reload, we must do the incrementation before the insn being reloaded for. We have already copied IN to RELOADREG. Increment the copy in RELOADREG, save that back, then decrement RELOADREG so it has the original value. */ emit_insn (gen_add2_insn (reloadreg, inc)); store = emit_insn (gen_move_insn (incloc, reloadreg)); emit_insn (gen_add2_insn (reloadreg, GEN_INT (-inc_amount))); } return store; } #ifdef AUTO_INC_DEC static void add_auto_inc_notes (rtx insn, rtx x) { enum rtx_code code = GET_CODE (x); const char *fmt; int i, j; if (code == MEM && auto_inc_p (XEXP (x, 0))) { REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_INC, XEXP (XEXP (x, 0), 0), REG_NOTES (insn)); return; } /* Scan all the operand sub-expressions. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') add_auto_inc_notes (insn, XEXP (x, i)); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) add_auto_inc_notes (insn, XVECEXP (x, i, j)); } } #endif /* Copy EH notes from an insn to its reloads. */ static void copy_eh_notes (rtx insn, rtx x) { rtx eh_note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); if (eh_note) { for (; x != 0; x = NEXT_INSN (x)) { if (may_trap_p (PATTERN (x))) REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (eh_note, 0), REG_NOTES (x)); } } } /* This is used by the reload pass: it sometimes emits instructions after an abnormal call, which moves the basic block end, when it really wants to emit them on the edge. Look for the abnormal call edges, search backward for the offending call, and fix the damage. Instructions that throw exceptions internally are handled similarly. */ void fixup_abnormal_edges (void) { bool inserted = false; basic_block bb; FOR_EACH_BB (bb) { edge e; /* Look for cases we are interested in - calls or instructions causing exceptions. */ for (e = bb->succ; e; e = e->succ_next) { if (e->flags & EDGE_ABNORMAL_CALL) break; if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) == (EDGE_ABNORMAL | EDGE_EH)) break; } if (e && GET_CODE (BB_END (bb)) != CALL_INSN && !can_throw_internal (BB_END (bb))) { rtx insn = BB_END (bb), stop = NEXT_INSN (BB_END (bb)); rtx next; for (e = bb->succ; e; e = e->succ_next) if (e->flags & EDGE_FALLTHRU) break; /* Get past the new insns generated. Allow notes, as the insns may already be deleted. */ while ((GET_CODE (insn) == INSN || GET_CODE (insn) == NOTE) && !can_throw_internal (insn) && insn != BB_HEAD (bb)) insn = PREV_INSN (insn); if (GET_CODE (insn) != CALL_INSN && !can_throw_internal (insn)) abort (); BB_END (bb) = insn; inserted = true; insn = NEXT_INSN (insn); while (insn && insn != stop) { next = NEXT_INSN (insn); if (INSN_P (insn)) { delete_insn (insn); /* Sometimes there's still the return value USE. If it's placed after a trapping call (i.e. that call is the last insn anyway), we have no fallthru edge. Simply delete this use and don't try to insert on the non-existent edge. */ if (GET_CODE (PATTERN (insn)) != USE) { /* We're not deleting it, we're moving it. */ INSN_DELETED_P (insn) = 0; PREV_INSN (insn) = NULL_RTX; NEXT_INSN (insn) = NULL_RTX; insert_insn_on_edge (insn, e); } } insn = next; } } } /* We've possibly turned a single trapping insn into multiple ones.
*/ if (flag_non_call_exceptions) { sbitmap blocks; blocks = sbitmap_alloc (last_basic_block); sbitmap_ones (blocks); find_many_sub_basic_blocks (blocks); } if (inserted) commit_edge_insertions (); } /* Perform instruction reorganizations for delay slot filling. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu). Hacked by Michael Tiemann (tiemann@cygnus.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Instruction reorganization pass. This pass runs after register allocation and final jump optimization. It should be the last pass to run before peephole. It serves primarily to fill delay slots of insns, typically branch and call insns. Other insns typically involve more complicated interactions of data dependencies and resource constraints, and are better handled by scheduling before register allocation (by the function `schedule_insns'). The Branch Penalty is the number of extra cycles that are needed to execute a branch insn. On an ideal machine, branches take a single cycle, and the Branch Penalty is 0. Several RISC machines approach branch delays differently: The MIPS has a single branch delay slot. Most insns (except other branches) can be used to fill this slot. When the slot is filled, two insns execute in two cycles, reducing the branch penalty to zero. The SPARC always has a branch delay slot, but its effects can be annulled when the branch is not taken. This means that failing to find other sources of insns, we can hoist an insn from the branch target that would only be safe to execute knowing that the branch is taken. The HP-PA always has a branch delay slot. For unconditional branches its effects can be annulled when the branch is taken. The effects of the delay slot in a conditional branch can be nullified for forward taken branches, or for untaken backward branches. This means we can hoist insns from the fall-through path for forward branches or steal insns from the target of backward branches. The TMS320C3x and C4x have three branch delay slots. When the three slots are filled, the branch penalty is zero. Most insns can fill the delay slots except jump insns. Three techniques for filling delay slots have been implemented so far: (1) `fill_simple_delay_slots' is the simplest, most efficient way to fill delay slots. This pass first looks for insns which come from before the branch and which are safe to execute after the branch. Then it searches after the insn requiring delay slots or, in the case of a branch, for insns that are after the point at which the branch merges into the fallthrough code, if such a point exists. When such insns are found, the branch penalty decreases and no code expansion takes place. 
(2) `fill_eager_delay_slots' is more complicated: it is used for scheduling conditional jumps, or for scheduling jumps which cannot be filled using (1). A machine need not have annulled jumps to use this strategy, but it helps (by keeping more options open). `fill_eager_delay_slots' tries to guess the direction the branch will go; if it guesses right 100% of the time, it can reduce the branch penalty as much as `fill_simple_delay_slots' does. If it guesses wrong 100% of the time, it might as well schedule nops. When `fill_eager_delay_slots' takes insns from the fall-through path of the jump, usually there is no code expansion; when it takes insns from the branch target, there is code expansion if it is not the only way to reach that target. (3) `relax_delay_slots' uses a set of rules to simplify code that has been reorganized by (1) and (2). It finds cases where conditional test can be eliminated, jumps can be threaded, extra insns can be eliminated, etc. It is the job of (1) and (2) to do a good job of scheduling locally; `relax_delay_slots' takes care of making the various individual schedules work well together. It is especially tuned to handle the control flow interactions of branch insns. It does nothing for insns with delay slots that do not branch. On machines that use CC0, we are very conservative. We will not make a copy of an insn involving CC0 since we want to maintain a 1-1 correspondence between the insn that sets and uses CC0. The insns are allowed to be separated by placing an insn that sets CC0 (but not an insn that uses CC0; we could do this, but it doesn't seem worthwhile) in a delay slot. In that case, we point each insn at the other with REG_CC_USER and REG_CC_SETTER notes. Note that these restrictions affect very few machines because most RISC machines with delay slots will not use CC0 (the RT is the only known exception at this point). Not yet implemented: The Acorn Risc Machine can conditionally execute most insns, so it is profitable to move single insns into a position to execute based on the condition code of the previous insn. The HP-PA can conditionally nullify insns, providing a similar effect to the ARM, differing mostly in which insn is "in charge". */ #ifdef DELAY_SLOTS #ifndef ANNUL_IFTRUE_SLOTS #define eligible_for_annul_true(INSN, SLOTS, TRIAL, FLAGS) 0 #endif #ifndef ANNUL_IFFALSE_SLOTS #define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0 #endif /* Insns which have delay slots that have not yet been filled. */ static struct obstack unfilled_slots_obstack; static rtx *unfilled_firstobj; /* Define macros to refer to the first and last slot containing unfilled insns. These are used because the list may move and its address should be recomputed at each use. */ #define unfilled_slots_base \ ((rtx *) obstack_base (&unfilled_slots_obstack)) #define unfilled_slots_next \ ((rtx *) obstack_next_free (&unfilled_slots_obstack)) /* Points to the label before the end of the function. */ static rtx end_of_function_label; /* Mapping between INSN_UID's and position in the code since INSN_UID's do not always monotonically increase. */ static int *uid_to_ruid; /* Highest valid index in `uid_to_ruid'. 
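   (How the uid_to_ruid mapping above gets filled in is not shown here.
   As a standalone illustrative sketch, with hypothetical names and kept
   inside this comment rather than compiled, ranking a stream of
   arbitrary uids by the order in which they appear is just

      static void
      build_ruid_map (const int *uid, int n_insns, int *ruid_of_uid)
      {
        int i;
        for (i = 0; i < n_insns; i++)
          ruid_of_uid[uid[i]] = i;
      }

   after which two insns can be compared for textual order by comparing
   their ranks even though their uids are not monotonic.)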
*/ static int max_uid; static int stop_search_p (rtx, int); static int resource_conflicts_p (struct resources *, struct resources *); static int insn_references_resource_p (rtx, struct resources *, int); static int insn_sets_resource_p (rtx, struct resources *, int); static rtx find_end_label (void); static rtx emit_delay_sequence (rtx, rtx, int); static rtx add_to_delay_list (rtx, rtx); static rtx delete_from_delay_slot (rtx); static void delete_scheduled_jump (rtx); static void note_delay_statistics (int, int); #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS) static rtx optimize_skip (rtx); #endif static int get_jump_flags (rtx, rtx); static int rare_destination (rtx); static int mostly_true_jump (rtx, rtx); static rtx get_branch_condition (rtx, rtx); static int condition_dominates_p (rtx, rtx); static int redirect_with_delay_slots_safe_p (rtx, rtx, rtx); static int redirect_with_delay_list_safe_p (rtx, rtx, rtx); static int check_annul_list_true_false (int, rtx); static rtx steal_delay_list_from_target (rtx, rtx, rtx, rtx, struct resources *, struct resources *, struct resources *, int, int *, int *, rtx *); static rtx steal_delay_list_from_fallthrough (rtx, rtx, rtx, rtx, struct resources *, struct resources *, struct resources *, int, int *, int *); static void try_merge_delay_insns (rtx, rtx); static rtx redundant_insn (rtx, rtx, rtx); static int own_thread_p (rtx, rtx, int); static void update_block (rtx, rtx); static int reorg_redirect_jump (rtx, rtx); static void update_reg_dead_notes (rtx, rtx); static void fix_reg_dead_note (rtx, rtx); static void update_reg_unused_notes (rtx, rtx); static void fill_simple_delay_slots (int); static rtx fill_slots_from_thread (rtx, rtx, rtx, rtx, int, int, int, int, int *, rtx); static void fill_eager_delay_slots (void); static void relax_delay_slots (rtx); #ifdef HAVE_return static void make_return_insns (rtx); #endif /* Return TRUE if this insn should stop the search for insn to fill delay slots. LABELS_P indicates that labels should terminate the search. In all cases, jumps terminate the search. */ static int stop_search_p (rtx insn, int labels_p) { if (insn == 0) return 1; /* If the insn can throw an exception that is caught within the function, it may effectively perform a jump from the viewpoint of the function. Therefore act like for a jump. */ if (can_throw_internal (insn)) return 1; switch (GET_CODE (insn)) { case NOTE: case CALL_INSN: return 0; case CODE_LABEL: return labels_p; case JUMP_INSN: case BARRIER: return 1; case INSN: /* OK unless it contains a delay slot or is an `asm' insn of some type. We don't know anything about these. */ return (GET_CODE (PATTERN (insn)) == SEQUENCE || GET_CODE (PATTERN (insn)) == ASM_INPUT || asm_noperands (PATTERN (insn)) >= 0); default: abort (); } } /* Return TRUE if any resources are marked in both RES1 and RES2 or if either resource set contains a volatile memory reference. Otherwise, return FALSE. */ static int resource_conflicts_p (struct resources *res1, struct resources *res2) { if ((res1->cc && res2->cc) || (res1->memory && res2->memory) || (res1->unch_memory && res2->unch_memory) || res1->volatil || res2->volatil) return 1; #ifdef HARD_REG_SET return (res1->regs & res2->regs) != HARD_CONST (0); #else { int i; for (i = 0; i < HARD_REG_SET_LONGS; i++) if ((res1->regs[i] & res2->regs[i]) != 0) return 1; return 0; } #endif } /* Return TRUE if any resource marked in RES, a `struct resources', is referenced by INSN. 
If INCLUDE_DELAYED_EFFECTS is set, also return TRUE if a routine called by INSN uses those resources. We compute this by computing all the resources referenced by INSN and seeing if this conflicts with RES. It might be faster to directly check ourselves, and this is the way it used to work, but it means duplicating a large block of complex code. */ static int insn_references_resource_p (rtx insn, struct resources *res, int include_delayed_effects) { struct resources insn_res; CLEAR_RESOURCE (&insn_res); mark_referenced_resources (insn, &insn_res, include_delayed_effects); return resource_conflicts_p (&insn_res, res); } /* Return TRUE if INSN modifies resources that are marked in RES. INCLUDE_DELAYED_EFFECTS is set if the actions of that routine should be included. CC0 is only modified if it is explicitly set; see comments in front of mark_set_resources for details. */ static int insn_sets_resource_p (rtx insn, struct resources *res, int include_delayed_effects) { struct resources insn_sets; CLEAR_RESOURCE (&insn_sets); mark_set_resources (insn, &insn_sets, 0, include_delayed_effects); return resource_conflicts_p (&insn_sets, res); } /* Return TRUE if INSN is a return, possibly with a filled delay slot. */ static bool return_insn_p (rtx insn) { if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RETURN) return true; if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) return return_insn_p (XVECEXP (PATTERN (insn), 0, 0)); return false; } /* Find a label at the end of the function or before a RETURN. If there is none, make one. */ static rtx find_end_label (void) { rtx insn; /* If we found one previously, return it. */ if (end_of_function_label) return end_of_function_label; /* Otherwise, see if there is a label at the end of the function. If there is, it must be that RETURN insns aren't needed, so that is our return label and we don't have to do anything else. */ insn = get_last_insn (); while (GET_CODE (insn) == NOTE || (GET_CODE (insn) == INSN && (GET_CODE (PATTERN (insn)) == USE || GET_CODE (PATTERN (insn)) == CLOBBER))) insn = PREV_INSN (insn); /* When a target threads its epilogue we might already have a suitable return insn. If so, put a label before it for the end_of_function_label. */ if (GET_CODE (insn) == BARRIER && return_insn_p (PREV_INSN (insn))) { rtx temp = PREV_INSN (PREV_INSN (insn)); end_of_function_label = gen_label_rtx (); LABEL_NUSES (end_of_function_label) = 0; /* Put the label before a USE insn that may precede the RETURN insn. */ while (GET_CODE (temp) == USE) temp = PREV_INSN (temp); emit_label_after (end_of_function_label, temp); } else if (GET_CODE (insn) == CODE_LABEL) end_of_function_label = insn; else { end_of_function_label = gen_label_rtx (); LABEL_NUSES (end_of_function_label) = 0; /* If the basic block reorder pass moves the return insn to some other place, try to locate it again and put our end_of_function_label there. */ while (insn && ! return_insn_p (insn)) insn = PREV_INSN (insn); if (insn) { insn = PREV_INSN (insn); /* Put the label before any USE insns that may precede the RETURN insn. */ while (GET_CODE (insn) == USE) insn = PREV_INSN (insn); emit_label_after (end_of_function_label, insn); } else { /* Otherwise, make a new label and emit a RETURN and BARRIER, if needed. */ emit_label (end_of_function_label); #ifdef HAVE_return if (HAVE_return) { /* The return we make may have delay slots too.
*/ rtx insn = gen_return (); insn = emit_jump_insn (insn); emit_barrier (); if (num_delay_slots (insn) > 0) obstack_ptr_grow (&unfilled_slots_obstack, insn); } #endif } } /* Show one additional use for this label so it won't go away until we are done. */ ++LABEL_NUSES (end_of_function_label); return end_of_function_label; } /* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace the pattern of INSN with the SEQUENCE. Chain the insns so that NEXT_INSN of each insn in the sequence points to the next and NEXT_INSN of the last insn in the sequence points to the first insn after the sequence. Similarly for PREV_INSN. This makes it easier to scan all insns. Returns the SEQUENCE that replaces INSN. */ static rtx emit_delay_sequence (rtx insn, rtx list, int length) { int i = 1; rtx li; int had_barrier = 0; /* Allocate the rtvec to hold the insns and the SEQUENCE. */ rtvec seqv = rtvec_alloc (length + 1); rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv); rtx seq_insn = make_insn_raw (seq); rtx first = get_insns (); rtx last = get_last_insn (); /* Make a copy of the insn having delay slots. */ rtx delay_insn = copy_rtx (insn); /* If INSN is followed by a BARRIER, delete the BARRIER since it will only confuse further processing. Update LAST in case it was the last insn. We will put the BARRIER back in later. */ if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == BARRIER) { delete_related_insns (NEXT_INSN (insn)); last = get_last_insn (); had_barrier = 1; } /* Splice our SEQUENCE into the insn stream where INSN used to be. */ NEXT_INSN (seq_insn) = NEXT_INSN (insn); PREV_INSN (seq_insn) = PREV_INSN (insn); if (insn != last) PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn; if (insn != first) NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn; /* Note the calls to set_new_first_and_last_insn must occur after SEQ_INSN has been completely spliced into the insn stream. Otherwise CUR_INSN_UID will get set to an incorrect value because set_new_first_and_last_insn will not find SEQ_INSN in the chain. */ if (insn == last) set_new_first_and_last_insn (first, seq_insn); if (insn == first) set_new_first_and_last_insn (seq_insn, last); /* Build our SEQUENCE and rebuild the insn chain. */ XVECEXP (seq, 0, 0) = delay_insn; INSN_DELETED_P (delay_insn) = 0; PREV_INSN (delay_insn) = PREV_INSN (seq_insn); for (li = list; li; li = XEXP (li, 1), i++) { rtx tem = XEXP (li, 0); rtx note, next; /* Show that this copy of the insn isn't deleted. */ INSN_DELETED_P (tem) = 0; XVECEXP (seq, 0, i) = tem; PREV_INSN (tem) = XVECEXP (seq, 0, i - 1); NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem; /* SPARC assembler, for instance, emit warning when debug info is output into the delay slot. */ if (INSN_LOCATOR (tem) && !INSN_LOCATOR (seq_insn)) INSN_LOCATOR (seq_insn) = INSN_LOCATOR (tem); INSN_LOCATOR (tem) = 0; for (note = REG_NOTES (tem); note; note = next) { next = XEXP (note, 1); switch (REG_NOTE_KIND (note)) { case REG_DEAD: /* Remove any REG_DEAD notes because we can't rely on them now that the insn has been moved. */ remove_note (tem, note); break; case REG_LABEL: /* Keep the label reference count up to date. */ if (GET_CODE (XEXP (note, 0)) == CODE_LABEL) LABEL_NUSES (XEXP (note, 0)) ++; break; default: break; } } } NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn); /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the last insn in that SEQUENCE to point to us. Similarly for the first insn in the following insn if it is a SEQUENCE. 
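   As a standalone illustrative sketch of the pointer surgery involved
   (hypothetical type and function names, kept inside this comment and
   not compiled), replacing node OLD by node NEW in an ordinary doubly
   linked chain is just

      struct node { struct node *prev, *next; };

      static void
      splice_replace (struct node *old, struct node *new_node)
      {
        new_node->prev = old->prev;
        new_node->next = old->next;
        if (old->prev)
          old->prev->next = new_node;
        if (old->next)
          old->next->prev = new_node;
      }

   The extra wrinkle handled below is that a neighboring insn may itself
   be a SEQUENCE, in which case the pointer to redirect lives on the last
   insn inside the preceding SEQUENCE or on the first insn inside the
   following one, rather than on the SEQUENCE insn itself.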
*/ if (PREV_INSN (seq_insn) && GET_CODE (PREV_INSN (seq_insn)) == INSN && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE) NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0, XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1)) = seq_insn; if (NEXT_INSN (seq_insn) && GET_CODE (NEXT_INSN (seq_insn)) == INSN && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE) PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn; /* If there used to be a BARRIER, put it back. */ if (had_barrier) emit_barrier_after (seq_insn); if (i != length + 1) abort (); return seq_insn; } /* Add INSN to DELAY_LIST and return the head of the new list. The list must be in the order in which the insns are to be executed. */ static rtx add_to_delay_list (rtx insn, rtx delay_list) { /* If we have an empty list, just make a new list element. If INSN has its block number recorded, clear it since we may be moving the insn to a new block. */ if (delay_list == 0) { clear_hashed_info_for_insn (insn); return gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX); } /* Otherwise this must be an INSN_LIST. Add INSN to the end of the list. */ XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1)); return delay_list; } /* Delete INSN from the delay slot of the insn that it is in, which may produce an insn with no delay slots. Return the new insn. */ static rtx delete_from_delay_slot (rtx insn) { rtx trial, seq_insn, seq, prev; rtx delay_list = 0; int i; int had_barrier = 0; /* We first must find the insn containing the SEQUENCE with INSN in its delay slot. Do this by finding an insn, TRIAL, where PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL. */ for (trial = insn; PREV_INSN (NEXT_INSN (trial)) == trial; trial = NEXT_INSN (trial)) ; seq_insn = PREV_INSN (NEXT_INSN (trial)); seq = PATTERN (seq_insn); if (NEXT_INSN (seq_insn) && GET_CODE (NEXT_INSN (seq_insn)) == BARRIER) had_barrier = 1; /* Create a delay list consisting of all the insns other than the one we are deleting (unless we were the only one). */ if (XVECLEN (seq, 0) > 2) for (i = 1; i < XVECLEN (seq, 0); i++) if (XVECEXP (seq, 0, i) != insn) delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list); /* Delete the old SEQUENCE, re-emit the insn that used to have the delay list, and rebuild the delay list if non-empty. */ prev = PREV_INSN (seq_insn); trial = XVECEXP (seq, 0, 0); delete_related_insns (seq_insn); add_insn_after (trial, prev); /* If there was a barrier after the old SEQUENCE, remit it. */ if (had_barrier) emit_barrier_after (trial); /* If there are any delay insns, remit them. Otherwise clear the annul flag. */ if (delay_list) trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2); else if (GET_CODE (trial) == JUMP_INSN || GET_CODE (trial) == CALL_INSN || GET_CODE (trial) == INSN) INSN_ANNULLED_BRANCH_P (trial) = 0; INSN_FROM_TARGET_P (insn) = 0; /* Show we need to fill this insn again. */ obstack_ptr_grow (&unfilled_slots_obstack, trial); return trial; } /* Delete INSN, a JUMP_INSN. If it is a conditional jump, we must track down the insn that sets CC0 for it and delete it too. */ static void delete_scheduled_jump (rtx insn) { /* Delete the insn that sets cc0 for us. On machines without cc0, we could delete the insn that sets the condition code, but it is hard to find it. Since this case is rare anyway, don't bother trying; there would likely be other insns that became dead anyway, which we wouldn't know to delete. 
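   For a cc0 target, by contrast, the pairing is rigid: a conditional
   jump such as (illustrative RTL, not taken from any particular machine
   description)

      (set (cc0) (compare (reg:SI 1) (const_int 0)))
      (set (pc) (if_then_else (ne (cc0) (const_int 0))
                              (label_ref L1) (pc)))

   always has its setter either immediately adjacent or sitting in a
   delay slot and linked to it by REG_CC_SETTER and REG_CC_USER notes,
   which is why the code below can track the setter down and delete it
   together with the jump.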
*/ #ifdef HAVE_cc0 if (reg_mentioned_p (cc0_rtx, insn)) { rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX); /* If a reg-note was found, it points to an insn to set CC0. This insn is in the delay list of some other insn. So delete it from the delay list it was in. */ if (note) { if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX) && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1) delete_from_delay_slot (XEXP (note, 0)); } else { /* The insn setting CC0 is our previous insn, but it may be in a delay slot. It will be the last insn in the delay slot, if it is. */ rtx trial = previous_insn (insn); if (GET_CODE (trial) == NOTE) trial = prev_nonnote_insn (trial); if (sets_cc0_p (PATTERN (trial)) != 1 || FIND_REG_INC_NOTE (trial, NULL_RTX)) return; if (PREV_INSN (NEXT_INSN (trial)) == trial) delete_related_insns (trial); else delete_from_delay_slot (trial); } } #endif delete_related_insns (insn); } /* Counters for delay-slot filling. */ #define NUM_REORG_FUNCTIONS 2 #define MAX_DELAY_HISTOGRAM 3 #define MAX_REORG_PASSES 2 static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES]; static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES]; static int reorg_pass_number; static void note_delay_statistics (int slots_filled, int index) { num_insns_needing_delays[index][reorg_pass_number]++; if (slots_filled > MAX_DELAY_HISTOGRAM) slots_filled = MAX_DELAY_HISTOGRAM; num_filled_delays[index][slots_filled][reorg_pass_number]++; } #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS) /* Optimize the following cases: 1. When a conditional branch skips over only one instruction, use an annulling branch and put that insn in the delay slot. Use either a branch that annuls when the condition if true or invert the test with a branch that annuls when the condition is false. This saves insns, since otherwise we must copy an insn from the L1 target. (orig) (skip) (otherwise) Bcc.n L1 Bcc',a L1 Bcc,a L1' insn insn insn2 L1: L1: L1: insn2 insn2 insn2 insn3 insn3 L1': insn3 2. When a conditional branch skips over only one instruction, and after that, it unconditionally branches somewhere else, perform the similar optimization. This saves executing the second branch in the case where the inverted condition is true. Bcc.n L1 Bcc',a L2 insn insn L1: L1: Bra L2 Bra L2 INSN is a JUMP_INSN. This should be expanded to skip over N insns, where N is the number of delay slots required. */ static rtx optimize_skip (rtx insn) { rtx trial = next_nonnote_insn (insn); rtx next_trial = next_active_insn (trial); rtx delay_list = 0; rtx target_label; int flags; flags = get_jump_flags (insn, JUMP_LABEL (insn)); if (trial == 0 || GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) == SEQUENCE || recog_memoized (trial) < 0 || (! eligible_for_annul_false (insn, 0, trial, flags) && ! eligible_for_annul_true (insn, 0, trial, flags)) || can_throw_internal (trial)) return 0; /* There are two cases where we are just executing one insn (we assume here that a branch requires only one insn; this should be generalized at some point): Where the branch goes around a single insn or where we have one insn followed by a branch to the same label we branch to. In both of these cases, inverting the jump and annulling the delay slot give the same effect in fewer insns. */ if ((next_trial == next_active_insn (JUMP_LABEL (insn)) && ! 
(next_trial == 0 && current_function_epilogue_delay_list != 0)) || (next_trial != 0 && GET_CODE (next_trial) == JUMP_INSN && JUMP_LABEL (insn) == JUMP_LABEL (next_trial) && (simplejump_p (next_trial) || GET_CODE (PATTERN (next_trial)) == RETURN))) { if (eligible_for_annul_false (insn, 0, trial, flags)) { if (invert_jump (insn, JUMP_LABEL (insn), 1)) INSN_FROM_TARGET_P (trial) = 1; else if (! eligible_for_annul_true (insn, 0, trial, flags)) return 0; } delay_list = add_to_delay_list (trial, NULL_RTX); next_trial = next_active_insn (trial); update_block (trial, trial); delete_related_insns (trial); /* Also, if we are targeting an unconditional branch, thread our jump to the target of that branch. Don't change this into a RETURN here, because it may not accept what we have in the delay slot. We'll fix this up later. */ if (next_trial && GET_CODE (next_trial) == JUMP_INSN && (simplejump_p (next_trial) || GET_CODE (PATTERN (next_trial)) == RETURN)) { target_label = JUMP_LABEL (next_trial); if (target_label == 0) target_label = find_end_label (); /* Recompute the flags based on TARGET_LABEL since threading the jump to TARGET_LABEL may change the direction of the jump (which may change the circumstances in which the delay slot is nullified). */ flags = get_jump_flags (insn, target_label); if (eligible_for_annul_true (insn, 0, trial, flags)) reorg_redirect_jump (insn, target_label); } INSN_ANNULLED_BRANCH_P (insn) = 1; } return delay_list; } #endif /* Encode and return branch direction and prediction information for INSN assuming it will jump to LABEL. Unconditional branches return no direction information and are predicted as very likely taken. */ static int get_jump_flags (rtx insn, rtx label) { int flags; /* get_jump_flags can be passed any insn with delay slots; these may be INSNs, CALL_INSNs, or JUMP_INSNs. Only JUMP_INSNs have branch direction information, and only if they are conditional jumps. If LABEL is zero, then there is no way to determine the branch direction. */ if (GET_CODE (insn) == JUMP_INSN && (condjump_p (insn) || condjump_in_parallel_p (insn)) && INSN_UID (insn) <= max_uid && label != 0 && INSN_UID (label) <= max_uid) flags = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)]) ? ATTR_FLAG_forward : ATTR_FLAG_backward; /* No valid direction information. */ else flags = 0; /* If INSN is a conditional branch, call mostly_true_jump to determine the branch prediction. Unconditional branches are predicted as very likely taken. */ if (GET_CODE (insn) == JUMP_INSN && (condjump_p (insn) || condjump_in_parallel_p (insn))) { int prediction; prediction = mostly_true_jump (insn, get_branch_condition (insn, label)); switch (prediction) { case 2: flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely); break; case 1: flags |= ATTR_FLAG_likely; break; case 0: flags |= ATTR_FLAG_unlikely; break; case -1: flags |= (ATTR_FLAG_very_unlikely | ATTR_FLAG_unlikely); break; default: abort (); } } else flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely); return flags; } /* Return 1 if INSN is a destination that will be branched to rarely (the return point of a function); return 2 if INSN will be branched to very rarely (a call to a function that doesn't return). Otherwise, return 0.
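   A related numeric mapping (it is used by mostly_true_jump, below)
   turns a REG_BR_PROB value into one of the four prediction buckets.
   As a standalone illustrative sketch, with a hypothetical helper name,
   kept inside this comment and not compiled, and taking the probability
   base as a parameter rather than assuming its value:

      static int
      prob_to_prediction (int prob, int prob_base)
      {
        if (prob >= prob_base * 9 / 10)
          return 2;
        if (prob >= prob_base / 2)
          return 1;
        if (prob >= prob_base / 10)
          return 0;
        return -1;
      }

   For example, a note recording 7 chances in 10 maps to 1 (somewhat
   likely taken), while 1 chance in 20 maps to -1 (very unlikely).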
*/ static int rare_destination (rtx insn) { int jump_count = 0; rtx next; for (; insn; insn = next) { if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) insn = XVECEXP (PATTERN (insn), 0, 0); next = NEXT_INSN (insn); switch (GET_CODE (insn)) { case CODE_LABEL: return 0; case BARRIER: /* A BARRIER can either be after a JUMP_INSN or a CALL_INSN. We don't scan past JUMP_INSNs, so any barrier we find here must have been after a CALL_INSN and hence mean the call doesn't return. */ return 2; case JUMP_INSN: if (GET_CODE (PATTERN (insn)) == RETURN) return 1; else if (simplejump_p (insn) && jump_count++ < 10) next = JUMP_LABEL (insn); else return 0; default: break; } } /* If we got here it means we hit the end of the function. So this is an unlikely destination. */ return 1; } /* Return truth value of the statement that this branch is mostly taken. If we think that the branch is extremely likely to be taken, we return 2. If the branch is slightly more likely to be taken, return 1. If the branch is slightly less likely to be taken, return 0 and if the branch is highly unlikely to be taken, return -1. CONDITION, if nonzero, is the condition that JUMP_INSN is testing. */ static int mostly_true_jump (rtx jump_insn, rtx condition) { rtx target_label = JUMP_LABEL (jump_insn); rtx insn, note; int rare_dest = rare_destination (target_label); int rare_fallthrough = rare_destination (NEXT_INSN (jump_insn)); /* If branch probabilities are available, then use that number since it always gives a correct answer. */ note = find_reg_note (jump_insn, REG_BR_PROB, 0); if (note) { int prob = INTVAL (XEXP (note, 0)); if (prob >= REG_BR_PROB_BASE * 9 / 10) return 2; else if (prob >= REG_BR_PROB_BASE / 2) return 1; else if (prob >= REG_BR_PROB_BASE / 10) return 0; else return -1; } /* ??? Ought to use estimate_probability instead. */ /* If this is a branch outside a loop, it is highly unlikely. */ if (GET_CODE (PATTERN (jump_insn)) == SET && GET_CODE (SET_SRC (PATTERN (jump_insn))) == IF_THEN_ELSE && ((GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 1)) == LABEL_REF && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 1))) || (GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 2)) == LABEL_REF && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 2))))) return -1; if (target_label) { /* If this is the test of a loop, it is very likely true. We scan backwards from the target label. If we find a NOTE_INSN_LOOP_BEG before the next real insn, we assume the branch is to the top of the loop. */ for (insn = PREV_INSN (target_label); insn && GET_CODE (insn) == NOTE; insn = PREV_INSN (insn)) if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) return 2; /* If this is a jump to the test of a loop, it is likely true. We scan forwards from the target label. If we find a NOTE_INSN_LOOP_VTOP before the next real insn, we assume the branch is to the loop branch test. */ for (insn = NEXT_INSN (target_label); insn && GET_CODE (insn) == NOTE; insn = PREV_INSN (insn)) if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP) return 1; } /* Look at the relative rarities of the fallthrough and destination. If they differ, we can predict the branch that way. */ switch (rare_fallthrough - rare_dest) { case -2: return -1; case -1: return 0; case 0: break; case 1: return 1; case 2: return 2; } /* If we couldn't figure out what this jump was, assume it won't be taken. This should be rare. */ if (condition == 0) return 0; /* EQ tests are usually false and NE tests are usually true. 
Also, most quantities are positive, so we can make the appropriate guesses about signed comparisons against zero. */ switch (GET_CODE (condition)) { case CONST_INT: /* Unconditional branch. */ return 1; case EQ: return 0; case NE: return 1; case LE: case LT: if (XEXP (condition, 1) == const0_rtx) return 0; break; case GE: case GT: if (XEXP (condition, 1) == const0_rtx) return 1; break; default: break; } /* Predict backward branches usually take, forward branches usually not. If we don't know whether this is forward or backward, assume the branch will be taken, since most are. */ return (target_label == 0 || INSN_UID (jump_insn) > max_uid || INSN_UID (target_label) > max_uid || (uid_to_ruid[INSN_UID (jump_insn)] > uid_to_ruid[INSN_UID (target_label)])); } /* Return the condition under which INSN will branch to TARGET. If TARGET is zero, return the condition under which INSN will return. If INSN is an unconditional branch, return const_true_rtx. If INSN isn't a simple type of jump, or it doesn't go to TARGET, return 0. */ static rtx get_branch_condition (rtx insn, rtx target) { rtx pat = PATTERN (insn); rtx src; if (condjump_in_parallel_p (insn)) pat = XVECEXP (pat, 0, 0); if (GET_CODE (pat) == RETURN) return target == 0 ? const_true_rtx : 0; else if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx) return 0; src = SET_SRC (pat); if (GET_CODE (src) == LABEL_REF && XEXP (src, 0) == target) return const_true_rtx; else if (GET_CODE (src) == IF_THEN_ELSE && ((target == 0 && GET_CODE (XEXP (src, 1)) == RETURN) || (GET_CODE (XEXP (src, 1)) == LABEL_REF && XEXP (XEXP (src, 1), 0) == target)) && XEXP (src, 2) == pc_rtx) return XEXP (src, 0); else if (GET_CODE (src) == IF_THEN_ELSE && ((target == 0 && GET_CODE (XEXP (src, 2)) == RETURN) || (GET_CODE (XEXP (src, 2)) == LABEL_REF && XEXP (XEXP (src, 2), 0) == target)) && XEXP (src, 1) == pc_rtx) { enum rtx_code rev; rev = reversed_comparison_code (XEXP (src, 0), insn); if (rev != UNKNOWN) return gen_rtx_fmt_ee (rev, GET_MODE (XEXP (src, 0)), XEXP (XEXP (src, 0), 0), XEXP (XEXP (src, 0), 1)); } return 0; } /* Return nonzero if CONDITION is more strict than the condition of INSN, i.e., if INSN will always branch if CONDITION is true. */ static int condition_dominates_p (rtx condition, rtx insn) { rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn)); enum rtx_code code = GET_CODE (condition); enum rtx_code other_code; if (rtx_equal_p (condition, other_condition) || other_condition == const_true_rtx) return 1; else if (condition == const_true_rtx || other_condition == 0) return 0; other_code = GET_CODE (other_condition); if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2 || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0)) || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1))) return 0; return comparison_dominates_p (code, other_code); } /* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate any insns already in the delay slot of JUMP. */ static int redirect_with_delay_slots_safe_p (rtx jump, rtx newlabel, rtx seq) { int flags, i; rtx pat = PATTERN (seq); /* Make sure all the delay slots of this jump would still be valid after threading the jump. If they are still valid, then return nonzero. */ flags = get_jump_flags (jump, newlabel); for (i = 1; i < XVECLEN (pat, 0); i++) if (! ( #ifdef ANNUL_IFFALSE_SLOTS (INSN_ANNULLED_BRANCH_P (jump) && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i))) ? 
eligible_for_annul_false (jump, i - 1, XVECEXP (pat, 0, i), flags) : #endif #ifdef ANNUL_IFTRUE_SLOTS (INSN_ANNULLED_BRANCH_P (jump) && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i))) ? eligible_for_annul_true (jump, i - 1, XVECEXP (pat, 0, i), flags) : #endif eligible_for_delay (jump, i - 1, XVECEXP (pat, 0, i), flags))) break; return (i == XVECLEN (pat, 0)); } /* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate any insns we wish to place in the delay slot of JUMP. */ static int redirect_with_delay_list_safe_p (rtx jump, rtx newlabel, rtx delay_list) { int flags, i; rtx li; /* Make sure all the insns in DELAY_LIST would still be valid after threading the jump. If they are still valid, then return nonzero. */ flags = get_jump_flags (jump, newlabel); for (li = delay_list, i = 0; li; li = XEXP (li, 1), i++) if (! ( #ifdef ANNUL_IFFALSE_SLOTS (INSN_ANNULLED_BRANCH_P (jump) && INSN_FROM_TARGET_P (XEXP (li, 0))) ? eligible_for_annul_false (jump, i, XEXP (li, 0), flags) : #endif #ifdef ANNUL_IFTRUE_SLOTS (INSN_ANNULLED_BRANCH_P (jump) && ! INSN_FROM_TARGET_P (XEXP (li, 0))) ? eligible_for_annul_true (jump, i, XEXP (li, 0), flags) : #endif eligible_for_delay (jump, i, XEXP (li, 0), flags))) break; return (li == NULL); } /* DELAY_LIST is a list of insns that have already been placed into delay slots. See if all of them have the same annulling status as ANNUL_TRUE_P. If not, return 0; otherwise return 1. */ static int check_annul_list_true_false (int annul_true_p, rtx delay_list) { rtx temp; if (delay_list) { for (temp = delay_list; temp; temp = XEXP (temp, 1)) { rtx trial = XEXP (temp, 0); if ((annul_true_p && INSN_FROM_TARGET_P (trial)) || (!annul_true_p && !INSN_FROM_TARGET_P (trial))) return 0; } } return 1; } /* INSN branches to an insn whose pattern SEQ is a SEQUENCE. Given that the condition tested by INSN is CONDITION and the resources shown in OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns from SEQ's delay list, in addition to whatever insns it may execute (in DELAY_LIST). SETS and NEEDED are denote resources already set and needed while searching for delay slot insns. Return the concatenated delay list if possible, otherwise, return 0. SLOTS_TO_FILL is the total number of slots required by INSN, and PSLOTS_FILLED points to the number filled so far (also the number of insns in DELAY_LIST). It is updated with the number that have been filled from the SEQUENCE, if any. PANNUL_P points to a nonzero value if we already know that we need to annul INSN. If this routine determines that annulling is needed, it may set that value nonzero. PNEW_THREAD points to a location that is to receive the place at which execution should continue. */ static rtx steal_delay_list_from_target (rtx insn, rtx condition, rtx seq, rtx delay_list, struct resources *sets, struct resources *needed, struct resources *other_needed, int slots_to_fill, int *pslots_filled, int *pannul_p, rtx *pnew_thread) { rtx temp; int slots_remaining = slots_to_fill - *pslots_filled; int total_slots_filled = *pslots_filled; rtx new_delay_list = 0; int must_annul = *pannul_p; int used_annul = 0; int i; struct resources cc_set; /* We can't do anything if there are more delay slots in SEQ than we can handle, or if we don't know that it will be a taken branch. We know that it will be a taken branch if it is either an unconditional branch or a conditional branch with a stricter branch condition. 
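   (A "stricter" condition here is exactly what condition_dominates_p
   above checks.  As an illustrative instance with made-up operands: if
   INSN branches when (lt (reg:SI 1) (const_int 0)) holds and the branch
   heading SEQ tests (le (reg:SI 1) (const_int 0)) on the same operands,
   then whenever INSN's branch is taken the stolen branch would have been
   taken as well, so the insns from its delay list can safely be executed
   on that path.)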
Also, exit if the branch has more than one set, since then it is computing other results that can't be ignored, e.g. the HPPA mov&branch instruction. ??? It may be possible to move other sets into INSN in addition to moving the instructions in the delay slots. We can not steal the delay list if one of the instructions in the current delay_list modifies the condition codes and the jump in the sequence is a conditional jump. We can not do this because we can not change the direction of the jump because the condition codes will effect the direction of the jump in the sequence. */ CLEAR_RESOURCE (&cc_set); for (temp = delay_list; temp; temp = XEXP (temp, 1)) { rtx trial = XEXP (temp, 0); mark_set_resources (trial, &cc_set, 0, MARK_SRC_DEST_CALL); if (insn_references_resource_p (XVECEXP (seq , 0, 0), &cc_set, 0)) return delay_list; } if (XVECLEN (seq, 0) - 1 > slots_remaining || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0)) || ! single_set (XVECEXP (seq, 0, 0))) return delay_list; #ifdef MD_CAN_REDIRECT_BRANCH /* On some targets, branches with delay slots can have a limited displacement. Give the back end a chance to tell us we can't do this. */ if (! MD_CAN_REDIRECT_BRANCH (insn, XVECEXP (seq, 0, 0))) return delay_list; #endif for (i = 1; i < XVECLEN (seq, 0); i++) { rtx trial = XVECEXP (seq, 0, i); int flags; if (insn_references_resource_p (trial, sets, 0) || insn_sets_resource_p (trial, needed, 0) || insn_sets_resource_p (trial, sets, 0) #ifdef HAVE_cc0 /* If TRIAL sets CC0, we can't copy it, so we can't steal this delay list. */ || find_reg_note (trial, REG_CC_USER, NULL_RTX) #endif /* If TRIAL is from the fallthrough code of an annulled branch insn in SEQ, we cannot use it. */ || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0)) && ! INSN_FROM_TARGET_P (trial))) return delay_list; /* If this insn was already done (usually in a previous delay slot), pretend we put it in our delay slot. */ if (redundant_insn (trial, insn, new_delay_list)) continue; /* We will end up re-vectoring this branch, so compute flags based on jumping to the new label. */ flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0))); if (! must_annul && ((condition == const_true_rtx || (! insn_sets_resource_p (trial, other_needed, 0) && ! may_trap_p (PATTERN (trial))))) ? eligible_for_delay (insn, total_slots_filled, trial, flags) : (must_annul || (delay_list == NULL && new_delay_list == NULL)) && (must_annul = 1, check_annul_list_true_false (0, delay_list) && check_annul_list_true_false (0, new_delay_list) && eligible_for_annul_false (insn, total_slots_filled, trial, flags))) { if (must_annul) used_annul = 1; temp = copy_rtx (trial); INSN_FROM_TARGET_P (temp) = 1; new_delay_list = add_to_delay_list (temp, new_delay_list); total_slots_filled++; if (--slots_remaining == 0) break; } else return delay_list; } /* Show the place to which we will be branching. */ *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0))); /* Add any new insns to the delay list and update the count of the number of slots filled. */ *pslots_filled = total_slots_filled; if (used_annul) *pannul_p = 1; if (delay_list == 0) return new_delay_list; for (temp = new_delay_list; temp; temp = XEXP (temp, 1)) delay_list = add_to_delay_list (XEXP (temp, 0), delay_list); return delay_list; } /* Similar to steal_delay_list_from_target except that SEQ is on the fallthrough path of INSN. Here we only do something if the delay insn of SEQ is an unconditional branch. 
In that case we steal its delay slot for INSN since unconditional branches are much easier to fill. */ static rtx steal_delay_list_from_fallthrough (rtx insn, rtx condition, rtx seq, rtx delay_list, struct resources *sets, struct resources *needed, struct resources *other_needed, int slots_to_fill, int *pslots_filled, int *pannul_p) { int i; int flags; int must_annul = *pannul_p; int used_annul = 0; flags = get_jump_flags (insn, JUMP_LABEL (insn)); /* We can't do anything if SEQ's delay insn isn't an unconditional branch. */ if (! simplejump_p (XVECEXP (seq, 0, 0)) && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN) return delay_list; for (i = 1; i < XVECLEN (seq, 0); i++) { rtx trial = XVECEXP (seq, 0, i); /* If TRIAL sets CC0, stealing it will move it too far from the use of CC0. */ if (insn_references_resource_p (trial, sets, 0) || insn_sets_resource_p (trial, needed, 0) || insn_sets_resource_p (trial, sets, 0) #ifdef HAVE_cc0 || sets_cc0_p (PATTERN (trial)) #endif ) break; /* If this insn was already done, we don't need it. */ if (redundant_insn (trial, insn, delay_list)) { delete_from_delay_slot (trial); continue; } if (! must_annul && ((condition == const_true_rtx || (! insn_sets_resource_p (trial, other_needed, 0) && ! may_trap_p (PATTERN (trial))))) ? eligible_for_delay (insn, *pslots_filled, trial, flags) : (must_annul || delay_list == NULL) && (must_annul = 1, check_annul_list_true_false (1, delay_list) && eligible_for_annul_true (insn, *pslots_filled, trial, flags))) { if (must_annul) used_annul = 1; delete_from_delay_slot (trial); delay_list = add_to_delay_list (trial, delay_list); if (++(*pslots_filled) == slots_to_fill) break; } else break; } if (used_annul) *pannul_p = 1; return delay_list; } /* Try merging insns starting at THREAD which match exactly the insns in INSN's delay list. If all insns were matched and the insn was previously annulling, the annul bit will be cleared. For each insn that is merged, if the branch is or will be non-annulling, we delete the merged insn. */ static void try_merge_delay_insns (rtx insn, rtx thread) { rtx trial, next_trial; rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0); int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn); int slot_number = 1; int num_slots = XVECLEN (PATTERN (insn), 0); rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number); struct resources set, needed; rtx merged_insns = 0; int i; int flags; flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn)); CLEAR_RESOURCE (&needed); CLEAR_RESOURCE (&set); /* If this is not an annulling branch, take into account anything needed in INSN's delay slot. This prevents two increments from being incorrectly folded into one. If we are annulling, this would be the correct thing to do. (The alternative, looking at things set in NEXT_TO_MATCH will essentially disable this optimization. This method is somewhat of a kludge, but I don't see a better way.) */ if (! annul_p) for (i = 1 ; i < num_slots; i++) if (XVECEXP (PATTERN (insn), 0, i)) mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed, 1); for (trial = thread; !stop_search_p (trial, 1); trial = next_trial) { rtx pat = PATTERN (trial); rtx oldtrial = trial; next_trial = next_nonnote_insn (trial); /* TRIAL must be a CALL_INSN or INSN. Skip USE and CLOBBER. */ if (GET_CODE (trial) == INSN && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)) continue; if (GET_CODE (next_to_match) == GET_CODE (trial) #ifdef HAVE_cc0 /* We can't share an insn that sets cc0. */ && ! sets_cc0_p (pat) #endif && ! 
insn_references_resource_p (trial, &set, 1) && ! insn_sets_resource_p (trial, &set, 1) && ! insn_sets_resource_p (trial, &needed, 1) && (trial = try_split (pat, trial, 0)) != 0 /* Update next_trial, in case try_split succeeded. */ && (next_trial = next_nonnote_insn (trial)) /* Likewise THREAD. */ && (thread = oldtrial == thread ? trial : thread) && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial)) /* Have to test this condition if annul condition is different from (and less restrictive than) non-annulling one. */ && eligible_for_delay (delay_insn, slot_number - 1, trial, flags)) { if (! annul_p) { update_block (trial, thread); if (trial == thread) thread = next_active_insn (thread); delete_related_insns (trial); INSN_FROM_TARGET_P (next_to_match) = 0; } else merged_insns = gen_rtx_INSN_LIST (VOIDmode, trial, merged_insns); if (++slot_number == num_slots) break; next_to_match = XVECEXP (PATTERN (insn), 0, slot_number); } mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (trial, &needed, 1); } /* See if we stopped on a filled insn. If we did, try to see if its delay slots match. */ if (slot_number != num_slots && trial && GET_CODE (trial) == INSN && GET_CODE (PATTERN (trial)) == SEQUENCE && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0))) { rtx pat = PATTERN (trial); rtx filled_insn = XVECEXP (pat, 0, 0); /* Account for resources set/needed by the filled insn. */ mark_set_resources (filled_insn, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (filled_insn, &needed, 1); for (i = 1; i < XVECLEN (pat, 0); i++) { rtx dtrial = XVECEXP (pat, 0, i); if (! insn_references_resource_p (dtrial, &set, 1) && ! insn_sets_resource_p (dtrial, &set, 1) && ! insn_sets_resource_p (dtrial, &needed, 1) #ifdef HAVE_cc0 && ! sets_cc0_p (PATTERN (dtrial)) #endif && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial)) && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags)) { if (! annul_p) { rtx new; update_block (dtrial, thread); new = delete_from_delay_slot (dtrial); if (INSN_DELETED_P (thread)) thread = new; INSN_FROM_TARGET_P (next_to_match) = 0; } else merged_insns = gen_rtx_INSN_LIST (SImode, dtrial, merged_insns); if (++slot_number == num_slots) break; next_to_match = XVECEXP (PATTERN (insn), 0, slot_number); } else { /* Keep track of the set/referenced resources for the delay slots of any trial insns we encounter. */ mark_set_resources (dtrial, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (dtrial, &needed, 1); } } } /* If all insns in the delay slot have been matched and we were previously annulling the branch, we need not any more. In that case delete all the merged insns. Also clear the INSN_FROM_TARGET_P bit of each insn in the delay list so that we know that it isn't only being used at the target. */ if (slot_number == num_slots && annul_p) { for (; merged_insns; merged_insns = XEXP (merged_insns, 1)) { if (GET_MODE (merged_insns) == SImode) { rtx new; update_block (XEXP (merged_insns, 0), thread); new = delete_from_delay_slot (XEXP (merged_insns, 0)); if (INSN_DELETED_P (thread)) thread = new; } else { update_block (XEXP (merged_insns, 0), thread); delete_related_insns (XEXP (merged_insns, 0)); } } INSN_ANNULLED_BRANCH_P (delay_insn) = 0; for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0; } } /* See if INSN is redundant with an insn in front of TARGET. Often this is called when INSN is a candidate for a delay slot of TARGET. 
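(As described further below, this test is kept cheap by two passes: a fast backward scan that only looks for an identical pattern, and a resource-conflict scan that runs only when such a pattern was found.  A standalone sketch of that shape, with plain ints standing in for insn patterns and a hypothetical conflict callback:

      static int redundant_sketch (const int *patterns, int n, int ipat,
                                   int (*conflicts_between) (int from, int to))
      {
        int i;
        for (i = n - 1; i >= 0; i--)          // pass 1: cheap pattern comparison only
          if (patterns[i] == ipat)
            break;
        if (i < 0)
          return 0;                            // nothing identical found: not redundant
        return ! conflicts_between (i, n);     // pass 2: the expensive data-conflict check, done rarely
      }

   )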
DELAY_LIST are insns that will be placed in delay slots of TARGET in front of INSN. Often INSN will be redundant with an insn in a delay slot of some previous insn. This happens when we have a series of branches to the same label; in that case the first insn at the target might want to go into each of the delay slots. If we are not careful, this routine can take up a significant fraction of the total compilation time (4%), but only wins rarely. Hence we speed this routine up by making two passes. The first pass goes back until it hits a label and sees if it finds an insn with an identical pattern. Only in this (relatively rare) event does it check for data conflicts. We do not split insns we encounter. This could cause us not to find a redundant insn, but the cost of splitting seems greater than the possible gain in rare cases. */ static rtx redundant_insn (rtx insn, rtx target, rtx delay_list) { rtx target_main = target; rtx ipat = PATTERN (insn); rtx trial, pat; struct resources needed, set; int i; unsigned insns_to_search; /* If INSN has any REG_UNUSED notes, it can't match anything since we are allowed to not actually assign to such a register. */ if (find_reg_note (insn, REG_UNUSED, NULL_RTX) != 0) return 0; /* Scan backwards looking for a match. */ for (trial = PREV_INSN (target), insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH; trial && insns_to_search > 0; trial = PREV_INSN (trial), --insns_to_search) { if (GET_CODE (trial) == CODE_LABEL) return 0; if (! INSN_P (trial)) continue; pat = PATTERN (trial); if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) continue; if (GET_CODE (pat) == SEQUENCE) { /* Stop for a CALL and its delay slots because it is difficult to track its resource needs correctly. */ if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN) return 0; /* Stop for an INSN or JUMP_INSN with delayed effects and its delay slots because it is difficult to track its resource needs correctly. */ #ifdef INSN_SETS_ARE_DELAYED if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0))) return 0; #endif #ifdef INSN_REFERENCES_ARE_DELAYED if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0))) return 0; #endif /* See if any of the insns in the delay slot match, updating resource requirements as we go. */ for (i = XVECLEN (pat, 0) - 1; i > 0; i--) if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn) && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat) && ! find_reg_note (XVECEXP (pat, 0, i), REG_UNUSED, NULL_RTX)) break; /* If found a match, exit this loop early. */ if (i > 0) break; } else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat) && ! find_reg_note (trial, REG_UNUSED, NULL_RTX)) break; } /* If we didn't find an insn that matches, return 0. */ if (trial == 0) return 0; /* See what resources this insn sets and needs. If they overlap, or if this insn references CC0, it can't be redundant. */ CLEAR_RESOURCE (&needed); CLEAR_RESOURCE (&set); mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (insn, &needed, 1); /* If TARGET is a SEQUENCE, get the main insn. */ if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE) target_main = XVECEXP (PATTERN (target), 0, 0); if (resource_conflicts_p (&needed, &set) #ifdef HAVE_cc0 || reg_mentioned_p (cc0_rtx, ipat) #endif /* The insn requiring the delay may not set anything needed or set by INSN. */ || insn_sets_resource_p (target_main, &needed, 1) || insn_sets_resource_p (target_main, &set, 1)) return 0; /* Insns we pass may not set either NEEDED or SET, so merge them for simpler tests. 
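(An insn we walk past is safe exactly when it touches neither set, so taking the bitwise union first lets one membership test stand in for two.  A standalone sketch of the merge, with a simplified stand-in for struct resources:

      struct res_sketch { unsigned memory : 1; unsigned long regs; };
      static void merge_res (struct res_sketch *needed, const struct res_sketch *set)
      {
        needed->memory |= set->memory;     // union of memory references
        needed->regs   |= set->regs;       // union of hard registers, cf. IOR_HARD_REG_SET below
      }

   )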
*/ needed.memory |= set.memory; needed.unch_memory |= set.unch_memory; IOR_HARD_REG_SET (needed.regs, set.regs); /* This insn isn't redundant if it conflicts with an insn that either is or will be in a delay slot of TARGET. */ while (delay_list) { if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, 1)) return 0; delay_list = XEXP (delay_list, 1); } if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE) for (i = 1; i < XVECLEN (PATTERN (target), 0); i++) if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed, 1)) return 0; /* Scan backwards until we reach a label or an insn that uses something INSN sets or sets something insn uses or sets. */ for (trial = PREV_INSN (target), insns_to_search = MAX_DELAY_SLOT_INSN_SEARCH; trial && GET_CODE (trial) != CODE_LABEL && insns_to_search > 0; trial = PREV_INSN (trial), --insns_to_search) { if (GET_CODE (trial) != INSN && GET_CODE (trial) != CALL_INSN && GET_CODE (trial) != JUMP_INSN) continue; pat = PATTERN (trial); if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) continue; if (GET_CODE (pat) == SEQUENCE) { /* If this is a CALL_INSN and its delay slots, it is hard to track the resource needs properly, so give up. */ if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN) return 0; /* If this is an INSN or JUMP_INSN with delayed effects, it is hard to track the resource needs properly, so give up. */ #ifdef INSN_SETS_ARE_DELAYED if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0))) return 0; #endif #ifdef INSN_REFERENCES_ARE_DELAYED if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0))) return 0; #endif /* See if any of the insns in the delay slot match, updating resource requirements as we go. */ for (i = XVECLEN (pat, 0) - 1; i > 0; i--) { rtx candidate = XVECEXP (pat, 0, i); /* If an insn will be annulled if the branch is false, it isn't considered as a possible duplicate insn. */ if (rtx_equal_p (PATTERN (candidate), ipat) && ! (INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0)) && INSN_FROM_TARGET_P (candidate))) { /* Show that this insn will be used in the sequel. */ INSN_FROM_TARGET_P (candidate) = 0; return candidate; } /* Unless this is an annulled insn from the target of a branch, we must stop if it sets anything needed or set by INSN. */ if ((! INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0)) || ! INSN_FROM_TARGET_P (candidate)) && insn_sets_resource_p (candidate, &needed, 1)) return 0; } /* If the insn requiring the delay slot conflicts with INSN, we must stop. */ if (insn_sets_resource_p (XVECEXP (pat, 0, 0), &needed, 1)) return 0; } else { /* See if TRIAL is the same as INSN. */ pat = PATTERN (trial); if (rtx_equal_p (pat, ipat)) return trial; /* Can't go any further if TRIAL conflicts with INSN. */ if (insn_sets_resource_p (trial, &needed, 1)) return 0; } } return 0; } /* Return 1 if THREAD can only be executed in one way. If LABEL is nonzero, it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH is nonzero, we are allowed to fall into this thread; otherwise, we are not. If LABEL is used more than one or we pass a label other than LABEL before finding an active insn, we do not own this thread. */ static int own_thread_p (rtx thread, rtx label, int allow_fallthrough) { rtx active_insn; rtx insn; /* We don't own the function end. */ if (thread == 0) return 0; /* Get the first active insn, or THREAD, if it is an active insn. 
*/ active_insn = next_active_insn (PREV_INSN (thread)); for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == CODE_LABEL && (insn != label || LABEL_NUSES (insn) != 1)) return 0; if (allow_fallthrough) return 1; /* Ensure that we reach a BARRIER before any insn or label. */ for (insn = prev_nonnote_insn (thread); insn == 0 || GET_CODE (insn) != BARRIER; insn = prev_nonnote_insn (insn)) if (insn == 0 || GET_CODE (insn) == CODE_LABEL || (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)) return 0; return 1; } /* Called when INSN is being moved from a location near the target of a jump. We leave a marker of the form (use (INSN)) immediately in front of WHERE for mark_target_live_regs. These markers will be deleted when reorg finishes. We used to try to update the live status of registers if WHERE is at the start of a basic block, but that can't work since we may remove a BARRIER in relax_delay_slots. */ static void update_block (rtx insn, rtx where) { /* Ignore if this was in a delay slot and it came from the target of a branch. */ if (INSN_FROM_TARGET_P (insn)) return; emit_insn_before (gen_rtx_USE (VOIDmode, insn), where); /* INSN might be making a value live in a block where it didn't use to be. So recompute liveness information for this block. */ incr_ticks_for_insn (insn); } /* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for the basic block containing the jump. */ static int reorg_redirect_jump (rtx jump, rtx nlabel) { incr_ticks_for_insn (jump); return redirect_jump (jump, nlabel, 1); } /* Called when INSN is being moved forward into a delay slot of DELAYED_INSN. We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes that reference values used in INSN. If we find one, then we move the REG_DEAD note to INSN. This is needed to handle the case where an later insn (after INSN) has a REG_DEAD note for a register used by INSN, and this later insn subsequently gets moved before a CODE_LABEL because it is a redundant insn. In this case, mark_target_live_regs may be confused into thinking the register is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */ static void update_reg_dead_notes (rtx insn, rtx delayed_insn) { rtx p, link, next; for (p = next_nonnote_insn (insn); p != delayed_insn; p = next_nonnote_insn (p)) for (link = REG_NOTES (p); link; link = next) { next = XEXP (link, 1); if (REG_NOTE_KIND (link) != REG_DEAD || !REG_P (XEXP (link, 0))) continue; if (reg_referenced_p (XEXP (link, 0), PATTERN (insn))) { /* Move the REG_DEAD note from P to INSN. */ remove_note (p, link); XEXP (link, 1) = REG_NOTES (insn); REG_NOTES (insn) = link; } } } /* Called when an insn redundant with start_insn is deleted. If there is a REG_DEAD note for the target of start_insn between start_insn and stop_insn, then the REG_DEAD note needs to be deleted since the value no longer dies there. If the REG_DEAD note isn't deleted, then mark_target_live_regs may be confused into thinking the register is dead. 
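(Concretely the fix is just unlinking one entry from the insn's note list.  A standalone sketch of that list surgery, with a hypothetical note structure in place of the rtl note lists:

      struct note_sketch { int kind; struct note_sketch *next; };
      static struct note_sketch *drop_one_note (struct note_sketch *head, int dead_kind)
      {
        struct note_sketch **p;
        for (p = &head; *p; p = &(*p)->next)
          if ((*p)->kind == dead_kind)
            {
              *p = (*p)->next;               // unlink the stale REG_DEAD-style note
              break;
            }
        return head;
      }

   )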
*/ static void fix_reg_dead_note (rtx start_insn, rtx stop_insn) { rtx p, link, next; for (p = next_nonnote_insn (start_insn); p != stop_insn; p = next_nonnote_insn (p)) for (link = REG_NOTES (p); link; link = next) { next = XEXP (link, 1); if (REG_NOTE_KIND (link) != REG_DEAD || !REG_P (XEXP (link, 0))) continue; if (reg_set_p (XEXP (link, 0), PATTERN (start_insn))) { remove_note (p, link); return; } } } /* Delete any REG_UNUSED notes that exist on INSN but not on REDUNDANT_INSN. This handles the case of udivmodXi4 instructions which optimize their output depending on whether any REG_UNUSED notes are present. we must make sure that INSN calculates as many results as REDUNDANT_INSN does. */ static void update_reg_unused_notes (rtx insn, rtx redundant_insn) { rtx link, next; for (link = REG_NOTES (insn); link; link = next) { next = XEXP (link, 1); if (REG_NOTE_KIND (link) != REG_UNUSED || !REG_P (XEXP (link, 0))) continue; if (! find_regno_note (redundant_insn, REG_UNUSED, REGNO (XEXP (link, 0)))) remove_note (insn, link); } } /* Scan a function looking for insns that need a delay slot and find insns to put into the delay slot. NON_JUMPS_P is nonzero if we are to only try to fill non-jump insns (such as calls). We do these first since we don't want jump insns (that are easier to fill) to get the only insns that could be used for non-jump insns. When it is zero, only try to fill JUMP_INSNs. When slots are filled in this manner, the insns (including the delay_insn) are put together in a SEQUENCE rtx. In this fashion, it is possible to tell whether a delay slot has really been filled or not. `final' knows how to deal with this, by communicating through FINAL_SEQUENCE. */ static void fill_simple_delay_slots (int non_jumps_p) { rtx insn, pat, trial, next_trial; int i; int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base; struct resources needed, set; int slots_to_fill, slots_filled; rtx delay_list; for (i = 0; i < num_unfilled_slots; i++) { int flags; /* Get the next insn to fill. If it has already had any slots assigned, we can't do anything with it. Maybe we'll improve this later. */ insn = unfilled_slots_base[i]; if (insn == 0 || INSN_DELETED_P (insn) || (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) || (GET_CODE (insn) == JUMP_INSN && non_jumps_p) || (GET_CODE (insn) != JUMP_INSN && ! non_jumps_p)) continue; /* It may have been that this insn used to need delay slots, but now doesn't; ignore in that case. This can happen, for example, on the HP PA RISC, where the number of delay slots depends on what insns are nearby. */ slots_to_fill = num_delay_slots (insn); /* Some machine description have defined instructions to have delay slots only in certain circumstances which may depend on nearby insns (which change due to reorg's actions). For example, the PA port normally has delay slots for unconditional jumps. However, the PA port claims such jumps do not have a delay slot if they are immediate successors of certain CALL_INSNs. This allows the port to favor filling the delay slot of the call with the unconditional jump. */ if (slots_to_fill == 0) continue; /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL says how many. After initialization, first try optimizing call _foo call _foo nop add %o7,.-L1,%o7 b,a L1 nop If this case applies, the delay slot of the call is filled with the unconditional jump. This is done first to avoid having the delay slot of the call filled in the backward scan. 
Also, since the unconditional jump is likely to also have a delay slot, that insn must exist when it is subsequently scanned. This is tried on each insn with delay slots as some machines have insns which perform calls, but are not represented as CALL_INSNs. */ slots_filled = 0; delay_list = 0; if (GET_CODE (insn) == JUMP_INSN) flags = get_jump_flags (insn, JUMP_LABEL (insn)); else flags = get_jump_flags (insn, NULL_RTX); if ((trial = next_active_insn (insn)) && GET_CODE (trial) == JUMP_INSN && simplejump_p (trial) && eligible_for_delay (insn, slots_filled, trial, flags) && no_labels_between_p (insn, trial) && ! can_throw_internal (trial)) { rtx *tmp; slots_filled++; delay_list = add_to_delay_list (trial, delay_list); /* TRIAL may have had its delay slot filled, then unfilled. When the delay slot is unfilled, TRIAL is placed back on the unfilled slots obstack. Unfortunately, it is placed on the end of the obstack, not in its original location. Therefore, we must search from entry i + 1 to the end of the unfilled slots obstack to try and find TRIAL. */ tmp = &unfilled_slots_base[i + 1]; while (*tmp != trial && tmp != unfilled_slots_next) tmp++; /* Remove the unconditional jump from consideration for delay slot filling and unthread it. */ if (*tmp == trial) *tmp = 0; { rtx next = NEXT_INSN (trial); rtx prev = PREV_INSN (trial); if (prev) NEXT_INSN (prev) = next; if (next) PREV_INSN (next) = prev; } } /* Now, scan backwards from the insn to search for a potential delay-slot candidate. Stop searching when a label or jump is hit. For each candidate, if it is to go into the delay slot (moved forward in execution sequence), it must not need or set any resources that were set by later insns and must not set any resources that are needed for those insns. The delay slot insn itself sets resources unless it is a call (in which case the called routine, not the insn itself, is doing the setting). */ if (slots_filled < slots_to_fill) { CLEAR_RESOURCE (&needed); CLEAR_RESOURCE (&set); mark_set_resources (insn, &set, 0, MARK_SRC_DEST); mark_referenced_resources (insn, &needed, 0); for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1); trial = next_trial) { next_trial = prev_nonnote_insn (trial); /* This must be an INSN or CALL_INSN. */ pat = PATTERN (trial); /* USE and CLOBBER at this level was just for flow; ignore it. */ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) continue; /* Check for resource conflict first, to avoid unnecessary splitting. */ if (! insn_references_resource_p (trial, &set, 1) && ! insn_sets_resource_p (trial, &set, 1) && ! insn_sets_resource_p (trial, &needed, 1) #ifdef HAVE_cc0 /* Can't separate set of cc0 from its use. */ && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)) #endif && ! can_throw_internal (trial)) { trial = try_split (pat, trial, 1); next_trial = prev_nonnote_insn (trial); if (eligible_for_delay (insn, slots_filled, trial, flags)) { /* In this case, we are searching backward, so if we find insns to put on the delay list, we want to put them at the head, rather than the tail, of the list. */ update_reg_dead_notes (trial, insn); delay_list = gen_rtx_INSN_LIST (VOIDmode, trial, delay_list); update_block (trial, trial); delete_related_insns (trial); if (slots_to_fill == ++slots_filled) break; continue; } } mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (trial, &needed, 1); } } /* If all needed slots haven't been filled, we come here. */ /* Try to optimize case of jumping around a single insn. 
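(The shape being recognized is a conditional branch whose only effect is to skip one insn; on targets with annulling branches that insn can ride in the branch's delay slot and be cancelled when the branch is taken.  A standalone sketch of why the rewrite preserves values, modelling annulment as a conditional commit and using hypothetical names:

      static int skip_before (int x, int cond)
      {
        if (cond) goto over;
        x = x + 1;                 // the single skipped insn
      over:
        return x;
      }

      static int skip_after (int x, int cond)
      {
        int slot = x + 1;          // work done in the delay slot
        if (! cond)
          x = slot;                // committed only on the fall-through path; annulled when taken
        return x;
      }

   Both functions return the same value for every input.)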
*/ #if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS) if (slots_filled != slots_to_fill && delay_list == 0 && GET_CODE (insn) == JUMP_INSN && (condjump_p (insn) || condjump_in_parallel_p (insn))) { delay_list = optimize_skip (insn); if (delay_list) slots_filled += 1; } #endif /* Try to get insns from beyond the insn needing the delay slot. These insns can neither set or reference resources set in insns being skipped, cannot set resources in the insn being skipped, and, if this is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the call might not return). There used to be code which continued past the target label if we saw all uses of the target label. This code did not work, because it failed to account for some instructions which were both annulled and marked as from the target. This can happen as a result of optimize_skip. Since this code was redundant with fill_eager_delay_slots anyways, it was just deleted. */ if (slots_filled != slots_to_fill /* If this instruction could throw an exception which is caught in the same function, then it's not safe to fill the delay slot with an instruction from beyond this point. For example, consider: int i = 2; try { f(); i = 3; } catch (...) {} return i; Even though `i' is a local variable, we must be sure not to put `i = 3' in the delay slot if `f' might throw an exception. Presumably, we should also check to see if we could get back to this function via `setjmp'. */ && ! can_throw_internal (insn) && (GET_CODE (insn) != JUMP_INSN || ((condjump_p (insn) || condjump_in_parallel_p (insn)) && ! simplejump_p (insn) && JUMP_LABEL (insn) != 0))) { /* Invariant: If insn is a JUMP_INSN, the insn's jump label. Otherwise, zero. */ rtx target = 0; int maybe_never = 0; rtx pat, trial_delay; CLEAR_RESOURCE (&needed); CLEAR_RESOURCE (&set); if (GET_CODE (insn) == CALL_INSN) { mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (insn, &needed, 1); maybe_never = 1; } else { mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (insn, &needed, 1); if (GET_CODE (insn) == JUMP_INSN) target = JUMP_LABEL (insn); } if (target == 0) for (trial = next_nonnote_insn (insn); trial; trial = next_trial) { next_trial = next_nonnote_insn (trial); if (GET_CODE (trial) == CODE_LABEL || GET_CODE (trial) == BARRIER) break; /* We must have an INSN, JUMP_INSN, or CALL_INSN. */ pat = PATTERN (trial); /* Stand-alone USE and CLOBBER are just for flow. */ if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) continue; /* If this already has filled delay slots, get the insn needing the delay slots. */ if (GET_CODE (pat) == SEQUENCE) trial_delay = XVECEXP (pat, 0, 0); else trial_delay = trial; /* Stop our search when seeing an unconditional jump. */ if (GET_CODE (trial_delay) == JUMP_INSN) break; /* See if we have a resource problem before we try to split. */ if (GET_CODE (pat) != SEQUENCE && ! insn_references_resource_p (trial, &set, 1) && ! insn_sets_resource_p (trial, &set, 1) && ! insn_sets_resource_p (trial, &needed, 1) #ifdef HAVE_cc0 && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)) #endif && ! (maybe_never && may_trap_p (pat)) && (trial = try_split (pat, trial, 0)) && eligible_for_delay (insn, slots_filled, trial, flags) && ! 
can_throw_internal(trial)) { next_trial = next_nonnote_insn (trial); delay_list = add_to_delay_list (trial, delay_list); #ifdef HAVE_cc0 if (reg_mentioned_p (cc0_rtx, pat)) link_cc0_insns (trial); #endif delete_related_insns (trial); if (slots_to_fill == ++slots_filled) break; continue; } mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (trial, &needed, 1); /* Ensure we don't put insns between the setting of cc and the comparison by moving a setting of cc into an earlier delay slot since these insns could clobber the condition code. */ set.cc = 1; /* If this is a call or jump, we might not get here. */ if (GET_CODE (trial_delay) == CALL_INSN || GET_CODE (trial_delay) == JUMP_INSN) maybe_never = 1; } /* If there are slots left to fill and our search was stopped by an unconditional branch, try the insn at the branch target. We can redirect the branch if it works. Don't do this if the insn at the branch target is a branch. */ if (slots_to_fill != slots_filled && trial && GET_CODE (trial) == JUMP_INSN && simplejump_p (trial) && (target == 0 || JUMP_LABEL (trial) == target) && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0 && ! (GET_CODE (next_trial) == INSN && GET_CODE (PATTERN (next_trial)) == SEQUENCE) && GET_CODE (next_trial) != JUMP_INSN && ! insn_references_resource_p (next_trial, &set, 1) && ! insn_sets_resource_p (next_trial, &set, 1) && ! insn_sets_resource_p (next_trial, &needed, 1) #ifdef HAVE_cc0 && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial)) #endif && ! (maybe_never && may_trap_p (PATTERN (next_trial))) && (next_trial = try_split (PATTERN (next_trial), next_trial, 0)) && eligible_for_delay (insn, slots_filled, next_trial, flags) && ! can_throw_internal (trial)) { /* See comment in relax_delay_slots about necessity of using next_real_insn here. */ rtx new_label = next_real_insn (next_trial); if (new_label != 0) new_label = get_label_before (new_label); else new_label = find_end_label (); delay_list = add_to_delay_list (copy_rtx (next_trial), delay_list); slots_filled++; reorg_redirect_jump (trial, new_label); /* If we merged because we both jumped to the same place, redirect the original insn also. */ if (target) reorg_redirect_jump (insn, new_label); } } /* If this is an unconditional jump, then try to get insns from the target of the jump. */ if (GET_CODE (insn) == JUMP_INSN && simplejump_p (insn) && slots_filled != slots_to_fill) delay_list = fill_slots_from_thread (insn, const_true_rtx, next_active_insn (JUMP_LABEL (insn)), NULL, 1, 1, own_thread_p (JUMP_LABEL (insn), JUMP_LABEL (insn), 0), slots_to_fill, &slots_filled, delay_list); if (delay_list) unfilled_slots_base[i] = emit_delay_sequence (insn, delay_list, slots_filled); if (slots_to_fill == slots_filled) unfilled_slots_base[i] = 0; note_delay_statistics (slots_filled, 0); } #ifdef DELAY_SLOTS_FOR_EPILOGUE /* See if the epilogue needs any delay slots. Try to fill them if so. The only thing we can do is scan backwards from the end of the function. If we did this in a previous pass, it is incorrect to do it again. */ if (current_function_epilogue_delay_list) return; slots_to_fill = DELAY_SLOTS_FOR_EPILOGUE; if (slots_to_fill == 0) return; slots_filled = 0; CLEAR_RESOURCE (&set); /* The frame pointer and stack pointer are needed at the beginning of the epilogue, so instructions setting them can not be put in the epilogue delay slot. However, everything else needed at function end is safe, so we don't want to use end_of_function_needs here. 
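(The set built below is a fixed-size bitmask of hard register numbers.  A standalone sketch of the representation and of a SET_HARD_REG_BIT-style update, assuming an array of words large enough for this sketch:

      typedef unsigned long sketch_hard_reg_set[4];
      static void sketch_set_reg (sketch_hard_reg_set s, unsigned int regno)
      {
        s[regno / (8 * sizeof (unsigned long))] |= 1UL << (regno % (8 * sizeof (unsigned long)));
      }

   )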
*/ CLEAR_RESOURCE (&needed); if (frame_pointer_needed) { SET_HARD_REG_BIT (needed.regs, FRAME_POINTER_REGNUM); #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM SET_HARD_REG_BIT (needed.regs, HARD_FRAME_POINTER_REGNUM); #endif if (! EXIT_IGNORE_STACK || current_function_sp_is_unchanging) SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM); } else SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM); #ifdef EPILOGUE_USES for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { if (EPILOGUE_USES (i)) SET_HARD_REG_BIT (needed.regs, i); } #endif for (trial = get_last_insn (); ! stop_search_p (trial, 1); trial = PREV_INSN (trial)) { if (GET_CODE (trial) == NOTE) continue; pat = PATTERN (trial); if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) continue; if (! insn_references_resource_p (trial, &set, 1) && ! insn_sets_resource_p (trial, &needed, 1) && ! insn_sets_resource_p (trial, &set, 1) #ifdef HAVE_cc0 /* Don't want to mess with cc0 here. */ && ! reg_mentioned_p (cc0_rtx, pat) #endif && ! can_throw_internal (trial)) { trial = try_split (pat, trial, 1); if (ELIGIBLE_FOR_EPILOGUE_DELAY (trial, slots_filled)) { /* Here as well we are searching backward, so put the insns we find on the head of the list. */ current_function_epilogue_delay_list = gen_rtx_INSN_LIST (VOIDmode, trial, current_function_epilogue_delay_list); mark_end_of_function_resources (trial, 1); update_block (trial, trial); delete_related_insns (trial); /* Clear deleted bit so final.c will output the insn. */ INSN_DELETED_P (trial) = 0; if (slots_to_fill == ++slots_filled) break; continue; } } mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (trial, &needed, 1); } note_delay_statistics (slots_filled, 0); #endif } /* Try to find insns to place in delay slots. INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION or is an unconditional branch if CONDITION is const_true_rtx. *PSLOTS_FILLED is updated with the number of slots that we have filled. THREAD is a flow-of-control, either the insns to be executed if the branch is true or if the branch is false, THREAD_IF_TRUE says which. OPPOSITE_THREAD is the thread in the opposite direction. It is used to see if any potential delay slot insns set things needed there. LIKELY is nonzero if it is extremely likely that the branch will be taken and THREAD_IF_TRUE is set. This is used for the branch at the end of a loop back up to the top. OWN_THREAD and OWN_OPPOSITE_THREAD are true if we are the only user of the thread. I.e., it is the fallthrough code of our jump or the target of the jump when we are the only jump going there. If OWN_THREAD is false, it must be the "true" thread of a jump. In that case, we can only take insns from the head of the thread for our delay slot. We then adjust the jump to point after the insns we have taken. */ static rtx fill_slots_from_thread (rtx insn, rtx condition, rtx thread, rtx opposite_thread, int likely, int thread_if_true, int own_thread, int slots_to_fill, int *pslots_filled, rtx delay_list) { rtx new_thread; struct resources opposite_needed, set, needed; rtx trial; int lose = 0; int must_annul = 0; int flags; /* Validate our arguments. */ if ((condition == const_true_rtx && ! thread_if_true) || (! own_thread && ! thread_if_true)) abort (); flags = get_jump_flags (insn, JUMP_LABEL (insn)); /* If our thread is the end of subroutine, we can't get any delay insns from that. */ if (thread == 0) return delay_list; /* If this is an unconditional branch, nothing is needed at the opposite thread. 
Otherwise, compute what is needed there. */ if (condition == const_true_rtx) CLEAR_RESOURCE (&opposite_needed); else mark_target_live_regs (get_insns (), opposite_thread, &opposite_needed); /* If the insn at THREAD can be split, do it here to avoid having to update THREAD and NEW_THREAD if it is done in the loop below. Also initialize NEW_THREAD. */ new_thread = thread = try_split (PATTERN (thread), thread, 0); /* Scan insns at THREAD. We are looking for an insn that can be removed from THREAD (it neither sets nor references resources that were set ahead of it and it doesn't set anything needs by the insns ahead of it) and that either can be placed in an annulling insn or aren't needed at OPPOSITE_THREAD. */ CLEAR_RESOURCE (&needed); CLEAR_RESOURCE (&set); /* If we do not own this thread, we must stop as soon as we find something that we can't put in a delay slot, since all we can do is branch into THREAD at a later point. Therefore, labels stop the search if this is not the `true' thread. */ for (trial = thread; ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread); trial = next_nonnote_insn (trial)) { rtx pat, old_trial; /* If we have passed a label, we no longer own this thread. */ if (GET_CODE (trial) == CODE_LABEL) { own_thread = 0; continue; } pat = PATTERN (trial); if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) continue; /* If TRIAL conflicts with the insns ahead of it, we lose. Also, don't separate or copy insns that set and use CC0. */ if (! insn_references_resource_p (trial, &set, 1) && ! insn_sets_resource_p (trial, &set, 1) && ! insn_sets_resource_p (trial, &needed, 1) #ifdef HAVE_cc0 && ! (reg_mentioned_p (cc0_rtx, pat) && (! own_thread || ! sets_cc0_p (pat))) #endif && ! can_throw_internal (trial)) { rtx prior_insn; /* If TRIAL is redundant with some insn before INSN, we don't actually need to add it to the delay list; we can merely pretend we did. */ if ((prior_insn = redundant_insn (trial, insn, delay_list))) { fix_reg_dead_note (prior_insn, insn); if (own_thread) { update_block (trial, thread); if (trial == thread) { thread = next_active_insn (thread); if (new_thread == trial) new_thread = thread; } delete_related_insns (trial); } else { update_reg_unused_notes (prior_insn, trial); new_thread = next_active_insn (trial); } continue; } /* There are two ways we can win: If TRIAL doesn't set anything needed at the opposite thread and can't trap, or if it can go into an annulled delay slot. */ if (!must_annul && (condition == const_true_rtx || (! insn_sets_resource_p (trial, &opposite_needed, 1) && ! may_trap_p (pat)))) { old_trial = trial; trial = try_split (pat, trial, 0); if (new_thread == old_trial) new_thread = trial; if (thread == old_trial) thread = trial; pat = PATTERN (trial); if (eligible_for_delay (insn, *pslots_filled, trial, flags)) goto winner; } else if (0 #ifdef ANNUL_IFTRUE_SLOTS || ! thread_if_true #endif #ifdef ANNUL_IFFALSE_SLOTS || thread_if_true #endif ) { old_trial = trial; trial = try_split (pat, trial, 0); if (new_thread == old_trial) new_thread = trial; if (thread == old_trial) thread = trial; pat = PATTERN (trial); if ((must_annul || delay_list == NULL) && (thread_if_true ? 
check_annul_list_true_false (0, delay_list) && eligible_for_annul_false (insn, *pslots_filled, trial, flags) : check_annul_list_true_false (1, delay_list) && eligible_for_annul_true (insn, *pslots_filled, trial, flags))) { rtx temp; must_annul = 1; winner: #ifdef HAVE_cc0 if (reg_mentioned_p (cc0_rtx, pat)) link_cc0_insns (trial); #endif /* If we own this thread, delete the insn. If this is the destination of a branch, show that a basic block status may have been updated. In any case, mark the new starting point of this thread. */ if (own_thread) { rtx note; update_block (trial, thread); if (trial == thread) { thread = next_active_insn (thread); if (new_thread == trial) new_thread = thread; } /* We are moving this insn, not deleting it. We must temporarily increment the use count on any referenced label lest it be deleted by delete_related_insns. */ note = find_reg_note (trial, REG_LABEL, 0); /* REG_LABEL could be NOTE_INSN_DELETED_LABEL too. */ if (note && GET_CODE (XEXP (note, 0)) == CODE_LABEL) LABEL_NUSES (XEXP (note, 0))++; delete_related_insns (trial); if (note && GET_CODE (XEXP (note, 0)) == CODE_LABEL) LABEL_NUSES (XEXP (note, 0))--; } else new_thread = next_active_insn (trial); temp = own_thread ? trial : copy_rtx (trial); if (thread_if_true) INSN_FROM_TARGET_P (temp) = 1; delay_list = add_to_delay_list (temp, delay_list); if (slots_to_fill == ++(*pslots_filled)) { /* Even though we have filled all the slots, we may be branching to a location that has a redundant insn. Skip any if so. */ while (new_thread && ! own_thread && ! insn_sets_resource_p (new_thread, &set, 1) && ! insn_sets_resource_p (new_thread, &needed, 1) && ! insn_references_resource_p (new_thread, &set, 1) && (prior_insn = redundant_insn (new_thread, insn, delay_list))) { /* We know we do not own the thread, so no need to call update_block and delete_insn. */ fix_reg_dead_note (prior_insn, insn); update_reg_unused_notes (prior_insn, new_thread); new_thread = next_active_insn (new_thread); } break; } continue; } } } /* This insn can't go into a delay slot. */ lose = 1; mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL); mark_referenced_resources (trial, &needed, 1); /* Ensure we don't put insns between the setting of cc and the comparison by moving a setting of cc into an earlier delay slot since these insns could clobber the condition code. */ set.cc = 1; /* If this insn is a register-register copy and the next insn has a use of our destination, change it to use our source. That way, it will become a candidate for our delay slot the next time through this loop. This case occurs commonly in loops that scan a list. We could check for more complex cases than those tested below, but it doesn't seem worth it. It might also be a good idea to try to swap the two insns. That might do better. We can't do this if the next insn modifies our destination, because that would make the replacement into the insn invalid. We also can't do this if it modifies our source, because it might be an earlyclobber operand. This latter test also prevents updating the contents of a PRE_INC. We also can't do this if there's overlap of source and destination. Overlap may happen for larger-than-register-size modes. */ if (GET_CODE (trial) == INSN && GET_CODE (pat) == SET && REG_P (SET_SRC (pat)) && REG_P (SET_DEST (pat)) && !reg_overlap_mentioned_p (SET_DEST (pat), SET_SRC (pat))) { rtx next = next_nonnote_insn (trial); if (next && GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) != USE && ! reg_set_p (SET_DEST (pat), next) && ! 
reg_set_p (SET_SRC (pat), next) && reg_referenced_p (SET_DEST (pat), PATTERN (next)) && ! modified_in_p (SET_DEST (pat), next)) validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next); } } /* If we stopped on a branch insn that has delay slots, see if we can steal some of the insns in those slots. */ if (trial && GET_CODE (trial) == INSN && GET_CODE (PATTERN (trial)) == SEQUENCE && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN) { /* If this is the `true' thread, we will want to follow the jump, so we can only do this if we have taken everything up to here. */ if (thread_if_true && trial == new_thread) { delay_list = steal_delay_list_from_target (insn, condition, PATTERN (trial), delay_list, &set, &needed, &opposite_needed, slots_to_fill, pslots_filled, &must_annul, &new_thread); /* If we owned the thread and are told that it branched elsewhere, make sure we own the thread at the new location. */ if (own_thread && trial != new_thread) own_thread = own_thread_p (new_thread, new_thread, 0); } else if (! thread_if_true) delay_list = steal_delay_list_from_fallthrough (insn, condition, PATTERN (trial), delay_list, &set, &needed, &opposite_needed, slots_to_fill, pslots_filled, &must_annul); } /* If we haven't found anything for this delay slot and it is very likely that the branch will be taken, see if the insn at our target increments or decrements a register with an increment that does not depend on the destination register. If so, try to place the opposite arithmetic insn after the jump insn and put the arithmetic insn in the delay slot. If we can't do this, return. */ if (delay_list == 0 && likely && new_thread && GET_CODE (new_thread) == INSN && GET_CODE (PATTERN (new_thread)) != ASM_INPUT && asm_noperands (PATTERN (new_thread)) < 0) { rtx pat = PATTERN (new_thread); rtx dest; rtx src; trial = new_thread; pat = PATTERN (trial); if (GET_CODE (trial) != INSN || GET_CODE (pat) != SET || ! eligible_for_delay (insn, 0, trial, flags) || can_throw_internal (trial)) return 0; dest = SET_DEST (pat), src = SET_SRC (pat); if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS) && rtx_equal_p (XEXP (src, 0), dest) && ! reg_overlap_mentioned_p (dest, XEXP (src, 1)) && ! side_effects_p (pat)) { rtx other = XEXP (src, 1); rtx new_arith; rtx ninsn; /* If this is a constant adjustment, use the same code with the negated constant. Otherwise, reverse the sense of the arithmetic. */ if (GET_CODE (other) == CONST_INT) new_arith = gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest, negate_rtx (GET_MODE (src), other)); else new_arith = gen_rtx_fmt_ee (GET_CODE (src) == PLUS ? MINUS : PLUS, GET_MODE (src), dest, other); ninsn = emit_insn_after (gen_rtx_SET (VOIDmode, dest, new_arith), insn); if (recog_memoized (ninsn) < 0 || (extract_insn (ninsn), ! constrain_operands (1))) { delete_related_insns (ninsn); return 0; } if (own_thread) { update_block (trial, thread); if (trial == thread) { thread = next_active_insn (thread); if (new_thread == trial) new_thread = thread; } delete_related_insns (trial); } else new_thread = next_active_insn (trial); ninsn = own_thread ? trial : copy_rtx (trial); if (thread_if_true) INSN_FROM_TARGET_P (ninsn) = 1; delay_list = add_to_delay_list (ninsn, NULL_RTX); (*pslots_filled)++; } } if (delay_list && must_annul) INSN_ANNULLED_BRANCH_P (insn) = 1; /* If we are to branch into the middle of this thread, find an appropriate label or make a new one if none, and redirect INSN to it. If we hit the end of the function, use the end-of-function label. 
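(For the likely-taken arithmetic rewrite attempted above: if the insn at the branch target is an increment of a register, the increment is pulled into the delay slot, where it executes on both paths, and a compensating opposite insn is placed after the jump so the fall-through path still sees the old value.  A standalone sketch with a plain int standing in for the register and a hypothetical step of 4:

      static int arith_before (int d, int taken)
      {
        if (taken) goto target;
        return d;                  // fall-through path: d unchanged
      target:
        d = d + 4;                 // increment at the branch target
        return d;
      }

      static int arith_after (int d, int taken)
      {
        d = d + 4;                 // the increment, now in the branch's delay slot
        if (taken)
          return d;                // the target's original increment has been consumed
        d = d - 4;                 // the opposite insn emitted after the jump
        return d;
      }

   Both return the same value on both paths.)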
*/ if (new_thread != thread) { rtx label; if (! thread_if_true) abort (); if (new_thread && GET_CODE (new_thread) == JUMP_INSN && (simplejump_p (new_thread) || GET_CODE (PATTERN (new_thread)) == RETURN) && redirect_with_delay_list_safe_p (insn, JUMP_LABEL (new_thread), delay_list)) new_thread = follow_jumps (JUMP_LABEL (new_thread)); if (new_thread == 0) label = find_end_label (); else if (GET_CODE (new_thread) == CODE_LABEL) label = new_thread; else label = get_label_before (new_thread); reorg_redirect_jump (insn, label); } return delay_list; } /* Make another attempt to find insns to place in delay slots. We previously looked for insns located in front of the delay insn and, for non-jump delay insns, located behind the delay insn. Here only try to schedule jump insns and try to move insns from either the target or the following insns into the delay slot. If annulling is supported, we will be likely to do this. Otherwise, we can do this only if safe. */ static void fill_eager_delay_slots (void) { rtx insn; int i; int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base; for (i = 0; i < num_unfilled_slots; i++) { rtx condition; rtx target_label, insn_at_target, fallthrough_insn; rtx delay_list = 0; int own_target; int own_fallthrough; int prediction, slots_to_fill, slots_filled; insn = unfilled_slots_base[i]; if (insn == 0 || INSN_DELETED_P (insn) || GET_CODE (insn) != JUMP_INSN || ! (condjump_p (insn) || condjump_in_parallel_p (insn))) continue; slots_to_fill = num_delay_slots (insn); /* Some machine description have defined instructions to have delay slots only in certain circumstances which may depend on nearby insns (which change due to reorg's actions). For example, the PA port normally has delay slots for unconditional jumps. However, the PA port claims such jumps do not have a delay slot if they are immediate successors of certain CALL_INSNs. This allows the port to favor filling the delay slot of the call with the unconditional jump. */ if (slots_to_fill == 0) continue; slots_filled = 0; target_label = JUMP_LABEL (insn); condition = get_branch_condition (insn, target_label); if (condition == 0) continue; /* Get the next active fallthrough and target insns and see if we own them. Then see whether the branch is likely true. We don't need to do a lot of this for unconditional branches. */ insn_at_target = next_active_insn (target_label); own_target = own_thread_p (target_label, target_label, 0); if (condition == const_true_rtx) { own_fallthrough = 0; fallthrough_insn = 0; prediction = 2; } else { fallthrough_insn = next_active_insn (insn); own_fallthrough = own_thread_p (NEXT_INSN (insn), NULL_RTX, 1); prediction = mostly_true_jump (insn, condition); } /* If this insn is expected to branch, first try to get insns from our target, then our fallthrough insns. If it is not expected to branch, try the other order. */ if (prediction > 0) { delay_list = fill_slots_from_thread (insn, condition, insn_at_target, fallthrough_insn, prediction == 2, 1, own_target, slots_to_fill, &slots_filled, delay_list); if (delay_list == 0 && own_fallthrough) { /* Even though we didn't find anything for delay slots, we might have found a redundant insn which we deleted from the thread that was filled. So we have to recompute the next insn at the target. 
*/ target_label = JUMP_LABEL (insn); insn_at_target = next_active_insn (target_label); delay_list = fill_slots_from_thread (insn, condition, fallthrough_insn, insn_at_target, 0, 0, own_fallthrough, slots_to_fill, &slots_filled, delay_list); } } else { if (own_fallthrough) delay_list = fill_slots_from_thread (insn, condition, fallthrough_insn, insn_at_target, 0, 0, own_fallthrough, slots_to_fill, &slots_filled, delay_list); if (delay_list == 0) delay_list = fill_slots_from_thread (insn, condition, insn_at_target, next_active_insn (insn), 0, 1, own_target, slots_to_fill, &slots_filled, delay_list); } if (delay_list) unfilled_slots_base[i] = emit_delay_sequence (insn, delay_list, slots_filled); if (slots_to_fill == slots_filled) unfilled_slots_base[i] = 0; note_delay_statistics (slots_filled, 1); } } /* Once we have tried two ways to fill a delay slot, make a pass over the code to try to improve the results and to do such things as more jump threading. */ static void relax_delay_slots (rtx first) { rtx insn, next, pat; rtx trial, delay_insn, target_label; /* Look at every JUMP_INSN and see if we can improve it. */ for (insn = first; insn; insn = next) { rtx other; next = next_active_insn (insn); /* If this is a jump insn, see if it now jumps to a jump, jumps to the next insn, or jumps to a label that is not the last of a group of consecutive labels. */ if (GET_CODE (insn) == JUMP_INSN && (condjump_p (insn) || condjump_in_parallel_p (insn)) && (target_label = JUMP_LABEL (insn)) != 0) { target_label = skip_consecutive_labels (follow_jumps (target_label)); if (target_label == 0) target_label = find_end_label (); if (next_active_insn (target_label) == next && ! condjump_in_parallel_p (insn)) { delete_jump (insn); continue; } if (target_label != JUMP_LABEL (insn)) reorg_redirect_jump (insn, target_label); /* See if this jump branches around an unconditional jump. If so, invert this jump and point it to the target of the second jump. */ if (next && GET_CODE (next) == JUMP_INSN && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN) && next_active_insn (target_label) == next_active_insn (next) && no_labels_between_p (insn, next)) { rtx label = JUMP_LABEL (next); /* Be careful how we do this to avoid deleting code or labels that are momentarily dead. See similar optimization in jump.c. We also need to ensure we properly handle the case when invert_jump fails. */ ++LABEL_NUSES (target_label); if (label) ++LABEL_NUSES (label); if (invert_jump (insn, label, 1)) { delete_related_insns (next); next = insn; } if (label) --LABEL_NUSES (label); if (--LABEL_NUSES (target_label) == 0) delete_related_insns (target_label); continue; } } /* If this is an unconditional jump and the previous insn is a conditional jump, try reversing the condition of the previous insn and swapping our targets. The next pass might be able to fill the slots. Don't do this if we expect the conditional branch to be true, because we would then be making the more common case longer. */ if (GET_CODE (insn) == JUMP_INSN && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN) && (other = prev_active_insn (insn)) != 0 && (condjump_p (other) || condjump_in_parallel_p (other)) && no_labels_between_p (other, insn) && 0 > mostly_true_jump (other, get_branch_condition (other, JUMP_LABEL (other)))) { rtx other_target = JUMP_LABEL (other); target_label = JUMP_LABEL (insn); if (invert_jump (other, target_label, 0)) reorg_redirect_jump (insn, other_target); } /* Now look only at cases where we have filled a delay slot. 
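(One of the rewrites above flips a conditional jump that merely branches around an unconditional jump, so the conditional jump goes straight to the second jump's target and the unconditional jump can be deleted.  A standalone sketch of the before and after shapes, with hypothetical labels:

      static int around_before (int cond)
      {
        if (cond) goto l1;
        goto l2;
      l1:
        return 1;
      l2:
        return 2;
      }

      static int around_after (int cond)
      {
        if (! cond) goto l2;
        return 1;                  // old l1 code now falls through; the jump around it is gone
      l2:
        return 2;
      }

   )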
*/ if (GET_CODE (insn) != INSN || GET_CODE (PATTERN (insn)) != SEQUENCE) continue; pat = PATTERN (insn); delay_insn = XVECEXP (pat, 0, 0); /* See if the first insn in the delay slot is redundant with some previous insn. Remove it from the delay slot if so; then set up to reprocess this insn. */ if (redundant_insn (XVECEXP (pat, 0, 1), delay_insn, 0)) { delete_from_delay_slot (XVECEXP (pat, 0, 1)); next = prev_active_insn (next); continue; } /* See if we have a RETURN insn with a filled delay slot followed by a RETURN insn with an unfilled a delay slot. If so, we can delete the first RETURN (but not its delay insn). This gives the same effect in fewer instructions. Only do so if optimizing for size since this results in slower, but smaller code. */ if (optimize_size && GET_CODE (PATTERN (delay_insn)) == RETURN && next && GET_CODE (next) == JUMP_INSN && GET_CODE (PATTERN (next)) == RETURN) { rtx after; int i; /* Delete the RETURN and just execute the delay list insns. We do this by deleting the INSN containing the SEQUENCE, then re-emitting the insns separately, and then deleting the RETURN. This allows the count of the jump target to be properly decremented. */ /* Clear the from target bit, since these insns are no longer in delay slots. */ for (i = 0; i < XVECLEN (pat, 0); i++) INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0; trial = PREV_INSN (insn); delete_related_insns (insn); if (GET_CODE (pat) != SEQUENCE) abort (); after = trial; for (i = 0; i < XVECLEN (pat, 0); i++) { rtx this_insn = XVECEXP (pat, 0, i); add_insn_after (this_insn, after); after = this_insn; } delete_scheduled_jump (delay_insn); continue; } /* Now look only at the cases where we have a filled JUMP_INSN. */ if (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN || ! (condjump_p (XVECEXP (PATTERN (insn), 0, 0)) || condjump_in_parallel_p (XVECEXP (PATTERN (insn), 0, 0)))) continue; target_label = JUMP_LABEL (delay_insn); if (target_label) { /* If this jump goes to another unconditional jump, thread it, but don't convert a jump into a RETURN here. */ trial = skip_consecutive_labels (follow_jumps (target_label)); if (trial == 0) trial = find_end_label (); if (trial != target_label && redirect_with_delay_slots_safe_p (delay_insn, trial, insn)) { reorg_redirect_jump (delay_insn, trial); target_label = trial; } /* If the first insn at TARGET_LABEL is redundant with a previous insn, redirect the jump to the following insn process again. */ trial = next_active_insn (target_label); if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE && redundant_insn (trial, insn, 0) && ! can_throw_internal (trial)) { rtx tmp; /* Figure out where to emit the special USE insn so we don't later incorrectly compute register live/death info. */ tmp = next_active_insn (trial); if (tmp == 0) tmp = find_end_label (); /* Insert the special USE insn and update dataflow info. */ update_block (trial, tmp); /* Now emit a label before the special USE insn, and redirect our jump to the new label. */ target_label = get_label_before (PREV_INSN (tmp)); reorg_redirect_jump (delay_insn, target_label); next = insn; continue; } /* Similarly, if it is an unconditional jump with one insn in its delay list and that insn is redundant, thread the jump. 
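(Threading here means retargeting a jump whose destination is itself just another jump, so control goes straight to the final destination.  A standalone sketch with hypothetical labels:

      static int thread_before (int x)
      {
        if (x) goto middle;
        return 0;
      middle:
        goto final;
      final:
        return 1;
      }

      static int thread_after (int x)
      {
        if (x) goto final;
        return 0;
      final:
        return 1;
      }

   )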
*/ if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE && XVECLEN (PATTERN (trial), 0) == 2 && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0)) || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN) && redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0)) { target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0)); if (target_label == 0) { target_label = find_end_label (); /* The following condition may be true if TRIAL contains the unique RETURN. In this case, threading would be a nop and we would enter an infinite loop if we did it. */ if (next_active_insn (target_label) == trial) target_label = 0; } if (target_label && redirect_with_delay_slots_safe_p (delay_insn, target_label, insn)) { reorg_redirect_jump (delay_insn, target_label); next = insn; continue; } } } if (! INSN_ANNULLED_BRANCH_P (delay_insn) && prev_active_insn (target_label) == insn && ! condjump_in_parallel_p (delay_insn) #ifdef HAVE_cc0 /* If the last insn in the delay slot sets CC0 for some insn, various code assumes that it is in a delay slot. We could put it back where it belonged and delete the register notes, but it doesn't seem worthwhile in this uncommon case. */ && ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1), REG_CC_USER, NULL_RTX) #endif ) { rtx after; int i; /* All this insn does is execute its delay list and jump to the following insn. So delete the jump and just execute the delay list insns. We do this by deleting the INSN containing the SEQUENCE, then re-emitting the insns separately, and then deleting the jump. This allows the count of the jump target to be properly decremented. */ /* Clear the from target bit, since these insns are no longer in delay slots. */ for (i = 0; i < XVECLEN (pat, 0); i++) INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0; trial = PREV_INSN (insn); delete_related_insns (insn); if (GET_CODE (pat) != SEQUENCE) abort (); after = trial; for (i = 0; i < XVECLEN (pat, 0); i++) { rtx this_insn = XVECEXP (pat, 0, i); add_insn_after (this_insn, after); after = this_insn; } delete_scheduled_jump (delay_insn); continue; } /* See if this is an unconditional jump around a single insn which is identical to the one in its delay slot. In this case, we can just delete the branch and the insn in its delay slot. */ if (next && GET_CODE (next) == INSN && prev_label (next_active_insn (next)) == target_label && simplejump_p (insn) && XVECLEN (pat, 0) == 2 && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1)))) { delete_related_insns (insn); continue; } /* See if this jump (with its delay slots) branches around another jump (without delay slots). If so, invert this jump and point it to the target of the second jump. We cannot do this for annulled jumps, though. Again, don't convert a jump to a RETURN here. */ if (! INSN_ANNULLED_BRANCH_P (delay_insn) && next && GET_CODE (next) == JUMP_INSN && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN) && next_active_insn (target_label) == next_active_insn (next) && no_labels_between_p (insn, next)) { rtx label = JUMP_LABEL (next); rtx old_label = JUMP_LABEL (delay_insn); if (label == 0) label = find_end_label (); /* find_end_label can generate a new label. Check this first. */ if (no_labels_between_p (insn, next) && redirect_with_delay_slots_safe_p (delay_insn, label, insn)) { /* Be careful how we do this to avoid deleting code or labels that are momentarily dead. 
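The trick is to bump LABEL_NUSES on the old label before calling invert_jump, so the label cannot disappear while the jump is being retargeted, and to drop the extra reference afterwards, deleting the label only if it really ended up unused.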
See similar optimization in jump.c */ if (old_label) ++LABEL_NUSES (old_label); if (invert_jump (delay_insn, label, 1)) { int i; /* Must update the INSN_FROM_TARGET_P bits now that the branch is reversed, so that mark_target_live_regs will handle the delay slot insn correctly. */ for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++) { rtx slot = XVECEXP (PATTERN (insn), 0, i); INSN_FROM_TARGET_P (slot) = ! INSN_FROM_TARGET_P (slot); } delete_related_insns (next); next = insn; } if (old_label && --LABEL_NUSES (old_label) == 0) delete_related_insns (old_label); continue; } } /* If we own the thread opposite the way this insn branches, see if we can merge its delay slots with following insns. */ if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1)) && own_thread_p (NEXT_INSN (insn), 0, 1)) try_merge_delay_insns (insn, next); else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1)) && own_thread_p (target_label, target_label, 0)) try_merge_delay_insns (insn, next_active_insn (target_label)); /* If we get here, we haven't deleted INSN. But we may have deleted NEXT, so recompute it. */ next = next_active_insn (insn); } } #ifdef HAVE_return /* Look for filled jumps to the end of function label. We can try to convert them into RETURN insns if the insns in the delay slot are valid for the RETURN as well. */ static void make_return_insns (rtx first) { rtx insn, jump_insn, pat; rtx real_return_label = end_of_function_label; int slots, i; #ifdef DELAY_SLOTS_FOR_EPILOGUE /* If a previous pass filled delay slots in the epilogue, things get a bit more complicated, as those filler insns would generally (without data flow analysis) have to be executed after any existing branch delay slot filler insns. It is also unknown whether such a transformation would actually be profitable. Note that the existing code only cares for branches with (some) filled delay slots. */ if (current_function_epilogue_delay_list != NULL) return; #endif /* See if there is a RETURN insn in the function other than the one we made for END_OF_FUNCTION_LABEL. If so, set up anything we can't change into a RETURN to jump to it. */ for (insn = first; insn; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) == RETURN) { real_return_label = get_label_before (insn); break; } /* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it was equal to END_OF_FUNCTION_LABEL. */ LABEL_NUSES (real_return_label)++; /* Clear the list of insns to fill so we can use it. */ obstack_free (&unfilled_slots_obstack, unfilled_firstobj); for (insn = first; insn; insn = NEXT_INSN (insn)) { int flags; /* Only look at filled JUMP_INSNs that go to the end of function label. */ if (GET_CODE (insn) != INSN || GET_CODE (PATTERN (insn)) != SEQUENCE || GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN || JUMP_LABEL (XVECEXP (PATTERN (insn), 0, 0)) != end_of_function_label) continue; pat = PATTERN (insn); jump_insn = XVECEXP (pat, 0, 0); /* If we can't make the jump into a RETURN, try to redirect it to the best RETURN and go on to the next insn. */ if (! reorg_redirect_jump (jump_insn, NULL_RTX)) { /* Make sure redirecting the jump will not invalidate the delay slot insns. */ if (redirect_with_delay_slots_safe_p (jump_insn, real_return_label, insn)) reorg_redirect_jump (jump_insn, real_return_label); continue; } /* See if this RETURN can accept the insns current in its delay slot. It can if it has more or an equal number of slots and the contents of each is valid. 
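Validity is checked below with the target's eligible_for_delay predicate (or eligible_for_annul_true / eligible_for_annul_false where annulled branches are supported); if every delay insn passes, the newly made RETURN keeps its delay slots unchanged and we simply move on, otherwise the SEQUENCE has to be taken apart or the jump redirected back to the real return label.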
*/ flags = get_jump_flags (jump_insn, JUMP_LABEL (jump_insn)); slots = num_delay_slots (jump_insn); if (slots >= XVECLEN (pat, 0) - 1) { for (i = 1; i < XVECLEN (pat, 0); i++) if (! ( #ifdef ANNUL_IFFALSE_SLOTS (INSN_ANNULLED_BRANCH_P (jump_insn) && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i))) ? eligible_for_annul_false (jump_insn, i - 1, XVECEXP (pat, 0, i), flags) : #endif #ifdef ANNUL_IFTRUE_SLOTS (INSN_ANNULLED_BRANCH_P (jump_insn) && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i))) ? eligible_for_annul_true (jump_insn, i - 1, XVECEXP (pat, 0, i), flags) : #endif eligible_for_delay (jump_insn, i - 1, XVECEXP (pat, 0, i), flags))) break; } else i = 0; if (i == XVECLEN (pat, 0)) continue; /* We have to do something with this insn. If it is an unconditional RETURN, delete the SEQUENCE and output the individual insns, followed by the RETURN. Then set things up so we try to find insns for its delay slots, if it needs some. */ if (GET_CODE (PATTERN (jump_insn)) == RETURN) { rtx prev = PREV_INSN (insn); delete_related_insns (insn); for (i = 1; i < XVECLEN (pat, 0); i++) prev = emit_insn_after (PATTERN (XVECEXP (pat, 0, i)), prev); insn = emit_jump_insn_after (PATTERN (jump_insn), prev); emit_barrier_after (insn); if (slots) obstack_ptr_grow (&unfilled_slots_obstack, insn); } else /* It is probably more efficient to keep this with its current delay slot as a branch to a RETURN. */ reorg_redirect_jump (jump_insn, real_return_label); } /* Now delete REAL_RETURN_LABEL if we never used it. Then try to fill any new delay slots we have created. */ if (--LABEL_NUSES (real_return_label) == 0) delete_related_insns (real_return_label); fill_simple_delay_slots (1); fill_simple_delay_slots (0); } #endif /* Try to find insns to place in delay slots. */ void dbr_schedule (rtx first, FILE *file) { rtx insn, next, epilogue_insn = 0; int i; #if 0 int old_flag_no_peephole = flag_no_peephole; /* Execute `final' once in prescan mode to delete any insns that won't be used. Don't let final try to do any peephole optimization--it will ruin dataflow information for this pass. */ flag_no_peephole = 1; final (first, 0, NO_DEBUG, 1, 1); flag_no_peephole = old_flag_no_peephole; #endif /* If the current function has no insns other than the prologue and epilogue, then do not try to fill any delay slots. */ if (n_basic_blocks == 0) return; /* Find the highest INSN_UID and allocate and initialize our map from INSN_UID's to position in code. */ for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn)) { if (INSN_UID (insn) > max_uid) max_uid = INSN_UID (insn); if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG) epilogue_insn = insn; } uid_to_ruid = xmalloc ((max_uid + 1) * sizeof (int)); for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn)) uid_to_ruid[INSN_UID (insn)] = i; /* Initialize the list of insns that need filling. */ if (unfilled_firstobj == 0) { gcc_obstack_init (&unfilled_slots_obstack); unfilled_firstobj = obstack_alloc (&unfilled_slots_obstack, 0); } for (insn = next_active_insn (first); insn; insn = next_active_insn (insn)) { rtx target; INSN_ANNULLED_BRANCH_P (insn) = 0; INSN_FROM_TARGET_P (insn) = 0; /* Skip vector tables. We can't get attributes for them. */ if (GET_CODE (insn) == JUMP_INSN && (GET_CODE (PATTERN (insn)) == ADDR_VEC || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) continue; if (num_delay_slots (insn) > 0) obstack_ptr_grow (&unfilled_slots_obstack, insn); /* Ensure all jumps go to the last of a set of consecutive labels. 
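Consecutive labels all name the same spot in the insn stream; redirecting each jump onto the last of them gives later comparisons of JUMP_LABEL values a single canonical label to work with.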
*/ if (GET_CODE (insn) == JUMP_INSN && (condjump_p (insn) || condjump_in_parallel_p (insn)) && JUMP_LABEL (insn) != 0 && ((target = skip_consecutive_labels (JUMP_LABEL (insn))) != JUMP_LABEL (insn))) redirect_jump (insn, target, 1); } init_resource_info (epilogue_insn); /* Show we haven't computed an end-of-function label yet. */ end_of_function_label = 0; /* Initialize the statistics for this function. */ memset (num_insns_needing_delays, 0, sizeof num_insns_needing_delays); memset (num_filled_delays, 0, sizeof num_filled_delays); /* Now do the delay slot filling. Try everything twice in case earlier changes make more slots fillable. */ for (reorg_pass_number = 0; reorg_pass_number < MAX_REORG_PASSES; reorg_pass_number++) { fill_simple_delay_slots (1); fill_simple_delay_slots (0); fill_eager_delay_slots (); relax_delay_slots (first); } /* Delete any USE insns made by update_block; subsequent passes don't need them or know how to deal with them. */ for (insn = first; insn; insn = next) { next = NEXT_INSN (insn); if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE && INSN_P (XEXP (PATTERN (insn), 0))) next = delete_related_insns (insn); } /* If we made an end of function label, indicate that it is now safe to delete it by undoing our prior adjustment to LABEL_NUSES. If it is now unused, delete it. */ if (end_of_function_label && --LABEL_NUSES (end_of_function_label) == 0) delete_related_insns (end_of_function_label); #ifdef HAVE_return if (HAVE_return && end_of_function_label != 0) make_return_insns (first); #endif obstack_free (&unfilled_slots_obstack, unfilled_firstobj); /* It is not clear why the line below is needed, but it does seem to be. */ unfilled_firstobj = obstack_alloc (&unfilled_slots_obstack, 0); if (file) { int i, j, need_comma; int total_delay_slots[MAX_DELAY_HISTOGRAM + 1]; int total_annul_slots[MAX_DELAY_HISTOGRAM + 1]; for (reorg_pass_number = 0; reorg_pass_number < MAX_REORG_PASSES; reorg_pass_number++) { fprintf (file, ";; Reorg pass #%d:\n", reorg_pass_number + 1); for (i = 0; i < NUM_REORG_FUNCTIONS; i++) { need_comma = 0; fprintf (file, ";; Reorg function #%d\n", i); fprintf (file, ";; %d insns needing delay slots\n;; ", num_insns_needing_delays[i][reorg_pass_number]); for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++) if (num_filled_delays[i][j][reorg_pass_number]) { if (need_comma) fprintf (file, ", "); need_comma = 1; fprintf (file, "%d got %d delays", num_filled_delays[i][j][reorg_pass_number], j); } fprintf (file, "\n"); } } memset (total_delay_slots, 0, sizeof total_delay_slots); memset (total_annul_slots, 0, sizeof total_annul_slots); for (insn = first; insn; insn = NEXT_INSN (insn)) { if (! 
INSN_DELETED_P (insn) && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER) { if (GET_CODE (PATTERN (insn)) == SEQUENCE) { j = XVECLEN (PATTERN (insn), 0) - 1; if (j > MAX_DELAY_HISTOGRAM) j = MAX_DELAY_HISTOGRAM; if (INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (insn), 0, 0))) total_annul_slots[j]++; else total_delay_slots[j]++; } else if (num_delay_slots (insn) > 0) total_delay_slots[0]++; } } fprintf (file, ";; Reorg totals: "); need_comma = 0; for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++) { if (total_delay_slots[j]) { if (need_comma) fprintf (file, ", "); need_comma = 1; fprintf (file, "%d got %d delays", total_delay_slots[j], j); } } fprintf (file, "\n"); #if defined (ANNUL_IFTRUE_SLOTS) || defined (ANNUL_IFFALSE_SLOTS) fprintf (file, ";; Reorg annuls: "); need_comma = 0; for (j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++) { if (total_annul_slots[j]) { if (need_comma) fprintf (file, ", "); need_comma = 1; fprintf (file, "%d got %d delays", total_annul_slots[j], j); } } fprintf (file, "\n"); #endif fprintf (file, "\n"); } /* For all JUMP insns, fill in branch prediction notes, so that during assembler output a target can set branch prediction bits in the code. We have to do this now, as up until this point the destinations of JUMPS can be moved around and changed, but past right here that cannot happen. */ for (insn = first; insn; insn = NEXT_INSN (insn)) { int pred_flags; if (GET_CODE (insn) == INSN) { rtx pat = PATTERN (insn); if (GET_CODE (pat) == SEQUENCE) insn = XVECEXP (pat, 0, 0); } if (GET_CODE (insn) != JUMP_INSN) continue; pred_flags = get_jump_flags (insn, JUMP_LABEL (insn)); REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_BR_PRED, GEN_INT (pred_flags), REG_NOTES (insn)); } free_resource_info (); free (uid_to_ruid); #ifdef DELAY_SLOTS_FOR_EPILOGUE /* SPARC assembler, for instance, emit warning when debug info is output into the delay slot. */ { rtx link; for (link = current_function_epilogue_delay_list; link; link = XEXP (link, 1)) INSN_LOCATOR (XEXP (link, 0)) = 0; } #endif } #endif /* DELAY_SLOTS */ /* Definitions for computing resource usage of specific insns. Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This structure is used to record liveness information at the targets or fallthrough insns of branches. We will most likely need the information at targets again, so save them in a hash table rather than recomputing them each time. */ struct target_info { int uid; /* INSN_UID of target. */ struct target_info *next; /* Next info for same hash bucket. */ HARD_REG_SET live_regs; /* Registers live at target. */ int block; /* Basic block number containing target. */ int bb_tick; /* Generation count of basic block info. */ }; #define TARGET_HASH_PRIME 257 /* Indicates what resources are required at the beginning of the epilogue. 
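This is set up in init_resource_info as a copy of end_of_function_needs taken before the resources set by the epilogue insns themselves are folded into end_of_function_needs.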
*/ static struct resources start_of_epilogue_needs; /* Indicates what resources are required at function end. */ static struct resources end_of_function_needs; /* Define the hash table itself. */ static struct target_info **target_hash_table = NULL; /* For each basic block, we maintain a generation number of its basic block info, which is updated each time we move an insn from the target of a jump. This is the generation number indexed by block number. */ static int *bb_ticks; /* Marks registers possibly live at the current place being scanned by mark_target_live_regs. Also used by update_live_status. */ static HARD_REG_SET current_live_regs; /* Marks registers for which we have seen a REG_DEAD note but no assignment. Also only used by the next two functions. */ static HARD_REG_SET pending_dead_regs; static void update_live_status (rtx, rtx, void *); static int find_basic_block (rtx, int); static rtx next_insn_no_annul (rtx); static rtx find_dead_or_set_registers (rtx, struct resources*, rtx*, int, struct resources, struct resources); /* Utility function called from mark_target_live_regs via note_stores. It deadens any CLOBBERed registers and livens any SET registers. */ static void update_live_status (rtx dest, rtx x, void *data ATTRIBUTE_UNUSED) { int first_regno, last_regno; int i; if (!REG_P (dest) && (GET_CODE (dest) != SUBREG || !REG_P (SUBREG_REG (dest)))) return; if (GET_CODE (dest) == SUBREG) first_regno = subreg_regno (dest); else first_regno = REGNO (dest); last_regno = first_regno + hard_regno_nregs[first_regno][GET_MODE (dest)]; if (GET_CODE (x) == CLOBBER) for (i = first_regno; i < last_regno; i++) CLEAR_HARD_REG_BIT (current_live_regs, i); else for (i = first_regno; i < last_regno; i++) { SET_HARD_REG_BIT (current_live_regs, i); CLEAR_HARD_REG_BIT (pending_dead_regs, i); } } /* Find the number of the basic block with correct live register information that starts closest to INSN. Return -1 if we couldn't find such a basic block or the beginning is more than SEARCH_LIMIT instructions before INSN. Use SEARCH_LIMIT = -1 for an unlimited search. The delay slot filling code destroys the control-flow graph so, instead of finding the basic block containing INSN, we search backwards toward a BARRIER where the live register information is correct. */ static int find_basic_block (rtx insn, int search_limit) { basic_block bb; /* Scan backwards to the previous BARRIER. Then see if we can find a label that starts a basic block. Return the basic block number. */ for (insn = prev_nonnote_insn (insn); insn && GET_CODE (insn) != BARRIER && search_limit != 0; insn = prev_nonnote_insn (insn), --search_limit) ; /* The closest BARRIER is too far away. */ if (search_limit == 0) return -1; /* The start of the function. */ else if (insn == 0) return ENTRY_BLOCK_PTR->next_bb->index; /* See if any of the upcoming CODE_LABELs start a basic block. If we reach anything other than a CODE_LABEL or note, we can't find this code. */ for (insn = next_nonnote_insn (insn); insn && GET_CODE (insn) == CODE_LABEL; insn = next_nonnote_insn (insn)) { FOR_EACH_BB (bb) if (insn == BB_HEAD (bb)) return bb->index; } return -1; } /* Similar to next_insn, but ignores insns in the delay slots of an annulled branch. */ static rtx next_insn_no_annul (rtx insn) { if (insn) { /* If INSN is an annulled branch, skip any insns from the target of the branch. 
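Such insns are marked INSN_FROM_TARGET_P; for an annulled branch they execute only when the branch is taken, so a straight-line walk of the chain must step over them.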
*/ if ((GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == INSN) && INSN_ANNULLED_BRANCH_P (insn) && NEXT_INSN (PREV_INSN (insn)) != insn) { rtx next = NEXT_INSN (insn); enum rtx_code code = GET_CODE (next); while ((code == INSN || code == JUMP_INSN || code == CALL_INSN) && INSN_FROM_TARGET_P (next)) { insn = next; next = NEXT_INSN (insn); code = GET_CODE (next); } } insn = NEXT_INSN (insn); if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE) insn = XVECEXP (PATTERN (insn), 0, 0); } return insn; } /* Given X, some rtl, and RES, a pointer to a `struct resource', mark which resources are referenced by the insn. If INCLUDE_DELAYED_EFFECTS is TRUE, resources used by the called routine will be included for CALL_INSNs. */ void mark_referenced_resources (rtx x, struct resources *res, int include_delayed_effects) { enum rtx_code code = GET_CODE (x); int i, j; unsigned int r; const char *format_ptr; /* Handle leaf items for which we set resource flags. Also, special-case CALL, SET and CLOBBER operators. */ switch (code) { case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case PC: case SYMBOL_REF: case LABEL_REF: return; case SUBREG: if (!REG_P (SUBREG_REG (x))) mark_referenced_resources (SUBREG_REG (x), res, 0); else { unsigned int regno = subreg_regno (x); unsigned int last_regno = regno + hard_regno_nregs[regno][GET_MODE (x)]; if (last_regno > FIRST_PSEUDO_REGISTER) abort (); for (r = regno; r < last_regno; r++) SET_HARD_REG_BIT (res->regs, r); } return; case REG: { unsigned int regno = REGNO (x); unsigned int last_regno = regno + hard_regno_nregs[regno][GET_MODE (x)]; if (last_regno > FIRST_PSEUDO_REGISTER) abort (); for (r = regno; r < last_regno; r++) SET_HARD_REG_BIT (res->regs, r); } return; case MEM: /* If this memory shouldn't change, it really isn't referencing memory. */ if (RTX_UNCHANGING_P (x)) res->unch_memory = 1; else res->memory = 1; res->volatil |= MEM_VOLATILE_P (x); /* Mark registers used to access memory. */ mark_referenced_resources (XEXP (x, 0), res, 0); return; case CC0: res->cc = 1; return; case UNSPEC_VOLATILE: case ASM_INPUT: /* Traditional asm's are always volatile. */ res->volatil = 1; return; case TRAP_IF: res->volatil = 1; break; case ASM_OPERANDS: res->volatil |= MEM_VOLATILE_P (x); /* For all ASM_OPERANDS, we must traverse the vector of input operands. We can not just fall through here since then we would be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate traditional asms unlike their normal usage. */ for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (x); i++) mark_referenced_resources (ASM_OPERANDS_INPUT (x, i), res, 0); return; case CALL: /* The first operand will be a (MEM (xxx)) but doesn't really reference memory. The second operand may be referenced, though. */ mark_referenced_resources (XEXP (XEXP (x, 0), 0), res, 0); mark_referenced_resources (XEXP (x, 1), res, 0); return; case SET: /* Usually, the first operand of SET is set, not referenced. But registers used to access memory are referenced. SET_DEST is also referenced if it is a ZERO_EXTRACT or SIGN_EXTRACT. 
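For example, (set (mem:SI (reg:SI 3)) (reg:SI 4)) references both registers, since reg 3 is needed to form the address, whereas (set (reg:SI 3) (reg:SI 4)) references only reg 4.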
*/ mark_referenced_resources (SET_SRC (x), res, 0); x = SET_DEST (x); if (GET_CODE (x) == SIGN_EXTRACT || GET_CODE (x) == ZERO_EXTRACT || GET_CODE (x) == STRICT_LOW_PART) mark_referenced_resources (x, res, 0); else if (GET_CODE (x) == SUBREG) x = SUBREG_REG (x); if (MEM_P (x)) mark_referenced_resources (XEXP (x, 0), res, 0); return; case CLOBBER: return; case CALL_INSN: if (include_delayed_effects) { /* A CALL references memory, the frame pointer if it exists, the stack pointer, any global registers and any registers given in USE insns immediately in front of the CALL. However, we may have moved some of the parameter loading insns into the delay slot of this CALL. If so, the USE's for them don't count and should be skipped. */ rtx insn = PREV_INSN (x); rtx sequence = 0; int seq_size = 0; int i; /* If we are part of a delay slot sequence, point at the SEQUENCE. */ if (NEXT_INSN (insn) != x) { sequence = PATTERN (NEXT_INSN (insn)); seq_size = XVECLEN (sequence, 0); if (GET_CODE (sequence) != SEQUENCE) abort (); } res->memory = 1; SET_HARD_REG_BIT (res->regs, STACK_POINTER_REGNUM); if (frame_pointer_needed) { SET_HARD_REG_BIT (res->regs, FRAME_POINTER_REGNUM); #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM SET_HARD_REG_BIT (res->regs, HARD_FRAME_POINTER_REGNUM); #endif } for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (global_regs[i]) SET_HARD_REG_BIT (res->regs, i); /* Check for a REG_SETJMP. If it exists, then we must assume that this call can need any register. This is done to be more conservative about how we handle setjmp. We assume that they both use and set all registers. Using all registers ensures that a register will not be considered dead just because it crosses a setjmp call. A register should be considered dead only if the setjmp call returns nonzero. */ if (find_reg_note (x, REG_SETJMP, NULL)) SET_HARD_REG_SET (res->regs); { rtx link; for (link = CALL_INSN_FUNCTION_USAGE (x); link; link = XEXP (link, 1)) if (GET_CODE (XEXP (link, 0)) == USE) { for (i = 1; i < seq_size; i++) { rtx slot_pat = PATTERN (XVECEXP (sequence, 0, i)); if (GET_CODE (slot_pat) == SET && rtx_equal_p (SET_DEST (slot_pat), XEXP (XEXP (link, 0), 0))) break; } if (i >= seq_size) mark_referenced_resources (XEXP (XEXP (link, 0), 0), res, 0); } } } /* ... fall through to other INSN processing ... */ case INSN: case JUMP_INSN: #ifdef INSN_REFERENCES_ARE_DELAYED if (! include_delayed_effects && INSN_REFERENCES_ARE_DELAYED (x)) return; #endif /* No special processing, just speed up. */ mark_referenced_resources (PATTERN (x), res, include_delayed_effects); return; default: break; } /* Process each sub-expression and flag what it needs. */ format_ptr = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++) switch (*format_ptr++) { case 'e': mark_referenced_resources (XEXP (x, i), res, include_delayed_effects); break; case 'E': for (j = 0; j < XVECLEN (x, i); j++) mark_referenced_resources (XVECEXP (x, i, j), res, include_delayed_effects); break; } } /* A subroutine of mark_target_live_regs. Search forward from TARGET looking for registers that are set before they are used. These are dead. Stop after passing a few conditional jumps, and/or a small number of unconditional branches. 
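When an unconditional jump is encountered the scan follows it, and that jump is also returned to the caller (mark_target_live_regs), which uses the registers live at its target to refine the result; zero is returned if no such jump was reached.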
*/ static rtx find_dead_or_set_registers (rtx target, struct resources *res, rtx *jump_target, int jump_count, struct resources set, struct resources needed) { HARD_REG_SET scratch; rtx insn, next; rtx jump_insn = 0; int i; for (insn = target; insn; insn = next) { rtx this_jump_insn = insn; next = NEXT_INSN (insn); /* If this instruction can throw an exception, then we don't know where we might end up next. That means that we have to assume that whatever we have already marked as live really is live. */ if (can_throw_internal (insn)) break; switch (GET_CODE (insn)) { case CODE_LABEL: /* After a label, any pending dead registers that weren't yet used can be made dead. */ AND_COMPL_HARD_REG_SET (pending_dead_regs, needed.regs); AND_COMPL_HARD_REG_SET (res->regs, pending_dead_regs); CLEAR_HARD_REG_SET (pending_dead_regs); continue; case BARRIER: case NOTE: continue; case INSN: if (GET_CODE (PATTERN (insn)) == USE) { /* If INSN is a USE made by update_block, we care about the underlying insn. Any registers set by the underlying insn are live since the insn is being done somewhere else. */ if (INSN_P (XEXP (PATTERN (insn), 0))) mark_set_resources (XEXP (PATTERN (insn), 0), res, 0, MARK_SRC_DEST_CALL); /* All other USE insns are to be ignored. */ continue; } else if (GET_CODE (PATTERN (insn)) == CLOBBER) continue; else if (GET_CODE (PATTERN (insn)) == SEQUENCE) { /* An unconditional jump can be used to fill the delay slot of a call, so search for a JUMP_INSN in any position. */ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) { this_jump_insn = XVECEXP (PATTERN (insn), 0, i); if (GET_CODE (this_jump_insn) == JUMP_INSN) break; } } default: break; } if (GET_CODE (this_jump_insn) == JUMP_INSN) { if (jump_count++ < 10) { if (any_uncondjump_p (this_jump_insn) || GET_CODE (PATTERN (this_jump_insn)) == RETURN) { next = JUMP_LABEL (this_jump_insn); if (jump_insn == 0) { jump_insn = insn; if (jump_target) *jump_target = JUMP_LABEL (this_jump_insn); } } else if (any_condjump_p (this_jump_insn)) { struct resources target_set, target_res; struct resources fallthrough_res; /* We can handle conditional branches here by following both paths, and then IOR the results of the two paths together, which will give us registers that are dead on both paths. Since this is expensive, we give it a much higher cost than unconditional branches. The cost was chosen so that we will follow at most 1 conditional branch. */ jump_count += 4; if (jump_count >= 10) break; mark_referenced_resources (insn, &needed, 1); /* For an annulled branch, mark_set_resources ignores slots filled by instructions from the target. This is correct if the branch is not taken. Since we are following both paths from the branch, we must also compute correct info if the branch is taken. We do this by inverting all of the INSN_FROM_TARGET_P bits, calling mark_set_resources, and then inverting the INSN_FROM_TARGET_P bits again. */ if (GET_CODE (PATTERN (insn)) == SEQUENCE && INSN_ANNULLED_BRANCH_P (this_jump_insn)) { for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++) INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = ! INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)); target_set = set; mark_set_resources (insn, &target_set, 0, MARK_SRC_DEST_CALL); for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++) INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = ! 
INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)); mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); } else { mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); target_set = set; } target_res = *res; COPY_HARD_REG_SET (scratch, target_set.regs); AND_COMPL_HARD_REG_SET (scratch, needed.regs); AND_COMPL_HARD_REG_SET (target_res.regs, scratch); fallthrough_res = *res; COPY_HARD_REG_SET (scratch, set.regs); AND_COMPL_HARD_REG_SET (scratch, needed.regs); AND_COMPL_HARD_REG_SET (fallthrough_res.regs, scratch); find_dead_or_set_registers (JUMP_LABEL (this_jump_insn), &target_res, 0, jump_count, target_set, needed); find_dead_or_set_registers (next, &fallthrough_res, 0, jump_count, set, needed); IOR_HARD_REG_SET (fallthrough_res.regs, target_res.regs); AND_HARD_REG_SET (res->regs, fallthrough_res.regs); break; } else break; } else { /* Don't try this optimization if we expired our jump count above, since that would mean there may be an infinite loop in the function being compiled. */ jump_insn = 0; break; } } mark_referenced_resources (insn, &needed, 1); mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); COPY_HARD_REG_SET (scratch, set.regs); AND_COMPL_HARD_REG_SET (scratch, needed.regs); AND_COMPL_HARD_REG_SET (res->regs, scratch); } return jump_insn; } /* Given X, a part of an insn, and a pointer to a `struct resource', RES, indicate which resources are modified by the insn. If MARK_TYPE is MARK_SRC_DEST_CALL, also mark resources potentially set by the called routine. If IN_DEST is nonzero, it means we are inside a SET. Otherwise, objects are being referenced instead of set. We never mark the insn as modifying the condition code unless it explicitly SETs CC0 even though this is not totally correct. The reason for this is that we require a SET of CC0 to immediately precede the reference to CC0. So if some other insn sets CC0 as a side-effect, we know it cannot affect our computation and thus may be placed in a delay slot. */ void mark_set_resources (rtx x, struct resources *res, int in_dest, enum mark_resource_type mark_type) { enum rtx_code code; int i, j; unsigned int r; const char *format_ptr; restart: code = GET_CODE (x); switch (code) { case NOTE: case BARRIER: case CODE_LABEL: case USE: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case LABEL_REF: case SYMBOL_REF: case CONST: case PC: /* These don't set any resources. */ return; case CC0: if (in_dest) res->cc = 1; return; case CALL_INSN: /* Called routine modifies the condition code, memory, any registers that aren't saved across calls, global registers and anything explicitly CLOBBERed immediately after the CALL_INSN. */ if (mark_type == MARK_SRC_DEST_CALL) { rtx link; res->cc = res->memory = 1; for (r = 0; r < FIRST_PSEUDO_REGISTER; r++) if (call_used_regs[r] || global_regs[r]) SET_HARD_REG_BIT (res->regs, r); for (link = CALL_INSN_FUNCTION_USAGE (x); link; link = XEXP (link, 1)) if (GET_CODE (XEXP (link, 0)) == CLOBBER) mark_set_resources (SET_DEST (XEXP (link, 0)), res, 1, MARK_SRC_DEST); /* Check for a REG_SETJMP. If it exists, then we must assume that this call can clobber any register. */ if (find_reg_note (x, REG_SETJMP, NULL)) SET_HARD_REG_SET (res->regs); } /* ... and also what its RTL says it modifies, if anything. */ case JUMP_INSN: case INSN: /* An insn consisting of just a CLOBBER (or USE) is just for flow and doesn't actually do anything, so we ignore it. 
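By contrast, a CLOBBER appearing inside a larger pattern is handled by the CLOBBER case below and does mark its operand as set.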
*/ #ifdef INSN_SETS_ARE_DELAYED if (mark_type != MARK_SRC_DEST_CALL && INSN_SETS_ARE_DELAYED (x)) return; #endif x = PATTERN (x); if (GET_CODE (x) != USE && GET_CODE (x) != CLOBBER) goto restart; return; case SET: /* If the source of a SET is a CALL, this is actually done by the called routine. So only include it if we are to include the effects of the calling routine. */ mark_set_resources (SET_DEST (x), res, (mark_type == MARK_SRC_DEST_CALL || GET_CODE (SET_SRC (x)) != CALL), mark_type); mark_set_resources (SET_SRC (x), res, 0, MARK_SRC_DEST); return; case CLOBBER: mark_set_resources (XEXP (x, 0), res, 1, MARK_SRC_DEST); return; case SEQUENCE: for (i = 0; i < XVECLEN (x, 0); i++) if (! (INSN_ANNULLED_BRANCH_P (XVECEXP (x, 0, 0)) && INSN_FROM_TARGET_P (XVECEXP (x, 0, i)))) mark_set_resources (XVECEXP (x, 0, i), res, 0, mark_type); return; case POST_INC: case PRE_INC: case POST_DEC: case PRE_DEC: mark_set_resources (XEXP (x, 0), res, 1, MARK_SRC_DEST); return; case PRE_MODIFY: case POST_MODIFY: mark_set_resources (XEXP (x, 0), res, 1, MARK_SRC_DEST); mark_set_resources (XEXP (XEXP (x, 1), 0), res, 0, MARK_SRC_DEST); mark_set_resources (XEXP (XEXP (x, 1), 1), res, 0, MARK_SRC_DEST); return; case SIGN_EXTRACT: case ZERO_EXTRACT: mark_set_resources (XEXP (x, 0), res, in_dest, MARK_SRC_DEST); mark_set_resources (XEXP (x, 1), res, 0, MARK_SRC_DEST); mark_set_resources (XEXP (x, 2), res, 0, MARK_SRC_DEST); return; case MEM: if (in_dest) { res->memory = 1; res->unch_memory |= RTX_UNCHANGING_P (x); res->volatil |= MEM_VOLATILE_P (x); } mark_set_resources (XEXP (x, 0), res, 0, MARK_SRC_DEST); return; case SUBREG: if (in_dest) { if (!REG_P (SUBREG_REG (x))) mark_set_resources (SUBREG_REG (x), res, in_dest, mark_type); else { unsigned int regno = subreg_regno (x); unsigned int last_regno = regno + hard_regno_nregs[regno][GET_MODE (x)]; if (last_regno > FIRST_PSEUDO_REGISTER) abort (); for (r = regno; r < last_regno; r++) SET_HARD_REG_BIT (res->regs, r); } } return; case REG: if (in_dest) { unsigned int regno = REGNO (x); unsigned int last_regno = regno + hard_regno_nregs[regno][GET_MODE (x)]; if (last_regno > FIRST_PSEUDO_REGISTER) abort (); for (r = regno; r < last_regno; r++) SET_HARD_REG_BIT (res->regs, r); } return; case UNSPEC_VOLATILE: case ASM_INPUT: /* Traditional asm's are always volatile. */ res->volatil = 1; return; case TRAP_IF: res->volatil = 1; break; case ASM_OPERANDS: res->volatil |= MEM_VOLATILE_P (x); /* For all ASM_OPERANDS, we must traverse the vector of input operands. We can not just fall through here since then we would be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate traditional asms unlike their normal usage. */ for (i = 0; i < ASM_OPERANDS_INPUT_LENGTH (x); i++) mark_set_resources (ASM_OPERANDS_INPUT (x, i), res, in_dest, MARK_SRC_DEST); return; default: break; } /* Process each sub-expression and flag what it needs. */ format_ptr = GET_RTX_FORMAT (code); for (i = 0; i < GET_RTX_LENGTH (code); i++) switch (*format_ptr++) { case 'e': mark_set_resources (XEXP (x, i), res, in_dest, mark_type); break; case 'E': for (j = 0; j < XVECLEN (x, i); j++) mark_set_resources (XVECEXP (x, i, j), res, in_dest, mark_type); break; } } /* Set the resources that are live at TARGET. If TARGET is zero, we refer to the end of the current function and can return our precomputed value. Otherwise, we try to find out what is live by consulting the basic block information. 
This is tricky, because we must consider the actions of reload and jump optimization, which occur after the basic block information has been computed. Accordingly, we proceed as follows:: We find the previous BARRIER and look at all immediately following labels (with no intervening active insns) to see if any of them start a basic block. If we hit the start of the function first, we use block 0. Once we have found a basic block and a corresponding first insns, we can accurately compute the live status from basic_block_live_regs and reg_renumber. (By starting at a label following a BARRIER, we are immune to actions taken by reload and jump.) Then we scan all insns between that point and our target. For each CLOBBER (or for call-clobbered regs when we pass a CALL_INSN), mark the appropriate registers are dead. For a SET, mark them as live. We have to be careful when using REG_DEAD notes because they are not updated by such things as find_equiv_reg. So keep track of registers marked as dead that haven't been assigned to, and mark them dead at the next CODE_LABEL since reload and jump won't propagate values across labels. If we cannot find the start of a basic block (should be a very rare case, if it can happen at all), mark everything as potentially live. Next, scan forward from TARGET looking for things set or clobbered before they are used. These are not live. Because we can be called many times on the same target, save our results in a hash table indexed by INSN_UID. This is only done if the function init_resource_info () was invoked before we are called. */ void mark_target_live_regs (rtx insns, rtx target, struct resources *res) { int b = -1; unsigned int i; struct target_info *tinfo = NULL; rtx insn; rtx jump_insn = 0; rtx jump_target; HARD_REG_SET scratch; struct resources set, needed; /* Handle end of function. */ if (target == 0) { *res = end_of_function_needs; return; } /* We have to assume memory is needed, but the CC isn't. */ res->memory = 1; res->volatil = res->unch_memory = 0; res->cc = 0; /* See if we have computed this value already. */ if (target_hash_table != NULL) { for (tinfo = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME]; tinfo; tinfo = tinfo->next) if (tinfo->uid == INSN_UID (target)) break; /* Start by getting the basic block number. If we have saved information, we can get it from there unless the insn at the start of the basic block has been deleted. */ if (tinfo && tinfo->block != -1 && ! INSN_DELETED_P (BB_HEAD (BASIC_BLOCK (tinfo->block)))) b = tinfo->block; } if (b == -1) b = find_basic_block (target, MAX_DELAY_SLOT_LIVE_SEARCH); if (target_hash_table != NULL) { if (tinfo) { /* If the information is up-to-date, use it. Otherwise, we will update it below. */ if (b == tinfo->block && b != -1 && tinfo->bb_tick == bb_ticks[b]) { COPY_HARD_REG_SET (res->regs, tinfo->live_regs); return; } } else { /* Allocate a place to put our results and chain it into the hash table. */ tinfo = xmalloc (sizeof (struct target_info)); tinfo->uid = INSN_UID (target); tinfo->block = b; tinfo->next = target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME]; target_hash_table[INSN_UID (target) % TARGET_HASH_PRIME] = tinfo; } } CLEAR_HARD_REG_SET (pending_dead_regs); /* If we found a basic block, get the live registers from it and update them with anything set or killed between its start and the insn before TARGET. Otherwise, we must assume everything is live. 
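Assuming too much is live is safe here: an overestimate can only make fewer insns look suitable for delay slots, it cannot lead to incorrect code.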
*/ if (b != -1) { regset regs_live = BASIC_BLOCK (b)->global_live_at_start; unsigned int j; unsigned int regno; rtx start_insn, stop_insn; /* Compute hard regs live at start of block -- this is the real hard regs marked live, plus live pseudo regs that have been renumbered to hard regs. */ REG_SET_TO_HARD_REG_SET (current_live_regs, regs_live); EXECUTE_IF_SET_IN_REG_SET (regs_live, FIRST_PSEUDO_REGISTER, i, { if (reg_renumber[i] >= 0) { regno = reg_renumber[i]; for (j = regno; j < regno + hard_regno_nregs[regno] [PSEUDO_REGNO_MODE (i)]; j++) SET_HARD_REG_BIT (current_live_regs, j); } }); /* Get starting and ending insn, handling the case where each might be a SEQUENCE. */ start_insn = (b == 0 ? insns : BB_HEAD (BASIC_BLOCK (b))); stop_insn = target; if (GET_CODE (start_insn) == INSN && GET_CODE (PATTERN (start_insn)) == SEQUENCE) start_insn = XVECEXP (PATTERN (start_insn), 0, 0); if (GET_CODE (stop_insn) == INSN && GET_CODE (PATTERN (stop_insn)) == SEQUENCE) stop_insn = next_insn (PREV_INSN (stop_insn)); for (insn = start_insn; insn != stop_insn; insn = next_insn_no_annul (insn)) { rtx link; rtx real_insn = insn; enum rtx_code code = GET_CODE (insn); /* If this insn is from the target of a branch, it isn't going to be used in the sequel. If it is used in both cases, this test will not be true. */ if ((code == INSN || code == JUMP_INSN || code == CALL_INSN) && INSN_FROM_TARGET_P (insn)) continue; /* If this insn is a USE made by update_block, we care about the underlying insn. */ if (code == INSN && GET_CODE (PATTERN (insn)) == USE && INSN_P (XEXP (PATTERN (insn), 0))) real_insn = XEXP (PATTERN (insn), 0); if (GET_CODE (real_insn) == CALL_INSN) { /* CALL clobbers all call-used regs that aren't fixed except sp, ap, and fp. Do this before setting the result of the call live. */ AND_COMPL_HARD_REG_SET (current_live_regs, regs_invalidated_by_call); /* A CALL_INSN sets any global register live, since it may have been modified by the call. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (global_regs[i]) SET_HARD_REG_BIT (current_live_regs, i); } /* Mark anything killed in an insn to be deadened at the next label. Ignore USE insns; the only REG_DEAD notes will be for parameters. But they might be early. A CALL_INSN will usually clobber registers used for parameters. It isn't worth bothering with the unlikely case when it won't. */ if ((GET_CODE (real_insn) == INSN && GET_CODE (PATTERN (real_insn)) != USE && GET_CODE (PATTERN (real_insn)) != CLOBBER) || GET_CODE (real_insn) == JUMP_INSN || GET_CODE (real_insn) == CALL_INSN) { for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_DEAD && REG_P (XEXP (link, 0)) && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER) { unsigned int first_regno = REGNO (XEXP (link, 0)); unsigned int last_regno = (first_regno + hard_regno_nregs[first_regno] [GET_MODE (XEXP (link, 0))]); for (i = first_regno; i < last_regno; i++) SET_HARD_REG_BIT (pending_dead_regs, i); } note_stores (PATTERN (real_insn), update_live_status, NULL); /* If any registers were unused after this insn, kill them. These notes will always be accurate. 
*/ for (link = REG_NOTES (real_insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_UNUSED && REG_P (XEXP (link, 0)) && REGNO (XEXP (link, 0)) < FIRST_PSEUDO_REGISTER) { unsigned int first_regno = REGNO (XEXP (link, 0)); unsigned int last_regno = (first_regno + hard_regno_nregs[first_regno] [GET_MODE (XEXP (link, 0))]); for (i = first_regno; i < last_regno; i++) CLEAR_HARD_REG_BIT (current_live_regs, i); } } else if (GET_CODE (real_insn) == CODE_LABEL) { /* A label clobbers the pending dead registers since neither reload nor jump will propagate a value across a label. */ AND_COMPL_HARD_REG_SET (current_live_regs, pending_dead_regs); CLEAR_HARD_REG_SET (pending_dead_regs); } /* The beginning of the epilogue corresponds to the end of the RTL chain when there are no epilogue insns. Certain resources are implicitly required at that point. */ else if (GET_CODE (real_insn) == NOTE && NOTE_LINE_NUMBER (real_insn) == NOTE_INSN_EPILOGUE_BEG) IOR_HARD_REG_SET (current_live_regs, start_of_epilogue_needs.regs); } COPY_HARD_REG_SET (res->regs, current_live_regs); if (tinfo != NULL) { tinfo->block = b; tinfo->bb_tick = bb_ticks[b]; } } else /* We didn't find the start of a basic block. Assume everything in use. This should happen only extremely rarely. */ SET_HARD_REG_SET (res->regs); CLEAR_RESOURCE (&set); CLEAR_RESOURCE (&needed); jump_insn = find_dead_or_set_registers (target, res, &jump_target, 0, set, needed); /* If we hit an unconditional branch, we have another way of finding out what is live: we can see what is live at the branch target and include anything used but not set before the branch. We add the live resources found using the test below to those found until now. */ if (jump_insn) { struct resources new_resources; rtx stop_insn = next_active_insn (jump_insn); mark_target_live_regs (insns, next_active_insn (jump_target), &new_resources); CLEAR_RESOURCE (&set); CLEAR_RESOURCE (&needed); /* Include JUMP_INSN in the needed registers. */ for (insn = target; insn != stop_insn; insn = next_active_insn (insn)) { mark_referenced_resources (insn, &needed, 1); COPY_HARD_REG_SET (scratch, needed.regs); AND_COMPL_HARD_REG_SET (scratch, set.regs); IOR_HARD_REG_SET (new_resources.regs, scratch); mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); } IOR_HARD_REG_SET (res->regs, new_resources.regs); } if (tinfo != NULL) { COPY_HARD_REG_SET (tinfo->live_regs, res->regs); } } /* Initialize the resources required by mark_target_live_regs (). This should be invoked before the first call to mark_target_live_regs. */ void init_resource_info (rtx epilogue_insn) { int i; /* Indicate what resources are required to be valid at the end of the current function. The condition code never is and memory always is. If the frame pointer is needed, it is and so is the stack pointer unless EXIT_IGNORE_STACK is nonzero. If the frame pointer is not needed, the stack pointer is. Registers used to return the function value are needed. Registers holding global variables are needed. */ end_of_function_needs.cc = 0; end_of_function_needs.memory = 1; end_of_function_needs.unch_memory = 0; CLEAR_HARD_REG_SET (end_of_function_needs.regs); if (frame_pointer_needed) { SET_HARD_REG_BIT (end_of_function_needs.regs, FRAME_POINTER_REGNUM); #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM SET_HARD_REG_BIT (end_of_function_needs.regs, HARD_FRAME_POINTER_REGNUM); #endif if (! 
EXIT_IGNORE_STACK || current_function_sp_is_unchanging) SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM); } else SET_HARD_REG_BIT (end_of_function_needs.regs, STACK_POINTER_REGNUM); if (current_function_return_rtx != 0) mark_referenced_resources (current_function_return_rtx, &end_of_function_needs, 1); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (global_regs[i] #ifdef EPILOGUE_USES || EPILOGUE_USES (i) #endif ) SET_HARD_REG_BIT (end_of_function_needs.regs, i); /* The registers required to be live at the end of the function are represented in the flow information as being dead just prior to reaching the end of the function. For example, the return of a value might be represented by a USE of the return register immediately followed by an unconditional jump to the return label where the return label is the end of the RTL chain. The end of the RTL chain is then taken to mean that the return register is live. This sequence is no longer maintained when epilogue instructions are added to the RTL chain. To reconstruct the original meaning, the start of the epilogue (NOTE_INSN_EPILOGUE_BEG) is regarded as the point where these registers become live (start_of_epilogue_needs). If epilogue instructions are present, the registers set by those instructions won't have been processed by flow. Thus, those registers are additionally required at the end of the RTL chain (end_of_function_needs). */ start_of_epilogue_needs = end_of_function_needs; while ((epilogue_insn = next_nonnote_insn (epilogue_insn))) mark_set_resources (epilogue_insn, &end_of_function_needs, 0, MARK_SRC_DEST_CALL); /* Allocate and initialize the tables used by mark_target_live_regs. */ target_hash_table = xcalloc (TARGET_HASH_PRIME, sizeof (struct target_info *)); bb_ticks = xcalloc (last_basic_block, sizeof (int)); } /* Free up the resources allocated to mark_target_live_regs (). This should be invoked after the last call to mark_target_live_regs (). */ void free_resource_info (void) { if (target_hash_table != NULL) { int i; for (i = 0; i < TARGET_HASH_PRIME; ++i) { struct target_info *ti = target_hash_table[i]; while (ti) { struct target_info *next = ti->next; free (ti); ti = next; } } free (target_hash_table); target_hash_table = NULL; } if (bb_ticks != NULL) { free (bb_ticks); bb_ticks = NULL; } } /* Clear any hashed information that we have stored for INSN. */ void clear_hashed_info_for_insn (rtx insn) { struct target_info *tinfo; if (target_hash_table != NULL) { for (tinfo = target_hash_table[INSN_UID (insn) % TARGET_HASH_PRIME]; tinfo; tinfo = tinfo->next) if (tinfo->uid == INSN_UID (insn)) break; if (tinfo) tinfo->block = -1; } } /* Increment the tick count for the basic block that contains INSN. */ void incr_ticks_for_insn (rtx insn) { int b = find_basic_block (insn, MAX_DELAY_SLOT_LIVE_SEARCH); if (b != -1) bb_ticks[b]++; } /* Add TRIAL to the set of resources used at the end of the current function. */ void mark_end_of_function_resources (rtx trial, int include_delayed_effects) { mark_referenced_resources (trial, &end_of_function_needs, include_delayed_effects); } /* RTL utility routines. Copyright (C) 1987, 1988, 1991, 1994, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Indexed by rtx code, gives number of operands for an rtx with that code. Does NOT include rtx header data (code and links). */ #define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) sizeof FORMAT - 1 , const unsigned char rtx_length[NUM_RTX_CODE] = { /* This file contains the definitions and documentation for the Register Transfer Expressions (rtx's) that make up the Register Transfer Language (rtl) used in the Back End of the GNU compiler. Copyright (C) 1987, 1988, 1992, 1994, 1995, 1997, 1998, 1999, 2000, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Expression definitions and descriptions for all targets are in this file. Some will not be used for some targets. The fields in the cpp macro call "DEF_RTL_EXPR()" are used to create declarations in the C source of the compiler. The fields are: 1. The internal name of the rtx used in the C source. It is a tag in the enumeration "enum rtx_code" defined in "rtl.h". By convention these are in UPPER_CASE. 2. The name of the rtx in the external ASCII format read by read_rtx(), and printed by print_rtx(). These names are stored in rtx_name[]. By convention these are the internal (field 1) names in lower_case. 3. The print format, and type of each rtx->u.fld[] (field) in this rtx. These formats are stored in rtx_format[]. The meaning of the formats is documented in front of this array in rtl.c 4. The class of the rtx. These are stored in rtx_class and are accessed via the GET_RTX_CLASS macro. They are defined as follows: RTX_CONST_OBJ an rtx code that can be used to represent a constant object (e.g, CONST_INT) RTX_OBJ an rtx code that can be used to represent an object (e.g, REG, MEM) RTX_COMPARE an rtx code for a comparison (e.g, LT, GT) RTX_COMM_COMPARE an rtx code for a commutative comparison (e.g, EQ, NE, ORDERED) RTX_UNARY an rtx code for a unary arithmetic expression (e.g, NEG, NOT) RTX_COMM_ARITH an rtx code for a commutative binary operation (e.g,, PLUS, MULT) RTX_TERNARY an rtx code for a non-bitfield three input operation (IF_THEN_ELSE) RTX_BIN_ARITH an rtx code for a non-commutative binary operation (e.g., MINUS, DIV) RTX_BITFIELD_OPS an rtx code for a bit-field operation (ZERO_EXTRACT, SIGN_EXTRACT) RTX_INSN an rtx code for a machine insn (INSN, JUMP_INSN, CALL_INSN) RTX_MATCH an rtx code for something that matches in insns (e.g, MATCH_DUP) RTX_AUTOINC an rtx code for autoincrement addressing modes (e.g. 
POST_DEC) RTX_EXTRA everything else */ /* --------------------------------------------------------------------- Expressions (and "meta" expressions) used for structuring the rtl representation of a program. --------------------------------------------------------------------- */ /* an expression code name unknown to the reader */ DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", RTX_EXTRA) /* (NIL) is used by rtl reader and printer to represent a null pointer. */ DEF_RTL_EXPR(NIL, "nil", "*", RTX_EXTRA) /* include a file */ DEF_RTL_EXPR(INCLUDE, "include", "s", RTX_EXTRA) /* --------------------------------------------------------------------- Expressions used in constructing lists. --------------------------------------------------------------------- */ /* a linked list of expressions */ DEF_RTL_EXPR(EXPR_LIST, "expr_list", "ee", RTX_EXTRA) /* a linked list of instructions. The insns are represented in print by their uids. */ DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types for machine descriptions. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Appears only in machine descriptions. Means use the function named by the second arg (the string) as a predicate; if matched, store the structure that was matched in the operand table at index specified by the first arg (the integer). If the second arg is the null string, the structure is just stored. A third string argument indicates to the register allocator restrictions on where the operand can be allocated. If the target needs no restriction on any instruction this field should be the null string. The string is prepended by: '=' to indicate the operand is only written to. '+' to indicate the operand is both read and written to. Each character in the string represents an allocable class for an operand. 'g' indicates the operand can be any valid class. 'i' indicates the operand can be immediate (in the instruction) data. 'r' indicates the operand can be in a register. 'm' indicates the operand can be in memory. 'o' a subset of the 'm' class. Those memory addressing modes that can be offset at compile time (have a constant added to them). Other characters indicate target dependent operand classes and are described in each target's machine description. For instructions with more than one operand, sets of classes can be separated by a comma to indicate the appropriate multi-operand constraints. There must be a 1 to 1 correspondence between these sets of classes in all operands for an instruction. */ DEF_RTL_EXPR(MATCH_OPERAND, "match_operand", "iss", RTX_MATCH) /* Appears only in machine descriptions. Means match a SCRATCH or a register. When used to generate rtl, a SCRATCH is generated. As for MATCH_OPERAND, the mode specifies the desired mode and the first argument is the operand number. The second argument is the constraint. */ DEF_RTL_EXPR(MATCH_SCRATCH, "match_scratch", "is", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. */ DEF_RTL_EXPR(MATCH_DUP, "match_dup", "i", RTX_MATCH) /* Appears only in machine descriptions. Means apply a predicate, AND match recursively the operands of the rtx. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply (as a string, a function name). 
Operand 2 is a vector of expressions, each of which must match one subexpression of the rtx this construct is matching. */ DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means to match a PARALLEL of arbitrary length. The predicate is applied to the PARALLEL and the initial expressions in the PARALLEL are matched. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply to the PARALLEL. Operand 2 is a vector of expressions, each of which must match the corresponding element in the PARALLEL. */ DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_OPERATOR. */ DEF_RTL_EXPR(MATCH_OP_DUP, "match_op_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_PARALLEL. */ DEF_RTL_EXPR(MATCH_PAR_DUP, "match_par_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Defines the pattern for one kind of instruction. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN, "define_insn", "sEsTV", RTX_EXTRA) /* Definition of a peephole optimization. 1st operand: vector of insn patterns to match 2nd operand: C expression that must be true 3rd operand: template or C code to produce assembler output. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE, "define_peephole", "EsTV", RTX_EXTRA) /* Definition of a split operation. 1st operand: insn pattern to match 2nd operand: C expression that must be true 3rd operand: vector of insn patterns to place into a SEQUENCE 4th operand: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", RTX_EXTRA) /* Definition of an insn and associated split. This is the concatenation, with a few modifications, of a define_insn and a define_split which share the same pattern. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: C expression that must be true for split. 
This may start with "&&" in which case the split condition is the logical and of the insn condition and what follows the "&&" of this operand. 5: vector of insn patterns to place into a SEQUENCE 6: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). 7: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN_AND_SPLIT, "define_insn_and_split", "sEsTsESV", RTX_EXTRA) /* Definition of an RTL peephole operation. Follows the same arguments as define_split. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE2, "define_peephole2", "EsES", RTX_EXTRA) /* Define how to generate multiple insns for a standard insn name. 1st operand: the insn name. 2nd operand: vector of insn-patterns. Use match_operand to substitute an element of `recog_data.operand'. 3rd operand: C expression that must be true for this to be available. This may not test any operands. 4th operand: Extra C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", RTX_EXTRA) /* Define a requirement for delay slots. 1st operand: Condition involving insn attributes that, if true, indicates that the insn requires the number of delay slots shown. 2nd operand: Vector whose length is the three times the number of delay slots required. Each entry gives three conditions, each involving attributes. The first must be true for an insn to occupy that delay slot location. The second is true for all insns that can be annulled if the branch is true and the third is true for all insns that can be annulled if the branch is false. Multiple DEFINE_DELAYs may be present. They indicate differing requirements for delay slots. */ DEF_RTL_EXPR(DEFINE_DELAY, "define_delay", "eE", RTX_EXTRA) /* Define a set of insns that requires a function unit. This means that these insns produce their result after a delay and that there may be restrictions on the number of insns of this type that can be scheduled simultaneously. More than one DEFINE_FUNCTION_UNIT can be specified for a function unit. Each gives a set of operations and associated delays. The first three operands must be the same for each operation for the same function unit. All delays are specified in cycles. 1st operand: Name of function unit (mostly for documentation) 2nd operand: Number of identical function units in CPU 3rd operand: Total number of simultaneous insns that can execute on this function unit; 0 if unlimited. 4th operand: Condition involving insn attribute, that, if true, specifies those insns that this expression applies to. 5th operand: Constant delay after which insn result will be available. 6th operand: Delay until next insn can be scheduled on the function unit executing this operation. The meaning depends on whether or not the next operand is supplied. 7th operand: If this operand is not specified, the 6th operand gives the number of cycles after the instruction matching the 4th operand begins using the function unit until a subsequent insn can begin. A value of zero should be used for a unit with no issue constraints. 
If only one operation can be executed at a time and the unit is busy for the entire time, the 3rd operand should be specified as 1, the 6th operand should be specified as 0, and the 7th operand should not be specified. If this operand is specified, it is a list of attribute expressions. If an insn for which any of these expressions is true is currently executing on the function unit, the issue delay will be given by the 6th operand. Otherwise, the insn can be immediately scheduled (subject to the limit on the number of simultaneous operations executing on the unit.) */ DEF_RTL_EXPR(DEFINE_FUNCTION_UNIT, "define_function_unit", "siieiiV", RTX_EXTRA) /* Define attribute computation for `asm' instructions. */ DEF_RTL_EXPR(DEFINE_ASM_ATTRIBUTES, "define_asm_attributes", "V", RTX_EXTRA) /* Definition of a conditional execution meta operation. Automatically generates new instances of DEFINE_INSN, selected by having attribute "predicable" true. The new pattern will contain a COND_EXEC and the predicate at top-level. Operand: 0: The predicate pattern. The top-level form should match a relational operator. Operands should have only one alternative. 1: A C expression giving an additional condition for recognizing the generated pattern. 2: A template or C code to produce assembler output. */ DEF_RTL_EXPR(DEFINE_COND_EXEC, "define_cond_exec", "Ess", RTX_EXTRA) /* SEQUENCE appears in the result of a `gen_...' function for a DEFINE_EXPAND that wants to make several insns. Its elements are the bodies of the insns that should be made. `emit_insn' takes the SEQUENCE apart and makes separate insns. */ DEF_RTL_EXPR(SEQUENCE, "sequence", "E", RTX_EXTRA) /* Refers to the address of its argument. This is only used in alias.c. */ DEF_RTL_EXPR(ADDRESS, "address", "e", RTX_MATCH) /* ---------------------------------------------------------------------- Constructions for CPU pipeline description described by NDFAs. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* (define_cpu_unit string [string]) describes cpu functional units (separated by comma). 1st operand: Names of cpu functional units. 2nd operand: Name of automaton (see comments for DEFINE_AUTOMATON). All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_CPU_UNIT, "define_cpu_unit", "sS", RTX_EXTRA) /* (define_query_cpu_unit string [string]) describes cpu functional units analogously to define_cpu_unit. The reservation of such units can be queried for automaton state. */ DEF_RTL_EXPR(DEFINE_QUERY_CPU_UNIT, "define_query_cpu_unit", "sS", RTX_EXTRA) /* (exclusion_set string string) means that each CPU functional unit in the first string can not be reserved simultaneously with any unit whose name is in the second string and vice versa. CPU units in the string are separated by commas. For example, it is useful for describing a CPU with a fully pipelined floating point functional unit which can simultaneously execute only single precision floating point insns or only double precision floating point insns. All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(EXCLUSION_SET, "exclusion_set", "ss", RTX_EXTRA) /* (presence_set string string) means that each CPU functional unit in the first string can not be reserved unless at least one of the patterns of units whose names are in the second string is reserved. This is an asymmetric relation.
CPU units or unit patterns in the strings are separated by commas. A pattern is one unit name or several unit names separated by white space. For example, it is useful for describing that slot1 is reserved after a slot0 reservation for a VLIW processor. We could describe it by the following construction (presence_set "slot1" "slot0") Or slot1 is reserved only after a slot0 and unit b0 reservation. In this case we could write (presence_set "slot1" "slot0 b0") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(PRESENCE_SET, "presence_set", "ss", RTX_EXTRA) /* (final_presence_set string string) is analogous to `presence_set'. The difference between them is when checking is done. When an instruction is issued in a given automaton state reflecting all current and planned unit reservations, the automaton state is changed. The first state is a source state, the second one is a result state. Checking for `presence_set' is done on the source state reservation, checking for `final_presence_set' is done on the result reservation. This construction is useful for describing a reservation which is actually two subsequent reservations. For example, if we use (presence_set "slot1" "slot0") the following insn will never be issued (because slot1 requires slot0 which is absent in the source state). (define_reservation "insn_and_nop" "slot0 + slot1") but it can be issued if we use the analogous `final_presence_set'. */ DEF_RTL_EXPR(FINAL_PRESENCE_SET, "final_presence_set", "ss", RTX_EXTRA) /* (absence_set string string) means that each CPU functional unit in the first string can be reserved only if each pattern of units whose names are in the second string is not reserved. This is an asymmetric relation (actually exclusion set is analogous to this one but it is symmetric). CPU units or unit patterns in the string are separated by commas. A pattern is one unit name or several unit names separated by white space. For example, it is useful for describing that slot0 can not be reserved after a slot1 or slot2 reservation for a VLIW processor. We could describe it by the following construction (absence_set "slot2" "slot0, slot1") Or slot2 can not be reserved if slot0 and unit b0 are reserved or slot1 and unit b1 are reserved. In this case we could write (absence_set "slot2" "slot0 b0, slot1 b1") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(ABSENCE_SET, "absence_set", "ss", RTX_EXTRA) /* (final_absence_set string string) is analogous to `absence_set' but checking is done on the result (state) reservation. See comments for `final_presence_set'. */ DEF_RTL_EXPR(FINAL_ABSENCE_SET, "final_absence_set", "ss", RTX_EXTRA) /* (define_bypass number out_insn_names in_insn_names) names a bypass with the given latency (the first number) from insns given by the first string (see define_insn_reservation) into insns given by the second string. Insn names in the strings are separated by commas. The third operand is the optional name of a function which is an additional guard for the bypass. The function will get the two insns as parameters. If the function returns zero, the bypass will be ignored for this case. An additional guard is necessary to recognize complicated bypasses, e.g. when the consumer is a load address. */ DEF_RTL_EXPR(DEFINE_BYPASS, "define_bypass", "issS", RTX_EXTRA) /* (define_automaton string) describes the names of the automata generated and used for pipeline hazard recognition. The names are separated by commas.
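For example (the automaton names here are purely illustrative, not names required by GCC), (define_automaton "pipe_issue, pipe_fpu") declares two automata, one of which might model the issue logic and the other the floating point unit.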
Actually it is possible to generate a single automaton, but unfortunately it can be very large. If we use more than one automaton, the total size of the automata is usually less than that of the single one. The automaton name is used in define_cpu_unit and define_query_cpu_unit. All automata should have unique names. */ DEF_RTL_EXPR(DEFINE_AUTOMATON, "define_automaton", "s", RTX_EXTRA) /* (automata_option string) describes an option for the generation of automata. Currently there are the following options: o "no-minimization" which makes no minimization of automata. This is only worth doing when we are debugging the description and need to look more accurately at reservations of states. o "time" which means printing additional time statistics about generation of automata. o "v" which means generation of a file describing the resulting automata. The file has the suffix `.dfa' and can be used for verification and debugging of the description. o "w" which means generation of a warning instead of an error for non-critical errors. o "ndfa" which makes nondeterministic finite state automata. o "progress" which means output of a progress bar showing how many states were generated so far for the automaton being processed. */ DEF_RTL_EXPR(AUTOMATA_OPTION, "automata_option", "s", RTX_EXTRA) /* (define_reservation string string) names a reservation (the first string) of cpu functional units (the 2nd string). Sometimes unit reservations for different insns contain common parts. In such cases, you can describe the common part and use its name (the 1st parameter) in a regular expression in define_insn_reservation. All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_RESERVATION, "define_reservation", "ss", RTX_EXTRA) /* (define_insn_reservation name default_latency condition regexpr) describes the reservation of cpu functional units (the 3rd operand) for an instruction which is selected by the condition (the 2nd parameter). The first parameter is used for output of debugging information. The reservations are described by a regular expression according to the following syntax: regexp = regexp "," oneof | oneof oneof = oneof "|" allof | allof allof = allof "+" repeat | repeat repeat = element "*" number | element element = cpu_function_unit_name | reservation_name | result_name | "nothing" | "(" regexp ")" 1. "," is used for describing the start of the next cycle in the reservation. 2. "|" is used for describing the reservation described by the first regular expression *or* the reservation described by the second regular expression *or* etc. 3. "+" is used for describing the reservation described by the first regular expression *and* the reservation described by the second regular expression *and* etc. 4. "*" is used for convenience and simply means a sequence in which the regular expression is repeated NUMBER times with cycle advancing (see ","). 5. cpu functional unit name which means its reservation. 6. reservation name -- see define_reservation. 7. the string "nothing" means no unit reservation. */ DEF_RTL_EXPR(DEFINE_INSN_RESERVATION, "define_insn_reservation", "sies", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions used for insn attributes. These also do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Definition of an insn attribute.
1st operand: name of the attribute 2nd operand: comma-separated list of possible attribute values 3rd operand: expression for the default value of the attribute. */ DEF_RTL_EXPR(DEFINE_ATTR, "define_attr", "sse", RTX_EXTRA) /* Marker for the name of an attribute. */ DEF_RTL_EXPR(ATTR, "attr", "s", RTX_EXTRA) /* For use in the last (optional) operand of DEFINE_INSN or DEFINE_PEEPHOLE and in DEFINE_ASM_INSN to specify an attribute to assign to insns matching that pattern. (set_attr "name" "value") is equivalent to (set (attr "name") (const_string "value")) */ DEF_RTL_EXPR(SET_ATTR, "set_attr", "ss", RTX_EXTRA) /* In the last operand of DEFINE_INSN and DEFINE_PEEPHOLE, this can be used to specify that attribute values are to be assigned according to the alternative matched. The following three expressions are equivalent: (set (attr "att") (cond [(eq_attr "alternative" "1") (const_string "a1") (eq_attr "alternative" "2") (const_string "a2")] (const_string "a3"))) (set_attr_alternative "att" [(const_string "a1") (const_string "a2") (const_string "a3")]) (set_attr "att" "a1,a2,a3") */ DEF_RTL_EXPR(SET_ATTR_ALTERNATIVE, "set_attr_alternative", "sE", RTX_EXTRA) /* A conditional expression true if the value of the specified attribute of the current insn equals the specified value. The first operand is the attribute name and the second is the comparison value. */ DEF_RTL_EXPR(EQ_ATTR, "eq_attr", "ss", RTX_EXTRA) /* A special case of the above representing a set of alternatives. The first operand is a bitmap of the set, the second one is the default value. */ DEF_RTL_EXPR(EQ_ATTR_ALT, "eq_attr_alt", "ii", RTX_EXTRA) /* A conditional expression which is true if the specified flag is true for the insn being scheduled in reorg. genattr.c defines the following flags which can be tested by (attr_flag "foo") expressions in eligible_for_delay: forward, backward, very_likely, likely, very_unlikely, and unlikely. */ DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types used for things in the instruction chain. All formats must start with "iuu" to handle the chain. Each insn expression holds an rtl instruction and its semantics during back-end processing. See the macros in "rtl.h" for the meaning of each rtx->u.fld[]. ---------------------------------------------------------------------- */ /* An instruction that cannot jump. */ DEF_RTL_EXPR(INSN, "insn", "iuuBieiee", RTX_INSN) /* An instruction that can possibly jump. Fields ( rtx->u.fld[] ) have exactly the same meaning as INSN's. */ DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "iuuBieiee0", RTX_INSN) /* An instruction that can possibly call a subroutine but which will not change which instruction comes next in the current function. Field ( rtx->u.fld[9] ) is CALL_INSN_FUNCTION_USAGE. All other fields ( rtx->u.fld[] ) have exactly the same meaning as INSN's. */ DEF_RTL_EXPR(CALL_INSN, "call_insn", "iuuBieieee", RTX_INSN) /* A marker that indicates that control will not flow through. */ DEF_RTL_EXPR(BARRIER, "barrier", "iuu000000", RTX_EXTRA) /* Holds a label that is followed by instructions. Operand: 4: is used in jump.c for the use-count of the label. 5: is used in flow.c to point to the chain of label_ref's to this label. 6: is a number that is unique in the entire compilation. 7: is the user-given name of the label, if any. */ DEF_RTL_EXPR(CODE_LABEL, "code_label", "iuuB00is", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* Say where in the code a source line starts, for symbol table's sake.
Operand: 4: unused if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: CODE_LABEL_NUMBER if line number == NOTE_INSN_DELETED_LABEL. */ #else /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: filename, if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: unique number if line number == note_insn_deleted_label. */ #endif DEF_RTL_EXPR(NOTE, "note", "iuuB0ni", RTX_EXTRA) /* ---------------------------------------------------------------------- Top level constituents of INSN, JUMP_INSN and CALL_INSN. ---------------------------------------------------------------------- */ /* Conditionally execute code. Operand 0 is the condition that if true, the code is executed. Operand 1 is the code to be executed (typically a SET). Semantics are that there are no side effects if the condition is false. This pattern is created automatically by the if_convert pass run after reload or by target-specific splitters. */ DEF_RTL_EXPR(COND_EXEC, "cond_exec", "ee", RTX_EXTRA) /* Several operations to be done in parallel (perhaps under COND_EXEC). */ DEF_RTL_EXPR(PARALLEL, "parallel", "E", RTX_EXTRA) /* A string that is passed through to the assembler as input. One can obviously pass comments through by using the assembler comment syntax. These occur in an insn all by themselves as the PATTERN. They also appear inside an ASM_OPERANDS as a convenient way to hold a string. */ DEF_RTL_EXPR(ASM_INPUT, "asm_input", "s", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEi", RTX_EXTRA) #else /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the name of the containing source file. 7th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEsi", RTX_EXTRA) #endif /* A machine-specific operation. 1st operand is a vector of operands being used by the operation so that any needed reloads can be done. 2nd operand is a unique value saying which of a number of machine-specific operations is to be performed. (Note that the vector must be the first operand because of the way that genrecog.c record positions within an insn.) This can occur all by itself in a PATTERN, as a component of a PARALLEL, or inside an expression. */ DEF_RTL_EXPR(UNSPEC, "unspec", "Ei", RTX_EXTRA) /* Similar, but a volatile operation and one which may trap. 
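As a purely illustrative sketch (the constant names UNSPECV_BLOCKAGE and UNSPEC_EXAMPLE are hypothetical, not defined by this file or any particular target): a machine description might represent a scheduling barrier as (unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE), which no pass may delete, duplicate, or move across, whereas a plain (unspec:SI [(reg:SI 0)] UNSPEC_EXAMPLE) remains subject to the usual optimizations, including removal when its result is unused.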
*/ DEF_RTL_EXPR(UNSPEC_VOLATILE, "unspec_volatile", "Ei", RTX_EXTRA) /* Vector of addresses, stored as full words. */ /* Each element is a LABEL_REF to a CODE_LABEL whose address we want. */ DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", RTX_EXTRA) /* Vector of address differences X0 - BASE, X1 - BASE, ... First operand is BASE; the vector contains the X's. The machine mode of this rtx says how much space to leave for each difference and is adjusted by branch shortening if CASE_VECTOR_SHORTEN_MODE is defined. The third and fourth operands store the target labels with the minimum and maximum addresses respectively. The fifth operand stores flags for use by branch shortening. Set at the start of shorten_branches: min_align: the minimum alignment for any of the target labels. base_after_vec: true iff BASE is after the ADDR_DIFF_VEC. min_after_vec: true iff minimum addr target label is after the ADDR_DIFF_VEC. max_after_vec: true iff maximum addr target label is after the ADDR_DIFF_VEC. min_after_base: true iff minimum address target label is after BASE. max_after_base: true iff maximum address target label is after BASE. Set by the actual branch shortening process: offset_unsigned: true iff offsets have to be treated as unsigned. scale: scaling that is necessary to make offsets fit into the mode. The third, fourth and fifth operands are only valid when CASE_VECTOR_SHORTEN_MODE is defined, and only in an optimizing compilations. */ DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEee0", RTX_EXTRA) /* Memory prefetch, with attributes supported on some targets. Operand 1 is the address of the memory to fetch. Operand 2 is 1 for a write access, 0 otherwise. Operand 3 is the level of temporal locality; 0 means there is no temporal locality and 1, 2, and 3 are for increasing levels of temporal locality. The attributes specified by operands 2 and 3 are ignored for targets whose prefetch instructions do not support them. */ DEF_RTL_EXPR(PREFETCH, "prefetch", "eee", RTX_EXTRA) /* ---------------------------------------------------------------------- At the top level of an instruction (perhaps under PARALLEL). ---------------------------------------------------------------------- */ /* Assignment. Operand 1 is the location (REG, MEM, PC, CC0 or whatever) assigned to. Operand 2 is the value stored there. ALL assignment must use SET. Instructions that do multiple assignments must use multiple SET, under PARALLEL. */ DEF_RTL_EXPR(SET, "set", "ee", RTX_EXTRA) /* Indicate something is used in a way that we don't want to explain. For example, subroutine calls will use the register in which the static chain is passed. */ DEF_RTL_EXPR(USE, "use", "e", RTX_EXTRA) /* Indicate something is clobbered in a way that we don't want to explain. For example, subroutine calls will clobber some physical registers (the ones that are by convention not saved). */ DEF_RTL_EXPR(CLOBBER, "clobber", "e", RTX_EXTRA) /* Call a subroutine. Operand 1 is the address to call. Operand 2 is the number of arguments. */ DEF_RTL_EXPR(CALL, "call", "ee", RTX_EXTRA) /* Return from a subroutine. */ DEF_RTL_EXPR(RETURN, "return", "", RTX_EXTRA) /* Conditional trap. Operand 1 is the condition. Operand 2 is the trap code. For an unconditional trap, make the condition (const_int 1). */ DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA) /* Placeholder for _Unwind_Resume before we know if a function call or a branch is needed. Operand 1 is the exception region from which control is flowing. 
*/ DEF_RTL_EXPR(RESX, "resx", "i", RTX_EXTRA) /* ---------------------------------------------------------------------- Primitive values for use in expressions. ---------------------------------------------------------------------- */ /* numeric integer constant */ DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ) /* numeric floating point constant. Operands hold the value. They are all 'w' and there may be from 2 to 6; see real.h. */ DEF_RTL_EXPR(CONST_DOUBLE, "const_double", CONST_DOUBLE_FORMAT, RTX_CONST_OBJ) /* Describes a vector constant. */ DEF_RTL_EXPR(CONST_VECTOR, "const_vector", "E", RTX_EXTRA) /* String constant. Used only for attributes right now. */ DEF_RTL_EXPR(CONST_STRING, "const_string", "s", RTX_OBJ) /* This is used to encapsulate an expression whose value is constant (such as the sum of a SYMBOL_REF and a CONST_INT) so that it will be recognized as a constant operand rather than by arithmetic instructions. */ DEF_RTL_EXPR(CONST, "const", "e", RTX_CONST_OBJ) /* program counter. Ordinary jumps are represented by a SET whose first operand is (PC). */ DEF_RTL_EXPR(PC, "pc", "", RTX_OBJ) /* Used in the cselib routines to describe a value. */ DEF_RTL_EXPR(VALUE, "value", "0", RTX_OBJ) /* A register. The "operand" is the register number, accessed with the REGNO macro. If this number is less than FIRST_PSEUDO_REGISTER then a hardware register is being referred to. The second operand holds the original register number - this will be different for a pseudo register that got turned into a hard register. This rtx needs to have as many (or more) fields as a MEM, since we can change REG rtx's into MEMs during reload. */ DEF_RTL_EXPR(REG, "reg", "i00", RTX_OBJ) /* A scratch register. This represents a register used only within a single insn. It will be turned into a REG during register allocation or reload unless the constraint indicates that the register won't be needed, in which case it can remain a SCRATCH. This code is marked as having one operand so it can be turned into a REG. */ DEF_RTL_EXPR(SCRATCH, "scratch", "0", RTX_OBJ) /* One word of a multi-word value. The first operand is the complete value; the second says which word. The WORDS_BIG_ENDIAN flag controls whether word number 0 (as numbered in a SUBREG) is the most or least significant word. This is also used to refer to a value in a different machine mode. For example, it can be used to refer to a SImode value as if it were QImode, or vice versa. Then the word number is always 0. */ DEF_RTL_EXPR(SUBREG, "subreg", "ei", RTX_EXTRA) /* This one-argument rtx is used for move instructions that are guaranteed to alter only the low part of a destination. Thus, (SET (SUBREG:HI (REG...)) (MEM:HI ...)) has an unspecified effect on the high part of REG, but (SET (STRICT_LOW_PART (SUBREG:HI (REG...))) (MEM:HI ...)) is guaranteed to alter only the bits of REG that are in HImode. The actual instruction used is probably the same in both cases, but the register constraints may be tighter when STRICT_LOW_PART is in use. */ DEF_RTL_EXPR(STRICT_LOW_PART, "strict_low_part", "e", RTX_EXTRA) /* (CONCAT a b) represents the virtual concatenation of a and b to make a value that has as many bits as a and b put together. This is used for complex values. Normally it appears only in DECL_RTLs and during RTL generation, but not in the insn chain. */ DEF_RTL_EXPR(CONCAT, "concat", "ee", RTX_OBJ) /* A memory location; operand is the address. The second operand is the alias set to which this MEM belongs.
We use `0' instead of `w' for this field so that the field need not be specified in machine descriptions. */ DEF_RTL_EXPR(MEM, "mem", "e0", RTX_OBJ) /* Reference to an assembler label in the code for this function. The operand is a CODE_LABEL found in the insn chain. The unprinted fields 1 and 2 are used in flow.c for the LABEL_NEXTREF and CONTAINING_INSN. */ DEF_RTL_EXPR(LABEL_REF, "label_ref", "u00", RTX_CONST_OBJ) /* Reference to a named label: Operand 0: label name Operand 1: flags (see SYMBOL_FLAG_* in rtl.h) Operand 2: tree from which this symbol is derived, or null. This is either a DECL node, or some kind of constant. */ DEF_RTL_EXPR(SYMBOL_REF, "symbol_ref", "s00", RTX_CONST_OBJ) /* The condition code register is represented, in our imagination, as a register holding a value that can be compared to zero. In fact, the machine has already compared them and recorded the results; but instructions that look at the condition code pretend to be looking at the entire value and comparing it. */ DEF_RTL_EXPR(CC0, "cc0", "", RTX_OBJ) /* ===================================================================== A QUEUED expression really points to a member of the queue of instructions to be output later for postincrement/postdecrement. QUEUED expressions never become part of instructions. When a QUEUED expression would be put into an instruction, instead either the incremented variable or a copy of its previous value is used. Operands are: 0. the variable to be incremented (a REG rtx). 1. the incrementing instruction, or 0 if it hasn't been output yet. 2. A REG rtx for a copy of the old value of the variable, or 0 if none yet. 3. the body to use for the incrementing instruction 4. the next QUEUED expression in the queue. ====================================================================== */ DEF_RTL_EXPR(QUEUED, "queued", "eeeee", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions for operators in an rtl pattern ---------------------------------------------------------------------- */ /* if_then_else. This is used in representing ordinary conditional jump instructions. Operand: 0: condition 1: then expr 2: else expr */ DEF_RTL_EXPR(IF_THEN_ELSE, "if_then_else", "eee", RTX_TERNARY) /* General conditional. The first operand is a vector composed of pairs of expressions. The first element of each pair is evaluated, in turn. The value of the conditional is the second expression of the first pair whose first expression evaluates nonzero. If none of the expressions is true, the second operand will be used as the value of the conditional. This should be replaced with use of IF_THEN_ELSE. */ DEF_RTL_EXPR(COND, "cond", "Ee", RTX_EXTRA) /* Comparison, produces a condition code result. */ DEF_RTL_EXPR(COMPARE, "compare", "ee", RTX_BIN_ARITH) /* plus */ DEF_RTL_EXPR(PLUS, "plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1. */ DEF_RTL_EXPR(MINUS, "minus", "ee", RTX_BIN_ARITH) /* Minus operand 0. */ DEF_RTL_EXPR(NEG, "neg", "e", RTX_UNARY) DEF_RTL_EXPR(MULT, "mult", "ee", RTX_COMM_ARITH) /* Operand 0 divided by operand 1. */ DEF_RTL_EXPR(DIV, "div", "ee", RTX_BIN_ARITH) /* Remainder of operand 0 divided by operand 1. */ DEF_RTL_EXPR(MOD, "mod", "ee", RTX_BIN_ARITH) /* Unsigned divide and remainder. */ DEF_RTL_EXPR(UDIV, "udiv", "ee", RTX_BIN_ARITH) DEF_RTL_EXPR(UMOD, "umod", "ee", RTX_BIN_ARITH) /* Bitwise operations. 
*/ DEF_RTL_EXPR(AND, "and", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(IOR, "ior", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(XOR, "xor", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(NOT, "not", "e", RTX_UNARY) /* Operand: 0: value to be shifted. 1: number of bits. */ DEF_RTL_EXPR(ASHIFT, "ashift", "ee", RTX_BIN_ARITH) /* shift left */ DEF_RTL_EXPR(ROTATE, "rotate", "ee", RTX_BIN_ARITH) /* rotate left */ DEF_RTL_EXPR(ASHIFTRT, "ashiftrt", "ee", RTX_BIN_ARITH) /* arithmetic shift right */ DEF_RTL_EXPR(LSHIFTRT, "lshiftrt", "ee", RTX_BIN_ARITH) /* logical shift right */ DEF_RTL_EXPR(ROTATERT, "rotatert", "ee", RTX_BIN_ARITH) /* rotate right */ /* Minimum and maximum values of two operands. We need both signed and unsigned forms. (We cannot use MIN for SMIN because it conflicts with a macro of the same name.) */ DEF_RTL_EXPR(SMIN, "smin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(SMAX, "smax", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMIN, "umin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMAX, "umax", "ee", RTX_COMM_ARITH) /* These unary operations are used to represent incrementation and decrementation as they occur in memory addresses. The amount of increment or decrement are not represented because they can be understood from the machine-mode of the containing MEM. These operations exist in only two cases: 1. pushes onto the stack. 2. created automatically by the life_analysis pass in flow.c. */ DEF_RTL_EXPR(PRE_DEC, "pre_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(PRE_INC, "pre_inc", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_DEC, "post_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_INC, "post_inc", "e", RTX_AUTOINC) /* These binary operations are used to represent generic address side-effects in memory addresses, except for simple incrementation or decrementation which use the above operations. They are created automatically by the life_analysis pass in flow.c. The first operand is a REG which is used as the address. The second operand is an expression that is assigned to the register, either before (PRE_MODIFY) or after (POST_MODIFY) evaluating the address. Currently, the compiler can only handle second operands of the form (plus (reg) (reg)) and (plus (reg) (const_int)), where the first operand of the PLUS has to be the same register as the first operand of the *_MODIFY. */ DEF_RTL_EXPR(PRE_MODIFY, "pre_modify", "ee", RTX_AUTOINC) DEF_RTL_EXPR(POST_MODIFY, "post_modify", "ee", RTX_AUTOINC) /* Comparison operations. The ordered comparisons exist in two flavors, signed and unsigned. */ DEF_RTL_EXPR(NE, "ne", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(EQ, "eq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(GE, "ge", "ee", RTX_COMPARE) DEF_RTL_EXPR(GT, "gt", "ee", RTX_COMPARE) DEF_RTL_EXPR(LE, "le", "ee", RTX_COMPARE) DEF_RTL_EXPR(LT, "lt", "ee", RTX_COMPARE) DEF_RTL_EXPR(GEU, "geu", "ee", RTX_COMPARE) DEF_RTL_EXPR(GTU, "gtu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LEU, "leu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LTU, "ltu", "ee", RTX_COMPARE) /* Additional floating point unordered comparison flavors. */ DEF_RTL_EXPR(UNORDERED, "unordered", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(ORDERED, "ordered", "ee", RTX_COMM_COMPARE) /* These are equivalent to unordered or ... */ DEF_RTL_EXPR(UNEQ, "uneq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(UNGE, "unge", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNGT, "ungt", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLE, "unle", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLT, "unlt", "ee", RTX_COMPARE) /* This is an ordered NE, ie !UNEQ, ie false for NaN. */ DEF_RTL_EXPR(LTGT, "ltgt", "ee", RTX_COMM_COMPARE) /* Represents the result of sign-extending the sole operand. 
The machine modes of the operand and of the SIGN_EXTEND expression determine how much sign-extension is going on. */ DEF_RTL_EXPR(SIGN_EXTEND, "sign_extend", "e", RTX_UNARY) /* Similar for zero-extension (such as unsigned short to int). */ DEF_RTL_EXPR(ZERO_EXTEND, "zero_extend", "e", RTX_UNARY) /* Similar but here the operand has a wider mode. */ DEF_RTL_EXPR(TRUNCATE, "truncate", "e", RTX_UNARY) /* Similar for extending floating-point values (such as SFmode to DFmode). */ DEF_RTL_EXPR(FLOAT_EXTEND, "float_extend", "e", RTX_UNARY) DEF_RTL_EXPR(FLOAT_TRUNCATE, "float_truncate", "e", RTX_UNARY) /* Conversion of fixed point operand to floating point value. */ DEF_RTL_EXPR(FLOAT, "float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to fixed point value. Value is defined only when the operand's value is an integer. With floating-point machine mode (and operand with same mode): Operand is rounded toward zero to produce an integer value represented in floating point. */ DEF_RTL_EXPR(FIX, "fix", "e", RTX_UNARY) /* Conversion of unsigned fixed point operand to floating point value. */ DEF_RTL_EXPR(UNSIGNED_FLOAT, "unsigned_float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to *unsigned* fixed point value. Value is defined only when the operand's value is an integer. */ DEF_RTL_EXPR(UNSIGNED_FIX, "unsigned_fix", "e", RTX_UNARY) /* Absolute value */ DEF_RTL_EXPR(ABS, "abs", "e", RTX_UNARY) /* Square root */ DEF_RTL_EXPR(SQRT, "sqrt", "e", RTX_UNARY) /* Find first bit that is set. Value is 1 + number of trailing zeros in the arg., or 0 if arg is 0. */ DEF_RTL_EXPR(FFS, "ffs", "e", RTX_UNARY) /* Count leading zeros. */ DEF_RTL_EXPR(CLZ, "clz", "e", RTX_UNARY) /* Count trailing zeros. */ DEF_RTL_EXPR(CTZ, "ctz", "e", RTX_UNARY) /* Population count (number of 1 bits). */ DEF_RTL_EXPR(POPCOUNT, "popcount", "e", RTX_UNARY) /* Population parity (number of 1 bits modulo 2). */ DEF_RTL_EXPR(PARITY, "parity", "e", RTX_UNARY) /* Reference to a signed bit-field of specified size and position. Operand 0 is the memory unit (usually SImode or QImode) which contains the field's first bit. Operand 1 is the width, in bits. Operand 2 is the number of bits in the memory unit before the first bit of this field. If BITS_BIG_ENDIAN is defined, the first bit is the msb and operand 2 counts from the msb of the memory unit. Otherwise, the first bit is the lsb and operand 2 counts from the lsb of the memory unit. */ DEF_RTL_EXPR(SIGN_EXTRACT, "sign_extract", "eee", RTX_BITFIELD_OPS) /* Similar for unsigned bit-field. */ DEF_RTL_EXPR(ZERO_EXTRACT, "zero_extract", "eee", RTX_BITFIELD_OPS) /* For RISC machines. These save memory when splitting insns. */ /* HIGH are the high-order bits of a constant expression. */ DEF_RTL_EXPR(HIGH, "high", "e", RTX_CONST_OBJ) /* LO_SUM is the sum of a register and the low-order bits of a constant expression. */ DEF_RTL_EXPR(LO_SUM, "lo_sum", "ee", RTX_OBJ) /* Header for range information. Operand 0 is the NOTE_INSN_RANGE_BEG insn. Operand 1 is the NOTE_INSN_RANGE_END insn. Operand 2 is a vector of all of the registers that can be substituted within this range. Operand 3 is the number of calls in the range. Operand 4 is the number of insns in the range. Operand 5 is the unique range number for this range. Operand 6 is the basic block # of the start of the live range. Operand 7 is the basic block # of the end of the live range. Operand 8 is the loop depth. 
Operand 9 is a bitmap of the registers live at the start of the range. Operand 10 is a bitmap of the registers live at the end of the range. Operand 11 is marker number for the start of the range. Operand 12 is the marker number for the end of the range. */ DEF_RTL_EXPR(RANGE_INFO, "range_info", "uuEiiiiiibbii", RTX_EXTRA) /* Registers that can be substituted within the range. Operand 0 is the original pseudo register number. Operand 1 will be filled in with the pseudo register the value is copied for the duration of the range. Operand 2 is the number of references within the range to the register. Operand 3 is the number of sets or clobbers of the register in the range. Operand 4 is the number of deaths the register has. Operand 5 is the copy flags that give the status of whether a copy is needed from the original register to the new register at the beginning of the range, or whether a copy from the new register back to the original at the end of the range. Operand 6 is the live length. Operand 7 is the number of calls that this register is live across. Operand 8 is the symbol node of the variable if the register is a user variable. Operand 9 is the block node that the variable is declared in if the register is a user variable. */ DEF_RTL_EXPR(RANGE_REG, "range_reg", "iiiiiiiitt", RTX_EXTRA) /* Information about a local variable's ranges. Operand 0 is an EXPR_LIST of the different ranges a variable is in where it is copied to a different pseudo register. Operand 1 is the block that the variable is declared in. Operand 2 is the number of distinct ranges. */ DEF_RTL_EXPR(RANGE_VAR, "range_var", "eti", RTX_EXTRA) /* Information about the registers that are live at the current point. Operand 0 is the live bitmap. Operand 1 is the original block number. */ DEF_RTL_EXPR(RANGE_LIVE, "range_live", "bi", RTX_EXTRA) /* Describes a merge operation between two vector values. Operands 0 and 1 are the vectors to be merged, operand 2 is a bitmask that specifies where the parts of the result are taken from. Set bits indicate operand 0, clear bits indicate operand 1. The parts are defined by the mode of the vectors. */ DEF_RTL_EXPR(VEC_MERGE, "vec_merge", "eee", RTX_TERNARY) /* Describes an operation that selects parts of a vector. Operands 0 is the source vector, operand 1 is a PARALLEL that contains a CONST_INT for each of the subparts of the result vector, giving the number of the source subpart that should be stored into it. */ DEF_RTL_EXPR(VEC_SELECT, "vec_select", "ee", RTX_BIN_ARITH) /* Describes a vector concat operation. Operands 0 and 1 are the source vectors, the result is a vector that is as long as operands 0 and 1 combined and is the concatenation of the two source vectors. */ DEF_RTL_EXPR(VEC_CONCAT, "vec_concat", "ee", RTX_BIN_ARITH) /* Describes an operation that converts a small vector into a larger one by duplicating the input values. The output vector mode must have the same submodes as the input vector mode, and the number of output parts must be an integer multiple of the number of input parts. */ DEF_RTL_EXPR(VEC_DUPLICATE, "vec_duplicate", "e", RTX_UNARY) /* Addition with signed saturation */ DEF_RTL_EXPR(SS_PLUS, "ss_plus", "ee", RTX_COMM_ARITH) /* Addition with unsigned saturation */ DEF_RTL_EXPR(US_PLUS, "us_plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1, with signed saturation. */ DEF_RTL_EXPR(SS_MINUS, "ss_minus", "ee", RTX_BIN_ARITH) /* Operand 0 minus operand 1, with unsigned saturation. 
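As a worked example of the saturating semantics (added here for illustration only): with QImode operands, unsigned saturating subtraction of 20 from 10 yields 0 instead of wrapping around, just as signed saturating addition of 100 and 100 yields the QImode maximum of 127 instead of overflowing.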
*/ DEF_RTL_EXPR(US_MINUS, "us_minus", "ee", RTX_BIN_ARITH) /* Signed saturating truncate. */ DEF_RTL_EXPR(SS_TRUNCATE, "ss_truncate", "e", RTX_UNARY) /* Unsigned saturating truncate. */ DEF_RTL_EXPR(US_TRUNCATE, "us_truncate", "e", RTX_UNARY) /* Information about the variable and its location. */ DEF_RTL_EXPR(VAR_LOCATION, "var_location", "te", RTX_EXTRA) /* Local variables: mode:c End: */ }; #undef DEF_RTL_EXPR /* Indexed by rtx code, gives the name of that kind of rtx, as a C string. */ #define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) NAME , const char * const rtx_name[NUM_RTX_CODE] = { /* This file contains the definitions and documentation for the Register Transfer Expressions (rtx's) that make up the Register Transfer Language (rtl) used in the Back End of the GNU compiler. Copyright (C) 1987, 1988, 1992, 1994, 1995, 1997, 1998, 1999, 2000, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Expression definitions and descriptions for all targets are in this file. Some will not be used for some targets. The fields in the cpp macro call "DEF_RTL_EXPR()" are used to create declarations in the C source of the compiler. The fields are: 1. The internal name of the rtx used in the C source. It is a tag in the enumeration "enum rtx_code" defined in "rtl.h". By convention these are in UPPER_CASE. 2. The name of the rtx in the external ASCII format read by read_rtx(), and printed by print_rtx(). These names are stored in rtx_name[]. By convention these are the internal (field 1) names in lower_case. 3. The print format, and type of each rtx->u.fld[] (field) in this rtx. These formats are stored in rtx_format[]. The meaning of the formats is documented in front of this array in rtl.c 4. The class of the rtx. These are stored in rtx_class and are accessed via the GET_RTX_CLASS macro. They are defined as follows: RTX_CONST_OBJ an rtx code that can be used to represent a constant object (e.g, CONST_INT) RTX_OBJ an rtx code that can be used to represent an object (e.g, REG, MEM) RTX_COMPARE an rtx code for a comparison (e.g, LT, GT) RTX_COMM_COMPARE an rtx code for a commutative comparison (e.g, EQ, NE, ORDERED) RTX_UNARY an rtx code for a unary arithmetic expression (e.g, NEG, NOT) RTX_COMM_ARITH an rtx code for a commutative binary operation (e.g,, PLUS, MULT) RTX_TERNARY an rtx code for a non-bitfield three input operation (IF_THEN_ELSE) RTX_BIN_ARITH an rtx code for a non-commutative binary operation (e.g., MINUS, DIV) RTX_BITFIELD_OPS an rtx code for a bit-field operation (ZERO_EXTRACT, SIGN_EXTRACT) RTX_INSN an rtx code for a machine insn (INSN, JUMP_INSN, CALL_INSN) RTX_MATCH an rtx code for something that matches in insns (e.g, MATCH_DUP) RTX_AUTOINC an rtx code for autoincrement addressing modes (e.g. 
POST_DEC) RTX_EXTRA everything else */ /* --------------------------------------------------------------------- Expressions (and "meta" expressions) used for structuring the rtl representation of a program. --------------------------------------------------------------------- */ /* an expression code name unknown to the reader */ DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", RTX_EXTRA) /* (NIL) is used by rtl reader and printer to represent a null pointer. */ DEF_RTL_EXPR(NIL, "nil", "*", RTX_EXTRA) /* include a file */ DEF_RTL_EXPR(INCLUDE, "include", "s", RTX_EXTRA) /* --------------------------------------------------------------------- Expressions used in constructing lists. --------------------------------------------------------------------- */ /* a linked list of expressions */ DEF_RTL_EXPR(EXPR_LIST, "expr_list", "ee", RTX_EXTRA) /* a linked list of instructions. The insns are represented in print by their uids. */ DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types for machine descriptions. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Appears only in machine descriptions. Means use the function named by the second arg (the string) as a predicate; if matched, store the structure that was matched in the operand table at index specified by the first arg (the integer). If the second arg is the null string, the structure is just stored. A third string argument indicates to the register allocator restrictions on where the operand can be allocated. If the target needs no restriction on any instruction this field should be the null string. The string is prepended by: '=' to indicate the operand is only written to. '+' to indicate the operand is both read and written to. Each character in the string represents an allocable class for an operand. 'g' indicates the operand can be any valid class. 'i' indicates the operand can be immediate (in the instruction) data. 'r' indicates the operand can be in a register. 'm' indicates the operand can be in memory. 'o' a subset of the 'm' class. Those memory addressing modes that can be offset at compile time (have a constant added to them). Other characters indicate target dependent operand classes and are described in each target's machine description. For instructions with more than one operand, sets of classes can be separated by a comma to indicate the appropriate multi-operand constraints. There must be a 1 to 1 correspondence between these sets of classes in all operands for an instruction. */ DEF_RTL_EXPR(MATCH_OPERAND, "match_operand", "iss", RTX_MATCH) /* Appears only in machine descriptions. Means match a SCRATCH or a register. When used to generate rtl, a SCRATCH is generated. As for MATCH_OPERAND, the mode specifies the desired mode and the first argument is the operand number. The second argument is the constraint. */ DEF_RTL_EXPR(MATCH_SCRATCH, "match_scratch", "is", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. */ DEF_RTL_EXPR(MATCH_DUP, "match_dup", "i", RTX_MATCH) /* Appears only in machine descriptions. Means apply a predicate, AND match recursively the operands of the rtx. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply (as a string, a function name). 
Operand 2 is a vector of expressions, each of which must match one subexpression of the rtx this construct is matching. */ DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means to match a PARALLEL of arbitrary length. The predicate is applied to the PARALLEL and the initial expressions in the PARALLEL are matched. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply to the PARALLEL. Operand 2 is a vector of expressions, each of which must match the corresponding element in the PARALLEL. */ DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_OPERATOR. */ DEF_RTL_EXPR(MATCH_OP_DUP, "match_op_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_PARALLEL. */ DEF_RTL_EXPR(MATCH_PAR_DUP, "match_par_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Defines the pattern for one kind of instruction. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN, "define_insn", "sEsTV", RTX_EXTRA) /* Definition of a peephole optimization. 1st operand: vector of insn patterns to match 2nd operand: C expression that must be true 3rd operand: template or C code to produce assembler output. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE, "define_peephole", "EsTV", RTX_EXTRA) /* Definition of a split operation. 1st operand: insn pattern to match 2nd operand: C expression that must be true 3rd operand: vector of insn patterns to place into a SEQUENCE 4th operand: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", RTX_EXTRA) /* Definition of an insn and associated split. This is the concatenation, with a few modifications, of a define_insn and a define_split which share the same pattern. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: C expression that must be true for split. 
This may start with "&&" in which case the split condition is the logical and of the insn condition and what follows the "&&" of this operand. 5: vector of insn patterns to place into a SEQUENCE 6: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). 7: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN_AND_SPLIT, "define_insn_and_split", "sEsTsESV", RTX_EXTRA) /* Definition of an RTL peephole operation. Follows the same arguments as define_split. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE2, "define_peephole2", "EsES", RTX_EXTRA) /* Define how to generate multiple insns for a standard insn name. 1st operand: the insn name. 2nd operand: vector of insn-patterns. Use match_operand to substitute an element of `recog_data.operand'. 3rd operand: C expression that must be true for this to be available. This may not test any operands. 4th operand: Extra C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", RTX_EXTRA) /* Define a requirement for delay slots. 1st operand: Condition involving insn attributes that, if true, indicates that the insn requires the number of delay slots shown. 2nd operand: Vector whose length is the three times the number of delay slots required. Each entry gives three conditions, each involving attributes. The first must be true for an insn to occupy that delay slot location. The second is true for all insns that can be annulled if the branch is true and the third is true for all insns that can be annulled if the branch is false. Multiple DEFINE_DELAYs may be present. They indicate differing requirements for delay slots. */ DEF_RTL_EXPR(DEFINE_DELAY, "define_delay", "eE", RTX_EXTRA) /* Define a set of insns that requires a function unit. This means that these insns produce their result after a delay and that there may be restrictions on the number of insns of this type that can be scheduled simultaneously. More than one DEFINE_FUNCTION_UNIT can be specified for a function unit. Each gives a set of operations and associated delays. The first three operands must be the same for each operation for the same function unit. All delays are specified in cycles. 1st operand: Name of function unit (mostly for documentation) 2nd operand: Number of identical function units in CPU 3rd operand: Total number of simultaneous insns that can execute on this function unit; 0 if unlimited. 4th operand: Condition involving insn attribute, that, if true, specifies those insns that this expression applies to. 5th operand: Constant delay after which insn result will be available. 6th operand: Delay until next insn can be scheduled on the function unit executing this operation. The meaning depends on whether or not the next operand is supplied. 7th operand: If this operand is not specified, the 6th operand gives the number of cycles after the instruction matching the 4th operand begins using the function unit until a subsequent insn can begin. A value of zero should be used for a unit with no issue constraints. 
If only one operation can be executed at a time and the unit is busy for the entire time, the 3rd operand should be specified as 1, the 6th operand should be specified as 0, and the 7th operand should not be specified. If this operand is specified, it is a list of attribute expressions. If an insn for which any of these expressions is true is currently executing on the function unit, the issue delay will be given by the 6th operand. Otherwise, the insn can be immediately scheduled (subject to the limit on the number of simultaneous operations executing on the unit). */ DEF_RTL_EXPR(DEFINE_FUNCTION_UNIT, "define_function_unit", "siieiiV", RTX_EXTRA) /* Define attribute computation for `asm' instructions. */ DEF_RTL_EXPR(DEFINE_ASM_ATTRIBUTES, "define_asm_attributes", "V", RTX_EXTRA) /* Definition of a conditional execution meta operation. Automatically generates new instances of DEFINE_INSN, selected by having attribute "predicable" true. The new pattern will contain a COND_EXEC and the predicate at top-level. Operand: 0: The predicate pattern. The top-level form should match a relational operator. Operands should have only one alternative. 1: A C expression giving an additional condition for recognizing the generated pattern. 2: A template or C code to produce assembler output. */ DEF_RTL_EXPR(DEFINE_COND_EXEC, "define_cond_exec", "Ess", RTX_EXTRA) /* SEQUENCE appears in the result of a `gen_...' function for a DEFINE_EXPAND that wants to make several insns. Its elements are the bodies of the insns that should be made. `emit_insn' takes the SEQUENCE apart and makes separate insns. */ DEF_RTL_EXPR(SEQUENCE, "sequence", "E", RTX_EXTRA) /* Refers to the address of its argument. This is only used in alias.c. */ DEF_RTL_EXPR(ADDRESS, "address", "e", RTX_MATCH) /* ---------------------------------------------------------------------- Constructions for CPU pipeline description described by NDFAs. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* (define_cpu_unit string [string]) describes cpu functional units (separated by commas). 1st operand: Names of cpu functional units. 2nd operand: Name of automaton (see comments for DEFINE_AUTOMATON). All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_CPU_UNIT, "define_cpu_unit", "sS", RTX_EXTRA) /* (define_query_cpu_unit string [string]) describes cpu functional units analogously to define_cpu_unit. The reservation of such units can be queried for automaton state. */ DEF_RTL_EXPR(DEFINE_QUERY_CPU_UNIT, "define_query_cpu_unit", "sS", RTX_EXTRA) /* (exclusion_set string string) means that each CPU functional unit in the first string can not be reserved simultaneously with any unit whose name is in the second string, and vice versa. CPU units in the string are separated by commas. For example, this is useful for describing a CPU with a fully pipelined floating point functional unit which can simultaneously execute only single precision floating point insns or only double precision floating point insns. All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(EXCLUSION_SET, "exclusion_set", "ss", RTX_EXTRA) /* (presence_set string string) means that each CPU functional unit in the first string can not be reserved unless at least one of the unit patterns whose names are in the second string is reserved. This is an asymmetric relation.
CPU units or unit patterns in the strings are separated by commas. A pattern is one unit name, or several unit names separated by white space. For example, this is useful for describing that, on a VLIW processor, slot1 is reserved only after slot0 has been reserved. We could describe it with the following construction (presence_set "slot1" "slot0") Or slot1 may be reserved only after both slot0 and unit b0 have been reserved. In this case we could write (presence_set "slot1" "slot0 b0") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(PRESENCE_SET, "presence_set", "ss", RTX_EXTRA) /* (final_presence_set string string) is analogous to `presence_set'. The difference between them is when the checking is done. When an instruction is issued in a given automaton state reflecting all current and planned unit reservations, the automaton state is changed. The first state is a source state, the second one is a result state. Checking for `presence_set' is done on the source state reservation; checking for `final_presence_set' is done on the result reservation. This construction is useful for describing a reservation which is actually two subsequent reservations. For example, if we use (presence_set "slot1" "slot0") the following insn will never be issued (because slot1 requires slot0, which is absent in the source state). (define_reservation "insn_and_nop" "slot0 + slot1") but it can be issued if we use the analogous `final_presence_set'. */ DEF_RTL_EXPR(FINAL_PRESENCE_SET, "final_presence_set", "ss", RTX_EXTRA) /* (absence_set string string) means that each CPU functional unit in the first string can be reserved only if each of the unit patterns whose names are in the second string is not reserved. This is an asymmetric relation (exclusion_set is analogous to this one, but it is symmetric). CPU units or unit patterns in the string are separated by commas. A pattern is one unit name, or several unit names separated by white space. For example, this is useful for describing that, on a VLIW processor, slot2 can not be reserved after slot0 or slot1 has been reserved. We could describe it with the following construction (absence_set "slot2" "slot0, slot1") Or slot2 can not be reserved if slot0 and unit b0 are reserved, or if slot1 and unit b1 are reserved. In this case we could write (absence_set "slot2" "slot0 b0, slot1 b1") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(ABSENCE_SET, "absence_set", "ss", RTX_EXTRA) /* (final_absence_set string string) is analogous to `absence_set' but checking is done on the result (state) reservation. See comments for `final_presence_set'. */ DEF_RTL_EXPR(FINAL_ABSENCE_SET, "final_absence_set", "ss", RTX_EXTRA) /* (define_bypass number out_insn_names in_insn_names) names a bypass with the given latency (the first number) from insns given by the first string (see define_insn_reservation) into insns given by the second string. Insn names in the strings are separated by commas. The third operand is the optional name of a function which is an additional guard for the bypass. The function will get the two insns as parameters. If the function returns zero, the bypass will be ignored for this case. An additional guard is necessary to recognize complicated bypasses, e.g. when the consumer is a load address. */ DEF_RTL_EXPR(DEFINE_BYPASS, "define_bypass", "issS", RTX_EXTRA) /* (define_automaton string) describes the names of the automata generated and used for pipeline hazard recognition. The names are separated by commas.
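As a purely illustrative sketch (the automaton and unit names here are hypothetical, not taken from any real target description), a two-automaton layout might be declared as (define_automaton "pipe_int,pipe_fp") together with (define_cpu_unit "alu0,alu1" "pipe_int") and (define_cpu_unit "fpu" "pipe_fp").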
It is actually possible to generate a single automaton, but unfortunately it can be very large. If we use more than one automaton, their combined size is usually smaller than that of the single one. The automaton name is used in define_cpu_unit and define_query_cpu_unit. All automata should have unique names. */ DEF_RTL_EXPR(DEFINE_AUTOMATON, "define_automaton", "s", RTX_EXTRA) /* (automata_option string) describes an option for the generation of automata. Currently there are the following options: o "no-minimization" which disables minimization of the automata. This is only worth doing when we are debugging the description and need to look more closely at the reservations of states. o "time" which means printing additional time statistics about the generation of automata. o "v" which means generation of a file describing the resulting automata. The file has suffix `.dfa' and can be used for verification and debugging of the description. o "w" which means generation of warnings instead of errors for non-critical problems. o "ndfa" which makes nondeterministic finite state automata. o "progress" which means output of a progress bar showing how many states were generated so far for the automaton being processed. */ DEF_RTL_EXPR(AUTOMATA_OPTION, "automata_option", "s", RTX_EXTRA) /* (define_reservation string string) names a reservation (the first string) of cpu functional units (the 2nd string). Sometimes unit reservations for different insns contain common parts. In such cases, you can describe the common part and use its name (the 1st parameter) in the regular expressions in define_insn_reservation. All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_RESERVATION, "define_reservation", "ss", RTX_EXTRA) /* (define_insn_reservation name default_latency condition regexpr) describes the reservation of cpu functional units (the 3rd operand) for an instruction which is selected by the condition (the 2nd parameter). The first parameter is used for output of debugging information. The reservations are described by a regular expression according to the following syntax: regexp = regexp "," oneof | oneof oneof = oneof "|" allof | allof allof = allof "+" repeat | repeat repeat = element "*" number | element element = cpu_function_unit_name | reservation_name | result_name | "nothing" | "(" regexp ")" 1. "," is used for describing the start of the next cycle in the reservation. 2. "|" is used for describing the reservation described by the first regular expression *or* the reservation described by the second regular expression *or* etc. 3. "+" is used for describing the reservation described by the first regular expression *and* the reservation described by the second regular expression *and* etc. 4. "*" is used for convenience and simply means a sequence in which the regular expression is repeated NUMBER times with cycle advancing (see ","). 5. a cpu functional unit name means its reservation. 6. a reservation name -- see define_reservation. 7. the string "nothing" means no unit reservation. */ DEF_RTL_EXPR(DEFINE_INSN_RESERVATION, "define_insn_reservation", "sies", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions used for insn attributes. These also do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Definition of an insn attribute.
1st operand: name of the attribute 2nd operand: comma-separated list of possible attribute values 3rd operand: expression for the default value of the attribute. */ DEF_RTL_EXPR(DEFINE_ATTR, "define_attr", "sse", RTX_EXTRA) /* Marker for the name of an attribute. */ DEF_RTL_EXPR(ATTR, "attr", "s", RTX_EXTRA) /* For use in the last (optional) operand of DEFINE_INSN or DEFINE_PEEPHOLE and in DEFINE_ASM_INSN to specify an attribute to assign to insns matching that pattern. (set_attr "name" "value") is equivalent to (set (attr "name") (const_string "value")) */ DEF_RTL_EXPR(SET_ATTR, "set_attr", "ss", RTX_EXTRA) /* In the last operand of DEFINE_INSN and DEFINE_PEEPHOLE, this can be used to specify that attribute values are to be assigned according to the alternative matched. The following three expressions are equivalent: (set (attr "att") (cond [(eq_attr "alternative" "1") (const_string "a1") (eq_attr "alternative" "2") (const_string "a2")] (const_string "a3"))) (set_attr_alternative "att" [(const_string "a1") (const_string "a2") (const_string "a3")]) (set_attr "att" "a1,a2,a3") */ DEF_RTL_EXPR(SET_ATTR_ALTERNATIVE, "set_attr_alternative", "sE", RTX_EXTRA) /* A conditional expression true if the value of the specified attribute of the current insn equals the specified value. The first operand is the attribute name and the second is the comparison value. */ DEF_RTL_EXPR(EQ_ATTR, "eq_attr", "ss", RTX_EXTRA) /* A special case of the above representing a set of alternatives. The first operand is a bitmap of the set, the second one is the default value. */ DEF_RTL_EXPR(EQ_ATTR_ALT, "eq_attr_alt", "ii", RTX_EXTRA) /* A conditional expression which is true if the specified flag is true for the insn being scheduled in reorg. genattr.c defines the following flags which can be tested by (attr_flag "foo") expressions in eligible_for_delay: forward, backward, very_likely, likely, very_unlikely, and unlikely. */ DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types used for things in the instruction chain. All formats must start with "iuu" to handle the chain. Each insn expression holds an rtl instruction and its semantics during back-end processing. See the macros in "rtl.h" for the meaning of each rtx->u.fld[]. ---------------------------------------------------------------------- */ /* An instruction that cannot jump. */ DEF_RTL_EXPR(INSN, "insn", "iuuBieiee", RTX_INSN) /* An instruction that can possibly jump. Fields ( rtx->u.fld[] ) have exactly the same meaning as INSN's. */ DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "iuuBieiee0", RTX_INSN) /* An instruction that can possibly call a subroutine but which will not change which instruction comes next in the current function. Field ( rtx->u.fld[9] ) is CALL_INSN_FUNCTION_USAGE. All other fields ( rtx->u.fld[] ) have exactly the same meaning as INSN's. */ DEF_RTL_EXPR(CALL_INSN, "call_insn", "iuuBieieee", RTX_INSN) /* A marker that indicates that control will not flow through. */ DEF_RTL_EXPR(BARRIER, "barrier", "iuu000000", RTX_EXTRA) /* Holds a label that is followed by instructions. Operand: 4: is used in jump.c for the use-count of the label. 5: is used in flow.c to point to the chain of label_ref's to this label. 6: is a number that is unique in the entire compilation. 7: is the user-given name of the label, if any. */ DEF_RTL_EXPR(CODE_LABEL, "code_label", "iuuB00is", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* Say where in the code a source line starts, for symbol table's sake.
Operand: 4: unused if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: CODE_LABEL_NUMBER if line number == NOTE_INSN_DELETED_LABEL. */ #else /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: filename, if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: unique number if line number == note_insn_deleted_label. */ #endif DEF_RTL_EXPR(NOTE, "note", "iuuB0ni", RTX_EXTRA) /* ---------------------------------------------------------------------- Top level constituents of INSN, JUMP_INSN and CALL_INSN. ---------------------------------------------------------------------- */ /* Conditionally execute code. Operand 0 is the condition that if true, the code is executed. Operand 1 is the code to be executed (typically a SET). Semantics are that there are no side effects if the condition is false. This pattern is created automatically by the if_convert pass run after reload or by target-specific splitters. */ DEF_RTL_EXPR(COND_EXEC, "cond_exec", "ee", RTX_EXTRA) /* Several operations to be done in parallel (perhaps under COND_EXEC). */ DEF_RTL_EXPR(PARALLEL, "parallel", "E", RTX_EXTRA) /* A string that is passed through to the assembler as input. One can obviously pass comments through by using the assembler comment syntax. These occur in an insn all by themselves as the PATTERN. They also appear inside an ASM_OPERANDS as a convenient way to hold a string. */ DEF_RTL_EXPR(ASM_INPUT, "asm_input", "s", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEi", RTX_EXTRA) #else /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the name of the containing source file. 7th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEsi", RTX_EXTRA) #endif /* A machine-specific operation. 1st operand is a vector of operands being used by the operation so that any needed reloads can be done. 2nd operand is a unique value saying which of a number of machine-specific operations is to be performed. (Note that the vector must be the first operand because of the way that genrecog.c record positions within an insn.) This can occur all by itself in a PATTERN, as a component of a PARALLEL, or inside an expression. */ DEF_RTL_EXPR(UNSPEC, "unspec", "Ei", RTX_EXTRA) /* Similar, but a volatile operation and one which may trap. 
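As an illustrative sketch only (the operands and the selector values here are made up for this note), a target might write (unspec_volatile:SI [(const_int 0)] 1) for a machine-specific operation that must not be moved, combined, or deleted, whereas the plain (unspec:SI [(reg:SI 0)] 2) form is used when the operation has no such side effects; the integer selects which machine-specific operation is meant.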
*/ DEF_RTL_EXPR(UNSPEC_VOLATILE, "unspec_volatile", "Ei", RTX_EXTRA) /* Vector of addresses, stored as full words. */ /* Each element is a LABEL_REF to a CODE_LABEL whose address we want. */ DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", RTX_EXTRA) /* Vector of address differences X0 - BASE, X1 - BASE, ... First operand is BASE; the vector contains the X's. The machine mode of this rtx says how much space to leave for each difference and is adjusted by branch shortening if CASE_VECTOR_SHORTEN_MODE is defined. The third and fourth operands store the target labels with the minimum and maximum addresses respectively. The fifth operand stores flags for use by branch shortening. Set at the start of shorten_branches: min_align: the minimum alignment for any of the target labels. base_after_vec: true iff BASE is after the ADDR_DIFF_VEC. min_after_vec: true iff minimum addr target label is after the ADDR_DIFF_VEC. max_after_vec: true iff maximum addr target label is after the ADDR_DIFF_VEC. min_after_base: true iff minimum address target label is after BASE. max_after_base: true iff maximum address target label is after BASE. Set by the actual branch shortening process: offset_unsigned: true iff offsets have to be treated as unsigned. scale: scaling that is necessary to make offsets fit into the mode. The third, fourth and fifth operands are only valid when CASE_VECTOR_SHORTEN_MODE is defined, and only in an optimizing compilations. */ DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEee0", RTX_EXTRA) /* Memory prefetch, with attributes supported on some targets. Operand 1 is the address of the memory to fetch. Operand 2 is 1 for a write access, 0 otherwise. Operand 3 is the level of temporal locality; 0 means there is no temporal locality and 1, 2, and 3 are for increasing levels of temporal locality. The attributes specified by operands 2 and 3 are ignored for targets whose prefetch instructions do not support them. */ DEF_RTL_EXPR(PREFETCH, "prefetch", "eee", RTX_EXTRA) /* ---------------------------------------------------------------------- At the top level of an instruction (perhaps under PARALLEL). ---------------------------------------------------------------------- */ /* Assignment. Operand 1 is the location (REG, MEM, PC, CC0 or whatever) assigned to. Operand 2 is the value stored there. ALL assignment must use SET. Instructions that do multiple assignments must use multiple SET, under PARALLEL. */ DEF_RTL_EXPR(SET, "set", "ee", RTX_EXTRA) /* Indicate something is used in a way that we don't want to explain. For example, subroutine calls will use the register in which the static chain is passed. */ DEF_RTL_EXPR(USE, "use", "e", RTX_EXTRA) /* Indicate something is clobbered in a way that we don't want to explain. For example, subroutine calls will clobber some physical registers (the ones that are by convention not saved). */ DEF_RTL_EXPR(CLOBBER, "clobber", "e", RTX_EXTRA) /* Call a subroutine. Operand 1 is the address to call. Operand 2 is the number of arguments. */ DEF_RTL_EXPR(CALL, "call", "ee", RTX_EXTRA) /* Return from a subroutine. */ DEF_RTL_EXPR(RETURN, "return", "", RTX_EXTRA) /* Conditional trap. Operand 1 is the condition. Operand 2 is the trap code. For an unconditional trap, make the condition (const_int 1). */ DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA) /* Placeholder for _Unwind_Resume before we know if a function call or a branch is needed. Operand 1 is the exception region from which control is flowing. 
*/ DEF_RTL_EXPR(RESX, "resx", "i", RTX_EXTRA) /* ---------------------------------------------------------------------- Primitive values for use in expressions. ---------------------------------------------------------------------- */ /* numeric integer constant */ DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ) /* numeric floating point constant. Operands hold the value. They are all 'w' and there may be from 2 to 6; see real.h. */ DEF_RTL_EXPR(CONST_DOUBLE, "const_double", CONST_DOUBLE_FORMAT, RTX_CONST_OBJ) /* Describes a vector constant. */ DEF_RTL_EXPR(CONST_VECTOR, "const_vector", "E", RTX_EXTRA) /* String constant. Used only for attributes right now. */ DEF_RTL_EXPR(CONST_STRING, "const_string", "s", RTX_OBJ) /* This is used to encapsulate an expression whose value is constant (such as the sum of a SYMBOL_REF and a CONST_INT) so that it will be recognized as a constant operand rather than by arithmetic instructions. */ DEF_RTL_EXPR(CONST, "const", "e", RTX_CONST_OBJ) /* program counter. Ordinary jumps are represented by a SET whose first operand is (PC). */ DEF_RTL_EXPR(PC, "pc", "", RTX_OBJ) /* Used in the cselib routines to describe a value. */ DEF_RTL_EXPR(VALUE, "value", "0", RTX_OBJ) /* A register. The "operand" is the register number, accessed with the REGNO macro. If this number is less than FIRST_PSEUDO_REGISTER then a hardware register is being referred to. The second operand holds the original register number - this will be different for a pseudo register that got turned into a hard register. This rtx needs to have as many (or more) fields as a MEM, since we can change REG rtx's into MEMs during reload. */ DEF_RTL_EXPR(REG, "reg", "i00", RTX_OBJ) /* A scratch register. This represents a register used only within a single insn. It will be turned into a REG during register allocation or reload unless the constraint indicates that the register won't be needed, in which case it can remain a SCRATCH. This code is marked as having one operand so it can be turned into a REG. */ DEF_RTL_EXPR(SCRATCH, "scratch", "0", RTX_OBJ) /* One word of a multi-word value. The first operand is the complete value; the second says which word. The WORDS_BIG_ENDIAN flag controls whether word number 0 (as numbered in a SUBREG) is the most or least significant word. This is also used to refer to a value in a different machine mode. For example, it can be used to refer to a SImode value as if it were QImode, or vice versa. Then the word number is always 0. */ DEF_RTL_EXPR(SUBREG, "subreg", "ei", RTX_EXTRA) /* This one-argument rtx is used for move instructions that are guaranteed to alter only the low part of a destination. Thus, (SET (SUBREG:HI (REG...)) (MEM:HI ...)) has an unspecified effect on the high part of REG, but (SET (STRICT_LOW_PART (SUBREG:HI (REG...))) (MEM:HI ...)) is guaranteed to alter only the bits of REG that are in HImode. The actual instruction used is probably the same in both cases, but the register constraints may be tighter when STRICT_LOW_PART is in use. */ DEF_RTL_EXPR(STRICT_LOW_PART, "strict_low_part", "e", RTX_EXTRA) /* (CONCAT a b) represents the virtual concatenation of a and b to make a value that has as many bits as a and b put together. This is used for complex values. Normally it appears only in DECL_RTLs and during RTL generation, but not in the insn chain. */ DEF_RTL_EXPR(CONCAT, "concat", "ee", RTX_OBJ) /* A memory location; operand is the address. The second operand is the alias set to which this MEM belongs.
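A typical instance (given here only for illustration) is (mem:SI (plus:SI (reg:SI 3) (const_int 4))), a word accessed at offset 4 from the address held in register 3.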
We use `0' instead of `w' for this field so that the field need not be specified in machine descriptions. */ DEF_RTL_EXPR(MEM, "mem", "e0", RTX_OBJ) /* Reference to an assembler label in the code for this function. The operand is a CODE_LABEL found in the insn chain. The unprinted fields 1 and 2 are used in flow.c for the LABEL_NEXTREF and CONTAINING_INSN. */ DEF_RTL_EXPR(LABEL_REF, "label_ref", "u00", RTX_CONST_OBJ) /* Reference to a named label: Operand 0: label name Operand 1: flags (see SYMBOL_FLAG_* in rtl.h) Operand 2: tree from which this symbol is derived, or null. This is either a DECL node, or some kind of constant. */ DEF_RTL_EXPR(SYMBOL_REF, "symbol_ref", "s00", RTX_CONST_OBJ) /* The condition code register is represented, in our imagination, as a register holding a value that can be compared to zero. In fact, the machine has already compared them and recorded the results; but instructions that look at the condition code pretend to be looking at the entire value and comparing it. */ DEF_RTL_EXPR(CC0, "cc0", "", RTX_OBJ) /* ===================================================================== A QUEUED expression really points to a member of the queue of instructions to be output later for postincrement/postdecrement. QUEUED expressions never become part of instructions. When a QUEUED expression would be put into an instruction, instead either the incremented variable or a copy of its previous value is used. Operands are: 0. the variable to be incremented (a REG rtx). 1. the incrementing instruction, or 0 if it hasn't been output yet. 2. A REG rtx for a copy of the old value of the variable, or 0 if none yet. 3. the body to use for the incrementing instruction 4. the next QUEUED expression in the queue. ====================================================================== */ DEF_RTL_EXPR(QUEUED, "queued", "eeeee", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions for operators in an rtl pattern ---------------------------------------------------------------------- */ /* if_then_else. This is used in representing ordinary conditional jump instructions. Operand: 0: condition 1: then expr 2: else expr */ DEF_RTL_EXPR(IF_THEN_ELSE, "if_then_else", "eee", RTX_TERNARY) /* General conditional. The first operand is a vector composed of pairs of expressions. The first element of each pair is evaluated, in turn. The value of the conditional is the second expression of the first pair whose first expression evaluates nonzero. If none of the expressions is true, the second operand will be used as the value of the conditional. This should be replaced with use of IF_THEN_ELSE. */ DEF_RTL_EXPR(COND, "cond", "Ee", RTX_EXTRA) /* Comparison, produces a condition code result. */ DEF_RTL_EXPR(COMPARE, "compare", "ee", RTX_BIN_ARITH) /* plus */ DEF_RTL_EXPR(PLUS, "plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1. */ DEF_RTL_EXPR(MINUS, "minus", "ee", RTX_BIN_ARITH) /* Minus operand 0. */ DEF_RTL_EXPR(NEG, "neg", "e", RTX_UNARY) DEF_RTL_EXPR(MULT, "mult", "ee", RTX_COMM_ARITH) /* Operand 0 divided by operand 1. */ DEF_RTL_EXPR(DIV, "div", "ee", RTX_BIN_ARITH) /* Remainder of operand 0 divided by operand 1. */ DEF_RTL_EXPR(MOD, "mod", "ee", RTX_BIN_ARITH) /* Unsigned divide and remainder. */ DEF_RTL_EXPR(UDIV, "udiv", "ee", RTX_BIN_ARITH) DEF_RTL_EXPR(UMOD, "umod", "ee", RTX_BIN_ARITH) /* Bitwise operations. 
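For example (illustrative only), (and:SI (reg:SI 1) (const_int 255)) masks the low byte of register 1, and (xor:SI (reg:SI 1) (const_int -1)) computes the same value as (not:SI (reg:SI 1)).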
*/ DEF_RTL_EXPR(AND, "and", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(IOR, "ior", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(XOR, "xor", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(NOT, "not", "e", RTX_UNARY) /* Operand: 0: value to be shifted. 1: number of bits. */ DEF_RTL_EXPR(ASHIFT, "ashift", "ee", RTX_BIN_ARITH) /* shift left */ DEF_RTL_EXPR(ROTATE, "rotate", "ee", RTX_BIN_ARITH) /* rotate left */ DEF_RTL_EXPR(ASHIFTRT, "ashiftrt", "ee", RTX_BIN_ARITH) /* arithmetic shift right */ DEF_RTL_EXPR(LSHIFTRT, "lshiftrt", "ee", RTX_BIN_ARITH) /* logical shift right */ DEF_RTL_EXPR(ROTATERT, "rotatert", "ee", RTX_BIN_ARITH) /* rotate right */ /* Minimum and maximum values of two operands. We need both signed and unsigned forms. (We cannot use MIN for SMIN because it conflicts with a macro of the same name.) */ DEF_RTL_EXPR(SMIN, "smin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(SMAX, "smax", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMIN, "umin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMAX, "umax", "ee", RTX_COMM_ARITH) /* These unary operations are used to represent incrementation and decrementation as they occur in memory addresses. The amount of increment or decrement are not represented because they can be understood from the machine-mode of the containing MEM. These operations exist in only two cases: 1. pushes onto the stack. 2. created automatically by the life_analysis pass in flow.c. */ DEF_RTL_EXPR(PRE_DEC, "pre_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(PRE_INC, "pre_inc", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_DEC, "post_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_INC, "post_inc", "e", RTX_AUTOINC) /* These binary operations are used to represent generic address side-effects in memory addresses, except for simple incrementation or decrementation which use the above operations. They are created automatically by the life_analysis pass in flow.c. The first operand is a REG which is used as the address. The second operand is an expression that is assigned to the register, either before (PRE_MODIFY) or after (POST_MODIFY) evaluating the address. Currently, the compiler can only handle second operands of the form (plus (reg) (reg)) and (plus (reg) (const_int)), where the first operand of the PLUS has to be the same register as the first operand of the *_MODIFY. */ DEF_RTL_EXPR(PRE_MODIFY, "pre_modify", "ee", RTX_AUTOINC) DEF_RTL_EXPR(POST_MODIFY, "post_modify", "ee", RTX_AUTOINC) /* Comparison operations. The ordered comparisons exist in two flavors, signed and unsigned. */ DEF_RTL_EXPR(NE, "ne", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(EQ, "eq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(GE, "ge", "ee", RTX_COMPARE) DEF_RTL_EXPR(GT, "gt", "ee", RTX_COMPARE) DEF_RTL_EXPR(LE, "le", "ee", RTX_COMPARE) DEF_RTL_EXPR(LT, "lt", "ee", RTX_COMPARE) DEF_RTL_EXPR(GEU, "geu", "ee", RTX_COMPARE) DEF_RTL_EXPR(GTU, "gtu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LEU, "leu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LTU, "ltu", "ee", RTX_COMPARE) /* Additional floating point unordered comparison flavors. */ DEF_RTL_EXPR(UNORDERED, "unordered", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(ORDERED, "ordered", "ee", RTX_COMM_COMPARE) /* These are equivalent to unordered or ... */ DEF_RTL_EXPR(UNEQ, "uneq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(UNGE, "unge", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNGT, "ungt", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLE, "unle", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLT, "unlt", "ee", RTX_COMPARE) /* This is an ordered NE, ie !UNEQ, ie false for NaN. */ DEF_RTL_EXPR(LTGT, "ltgt", "ee", RTX_COMM_COMPARE) /* Represents the result of sign-extending the sole operand. 
The machine modes of the operand and of the SIGN_EXTEND expression determine how much sign-extension is going on. */ DEF_RTL_EXPR(SIGN_EXTEND, "sign_extend", "e", RTX_UNARY) /* Similar for zero-extension (such as unsigned short to int). */ DEF_RTL_EXPR(ZERO_EXTEND, "zero_extend", "e", RTX_UNARY) /* Similar but here the operand has a wider mode. */ DEF_RTL_EXPR(TRUNCATE, "truncate", "e", RTX_UNARY) /* Similar for extending floating-point values (such as SFmode to DFmode). */ DEF_RTL_EXPR(FLOAT_EXTEND, "float_extend", "e", RTX_UNARY) DEF_RTL_EXPR(FLOAT_TRUNCATE, "float_truncate", "e", RTX_UNARY) /* Conversion of fixed point operand to floating point value. */ DEF_RTL_EXPR(FLOAT, "float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to fixed point value. Value is defined only when the operand's value is an integer. With floating-point machine mode (and operand with same mode): Operand is rounded toward zero to produce an integer value represented in floating point. */ DEF_RTL_EXPR(FIX, "fix", "e", RTX_UNARY) /* Conversion of unsigned fixed point operand to floating point value. */ DEF_RTL_EXPR(UNSIGNED_FLOAT, "unsigned_float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to *unsigned* fixed point value. Value is defined only when the operand's value is an integer. */ DEF_RTL_EXPR(UNSIGNED_FIX, "unsigned_fix", "e", RTX_UNARY) /* Absolute value */ DEF_RTL_EXPR(ABS, "abs", "e", RTX_UNARY) /* Square root */ DEF_RTL_EXPR(SQRT, "sqrt", "e", RTX_UNARY) /* Find first bit that is set. Value is 1 + number of trailing zeros in the arg., or 0 if arg is 0. */ DEF_RTL_EXPR(FFS, "ffs", "e", RTX_UNARY) /* Count leading zeros. */ DEF_RTL_EXPR(CLZ, "clz", "e", RTX_UNARY) /* Count trailing zeros. */ DEF_RTL_EXPR(CTZ, "ctz", "e", RTX_UNARY) /* Population count (number of 1 bits). */ DEF_RTL_EXPR(POPCOUNT, "popcount", "e", RTX_UNARY) /* Population parity (number of 1 bits modulo 2). */ DEF_RTL_EXPR(PARITY, "parity", "e", RTX_UNARY) /* Reference to a signed bit-field of specified size and position. Operand 0 is the memory unit (usually SImode or QImode) which contains the field's first bit. Operand 1 is the width, in bits. Operand 2 is the number of bits in the memory unit before the first bit of this field. If BITS_BIG_ENDIAN is defined, the first bit is the msb and operand 2 counts from the msb of the memory unit. Otherwise, the first bit is the lsb and operand 2 counts from the lsb of the memory unit. */ DEF_RTL_EXPR(SIGN_EXTRACT, "sign_extract", "eee", RTX_BITFIELD_OPS) /* Similar for unsigned bit-field. */ DEF_RTL_EXPR(ZERO_EXTRACT, "zero_extract", "eee", RTX_BITFIELD_OPS) /* For RISC machines. These save memory when splitting insns. */ /* HIGH are the high-order bits of a constant expression. */ DEF_RTL_EXPR(HIGH, "high", "e", RTX_CONST_OBJ) /* LO_SUM is the sum of a register and the low-order bits of a constant expression. */ DEF_RTL_EXPR(LO_SUM, "lo_sum", "ee", RTX_OBJ) /* Header for range information. Operand 0 is the NOTE_INSN_RANGE_BEG insn. Operand 1 is the NOTE_INSN_RANGE_END insn. Operand 2 is a vector of all of the registers that can be substituted within this range. Operand 3 is the number of calls in the range. Operand 4 is the number of insns in the range. Operand 5 is the unique range number for this range. Operand 6 is the basic block # of the start of the live range. Operand 7 is the basic block # of the end of the live range. Operand 8 is the loop depth. 
Operand 9 is a bitmap of the registers live at the start of the range. Operand 10 is a bitmap of the registers live at the end of the range. Operand 11 is marker number for the start of the range. Operand 12 is the marker number for the end of the range. */ DEF_RTL_EXPR(RANGE_INFO, "range_info", "uuEiiiiiibbii", RTX_EXTRA) /* Registers that can be substituted within the range. Operand 0 is the original pseudo register number. Operand 1 will be filled in with the pseudo register the value is copied for the duration of the range. Operand 2 is the number of references within the range to the register. Operand 3 is the number of sets or clobbers of the register in the range. Operand 4 is the number of deaths the register has. Operand 5 is the copy flags that give the status of whether a copy is needed from the original register to the new register at the beginning of the range, or whether a copy from the new register back to the original at the end of the range. Operand 6 is the live length. Operand 7 is the number of calls that this register is live across. Operand 8 is the symbol node of the variable if the register is a user variable. Operand 9 is the block node that the variable is declared in if the register is a user variable. */ DEF_RTL_EXPR(RANGE_REG, "range_reg", "iiiiiiiitt", RTX_EXTRA) /* Information about a local variable's ranges. Operand 0 is an EXPR_LIST of the different ranges a variable is in where it is copied to a different pseudo register. Operand 1 is the block that the variable is declared in. Operand 2 is the number of distinct ranges. */ DEF_RTL_EXPR(RANGE_VAR, "range_var", "eti", RTX_EXTRA) /* Information about the registers that are live at the current point. Operand 0 is the live bitmap. Operand 1 is the original block number. */ DEF_RTL_EXPR(RANGE_LIVE, "range_live", "bi", RTX_EXTRA) /* Describes a merge operation between two vector values. Operands 0 and 1 are the vectors to be merged, operand 2 is a bitmask that specifies where the parts of the result are taken from. Set bits indicate operand 0, clear bits indicate operand 1. The parts are defined by the mode of the vectors. */ DEF_RTL_EXPR(VEC_MERGE, "vec_merge", "eee", RTX_TERNARY) /* Describes an operation that selects parts of a vector. Operands 0 is the source vector, operand 1 is a PARALLEL that contains a CONST_INT for each of the subparts of the result vector, giving the number of the source subpart that should be stored into it. */ DEF_RTL_EXPR(VEC_SELECT, "vec_select", "ee", RTX_BIN_ARITH) /* Describes a vector concat operation. Operands 0 and 1 are the source vectors, the result is a vector that is as long as operands 0 and 1 combined and is the concatenation of the two source vectors. */ DEF_RTL_EXPR(VEC_CONCAT, "vec_concat", "ee", RTX_BIN_ARITH) /* Describes an operation that converts a small vector into a larger one by duplicating the input values. The output vector mode must have the same submodes as the input vector mode, and the number of output parts must be an integer multiple of the number of input parts. */ DEF_RTL_EXPR(VEC_DUPLICATE, "vec_duplicate", "e", RTX_UNARY) /* Addition with signed saturation */ DEF_RTL_EXPR(SS_PLUS, "ss_plus", "ee", RTX_COMM_ARITH) /* Addition with unsigned saturation */ DEF_RTL_EXPR(US_PLUS, "us_plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1, with signed saturation. */ DEF_RTL_EXPR(SS_MINUS, "ss_minus", "ee", RTX_BIN_ARITH) /* Operand 0 minus operand 1, with unsigned saturation. 
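For instance (purely illustrative), (us_minus:QI (reg:QI 1) (reg:QI 2)) yields 0 rather than wrapping around when operand 1 is larger than operand 0.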
*/ DEF_RTL_EXPR(US_MINUS, "us_minus", "ee", RTX_BIN_ARITH) /* Signed saturating truncate. */ DEF_RTL_EXPR(SS_TRUNCATE, "ss_truncate", "e", RTX_UNARY) /* Unsigned saturating truncate. */ DEF_RTL_EXPR(US_TRUNCATE, "us_truncate", "e", RTX_UNARY) /* Information about the variable and its location. */ DEF_RTL_EXPR(VAR_LOCATION, "var_location", "te", RTX_EXTRA) /* Local variables: mode:c End: */ }; #undef DEF_RTL_EXPR /* Indexed by rtx code, gives a sequence of operand-types for rtx's of that code. The sequence is a C string in which each character describes one operand. */ const char * const rtx_format[NUM_RTX_CODE] = { /* "*" undefined. can cause a warning message "0" field is unused (or used in a phase-dependent manner) prints nothing "i" an integer prints the integer "n" like "i", but prints entries from `note_insn_name' "w" an integer of width HOST_BITS_PER_WIDE_INT prints the integer "s" a pointer to a string prints the string "S" like "s", but optional: the containing rtx may end before this operand "T" like "s", but treated specially by the RTL reader; only found in machine description patterns. "e" a pointer to an rtl expression prints the expression "E" a pointer to a vector that points to a number of rtl expressions prints a list of the rtl expressions "V" like "E", but optional: the containing rtx may end before this operand "u" a pointer to another insn prints the uid of the insn. "b" is a pointer to a bitmap header. "B" is a basic block pointer. "t" is a tree pointer. */ #define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) FORMAT , /* This file contains the definitions and documentation for the Register Transfer Expressions (rtx's) that make up the Register Transfer Language (rtl) used in the Back End of the GNU compiler. Copyright (C) 1987, 1988, 1992, 1994, 1995, 1997, 1998, 1999, 2000, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Expression definitions and descriptions for all targets are in this file. Some will not be used for some targets. The fields in the cpp macro call "DEF_RTL_EXPR()" are used to create declarations in the C source of the compiler. The fields are: 1. The internal name of the rtx used in the C source. It is a tag in the enumeration "enum rtx_code" defined in "rtl.h". By convention these are in UPPER_CASE. 2. The name of the rtx in the external ASCII format read by read_rtx(), and printed by print_rtx(). These names are stored in rtx_name[]. By convention these are the internal (field 1) names in lower_case. 3. The print format, and type of each rtx->u.fld[] (field) in this rtx. These formats are stored in rtx_format[]. The meaning of the formats is documented in front of this array in rtl.c 4. The class of the rtx. These are stored in rtx_class and are accessed via the GET_RTX_CLASS macro. 
They are defined as follows: RTX_CONST_OBJ an rtx code that can be used to represent a constant object (e.g, CONST_INT) RTX_OBJ an rtx code that can be used to represent an object (e.g, REG, MEM) RTX_COMPARE an rtx code for a comparison (e.g, LT, GT) RTX_COMM_COMPARE an rtx code for a commutative comparison (e.g, EQ, NE, ORDERED) RTX_UNARY an rtx code for a unary arithmetic expression (e.g, NEG, NOT) RTX_COMM_ARITH an rtx code for a commutative binary operation (e.g,, PLUS, MULT) RTX_TERNARY an rtx code for a non-bitfield three input operation (IF_THEN_ELSE) RTX_BIN_ARITH an rtx code for a non-commutative binary operation (e.g., MINUS, DIV) RTX_BITFIELD_OPS an rtx code for a bit-field operation (ZERO_EXTRACT, SIGN_EXTRACT) RTX_INSN an rtx code for a machine insn (INSN, JUMP_INSN, CALL_INSN) RTX_MATCH an rtx code for something that matches in insns (e.g, MATCH_DUP) RTX_AUTOINC an rtx code for autoincrement addressing modes (e.g. POST_DEC) RTX_EXTRA everything else */ /* --------------------------------------------------------------------- Expressions (and "meta" expressions) used for structuring the rtl representation of a program. --------------------------------------------------------------------- */ /* an expression code name unknown to the reader */ DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", RTX_EXTRA) /* (NIL) is used by rtl reader and printer to represent a null pointer. */ DEF_RTL_EXPR(NIL, "nil", "*", RTX_EXTRA) /* include a file */ DEF_RTL_EXPR(INCLUDE, "include", "s", RTX_EXTRA) /* --------------------------------------------------------------------- Expressions used in constructing lists. --------------------------------------------------------------------- */ /* a linked list of expressions */ DEF_RTL_EXPR(EXPR_LIST, "expr_list", "ee", RTX_EXTRA) /* a linked list of instructions. The insns are represented in print by their uids. */ DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types for machine descriptions. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Appears only in machine descriptions. Means use the function named by the second arg (the string) as a predicate; if matched, store the structure that was matched in the operand table at index specified by the first arg (the integer). If the second arg is the null string, the structure is just stored. A third string argument indicates to the register allocator restrictions on where the operand can be allocated. If the target needs no restriction on any instruction this field should be the null string. The string is prepended by: '=' to indicate the operand is only written to. '+' to indicate the operand is both read and written to. Each character in the string represents an allocable class for an operand. 'g' indicates the operand can be any valid class. 'i' indicates the operand can be immediate (in the instruction) data. 'r' indicates the operand can be in a register. 'm' indicates the operand can be in memory. 'o' a subset of the 'm' class. Those memory addressing modes that can be offset at compile time (have a constant added to them). Other characters indicate target dependent operand classes and are described in each target's machine description. For instructions with more than one operand, sets of classes can be separated by a comma to indicate the appropriate multi-operand constraints. 
There must be a 1 to 1 correspondence between these sets of classes in all operands for an instruction. */ DEF_RTL_EXPR(MATCH_OPERAND, "match_operand", "iss", RTX_MATCH) /* Appears only in machine descriptions. Means match a SCRATCH or a register. When used to generate rtl, a SCRATCH is generated. As for MATCH_OPERAND, the mode specifies the desired mode and the first argument is the operand number. The second argument is the constraint. */ DEF_RTL_EXPR(MATCH_SCRATCH, "match_scratch", "is", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. */ DEF_RTL_EXPR(MATCH_DUP, "match_dup", "i", RTX_MATCH) /* Appears only in machine descriptions. Means apply a predicate, AND match recursively the operands of the rtx. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply (as a string, a function name). Operand 2 is a vector of expressions, each of which must match one subexpression of the rtx this construct is matching. */ DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means to match a PARALLEL of arbitrary length. The predicate is applied to the PARALLEL and the initial expressions in the PARALLEL are matched. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply to the PARALLEL. Operand 2 is a vector of expressions, each of which must match the corresponding element in the PARALLEL. */ DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_OPERATOR. */ DEF_RTL_EXPR(MATCH_OP_DUP, "match_op_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_PARALLEL. */ DEF_RTL_EXPR(MATCH_PAR_DUP, "match_par_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Defines the pattern for one kind of instruction. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN, "define_insn", "sEsTV", RTX_EXTRA) /* Definition of a peephole optimization. 1st operand: vector of insn patterns to match 2nd operand: C expression that must be true 3rd operand: template or C code to produce assembler output. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE, "define_peephole", "EsTV", RTX_EXTRA) /* Definition of a split operation. 1st operand: insn pattern to match 2nd operand: C expression that must be true 3rd operand: vector of insn patterns to place into a SEQUENCE 4th operand: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. 
(`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", RTX_EXTRA) /* Definition of an insn and associated split. This is the concatenation, with a few modifications, of a define_insn and a define_split which share the same pattern. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: C expression that must be true for split. This may start with "&&" in which case the split condition is the logical and of the insn condition and what follows the "&&" of this operand. 5: vector of insn patterns to place into a SEQUENCE 6: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). 7: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN_AND_SPLIT, "define_insn_and_split", "sEsTsESV", RTX_EXTRA) /* Definition of an RTL peephole operation. Follows the same arguments as define_split. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE2, "define_peephole2", "EsES", RTX_EXTRA) /* Define how to generate multiple insns for a standard insn name. 1st operand: the insn name. 2nd operand: vector of insn-patterns. Use match_operand to substitute an element of `recog_data.operand'. 3rd operand: C expression that must be true for this to be available. This may not test any operands. 4th operand: Extra C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", RTX_EXTRA) /* Define a requirement for delay slots. 1st operand: Condition involving insn attributes that, if true, indicates that the insn requires the number of delay slots shown. 2nd operand: Vector whose length is the three times the number of delay slots required. Each entry gives three conditions, each involving attributes. The first must be true for an insn to occupy that delay slot location. The second is true for all insns that can be annulled if the branch is true and the third is true for all insns that can be annulled if the branch is false. Multiple DEFINE_DELAYs may be present. They indicate differing requirements for delay slots. */ DEF_RTL_EXPR(DEFINE_DELAY, "define_delay", "eE", RTX_EXTRA) /* Define a set of insns that requires a function unit. This means that these insns produce their result after a delay and that there may be restrictions on the number of insns of this type that can be scheduled simultaneously. More than one DEFINE_FUNCTION_UNIT can be specified for a function unit. Each gives a set of operations and associated delays. The first three operands must be the same for each operation for the same function unit. All delays are specified in cycles. 
1st operand: Name of function unit (mostly for documentation) 2nd operand: Number of identical function units in CPU 3rd operand: Total number of simultaneous insns that can execute on this function unit; 0 if unlimited. 4th operand: Condition involving insn attribute, that, if true, specifies those insns that this expression applies to. 5th operand: Constant delay after which insn result will be available. 6th operand: Delay until next insn can be scheduled on the function unit executing this operation. The meaning depends on whether or not the next operand is supplied. 7th operand: If this operand is not specified, the 6th operand gives the number of cycles after the instruction matching the 4th operand begins using the function unit until a subsequent insn can begin. A value of zero should be used for a unit with no issue constraints. If only one operation can be executed a time and the unit is busy for the entire time, the 3rd operand should be specified as 1, the 6th operand should be specified as 0, and the 7th operand should not be specified. If this operand is specified, it is a list of attribute expressions. If an insn for which any of these expressions is true is currently executing on the function unit, the issue delay will be given by the 6th operand. Otherwise, the insn can be immediately scheduled (subject to the limit on the number of simultaneous operations executing on the unit.) */ DEF_RTL_EXPR(DEFINE_FUNCTION_UNIT, "define_function_unit", "siieiiV", RTX_EXTRA) /* Define attribute computation for `asm' instructions. */ DEF_RTL_EXPR(DEFINE_ASM_ATTRIBUTES, "define_asm_attributes", "V", RTX_EXTRA) /* Definition of a conditional execution meta operation. Automatically generates new instances of DEFINE_INSN, selected by having attribute "predicable" true. The new pattern will contain a COND_EXEC and the predicate at top-level. Operand: 0: The predicate pattern. The top-level form should match a relational operator. Operands should have only one alternative. 1: A C expression giving an additional condition for recognizing the generated pattern. 2: A template or C code to produce assembler output. */ DEF_RTL_EXPR(DEFINE_COND_EXEC, "define_cond_exec", "Ess", RTX_EXTRA) /* SEQUENCE appears in the result of a `gen_...' function for a DEFINE_EXPAND that wants to make several insns. Its elements are the bodies of the insns that should be made. `emit_insn' takes the SEQUENCE apart and makes separate insns. */ DEF_RTL_EXPR(SEQUENCE, "sequence", "E", RTX_EXTRA) /* Refers to the address of its argument. This is only used in alias.c. */ DEF_RTL_EXPR(ADDRESS, "address", "e", RTX_MATCH) /* ---------------------------------------------------------------------- Constructions for CPU pipeline description described by NDFAs. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* (define_cpu_unit string [string]) describes cpu functional units (separated by comma). 1st operand: Names of cpu functional units. 2nd operand: Name of automaton (see comments for DEFINE_AUTOMATON). All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_CPU_UNIT, "define_cpu_unit", "sS", RTX_EXTRA) /* (define_query_cpu_unit string [string]) describes cpu functional units analogously to define_cpu_unit. The reservation of such units can be queried for automaton state. 
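For example (with hypothetical names), (define_query_cpu_unit "slot0,slot1" "pipe") declares two queryable units belonging to the automaton "pipe".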
*/ DEF_RTL_EXPR(DEFINE_QUERY_CPU_UNIT, "define_query_cpu_unit", "sS", RTX_EXTRA) /* (exclusion_set string string) means that each CPU functional unit in the first string can not be reserved simultaneously with any unit whose name is in the second string, and vice versa. CPU units in the string are separated by commas. For example, this is useful for describing a CPU with a fully pipelined floating point functional unit which can simultaneously execute only single precision floating point insns or only double precision floating point insns. All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(EXCLUSION_SET, "exclusion_set", "ss", RTX_EXTRA) /* (presence_set string string) means that each CPU functional unit in the first string can not be reserved unless at least one of the unit patterns whose names are in the second string is reserved. This is an asymmetric relation. CPU units or unit patterns in the strings are separated by commas. A pattern is one unit name, or several unit names separated by white space. For example, this is useful for describing that, on a VLIW processor, slot1 is reserved only after slot0 has been reserved. We could describe it with the following construction (presence_set "slot1" "slot0") Or slot1 may be reserved only after both slot0 and unit b0 have been reserved. In this case we could write (presence_set "slot1" "slot0 b0") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(PRESENCE_SET, "presence_set", "ss", RTX_EXTRA) /* (final_presence_set string string) is analogous to `presence_set'. The difference between them is when the checking is done. When an instruction is issued in a given automaton state reflecting all current and planned unit reservations, the automaton state is changed. The first state is a source state, the second one is a result state. Checking for `presence_set' is done on the source state reservation; checking for `final_presence_set' is done on the result reservation. This construction is useful for describing a reservation which is actually two subsequent reservations. For example, if we use (presence_set "slot1" "slot0") the following insn will never be issued (because slot1 requires slot0, which is absent in the source state). (define_reservation "insn_and_nop" "slot0 + slot1") but it can be issued if we use the analogous `final_presence_set'. */ DEF_RTL_EXPR(FINAL_PRESENCE_SET, "final_presence_set", "ss", RTX_EXTRA) /* (absence_set string string) means that each CPU functional unit in the first string can be reserved only if each of the unit patterns whose names are in the second string is not reserved. This is an asymmetric relation (exclusion_set is analogous to this one, but it is symmetric). CPU units or unit patterns in the string are separated by commas. A pattern is one unit name, or several unit names separated by white space. For example, this is useful for describing that, on a VLIW processor, slot2 can not be reserved after slot0 or slot1 has been reserved. We could describe it with the following construction (absence_set "slot2" "slot0, slot1") Or slot2 can not be reserved if slot0 and unit b0 are reserved, or if slot1 and unit b1 are reserved. In this case we could write (absence_set "slot2" "slot0 b0, slot1 b1") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(ABSENCE_SET, "absence_set", "ss", RTX_EXTRA) /* (final_absence_set string string) is analogous to `absence_set' but checking is done on the result (state) reservation. See comments for `final_presence_set'.
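For example (again with hypothetical unit names), (final_absence_set "slot2" "slot0 b0") forbids reserving slot2 whenever slot0 and b0 are both reserved in the result state.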
*/ DEF_RTL_EXPR(FINAL_ABSENCE_SET, "final_absence_set", "ss", RTX_EXTRA) /* (define_bypass number out_insn_names in_insn_names) names a bypass with the given latency (the first number) from the insns given by the first string (see define_insn_reservation) into the insns given by the second string. Insn names in the strings are separated by commas. The third operand is the optional name of a function which is an additional guard for the bypass. The function will get the two insns as parameters. If the function returns zero, the bypass will be ignored for this case. The additional guard is necessary to recognize complicated bypasses, e.g. when the consumer is a load address. */ DEF_RTL_EXPR(DEFINE_BYPASS, "define_bypass", "issS", RTX_EXTRA) /* (define_automaton string) describes the names of the automata generated and used for pipeline hazard recognition. The names are separated by commas. It is actually possible to generate a single automaton, but unfortunately it can be very large. If we use more than one automaton, the total size of the automata is usually less than that of a single one. The automaton name is used in define_cpu_unit and define_query_cpu_unit. All automata should have unique names. */ DEF_RTL_EXPR(DEFINE_AUTOMATON, "define_automaton", "s", RTX_EXTRA) /* (automata_option string) describes an option for the generation of automata. Currently there are the following options: o "no-minimization" which performs no minimization of automata. This is only worth doing when we are debugging the description and need to look more accurately at the reservations of states. o "time" which means printing additional time statistics about the generation of automata. o "v" which means generation of a file describing the resulting automata. The file has the suffix `.dfa' and can be used for verification and debugging of the description. o "w" which means generation of a warning instead of an error for non-critical errors. o "ndfa" which makes nondeterministic finite state automata. o "progress" which means output of a progress bar showing how many states have been generated so far for the automaton being processed. */ DEF_RTL_EXPR(AUTOMATA_OPTION, "automata_option", "s", RTX_EXTRA) /* (define_reservation string string) names a reservation (the first string) of cpu functional units (the 2nd string). Sometimes unit reservations for different insns contain common parts. In such cases, you can describe the common part and use its name (the 1st parameter) in a regular expression in define_insn_reservation. All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_RESERVATION, "define_reservation", "ss", RTX_EXTRA) /* (define_insn_reservation name default_latency condition regexpr) describes the reservation of cpu functional units (the 3rd operand) for an instruction which is selected by the condition (the 2nd parameter). The first parameter is used for the output of debugging information. The reservations are described by a regular expression according to the following syntax: regexp = regexp "," oneof | oneof oneof = oneof "|" allof | allof allof = allof "+" repeat | repeat repeat = element "*" number | element element = cpu_function_unit_name | reservation_name | result_name | "nothing" | "(" regexp ")" 1. "," is used for describing the start of the next cycle in the reservation. 2. "|" is used for describing the reservation described by the first regular expression *or* the reservation described by the second regular expression *or* etc.
"+" is used for describing the reservation described by the first regular expression *and* the reservation described by the second regular expression *and* etc. 4. "*" is used for convenience and simply means sequence in which the regular expression are repeated NUMBER times with cycle advancing (see ","). 5. cpu functional unit name which means its reservation. 6. reservation name -- see define_reservation. 7. string "nothing" means no units reservation. */ DEF_RTL_EXPR(DEFINE_INSN_RESERVATION, "define_insn_reservation", "sies", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions used for insn attributes. These also do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Definition of an insn attribute. 1st operand: name of the attribute 2nd operand: comma-separated list of possible attribute values 3rd operand: expression for the default value of the attribute. */ DEF_RTL_EXPR(DEFINE_ATTR, "define_attr", "sse", RTX_EXTRA) /* Marker for the name of an attribute. */ DEF_RTL_EXPR(ATTR, "attr", "s", RTX_EXTRA) /* For use in the last (optional) operand of DEFINE_INSN or DEFINE_PEEPHOLE and in DEFINE_ASM_INSN to specify an attribute to assign to insns matching that pattern. (set_attr "name" "value") is equivalent to (set (attr "name") (const_string "value")) */ DEF_RTL_EXPR(SET_ATTR, "set_attr", "ss", RTX_EXTRA) /* In the last operand of DEFINE_INSN and DEFINE_PEEPHOLE, this can be used to specify that attribute values are to be assigned according to the alternative matched. The following three expressions are equivalent: (set (attr "att") (cond [(eq_attrq "alternative" "1") (const_string "a1") (eq_attrq "alternative" "2") (const_string "a2")] (const_string "a3"))) (set_attr_alternative "att" [(const_string "a1") (const_string "a2") (const_string "a3")]) (set_attr "att" "a1,a2,a3") */ DEF_RTL_EXPR(SET_ATTR_ALTERNATIVE, "set_attr_alternative", "sE", RTX_EXTRA) /* A conditional expression true if the value of the specified attribute of the current insn equals the specified value. The first operand is the attribute name and the second is the comparison value. */ DEF_RTL_EXPR(EQ_ATTR, "eq_attr", "ss", RTX_EXTRA) /* A special case of the above representing a set of alternatives. The first operand is bitmap of the set, the second one is the default value. */ DEF_RTL_EXPR(EQ_ATTR_ALT, "eq_attr_alt", "ii", RTX_EXTRA) /* A conditional expression which is true if the specified flag is true for the insn being scheduled in reorg. genattr.c defines the following flags which can be tested by (attr_flag "foo") expressions in eligible_for_delay. forward, backward, very_likely, likely, very_unlikely, and unlikely. */ DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types used for things in the instruction chain. All formats must start with "iuu" to handle the chain. Each insn expression holds an rtl instruction and its semantics during back-end processing. See macros's in "rtl.h" for the meaning of each rtx->u.fld[]. ---------------------------------------------------------------------- */ /* An instruction that cannot jump. */ DEF_RTL_EXPR(INSN, "insn", "iuuBieiee", RTX_INSN) /* An instruction that can possibly jump. Fields ( rtx->u.fld[] ) have exact same meaning as INSN's. 
*/ DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "iuuBieiee0", RTX_INSN) /* An instruction that can possibly call a subroutine but which will not change which instruction comes next in the current function. Field ( rtx->u.fld[9] ) is CALL_INSN_FUNCTION_USAGE. All other fields ( rtx->u.fld[] ) have exact same meaning as INSN's. */ DEF_RTL_EXPR(CALL_INSN, "call_insn", "iuuBieieee", RTX_INSN) /* A marker that indicates that control will not flow through. */ DEF_RTL_EXPR(BARRIER, "barrier", "iuu000000", RTX_EXTRA) /* Holds a label that is followed by instructions. Operand: 4: is used in jump.c for the use-count of the label. 5: is used in flow.c to point to the chain of label_ref's to this label. 6: is a number that is unique in the entire compilation. 7: is the user-given name of the label, if any. */ DEF_RTL_EXPR(CODE_LABEL, "code_label", "iuuB00is", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: unused if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: CODE_LABEL_NUMBER if line number == NOTE_INSN_DELETED_LABEL. */ #else /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: filename, if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: unique number if line number == note_insn_deleted_label. */ #endif DEF_RTL_EXPR(NOTE, "note", "iuuB0ni", RTX_EXTRA) /* ---------------------------------------------------------------------- Top level constituents of INSN, JUMP_INSN and CALL_INSN. ---------------------------------------------------------------------- */ /* Conditionally execute code. Operand 0 is the condition that if true, the code is executed. Operand 1 is the code to be executed (typically a SET). Semantics are that there are no side effects if the condition is false. This pattern is created automatically by the if_convert pass run after reload or by target-specific splitters. */ DEF_RTL_EXPR(COND_EXEC, "cond_exec", "ee", RTX_EXTRA) /* Several operations to be done in parallel (perhaps under COND_EXEC). */ DEF_RTL_EXPR(PARALLEL, "parallel", "E", RTX_EXTRA) /* A string that is passed through to the assembler as input. One can obviously pass comments through by using the assembler comment syntax. These occur in an insn all by themselves as the PATTERN. They also appear inside an ASM_OPERANDS as a convenient way to hold a string. */ DEF_RTL_EXPR(ASM_INPUT, "asm_input", "s", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEi", RTX_EXTRA) #else /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 
4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the name of the containing source file. 7th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEsi", RTX_EXTRA) #endif /* A machine-specific operation. 1st operand is a vector of operands being used by the operation so that any needed reloads can be done. 2nd operand is a unique value saying which of a number of machine-specific operations is to be performed. (Note that the vector must be the first operand because of the way that genrecog.c record positions within an insn.) This can occur all by itself in a PATTERN, as a component of a PARALLEL, or inside an expression. */ DEF_RTL_EXPR(UNSPEC, "unspec", "Ei", RTX_EXTRA) /* Similar, but a volatile operation and one which may trap. */ DEF_RTL_EXPR(UNSPEC_VOLATILE, "unspec_volatile", "Ei", RTX_EXTRA) /* Vector of addresses, stored as full words. */ /* Each element is a LABEL_REF to a CODE_LABEL whose address we want. */ DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", RTX_EXTRA) /* Vector of address differences X0 - BASE, X1 - BASE, ... First operand is BASE; the vector contains the X's. The machine mode of this rtx says how much space to leave for each difference and is adjusted by branch shortening if CASE_VECTOR_SHORTEN_MODE is defined. The third and fourth operands store the target labels with the minimum and maximum addresses respectively. The fifth operand stores flags for use by branch shortening. Set at the start of shorten_branches: min_align: the minimum alignment for any of the target labels. base_after_vec: true iff BASE is after the ADDR_DIFF_VEC. min_after_vec: true iff minimum addr target label is after the ADDR_DIFF_VEC. max_after_vec: true iff maximum addr target label is after the ADDR_DIFF_VEC. min_after_base: true iff minimum address target label is after BASE. max_after_base: true iff maximum address target label is after BASE. Set by the actual branch shortening process: offset_unsigned: true iff offsets have to be treated as unsigned. scale: scaling that is necessary to make offsets fit into the mode. The third, fourth and fifth operands are only valid when CASE_VECTOR_SHORTEN_MODE is defined, and only in an optimizing compilations. */ DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEee0", RTX_EXTRA) /* Memory prefetch, with attributes supported on some targets. Operand 1 is the address of the memory to fetch. Operand 2 is 1 for a write access, 0 otherwise. Operand 3 is the level of temporal locality; 0 means there is no temporal locality and 1, 2, and 3 are for increasing levels of temporal locality. The attributes specified by operands 2 and 3 are ignored for targets whose prefetch instructions do not support them. */ DEF_RTL_EXPR(PREFETCH, "prefetch", "eee", RTX_EXTRA) /* ---------------------------------------------------------------------- At the top level of an instruction (perhaps under PARALLEL). ---------------------------------------------------------------------- */ /* Assignment. Operand 1 is the location (REG, MEM, PC, CC0 or whatever) assigned to. Operand 2 is the value stored there. ALL assignment must use SET. Instructions that do multiple assignments must use multiple SET, under PARALLEL. */ DEF_RTL_EXPR(SET, "set", "ee", RTX_EXTRA) /* Indicate something is used in a way that we don't want to explain. 
For example, subroutine calls will use the register in which the static chain is passed. */ DEF_RTL_EXPR(USE, "use", "e", RTX_EXTRA) /* Indicate something is clobbered in a way that we don't want to explain. For example, subroutine calls will clobber some physical registers (the ones that are by convention not saved). */ DEF_RTL_EXPR(CLOBBER, "clobber", "e", RTX_EXTRA) /* Call a subroutine. Operand 1 is the address to call. Operand 2 is the number of arguments. */ DEF_RTL_EXPR(CALL, "call", "ee", RTX_EXTRA) /* Return from a subroutine. */ DEF_RTL_EXPR(RETURN, "return", "", RTX_EXTRA) /* Conditional trap. Operand 1 is the condition. Operand 2 is the trap code. For an unconditional trap, make the condition (const_int 1). */ DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA) /* Placeholder for _Unwind_Resume before we know if a function call or a branch is needed. Operand 1 is the exception region from which control is flowing. */ DEF_RTL_EXPR(RESX, "resx", "i", RTX_EXTRA) /* ---------------------------------------------------------------------- Primitive values for use in expressions. ---------------------------------------------------------------------- */ /* numeric integer constant */ DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ) /* numeric floating point constant. Operands hold the value. They are all 'w' and there may be from 2 to 6; see real.h. */ DEF_RTL_EXPR(CONST_DOUBLE, "const_double", CONST_DOUBLE_FORMAT, RTX_CONST_OBJ) /* Describes a vector constant. */ DEF_RTL_EXPR(CONST_VECTOR, "const_vector", "E", RTX_EXTRA) /* String constant. Used only for attributes right now. */ DEF_RTL_EXPR(CONST_STRING, "const_string", "s", RTX_OBJ) /* This is used to encapsulate an expression whose value is constant (such as the sum of a SYMBOL_REF and a CONST_INT) so that it will be recognized as a constant operand rather than by arithmetic instructions. */ DEF_RTL_EXPR(CONST, "const", "e", RTX_CONST_OBJ) /* program counter. Ordinary jumps are represented by a SET whose first operand is (PC). */ DEF_RTL_EXPR(PC, "pc", "", RTX_OBJ) /* Used in the cselib routines to describe a value. */ DEF_RTL_EXPR(VALUE, "value", "0", RTX_OBJ) /* A register. The "operand" is the register number, accessed with the REGNO macro. If this number is less than FIRST_PSEUDO_REGISTER then a hardware register is being referred to. The second operand holds the original register number - this will be different for a pseudo register that got turned into a hard register. This rtx needs to have as many (or more) fields as a MEM, since we can change REG rtx's into MEMs during reload. */ DEF_RTL_EXPR(REG, "reg", "i00", RTX_OBJ) /* A scratch register. This represents a register used only within a single insn. It will be turned into a REG during register allocation or reload unless the constraint indicates that the register won't be needed, in which case it can remain a SCRATCH. This code is marked as having one operand so it can be turned into a REG. */ DEF_RTL_EXPR(SCRATCH, "scratch", "0", RTX_OBJ) /* One word of a multi-word value. The first operand is the complete value; the second says which word. The WORDS_BIG_ENDIAN flag controls whether word number 0 (as numbered in a SUBREG) is the most or least significant word. This is also used to refer to a value in a different machine mode. For example, it can be used to refer to an SImode value as if it were QImode, or vice versa. Then the word number is always 0.
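For example (a sketch with an arbitrary pseudo register number), (subreg:QI (reg:SI 100) 0) refers to pseudo register 100 viewed in QImode; both values live in the same word, so the word number is 0 as described above.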
*/ DEF_RTL_EXPR(SUBREG, "subreg", "ei", RTX_EXTRA) /* This one-argument rtx is used for move instructions that are guaranteed to alter only the low part of a destination. Thus, (SET (SUBREG:HI (REG...)) (MEM:HI ...)) has an unspecified effect on the high part of REG, but (SET (STRICT_LOW_PART (SUBREG:HI (REG...))) (MEM:HI ...)) is guaranteed to alter only the bits of REG that are in HImode. The actual instruction used is probably the same in both cases, but the register constraints may be tighter when STRICT_LOW_PART is in use. */ DEF_RTL_EXPR(STRICT_LOW_PART, "strict_low_part", "e", RTX_EXTRA) /* (CONCAT a b) represents the virtual concatenation of a and b to make a value that has as many bits as a and b put together. This is used for complex values. Normally it appears only in DECL_RTLs and during RTL generation, but not in the insn chain. */ DEF_RTL_EXPR(CONCAT, "concat", "ee", RTX_OBJ) /* A memory location; operand is the address. The second operand is the alias set to which this MEM belongs. We use `0' instead of `w' for this field so that the field need not be specified in machine descriptions. */ DEF_RTL_EXPR(MEM, "mem", "e0", RTX_OBJ) /* Reference to an assembler label in the code for this function. The operand is a CODE_LABEL found in the insn chain. The unprinted fields 1 and 2 are used in flow.c for the LABEL_NEXTREF and CONTAINING_INSN. */ DEF_RTL_EXPR(LABEL_REF, "label_ref", "u00", RTX_CONST_OBJ) /* Reference to a named label: Operand 0: label name Operand 1: flags (see SYMBOL_FLAG_* in rtl.h) Operand 2: tree from which this symbol is derived, or null. This is either a DECL node, or some kind of constant. */ DEF_RTL_EXPR(SYMBOL_REF, "symbol_ref", "s00", RTX_CONST_OBJ) /* The condition code register is represented, in our imagination, as a register holding a value that can be compared to zero. In fact, the machine has already compared them and recorded the results; but instructions that look at the condition code pretend to be looking at the entire value and comparing it. */ DEF_RTL_EXPR(CC0, "cc0", "", RTX_OBJ) /* ===================================================================== A QUEUED expression really points to a member of the queue of instructions to be output later for postincrement/postdecrement. QUEUED expressions never become part of instructions. When a QUEUED expression would be put into an instruction, instead either the incremented variable or a copy of its previous value is used. Operands are: 0. the variable to be incremented (a REG rtx). 1. the incrementing instruction, or 0 if it hasn't been output yet. 2. A REG rtx for a copy of the old value of the variable, or 0 if none yet. 3. the body to use for the incrementing instruction 4. the next QUEUED expression in the queue. ====================================================================== */ DEF_RTL_EXPR(QUEUED, "queued", "eeeee", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions for operators in an rtl pattern ---------------------------------------------------------------------- */ /* if_then_else. This is used in representing ordinary conditional jump instructions. Operand: 0: condition 1: then expr 2: else expr */ DEF_RTL_EXPR(IF_THEN_ELSE, "if_then_else", "eee", RTX_TERNARY) /* General conditional. The first operand is a vector composed of pairs of expressions. The first element of each pair is evaluated, in turn. The value of the conditional is the second expression of the first pair whose first expression evaluates nonzero. 
If none of the expressions is true, the second operand will be used as the value of the conditional. This should be replaced with use of IF_THEN_ELSE. */ DEF_RTL_EXPR(COND, "cond", "Ee", RTX_EXTRA) /* Comparison, produces a condition code result. */ DEF_RTL_EXPR(COMPARE, "compare", "ee", RTX_BIN_ARITH) /* plus */ DEF_RTL_EXPR(PLUS, "plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1. */ DEF_RTL_EXPR(MINUS, "minus", "ee", RTX_BIN_ARITH) /* Minus operand 0. */ DEF_RTL_EXPR(NEG, "neg", "e", RTX_UNARY) DEF_RTL_EXPR(MULT, "mult", "ee", RTX_COMM_ARITH) /* Operand 0 divided by operand 1. */ DEF_RTL_EXPR(DIV, "div", "ee", RTX_BIN_ARITH) /* Remainder of operand 0 divided by operand 1. */ DEF_RTL_EXPR(MOD, "mod", "ee", RTX_BIN_ARITH) /* Unsigned divide and remainder. */ DEF_RTL_EXPR(UDIV, "udiv", "ee", RTX_BIN_ARITH) DEF_RTL_EXPR(UMOD, "umod", "ee", RTX_BIN_ARITH) /* Bitwise operations. */ DEF_RTL_EXPR(AND, "and", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(IOR, "ior", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(XOR, "xor", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(NOT, "not", "e", RTX_UNARY) /* Operand: 0: value to be shifted. 1: number of bits. */ DEF_RTL_EXPR(ASHIFT, "ashift", "ee", RTX_BIN_ARITH) /* shift left */ DEF_RTL_EXPR(ROTATE, "rotate", "ee", RTX_BIN_ARITH) /* rotate left */ DEF_RTL_EXPR(ASHIFTRT, "ashiftrt", "ee", RTX_BIN_ARITH) /* arithmetic shift right */ DEF_RTL_EXPR(LSHIFTRT, "lshiftrt", "ee", RTX_BIN_ARITH) /* logical shift right */ DEF_RTL_EXPR(ROTATERT, "rotatert", "ee", RTX_BIN_ARITH) /* rotate right */ /* Minimum and maximum values of two operands. We need both signed and unsigned forms. (We cannot use MIN for SMIN because it conflicts with a macro of the same name.) */ DEF_RTL_EXPR(SMIN, "smin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(SMAX, "smax", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMIN, "umin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMAX, "umax", "ee", RTX_COMM_ARITH) /* These unary operations are used to represent incrementation and decrementation as they occur in memory addresses. The amount of increment or decrement are not represented because they can be understood from the machine-mode of the containing MEM. These operations exist in only two cases: 1. pushes onto the stack. 2. created automatically by the life_analysis pass in flow.c. */ DEF_RTL_EXPR(PRE_DEC, "pre_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(PRE_INC, "pre_inc", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_DEC, "post_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_INC, "post_inc", "e", RTX_AUTOINC) /* These binary operations are used to represent generic address side-effects in memory addresses, except for simple incrementation or decrementation which use the above operations. They are created automatically by the life_analysis pass in flow.c. The first operand is a REG which is used as the address. The second operand is an expression that is assigned to the register, either before (PRE_MODIFY) or after (POST_MODIFY) evaluating the address. Currently, the compiler can only handle second operands of the form (plus (reg) (reg)) and (plus (reg) (const_int)), where the first operand of the PLUS has to be the same register as the first operand of the *_MODIFY. */ DEF_RTL_EXPR(PRE_MODIFY, "pre_modify", "ee", RTX_AUTOINC) DEF_RTL_EXPR(POST_MODIFY, "post_modify", "ee", RTX_AUTOINC) /* Comparison operations. The ordered comparisons exist in two flavors, signed and unsigned. 
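For instance (an illustrative fragment only, not taken from any particular machine description), (gt:SI (reg:SI 1) (reg:SI 2)) denotes a signed greater-than test of register 1 against register 2, while (gtu:SI (reg:SI 1) (reg:SI 2)) is the unsigned flavor of the same comparison.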
*/ DEF_RTL_EXPR(NE, "ne", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(EQ, "eq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(GE, "ge", "ee", RTX_COMPARE) DEF_RTL_EXPR(GT, "gt", "ee", RTX_COMPARE) DEF_RTL_EXPR(LE, "le", "ee", RTX_COMPARE) DEF_RTL_EXPR(LT, "lt", "ee", RTX_COMPARE) DEF_RTL_EXPR(GEU, "geu", "ee", RTX_COMPARE) DEF_RTL_EXPR(GTU, "gtu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LEU, "leu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LTU, "ltu", "ee", RTX_COMPARE) /* Additional floating point unordered comparison flavors. */ DEF_RTL_EXPR(UNORDERED, "unordered", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(ORDERED, "ordered", "ee", RTX_COMM_COMPARE) /* These are equivalent to unordered or ... */ DEF_RTL_EXPR(UNEQ, "uneq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(UNGE, "unge", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNGT, "ungt", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLE, "unle", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLT, "unlt", "ee", RTX_COMPARE) /* This is an ordered NE, ie !UNEQ, ie false for NaN. */ DEF_RTL_EXPR(LTGT, "ltgt", "ee", RTX_COMM_COMPARE) /* Represents the result of sign-extending the sole operand. The machine modes of the operand and of the SIGN_EXTEND expression determine how much sign-extension is going on. */ DEF_RTL_EXPR(SIGN_EXTEND, "sign_extend", "e", RTX_UNARY) /* Similar for zero-extension (such as unsigned short to int). */ DEF_RTL_EXPR(ZERO_EXTEND, "zero_extend", "e", RTX_UNARY) /* Similar but here the operand has a wider mode. */ DEF_RTL_EXPR(TRUNCATE, "truncate", "e", RTX_UNARY) /* Similar for extending floating-point values (such as SFmode to DFmode). */ DEF_RTL_EXPR(FLOAT_EXTEND, "float_extend", "e", RTX_UNARY) DEF_RTL_EXPR(FLOAT_TRUNCATE, "float_truncate", "e", RTX_UNARY) /* Conversion of fixed point operand to floating point value. */ DEF_RTL_EXPR(FLOAT, "float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to fixed point value. Value is defined only when the operand's value is an integer. With floating-point machine mode (and operand with same mode): Operand is rounded toward zero to produce an integer value represented in floating point. */ DEF_RTL_EXPR(FIX, "fix", "e", RTX_UNARY) /* Conversion of unsigned fixed point operand to floating point value. */ DEF_RTL_EXPR(UNSIGNED_FLOAT, "unsigned_float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to *unsigned* fixed point value. Value is defined only when the operand's value is an integer. */ DEF_RTL_EXPR(UNSIGNED_FIX, "unsigned_fix", "e", RTX_UNARY) /* Absolute value */ DEF_RTL_EXPR(ABS, "abs", "e", RTX_UNARY) /* Square root */ DEF_RTL_EXPR(SQRT, "sqrt", "e", RTX_UNARY) /* Find first bit that is set. Value is 1 + number of trailing zeros in the arg., or 0 if arg is 0. */ DEF_RTL_EXPR(FFS, "ffs", "e", RTX_UNARY) /* Count leading zeros. */ DEF_RTL_EXPR(CLZ, "clz", "e", RTX_UNARY) /* Count trailing zeros. */ DEF_RTL_EXPR(CTZ, "ctz", "e", RTX_UNARY) /* Population count (number of 1 bits). */ DEF_RTL_EXPR(POPCOUNT, "popcount", "e", RTX_UNARY) /* Population parity (number of 1 bits modulo 2). */ DEF_RTL_EXPR(PARITY, "parity", "e", RTX_UNARY) /* Reference to a signed bit-field of specified size and position. Operand 0 is the memory unit (usually SImode or QImode) which contains the field's first bit. Operand 1 is the width, in bits. Operand 2 is the number of bits in the memory unit before the first bit of this field. If BITS_BIG_ENDIAN is defined, the first bit is the msb and operand 2 counts from the msb of the memory unit. 
Otherwise, the first bit is the lsb and operand 2 counts from the lsb of the memory unit. */ DEF_RTL_EXPR(SIGN_EXTRACT, "sign_extract", "eee", RTX_BITFIELD_OPS) /* Similar for unsigned bit-field. */ DEF_RTL_EXPR(ZERO_EXTRACT, "zero_extract", "eee", RTX_BITFIELD_OPS) /* For RISC machines. These save memory when splitting insns. */ /* HIGH represents the high-order bits of a constant expression. */ DEF_RTL_EXPR(HIGH, "high", "e", RTX_CONST_OBJ) /* LO_SUM is the sum of a register and the low-order bits of a constant expression. */ DEF_RTL_EXPR(LO_SUM, "lo_sum", "ee", RTX_OBJ) /* Header for range information. Operand 0 is the NOTE_INSN_RANGE_BEG insn. Operand 1 is the NOTE_INSN_RANGE_END insn. Operand 2 is a vector of all of the registers that can be substituted within this range. Operand 3 is the number of calls in the range. Operand 4 is the number of insns in the range. Operand 5 is the unique range number for this range. Operand 6 is the basic block # of the start of the live range. Operand 7 is the basic block # of the end of the live range. Operand 8 is the loop depth. Operand 9 is a bitmap of the registers live at the start of the range. Operand 10 is a bitmap of the registers live at the end of the range. Operand 11 is the marker number for the start of the range. Operand 12 is the marker number for the end of the range. */ DEF_RTL_EXPR(RANGE_INFO, "range_info", "uuEiiiiiibbii", RTX_EXTRA) /* Registers that can be substituted within the range. Operand 0 is the original pseudo register number. Operand 1 will be filled in with the pseudo register that the value is copied to for the duration of the range. Operand 2 is the number of references within the range to the register. Operand 3 is the number of sets or clobbers of the register in the range. Operand 4 is the number of deaths the register has. Operand 5 is the copy flags that indicate whether a copy is needed from the original register to the new register at the beginning of the range, or whether a copy is needed from the new register back to the original at the end of the range. Operand 6 is the live length. Operand 7 is the number of calls that this register is live across. Operand 8 is the symbol node of the variable if the register is a user variable. Operand 9 is the block node that the variable is declared in if the register is a user variable. */ DEF_RTL_EXPR(RANGE_REG, "range_reg", "iiiiiiiitt", RTX_EXTRA) /* Information about a local variable's ranges. Operand 0 is an EXPR_LIST of the different ranges a variable is in where it is copied to a different pseudo register. Operand 1 is the block that the variable is declared in. Operand 2 is the number of distinct ranges. */ DEF_RTL_EXPR(RANGE_VAR, "range_var", "eti", RTX_EXTRA) /* Information about the registers that are live at the current point. Operand 0 is the live bitmap. Operand 1 is the original block number. */ DEF_RTL_EXPR(RANGE_LIVE, "range_live", "bi", RTX_EXTRA) /* Describes a merge operation between two vector values. Operands 0 and 1 are the vectors to be merged, operand 2 is a bitmask that specifies where the parts of the result are taken from. Set bits indicate operand 0, clear bits indicate operand 1. The parts are defined by the mode of the vectors. */ DEF_RTL_EXPR(VEC_MERGE, "vec_merge", "eee", RTX_TERNARY) /* Describes an operation that selects parts of a vector. Operand 0 is the source vector, operand 1 is a PARALLEL that contains a CONST_INT for each of the subparts of the result vector, giving the number of the source subpart that should be stored into it.
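A hypothetical example (modes and register number chosen only for illustration): (vec_select:SI (reg:V4SI 100) (parallel [(const_int 2)])) extracts subpart number 2 of the four-element vector held in pseudo register 100.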
*/ DEF_RTL_EXPR(VEC_SELECT, "vec_select", "ee", RTX_BIN_ARITH) /* Describes a vector concat operation. Operands 0 and 1 are the source vectors, the result is a vector that is as long as operands 0 and 1 combined and is the concatenation of the two source vectors. */ DEF_RTL_EXPR(VEC_CONCAT, "vec_concat", "ee", RTX_BIN_ARITH) /* Describes an operation that converts a small vector into a larger one by duplicating the input values. The output vector mode must have the same submodes as the input vector mode, and the number of output parts must be an integer multiple of the number of input parts. */ DEF_RTL_EXPR(VEC_DUPLICATE, "vec_duplicate", "e", RTX_UNARY) /* Addition with signed saturation */ DEF_RTL_EXPR(SS_PLUS, "ss_plus", "ee", RTX_COMM_ARITH) /* Addition with unsigned saturation */ DEF_RTL_EXPR(US_PLUS, "us_plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1, with signed saturation. */ DEF_RTL_EXPR(SS_MINUS, "ss_minus", "ee", RTX_BIN_ARITH) /* Operand 0 minus operand 1, with unsigned saturation. */ DEF_RTL_EXPR(US_MINUS, "us_minus", "ee", RTX_BIN_ARITH) /* Signed saturating truncate. */ DEF_RTL_EXPR(SS_TRUNCATE, "ss_truncate", "e", RTX_UNARY) /* Unsigned saturating truncate. */ DEF_RTL_EXPR(US_TRUNCATE, "us_truncate", "e", RTX_UNARY) /* Information about the variable and its location. */ DEF_RTL_EXPR(VAR_LOCATION, "var_location", "te", RTX_EXTRA) /* Local variables: mode:c End: */ #undef DEF_RTL_EXPR }; /* Indexed by rtx code, gives a character representing the "class" of that rtx code. See rtl.def for documentation on the defined classes. */ const enum rtx_class rtx_class[NUM_RTX_CODE] = { #define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) CLASS, /* This file contains the definitions and documentation for the Register Transfer Expressions (rtx's) that make up the Register Transfer Language (rtl) used in the Back End of the GNU compiler. Copyright (C) 1987, 1988, 1992, 1994, 1995, 1997, 1998, 1999, 2000, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Expression definitions and descriptions for all targets are in this file. Some will not be used for some targets. The fields in the cpp macro call "DEF_RTL_EXPR()" are used to create declarations in the C source of the compiler. The fields are: 1. The internal name of the rtx used in the C source. It is a tag in the enumeration "enum rtx_code" defined in "rtl.h". By convention these are in UPPER_CASE. 2. The name of the rtx in the external ASCII format read by read_rtx(), and printed by print_rtx(). These names are stored in rtx_name[]. By convention these are the internal (field 1) names in lower_case. 3. The print format, and type of each rtx->u.fld[] (field) in this rtx. These formats are stored in rtx_format[]. The meaning of the formats is documented in front of this array in rtl.c 4. The class of the rtx. 
These are stored in rtx_class and are accessed via the GET_RTX_CLASS macro. They are defined as follows: RTX_CONST_OBJ an rtx code that can be used to represent a constant object (e.g., CONST_INT) RTX_OBJ an rtx code that can be used to represent an object (e.g., REG, MEM) RTX_COMPARE an rtx code for a comparison (e.g., LT, GT) RTX_COMM_COMPARE an rtx code for a commutative comparison (e.g., EQ, NE, ORDERED) RTX_UNARY an rtx code for a unary arithmetic expression (e.g., NEG, NOT) RTX_COMM_ARITH an rtx code for a commutative binary operation (e.g., PLUS, MULT) RTX_TERNARY an rtx code for a non-bitfield three-input operation (IF_THEN_ELSE) RTX_BIN_ARITH an rtx code for a non-commutative binary operation (e.g., MINUS, DIV) RTX_BITFIELD_OPS an rtx code for a bit-field operation (ZERO_EXTRACT, SIGN_EXTRACT) RTX_INSN an rtx code for a machine insn (INSN, JUMP_INSN, CALL_INSN) RTX_MATCH an rtx code for something that matches in insns (e.g., MATCH_DUP) RTX_AUTOINC an rtx code for autoincrement addressing modes (e.g., POST_DEC) RTX_EXTRA everything else */ /* --------------------------------------------------------------------- Expressions (and "meta" expressions) used for structuring the rtl representation of a program. --------------------------------------------------------------------- */ /* an expression code name unknown to the reader */ DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", RTX_EXTRA) /* (NIL) is used by the rtl reader and printer to represent a null pointer. */ DEF_RTL_EXPR(NIL, "nil", "*", RTX_EXTRA) /* include a file */ DEF_RTL_EXPR(INCLUDE, "include", "s", RTX_EXTRA) /* --------------------------------------------------------------------- Expressions used in constructing lists. --------------------------------------------------------------------- */ /* a linked list of expressions */ DEF_RTL_EXPR(EXPR_LIST, "expr_list", "ee", RTX_EXTRA) /* a linked list of instructions. The insns are represented in print by their uids. */ DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types for machine descriptions. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Appears only in machine descriptions. Means use the function named by the second arg (the string) as a predicate; if matched, store the structure that was matched in the operand table at the index specified by the first arg (the integer). If the second arg is the null string, the structure is just stored. A third string argument indicates to the register allocator restrictions on where the operand can be allocated. If the target needs no restriction on any instruction, this field should be the null string. The string may be prefixed by: '=' to indicate the operand is only written to. '+' to indicate the operand is both read and written to. Each character in the string represents an allocable class for an operand. 'g' indicates the operand can be any valid class. 'i' indicates the operand can be immediate (in the instruction) data. 'r' indicates the operand can be in a register. 'm' indicates the operand can be in memory. 'o' a subset of the 'm' class. Those memory addressing modes that can be offset at compile time (have a constant added to them). Other characters indicate target dependent operand classes and are described in each target's machine description.
For instructions with more than one operand, sets of classes can be separated by a comma to indicate the appropriate multi-operand constraints. There must be a 1 to 1 correspondence between these sets of classes in all operands for an instruction. */ DEF_RTL_EXPR(MATCH_OPERAND, "match_operand", "iss", RTX_MATCH) /* Appears only in machine descriptions. Means match a SCRATCH or a register. When used to generate rtl, a SCRATCH is generated. As for MATCH_OPERAND, the mode specifies the desired mode and the first argument is the operand number. The second argument is the constraint. */ DEF_RTL_EXPR(MATCH_SCRATCH, "match_scratch", "is", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. */ DEF_RTL_EXPR(MATCH_DUP, "match_dup", "i", RTX_MATCH) /* Appears only in machine descriptions. Means apply a predicate, AND match recursively the operands of the rtx. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply (as a string, a function name). Operand 2 is a vector of expressions, each of which must match one subexpression of the rtx this construct is matching. */ DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means to match a PARALLEL of arbitrary length. The predicate is applied to the PARALLEL and the initial expressions in the PARALLEL are matched. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply to the PARALLEL. Operand 2 is a vector of expressions, each of which must match the corresponding element in the PARALLEL. */ DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_OPERATOR. */ DEF_RTL_EXPR(MATCH_OP_DUP, "match_op_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_PARALLEL. */ DEF_RTL_EXPR(MATCH_PAR_DUP, "match_par_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Defines the pattern for one kind of instruction. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN, "define_insn", "sEsTV", RTX_EXTRA) /* Definition of a peephole optimization. 1st operand: vector of insn patterns to match 2nd operand: C expression that must be true 3rd operand: template or C code to produce assembler output. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE, "define_peephole", "EsTV", RTX_EXTRA) /* Definition of a split operation. 
1st operand: insn pattern to match 2nd operand: C expression that must be true 3rd operand: vector of insn patterns to place into a SEQUENCE 4th operand: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", RTX_EXTRA) /* Definition of an insn and associated split. This is the concatenation, with a few modifications, of a define_insn and a define_split which share the same pattern. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: C expression that must be true for split. This may start with "&&" in which case the split condition is the logical and of the insn condition and what follows the "&&" of this operand. 5: vector of insn patterns to place into a SEQUENCE 6: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). 7: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN_AND_SPLIT, "define_insn_and_split", "sEsTsESV", RTX_EXTRA) /* Definition of an RTL peephole operation. Follows the same arguments as define_split. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE2, "define_peephole2", "EsES", RTX_EXTRA) /* Define how to generate multiple insns for a standard insn name. 1st operand: the insn name. 2nd operand: vector of insn-patterns. Use match_operand to substitute an element of `recog_data.operand'. 3rd operand: C expression that must be true for this to be available. This may not test any operands. 4th operand: Extra C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", RTX_EXTRA) /* Define a requirement for delay slots. 1st operand: Condition involving insn attributes that, if true, indicates that the insn requires the number of delay slots shown. 2nd operand: Vector whose length is the three times the number of delay slots required. Each entry gives three conditions, each involving attributes. The first must be true for an insn to occupy that delay slot location. The second is true for all insns that can be annulled if the branch is true and the third is true for all insns that can be annulled if the branch is false. Multiple DEFINE_DELAYs may be present. They indicate differing requirements for delay slots. */ DEF_RTL_EXPR(DEFINE_DELAY, "define_delay", "eE", RTX_EXTRA) /* Define a set of insns that requires a function unit. 
This means that these insns produce their result after a delay and that there may be restrictions on the number of insns of this type that can be scheduled simultaneously. More than one DEFINE_FUNCTION_UNIT can be specified for a function unit. Each gives a set of operations and associated delays. The first three operands must be the same for each operation for the same function unit. All delays are specified in cycles. 1st operand: Name of function unit (mostly for documentation) 2nd operand: Number of identical function units in CPU 3rd operand: Total number of simultaneous insns that can execute on this function unit; 0 if unlimited. 4th operand: Condition involving insn attribute, that, if true, specifies those insns that this expression applies to. 5th operand: Constant delay after which insn result will be available. 6th operand: Delay until next insn can be scheduled on the function unit executing this operation. The meaning depends on whether or not the next operand is supplied. 7th operand: If this operand is not specified, the 6th operand gives the number of cycles after the instruction matching the 4th operand begins using the function unit until a subsequent insn can begin. A value of zero should be used for a unit with no issue constraints. If only one operation can be executed a time and the unit is busy for the entire time, the 3rd operand should be specified as 1, the 6th operand should be specified as 0, and the 7th operand should not be specified. If this operand is specified, it is a list of attribute expressions. If an insn for which any of these expressions is true is currently executing on the function unit, the issue delay will be given by the 6th operand. Otherwise, the insn can be immediately scheduled (subject to the limit on the number of simultaneous operations executing on the unit.) */ DEF_RTL_EXPR(DEFINE_FUNCTION_UNIT, "define_function_unit", "siieiiV", RTX_EXTRA) /* Define attribute computation for `asm' instructions. */ DEF_RTL_EXPR(DEFINE_ASM_ATTRIBUTES, "define_asm_attributes", "V", RTX_EXTRA) /* Definition of a conditional execution meta operation. Automatically generates new instances of DEFINE_INSN, selected by having attribute "predicable" true. The new pattern will contain a COND_EXEC and the predicate at top-level. Operand: 0: The predicate pattern. The top-level form should match a relational operator. Operands should have only one alternative. 1: A C expression giving an additional condition for recognizing the generated pattern. 2: A template or C code to produce assembler output. */ DEF_RTL_EXPR(DEFINE_COND_EXEC, "define_cond_exec", "Ess", RTX_EXTRA) /* SEQUENCE appears in the result of a `gen_...' function for a DEFINE_EXPAND that wants to make several insns. Its elements are the bodies of the insns that should be made. `emit_insn' takes the SEQUENCE apart and makes separate insns. */ DEF_RTL_EXPR(SEQUENCE, "sequence", "E", RTX_EXTRA) /* Refers to the address of its argument. This is only used in alias.c. */ DEF_RTL_EXPR(ADDRESS, "address", "e", RTX_MATCH) /* ---------------------------------------------------------------------- Constructions for CPU pipeline description described by NDFAs. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* (define_cpu_unit string [string]) describes cpu functional units (separated by comma). 1st operand: Names of cpu functional units. 2nd operand: Name of automaton (see comments for DEFINE_AUTOMATON). 
All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_CPU_UNIT, "define_cpu_unit", "sS", RTX_EXTRA) /* (define_query_cpu_unit string [string]) describes cpu functional units analogously to define_cpu_unit. The reservation of such units can be queried for an automaton state. */ DEF_RTL_EXPR(DEFINE_QUERY_CPU_UNIT, "define_query_cpu_unit", "sS", RTX_EXTRA) /* (exclusion_set string string) means that each CPU functional unit in the first string cannot be reserved simultaneously with any unit whose name is in the second string, and vice versa. CPU units in the strings are separated by commas. For example, this is useful for describing a CPU with a fully pipelined floating point functional unit which can simultaneously execute only single precision floating point insns or only double precision floating point insns. All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(EXCLUSION_SET, "exclusion_set", "ss", RTX_EXTRA) /* (presence_set string string) means that each CPU functional unit in the first string cannot be reserved unless at least one of the unit patterns whose names are in the second string is reserved. This is an asymmetric relation. CPU units or unit patterns in the strings are separated by commas. A pattern is one unit name or several unit names separated by white space. For example, this is useful for describing that slot1 is reserved after a slot0 reservation for a VLIW processor. We could describe it with the following construction: (presence_set "slot1" "slot0") Or slot1 is reserved only after slot0 and unit b0 are reserved. In this case we could write (presence_set "slot1" "slot0 b0") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(PRESENCE_SET, "presence_set", "ss", RTX_EXTRA) /* (final_presence_set string string) is analogous to `presence_set'. The difference between them is when the checking is done. When an instruction is issued in a given automaton state reflecting all current and planned unit reservations, the automaton state is changed. The first state is a source state, the second one is a result state. Checking for `presence_set' is done on the source state reservation; checking for `final_presence_set' is done on the result reservation. This construction is useful for describing a reservation which is actually two subsequent reservations. For example, if we use (presence_set "slot1" "slot0") the following insn will never be issued (because slot1 requires slot0, which is absent in the source state): (define_reservation "insn_and_nop" "slot0 + slot1") but it can be issued if we use the analogous `final_presence_set'. */ DEF_RTL_EXPR(FINAL_PRESENCE_SET, "final_presence_set", "ss", RTX_EXTRA) /* (absence_set string string) means that each CPU functional unit in the first string can be reserved only if each pattern of units whose names are in the second string is not reserved. This is an asymmetric relation (the exclusion set is actually analogous to this one but is symmetric). CPU units or unit patterns in the strings are separated by commas. A pattern is one unit name or several unit names separated by white space. For example, this is useful for describing that slot2 cannot be reserved after a slot0 or slot1 reservation for a VLIW processor. We could describe it with the following construction: (absence_set "slot2" "slot0, slot1") Or slot2 cannot be reserved if slot0 and unit b0 are reserved or slot1 and unit b1 are reserved.
In this case we could write (absence_set "slot2" "slot0 b0, slot1 b1") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(ABSENCE_SET, "absence_set", "ss", RTX_EXTRA) /* (final_absence_set string string) is analogous to `absence_set' but the checking is done on the result (state) reservation. See the comments for `final_presence_set'. */ DEF_RTL_EXPR(FINAL_ABSENCE_SET, "final_absence_set", "ss", RTX_EXTRA) /* (define_bypass number out_insn_names in_insn_names) names a bypass with the given latency (the first number) from the insns given by the first string (see define_insn_reservation) into the insns given by the second string. Insn names in the strings are separated by commas. The third operand is the optional name of a function which is an additional guard for the bypass. The function will get the two insns as parameters. If the function returns zero, the bypass will be ignored for this case. The additional guard is necessary to recognize complicated bypasses, e.g. when the consumer is a load address. */ DEF_RTL_EXPR(DEFINE_BYPASS, "define_bypass", "issS", RTX_EXTRA) /* (define_automaton string) describes the names of the automata generated and used for pipeline hazard recognition. The names are separated by commas. It is actually possible to generate a single automaton, but unfortunately it can be very large. If we use more than one automaton, the total size of the automata is usually less than that of a single one. The automaton name is used in define_cpu_unit and define_query_cpu_unit. All automata should have unique names. */ DEF_RTL_EXPR(DEFINE_AUTOMATON, "define_automaton", "s", RTX_EXTRA) /* (automata_option string) describes an option for the generation of automata. Currently there are the following options: o "no-minimization" which performs no minimization of automata. This is only worth doing when we are debugging the description and need to look more accurately at the reservations of states. o "time" which means printing additional time statistics about the generation of automata. o "v" which means generation of a file describing the resulting automata. The file has the suffix `.dfa' and can be used for verification and debugging of the description. o "w" which means generation of a warning instead of an error for non-critical errors. o "ndfa" which makes nondeterministic finite state automata. o "progress" which means output of a progress bar showing how many states have been generated so far for the automaton being processed. */ DEF_RTL_EXPR(AUTOMATA_OPTION, "automata_option", "s", RTX_EXTRA) /* (define_reservation string string) names a reservation (the first string) of cpu functional units (the 2nd string). Sometimes unit reservations for different insns contain common parts. In such cases, you can describe the common part and use its name (the 1st parameter) in a regular expression in define_insn_reservation. All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_RESERVATION, "define_reservation", "ss", RTX_EXTRA) /* (define_insn_reservation name default_latency condition regexpr) describes the reservation of cpu functional units (the 3rd operand) for an instruction which is selected by the condition (the 2nd parameter). The first parameter is used for the output of debugging information.
The reservations are described by a regular expression according to the following syntax: regexp = regexp "," oneof | oneof oneof = oneof "|" allof | allof allof = allof "+" repeat | repeat repeat = element "*" number | element element = cpu_function_unit_name | reservation_name | result_name | "nothing" | "(" regexp ")" 1. "," is used for describing the start of the next cycle in the reservation. 2. "|" is used for describing the reservation described by the first regular expression *or* the reservation described by the second regular expression *or* etc. 3. "+" is used for describing the reservation described by the first regular expression *and* the reservation described by the second regular expression *and* etc. 4. "*" is used for convenience and simply means a sequence in which the regular expression is repeated NUMBER times with cycle advancing (see ","). 5. A cpu functional unit name means its reservation. 6. A reservation name -- see define_reservation. 7. The string "nothing" means no unit reservation. */ DEF_RTL_EXPR(DEFINE_INSN_RESERVATION, "define_insn_reservation", "sies", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions used for insn attributes. These also do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Definition of an insn attribute. 1st operand: name of the attribute 2nd operand: comma-separated list of possible attribute values 3rd operand: expression for the default value of the attribute. */ DEF_RTL_EXPR(DEFINE_ATTR, "define_attr", "sse", RTX_EXTRA) /* Marker for the name of an attribute. */ DEF_RTL_EXPR(ATTR, "attr", "s", RTX_EXTRA) /* For use in the last (optional) operand of DEFINE_INSN or DEFINE_PEEPHOLE and in DEFINE_ASM_INSN to specify an attribute to assign to insns matching that pattern. (set_attr "name" "value") is equivalent to (set (attr "name") (const_string "value")) */ DEF_RTL_EXPR(SET_ATTR, "set_attr", "ss", RTX_EXTRA) /* In the last operand of DEFINE_INSN and DEFINE_PEEPHOLE, this can be used to specify that attribute values are to be assigned according to the alternative matched. The following three expressions are equivalent: (set (attr "att") (cond [(eq_attr "alternative" "1") (const_string "a1") (eq_attr "alternative" "2") (const_string "a2")] (const_string "a3"))) (set_attr_alternative "att" [(const_string "a1") (const_string "a2") (const_string "a3")]) (set_attr "att" "a1,a2,a3") */ DEF_RTL_EXPR(SET_ATTR_ALTERNATIVE, "set_attr_alternative", "sE", RTX_EXTRA) /* A conditional expression true if the value of the specified attribute of the current insn equals the specified value. The first operand is the attribute name and the second is the comparison value. */ DEF_RTL_EXPR(EQ_ATTR, "eq_attr", "ss", RTX_EXTRA) /* A special case of the above representing a set of alternatives. The first operand is a bitmap of the set, the second one is the default value. */ DEF_RTL_EXPR(EQ_ATTR_ALT, "eq_attr_alt", "ii", RTX_EXTRA) /* A conditional expression which is true if the specified flag is true for the insn being scheduled in reorg. genattr.c defines the following flags which can be tested by (attr_flag "foo") expressions in eligible_for_delay: forward, backward, very_likely, likely, very_unlikely, and unlikely. */ DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types used for things in the instruction chain.
All formats must start with "iuu" to handle the chain. Each insn expression holds an rtl instruction and its semantics during back-end processing. See macros's in "rtl.h" for the meaning of each rtx->u.fld[]. ---------------------------------------------------------------------- */ /* An instruction that cannot jump. */ DEF_RTL_EXPR(INSN, "insn", "iuuBieiee", RTX_INSN) /* An instruction that can possibly jump. Fields ( rtx->u.fld[] ) have exact same meaning as INSN's. */ DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "iuuBieiee0", RTX_INSN) /* An instruction that can possibly call a subroutine but which will not change which instruction comes next in the current function. Field ( rtx->u.fld[9] ) is CALL_INSN_FUNCTION_USAGE. All other fields ( rtx->u.fld[] ) have exact same meaning as INSN's. */ DEF_RTL_EXPR(CALL_INSN, "call_insn", "iuuBieieee", RTX_INSN) /* A marker that indicates that control will not flow through. */ DEF_RTL_EXPR(BARRIER, "barrier", "iuu000000", RTX_EXTRA) /* Holds a label that is followed by instructions. Operand: 4: is used in jump.c for the use-count of the label. 5: is used in flow.c to point to the chain of label_ref's to this label. 6: is a number that is unique in the entire compilation. 7: is the user-given name of the label, if any. */ DEF_RTL_EXPR(CODE_LABEL, "code_label", "iuuB00is", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: unused if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: CODE_LABEL_NUMBER if line number == NOTE_INSN_DELETED_LABEL. */ #else /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: filename, if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: unique number if line number == note_insn_deleted_label. */ #endif DEF_RTL_EXPR(NOTE, "note", "iuuB0ni", RTX_EXTRA) /* ---------------------------------------------------------------------- Top level constituents of INSN, JUMP_INSN and CALL_INSN. ---------------------------------------------------------------------- */ /* Conditionally execute code. Operand 0 is the condition that if true, the code is executed. Operand 1 is the code to be executed (typically a SET). Semantics are that there are no side effects if the condition is false. This pattern is created automatically by the if_convert pass run after reload or by target-specific splitters. */ DEF_RTL_EXPR(COND_EXEC, "cond_exec", "ee", RTX_EXTRA) /* Several operations to be done in parallel (perhaps under COND_EXEC). */ DEF_RTL_EXPR(PARALLEL, "parallel", "E", RTX_EXTRA) /* A string that is passed through to the assembler as input. One can obviously pass comments through by using the assembler comment syntax. These occur in an insn all by themselves as the PATTERN. They also appear inside an ASM_OPERANDS as a convenient way to hold a string. */ DEF_RTL_EXPR(ASM_INPUT, "asm_input", "s", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. 
Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEi", RTX_EXTRA) #else /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the name of the containing source file. 7th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEsi", RTX_EXTRA) #endif /* A machine-specific operation. 1st operand is a vector of operands being used by the operation so that any needed reloads can be done. 2nd operand is a unique value saying which of a number of machine-specific operations is to be performed. (Note that the vector must be the first operand because of the way that genrecog.c record positions within an insn.) This can occur all by itself in a PATTERN, as a component of a PARALLEL, or inside an expression. */ DEF_RTL_EXPR(UNSPEC, "unspec", "Ei", RTX_EXTRA) /* Similar, but a volatile operation and one which may trap. */ DEF_RTL_EXPR(UNSPEC_VOLATILE, "unspec_volatile", "Ei", RTX_EXTRA) /* Vector of addresses, stored as full words. */ /* Each element is a LABEL_REF to a CODE_LABEL whose address we want. */ DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", RTX_EXTRA) /* Vector of address differences X0 - BASE, X1 - BASE, ... First operand is BASE; the vector contains the X's. The machine mode of this rtx says how much space to leave for each difference and is adjusted by branch shortening if CASE_VECTOR_SHORTEN_MODE is defined. The third and fourth operands store the target labels with the minimum and maximum addresses respectively. The fifth operand stores flags for use by branch shortening. Set at the start of shorten_branches: min_align: the minimum alignment for any of the target labels. base_after_vec: true iff BASE is after the ADDR_DIFF_VEC. min_after_vec: true iff minimum addr target label is after the ADDR_DIFF_VEC. max_after_vec: true iff maximum addr target label is after the ADDR_DIFF_VEC. min_after_base: true iff minimum address target label is after BASE. max_after_base: true iff maximum address target label is after BASE. Set by the actual branch shortening process: offset_unsigned: true iff offsets have to be treated as unsigned. scale: scaling that is necessary to make offsets fit into the mode. The third, fourth and fifth operands are only valid when CASE_VECTOR_SHORTEN_MODE is defined, and only in an optimizing compilations. */ DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEee0", RTX_EXTRA) /* Memory prefetch, with attributes supported on some targets. Operand 1 is the address of the memory to fetch. Operand 2 is 1 for a write access, 0 otherwise. Operand 3 is the level of temporal locality; 0 means there is no temporal locality and 1, 2, and 3 are for increasing levels of temporal locality. The attributes specified by operands 2 and 3 are ignored for targets whose prefetch instructions do not support them. 
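A hypothetical instance (the register number is chosen only for illustration) is (prefetch (reg:SI 60) (const_int 0) (const_int 3)), i.e. a read prefetch of the address held in register 60 with maximal temporal locality.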
*/ DEF_RTL_EXPR(PREFETCH, "prefetch", "eee", RTX_EXTRA) /* ---------------------------------------------------------------------- At the top level of an instruction (perhaps under PARALLEL). ---------------------------------------------------------------------- */ /* Assignment. Operand 1 is the location (REG, MEM, PC, CC0 or whatever) assigned to. Operand 2 is the value stored there. ALL assignment must use SET. Instructions that do multiple assignments must use multiple SET, under PARALLEL. */ DEF_RTL_EXPR(SET, "set", "ee", RTX_EXTRA) /* Indicate something is used in a way that we don't want to explain. For example, subroutine calls will use the register in which the static chain is passed. */ DEF_RTL_EXPR(USE, "use", "e", RTX_EXTRA) /* Indicate something is clobbered in a way that we don't want to explain. For example, subroutine calls will clobber some physical registers (the ones that are by convention not saved). */ DEF_RTL_EXPR(CLOBBER, "clobber", "e", RTX_EXTRA) /* Call a subroutine. Operand 1 is the address to call. Operand 2 is the number of arguments. */ DEF_RTL_EXPR(CALL, "call", "ee", RTX_EXTRA) /* Return from a subroutine. */ DEF_RTL_EXPR(RETURN, "return", "", RTX_EXTRA) /* Conditional trap. Operand 1 is the condition. Operand 2 is the trap code. For an unconditional trap, make the condition (const_int 1). */ DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA) /* Placeholder for _Unwind_Resume before we know if a function call or a branch is needed. Operand 1 is the exception region from which control is flowing. */ DEF_RTL_EXPR(RESX, "resx", "i", RTX_EXTRA) /* ---------------------------------------------------------------------- Primitive values for use in expressions. ---------------------------------------------------------------------- */ /* numeric integer constant */ DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ) /* numeric floating point constant. Operands hold the value. They are all 'w' and there may be from 2 to 6; see real.h. */ DEF_RTL_EXPR(CONST_DOUBLE, "const_double", CONST_DOUBLE_FORMAT, RTX_CONST_OBJ) /* Describes a vector constant. */ DEF_RTL_EXPR(CONST_VECTOR, "const_vector", "E", RTX_EXTRA) /* String constant. Used only for attributes right now. */ DEF_RTL_EXPR(CONST_STRING, "const_string", "s", RTX_OBJ) /* This is used to encapsulate an expression whose value is constant (such as the sum of a SYMBOL_REF and a CONST_INT) so that it will be recognized as a constant operand rather than by arithmetic instructions. */ DEF_RTL_EXPR(CONST, "const", "e", RTX_CONST_OBJ) /* program counter. Ordinary jumps are represented by a SET whose first operand is (PC). */ DEF_RTL_EXPR(PC, "pc", "", RTX_OBJ) /* Used in the cselib routines to describe a value. */ DEF_RTL_EXPR(VALUE, "value", "0", RTX_OBJ) /* A register. The "operand" is the register number, accessed with the REGNO macro. If this number is less than FIRST_PSEUDO_REGISTER than a hardware register is being referred to. The second operand holds the original register number - this will be different for a pseudo register that got turned into a hard register. This rtx needs to have as many (or more) fields as a MEM, since we can change REG rtx's into MEMs during reload. */ DEF_RTL_EXPR(REG, "reg", "i00", RTX_OBJ) /* A scratch register. This represents a register used only within a single insn. It will be turned into a REG during register allocation or reload unless the constraint indicates that the register won't be needed, in which case it can remain a SCRATCH. 
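(A typical appearance, given purely as an illustration, is (clobber (scratch:SI)) inside a PARALLEL, which asks register allocation for a throw-away temporary.)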
This code is marked as having one operand so it can be turned into a REG. */ DEF_RTL_EXPR(SCRATCH, "scratch", "0", RTX_OBJ) /* One word of a multi-word value. The first operand is the complete value; the second says which word. The WORDS_BIG_ENDIAN flag controls whether word number 0 (as numbered in a SUBREG) is the most or least significant word. This is also used to refer to a value in a different machine mode. For example, it can be used to refer to a SImode value as if it were Qimode, or vice versa. Then the word number is always 0. */ DEF_RTL_EXPR(SUBREG, "subreg", "ei", RTX_EXTRA) /* This one-argument rtx is used for move instructions that are guaranteed to alter only the low part of a destination. Thus, (SET (SUBREG:HI (REG...)) (MEM:HI ...)) has an unspecified effect on the high part of REG, but (SET (STRICT_LOW_PART (SUBREG:HI (REG...))) (MEM:HI ...)) is guaranteed to alter only the bits of REG that are in HImode. The actual instruction used is probably the same in both cases, but the register constraints may be tighter when STRICT_LOW_PART is in use. */ DEF_RTL_EXPR(STRICT_LOW_PART, "strict_low_part", "e", RTX_EXTRA) /* (CONCAT a b) represents the virtual concatenation of a and b to make a value that has as many bits as a and b put together. This is used for complex values. Normally it appears only in DECL_RTLs and during RTL generation, but not in the insn chain. */ DEF_RTL_EXPR(CONCAT, "concat", "ee", RTX_OBJ) /* A memory location; operand is the address. The second operand is the alias set to which this MEM belongs. We use `0' instead of `w' for this field so that the field need not be specified in machine descriptions. */ DEF_RTL_EXPR(MEM, "mem", "e0", RTX_OBJ) /* Reference to an assembler label in the code for this function. The operand is a CODE_LABEL found in the insn chain. The unprinted fields 1 and 2 are used in flow.c for the LABEL_NEXTREF and CONTAINING_INSN. */ DEF_RTL_EXPR(LABEL_REF, "label_ref", "u00", RTX_CONST_OBJ) /* Reference to a named label: Operand 0: label name Operand 1: flags (see SYMBOL_FLAG_* in rtl.h) Operand 2: tree from which this symbol is derived, or null. This is either a DECL node, or some kind of constant. */ DEF_RTL_EXPR(SYMBOL_REF, "symbol_ref", "s00", RTX_CONST_OBJ) /* The condition code register is represented, in our imagination, as a register holding a value that can be compared to zero. In fact, the machine has already compared them and recorded the results; but instructions that look at the condition code pretend to be looking at the entire value and comparing it. */ DEF_RTL_EXPR(CC0, "cc0", "", RTX_OBJ) /* ===================================================================== A QUEUED expression really points to a member of the queue of instructions to be output later for postincrement/postdecrement. QUEUED expressions never become part of instructions. When a QUEUED expression would be put into an instruction, instead either the incremented variable or a copy of its previous value is used. Operands are: 0. the variable to be incremented (a REG rtx). 1. the incrementing instruction, or 0 if it hasn't been output yet. 2. A REG rtx for a copy of the old value of the variable, or 0 if none yet. 3. the body to use for the incrementing instruction 4. the next QUEUED expression in the queue. 
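(As a sketch only: expanding a construct such as *p++ on a machine without a post-increment addressing mode may queue the increment of the register holding p, while the insn being emitted uses either that register or a copy of its old value, as described above.)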
====================================================================== */ DEF_RTL_EXPR(QUEUED, "queued", "eeeee", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions for operators in an rtl pattern ---------------------------------------------------------------------- */ /* if_then_else. This is used in representing ordinary conditional jump instructions. Operand: 0: condition 1: then expr 2: else expr */ DEF_RTL_EXPR(IF_THEN_ELSE, "if_then_else", "eee", RTX_TERNARY) /* General conditional. The first operand is a vector composed of pairs of expressions. The first element of each pair is evaluated, in turn. The value of the conditional is the second expression of the first pair whose first expression evaluates nonzero. If none of the expressions is true, the second operand will be used as the value of the conditional. This should be replaced with use of IF_THEN_ELSE. */ DEF_RTL_EXPR(COND, "cond", "Ee", RTX_EXTRA) /* Comparison, produces a condition code result. */ DEF_RTL_EXPR(COMPARE, "compare", "ee", RTX_BIN_ARITH) /* plus */ DEF_RTL_EXPR(PLUS, "plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1. */ DEF_RTL_EXPR(MINUS, "minus", "ee", RTX_BIN_ARITH) /* Minus operand 0. */ DEF_RTL_EXPR(NEG, "neg", "e", RTX_UNARY) DEF_RTL_EXPR(MULT, "mult", "ee", RTX_COMM_ARITH) /* Operand 0 divided by operand 1. */ DEF_RTL_EXPR(DIV, "div", "ee", RTX_BIN_ARITH) /* Remainder of operand 0 divided by operand 1. */ DEF_RTL_EXPR(MOD, "mod", "ee", RTX_BIN_ARITH) /* Unsigned divide and remainder. */ DEF_RTL_EXPR(UDIV, "udiv", "ee", RTX_BIN_ARITH) DEF_RTL_EXPR(UMOD, "umod", "ee", RTX_BIN_ARITH) /* Bitwise operations. */ DEF_RTL_EXPR(AND, "and", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(IOR, "ior", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(XOR, "xor", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(NOT, "not", "e", RTX_UNARY) /* Operand: 0: value to be shifted. 1: number of bits. */ DEF_RTL_EXPR(ASHIFT, "ashift", "ee", RTX_BIN_ARITH) /* shift left */ DEF_RTL_EXPR(ROTATE, "rotate", "ee", RTX_BIN_ARITH) /* rotate left */ DEF_RTL_EXPR(ASHIFTRT, "ashiftrt", "ee", RTX_BIN_ARITH) /* arithmetic shift right */ DEF_RTL_EXPR(LSHIFTRT, "lshiftrt", "ee", RTX_BIN_ARITH) /* logical shift right */ DEF_RTL_EXPR(ROTATERT, "rotatert", "ee", RTX_BIN_ARITH) /* rotate right */ /* Minimum and maximum values of two operands. We need both signed and unsigned forms. (We cannot use MIN for SMIN because it conflicts with a macro of the same name.) */ DEF_RTL_EXPR(SMIN, "smin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(SMAX, "smax", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMIN, "umin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMAX, "umax", "ee", RTX_COMM_ARITH) /* These unary operations are used to represent incrementation and decrementation as they occur in memory addresses. The amount of increment or decrement are not represented because they can be understood from the machine-mode of the containing MEM. These operations exist in only two cases: 1. pushes onto the stack. 2. created automatically by the life_analysis pass in flow.c. */ DEF_RTL_EXPR(PRE_DEC, "pre_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(PRE_INC, "pre_inc", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_DEC, "post_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_INC, "post_inc", "e", RTX_AUTOINC) /* These binary operations are used to represent generic address side-effects in memory addresses, except for simple incrementation or decrementation which use the above operations. They are created automatically by the life_analysis pass in flow.c. 
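As a hypothetical sketch (the register numbers are illustrative only), (mem:SI (post_modify:SI (reg:SI 60) (plus:SI (reg:SI 60) (reg:SI 61)))) accesses memory at the address in register 60 and then adds register 61 to that register, matching the operand restrictions described next.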
The first operand is a REG which is used as the address. The second operand is an expression that is assigned to the register, either before (PRE_MODIFY) or after (POST_MODIFY) evaluating the address. Currently, the compiler can only handle second operands of the form (plus (reg) (reg)) and (plus (reg) (const_int)), where the first operand of the PLUS has to be the same register as the first operand of the *_MODIFY. */ DEF_RTL_EXPR(PRE_MODIFY, "pre_modify", "ee", RTX_AUTOINC) DEF_RTL_EXPR(POST_MODIFY, "post_modify", "ee", RTX_AUTOINC) /* Comparison operations. The ordered comparisons exist in two flavors, signed and unsigned. */ DEF_RTL_EXPR(NE, "ne", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(EQ, "eq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(GE, "ge", "ee", RTX_COMPARE) DEF_RTL_EXPR(GT, "gt", "ee", RTX_COMPARE) DEF_RTL_EXPR(LE, "le", "ee", RTX_COMPARE) DEF_RTL_EXPR(LT, "lt", "ee", RTX_COMPARE) DEF_RTL_EXPR(GEU, "geu", "ee", RTX_COMPARE) DEF_RTL_EXPR(GTU, "gtu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LEU, "leu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LTU, "ltu", "ee", RTX_COMPARE) /* Additional floating point unordered comparison flavors. */ DEF_RTL_EXPR(UNORDERED, "unordered", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(ORDERED, "ordered", "ee", RTX_COMM_COMPARE) /* These are equivalent to unordered or ... */ DEF_RTL_EXPR(UNEQ, "uneq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(UNGE, "unge", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNGT, "ungt", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLE, "unle", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLT, "unlt", "ee", RTX_COMPARE) /* This is an ordered NE, ie !UNEQ, ie false for NaN. */ DEF_RTL_EXPR(LTGT, "ltgt", "ee", RTX_COMM_COMPARE) /* Represents the result of sign-extending the sole operand. The machine modes of the operand and of the SIGN_EXTEND expression determine how much sign-extension is going on. */ DEF_RTL_EXPR(SIGN_EXTEND, "sign_extend", "e", RTX_UNARY) /* Similar for zero-extension (such as unsigned short to int). */ DEF_RTL_EXPR(ZERO_EXTEND, "zero_extend", "e", RTX_UNARY) /* Similar but here the operand has a wider mode. */ DEF_RTL_EXPR(TRUNCATE, "truncate", "e", RTX_UNARY) /* Similar for extending floating-point values (such as SFmode to DFmode). */ DEF_RTL_EXPR(FLOAT_EXTEND, "float_extend", "e", RTX_UNARY) DEF_RTL_EXPR(FLOAT_TRUNCATE, "float_truncate", "e", RTX_UNARY) /* Conversion of fixed point operand to floating point value. */ DEF_RTL_EXPR(FLOAT, "float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to fixed point value. Value is defined only when the operand's value is an integer. With floating-point machine mode (and operand with same mode): Operand is rounded toward zero to produce an integer value represented in floating point. */ DEF_RTL_EXPR(FIX, "fix", "e", RTX_UNARY) /* Conversion of unsigned fixed point operand to floating point value. */ DEF_RTL_EXPR(UNSIGNED_FLOAT, "unsigned_float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to *unsigned* fixed point value. Value is defined only when the operand's value is an integer. */ DEF_RTL_EXPR(UNSIGNED_FIX, "unsigned_fix", "e", RTX_UNARY) /* Absolute value */ DEF_RTL_EXPR(ABS, "abs", "e", RTX_UNARY) /* Square root */ DEF_RTL_EXPR(SQRT, "sqrt", "e", RTX_UNARY) /* Find first bit that is set. Value is 1 + number of trailing zeros in the arg., or 0 if arg is 0. */ DEF_RTL_EXPR(FFS, "ffs", "e", RTX_UNARY) /* Count leading zeros. */ DEF_RTL_EXPR(CLZ, "clz", "e", RTX_UNARY) /* Count trailing zeros. 
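(A small worked illustration: for a hypothetical 8-bit operand with value 0b00010100, clz is 3, ctz is 2 and ffs is 3.)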
*/ DEF_RTL_EXPR(CTZ, "ctz", "e", RTX_UNARY) /* Population count (number of 1 bits). */ DEF_RTL_EXPR(POPCOUNT, "popcount", "e", RTX_UNARY) /* Population parity (number of 1 bits modulo 2). */ DEF_RTL_EXPR(PARITY, "parity", "e", RTX_UNARY) /* Reference to a signed bit-field of specified size and position. Operand 0 is the memory unit (usually SImode or QImode) which contains the field's first bit. Operand 1 is the width, in bits. Operand 2 is the number of bits in the memory unit before the first bit of this field. If BITS_BIG_ENDIAN is defined, the first bit is the msb and operand 2 counts from the msb of the memory unit. Otherwise, the first bit is the lsb and operand 2 counts from the lsb of the memory unit. */ DEF_RTL_EXPR(SIGN_EXTRACT, "sign_extract", "eee", RTX_BITFIELD_OPS) /* Similar for unsigned bit-field. */ DEF_RTL_EXPR(ZERO_EXTRACT, "zero_extract", "eee", RTX_BITFIELD_OPS) /* For RISC machines. These save memory when splitting insns. */ /* HIGH are the high-order bits of a constant expression. */ DEF_RTL_EXPR(HIGH, "high", "e", RTX_CONST_OBJ) /* LO_SUM is the sum of a register and the low-order bits of a constant expression. */ DEF_RTL_EXPR(LO_SUM, "lo_sum", "ee", RTX_OBJ) /* Header for range information. Operand 0 is the NOTE_INSN_RANGE_BEG insn. Operand 1 is the NOTE_INSN_RANGE_END insn. Operand 2 is a vector of all of the registers that can be substituted within this range. Operand 3 is the number of calls in the range. Operand 4 is the number of insns in the range. Operand 5 is the unique range number for this range. Operand 6 is the basic block # of the start of the live range. Operand 7 is the basic block # of the end of the live range. Operand 8 is the loop depth. Operand 9 is a bitmap of the registers live at the start of the range. Operand 10 is a bitmap of the registers live at the end of the range. Operand 11 is marker number for the start of the range. Operand 12 is the marker number for the end of the range. */ DEF_RTL_EXPR(RANGE_INFO, "range_info", "uuEiiiiiibbii", RTX_EXTRA) /* Registers that can be substituted within the range. Operand 0 is the original pseudo register number. Operand 1 will be filled in with the pseudo register the value is copied for the duration of the range. Operand 2 is the number of references within the range to the register. Operand 3 is the number of sets or clobbers of the register in the range. Operand 4 is the number of deaths the register has. Operand 5 is the copy flags that give the status of whether a copy is needed from the original register to the new register at the beginning of the range, or whether a copy from the new register back to the original at the end of the range. Operand 6 is the live length. Operand 7 is the number of calls that this register is live across. Operand 8 is the symbol node of the variable if the register is a user variable. Operand 9 is the block node that the variable is declared in if the register is a user variable. */ DEF_RTL_EXPR(RANGE_REG, "range_reg", "iiiiiiiitt", RTX_EXTRA) /* Information about a local variable's ranges. Operand 0 is an EXPR_LIST of the different ranges a variable is in where it is copied to a different pseudo register. Operand 1 is the block that the variable is declared in. Operand 2 is the number of distinct ranges. */ DEF_RTL_EXPR(RANGE_VAR, "range_var", "eti", RTX_EXTRA) /* Information about the registers that are live at the current point. Operand 0 is the live bitmap. Operand 1 is the original block number. 
*/ DEF_RTL_EXPR(RANGE_LIVE, "range_live", "bi", RTX_EXTRA) /* Describes a merge operation between two vector values. Operands 0 and 1 are the vectors to be merged, operand 2 is a bitmask that specifies where the parts of the result are taken from. Set bits indicate operand 0, clear bits indicate operand 1. The parts are defined by the mode of the vectors. */ DEF_RTL_EXPR(VEC_MERGE, "vec_merge", "eee", RTX_TERNARY) /* Describes an operation that selects parts of a vector. Operands 0 is the source vector, operand 1 is a PARALLEL that contains a CONST_INT for each of the subparts of the result vector, giving the number of the source subpart that should be stored into it. */ DEF_RTL_EXPR(VEC_SELECT, "vec_select", "ee", RTX_BIN_ARITH) /* Describes a vector concat operation. Operands 0 and 1 are the source vectors, the result is a vector that is as long as operands 0 and 1 combined and is the concatenation of the two source vectors. */ DEF_RTL_EXPR(VEC_CONCAT, "vec_concat", "ee", RTX_BIN_ARITH) /* Describes an operation that converts a small vector into a larger one by duplicating the input values. The output vector mode must have the same submodes as the input vector mode, and the number of output parts must be an integer multiple of the number of input parts. */ DEF_RTL_EXPR(VEC_DUPLICATE, "vec_duplicate", "e", RTX_UNARY) /* Addition with signed saturation */ DEF_RTL_EXPR(SS_PLUS, "ss_plus", "ee", RTX_COMM_ARITH) /* Addition with unsigned saturation */ DEF_RTL_EXPR(US_PLUS, "us_plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1, with signed saturation. */ DEF_RTL_EXPR(SS_MINUS, "ss_minus", "ee", RTX_BIN_ARITH) /* Operand 0 minus operand 1, with unsigned saturation. */ DEF_RTL_EXPR(US_MINUS, "us_minus", "ee", RTX_BIN_ARITH) /* Signed saturating truncate. */ DEF_RTL_EXPR(SS_TRUNCATE, "ss_truncate", "e", RTX_UNARY) /* Unsigned saturating truncate. */ DEF_RTL_EXPR(US_TRUNCATE, "us_truncate", "e", RTX_UNARY) /* Information about the variable and its location. */ DEF_RTL_EXPR(VAR_LOCATION, "var_location", "te", RTX_EXTRA) /* Local variables: mode:c End: */ #undef DEF_RTL_EXPR }; /* Indexed by rtx code, gives the size of the rtx in bytes. */ const unsigned char rtx_size[NUM_RTX_CODE] = { #define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) \ ((ENUM) == CONST_INT || (ENUM) == CONST_DOUBLE \ ? RTX_HDR_SIZE + (sizeof FORMAT - 1) * sizeof (HOST_WIDE_INT) \ : RTX_HDR_SIZE + (sizeof FORMAT - 1) * sizeof (rtunion)), /* This file contains the definitions and documentation for the Register Transfer Expressions (rtx's) that make up the Register Transfer Language (rtl) used in the Back End of the GNU compiler. Copyright (C) 1987, 1988, 1992, 1994, 1995, 1997, 1998, 1999, 2000, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Expression definitions and descriptions for all targets are in this file. Some will not be used for some targets. 
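(A worked instance of the rtx_size initializer above, stated only for illustration: a code whose format string is "ee" gets the entry RTX_HDR_SIZE + 2 * sizeof (rtunion), while CONST_INT and CONST_DOUBLE use sizeof (HOST_WIDE_INT) for each operand instead.)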
The fields in the cpp macro call "DEF_RTL_EXPR()" are used to create declarations in the C source of the compiler. The fields are: 1. The internal name of the rtx used in the C source. It is a tag in the enumeration "enum rtx_code" defined in "rtl.h". By convention these are in UPPER_CASE. 2. The name of the rtx in the external ASCII format read by read_rtx(), and printed by print_rtx(). These names are stored in rtx_name[]. By convention these are the internal (field 1) names in lower_case. 3. The print format, and type of each rtx->u.fld[] (field) in this rtx. These formats are stored in rtx_format[]. The meaning of the formats is documented in front of this array in rtl.c 4. The class of the rtx. These are stored in rtx_class and are accessed via the GET_RTX_CLASS macro. They are defined as follows: RTX_CONST_OBJ an rtx code that can be used to represent a constant object (e.g, CONST_INT) RTX_OBJ an rtx code that can be used to represent an object (e.g, REG, MEM) RTX_COMPARE an rtx code for a comparison (e.g, LT, GT) RTX_COMM_COMPARE an rtx code for a commutative comparison (e.g, EQ, NE, ORDERED) RTX_UNARY an rtx code for a unary arithmetic expression (e.g, NEG, NOT) RTX_COMM_ARITH an rtx code for a commutative binary operation (e.g,, PLUS, MULT) RTX_TERNARY an rtx code for a non-bitfield three input operation (IF_THEN_ELSE) RTX_BIN_ARITH an rtx code for a non-commutative binary operation (e.g., MINUS, DIV) RTX_BITFIELD_OPS an rtx code for a bit-field operation (ZERO_EXTRACT, SIGN_EXTRACT) RTX_INSN an rtx code for a machine insn (INSN, JUMP_INSN, CALL_INSN) RTX_MATCH an rtx code for something that matches in insns (e.g, MATCH_DUP) RTX_AUTOINC an rtx code for autoincrement addressing modes (e.g. POST_DEC) RTX_EXTRA everything else */ /* --------------------------------------------------------------------- Expressions (and "meta" expressions) used for structuring the rtl representation of a program. --------------------------------------------------------------------- */ /* an expression code name unknown to the reader */ DEF_RTL_EXPR(UNKNOWN, "UnKnown", "*", RTX_EXTRA) /* (NIL) is used by rtl reader and printer to represent a null pointer. */ DEF_RTL_EXPR(NIL, "nil", "*", RTX_EXTRA) /* include a file */ DEF_RTL_EXPR(INCLUDE, "include", "s", RTX_EXTRA) /* --------------------------------------------------------------------- Expressions used in constructing lists. --------------------------------------------------------------------- */ /* a linked list of expressions */ DEF_RTL_EXPR(EXPR_LIST, "expr_list", "ee", RTX_EXTRA) /* a linked list of instructions. The insns are represented in print by their uids. */ DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types for machine descriptions. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Appears only in machine descriptions. Means use the function named by the second arg (the string) as a predicate; if matched, store the structure that was matched in the operand table at index specified by the first arg (the integer). If the second arg is the null string, the structure is just stored. A third string argument indicates to the register allocator restrictions on where the operand can be allocated. If the target needs no restriction on any instruction this field should be the null string. The string is prepended by: '=' to indicate the operand is only written to. 
'+' to indicate the operand is both read and written to. Each character in the string represents an allocable class for an operand. 'g' indicates the operand can be any valid class. 'i' indicates the operand can be immediate (in the instruction) data. 'r' indicates the operand can be in a register. 'm' indicates the operand can be in memory. 'o' a subset of the 'm' class. Those memory addressing modes that can be offset at compile time (have a constant added to them). Other characters indicate target dependent operand classes and are described in each target's machine description. For instructions with more than one operand, sets of classes can be separated by a comma to indicate the appropriate multi-operand constraints. There must be a 1 to 1 correspondence between these sets of classes in all operands for an instruction. */ DEF_RTL_EXPR(MATCH_OPERAND, "match_operand", "iss", RTX_MATCH) /* Appears only in machine descriptions. Means match a SCRATCH or a register. When used to generate rtl, a SCRATCH is generated. As for MATCH_OPERAND, the mode specifies the desired mode and the first argument is the operand number. The second argument is the constraint. */ DEF_RTL_EXPR(MATCH_SCRATCH, "match_scratch", "is", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. */ DEF_RTL_EXPR(MATCH_DUP, "match_dup", "i", RTX_MATCH) /* Appears only in machine descriptions. Means apply a predicate, AND match recursively the operands of the rtx. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply (as a string, a function name). Operand 2 is a vector of expressions, each of which must match one subexpression of the rtx this construct is matching. */ DEF_RTL_EXPR(MATCH_OPERATOR, "match_operator", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means to match a PARALLEL of arbitrary length. The predicate is applied to the PARALLEL and the initial expressions in the PARALLEL are matched. Operand 0 is the operand-number, as in match_operand. Operand 1 is a predicate to apply to the PARALLEL. Operand 2 is a vector of expressions, each of which must match the corresponding element in the PARALLEL. */ DEF_RTL_EXPR(MATCH_PARALLEL, "match_parallel", "isE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_OPERATOR. */ DEF_RTL_EXPR(MATCH_OP_DUP, "match_op_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Means match only something equal to what is stored in the operand table at the index specified by the argument. For MATCH_PARALLEL. */ DEF_RTL_EXPR(MATCH_PAR_DUP, "match_par_dup", "iE", RTX_MATCH) /* Appears only in machine descriptions. Defines the pattern for one kind of instruction. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: optionally, a vector of attributes for this insn. 
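A minimal hypothetical example (the instruction name, constraints and assembler template are invented, not taken from a real port): (define_insn "hyp_addsi3" [(set (match_operand:SI 0 "register_operand" "=r") (plus:SI (match_operand:SI 1 "register_operand" "r") (match_operand:SI 2 "register_operand" "r")))] "" "add %0,%1,%2")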
*/ DEF_RTL_EXPR(DEFINE_INSN, "define_insn", "sEsTV", RTX_EXTRA) /* Definition of a peephole optimization. 1st operand: vector of insn patterns to match 2nd operand: C expression that must be true 3rd operand: template or C code to produce assembler output. 4: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE, "define_peephole", "EsTV", RTX_EXTRA) /* Definition of a split operation. 1st operand: insn pattern to match 2nd operand: C expression that must be true 3rd operand: vector of insn patterns to place into a SEQUENCE 4th operand: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_SPLIT, "define_split", "EsES", RTX_EXTRA) /* Definition of an insn and associated split. This is the concatenation, with a few modifications, of a define_insn and a define_split which share the same pattern. Operand: 0: names this instruction. If the name is the null string, the instruction is in the machine description just to be recognized, and will never be emitted by the tree to rtl expander. 1: is the pattern. 2: is a string which is a C expression giving an additional condition for recognizing this pattern. A null string means no extra condition. 3: is the action to execute if this pattern is matched. If this assembler code template starts with a * then it is a fragment of C code to run to decide on a template to use. Otherwise, it is the template to use. 4: C expression that must be true for split. This may start with "&&" in which case the split condition is the logical and of the insn condition and what follows the "&&" of this operand. 5: vector of insn patterns to place into a SEQUENCE 6: optionally, some C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). 7: optionally, a vector of attributes for this insn. */ DEF_RTL_EXPR(DEFINE_INSN_AND_SPLIT, "define_insn_and_split", "sEsTsESV", RTX_EXTRA) /* Definition of an RTL peephole operation. Follows the same arguments as define_split. */ DEF_RTL_EXPR(DEFINE_PEEPHOLE2, "define_peephole2", "EsES", RTX_EXTRA) /* Define how to generate multiple insns for a standard insn name. 1st operand: the insn name. 2nd operand: vector of insn-patterns. Use match_operand to substitute an element of `recog_data.operand'. 3rd operand: C expression that must be true for this to be available. This may not test any operands. 4th operand: Extra C code to execute before generating the insns. This might, for example, create some RTX's and store them in elements of `recog_data.operand' for use by the vector of insn-patterns. (`operands' is an alias here for `recog_data.operand'). */ DEF_RTL_EXPR(DEFINE_EXPAND, "define_expand", "sEss", RTX_EXTRA) /* Define a requirement for delay slots. 1st operand: Condition involving insn attributes that, if true, indicates that the insn requires the number of delay slots shown. 2nd operand: Vector whose length is the three times the number of delay slots required. Each entry gives three conditions, each involving attributes. The first must be true for an insn to occupy that delay slot location. 
The second is true for all insns that can be annulled if the branch is true and the third is true for all insns that can be annulled if the branch is false. Multiple DEFINE_DELAYs may be present. They indicate differing requirements for delay slots. */ DEF_RTL_EXPR(DEFINE_DELAY, "define_delay", "eE", RTX_EXTRA) /* Define a set of insns that requires a function unit. This means that these insns produce their result after a delay and that there may be restrictions on the number of insns of this type that can be scheduled simultaneously. More than one DEFINE_FUNCTION_UNIT can be specified for a function unit. Each gives a set of operations and associated delays. The first three operands must be the same for each operation for the same function unit. All delays are specified in cycles. 1st operand: Name of function unit (mostly for documentation) 2nd operand: Number of identical function units in CPU 3rd operand: Total number of simultaneous insns that can execute on this function unit; 0 if unlimited. 4th operand: Condition involving insn attribute, that, if true, specifies those insns that this expression applies to. 5th operand: Constant delay after which insn result will be available. 6th operand: Delay until next insn can be scheduled on the function unit executing this operation. The meaning depends on whether or not the next operand is supplied. 7th operand: If this operand is not specified, the 6th operand gives the number of cycles after the instruction matching the 4th operand begins using the function unit until a subsequent insn can begin. A value of zero should be used for a unit with no issue constraints. If only one operation can be executed a time and the unit is busy for the entire time, the 3rd operand should be specified as 1, the 6th operand should be specified as 0, and the 7th operand should not be specified. If this operand is specified, it is a list of attribute expressions. If an insn for which any of these expressions is true is currently executing on the function unit, the issue delay will be given by the 6th operand. Otherwise, the insn can be immediately scheduled (subject to the limit on the number of simultaneous operations executing on the unit.) */ DEF_RTL_EXPR(DEFINE_FUNCTION_UNIT, "define_function_unit", "siieiiV", RTX_EXTRA) /* Define attribute computation for `asm' instructions. */ DEF_RTL_EXPR(DEFINE_ASM_ATTRIBUTES, "define_asm_attributes", "V", RTX_EXTRA) /* Definition of a conditional execution meta operation. Automatically generates new instances of DEFINE_INSN, selected by having attribute "predicable" true. The new pattern will contain a COND_EXEC and the predicate at top-level. Operand: 0: The predicate pattern. The top-level form should match a relational operator. Operands should have only one alternative. 1: A C expression giving an additional condition for recognizing the generated pattern. 2: A template or C code to produce assembler output. */ DEF_RTL_EXPR(DEFINE_COND_EXEC, "define_cond_exec", "Ess", RTX_EXTRA) /* SEQUENCE appears in the result of a `gen_...' function for a DEFINE_EXPAND that wants to make several insns. Its elements are the bodies of the insns that should be made. `emit_insn' takes the SEQUENCE apart and makes separate insns. */ DEF_RTL_EXPR(SEQUENCE, "sequence", "E", RTX_EXTRA) /* Refers to the address of its argument. This is only used in alias.c. 
*/ DEF_RTL_EXPR(ADDRESS, "address", "e", RTX_MATCH) /* ---------------------------------------------------------------------- Constructions for CPU pipeline description described by NDFAs. These do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* (define_cpu_unit string [string]) describes cpu functional units (separated by commas). 1st operand: Names of cpu functional units. 2nd operand: Name of automaton (see comments for DEFINE_AUTOMATON). All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing". */ DEF_RTL_EXPR(DEFINE_CPU_UNIT, "define_cpu_unit", "sS", RTX_EXTRA) /* (define_query_cpu_unit string [string]) describes cpu functional units analogously to define_cpu_unit. The reservation of such units can be queried for the automaton state. */ DEF_RTL_EXPR(DEFINE_QUERY_CPU_UNIT, "define_query_cpu_unit", "sS", RTX_EXTRA) /* (exclusion_set string string) means that each CPU functional unit in the first string can not be reserved simultaneously with any unit whose name is in the second string and vice versa. CPU units in the strings are separated by commas. For example, it is useful for describing a CPU with a fully pipelined floating point functional unit which can simultaneously execute only single precision floating point insns or only double precision floating point insns. All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(EXCLUSION_SET, "exclusion_set", "ss", RTX_EXTRA) /* (presence_set string string) means that each CPU functional unit in the first string can not be reserved unless at least one pattern of units whose names are in the second string is reserved. This is an asymmetric relation. CPU units or unit patterns in the strings are separated by commas. A pattern is one unit name or several unit names separated by white space. For example, it is useful for describing that slot1 is reserved after slot0 reservation for a VLIW processor. We could describe it by the following construction (presence_set "slot1" "slot0") Or slot1 is reserved only after slot0 and unit b0 are reserved. In this case we could write (presence_set "slot1" "slot0 b0") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(PRESENCE_SET, "presence_set", "ss", RTX_EXTRA) /* (final_presence_set string string) is analogous to `presence_set'. The difference between them is when checking is done. When an instruction is issued in a given automaton state reflecting all current and planned unit reservations, the automaton state is changed. The first state is a source state, the second one is a result state. Checking for `presence_set' is done on the source state reservation, checking for `final_presence_set' is done on the result reservation. This construction is useful to describe a reservation which is actually two subsequent reservations. For example, if we use (presence_set "slot1" "slot0") the following insn will never be issued (because slot1 requires slot0 which is absent in the source state). (define_reservation "insn_and_nop" "slot0 + slot1") but it can be issued if we use the analogous `final_presence_set'. */ DEF_RTL_EXPR(FINAL_PRESENCE_SET, "final_presence_set", "ss", RTX_EXTRA) /* (absence_set string string) means that each CPU functional unit in the first string can be reserved only if each pattern of units whose names are in the second string is not reserved.
This is an asymmetric relation (actually an exclusion set is analogous to this one but is symmetric). CPU units or unit patterns in the strings are separated by commas. A pattern is one unit name or several unit names separated by white space. For example, it is useful for describing that slot2 can not be reserved after slot0 or slot1 reservation for a VLIW processor. We could describe it by the following construction (absence_set "slot2" "slot0, slot1") Or slot2 can not be reserved if slot0 and unit b0 are reserved or slot1 and unit b1 are reserved. In this case we could write (absence_set "slot2" "slot0 b0, slot1 b1") All CPU functional units in a set should belong to the same automaton. */ DEF_RTL_EXPR(ABSENCE_SET, "absence_set", "ss", RTX_EXTRA) /* (final_absence_set string string) is analogous to `absence_set' but checking is done on the result (state) reservation. See comments for `final_presence_set'. */ DEF_RTL_EXPR(FINAL_ABSENCE_SET, "final_absence_set", "ss", RTX_EXTRA) /* (define_bypass number out_insn_names in_insn_names) names a bypass with the given latency (the first number) from insns given by the first string (see define_insn_reservation) into insns given by the second string. Insn names in the strings are separated by commas. The third operand is the optional name of a function which is an additional guard for the bypass. The function will get the two insns as parameters. If the function returns zero the bypass will be ignored for this case. The additional guard is necessary to recognize complicated bypasses, e.g. when the consumer is a load address. */ DEF_RTL_EXPR(DEFINE_BYPASS, "define_bypass", "issS", RTX_EXTRA) /* (define_automaton string) describes names of automata generated and used for pipeline hazards recognition. The names are separated by commas. Actually it is possible to generate a single automaton, but unfortunately it can be very large. If we use more than one automaton, the summary size of the automata is usually less than that of the single one. The automaton name is used in define_cpu_unit and define_query_cpu_unit. All automata should have unique names. */ DEF_RTL_EXPR(DEFINE_AUTOMATON, "define_automaton", "s", RTX_EXTRA) /* (automata_option string) describes an option for generation of automata. Currently there are the following options: o "no-minimization" which makes no minimization of automata. This is only worth doing when we are debugging the description and need to look more accurately at reservations of states. o "time" which means printing additional time statistics about generation of automata. o "v" which means generation of a file describing the result automata. The file has suffix `.dfa' and can be used for description verification and debugging. o "w" which means generation of warnings instead of errors for non-critical errors. o "ndfa" which makes nondeterministic finite state automata. o "progress" which means output of a progress bar showing how many states were generated so far for the automaton being processed. */ DEF_RTL_EXPR(AUTOMATA_OPTION, "automata_option", "s", RTX_EXTRA) /* (define_reservation string string) names a reservation (the first string) of cpu functional units (the 2nd string). Sometimes unit reservations for different insns contain common parts. In such cases, you can describe the common part and use its name (the 1st parameter) in a regular expression in define_insn_reservation. All define_reservations, define_cpu_units, and define_query_cpu_units should have unique names which may not be "nothing".
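For illustration only (the names are hypothetical), (define_reservation "hyp_decode" "hyp_slot0 | hyp_slot1") names a common part of several reservations, which the regular expressions of other define_insn_reservation constructions can then refer to simply as "hyp_decode".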
*/ DEF_RTL_EXPR(DEFINE_RESERVATION, "define_reservation", "ss", RTX_EXTRA) /* (define_insn_reservation name default_latency condition regexpr) describes the reservation of cpu functional units (the 3rd operand) for an instruction which is selected by the condition (the 2nd parameter). The first parameter is used for output of debugging information. The reservations are described by a regular expression according to the following syntax:
 regexp = regexp "," oneof | oneof
 oneof = oneof "|" allof | allof
 allof = allof "+" repeat | repeat
 repeat = element "*" number | element
 element = cpu_function_unit_name | reservation_name | result_name | "nothing" | "(" regexp ")"
 1. "," is used for describing the start of the next cycle in the reservation.
 2. "|" is used for describing the reservation described by the first regular expression *or* the reservation described by the second regular expression *or* etc.
 3. "+" is used for describing the reservation described by the first regular expression *and* the reservation described by the second regular expression *and* etc.
 4. "*" is used for convenience and simply means a sequence in which the regular expression is repeated NUMBER times with cycle advancing (see ",").
 5. A cpu functional unit name means its reservation.
 6. A reservation name -- see define_reservation.
 7. The string "nothing" means no unit reservation. */ DEF_RTL_EXPR(DEFINE_INSN_RESERVATION, "define_insn_reservation", "sies", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions used for insn attributes. These also do not appear in actual rtl code in the compiler. ---------------------------------------------------------------------- */ /* Definition of an insn attribute. 1st operand: name of the attribute 2nd operand: comma-separated list of possible attribute values 3rd operand: expression for the default value of the attribute. */ DEF_RTL_EXPR(DEFINE_ATTR, "define_attr", "sse", RTX_EXTRA) /* Marker for the name of an attribute. */ DEF_RTL_EXPR(ATTR, "attr", "s", RTX_EXTRA) /* For use in the last (optional) operand of DEFINE_INSN or DEFINE_PEEPHOLE and in DEFINE_ASM_INSN to specify an attribute to assign to insns matching that pattern. (set_attr "name" "value") is equivalent to (set (attr "name") (const_string "value")) */ DEF_RTL_EXPR(SET_ATTR, "set_attr", "ss", RTX_EXTRA) /* In the last operand of DEFINE_INSN and DEFINE_PEEPHOLE, this can be used to specify that attribute values are to be assigned according to the alternative matched. The following three expressions are equivalent: (set (attr "att") (cond [(eq_attr "alternative" "1") (const_string "a1") (eq_attr "alternative" "2") (const_string "a2")] (const_string "a3"))) (set_attr_alternative "att" [(const_string "a1") (const_string "a2") (const_string "a3")]) (set_attr "att" "a1,a2,a3") */ DEF_RTL_EXPR(SET_ATTR_ALTERNATIVE, "set_attr_alternative", "sE", RTX_EXTRA) /* A conditional expression true if the value of the specified attribute of the current insn equals the specified value. The first operand is the attribute name and the second is the comparison value. */ DEF_RTL_EXPR(EQ_ATTR, "eq_attr", "ss", RTX_EXTRA) /* A special case of the above representing a set of alternatives. The first operand is a bitmap of the set, the second one is the default value. */ DEF_RTL_EXPR(EQ_ATTR_ALT, "eq_attr_alt", "ii", RTX_EXTRA) /* A conditional expression which is true if the specified flag is true for the insn being scheduled in reorg.
genattr.c defines the following flags which can be tested by (attr_flag "foo") expressions in eligible_for_delay. forward, backward, very_likely, likely, very_unlikely, and unlikely. */ DEF_RTL_EXPR (ATTR_FLAG, "attr_flag", "s", RTX_EXTRA) /* ---------------------------------------------------------------------- Expression types used for things in the instruction chain. All formats must start with "iuu" to handle the chain. Each insn expression holds an rtl instruction and its semantics during back-end processing. See macros's in "rtl.h" for the meaning of each rtx->u.fld[]. ---------------------------------------------------------------------- */ /* An instruction that cannot jump. */ DEF_RTL_EXPR(INSN, "insn", "iuuBieiee", RTX_INSN) /* An instruction that can possibly jump. Fields ( rtx->u.fld[] ) have exact same meaning as INSN's. */ DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "iuuBieiee0", RTX_INSN) /* An instruction that can possibly call a subroutine but which will not change which instruction comes next in the current function. Field ( rtx->u.fld[9] ) is CALL_INSN_FUNCTION_USAGE. All other fields ( rtx->u.fld[] ) have exact same meaning as INSN's. */ DEF_RTL_EXPR(CALL_INSN, "call_insn", "iuuBieieee", RTX_INSN) /* A marker that indicates that control will not flow through. */ DEF_RTL_EXPR(BARRIER, "barrier", "iuu000000", RTX_EXTRA) /* Holds a label that is followed by instructions. Operand: 4: is used in jump.c for the use-count of the label. 5: is used in flow.c to point to the chain of label_ref's to this label. 6: is a number that is unique in the entire compilation. 7: is the user-given name of the label, if any. */ DEF_RTL_EXPR(CODE_LABEL, "code_label", "iuuB00is", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: unused if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: CODE_LABEL_NUMBER if line number == NOTE_INSN_DELETED_LABEL. */ #else /* Say where in the code a source line starts, for symbol table's sake. Operand: 4: filename, if line number > 0, note-specific data otherwise. 5: line number if > 0, enum note_insn otherwise. 6: unique number if line number == note_insn_deleted_label. */ #endif DEF_RTL_EXPR(NOTE, "note", "iuuB0ni", RTX_EXTRA) /* ---------------------------------------------------------------------- Top level constituents of INSN, JUMP_INSN and CALL_INSN. ---------------------------------------------------------------------- */ /* Conditionally execute code. Operand 0 is the condition that if true, the code is executed. Operand 1 is the code to be executed (typically a SET). Semantics are that there are no side effects if the condition is false. This pattern is created automatically by the if_convert pass run after reload or by target-specific splitters. */ DEF_RTL_EXPR(COND_EXEC, "cond_exec", "ee", RTX_EXTRA) /* Several operations to be done in parallel (perhaps under COND_EXEC). */ DEF_RTL_EXPR(PARALLEL, "parallel", "E", RTX_EXTRA) /* A string that is passed through to the assembler as input. One can obviously pass comments through by using the assembler comment syntax. These occur in an insn all by themselves as the PATTERN. They also appear inside an ASM_OPERANDS as a convenient way to hold a string. */ DEF_RTL_EXPR(ASM_INPUT, "asm_input", "s", RTX_EXTRA) #ifdef USE_MAPPED_LOCATION /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 
3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEi", RTX_EXTRA) #else /* An assembler instruction with operands. 1st operand is the instruction template. 2nd operand is the constraint for the output. 3rd operand is the number of the output this expression refers to. When an insn stores more than one value, a separate ASM_OPERANDS is made for each output; this integer distinguishes them. 4th is a vector of values of input operands. 5th is a vector of modes and constraints for the input operands. Each element is an ASM_INPUT containing a constraint string and whose mode indicates the mode of the input operand. 6th is the name of the containing source file. 7th is the source line number. */ DEF_RTL_EXPR(ASM_OPERANDS, "asm_operands", "ssiEEsi", RTX_EXTRA) #endif /* A machine-specific operation. 1st operand is a vector of operands being used by the operation so that any needed reloads can be done. 2nd operand is a unique value saying which of a number of machine-specific operations is to be performed. (Note that the vector must be the first operand because of the way that genrecog.c record positions within an insn.) This can occur all by itself in a PATTERN, as a component of a PARALLEL, or inside an expression. */ DEF_RTL_EXPR(UNSPEC, "unspec", "Ei", RTX_EXTRA) /* Similar, but a volatile operation and one which may trap. */ DEF_RTL_EXPR(UNSPEC_VOLATILE, "unspec_volatile", "Ei", RTX_EXTRA) /* Vector of addresses, stored as full words. */ /* Each element is a LABEL_REF to a CODE_LABEL whose address we want. */ DEF_RTL_EXPR(ADDR_VEC, "addr_vec", "E", RTX_EXTRA) /* Vector of address differences X0 - BASE, X1 - BASE, ... First operand is BASE; the vector contains the X's. The machine mode of this rtx says how much space to leave for each difference and is adjusted by branch shortening if CASE_VECTOR_SHORTEN_MODE is defined. The third and fourth operands store the target labels with the minimum and maximum addresses respectively. The fifth operand stores flags for use by branch shortening. Set at the start of shorten_branches: min_align: the minimum alignment for any of the target labels. base_after_vec: true iff BASE is after the ADDR_DIFF_VEC. min_after_vec: true iff minimum addr target label is after the ADDR_DIFF_VEC. max_after_vec: true iff maximum addr target label is after the ADDR_DIFF_VEC. min_after_base: true iff minimum address target label is after BASE. max_after_base: true iff maximum address target label is after BASE. Set by the actual branch shortening process: offset_unsigned: true iff offsets have to be treated as unsigned. scale: scaling that is necessary to make offsets fit into the mode. The third, fourth and fifth operands are only valid when CASE_VECTOR_SHORTEN_MODE is defined, and only in an optimizing compilations. */ DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEee0", RTX_EXTRA) /* Memory prefetch, with attributes supported on some targets. Operand 1 is the address of the memory to fetch. Operand 2 is 1 for a write access, 0 otherwise. 
Operand 3 is the level of temporal locality; 0 means there is no temporal locality and 1, 2, and 3 are for increasing levels of temporal locality. The attributes specified by operands 2 and 3 are ignored for targets whose prefetch instructions do not support them. */ DEF_RTL_EXPR(PREFETCH, "prefetch", "eee", RTX_EXTRA) /* ---------------------------------------------------------------------- At the top level of an instruction (perhaps under PARALLEL). ---------------------------------------------------------------------- */ /* Assignment. Operand 1 is the location (REG, MEM, PC, CC0 or whatever) assigned to. Operand 2 is the value stored there. ALL assignment must use SET. Instructions that do multiple assignments must use multiple SET, under PARALLEL. */ DEF_RTL_EXPR(SET, "set", "ee", RTX_EXTRA) /* Indicate something is used in a way that we don't want to explain. For example, subroutine calls will use the register in which the static chain is passed. */ DEF_RTL_EXPR(USE, "use", "e", RTX_EXTRA) /* Indicate something is clobbered in a way that we don't want to explain. For example, subroutine calls will clobber some physical registers (the ones that are by convention not saved). */ DEF_RTL_EXPR(CLOBBER, "clobber", "e", RTX_EXTRA) /* Call a subroutine. Operand 1 is the address to call. Operand 2 is the number of arguments. */ DEF_RTL_EXPR(CALL, "call", "ee", RTX_EXTRA) /* Return from a subroutine. */ DEF_RTL_EXPR(RETURN, "return", "", RTX_EXTRA) /* Conditional trap. Operand 1 is the condition. Operand 2 is the trap code. For an unconditional trap, make the condition (const_int 1). */ DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA) /* Placeholder for _Unwind_Resume before we know if a function call or a branch is needed. Operand 1 is the exception region from which control is flowing. */ DEF_RTL_EXPR(RESX, "resx", "i", RTX_EXTRA) /* ---------------------------------------------------------------------- Primitive values for use in expressions. ---------------------------------------------------------------------- */ /* numeric integer constant */ DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ) /* numeric floating point constant. Operands hold the value. They are all 'w' and there may be from 2 to 6; see real.h. */ DEF_RTL_EXPR(CONST_DOUBLE, "const_double", CONST_DOUBLE_FORMAT, RTX_CONST_OBJ) /* Describes a vector constant. */ DEF_RTL_EXPR(CONST_VECTOR, "const_vector", "E", RTX_EXTRA) /* String constant. Used only for attributes right now. */ DEF_RTL_EXPR(CONST_STRING, "const_string", "s", RTX_OBJ) /* This is used to encapsulate an expression whose value is constant (such as the sum of a SYMBOL_REF and a CONST_INT) so that it will be recognized as a constant operand rather than by arithmetic instructions. */ DEF_RTL_EXPR(CONST, "const", "e", RTX_CONST_OBJ) /* program counter. Ordinary jumps are represented by a SET whose first operand is (PC). */ DEF_RTL_EXPR(PC, "pc", "", RTX_OBJ) /* Used in the cselib routines to describe a value. */ DEF_RTL_EXPR(VALUE, "value", "0", RTX_OBJ) /* A register. The "operand" is the register number, accessed with the REGNO macro. If this number is less than FIRST_PSEUDO_REGISTER than a hardware register is being referred to. The second operand holds the original register number - this will be different for a pseudo register that got turned into a hard register. This rtx needs to have as many (or more) fields as a MEM, since we can change REG rtx's into MEMs during reload. */ DEF_RTL_EXPR(REG, "reg", "i00", RTX_OBJ) /* A scratch register. 
This represents a register used only within a single insn. It will be turned into a REG during register allocation or reload unless the constraint indicates that the register won't be needed, in which case it can remain a SCRATCH. This code is marked as having one operand so it can be turned into a REG. */ DEF_RTL_EXPR(SCRATCH, "scratch", "0", RTX_OBJ) /* One word of a multi-word value. The first operand is the complete value; the second says which word. The WORDS_BIG_ENDIAN flag controls whether word number 0 (as numbered in a SUBREG) is the most or least significant word. This is also used to refer to a value in a different machine mode. For example, it can be used to refer to a SImode value as if it were Qimode, or vice versa. Then the word number is always 0. */ DEF_RTL_EXPR(SUBREG, "subreg", "ei", RTX_EXTRA) /* This one-argument rtx is used for move instructions that are guaranteed to alter only the low part of a destination. Thus, (SET (SUBREG:HI (REG...)) (MEM:HI ...)) has an unspecified effect on the high part of REG, but (SET (STRICT_LOW_PART (SUBREG:HI (REG...))) (MEM:HI ...)) is guaranteed to alter only the bits of REG that are in HImode. The actual instruction used is probably the same in both cases, but the register constraints may be tighter when STRICT_LOW_PART is in use. */ DEF_RTL_EXPR(STRICT_LOW_PART, "strict_low_part", "e", RTX_EXTRA) /* (CONCAT a b) represents the virtual concatenation of a and b to make a value that has as many bits as a and b put together. This is used for complex values. Normally it appears only in DECL_RTLs and during RTL generation, but not in the insn chain. */ DEF_RTL_EXPR(CONCAT, "concat", "ee", RTX_OBJ) /* A memory location; operand is the address. The second operand is the alias set to which this MEM belongs. We use `0' instead of `w' for this field so that the field need not be specified in machine descriptions. */ DEF_RTL_EXPR(MEM, "mem", "e0", RTX_OBJ) /* Reference to an assembler label in the code for this function. The operand is a CODE_LABEL found in the insn chain. The unprinted fields 1 and 2 are used in flow.c for the LABEL_NEXTREF and CONTAINING_INSN. */ DEF_RTL_EXPR(LABEL_REF, "label_ref", "u00", RTX_CONST_OBJ) /* Reference to a named label: Operand 0: label name Operand 1: flags (see SYMBOL_FLAG_* in rtl.h) Operand 2: tree from which this symbol is derived, or null. This is either a DECL node, or some kind of constant. */ DEF_RTL_EXPR(SYMBOL_REF, "symbol_ref", "s00", RTX_CONST_OBJ) /* The condition code register is represented, in our imagination, as a register holding a value that can be compared to zero. In fact, the machine has already compared them and recorded the results; but instructions that look at the condition code pretend to be looking at the entire value and comparing it. */ DEF_RTL_EXPR(CC0, "cc0", "", RTX_OBJ) /* ===================================================================== A QUEUED expression really points to a member of the queue of instructions to be output later for postincrement/postdecrement. QUEUED expressions never become part of instructions. When a QUEUED expression would be put into an instruction, instead either the incremented variable or a copy of its previous value is used. Operands are: 0. the variable to be incremented (a REG rtx). 1. the incrementing instruction, or 0 if it hasn't been output yet. 2. A REG rtx for a copy of the old value of the variable, or 0 if none yet. 3. the body to use for the incrementing instruction 4. the next QUEUED expression in the queue. 
====================================================================== */ DEF_RTL_EXPR(QUEUED, "queued", "eeeee", RTX_EXTRA) /* ---------------------------------------------------------------------- Expressions for operators in an rtl pattern ---------------------------------------------------------------------- */ /* if_then_else. This is used in representing ordinary conditional jump instructions. Operand: 0: condition 1: then expr 2: else expr */ DEF_RTL_EXPR(IF_THEN_ELSE, "if_then_else", "eee", RTX_TERNARY) /* General conditional. The first operand is a vector composed of pairs of expressions. The first element of each pair is evaluated, in turn. The value of the conditional is the second expression of the first pair whose first expression evaluates nonzero. If none of the expressions is true, the second operand will be used as the value of the conditional. This should be replaced with use of IF_THEN_ELSE. */ DEF_RTL_EXPR(COND, "cond", "Ee", RTX_EXTRA) /* Comparison, produces a condition code result. */ DEF_RTL_EXPR(COMPARE, "compare", "ee", RTX_BIN_ARITH) /* plus */ DEF_RTL_EXPR(PLUS, "plus", "ee", RTX_COMM_ARITH) /* Operand 0 minus operand 1. */ DEF_RTL_EXPR(MINUS, "minus", "ee", RTX_BIN_ARITH) /* Minus operand 0. */ DEF_RTL_EXPR(NEG, "neg", "e", RTX_UNARY) DEF_RTL_EXPR(MULT, "mult", "ee", RTX_COMM_ARITH) /* Operand 0 divided by operand 1. */ DEF_RTL_EXPR(DIV, "div", "ee", RTX_BIN_ARITH) /* Remainder of operand 0 divided by operand 1. */ DEF_RTL_EXPR(MOD, "mod", "ee", RTX_BIN_ARITH) /* Unsigned divide and remainder. */ DEF_RTL_EXPR(UDIV, "udiv", "ee", RTX_BIN_ARITH) DEF_RTL_EXPR(UMOD, "umod", "ee", RTX_BIN_ARITH) /* Bitwise operations. */ DEF_RTL_EXPR(AND, "and", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(IOR, "ior", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(XOR, "xor", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(NOT, "not", "e", RTX_UNARY) /* Operand: 0: value to be shifted. 1: number of bits. */ DEF_RTL_EXPR(ASHIFT, "ashift", "ee", RTX_BIN_ARITH) /* shift left */ DEF_RTL_EXPR(ROTATE, "rotate", "ee", RTX_BIN_ARITH) /* rotate left */ DEF_RTL_EXPR(ASHIFTRT, "ashiftrt", "ee", RTX_BIN_ARITH) /* arithmetic shift right */ DEF_RTL_EXPR(LSHIFTRT, "lshiftrt", "ee", RTX_BIN_ARITH) /* logical shift right */ DEF_RTL_EXPR(ROTATERT, "rotatert", "ee", RTX_BIN_ARITH) /* rotate right */ /* Minimum and maximum values of two operands. We need both signed and unsigned forms. (We cannot use MIN for SMIN because it conflicts with a macro of the same name.) */ DEF_RTL_EXPR(SMIN, "smin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(SMAX, "smax", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMIN, "umin", "ee", RTX_COMM_ARITH) DEF_RTL_EXPR(UMAX, "umax", "ee", RTX_COMM_ARITH) /* These unary operations are used to represent incrementation and decrementation as they occur in memory addresses. The amount of increment or decrement are not represented because they can be understood from the machine-mode of the containing MEM. These operations exist in only two cases: 1. pushes onto the stack. 2. created automatically by the life_analysis pass in flow.c. */ DEF_RTL_EXPR(PRE_DEC, "pre_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(PRE_INC, "pre_inc", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_DEC, "post_dec", "e", RTX_AUTOINC) DEF_RTL_EXPR(POST_INC, "post_inc", "e", RTX_AUTOINC) /* These binary operations are used to represent generic address side-effects in memory addresses, except for simple incrementation or decrementation which use the above operations. They are created automatically by the life_analysis pass in flow.c. 
The first operand is a REG which is used as the address. The second operand is an expression that is assigned to the register, either before (PRE_MODIFY) or after (POST_MODIFY) evaluating the address. Currently, the compiler can only handle second operands of the form (plus (reg) (reg)) and (plus (reg) (const_int)), where the first operand of the PLUS has to be the same register as the first operand of the *_MODIFY. */ DEF_RTL_EXPR(PRE_MODIFY, "pre_modify", "ee", RTX_AUTOINC) DEF_RTL_EXPR(POST_MODIFY, "post_modify", "ee", RTX_AUTOINC) /* Comparison operations. The ordered comparisons exist in two flavors, signed and unsigned. */ DEF_RTL_EXPR(NE, "ne", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(EQ, "eq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(GE, "ge", "ee", RTX_COMPARE) DEF_RTL_EXPR(GT, "gt", "ee", RTX_COMPARE) DEF_RTL_EXPR(LE, "le", "ee", RTX_COMPARE) DEF_RTL_EXPR(LT, "lt", "ee", RTX_COMPARE) DEF_RTL_EXPR(GEU, "geu", "ee", RTX_COMPARE) DEF_RTL_EXPR(GTU, "gtu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LEU, "leu", "ee", RTX_COMPARE) DEF_RTL_EXPR(LTU, "ltu", "ee", RTX_COMPARE) /* Additional floating point unordered comparison flavors. */ DEF_RTL_EXPR(UNORDERED, "unordered", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(ORDERED, "ordered", "ee", RTX_COMM_COMPARE) /* These are equivalent to unordered or ... */ DEF_RTL_EXPR(UNEQ, "uneq", "ee", RTX_COMM_COMPARE) DEF_RTL_EXPR(UNGE, "unge", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNGT, "ungt", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLE, "unle", "ee", RTX_COMPARE) DEF_RTL_EXPR(UNLT, "unlt", "ee", RTX_COMPARE) /* This is an ordered NE, ie !UNEQ, ie false for NaN. */ DEF_RTL_EXPR(LTGT, "ltgt", "ee", RTX_COMM_COMPARE) /* Represents the result of sign-extending the sole operand. The machine modes of the operand and of the SIGN_EXTEND expression determine how much sign-extension is going on. */ DEF_RTL_EXPR(SIGN_EXTEND, "sign_extend", "e", RTX_UNARY) /* Similar for zero-extension (such as unsigned short to int). */ DEF_RTL_EXPR(ZERO_EXTEND, "zero_extend", "e", RTX_UNARY) /* Similar but here the operand has a wider mode. */ DEF_RTL_EXPR(TRUNCATE, "truncate", "e", RTX_UNARY) /* Similar for extending floating-point values (such as SFmode to DFmode). */ DEF_RTL_EXPR(FLOAT_EXTEND, "float_extend", "e", RTX_UNARY) DEF_RTL_EXPR(FLOAT_TRUNCATE, "float_truncate", "e", RTX_UNARY) /* Conversion of fixed point operand to floating point value. */ DEF_RTL_EXPR(FLOAT, "float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to fixed point value. Value is defined only when the operand's value is an integer. With floating-point machine mode (and operand with same mode): Operand is rounded toward zero to produce an integer value represented in floating point. */ DEF_RTL_EXPR(FIX, "fix", "e", RTX_UNARY) /* Conversion of unsigned fixed point operand to floating point value. */ DEF_RTL_EXPR(UNSIGNED_FLOAT, "unsigned_float", "e", RTX_UNARY) /* With fixed-point machine mode: Conversion of floating point operand to *unsigned* fixed point value. Value is defined only when the operand's value is an integer. */ DEF_RTL_EXPR(UNSIGNED_FIX, "unsigned_fix", "e", RTX_UNARY) /* Absolute value */ DEF_RTL_EXPR(ABS, "abs", "e", RTX_UNARY) /* Square root */ DEF_RTL_EXPR(SQRT, "sqrt", "e", RTX_UNARY) /* Find first bit that is set. Value is 1 + number of trailing zeros in the arg., or 0 if arg is 0. */ DEF_RTL_EXPR(FFS, "ffs", "e", RTX_UNARY) /* Count leading zeros. */ DEF_RTL_EXPR(CLZ, "clz", "e", RTX_UNARY) /* Count trailing zeros. 
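(Illustration: (ctz:SI (const_int 8)) has three trailing zero bits, so its value is 3; the zero-operand case is not specified by this definition.)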
*/ DEF_RTL_EXPR(CTZ, "ctz", "e", RTX_UNARY) /* Population count (number of 1 bits). */ DEF_RTL_EXPR(POPCOUNT, "popcount", "e", RTX_UNARY) /* Population parity (number of 1 bits modulo 2). */ DEF_RTL_EXPR(PARITY, "parity", "e", RTX_UNARY) /* Reference to a signed bit-field of specified size and position. Operand 0 is the memory unit (usually SImode or QImode) which contains the field's first bit. Operand 1 is the width, in bits. Operand 2 is the number of bits in the memory unit before the first bit of this field. If BITS_BIG_ENDIAN is defined, the first bit is the msb and operand 2 counts from the msb of the memory unit. Otherwise, the first bit is the lsb and operand 2 counts from the lsb of the memory unit. */ DEF_RTL_EXPR(SIGN_EXTRACT, "sign_extract", "eee", RTX_BITFIELD_OPS) /* Similar for unsigned bit-field. */ DEF_RTL_EXPR(ZERO_EXTRACT, "zero_extract", "eee", RTX_BITFIELD_OPS) /* For RISC machines. These save memory when splitting insns. */ /* HIGH are the high-order bits of a constant expression. */ DEF_RTL_EXPR(HIGH, "high", "e", RTX_CONST_OBJ) /* LO_SUM is the sum of a register and the low-order bits of a constant expression. */ DEF_RTL_EXPR(LO_SUM, "lo_sum", "ee", RTX_OBJ) /* Header for range information. Operand 0 is the NOTE_INSN_RANGE_BEG insn. Operand 1 is the NOTE_INSN_RANGE_END insn. Operand 2 is a vector of all of the registers that can be substituted within this range. Operand 3 is the number of calls in the range. Operand 4 is the number of insns in the range. Operand 5 is the unique range number for this range. Operand 6 is the basic block # of the start of the live range. Operand 7 is the basic block # of the end of the live range. Operand 8 is the loop depth. Operand 9 is a bitmap of the registers live at the start of the range. Operand 10 is a bitmap of the registers live at the end of the range. Operand 11 is marker number for the start of the range. Operand 12 is the marker number for the end of the range. */ DEF_RTL_EXPR(RANGE_INFO, "range_info", "uuEiiiiiibbii", RTX_EXTRA) /* Registers that can be substituted within the range. Operand 0 is the original pseudo register number. Operand 1 will be filled in with the pseudo register the value is copied for the duration of the range. Operand 2 is the number of references within the range to the register. Operand 3 is the number of sets or clobbers of the register in the range. Operand 4 is the number of deaths the register has. Operand 5 is the copy flags that give the status of whether a copy is needed from the original register to the new register at the beginning of the range, or whether a copy from the new register back to the original at the end of the range. Operand 6 is the live length. Operand 7 is the number of calls that this register is live across. Operand 8 is the symbol node of the variable if the register is a user variable. Operand 9 is the block node that the variable is declared in if the register is a user variable. */ DEF_RTL_EXPR(RANGE_REG, "range_reg", "iiiiiiiitt", RTX_EXTRA) /* Information about a local variable's ranges. Operand 0 is an EXPR_LIST of the different ranges a variable is in where it is copied to a different pseudo register. Operand 1 is the block that the variable is declared in. Operand 2 is the number of distinct ranges. */ DEF_RTL_EXPR(RANGE_VAR, "range_var", "eti", RTX_EXTRA) /* Information about the registers that are live at the current point. Operand 0 is the live bitmap. Operand 1 is the original block number. 
*/ DEF_RTL_EXPR(RANGE_LIVE, "range_live", "bi", RTX_EXTRA)
/* Describes a merge operation between two vector values.
   Operands 0 and 1 are the vectors to be merged, operand 2 is a bitmask
   that specifies where the parts of the result are taken from. Set bits
   indicate operand 0, clear bits indicate operand 1. The parts are defined
   by the mode of the vectors. */
DEF_RTL_EXPR(VEC_MERGE, "vec_merge", "eee", RTX_TERNARY)
/* Describes an operation that selects parts of a vector. Operand 0 is the
   source vector, operand 1 is a PARALLEL that contains a CONST_INT for each
   of the subparts of the result vector, giving the number of the source
   subpart that should be stored into it. */
DEF_RTL_EXPR(VEC_SELECT, "vec_select", "ee", RTX_BIN_ARITH)
/* Describes a vector concat operation. Operands 0 and 1 are the source
   vectors; the result is a vector that is as long as operands 0 and 1
   combined and is the concatenation of the two source vectors. */
DEF_RTL_EXPR(VEC_CONCAT, "vec_concat", "ee", RTX_BIN_ARITH)
/* Describes an operation that converts a small vector into a larger one by
   duplicating the input values. The output vector mode must have the same
   submodes as the input vector mode, and the number of output parts must be
   an integer multiple of the number of input parts. */
DEF_RTL_EXPR(VEC_DUPLICATE, "vec_duplicate", "e", RTX_UNARY)
/* Addition with signed saturation. */
DEF_RTL_EXPR(SS_PLUS, "ss_plus", "ee", RTX_COMM_ARITH)
/* Addition with unsigned saturation. */
DEF_RTL_EXPR(US_PLUS, "us_plus", "ee", RTX_COMM_ARITH)
/* Operand 0 minus operand 1, with signed saturation. */
DEF_RTL_EXPR(SS_MINUS, "ss_minus", "ee", RTX_BIN_ARITH)
/* Operand 0 minus operand 1, with unsigned saturation. */
DEF_RTL_EXPR(US_MINUS, "us_minus", "ee", RTX_BIN_ARITH)
/* Signed saturating truncate. */
DEF_RTL_EXPR(SS_TRUNCATE, "ss_truncate", "e", RTX_UNARY)
/* Unsigned saturating truncate. */
DEF_RTL_EXPR(US_TRUNCATE, "us_truncate", "e", RTX_UNARY)
/* Information about the variable and its location. */
DEF_RTL_EXPR(VAR_LOCATION, "var_location", "te", RTX_EXTRA)
/* Local variables: mode:c End: */
#undef DEF_RTL_EXPR
};

/* Names for kinds of NOTEs and REG_NOTEs.
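(Indexing sketch, an assumption based on the array bounds and the usual rtl.h accessors: the printable name of a note would be note_insn_name[NOTE_LINE_NUMBER (insn) - (int) NOTE_INSN_BIAS], and the name of a register note reg_note_name[(int) REG_NOTE_KIND (link)].)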
*/ const char * const note_insn_name[NOTE_INSN_MAX - NOTE_INSN_BIAS] = { "", "NOTE_INSN_DELETED", "NOTE_INSN_BLOCK_BEG", "NOTE_INSN_BLOCK_END", "NOTE_INSN_LOOP_BEG", "NOTE_INSN_LOOP_END", "NOTE_INSN_LOOP_CONT", "NOTE_INSN_LOOP_VTOP", "NOTE_INSN_LOOP_END_TOP_COND", "NOTE_INSN_FUNCTION_END", "NOTE_INSN_PROLOGUE_END", "NOTE_INSN_EPILOGUE_BEG", "NOTE_INSN_DELETED_LABEL", "NOTE_INSN_FUNCTION_BEG", "NOTE_INSN_EH_REGION_BEG", "NOTE_INSN_EH_REGION_END", "NOTE_INSN_REPEATED_LINE_NUMBER", "NOTE_INSN_BASIC_BLOCK", "NOTE_INSN_EXPECTED_VALUE", "NOTE_INSN_PREDICTION", "NOTE_INSN_UNLIKELY_EXECUTED_CODE", "NOTE_INSN_VAR_LOCATION" }; const char * const reg_note_name[] = { "", "REG_DEAD", "REG_INC", "REG_EQUIV", "REG_EQUAL", "REG_RETVAL", "REG_LIBCALL", "REG_NONNEG", "REG_NO_CONFLICT", "REG_UNUSED", "REG_CC_SETTER", "REG_CC_USER", "REG_LABEL", "REG_DEP_ANTI", "REG_DEP_OUTPUT", "REG_BR_PROB", "REG_VALUE_PROFILE", "REG_NOALIAS", "REG_SAVE_AREA", "REG_BR_PRED", "REG_FRAME_RELATED_EXPR", "REG_EH_CONTEXT", "REG_EH_REGION", "REG_SAVE_NOTE", "REG_MAYBE_DEAD", "REG_NORETURN", "REG_NON_LOCAL_GOTO", "REG_CROSSING_JUMP", "REG_SETJMP", "REG_ALWAYS_RETURN", "REG_VTABLE_REF" }; #ifdef GATHER_STATISTICS static int rtx_alloc_counts[(int) LAST_AND_UNUSED_RTX_CODE]; static int rtx_alloc_sizes[(int) LAST_AND_UNUSED_RTX_CODE]; static int rtvec_alloc_counts; static int rtvec_alloc_sizes; #endif /* Allocate an rtx vector of N elements. Store the length, and initialize all elements to zero. */ rtvec rtvec_alloc (int n) { rtvec rt; rt = ggc_alloc_rtvec (n); /* Clear out the vector. */ memset (&rt->elem[0], 0, n * sizeof (rtx)); PUT_NUM_ELEM (rt, n); #ifdef GATHER_STATISTICS rtvec_alloc_counts++; rtvec_alloc_sizes += n * sizeof (rtx); #endif return rt; } /* Allocate an rtx of code CODE. The CODE is stored in the rtx; all the rest is initialized to zero. */ rtx rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL) { rtx rt; rt = ggc_alloc_typed_stat (gt_ggc_e_7rtx_def, RTX_SIZE (code) PASS_MEM_STAT); /* We want to clear everything up to the FLD array. Normally, this is one int, but we don't want to assume that and it isn't very portable anyway; this is. */ memset (rt, 0, RTX_HDR_SIZE); PUT_CODE (rt, code); #ifdef GATHER_STATISTICS rtx_alloc_counts[code]++; rtx_alloc_sizes[code] += RTX_SIZE (code); #endif return rt; } /* Create a new copy of an rtx. Recursively copies the operands of the rtx, except for those few rtx codes that are sharable. */ rtx copy_rtx (rtx orig) { rtx copy; int i, j; RTX_CODE code; const char *format_ptr; code = GET_CODE (orig); switch (code) { case REG: case QUEUED: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case CODE_LABEL: case PC: case CC0: case SCRATCH: /* SCRATCH must be shared because they represent distinct values. */ return orig; case CLOBBER: if (REG_P (XEXP (orig, 0)) && REGNO (XEXP (orig, 0)) < FIRST_PSEUDO_REGISTER) return orig; break; case CONST: /* CONST can be shared if it contains a SYMBOL_REF. If it contains a LABEL_REF, it isn't sharable. */ if (GET_CODE (XEXP (orig, 0)) == PLUS && GET_CODE (XEXP (XEXP (orig, 0), 0)) == SYMBOL_REF && GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT) return orig; break; /* A MEM with a constant address is not sharable. The problem is that the constant address may need to be reloaded. If the mem is shared, then reloading one copy of this mem will cause all copies to appear to have been reloaded. */ default: break; } copy = rtx_alloc (code); /* Copy the various flags, and other information. 
We assume that all fields need copying, and then clear the fields that should not be copied. That is the sensible default behavior, and forces us to explicitly document why we are *not* copying a flag. */ memcpy (copy, orig, RTX_HDR_SIZE); /* We do not copy the USED flag, which is used as a mark bit during walks over the RTL. */ RTX_FLAG (copy, used) = 0; /* We do not copy FRAME_RELATED for INSNs. */ if (INSN_P (orig)) RTX_FLAG (copy, frame_related) = 0; RTX_FLAG (copy, jump) = RTX_FLAG (orig, jump); RTX_FLAG (copy, call) = RTX_FLAG (orig, call); format_ptr = GET_RTX_FORMAT (GET_CODE (copy)); for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++) { copy->u.fld[i] = orig->u.fld[i]; switch (*format_ptr++) { case 'e': if (XEXP (orig, i) != NULL) XEXP (copy, i) = copy_rtx (XEXP (orig, i)); break; case 'E': case 'V': if (XVEC (orig, i) != NULL) { XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i)); for (j = 0; j < XVECLEN (copy, i); j++) XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j)); } break; case 't': case 'w': case 'i': case 's': case 'S': case 'T': case 'u': case 'B': case '0': /* These are left unchanged. */ break; default: abort (); } } return copy; } /* Create a new copy of an rtx. Only copy just one level. */ rtx shallow_copy_rtx_stat (rtx orig MEM_STAT_DECL) { rtx copy; copy = ggc_alloc_typed_stat (gt_ggc_e_7rtx_def, RTX_SIZE (GET_CODE (orig)) PASS_MEM_STAT); memcpy (copy, orig, RTX_SIZE (GET_CODE (orig))); return copy; } /* This is 1 until after the rtl generation pass. */ int rtx_equal_function_value_matters; /* Nonzero when we are generating CONCATs. */ int generating_concat_p; /* Return 1 if X and Y are identical-looking rtx's. This is the Lisp function EQUAL for rtx arguments. */ int rtx_equal_p (rtx x, rtx y) { int i; int j; enum rtx_code code; const char *fmt; if (x == y) return 1; if (x == 0 || y == 0) return 0; code = GET_CODE (x); /* Rtx's of different codes cannot be equal. */ if (code != GET_CODE (y)) return 0; /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. (REG:SI x) and (REG:HI x) are NOT equivalent. */ if (GET_MODE (x) != GET_MODE (y)) return 0; /* Some RTL can be compared nonrecursively. */ switch (code) { case REG: /* Until rtl generation is complete, don't consider a reference to the return register of the current function the same as the return from a called function. This eases the job of function integration. Once the distinction is no longer needed, they can be considered equivalent. */ return (REGNO (x) == REGNO (y) && (! rtx_equal_function_value_matters || REG_FUNCTION_VALUE_P (x) == REG_FUNCTION_VALUE_P (y))); case LABEL_REF: return XEXP (x, 0) == XEXP (y, 0); case SYMBOL_REF: return XSTR (x, 0) == XSTR (y, 0); case SCRATCH: case CONST_DOUBLE: case CONST_INT: case CONST_VECTOR: return 0; default: break; } /* Compare the elements. If any pair of corresponding elements fail to match, return 0 for the whole thing. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { switch (fmt[i]) { case 'w': if (XWINT (x, i) != XWINT (y, i)) return 0; break; case 'n': case 'i': if (XINT (x, i) != XINT (y, i)) return 0; break; case 'V': case 'E': /* Two vectors must have the same length. */ if (XVECLEN (x, i) != XVECLEN (y, i)) return 0; /* And the corresponding elements must match. */ for (j = 0; j < XVECLEN (x, i); j++) if (rtx_equal_p (XVECEXP (x, i, j), XVECEXP (y, i, j)) == 0) return 0; break; case 'e': if (rtx_equal_p (XEXP (x, i), XEXP (y, i)) == 0) return 0; break; case 'S': case 's': if ((XSTR (x, i) || XSTR (y, i)) && (! 
XSTR (x, i) || ! XSTR (y, i) || strcmp (XSTR (x, i), XSTR (y, i)))) return 0; break; case 'u': /* These are just backpointers, so they don't matter. */ break; case '0': case 't': break; /* It is believed that rtx's at this level will never contain anything but integers and other rtx's, except for within LABEL_REFs and SYMBOL_REFs. */ default: abort (); } } return 1; } void dump_rtx_statistics (void) { #ifdef GATHER_STATISTICS int i; int total_counts = 0; int total_sizes = 0; fprintf (stderr, "\nRTX Kind Count Bytes\n"); fprintf (stderr, "---------------------------------------\n"); for (i = 0; i < LAST_AND_UNUSED_RTX_CODE; i++) if (rtx_alloc_counts[i]) { fprintf (stderr, "%-20s %7d %10d\n", GET_RTX_NAME (i), rtx_alloc_counts[i], rtx_alloc_sizes[i]); total_counts += rtx_alloc_counts[i]; total_sizes += rtx_alloc_sizes[i]; } if (rtvec_alloc_counts) { fprintf (stderr, "%-20s %7d %10d\n", "rtvec", rtvec_alloc_counts, rtvec_alloc_sizes); total_counts += rtvec_alloc_counts; total_sizes += rtvec_alloc_sizes; } fprintf (stderr, "---------------------------------------\n"); fprintf (stderr, "%-20s %7d %10d\n", "Total", total_counts, total_sizes); fprintf (stderr, "---------------------------------------\n"); #endif } #if defined ENABLE_RTL_CHECKING && (GCC_VERSION >= 2007) void rtl_check_failed_bounds (rtx r, int n, const char *file, int line, const char *func) { internal_error ("RTL check: access of elt %d of `%s' with last elt %d in %s, at %s:%d", n, GET_RTX_NAME (GET_CODE (r)), GET_RTX_LENGTH (GET_CODE (r)) - 1, func, trim_filename (file), line); } void rtl_check_failed_type1 (rtx r, int n, int c1, const char *file, int line, const char *func) { internal_error ("RTL check: expected elt %d type '%c', have '%c' (rtx %s) in %s, at %s:%d", n, c1, GET_RTX_FORMAT (GET_CODE (r))[n], GET_RTX_NAME (GET_CODE (r)), func, trim_filename (file), line); } void rtl_check_failed_type2 (rtx r, int n, int c1, int c2, const char *file, int line, const char *func) { internal_error ("RTL check: expected elt %d type '%c' or '%c', have '%c' (rtx %s) in %s, at %s:%d", n, c1, c2, GET_RTX_FORMAT (GET_CODE (r))[n], GET_RTX_NAME (GET_CODE (r)), func, trim_filename (file), line); } void rtl_check_failed_code1 (rtx r, enum rtx_code code, const char *file, int line, const char *func) { internal_error ("RTL check: expected code `%s', have `%s' in %s, at %s:%d", GET_RTX_NAME (code), GET_RTX_NAME (GET_CODE (r)), func, trim_filename (file), line); } void rtl_check_failed_code2 (rtx r, enum rtx_code code1, enum rtx_code code2, const char *file, int line, const char *func) { internal_error ("RTL check: expected code `%s' or `%s', have `%s' in %s, at %s:%d", GET_RTX_NAME (code1), GET_RTX_NAME (code2), GET_RTX_NAME (GET_CODE (r)), func, trim_filename (file), line); } /* XXX Maybe print the vector? 
*/ void rtvec_check_failed_bounds (rtvec r, int n, const char *file, int line, const char *func) { internal_error ("RTL check: access of elt %d of vector with last elt %d in %s, at %s:%d", n, GET_NUM_ELEM (r) - 1, func, trim_filename (file), line); } #endif /* ENABLE_RTL_CHECKING */ #if defined ENABLE_RTL_FLAG_CHECKING void rtl_check_failed_flag (const char *name, rtx r, const char *file, int line, const char *func) { internal_error ("RTL flag check: %s used with unexpected rtx code `%s' in %s, at %s:%d", name, GET_RTX_NAME (GET_CODE (r)), func, trim_filename (file), line); } #endif /* ENABLE_RTL_FLAG_CHECKING */ /* Analyze RTL for C-Compiler Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Forward declarations */ static int global_reg_mentioned_p_1 (rtx *, void *); static void set_of_1 (rtx, rtx, void *); static void insn_dependent_p_1 (rtx, rtx, void *); static int rtx_referenced_p_1 (rtx *, void *); static int computed_jump_p_1 (rtx); static void parms_set (rtx, rtx, void *); static bool hoist_test_store (rtx, rtx, regset); static void hoist_update_store (rtx, rtx *, rtx, rtx); static unsigned HOST_WIDE_INT cached_nonzero_bits (rtx, enum machine_mode, rtx, enum machine_mode, unsigned HOST_WIDE_INT); static unsigned HOST_WIDE_INT nonzero_bits1 (rtx, enum machine_mode, rtx, enum machine_mode, unsigned HOST_WIDE_INT); static unsigned int cached_num_sign_bit_copies (rtx, enum machine_mode, rtx, enum machine_mode, unsigned int); static unsigned int num_sign_bit_copies1 (rtx, enum machine_mode, rtx, enum machine_mode, unsigned int); /* Bit flags that specify the machine subtype we are compiling for. Bits are tested using macros TARGET_... defined in the tm.h file and set by `-m...' switches. Must be defined in rtlanal.c. */ int target_flags; /* Return 1 if the value of X is unstable (would be different at a different point in the program). The frame pointer, arg pointer, etc. are considered stable (within one function) and so is anything marked `unchanging'. */ int rtx_unstable_p (rtx x) { RTX_CODE code = GET_CODE (x); int i; const char *fmt; switch (code) { case MEM: return ! RTX_UNCHANGING_P (x) || rtx_unstable_p (XEXP (x, 0)); case QUEUED: return 1; case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: return 0; case REG: /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */ if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx /* The arg pointer varies if it is not a fixed register. */ || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]) || RTX_UNCHANGING_P (x)) return 0; #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED /* ??? When call-clobbered, the value is stable modulo the restore that must happen after a call. 
This currently screws up local-alloc into believing that the restore is not needed. */ if (x == pic_offset_table_rtx) return 0; #endif return 1; case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 1; /* Fall through. */ default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') { if (rtx_unstable_p (XEXP (x, i))) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (rtx_unstable_p (XVECEXP (x, i, j))) return 1; } return 0; } /* Return 1 if X has a value that can vary even between two executions of the program. 0 means X can be compared reliably against certain constants or near-constants. FOR_ALIAS is nonzero if we are called from alias analysis; if it is zero, we are slightly more conservative. The frame pointer and the arg pointer are considered constant. */ int rtx_varies_p (rtx x, int for_alias) { RTX_CODE code; int i; const char *fmt; if (!x) return 0; code = GET_CODE (x); switch (code) { case MEM: return ! RTX_UNCHANGING_P (x) || rtx_varies_p (XEXP (x, 0), for_alias); case QUEUED: return 1; case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: return 0; case REG: /* Note that we have to test for the actual rtx used for the frame and arg pointers and not just the register number in case we have eliminated the frame and/or arg pointer and are using it for pseudos. */ if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx /* The arg pointer varies if it is not a fixed register. */ || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])) return 0; if (x == pic_offset_table_rtx #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED /* ??? When call-clobbered, the value is stable modulo the restore that must happen after a call. This currently screws up local-alloc into believing that the restore is not needed, so we must return 0 only if we are called from alias analysis. */ && for_alias #endif ) return 0; return 1; case LO_SUM: /* The operand 0 of a LO_SUM is considered constant (in fact it is related specifically to operand 1) during alias analysis. */ return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias)) || rtx_varies_p (XEXP (x, 1), for_alias); case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 1; /* Fall through. */ default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') { if (rtx_varies_p (XEXP (x, i), for_alias)) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (rtx_varies_p (XVECEXP (x, i, j), for_alias)) return 1; } return 0; } /* Return 0 if the use of X as an address in a MEM can cause a trap. */ int rtx_addr_can_trap_p (rtx x) { enum rtx_code code = GET_CODE (x); switch (code) { case SYMBOL_REF: return SYMBOL_REF_WEAK (x); case LABEL_REF: return 0; case REG: /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */ if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx || x == stack_pointer_rtx /* The arg pointer varies if it is not a fixed register. */ || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])) return 0; /* All of the virtual frame registers are stack references. */ if (REGNO (x) >= FIRST_VIRTUAL_REGISTER && REGNO (x) <= LAST_VIRTUAL_REGISTER) return 0; return 1; case CONST: return rtx_addr_can_trap_p (XEXP (x, 0)); case PLUS: /* An address is assumed not to trap if it is an address that can't trap plus a constant integer or it is the pic register plus a constant. */ return ! ((! 
rtx_addr_can_trap_p (XEXP (x, 0)) && GET_CODE (XEXP (x, 1)) == CONST_INT) || (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))); case LO_SUM: case PRE_MODIFY: return rtx_addr_can_trap_p (XEXP (x, 1)); case PRE_DEC: case PRE_INC: case POST_DEC: case POST_INC: case POST_MODIFY: return rtx_addr_can_trap_p (XEXP (x, 0)); default: break; } /* If it isn't one of the case above, it can cause a trap. */ return 1; } /* Return true if X is an address that is known to not be zero. */ bool nonzero_address_p (rtx x) { enum rtx_code code = GET_CODE (x); switch (code) { case SYMBOL_REF: return !SYMBOL_REF_WEAK (x); case LABEL_REF: return true; case REG: /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */ if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx || x == stack_pointer_rtx || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])) return true; /* All of the virtual frame registers are stack references. */ if (REGNO (x) >= FIRST_VIRTUAL_REGISTER && REGNO (x) <= LAST_VIRTUAL_REGISTER) return true; return false; case CONST: return nonzero_address_p (XEXP (x, 0)); case PLUS: if (GET_CODE (XEXP (x, 1)) == CONST_INT) { /* Pointers aren't allowed to wrap. If we've got a register that is known to be a pointer, and a positive offset, then the composite can't be zero. */ if (INTVAL (XEXP (x, 1)) > 0 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0))) return true; return nonzero_address_p (XEXP (x, 0)); } /* Handle PIC references. */ else if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1))) return true; return false; case PRE_MODIFY: /* Similar to the above; allow positive offsets. Further, since auto-inc is only allowed in memories, the register must be a pointer. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) > 0) return true; return nonzero_address_p (XEXP (x, 0)); case PRE_INC: /* Similarly. Further, the offset is always positive. */ return true; case PRE_DEC: case POST_DEC: case POST_INC: case POST_MODIFY: return nonzero_address_p (XEXP (x, 0)); case LO_SUM: return nonzero_address_p (XEXP (x, 1)); default: break; } /* If it isn't one of the case above, might be zero. */ return false; } /* Return 1 if X refers to a memory location whose address cannot be compared reliably with constant addresses, or if X refers to a BLKmode memory object. FOR_ALIAS is nonzero if we are called from alias analysis; if it is zero, we are slightly more conservative. */ int rtx_addr_varies_p (rtx x, int for_alias) { enum rtx_code code; int i; const char *fmt; if (x == 0) return 0; code = GET_CODE (x); if (code == MEM) return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias); fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') { if (rtx_addr_varies_p (XEXP (x, i), for_alias)) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias)) return 1; } return 0; } /* Return the value of the integer term in X, if one is apparent; otherwise return 0. Only obvious integer terms are detected. This is used in cse.c with the `related_value' field. */ HOST_WIDE_INT get_integer_term (rtx x) { if (GET_CODE (x) == CONST) x = XEXP (x, 0); if (GET_CODE (x) == MINUS && GET_CODE (XEXP (x, 1)) == CONST_INT) return - INTVAL (XEXP (x, 1)); if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT) return INTVAL (XEXP (x, 1)); return 0; } /* If X is a constant, return the value sans apparent integer term; otherwise return 0. 
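For example, given (const (plus (symbol_ref "foo") (const_int 4))), where "foo" is just an illustrative name, the related value is the SYMBOL_REF and get_integer_term above returns 4.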
Only obvious integer terms are detected. */ rtx get_related_value (rtx x) { if (GET_CODE (x) != CONST) return 0; x = XEXP (x, 0); if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT) return XEXP (x, 0); else if (GET_CODE (x) == MINUS && GET_CODE (XEXP (x, 1)) == CONST_INT) return XEXP (x, 0); return 0; } /* Given a tablejump insn INSN, return the RTL expression for the offset into the jump table. If the offset cannot be determined, then return NULL_RTX. If EARLIEST is nonzero, it is a pointer to a place where the earliest insn used in locating the offset was found. */ rtx get_jump_table_offset (rtx insn, rtx *earliest) { rtx label = NULL; rtx table = NULL; rtx set; rtx old_insn; rtx x; rtx old_x; rtx y; rtx old_y; int i; if (!tablejump_p (insn, &label, &table) || !(set = single_set (insn))) return NULL_RTX; x = SET_SRC (set); /* Some targets (eg, ARM) emit a tablejump that also contains the out-of-range target. */ if (GET_CODE (x) == IF_THEN_ELSE && GET_CODE (XEXP (x, 2)) == LABEL_REF) x = XEXP (x, 1); /* Search backwards and locate the expression stored in X. */ for (old_x = NULL_RTX; REG_P (x) && x != old_x; old_x = x, x = find_last_value (x, &insn, NULL_RTX, 0)) ; /* If X is an expression using a relative address then strip off the addition / subtraction of PC, PIC_OFFSET_TABLE_REGNUM, or the jump table label. */ if (GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)) { for (i = 0; i < 2; i++) { old_insn = insn; y = XEXP (x, i); if (y == pc_rtx || y == pic_offset_table_rtx) break; for (old_y = NULL_RTX; REG_P (y) && y != old_y; old_y = y, y = find_last_value (y, &old_insn, NULL_RTX, 0)) ; if ((GET_CODE (y) == LABEL_REF && XEXP (y, 0) == label)) break; } if (i >= 2) return NULL_RTX; x = XEXP (x, 1 - i); for (old_x = NULL_RTX; REG_P (x) && x != old_x; old_x = x, x = find_last_value (x, &insn, NULL_RTX, 0)) ; } /* Strip off any sign or zero extension. */ if (GET_CODE (x) == SIGN_EXTEND || GET_CODE (x) == ZERO_EXTEND) { x = XEXP (x, 0); for (old_x = NULL_RTX; REG_P (x) && x != old_x; old_x = x, x = find_last_value (x, &insn, NULL_RTX, 0)) ; } /* If X isn't a MEM then this isn't a tablejump we understand. */ if (!MEM_P (x)) return NULL_RTX; /* Strip off the MEM. */ x = XEXP (x, 0); for (old_x = NULL_RTX; REG_P (x) && x != old_x; old_x = x, x = find_last_value (x, &insn, NULL_RTX, 0)) ; /* If X isn't a PLUS than this isn't a tablejump we understand. */ if (GET_CODE (x) != PLUS) return NULL_RTX; /* At this point we should have an expression representing the jump table plus an offset. Examine each operand in order to determine which one represents the jump table. Knowing that tells us that the other operand must represent the offset. */ for (i = 0; i < 2; i++) { old_insn = insn; y = XEXP (x, i); for (old_y = NULL_RTX; REG_P (y) && y != old_y; old_y = y, y = find_last_value (y, &old_insn, NULL_RTX, 0)) ; if ((GET_CODE (y) == CONST || GET_CODE (y) == LABEL_REF) && reg_mentioned_p (label, y)) break; } if (i >= 2) return NULL_RTX; x = XEXP (x, 1 - i); /* Strip off the addition / subtraction of PIC_OFFSET_TABLE_REGNUM. */ if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS) for (i = 0; i < 2; i++) if (XEXP (x, i) == pic_offset_table_rtx) { x = XEXP (x, 1 - i); break; } if (earliest) *earliest = insn; /* Return the RTL expression representing the offset. */ return x; } /* A subroutine of global_reg_mentioned_p, returns 1 if *LOC mentions a global register. 
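It is written as a for_each_rtx callback; global_reg_mentioned_p below invokes it as for_each_rtx (&x, global_reg_mentioned_p_1, NULL).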
*/ static int global_reg_mentioned_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED) { int regno; rtx x = *loc; if (! x) return 0; switch (GET_CODE (x)) { case SUBREG: if (REG_P (SUBREG_REG (x))) { if (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER && global_regs[subreg_regno (x)]) return 1; return 0; } break; case REG: regno = REGNO (x); if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) return 1; return 0; case SCRATCH: case PC: case CC0: case CONST_INT: case CONST_DOUBLE: case CONST: case LABEL_REF: return 0; case CALL: /* A non-constant call might use a global register. */ return 1; default: break; } return 0; } /* Returns nonzero if X mentions a global register. */ int global_reg_mentioned_p (rtx x) { if (INSN_P (x)) { if (GET_CODE (x) == CALL_INSN) { if (! CONST_OR_PURE_CALL_P (x)) return 1; x = CALL_INSN_FUNCTION_USAGE (x); if (x == 0) return 0; } else x = PATTERN (x); } return for_each_rtx (&x, global_reg_mentioned_p_1, NULL); } /* Return the number of places FIND appears within X. If COUNT_DEST is zero, we do not count occurrences inside the destination of a SET. */ int count_occurrences (rtx x, rtx find, int count_dest) { int i, j; enum rtx_code code; const char *format_ptr; int count; if (x == find) return 1; code = GET_CODE (x); switch (code) { case REG: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case CODE_LABEL: case PC: case CC0: return 0; case MEM: if (MEM_P (find) && rtx_equal_p (x, find)) return 1; break; case SET: if (SET_DEST (x) == find && ! count_dest) return count_occurrences (SET_SRC (x), find, count_dest); break; default: break; } format_ptr = GET_RTX_FORMAT (code); count = 0; for (i = 0; i < GET_RTX_LENGTH (code); i++) { switch (*format_ptr++) { case 'e': count += count_occurrences (XEXP (x, i), find, count_dest); break; case 'E': for (j = 0; j < XVECLEN (x, i); j++) count += count_occurrences (XVECEXP (x, i, j), find, count_dest); break; } } return count; } /* Nonzero if register REG appears somewhere within IN. Also works if REG is not a register; in this case it checks for a subexpression of IN that is Lisp "equal" to REG. */ int reg_mentioned_p (rtx reg, rtx in) { const char *fmt; int i; enum rtx_code code; if (in == 0) return 0; if (reg == in) return 1; if (GET_CODE (in) == LABEL_REF) return reg == XEXP (in, 0); code = GET_CODE (in); switch (code) { /* Compare registers by number. */ case REG: return REG_P (reg) && REGNO (in) == REGNO (reg); /* These codes have no constituent expressions and are unique. */ case SCRATCH: case CC0: case PC: return 0; case CONST_INT: case CONST_VECTOR: case CONST_DOUBLE: /* These are kept unique for a given value. */ return 0; default: break; } if (GET_CODE (reg) == code && rtx_equal_p (reg, in)) return 1; fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'E') { int j; for (j = XVECLEN (in, i) - 1; j >= 0; j--) if (reg_mentioned_p (reg, XVECEXP (in, i, j))) return 1; } else if (fmt[i] == 'e' && reg_mentioned_p (reg, XEXP (in, i))) return 1; } return 0; } /* Return 1 if in between BEG and END, exclusive of BEG and END, there is no CODE_LABEL insn. */ int no_labels_between_p (rtx beg, rtx end) { rtx p; if (beg == end) return 0; for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p)) if (GET_CODE (p) == CODE_LABEL) return 0; return 1; } /* Return 1 if in between BEG and END, exclusive of BEG and END, there is no JUMP_INSN insn. 
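Note that, unlike no_labels_between_p above, the BEG == END case is not special-cased here.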
*/ int no_jumps_between_p (rtx beg, rtx end) { rtx p; for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p)) if (GET_CODE (p) == JUMP_INSN) return 0; return 1; } /* Nonzero if register REG is used in an insn between FROM_INSN and TO_INSN (exclusive of those two). */ int reg_used_between_p (rtx reg, rtx from_insn, rtx to_insn) { rtx insn; if (from_insn == to_insn) return 0; for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && (reg_overlap_mentioned_p (reg, PATTERN (insn)) || (GET_CODE (insn) == CALL_INSN && (find_reg_fusage (insn, USE, reg) || find_reg_fusage (insn, CLOBBER, reg))))) return 1; return 0; } /* Nonzero if the old value of X, a register, is referenced in BODY. If X is entirely replaced by a new value and the only use is as a SET_DEST, we do not consider it a reference. */ int reg_referenced_p (rtx x, rtx body) { int i; switch (GET_CODE (body)) { case SET: if (reg_overlap_mentioned_p (x, SET_SRC (body))) return 1; /* If the destination is anything other than CC0, PC, a REG or a SUBREG of a REG that occupies all of the REG, the insn references X if it is mentioned in the destination. */ if (GET_CODE (SET_DEST (body)) != CC0 && GET_CODE (SET_DEST (body)) != PC && !REG_P (SET_DEST (body)) && ! (GET_CODE (SET_DEST (body)) == SUBREG && REG_P (SUBREG_REG (SET_DEST (body))) && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body)))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body))) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))) && reg_overlap_mentioned_p (x, SET_DEST (body))) return 1; return 0; case ASM_OPERANDS: for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--) if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i))) return 1; return 0; case CALL: case USE: case IF_THEN_ELSE: return reg_overlap_mentioned_p (x, body); case TRAP_IF: return reg_overlap_mentioned_p (x, TRAP_CONDITION (body)); case PREFETCH: return reg_overlap_mentioned_p (x, XEXP (body, 0)); case UNSPEC: case UNSPEC_VOLATILE: for (i = XVECLEN (body, 0) - 1; i >= 0; i--) if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i))) return 1; return 0; case PARALLEL: for (i = XVECLEN (body, 0) - 1; i >= 0; i--) if (reg_referenced_p (x, XVECEXP (body, 0, i))) return 1; return 0; case CLOBBER: if (MEM_P (XEXP (body, 0))) if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0))) return 1; return 0; case COND_EXEC: if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body))) return 1; return reg_referenced_p (x, COND_EXEC_CODE (body)); default: return 0; } } /* Nonzero if register REG is referenced in an insn between FROM_INSN and TO_INSN (exclusive of those two). Sets of REG do not count. */ int reg_referenced_between_p (rtx reg, rtx from_insn, rtx to_insn) { rtx insn; if (from_insn == to_insn) return 0; for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && (reg_referenced_p (reg, PATTERN (insn)) || (GET_CODE (insn) == CALL_INSN && find_reg_fusage (insn, USE, reg)))) return 1; return 0; } /* Nonzero if register REG is set or clobbered in an insn between FROM_INSN and TO_INSN (exclusive of those two). */ int reg_set_between_p (rtx reg, rtx from_insn, rtx to_insn) { rtx insn; if (from_insn == to_insn) return 0; for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && reg_set_p (reg, insn)) return 1; return 0; } /* Internals of reg_set_between_p. */ int reg_set_p (rtx reg, rtx insn) { /* We can be passed an insn or part of one. 
If we are passed an insn, check if a side-effect of the insn clobbers REG. */ if (INSN_P (insn) && (FIND_REG_INC_NOTE (insn, reg) || (GET_CODE (insn) == CALL_INSN /* We'd like to test call_used_regs here, but rtlanal.c can't reference that variable due to its use in genattrtab. So we'll just be more conservative. ??? Unless we could ensure that the CALL_INSN_FUNCTION_USAGE information holds all clobbered registers. */ && ((REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER) || MEM_P (reg) || find_reg_fusage (insn, CLOBBER, reg))))) return 1; return set_of (reg, insn) != NULL_RTX; } /* Similar to reg_set_between_p, but check all registers in X. Return 0 only if none of them are modified between START and END. Do not consider non-registers one way or the other. */ int regs_set_between_p (rtx x, rtx start, rtx end) { enum rtx_code code = GET_CODE (x); const char *fmt; int i, j; switch (code) { case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST: case SYMBOL_REF: case LABEL_REF: case PC: case CC0: return 0; case REG: return reg_set_between_p (x, start, end); default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e' && regs_set_between_p (XEXP (x, i), start, end)) return 1; else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (regs_set_between_p (XVECEXP (x, i, j), start, end)) return 1; } return 0; } /* Similar to reg_set_between_p, but check all registers in X. Return 0 only if none of them are modified between START and END. Return 1 if X contains a MEM; this routine does usememory aliasing. */ int modified_between_p (rtx x, rtx start, rtx end) { enum rtx_code code = GET_CODE (x); const char *fmt; int i, j; rtx insn; if (start == end) return 0; switch (code) { case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST: case SYMBOL_REF: case LABEL_REF: return 0; case PC: case CC0: return 1; case MEM: if (RTX_UNCHANGING_P (x)) return 0; if (modified_between_p (XEXP (x, 0), start, end)) return 1; for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn)) if (memory_modified_in_insn_p (x, insn)) return 1; return 0; break; case REG: return reg_set_between_p (x, start, end); default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end)) return 1; else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (modified_between_p (XVECEXP (x, i, j), start, end)) return 1; } return 0; } /* Similar to reg_set_p, but check all registers in X. Return 0 only if none of them are modified in INSN. Return 1 if X contains a MEM; this routine does use memory aliasing. */ int modified_in_p (rtx x, rtx insn) { enum rtx_code code = GET_CODE (x); const char *fmt; int i, j; switch (code) { case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST: case SYMBOL_REF: case LABEL_REF: return 0; case PC: case CC0: return 1; case MEM: if (RTX_UNCHANGING_P (x)) return 0; if (modified_in_p (XEXP (x, 0), insn)) return 1; if (memory_modified_in_insn_p (x, insn)) return 1; return 0; break; case REG: return reg_set_p (x, insn); default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn)) return 1; else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (modified_in_p (XVECEXP (x, i, j), insn)) return 1; } return 0; } /* Return true if anything in insn X is (anti,output,true) dependent on anything in insn Y. 
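The check is symmetric: note_stores is used to see whether anything stored by X is mentioned in the pattern of Y, and then whether anything stored by Y is mentioned in the pattern of X.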
*/ int insn_dependent_p (rtx x, rtx y) { rtx tmp; if (! INSN_P (x) || ! INSN_P (y)) abort (); tmp = PATTERN (y); note_stores (PATTERN (x), insn_dependent_p_1, &tmp); if (tmp == NULL_RTX) return 1; tmp = PATTERN (x); note_stores (PATTERN (y), insn_dependent_p_1, &tmp); if (tmp == NULL_RTX) return 1; return 0; } /* A helper routine for insn_dependent_p called through note_stores. */ static void insn_dependent_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data) { rtx * pinsn = (rtx *) data; if (*pinsn && reg_mentioned_p (x, *pinsn)) *pinsn = NULL_RTX; } /* Helper function for set_of. */ struct set_of_data { rtx found; rtx pat; }; static void set_of_1 (rtx x, rtx pat, void *data1) { struct set_of_data *data = (struct set_of_data *) (data1); if (rtx_equal_p (x, data->pat) || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x))) data->found = pat; } /* Give an INSN, return a SET or CLOBBER expression that does modify PAT (either directly or via STRICT_LOW_PART and similar modifiers). */ rtx set_of (rtx pat, rtx insn) { struct set_of_data data; data.found = NULL_RTX; data.pat = pat; note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data); return data.found; } /* Given an INSN, return a SET expression if this insn has only a single SET. It may also have CLOBBERs, USEs, or SET whose output will not be used, which we ignore. */ rtx single_set_2 (rtx insn, rtx pat) { rtx set = NULL; int set_verified = 1; int i; if (GET_CODE (pat) == PARALLEL) { for (i = 0; i < XVECLEN (pat, 0); i++) { rtx sub = XVECEXP (pat, 0, i); switch (GET_CODE (sub)) { case USE: case CLOBBER: break; case SET: /* We can consider insns having multiple sets, where all but one are dead as single set insns. In common case only single set is present in the pattern so we want to avoid checking for REG_UNUSED notes unless necessary. When we reach set first time, we just expect this is the single set we are looking for and only when more sets are found in the insn, we check them. */ if (!set_verified) { if (find_reg_note (insn, REG_UNUSED, SET_DEST (set)) && !side_effects_p (set)) set = NULL; else set_verified = 1; } if (!set) set = sub, set_verified = 0; else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub)) || side_effects_p (sub)) return NULL_RTX; break; default: return NULL_RTX; } } } return set; } /* Given an INSN, return nonzero if it has more than one SET, else return zero. */ int multiple_sets (rtx insn) { int found; int i; /* INSN must be an insn. */ if (! INSN_P (insn)) return 0; /* Only a PARALLEL can have multiple SETs. */ if (GET_CODE (PATTERN (insn)) == PARALLEL) { for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++) if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET) { /* If we have already found a SET, then return now. */ if (found) return 1; else found = 1; } } /* Either zero or one SET. */ return 0; } /* Return nonzero if the destination of SET equals the source and there are no side effects. */ int set_noop_p (rtx set) { rtx src = SET_SRC (set); rtx dst = SET_DEST (set); if (dst == pc_rtx && src == pc_rtx) return 1; if (MEM_P (dst) && MEM_P (src)) return rtx_equal_p (dst, src) && !side_effects_p (dst); if (GET_CODE (dst) == SIGN_EXTRACT || GET_CODE (dst) == ZERO_EXTRACT) return rtx_equal_p (XEXP (dst, 0), src) && ! 
BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx && !side_effects_p (src); if (GET_CODE (dst) == STRICT_LOW_PART) dst = XEXP (dst, 0); if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG) { if (SUBREG_BYTE (src) != SUBREG_BYTE (dst)) return 0; src = SUBREG_REG (src); dst = SUBREG_REG (dst); } return (REG_P (src) && REG_P (dst) && REGNO (src) == REGNO (dst)); } /* Return nonzero if an insn consists only of SETs, each of which only sets a value to itself. */ int noop_move_p (rtx insn) { rtx pat = PATTERN (insn); if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE) return 1; /* Insns carrying these notes are useful later on. */ if (find_reg_note (insn, REG_EQUAL, NULL_RTX)) return 0; /* For now treat an insn with a REG_RETVAL note as a special insn which should not be considered a no-op. */ if (find_reg_note (insn, REG_RETVAL, NULL_RTX)) return 0; if (GET_CODE (pat) == SET && set_noop_p (pat)) return 1; if (GET_CODE (pat) == PARALLEL) { int i; /* If nothing but SETs of registers to themselves, this insn can also be deleted. */ for (i = 0; i < XVECLEN (pat, 0); i++) { rtx tem = XVECEXP (pat, 0, i); if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER) continue; if (GET_CODE (tem) != SET || ! set_noop_p (tem)) return 0; } return 1; } return 0; } /* Return the last thing that X was assigned from before *PINSN. If VALID_TO is not NULL_RTX then verify that the object is not modified up to VALID_TO. If the object was modified, if we hit a partial assignment to X, or hit a CODE_LABEL first, return X. If we found an assignment, update *PINSN to point to it. ALLOW_HWREG is set to 1 if hardware registers are allowed to be the src. */ rtx find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg) { rtx p; for (p = PREV_INSN (*pinsn); p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p)) if (INSN_P (p)) { rtx set = single_set (p); rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX); if (set && rtx_equal_p (x, SET_DEST (set))) { rtx src = SET_SRC (set); if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST) src = XEXP (note, 0); if ((valid_to == NULL_RTX || ! modified_between_p (src, PREV_INSN (p), valid_to)) /* Reject hard registers because we don't usually want to use them; we'd rather use a pseudo. */ && (! (REG_P (src) && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg)) { *pinsn = p; return src; } } /* If set in non-simple way, we don't have a value. */ if (reg_set_p (x, p)) break; } return x; } /* Return nonzero if register in range [REGNO, ENDREGNO) appears either explicitly or implicitly in X other than being stored into. References contained within the substructure at LOC do not count. LOC may be zero, meaning don't ignore anything. */ int refers_to_regno_p (unsigned int regno, unsigned int endregno, rtx x, rtx *loc) { int i; unsigned int x_regno; RTX_CODE code; const char *fmt; repeat: /* The contents of a REG_NONNEG note are always zero, so we must come here upon repeat in case the last REG_NOTE is a REG_NONNEG note. */ if (x == 0) return 0; code = GET_CODE (x); switch (code) { case REG: x_regno = REGNO (x); /* If we are modifying the stack, frame, or argument pointer, it will clobber a virtual register. In fact, we could be more precise, but it isn't worth it. */ if ((x_regno == STACK_POINTER_REGNUM #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM || x_regno == ARG_POINTER_REGNUM #endif || x_regno == FRAME_POINTER_REGNUM) && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER) return 1; return (endregno > x_regno && regno < x_regno + (x_regno < FIRST_PSEUDO_REGISTER ?
hard_regno_nregs[x_regno][GET_MODE (x)] : 1)); case SUBREG: /* If this is a SUBREG of a hard reg, we can see exactly which registers are being modified. Otherwise, handle normally. */ if (REG_P (SUBREG_REG (x)) && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER) { unsigned int inner_regno = subreg_regno (x); unsigned int inner_endregno = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[inner_regno][GET_MODE (x)] : 1); return endregno > inner_regno && regno < inner_endregno; } break; case CLOBBER: case SET: if (&SET_DEST (x) != loc /* Note setting a SUBREG counts as referring to the REG it is in for a pseudo but not for hard registers since we can treat each word individually. */ && ((GET_CODE (SET_DEST (x)) == SUBREG && loc != &SUBREG_REG (SET_DEST (x)) && REG_P (SUBREG_REG (SET_DEST (x))) && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER && refers_to_regno_p (regno, endregno, SUBREG_REG (SET_DEST (x)), loc)) || (!REG_P (SET_DEST (x)) && refers_to_regno_p (regno, endregno, SET_DEST (x), loc)))) return 1; if (code == CLOBBER || loc == &SET_SRC (x)) return 0; x = SET_SRC (x); goto repeat; default: break; } /* X does not match, so try its subexpressions. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e' && loc != &XEXP (x, i)) { if (i == 0) { x = XEXP (x, 0); goto repeat; } else if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc)) return 1; } else if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (loc != &XVECEXP (x, i, j) && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc)) return 1; } } return 0; } /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG, we check if any register number in X conflicts with the relevant register numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN contains a MEM (we don't bother checking for memory addresses that can't conflict because we expect this to be a rare case. */ int reg_overlap_mentioned_p (rtx x, rtx in) { unsigned int regno, endregno; /* If either argument is a constant, then modifying X can not affect IN. Here we look at IN, we can profitably combine CONSTANT_P (x) with the switch statement below. */ if (CONSTANT_P (in)) return 0; recurse: switch (GET_CODE (x)) { case STRICT_LOW_PART: case ZERO_EXTRACT: case SIGN_EXTRACT: /* Overly conservative. */ x = XEXP (x, 0); goto recurse; case SUBREG: regno = REGNO (SUBREG_REG (x)); if (regno < FIRST_PSEUDO_REGISTER) regno = subreg_regno (x); goto do_reg; case REG: regno = REGNO (x); do_reg: endregno = regno + (regno < FIRST_PSEUDO_REGISTER ? hard_regno_nregs[regno][GET_MODE (x)] : 1); return refers_to_regno_p (regno, endregno, in, (rtx*) 0); case MEM: { const char *fmt; int i; if (MEM_P (in)) return 1; fmt = GET_RTX_FORMAT (GET_CODE (in)); for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--) if (fmt[i] == 'e' && reg_overlap_mentioned_p (x, XEXP (in, i))) return 1; return 0; } case SCRATCH: case PC: case CC0: return reg_mentioned_p (x, in); case PARALLEL: { int i; /* If any register in here refers to it we return true. */ for (i = XVECLEN (x, 0) - 1; i >= 0; i--) if (XEXP (XVECEXP (x, 0, i), 0) != 0 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in)) return 1; return 0; } default: #ifdef ENABLE_CHECKING if (!CONSTANT_P (x)) abort (); #endif return 0; } } /* Call FUN on each register or MEM that is stored into or clobbered by X. (X would be the pattern of an insn). 
FUN receives two arguments: the REG, MEM, CC0 or PC being stored in or clobbered, the SET or CLOBBER rtx that does the store. If the item being stored in or clobbered is a SUBREG of a hard register, the SUBREG will be passed. */ void note_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data) { int i; if (GET_CODE (x) == COND_EXEC) x = COND_EXEC_CODE (x); if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER) { rtx dest = SET_DEST (x); while ((GET_CODE (dest) == SUBREG && (!REG_P (SUBREG_REG (dest)) || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER)) || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions, each of whose first operand is a register. */ if (GET_CODE (dest) == PARALLEL) { for (i = XVECLEN (dest, 0) - 1; i >= 0; i--) if (XEXP (XVECEXP (dest, 0, i), 0) != 0) (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data); } else (*fun) (dest, x, data); } else if (GET_CODE (x) == PARALLEL) for (i = XVECLEN (x, 0) - 1; i >= 0; i--) note_stores (XVECEXP (x, 0, i), fun, data); } /* Like note_stores, but call FUN for each expression that is being referenced in PBODY, a pointer to the PATTERN of an insn. We only call FUN for each expression, not any interior subexpressions. FUN receives a pointer to the expression and the DATA passed to this function. Note that this is not quite the same test as that done in reg_referenced_p since that considers something as being referenced if it is being partially set, while we do not. */ void note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data) { rtx body = *pbody; int i; switch (GET_CODE (body)) { case COND_EXEC: (*fun) (&COND_EXEC_TEST (body), data); note_uses (&COND_EXEC_CODE (body), fun, data); return; case PARALLEL: for (i = XVECLEN (body, 0) - 1; i >= 0; i--) note_uses (&XVECEXP (body, 0, i), fun, data); return; case USE: (*fun) (&XEXP (body, 0), data); return; case ASM_OPERANDS: for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--) (*fun) (&ASM_OPERANDS_INPUT (body, i), data); return; case TRAP_IF: (*fun) (&TRAP_CONDITION (body), data); return; case PREFETCH: (*fun) (&XEXP (body, 0), data); return; case UNSPEC: case UNSPEC_VOLATILE: for (i = XVECLEN (body, 0) - 1; i >= 0; i--) (*fun) (&XVECEXP (body, 0, i), data); return; case CLOBBER: if (MEM_P (XEXP (body, 0))) (*fun) (&XEXP (XEXP (body, 0), 0), data); return; case SET: { rtx dest = SET_DEST (body); /* For sets we replace everything in source plus registers in memory expression in store and operands of a ZERO_EXTRACT. */ (*fun) (&SET_SRC (body), data); if (GET_CODE (dest) == ZERO_EXTRACT) { (*fun) (&XEXP (dest, 1), data); (*fun) (&XEXP (dest, 2), data); } while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); if (MEM_P (dest)) (*fun) (&XEXP (dest, 0), data); } return; default: /* All the other possibilities never store. */ (*fun) (pbody, data); return; } } /* Return nonzero if X's old contents don't survive after INSN. This will be true if X is (cc0) or if X is a register and X dies in INSN or because INSN entirely sets X. "Entirely set" means set directly and not through a SUBREG, ZERO_EXTRACT or SIGN_EXTRACT, so no trace of the old contents remains. Likewise, REG_INC does not count. REG may be a hard or pseudo reg. Renumbering is not taken into account, but for this use that makes no difference, since regs don't overlap during their lifetimes.
Therefore, this function may be used at any time after deaths have been computed (in flow.c). If REG is a hard reg that occupies multiple machine registers, this function will only return 1 if each of those registers will be replaced by INSN. */ int dead_or_set_p (rtx insn, rtx x) { unsigned int regno, last_regno; unsigned int i; /* Can't use cc0_rtx below since this file is used by genattrtab.c. */ if (GET_CODE (x) == CC0) return 1; if (!REG_P (x)) abort (); regno = REGNO (x); last_regno = (regno >= FIRST_PSEUDO_REGISTER ? regno : regno + hard_regno_nregs[regno][GET_MODE (x)] - 1); for (i = regno; i <= last_regno; i++) if (! dead_or_set_regno_p (insn, i)) return 0; return 1; } /* Utility function for dead_or_set_p to check an individual register. Also called from flow.c. */ int dead_or_set_regno_p (rtx insn, unsigned int test_regno) { unsigned int regno, endregno; rtx pattern; /* See if there is a death note for something that includes TEST_REGNO. */ if (find_regno_note (insn, REG_DEAD, test_regno)) return 1; if (GET_CODE (insn) == CALL_INSN && find_regno_fusage (insn, CLOBBER, test_regno)) return 1; pattern = PATTERN (insn); if (GET_CODE (pattern) == COND_EXEC) pattern = COND_EXEC_CODE (pattern); if (GET_CODE (pattern) == SET) { rtx dest = SET_DEST (pattern); /* A value is totally replaced if it is the destination or the destination is a SUBREG of REGNO that does not change the number of words in it. */ if (GET_CODE (dest) == SUBREG && (((GET_MODE_SIZE (GET_MODE (dest)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))) dest = SUBREG_REG (dest); if (!REG_P (dest)) return 0; regno = REGNO (dest); endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1 : regno + hard_regno_nregs[regno][GET_MODE (dest)]); return (test_regno >= regno && test_regno < endregno); } else if (GET_CODE (pattern) == PARALLEL) { int i; for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--) { rtx body = XVECEXP (pattern, 0, i); if (GET_CODE (body) == COND_EXEC) body = COND_EXEC_CODE (body); if (GET_CODE (body) == SET || GET_CODE (body) == CLOBBER) { rtx dest = SET_DEST (body); if (GET_CODE (dest) == SUBREG && (((GET_MODE_SIZE (GET_MODE (dest)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD) == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))) dest = SUBREG_REG (dest); if (!REG_P (dest)) continue; regno = REGNO (dest); endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1 : regno + hard_regno_nregs[regno][GET_MODE (dest)]); if (test_regno >= regno && test_regno < endregno) return 1; } } } return 0; } /* Return the reg-note of kind KIND in insn INSN, if there is one. If DATUM is nonzero, look for one whose datum is DATUM. */ rtx find_reg_note (rtx insn, enum reg_note kind, rtx datum) { rtx link; /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */ if (! INSN_P (insn)) return 0; if (datum == 0) { for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == kind) return link; return 0; } for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0)) return link; return 0; } /* Return the reg-note of kind KIND in insn INSN which applies to register number REGNO, if any. Return 0 if there is no such reg-note. Note that the REGNO of this NOTE need not be REGNO if REGNO is a hard register; it might be the case that the note overlaps REGNO. 
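An illustrative case, assuming a target where hard_regno_nregs[2][DImode] is 2: a note for (reg:DI 2) is also returned for a query with REGNO 3, since the note's register spans hard regs 2 and 3.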
*/ rtx find_regno_note (rtx insn, enum reg_note kind, unsigned int regno) { rtx link; /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */ if (! INSN_P (insn)) return 0; for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == kind /* Verify that it is a register, so that scratch and MEM won't cause a problem here. */ && REG_P (XEXP (link, 0)) && REGNO (XEXP (link, 0)) <= regno && ((REGNO (XEXP (link, 0)) + (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1 : hard_regno_nregs[REGNO (XEXP (link, 0))] [GET_MODE (XEXP (link, 0))])) > regno)) return link; return 0; } /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and has such a note. */ rtx find_reg_equal_equiv_note (rtx insn) { rtx link; if (!INSN_P (insn)) return 0; for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (REG_NOTE_KIND (link) == REG_EQUAL || REG_NOTE_KIND (link) == REG_EQUIV) { if (single_set (insn) == 0) return 0; return link; } return NULL; } /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found in the CALL_INSN_FUNCTION_USAGE information of INSN. */ int find_reg_fusage (rtx insn, enum rtx_code code, rtx datum) { /* If it's not a CALL_INSN, it can't possibly have a CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */ if (GET_CODE (insn) != CALL_INSN) return 0; if (! datum) abort (); if (!REG_P (datum)) { rtx link; for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) if (GET_CODE (XEXP (link, 0)) == code && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0))) return 1; } else { unsigned int regno = REGNO (datum); /* CALL_INSN_FUNCTION_USAGE information cannot contain references to pseudo registers, so don't bother checking. */ if (regno < FIRST_PSEUDO_REGISTER) { unsigned int end_regno = regno + hard_regno_nregs[regno][GET_MODE (datum)]; unsigned int i; for (i = regno; i < end_regno; i++) if (find_regno_fusage (insn, code, i)) return 1; } } return 0; } /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found in the CALL_INSN_FUNCTION_USAGE information of INSN. */ int find_regno_fusage (rtx insn, enum rtx_code code, unsigned int regno) { rtx link; /* CALL_INSN_FUNCTION_USAGE information cannot contain references to pseudo registers, so don't bother checking. */ if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (insn) != CALL_INSN ) return 0; for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) { unsigned int regnote; rtx op, reg; if (GET_CODE (op = XEXP (link, 0)) == code && REG_P (reg = XEXP (op, 0)) && (regnote = REGNO (reg)) <= regno && regnote + hard_regno_nregs[regnote][GET_MODE (reg)] > regno) return 1; } return 0; } /* Return true if INSN is a call to a pure function. */ int pure_call_p (rtx insn) { rtx link; if (GET_CODE (insn) != CALL_INSN || ! CONST_OR_PURE_CALL_P (insn)) return 0; /* Look for the note that differentiates const and pure functions. */ for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) { rtx u, m; if (GET_CODE (u = XEXP (link, 0)) == USE && MEM_P (m = XEXP (u, 0)) && GET_MODE (m) == BLKmode && GET_CODE (XEXP (m, 0)) == SCRATCH) return 1; } return 0; } /* Remove register note NOTE from the REG_NOTES of INSN. 
*/ void remove_note (rtx insn, rtx note) { rtx link; if (note == NULL_RTX) return; if (REG_NOTES (insn) == note) { REG_NOTES (insn) = XEXP (note, 1); return; } for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) if (XEXP (link, 1) == note) { XEXP (link, 1) = XEXP (note, 1); return; } abort (); } /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and return 1 if it is found. A simple equality test is used to determine if NODE matches. */ int in_expr_list_p (rtx listp, rtx node) { rtx x; for (x = listp; x; x = XEXP (x, 1)) if (node == XEXP (x, 0)) return 1; return 0; } /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and remove that entry from the list if it is found. A simple equality test is used to determine if NODE matches. */ void remove_node_from_expr_list (rtx node, rtx *listp) { rtx temp = *listp; rtx prev = NULL_RTX; while (temp) { if (node == XEXP (temp, 0)) { /* Splice the node out of the list. */ if (prev) XEXP (prev, 1) = XEXP (temp, 1); else *listp = XEXP (temp, 1); return; } prev = temp; temp = XEXP (temp, 1); } } /* Nonzero if X contains any volatile instructions. These are instructions which may cause unpredictable machine state, and thus no instructions should be moved or combined across them. This includes only volatile asms and UNSPEC_VOLATILE instructions. */ int volatile_insn_p (rtx x) { RTX_CODE code; code = GET_CODE (x); switch (code) { case LABEL_REF: case SYMBOL_REF: case CONST_INT: case CONST: case CONST_DOUBLE: case CONST_VECTOR: case CC0: case PC: case REG: case SCRATCH: case CLOBBER: case ADDR_VEC: case ADDR_DIFF_VEC: case CALL: case MEM: return 0; case UNSPEC_VOLATILE: /* case TRAP_IF: This isn't clear yet. */ return 1; case ASM_INPUT: case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 1; default: break; } /* Recursively scan the operands of this expression. */ { const char *fmt = GET_RTX_FORMAT (code); int i; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { if (volatile_insn_p (XEXP (x, i))) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (volatile_insn_p (XVECEXP (x, i, j))) return 1; } } } return 0; } /* Nonzero if X contains any volatile memory references, UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */ int volatile_refs_p (rtx x) { RTX_CODE code; code = GET_CODE (x); switch (code) { case LABEL_REF: case SYMBOL_REF: case CONST_INT: case CONST: case CONST_DOUBLE: case CONST_VECTOR: case CC0: case PC: case REG: case SCRATCH: case CLOBBER: case ADDR_VEC: case ADDR_DIFF_VEC: return 0; case UNSPEC_VOLATILE: return 1; case MEM: case ASM_INPUT: case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 1; default: break; } /* Recursively scan the operands of this expression. */ { const char *fmt = GET_RTX_FORMAT (code); int i; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { if (volatile_refs_p (XEXP (x, i))) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (volatile_refs_p (XVECEXP (x, i, j))) return 1; } } } return 0; } /* Similar to above, except that it also rejects register pre- and post- incrementing. */ int side_effects_p (rtx x) { RTX_CODE code; code = GET_CODE (x); switch (code) { case LABEL_REF: case SYMBOL_REF: case CONST_INT: case CONST: case CONST_DOUBLE: case CONST_VECTOR: case CC0: case PC: case REG: case SCRATCH: case ADDR_VEC: case ADDR_DIFF_VEC: return 0; case CLOBBER: /* Reject CLOBBER with a non-VOID mode.
These are made by combine.c when some combination can't be done. If we see one, don't think that we can simplify the expression. */ return (GET_MODE (x) != VOIDmode); case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC: case PRE_MODIFY: case POST_MODIFY: case CALL: case UNSPEC_VOLATILE: /* case TRAP_IF: This isn't clear yet. */ return 1; case MEM: case ASM_INPUT: case ASM_OPERANDS: if (MEM_VOLATILE_P (x)) return 1; default: break; } /* Recursively scan the operands of this expression. */ { const char *fmt = GET_RTX_FORMAT (code); int i; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { if (side_effects_p (XEXP (x, i))) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (side_effects_p (XVECEXP (x, i, j))) return 1; } } } return 0; } /* Return nonzero if evaluating rtx X might cause a trap. */ int may_trap_p (rtx x) { int i; enum rtx_code code; const char *fmt; if (x == 0) return 0; code = GET_CODE (x); switch (code) { /* Handle these cases quickly. */ case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case LABEL_REF: case CONST: case PC: case CC0: case REG: case SCRATCH: return 0; case ASM_INPUT: case UNSPEC_VOLATILE: case TRAP_IF: return 1; case ASM_OPERANDS: return MEM_VOLATILE_P (x); /* Memory ref can trap unless it's a static var or a stack slot. */ case MEM: if (MEM_NOTRAP_P (x)) return 0; return rtx_addr_can_trap_p (XEXP (x, 0)); /* Division by a non-constant might trap. */ case DIV: case MOD: case UDIV: case UMOD: if (HONOR_SNANS (GET_MODE (x))) return 1; if (! CONSTANT_P (XEXP (x, 1)) || (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT && flag_trapping_math)) return 1; if (XEXP (x, 1) == const0_rtx) return 1; break; case EXPR_LIST: /* An EXPR_LIST is used to represent a function call. This certainly may trap. */ return 1; case GE: case GT: case LE: case LT: case LTGT: case COMPARE: /* Some floating point comparisons may trap. */ if (!flag_trapping_math) break; /* ??? There is no machine independent way to check for tests that trap when COMPARE is used, though many targets do make this distinction. For instance, sparc uses CCFPE for compares which generate exceptions and CCFP for compares which do not generate exceptions. */ if (HONOR_NANS (GET_MODE (x))) return 1; /* But often the compare has some CC mode, so check operand modes as well. */ if (HONOR_NANS (GET_MODE (XEXP (x, 0))) || HONOR_NANS (GET_MODE (XEXP (x, 1)))) return 1; break; case EQ: case NE: if (HONOR_SNANS (GET_MODE (x))) return 1; /* Often comparison is CC mode, so check operand modes. */ if (HONOR_SNANS (GET_MODE (XEXP (x, 0))) || HONOR_SNANS (GET_MODE (XEXP (x, 1)))) return 1; break; case FIX: /* Conversion of floating point might trap. */ if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0)))) return 1; break; case NEG: case ABS: /* These operations don't trap even with floating point. */ break; default: /* Any floating arithmetic may trap. */ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT && flag_trapping_math) return 1; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') { if (may_trap_p (XEXP (x, i))) return 1; } else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) if (may_trap_p (XVECEXP (x, i, j))) return 1; } } return 0; } /* Return nonzero if X contains a comparison that is not either EQ or NE, i.e., an inequality. 
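As an illustrative example: an rtx such as (set (reg) (lt (reg) (const_int 0))) contains an inequality and so yields nonzero, whereas (set (reg) (eq (reg) (const_int 0))) does not; only the signed and unsigned LT, GT, LE and GE codes are treated as inequalities here.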
*/ int inequality_comparisons_p (rtx x) { const char *fmt; int len, i; enum rtx_code code = GET_CODE (x); switch (code) { case REG: case SCRATCH: case PC: case CC0: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST: case LABEL_REF: case SYMBOL_REF: return 0; case LT: case LTU: case GT: case GTU: case LE: case LEU: case GE: case GEU: return 1; default: break; } len = GET_RTX_LENGTH (code); fmt = GET_RTX_FORMAT (code); for (i = 0; i < len; i++) { if (fmt[i] == 'e') { if (inequality_comparisons_p (XEXP (x, i))) return 1; } else if (fmt[i] == 'E') { int j; for (j = XVECLEN (x, i) - 1; j >= 0; j--) if (inequality_comparisons_p (XVECEXP (x, i, j))) return 1; } } return 0; } /* Replace any occurrence of FROM in X with TO. The function does not enter into CONST_DOUBLE for the replace. Note that copying is not done so X must not be shared unless all copies are to be modified. */ rtx replace_rtx (rtx x, rtx from, rtx to) { int i, j; const char *fmt; /* The following prevents loops occurrence when we change MEM in CONST_DOUBLE onto the same CONST_DOUBLE. */ if (x != 0 && GET_CODE (x) == CONST_DOUBLE) return x; if (x == from) return to; /* Allow this function to make replacements in EXPR_LISTs. */ if (x == 0) return 0; if (GET_CODE (x) == SUBREG) { rtx new = replace_rtx (SUBREG_REG (x), from, to); if (GET_CODE (new) == CONST_INT) { x = simplify_subreg (GET_MODE (x), new, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); if (! x) abort (); } else SUBREG_REG (x) = new; return x; } else if (GET_CODE (x) == ZERO_EXTEND) { rtx new = replace_rtx (XEXP (x, 0), from, to); if (GET_CODE (new) == CONST_INT) { x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x), new, GET_MODE (XEXP (x, 0))); if (! x) abort (); } else XEXP (x, 0) = new; return x; } fmt = GET_RTX_FORMAT (GET_CODE (x)); for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) { if (fmt[i] == 'e') XEXP (x, i) = replace_rtx (XEXP (x, i), from, to); else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to); } return x; } /* Throughout the rtx X, replace many registers according to REG_MAP. Return the replacement for X (which may be X with altered contents). REG_MAP[R] is the replacement for register R, or 0 for don't replace. NREGS is the length of REG_MAP; regs >= NREGS are not mapped. We only support REG_MAP entries of REG or SUBREG. Also, hard registers should not be mapped to pseudos or vice versa since validate_change is not called. If REPLACE_DEST is 1, replacements are also done in destinations; otherwise, only sources are replaced. */ rtx replace_regs (rtx x, rtx *reg_map, unsigned int nregs, int replace_dest) { enum rtx_code code; int i; const char *fmt; if (x == 0) return x; code = GET_CODE (x); switch (code) { case SCRATCH: case PC: case CC0: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case CONST: case SYMBOL_REF: case LABEL_REF: return x; case REG: /* Verify that the register has an entry before trying to access it. */ if (REGNO (x) < nregs && reg_map[REGNO (x)] != 0) { /* SUBREGs can't be shared. Always return a copy to ensure that if this replacement occurs more than once then each instance will get distinct rtx. */ if (GET_CODE (reg_map[REGNO (x)]) == SUBREG) return copy_rtx (reg_map[REGNO (x)]); return reg_map[REGNO (x)]; } return x; case SUBREG: /* Prevent making nested SUBREGs. 
*/ if (REG_P (SUBREG_REG (x)) && REGNO (SUBREG_REG (x)) < nregs && reg_map[REGNO (SUBREG_REG (x))] != 0 && GET_CODE (reg_map[REGNO (SUBREG_REG (x))]) == SUBREG) { rtx map_val = reg_map[REGNO (SUBREG_REG (x))]; return simplify_gen_subreg (GET_MODE (x), map_val, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); } break; case SET: if (replace_dest) SET_DEST (x) = replace_regs (SET_DEST (x), reg_map, nregs, 0); else if (MEM_P (SET_DEST (x)) || GET_CODE (SET_DEST (x)) == STRICT_LOW_PART) /* Even if we are not to replace destinations, replace register if it is CONTAINED in destination (destination is memory or STRICT_LOW_PART). */ XEXP (SET_DEST (x), 0) = replace_regs (XEXP (SET_DEST (x), 0), reg_map, nregs, 0); else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT) /* Similarly, for ZERO_EXTRACT we replace all operands. */ break; SET_SRC (x) = replace_regs (SET_SRC (x), reg_map, nregs, 0); return x; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') XEXP (x, i) = replace_regs (XEXP (x, i), reg_map, nregs, replace_dest); else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) XVECEXP (x, i, j) = replace_regs (XVECEXP (x, i, j), reg_map, nregs, replace_dest); } } return x; } /* Replace occurrences of the old label in *X with the new one. DATA is a REPLACE_LABEL_DATA containing the old and new labels. */ int replace_label (rtx *x, void *data) { rtx l = *x; rtx old_label = ((replace_label_data *) data)->r1; rtx new_label = ((replace_label_data *) data)->r2; bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses; if (l == NULL_RTX) return 0; if (GET_CODE (l) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (l)) { rtx c = get_pool_constant (l); if (rtx_referenced_p (old_label, c)) { rtx new_c, new_l; replace_label_data *d = (replace_label_data *) data; /* Create a copy of constant C; replace the label inside but do not update LABEL_NUSES because uses in constant pool are not counted. */ new_c = copy_rtx (c); d->update_label_nuses = false; for_each_rtx (&new_c, replace_label, data); d->update_label_nuses = update_label_nuses; /* Add the new constant NEW_C to constant pool and replace the old reference to constant by new reference. */ new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0); *x = replace_rtx (l, l, new_l); } return 0; } /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL field. This is not handled by for_each_rtx because it doesn't handle unprinted ('0') fields. */ if (GET_CODE (l) == JUMP_INSN && JUMP_LABEL (l) == old_label) JUMP_LABEL (l) = new_label; if ((GET_CODE (l) == LABEL_REF || GET_CODE (l) == INSN_LIST) && XEXP (l, 0) == old_label) { XEXP (l, 0) = new_label; if (update_label_nuses) { ++LABEL_NUSES (new_label); --LABEL_NUSES (old_label); } return 0; } return 0; } /* When *BODY is equal to X or X is directly referenced by *BODY return nonzero, thus FOR_EACH_RTX stops traversing and returns nonzero too, otherwise FOR_EACH_RTX continues traversing *BODY. */ static int rtx_referenced_p_1 (rtx *body, void *x) { rtx y = (rtx) x; if (*body == NULL_RTX) return y == NULL_RTX; /* Return true if a label_ref *BODY refers to label Y. */ if (GET_CODE (*body) == LABEL_REF && GET_CODE (y) == CODE_LABEL) return XEXP (*body, 0) == y; /* If *BODY is a reference to pool constant traverse the constant. */ if (GET_CODE (*body) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*body)) return rtx_referenced_p (y, get_pool_constant (*body)); /* By default, compare the RTL expressions. 
*/ return rtx_equal_p (*body, y); } /* Return true if X is referenced in BODY. */ int rtx_referenced_p (rtx x, rtx body) { return for_each_rtx (&body, rtx_referenced_p_1, x); } /* If INSN is a tablejump return true and store the label (before jump table) to *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */ bool tablejump_p (rtx insn, rtx *labelp, rtx *tablep) { rtx label, table; if (GET_CODE (insn) == JUMP_INSN && (label = JUMP_LABEL (insn)) != NULL_RTX && (table = next_active_insn (label)) != NULL_RTX && GET_CODE (table) == JUMP_INSN && (GET_CODE (PATTERN (table)) == ADDR_VEC || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC)) { if (labelp) *labelp = label; if (tablep) *tablep = table; return true; } return false; } /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or constant that is not in the constant pool and not in the condition of an IF_THEN_ELSE. */ static int computed_jump_p_1 (rtx x) { enum rtx_code code = GET_CODE (x); int i, j; const char *fmt; switch (code) { case LABEL_REF: case PC: return 0; case CONST: case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case REG: return 1; case MEM: return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0))); case IF_THEN_ELSE: return (computed_jump_p_1 (XEXP (x, 1)) || computed_jump_p_1 (XEXP (x, 2))); default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e' && computed_jump_p_1 (XEXP (x, i))) return 1; else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) if (computed_jump_p_1 (XVECEXP (x, i, j))) return 1; } return 0; } /* Return nonzero if INSN is an indirect jump (aka computed jump). Tablejumps and casesi insns are not considered indirect jumps; we can recognize them by a (use (label_ref)). */ int computed_jump_p (rtx insn) { int i; if (GET_CODE (insn) == JUMP_INSN) { rtx pat = PATTERN (insn); if (find_reg_note (insn, REG_LABEL, NULL_RTX)) return 0; else if (GET_CODE (pat) == PARALLEL) { int len = XVECLEN (pat, 0); int has_use_labelref = 0; for (i = len - 1; i >= 0; i--) if (GET_CODE (XVECEXP (pat, 0, i)) == USE && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == LABEL_REF)) has_use_labelref = 1; if (! has_use_labelref) for (i = len - 1; i >= 0; i--) if (GET_CODE (XVECEXP (pat, 0, i)) == SET && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i)))) return 1; } else if (GET_CODE (pat) == SET && SET_DEST (pat) == pc_rtx && computed_jump_p_1 (SET_SRC (pat))) return 1; } return 0; } /* Traverse X via depth-first search, calling F for each sub-expression (including X itself). F is also passed the DATA. If F returns -1, do not traverse sub-expressions, but continue traversing the rest of the tree. If F ever returns any other nonzero value, stop the traversal, and return the value returned by F. Otherwise, return 0. This function does not traverse inside tree structure that contains RTX_EXPRs, or into sub-expressions whose format code is `0' since it is not known whether or not those codes are actually RTL. This routine is very general, and could (should?) be used to implement many of the other routines in this file. */ int for_each_rtx (rtx *x, rtx_function f, void *data) { int result; int length; const char *format; int i; /* Call F on X. */ result = (*f) (x, data); if (result == -1) /* Do not traverse sub-expressions. */ return 0; else if (result != 0) /* Stop the traversal. */ return result; if (*x == NULL_RTX) /* There are no sub-expressions. 
*/ return 0; length = GET_RTX_LENGTH (GET_CODE (*x)); format = GET_RTX_FORMAT (GET_CODE (*x)); for (i = 0; i < length; ++i) { switch (format[i]) { case 'e': result = for_each_rtx (&XEXP (*x, i), f, data); if (result != 0) return result; break; case 'V': case 'E': if (XVEC (*x, i) != 0) { int j; for (j = 0; j < XVECLEN (*x, i); ++j) { result = for_each_rtx (&XVECEXP (*x, i, j), f, data); if (result != 0) return result; } } break; default: /* Nothing to do. */ break; } } return 0; } /* Searches X for any reference to REGNO, returning the rtx of the reference found if any. Otherwise, returns NULL_RTX. */ rtx regno_use_in (unsigned int regno, rtx x) { const char *fmt; int i, j; rtx tem; if (REG_P (x) && REGNO (x) == regno) return x; fmt = GET_RTX_FORMAT (GET_CODE (x)); for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) { if (fmt[i] == 'e') { if ((tem = regno_use_in (regno, XEXP (x, i)))) return tem; } else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) if ((tem = regno_use_in (regno , XVECEXP (x, i, j)))) return tem; } return NULL_RTX; } /* Return a value indicating whether OP, an operand of a commutative operation, is preferred as the first or second operand. The higher the value, the stronger the preference for being the first operand. We use negative values to indicate a preference for the first operand and positive values for the second operand. */ int commutative_operand_precedence (rtx op) { enum rtx_code code = GET_CODE (op); /* Constants always come the second operand. Prefer "nice" constants. */ if (code == CONST_INT) return -7; if (code == CONST_DOUBLE) return -6; op = avoid_constant_pool_reference (op); switch (GET_RTX_CLASS (code)) { case RTX_CONST_OBJ: if (code == CONST_INT) return -5; if (code == CONST_DOUBLE) return -4; return -3; case RTX_EXTRA: /* SUBREGs of objects should come second. */ if (code == SUBREG && OBJECT_P (SUBREG_REG (op))) return -2; if (!CONSTANT_P (op)) return 0; else /* As for RTX_CONST_OBJ. */ return -3; case RTX_OBJ: /* Complex expressions should be the first, so decrease priority of objects. */ return -1; case RTX_COMM_ARITH: /* Prefer operands that are themselves commutative to be first. This helps to make things linear. In particular, (and (and (reg) (reg)) (not (reg))) is canonical. */ return 4; case RTX_BIN_ARITH: /* If only one operand is a binary expression, it will be the first operand. In particular, (plus (minus (reg) (reg)) (neg (reg))) is canonical, although it will usually be further simplified. */ return 2; case RTX_UNARY: /* Then prefer NEG and NOT. */ if (code == NEG || code == NOT) return 1; default: return 0; } } /* Return 1 iff it is necessary to swap operands of commutative operation in order to canonicalize expression. */ int swap_commutative_operands_p (rtx x, rtx y) { return (commutative_operand_precedence (x) < commutative_operand_precedence (y)); } /* Return 1 if X is an autoincrement side effect and the register is not the stack pointer. */ int auto_inc_p (rtx x) { switch (GET_CODE (x)) { case PRE_INC: case POST_INC: case PRE_DEC: case POST_DEC: case PRE_MODIFY: case POST_MODIFY: /* There are no REG_INC notes for SP. */ if (XEXP (x, 0) != stack_pointer_rtx) return 1; default: break; } return 0; } /* Return 1 if the sequence of instructions beginning with FROM and up to and including TO is safe to move. If NEW_TO is non-NULL, and the sequence is not already safe to move, but can be easily extended to a sequence which is safe, then NEW_TO will point to the end of the extended sequence. 
For now, this function only checks that the region contains whole exception regions, but it could be extended to check additional conditions as well. */ int insns_safe_to_move_p (rtx from, rtx to, rtx *new_to) { int eh_region_count = 0; int past_to_p = 0; rtx r = from; /* By default, assume the end of the region will be what was suggested. */ if (new_to) *new_to = to; while (r) { if (GET_CODE (r) == NOTE) { switch (NOTE_LINE_NUMBER (r)) { case NOTE_INSN_EH_REGION_BEG: ++eh_region_count; break; case NOTE_INSN_EH_REGION_END: if (eh_region_count == 0) /* This sequence of instructions contains the end of an exception region, but not the beginning. Moving it will cause chaos. */ return 0; --eh_region_count; break; default: break; } } else if (past_to_p) /* If we've passed TO, and we see a non-note instruction, we can't extend the sequence to a movable sequence. */ return 0; if (r == to) { if (!new_to) /* It's OK to move the sequence if there were matched sets of exception region notes. */ return eh_region_count == 0; past_to_p = 1; } /* It's OK to move the sequence if there were matched sets of exception region notes. */ if (past_to_p && eh_region_count == 0) { *new_to = r; return 1; } /* Go to the next instruction. */ r = NEXT_INSN (r); } return 0; } /* Return nonzero if IN contains a piece of rtl that has the address LOC. */ int loc_mentioned_in_p (rtx *loc, rtx in) { enum rtx_code code = GET_CODE (in); const char *fmt = GET_RTX_FORMAT (code); int i, j; for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (loc == &in->u.fld[i].rtx) return 1; if (fmt[i] == 'e') { if (loc_mentioned_in_p (loc, XEXP (in, i))) return 1; } else if (fmt[i] == 'E') for (j = XVECLEN (in, i) - 1; j >= 0; j--) if (loc_mentioned_in_p (loc, XVECEXP (in, i, j))) return 1; } return 0; } /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE, and SUBREG_BYTE, return the bit offset where the subreg begins (counting from the least significant bit of the operand). */ unsigned int subreg_lsb_1 (enum machine_mode outer_mode, enum machine_mode inner_mode, unsigned int subreg_byte) { unsigned int bitpos; unsigned int byte; unsigned int word; /* A paradoxical subreg begins at bit position 0. */ if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode)) return 0; if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN) /* If the subreg crosses a word boundary, ensure that it also begins and ends on a word boundary. */ if ((subreg_byte % UNITS_PER_WORD + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD && (subreg_byte % UNITS_PER_WORD || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)) abort (); if (WORDS_BIG_ENDIAN) word = (GET_MODE_SIZE (inner_mode) - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD; else word = subreg_byte / UNITS_PER_WORD; bitpos = word * BITS_PER_WORD; if (BYTES_BIG_ENDIAN) byte = (GET_MODE_SIZE (inner_mode) - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD; else byte = subreg_byte % UNITS_PER_WORD; bitpos += byte * BITS_PER_UNIT; return bitpos; } /* Given a subreg X, return the bit offset where the subreg begins (counting from the least significant bit of the reg). */ unsigned int subreg_lsb (rtx x) { return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); } /* This function returns the regno offset of a subreg expression. xregno - A regno of an inner hard subreg_reg (or what will become one). xmode - The mode of xregno. offset - The byte offset. ymode - The mode of a top level SUBREG (or what may become one). RETURN - The regno offset which would be used.
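A worked example under assumed target parameters (little endian, 4-byte words, DImode occupying two hard registers and SImode one): for xmode DImode, ymode SImode and offset 4, mode_multiple is 8/4 = 2, y_offset is 4/4 = 1 and nregs_multiple is 2/1 = 2, so the result is (1 / (2 / 2)) * 1 = 1, i.e. the subreg refers to the hard register immediately after XREGNO.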
*/ unsigned int subreg_regno_offset (unsigned int xregno, enum machine_mode xmode, unsigned int offset, enum machine_mode ymode) { int nregs_xmode, nregs_ymode; int mode_multiple, nregs_multiple; int y_offset; if (xregno >= FIRST_PSEUDO_REGISTER) abort (); nregs_xmode = hard_regno_nregs[xregno][xmode]; nregs_ymode = hard_regno_nregs[xregno][ymode]; /* If this is a big endian paradoxical subreg, which uses more actual hard registers than the original register, we must return a negative offset so that we find the proper highpart of the register. */ if (offset == 0 && nregs_ymode > nregs_xmode && (GET_MODE_SIZE (ymode) > UNITS_PER_WORD ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)) return nregs_xmode - nregs_ymode; if (offset == 0 || nregs_xmode == nregs_ymode) return 0; /* size of ymode must not be greater than the size of xmode. */ mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode); if (mode_multiple == 0) abort (); y_offset = offset / GET_MODE_SIZE (ymode); nregs_multiple = nregs_xmode / nregs_ymode; return (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode; } /* This function returns true when the offset is representable via subreg_offset in the given regno. xregno - A regno of an inner hard subreg_reg (or what will become one). xmode - The mode of xregno. offset - The byte offset. ymode - The mode of a top level SUBREG (or what may become one). RETURN - The regno offset which would be used. */ bool subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode, unsigned int offset, enum machine_mode ymode) { int nregs_xmode, nregs_ymode; int mode_multiple, nregs_multiple; int y_offset; if (xregno >= FIRST_PSEUDO_REGISTER) abort (); nregs_xmode = hard_regno_nregs[xregno][xmode]; nregs_ymode = hard_regno_nregs[xregno][ymode]; /* Paradoxical subregs are always valid. */ if (offset == 0 && nregs_ymode > nregs_xmode && (GET_MODE_SIZE (ymode) > UNITS_PER_WORD ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)) return true; /* Lowpart subregs are always valid. */ if (offset == subreg_lowpart_offset (ymode, xmode)) return true; #ifdef ENABLE_CHECKING /* This should always pass, otherwise we don't know how to verify the constraint. These conditions may be relaxed but subreg_offset would need to be redesigned. */ if (GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode) || GET_MODE_SIZE (ymode) % nregs_ymode || nregs_xmode % nregs_ymode) abort (); #endif /* The XMODE value can be seen as a vector of NREGS_XMODE values. The subreg must represent a lowpart of given field. Compute what field it is. */ offset -= subreg_lowpart_offset (ymode, mode_for_size (GET_MODE_BITSIZE (xmode) / nregs_xmode, MODE_INT, 0)); /* size of ymode must not be greater than the size of xmode. */ mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode); if (mode_multiple == 0) abort (); y_offset = offset / GET_MODE_SIZE (ymode); nregs_multiple = nregs_xmode / nregs_ymode; #ifdef ENABLE_CHECKING if (offset % GET_MODE_SIZE (ymode) || mode_multiple % nregs_multiple) abort (); #endif return (!(y_offset % (mode_multiple / nregs_multiple))); } /* Return the final regno that a subreg expression refers to. */ unsigned int subreg_regno (rtx x) { unsigned int ret; rtx subreg = SUBREG_REG (x); int regno = REGNO (subreg); ret = regno + subreg_regno_offset (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x)); return ret; } struct parms_set_data { int nregs; HARD_REG_SET regs; }; /* Helper function for noticing stores to parameter registers. 
*/ static void parms_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data) { struct parms_set_data *d = data; if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER && TEST_HARD_REG_BIT (d->regs, REGNO (x))) { CLEAR_HARD_REG_BIT (d->regs, REGNO (x)); d->nregs--; } } /* Look backward for first parameter to be loaded. Do not skip BOUNDARY. */ rtx find_first_parameter_load (rtx call_insn, rtx boundary) { struct parms_set_data parm; rtx p, before; /* Since different machines initialize their parameter registers in different orders, assume nothing. Collect the set of all parameter registers. */ CLEAR_HARD_REG_SET (parm.regs); parm.nregs = 0; for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1)) if (GET_CODE (XEXP (p, 0)) == USE && REG_P (XEXP (XEXP (p, 0), 0))) { if (REGNO (XEXP (XEXP (p, 0), 0)) >= FIRST_PSEUDO_REGISTER) abort (); /* We only care about registers which can hold function arguments. */ if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0)))) continue; SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0))); parm.nregs++; } before = call_insn; /* Search backward for the first set of a register in this set. */ while (parm.nregs && before != boundary) { before = PREV_INSN (before); /* It is possible that some loads got CSEed from one call to another. Stop in that case. */ if (GET_CODE (before) == CALL_INSN) break; /* Our caller needs either ensure that we will find all sets (in case code has not been optimized yet), or take care for possible labels in a way by setting boundary to preceding CODE_LABEL. */ if (GET_CODE (before) == CODE_LABEL) { if (before != boundary) abort (); break; } if (INSN_P (before)) note_stores (PATTERN (before), parms_set, &parm); } return before; } /* Return true if we should avoid inserting code between INSN and preceding call instruction. */ bool keep_with_call_p (rtx insn) { rtx set; if (INSN_P (insn) && (set = single_set (insn)) != NULL) { if (REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER && fixed_regs[REGNO (SET_DEST (set))] && general_operand (SET_SRC (set), VOIDmode)) return true; if (REG_P (SET_SRC (set)) && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set))) && REG_P (SET_DEST (set)) && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER) return true; /* There may be a stack pop just after the call and before the store of the return register. Search for the actual store when deciding if we can break or not. */ if (SET_DEST (set) == stack_pointer_rtx) { rtx i2 = next_nonnote_insn (insn); if (i2 && keep_with_call_p (i2)) return true; } } return false; } /* Return true when store to register X can be hoisted to the place with LIVE registers (can be NULL). Value VAL contains destination whose value will be used. */ static bool hoist_test_store (rtx x, rtx val, regset live) { if (GET_CODE (x) == SCRATCH) return true; if (rtx_equal_p (x, val)) return true; /* Allow subreg of X in case it is not writing just part of multireg pseudo. Then we would need to update all users to care hoisting the store too. Caller may represent that by specifying whole subreg as val. */ if (GET_CODE (x) == SUBREG && rtx_equal_p (SUBREG_REG (x), val)) { if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) > UNITS_PER_WORD && GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))) return false; return true; } if (GET_CODE (x) == SUBREG) x = SUBREG_REG (x); /* Anything except register store is not hoistable. This includes the partial stores to registers. 
*/ if (!REG_P (x)) return false; /* Pseudo registers can always be replaced by another pseudo to avoid the side effect; for hard registers we must ensure that they are dead. Eventually we may want to add code that tries to turn pseudos into hard registers, but it is unlikely to be useful. */ if (REGNO (x) < FIRST_PSEUDO_REGISTER) { int regno = REGNO (x); int n = hard_regno_nregs[regno][GET_MODE (x)]; if (!live) return false; if (REGNO_REG_SET_P (live, regno)) return false; while (--n > 0) if (REGNO_REG_SET_P (live, regno + n)) return false; } return true; } /* Return true if INSN can be hoisted to place with LIVE hard registers (LIVE can be NULL when unknown). VAL is expected to be stored by the insn and used by the hoisting pass. */ bool can_hoist_insn_p (rtx insn, rtx val, regset live) { rtx pat = PATTERN (insn); int i; /* It is probably not worth the complexity to handle multiple-set stores. */ if (!single_set (insn)) return false; /* We can move CALL_INSN, but we need to check that all caller clobbered regs are dead. */ if (GET_CODE (insn) == CALL_INSN) return false; /* In the future we will handle hoisting of libcall sequences, but give up for now. */ if (find_reg_note (insn, REG_RETVAL, NULL_RTX)) return false; switch (GET_CODE (pat)) { case SET: if (!hoist_test_store (SET_DEST (pat), val, live)) return false; break; case USE: /* USES do have sick semantics, so do not move them. */ return false; break; case CLOBBER: if (!hoist_test_store (XEXP (pat, 0), val, live)) return false; break; case PARALLEL: for (i = 0; i < XVECLEN (pat, 0); i++) { rtx x = XVECEXP (pat, 0, i); switch (GET_CODE (x)) { case SET: if (!hoist_test_store (SET_DEST (x), val, live)) return false; break; case USE: /* We need to fix callers to really ensure availability of all values the insn uses, but for now it is safe to prohibit hoisting of any insn having such hidden uses. */ return false; break; case CLOBBER: if (!hoist_test_store (SET_DEST (x), val, live)) return false; break; default: break; } } break; default: abort (); } return true; } /* Update stores after hoisting: replace all stores to pseudo registers by new ones to avoid clobbering of values, except for the store to VAL, which will be updated to NEW. */ static void hoist_update_store (rtx insn, rtx *xp, rtx val, rtx new) { rtx x = *xp; if (GET_CODE (x) == SCRATCH) return; if (GET_CODE (x) == SUBREG && SUBREG_REG (x) == val) validate_change (insn, xp, simplify_gen_subreg (GET_MODE (x), new, GET_MODE (new), SUBREG_BYTE (x)), 1); if (rtx_equal_p (x, val)) { validate_change (insn, xp, new, 1); return; } if (GET_CODE (x) == SUBREG) { xp = &SUBREG_REG (x); x = *xp; } if (!REG_P (x)) abort (); /* We've verified that hard registers are dead, so we may keep the side effect. Otherwise replace it by a new pseudo. */ if (REGNO (x) >= FIRST_PSEUDO_REGISTER) validate_change (insn, xp, gen_reg_rtx (GET_MODE (x)), 1); REG_NOTES (insn) = alloc_EXPR_LIST (REG_UNUSED, *xp, REG_NOTES (insn)); } /* Create a copy of INSN after AFTER, replacing the store of VAL by a store to NEW and replacing each other side effect on a pseudo register by a new pseudo register. */ rtx hoist_insn_after (rtx insn, rtx after, rtx val, rtx new) { rtx pat; int i; rtx note; insn = emit_copy_of_insn_after (insn, after); pat = PATTERN (insn); /* Remove REG_UNUSED notes as we will re-emit them. */ while ((note = find_reg_note (insn, REG_UNUSED, NULL_RTX))) remove_note (insn, note); /* To get this working, callers must make sure to move everything referenced by REG_EQUAL/REG_EQUIV notes too. Let's remove them; it is probably easier.
*/ while ((note = find_reg_note (insn, REG_EQUAL, NULL_RTX))) remove_note (insn, note); while ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX))) remove_note (insn, note); /* Remove REG_DEAD notes as they might not be valid anymore in case we create redundancy. */ while ((note = find_reg_note (insn, REG_DEAD, NULL_RTX))) remove_note (insn, note); switch (GET_CODE (pat)) { case SET: hoist_update_store (insn, &SET_DEST (pat), val, new); break; case USE: break; case CLOBBER: hoist_update_store (insn, &XEXP (pat, 0), val, new); break; case PARALLEL: for (i = 0; i < XVECLEN (pat, 0); i++) { rtx x = XVECEXP (pat, 0, i); switch (GET_CODE (x)) { case SET: hoist_update_store (insn, &SET_DEST (x), val, new); break; case USE: break; case CLOBBER: hoist_update_store (insn, &SET_DEST (x), val, new); break; default: break; } } break; default: abort (); } if (!apply_change_group ()) abort (); return insn; } rtx hoist_insn_to_edge (rtx insn, edge e, rtx val, rtx new) { rtx new_insn; /* We cannot insert instructions on an abnormal critical edge. It will be easier to find the culprit if we die now. */ if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)) abort (); /* Do not use emit_insn_on_edge as we want to preserve notes and similar stuff. We also emit CALL_INSNS and friends. */ if (e->insns.r == NULL_RTX) { start_sequence (); emit_note (NOTE_INSN_DELETED); } else push_to_sequence (e->insns.r); new_insn = hoist_insn_after (insn, get_last_insn (), val, new); e->insns.r = get_insns (); end_sequence (); return new_insn; } /* Return true if LABEL is a target of JUMP_INSN. This applies only to non-complex jumps. That is, direct unconditional, conditional, and tablejumps, but not computed jumps or returns. It also does not apply to the fallthru case of a conditional jump. */ bool label_is_jump_target_p (rtx label, rtx jump_insn) { rtx tmp = JUMP_LABEL (jump_insn); if (label == tmp) return true; if (tablejump_p (jump_insn, NULL, &tmp)) { rtvec vec = XVEC (PATTERN (tmp), GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC); int i, veclen = GET_NUM_ELEM (vec); for (i = 0; i < veclen; ++i) if (XEXP (RTVEC_ELT (vec, i), 0) == label) return true; } return false; } /* Return an estimate of the cost of computing rtx X. One use is in cse, to decide which expression to keep in the hash table. Another is in rtl generation, to pick the cheapest way to multiply. Other uses like the latter are expected in the future. */ int rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED) { int i, j; enum rtx_code code; const char *fmt; int total; if (x == 0) return 0; /* Compute the default costs of certain things. Note that targetm.rtx_costs can override the defaults. */ code = GET_CODE (x); switch (code) { case MULT: total = COSTS_N_INSNS (5); break; case DIV: case UDIV: case MOD: case UMOD: total = COSTS_N_INSNS (7); break; case USE: /* Used in loop.c and combine.c as a marker. */ total = 0; break; default: total = COSTS_N_INSNS (1); } switch (code) { case REG: return 0; case SUBREG: /* If we can't tie these modes, make this expensive. The larger the mode, the more expensive it is. */ if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x)))) return COSTS_N_INSNS (2 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD); break; default: if (targetm.rtx_costs (x, code, outer_code, &total)) return total; break; } /* Sum the costs of the sub-rtx's, plus cost of this operation, which is already in total.
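For instance, assuming the target's rtx_costs hook leaves the defaults alone, (plus (reg) (reg)) ends up costing COSTS_N_INSNS (1): the default cost chosen for the PLUS itself, plus zero for each REG operand added by the loop below.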
*/ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) if (fmt[i] == 'e') total += rtx_cost (XEXP (x, i), code); else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) total += rtx_cost (XVECEXP (x, i, j), code); return total; } /* Return cost of address expression X. Expect that X is properly formed address reference. */ int address_cost (rtx x, enum machine_mode mode) { /* We may be asked for cost of various unusual addresses, such as operands of push instruction. It is not worthwhile to complicate writing of the target hook by such cases. */ if (!memory_address_p (mode, x)) return 1000; return targetm.address_cost (x); } /* If the target doesn't override, compute the cost as with arithmetic. */ int default_address_cost (rtx x) { return rtx_cost (x, MEM); } unsigned HOST_WIDE_INT nonzero_bits (rtx x, enum machine_mode mode) { return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0); } unsigned int num_sign_bit_copies (rtx x, enum machine_mode mode) { return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0); } /* The function cached_nonzero_bits is a wrapper around nonzero_bits1. It avoids exponential behavior in nonzero_bits1 when X has identical subexpressions on the first or the second level. */ static unsigned HOST_WIDE_INT cached_nonzero_bits (rtx x, enum machine_mode mode, rtx known_x, enum machine_mode known_mode, unsigned HOST_WIDE_INT known_ret) { if (x == known_x && mode == known_mode) return known_ret; /* Try to find identical subexpressions. If found call nonzero_bits1 on X with the subexpressions as KNOWN_X and the precomputed value for the subexpression as KNOWN_RET. */ if (ARITHMETIC_P (x)) { rtx x0 = XEXP (x, 0); rtx x1 = XEXP (x, 1); /* Check the first level. */ if (x0 == x1) return nonzero_bits1 (x, mode, x0, mode, cached_nonzero_bits (x0, mode, known_x, known_mode, known_ret)); /* Check the second level. */ if (ARITHMETIC_P (x0) && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1))) return nonzero_bits1 (x, mode, x1, mode, cached_nonzero_bits (x1, mode, known_x, known_mode, known_ret)); if (ARITHMETIC_P (x1) && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1))) return nonzero_bits1 (x, mode, x0, mode, cached_nonzero_bits (x0, mode, known_x, known_mode, known_ret)); } return nonzero_bits1 (x, mode, known_x, known_mode, known_ret); } /* We let num_sign_bit_copies recur into nonzero_bits as that is useful. We don't let nonzero_bits recur into num_sign_bit_copies, because that is less useful. We can't allow both, because that results in exponential run time recursion. There is a nullstone testcase that triggered this. This macro avoids accidental uses of num_sign_bit_copies. */ #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior /* Given an expression, X, compute which bits in X can be nonzero. We don't care about bits outside of those defined in MODE. For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is an arithmetic operation, we can do better. */ static unsigned HOST_WIDE_INT nonzero_bits1 (rtx x, enum machine_mode mode, rtx known_x, enum machine_mode known_mode, unsigned HOST_WIDE_INT known_ret) { unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode); unsigned HOST_WIDE_INT inner_nz; enum rtx_code code; unsigned int mode_width = GET_MODE_BITSIZE (mode); /* For floating-point values, assume all bits are needed. */ if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)) return nonzero; /* If X is wider than MODE, use its mode instead. 
*/ if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width) { mode = GET_MODE (x); nonzero = GET_MODE_MASK (mode); mode_width = GET_MODE_BITSIZE (mode); } if (mode_width > HOST_BITS_PER_WIDE_INT) /* Our only callers in this case look for single bit values. So just return the mode mask. Those tests will then be false. */ return nonzero; #ifndef WORD_REGISTER_OPERATIONS /* If MODE is wider than X, but both are a single word for both the host and target machines, we can compute this from which bits of the object might be nonzero in its own mode, taking into account the fact that on many CISC machines, accessing an object in a wider mode causes the high-order bits to become undefined. So they are not known to be zero. */ if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x))) { nonzero &= cached_nonzero_bits (x, GET_MODE (x), known_x, known_mode, known_ret); nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)); return nonzero; } #endif code = GET_CODE (x); switch (code) { case REG: #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) /* If pointers extend unsigned and this is a pointer in Pmode, say that all the bits above ptr_mode are known to be zero. */ if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && REG_POINTER (x)) nonzero &= GET_MODE_MASK (ptr_mode); #endif /* Include declared information about alignment of pointers. */ /* ??? We don't properly preserve REG_POINTER changes across pointer-to-integer casts, so we can't trust it except for things that we know must be pointers. See execute/960116-1.c. */ if ((x == stack_pointer_rtx || x == frame_pointer_rtx || x == arg_pointer_rtx) && REGNO_POINTER_ALIGN (REGNO (x))) { unsigned HOST_WIDE_INT alignment = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT; #ifdef PUSH_ROUNDING /* If PUSH_ROUNDING is defined, it is possible for the stack to be momentarily aligned only to that amount, so we pick the least alignment. */ if (x == stack_pointer_rtx && PUSH_ARGS) alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1), alignment); #endif nonzero &= ~(alignment - 1); } { unsigned HOST_WIDE_INT nonzero_for_hook = nonzero; rtx new = rtl_hooks.reg_nonzero_bits (x, mode, known_x, known_mode, known_ret, &nonzero_for_hook); if (new) nonzero_for_hook &= cached_nonzero_bits (new, mode, known_x, known_mode, known_ret); return nonzero_for_hook; } case CONST_INT: #ifdef SHORT_IMMEDIATES_SIGN_EXTEND /* If X is negative in MODE, sign-extend the value. */ if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1)))) return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width)); #endif return INTVAL (x); case MEM: #ifdef LOAD_EXTEND_OP /* In many, if not most, RISC machines, reading a byte from memory zeros the rest of the register. Noticing that fact saves a lot of extra zero-extends. */ if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND) nonzero &= GET_MODE_MASK (GET_MODE (x)); #endif break; case EQ: case NE: case UNEQ: case LTGT: case GT: case GTU: case UNGT: case LT: case LTU: case UNLT: case GE: case GEU: case UNGE: case LE: case LEU: case UNLE: case UNORDERED: case ORDERED: /* If this produces an integer result, we know which bits are set. Code here used to clear bits outside the mode of X, but that is now done above. 
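   As an illustrative example (not in the original comment): on a target where
   STORE_FLAG_VALUE is 1, an integer-mode comparison result can therefore have at
   most its low bit nonzero.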
*/ if (GET_MODE_CLASS (mode) == MODE_INT && mode_width <= HOST_BITS_PER_WIDE_INT) nonzero = STORE_FLAG_VALUE; break; case NEG: #if 0 /* Disabled to avoid exponential mutual recursion between nonzero_bits and num_sign_bit_copies. */ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x)) == GET_MODE_BITSIZE (GET_MODE (x))) nonzero = 1; #endif if (GET_MODE_SIZE (GET_MODE (x)) < mode_width) nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x))); break; case ABS: #if 0 /* Disabled to avoid exponential mutual recursion between nonzero_bits and num_sign_bit_copies. */ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x)) == GET_MODE_BITSIZE (GET_MODE (x))) nonzero = 1; #endif break; case TRUNCATE: nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode, known_x, known_mode, known_ret) & GET_MODE_MASK (mode)); break; case ZERO_EXTEND: nonzero &= cached_nonzero_bits (XEXP (x, 0), mode, known_x, known_mode, known_ret); if (GET_MODE (XEXP (x, 0)) != VOIDmode) nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0))); break; case SIGN_EXTEND: /* If the sign bit is known clear, this is the same as ZERO_EXTEND. Otherwise, show all the bits in the outer mode but not the inner may be nonzero. */ inner_nz = cached_nonzero_bits (XEXP (x, 0), mode, known_x, known_mode, known_ret); if (GET_MODE (XEXP (x, 0)) != VOIDmode) { inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0))); if (inner_nz & (((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1)))) inner_nz |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))); } nonzero &= inner_nz; break; case AND: nonzero &= cached_nonzero_bits (XEXP (x, 0), mode, known_x, known_mode, known_ret) & cached_nonzero_bits (XEXP (x, 1), mode, known_x, known_mode, known_ret); break; case XOR: case IOR: case UMIN: case UMAX: case SMIN: case SMAX: { unsigned HOST_WIDE_INT nonzero0 = cached_nonzero_bits (XEXP (x, 0), mode, known_x, known_mode, known_ret); /* Don't call nonzero_bits for the second time if it cannot change anything. */ if ((nonzero & nonzero0) != nonzero) nonzero &= nonzero0 | cached_nonzero_bits (XEXP (x, 1), mode, known_x, known_mode, known_ret); } break; case PLUS: case MINUS: case MULT: case DIV: case UDIV: case MOD: case UMOD: /* We can apply the rules of arithmetic to compute the number of high- and low-order zero bits of these operations. We start by computing the width (position of the highest-order nonzero bit) and the number of low-order zero bits for each value. */ { unsigned HOST_WIDE_INT nz0 = cached_nonzero_bits (XEXP (x, 0), mode, known_x, known_mode, known_ret); unsigned HOST_WIDE_INT nz1 = cached_nonzero_bits (XEXP (x, 1), mode, known_x, known_mode, known_ret); int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1; int width0 = floor_log2 (nz0) + 1; int width1 = floor_log2 (nz1) + 1; int low0 = floor_log2 (nz0 & -nz0); int low1 = floor_log2 (nz1 & -nz1); HOST_WIDE_INT op0_maybe_minusp = (nz0 & ((HOST_WIDE_INT) 1 << sign_index)); HOST_WIDE_INT op1_maybe_minusp = (nz1 & ((HOST_WIDE_INT) 1 << sign_index)); unsigned int result_width = mode_width; int result_low = 0; switch (code) { case PLUS: result_width = MAX (width0, width1) + 1; result_low = MIN (low0, low1); break; case MINUS: result_low = MIN (low0, low1); break; case MULT: result_width = width0 + width1; result_low = low0 + low1; break; case DIV: if (width1 == 0) break; if (! op0_maybe_minusp && ! op1_maybe_minusp) result_width = width0; break; case UDIV: if (width1 == 0) break; result_width = width0; break; case MOD: if (width1 == 0) break; if (! op0_maybe_minusp && ! 
op1_maybe_minusp) result_width = MIN (width0, width1); result_low = MIN (low0, low1); break; case UMOD: if (width1 == 0) break; result_width = MIN (width0, width1); result_low = MIN (low0, low1); break; default: abort (); } if (result_width < mode_width) nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1; if (result_low > 0) nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1); #ifdef POINTERS_EXTEND_UNSIGNED /* If pointers extend unsigned and this is an addition or subtraction to a pointer in Pmode, all the bits above ptr_mode are known to be zero. */ if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode && (code == PLUS || code == MINUS) && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0))) nonzero &= GET_MODE_MASK (ptr_mode); #endif } break; case ZERO_EXTRACT: if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT) nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1; break; case SUBREG: /* If this is a SUBREG formed for a promoted variable that has been zero-extended, we know that at least the high-order bits are zero, though others might be too. */ if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0) nonzero = GET_MODE_MASK (GET_MODE (x)) & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x), known_x, known_mode, known_ret); /* If the inner mode is a single word for both the host and target machines, we can compute this from which bits of the inner object might be nonzero. */ if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= HOST_BITS_PER_WIDE_INT)) { nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode, known_x, known_mode, known_ret); #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP) /* If this is a typical RISC machine, we only have to worry about the way loads are extended. */ if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND ? (((nonzero & (((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1)))) != 0)) : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND) || !MEM_P (SUBREG_REG (x))) #endif { /* On many CISC machines, accessing an object in a wider mode causes the high-order bits to become undefined. So they are not known to be zero. */ if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) nonzero |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))); } } break; case ASHIFTRT: case LSHIFTRT: case ASHIFT: case ROTATE: /* The nonzero bits are in two classes: any bits within MODE that aren't in GET_MODE (x) are always significant. The rest of the nonzero bits are those that are significant in the operand of the shift when shifted the appropriate number of bits. This shows that high-order bits are cleared by the right shift and low-order bits by left shifts. 
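   Illustrative example, not part of the original comment: for
   (lshiftrt:SI X (const_int 4)) with an operand whose nonzero bits are 0xff, the
   significant bits become 0xff >> 4 = 0x0f, the right shift having cleared the
   high-order part; for (ashift:SI X (const_int 4)) with the same operand they
   become 0xff << 4 = 0xff0, the left shift having cleared the low-order part.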
*/ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT) { enum machine_mode inner_mode = GET_MODE (x); unsigned int width = GET_MODE_BITSIZE (inner_mode); int count = INTVAL (XEXP (x, 1)); unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode); unsigned HOST_WIDE_INT op_nonzero = cached_nonzero_bits (XEXP (x, 0), mode, known_x, known_mode, known_ret); unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask; unsigned HOST_WIDE_INT outer = 0; if (mode_width > width) outer = (op_nonzero & nonzero & ~mode_mask); if (code == LSHIFTRT) inner >>= count; else if (code == ASHIFTRT) { inner >>= count; /* If the sign bit may have been nonzero before the shift, we need to mark all the places it could have been copied to by the shift as possibly nonzero. */ if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count))) inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count); } else if (code == ASHIFT) inner <<= count; else inner = ((inner << (count % width) | (inner >> (width - (count % width)))) & mode_mask); nonzero &= (outer | inner); } break; case FFS: case POPCOUNT: /* This is at most the number of bits in the mode. */ nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1; break; case CLZ: /* If CLZ has a known value at zero, then the nonzero bits are that value, plus the number of bits in the mode minus one. */ if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero)) nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1; else nonzero = -1; break; case CTZ: /* If CTZ has a known value at zero, then the nonzero bits are that value, plus the number of bits in the mode minus one. */ if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero)) nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1; else nonzero = -1; break; case PARITY: nonzero = 1; break; case IF_THEN_ELSE: { unsigned HOST_WIDE_INT nonzero_true = cached_nonzero_bits (XEXP (x, 1), mode, known_x, known_mode, known_ret); /* Don't call nonzero_bits for the second time if it cannot change anything. */ if ((nonzero & nonzero_true) != nonzero) nonzero &= nonzero_true | cached_nonzero_bits (XEXP (x, 2), mode, known_x, known_mode, known_ret); } break; default: break; } return nonzero; } /* See the macro definition above. */ #undef cached_num_sign_bit_copies /* The function cached_num_sign_bit_copies is a wrapper around num_sign_bit_copies1. It avoids exponential behavior in num_sign_bit_copies1 when X has identical subexpressions on the first or the second level. */ static unsigned int cached_num_sign_bit_copies (rtx x, enum machine_mode mode, rtx known_x, enum machine_mode known_mode, unsigned int known_ret) { if (x == known_x && mode == known_mode) return known_ret; /* Try to find identical subexpressions. If found call num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and the precomputed value for the subexpression as KNOWN_RET. */ if (ARITHMETIC_P (x)) { rtx x0 = XEXP (x, 0); rtx x1 = XEXP (x, 1); /* Check the first level. */ if (x0 == x1) return num_sign_bit_copies1 (x, mode, x0, mode, cached_num_sign_bit_copies (x0, mode, known_x, known_mode, known_ret)); /* Check the second level. 
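   Illustrative examples, not from the original comment: a first-level hit is an
   expression such as (and X X), where both operands are the identical rtx; a
   second-level hit is one such as (plus (mult X Y) Y), where one operand reappears
   inside the other, so its value is computed once and then threaded through as
   KNOWN_X and KNOWN_RET.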
*/ if (ARITHMETIC_P (x0) && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1))) return num_sign_bit_copies1 (x, mode, x1, mode, cached_num_sign_bit_copies (x1, mode, known_x, known_mode, known_ret)); if (ARITHMETIC_P (x1) && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1))) return num_sign_bit_copies1 (x, mode, x0, mode, cached_num_sign_bit_copies (x0, mode, known_x, known_mode, known_ret)); } return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret); } /* Return the number of bits at the high-order end of X that are known to be equal to the sign bit. X will be used in mode MODE; if MODE is VOIDmode, X will be used in its own mode. The returned value will always be between 1 and the number of bits in MODE. */ static unsigned int num_sign_bit_copies1 (rtx x, enum machine_mode mode, rtx known_x, enum machine_mode known_mode, unsigned int known_ret) { enum rtx_code code = GET_CODE (x); unsigned int bitwidth = GET_MODE_BITSIZE (mode); int num0, num1, result; unsigned HOST_WIDE_INT nonzero; /* If we weren't given a mode, use the mode of X. If the mode is still VOIDmode, we don't know anything. Likewise if one of the modes is floating-point. */ if (mode == VOIDmode) mode = GET_MODE (x); if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))) return 1; /* For a smaller object, just ignore the high bits. */ if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x))) { num0 = cached_num_sign_bit_copies (x, GET_MODE (x), known_x, known_mode, known_ret); return MAX (1, num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth)); } if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x))) { #ifndef WORD_REGISTER_OPERATIONS /* If this machine does not do all register operations on the entire register and MODE is wider than the mode of X, we can say nothing at all about the high-order bits. */ return 1; #else /* Likewise on machines that do, if the mode of the object is smaller than a word and loads of that size don't sign extend, we can say nothing about the high order bits. */ if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD #ifdef LOAD_EXTEND_OP && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND #endif ) return 1; #endif } switch (code) { case REG: #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) /* If pointers extend signed and this is a pointer in Pmode, say that all the bits above ptr_mode are known to be sign bit copies. */ if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode && REG_POINTER (x)) return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1; #endif { unsigned int copies_for_hook = 1, copies = 1; rtx new = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x, known_mode, known_ret, &copies_for_hook); if (new) copies = cached_num_sign_bit_copies (new, mode, known_x, known_mode, known_ret); if (copies > 1 || copies_for_hook > 1) return MAX (copies, copies_for_hook); /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */ } break; case MEM: #ifdef LOAD_EXTEND_OP /* Some RISC machines sign-extend all loads of smaller than a word. */ if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND) return MAX (1, ((int) bitwidth - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1)); #endif break; case CONST_INT: /* If the constant is negative, take its 1's complement and remask. Then see how many zero bits we have. */ nonzero = INTVAL (x) & GET_MODE_MASK (mode); if (bitwidth <= HOST_BITS_PER_WIDE_INT && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) nonzero = (~nonzero) & GET_MODE_MASK (mode); return (nonzero == 0 ? 
bitwidth : bitwidth - floor_log2 (nonzero) - 1); case SUBREG: /* If this is a SUBREG for a promoted object that is sign-extended and we are looking at it in a wider mode, we know that at least the high-order bits are known to be sign bit copies. */ if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x)) { num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode, known_x, known_mode, known_ret); return MAX ((int) bitwidth - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1, num0); } /* For a smaller object, just ignore the high bits. */ if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))) { num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode, known_x, known_mode, known_ret); return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - bitwidth))); } #ifdef WORD_REGISTER_OPERATIONS #ifdef LOAD_EXTEND_OP /* For paradoxical SUBREGs on machines where all register operations affect the entire register, just look inside. Note that we are passing MODE to the recursive call, so the number of sign bit copies will remain relative to that mode, not the inner mode. */ /* This works only if loads sign extend. Otherwise, if we get a reload for the inner part, it may be loaded from the stack, and then we lose all sign bit copies that existed before the store to the stack. */ if ((GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND && MEM_P (SUBREG_REG (x))) return cached_num_sign_bit_copies (SUBREG_REG (x), mode, known_x, known_mode, known_ret); #endif #endif break; case SIGN_EXTRACT: if (GET_CODE (XEXP (x, 1)) == CONST_INT) return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1))); break; case SIGN_EXTEND: return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode, known_x, known_mode, known_ret)); case TRUNCATE: /* For a smaller object, just ignore the high bits. */ num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode, known_x, known_mode, known_ret); return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - bitwidth))); case NOT: return cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); case ROTATE: case ROTATERT: /* If we are rotating left by a number of bits less than the number of sign bit copies, we can just subtract that amount from the number. */ if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < (int) bitwidth) { num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1)) : (int) bitwidth - INTVAL (XEXP (x, 1)))); } break; case NEG: /* In general, this subtracts one sign bit copy. But if the value is known to be positive, the number of sign bit copies is the same as that of the input. Finally, if the input has just one bit that might be nonzero, all the bits are copies of the sign bit. */ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); if (bitwidth > HOST_BITS_PER_WIDE_INT) return num0 > 1 ? num0 - 1 : 1; nonzero = nonzero_bits (XEXP (x, 0), mode); if (nonzero == 1) return bitwidth; if (num0 > 1 && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero)) num0--; return num0; case IOR: case AND: case XOR: case SMIN: case SMAX: case UMIN: case UMAX: /* Logical operations will preserve the number of sign-bit copies. MIN and MAX operations always return one of the operands. 
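   For example (illustrative, not in the original comment): if one operand is known
   to have 12 sign-bit copies and the other 20, the AND, IOR, XOR, MIN or MAX of the
   two is credited with MIN (12, 20) = 12 copies.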
*/ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode, known_x, known_mode, known_ret); return MIN (num0, num1); case PLUS: case MINUS: /* For addition and subtraction, we can have a 1-bit carry. However, if we are subtracting 1 from a positive number, there will not be such a carry. Furthermore, if the positive number is known to be 0 or 1, we know the result is either -1 or 0. */ if (code == PLUS && XEXP (x, 1) == constm1_rtx && bitwidth <= HOST_BITS_PER_WIDE_INT) { nonzero = nonzero_bits (XEXP (x, 0), mode); if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0) return (nonzero == 1 || nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1); } num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode, known_x, known_mode, known_ret); result = MAX (1, MIN (num0, num1) - 1); #ifdef POINTERS_EXTEND_UNSIGNED /* If pointers extend signed and this is an addition or subtraction to a pointer in Pmode, all the bits above ptr_mode are known to be sign bit copies. */ if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && (code == PLUS || code == MINUS) && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0))) result = MAX ((int) (GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1), result); #endif return result; case MULT: /* The number of bits of the product is the sum of the number of bits of both terms. However, unless one of the terms if known to be positive, we must allow for an additional bit since negating a negative number can remove one sign bit copy. */ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode, known_x, known_mode, known_ret); result = bitwidth - (bitwidth - num0) - (bitwidth - num1); if (result > 0 && (bitwidth > HOST_BITS_PER_WIDE_INT || (((nonzero_bits (XEXP (x, 0), mode) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) && ((nonzero_bits (XEXP (x, 1), mode) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)))) result--; return MAX (1, result); case UDIV: /* The result must be <= the first operand. If the first operand has the high bit set, we know nothing about the number of sign bit copies. */ if (bitwidth > HOST_BITS_PER_WIDE_INT) return 1; else if ((nonzero_bits (XEXP (x, 0), mode) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) return 1; else return cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); case UMOD: /* The result must be <= the second operand. */ return cached_num_sign_bit_copies (XEXP (x, 1), mode, known_x, known_mode, known_ret); case DIV: /* Similar to unsigned division, except that we have to worry about the case where the divisor is negative, in which case we have to add 1. */ result = cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); if (result > 1 && (bitwidth > HOST_BITS_PER_WIDE_INT || (nonzero_bits (XEXP (x, 1), mode) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)) result--; return result; case MOD: result = cached_num_sign_bit_copies (XEXP (x, 1), mode, known_x, known_mode, known_ret); if (result > 1 && (bitwidth > HOST_BITS_PER_WIDE_INT || (nonzero_bits (XEXP (x, 1), mode) & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)) result--; return result; case ASHIFTRT: /* Shifts by a constant add to the number of bits equal to the sign bit. 
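   For example (illustrative, not in the original comment): an SImode value with 3
   known sign-bit copies shifted right arithmetically by 5 ends up with
   MIN (32, 3 + 5) = 8 copies.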
*/ num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) > 0) num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1))); return num0; case ASHIFT: /* Left shifts destroy copies. */ if (GET_CODE (XEXP (x, 1)) != CONST_INT || INTVAL (XEXP (x, 1)) < 0 || INTVAL (XEXP (x, 1)) >= (int) bitwidth) return 1; num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode, known_x, known_mode, known_ret); return MAX (1, num0 - INTVAL (XEXP (x, 1))); case IF_THEN_ELSE: num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode, known_x, known_mode, known_ret); num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode, known_x, known_mode, known_ret); return MIN (num0, num1); case EQ: case NE: case GE: case GT: case LE: case LT: case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT: case GEU: case GTU: case LEU: case LTU: case UNORDERED: case ORDERED: /* If the constant is negative, take its 1's complement and remask. Then see how many zero bits we have. */ nonzero = STORE_FLAG_VALUE; if (bitwidth <= HOST_BITS_PER_WIDE_INT && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0) nonzero = (~nonzero) & GET_MODE_MASK (mode); return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1); default: break; } /* If we haven't been able to figure it out by one of the above rules, see if some of the high-order bits are known to be zero. If so, count those bits and return one less than that amount. If we can't safely compute the mask for this mode, always return BITWIDTH. */ bitwidth = GET_MODE_BITSIZE (mode); if (bitwidth > HOST_BITS_PER_WIDE_INT) return 1; nonzero = nonzero_bits (x, mode); return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1)) ? 1 : bitwidth - floor_log2 (nonzero) - 1; } /* RTL specific diagnostic subroutines for GCC Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc. Contributed by Gabriel Dos Reis This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #undef FLOAT /* This is for hpux. They should change hpux. */ #undef FFS /* Some systems define this in param.h. */ static location_t location_for_asm (rtx); static void diagnostic_for_asm (rtx, const char *, va_list *, diagnostic_t); /* Figure the location of the given INSN. */ static location_t location_for_asm (rtx insn) { rtx body = PATTERN (insn); rtx asmop; location_t loc; /* Find the (or one of the) ASM_OPERANDS in the insn. 
*/ if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS) asmop = SET_SRC (body); else if (GET_CODE (body) == ASM_OPERANDS) asmop = body; else if (GET_CODE (body) == PARALLEL && GET_CODE (XVECEXP (body, 0, 0)) == SET) asmop = SET_SRC (XVECEXP (body, 0, 0)); else if (GET_CODE (body) == PARALLEL && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS) asmop = XVECEXP (body, 0, 0); else asmop = NULL; if (asmop) #ifdef USE_MAPPED_LOCATION loc = ASM_OPERANDS_SOURCE_LOCATION (asmop); #else { loc.file = ASM_OPERANDS_SOURCE_FILE (asmop); loc.line = ASM_OPERANDS_SOURCE_LINE (asmop); } #endif else loc = input_location; return loc; } /* Report a diagnostic MESSAGE (an errror or a WARNING) at the line number of the insn INSN. This is used only when INSN is an `asm' with operands, and each ASM_OPERANDS records its own source file and line. */ static void diagnostic_for_asm (rtx insn, const char *msg, va_list *args_ptr, diagnostic_t kind) { diagnostic_info diagnostic; diagnostic_set_info (&diagnostic, msg, args_ptr, location_for_asm (insn), kind); report_diagnostic (&diagnostic); } void error_for_asm (rtx insn, const char *msgid, ...) { va_list ap; va_start (ap, msgid); diagnostic_for_asm (insn, msgid, &ap, DK_ERROR); va_end (ap); } void warning_for_asm (rtx insn, const char *msgid, ...) { va_list ap; va_start (ap, msgid); diagnostic_for_asm (insn, msgid, &ap, DK_WARNING); va_end (ap); } void _fatal_insn (const char *msgid, rtx insn, const char *file, int line, const char *function) { error ("%s", _(msgid)); /* The above incremented error_count, but isn't an error that we want to count, so reset it here. */ errorcount--; debug_rtx (insn); fancy_abort (file, line, function); } void _fatal_insn_not_found (rtx insn, const char *file, int line, const char *function) { if (INSN_CODE (insn) < 0) _fatal_insn ("unrecognizable insn:", insn, file, line, function); else _fatal_insn ("insn does not satisfy its constraints:", insn, file, line, function); } /* Simple bitmaps. Copyright (C) 1999, 2000, 2002, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Bitmap manipulation routines. */ /* Allocate a simple bitmap of N_ELMS bits. */ sbitmap sbitmap_alloc (unsigned int n_elms) { unsigned int bytes, size, amt; sbitmap bmap; size = SBITMAP_SET_SIZE (n_elms); bytes = size * sizeof (SBITMAP_ELT_TYPE); amt = (sizeof (struct simple_bitmap_def) + bytes - sizeof (SBITMAP_ELT_TYPE)); bmap = xmalloc (amt); bmap->n_bits = n_elms; bmap->size = size; bmap->bytes = bytes; return bmap; } /* Resize a simple bitmap BMAP to N_ELMS bits. If increasing the size of BMAP, clear the new bits to zero if the DEF argument is zero, and set them to one otherwise. 
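   A minimal usage sketch, with made-up identifiers and purely for illustration:

       sbitmap live = sbitmap_alloc (n_regs);
       sbitmap_zero (live);
       live = sbitmap_resize (live, n_regs + n_new, 0);

   The map may be reallocated, so callers must keep using the returned pointer.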
*/ sbitmap sbitmap_resize (sbitmap bmap, unsigned int n_elms, int def) { unsigned int bytes, size, amt; unsigned int last_bit; size = SBITMAP_SET_SIZE (n_elms); bytes = size * sizeof (SBITMAP_ELT_TYPE); if (bytes > bmap->bytes) { amt = (sizeof (struct simple_bitmap_def) + bytes - sizeof (SBITMAP_ELT_TYPE)); bmap = xrealloc (bmap, amt); } if (n_elms > bmap->n_bits) { if (def) { memset (bmap->elms + bmap->size, -1, bytes - bmap->bytes); /* Set the new bits if the original last element. */ last_bit = bmap->n_bits % SBITMAP_ELT_BITS; if (last_bit) bmap->elms[bmap->size - 1] |= ~((SBITMAP_ELT_TYPE)-1 >> (SBITMAP_ELT_BITS - last_bit)); /* Clear the unused bit in the new last element. */ last_bit = n_elms % SBITMAP_ELT_BITS; if (last_bit) bmap->elms[size - 1] &= (SBITMAP_ELT_TYPE)-1 >> (SBITMAP_ELT_BITS - last_bit); } else memset (bmap->elms + bmap->size, 0, bytes - bmap->bytes); } else if (n_elms < bmap->n_bits) { /* Clear the surplus bits in the last word. */ last_bit = n_elms % SBITMAP_ELT_BITS; if (last_bit) bmap->elms[size - 1] &= (SBITMAP_ELT_TYPE)-1 >> (SBITMAP_ELT_BITS - last_bit); } bmap->n_bits = n_elms; bmap->size = size; bmap->bytes = bytes; return bmap; } /* Re-allocate a simple bitmap of N_ELMS bits. New storage is uninitialized. */ sbitmap sbitmap_realloc (sbitmap src, unsigned int n_elms) { unsigned int bytes, size, amt; sbitmap bmap; size = SBITMAP_SET_SIZE (n_elms); bytes = size * sizeof (SBITMAP_ELT_TYPE); amt = (sizeof (struct simple_bitmap_def) + bytes - sizeof (SBITMAP_ELT_TYPE)); if (src->bytes >= bytes) { src->n_bits = n_elms; return src; } bmap = (sbitmap) xrealloc (src, amt); bmap->n_bits = n_elms; bmap->size = size; bmap->bytes = bytes; return bmap; } /* Allocate a vector of N_VECS bitmaps of N_ELMS bits. */ sbitmap * sbitmap_vector_alloc (unsigned int n_vecs, unsigned int n_elms) { unsigned int i, bytes, offset, elm_bytes, size, amt, vector_bytes; sbitmap *bitmap_vector; size = SBITMAP_SET_SIZE (n_elms); bytes = size * sizeof (SBITMAP_ELT_TYPE); elm_bytes = (sizeof (struct simple_bitmap_def) + bytes - sizeof (SBITMAP_ELT_TYPE)); vector_bytes = n_vecs * sizeof (sbitmap *); /* Round up `vector_bytes' to account for the alignment requirements of an sbitmap. One could allocate the vector-table and set of sbitmaps separately, but that requires maintaining two pointers or creating a cover struct to hold both pointers (so our result is still just one pointer). Neither is a bad idea, but this is simpler for now. */ { /* Based on DEFAULT_ALIGNMENT computation in obstack.c. */ struct { char x; SBITMAP_ELT_TYPE y; } align; int alignment = (char *) & align.y - & align.x; vector_bytes = (vector_bytes + alignment - 1) & ~ (alignment - 1); } amt = vector_bytes + (n_vecs * elm_bytes); bitmap_vector = xmalloc (amt); for (i = 0, offset = vector_bytes; i < n_vecs; i++, offset += elm_bytes) { sbitmap b = (sbitmap) ((char *) bitmap_vector + offset); bitmap_vector[i] = b; b->n_bits = n_elms; b->size = size; b->bytes = bytes; } return bitmap_vector; } /* Copy sbitmap SRC to DST. */ void sbitmap_copy (sbitmap dst, sbitmap src) { memcpy (dst->elms, src->elms, sizeof (SBITMAP_ELT_TYPE) * dst->size); } /* Determine if a == b. */ int sbitmap_equal (sbitmap a, sbitmap b) { return !memcmp (a->elms, b->elms, sizeof (SBITMAP_ELT_TYPE) * a->size); } /* Zero all elements in a bitmap. */ void sbitmap_zero (sbitmap bmap) { memset (bmap->elms, 0, bmap->bytes); } /* Set all elements in a bitmap to ones. 
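   Illustrative note, not part of the original comment: bits beyond n_bits in the
   final element stay clear, so with 64-bit elements a 70-bit map ends up with all
   of its first word set and only the low 6 bits of its second word set.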
*/ void sbitmap_ones (sbitmap bmap) { unsigned int last_bit; memset (bmap->elms, -1, bmap->bytes); last_bit = bmap->n_bits % SBITMAP_ELT_BITS; if (last_bit) bmap->elms[bmap->size - 1] = (SBITMAP_ELT_TYPE)-1 >> (SBITMAP_ELT_BITS - last_bit); } /* Zero a vector of N_VECS bitmaps. */ void sbitmap_vector_zero (sbitmap *bmap, unsigned int n_vecs) { unsigned int i; for (i = 0; i < n_vecs; i++) sbitmap_zero (bmap[i]); } /* Set a vector of N_VECS bitmaps to ones. */ void sbitmap_vector_ones (sbitmap *bmap, unsigned int n_vecs) { unsigned int i; for (i = 0; i < n_vecs; i++) sbitmap_ones (bmap[i]); } /* Set DST to be A union (B - C). DST = A | (B & ~C). Returns true if any change is made. */ bool sbitmap_union_of_diff_cg (sbitmap dst, sbitmap a, sbitmap b, sbitmap c) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; sbitmap_ptr cp = c->elms; SBITMAP_ELT_TYPE changed = 0; for (i = 0; i < n; i++) { SBITMAP_ELT_TYPE tmp = *ap++ | (*bp++ & ~*cp++); changed |= *dstp ^ tmp; *dstp++ = tmp; } return changed != 0; } void sbitmap_union_of_diff (sbitmap dst, sbitmap a, sbitmap b, sbitmap c) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; sbitmap_ptr cp = c->elms; for (i = 0; i < n; i++) *dstp++ = *ap++ | (*bp++ & ~*cp++); } /* Set bitmap DST to the bitwise negation of the bitmap SRC. */ void sbitmap_not (sbitmap dst, sbitmap src) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr srcp = src->elms; unsigned int last_bit; for (i = 0; i < n; i++) *dstp++ = ~*srcp++; /* Zero all bits past n_bits, by ANDing dst with sbitmap_ones. */ last_bit = src->n_bits % SBITMAP_ELT_BITS; if (last_bit) dst->elms[n-1] = dst->elms[n-1] & ((SBITMAP_ELT_TYPE)-1 >> (SBITMAP_ELT_BITS - last_bit)); } /* Set the bits in DST to be the difference between the bits in A and the bits in B. i.e. dst = a & (~b). */ void sbitmap_difference (sbitmap dst, sbitmap a, sbitmap b) { unsigned int i, dst_size = dst->size; unsigned int min_size = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; /* A should be at least as large as DEST, to have a defined source. */ if (a->size < dst_size) abort (); /* If minuend is smaller, we simply pretend it to be zero bits, i.e. only copy the subtrahend into dest. */ if (b->size < min_size) min_size = b->size; for (i = 0; i < min_size; i++) *dstp++ = *ap++ & (~*bp++); /* Now fill the rest of dest from A, if B was too short. This makes sense only when destination and A differ. */ if (dst != a && i != dst_size) for (; i < dst_size; i++) *dstp++ = *ap++; } /* Set DST to be (A and B). Return nonzero if any change is made. */ bool sbitmap_a_and_b_cg (sbitmap dst, sbitmap a, sbitmap b) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; SBITMAP_ELT_TYPE changed = 0; for (i = 0; i < n; i++) { SBITMAP_ELT_TYPE tmp = *ap++ & *bp++; changed |= *dstp ^ tmp; *dstp++ = tmp; } return changed != 0; } void sbitmap_a_and_b (sbitmap dst, sbitmap a, sbitmap b) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; for (i = 0; i < n; i++) *dstp++ = *ap++ & *bp++; } /* Set DST to be (A xor B)). Return nonzero if any change is made. 
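   Illustrative note, not in the original comment: the _cg variants in this file
   differ from the plain ones only in reporting whether DST actually changed,
   detected by XORing each old destination word with its freshly computed value,
   which lets iterative dataflow solvers use the result as a convergence test.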
*/ bool sbitmap_a_xor_b_cg (sbitmap dst, sbitmap a, sbitmap b) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; SBITMAP_ELT_TYPE changed = 0; for (i = 0; i < n; i++) { SBITMAP_ELT_TYPE tmp = *ap++ ^ *bp++; changed |= *dstp ^ tmp; *dstp++ = tmp; } return changed != 0; } void sbitmap_a_xor_b (sbitmap dst, sbitmap a, sbitmap b) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; for (i = 0; i < n; i++) *dstp++ = *ap++ ^ *bp++; } /* Set DST to be (A or B)). Return nonzero if any change is made. */ bool sbitmap_a_or_b_cg (sbitmap dst, sbitmap a, sbitmap b) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; SBITMAP_ELT_TYPE changed = 0; for (i = 0; i < n; i++) { SBITMAP_ELT_TYPE tmp = *ap++ | *bp++; changed |= *dstp ^ tmp; *dstp++ = tmp; } return changed != 0; } void sbitmap_a_or_b (sbitmap dst, sbitmap a, sbitmap b) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; for (i = 0; i < n; i++) *dstp++ = *ap++ | *bp++; } /* Return nonzero if A is a subset of B. */ bool sbitmap_a_subset_b_p (sbitmap a, sbitmap b) { unsigned int i, n = a->size; sbitmap_ptr ap, bp; for (ap = a->elms, bp = b->elms, i = 0; i < n; i++, ap++, bp++) if ((*ap | *bp) != *bp) return false; return true; } /* Set DST to be (A or (B and C)). Return nonzero if any change is made. */ bool sbitmap_a_or_b_and_c_cg (sbitmap dst, sbitmap a, sbitmap b, sbitmap c) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; sbitmap_ptr cp = c->elms; SBITMAP_ELT_TYPE changed = 0; for (i = 0; i < n; i++) { SBITMAP_ELT_TYPE tmp = *ap++ | (*bp++ & *cp++); changed |= *dstp ^ tmp; *dstp++ = tmp; } return changed != 0; } void sbitmap_a_or_b_and_c (sbitmap dst, sbitmap a, sbitmap b, sbitmap c) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; sbitmap_ptr cp = c->elms; for (i = 0; i < n; i++) *dstp++ = *ap++ | (*bp++ & *cp++); } /* Set DST to be (A and (B or C)). Return nonzero if any change is made. */ bool sbitmap_a_and_b_or_c_cg (sbitmap dst, sbitmap a, sbitmap b, sbitmap c) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; sbitmap_ptr cp = c->elms; SBITMAP_ELT_TYPE changed = 0; for (i = 0; i < n; i++) { SBITMAP_ELT_TYPE tmp = *ap++ & (*bp++ | *cp++); changed |= *dstp ^ tmp; *dstp++ = tmp; } return changed != 0; } void sbitmap_a_and_b_or_c (sbitmap dst, sbitmap a, sbitmap b, sbitmap c) { unsigned int i, n = dst->size; sbitmap_ptr dstp = dst->elms; sbitmap_ptr ap = a->elms; sbitmap_ptr bp = b->elms; sbitmap_ptr cp = c->elms; for (i = 0; i < n; i++) *dstp++ = *ap++ & (*bp++ | *cp++); } #ifdef IN_GCC /* Set the bitmap DST to the intersection of SRC of successors of block number BB, using the new flow graph structures. 
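   Illustrative note, not part of the original comment: if BB has no successor other
   than the exit block, DST is set to all ones, the identity element for
   intersection, so such a block places no constraint on an all-paths dataflow
   problem.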
*/ void sbitmap_intersection_of_succs (sbitmap dst, sbitmap *src, int bb) { basic_block b = BASIC_BLOCK (bb); unsigned int set_size = dst->size; edge e; for (e = b->succ; e != 0; e = e->succ_next) { if (e->dest == EXIT_BLOCK_PTR) continue; sbitmap_copy (dst, src[e->dest->index]); break; } if (e == 0) sbitmap_ones (dst); else for (e = e->succ_next; e != 0; e = e->succ_next) { unsigned int i; sbitmap_ptr p, r; if (e->dest == EXIT_BLOCK_PTR) continue; p = src[e->dest->index]->elms; r = dst->elms; for (i = 0; i < set_size; i++) *r++ &= *p++; } } /* Set the bitmap DST to the intersection of SRC of predecessors of block number BB, using the new flow graph structures. */ void sbitmap_intersection_of_preds (sbitmap dst, sbitmap *src, int bb) { basic_block b = BASIC_BLOCK (bb); unsigned int set_size = dst->size; edge e; for (e = b->pred; e != 0; e = e->pred_next) { if (e->src == ENTRY_BLOCK_PTR) continue; sbitmap_copy (dst, src[e->src->index]); break; } if (e == 0) sbitmap_ones (dst); else for (e = e->pred_next; e != 0; e = e->pred_next) { unsigned int i; sbitmap_ptr p, r; if (e->src == ENTRY_BLOCK_PTR) continue; p = src[e->src->index]->elms; r = dst->elms; for (i = 0; i < set_size; i++) *r++ &= *p++; } } /* Set the bitmap DST to the union of SRC of successors of block number BB, using the new flow graph structures. */ void sbitmap_union_of_succs (sbitmap dst, sbitmap *src, int bb) { basic_block b = BASIC_BLOCK (bb); unsigned int set_size = dst->size; edge e; for (e = b->succ; e != 0; e = e->succ_next) { if (e->dest == EXIT_BLOCK_PTR) continue; sbitmap_copy (dst, src[e->dest->index]); break; } if (e == 0) sbitmap_zero (dst); else for (e = e->succ_next; e != 0; e = e->succ_next) { unsigned int i; sbitmap_ptr p, r; if (e->dest == EXIT_BLOCK_PTR) continue; p = src[e->dest->index]->elms; r = dst->elms; for (i = 0; i < set_size; i++) *r++ |= *p++; } } /* Set the bitmap DST to the union of SRC of predecessors of block number BB, using the new flow graph structures. */ void sbitmap_union_of_preds (sbitmap dst, sbitmap *src, int bb) { basic_block b = BASIC_BLOCK (bb); unsigned int set_size = dst->size; edge e; for (e = b->pred; e != 0; e = e->pred_next) { if (e->src== ENTRY_BLOCK_PTR) continue; sbitmap_copy (dst, src[e->src->index]); break; } if (e == 0) sbitmap_zero (dst); else for (e = e->pred_next; e != 0; e = e->pred_next) { unsigned int i; sbitmap_ptr p, r; if (e->src == ENTRY_BLOCK_PTR) continue; p = src[e->src->index]->elms; r = dst->elms; for (i = 0; i < set_size; i++) *r++ |= *p++; } } #endif /* Return number of first bit set in the bitmap, -1 if none. */ int sbitmap_first_set_bit (sbitmap bmap) { unsigned int n; EXECUTE_IF_SET_IN_SBITMAP (bmap, 0, n, { return n; }); return -1; } /* Return number of last bit set in the bitmap, -1 if none. 
*/ int sbitmap_last_set_bit (sbitmap bmap) { int i; SBITMAP_ELT_TYPE *ptr = bmap->elms; for (i = bmap->size - 1; i >= 0; i--) { SBITMAP_ELT_TYPE word = ptr[i]; if (word != 0) { unsigned int index = (i + 1) * SBITMAP_ELT_BITS - 1; SBITMAP_ELT_TYPE mask = (SBITMAP_ELT_TYPE) 1 << (SBITMAP_ELT_BITS - 1); while (1) { if ((word & mask) != 0) return index; mask >>= 1; index--; } } } return -1; } void dump_sbitmap (FILE *file, sbitmap bmap) { unsigned int i, n, j; unsigned int set_size = bmap->size; unsigned int total_bits = bmap->n_bits; fprintf (file, " "); for (i = n = 0; i < set_size && n < total_bits; i++) for (j = 0; j < SBITMAP_ELT_BITS && n < total_bits; j++, n++) { if (n != 0 && n % 10 == 0) fprintf (file, " "); fprintf (file, "%d", (bmap->elms[i] & ((SBITMAP_ELT_TYPE) 1 << j)) != 0); } fprintf (file, "\n"); } void dump_sbitmap_file (FILE *file, sbitmap bmap) { unsigned int i, pos; fprintf (file, "n_bits = %d, set = {", bmap->n_bits); for (pos = 30, i = 0; i < bmap->n_bits; i++) if (TEST_BIT (bmap, i)) { if (pos > 70) { fprintf (file, "\n "); pos = 0; } fprintf (file, "%d ", i); pos += 2 + (i >= 10) + (i >= 100) + (i >= 1000); } fprintf (file, "}\n"); } void debug_sbitmap (sbitmap bmap) { dump_sbitmap_file (stderr, bmap); } void dump_sbitmap_vector (FILE *file, const char *title, const char *subtitle, sbitmap *bmaps, int n_maps) { int bb; fprintf (file, "%s\n", title); for (bb = 0; bb < n_maps; bb++) { fprintf (file, "%s %d\n", subtitle, bb); dump_sbitmap (file, bmaps[bb]); } fprintf (file, "\n"); } /* Instruction scheduling pass. This file computes dependencies between instructions. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static regset_head reg_pending_sets_head; static regset_head reg_pending_clobbers_head; static regset_head reg_pending_uses_head; static regset reg_pending_sets; static regset reg_pending_clobbers; static regset reg_pending_uses; /* The following enumeration values tell us what dependencies we should use to implement the barrier. We use true-dependencies for TRUE_BARRIER and anti-dependencies for MOVE_BARRIER. */ enum reg_pending_barrier_mode { NOT_A_BARRIER = 0, MOVE_BARRIER, TRUE_BARRIER }; static enum reg_pending_barrier_mode reg_pending_barrier; /* To speed up the test for duplicate dependency links we keep a record of dependencies created by add_dependence when the average number of instructions in a basic block is very large. Studies have shown that there is typically around 5 instructions between branches for typical C code. So we can make a guess that the average basic block is approximately 5 instructions long; we will choose 100X the average size as a very large basic block. 
Each insn has associated bitmaps for its dependencies. Each bitmap has enough entries to represent a dependency on any other insn in the insn chain. All bitmap for true dependencies cache is allocated then the rest two ones are also allocated. */ static bitmap_head *true_dependency_cache; static bitmap_head *anti_dependency_cache; static bitmap_head *output_dependency_cache; int cache_size; /* To speed up checking consistency of formed forward insn dependencies we use the following cache. Another possible solution could be switching off checking duplication of insns in forward dependencies. */ #ifdef ENABLE_CHECKING static bitmap_head *forward_dependency_cache; #endif static int deps_may_trap_p (rtx); static void add_dependence_list (rtx, rtx, enum reg_note); static void add_dependence_list_and_free (rtx, rtx *, enum reg_note); static void set_sched_group_p (rtx); static void flush_pending_lists (struct deps *, rtx, int, int); static void sched_analyze_1 (struct deps *, rtx, rtx); static void sched_analyze_2 (struct deps *, rtx, rtx); static void sched_analyze_insn (struct deps *, rtx, rtx, rtx); static rtx get_insn_condition (rtx); static int conditions_mutex_p (rtx, rtx); /* Return nonzero if a load of the memory reference MEM can cause a trap. */ static int deps_may_trap_p (rtx mem) { rtx addr = XEXP (mem, 0); if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER) { rtx t = get_reg_known_value (REGNO (addr)); if (t) addr = t; } return rtx_addr_can_trap_p (addr); } /* Return the INSN_LIST containing INSN in LIST, or NULL if LIST does not contain INSN. */ rtx find_insn_list (rtx insn, rtx list) { while (list) { if (XEXP (list, 0) == insn) return list; list = XEXP (list, 1); } return 0; } /* Find the condition under which INSN is executed. */ static rtx get_insn_condition (rtx insn) { rtx pat = PATTERN (insn); rtx cond; if (pat == 0) return 0; if (GET_CODE (pat) == COND_EXEC) return COND_EXEC_TEST (pat); if (GET_CODE (insn) != JUMP_INSN) return 0; if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx) return 0; if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE) return 0; pat = SET_DEST (pat); cond = XEXP (pat, 0); if (GET_CODE (XEXP (cond, 1)) == LABEL_REF && XEXP (cond, 2) == pc_rtx) return cond; else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF && XEXP (cond, 1) == pc_rtx) return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond), XEXP (cond, 0), XEXP (cond, 1)); else return 0; } /* Return nonzero if conditions COND1 and COND2 can never be both true. */ static int conditions_mutex_p (rtx cond1, rtx cond2) { if (COMPARISON_P (cond1) && COMPARISON_P (cond2) && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2)) && XEXP (cond1, 0) == XEXP (cond2, 0) && XEXP (cond1, 1) == XEXP (cond2, 1)) return 1; return 0; } /* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the LOG_LINKS of INSN, if not already there. DEP_TYPE indicates the type of dependence that this link represents. The function returns nonzero if a new entry has been added to insn's LOG_LINK. */ int add_dependence (rtx insn, rtx elem, enum reg_note dep_type) { rtx link; int present_p; rtx cond1, cond2; /* Don't depend an insn on itself. */ if (insn == elem) return 0; /* We can get a dependency on deleted insns due to optimizations in the register allocation and reloading or due to splitting. Any such dependency is useless and can be ignored. */ if (GET_CODE (elem) == NOTE) return 0; /* flow.c doesn't handle conditional lifetimes entirely correctly; calls mess up the conditional lifetimes. */ /* ??? 
add_dependence is the wrong place to be eliding dependencies, as that forgets that the condition expressions themselves may be dependent. */ if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN) { cond1 = get_insn_condition (insn); cond2 = get_insn_condition (elem); if (cond1 && cond2 && conditions_mutex_p (cond1, cond2) /* Make sure first instruction doesn't affect condition of second instruction if switched. */ && !modified_in_p (cond1, elem) /* Make sure second instruction doesn't affect condition of first instruction if switched. */ && !modified_in_p (cond2, insn)) return 0; } present_p = 1; #ifdef INSN_SCHEDULING /* ??? No good way to tell from here whether we're doing interblock scheduling. Possibly add another callback. */ #if 0 /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.) No need for interblock dependences with calls, since calls are not moved between blocks. Note: the edge where elem is a CALL is still required. */ if (GET_CODE (insn) == CALL_INSN && (INSN_BB (elem) != INSN_BB (insn))) return 0; #endif /* If we already have a dependency for ELEM, then we do not need to do anything. Avoiding the list walk below can cut compile times dramatically for some code. */ if (true_dependency_cache != NULL) { enum reg_note present_dep_type = 0; if (anti_dependency_cache == NULL || output_dependency_cache == NULL) abort (); if (bitmap_bit_p (&true_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem))) /* Do nothing (present_set_type is already 0). */ ; else if (bitmap_bit_p (&anti_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem))) present_dep_type = REG_DEP_ANTI; else if (bitmap_bit_p (&output_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem))) present_dep_type = REG_DEP_OUTPUT; else present_p = 0; if (present_p && (int) dep_type >= (int) present_dep_type) return 0; } #endif /* Check that we don't already have this dependence. */ if (present_p) for (link = LOG_LINKS (insn); link; link = XEXP (link, 1)) if (XEXP (link, 0) == elem) { #ifdef INSN_SCHEDULING /* Clear corresponding cache entry because type of the link may be changed. */ if (true_dependency_cache != NULL) { if (REG_NOTE_KIND (link) == REG_DEP_ANTI) bitmap_clear_bit (&anti_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem)); else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT && output_dependency_cache) bitmap_clear_bit (&output_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem)); else abort (); } #endif /* If this is a more restrictive type of dependence than the existing one, then change the existing dependence to this type. */ if ((int) dep_type < (int) REG_NOTE_KIND (link)) PUT_REG_NOTE_KIND (link, dep_type); #ifdef INSN_SCHEDULING /* If we are adding a dependency to INSN's LOG_LINKs, then note that in the bitmap caches of dependency information. */ if (true_dependency_cache != NULL) { if ((int) REG_NOTE_KIND (link) == 0) bitmap_set_bit (&true_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem)); else if (REG_NOTE_KIND (link) == REG_DEP_ANTI) bitmap_set_bit (&anti_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem)); else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT) bitmap_set_bit (&output_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem)); } #endif return 0; } /* Might want to check one level of transitivity to save conses. */ link = alloc_INSN_LIST (elem, LOG_LINKS (insn)); LOG_LINKS (insn) = link; /* Insn dependency, not data dependency. 
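   Illustrative note, not in the original comment: as in the cache updates above, a
   dep_type of 0 stands for a true dependence here, while REG_DEP_ANTI and
   REG_DEP_OUTPUT mark anti and output dependencies.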
*/ PUT_REG_NOTE_KIND (link, dep_type); #ifdef INSN_SCHEDULING /* If we are adding a dependency to INSN's LOG_LINKs, then note that in the bitmap caches of dependency information. */ if (true_dependency_cache != NULL) { if ((int) dep_type == 0) bitmap_set_bit (&true_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem)); else if (dep_type == REG_DEP_ANTI) bitmap_set_bit (&anti_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem)); else if (dep_type == REG_DEP_OUTPUT) bitmap_set_bit (&output_dependency_cache[SCHED_INSN_LUID (insn)], SCHED_INSN_LUID (elem)); } #endif return 1; } /* A convenience wrapper to operate on an entire list. */ static void add_dependence_list (rtx insn, rtx list, enum reg_note dep_type) { for (; list; list = XEXP (list, 1)) add_dependence (insn, XEXP (list, 0), dep_type); } /* Similar, but free *LISTP at the same time. */ static void add_dependence_list_and_free (rtx insn, rtx *listp, enum reg_note dep_type) { rtx list, next; for (list = *listp, *listp = NULL; list ; list = next) { next = XEXP (list, 1); add_dependence (insn, XEXP (list, 0), dep_type); free_INSN_LIST_node (list); } } /* Set SCHED_GROUP_P and care for the rest of the bookkeeping that goes along with that. */ static void set_sched_group_p (rtx insn) { rtx prev; SCHED_GROUP_P (insn) = 1; prev = prev_nonnote_insn (insn); add_dependence (insn, prev, REG_DEP_ANTI); } /* Process an insn's memory dependencies. There are four kinds of dependencies: (0) read dependence: read follows read (1) true dependence: read follows write (2) anti dependence: write follows read (3) output dependence: write follows write We are careful to build only dependencies which actually exist, and use transitivity to avoid building too many links. */ /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST. The MEM is a memory reference contained within INSN, which we are saving so that we can do memory aliasing on it. */ void add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list, rtx insn, rtx mem) { rtx link; link = alloc_INSN_LIST (insn, *insn_list); *insn_list = link; if (current_sched_info->use_cselib) { mem = shallow_copy_rtx (mem); XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0)); } link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list); *mem_list = link; deps->pending_lists_length++; } /* Make a dependency between every memory reference on the pending lists and INSN, thus flushing the pending lists. FOR_READ is true if emitting dependencies for a read operation, similarly with FOR_WRITE. */ static void flush_pending_lists (struct deps *deps, rtx insn, int for_read, int for_write) { if (for_write) { add_dependence_list_and_free (insn, &deps->pending_read_insns, REG_DEP_ANTI); free_EXPR_LIST_list (&deps->pending_read_mems); } add_dependence_list_and_free (insn, &deps->pending_write_insns, for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT); free_EXPR_LIST_list (&deps->pending_write_mems); deps->pending_lists_length = 0; add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT); deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX); deps->pending_flush_length = 1; } /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC rtx, X, creating all dependencies generated by the write to the destination of X, and reads of everything mentioned. 
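   An illustrative example, not in the original comment: for
   (set (mem (reg A)) (reg B)), the store is compared against the pending read and
   write lists, gaining anti and output dependencies on earlier insns whose memory
   references may conflict, after which the address register A and the source
   register B are analyzed as reads.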
*/ static void sched_analyze_1 (struct deps *deps, rtx x, rtx insn) { int regno; rtx dest = XEXP (x, 0); enum rtx_code code = GET_CODE (x); if (dest == 0) return; if (GET_CODE (dest) == PARALLEL) { int i; for (i = XVECLEN (dest, 0) - 1; i >= 0; i--) if (XEXP (XVECEXP (dest, 0, i), 0) != 0) sched_analyze_1 (deps, gen_rtx_CLOBBER (VOIDmode, XEXP (XVECEXP (dest, 0, i), 0)), insn); if (GET_CODE (x) == SET) sched_analyze_2 (deps, SET_SRC (x), insn); return; } while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT) { if (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT || read_modify_subreg_p (dest)) { /* These both read and modify the result. We must handle them as writes to get proper dependencies for following instructions. We must handle them as reads to get proper dependencies from this to previous instructions. Thus we need to call sched_analyze_2. */ sched_analyze_2 (deps, XEXP (dest, 0), insn); } if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT) { /* The second and third arguments are values read by this insn. */ sched_analyze_2 (deps, XEXP (dest, 1), insn); sched_analyze_2 (deps, XEXP (dest, 2), insn); } dest = XEXP (dest, 0); } if (REG_P (dest)) { regno = REGNO (dest); /* A hard reg in a wide mode may really be multiple registers. If so, mark all of them just like the first. */ if (regno < FIRST_PSEUDO_REGISTER) { int i = hard_regno_nregs[regno][GET_MODE (dest)]; if (code == SET) { while (--i >= 0) SET_REGNO_REG_SET (reg_pending_sets, regno + i); } else { while (--i >= 0) SET_REGNO_REG_SET (reg_pending_clobbers, regno + i); } } /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that it does not reload. Ignore these as they have served their purpose already. */ else if (regno >= deps->max_reg) { if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER) abort (); } else { if (code == SET) SET_REGNO_REG_SET (reg_pending_sets, regno); else SET_REGNO_REG_SET (reg_pending_clobbers, regno); /* Pseudos that are REG_EQUIV to something may be replaced by that during reloading. We need only add dependencies for the address in the REG_EQUIV note. */ if (!reload_completed && get_reg_known_equiv_p (regno)) { rtx t = get_reg_known_value (regno); if (MEM_P (t)) sched_analyze_2 (deps, XEXP (t, 0), insn); } /* Don't let it cross a call after scheduling if it doesn't already cross one. */ if (REG_N_CALLS_CROSSED (regno) == 0) add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI); } } else if (MEM_P (dest)) { /* Writing memory. */ rtx t = dest; if (current_sched_info->use_cselib) { t = shallow_copy_rtx (dest); cselib_lookup (XEXP (t, 0), Pmode, 1); XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0)); } t = canon_rtx (t); if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH) { /* Flush all pending reads and writes to prevent the pending lists from getting any larger. Insn scheduling runs too slowly when these lists get long. When compiling GCC with itself, this flush occurs 8 times for sparc, and 10 times for m88k using the default value of 32. 
*/ flush_pending_lists (deps, insn, false, true); } else { rtx pending, pending_mem; pending = deps->pending_read_insns; pending_mem = deps->pending_read_mems; while (pending) { if (anti_dependence (XEXP (pending_mem, 0), t)) add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI); pending = XEXP (pending, 1); pending_mem = XEXP (pending_mem, 1); } pending = deps->pending_write_insns; pending_mem = deps->pending_write_mems; while (pending) { if (output_dependence (XEXP (pending_mem, 0), t)) add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT); pending = XEXP (pending, 1); pending_mem = XEXP (pending_mem, 1); } add_dependence_list (insn, deps->last_pending_memory_flush, REG_DEP_ANTI); add_insn_mem_dependence (deps, &deps->pending_write_insns, &deps->pending_write_mems, insn, dest); } sched_analyze_2 (deps, XEXP (dest, 0), insn); } /* Analyze reads. */ if (GET_CODE (x) == SET) sched_analyze_2 (deps, SET_SRC (x), insn); } /* Analyze the uses of memory and registers in rtx X in INSN. */ static void sched_analyze_2 (struct deps *deps, rtx x, rtx insn) { int i; int j; enum rtx_code code; const char *fmt; if (x == 0) return; code = GET_CODE (x); switch (code) { case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR: case SYMBOL_REF: case CONST: case LABEL_REF: /* Ignore constants. Note that we must handle CONST_DOUBLE here because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but this does not mean that this insn is using cc0. */ return; #ifdef HAVE_cc0 case CC0: /* User of CC0 depends on immediately preceding insn. */ set_sched_group_p (insn); /* Don't move CC0 setter to another block (it can set up the same flag for previous CC0 users which is safe). */ CANT_MOVE (prev_nonnote_insn (insn)) = 1; return; #endif case REG: { int regno = REGNO (x); if (regno < FIRST_PSEUDO_REGISTER) { int i = hard_regno_nregs[regno][GET_MODE (x)]; while (--i >= 0) SET_REGNO_REG_SET (reg_pending_uses, regno + i); } /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that it does not reload. Ignore these as they have served their purpose already. */ else if (regno >= deps->max_reg) { if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER) abort (); } else { SET_REGNO_REG_SET (reg_pending_uses, regno); /* Pseudos that are REG_EQUIV to something may be replaced by that during reloading. We need only add dependencies for the address in the REG_EQUIV note. */ if (!reload_completed && get_reg_known_equiv_p (regno)) { rtx t = get_reg_known_value (regno); if (MEM_P (t)) sched_analyze_2 (deps, XEXP (t, 0), insn); } /* If the register does not already cross any calls, then add this insn to the sched_before_next_call list so that it will still not cross calls after scheduling. */ if (REG_N_CALLS_CROSSED (regno) == 0) deps->sched_before_next_call = alloc_INSN_LIST (insn, deps->sched_before_next_call); } return; } case MEM: { /* Reading memory. 
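A read is ordered against possibly conflicting pending writes with a true dependence, against overlapping pending reads with an anti dependence (relevant mainly for volatile memory), and against the last pending memory flush; the reference is then added to the pending read lists in case a later write conflicts with it.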
*/ rtx u; rtx pending, pending_mem; rtx t = x; if (current_sched_info->use_cselib) { t = shallow_copy_rtx (t); cselib_lookup (XEXP (t, 0), Pmode, 1); XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0)); } t = canon_rtx (t); pending = deps->pending_read_insns; pending_mem = deps->pending_read_mems; while (pending) { if (read_dependence (XEXP (pending_mem, 0), t)) add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI); pending = XEXP (pending, 1); pending_mem = XEXP (pending_mem, 1); } pending = deps->pending_write_insns; pending_mem = deps->pending_write_mems; while (pending) { if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t, rtx_varies_p)) add_dependence (insn, XEXP (pending, 0), 0); pending = XEXP (pending, 1); pending_mem = XEXP (pending_mem, 1); } for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1)) if (GET_CODE (XEXP (u, 0)) != JUMP_INSN || deps_may_trap_p (x)) add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); /* Always add these dependencies to pending_reads, since this insn may be followed by a write. */ add_insn_mem_dependence (deps, &deps->pending_read_insns, &deps->pending_read_mems, insn, x); /* Take advantage of tail recursion here. */ sched_analyze_2 (deps, XEXP (x, 0), insn); return; } /* Force pending stores to memory in case a trap handler needs them. */ case TRAP_IF: flush_pending_lists (deps, insn, true, false); break; case ASM_OPERANDS: case ASM_INPUT: case UNSPEC_VOLATILE: { /* Traditional and volatile asm instructions must be considered to use and clobber all hard registers, all pseudo-registers and all of memory. So must TRAP_IF and UNSPEC_VOLATILE operations. Consider for instance a volatile asm that changes the fpu rounding mode. An insn should not be moved across this even if it only uses pseudo-regs because it might give an incorrectly rounded result. */ if (code != ASM_OPERANDS || MEM_VOLATILE_P (x)) reg_pending_barrier = TRUE_BARRIER; /* For all ASM_OPERANDS, we must traverse the vector of input operands. We can not just fall through here since then we would be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate traditional asms unlike their normal usage. */ if (code == ASM_OPERANDS) { for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++) sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn); return; } break; } case PRE_DEC: case POST_DEC: case PRE_INC: case POST_INC: /* These both read and modify the result. We must handle them as writes to get proper dependencies for following instructions. We must handle them as reads to get proper dependencies from this to previous instructions. Thus we need to pass them to both sched_analyze_1 and sched_analyze_2. We must call sched_analyze_2 first in order to get the proper antecedent for the read. */ sched_analyze_2 (deps, XEXP (x, 0), insn); sched_analyze_1 (deps, x, insn); return; case POST_MODIFY: case PRE_MODIFY: /* op0 = op0 + op1 */ sched_analyze_2 (deps, XEXP (x, 0), insn); sched_analyze_2 (deps, XEXP (x, 1), insn); sched_analyze_1 (deps, x, insn); return; default: break; } /* Other cases: walk the insn. */ fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') sched_analyze_2 (deps, XEXP (x, i), insn); else if (fmt[i] == 'E') for (j = 0; j < XVECLEN (x, i); j++) sched_analyze_2 (deps, XVECEXP (x, i, j), insn); } } /* Analyze an INSN with pattern X to find all dependencies. 
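The register uses, sets and clobbers discovered by sched_analyze_1 and sched_analyze_2 are accumulated in reg_pending_uses, reg_pending_sets and reg_pending_clobbers, and are only turned into dependence links against the per-register reg_last lists near the end of this function, after barriers and jump-specific dependencies have been handled.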
*/ static void sched_analyze_insn (struct deps *deps, rtx x, rtx insn, rtx loop_notes) { RTX_CODE code = GET_CODE (x); rtx link; int i; if (code == COND_EXEC) { sched_analyze_2 (deps, COND_EXEC_TEST (x), insn); /* ??? Should be recording conditions so we reduce the number of false dependencies. */ x = COND_EXEC_CODE (x); code = GET_CODE (x); } if (code == SET || code == CLOBBER) { sched_analyze_1 (deps, x, insn); /* Bare clobber insns are used for letting life analysis, reg-stack and others know that a value is dead. Depend on the last call instruction so that reg-stack won't get confused. */ if (code == CLOBBER) add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT); } else if (code == PARALLEL) { int i; for (i = XVECLEN (x, 0) - 1; i >= 0; i--) { rtx sub = XVECEXP (x, 0, i); code = GET_CODE (sub); if (code == COND_EXEC) { sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn); sub = COND_EXEC_CODE (sub); code = GET_CODE (sub); } if (code == SET || code == CLOBBER) sched_analyze_1 (deps, sub, insn); else sched_analyze_2 (deps, sub, insn); } } else sched_analyze_2 (deps, x, insn); /* Mark registers CLOBBERED or used by called function. */ if (GET_CODE (insn) == CALL_INSN) { for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1)) { if (GET_CODE (XEXP (link, 0)) == CLOBBER) sched_analyze_1 (deps, XEXP (link, 0), insn); else sched_analyze_2 (deps, XEXP (link, 0), insn); } if (find_reg_note (insn, REG_SETJMP, NULL)) reg_pending_barrier = MOVE_BARRIER; } if (GET_CODE (insn) == JUMP_INSN) { rtx next; next = next_nonnote_insn (insn); if (next && GET_CODE (next) == BARRIER) reg_pending_barrier = TRUE_BARRIER; else { rtx pending, pending_mem; regset_head tmp_uses, tmp_sets; INIT_REG_SET (&tmp_uses); INIT_REG_SET (&tmp_sets); (*current_sched_info->compute_jump_reg_dependencies) (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets); /* Make latency of jump equal to 0 by using anti-dependence. */ EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI); add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI); reg_last->uses_length++; reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); }); IOR_REG_SET (reg_pending_sets, &tmp_sets); CLEAR_REG_SET (&tmp_uses); CLEAR_REG_SET (&tmp_sets); /* All memory writes and volatile reads must happen before the jump. Non-volatile reads must happen before the jump iff the result is needed by the above register used mask. */ pending = deps->pending_write_insns; pending_mem = deps->pending_write_mems; while (pending) { add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT); pending = XEXP (pending, 1); pending_mem = XEXP (pending_mem, 1); } pending = deps->pending_read_insns; pending_mem = deps->pending_read_mems; while (pending) { if (MEM_VOLATILE_P (XEXP (pending_mem, 0))) add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT); pending = XEXP (pending, 1); pending_mem = XEXP (pending_mem, 1); } add_dependence_list (insn, deps->last_pending_memory_flush, REG_DEP_ANTI); } } /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic block, then we must be sure that no instructions are scheduled across it. Otherwise, the reg_n_refs info (which depends on loop_depth) would become incorrect. */ if (loop_notes) { rtx link; /* Update loop_notes with any notes from this insn. Also determine if any of the notes on the list correspond to instruction scheduling barriers (loop, eh & setjmp notes, but not range notes). 
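The list entries are CONST_INTs holding either a note kind or an EH region number (see sched_analyze below); any LOOP_BEG/LOOP_END or EH_REGION_BEG/EH_REGION_END entry forces a full MOVE_BARRIER before the notes are chained onto REG_NOTES of this insn.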
*/ link = loop_notes; while (XEXP (link, 1)) { if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END) reg_pending_barrier = MOVE_BARRIER; link = XEXP (link, 1); } XEXP (link, 1) = REG_NOTES (insn); REG_NOTES (insn) = loop_notes; } /* If this instruction can throw an exception, then moving it changes where block boundaries fall. This is mighty confusing elsewhere. Therefore, prevent such an instruction from being moved. */ if (can_throw_internal (insn)) reg_pending_barrier = MOVE_BARRIER; /* Add dependencies if a scheduling barrier was found. */ if (reg_pending_barrier) { /* In the case of barrier the most added dependencies are not real, so we use anti-dependence here. */ if (GET_CODE (PATTERN (insn)) == COND_EXEC) { EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI); add_dependence_list (insn, reg_last->sets, reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI); add_dependence_list (insn, reg_last->clobbers, reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI); }); } else { EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list_and_free (insn, &reg_last->uses, REG_DEP_ANTI); add_dependence_list_and_free (insn, &reg_last->sets, reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI); add_dependence_list_and_free (insn, &reg_last->clobbers, reg_pending_barrier == TRUE_BARRIER ? 0 : REG_DEP_ANTI); reg_last->uses_length = 0; reg_last->clobbers_length = 0; }); } for (i = 0; i < deps->max_reg; i++) { struct deps_reg *reg_last = &deps->reg_last[i]; reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); SET_REGNO_REG_SET (&deps->reg_last_in_use, i); } flush_pending_lists (deps, insn, true, true); CLEAR_REG_SET (&deps->reg_conditional_sets); reg_pending_barrier = NOT_A_BARRIER; } else { /* If the current insn is conditional, we can't free any of the lists.
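A COND_EXEC insn may not execute at run time, so earlier sets and uses of the same registers can still be live; we therefore add dependencies against the existing reg_last lists but keep the lists themselves (and record the register in reg_conditional_sets) instead of freeing them as in the unconditional case.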
*/ if (GET_CODE (PATTERN (insn)) == COND_EXEC) { EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->sets, 0); add_dependence_list (insn, reg_last->clobbers, 0); reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); reg_last->uses_length++; }); EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT); add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI); reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers); reg_last->clobbers_length++; }); EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT); add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT); add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI); reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); SET_REGNO_REG_SET (&deps->reg_conditional_sets, i); }); } else { EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->sets, 0); add_dependence_list (insn, reg_last->clobbers, 0); reg_last->uses_length++; reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); }); EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH) { add_dependence_list_and_free (insn, &reg_last->sets, REG_DEP_OUTPUT); add_dependence_list_and_free (insn, &reg_last->uses, REG_DEP_ANTI); add_dependence_list_and_free (insn, &reg_last->clobbers, REG_DEP_OUTPUT); reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); reg_last->clobbers_length = 0; reg_last->uses_length = 0; } else { add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT); add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI); } reg_last->clobbers_length++; reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers); }); EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list_and_free (insn, &reg_last->sets, REG_DEP_OUTPUT); add_dependence_list_and_free (insn, &reg_last->clobbers, REG_DEP_OUTPUT); add_dependence_list_and_free (insn, &reg_last->uses, REG_DEP_ANTI); reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); reg_last->uses_length = 0; reg_last->clobbers_length = 0; CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i); }); } IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses); IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers); IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets); } CLEAR_REG_SET (reg_pending_uses); CLEAR_REG_SET (reg_pending_clobbers); CLEAR_REG_SET (reg_pending_sets); /* If we are currently in a libcall scheduling group, then mark the current insn as being in a scheduling group and that it can not be moved into a different basic block. */ if (deps->libcall_block_tail_insn) { set_sched_group_p (insn); CANT_MOVE (insn) = 1; } /* If a post-call group is still open, see if it should remain so. This insn must be a simple move of a hard reg to a pseudo or vice-versa. We must avoid moving these insns for correctness on SMALL_REGISTER_CLASS machines, and for special registers like PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all hard regs for all targets.
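For example (illustrative RTL; register numbers and the choice of return register are invented): a copy such as (set (reg:SI 58) (reg:SI 0)) that moves a call's return value out of a hard register is given SCHED_GROUP_P below, keeping it glued to the insn that precedes it.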
*/ if (deps->in_post_call_group_p) { rtx tmp, set = single_set (insn); int src_regno, dest_regno; if (set == NULL) goto end_call_group; tmp = SET_DEST (set); if (GET_CODE (tmp) == SUBREG) tmp = SUBREG_REG (tmp); if (REG_P (tmp)) dest_regno = REGNO (tmp); else goto end_call_group; tmp = SET_SRC (set); if (GET_CODE (tmp) == SUBREG) tmp = SUBREG_REG (tmp); if ((GET_CODE (tmp) == PLUS || GET_CODE (tmp) == MINUS) && REG_P (XEXP (tmp, 0)) && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM && dest_regno == STACK_POINTER_REGNUM) src_regno = STACK_POINTER_REGNUM; else if (REG_P (tmp)) src_regno = REGNO (tmp); else goto end_call_group; if (src_regno < FIRST_PSEUDO_REGISTER || dest_regno < FIRST_PSEUDO_REGISTER) { /* If we are inside a post-call group right at the start of the scheduling region, we must not add a dependency. */ if (deps->in_post_call_group_p == post_call_initial) { SCHED_GROUP_P (insn) = 1; deps->in_post_call_group_p = post_call; } else set_sched_group_p (insn); CANT_MOVE (insn) = 1; } else { end_call_group: deps->in_post_call_group_p = not_post_call; } } } /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS for every dependency. */ void sched_analyze (struct deps *deps, rtx head, rtx tail) { rtx insn; rtx loop_notes = 0; if (current_sched_info->use_cselib) cselib_init (true); /* Before reload, if the previous block ended in a call, show that we are inside a post-call group, so as to keep the lifetimes of hard registers correct. */ if (! reload_completed && GET_CODE (head) != CODE_LABEL) { insn = prev_nonnote_insn (head); if (insn && GET_CODE (insn) == CALL_INSN) deps->in_post_call_group_p = post_call_initial; } for (insn = head;; insn = NEXT_INSN (insn)) { rtx link, end_seq, r0, set; if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN) { /* Clear out the stale LOG_LINKS from flow. */ free_INSN_LIST_list (&LOG_LINKS (insn)); /* Make each JUMP_INSN a scheduling barrier for memory references. */ if (GET_CODE (insn) == JUMP_INSN) { /* Keep the list a reasonable size. */ if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH) flush_pending_lists (deps, insn, true, true); else deps->last_pending_memory_flush = alloc_INSN_LIST (insn, deps->last_pending_memory_flush); } sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes); loop_notes = 0; } else if (GET_CODE (insn) == CALL_INSN) { int i; CANT_MOVE (insn) = 1; /* Clear out the stale LOG_LINKS from flow. */ free_INSN_LIST_list (&LOG_LINKS (insn)); if (find_reg_note (insn, REG_SETJMP, NULL)) { /* This is setjmp. Assume that all registers, not just hard registers, may be clobbered by this call. */ reg_pending_barrier = MOVE_BARRIER; } else { for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) /* A call may read and modify global register variables. */ if (global_regs[i]) { SET_REGNO_REG_SET (reg_pending_sets, i); SET_REGNO_REG_SET (reg_pending_uses, i); } /* Other call-clobbered hard regs may be clobbered. Since we only have a choice between 'might be clobbered' and 'definitely not clobbered', we must include all partly call-clobbered registers here. */ else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i]) || TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) SET_REGNO_REG_SET (reg_pending_clobbers, i); /* We don't know what set of fixed registers might be used by the function, but it is certain that the stack pointer is among them, but be conservative. */ else if (fixed_regs[i]) SET_REGNO_REG_SET (reg_pending_uses, i); /* The frame pointer is normally not used by the function itself, but by the debugger. 
*/ /* ??? MIPS o32 is an exception. It uses the frame pointer in the macro expansion of jal but does not represent this fact in the call_insn rtl. */ else if (i == FRAME_POINTER_REGNUM || (i == HARD_FRAME_POINTER_REGNUM && (! reload_completed || frame_pointer_needed))) SET_REGNO_REG_SET (reg_pending_uses, i); } /* For each insn which shouldn't cross a call, add a dependence between that insn and this call insn. */ add_dependence_list_and_free (insn, &deps->sched_before_next_call, REG_DEP_ANTI); sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes); loop_notes = 0; /* In the absence of interprocedural alias analysis, we must flush all pending reads and writes, and start new dependencies starting from here. But only flush writes for constant calls (which may be passed a pointer to something we haven't written yet). */ flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn)); /* Remember the last function call for limiting lifetimes. */ free_INSN_LIST_list (&deps->last_function_call); deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX); /* Before reload, begin a post-call group, so as to keep the lifetimes of hard registers correct. */ if (! reload_completed) deps->in_post_call_group_p = post_call; } /* See comments on reemit_notes as to why we do this. ??? Actually, the reemit_notes just say what is done, not why. */ if (GET_CODE (insn) == NOTE && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)) { rtx rtx_region; if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END) rtx_region = GEN_INT (NOTE_EH_HANDLER (insn)); else rtx_region = const0_rtx; loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, rtx_region, loop_notes); loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, GEN_INT (NOTE_LINE_NUMBER (insn)), loop_notes); CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn); } if (current_sched_info->use_cselib) cselib_process_insn (insn); /* Now that we have completed handling INSN, check and see if it is a CLOBBER beginning a libcall block. If it is, record the end of the libcall sequence. We want to schedule libcall blocks as a unit before reload. While this restricts scheduling, it preserves the meaning of a libcall block. As a side effect, we may get better code due to decreased register pressure as well as less chance of a foreign insn appearing in a libcall block. */ if (!reload_completed /* Note we may have nested libcall sequences. We only care about the outermost libcall sequence. */ && deps->libcall_block_tail_insn == 0 /* The sequence must start with a clobber of a register. */ && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == CLOBBER && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0)) && REG_P (XEXP (PATTERN (insn), 0)) /* The CLOBBER must also have a REG_LIBCALL note attached. */ && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0 && (end_seq = XEXP (link, 0)) != 0 /* The insn referenced by the REG_LIBCALL note must be a simple nop copy with the same destination as the register mentioned in the clobber. */ && (set = single_set (end_seq)) != 0 && SET_DEST (set) == r0 && SET_SRC (set) == r0 /* And finally the insn referenced by the REG_LIBCALL must also contain a REG_EQUAL note and a REG_RETVAL note. 
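(In a libcall sequence the REG_LIBCALL note on the first insn points at the last insn and the REG_RETVAL note on the last insn points back at the first; once libcall_block_tail_insn is set, the insns up to it are given SCHED_GROUP_P above so the block is scheduled as a unit.)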
*/ && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0 && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0) deps->libcall_block_tail_insn = XEXP (link, 0); /* If we have reached the end of a libcall block, then close the block. */ if (deps->libcall_block_tail_insn == insn) deps->libcall_block_tail_insn = 0; if (insn == tail) { if (current_sched_info->use_cselib) cselib_finish (); return; } } abort (); } /* The following function adds forward dependence (FROM, TO) with given DEP_TYPE. The forward dependence should be not exist before. */ void add_forward_dependence (rtx from, rtx to, enum reg_note dep_type) { rtx new_link; #ifdef ENABLE_CHECKING /* If add_dependence is working properly there should never be notes, deleted insns or duplicates in the backward links. Thus we need not check for them here. However, if we have enabled checking we might as well go ahead and verify that add_dependence worked properly. */ if (GET_CODE (from) == NOTE || INSN_DELETED_P (from) || (forward_dependency_cache != NULL && bitmap_bit_p (&forward_dependency_cache[SCHED_INSN_LUID (from)], SCHED_INSN_LUID (to))) || (forward_dependency_cache == NULL && find_insn_list (to, INSN_DEPEND (from)))) abort (); if (forward_dependency_cache != NULL) bitmap_bit_p (&forward_dependency_cache[SCHED_INSN_LUID (from)], SCHED_INSN_LUID (to)); #endif new_link = alloc_INSN_LIST (to, INSN_DEPEND (from)); PUT_REG_NOTE_KIND (new_link, dep_type); INSN_DEPEND (from) = new_link; INSN_DEP_COUNT (to) += 1; } /* Examine insns in the range [ HEAD, TAIL ] and Use the backward dependences from LOG_LINKS to build forward dependences in INSN_DEPEND. */ void compute_forward_dependences (rtx head, rtx tail) { rtx insn, link; rtx next_tail; next_tail = NEXT_INSN (tail); for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) { if (! INSN_P (insn)) continue; for (link = LOG_LINKS (insn); link; link = XEXP (link, 1)) add_forward_dependence (XEXP (link, 0), insn, REG_NOTE_KIND (link)); } } /* Initialize variables for region data dependence analysis. n_bbs is the number of region blocks. */ void init_deps (struct deps *deps) { int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ()); deps->max_reg = max_reg; deps->reg_last = xcalloc (max_reg, sizeof (struct deps_reg)); INIT_REG_SET (&deps->reg_last_in_use); INIT_REG_SET (&deps->reg_conditional_sets); deps->pending_read_insns = 0; deps->pending_read_mems = 0; deps->pending_write_insns = 0; deps->pending_write_mems = 0; deps->pending_lists_length = 0; deps->pending_flush_length = 0; deps->last_pending_memory_flush = 0; deps->last_function_call = 0; deps->sched_before_next_call = 0; deps->in_post_call_group_p = not_post_call; deps->libcall_block_tail_insn = 0; } /* Free insn lists found in DEPS. */ void free_deps (struct deps *deps) { int i; free_INSN_LIST_list (&deps->pending_read_insns); free_EXPR_LIST_list (&deps->pending_read_mems); free_INSN_LIST_list (&deps->pending_write_insns); free_EXPR_LIST_list (&deps->pending_write_mems); free_INSN_LIST_list (&deps->last_pending_memory_flush); /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions times. For a testcase with 42000 regs and 8000 small basic blocks, this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. 
*/ EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, { struct deps_reg *reg_last = &deps->reg_last[i]; if (reg_last->uses) free_INSN_LIST_list (&reg_last->uses); if (reg_last->sets) free_INSN_LIST_list (&reg_last->sets); if (reg_last->clobbers) free_INSN_LIST_list (&reg_last->clobbers); }); CLEAR_REG_SET (&deps->reg_last_in_use); CLEAR_REG_SET (&deps->reg_conditional_sets); free (deps->reg_last); } /* If it is profitable to use them, initialize caches for tracking dependency information. LUID is the number of insns to be scheduled, it is used in the estimate of profitability. */ void init_dependency_caches (int luid) { /* ?!? We could save some memory by computing a per-region luid mapping which could reduce both the number of vectors in the cache and the size of each vector. Instead we just avoid the cache entirely unless the average number of instructions in a basic block is very high. See the comment before the declaration of true_dependency_cache for what we consider "very high". */ if (luid / n_basic_blocks > 100 * 5) { int i; true_dependency_cache = xmalloc (luid * sizeof (bitmap_head)); anti_dependency_cache = xmalloc (luid * sizeof (bitmap_head)); output_dependency_cache = xmalloc (luid * sizeof (bitmap_head)); #ifdef ENABLE_CHECKING forward_dependency_cache = xmalloc (luid * sizeof (bitmap_head)); #endif for (i = 0; i < luid; i++) { bitmap_initialize (&true_dependency_cache[i], 0); bitmap_initialize (&anti_dependency_cache[i], 0); bitmap_initialize (&output_dependency_cache[i], 0); #ifdef ENABLE_CHECKING bitmap_initialize (&forward_dependency_cache[i], 0); #endif } cache_size = luid; } } /* Free the caches allocated in init_dependency_caches. */ void free_dependency_caches (void) { if (true_dependency_cache) { int i; for (i = 0; i < cache_size; i++) { bitmap_clear (&true_dependency_cache[i]); bitmap_clear (&anti_dependency_cache[i]); bitmap_clear (&output_dependency_cache[i]); #ifdef ENABLE_CHECKING bitmap_clear (&forward_dependency_cache[i]); #endif } free (true_dependency_cache); true_dependency_cache = NULL; free (anti_dependency_cache); anti_dependency_cache = NULL; free (output_dependency_cache); output_dependency_cache = NULL; #ifdef ENABLE_CHECKING free (forward_dependency_cache); forward_dependency_cache = NULL; #endif } } /* Initialize some global variables needed by the dependency analysis code. */ void init_deps_global (void) { reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head); reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head); reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head); reg_pending_barrier = NOT_A_BARRIER; } /* Free everything used by the dependency analysis code. */ void finish_deps_global (void) { FREE_REG_SET (reg_pending_sets); FREE_REG_SET (reg_pending_clobbers); FREE_REG_SET (reg_pending_uses); } /* Instruction scheduling pass. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The number of insns to be scheduled in total. */ static int target_n_insns; /* The number of insns scheduled so far. */ static int sched_n_insns; /* Implementations of the sched_info functions for region scheduling. */ static void init_ready_list_ebb (struct ready_list *); static int can_schedule_ready_p_ebb (rtx); static int new_ready_ebb (rtx); static int schedule_more_p_ebb (void); static const char *ebb_print_insn (rtx, int); static int rank (rtx, rtx); static int contributes_to_priority_ebb (rtx, rtx); static void compute_jump_reg_dependencies_ebb (rtx, regset, regset, regset); static basic_block earliest_block_with_similiar_load (basic_block, rtx); static void add_deps_for_risky_insns (rtx, rtx); static basic_block schedule_ebb (rtx, rtx); static basic_block fix_basic_block_boundaries (basic_block, basic_block, rtx, rtx); static void add_missing_bbs (rtx, basic_block, basic_block); /* Return nonzero if there are more insns that should be scheduled. */ static int schedule_more_p_ebb (void) { return sched_n_insns < target_n_insns; } /* Add all insns that are initially ready to the ready list READY. Called once before scheduling a set of insns. */ static void init_ready_list_ebb (struct ready_list *ready) { rtx prev_head = current_sched_info->prev_head; rtx next_tail = current_sched_info->next_tail; rtx insn; target_n_insns = 0; sched_n_insns = 0; #if 0 /* Print debugging information. */ if (sched_verbose >= 5) debug_dependencies (); #endif /* Initialize ready list with all 'ready' insns in target block. Count number of insns in the target block being scheduled. */ for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn)) { if (INSN_DEP_COUNT (insn) == 0) ready_add (ready, insn); target_n_insns++; } } /* Called after taking INSN from the ready list. Returns nonzero if this insn can be scheduled, nonzero if we should silently discard it. */ static int can_schedule_ready_p_ebb (rtx insn ATTRIBUTE_UNUSED) { sched_n_insns++; return 1; } /* Called after INSN has all its dependencies resolved. Return nonzero if it should be moved to the ready list or the queue, or zero if we should silently discard it. */ static int new_ready_ebb (rtx next ATTRIBUTE_UNUSED) { return 1; } /* Return a string that contains the insn uid and optionally anything else necessary to identify this insn in an output. It's valid to use a static buffer for this. The ALIGNED parameter should cause the string to be formatted so that multiple output lines will line up nicely. */ static const char * ebb_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED) { static char tmp[80]; sprintf (tmp, "%4d", INSN_UID (insn)); return tmp; } /* Compare priority of two insns. Return a positive number if the second insn is to be preferred for scheduling, and a negative one if the first is to be preferred. Zero if they are equally good. 
*/ static int rank (rtx insn1, rtx insn2) { basic_block bb1 = BLOCK_FOR_INSN (insn1); basic_block bb2 = BLOCK_FOR_INSN (insn2); if (bb1->count > bb2->count || bb1->frequency > bb2->frequency) return -1; if (bb1->count < bb2->count || bb1->frequency < bb2->frequency) return 1; return 0; } /* NEXT is an instruction that depends on INSN (a backward dependence); return nonzero if we should include this dependence in priority calculations. */ static int contributes_to_priority_ebb (rtx next ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED) { return 1; } /* INSN is a JUMP_INSN, COND_SET is the set of registers that are conditionally set before INSN. Store the set of registers that must be considered as used by this jump in USED and that of registers that must be considered as set in SET. */ static void compute_jump_reg_dependencies_ebb (rtx insn, regset cond_set, regset used, regset set) { basic_block b = BLOCK_FOR_INSN (insn); edge e; for (e = b->succ; e; e = e->succ_next) if (e->flags & EDGE_FALLTHRU) /* The jump may be a by-product of a branch that has been merged in the main codepath after being conditionalized. Therefore it may guard the fallthrough block from using a value that has conditionally overwritten that of the main codepath. So we consider that it restores the value of the main codepath. */ bitmap_operation (set, e->dest->global_live_at_start, cond_set, BITMAP_AND); else bitmap_operation (used, used, e->dest->global_live_at_start, BITMAP_IOR); } /* Used in schedule_insns to initialize current_sched_info for scheduling regions (or single basic blocks). */ static struct sched_info ebb_sched_info = { init_ready_list_ebb, can_schedule_ready_p_ebb, schedule_more_p_ebb, new_ready_ebb, rank, ebb_print_insn, contributes_to_priority_ebb, compute_jump_reg_dependencies_ebb, NULL, NULL, NULL, NULL, 0, 1, 0 }; /* It is possible that ebb scheduling eliminated some blocks. Place blocks from FIRST to LAST before BEFORE. */ static void add_missing_bbs (rtx before, basic_block first, basic_block last) { for (; last != first->prev_bb; last = last->prev_bb) { before = emit_note_before (NOTE_INSN_BASIC_BLOCK, before); NOTE_BASIC_BLOCK (before) = last; BB_HEAD (last) = before; BB_END (last) = before; update_bb_for_insn (last); } } /* Fixup the CFG after EBB scheduling. Re-recognize the basic block boundaries in between HEAD and TAIL and update basic block structures between BB and LAST. */ static basic_block fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head, rtx tail) { rtx insn = head; rtx last_inside = BB_HEAD (bb); rtx aftertail = NEXT_INSN (tail); head = BB_HEAD (bb); for (; insn != aftertail; insn = NEXT_INSN (insn)) { if (GET_CODE (insn) == CODE_LABEL) abort (); /* Create new basic blocks just before first insn. */ if (inside_basic_block_p (insn)) { if (!last_inside) { rtx note; /* Re-emit the basic block note for newly found BB header. */ if (GET_CODE (insn) == CODE_LABEL) { note = emit_note_after (NOTE_INSN_BASIC_BLOCK, insn); head = insn; last_inside = note; } else { note = emit_note_before (NOTE_INSN_BASIC_BLOCK, insn); head = note; last_inside = insn; } } else last_inside = insn; } /* Control flow instruction terminate basic block. It is possible that we've eliminated some basic blocks (made them empty). Find the proper basic block using BLOCK_FOR_INSN and arrange things in a sensible way by inserting empty basic blocks as needed. 
*/ if (control_flow_insn_p (insn) || (insn == tail && last_inside)) { basic_block curr_bb = BLOCK_FOR_INSN (insn); rtx note; if (!control_flow_insn_p (insn)) curr_bb = last; if (bb == last->next_bb) { edge f; rtx h; /* An obscure special case, where we do have partially dead instruction scheduled after last control flow instruction. In this case we can create new basic block. It is always exactly one basic block last in the sequence. Handle it by splitting the edge and repositioning the block. This is somewhat hackish, but at least avoid cut&paste A safer solution can be to bring the code into sequence, do the split and re-emit it back in case this will ever trigger problem. */ f = bb->prev_bb->succ; while (f && !(f->flags & EDGE_FALLTHRU)) f = f->succ_next; if (f) { last = curr_bb = split_edge (f); h = BB_HEAD (curr_bb); BB_HEAD (curr_bb) = head; BB_END (curr_bb) = insn; /* Edge splitting created misplaced BASIC_BLOCK note, kill it. */ delete_insn (h); } /* It may happen that code got moved past unconditional jump in case the code is completely dead. Kill it. */ else { rtx next = next_nonnote_insn (insn); delete_insn_chain (head, insn); /* We keep some notes in the way that may split barrier from the jump. */ if (GET_CODE (next) == BARRIER) { emit_barrier_after (prev_nonnote_insn (head)); delete_insn (next); } insn = NULL; } } else { BB_HEAD (curr_bb) = head; BB_END (curr_bb) = insn; add_missing_bbs (BB_HEAD (curr_bb), bb, curr_bb->prev_bb); } note = GET_CODE (head) == CODE_LABEL ? NEXT_INSN (head) : head; NOTE_BASIC_BLOCK (note) = curr_bb; update_bb_for_insn (curr_bb); bb = curr_bb->next_bb; last_inside = NULL; if (!insn) break; } } add_missing_bbs (BB_HEAD (last->next_bb), bb, last); return bb->prev_bb; } /* Returns the earliest block in EBB currently being processed where a "similar load" 'insn2' is found, and hence LOAD_INSN can move speculatively into the found block. All the following must hold: (1) both loads have 1 base register (PFREE_CANDIDATEs). (2) load_insn and load2 have a def-use dependence upon the same insn 'insn1'. From all these we can conclude that the two loads access memory addresses that differ at most by a constant, and hence if moving load_insn would cause an exception, it would have been caused by load2 anyhow. The function uses list (given by LAST_BLOCK) of already processed blocks in EBB. The list is formed in `add_deps_for_risky_insns'. */ static basic_block earliest_block_with_similiar_load (basic_block last_block, rtx load_insn) { rtx back_link; basic_block bb, earliest_block = NULL; for (back_link = LOG_LINKS (load_insn); back_link; back_link = XEXP (back_link, 1)) { rtx insn1 = XEXP (back_link, 0); if (GET_MODE (back_link) == VOIDmode) { /* Found a DEF-USE dependence (insn1, load_insn). */ rtx fore_link; for (fore_link = INSN_DEPEND (insn1); fore_link; fore_link = XEXP (fore_link, 1)) { rtx insn2 = XEXP (fore_link, 0); basic_block insn2_block = BLOCK_FOR_INSN (insn2); if (GET_MODE (fore_link) == VOIDmode) { if (earliest_block != NULL && earliest_block->index < insn2_block->index) continue; /* Found a DEF-USE dependence (insn1, insn2). */ if (haifa_classify_insn (insn2) != PFREE_CANDIDATE) /* insn2 not guaranteed to be a 1 base reg load. */ continue; for (bb = last_block; bb; bb = bb->aux) if (insn2_block == bb) break; if (!bb) /* insn2 is the similar load. */ earliest_block = insn2_block; } } } } return earliest_block; } /* The following function adds dependencies between jumps and risky insns in given ebb. 
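A "risky" insn is one that haifa_classify_insn reports as possibly trapping (for example a load through a pointer that may fault); giving it an anti dependence on the last preceding jump keeps it from being hoisted speculatively above that jump during ebb scheduling.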
*/ static void add_deps_for_risky_insns (rtx head, rtx tail) { rtx insn, prev; int class; rtx last_jump = NULL_RTX; rtx next_tail = NEXT_INSN (tail); basic_block last_block = NULL, bb; for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) if (GET_CODE (insn) == JUMP_INSN) { bb = BLOCK_FOR_INSN (insn); bb->aux = last_block; last_block = bb; last_jump = insn; } else if (INSN_P (insn) && last_jump != NULL_RTX) { class = haifa_classify_insn (insn); prev = last_jump; switch (class) { case PFREE_CANDIDATE: if (flag_schedule_speculative_load) { bb = earliest_block_with_similiar_load (last_block, insn); if (bb) { bb = bb->aux; if (!bb) break; prev = BB_END (bb); } } /* Fall through. */ case TRAP_RISKY: case IRISKY: case PRISKY_CANDIDATE: /* ??? We could implement better checking PRISKY_CANDIDATEs analogous to sched-rgn.c. */ /* We can not change the mode of the backward dependency because REG_DEP_ANTI has the lowest rank. */ if (add_dependence (insn, prev, REG_DEP_ANTI)) add_forward_dependence (prev, insn, REG_DEP_ANTI); break; default: break; } } /* Maintain the invariant that bb->aux is clear after use. */ while (last_block) { bb = last_block->aux; last_block->aux = NULL; last_block = bb; } } /* Schedule a single extended basic block, defined by the boundaries HEAD and TAIL. */ static basic_block schedule_ebb (rtx head, rtx tail) { int n_insns; basic_block b; struct deps tmp_deps; basic_block first_bb = BLOCK_FOR_INSN (head); basic_block last_bb = BLOCK_FOR_INSN (tail); if (no_real_insns_p (head, tail)) return BLOCK_FOR_INSN (tail); init_deps_global (); /* Compute LOG_LINKS. */ init_deps (&tmp_deps); sched_analyze (&tmp_deps, head, tail); free_deps (&tmp_deps); /* Compute INSN_DEPEND. */ compute_forward_dependences (head, tail); add_deps_for_risky_insns (head, tail); if (targetm.sched.dependencies_evaluation_hook) targetm.sched.dependencies_evaluation_hook (head, tail); /* Set priorities. */ n_insns = set_priorities (head, tail); current_sched_info->prev_head = PREV_INSN (head); current_sched_info->next_tail = NEXT_INSN (tail); if (write_symbols != NO_DEBUG) { save_line_notes (first_bb->index, head, tail); rm_line_notes (head, tail); } /* rm_other_notes only removes notes which are _inside_ the block---that is, it won't remove notes before the first real insn or after the last real insn of the block. So if the first insn has a REG_SAVE_NOTE which would otherwise be emitted before the insn, it is redundant with the note before the start of the block, and so we have to take it out. */ if (INSN_P (head)) { rtx note; for (note = REG_NOTES (head); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_SAVE_NOTE) { remove_note (head, note); note = XEXP (note, 1); remove_note (head, note); } } /* Remove remaining note insns from the block, save them in note_list. These notes are restored at the end of schedule_block (). */ rm_other_notes (head, tail); current_sched_info->queue_must_finish_empty = 1; schedule_block (-1, n_insns); /* Sanity check: verify that all region insns were scheduled. */ if (sched_n_insns != n_insns) abort (); head = current_sched_info->head; tail = current_sched_info->tail; if (write_symbols != NO_DEBUG) restore_line_notes (head, tail); b = fix_basic_block_boundaries (first_bb, last_bb, head, tail); finish_deps_global (); return b; } /* The one entry point in this file. DUMP_FILE is the dump file for this pass. 
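schedule_ebbs greedily grows each extended basic block: starting from a block head, successor blocks are appended as long as they are reached through a fallthru edge whose probability exceeds the cutoff and do not begin with a label; each such sequence is then handed to schedule_ebb.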
*/ void schedule_ebbs (FILE *dump_file) { basic_block bb; int probability_cutoff; if (profile_info && flag_branch_probabilities) probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK); else probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY); probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff; /* Taking care of this degenerate case makes the rest of this code simpler. */ if (n_basic_blocks == 0) return; sched_init (dump_file); current_sched_info = &ebb_sched_info; compute_bb_for_insn (); /* Schedule every region in the subroutine. */ FOR_EACH_BB (bb) { rtx head = BB_HEAD (bb); rtx tail; for (;;) { edge e; tail = BB_END (bb); if (bb->next_bb == EXIT_BLOCK_PTR || GET_CODE (BB_HEAD (bb->next_bb)) == CODE_LABEL) break; for (e = bb->succ; e; e = e->succ_next) if ((e->flags & EDGE_FALLTHRU) != 0) break; if (! e) break; if (e->probability <= probability_cutoff) break; bb = bb->next_bb; } /* Blah. We should fix the rest of the code not to get confused by a note or two. */ while (head != tail) { if (GET_CODE (head) == NOTE) head = NEXT_INSN (head); else if (GET_CODE (tail) == NOTE) tail = PREV_INSN (tail); else if (GET_CODE (head) == CODE_LABEL) head = NEXT_INSN (head); else break; } bb = schedule_ebb (head, tail); } /* Updating life info can be done by local propagation over the modified superblocks. */ /* Reposition the prologue and epilogue notes in case we moved the prologue/epilogue insns. */ if (reload_completed) reposition_prologue_and_epilogue_notes (get_insns ()); if (write_symbols != NO_DEBUG) rm_redundant_line_notes (); sched_finish (); } /* Instruction scheduling pass. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This pass implements list scheduling within basic blocks. It is run twice: (1) after flow analysis, but before register allocation, and (2) after register allocation. The first run performs interblock scheduling, moving insns between different blocks in the same "region", and the second runs only basic block scheduling. Interblock motions performed are useful motions and speculative motions, including speculative loads. Motions requiring code duplication are not supported. The identification of motion type and the check for validity of speculative motions requires construction and analysis of the function's control flow graph. The main entry point for this pass is schedule_insns(), called for each function. 
The work of the scheduler is organized in three levels: (1) function level: insns are subject to splitting, control-flow-graph is constructed, regions are computed (after reload, each region is of one block), (2) region level: control flow graph attributes required for interblock scheduling are computed (dominators, reachability, etc.), data dependences and priorities are computed, and (3) block level: insns in the block are actually scheduled. */ /* Define when we want to do count REG_DEAD notes before and after scheduling for sanity checking. We can't do that when conditional execution is used, as REG_DEAD exist only for unconditional deaths. */ #if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING) #define CHECK_DEAD_NOTES 1 #else #define CHECK_DEAD_NOTES 0 #endif #ifdef INSN_SCHEDULING /* Some accessor macros for h_i_d members only used within this file. */ #define INSN_REF_COUNT(INSN) (h_i_d[INSN_UID (INSN)].ref_count) #define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load) #define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn) /* nr_inter/spec counts interblock/speculative motion for the function. */ static int nr_inter, nr_spec; /* Control flow graph edges are kept in circular lists. */ typedef struct { int from_block; int to_block; int next_in; int next_out; } haifa_edge; static haifa_edge *edge_table; #define NEXT_IN(edge) (edge_table[edge].next_in) #define NEXT_OUT(edge) (edge_table[edge].next_out) #define FROM_BLOCK(edge) (edge_table[edge].from_block) #define TO_BLOCK(edge) (edge_table[edge].to_block) /* Number of edges in the control flow graph. (In fact, larger than that by 1, since edge 0 is unused.) */ static int nr_edges; /* Circular list of incoming/outgoing edges of a block. */ static int *in_edges; static int *out_edges; #define IN_EDGES(block) (in_edges[block]) #define OUT_EDGES(block) (out_edges[block]) static int is_cfg_nonregular (void); static int build_control_flow (struct edge_list *); static void new_edge (int, int); /* A region is the main entity for interblock scheduling: insns are allowed to move between blocks in the same region, along control flow graph edges, in the 'up' direction. */ typedef struct { int rgn_nr_blocks; /* Number of blocks in region. */ int rgn_blocks; /* cblocks in the region (actually index in rgn_bb_table). */ } region; /* Number of regions in the procedure. */ static int nr_regions; /* Table of region descriptions. */ static region *rgn_table; /* Array of lists of regions' blocks. */ static int *rgn_bb_table; /* Topological order of blocks in the region (if b2 is reachable from b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is always referred to by either block or b, while its topological order name (in the region) is referred to by bb. */ static int *block_to_bb; /* The number of the region containing a block. */ static int *containing_rgn; #define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks) #define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks) #define BLOCK_TO_BB(block) (block_to_bb[block]) #define CONTAINING_RGN(block) (containing_rgn[block]) void debug_regions (void); static void find_single_block_region (void); static void find_rgns (struct edge_list *); static bool too_large (int, int *, int *); extern void debug_live (int, int); /* Blocks of the current region being scheduled. */ static int current_nr_blocks; static int current_blocks; /* The mapping from bb to block. 
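Within a region the blocks occupy a contiguous slice of rgn_bb_table starting at RGN_BLOCKS (rgn); current_blocks caches that start index for the region being scheduled, so BB_TO_BLOCK translates a topological index within the region into a basic block number.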
*/ #define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)]) typedef struct { int *first_member; /* Pointer to the list start in bitlst_table. */ int nr_members; /* The number of members of the bit list. */ } bitlst; static int bitlst_table_last; static int *bitlst_table; static void extract_bitlst (sbitmap, bitlst *); /* Target info declarations. The block currently being scheduled is referred to as the "target" block, while other blocks in the region from which insns can be moved to the target are called "source" blocks. The candidate structure holds info about such sources: are they valid? Speculative? Etc. */ typedef bitlst bblst; typedef struct { char is_valid; char is_speculative; int src_prob; bblst split_bbs; bblst update_bbs; } candidate; static candidate *candidate_table; /* A speculative motion requires checking live information on the path from 'source' to 'target'. The split blocks are those to be checked. After a speculative motion, live information should be modified in the 'update' blocks. Lists of split and update blocks for each candidate of the current target are in array bblst_table. */ static int *bblst_table, bblst_size, bblst_last; #define IS_VALID(src) ( candidate_table[src].is_valid ) #define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative ) #define SRC_PROB(src) ( candidate_table[src].src_prob ) /* The bb being currently scheduled. */ static int target_bb; /* List of edges. */ typedef bitlst edgelst; /* Target info functions. */ static void split_edges (int, int, edgelst *); static void compute_trg_info (int); void debug_candidate (int); void debug_candidates (int); /* Dominators array: dom[i] contains the sbitmap of dominators of bb i in the region. */ static sbitmap *dom; /* bb 0 is the only region entry. */ #define IS_RGN_ENTRY(bb) (!bb) /* Is bb_src dominated by bb_trg. */ #define IS_DOMINATED(bb_src, bb_trg) \ ( TEST_BIT (dom[bb_src], bb_trg) ) /* Probability: Prob[i] is a float in [0, 1] which is the probability of bb i relative to the region entry. */ static float *prob; /* The probability of bb_src, relative to bb_trg. Note, that while the 'prob[bb]' is a float in [0, 1], this macro returns an integer in [0, 100]. */ #define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \ prob[bb_trg]))) /* Bit-set of edges, where bit i stands for edge i. */ typedef sbitmap edgeset; /* Number of edges in the region. */ static int rgn_nr_edges; /* Array of size rgn_nr_edges. */ static int *rgn_edges; /* Mapping from each edge in the graph to its number in the rgn. */ static int *edge_to_bit; #define EDGE_TO_BIT(edge) (edge_to_bit[edge]) /* The split edges of a source bb is different for each target bb. In order to compute this efficiently, the 'potential-split edges' are computed for each bb prior to scheduling a region. This is actually the split edges of each bb relative to the region entry. pot_split[bb] is the set of potential split edges of bb. */ static edgeset *pot_split; /* For every bb, a set of its ancestor edges. */ static edgeset *ancestor_edges; static void compute_dom_prob_ps (int); #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN)))) #define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN)))) #define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN))) /* Parameters affecting the decision of rank_for_schedule(). ??? Nope. But MIN_PROBABILITY is used in compute_trg_info. */ #define MIN_PROBABILITY 40 /* Speculative scheduling functions. 
*/ static int check_live_1 (int, rtx); static void update_live_1 (int, rtx); static int check_live (rtx, int); static void update_live (rtx, int); static void set_spec_fed (rtx); static int is_pfree (rtx, int, int); static int find_conditional_protection (rtx, int); static int is_conditionally_protected (rtx, int, int); static int is_prisky (rtx, int, int); static int is_exception_free (rtx, int, int); static bool sets_likely_spilled (rtx); static void sets_likely_spilled_1 (rtx, rtx, void *); static void add_branch_dependences (rtx, rtx); static void compute_block_backward_dependences (int); void debug_dependencies (void); static void init_regions (void); static void schedule_region (int); static rtx concat_INSN_LIST (rtx, rtx); static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *); static void propagate_deps (int, struct deps *); static void free_pending_lists (void); /* Functions for construction of the control flow graph. */ /* Return 1 if control flow graph should not be constructed, 0 otherwise. We decide not to build the control flow graph if there is possibly more than one entry to the function, if computed branches exist, of if we have nonlocal gotos. */ static int is_cfg_nonregular (void) { basic_block b; rtx insn; RTX_CODE code; /* If we have a label that could be the target of a nonlocal goto, then the cfg is not well structured. */ if (nonlocal_goto_handler_labels) return 1; /* If we have any forced labels, then the cfg is not well structured. */ if (forced_labels) return 1; /* If this function has a computed jump, then we consider the cfg not well structured. */ if (current_function_has_computed_jump) return 1; /* If we have exception handlers, then we consider the cfg not well structured. ?!? We should be able to handle this now that flow.c computes an accurate cfg for EH. */ if (current_function_has_exception_handlers ()) return 1; /* If we have non-jumping insns which refer to labels, then we consider the cfg not well structured. */ /* Check for labels referred to other thn by jumps. */ FOR_EACH_BB (b) for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn)) { code = GET_CODE (insn); if (INSN_P (insn) && code != JUMP_INSN) { rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX); if (note && ! (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN && find_reg_note (NEXT_INSN (insn), REG_LABEL, XEXP (note, 0)))) return 1; } if (insn == BB_END (b)) break; } /* All the tests passed. Consider the cfg well structured. */ return 0; } /* Build the control flow graph and set nr_edges. Instead of trying to build a cfg ourselves, we rely on flow to do it for us. Stamp out useless code (and bug) duplication. Return nonzero if an irregularity in the cfg is found which would prevent cross block scheduling. */ static int build_control_flow (struct edge_list *edge_list) { int i, unreachable, num_edges; basic_block b; /* This already accounts for entry/exit edges. */ num_edges = NUM_EDGES (edge_list); /* Unreachable loops with more than one basic block are detected during the DFS traversal in find_rgns. Unreachable loops with a single block are detected here. This test is redundant with the one in find_rgns, but it's much cheaper to go ahead and catch the trivial case here. */ unreachable = 0; FOR_EACH_BB (b) { if (b->pred == NULL || (b->pred->src == b && b->pred->pred_next == NULL)) unreachable = 1; } /* ??? We can kill these soon. 
*/ in_edges = xcalloc (last_basic_block, sizeof (int)); out_edges = xcalloc (last_basic_block, sizeof (int)); edge_table = xcalloc (num_edges, sizeof (haifa_edge)); nr_edges = 0; for (i = 0; i < num_edges; i++) { edge e = INDEX_EDGE (edge_list, i); if (e->dest != EXIT_BLOCK_PTR && e->src != ENTRY_BLOCK_PTR) new_edge (e->src->index, e->dest->index); } /* Increment by 1, since edge 0 is unused. */ nr_edges++; return unreachable; } /* Record an edge in the control flow graph from SOURCE to TARGET. In theory, this is redundant with the s_succs computed above, but we have not converted all of haifa to use information from the integer lists. */ static void new_edge (int source, int target) { int e, next_edge; int curr_edge, fst_edge; /* Check for duplicates. */ fst_edge = curr_edge = OUT_EDGES (source); while (curr_edge) { if (FROM_BLOCK (curr_edge) == source && TO_BLOCK (curr_edge) == target) { return; } curr_edge = NEXT_OUT (curr_edge); if (fst_edge == curr_edge) break; } e = ++nr_edges; FROM_BLOCK (e) = source; TO_BLOCK (e) = target; if (OUT_EDGES (source)) { next_edge = NEXT_OUT (OUT_EDGES (source)); NEXT_OUT (OUT_EDGES (source)) = e; NEXT_OUT (e) = next_edge; } else { OUT_EDGES (source) = e; NEXT_OUT (e) = e; } if (IN_EDGES (target)) { next_edge = NEXT_IN (IN_EDGES (target)); NEXT_IN (IN_EDGES (target)) = e; NEXT_IN (e) = next_edge; } else { IN_EDGES (target) = e; NEXT_IN (e) = e; } } /* Translate a bit-set SET to a list BL of the bit-set members. */ static void extract_bitlst (sbitmap set, bitlst *bl) { int i; /* bblst table space is reused in each call to extract_bitlst. */ bitlst_table_last = 0; bl->first_member = &bitlst_table[bitlst_table_last]; bl->nr_members = 0; /* Iterate over each word in the bitset. */ EXECUTE_IF_SET_IN_SBITMAP (set, 0, i, { bitlst_table[bitlst_table_last++] = i; (bl->nr_members)++; }); } /* Functions for the construction of regions. */ /* Print the regions, for debugging purposes. Callable from debugger. */ void debug_regions (void) { int rgn, bb; fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n"); for (rgn = 0; rgn < nr_regions; rgn++) { fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn, rgn_table[rgn].rgn_nr_blocks); fprintf (sched_dump, ";;\tbb/block: "); for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++) { current_blocks = RGN_BLOCKS (rgn); if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb))) abort (); fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb)); } fprintf (sched_dump, "\n\n"); } } /* Build a single block region for each basic block in the function. This allows for using the same code for interblock and basic block scheduling. */ static void find_single_block_region (void) { basic_block bb; nr_regions = 0; FOR_EACH_BB (bb) { rgn_bb_table[nr_regions] = bb->index; RGN_NR_BLOCKS (nr_regions) = 1; RGN_BLOCKS (nr_regions) = nr_regions; CONTAINING_RGN (bb->index) = nr_regions; BLOCK_TO_BB (bb->index) = 0; nr_regions++; } } /* Update number of blocks and the estimate for number of insns in the region. Return true if the region is "too large" for interblock scheduling (compile time considerations). */ static bool too_large (int block, int *num_bbs, int *num_insns) { (*num_bbs)++; (*num_insns) += (SCHED_INSN_LUID (BB_END (BASIC_BLOCK (block))) - SCHED_INSN_LUID (BB_HEAD (BASIC_BLOCK (block)))); return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS)) || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS))); } /* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk] is still an inner loop. 
Put in max_hdr[blk] the header of the most inner loop containing blk. */ #define UPDATE_LOOP_RELATIONS(blk, hdr) \ { \ if (max_hdr[blk] == -1) \ max_hdr[blk] = hdr; \ else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \ RESET_BIT (inner, hdr); \ else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \ { \ RESET_BIT (inner,max_hdr[blk]); \ max_hdr[blk] = hdr; \ } \ } /* Find regions for interblock scheduling. A region for scheduling can be: * A loop-free procedure, or * A reducible inner loop, or * A basic block not contained in any other region. ?!? In theory we could build other regions based on extended basic blocks or reverse extended basic blocks. Is it worth the trouble? Loop blocks that form a region are put into the region's block list in topological order. This procedure stores its results into the following global (ick) variables * rgn_nr * rgn_table * rgn_bb_table * block_to_bb * containing region We use dominator relationships to avoid making regions out of non-reducible loops. This procedure needs to be converted to work on pred/succ lists instead of edge tables. That would simplify it somewhat. */ static void find_rgns (struct edge_list *edge_list) { int *max_hdr, *dfs_nr, *stack, *degree; char no_loops = 1; int node, child, loop_head, i, head, tail; int count = 0, sp, idx = 0; int current_edge = out_edges[ENTRY_BLOCK_PTR->succ->dest->index]; int num_bbs, num_insns, unreachable; int too_large_failure; basic_block bb; /* Note if an edge has been passed. */ sbitmap passed; /* Note if a block is a natural loop header. */ sbitmap header; /* Note if a block is a natural inner loop header. */ sbitmap inner; /* Note if a block is in the block queue. */ sbitmap in_queue; /* Note if a block is in the block queue. */ sbitmap in_stack; int num_edges = NUM_EDGES (edge_list); /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops and a mapping from block to its loop header (if the block is contained in a loop, else -1). Store results in HEADER, INNER, and MAX_HDR respectively, these will be used as inputs to the second traversal. STACK, SP and DFS_NR are only used during the first traversal. */ /* Allocate and initialize variables for the first traversal. */ max_hdr = xmalloc (last_basic_block * sizeof (int)); dfs_nr = xcalloc (last_basic_block, sizeof (int)); stack = xmalloc (nr_edges * sizeof (int)); inner = sbitmap_alloc (last_basic_block); sbitmap_ones (inner); header = sbitmap_alloc (last_basic_block); sbitmap_zero (header); passed = sbitmap_alloc (nr_edges); sbitmap_zero (passed); in_queue = sbitmap_alloc (last_basic_block); sbitmap_zero (in_queue); in_stack = sbitmap_alloc (last_basic_block); sbitmap_zero (in_stack); for (i = 0; i < last_basic_block; i++) max_hdr[i] = -1; /* DFS traversal to find inner loops in the cfg. */ sp = -1; while (1) { if (current_edge == 0 || TEST_BIT (passed, current_edge)) { /* We have reached a leaf node or a node that was already processed. Pop edges off the stack until we find an edge that has not yet been processed. */ while (sp >= 0 && (current_edge == 0 || TEST_BIT (passed, current_edge))) { /* Pop entry off the stack. */ current_edge = stack[sp--]; node = FROM_BLOCK (current_edge); child = TO_BLOCK (current_edge); RESET_BIT (in_stack, child); if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child])) UPDATE_LOOP_RELATIONS (node, max_hdr[child]); current_edge = NEXT_OUT (current_edge); } /* See if have finished the DFS tree traversal. 
*/ if (sp < 0 && TEST_BIT (passed, current_edge)) break; /* Nope, continue the traversal with the popped node. */ continue; } /* Process a node. */ node = FROM_BLOCK (current_edge); child = TO_BLOCK (current_edge); SET_BIT (in_stack, node); dfs_nr[node] = ++count; /* If the successor is in the stack, then we've found a loop. Mark the loop, if it is not a natural loop, then it will be rejected during the second traversal. */ if (TEST_BIT (in_stack, child)) { no_loops = 0; SET_BIT (header, child); UPDATE_LOOP_RELATIONS (node, child); SET_BIT (passed, current_edge); current_edge = NEXT_OUT (current_edge); continue; } /* If the child was already visited, then there is no need to visit it again. Just update the loop relationships and restart with a new edge. */ if (dfs_nr[child]) { if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child])) UPDATE_LOOP_RELATIONS (node, max_hdr[child]); SET_BIT (passed, current_edge); current_edge = NEXT_OUT (current_edge); continue; } /* Push an entry on the stack and continue DFS traversal. */ stack[++sp] = current_edge; SET_BIT (passed, current_edge); current_edge = OUT_EDGES (child); /* This is temporary until haifa is converted to use rth's new cfg routines which have true entry/exit blocks and the appropriate edges from/to those blocks. Generally we update dfs_nr for a node when we process its out edge. However, if the node has no out edge then we will not set dfs_nr for that node. This can confuse the scheduler into thinking that we have unreachable blocks, which in turn disables cross block scheduling. So, if we have a node with no out edges, go ahead and mark it as reachable now. */ if (current_edge == 0) dfs_nr[child] = ++count; } /* Another check for unreachable blocks. The earlier test in is_cfg_nonregular only finds unreachable blocks that do not form a loop. The DFS traversal will mark every block that is reachable from the entry node by placing a nonzero value in dfs_nr. Thus if dfs_nr is zero for any block, then it must be unreachable. */ unreachable = 0; FOR_EACH_BB (bb) if (dfs_nr[bb->index] == 0) { unreachable = 1; break; } /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array to hold degree counts. */ degree = dfs_nr; FOR_EACH_BB (bb) degree[bb->index] = 0; for (i = 0; i < num_edges; i++) { edge e = INDEX_EDGE (edge_list, i); if (e->dest != EXIT_BLOCK_PTR) degree[e->dest->index]++; } /* Do not perform region scheduling if there are any unreachable blocks. */ if (!unreachable) { int *queue; if (no_loops) SET_BIT (header, 0); /* Second traversal:find reducible inner loops and topologically sort block of each region. */ queue = xmalloc (n_basic_blocks * sizeof (int)); /* Find blocks which are inner loop headers. We still have non-reducible loops to consider at this point. */ FOR_EACH_BB (bb) { if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index)) { edge e; basic_block jbb; /* Now check that the loop is reducible. We do this separate from finding inner loops so that we do not find a reducible loop which contains an inner non-reducible loop. A simple way to find reducible/natural loops is to verify that each block in the loop is dominated by the loop header. If there exists a block that is not dominated by the loop header, then the block is reachable from outside the loop and thus the loop is not a natural loop. */ FOR_EACH_BB (jbb) { /* First identify blocks in the loop, except for the loop entry block. 
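   (BB itself is excluded because a header trivially dominates itself.)  As a concrete picture of what the dominator test rejects, consider a cycle H -> B -> H whose block B is also reachable through an edge A -> B from outside the cycle: B can carry max_hdr[B] == H, yet H does not dominate B, so the test below fails, the candidate loop is treated as irreducible, and no region is built around it.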
*/ if (bb->index == max_hdr[jbb->index] && bb != jbb) { /* Now verify that the block is dominated by the loop header. */ if (!dominated_by_p (CDI_DOMINATORS, jbb, bb)) break; } } /* If we exited the loop early, then I is the header of a non-reducible loop and we should quit processing it now. */ if (jbb != EXIT_BLOCK_PTR) continue; /* I is a header of an inner loop, or block 0 in a subroutine with no loops at all. */ head = tail = -1; too_large_failure = 0; loop_head = max_hdr[bb->index]; /* Decrease degree of all I's successors for topological ordering. */ for (e = bb->succ; e; e = e->succ_next) if (e->dest != EXIT_BLOCK_PTR) --degree[e->dest->index]; /* Estimate # insns, and count # blocks in the region. */ num_bbs = 1; num_insns = (SCHED_INSN_LUID (BB_END (bb)) - SCHED_INSN_LUID (BB_HEAD (bb))); /* Find all loop latches (blocks with back edges to the loop header) or all the leaf blocks in the cfg has no loops. Place those blocks into the queue. */ if (no_loops) { FOR_EACH_BB (jbb) /* Leaf nodes have only a single successor which must be EXIT_BLOCK. */ if (jbb->succ && jbb->succ->dest == EXIT_BLOCK_PTR && jbb->succ->succ_next == NULL) { queue[++tail] = jbb->index; SET_BIT (in_queue, jbb->index); if (too_large (jbb->index, &num_bbs, &num_insns)) { too_large_failure = 1; break; } } } else { edge e; for (e = bb->pred; e; e = e->pred_next) { if (e->src == ENTRY_BLOCK_PTR) continue; node = e->src->index; if (max_hdr[node] == loop_head && node != bb->index) { /* This is a loop latch. */ queue[++tail] = node; SET_BIT (in_queue, node); if (too_large (node, &num_bbs, &num_insns)) { too_large_failure = 1; break; } } } } /* Now add all the blocks in the loop to the queue. We know the loop is a natural loop; however the algorithm above will not always mark certain blocks as being in the loop. Consider: node children a b,c b c c a,d d b The algorithm in the DFS traversal may not mark B & D as part of the loop (ie they will not have max_hdr set to A). We know they can not be loop latches (else they would have had max_hdr set since they'd have a backedge to a dominator block). So we don't need them on the initial queue. We know they are part of the loop because they are dominated by the loop header and can be reached by a backwards walk of the edges starting with nodes on the initial queue. It is safe and desirable to include those nodes in the loop/scheduling region. To do so we would need to decrease the degree of a node if it is the target of a backedge within the loop itself as the node is placed in the queue. We do not do this because I'm not sure that the actual scheduling code will properly handle this case. ?!? */ while (head < tail && !too_large_failure) { edge e; child = queue[++head]; for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next) { node = e->src->index; /* See discussion above about nodes not marked as in this loop during the initial DFS traversal. */ if (e->src == ENTRY_BLOCK_PTR || max_hdr[node] != loop_head) { tail = -1; break; } else if (!TEST_BIT (in_queue, node) && node != bb->index) { queue[++tail] = node; SET_BIT (in_queue, node); if (too_large (node, &num_bbs, &num_insns)) { too_large_failure = 1; break; } } } } if (tail >= 0 && !too_large_failure) { /* Place the loop header into list of region blocks. */ degree[bb->index] = -1; rgn_bb_table[idx] = bb->index; RGN_NR_BLOCKS (nr_regions) = num_bbs; RGN_BLOCKS (nr_regions) = idx++; CONTAINING_RGN (bb->index) = nr_regions; BLOCK_TO_BB (bb->index) = count = 0; /* Remove blocks from queue[] when their in degree becomes zero. 
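   (This is essentially Kahn's topological sort restricted to the region: a block leaves the queue only once every intra-region predecessor has already been placed, which is what the degree counters track.)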
Repeat until no blocks are left on the list. This produces a topological list of blocks in the region. */ while (tail >= 0) { if (head < 0) head = tail; child = queue[head]; if (degree[child] == 0) { edge e; degree[child] = -1; rgn_bb_table[idx++] = child; BLOCK_TO_BB (child) = ++count; CONTAINING_RGN (child) = nr_regions; queue[head] = queue[tail--]; for (e = BASIC_BLOCK (child)->succ; e; e = e->succ_next) if (e->dest != EXIT_BLOCK_PTR) --degree[e->dest->index]; } else --head; } ++nr_regions; } } } free (queue); } /* Any block that did not end up in a region is placed into a region by itself. */ FOR_EACH_BB (bb) if (degree[bb->index] >= 0) { rgn_bb_table[idx] = bb->index; RGN_NR_BLOCKS (nr_regions) = 1; RGN_BLOCKS (nr_regions) = idx++; CONTAINING_RGN (bb->index) = nr_regions++; BLOCK_TO_BB (bb->index) = 0; } free (max_hdr); free (dfs_nr); free (stack); sbitmap_free (passed); sbitmap_free (header); sbitmap_free (inner); sbitmap_free (in_queue); sbitmap_free (in_stack); } /* Functions for regions scheduling information. */ /* Compute dominators, probability, and potential-split-edges of bb. Assume that these values were already computed for bb's predecessors. */ static void compute_dom_prob_ps (int bb) { int nxt_in_edge, fst_in_edge, pred; int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges; prob[bb] = 0.0; if (IS_RGN_ENTRY (bb)) { SET_BIT (dom[bb], 0); prob[bb] = 1.0; return; } fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb)); /* Initialize dom[bb] to '111..1'. */ sbitmap_ones (dom[bb]); do { pred = FROM_BLOCK (nxt_in_edge); sbitmap_a_and_b (dom[bb], dom[bb], dom[BLOCK_TO_BB (pred)]); sbitmap_a_or_b (ancestor_edges[bb], ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)]); SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge)); nr_out_edges = 1; nr_rgn_out_edges = 0; fst_out_edge = OUT_EDGES (pred); nxt_out_edge = NEXT_OUT (fst_out_edge); sbitmap_a_or_b (pot_split[bb], pot_split[bb], pot_split[BLOCK_TO_BB (pred)]); SET_BIT (pot_split[bb], EDGE_TO_BIT (fst_out_edge)); /* The successor doesn't belong in the region? */ if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) != CONTAINING_RGN (BB_TO_BLOCK (bb))) ++nr_rgn_out_edges; while (fst_out_edge != nxt_out_edge) { ++nr_out_edges; /* The successor doesn't belong in the region? */ if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) != CONTAINING_RGN (BB_TO_BLOCK (bb))) ++nr_rgn_out_edges; SET_BIT (pot_split[bb], EDGE_TO_BIT (nxt_out_edge)); nxt_out_edge = NEXT_OUT (nxt_out_edge); } /* Now nr_rgn_out_edges is the number of region-exit edges from pred, and nr_out_edges will be the number of pred out edges not leaving the region. */ nr_out_edges -= nr_rgn_out_edges; if (nr_rgn_out_edges > 0) prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges; else prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges; nxt_in_edge = NEXT_IN (nxt_in_edge); } while (fst_in_edge != nxt_in_edge); SET_BIT (dom[bb], bb); sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]); if (sched_verbose >= 2) fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb), (int) (100.0 * prob[bb])); } /* Functions for target info. */ /* Compute in BL the list of split-edges of bb_src relatively to bb_trg. Note that bb_trg dominates bb_src. 
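   Concretely the result is the set difference pot_split[bb_src] minus pot_split[bb_trg]: the potential-split edges of the source that are not already potential-split edges of the target.  These are the edges through which control can escape on the way from bb_trg to bb_src, and they are what compute_trg_info below turns into split blocks and update blocks.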
*/ static void split_edges (int bb_src, int bb_trg, edgelst *bl) { sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits); sbitmap_copy (src, pot_split[bb_src]); sbitmap_difference (src, src, pot_split[bb_trg]); extract_bitlst (src, bl); sbitmap_free (src); } /* Find the valid candidate-source-blocks for the target block TRG, compute their probability, and check if they are speculative or not. For speculative sources, compute their update-blocks and split-blocks. */ static void compute_trg_info (int trg) { candidate *sp; edgelst el; int check_block, update_idx; int i, j, k, fst_edge, nxt_edge; /* Define some of the fields for the target bb as well. */ sp = candidate_table + trg; sp->is_valid = 1; sp->is_speculative = 0; sp->src_prob = 100; for (i = trg + 1; i < current_nr_blocks; i++) { sp = candidate_table + i; sp->is_valid = IS_DOMINATED (i, trg); if (sp->is_valid) { sp->src_prob = GET_SRC_PROB (i, trg); sp->is_valid = (sp->src_prob >= MIN_PROBABILITY); } if (sp->is_valid) { split_edges (i, trg, &el); sp->is_speculative = (el.nr_members) ? 1 : 0; if (sp->is_speculative && !flag_schedule_speculative) sp->is_valid = 0; } if (sp->is_valid) { char *update_blocks; /* Compute split blocks and store them in bblst_table. The TO block of every split edge is a split block. */ sp->split_bbs.first_member = &bblst_table[bblst_last]; sp->split_bbs.nr_members = el.nr_members; for (j = 0; j < el.nr_members; bblst_last++, j++) bblst_table[bblst_last] = TO_BLOCK (rgn_edges[el.first_member[j]]); sp->update_bbs.first_member = &bblst_table[bblst_last]; /* Compute update blocks and store them in bblst_table. For every split edge, look at the FROM block, and check all out edges. For each out edge that is not a split edge, add the TO block to the update block list. This list can end up with a lot of duplicates. We need to weed them out to avoid overrunning the end of the bblst_table. */ update_blocks = alloca (last_basic_block); memset (update_blocks, 0, last_basic_block); update_idx = 0; for (j = 0; j < el.nr_members; j++) { check_block = FROM_BLOCK (rgn_edges[el.first_member[j]]); fst_edge = nxt_edge = OUT_EDGES (check_block); do { if (! update_blocks[TO_BLOCK (nxt_edge)]) { for (k = 0; k < el.nr_members; k++) if (EDGE_TO_BIT (nxt_edge) == el.first_member[k]) break; if (k >= el.nr_members) { bblst_table[bblst_last++] = TO_BLOCK (nxt_edge); update_blocks[TO_BLOCK (nxt_edge)] = 1; update_idx++; } } nxt_edge = NEXT_OUT (nxt_edge); } while (fst_edge != nxt_edge); } sp->update_bbs.nr_members = update_idx; /* Make sure we didn't overrun the end of bblst_table. */ if (bblst_last > bblst_size) abort (); } else { sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0; sp->is_speculative = 0; sp->src_prob = 0; } } } /* Print candidates info, for debugging purposes. Callable from debugger. 
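   For example, after compute_trg_info has filled candidate_table one can type "call debug_candidate (3)" in gdb to dump the split and update paths of candidate block 3 (the index 3 is only an illustration).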
*/ void debug_candidate (int i) { if (!candidate_table[i].is_valid) return; if (candidate_table[i].is_speculative) { int j; fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i); fprintf (sched_dump, "split path: "); for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++) { int b = candidate_table[i].split_bbs.first_member[j]; fprintf (sched_dump, " %d ", b); } fprintf (sched_dump, "\n"); fprintf (sched_dump, "update path: "); for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++) { int b = candidate_table[i].update_bbs.first_member[j]; fprintf (sched_dump, " %d ", b); } fprintf (sched_dump, "\n"); } else { fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i)); } } /* Print candidates info, for debugging purposes. Callable from debugger. */ void debug_candidates (int trg) { int i; fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n", BB_TO_BLOCK (trg), trg); for (i = trg + 1; i < current_nr_blocks; i++) debug_candidate (i); } /* Functions for speculative scheduling. */ /* Return 0 if x is a set of a register alive in the beginning of one of the split-blocks of src, otherwise return 1. */ static int check_live_1 (int src, rtx x) { int i; int regno; rtx reg = SET_DEST (x); if (reg == 0) return 1; while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == STRICT_LOW_PART) reg = XEXP (reg, 0); if (GET_CODE (reg) == PARALLEL) { int i; for (i = XVECLEN (reg, 0) - 1; i >= 0; i--) if (XEXP (XVECEXP (reg, 0, i), 0) != 0) if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0))) return 1; return 0; } if (!REG_P (reg)) return 1; regno = REGNO (reg); if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) { /* Global registers are assumed live. */ return 0; } else { if (regno < FIRST_PSEUDO_REGISTER) { /* Check for hard registers. */ int j = hard_regno_nregs[regno][GET_MODE (reg)]; while (--j >= 0) { for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++) { int b = candidate_table[src].split_bbs.first_member[i]; if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start, regno + j)) { return 0; } } } } else { /* Check for pseudo registers. */ for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++) { int b = candidate_table[src].split_bbs.first_member[i]; if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start, regno)) { return 0; } } } } return 1; } /* If x is a set of a register R, mark that R is alive in the beginning of every update-block of src. */ static void update_live_1 (int src, rtx x) { int i; int regno; rtx reg = SET_DEST (x); if (reg == 0) return; while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == STRICT_LOW_PART) reg = XEXP (reg, 0); if (GET_CODE (reg) == PARALLEL) { int i; for (i = XVECLEN (reg, 0) - 1; i >= 0; i--) if (XEXP (XVECEXP (reg, 0, i), 0) != 0) update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)); return; } if (!REG_P (reg)) return; /* Global registers are always live, so the code below does not apply to them. 
*/ regno = REGNO (reg); if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno]) { if (regno < FIRST_PSEUDO_REGISTER) { int j = hard_regno_nregs[regno][GET_MODE (reg)]; while (--j >= 0) { for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++) { int b = candidate_table[src].update_bbs.first_member[i]; SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start, regno + j); } } } else { for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++) { int b = candidate_table[src].update_bbs.first_member[i]; SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start, regno); } } } } /* Return 1 if insn can be speculatively moved from block src to trg, otherwise return 0. Called before first insertion of insn to ready-list or before the scheduling. */ static int check_live (rtx insn, int src) { /* Find the registers set by instruction. */ if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == CLOBBER) return check_live_1 (src, PATTERN (insn)); else if (GET_CODE (PATTERN (insn)) == PARALLEL) { int j; for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--) if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER) && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j))) return 0; return 1; } return 1; } /* Update the live registers info after insn was moved speculatively from block src to trg. */ static void update_live (rtx insn, int src) { /* Find the registers set by instruction. */ if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == CLOBBER) update_live_1 (src, PATTERN (insn)); else if (GET_CODE (PATTERN (insn)) == PARALLEL) { int j; for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--) if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER) update_live_1 (src, XVECEXP (PATTERN (insn), 0, j)); } } /* Nonzero if block bb_to is equal to, or reachable from block bb_from. */ #define IS_REACHABLE(bb_from, bb_to) \ (bb_from == bb_to \ || IS_RGN_ENTRY (bb_from) \ || (TEST_BIT (ancestor_edges[bb_to], \ EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from)))))) /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */ static void set_spec_fed (rtx load_insn) { rtx link; for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1)) if (GET_MODE (link) == VOIDmode) FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1; } /* set_spec_fed */ /* On the path from the insn to load_insn_bb, find a conditional branch depending on insn, that guards the speculative load. */ static int find_conditional_protection (rtx insn, int load_insn_bb) { rtx link; /* Iterate through DEF-USE forward dependences. */ for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1)) { rtx next = XEXP (link, 0); if ((CONTAINING_RGN (BLOCK_NUM (next)) == CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb))) && IS_REACHABLE (INSN_BB (next), load_insn_bb) && load_insn_bb != INSN_BB (next) && GET_MODE (link) == VOIDmode && (GET_CODE (next) == JUMP_INSN || find_conditional_protection (next, load_insn_bb))) return 1; } return 0; } /* find_conditional_protection */ /* Returns 1 if the same insn1 that participates in the computation of load_insn's address is feeding a conditional branch that is guarding on load_insn. This is true if we find a the two DEF-USE chains: insn1 -> ... -> conditional-branch insn1 -> ... -> load_insn, and if a flow path exist: insn1 -> ... -> conditional-branch -> ... -> load_insn, and if insn1 is on the path region-entry -> ... -> bb_trg -> ... load_insn. 
Locate insn1 by climbing on LOG_LINKS from load_insn. Locate the branch by following INSN_DEPEND from insn1. */ static int is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg) { rtx link; for (link = LOG_LINKS (load_insn); link; link = XEXP (link, 1)) { rtx insn1 = XEXP (link, 0); /* Must be a DEF-USE dependence upon non-branch. */ if (GET_MODE (link) != VOIDmode || GET_CODE (insn1) == JUMP_INSN) continue; /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */ if (INSN_BB (insn1) == bb_src || (CONTAINING_RGN (BLOCK_NUM (insn1)) != CONTAINING_RGN (BB_TO_BLOCK (bb_src))) || (!IS_REACHABLE (bb_trg, INSN_BB (insn1)) && !IS_REACHABLE (INSN_BB (insn1), bb_trg))) continue; /* Now search for the conditional-branch. */ if (find_conditional_protection (insn1, bb_src)) return 1; /* Recursive step: search another insn1, "above" current insn1. */ return is_conditionally_protected (insn1, bb_src, bb_trg); } /* The chain does not exist. */ return 0; } /* is_conditionally_protected */ /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence load_insn can move speculatively from bb_src to bb_trg. All the following must hold: (1) both loads have 1 base register (PFREE_CANDIDATEs). (2) load_insn and load1 have a def-use dependence upon the same insn 'insn1'. (3) either load2 is in bb_trg, or: - there's only one split-block, and - load1 is on the escape path, and From all these we can conclude that the two loads access memory addresses that differ at most by a constant, and hence if moving load_insn would cause an exception, it would have been caused by load2 anyhow. */ static int is_pfree (rtx load_insn, int bb_src, int bb_trg) { rtx back_link; candidate *candp = candidate_table + bb_src; if (candp->split_bbs.nr_members != 1) /* Must have exactly one escape block. */ return 0; for (back_link = LOG_LINKS (load_insn); back_link; back_link = XEXP (back_link, 1)) { rtx insn1 = XEXP (back_link, 0); if (GET_MODE (back_link) == VOIDmode) { /* Found a DEF-USE dependence (insn1, load_insn). */ rtx fore_link; for (fore_link = INSN_DEPEND (insn1); fore_link; fore_link = XEXP (fore_link, 1)) { rtx insn2 = XEXP (fore_link, 0); if (GET_MODE (fore_link) == VOIDmode) { /* Found a DEF-USE dependence (insn1, insn2). */ if (haifa_classify_insn (insn2) != PFREE_CANDIDATE) /* insn2 not guaranteed to be a 1 base reg load. */ continue; if (INSN_BB (insn2) == bb_trg) /* insn2 is the similar load, in the target block. */ return 1; if (*(candp->split_bbs.first_member) == BLOCK_NUM (insn2)) /* insn2 is a similar load, in a split-block. */ return 1; } } } } /* Couldn't find a similar load. */ return 0; } /* is_pfree */ /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by a load moved speculatively, or if load_insn is protected by a compare on load_insn's address). */ static int is_prisky (rtx load_insn, int bb_src, int bb_trg) { if (FED_BY_SPEC_LOAD (load_insn)) return 1; if (LOG_LINKS (load_insn) == NULL) /* Dependence may 'hide' out of the region. */ return 1; if (is_conditionally_protected (load_insn, bb_src, bb_trg)) return 1; return 0; } /* Insn is a candidate to be moved speculatively from bb_src to bb_trg. Return 1 if insn is exception-free (and the motion is valid) and 0 otherwise. */ static int is_exception_free (rtx insn, int bb_src, int bb_trg) { int insn_class = haifa_classify_insn (insn); /* Handle non-load insns. */ switch (insn_class) { case TRAP_FREE: return 1; case TRAP_RISKY: return 0; default:; } /* Handle loads. 
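   (flag_schedule_speculative_load and flag_schedule_speculative_load_dangerous, tested below, correspond to the -fsched-spec-load and -fsched-spec-load-dangerous command line options.)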
*/ if (!flag_schedule_speculative_load) return 0; IS_LOAD_INSN (insn) = 1; switch (insn_class) { case IFREE: return (1); case IRISKY: return 0; case PFREE_CANDIDATE: if (is_pfree (insn, bb_src, bb_trg)) return 1; /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */ case PRISKY_CANDIDATE: if (!flag_schedule_speculative_load_dangerous || is_prisky (insn, bb_src, bb_trg)) return 0; break; default:; } return flag_schedule_speculative_load_dangerous; } /* The number of insns from the current block scheduled so far. */ static int sched_target_n_insns; /* The number of insns from the current block to be scheduled in total. */ static int target_n_insns; /* The number of insns from the entire region scheduled so far. */ static int sched_n_insns; /* Nonzero if the last scheduled insn was a jump. */ static int last_was_jump; /* Implementations of the sched_info functions for region scheduling. */ static void init_ready_list_rgn (struct ready_list *); static int can_schedule_ready_p_rgn (rtx); static int new_ready_rgn (rtx); static int schedule_more_p_rgn (void); static const char *rgn_print_insn (rtx, int); static int rgn_rank (rtx, rtx); static int contributes_to_priority_rgn (rtx, rtx); static void compute_jump_reg_dependencies_rgn (rtx, regset, regset, regset); /* Return nonzero if there are more insns that should be scheduled. */ static int schedule_more_p_rgn (void) { return ! last_was_jump && sched_target_n_insns < target_n_insns; } /* Add all insns that are initially ready to the ready list READY. Called once before scheduling a set of insns. */ static void init_ready_list_rgn (struct ready_list *ready) { rtx prev_head = current_sched_info->prev_head; rtx next_tail = current_sched_info->next_tail; int bb_src; rtx insn; target_n_insns = 0; sched_target_n_insns = 0; sched_n_insns = 0; last_was_jump = 0; /* Print debugging information. */ if (sched_verbose >= 5) debug_dependencies (); /* Prepare current target block info. */ if (current_nr_blocks > 1) { candidate_table = xmalloc (current_nr_blocks * sizeof (candidate)); bblst_last = 0; /* bblst_table holds split blocks and update blocks for each block after the current one in the region. split blocks and update blocks are the TO blocks of region edges, so there can be at most rgn_nr_edges of them. */ bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges; bblst_table = xmalloc (bblst_size * sizeof (int)); bitlst_table_last = 0; bitlst_table = xmalloc (rgn_nr_edges * sizeof (int)); compute_trg_info (target_bb); } /* Initialize ready list with all 'ready' insns in target block. Count number of insns in the target block being scheduled. */ for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn)) { if (INSN_DEP_COUNT (insn) == 0) { ready_add (ready, insn); if (targetm.sched.adjust_priority) INSN_PRIORITY (insn) = targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn)); } target_n_insns++; } /* Add to ready list all 'ready' insns in valid source blocks. For speculative insns, check-live, exception-free, and issue-delay. */ for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++) if (IS_VALID (bb_src)) { rtx src_head; rtx src_next_tail; rtx tail, head; get_block_head_tail (BB_TO_BLOCK (bb_src), &head, &tail); src_next_tail = NEXT_INSN (tail); src_head = head; for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn)) { if (! 
INSN_P (insn)) continue; if (!CANT_MOVE (insn) && (!IS_SPECULATIVE_INSN (insn) || ((((!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) && insn_issue_delay (insn) <= 3) || (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface () && (recog_memoized (insn) < 0 || min_insn_conflict_delay (curr_state, insn, insn) <= 3))) && check_live (insn, bb_src) && is_exception_free (insn, bb_src, target_bb)))) if (INSN_DEP_COUNT (insn) == 0) { ready_add (ready, insn); if (targetm.sched.adjust_priority) INSN_PRIORITY (insn) = targetm.sched.adjust_priority (insn, INSN_PRIORITY (insn)); } } } } /* Called after taking INSN from the ready list. Returns nonzero if this insn can be scheduled, nonzero if we should silently discard it. */ static int can_schedule_ready_p_rgn (rtx insn) { if (GET_CODE (insn) == JUMP_INSN) last_was_jump = 1; /* An interblock motion? */ if (INSN_BB (insn) != target_bb) { basic_block b1; if (IS_SPECULATIVE_INSN (insn)) { if (!check_live (insn, INSN_BB (insn))) return 0; update_live (insn, INSN_BB (insn)); /* For speculative load, mark insns fed by it. */ if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn)) set_spec_fed (insn); nr_spec++; } nr_inter++; /* Update source block boundaries. */ b1 = BLOCK_FOR_INSN (insn); if (insn == BB_HEAD (b1) && insn == BB_END (b1)) { /* We moved all the insns in the basic block. Emit a note after the last insn and update the begin/end boundaries to point to the note. */ rtx note = emit_note_after (NOTE_INSN_DELETED, insn); BB_HEAD (b1) = note; BB_END (b1) = note; } else if (insn == BB_END (b1)) { /* We took insns from the end of the basic block, so update the end of block boundary so that it points to the first insn we did not move. */ BB_END (b1) = PREV_INSN (insn); } else if (insn == BB_HEAD (b1)) { /* We took insns from the start of the basic block, so update the start of block boundary so that it points to the first insn we did not move. */ BB_HEAD (b1) = NEXT_INSN (insn); } } else { /* In block motion. */ sched_target_n_insns++; } sched_n_insns++; return 1; } /* Called after INSN has all its dependencies resolved. Return nonzero if it should be moved to the ready list or the queue, or zero if we should silently discard it. */ static int new_ready_rgn (rtx next) { /* For speculative insns, before inserting to ready/queue, check live, exception-free, and issue-delay. */ if (INSN_BB (next) != target_bb && (!IS_VALID (INSN_BB (next)) || CANT_MOVE (next) || (IS_SPECULATIVE_INSN (next) && (0 || (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface () && recog_memoized (next) >= 0 && min_insn_conflict_delay (curr_state, next, next) > 3) || ((!targetm.sched.use_dfa_pipeline_interface || !targetm.sched.use_dfa_pipeline_interface ()) && insn_issue_delay (next) > 3) || !check_live (next, INSN_BB (next)) || !is_exception_free (next, INSN_BB (next), target_bb))))) return 0; return 1; } /* Return a string that contains the insn uid and optionally anything else necessary to identify this insn in an output. It's valid to use a static buffer for this. The ALIGNED parameter should cause the string to be formatted so that multiple output lines will line up nicely. 
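   With ALIGNED nonzero the output has the fixed-width form "b  2: i  40"; otherwise an insn from a source block prints as "40/b2" and an insn from the target block simply as "40" (block 2 and uid 40 are made-up numbers, purely for illustration).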
*/ static const char * rgn_print_insn (rtx insn, int aligned) { static char tmp[80]; if (aligned) sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn)); else { if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb) sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn)); else sprintf (tmp, "%d", INSN_UID (insn)); } return tmp; } /* Compare priority of two insns. Return a positive number if the second insn is to be preferred for scheduling, and a negative one if the first is to be preferred. Zero if they are equally good. */ static int rgn_rank (rtx insn1, rtx insn2) { /* Some comparison make sense in interblock scheduling only. */ if (INSN_BB (insn1) != INSN_BB (insn2)) { int spec_val, prob_val; /* Prefer an inblock motion on an interblock motion. */ if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb)) return 1; if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb)) return -1; /* Prefer a useful motion on a speculative one. */ spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2); if (spec_val) return spec_val; /* Prefer a more probable (speculative) insn. */ prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1); if (prob_val) return prob_val; } return 0; } /* NEXT is an instruction that depends on INSN (a backward dependence); return nonzero if we should include this dependence in priority calculations. */ static int contributes_to_priority_rgn (rtx next, rtx insn) { return BLOCK_NUM (next) == BLOCK_NUM (insn); } /* INSN is a JUMP_INSN, COND_SET is the set of registers that are conditionally set before INSN. Store the set of registers that must be considered as used by this jump in USED and that of registers that must be considered as set in SET. */ static void compute_jump_reg_dependencies_rgn (rtx insn ATTRIBUTE_UNUSED, regset cond_exec ATTRIBUTE_UNUSED, regset used ATTRIBUTE_UNUSED, regset set ATTRIBUTE_UNUSED) { /* Nothing to do here, since we postprocess jumps in add_branch_dependences. */ } /* Used in schedule_insns to initialize current_sched_info for scheduling regions (or single basic blocks). */ static struct sched_info region_sched_info = { init_ready_list_rgn, can_schedule_ready_p_rgn, schedule_more_p_rgn, new_ready_rgn, rgn_rank, rgn_print_insn, contributes_to_priority_rgn, compute_jump_reg_dependencies_rgn, NULL, NULL, NULL, NULL, 0, 0, 0 }; /* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */ static bool sets_likely_spilled (rtx pat) { bool ret = false; note_stores (pat, sets_likely_spilled_1, &ret); return ret; } static void sets_likely_spilled_1 (rtx x, rtx pat, void *data) { bool *ret = (bool *) data; if (GET_CODE (pat) == SET && REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (x)))) *ret = true; } /* Add dependences so that branches are scheduled to run last in their block. */ static void add_branch_dependences (rtx head, rtx tail) { rtx insn, last; /* For all branches, calls, uses, clobbers, cc0 setters, and instructions that can throw exceptions, force them to remain in order at the end of the block by adding dependencies and giving the last a high priority. There may be notes present, and prev_head may also be a note. Branches must obviously remain at the end. Calls should remain at the end since moving them results in worse register allocation. Uses remain at the end to ensure proper register allocation. cc0 setters remain at the end because they can't be moved away from their cc0 user. 
Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values) are not moved before reload because we can wind up with register allocation failures. */ insn = tail; last = 0; while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN || (GET_CODE (insn) == INSN && (GET_CODE (PATTERN (insn)) == USE || GET_CODE (PATTERN (insn)) == CLOBBER || can_throw_internal (insn) #ifdef HAVE_cc0 || sets_cc0_p (PATTERN (insn)) #endif || (!reload_completed && sets_likely_spilled (PATTERN (insn))))) || GET_CODE (insn) == NOTE) { if (GET_CODE (insn) != NOTE) { if (last != 0 && !find_insn_list (insn, LOG_LINKS (last))) { add_dependence (last, insn, REG_DEP_ANTI); INSN_REF_COUNT (insn)++; } CANT_MOVE (insn) = 1; last = insn; } /* Don't overrun the bounds of the basic block. */ if (insn == head) break; insn = PREV_INSN (insn); } /* Make sure these insns are scheduled last in their block. */ insn = last; if (insn != 0) while (insn != head) { insn = prev_nonnote_insn (insn); if (INSN_REF_COUNT (insn) != 0) continue; add_dependence (last, insn, REG_DEP_ANTI); INSN_REF_COUNT (insn) = 1; } } /* Data structures for the computation of data dependences in a regions. We keep one `deps' structure for every basic block. Before analyzing the data dependences for a bb, its variables are initialized as a function of the variables of its predecessors. When the analysis for a bb completes, we save the contents to the corresponding bb_deps[bb] variable. */ static struct deps *bb_deps; /* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */ static rtx concat_INSN_LIST (rtx copy, rtx old) { rtx new = old; for (; copy ; copy = XEXP (copy, 1)) new = alloc_INSN_LIST (XEXP (copy, 0), new); return new; } static void concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p, rtx *old_mems_p) { rtx new_insns = *old_insns_p; rtx new_mems = *old_mems_p; while (copy_insns) { new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns); new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems); copy_insns = XEXP (copy_insns, 1); copy_mems = XEXP (copy_mems, 1); } *old_insns_p = new_insns; *old_mems_p = new_mems; } /* After computing the dependencies for block BB, propagate the dependencies found in TMP_DEPS to the successors of the block. */ static void propagate_deps (int bb, struct deps *pred_deps) { int b = BB_TO_BLOCK (bb); int e, first_edge; /* bb's structures are inherited by its successors. */ first_edge = e = OUT_EDGES (b); if (e > 0) do { int b_succ = TO_BLOCK (e); int bb_succ = BLOCK_TO_BB (b_succ); struct deps *succ_deps = bb_deps + bb_succ; int reg; /* Only bbs "below" bb, in the same region, are interesting. */ if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ) || bb_succ <= bb) { e = NEXT_OUT (e); continue; } /* The reg_last lists are inherited by bb_succ. */ EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, { struct deps_reg *pred_rl = &pred_deps->reg_last[reg]; struct deps_reg *succ_rl = &succ_deps->reg_last[reg]; succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses); succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets); succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers, succ_rl->clobbers); succ_rl->uses_length += pred_rl->uses_length; succ_rl->clobbers_length += pred_rl->clobbers_length; }); IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use); /* Mem read/write lists are inherited by bb_succ. 
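   (concat_insn_mem_list walks the insn list and the mem list in lockstep, so the i-th pending insn stays paired with the i-th pending memory reference in the successor's copies as well.)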
*/ concat_insn_mem_list (pred_deps->pending_read_insns, pred_deps->pending_read_mems, &succ_deps->pending_read_insns, &succ_deps->pending_read_mems); concat_insn_mem_list (pred_deps->pending_write_insns, pred_deps->pending_write_mems, &succ_deps->pending_write_insns, &succ_deps->pending_write_mems); succ_deps->last_pending_memory_flush = concat_INSN_LIST (pred_deps->last_pending_memory_flush, succ_deps->last_pending_memory_flush); succ_deps->pending_lists_length += pred_deps->pending_lists_length; succ_deps->pending_flush_length += pred_deps->pending_flush_length; /* last_function_call is inherited by bb_succ. */ succ_deps->last_function_call = concat_INSN_LIST (pred_deps->last_function_call, succ_deps->last_function_call); /* sched_before_next_call is inherited by bb_succ. */ succ_deps->sched_before_next_call = concat_INSN_LIST (pred_deps->sched_before_next_call, succ_deps->sched_before_next_call); e = NEXT_OUT (e); } while (e != first_edge); /* These lists should point to the right place, for correct freeing later. */ bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns; bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems; bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns; bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems; /* Can't allow these to be freed twice. */ pred_deps->pending_read_insns = 0; pred_deps->pending_read_mems = 0; pred_deps->pending_write_insns = 0; pred_deps->pending_write_mems = 0; } /* Compute backward dependences inside bb. In a multiple blocks region: (1) a bb is analyzed after its predecessors, and (2) the lists in effect at the end of bb (after analyzing for bb) are inherited by bb's successors. Specifically for reg-reg data dependences, the block insns are scanned by sched_analyze () top-to-bottom. Two lists are maintained by sched_analyze (): reg_last[].sets for register DEFs, and reg_last[].uses for register USEs. When analysis is completed for bb, we update for its successors: ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb]) ; - USES[succ] = Union (USES [succ], DEFS [bb]) The mechanism for computing mem-mem data dependence is very similar, and the result is interblock dependences in the region. */ static void compute_block_backward_dependences (int bb) { rtx head, tail; struct deps tmp_deps; tmp_deps = bb_deps[bb]; /* Do the analysis for this block. */ get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail); sched_analyze (&tmp_deps, head, tail); add_branch_dependences (head, tail); if (current_nr_blocks > 1) propagate_deps (bb, &tmp_deps); /* Free up the INSN_LISTs. */ free_deps (&tmp_deps); } /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add them to the unused_*_list variables, so that they can be reused. */ static void free_pending_lists (void) { int bb; for (bb = 0; bb < current_nr_blocks; bb++) { free_INSN_LIST_list (&bb_deps[bb].pending_read_insns); free_INSN_LIST_list (&bb_deps[bb].pending_write_insns); free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems); free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems); } } /* Print dependences for debugging, callable from debugger. 
*/ void debug_dependencies (void) { int bb; fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n"); for (bb = 0; bb < current_nr_blocks; bb++) { if (1) { rtx head, tail; rtx next_tail; rtx insn; get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail); next_tail = NEXT_INSN (tail); fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n", BB_TO_BLOCK (bb), bb); if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n", "insn", "code", "bb", "dep", "prio", "cost", "reservation"); fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n", "----", "----", "--", "---", "----", "----", "-----------"); } else { fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n", "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units"); fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n", "----", "----", "--", "---", "----", "----", "--------", "-----"); } for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) { rtx link; if (! INSN_P (insn)) { int n; fprintf (sched_dump, ";; %6d ", INSN_UID (insn)); if (GET_CODE (insn) == NOTE) { n = NOTE_LINE_NUMBER (insn); if (n < 0) fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n)); else { expanded_location xloc; NOTE_EXPANDED_LOCATION (xloc, insn); fprintf (sched_dump, "line %d, file %s\n", xloc.line, xloc.file); } } else fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn))); continue; } if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { fprintf (sched_dump, ";; %s%5d%6d%6d%6d%6d%6d ", (SCHED_GROUP_P (insn) ? "+" : " "), INSN_UID (insn), INSN_CODE (insn), INSN_BB (insn), INSN_DEP_COUNT (insn), INSN_PRIORITY (insn), insn_cost (insn, 0, 0)); if (recog_memoized (insn) < 0) fprintf (sched_dump, "nothing"); else print_reservation (sched_dump, insn); } else { int unit = insn_unit (insn); int range = (unit < 0 || function_units[unit].blockage_range_function == 0 ? 0 : function_units[unit].blockage_range_function (insn)); fprintf (sched_dump, ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ", (SCHED_GROUP_P (insn) ? "+" : " "), INSN_UID (insn), INSN_CODE (insn), INSN_BB (insn), INSN_DEP_COUNT (insn), INSN_PRIORITY (insn), insn_cost (insn, 0, 0), (int) MIN_BLOCKAGE_COST (range), (int) MAX_BLOCKAGE_COST (range)); insn_print_units (insn); } fprintf (sched_dump, "\t: "); for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1)) fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0))); fprintf (sched_dump, "\n"); } } } fprintf (sched_dump, "\n"); } /* Schedule a region. A region is either an inner loop, a loop-free subroutine, or a single basic block. Each bb in the region is scheduled after its flow predecessors. */ static void schedule_region (int rgn) { int bb; int rgn_n_insns = 0; int sched_rgn_n_insns = 0; /* Set variables for the current region. */ current_nr_blocks = RGN_NR_BLOCKS (rgn); current_blocks = RGN_BLOCKS (rgn); init_deps_global (); /* Initializations for region data dependence analysis. */ bb_deps = xmalloc (sizeof (struct deps) * current_nr_blocks); for (bb = 0; bb < current_nr_blocks; bb++) init_deps (bb_deps + bb); /* Compute LOG_LINKS. */ for (bb = 0; bb < current_nr_blocks; bb++) compute_block_backward_dependences (bb); /* Compute INSN_DEPEND. 
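   That is, derive the forward INSN_DEPEND lists from the backward LOG_LINKS built above; these, together with INSN_DEP_COUNT, are what init_ready_list_rgn uses to find the insns that are ready at the start of each block.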
*/ for (bb = current_nr_blocks - 1; bb >= 0; bb--) { rtx head, tail; get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail); compute_forward_dependences (head, tail); if (targetm.sched.dependencies_evaluation_hook) targetm.sched.dependencies_evaluation_hook (head, tail); } /* Set priorities. */ for (bb = 0; bb < current_nr_blocks; bb++) { rtx head, tail; get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail); rgn_n_insns += set_priorities (head, tail); } /* Compute interblock info: probabilities, split-edges, dominators, etc. */ if (current_nr_blocks > 1) { int i; prob = xmalloc ((current_nr_blocks) * sizeof (float)); dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks); sbitmap_vector_zero (dom, current_nr_blocks); /* Edge to bit. */ rgn_nr_edges = 0; edge_to_bit = xmalloc (nr_edges * sizeof (int)); for (i = 1; i < nr_edges; i++) if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn) EDGE_TO_BIT (i) = rgn_nr_edges++; rgn_edges = xmalloc (rgn_nr_edges * sizeof (int)); rgn_nr_edges = 0; for (i = 1; i < nr_edges; i++) if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn)) rgn_edges[rgn_nr_edges++] = i; /* Split edges. */ pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); sbitmap_vector_zero (pot_split, current_nr_blocks); ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges); sbitmap_vector_zero (ancestor_edges, current_nr_blocks); /* Compute probabilities, dominators, split_edges. */ for (bb = 0; bb < current_nr_blocks; bb++) compute_dom_prob_ps (bb); } /* Now we can schedule all blocks. */ for (bb = 0; bb < current_nr_blocks; bb++) { rtx head, tail; int b = BB_TO_BLOCK (bb); get_block_head_tail (b, &head, &tail); if (no_real_insns_p (head, tail)) continue; current_sched_info->prev_head = PREV_INSN (head); current_sched_info->next_tail = NEXT_INSN (tail); if (write_symbols != NO_DEBUG) { save_line_notes (b, head, tail); rm_line_notes (head, tail); } /* rm_other_notes only removes notes which are _inside_ the block---that is, it won't remove notes before the first real insn or after the last real insn of the block. So if the first insn has a REG_SAVE_NOTE which would otherwise be emitted before the insn, it is redundant with the note before the start of the block, and so we have to take it out. */ if (INSN_P (head)) { rtx note; for (note = REG_NOTES (head); note; note = XEXP (note, 1)) if (REG_NOTE_KIND (note) == REG_SAVE_NOTE) { remove_note (head, note); note = XEXP (note, 1); remove_note (head, note); } } /* Remove remaining note insns from the block, save them in note_list. These notes are restored at the end of schedule_block (). */ rm_other_notes (head, tail); target_bb = bb; current_sched_info->queue_must_finish_empty = current_nr_blocks > 1 && !flag_schedule_interblock; schedule_block (b, rgn_n_insns); sched_rgn_n_insns += sched_n_insns; /* Update target block boundaries. */ if (head == BB_HEAD (BASIC_BLOCK (b))) BB_HEAD (BASIC_BLOCK (b)) = current_sched_info->head; if (tail == BB_END (BASIC_BLOCK (b))) BB_END (BASIC_BLOCK (b)) = current_sched_info->tail; /* Clean up. */ if (current_nr_blocks > 1) { free (candidate_table); free (bblst_table); free (bitlst_table); } } /* Sanity check: verify that all region insns were scheduled. */ if (sched_rgn_n_insns != rgn_n_insns) abort (); /* Restore line notes. */ if (write_symbols != NO_DEBUG) { for (bb = 0; bb < current_nr_blocks; bb++) { rtx head, tail; get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail); restore_line_notes (head, tail); } } /* Done with this region. 
*/ free_pending_lists (); finish_deps_global (); free (bb_deps); if (current_nr_blocks > 1) { free (prob); sbitmap_vector_free (dom); sbitmap_vector_free (pot_split); sbitmap_vector_free (ancestor_edges); free (edge_to_bit); free (rgn_edges); } } /* Indexed by region, holds the number of death notes found in that region. Used for consistency checks. */ static int *deaths_in_region; /* Initialize data structures for region scheduling. */ static void init_regions (void) { sbitmap blocks; int rgn; nr_regions = 0; rgn_table = xmalloc ((n_basic_blocks) * sizeof (region)); rgn_bb_table = xmalloc ((n_basic_blocks) * sizeof (int)); block_to_bb = xmalloc ((last_basic_block) * sizeof (int)); containing_rgn = xmalloc ((last_basic_block) * sizeof (int)); /* Compute regions for scheduling. */ if (reload_completed || n_basic_blocks == 1 || !flag_schedule_interblock) { find_single_block_region (); } else { /* Verify that a 'good' control flow graph can be built. */ if (is_cfg_nonregular ()) { find_single_block_region (); } else { struct edge_list *edge_list; /* The scheduler runs after estimate_probabilities; therefore, we can't blindly call back into find_basic_blocks since doing so could invalidate the branch probability info. We could, however, call cleanup_cfg. */ edge_list = create_edge_list (); /* Compute the dominators and post dominators. */ calculate_dominance_info (CDI_DOMINATORS); /* build_control_flow will return nonzero if it detects unreachable blocks or any other irregularity with the cfg which prevents cross block scheduling. */ if (build_control_flow (edge_list) != 0) find_single_block_region (); else find_rgns (edge_list); if (sched_verbose >= 3) debug_regions (); /* We are done with flow's edge list. */ free_edge_list (edge_list); /* For now. This will move as more and more of haifa is converted to using the cfg code in flow.c. */ free_dominance_info (CDI_DOMINATORS); } } if (CHECK_DEAD_NOTES) { blocks = sbitmap_alloc (last_basic_block); deaths_in_region = xmalloc (sizeof (int) * nr_regions); /* Remove all death notes from the subroutine. */ for (rgn = 0; rgn < nr_regions; rgn++) { int b; sbitmap_zero (blocks); for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b) SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]); deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1); } sbitmap_free (blocks); } else count_or_remove_death_notes (NULL, 1); } /* The one entry point in this file. DUMP_FILE is the dump file for this pass. */ void schedule_insns (FILE *dump_file) { sbitmap large_region_blocks, blocks; int rgn; int any_large_regions; basic_block bb; /* Taking care of this degenerate case makes the rest of this code simpler. */ if (n_basic_blocks == 0) return; nr_inter = 0; nr_spec = 0; sched_init (dump_file); init_regions (); current_sched_info = ®ion_sched_info; /* Schedule every region in the subroutine. */ for (rgn = 0; rgn < nr_regions; rgn++) schedule_region (rgn); /* Update life analysis for the subroutine. Do single block regions first so that we can verify that live_at_start didn't change. Then do all other blocks. */ /* ??? There is an outside possibility that update_life_info, or more to the point propagate_block, could get called with nonzero flags more than once for one basic block. This would be kinda bad if it were to happen, since REG_INFO would be accumulated twice for the block, and we'd have twice the REG_DEAD notes. I'm fairly certain that this _shouldn't_ happen, since I don't think that live_at_start should change at region heads. 
Not sure what the best way to test for this kind of thing... */ allocate_reg_life_data (); compute_bb_for_insn (); any_large_regions = 0; large_region_blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (large_region_blocks); FOR_EACH_BB (bb) SET_BIT (large_region_blocks, bb->index); blocks = sbitmap_alloc (last_basic_block); sbitmap_zero (blocks); /* Update life information. For regions consisting of multiple blocks we've possibly done interblock scheduling that affects global liveness. For regions consisting of single blocks we need to do only local liveness. */ for (rgn = 0; rgn < nr_regions; rgn++) if (RGN_NR_BLOCKS (rgn) > 1) any_large_regions = 1; else { SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]); RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]); } /* Don't update reg info after reload, since that affects regs_ever_live, which should not change after reload. */ update_life_info (blocks, UPDATE_LIFE_LOCAL, (reload_completed ? PROP_DEATH_NOTES : PROP_DEATH_NOTES | PROP_REG_INFO)); if (any_large_regions) { update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL, PROP_DEATH_NOTES | PROP_REG_INFO); } if (CHECK_DEAD_NOTES) { /* Verify the counts of basic block notes in single the basic block regions. */ for (rgn = 0; rgn < nr_regions; rgn++) if (RGN_NR_BLOCKS (rgn) == 1) { sbitmap_zero (blocks); SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]); if (deaths_in_region[rgn] != count_or_remove_death_notes (blocks, 0)) abort (); } free (deaths_in_region); } /* Reposition the prologue and epilogue notes in case we moved the prologue/epilogue insns. */ if (reload_completed) reposition_prologue_and_epilogue_notes (get_insns ()); /* Delete redundant line notes. */ if (write_symbols != NO_DEBUG) rm_redundant_line_notes (); if (sched_verbose) { if (reload_completed == 0 && flag_schedule_interblock) { fprintf (sched_dump, "\n;; Procedure interblock/speculative motions == %d/%d \n", nr_inter, nr_spec); } else { if (nr_inter > 0) abort (); } fprintf (sched_dump, "\n\n"); } /* Clean up. */ free (rgn_table); free (rgn_bb_table); free (block_to_bb); free (containing_rgn); sched_finish (); if (edge_table) { free (edge_table); edge_table = NULL; } if (in_edges) { free (in_edges); in_edges = NULL; } if (out_edges) { free (out_edges); out_edges = NULL; } sbitmap_free (blocks); sbitmap_free (large_region_blocks); } #endif /* Instruction scheduling pass. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef INSN_SCHEDULING /* target_units bitmask has 1 for each unit in the cpu. It should be possible to compute this variable from the machine description. But currently it is computed by examining the insn list. 
Since this is only needed for visualization, it seems an acceptable solution. (For understanding the mapping of bits to units, see definition of function_units[] in "insn-attrtab.c".) The scheduler using only DFA description should never use the following variable. */ static int target_units = 0; static char *safe_concat (char *, char *, const char *); static int get_visual_tbl_length (void); static void print_exp (char *, rtx, int); static void print_value (char *, rtx, int); static void print_pattern (char *, rtx, int); /* Print names of units on which insn can/should execute, for debugging. */ void insn_print_units (rtx insn) { int i; int unit = insn_unit (insn); if (unit == -1) fprintf (sched_dump, "none"); else if (unit >= 0) fprintf (sched_dump, "%s", function_units[unit].name); else { fprintf (sched_dump, "["); for (i = 0, unit = ~unit; unit; i++, unit >>= 1) if (unit & 1) { fprintf (sched_dump, "%s", function_units[i].name); if (unit != 1) fprintf (sched_dump, " "); } fprintf (sched_dump, "]"); } } /* MAX_VISUAL_LINES is the maximum number of lines in visualization table of a basic block. If more lines are needed, table is split to two. n_visual_lines is the number of lines printed so far for a block. visual_tbl contains the block visualization info. vis_no_unit holds insns in a cycle that are not mapped to any unit. */ #define MAX_VISUAL_LINES 100 #define INSN_LEN 30 int n_visual_lines; static unsigned visual_tbl_line_length; char *visual_tbl; int n_vis_no_unit; #define MAX_VISUAL_NO_UNIT 20 rtx vis_no_unit[MAX_VISUAL_NO_UNIT]; /* Finds units that are in use in this function. Required only for visualization. */ void init_target_units (void) { rtx insn; int unit; for (insn = get_last_insn (); insn; insn = PREV_INSN (insn)) { if (! INSN_P (insn)) continue; unit = insn_unit (insn); if (unit < 0) target_units |= ~unit; else target_units |= (1 << unit); } } /* Return the length of the visualization table. */ static int get_visual_tbl_length (void) { int unit, i; int n, n1; char *s; if (targetm.sched.use_dfa_pipeline_interface && targetm.sched.use_dfa_pipeline_interface ()) { visual_tbl_line_length = 1; return 1; /* Can't return 0 because that will cause problems with alloca. */ } /* Compute length of one field in line. */ s = alloca (INSN_LEN + 6); sprintf (s, " %33s", "uname"); n1 = strlen (s); /* Compute length of one line. */ n = strlen (";; "); n += n1; for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++) if (function_units[unit].bitmask & target_units) for (i = 0; i < function_units[unit].multiplicity; i++) n += n1; n += n1; n += strlen ("\n") + 2; visual_tbl_line_length = n; /* Compute length of visualization string. */ return (MAX_VISUAL_LINES * n); } /* Init block visualization debugging info. */ void init_block_visualization (void) { strcpy (visual_tbl, ""); n_visual_lines = 0; n_vis_no_unit = 0; } #define BUF_LEN 2048 static char * safe_concat (char *buf, char *cur, const char *str) { char *end = buf + BUF_LEN - 2; /* Leave room for null. */ int c; if (cur > end) { *end = '\0'; return end; } while (cur < end && (c = *str++) != '\0') *cur++ = c; *cur = '\0'; return cur; } /* This recognizes rtx, I classified as expressions. These are always represent some action on values or results of other expression, that may be stored in objects representing values. 
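For example, (plus (reg 65) (const_int -4)) is printed as r65-0x4, and (compare (reg 65) (reg 66)) as cmp(r65,r66); the register numbers here are just illustrative pseudos.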
*/ static void print_exp (char *buf, rtx x, int verbose) { char tmp[BUF_LEN]; const char *st[4]; char *cur = buf; const char *fun = (char *) 0; const char *sep; rtx op[4]; int i; for (i = 0; i < 4; i++) { st[i] = (char *) 0; op[i] = NULL_RTX; } switch (GET_CODE (x)) { case PLUS: op[0] = XEXP (x, 0); if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) < 0) { st[1] = "-"; op[1] = GEN_INT (-INTVAL (XEXP (x, 1))); } else { st[1] = "+"; op[1] = XEXP (x, 1); } break; case LO_SUM: op[0] = XEXP (x, 0); st[1] = "+low("; op[1] = XEXP (x, 1); st[2] = ")"; break; case MINUS: op[0] = XEXP (x, 0); st[1] = "-"; op[1] = XEXP (x, 1); break; case COMPARE: fun = "cmp"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case NEG: st[0] = "-"; op[0] = XEXP (x, 0); break; case MULT: op[0] = XEXP (x, 0); st[1] = "*"; op[1] = XEXP (x, 1); break; case DIV: op[0] = XEXP (x, 0); st[1] = "/"; op[1] = XEXP (x, 1); break; case UDIV: fun = "udiv"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case MOD: op[0] = XEXP (x, 0); st[1] = "%"; op[1] = XEXP (x, 1); break; case UMOD: fun = "umod"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case SMIN: fun = "smin"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case SMAX: fun = "smax"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case UMIN: fun = "umin"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case UMAX: fun = "umax"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case NOT: st[0] = "!"; op[0] = XEXP (x, 0); break; case AND: op[0] = XEXP (x, 0); st[1] = "&"; op[1] = XEXP (x, 1); break; case IOR: op[0] = XEXP (x, 0); st[1] = "|"; op[1] = XEXP (x, 1); break; case XOR: op[0] = XEXP (x, 0); st[1] = "^"; op[1] = XEXP (x, 1); break; case ASHIFT: op[0] = XEXP (x, 0); st[1] = "<<"; op[1] = XEXP (x, 1); break; case LSHIFTRT: op[0] = XEXP (x, 0); st[1] = " 0>>"; op[1] = XEXP (x, 1); break; case ASHIFTRT: op[0] = XEXP (x, 0); st[1] = ">>"; op[1] = XEXP (x, 1); break; case ROTATE: op[0] = XEXP (x, 0); st[1] = "<-<"; op[1] = XEXP (x, 1); break; case ROTATERT: op[0] = XEXP (x, 0); st[1] = ">->"; op[1] = XEXP (x, 1); break; case ABS: fun = "abs"; op[0] = XEXP (x, 0); break; case SQRT: fun = "sqrt"; op[0] = XEXP (x, 0); break; case FFS: fun = "ffs"; op[0] = XEXP (x, 0); break; case EQ: op[0] = XEXP (x, 0); st[1] = "=="; op[1] = XEXP (x, 1); break; case NE: op[0] = XEXP (x, 0); st[1] = "!="; op[1] = XEXP (x, 1); break; case GT: op[0] = XEXP (x, 0); st[1] = ">"; op[1] = XEXP (x, 1); break; case GTU: fun = "gtu"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case LT: op[0] = XEXP (x, 0); st[1] = "<"; op[1] = XEXP (x, 1); break; case LTU: fun = "ltu"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case GE: op[0] = XEXP (x, 0); st[1] = ">="; op[1] = XEXP (x, 1); break; case GEU: fun = "geu"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case LE: op[0] = XEXP (x, 0); st[1] = "<="; op[1] = XEXP (x, 1); break; case LEU: fun = "leu"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); break; case SIGN_EXTRACT: fun = (verbose) ? "sign_extract" : "sxt"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); op[2] = XEXP (x, 2); break; case ZERO_EXTRACT: fun = (verbose) ? "zero_extract" : "zxt"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); op[2] = XEXP (x, 2); break; case SIGN_EXTEND: fun = (verbose) ? "sign_extend" : "sxn"; op[0] = XEXP (x, 0); break; case ZERO_EXTEND: fun = (verbose) ? "zero_extend" : "zxn"; op[0] = XEXP (x, 0); break; case FLOAT_EXTEND: fun = (verbose) ? "float_extend" : "fxn"; op[0] = XEXP (x, 0); break; case TRUNCATE: fun = (verbose) ? 
"trunc" : "trn"; op[0] = XEXP (x, 0); break; case FLOAT_TRUNCATE: fun = (verbose) ? "float_trunc" : "ftr"; op[0] = XEXP (x, 0); break; case FLOAT: fun = (verbose) ? "float" : "flt"; op[0] = XEXP (x, 0); break; case UNSIGNED_FLOAT: fun = (verbose) ? "uns_float" : "ufl"; op[0] = XEXP (x, 0); break; case FIX: fun = "fix"; op[0] = XEXP (x, 0); break; case UNSIGNED_FIX: fun = (verbose) ? "uns_fix" : "ufx"; op[0] = XEXP (x, 0); break; case PRE_DEC: st[0] = "--"; op[0] = XEXP (x, 0); break; case PRE_INC: st[0] = "++"; op[0] = XEXP (x, 0); break; case POST_DEC: op[0] = XEXP (x, 0); st[1] = "--"; break; case POST_INC: op[0] = XEXP (x, 0); st[1] = "++"; break; case CALL: st[0] = "call "; op[0] = XEXP (x, 0); if (verbose) { st[1] = " argc:"; op[1] = XEXP (x, 1); } break; case IF_THEN_ELSE: st[0] = "{("; op[0] = XEXP (x, 0); st[1] = ")?"; op[1] = XEXP (x, 1); st[2] = ":"; op[2] = XEXP (x, 2); st[3] = "}"; break; case TRAP_IF: fun = "trap_if"; op[0] = TRAP_CONDITION (x); break; case PREFETCH: fun = "prefetch"; op[0] = XEXP (x, 0); op[1] = XEXP (x, 1); op[2] = XEXP (x, 2); break; case UNSPEC: case UNSPEC_VOLATILE: { cur = safe_concat (buf, cur, "unspec"); if (GET_CODE (x) == UNSPEC_VOLATILE) cur = safe_concat (buf, cur, "/v"); cur = safe_concat (buf, cur, "["); sep = ""; for (i = 0; i < XVECLEN (x, 0); i++) { print_pattern (tmp, XVECEXP (x, 0, i), verbose); cur = safe_concat (buf, cur, sep); cur = safe_concat (buf, cur, tmp); sep = ","; } cur = safe_concat (buf, cur, "] "); sprintf (tmp, "%d", XINT (x, 1)); cur = safe_concat (buf, cur, tmp); } break; default: /* If (verbose) debug_rtx (x); */ st[0] = GET_RTX_NAME (GET_CODE (x)); break; } /* Print this as a function? */ if (fun) { cur = safe_concat (buf, cur, fun); cur = safe_concat (buf, cur, "("); } for (i = 0; i < 4; i++) { if (st[i]) cur = safe_concat (buf, cur, st[i]); if (op[i]) { if (fun && i != 0) cur = safe_concat (buf, cur, ","); print_value (tmp, op[i], verbose); cur = safe_concat (buf, cur, tmp); } } if (fun) cur = safe_concat (buf, cur, ")"); } /* print_exp */ /* Prints rtxes, I customarily classified as values. They're constants, registers, labels, symbols and memory accesses. 
*/ static void print_value (char *buf, rtx x, int verbose) { char t[BUF_LEN]; char *cur = buf; switch (GET_CODE (x)) { case CONST_INT: sprintf (t, HOST_WIDE_INT_PRINT_HEX, INTVAL (x)); cur = safe_concat (buf, cur, t); break; case CONST_DOUBLE: if (FLOAT_MODE_P (GET_MODE (x))) real_to_decimal (t, CONST_DOUBLE_REAL_VALUE (x), sizeof (t), 0, 1); else sprintf (t, "<0x%lx,0x%lx>", (long) XWINT (x, 2), (long) XWINT (x, 3)); cur = safe_concat (buf, cur, t); break; case CONST_STRING: cur = safe_concat (buf, cur, "\""); cur = safe_concat (buf, cur, XSTR (x, 0)); cur = safe_concat (buf, cur, "\""); break; case SYMBOL_REF: cur = safe_concat (buf, cur, "`"); cur = safe_concat (buf, cur, XSTR (x, 0)); cur = safe_concat (buf, cur, "'"); break; case LABEL_REF: sprintf (t, "L%d", INSN_UID (XEXP (x, 0))); cur = safe_concat (buf, cur, t); break; case CONST: print_value (t, XEXP (x, 0), verbose); cur = safe_concat (buf, cur, "const("); cur = safe_concat (buf, cur, t); cur = safe_concat (buf, cur, ")"); break; case HIGH: print_value (t, XEXP (x, 0), verbose); cur = safe_concat (buf, cur, "high("); cur = safe_concat (buf, cur, t); cur = safe_concat (buf, cur, ")"); break; case REG: if (REGNO (x) < FIRST_PSEUDO_REGISTER) { int c = reg_names[REGNO (x)][0]; if (ISDIGIT (c)) cur = safe_concat (buf, cur, "%"); cur = safe_concat (buf, cur, reg_names[REGNO (x)]); } else { sprintf (t, "r%d", REGNO (x)); cur = safe_concat (buf, cur, t); } break; case SUBREG: print_value (t, SUBREG_REG (x), verbose); cur = safe_concat (buf, cur, t); sprintf (t, "#%d", SUBREG_BYTE (x)); cur = safe_concat (buf, cur, t); break; case SCRATCH: cur = safe_concat (buf, cur, "scratch"); break; case CC0: cur = safe_concat (buf, cur, "cc0"); break; case PC: cur = safe_concat (buf, cur, "pc"); break; case MEM: print_value (t, XEXP (x, 0), verbose); cur = safe_concat (buf, cur, "["); cur = safe_concat (buf, cur, t); cur = safe_concat (buf, cur, "]"); break; default: print_exp (t, x, verbose); cur = safe_concat (buf, cur, t); break; } } /* print_value */ /* The next step in insn detalization, its pattern recognition. */ static void print_pattern (char *buf, rtx x, int verbose) { char t1[BUF_LEN], t2[BUF_LEN], t3[BUF_LEN]; switch (GET_CODE (x)) { case SET: print_value (t1, SET_DEST (x), verbose); print_value (t2, SET_SRC (x), verbose); sprintf (buf, "%s=%s", t1, t2); break; case RETURN: sprintf (buf, "return"); break; case CALL: print_exp (buf, x, verbose); break; case CLOBBER: print_value (t1, XEXP (x, 0), verbose); sprintf (buf, "clobber %s", t1); break; case USE: print_value (t1, XEXP (x, 0), verbose); sprintf (buf, "use %s", t1); break; case COND_EXEC: if (GET_CODE (COND_EXEC_TEST (x)) == NE && XEXP (COND_EXEC_TEST (x), 1) == const0_rtx) print_value (t1, XEXP (COND_EXEC_TEST (x), 0), verbose); else if (GET_CODE (COND_EXEC_TEST (x)) == EQ && XEXP (COND_EXEC_TEST (x), 1) == const0_rtx) { t1[0] = '!'; print_value (t1 + 1, XEXP (COND_EXEC_TEST (x), 0), verbose); } else print_value (t1, COND_EXEC_TEST (x), verbose); print_pattern (t2, COND_EXEC_CODE (x), verbose); sprintf (buf, "(%s) %s", t1, t2); break; case PARALLEL: { int i; sprintf (t1, "{"); for (i = 0; i < XVECLEN (x, 0); i++) { print_pattern (t2, XVECEXP (x, 0, i), verbose); sprintf (t3, "%s%s;", t1, t2); strcpy (t1, t3); } sprintf (buf, "%s}", t1); } break; case SEQUENCE: /* Should never see SEQUENCE codes until after reorg. 
*/ abort (); break; case ASM_INPUT: sprintf (buf, "asm {%s}", XSTR (x, 0)); break; case ADDR_VEC: break; case ADDR_DIFF_VEC: print_value (buf, XEXP (x, 0), verbose); break; case TRAP_IF: print_value (t1, TRAP_CONDITION (x), verbose); sprintf (buf, "trap_if %s", t1); break; case UNSPEC: { int i; sprintf (t1, "unspec{"); for (i = 0; i < XVECLEN (x, 0); i++) { print_pattern (t2, XVECEXP (x, 0, i), verbose); sprintf (t3, "%s%s;", t1, t2); strcpy (t1, t3); } sprintf (buf, "%s}", t1); } break; case UNSPEC_VOLATILE: { int i; sprintf (t1, "unspec/v{"); for (i = 0; i < XVECLEN (x, 0); i++) { print_pattern (t2, XVECEXP (x, 0, i), verbose); sprintf (t3, "%s%s;", t1, t2); strcpy (t1, t3); } sprintf (buf, "%s}", t1); } break; default: print_value (buf, x, verbose); } } /* print_pattern */ /* This is the main function in rtl visualization mechanism. It accepts an rtx and tries to recognize it as an insn, then prints it properly in human readable form, resembling assembler mnemonics. For every insn it prints its UID and BB the insn belongs too. (Probably the last "option" should be extended somehow, since it depends now on sched.c inner variables ...) */ void print_insn (char *buf, rtx x, int verbose) { char t[BUF_LEN]; rtx insn = x; switch (GET_CODE (x)) { case INSN: print_pattern (t, PATTERN (x), verbose); if (verbose) sprintf (buf, "%s: %s", (*current_sched_info->print_insn) (x, 1), t); else sprintf (buf, "%-4d %s", INSN_UID (x), t); break; case JUMP_INSN: print_pattern (t, PATTERN (x), verbose); if (verbose) sprintf (buf, "%s: jump %s", (*current_sched_info->print_insn) (x, 1), t); else sprintf (buf, "%-4d %s", INSN_UID (x), t); break; case CALL_INSN: x = PATTERN (insn); if (GET_CODE (x) == PARALLEL) { x = XVECEXP (x, 0, 0); print_pattern (t, x, verbose); } else strcpy (t, "call <...>"); if (verbose) sprintf (buf, "%s: %s", (*current_sched_info->print_insn) (x, 1), t); else sprintf (buf, "%-4d %s", INSN_UID (insn), t); break; case CODE_LABEL: sprintf (buf, "L%d:", INSN_UID (x)); break; case BARRIER: sprintf (buf, "i% 4d: barrier", INSN_UID (x)); break; case NOTE: if (NOTE_LINE_NUMBER (x) > 0) { expanded_location xloc; NOTE_EXPANDED_LOCATION (xloc, x); sprintf (buf, "%4d note \"%s\" %d", INSN_UID (x), xloc.file, xloc.line); } else sprintf (buf, "%4d %s", INSN_UID (x), GET_NOTE_INSN_NAME (NOTE_LINE_NUMBER (x))); break; default: if (verbose) { sprintf (buf, "Not an INSN at all\n"); debug_rtx (x); } else sprintf (buf, "i%-4d ", INSN_UID (x)); } } /* print_insn */ /* Print visualization debugging info. The scheduler using only DFA description should never use the following function. */ void print_block_visualization (const char *s) { int unit, i; /* Print header. */ fprintf (sched_dump, "\n;; ==================== scheduling visualization %s \n", s); /* Print names of units. */ fprintf (sched_dump, ";; %-8s", "clock"); for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++) if (function_units[unit].bitmask & target_units) for (i = 0; i < function_units[unit].multiplicity; i++) fprintf (sched_dump, " %-33s", function_units[unit].name); fprintf (sched_dump, " %-8s\n", "no-unit"); fprintf (sched_dump, ";; %-8s", "====="); for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++) if (function_units[unit].bitmask & target_units) for (i = 0; i < function_units[unit].multiplicity; i++) fprintf (sched_dump, " %-33s", "=============================="); fprintf (sched_dump, " %-8s\n", "======="); /* Print insns in each cycle. */ fprintf (sched_dump, "%s\n", visual_tbl); } /* Print insns in the 'no_unit' column of visualization. 
*/ void visualize_no_unit (rtx insn) { if (n_vis_no_unit < MAX_VISUAL_NO_UNIT) { vis_no_unit[n_vis_no_unit] = insn; n_vis_no_unit++; } } /* Print insns scheduled in clock, for visualization. */ void visualize_scheduled_insns (int clock) { int i, unit; /* If no more room, split table into two. */ if (n_visual_lines >= MAX_VISUAL_LINES) { print_block_visualization ("(incomplete)"); init_block_visualization (); } n_visual_lines++; sprintf (visual_tbl + strlen (visual_tbl), ";; %-8d", clock); for (unit = 0; unit < FUNCTION_UNITS_SIZE; unit++) if (function_units[unit].bitmask & target_units) for (i = 0; i < function_units[unit].multiplicity; i++) { int instance = unit + i * FUNCTION_UNITS_SIZE; rtx insn = get_unit_last_insn (instance); /* Print insns that still keep the unit busy. */ if (insn && actual_hazard_this_instance (unit, instance, insn, clock, 0)) { char str[BUF_LEN]; print_insn (str, insn, 0); str[INSN_LEN] = '\0'; sprintf (visual_tbl + strlen (visual_tbl), " %-33s", str); } else sprintf (visual_tbl + strlen (visual_tbl), " %-33s", "------------------------------"); } /* Print insns that are not assigned to any unit. */ for (i = 0; i < n_vis_no_unit; i++) sprintf (visual_tbl + strlen (visual_tbl), " %-8d", INSN_UID (vis_no_unit[i])); n_vis_no_unit = 0; sprintf (visual_tbl + strlen (visual_tbl), "\n"); } /* Print stalled cycles. */ void visualize_stall_cycles (int stalls) { static const char *const prefix = ";; "; const char *suffix = "\n"; char *p; /* If no more room, split table into two. */ if (n_visual_lines >= MAX_VISUAL_LINES) { print_block_visualization ("(incomplete)"); init_block_visualization (); } n_visual_lines++; p = visual_tbl + strlen (visual_tbl); strcpy (p, prefix); p += strlen (prefix); if ((unsigned) stalls > visual_tbl_line_length - strlen (prefix) - strlen (suffix)) { suffix = "[...]\n"; stalls = visual_tbl_line_length - strlen (prefix) - strlen (suffix); } memset (p, '.', stalls); p += stalls; strcpy (p, suffix); } /* Allocate data used for visualization during scheduling. */ void visualize_alloc (void) { visual_tbl = xmalloc (get_visual_tbl_length ()); } /* Free data used for visualization. */ void visualize_free (void) { free (visual_tbl); } #endif /* Output sdb-format symbol table information from GNU compiler. Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* mike@tredysvr.Tredydev.Unisys.COM says: I modified the struct.c example and have a nm of a .o resulting from the AT&T C compiler. From the example below I would conclude the following: 1. All .defs from structures are emitted as scanned. The example below clearly shows the symbol table entries for BoxRec2 are after the first function. 2. All functions and their locals (including statics) are emitted as scanned. 3. 
All nested unnamed union and structure .defs must be emitted before the structure in which they are nested. The AT&T assembler is a one pass beast as far as symbolics are concerned. 4. All structure .defs are emitted before the typedefs that refer to them. 5. All top level static and external variable definitions are moved to the end of file with all top level statics occurring first before externs. 6. All undefined references are at the end of the file. */ static GTY(()) tree anonymous_types; /* Counter for sdbout_source_line. */ static GTY(()) int sdbout_source_line_counter; /* Counter to generate unique "names" for nameless struct members. */ static GTY(()) int unnamed_struct_number; /* Declarations whose debug info was deferred till end of compilation. */ static GTY(()) varray_type deferred_global_decls; /* The C front end may call sdbout_symbol before sdbout_init runs. We save all such decls in this list and output them when we get to sdbout_init. */ static GTY(()) tree preinit_symbols; static GTY(()) bool sdbout_initialized; #ifdef SDB_DEBUGGING_INFO /* For cross compilation, use the portable definitions from the COFF documentation. */ #define __GNU_SYMS__ enum sdb_storage_class { C_EFCN = -1, C_NULL = 0, C_AUTO = 1, C_EXT = 2, C_STAT = 3, C_REG = 4, C_EXTDEF = 5, C_LABEL = 6, C_ULABEL = 7, C_MOS = 8, C_ARG = 9, C_STRTAG = 10, C_MOU = 11, C_UNTAG = 12, C_TPDEF = 13, C_USTATIC = 14, C_ENTAG = 15, C_MOE = 16, C_REGPARM = 17, C_FIELD = 18, C_BLOCK = 100, C_FCN = 101, C_EOS = 102, C_FILE = 103, C_LINE = 104, C_ALIAS = 105, C_HIDDEN = 106 }; enum sdb_type { T_NULL = 0, T_ARG = 1, T_VOID = 1, T_CHAR = 2, T_SHORT = 3, T_INT = 4, T_LONG = 5, T_FLOAT = 6, T_DOUBLE = 7, T_STRUCT = 8, T_UNION = 9, T_ENUM = 10, T_MOE = 11, T_UCHAR = 12, T_USHORT = 13, T_UINT = 14, T_ULONG = 15 #ifdef EXTENDED_SDB_BASIC_TYPES , T_LNGDBL = 16 #endif }; enum sdb_type_class { DT_NON = 0, DT_PTR = 1, DT_FCN = 2, DT_ARY = 3 }; enum sdb_masks { #ifdef EXTENDED_SDB_BASIC_TYPES N_BTMASK = 0x1f, N_TMASK = 0x60, N_TMASK1 = 0x300, N_TMASK2 = 0x360, N_BTSHFT = 5, #else N_BTMASK = 017, N_TMASK = 060, N_TMASK1 = 0300, N_TMASK2 = 0360, N_BTSHFT = 4, #endif N_TSHIFT = 2 }; /* 1 if PARM is passed to this function in memory. */ #define PARM_PASSED_IN_MEMORY(PARM) \ (MEM_P (DECL_INCOMING_RTL (PARM))) /* A C expression for the integer offset value of an automatic variable (C_AUTO) having address X (an RTX). */ #ifndef DEBUGGER_AUTO_OFFSET #define DEBUGGER_AUTO_OFFSET(X) \ (GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0) #endif /* A C expression for the integer offset value of an argument (C_ARG) having address X (an RTX). The nominal offset is OFFSET. */ #ifndef DEBUGGER_ARG_OFFSET #define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET) #endif /* Line number of beginning of current function, minus one. Negative means not in a function or not using sdb. */ int sdb_begin_function_line = -1; extern FILE *asm_out_file; extern tree current_function_decl; /* sdbout.h - Various declarations for functions found in sdbout.c Copyright (C) 1998, 2000, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ extern void sdbout_symbol (tree, int); extern void sdbout_types (tree); static void sdbout_init (const char *); static void sdbout_finish (const char *); static void sdbout_start_source_file (unsigned int, const char *); static void sdbout_end_source_file (unsigned int); static void sdbout_begin_block (unsigned int, unsigned int); static void sdbout_end_block (unsigned int, unsigned int); static void sdbout_source_line (unsigned int, const char *); static void sdbout_end_epilogue (unsigned int, const char *); static void sdbout_global_decl (tree); #ifndef MIPS_DEBUGGING_INFO static void sdbout_begin_prologue (unsigned int, const char *); #endif static void sdbout_end_prologue (unsigned int, const char *); static void sdbout_begin_function (tree); static void sdbout_end_function (unsigned int); static void sdbout_toplevel_data (tree); static void sdbout_label (rtx); static char *gen_fake_label (void); static int plain_type (tree); static int template_name_p (tree); static void sdbout_record_type_name (tree); static int plain_type_1 (tree, int); static void sdbout_block (tree); static void sdbout_syms (tree); #ifdef SDB_ALLOW_FORWARD_REFERENCES static void sdbout_queue_anonymous_type (tree); static void sdbout_dequeue_anonymous_types (void); #endif static void sdbout_type (tree); static void sdbout_field_types (tree); static void sdbout_one_type (tree); static void sdbout_parms (tree); static void sdbout_reg_parms (tree); static void sdbout_global_decl (tree); /* Random macros describing parts of SDB data. */ /* Default value of delimiter is ";". */ #ifndef SDB_DELIM #define SDB_DELIM ";" #endif /* Maximum number of dimensions the assembler will allow. 
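For instance, a declaration such as int a[2][3][4] uses three of these dimension slots and is emitted by plain_type below as roughly .dim 2,3,4.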
*/ #ifndef SDB_MAX_DIM #define SDB_MAX_DIM 4 #endif #ifndef PUT_SDB_SCL #define PUT_SDB_SCL(a) fprintf(asm_out_file, "\t.scl\t%d%s", (a), SDB_DELIM) #endif #ifndef PUT_SDB_INT_VAL #define PUT_SDB_INT_VAL(a) \ do { \ fprintf (asm_out_file, "\t.val\t" HOST_WIDE_INT_PRINT_DEC "%s", \ (HOST_WIDE_INT) (a), SDB_DELIM); \ } while (0) #endif #ifndef PUT_SDB_VAL #define PUT_SDB_VAL(a) \ ( fputs ("\t.val\t", asm_out_file), \ output_addr_const (asm_out_file, (a)), \ fprintf (asm_out_file, SDB_DELIM)) #endif #ifndef PUT_SDB_DEF #define PUT_SDB_DEF(a) \ do { fprintf (asm_out_file, "\t.def\t"); \ assemble_name (asm_out_file, a); \ fprintf (asm_out_file, SDB_DELIM); } while (0) #endif #ifndef PUT_SDB_PLAIN_DEF #define PUT_SDB_PLAIN_DEF(a) fprintf(asm_out_file,"\t.def\t.%s%s",a, SDB_DELIM) #endif #ifndef PUT_SDB_ENDEF #define PUT_SDB_ENDEF fputs("\t.endef\n", asm_out_file) #endif #ifndef PUT_SDB_TYPE #define PUT_SDB_TYPE(a) fprintf(asm_out_file, "\t.type\t0%o%s", a, SDB_DELIM) #endif #ifndef PUT_SDB_SIZE #define PUT_SDB_SIZE(a) \ do { \ fprintf (asm_out_file, "\t.size\t" HOST_WIDE_INT_PRINT_DEC "%s", \ (HOST_WIDE_INT) (a), SDB_DELIM); \ } while(0) #endif #ifndef PUT_SDB_START_DIM #define PUT_SDB_START_DIM fprintf(asm_out_file, "\t.dim\t") #endif #ifndef PUT_SDB_NEXT_DIM #define PUT_SDB_NEXT_DIM(a) fprintf(asm_out_file, "%d,", a) #endif #ifndef PUT_SDB_LAST_DIM #define PUT_SDB_LAST_DIM(a) fprintf(asm_out_file, "%d%s", a, SDB_DELIM) #endif #ifndef PUT_SDB_TAG #define PUT_SDB_TAG(a) \ do { fprintf (asm_out_file, "\t.tag\t"); \ assemble_name (asm_out_file, a); \ fprintf (asm_out_file, SDB_DELIM); } while (0) #endif #ifndef PUT_SDB_BLOCK_START #define PUT_SDB_BLOCK_START(LINE) \ fprintf (asm_out_file, \ "\t.def\t.bb%s\t.val\t.%s\t.scl\t100%s\t.line\t%d%s\t.endef\n", \ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM) #endif #ifndef PUT_SDB_BLOCK_END #define PUT_SDB_BLOCK_END(LINE) \ fprintf (asm_out_file, \ "\t.def\t.eb%s\t.val\t.%s\t.scl\t100%s\t.line\t%d%s\t.endef\n", \ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM) #endif #ifndef PUT_SDB_FUNCTION_START #define PUT_SDB_FUNCTION_START(LINE) \ fprintf (asm_out_file, \ "\t.def\t.bf%s\t.val\t.%s\t.scl\t101%s\t.line\t%d%s\t.endef\n", \ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM) #endif #ifndef PUT_SDB_FUNCTION_END #define PUT_SDB_FUNCTION_END(LINE) \ fprintf (asm_out_file, \ "\t.def\t.ef%s\t.val\t.%s\t.scl\t101%s\t.line\t%d%s\t.endef\n", \ SDB_DELIM, SDB_DELIM, SDB_DELIM, (LINE), SDB_DELIM) #endif #ifndef SDB_GENERATE_FAKE #define SDB_GENERATE_FAKE(BUFFER, NUMBER) \ sprintf ((BUFFER), ".%dfake", (NUMBER)); #endif /* Return the sdb tag identifier string for TYPE if TYPE has already been defined; otherwise return a null pointer. */ #define KNOWN_TYPE_TAG(type) TYPE_SYMTAB_POINTER (type) /* Set the sdb tag identifier string for TYPE to NAME. */ #define SET_KNOWN_TYPE_TAG(TYPE, NAME) \ TYPE_SYMTAB_POINTER (TYPE) = (char *)(NAME) /* Return the name (a string) of the struct, union or enum tag described by the TREE_LIST node LINK. This is 0 for an anonymous one. */ #define TAG_NAME(link) \ (((link) && TREE_PURPOSE ((link)) \ && IDENTIFIER_POINTER (TREE_PURPOSE ((link)))) \ ? IDENTIFIER_POINTER (TREE_PURPOSE ((link))) : (char *) 0) /* Ensure we don't output a negative line number. */ #define MAKE_LINE_SAFE(line) \ if ((int) line <= sdb_begin_function_line) \ line = sdb_begin_function_line + 1 /* Perform linker optimization of merging header file definitions together for targets with MIPS_DEBUGGING_INFO defined. 
This won't work without a post 960826 version of GAS. Nothing breaks with earlier versions of GAS, the optimization just won't be done. The native assembler already has the necessary support. */ #ifdef MIPS_DEBUGGING_INFO #ifndef PUT_SDB_SRC_FILE #define PUT_SDB_SRC_FILE(FILENAME) \ output_file_directive (asm_out_file, (FILENAME)) #endif /* ECOFF linkers have an optimization that does the same kind of thing as N_BINCL/E_INCL in stabs: eliminate duplicate debug information in the executable. To achieve this, GCC must output a .file for each file name change. */ /* This is a stack of input files. */ struct sdb_file { struct sdb_file *next; const char *name; }; /* This is the top of the stack. */ static struct sdb_file *current_file; #endif /* MIPS_DEBUGGING_INFO */ /* The debug hooks structure. */ const struct gcc_debug_hooks sdb_debug_hooks = { sdbout_init, /* init */ sdbout_finish, /* finish */ debug_nothing_int_charstar, /* define */ debug_nothing_int_charstar, /* undef */ sdbout_start_source_file, /* start_source_file */ sdbout_end_source_file, /* end_source_file */ sdbout_begin_block, /* begin_block */ sdbout_end_block, /* end_block */ debug_true_tree, /* ignore_block */ sdbout_source_line, /* source_line */ #ifdef MIPS_DEBUGGING_INFO /* Defer on MIPS systems so that parameter descriptions follow function entry. */ debug_nothing_int_charstar, /* begin_prologue */ sdbout_end_prologue, /* end_prologue */ #else sdbout_begin_prologue, /* begin_prologue */ debug_nothing_int_charstar, /* end_prologue */ #endif sdbout_end_epilogue, /* end_epilogue */ sdbout_begin_function, /* begin_function */ sdbout_end_function, /* end_function */ debug_nothing_tree, /* function_decl */ sdbout_global_decl, /* global_decl */ sdbout_symbol, /* type_decl */ debug_nothing_tree_tree, /* imported_module_or_decl */ debug_nothing_tree, /* deferred_inline_function */ debug_nothing_tree, /* outlining_inline_function */ sdbout_label, /* label */ debug_nothing_int, /* handle_pch */ debug_nothing_rtx /* var_location */ }; /* Return a unique string to name an anonymous type. */ static char * gen_fake_label (void) { char label[10]; char *labelstr; SDB_GENERATE_FAKE (label, unnamed_struct_number); unnamed_struct_number++; labelstr = xstrdup (label); return labelstr; } /* Return the number which describes TYPE for SDB. For pointers, etc., this function is recursive. Each record, union or enumeral type must already have had a tag number output. */ /* The number is given by d6d5d4d3d2d1bbbb where bbbb is 4 bit basic type, and di indicate one of notype,ptr,fn,array. Thus, char *foo () has bbbb=T_CHAR d1=D_FCN d2=D_PTR N_BTMASK= 017 1111 basic type field. N_TSHIFT= 2 derived type shift N_BTSHFT= 4 Basic type shift */ /* Produce the number that describes a pointer, function or array type. PREV is the number describing the target, value or element type. DT_type describes how to transform that type. */ #define PUSH_DERIVED_LEVEL(DT_type,PREV) \ ((((PREV) & ~(int) N_BTMASK) << (int) N_TSHIFT) \ | ((int) DT_type << (int) N_BTSHFT) \ | ((PREV) & (int) N_BTMASK)) /* Number of elements used in sdb_dims. */ static int sdb_n_dims = 0; /* Table of array dimensions of current type. */ static int sdb_dims[SDB_MAX_DIM]; /* Size of outermost array currently being processed. */ static int sdb_type_size = -1; static int plain_type (tree type) { int val = plain_type_1 (type, 0); /* If we have already saved up some array dimensions, print them now. 
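(The val computed above is the encoded type code itself: with the default, non-extended masks a plain int is simply T_INT = 04, while int * comes back as PUSH_DERIVED_LEVEL (DT_PTR, T_INT) = (DT_PTR << N_BTSHFT) | T_INT = 024.)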
*/ if (sdb_n_dims > 0) { int i; PUT_SDB_START_DIM; for (i = sdb_n_dims - 1; i > 0; i--) PUT_SDB_NEXT_DIM (sdb_dims[i]); PUT_SDB_LAST_DIM (sdb_dims[0]); sdb_n_dims = 0; sdb_type_size = int_size_in_bytes (type); /* Don't kill sdb if type is not laid out or has variable size. */ if (sdb_type_size < 0) sdb_type_size = 0; } /* If we have computed the size of an array containing this type, print it now. */ if (sdb_type_size >= 0) { PUT_SDB_SIZE (sdb_type_size); sdb_type_size = -1; } return val; } static int template_name_p (tree name) { const char *ptr = IDENTIFIER_POINTER (name); while (*ptr && *ptr != '<') ptr++; return *ptr != '\0'; } static void sdbout_record_type_name (tree type) { const char *name = 0; int no_name; if (KNOWN_TYPE_TAG (type)) return; if (TYPE_NAME (type) != 0) { tree t = 0; /* Find the IDENTIFIER_NODE for the type name. */ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE) t = TYPE_NAME (type); else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL) { t = DECL_NAME (TYPE_NAME (type)); /* The DECL_NAME for templates includes "<>", which breaks most assemblers. Use its assembler name instead, which has been mangled into being safe. */ if (t && template_name_p (t)) t = DECL_ASSEMBLER_NAME (TYPE_NAME (type)); } /* Now get the name as a string, or invent one. */ if (t != NULL_TREE) name = IDENTIFIER_POINTER (t); } no_name = (name == 0 || *name == 0); if (no_name) name = gen_fake_label (); SET_KNOWN_TYPE_TAG (type, name); #ifdef SDB_ALLOW_FORWARD_REFERENCES if (no_name) sdbout_queue_anonymous_type (type); #endif } /* Return the .type value for type TYPE. LEVEL indicates how many levels deep we have recursed into the type. The SDB debug format can only represent 6 derived levels of types. After that, we must output inaccurate debug info. We deliberately stop before the 7th level, so that ADA recursive types will not give an infinite loop. */ static int plain_type_1 (tree type, int level) { if (type == 0) type = void_type_node; else if (type == error_mark_node) type = integer_type_node; else type = TYPE_MAIN_VARIANT (type); switch (TREE_CODE (type)) { case VOID_TYPE: return T_VOID; case BOOLEAN_TYPE: case INTEGER_TYPE: { int size = int_size_in_bytes (type) * BITS_PER_UNIT; /* Carefully distinguish all the standard types of C, without messing up if the language is not C. Note that we check only for the names that contain spaces; other names might occur by coincidence in other languages. */ if (TYPE_NAME (type) != 0 && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && DECL_NAME (TYPE_NAME (type)) != 0 && TREE_CODE (DECL_NAME (TYPE_NAME (type))) == IDENTIFIER_NODE) { const char *const name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))); if (!strcmp (name, "char")) return T_CHAR; if (!strcmp (name, "unsigned char")) return T_UCHAR; if (!strcmp (name, "signed char")) return T_CHAR; if (!strcmp (name, "int")) return T_INT; if (!strcmp (name, "unsigned int")) return T_UINT; if (!strcmp (name, "short int")) return T_SHORT; if (!strcmp (name, "short unsigned int")) return T_USHORT; if (!strcmp (name, "long int")) return T_LONG; if (!strcmp (name, "long unsigned int")) return T_ULONG; } if (size == INT_TYPE_SIZE) return (TYPE_UNSIGNED (type) ? T_UINT : T_INT); if (size == CHAR_TYPE_SIZE) return (TYPE_UNSIGNED (type) ? T_UCHAR : T_CHAR); if (size == SHORT_TYPE_SIZE) return (TYPE_UNSIGNED (type) ? T_USHORT : T_SHORT); if (size == LONG_TYPE_SIZE) return (TYPE_UNSIGNED (type) ? T_ULONG : T_LONG); if (size == LONG_LONG_TYPE_SIZE) /* better than nothing */ return (TYPE_UNSIGNED (type) ? 
T_ULONG : T_LONG); return 0; } case REAL_TYPE: { int precision = TYPE_PRECISION (type); if (precision == FLOAT_TYPE_SIZE) return T_FLOAT; if (precision == DOUBLE_TYPE_SIZE) return T_DOUBLE; #ifdef EXTENDED_SDB_BASIC_TYPES if (precision == LONG_DOUBLE_TYPE_SIZE) return T_LNGDBL; #else if (precision == LONG_DOUBLE_TYPE_SIZE) return T_DOUBLE; /* better than nothing */ #endif return 0; } case ARRAY_TYPE: { int m; if (level >= 6) return T_VOID; else m = plain_type_1 (TREE_TYPE (type), level+1); if (sdb_n_dims < SDB_MAX_DIM) sdb_dims[sdb_n_dims++] = (TYPE_DOMAIN (type) && TYPE_MIN_VALUE (TYPE_DOMAIN (type)) != 0 && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != 0 && host_integerp (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) && host_integerp (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) ? (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1) : 0); return PUSH_DERIVED_LEVEL (DT_ARY, m); } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: case ENUMERAL_TYPE: { char *tag; #ifdef SDB_ALLOW_FORWARD_REFERENCES sdbout_record_type_name (type); #endif #ifndef SDB_ALLOW_UNKNOWN_REFERENCES if ((TREE_ASM_WRITTEN (type) && KNOWN_TYPE_TAG (type) != 0) #ifdef SDB_ALLOW_FORWARD_REFERENCES || TYPE_MODE (type) != VOIDmode #endif ) #endif { /* Output the referenced structure tag name only if the .def has already been finished. At least on 386, the Unix assembler cannot handle forward references to tags. */ /* But the 88100, it requires them, sigh... */ /* And the MIPS requires unknown refs as well... */ tag = KNOWN_TYPE_TAG (type); PUT_SDB_TAG (tag); /* These 3 lines used to follow the close brace. However, a size of 0 without a tag implies a tag of 0, so if we don't know a tag, we can't mention the size. */ sdb_type_size = int_size_in_bytes (type); if (sdb_type_size < 0) sdb_type_size = 0; } return ((TREE_CODE (type) == RECORD_TYPE) ? T_STRUCT : (TREE_CODE (type) == UNION_TYPE) ? T_UNION : (TREE_CODE (type) == QUAL_UNION_TYPE) ? T_UNION : T_ENUM); } case POINTER_TYPE: case REFERENCE_TYPE: { int m; if (level >= 6) return T_VOID; else m = plain_type_1 (TREE_TYPE (type), level+1); return PUSH_DERIVED_LEVEL (DT_PTR, m); } case FUNCTION_TYPE: case METHOD_TYPE: { int m; if (level >= 6) return T_VOID; else m = plain_type_1 (TREE_TYPE (type), level+1); return PUSH_DERIVED_LEVEL (DT_FCN, m); } default: return 0; } } /* Output the symbols defined in block number DO_BLOCK. This function works by walking the tree structure of blocks, counting blocks until it finds the desired block. */ static int do_block = 0; static void sdbout_block (tree block) { while (block) { /* Ignore blocks never expanded or otherwise marked as real. */ if (TREE_USED (block)) { /* When we reach the specified block, output its symbols. */ if (BLOCK_NUMBER (block) == do_block) sdbout_syms (BLOCK_VARS (block)); /* If we are past the specified block, stop the scan. */ if (BLOCK_NUMBER (block) > do_block) return; /* Scan the blocks within this block. */ sdbout_block (BLOCK_SUBBLOCKS (block)); } block = BLOCK_CHAIN (block); } } /* Call sdbout_symbol on each decl in the chain SYMS. */ static void sdbout_syms (tree syms) { while (syms) { if (TREE_CODE (syms) != LABEL_DECL) sdbout_symbol (syms, 1); syms = TREE_CHAIN (syms); } } /* Output SDB information for a symbol described by DECL. LOCAL is nonzero if the symbol is not file-scope. 
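For example, assuming the default SDB_DELIM of ";" and ignoring target name decoration, a local int i living in its stack slot at frame offset -4 is described roughly as .def i; .val -4; .scl 1; .type 04; .endef (storage class C_AUTO), while one that ends up in a hard register instead gets .scl 4 (C_REG) with its DBX register number as the .val.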
*/ void sdbout_symbol (tree decl, int local) { tree type = TREE_TYPE (decl); tree context = NULL_TREE; rtx value; int regno = -1; const char *name; /* If we are called before sdbout_init is run, just save the symbol for later. */ if (!sdbout_initialized) { preinit_symbols = tree_cons (0, decl, preinit_symbols); return; } sdbout_one_type (type); switch (TREE_CODE (decl)) { case CONST_DECL: /* Enum values are defined by defining the enum type. */ return; case FUNCTION_DECL: /* Don't mention a nested function under its parent. */ context = decl_function_context (decl); if (context == current_function_decl) return; /* Check DECL_INITIAL to distinguish declarations from definitions. Don't output debug info here for declarations; they will have a DECL_INITIAL value of 0. */ if (! DECL_INITIAL (decl)) return; if (!MEM_P (DECL_RTL (decl)) || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF) return; PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))); PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0)); PUT_SDB_SCL (TREE_PUBLIC (decl) ? C_EXT : C_STAT); break; case TYPE_DECL: /* Done with tagged types. */ if (DECL_NAME (decl) == 0) return; if (DECL_IGNORED_P (decl)) return; /* Output typedef name. */ if (template_name_p (DECL_NAME (decl))) PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))); else PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_NAME (decl))); PUT_SDB_SCL (C_TPDEF); break; case PARM_DECL: /* Parm decls go in their own separate chains and are output by sdbout_reg_parms and sdbout_parms. */ abort (); case VAR_DECL: /* Don't mention a variable that is external. Let the file that defines it describe it. */ if (DECL_EXTERNAL (decl)) return; /* Ignore __FUNCTION__, etc. */ if (DECL_IGNORED_P (decl)) return; /* If there was an error in the declaration, don't dump core if there is no RTL associated with the variable doesn't exist. */ if (!DECL_RTL_SET_P (decl)) return; SET_DECL_RTL (decl, eliminate_regs (DECL_RTL (decl), 0, NULL_RTX)); #ifdef LEAF_REG_REMAP if (current_function_uses_only_leaf_regs) leaf_renumber_regs_insn (DECL_RTL (decl)); #endif value = DECL_RTL (decl); /* Don't mention a variable at all if it was completely optimized into nothingness. If DECL was from an inline function, then its rtl is not identically the rtl that was used in this particular compilation. */ if (REG_P (value)) { regno = REGNO (value); if (regno >= FIRST_PSEUDO_REGISTER) return; } else if (GET_CODE (value) == SUBREG) { while (GET_CODE (value) == SUBREG) value = SUBREG_REG (value); if (REG_P (value)) { if (REGNO (value) >= FIRST_PSEUDO_REGISTER) return; } regno = REGNO (alter_subreg (&value)); SET_DECL_RTL (decl, value); } /* Don't output anything if an auto variable gets RTL that is static. GAS version 2.2 can't handle such output. */ else if (MEM_P (value) && CONSTANT_P (XEXP (value, 0)) && ! TREE_STATIC (decl)) return; /* Emit any structure, union, or enum type that has not been output. This occurs for tag-less structs (et al) used to declare variables within functions. */ if (TREE_CODE (type) == ENUMERAL_TYPE || TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) { if (COMPLETE_TYPE_P (type) /* not a forward reference */ && KNOWN_TYPE_TAG (type) == 0) /* not yet declared */ sdbout_one_type (type); } /* Defer SDB information for top-level initialized variables! */ if (! local && MEM_P (value) && DECL_INITIAL (decl)) return; /* C++ in 2.3 makes nameless symbols. That will be fixed later. For now, avoid crashing. 
*/ if (DECL_NAME (decl) == NULL_TREE) return; /* Record the name for, starting a symtab entry. */ if (local) name = IDENTIFIER_POINTER (DECL_NAME (decl)); else name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); if (MEM_P (value) && GET_CODE (XEXP (value, 0)) == SYMBOL_REF) { PUT_SDB_DEF (name); if (TREE_PUBLIC (decl)) { PUT_SDB_VAL (XEXP (value, 0)); PUT_SDB_SCL (C_EXT); } else { PUT_SDB_VAL (XEXP (value, 0)); PUT_SDB_SCL (C_STAT); } } else if (regno >= 0) { PUT_SDB_DEF (name); PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (regno)); PUT_SDB_SCL (C_REG); } else if (MEM_P (value) && (MEM_P (XEXP (value, 0)) || (REG_P (XEXP (value, 0)) && REGNO (XEXP (value, 0)) != HARD_FRAME_POINTER_REGNUM && REGNO (XEXP (value, 0)) != STACK_POINTER_REGNUM))) /* If the value is indirect by memory or by a register that isn't the frame pointer then it means the object is variable-sized and address through that register or stack slot. COFF has no way to represent this so all we can do is output the variable as a pointer. */ { PUT_SDB_DEF (name); if (REG_P (XEXP (value, 0))) { PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (XEXP (value, 0)))); PUT_SDB_SCL (C_REG); } else { /* DECL_RTL looks like (MEM (MEM (PLUS (REG...) (CONST_INT...)))). We want the value of that CONST_INT. */ /* Encore compiler hates a newline in a macro arg, it seems. */ PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET (XEXP (XEXP (value, 0), 0))); PUT_SDB_SCL (C_AUTO); } /* Effectively do build_pointer_type, but don't cache this type, since it might be temporary whereas the type it points to might have been saved for inlining. */ /* Don't use REFERENCE_TYPE because dbx can't handle that. */ type = make_node (POINTER_TYPE); TREE_TYPE (type) = TREE_TYPE (decl); } else if (MEM_P (value) && ((GET_CODE (XEXP (value, 0)) == PLUS && REG_P (XEXP (XEXP (value, 0), 0)) && GET_CODE (XEXP (XEXP (value, 0), 1)) == CONST_INT) /* This is for variables which are at offset zero from the frame pointer. This happens on the Alpha. Non-frame pointer registers are excluded above. */ || (REG_P (XEXP (value, 0))))) { /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...))) or (MEM (REG...)). We want the value of that CONST_INT or zero. */ PUT_SDB_DEF (name); PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET (XEXP (value, 0))); PUT_SDB_SCL (C_AUTO); } else { /* It is something we don't know how to represent for SDB. */ return; } break; default: break; } PUT_SDB_TYPE (plain_type (type)); PUT_SDB_ENDEF; } /* Output SDB information for a top-level initialized variable that has been delayed. */ static void sdbout_toplevel_data (tree decl) { tree type = TREE_TYPE (decl); if (DECL_IGNORED_P (decl)) return; if (! (TREE_CODE (decl) == VAR_DECL && MEM_P (DECL_RTL (decl)) && DECL_INITIAL (decl))) abort (); PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))); PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0)); if (TREE_PUBLIC (decl)) { PUT_SDB_SCL (C_EXT); } else { PUT_SDB_SCL (C_STAT); } PUT_SDB_TYPE (plain_type (type)); PUT_SDB_ENDEF; } #ifdef SDB_ALLOW_FORWARD_REFERENCES /* Machinery to record and output anonymous types. */ static void sdbout_queue_anonymous_type (tree type) { anonymous_types = tree_cons (NULL_TREE, type, anonymous_types); } static void sdbout_dequeue_anonymous_types (void) { tree types, link; while (anonymous_types) { types = nreverse (anonymous_types); anonymous_types = NULL_TREE; for (link = types; link; link = TREE_CHAIN (link)) { tree type = TREE_VALUE (link); if (type && ! 
TREE_ASM_WRITTEN (type)) sdbout_one_type (type); } } } #endif /* Given a chain of ..._TYPE nodes, all of which have names, output definitions of those names, as typedefs. */ void sdbout_types (tree types) { tree link; for (link = types; link; link = TREE_CHAIN (link)) sdbout_one_type (link); #ifdef SDB_ALLOW_FORWARD_REFERENCES sdbout_dequeue_anonymous_types (); #endif } static void sdbout_type (tree type) { if (type == error_mark_node) type = integer_type_node; PUT_SDB_TYPE (plain_type (type)); } /* Output types of the fields of type TYPE, if they are structs. Formerly did not chase through pointer types, since that could be circular. They must come before TYPE, since forward refs are not allowed. Now james@bigtex.cactus.org says to try them. */ static void sdbout_field_types (tree type) { tree tail; for (tail = TYPE_FIELDS (type); tail; tail = TREE_CHAIN (tail)) /* This condition should match the one for emitting the actual members below. */ if (TREE_CODE (tail) == FIELD_DECL && DECL_NAME (tail) && DECL_SIZE (tail) && host_integerp (DECL_SIZE (tail), 1) && host_integerp (bit_position (tail), 0)) { if (POINTER_TYPE_P (TREE_TYPE (tail))) sdbout_one_type (TREE_TYPE (TREE_TYPE (tail))); else sdbout_one_type (TREE_TYPE (tail)); } } /* Use this to put out the top level defined record and union types for later reference. If this is a struct with a name, then put that name out. Other unnamed structs will have .xxfake labels generated so that they may be referred to later. The label will be stored in the KNOWN_TYPE_TAG slot of a type. It may NOT be called recursively. */ static void sdbout_one_type (tree type) { if (current_function_decl != NULL_TREE && DECL_SECTION_NAME (current_function_decl) != NULL_TREE) ; /* Don't change section amid function. */ else text_section (); switch (TREE_CODE (type)) { case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: case ENUMERAL_TYPE: type = TYPE_MAIN_VARIANT (type); /* Don't output a type twice. */ if (TREE_ASM_WRITTEN (type)) /* James said test TREE_ASM_BEING_WRITTEN here. */ return; /* Output nothing if type is not yet defined. */ if (!COMPLETE_TYPE_P (type)) return; TREE_ASM_WRITTEN (type) = 1; /* This is reputed to cause trouble with the following case, but perhaps checking TYPE_SIZE above will fix it. */ /* Here is a testcase: struct foo { struct badstr *bbb; } forwardref; typedef struct intermediate { int aaaa; } intermediate_ref; typedef struct badstr { int ccccc; } badtype; */ /* This change, which ought to make better output, used to make the COFF assembler unhappy. Changes involving KNOWN_TYPE_TAG may fix the problem. */ /* Before really doing anything, output types we want to refer to. */ /* Note that in version 1 the following two lines are not used if forward references are in use. */ if (TREE_CODE (type) != ENUMERAL_TYPE) sdbout_field_types (type); /* Output a structure type. */ { int size = int_size_in_bytes (type); int member_scl = 0; tree tem; int i, n_baseclasses = 0; /* Record the type tag, but not in its permanent place just yet. 
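As a rough illustration (assuming a 4-byte int, the default delimiter and no target name decoration), a type such as struct pt { int x; int y; } is emitted as a tag entry .def pt; .scl 10; .type 010; .size 8; .endef, one entry per member (x with .val 0, y with .val 4, both .scl 8 and .type 04), and a closing .def .eos; .val 8; .scl 102; .tag pt; .size 8; .endef.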
*/ sdbout_record_type_name (type); PUT_SDB_DEF (KNOWN_TYPE_TAG (type)); switch (TREE_CODE (type)) { case UNION_TYPE: case QUAL_UNION_TYPE: PUT_SDB_SCL (C_UNTAG); PUT_SDB_TYPE (T_UNION); member_scl = C_MOU; break; case RECORD_TYPE: PUT_SDB_SCL (C_STRTAG); PUT_SDB_TYPE (T_STRUCT); member_scl = C_MOS; break; case ENUMERAL_TYPE: PUT_SDB_SCL (C_ENTAG); PUT_SDB_TYPE (T_ENUM); member_scl = C_MOE; break; default: break; } PUT_SDB_SIZE (size); PUT_SDB_ENDEF; /* Print out the base class information with fields named after the types they hold. */ /* This is only relevant to aggregate types. TYPE_BINFO is used for other purposes in an ENUMERAL_TYPE, so we must exclude that case. */ if (TREE_CODE (type) != ENUMERAL_TYPE) { if (TYPE_BINFO (type) && TYPE_BINFO_BASETYPES (type)) n_baseclasses = TREE_VEC_LENGTH (TYPE_BINFO_BASETYPES (type)); for (i = 0; i < n_baseclasses; i++) { tree child = TREE_VEC_ELT (BINFO_BASETYPES (TYPE_BINFO (type)), i); tree child_type = BINFO_TYPE (child); tree child_type_name; if (TYPE_NAME (child_type) == 0) continue; if (TREE_CODE (TYPE_NAME (child_type)) == IDENTIFIER_NODE) child_type_name = TYPE_NAME (child_type); else if (TREE_CODE (TYPE_NAME (child_type)) == TYPE_DECL) { child_type_name = DECL_NAME (TYPE_NAME (child_type)); if (child_type_name && template_name_p (child_type_name)) child_type_name = DECL_ASSEMBLER_NAME (TYPE_NAME (child_type)); } else continue; PUT_SDB_DEF (IDENTIFIER_POINTER (child_type_name)); PUT_SDB_INT_VAL (tree_low_cst (BINFO_OFFSET (child), 0)); PUT_SDB_SCL (member_scl); sdbout_type (BINFO_TYPE (child)); PUT_SDB_ENDEF; } } /* Output the individual fields. */ if (TREE_CODE (type) == ENUMERAL_TYPE) { for (tem = TYPE_VALUES (type); tem; tem = TREE_CHAIN (tem)) if (host_integerp (TREE_VALUE (tem), 0)) { PUT_SDB_DEF (IDENTIFIER_POINTER (TREE_PURPOSE (tem))); PUT_SDB_INT_VAL (tree_low_cst (TREE_VALUE (tem), 0)); PUT_SDB_SCL (C_MOE); PUT_SDB_TYPE (T_MOE); PUT_SDB_ENDEF; } } else /* record or union type */ for (tem = TYPE_FIELDS (type); tem; tem = TREE_CHAIN (tem)) /* Output the name, type, position (in bits), size (in bits) of each field. */ /* Omit here the nameless fields that are used to skip bits. Also omit fields with variable size or position. Also omit non FIELD_DECL nodes that GNU C++ may put here. */ if (TREE_CODE (tem) == FIELD_DECL && DECL_NAME (tem) && DECL_SIZE (tem) && host_integerp (DECL_SIZE (tem), 1) && host_integerp (bit_position (tem), 0)) { const char *name; name = IDENTIFIER_POINTER (DECL_NAME (tem)); PUT_SDB_DEF (name); if (DECL_BIT_FIELD_TYPE (tem)) { PUT_SDB_INT_VAL (int_bit_position (tem)); PUT_SDB_SCL (C_FIELD); sdbout_type (DECL_BIT_FIELD_TYPE (tem)); PUT_SDB_SIZE (tree_low_cst (DECL_SIZE (tem), 1)); } else { PUT_SDB_INT_VAL (int_bit_position (tem) / BITS_PER_UNIT); PUT_SDB_SCL (member_scl); sdbout_type (TREE_TYPE (tem)); } PUT_SDB_ENDEF; } /* Output end of a structure,union, or enumeral definition. */ PUT_SDB_PLAIN_DEF ("eos"); PUT_SDB_INT_VAL (size); PUT_SDB_SCL (C_EOS); PUT_SDB_TAG (KNOWN_TYPE_TAG (type)); PUT_SDB_SIZE (size); PUT_SDB_ENDEF; break; default: break; } } } /* The following two functions output definitions of function parameters. Each parameter gets a definition locating it in the parameter list. Each parameter that is a register variable gets a second definition locating it in the register. Printing or argument lists in gdb uses the definitions that locate in the parameter list. But reference to the variable in expressions uses preferentially the definition as a register. 
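For instance, an int parameter n (a hypothetical name) passed on the stack at offset 8 but copied into a hard register for the body would typically get both a C_ARG entry from sdbout_parms, roughly .def n; .val 8; .scl 9; .type 04; .endef, and a C_REG entry from sdbout_reg_parms with .scl 4 and the DBX register number as its .val.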
*/ /* Output definitions, referring to storage in the parmlist, of all the parms in PARMS, which is a chain of PARM_DECL nodes. */ static void sdbout_parms (tree parms) { for (; parms; parms = TREE_CHAIN (parms)) if (DECL_NAME (parms)) { int current_sym_value = 0; const char *name = IDENTIFIER_POINTER (DECL_NAME (parms)); if (name == 0 || *name == 0) name = gen_fake_label (); /* Perform any necessary register eliminations on the parameter's rtl, so that the debugging output will be accurate. */ DECL_INCOMING_RTL (parms) = eliminate_regs (DECL_INCOMING_RTL (parms), 0, NULL_RTX); SET_DECL_RTL (parms, eliminate_regs (DECL_RTL (parms), 0, NULL_RTX)); if (PARM_PASSED_IN_MEMORY (parms)) { rtx addr = XEXP (DECL_INCOMING_RTL (parms), 0); tree type; /* ??? Here we assume that the parm address is indexed off the frame pointer or arg pointer. If that is not true, we produce meaningless results, but do not crash. */ if (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 1)) == CONST_INT) current_sym_value = INTVAL (XEXP (addr, 1)); else current_sym_value = 0; if (REG_P (DECL_RTL (parms)) && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER) type = DECL_ARG_TYPE (parms); else { int original_sym_value = current_sym_value; /* This is the case where the parm is passed as an int or double and it is converted to a char, short or float and stored back in the parmlist. In this case, describe the parm with the variable's declared type, and adjust the address if the least significant bytes (which we are using) are not the first ones. */ if (BYTES_BIG_ENDIAN && TREE_TYPE (parms) != DECL_ARG_TYPE (parms)) current_sym_value += (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms))) - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms)))); if (MEM_P (DECL_RTL (parms)) && GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS && (GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1)) == CONST_INT) && (INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1)) == current_sym_value)) type = TREE_TYPE (parms); else { current_sym_value = original_sym_value; type = DECL_ARG_TYPE (parms); } } PUT_SDB_DEF (name); PUT_SDB_INT_VAL (DEBUGGER_ARG_OFFSET (current_sym_value, addr)); PUT_SDB_SCL (C_ARG); PUT_SDB_TYPE (plain_type (type)); PUT_SDB_ENDEF; } else if (REG_P (DECL_RTL (parms))) { rtx best_rtl; /* Parm passed in registers and lives in registers or nowhere. */ /* If parm lives in a register, use that register; pretend the parm was passed there. It would be more consistent to describe the register where the parm was passed, but in practice that register usually holds something else. */ if (REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER) best_rtl = DECL_RTL (parms); /* If the parm lives nowhere, use the register where it was passed. */ else best_rtl = DECL_INCOMING_RTL (parms); PUT_SDB_DEF (name); PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (best_rtl))); PUT_SDB_SCL (C_REGPARM); PUT_SDB_TYPE (plain_type (TREE_TYPE (parms))); PUT_SDB_ENDEF; } else if (MEM_P (DECL_RTL (parms)) && XEXP (DECL_RTL (parms), 0) != const0_rtx) { /* Parm was passed in registers but lives on the stack. */ /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...))), in which case we want the value of that CONST_INT, or (MEM (REG ...)) or (MEM (MEM ...)), in which case we use a value of zero. */ if (REG_P (XEXP (DECL_RTL (parms), 0)) || MEM_P (XEXP (DECL_RTL (parms), 0))) current_sym_value = 0; else current_sym_value = INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1)); /* Again, this assumes the offset is based on the arg pointer. 
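(For instance, a DECL_RTL of the form (mem (plus (reg <argptr>) (const_int 16))) yields a .val of 16 here, via DEBUGGER_ARG_OFFSET.)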
*/ PUT_SDB_DEF (name); PUT_SDB_INT_VAL (DEBUGGER_ARG_OFFSET (current_sym_value, XEXP (DECL_RTL (parms), 0))); PUT_SDB_SCL (C_ARG); PUT_SDB_TYPE (plain_type (TREE_TYPE (parms))); PUT_SDB_ENDEF; } } } /* Output definitions for the places where parms live during the function, when different from where they were passed, when the parms were passed in memory. It is not useful to do this for parms passed in registers that live during the function in different registers, because it is impossible to look in the passed register for the passed value, so we use the within-the-function register to begin with. PARMS is a chain of PARM_DECL nodes. */ static void sdbout_reg_parms (tree parms) { for (; parms; parms = TREE_CHAIN (parms)) if (DECL_NAME (parms)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (parms)); /* Report parms that live in registers during the function but were passed in memory. */ if (REG_P (DECL_RTL (parms)) && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER && PARM_PASSED_IN_MEMORY (parms)) { if (name == 0 || *name == 0) name = gen_fake_label (); PUT_SDB_DEF (name); PUT_SDB_INT_VAL (DBX_REGISTER_NUMBER (REGNO (DECL_RTL (parms)))); PUT_SDB_SCL (C_REG); PUT_SDB_TYPE (plain_type (TREE_TYPE (parms))); PUT_SDB_ENDEF; } /* Report parms that live in memory but not where they were passed. */ else if (MEM_P (DECL_RTL (parms)) && GET_CODE (XEXP (DECL_RTL (parms), 0)) == PLUS && GET_CODE (XEXP (XEXP (DECL_RTL (parms), 0), 1)) == CONST_INT && PARM_PASSED_IN_MEMORY (parms) && ! rtx_equal_p (DECL_RTL (parms), DECL_INCOMING_RTL (parms))) { #if 0 /* ??? It is not clear yet what should replace this. */ int offset = DECL_OFFSET (parms) / BITS_PER_UNIT; /* A parm declared char is really passed as an int, so it occupies the least significant bytes. On a big-endian machine those are not the low-numbered ones. */ if (BYTES_BIG_ENDIAN && offset != -1 && TREE_TYPE (parms) != DECL_ARG_TYPE (parms)) offset += (GET_MODE_SIZE (TYPE_MODE (DECL_ARG_TYPE (parms))) - GET_MODE_SIZE (GET_MODE (DECL_RTL (parms)))); if (INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1)) != offset) {...} #endif { if (name == 0 || *name == 0) name = gen_fake_label (); PUT_SDB_DEF (name); PUT_SDB_INT_VAL (DEBUGGER_AUTO_OFFSET (XEXP (DECL_RTL (parms), 0))); PUT_SDB_SCL (C_AUTO); PUT_SDB_TYPE (plain_type (TREE_TYPE (parms))); PUT_SDB_ENDEF; } } } } /* Output debug information for a global DECL. Called from toplev.c after compilation proper has finished. */ static void sdbout_global_decl (tree decl) { if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl) && DECL_RTL_SET_P (decl)) { /* The COFF linker can move initialized global vars to the end. And that can screw up the symbol ordering. Defer those for sdbout_finish (). */ if (!DECL_INITIAL (decl) || !TREE_PUBLIC (decl)) sdbout_symbol (decl, 0); else VARRAY_PUSH_TREE (deferred_global_decls, decl); /* Output COFF information for non-global file-scope initialized variables. */ if (DECL_INITIAL (decl) && MEM_P (DECL_RTL (decl))) sdbout_toplevel_data (decl); } } /* Output initialized global vars at the end, in the order of definition. See comment in sdbout_global_decl. */ static void sdbout_finish (const char *main_filename ATTRIBUTE_UNUSED) { size_t i; for (i = 0; i < VARRAY_ACTIVE_SIZE (deferred_global_decls); i++) sdbout_symbol (VARRAY_TREE (deferred_global_decls, i), 0); } /* Describe the beginning of an internal block within a function. Also output descriptions of variables defined in this block. 
N is the number of the block, by order of beginning, counting from 1, and not counting the outermost (function top-level) block. The blocks match the BLOCKs in DECL_INITIAL (current_function_decl), if the count starts at 0 for the outermost one. */ static void sdbout_begin_block (unsigned int line, unsigned int n) { tree decl = current_function_decl; MAKE_LINE_SAFE (line); /* The SCO compiler does not emit a separate block for the function level scope, so we avoid it here also. However, mips ECOFF compilers do emit a separate block, so we retain it when MIPS_DEBUGGING_INFO is defined. */ #ifndef MIPS_DEBUGGING_INFO if (n != 1) #endif PUT_SDB_BLOCK_START (line - sdb_begin_function_line); if (n == 1) { /* Include the outermost BLOCK's variables in block 1. */ do_block = BLOCK_NUMBER (DECL_INITIAL (decl)); sdbout_block (DECL_INITIAL (decl)); } /* If -g1, suppress all the internal symbols of functions except for arguments. */ if (debug_info_level != DINFO_LEVEL_TERSE) { do_block = n; sdbout_block (DECL_INITIAL (decl)); } #ifdef SDB_ALLOW_FORWARD_REFERENCES sdbout_dequeue_anonymous_types (); #endif } /* Describe the end line-number of an internal block within a function. */ static void sdbout_end_block (unsigned int line, unsigned int n ATTRIBUTE_UNUSED) { MAKE_LINE_SAFE (line); /* The SCO compiler does not emit a separate block for the function level scope, so we avoid it here also. However, mips ECOFF compilers do emit a separate block, so we retain it when MIPS_DEBUGGING_INFO is defined. */ #ifndef MIPS_DEBUGGING_INFO if (n != 1) #endif PUT_SDB_BLOCK_END (line - sdb_begin_function_line); } /* Output a line number symbol entry for source file FILENAME and line number LINE. */ static void sdbout_source_line (unsigned int line, const char *filename ATTRIBUTE_UNUSED) { /* COFF relative line numbers must be positive. */ if ((int) line > sdb_begin_function_line) { #ifdef ASM_OUTPUT_SOURCE_LINE sdbout_source_line_counter += 1; ASM_OUTPUT_SOURCE_LINE (asm_out_file, line, sdbout_source_line_counter); #else fprintf (asm_out_file, "\t.ln\t%d\n", ((sdb_begin_function_line > -1) ? line - sdb_begin_function_line : 1)); #endif } } /* Output sdb info for the current function name. Called from assemble_start_function. */ static void sdbout_begin_function (tree decl ATTRIBUTE_UNUSED) { sdbout_symbol (current_function_decl, 0); } /* Called at beginning of function body (before or after prologue, depending on MIPS_DEBUGGING_INFO). Record the function's starting line number, so we can output relative line numbers for the other lines. Describe beginning of outermost block. Also describe the parameter list. */ #ifndef MIPS_DEBUGGING_INFO static void sdbout_begin_prologue (unsigned int line, const char *file ATTRIBUTE_UNUSED) { sdbout_end_prologue (line, file); } #endif static void sdbout_end_prologue (unsigned int line, const char *file ATTRIBUTE_UNUSED) { sdb_begin_function_line = line - 1; PUT_SDB_FUNCTION_START (line); sdbout_parms (DECL_ARGUMENTS (current_function_decl)); sdbout_reg_parms (DECL_ARGUMENTS (current_function_decl)); } /* Called at end of function (before epilogue). Describe end of outermost block. */ static void sdbout_end_function (unsigned int line) { #ifdef SDB_ALLOW_FORWARD_REFERENCES sdbout_dequeue_anonymous_types (); #endif MAKE_LINE_SAFE (line); PUT_SDB_FUNCTION_END (line - sdb_begin_function_line); /* Indicate we are between functions, for line-number output. */ sdb_begin_function_line = -1; } /* Output sdb info for the absolute end of a function. Called after the epilogue is output. 
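Unless the target defines PUT_SDB_EPILOGUE_END, this emits a .def/.val/.scl -1/.endef group naming the function's assembler name.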
*/ static void sdbout_end_epilogue (unsigned int line ATTRIBUTE_UNUSED, const char *file ATTRIBUTE_UNUSED) { const char *const name ATTRIBUTE_UNUSED = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl)); #ifdef PUT_SDB_EPILOGUE_END PUT_SDB_EPILOGUE_END (name); #else fprintf (asm_out_file, "\t.def\t"); assemble_name (asm_out_file, name); fprintf (asm_out_file, "%s\t.val\t.%s\t.scl\t-1%s\t.endef\n", SDB_DELIM, SDB_DELIM, SDB_DELIM); #endif } /* Output sdb info for the given label. Called only if LABEL_NAME (insn) is present. */ static void sdbout_label (rtx insn) { PUT_SDB_DEF (LABEL_NAME (insn)); PUT_SDB_VAL (insn); PUT_SDB_SCL (C_LABEL); PUT_SDB_TYPE (T_NULL); PUT_SDB_ENDEF; } /* Change to reading from a new source file. */ static void sdbout_start_source_file (unsigned int line ATTRIBUTE_UNUSED, const char *filename ATTRIBUTE_UNUSED) { #ifdef MIPS_DEBUGGING_INFO struct sdb_file *n = xmalloc (sizeof *n); n->next = current_file; n->name = filename; current_file = n; PUT_SDB_SRC_FILE (filename); #endif } /* Revert to reading a previous source file. */ static void sdbout_end_source_file (unsigned int line ATTRIBUTE_UNUSED) { #ifdef MIPS_DEBUGGING_INFO struct sdb_file *next; next = current_file->next; free (current_file); current_file = next; PUT_SDB_SRC_FILE (current_file->name); #endif } /* Set up for SDB output at the start of compilation. */ static void sdbout_init (const char *input_file_name ATTRIBUTE_UNUSED) { tree t; #ifdef MIPS_DEBUGGING_INFO current_file = xmalloc (sizeof *current_file); current_file->next = NULL; current_file->name = input_file_name; #endif VARRAY_TREE_INIT (deferred_global_decls, 12, "deferred_global_decls"); /* Emit debug information which was queued by sdbout_symbol before we got here. */ sdbout_initialized = true; for (t = nreverse (preinit_symbols); t; t = TREE_CHAIN (t)) sdbout_symbol (TREE_VALUE (t), 0); preinit_symbols = 0; } #else /* SDB_DEBUGGING_INFO */ /* This should never be used, but its address is needed for comparisons. */ const struct gcc_debug_hooks sdb_debug_hooks; #endif /* SDB_DEBUGGING_INFO */ /* Type information for sdbout.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. 
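Each entry in the first table below pairs a static root (preinit_symbols, deferred_global_decls, anonymous_types) with its GC marking and PCH walking routines; the second table lists plain scalars that the precompiled-header machinery saves and restores directly.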
*/ const struct ggc_root_tab gt_ggc_r_gt_sdbout_h[] = { { &preinit_symbols, 1, sizeof (preinit_symbols), >_ggc_mx_tree_node, >_pch_nx_tree_node }, { &deferred_global_decls, 1, sizeof (deferred_global_decls), >_ggc_mx_varray_head_tag, >_pch_nx_varray_head_tag }, { &anonymous_types, 1, sizeof (anonymous_types), >_ggc_mx_tree_node, >_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_sdbout_h[] = { { &sdbout_initialized, 1, sizeof (sdbout_initialized), NULL, NULL }, { &unnamed_struct_number, 1, sizeof (unnamed_struct_number), NULL, NULL }, { &sdbout_source_line_counter, 1, sizeof (sdbout_source_line_counter), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* RTL simplification functions for GNU compiler. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Simplification and canonicalization of RTL. */ /* Much code operates on (low, high) pairs; the low value is an unsigned wide int, the high value a signed wide int. We occasionally need to sign extend from low to high as if low were a signed wide int. */ #define HWI_SIGN_EXTEND(low) \ ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0)) static rtx neg_const_int (enum machine_mode, rtx); static bool mode_signbit_p (enum machine_mode, rtx); static int simplify_plus_minus_op_data_cmp (const void *, const void *); static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx, int); static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode, unsigned int); static rtx simplify_associative_operation (enum rtx_code, enum machine_mode, rtx, rtx); static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode, enum machine_mode, rtx, rtx); /* Negate a CONST_INT rtx, truncating (because a conversion from a maximally negative number can overflow). */ static rtx neg_const_int (enum machine_mode mode, rtx i) { return gen_int_mode (- INTVAL (i), mode); } /* Test whether expression, X, is an immediate constant that represents the most significant bit of machine mode MODE. */ static bool mode_signbit_p (enum machine_mode mode, rtx x) { unsigned HOST_WIDE_INT val; unsigned int width; if (GET_MODE_CLASS (mode) != MODE_INT) return false; width = GET_MODE_BITSIZE (mode); if (width == 0) return false; if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (x) == CONST_INT) val = INTVAL (x); else if (width <= 2 * HOST_BITS_PER_WIDE_INT && GET_CODE (x) == CONST_DOUBLE && CONST_DOUBLE_LOW (x) == 0) { val = CONST_DOUBLE_HIGH (x); width -= HOST_BITS_PER_WIDE_INT; } else return false; if (width < HOST_BITS_PER_WIDE_INT) val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1)); } /* Make a binary operation by properly ordering the operands and seeing if the expression folds. 
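For a commutative code any constant operand is first moved to the second position, so e.g. (plus (const_int 4) (reg)) is built as (plus (reg) (const_int 4)) before any folding is tried.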
*/ rtx simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1) { rtx tem; /* Put complex operands first and constants second if commutative. */ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH && swap_commutative_operands_p (op0, op1)) tem = op0, op0 = op1, op1 = tem; /* If this simplifies, do it. */ tem = simplify_binary_operation (code, mode, op0, op1); if (tem) return tem; /* Handle addition and subtraction specially. Otherwise, just form the operation. */ if (code == PLUS || code == MINUS) { tem = simplify_plus_minus (code, mode, op0, op1, 1); if (tem) return tem; } return gen_rtx_fmt_ee (code, mode, op0, op1); } /* If X is a MEM referencing the constant pool, return the real value. Otherwise return X. */ rtx avoid_constant_pool_reference (rtx x) { rtx c, tmp, addr; enum machine_mode cmode; switch (GET_CODE (x)) { case MEM: break; case FLOAT_EXTEND: /* Handle float extensions of constant pool references. */ tmp = XEXP (x, 0); c = avoid_constant_pool_reference (tmp); if (c != tmp && GET_CODE (c) == CONST_DOUBLE) { REAL_VALUE_TYPE d; REAL_VALUE_FROM_CONST_DOUBLE (d, c); return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x)); } return x; default: return x; } addr = XEXP (x, 0); /* Call target hook to avoid the effects of -fpic etc.... */ addr = targetm.delegitimize_address (addr); if (GET_CODE (addr) == LO_SUM) addr = XEXP (addr, 1); if (GET_CODE (addr) != SYMBOL_REF || ! CONSTANT_POOL_ADDRESS_P (addr)) return x; c = get_pool_constant (addr); cmode = get_pool_mode (addr); /* If we're accessing the constant in a different mode than it was originally stored, attempt to fix that up via subreg simplifications. If that fails we have no choice but to return the original memory. */ if (cmode != GET_MODE (x)) { c = simplify_subreg (GET_MODE (x), c, cmode, 0); return c ? c : x; } return c; } /* Make a unary operation by first seeing if it folds and otherwise making the specified operation. */ rtx simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op, enum machine_mode op_mode) { rtx tem; /* If this simplifies, use it. */ if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0) return tem; return gen_rtx_fmt_e (code, mode, op); } /* Likewise for ternary operations. */ rtx simplify_gen_ternary (enum rtx_code code, enum machine_mode mode, enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2) { rtx tem; /* If this simplifies, use it. */ if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2))) return tem; return gen_rtx_fmt_eee (code, mode, op0, op1, op2); } /* Likewise, for relational operations. CMP_MODE specifies mode comparison is done in. */ rtx simplify_gen_relational (enum rtx_code code, enum machine_mode mode, enum machine_mode cmp_mode, rtx op0, rtx op1) { rtx tem; if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode, op0, op1))) return tem; return gen_rtx_fmt_ee (code, mode, op0, op1); } /* Replace all occurrences of OLD in X with NEW and try to simplify the resulting RTX. Return a new RTX which is as simplified as possible. */ rtx simplify_replace_rtx (rtx x, rtx old, rtx new) { enum rtx_code code = GET_CODE (x); enum machine_mode mode = GET_MODE (x); enum machine_mode op_mode; rtx op0, op1, op2; /* If X is OLD, return NEW. Otherwise, if this is an expression, try to build a new expression substituting recursively. If we can't do anything, return our input. 
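For instance, replacing (reg R) with (const_int 0) inside (plus (reg R) (const_int 4)) rebuilds the PLUS through simplify_gen_binary, which folds it to (const_int 4).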
*/ if (x == old) return new; switch (GET_RTX_CLASS (code)) { case RTX_UNARY: op0 = XEXP (x, 0); op_mode = GET_MODE (op0); op0 = simplify_replace_rtx (op0, old, new); if (op0 == XEXP (x, 0)) return x; return simplify_gen_unary (code, mode, op0, op_mode); case RTX_BIN_ARITH: case RTX_COMM_ARITH: op0 = simplify_replace_rtx (XEXP (x, 0), old, new); op1 = simplify_replace_rtx (XEXP (x, 1), old, new); if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) return x; return simplify_gen_binary (code, mode, op0, op1); case RTX_COMPARE: case RTX_COMM_COMPARE: op0 = XEXP (x, 0); op1 = XEXP (x, 1); op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1); op0 = simplify_replace_rtx (op0, old, new); op1 = simplify_replace_rtx (op1, old, new); if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) return x; return simplify_gen_relational (code, mode, op_mode, op0, op1); case RTX_TERNARY: case RTX_BITFIELD_OPS: op0 = XEXP (x, 0); op_mode = GET_MODE (op0); op0 = simplify_replace_rtx (op0, old, new); op1 = simplify_replace_rtx (XEXP (x, 1), old, new); op2 = simplify_replace_rtx (XEXP (x, 2), old, new); if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2)) return x; if (op_mode == VOIDmode) op_mode = GET_MODE (op0); return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2); case RTX_EXTRA: /* The only case we try to handle is a SUBREG. */ if (code == SUBREG) { op0 = simplify_replace_rtx (SUBREG_REG (x), old, new); if (op0 == SUBREG_REG (x)) return x; op0 = simplify_gen_subreg (GET_MODE (x), op0, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); return op0 ? op0 : x; } break; case RTX_OBJ: if (code == MEM) { op0 = simplify_replace_rtx (XEXP (x, 0), old, new); if (op0 == XEXP (x, 0)) return x; return replace_equiv_address_nv (x, op0); } else if (code == LO_SUM) { op0 = simplify_replace_rtx (XEXP (x, 0), old, new); op1 = simplify_replace_rtx (XEXP (x, 1), old, new); /* (lo_sum (high x) x) -> x */ if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1)) return op1; if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1)) return x; return gen_rtx_LO_SUM (mode, op0, op1); } else if (code == REG) { if (REG_P (old) && REGNO (x) == REGNO (old)) return new; } break; default: break; } return x; } /* Try to simplify a unary operation CODE whose output mode is to be MODE with input operand OP whose mode was originally OP_MODE. Return zero if no simplification can be made. 
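Constant operands are folded outright, e.g. (neg (const_int 9)) in SImode gives (const_int -9); non-constant operands only benefit from the algebraic identities further down, such as (not (not X)) -> X.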
*/ rtx simplify_unary_operation (enum rtx_code code, enum machine_mode mode, rtx op, enum machine_mode op_mode) { unsigned int width = GET_MODE_BITSIZE (mode); rtx trueop = avoid_constant_pool_reference (op); if (code == VEC_DUPLICATE) { if (!VECTOR_MODE_P (mode)) abort (); if (GET_MODE (trueop) != VOIDmode && !VECTOR_MODE_P (GET_MODE (trueop)) && GET_MODE_INNER (mode) != GET_MODE (trueop)) abort (); if (GET_MODE (trueop) != VOIDmode && VECTOR_MODE_P (GET_MODE (trueop)) && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop))) abort (); if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_VECTOR) { int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); rtvec v = rtvec_alloc (n_elts); unsigned int i; if (GET_CODE (trueop) != CONST_VECTOR) for (i = 0; i < n_elts; i++) RTVEC_ELT (v, i) = trueop; else { enum machine_mode inmode = GET_MODE (trueop); int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode)); unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size); if (in_n_elts >= n_elts || n_elts % in_n_elts) abort (); for (i = 0; i < n_elts; i++) RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts); } return gen_rtx_CONST_VECTOR (mode, v); } } else if (GET_CODE (op) == CONST) return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode); if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR) { int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); enum machine_mode opmode = GET_MODE (trueop); int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode)); unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size); rtvec v = rtvec_alloc (n_elts); unsigned int i; if (op_n_elts != n_elts) abort (); for (i = 0; i < n_elts; i++) { rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode), CONST_VECTOR_ELT (trueop, i), GET_MODE_INNER (opmode)); if (!x) return 0; RTVEC_ELT (v, i) = x; } return gen_rtx_CONST_VECTOR (mode, v); } /* The order of these tests is critical so that, for example, we don't check the wrong mode (input vs. output) for a conversion operation, such as FIX. At some point, this should be simplified. */ if (code == FLOAT && GET_MODE (trueop) == VOIDmode && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT)) { HOST_WIDE_INT hv, lv; REAL_VALUE_TYPE d; if (GET_CODE (trueop) == CONST_INT) lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv); else lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop); REAL_VALUE_FROM_INT (d, lv, hv, mode); d = real_value_truncate (mode, d); return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT)) { HOST_WIDE_INT hv, lv; REAL_VALUE_TYPE d; if (GET_CODE (trueop) == CONST_INT) lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv); else lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop); if (op_mode == VOIDmode) { /* We don't know how to interpret negative-looking numbers in this case, so don't try to fold those. 
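With a VOIDmode constant there is no width to consult, so a set sign bit in the high word could equally well mean a huge unsigned value or a negative one.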
*/ if (hv < 0) return 0; } else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2) ; else hv = 0, lv &= GET_MODE_MASK (op_mode); REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode); d = real_value_truncate (mode, d); return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } if (GET_CODE (trueop) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT && width > 0) { HOST_WIDE_INT arg0 = INTVAL (trueop); HOST_WIDE_INT val; switch (code) { case NOT: val = ~ arg0; break; case NEG: val = - arg0; break; case ABS: val = (arg0 >= 0 ? arg0 : - arg0); break; case FFS: /* Don't use ffs here. Instead, get low order bit and then its number. If arg0 is zero, this will return 0, as desired. */ arg0 &= GET_MODE_MASK (mode); val = exact_log2 (arg0 & (- arg0)) + 1; break; case CLZ: arg0 &= GET_MODE_MASK (mode); if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val)) ; else val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1; break; case CTZ: arg0 &= GET_MODE_MASK (mode); if (arg0 == 0) { /* Even if the value at zero is undefined, we have to come up with some replacement. Seems good enough. */ if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val)) val = GET_MODE_BITSIZE (mode); } else val = exact_log2 (arg0 & -arg0); break; case POPCOUNT: arg0 &= GET_MODE_MASK (mode); val = 0; while (arg0) val++, arg0 &= arg0 - 1; break; case PARITY: arg0 &= GET_MODE_MASK (mode); val = 0; while (arg0) val++, arg0 &= arg0 - 1; val &= 1; break; case TRUNCATE: val = arg0; break; case ZERO_EXTEND: /* When zero-extending a CONST_INT, we need to know its original mode. */ if (op_mode == VOIDmode) abort (); if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT) { /* If we were really extending the mode, we would have to distinguish between zero-extension and sign-extension. */ if (width != GET_MODE_BITSIZE (op_mode)) abort (); val = arg0; } else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode)); else return 0; break; case SIGN_EXTEND: if (op_mode == VOIDmode) op_mode = mode; if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT) { /* If we were really extending the mode, we would have to distinguish between zero-extension and sign-extension. */ if (width != GET_MODE_BITSIZE (op_mode)) abort (); val = arg0; } else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) { val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode)); if (val & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1))) val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode); } else return 0; break; case SQRT: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case SS_TRUNCATE: case US_TRUNCATE: return 0; default: abort (); } val = trunc_int_for_mode (val, mode); return GEN_INT (val); } /* We can do some operations on integer CONST_DOUBLEs. Also allow for a DImode operation on a CONST_INT. 
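The value is treated as a (low, high) pair of host words; NOT, for example, just complements both words, and ABS negates the pair only when the high word is negative.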
*/ else if (GET_MODE (trueop) == VOIDmode && width <= HOST_BITS_PER_WIDE_INT * 2 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT)) { unsigned HOST_WIDE_INT l1, lv; HOST_WIDE_INT h1, hv; if (GET_CODE (trueop) == CONST_DOUBLE) l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop); else l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1); switch (code) { case NOT: lv = ~ l1; hv = ~ h1; break; case NEG: neg_double (l1, h1, &lv, &hv); break; case ABS: if (h1 < 0) neg_double (l1, h1, &lv, &hv); else lv = l1, hv = h1; break; case FFS: hv = 0; if (l1 == 0) { if (h1 == 0) lv = 0; else lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1; } else lv = exact_log2 (l1 & -l1) + 1; break; case CLZ: hv = 0; if (h1 != 0) lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1 - HOST_BITS_PER_WIDE_INT; else if (l1 != 0) lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1; else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv)) lv = GET_MODE_BITSIZE (mode); break; case CTZ: hv = 0; if (l1 != 0) lv = exact_log2 (l1 & -l1); else if (h1 != 0) lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1); else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv)) lv = GET_MODE_BITSIZE (mode); break; case POPCOUNT: hv = 0; lv = 0; while (l1) lv++, l1 &= l1 - 1; while (h1) lv++, h1 &= h1 - 1; break; case PARITY: hv = 0; lv = 0; while (l1) lv++, l1 &= l1 - 1; while (h1) lv++, h1 &= h1 - 1; lv &= 1; break; case TRUNCATE: /* This is just a change-of-mode, so do nothing. */ lv = l1, hv = h1; break; case ZERO_EXTEND: if (op_mode == VOIDmode) abort (); if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT) return 0; hv = 0; lv = l1 & GET_MODE_MASK (op_mode); break; case SIGN_EXTEND: if (op_mode == VOIDmode || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT) return 0; else { lv = l1 & GET_MODE_MASK (op_mode); if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT && (lv & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0) lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode); hv = HWI_SIGN_EXTEND (lv); } break; case SQRT: return 0; default: return 0; } return immed_double_const (lv, hv, mode); } else if (GET_CODE (trueop) == CONST_DOUBLE && GET_MODE_CLASS (mode) == MODE_FLOAT) { REAL_VALUE_TYPE d, t; REAL_VALUE_FROM_CONST_DOUBLE (d, trueop); switch (code) { case SQRT: if (HONOR_SNANS (mode) && real_isnan (&d)) return 0; real_sqrt (&t, mode, &d); d = t; break; case ABS: d = REAL_VALUE_ABS (d); break; case NEG: d = REAL_VALUE_NEGATE (d); break; case FLOAT_TRUNCATE: d = real_value_truncate (mode, d); break; case FLOAT_EXTEND: /* All this does is change the mode. */ break; case FIX: real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL); break; case NOT: { long tmp[4]; int i; real_to_target (tmp, &d, GET_MODE (trueop)); for (i = 0; i < 4; i++) tmp[i] = ~tmp[i]; real_from_target (&d, tmp, mode); } default: abort (); } return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } else if (GET_CODE (trueop) == CONST_DOUBLE && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT && GET_MODE_CLASS (mode) == MODE_INT && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0) { /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX operators are intentionally left unspecified (to ease implementation by target backends), for consistency, this routine implements the same semantics for constant folding as used by the middle-end. */ HOST_WIDE_INT xh, xl, th, tl; REAL_VALUE_TYPE x, t; REAL_VALUE_FROM_CONST_DOUBLE (x, trueop); switch (code) { case FIX: if (REAL_VALUE_ISNAN (x)) return const0_rtx; /* Test against the signed upper bound. 
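For a 32-bit result on a 64-bit host the bound is 0x7fffffff; any larger value saturates to it, matching the middle-end constant-folding semantics mentioned above.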
*/ if (width > HOST_BITS_PER_WIDE_INT) { th = ((unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1; tl = -1; } else { th = 0; tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1; } real_from_integer (&t, VOIDmode, tl, th, 0); if (REAL_VALUES_LESS (t, x)) { xh = th; xl = tl; break; } /* Test against the signed lower bound. */ if (width > HOST_BITS_PER_WIDE_INT) { th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1); tl = 0; } else { th = -1; tl = (HOST_WIDE_INT) -1 << (width - 1); } real_from_integer (&t, VOIDmode, tl, th, 0); if (REAL_VALUES_LESS (x, t)) { xh = th; xl = tl; break; } REAL_VALUE_TO_INT (&xl, &xh, x); break; case UNSIGNED_FIX: if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x)) return const0_rtx; /* Test against the unsigned upper bound. */ if (width == 2*HOST_BITS_PER_WIDE_INT) { th = -1; tl = -1; } else if (width >= HOST_BITS_PER_WIDE_INT) { th = ((unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT)) - 1; tl = -1; } else { th = 0; tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1; } real_from_integer (&t, VOIDmode, tl, th, 1); if (REAL_VALUES_LESS (t, x)) { xh = th; xl = tl; break; } REAL_VALUE_TO_INT (&xl, &xh, x); break; default: abort (); } return immed_double_const (xl, xh, mode); } /* This was formerly used only for non-IEEE float. eggert@twinsun.com says it is safe for IEEE also. */ else { enum rtx_code reversed; rtx temp; /* There are some simplifications we can do even if the operands aren't constant. */ switch (code) { case NOT: /* (not (not X)) == X. */ if (GET_CODE (op) == NOT) return XEXP (op, 0); /* (not (eq X Y)) == (ne X Y), etc. */ if (COMPARISON_P (op) && (mode == BImode || STORE_FLAG_VALUE == -1) && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)) return simplify_gen_relational (reversed, mode, VOIDmode, XEXP (op, 0), XEXP (op, 1)); /* (not (plus X -1)) can become (neg X). */ if (GET_CODE (op) == PLUS && XEXP (op, 1) == constm1_rtx) return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); /* Similarly, (not (neg X)) is (plus X -1). */ if (GET_CODE (op) == NEG) return plus_constant (XEXP (op, 0), -1); /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */ if (GET_CODE (op) == XOR && GET_CODE (XEXP (op, 1)) == CONST_INT && (temp = simplify_unary_operation (NOT, mode, XEXP (op, 1), mode)) != 0) return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp); /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */ if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT && mode_signbit_p (mode, XEXP (op, 1)) && (temp = simplify_unary_operation (NOT, mode, XEXP (op, 1), mode)) != 0) return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp); /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for operands other than 1, but that is not valid. We could do a similar simplification for (not (lshiftrt C X)) where C is just the sign bit, but this doesn't seem common enough to bother with. */ if (GET_CODE (op) == ASHIFT && XEXP (op, 0) == const1_rtx) { temp = simplify_gen_unary (NOT, mode, const1_rtx, mode); return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1)); } /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done by reversing the comparison code if valid. 
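For example (not (lt X Y)) becomes (ge X Y), provided reversed_comparison_code says the reversal is safe for these operands.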
*/ if (STORE_FLAG_VALUE == -1 && COMPARISON_P (op) && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN) return simplify_gen_relational (reversed, mode, VOIDmode, XEXP (op, 0), XEXP (op, 1)); /* (not (ashiftrt foo C)) where C is the number of bits in FOO minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1, so we can perform the above simplification. */ if (STORE_FLAG_VALUE == -1 && GET_CODE (op) == ASHIFTRT && GET_CODE (XEXP (op, 1)) == CONST_INT && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) return simplify_gen_relational (GE, mode, VOIDmode, XEXP (op, 0), const0_rtx); break; case NEG: /* (neg (neg X)) == X. */ if (GET_CODE (op) == NEG) return XEXP (op, 0); /* (neg (plus X 1)) can become (not X). */ if (GET_CODE (op) == PLUS && XEXP (op, 1) == const1_rtx) return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode); /* Similarly, (neg (not X)) is (plus X 1). */ if (GET_CODE (op) == NOT) return plus_constant (XEXP (op, 0), 1); /* (neg (minus X Y)) can become (minus Y X). This transformation isn't safe for modes with signed zeros, since if X and Y are both +0, (minus Y X) is the same as (minus X Y). If the rounding mode is towards +infinity (or -infinity) then the two expressions will be rounded differently. */ if (GET_CODE (op) == MINUS && !HONOR_SIGNED_ZEROS (mode) && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0)); if (GET_CODE (op) == PLUS && !HONOR_SIGNED_ZEROS (mode) && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) { /* (neg (plus A C)) is simplified to (minus -C A). */ if (GET_CODE (XEXP (op, 1)) == CONST_INT || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE) { temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode); if (temp) return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0)); } /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */ temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1)); } /* (neg (mult A B)) becomes (mult (neg A) B). This works even for floating-point values. */ if (GET_CODE (op) == MULT && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) { temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1)); } /* NEG commutes with ASHIFT since it is multiplication. Only do this if we can then eliminate the NEG (e.g., if the operand is a constant). */ if (GET_CODE (op) == ASHIFT) { temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode); if (temp) return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1)); } /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when C is equal to the width of MODE minus 1. */ if (GET_CODE (op) == ASHIFTRT && GET_CODE (XEXP (op, 1)) == CONST_INT && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) return simplify_gen_binary (LSHIFTRT, mode, XEXP (op, 0), XEXP (op, 1)); /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when C is equal to the width of MODE minus 1. */ if (GET_CODE (op) == LSHIFTRT && GET_CODE (XEXP (op, 1)) == CONST_INT && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) return simplify_gen_binary (ASHIFTRT, mode, XEXP (op, 0), XEXP (op, 1)); break; case SIGN_EXTEND: /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2)))) becomes just the MINUS if its mode is MODE. This allows folding switch statements on machines using casesi (such as the VAX). 
*/ if (GET_CODE (op) == TRUNCATE && GET_MODE (XEXP (op, 0)) == mode && GET_CODE (XEXP (op, 0)) == MINUS && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF) return XEXP (op, 0); /* Check for a sign extension of a subreg of a promoted variable, where the promotion is sign-extended, and the target mode is the same as the variable's promotion. */ if (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op) && ! SUBREG_PROMOTED_UNSIGNED_P (op) && GET_MODE (XEXP (op, 0)) == mode) return XEXP (op, 0); #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) if (! POINTERS_EXTEND_UNSIGNED && mode == Pmode && GET_MODE (op) == ptr_mode && (CONSTANT_P (op) || (GET_CODE (op) == SUBREG && REG_P (SUBREG_REG (op)) && REG_POINTER (SUBREG_REG (op)) && GET_MODE (SUBREG_REG (op)) == Pmode))) return convert_memory_address (Pmode, op); #endif break; case ZERO_EXTEND: /* Check for a zero extension of a subreg of a promoted variable, where the promotion is zero-extended, and the target mode is the same as the variable's promotion. */ if (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op) && SUBREG_PROMOTED_UNSIGNED_P (op) && GET_MODE (XEXP (op, 0)) == mode) return XEXP (op, 0); #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend) if (POINTERS_EXTEND_UNSIGNED > 0 && mode == Pmode && GET_MODE (op) == ptr_mode && (CONSTANT_P (op) || (GET_CODE (op) == SUBREG && REG_P (SUBREG_REG (op)) && REG_POINTER (SUBREG_REG (op)) && GET_MODE (SUBREG_REG (op)) == Pmode))) return convert_memory_address (Pmode, op); #endif break; default: break; } return 0; } } /* Subroutine of simplify_binary_operation to simplify a commutative, associative binary operation CODE with result mode MODE, operating on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR, SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or canonicalization is possible. */ static rtx simplify_associative_operation (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1) { rtx tem; /* Linearize the operator to the left. */ if (GET_CODE (op1) == code) { /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */ if (GET_CODE (op0) == code) { tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0)); return simplify_gen_binary (code, mode, tem, XEXP (op1, 1)); } /* "a op (b op c)" becomes "(b op c) op a". */ if (! swap_commutative_operands_p (op1, op0)) return simplify_gen_binary (code, mode, op1, op0); tem = op0; op0 = op1; op1 = tem; } if (GET_CODE (op0) == code) { /* Canonicalize "(x op c) op y" as "(x op y) op c". */ if (swap_commutative_operands_p (XEXP (op0, 1), op1)) { tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1); return simplify_gen_binary (code, mode, tem, XEXP (op0, 1)); } /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */ tem = swap_commutative_operands_p (XEXP (op0, 1), op1) ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1)) : simplify_binary_operation (code, mode, XEXP (op0, 1), op1); if (tem != 0) return simplify_gen_binary (code, mode, XEXP (op0, 0), tem); /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */ tem = swap_commutative_operands_p (XEXP (op0, 0), op1) ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0)) : simplify_binary_operation (code, mode, XEXP (op0, 0), op1); if (tem != 0) return simplify_gen_binary (code, mode, tem, XEXP (op0, 1)); } return 0; } /* Simplify a binary operation CODE with result mode MODE, operating on OP0 and OP1. Return 0 if no simplification is possible. 
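Two constant operands are folded outright, e.g. (plus (const_int 2) (const_int 3)) yields (const_int 5); otherwise only the algebraic special cases below are tried.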
Don't use this for relational operations such as EQ or LT. Use simplify_relational_operation instead. */ rtx simplify_binary_operation (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1) { HOST_WIDE_INT arg0, arg1, arg0s, arg1s; HOST_WIDE_INT val; unsigned int width = GET_MODE_BITSIZE (mode); rtx trueop0, trueop1; rtx tem; #ifdef ENABLE_CHECKING /* Relational operations don't work here. We must know the mode of the operands in order to do the comparison correctly. Assuming a full word can give incorrect results. Consider comparing 128 with -128 in QImode. */ if (GET_RTX_CLASS (code) == RTX_COMPARE || GET_RTX_CLASS (code) == RTX_COMM_COMPARE) abort (); #endif /* Make sure the constant is second. */ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH && swap_commutative_operands_p (op0, op1)) { tem = op0, op0 = op1, op1 = tem; } trueop0 = avoid_constant_pool_reference (op0); trueop1 = avoid_constant_pool_reference (op1); if (VECTOR_MODE_P (mode) && GET_CODE (trueop0) == CONST_VECTOR && GET_CODE (trueop1) == CONST_VECTOR) { int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); enum machine_mode op0mode = GET_MODE (trueop0); int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode)); unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size); enum machine_mode op1mode = GET_MODE (trueop1); int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode)); unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size); rtvec v = rtvec_alloc (n_elts); unsigned int i; if (op0_n_elts != n_elts || op1_n_elts != n_elts) abort (); for (i = 0; i < n_elts; i++) { rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode), CONST_VECTOR_ELT (trueop0, i), CONST_VECTOR_ELT (trueop1, i)); if (!x) return 0; RTVEC_ELT (v, i) = x; } return gen_rtx_CONST_VECTOR (mode, v); } if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_CODE (trueop0) == CONST_DOUBLE && GET_CODE (trueop1) == CONST_DOUBLE && mode == GET_MODE (op0) && mode == GET_MODE (op1)) { if (code == AND || code == IOR || code == XOR) { long tmp0[4]; long tmp1[4]; REAL_VALUE_TYPE r; int i; real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0), GET_MODE (op0)); real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1), GET_MODE (op1)); for (i = 0; i < 4; i++) { if (code == AND) tmp0[i] &= tmp1[i]; else if (code == IOR) tmp0[i] |= tmp1[i]; else if (code == XOR) tmp0[i] ^= tmp1[i]; else abort (); } real_from_target (&r, tmp0, mode); return CONST_DOUBLE_FROM_REAL_VALUE (r, mode); } else { REAL_VALUE_TYPE f0, f1, value; REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0); REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1); f0 = real_value_truncate (mode, f0); f1 = real_value_truncate (mode, f1); if (HONOR_SNANS (mode) && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1))) return 0; if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0) && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode))) return 0; if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode) && flag_trapping_math && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1)) { int s0 = REAL_VALUE_NEGATIVE (f0); int s1 = REAL_VALUE_NEGATIVE (f1); switch (code) { case PLUS: /* Inf + -Inf = NaN plus exception. */ if (s0 != s1) return 0; break; case MINUS: /* Inf - Inf = NaN plus exception. */ if (s0 == s1) return 0; break; case DIV: /* Inf / Inf = NaN plus exception. 
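Like the Inf +/- Inf cases above, it is left unfolded so the exception is not lost when -ftrapping-math is in effect.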
*/ return 0; default: break; } } if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode) && flag_trapping_math && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0)) || (REAL_VALUE_ISINF (f1) && REAL_VALUES_EQUAL (f0, dconst0)))) /* Inf * 0 = NaN plus exception. */ return 0; REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1); value = real_value_truncate (mode, value); return CONST_DOUBLE_FROM_REAL_VALUE (value, mode); } } /* We can fold some multi-word operations. */ if (GET_MODE_CLASS (mode) == MODE_INT && width == HOST_BITS_PER_WIDE_INT * 2 && (GET_CODE (trueop0) == CONST_DOUBLE || GET_CODE (trueop0) == CONST_INT) && (GET_CODE (trueop1) == CONST_DOUBLE || GET_CODE (trueop1) == CONST_INT)) { unsigned HOST_WIDE_INT l1, l2, lv, lt; HOST_WIDE_INT h1, h2, hv, ht; if (GET_CODE (trueop0) == CONST_DOUBLE) l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0); else l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1); if (GET_CODE (trueop1) == CONST_DOUBLE) l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1); else l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2); switch (code) { case MINUS: /* A - B == A + (-B). */ neg_double (l2, h2, &lv, &hv); l2 = lv, h2 = hv; /* Fall through.... */ case PLUS: add_double (l1, h1, l2, h2, &lv, &hv); break; case MULT: mul_double (l1, h1, l2, h2, &lv, &hv); break; case DIV: if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2, &lv, &hv, <, &ht)) return 0; break; case MOD: if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2, <, &ht, &lv, &hv)) return 0; break; case UDIV: if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2, &lv, &hv, <, &ht)) return 0; break; case UMOD: if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2, <, &ht, &lv, &hv)) return 0; break; case AND: lv = l1 & l2, hv = h1 & h2; break; case IOR: lv = l1 | l2, hv = h1 | h2; break; case XOR: lv = l1 ^ l2, hv = h1 ^ h2; break; case SMIN: if (h1 < h2 || (h1 == h2 && ((unsigned HOST_WIDE_INT) l1 < (unsigned HOST_WIDE_INT) l2))) lv = l1, hv = h1; else lv = l2, hv = h2; break; case SMAX: if (h1 > h2 || (h1 == h2 && ((unsigned HOST_WIDE_INT) l1 > (unsigned HOST_WIDE_INT) l2))) lv = l1, hv = h1; else lv = l2, hv = h2; break; case UMIN: if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2 || (h1 == h2 && ((unsigned HOST_WIDE_INT) l1 < (unsigned HOST_WIDE_INT) l2))) lv = l1, hv = h1; else lv = l2, hv = h2; break; case UMAX: if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2 || (h1 == h2 && ((unsigned HOST_WIDE_INT) l1 > (unsigned HOST_WIDE_INT) l2))) lv = l1, hv = h1; else lv = l2, hv = h2; break; case LSHIFTRT: case ASHIFTRT: case ASHIFT: case ROTATE: case ROTATERT: if (SHIFT_COUNT_TRUNCATED) l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0; if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode)) return 0; if (code == LSHIFTRT || code == ASHIFTRT) rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, code == ASHIFTRT); else if (code == ASHIFT) lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1); else if (code == ROTATE) lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); else /* code == ROTATERT */ rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv); break; default: return 0; } return immed_double_const (lv, hv, mode); } if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT || width > HOST_BITS_PER_WIDE_INT || width == 0) { /* Even if we can't compute a constant result, there are some cases worth simplifying. */ switch (code) { case PLUS: /* Maybe simplify x + 0 to x. 
The two expressions are equivalent when x is NaN, infinite, or finite and nonzero. They aren't when x is -0 and the rounding mode is not towards -infinity, since (-0) + 0 is then 0. */ if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode)) return op0; /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These transformations are safe even for IEEE. */ if (GET_CODE (op0) == NEG) return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0)); else if (GET_CODE (op1) == NEG) return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0)); /* (~a) + 1 -> -a */ if (INTEGRAL_MODE_P (mode) && GET_CODE (op0) == NOT && trueop1 == const1_rtx) return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode); /* Handle both-operands-constant cases. We can only add CONST_INTs to constants since the sum of relocatable symbols can't be handled by most assemblers. Don't add CONST_INT to CONST_INT since overflow won't be computed properly if wider than HOST_BITS_PER_WIDE_INT. */ if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode && GET_CODE (op1) == CONST_INT) return plus_constant (op0, INTVAL (op1)); else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode && GET_CODE (op0) == CONST_INT) return plus_constant (op1, INTVAL (op0)); /* See if this is something like X * C - X or vice versa or if the multiplication is written as a shift. If so, we can distribute and make a new multiply, shift, or maybe just have X (if C is 2 in the example above). But don't make something more expensive than we had before. */ if (! FLOAT_MODE_P (mode)) { HOST_WIDE_INT coeff0 = 1, coeff1 = 1; rtx lhs = op0, rhs = op1; if (GET_CODE (lhs) == NEG) coeff0 = -1, lhs = XEXP (lhs, 0); else if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 1)) == CONST_INT) { coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == ASHIFT && GET_CODE (XEXP (lhs, 1)) == CONST_INT && INTVAL (XEXP (lhs, 1)) >= 0 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) { coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); lhs = XEXP (lhs, 0); } if (GET_CODE (rhs) == NEG) coeff1 = -1, rhs = XEXP (rhs, 0); else if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 1)) == CONST_INT) { coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == ASHIFT && GET_CODE (XEXP (rhs, 1)) == CONST_INT && INTVAL (XEXP (rhs, 1)) >= 0 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) { coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)); rhs = XEXP (rhs, 0); } if (rtx_equal_p (lhs, rhs)) { rtx orig = gen_rtx_PLUS (mode, op0, op1); tem = simplify_gen_binary (MULT, mode, lhs, GEN_INT (coeff0 + coeff1)); return rtx_cost (tem, SET) <= rtx_cost (orig, SET) ? tem : 0; } } /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */ if ((GET_CODE (op1) == CONST_INT || GET_CODE (op1) == CONST_DOUBLE) && GET_CODE (op0) == XOR && (GET_CODE (XEXP (op0, 1)) == CONST_INT || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE) && mode_signbit_p (mode, op1)) return simplify_gen_binary (XOR, mode, XEXP (op0, 0), simplify_gen_binary (XOR, mode, op1, XEXP (op0, 1))); /* If one of the operands is a PLUS or a MINUS, see if we can simplify this by the associative law. Don't use the associative law for floating point. The inaccuracy makes it nonassociative, and subtle programs can break if operations are associated. 
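(a + b) + c and a + (b + c) can round differently, so the rewrite below is restricted to integer modes; floating-point addition is reassociated only under -funsafe-math-optimizations.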
*/ if (INTEGRAL_MODE_P (mode) && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS || (GET_CODE (op0) == CONST && GET_CODE (XEXP (op0, 0)) == PLUS) || (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == PLUS)) && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0) return tem; /* Reassociate floating point addition only when the user specifies unsafe math optimizations. */ if (FLOAT_MODE_P (mode) && flag_unsafe_math_optimizations) { tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; } break; case COMPARE: #ifdef HAVE_cc0 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't using cc0, in which case we want to leave it as a COMPARE so we can distinguish it from a register-register-copy. In IEEE floating point, x-0 is not the same as x. */ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) && trueop1 == CONST0_RTX (mode)) return op0; #endif /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */ if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT) || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU)) && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx) { rtx xop00 = XEXP (op0, 0); rtx xop10 = XEXP (op1, 0); #ifdef HAVE_cc0 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0) #else if (REG_P (xop00) && REG_P (xop10) && GET_MODE (xop00) == GET_MODE (xop10) && REGNO (xop00) == REGNO (xop10) && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC) #endif return xop00; } break; case MINUS: /* We can't assume x-x is 0 even with non-IEEE floating point, but since it is zero except in very strange circumstances, we will treat it as zero with -funsafe-math-optimizations. */ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0) && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)) return CONST0_RTX (mode); /* Change subtraction from zero into negation. (0 - x) is the same as -x when x is NaN, infinite, or finite and nonzero. But if the mode has signed zeros, and does not round towards -infinity, then 0 - 0 is 0, not -0. */ if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode)) return simplify_gen_unary (NEG, mode, op1, mode); /* (-1 - a) is ~a. */ if (trueop0 == constm1_rtx) return simplify_gen_unary (NOT, mode, op1, mode); /* Subtracting 0 has no effect unless the mode has signed zeros and supports rounding towards -infinity. In such a case, 0 - 0 is -0. */ if (!(HONOR_SIGNED_ZEROS (mode) && HONOR_SIGN_DEPENDENT_ROUNDING (mode)) && trueop1 == CONST0_RTX (mode)) return op0; /* See if this is something like X * C - X or vice versa or if the multiplication is written as a shift. If so, we can distribute and make a new multiply, shift, or maybe just have X (if C is 2 in the example above). But don't make something more expensive than we had before. */ if (! 
FLOAT_MODE_P (mode)) { HOST_WIDE_INT coeff0 = 1, coeff1 = 1; rtx lhs = op0, rhs = op1; if (GET_CODE (lhs) == NEG) coeff0 = -1, lhs = XEXP (lhs, 0); else if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 1)) == CONST_INT) { coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == ASHIFT && GET_CODE (XEXP (lhs, 1)) == CONST_INT && INTVAL (XEXP (lhs, 1)) >= 0 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) { coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1)); lhs = XEXP (lhs, 0); } if (GET_CODE (rhs) == NEG) coeff1 = - 1, rhs = XEXP (rhs, 0); else if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 1)) == CONST_INT) { coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == ASHIFT && GET_CODE (XEXP (rhs, 1)) == CONST_INT && INTVAL (XEXP (rhs, 1)) >= 0 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) { coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)); rhs = XEXP (rhs, 0); } if (rtx_equal_p (lhs, rhs)) { rtx orig = gen_rtx_MINUS (mode, op0, op1); tem = simplify_gen_binary (MULT, mode, lhs, GEN_INT (coeff0 - coeff1)); return rtx_cost (tem, SET) <= rtx_cost (orig, SET) ? tem : 0; } } /* (a - (-b)) -> (a + b). True even for IEEE. */ if (GET_CODE (op1) == NEG) return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0)); /* (-x - c) may be simplified as (-c - x). */ if (GET_CODE (op0) == NEG && (GET_CODE (op1) == CONST_INT || GET_CODE (op1) == CONST_DOUBLE)) { tem = simplify_unary_operation (NEG, mode, op1, mode); if (tem) return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0)); } /* If one of the operands is a PLUS or a MINUS, see if we can simplify this by the associative law. Don't use the associative law for floating point. The inaccuracy makes it nonassociative, and subtle programs can break if operations are associated. */ if (INTEGRAL_MODE_P (mode) && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS || (GET_CODE (op0) == CONST && GET_CODE (XEXP (op0, 0)) == PLUS) || (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == PLUS)) && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0) return tem; /* Don't let a relocatable value get a negative coeff. */ if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode) return simplify_gen_binary (PLUS, mode, op0, neg_const_int (mode, op1)); /* (x - (x & y)) -> (x & ~y) */ if (GET_CODE (op1) == AND) { if (rtx_equal_p (op0, XEXP (op1, 0))) { tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1), GET_MODE (XEXP (op1, 1))); return simplify_gen_binary (AND, mode, op0, tem); } if (rtx_equal_p (op0, XEXP (op1, 1))) { tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0), GET_MODE (XEXP (op1, 0))); return simplify_gen_binary (AND, mode, op0, tem); } } break; case MULT: if (trueop1 == constm1_rtx) return simplify_gen_unary (NEG, mode, op0, mode); /* Maybe simplify x * 0 to 0. The reduction is not valid if x is NaN, since x * 0 is then also NaN. Nor is it valid when the mode has signed zeros, since multiplying a negative number by 0 will give -0, not 0. */ if (!HONOR_NANS (mode) && !HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0)) return op1; /* In IEEE floating point, x*1 is not equivalent to x for signalling NaNs. */ if (!HONOR_SNANS (mode) && trueop1 == CONST1_RTX (mode)) return op0; /* Convert multiply by constant power of two into shift unless we are still generating RTL. This test is a kludge. 
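E.g. x * 8 becomes (ashift x 3); when the mode is wider than a host word, a multiplier whose only set bit is the host sign bit is rejected, as explained in the condition below.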
*/ if (GET_CODE (trueop1) == CONST_INT && (val = exact_log2 (INTVAL (trueop1))) >= 0 /* If the mode is larger than the host word size, and the uppermost bit is set, then this isn't a power of two due to implicit sign extension. */ && (width <= HOST_BITS_PER_WIDE_INT || val != HOST_BITS_PER_WIDE_INT - 1) && ! rtx_equal_function_value_matters) return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val)); /* x*2 is x+x and x*(-1) is -x */ if (GET_CODE (trueop1) == CONST_DOUBLE && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT && GET_MODE (op0) == mode) { REAL_VALUE_TYPE d; REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); if (REAL_VALUES_EQUAL (d, dconst2)) return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0)); if (REAL_VALUES_EQUAL (d, dconstm1)) return simplify_gen_unary (NEG, mode, op0, mode); } /* Reassociate multiplication, but for floating point MULTs only when the user specifies unsafe math optimizations. */ if (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations) { tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; } break; case IOR: if (trueop1 == const0_rtx) return op0; if (GET_CODE (trueop1) == CONST_INT && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))) return op1; if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) return op0; /* A | (~A) -> -1 */ if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) && ! side_effects_p (op0) && GET_MODE_CLASS (mode) != MODE_CC) return constm1_rtx; tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; break; case XOR: if (trueop1 == const0_rtx) return op0; if (GET_CODE (trueop1) == CONST_INT && ((INTVAL (trueop1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))) return simplify_gen_unary (NOT, mode, op0, mode); if (trueop0 == trueop1 && ! side_effects_p (op0) && GET_MODE_CLASS (mode) != MODE_CC) return const0_rtx; /* Canonicalize XOR of the most significant bit to PLUS. */ if ((GET_CODE (op1) == CONST_INT || GET_CODE (op1) == CONST_DOUBLE) && mode_signbit_p (mode, op1)) return simplify_gen_binary (PLUS, mode, op0, op1); /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */ if ((GET_CODE (op1) == CONST_INT || GET_CODE (op1) == CONST_DOUBLE) && GET_CODE (op0) == PLUS && (GET_CODE (XEXP (op0, 1)) == CONST_INT || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE) && mode_signbit_p (mode, XEXP (op0, 1))) return simplify_gen_binary (XOR, mode, XEXP (op0, 0), simplify_gen_binary (XOR, mode, op1, XEXP (op0, 1))); tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; break; case AND: if (trueop1 == const0_rtx && ! side_effects_p (op0)) return const0_rtx; /* If we are turning off bits already known off in OP0, we need not do an AND. */ if (GET_CODE (trueop1) == CONST_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0) return op0; if (trueop0 == trueop1 && ! side_effects_p (op0) && GET_MODE_CLASS (mode) != MODE_CC) return op0; /* A & (~A) -> 0 */ if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1)) || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0))) && ! side_effects_p (op0) && GET_MODE_CLASS (mode) != MODE_CC) return const0_rtx; /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M, ((A & N) + B) & M -> (A + B) & M Similarly if (N & M) == 0, ((A | N) + B) & M -> (A + B) & M and for - instead of + and/or ^ instead of |. 
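Concretely, with M == 0xff and N == 0x1ff, ((A & 0x1ff) + B) & 0xff folds to (A + B) & 0xff: clearing bits above the mask cannot change the low eight bits of the sum, because carries only propagate upward.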
*/ if (GET_CODE (trueop1) == CONST_INT && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT && ~INTVAL (trueop1) && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS)) { rtx pmop[2]; int which; pmop[0] = XEXP (op0, 0); pmop[1] = XEXP (op0, 1); for (which = 0; which < 2; which++) { tem = pmop[which]; switch (GET_CODE (tem)) { case AND: if (GET_CODE (XEXP (tem, 1)) == CONST_INT && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == INTVAL (trueop1)) pmop[which] = XEXP (tem, 0); break; case IOR: case XOR: if (GET_CODE (XEXP (tem, 1)) == CONST_INT && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0) pmop[which] = XEXP (tem, 0); break; default: break; } } if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1)) { tem = simplify_gen_binary (GET_CODE (op0), mode, pmop[0], pmop[1]); return simplify_gen_binary (code, mode, tem, op1); } } tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; break; case UDIV: /* 0/x is 0 (or x&0 if x has side-effects). */ if (trueop0 == const0_rtx) return side_effects_p (op1) ? simplify_gen_binary (AND, mode, op1, const0_rtx) : const0_rtx; /* x/1 is x. */ if (trueop1 == const1_rtx) { /* Handle narrowing UDIV. */ rtx x = gen_lowpart_common (mode, op0); if (x) return x; if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode) return gen_lowpart_SUBREG (mode, op0); return op0; } /* Convert divide by power of two into shift. */ if (GET_CODE (trueop1) == CONST_INT && (arg1 = exact_log2 (INTVAL (trueop1))) > 0) return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1)); break; case DIV: /* Handle floating point and integers separately. */ if (GET_MODE_CLASS (mode) == MODE_FLOAT) { /* Maybe change 0.0 / x to 0.0. This transformation isn't safe for modes with NaNs, since 0.0 / 0.0 will then be NaN rather than 0.0. Nor is it safe for modes with signed zeros, since dividing 0 by a negative number gives -0.0 */ if (trueop0 == CONST0_RTX (mode) && !HONOR_NANS (mode) && !HONOR_SIGNED_ZEROS (mode) && ! side_effects_p (op1)) return op0; /* x/1.0 is x. */ if (trueop1 == CONST1_RTX (mode) && !HONOR_SNANS (mode)) return op0; if (GET_CODE (trueop1) == CONST_DOUBLE && trueop1 != CONST0_RTX (mode)) { REAL_VALUE_TYPE d; REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1); /* x/-1.0 is -x. */ if (REAL_VALUES_EQUAL (d, dconstm1) && !HONOR_SNANS (mode)) return simplify_gen_unary (NEG, mode, op0, mode); /* Change FP division by a constant into multiplication. Only do this with -funsafe-math-optimizations. */ if (flag_unsafe_math_optimizations && !REAL_VALUES_EQUAL (d, dconst0)) { REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d); tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode); return simplify_gen_binary (MULT, mode, op0, tem); } } } else { /* 0/x is 0 (or x&0 if x has side-effects). */ if (trueop0 == const0_rtx) return side_effects_p (op1) ? simplify_gen_binary (AND, mode, op1, const0_rtx) : const0_rtx; /* x/1 is x. */ if (trueop1 == const1_rtx) { /* Handle narrowing DIV. */ rtx x = gen_lowpart_common (mode, op0); if (x) return x; if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode) return gen_lowpart_SUBREG (mode, op0); return op0; } /* x/-1 is -x. */ if (trueop1 == constm1_rtx) { rtx x = gen_lowpart_common (mode, op0); if (!x) x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode) ? gen_lowpart_SUBREG (mode, op0) : op0; return simplify_gen_unary (NEG, mode, x, mode); } } break; case UMOD: /* 0%x is 0 (or x&0 if x has side-effects). */ if (trueop0 == const0_rtx) return side_effects_p (op1) ? 
simplify_gen_binary (AND, mode, op1, const0_rtx) : const0_rtx; /* x%1 is 0 (or x&0 if x has side-effects). */ if (trueop1 == const1_rtx) return side_effects_p (op0) ? simplify_gen_binary (AND, mode, op0, const0_rtx) : const0_rtx; /* Implement modulus by power of two as AND. */ if (GET_CODE (trueop1) == CONST_INT && exact_log2 (INTVAL (trueop1)) > 0) return simplify_gen_binary (AND, mode, op0, GEN_INT (INTVAL (op1) - 1)); break; case MOD: /* 0%x is 0 (or x&0 if x has side-effects). */ if (trueop0 == const0_rtx) return side_effects_p (op1) ? simplify_gen_binary (AND, mode, op1, const0_rtx) : const0_rtx; /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */ if (trueop1 == const1_rtx || trueop1 == constm1_rtx) return side_effects_p (op0) ? simplify_gen_binary (AND, mode, op0, const0_rtx) : const0_rtx; break; case ROTATERT: case ROTATE: case ASHIFTRT: /* Rotating ~0 always results in ~0. */ if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode) && ! side_effects_p (op1)) return op0; /* Fall through.... */ case ASHIFT: case LSHIFTRT: if (trueop1 == const0_rtx) return op0; if (trueop0 == const0_rtx && ! side_effects_p (op1)) return op0; break; case SMIN: if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1) && ! side_effects_p (op0)) return op1; if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) return op0; tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; break; case SMAX: if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT && ((unsigned HOST_WIDE_INT) INTVAL (trueop1) == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1) && ! side_effects_p (op0)) return op1; if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) return op0; tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; break; case UMIN: if (trueop1 == const0_rtx && ! side_effects_p (op0)) return op1; if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) return op0; tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; break; case UMAX: if (trueop1 == constm1_rtx && ! side_effects_p (op0)) return op1; if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) return op0; tem = simplify_associative_operation (code, mode, op0, op1); if (tem) return tem; break; case SS_PLUS: case US_PLUS: case SS_MINUS: case US_MINUS: /* ??? There are simplifications that can be done.
*/ return 0; case VEC_SELECT: if (!VECTOR_MODE_P (mode)) { if (!VECTOR_MODE_P (GET_MODE (trueop0)) || (mode != GET_MODE_INNER (GET_MODE (trueop0))) || GET_CODE (trueop1) != PARALLEL || XVECLEN (trueop1, 0) != 1 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT) abort (); if (GET_CODE (trueop0) == CONST_VECTOR) return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0))); } else { if (!VECTOR_MODE_P (GET_MODE (trueop0)) || (GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop0))) || GET_CODE (trueop1) != PARALLEL) abort (); if (GET_CODE (trueop0) == CONST_VECTOR) { int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); rtvec v = rtvec_alloc (n_elts); unsigned int i; if (XVECLEN (trueop1, 0) != (int) n_elts) abort (); for (i = 0; i < n_elts; i++) { rtx x = XVECEXP (trueop1, 0, i); if (GET_CODE (x) != CONST_INT) abort (); RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x)); } return gen_rtx_CONST_VECTOR (mode, v); } } return 0; case VEC_CONCAT: { enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode ? GET_MODE (trueop0) : GET_MODE_INNER (mode)); enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode ? GET_MODE (trueop1) : GET_MODE_INNER (mode)); if (!VECTOR_MODE_P (mode) || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode) != GET_MODE_SIZE (mode))) abort (); if ((VECTOR_MODE_P (op0_mode) && (GET_MODE_INNER (mode) != GET_MODE_INNER (op0_mode))) || (!VECTOR_MODE_P (op0_mode) && GET_MODE_INNER (mode) != op0_mode)) abort (); if ((VECTOR_MODE_P (op1_mode) && (GET_MODE_INNER (mode) != GET_MODE_INNER (op1_mode))) || (!VECTOR_MODE_P (op1_mode) && GET_MODE_INNER (mode) != op1_mode)) abort (); if ((GET_CODE (trueop0) == CONST_VECTOR || GET_CODE (trueop0) == CONST_INT || GET_CODE (trueop0) == CONST_DOUBLE) && (GET_CODE (trueop1) == CONST_VECTOR || GET_CODE (trueop1) == CONST_INT || GET_CODE (trueop1) == CONST_DOUBLE)) { int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); rtvec v = rtvec_alloc (n_elts); unsigned int i; unsigned in_n_elts = 1; if (VECTOR_MODE_P (op0_mode)) in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size); for (i = 0; i < n_elts; i++) { if (i < in_n_elts) { if (!VECTOR_MODE_P (op0_mode)) RTVEC_ELT (v, i) = trueop0; else RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i); } else { if (!VECTOR_MODE_P (op1_mode)) RTVEC_ELT (v, i) = trueop1; else RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1, i - in_n_elts); } } return gen_rtx_CONST_VECTOR (mode, v); } } return 0; default: abort (); } return 0; } /* Get the integer argument values in two forms: zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */ arg0 = INTVAL (trueop0); arg1 = INTVAL (trueop1); if (width < HOST_BITS_PER_WIDE_INT) { arg0 &= ((HOST_WIDE_INT) 1 << width) - 1; arg1 &= ((HOST_WIDE_INT) 1 << width) - 1; arg0s = arg0; if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1))) arg0s |= ((HOST_WIDE_INT) (-1) << width); arg1s = arg1; if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1))) arg1s |= ((HOST_WIDE_INT) (-1) << width); } else { arg0s = arg0; arg1s = arg1; } /* Compute the value of the arithmetic. 
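As an illustration (example values only): in an 8-bit mode a constant with all bits set gives arg0 == 0xff, while arg0s becomes -1 once the sign bit has been propagated, so the signed cases below (DIV, MOD, the arithmetic shift and the signed min/max) see the intended value.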
*/ switch (code) { case PLUS: val = arg0s + arg1s; break; case MINUS: val = arg0s - arg1s; break; case MULT: val = arg0s * arg1s; break; case DIV: if (arg1s == 0 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) && arg1s == -1)) return 0; val = arg0s / arg1s; break; case MOD: if (arg1s == 0 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) && arg1s == -1)) return 0; val = arg0s % arg1s; break; case UDIV: if (arg1 == 0 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) && arg1s == -1)) return 0; val = (unsigned HOST_WIDE_INT) arg0 / arg1; break; case UMOD: if (arg1 == 0 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) && arg1s == -1)) return 0; val = (unsigned HOST_WIDE_INT) arg0 % arg1; break; case AND: val = arg0 & arg1; break; case IOR: val = arg0 | arg1; break; case XOR: val = arg0 ^ arg1; break; case LSHIFTRT: /* If shift count is undefined, don't fold it; let the machine do what it wants. But truncate it if the machine will do that. */ if (arg1 < 0) return 0; if (SHIFT_COUNT_TRUNCATED) arg1 %= width; val = ((unsigned HOST_WIDE_INT) arg0) >> arg1; break; case ASHIFT: if (arg1 < 0) return 0; if (SHIFT_COUNT_TRUNCATED) arg1 %= width; val = ((unsigned HOST_WIDE_INT) arg0) << arg1; break; case ASHIFTRT: if (arg1 < 0) return 0; if (SHIFT_COUNT_TRUNCATED) arg1 %= width; val = arg0s >> arg1; /* Bootstrap compiler may not have sign extended the right shift. Manually extend the sign to insure bootstrap cc matches gcc. */ if (arg0s < 0 && arg1 > 0) val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1); break; case ROTATERT: if (arg1 < 0) return 0; arg1 %= width; val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1)) | (((unsigned HOST_WIDE_INT) arg0) >> arg1)); break; case ROTATE: if (arg1 < 0) return 0; arg1 %= width; val = ((((unsigned HOST_WIDE_INT) arg0) << arg1) | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1))); break; case COMPARE: /* Do nothing here. */ return 0; case SMIN: val = arg0s <= arg1s ? arg0s : arg1s; break; case UMIN: val = ((unsigned HOST_WIDE_INT) arg0 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); break; case SMAX: val = arg0s > arg1s ? arg0s : arg1s; break; case UMAX: val = ((unsigned HOST_WIDE_INT) arg0 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); break; case SS_PLUS: case US_PLUS: case SS_MINUS: case US_MINUS: /* ??? There are simplifications that can be done. */ return 0; default: abort (); } val = trunc_int_for_mode (val, mode); return GEN_INT (val); } /* Simplify a PLUS or MINUS, at least one of whose operands may be another PLUS or MINUS. Rather than test for specific case, we do this by a brute-force method and do all possible simplifications until no more changes occur. Then we rebuild the operation. If FORCE is true, then always generate the rtx. This is used to canonicalize stuff emitted from simplify_gen_binary. Note that this can still fail if the rtx is too complex. It won't fail just because the result is not 'simpler' than the input, however. 
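As a rough illustration (the expression is made up): simplifying (plus (minus A B) (plus C (neg A))) first flattens the operands into the list A, -B, C, -A; the A and -A entries then cancel against each other, and the survivors are rebuilt as (minus C B).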
*/ struct simplify_plus_minus_op_data { rtx op; int neg; }; static int simplify_plus_minus_op_data_cmp (const void *p1, const void *p2) { const struct simplify_plus_minus_op_data *d1 = p1; const struct simplify_plus_minus_op_data *d2 = p2; return (commutative_operand_precedence (d2->op) - commutative_operand_precedence (d1->op)); } static rtx simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1, int force) { struct simplify_plus_minus_op_data ops[8]; rtx result, tem; int n_ops = 2, input_ops = 2, input_consts = 0, n_consts; int first, changed; int i, j; memset (ops, 0, sizeof ops); /* Set up the two operands and then expand them until nothing has been changed. If we run out of room in our array, give up; this should almost never happen. */ ops[0].op = op0; ops[0].neg = 0; ops[1].op = op1; ops[1].neg = (code == MINUS); do { changed = 0; for (i = 0; i < n_ops; i++) { rtx this_op = ops[i].op; int this_neg = ops[i].neg; enum rtx_code this_code = GET_CODE (this_op); switch (this_code) { case PLUS: case MINUS: if (n_ops == 7) return NULL_RTX; ops[n_ops].op = XEXP (this_op, 1); ops[n_ops].neg = (this_code == MINUS) ^ this_neg; n_ops++; ops[i].op = XEXP (this_op, 0); input_ops++; changed = 1; break; case NEG: ops[i].op = XEXP (this_op, 0); ops[i].neg = ! this_neg; changed = 1; break; case CONST: if (n_ops < 7 && GET_CODE (XEXP (this_op, 0)) == PLUS && CONSTANT_P (XEXP (XEXP (this_op, 0), 0)) && CONSTANT_P (XEXP (XEXP (this_op, 0), 1))) { ops[i].op = XEXP (XEXP (this_op, 0), 0); ops[n_ops].op = XEXP (XEXP (this_op, 0), 1); ops[n_ops].neg = this_neg; n_ops++; input_consts++; changed = 1; } break; case NOT: /* ~a -> (-a - 1) */ if (n_ops != 7) { ops[n_ops].op = constm1_rtx; ops[n_ops++].neg = this_neg; ops[i].op = XEXP (this_op, 0); ops[i].neg = !this_neg; changed = 1; } break; case CONST_INT: if (this_neg) { ops[i].op = neg_const_int (mode, this_op); ops[i].neg = 0; changed = 1; } break; default: break; } } } while (changed); /* If we only have two operands, we can't do anything. */ if (n_ops <= 2 && !force) return NULL_RTX; /* Count the number of CONSTs we didn't split above. */ for (i = 0; i < n_ops; i++) if (GET_CODE (ops[i].op) == CONST) input_consts++; /* Now simplify each pair of operands until nothing changes. The first time through just simplify constants against each other. */ first = 1; do { changed = first; for (i = 0; i < n_ops - 1; i++) for (j = i + 1; j < n_ops; j++) { rtx lhs = ops[i].op, rhs = ops[j].op; int lneg = ops[i].neg, rneg = ops[j].neg; if (lhs != 0 && rhs != 0 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs)))) { enum rtx_code ncode = PLUS; if (lneg != rneg) { ncode = MINUS; if (lneg) tem = lhs, lhs = rhs, rhs = tem; } else if (swap_commutative_operands_p (lhs, rhs)) tem = lhs, lhs = rhs, rhs = tem; tem = simplify_binary_operation (ncode, mode, lhs, rhs); /* Reject "simplifications" that just wrap the two arguments in a CONST. Failure to do so can result in infinite recursion with simplify_binary_operation when it calls us to simplify CONST operations. */ if (tem && ! (GET_CODE (tem) == CONST && GET_CODE (XEXP (tem, 0)) == ncode && XEXP (XEXP (tem, 0), 0) == lhs && XEXP (XEXP (tem, 0), 1) == rhs) /* Don't allow -x + -1 -> ~x simplifications in the first pass. This allows us the chance to combine the -1 with other constants. */ && ! 
(first && GET_CODE (tem) == NOT && XEXP (tem, 0) == rhs)) { lneg &= rneg; if (GET_CODE (tem) == NEG) tem = XEXP (tem, 0), lneg = !lneg; if (GET_CODE (tem) == CONST_INT && lneg) tem = neg_const_int (mode, tem), lneg = 0; ops[i].op = tem; ops[i].neg = lneg; ops[j].op = NULL_RTX; changed = 1; } } } first = 0; } while (changed); /* Pack all the operands to the lower-numbered entries. */ for (i = 0, j = 0; j < n_ops; j++) if (ops[j].op) ops[i++] = ops[j]; n_ops = i; /* Sort the operations based on swap_commutative_operands_p. */ qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp); /* Create (minus -C X) instead of (neg (const (plus X C))). */ if (n_ops == 2 && GET_CODE (ops[1].op) == CONST_INT && CONSTANT_P (ops[0].op) && ops[0].neg) return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op); /* We suppressed creation of trivial CONST expressions in the combination loop to avoid recursion. Create one manually now. The combination loop should have ensured that there is exactly one CONST_INT, and the sort will have ensured that it is last in the array and that any other constant will be next-to-last. */ if (n_ops > 1 && GET_CODE (ops[n_ops - 1].op) == CONST_INT && CONSTANT_P (ops[n_ops - 2].op)) { rtx value = ops[n_ops - 1].op; if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg) value = neg_const_int (mode, value); ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value)); n_ops--; } /* Count the number of CONSTs that we generated. */ n_consts = 0; for (i = 0; i < n_ops; i++) if (GET_CODE (ops[i].op) == CONST) n_consts++; /* Give up if we didn't reduce the number of operands we had. Make sure we count a CONST as two operands. If we have the same number of operands, but have made more CONSTs than before, this is also an improvement, so accept it. */ if (!force && (n_ops + n_consts > input_ops || (n_ops + n_consts == input_ops && n_consts <= input_consts))) return NULL_RTX; /* Put a non-negated operand first, if possible. */ for (i = 0; i < n_ops && ops[i].neg; i++) continue; if (i == n_ops) ops[0].op = gen_rtx_NEG (mode, ops[0].op); else if (i != 0) { tem = ops[0].op; ops[0] = ops[i]; ops[i].op = tem; ops[i].neg = 1; } /* Now make the result by performing the requested operations. */ result = ops[0].op; for (i = 1; i < n_ops; i++) result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS, mode, result, ops[i].op); return result; } /* Like simplify_binary_operation except used for relational operators. MODE is the mode of the result. If MODE is VOIDmode, both operands must also be VOIDmode. CMP_MODE specifies in which mode the comparison is done in, so it is the mode of the operands. If CMP_MODE is VOIDmode, it is taken from the operands or, if both are VOIDmode, the operands are compared in "infinite precision". */ rtx simplify_relational_operation (enum rtx_code code, enum machine_mode mode, enum machine_mode cmp_mode, rtx op0, rtx op1) { rtx tem, trueop0, trueop1; if (cmp_mode == VOIDmode) cmp_mode = GET_MODE (op0); if (cmp_mode == VOIDmode) cmp_mode = GET_MODE (op1); tem = simplify_const_relational_operation (code, cmp_mode, op0, op1); if (tem) { #ifdef FLOAT_STORE_FLAG_VALUE if (GET_MODE_CLASS (mode) == MODE_FLOAT) { if (tem == const0_rtx) return CONST0_RTX (mode); else if (GET_MODE_CLASS (mode) == MODE_FLOAT) { REAL_VALUE_TYPE val; val = FLOAT_STORE_FLAG_VALUE (mode); return CONST_DOUBLE_FROM_REAL_VALUE (val, mode); } } #endif return tem; } /* For the following tests, ensure const0_rtx is op1. 
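For example, a comparison written as (lt (const_int 0) (reg X)) is handled as (gt (reg X) (const_int 0)): the operands are exchanged and CODE is replaced by swap_condition (CODE).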
*/ if (swap_commutative_operands_p (op0, op1) || (op0 == const0_rtx && op1 != const0_rtx)) tem = op0, op0 = op1, op1 = tem, code = swap_condition (code); /* If op0 is a compare, extract the comparison arguments from it. */ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx) return simplify_relational_operation (code, mode, VOIDmode, XEXP (op0, 0), XEXP (op0, 1)); if (mode == VOIDmode || GET_MODE_CLASS (cmp_mode) == MODE_CC || CC0_P (op0)) return NULL_RTX; trueop0 = avoid_constant_pool_reference (op0); trueop1 = avoid_constant_pool_reference (op1); return simplify_relational_operation_1 (code, mode, cmp_mode, trueop0, trueop1); } /* This part of simplify_relational_operation is only used when CMP_MODE is not in class MODE_CC (i.e. it is a real comparison). MODE is the mode of the result, while CMP_MODE specifies in which mode the comparison is done, so it is the mode of the operands. */ rtx simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode, enum machine_mode cmp_mode, rtx op0, rtx op1) { if (GET_CODE (op1) == CONST_INT) { if (INTVAL (op1) == 0 && COMPARISON_P (op0)) { /* If op0 is a comparison, extract the comparison arguments from it. */ if (code == NE) { if (GET_MODE (op0) == cmp_mode) return simplify_rtx (op0); else return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode, XEXP (op0, 0), XEXP (op0, 1)); } else if (code == EQ) { enum rtx_code new = reversed_comparison_code (op0, NULL_RTX); if (new != UNKNOWN) return simplify_gen_relational (new, mode, VOIDmode, XEXP (op0, 0), XEXP (op0, 1)); } } } return NULL_RTX; } /* Check if the given comparison (done in the given MODE) is actually a tautology or a contradiction. If no simplification is possible, this function returns zero. Otherwise, it returns either const_true_rtx or const0_rtx. */ rtx simplify_const_relational_operation (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1) { int equal, op0lt, op0ltu, op1lt, op1ltu; rtx tem; rtx trueop0; rtx trueop1; if (mode == VOIDmode && (GET_MODE (op0) != VOIDmode || GET_MODE (op1) != VOIDmode)) abort (); /* If op0 is a compare, extract the comparison arguments from it. */ if (GET_CODE (op0) == COMPARE && op1 == const0_rtx) op1 = XEXP (op0, 1), op0 = XEXP (op0, 0); /* We can't simplify MODE_CC values since we don't know what the actual comparison is. */ if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0)) return 0; /* Make sure the constant is second. */ if (swap_commutative_operands_p (op0, op1)) { tem = op0, op0 = op1, op1 = tem; code = swap_condition (code); } trueop0 = avoid_constant_pool_reference (op0); trueop1 = avoid_constant_pool_reference (op1); /* For integer comparisons of A and B maybe we can simplify A - B and can then simplify a comparison of that with zero. If A and B are both either a register or a CONST_INT, this can't help; testing for these cases will prevent infinite recursion here and speed things up. If CODE is an unsigned comparison, then we can never do this optimization, because it gives an incorrect result if the subtraction wraps around zero. ANSI C defines unsigned operations such that they never overflow, and thus such cases can not be ignored; but we cannot do it even for signed comparisons for languages such as Java, so test flag_wrapv. */ if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx && !
((REG_P (op0) || GET_CODE (trueop0) == CONST_INT) && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT)) && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1)) /* We cannot do this for == or != if tem is a nonzero address. */ && ((code != EQ && code != NE) || ! nonzero_address_p (tem)) && code != GTU && code != GEU && code != LTU && code != LEU) return simplify_const_relational_operation (signed_condition (code), mode, tem, const0_rtx); if (flag_unsafe_math_optimizations && code == ORDERED) return const_true_rtx; if (flag_unsafe_math_optimizations && code == UNORDERED) return const0_rtx; /* For modes without NaNs, if the two operands are equal, we know the result except if they have side-effects. */ if (! HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1) && ! side_effects_p (trueop0)) equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0; /* If the operands are floating-point constants, see if we can fold the result. */ else if (GET_CODE (trueop0) == CONST_DOUBLE && GET_CODE (trueop1) == CONST_DOUBLE && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT) { REAL_VALUE_TYPE d0, d1; REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0); REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1); /* Comparisons are unordered iff at least one of the values is NaN. */ if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1)) switch (code) { case UNEQ: case UNLT: case UNGT: case UNLE: case UNGE: case NE: case UNORDERED: return const_true_rtx; case EQ: case LT: case GT: case LE: case GE: case LTGT: case ORDERED: return const0_rtx; default: return 0; } equal = REAL_VALUES_EQUAL (d0, d1); op0lt = op0ltu = REAL_VALUES_LESS (d0, d1); op1lt = op1ltu = REAL_VALUES_LESS (d1, d0); } /* Otherwise, see if the operands are both integers. */ else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode) && (GET_CODE (trueop0) == CONST_DOUBLE || GET_CODE (trueop0) == CONST_INT) && (GET_CODE (trueop1) == CONST_DOUBLE || GET_CODE (trueop1) == CONST_INT)) { int width = GET_MODE_BITSIZE (mode); HOST_WIDE_INT l0s, h0s, l1s, h1s; unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u; /* Get the two words comprising each integer constant. */ if (GET_CODE (trueop0) == CONST_DOUBLE) { l0u = l0s = CONST_DOUBLE_LOW (trueop0); h0u = h0s = CONST_DOUBLE_HIGH (trueop0); } else { l0u = l0s = INTVAL (trueop0); h0u = h0s = HWI_SIGN_EXTEND (l0s); } if (GET_CODE (trueop1) == CONST_DOUBLE) { l1u = l1s = CONST_DOUBLE_LOW (trueop1); h1u = h1s = CONST_DOUBLE_HIGH (trueop1); } else { l1u = l1s = INTVAL (trueop1); h1u = h1s = HWI_SIGN_EXTEND (l1s); } /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT, we have to sign or zero-extend the values. */ if (width != 0 && width < HOST_BITS_PER_WIDE_INT) { l0u &= ((HOST_WIDE_INT) 1 << width) - 1; l1u &= ((HOST_WIDE_INT) 1 << width) - 1; if (l0s & ((HOST_WIDE_INT) 1 << (width - 1))) l0s |= ((HOST_WIDE_INT) (-1) << width); if (l1s & ((HOST_WIDE_INT) 1 << (width - 1))) l1s |= ((HOST_WIDE_INT) (-1) << width); } if (width != 0 && width <= HOST_BITS_PER_WIDE_INT) h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s); equal = (h0u == h1u && l0u == l1u); op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u)); op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u)); op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u)); op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u)); } /* Otherwise, there are some code-specific tests we can make. */ else { /* Optimize comparisons with upper and lower bounds. 
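For example, in an 8-bit unsigned comparison the bounds are 0 and 255, so x >= 0 (GEU) is always true and x > 255 (GTU) is always false; the signed variants use -128 and 127 instead.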
*/ if (INTEGRAL_MODE_P (mode) && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) { rtx mmin, mmax; int sign; if (code == GEU || code == LEU || code == GTU || code == LTU) sign = 0; else sign = 1; get_mode_bounds (mode, sign, mode, &mmin, &mmax); tem = NULL_RTX; switch (code) { case GEU: case GE: /* x >= min is always true. */ if (rtx_equal_p (trueop1, mmin)) tem = const_true_rtx; else break; case LEU: case LE: /* x <= max is always true. */ if (rtx_equal_p (trueop1, mmax)) tem = const_true_rtx; break; case GTU: case GT: /* x > max is always false. */ if (rtx_equal_p (trueop1, mmax)) tem = const0_rtx; break; case LTU: case LT: /* x < min is always false. */ if (rtx_equal_p (trueop1, mmin)) tem = const0_rtx; break; default: break; } if (tem == const0_rtx || tem == const_true_rtx) return tem; } switch (code) { case EQ: if (trueop1 == const0_rtx && nonzero_address_p (op0)) return const0_rtx; break; case NE: if (trueop1 == const0_rtx && nonzero_address_p (op0)) return const_true_rtx; break; case LT: /* Optimize abs(x) < 0.0. */ if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode)) { tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) : trueop0; if (GET_CODE (tem) == ABS) return const0_rtx; } break; case GE: /* Optimize abs(x) >= 0.0. */ if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode)) { tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) : trueop0; if (GET_CODE (tem) == ABS) return const_true_rtx; } break; case UNGE: /* Optimize ! (abs(x) < 0.0). */ if (trueop1 == CONST0_RTX (mode)) { tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) : trueop0; if (GET_CODE (tem) == ABS) return const_true_rtx; } break; default: break; } return 0; } /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set as appropriate. */ switch (code) { case EQ: case UNEQ: return equal ? const_true_rtx : const0_rtx; case NE: case LTGT: return ! equal ? const_true_rtx : const0_rtx; case LT: case UNLT: return op0lt ? const_true_rtx : const0_rtx; case GT: case UNGT: return op1lt ? const_true_rtx : const0_rtx; case LTU: return op0ltu ? const_true_rtx : const0_rtx; case GTU: return op1ltu ? const_true_rtx : const0_rtx; case LE: case UNLE: return equal || op0lt ? const_true_rtx : const0_rtx; case GE: case UNGE: return equal || op1lt ? const_true_rtx : const0_rtx; case LEU: return equal || op0ltu ? const_true_rtx : const0_rtx; case GEU: return equal || op1ltu ? const_true_rtx : const0_rtx; case ORDERED: return const_true_rtx; case UNORDERED: return const0_rtx; default: abort (); } } /* Simplify CODE, an operation with result mode MODE and three operands, OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became a constant. Return 0 if no simplifications is possible. */ rtx simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2) { unsigned int width = GET_MODE_BITSIZE (mode); /* VOIDmode means "infinite" precision. */ if (width == 0) width = HOST_BITS_PER_WIDE_INT; switch (code) { case SIGN_EXTRACT: case ZERO_EXTRACT: if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width) && width <= (unsigned) HOST_BITS_PER_WIDE_INT) { /* Extracting a bit-field from a constant */ HOST_WIDE_INT val = INTVAL (op0); if (BITS_BIG_ENDIAN) val >>= (GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1)); else val >>= INTVAL (op2); if (HOST_BITS_PER_WIDE_INT != INTVAL (op1)) { /* First zero-extend. 
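For instance (illustrative constant, assuming BITS_BIG_ENDIAN is zero): extracting a 4-bit field starting at bit 2 of the constant 0x6c first shifts right to get 0x1b and then masks with 0xf, leaving 0xb; a SIGN_EXTRACT would additionally propagate bit 3 of that result upward.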
*/ val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1; /* If desired, propagate sign bit. */ if (code == SIGN_EXTRACT && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))) val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1); } /* Clear the bits that don't belong in our mode, unless they and our sign bit are all one. So we get either a reasonable negative value or a reasonable unsigned value for this mode. */ if (width < HOST_BITS_PER_WIDE_INT && ((val & ((HOST_WIDE_INT) (-1) << (width - 1))) != ((HOST_WIDE_INT) (-1) << (width - 1)))) val &= ((HOST_WIDE_INT) 1 << width) - 1; return GEN_INT (val); } break; case IF_THEN_ELSE: if (GET_CODE (op0) == CONST_INT) return op0 != const0_rtx ? op1 : op2; /* Convert c ? a : a into "a". */ if (rtx_equal_p (op1, op2) && ! side_effects_p (op0)) return op1; /* Convert a != b ? a : b into "a". */ if (GET_CODE (op0) == NE && ! side_effects_p (op0) && ! HONOR_NANS (mode) && ! HONOR_SIGNED_ZEROS (mode) && ((rtx_equal_p (XEXP (op0, 0), op1) && rtx_equal_p (XEXP (op0, 1), op2)) || (rtx_equal_p (XEXP (op0, 0), op2) && rtx_equal_p (XEXP (op0, 1), op1)))) return op1; /* Convert a == b ? a : b into "b". */ if (GET_CODE (op0) == EQ && ! side_effects_p (op0) && ! HONOR_NANS (mode) && ! HONOR_SIGNED_ZEROS (mode) && ((rtx_equal_p (XEXP (op0, 0), op1) && rtx_equal_p (XEXP (op0, 1), op2)) || (rtx_equal_p (XEXP (op0, 0), op2) && rtx_equal_p (XEXP (op0, 1), op1)))) return op2; if (COMPARISON_P (op0) && ! side_effects_p (op0)) { enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode ? GET_MODE (XEXP (op0, 1)) : GET_MODE (XEXP (op0, 0))); rtx temp; /* Look for happy constants in op1 and op2. */ if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT) { HOST_WIDE_INT t = INTVAL (op1); HOST_WIDE_INT f = INTVAL (op2); if (t == STORE_FLAG_VALUE && f == 0) code = GET_CODE (op0); else if (t == 0 && f == STORE_FLAG_VALUE) { enum rtx_code tmp; tmp = reversed_comparison_code (op0, NULL_RTX); if (tmp == UNKNOWN) break; code = tmp; } else break; return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0), XEXP (op0, 1)); } if (cmp_mode == VOIDmode) cmp_mode = op0_mode; temp = simplify_relational_operation (GET_CODE (op0), op0_mode, cmp_mode, XEXP (op0, 0), XEXP (op0, 1)); /* See if any simplifications were possible. */ if (temp) { if (GET_CODE (temp) == CONST_INT) return temp == const0_rtx ? op2 : op1; else if (temp) return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2); } } break; case VEC_MERGE: if (GET_MODE (op0) != mode || GET_MODE (op1) != mode || !VECTOR_MODE_P (mode)) abort (); op2 = avoid_constant_pool_reference (op2); if (GET_CODE (op2) == CONST_INT) { int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size); int mask = (1 << n_elts) - 1; if (!(INTVAL (op2) & mask)) return op1; if ((INTVAL (op2) & mask) == mask) return op0; op0 = avoid_constant_pool_reference (op0); op1 = avoid_constant_pool_reference (op1); if (GET_CODE (op0) == CONST_VECTOR && GET_CODE (op1) == CONST_VECTOR) { rtvec v = rtvec_alloc (n_elts); unsigned int i; for (i = 0; i < n_elts; i++) RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i) ? CONST_VECTOR_ELT (op0, i) : CONST_VECTOR_ELT (op1, i)); return gen_rtx_CONST_VECTOR (mode, v); } } break; default: abort (); } return 0; } /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR. 
Works by unpacking OP into a collection of 8-bit values represented as a little-endian array of 'unsigned char', selecting by BYTE, and then repacking them again for OUTERMODE. */ static rtx simplify_immed_subreg (enum machine_mode outermode, rtx op, enum machine_mode innermode, unsigned int byte) { /* We support up to 512-bit values (for V8DFmode). */ enum { max_bitsize = 512, value_bit = 8, value_mask = (1 << value_bit) - 1 }; unsigned char value[max_bitsize / value_bit]; int value_start; int i; int elem; int num_elem; rtx * elems; int elem_bitsize; rtx result_s; rtvec result_v = NULL; enum mode_class outer_class; enum machine_mode outer_submode; /* Some ports misuse CCmode. */ if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT) return op; /* Unpack the value. */ if (GET_CODE (op) == CONST_VECTOR) { num_elem = CONST_VECTOR_NUNITS (op); elems = &CONST_VECTOR_ELT (op, 0); elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode)); } else { num_elem = 1; elems = &op; elem_bitsize = max_bitsize; } if (BITS_PER_UNIT % value_bit != 0) abort (); /* Too complicated; reducing value_bit may help. */ if (elem_bitsize % BITS_PER_UNIT != 0) abort (); /* I don't know how to handle endianness of sub-units. */ for (elem = 0; elem < num_elem; elem++) { unsigned char * vp; rtx el = elems[elem]; /* Vectors are kept in target memory order. (This is probably a mistake.) */ { unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT; unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize) / BITS_PER_UNIT); unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte; unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte; unsigned bytele = (subword_byte % UNITS_PER_WORD + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD); vp = value + (bytele * BITS_PER_UNIT) / value_bit; } switch (GET_CODE (el)) { case CONST_INT: for (i = 0; i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; i += value_bit) *vp++ = INTVAL (el) >> i; /* CONST_INTs are always logically sign-extended. */ for (; i < elem_bitsize; i += value_bit) *vp++ = INTVAL (el) < 0 ? -1 : 0; break; case CONST_DOUBLE: if (GET_MODE (el) == VOIDmode) { /* If this triggers, someone should have generated a CONST_INT instead. */ if (elem_bitsize <= HOST_BITS_PER_WIDE_INT) abort (); for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit) *vp++ = CONST_DOUBLE_LOW (el) >> i; while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize) { *vp++ = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT); i += value_bit; } /* It shouldn't matter what's done here, so fill it with zero. */ for (; i < max_bitsize; i += value_bit) *vp++ = 0; } else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT) { long tmp[max_bitsize / 32]; int bitsize = GET_MODE_BITSIZE (GET_MODE (el)); if (bitsize > elem_bitsize) abort (); if (bitsize % value_bit != 0) abort (); real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el), GET_MODE (el)); /* real_to_target produces its result in words affected by FLOAT_WORDS_BIG_ENDIAN. However, we ignore this, and use WORDS_BIG_ENDIAN instead; see the documentation of SUBREG in rtl.texi. */ for (i = 0; i < bitsize; i += value_bit) { int ibase; if (WORDS_BIG_ENDIAN) ibase = bitsize - 1 - i; else ibase = i; *vp++ = tmp[ibase / 32] >> i % 32; } /* It shouldn't matter what's done here, so fill it with zero. */ for (; i < elem_bitsize; i += value_bit) *vp++ = 0; } else abort (); break; default: abort (); } } /* Now, pick the right byte to start with. */ /* Renumber BYTE so that the least-significant byte is byte 0. 
A special case is paradoxical SUBREGs, which shouldn't be adjusted since they will already have offset 0. */ if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode)) { unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode) - byte); unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte; unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte; byte = (subword_byte % UNITS_PER_WORD + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD); } /* BYTE should still be inside OP. (Note that BYTE is unsigned, so if it's become negative it will instead be very large.) */ if (byte >= GET_MODE_SIZE (innermode)) abort (); /* Convert from bytes to chunks of size value_bit. */ value_start = byte * (BITS_PER_UNIT / value_bit); /* Re-pack the value. */ if (VECTOR_MODE_P (outermode)) { num_elem = GET_MODE_NUNITS (outermode); result_v = rtvec_alloc (num_elem); elems = &RTVEC_ELT (result_v, 0); outer_submode = GET_MODE_INNER (outermode); } else { num_elem = 1; elems = &result_s; outer_submode = outermode; } outer_class = GET_MODE_CLASS (outer_submode); elem_bitsize = GET_MODE_BITSIZE (outer_submode); if (elem_bitsize % value_bit != 0) abort (); if (elem_bitsize + value_start * value_bit > max_bitsize) abort (); for (elem = 0; elem < num_elem; elem++) { unsigned char *vp; /* Vectors are stored in target memory order. (This is probably a mistake.) */ { unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT; unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize) / BITS_PER_UNIT); unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte; unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte; unsigned bytele = (subword_byte % UNITS_PER_WORD + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD); vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit; } switch (outer_class) { case MODE_INT: case MODE_PARTIAL_INT: { unsigned HOST_WIDE_INT hi = 0, lo = 0; for (i = 0; i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; i += value_bit) lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i; for (; i < elem_bitsize; i += value_bit) hi |= ((HOST_WIDE_INT)(*vp++ & value_mask) << (i - HOST_BITS_PER_WIDE_INT)); /* immed_double_const doesn't call trunc_int_for_mode. I don't know why. */ if (elem_bitsize <= HOST_BITS_PER_WIDE_INT) elems[elem] = gen_int_mode (lo, outer_submode); else elems[elem] = immed_double_const (lo, hi, outer_submode); } break; case MODE_FLOAT: { REAL_VALUE_TYPE r; long tmp[max_bitsize / 32]; /* real_from_target wants its input in words affected by FLOAT_WORDS_BIG_ENDIAN. However, we ignore this, and use WORDS_BIG_ENDIAN instead; see the documentation of SUBREG in rtl.texi. */ for (i = 0; i < max_bitsize / 32; i++) tmp[i] = 0; for (i = 0; i < elem_bitsize; i += value_bit) { int ibase; if (WORDS_BIG_ENDIAN) ibase = elem_bitsize - 1 - i; else ibase = i; tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32; } real_from_target (&r, tmp, outer_submode); elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode); } break; default: abort (); } } if (VECTOR_MODE_P (outermode)) return gen_rtx_CONST_VECTOR (outermode, result_v); else return result_s; } /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE) Return 0 if no simplifications are possible. */ rtx simplify_subreg (enum machine_mode outermode, rtx op, enum machine_mode innermode, unsigned int byte) { /* Little bit of sanity checking. 
*/ if (innermode == VOIDmode || outermode == VOIDmode || innermode == BLKmode || outermode == BLKmode) abort (); if (GET_MODE (op) != innermode && GET_MODE (op) != VOIDmode) abort (); if (byte % GET_MODE_SIZE (outermode) || byte >= GET_MODE_SIZE (innermode)) abort (); if (outermode == innermode && !byte) return op; if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_VECTOR) return simplify_immed_subreg (outermode, op, innermode, byte); /* Changing mode twice with SUBREG => just change it once, or not at all if changing back op starting mode. */ if (GET_CODE (op) == SUBREG) { enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op)); int final_offset = byte + SUBREG_BYTE (op); rtx new; if (outermode == innermostmode && byte == 0 && SUBREG_BYTE (op) == 0) return SUBREG_REG (op); /* The SUBREG_BYTE represents offset, as if the value were stored in memory. Irritating exception is paradoxical subreg, where we define SUBREG_BYTE to be 0. On big endian machines, this value should be negative. For a moment, undo this exception. */ if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode)) { int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)); if (WORDS_BIG_ENDIAN) final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; if (BYTES_BIG_ENDIAN) final_offset += difference % UNITS_PER_WORD; } if (SUBREG_BYTE (op) == 0 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode)) { int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode)); if (WORDS_BIG_ENDIAN) final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; if (BYTES_BIG_ENDIAN) final_offset += difference % UNITS_PER_WORD; } /* See whether resulting subreg will be paradoxical. */ if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode)) { /* In nonparadoxical subregs we can't handle negative offsets. */ if (final_offset < 0) return NULL_RTX; /* Bail out in case resulting subreg would be incorrect. */ if (final_offset % GET_MODE_SIZE (outermode) || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode)) return NULL_RTX; } else { int offset = 0; int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode)); /* In paradoxical subreg, see if we are still looking on lower part. If so, our SUBREG_BYTE will be 0. */ if (WORDS_BIG_ENDIAN) offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD; if (BYTES_BIG_ENDIAN) offset += difference % UNITS_PER_WORD; if (offset == final_offset) final_offset = 0; else return NULL_RTX; } /* Recurse for further possible simplifications. */ new = simplify_subreg (outermode, SUBREG_REG (op), GET_MODE (SUBREG_REG (op)), final_offset); if (new) return new; return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset); } /* SUBREG of a hard register => just change the register number and/or mode. If the hard register is not valid in that mode, suppress this simplification. If the hard register is the stack, frame, or argument pointer, leave this as a SUBREG. */ if (REG_P (op) && (! REG_FUNCTION_VALUE_P (op) || ! rtx_equal_function_value_matters) && REGNO (op) < FIRST_PSEUDO_REGISTER #ifdef CANNOT_CHANGE_MODE_CLASS && ! 
(REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode) && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT) #endif && ((reload_completed && !frame_pointer_needed) || (REGNO (op) != FRAME_POINTER_REGNUM #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM && REGNO (op) != HARD_FRAME_POINTER_REGNUM #endif )) #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM && REGNO (op) != ARG_POINTER_REGNUM #endif && REGNO (op) != STACK_POINTER_REGNUM && subreg_offset_representable_p (REGNO (op), innermode, byte, outermode)) { rtx tem = gen_rtx_SUBREG (outermode, op, byte); int final_regno = subreg_hard_regno (tem, 0); /* ??? We do allow it if the current REG is not valid for its mode. This is a kludge to work around how float/complex arguments are passed on 32-bit SPARC and should be fixed. */ if (HARD_REGNO_MODE_OK (final_regno, outermode) || ! HARD_REGNO_MODE_OK (REGNO (op), innermode)) { rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte); /* Propagate original regno. We don't have any way to specify the offset inside original regno, so do so only for lowpart. The information is used only by alias analysis that cannot grok partial registers anyway. */ if (subreg_lowpart_offset (outermode, innermode) == byte) ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op); return x; } } /* If we have a SUBREG of a register that we are replacing and we are replacing it with a MEM, make a new MEM and try replacing the SUBREG with it. Don't do this if the MEM has a mode-dependent address or if we would be widening it. */ if (MEM_P (op) && ! mode_dependent_address_p (XEXP (op, 0)) /* Allow splitting of volatile memory references in case we don't have an instruction to move the whole thing. */ && (! MEM_VOLATILE_P (op) || ! have_insn_for (SET, innermode)) && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op))) return adjust_address_nv (op, outermode, byte); /* Handle complex values represented as CONCAT of real and imaginary part. */ if (GET_CODE (op) == CONCAT) { int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode); rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1); unsigned int final_offset; rtx res; final_offset = byte % (GET_MODE_UNIT_SIZE (innermode)); res = simplify_subreg (outermode, part, GET_MODE (part), final_offset); if (res) return res; /* We can at least simplify it by referring directly to the relevant part. */ return gen_rtx_SUBREG (outermode, part, final_offset); } /* Optimize SUBREG truncations of zero and sign extended values. */ if ((GET_CODE (op) == ZERO_EXTEND || GET_CODE (op) == SIGN_EXTEND) && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)) { unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte); /* If we're requesting the lowpart of a zero or sign extension, there are three possibilities. If the outermode is the same as the origmode, we can omit both the extension and the subreg. If the outermode is not larger than the origmode, we can apply the truncation without the extension. Finally, if the outermode is larger than the origmode, but both are integer modes, we can just extend to the appropriate mode.
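For example (illustrative RTL): the lowpart SUBREG in QImode of (zero_extend:SI (reg:HI R)) reduces to the QImode lowpart of (reg:HI R) itself, while the lowpart SUBREG in HImode of (zero_extend:SI (reg:QI R)) can be rewritten as (zero_extend:HI (reg:QI R)).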
*/ if (bitpos == 0) { enum machine_mode origmode = GET_MODE (XEXP (op, 0)); if (outermode == origmode) return XEXP (op, 0); if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode)) return simplify_gen_subreg (outermode, XEXP (op, 0), origmode, subreg_lowpart_offset (outermode, origmode)); if (SCALAR_INT_MODE_P (outermode)) return simplify_gen_unary (GET_CODE (op), outermode, XEXP (op, 0), origmode); } /* A SUBREG resulting from a zero extension may fold to zero if it extracts higher bits than the ZERO_EXTEND's source bits. */ if (GET_CODE (op) == ZERO_EXTEND && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))) return CONST0_RTX (outermode); } return NULL_RTX; } /* Make a SUBREG operation or equivalent if it folds. */ rtx simplify_gen_subreg (enum machine_mode outermode, rtx op, enum machine_mode innermode, unsigned int byte) { rtx new; /* Little bit of sanity checking. */ if (innermode == VOIDmode || outermode == VOIDmode || innermode == BLKmode || outermode == BLKmode) abort (); if (GET_MODE (op) != innermode && GET_MODE (op) != VOIDmode) abort (); if (byte % GET_MODE_SIZE (outermode) || byte >= GET_MODE_SIZE (innermode)) abort (); if (GET_CODE (op) == QUEUED) return NULL_RTX; new = simplify_subreg (outermode, op, innermode, byte); if (new) return new; if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode) return NULL_RTX; return gen_rtx_SUBREG (outermode, op, byte); } /* Simplify X, an rtx expression. Return the simplified expression or NULL if no simplifications were possible. This is the preferred entry point into the simplification routines; however, we still allow passes to call the more specific routines. Right now GCC has three (yes, three) major bodies of RTL simplification code that need to be unified. 1. fold_rtx in cse.c. This code uses various CSE specific information to aid in RTL simplification. 2. simplify_rtx in combine.c. Similar to fold_rtx, except that it uses combine specific information to aid in RTL simplification. 3. The routines in this file. Long term we want to only have one body of simplification code; to get to that state I recommend the following steps: 1. Pore over fold_rtx & simplify_rtx and move any simplifications which are not pass dependent state into these routines. 2. As code is moved by #1, change fold_rtx & simplify_rtx to use this routine whenever possible. 3. Allow for pass dependent state to be provided to these routines and add simplifications based on the pass dependent state. Remove code from cse.c & combine.c that becomes redundant/dead. It will take time, but ultimately the compiler will be easier to maintain and improve. It's totally silly that when we add a simplification it needs to be added to 4 places (3 for RTL simplification and 1 for tree simplification). */ rtx simplify_rtx (rtx x) { enum rtx_code code = GET_CODE (x); enum machine_mode mode = GET_MODE (x); switch (GET_RTX_CLASS (code)) { case RTX_UNARY: return simplify_unary_operation (code, mode, XEXP (x, 0), GET_MODE (XEXP (x, 0))); case RTX_COMM_ARITH: if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))) return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0)); /* Fall through....
*/ case RTX_BIN_ARITH: return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1)); case RTX_TERNARY: case RTX_BITFIELD_OPS: return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)), XEXP (x, 0), XEXP (x, 1), XEXP (x, 2)); case RTX_COMPARE: case RTX_COMM_COMPARE: return simplify_relational_operation (code, mode, ((GET_MODE (XEXP (x, 0)) != VOIDmode) ? GET_MODE (XEXP (x, 0)) : GET_MODE (XEXP (x, 1))), XEXP (x, 0), XEXP (x, 1)); case RTX_EXTRA: if (code == SUBREG) return simplify_gen_subreg (mode, SUBREG_REG (x), GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); break; case RTX_OBJ: if (code == LO_SUM) { /* Convert (lo_sum (high FOO) FOO) to FOO. */ if (GET_CODE (XEXP (x, 0)) == HIGH && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1))) return XEXP (x, 1); } break; default: break; } return NULL; } /* Simple data type for positive real numbers for the GNU compiler. Copyright (C) 2002, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This library supports positive real numbers and 0; inf and nan are NOT supported. It is written to be simple and fast. Value of sreal is x = sig * 2 ^ exp where sig = significant (for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS) exp = exponent One HOST_WIDE_INT is used for the significant on 64-bit (and more than 64-bit) machines, otherwise two HOST_WIDE_INTs are used for the significant. Only a half of significant bits is used (in normalized sreals) so that we do not have problems with overflow, for example when c->sig = a->sig * b->sig. So the precision for 64-bit and 32-bit machines is 32-bit. Invariant: The numbers are normalized before and after each call of sreal_*. Normalized sreals: All numbers (except zero) meet following conditions: SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG -SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP If the number would be too large, it is set to upper bounds of these conditions. If the number is zero or would be too small it meets following conditions: sig == 0 && exp == -SREAL_MAX_EXP */ static inline void copy (sreal *, sreal *); static inline void shift_right (sreal *, int); static void normalize_sreal (sreal *); /* Print the content of struct sreal. */ void dump_sreal (FILE *file, sreal *x) { #if SREAL_PART_BITS < 32 fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + " HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)", x->sig_hi, x->sig_lo, x->exp); #else fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp); #endif } /* Copy the sreal number. */ static inline void copy (sreal *r, sreal *a) { #if SREAL_PART_BITS < 32 r->sig_lo = a->sig_lo; r->sig_hi = a->sig_hi; #else r->sig = a->sig; #endif r->exp = a->exp; } /* Shift X right by S bits. Needed: 0 < S <= SREAL_BITS. When the most significant bit shifted out is 1, add 1 to X (rounding). 
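For instance (small example values, ignoring normalization): shifting a significand of 11 right by 2 bits first adds 1 << 1 == 2, giving 13, and then shifts, giving 3, which is 11/4 rounded to the nearest integer.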
*/ static inline void shift_right (sreal *x, int s) { #ifdef ENABLE_CHECKING if (s <= 0 || s > SREAL_BITS) abort (); if (x->exp + s > SREAL_MAX_EXP) { /* Exponent should never be so large because shift_right is used only by sreal_add and sreal_sub ant thus the number cannot be shifted out from exponent range. */ abort (); } #endif x->exp += s; #if SREAL_PART_BITS < 32 if (s > SREAL_PART_BITS) { s -= SREAL_PART_BITS; x->sig_hi += (uhwi) 1 << (s - 1); x->sig_lo = x->sig_hi >> s; x->sig_hi = 0; } else { x->sig_lo += (uhwi) 1 << (s - 1); if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS)) { x->sig_hi++; x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS; } x->sig_lo >>= s; x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s); x->sig_hi >>= s; } #else x->sig += (uhwi) 1 << (s - 1); x->sig >>= s; #endif } /* Normalize *X. */ static void normalize_sreal (sreal *x) { #if SREAL_PART_BITS < 32 int shift; HOST_WIDE_INT mask; if (x->sig_lo == 0 && x->sig_hi == 0) { x->exp = -SREAL_MAX_EXP; } else if (x->sig_hi < SREAL_MIN_SIG) { if (x->sig_hi == 0) { /* Move lower part of significant to higher part. */ x->sig_hi = x->sig_lo; x->sig_lo = 0; x->exp -= SREAL_PART_BITS; } shift = 0; while (x->sig_hi < SREAL_MIN_SIG) { x->sig_hi <<= 1; x->exp--; shift++; } /* Check underflow. */ if (x->exp < -SREAL_MAX_EXP) { x->exp = -SREAL_MAX_EXP; x->sig_hi = 0; x->sig_lo = 0; } else if (shift) { mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift)); x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift); x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1); } } else if (x->sig_hi > SREAL_MAX_SIG) { unsigned HOST_WIDE_INT tmp = x->sig_hi; /* Find out how many bits will be shifted. */ shift = 0; do { tmp >>= 1; shift++; } while (tmp > SREAL_MAX_SIG); /* Round the number. */ x->sig_lo += (uhwi) 1 << (shift - 1); x->sig_lo >>= shift; x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1)) << (SREAL_PART_BITS - shift)); x->sig_hi >>= shift; x->exp += shift; if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS)) { x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS; x->sig_hi++; if (x->sig_hi > SREAL_MAX_SIG) { /* x->sig_hi was SREAL_MAX_SIG before increment so now last bit is zero. */ x->sig_hi >>= 1; x->sig_lo >>= 1; x->exp++; } } /* Check overflow. */ if (x->exp > SREAL_MAX_EXP) { x->exp = SREAL_MAX_EXP; x->sig_hi = SREAL_MAX_SIG; x->sig_lo = SREAL_MAX_SIG; } } #else if (x->sig == 0) { x->exp = -SREAL_MAX_EXP; } else if (x->sig < SREAL_MIN_SIG) { do { x->sig <<= 1; x->exp--; } while (x->sig < SREAL_MIN_SIG); /* Check underflow. */ if (x->exp < -SREAL_MAX_EXP) { x->exp = -SREAL_MAX_EXP; x->sig = 0; } } else if (x->sig > SREAL_MAX_SIG) { int last_bit; do { last_bit = x->sig & 1; x->sig >>= 1; x->exp++; } while (x->sig > SREAL_MAX_SIG); /* Round the number. */ x->sig += last_bit; if (x->sig > SREAL_MAX_SIG) { x->sig >>= 1; x->exp++; } /* Check overflow. */ if (x->exp > SREAL_MAX_EXP) { x->exp = SREAL_MAX_EXP; x->sig = SREAL_MAX_SIG; } } #endif } /* Set *R to SIG * 2 ^ EXP. Return R. */ sreal * sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp) { #if SREAL_PART_BITS < 32 r->sig_lo = 0; r->sig_hi = sig; r->exp = exp - 16; #else r->sig = sig; r->exp = exp; #endif normalize_sreal (r); return r; } /* Return integer value of *R. 
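Since x = sig * 2 ^ exp, this amounts to shifting the significand left by a positive exponent or right by a negative one, discarding any fractional bits; values whose exponent is too large to represent saturate to MAX_HOST_WIDE_INT.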
*/ HOST_WIDE_INT sreal_to_int (sreal *r) { #if SREAL_PART_BITS < 32 if (r->exp <= -SREAL_BITS) return 0; if (r->exp >= 0) return MAX_HOST_WIDE_INT; return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp; #else if (r->exp <= -SREAL_BITS) return 0; if (r->exp >= SREAL_PART_BITS) return MAX_HOST_WIDE_INT; if (r->exp > 0) return r->sig << r->exp; if (r->exp < 0) return r->sig >> -r->exp; return r->sig; #endif } /* Compare *A and *B. Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B. */ int sreal_compare (sreal *a, sreal *b) { if (a->exp > b->exp) return 1; if (a->exp < b->exp) return -1; #if SREAL_PART_BITS < 32 if (a->sig_hi > b->sig_hi) return 1; if (a->sig_hi < b->sig_hi) return -1; if (a->sig_lo > b->sig_lo) return 1; if (a->sig_lo < b->sig_lo) return -1; #else if (a->sig > b->sig) return 1; if (a->sig < b->sig) return -1; #endif return 0; } /* *R = *A + *B. Return R. */ sreal * sreal_add (sreal *r, sreal *a, sreal *b) { int dexp; sreal tmp; sreal *bb; if (sreal_compare (a, b) < 0) { sreal *swap; swap = a; a = b; b = swap; } dexp = a->exp - b->exp; r->exp = a->exp; if (dexp > SREAL_BITS) { #if SREAL_PART_BITS < 32 r->sig_hi = a->sig_hi; r->sig_lo = a->sig_lo; #else r->sig = a->sig; #endif return r; } if (dexp == 0) bb = b; else { copy (&tmp, b); shift_right (&tmp, dexp); bb = &tmp; } #if SREAL_PART_BITS < 32 r->sig_hi = a->sig_hi + bb->sig_hi; r->sig_lo = a->sig_lo + bb->sig_lo; if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS)) { r->sig_hi++; r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS; } #else r->sig = a->sig + bb->sig; #endif normalize_sreal (r); return r; } /* *R = *A - *B. Return R. */ sreal * sreal_sub (sreal *r, sreal *a, sreal *b) { int dexp; sreal tmp; sreal *bb; if (sreal_compare (a, b) < 0) { abort (); } dexp = a->exp - b->exp; r->exp = a->exp; if (dexp > SREAL_BITS) { #if SREAL_PART_BITS < 32 r->sig_hi = a->sig_hi; r->sig_lo = a->sig_lo; #else r->sig = a->sig; #endif return r; } if (dexp == 0) bb = b; else { copy (&tmp, b); shift_right (&tmp, dexp); bb = &tmp; } #if SREAL_PART_BITS < 32 if (a->sig_lo < bb->sig_lo) { r->sig_hi = a->sig_hi - bb->sig_hi - 1; r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo; } else { r->sig_hi = a->sig_hi - bb->sig_hi; r->sig_lo = a->sig_lo - bb->sig_lo; } #else r->sig = a->sig - bb->sig; #endif normalize_sreal (r); return r; } /* *R = *A * *B. Return R. */ sreal * sreal_mul (sreal *r, sreal *a, sreal *b) { #if SREAL_PART_BITS < 32 if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG) { r->sig_lo = 0; r->sig_hi = 0; r->exp = -SREAL_MAX_EXP; } else { unsigned HOST_WIDE_INT tmp1, tmp2, tmp3; if (sreal_compare (a, b) < 0) { sreal *swap; swap = a; a = b; b = swap; } r->exp = a->exp + b->exp + SREAL_PART_BITS; tmp1 = a->sig_lo * b->sig_lo; tmp2 = a->sig_lo * b->sig_hi; tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS); r->sig_hi = a->sig_hi * b->sig_hi; r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS); tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1; tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1; tmp1 = tmp2 + tmp3; r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1); r->sig_hi += tmp1 >> SREAL_PART_BITS; normalize_sreal (r); } #else if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG) { r->sig = 0; r->exp = -SREAL_MAX_EXP; } else { r->sig = a->sig * b->sig; r->exp = a->exp + b->exp; normalize_sreal (r); } #endif return r; } /* *R = *A / *B. Return R. 
*/ sreal * sreal_div (sreal *r, sreal *a, sreal *b) { #if SREAL_PART_BITS < 32 unsigned HOST_WIDE_INT tmp, tmp1, tmp2; if (b->sig_hi < SREAL_MIN_SIG) { abort (); } else if (a->sig_hi < SREAL_MIN_SIG) { r->sig_hi = 0; r->sig_lo = 0; r->exp = -SREAL_MAX_EXP; } else { /* Since division by the whole number is pretty ugly to write we are dividing by first 3/4 of bits of number. */ tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo; tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2)) + (b->sig_lo >> (SREAL_PART_BITS / 2))); if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1))) tmp2++; r->sig_lo = 0; tmp = tmp1 / tmp2; tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2); r->sig_hi = tmp << SREAL_PART_BITS; tmp = tmp1 / tmp2; tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2); r->sig_hi += tmp << (SREAL_PART_BITS / 2); tmp = tmp1 / tmp2; r->sig_hi += tmp; r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2; normalize_sreal (r); } #else if (b->sig == 0) { abort (); } else { r->sig = (a->sig << SREAL_PART_BITS) / b->sig; r->exp = a->exp - b->exp - SREAL_PART_BITS; normalize_sreal (r); } #endif return r; } /* Expands front end tree to back end RTL for GCC Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file handles the generation of rtl code from tree structure above the level of expressions, using subroutines in exp*.c and emit-rtl.c. It also creates the rtl expressions for parameters and auto variables and has full responsibility for allocating stack slots. The functions whose names start with `expand_' are called by the parser to generate RTL instructions for various kinds of constructs. Some control and binding constructs require calling several such functions at different times. For example, a simple if-then is expanded by calling `expand_start_cond' (with the condition-expression as argument) before parsing the then-clause and calling `expand_end_cond' after parsing the then-clause. */ /* Functions and data structures for expanding case statements. */ /* Case label structure, used to hold info on labels within case statements. We handle "range" labels; for a single-value label as in C, the high and low limits are the same. An AVL tree of case nodes is initially created, and later transformed to a list linked via the RIGHT fields in the nodes. Nodes with higher case values are later in the list. Switch statements can be output in one of two forms. A branch table is used if there are more than a few labels and the labels are dense within the range between the smallest and largest case value. If a branch table is used, no further manipulations are done with the case node chain. The alternative to the use of a branch table is to generate a series of compare and jump insns. 
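(A branch table here is a single indexed jump through a table of labels, whereas the compare-and-jump form tests the case values, or ranges of them, one comparison at a time.)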
When that is done, we use the LEFT, RIGHT, and PARENT fields to hold a binary tree. Initially the tree is totally unbalanced, with everything on the right. We balance the tree with nodes on the left having lower case values than the parent and nodes on the right having higher values. We then output the tree in order. */ struct case_node GTY(()) { struct case_node *left; /* Left son in binary tree */ struct case_node *right; /* Right son in binary tree; also node chain */ struct case_node *parent; /* Parent of node in binary tree */ tree low; /* Lowest index value for this label */ tree high; /* Highest index value for this label */ tree code_label; /* Label to jump to when node matches */ int balance; }; typedef struct case_node case_node; typedef struct case_node *case_node_ptr; /* These are used by estimate_case_costs and balance_case_nodes. */ /* This must be a signed type, and non-ANSI compilers lack signed char. */ static short cost_table_[129]; static int use_cost_table; static int cost_table_initialized; /* Special care is needed because we allow -1, but TREE_INT_CST_LOW is unsigned. */ #define COST_TABLE(I) cost_table_[(unsigned HOST_WIDE_INT) ((I) + 1)] /* Stack of control and binding constructs we are currently inside. These constructs begin when you call `expand_start_WHATEVER' and end when you call `expand_end_WHATEVER'. This stack records info about how the construct began that tells the end-function what to do. It also may provide information about the construct to alter the behavior of other constructs within the body. For example, they may affect the behavior of C `break' and `continue'. Each construct gets one `struct nesting' object. All of these objects are chained through the `all' field. `nesting_stack' points to the first object (innermost construct). The position of an entry on `nesting_stack' is in its `depth' field. Each type of construct has its own individual stack. For example, loops have `cond_stack'. Each object points to the next object of the same type through the `next' field. Some constructs are visible to `break' exit-statements and others are not. Which constructs are visible depends on the language. Therefore, the data structure allows each construct to be visible or not, according to the args given when the construct is started. The construct is visible if the `exit_label' field is non-null. In that case, the value should be a CODE_LABEL rtx. */ struct nesting GTY(()) { struct nesting *all; struct nesting *next; int depth; rtx exit_label; enum nesting_desc { COND_NESTING, BLOCK_NESTING, CASE_NESTING } desc; union nesting_u { /* For conds (if-then and if-then-else statements). */ struct nesting_cond { /* Label for the end of the if construct. There is none if EXITFLAG was not set and no `else' has been seen yet. */ rtx endif_label; /* Label for the end of this alternative. This may be the end of the if or the next else/elseif. */ rtx next_label; } GTY ((tag ("COND_NESTING"))) cond; /* For variable binding contours. */ struct nesting_block { /* Sequence number of this binding contour within the function, in order of entry. */ int block_start_count; /* Nonzero => value to restore stack to on exit. */ rtx stack_level; /* The NOTE that starts this contour. Used by expand_goto to check whether the destination is within each contour or not. */ rtx first_insn; /* Innermost containing binding contour that has a stack level. */ struct nesting *innermost_stack_block; /* List of cleanups to be run on exit from this contour. 
This is a list of expressions to be evaluated. The TREE_PURPOSE of each link is the ..._DECL node which the cleanup pertains to. */ tree cleanups; /* List of cleanup-lists of blocks containing this block, as they were at the locus where this block appears. There is an element for each containing block, ordered innermost containing block first. The tail of this list can be 0, if all remaining elements would be empty lists. The element's TREE_VALUE is the cleanup-list of that block, which may be null. */ tree outer_cleanups; /* Chain of labels defined inside this binding contour. For contours that have stack levels or cleanups. */ struct label_chain *label_chain; /* Nonzero if this is associated with an EH region. */ int exception_region; /* The saved target_temp_slot_level from our outer block. We may reset target_temp_slot_level to be the level of this block, if that is done, target_temp_slot_level reverts to the saved target_temp_slot_level at the very end of the block. */ int block_target_temp_slot_level; /* True if we are currently emitting insns in an area of output code that is controlled by a conditional expression. This is used by the cleanup handling code to generate conditional cleanup actions. */ int conditional_code; /* A place to move the start of the exception region for any of the conditional cleanups, must be at the end or after the start of the last unconditional cleanup, and before any conditional branch points. */ rtx last_unconditional_cleanup; } GTY ((tag ("BLOCK_NESTING"))) block; /* For switch (C) or case (Pascal) statements. */ struct nesting_case { /* The insn after which the case dispatch should finally be emitted. Zero for a dummy. */ rtx start; /* A list of case labels; it is first built as an AVL tree. During expand_end_case, this is converted to a list, and may be rearranged into a nearly balanced binary tree. */ struct case_node *case_list; /* Label to jump to if no case matches. */ tree default_label; /* The expression to be dispatched on. */ tree index_expr; /* Type that INDEX_EXPR should be converted to. */ tree nominal_type; /* Name of this kind of statement, for warnings. */ const char *printname; /* Used to save no_line_numbers till we see the first case label. We set this to -1 when we see the first case label in this case statement. */ int line_number_status; } GTY ((tag ("CASE_NESTING"))) case_stmt; } GTY ((desc ("%1.desc"))) data; }; /* Allocate and return a new `struct nesting'. */ #define ALLOC_NESTING() ggc_alloc (sizeof (struct nesting)) /* Pop the nesting stack element by element until we pop off the element which is at the top of STACK. Update all the other stacks, popping off elements from them as we pop them from nesting_stack. */ #define POPSTACK(STACK) \ do { struct nesting *target = STACK; \ struct nesting *this; \ do { this = nesting_stack; \ if (cond_stack == this) \ cond_stack = cond_stack->next; \ if (block_stack == this) \ block_stack = block_stack->next; \ if (stack_block_stack == this) \ stack_block_stack = stack_block_stack->next; \ if (case_stack == this) \ case_stack = case_stack->next; \ nesting_depth = nesting_stack->depth - 1; \ nesting_stack = this->all; } \ while (this != target); } while (0) /* In some cases it is impossible to generate code for a forward goto until the label definition is seen. This happens when it may be necessary for the goto to reset the stack pointer: we don't yet know how to do that. So expand_goto puts an entry on this fixup list. 
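   A typical case (illustrative only, not from the original comment) is a
   forward goto out of a block whose frame size is not known at the point
   of the jump:

     {
       char buf[n];          variable-sized object: this contour has a
                             stack level that must be restored on exit
       if (err)
         goto fail;          label not seen yet, so a fixup is recorded
       ...
     }
     fail: ...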
Each time a binding contour that resets the stack is exited, we check each fixup. If the target label has now been defined, we can insert the proper code. */ struct goto_fixup GTY(()) { /* Points to following fixup. */ struct goto_fixup *next; /* Points to the insn before the jump insn. If more code must be inserted, it goes after this insn. */ rtx before_jump; /* The LABEL_DECL that this jump is jumping to, or 0 for break, continue or return. */ tree target; /* The BLOCK for the place where this goto was found. */ tree context; /* The CODE_LABEL rtx that this is jumping to. */ rtx target_rtl; /* Number of binding contours started in current function before the label reference. */ int block_start_count; /* The outermost stack level that should be restored for this jump. Each time a binding contour that resets the stack is exited, if the target label is *not* yet defined, this slot is updated. */ rtx stack_level; /* List of lists of cleanup expressions to be run by this goto. There is one element for each block that this goto is within. The tail of this list can be 0, if all remaining elements would be empty. The TREE_VALUE contains the cleanup list of that block as of the time this goto was seen. The TREE_ADDRESSABLE flag is 1 for a block that has been exited. */ tree cleanup_list_list; }; /* Within any binding contour that must restore a stack level, all labels are recorded with a chain of these structures. */ struct label_chain GTY(()) { /* Points to following fixup. */ struct label_chain *next; tree label; }; struct stmt_status GTY(()) { /* Chain of all pending binding contours. */ struct nesting * x_block_stack; /* If any new stacks are added here, add them to POPSTACKS too. */ /* Chain of all pending binding contours that restore stack levels or have cleanups. */ struct nesting * x_stack_block_stack; /* Chain of all pending conditional statements. */ struct nesting * x_cond_stack; /* Chain of all pending case or switch statements. */ struct nesting * x_case_stack; /* Separate chain including all of the above, chained through the `all' field. */ struct nesting * x_nesting_stack; /* Number of entries on nesting_stack now. */ int x_nesting_depth; /* Number of binding contours started so far in this function. */ int x_block_start_count; /* Location of last line-number note, whether we actually emitted it or not. */ location_t x_emit_locus; struct goto_fixup *x_goto_fixup_chain; }; #define block_stack (cfun->stmt->x_block_stack) #define stack_block_stack (cfun->stmt->x_stack_block_stack) #define cond_stack (cfun->stmt->x_cond_stack) #define case_stack (cfun->stmt->x_case_stack) #define nesting_stack (cfun->stmt->x_nesting_stack) #define nesting_depth (cfun->stmt->x_nesting_depth) #define current_block_start_count (cfun->stmt->x_block_start_count) #define emit_locus (cfun->stmt->x_emit_locus) #define goto_fixup_chain (cfun->stmt->x_goto_fixup_chain) /* Nonzero if we are using EH to handle cleanups. 
*/ int using_eh_for_cleanups_p = 0; static int n_char_occurrences2 (int, const char *); static bool decl_conflicts_with_clobbers_p (tree, const HARD_REG_SET); static void expand_goto_internal (tree, rtx, rtx); static int expand_fixup (tree, rtx, rtx); static void expand_nl_goto_receiver (void); static void fixup_gotos (struct nesting *, rtx, tree, rtx, int); static bool check_operand_nalternatives (tree, tree); static bool check_unique_operand_names (tree, tree); static char *resolve_operand_name_1 (char *, tree, tree); static void expand_null_return_1 (rtx); static enum br_predictor return_prediction (rtx); static rtx shift_return_value (rtx); static void expand_value_return (rtx); static void expand_cleanups (tree, int, int); static void do_jump_if_equal (rtx, rtx, rtx, int); static int estimate_case_costs (case_node_ptr); static bool same_case_target_p (rtx, rtx); static void strip_default_case_nodes (case_node_ptr *, rtx); static bool lshift_cheap_p (void); static int case_bit_test_cmp (const void *, const void *); static void emit_case_bit_tests (tree, tree, tree, tree, case_node_ptr, rtx); static void group_case_nodes (case_node_ptr); static void balance_case_nodes (case_node_ptr *, case_node_ptr); static int node_has_low_bound (case_node_ptr, tree); static int node_has_high_bound (case_node_ptr, tree); static int node_is_bounded (case_node_ptr, tree); static void emit_jump_if_reachable (rtx); static void emit_case_nodes (rtx, case_node_ptr, rtx, tree); static struct case_node *case_tree2list (case_node *, case_node *); void using_eh_for_cleanups (void) { using_eh_for_cleanups_p = 1; } void init_stmt_for_function (void) { cfun->stmt = ggc_alloc_cleared (sizeof (struct stmt_status)); } /* Record the current file and line. Called from emit_line_note. */ void set_file_and_line_for_stmt (location_t location) { /* If we're outputting an inline function, and we add a line note, there may be no CFUN->STMT information. So, there's no need to update it. */ if (cfun->stmt) emit_locus = location; } /* Emit a no-op instruction. */ void emit_nop (void) { rtx last_insn; last_insn = get_last_insn (); if (!optimize && (GET_CODE (last_insn) == CODE_LABEL || (GET_CODE (last_insn) == NOTE && prev_real_insn (last_insn) == 0))) emit_insn (gen_nop ()); } /* Return the rtx-label that corresponds to a LABEL_DECL, creating it if necessary. */ rtx label_rtx (tree label) { if (TREE_CODE (label) != LABEL_DECL) abort (); if (!DECL_RTL_SET_P (label)) { rtx r = gen_label_rtx (); SET_DECL_RTL (label, r); if (FORCED_LABEL (label) || DECL_NONLOCAL (label)) LABEL_PRESERVE_P (r) = 1; } return DECL_RTL (label); } /* As above, but also put it on the forced-reference list of the function that contains it. */ rtx force_label_rtx (tree label) { rtx ref = label_rtx (label); tree function = decl_function_context (label); struct function *p; if (!function) abort (); if (function != current_function_decl) p = find_function_data (function); else p = cfun; p->expr->x_forced_labels = gen_rtx_EXPR_LIST (VOIDmode, ref, p->expr->x_forced_labels); return ref; } /* Add an unconditional jump to LABEL as the next sequential instruction. */ void emit_jump (rtx label) { do_pending_stack_adjust (); emit_jump_insn (gen_jump (label)); emit_barrier (); } /* Emit code to jump to the address specified by the pointer expression EXP. 
*/ void expand_computed_goto (tree exp) { rtx x = expand_expr (exp, NULL_RTX, VOIDmode, 0); x = convert_memory_address (Pmode, x); emit_queue (); do_pending_stack_adjust (); emit_indirect_jump (x); } /* Handle goto statements and the labels that they can go to. */ /* Specify the location in the RTL code of a label LABEL, which is a LABEL_DECL tree node. This is used for the kind of label that the user can jump to with a goto statement, and for alternatives of a switch or case statement. RTL labels generated for loops and conditionals don't go through here; they are generated directly at the RTL level, by other functions below. Note that this has nothing to do with defining label *names*. Languages vary in how they do that and what that even means. */ void expand_label (tree label) { struct label_chain *p; rtx label_r = label_rtx (label); do_pending_stack_adjust (); emit_label (label_r); if (DECL_NAME (label)) LABEL_NAME (DECL_RTL (label)) = IDENTIFIER_POINTER (DECL_NAME (label)); if (DECL_NONLOCAL (label)) { expand_nl_goto_receiver (); nonlocal_goto_handler_labels = gen_rtx_EXPR_LIST (VOIDmode, label_r, nonlocal_goto_handler_labels); } if (FORCED_LABEL (label)) forced_labels = gen_rtx_EXPR_LIST (VOIDmode, label_r, forced_labels); if (DECL_NONLOCAL (label) || FORCED_LABEL (label)) maybe_set_first_label_num (label_r); if (stack_block_stack != 0) { p = ggc_alloc (sizeof (struct label_chain)); p->next = stack_block_stack->data.block.label_chain; stack_block_stack->data.block.label_chain = p; p->label = label; } } /* Generate RTL code for a `goto' statement with target label LABEL. LABEL should be a LABEL_DECL tree node that was or will later be defined with `expand_label'. */ void expand_goto (tree label) { #ifdef ENABLE_CHECKING /* Check for a nonlocal goto to a containing function. Should have gotten translated to __builtin_nonlocal_goto. */ tree context = decl_function_context (label); if (context != 0 && context != current_function_decl) abort (); #endif expand_goto_internal (label, label_rtx (label), NULL_RTX); } /* Generate RTL code for a `goto' statement with target label BODY. LABEL should be a LABEL_REF. LAST_INSN, if non-0, is the rtx we should consider as the last insn emitted (for the purposes of cleaning up a return). */ static void expand_goto_internal (tree body, rtx label, rtx last_insn) { struct nesting *block; rtx stack_level = 0; if (GET_CODE (label) != CODE_LABEL) abort (); /* If label has already been defined, we can tell now whether and how we must alter the stack level. */ if (PREV_INSN (label) != 0) { /* Find the innermost pending block that contains the label. (Check containment by comparing insn-uids.) Then restore the outermost stack level within that block, and do cleanups of all blocks contained in it. */ for (block = block_stack; block; block = block->next) { if (INSN_UID (block->data.block.first_insn) < INSN_UID (label)) break; if (block->data.block.stack_level != 0) stack_level = block->data.block.stack_level; /* Execute the cleanups for blocks we are exiting. */ if (block->data.block.cleanups != 0) { expand_cleanups (block->data.block.cleanups, 1, 1); do_pending_stack_adjust (); } } if (stack_level) { /* Ensure stack adjust isn't done by emit_jump, as this would clobber the stack pointer. This one should be deleted as dead by flow. */ clear_pending_stack_adjust (); do_pending_stack_adjust (); /* Don't do this adjust if it's to the end label and this function is to return with a depressed stack pointer. 
*/ if (label == return_label && (((TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE) && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))))) ; else emit_stack_restore (SAVE_BLOCK, stack_level, NULL_RTX); } if (body != 0 && DECL_TOO_LATE (body)) error ("jump to `%s' invalidly jumps into binding contour", IDENTIFIER_POINTER (DECL_NAME (body))); } /* Label not yet defined: may need to put this goto on the fixup list. */ else if (! expand_fixup (body, label, last_insn)) { /* No fixup needed. Record that the label is the target of at least one goto that has no fixup. */ if (body != 0) TREE_ADDRESSABLE (body) = 1; } emit_jump (label); } /* Generate if necessary a fixup for a goto whose target label in tree structure (if any) is TREE_LABEL and whose target in rtl is RTL_LABEL. If LAST_INSN is nonzero, we pretend that the jump appears after insn LAST_INSN instead of at the current point in the insn stream. The fixup will be used later to insert insns just before the goto. Those insns will restore the stack level as appropriate for the target label, and will (in the case of C++) also invoke any object destructors which have to be invoked when we exit the scopes which are exited by the goto. Value is nonzero if a fixup is made. */ static int expand_fixup (tree tree_label, rtx rtl_label, rtx last_insn) { struct nesting *block, *end_block; /* See if we can recognize which block the label will be output in. This is possible in some very common cases. If we succeed, set END_BLOCK to that block. Otherwise, set it to 0. */ if (cond_stack && (rtl_label == cond_stack->data.cond.endif_label || rtl_label == cond_stack->data.cond.next_label)) end_block = cond_stack; else end_block = 0; /* Now set END_BLOCK to the binding level to which we will return. */ if (end_block) { struct nesting *next_block = end_block->all; block = block_stack; /* First see if the END_BLOCK is inside the innermost binding level. If so, then no cleanups or stack levels are relevant. */ while (next_block && next_block != block) next_block = next_block->all; if (next_block) return 0; /* Otherwise, set END_BLOCK to the innermost binding level which is outside the relevant control-structure nesting. */ next_block = block_stack->next; for (block = block_stack; block != end_block; block = block->all) if (block == next_block) next_block = next_block->next; end_block = next_block; } /* Does any containing block have a stack level or cleanups? If not, no fixup is needed, and that is the normal case (the only case, for standard C). */ for (block = block_stack; block != end_block; block = block->next) if (block->data.block.stack_level != 0 || block->data.block.cleanups != 0) break; if (block != end_block) { /* Ok, a fixup is needed. Add a fixup to the list of such. */ struct goto_fixup *fixup = ggc_alloc (sizeof (struct goto_fixup)); /* In case an old stack level is restored, make sure that comes after any pending stack adjust. */ /* ?? If the fixup isn't to come at the present position, doing the stack adjust here isn't useful. Doing it with our settings at that location isn't useful either. Let's hope someone does it! */ if (last_insn == 0) do_pending_stack_adjust (); fixup->target = tree_label; fixup->target_rtl = rtl_label; /* Create a BLOCK node and a corresponding matched set of NOTE_INSN_BLOCK_BEG and NOTE_INSN_BLOCK_END notes at this point. The notes will encapsulate any and all fixup code which we might later insert at this point in the insn stream. Also, the BLOCK node will be the parent (i.e. 
the `SUPERBLOCK') of any other BLOCK nodes which we might create later on when we are expanding the fixup code. Note that optimization passes might move the *_BLOCK notes away, so we use a NOTE_INSN_DELETED as a placeholder. */ { rtx original_before_jump = last_insn ? last_insn : get_last_insn (); rtx start; rtx end; tree block; block = make_node (BLOCK); TREE_USED (block) = 1; BLOCK_CHAIN (block) = BLOCK_CHAIN (DECL_INITIAL (current_function_decl)); BLOCK_CHAIN (DECL_INITIAL (current_function_decl)) = block; start_sequence (); start = emit_note (NOTE_INSN_BLOCK_BEG); NOTE_BLOCK (start) = block; fixup->before_jump = emit_note (NOTE_INSN_DELETED); end = emit_note (NOTE_INSN_BLOCK_END); NOTE_BLOCK (end) = block; fixup->context = block; end_sequence (); emit_insn_after (start, original_before_jump); } fixup->block_start_count = current_block_start_count; fixup->stack_level = 0; fixup->cleanup_list_list = ((block->data.block.outer_cleanups || block->data.block.cleanups) ? tree_cons (NULL_TREE, block->data.block.cleanups, block->data.block.outer_cleanups) : 0); fixup->next = goto_fixup_chain; goto_fixup_chain = fixup; } return block != 0; } /* Expand any needed fixups in the outputmost binding level of the function. FIRST_INSN is the first insn in the function. */ void expand_fixups (rtx first_insn) { fixup_gotos (NULL, NULL_RTX, NULL_TREE, first_insn, 0); } /* When exiting a binding contour, process all pending gotos requiring fixups. THISBLOCK is the structure that describes the block being exited. STACK_LEVEL is the rtx for the stack level to restore exiting this contour. CLEANUP_LIST is a list of expressions to evaluate on exiting this contour. FIRST_INSN is the insn that began this contour. Gotos that jump out of this contour must restore the stack level and do the cleanups before actually jumping. DONT_JUMP_IN positive means report error if there is a jump into this contour from before the beginning of the contour. This is also done if STACK_LEVEL is nonzero unless DONT_JUMP_IN is negative. */ static void fixup_gotos (struct nesting *thisblock, rtx stack_level, tree cleanup_list, rtx first_insn, int dont_jump_in) { struct goto_fixup *f, *prev; /* F is the fixup we are considering; PREV is the previous one. */ /* We run this loop in two passes so that cleanups of exited blocks are run first, and blocks that are exited are marked so afterwards. */ for (prev = 0, f = goto_fixup_chain; f; prev = f, f = f->next) { /* Test for a fixup that is inactive because it is already handled. */ if (f->before_jump == 0) { /* Delete inactive fixup from the chain, if that is easy to do. */ if (prev != 0) prev->next = f->next; } /* Has this fixup's target label been defined? If so, we can finalize it. */ else if (PREV_INSN (f->target_rtl) != 0) { rtx cleanup_insns; /* If this fixup jumped into this contour from before the beginning of this contour, report an error. This code used to use the first non-label insn after f->target_rtl, but that's wrong since such can be added, by things like put_var_into_stack and have INSN_UIDs that are out of the range of the block. */ /* ??? Bug: this does not detect jumping in through intermediate blocks that have stack levels or cleanups. It detects only a problem with the innermost block around the label. */ if (f->target != 0 && (dont_jump_in > 0 || (dont_jump_in == 0 && stack_level) || cleanup_list) && INSN_UID (first_insn) < INSN_UID (f->target_rtl) && INSN_UID (first_insn) > INSN_UID (f->before_jump) && ! 
DECL_ERROR_ISSUED (f->target)) { error ("%Jlabel '%D' used before containing binding contour", f->target, f->target); /* Prevent multiple errors for one label. */ DECL_ERROR_ISSUED (f->target) = 1; } /* We will expand the cleanups into a sequence of their own and then later on we will attach this new sequence to the insn stream just ahead of the actual jump insn. */ start_sequence (); /* Temporarily restore the lexical context where we will logically be inserting the fixup code. We do this for the sake of getting the debugging information right. */ lang_hooks.decls.pushlevel (0); lang_hooks.decls.set_block (f->context); /* Expand the cleanups for blocks this jump exits. */ if (f->cleanup_list_list) { tree lists; for (lists = f->cleanup_list_list; lists; lists = TREE_CHAIN (lists)) /* Marked elements correspond to blocks that have been closed. Do their cleanups. */ if (TREE_ADDRESSABLE (lists) && TREE_VALUE (lists) != 0) { expand_cleanups (TREE_VALUE (lists), 1, 1); /* Pop any pushes done in the cleanups, in case function is about to return. */ do_pending_stack_adjust (); } } /* Restore stack level for the biggest contour that this jump jumps out of. */ if (f->stack_level && ! (f->target_rtl == return_label && ((TREE_CODE (TREE_TYPE (current_function_decl)) == FUNCTION_TYPE) && (TYPE_RETURNS_STACK_DEPRESSED (TREE_TYPE (current_function_decl)))))) emit_stack_restore (SAVE_BLOCK, f->stack_level, f->before_jump); /* Finish up the sequence containing the insns which implement the necessary cleanups, and then attach that whole sequence to the insn stream just ahead of the actual jump insn. Attaching it at that point insures that any cleanups which are in fact implicit C++ object destructions (which must be executed upon leaving the block) appear (to the debugger) to be taking place in an area of the generated code where the object(s) being destructed are still "in scope". */ cleanup_insns = get_insns (); lang_hooks.decls.poplevel (1, 0, 0); end_sequence (); emit_insn_after (cleanup_insns, f->before_jump); f->before_jump = 0; } } /* For any still-undefined labels, do the cleanups for this block now. We must do this now since items in the cleanup list may go out of scope when the block ends. */ for (prev = 0, f = goto_fixup_chain; f; prev = f, f = f->next) if (f->before_jump != 0 && PREV_INSN (f->target_rtl) == 0 /* Label has still not appeared. If we are exiting a block with a stack level to restore, that started before the fixup, mark this stack level as needing restoration when the fixup is later finalized. */ && thisblock != 0 /* Note: if THISBLOCK == 0 and we have a label that hasn't appeared, it means the label is undefined. That's erroneous, but possible. */ && (thisblock->data.block.block_start_count <= f->block_start_count)) { tree lists = f->cleanup_list_list; rtx cleanup_insns; for (; lists; lists = TREE_CHAIN (lists)) /* If the following elt. corresponds to our containing block then the elt. must be for this block. */ if (TREE_CHAIN (lists) == thisblock->data.block.outer_cleanups) { start_sequence (); lang_hooks.decls.pushlevel (0); lang_hooks.decls.set_block (f->context); expand_cleanups (TREE_VALUE (lists), 1, 1); do_pending_stack_adjust (); cleanup_insns = get_insns (); lang_hooks.decls.poplevel (1, 0, 0); end_sequence (); if (cleanup_insns != 0) f->before_jump = emit_insn_after (cleanup_insns, f->before_jump); f->cleanup_list_list = TREE_CHAIN (lists); } if (stack_level) f->stack_level = stack_level; } } /* Return the number of times character C occurs in string S. 
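   For example (an aside, not in the original comment),
   n_char_occurrences2 (',', "=r,m") returns 1, which is how
   check_operand_nalternatives below derives the number of constraint
   alternatives of an asm operand (alternatives = commas + 1).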
*/ /* XXX duplicates n_char_occurrences in genoutput.c */ static int n_char_occurrences2 (int c, const char *s) { int n = 0; while (*s) n += (*s++ == c); return n; } /* Generate RTL for an asm statement (explicit assembler code). STRING is a STRING_CST node containing the assembler code text, or an ADDR_EXPR containing a STRING_CST. VOL nonzero means the insn is volatile; don't optimize it. */ void expand_asm (tree string, int vol) { rtx body; if (TREE_CODE (string) == ADDR_EXPR) string = TREE_OPERAND (string, 0); body = gen_rtx_ASM_INPUT (VOIDmode, TREE_STRING_POINTER (string)); MEM_VOLATILE_P (body) = vol; emit_insn (body); } /* Parse the output constraint pointed to by *CONSTRAINT_P. It is the OPERAND_NUMth output operand, indexed from zero. There are NINPUTS inputs and NOUTPUTS outputs to this extended-asm. Upon return, *ALLOWS_MEM will be TRUE iff the constraint allows the use of a memory operand. Similarly, *ALLOWS_REG will be TRUE iff the constraint allows the use of a register operand. And, *IS_INOUT will be true if the operand is read-write, i.e., if it is used as an input as well as an output. If *CONSTRAINT_P is not in canonical form, it will be made canonical. (Note that `+' will be replaced with `=' as part of this process.) Returns TRUE if all went well; FALSE if an error occurred. */ bool parse_output_constraint (const char **constraint_p, int operand_num, int ninputs, int noutputs, bool *allows_mem, bool *allows_reg, bool *is_inout) { const char *constraint = *constraint_p; const char *p; /* Assume the constraint doesn't allow the use of either a register or memory. */ *allows_mem = false; *allows_reg = false; /* Allow the `=' or `+' to not be at the beginning of the string, since it wasn't explicitly documented that way, and there is a large body of code that puts it last. Swap the character to the front, so as not to uglify any place else. */ p = strchr (constraint, '='); if (!p) p = strchr (constraint, '+'); /* If the string doesn't contain an `=', issue an error message. */ if (!p) { error ("output operand constraint lacks `='"); return false; } /* If the constraint begins with `+', then the operand is both read from and written to. */ *is_inout = (*p == '+'); /* Canonicalize the output constraint so that it begins with `='. */ if (p != constraint || is_inout) { char *buf; size_t c_len = strlen (constraint); if (p != constraint) warning ("output constraint `%c' for operand %d is not at the beginning", *p, operand_num); /* Make a copy of the constraint. */ buf = alloca (c_len + 1); strcpy (buf, constraint); /* Swap the first character and the `=' or `+'. */ buf[p - constraint] = buf[0]; /* Make sure the first character is an `='. (Until we do this, it might be a `+'.) */ buf[0] = '='; /* Replace the constraint with the canonicalized string. */ *constraint_p = ggc_alloc_string (buf, c_len); constraint = *constraint_p; } /* Loop through the constraint string. 
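   For example (illustrative, not in the original comment): with "=rm" this
   loop sets both *allows_reg and *allows_mem; with "=&r" the '&' (early
   clobber) is simply skipped and only *allows_reg ends up set; a digit as
   in "=0" is rejected below, since matching constraints are not valid in
   output operands.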
*/ for (p = constraint + 1; *p; p += CONSTRAINT_LEN (*p, p)) switch (*p) { case '+': case '=': error ("operand constraint contains incorrectly positioned '+' or '='"); return false; case '%': if (operand_num + 1 == ninputs + noutputs) { error ("`%%' constraint used with last operand"); return false; } break; case 'V': case 'm': case 'o': *allows_mem = true; break; case '?': case '!': case '*': case '&': case '#': case 'E': case 'F': case 'G': case 'H': case 's': case 'i': case 'n': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case ',': break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '[': error ("matching constraint not valid in output operand"); return false; case '<': case '>': /* ??? Before flow, auto inc/dec insns are not supposed to exist, excepting those that expand_call created. So match memory and hope. */ *allows_mem = true; break; case 'g': case 'X': *allows_reg = true; *allows_mem = true; break; case 'p': case 'r': *allows_reg = true; break; default: if (!ISALPHA (*p)) break; if (REG_CLASS_FROM_CONSTRAINT (*p, p) != NO_REGS) *allows_reg = true; #ifdef EXTRA_CONSTRAINT_STR else if (EXTRA_ADDRESS_CONSTRAINT (*p, p)) *allows_reg = true; else if (EXTRA_MEMORY_CONSTRAINT (*p, p)) *allows_mem = true; else { /* Otherwise we can't assume anything about the nature of the constraint except that it isn't purely registers. Treat it like "g" and hope for the best. */ *allows_reg = true; *allows_mem = true; } #endif break; } return true; } /* Similar, but for input constraints. */ bool parse_input_constraint (const char **constraint_p, int input_num, int ninputs, int noutputs, int ninout, const char * const * constraints, bool *allows_mem, bool *allows_reg) { const char *constraint = *constraint_p; const char *orig_constraint = constraint; size_t c_len = strlen (constraint); size_t j; bool saw_match = false; /* Assume the constraint doesn't allow the use of either a register or memory. */ *allows_mem = false; *allows_reg = false; /* Make sure constraint has neither `=', `+', nor '&'. */ for (j = 0; j < c_len; j += CONSTRAINT_LEN (constraint[j], constraint+j)) switch (constraint[j]) { case '+': case '=': case '&': if (constraint == orig_constraint) { error ("input operand constraint contains `%c'", constraint[j]); return false; } break; case '%': if (constraint == orig_constraint && input_num + 1 == ninputs - ninout) { error ("`%%' constraint used with last operand"); return false; } break; case 'V': case 'm': case 'o': *allows_mem = true; break; case '<': case '>': case '?': case '!': case '*': case '#': case 'E': case 'F': case 'G': case 'H': case 's': case 'i': case 'n': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case ',': break; /* Whether or not a numeric constraint allows a register is decided by the matching constraint, and so there is no need to do anything special with them. We must handle them in the default case, so that we don't unnecessarily force operands to memory. */ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { char *end; unsigned long match; saw_match = true; match = strtoul (constraint + j, &end, 10); if (match >= (unsigned long) noutputs) { error ("matching constraint references invalid operand number"); return false; } /* Try and find the real constraint for this dup. Only do this if the matching constraint is the only alternative. 
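   For instance (an aside, not part of the original comment), in

     asm ("addl %2,%0" : "=r" (foo) : "0" (foo), "g" (bar));

   input 0 carries the matching constraint "0": it refers to output operand
   0, so the code below substitutes that output's constraint string ("=r"),
   and the leading '=' is then skipped when the loop resumes.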
*/ if (*end == '\0' && (j == 0 || (j == 1 && constraint[0] == '%'))) { constraint = constraints[match]; *constraint_p = constraint; c_len = strlen (constraint); j = 0; /* ??? At the end of the loop, we will skip the first part of the matched constraint. This assumes not only that the other constraint is an output constraint, but also that the '=' or '+' come first. */ break; } else j = end - constraint; /* Anticipate increment at end of loop. */ j--; } /* Fall through. */ case 'p': case 'r': *allows_reg = true; break; case 'g': case 'X': *allows_reg = true; *allows_mem = true; break; default: if (! ISALPHA (constraint[j])) { error ("invalid punctuation `%c' in constraint", constraint[j]); return false; } if (REG_CLASS_FROM_CONSTRAINT (constraint[j], constraint + j) != NO_REGS) *allows_reg = true; #ifdef EXTRA_CONSTRAINT_STR else if (EXTRA_ADDRESS_CONSTRAINT (constraint[j], constraint + j)) *allows_reg = true; else if (EXTRA_MEMORY_CONSTRAINT (constraint[j], constraint + j)) *allows_mem = true; else { /* Otherwise we can't assume anything about the nature of the constraint except that it isn't purely registers. Treat it like "g" and hope for the best. */ *allows_reg = true; *allows_mem = true; } #endif break; } if (saw_match && !*allows_reg) warning ("matching constraint does not allow a register"); return true; } /* INPUT is one of the input operands from EXPR, an ASM_EXPR. Returns true if it is an operand which must be passed in memory (i.e. an "m" constraint), false otherwise. */ bool asm_op_is_mem_input (tree input, tree expr) { const char *constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (input))); tree outputs = ASM_OUTPUTS (expr); int noutputs = list_length (outputs); const char **constraints = (const char **) alloca ((noutputs) * sizeof (const char *)); int i = 0; bool allows_mem, allows_reg; tree t; /* Collect output constraints. */ for (t = outputs; t ; t = TREE_CHAIN (t), i++) constraints[i] = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); /* We pass 0 for input_num, ninputs and ninout; they are only used for error checking which will be done at expand time. */ parse_input_constraint (&constraint, 0, 0, noutputs, 0, constraints, &allows_mem, &allows_reg); return (!allows_reg && allows_mem); } /* Check for overlap between registers marked in CLOBBERED_REGS and anything inappropriate in DECL. Emit error and return TRUE for error, FALSE for ok. */ static bool decl_conflicts_with_clobbers_p (tree decl, const HARD_REG_SET clobbered_regs) { /* Conflicts between asm-declared register variables and the clobber list are not allowed. */ if ((TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL) && DECL_REGISTER (decl) && REG_P (DECL_RTL (decl)) && REGNO (DECL_RTL (decl)) < FIRST_PSEUDO_REGISTER) { rtx reg = DECL_RTL (decl); unsigned int regno; for (regno = REGNO (reg); regno < (REGNO (reg) + hard_regno_nregs[REGNO (reg)][GET_MODE (reg)]); regno++) if (TEST_HARD_REG_BIT (clobbered_regs, regno)) { error ("asm-specifier for variable `%s' conflicts with asm clobber list", IDENTIFIER_POINTER (DECL_NAME (decl))); /* Reset registerness to stop multiple errors emitted for a single variable. */ DECL_REGISTER (decl) = 0; return true; } } return false; } /* Generate RTL for an asm statement with arguments. STRING is the instruction template. OUTPUTS is a list of output arguments (lvalues); INPUTS a list of inputs. 
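   As a concrete example (illustrative, not from the original comment), for

     asm ("bsrl %1,%0" : "=r" (pos) : "rm" (x) : "cc");

   STRING is the "bsrl %1,%0" template, OUTPUTS is the one-element list for
   the "=r" lvalue POS, INPUTS the one-element list for the "rm" operand X,
   and CLOBBERS the list holding the string "cc".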
Each output or input has an expression in the TREE_VALUE and and a tree list in TREE_PURPOSE which in turn contains a constraint name in TREE_VALUE (or NULL_TREE) and a constraint string in TREE_PURPOSE. CLOBBERS is a list of STRING_CST nodes each naming a hard register that is clobbered by this insn. Not all kinds of lvalue that may appear in OUTPUTS can be stored directly. Some elements of OUTPUTS may be replaced with trees representing temporary values. The caller should copy those temporary values to the originally specified lvalues. VOL nonzero means the insn is volatile; don't optimize it. */ void expand_asm_operands (tree string, tree outputs, tree inputs, tree clobbers, int vol, location_t locus) { rtvec argvec, constraintvec; rtx body; int ninputs = list_length (inputs); int noutputs = list_length (outputs); int ninout; int nclobbers; HARD_REG_SET clobbered_regs; int clobber_conflict_found = 0; tree tail; tree t; int i; /* Vector of RTX's of evaluated output operands. */ rtx *output_rtx = alloca (noutputs * sizeof (rtx)); int *inout_opnum = alloca (noutputs * sizeof (int)); rtx *real_output_rtx = alloca (noutputs * sizeof (rtx)); enum machine_mode *inout_mode = alloca (noutputs * sizeof (enum machine_mode)); const char **constraints = alloca ((noutputs + ninputs) * sizeof (const char *)); int old_generating_concat_p = generating_concat_p; /* An ASM with no outputs needs to be treated as volatile, for now. */ if (noutputs == 0) vol = 1; if (! check_operand_nalternatives (outputs, inputs)) return; string = resolve_asm_operand_names (string, outputs, inputs); /* Collect constraints. */ i = 0; for (t = outputs; t ; t = TREE_CHAIN (t), i++) constraints[i] = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); for (t = inputs; t ; t = TREE_CHAIN (t), i++) constraints[i] = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); /* Sometimes we wish to automatically clobber registers across an asm. Case in point is when the i386 backend moved from cc0 to a hard reg -- maintaining source-level compatibility means automatically clobbering the flags register. */ clobbers = targetm.md_asm_clobbers (clobbers); /* Count the number of meaningful clobbered registers, ignoring what we would ignore later. */ nclobbers = 0; CLEAR_HARD_REG_SET (clobbered_regs); for (tail = clobbers; tail; tail = TREE_CHAIN (tail)) { const char *regname = TREE_STRING_POINTER (TREE_VALUE (tail)); i = decode_reg_name (regname); if (i >= 0 || i == -4) ++nclobbers; else if (i == -2) error ("unknown register name `%s' in `asm'", regname); /* Mark clobbered registers. */ if (i >= 0) { /* Clobbering the PIC register is an error */ if (i == (int) PIC_OFFSET_TABLE_REGNUM) { error ("PIC register `%s' clobbered in `asm'", regname); return; } SET_HARD_REG_BIT (clobbered_regs, i); } } /* First pass over inputs and outputs checks validity and sets mark_addressable if needed. */ ninout = 0; for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++) { tree val = TREE_VALUE (tail); tree type = TREE_TYPE (val); const char *constraint; bool is_inout; bool allows_reg; bool allows_mem; /* If there's an erroneous arg, emit no insn. */ if (type == error_mark_node) return; /* Try to parse the output constraint. If that fails, there's no point in going further. */ constraint = constraints[i]; if (!parse_output_constraint (&constraint, i, ninputs, noutputs, &allows_mem, &allows_reg, &is_inout)) return; if (! 
allows_reg && (allows_mem || is_inout || (DECL_P (val) && REG_P (DECL_RTL (val)) && GET_MODE (DECL_RTL (val)) != TYPE_MODE (type)))) lang_hooks.mark_addressable (val); if (is_inout) ninout++; } ninputs += ninout; if (ninputs + noutputs > MAX_RECOG_OPERANDS) { error ("more than %d operands in `asm'", MAX_RECOG_OPERANDS); return; } for (i = 0, tail = inputs; tail; i++, tail = TREE_CHAIN (tail)) { bool allows_reg, allows_mem; const char *constraint; /* If there's an erroneous arg, emit no insn, because the ASM_INPUT would get VOIDmode and that could cause a crash in reload. */ if (TREE_TYPE (TREE_VALUE (tail)) == error_mark_node) return; constraint = constraints[i + noutputs]; if (! parse_input_constraint (&constraint, i, ninputs, noutputs, ninout, constraints, &allows_mem, &allows_reg)) return; if (! allows_reg && allows_mem) lang_hooks.mark_addressable (TREE_VALUE (tail)); } /* Second pass evaluates arguments. */ ninout = 0; for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++) { tree val = TREE_VALUE (tail); tree type = TREE_TYPE (val); bool is_inout; bool allows_reg; bool allows_mem; rtx op; if (!parse_output_constraint (&constraints[i], i, ninputs, noutputs, &allows_mem, &allows_reg, &is_inout)) abort (); /* If an output operand is not a decl or indirect ref and our constraint allows a register, make a temporary to act as an intermediate. Make the asm insn write into that, then our caller will copy it to the real output operand. Likewise for promoted variables. */ generating_concat_p = 0; real_output_rtx[i] = NULL_RTX; if ((TREE_CODE (val) == INDIRECT_REF && allows_mem) || (DECL_P (val) && (allows_mem || REG_P (DECL_RTL (val))) && ! (REG_P (DECL_RTL (val)) && GET_MODE (DECL_RTL (val)) != TYPE_MODE (type))) || ! allows_reg || is_inout) { op = expand_expr (val, NULL_RTX, VOIDmode, EXPAND_WRITE); if (MEM_P (op)) op = validize_mem (op); if (! allows_reg && !MEM_P (op)) error ("output number %d not directly addressable", i); if ((! allows_mem && MEM_P (op)) || GET_CODE (op) == CONCAT) { real_output_rtx[i] = protect_from_queue (op, 1); op = gen_reg_rtx (GET_MODE (op)); if (is_inout) emit_move_insn (op, real_output_rtx[i]); } } else { op = assign_temp (type, 0, 0, 1); op = validize_mem (op); TREE_VALUE (tail) = make_tree (type, op); } output_rtx[i] = op; generating_concat_p = old_generating_concat_p; if (is_inout) { inout_mode[ninout] = TYPE_MODE (type); inout_opnum[ninout++] = i; } if (decl_conflicts_with_clobbers_p (val, clobbered_regs)) clobber_conflict_found = 1; } /* Make vectors for the expression-rtx, constraint strings, and named operands. */ argvec = rtvec_alloc (ninputs); constraintvec = rtvec_alloc (ninputs); body = gen_rtx_ASM_OPERANDS ((noutputs == 0 ? VOIDmode : GET_MODE (output_rtx[0])), TREE_STRING_POINTER (string), empty_string, 0, argvec, constraintvec, locus); MEM_VOLATILE_P (body) = vol; /* Eval the inputs and put them into ARGVEC. Put their constraints into ASM_INPUTs and store in CONSTRAINTS. */ for (i = 0, tail = inputs; tail; tail = TREE_CHAIN (tail), ++i) { bool allows_reg, allows_mem; const char *constraint; tree val, type; rtx op; constraint = constraints[i + noutputs]; if (! parse_input_constraint (&constraint, i, ninputs, noutputs, ninout, constraints, &allows_mem, &allows_reg)) abort (); generating_concat_p = 0; val = TREE_VALUE (tail); type = TREE_TYPE (val); op = expand_expr (val, NULL_RTX, VOIDmode, (allows_mem && !allows_reg ? EXPAND_MEMORY : EXPAND_NORMAL)); /* Never pass a CONCAT to an ASM. 
*/ if (GET_CODE (op) == CONCAT) op = force_reg (GET_MODE (op), op); else if (MEM_P (op)) op = validize_mem (op); if (asm_operand_ok (op, constraint) <= 0) { if (allows_reg) op = force_reg (TYPE_MODE (type), op); else if (!allows_mem) warning ("asm operand %d probably doesn't match constraints", i + noutputs); else if (MEM_P (op)) { /* We won't recognize either volatile memory or memory with a queued address as available a memory_operand at this point. Ignore it: clearly this *is* a memory. */ } else { warning ("use of memory input without lvalue in " "asm operand %d is deprecated", i + noutputs); if (CONSTANT_P (op)) { rtx mem = force_const_mem (TYPE_MODE (type), op); if (mem) op = validize_mem (mem); else op = force_reg (TYPE_MODE (type), op); } if (REG_P (op) || GET_CODE (op) == SUBREG || GET_CODE (op) == CONCAT) { tree qual_type = build_qualified_type (type, (TYPE_QUALS (type) | TYPE_QUAL_CONST)); rtx memloc = assign_temp (qual_type, 1, 1, 1); memloc = validize_mem (memloc); emit_move_insn (memloc, op); op = memloc; } } } generating_concat_p = old_generating_concat_p; ASM_OPERANDS_INPUT (body, i) = op; ASM_OPERANDS_INPUT_CONSTRAINT_EXP (body, i) = gen_rtx_ASM_INPUT (TYPE_MODE (type), constraints[i + noutputs]); if (decl_conflicts_with_clobbers_p (val, clobbered_regs)) clobber_conflict_found = 1; } /* Protect all the operands from the queue now that they have all been evaluated. */ generating_concat_p = 0; for (i = 0; i < ninputs - ninout; i++) ASM_OPERANDS_INPUT (body, i) = protect_from_queue (ASM_OPERANDS_INPUT (body, i), 0); for (i = 0; i < noutputs; i++) output_rtx[i] = protect_from_queue (output_rtx[i], 1); /* For in-out operands, copy output rtx to input rtx. */ for (i = 0; i < ninout; i++) { int j = inout_opnum[i]; char buffer[16]; ASM_OPERANDS_INPUT (body, ninputs - ninout + i) = output_rtx[j]; sprintf (buffer, "%d", j); ASM_OPERANDS_INPUT_CONSTRAINT_EXP (body, ninputs - ninout + i) = gen_rtx_ASM_INPUT (inout_mode[i], ggc_strdup (buffer)); } generating_concat_p = old_generating_concat_p; /* Now, for each output, construct an rtx (set OUTPUT (asm_operands INSN OUTPUTCONSTRAINT OUTPUTNUMBER ARGVEC CONSTRAINTS OPNAMES)) If there is more than one, put them inside a PARALLEL. */ if (noutputs == 1 && nclobbers == 0) { ASM_OPERANDS_OUTPUT_CONSTRAINT (body) = constraints[0]; emit_insn (gen_rtx_SET (VOIDmode, output_rtx[0], body)); } else if (noutputs == 0 && nclobbers == 0) { /* No output operands: put in a raw ASM_OPERANDS rtx. */ emit_insn (body); } else { rtx obody = body; int num = noutputs; if (num == 0) num = 1; body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num + nclobbers)); /* For each output operand, store a SET. */ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++) { XVECEXP (body, 0, i) = gen_rtx_SET (VOIDmode, output_rtx[i], gen_rtx_ASM_OPERANDS (GET_MODE (output_rtx[i]), TREE_STRING_POINTER (string), constraints[i], i, argvec, constraintvec, locus)); MEM_VOLATILE_P (SET_SRC (XVECEXP (body, 0, i))) = vol; } /* If there are no outputs (but there are some clobbers) store the bare ASM_OPERANDS into the PARALLEL. */ if (i == 0) XVECEXP (body, 0, i++) = obody; /* Store (clobber REG) for each clobbered register specified. 
*/ for (tail = clobbers; tail; tail = TREE_CHAIN (tail)) { const char *regname = TREE_STRING_POINTER (TREE_VALUE (tail)); int j = decode_reg_name (regname); rtx clobbered_reg; if (j < 0) { if (j == -3) /* `cc', which is not a register */ continue; if (j == -4) /* `memory', don't cache memory across asm */ { XVECEXP (body, 0, i++) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode))); continue; } /* Ignore unknown register, error already signaled. */ continue; } /* Use QImode since that's guaranteed to clobber just one reg. */ clobbered_reg = gen_rtx_REG (QImode, j); /* Do sanity check for overlap between clobbers and respectively input and outputs that hasn't been handled. Such overlap should have been detected and reported above. */ if (!clobber_conflict_found) { int opno; /* We test the old body (obody) contents to avoid tripping over the under-construction body. */ for (opno = 0; opno < noutputs; opno++) if (reg_overlap_mentioned_p (clobbered_reg, output_rtx[opno])) internal_error ("asm clobber conflict with output operand"); for (opno = 0; opno < ninputs - ninout; opno++) if (reg_overlap_mentioned_p (clobbered_reg, ASM_OPERANDS_INPUT (obody, opno))) internal_error ("asm clobber conflict with input operand"); } XVECEXP (body, 0, i++) = gen_rtx_CLOBBER (VOIDmode, clobbered_reg); } emit_insn (body); } /* For any outputs that needed reloading into registers, spill them back to where they belong. */ for (i = 0; i < noutputs; ++i) if (real_output_rtx[i]) emit_move_insn (real_output_rtx[i], output_rtx[i]); free_temp_slots (); } void expand_asm_expr (tree exp) { int noutputs, i; tree outputs, tail; tree *o; if (ASM_INPUT_P (exp)) { expand_asm (ASM_STRING (exp), ASM_VOLATILE_P (exp)); return; } outputs = ASM_OUTPUTS (exp); noutputs = list_length (outputs); /* o[I] is the place that output number I should be written. */ o = (tree *) alloca (noutputs * sizeof (tree)); /* Record the contents of OUTPUTS before it is modified. */ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++) o[i] = TREE_VALUE (tail); /* Generate the ASM_OPERANDS insn; store into the TREE_VALUEs of OUTPUTS some trees for where the values were actually stored. */ expand_asm_operands (ASM_STRING (exp), outputs, ASM_INPUTS (exp), ASM_CLOBBERS (exp), ASM_VOLATILE_P (exp), input_location); /* Copy all the intermediate outputs into the specified outputs. */ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++) { if (o[i] != TREE_VALUE (tail)) { expand_assignment (o[i], TREE_VALUE (tail), 0); free_temp_slots (); /* Restore the original value so that it's correct the next time we expand this function. */ TREE_VALUE (tail) = o[i]; } } /* Those MODIFY_EXPRs could do autoincrements. */ emit_queue (); } /* A subroutine of expand_asm_operands. Check that all operands have the same number of alternatives. Return true if so. */ static bool check_operand_nalternatives (tree outputs, tree inputs) { if (outputs || inputs) { tree tmp = TREE_PURPOSE (outputs ? 
outputs : inputs); int nalternatives = n_char_occurrences2 (',', TREE_STRING_POINTER (TREE_VALUE (tmp))); tree next = inputs; if (nalternatives + 1 > MAX_RECOG_ALTERNATIVES) { error ("too many alternatives in `asm'"); return false; } tmp = outputs; while (tmp) { const char *constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tmp))); if (n_char_occurrences2 (',', constraint) != nalternatives) { error ("operand constraints for `asm' differ in number of alternatives"); return false; } if (TREE_CHAIN (tmp)) tmp = TREE_CHAIN (tmp); else tmp = next, next = 0; } } return true; }
/* A subroutine of expand_asm_operands. Check that all operand names are unique. Return true if so. We rely on the fact that these names are identifiers, and so have been canonicalized by get_identifier, so all we need are pointer comparisons. */ static bool check_unique_operand_names (tree outputs, tree inputs) { tree i, j; for (i = outputs; i ; i = TREE_CHAIN (i)) { tree i_name = TREE_PURPOSE (TREE_PURPOSE (i)); if (! i_name) continue; for (j = TREE_CHAIN (i); j ; j = TREE_CHAIN (j)) if (simple_cst_equal (i_name, TREE_PURPOSE (TREE_PURPOSE (j)))) goto failure; } for (i = inputs; i ; i = TREE_CHAIN (i)) { tree i_name = TREE_PURPOSE (TREE_PURPOSE (i)); if (! i_name) continue; for (j = TREE_CHAIN (i); j ; j = TREE_CHAIN (j)) if (simple_cst_equal (i_name, TREE_PURPOSE (TREE_PURPOSE (j)))) goto failure; for (j = outputs; j ; j = TREE_CHAIN (j)) if (simple_cst_equal (i_name, TREE_PURPOSE (TREE_PURPOSE (j)))) goto failure; } return true; failure: error ("duplicate asm operand name '%s'", TREE_STRING_POINTER (TREE_PURPOSE (TREE_PURPOSE (i)))); return false; }
/* A subroutine of expand_asm_operands. Resolve the names of the operands in *POUTPUTS and *PINPUTS to numbers, and replace the name expansions in STRING and in the constraints to those numbers. */ tree resolve_asm_operand_names (tree string, tree outputs, tree inputs) { char *buffer; char *p; const char *c; tree t; check_unique_operand_names (outputs, inputs); /* Substitute [<name>] in input constraint strings. There should be no named operands in output constraints. */ for (t = inputs; t ; t = TREE_CHAIN (t)) { c = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); if (strchr (c, '[') != NULL) { p = buffer = xstrdup (c); while ((p = strchr (p, '[')) != NULL) p = resolve_operand_name_1 (p, outputs, inputs); TREE_VALUE (TREE_PURPOSE (t)) = build_string (strlen (buffer), buffer); free (buffer); } } /* Now check for any needed substitutions in the template. */ c = TREE_STRING_POINTER (string); while ((c = strchr (c, '%')) != NULL) { if (c[1] == '[') break; else if (ISALPHA (c[1]) && c[2] == '[') break; else { c += 1; continue; } } if (c) { /* OK, we need to make a copy so we can perform the substitutions. Assume that we will not need extra space--we get to remove '[' and ']', which means we cannot have a problem until we have more than 999 operands. */ buffer = xstrdup (TREE_STRING_POINTER (string)); p = buffer + (c - TREE_STRING_POINTER (string)); while ((p = strchr (p, '%')) != NULL) { if (p[1] == '[') p += 1; else if (ISALPHA (p[1]) && p[2] == '[') p += 2; else { p += 1; continue; } p = resolve_operand_name_1 (p, outputs, inputs); } string = build_string (strlen (buffer), buffer); free (buffer); } return string; }
/* A subroutine of resolve_operand_names. P points to the '[' for a potential named operand of the form [<name>]. In place, replace the name and brackets with a number. Return a pointer to the balance of the string after substitution.
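   For example (an aside, not in the original comment), given

     asm ("mov %[src],%[dst]" : [dst] "=rm" (d) : [src] "r" (s));

   outputs are numbered before inputs, so the template is rewritten to
   "mov %1,%0", and an occurrence of "[dst]" inside an input constraint
   string would likewise be replaced by "0".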
*/ static char * resolve_operand_name_1 (char *p, tree outputs, tree inputs) { char *q; int op; tree t; size_t len; /* Collect the operand name. */ q = strchr (p, ']'); if (!q) { error ("missing close brace for named operand"); return strchr (p, '\0'); } len = q - p - 1; /* Resolve the name to a number. */ for (op = 0, t = outputs; t ; t = TREE_CHAIN (t), op++) { tree name = TREE_PURPOSE (TREE_PURPOSE (t)); if (name) { const char *c = TREE_STRING_POINTER (name); if (strncmp (c, p + 1, len) == 0 && c[len] == '\0') goto found; } } for (t = inputs; t ; t = TREE_CHAIN (t), op++) { tree name = TREE_PURPOSE (TREE_PURPOSE (t)); if (name) { const char *c = TREE_STRING_POINTER (name); if (strncmp (c, p + 1, len) == 0 && c[len] == '\0') goto found; } } *q = '\0'; error ("undefined named operand '%s'", p + 1); op = 0; found: /* Replace the name with the number. Unfortunately, not all libraries get the return value of sprintf correct, so search for the end of the generated string by hand. */ sprintf (p, "%d", op); p = strchr (p, '\0'); /* Verify the no extra buffer space assumption. */ if (p > q) abort (); /* Shift the rest of the buffer down to fill the gap. */ memmove (p, q + 1, strlen (q + 1) + 1); return p; } /* Generate RTL to evaluate the expression EXP. */ void expand_expr_stmt (tree exp) { rtx value; tree type; value = expand_expr (exp, const0_rtx, VOIDmode, 0); type = TREE_TYPE (exp); /* If all we do is reference a volatile value in memory, copy it to a register to be sure it is actually touched. */ if (value && MEM_P (value) && TREE_THIS_VOLATILE (exp)) { if (TYPE_MODE (type) == VOIDmode) ; else if (TYPE_MODE (type) != BLKmode) value = copy_to_reg (value); else { rtx lab = gen_label_rtx (); /* Compare the value with itself to reference it. */ emit_cmp_and_jump_insns (value, value, EQ, expand_expr (TYPE_SIZE (type), NULL_RTX, VOIDmode, 0), BLKmode, 0, lab); emit_label (lab); } } /* Free any temporaries used to evaluate this expression. */ free_temp_slots (); emit_queue (); } /* Warn if EXP contains any computations whose results are not used. Return 1 if a warning is printed; 0 otherwise. LOCUS is the (potential) location of the expression. */ int warn_if_unused_value (tree exp, location_t locus) { restart: if (TREE_USED (exp)) return 0; /* Don't warn about void constructs. This includes casting to void, void function calls, and statement expressions with a final cast to void. */ if (VOID_TYPE_P (TREE_TYPE (exp))) return 0; if (EXPR_LOCUS (exp)) locus = *EXPR_LOCUS (exp); switch (TREE_CODE (exp)) { case PREINCREMENT_EXPR: case POSTINCREMENT_EXPR: case PREDECREMENT_EXPR: case POSTDECREMENT_EXPR: case MODIFY_EXPR: case INIT_EXPR: case TARGET_EXPR: case CALL_EXPR: case TRY_CATCH_EXPR: case WITH_CLEANUP_EXPR: case EXIT_EXPR: return 0; case BIND_EXPR: /* For a binding, warn if no side effect within it. */ exp = BIND_EXPR_BODY (exp); goto restart; case SAVE_EXPR: exp = TREE_OPERAND (exp, 0); goto restart; case TRUTH_ORIF_EXPR: case TRUTH_ANDIF_EXPR: /* In && or ||, warn if 2nd operand has no side effect. */ exp = TREE_OPERAND (exp, 1); goto restart; case COMPOUND_EXPR: if (TREE_NO_WARNING (exp)) return 0; if (warn_if_unused_value (TREE_OPERAND (exp, 0), locus)) return 1; /* Let people do `(foo (), 0)' without a warning. */ if (TREE_CONSTANT (TREE_OPERAND (exp, 1))) return 0; exp = TREE_OPERAND (exp, 1); goto restart; case NOP_EXPR: case CONVERT_EXPR: case NON_LVALUE_EXPR: /* Don't warn about conversions not explicit in the user's program. 
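   (As an illustrative aside, not from the original comment: a statement
   like "x + 1;" falls through to the maybe_warn code below and is
   diagnosed, whereas "(void) (x + 1);" is already filtered out by the
   VOID_TYPE_P test at the top, and "y = x + 1;" returns immediately from
   the MODIFY_EXPR case.)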
*/ if (TREE_NO_WARNING (exp)) return 0; /* Assignment to a cast usually results in a cast of a modify. Don't complain about that. There can be an arbitrary number of casts before the modify, so we must loop until we find the first non-cast expression and then test to see if that is a modify. */ { tree tem = TREE_OPERAND (exp, 0); while (TREE_CODE (tem) == CONVERT_EXPR || TREE_CODE (tem) == NOP_EXPR) tem = TREE_OPERAND (tem, 0); if (TREE_CODE (tem) == MODIFY_EXPR || TREE_CODE (tem) == INIT_EXPR || TREE_CODE (tem) == CALL_EXPR) return 0; } goto maybe_warn; case INDIRECT_REF: /* Don't warn about automatic dereferencing of references, since the user cannot control it. */ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == REFERENCE_TYPE) { exp = TREE_OPERAND (exp, 0); goto restart; } /* Fall through. */ default: /* Referencing a volatile value is a side effect, so don't warn. */ if ((DECL_P (exp) || TREE_CODE_CLASS (TREE_CODE (exp)) == 'r') && TREE_THIS_VOLATILE (exp)) return 0; /* If this is an expression which has no operands, there is no value to be unused. There are no such language-independent codes, but front ends may define such. */ if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'e' && TREE_CODE_LENGTH (TREE_CODE (exp)) == 0) return 0; maybe_warn: /* If this is an expression with side effects, don't warn. */ if (TREE_SIDE_EFFECTS (exp)) return 0; warning ("%Hvalue computed is not used", &locus); return 1; } } /* Generate RTL for the start of an if-then. COND is the expression whose truth should be tested. If EXITFLAG is nonzero, this conditional is visible to `exit_something'. */ void expand_start_cond (tree cond, int exitflag) { struct nesting *thiscond = ALLOC_NESTING (); /* Make an entry on cond_stack for the cond we are entering. */ thiscond->desc = COND_NESTING; thiscond->next = cond_stack; thiscond->all = nesting_stack; thiscond->depth = ++nesting_depth; thiscond->data.cond.next_label = gen_label_rtx (); /* Before we encounter an `else', we don't need a separate exit label unless there are supposed to be exit statements to exit this conditional. */ thiscond->exit_label = exitflag ? gen_label_rtx () : 0; thiscond->data.cond.endif_label = thiscond->exit_label; cond_stack = thiscond; nesting_stack = thiscond; do_jump (cond, thiscond->data.cond.next_label, NULL_RTX); } /* Generate RTL between then-clause and the elseif-clause of an if-then-elseif-.... */ void expand_start_elseif (tree cond) { if (cond_stack->data.cond.endif_label == 0) cond_stack->data.cond.endif_label = gen_label_rtx (); emit_jump (cond_stack->data.cond.endif_label); emit_label (cond_stack->data.cond.next_label); cond_stack->data.cond.next_label = gen_label_rtx (); do_jump (cond, cond_stack->data.cond.next_label, NULL_RTX); } /* Generate RTL between the then-clause and the else-clause of an if-then-else. */ void expand_start_else (void) { if (cond_stack->data.cond.endif_label == 0) cond_stack->data.cond.endif_label = gen_label_rtx (); emit_jump (cond_stack->data.cond.endif_label); emit_label (cond_stack->data.cond.next_label); cond_stack->data.cond.next_label = 0; /* No more _else or _elseif calls. */ } /* After calling expand_start_else, turn this "else" into an "else if" by providing another condition. */ void expand_elseif (tree cond) { cond_stack->data.cond.next_label = gen_label_rtx (); do_jump (cond, cond_stack->data.cond.next_label, NULL_RTX); } /* Generate RTL for the end of an if-then. Pop the record for it off of cond_stack. 
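   As a usage sketch (illustrative, not tied to any particular front
   end), 'if (a) s1; else s2;' is expanded with this nesting machinery
   roughly as:

     expand_start_cond (a, 0);      jump to next_label if A is false
     ... expand s1 ...
     expand_start_else ();          jump to endif_label, emit next_label
     ... expand s2 ...
     expand_end_cond ();            emit pending labels, pop cond_stack

   with expand_start_elseif used in place of expand_start_else for each
   'else if' arm.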
*/ void expand_end_cond (void) { struct nesting *thiscond = cond_stack; do_pending_stack_adjust (); if (thiscond->data.cond.next_label) emit_label (thiscond->data.cond.next_label); if (thiscond->data.cond.endif_label) emit_label (thiscond->data.cond.endif_label); POPSTACK (cond_stack); } /* Return nonzero if we should preserve sub-expressions as separate pseudos. We never do so if we aren't optimizing. We always do so if -fexpensive-optimizations. */ int preserve_subexpressions_p (void) { if (flag_expensive_optimizations) return 1; if (optimize == 0 || cfun == 0 || cfun->stmt == 0) return 0; return 1; } /* Generate RTL to return from the current function, with no value. (That is, we do not do anything about returning any value.) */ void expand_null_return (void) { rtx last_insn; last_insn = get_last_insn (); /* If this function was declared to return a value, but we didn't, clobber the return registers so that they are not propagated live to the rest of the function. */ clobber_return_register (); expand_null_return_1 (last_insn); } /* Generate RTL to return directly from the current function. (That is, we bypass any return value.) */ void expand_naked_return (void) { rtx last_insn, end_label; last_insn = get_last_insn (); end_label = naked_return_label; clear_pending_stack_adjust (); do_pending_stack_adjust (); if (end_label == 0) end_label = naked_return_label = gen_label_rtx (); expand_goto_internal (NULL_TREE, end_label, last_insn); } /* Try to guess whether the value of return means error code. */ static enum br_predictor return_prediction (rtx val) { /* Different heuristics for pointers and scalars. */ if (POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl)))) { /* NULL is usually not returned. */ if (val == const0_rtx) return PRED_NULL_RETURN; } else { /* Negative return values are often used to indicate errors. */ if (GET_CODE (val) == CONST_INT && INTVAL (val) < 0) return PRED_NEGATIVE_RETURN; /* Constant return values are also usually erors, zero/one often mean booleans so exclude them from the heuristics. */ if (CONSTANT_P (val) && (val != const0_rtx && val != const1_rtx)) return PRED_CONST_RETURN; } return PRED_NO_PREDICTION; } /* If the current function returns values in the most significant part of a register, shift return value VAL appropriately. The mode of the function's return type is known not to be BLKmode. */ static rtx shift_return_value (rtx val) { tree type; type = TREE_TYPE (DECL_RESULT (current_function_decl)); if (targetm.calls.return_in_msb (type)) { rtx target; HOST_WIDE_INT shift; target = DECL_RTL (DECL_RESULT (current_function_decl)); shift = (GET_MODE_BITSIZE (GET_MODE (target)) - BITS_PER_UNIT * int_size_in_bytes (type)); if (shift > 0) val = expand_shift (LSHIFT_EXPR, GET_MODE (target), gen_lowpart (GET_MODE (target), val), build_int_2 (shift, 0), target, 1); } return val; } /* Generate RTL to return from the current function, with value VAL. */ static void expand_value_return (rtx val) { rtx last_insn; rtx return_reg; enum br_predictor pred; if (flag_guess_branch_prob && (pred = return_prediction (val)) != PRED_NO_PREDICTION) { /* Emit information for branch prediction. */ rtx note; note = emit_note (NOTE_INSN_PREDICTION); NOTE_PREDICTION (note) = NOTE_PREDICT (pred, NOT_TAKEN); } last_insn = get_last_insn (); return_reg = DECL_RTL (DECL_RESULT (current_function_decl)); /* Copy the value to the return location unless it's already there. 
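   For instance, if the function is declared to return 'short' and the
   target promotes function return values, DECL_RESULT may have SImode
   while VAL was computed in HImode, so convert_modes widens it first;
   and when the ABI spreads the value over several registers, RETURN_REG
   is a PARALLEL and emit_group_load distributes the pieces instead of a
   single move.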
*/ if (return_reg != val) { tree type = TREE_TYPE (DECL_RESULT (current_function_decl)); if (targetm.calls.promote_function_return (TREE_TYPE (current_function_decl))) { int unsignedp = TYPE_UNSIGNED (type); enum machine_mode old_mode = DECL_MODE (DECL_RESULT (current_function_decl)); enum machine_mode mode = promote_mode (type, old_mode, &unsignedp, 1); if (mode != old_mode) val = convert_modes (mode, old_mode, val, unsignedp); } if (GET_CODE (return_reg) == PARALLEL) emit_group_load (return_reg, val, type, int_size_in_bytes (type)); else emit_move_insn (return_reg, val); } expand_null_return_1 (last_insn); } /* Output a return with no value. If LAST_INSN is nonzero, pretend that the return takes place after LAST_INSN. */ static void expand_null_return_1 (rtx last_insn) { rtx end_label = return_label; clear_pending_stack_adjust (); do_pending_stack_adjust (); if (end_label == 0) end_label = return_label = gen_label_rtx (); expand_goto_internal (NULL_TREE, end_label, last_insn); } /* Generate RTL to evaluate the expression RETVAL and return it from the current function. */ void expand_return (tree retval) { /* If there are any cleanups to be performed, then they will be inserted following LAST_INSN. It is desirable that the last_insn, for such purposes, should be the last insn before computing the return value. Otherwise, cleanups which call functions can clobber the return value. */ /* ??? rms: I think that is erroneous, because in C++ it would run destructors on variables that might be used in the subsequent computation of the return value. */ rtx last_insn = 0; rtx result_rtl; rtx val = 0; tree retval_rhs; /* If function wants no value, give it none. */ if (TREE_CODE (TREE_TYPE (TREE_TYPE (current_function_decl))) == VOID_TYPE) { expand_expr (retval, NULL_RTX, VOIDmode, 0); emit_queue (); expand_null_return (); return; } if (retval == error_mark_node) { /* Treat this like a return of no value from a function that returns a value. */ expand_null_return (); return; } else if (TREE_CODE (retval) == RESULT_DECL) retval_rhs = retval; else if ((TREE_CODE (retval) == MODIFY_EXPR || TREE_CODE (retval) == INIT_EXPR) && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL) retval_rhs = TREE_OPERAND (retval, 1); else retval_rhs = retval; last_insn = get_last_insn (); result_rtl = DECL_RTL (DECL_RESULT (current_function_decl)); /* If the result is an aggregate that is being returned in one (or more) registers, load the registers here. The compiler currently can't handle copying a BLKmode value into registers. We could put this code in a more general area (for use by everyone instead of just function call/return), but until this feature is generally usable it is kept here (and in expand_call). The value must go into a pseudo in case there are cleanups that will clobber the real return register. 
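   To make the copy loop below concrete (the numbers are only
   illustrative): returning 'struct { char a, b, c; }' by value on a
   32-bit register target gives BYTES == 3 and BITSIZE == 8 (the
   structure's alignment), so a single word_mode pseudo is filled one
   byte at a time; on a big-endian target PADDING_CORRECTION is
   32 - 3*8 == 8 bits of padding on the left side; finally SImode is
   picked as the smallest integer mode covering 3 bytes and the pseudo
   is returned through it.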
*/ if (retval_rhs != 0 && TYPE_MODE (TREE_TYPE (retval_rhs)) == BLKmode && REG_P (result_rtl)) { int i; unsigned HOST_WIDE_INT bitpos, xbitpos; unsigned HOST_WIDE_INT padding_correction = 0; unsigned HOST_WIDE_INT bytes = int_size_in_bytes (TREE_TYPE (retval_rhs)); int n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD; unsigned int bitsize = MIN (TYPE_ALIGN (TREE_TYPE (retval_rhs)), BITS_PER_WORD); rtx *result_pseudos = alloca (sizeof (rtx) * n_regs); rtx result_reg, src = NULL_RTX, dst = NULL_RTX; rtx result_val = expand_expr (retval_rhs, NULL_RTX, VOIDmode, 0); enum machine_mode tmpmode, result_reg_mode; if (bytes == 0) { expand_null_return (); return; } /* If the structure doesn't take up a whole number of words, see whether the register value should be padded on the left or on the right. Set PADDING_CORRECTION to the number of padding bits needed on the left side. In most ABIs, the structure will be returned at the least end of the register, which translates to right padding on little-endian targets and left padding on big-endian targets. The opposite holds if the structure is returned at the most significant end of the register. */ if (bytes % UNITS_PER_WORD != 0 && (targetm.calls.return_in_msb (TREE_TYPE (retval_rhs)) ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)) padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT)); /* Copy the structure BITSIZE bits at a time. */ for (bitpos = 0, xbitpos = padding_correction; bitpos < bytes * BITS_PER_UNIT; bitpos += bitsize, xbitpos += bitsize) { /* We need a new destination pseudo each time xbitpos is on a word boundary and when xbitpos == padding_correction (the first time through). */ if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction) { /* Generate an appropriate register. */ dst = gen_reg_rtx (word_mode); result_pseudos[xbitpos / BITS_PER_WORD] = dst; /* Clear the destination before we move anything into it. */ emit_move_insn (dst, CONST0_RTX (GET_MODE (dst))); } /* We need a new source operand each time bitpos is on a word boundary. */ if (bitpos % BITS_PER_WORD == 0) src = operand_subword_force (result_val, bitpos / BITS_PER_WORD, BLKmode); /* Use bitpos for the source extraction (left justified) and xbitpos for the destination store (right justified). */ store_bit_field (dst, bitsize, xbitpos % BITS_PER_WORD, word_mode, extract_bit_field (src, bitsize, bitpos % BITS_PER_WORD, 1, NULL_RTX, word_mode, word_mode, BITS_PER_WORD), BITS_PER_WORD); } tmpmode = GET_MODE (result_rtl); if (tmpmode == BLKmode) { /* Find the smallest integer mode large enough to hold the entire structure and use that mode instead of BLKmode on the USE insn for the return register. */ for (tmpmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmpmode != VOIDmode; tmpmode = GET_MODE_WIDER_MODE (tmpmode)) /* Have we found a large enough mode? */ if (GET_MODE_SIZE (tmpmode) >= bytes) break; /* No suitable mode found. 
*/ if (tmpmode == VOIDmode) abort (); PUT_MODE (result_rtl, tmpmode); } if (GET_MODE_SIZE (tmpmode) < GET_MODE_SIZE (word_mode)) result_reg_mode = word_mode; else result_reg_mode = tmpmode; result_reg = gen_reg_rtx (result_reg_mode); emit_queue (); for (i = 0; i < n_regs; i++) emit_move_insn (operand_subword (result_reg, i, 0, result_reg_mode), result_pseudos[i]); if (tmpmode != result_reg_mode) result_reg = gen_lowpart (tmpmode, result_reg); expand_value_return (result_reg); } else if (retval_rhs != 0 && !VOID_TYPE_P (TREE_TYPE (retval_rhs)) && (REG_P (result_rtl) || (GET_CODE (result_rtl) == PARALLEL))) { /* Calculate the return value into a temporary (usually a pseudo reg). */ tree ot = TREE_TYPE (DECL_RESULT (current_function_decl)); tree nt = build_qualified_type (ot, TYPE_QUALS (ot) | TYPE_QUAL_CONST); val = assign_temp (nt, 0, 0, 1); val = expand_expr (retval_rhs, val, GET_MODE (val), 0); val = force_not_mem (val); emit_queue (); /* Return the calculated value, doing cleanups first. */ expand_value_return (shift_return_value (val)); } else { /* No cleanups or no hard reg used; calculate value into hard return reg. */ expand_expr (retval, const0_rtx, VOIDmode, 0); emit_queue (); expand_value_return (result_rtl); } } /* Generate the RTL code for entering a binding contour. The variables are declared one by one, by calls to `expand_decl'. FLAGS is a bitwise or of the following flags: 1 - Nonzero if this construct should be visible to `exit_something'. 2 - Nonzero if this contour does not require a NOTE_INSN_BLOCK_BEG note. Virtually all calls from language-independent code should set this flag because they will not create corresponding BLOCK nodes. (There should be a one-to-one correspondence between NOTE_INSN_BLOCK_BEG notes and BLOCKs.) If this flag is set, MARK_ENDS should be zero when expand_end_bindings is called. If we are creating a NOTE_INSN_BLOCK_BEG note, a BLOCK may optionally be supplied. If so, it becomes the NOTE_BLOCK for the note. */ void expand_start_bindings_and_block (int flags, tree block) { struct nesting *thisblock = ALLOC_NESTING (); rtx note; int exit_flag = ((flags & 1) != 0); int block_flag = ((flags & 2) == 0); /* If a BLOCK is supplied, then the caller should be requesting a NOTE_INSN_BLOCK_BEG note. */ if (!block_flag && block) abort (); /* Create a note to mark the beginning of the block. */ note = emit_note (NOTE_INSN_DELETED); /* Make an entry on block_stack for the block we are entering. */ thisblock->desc = BLOCK_NESTING; thisblock->next = block_stack; thisblock->all = nesting_stack; thisblock->depth = ++nesting_depth; thisblock->data.block.stack_level = 0; thisblock->data.block.cleanups = 0; thisblock->data.block.exception_region = 0; thisblock->data.block.block_target_temp_slot_level = target_temp_slot_level; thisblock->data.block.conditional_code = 0; thisblock->data.block.last_unconditional_cleanup = note; /* When we insert instructions after the last unconditional cleanup, we don't adjust last_insn. That means that a later add_insn will clobber the instructions we've just added. The easiest way to fix this is to just insert another instruction here, so that the instructions inserted after the last unconditional cleanup are never the last instruction. 
*/ emit_note (NOTE_INSN_DELETED); if (block_stack && !(block_stack->data.block.cleanups == NULL_TREE && block_stack->data.block.outer_cleanups == NULL_TREE)) thisblock->data.block.outer_cleanups = tree_cons (NULL_TREE, block_stack->data.block.cleanups, block_stack->data.block.outer_cleanups); else thisblock->data.block.outer_cleanups = 0; thisblock->data.block.label_chain = 0; thisblock->data.block.innermost_stack_block = stack_block_stack; thisblock->data.block.first_insn = note; thisblock->data.block.block_start_count = ++current_block_start_count; thisblock->exit_label = exit_flag ? gen_label_rtx () : 0; block_stack = thisblock; nesting_stack = thisblock; /* Make a new level for allocating stack slots. */ push_temp_slots (); } /* Specify the scope of temporaries created by TARGET_EXPRs. Similar to CLEANUP_POINT_EXPR, but handles cases when a series of calls to expand_expr are made. After we end the region, we know that all space for all temporaries that were created by TARGET_EXPRs will be destroyed and their space freed for reuse. */ void expand_start_target_temps (void) { /* This is so that even if the result is preserved, the space allocated will be freed, as we know that it is no longer in use. */ push_temp_slots (); /* Start a new binding layer that will keep track of all cleanup actions to be performed. */ expand_start_bindings (2); target_temp_slot_level = temp_slot_level; } void expand_end_target_temps (void) { expand_end_bindings (NULL_TREE, 0, 0); /* This is so that even if the result is preserved, the space allocated will be freed, as we know that it is no longer in use. */ pop_temp_slots (); } /* Given a pointer to a BLOCK node return nonzero if (and only if) the node in question represents the outermost pair of curly braces (i.e. the "body block") of a function or method. For any BLOCK node representing a "body block" of a function or method, the BLOCK_SUPERCONTEXT of the node will point to another BLOCK node which represents the outermost (function) scope for the function or method (i.e. the one which includes the formal parameters). The BLOCK_SUPERCONTEXT of *that* node in turn will point to the relevant FUNCTION_DECL node. */ int is_body_block (tree stmt) { if (lang_hooks.no_body_blocks) return 0; if (TREE_CODE (stmt) == BLOCK) { tree parent = BLOCK_SUPERCONTEXT (stmt); if (parent && TREE_CODE (parent) == BLOCK) { tree grandparent = BLOCK_SUPERCONTEXT (parent); if (grandparent && TREE_CODE (grandparent) == FUNCTION_DECL) return 1; } } return 0; } /* True if we are currently emitting insns in an area of output code that is controlled by a conditional expression. This is used by the cleanup handling code to generate conditional cleanup actions. */ int conditional_context (void) { return block_stack && block_stack->data.block.conditional_code; } /* Return an opaque pointer to the current nesting level, so frontend code can check its own sanity. */ struct nesting * current_nesting_level (void) { return cfun ? block_stack : 0; } /* Emit code to restore vital registers at the beginning of a nonlocal goto handler. */ static void expand_nl_goto_receiver (void) { /* Clobber the FP when we get here, so we have to make sure it's marked as used by this function. */ emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx)); /* Mark the static chain as clobbered here so life information doesn't get messed up for it. */ emit_insn (gen_rtx_CLOBBER (VOIDmode, static_chain_rtx)); #ifdef HAVE_nonlocal_goto if (! 
HAVE_nonlocal_goto) #endif /* First adjust our frame pointer to its actual value. It was previously set to the start of the virtual area corresponding to the stacked variables when we branched here and now needs to be adjusted to the actual hardware fp value. Assignments are to virtual registers are converted by instantiate_virtual_regs into the corresponding assignment to the underlying register (fp in this case) that makes the original assignment true. So the following insn will actually be decrementing fp by STARTING_FRAME_OFFSET. */ emit_move_insn (virtual_stack_vars_rtx, hard_frame_pointer_rtx); #if ARG_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM if (fixed_regs[ARG_POINTER_REGNUM]) { #ifdef ELIMINABLE_REGS /* If the argument pointer can be eliminated in favor of the frame pointer, we don't need to restore it. We assume here that if such an elimination is present, it can always be used. This is the case on all known machines; if we don't make this assumption, we do unnecessary saving on many machines. */ static const struct elims {const int from, to;} elim_regs[] = ELIMINABLE_REGS; size_t i; for (i = 0; i < ARRAY_SIZE (elim_regs); i++) if (elim_regs[i].from == ARG_POINTER_REGNUM && elim_regs[i].to == HARD_FRAME_POINTER_REGNUM) break; if (i == ARRAY_SIZE (elim_regs)) #endif { /* Now restore our arg pointer from the address at which it was saved in our stack frame. */ emit_move_insn (virtual_incoming_args_rtx, copy_to_reg (get_arg_pointer_save_area (cfun))); } } #endif #ifdef HAVE_nonlocal_goto_receiver if (HAVE_nonlocal_goto_receiver) emit_insn (gen_nonlocal_goto_receiver ()); #endif /* @@@ This is a kludge. Not all machine descriptions define a blockage insn, but we must not allow the code we just generated to be reordered by scheduling. Specifically, the update of the frame pointer must happen immediately, not later. So emit an ASM_INPUT to act as blockage insn. */ emit_insn (gen_rtx_ASM_INPUT (VOIDmode, "")); } /* Warn about any unused VARS (which may contain nodes other than VAR_DECLs, but such nodes are ignored). The nodes are connected via the TREE_CHAIN field. */ void warn_about_unused_variables (tree vars) { tree decl; if (warn_unused_variable) for (decl = vars; decl; decl = TREE_CHAIN (decl)) if (TREE_CODE (decl) == VAR_DECL && ! TREE_USED (decl) && ! DECL_IN_SYSTEM_HEADER (decl) && DECL_NAME (decl) && ! DECL_ARTIFICIAL (decl)) warning ("%Junused variable '%D'", decl, decl); } /* Generate RTL code to terminate a binding contour. VARS is the chain of VAR_DECL nodes for the variables bound in this contour. There may actually be other nodes in this chain, but any nodes other than VAR_DECLS are ignored. MARK_ENDS is nonzero if we should put a note at the beginning and end of this binding contour. DONT_JUMP_IN is positive if it is not valid to jump into this contour, zero if we can jump into this contour only if it does not have a saved stack level, and negative if we are not to check for invalid use of labels (because the front end does that). */ void expand_end_bindings (tree vars, int mark_ends ATTRIBUTE_UNUSED, int dont_jump_in) { struct nesting *thisblock = block_stack; /* If any of the variables in this scope were not used, warn the user. */ warn_about_unused_variables (vars); if (thisblock->exit_label) { do_pending_stack_adjust (); emit_label (thisblock->exit_label); } /* Don't allow jumping into a block that has a stack level. Cleanups are allowed, though. 
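   For example (illustrative): in

     goto in;
     { int n = f (); char buf[n]; in: g (buf); }

   the jump would bypass the dynamic allocation of BUF, so the labels of
   such a block are marked DECL_TOO_LATE below and offending uses are
   diagnosed; a block that has cleanups but no saved stack level may
   still be jumped into when DONT_JUMP_IN is zero.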
*/ if (dont_jump_in > 0 || (dont_jump_in == 0 && thisblock->data.block.stack_level != 0)) { struct label_chain *chain; /* Any labels in this block are no longer valid to go to. Mark them to cause an error message. */ for (chain = thisblock->data.block.label_chain; chain; chain = chain->next) { DECL_TOO_LATE (chain->label) = 1; /* If any goto without a fixup came to this label, that must be an error, because gotos without fixups come from outside all saved stack-levels. */ if (TREE_ADDRESSABLE (chain->label)) error ("%Jlabel '%D' used before containing binding contour", chain->label, chain->label); } } /* Restore stack level in effect before the block (only if variable-size objects allocated). */ /* Perform any cleanups associated with the block. */ if (thisblock->data.block.stack_level != 0 || thisblock->data.block.cleanups != 0) { int reachable; rtx insn; /* Only clean up here if this point can actually be reached. */ insn = get_last_insn (); if (GET_CODE (insn) == NOTE) insn = prev_nonnote_insn (insn); reachable = (! insn || GET_CODE (insn) != BARRIER); /* Do the cleanups. */ expand_cleanups (thisblock->data.block.cleanups, 0, reachable); if (reachable) do_pending_stack_adjust (); /* Restore the stack level. */ if (reachable && thisblock->data.block.stack_level != 0) { emit_stack_restore (thisblock->next ? SAVE_BLOCK : SAVE_FUNCTION, thisblock->data.block.stack_level, NULL_RTX); if (cfun->nonlocal_goto_save_area) update_nonlocal_goto_save_area (); } /* Any gotos out of this block must also do these things. Also report any gotos with fixups that came to labels in this level. */ fixup_gotos (thisblock, thisblock->data.block.stack_level, thisblock->data.block.cleanups, thisblock->data.block.first_insn, dont_jump_in); } /* Mark the beginning and end of the scope if requested. We do this now, after running cleanups on the variables just going out of scope, so they are in scope for their cleanups. */ /* Get rid of the beginning-mark if we don't make an end-mark. */ NOTE_LINE_NUMBER (thisblock->data.block.first_insn) = NOTE_INSN_DELETED; /* Restore the temporary level of TARGET_EXPRs. */ target_temp_slot_level = thisblock->data.block.block_target_temp_slot_level; /* Restore block_stack level for containing block. */ stack_block_stack = thisblock->data.block.innermost_stack_block; POPSTACK (block_stack); /* Pop the stack slot nesting and free any slots at this level. */ pop_temp_slots (); } /* Generate code to save the stack pointer at the start of the current block and set up to restore it on exit. */ void save_stack_pointer (void) { struct nesting *thisblock = block_stack; if (thisblock->data.block.stack_level == 0) { emit_stack_save (thisblock->next ? SAVE_BLOCK : SAVE_FUNCTION, &thisblock->data.block.stack_level, thisblock->data.block.first_insn); stack_block_stack = thisblock; } } /* Generate RTL for the automatic variable declaration DECL. (Other kinds of declarations are simply ignored if seen here.) */ void expand_decl (tree decl) { tree type; type = TREE_TYPE (decl); /* For a CONST_DECL, set mode, alignment, and sizes from those of the type in case this node is used in a reference. */ if (TREE_CODE (decl) == CONST_DECL) { DECL_MODE (decl) = TYPE_MODE (type); DECL_ALIGN (decl) = TYPE_ALIGN (type); DECL_SIZE (decl) = TYPE_SIZE (type); DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type); return; } /* Otherwise, only automatic variables need any expansion done. Static and external variables, and external functions, will be handled by `assemble_variable' (called from finish_decl). 
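   (So, for illustration: a file-scope or 'static' variable never
   reaches the code below, while an automatic 'int i;' normally gets a
   pseudo register, a fixed-size automatic aggregate gets a stack slot
   from assign_temp, and a variable-sized object falls through to the
   dynamic stack allocation at the end of this function.)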
TYPE_DECL requires nothing. PARM_DECLs are handled in `assign_parms'. */ if (TREE_CODE (decl) != VAR_DECL) return; if (TREE_STATIC (decl) || DECL_EXTERNAL (decl)) return; /* Create the RTL representation for the variable. */ if (type == error_mark_node) SET_DECL_RTL (decl, gen_rtx_MEM (BLKmode, const0_rtx)); else if (DECL_SIZE (decl) == 0) /* Variable with incomplete type. */ { rtx x; if (DECL_INITIAL (decl) == 0) /* Error message was already done; now avoid a crash. */ x = gen_rtx_MEM (BLKmode, const0_rtx); else /* An initializer is going to decide the size of this array. Until we know the size, represent its address with a reg. */ x = gen_rtx_MEM (BLKmode, gen_reg_rtx (Pmode)); set_mem_attributes (x, decl, 1); SET_DECL_RTL (decl, x); } else if (use_register_for_decl (decl)) { /* Automatic variable that can go in a register. */ int unsignedp = TYPE_UNSIGNED (type); enum machine_mode reg_mode = promote_mode (type, DECL_MODE (decl), &unsignedp, 0); SET_DECL_RTL (decl, gen_reg_rtx (reg_mode)); /* Note if the object is a user variable. */ if (!DECL_ARTIFICIAL (decl)) { mark_user_reg (DECL_RTL (decl)); /* Trust user variables which have a pointer type to really be pointers. Do not trust compiler generated temporaries as our type system is totally busted as it relates to pointer arithmetic which translates into lots of compiler generated objects with pointer types, but which are not really pointers. */ if (POINTER_TYPE_P (type)) mark_reg_pointer (DECL_RTL (decl), TYPE_ALIGN (TREE_TYPE (TREE_TYPE (decl)))); } maybe_set_unchanging (DECL_RTL (decl), decl); } else if (TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST && ! (flag_stack_check && ! STACK_CHECK_BUILTIN && 0 < compare_tree_int (DECL_SIZE_UNIT (decl), STACK_CHECK_MAX_VAR_SIZE))) { /* Variable of fixed size that goes on the stack. */ rtx oldaddr = 0; rtx addr; rtx x; /* If we previously made RTL for this decl, it must be an array whose size was determined by the initializer. The old address was a register; set that register now to the proper address. */ if (DECL_RTL_SET_P (decl)) { if (!MEM_P (DECL_RTL (decl)) || !REG_P (XEXP (DECL_RTL (decl), 0))) abort (); oldaddr = XEXP (DECL_RTL (decl), 0); } /* Set alignment we actually gave this decl. */ DECL_ALIGN (decl) = (DECL_MODE (decl) == BLKmode ? BIGGEST_ALIGNMENT : GET_MODE_BITSIZE (DECL_MODE (decl))); DECL_USER_ALIGN (decl) = 0; x = assign_temp (decl, 1, 1, 1); set_mem_attributes (x, decl, 1); SET_DECL_RTL (decl, x); if (oldaddr) { addr = force_operand (XEXP (DECL_RTL (decl), 0), oldaddr); if (addr != oldaddr) emit_move_insn (oldaddr, addr); } } else /* Dynamic-size object: must push space on the stack. */ { rtx address, size, x; /* Record the stack pointer on entry to block, if have not already done so. */ do_pending_stack_adjust (); save_stack_pointer (); /* Compute the variable's size, in bytes. This will expand any needed SAVE_EXPRs for the first time. */ size = expand_expr (DECL_SIZE_UNIT (decl), NULL_RTX, VOIDmode, 0); free_temp_slots (); /* Allocate space on the stack for the variable. Note that DECL_ALIGN says how the variable is to be aligned and we cannot use it to conclude anything about the alignment of the size. */ address = allocate_dynamic_stack_space (size, NULL_RTX, TYPE_ALIGN (TREE_TYPE (decl))); /* Reference the variable indirect through that rtx. */ x = gen_rtx_MEM (DECL_MODE (decl), address); set_mem_attributes (x, decl, 1); SET_DECL_RTL (decl, x); /* Indicate the alignment we actually gave this variable. 
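   For instance, a C99 variable-length array 'char buf[n];' ends up
   here: its size expression was just expanded (forcing any SAVE_EXPRs),
   its address lives behind a register rather than at a fixed frame
   offset, and the alignment recorded below is the stack boundary rather
   than anything derived from its element type.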
*/ #ifdef STACK_BOUNDARY DECL_ALIGN (decl) = STACK_BOUNDARY; #else DECL_ALIGN (decl) = BIGGEST_ALIGNMENT; #endif DECL_USER_ALIGN (decl) = 0; } } /* Emit code to allocate T_SIZE bytes of dynamic stack space for ALLOC. */ void expand_stack_alloc (tree alloc, tree t_size) { rtx address, dest, size; tree var, type; if (TREE_CODE (alloc) != ADDR_EXPR) abort (); var = TREE_OPERAND (alloc, 0); if (TREE_CODE (var) != VAR_DECL) abort (); type = TREE_TYPE (var); /* Compute the variable's size, in bytes. */ size = expand_expr (t_size, NULL_RTX, VOIDmode, 0); free_temp_slots (); /* Allocate space on the stack for the variable. */ address = XEXP (DECL_RTL (var), 0); dest = allocate_dynamic_stack_space (size, address, TYPE_ALIGN (type)); if (dest != address) emit_move_insn (address, dest); /* Indicate the alignment we actually gave this variable. */ #ifdef STACK_BOUNDARY DECL_ALIGN (var) = STACK_BOUNDARY; #else DECL_ALIGN (var) = BIGGEST_ALIGNMENT; #endif DECL_USER_ALIGN (var) = 0; } /* Emit code to save the current value of stack. */ rtx expand_stack_save (void) { rtx ret = NULL_RTX; do_pending_stack_adjust (); emit_stack_save (SAVE_BLOCK, &ret, NULL_RTX); return ret; } /* Emit code to restore the current value of stack. */ void expand_stack_restore (tree var) { rtx sa = DECL_RTL (var); emit_stack_restore (SAVE_BLOCK, sa, NULL_RTX); } /* Emit code to perform the initialization of a declaration DECL. */ void expand_decl_init (tree decl) { int was_used = TREE_USED (decl); /* If this is a CONST_DECL, we don't have to generate any code. Likewise for static decls. */ if (TREE_CODE (decl) == CONST_DECL || TREE_STATIC (decl)) return; /* Compute and store the initial value now. */ push_temp_slots (); if (DECL_INITIAL (decl) == error_mark_node) { enum tree_code code = TREE_CODE (TREE_TYPE (decl)); if (code == INTEGER_TYPE || code == REAL_TYPE || code == ENUMERAL_TYPE || code == POINTER_TYPE || code == REFERENCE_TYPE) expand_assignment (decl, convert (TREE_TYPE (decl), integer_zero_node), 0); emit_queue (); } else if (DECL_INITIAL (decl) && TREE_CODE (DECL_INITIAL (decl)) != TREE_LIST) { emit_line_note (DECL_SOURCE_LOCATION (decl)); expand_assignment (decl, DECL_INITIAL (decl), 0); emit_queue (); } /* Don't let the initialization count as "using" the variable. */ TREE_USED (decl) = was_used; /* Free any temporaries we made while initializing the decl. */ preserve_temp_slots (NULL_RTX); free_temp_slots (); pop_temp_slots (); } /* CLEANUP is an expression to be executed at exit from this binding contour; for example, in C++, it might call the destructor for this variable. We wrap CLEANUP in an UNSAVE_EXPR node, so that we can expand the CLEANUP multiple times, and have the correct semantics. This happens in exception handling, for gotos, returns, breaks that leave the current scope. If CLEANUP is nonzero and DECL is zero, we record a cleanup that is not associated with any particular variable. */ int expand_decl_cleanup (tree decl, tree cleanup) { struct nesting *thisblock; /* Error if we are not in any block. */ if (cfun == 0 || block_stack == 0) return 0; thisblock = block_stack; /* Record the cleanup if there is one. 
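   For instance (illustrative, in C++ terms): for '{ T t; ... }' the
   front end registers a call to T's destructor as the cleanup for T;
   when the declaration occurs in a conditional context, for example
   inside one arm of '?:', the code below guards the cleanup with a
   runtime flag that is zeroed at the last unconditional point and set
   to one only on the path where the object is actually created.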
*/ if (cleanup != 0) { tree t; rtx seq; tree *cleanups = &thisblock->data.block.cleanups; int cond_context = conditional_context (); if (cond_context) { rtx flag = gen_reg_rtx (word_mode); rtx set_flag_0; tree cond; start_sequence (); emit_move_insn (flag, const0_rtx); set_flag_0 = get_insns (); end_sequence (); thisblock->data.block.last_unconditional_cleanup = emit_insn_after (set_flag_0, thisblock->data.block.last_unconditional_cleanup); emit_move_insn (flag, const1_rtx); cond = build_decl (VAR_DECL, NULL_TREE, lang_hooks.types.type_for_mode (word_mode, 1)); SET_DECL_RTL (cond, flag); /* Conditionalize the cleanup. */ cleanup = build (COND_EXPR, void_type_node, lang_hooks.truthvalue_conversion (cond), cleanup, integer_zero_node); cleanup = fold (cleanup); cleanups = &thisblock->data.block.cleanups; } cleanup = unsave_expr (cleanup); t = *cleanups = tree_cons (decl, cleanup, *cleanups); if (! cond_context) /* If this block has a cleanup, it belongs in stack_block_stack. */ stack_block_stack = thisblock; if (cond_context) { start_sequence (); } if (! using_eh_for_cleanups_p) TREE_ADDRESSABLE (t) = 1; else expand_eh_region_start (); if (cond_context) { seq = get_insns (); end_sequence (); if (seq) thisblock->data.block.last_unconditional_cleanup = emit_insn_after (seq, thisblock->data.block.last_unconditional_cleanup); } else { thisblock->data.block.last_unconditional_cleanup = get_last_insn (); /* When we insert instructions after the last unconditional cleanup, we don't adjust last_insn. That means that a later add_insn will clobber the instructions we've just added. The easiest way to fix this is to just insert another instruction here, so that the instructions inserted after the last unconditional cleanup are never the last instruction. */ emit_note (NOTE_INSN_DELETED); } } return 1; } /* Like expand_decl_cleanup, but maybe only run the cleanup if an exception is thrown. */ int expand_decl_cleanup_eh (tree decl, tree cleanup, int eh_only) { int ret = expand_decl_cleanup (decl, cleanup); if (cleanup && ret) { tree node = block_stack->data.block.cleanups; CLEANUP_EH_ONLY (node) = eh_only; } return ret; } /* DECL is an anonymous union. CLEANUP is a cleanup for DECL. DECL_ELTS is the list of elements that belong to DECL's type. In each, the TREE_VALUE is a VAR_DECL, and the TREE_PURPOSE a cleanup. */ void expand_anon_union_decl (tree decl, tree cleanup, tree decl_elts) { struct nesting *thisblock = cfun == 0 ? 0 : block_stack; rtx x; tree t; /* If any of the elements are addressable, so is the entire union. */ for (t = decl_elts; t; t = TREE_CHAIN (t)) if (TREE_ADDRESSABLE (TREE_VALUE (t))) { TREE_ADDRESSABLE (decl) = 1; break; } expand_decl (decl); expand_decl_cleanup (decl, cleanup); x = DECL_RTL (decl); /* Go through the elements, assigning RTL to each. */ for (t = decl_elts; t; t = TREE_CHAIN (t)) { tree decl_elt = TREE_VALUE (t); tree cleanup_elt = TREE_PURPOSE (t); enum machine_mode mode = TYPE_MODE (TREE_TYPE (decl_elt)); /* If any of the elements are addressable, so is the entire union. */ if (TREE_USED (decl_elt)) TREE_USED (decl) = 1; /* Propagate the union's alignment to the elements. */ DECL_ALIGN (decl_elt) = DECL_ALIGN (decl); DECL_USER_ALIGN (decl_elt) = DECL_USER_ALIGN (decl); /* If the element has BLKmode and the union doesn't, the union is aligned such that the element doesn't need to have BLKmode, so change the element's mode to the appropriate one for its size. 
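   For example, a four-byte structure member of an anonymous union whose
   union type already has SImode can itself be given SImode here, so the
   union's register or memory slot can simply be reused, or narrowed
   with adjust_address_nv or gen_lowpart_SUBREG below, instead of
   forcing a BLKmode copy.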
*/ if (mode == BLKmode && DECL_MODE (decl) != BLKmode) DECL_MODE (decl_elt) = mode = mode_for_size_tree (DECL_SIZE (decl_elt), MODE_INT, 1); /* (SUBREG (MEM ...)) at RTL generation time is invalid, so we instead create a new MEM rtx with the proper mode. */ if (MEM_P (x)) { if (mode == GET_MODE (x)) SET_DECL_RTL (decl_elt, x); else SET_DECL_RTL (decl_elt, adjust_address_nv (x, mode, 0)); } else if (REG_P (x)) { if (mode == GET_MODE (x)) SET_DECL_RTL (decl_elt, x); else SET_DECL_RTL (decl_elt, gen_lowpart_SUBREG (mode, x)); } else abort (); /* Record the cleanup if there is one. */ if (cleanup != 0) thisblock->data.block.cleanups = tree_cons (decl_elt, cleanup_elt, thisblock->data.block.cleanups); } } /* Expand a list of cleanups LIST. Elements may be expressions or may be nested lists. If IN_FIXUP is nonzero, we are generating this cleanup for a fixup goto and handle protection regions specially in that case. If REACHABLE, we emit code, otherwise just inform the exception handling code about this finalization. */ static void expand_cleanups (tree list, int in_fixup, int reachable) { tree tail; for (tail = list; tail; tail = TREE_CHAIN (tail)) if (TREE_CODE (TREE_VALUE (tail)) == TREE_LIST) expand_cleanups (TREE_VALUE (tail), in_fixup, reachable); else { if (! in_fixup && using_eh_for_cleanups_p) expand_eh_region_end_cleanup (TREE_VALUE (tail)); if (reachable && !CLEANUP_EH_ONLY (tail)) { /* Cleanups may be run multiple times. For example, when exiting a binding contour, we expand the cleanups associated with that contour. When a goto within that binding contour has a target outside that contour, it will expand all cleanups from its scope to the target. Though the cleanups are expanded multiple times, the control paths are non-overlapping so the cleanups will not be executed twice. */ /* We may need to protect from outer cleanups. */ if (in_fixup && using_eh_for_cleanups_p) { expand_eh_region_start (); expand_expr (TREE_VALUE (tail), const0_rtx, VOIDmode, 0); expand_eh_region_end_fixup (TREE_VALUE (tail)); } else expand_expr (TREE_VALUE (tail), const0_rtx, VOIDmode, 0); free_temp_slots (); } } } /* Mark when the context we are emitting RTL for as a conditional context, so that any cleanup actions we register with expand_decl_init will be properly conditionalized when those cleanup actions are later performed. Must be called before any expression (tree) is expanded that is within a conditional context. */ void start_cleanup_deferral (void) { /* block_stack can be NULL if we are inside the parameter list. It is OK to do nothing, because cleanups aren't possible here. */ if (block_stack) ++block_stack->data.block.conditional_code; } /* Mark the end of a conditional region of code. Because cleanup deferrals may be nested, we may still be in a conditional region after we end the currently deferred cleanups, only after we end all deferred cleanups, are we back in unconditional code. */ void end_cleanup_deferral (void) { /* block_stack can be NULL if we are inside the parameter list. It is OK to do nothing, because cleanups aren't possible here. */ if (block_stack) --block_stack->data.block.conditional_code; } tree last_cleanup_this_contour (void) { if (block_stack == 0) return 0; return block_stack->data.block.cleanups; } /* Return nonzero if any containing block has a stack level or cleanups. 
*/ int containing_blocks_have_cleanups_or_stack_level (void) { struct nesting *block; for (block = block_stack; block; block = block->next) if (block->data.block.stack_level != 0 || block->data.block.cleanups != 0) return 1; return 0; } /* Return 1 if there are any pending cleanups at this point. Check the current contour as well as contours that enclose the current contour. */ int any_pending_cleanups (void) { struct nesting *block; if (cfun == NULL || cfun->stmt == NULL || block_stack == 0) return 0; if (block_stack->data.block.cleanups != NULL) return 1; if (block_stack->data.block.outer_cleanups == 0) return 0; for (block = block_stack->next; block; block = block->next) if (block->data.block.cleanups != 0) return 1; return 0; } /* Enter a case (Pascal) or switch (C) statement. Push a block onto case_stack and nesting_stack to accumulate the case-labels that are seen and to record the labels generated for the statement. EXIT_FLAG is nonzero if `exit_something' should exit this case stmt. Otherwise, this construct is transparent for `exit_something'. EXPR is the index-expression to be dispatched on. TYPE is its nominal type. We could simply convert EXPR to this type, but instead we take short cuts. */ void expand_start_case (int exit_flag, tree expr, tree type, const char *printname) { struct nesting *thiscase = ALLOC_NESTING (); /* Make an entry on case_stack for the case we are entering. */ thiscase->desc = CASE_NESTING; thiscase->next = case_stack; thiscase->all = nesting_stack; thiscase->depth = ++nesting_depth; thiscase->exit_label = exit_flag ? gen_label_rtx () : 0; thiscase->data.case_stmt.case_list = 0; thiscase->data.case_stmt.index_expr = expr; thiscase->data.case_stmt.nominal_type = type; thiscase->data.case_stmt.default_label = 0; thiscase->data.case_stmt.printname = printname; thiscase->data.case_stmt.line_number_status = force_line_numbers (); case_stack = thiscase; nesting_stack = thiscase; do_pending_stack_adjust (); emit_queue (); /* Make sure case_stmt.start points to something that won't need any transformation before expand_end_case. */ if (GET_CODE (get_last_insn ()) != NOTE) emit_note (NOTE_INSN_DELETED); thiscase->data.case_stmt.start = get_last_insn (); start_cleanup_deferral (); } /* Accumulate one case or default label inside a case or switch statement. VALUE is the value of the case (a null pointer, for a default label). The function CONVERTER, when applied to arguments T and V, converts the value V to the type T. If not currently inside a case or switch statement, return 1 and do nothing. The caller will print a language-specific error message. If VALUE is a duplicate or overlaps, return 2 and do nothing except store the (first) duplicate node in *DUPLICATE. If VALUE is out of range, return 3 and do nothing. If we are jumping into the scope of a cleanup or var-sized array, return 5. Return 0 on success. Extended to handle range statements. */ int pushcase (tree value, tree (*converter) (tree, tree), tree label, tree *duplicate) { tree index_type; tree nominal_type; /* Fail if not inside a real case statement. */ if (! (case_stack && case_stack->data.case_stmt.start)) return 1; if (stack_block_stack && stack_block_stack->depth > case_stack->depth) return 5; index_type = TREE_TYPE (case_stack->data.case_stmt.index_expr); nominal_type = case_stack->data.case_stmt.nominal_type; /* If the index is erroneous, avoid more problems: pretend to succeed. 
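   (For orientation, an illustrative mapping: given
   'switch (c) { case 'a': ... case '0' ... '9': ... default: ... }'
   the front end calls pushcase for the single value, pushcase_range for
   the GNU case-range extension, and pushcase with a null VALUE for the
   default label; all of them funnel into add_case_node below.)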
*/ if (index_type == error_mark_node) return 0; /* Convert VALUE to the type in which the comparisons are nominally done. */ if (value != 0) value = (*converter) (nominal_type, value); /* Fail if this value is out of range for the actual type of the index (which may be narrower than NOMINAL_TYPE). */ if (value != 0 && (TREE_CONSTANT_OVERFLOW (value) || ! int_fits_type_p (value, index_type))) return 3; return add_case_node (value, value, label, duplicate, false); } /* Like pushcase but this case applies to all values between VALUE1 and VALUE2 (inclusive). If VALUE1 is NULL, the range starts at the lowest value of the index type and ends at VALUE2. If VALUE2 is NULL, the range starts at VALUE1 and ends at the highest value of the index type. If both are NULL, this case applies to all values. The return value is the same as that of pushcase but there is one additional error code: 4 means the specified range was empty. */ int pushcase_range (tree value1, tree value2, tree (*converter) (tree, tree), tree label, tree *duplicate) { tree index_type; tree nominal_type; /* Fail if not inside a real case statement. */ if (! (case_stack && case_stack->data.case_stmt.start)) return 1; if (stack_block_stack && stack_block_stack->depth > case_stack->depth) return 5; index_type = TREE_TYPE (case_stack->data.case_stmt.index_expr); nominal_type = case_stack->data.case_stmt.nominal_type; /* If the index is erroneous, avoid more problems: pretend to succeed. */ if (index_type == error_mark_node) return 0; /* Convert VALUEs to type in which the comparisons are nominally done and replace any unspecified value with the corresponding bound. */ if (value1 == 0) value1 = TYPE_MIN_VALUE (index_type); if (value2 == 0) value2 = TYPE_MAX_VALUE (index_type); /* Fail if the range is empty. Do this before any conversion since we want to allow out-of-range empty ranges. */ if (value2 != 0 && tree_int_cst_lt (value2, value1)) return 4; /* If the max was unbounded, use the max of the nominal_type we are converting to. Do this after the < check above to suppress false positives. */ if (value2 == 0) value2 = TYPE_MAX_VALUE (nominal_type); value1 = (*converter) (nominal_type, value1); value2 = (*converter) (nominal_type, value2); /* Fail if these values are out of range. */ if (TREE_CONSTANT_OVERFLOW (value1) || ! int_fits_type_p (value1, index_type)) return 3; if (TREE_CONSTANT_OVERFLOW (value2) || ! int_fits_type_p (value2, index_type)) return 3; return add_case_node (value1, value2, label, duplicate, false); } /* Do the actual insertion of a case label for pushcase and pushcase_range into case_stack->data.case_stmt.case_list. Use an AVL tree to avoid slowdown for large switch statements. */ int add_case_node (tree low, tree high, tree label, tree *duplicate, bool dont_expand_label) { struct case_node *p, **q, *r; /* If there's no HIGH value, then this is not a case range; it's just a simple case label. But that's just a degenerate case range. */ if (!high) high = low; /* Handle default labels specially. */ if (!high && !low) { if (case_stack->data.case_stmt.default_label != 0) { *duplicate = case_stack->data.case_stmt.default_label; return 2; } case_stack->data.case_stmt.default_label = label; if (!dont_expand_label) expand_label (label); return 0; } q = &case_stack->data.case_stmt.case_list; p = *q; while ((r = *q)) { p = r; /* Keep going past elements distinctly greater than HIGH. */ if (tree_int_cst_lt (high, p->low)) q = &p->left; /* or distinctly less than LOW. 
*/ else if (tree_int_cst_lt (p->high, low)) q = &p->right; else { /* We have an overlap; this is an error. */ *duplicate = p->code_label; return 2; } } /* Add this label to the chain, and succeed. */ r = ggc_alloc (sizeof (struct case_node)); r->low = low; /* If the bounds are equal, turn this into the one-value case. */ if (tree_int_cst_equal (low, high)) r->high = r->low; else r->high = high; r->code_label = label; if (!dont_expand_label) expand_label (label); *q = r; r->parent = p; r->left = 0; r->right = 0; r->balance = 0; while (p) { struct case_node *s; if (r == p->left) { int b; if (! (b = p->balance)) /* Growth propagation from left side. */ p->balance = -1; else if (b < 0) { if (r->balance < 0) { /* R-Rotation */ if ((p->left = s = r->right)) s->parent = p; r->right = p; p->balance = 0; r->balance = 0; s = p->parent; p->parent = r; if ((r->parent = s)) { if (s->left == p) s->left = r; else s->right = r; } else case_stack->data.case_stmt.case_list = r; } else /* r->balance == +1 */ { /* LR-Rotation */ int b2; struct case_node *t = r->right; if ((p->left = s = t->right)) s->parent = p; t->right = p; if ((r->right = s = t->left)) s->parent = r; t->left = r; b = t->balance; b2 = b < 0; p->balance = b2; b2 = -b2 - b; r->balance = b2; t->balance = 0; s = p->parent; p->parent = t; r->parent = t; if ((t->parent = s)) { if (s->left == p) s->left = t; else s->right = t; } else case_stack->data.case_stmt.case_list = t; } break; } else { /* p->balance == +1; growth of left side balances the node. */ p->balance = 0; break; } } else /* r == p->right */ { int b; if (! (b = p->balance)) /* Growth propagation from right side. */ p->balance++; else if (b > 0) { if (r->balance > 0) { /* L-Rotation */ if ((p->right = s = r->left)) s->parent = p; r->left = p; p->balance = 0; r->balance = 0; s = p->parent; p->parent = r; if ((r->parent = s)) { if (s->left == p) s->left = r; else s->right = r; } else case_stack->data.case_stmt.case_list = r; } else /* r->balance == -1 */ { /* RL-Rotation */ int b2; struct case_node *t = r->left; if ((p->right = s = t->left)) s->parent = p; t->left = p; if ((r->left = s = t->right)) s->parent = r; t->right = r; b = t->balance; b2 = b < 0; r->balance = b2; b2 = -b2 - b; p->balance = b2; t->balance = 0; s = p->parent; p->parent = t; r->parent = t; if ((t->parent = s)) { if (s->left == p) s->left = t; else s->right = t; } else case_stack->data.case_stmt.case_list = t; } break; } else { /* p->balance == -1; growth of right side balances the node. */ p->balance = 0; break; } } r = p; p = p->parent; } return 0; } /* Maximum number of case bit tests. */ #define MAX_CASE_BIT_TESTS 3 /* By default, enable case bit tests on targets with ashlsi3. */ #ifndef CASE_USE_BIT_TESTS #define CASE_USE_BIT_TESTS (ashl_optab->handlers[word_mode].insn_code \ != CODE_FOR_nothing) #endif /* A case_bit_test represents a set of case nodes that may be selected from using a bit-wise comparison. HI and LO hold the integer to be tested against, LABEL contains the label to jump to upon success and BITS counts the number of case nodes handled by this test, typically the number of bits set in HI:LO. */ struct case_bit_test { HOST_WIDE_INT hi; HOST_WIDE_INT lo; rtx label; int bits; }; /* Determine whether "1 << x" is relatively cheap in word_mode. 
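   A cheap shift is what makes the bit-test strategy below profitable.
   For instance (illustrative):

     switch (c)
       {
       case 'a': case 'e': case 'i': case 'o': case 'u': return 1;
       default: return 0;
       }

   is emitted by emit_case_bit_tests as, in effect,
   'if ((1 << (c - 'a')) & 0x104111) goto return_1;', one shift, one AND
   and one conditional jump instead of five separate compares.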
*/ static bool lshift_cheap_p (void) { static bool init = false; static bool cheap = true; if (!init) { rtx reg = gen_rtx_REG (word_mode, 10000); int cost = rtx_cost (gen_rtx_ASHIFT (word_mode, const1_rtx, reg), SET); cheap = cost < COSTS_N_INSNS (3); init = true; } return cheap; } /* Comparison function for qsort to order bit tests by decreasing number of case nodes, i.e. the node with the most cases gets tested first. */ static int case_bit_test_cmp (const void *p1, const void *p2) { const struct case_bit_test *d1 = p1; const struct case_bit_test *d2 = p2; return d2->bits - d1->bits; } /* Expand a switch statement by a short sequence of bit-wise comparisons. "switch(x)" is effectively converted into "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are integer constants. INDEX_EXPR is the value being switched on, which is of type INDEX_TYPE. MINVAL is the lowest case value of in the case nodes, of INDEX_TYPE type, and RANGE is highest value minus MINVAL, also of type INDEX_TYPE. NODES is the set of case nodes, and DEFAULT_LABEL is the label to branch to should none of the cases match. There *MUST* be MAX_CASE_BIT_TESTS or less unique case node targets. */ static void emit_case_bit_tests (tree index_type, tree index_expr, tree minval, tree range, case_node_ptr nodes, rtx default_label) { struct case_bit_test test[MAX_CASE_BIT_TESTS]; enum machine_mode mode; rtx expr, index, label; unsigned int i,j,lo,hi; struct case_node *n; unsigned int count; count = 0; for (n = nodes; n; n = n->right) { label = label_rtx (n->code_label); for (i = 0; i < count; i++) if (same_case_target_p (label, test[i].label)) break; if (i == count) { if (count >= MAX_CASE_BIT_TESTS) abort (); test[i].hi = 0; test[i].lo = 0; test[i].label = label; test[i].bits = 1; count++; } else test[i].bits++; lo = tree_low_cst (fold (build (MINUS_EXPR, index_type, n->low, minval)), 1); hi = tree_low_cst (fold (build (MINUS_EXPR, index_type, n->high, minval)), 1); for (j = lo; j <= hi; j++) if (j >= HOST_BITS_PER_WIDE_INT) test[i].hi |= (HOST_WIDE_INT) 1 << (j - HOST_BITS_PER_INT); else test[i].lo |= (HOST_WIDE_INT) 1 << j; } qsort (test, count, sizeof(*test), case_bit_test_cmp); index_expr = fold (build (MINUS_EXPR, index_type, convert (index_type, index_expr), convert (index_type, minval))); index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0); emit_queue (); index = protect_from_queue (index, 0); do_pending_stack_adjust (); mode = TYPE_MODE (index_type); expr = expand_expr (range, NULL_RTX, VOIDmode, 0); emit_cmp_and_jump_insns (index, expr, GTU, NULL_RTX, mode, 1, default_label); index = convert_to_mode (word_mode, index, 0); index = expand_binop (word_mode, ashl_optab, const1_rtx, index, NULL_RTX, 1, OPTAB_WIDEN); for (i = 0; i < count; i++) { expr = immed_double_const (test[i].lo, test[i].hi, word_mode); expr = expand_binop (word_mode, and_optab, index, expr, NULL_RTX, 1, OPTAB_WIDEN); emit_cmp_and_jump_insns (expr, const0_rtx, NE, NULL_RTX, word_mode, 1, test[i].label); } emit_jump (default_label); } #ifndef HAVE_casesi #define HAVE_casesi 0 #endif #ifndef HAVE_tablejump #define HAVE_tablejump 0 #endif /* Terminate a case (Pascal) or switch (C) statement in which ORIG_INDEX is the expression to be tested. If ORIG_TYPE is not NULL, it is the original ORIG_INDEX type as given in the source before any compiler conversions. Generate the code to test it and jump to the right place. 
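   Three strategies are used below, picked by heuristics on the number
   of case values (COUNT), distinct targets (UNIQ) and value span
   (RANGE): a short sequence of bit tests when at most three distinct
   targets cover enough clustered values, a balanced binary tree of
   compares when the values are few or sparse (or when neither casesi
   nor tablejump is available, or the index is constant), and otherwise
   a jump table indexed by the case value. For instance (illustrative),
   a hundred consecutive cases normally become a table, while four cases
   scattered over a huge range become compares.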
*/ void expand_end_case_type (tree orig_index, tree orig_type) { tree minval = NULL_TREE, maxval = NULL_TREE, range = NULL_TREE; rtx default_label = 0; struct case_node *n, *m; unsigned int count, uniq; rtx index; rtx table_label; int ncases; rtx *labelvec; int i; rtx before_case, end, lab; struct nesting *thiscase = case_stack; tree index_expr, index_type; bool exit_done = false; int unsignedp; /* Don't crash due to previous errors. */ if (thiscase == NULL) return; index_expr = thiscase->data.case_stmt.index_expr; index_type = TREE_TYPE (index_expr); unsignedp = TYPE_UNSIGNED (index_type); if (orig_type == NULL) orig_type = TREE_TYPE (orig_index); do_pending_stack_adjust (); /* An ERROR_MARK occurs for various reasons including invalid data type. */ if (index_type != error_mark_node) { /* If we don't have a default-label, create one here, after the body of the switch. */ if (thiscase->data.case_stmt.default_label == 0) { thiscase->data.case_stmt.default_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE); /* Share the exit label if possible. */ if (thiscase->exit_label) { SET_DECL_RTL (thiscase->data.case_stmt.default_label, thiscase->exit_label); exit_done = true; } expand_label (thiscase->data.case_stmt.default_label); } default_label = label_rtx (thiscase->data.case_stmt.default_label); before_case = get_last_insn (); if (thiscase->data.case_stmt.case_list && thiscase->data.case_stmt.case_list->left) thiscase->data.case_stmt.case_list = case_tree2list (thiscase->data.case_stmt.case_list, 0); /* Simplify the case-list before we count it. */ group_case_nodes (thiscase->data.case_stmt.case_list); strip_default_case_nodes (&thiscase->data.case_stmt.case_list, default_label); /* Get upper and lower bounds of case values. Also convert all the case values to the index expr's data type. */ uniq = 0; count = 0; for (n = thiscase->data.case_stmt.case_list; n; n = n->right) { /* Check low and high label values are integers. */ if (TREE_CODE (n->low) != INTEGER_CST) abort (); if (TREE_CODE (n->high) != INTEGER_CST) abort (); n->low = convert (index_type, n->low); n->high = convert (index_type, n->high); /* Count the elements and track the largest and smallest of them (treating them as signed even if they are not). */ if (count++ == 0) { minval = n->low; maxval = n->high; } else { if (INT_CST_LT (n->low, minval)) minval = n->low; if (INT_CST_LT (maxval, n->high)) maxval = n->high; } /* A range counts double, since it requires two compares. */ if (! tree_int_cst_equal (n->low, n->high)) count++; /* Count the number of unique case node targets. */ uniq++; lab = label_rtx (n->code_label); for (m = thiscase->data.case_stmt.case_list; m != n; m = m->right) if (same_case_target_p (label_rtx (m->code_label), lab)) { uniq--; break; } } /* Compute span of values. */ if (count != 0) range = fold (build (MINUS_EXPR, index_type, maxval, minval)); end_cleanup_deferral (); if (count == 0) { expand_expr (index_expr, const0_rtx, VOIDmode, 0); emit_queue (); emit_jump (default_label); } /* Try implementing this switch statement by a short sequence of bit-wise comparisons. However, we let the binary-tree case below handle constant index expressions. */ else if (CASE_USE_BIT_TESTS && ! TREE_CONSTANT (index_expr) && compare_tree_int (range, GET_MODE_BITSIZE (word_mode)) < 0 && compare_tree_int (range, 0) > 0 && lshift_cheap_p () && ((uniq == 1 && count >= 3) || (uniq == 2 && count >= 5) || (uniq == 3 && count >= 6))) { /* Optimize the case where all the case values fit in a word without having to subtract MINVAL. 
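   (Illustrative: with cases 1, 3 and 9 the bits can be tested at
   positions 1, 3 and 9 directly, using a minval of 0 and a range of 9,
   rather than first subtracting the true minimum of 1.)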
In this case, we can optimize away the subtraction. */ if (compare_tree_int (minval, 0) > 0 && compare_tree_int (maxval, GET_MODE_BITSIZE (word_mode)) < 0) { minval = integer_zero_node; range = maxval; } emit_case_bit_tests (index_type, index_expr, minval, range, thiscase->data.case_stmt.case_list, default_label); } /* If range of values is much bigger than number of values, make a sequence of conditional branches instead of a dispatch. If the switch-index is a constant, do it this way because we can optimize it. */ else if (count < case_values_threshold () || compare_tree_int (range, (optimize_size ? 3 : 10) * count) > 0 /* RANGE may be signed, and really large ranges will show up as negative numbers. */ || compare_tree_int (range, 0) < 0 #ifndef ASM_OUTPUT_ADDR_DIFF_ELT || flag_pic #endif || TREE_CONSTANT (index_expr) /* If neither casesi or tablejump is available, we can only go this way. */ || (!HAVE_casesi && !HAVE_tablejump)) { index = expand_expr (index_expr, NULL_RTX, VOIDmode, 0); /* If the index is a short or char that we do not have an insn to handle comparisons directly, convert it to a full integer now, rather than letting each comparison generate the conversion. */ if (GET_MODE_CLASS (GET_MODE (index)) == MODE_INT && ! have_insn_for (COMPARE, GET_MODE (index))) { enum machine_mode wider_mode; for (wider_mode = GET_MODE (index); wider_mode != VOIDmode; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) if (have_insn_for (COMPARE, wider_mode)) { index = convert_to_mode (wider_mode, index, unsignedp); break; } } emit_queue (); do_pending_stack_adjust (); index = protect_from_queue (index, 0); if (MEM_P (index)) index = copy_to_reg (index); if (GET_CODE (index) == CONST_INT || TREE_CODE (index_expr) == INTEGER_CST) { /* Make a tree node with the proper constant value if we don't already have one. */ if (TREE_CODE (index_expr) != INTEGER_CST) { index_expr = build_int_2 (INTVAL (index), unsignedp || INTVAL (index) >= 0 ? 0 : -1); index_expr = convert (index_type, index_expr); } /* For constant index expressions we need only issue an unconditional branch to the appropriate target code. The job of removing any unreachable code is left to the optimization phase if the "-O" option is specified. */ for (n = thiscase->data.case_stmt.case_list; n; n = n->right) if (! tree_int_cst_lt (index_expr, n->low) && ! tree_int_cst_lt (n->high, index_expr)) break; if (n) emit_jump (label_rtx (n->code_label)); else emit_jump (default_label); } else { /* If the index expression is not constant we generate a binary decision tree to select the appropriate target code. This is done as follows: The list of cases is rearranged into a binary tree, nearly optimal assuming equal probability for each case. The tree is transformed into RTL, eliminating redundant test conditions at the same time. If program flow could reach the end of the decision tree an unconditional jump to the default code is emitted. */ use_cost_table = (TREE_CODE (orig_type) != ENUMERAL_TYPE && estimate_case_costs (thiscase->data.case_stmt.case_list)); balance_case_nodes (&thiscase->data.case_stmt.case_list, NULL); emit_case_nodes (index, thiscase->data.case_stmt.case_list, default_label, index_type); emit_jump_if_reachable (default_label); } } else { table_label = gen_label_rtx (); if (! try_casesi (index_type, index_expr, minval, range, table_label, default_label)) { index_type = thiscase->data.case_stmt.nominal_type; /* Index jumptables from zero for suitable values of minval to avoid a subtraction. */ if (! 
optimize_size && compare_tree_int (minval, 0) > 0 && compare_tree_int (minval, 3) < 0) { minval = integer_zero_node; range = maxval; } if (! try_tablejump (index_type, index_expr, minval, range, table_label, default_label)) abort (); } /* Get table of labels to jump to, in order of case index. */ ncases = tree_low_cst (range, 0) + 1; labelvec = alloca (ncases * sizeof (rtx)); memset (labelvec, 0, ncases * sizeof (rtx)); for (n = thiscase->data.case_stmt.case_list; n; n = n->right) { /* Compute the low and high bounds relative to the minimum value since that should fit in a HOST_WIDE_INT while the actual values may not. */ HOST_WIDE_INT i_low = tree_low_cst (fold (build (MINUS_EXPR, index_type, n->low, minval)), 1); HOST_WIDE_INT i_high = tree_low_cst (fold (build (MINUS_EXPR, index_type, n->high, minval)), 1); HOST_WIDE_INT i; for (i = i_low; i <= i_high; i ++) labelvec[i] = gen_rtx_LABEL_REF (Pmode, label_rtx (n->code_label)); } /* Fill in the gaps with the default. */ for (i = 0; i < ncases; i++) if (labelvec[i] == 0) labelvec[i] = gen_rtx_LABEL_REF (Pmode, default_label); /* Output the table. */ emit_label (table_label); if (CASE_VECTOR_PC_RELATIVE || flag_pic) emit_jump_insn (gen_rtx_ADDR_DIFF_VEC (CASE_VECTOR_MODE, gen_rtx_LABEL_REF (Pmode, table_label), gen_rtvec_v (ncases, labelvec), const0_rtx, const0_rtx)); else emit_jump_insn (gen_rtx_ADDR_VEC (CASE_VECTOR_MODE, gen_rtvec_v (ncases, labelvec))); /* If the case insn drops through the table, after the table we must jump to the default-label. Otherwise record no drop-through after the table. */ #ifdef CASE_DROPS_THROUGH emit_jump (default_label); #else emit_barrier (); #endif } before_case = NEXT_INSN (before_case); end = get_last_insn (); if (squeeze_notes (&before_case, &end)) abort (); reorder_insns (before_case, end, thiscase->data.case_stmt.start); } else end_cleanup_deferral (); if (thiscase->exit_label && !exit_done) emit_label (thiscase->exit_label); POPSTACK (case_stack); free_temp_slots (); } /* Convert the tree NODE into a list linked by the right field, with the left field zeroed. RIGHT is used for recursion; it is a list to be placed rightmost in the resulting list. */ static struct case_node * case_tree2list (struct case_node *node, struct case_node *right) { struct case_node *left; if (node->right) right = case_tree2list (node->right, right); node->right = right; if ((left = node->left)) { node->left = 0; return case_tree2list (left, node); } return node; } /* Generate code to jump to LABEL if OP1 and OP2 are equal. */ static void do_jump_if_equal (rtx op1, rtx op2, rtx label, int unsignedp) { if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT) { if (op1 == op2) emit_jump (label); } else emit_cmp_and_jump_insns (op1, op2, EQ, NULL_RTX, (GET_MODE (op1) == VOIDmode ? GET_MODE (op2) : GET_MODE (op1)), unsignedp, label); } /* Not all case values are encountered equally. This function uses a heuristic to weight case labels, in cases where that looks like a reasonable thing to do. Right now, all we try to guess is text, and we establish the following weights: chars above space: 16 digits: 16 default: 12 space, punct: 8 tab: 4 newline: 2 other "\" chars: 1 remaining chars: 0 If we find any cases in the switch that are not either -1 or in the range of valid ASCII characters, or are control characters other than those commonly used with "\", don't treat this switch scanning text. Return 1 if these nodes are suitable for cost estimation, otherwise return 0. 
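For example (illustrative only), a switch over 'a', 'e', 'i', 'o', 'u'
   and '\n' is treated as scanning text, whereas a switch that also
   contains the value 300 or the control character 0x01 is not.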
*/ static int estimate_case_costs (case_node_ptr node) { tree min_ascii = integer_minus_one_node; tree max_ascii = convert (TREE_TYPE (node->high), build_int_2 (127, 0)); case_node_ptr n; int i; /* If we haven't already made the cost table, make it now. Note that the lower bound of the table is -1, not zero. */ if (! cost_table_initialized) { cost_table_initialized = 1; for (i = 0; i < 128; i++) { if (ISALNUM (i)) COST_TABLE (i) = 16; else if (ISPUNCT (i)) COST_TABLE (i) = 8; else if (ISCNTRL (i)) COST_TABLE (i) = -1; } COST_TABLE (' ') = 8; COST_TABLE ('\t') = 4; COST_TABLE ('\0') = 4; COST_TABLE ('\n') = 2; COST_TABLE ('\f') = 1; COST_TABLE ('\v') = 1; COST_TABLE ('\b') = 1; } /* See if all the case expressions look like text. It is text if the constant is >= -1 and the highest constant is <= 127. Do all comparisons as signed arithmetic since we don't want to ever access cost_table with a value less than -1. Also check that none of the constants in a range are strange control characters. */ for (n = node; n; n = n->right) { if ((INT_CST_LT (n->low, min_ascii)) || INT_CST_LT (max_ascii, n->high)) return 0; for (i = (HOST_WIDE_INT) TREE_INT_CST_LOW (n->low); i <= (HOST_WIDE_INT) TREE_INT_CST_LOW (n->high); i++) if (COST_TABLE (i) < 0) return 0; } /* All interesting values are within the range of interesting ASCII characters. */ return 1; } /* Determine whether two case labels branch to the same target. */ static bool same_case_target_p (rtx l1, rtx l2) { #if 0 rtx i1, i2; if (l1 == l2) return true; i1 = next_real_insn (l1); i2 = next_real_insn (l2); if (i1 == i2) return true; if (i1 && simplejump_p (i1)) { l1 = XEXP (SET_SRC (PATTERN (i1)), 0); } if (i2 && simplejump_p (i2)) { l2 = XEXP (SET_SRC (PATTERN (i2)), 0); } #endif /* When coming from gimple, we usually won't have emitted either the labels or the body of the switch statement. The job being done here should be done via jump threading at the tree level. Cases that go the same place should have the same label. */ return l1 == l2; } /* Delete nodes that branch to the default label from a list of case nodes. Eg. case 5: default: becomes just default: */ static void strip_default_case_nodes (case_node_ptr *prev, rtx deflab) { case_node_ptr ptr; while (*prev) { ptr = *prev; if (same_case_target_p (label_rtx (ptr->code_label), deflab)) *prev = ptr->right; else prev = &ptr->right; } } /* Scan an ordered list of case nodes combining those with consecutive values or ranges. Eg. three separate entries 1: 2: 3: become one entry 1..3: */ static void group_case_nodes (case_node_ptr head) { case_node_ptr node = head; while (node) { rtx lab; case_node_ptr np = node; lab = label_rtx (node->code_label); /* Try to group the successors of NODE with NODE. */ while (((np = np->right) != 0) /* Do they jump to the same place? */ && same_case_target_p (label_rtx (np->code_label), lab) /* Are their ranges consecutive? */ && tree_int_cst_equal (np->low, fold (build (PLUS_EXPR, TREE_TYPE (node->high), node->high, integer_one_node))) /* An overflow is not consecutive. */ && tree_int_cst_lt (node->high, fold (build (PLUS_EXPR, TREE_TYPE (node->high), node->high, integer_one_node)))) { node->high = np->high; } /* NP is the first node after NODE which can't be grouped with it. Delete the nodes in between, and move on to that node. */ node->right = np; node = np; } } /* Take an ordered list of case nodes and transform them into a near optimal binary tree, on the assumption that any target code selection value is as likely as any other. 
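For instance, seven equally likely cases with the values 1 through 7
   end up as a tree rooted at 4 with the subtrees {1,2,3} and {5,6,7},
   giving a decision tree of depth three (a rough illustration; when the
   cost table is in use the split point can shift).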
The transformation is performed by splitting the ordered list into two equal sections plus a pivot. The parts are then attached to the pivot as left and right branches. Each branch is then transformed recursively. */ static void balance_case_nodes (case_node_ptr *head, case_node_ptr parent) { case_node_ptr np; np = *head; if (np) { int cost = 0; int i = 0; int ranges = 0; case_node_ptr *npp; case_node_ptr left; /* Count the number of entries on branch. Also count the ranges. */ while (np) { if (!tree_int_cst_equal (np->low, np->high)) { ranges++; if (use_cost_table) cost += COST_TABLE (TREE_INT_CST_LOW (np->high)); } if (use_cost_table) cost += COST_TABLE (TREE_INT_CST_LOW (np->low)); i++; np = np->right; } if (i > 2) { /* Split this list if it is long enough for that to help. */ npp = head; left = *npp; if (use_cost_table) { /* Find the place in the list that bisects the list's total cost, Here I gets half the total cost. */ int n_moved = 0; i = (cost + 1) / 2; while (1) { /* Skip nodes while their cost does not reach that amount. */ if (!tree_int_cst_equal ((*npp)->low, (*npp)->high)) i -= COST_TABLE (TREE_INT_CST_LOW ((*npp)->high)); i -= COST_TABLE (TREE_INT_CST_LOW ((*npp)->low)); if (i <= 0) break; npp = &(*npp)->right; n_moved += 1; } if (n_moved == 0) { /* Leave this branch lopsided, but optimize left-hand side and fill in `parent' fields for right-hand side. */ np = *head; np->parent = parent; balance_case_nodes (&np->left, np); for (; np->right; np = np->right) np->right->parent = np; return; } } /* If there are just three nodes, split at the middle one. */ else if (i == 3) npp = &(*npp)->right; else { /* Find the place in the list that bisects the list's total cost, where ranges count as 2. Here I gets half the total cost. */ i = (i + ranges + 1) / 2; while (1) { /* Skip nodes while their cost does not reach that amount. */ if (!tree_int_cst_equal ((*npp)->low, (*npp)->high)) i--; i--; if (i <= 0) break; npp = &(*npp)->right; } } *head = np = *npp; *npp = 0; np->parent = parent; np->left = left; /* Optimize each of the two split parts. */ balance_case_nodes (&np->left, np); balance_case_nodes (&np->right, np); } else { /* Else leave this branch as one level, but fill in `parent' fields. */ np = *head; np->parent = parent; for (; np->right; np = np->right) np->right->parent = np; } } } /* Search the parent sections of the case node tree to see if a test for the lower bound of NODE would be redundant. INDEX_TYPE is the type of the index expression. The instructions to generate the case decision tree are output in the same order as nodes are processed so it is known that if a parent node checks the range of the current node minus one that the current node is bounded at its lower span. Thus the test would be redundant. */ static int node_has_low_bound (case_node_ptr node, tree index_type) { tree low_minus_one; case_node_ptr pnode; /* If the lower bound of this node is the lowest value in the index type, we need not test it. */ if (tree_int_cst_equal (node->low, TYPE_MIN_VALUE (index_type))) return 1; /* If this node has a left branch, the value at the left must be less than that at this node, so it cannot be bounded at the bottom and we need not bother testing any further. */ if (node->left) return 0; low_minus_one = fold (build (MINUS_EXPR, TREE_TYPE (node->low), node->low, integer_one_node)); /* If the subtraction above overflowed, we can't verify anything. Otherwise, look for a parent that tests our value - 1. */ if (! 
tree_int_cst_lt (low_minus_one, node->low)) return 0; for (pnode = node->parent; pnode; pnode = pnode->parent) if (tree_int_cst_equal (low_minus_one, pnode->high)) return 1; return 0; } /* Search the parent sections of the case node tree to see if a test for the upper bound of NODE would be redundant. INDEX_TYPE is the type of the index expression. The instructions to generate the case decision tree are output in the same order as nodes are processed so it is known that if a parent node checks the range of the current node plus one that the current node is bounded at its upper span. Thus the test would be redundant. */ static int node_has_high_bound (case_node_ptr node, tree index_type) { tree high_plus_one; case_node_ptr pnode; /* If there is no upper bound, obviously no test is needed. */ if (TYPE_MAX_VALUE (index_type) == NULL) return 1; /* If the upper bound of this node is the highest value in the type of the index expression, we need not test against it. */ if (tree_int_cst_equal (node->high, TYPE_MAX_VALUE (index_type))) return 1; /* If this node has a right branch, the value at the right must be greater than that at this node, so it cannot be bounded at the top and we need not bother testing any further. */ if (node->right) return 0; high_plus_one = fold (build (PLUS_EXPR, TREE_TYPE (node->high), node->high, integer_one_node)); /* If the addition above overflowed, we can't verify anything. Otherwise, look for a parent that tests our value + 1. */ if (! tree_int_cst_lt (node->high, high_plus_one)) return 0; for (pnode = node->parent; pnode; pnode = pnode->parent) if (tree_int_cst_equal (high_plus_one, pnode->low)) return 1; return 0; } /* Search the parent sections of the case node tree to see if both tests for the upper and lower bounds of NODE would be redundant. */ static int node_is_bounded (case_node_ptr node, tree index_type) { return (node_has_low_bound (node, index_type) && node_has_high_bound (node, index_type)); } /* Emit an unconditional jump to LABEL unless it would be dead code. */ static void emit_jump_if_reachable (rtx label) { if (GET_CODE (get_last_insn ()) != BARRIER) emit_jump (label); } /* Emit step-by-step code to select a case for the value of INDEX. The thus generated decision tree follows the form of the case-node binary tree NODE, whose nodes represent test conditions. INDEX_TYPE is the type of the index of the switch. Care is taken to prune redundant tests from the decision tree by detecting any boundary conditions already checked by emitted rtx. (See node_has_high_bound, node_has_low_bound and node_is_bounded, above.) Where the test conditions can be shown to be redundant we emit an unconditional jump to the target code. As a further optimization, the subordinates of a tree node are examined to check for bounded nodes. In this case conditional and/or unconditional jumps as a result of the boundary check for the current node are arranged to target the subordinates associated code for out of bound conditions on the current node. We can assume that when control reaches the code generated here, the index value has already been compared with the parents of this node, and determined to be on the same side of each parent as this node is. Thus, if this node tests for the value 51, and a parent tested for 52, we don't need to consider the possibility of a value greater than 51. If another parent tests for the value 50, then this node need not test anything. 
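As a sketch (not the exact RTL emitted), a three-node tree with the
   single values 2, 5 and 9 rooted at 5 expands to roughly

       if (index == 5) goto L5;
       if (index == 9) goto L9;
       if (index == 2) goto L2;
       goto default;

   where L5, L9 and L2 stand for the respective case labels.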
*/ static void emit_case_nodes (rtx index, case_node_ptr node, rtx default_label, tree index_type) { /* If INDEX has an unsigned type, we must make unsigned branches. */ int unsignedp = TYPE_UNSIGNED (index_type); enum machine_mode mode = GET_MODE (index); enum machine_mode imode = TYPE_MODE (index_type); /* See if our parents have already tested everything for us. If they have, emit an unconditional jump for this node. */ if (node_is_bounded (node, index_type)) emit_jump (label_rtx (node->code_label)); else if (tree_int_cst_equal (node->low, node->high)) { /* Node is single valued. First see if the index expression matches this node and then check our children, if any. */ do_jump_if_equal (index, convert_modes (mode, imode, expand_expr (node->low, NULL_RTX, VOIDmode, 0), unsignedp), label_rtx (node->code_label), unsignedp); if (node->right != 0 && node->left != 0) { /* This node has children on both sides. Dispatch to one side or the other by comparing the index value with this node's value. If one subtree is bounded, check that one first, so we can avoid real branches in the tree. */ if (node_is_bounded (node->right, index_type)) { emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), GT, NULL_RTX, mode, unsignedp, label_rtx (node->right->code_label)); emit_case_nodes (index, node->left, default_label, index_type); } else if (node_is_bounded (node->left, index_type)) { emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), LT, NULL_RTX, mode, unsignedp, label_rtx (node->left->code_label)); emit_case_nodes (index, node->right, default_label, index_type); } /* If both children are single-valued cases with no children, finish up all the work. This way, we can save one ordered comparison. */ else if (tree_int_cst_equal (node->right->low, node->right->high) && node->right->left == 0 && node->right->right == 0 && tree_int_cst_equal (node->left->low, node->left->high) && node->left->left == 0 && node->left->right == 0) { /* Neither node is bounded. First distinguish the two sides; then emit the code for one side at a time. */ /* See if the value matches what the right hand side wants. */ do_jump_if_equal (index, convert_modes (mode, imode, expand_expr (node->right->low, NULL_RTX, VOIDmode, 0), unsignedp), label_rtx (node->right->code_label), unsignedp); /* See if the value matches what the left hand side wants. */ do_jump_if_equal (index, convert_modes (mode, imode, expand_expr (node->left->low, NULL_RTX, VOIDmode, 0), unsignedp), label_rtx (node->left->code_label), unsignedp); } else { /* Neither node is bounded. First distinguish the two sides; then emit the code for one side at a time. */ tree test_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE); /* See if the value is on the right. */ emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), GT, NULL_RTX, mode, unsignedp, label_rtx (test_label)); /* Value must be on the left. Handle the left-hand subtree. */ emit_case_nodes (index, node->left, default_label, index_type); /* If left-hand subtree does nothing, go to default. */ emit_jump_if_reachable (default_label); /* Code branches here for the right-hand subtree. 
*/ expand_label (test_label); emit_case_nodes (index, node->right, default_label, index_type); } } else if (node->right != 0 && node->left == 0) { /* Here we have a right child but no left so we issue conditional branch to default and process the right child. Omit the conditional branch to default if we it avoid only one right child; it costs too much space to save so little time. */ if (node->right->right || node->right->left || !tree_int_cst_equal (node->right->low, node->right->high)) { if (!node_has_low_bound (node, index_type)) { emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), LT, NULL_RTX, mode, unsignedp, default_label); } emit_case_nodes (index, node->right, default_label, index_type); } else /* We cannot process node->right normally since we haven't ruled out the numbers less than this node's value. So handle node->right explicitly. */ do_jump_if_equal (index, convert_modes (mode, imode, expand_expr (node->right->low, NULL_RTX, VOIDmode, 0), unsignedp), label_rtx (node->right->code_label), unsignedp); } else if (node->right == 0 && node->left != 0) { /* Just one subtree, on the left. */ if (node->left->left || node->left->right || !tree_int_cst_equal (node->left->low, node->left->high)) { if (!node_has_high_bound (node, index_type)) { emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), GT, NULL_RTX, mode, unsignedp, default_label); } emit_case_nodes (index, node->left, default_label, index_type); } else /* We cannot process node->left normally since we haven't ruled out the numbers less than this node's value. So handle node->left explicitly. */ do_jump_if_equal (index, convert_modes (mode, imode, expand_expr (node->left->low, NULL_RTX, VOIDmode, 0), unsignedp), label_rtx (node->left->code_label), unsignedp); } } else { /* Node is a range. These cases are very similar to those for a single value, except that we do not start by testing whether this node is the one to branch to. */ if (node->right != 0 && node->left != 0) { /* Node has subtrees on both sides. If the right-hand subtree is bounded, test for it first, since we can go straight there. Otherwise, we need to make a branch in the control structure, then handle the two subtrees. */ tree test_label = 0; if (node_is_bounded (node->right, index_type)) /* Right hand node is fully bounded so we can eliminate any testing and branch directly to the target code. */ emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), GT, NULL_RTX, mode, unsignedp, label_rtx (node->right->code_label)); else { /* Right hand node requires testing. Branch to a label where we will handle it later. */ test_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE); emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), GT, NULL_RTX, mode, unsignedp, label_rtx (test_label)); } /* Value belongs to this node or to the left-hand subtree. */ emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->low, NULL_RTX, VOIDmode, 0), unsignedp), GE, NULL_RTX, mode, unsignedp, label_rtx (node->code_label)); /* Handle the left-hand subtree. */ emit_case_nodes (index, node->left, default_label, index_type); /* If right node had to be handled later, do that now. */ if (test_label) { /* If the left-hand subtree fell through, don't let it fall into the right-hand subtree. 
*/ emit_jump_if_reachable (default_label); expand_label (test_label); emit_case_nodes (index, node->right, default_label, index_type); } } else if (node->right != 0 && node->left == 0) { /* Deal with values to the left of this node, if they are possible. */ if (!node_has_low_bound (node, index_type)) { emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->low, NULL_RTX, VOIDmode, 0), unsignedp), LT, NULL_RTX, mode, unsignedp, default_label); } /* Value belongs to this node or to the right-hand subtree. */ emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), LE, NULL_RTX, mode, unsignedp, label_rtx (node->code_label)); emit_case_nodes (index, node->right, default_label, index_type); } else if (node->right == 0 && node->left != 0) { /* Deal with values to the right of this node, if they are possible. */ if (!node_has_high_bound (node, index_type)) { emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), GT, NULL_RTX, mode, unsignedp, default_label); } /* Value belongs to this node or to the left-hand subtree. */ emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->low, NULL_RTX, VOIDmode, 0), unsignedp), GE, NULL_RTX, mode, unsignedp, label_rtx (node->code_label)); emit_case_nodes (index, node->left, default_label, index_type); } else { /* Node has no children so we check low and high bounds to remove redundant tests. Only one of the bounds can exist, since otherwise this node is bounded--a case tested already. */ int high_bound = node_has_high_bound (node, index_type); int low_bound = node_has_low_bound (node, index_type); if (!high_bound && low_bound) { emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->high, NULL_RTX, VOIDmode, 0), unsignedp), GT, NULL_RTX, mode, unsignedp, default_label); } else if (!low_bound && high_bound) { emit_cmp_and_jump_insns (index, convert_modes (mode, imode, expand_expr (node->low, NULL_RTX, VOIDmode, 0), unsignedp), LT, NULL_RTX, mode, unsignedp, default_label); } else if (!low_bound && !high_bound) { /* Widen LOW and HIGH to the same width as INDEX. */ tree type = lang_hooks.types.type_for_mode (mode, unsignedp); tree low = build1 (CONVERT_EXPR, type, node->low); tree high = build1 (CONVERT_EXPR, type, node->high); rtx low_rtx, new_index, new_bound; /* Instead of doing two branches, emit one unsigned branch for (index-low) > (high-low). */ low_rtx = expand_expr (low, NULL_RTX, mode, 0); new_index = expand_simple_binop (mode, MINUS, index, low_rtx, NULL_RTX, unsignedp, OPTAB_WIDEN); new_bound = expand_expr (fold (build (MINUS_EXPR, type, high, low)), NULL_RTX, mode, 0); emit_cmp_and_jump_insns (new_index, new_bound, GT, NULL_RTX, mode, 1, default_label); } emit_jump (label_rtx (node->code_label)); } } } /* Type information for stmt.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_goto_fixup (void *x_p) { struct goto_fixup * const x = (struct goto_fixup *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_10goto_fixup ((*x).next); gt_ggc_m_7rtx_def ((*x).before_jump); gt_ggc_m_9tree_node ((*x).target); gt_ggc_m_9tree_node ((*x).context); gt_ggc_m_7rtx_def ((*x).target_rtl); gt_ggc_m_7rtx_def ((*x).stack_level); gt_ggc_m_9tree_node ((*x).cleanup_list_list); } } void gt_ggc_mx_label_chain (void *x_p) { struct label_chain * const x = (struct label_chain *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_11label_chain ((*x).next); gt_ggc_m_9tree_node ((*x).label); } } void gt_ggc_mx_nesting (void *x_p) { struct nesting * const x = (struct nesting *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_7nesting ((*x).all); gt_ggc_m_7nesting ((*x).next); gt_ggc_m_7rtx_def ((*x).exit_label); switch (((*x)).desc) { case COND_NESTING: gt_ggc_m_7rtx_def ((*x).data.cond.endif_label); gt_ggc_m_7rtx_def ((*x).data.cond.next_label); break; case BLOCK_NESTING: gt_ggc_m_7rtx_def ((*x).data.block.stack_level); gt_ggc_m_7rtx_def ((*x).data.block.first_insn); gt_ggc_m_7nesting ((*x).data.block.innermost_stack_block); gt_ggc_m_9tree_node ((*x).data.block.cleanups); gt_ggc_m_9tree_node ((*x).data.block.outer_cleanups); gt_ggc_m_11label_chain ((*x).data.block.label_chain); gt_ggc_m_7rtx_def ((*x).data.block.last_unconditional_cleanup); break; case CASE_NESTING: gt_ggc_m_7rtx_def ((*x).data.case_stmt.start); gt_ggc_m_9case_node ((*x).data.case_stmt.case_list); gt_ggc_m_9tree_node ((*x).data.case_stmt.default_label); gt_ggc_m_9tree_node ((*x).data.case_stmt.index_expr); gt_ggc_m_9tree_node ((*x).data.case_stmt.nominal_type); break; default: break; } } } void gt_ggc_mx_case_node (void *x_p) { struct case_node * const x = (struct case_node *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9case_node ((*x).left); gt_ggc_m_9case_node ((*x).right); gt_ggc_m_9case_node ((*x).parent); gt_ggc_m_9tree_node ((*x).low); gt_ggc_m_9tree_node ((*x).high); gt_ggc_m_9tree_node ((*x).code_label); } } void gt_ggc_mx_stmt_status (void *x_p) { struct stmt_status * const x = (struct stmt_status *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_7nesting ((*x).x_block_stack); gt_ggc_m_7nesting ((*x).x_stack_block_stack); gt_ggc_m_7nesting ((*x).x_cond_stack); gt_ggc_m_7nesting ((*x).x_case_stack); gt_ggc_m_7nesting ((*x).x_nesting_stack); gt_ggc_m_10goto_fixup ((*x).x_goto_fixup_chain); } } void gt_pch_nx_goto_fixup (void *x_p) { struct goto_fixup * const x = (struct goto_fixup *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_10goto_fixup)) { gt_pch_n_10goto_fixup ((*x).next); gt_pch_n_7rtx_def ((*x).before_jump); gt_pch_n_9tree_node ((*x).target); gt_pch_n_9tree_node ((*x).context); gt_pch_n_7rtx_def ((*x).target_rtl); gt_pch_n_7rtx_def ((*x).stack_level); gt_pch_n_9tree_node ((*x).cleanup_list_list); } } void gt_pch_nx_label_chain (void *x_p) { struct label_chain * const x = (struct label_chain *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_11label_chain)) { gt_pch_n_11label_chain ((*x).next); gt_pch_n_9tree_node ((*x).label); } } void gt_pch_nx_nesting (void *x_p) { struct nesting * const x = (struct nesting *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_7nesting)) { gt_pch_n_7nesting ((*x).all); gt_pch_n_7nesting ((*x).next); gt_pch_n_7rtx_def ((*x).exit_label); switch (((*x)).desc) { case COND_NESTING: gt_pch_n_7rtx_def ((*x).data.cond.endif_label); 
gt_pch_n_7rtx_def ((*x).data.cond.next_label); break; case BLOCK_NESTING: gt_pch_n_7rtx_def ((*x).data.block.stack_level); gt_pch_n_7rtx_def ((*x).data.block.first_insn); gt_pch_n_7nesting ((*x).data.block.innermost_stack_block); gt_pch_n_9tree_node ((*x).data.block.cleanups); gt_pch_n_9tree_node ((*x).data.block.outer_cleanups); gt_pch_n_11label_chain ((*x).data.block.label_chain); gt_pch_n_7rtx_def ((*x).data.block.last_unconditional_cleanup); break; case CASE_NESTING: gt_pch_n_7rtx_def ((*x).data.case_stmt.start); gt_pch_n_9case_node ((*x).data.case_stmt.case_list); gt_pch_n_9tree_node ((*x).data.case_stmt.default_label); gt_pch_n_9tree_node ((*x).data.case_stmt.index_expr); gt_pch_n_9tree_node ((*x).data.case_stmt.nominal_type); gt_pch_n_S ((*x).data.case_stmt.printname); break; default: break; } } } void gt_pch_nx_case_node (void *x_p) { struct case_node * const x = (struct case_node *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9case_node)) { gt_pch_n_9case_node ((*x).left); gt_pch_n_9case_node ((*x).right); gt_pch_n_9case_node ((*x).parent); gt_pch_n_9tree_node ((*x).low); gt_pch_n_9tree_node ((*x).high); gt_pch_n_9tree_node ((*x).code_label); } } void gt_pch_nx_stmt_status (void *x_p) { struct stmt_status * const x = (struct stmt_status *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_11stmt_status)) { gt_pch_n_7nesting ((*x).x_block_stack); gt_pch_n_7nesting ((*x).x_stack_block_stack); gt_pch_n_7nesting ((*x).x_cond_stack); gt_pch_n_7nesting ((*x).x_case_stack); gt_pch_n_7nesting ((*x).x_nesting_stack); gt_pch_n_S ((*x).x_emit_locus.file); gt_pch_n_10goto_fixup ((*x).x_goto_fixup_chain); } } void gt_pch_p_10goto_fixup (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct goto_fixup * const x ATTRIBUTE_UNUSED = (struct goto_fixup *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).before_jump), cookie); if ((void *)(x) == this_obj) op (&((*x).target), cookie); if ((void *)(x) == this_obj) op (&((*x).context), cookie); if ((void *)(x) == this_obj) op (&((*x).target_rtl), cookie); if ((void *)(x) == this_obj) op (&((*x).stack_level), cookie); if ((void *)(x) == this_obj) op (&((*x).cleanup_list_list), cookie); } void gt_pch_p_11label_chain (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct label_chain * const x ATTRIBUTE_UNUSED = (struct label_chain *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).label), cookie); } void gt_pch_p_7nesting (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct nesting * const x ATTRIBUTE_UNUSED = (struct nesting *)x_p; if ((void *)(x) == this_obj) op (&((*x).all), cookie); if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).exit_label), cookie); switch (((*x)).desc) { case COND_NESTING: if ((void *)(x) == this_obj) op (&((*x).data.cond.endif_label), cookie); if ((void *)(x) == this_obj) op (&((*x).data.cond.next_label), cookie); break; case BLOCK_NESTING: if ((void *)(x) == this_obj) op (&((*x).data.block.stack_level), cookie); if ((void *)(x) == this_obj) op (&((*x).data.block.first_insn), cookie); if ((void *)(x) == this_obj) op (&((*x).data.block.innermost_stack_block), cookie); if ((void *)(x) == this_obj) op (&((*x).data.block.cleanups), cookie); if ((void *)(x) == this_obj) op 
(&((*x).data.block.outer_cleanups), cookie); if ((void *)(x) == this_obj) op (&((*x).data.block.label_chain), cookie); if ((void *)(x) == this_obj) op (&((*x).data.block.last_unconditional_cleanup), cookie); break; case CASE_NESTING: if ((void *)(x) == this_obj) op (&((*x).data.case_stmt.start), cookie); if ((void *)(x) == this_obj) op (&((*x).data.case_stmt.case_list), cookie); if ((void *)(x) == this_obj) op (&((*x).data.case_stmt.default_label), cookie); if ((void *)(x) == this_obj) op (&((*x).data.case_stmt.index_expr), cookie); if ((void *)(x) == this_obj) op (&((*x).data.case_stmt.nominal_type), cookie); if ((void *)(x) == this_obj) op (&((*x).data.case_stmt.printname), cookie); break; default: break; } } void gt_pch_p_9case_node (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct case_node * const x ATTRIBUTE_UNUSED = (struct case_node *)x_p; if ((void *)(x) == this_obj) op (&((*x).left), cookie); if ((void *)(x) == this_obj) op (&((*x).right), cookie); if ((void *)(x) == this_obj) op (&((*x).parent), cookie); if ((void *)(x) == this_obj) op (&((*x).low), cookie); if ((void *)(x) == this_obj) op (&((*x).high), cookie); if ((void *)(x) == this_obj) op (&((*x).code_label), cookie); } void gt_pch_p_11stmt_status (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct stmt_status * const x ATTRIBUTE_UNUSED = (struct stmt_status *)x_p; if ((void *)(x) == this_obj) op (&((*x).x_block_stack), cookie); if ((void *)(x) == this_obj) op (&((*x).x_stack_block_stack), cookie); if ((void *)(x) == this_obj) op (&((*x).x_cond_stack), cookie); if ((void *)(x) == this_obj) op (&((*x).x_case_stack), cookie); if ((void *)(x) == this_obj) op (&((*x).x_nesting_stack), cookie); if ((void *)(x) == this_obj) op (&((*x).x_emit_locus.file), cookie); if ((void *)(x) == this_obj) op (&((*x).x_goto_fixup_chain), cookie); } /* C-compiler utilities for types and variables storage layout Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Set to one when set_sizetype has been called. */ static int sizetype_set; /* List of types created before set_sizetype has been called. We do not make this a GGC root since we want these nodes to be reclaimed. */ static tree early_type_list; /* Data type for the expressions representing sizes of data types. It is the first integer type laid out. */ tree sizetype_tab[(int) TYPE_KIND_LAST]; /* If nonzero, this is an upper limit on alignment of structure fields. The value is measured in bits. */ unsigned int maximum_field_alignment; /* If nonzero, the alignment of a bitstring or (power-)set value, in bits. May be overridden by front-ends. 
*/ unsigned int set_alignment = 0; /* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated in Pmode, not ptr_mode. Set only by internal_reference_types called only by a front end. */ static int reference_types_internal = 0; static void finalize_record_size (record_layout_info); static void finalize_type_size (tree); static void place_union_field (record_layout_info, tree); #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED) static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, tree); #endif extern void debug_rli (record_layout_info); /* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */ static GTY(()) tree pending_sizes; /* Show that REFERENCE_TYPES are internal and should be Pmode. Called only by front end. */ void internal_reference_types (void) { reference_types_internal = 1; } /* Get a list of all the objects put on the pending sizes list. */ tree get_pending_sizes (void) { tree chain = pending_sizes; pending_sizes = 0; return chain; } /* Add EXPR to the pending sizes list. */ void put_pending_size (tree expr) { /* Strip any simple arithmetic from EXPR to see if it has an underlying SAVE_EXPR. */ expr = skip_simple_arithmetic (expr); if (TREE_CODE (expr) == SAVE_EXPR) pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes); } /* Put a chain of objects into the pending sizes list, which must be empty. */ void put_pending_sizes (tree chain) { if (pending_sizes) abort (); pending_sizes = chain; } /* Given a size SIZE that may not be a constant, return a SAVE_EXPR to serve as the actual size-expression for a type or decl. */ tree variable_size (tree size) { tree save; /* If the language-processor is to take responsibility for variable-sized items (e.g., languages which have elaboration procedures like Ada), just return SIZE unchanged. Likewise for self-referential sizes and constant sizes. */ if (TREE_CONSTANT (size) || lang_hooks.decls.global_bindings_p () < 0 || CONTAINS_PLACEHOLDER_P (size)) return size; size = save_expr (size); /* If an array with a variable number of elements is declared, and the elements require destruction, we will emit a cleanup for the array. That cleanup is run both on normal exit from the block and in the exception-handler for the block. Normally, when code is used in both ordinary code and in an exception handler it is `unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do not wish to do that here; the array-size is the same in both places. */ save = skip_simple_arithmetic (size); if (cfun && cfun->x_dont_save_pending_sizes_p) /* The front-end doesn't want us to keep a list of the expressions that determine sizes for variable size objects. Trust it. */ return size; if (lang_hooks.decls.global_bindings_p ()) { if (TREE_CONSTANT (size)) error ("type size can't be explicitly evaluated"); else error ("variable-size type declared outside of any function"); return size_one_node; } put_pending_size (save); return size; } #ifndef MAX_FIXED_MODE_SIZE #define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode) #endif /* Return the machine mode to use for a nonscalar of SIZE bits. The mode must be in class CLASS, and have exactly that many value bits; it may have padding as well. If LIMIT is nonzero, modes of wider than MAX_FIXED_MODE_SIZE will not be used. 
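For example, on a typical 32-bit target mode_for_size (32, MODE_INT, 0)
   returns SImode, while a request for 24 bits returns BLKmode because no
   integer mode has exactly 24 value bits (illustrative; the available
   modes are target dependent).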
*/ enum machine_mode mode_for_size (unsigned int size, enum mode_class class, int limit) { enum machine_mode mode; if (limit && size > MAX_FIXED_MODE_SIZE) return BLKmode; /* Get the first mode which has this size, in the specified class. */ for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) if (GET_MODE_PRECISION (mode) == size) return mode; return BLKmode; } /* Similar, except passed a tree node. */ enum machine_mode mode_for_size_tree (tree size, enum mode_class class, int limit) { if (TREE_CODE (size) != INTEGER_CST || TREE_OVERFLOW (size) /* What we really want to say here is that the size can fit in a host integer, but we know there's no way we'd find a mode for this many bits, so there's no point in doing the precise test. */ || compare_tree_int (size, 1000) > 0) return BLKmode; else return mode_for_size (tree_low_cst (size, 1), class, limit); } /* Similar, but never return BLKmode; return the narrowest mode that contains at least the requested number of value bits. */ enum machine_mode smallest_mode_for_size (unsigned int size, enum mode_class class) { enum machine_mode mode; /* Get the first mode which has at least this size, in the specified class. */ for (mode = GET_CLASS_NARROWEST_MODE (class); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) if (GET_MODE_PRECISION (mode) >= size) return mode; abort (); } /* Find an integer mode of the exact same size, or BLKmode on failure. */ enum machine_mode int_mode_for_mode (enum machine_mode mode) { switch (GET_MODE_CLASS (mode)) { case MODE_INT: case MODE_PARTIAL_INT: break; case MODE_COMPLEX_INT: case MODE_COMPLEX_FLOAT: case MODE_FLOAT: case MODE_VECTOR_INT: case MODE_VECTOR_FLOAT: mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0); break; case MODE_RANDOM: if (mode == BLKmode) break; /* ... fall through ... */ case MODE_CC: default: abort (); } return mode; } /* Return the alignment of MODE. This will be bounded by 1 and BIGGEST_ALIGNMENT. */ unsigned int get_mode_alignment (enum machine_mode mode) { return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT)); } /* Return the value of VALUE, rounded up to a multiple of DIVISOR. This can only be applied to objects of a sizetype. */ tree round_up (tree value, int divisor) { tree arg = size_int_type (divisor, TREE_TYPE (value)); return size_binop (MULT_EXPR, size_binop (CEIL_DIV_EXPR, value, arg), arg); } /* Likewise, but round down. */ tree round_down (tree value, int divisor) { tree arg = size_int_type (divisor, TREE_TYPE (value)); return size_binop (MULT_EXPR, size_binop (FLOOR_DIV_EXPR, value, arg), arg); } /* Subroutine of layout_decl: Force alignment required for the data type. But if the decl itself wants greater alignment, don't override that. */ static inline void do_type_align (tree type, tree decl) { if (TYPE_ALIGN (type) > DECL_ALIGN (decl)) { DECL_ALIGN (decl) = TYPE_ALIGN (type); if (TREE_CODE (decl) == FIELD_DECL) DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type); } } /* Set the size, mode and alignment of a ..._DECL node. TYPE_DECL does need this for C++. Note that LABEL_DECL and CONST_DECL nodes do not need this, and FUNCTION_DECL nodes have them set up in a special (and simple) way. Don't call layout_decl for them. KNOWN_ALIGN is the amount of alignment we can assume this decl has with no special effort. It is relevant only for FIELD_DECLs and depends on the previous fields. All that matters about KNOWN_ALIGN is which powers of 2 divide it. 
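(For example, a KNOWN_ALIGN of 48 promises no more than 16-bit
   alignment, since 16 is the largest power of 2 dividing 48.)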
If KNOWN_ALIGN is 0, it means, "as much alignment as you like": the record will be aligned to suit. */ void layout_decl (tree decl, unsigned int known_align) { tree type = TREE_TYPE (decl); enum tree_code code = TREE_CODE (decl); rtx rtl = NULL_RTX; if (code == CONST_DECL) return; else if (code != VAR_DECL && code != PARM_DECL && code != RESULT_DECL && code != TYPE_DECL && code != FIELD_DECL) abort (); rtl = DECL_RTL_IF_SET (decl); if (type == error_mark_node) type = void_type_node; /* Usually the size and mode come from the data type without change, however, the front-end may set the explicit width of the field, so its size may not be the same as the size of its type. This happens with bitfields, of course (an `int' bitfield may be only 2 bits, say), but it also happens with other fields. For example, the C++ front-end creates zero-sized fields corresponding to empty base classes, and depends on layout_type setting DECL_FIELD_BITPOS correctly for the field. Set the size in bytes from the size in bits. If we have already set the mode, don't set it again since we can be called twice for FIELD_DECLs. */ DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type); if (DECL_MODE (decl) == VOIDmode) DECL_MODE (decl) = TYPE_MODE (type); if (DECL_SIZE (decl) == 0) { DECL_SIZE (decl) = TYPE_SIZE (type); DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type); } else if (DECL_SIZE_UNIT (decl) == 0) DECL_SIZE_UNIT (decl) = convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl), bitsize_unit_node)); if (code != FIELD_DECL) /* For non-fields, update the alignment from the type. */ do_type_align (type, decl); else /* For fields, it's a bit more complicated... */ { bool old_user_align = DECL_USER_ALIGN (decl); if (DECL_BIT_FIELD (decl)) { DECL_BIT_FIELD_TYPE (decl) = type; /* A zero-length bit-field affects the alignment of the next field. */ if (integer_zerop (DECL_SIZE (decl)) && ! DECL_PACKED (decl) && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl))) { #ifdef PCC_BITFIELD_TYPE_MATTERS if (PCC_BITFIELD_TYPE_MATTERS) do_type_align (type, decl); else #endif { #ifdef EMPTY_FIELD_BOUNDARY if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl)) { DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY; DECL_USER_ALIGN (decl) = 0; } #endif } } /* See if we can use an ordinary integer mode for a bit-field. Conditions are: a fixed size that is correct for another mode and occupying a complete byte or bytes on proper boundary. */ if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT) { enum machine_mode xmode = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1); if (xmode != BLKmode && (known_align == 0 || known_align >= GET_MODE_ALIGNMENT (xmode))) { DECL_ALIGN (decl) = MAX (GET_MODE_ALIGNMENT (xmode), DECL_ALIGN (decl)); DECL_MODE (decl) = xmode; DECL_BIT_FIELD (decl) = 0; } } /* Turn off DECL_BIT_FIELD if we won't need it set. */ if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode && known_align >= TYPE_ALIGN (type) && DECL_ALIGN (decl) >= TYPE_ALIGN (type)) DECL_BIT_FIELD (decl) = 0; } else if (DECL_PACKED (decl) && DECL_USER_ALIGN (decl)) /* Don't touch DECL_ALIGN. For other packed fields, go ahead and round up; we'll reduce it again below. We want packing to supersede USER_ALIGN inherited from the type, but defer to alignment explicitly specified on the field decl. */; else do_type_align (type, decl); /* If the field is of variable size, we can't misalign it since we have no way to make a temporary to align the result. 
But this isn't an issue if the decl is not addressable. Likewise if it is of unknown size. Note that do_type_align may set DECL_USER_ALIGN, so we need to check old_user_align instead. */ if (DECL_PACKED (decl) && !old_user_align && (DECL_NONADDRESSABLE_P (decl) || DECL_SIZE_UNIT (decl) == 0 || TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST)) DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT); if (! DECL_USER_ALIGN (decl) && ! DECL_PACKED (decl)) { /* Some targets (i.e. i386, VMS) limit struct field alignment to a lower boundary than alignment of variables unless it was overridden by attribute aligned. */ #ifdef BIGGEST_FIELD_ALIGNMENT DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT); #endif #ifdef ADJUST_FIELD_ALIGN DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl)); #endif } /* Should this be controlled by DECL_USER_ALIGN, too? */ if (maximum_field_alignment != 0) DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), maximum_field_alignment); } /* Evaluate nonconstant size only once, either now or as soon as safe. */ if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST) DECL_SIZE (decl) = variable_size (DECL_SIZE (decl)); if (DECL_SIZE_UNIT (decl) != 0 && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST) DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl)); /* If requested, warn about definitions of large data objects. */ if (warn_larger_than && (code == VAR_DECL || code == PARM_DECL) && ! DECL_EXTERNAL (decl)) { tree size = DECL_SIZE_UNIT (decl); if (size != 0 && TREE_CODE (size) == INTEGER_CST && compare_tree_int (size, larger_than_size) > 0) { int size_as_int = TREE_INT_CST_LOW (size); if (compare_tree_int (size, size_as_int) == 0) warning ("%Jsize of '%D' is %d bytes", decl, decl, size_as_int); else warning ("%Jsize of '%D' is larger than %d bytes", decl, decl, larger_than_size); } } /* If the RTL was already set, update its mode and mem attributes. */ if (rtl) { PUT_MODE (rtl, DECL_MODE (decl)); SET_DECL_RTL (decl, 0); set_mem_attributes (rtl, decl, 1); SET_DECL_RTL (decl, rtl); } } /* Hook for a front-end function that can modify the record layout as needed immediately before it is finalized. */ void (*lang_adjust_rli) (record_layout_info) = 0; void set_lang_adjust_rli (void (*f) (record_layout_info)) { lang_adjust_rli = f; } /* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE. Return a pointer to a struct record_layout_info which is to be passed to all other layout functions for this record. It is the responsibility of the caller to call `free' for the storage returned. Note that garbage collection is not permitted until we finish laying out the record. */ record_layout_info start_record_layout (tree t) { record_layout_info rli = xmalloc (sizeof (struct record_layout_info_s)); rli->t = t; /* If the type has a minimum specified alignment (via an attribute declaration, for example) use it -- otherwise, start with a one-byte alignment. */ rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t)); rli->unpacked_align = rli->record_align; rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT); #ifdef STRUCTURE_SIZE_BOUNDARY /* Packed structures don't need to have minimum size. */ if (! 
TYPE_PACKED (t)) rli->record_align = MAX (rli->record_align, (unsigned) STRUCTURE_SIZE_BOUNDARY); #endif rli->offset = size_zero_node; rli->bitpos = bitsize_zero_node; rli->prev_field = 0; rli->pending_statics = 0; rli->packed_maybe_necessary = 0; return rli; } /* These four routines perform computations that convert between the offset/bitpos forms and byte and bit offsets. */ tree bit_from_pos (tree offset, tree bitpos) { return size_binop (PLUS_EXPR, bitpos, size_binop (MULT_EXPR, convert (bitsizetype, offset), bitsize_unit_node)); } tree byte_from_pos (tree offset, tree bitpos) { return size_binop (PLUS_EXPR, offset, convert (sizetype, size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node))); } void pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align, tree pos) { *poffset = size_binop (MULT_EXPR, convert (sizetype, size_binop (FLOOR_DIV_EXPR, pos, bitsize_int (off_align))), size_int (off_align / BITS_PER_UNIT)); *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align)); } /* Given a pointer to bit and byte offsets and an offset alignment, normalize the offsets so they are within the alignment. */ void normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align) { /* If the bit position is now larger than it should be, adjust it downwards. */ if (compare_tree_int (*pbitpos, off_align) >= 0) { tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos, bitsize_int (off_align)); *poffset = size_binop (PLUS_EXPR, *poffset, size_binop (MULT_EXPR, convert (sizetype, extra_aligns), size_int (off_align / BITS_PER_UNIT))); *pbitpos = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align)); } } /* Print debugging information about the information in RLI. */ void debug_rli (record_layout_info rli) { print_node_brief (stderr, "type", rli->t, 0); print_node_brief (stderr, "\noffset", rli->offset, 0); print_node_brief (stderr, " bitpos", rli->bitpos, 0); fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n", rli->record_align, rli->unpacked_align, rli->offset_align); if (rli->packed_maybe_necessary) fprintf (stderr, "packed may be necessary\n"); if (rli->pending_statics) { fprintf (stderr, "pending statics:\n"); debug_tree (rli->pending_statics); } } /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */ void normalize_rli (record_layout_info rli) { normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align); } /* Returns the size in bytes allocated so far. */ tree rli_size_unit_so_far (record_layout_info rli) { return byte_from_pos (rli->offset, rli->bitpos); } /* Returns the size in bits allocated so far. */ tree rli_size_so_far (record_layout_info rli) { return bit_from_pos (rli->offset, rli->bitpos); } /* FIELD is about to be added to RLI->T. The alignment (in bits) of the next available location is given by KNOWN_ALIGN. Update the variable alignment fields in RLI, and return the alignment to give the FIELD. */ unsigned int update_alignment_for_field (record_layout_info rli, tree field, unsigned int known_align) { /* The alignment required for FIELD. */ unsigned int desired_align; /* The type of this field. */ tree type = TREE_TYPE (field); /* True if the field was explicitly aligned by the user. */ bool user_align; bool is_bitfield; /* Lay out the field so we know what alignment it needs. 
*/ layout_decl (field, known_align); desired_align = DECL_ALIGN (field); user_align = DECL_USER_ALIGN (field); is_bitfield = (type != error_mark_node && DECL_BIT_FIELD_TYPE (field) && ! integer_zerop (TYPE_SIZE (type))); /* Record must have at least as much alignment as any field. Otherwise, the alignment of the field within the record is meaningless. */ if (is_bitfield && targetm.ms_bitfield_layout_p (rli->t)) { /* Here, the alignment of the underlying type of a bitfield can affect the alignment of a record; even a zero-sized field can do this. The alignment should be to the alignment of the type, except that for zero-size bitfields this only applies if there was an immediately prior, nonzero-size bitfield. (That's the way it is, experimentally.) */ if (! integer_zerop (DECL_SIZE (field)) ? ! DECL_PACKED (field) : (rli->prev_field && DECL_BIT_FIELD_TYPE (rli->prev_field) && ! integer_zerop (DECL_SIZE (rli->prev_field)))) { unsigned int type_align = TYPE_ALIGN (type); type_align = MAX (type_align, desired_align); if (maximum_field_alignment != 0) type_align = MIN (type_align, maximum_field_alignment); rli->record_align = MAX (rli->record_align, type_align); rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); } } #ifdef PCC_BITFIELD_TYPE_MATTERS else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS) { /* Named bit-fields cause the entire structure to have the alignment implied by their type. Some targets also apply the same rules to unnamed bitfields. */ if (DECL_NAME (field) != 0 || targetm.align_anon_bitfield ()) { unsigned int type_align = TYPE_ALIGN (type); #ifdef ADJUST_FIELD_ALIGN if (! TYPE_USER_ALIGN (type)) type_align = ADJUST_FIELD_ALIGN (field, type_align); #endif if (maximum_field_alignment != 0) type_align = MIN (type_align, maximum_field_alignment); else if (DECL_PACKED (field)) type_align = MIN (type_align, BITS_PER_UNIT); /* The alignment of the record is increased to the maximum of the current alignment, the alignment indicated on the field (i.e., the alignment specified by an __aligned__ attribute), and the alignment indicated by the type of the field. */ rli->record_align = MAX (rli->record_align, desired_align); rli->record_align = MAX (rli->record_align, type_align); if (warn_packed) rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); user_align |= TYPE_USER_ALIGN (type); } } #endif else { rli->record_align = MAX (rli->record_align, desired_align); rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); } TYPE_USER_ALIGN (rli->t) |= user_align; return desired_align; } /* Called from place_field to handle unions. */ static void place_union_field (record_layout_info rli, tree field) { update_alignment_for_field (rli, field, /*known_align=*/0); DECL_FIELD_OFFSET (field) = size_zero_node; DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node; SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT); /* We assume the union's size will be a multiple of a byte so we don't bother with BITPOS. */ if (TREE_CODE (rli->t) == UNION_TYPE) rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field)); else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE) rli->offset = fold (build (COND_EXPR, sizetype, DECL_QUALIFIER (field), DECL_SIZE_UNIT (field), rli->offset)); } #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED) /* A bitfield of SIZE with a required access alignment of ALIGN is allocated at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more units of alignment than the underlying TYPE. 
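For example, assuming a 32-bit unit (ALIGN of 32) and a 32-bit TYPE, a
   16-bit field placed at bit offset 24 would occupy bits 24..39 and so
   span two units while the type itself spans only one; the function
   returns nonzero for that layout.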
*/ static int excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset, HOST_WIDE_INT size, HOST_WIDE_INT align, tree type) { /* Note that the calculation of OFFSET might overflow; we calculate it so that we still get the right result as long as ALIGN is a power of two. */ unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset; offset = offset % align; return ((offset + size + align - 1) / align > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1) / align)); } #endif /* RLI contains information about the layout of a RECORD_TYPE. FIELD is a FIELD_DECL to be added after those fields already present in T. (FIELD is not actually added to the TYPE_FIELDS list here; callers that desire that behavior must manually perform that step.) */ void place_field (record_layout_info rli, tree field) { /* The alignment required for FIELD. */ unsigned int desired_align; /* The alignment FIELD would have if we just dropped it into the record as it presently stands. */ unsigned int known_align; unsigned int actual_align; /* The type of this field. */ tree type = TREE_TYPE (field); if (TREE_CODE (field) == ERROR_MARK || TREE_CODE (type) == ERROR_MARK) return; /* If FIELD is static, then treat it like a separate variable, not really like a structure field. If it is a FUNCTION_DECL, it's a method. In both cases, all we do is lay out the decl, and we do it *after* the record is laid out. */ if (TREE_CODE (field) == VAR_DECL) { rli->pending_statics = tree_cons (NULL_TREE, field, rli->pending_statics); return; } /* Enumerators and enum types which are local to this class need not be laid out. Likewise for initialized constant fields. */ else if (TREE_CODE (field) != FIELD_DECL) return; /* Unions are laid out very differently than records, so split that code off to another function. */ else if (TREE_CODE (rli->t) != RECORD_TYPE) { place_union_field (rli, field); return; } /* Work out the known alignment so far. Note that A & (-A) is the value of the least-significant bit in A that is one. */ if (! integer_zerop (rli->bitpos)) known_align = (tree_low_cst (rli->bitpos, 1) & - tree_low_cst (rli->bitpos, 1)); else if (integer_zerop (rli->offset)) known_align = BIGGEST_ALIGNMENT; else if (host_integerp (rli->offset, 1)) known_align = (BITS_PER_UNIT * (tree_low_cst (rli->offset, 1) & - tree_low_cst (rli->offset, 1))); else known_align = rli->offset_align; desired_align = update_alignment_for_field (rli, field, known_align); if (warn_packed && DECL_PACKED (field)) { if (known_align >= TYPE_ALIGN (type)) { if (TYPE_ALIGN (type) > desired_align) { if (STRICT_ALIGNMENT) warning ("%Jpacked attribute causes inefficient alignment " "for '%D'", field, field); else warning ("%Jpacked attribute is unnecessary for '%D'", field, field); } } else rli->packed_maybe_necessary = 1; } /* Does this field automatically have alignment it needs by virtue of the fields that precede it and the record's own alignment? */ if (known_align < desired_align) { /* No, we need to skip space before this field. Bump the cumulative size to multiple of field alignment. */ if (warn_padded) warning ("%Jpadding struct to align '%D'", field, field); /* If the alignment is still within offset_align, just align the bit position. */ if (desired_align < rli->offset_align) rli->bitpos = round_up (rli->bitpos, desired_align); else { /* First adjust OFFSET by the partial bits, then align. 
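A hedged numeric illustration of the adjustment below, with made-up values and BITS_PER_UNIT == 8: if rli->bitpos is 13 and desired_align is 64, the CEIL_DIV_EXPR adds ceil (13 / 8) == 2 bytes to rli->offset, bitpos is reset to zero, and round_up then advances the byte offset to the next multiple of 64 / 8 == 8 bytes.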
*/ rli->offset = size_binop (PLUS_EXPR, rli->offset, convert (sizetype, size_binop (CEIL_DIV_EXPR, rli->bitpos, bitsize_unit_node))); rli->bitpos = bitsize_zero_node; rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT); } if (! TREE_CONSTANT (rli->offset)) rli->offset_align = desired_align; } /* Handle compatibility with PCC. Note that if the record has any variable-sized fields, we need not worry about compatibility. */ #ifdef PCC_BITFIELD_TYPE_MATTERS if (PCC_BITFIELD_TYPE_MATTERS && ! targetm.ms_bitfield_layout_p (rli->t) && TREE_CODE (field) == FIELD_DECL && type != error_mark_node && DECL_BIT_FIELD (field) && ! DECL_PACKED (field) && maximum_field_alignment == 0 && ! integer_zerop (DECL_SIZE (field)) && host_integerp (DECL_SIZE (field), 1) && host_integerp (rli->offset, 1) && host_integerp (TYPE_SIZE (type), 1)) { unsigned int type_align = TYPE_ALIGN (type); tree dsize = DECL_SIZE (field); HOST_WIDE_INT field_size = tree_low_cst (dsize, 1); HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0); HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0); #ifdef ADJUST_FIELD_ALIGN if (! TYPE_USER_ALIGN (type)) type_align = ADJUST_FIELD_ALIGN (field, type_align); #endif /* A bit field may not span more units of alignment of its type than its type itself. Advance to next boundary if necessary. */ if (excess_unit_span (offset, bit_offset, field_size, type_align, type)) rli->bitpos = round_up (rli->bitpos, type_align); TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); } #endif #ifdef BITFIELD_NBYTES_LIMITED if (BITFIELD_NBYTES_LIMITED && ! targetm.ms_bitfield_layout_p (rli->t) && TREE_CODE (field) == FIELD_DECL && type != error_mark_node && DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field) && ! integer_zerop (DECL_SIZE (field)) && host_integerp (DECL_SIZE (field), 1) && host_integerp (rli->offset, 1) && host_integerp (TYPE_SIZE (type), 1)) { unsigned int type_align = TYPE_ALIGN (type); tree dsize = DECL_SIZE (field); HOST_WIDE_INT field_size = tree_low_cst (dsize, 1); HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0); HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0); #ifdef ADJUST_FIELD_ALIGN if (! TYPE_USER_ALIGN (type)) type_align = ADJUST_FIELD_ALIGN (field, type_align); #endif if (maximum_field_alignment != 0) type_align = MIN (type_align, maximum_field_alignment); /* ??? This test is opposite the test in the containing if statement, so this code is unreachable currently. */ else if (DECL_PACKED (field)) type_align = MIN (type_align, BITS_PER_UNIT); /* A bit field may not span the unit of alignment of its type. Advance to next boundary if necessary. */ if (excess_unit_span (offset, bit_offset, field_size, type_align, type)) rli->bitpos = round_up (rli->bitpos, type_align); TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); } #endif /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details. A subtlety: When a bit field is inserted into a packed record, the whole size of the underlying type is used by one or more same-size adjacent bitfields. (That is, if its long:3, 32 bits is used in the record, and any additional adjacent long bitfields are packed into the same chunk of 32 bits. However, if the size changes, a new field of that size is allocated.) In an unpacked record, this is the same as using alignment, but not equivalent when packing. Note: for compatibility, we use the type size, not the type alignment to determine alignment, since that matches the documentation */ if (targetm.ms_bitfield_layout_p (rli->t) && ((DECL_BIT_FIELD_TYPE (field) && ! 
DECL_PACKED (field)) || (rli->prev_field && ! DECL_PACKED (rli->prev_field)))) { /* At this point, either the prior or current are bitfields, (possibly both), and we're dealing with MS packing. */ tree prev_saved = rli->prev_field; /* Is the prior field a bitfield? If so, handle "runs" of same type size fields. */ if (rli->prev_field /* necessarily a bitfield if it exists. */) { /* If both are bitfields, nonzero, and the same size, this is the middle of a run. Zero declared size fields are special and handled as "end of run". (Note: it's nonzero declared size, but equal type sizes!) (Since we know that both the current and previous fields are bitfields by the time we check it, DECL_SIZE must be present for both.) */ if (DECL_BIT_FIELD_TYPE (field) && !integer_zerop (DECL_SIZE (field)) && !integer_zerop (DECL_SIZE (rli->prev_field)) && host_integerp (DECL_SIZE (rli->prev_field), 0) && host_integerp (TYPE_SIZE (type), 0) && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (rli->prev_field)))) { /* We're in the middle of a run of equal type size fields; make sure we realign if we run out of bits. (Not decl size, type size!) */ HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0); if (rli->remaining_in_alignment < bitsize) { /* out of bits; bump up to next 'word'. */ rli->offset = DECL_FIELD_OFFSET (rli->prev_field); rli->bitpos = size_binop (PLUS_EXPR, TYPE_SIZE (type), DECL_FIELD_BIT_OFFSET (rli->prev_field)); rli->prev_field = field; rli->remaining_in_alignment = tree_low_cst (TYPE_SIZE (type), 0); } rli->remaining_in_alignment -= bitsize; } else { /* End of a run: if leaving a run of bitfields of the same type size, we have to "use up" the rest of the bits of the type size. Compute the new position as the sum of the size for the prior type and where we first started working on that type. Note: since the beginning of the field was aligned then of course the end will be too. No round needed. */ if (!integer_zerop (DECL_SIZE (rli->prev_field))) { tree type_size = TYPE_SIZE (TREE_TYPE (rli->prev_field)); rli->bitpos = size_binop (PLUS_EXPR, type_size, DECL_FIELD_BIT_OFFSET (rli->prev_field)); } else /* We "use up" size zero fields; the code below should behave as if the prior field was not a bitfield. */ prev_saved = NULL; /* Cause a new bitfield to be captured, either this time (if currently a bitfield) or next time we see one. */ if (!DECL_BIT_FIELD_TYPE(field) || integer_zerop (DECL_SIZE (field))) rli->prev_field = NULL; } normalize_rli (rli); } /* If we're starting a new run of same size type bitfields (or a run of non-bitfields), set up the "first of the run" fields. That is, if the current field is not a bitfield, or if there was a prior bitfield the type sizes differ, or if there wasn't a prior bitfield the size of the current field is nonzero. Note: we must be sure to test ONLY the type size if there was a prior bitfield and ONLY for the current field being zero if there wasn't. */ if (!DECL_BIT_FIELD_TYPE (field) || ( prev_saved != NULL ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (prev_saved))) : !integer_zerop (DECL_SIZE (field)) )) { /* Never smaller than a byte for compatibility. */ unsigned int type_align = BITS_PER_UNIT; /* (When not a bitfield), we could be seeing a flex array (with no DECL_SIZE). Since we won't be using remaining_in_alignment until we see a bitfield (and come by here again) we just skip calculating it. 
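A hedged sketch with made-up sizes, assuming a 32-bit underlying type: when a run starts with something like long f1 : 3, the subtraction below leaves remaining_in_alignment at 32 - 3 == 29; a following long f2 : 15 of the same type size is handled by the middle-of-run code above and drops it to 14; a further long f3 : 20 no longer fits, so the run is bumped to the next 32-bit chunk before f3 is placed.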
*/ if (DECL_SIZE (field) != NULL && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0) && host_integerp (DECL_SIZE (field), 0)) rli->remaining_in_alignment = tree_low_cst (TYPE_SIZE (TREE_TYPE(field)), 0) - tree_low_cst (DECL_SIZE (field), 0); /* Now align (conventionally) for the new type. */ if (!DECL_PACKED(field)) type_align = MAX(TYPE_ALIGN (type), type_align); if (prev_saved && DECL_BIT_FIELD_TYPE (prev_saved) /* If the previous bit-field is zero-sized, we've already accounted for its alignment needs (or ignored it, if appropriate) while placing it. */ && ! integer_zerop (DECL_SIZE (prev_saved))) type_align = MAX (type_align, TYPE_ALIGN (TREE_TYPE (prev_saved))); if (maximum_field_alignment != 0) type_align = MIN (type_align, maximum_field_alignment); rli->bitpos = round_up (rli->bitpos, type_align); /* If we really aligned, don't allow subsequent bitfields to undo that. */ rli->prev_field = NULL; } } /* Offset so far becomes the position of this field after normalizing. */ normalize_rli (rli); DECL_FIELD_OFFSET (field) = rli->offset; DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; SET_DECL_OFFSET_ALIGN (field, rli->offset_align); /* If this field ended up more aligned than we thought it would be (we approximate this by seeing if its position changed), lay out the field again; perhaps we can use an integral mode for it now. */ if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field))) actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)); else if (integer_zerop (DECL_FIELD_OFFSET (field))) actual_align = BIGGEST_ALIGNMENT; else if (host_integerp (DECL_FIELD_OFFSET (field), 1)) actual_align = (BITS_PER_UNIT * (tree_low_cst (DECL_FIELD_OFFSET (field), 1) & - tree_low_cst (DECL_FIELD_OFFSET (field), 1))); else actual_align = DECL_OFFSET_ALIGN (field); if (known_align != actual_align) layout_decl (field, actual_align); /* Only the MS bitfields use this. */ if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE(field)) rli->prev_field = field; /* Now add size of this field to the size of the record. If the size is not constant, treat the field as being a multiple of bytes and just adjust the offset, resetting the bit position. Otherwise, apportion the size amongst the bit position and offset. First handle the case of an unspecified size, which can happen when we have an invalid nested struct definition, such as struct j { struct j { int i; } }. The error message is printed in finish_struct. */ if (DECL_SIZE (field) == 0) /* Do nothing. */; else if (TREE_CODE (DECL_SIZE_UNIT (field)) != INTEGER_CST || TREE_CONSTANT_OVERFLOW (DECL_SIZE_UNIT (field))) { rli->offset = size_binop (PLUS_EXPR, rli->offset, convert (sizetype, size_binop (CEIL_DIV_EXPR, rli->bitpos, bitsize_unit_node))); rli->offset = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field)); rli->bitpos = bitsize_zero_node; rli->offset_align = MIN (rli->offset_align, desired_align); } else { rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); normalize_rli (rli); } } /* Assuming that all the fields have been laid out, this function uses RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type indicated by RLI. */ static void finalize_record_size (record_layout_info rli) { tree unpadded_size, unpadded_size_unit; /* Now we want just byte and bit offsets, so set the offset alignment to be a byte and then normalize. */ rli->offset_align = BITS_PER_UNIT; normalize_rli (rli); /* Determine the desired alignment. 
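A hedged worked pass through the rest of this function, with made-up numbers: if the fields end 44 bits into the record and record_align is 32, TYPE_ALIGN becomes 32, unpadded_size is 44 bits, unpadded_size_unit is bumped from 5 to 6 bytes because bitpos is not byte aligned, and the round_up calls below produce a TYPE_SIZE of 64 bits and a TYPE_SIZE_UNIT of 8 bytes (triggering the padding warning when warn_padded is set).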
*/ #ifdef ROUND_TYPE_ALIGN TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->record_align); #else TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align); #endif /* Compute the size so far. Be sure to allow for extra bits in the size in bytes. We have guaranteed above that it will be no more than a single byte. */ unpadded_size = rli_size_so_far (rli); unpadded_size_unit = rli_size_unit_so_far (rli); if (! integer_zerop (rli->bitpos)) unpadded_size_unit = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node); /* Round the size up to be a multiple of the required alignment. */ TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t)); TYPE_SIZE_UNIT (rli->t) = round_up (unpadded_size_unit, TYPE_ALIGN (rli->t) / BITS_PER_UNIT); if (warn_padded && TREE_CONSTANT (unpadded_size) && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0) warning ("padding struct size to alignment boundary"); if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary && TREE_CONSTANT (unpadded_size)) { tree unpacked_size; #ifdef ROUND_TYPE_ALIGN rli->unpacked_align = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align); #else rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align); #endif unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align); if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t))) { TYPE_PACKED (rli->t) = 0; if (TYPE_NAME (rli->t)) { const char *name; if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE) name = IDENTIFIER_POINTER (TYPE_NAME (rli->t)); else name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t))); if (STRICT_ALIGNMENT) warning ("packed attribute causes inefficient alignment for `%s'", name); else warning ("packed attribute is unnecessary for `%s'", name); } else { if (STRICT_ALIGNMENT) warning ("packed attribute causes inefficient alignment"); else warning ("packed attribute is unnecessary"); } } } } /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */ void compute_record_mode (tree type) { tree field; enum machine_mode mode = VOIDmode; /* Most RECORD_TYPEs have BLKmode, so we start off assuming that. However, if possible, we use a mode that fits in a register instead, in order to allow for better optimization down the line. */ TYPE_MODE (type) = BLKmode; if (! host_integerp (TYPE_SIZE (type), 1)) return; /* A record which has any BLKmode members must itself be BLKmode; it can't go in a register. Unless the member is BLKmode only because it isn't aligned. */ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { if (TREE_CODE (field) != FIELD_DECL) continue; if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK || (TYPE_MODE (TREE_TYPE (field)) == BLKmode && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)) && !(TYPE_SIZE (TREE_TYPE (field)) != 0 && integer_zerop (TYPE_SIZE (TREE_TYPE (field))))) || ! host_integerp (bit_position (field), 1) || DECL_SIZE (field) == 0 || ! host_integerp (DECL_SIZE (field), 1)) return; /* If this field is the whole struct, remember its mode so that, say, we can put a double in a class into a DF register instead of forcing it to live in the stack. */ if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field))) mode = DECL_MODE (field); #ifdef MEMBER_TYPE_FORCES_BLK /* With some targets, eg. c4x, it is sub-optimal to access an aligned BLKmode structure as a scalar. 
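Setting such targets aside, a hedged illustration of the mode choice in this function, with hypothetical layouts: a record whose single field fills the whole type, say struct { double d; }, inherits that field's mode (typically DFmode); a record of two 32-bit integers has no such field, so it falls through to mode_for_size_tree and usually ends up in a 64-bit integer mode; and a record containing a real BLKmode member returns early and keeps the BLKmode assigned at the top.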
*/ if (MEMBER_TYPE_FORCES_BLK (field, mode)) return; #endif /* MEMBER_TYPE_FORCES_BLK */ } /* If we only have one real field; use its mode. This only applies to RECORD_TYPE. This does not apply to unions. */ if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode) TYPE_MODE (type) = mode; else TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1); /* If structure's known alignment is less than what the scalar mode would need, and it matters, then stick with BLKmode. */ if (TYPE_MODE (type) != BLKmode && STRICT_ALIGNMENT && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type)))) { /* If this is the only reason this type is BLKmode, then don't force containing types to be BLKmode. */ TYPE_NO_FORCE_BLK (type) = 1; TYPE_MODE (type) = BLKmode; } } /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid out. */ static void finalize_type_size (tree type) { /* Normally, use the alignment corresponding to the mode chosen. However, where strict alignment is not required, avoid over-aligning structures, since most compilers do not do this alignment. */ if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode && (STRICT_ALIGNMENT || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE && TREE_CODE (type) != QUAL_UNION_TYPE && TREE_CODE (type) != ARRAY_TYPE))) { TYPE_ALIGN (type) = GET_MODE_ALIGNMENT (TYPE_MODE (type)); TYPE_USER_ALIGN (type) = 0; } /* Do machine-dependent extra alignment. */ #ifdef ROUND_TYPE_ALIGN TYPE_ALIGN (type) = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT); #endif /* If we failed to find a simple way to calculate the unit size of the type, find it by division. */ if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0) /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the result will fit in sizetype. We will get more efficient code using sizetype, so we force a conversion. */ TYPE_SIZE_UNIT (type) = convert (sizetype, size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type), bitsize_unit_node)); if (TYPE_SIZE (type) != 0) { TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type)); TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN (type) / BITS_PER_UNIT); } /* Evaluate nonconstant sizes only once, either now or as soon as safe. */ if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) TYPE_SIZE (type) = variable_size (TYPE_SIZE (type)); if (TYPE_SIZE_UNIT (type) != 0 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST) TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type)); /* Also layout any other variants of the type. */ if (TYPE_NEXT_VARIANT (type) || type != TYPE_MAIN_VARIANT (type)) { tree variant; /* Record layout info of this variant. */ tree size = TYPE_SIZE (type); tree size_unit = TYPE_SIZE_UNIT (type); unsigned int align = TYPE_ALIGN (type); unsigned int user_align = TYPE_USER_ALIGN (type); enum machine_mode mode = TYPE_MODE (type); /* Copy it into all variants. */ for (variant = TYPE_MAIN_VARIANT (type); variant != 0; variant = TYPE_NEXT_VARIANT (variant)) { TYPE_SIZE (variant) = size; TYPE_SIZE_UNIT (variant) = size_unit; TYPE_ALIGN (variant) = align; TYPE_USER_ALIGN (variant) = user_align; TYPE_MODE (variant) = mode; } } } /* Do all of the work required to layout the type indicated by RLI, once the fields have been laid out. This function will call `free' for RLI, unless FREE_P is false. Passing a value other than false for FREE_P is bad practice; this option only exists to support the G++ 3.2 ABI. 
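For orientation, a hedged sketch of the usual calling sequence, mirroring the RECORD_TYPE case of layout_type later in this file (the TYPE node and its chain of FIELD_DECLs are assumed to exist already):

   record_layout_info rli = start_record_layout (type);
   tree field;
   for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
     place_field (rli, field);
   finish_record_layout (rli, true);   // also lays out pending statics and frees RLI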
*/ void finish_record_layout (record_layout_info rli, int free_p) { /* Compute the final size. */ finalize_record_size (rli); /* Compute the TYPE_MODE for the record. */ compute_record_mode (rli->t); /* Perform any last tweaks to the TYPE_SIZE, etc. */ finalize_type_size (rli->t); /* Lay out any static members. This is done now because their type may use the record's type. */ while (rli->pending_statics) { layout_decl (TREE_VALUE (rli->pending_statics), 0); rli->pending_statics = TREE_CHAIN (rli->pending_statics); } /* Clean up. */ if (free_p) free (rli); } /* Finish processing a builtin RECORD_TYPE type TYPE. It's name is NAME, its fields are chained in reverse on FIELDS. If ALIGN_TYPE is non-null, it is given the same alignment as ALIGN_TYPE. */ void finish_builtin_struct (tree type, const char *name, tree fields, tree align_type) { tree tail, next; for (tail = NULL_TREE; fields; tail = fields, fields = next) { DECL_FIELD_CONTEXT (fields) = type; next = TREE_CHAIN (fields); TREE_CHAIN (fields) = tail; } TYPE_FIELDS (type) = tail; if (align_type) { TYPE_ALIGN (type) = TYPE_ALIGN (align_type); TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type); } layout_type (type); #if 0 /* not yet, should get fixed properly later */ TYPE_NAME (type) = make_type_decl (get_identifier (name), type); #else TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type); #endif TYPE_STUB_DECL (type) = TYPE_NAME (type); layout_decl (TYPE_NAME (type), 0); } /* Calculate the mode, size, and alignment for TYPE. For an array type, calculate the element separation as well. Record TYPE on the chain of permanent or temporary types so that dbxout will find out about it. TYPE_SIZE of a type is nonzero if the type has been laid out already. layout_type does nothing on such a type. If the type is incomplete, its TYPE_SIZE remains zero. */ void layout_type (tree type) { if (type == 0) abort (); if (type == error_mark_node) return; /* Do nothing if type has been laid out before. */ if (TYPE_SIZE (type)) return; switch (TREE_CODE (type)) { case LANG_TYPE: /* This kind of type is the responsibility of the language-specific code. */ abort (); case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */ if (TYPE_PRECISION (type) == 0) TYPE_PRECISION (type) = 1; /* default to one byte/boolean. */ /* ... fall through ... */ case INTEGER_TYPE: case ENUMERAL_TYPE: case CHAR_TYPE: if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0) TYPE_UNSIGNED (type) = 1; TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT); TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); break; case REAL_TYPE: TYPE_MODE (type) = mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0); TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); break; case COMPLEX_TYPE: TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); TYPE_MODE (type) = mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)), (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE ? 
MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT), 0); TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); break; case VECTOR_TYPE: TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); break; case VOID_TYPE: /* This is an incomplete type and so doesn't have a size. */ TYPE_ALIGN (type) = 1; TYPE_USER_ALIGN (type) = 0; TYPE_MODE (type) = VOIDmode; break; case OFFSET_TYPE: TYPE_SIZE (type) = bitsize_int (POINTER_SIZE); TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT); /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be integral. */ TYPE_MODE (type) = mode_for_size (POINTER_SIZE, MODE_INT, 0); break; case FUNCTION_TYPE: case METHOD_TYPE: /* It's hard to see what the mode and size of a function ought to be, but we do know the alignment is FUNCTION_BOUNDARY, so make it consistent with that. */ TYPE_MODE (type) = mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0); TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY); TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT); break; case POINTER_TYPE: case REFERENCE_TYPE: { enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal) ? Pmode : TYPE_MODE (type)); int nbits = GET_MODE_BITSIZE (mode); TYPE_SIZE (type) = bitsize_int (nbits); TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); TYPE_UNSIGNED (type) = 1; TYPE_PRECISION (type) = nbits; } break; case ARRAY_TYPE: { tree index = TYPE_DOMAIN (type); tree element = TREE_TYPE (type); build_pointer_type (element); /* We need to know both bounds in order to compute the size. */ if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index) && TYPE_SIZE (element)) { tree ub = TYPE_MAX_VALUE (index); tree lb = TYPE_MIN_VALUE (index); tree length; tree element_size; /* The initial subtraction should happen in the original type so that (possible) negative values are handled appropriately. */ length = size_binop (PLUS_EXPR, size_one_node, convert (sizetype, fold (build (MINUS_EXPR, TREE_TYPE (lb), ub, lb)))); /* Special handling for arrays of bits (for Chill). */ element_size = TYPE_SIZE (element); if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element) && (integer_zerop (TYPE_MAX_VALUE (element)) || integer_onep (TYPE_MAX_VALUE (element))) && host_integerp (TYPE_MIN_VALUE (element), 1)) { HOST_WIDE_INT maxvalue = tree_low_cst (TYPE_MAX_VALUE (element), 1); HOST_WIDE_INT minvalue = tree_low_cst (TYPE_MIN_VALUE (element), 1); if (maxvalue - minvalue == 1 && (maxvalue == 1 || maxvalue == 0)) element_size = integer_one_node; } /* If neither bound is a constant and sizetype is signed, make sure the size is never negative. We should really do this if *either* bound is non-constant, but this is the best compromise between C and Ada. */ if (!TYPE_UNSIGNED (sizetype) && TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST && TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST) length = size_binop (MAX_EXPR, length, size_zero_node); TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size, convert (bitsizetype, length)); /* If we know the size of the element, calculate the total size directly, rather than do some division thing below. This optimization helps Fortran assumed-size arrays (where the size of the array is determined at runtime) substantially. 
Note that we can't do this in the case where the size of the elements is one bit since TYPE_SIZE_UNIT cannot be set correctly in that case. */ if (TYPE_SIZE_UNIT (element) != 0 && ! integer_onep (element_size)) TYPE_SIZE_UNIT (type) = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length); } /* Now round the alignment and size, using machine-dependent criteria if any. */ #ifdef ROUND_TYPE_ALIGN TYPE_ALIGN (type) = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT); #else TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT); #endif TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element); TYPE_MODE (type) = BLKmode; if (TYPE_SIZE (type) != 0 #ifdef MEMBER_TYPE_FORCES_BLK && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode) #endif /* BLKmode elements force BLKmode aggregate; else extract/store fields may lose. */ && (TYPE_MODE (TREE_TYPE (type)) != BLKmode || TYPE_NO_FORCE_BLK (TREE_TYPE (type)))) { /* One-element arrays get the component type's mode. */ if (simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (type)))) TYPE_MODE (type) = TYPE_MODE (TREE_TYPE (type)); else TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1); if (TYPE_MODE (type) != BLKmode && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)) && TYPE_MODE (type) != BLKmode) { TYPE_NO_FORCE_BLK (type) = 1; TYPE_MODE (type) = BLKmode; } } break; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { tree field; record_layout_info rli; /* Initialize the layout information. */ rli = start_record_layout (type); /* If this is a QUAL_UNION_TYPE, we want to process the fields in the reverse order in building the COND_EXPR that denotes its size. We reverse them again later. */ if (TREE_CODE (type) == QUAL_UNION_TYPE) TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); /* Place all the fields. */ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) place_field (rli, field); if (TREE_CODE (type) == QUAL_UNION_TYPE) TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); if (lang_adjust_rli) (*lang_adjust_rli) (rli); /* Finish laying out the record. */ finish_record_layout (rli, /*free_p=*/true); } break; case SET_TYPE: /* Used by Chill and Pascal. */ if (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST) abort (); else { #ifndef SET_WORD_SIZE #define SET_WORD_SIZE BITS_PER_WORD #endif unsigned int alignment = set_alignment ? set_alignment : SET_WORD_SIZE; HOST_WIDE_INT size_in_bits = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1); HOST_WIDE_INT rounded_size = ((size_in_bits + alignment - 1) / alignment) * alignment; if (rounded_size > (int) alignment) TYPE_MODE (type) = BLKmode; else TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1); TYPE_SIZE (type) = bitsize_int (rounded_size); TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT); TYPE_ALIGN (type) = alignment; TYPE_USER_ALIGN (type) = 0; TYPE_PRECISION (type) = size_in_bits; } break; case FILE_TYPE: /* The size may vary in different languages, so the language front end should fill in the size. */ TYPE_ALIGN (type) = BIGGEST_ALIGNMENT; TYPE_USER_ALIGN (type) = 0; TYPE_MODE (type) = BLKmode; break; default: abort (); } /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For records and unions, finish_record_layout already called this function. 
*/ if (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE && TREE_CODE (type) != QUAL_UNION_TYPE) finalize_type_size (type); /* If this type is created before sizetype has been permanently set, record it so set_sizetype can fix it up. */ if (! sizetype_set) early_type_list = tree_cons (NULL_TREE, type, early_type_list); /* If an alias set has been set for this aggregate when it was incomplete, force it into alias set 0. This is too conservative, but we cannot call record_component_aliases here because some frontends still change the aggregates after layout_type. */ if (AGGREGATE_TYPE_P (type) && TYPE_ALIAS_SET_KNOWN_P (type)) TYPE_ALIAS_SET (type) = 0; } /* Create and return a type for signed integers of PRECISION bits. */ tree make_signed_type (int precision) { tree type = make_node (INTEGER_TYPE); TYPE_PRECISION (type) = precision; fixup_signed_type (type); return type; } /* Create and return a type for unsigned integers of PRECISION bits. */ tree make_unsigned_type (int precision) { tree type = make_node (INTEGER_TYPE); TYPE_PRECISION (type) = precision; fixup_unsigned_type (type); return type; } /* Initialize sizetype and bitsizetype to a reasonable and temporary value to enable integer types to be created. */ void initialize_sizetypes (void) { tree t = make_node (INTEGER_TYPE); /* Set this so we do something reasonable for the build_int_2 calls below. */ integer_type_node = t; TYPE_MODE (t) = SImode; TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode); TYPE_USER_ALIGN (t) = 0; TYPE_SIZE (t) = build_int_2 (GET_MODE_BITSIZE (SImode), 0); TYPE_SIZE_UNIT (t) = build_int_2 (GET_MODE_SIZE (SImode), 0); TYPE_UNSIGNED (t) = 1; TYPE_PRECISION (t) = GET_MODE_BITSIZE (SImode); TYPE_MIN_VALUE (t) = build_int_2 (0, 0); TYPE_IS_SIZETYPE (t) = 1; /* 1000 avoids problems with possible overflow and is certainly larger than any size value we'd want to be storing. */ TYPE_MAX_VALUE (t) = build_int_2 (1000, 0); /* These two must be different nodes because of the caching done in size_int_wide. */ sizetype = t; bitsizetype = copy_node (t); integer_type_node = 0; } /* Set sizetype to TYPE, and initialize *sizetype accordingly. Also update the type of any standard type's sizes made so far. */ void set_sizetype (tree type) { int oprecision = TYPE_PRECISION (type); /* The *bitsizetype types use a precision that avoids overflows when calculating signed sizes / offsets in bits. However, when cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit precision. */ int precision = MIN (oprecision + BITS_PER_UNIT_LOG + 1, 2 * HOST_BITS_PER_WIDE_INT); unsigned int i; tree t; if (sizetype_set) abort (); /* Make copies of nodes since we'll be setting TYPE_IS_SIZETYPE. 
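A hedged numeric example, assuming a hypothetical 32-bit target with BITS_PER_UNIT_LOG == 3 and a 64-bit HOST_WIDE_INT: oprecision == 32 gives bitsizetype a precision of MIN (32 + 3 + 1, 128) == 36 bits, which is enough to multiply any 32-bit byte count by 8 (and carry a sign) without overflowing.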
*/ sizetype = copy_node (type); TYPE_ORIG_SIZE_TYPE (sizetype) = type; TYPE_IS_SIZETYPE (sizetype) = 1; bitsizetype = make_node (INTEGER_TYPE); TYPE_NAME (bitsizetype) = TYPE_NAME (type); TYPE_PRECISION (bitsizetype) = precision; TYPE_IS_SIZETYPE (bitsizetype) = 1; if (TYPE_UNSIGNED (type)) fixup_unsigned_type (bitsizetype); else fixup_signed_type (bitsizetype); layout_type (bitsizetype); if (TYPE_UNSIGNED (type)) { usizetype = sizetype; ubitsizetype = bitsizetype; ssizetype = copy_node (make_signed_type (oprecision)); sbitsizetype = copy_node (make_signed_type (precision)); } else { ssizetype = sizetype; sbitsizetype = bitsizetype; usizetype = copy_node (make_unsigned_type (oprecision)); ubitsizetype = copy_node (make_unsigned_type (precision)); } TYPE_NAME (bitsizetype) = get_identifier ("bit_size_type"); /* Show is a sizetype, is a main type, and has no pointers to it. */ for (i = 0; i < ARRAY_SIZE (sizetype_tab); i++) { TYPE_IS_SIZETYPE (sizetype_tab[i]) = 1; TYPE_MAIN_VARIANT (sizetype_tab[i]) = sizetype_tab[i]; TYPE_NEXT_VARIANT (sizetype_tab[i]) = 0; TYPE_POINTER_TO (sizetype_tab[i]) = 0; TYPE_REFERENCE_TO (sizetype_tab[i]) = 0; } /* Go down each of the types we already made and set the proper type for the sizes in them. */ for (t = early_type_list; t != 0; t = TREE_CHAIN (t)) { if (TREE_CODE (TREE_VALUE (t)) != INTEGER_TYPE && TREE_CODE (TREE_VALUE (t)) != BOOLEAN_TYPE) abort (); TREE_TYPE (TYPE_SIZE (TREE_VALUE (t))) = bitsizetype; TREE_TYPE (TYPE_SIZE_UNIT (TREE_VALUE (t))) = sizetype; } early_type_list = 0; sizetype_set = 1; } /* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE, BOOLEAN_TYPE, or CHAR_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE for TYPE, based on the PRECISION and whether or not the TYPE IS_UNSIGNED. PRECISION need not correspond to a width supported natively by the hardware; for example, on a machine with 8-bit, 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or 61. */ void set_min_and_max_values_for_integral_type (tree type, int precision, bool is_unsigned) { tree min_value; tree max_value; if (is_unsigned) { min_value = build_int_2 (0, 0); max_value = build_int_2 (precision - HOST_BITS_PER_WIDE_INT >= 0 ? -1 : ((HOST_WIDE_INT) 1 << precision) - 1, precision - HOST_BITS_PER_WIDE_INT > 0 ? ((unsigned HOST_WIDE_INT) ~0 >> (HOST_BITS_PER_WIDE_INT - (precision - HOST_BITS_PER_WIDE_INT))) : 0); } else { min_value = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0 ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)), (((HOST_WIDE_INT) (-1) << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 ? precision - HOST_BITS_PER_WIDE_INT - 1 : 0)))); max_value = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0 ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1), (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 ? (((HOST_WIDE_INT) 1 << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1 : 0)); } TREE_TYPE (min_value) = type; TREE_TYPE (max_value) = type; TYPE_MIN_VALUE (type) = min_value; TYPE_MAX_VALUE (type) = max_value; } /* Set the extreme values of TYPE based on its precision in bits, then lay it out. Used when make_signed_type won't do because the tree code is not INTEGER_TYPE. E.g. for Pascal, when the -fsigned-char option is given. */ void fixup_signed_type (tree type) { int precision = TYPE_PRECISION (type); /* We can not represent properly constants greater then 2 * HOST_BITS_PER_WIDE_INT, still we need the types as they are used by i386 vector extensions and friends. 
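For the set_min_and_max_values_for_integral_type call below, a hedged illustration with hypothetical precisions on a 64-bit HOST_WIDE_INT host: a 7-bit unsigned type receives the range 0 .. 127, a 7-bit signed type receives -64 .. 63, and a 64-bit signed type gets a minimum whose low word has only bit 63 set and whose high word is all ones, since build_int_2 takes a sign-extended (low, high) word pair.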
*/ if (precision > HOST_BITS_PER_WIDE_INT * 2) precision = HOST_BITS_PER_WIDE_INT * 2; set_min_and_max_values_for_integral_type (type, precision, /*is_unsigned=*/false); /* Lay out the type: set its alignment, size, etc. */ layout_type (type); } /* Set the extreme values of TYPE based on its precision in bits, then lay it out. This is used both in `make_unsigned_type' and for enumeral types. */ void fixup_unsigned_type (tree type) { int precision = TYPE_PRECISION (type); /* We can not represent properly constants greater then 2 * HOST_BITS_PER_WIDE_INT, still we need the types as they are used by i386 vector extensions and friends. */ if (precision > HOST_BITS_PER_WIDE_INT * 2) precision = HOST_BITS_PER_WIDE_INT * 2; set_min_and_max_values_for_integral_type (type, precision, /*is_unsigned=*/true); /* Lay out the type: set its alignment, size, etc. */ layout_type (type); } /* Find the best machine mode to use when referencing a bit field of length BITSIZE bits starting at BITPOS. The underlying object is known to be aligned to a boundary of ALIGN bits. If LARGEST_MODE is not VOIDmode, it means that we should not use a mode larger than LARGEST_MODE (usually SImode). If no mode meets all these conditions, we return VOIDmode. Otherwise, if VOLATILEP is true or SLOW_BYTE_ACCESS is false, we return the smallest mode meeting these conditions. Otherwise (VOLATILEP is false and SLOW_BYTE_ACCESS is true), we return the largest mode (but a mode no wider than UNITS_PER_WORD) that meets all the conditions. */ enum machine_mode get_best_mode (int bitsize, int bitpos, unsigned int align, enum machine_mode largest_mode, int volatilep) { enum machine_mode mode; unsigned int unit = 0; /* Find the narrowest integer mode that contains the bit field. */ for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) { unit = GET_MODE_BITSIZE (mode); if ((bitpos % unit) + bitsize <= unit) break; } if (mode == VOIDmode /* It is tempting to omit the following line if STRICT_ALIGNMENT is true. But that is incorrect, since if the bitfield uses part of 3 bytes and we use a 4-byte mode, we could get a spurious segv if the extra 4th byte is past the end of memory. (Though at least one Unix compiler ignores this problem: that on the Sequent 386 machine. */ || MIN (unit, BIGGEST_ALIGNMENT) > align || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode))) return VOIDmode; if (SLOW_BYTE_ACCESS && ! volatilep) { enum machine_mode wide_mode = VOIDmode, tmode; for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode)) { unit = GET_MODE_BITSIZE (tmode); if (bitpos / unit == (bitpos + bitsize - 1) / unit && unit <= BITS_PER_WORD && unit <= MIN (align, BIGGEST_ALIGNMENT) && (largest_mode == VOIDmode || unit <= GET_MODE_BITSIZE (largest_mode))) wide_mode = tmode; } if (wide_mode != VOIDmode) return wide_mode; } return mode; } /* Gets minimal and maximal values for MODE (signed or unsigned depending on SIGN). The returned constants are made to be usable in TARGET_MODE. 
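A hedged example with hypothetical modes:

   rtx lo, hi;
   get_mode_bounds (QImode, 1, SImode, &lo, &hi);
   // signed 8-bit range: lo is GEN_INT (-128), hi is GEN_INT (127)
   get_mode_bounds (QImode, 0, SImode, &lo, &hi);
   // unsigned 8-bit range: lo is GEN_INT (0), hi is GEN_INT (255)

Note that the unsigned maximum is built as ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1 so that size == HOST_BITS_PER_WIDE_INT does not shift by a full word width.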
*/ void get_mode_bounds (enum machine_mode mode, int sign, enum machine_mode target_mode, rtx *mmin, rtx *mmax) { unsigned size = GET_MODE_BITSIZE (mode); unsigned HOST_WIDE_INT min_val, max_val; if (size > HOST_BITS_PER_WIDE_INT) abort (); if (sign) { min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1)); max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1; } else { min_val = 0; max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1; } *mmin = GEN_INT (trunc_int_for_mode (min_val, target_mode)); *mmax = GEN_INT (trunc_int_for_mode (max_val, target_mode)); } /* Type information for stor-layout.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_stor_layout_h[] = { { &pending_sizes, 1, sizeof (pending_sizes), >_ggc_mx_tree_node, >_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; /* String pool for GCC. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* String text, identifier text and identifier node allocator. Strings allocated by ggc_alloc_string are stored in an obstack which is never shrunk. Identifiers are uniquely stored in a hash table. We use cpplib's hash table implementation. libiberty's hashtab.c is not used because it requires 100% average space overhead per string, which is unacceptable. Also, this algorithm is faster. */ /* The "" allocated string. */ const char empty_string[] = ""; /* Character strings, each containing a single decimal digit. Written this way to save space. */ const char digit_vector[] = { '0', 0, '1', 0, '2', 0, '3', 0, '4', 0, '5', 0, '6', 0, '7', 0, '8', 0, '9', 0 }; struct ht *ident_hash; static struct obstack string_stack; static hashnode alloc_string_node (hash_table *); static int mark_ident (struct cpp_reader *, hashnode, const void *); static void * stringpool_ggc_alloc (size_t x) { return ggc_alloc (x); } /* Initialize the string pool. */ void init_stringpool (void) { /* Create with 16K (2^14) entries. */ ident_hash = ht_create (14); ident_hash->alloc_node = alloc_string_node; ident_hash->alloc_subobject = stringpool_ggc_alloc; gcc_obstack_init (&string_stack); } /* Allocate a hash node. 
*/ static hashnode alloc_string_node (hash_table *table ATTRIBUTE_UNUSED) { return GCC_IDENT_TO_HT_IDENT (make_node (IDENTIFIER_NODE)); } /* Allocate and return a string constant of length LENGTH, containing CONTENTS. If LENGTH is -1, CONTENTS is assumed to be a nul-terminated string, and the length is calculated using strlen. If the same string constant has been allocated before, that copy is returned this time too. */ const char * ggc_alloc_string (const char *contents, int length) { if (length == -1) length = strlen (contents); if (length == 0) return empty_string; if (length == 1 && ISDIGIT (contents[0])) return digit_string (contents[0] - '0'); obstack_grow0 (&string_stack, contents, length); return obstack_finish (&string_stack); } /* Return an IDENTIFIER_NODE whose name is TEXT (a null-terminated string). If an identifier with that name has previously been referred to, the same node is returned this time. */ #undef get_identifier tree get_identifier (const char *text) { hashnode ht_node = ht_lookup (ident_hash, (const unsigned char *) text, strlen (text), HT_ALLOC); /* ht_node can't be NULL here. */ return HT_IDENT_TO_GCC_IDENT (ht_node); } /* Identical to get_identifier, except that the length is assumed known. */ tree get_identifier_with_length (const char *text, size_t length) { hashnode ht_node = ht_lookup (ident_hash, (const unsigned char *) text, length, HT_ALLOC); /* ht_node can't be NULL here. */ return HT_IDENT_TO_GCC_IDENT (ht_node); } /* If an identifier with the name TEXT (a null-terminated string) has previously been referred to, return that node; otherwise return NULL_TREE. */ tree maybe_get_identifier (const char *text) { hashnode ht_node; ht_node = ht_lookup (ident_hash, (const unsigned char *) text, strlen (text), HT_NO_INSERT); if (ht_node) return HT_IDENT_TO_GCC_IDENT (ht_node); return NULL_TREE; } /* Report some basic statistics about the string pool. */ void stringpool_statistics (void) { ht_dump_statistics (ident_hash); } /* Mark an identifier for GC. */ static int mark_ident (struct cpp_reader *pfile ATTRIBUTE_UNUSED, hashnode h, const void *v ATTRIBUTE_UNUSED) { gt_ggc_m_9tree_node (HT_IDENT_TO_GCC_IDENT (h)); return 1; } /* Mark the trees hanging off the identifier node for GGC. These are handled specially (not using gengtype) because of the special treatment for strings. */ void ggc_mark_stringpool (void) { ht_forall (ident_hash, mark_ident, NULL); } /* Strings are _not_ GCed, but this routine exists so that a separate roots table isn't needed for the few global variables that refer to strings. */ void gt_ggc_m_S (void *x ATTRIBUTE_UNUSED) { } /* Pointer-walking routine for strings (not very interesting, since strings don't contain pointers). */ void gt_pch_p_S (void *obj ATTRIBUTE_UNUSED, void *x ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { } /* PCH pointer-walking routine for strings. */ void gt_pch_n_S (const void *x) { gt_pch_note_object ((void *)x, (void *)x, >_pch_p_S); } /* Handle saving and restoring the string pool for PCH. */ /* SPD is saved in the PCH file and holds the information needed to restore the string pool. */ struct string_pool_data GTY(()) { struct ht_identifier * * GTY((length ("%h.nslots"), nested_ptr (union tree_node, "%h ? GCC_IDENT_TO_HT_IDENT (%h) : NULL", "%h ? HT_IDENT_TO_GCC_IDENT (%h) : NULL"))) entries; unsigned int nslots; unsigned int nelements; }; static GTY(()) struct string_pool_data * spd; /* Save the stringpool data in SPD. 
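In outline, a hedged sketch of how the two halves below pair up during precompiled-header handling (the surrounding PCH driver steps are summarized, not quoted from this file):

   gt_pch_save_stringpool ();      // snapshot ident_hash's entry array into spd
   ... the PCH image, including spd, is written out and later mapped back in ...
   gt_pch_restore_stringpool ();   // ht_load rebuilds ident_hash from the restored spd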
*/ void gt_pch_save_stringpool (void) { spd = ggc_alloc (sizeof (*spd)); spd->nslots = ident_hash->nslots; spd->nelements = ident_hash->nelements; spd->entries = ggc_alloc (sizeof (spd->entries[0]) * spd->nslots); memcpy (spd->entries, ident_hash->entries, spd->nslots * sizeof (spd->entries[0])); } /* Return the stringpool to its state before gt_pch_save_stringpool was called. */ void gt_pch_fixup_stringpool (void) { } /* A PCH file has been restored, which loaded SPD; fill the real hash table from SPD. */ void gt_pch_restore_stringpool (void) { ht_load (ident_hash, spd->entries, spd->nslots, spd->nelements, false); spd = NULL; } /* Type information for stringpool.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_string_pool_data (void *x_p) { struct string_pool_data * const x = (struct string_pool_data *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).nslots); i0++) { { union tree_node * const x1 = ((*x).entries[i0]) ? HT_IDENT_TO_GCC_IDENT (((*x).entries[i0])) : NULL; gt_ggc_m_9tree_node (x1); } } ggc_mark ((*x).entries); } } } void gt_pch_nx_string_pool_data (void *x_p) { struct string_pool_data * const x = (struct string_pool_data *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_16string_pool_data)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).nslots); i0++) { { union tree_node * const x1 = ((*x).entries[i0]) ? HT_IDENT_TO_GCC_IDENT (((*x).entries[i0])) : NULL; gt_pch_n_9tree_node (x1); } } gt_pch_note_object ((*x).entries, x, gt_pch_p_16string_pool_data); } } } void gt_pch_p_16string_pool_data (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct string_pool_data * const x ATTRIBUTE_UNUSED = (struct string_pool_data *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).nslots); i0++) { { union tree_node * x1 = ((*x).entries[i0]) ? HT_IDENT_TO_GCC_IDENT (((*x).entries[i0])) : NULL; if ((void *)((*x).entries) == this_obj) op (&(x1), cookie); (*x).entries[i0] = (x1) ? GCC_IDENT_TO_HT_IDENT ((x1)) : NULL; } } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_stringpool_h[] = { { &spd, 1, sizeof (spd), >_ggc_mx_string_pool_data, >_pch_nx_string_pool_data }, LAST_GGC_ROOT_TAB }; /* Default target hook functions. Copyright (C) 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The migration of target macros to target hooks works as follows: 1. Create a target hook that uses the existing target macros to implement the same functionality. 2. Convert all the MI files to use the hook instead of the macro. 3. Repeat for a majority of the remaining target macros. This will take some time. 4. Tell target maintainers to start migrating. 5. Eventually convert the backends to override the hook instead of defining the macros. This will take some time too. 6. TBD when, poison the macros. Unmigrated targets will break at this point. Note that we expect steps 1-3 to be done by the people that understand what the MI does with each macro, and step 5 to be done by the target maintainers for their respective targets. Note that steps 1 and 2 don't have to be done together, but no target can override the new hook until step 2 is complete for it. Once the macros are poisoned, we will revert to the old migration rules - migrate the macro, callers, and targets all at once. This comment can thus be removed at that point. */ /* Default initializers for a generic GCC target. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. In other words, you are welcome to use, share and improve this program. You are forbidden to forbid anyone else to use, share and improve what you give them. Help stamp out software-hoarding! */ /* See target.h for a description of what this file contains and how to use it. We want to have non-NULL default definitions of all hook functions, even if they do nothing. */ /* Note that if one of these macros must be defined in an OS .h file rather than the .c file, then we need to wrap the default definition in a #ifndef, since files include tm.h before this one. */ /* Assembler output. */ #define TARGET_ASM_OPEN_PAREN "(" #define TARGET_ASM_CLOSE_PAREN ")" #define TARGET_ASM_BYTE_OP "\t.byte\t" #define TARGET_ASM_ALIGNED_HI_OP "\t.short\t" #define TARGET_ASM_ALIGNED_SI_OP "\t.long\t" #define TARGET_ASM_ALIGNED_DI_OP NULL #define TARGET_ASM_ALIGNED_TI_OP NULL /* GAS and SYSV4 assemblers accept these. 
*/ #if defined (OBJECT_FORMAT_ELF) #define TARGET_ASM_UNALIGNED_HI_OP "\t.2byte\t" #define TARGET_ASM_UNALIGNED_SI_OP "\t.4byte\t" #define TARGET_ASM_UNALIGNED_DI_OP "\t.8byte\t" #define TARGET_ASM_UNALIGNED_TI_OP NULL #else #define TARGET_ASM_UNALIGNED_HI_OP NULL #define TARGET_ASM_UNALIGNED_SI_OP NULL #define TARGET_ASM_UNALIGNED_DI_OP NULL #define TARGET_ASM_UNALIGNED_TI_OP NULL #endif /* OBJECT_FORMAT_ELF */ #define TARGET_ASM_INTEGER default_assemble_integer #ifndef TARGET_ASM_GLOBALIZE_LABEL #define TARGET_ASM_GLOBALIZE_LABEL default_globalize_label #endif #ifndef TARGET_ASM_EMIT_UNWIND_LABEL #define TARGET_ASM_EMIT_UNWIND_LABEL default_emit_unwind_label #endif #ifndef TARGET_ASM_INTERNAL_LABEL #define TARGET_ASM_INTERNAL_LABEL default_internal_label #endif #ifndef TARGET_ASM_ASSEMBLE_VISIBILITY #define TARGET_ASM_ASSEMBLE_VISIBILITY default_assemble_visibility #endif #define TARGET_ASM_FUNCTION_PROLOGUE default_function_pro_epilogue #define TARGET_ASM_FUNCTION_EPILOGUE default_function_pro_epilogue #define TARGET_ASM_FUNCTION_END_PROLOGUE no_asm_to_stream #define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE no_asm_to_stream #ifndef TARGET_ASM_SELECT_SECTION #define TARGET_ASM_SELECT_SECTION default_select_section #endif #ifndef TARGET_ASM_UNIQUE_SECTION #define TARGET_ASM_UNIQUE_SECTION default_unique_section #endif #ifndef TARGET_ASM_SELECT_RTX_SECTION #define TARGET_ASM_SELECT_RTX_SECTION default_select_rtx_section #endif #if !defined(TARGET_ASM_CONSTRUCTOR) && !defined(USE_COLLECT2) # ifdef CTORS_SECTION_ASM_OP # define TARGET_ASM_CONSTRUCTOR default_ctor_section_asm_out_constructor # else # ifdef TARGET_ASM_NAMED_SECTION # define TARGET_ASM_CONSTRUCTOR default_named_section_asm_out_constructor # else # define TARGET_ASM_CONSTRUCTOR default_stabs_asm_out_constructor # endif # endif #endif #if !defined(TARGET_ASM_DESTRUCTOR) && !defined(USE_COLLECT2) # ifdef DTORS_SECTION_ASM_OP # define TARGET_ASM_DESTRUCTOR default_dtor_section_asm_out_destructor # else # ifdef TARGET_ASM_NAMED_SECTION # define TARGET_ASM_DESTRUCTOR default_named_section_asm_out_destructor # else # define TARGET_ASM_DESTRUCTOR default_stabs_asm_out_destructor # endif # endif #endif #define TARGET_ASM_OUTPUT_MI_THUNK NULL #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_false #if defined(TARGET_ASM_CONSTRUCTOR) && defined(TARGET_ASM_DESTRUCTOR) #define TARGET_HAVE_CTORS_DTORS true #else #define TARGET_HAVE_CTORS_DTORS false #define TARGET_ASM_CONSTRUCTOR NULL #define TARGET_ASM_DESTRUCTOR NULL #endif #ifdef TARGET_ASM_NAMED_SECTION #define TARGET_HAVE_NAMED_SECTIONS true #else #define TARGET_ASM_NAMED_SECTION default_no_named_section #define TARGET_HAVE_NAMED_SECTIONS false #endif #ifndef TARGET_HAVE_TLS #define TARGET_HAVE_TLS false #endif #ifndef TARGET_HAVE_SRODATA_SECTION #define TARGET_HAVE_SRODATA_SECTION false #endif #ifndef TARGET_TERMINATE_DW2_EH_FRAME_INFO #ifdef EH_FRAME_SECTION_NAME #define TARGET_TERMINATE_DW2_EH_FRAME_INFO false #else #define TARGET_TERMINATE_DW2_EH_FRAME_INFO true #endif #endif #define TARGET_DWARF_REGISTER_SPAN hook_rtx_rtx_null #ifndef TARGET_ASM_EXCEPTION_SECTION #define TARGET_ASM_EXCEPTION_SECTION default_exception_section #endif #ifndef TARGET_ASM_EH_FRAME_SECTION #define TARGET_ASM_EH_FRAME_SECTION default_eh_frame_section #endif #ifndef TARGET_ASM_FILE_START #define TARGET_ASM_FILE_START default_file_start #endif #ifndef TARGET_ASM_FILE_END #define TARGET_ASM_FILE_END hook_void_void #endif #ifndef TARGET_ASM_FILE_START_APP_OFF #define TARGET_ASM_FILE_START_APP_OFF 
false #endif #ifndef TARGET_ASM_FILE_START_FILE_DIRECTIVE #define TARGET_ASM_FILE_START_FILE_DIRECTIVE false #endif #ifndef TARGET_ASM_EXTERNAL_LIBCALL #define TARGET_ASM_EXTERNAL_LIBCALL default_external_libcall #endif #define TARGET_ASM_ALIGNED_INT_OP \ {TARGET_ASM_ALIGNED_HI_OP, \ TARGET_ASM_ALIGNED_SI_OP, \ TARGET_ASM_ALIGNED_DI_OP, \ TARGET_ASM_ALIGNED_TI_OP} #define TARGET_ASM_UNALIGNED_INT_OP \ {TARGET_ASM_UNALIGNED_HI_OP, \ TARGET_ASM_UNALIGNED_SI_OP, \ TARGET_ASM_UNALIGNED_DI_OP, \ TARGET_ASM_UNALIGNED_TI_OP} #define TARGET_ASM_OUT {TARGET_ASM_OPEN_PAREN, \ TARGET_ASM_CLOSE_PAREN, \ TARGET_ASM_BYTE_OP, \ TARGET_ASM_ALIGNED_INT_OP, \ TARGET_ASM_UNALIGNED_INT_OP, \ TARGET_ASM_INTEGER, \ TARGET_ASM_GLOBALIZE_LABEL, \ TARGET_ASM_EMIT_UNWIND_LABEL, \ TARGET_ASM_INTERNAL_LABEL, \ TARGET_ASM_ASSEMBLE_VISIBILITY, \ TARGET_ASM_FUNCTION_PROLOGUE, \ TARGET_ASM_FUNCTION_END_PROLOGUE, \ TARGET_ASM_FUNCTION_BEGIN_EPILOGUE, \ TARGET_ASM_FUNCTION_EPILOGUE, \ TARGET_ASM_NAMED_SECTION, \ TARGET_ASM_EXCEPTION_SECTION, \ TARGET_ASM_EH_FRAME_SECTION, \ TARGET_ASM_SELECT_SECTION, \ TARGET_ASM_SELECT_RTX_SECTION, \ TARGET_ASM_UNIQUE_SECTION, \ TARGET_ASM_CONSTRUCTOR, \ TARGET_ASM_DESTRUCTOR, \ TARGET_ASM_OUTPUT_MI_THUNK, \ TARGET_ASM_CAN_OUTPUT_MI_THUNK, \ TARGET_ASM_FILE_START, \ TARGET_ASM_FILE_END, \ TARGET_ASM_EXTERNAL_LIBCALL} /* Scheduler hooks. All of these default to null pointers, which haifa-sched.c looks for and handles. */ #define TARGET_SCHED_ADJUST_COST 0 #define TARGET_SCHED_ADJUST_PRIORITY 0 #define TARGET_SCHED_ISSUE_RATE 0 #define TARGET_SCHED_VARIABLE_ISSUE 0 #define TARGET_SCHED_INIT 0 #define TARGET_SCHED_FINISH 0 #define TARGET_SCHED_INIT_GLOBAL 0 #define TARGET_SCHED_FINISH_GLOBAL 0 #define TARGET_SCHED_REORDER 0 #define TARGET_SCHED_REORDER2 0 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK 0 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE 0 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN 0 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN 0 #define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN 0 #define TARGET_SCHED_DFA_POST_CYCLE_INSN 0 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD 0 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD 0 #define TARGET_SCHED_DFA_NEW_CYCLE 0 #define TARGET_SCHED_INIT_DFA_BUBBLES 0 #define TARGET_SCHED_DFA_BUBBLE 0 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE 0 #define TARGET_SCHED \ {TARGET_SCHED_ADJUST_COST, \ TARGET_SCHED_ADJUST_PRIORITY, \ TARGET_SCHED_ISSUE_RATE, \ TARGET_SCHED_VARIABLE_ISSUE, \ TARGET_SCHED_INIT, \ TARGET_SCHED_FINISH, \ TARGET_SCHED_INIT_GLOBAL, \ TARGET_SCHED_FINISH_GLOBAL, \ TARGET_SCHED_REORDER, \ TARGET_SCHED_REORDER2, \ TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK, \ TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE, \ TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN, \ TARGET_SCHED_DFA_PRE_CYCLE_INSN, \ TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN, \ TARGET_SCHED_DFA_POST_CYCLE_INSN, \ TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD, \ TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD, \ TARGET_SCHED_DFA_NEW_CYCLE, \ TARGET_SCHED_INIT_DFA_BUBBLES, \ TARGET_SCHED_DFA_BUBBLE, \ TARGET_SCHED_IS_COSTLY_DEPENDENCE} /* In tree.c. */ #define TARGET_MERGE_DECL_ATTRIBUTES merge_decl_attributes #define TARGET_MERGE_TYPE_ATTRIBUTES merge_type_attributes #define TARGET_ATTRIBUTE_TABLE NULL /* In cse.c. */ #define TARGET_ADDRESS_COST default_address_cost /* In builtins.c. */ #define TARGET_INIT_BUILTINS hook_void_void #define TARGET_EXPAND_BUILTIN default_expand_builtin /* In varasm.c. 
*/ #ifndef TARGET_SECTION_TYPE_FLAGS #define TARGET_SECTION_TYPE_FLAGS default_section_type_flags #endif #ifndef TARGET_STRIP_NAME_ENCODING #define TARGET_STRIP_NAME_ENCODING default_strip_name_encoding #endif #ifndef TARGET_BINDS_LOCAL_P #define TARGET_BINDS_LOCAL_P default_binds_local_p #endif #ifndef TARGET_VALID_POINTER_MODE #define TARGET_VALID_POINTER_MODE default_valid_pointer_mode #endif #ifndef TARGET_VECTOR_OPAQUE_P #define TARGET_VECTOR_OPAQUE_P hook_bool_tree_false #endif /* In hook.c. */ #define TARGET_CANNOT_MODIFY_JUMPS_P hook_bool_void_false #define TARGET_BRANCH_TARGET_REGISTER_CLASS hook_int_void_no_regs #define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED hook_bool_bool_false #define TARGET_CANNOT_FORCE_CONST_MEM hook_bool_rtx_false #define TARGET_CANNOT_COPY_INSN_P NULL #define TARGET_DELEGITIMIZE_ADDRESS hook_rtx_rtx_identity #define TARGET_FUNCTION_OK_FOR_SIBCALL hook_bool_tree_tree_false #define TARGET_COMP_TYPE_ATTRIBUTES hook_int_tree_tree_1 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES hook_void_tree #define TARGET_INSERT_ATTRIBUTES hook_void_tree_treeptr #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_tree_false #define TARGET_MS_BITFIELD_LAYOUT_P hook_bool_tree_false #define TARGET_ALIGN_ANON_BITFIELD hook_bool_void_false #define TARGET_RTX_COSTS hook_bool_rtx_int_int_intp_false #define TARGET_MANGLE_FUNDAMENTAL_TYPE hook_constcharptr_tree_null #ifndef TARGET_INIT_LIBFUNCS #define TARGET_INIT_LIBFUNCS hook_void_void #endif #ifndef TARGET_IN_SMALL_DATA_P #define TARGET_IN_SMALL_DATA_P hook_bool_tree_false #endif #ifndef TARGET_ENCODE_SECTION_INFO #define TARGET_ENCODE_SECTION_INFO default_encode_section_info #endif #define TARGET_FIXED_CONDITION_CODE_REGS hook_bool_uintp_uintp_false #define TARGET_CC_MODES_COMPATIBLE default_cc_modes_compatible #define TARGET_MACHINE_DEPENDENT_REORG 0 #define TARGET_BUILD_BUILTIN_VA_LIST std_build_builtin_va_list #define TARGET_GET_PCH_VALIDITY default_get_pch_validity #define TARGET_PCH_VALID_P default_pch_valid_p #define TARGET_DEFAULT_SHORT_ENUMS hook_bool_void_false #define TARGET_BUILTIN_SETJMP_FRAME_VALUE default_builtin_setjmp_frame_value #define TARGET_MD_ASM_CLOBBERS hook_tree_tree_identity #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_false #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_false #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false #define TARGET_STRUCT_VALUE_RTX hook_rtx_tree_int_null #define TARGET_RETURN_IN_MEMORY default_return_in_memory #define TARGET_RETURN_IN_MSB hook_bool_tree_false #define TARGET_EXPAND_BUILTIN_SAVEREGS default_expand_builtin_saveregs #define TARGET_SETUP_INCOMING_VARARGS default_setup_incoming_varargs #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_false #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED \ default_pretend_outgoing_varargs_named #define TARGET_SPLIT_COMPLEX_ARG NULL #ifdef EXPAND_BUILTIN_VA_ARG /* If there's a target-specific va_arg expander, there needs to be a target-specific gimplifier. 
*/ #define TARGET_GIMPLIFY_VA_ARG_EXPR NULL #else #define TARGET_GIMPLIFY_VA_ARG_EXPR std_gimplify_va_arg_expr #endif #define TARGET_LATE_RTL_PROLOGUE_EPILOGUE false #define TARGET_CALLS { \ TARGET_PROMOTE_FUNCTION_ARGS, \ TARGET_PROMOTE_FUNCTION_RETURN, \ TARGET_PROMOTE_PROTOTYPES, \ TARGET_STRUCT_VALUE_RTX, \ TARGET_RETURN_IN_MEMORY, \ TARGET_RETURN_IN_MSB, \ TARGET_EXPAND_BUILTIN_SAVEREGS, \ TARGET_SETUP_INCOMING_VARARGS, \ TARGET_STRICT_ARGUMENT_NAMING, \ TARGET_PRETEND_OUTGOING_VARARGS_NAMED, \ TARGET_SPLIT_COMPLEX_ARG, \ TARGET_GIMPLIFY_VA_ARG_EXPR, \ } #ifndef TARGET_HANDLE_PRAGMA_REDEFINE_EXTNAME #define TARGET_HANDLE_PRAGMA_REDEFINE_EXTNAME 0 #endif #ifndef TARGET_HANDLE_PRAGMA_EXTERN_PREFIX #define TARGET_HANDLE_PRAGMA_EXTERN_PREFIX 0 #endif /* C++ specific. */ #ifndef TARGET_CXX_GUARD_TYPE #define TARGET_CXX_GUARD_TYPE default_cxx_guard_type #endif #ifndef TARGET_CXX_GUARD_MASK_BIT #define TARGET_CXX_GUARD_MASK_BIT hook_bool_void_false #endif #ifndef TARGET_CXX_GET_COOKIE_SIZE #define TARGET_CXX_GET_COOKIE_SIZE default_cxx_get_cookie_size #endif #ifndef TARGET_CXX_COOKIE_HAS_SIZE #define TARGET_CXX_COOKIE_HAS_SIZE hook_bool_void_false #endif #ifndef TARGET_CXX_IMPORT_EXPORT_CLASS #define TARGET_CXX_IMPORT_EXPORT_CLASS NULL #endif #define TARGET_CXX \ { \ TARGET_CXX_GUARD_TYPE, \ TARGET_CXX_GUARD_MASK_BIT, \ TARGET_CXX_GET_COOKIE_SIZE, \ TARGET_CXX_COOKIE_HAS_SIZE, \ TARGET_CXX_IMPORT_EXPORT_CLASS \ } /* The whole shebang. */ #define TARGET_INITIALIZER \ { \ TARGET_ASM_OUT, \ TARGET_SCHED, \ TARGET_MERGE_DECL_ATTRIBUTES, \ TARGET_MERGE_TYPE_ATTRIBUTES, \ TARGET_ATTRIBUTE_TABLE, \ TARGET_COMP_TYPE_ATTRIBUTES, \ TARGET_SET_DEFAULT_TYPE_ATTRIBUTES, \ TARGET_INSERT_ATTRIBUTES, \ TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P, \ TARGET_MS_BITFIELD_LAYOUT_P, \ TARGET_ALIGN_ANON_BITFIELD, \ TARGET_INIT_BUILTINS, \ TARGET_EXPAND_BUILTIN, \ TARGET_MANGLE_FUNDAMENTAL_TYPE, \ TARGET_INIT_LIBFUNCS, \ TARGET_SECTION_TYPE_FLAGS, \ TARGET_CANNOT_MODIFY_JUMPS_P, \ TARGET_BRANCH_TARGET_REGISTER_CLASS, \ TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED, \ TARGET_CANNOT_FORCE_CONST_MEM, \ TARGET_CANNOT_COPY_INSN_P, \ TARGET_DELEGITIMIZE_ADDRESS, \ TARGET_FUNCTION_OK_FOR_SIBCALL, \ TARGET_IN_SMALL_DATA_P, \ TARGET_BINDS_LOCAL_P, \ TARGET_ENCODE_SECTION_INFO, \ TARGET_STRIP_NAME_ENCODING, \ TARGET_VALID_POINTER_MODE, \ TARGET_VECTOR_OPAQUE_P, \ TARGET_RTX_COSTS, \ TARGET_ADDRESS_COST, \ TARGET_DWARF_REGISTER_SPAN, \ TARGET_FIXED_CONDITION_CODE_REGS, \ TARGET_CC_MODES_COMPATIBLE, \ TARGET_MACHINE_DEPENDENT_REORG, \ TARGET_BUILD_BUILTIN_VA_LIST, \ TARGET_GET_PCH_VALIDITY, \ TARGET_PCH_VALID_P, \ TARGET_DEFAULT_SHORT_ENUMS, \ TARGET_BUILTIN_SETJMP_FRAME_VALUE, \ TARGET_MD_ASM_CLOBBERS, \ TARGET_CALLS, \ TARGET_CXX, \ TARGET_HAVE_NAMED_SECTIONS, \ TARGET_HAVE_CTORS_DTORS, \ TARGET_HAVE_TLS, \ TARGET_HAVE_SRODATA_SECTION, \ TARGET_TERMINATE_DW2_EH_FRAME_INFO, \ TARGET_ASM_FILE_START_APP_OFF, \ TARGET_ASM_FILE_START_FILE_DIRECTIVE, \ TARGET_HANDLE_PRAGMA_REDEFINE_EXTNAME, \ TARGET_HANDLE_PRAGMA_EXTERN_PREFIX, \ TARGET_LATE_RTL_PROLOGUE_EPILOGUE, \ }
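/* A usage sketch, not part of target-def.h itself: in a normal (non-amalgamated) build each backend's main .c file includes this header and then overrides the hooks it cares about before instantiating the target vector; the #ifndef-wrapped defaults above can instead be pre-defined from an OS header pulled in via tm.h.  The identifier example_attribute_table is hypothetical, but the #include and the targetm definition are the standard idiom.  */
#if 0
#include "target-def.h"

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE example_attribute_table	/* hypothetical table */

struct gcc_target targetm = TARGET_INITIALIZER;
#endif
/* Default target hook functions. Copyright (C) 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version.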
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ extern void default_external_libcall (rtx); extern enum machine_mode default_cc_modes_compatible (enum machine_mode, enum machine_mode); extern bool default_return_in_memory (tree, tree); extern rtx default_expand_builtin_saveregs (void); extern void default_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int); extern rtx default_builtin_setjmp_frame_value (void); extern bool hook_bool_CUMULATIVE_ARGS_false (CUMULATIVE_ARGS *); extern bool default_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *); extern bool hook_bool_CUMULATIVE_ARGS_true (CUMULATIVE_ARGS *); extern tree default_cxx_guard_type (void); extern tree default_cxx_get_cookie_size (tree); void default_external_libcall (rtx fun ATTRIBUTE_UNUSED) { #ifdef ASM_OUTPUT_EXTERNAL_LIBCALL ASM_OUTPUT_EXTERNAL_LIBCALL(asm_out_file, fun); #endif } enum machine_mode default_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2) { if (m1 == m2) return m1; return VOIDmode; } bool default_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED) { #ifndef RETURN_IN_MEMORY return (TYPE_MODE (type) == BLKmode); #else return RETURN_IN_MEMORY (type); #endif } rtx default_expand_builtin_saveregs (void) { error ("__builtin_saveregs not supported by this target"); return const0_rtx; } void default_setup_incoming_varargs (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED, tree type ATTRIBUTE_UNUSED, int *pretend_arg_size ATTRIBUTE_UNUSED, int second_time ATTRIBUTE_UNUSED) { } /* The default implementation of TARGET_BUILTIN_SETJMP_FRAME_VALUE. */ rtx default_builtin_setjmp_frame_value (void) { return virtual_stack_vars_rtx; } /* Generic hook that takes a CUMULATIVE_ARGS pointer and returns false. */ bool hook_bool_CUMULATIVE_ARGS_false (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED) { return false; } bool default_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED) { return (targetm.calls.setup_incoming_varargs != default_setup_incoming_varargs); } /* Generic hook that takes a CUMULATIVE_ARGS pointer and returns true. */ bool hook_bool_CUMULATIVE_ARGS_true (CUMULATIVE_ARGS * a ATTRIBUTE_UNUSED) { return true; } /* The generic C++ ABI specifies this is a 64-bit value. */ tree default_cxx_guard_type (void) { return long_long_integer_type_node; } /* Returns the size of the cookie to use when allocating an array whose elements have the indicated TYPE. Assumes that it is already known that a cookie is needed. */ tree default_cxx_get_cookie_size (tree type) { tree cookie_size; /* We need to allocate an additional max (sizeof (size_t), alignof (true_type)) bytes. */ tree sizetype_size; tree type_align; sizetype_size = size_in_bytes (sizetype); type_align = size_int (TYPE_ALIGN_UNIT (type)); if (INT_CST_LT_UNSIGNED (type_align, sizetype_size)) cookie_size = sizetype_size; else cookie_size = type_align; return cookie_size; } /* Timing variables for measuring compiler performance. Copyright (C) 2000, 2003, 2004 Free Software Foundation, Inc. Contributed by Alex Samuel This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef HAVE_SYS_TIMES_H # include <sys/times.h> #endif #ifdef HAVE_SYS_RESOURCE_H #include <sys/resource.h> #endif #ifndef HAVE_CLOCK_T typedef int clock_t; #endif #ifndef HAVE_STRUCT_TMS struct tms { clock_t tms_utime; clock_t tms_stime; clock_t tms_cutime; clock_t tms_cstime; }; #endif #ifndef RUSAGE_SELF # define RUSAGE_SELF 0 #endif /* Calculation of scale factor to convert ticks to microseconds. We mustn't use CLOCKS_PER_SEC except with clock(). */ #if HAVE_SYSCONF && defined _SC_CLK_TCK # define TICKS_PER_SECOND sysconf (_SC_CLK_TCK) /* POSIX 1003.1-1996 */ #else # ifdef CLK_TCK # define TICKS_PER_SECOND CLK_TCK /* POSIX 1003.1-1988; obsolescent */ # else # ifdef HZ # define TICKS_PER_SECOND HZ /* traditional UNIX */ # else # define TICKS_PER_SECOND 100 /* often the correct value */ # endif # endif #endif /* Prefer times to getrusage to clock (each gives successively less information). */ #ifdef HAVE_TIMES # if defined HAVE_DECL_TIMES && !HAVE_DECL_TIMES extern clock_t times (struct tms *); # endif # define USE_TIMES # define HAVE_USER_TIME # define HAVE_SYS_TIME # define HAVE_WALL_TIME #else #ifdef HAVE_GETRUSAGE # if defined HAVE_DECL_GETRUSAGE && !HAVE_DECL_GETRUSAGE extern int getrusage (int, struct rusage *); # endif # define USE_GETRUSAGE # define HAVE_USER_TIME # define HAVE_SYS_TIME #else #ifdef HAVE_CLOCK # if defined HAVE_DECL_CLOCK && !HAVE_DECL_CLOCK extern clock_t clock (void); # endif # define USE_CLOCK # define HAVE_USER_TIME #endif #endif #endif /* libc is very likely to have snuck a call to sysconf() into one of the underlying constants, and that can be very slow, so we have to precompute them. Whose wonderful idea was it to make all those _constants_ variable at run time, anyway? */ #ifdef USE_TIMES static double ticks_to_msec; #define TICKS_TO_MSEC (1 / (double)TICKS_PER_SECOND) #endif #ifdef USE_CLOCK static double clocks_to_msec; #define CLOCKS_TO_MSEC (1 / (double)CLOCKS_PER_SEC) #endif static bool timevar_enable; /* See timevar.h for an explanation of timing variables. */ /* A timing variable. */ struct timevar_def { /* Elapsed time for this variable. */ struct timevar_time_def elapsed; /* If this variable is timed independently of the timing stack, using timevar_start, this contains the start time. */ struct timevar_time_def start_time; /* The name of this timing variable. */ const char *name; /* Nonzero if this timing variable is running as a standalone timer. */ unsigned standalone : 1; /* Nonzero if this timing variable was ever started or pushed onto the timing stack. */ unsigned used : 1; }; /* An element on the timing stack. Elapsed time is attributed to the topmost timing variable on the stack. */ struct timevar_stack_def { /* The timing variable at this stack level. */ struct timevar_def *timevar; /* The next lower timing variable context in the stack. */ struct timevar_stack_def *next; };
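/* A usage sketch (hypothetical call site; the real calls are scattered throughout the compiler): elapsed time is credited to whichever timing variable is on top of the timing stack, so a pass brackets its work with a push/pop pair, while timevar_start/timevar_stop drive a timer that runs independently of the stack.  */
#if 0
  timevar_start (TV_TOTAL);	/* standalone timer for the whole run */

  timevar_push (TV_PARSE);	/* time below is charged to "parser" */
  /* ... run the parser ... */
  timevar_pop (TV_PARSE);

  timevar_stop (TV_TOTAL);
  timevar_print (stderr);	/* summary, normalized against TV_TOTAL */
#endif
/* Declared timing variables.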
Constructed from the contents of timevar.def. */ static struct timevar_def timevars[TIMEVAR_LAST]; /* The top of the timing stack. */ static struct timevar_stack_def *timing_stack; /* A list of unused (i.e. allocated and subsequently popped) timevar_stack_def instances. */ static struct timevar_stack_def *unused_stack_instances; /* The time at which the topmost element on the timing stack was pushed. Time elapsed since then is attributed to the topmost element. */ static struct timevar_time_def start_time; static void get_time (struct timevar_time_def *); static void timevar_accumulate (struct timevar_time_def *, struct timevar_time_def *, struct timevar_time_def *); /* Fill the current times into TIME. The definition of this function also defines any or all of the HAVE_USER_TIME, HAVE_SYS_TIME, and HAVE_WALL_TIME macros. */ static void get_time (struct timevar_time_def *now) { now->user = 0; now->sys = 0; now->wall = 0; if (!timevar_enable) return; { #ifdef USE_TIMES struct tms tms; now->wall = times (&tms) * ticks_to_msec; now->user = tms.tms_utime * ticks_to_msec; now->sys = tms.tms_stime * ticks_to_msec; #endif #ifdef USE_GETRUSAGE struct rusage rusage; getrusage (RUSAGE_SELF, &rusage); now->user = rusage.ru_utime.tv_sec + rusage.ru_utime.tv_usec * 1e-6; now->sys = rusage.ru_stime.tv_sec + rusage.ru_stime.tv_usec * 1e-6; #endif #ifdef USE_CLOCK now->user = clock () * clocks_to_msec; #endif } } /* Add the difference between STOP_TIME and START_TIME to TIMER. */ static void timevar_accumulate (struct timevar_time_def *timer, struct timevar_time_def *start_time, struct timevar_time_def *stop_time) { timer->user += stop_time->user - start_time->user; timer->sys += stop_time->sys - start_time->sys; timer->wall += stop_time->wall - start_time->wall; } /* Initialize timing variables. */ void timevar_init (void) { timevar_enable = true; /* Zero all elapsed times. */ memset (timevars, 0, sizeof (timevars)); /* Initialize the names of timing variables. */ #define DEFTIMEVAR(identifier__, name__) \ timevars[identifier__].name = name__; /* This file contains the definitions for timing variables used to measure run-time performance of the compiler. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Alex Samuel This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains timing variable definitions, used by timevar.h and timevar.c. Syntax: DEFTIMEVAR (id, name) where ID is the enumeral value used to identify the timing variable, and NAME is a character string describing its purpose. */ /* The total execution time. */ DEFTIMEVAR (TV_TOTAL , "total time") /* Time spent garbage-collecting. */ DEFTIMEVAR (TV_GC , "garbage collection") /* Time spent generating dump files. 
*/ DEFTIMEVAR (TV_DUMP , "dump files") DEFTIMEVAR (TV_CGRAPH , "callgraph construction") DEFTIMEVAR (TV_CGRAPHOPT , "callgraph optimization") /* Time spent by constructing CFG. */ DEFTIMEVAR (TV_CFG , "cfg construction") /* Time spent by cleaning up CFG. */ DEFTIMEVAR (TV_CLEANUP_CFG , "cfg cleanup") DEFTIMEVAR (TV_CFG_VERIFY , "CFG verifier") DEFTIMEVAR (TV_DELETE_TRIVIALLY_DEAD , "trivially dead code") /* Time spent by life analysis. */ DEFTIMEVAR (TV_LIFE , "life analysis") DEFTIMEVAR (TV_LIFE_UPDATE , "life info update") DEFTIMEVAR (TV_ALIAS_ANALYSIS , "alias analysis") DEFTIMEVAR (TV_REG_SCAN , "register scan") DEFTIMEVAR (TV_REBUILD_JUMP , "rebuild jump labels") /* Timing in various stages of the compiler. */ DEFTIMEVAR (TV_CPP , "preprocessing") DEFTIMEVAR (TV_LEX , "lexical analysis") DEFTIMEVAR (TV_PARSE , "parser") DEFTIMEVAR (TV_NAME_LOOKUP , "name lookup") DEFTIMEVAR (TV_INTEGRATION , "integration") DEFTIMEVAR (TV_TREE_GIMPLIFY , "tree gimplify") DEFTIMEVAR (TV_TREE_EH , "tree eh") DEFTIMEVAR (TV_TREE_CFG , "tree CFG construction") DEFTIMEVAR (TV_TREE_CLEANUP_CFG , "tree CFG cleanup") DEFTIMEVAR (TV_TREE_PTA , "tree PTA") DEFTIMEVAR (TV_TREE_MAY_ALIAS , "tree alias analysis") DEFTIMEVAR (TV_TREE_INSERT_PHI_NODES , "tree PHI insertion") DEFTIMEVAR (TV_TREE_SSA_REWRITE_BLOCKS, "tree SSA rewrite") DEFTIMEVAR (TV_TREE_SSA_OTHER , "tree SSA other") DEFTIMEVAR (TV_TREE_OPS , "tree operand scan") DEFTIMEVAR (TV_TREE_SSA_DOMINATOR_OPTS , "dominator optimization") DEFTIMEVAR (TV_TREE_SRA , "tree SRA") DEFTIMEVAR (TV_TREE_CCP , "tree CCP") DEFTIMEVAR (TV_TREE_SPLIT_EDGES , "tree split crit edges") DEFTIMEVAR (TV_TREE_PRE , "tree PRE") DEFTIMEVAR (TV_TREE_FRE , "tree FRE") DEFTIMEVAR (TV_TREE_PHIOPT , "tree linearize phis") DEFTIMEVAR (TV_TREE_FORWPROP , "tree forward propagate") DEFTIMEVAR (TV_TREE_DCE , "tree conservative DCE") DEFTIMEVAR (TV_TREE_CD_DCE , "tree aggressive DCE") DEFTIMEVAR (TV_TREE_DSE , "tree DSE") DEFTIMEVAR (TV_TREE_LOOP , "tree loop optimization") DEFTIMEVAR (TV_TREE_CH , "tree copy headers") DEFTIMEVAR (TV_TREE_SSA_TO_NORMAL , "tree SSA to normal") DEFTIMEVAR (TV_TREE_NRV , "tree NRV optimization") DEFTIMEVAR (TV_TREE_COPY_RENAME , "tree rename SSA copies") DEFTIMEVAR (TV_TREE_SSA_VERIFY , "tree SSA verifier") DEFTIMEVAR (TV_TREE_STMT_VERIFY , "tree STMT verifier") DEFTIMEVAR (TV_CGRAPH_VERIFY , "callgraph verifier") DEFTIMEVAR (TV_DOM_FRONTIERS , "dominance frontiers") DEFTIMEVAR (TV_CONTROL_DEPENDENCES , "control dependences") DEFTIMEVAR (TV_OVERLOAD , "overload resolution") DEFTIMEVAR (TV_TEMPLATE_INSTANTIATION, "template instantiation") DEFTIMEVAR (TV_EXPAND , "expand") DEFTIMEVAR (TV_VARCONST , "varconst") DEFTIMEVAR (TV_JUMP , "jump") DEFTIMEVAR (TV_CSE , "CSE") DEFTIMEVAR (TV_GCSE , "global CSE") DEFTIMEVAR (TV_LOOP , "loop analysis") DEFTIMEVAR (TV_BYPASS , "bypass jumps") DEFTIMEVAR (TV_TRACER , "tracer") DEFTIMEVAR (TV_WEB , "web") DEFTIMEVAR (TV_CSE2 , "CSE 2") DEFTIMEVAR (TV_BRANCH_PROB , "branch prediction") DEFTIMEVAR (TV_VPT , "value profile opts") DEFTIMEVAR (TV_FLOW , "flow analysis") DEFTIMEVAR (TV_COMBINE , "combiner") DEFTIMEVAR (TV_IFCVT , "if-conversion") DEFTIMEVAR (TV_REGMOVE , "regmove") DEFTIMEVAR (TV_MODE_SWITCH , "mode switching") DEFTIMEVAR (TV_SMS , "sms modulo scheduling") DEFTIMEVAR (TV_SCHED , "scheduling") DEFTIMEVAR (TV_LOCAL_ALLOC , "local alloc") DEFTIMEVAR (TV_GLOBAL_ALLOC , "global alloc") DEFTIMEVAR (TV_RELOAD_CSE_REGS , "reload CSE regs") DEFTIMEVAR (TV_FLOW2 , "flow 2") DEFTIMEVAR (TV_IFCVT2 , "if-conversion 2") DEFTIMEVAR 
(TV_PEEPHOLE2 , "peephole 2") DEFTIMEVAR (TV_RENAME_REGISTERS , "rename registers") DEFTIMEVAR (TV_SCHED2 , "scheduling 2") DEFTIMEVAR (TV_MACH_DEP , "machine dep reorg") DEFTIMEVAR (TV_DBR_SCHED , "delay branch sched") DEFTIMEVAR (TV_REORDER_BLOCKS , "reorder blocks") DEFTIMEVAR (TV_SHORTEN_BRANCH , "shorten branches") DEFTIMEVAR (TV_REG_STACK , "reg stack") DEFTIMEVAR (TV_FINAL , "final") DEFTIMEVAR (TV_SYMOUT , "symout") DEFTIMEVAR (TV_VAR_TRACKING , "variable tracking") /* Everything else in rest_of_compilation not included above. */ DEFTIMEVAR (TV_REST_OF_COMPILATION , "rest of compilation") #undef DEFTIMEVAR #ifdef USE_TIMES ticks_to_msec = TICKS_TO_MSEC; #endif #ifdef USE_CLOCK clocks_to_msec = CLOCKS_TO_MSEC; #endif } /* Push TIMEVAR onto the timing stack. No further elapsed time is attributed to the previous topmost timing variable on the stack; subsequent elapsed time is attributed to TIMEVAR, until it is popped or another element is pushed on top. TIMEVAR cannot be running as a standalone timer. */ void timevar_push (timevar_id_t timevar) { struct timevar_def *tv = &timevars[timevar]; struct timevar_stack_def *context; struct timevar_time_def now; if (!timevar_enable) return; /* Mark this timing variable as used. */ tv->used = 1; /* Can't push a standalone timer. */ if (tv->standalone) abort (); /* What time is it? */ get_time (&now); /* If the stack isn't empty, attribute the current elapsed time to the old topmost element. */ if (timing_stack) timevar_accumulate (&timing_stack->timevar->elapsed, &start_time, &now); /* Reset the start time; from now on, time is attributed to TIMEVAR. */ start_time = now; /* See if we have a previously-allocated stack instance. If so, take it off the list. If not, malloc a new one. */ if (unused_stack_instances != NULL) { context = unused_stack_instances; unused_stack_instances = unused_stack_instances->next; } else context = xmalloc (sizeof (struct timevar_stack_def)); /* Fill it in and put it on the stack. */ context->timevar = tv; context->next = timing_stack; timing_stack = context; } /* Pop the topmost timing variable element off the timing stack. The popped variable must be TIMEVAR. Elapsed time since that element was pushed on, or since it was last exposed on top of the stack when the element above it was popped off, is credited to that timing variable. */ void timevar_pop (timevar_id_t timevar) { struct timevar_time_def now; struct timevar_stack_def *popped = timing_stack; if (!timevar_enable) return; if (&timevars[timevar] != timing_stack->timevar) { sorry ("cannot timevar_pop '%s' when top of timevars stack is '%s'", timevars[timevar].name, timing_stack->timevar->name); abort (); } /* What time is it? */ get_time (&now); /* Attribute the elapsed time to the element we're popping. */ timevar_accumulate (&popped->timevar->elapsed, &start_time, &now); /* Reset the start time; from now on, time is attributed to the element just exposed on the stack. */ start_time = now; /* Take the item off the stack. */ timing_stack = timing_stack->next; /* Don't delete the stack element; instead, add it to the list of unused elements for later use. */ popped->next = unused_stack_instances; unused_stack_instances = popped; } /* Start timing TIMEVAR independently of the timing stack. Elapsed time until timevar_stop is called for the same timing variable is attributed to TIMEVAR. */ void timevar_start (timevar_id_t timevar) { struct timevar_def *tv = &timevars[timevar]; if (!timevar_enable) return; /* Mark this timing variable as used.
*/ tv->used = 1; /* Don't allow the same timing variable to be started more than once. */ if (tv->standalone) abort (); tv->standalone = 1; get_time (&tv->start_time); } /* Stop timing TIMEVAR. Time elapsed since timevar_start was called is attributed to it. */ void timevar_stop (timevar_id_t timevar) { struct timevar_def *tv = &timevars[timevar]; struct timevar_time_def now; if (!timevar_enable) return; /* TIMEVAR must have been started via timevar_start. */ if (!tv->standalone) abort (); get_time (&now); timevar_accumulate (&tv->elapsed, &tv->start_time, &now); } /* Fill the elapsed time for TIMEVAR into ELAPSED. Returns up-to-date information even if TIMEVAR is currently running. */ void timevar_get (timevar_id_t timevar, struct timevar_time_def *elapsed) { struct timevar_def *tv = &timevars[timevar]; struct timevar_time_def now; *elapsed = tv->elapsed; /* Is TIMEVAR currently running as a standalone timer? */ if (tv->standalone) { get_time (&now); timevar_accumulate (elapsed, &tv->start_time, &now); } /* Or is TIMEVAR at the top of the timer stack? */ else if (timing_stack->timevar == tv) { get_time (&now); timevar_accumulate (elapsed, &start_time, &now); } } /* Summarize timing variables to FP. The timing variable TV_TOTAL has a special meaning -- it's considered to be the total elapsed time, for normalizing the others, and is displayed last. */ void timevar_print (FILE *fp) { /* Only print stuff if we have some sort of time information. */ #if defined (HAVE_USER_TIME) || defined (HAVE_SYS_TIME) || defined (HAVE_WALL_TIME) unsigned int /* timevar_id_t */ id; struct timevar_time_def *total = &timevars[TV_TOTAL].elapsed; struct timevar_time_def now; if (!timevar_enable) return; /* Update timing information in case we're calling this from GDB. */ if (fp == 0) fp = stderr; /* What time is it? */ get_time (&now); /* If the stack isn't empty, attribute the current elapsed time to the old topmost element. */ if (timing_stack) timevar_accumulate (&timing_stack->timevar->elapsed, &start_time, &now); /* Reset the start time; from now on, time is attributed to TIMEVAR. */ start_time = now; fputs (_("\nExecution times (seconds)\n"), fp); for (id = 0; id < (unsigned int) TIMEVAR_LAST; ++id) { struct timevar_def *tv = &timevars[(timevar_id_t) id]; const double tiny = 5e-3; /* Don't print the total execution time here; that goes at the end. */ if ((timevar_id_t) id == TV_TOTAL) continue; /* Don't print timing variables that were never used. */ if (!tv->used) continue; /* Don't print timing variables if we're going to get a row of zeroes. */ if (tv->elapsed.user < tiny && tv->elapsed.sys < tiny && tv->elapsed.wall < tiny) continue; /* The timing variable name. */ fprintf (fp, " %-22s:", tv->name); #ifdef HAVE_USER_TIME /* Print user-mode time for this process. */ fprintf (fp, "%7.2f (%2.0f%%) usr", tv->elapsed.user, (total->user == 0 ? 0 : tv->elapsed.user / total->user) * 100); #endif /* HAVE_USER_TIME */ #ifdef HAVE_SYS_TIME /* Print system-mode time for this process. */ fprintf (fp, "%7.2f (%2.0f%%) sys", tv->elapsed.sys, (total->sys == 0 ? 0 : tv->elapsed.sys / total->sys) * 100); #endif /* HAVE_SYS_TIME */ #ifdef HAVE_WALL_TIME /* Print wall clock time elapsed. */ fprintf (fp, "%7.2f (%2.0f%%) wall", tv->elapsed.wall, (total->wall == 0 ? 0 : tv->elapsed.wall / total->wall) * 100); #endif /* HAVE_WALL_TIME */ putc ('\n', fp); } /* Print total time.
*/ fputs (_(" TOTAL :"), fp); #ifdef HAVE_USER_TIME fprintf (fp, "%7.2f ", total->user); #endif #ifdef HAVE_SYS_TIME fprintf (fp, "%7.2f ", total->sys); #endif #ifdef HAVE_WALL_TIME fprintf (fp, "%7.2f\n", total->wall); #endif #ifdef ENABLE_CHECKING fprintf (fp, "Extra diagnostic checks enabled; compiler may run slowly.\n"); fprintf (fp, "Configure with --disable-checking to disable checks.\n"); #endif #endif /* defined (HAVE_USER_TIME) || defined (HAVE_SYS_TIME) || defined (HAVE_WALL_TIME) */ } /* Prints a message to stderr stating that time elapsed in STR is TOTAL (given in microseconds). */ void print_time (const char *str, long total) { long all_time = get_run_time (); fprintf (stderr, _("time in %s: %ld.%06ld (%ld%%)\n"), str, total / 1000000, total % 1000000, all_time == 0 ? 0 : (long) (((100.0 * (double) total) / (double) all_time) + .5)); } /* Top level of GCC compilers (cc1, cc1plus, etc.) Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This is the top level of cc1/c++. It parses command args, opens files, invokes the various passes in the proper order, and counts the time used by each. Error messages and low-level interface to malloc also handled here. */ #ifdef ONE_COMPILATION_UNIT #define TARGET_NAME "i686-pc-linux-gnu" #endif #undef FLOAT /* This is for hpux. They should change hpux. */ #undef FFS /* Some systems define this in param.h. */ #include <signal.h> #ifdef HAVE_SYS_RESOURCE_H # include <sys/resource.h> #endif #ifdef HAVE_SYS_TIMES_H # include <sys/times.h> #endif #if defined (DWARF2_UNWIND_INFO) || defined (DWARF2_DEBUGGING_INFO) #endif #if defined(DBX_DEBUGGING_INFO) || defined(XCOFF_DEBUGGING_INFO) #endif #ifdef SDB_DEBUGGING_INFO #endif #ifdef XCOFF_DEBUGGING_INFO /* Needed for external data declarations for e.g. AIX 4.x. */ #endif static void general_init (const char *); static void do_compile (void); static void process_options (void); static void backend_init (void); static int lang_dependent_init (const char *); static void init_asm_output (const char *); static void finalize_toplev (void); static void crash_signal (int) ATTRIBUTE_NORETURN; static void setup_core_dumping (void); static void compile_file (void); static int print_single_switch (FILE *, int, int, const char *, const char *, const char *, const char *, const char *); static void print_switch_values (FILE *, int, int, const char *, const char *, const char *); /* Nonzero to dump debug info whilst parsing (-dy option). */ static int set_yydebug; /* True if we don't need a backend (e.g. preprocessing only). */ static bool no_backend; /* Length of line when printing switch values. */ #define MAX_LINE 75 /* Name of program invoked, sans directories. */ const char *progname; /* Copy of argument vector to toplev_main.
*/ static const char **save_argv; /* Name of top-level original source file (what was input to cpp). This comes from the #-command at the beginning of the actual input. If there isn't any there, then this is the cc1 input file name. */ const char *main_input_filename; #ifndef USE_MAPPED_LOCATION location_t unknown_location = { NULL, 0 }; #endif /* Used to enable -fvar-tracking, -fweb and -frename-registers according to optimize and default_debug_hooks in process_options (). */ #define AUTODETECT_FLAG_VAR_TRACKING 2 /* Current position in real source file. */ location_t input_location; struct line_maps line_table; /* Nonzero if it is unsafe to create any new pseudo registers. */ int no_new_pseudos; /* Stack of currently pending input files. */ struct file_stack *input_file_stack; /* Incremented on each change to input_file_stack. */ int input_file_stack_tick; /* Name to use as base of names for dump output files. */ const char *dump_base_name; /* Name to use as a base for auxiliary output files. */ const char *aux_base_name; /* Bit flags that specify the machine subtype we are compiling for. Bits are tested using macros TARGET_... defined in the tm.h file and set by `-m...' switches. Must be defined in rtlanal.c. */ extern int target_flags; /* A mask of target_flags that includes bit X if X was set or cleared on the command line. */ int target_flags_explicit; /* Debug hooks - dependent upon command line options. */ const struct gcc_debug_hooks *debug_hooks; /* Debug hooks - target default. */ static const struct gcc_debug_hooks *default_debug_hooks; /* Other flags saying which kinds of debugging dump have been requested. */ int rtl_dump_and_exit; int flag_print_asm_name; enum graph_dump_types graph_dump_format; /* Name for output file of assembly code, specified with -o. */ const char *asm_file_name; /* Nonzero means do optimizations. -O. Particular numeric values stand for particular amounts of optimization; thus, -O2 stores 2 here. However, the optimizations beyond the basic ones are not controlled directly by this variable. Instead, they are controlled by individual `flag_...' variables that are defaulted based on this variable. */ int optimize = 0; /* Nonzero means optimize for size. -Os. The only valid values are zero and nonzero. When optimize_size is nonzero, optimize defaults to 2, but certain individual code bloating optimizations are disabled. */ int optimize_size = 0; /* The FUNCTION_DECL for the function currently being compiled, or 0 if between functions. */ tree current_function_decl; /* Set to the FUNC_BEGIN label of the current function, or NULL_TREE if none. */ tree current_function_func_begin_label; /* A DECL for the current file-scope context. When using IMA, this heads a chain of FILE_DECLs; currently only C uses it. */ tree current_file_decl; /* Temporarily suppress certain warnings. This is set while reading code from a system header file. */ int in_system_header = 0; /* Nonzero means to collect statistics which might be expensive and to print them when we are done. */ int flag_detailed_statistics = 0; /* A random sequence of characters, unless overridden by user. */ const char *flag_random_seed; /* A local time stamp derived from the time of compilation. It will be zero if the system cannot provide a time. It will be -1u, if the user has specified a particular random seed. */ unsigned local_tick; /* -f flags. */ /* Nonzero means `char' should be signed. */ int flag_signed_char; /* Nonzero means give an enum type only as many bytes as it needs. 
A value of 2 means it has not yet been initialized. */ int flag_short_enums; /* Nonzero if structures and unions should be returned in memory. This should only be defined if compatibility with another compiler or with an ABI is needed, because it results in slower code. */ #ifndef DEFAULT_PCC_STRUCT_RETURN #define DEFAULT_PCC_STRUCT_RETURN 1 #endif /* Nonzero for -fpcc-struct-return: return values the same way PCC does. */ int flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN; /* 0 means straightforward implementation of complex divide acceptable. 1 means wide ranges of inputs must work for complex divide. 2 means C99-like requirements for complex divide (not yet implemented). */ int flag_complex_divide_method = 0; /* Nonzero means performs web construction pass. When flag_web == AUTODETECT_FLAG_VAR_TRACKING it will be set according to optimize and default_debug_hooks in process_options (). */ int flag_web = AUTODETECT_FLAG_VAR_TRACKING; /* Nonzero means that we don't want inlining by virtue of -fno-inline, not just because the tree inliner turned us off. */ int flag_really_no_inline = 2; /* Nonzero means we should be saving declaration info into a .X file. */ int flag_gen_aux_info = 0; /* Specified name of aux-info file. */ const char *aux_info_file_name; /* Nonzero if we are compiling code for a shared library, zero for executable. */ int flag_shlib; /* Set to the default thread-local storage (tls) model to use. */ enum tls_model flag_tls_default = TLS_MODEL_GLOBAL_DYNAMIC; /* Nonzero means change certain warnings into errors. Usually these are warnings about failure to conform to some standard. */ int flag_pedantic_errors = 0; /* -dA causes debug commentary information to be produced in the generated assembly code (to make it more readable). This option is generally only of use to those who actually need to read the generated assembly code (perhaps while debugging the compiler itself). Currently, this switch is only used by dwarfout.c; however, it is intended to be a catchall for printing debug information in the assembler file. */ int flag_debug_asm = 0; /* -dP causes the rtl to be emitted as a comment in assembly. */ int flag_dump_rtl_in_asm = 0; /* When non-NULL, indicates that whenever space is allocated on the stack, the resulting stack pointer must not pass this address---that is, for stacks that grow downward, the stack pointer must always be greater than or equal to this address; for stacks that grow upward, the stack pointer must be less than this address. At present, the rtx may be either a REG or a SYMBOL_REF, although the support provided depends on the backend. */ rtx stack_limit_rtx; /* If one, renumber instruction UIDs to reduce the number of unused UIDs if there are a lot of instructions. If greater than one, unconditionally renumber instruction UIDs. */ int flag_renumber_insns = 1; /* Enable points-to analysis on trees. */ enum pta_type flag_tree_points_to = PTA_NONE; /* Nonzero if we should track variables. When flag_var_tracking == AUTODETECT_FLAG_VAR_TRACKING it will be set according to optimize, debug_info_level and debug_hooks in process_options (). */ int flag_var_tracking = AUTODETECT_FLAG_VAR_TRACKING; /* Values of the -falign-* flags: how much to align labels in code. 0 means `use default', 1 means `don't align'. For each variable, there is an _log variant which is the power of two not less than the variable, for .align output. 
*/ int align_loops_log; int align_loops_max_skip; int align_jumps_log; int align_jumps_max_skip; int align_labels_log; int align_labels_max_skip; int align_functions_log; /* Like align_functions_log above, but used by front-ends to force the minimum function alignment. Zero means no alignment is forced. */ int force_align_functions_log; typedef struct { const char *const string; int *const variable; const int on_value; } lang_independent_options; /* Nonzero if subexpressions must be evaluated from left-to-right. */ int flag_evaluation_order = 0; /* The user symbol prefix after having resolved same. */ const char *user_label_prefix; static const param_info lang_independent_params[] = { #define DEFPARAM(ENUM, OPTION, HELP, DEFAULT) \ { OPTION, DEFAULT, HELP }, /* params.def - Run-time parameters. Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc. Written by Mark Mitchell . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains definitions for language-independent parameters. The DEFPARAM macro takes 4 arguments: - The enumeral corresponding to this parameter. - The name that can be used to set this parameter using the command-line option `--param <name>=<value>'. - A help string explaining how the parameter is used. - A default value for the parameter. Be sure to add an entry to invoke.texi summarizing the parameter. */
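/* A usage sketch: each default below is only a starting value and can be overridden on the gcc command line with the parameter name given in the DEFPARAM entry, e.g. (hypothetical invocation)

       gcc -O2 --param max-inline-insns-single=1000 foo.c

   which raises the tree inliner's limit for explicitly-inline functions from its default of 500 to 1000 instructions.  */
/* The single function inlining limit. This is the maximum size of a function counted in internal gcc instructions (not in real machine instructions) that is eligible for inlining by the tree inliner. The default value is 500. Only functions marked inline (or methods defined in the class definition for C++) are affected by this, unless you set the -finline-functions (included in -O3) compiler option. There are more restrictions to inlining: If inlined functions call other functions, the already inlined instructions are counted and once the recursive inline limit (see "max-inline-insns" parameter) is exceeded, the acceptable size gets decreased. */ DEFPARAM (PARAM_MAX_INLINE_INSNS_SINGLE, "max-inline-insns-single", "The maximum number of instructions in a single function eligible for inlining", 500) /* The single function inlining limit for functions that are inlined by virtue of -finline-functions (-O3). This limit should be chosen to be below or equal to the limit that is applied to functions marked inlined (or defined in the class declaration in C++) given by the "max-inline-insns-single" parameter. The default value is 150.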
*/ DEFPARAM (PARAM_MAX_INLINE_INSNS_AUTO, "max-inline-insns-auto", "The maximum number of instructions when automatically inlining", 120) DEFPARAM (PARAM_MAX_INLINE_INSNS_RECURSIVE, "max-inline-insns-recursive", "The maximum number of instructions inline function can grow to via recursive inlining", 500) DEFPARAM (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO, "max-inline-insns-recursive-auto", "The maximum number of instructions non-inline function can grow to via recursive inlining", 500) DEFPARAM (PARAM_MAX_INLINE_RECURSIVE_DEPTH, "max-inline-recursive-depth", "The maximum depth of recursive inlining for inline functions", 8) DEFPARAM (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO, "max-inline-recursive-depth-auto", "The maximum depth of recursive inlining for non-inline functions", 8) /* For languages that (still) use the RTL inliner, we can specify limits for the RTL inliner separately. The parameter here defines the maximum number of RTL instructions a function may have to be eligible for inlining in the RTL inliner. The default value is 600. */ DEFPARAM (PARAM_MAX_INLINE_INSNS_RTL, "max-inline-insns-rtl", "The maximum number of instructions for the RTL inliner", 600) /* The maximum number of instructions to consider when looking for an instruction to fill a delay slot. If more than this arbitrary number of instructions is searched, the time savings from filling the delay slot will be minimal so stop searching. Increasing values mean more aggressive optimization, making the compile time increase with probably small improvement in executable run time. */ DEFPARAM (PARAM_MAX_DELAY_SLOT_INSN_SEARCH, "max-delay-slot-insn-search", "The maximum number of instructions to consider to fill a delay slot", 100) /* When trying to fill delay slots, the maximum number of instructions to consider when searching for a block with valid live register information. Increasing this arbitrarily chosen value means more aggressive optimization, increasing the compile time. This parameter should be removed when the delay slot code is rewritten to maintain the control-flow graph. */ DEFPARAM(PARAM_MAX_DELAY_SLOT_LIVE_SEARCH, "max-delay-slot-live-search", "The maximum number of instructions to consider to find accurate live register information", 333) /* This parameter limits the number of branch elements that the scheduler will track anti-dependencies through without resetting the tracking mechanism. Large functions with few calls or barriers can generate lists containing many 1000's of dependencies. Generally the compiler either uses all available memory, or runs for far too long. */ DEFPARAM(PARAM_MAX_PENDING_LIST_LENGTH, "max-pending-list-length", "The maximum length of scheduling's pending operations list", 32) DEFPARAM(PARAM_LARGE_FUNCTION_INSNS, "large-function-insns", "The size of function body to be considered large", 3000) DEFPARAM(PARAM_LARGE_FUNCTION_GROWTH, "large-function-growth", "Maximal growth due to inlining of large function (in percent)", 100) DEFPARAM(PARAM_INLINE_UNIT_GROWTH, "inline-unit-growth", "how much can given compilation unit grow because of the inlining (in percent)", 50) /* The GCSE optimization will be disabled if it would require significantly more memory than this value. */ DEFPARAM(PARAM_MAX_GCSE_MEMORY, "max-gcse-memory", "The maximum amount of memory to be allocated by GCSE", 50 * 1024 * 1024) /* The number of repetitions of copy/const prop and PRE to run. 
*/ DEFPARAM(PARAM_MAX_GCSE_PASSES, "max-gcse-passes", "The maximum number of passes to make when doing GCSE", 1) /* This is the threshold ratio when to perform partial redundancy elimination after reload. We perform partial redundancy elimination when the following holds: (Redundant load execution count) ------------------------------- >= GCSE_AFTER_RELOAD_PARTIAL_FRACTION (Added loads execution count) */ DEFPARAM(PARAM_GCSE_AFTER_RELOAD_PARTIAL_FRACTION, "gcse-after-reload-partial-fraction", "The threshold ratio for performing partial redundancy elimination \ after reload.", 3) /* This is the threshold ratio of the critical edges execution count compared to the redundant loads execution count that permits performing the load redundancy elimination in gcse after reload. */ DEFPARAM(PARAM_GCSE_AFTER_RELOAD_CRITICAL_FRACTION, "gcse-after-reload-critical-fraction", "The threshold ratio of critical edges execution count that permit \ performing redundancy elimination after reload.", 10) /* This parameter limits the number of insns in a loop that will be unrolled, and by how much the loop is unrolled. This limit should be at most half of the peeling limits: loop unroller decides to not unroll loops that iterate fewer than 2*number of allowed unrollings and thus we would have loops that are neither peeled or unrolled otherwise. */ DEFPARAM(PARAM_MAX_UNROLLED_INSNS, "max-unrolled-insns", "The maximum number of instructions to consider to unroll in a loop", 200) /* This parameter limits how many times the loop is unrolled depending on number of insns really executed in each iteration. */ DEFPARAM(PARAM_MAX_AVERAGE_UNROLLED_INSNS, "max-average-unrolled-insns", "The maximum number of instructions to consider to unroll in a loop on average", 80) /* The maximum number of unrollings of a single loop. */ DEFPARAM(PARAM_MAX_UNROLL_TIMES, "max-unroll-times", "The maximum number of unrollings of a single loop", 8) /* The maximum number of insns of a peeled loop. */ DEFPARAM(PARAM_MAX_PEELED_INSNS, "max-peeled-insns", "The maximum number of insns of a peeled loop", 400) /* The maximum number of peelings of a single loop. */ DEFPARAM(PARAM_MAX_PEEL_TIMES, "max-peel-times", "The maximum number of peelings of a single loop", 16) /* The maximum number of insns of a peeled loop. */ DEFPARAM(PARAM_MAX_COMPLETELY_PEELED_INSNS, "max-completely-peeled-insns", "The maximum number of insns of a completely peeled loop", 400) /* The maximum number of peelings of a single loop that is peeled completely. */ DEFPARAM(PARAM_MAX_COMPLETELY_PEEL_TIMES, "max-completely-peel-times", "The maximum number of peelings of a single loop that is peeled completely", 16) /* The maximum number of insns of a peeled loop that rolls only once. */ DEFPARAM(PARAM_MAX_ONCE_PEELED_INSNS, "max-once-peeled-insns", "The maximum number of insns of a peeled loop that rolls only once", 400) /* The maximum number of insns of an unswitched loop. */ DEFPARAM(PARAM_MAX_UNSWITCH_INSNS, "max-unswitch-insns", "The maximum number of insns of an unswitched loop", 50) /* The maximum level of recursion in unswitch_single_loop. */ DEFPARAM(PARAM_MAX_UNSWITCH_LEVEL, "max-unswitch-level", "The maximum number of unswitchings in a single loop", 3) DEFPARAM(PARAM_MAX_SMS_LOOP_NUMBER, "max-sms-loop-number", "Maximum number of loops to perform swing modulo scheduling on \ (mainly for debugging)", -1) /* This parameter is used to tune SMS MAX II calculations. 
*/ DEFPARAM(PARAM_SMS_MAX_II_FACTOR, "sms-max-ii-factor", "A factor for tuning the upper bound that swing modulo scheduler uses \ for scheduling a loop", 100) DEFPARAM(PARAM_SMS_DFA_HISTORY, "sms-dfa-history", "The number of cycles the swing modulo scheduler considers when \ checking conflicts using DFA", 0) DEFPARAM(PARAM_SMS_LOOP_AVERAGE_COUNT_THRESHOLD, "sms-loop-average-count-threshold", "A threshold on the average loop count considered by the swing modulo \ scheduler", 0) DEFPARAM(HOT_BB_COUNT_FRACTION, "hot-bb-count-fraction", "Select fraction of the maximal count of repetitions of basic block in \ program given basic block needs to have to be considered hot", 10000) DEFPARAM(HOT_BB_FREQUENCY_FRACTION, "hot-bb-frequency-fraction", "Select fraction of the maximal frequency of executions of basic \ block in function given basic block needs to have to be considered hot", 1000) DEFPARAM(TRACER_DYNAMIC_COVERAGE_FEEDBACK, "tracer-dynamic-coverage-feedback", "The percentage of function, weighted by execution frequency, that \ must be covered by trace formation. Used when profile feedback is available", 95) DEFPARAM(TRACER_DYNAMIC_COVERAGE, "tracer-dynamic-coverage", "The percentage of function, weighted by execution frequency, that \ must be covered by trace formation. Used when profile feedback is not available", 75) DEFPARAM(TRACER_MAX_CODE_GROWTH, "tracer-max-code-growth", "Maximal code growth caused by tail duplication (in percent)", 100) DEFPARAM(TRACER_MIN_BRANCH_RATIO, "tracer-min-branch-ratio", "Stop reverse growth if the reverse probability of best edge is less \ than this threshold (in percent)", 10) DEFPARAM(TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK, "tracer-min-branch-probability-feedback", "Stop forward growth if the probability of best edge is less than \ this threshold (in percent). Used when profile feedback is available", 80) DEFPARAM(TRACER_MIN_BRANCH_PROBABILITY, "tracer-min-branch-probability", "Stop forward growth if the probability of best edge is less than \ this threshold (in percent). Used when profile feedback is not available", 50) /* The maximum number of incoming edges to consider for crossjumping. */ DEFPARAM(PARAM_MAX_CROSSJUMP_EDGES, "max-crossjump-edges", "The maximum number of incoming edges to consider for crossjumping", 100) /* The maximum length of path considered in cse. */ DEFPARAM(PARAM_MAX_CSE_PATH_LENGTH, "max-cse-path-length", "The maximum length of path considered in cse", 10) /* The product of the next two is used to decide whether or not to use .GLOBAL_VAR. See tree-dfa.c. */ DEFPARAM(PARAM_GLOBAL_VAR_THRESHOLD, "global-var-threshold", "Given N calls and V call-clobbered vars in a function. 
Use .GLOBAL_VAR if NxV is larger than this limit", 500000) DEFPARAM(PARAM_MAX_CSELIB_MEMORY_LOCATIONS, "max-cselib-memory-locations", "The maximum memory locations recorded by cselib", 500) #ifdef ENABLE_GC_ALWAYS_COLLECT # define GGC_MIN_EXPAND_DEFAULT 0 # define GGC_MIN_HEAPSIZE_DEFAULT 0 #else # define GGC_MIN_EXPAND_DEFAULT 30 # define GGC_MIN_HEAPSIZE_DEFAULT 4096 #endif DEFPARAM(GGC_MIN_EXPAND, "ggc-min-expand", "Minimum heap expansion to trigger garbage collection, as \ a percentage of the total size of the heap", GGC_MIN_EXPAND_DEFAULT) DEFPARAM(GGC_MIN_HEAPSIZE, "ggc-min-heapsize", "Minimum heap size before we start collecting garbage, in kilobytes", GGC_MIN_HEAPSIZE_DEFAULT) #undef GGC_MIN_EXPAND_DEFAULT #undef GGC_MIN_HEAPSIZE_DEFAULT DEFPARAM(PARAM_MAX_RELOAD_SEARCH_INSNS, "max-reload-search-insns", "The maximum number of instructions to search backward when looking for equivalent reload", 100) DEFPARAM(PARAM_MAX_ALIASED_VOPS, "max-aliased-vops", "The maximum number of virtual operands allowed to represent aliases before triggering alias grouping.", 500) DEFPARAM(PARAM_MAX_SCHED_REGION_BLOCKS, "max-sched-region-blocks", "The maximum number of blocks in a region to be considered for interblock scheduling", 10) DEFPARAM(PARAM_MAX_SCHED_REGION_INSNS, "max-sched-region-insns", "The maximum number of insns in a region to be considered for interblock scheduling", 100) /* Local variables: mode:c End: */ #undef DEFPARAM { NULL, 0, NULL } }; /* Here is a table, controlled by the tm.h file, listing each -m switch and which bits in `target_switches' it should set or clear. If VALUE is positive, it is bits to set. If VALUE is negative, -VALUE is bits to clear. (The sign bit is not used so there is no confusion.) */ static const struct { const char *const name; const int value; const char *const description; } target_switches[] = TARGET_SWITCHES; /* This table is similar, but allows the switch to have a value. */ #ifdef TARGET_OPTIONS static const struct { const char *const prefix; const char **const variable; const char *const description; const char *const value; } target_options[] = TARGET_OPTIONS; #endif /* Nonzero means warn about function definitions that default the return type or that use a null return and have a return-type other than void. */ int warn_return_type; /* Output files for assembler code (real compiler output) and debugging dumps. */ FILE *asm_out_file; FILE *aux_info_file; FILE *dump_file = NULL; FILE *cgraph_dump_file = NULL; /* The current working directory of a translation. It's generally the directory from which compilation was initiated, but a preprocessed file may specify the original directory in which it was created. */ static const char *src_pwd; /* Initialize src_pwd with the given string, and return true. If it was already initialized, return false. As a special case, it may be called with a NULL argument to test whether src_pwd has NOT been initialized yet. */ bool set_src_pwd (const char *pwd) { if (src_pwd) { if (strcmp (src_pwd, pwd) == 0) return true; else return false; } src_pwd = xstrdup (pwd); return true; } /* Return the directory from which the translation unit was initiated, in case set_src_pwd() was not called before to assign it a different value. */ const char * get_src_pwd (void) { if (! src_pwd) src_pwd = getpwd (); return src_pwd; } /* Called when the start of a function definition is parsed, this function prints on stderr the name of the function. 
*/ void announce_function (tree decl) { if (!quiet_flag) { if (rtl_dump_and_exit) verbatim ("%s ", IDENTIFIER_POINTER (DECL_NAME (decl))); else verbatim (" %s", lang_hooks.decl_printable_name (decl, 2)); fflush (stderr); pp_needs_newline (global_dc->printer) = true; diagnostic_set_last_function (global_dc); } } /* Set up a default flag_random_seed and local_tick, unless the user already specified one. */ static void randomize (void) { if (!flag_random_seed) { unsigned HOST_WIDE_INT value; static char random_seed[HOST_BITS_PER_WIDE_INT / 4 + 3]; /* Get some more or less random data. */ #ifdef HAVE_GETTIMEOFDAY { struct timeval tv; gettimeofday (&tv, NULL); local_tick = tv.tv_sec * 1000 + tv.tv_usec / 1000; } #else { time_t now = time (NULL); if (now != (time_t)-1) local_tick = (unsigned) now; } #endif value = local_tick ^ getpid (); sprintf (random_seed, HOST_WIDE_INT_PRINT_HEX, value); flag_random_seed = random_seed; } else if (!local_tick) local_tick = -1; } /* Decode the string P as an integral parameter. If the string is indeed an integer return its numeric value else issue an Invalid Option error for the option PNAME and return DEFVAL. If PNAME is zero just return DEFVAL, do not call error. */ int read_integral_parameter (const char *p, const char *pname, const int defval) { const char *endp = p; while (*endp) { if (ISDIGIT (*endp)) endp++; else break; } if (*endp != 0) { if (pname != 0) error ("invalid option argument `%s'", pname); return defval; } return atoi (p); } /* Given X, an unsigned number, return the largest int Y such that 2**Y <= X. If X is 0, return -1. This should be used via the floor_log2 macro. */ int floor_log2_wide (unsigned HOST_WIDE_INT x) { int t=0; if (x == 0) return -1; if (sizeof (HOST_WIDE_INT) * 8 > 64) if (x >= (unsigned HOST_WIDE_INT) 1 << (t + 64)) t += 64; if (sizeof (HOST_WIDE_INT) * 8 > 32) if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 32)) t += 32; if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 16)) t += 16; if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 8)) t += 8; if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 4)) t += 4; if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 2)) t += 2; if (x >= ((unsigned HOST_WIDE_INT) 1) << (t + 1)) t += 1; return t; } /* Return the logarithm of X, base 2, considering X unsigned, if X is a power of 2. Otherwise, returns -1. This should be used via the `exact_log2' macro. */ int exact_log2_wide (unsigned HOST_WIDE_INT x) { /* Test for 0 or a power of 2. */ if (x == 0 || x != (x & -x)) return -1; return floor_log2_wide (x); } /* Handler for fatal signals, such as SIGSEGV. These are transformed into ICE messages, which is much more user friendly. In case the error printer crashes, reset the signal to prevent infinite recursion. */ static void crash_signal (int signo) { signal (signo, SIG_DFL); internal_error ("%s", strsignal (signo)); } /* Arrange to dump core on error. (The regular error message is still printed first, except in the case of abort().) */ static void setup_core_dumping (void) { #ifdef SIGABRT signal (SIGABRT, SIG_DFL); #endif #if defined(HAVE_SETRLIMIT) { struct rlimit rlim; if (getrlimit (RLIMIT_CORE, &rlim) != 0) fatal_error ("getting core file size maximum limit: %m"); rlim.rlim_cur = rlim.rlim_max; if (setrlimit (RLIMIT_CORE, &rlim) != 0) fatal_error ("setting core file size limit to maximum: %m"); } #endif diagnostic_abort_on_error (global_dc); } /* Strip off a legitimate source ending from the input string NAME of length LEN. 
Rather than having to know the names used by all of our front ends, we strip off an ending of a period followed by up to five characters. (Java uses ".class".) */ void strip_off_ending (char *name, int len) { int i; for (i = 2; i < 6 && len > i; i++) { if (name[len - i] == '.') { name[len - i] = '\0'; break; } } } /* Output a quoted string. */ void output_quoted_string (FILE *asm_file, const char *string) { #ifdef OUTPUT_QUOTED_STRING OUTPUT_QUOTED_STRING (asm_file, string); #else char c; putc ('\"', asm_file); while ((c = *string++) != 0) { if (ISPRINT (c)) { if (c == '\"' || c == '\\') putc ('\\', asm_file); putc (c, asm_file); } else fprintf (asm_file, "\\%03o", (unsigned char) c); } putc ('\"', asm_file); #endif } /* Output a file name in the form wanted by System V. */ void output_file_directive (FILE *asm_file, const char *input_name) { int len; const char *na; if (input_name == NULL) input_name = ""; len = strlen (input_name); na = input_name + len; /* NA gets INPUT_NAME sans directory names. */ while (na > input_name) { if (IS_DIR_SEPARATOR (na[-1])) break; na--; } #ifdef ASM_OUTPUT_SOURCE_FILENAME ASM_OUTPUT_SOURCE_FILENAME (asm_file, na); #else fprintf (asm_file, "\t.file\t"); output_quoted_string (asm_file, na); fputc ('\n', asm_file); #endif } /* Do any final processing required for the declarations in VEC, of which there are LEN. We write out inline functions and variables that have been deferred until this point, but which are required. Returns nonzero if anything was put out. */ int wrapup_global_declarations (tree *vec, int len) { tree decl; int i; int reconsider; int output_something = 0; for (i = 0; i < len; i++) { decl = vec[i]; /* We're not deferring this any longer. Assignment is conditional to avoid needlessly dirtying PCH pages. */ if (DECL_DEFER_OUTPUT (decl) != 0) DECL_DEFER_OUTPUT (decl) = 0; if (TREE_CODE (decl) == VAR_DECL && DECL_SIZE (decl) == 0) lang_hooks.finish_incomplete_decl (decl); } /* Now emit any global variables or functions that we have been putting off. We need to loop in case one of the things emitted here references another one which comes earlier in the list. */ do { reconsider = 0; for (i = 0; i < len; i++) { decl = vec[i]; if (TREE_ASM_WRITTEN (decl) || DECL_EXTERNAL (decl)) continue; /* Don't write out static consts, unless we still need them. We also keep static consts if not optimizing (for debugging), unless the user specified -fno-keep-static-consts. ??? They might be better written into the debug information. This is possible when using DWARF. A language processor that wants static constants to be always written out (even if it is not used) is responsible for calling rest_of_decl_compilation itself. E.g. the C front-end calls rest_of_decl_compilation from finish_decl. One motivation for this is that is conventional in some environments to write things like: static const char rcsid[] = "... version string ..."; intending to force the string to be in the executable. A language processor that would prefer to have unneeded static constants "optimized away" would just defer writing them out until here. E.g. C++ does this, because static constants are often defined in header files. ??? A tempting alternative (for both C and C++) would be to force a constant to be written if and only if it is defined in a main file, as opposed to an include file. 
*/ if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl)) { struct cgraph_varpool_node *node; bool needed = 1; node = cgraph_varpool_node (decl); if (flag_unit_at_a_time && node->finalized) needed = 0; else if ((flag_unit_at_a_time && !cgraph_global_info_ready) && (TREE_USED (decl) || TREE_USED (DECL_ASSEMBLER_NAME (decl)))) /* needed */; else if (node->needed) /* needed */; else if (DECL_COMDAT (decl)) needed = 0; else if (TREE_READONLY (decl) && !TREE_PUBLIC (decl) && (optimize || !flag_keep_static_consts || DECL_ARTIFICIAL (decl))) needed = 0; if (needed) { reconsider = 1; rest_of_decl_compilation (decl, NULL, 1, 1); } } } if (reconsider) output_something = 1; } while (reconsider); return output_something; } /* Issue appropriate warnings for the global declarations in VEC (of which there are LEN). Output debugging information for them. */ void check_global_declarations (tree *vec, int len) { tree decl; int i; for (i = 0; i < len; i++) { decl = vec[i]; if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl) && ! TREE_ASM_WRITTEN (decl)) /* Cancel the RTL for this decl so that, if debugging info output for global variables is still to come, this one will be omitted. */ SET_DECL_RTL (decl, NULL_RTX); /* Warn about any function declared static but not defined. We don't warn about variables, because many programs have static variables that exist only to get some text into the object file. */ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) == 0 && DECL_EXTERNAL (decl) && ! DECL_ARTIFICIAL (decl) && ! TREE_PUBLIC (decl) && (warn_unused_function || TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))) { if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))) pedwarn ("%J'%F' used but never defined", decl, decl); else warning ("%J'%F' declared `static' but never defined", decl, decl); /* This symbol is effectively an "extern" declaration now. */ TREE_PUBLIC (decl) = 1; assemble_external (decl); } /* Warn about static fns or vars defined but not used. */ if (((warn_unused_function && TREE_CODE (decl) == FUNCTION_DECL) /* We don't warn about "static const" variables because the "rcs_id" idiom uses that construction. */ || (warn_unused_variable && TREE_CODE (decl) == VAR_DECL && ! TREE_READONLY (decl))) && ! DECL_IN_SYSTEM_HEADER (decl) && ! TREE_USED (decl) /* The TREE_USED bit for file-scope decls is kept in the identifier, to handle multiple external decls in different scopes. */ && ! TREE_USED (DECL_NAME (decl)) && ! DECL_EXTERNAL (decl) && ! TREE_PUBLIC (decl) /* A volatile variable might be used in some non-obvious way. */ && ! TREE_THIS_VOLATILE (decl) /* Global register variables must be declared to reserve them. */ && ! (TREE_CODE (decl) == VAR_DECL && DECL_REGISTER (decl)) /* Otherwise, ask the language. */ && lang_hooks.decls.warn_unused_global (decl)) warning ("%J'%D' defined but not used", decl, decl); /* Avoid confusing the debug information machinery when there are errors. */ if (errorcount == 0 && sorrycount == 0) { timevar_push (TV_SYMOUT); (*debug_hooks->global_decl) (decl); timevar_pop (TV_SYMOUT); } } } /* Warn about a use of an identifier which was marked deprecated. 
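   For illustration only (this example is not part of the original source
   and the names are hypothetical), the kind of code that reaches this
   routine is a use of a declaration or type carrying the "deprecated"
   attribute:

     int old_api (void) __attribute__ ((deprecated));

     int
     caller (void)
     {
       return old_api ();
     }

   The call to old_api here produces a warning of the form
   "`old_api' is deprecated (declared at file:line)".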
*/

void
warn_deprecated_use (tree node)
{
  if (node == 0 || !warn_deprecated_decl)
    return;

  if (DECL_P (node))
    {
      expanded_location xloc = expand_location (DECL_SOURCE_LOCATION (node));
      warning ("`%s' is deprecated (declared at %s:%d)",
               IDENTIFIER_POINTER (DECL_NAME (node)), xloc.file, xloc.line);
    }
  else if (TYPE_P (node))
    {
      const char *what = NULL;
      tree decl = TYPE_STUB_DECL (node);

      if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE)
        what = IDENTIFIER_POINTER (TYPE_NAME (node));
      else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL
               && DECL_NAME (TYPE_NAME (node)))
        what = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (node)));

      if (decl)
        {
          expanded_location xloc
            = expand_location (DECL_SOURCE_LOCATION (decl));
          if (what)
            warning ("`%s' is deprecated (declared at %s:%d)",
                     what, xloc.file, xloc.line);
          else
            warning ("type is deprecated (declared at %s:%d)",
                     xloc.file, xloc.line);
        }
      else
        {
          if (what)
            warning ("`%s' is deprecated", what);
          else
            warning ("type is deprecated");
        }
    }
}

/* Save the current INPUT_LOCATION on the top entry in the INPUT_FILE_STACK.
   Push a new entry for FILE and LINE, and set the INPUT_LOCATION
   accordingly.  */

void
#ifdef USE_MAPPED_LOCATION
push_srcloc (location_t fline)
#else
push_srcloc (const char *file, int line)
#endif
{
  struct file_stack *fs;

  fs = xmalloc (sizeof (struct file_stack));
  fs->location = input_location;
  fs->next = input_file_stack;
#ifdef USE_MAPPED_LOCATION
  input_location = fline;
#else
  input_filename = file;
  input_line = line;
#endif
  input_file_stack = fs;
  input_file_stack_tick++;
}

/* Pop the top entry off the stack of presently open source files.
   Restore the INPUT_LOCATION from the new topmost entry on the stack.  */

void
pop_srcloc (void)
{
  struct file_stack *fs;

  fs = input_file_stack;
  input_location = fs->location;
  input_file_stack = fs->next;
  free (fs);
  input_file_stack_tick++;
}

/* Compile an entire translation unit.  Write a file of assembly
   output and various debugging dumps.  */

static void
compile_file (void)
{
  /* Initialize yet another pass.  */

  init_final (main_input_filename);
  coverage_init (aux_base_name);

  timevar_push (TV_PARSE);

  /* Call the parser, which parses the entire file (calling
     rest_of_compilation for each function).  */
  lang_hooks.parse_file (set_yydebug);

  /* In case there were missing block closers,
     get us back to the global binding level.  */
  lang_hooks.clear_binding_stack ();

  /* Compilation is now finished except for writing
     what's left of the symbol table output.  */
  timevar_pop (TV_PARSE);

  if (flag_syntax_only)
    return;

  lang_hooks.decls.final_write_globals ();

  cgraph_varpool_assemble_pending_decls ();

  /* This must occur after the loop to output deferred functions.  Else
     the coverage initializer would not be emitted if all the functions
     in this compilation unit were deferred.  */
  coverage_finish ();

  /* Write out any pending weak symbol declarations.  */

  weak_finish ();

  /* Do dbx symbols.  */
  timevar_push (TV_SYMOUT);

#ifdef DWARF2_UNWIND_INFO
  if (dwarf2out_do_frame ())
    dwarf2out_frame_finish ();
#endif

  (*debug_hooks->finish) (main_input_filename);
  timevar_pop (TV_SYMOUT);

  /* Output some stuff at end of file if nec.  */

  dw2_output_indirect_constants ();

  /* Attach a special .ident directive to the end of the file to identify
     the version of GCC which compiled this code.  The format of the .ident
     string is patterned after the ones produced by native SVR4 compilers.  */
#ifdef IDENT_ASM_OP
  if (!flag_no_ident)
    fprintf (asm_out_file, "%s\"GCC: (GNU) %s\"\n",
             IDENT_ASM_OP, version_string);
#endif

  /* This must be at the end.
Some target ports emit end of file directives into the assembly file here, and hence we can not output anything to the assembly file after this point. */ targetm.asm_out.file_end (); } /* Display help for target options. */ void display_target_options (void) { int undoc, i; static bool displayed = false; /* Avoid double printing for --help --target-help. */ if (displayed) return; displayed = true; if (ARRAY_SIZE (target_switches) > 1 #ifdef TARGET_OPTIONS || ARRAY_SIZE (target_options) > 1 #endif ) { int doc = 0; undoc = 0; printf (_("\nTarget specific options:\n")); for (i = ARRAY_SIZE (target_switches); i--;) { const char *option = target_switches[i].name; const char *description = target_switches[i].description; if (option == NULL || *option == 0) continue; else if (description == NULL) { undoc = 1; if (extra_warnings) printf (_(" -m%-23s [undocumented]\n"), option); } else if (*description != 0) doc += printf (" -m%-23s %s\n", option, _(description)); } #ifdef TARGET_OPTIONS for (i = ARRAY_SIZE (target_options); i--;) { const char *option = target_options[i].prefix; const char *description = target_options[i].description; if (option == NULL || *option == 0) continue; else if (description == NULL) { undoc = 1; if (extra_warnings) printf (_(" -m%-23s [undocumented]\n"), option); } else if (*description != 0) doc += printf (" -m%-23s %s\n", option, _(description)); } #endif if (undoc) { if (doc) printf (_("\nThere are undocumented target specific options as well.\n")); else printf (_(" They exist, but they are not documented.\n")); } } } /* Parse a -d... command line switch. */ void decode_d_option (const char *arg) { int c; while (*arg) switch (c = *arg++) { case 'A': flag_debug_asm = 1; break; case 'p': flag_print_asm_name = 1; break; case 'P': flag_dump_rtl_in_asm = 1; flag_print_asm_name = 1; break; case 'v': graph_dump_format = vcg; break; case 'x': rtl_dump_and_exit = 1; break; case 'y': set_yydebug = 1; break; case 'D': /* These are handled by the preprocessor. */ case 'I': break; case 'H': setup_core_dumping(); break; case 'a': default: if (!enable_rtl_dump_file (c)) warning ("unrecognized gcc debugging option: %c", c); break; } } /* Indexed by enum debug_info_type. */ const char *const debug_type_names[] = { "none", "stabs", "coff", "dwarf-1", "dwarf-2", "xcoff", "vms" }; /* Decode -m switches. */ /* Decode the switch -mNAME. */ void set_target_switch (const char *name) { size_t j; int valid_target_option = 0; for (j = 0; j < ARRAY_SIZE (target_switches); j++) if (!strcmp (target_switches[j].name, name)) { if (target_switches[j].value < 0) target_flags &= ~-target_switches[j].value; else target_flags |= target_switches[j].value; if (name[0] != 0) { if (target_switches[j].value < 0) target_flags_explicit |= -target_switches[j].value; else target_flags_explicit |= target_switches[j].value; } valid_target_option = 1; } #ifdef TARGET_OPTIONS if (!valid_target_option) for (j = 0; j < ARRAY_SIZE (target_options); j++) { int len = strlen (target_options[j].prefix); if (target_options[j].value) { if (!strcmp (target_options[j].prefix, name)) { *target_options[j].variable = target_options[j].value; valid_target_option = 1; } } else { if (!strncmp (target_options[j].prefix, name, len)) { *target_options[j].variable = name + len; valid_target_option = 1; } } } #endif if (!valid_target_option) error ("invalid option `%s'", name); } /* Print version information to FILE. Each line begins with INDENT (for the case where FILE is the assembler output file). 
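   As an illustration (assumed shape only, not captured from a real build),
   when INDENT is the assembler comment prefix the output looks roughly
   like:

     # GNU C version X.Y.Z (i686-pc-linux-gnu)
     #       compiled by GNU C version A.B.C.

   followed by a second line reporting the ggc-min-expand and
   ggc-min-heapsize parameter values.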
*/ void print_version (FILE *file, const char *indent) { #ifndef __VERSION__ #define __VERSION__ "[?]" #endif fnotice (file, #ifdef __GNUC__ "%s%s%s version %s (%s)\n%s\tcompiled by GNU C version %s.\n" #else "%s%s%s version %s (%s) compiled by CC.\n" #endif , indent, *indent != 0 ? " " : "", lang_hooks.name, version_string, TARGET_NAME, indent, __VERSION__); fnotice (file, "%s%sGGC heuristics: --param ggc-min-expand=%d --param ggc-min-heapsize=%d\n", indent, *indent != 0 ? " " : "", PARAM_VALUE (GGC_MIN_EXPAND), PARAM_VALUE (GGC_MIN_HEAPSIZE)); } /* Print an option value and return the adjusted position in the line. ??? We don't handle error returns from fprintf (disk full); presumably other code will catch a disk full though. */ static int print_single_switch (FILE *file, int pos, int max, const char *indent, const char *sep, const char *term, const char *type, const char *name) { /* The ultrix fprintf returns 0 on success, so compute the result we want here since we need it for the following test. */ int len = strlen (sep) + strlen (type) + strlen (name); if (pos != 0 && pos + len > max) { fprintf (file, "%s", term); pos = 0; } if (pos == 0) { fprintf (file, "%s", indent); pos = strlen (indent); } fprintf (file, "%s%s%s", sep, type, name); pos += len; return pos; } /* Print active target switches to FILE. POS is the current cursor position and MAX is the size of a "line". Each line begins with INDENT and ends with TERM. Each switch is separated from the next by SEP. */ static void print_switch_values (FILE *file, int pos, int max, const char *indent, const char *sep, const char *term) { size_t j; const char **p; /* Fill in the -frandom-seed option, if the user didn't pass it, so that it can be printed below. This helps reproducibility. */ randomize (); /* Print the options as passed. */ pos = print_single_switch (file, pos, max, indent, *indent ? " " : "", term, _("options passed: "), ""); for (p = &save_argv[1]; *p != NULL; p++) if (**p == '-') { /* Ignore these. */ if (strcmp (*p, "-o") == 0) { if (p[1] != NULL) p++; continue; } if (strcmp (*p, "-quiet") == 0) continue; if (strcmp (*p, "-version") == 0) continue; if ((*p)[1] == 'd') continue; pos = print_single_switch (file, pos, max, indent, sep, term, *p, ""); } if (pos > 0) fprintf (file, "%s", term); /* Print the -f and -m options that have been enabled. We don't handle language specific options but printing argv should suffice. */ pos = print_single_switch (file, 0, max, indent, *indent ? " " : "", term, _("options enabled: "), ""); for (j = 0; j < cl_options_count; j++) { if (!cl_options[j].flag_var || !(cl_options[j].flags & CL_REPORT)) continue; if (cl_options[j].has_set_value) { if (*cl_options[j].flag_var != cl_options[j].set_value) continue; } else { if (!*cl_options[j].flag_var) continue; } pos = print_single_switch (file, pos, max, indent, sep, term, "", cl_options[j].opt_text); } /* Print target specific options. 
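   That is: "-m" followed by the name of each target_switches entry whose
   bits are all currently set in target_flags, and, for string-valued
   target_options entries that were given a value, "-m" followed by the
   option prefix and that value.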
*/ for (j = 0; j < ARRAY_SIZE (target_switches); j++) if (target_switches[j].name[0] != '\0' && target_switches[j].value > 0 && ((target_switches[j].value & target_flags) == target_switches[j].value)) { pos = print_single_switch (file, pos, max, indent, sep, term, "-m", target_switches[j].name); } #ifdef TARGET_OPTIONS for (j = 0; j < ARRAY_SIZE (target_options); j++) if (*target_options[j].variable != NULL) { char prefix[256]; sprintf (prefix, "-m%s", target_options[j].prefix); pos = print_single_switch (file, pos, max, indent, sep, term, prefix, *target_options[j].variable); } #endif fprintf (file, "%s", term); } /* Open assembly code output file. Do this even if -fsyntax-only is on, because then the driver will have provided the name of a temporary file or bit bucket for us. NAME is the file specified on the command line, possibly NULL. */ static void init_asm_output (const char *name) { if (name == NULL && asm_file_name == 0) asm_out_file = stdout; else { if (asm_file_name == 0) { int len = strlen (dump_base_name); char *dumpname = xmalloc (len + 6); memcpy (dumpname, dump_base_name, len + 1); strip_off_ending (dumpname, len); strcat (dumpname, ".s"); asm_file_name = dumpname; } if (!strcmp (asm_file_name, "-")) asm_out_file = stdout; else asm_out_file = fopen (asm_file_name, "w+b"); if (asm_out_file == 0) fatal_error ("can't open %s for writing: %m", asm_file_name); } #ifdef IO_BUFFER_SIZE setvbuf (asm_out_file, xmalloc (IO_BUFFER_SIZE), _IOFBF, IO_BUFFER_SIZE); #endif if (!flag_syntax_only) { targetm.asm_out.file_start (); #ifdef ASM_COMMENT_START if (flag_verbose_asm) { /* Print the list of options in effect. */ print_version (asm_out_file, ASM_COMMENT_START); print_switch_values (asm_out_file, 0, MAX_LINE, ASM_COMMENT_START, " ", "\n"); /* Add a blank line here so it appears in assembler output but not screen output. */ fprintf (asm_out_file, "\n"); } #endif } } /* Default version of get_pch_validity. By default, every flag difference is fatal; that will be mostly right for most targets, but completely right for very few. */ void * default_get_pch_validity (size_t *len) { #ifdef TARGET_OPTIONS size_t i; #endif char *result, *r; *len = sizeof (target_flags) + 2; #ifdef TARGET_OPTIONS for (i = 0; i < ARRAY_SIZE (target_options); i++) { *len += 1; if (*target_options[i].variable) *len += strlen (*target_options[i].variable); } #endif result = r = xmalloc (*len); r[0] = flag_pic; r[1] = flag_pie; r += 2; memcpy (r, &target_flags, sizeof (target_flags)); r += sizeof (target_flags); #ifdef TARGET_OPTIONS for (i = 0; i < ARRAY_SIZE (target_options); i++) { const char *str = *target_options[i].variable; size_t l; if (! str) str = ""; l = strlen (str) + 1; memcpy (r, str, l); r += l; } #endif return result; } /* Default version of pch_valid_p. */ const char * default_pch_valid_p (const void *data_p, size_t len) { const char *data = (const char *)data_p; const char *flag_that_differs = NULL; size_t i; /* -fpic and -fpie also usually make a PCH invalid. */ if (data[0] != flag_pic) return _("created and used with different settings of -fpic"); if (data[1] != flag_pie) return _("created and used with different settings of -fpie"); data += 2; /* Check target_flags. 
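     The layout produced by default_get_pch_validity above is: byte 0 is
     flag_pic, byte 1 is flag_pie, the next sizeof (target_flags) bytes are
     a copy of target_flags, and the rest is each target_options string in
     table order, including its terminating NUL.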
*/ if (memcmp (data, &target_flags, sizeof (target_flags)) != 0) { for (i = 0; i < ARRAY_SIZE (target_switches); i++) { int bits; int tf; memcpy (&tf, data, sizeof (target_flags)); bits = target_switches[i].value; if (bits < 0) bits = -bits; if ((target_flags & bits) != (tf & bits)) { flag_that_differs = target_switches[i].name; goto make_message; } } abort (); } data += sizeof (target_flags); len -= sizeof (target_flags); /* Check string options. */ #ifdef TARGET_OPTIONS for (i = 0; i < ARRAY_SIZE (target_options); i++) { const char *str = *target_options[i].variable; size_t l; if (! str) str = ""; l = strlen (str) + 1; if (len < l || memcmp (data, str, l) != 0) { flag_that_differs = target_options[i].prefix; goto make_message; } data += l; len -= l; } #endif return NULL; make_message: { char *r; asprintf (&r, _("created and used with differing settings of `-m%s'"), flag_that_differs); if (r == NULL) return _("out of memory"); return r; } } /* Default tree printer. Handles declarations only. */ static bool default_tree_printer (pretty_printer * pp, text_info *text) { switch (*text->format_spec) { case 'D': case 'F': case 'T': { tree t = va_arg (*text->args_ptr, tree); const char *n = DECL_NAME (t) ? lang_hooks.decl_printable_name (t, 2) : ""; pp_string (pp, n); } return true; default: return false; } } /* Initialization of the front end environment, before command line options are parsed. Signal handlers, internationalization etc. ARGV0 is main's argv[0]. */ static void general_init (const char *argv0) { const char *p; p = argv0 + strlen (argv0); while (p != argv0 && !IS_DIR_SEPARATOR (p[-1])) --p; progname = p; xmalloc_set_program_name (progname); hex_init (); gcc_init_libintl (); /* Initialize the diagnostics reporting machinery, so option parsing can give warnings and errors. */ diagnostic_initialize (global_dc); /* Set a default printer. Language specific initializations will override it later. */ pp_format_decoder (global_dc->printer) = &default_tree_printer; /* Trap fatal signals, e.g. SIGSEGV, and convert them to ICE messages. */ #ifdef SIGSEGV signal (SIGSEGV, crash_signal); #endif #ifdef SIGILL signal (SIGILL, crash_signal); #endif #ifdef SIGBUS signal (SIGBUS, crash_signal); #endif #ifdef SIGABRT signal (SIGABRT, crash_signal); #endif #if defined SIGIOT && (!defined SIGABRT || SIGABRT != SIGIOT) signal (SIGIOT, crash_signal); #endif #ifdef SIGFPE signal (SIGFPE, crash_signal); #endif /* Other host-specific signal setup. */ (*host_hooks.extra_signals)(); /* Initialize the garbage-collector, string pools and tree type hash table. */ init_ggc (); init_stringpool (); linemap_init (&line_table); init_ttree (); /* Initialize register usage now so switches may override. */ init_reg_sets (); /* Register the language-independent parameters. */ add_params (lang_independent_params, LAST_PARAM); /* This must be done after add_params but before argument processing. */ init_ggc_heuristics(); init_tree_optimization_passes (); } /* Process the options that have been parsed. */ static void process_options (void) { /* Allow the front end to perform consistency checks and do further initialization based on the command line options. This hook also sets the original filename if appropriate (e.g. foo.i -> foo.c) so we can correctly initialize debug output. */ no_backend = lang_hooks.post_options (&main_input_filename); #ifndef USE_MAPPED_LOCATION input_filename = main_input_filename; #endif #ifdef OVERRIDE_OPTIONS /* Some machines may reject certain combinations of options. 
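     (OVERRIDE_OPTIONS is supplied by the target's headers; when defined it
     typically expands to a call into the back end, e.g. override_options (),
     which may veto or adjust option combinations before the checks below
     run.)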
*/ OVERRIDE_OPTIONS; #endif if (flag_short_enums == 2) flag_short_enums = targetm.default_short_enums (); /* Set aux_base_name if not already set. */ if (aux_base_name) ; else if (main_input_filename) { char *name = xstrdup (lbasename (main_input_filename)); strip_off_ending (name, strlen (name)); aux_base_name = name; } else aux_base_name = "gccaux"; /* Set up the align_*_log variables, defaulting them to 1 if they were still unset. */ if (align_loops <= 0) align_loops = 1; if (align_loops_max_skip > align_loops || !align_loops) align_loops_max_skip = align_loops - 1; align_loops_log = floor_log2 (align_loops * 2 - 1); if (align_jumps <= 0) align_jumps = 1; if (align_jumps_max_skip > align_jumps || !align_jumps) align_jumps_max_skip = align_jumps - 1; align_jumps_log = floor_log2 (align_jumps * 2 - 1); if (align_labels <= 0) align_labels = 1; align_labels_log = floor_log2 (align_labels * 2 - 1); if (align_labels_max_skip > align_labels || !align_labels) align_labels_max_skip = align_labels - 1; if (align_functions <= 0) align_functions = 1; align_functions_log = floor_log2 (align_functions * 2 - 1); /* Unrolling all loops implies that standard loop unrolling must also be done. */ if (flag_unroll_all_loops) flag_unroll_loops = 1; if (flag_unroll_loops) { flag_old_unroll_loops = 0; flag_old_unroll_all_loops = 0; } if (flag_old_unroll_all_loops) flag_old_unroll_loops = 1; /* Old loop unrolling requires that strength_reduction be on also. Silently turn on strength reduction here if it isn't already on. Also, the loop unrolling code assumes that cse will be run after loop, so that must be turned on also. */ if (flag_old_unroll_loops) { flag_strength_reduce = 1; flag_rerun_cse_after_loop = 1; } if (flag_unroll_loops || flag_peel_loops) flag_rerun_cse_after_loop = 1; /* If explicitly asked to run new loop optimizer, switch off the old one. */ if (flag_loop_optimize2) flag_loop_optimize = 0; /* Enable new loop optimizer pass if any of its optimizations is called. */ if (flag_move_loop_invariants || flag_unswitch_loops || flag_peel_loops || flag_unroll_loops || flag_branch_on_count_reg) flag_loop_optimize2 = 1; if (flag_non_call_exceptions) flag_asynchronous_unwind_tables = 1; if (flag_asynchronous_unwind_tables) flag_unwind_tables = 1; /* Disable unit-at-a-time mode for frontends not supporting callgraph interface. */ if (flag_unit_at_a_time && ! lang_hooks.callgraph.expand_function) flag_unit_at_a_time = 0; if (flag_value_profile_transformations) flag_profile_values = 1; /* Warn about options that are not supported on this machine. */ #ifndef INSN_SCHEDULING if (flag_schedule_insns || flag_schedule_insns_after_reload) warning ("instruction scheduling not supported on this target machine"); #endif #ifndef DELAY_SLOTS if (flag_delayed_branch) warning ("this target machine does not have delayed branches"); #endif if (flag_tree_based_profiling && flag_test_coverage) sorry ("test-coverage not yet implemented in trees."); if (flag_tree_based_profiling && flag_profile_values) sorry ("value-based profiling not yet implemented in trees."); user_label_prefix = USER_LABEL_PREFIX; if (flag_leading_underscore != -1) { /* If the default prefix is more complicated than "" or "_", issue a warning and ignore this option. */ if (user_label_prefix[0] == 0 || (user_label_prefix[0] == '_' && user_label_prefix[1] == 0)) { user_label_prefix = flag_leading_underscore ? "_" : ""; } else warning ("-f%sleading-underscore not supported on this target machine", flag_leading_underscore ? 
"" : "no-"); } /* If we are in verbose mode, write out the version and maybe all the option flags in use. */ if (version_flag) { print_version (stderr, ""); if (! quiet_flag) print_switch_values (stderr, 0, MAX_LINE, "", " ", "\n"); } if (flag_syntax_only) { write_symbols = NO_DEBUG; profile_flag = 0; } /* A lot of code assumes write_symbols == NO_DEBUG if the debugging level is 0. */ if (debug_info_level == DINFO_LEVEL_NONE) write_symbols = NO_DEBUG; /* Now we know write_symbols, set up the debug hooks based on it. By default we do nothing for debug output. */ if (PREFERRED_DEBUGGING_TYPE == NO_DEBUG) default_debug_hooks = &do_nothing_debug_hooks; #if defined(DBX_DEBUGGING_INFO) else if (PREFERRED_DEBUGGING_TYPE == DBX_DEBUG) default_debug_hooks = &dbx_debug_hooks; #endif #if defined(XCOFF_DEBUGGING_INFO) else if (PREFERRED_DEBUGGING_TYPE == XCOFF_DEBUG) default_debug_hooks = &xcoff_debug_hooks; #endif #ifdef SDB_DEBUGGING_INFO else if (PREFERRED_DEBUGGING_TYPE == SDB_DEBUG) default_debug_hooks = &sdb_debug_hooks; #endif #ifdef DWARF2_DEBUGGING_INFO else if (PREFERRED_DEBUGGING_TYPE == DWARF2_DEBUG) default_debug_hooks = &dwarf2_debug_hooks; #endif #ifdef VMS_DEBUGGING_INFO else if (PREFERRED_DEBUGGING_TYPE == VMS_DEBUG || PREFERRED_DEBUGGING_TYPE == VMS_AND_DWARF2_DEBUG) default_debug_hooks = &vmsdbg_debug_hooks; #endif if (write_symbols == NO_DEBUG) debug_hooks = &do_nothing_debug_hooks; #if defined(DBX_DEBUGGING_INFO) else if (write_symbols == DBX_DEBUG) debug_hooks = &dbx_debug_hooks; #endif #if defined(XCOFF_DEBUGGING_INFO) else if (write_symbols == XCOFF_DEBUG) debug_hooks = &xcoff_debug_hooks; #endif #ifdef SDB_DEBUGGING_INFO else if (write_symbols == SDB_DEBUG) debug_hooks = &sdb_debug_hooks; #endif #ifdef DWARF2_DEBUGGING_INFO else if (write_symbols == DWARF2_DEBUG) debug_hooks = &dwarf2_debug_hooks; #endif #ifdef VMS_DEBUGGING_INFO else if (write_symbols == VMS_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG) debug_hooks = &vmsdbg_debug_hooks; #endif else error ("target system does not support the \"%s\" debug format", debug_type_names[write_symbols]); /* Now we know which debug output will be used so we can set flag_var_tracking, flag_rename_registers and flag_web if the user has not specified them. */ if (debug_info_level < DINFO_LEVEL_NORMAL || debug_hooks->var_location == do_nothing_debug_hooks.var_location) { if (flag_var_tracking == 1) { if (debug_info_level < DINFO_LEVEL_NORMAL) warning ("variable tracking requested, but useless unless " "producing debug info"); else warning ("variable tracking requested, but not supported " "by this debug format"); } flag_var_tracking = 0; } if (flag_rename_registers == AUTODETECT_FLAG_VAR_TRACKING) flag_rename_registers = default_debug_hooks->var_location != do_nothing_debug_hooks.var_location; if (flag_web == AUTODETECT_FLAG_VAR_TRACKING) flag_web = optimize >= 2 && (default_debug_hooks->var_location != do_nothing_debug_hooks.var_location); if (flag_var_tracking == AUTODETECT_FLAG_VAR_TRACKING) flag_var_tracking = optimize >= 1; /* If auxiliary info generation is desired, open the output file. This goes in the same directory as the source file--unlike all the other output files. */ if (flag_gen_aux_info) { aux_info_file = fopen (aux_info_file_name, "w"); if (aux_info_file == 0) fatal_error ("can't open %s: %m", aux_info_file_name); } if (! 
targetm.have_named_sections) { if (flag_function_sections) { warning ("-ffunction-sections not supported for this target"); flag_function_sections = 0; } if (flag_data_sections) { warning ("-fdata-sections not supported for this target"); flag_data_sections = 0; } } if (flag_function_sections && profile_flag) { warning ("-ffunction-sections disabled; it makes profiling impossible"); flag_function_sections = 0; } #ifndef HAVE_prefetch if (flag_prefetch_loop_arrays) { warning ("-fprefetch-loop-arrays not supported for this target"); flag_prefetch_loop_arrays = 0; } #else if (flag_prefetch_loop_arrays && !HAVE_prefetch) { warning ("-fprefetch-loop-arrays not supported for this target (try -march switches)"); flag_prefetch_loop_arrays = 0; } #endif /* This combination of options isn't handled for i386 targets and doesn't make much sense anyway, so don't allow it. */ if (flag_prefetch_loop_arrays && optimize_size) { warning ("-fprefetch-loop-arrays is not supported with -Os"); flag_prefetch_loop_arrays = 0; } #ifndef OBJECT_FORMAT_ELF if (flag_function_sections && write_symbols != NO_DEBUG) warning ("-ffunction-sections may affect debugging on some targets"); #endif /* The presence of IEEE signaling NaNs, implies all math can trap. */ if (flag_signaling_nans) flag_trapping_math = 1; } /* Initialize the compiler back end. */ static void backend_init (void) { init_adjust_machine_modes (); init_emit_once (debug_info_level == DINFO_LEVEL_NORMAL || debug_info_level == DINFO_LEVEL_VERBOSE #ifdef VMS_DEBUGGING_INFO /* Enable line number info for traceback. */ || debug_info_level > DINFO_LEVEL_NONE #endif || flag_test_coverage || warn_notreached); init_regs (); init_fake_stack_mems (); init_alias_once (); init_loop (); init_reload (); init_function_once (); init_varasm_once (); /* The following initialization functions need to generate rtl, so provide a dummy function context for them. */ init_dummy_function_start (); init_expmed (); if (flag_caller_saves) init_caller_save (); expand_dummy_function_end (); } /* Language-dependent initialization. Returns nonzero on success. */ static int lang_dependent_init (const char *name) { location_t save_loc = input_location; if (dump_base_name == 0) dump_base_name = name ? name : "gccdump"; /* Other front-end initialization. */ #ifdef USE_MAPPED_LOCATION input_location = BUILTINS_LOCATION; #else input_filename = ""; input_line = 0; #endif if (lang_hooks.init () == 0) return 0; input_location = save_loc; init_asm_output (name); /* These create various _DECL nodes, so need to be called after the front end is initialized. */ init_eh (); init_optabs (); init_optimization_passes (); /* The following initialization functions need to generate rtl, so provide a dummy function context for them. */ init_dummy_function_start (); init_expr_once (); expand_dummy_function_end (); /* If dbx symbol table desired, initialize writing it and output the predefined types. */ timevar_push (TV_SYMOUT); #ifdef DWARF2_UNWIND_INFO if (dwarf2out_do_frame ()) dwarf2out_frame_init (); #endif /* Now we have the correct original filename, we can initialize debug output. */ (*debug_hooks->init) (name); timevar_pop (TV_SYMOUT); return 1; } /* Clean up: close opened files, etc. */ static void finalize_toplev (void) { /* Close the dump files. */ if (flag_gen_aux_info) { fclose (aux_info_file); if (errorcount) unlink (aux_info_file_name); } /* Close non-debugging input and output files. 
Take special care to note whether fclose returns an error, since the pages might still be on the buffer chain while the file is open. */ if (asm_out_file) { if (ferror (asm_out_file) != 0) fatal_error ("error writing to %s: %m", asm_file_name); if (fclose (asm_out_file) != 0) fatal_error ("error closing %s: %m", asm_file_name); } finish_optimization_passes (); if (mem_report) { ggc_print_statistics (); stringpool_statistics (); dump_tree_statistics (); dump_rtx_statistics (); dump_varray_statistics (); dump_alloc_pool_statistics (); dump_ggc_loc_statistics (); } /* Free up memory for the benefit of leak detectors. */ free_reg_info (); /* Language-specific end of compilation actions. */ lang_hooks.finish (); } /* Initialize the compiler, and compile the input file. */ static void do_compile (void) { /* Initialize timing first. The C front ends read the main file in the post_options hook, and C++ does file timings. */ if (time_report || !quiet_flag || flag_detailed_statistics) timevar_init (); timevar_start (TV_TOTAL); process_options (); /* Don't do any more if an error has already occurred. */ if (!errorcount) { /* Set up the back-end if requested. */ if (!no_backend) backend_init (); /* Language-dependent initialization. Returns true on success. */ if (lang_dependent_init (main_input_filename)) compile_file (); finalize_toplev (); } /* Stop timing and print the times. */ timevar_stop (TV_TOTAL); timevar_print (stderr); } /* Entry point of cc1, cc1plus, jc1, f771, etc. Exit code is FATAL_EXIT_CODE if can't open files or if there were any errors, or SUCCESS_EXIT_CODE if compilation succeeded. It is not safe to call this function more than once. */ int toplev_main (unsigned int argc, const char **argv) { save_argv = argv; /* Initialization of GCC's environment, and diagnostics. */ general_init (argv[0]); /* Parse the options and do minimal processing; basically just enough to default flags appropriately. */ decode_options (argc, argv); randomize (); /* Exit early if we can (e.g. -help). */ if (!exit_after_options) do_compile (); if (errorcount || sorrycount) return (FATAL_EXIT_CODE); return (SUCCESS_EXIT_CODE); } /* The tracer pass for the GNU compiler. Contributed by Jan Hubicka, SuSE Labs. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This pass performs the tail duplication needed for superblock formation. For more information see: Design and Analysis of Profile-Based Optimization in Compaq's Compilation Tools for Alpha; Journal of Instruction-Level Parallelism 3 (2000) 1-25 Unlike Compaq's implementation we don't do the loop peeling as most probably a better job can be done by a special pass and we don't need to worry too much about the code size implications as the tail duplicates are crossjumped again if optimizations are not performed. 
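   As a rough illustration (not taken from the papers cited above), suppose
   a join block D has a hot predecessor B and a cold predecessor C:

     A -> B -> D -> E
     A -> C -> D

   Tail duplication copies D into a new block D' reached only from B:

     A -> B -> D' -> E      (hot path, now a superblock with no side entry)
     A -> C -> D  -> E

   The copy is made with duplicate_block; the original D keeps the
   remaining, colder predecessors.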
*/ static int count_insns (basic_block); static bool ignore_bb_p (basic_block); static bool better_p (edge, edge); static edge find_best_successor (basic_block); static edge find_best_predecessor (basic_block); static int find_trace (basic_block, basic_block *); static void tail_duplicate (void); static void layout_superblocks (void); /* Minimal outgoing edge probability considered for superblock formation. */ static int probability_cutoff; static int branch_ratio_cutoff; /* Return true if BB has been seen - it is connected to some trace already. */ #define seen(bb) (bb->rbi->visited || bb->rbi->next) /* Return true if we should ignore the basic block for purposes of tracing. */ static bool ignore_bb_p (basic_block bb) { if (bb->index < 0) return true; if (!maybe_hot_bb_p (bb)) return true; return false; } /* Return number of instructions in the block. */ static int count_insns (basic_block bb) { rtx insn; int n = 0; for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn)) if (active_insn_p (insn)) n++; return n; } /* Return true if E1 is more frequent than E2. */ static bool better_p (edge e1, edge e2) { if (e1->count != e2->count) return e1->count > e2->count; if (e1->src->frequency * e1->probability != e2->src->frequency * e2->probability) return (e1->src->frequency * e1->probability > e2->src->frequency * e2->probability); /* This is needed to avoid changes in the decision after CFG is modified. */ if (e1->src != e2->src) return e1->src->index > e2->src->index; return e1->dest->index > e2->dest->index; } /* Return most frequent successor of basic block BB. */ static edge find_best_successor (basic_block bb) { edge e; edge best = NULL; for (e = bb->succ; e; e = e->succ_next) if (!best || better_p (e, best)) best = e; if (!best || ignore_bb_p (best->dest)) return NULL; if (best->probability <= probability_cutoff) return NULL; return best; } /* Return most frequent predecessor of basic block BB. */ static edge find_best_predecessor (basic_block bb) { edge e; edge best = NULL; for (e = bb->pred; e; e = e->pred_next) if (!best || better_p (e, best)) best = e; if (!best || ignore_bb_p (best->src)) return NULL; if (EDGE_FREQUENCY (best) * REG_BR_PROB_BASE < bb->frequency * branch_ratio_cutoff) return NULL; return best; } /* Find the trace using bb and record it in the TRACE array. Return number of basic blocks recorded. */ static int find_trace (basic_block bb, basic_block *trace) { int i = 0; edge e; if (dump_file) fprintf (dump_file, "Trace seed %i [%i]", bb->index, bb->frequency); while ((e = find_best_predecessor (bb)) != NULL) { basic_block bb2 = e->src; if (seen (bb2) || (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX)) || find_best_successor (bb2) != e) break; if (dump_file) fprintf (dump_file, ",%i [%i]", bb->index, bb->frequency); bb = bb2; } if (dump_file) fprintf (dump_file, " forward %i [%i]", bb->index, bb->frequency); trace[i++] = bb; /* Follow the trace in forward direction. */ while ((e = find_best_successor (bb)) != NULL) { bb = e->dest; if (seen (bb) || (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX)) || find_best_predecessor (bb) != e) break; if (dump_file) fprintf (dump_file, ",%i [%i]", bb->index, bb->frequency); trace[i++] = bb; } if (dump_file) fprintf (dump_file, "\n"); return i; } /* Look for basic blocks in frequency order, construct traces and tail duplicate if profitable. 
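   A worked example of the cutoffs computed below, using the parameter
   defaults from params.def and GCC's REG_BR_PROB_BASE of 10000: with
   tracer-min-branch-probability at 50, probability_cutoff becomes
   10000 / 100 * 50 = 5000, so a successor edge must be taken more than
   half the time for forward trace growth to continue; with
   tracer-dynamic-coverage at 75, duplication stops once the traced blocks
   cover 75% of the weighted instruction count.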
*/ static void tail_duplicate (void) { fibnode_t *blocks = xcalloc (last_basic_block, sizeof (fibnode_t)); basic_block *trace = xmalloc (sizeof (basic_block) * n_basic_blocks); int *counts = xmalloc (sizeof (int) * last_basic_block); int ninsns = 0, nduplicated = 0; gcov_type weighted_insns = 0, traced_insns = 0; fibheap_t heap = fibheap_new (); gcov_type cover_insns; int max_dup_insns; basic_block bb; if (profile_info && flag_branch_probabilities) probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK); else probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY); probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff; branch_ratio_cutoff = (REG_BR_PROB_BASE / 100 * PARAM_VALUE (TRACER_MIN_BRANCH_RATIO)); FOR_EACH_BB (bb) { int n = count_insns (bb); if (!ignore_bb_p (bb)) blocks[bb->index] = fibheap_insert (heap, -bb->frequency, bb); counts [bb->index] = n; ninsns += n; weighted_insns += n * bb->frequency; } if (profile_info && flag_branch_probabilities) cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE_FEEDBACK); else cover_insns = PARAM_VALUE (TRACER_DYNAMIC_COVERAGE); cover_insns = (weighted_insns * cover_insns + 50) / 100; max_dup_insns = (ninsns * PARAM_VALUE (TRACER_MAX_CODE_GROWTH) + 50) / 100; while (traced_insns < cover_insns && nduplicated < max_dup_insns && !fibheap_empty (heap)) { basic_block bb = fibheap_extract_min (heap); int n, pos; if (!bb) break; blocks[bb->index] = NULL; if (ignore_bb_p (bb)) continue; if (seen (bb)) abort (); n = find_trace (bb, trace); bb = trace[0]; traced_insns += bb->frequency * counts [bb->index]; if (blocks[bb->index]) { fibheap_delete_node (heap, blocks[bb->index]); blocks[bb->index] = NULL; } for (pos = 1; pos < n; pos++) { basic_block bb2 = trace[pos]; if (blocks[bb2->index]) { fibheap_delete_node (heap, blocks[bb2->index]); blocks[bb2->index] = NULL; } traced_insns += bb2->frequency * counts [bb2->index]; if (bb2->pred && bb2->pred->pred_next && can_duplicate_block_p (bb2)) { edge e = bb2->pred; basic_block old = bb2; while (e->src != bb) e = e->pred_next; nduplicated += counts [bb2->index]; bb2 = duplicate_block (bb2, e); /* Reconsider the original copy of block we've duplicated. Removing the most common predecessor may make it to be head. */ blocks[old->index] = fibheap_insert (heap, -old->frequency, old); if (dump_file) fprintf (dump_file, "Duplicated %i as %i [%i]\n", old->index, bb2->index, bb2->frequency); } bb->rbi->next = bb2; bb2->rbi->visited = 1; bb = bb2; /* In case the trace became infrequent, stop duplicating. */ if (ignore_bb_p (bb)) break; } if (dump_file) fprintf (dump_file, " covered now %.1f\n\n", traced_insns * 100.0 / weighted_insns); } if (dump_file) fprintf (dump_file, "Duplicated %i insns (%i%%)\n", nduplicated, nduplicated * 100 / ninsns); free (blocks); free (trace); free (counts); fibheap_delete (heap); } /* Connect the superblocks into linear sequence. At the moment we attempt to keep the original order as much as possible, but the algorithm may be made smarter later if needed. BB reordering pass should void most of the benefits of such change though. 
*/ static void layout_superblocks (void) { basic_block end = ENTRY_BLOCK_PTR->succ->dest; basic_block bb = ENTRY_BLOCK_PTR->succ->dest->next_bb; while (bb != EXIT_BLOCK_PTR) { edge e, best = NULL; while (end->rbi->next) end = end->rbi->next; for (e = end->succ; e; e = e->succ_next) if (e->dest != EXIT_BLOCK_PTR && e->dest != ENTRY_BLOCK_PTR->succ->dest && !e->dest->rbi->visited && (!best || EDGE_FREQUENCY (e) > EDGE_FREQUENCY (best))) best = e; if (best) { end->rbi->next = best->dest; best->dest->rbi->visited = 1; } else for (; bb != EXIT_BLOCK_PTR; bb = bb->next_bb) { if (!bb->rbi->visited) { end->rbi->next = bb; bb->rbi->visited = 1; break; } } } } /* Main entry point to this file. */ void tracer (void) { if (n_basic_blocks <= 1) return; timevar_push (TV_TRACER); cfg_layout_initialize (); mark_dfs_back_edges (); if (dump_file) dump_flow_info (dump_file); tail_duplicate (); layout_superblocks (); if (dump_file) dump_flow_info (dump_file); cfg_layout_finalize (); /* Merge basic blocks in duplicated traces. */ cleanup_cfg (CLEANUP_EXPENSIVE); timevar_pop (TV_TRACER); } /* Language-independent node constructors for parse phase of GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains the low level primitives for operating on tree nodes, including allocation, list operations, interning of identifiers, construction of data type nodes and statement nodes, and construction of type conversion nodes. It also contains tables index by tree code that describe how to take apart nodes of that code. It is intended to be language-independent, but occasionally calls language-dependent routines defined (for C) in typecheck.c. */ /* obstack.[ch] explicitly declined to prototype this. */ extern int _obstack_allocated_p (struct obstack *h, void *obj); #ifdef GATHER_STATISTICS /* Statistics-gathering stuff. */ int tree_node_counts[(int) all_kinds]; int tree_node_sizes[(int) all_kinds]; /* Keep in sync with tree.h:enum tree_node_kind. */ static const char * const tree_node_kind_names[] = { "decls", "types", "blocks", "stmts", "refs", "exprs", "constants", "identifiers", "perm_tree_lists", "temp_tree_lists", "vecs", "binfos", "phi_nodes", "ssa names", "random kinds", "lang_decl kinds", "lang_type kinds" }; #endif /* GATHER_STATISTICS */ /* Unique id for next decl created. */ static GTY(()) int next_decl_uid; /* Unique id for next type created. */ static GTY(()) int next_type_uid = 1; /* Since we cannot rehash a type after it is in the table, we have to keep the hash code. */ struct type_hash GTY(()) { unsigned long hash; tree type; }; /* Additional language-dependent binfo slots. */ unsigned binfo_lang_slots; /* Initial size of the hash table (rounded to next prime). */ #define TYPE_HASH_INITIAL_SIZE 1000 /* Now here is the hash table. 
When recording a type, it is added to the slot whose index is the hash code. Note that the hash table is used for several kinds of types (function types, array types and array index range types, for now). While all these live in the same table, they are completely independent, and the hash code is computed differently for each of these. */ static GTY ((if_marked ("type_hash_marked_p"), param_is (struct type_hash))) htab_t type_hash_table; static void set_type_quals (tree, int); static int type_hash_eq (const void *, const void *); static hashval_t type_hash_hash (const void *); static void print_type_hash_statistics (void); static void finish_vector_type (tree); static int type_hash_marked_p (const void *); static unsigned int type_hash_list (tree, hashval_t); static unsigned int attribute_hash_list (tree, hashval_t); tree global_trees[TI_MAX]; tree integer_types[itk_none]; /* Init tree.c. */ void init_ttree (void) { /* Initialize the hash table of types. */ type_hash_table = htab_create_ggc (TYPE_HASH_INITIAL_SIZE, type_hash_hash, type_hash_eq, 0); } /* The name of the object as the assembler will see it (but before any translations made by ASM_OUTPUT_LABELREF). Often this is the same as DECL_NAME. It is an IDENTIFIER_NODE. */ tree decl_assembler_name (tree decl) { if (!DECL_ASSEMBLER_NAME_SET_P (decl)) lang_hooks.set_decl_assembler_name (decl); return DECL_CHECK (decl)->decl.assembler_name; } /* Compute the number of bytes occupied by 'node'. This routine only looks at TREE_CODE and, if the code is TREE_VEC, TREE_VEC_LENGTH. */ size_t tree_size (tree node) { enum tree_code code = TREE_CODE (node); switch (TREE_CODE_CLASS (code)) { case 'd': /* A decl node */ return sizeof (struct tree_decl); case 't': /* a type node */ return sizeof (struct tree_type); case 'r': /* a reference */ case 'e': /* an expression */ case 's': /* an expression with side effects */ case '<': /* a comparison expression */ case '1': /* a unary arithmetic expression */ case '2': /* a binary arithmetic expression */ return (sizeof (struct tree_exp) + TREE_CODE_LENGTH (code) * sizeof (char *) - sizeof (char *)); case 'c': /* a constant */ switch (code) { case INTEGER_CST: return sizeof (struct tree_int_cst); case REAL_CST: return sizeof (struct tree_real_cst); case COMPLEX_CST: return sizeof (struct tree_complex); case VECTOR_CST: return sizeof (struct tree_vector); case STRING_CST: return sizeof (struct tree_string); default: return lang_hooks.tree_size (code); } case 'x': /* something random, like an identifier. */ switch (code) { case IDENTIFIER_NODE: return lang_hooks.identifier_size; case TREE_LIST: return sizeof (struct tree_list); case TREE_VEC: return (sizeof (struct tree_vec) + TREE_VEC_LENGTH(node) * sizeof(char *) - sizeof (char *)); case ERROR_MARK: case PLACEHOLDER_EXPR: return sizeof (struct tree_common); case PHI_NODE: return (sizeof (struct tree_phi_node) + (PHI_ARG_CAPACITY (node) - 1) * sizeof (struct phi_arg_d)); case SSA_NAME: return sizeof (struct tree_ssa_name); case STATEMENT_LIST: return sizeof (struct tree_statement_list); case BLOCK: return sizeof (struct tree_block); case VALUE_HANDLE: return sizeof (struct tree_value_handle); default: return lang_hooks.tree_size (code); } default: abort (); } } /* Return a newly allocated node of code CODE. For decl and type nodes, some other fields are initialized. The rest of the node is initialized to zero. Achoo! I got a code in the node. 
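   As a usage sketch, the INTEGER_CST constructor later in this file gets
   its node this way and then fills in the fields it cares about:

     tree t = make_node (INTEGER_CST);
     TREE_INT_CST_LOW (t) = low;
     TREE_INT_CST_HIGH (t) = hi;
     TREE_TYPE (t) = integer_type_node;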
*/ tree make_node_stat (enum tree_code code MEM_STAT_DECL) { tree t; int type = TREE_CODE_CLASS (code); size_t length; #ifdef GATHER_STATISTICS tree_node_kind kind; #endif struct tree_common ttmp; /* We can't allocate a TREE_VEC, PHI_NODE, or STRING_CST without knowing how many elements it will have. */ if (code == TREE_VEC || code == PHI_NODE) abort (); TREE_SET_CODE ((tree)&ttmp, code); length = tree_size ((tree)&ttmp); #ifdef GATHER_STATISTICS switch (type) { case 'd': /* A decl node */ kind = d_kind; break; case 't': /* a type node */ kind = t_kind; break; case 's': /* an expression with side effects */ kind = s_kind; break; case 'r': /* a reference */ kind = r_kind; break; case 'e': /* an expression */ case '<': /* a comparison expression */ case '1': /* a unary arithmetic expression */ case '2': /* a binary arithmetic expression */ kind = e_kind; break; case 'c': /* a constant */ kind = c_kind; break; case 'x': /* something random, like an identifier. */ if (code == IDENTIFIER_NODE) kind = id_kind; else if (code == TREE_VEC) kind = vec_kind; else if (code == TREE_BINFO) kind = binfo_kind; else if (code == PHI_NODE) kind = phi_kind; else if (code == SSA_NAME) kind = ssa_name_kind; else if (code == BLOCK) kind = b_kind; else kind = x_kind; break; default: abort (); } tree_node_counts[(int) kind]++; tree_node_sizes[(int) kind] += length; #endif t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT); memset (t, 0, length); TREE_SET_CODE (t, code); switch (type) { case 's': TREE_SIDE_EFFECTS (t) = 1; break; case 'd': if (code != FUNCTION_DECL) DECL_ALIGN (t) = 1; DECL_USER_ALIGN (t) = 0; DECL_IN_SYSTEM_HEADER (t) = in_system_header; DECL_SOURCE_LOCATION (t) = input_location; if (code == TRANSLATION_UNIT_DECL) DECL_UID (t) = cur_in_fname; else DECL_UID (t) = next_decl_uid++; /* We have not yet computed the alias set for this declaration. */ DECL_POINTER_ALIAS_SET (t) = -1; break; case 't': TYPE_UID (t) = next_type_uid++; TYPE_ALIGN (t) = char_type_node ? TYPE_ALIGN (char_type_node) : 0; TYPE_USER_ALIGN (t) = 0; TYPE_MAIN_VARIANT (t) = t; /* Default to no attributes for type, but let target change that. */ TYPE_ATTRIBUTES (t) = NULL_TREE; targetm.set_default_type_attributes (t); /* We have not yet computed the alias set for this type. */ TYPE_ALIAS_SET (t) = -1; break; case 'c': TREE_CONSTANT (t) = 1; TREE_INVARIANT (t) = 1; break; case 'e': switch (code) { case INIT_EXPR: case MODIFY_EXPR: case VA_ARG_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: /* All of these have side-effects, no matter what their operands are. */ TREE_SIDE_EFFECTS (t) = 1; break; default: break; } break; } return t; } /* Return a new node with the same contents as NODE except that its TREE_CHAIN is zero and it has a fresh uid. */ tree copy_node_stat (tree node MEM_STAT_DECL) { tree t; enum tree_code code = TREE_CODE (node); size_t length; #ifdef ENABLE_CHECKING if (code == STATEMENT_LIST) abort (); #endif length = tree_size (node); t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT); memcpy (t, node, length); TREE_CHAIN (t) = 0; TREE_ASM_WRITTEN (t) = 0; TREE_VISITED (t) = 0; t->common.ann = 0; if (TREE_CODE_CLASS (code) == 'd' && code != TRANSLATION_UNIT_DECL) DECL_UID (t) = next_decl_uid++; else if (TREE_CODE_CLASS (code) == 't') { TYPE_UID (t) = next_type_uid++; /* The following is so that the debug code for the copy is different from the original type. 
The two statements usually duplicate each other (because they clear fields of the same union), but the optimizer should catch that. */ TYPE_SYMTAB_POINTER (t) = 0; TYPE_SYMTAB_ADDRESS (t) = 0; } return t; } /* Return a copy of a chain of nodes, chained through the TREE_CHAIN field. For example, this can copy a list made of TREE_LIST nodes. */ tree copy_list (tree list) { tree head; tree prev, next; if (list == 0) return 0; head = prev = copy_node (list); next = TREE_CHAIN (list); while (next) { TREE_CHAIN (prev) = copy_node (next); prev = TREE_CHAIN (prev); next = TREE_CHAIN (next); } return head; } /* Return a newly constructed INTEGER_CST node whose constant value is specified by the two ints LOW and HI. The TREE_TYPE is set to `int'. This function should be used via the `build_int_2' macro. */ tree build_int_2_wide (unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) { tree t = make_node (INTEGER_CST); TREE_INT_CST_LOW (t) = low; TREE_INT_CST_HIGH (t) = hi; TREE_TYPE (t) = integer_type_node; return t; } /* Return a new VECTOR_CST node whose type is TYPE and whose values are in a list pointed by VALS. */ tree build_vector (tree type, tree vals) { tree v = make_node (VECTOR_CST); int over1 = 0, over2 = 0; tree link; TREE_VECTOR_CST_ELTS (v) = vals; TREE_TYPE (v) = type; /* Iterate through elements and check for overflow. */ for (link = vals; link; link = TREE_CHAIN (link)) { tree value = TREE_VALUE (link); over1 |= TREE_OVERFLOW (value); over2 |= TREE_CONSTANT_OVERFLOW (value); } TREE_OVERFLOW (v) = over1; TREE_CONSTANT_OVERFLOW (v) = over2; return v; } /* Return a new CONSTRUCTOR node whose type is TYPE and whose values are in a list pointed to by VALS. */ tree build_constructor (tree type, tree vals) { tree c = make_node (CONSTRUCTOR); TREE_TYPE (c) = type; CONSTRUCTOR_ELTS (c) = vals; /* ??? May not be necessary. Mirrors what build does. */ if (vals) { TREE_SIDE_EFFECTS (c) = TREE_SIDE_EFFECTS (vals); TREE_READONLY (c) = TREE_READONLY (vals); TREE_CONSTANT (c) = TREE_CONSTANT (vals); TREE_INVARIANT (c) = TREE_INVARIANT (vals); } return c; } /* Return a new REAL_CST node whose type is TYPE and value is D. */ tree build_real (tree type, REAL_VALUE_TYPE d) { tree v; REAL_VALUE_TYPE *dp; int overflow = 0; /* ??? Used to check for overflow here via CHECK_FLOAT_TYPE. Consider doing it via real_convert now. */ v = make_node (REAL_CST); dp = ggc_alloc (sizeof (REAL_VALUE_TYPE)); memcpy (dp, &d, sizeof (REAL_VALUE_TYPE)); TREE_TYPE (v) = type; TREE_REAL_CST_PTR (v) = dp; TREE_OVERFLOW (v) = TREE_CONSTANT_OVERFLOW (v) = overflow; return v; } /* Return a new REAL_CST node whose type is TYPE and whose value is the integer value of the INTEGER_CST node I. */ REAL_VALUE_TYPE real_value_from_int_cst (tree type, tree i) { REAL_VALUE_TYPE d; /* Clear all bits of the real value type so that we can later do bitwise comparisons to see if two values are the same. */ memset (&d, 0, sizeof d); real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode, TREE_INT_CST_LOW (i), TREE_INT_CST_HIGH (i), TYPE_UNSIGNED (TREE_TYPE (i))); return d; } /* Given a tree representing an integer constant I, return a tree representing the same value as a floating-point constant of type TYPE. */ tree build_real_from_int_cst (tree type, tree i) { tree v; int overflow = TREE_OVERFLOW (i); v = build_real (type, real_value_from_int_cst (type, i)); TREE_OVERFLOW (v) |= overflow; TREE_CONSTANT_OVERFLOW (v) |= overflow; return v; } /* Return a newly constructed STRING_CST node whose value is the LEN characters at STR. 
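   (Added illustration:
       tree s = build_string (5, "hello");
   yields a STRING_CST whose TREE_STRING_LENGTH is 5 and whose characters
   are copied into GC-managed storage by ggc_alloc_string.)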
The TREE_TYPE is not initialized. */ tree build_string (int len, const char *str) { tree s = make_node (STRING_CST); TREE_STRING_LENGTH (s) = len; TREE_STRING_POINTER (s) = ggc_alloc_string (str, len); return s; } /* Return a newly constructed COMPLEX_CST node whose value is specified by the real and imaginary parts REAL and IMAG. Both REAL and IMAG should be constant nodes. TYPE, if specified, will be the type of the COMPLEX_CST; otherwise a new type will be made. */ tree build_complex (tree type, tree real, tree imag) { tree t = make_node (COMPLEX_CST); TREE_REALPART (t) = real; TREE_IMAGPART (t) = imag; TREE_TYPE (t) = type ? type : build_complex_type (TREE_TYPE (real)); TREE_OVERFLOW (t) = TREE_OVERFLOW (real) | TREE_OVERFLOW (imag); TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (real) | TREE_CONSTANT_OVERFLOW (imag); return t; } /* Build a BINFO with LEN language slots. */ tree make_tree_binfo_stat (unsigned lang_slots MEM_STAT_DECL) { tree t; static unsigned length; if (!length) { length = (offsetof (struct tree_binfo, lang_slots) + (sizeof (((struct tree_binfo *)0)->lang_slots[0]) * lang_slots)); binfo_lang_slots = lang_slots; } else if (binfo_lang_slots != lang_slots) abort (); #ifdef GATHER_STATISTICS tree_node_counts[(int) binfo_kind]++; tree_node_sizes[(int) binfo_kind] += length; #endif t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT); memset (t, 0, length); TREE_SET_CODE (t, TREE_BINFO); return t; } /* Build a newly constructed TREE_VEC node of length LEN. */ tree make_tree_vec_stat (int len MEM_STAT_DECL) { tree t; int length = (len - 1) * sizeof (tree) + sizeof (struct tree_vec); #ifdef GATHER_STATISTICS tree_node_counts[(int) vec_kind]++; tree_node_sizes[(int) vec_kind] += length; #endif t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT); memset (t, 0, length); TREE_SET_CODE (t, TREE_VEC); TREE_VEC_LENGTH (t) = len; return t; } /* Return 1 if EXPR is the integer constant zero or a complex constant of zero. */ int integer_zerop (tree expr) { STRIP_NOPS (expr); return ((TREE_CODE (expr) == INTEGER_CST && ! TREE_CONSTANT_OVERFLOW (expr) && TREE_INT_CST_LOW (expr) == 0 && TREE_INT_CST_HIGH (expr) == 0) || (TREE_CODE (expr) == COMPLEX_CST && integer_zerop (TREE_REALPART (expr)) && integer_zerop (TREE_IMAGPART (expr)))); } /* Return 1 if EXPR is the integer constant one or the corresponding complex constant. */ int integer_onep (tree expr) { STRIP_NOPS (expr); return ((TREE_CODE (expr) == INTEGER_CST && ! TREE_CONSTANT_OVERFLOW (expr) && TREE_INT_CST_LOW (expr) == 1 && TREE_INT_CST_HIGH (expr) == 0) || (TREE_CODE (expr) == COMPLEX_CST && integer_onep (TREE_REALPART (expr)) && integer_zerop (TREE_IMAGPART (expr)))); } /* Return 1 if EXPR is an integer containing all 1's in as much precision as it contains. Likewise for the corresponding complex constant. */ int integer_all_onesp (tree expr) { int prec; int uns; STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST && integer_all_onesp (TREE_REALPART (expr)) && integer_zerop (TREE_IMAGPART (expr))) return 1; else if (TREE_CODE (expr) != INTEGER_CST || TREE_CONSTANT_OVERFLOW (expr)) return 0; uns = TYPE_UNSIGNED (TREE_TYPE (expr)); if (!uns) return (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0 && TREE_INT_CST_HIGH (expr) == -1); /* Note that using TYPE_PRECISION here is wrong. We care about the actual bits, not the (arbitrary) range of the type. 
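   Worked example (added): with a 64-bit mode on a host where
   HOST_BITS_PER_WIDE_INT is 32, shift_amount below is 32, so high_value
   is taken as -1 and the test reduces to requiring that both the low and
   the high word be all ones.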
*/ prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr))); if (prec >= HOST_BITS_PER_WIDE_INT) { HOST_WIDE_INT high_value; int shift_amount; shift_amount = prec - HOST_BITS_PER_WIDE_INT; if (shift_amount > HOST_BITS_PER_WIDE_INT) /* Can not handle precisions greater than twice the host int size. */ abort (); else if (shift_amount == HOST_BITS_PER_WIDE_INT) /* Shifting by the host word size is undefined according to the ANSI standard, so we must handle this as a special case. */ high_value = -1; else high_value = ((HOST_WIDE_INT) 1 << shift_amount) - 1; return (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0 && TREE_INT_CST_HIGH (expr) == high_value); } else return TREE_INT_CST_LOW (expr) == ((unsigned HOST_WIDE_INT) 1 << prec) - 1; } /* Return 1 if EXPR is an integer constant that is a power of 2 (i.e., has only one bit on). */ int integer_pow2p (tree expr) { int prec; HOST_WIDE_INT high, low; STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST && integer_pow2p (TREE_REALPART (expr)) && integer_zerop (TREE_IMAGPART (expr))) return 1; if (TREE_CODE (expr) != INTEGER_CST || TREE_CONSTANT_OVERFLOW (expr)) return 0; prec = (POINTER_TYPE_P (TREE_TYPE (expr)) ? POINTER_SIZE : TYPE_PRECISION (TREE_TYPE (expr))); high = TREE_INT_CST_HIGH (expr); low = TREE_INT_CST_LOW (expr); /* First clear all bits that are beyond the type's precision in case we've been sign extended. */ if (prec == 2 * HOST_BITS_PER_WIDE_INT) ; else if (prec > HOST_BITS_PER_WIDE_INT) high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); else { high = 0; if (prec < HOST_BITS_PER_WIDE_INT) low &= ~((HOST_WIDE_INT) (-1) << prec); } if (high == 0 && low == 0) return 0; return ((high == 0 && (low & (low - 1)) == 0) || (low == 0 && (high & (high - 1)) == 0)); } /* Return 1 if EXPR is an integer constant other than zero or a complex constant other than zero. */ int integer_nonzerop (tree expr) { STRIP_NOPS (expr); return ((TREE_CODE (expr) == INTEGER_CST && ! TREE_CONSTANT_OVERFLOW (expr) && (TREE_INT_CST_LOW (expr) != 0 || TREE_INT_CST_HIGH (expr) != 0)) || (TREE_CODE (expr) == COMPLEX_CST && (integer_nonzerop (TREE_REALPART (expr)) || integer_nonzerop (TREE_IMAGPART (expr))))); } /* Return the power of two represented by a tree node known to be a power of two. */ int tree_log2 (tree expr) { int prec; HOST_WIDE_INT high, low; STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST) return tree_log2 (TREE_REALPART (expr)); prec = (POINTER_TYPE_P (TREE_TYPE (expr)) ? POINTER_SIZE : TYPE_PRECISION (TREE_TYPE (expr))); high = TREE_INT_CST_HIGH (expr); low = TREE_INT_CST_LOW (expr); /* First clear all bits that are beyond the type's precision in case we've been sign extended. */ if (prec == 2 * HOST_BITS_PER_WIDE_INT) ; else if (prec > HOST_BITS_PER_WIDE_INT) high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); else { high = 0; if (prec < HOST_BITS_PER_WIDE_INT) low &= ~((HOST_WIDE_INT) (-1) << prec); } return (high != 0 ? HOST_BITS_PER_WIDE_INT + exact_log2 (high) : exact_log2 (low)); } /* Similar, but return the largest integer Y such that 2 ** Y is less than or equal to EXPR. */ int tree_floor_log2 (tree expr) { int prec; HOST_WIDE_INT high, low; STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST) return tree_log2 (TREE_REALPART (expr)); prec = (POINTER_TYPE_P (TREE_TYPE (expr)) ? POINTER_SIZE : TYPE_PRECISION (TREE_TYPE (expr))); high = TREE_INT_CST_HIGH (expr); low = TREE_INT_CST_LOW (expr); /* First clear all bits that are beyond the type's precision in case we've been sign extended. 
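   (Added illustration: a constant of a type with 8-bit precision may
   arrive sign-extended with every host bit set; masking low with
   ~((HOST_WIDE_INT) (-1) << 8) and clearing high keeps only the low 8
   bits before the logarithm is taken.)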
Ignore if type's precision hasn't been set since what we are doing is setting it. */ if (prec == 2 * HOST_BITS_PER_WIDE_INT || prec == 0) ; else if (prec > HOST_BITS_PER_WIDE_INT) high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); else { high = 0; if (prec < HOST_BITS_PER_WIDE_INT) low &= ~((HOST_WIDE_INT) (-1) << prec); } return (high != 0 ? HOST_BITS_PER_WIDE_INT + floor_log2 (high) : floor_log2 (low)); } /* Return 1 if EXPR is the real constant zero. */ int real_zerop (tree expr) { STRIP_NOPS (expr); return ((TREE_CODE (expr) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (expr) && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst0)) || (TREE_CODE (expr) == COMPLEX_CST && real_zerop (TREE_REALPART (expr)) && real_zerop (TREE_IMAGPART (expr)))); } /* Return 1 if EXPR is the real constant one in real or complex form. */ int real_onep (tree expr) { STRIP_NOPS (expr); return ((TREE_CODE (expr) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (expr) && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst1)) || (TREE_CODE (expr) == COMPLEX_CST && real_onep (TREE_REALPART (expr)) && real_zerop (TREE_IMAGPART (expr)))); } /* Return 1 if EXPR is the real constant two. */ int real_twop (tree expr) { STRIP_NOPS (expr); return ((TREE_CODE (expr) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (expr) && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconst2)) || (TREE_CODE (expr) == COMPLEX_CST && real_twop (TREE_REALPART (expr)) && real_zerop (TREE_IMAGPART (expr)))); } /* Return 1 if EXPR is the real constant minus one. */ int real_minus_onep (tree expr) { STRIP_NOPS (expr); return ((TREE_CODE (expr) == REAL_CST && ! TREE_CONSTANT_OVERFLOW (expr) && REAL_VALUES_EQUAL (TREE_REAL_CST (expr), dconstm1)) || (TREE_CODE (expr) == COMPLEX_CST && real_minus_onep (TREE_REALPART (expr)) && real_zerop (TREE_IMAGPART (expr)))); } /* Nonzero if EXP is a constant or a cast of a constant. */ int really_constant_p (tree exp) { /* This is not quite the same as STRIP_NOPS. It does more. */ while (TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR || TREE_CODE (exp) == NON_LVALUE_EXPR) exp = TREE_OPERAND (exp, 0); return TREE_CONSTANT (exp); } /* Return first list element whose TREE_VALUE is ELEM. Return 0 if ELEM is not in LIST. */ tree value_member (tree elem, tree list) { while (list) { if (elem == TREE_VALUE (list)) return list; list = TREE_CHAIN (list); } return NULL_TREE; } /* Return first list element whose TREE_PURPOSE is ELEM. Return 0 if ELEM is not in LIST. */ tree purpose_member (tree elem, tree list) { while (list) { if (elem == TREE_PURPOSE (list)) return list; list = TREE_CHAIN (list); } return NULL_TREE; } /* Return first list element whose BINFO_TYPE is ELEM. Return 0 if ELEM is not in LIST. */ tree binfo_member (tree elem, tree list) { while (list) { if (elem == BINFO_TYPE (list)) return list; list = TREE_CHAIN (list); } return NULL_TREE; } /* Return nonzero if ELEM is part of the chain CHAIN. */ int chain_member (tree elem, tree chain) { while (chain) { if (elem == chain) return 1; chain = TREE_CHAIN (chain); } return 0; } /* Return the length of a chain of nodes chained through TREE_CHAIN. We expect a null pointer to mark the end of the chain. This is the Lisp primitive `length'. */ int list_length (tree t) { tree p = t; #ifdef ENABLE_TREE_CHECKING tree q = t; #endif int len = 0; while (p) { p = TREE_CHAIN (p); #ifdef ENABLE_TREE_CHECKING if (len % 2) q = TREE_CHAIN (q); if (p == q) abort (); #endif len++; } return len; } /* Returns the number of FIELD_DECLs in TYPE. 
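   Added illustration: for struct { int a; int b; } the result is 2;
   entries on TYPE_FIELDS that are not FIELD_DECLs, such as a TYPE_DECL
   for a nested type, are not counted.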
*/ int fields_length (tree type) { tree t = TYPE_FIELDS (type); int count = 0; for (; t; t = TREE_CHAIN (t)) if (TREE_CODE (t) == FIELD_DECL) ++count; return count; } /* Concatenate two chains of nodes (chained through TREE_CHAIN) by modifying the last node in chain 1 to point to chain 2. This is the Lisp primitive `nconc'. */ tree chainon (tree op1, tree op2) { tree t1; if (!op1) return op2; if (!op2) return op1; for (t1 = op1; TREE_CHAIN (t1); t1 = TREE_CHAIN (t1)) continue; TREE_CHAIN (t1) = op2; #ifdef ENABLE_TREE_CHECKING { tree t2; for (t2 = op2; t2; t2 = TREE_CHAIN (t2)) if (t2 == t1) abort (); /* Circularity created. */ } #endif return op1; } /* Return the last node in a chain of nodes (chained through TREE_CHAIN). */ tree tree_last (tree chain) { tree next; if (chain) while ((next = TREE_CHAIN (chain))) chain = next; return chain; } /* Reverse the order of elements in the chain T, and return the new head of the chain (old last element). */ tree nreverse (tree t) { tree prev = 0, decl, next; for (decl = t; decl; decl = next) { next = TREE_CHAIN (decl); TREE_CHAIN (decl) = prev; prev = decl; } return prev; } /* Return a newly created TREE_LIST node whose purpose and value fields are PARM and VALUE. */ tree build_tree_list_stat (tree parm, tree value MEM_STAT_DECL) { tree t = make_node_stat (TREE_LIST PASS_MEM_STAT); TREE_PURPOSE (t) = parm; TREE_VALUE (t) = value; return t; } /* Return a newly created TREE_LIST node whose purpose and value fields are PURPOSE and VALUE and whose TREE_CHAIN is CHAIN. */ tree tree_cons_stat (tree purpose, tree value, tree chain MEM_STAT_DECL) { tree node; node = ggc_alloc_zone_stat (sizeof (struct tree_list), tree_zone PASS_MEM_STAT); memset (node, 0, sizeof (struct tree_common)); #ifdef GATHER_STATISTICS tree_node_counts[(int) x_kind]++; tree_node_sizes[(int) x_kind] += sizeof (struct tree_list); #endif TREE_SET_CODE (node, TREE_LIST); TREE_CHAIN (node) = chain; TREE_PURPOSE (node) = purpose; TREE_VALUE (node) = value; return node; } /* Return the size nominally occupied by an object of type TYPE when it resides in memory. The value is measured in units of bytes, and its data type is that normally used for type sizes (which is the first type created by make_signed_type or make_unsigned_type). */ tree size_in_bytes (tree type) { tree t; if (type == error_mark_node) return integer_zero_node; type = TYPE_MAIN_VARIANT (type); t = TYPE_SIZE_UNIT (type); if (t == 0) { lang_hooks.types.incomplete_type_error (NULL_TREE, type); return size_zero_node; } if (TREE_CODE (t) == INTEGER_CST) force_fit_type (t, 0); return t; } /* Return the size of TYPE (in bytes) as a wide integer or return -1 if the size can vary or is larger than an integer. */ HOST_WIDE_INT int_size_in_bytes (tree type) { tree t; if (type == error_mark_node) return 0; type = TYPE_MAIN_VARIANT (type); t = TYPE_SIZE_UNIT (type); if (t == 0 || TREE_CODE (t) != INTEGER_CST || TREE_OVERFLOW (t) || TREE_INT_CST_HIGH (t) != 0 /* If the result would appear negative, it's too big to represent. */ || (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0) return -1; return TREE_INT_CST_LOW (t); } /* Return the bit position of FIELD, in bits from the start of the record. This is a tree of type bitsizetype. */ tree bit_position (tree field) { return bit_from_pos (DECL_FIELD_OFFSET (field), DECL_FIELD_BIT_OFFSET (field)); } /* Likewise, but return as an integer. Abort if it cannot be represented in that way (since it could be a signed value, we don't have the option of returning -1 like int_size_in_byte can. 
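   Added example (assuming a target where int is 4-byte aligned): for
   struct { char c; int i; }, bit_position of the field i is the
   bitsizetype constant 32, and int_bit_position returns 32.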
*/ HOST_WIDE_INT int_bit_position (tree field) { return tree_low_cst (bit_position (field), 0); } /* Return the byte position of FIELD, in bytes from the start of the record. This is a tree of type sizetype. */ tree byte_position (tree field) { return byte_from_pos (DECL_FIELD_OFFSET (field), DECL_FIELD_BIT_OFFSET (field)); } /* Likewise, but return as an integer. Abort if it cannot be represented in that way (since it could be a signed value, we don't have the option of returning -1 like int_size_in_byte can. */ HOST_WIDE_INT int_byte_position (tree field) { return tree_low_cst (byte_position (field), 0); } /* Return the strictest alignment, in bits, that T is known to have. */ unsigned int expr_align (tree t) { unsigned int align0, align1; switch (TREE_CODE (t)) { case NOP_EXPR: case CONVERT_EXPR: case NON_LVALUE_EXPR: /* If we have conversions, we know that the alignment of the object must meet each of the alignments of the types. */ align0 = expr_align (TREE_OPERAND (t, 0)); align1 = TYPE_ALIGN (TREE_TYPE (t)); return MAX (align0, align1); case SAVE_EXPR: case COMPOUND_EXPR: case MODIFY_EXPR: case INIT_EXPR: case TARGET_EXPR: case WITH_CLEANUP_EXPR: case CLEANUP_POINT_EXPR: case UNSAVE_EXPR: /* These don't change the alignment of an object. */ return expr_align (TREE_OPERAND (t, 0)); case COND_EXPR: /* The best we can do is say that the alignment is the least aligned of the two arms. */ align0 = expr_align (TREE_OPERAND (t, 1)); align1 = expr_align (TREE_OPERAND (t, 2)); return MIN (align0, align1); case LABEL_DECL: case CONST_DECL: case VAR_DECL: case PARM_DECL: case RESULT_DECL: if (DECL_ALIGN (t) != 0) return DECL_ALIGN (t); break; case FUNCTION_DECL: return FUNCTION_BOUNDARY; default: break; } /* Otherwise take the alignment from that of the type. */ return TYPE_ALIGN (TREE_TYPE (t)); } /* Return, as a tree node, the number of elements for TYPE (which is an ARRAY_TYPE) minus one. This counts only elements of the top array. */ tree array_type_nelts (tree type) { tree index_type, min, max; /* If they did it with unspecified bounds, then we should have already given an error about it before we got here. */ if (! TYPE_DOMAIN (type)) return error_mark_node; index_type = TYPE_DOMAIN (type); min = TYPE_MIN_VALUE (index_type); max = TYPE_MAX_VALUE (index_type); return (integer_zerop (min) ? max : fold (build2 (MINUS_EXPR, TREE_TYPE (max), max, min))); } /* Return nonzero if arg is static -- a reference to an object in static storage. This is not the same as the C meaning of `static'. */ int staticp (tree arg) { switch (TREE_CODE (arg)) { case FUNCTION_DECL: /* Nested functions aren't static, since taking their address involves a trampoline. */ return ((decl_function_context (arg) == 0 || DECL_NO_STATIC_CHAIN (arg)) && ! DECL_NON_ADDR_CONST_P (arg)); case VAR_DECL: return ((TREE_STATIC (arg) || DECL_EXTERNAL (arg)) && ! DECL_THREAD_LOCAL (arg) && ! DECL_NON_ADDR_CONST_P (arg)); case CONSTRUCTOR: return TREE_STATIC (arg); case LABEL_DECL: case STRING_CST: return 1; case COMPONENT_REF: /* If the thing being referenced is not a field, then it is something language specific. */ if (TREE_CODE (TREE_OPERAND (arg, 1)) != FIELD_DECL) return (*lang_hooks.staticp) (arg); /* If we are referencing a bitfield, we can't evaluate an ADDR_EXPR at compile time and so it isn't a constant. 
*/ if (DECL_BIT_FIELD (TREE_OPERAND (arg, 1))) return 0; return staticp (TREE_OPERAND (arg, 0)); case BIT_FIELD_REF: return 0; #if 0 /* This case is technically correct, but results in setting TREE_CONSTANT on ADDR_EXPRs that cannot be evaluated at compile time. */ case INDIRECT_REF: return TREE_CONSTANT (TREE_OPERAND (arg, 0)); #endif case ARRAY_REF: case ARRAY_RANGE_REF: if (TREE_CODE (TYPE_SIZE (TREE_TYPE (arg))) == INTEGER_CST && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST) return staticp (TREE_OPERAND (arg, 0)); else return 0; default: if ((unsigned int) TREE_CODE (arg) >= (unsigned int) LAST_AND_UNUSED_TREE_CODE) return lang_hooks.staticp (arg); else return 0; } } /* Wrap a SAVE_EXPR around EXPR, if appropriate. Do this to any expression which may be used in more than one place, but must be evaluated only once. Normally, expand_expr would reevaluate the expression each time. Calling save_expr produces something that is evaluated and recorded the first time expand_expr is called on it. Subsequent calls to expand_expr just reuse the recorded value. The call to expand_expr that generates code that actually computes the value is the first call *at compile time*. Subsequent calls *at compile time* generate code to use the saved value. This produces correct result provided that *at run time* control always flows through the insns made by the first expand_expr before reaching the other places where the save_expr was evaluated. You, the caller of save_expr, must make sure this is so. Constants, and certain read-only nodes, are returned with no SAVE_EXPR because that is safe. Expressions containing placeholders are not touched; see tree.def for an explanation of what these are used for. */ tree save_expr (tree expr) { tree t = fold (expr); tree inner; /* If the tree evaluates to a constant, then we don't want to hide that fact (i.e. this allows further folding, and direct checks for constants). However, a read-only object that has side effects cannot be bypassed. Since it is no problem to reevaluate literals, we just return the literal node. */ inner = skip_simple_arithmetic (t); if (TREE_INVARIANT (inner) || (TREE_READONLY (inner) && ! TREE_SIDE_EFFECTS (inner)) || TREE_CODE (inner) == SAVE_EXPR || TREE_CODE (inner) == ERROR_MARK) return t; /* If INNER contains a PLACEHOLDER_EXPR, we must evaluate it each time, since it means that the size or offset of some field of an object depends on the value within another field. Note that it must not be the case that T contains both a PLACEHOLDER_EXPR and some variable since it would then need to be both evaluated once and evaluated more than once. Front-ends must assure this case cannot happen by surrounding any such subexpressions in their own SAVE_EXPR and forcing evaluation at the proper time. */ if (contains_placeholder_p (inner)) return t; t = build1 (SAVE_EXPR, TREE_TYPE (expr), t); /* This expression might be placed ahead of a jump to ensure that the value was computed on both sides of the jump. So make sure it isn't eliminated as dead. */ TREE_SIDE_EFFECTS (t) = 1; TREE_READONLY (t) = 1; TREE_INVARIANT (t) = 1; return t; } /* Look inside EXPR and into any simple arithmetic operations. Return the innermost non-arithmetic node. */ tree skip_simple_arithmetic (tree expr) { tree inner; /* We don't care about whether this can be used as an lvalue in this context. 
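   (Added illustration: given a tree for SAVE_EXPR<n> + 4, this function
   walks through the PLUS_EXPR and returns the SAVE_EXPR, which is why
   save_expr above can return the sum unchanged instead of wrapping it in
   a second SAVE_EXPR.)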
*/ while (TREE_CODE (expr) == NON_LVALUE_EXPR) expr = TREE_OPERAND (expr, 0); /* If we have simple operations applied to a SAVE_EXPR or to a SAVE_EXPR and a constant, it will be more efficient to not make another SAVE_EXPR since it will allow better simplification and GCSE will be able to merge the computations if they actually occur. */ inner = expr; while (1) { if (TREE_CODE_CLASS (TREE_CODE (inner)) == '1') inner = TREE_OPERAND (inner, 0); else if (TREE_CODE_CLASS (TREE_CODE (inner)) == '2') { if (TREE_INVARIANT (TREE_OPERAND (inner, 1))) inner = TREE_OPERAND (inner, 0); else if (TREE_INVARIANT (TREE_OPERAND (inner, 0))) inner = TREE_OPERAND (inner, 1); else break; } else break; } return inner; } /* Arrange for an expression to be expanded multiple independent times. This is useful for cleanup actions, as the backend can expand them multiple times in different places. */ tree unsave_expr (tree expr) { tree t; /* If this is already protected, no sense in protecting it again. */ if (TREE_CODE (expr) == UNSAVE_EXPR) return expr; t = build1 (UNSAVE_EXPR, TREE_TYPE (expr), expr); TREE_SIDE_EFFECTS (t) = TREE_SIDE_EFFECTS (expr); return t; } /* Returns the index of the first non-tree operand for CODE, or the number of operands if all are trees. */ int first_rtl_op (enum tree_code code) { switch (code) { case GOTO_SUBROUTINE_EXPR: return 0; case WITH_CLEANUP_EXPR: return 2; default: return TREE_CODE_LENGTH (code); } } /* Return which tree structure is used by T. */ enum tree_node_structure_enum tree_node_structure (tree t) { enum tree_code code = TREE_CODE (t); switch (TREE_CODE_CLASS (code)) { case 'd': return TS_DECL; case 't': return TS_TYPE; case 'r': case '<': case '1': case '2': case 'e': case 's': return TS_EXP; default: /* 'c' and 'x' */ break; } switch (code) { /* 'c' cases. */ case INTEGER_CST: return TS_INT_CST; case REAL_CST: return TS_REAL_CST; case COMPLEX_CST: return TS_COMPLEX; case VECTOR_CST: return TS_VECTOR; case STRING_CST: return TS_STRING; /* 'x' cases. */ case ERROR_MARK: return TS_COMMON; case IDENTIFIER_NODE: return TS_IDENTIFIER; case TREE_LIST: return TS_LIST; case TREE_VEC: return TS_VEC; case PHI_NODE: return TS_PHI_NODE; case SSA_NAME: return TS_SSA_NAME; case PLACEHOLDER_EXPR: return TS_COMMON; case STATEMENT_LIST: return TS_STATEMENT_LIST; case BLOCK: return TS_BLOCK; case TREE_BINFO: return TS_BINFO; case VALUE_HANDLE: return TS_VALUE_HANDLE; default: abort (); } } /* Perform any modifications to EXPR required when it is unsaved. Does not recurse into EXPR's subtrees. */ void unsave_expr_1 (tree expr) { switch (TREE_CODE (expr)) { case TARGET_EXPR: /* Don't mess with a TARGET_EXPR that hasn't been expanded. It's OK for this to happen if it was part of a subtree that isn't immediately expanded, such as operand 2 of another TARGET_EXPR. */ if (TREE_OPERAND (expr, 1)) break; TREE_OPERAND (expr, 1) = TREE_OPERAND (expr, 3); TREE_OPERAND (expr, 3) = NULL_TREE; break; default: break; } } /* Return 0 if it is safe to evaluate EXPR multiple times, return 1 if it is safe if EXPR is unsaved afterward, or return 2 if it is completely unsafe. This assumes that CALL_EXPRs and TARGET_EXPRs are never replicated in an expression tree, so that it safe to unsave them and the surrounding context will be correct. SAVE_EXPRs basically *only* appear replicated in an expression tree, occasionally across the whole of a function. It is therefore only safe to unsave a SAVE_EXPR if you know that all occurrences appear below the UNSAVE_EXPR. 
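   Added illustration: a bare SAVE_EXPR yields 2, a LABEL_EXPR yields 1,
   and constants, types, declarations and other leaf nodes yield 0, as
   the switches below show.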
*/ int unsafe_for_reeval (tree expr) { int unsafeness = 0; enum tree_code code; int i, tmp, tmp2; tree exp; int first_rtl; if (expr == NULL_TREE) return 1; code = TREE_CODE (expr); first_rtl = first_rtl_op (code); switch (code) { case SAVE_EXPR: return 2; /* A label can only be emitted once. */ case LABEL_EXPR: return 1; case BIND_EXPR: unsafeness = 1; break; case TREE_LIST: for (exp = expr; exp != 0; exp = TREE_CHAIN (exp)) { tmp = unsafe_for_reeval (TREE_VALUE (exp)); unsafeness = MAX (tmp, unsafeness); } return unsafeness; case CALL_EXPR: tmp2 = unsafe_for_reeval (TREE_OPERAND (expr, 0)); tmp = unsafe_for_reeval (TREE_OPERAND (expr, 1)); return MAX (MAX (tmp, 1), tmp2); case TARGET_EXPR: unsafeness = 1; break; case EXIT_BLOCK_EXPR: /* EXIT_BLOCK_LABELED_BLOCK, a.k.a. TREE_OPERAND (expr, 0), holds a reference to an ancestor LABELED_BLOCK, so we need to avoid unbounded recursion in the 'e' traversal code below. */ exp = EXIT_BLOCK_RETURN (expr); return exp ? unsafe_for_reeval (exp) : 0; default: tmp = lang_hooks.unsafe_for_reeval (expr); if (tmp >= 0) return tmp; break; } switch (TREE_CODE_CLASS (code)) { case 'c': /* a constant */ case 't': /* a type node */ case 'x': /* something random, like an identifier or an ERROR_MARK. */ case 'd': /* A decl node */ return 0; case 'e': /* an expression */ case 'r': /* a reference */ case 's': /* an expression with side effects */ case '<': /* a comparison expression */ case '2': /* a binary arithmetic expression */ case '1': /* a unary arithmetic expression */ for (i = first_rtl - 1; i >= 0; i--) { tmp = unsafe_for_reeval (TREE_OPERAND (expr, i)); unsafeness = MAX (tmp, unsafeness); } return unsafeness; default: return 2; } } /* Return 1 if EXP contains a PLACEHOLDER_EXPR; i.e., if it represents a size or offset that depends on a field within a record. */ bool contains_placeholder_p (tree exp) { enum tree_code code; if (!exp) return 0; code = TREE_CODE (exp); if (code == PLACEHOLDER_EXPR) return 1; switch (TREE_CODE_CLASS (code)) { case 'r': /* Don't look at any PLACEHOLDER_EXPRs that might be in index or bit position computations since they will be converted into a WITH_RECORD_EXPR involving the reference, which will assume here will be valid. */ return CONTAINS_PLACEHOLDER_P (TREE_OPERAND (exp, 0)); case 'x': if (code == TREE_LIST) return (CONTAINS_PLACEHOLDER_P (TREE_VALUE (exp)) || CONTAINS_PLACEHOLDER_P (TREE_CHAIN (exp))); break; case '1': case '2': case '<': case 'e': switch (code) { case COMPOUND_EXPR: /* Ignoring the first operand isn't quite right, but works best. */ return CONTAINS_PLACEHOLDER_P (TREE_OPERAND (exp, 1)); case COND_EXPR: return (CONTAINS_PLACEHOLDER_P (TREE_OPERAND (exp, 0)) || CONTAINS_PLACEHOLDER_P (TREE_OPERAND (exp, 1)) || CONTAINS_PLACEHOLDER_P (TREE_OPERAND (exp, 2))); default: break; } switch (first_rtl_op (code)) { case 1: return CONTAINS_PLACEHOLDER_P (TREE_OPERAND (exp, 0)); case 2: return (CONTAINS_PLACEHOLDER_P (TREE_OPERAND (exp, 0)) || CONTAINS_PLACEHOLDER_P (TREE_OPERAND (exp, 1))); default: return 0; } default: return 0; } return 0; } /* Return 1 if any part of the computation of TYPE involves a PLACEHOLDER_EXPR. This includes size, bounds, qualifiers (for QUAL_UNION_TYPE) and field positions. */ bool type_contains_placeholder_p (tree type) { /* If the size contains a placeholder or the parent type (component type in the case of arrays) type involves a placeholder, this type does. 
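   (Added illustration, not from this file: the usual source of such
   types is a record whose field sizes or offsets depend on another field
   of the same record, as in Ada discriminated records; the size
   expression then contains a PLACEHOLDER_EXPR and this predicate
   returns 1.)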
*/ if (CONTAINS_PLACEHOLDER_P (TYPE_SIZE (type)) || CONTAINS_PLACEHOLDER_P (TYPE_SIZE_UNIT (type)) || (TREE_TYPE (type) != 0 && type_contains_placeholder_p (TREE_TYPE (type)))) return 1; /* Now do type-specific checks. Note that the last part of the check above greatly limits what we have to do below. */ switch (TREE_CODE (type)) { case VOID_TYPE: case COMPLEX_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE: case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE: case FILE_TYPE: case FUNCTION_TYPE: return 0; case INTEGER_TYPE: case REAL_TYPE: /* Here we just check the bounds. */ return (CONTAINS_PLACEHOLDER_P (TYPE_MIN_VALUE (type)) || CONTAINS_PLACEHOLDER_P (TYPE_MAX_VALUE (type))); case ARRAY_TYPE: case SET_TYPE: case VECTOR_TYPE: /* We're already checked the component type (TREE_TYPE), so just check the index type. */ return type_contains_placeholder_p (TYPE_DOMAIN (type)); case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { static tree seen_types = 0; tree field; bool ret = 0; /* We have to be careful here that we don't end up in infinite recursions due to a field of a type being a pointer to that type or to a mutually-recursive type. So we store a list of record types that we've seen and see if this type is in them. To save memory, we don't use a list for just one type. Here we check whether we've seen this type before and store it if not. */ if (seen_types == 0) seen_types = type; else if (TREE_CODE (seen_types) != TREE_LIST) { if (seen_types == type) return 0; seen_types = tree_cons (NULL_TREE, type, build_tree_list (NULL_TREE, seen_types)); } else { if (value_member (type, seen_types) != 0) return 0; seen_types = tree_cons (NULL_TREE, type, seen_types); } for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL && (CONTAINS_PLACEHOLDER_P (DECL_FIELD_OFFSET (field)) || (TREE_CODE (type) == QUAL_UNION_TYPE && CONTAINS_PLACEHOLDER_P (DECL_QUALIFIER (field))) || type_contains_placeholder_p (TREE_TYPE (field)))) { ret = true; break; } /* Now remove us from seen_types and return the result. */ if (seen_types == type) seen_types = 0; else seen_types = TREE_CHAIN (seen_types); return ret; } default: abort (); } } /* Return 1 if EXP contains any expressions that produce cleanups for an outer scope to deal with. Used by fold. */ int has_cleanups (tree exp) { int i, nops, cmp; if (! TREE_SIDE_EFFECTS (exp)) return 0; switch (TREE_CODE (exp)) { case TARGET_EXPR: case GOTO_SUBROUTINE_EXPR: case WITH_CLEANUP_EXPR: return 1; case CLEANUP_POINT_EXPR: return 0; case CALL_EXPR: for (exp = TREE_OPERAND (exp, 1); exp; exp = TREE_CHAIN (exp)) { cmp = has_cleanups (TREE_VALUE (exp)); if (cmp) return cmp; } return 0; case DECL_EXPR: return (DECL_INITIAL (DECL_EXPR_DECL (exp)) && has_cleanups (DECL_INITIAL (DECL_EXPR_DECL (exp)))); default: break; } /* This general rule works for most tree codes. All exceptions should be handled above. If this is a language-specific tree code, we can't trust what might be in the operand, so say we don't know the situation. 
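   (Added note: in that case the code below returns -1, a value callers
   have to treat as meaning that cleanups may be present.)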
*/ if ((int) TREE_CODE (exp) >= (int) LAST_AND_UNUSED_TREE_CODE) return -1; nops = first_rtl_op (TREE_CODE (exp)); for (i = 0; i < nops; i++) if (TREE_OPERAND (exp, i) != 0) { int type = TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, i))); if (type == 'e' || type == '<' || type == '1' || type == '2' || type == 'r' || type == 's') { cmp = has_cleanups (TREE_OPERAND (exp, i)); if (cmp) return cmp; } } return 0; } /* Given a tree EXP, a FIELD_DECL F, and a replacement value R, return a tree with all occurrences of references to F in a PLACEHOLDER_EXPR replaced by R. Note that we assume here that EXP contains only arithmetic expressions or a CALL_EXPR with a PLACEHOLDER_EXPR occurring only in its arglist. */ tree substitute_in_expr (tree exp, tree f, tree r) { enum tree_code code = TREE_CODE (exp); tree op0, op1, op2; tree new; tree inner; /* We handle TREE_LIST and COMPONENT_REF separately. */ if (code == TREE_LIST) { op0 = SUBSTITUTE_IN_EXPR (TREE_CHAIN (exp), f, r); op1 = SUBSTITUTE_IN_EXPR (TREE_VALUE (exp), f, r); if (op0 == TREE_CHAIN (exp) && op1 == TREE_VALUE (exp)) return exp; return tree_cons (TREE_PURPOSE (exp), op1, op0); } else if (code == COMPONENT_REF) { /* If this expression is getting a value from a PLACEHOLDER_EXPR and it is the right field, replace it with R. */ for (inner = TREE_OPERAND (exp, 0); TREE_CODE_CLASS (TREE_CODE (inner)) == 'r'; inner = TREE_OPERAND (inner, 0)) ; if (TREE_CODE (inner) == PLACEHOLDER_EXPR && TREE_OPERAND (exp, 1) == f) return r; /* If this expression hasn't been completed let, leave it alone. */ if (TREE_CODE (inner) == PLACEHOLDER_EXPR && TREE_TYPE (inner) == 0) return exp; op0 = SUBSTITUTE_IN_EXPR (TREE_OPERAND (exp, 0), f, r); if (op0 == TREE_OPERAND (exp, 0)) return exp; new = fold (build (code, TREE_TYPE (exp), op0, TREE_OPERAND (exp, 1), NULL_TREE)); } else switch (TREE_CODE_CLASS (code)) { case 'c': case 'd': return exp; case 'x': case '1': case '2': case '<': case 'e': case 'r': switch (first_rtl_op (code)) { case 0: return exp; case 1: op0 = SUBSTITUTE_IN_EXPR (TREE_OPERAND (exp, 0), f, r); if (op0 == TREE_OPERAND (exp, 0)) return exp; new = fold (build1 (code, TREE_TYPE (exp), op0)); break; case 2: op0 = SUBSTITUTE_IN_EXPR (TREE_OPERAND (exp, 0), f, r); op1 = SUBSTITUTE_IN_EXPR (TREE_OPERAND (exp, 1), f, r); if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)) return exp; new = fold (build2 (code, TREE_TYPE (exp), op0, op1)); break; case 3: op0 = SUBSTITUTE_IN_EXPR (TREE_OPERAND (exp, 0), f, r); op1 = SUBSTITUTE_IN_EXPR (TREE_OPERAND (exp, 1), f, r); op2 = SUBSTITUTE_IN_EXPR (TREE_OPERAND (exp, 2), f, r); if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1) && op2 == TREE_OPERAND (exp, 2)) return exp; new = fold (build3 (code, TREE_TYPE (exp), op0, op1, op2)); break; default: abort (); } break; default: abort (); } TREE_READONLY (new) = TREE_READONLY (exp); return new; } /* Similar, but look for a PLACEHOLDER_EXPR in EXP and find a replacement for it within OBJ, a tree that is an object or a chain of references. */ tree substitute_placeholder_in_expr (tree exp, tree obj) { enum tree_code code = TREE_CODE (exp); tree op0, op1, op2, op3; /* If this is a PLACEHOLDER_EXPR, see if we find a corresponding type in the chain of OBJ. */ if (code == PLACEHOLDER_EXPR) { tree need_type = TYPE_MAIN_VARIANT (TREE_TYPE (exp)); tree elt; for (elt = obj; elt != 0; elt = ((TREE_CODE (elt) == COMPOUND_EXPR || TREE_CODE (elt) == COND_EXPR) ? 
TREE_OPERAND (elt, 1) : (TREE_CODE_CLASS (TREE_CODE (elt)) == 'r' || TREE_CODE_CLASS (TREE_CODE (elt)) == '1' || TREE_CODE_CLASS (TREE_CODE (elt)) == '2' || TREE_CODE_CLASS (TREE_CODE (elt)) == 'e') ? TREE_OPERAND (elt, 0) : 0)) if (TYPE_MAIN_VARIANT (TREE_TYPE (elt)) == need_type) return elt; for (elt = obj; elt != 0; elt = ((TREE_CODE (elt) == COMPOUND_EXPR || TREE_CODE (elt) == COND_EXPR) ? TREE_OPERAND (elt, 1) : (TREE_CODE_CLASS (TREE_CODE (elt)) == 'r' || TREE_CODE_CLASS (TREE_CODE (elt)) == '1' || TREE_CODE_CLASS (TREE_CODE (elt)) == '2' || TREE_CODE_CLASS (TREE_CODE (elt)) == 'e') ? TREE_OPERAND (elt, 0) : 0)) if (POINTER_TYPE_P (TREE_TYPE (elt)) && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (elt))) == need_type)) return fold (build1 (INDIRECT_REF, need_type, elt)); /* If we didn't find it, return the original PLACEHOLDER_EXPR. If it survives until RTL generation, there will be an error. */ return exp; } /* TREE_LIST is special because we need to look at TREE_VALUE and TREE_CHAIN, not TREE_OPERANDS. */ else if (code == TREE_LIST) { op0 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_CHAIN (exp), obj); op1 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_VALUE (exp), obj); if (op0 == TREE_CHAIN (exp) && op1 == TREE_VALUE (exp)) return exp; return tree_cons (TREE_PURPOSE (exp), op1, op0); } else switch (TREE_CODE_CLASS (code)) { case 'c': case 'd': return exp; case 'x': case '1': case '2': case '<': case 'e': case 'r': case 's': switch (first_rtl_op (code)) { case 0: return exp; case 1: op0 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 0), obj); if (op0 == TREE_OPERAND (exp, 0)) return exp; else return fold (build1 (code, TREE_TYPE (exp), op0)); case 2: op0 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 0), obj); op1 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 1), obj); if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1)) return exp; else return fold (build2 (code, TREE_TYPE (exp), op0, op1)); case 3: op0 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 0), obj); op1 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 1), obj); op2 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 2), obj); if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1) && op2 == TREE_OPERAND (exp, 2)) return exp; else return fold (build3 (code, TREE_TYPE (exp), op0, op1, op2)); case 4: op0 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 0), obj); op1 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 1), obj); op2 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 2), obj); op3 = SUBSTITUTE_PLACEHOLDER_IN_EXPR (TREE_OPERAND (exp, 3), obj); if (op0 == TREE_OPERAND (exp, 0) && op1 == TREE_OPERAND (exp, 1) && op2 == TREE_OPERAND (exp, 2) && op3 == TREE_OPERAND (exp, 3)) return exp; else return fold (build4 (code, TREE_TYPE (exp), op0, op1, op2, op3)); default: abort (); } break; default: abort (); } } /* Stabilize a reference so that we can use it any number of times without causing its operands to be evaluated more than once. Returns the stabilized reference. This works by means of save_expr, so see the caveats in the comments about save_expr. Also allows conversion expressions whose operands are references. Any other kind of expression is returned unchanged. */ tree stabilize_reference (tree ref) { tree result; enum tree_code code = TREE_CODE (ref); switch (code) { case VAR_DECL: case PARM_DECL: case RESULT_DECL: /* No action is needed in this case. 
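   A bare declaration has no side effects of its own, so re-evaluating it
   is always safe.  (Added sketch of the function as a whole: stabilizing
   a[i++].f recurses on the array, passes the index i++ through
   stabilize_reference_1, which wraps it in a SAVE_EXPR because it has
   side effects, and rebuilds the COMPONENT_REF on top.)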
*/ return ref; case NOP_EXPR: case CONVERT_EXPR: case FLOAT_EXPR: case FIX_TRUNC_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: case FIX_CEIL_EXPR: result = build_nt (code, stabilize_reference (TREE_OPERAND (ref, 0))); break; case INDIRECT_REF: result = build_nt (INDIRECT_REF, stabilize_reference_1 (TREE_OPERAND (ref, 0))); break; case COMPONENT_REF: result = build_nt (COMPONENT_REF, stabilize_reference (TREE_OPERAND (ref, 0)), TREE_OPERAND (ref, 1), NULL_TREE); break; case BIT_FIELD_REF: result = build_nt (BIT_FIELD_REF, stabilize_reference (TREE_OPERAND (ref, 0)), stabilize_reference_1 (TREE_OPERAND (ref, 1)), stabilize_reference_1 (TREE_OPERAND (ref, 2))); break; case ARRAY_REF: result = build_nt (ARRAY_REF, stabilize_reference (TREE_OPERAND (ref, 0)), stabilize_reference_1 (TREE_OPERAND (ref, 1)), TREE_OPERAND (ref, 2), TREE_OPERAND (ref, 3)); break; case ARRAY_RANGE_REF: result = build_nt (ARRAY_RANGE_REF, stabilize_reference (TREE_OPERAND (ref, 0)), stabilize_reference_1 (TREE_OPERAND (ref, 1)), TREE_OPERAND (ref, 2), TREE_OPERAND (ref, 3)); break; case COMPOUND_EXPR: /* We cannot wrap the first expression in a SAVE_EXPR, as then it wouldn't be ignored. This matters when dealing with volatiles. */ return stabilize_reference_1 (ref); /* If arg isn't a kind of lvalue we recognize, make no change. Caller should recognize the error for an invalid lvalue. */ default: return ref; case ERROR_MARK: return error_mark_node; } TREE_TYPE (result) = TREE_TYPE (ref); TREE_READONLY (result) = TREE_READONLY (ref); TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (ref); TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (ref); return result; } /* Subroutine of stabilize_reference; this is called for subtrees of references. Any expression with side-effects must be put in a SAVE_EXPR to ensure that it is only evaluated once. We don't put SAVE_EXPR nodes around everything, because assigning very simple expressions to temporaries causes us to miss good opportunities for optimizations. Among other things, the opportunity to fold in the addition of a constant into an addressing mode often gets lost, e.g. "y[i+1] += x;". In general, we take the approach that we should not make an assignment unless we are forced into it - i.e., that any non-side effect operator should be allowed, and that cse should take care of coalescing multiple utterances of the same expression should that prove fruitful. */ tree stabilize_reference_1 (tree e) { tree result; enum tree_code code = TREE_CODE (e); /* We cannot ignore const expressions because it might be a reference to a const array but whose index contains side-effects. But we can ignore things that are actual constant or that already have been handled by this function. */ if (TREE_INVARIANT (e)) return e; switch (TREE_CODE_CLASS (code)) { case 'x': case 't': case 'd': case '<': case 's': case 'e': case 'r': /* If the expression has side-effects, then encase it in a SAVE_EXPR so that it will only be evaluated once. */ /* The reference (r) and comparison (<) classes could be handled as below, but it is generally faster to only evaluate them once. */ if (TREE_SIDE_EFFECTS (e)) return save_expr (e); return e; case 'c': /* Constants need no processing. In fact, we should never reach here. */ return e; case '2': /* Division is slow and tends to be compiled with jumps, especially the division by powers of 2 that is often found inside of an array reference. So do it just once. 
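   (Added illustration: an index expression such as i / 8 appearing in an
   address computation is wrapped in a single SAVE_EXPR here rather than
   being recomputed at every use.)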
*/ if (code == TRUNC_DIV_EXPR || code == TRUNC_MOD_EXPR || code == FLOOR_DIV_EXPR || code == FLOOR_MOD_EXPR || code == CEIL_DIV_EXPR || code == CEIL_MOD_EXPR || code == ROUND_DIV_EXPR || code == ROUND_MOD_EXPR) return save_expr (e); /* Recursively stabilize each operand. */ result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0)), stabilize_reference_1 (TREE_OPERAND (e, 1))); break; case '1': /* Recursively stabilize each operand. */ result = build_nt (code, stabilize_reference_1 (TREE_OPERAND (e, 0))); break; default: abort (); } TREE_TYPE (result) = TREE_TYPE (e); TREE_READONLY (result) = TREE_READONLY (e); TREE_SIDE_EFFECTS (result) = TREE_SIDE_EFFECTS (e); TREE_THIS_VOLATILE (result) = TREE_THIS_VOLATILE (e); TREE_INVARIANT (result) = 1; return result; } /* Low-level constructors for expressions. */ /* A helper function for build1 and constant folders. Set TREE_CONSTANT, TREE_INVARIANT, and TREE_SIDE_EFFECTS for an ADDR_EXPR. */ void recompute_tree_invarant_for_addr_expr (tree t) { tree node; bool tc = true, ti = true, se = false; /* We started out assuming this address is both invariant and constant, but does not have side effects. Now go down any handled components and see if any of them involve offsets that are either non-constant or non-invariant. Also check for side-effects. ??? Note that this code makes no attempt to deal with the case where taking the address of something causes a copy due to misalignment. */ #define UPDATE_TITCSE(NODE) \ do { tree _node = (NODE); \ if (_node && !TREE_INVARIANT (_node)) ti = false; \ if (_node && !TREE_CONSTANT (_node)) tc = false; \ if (_node && TREE_SIDE_EFFECTS (_node)) se = true; } while (0) for (node = TREE_OPERAND (t, 0); handled_component_p (node); node = TREE_OPERAND (node, 0)) { /* If the first operand doesn't have an ARRAY_TYPE, this is a bogus array reference (probably made temporarily by the G++ front end), so ignore all the operands. */ if ((TREE_CODE (node) == ARRAY_REF || TREE_CODE (node) == ARRAY_RANGE_REF) && TREE_CODE (TREE_TYPE (TREE_OPERAND (node, 0))) == ARRAY_TYPE) { UPDATE_TITCSE (TREE_OPERAND (node, 1)); UPDATE_TITCSE (array_ref_low_bound (node)); UPDATE_TITCSE (array_ref_element_size (node)); } /* Likewise, just because this is a COMPONENT_REF doesn't mean we have a FIELD_DECL, apparently. The G++ front end can put something else there, at least temporarily. */ else if (TREE_CODE (node) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (node, 1)) == FIELD_DECL) UPDATE_TITCSE (component_ref_field_offset (node)); else if (TREE_CODE (node) == BIT_FIELD_REF) UPDATE_TITCSE (TREE_OPERAND (node, 2)); } /* Now see what's inside. If it's an INDIRECT_REF, copy our properties from it. If it's a decl, it's invariant and constant if the decl is static. It's also invariant if it's a decl in the current function. (Taking the address of a volatile variable is not volatile.) If it's a constant, the address is both invariant and constant. Otherwise it's neither. */ if (TREE_CODE (node) == INDIRECT_REF) UPDATE_TITCSE (node); else if (DECL_P (node)) { if (staticp (node)) ; else if (decl_function_context (node) == current_function_decl) tc = false; else ti = tc = false; } else if (TREE_CODE_CLASS (TREE_CODE (node)) == 'c') ; else { ti = tc = false; se |= TREE_SIDE_EFFECTS (node); } TREE_CONSTANT (t) = tc; TREE_INVARIANT (t) = ti; TREE_SIDE_EFFECTS (t) = se; #undef UPDATE_TITCSE } /* Build an expression of code CODE, data type TYPE, and operands as specified. Expressions and reference nodes can be created this way. 
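   (Added illustration: build2 (PLUS_EXPR, integer_type_node, a, b)
   creates a two-operand expression whose TREE_CONSTANT, TREE_READONLY,
   TREE_INVARIANT and TREE_SIDE_EFFECTS flags are derived below from the
   corresponding flags of a and b.)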
Constants, decls, types and misc nodes cannot be. We define 5 non-variadic functions, from 0 to 4 arguments. This is enough for all extant tree codes. These functions can be called directly (preferably!), but can also be obtained via GCC preprocessor magic within the build macro. */ tree build0_stat (enum tree_code code, tree tt MEM_STAT_DECL) { tree t; #ifdef ENABLE_CHECKING if (TREE_CODE_LENGTH (code) != 0) abort (); #endif t = make_node_stat (code PASS_MEM_STAT); TREE_TYPE (t) = tt; return t; } tree build1_stat (enum tree_code code, tree type, tree node MEM_STAT_DECL) { int length = sizeof (struct tree_exp); #ifdef GATHER_STATISTICS tree_node_kind kind; #endif tree t; #ifdef GATHER_STATISTICS switch (TREE_CODE_CLASS (code)) { case 's': /* an expression with side effects */ kind = s_kind; break; case 'r': /* a reference */ kind = r_kind; break; default: kind = e_kind; break; } tree_node_counts[(int) kind]++; tree_node_sizes[(int) kind] += length; #endif #ifdef ENABLE_CHECKING if (TREE_CODE_LENGTH (code) != 1) abort (); #endif /* ENABLE_CHECKING */ t = ggc_alloc_zone_stat (length, tree_zone PASS_MEM_STAT); memset (t, 0, sizeof (struct tree_common)); TREE_SET_CODE (t, code); TREE_TYPE (t) = type; #ifdef USE_MAPPED_LOCATION SET_EXPR_LOCATION (t, UNKNOWN_LOCATION); #else SET_EXPR_LOCUS (t, NULL); #endif TREE_COMPLEXITY (t) = 0; TREE_OPERAND (t, 0) = node; TREE_BLOCK (t) = NULL_TREE; if (node && !TYPE_P (node) && first_rtl_op (code) != 0) { TREE_SIDE_EFFECTS (t) = TREE_SIDE_EFFECTS (node); TREE_READONLY (t) = TREE_READONLY (node); } if (TREE_CODE_CLASS (code) == 's') TREE_SIDE_EFFECTS (t) = 1; else switch (code) { case INIT_EXPR: case MODIFY_EXPR: case VA_ARG_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: /* All of these have side-effects, no matter what their operands are. */ TREE_SIDE_EFFECTS (t) = 1; TREE_READONLY (t) = 0; break; case INDIRECT_REF: /* Whether a dereference is readonly has nothing to do with whether its operand is readonly. */ TREE_READONLY (t) = 0; break; case ADDR_EXPR: if (node) recompute_tree_invarant_for_addr_expr (t); break; default: if (TREE_CODE_CLASS (code) == '1' && node && !TYPE_P (node) && TREE_CONSTANT (node)) TREE_CONSTANT (t) = 1; if (TREE_CODE_CLASS (code) == '1' && node && TREE_INVARIANT (node)) TREE_INVARIANT (t) = 1; if (TREE_CODE_CLASS (code) == 'r' && node && TREE_THIS_VOLATILE (node)) TREE_THIS_VOLATILE (t) = 1; break; } return t; } #define PROCESS_ARG(N) \ do { \ TREE_OPERAND (t, N) = arg##N; \ if (arg##N &&!TYPE_P (arg##N) && fro > N) \ { \ if (TREE_SIDE_EFFECTS (arg##N)) \ side_effects = 1; \ if (!TREE_READONLY (arg##N)) \ read_only = 0; \ if (!TREE_CONSTANT (arg##N)) \ constant = 0; \ if (!TREE_INVARIANT (arg##N)) \ invariant = 0; \ } \ } while (0) tree build2_stat (enum tree_code code, tree tt, tree arg0, tree arg1 MEM_STAT_DECL) { bool constant, read_only, side_effects, invariant; tree t; int fro; #ifdef ENABLE_CHECKING if (TREE_CODE_LENGTH (code) != 2) abort (); #endif t = make_node_stat (code PASS_MEM_STAT); TREE_TYPE (t) = tt; /* Below, we automatically set TREE_SIDE_EFFECTS and TREE_READONLY for the result based on those same flags for the arguments. But if the arguments aren't really even `tree' expressions, we shouldn't be trying to do this. */ fro = first_rtl_op (code); /* Expressions without side effects may be constant if their arguments are as well. 
*/ constant = (TREE_CODE_CLASS (code) == '<' || TREE_CODE_CLASS (code) == '2'); read_only = 1; side_effects = TREE_SIDE_EFFECTS (t); invariant = constant; PROCESS_ARG(0); PROCESS_ARG(1); TREE_READONLY (t) = read_only; TREE_CONSTANT (t) = constant; TREE_INVARIANT (t) = invariant; TREE_SIDE_EFFECTS (t) = side_effects; TREE_THIS_VOLATILE (t) = TREE_CODE_CLASS (code) == 'r' && arg0 && TREE_THIS_VOLATILE (arg0); return t; } tree build3_stat (enum tree_code code, tree tt, tree arg0, tree arg1, tree arg2 MEM_STAT_DECL) { bool constant, read_only, side_effects, invariant; tree t; int fro; #ifdef ENABLE_CHECKING if (TREE_CODE_LENGTH (code) != 3) abort (); #endif t = make_node_stat (code PASS_MEM_STAT); TREE_TYPE (t) = tt; fro = first_rtl_op (code); side_effects = TREE_SIDE_EFFECTS (t); PROCESS_ARG(0); PROCESS_ARG(1); PROCESS_ARG(2); if (code == CALL_EXPR && !side_effects) { tree node; int i; /* Calls have side-effects, except those to const or pure functions. */ i = call_expr_flags (t); if (!(i & (ECF_CONST | ECF_PURE))) side_effects = 1; /* And even those have side-effects if their arguments do. */ else for (node = arg1; node; node = TREE_CHAIN (node)) if (TREE_SIDE_EFFECTS (TREE_VALUE (node))) { side_effects = 1; break; } } TREE_SIDE_EFFECTS (t) = side_effects; TREE_THIS_VOLATILE (t) = TREE_CODE_CLASS (code) == 'r' && arg0 && TREE_THIS_VOLATILE (arg0); return t; } tree build4_stat (enum tree_code code, tree tt, tree arg0, tree arg1, tree arg2, tree arg3 MEM_STAT_DECL) { bool constant, read_only, side_effects, invariant; tree t; int fro; #ifdef ENABLE_CHECKING if (TREE_CODE_LENGTH (code) != 4) abort (); #endif t = make_node_stat (code PASS_MEM_STAT); TREE_TYPE (t) = tt; fro = first_rtl_op (code); side_effects = TREE_SIDE_EFFECTS (t); PROCESS_ARG(0); PROCESS_ARG(1); PROCESS_ARG(2); PROCESS_ARG(3); TREE_SIDE_EFFECTS (t) = side_effects; TREE_THIS_VOLATILE (t) = TREE_CODE_CLASS (code) == 'r' && arg0 && TREE_THIS_VOLATILE (arg0); return t; } /* Backup definition for non-gcc build compilers. */ tree (build) (enum tree_code code, tree tt, ...) { tree t, arg0, arg1, arg2, arg3; int length = TREE_CODE_LENGTH (code); va_list p; va_start (p, tt); switch (length) { case 0: t = build0 (code, tt); break; case 1: arg0 = va_arg (p, tree); t = build1 (code, tt, arg0); break; case 2: arg0 = va_arg (p, tree); arg1 = va_arg (p, tree); t = build2 (code, tt, arg0, arg1); break; case 3: arg0 = va_arg (p, tree); arg1 = va_arg (p, tree); arg2 = va_arg (p, tree); t = build3 (code, tt, arg0, arg1, arg2); break; case 4: arg0 = va_arg (p, tree); arg1 = va_arg (p, tree); arg2 = va_arg (p, tree); arg3 = va_arg (p, tree); t = build4 (code, tt, arg0, arg1, arg2, arg3); break; default: abort (); } va_end (p); return t; } /* Similar except don't specify the TREE_TYPE and leave the TREE_SIDE_EFFECTS as 0. It is permissible for arguments to be null, or even garbage if their values do not matter. */ tree build_nt (enum tree_code code, ...) { tree t; int length; int i; va_list p; va_start (p, code); t = make_node (code); length = TREE_CODE_LENGTH (code); for (i = 0; i < length; i++) TREE_OPERAND (t, i) = va_arg (p, tree); va_end (p); return t; } /* Create a DECL_... node of code CODE, name NAME and data type TYPE. We do NOT enter this node in any sort of symbol table. layout_decl is used to set up the decl's storage layout. Other slots are initialized to 0 or null pointers. 
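   Added sketch (the variable name is only for illustration): a front end
   might create a temporary with
       tree tmp = build_decl (VAR_DECL, get_identifier ("tmp"),
                              integer_type_node);
   layout_decl is run for VAR_DECL, PARM_DECL and RESULT_DECL nodes, while
   a FUNCTION_DECL instead gets DECL_MODE set to FUNCTION_MODE.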
*/ tree build_decl_stat (enum tree_code code, tree name, tree type MEM_STAT_DECL) { tree t; t = make_node_stat (code PASS_MEM_STAT); /* if (type == error_mark_node) type = integer_type_node; */ /* That is not done, deliberately, so that having error_mark_node as the type can suppress useless errors in the use of this variable. */ DECL_NAME (t) = name; TREE_TYPE (t) = type; if (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL) layout_decl (t, 0); else if (code == FUNCTION_DECL) DECL_MODE (t) = FUNCTION_MODE; return t; } /* BLOCK nodes are used to represent the structure of binding contours and declarations, once those contours have been exited and their contents compiled. This information is used for outputting debugging info. */ tree build_block (tree vars, tree tags ATTRIBUTE_UNUSED, tree subblocks, tree supercontext, tree chain) { tree block = make_node (BLOCK); BLOCK_VARS (block) = vars; BLOCK_SUBBLOCKS (block) = subblocks; BLOCK_SUPERCONTEXT (block) = supercontext; BLOCK_CHAIN (block) = chain; return block; } #if 1 /* ! defined(USE_MAPPED_LOCATION) */ /* ??? gengtype doesn't handle conditionals */ static GTY(()) tree last_annotated_node; #endif #ifdef USE_MAPPED_LOCATION expanded_location expand_location (source_location loc) { expanded_location xloc; if (loc == 0) { xloc.file = NULL; xloc.line = 0; } else { const struct line_map *map = linemap_lookup (&line_table, loc); xloc.file = map->to_file; xloc.line = SOURCE_LINE (map, loc); }; return xloc; } #else /* Record the exact location where an expression or an identifier were encountered. */ void annotate_with_file_line (tree node, const char *file, int line) { /* Roughly one percent of the calls to this function are to annotate a node with the same information already attached to that node! Just return instead of wasting memory. */ if (EXPR_LOCUS (node) && (EXPR_FILENAME (node) == file || ! strcmp (EXPR_FILENAME (node), file)) && EXPR_LINENO (node) == line) { last_annotated_node = node; return; } /* In heavily macroized code (such as GCC itself) this single entry cache can reduce the number of allocations by more than half. */ if (last_annotated_node && EXPR_LOCUS (last_annotated_node) && (EXPR_FILENAME (last_annotated_node) == file || ! strcmp (EXPR_FILENAME (last_annotated_node), file)) && EXPR_LINENO (last_annotated_node) == line) { SET_EXPR_LOCUS (node, EXPR_LOCUS (last_annotated_node)); return; } SET_EXPR_LOCUS (node, ggc_alloc (sizeof (location_t))); EXPR_LINENO (node) = line; EXPR_FILENAME (node) = file; last_annotated_node = node; } void annotate_with_locus (tree node, location_t locus) { annotate_with_file_line (node, locus.file, locus.line); } #endif /* Return a declaration like DDECL except that its DECL_ATTRIBUTES is ATTRIBUTE. */ tree build_decl_attribute_variant (tree ddecl, tree attribute) { DECL_ATTRIBUTES (ddecl) = attribute; return ddecl; } /* Return a type like TTYPE except that its TYPE_ATTRIBUTE is ATTRIBUTE. Record such modified types already made so we don't make duplicates. */ tree build_type_attribute_variant (tree ttype, tree attribute) { if (! attribute_list_equal (TYPE_ATTRIBUTES (ttype), attribute)) { hashval_t hashcode = 0; tree ntype; enum tree_code code = TREE_CODE (ttype); ntype = copy_node (ttype); TYPE_POINTER_TO (ntype) = 0; TYPE_REFERENCE_TO (ntype) = 0; TYPE_ATTRIBUTES (ntype) = attribute; /* Create a new main variant of TYPE. 
*/ TYPE_MAIN_VARIANT (ntype) = ntype; TYPE_NEXT_VARIANT (ntype) = 0; set_type_quals (ntype, TYPE_UNQUALIFIED); hashcode = iterative_hash_object (code, hashcode); if (TREE_TYPE (ntype)) hashcode = iterative_hash_object (TYPE_HASH (TREE_TYPE (ntype)), hashcode); hashcode = attribute_hash_list (attribute, hashcode); switch (TREE_CODE (ntype)) { case FUNCTION_TYPE: hashcode = type_hash_list (TYPE_ARG_TYPES (ntype), hashcode); break; case ARRAY_TYPE: hashcode = iterative_hash_object (TYPE_HASH (TYPE_DOMAIN (ntype)), hashcode); break; case INTEGER_TYPE: hashcode = iterative_hash_object (TREE_INT_CST_LOW (TYPE_MAX_VALUE (ntype)), hashcode); hashcode = iterative_hash_object (TREE_INT_CST_HIGH (TYPE_MAX_VALUE (ntype)), hashcode); break; case REAL_TYPE: { unsigned int precision = TYPE_PRECISION (ntype); hashcode = iterative_hash_object (precision, hashcode); } break; default: break; } ntype = type_hash_canon (hashcode, ntype); ttype = build_qualified_type (ntype, TYPE_QUALS (ttype)); } return ttype; } /* Return nonzero if IDENT is a valid name for attribute ATTR, or zero if not. We try both `text' and `__text__', ATTR may be either one. */ /* ??? It might be a reasonable simplification to require ATTR to be only `text'. One might then also require attribute lists to be stored in their canonicalized form. */ int is_attribute_p (const char *attr, tree ident) { int ident_len, attr_len; const char *p; if (TREE_CODE (ident) != IDENTIFIER_NODE) return 0; if (strcmp (attr, IDENTIFIER_POINTER (ident)) == 0) return 1; p = IDENTIFIER_POINTER (ident); ident_len = strlen (p); attr_len = strlen (attr); /* If ATTR is `__text__', IDENT must be `text'; and vice versa. */ if (attr[0] == '_') { if (attr[1] != '_' || attr[attr_len - 2] != '_' || attr[attr_len - 1] != '_') abort (); if (ident_len == attr_len - 4 && strncmp (attr + 2, p, attr_len - 4) == 0) return 1; } else { if (ident_len == attr_len + 4 && p[0] == '_' && p[1] == '_' && p[ident_len - 2] == '_' && p[ident_len - 1] == '_' && strncmp (attr, p + 2, attr_len) == 0) return 1; } return 0; } /* Given an attribute name and a list of attributes, return a pointer to the attribute's list element if the attribute is part of the list, or NULL_TREE if not found. If the attribute appears more than once, this only returns the first occurrence; the TREE_CHAIN of the return value should be passed back in if further occurrences are wanted. */ tree lookup_attribute (const char *attr_name, tree list) { tree l; for (l = list; l; l = TREE_CHAIN (l)) { if (TREE_CODE (TREE_PURPOSE (l)) != IDENTIFIER_NODE) abort (); if (is_attribute_p (attr_name, TREE_PURPOSE (l))) return l; } return NULL_TREE; } /* Return an attribute list that is the union of a1 and a2. */ tree merge_attributes (tree a1, tree a2) { tree attributes; /* Either one unset? Take the set one. */ if ((attributes = a1) == 0) attributes = a2; /* One that completely contains the other? Take it. */ else if (a2 != 0 && ! attribute_list_contained (a1, a2)) { if (attribute_list_contained (a2, a1)) attributes = a2; else { /* Pick the longest list, and hang on the other list. 
*/ if (list_length (a1) < list_length (a2)) attributes = a2, a2 = a1; for (; a2 != 0; a2 = TREE_CHAIN (a2)) { tree a; for (a = lookup_attribute (IDENTIFIER_POINTER (TREE_PURPOSE (a2)), attributes); a != NULL_TREE; a = lookup_attribute (IDENTIFIER_POINTER (TREE_PURPOSE (a2)), TREE_CHAIN (a))) { if (simple_cst_equal (TREE_VALUE (a), TREE_VALUE (a2)) == 1) break; } if (a == NULL_TREE) { a1 = copy_node (a2); TREE_CHAIN (a1) = attributes; attributes = a1; } } } } return attributes; } /* Given types T1 and T2, merge their attributes and return the result. */ tree merge_type_attributes (tree t1, tree t2) { return merge_attributes (TYPE_ATTRIBUTES (t1), TYPE_ATTRIBUTES (t2)); } /* Given decls OLDDECL and NEWDECL, merge their attributes and return the result. */ tree merge_decl_attributes (tree olddecl, tree newdecl) { return merge_attributes (DECL_ATTRIBUTES (olddecl), DECL_ATTRIBUTES (newdecl)); } #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES /* Specialization of merge_decl_attributes for various Windows targets. This handles the following situation: __declspec (dllimport) int foo; int foo; The second instance of `foo' nullifies the dllimport. */ tree merge_dllimport_decl_attributes (tree old, tree new) { tree a; int delete_dllimport_p; old = DECL_ATTRIBUTES (old); new = DECL_ATTRIBUTES (new); /* What we need to do here is remove from `old' dllimport if it doesn't appear in `new'. dllimport behaves like extern: if a declaration is marked dllimport and a definition appears later, then the object is not dllimport'd. */ if (lookup_attribute ("dllimport", old) != NULL_TREE && lookup_attribute ("dllimport", new) == NULL_TREE) delete_dllimport_p = 1; else delete_dllimport_p = 0; a = merge_attributes (old, new); if (delete_dllimport_p) { tree prev, t; /* Scan the list for dllimport and delete it. */ for (prev = NULL_TREE, t = a; t; prev = t, t = TREE_CHAIN (t)) { if (is_attribute_p ("dllimport", TREE_PURPOSE (t))) { if (prev == NULL_TREE) a = TREE_CHAIN (a); else TREE_CHAIN (prev) = TREE_CHAIN (t); break; } } } return a; } #endif /* TARGET_DLLIMPORT_DECL_ATTRIBUTES */ /* Set the type qualifiers for TYPE to TYPE_QUALS, which is a bitmask of the various TYPE_QUAL values. */ static void set_type_quals (tree type, int type_quals) { TYPE_READONLY (type) = (type_quals & TYPE_QUAL_CONST) != 0; TYPE_VOLATILE (type) = (type_quals & TYPE_QUAL_VOLATILE) != 0; TYPE_RESTRICT (type) = (type_quals & TYPE_QUAL_RESTRICT) != 0; } /* Returns true iff cand is equivalent to base with type_quals. */ bool check_qualified_type (tree cand, tree base, int type_quals) { return (TYPE_QUALS (cand) == type_quals && TYPE_NAME (cand) == TYPE_NAME (base) /* Apparently this is needed for Objective-C. */ && TYPE_CONTEXT (cand) == TYPE_CONTEXT (base) && attribute_list_equal (TYPE_ATTRIBUTES (cand), TYPE_ATTRIBUTES (base))); } /* Return a version of the TYPE, qualified as indicated by the TYPE_QUALS, if one exists. If no qualified version exists yet, return NULL_TREE. */ tree get_qualified_type (tree type, int type_quals) { tree t; if (TYPE_QUALS (type) == type_quals) return type; /* Search the chain of variants to see if there is already one there just like the one we need to have. If so, use that existing one. We must preserve the TYPE_NAME, since there is code that depends on this. */ for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) if (check_qualified_type (t, type, type_quals)) return t; return NULL_TREE; } /* Like get_qualified_type, but creates the type if it does not exist. This function never returns NULL_TREE. 
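   A minimal sketch of typical use (illustrative only): the qualified type
   `const volatile int' can be obtained with

	tree cv_int = build_qualified_type (integer_type_node,
					    TYPE_QUAL_CONST
					    | TYPE_QUAL_VOLATILE);

   which either returns an existing variant from integer_type_node's
   variant chain or copies the type and applies the qualifiers through
   set_type_quals.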
*/ tree build_qualified_type (tree type, int type_quals) { tree t; /* See if we already have the appropriate qualified variant. */ t = get_qualified_type (type, type_quals); /* If not, build it. */ if (!t) { t = build_type_copy (type); set_type_quals (t, type_quals); } return t; } /* Create a new variant of TYPE, equivalent but distinct. This is so the caller can modify it. */ tree build_type_copy (tree type) { tree t, m = TYPE_MAIN_VARIANT (type); t = copy_node (type); TYPE_POINTER_TO (t) = 0; TYPE_REFERENCE_TO (t) = 0; /* Add this type to the chain of variants of TYPE. */ TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (m); TYPE_NEXT_VARIANT (m) = t; return t; } /* Hashing of types so that we don't make duplicates. The entry point is `type_hash_canon'. */ /* Compute a hash code for a list of types (chain of TREE_LIST nodes with types in the TREE_VALUE slots), by adding the hash codes of the individual types. */ unsigned int type_hash_list (tree list, hashval_t hashcode) { tree tail; for (tail = list; tail; tail = TREE_CHAIN (tail)) if (TREE_VALUE (tail) != error_mark_node) hashcode = iterative_hash_object (TYPE_HASH (TREE_VALUE (tail)), hashcode); return hashcode; } /* These are the Hashtable callback functions. */ /* Returns true iff the types are equivalent. */ static int type_hash_eq (const void *va, const void *vb) { const struct type_hash *a = va, *b = vb; /* First test the things that are the same for all types. */ if (a->hash != b->hash || TREE_CODE (a->type) != TREE_CODE (b->type) || TREE_TYPE (a->type) != TREE_TYPE (b->type) || !attribute_list_equal (TYPE_ATTRIBUTES (a->type), TYPE_ATTRIBUTES (b->type)) || TYPE_ALIGN (a->type) != TYPE_ALIGN (b->type) || TYPE_MODE (a->type) != TYPE_MODE (b->type)) return 0; switch (TREE_CODE (a->type)) { case VOID_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case POINTER_TYPE: case REFERENCE_TYPE: return 1; case ENUMERAL_TYPE: if (TYPE_VALUES (a->type) != TYPE_VALUES (b->type) && !(TYPE_VALUES (a->type) && TREE_CODE (TYPE_VALUES (a->type)) == TREE_LIST && TYPE_VALUES (b->type) && TREE_CODE (TYPE_VALUES (b->type)) == TREE_LIST && type_list_equal (TYPE_VALUES (a->type), TYPE_VALUES (b->type)))) return 0; /* ... fall through ... 
*/ case INTEGER_TYPE: case REAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: return ((TYPE_MAX_VALUE (a->type) == TYPE_MAX_VALUE (b->type) || tree_int_cst_equal (TYPE_MAX_VALUE (a->type), TYPE_MAX_VALUE (b->type))) && (TYPE_MIN_VALUE (a->type) == TYPE_MIN_VALUE (b->type) || tree_int_cst_equal (TYPE_MIN_VALUE (a->type), TYPE_MIN_VALUE (b->type)))); case OFFSET_TYPE: return TYPE_OFFSET_BASETYPE (a->type) == TYPE_OFFSET_BASETYPE (b->type); case METHOD_TYPE: return (TYPE_METHOD_BASETYPE (a->type) == TYPE_METHOD_BASETYPE (b->type) && (TYPE_ARG_TYPES (a->type) == TYPE_ARG_TYPES (b->type) || (TYPE_ARG_TYPES (a->type) && TREE_CODE (TYPE_ARG_TYPES (a->type)) == TREE_LIST && TYPE_ARG_TYPES (b->type) && TREE_CODE (TYPE_ARG_TYPES (b->type)) == TREE_LIST && type_list_equal (TYPE_ARG_TYPES (a->type), TYPE_ARG_TYPES (b->type))))); case ARRAY_TYPE: case SET_TYPE: return TYPE_DOMAIN (a->type) == TYPE_DOMAIN (b->type); case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: return (TYPE_FIELDS (a->type) == TYPE_FIELDS (b->type) || (TYPE_FIELDS (a->type) && TREE_CODE (TYPE_FIELDS (a->type)) == TREE_LIST && TYPE_FIELDS (b->type) && TREE_CODE (TYPE_FIELDS (b->type)) == TREE_LIST && type_list_equal (TYPE_FIELDS (a->type), TYPE_FIELDS (b->type)))); case FUNCTION_TYPE: return (TYPE_ARG_TYPES (a->type) == TYPE_ARG_TYPES (b->type) || (TYPE_ARG_TYPES (a->type) && TREE_CODE (TYPE_ARG_TYPES (a->type)) == TREE_LIST && TYPE_ARG_TYPES (b->type) && TREE_CODE (TYPE_ARG_TYPES (b->type)) == TREE_LIST && type_list_equal (TYPE_ARG_TYPES (a->type), TYPE_ARG_TYPES (b->type)))); default: return 0; } } /* Return the cached hash value. */ static hashval_t type_hash_hash (const void *item) { return ((const struct type_hash *) item)->hash; } /* Look in the type hash table for a type isomorphic to TYPE. If one is found, return it. Otherwise return 0. */ tree type_hash_lookup (hashval_t hashcode, tree type) { struct type_hash *h, in; /* The TYPE_ALIGN field of a type is set by layout_type(), so we must call that routine before comparing TYPE_ALIGNs. */ layout_type (type); in.hash = hashcode; in.type = type; h = htab_find_with_hash (type_hash_table, &in, hashcode); if (h) return h->type; return NULL_TREE; } /* Add an entry to the type-hash-table for a type TYPE whose hash code is HASHCODE. */ void type_hash_add (hashval_t hashcode, tree type) { struct type_hash *h; void **loc; h = ggc_alloc (sizeof (struct type_hash)); h->hash = hashcode; h->type = type; loc = htab_find_slot_with_hash (type_hash_table, h, hashcode, INSERT); *(struct type_hash **) loc = h; } /* Given TYPE, and HASHCODE its hash code, return the canonical object for an identical type if one already exists. Otherwise, return TYPE, and record it as the canonical object. To use this function, first create a type of the sort you want. Then compute its hash code from the fields of the type that make it different from other similar types. Then call this function and use the value. */ tree type_hash_canon (unsigned int hashcode, tree type) { tree t1; /* The hash table only contains main variants, so ensure that's what we're being passed. */ if (TYPE_MAIN_VARIANT (type) != type) abort (); if (!lang_hooks.types.hash_types) return type; /* See if the type is in the hash table already. If so, return it. Otherwise, add the type. 
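   (As an illustration of the calling convention described above:
   build_array_type, later in this file, makes a bare ARRAY_TYPE, hashes
   the element and index types with iterative_hash_object, and then passes
   the node through type_hash_canon, keeping whichever node comes back.)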
*/ t1 = type_hash_lookup (hashcode, type); if (t1 != 0) { #ifdef GATHER_STATISTICS tree_node_counts[(int) t_kind]--; tree_node_sizes[(int) t_kind] -= sizeof (struct tree_type); #endif return t1; } else { type_hash_add (hashcode, type); return type; } } /* See if the data pointed to by the type hash table is marked. We consider it marked if the type is marked or if a debug type number or symbol table entry has been made for the type. This reduces the amount of debugging output and eliminates that dependency of the debug output on the number of garbage collections. */ static int type_hash_marked_p (const void *p) { tree type = ((struct type_hash *) p)->type; return ggc_marked_p (type) || TYPE_SYMTAB_POINTER (type); } static void print_type_hash_statistics (void) { fprintf (stderr, "Type hash: size %ld, %ld elements, %f collisions\n", (long) htab_size (type_hash_table), (long) htab_elements (type_hash_table), htab_collisions (type_hash_table)); } /* Compute a hash code for a list of attributes (chain of TREE_LIST nodes with names in the TREE_PURPOSE slots and args in the TREE_VALUE slots), by adding the hash codes of the individual attributes. */ unsigned int attribute_hash_list (tree list, hashval_t hashcode) { tree tail; for (tail = list; tail; tail = TREE_CHAIN (tail)) /* ??? Do we want to add in TREE_VALUE too? */ hashcode = iterative_hash_object (IDENTIFIER_HASH_VALUE (TREE_PURPOSE (tail)), hashcode); return hashcode; } /* Given two lists of attributes, return true if list l2 is equivalent to l1. */ int attribute_list_equal (tree l1, tree l2) { return attribute_list_contained (l1, l2) && attribute_list_contained (l2, l1); } /* Given two lists of attributes, return true if list L2 is completely contained within L1. */ /* ??? This would be faster if attribute names were stored in a canonicalized form. Otherwise, if L1 uses `foo' and L2 uses `__foo__', the long method must be used to show these elements are equivalent (which they are). */ /* ??? It's not clear that attributes with arguments will always be handled correctly. */ int attribute_list_contained (tree l1, tree l2) { tree t1, t2; /* First check the obvious, maybe the lists are identical. */ if (l1 == l2) return 1; /* Maybe the lists are similar. */ for (t1 = l1, t2 = l2; t1 != 0 && t2 != 0 && TREE_PURPOSE (t1) == TREE_PURPOSE (t2) && TREE_VALUE (t1) == TREE_VALUE (t2); t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2)); /* Maybe the lists are equal. */ if (t1 == 0 && t2 == 0) return 1; for (; t2 != 0; t2 = TREE_CHAIN (t2)) { tree attr; for (attr = lookup_attribute (IDENTIFIER_POINTER (TREE_PURPOSE (t2)), l1); attr != NULL_TREE; attr = lookup_attribute (IDENTIFIER_POINTER (TREE_PURPOSE (t2)), TREE_CHAIN (attr))) { if (simple_cst_equal (TREE_VALUE (t2), TREE_VALUE (attr)) == 1) break; } if (attr == 0) return 0; if (simple_cst_equal (TREE_VALUE (t2), TREE_VALUE (attr)) != 1) return 0; } return 1; } /* Given two lists of types (chains of TREE_LIST nodes with types in the TREE_VALUE slots) return 1 if the lists contain the same types in the same order. Also, the TREE_PURPOSEs must match. */ int type_list_equal (tree l1, tree l2) { tree t1, t2; for (t1 = l1, t2 = l2; t1 && t2; t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2)) if (TREE_VALUE (t1) != TREE_VALUE (t2) || (TREE_PURPOSE (t1) != TREE_PURPOSE (t2) && ! 
(1 == simple_cst_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2)) && (TREE_TYPE (TREE_PURPOSE (t1)) == TREE_TYPE (TREE_PURPOSE (t2)))))) return 0; return t1 == t2; } /* Returns the number of arguments to the FUNCTION_TYPE or METHOD_TYPE given by TYPE. If the argument list accepts variable arguments, then this function counts only the ordinary arguments. */ int type_num_arguments (tree type) { int i = 0; tree t; for (t = TYPE_ARG_TYPES (type); t; t = TREE_CHAIN (t)) /* If the function does not take a variable number of arguments, the last element in the list will have type `void'. */ if (VOID_TYPE_P (TREE_VALUE (t))) break; else ++i; return i; } /* Nonzero if integer constants T1 and T2 represent the same constant value. */ int tree_int_cst_equal (tree t1, tree t2) { if (t1 == t2) return 1; if (t1 == 0 || t2 == 0) return 0; if (TREE_CODE (t1) == INTEGER_CST && TREE_CODE (t2) == INTEGER_CST && TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2) && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2)) return 1; return 0; } /* Nonzero if integer constants T1 and T2 represent values that satisfy <. The precise way of comparison depends on their data type. */ int tree_int_cst_lt (tree t1, tree t2) { if (t1 == t2) return 0; if (TYPE_UNSIGNED (TREE_TYPE (t1)) != TYPE_UNSIGNED (TREE_TYPE (t2))) { int t1_sgn = tree_int_cst_sgn (t1); int t2_sgn = tree_int_cst_sgn (t2); if (t1_sgn < t2_sgn) return 1; else if (t1_sgn > t2_sgn) return 0; /* Otherwise, both are non-negative, so we compare them as unsigned just in case one of them would overflow a signed type. */ } else if (!TYPE_UNSIGNED (TREE_TYPE (t1))) return INT_CST_LT (t1, t2); return INT_CST_LT_UNSIGNED (t1, t2); } /* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2. */ int tree_int_cst_compare (tree t1, tree t2) { if (tree_int_cst_lt (t1, t2)) return -1; else if (tree_int_cst_lt (t2, t1)) return 1; else return 0; } /* Return 1 if T is an INTEGER_CST that can be manipulated efficiently on the host. If POS is zero, the value can be represented in a single HOST_WIDE_INT. If POS is nonzero, the value must be positive and can be represented in a single unsigned HOST_WIDE_INT. */ int host_integerp (tree t, int pos) { return (TREE_CODE (t) == INTEGER_CST && ! TREE_OVERFLOW (t) && ((TREE_INT_CST_HIGH (t) == 0 && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) >= 0) || (! pos && TREE_INT_CST_HIGH (t) == -1 && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0 && !TYPE_UNSIGNED (TREE_TYPE (t))) || (pos && TREE_INT_CST_HIGH (t) == 0))); } /* Return the HOST_WIDE_INT least significant bits of T if it is an INTEGER_CST and there is no overflow. POS is nonzero if the result must be positive. Abort if we cannot satisfy the above conditions. */ HOST_WIDE_INT tree_low_cst (tree t, int pos) { if (host_integerp (t, pos)) return TREE_INT_CST_LOW (t); else abort (); } /* Return the most significant bit of the integer constant T. */ int tree_int_cst_msb (tree t) { int prec; HOST_WIDE_INT h; unsigned HOST_WIDE_INT l; /* Note that using TYPE_PRECISION here is wrong. We care about the actual bits, not the (arbitrary) range of the type. */ prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (t))) - 1; rshift_double (TREE_INT_CST_LOW (t), TREE_INT_CST_HIGH (t), prec, 2 * HOST_BITS_PER_WIDE_INT, &l, &h, 0); return (l & 1) == 1; } /* Return an indication of the sign of the integer constant T. The return value is -1 if T < 0, 0 if T == 0, and 1 if T > 0. Note that -1 will never be returned it T's type is unsigned. 
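   For instance, the constant (unsigned int) -1 yields 1 here because its
   type is unsigned, even though the same bit pattern in a signed type
   would yield -1.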
*/ int tree_int_cst_sgn (tree t) { if (TREE_INT_CST_LOW (t) == 0 && TREE_INT_CST_HIGH (t) == 0) return 0; else if (TYPE_UNSIGNED (TREE_TYPE (t))) return 1; else if (TREE_INT_CST_HIGH (t) < 0) return -1; else return 1; } /* Compare two constructor-element-type constants. Return 1 if the lists are known to be equal; otherwise return 0. */ int simple_cst_list_equal (tree l1, tree l2) { while (l1 != NULL_TREE && l2 != NULL_TREE) { if (simple_cst_equal (TREE_VALUE (l1), TREE_VALUE (l2)) != 1) return 0; l1 = TREE_CHAIN (l1); l2 = TREE_CHAIN (l2); } return l1 == l2; } /* Return truthvalue of whether T1 is the same tree structure as T2. Return 1 if they are the same. Return 0 if they are understandably different. Return -1 if either contains tree structure not understood by this function. */ int simple_cst_equal (tree t1, tree t2) { enum tree_code code1, code2; int cmp; int i; if (t1 == t2) return 1; if (t1 == 0 || t2 == 0) return 0; code1 = TREE_CODE (t1); code2 = TREE_CODE (t2); if (code1 == NOP_EXPR || code1 == CONVERT_EXPR || code1 == NON_LVALUE_EXPR) { if (code2 == NOP_EXPR || code2 == CONVERT_EXPR || code2 == NON_LVALUE_EXPR) return simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)); else return simple_cst_equal (TREE_OPERAND (t1, 0), t2); } else if (code2 == NOP_EXPR || code2 == CONVERT_EXPR || code2 == NON_LVALUE_EXPR) return simple_cst_equal (t1, TREE_OPERAND (t2, 0)); if (code1 != code2) return 0; switch (code1) { case INTEGER_CST: return (TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2) && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2)); case REAL_CST: return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2)); case STRING_CST: return (TREE_STRING_LENGTH (t1) == TREE_STRING_LENGTH (t2) && ! memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2), TREE_STRING_LENGTH (t1))); case CONSTRUCTOR: return simple_cst_list_equal (CONSTRUCTOR_ELTS (t1), CONSTRUCTOR_ELTS (t2)); case SAVE_EXPR: return simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)); case CALL_EXPR: cmp = simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)); if (cmp <= 0) return cmp; return simple_cst_list_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1)); case TARGET_EXPR: /* Special case: if either target is an unallocated VAR_DECL, it means that it's going to be unified with whatever the TARGET_EXPR is really supposed to initialize, so treat it as being equivalent to anything. */ if ((TREE_CODE (TREE_OPERAND (t1, 0)) == VAR_DECL && DECL_NAME (TREE_OPERAND (t1, 0)) == NULL_TREE && !DECL_RTL_SET_P (TREE_OPERAND (t1, 0))) || (TREE_CODE (TREE_OPERAND (t2, 0)) == VAR_DECL && DECL_NAME (TREE_OPERAND (t2, 0)) == NULL_TREE && !DECL_RTL_SET_P (TREE_OPERAND (t2, 0)))) cmp = 1; else cmp = simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)); if (cmp <= 0) return cmp; return simple_cst_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1)); case WITH_CLEANUP_EXPR: cmp = simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)); if (cmp <= 0) return cmp; return simple_cst_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t1, 1)); case COMPONENT_REF: if (TREE_OPERAND (t1, 1) == TREE_OPERAND (t2, 1)) return simple_cst_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)); return 0; case VAR_DECL: case PARM_DECL: case CONST_DECL: case FUNCTION_DECL: return 0; default: break; } /* This general rule works for most tree codes. All exceptions should be handled above. If this is a language-specific tree code, we can't trust what might be in the operand, so say we don't know the situation. 
*/ if ((int) code1 >= (int) LAST_AND_UNUSED_TREE_CODE) return -1; switch (TREE_CODE_CLASS (code1)) { case '1': case '2': case '<': case 'e': case 'r': case 's': cmp = 1; for (i = 0; i < TREE_CODE_LENGTH (code1); i++) { cmp = simple_cst_equal (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i)); if (cmp <= 0) return cmp; } return cmp; default: return -1; } } /* Compare the value of T, an INTEGER_CST, with U, an unsigned integer value. Return -1, 0, or 1 if the value of T is less than, equal to, or greater than U, respectively. */ int compare_tree_int (tree t, unsigned HOST_WIDE_INT u) { if (tree_int_cst_sgn (t) < 0) return -1; else if (TREE_INT_CST_HIGH (t) != 0) return 1; else if (TREE_INT_CST_LOW (t) == u) return 0; else if (TREE_INT_CST_LOW (t) < u) return -1; else return 1; } /* Return true if CODE represents an associative tree code. Otherwise return false. */ bool associative_tree_code (enum tree_code code) { switch (code) { case BIT_IOR_EXPR: case BIT_AND_EXPR: case BIT_XOR_EXPR: case PLUS_EXPR: case MULT_EXPR: case MIN_EXPR: case MAX_EXPR: return true; default: break; } return false; } /* Return true if CODE represents an commutative tree code. Otherwise return false. */ bool commutative_tree_code (enum tree_code code) { switch (code) { case PLUS_EXPR: case MULT_EXPR: case MIN_EXPR: case MAX_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case NE_EXPR: case EQ_EXPR: case UNORDERED_EXPR: case ORDERED_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case TRUTH_AND_EXPR: case TRUTH_XOR_EXPR: case TRUTH_OR_EXPR: return true; default: break; } return false; } /* Generate a hash value for an expression. This can be used iteratively by passing a previous result as the "val" argument. This function is intended to produce the same hash for expressions which would compare equal using operand_equal_p. */ hashval_t iterative_hash_expr (tree t, hashval_t val) { int i; enum tree_code code; char class; if (t == NULL_TREE) return iterative_hash_object (t, val); code = TREE_CODE (t); class = TREE_CODE_CLASS (code); if (class == 'd' || TREE_CODE (t) == VALUE_HANDLE) { /* Decls we can just compare by pointer. */ val = iterative_hash_object (t, val); } else if (class == 'c') { /* Alas, constants aren't shared, so we can't rely on pointer identity. */ if (code == INTEGER_CST) { val = iterative_hash_object (TREE_INT_CST_LOW (t), val); val = iterative_hash_object (TREE_INT_CST_HIGH (t), val); } else if (code == REAL_CST) { unsigned int val2 = real_hash (TREE_REAL_CST_PTR (t)); val = iterative_hash (&val2, sizeof (unsigned int), val); } else if (code == STRING_CST) val = iterative_hash (TREE_STRING_POINTER (t), TREE_STRING_LENGTH (t), val); else if (code == COMPLEX_CST) { val = iterative_hash_expr (TREE_REALPART (t), val); val = iterative_hash_expr (TREE_IMAGPART (t), val); } else if (code == VECTOR_CST) val = iterative_hash_expr (TREE_VECTOR_CST_ELTS (t), val); else abort (); } else if (IS_EXPR_CODE_CLASS (class)) { val = iterative_hash_object (code, val); /* Don't hash the type, that can lead to having nodes which compare equal according to operand_equal_p, but which have different hash codes. */ if (code == NOP_EXPR || code == CONVERT_EXPR || code == NON_LVALUE_EXPR) { /* Make sure to include signness in the hash computation. */ val += TYPE_UNSIGNED (TREE_TYPE (t)); val = iterative_hash_expr (TREE_OPERAND (t, 0), val); } if (commutative_tree_code (code)) { /* It's a commutative expression. We want to hash it the same however it appears. 
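   For example, the PLUS_EXPRs a + b and b + a should end up with identical
   hash values.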
We do this by first hashing both operands and then rehashing based on the order of their independent hashes. */ hashval_t one = iterative_hash_expr (TREE_OPERAND (t, 0), 0); hashval_t two = iterative_hash_expr (TREE_OPERAND (t, 1), 0); hashval_t t; if (one > two) t = one, one = two, two = t; val = iterative_hash_object (one, val); val = iterative_hash_object (two, val); } else for (i = first_rtl_op (code) - 1; i >= 0; --i) val = iterative_hash_expr (TREE_OPERAND (t, i), val); } else if (code == TREE_LIST) { /* A list of expressions, for a CALL_EXPR or as the elements of a VECTOR_CST. */ for (; t; t = TREE_CHAIN (t)) val = iterative_hash_expr (TREE_VALUE (t), val); } else if (code == SSA_NAME) { val = iterative_hash_object (SSA_NAME_VERSION (t), val); val = iterative_hash_expr (SSA_NAME_VAR (t), val); } else abort (); return val; } /* Constructors for pointer, array and function types. (RECORD_TYPE, UNION_TYPE and ENUMERAL_TYPE nodes are constructed by language-dependent code, not here.) */ /* Construct, lay out and return the type of pointers to TO_TYPE with mode MODE. If CAN_ALIAS_ALL is TRUE, indicate this type can reference all of memory. If such a type has already been constructed, reuse it. */ tree build_pointer_type_for_mode (tree to_type, enum machine_mode mode, bool can_alias_all) { tree t; /* In some cases, languages will have things that aren't a POINTER_TYPE (such as a RECORD_TYPE for fat pointers in Ada) as TYPE_POINTER_TO. In that case, return that type without regard to the rest of our operands. ??? This is a kludge, but consistent with the way this function has always operated and there doesn't seem to be a good way to avoid this at the moment. */ if (TYPE_POINTER_TO (to_type) != 0 && TREE_CODE (TYPE_POINTER_TO (to_type)) != POINTER_TYPE) return TYPE_POINTER_TO (to_type); /* First, if we already have a type for pointers to TO_TYPE and it's the proper mode, use it. */ for (t = TYPE_POINTER_TO (to_type); t; t = TYPE_NEXT_PTR_TO (t)) if (TYPE_MODE (t) == mode && TYPE_REF_CAN_ALIAS_ALL (t) == can_alias_all) return t; t = make_node (POINTER_TYPE); TREE_TYPE (t) = to_type; TYPE_MODE (t) = mode; TYPE_REF_CAN_ALIAS_ALL (t) = can_alias_all; TYPE_NEXT_PTR_TO (t) = TYPE_POINTER_TO (to_type); TYPE_POINTER_TO (to_type) = t; /* Lay out the type. This function has many callers that are concerned with expression-construction, and this simplifies them all. */ layout_type (t); return t; } /* By default build pointers in ptr_mode. */ tree build_pointer_type (tree to_type) { return build_pointer_type_for_mode (to_type, ptr_mode, false); } /* Same as build_pointer_type_for_mode, but for REFERENCE_TYPE. */ tree build_reference_type_for_mode (tree to_type, enum machine_mode mode, bool can_alias_all) { tree t; /* In some cases, languages will have things that aren't a REFERENCE_TYPE (such as a RECORD_TYPE for fat pointers in Ada) as TYPE_REFERENCE_TO. In that case, return that type without regard to the rest of our operands. ??? This is a kludge, but consistent with the way this function has always operated and there doesn't seem to be a good way to avoid this at the moment. */ if (TYPE_REFERENCE_TO (to_type) != 0 && TREE_CODE (TYPE_REFERENCE_TO (to_type)) != REFERENCE_TYPE) return TYPE_REFERENCE_TO (to_type); /* First, if we already have a type for pointers to TO_TYPE and it's the proper mode, use it. 
*/ for (t = TYPE_REFERENCE_TO (to_type); t; t = TYPE_NEXT_REF_TO (t)) if (TYPE_MODE (t) == mode && TYPE_REF_CAN_ALIAS_ALL (t) == can_alias_all) return t; t = make_node (REFERENCE_TYPE); TREE_TYPE (t) = to_type; TYPE_MODE (t) = mode; TYPE_REF_CAN_ALIAS_ALL (t) = can_alias_all; TYPE_NEXT_REF_TO (t) = TYPE_REFERENCE_TO (to_type); TYPE_REFERENCE_TO (to_type) = t; layout_type (t); return t; } /* Build the node for the type of references-to-TO_TYPE by default in ptr_mode. */ tree build_reference_type (tree to_type) { return build_reference_type_for_mode (to_type, ptr_mode, false); } /* Build a type that is compatible with t but has no cv quals anywhere in its type, thus const char *const *const * -> char ***. */ tree build_type_no_quals (tree t) { switch (TREE_CODE (t)) { case POINTER_TYPE: return build_pointer_type_for_mode (build_type_no_quals (TREE_TYPE (t)), TYPE_MODE (t), TYPE_REF_CAN_ALIAS_ALL (t)); case REFERENCE_TYPE: return build_reference_type_for_mode (build_type_no_quals (TREE_TYPE (t)), TYPE_MODE (t), TYPE_REF_CAN_ALIAS_ALL (t)); default: return TYPE_MAIN_VARIANT (t); } } /* Create a type of integers to be the TYPE_DOMAIN of an ARRAY_TYPE. MAXVAL should be the maximum value in the domain (one less than the length of the array). The maximum value that MAXVAL can have is INT_MAX for a HOST_WIDE_INT. We don't enforce this limit, that is up to caller (e.g. language front end). The limit exists because the result is a signed type and we don't handle sizes that use more than one HOST_WIDE_INT. */ tree build_index_type (tree maxval) { tree itype = make_node (INTEGER_TYPE); TREE_TYPE (itype) = sizetype; TYPE_PRECISION (itype) = TYPE_PRECISION (sizetype); TYPE_MIN_VALUE (itype) = size_zero_node; TYPE_MAX_VALUE (itype) = convert (sizetype, maxval); TYPE_MODE (itype) = TYPE_MODE (sizetype); TYPE_SIZE (itype) = TYPE_SIZE (sizetype); TYPE_SIZE_UNIT (itype) = TYPE_SIZE_UNIT (sizetype); TYPE_ALIGN (itype) = TYPE_ALIGN (sizetype); TYPE_USER_ALIGN (itype) = TYPE_USER_ALIGN (sizetype); if (host_integerp (maxval, 1)) return type_hash_canon (tree_low_cst (maxval, 1), itype); else return itype; } /* Create a range of some discrete type TYPE (an INTEGER_TYPE, ENUMERAL_TYPE, BOOLEAN_TYPE, or CHAR_TYPE), with low bound LOWVAL and high bound HIGHVAL. if TYPE==NULL_TREE, sizetype is used. */ tree build_range_type (tree type, tree lowval, tree highval) { tree itype = make_node (INTEGER_TYPE); TREE_TYPE (itype) = type; if (type == NULL_TREE) type = sizetype; TYPE_MIN_VALUE (itype) = convert (type, lowval); TYPE_MAX_VALUE (itype) = highval ? convert (type, highval) : NULL; TYPE_PRECISION (itype) = TYPE_PRECISION (type); TYPE_MODE (itype) = TYPE_MODE (type); TYPE_SIZE (itype) = TYPE_SIZE (type); TYPE_SIZE_UNIT (itype) = TYPE_SIZE_UNIT (type); TYPE_ALIGN (itype) = TYPE_ALIGN (type); TYPE_USER_ALIGN (itype) = TYPE_USER_ALIGN (type); if (host_integerp (lowval, 0) && highval != 0 && host_integerp (highval, 0)) return type_hash_canon (tree_low_cst (highval, 0) - tree_low_cst (lowval, 0), itype); else return itype; } /* Just like build_index_type, but takes lowval and highval instead of just highval (maxval). */ tree build_index_2_type (tree lowval, tree highval) { return build_range_type (sizetype, lowval, highval); } /* Construct, lay out and return the type of arrays of elements with ELT_TYPE and number of elements specified by the range of values of INDEX_TYPE. If such a type has already been constructed, reuse it. 
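   A minimal sketch (illustrative only): a ten-element array of `int' can
   be built as

	tree a10 = build_array_type (integer_type_node,
				     build_index_type (build_int_2 (9, 0)));

   the argument of build_index_type being the highest valid index, i.e.
   one less than the number of elements.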
*/ tree build_array_type (tree elt_type, tree index_type) { tree t; hashval_t hashcode = 0; if (TREE_CODE (elt_type) == FUNCTION_TYPE) { error ("arrays of functions are not meaningful"); elt_type = integer_type_node; } t = make_node (ARRAY_TYPE); TREE_TYPE (t) = elt_type; TYPE_DOMAIN (t) = index_type; if (index_type == 0) return t; hashcode = iterative_hash_object (TYPE_HASH (elt_type), hashcode); hashcode = iterative_hash_object (TYPE_HASH (index_type), hashcode); t = type_hash_canon (hashcode, t); if (!COMPLETE_TYPE_P (t)) layout_type (t); return t; } /* Return the TYPE of the elements comprising the innermost dimension of ARRAY. */ tree get_inner_array_type (tree array) { tree type = TREE_TYPE (array); while (TREE_CODE (type) == ARRAY_TYPE) type = TREE_TYPE (type); return type; } /* Construct, lay out and return the type of functions returning type VALUE_TYPE given arguments of types ARG_TYPES. ARG_TYPES is a chain of TREE_LIST nodes whose TREE_VALUEs are data type nodes for the arguments of the function. If such a type has already been constructed, reuse it. */ tree build_function_type (tree value_type, tree arg_types) { tree t; hashval_t hashcode = 0; if (TREE_CODE (value_type) == FUNCTION_TYPE) { error ("function return type cannot be function"); value_type = integer_type_node; } /* Make a node of the sort we want. */ t = make_node (FUNCTION_TYPE); TREE_TYPE (t) = value_type; TYPE_ARG_TYPES (t) = arg_types; /* If we already have such a type, use the old one. */ hashcode = iterative_hash_object (TYPE_HASH (value_type), hashcode); hashcode = type_hash_list (arg_types, hashcode); t = type_hash_canon (hashcode, t); if (!COMPLETE_TYPE_P (t)) layout_type (t); return t; } /* Build a function type. The RETURN_TYPE is the type returned by the function. If additional arguments are provided, they are additional argument types. The list of argument types must always be terminated by NULL_TREE. */ tree build_function_type_list (tree return_type, ...) { tree t, args, last; va_list p; va_start (p, return_type); t = va_arg (p, tree); for (args = NULL_TREE; t != NULL_TREE; t = va_arg (p, tree)) args = tree_cons (NULL_TREE, t, args); last = args; args = nreverse (args); TREE_CHAIN (last) = void_list_node; args = build_function_type (return_type, args); va_end (p); return args; } /* Build a METHOD_TYPE for a member of BASETYPE. The RETTYPE (a TYPE) and ARGTYPES (a TREE_LIST) are the return type and arguments types for the method. An implicit additional parameter (of type pointer-to-BASETYPE) is added to the ARGTYPES. */ tree build_method_type_directly (tree basetype, tree rettype, tree argtypes) { tree t; tree ptype; int hashcode = 0; /* Make a node of the sort we want. */ t = make_node (METHOD_TYPE); TYPE_METHOD_BASETYPE (t) = TYPE_MAIN_VARIANT (basetype); TREE_TYPE (t) = rettype; ptype = build_pointer_type (basetype); /* The actual arglist for this function includes a "hidden" argument which is "this". Put it into the list of argument types. */ argtypes = tree_cons (NULL_TREE, ptype, argtypes); TYPE_ARG_TYPES (t) = argtypes; /* If we already have such a type, use the old one. */ hashcode = iterative_hash_object (TYPE_HASH (basetype), hashcode); hashcode = iterative_hash_object (TYPE_HASH (rettype), hashcode); hashcode = type_hash_list (argtypes, hashcode); t = type_hash_canon (hashcode, t); if (!COMPLETE_TYPE_P (t)) layout_type (t); return t; } /* Construct, lay out and return the type of methods belonging to class BASETYPE and whose arguments and values are described by TYPE. 
If that type exists already, reuse it. TYPE must be a FUNCTION_TYPE node. */ tree build_method_type (tree basetype, tree type) { if (TREE_CODE (type) != FUNCTION_TYPE) abort (); return build_method_type_directly (basetype, TREE_TYPE (type), TYPE_ARG_TYPES (type)); } /* Construct, lay out and return the type of offsets to a value of type TYPE, within an object of type BASETYPE. If a suitable offset type exists already, reuse it. */ tree build_offset_type (tree basetype, tree type) { tree t; hashval_t hashcode = 0; /* Make a node of the sort we want. */ t = make_node (OFFSET_TYPE); TYPE_OFFSET_BASETYPE (t) = TYPE_MAIN_VARIANT (basetype); TREE_TYPE (t) = type; /* If we already have such a type, use the old one. */ hashcode = iterative_hash_object (TYPE_HASH (basetype), hashcode); hashcode = iterative_hash_object (TYPE_HASH (type), hashcode); t = type_hash_canon (hashcode, t); if (!COMPLETE_TYPE_P (t)) layout_type (t); return t; } /* Create a complex type whose components are COMPONENT_TYPE. */ tree build_complex_type (tree component_type) { tree t; hashval_t hashcode; /* Make a node of the sort we want. */ t = make_node (COMPLEX_TYPE); TREE_TYPE (t) = TYPE_MAIN_VARIANT (component_type); /* If we already have such a type, use the old one. */ hashcode = iterative_hash_object (TYPE_HASH (component_type), 0); t = type_hash_canon (hashcode, t); if (!COMPLETE_TYPE_P (t)) layout_type (t); /* If we are writing Dwarf2 output we need to create a name, since complex is a fundamental type. */ if ((write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG) && ! TYPE_NAME (t)) { const char *name; if (component_type == char_type_node) name = "complex char"; else if (component_type == signed_char_type_node) name = "complex signed char"; else if (component_type == unsigned_char_type_node) name = "complex unsigned char"; else if (component_type == short_integer_type_node) name = "complex short int"; else if (component_type == short_unsigned_type_node) name = "complex short unsigned int"; else if (component_type == integer_type_node) name = "complex int"; else if (component_type == unsigned_type_node) name = "complex unsigned int"; else if (component_type == long_integer_type_node) name = "complex long int"; else if (component_type == long_unsigned_type_node) name = "complex long unsigned int"; else if (component_type == long_long_integer_type_node) name = "complex long long int"; else if (component_type == long_long_unsigned_type_node) name = "complex long long unsigned int"; else name = 0; if (name != 0) TYPE_NAME (t) = get_identifier (name); } return build_qualified_type (t, TYPE_QUALS (component_type)); } /* Return OP, stripped of any conversions to wider types as much as is safe. Converting the value back to OP's type makes a value equivalent to OP. If FOR_TYPE is nonzero, we return a value which, if converted to type FOR_TYPE, would be equivalent to converting OP to type FOR_TYPE. If FOR_TYPE is nonzero, unaligned bit-field references may be changed to the narrowest type that can hold the value, even if they don't exactly fit. Otherwise, bit-field references are changed to a narrower type only if they can be fetched directly from memory in that type. OP must have integer, real or enumeral type. Pointers are not allowed! There are some cases where the obvious value we could return would regenerate to OP if converted to OP's type, but would not extend like OP to wider types. If FOR_TYPE indicates such extension is contemplated, we eschew such values. 
For example, if OP is (unsigned short)(signed char)-1, we avoid returning (signed char)-1 if FOR_TYPE is int, even though extending that to an unsigned short would regenerate OP, since the result of extending (signed char)-1 to (int) is different from (int) OP. */ tree get_unwidened (tree op, tree for_type) { /* Set UNS initially if converting OP to FOR_TYPE is a zero-extension. */ tree type = TREE_TYPE (op); unsigned final_prec = TYPE_PRECISION (for_type != 0 ? for_type : type); int uns = (for_type != 0 && for_type != type && final_prec > TYPE_PRECISION (type) && TYPE_UNSIGNED (type)); tree win = op; while (TREE_CODE (op) == NOP_EXPR) { int bitschange = TYPE_PRECISION (TREE_TYPE (op)) - TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op, 0))); /* Truncations are many-one so cannot be removed. Unless we are later going to truncate down even farther. */ if (bitschange < 0 && final_prec > TYPE_PRECISION (TREE_TYPE (op))) break; /* See what's inside this conversion. If we decide to strip it, we will set WIN. */ op = TREE_OPERAND (op, 0); /* If we have not stripped any zero-extensions (uns is 0), we can strip any kind of extension. If we have previously stripped a zero-extension, only zero-extensions can safely be stripped. Any extension can be stripped if the bits it would produce are all going to be discarded later by truncating to FOR_TYPE. */ if (bitschange > 0) { if (! uns || final_prec <= TYPE_PRECISION (TREE_TYPE (op))) win = op; /* TYPE_UNSIGNED says whether this is a zero-extension. Let's avoid computing it if it does not affect WIN and if UNS will not be needed again. */ if ((uns || TREE_CODE (op) == NOP_EXPR) && TYPE_UNSIGNED (TREE_TYPE (op))) { uns = 1; win = op; } } } if (TREE_CODE (op) == COMPONENT_REF /* Since type_for_size always gives an integer type. */ && TREE_CODE (type) != REAL_TYPE /* Don't crash if field not laid out yet. */ && DECL_SIZE (TREE_OPERAND (op, 1)) != 0 && host_integerp (DECL_SIZE (TREE_OPERAND (op, 1)), 1)) { unsigned int innerprec = tree_low_cst (DECL_SIZE (TREE_OPERAND (op, 1)), 1); int unsignedp = (DECL_UNSIGNED (TREE_OPERAND (op, 1)) || TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op, 1)))); type = lang_hooks.types.type_for_size (innerprec, unsignedp); /* We can get this structure field in the narrowest type it fits in. If FOR_TYPE is 0, do this only for a field that matches the narrower type exactly and is aligned for it The resulting extension to its nominal type (a fullword type) must fit the same conditions as for other extensions. */ if (type != 0 && INT_CST_LT_UNSIGNED (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (op))) && (for_type || ! DECL_BIT_FIELD (TREE_OPERAND (op, 1))) && (! uns || final_prec <= innerprec || unsignedp)) { win = build3 (COMPONENT_REF, type, TREE_OPERAND (op, 0), TREE_OPERAND (op, 1), NULL_TREE); TREE_SIDE_EFFECTS (win) = TREE_SIDE_EFFECTS (op); TREE_THIS_VOLATILE (win) = TREE_THIS_VOLATILE (op); } } return win; } /* Return OP or a simpler expression for a narrower value which can be sign-extended or zero-extended to give back OP. Store in *UNSIGNEDP_PTR either 1 if the value should be zero-extended or 0 if the value should be sign-extended. */ tree get_narrower (tree op, int *unsignedp_ptr) { int uns = 0; int first = 1; tree win = op; while (TREE_CODE (op) == NOP_EXPR) { int bitschange = (TYPE_PRECISION (TREE_TYPE (op)) - TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op, 0)))); /* Truncations are many-one so cannot be removed. */ if (bitschange < 0) break; /* See what's inside this conversion. If we decide to strip it, we will set WIN. 
*/ if (bitschange > 0) { op = TREE_OPERAND (op, 0); /* An extension: the outermost one can be stripped, but remember whether it is zero or sign extension. */ if (first) uns = TYPE_UNSIGNED (TREE_TYPE (op)); /* Otherwise, if a sign extension has been stripped, only sign extensions can now be stripped; if a zero extension has been stripped, only zero-extensions. */ else if (uns != TYPE_UNSIGNED (TREE_TYPE (op))) break; first = 0; } else /* bitschange == 0 */ { /* A change in nominal type can always be stripped, but we must preserve the unsignedness. */ if (first) uns = TYPE_UNSIGNED (TREE_TYPE (op)); first = 0; op = TREE_OPERAND (op, 0); } win = op; } if (TREE_CODE (op) == COMPONENT_REF /* Since type_for_size always gives an integer type. */ && TREE_CODE (TREE_TYPE (op)) != REAL_TYPE /* Ensure field is laid out already. */ && DECL_SIZE (TREE_OPERAND (op, 1)) != 0 && host_integerp (DECL_SIZE (TREE_OPERAND (op, 1)), 1)) { unsigned HOST_WIDE_INT innerprec = tree_low_cst (DECL_SIZE (TREE_OPERAND (op, 1)), 1); int unsignedp = (DECL_UNSIGNED (TREE_OPERAND (op, 1)) || TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op, 1)))); tree type = lang_hooks.types.type_for_size (innerprec, unsignedp); /* We can get this structure field in a narrower type that fits it, but the resulting extension to its nominal type (a fullword type) must satisfy the same conditions as for other extensions. Do this only for fields that are aligned (not bit-fields), because when bit-field insns will be used there is no advantage in doing this. */ if (innerprec < TYPE_PRECISION (TREE_TYPE (op)) && ! DECL_BIT_FIELD (TREE_OPERAND (op, 1)) && (first || uns == DECL_UNSIGNED (TREE_OPERAND (op, 1))) && type != 0) { if (first) uns = DECL_UNSIGNED (TREE_OPERAND (op, 1)); win = build3 (COMPONENT_REF, type, TREE_OPERAND (op, 0), TREE_OPERAND (op, 1), NULL_TREE); TREE_SIDE_EFFECTS (win) = TREE_SIDE_EFFECTS (op); TREE_THIS_VOLATILE (win) = TREE_THIS_VOLATILE (op); } } *unsignedp_ptr = uns; return win; } /* Nonzero if integer constant C has a value that is permissible for type TYPE (an INTEGER_TYPE). */ int int_fits_type_p (tree c, tree type) { tree type_low_bound = TYPE_MIN_VALUE (type); tree type_high_bound = TYPE_MAX_VALUE (type); int ok_for_low_bound, ok_for_high_bound; /* Perform some generic filtering first, which may allow making a decision even if the bounds are not constant. First, negative integers never fit in unsigned types, */ if ((TYPE_UNSIGNED (type) && tree_int_cst_sgn (c) < 0) /* Also, unsigned integers with top bit set never fit signed types. */ || (! TYPE_UNSIGNED (type) && TYPE_UNSIGNED (TREE_TYPE (c)) && tree_int_cst_msb (c))) return 0; /* If at least one bound of the type is a constant integer, we can check ourselves and maybe make a decision. If no such decision is possible, but this type is a subtype, try checking against that. Otherwise, use force_fit_type, which checks against the precision. Compute the status for each possibly constant bound, and return if we see one does not match. Use ok_for_xxx_bound for this purpose, assigning -1 for "unknown if constant fits", 0 for "constant known *not* to fit" and 1 for "constant known to fit". */ ok_for_low_bound = -1; ok_for_high_bound = -1; /* Check if C >= type_low_bound. */ if (type_low_bound && TREE_CODE (type_low_bound) == INTEGER_CST) { ok_for_low_bound = ! tree_int_cst_lt (c, type_low_bound); if (! ok_for_low_bound) return 0; } /* Check if c <= type_high_bound. */ if (type_high_bound && TREE_CODE (type_high_bound) == INTEGER_CST) { ok_for_high_bound = ! 
tree_int_cst_lt (type_high_bound, c); if (! ok_for_high_bound) return 0; } /* If the constant fits both bounds, the result is known. */ if (ok_for_low_bound == 1 && ok_for_high_bound == 1) return 1; /* If we haven't been able to decide at this point, there nothing more we can check ourselves here. Look at the base type if we have one. */ else if (TREE_CODE (type) == INTEGER_TYPE && TREE_TYPE (type) != 0) return int_fits_type_p (c, TREE_TYPE (type)); /* Or to force_fit_type, if nothing else. */ else { c = copy_node (c); TREE_TYPE (c) = type; return !force_fit_type (c, 0); } } /* Subprogram of following function. Called by walk_tree. Return *TP if it is an automatic variable or parameter of the function passed in as DATA. */ static tree find_var_from_fn (tree *tp, int *walk_subtrees, void *data) { tree fn = (tree) data; if (TYPE_P (*tp)) *walk_subtrees = 0; else if (DECL_P (*tp) && lang_hooks.tree_inlining.auto_var_in_fn_p (*tp, fn)) return *tp; return NULL_TREE; } /* Returns true if T is, contains, or refers to a type with variable size. If FN is nonzero, only return true if a modifier of the type or position of FN is a variable or parameter inside FN. This concept is more general than that of C99 'variably modified types': in C99, a struct type is never variably modified because a VLA may not appear as a structure member. However, in GNU C code like: struct S { int i[f()]; }; is valid, and other languages may define similar constructs. */ bool variably_modified_type_p (tree type, tree fn) { tree t; /* Test if T is either variable (if FN is zero) or an expression containing a variable in FN. */ #define RETURN_TRUE_IF_VAR(T) \ do { tree _t = (T); \ if (_t && _t != error_mark_node && TREE_CODE (_t) != INTEGER_CST \ && (!fn || walk_tree (&_t, find_var_from_fn, fn, NULL))) \ return true; } while (0) if (type == error_mark_node) return false; /* If TYPE itself has variable size, it is variably modified. We do not yet have a representation of the C99 '[*]' syntax. When a representation is chosen, this function should be modified to test for that case as well. */ RETURN_TRUE_IF_VAR (TYPE_SIZE (type)); RETURN_TRUE_IF_VAR (TYPE_SIZE_UNIT(type)); switch (TREE_CODE (type)) { case POINTER_TYPE: case REFERENCE_TYPE: case ARRAY_TYPE: case SET_TYPE: case VECTOR_TYPE: if (variably_modified_type_p (TREE_TYPE (type), fn)) return true; break; case FUNCTION_TYPE: case METHOD_TYPE: /* If TYPE is a function type, it is variably modified if any of the parameters or the return type are variably modified. */ if (variably_modified_type_p (TREE_TYPE (type), fn)) return true; for (t = TYPE_ARG_TYPES (type); t && t != void_list_node; t = TREE_CHAIN (t)) if (variably_modified_type_p (TREE_VALUE (t), fn)) return true; break; case INTEGER_TYPE: case REAL_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: /* Scalar types are variably modified if their end points aren't constant. */ RETURN_TRUE_IF_VAR (TYPE_MIN_VALUE (type)); RETURN_TRUE_IF_VAR (TYPE_MAX_VALUE (type)); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: /* We can't see if any of the field are variably-modified by the definition we normally use, since that would produce infinite recursion via pointers. */ /* This is variably modified if some field's type is. 
*/ for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t)) if (TREE_CODE (t) == FIELD_DECL) { RETURN_TRUE_IF_VAR (DECL_FIELD_OFFSET (t)); RETURN_TRUE_IF_VAR (DECL_SIZE (t)); RETURN_TRUE_IF_VAR (DECL_SIZE_UNIT (t)); if (TREE_CODE (type) == QUAL_UNION_TYPE) RETURN_TRUE_IF_VAR (DECL_QUALIFIER (t)); } break; default: break; } /* The current language may have other cases to check, but in general, all other types are not variably modified. */ return lang_hooks.tree_inlining.var_mod_type_p (type, fn); #undef RETURN_TRUE_IF_VAR } /* Given a DECL or TYPE, return the scope in which it was declared, or NULL_TREE if there is no containing scope. */ tree get_containing_scope (tree t) { return (TYPE_P (t) ? TYPE_CONTEXT (t) : DECL_CONTEXT (t)); } /* Return the innermost context enclosing DECL that is a FUNCTION_DECL, or zero if none. */ tree decl_function_context (tree decl) { tree context; if (TREE_CODE (decl) == ERROR_MARK) return 0; /* C++ virtual functions use DECL_CONTEXT for the class of the vtable where we look up the function at runtime. Such functions always take a first argument of type 'pointer to real context'. C++ should really be fixed to use DECL_CONTEXT for the real context, and use something else for the "virtual context". */ else if (TREE_CODE (decl) == FUNCTION_DECL && DECL_VINDEX (decl)) context = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl))))); else context = DECL_CONTEXT (decl); while (context && TREE_CODE (context) != FUNCTION_DECL) { if (TREE_CODE (context) == BLOCK) context = BLOCK_SUPERCONTEXT (context); else context = get_containing_scope (context); } return context; } /* Return the innermost context enclosing DECL that is a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE, or zero if none. TYPE_DECLs and FUNCTION_DECLs are transparent to this function. */ tree decl_type_context (tree decl) { tree context = DECL_CONTEXT (decl); while (context) switch (TREE_CODE (context)) { case NAMESPACE_DECL: case TRANSLATION_UNIT_DECL: return NULL_TREE; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: return context; case TYPE_DECL: case FUNCTION_DECL: context = DECL_CONTEXT (context); break; case BLOCK: context = BLOCK_SUPERCONTEXT (context); break; default: abort (); } return NULL_TREE; } /* CALL is a CALL_EXPR. Return the declaration for the function called, or NULL_TREE if the called function cannot be determined. */ tree get_callee_fndecl (tree call) { tree addr; /* It's invalid to call this function with anything but a CALL_EXPR. */ if (TREE_CODE (call) != CALL_EXPR) abort (); /* The first operand to the CALL is the address of the function called. */ addr = TREE_OPERAND (call, 0); STRIP_NOPS (addr); /* If this is a readonly function pointer, extract its initial value. */ if (DECL_P (addr) && TREE_CODE (addr) != FUNCTION_DECL && TREE_READONLY (addr) && ! TREE_THIS_VOLATILE (addr) && DECL_INITIAL (addr)) addr = DECL_INITIAL (addr); /* If the address is just `&f' for some function `f', then we know that `f' is being called. */ if (TREE_CODE (addr) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (addr, 0)) == FUNCTION_DECL) return TREE_OPERAND (addr, 0); /* We couldn't figure out what was being called. Maybe the front end has some idea. */ return lang_hooks.lang_get_callee_fndecl (call); } /* Print debugging information about tree nodes generated during the compile, and any language-specific information. */ void dump_tree_statistics (void) { #ifdef GATHER_STATISTICS int i; int total_nodes, total_bytes; #endif fprintf (stderr, "\n??? 
tree nodes created\n\n"); #ifdef GATHER_STATISTICS fprintf (stderr, "Kind Nodes Bytes\n"); fprintf (stderr, "---------------------------------------\n"); total_nodes = total_bytes = 0; for (i = 0; i < (int) all_kinds; i++) { fprintf (stderr, "%-20s %7d %10d\n", tree_node_kind_names[i], tree_node_counts[i], tree_node_sizes[i]); total_nodes += tree_node_counts[i]; total_bytes += tree_node_sizes[i]; } fprintf (stderr, "---------------------------------------\n"); fprintf (stderr, "%-20s %7d %10d\n", "Total", total_nodes, total_bytes); fprintf (stderr, "---------------------------------------\n"); ssanames_print_statistics (); phinodes_print_statistics (); #else fprintf (stderr, "(No per-node statistics)\n"); #endif print_type_hash_statistics (); lang_hooks.print_statistics (); } #define FILE_FUNCTION_FORMAT "_GLOBAL__%s_%s" /* Generate a crc32 of a string. */ unsigned crc32_string (unsigned chksum, const char *string) { do { unsigned value = *string << 24; unsigned ix; for (ix = 8; ix--; value <<= 1) { unsigned feedback; feedback = (value ^ chksum) & 0x80000000 ? 0x04c11db7 : 0; chksum <<= 1; chksum ^= feedback; } } while (*string++); return chksum; } /* P is a string that will be used in a symbol. Mask out any characters that are not valid in that context. */ void clean_symbol_name (char *p) { for (; *p; p++) if (! (ISALNUM (*p) #ifndef NO_DOLLAR_IN_LABEL /* this for `$'; unlikely, but... -- kr */ || *p == '$' #endif #ifndef NO_DOT_IN_LABEL /* this for `.'; unlikely, but... */ || *p == '.' #endif )) *p = '_'; } /* Generate a name for a function unique to this translation unit. TYPE is some string to identify the purpose of this function to the linker or collect2. */ tree get_file_function_name_long (const char *type) { char *buf; const char *p; char *q; if (first_global_object_name) p = first_global_object_name; else { /* We don't have anything that we know to be unique to this translation unit, so use what we do have and throw in some randomness. */ unsigned len; const char *name = weak_global_object_name; const char *file = main_input_filename; if (! name) name = ""; if (! file) file = input_filename; len = strlen (file); q = alloca (9 * 2 + len + 1); memcpy (q, file, len + 1); clean_symbol_name (q); sprintf (q + len, "_%08X_%08X", crc32_string (0, name), crc32_string (0, flag_random_seed)); p = q; } buf = alloca (sizeof (FILE_FUNCTION_FORMAT) + strlen (p) + strlen (type)); /* Set up the name of the file-level functions we may need. Use a global object (which is already required to be unique over the program) rather than the file name (which imposes extra constraints). */ sprintf (buf, FILE_FUNCTION_FORMAT, type, p); return get_identifier (buf); } /* If KIND=='I', return a suitable global initializer (constructor) name. If KIND=='D', return a suitable global clean-up (destructor) name. */ tree get_file_function_name (int kind) { char p[2]; p[0] = kind; p[1] = 0; return get_file_function_name_long (p); } /* Expand (the constant part of) a SET_TYPE CONSTRUCTOR node. The result is placed in BUFFER (which has length BIT_SIZE), with one bit in each char ('\000' or '\001'). If the constructor is constant, NULL_TREE is returned. Otherwise, a TREE_LIST of the non-constant elements is emitted. 
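   For example, if the set's domain is 0 .. 7 and the constructor lists the
   constant element 1 and the constant range 4 .. 6, BUFFER comes back as
   0,1,0,0,1,1,1,0 and NULL_TREE is returned, since every element was
   constant.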
*/ tree get_set_constructor_bits (tree init, char *buffer, int bit_size) { int i; tree vals; HOST_WIDE_INT domain_min = tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (init))), 0); tree non_const_bits = NULL_TREE; for (i = 0; i < bit_size; i++) buffer[i] = 0; for (vals = TREE_OPERAND (init, 1); vals != NULL_TREE; vals = TREE_CHAIN (vals)) { if (!host_integerp (TREE_VALUE (vals), 0) || (TREE_PURPOSE (vals) != NULL_TREE && !host_integerp (TREE_PURPOSE (vals), 0))) non_const_bits = tree_cons (TREE_PURPOSE (vals), TREE_VALUE (vals), non_const_bits); else if (TREE_PURPOSE (vals) != NULL_TREE) { /* Set a range of bits to ones. */ HOST_WIDE_INT lo_index = tree_low_cst (TREE_PURPOSE (vals), 0) - domain_min; HOST_WIDE_INT hi_index = tree_low_cst (TREE_VALUE (vals), 0) - domain_min; if (lo_index < 0 || lo_index >= bit_size || hi_index < 0 || hi_index >= bit_size) abort (); for (; lo_index <= hi_index; lo_index++) buffer[lo_index] = 1; } else { /* Set a single bit to one. */ HOST_WIDE_INT index = tree_low_cst (TREE_VALUE (vals), 0) - domain_min; if (index < 0 || index >= bit_size) { error ("invalid initializer for bit string"); return NULL_TREE; } buffer[index] = 1; } } return non_const_bits; } /* Expand (the constant part of) a SET_TYPE CONSTRUCTOR node. The result is placed in BUFFER (which is an array of bytes). If the constructor is constant, NULL_TREE is returned. Otherwise, a TREE_LIST of the non-constant elements is emitted. */ tree get_set_constructor_bytes (tree init, unsigned char *buffer, int wd_size) { int i; int set_word_size = BITS_PER_UNIT; int bit_size = wd_size * set_word_size; int bit_pos = 0; unsigned char *bytep = buffer; char *bit_buffer = alloca (bit_size); tree non_const_bits = get_set_constructor_bits (init, bit_buffer, bit_size); for (i = 0; i < wd_size; i++) buffer[i] = 0; for (i = 0; i < bit_size; i++) { if (bit_buffer[i]) { if (BYTES_BIG_ENDIAN) *bytep |= (1 << (set_word_size - 1 - bit_pos)); else *bytep |= 1 << bit_pos; } bit_pos++; if (bit_pos >= set_word_size) bit_pos = 0, bytep++; } return non_const_bits; } #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) /* Complain that the tree code of NODE does not match the expected 0 terminated list of trailing codes. FILE, LINE, and FUNCTION are of the caller. */ void tree_check_failed (const tree node, const char *file, int line, const char *function, ...) { va_list args; char *buffer; unsigned length = 0; int code; va_start (args, function); while ((code = va_arg (args, int))) length += 4 + strlen (tree_code_name[code]); va_end (args); va_start (args, function); buffer = alloca (length); length = 0; while ((code = va_arg (args, int))) { if (length) { strcpy (buffer + length, " or "); length += 4; } strcpy (buffer + length, tree_code_name[code]); length += strlen (tree_code_name[code]); } va_end (args); internal_error ("tree check: expected %s, have %s in %s, at %s:%d", buffer, tree_code_name[TREE_CODE (node)], function, trim_filename (file), line); } /* Complain that the tree code of NODE does match the expected 0 terminated list of trailing codes. FILE, LINE, and FUNCTION are of the caller. */ void tree_not_check_failed (const tree node, const char *file, int line, const char *function, ...) 
{ va_list args; char *buffer; unsigned length = 0; int code; va_start (args, function); while ((code = va_arg (args, int))) length += 4 + strlen (tree_code_name[code]); va_end (args); va_start (args, function); buffer = alloca (length); length = 0; while ((code = va_arg (args, int))) { if (length) { strcpy (buffer + length, " or "); length += 4; } strcpy (buffer + length, tree_code_name[code]); length += strlen (tree_code_name[code]); } va_end (args); internal_error ("tree check: expected none of %s, have %s in %s, at %s:%d", buffer, tree_code_name[TREE_CODE (node)], function, trim_filename (file), line); } /* Similar to tree_check_failed, except that we check for a class of tree code, given in CL. */ void tree_class_check_failed (const tree node, int cl, const char *file, int line, const char *function) { internal_error ("tree check: expected class '%c', have '%c' (%s) in %s, at %s:%d", cl, TREE_CODE_CLASS (TREE_CODE (node)), tree_code_name[TREE_CODE (node)], function, trim_filename (file), line); } /* Similar to above, except that the check is for the bounds of a TREE_VEC's (dynamically sized) vector. */ void tree_vec_elt_check_failed (int idx, int len, const char *file, int line, const char *function) { internal_error ("tree check: accessed elt %d of tree_vec with %d elts in %s, at %s:%d", idx + 1, len, function, trim_filename (file), line); } /* Similar to above, except that the check is for the bounds of a PHI_NODE's (dynamically sized) vector. */ void phi_node_elt_check_failed (int idx, int len, const char *file, int line, const char *function) { internal_error ("tree check: accessed elt %d of phi_node with %d elts in %s, at %s:%d", idx + 1, len, function, trim_filename (file), line); } /* Similar to above, except that the check is for the bounds of the operand vector of an expression node. */ void tree_operand_check_failed (int idx, enum tree_code code, const char *file, int line, const char *function) { internal_error ("tree check: accessed operand %d of %s with %d operands in %s, at %s:%d", idx + 1, tree_code_name[code], TREE_CODE_LENGTH (code), function, trim_filename (file), line); } #endif /* ENABLE_TREE_CHECKING */ /* For a new vector type node T, build the information necessary for debugging output. */ static void finish_vector_type (tree t) { layout_type (t); { tree index = build_int_2 (TYPE_VECTOR_SUBPARTS (t) - 1, 0); tree array = build_array_type (TREE_TYPE (t), build_index_type (index)); tree rt = make_node (RECORD_TYPE); TYPE_FIELDS (rt) = build_decl (FIELD_DECL, get_identifier ("f"), array); DECL_CONTEXT (TYPE_FIELDS (rt)) = rt; layout_type (rt); TYPE_DEBUG_REPRESENTATION_TYPE (t) = rt; /* In dwarfout.c, type lookup uses TYPE_UID numbers. We want to output the representation type, and we want to find that die when looking up the vector type. This is most easily achieved by making the TYPE_UID numbers equal. */ TYPE_UID (rt) = TYPE_UID (t); } } static tree make_or_reuse_type (unsigned size, int unsignedp) { if (size == INT_TYPE_SIZE) return unsignedp ? unsigned_type_node : integer_type_node; if (size == CHAR_TYPE_SIZE) return unsignedp ? unsigned_char_type_node : signed_char_type_node; if (size == SHORT_TYPE_SIZE) return unsignedp ? short_unsigned_type_node : short_integer_type_node; if (size == LONG_TYPE_SIZE) return unsignedp ? long_unsigned_type_node : long_integer_type_node; if (size == LONG_LONG_TYPE_SIZE) return (unsignedp ? 
long_long_unsigned_type_node : long_long_integer_type_node); if (unsignedp) return make_unsigned_type (size); else return make_signed_type (size); } /* Create nodes for all integer types (and error_mark_node) using the sizes of C datatypes. The caller should call set_sizetype soon after calling this function to select one of the types as sizetype. */ void build_common_tree_nodes (int signed_char) { /* This function is called after command line parsing is complete, but before any DECL nodes should have been created. Therefore, now is the appropriate time to adjust next_decl_uid so that the range 0 .. num_in_fnames-1 is reserved for TRANSLATION_UNIT_DECLs. */ if (next_decl_uid) abort (); next_decl_uid = num_in_fnames; error_mark_node = make_node (ERROR_MARK); TREE_TYPE (error_mark_node) = error_mark_node; initialize_sizetypes (); /* Define both `signed char' and `unsigned char'. */ signed_char_type_node = make_signed_type (CHAR_TYPE_SIZE); unsigned_char_type_node = make_unsigned_type (CHAR_TYPE_SIZE); /* Define `char', which is like either `signed char' or `unsigned char' but not the same as either. */ char_type_node = (signed_char ? make_signed_type (CHAR_TYPE_SIZE) : make_unsigned_type (CHAR_TYPE_SIZE)); short_integer_type_node = make_signed_type (SHORT_TYPE_SIZE); short_unsigned_type_node = make_unsigned_type (SHORT_TYPE_SIZE); integer_type_node = make_signed_type (INT_TYPE_SIZE); unsigned_type_node = make_unsigned_type (INT_TYPE_SIZE); long_integer_type_node = make_signed_type (LONG_TYPE_SIZE); long_unsigned_type_node = make_unsigned_type (LONG_TYPE_SIZE); long_long_integer_type_node = make_signed_type (LONG_LONG_TYPE_SIZE); long_long_unsigned_type_node = make_unsigned_type (LONG_LONG_TYPE_SIZE); /* Define a boolean type. This type only represents boolean values but may be larger than char depending on the value of BOOL_TYPE_SIZE. Front ends which want to override this size (i.e. Java) can redefine boolean_type_node before calling build_common_tree_nodes_2. */ boolean_type_node = make_unsigned_type (BOOL_TYPE_SIZE); TREE_SET_CODE (boolean_type_node, BOOLEAN_TYPE); TYPE_MAX_VALUE (boolean_type_node) = build_int_2 (1, 0); TREE_TYPE (TYPE_MAX_VALUE (boolean_type_node)) = boolean_type_node; TYPE_PRECISION (boolean_type_node) = 1; /* Fill in the rest of the sized types. Reuse existing type nodes when possible. */ intQI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (QImode), 0); intHI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (HImode), 0); intSI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (SImode), 0); intDI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (DImode), 0); intTI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (TImode), 0); unsigned_intQI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (QImode), 1); unsigned_intHI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (HImode), 1); unsigned_intSI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (SImode), 1); unsigned_intDI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (DImode), 1); unsigned_intTI_type_node = make_or_reuse_type (GET_MODE_BITSIZE (TImode), 1); access_public_node = get_identifier ("public"); access_protected_node = get_identifier ("protected"); access_private_node = get_identifier ("private"); } /* Call this function after calling build_common_tree_nodes and set_sizetype. It will create several other common tree nodes. */ void build_common_tree_nodes_2 (int short_double) { /* Define these next since types below may used them. 
*/ integer_zero_node = build_int_2 (0, 0); integer_one_node = build_int_2 (1, 0); integer_minus_one_node = build_int_2 (-1, -1); size_zero_node = size_int (0); size_one_node = size_int (1); bitsize_zero_node = bitsize_int (0); bitsize_one_node = bitsize_int (1); bitsize_unit_node = bitsize_int (BITS_PER_UNIT); boolean_false_node = TYPE_MIN_VALUE (boolean_type_node); boolean_true_node = TYPE_MAX_VALUE (boolean_type_node); void_type_node = make_node (VOID_TYPE); layout_type (void_type_node); /* We are not going to have real types in C with less than byte alignment, so we might as well not have any types that claim to have it. */ TYPE_ALIGN (void_type_node) = BITS_PER_UNIT; TYPE_USER_ALIGN (void_type_node) = 0; null_pointer_node = build_int_2 (0, 0); TREE_TYPE (null_pointer_node) = build_pointer_type (void_type_node); layout_type (TREE_TYPE (null_pointer_node)); ptr_type_node = build_pointer_type (void_type_node); const_ptr_type_node = build_pointer_type (build_type_variant (void_type_node, 1, 0)); fileptr_type_node = ptr_type_node; float_type_node = make_node (REAL_TYPE); TYPE_PRECISION (float_type_node) = FLOAT_TYPE_SIZE; layout_type (float_type_node); double_type_node = make_node (REAL_TYPE); if (short_double) TYPE_PRECISION (double_type_node) = FLOAT_TYPE_SIZE; else TYPE_PRECISION (double_type_node) = DOUBLE_TYPE_SIZE; layout_type (double_type_node); long_double_type_node = make_node (REAL_TYPE); TYPE_PRECISION (long_double_type_node) = LONG_DOUBLE_TYPE_SIZE; layout_type (long_double_type_node); float_ptr_type_node = build_pointer_type (float_type_node); double_ptr_type_node = build_pointer_type (double_type_node); long_double_ptr_type_node = build_pointer_type (long_double_type_node); integer_ptr_type_node = build_pointer_type (integer_type_node); complex_integer_type_node = make_node (COMPLEX_TYPE); TREE_TYPE (complex_integer_type_node) = integer_type_node; layout_type (complex_integer_type_node); complex_float_type_node = make_node (COMPLEX_TYPE); TREE_TYPE (complex_float_type_node) = float_type_node; layout_type (complex_float_type_node); complex_double_type_node = make_node (COMPLEX_TYPE); TREE_TYPE (complex_double_type_node) = double_type_node; layout_type (complex_double_type_node); complex_long_double_type_node = make_node (COMPLEX_TYPE); TREE_TYPE (complex_long_double_type_node) = long_double_type_node; layout_type (complex_long_double_type_node); { tree t = targetm.build_builtin_va_list (); /* Many back-ends define record types without setting TYPE_NAME. If we copied the record type here, we'd keep the original record type without a name. This breaks name mangling. So, don't copy record types and let c_common_nodes_and_builtins() declare the type to be __builtin_va_list. */ if (TREE_CODE (t) != RECORD_TYPE) t = build_type_copy (t); va_list_type_node = t; } } /* HACK. GROSS. This is absolutely disgusting. I wish there was a better way. If we requested a pointer to a vector, build up the pointers that we stripped off while looking for the inner type. Similarly for return values from functions. The argument TYPE is the top of the chain, and BOTTOM is the new type which we will point to. 
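   (For illustration, a stand-alone sketch of the same rebuild-the-wrappers
   recursion, on a toy type representation, precedes the routine.)  */

/* Sketch only -- not part of GCC; it compiles on its own.  A "type" here is
   a chain of wrapper nodes (pointer or array) ending in a base node; the
   names and the static pool are made up for the example.  */
enum sketch_kind { SKETCH_BASE, SKETCH_POINTER, SKETCH_ARRAY };

struct sketch_type
{
  enum sketch_kind kind;
  struct sketch_type *inner;	/* What a wrapper wraps; unused for a base.  */
};

/* Nodes for the rebuilt chain come from this small pool so that the sketch
   needs no allocator.  */
static struct sketch_type sketch_type_pool[32];
static int sketch_type_pool_used;

/* Re-apply TYPE's chain of wrappers around BOTTOM.  When a non-wrapper is
   reached, BOTTOM itself is returned, just as reconstruct_complex_type
   returns BOTTOM for anything that is not a pointer, array, function or
   method type.  */
static struct sketch_type *
sketch_reconstruct (const struct sketch_type *type, struct sketch_type *bottom)
{
  struct sketch_type *outer;

  if (type->kind == SKETCH_BASE)
    return bottom;
  if (sketch_type_pool_used
      >= (int) (sizeof sketch_type_pool / sizeof sketch_type_pool[0]))
    return bottom;		/* Pool exhausted; give up on the wrappers.  */

  outer = &sketch_type_pool[sketch_type_pool_used++];
  outer->kind = type->kind;
  outer->inner = sketch_reconstruct (type->inner, bottom);
  return outer;
}

/* End of sketch; the GCC routine described above follows.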
*/ tree reconstruct_complex_type (tree type, tree bottom) { tree inner, outer; if (POINTER_TYPE_P (type)) { inner = reconstruct_complex_type (TREE_TYPE (type), bottom); outer = build_pointer_type (inner); } else if (TREE_CODE (type) == ARRAY_TYPE) { inner = reconstruct_complex_type (TREE_TYPE (type), bottom); outer = build_array_type (inner, TYPE_DOMAIN (type)); } else if (TREE_CODE (type) == FUNCTION_TYPE) { inner = reconstruct_complex_type (TREE_TYPE (type), bottom); outer = build_function_type (inner, TYPE_ARG_TYPES (type)); } else if (TREE_CODE (type) == METHOD_TYPE) { inner = reconstruct_complex_type (TREE_TYPE (type), bottom); outer = build_method_type_directly (TYPE_METHOD_BASETYPE (type), inner, TYPE_ARG_TYPES (type)); } else return bottom; TYPE_READONLY (outer) = TYPE_READONLY (type); TYPE_VOLATILE (outer) = TYPE_VOLATILE (type); return outer; } /* Returns a vector tree node given a vector mode and inner type. */ tree build_vector_type_for_mode (tree innertype, enum machine_mode mode) { tree t; t = make_node (VECTOR_TYPE); TREE_TYPE (t) = innertype; TYPE_MODE (t) = mode; finish_vector_type (t); return t; } /* Similarly, but takes inner type and units. */ tree build_vector_type (tree innertype, int nunits) { enum machine_mode innermode = TYPE_MODE (innertype); enum machine_mode mode; if (GET_MODE_CLASS (innermode) == MODE_FLOAT) mode = MIN_MODE_VECTOR_FLOAT; else mode = MIN_MODE_VECTOR_INT; for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode)) if (GET_MODE_NUNITS (mode) == nunits && GET_MODE_INNER (mode) == innermode) return build_vector_type_for_mode (innertype, mode); return NULL_TREE; } /* Given an initializer INIT, return TRUE if INIT is zero or some aggregate of zeros. Otherwise return FALSE. */ bool initializer_zerop (tree init) { tree elt; STRIP_NOPS (init); switch (TREE_CODE (init)) { case INTEGER_CST: return integer_zerop (init); case REAL_CST: /* ??? Note that this is not correct for C4X float formats. There, a bit pattern of all zeros is 1.0; 0.0 is encoded with the most negative exponent. */ return real_zerop (init) && ! REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (init)); case COMPLEX_CST: return integer_zerop (init) || (real_zerop (init) && ! REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (TREE_REALPART (init))) && ! REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (TREE_IMAGPART (init)))); case VECTOR_CST: for (elt = TREE_VECTOR_CST_ELTS (init); elt; elt = TREE_CHAIN (elt)) if (!initializer_zerop (TREE_VALUE (elt))) return false; return true; case CONSTRUCTOR: elt = CONSTRUCTOR_ELTS (init); if (elt == NULL_TREE) return true; /* A set is empty only if it has no elements. */ if (TREE_CODE (TREE_TYPE (init)) == SET_TYPE) return false; for (; elt ; elt = TREE_CHAIN (elt)) if (! initializer_zerop (TREE_VALUE (elt))) return false; return true; default: return false; } } void add_var_to_bind_expr (tree bind_expr, tree var) { BIND_EXPR_VARS (bind_expr) = chainon (BIND_EXPR_VARS (bind_expr), var); if (BIND_EXPR_BLOCK (bind_expr)) BLOCK_VARS (BIND_EXPR_BLOCK (bind_expr)) = BIND_EXPR_VARS (bind_expr); } /* Build an empty statement. */ tree build_empty_stmt (void) { return build1 (NOP_EXPR, void_type_node, size_zero_node); } /* Return true if T (assumed to be a DECL) must be assigned a memory location. 
*/ bool needs_to_live_in_memory (tree t) { return (DECL_NEEDS_TO_LIVE_IN_MEMORY_INTERNAL (t) || TREE_STATIC (t) || DECL_EXTERNAL (t) || DECL_NONLOCAL (t) || (TREE_CODE (t) == RESULT_DECL && aggregate_value_p (t, current_function_decl)) || decl_function_context (t) != current_function_decl); } /* Type information for tree.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_type_hash (void *x_p) { struct type_hash * const x = (struct type_hash *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).type); } } void gt_ggc_m_P9type_hash4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_9type_hash ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_pch_nx_type_hash (void *x_p) { struct type_hash * const x = (struct type_hash *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9type_hash)) { gt_pch_n_9tree_node ((*x).type); } } void gt_pch_n_P9type_hash4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P9type_hash4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_9type_hash ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P9type_hash4htab); } } } void gt_pch_p_9type_hash (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct type_hash * const x ATTRIBUTE_UNUSED = (struct type_hash *)x_p; if ((void *)(x) == this_obj) op (&((*x).type), cookie); } void gt_pch_p_P9type_hash4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } /* GC roots. 
*/ const struct ggc_root_tab gt_ggc_r_gt_tree_h[] = { { &last_annotated_node, 1, sizeof (last_annotated_node), >_ggc_mx_tree_node, >_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; const struct ggc_cache_tab gt_ggc_rc_gt_tree_h[] = { { &type_hash_table, 1, sizeof (type_hash_table), >_ggc_mx_type_hash, >_pch_nx_type_hash, &type_hash_marked_p }, LAST_GGC_CACHE_TAB }; const struct ggc_root_tab gt_pch_rc_gt_tree_h[] = { { &type_hash_table, 1, sizeof (type_hash_table), >_ggc_m_P9type_hash4htab, >_pch_n_P9type_hash4htab }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_tree_h[] = { { &next_type_uid, 1, sizeof (next_type_uid), NULL, NULL }, { &next_decl_uid, 1, sizeof (next_decl_uid), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Tree-dumping functionality for intermediate representation. Copyright (C) 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. Written by Mark Mitchell This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static unsigned int queue (dump_info_p, tree, int); static void dump_index (dump_info_p, unsigned int); static void dequeue_and_dump (dump_info_p); static void dump_new_line (dump_info_p); static void dump_maybe_newline (dump_info_p); static void dump_string_field (dump_info_p, const char *, const char *); static void dump_enable_all (int); /* Add T to the end of the queue of nodes to dump. Returns the index assigned to T. */ static unsigned int queue (dump_info_p di, tree t, int flags) { dump_queue_p dq; dump_node_info_p dni; unsigned int index; /* Assign the next available index to T. */ index = ++di->index; /* Obtain a new queue node. */ if (di->free_list) { dq = di->free_list; di->free_list = dq->next; } else dq = xmalloc (sizeof (struct dump_queue)); /* Create a new entry in the splay-tree. */ dni = xmalloc (sizeof (struct dump_node_info)); dni->index = index; dni->binfo_p = ((flags & DUMP_BINFO) != 0); dq->node = splay_tree_insert (di->nodes, (splay_tree_key) t, (splay_tree_value) dni); /* Add it to the end of the queue. */ dq->next = 0; if (!di->queue_end) di->queue = dq; else di->queue_end->next = dq; di->queue_end = dq; /* Return the index. */ return index; } static void dump_index (dump_info_p di, unsigned int index) { fprintf (di->stream, "@%-6u ", index); di->column += 8; } /* If T has not already been output, queue it for subsequent output. FIELD is a string to print before printing the index. Then, the index of T is printed. */ void queue_and_dump_index (dump_info_p di, const char *field, tree t, int flags) { unsigned int index; splay_tree_node n; /* If there's no node, just return. This makes for fewer checks in our callers. */ if (!t) return; /* See if we've already queued or dumped this node. */ n = splay_tree_lookup (di->nodes, (splay_tree_key) t); if (n) index = ((dump_node_info_p) n->value)->index; else /* If we haven't, add it to the queue. */ index = queue (di, t, flags); /* Print the index of the node. 
*/ dump_maybe_newline (di); fprintf (di->stream, "%-4s: ", field); di->column += 6; dump_index (di, index); } /* Dump the type of T. */ void queue_and_dump_type (dump_info_p di, tree t) { queue_and_dump_index (di, "type", TREE_TYPE (t), DUMP_NONE); } /* Dump column control */ #define SOL_COLUMN 25 /* Start of line column. */ #define EOL_COLUMN 55 /* End of line column. */ #define COLUMN_ALIGNMENT 15 /* Alignment. */ /* Insert a new line in the dump output, and indent to an appropriate place to start printing more fields. */ static void dump_new_line (dump_info_p di) { fprintf (di->stream, "\n%*s", SOL_COLUMN, ""); di->column = SOL_COLUMN; } /* If necessary, insert a new line. */ static void dump_maybe_newline (dump_info_p di) { int extra; /* See if we need a new line. */ if (di->column > EOL_COLUMN) dump_new_line (di); /* See if we need any padding. */ else if ((extra = (di->column - SOL_COLUMN) % COLUMN_ALIGNMENT) != 0) { fprintf (di->stream, "%*s", COLUMN_ALIGNMENT - extra, ""); di->column += COLUMN_ALIGNMENT - extra; } } /* Dump pointer PTR using FIELD to identify it. */ void dump_pointer (dump_info_p di, const char *field, void *ptr) { dump_maybe_newline (di); fprintf (di->stream, "%-4s: %-8lx ", field, (long) ptr); di->column += 15; } /* Dump integer I using FIELD to identify it. */ void dump_int (dump_info_p di, const char *field, int i) { dump_maybe_newline (di); fprintf (di->stream, "%-4s: %-7d ", field, i); di->column += 14; } /* Dump the string S. */ void dump_string (dump_info_p di, const char *string) { dump_maybe_newline (di); fprintf (di->stream, "%-13s ", string); if (strlen (string) > 13) di->column += strlen (string) + 1; else di->column += 14; } /* Dump the string field S. */ static void dump_string_field (dump_info_p di, const char *field, const char *string) { dump_maybe_newline (di); fprintf (di->stream, "%-4s: %-7s ", field, string); if (strlen (string) > 7) di->column += 6 + strlen (string) + 1; else di->column += 14; } /* Dump the next node in the queue. */ static void dequeue_and_dump (dump_info_p di) { dump_queue_p dq; splay_tree_node stn; dump_node_info_p dni; tree t; unsigned int index; enum tree_code code; char code_class; const char* code_name; /* Get the next node from the queue. */ dq = di->queue; stn = dq->node; t = (tree) stn->key; dni = (dump_node_info_p) stn->value; index = dni->index; /* Remove the node from the queue, and put it on the free list. */ di->queue = dq->next; if (!di->queue) di->queue_end = 0; dq->next = di->free_list; di->free_list = dq; /* Print the node index. */ dump_index (di, index); /* And the type of node this is. */ if (dni->binfo_p) code_name = "binfo"; else code_name = tree_code_name[(int) TREE_CODE (t)]; fprintf (di->stream, "%-16s ", code_name); di->column = 25; /* Figure out what kind of node this is. */ code = TREE_CODE (t); code_class = TREE_CODE_CLASS (code); /* Although BINFOs are TREE_VECs, we dump them specially so as to be more informative. */ if (dni->binfo_p) { unsigned ix; tree bases = BINFO_BASETYPES (t); unsigned n_bases = bases ? TREE_VEC_LENGTH (bases): 0; tree accesses = BINFO_BASEACCESSES (t); dump_child ("type", BINFO_TYPE (t)); if (BINFO_VIRTUAL_P (t)) dump_string (di, "virt"); dump_int (di, "bases", n_bases); for (ix = 0; ix != n_bases; ix++) { tree base = TREE_VEC_ELT (bases, ix); tree access = (accesses ? 
TREE_VEC_ELT (accesses, ix) : access_public_node); const char *string = NULL; if (access == access_public_node) string = "pub"; else if (access == access_protected_node) string = "prot"; else if (access == access_private_node) string = "priv"; else abort (); dump_string (di, string); queue_and_dump_index (di, "binf", base, DUMP_BINFO); } goto done; } /* We can knock off a bunch of expression nodes in exactly the same way. */ if (IS_EXPR_CODE_CLASS (code_class)) { /* If we're dumping children, dump them now. */ queue_and_dump_type (di, t); switch (code_class) { case '1': dump_child ("op 0", TREE_OPERAND (t, 0)); break; case '2': case '<': dump_child ("op 0", TREE_OPERAND (t, 0)); dump_child ("op 1", TREE_OPERAND (t, 1)); break; case 'e': case 'r': case 's': /* These nodes are handled explicitly below. */ break; default: abort (); } } else if (DECL_P (t)) { expanded_location xloc; /* All declarations have names. */ if (DECL_NAME (t)) dump_child ("name", DECL_NAME (t)); if (DECL_ASSEMBLER_NAME_SET_P (t) && DECL_ASSEMBLER_NAME (t) != DECL_NAME (t)) dump_child ("mngl", DECL_ASSEMBLER_NAME (t)); /* And types. */ queue_and_dump_type (di, t); dump_child ("scpe", DECL_CONTEXT (t)); /* And a source position. */ xloc = expand_location (DECL_SOURCE_LOCATION (t)); if (xloc.file) { const char *filename = strrchr (xloc.file, '/'); if (!filename) filename = xloc.file; else /* Skip the slash. */ ++filename; dump_maybe_newline (di); fprintf (di->stream, "srcp: %s:%-6d ", filename, xloc.line); di->column += 6 + strlen (filename) + 8; } /* And any declaration can be compiler-generated. */ if (DECL_ARTIFICIAL (t)) dump_string (di, "artificial"); if (TREE_CHAIN (t) && !dump_flag (di, TDF_SLIM, NULL)) dump_child ("chan", TREE_CHAIN (t)); } else if (code_class == 't') { /* All types have qualifiers. */ int quals = lang_hooks.tree_dump.type_quals (t); if (quals != TYPE_UNQUALIFIED) { fprintf (di->stream, "qual: %c%c%c ", (quals & TYPE_QUAL_CONST) ? 'c' : ' ', (quals & TYPE_QUAL_VOLATILE) ? 'v' : ' ', (quals & TYPE_QUAL_RESTRICT) ? 'r' : ' '); di->column += 14; } /* All types have associated declarations. */ dump_child ("name", TYPE_NAME (t)); /* All types have a main variant. */ if (TYPE_MAIN_VARIANT (t) != t) dump_child ("unql", TYPE_MAIN_VARIANT (t)); /* And sizes. */ dump_child ("size", TYPE_SIZE (t)); /* All types have alignments. */ dump_int (di, "algn", TYPE_ALIGN (t)); } else if (code_class == 'c') /* All constants can have types. */ queue_and_dump_type (di, t); /* Give the language-specific code a chance to print something. If it's completely taken care of things, don't bother printing anything more ourselves. */ if (lang_hooks.tree_dump.dump_tree (di, t)) goto done; /* Now handle the various kinds of nodes. 
*/ switch (code) { int i; case IDENTIFIER_NODE: dump_string_field (di, "strg", IDENTIFIER_POINTER (t)); dump_int (di, "lngt", IDENTIFIER_LENGTH (t)); break; case TREE_LIST: dump_child ("purp", TREE_PURPOSE (t)); dump_child ("valu", TREE_VALUE (t)); dump_child ("chan", TREE_CHAIN (t)); break; case STATEMENT_LIST: { tree_stmt_iterator it; for (i = 0, it = tsi_start (t); !tsi_end_p (it); tsi_next (&it), i++) { char buffer[32]; sprintf (buffer, "%u", i); dump_child (buffer, tsi_stmt (it)); } } break; case TREE_VEC: dump_int (di, "lngt", TREE_VEC_LENGTH (t)); for (i = 0; i < TREE_VEC_LENGTH (t); ++i) { char buffer[32]; sprintf (buffer, "%u", i); dump_child (buffer, TREE_VEC_ELT (t, i)); } break; case INTEGER_TYPE: case ENUMERAL_TYPE: dump_int (di, "prec", TYPE_PRECISION (t)); if (TYPE_UNSIGNED (t)) dump_string (di, "unsigned"); dump_child ("min", TYPE_MIN_VALUE (t)); dump_child ("max", TYPE_MAX_VALUE (t)); if (code == ENUMERAL_TYPE) dump_child ("csts", TYPE_VALUES (t)); break; case REAL_TYPE: dump_int (di, "prec", TYPE_PRECISION (t)); break; case POINTER_TYPE: dump_child ("ptd", TREE_TYPE (t)); break; case REFERENCE_TYPE: dump_child ("refd", TREE_TYPE (t)); break; case METHOD_TYPE: dump_child ("clas", TYPE_METHOD_BASETYPE (t)); /* Fall through. */ case FUNCTION_TYPE: dump_child ("retn", TREE_TYPE (t)); dump_child ("prms", TYPE_ARG_TYPES (t)); break; case ARRAY_TYPE: dump_child ("elts", TREE_TYPE (t)); dump_child ("domn", TYPE_DOMAIN (t)); break; case RECORD_TYPE: case UNION_TYPE: if (TREE_CODE (t) == RECORD_TYPE) dump_string (di, "struct"); else dump_string (di, "union"); dump_child ("flds", TYPE_FIELDS (t)); dump_child ("fncs", TYPE_METHODS (t)); queue_and_dump_index (di, "binf", TYPE_BINFO (t), DUMP_BINFO); break; case CONST_DECL: dump_child ("cnst", DECL_INITIAL (t)); break; case VAR_DECL: case PARM_DECL: case FIELD_DECL: case RESULT_DECL: if (TREE_CODE (t) == PARM_DECL) dump_child ("argt", DECL_ARG_TYPE (t)); else dump_child ("init", DECL_INITIAL (t)); dump_child ("size", DECL_SIZE (t)); dump_int (di, "algn", DECL_ALIGN (t)); if (TREE_CODE (t) == FIELD_DECL) { if (DECL_FIELD_OFFSET (t)) dump_child ("bpos", bit_position (t)); } else if (TREE_CODE (t) == VAR_DECL || TREE_CODE (t) == PARM_DECL) { dump_int (di, "used", TREE_USED (t)); if (DECL_REGISTER (t)) dump_string (di, "register"); } break; case FUNCTION_DECL: dump_child ("args", DECL_ARGUMENTS (t)); if (DECL_EXTERNAL (t)) dump_string (di, "undefined"); if (TREE_PUBLIC (t)) dump_string (di, "extern"); else dump_string (di, "static"); if (DECL_LANG_SPECIFIC (t) && !dump_flag (di, TDF_SLIM, t)) dump_child ("body", DECL_SAVED_TREE (t)); break; case INTEGER_CST: if (TREE_INT_CST_HIGH (t)) dump_int (di, "high", TREE_INT_CST_HIGH (t)); dump_int (di, "low", TREE_INT_CST_LOW (t)); break; case STRING_CST: fprintf (di->stream, "strg: %-7s ", TREE_STRING_POINTER (t)); dump_int (di, "lngt", TREE_STRING_LENGTH (t)); break; case TRUTH_NOT_EXPR: case ADDR_EXPR: case INDIRECT_REF: case CLEANUP_POINT_EXPR: case SAVE_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: /* These nodes are unary, but do not have code class `1'. */ dump_child ("op 0", TREE_OPERAND (t, 0)); break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case INIT_EXPR: case MODIFY_EXPR: case COMPOUND_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: /* These nodes are binary, but do not have code class `2'. 
*/ dump_child ("op 0", TREE_OPERAND (t, 0)); dump_child ("op 1", TREE_OPERAND (t, 1)); break; case COMPONENT_REF: dump_child ("op 0", TREE_OPERAND (t, 0)); dump_child ("op 1", TREE_OPERAND (t, 1)); dump_child ("op 2", TREE_OPERAND (t, 2)); break; case ARRAY_REF: case ARRAY_RANGE_REF: dump_child ("op 0", TREE_OPERAND (t, 0)); dump_child ("op 1", TREE_OPERAND (t, 1)); dump_child ("op 2", TREE_OPERAND (t, 2)); dump_child ("op 3", TREE_OPERAND (t, 3)); break; case COND_EXPR: dump_child ("op 0", TREE_OPERAND (t, 0)); dump_child ("op 1", TREE_OPERAND (t, 1)); dump_child ("op 2", TREE_OPERAND (t, 2)); break; case CALL_EXPR: dump_child ("fn", TREE_OPERAND (t, 0)); dump_child ("args", TREE_OPERAND (t, 1)); break; case CONSTRUCTOR: dump_child ("elts", CONSTRUCTOR_ELTS (t)); break; case BIND_EXPR: dump_child ("vars", TREE_OPERAND (t, 0)); dump_child ("body", TREE_OPERAND (t, 1)); break; case LOOP_EXPR: dump_child ("body", TREE_OPERAND (t, 0)); break; case EXIT_EXPR: dump_child ("cond", TREE_OPERAND (t, 0)); break; case TARGET_EXPR: dump_child ("decl", TREE_OPERAND (t, 0)); dump_child ("init", TREE_OPERAND (t, 1)); dump_child ("clnp", TREE_OPERAND (t, 2)); /* There really are two possible places the initializer can be. After RTL expansion, the second operand is moved to the position of the fourth operand, and the second operand becomes NULL. */ dump_child ("init", TREE_OPERAND (t, 3)); break; default: /* There are no additional fields to print. */ break; } done: if (dump_flag (di, TDF_ADDRESS, NULL)) dump_pointer (di, "addr", (void *)t); /* Terminate the line. */ fprintf (di->stream, "\n"); } /* Return nonzero if FLAG has been specified for the dump, and NODE is not the root node of the dump. */ int dump_flag (dump_info_p di, int flag, tree node) { return (di->flags & flag) && (node != di->node); } /* Dump T, and all its children, on STREAM. */ void dump_node (tree t, int flags, FILE *stream) { struct dump_info di; dump_queue_p dq; dump_queue_p next_dq; /* Initialize the dump-information structure. */ di.stream = stream; di.index = 0; di.column = 0; di.queue = 0; di.queue_end = 0; di.free_list = 0; di.flags = flags; di.node = t; di.nodes = splay_tree_new (splay_tree_compare_pointers, 0, (splay_tree_delete_value_fn) &free); /* Queue up the first node. */ queue (&di, t, DUMP_NONE); /* Until the queue is empty, keep dumping nodes. */ while (di.queue) dequeue_and_dump (&di); /* Now, clean up. */ for (dq = di.free_list; dq; dq = next_dq) { next_dq = dq->next; free (dq); } splay_tree_delete (di.nodes); } /* Define a tree dump switch. */ struct tree_dump_file_info { const char *suffix; /* suffix to give output file. */ const char *swtch; /* command line switch */ int flags; /* user flags */ int state; /* state of play */ }; /* Table of tree dump switches. This must be consistent with the TREE_DUMP_INDEX enumeration in tree.h */ static struct tree_dump_file_info dump_files[TDI_end] = { {NULL, NULL, 0, 0}, {".tu", "translation-unit", 0, 0}, {".class", "class-hierarchy", 0, 0}, {".original", "tree-original", 0, 0}, {".generic", "tree-generic", 0, 0}, {".nested", "tree-nested", 0, 0}, {".inlined", "tree-inlined", 0, 0}, {".vcg", "tree-vcg", 0, 0}, {".xml", "call-graph", 0, 0}, {NULL, "tree-all", 0, 0}, }; /* Dynamically registered tree dump files and switches. */ static struct tree_dump_file_info *extra_dump_files; static size_t extra_dump_files_in_use; static size_t extra_dump_files_alloced; /* Define a name->number mapping for a dump flag value. 
*/ struct dump_option_value_info { const char *const name; /* the name of the value */ const int value; /* the value of the name */ }; /* Table of dump options. This must be consistent with the TDF_* flags in tree.h */ static const struct dump_option_value_info dump_options[] = { {"address", TDF_ADDRESS}, {"slim", TDF_SLIM}, {"raw", TDF_RAW}, {"details", TDF_DETAILS}, {"stats", TDF_STATS}, {"blocks", TDF_BLOCKS}, {"vops", TDF_VOPS}, {"lineno", TDF_LINENO}, {"uid", TDF_UID}, {"all", ~(TDF_RAW | TDF_SLIM | TDF_LINENO)}, {NULL, 0} }; unsigned int dump_register (const char *suffix, const char *swtch) { size_t this = extra_dump_files_in_use++; if (this >= extra_dump_files_alloced) { if (extra_dump_files_alloced == 0) extra_dump_files_alloced = 32; else extra_dump_files_alloced *= 2; extra_dump_files = xrealloc (extra_dump_files, sizeof (struct tree_dump_file_info) * extra_dump_files_alloced); } memset (&extra_dump_files[this], 0, sizeof (struct tree_dump_file_info)); extra_dump_files[this].suffix = suffix; extra_dump_files[this].swtch = swtch; return this + TDI_end; } /* Return the tree_dump_file_info for the given phase. */ static struct tree_dump_file_info * get_dump_file_info (enum tree_dump_index phase) { if (phase < TDI_end) return &dump_files[phase]; else if (phase - TDI_end >= extra_dump_files_in_use) abort (); else return extra_dump_files + (phase - TDI_end); } /* Begin a tree dump for PHASE. Stores any user supplied flag in *FLAG_PTR and returns a stream to write to. If the dump is not enabled, returns NULL. Multiple calls will reopen and append to the dump file. */ FILE * dump_begin (enum tree_dump_index phase, int *flag_ptr) { FILE *stream; char *name; char dump_id[10]; struct tree_dump_file_info *dfi; if (phase == TDI_none) return NULL; dfi = get_dump_file_info (phase); if (dfi->state == 0) return NULL; if (snprintf (dump_id, sizeof (dump_id), ".t%02d", phase) < 0) dump_id[0] = '\0'; name = concat (dump_base_name, dump_id, dfi->suffix, NULL); stream = fopen (name, dfi->state < 0 ? "w" : "a"); if (!stream) error ("could not open dump file `%s': %s", name, strerror (errno)); else dfi->state = 1; free (name); if (flag_ptr) *flag_ptr = dfi->flags; return stream; } /* Returns nonzero if tree dump PHASE is enabled. */ int dump_enabled_p (enum tree_dump_index phase) { struct tree_dump_file_info *dfi = get_dump_file_info (phase); return dfi->state; } /* Returns the switch name of PHASE. */ const char * dump_flag_name (enum tree_dump_index phase) { struct tree_dump_file_info *dfi = get_dump_file_info (phase); return dfi->swtch; } /* Finish a tree dump for PHASE. STREAM is the stream created by dump_begin. */ void dump_end (enum tree_dump_index phase ATTRIBUTE_UNUSED, FILE *stream) { fclose (stream); } /* Enable all tree dumps. */ static void dump_enable_all (int flags) { size_t i; for (i = TDI_none + 1; i < (size_t) TDI_end; i++) { dump_files[i].state = -1; dump_files[i].flags = flags; } for (i = 0; i < extra_dump_files_in_use; i++) { extra_dump_files[i].state = -1; extra_dump_files[i].flags = flags; } /* FIXME -fdump-call-graph is broken. */ dump_files[TDI_xml].state = 0; dump_files[TDI_xml].flags = 0; } /* Parse ARG as a dump switch. Return nonzero if it is, and store the relevant details in the dump_files array. 
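   (A stand-alone sketch of the option-string handling comes first, then the
   routine itself.)  */

/* Sketch only -- not part of GCC; it compiles on its own with <string.h>.
   The text after a dump switch is a series of '-'-separated option names;
   each is looked up in a name/value table and OR'd into a flag word.  The
   table here is a made-up subset, not the real dump_options[] table, and
   unknown names are silently skipped instead of producing a warning.  */
#include <string.h>

struct sketch_dump_option { const char *name; int value; };

static const struct sketch_dump_option sketch_dump_options[] = {
  { "address", 1 }, { "slim", 2 }, { "raw", 4 }, { NULL, 0 }
};

/* Parse OPTS, e.g. "-raw-slim", and return the combined flag bits.  */
static int
sketch_parse_dump_options (const char *opts)
{
  int flags = 0;
  const char *ptr = opts;

  while (*ptr)
    {
      const struct sketch_dump_option *o;
      const char *end;
      size_t len;

      while (*ptr == '-')
	ptr++;
      end = strchr (ptr, '-');
      if (!end)
	end = ptr + strlen (ptr);
      len = end - ptr;

      for (o = sketch_dump_options; o->name; o++)
	if (strlen (o->name) == len && !memcmp (o->name, ptr, len))
	  {
	    flags |= o->value;
	    break;
	  }
      ptr = end;
    }
  return flags;
}

/* End of sketch; the GCC routine described above follows.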
*/ static int dump_switch_p_1 (const char *arg, struct tree_dump_file_info *dfi) { const char *option_value; const char *ptr; int flags; option_value = skip_leading_substring (arg, dfi->swtch); if (!option_value) return 0; ptr = option_value; flags = 0; while (*ptr) { const struct dump_option_value_info *option_ptr; const char *end_ptr; unsigned length; while (*ptr == '-') ptr++; end_ptr = strchr (ptr, '-'); if (!end_ptr) end_ptr = ptr + strlen (ptr); length = end_ptr - ptr; for (option_ptr = dump_options; option_ptr->name; option_ptr++) if (strlen (option_ptr->name) == length && !memcmp (option_ptr->name, ptr, length)) { flags |= option_ptr->value; goto found; } warning ("ignoring unknown option `%.*s' in `-fdump-%s'", length, ptr, dfi->swtch); found:; ptr = end_ptr; } dfi->state = -1; dfi->flags = flags; /* Process -fdump-tree-all by enabling all the known dumps. */ if (dfi->suffix == NULL) dump_enable_all (flags); return 1; } int dump_switch_p (const char *arg) { size_t i; int any = 0; for (i = TDI_none + 1; i != TDI_end; i++) any |= dump_switch_p_1 (arg, &dump_files[i]); for (i = 0; i < extra_dump_files_in_use; i++) any |= dump_switch_p_1 (arg, &extra_dump_files[i]); return any; } /* Dump FUNCTION_DECL FN as tree dump PHASE. */ void dump_function (enum tree_dump_index phase, tree fn) { FILE *stream; int flags; stream = dump_begin (phase, &flags); if (stream) { dump_function_to_file (fn, stream, flags); dump_end (phase, stream); } } /* Try to unroll loops, and split induction variables. Copyright (C) 1992, 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by James E. Wilson, Cygnus Support/UC Berkeley. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Try to unroll a loop, and split induction variables. Loops for which the number of iterations can be calculated exactly are handled specially. If the number of iterations times the insn_count is less than MAX_UNROLLED_INSNS, then the loop is unrolled completely. Otherwise, we try to unroll the loop a number of times modulo the number of iterations, so that only one exit test will be needed. It is unrolled a number of times approximately equal to MAX_UNROLLED_INSNS divided by the insn count. Otherwise, if the number of iterations can be calculated exactly at run time, and the loop is always entered at the top, then we try to precondition the loop. That is, at run time, calculate how many times the loop will execute, and then execute the loop body a few times so that the remaining iterations will be some multiple of 4 (or 2 if the loop is large). Then fall through to a loop unrolled 4 (or 2) times, with only one exit test needed at the end of the loop. 
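   For example, with 11 iterations and an unroll factor of 4, the
   preconditioning code runs the original body 11 mod 4 = 3 times up front;
   the remaining 8 iterations then fall through into the body unrolled 4
   times, which executes twice, so the loop's exit test runs only twice
   instead of eleven times.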
Otherwise, if the number of iterations can not be calculated exactly, not even at run time, then we still unroll the loop a number of times approximately equal to MAX_UNROLLED_INSNS divided by the insn count, but there must be an exit test after each copy of the loop body. For each induction variable, which is dead outside the loop (replaceable) or for which we can easily calculate the final value, if we can easily calculate its value at each place where it is set as a function of the current loop unroll count and the variable's value at loop entry, then the induction variable is split into `N' different variables, one for each copy of the loop body. One variable is live across the backward branch, and the others are all calculated as a function of this variable. This helps eliminate data dependencies, and leads to further opportunities for cse. */ /* Possible improvements follow: */ /* ??? Add an extra pass somewhere to determine whether unrolling will give any benefit. E.g. after generating all unrolled insns, compute the cost of all insns and compare against cost of insns in rolled loop. - On traditional architectures, unrolling a non-constant bound loop is a win if there is a giv whose only use is in memory addresses, the memory addresses can be split, and hence giv increments can be eliminated. - It is also a win if the loop is executed many times, and preconditioning can be performed for the loop. Add code to check for these and similar cases. */ /* ??? Improve control of which loops get unrolled. Could use profiling info to only unroll the most commonly executed loops. Perhaps have a user specifiable option to control the amount of code expansion, or the percent of loops to consider for unrolling. Etc. */ /* ??? Look at the register copies inside the loop to see if they form a simple permutation. If so, iterate the permutation until it gets back to the start state. This is how many times we should unroll the loop, for best results, because then all register copies can be eliminated. For example, the lisp nreverse function should be unrolled 3 times while (this) { next = this->cdr; this->cdr = prev; prev = this; this = next; } ??? The number of times to unroll the loop may also be based on data references in the loop. For example, if we have a loop that references x[i-1], x[i], and x[i+1], we should unroll it a multiple of 3 times. */ /* ??? Add some simple linear equation solving capability so that we can determine the number of loop iterations for more complex loops. For example, consider this loop from gdb #define SWAP_TARGET_AND_HOST(buffer,len) { char tmp; char *p = (char *) buffer; char *q = ((char *) buffer) + len - 1; int iterations = (len + 1) >> 1; int i; for (p; p < q; p++, q--;) { tmp = *q; *q = *p; *p = tmp; } } Note that: start value = p = &buffer + current_iteration end value = q = &buffer + len - 1 - current_iteration Given the loop exit test of "p < q", then there must be "q - p" iterations, set equal to zero and solve for number of iterations: q - p = len - 1 - 2*current_iteration = 0 current_iteration = (len - 1) / 2 Hence, there are (len - 1) / 2 (rounded up to the nearest integer) iterations of this loop. */ /* ??? Currently, no labels are marked as loop invariant when doing loop unrolling. This is because an insn inside the loop, that loads the address of a label inside the loop into a register, could be moved outside the loop by the invariant code motion pass if labels were invariant. 
If the loop is subsequently unrolled, the code will be wrong because each unrolled body of the loop will use the same address, whereas each actually needs a different address. A case where this happens is when a loop containing a switch statement is unrolled. It would be better to let labels be considered invariant. When we unroll loops here, check to see if any insns using a label local to the loop were moved before the loop. If so, then correct the problem, by moving the insn back into the loop, or perhaps replicate the insn before the loop, one copy for each time the loop is unrolled. */ /* The prime factors looked for when trying to unroll a loop by some number which is modulo the total number of iterations. Just checking for these 4 prime factors will find at least one factor for 75% of all numbers theoretically. Practically speaking, this will succeed almost all of the time since loops are generally a multiple of 2 and/or 5. */ #define NUM_FACTORS 4 static struct _factor { const int factor; int count; } factors[NUM_FACTORS] = { {2, 0}, {3, 0}, {5, 0}, {7, 0}}; /* Describes the different types of loop unrolling performed. */ enum unroll_types { UNROLL_COMPLETELY, UNROLL_MODULO, UNROLL_NAIVE }; /* Indexed by register number, if nonzero, then it contains a pointer to a struct induction for a DEST_REG giv which has been combined with one of more address givs. This is needed because whenever such a DEST_REG giv is modified, we must modify the value of all split address givs that were combined with this DEST_REG giv. */ static struct induction **addr_combined_regs; /* Indexed by register number, if this is a splittable induction variable, then this will hold the current value of the register, which depends on the iteration number. */ static rtx *splittable_regs; /* Indexed by register number, if this is a splittable induction variable, then this will hold the number of instructions in the loop that modify the induction variable. Used to ensure that only the last insn modifying a split iv will update the original iv of the dest. */ static int *splittable_regs_updates; /* Forward declarations. */ static rtx simplify_cmp_and_jump_insns (enum rtx_code, enum machine_mode, rtx, rtx, rtx); static void init_reg_map (struct inline_remap *, int); static rtx calculate_giv_inc (rtx, rtx, unsigned int); static rtx initial_reg_note_copy (rtx, struct inline_remap *); static void final_reg_note_copy (rtx *, struct inline_remap *); static void copy_loop_body (struct loop *, rtx, rtx, struct inline_remap *, rtx, int, enum unroll_types, rtx, rtx, rtx, rtx); static int find_splittable_regs (const struct loop *, enum unroll_types, int); static int find_splittable_givs (const struct loop *, struct iv_class *, enum unroll_types, rtx, int); static int reg_dead_after_loop (const struct loop *, rtx); static rtx fold_rtx_mult_add (rtx, rtx, rtx, enum machine_mode); static rtx remap_split_bivs (struct loop *, rtx); static rtx find_common_reg_term (rtx, rtx); static rtx subtract_reg_term (rtx, rtx); static rtx loop_find_equiv_value (const struct loop *, rtx); static rtx ujump_to_loop_cont (rtx, rtx); /* Try to unroll one loop and split induction variables in the loop. The loop is described by the arguments LOOP and INSN_COUNT. STRENGTH_REDUCTION_P indicates whether information generated in the strength reduction pass is available. This function is intended to be called from within `strength_reduce' in loop.c. 
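   (A stand-alone sketch of how the unroll factor is chosen comes first, then
   the routine itself.)  */

/* Sketch only -- not part of GCC; it compiles on its own.  It models the
   decision made below: unroll completely when the total size fits in the
   budget, otherwise fold in small prime factors (7, 5, 3, 2) of the
   iteration count for modulo unrolling, and otherwise fall back to a naive
   factor of 8, 4 or 2.  SKETCH_MAX_INSNS is a made-up stand-in for
   MAX_UNROLLED_INSNS, and the overflow guards of the real code are left
   out.  */
#define SKETCH_MAX_INSNS 100

static int
sketch_unroll_factor (unsigned long n_iterations, unsigned long insn_count)
{
  static const int sketch_primes[] = { 7, 5, 3, 2 };
  unsigned long factor = 1, size = insn_count, rest;
  int i;

  /* Complete unrolling: every iteration is emitted and the exit test
     disappears.  */
  if (n_iterations > 0 && n_iterations * insn_count < SKETCH_MAX_INSNS)
    return (int) n_iterations;

  /* Modulo unrolling: multiply in prime factors of the iteration count,
     largest first, while the unrolled body still fits in the budget.  */
  if (n_iterations > 0)
    {
      rest = n_iterations;
      for (i = 0; i < 4; i++)
	while (rest % sketch_primes[i] == 0
	       && size * sketch_primes[i] < SKETCH_MAX_INSNS)
	  {
	    factor *= sketch_primes[i];
	    size *= sketch_primes[i];
	    rest /= sketch_primes[i];
	  }
      if (factor > 1)
	return (int) factor;
    }

  /* Naive unrolling: the largest of 8, 4 or 2 that fits; the real code never
     gets here with a loop too big to unroll even twice.  */
  if (8 * insn_count < SKETCH_MAX_INSNS)
    return 8;
  if (4 * insn_count < SKETCH_MAX_INSNS)
    return 4;
  return 2;
}

/* End of sketch; the GCC routine described above follows.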
*/ void unroll_loop (struct loop *loop, int insn_count, int strength_reduce_p) { struct loop_info *loop_info = LOOP_INFO (loop); struct loop_ivs *ivs = LOOP_IVS (loop); int i, j; unsigned int r; unsigned HOST_WIDE_INT temp; int unroll_number = 1; rtx copy_start, copy_end; rtx insn, sequence, pattern, tem; int max_labelno, max_insnno; rtx insert_before; struct inline_remap *map; char *local_label = NULL; char *local_regno; unsigned int max_local_regnum; unsigned int maxregnum; rtx exit_label = 0; rtx start_label; struct iv_class *bl; int splitting_not_safe = 0; enum unroll_types unroll_type = UNROLL_NAIVE; int loop_preconditioned = 0; rtx safety_label; /* This points to the last real insn in the loop, which should be either a JUMP_INSN (for conditional jumps) or a BARRIER (for unconditional jumps). */ rtx last_loop_insn; rtx loop_start = loop->start; rtx loop_end = loop->end; /* Don't bother unrolling huge loops. Since the minimum factor is two, loops greater than one half of MAX_UNROLLED_INSNS will never be unrolled. */ if (insn_count > MAX_UNROLLED_INSNS / 2) { if (loop_dump_stream) fprintf (loop_dump_stream, "Unrolling failure: Loop too big.\n"); return; } /* Determine type of unroll to perform. Depends on the number of iterations and the size of the loop. */ /* If there is no strength reduce info, then set loop_info->n_iterations to zero. This can happen if strength_reduce can't find any bivs in the loop. A value of zero indicates that the number of iterations could not be calculated. */ if (! strength_reduce_p) loop_info->n_iterations = 0; if (loop_dump_stream && loop_info->n_iterations > 0) fprintf (loop_dump_stream, "Loop unrolling: " HOST_WIDE_INT_PRINT_DEC " iterations.\n", loop_info->n_iterations); /* Find and save a pointer to the last nonnote insn in the loop. */ last_loop_insn = prev_nonnote_insn (loop_end); /* Calculate how many times to unroll the loop. Indicate whether or not the loop is being completely unrolled. */ if (loop_info->n_iterations == 1) { /* Handle the case where the loop begins with an unconditional jump to the loop condition. Make sure to delete the jump insn, otherwise the loop body will never execute. */ /* FIXME this actually checks for a jump to the continue point, which is not the same as the condition in a for loop. As a result, this optimization fails for most for loops. We should really use flow information rather than instruction pattern matching. */ rtx ujump = ujump_to_loop_cont (loop->start, loop->cont); /* If number of iterations is exactly 1, then eliminate the compare and branch at the end of the loop since they will never be taken. Then return, since no other action is needed here. */ /* If the last instruction is not a BARRIER or a JUMP_INSN, then don't do anything. */ if (GET_CODE (last_loop_insn) == BARRIER) { /* Delete the jump insn. This will delete the barrier also. */ last_loop_insn = PREV_INSN (last_loop_insn); } if (ujump && GET_CODE (last_loop_insn) == JUMP_INSN) { #ifdef HAVE_cc0 rtx prev = PREV_INSN (last_loop_insn); #endif delete_related_insns (last_loop_insn); #ifdef HAVE_cc0 /* The immediately preceding insn may be a compare which must be deleted. */ if (only_sets_cc0_p (prev)) delete_related_insns (prev); #endif delete_related_insns (ujump); /* Remove the loop notes since this is no longer a loop. 
*/ if (loop->vtop) delete_related_insns (loop->vtop); if (loop->cont) delete_related_insns (loop->cont); if (loop_start) delete_related_insns (loop_start); if (loop_end) delete_related_insns (loop_end); return; } } if (loop_info->n_iterations > 0 /* Avoid overflow in the next expression. */ && loop_info->n_iterations < (unsigned) MAX_UNROLLED_INSNS && loop_info->n_iterations * insn_count < (unsigned) MAX_UNROLLED_INSNS) { unroll_number = loop_info->n_iterations; unroll_type = UNROLL_COMPLETELY; } else if (loop_info->n_iterations > 0) { /* Try to factor the number of iterations. Don't bother with the general case, only using 2, 3, 5, and 7 will get 75% of all numbers theoretically, and almost all in practice. */ for (i = 0; i < NUM_FACTORS; i++) factors[i].count = 0; temp = loop_info->n_iterations; for (i = NUM_FACTORS - 1; i >= 0; i--) while (temp % factors[i].factor == 0) { factors[i].count++; temp = temp / factors[i].factor; } /* Start with the larger factors first so that we generally get lots of unrolling. */ unroll_number = 1; temp = insn_count; for (i = 3; i >= 0; i--) while (factors[i].count--) { if (temp * factors[i].factor < (unsigned) MAX_UNROLLED_INSNS) { unroll_number *= factors[i].factor; temp *= factors[i].factor; } else break; } /* If we couldn't find any factors, then unroll as in the normal case. */ if (unroll_number == 1) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop unrolling: No factors found.\n"); } else unroll_type = UNROLL_MODULO; } /* Default case, calculate number of times to unroll loop based on its size. */ if (unroll_type == UNROLL_NAIVE) { if (8 * insn_count < MAX_UNROLLED_INSNS) unroll_number = 8; else if (4 * insn_count < MAX_UNROLLED_INSNS) unroll_number = 4; else unroll_number = 2; } /* Now we know how many times to unroll the loop. */ if (loop_dump_stream) fprintf (loop_dump_stream, "Unrolling loop %d times.\n", unroll_number); if (unroll_type == UNROLL_COMPLETELY || unroll_type == UNROLL_MODULO) { /* Loops of these types can start with jump down to the exit condition in rare circumstances. Consider a pair of nested loops where the inner loop is part of the exit code for the outer loop. In this case jump.c will not duplicate the exit test for the outer loop, so it will start with a jump to the exit code. Then consider if the inner loop turns out to iterate once and only once. We will end up deleting the jumps associated with the inner loop. However, the loop notes are not removed from the instruction stream. And finally assume that we can compute the number of iterations for the outer loop. In this case unroll may want to unroll the outer loop even though it starts with a jump to the outer loop's exit code. We could try to optimize this case, but it hardly seems worth it. Just return without unrolling the loop in such cases. */ insn = loop_start; while (GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != JUMP_INSN) insn = NEXT_INSN (insn); if (GET_CODE (insn) == JUMP_INSN) return; } if (unroll_type == UNROLL_COMPLETELY) { /* Completely unrolling the loop: Delete the compare and branch at the end (the last two instructions). This delete must done at the very end of loop unrolling, to avoid problems with calls to back_branch_in_range_p, which is called by find_splittable_regs. All increments of splittable bivs/givs are changed to load constant instructions. 
*/ copy_start = loop_start; /* Set insert_before to the instruction immediately after the JUMP_INSN (or BARRIER), so that any NOTEs between the JUMP_INSN and the end of the loop will be correctly handled by copy_loop_body. */ insert_before = NEXT_INSN (last_loop_insn); /* Set copy_end to the insn before the jump at the end of the loop. */ if (GET_CODE (last_loop_insn) == BARRIER) copy_end = PREV_INSN (PREV_INSN (last_loop_insn)); else if (GET_CODE (last_loop_insn) == JUMP_INSN) { copy_end = PREV_INSN (last_loop_insn); #ifdef HAVE_cc0 /* The instruction immediately before the JUMP_INSN may be a compare instruction which we do not want to copy. */ if (sets_cc0_p (PREV_INSN (copy_end))) copy_end = PREV_INSN (copy_end); #endif } else { /* We currently can't unroll a loop if it doesn't end with a JUMP_INSN. There would need to be a mechanism that recognizes this case, and then inserts a jump after each loop body, which jumps to after the last loop body. */ if (loop_dump_stream) fprintf (loop_dump_stream, "Unrolling failure: loop does not end with a JUMP_INSN.\n"); return; } } else if (unroll_type == UNROLL_MODULO) { /* Partially unrolling the loop: The compare and branch at the end (the last two instructions) must remain. Don't copy the compare and branch instructions at the end of the loop. Insert the unrolled code immediately before the compare/branch at the end so that the code will fall through to them as before. */ copy_start = loop_start; /* Set insert_before to the jump insn at the end of the loop. Set copy_end to before the jump insn at the end of the loop. */ if (GET_CODE (last_loop_insn) == BARRIER) { insert_before = PREV_INSN (last_loop_insn); copy_end = PREV_INSN (insert_before); } else if (GET_CODE (last_loop_insn) == JUMP_INSN) { insert_before = last_loop_insn; #ifdef HAVE_cc0 /* The instruction immediately before the JUMP_INSN may be a compare instruction which we do not want to copy or delete. */ if (sets_cc0_p (PREV_INSN (insert_before))) insert_before = PREV_INSN (insert_before); #endif copy_end = PREV_INSN (insert_before); } else { /* We currently can't unroll a loop if it doesn't end with a JUMP_INSN. There would need to be a mechanism that recognizes this case, and then inserts a jump after each loop body, which jumps to after the last loop body. */ if (loop_dump_stream) fprintf (loop_dump_stream, "Unrolling failure: loop does not end with a JUMP_INSN.\n"); return; } } else { /* Normal case: Must copy the compare and branch instructions at the end of the loop. */ if (GET_CODE (last_loop_insn) == BARRIER) { /* Loop ends with an unconditional jump and a barrier. Handle this like above, don't copy jump and barrier. This is not strictly necessary, but doing so prevents generating unconditional jumps to an immediately following label. This will be corrected below if the target of this jump is not the start_label. */ insert_before = PREV_INSN (last_loop_insn); copy_end = PREV_INSN (insert_before); } else if (GET_CODE (last_loop_insn) == JUMP_INSN) { /* Set insert_before to immediately after the JUMP_INSN, so that NOTEs at the end of the loop will be correctly handled by copy_loop_body. */ insert_before = NEXT_INSN (last_loop_insn); copy_end = last_loop_insn; } else { /* We currently can't unroll a loop if it doesn't end with a JUMP_INSN. There would need to be a mechanism that recognizes this case, and then inserts a jump after each loop body, which jumps to after the last loop body. 
*/ if (loop_dump_stream) fprintf (loop_dump_stream, "Unrolling failure: loop does not end with a JUMP_INSN.\n"); return; } /* If copying exit test branches because they can not be eliminated, then must convert the fall through case of the branch to a jump past the end of the loop. Create a label to emit after the loop and save it for later use. Do not use the label after the loop, if any, since it might be used by insns outside the loop, or there might be insns added before it later by final_[bg]iv_value which must be after the real exit label. */ exit_label = gen_label_rtx (); insn = loop_start; while (GET_CODE (insn) != CODE_LABEL && GET_CODE (insn) != JUMP_INSN) insn = NEXT_INSN (insn); if (GET_CODE (insn) == JUMP_INSN) { /* The loop starts with a jump down to the exit condition test. Start copying the loop after the barrier following this jump insn. */ copy_start = NEXT_INSN (insn); /* Splitting induction variables doesn't work when the loop is entered via a jump to the bottom, because then we end up doing a comparison against a new register for a split variable, but we did not execute the set insn for the new register because it was skipped over. */ splitting_not_safe = 1; if (loop_dump_stream) fprintf (loop_dump_stream, "Splitting not safe, because loop not entered at top.\n"); } else copy_start = loop_start; } /* This should always be the first label in the loop. */ start_label = NEXT_INSN (copy_start); /* There may be a line number note and/or a loop continue note here. */ while (GET_CODE (start_label) == NOTE) start_label = NEXT_INSN (start_label); if (GET_CODE (start_label) != CODE_LABEL) { /* This can happen as a result of jump threading. If the first insns in the loop test the same condition as the loop's backward jump, or the opposite condition, then the backward jump will be modified to point to elsewhere, and the loop's start label is deleted. This case currently can not be handled by the loop unrolling code. */ if (loop_dump_stream) fprintf (loop_dump_stream, "Unrolling failure: unknown insns between BEG note and loop label.\n"); return; } if (LABEL_NAME (start_label)) { /* The jump optimization pass must have combined the original start label with a named label for a goto. We can't unroll this case because jumps which go to the named label must be handled differently than jumps to the loop start, and it is impossible to differentiate them in this case. */ if (loop_dump_stream) fprintf (loop_dump_stream, "Unrolling failure: loop start label is gone\n"); return; } if (unroll_type == UNROLL_NAIVE && GET_CODE (last_loop_insn) == BARRIER && GET_CODE (PREV_INSN (last_loop_insn)) == JUMP_INSN && start_label != JUMP_LABEL (PREV_INSN (last_loop_insn))) { /* In this case, we must copy the jump and barrier, because they will not be converted to jumps to an immediately following label. */ insert_before = NEXT_INSN (last_loop_insn); copy_end = last_loop_insn; } if (unroll_type == UNROLL_NAIVE && GET_CODE (last_loop_insn) == JUMP_INSN && start_label != JUMP_LABEL (last_loop_insn)) { /* ??? The loop ends with a conditional branch that does not branch back to the loop start label. In this case, we must emit an unconditional branch to the loop exit after emitting the final branch. copy_loop_body does not have support for this currently, so we give up. It doesn't seem worthwhile to unroll anyways since unrolling would increase the number of branch instructions executed. 
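Referring back to the jump-to-the-bottom case above: a purely illustrative source-level shape (names invented) is
        goto test;
      top:
        i_split = i_split + 1;    set of the split induction variable
        ... body ...
      test:
        if (i_split < n) goto top;
On entry, control jumps straight to `test', so the insn that sets i_split in the body was skipped and the comparison would read a register that was never written -- which is why splitting_not_safe is set in that case.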
*/ if (loop_dump_stream) fprintf (loop_dump_stream, "Unrolling failure: final conditional branch not to loop start\n"); return; } /* Allocate a translation table for the labels and insn numbers. They will be filled in as we copy the insns in the loop. */ max_labelno = max_label_num (); max_insnno = get_max_uid (); /* Various paths through the unroll code may reach the "egress" label without initializing fields within the map structure. To be safe, we use xcalloc to zero the memory. */ map = xcalloc (1, sizeof (struct inline_remap)); /* Allocate the label map. */ if (max_labelno > 0) { map->label_map = xcalloc (max_labelno, sizeof (rtx)); local_label = xcalloc (max_labelno, sizeof (char)); } /* Search the loop and mark all local labels, i.e. the ones which have to be distinct labels when copied. For all labels which might be non-local, set their label_map entries to point to themselves. If they happen to be local their label_map entries will be overwritten before the loop body is copied. The label_map entries for local labels will be set to a different value each time the loop body is copied. */ for (insn = copy_start; insn != loop_end; insn = NEXT_INSN (insn)) { rtx note; if (GET_CODE (insn) == CODE_LABEL) local_label[CODE_LABEL_NUMBER (insn)] = 1; else if (GET_CODE (insn) == JUMP_INSN) { if (JUMP_LABEL (insn)) set_label_in_map (map, CODE_LABEL_NUMBER (JUMP_LABEL (insn)), JUMP_LABEL (insn)); else if (GET_CODE (PATTERN (insn)) == ADDR_VEC || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC) { rtx pat = PATTERN (insn); int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC; int len = XVECLEN (pat, diff_vec_p); rtx label; for (i = 0; i < len; i++) { label = XEXP (XVECEXP (pat, diff_vec_p, i), 0); set_label_in_map (map, CODE_LABEL_NUMBER (label), label); } } } if ((note = find_reg_note (insn, REG_LABEL, NULL_RTX))) set_label_in_map (map, CODE_LABEL_NUMBER (XEXP (note, 0)), XEXP (note, 0)); } /* Allocate space for the insn map. */ map->insn_map = xmalloc (max_insnno * sizeof (rtx)); /* The register and constant maps depend on the number of registers present, so the final maps can't be created until after find_splittable_regs is called. However, they are needed for preconditioning, so we create temporary maps when preconditioning is performed. */ /* The preconditioning code may allocate two new pseudo registers. */ maxregnum = max_reg_num (); /* local_regno is only valid for regnos < max_local_regnum. */ max_local_regnum = maxregnum; /* Allocate and zero out the splittable_regs and addr_combined_regs arrays. These must be zeroed here because they will be used if loop preconditioning is performed, and must be zero for that case. It is safe to do this here, since the extra registers created by the preconditioning code and find_splittable_regs will never be used to access the splittable_regs[] and addr_combined_regs[] arrays. */ splittable_regs = xcalloc (maxregnum, sizeof (rtx)); splittable_regs_updates = xcalloc (maxregnum, sizeof (int)); addr_combined_regs = xcalloc (maxregnum, sizeof (struct induction *)); local_regno = xcalloc (maxregnum, sizeof (char)); /* Mark all local registers, i.e. the ones which are referenced only inside the loop. */ if (INSN_UID (copy_end) < max_uid_for_loop) { int copy_start_luid = LOOP_INSN_LUID (copy_start); int copy_end_luid = LOOP_INSN_LUID (copy_end); /* If a register is used in the jump insn, we must not duplicate it since it will also be used outside the loop. 
*/ if (GET_CODE (copy_end) == JUMP_INSN) copy_end_luid--; /* If we have a target that uses cc0, then we also must not duplicate the insn that sets cc0 before the jump insn, if one is present. */ #ifdef HAVE_cc0 if (GET_CODE (copy_end) == JUMP_INSN && sets_cc0_p (PREV_INSN (copy_end))) copy_end_luid--; #endif /* If copy_start points to the NOTE that starts the loop, then we must use the next luid, because invariant pseudo-regs moved out of the loop have their lifetimes modified to start here, but they are not safe to duplicate. */ if (copy_start == loop_start) copy_start_luid++; /* If a pseudo's lifetime is entirely contained within this loop, then we can use a different pseudo in each unrolled copy of the loop. This results in better code. */ /* We must limit the generic test to max_reg_before_loop, because only these pseudo registers have valid regno_first_uid info. */ for (r = FIRST_PSEUDO_REGISTER; r < max_reg_before_loop; ++r) if (REGNO_FIRST_UID (r) > 0 && REGNO_FIRST_UID (r) < max_uid_for_loop && REGNO_FIRST_LUID (r) >= copy_start_luid && REGNO_LAST_UID (r) > 0 && REGNO_LAST_UID (r) < max_uid_for_loop && REGNO_LAST_LUID (r) <= copy_end_luid) { /* However, we must also check for loop-carried dependencies. If the value the pseudo has at the end of iteration X is used by iteration X+1, then we can not use a different pseudo for each unrolled copy of the loop. */ /* A pseudo is safe if regno_first_uid is a set, and this set dominates all instructions from regno_first_uid to regno_last_uid. */ /* ??? This check is simplistic. We would get better code if this check was more sophisticated. */ if (set_dominates_use (r, REGNO_FIRST_UID (r), REGNO_LAST_UID (r), copy_start, copy_end)) local_regno[r] = 1; if (loop_dump_stream) { if (local_regno[r]) fprintf (loop_dump_stream, "Marked reg %d as local\n", r); else fprintf (loop_dump_stream, "Did not mark reg %d as local\n", r); } } } /* If this loop requires exit tests when unrolled, check to see if we can precondition the loop so as to make the exit tests unnecessary. Just like variable splitting, this is not safe if the loop is entered via a jump to the bottom. Also, can not do this if no strength reduce info, because precondition_loop_p uses this info. */ /* Must copy the loop body for preconditioning before the following find_splittable_regs call since that will emit insns which need to be after the preconditioned loop copies, but immediately before the unrolled loop copies. */ /* Also, it is not safe to split induction variables for the preconditioned copies of the loop body. If we split induction variables, then the code assumes that each induction variable can be represented as a function of its initial value and the loop iteration number. This is not true in this case, because the last preconditioned copy of the loop body could be any iteration from the first up to the `unroll_number-1'th, depending on the initial value of the iteration variable. Therefore we can not split induction variables here, because we can not calculate their value. Hence, this code must occur before find_splittable_regs is called. */ if (unroll_type == UNROLL_NAIVE && ! 
splitting_not_safe && strength_reduce_p) { rtx initial_value, final_value, increment; enum machine_mode mode; if (precondition_loop_p (loop, &initial_value, &final_value, &increment, &mode)) { rtx diff, insn; rtx *labels; int abs_inc, neg_inc; enum rtx_code cc = loop_info->comparison_code; int less_p = (cc == LE || cc == LEU || cc == LT || cc == LTU); int unsigned_p = (cc == LEU || cc == GEU || cc == LTU || cc == GTU); map->reg_map = xmalloc (maxregnum * sizeof (rtx)); VARRAY_CONST_EQUIV_INIT (map->const_equiv_varray, maxregnum, "unroll_loop_precondition"); global_const_equiv_varray = map->const_equiv_varray; init_reg_map (map, maxregnum); /* Limit loop unrolling to 4, since this will make 7 copies of the loop body. */ if (unroll_number > 4) unroll_number = 4; /* Save the absolute value of the increment, and also whether or not it is negative. */ neg_inc = 0; abs_inc = INTVAL (increment); if (abs_inc < 0) { abs_inc = -abs_inc; neg_inc = 1; } start_sequence (); /* We must copy the final and initial values here to avoid improperly shared rtl. */ final_value = copy_rtx (final_value); initial_value = copy_rtx (initial_value); /* Final value may have form of (PLUS val1 const1_rtx). We need to convert it into general operand, so compute the real value. */ final_value = force_operand (final_value, NULL_RTX); if (!nonmemory_operand (final_value, VOIDmode)) final_value = force_reg (mode, final_value); /* Calculate the difference between the final and initial values. Final value may be a (plus (reg x) (const_int 1)) rtx. We have to deal with for (i = 0; --i < 6;) type loops. For such loops the real final value is the first time the loop variable overflows, so the diff we calculate is the distance from the overflow value. This is 0 or ~0 for unsigned loops depending on the direction, or INT_MAX, INT_MAX+1 for signed loops. We really do not need the exact value, since we are only interested in the diff modulo the increment, and the increment is a power of 2, so we can pretend that the overflow value is 0/~0. */ if (cc == NE || less_p != neg_inc) diff = simplify_gen_binary (MINUS, mode, final_value, initial_value); else diff = simplify_gen_unary (neg_inc ? NOT : NEG, mode, initial_value, mode); diff = force_operand (diff, NULL_RTX); /* Now calculate (diff % (unroll * abs (increment))) by using an and instruction. */ diff = simplify_gen_binary (AND, mode, diff, GEN_INT (unroll_number*abs_inc - 1)); diff = force_operand (diff, NULL_RTX); /* Now emit a sequence of branches to jump to the proper precond loop entry point. */ labels = xmalloc (sizeof (rtx) * unroll_number); for (i = 0; i < unroll_number; i++) labels[i] = gen_label_rtx (); /* Check for the case where the initial value is greater than or equal to the final value. In that case, we want to execute exactly one loop iteration. The code below will fail for this case. This check does not apply if the loop has a NE comparison at the end. */ if (cc != NE) { rtx incremented_initval; enum rtx_code cmp_code; incremented_initval = simplify_gen_binary (PLUS, mode, initial_value, increment); incremented_initval = force_operand (incremented_initval, NULL_RTX); cmp_code = (less_p ? (unsigned_p ? GEU : GE) : (unsigned_p ? 
LEU : LE)); insn = simplify_cmp_and_jump_insns (cmp_code, mode, incremented_initval, final_value, labels[1]); if (insn) predict_insn_def (insn, PRED_LOOP_CONDITION, TAKEN); } /* Assuming the unroll_number is 4, and the increment is 2, then for a negative increment: for a positive increment: diff = 0,1 precond 0 diff = 0,7 precond 0 diff = 2,3 precond 3 diff = 1,2 precond 1 diff = 4,5 precond 2 diff = 3,4 precond 2 diff = 6,7 precond 1 diff = 5,6 precond 3 */ /* We only need to emit (unroll_number - 1) branches here, the last case just falls through to the following code. */ /* ??? This would give better code if we emitted a tree of branches instead of the current linear list of branches. */ for (i = 0; i < unroll_number - 1; i++) { int cmp_const; enum rtx_code cmp_code; /* For negative increments, must invert the constant compared against, except when comparing against zero. */ if (i == 0) { cmp_const = 0; cmp_code = EQ; } else if (neg_inc) { cmp_const = unroll_number - i; cmp_code = GE; } else { cmp_const = i; cmp_code = LE; } insn = simplify_cmp_and_jump_insns (cmp_code, mode, diff, GEN_INT (abs_inc*cmp_const), labels[i]); if (insn) predict_insn (insn, PRED_LOOP_PRECONDITIONING, REG_BR_PROB_BASE / (unroll_number - i)); } /* If the increment is greater than one, then we need another branch, to handle other cases equivalent to 0. */ /* ??? This should be merged into the code above somehow to help simplify the code here, and reduce the number of branches emitted. For the negative increment case, the branch here could easily be merged with the `0' case branch above. For the positive increment case, it is not clear how this can be simplified. */ if (abs_inc != 1) { int cmp_const; enum rtx_code cmp_code; if (neg_inc) { cmp_const = abs_inc - 1; cmp_code = LE; } else { cmp_const = abs_inc * (unroll_number - 1) + 1; cmp_code = GE; } simplify_cmp_and_jump_insns (cmp_code, mode, diff, GEN_INT (cmp_const), labels[0]); } sequence = get_insns (); end_sequence (); loop_insn_hoist (loop, sequence); /* Only the last copy of the loop body here needs the exit test, so set copy_end to exclude the compare/branch here, and then reset it inside the loop when get to the last copy. */ if (GET_CODE (last_loop_insn) == BARRIER) copy_end = PREV_INSN (PREV_INSN (last_loop_insn)); else if (GET_CODE (last_loop_insn) == JUMP_INSN) { copy_end = PREV_INSN (last_loop_insn); #ifdef HAVE_cc0 /* The immediately preceding insn may be a compare which we do not want to copy. */ if (sets_cc0_p (PREV_INSN (copy_end))) copy_end = PREV_INSN (copy_end); #endif } else abort (); for (i = 1; i < unroll_number; i++) { emit_label_after (labels[unroll_number - i], PREV_INSN (loop_start)); memset (map->insn_map, 0, max_insnno * sizeof (rtx)); memset (&VARRAY_CONST_EQUIV (map->const_equiv_varray, 0), 0, (VARRAY_SIZE (map->const_equiv_varray) * sizeof (struct const_equiv_data))); map->const_age = 0; for (j = 0; j < max_labelno; j++) if (local_label[j]) set_label_in_map (map, j, gen_label_rtx ()); for (r = FIRST_PSEUDO_REGISTER; r < max_local_regnum; r++) if (local_regno[r]) { map->reg_map[r] = gen_reg_rtx (GET_MODE (regno_reg_rtx[r])); record_base_value (REGNO (map->reg_map[r]), regno_reg_rtx[r], 0); } /* The last copy needs the compare/branch insns at the end, so reset copy_end here if the loop ends with a conditional branch. 
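A worked instance of the dispatch above (made-up values): with unroll_number 4 and abs_inc 2 the mask is 4 * 2 - 1 = 7, so for final_value - initial_value = 21 the single AND gives 21 & 7 = 5, the same as 21 mod 8; per the table above, a diff of 5 with a positive increment selects precond copy 3.  The trick is only valid because abs_inc is a power of two (enforced by precondition_loop_p) and unroll_number is the naive default of 2, 4 or 8, clamped to at most 4 above.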
*/ if (i == unroll_number - 1) { if (GET_CODE (last_loop_insn) == BARRIER) copy_end = PREV_INSN (PREV_INSN (last_loop_insn)); else copy_end = last_loop_insn; } /* None of the copies are the `last_iteration', so just pass zero for that parameter. */ copy_loop_body (loop, copy_start, copy_end, map, exit_label, 0, unroll_type, start_label, loop_end, loop_start, copy_end); } emit_label_after (labels[0], PREV_INSN (loop_start)); if (GET_CODE (last_loop_insn) == BARRIER) { insert_before = PREV_INSN (last_loop_insn); copy_end = PREV_INSN (insert_before); } else { insert_before = last_loop_insn; #ifdef HAVE_cc0 /* The instruction immediately before the JUMP_INSN may be a compare instruction which we do not want to copy or delete. */ if (sets_cc0_p (PREV_INSN (insert_before))) insert_before = PREV_INSN (insert_before); #endif copy_end = PREV_INSN (insert_before); } /* Set unroll type to MODULO now. */ unroll_type = UNROLL_MODULO; loop_preconditioned = 1; /* Clean up. */ free (labels); } } /* If reach here, and the loop type is UNROLL_NAIVE, then don't unroll the loop unless all loops are being unrolled. */ if (unroll_type == UNROLL_NAIVE && ! flag_old_unroll_all_loops) { if (loop_dump_stream) fprintf (loop_dump_stream, "Unrolling failure: Naive unrolling not being done.\n"); goto egress; } /* At this point, we are guaranteed to unroll the loop. */ /* Keep track of the unroll factor for the loop. */ loop_info->unroll_number = unroll_number; /* And whether the loop has been preconditioned. */ loop_info->preconditioned = loop_preconditioned; /* Remember whether it was preconditioned for the second loop pass. */ NOTE_PRECONDITIONED (loop->end) = loop_preconditioned; /* For each biv and giv, determine whether it can be safely split into a different variable for each unrolled copy of the loop body. We precalculate and save this info here, since computing it is expensive. Do this before deleting any instructions from the loop, so that back_branch_in_range_p will work correctly. */ if (splitting_not_safe) temp = 0; else temp = find_splittable_regs (loop, unroll_type, unroll_number); /* find_splittable_regs may have created some new registers, so must reallocate the reg_map with the new larger size, and must realloc the constant maps also. */ maxregnum = max_reg_num (); map->reg_map = xmalloc (maxregnum * sizeof (rtx)); init_reg_map (map, maxregnum); if (map->const_equiv_varray == 0) VARRAY_CONST_EQUIV_INIT (map->const_equiv_varray, maxregnum + temp * unroll_number * 2, "unroll_loop"); global_const_equiv_varray = map->const_equiv_varray; /* Search the list of bivs and givs to find ones which need to be remapped when split, and set their reg_map entry appropriately. */ for (bl = ivs->list; bl; bl = bl->next) { if (REGNO (bl->biv->src_reg) != bl->regno) map->reg_map[bl->regno] = bl->biv->src_reg; #if 0 /* Currently, non-reduced/final-value givs are never split. */ for (v = bl->giv; v; v = v->next_iv) if (REGNO (v->src_reg) != bl->regno) map->reg_map[REGNO (v->dest_reg)] = v->src_reg; #endif } /* Use our current register alignment and pointer flags. */ map->regno_pointer_align = cfun->emit->regno_pointer_align; map->x_regno_reg_rtx = cfun->emit->x_regno_reg_rtx; /* If the loop is being partially unrolled, and the iteration variables are being split, and are being renamed for the split, then must fix up the compare/jump instruction at the end of the loop to refer to the new registers. This compare isn't copied, so the registers used in it will never be replaced if it isn't done here. 
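As a made-up illustration: suppose the biv tested at the bottom of the loop lived in (reg 64), and find_splittable_regs decided it must be remapped, moving the in-loop biv to a fresh (reg 131) while (reg 64) receives its final value before loop entry.  The compare/branch is not among the copied insns, so unless its PATTERN is rewritten below it would keep testing (reg 64) rather than (reg 131), and the partially unrolled loop would exit after the wrong number of iterations.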
*/ if (unroll_type == UNROLL_MODULO) { insn = NEXT_INSN (copy_end); if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN) PATTERN (insn) = remap_split_bivs (loop, PATTERN (insn)); } /* For unroll_number times, make a copy of each instruction between copy_start and copy_end, and insert these new instructions before the end of the loop. */ for (i = 0; i < unroll_number; i++) { memset (map->insn_map, 0, max_insnno * sizeof (rtx)); memset (&VARRAY_CONST_EQUIV (map->const_equiv_varray, 0), 0, VARRAY_SIZE (map->const_equiv_varray) * sizeof (struct const_equiv_data)); map->const_age = 0; for (j = 0; j < max_labelno; j++) if (local_label[j]) set_label_in_map (map, j, gen_label_rtx ()); for (r = FIRST_PSEUDO_REGISTER; r < max_local_regnum; r++) if (local_regno[r]) { map->reg_map[r] = gen_reg_rtx (GET_MODE (regno_reg_rtx[r])); record_base_value (REGNO (map->reg_map[r]), regno_reg_rtx[r], 0); } /* If loop starts with a branch to the test, then fix it so that it points to the test of the first unrolled copy of the loop. */ if (i == 0 && loop_start != copy_start) { insn = PREV_INSN (copy_start); pattern = PATTERN (insn); tem = get_label_from_map (map, CODE_LABEL_NUMBER (XEXP (SET_SRC (pattern), 0))); SET_SRC (pattern) = gen_rtx_LABEL_REF (VOIDmode, tem); /* Set the jump label so that it can be used by later loop unrolling passes. */ JUMP_LABEL (insn) = tem; LABEL_NUSES (tem)++; } copy_loop_body (loop, copy_start, copy_end, map, exit_label, i == unroll_number - 1, unroll_type, start_label, loop_end, insert_before, insert_before); } /* Before deleting any insns, emit a CODE_LABEL immediately after the last insn to be deleted. This prevents any runaway delete_insn call from more insns that it should, as it always stops at a CODE_LABEL. */ /* Delete the compare and branch at the end of the loop if completely unrolling the loop. Deleting the backward branch at the end also deletes the code label at the start of the loop. This is done at the very end to avoid problems with back_branch_in_range_p. */ if (unroll_type == UNROLL_COMPLETELY) safety_label = emit_label_after (gen_label_rtx (), last_loop_insn); else safety_label = emit_label_after (gen_label_rtx (), copy_end); /* Delete all of the original loop instructions. Don't delete the LOOP_BEG note, or the first code label in the loop. */ insn = NEXT_INSN (copy_start); while (insn != safety_label) { /* ??? Don't delete named code labels. They will be deleted when the jump that references them is deleted. Otherwise, we end up deleting them twice, which causes them to completely disappear instead of turn into NOTE_INSN_DELETED_LABEL notes. This in turn causes aborts in dwarfout.c/dwarf2out.c. We could perhaps fix the dwarf*out.c files to handle deleted labels instead. Or perhaps fix DECL_RTL of the associated LABEL_DECL to point to one of the new label instances. */ /* ??? Likewise, we can't delete a NOTE_INSN_DELETED_LABEL note. */ if (insn != start_label && ! (GET_CODE (insn) == CODE_LABEL && LABEL_NAME (insn)) && ! (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)) insn = delete_related_insns (insn); else insn = NEXT_INSN (insn); } /* Can now delete the 'safety' label emitted to protect us from runaway delete_related_insns calls. */ if (INSN_DELETED_P (safety_label)) abort (); delete_related_insns (safety_label); /* If exit_label exists, emit it after the loop. Doing the emit here forces it to have a higher INSN_UID than any insn in the unrolled loop. 
This is needed so that mostly_true_jump in reorg.c will treat jumps to this loop end label correctly, i.e. predict that they are usually not taken. */ if (exit_label) emit_label_after (exit_label, loop_end); egress: if (unroll_type == UNROLL_COMPLETELY) { /* Remove the loop notes since this is no longer a loop. */ if (loop->vtop) delete_related_insns (loop->vtop); if (loop->cont) delete_related_insns (loop->cont); if (loop_start) delete_related_insns (loop_start); if (loop_end) delete_related_insns (loop_end); } if (map->const_equiv_varray) VARRAY_FREE (map->const_equiv_varray); if (map->label_map) { free (map->label_map); free (local_label); } free (map->insn_map); free (splittable_regs); free (splittable_regs_updates); free (addr_combined_regs); free (local_regno); if (map->reg_map) free (map->reg_map); free (map); } /* A helper function for unroll_loop. Emit a compare and branch to satisfy (CMP OP1 OP2), but pass this through the simplifier first. If the branch turned out to be conditional, return it, otherwise return NULL. */ static rtx simplify_cmp_and_jump_insns (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1, rtx label) { rtx t, insn; t = simplify_const_relational_operation (code, mode, op0, op1); if (!t) { enum rtx_code scode = signed_condition (code); emit_cmp_and_jump_insns (op0, op1, scode, NULL_RTX, mode, code != scode, label); insn = get_last_insn (); JUMP_LABEL (insn) = label; LABEL_NUSES (label) += 1; return insn; } else if (t == const_true_rtx) { insn = emit_jump_insn (gen_jump (label)); emit_barrier (); JUMP_LABEL (insn) = label; LABEL_NUSES (label) += 1; } return NULL_RTX; } /* Return true if the loop can be safely, and profitably, preconditioned so that the unrolled copies of the loop body don't need exit tests. This only works if final_value, initial_value and increment can be determined, and if increment is a constant power of 2. If increment is not a power of 2, then the preconditioning modulo operation would require a real modulo instead of a boolean AND, and this is not considered `profitable'. */ /* ??? If the loop is known to be executed very many times, or the machine has a very cheap divide instruction, then preconditioning is a win even when the increment is not a power of 2. Use RTX_COST to compute whether divide is cheap. ??? A divide by constant doesn't actually need a divide, look at expand_divmod. The reduced cost of this optimized modulo is not reflected in RTX_COST. 
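Roughly, at the source level (illustrative only; unit increment, unroll factor 4), preconditioning turns
      for (i = 0; i < n; i++) body;
into
      m = (n - 0) & 3;          one AND, no real modulo
      run `body' m times in the peeled copies placed just before the loop;
      then the main loop executes body four times per exit test.
This is why the increment must be a constant power of two: the modulo reduces to a single AND.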
*/ int precondition_loop_p (const struct loop *loop, rtx *initial_value, rtx *final_value, rtx *increment, enum machine_mode *mode) { rtx loop_start = loop->start; struct loop_info *loop_info = LOOP_INFO (loop); if (loop_info->n_iterations > 0) { if (INTVAL (loop_info->increment) > 0) { *initial_value = const0_rtx; *increment = const1_rtx; *final_value = GEN_INT (loop_info->n_iterations); } else { *initial_value = GEN_INT (loop_info->n_iterations); *increment = constm1_rtx; *final_value = const0_rtx; } *mode = word_mode; if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Success, number of iterations known, " HOST_WIDE_INT_PRINT_DEC ".\n", loop_info->n_iterations); return 1; } if (loop_info->iteration_var == 0) { if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Could not find iteration variable.\n"); return 0; } else if (loop_info->initial_value == 0) { if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Could not find initial value.\n"); return 0; } else if (loop_info->increment == 0) { if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Could not find increment value.\n"); return 0; } else if (GET_CODE (loop_info->increment) != CONST_INT) { if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Increment not a constant.\n"); return 0; } else if ((exact_log2 (INTVAL (loop_info->increment)) < 0) && (exact_log2 (-INTVAL (loop_info->increment)) < 0)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Increment not a constant power of 2.\n"); return 0; } /* Unsigned_compare and compare_dir can be ignored here, since they do not matter for preconditioning. */ if (loop_info->final_value == 0) { if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: EQ comparison loop.\n"); return 0; } /* Must ensure that final_value is invariant, so call loop_invariant_p to check. Before doing so, must check regno against max_reg_before_loop to make sure that the register is in the range covered by loop_invariant_p. If it isn't, then it is most likely a biv/giv which by definition are not invariant. */ if ((REG_P (loop_info->final_value) && REGNO (loop_info->final_value) >= max_reg_before_loop) || (GET_CODE (loop_info->final_value) == PLUS && REGNO (XEXP (loop_info->final_value, 0)) >= max_reg_before_loop) || ! loop_invariant_p (loop, loop_info->final_value)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Final value not invariant.\n"); return 0; } /* Fail for floating point values, since the caller of this function does not have code to deal with them. */ if (GET_MODE_CLASS (GET_MODE (loop_info->final_value)) == MODE_FLOAT || GET_MODE_CLASS (GET_MODE (loop_info->initial_value)) == MODE_FLOAT) { if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Floating point final or initial value.\n"); return 0; } /* Fail if loop_info->iteration_var is not live before loop_start, since we need to test its value in the preconditioning code. */ if (REGNO_FIRST_LUID (REGNO (loop_info->iteration_var)) > LOOP_INSN_LUID (loop_start)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Iteration var not live before loop start.\n"); return 0; } /* Note that loop_iterations biases the initial value for GIV iterators such as "while (i-- > 0)" so that we can calculate the number of iterations just like for BIV iterators. 
Also note that the absolute values of initial_value and final_value are unimportant as only their difference is used for calculating the number of loop iterations. */ *initial_value = loop_info->initial_value; *increment = loop_info->increment; *final_value = loop_info->final_value; /* Decide what mode to do these calculations in. Choose the larger of final_value's mode and initial_value's mode, or a full-word if both are constants. */ *mode = GET_MODE (*final_value); if (*mode == VOIDmode) { *mode = GET_MODE (*initial_value); if (*mode == VOIDmode) *mode = word_mode; } else if (*mode != GET_MODE (*initial_value) && (GET_MODE_SIZE (*mode) < GET_MODE_SIZE (GET_MODE (*initial_value)))) *mode = GET_MODE (*initial_value); /* Success! */ if (loop_dump_stream) fprintf (loop_dump_stream, "Preconditioning: Successful.\n"); return 1; } /* All pseudo-registers must be mapped to themselves. Two hard registers must be mapped, VIRTUAL_STACK_VARS_REGNUM and VIRTUAL_INCOMING_ARGS_ REGNUM, to avoid function-inlining specific conversions of these registers. All other hard regs can not be mapped because they may be used with different modes. */ static void init_reg_map (struct inline_remap *map, int maxregnum) { int i; for (i = maxregnum - 1; i > LAST_VIRTUAL_REGISTER; i--) map->reg_map[i] = regno_reg_rtx[i]; /* Just clear the rest of the entries. */ for (i = LAST_VIRTUAL_REGISTER; i >= 0; i--) map->reg_map[i] = 0; map->reg_map[VIRTUAL_STACK_VARS_REGNUM] = regno_reg_rtx[VIRTUAL_STACK_VARS_REGNUM]; map->reg_map[VIRTUAL_INCOMING_ARGS_REGNUM] = regno_reg_rtx[VIRTUAL_INCOMING_ARGS_REGNUM]; } /* Strength-reduction will often emit code for optimized biv/givs which calculates their value in a temporary register, and then copies the result to the iv. This procedure reconstructs the pattern computing the iv; verifying that all operands are of the proper form. PATTERN must be the result of single_set. The return value is the amount that the giv is incremented by. */ static rtx calculate_giv_inc (rtx pattern, rtx src_insn, unsigned int regno) { rtx increment; rtx increment_total = 0; int tries = 0; retry: /* Verify that we have an increment insn here. First check for a plus as the set source. */ if (GET_CODE (SET_SRC (pattern)) != PLUS) { /* SR sometimes computes the new giv value in a temp, then copies it to the new_reg. */ src_insn = PREV_INSN (src_insn); pattern = single_set (src_insn); if (GET_CODE (SET_SRC (pattern)) != PLUS) abort (); /* The last insn emitted is not needed, so delete it to avoid confusing the second cse pass. This insn sets the giv unnecessarily. */ delete_related_insns (get_last_insn ()); } /* Verify that we have a constant as the second operand of the plus. */ increment = XEXP (SET_SRC (pattern), 1); if (GET_CODE (increment) != CONST_INT) { /* SR sometimes puts the constant in a register, especially if it is too big to be an add immed operand. */ increment = find_last_value (increment, &src_insn, NULL_RTX, 0); /* SR may have used LO_SUM to compute the constant if it is too large for a load immed operand. In this case, the constant is in operand one of the LO_SUM rtx. */ if (GET_CODE (increment) == LO_SUM) increment = XEXP (increment, 1); /* Some ports store large constants in memory and add a REG_EQUAL note to the store insn. 
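For example (made-up rtl): if the traced value turns out to be (mem (symbol_ref ...)) because the port keeps the large constant in the constant pool, the insn that performed the load is expected to carry a (REG_EQUAL (const_int 100000)) note, and it is that note, not the MEM itself, which supplies the increment below.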
*/ else if (MEM_P (increment)) { rtx note = find_reg_note (src_insn, REG_EQUAL, 0); if (note) increment = XEXP (note, 0); } else if (GET_CODE (increment) == IOR || GET_CODE (increment) == PLUS || GET_CODE (increment) == ASHIFT || GET_CODE (increment) == LSHIFTRT) { /* The rs6000 port loads some constants with IOR. The alpha port loads some constants with ASHIFT and PLUS. The sparc64 port loads some constants with LSHIFTRT. */ rtx second_part = XEXP (increment, 1); enum rtx_code code = GET_CODE (increment); increment = find_last_value (XEXP (increment, 0), &src_insn, NULL_RTX, 0); /* Don't need the last insn anymore. */ delete_related_insns (get_last_insn ()); if (GET_CODE (second_part) != CONST_INT || GET_CODE (increment) != CONST_INT) abort (); if (code == IOR) increment = GEN_INT (INTVAL (increment) | INTVAL (second_part)); else if (code == PLUS) increment = GEN_INT (INTVAL (increment) + INTVAL (second_part)); else if (code == ASHIFT) increment = GEN_INT (INTVAL (increment) << INTVAL (second_part)); else increment = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (increment) >> INTVAL (second_part)); } if (GET_CODE (increment) != CONST_INT) abort (); /* The insn loading the constant into a register is no longer needed, so delete it. */ delete_related_insns (get_last_insn ()); } if (increment_total) increment_total = GEN_INT (INTVAL (increment_total) + INTVAL (increment)); else increment_total = increment; /* Check that the source register is the same as the register we expected to see as the source. If not, something is seriously wrong. */ if (!REG_P (XEXP (SET_SRC (pattern), 0)) || REGNO (XEXP (SET_SRC (pattern), 0)) != regno) { /* Some machines (e.g. the romp), may emit two add instructions for certain constants, so lets try looking for another add immediately before this one if we have only seen one add insn so far. */ if (tries == 0) { tries++; src_insn = PREV_INSN (src_insn); pattern = single_set (src_insn); delete_related_insns (get_last_insn ()); goto retry; } abort (); } return increment_total; } /* Copy REG_NOTES, except for insn references, because not all insn_map entries are valid yet. We do need to copy registers now though, because the reg_map entries can change during copying. */ static rtx initial_reg_note_copy (rtx notes, struct inline_remap *map) { rtx copy; if (notes == 0) return 0; copy = rtx_alloc (GET_CODE (notes)); PUT_REG_NOTE_KIND (copy, REG_NOTE_KIND (notes)); if (GET_CODE (notes) == EXPR_LIST) XEXP (copy, 0) = copy_rtx_and_substitute (XEXP (notes, 0), map, 0); else if (GET_CODE (notes) == INSN_LIST) /* Don't substitute for these yet. */ XEXP (copy, 0) = copy_rtx (XEXP (notes, 0)); else abort (); XEXP (copy, 1) = initial_reg_note_copy (XEXP (notes, 1), map); return copy; } /* Fixup insn references in copied REG_NOTES. */ static void final_reg_note_copy (rtx *notesp, struct inline_remap *map) { while (*notesp) { rtx note = *notesp; if (GET_CODE (note) == INSN_LIST) { rtx insn = map->insn_map[INSN_UID (XEXP (note, 0))]; /* If we failed to remap the note, something is awry. Allow REG_LABEL as it may reference label outside the unrolled loop. */ if (!insn) { if (REG_NOTE_KIND (note) != REG_LABEL) abort (); } else XEXP (note, 0) = insn; } notesp = &XEXP (note, 1); } } /* Copy each instruction in the loop, substituting from map as appropriate. This is very similar to a loop in expand_inline_function. 
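In outline (descriptive only): for every copy except the last, references to the loop's start label are remapped to a fresh final_label emitted at the end of that copy, so control falls straight through from one copy into the next; the conditional back branch that ends a copy is inverted (or, failing that, supplemented with an unconditional jump) so that the exit path targets exit_label instead of falling out after the first copy.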
*/ static void copy_loop_body (struct loop *loop, rtx copy_start, rtx copy_end, struct inline_remap *map, rtx exit_label, int last_iteration, enum unroll_types unroll_type, rtx start_label, rtx loop_end, rtx insert_before, rtx copy_notes_from) { struct loop_ivs *ivs = LOOP_IVS (loop); rtx insn, pattern; rtx set, tem, copy = NULL_RTX; int dest_reg_was_split, i; #ifdef HAVE_cc0 rtx cc0_insn = 0; #endif rtx final_label = 0; rtx giv_inc, giv_dest_reg, giv_src_reg; /* If this isn't the last iteration, then map any references to the start_label to final_label. Final label will then be emitted immediately after the end of this loop body if it was ever used. If this is the last iteration, then map references to the start_label to itself. */ if (! last_iteration) { final_label = gen_label_rtx (); set_label_in_map (map, CODE_LABEL_NUMBER (start_label), final_label); } else set_label_in_map (map, CODE_LABEL_NUMBER (start_label), start_label); start_sequence (); insn = copy_start; do { insn = NEXT_INSN (insn); map->orig_asm_operands_vector = 0; switch (GET_CODE (insn)) { case INSN: pattern = PATTERN (insn); copy = 0; giv_inc = 0; /* Check to see if this is a giv that has been combined with some split address givs. (Combined in the sense that `combine_givs' in loop.c has put two givs in the same register.) In this case, we must search all givs based on the same biv to find the address givs. Then split the address givs. Do this before splitting the giv, since that may map the SET_DEST to a new register. */ if ((set = single_set (insn)) && REG_P (SET_DEST (set)) && addr_combined_regs[REGNO (SET_DEST (set))]) { struct iv_class *bl; struct induction *v, *tv; unsigned int regno = REGNO (SET_DEST (set)); v = addr_combined_regs[REGNO (SET_DEST (set))]; bl = REG_IV_CLASS (ivs, REGNO (v->src_reg)); /* Although the giv_inc amount is not needed here, we must call calculate_giv_inc here since it might try to delete the last insn emitted. If we wait until later to call it, we might accidentally delete insns generated immediately below by emit_unrolled_add. */ giv_inc = calculate_giv_inc (set, insn, regno); /* Now find all address giv's that were combined with this giv 'v'. */ for (tv = bl->giv; tv; tv = tv->next_iv) if (tv->giv_type == DEST_ADDR && tv->same == v) { int this_giv_inc; /* If this DEST_ADDR giv was not split, then ignore it. */ if (*tv->location != tv->dest_reg) continue; /* Scale this_giv_inc if the multiplicative factors of the two givs are different. */ this_giv_inc = INTVAL (giv_inc); if (tv->mult_val != v->mult_val) this_giv_inc = (this_giv_inc / INTVAL (v->mult_val) * INTVAL (tv->mult_val)); tv->dest_reg = plus_constant (tv->dest_reg, this_giv_inc); *tv->location = tv->dest_reg; if (last_iteration && unroll_type != UNROLL_COMPLETELY) { /* Must emit an insn to increment the split address giv. Add in the const_adjust field in case there was a constant eliminated from the address. */ rtx value, dest_reg; /* tv->dest_reg will be either a bare register, or else a register plus a constant. */ if (REG_P (tv->dest_reg)) dest_reg = tv->dest_reg; else dest_reg = XEXP (tv->dest_reg, 0); /* Check for shared address givs, and avoid incrementing the shared pseudo reg more than once. */ if (! tv->same_insn && ! tv->shared) { /* tv->dest_reg may actually be a (PLUS (REG) (CONST)) here, so we must call plus_constant to add the const_adjust amount before calling emit_unrolled_add below. 
*/ value = plus_constant (tv->dest_reg, tv->const_adjust); if (GET_CODE (value) == PLUS) { /* The constant could be too large for an add immediate, so can't directly emit an insn here. */ emit_unrolled_add (dest_reg, XEXP (value, 0), XEXP (value, 1)); } } /* Reset the giv to be just the register again, in case it is used after the set we have just emitted. We must subtract the const_adjust factor added in above. */ tv->dest_reg = plus_constant (dest_reg, -tv->const_adjust); *tv->location = tv->dest_reg; } } } /* If this is a setting of a splittable variable, then determine how to split the variable, create a new set based on this split, and set up the reg_map so that later uses of the variable will use the new split variable. */ dest_reg_was_split = 0; if ((set = single_set (insn)) && REG_P (SET_DEST (set)) && splittable_regs[REGNO (SET_DEST (set))]) { unsigned int regno = REGNO (SET_DEST (set)); unsigned int src_regno; dest_reg_was_split = 1; giv_dest_reg = SET_DEST (set); giv_src_reg = giv_dest_reg; /* Compute the increment value for the giv, if it wasn't already computed above. */ if (giv_inc == 0) giv_inc = calculate_giv_inc (set, insn, regno); src_regno = REGNO (giv_src_reg); if (unroll_type == UNROLL_COMPLETELY) { /* Completely unrolling the loop. Set the induction variable to a known constant value. */ /* The value in splittable_regs may be an invariant value, so we must use plus_constant here. */ splittable_regs[regno] = plus_constant (splittable_regs[src_regno], INTVAL (giv_inc)); if (GET_CODE (splittable_regs[regno]) == PLUS) { giv_src_reg = XEXP (splittable_regs[regno], 0); giv_inc = XEXP (splittable_regs[regno], 1); } else { /* The splittable_regs value must be a REG or a CONST_INT, so put the entire value in the giv_src_reg variable. */ giv_src_reg = splittable_regs[regno]; giv_inc = const0_rtx; } } else { /* Partially unrolling loop. Create a new pseudo register for the iteration variable, and set it to be a constant plus the original register. Except on the last iteration, when the result has to go back into the original iteration var register. */ /* Handle bivs which must be mapped to a new register when split. This happens for bivs which need their final value set before loop entry. The new register for the biv was stored in the biv's first struct induction entry by find_splittable_regs. */ if (regno < ivs->n_regs && REG_IV_TYPE (ivs, regno) == BASIC_INDUCT) { giv_src_reg = REG_IV_CLASS (ivs, regno)->biv->src_reg; giv_dest_reg = giv_src_reg; } #if 0 /* If non-reduced/final-value givs were split, then this would have to remap those givs also. See find_splittable_regs. */ #endif splittable_regs[regno] = simplify_gen_binary (PLUS, GET_MODE (giv_src_reg), giv_inc, splittable_regs[src_regno]); giv_inc = splittable_regs[regno]; /* Now split the induction variable by changing the dest of this insn to a new register, and setting its reg_map entry to point to this new register. If this is the last iteration, and this is the last insn that will update the iv, then reuse the original dest, to ensure that the iv will have the proper value when the loop exits or repeats. Using splittable_regs_updates here like this is safe, because it can only be greater than one if all instructions modifying the iv are always executed in order. */ if (! last_iteration || (splittable_regs_updates[regno]-- != 1)) { tem = gen_reg_rtx (GET_MODE (giv_src_reg)); giv_dest_reg = tem; map->reg_map[regno] = tem; record_base_value (REGNO (tem), giv_inc == const0_rtx ? 
giv_src_reg : gen_rtx_PLUS (GET_MODE (giv_src_reg), giv_src_reg, giv_inc), 1); } else map->reg_map[regno] = giv_src_reg; } /* The constant being added could be too large for an add immediate, so can't directly emit an insn here. */ emit_unrolled_add (giv_dest_reg, giv_src_reg, giv_inc); copy = get_last_insn (); pattern = PATTERN (copy); } else { pattern = copy_rtx_and_substitute (pattern, map, 0); copy = emit_insn (pattern); } REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map); INSN_LOCATOR (copy) = INSN_LOCATOR (insn); /* If there is a REG_EQUAL note present whose value is not loop invariant, then delete it, since it may cause problems with later optimization passes. */ if ((tem = find_reg_note (copy, REG_EQUAL, NULL_RTX)) && !loop_invariant_p (loop, XEXP (tem, 0))) remove_note (copy, tem); #ifdef HAVE_cc0 /* If this insn is setting CC0, it may need to look at the insn that uses CC0 to see what type of insn it is. In that case, the call to recog via validate_change will fail. So don't substitute constants here. Instead, do it when we emit the following insn. For example, see the pyr.md file. That machine has signed and unsigned compares. The compare patterns must check the following branch insn to see which what kind of compare to emit. If the previous insn set CC0, substitute constants on it as well. */ if (sets_cc0_p (PATTERN (copy)) != 0) cc0_insn = copy; else { if (cc0_insn) try_constants (cc0_insn, map); cc0_insn = 0; try_constants (copy, map); } #else try_constants (copy, map); #endif /* Make split induction variable constants `permanent' since we know there are no backward branches across iteration variable settings which would invalidate this. */ if (dest_reg_was_split) { int regno = REGNO (SET_DEST (set)); if ((size_t) regno < VARRAY_SIZE (map->const_equiv_varray) && (VARRAY_CONST_EQUIV (map->const_equiv_varray, regno).age == map->const_age)) VARRAY_CONST_EQUIV (map->const_equiv_varray, regno).age = -1; } break; case JUMP_INSN: pattern = copy_rtx_and_substitute (PATTERN (insn), map, 0); copy = emit_jump_insn (pattern); REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map); INSN_LOCATOR (copy) = INSN_LOCATOR (insn); if (JUMP_LABEL (insn)) { JUMP_LABEL (copy) = get_label_from_map (map, CODE_LABEL_NUMBER (JUMP_LABEL (insn))); LABEL_NUSES (JUMP_LABEL (copy))++; } if (JUMP_LABEL (insn) == start_label && insn == copy_end && ! last_iteration) { /* This is a branch to the beginning of the loop; this is the last insn being copied; and this is not the last iteration. In this case, we want to change the original fall through case to be a branch past the end of the loop, and the original jump label case to fall_through. */ if (!invert_jump (copy, exit_label, 0)) { rtx jmp; rtx lab = gen_label_rtx (); /* Can't do it by reversing the jump (probably because we couldn't reverse the conditions), so emit a new jump_insn after COPY, and redirect the jump around that. */ jmp = emit_jump_insn_after (gen_jump (exit_label), copy); JUMP_LABEL (jmp) = exit_label; LABEL_NUSES (exit_label)++; jmp = emit_barrier_after (jmp); emit_label_after (lab, jmp); LABEL_NUSES (lab) = 0; if (!redirect_jump (copy, lab, 0)) abort (); } } #ifdef HAVE_cc0 if (cc0_insn) try_constants (cc0_insn, map); cc0_insn = 0; #endif try_constants (copy, map); /* Set the jump label of COPY correctly to avoid problems with later passes of unroll_loop, if INSN had jump label set. 
*/ if (JUMP_LABEL (insn)) { rtx label = 0; /* Can't use the label_map for every insn, since this may be the backward branch, and hence the label was not mapped. */ if ((set = single_set (copy))) { tem = SET_SRC (set); if (GET_CODE (tem) == LABEL_REF) label = XEXP (tem, 0); else if (GET_CODE (tem) == IF_THEN_ELSE) { if (XEXP (tem, 1) != pc_rtx) label = XEXP (XEXP (tem, 1), 0); else label = XEXP (XEXP (tem, 2), 0); } } if (label && GET_CODE (label) == CODE_LABEL) JUMP_LABEL (copy) = label; else { /* An unrecognizable jump insn, probably the entry jump for a switch statement. This label must have been mapped, so just use the label_map to get the new jump label. */ JUMP_LABEL (copy) = get_label_from_map (map, CODE_LABEL_NUMBER (JUMP_LABEL (insn))); } /* If this is a non-local jump, then must increase the label use count so that the label will not be deleted when the original jump is deleted. */ LABEL_NUSES (JUMP_LABEL (copy))++; } else if (GET_CODE (PATTERN (copy)) == ADDR_VEC || GET_CODE (PATTERN (copy)) == ADDR_DIFF_VEC) { rtx pat = PATTERN (copy); int diff_vec_p = GET_CODE (pat) == ADDR_DIFF_VEC; int len = XVECLEN (pat, diff_vec_p); int i; for (i = 0; i < len; i++) LABEL_NUSES (XEXP (XVECEXP (pat, diff_vec_p, i), 0))++; } /* If this used to be a conditional jump insn but whose branch direction is now known, we must do something special. */ if (any_condjump_p (insn) && onlyjump_p (insn) && map->last_pc_value) { #ifdef HAVE_cc0 /* If the previous insn set cc0 for us, delete it. */ if (only_sets_cc0_p (PREV_INSN (copy))) delete_related_insns (PREV_INSN (copy)); #endif /* If this is now a no-op, delete it. */ if (map->last_pc_value == pc_rtx) { delete_insn (copy); copy = 0; } else /* Otherwise, this is unconditional jump so we must put a BARRIER after it. We could do some dead code elimination here, but jump.c will do it just as well. */ emit_barrier (); } break; case CALL_INSN: pattern = copy_rtx_and_substitute (PATTERN (insn), map, 0); copy = emit_call_insn (pattern); REG_NOTES (copy) = initial_reg_note_copy (REG_NOTES (insn), map); INSN_LOCATOR (copy) = INSN_LOCATOR (insn); SIBLING_CALL_P (copy) = SIBLING_CALL_P (insn); CONST_OR_PURE_CALL_P (copy) = CONST_OR_PURE_CALL_P (insn); /* Because the USAGE information potentially contains objects other than hard registers, we need to copy it. */ CALL_INSN_FUNCTION_USAGE (copy) = copy_rtx_and_substitute (CALL_INSN_FUNCTION_USAGE (insn), map, 0); #ifdef HAVE_cc0 if (cc0_insn) try_constants (cc0_insn, map); cc0_insn = 0; #endif try_constants (copy, map); /* Be lazy and assume CALL_INSNs clobber all hard registers. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) VARRAY_CONST_EQUIV (map->const_equiv_varray, i).rtx = 0; break; case CODE_LABEL: /* If this is the loop start label, then we don't need to emit a copy of this label since no one will use it. */ if (insn != start_label) { copy = emit_label (get_label_from_map (map, CODE_LABEL_NUMBER (insn))); map->const_age++; } break; case BARRIER: copy = emit_barrier (); break; case NOTE: /* VTOP and CONT notes are valid only before the loop exit test. If placed anywhere else, loop may generate bad code. */ /* BASIC_BLOCK notes exist to stabilize basic block structures with the associated rtl. We do not want to share the structure in this new block. 
*/ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK && ((NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_VTOP && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_CONT) || (last_iteration && unroll_type != UNROLL_COMPLETELY))) copy = emit_note_copy (insn); else copy = 0; break; default: abort (); } map->insn_map[INSN_UID (insn)] = copy; } while (insn != copy_end); /* Now finish coping the REG_NOTES. */ insn = copy_start; do { insn = NEXT_INSN (insn); if ((GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN || GET_CODE (insn) == CALL_INSN) && map->insn_map[INSN_UID (insn)]) final_reg_note_copy (®_NOTES (map->insn_map[INSN_UID (insn)]), map); } while (insn != copy_end); /* There may be notes between copy_notes_from and loop_end. Emit a copy of each of these notes here, since there may be some important ones, such as NOTE_INSN_BLOCK_END notes, in this group. We don't do this on the last iteration, because the original notes won't be deleted. We can't use insert_before here, because when from preconditioning, insert_before points before the loop. We can't use copy_end, because there may be insns already inserted after it (which we don't want to copy) when not from preconditioning code. */ if (! last_iteration) { for (insn = copy_notes_from; insn != loop_end; insn = NEXT_INSN (insn)) { /* VTOP notes are valid only before the loop exit test. If placed anywhere else, loop may generate bad code. Although COPY_NOTES_FROM will be at most one or two (for cc0) instructions before the last insn in the loop, COPY_NOTES_FROM can be a NOTE_INSN_LOOP_CONT note if there is no VTOP note, as in a do .. while loop. */ if (GET_CODE (insn) == NOTE && ((NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_VTOP && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_CONT))) emit_note_copy (insn); } } if (final_label && LABEL_NUSES (final_label) > 0) emit_label (final_label); tem = get_insns (); end_sequence (); loop_insn_emit_before (loop, 0, insert_before, tem); } /* Emit an insn, using the expand_binop to ensure that a valid insn is emitted. This will correctly handle the case where the increment value won't fit in the immediate field of a PLUS insns. */ void emit_unrolled_add (rtx dest_reg, rtx src_reg, rtx increment) { rtx result; result = expand_simple_binop (GET_MODE (dest_reg), PLUS, src_reg, increment, dest_reg, 0, OPTAB_LIB_WIDEN); if (dest_reg != result) emit_move_insn (dest_reg, result); } /* Searches the insns between INSN and LOOP->END. Returns 1 if there is a backward branch in that range that branches to somewhere between LOOP->START and INSN. Returns 0 otherwise. */ /* ??? This is quadratic algorithm. Could be rewritten to be linear. In practice, this is not a problem, because this function is seldom called, and uses a negligible amount of CPU time on average. */ int back_branch_in_range_p (const struct loop *loop, rtx insn) { rtx p, q, target_insn; rtx loop_start = loop->start; rtx loop_end = loop->end; rtx orig_loop_end = loop->end; /* Stop before we get to the backward branch at the end of the loop. */ loop_end = prev_nonnote_insn (loop_end); if (GET_CODE (loop_end) == BARRIER) loop_end = PREV_INSN (loop_end); /* Check in case insn has been deleted, search forward for first non deleted insn following it. */ while (INSN_DELETED_P (insn)) insn = NEXT_INSN (insn); /* Check for the case where insn is the last insn in the loop. 
Deal with the case where INSN was a deleted loop test insn, in which case it will now be the NOTE_LOOP_END. */ if (insn == loop_end || insn == orig_loop_end) return 0; for (p = NEXT_INSN (insn); p != loop_end; p = NEXT_INSN (p)) { if (GET_CODE (p) == JUMP_INSN) { target_insn = JUMP_LABEL (p); /* Search from loop_start to insn, to see if one of them is the target_insn. We can't use LOOP_INSN_LUID comparisons here, since insn may not have an LUID entry. */ for (q = loop_start; q != insn; q = NEXT_INSN (q)) if (q == target_insn) return 1; } } return 0; } /* Try to generate the simplest rtx for the expression (PLUS (MULT mult1 mult2) add1). This is used to calculate the initial value of giv's. */ static rtx fold_rtx_mult_add (rtx mult1, rtx mult2, rtx add1, enum machine_mode mode) { rtx temp, mult_res; rtx result; /* The modes must all be the same. This should always be true. For now, check to make sure. */ if ((GET_MODE (mult1) != mode && GET_MODE (mult1) != VOIDmode) || (GET_MODE (mult2) != mode && GET_MODE (mult2) != VOIDmode) || (GET_MODE (add1) != mode && GET_MODE (add1) != VOIDmode)) abort (); /* Ensure that if at least one of mult1/mult2 are constant, then mult2 will be a constant. */ if (GET_CODE (mult1) == CONST_INT) { temp = mult2; mult2 = mult1; mult1 = temp; } mult_res = simplify_binary_operation (MULT, mode, mult1, mult2); if (! mult_res) mult_res = gen_rtx_MULT (mode, mult1, mult2); /* Again, put the constant second. */ if (GET_CODE (add1) == CONST_INT) { temp = add1; add1 = mult_res; mult_res = temp; } result = simplify_binary_operation (PLUS, mode, add1, mult_res); if (! result) result = gen_rtx_PLUS (mode, add1, mult_res); return result; } /* Searches the list of induction struct's for the biv BL, to try to calculate the total increment value for one iteration of the loop as a constant. Returns the increment value as an rtx, simplified as much as possible, if it can be calculated. Otherwise, returns 0. */ rtx biv_total_increment (const struct iv_class *bl) { struct induction *v; rtx result; /* For increment, must check every instruction that sets it. Each instruction must be executed only once each time through the loop. To verify this, we check that the insn is always executed, and that there are no backward branches after the insn that branch to before it. Also, the insn must have a mult_val of one (to make sure it really is an increment). */ result = const0_rtx; for (v = bl->biv; v; v = v->next_iv) { if (v->always_computable && v->mult_val == const1_rtx && ! v->maybe_multiple && SCALAR_INT_MODE_P (v->mode)) { /* If we have already counted it, skip it. */ if (v->same) continue; result = fold_rtx_mult_add (result, const1_rtx, v->add_val, v->mode); } else return 0; } return result; } /* For each biv and giv, determine whether it can be safely split into a different variable for each unrolled copy of the loop body. If it is safe to split, then indicate that by saving some useful info in the splittable_regs array. If the loop is being completely unrolled, then splittable_regs will hold the current value of the induction variable while the loop is unrolled. It must be set to the initial value of the induction variable here. Otherwise, splittable_regs will hold the difference between the current value of the induction variable and the value the induction variable had at the top of the loop. It must be set to the value 0 here. Returns the total number of instructions that set registers that are splittable. */ /* ?? 
If the loop is only unrolled twice, then most of the restrictions to constant values are unnecessary, since we can easily calculate increment values in this case even if nothing is constant. The increment value should not involve a multiply however. */ /* ?? Even if the biv/giv increment values aren't constant, it may still be beneficial to split the variable if the loop is only unrolled a few times, since multiplies by small integers (1,2,3,4) are very cheap. */ static int find_splittable_regs (const struct loop *loop, enum unroll_types unroll_type, int unroll_number) { struct loop_ivs *ivs = LOOP_IVS (loop); struct iv_class *bl; struct induction *v; rtx increment, tem; rtx biv_final_value; int biv_splittable; int result = 0; for (bl = ivs->list; bl; bl = bl->next) { /* Biv_total_increment must return a constant value, otherwise we can not calculate the split values. */ increment = biv_total_increment (bl); if (! increment || GET_CODE (increment) != CONST_INT) continue; /* The loop must be unrolled completely, or else have a known number of iterations and only one exit, or else the biv must be dead outside the loop, or else the final value must be known. Otherwise, it is unsafe to split the biv since it may not have the proper value on loop exit. */ /* loop_number_exit_count is nonzero if the loop has an exit other than a fall through at the end. */ biv_splittable = 1; biv_final_value = 0; if (unroll_type != UNROLL_COMPLETELY && (loop->exit_count || unroll_type == UNROLL_NAIVE) && (REGNO_LAST_LUID (bl->regno) >= LOOP_INSN_LUID (loop->end) || ! bl->init_insn || INSN_UID (bl->init_insn) >= max_uid_for_loop || (REGNO_FIRST_LUID (bl->regno) < LOOP_INSN_LUID (bl->init_insn)) || reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set))) && ! (biv_final_value = final_biv_value (loop, bl))) biv_splittable = 0; /* If any of the insns setting the BIV don't do so with a simple PLUS, we don't know how to split it. */ for (v = bl->biv; biv_splittable && v; v = v->next_iv) if ((tem = single_set (v->insn)) == 0 || !REG_P (SET_DEST (tem)) || REGNO (SET_DEST (tem)) != bl->regno || GET_CODE (SET_SRC (tem)) != PLUS) biv_splittable = 0; /* If final value is nonzero, then must emit an instruction which sets the value of the biv to the proper value. This is done after handling all of the givs, since some of them may need to use the biv's value in their initialization code. */ /* This biv is splittable. If completely unrolling the loop, save the biv's initial value. Otherwise, save the constant zero. */ if (biv_splittable == 1) { if (unroll_type == UNROLL_COMPLETELY) { /* If the initial value of the biv is itself (i.e. it is too complicated for strength_reduce to compute), or is a hard register, or it isn't invariant, then we must create a new pseudo reg to hold the initial value of the biv. */ if (REG_P (bl->initial_value) && (REGNO (bl->initial_value) == bl->regno || REGNO (bl->initial_value) < FIRST_PSEUDO_REGISTER || ! loop_invariant_p (loop, bl->initial_value))) { rtx tem = gen_reg_rtx (bl->biv->mode); record_base_value (REGNO (tem), bl->biv->add_val, 0); loop_insn_hoist (loop, gen_move_insn (tem, bl->biv->src_reg)); if (loop_dump_stream) fprintf (loop_dump_stream, "Biv %d initial value remapped to %d.\n", bl->regno, REGNO (tem)); splittable_regs[bl->regno] = tem; } else splittable_regs[bl->regno] = bl->initial_value; } else splittable_regs[bl->regno] = const0_rtx; /* Save the number of instructions that modify the biv, so that we can treat the last one specially. 
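Illustration (made-up numbers): for a biv with initial value (const_int 0) and increment 4 in a loop completely unrolled 3 times, splittable_regs[regno] is seeded with 0 here; copy_loop_body then advances it by the increment for each copy, so the three copies see the biv as the constants 0, 4 and 8 and the increment insns degenerate into loads of 4, 8 and 12.  For modulo and naive unrolling the entry is seeded with const0_rtx instead and accumulates the offset to add to the still-live biv register.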
*/ splittable_regs_updates[bl->regno] = bl->biv_count; result += bl->biv_count; if (loop_dump_stream) fprintf (loop_dump_stream, "Biv %d safe to split.\n", bl->regno); } /* Check every giv that depends on this biv to see whether it is splittable also. Even if the biv isn't splittable, givs which depend on it may be splittable if the biv is live outside the loop, and the givs aren't. */ result += find_splittable_givs (loop, bl, unroll_type, increment, unroll_number); /* If final value is nonzero, then must emit an instruction which sets the value of the biv to the proper value. This is done after handling all of the givs, since some of them may need to use the biv's value in their initialization code. */ if (biv_final_value) { /* If the loop has multiple exits, emit the insns before the loop to ensure that it will always be executed no matter how the loop exits. Otherwise emit the insn after the loop, since this is slightly more efficient. */ if (! loop->exit_count) loop_insn_sink (loop, gen_move_insn (bl->biv->src_reg, biv_final_value)); else { /* Create a new register to hold the value of the biv, and then set the biv to its final value before the loop start. The biv is set to its final value before loop start to ensure that this insn will always be executed, no matter how the loop exits. */ rtx tem = gen_reg_rtx (bl->biv->mode); record_base_value (REGNO (tem), bl->biv->add_val, 0); loop_insn_hoist (loop, gen_move_insn (tem, bl->biv->src_reg)); loop_insn_hoist (loop, gen_move_insn (bl->biv->src_reg, biv_final_value)); if (loop_dump_stream) fprintf (loop_dump_stream, "Biv %d mapped to %d for split.\n", REGNO (bl->biv->src_reg), REGNO (tem)); /* Set up the mapping from the original biv register to the new register. */ bl->biv->src_reg = tem; } } } return result; } /* For every giv based on the biv BL, check to determine whether it is splittable. This is a subroutine to find_splittable_regs (). Return the number of instructions that set splittable registers. */ static int find_splittable_givs (const struct loop *loop, struct iv_class *bl, enum unroll_types unroll_type, rtx increment, int unroll_number ATTRIBUTE_UNUSED) { struct loop_ivs *ivs = LOOP_IVS (loop); struct induction *v, *v2; rtx final_value; rtx tem; int result = 0; /* Scan the list of givs, and set the same_insn field when there are multiple identical givs in the same insn. */ for (v = bl->giv; v; v = v->next_iv) for (v2 = v->next_iv; v2; v2 = v2->next_iv) if (v->insn == v2->insn && rtx_equal_p (v->new_reg, v2->new_reg) && ! v2->same_insn) v2->same_insn = v; for (v = bl->giv; v; v = v->next_iv) { rtx giv_inc, value; /* Only split the giv if it has already been reduced, or if the loop is being completely unrolled. */ if (unroll_type != UNROLL_COMPLETELY && v->ignore) continue; /* The giv can be split if the insn that sets the giv is executed once and only once on every iteration of the loop. */ /* An address giv can always be split. v->insn is just a use not a set, and hence it does not matter whether it is always executed. All that matters is that all the biv increments are always executed, and we won't reach here if they aren't. */ if (v->giv_type != DEST_ADDR && (! v->always_computable || back_branch_in_range_p (loop, v->insn))) continue; /* The giv increment value must be a constant. */ giv_inc = fold_rtx_mult_add (v->mult_val, increment, const0_rtx, v->mode); if (! 
giv_inc || GET_CODE (giv_inc) != CONST_INT) continue; /* The loop must be unrolled completely, or else have a known number of iterations and only one exit, or else the giv must be dead outside the loop, or else the final value of the giv must be known. Otherwise, it is not safe to split the giv since it may not have the proper value on loop exit. */ /* The used outside loop test will fail for DEST_ADDR givs. They are never used outside the loop anyways, so it is always safe to split a DEST_ADDR giv. */ final_value = 0; if (unroll_type != UNROLL_COMPLETELY && (loop->exit_count || unroll_type == UNROLL_NAIVE) && v->giv_type != DEST_ADDR /* The next part is true if the pseudo is used outside the loop. We assume that this is true for any pseudo created after loop starts, because we don't have a reg_n_info entry for them. */ && (REGNO (v->dest_reg) >= max_reg_before_loop || (REGNO_FIRST_UID (REGNO (v->dest_reg)) != INSN_UID (v->insn) /* Check for the case where the pseudo is set by a shift/add sequence, in which case the first insn setting the pseudo is the first insn of the shift/add sequence. */ && (! (tem = find_reg_note (v->insn, REG_RETVAL, NULL_RTX)) || (REGNO_FIRST_UID (REGNO (v->dest_reg)) != INSN_UID (XEXP (tem, 0))))) /* Line above always fails if INSN was moved by loop opt. */ || (REGNO_LAST_LUID (REGNO (v->dest_reg)) >= LOOP_INSN_LUID (loop->end))) && ! (final_value = v->final_value)) continue; #if 0 /* Currently, non-reduced/final-value givs are never split. */ /* Should emit insns after the loop if possible, as the biv final value code below does. */ /* If the final value is nonzero, and the giv has not been reduced, then must emit an instruction to set the final value. */ if (final_value && !v->new_reg) { /* Create a new register to hold the value of the giv, and then set the giv to its final value before the loop start. The giv is set to its final value before loop start to ensure that this insn will always be executed, no matter how we exit. */ tem = gen_reg_rtx (v->mode); loop_insn_hoist (loop, gen_move_insn (tem, v->dest_reg)); loop_insn_hoist (loop, gen_move_insn (v->dest_reg, final_value)); if (loop_dump_stream) fprintf (loop_dump_stream, "Giv %d mapped to %d for split.\n", REGNO (v->dest_reg), REGNO (tem)); v->src_reg = tem; } #endif /* This giv is splittable. If completely unrolling the loop, save the giv's initial value. Otherwise, save the constant zero for it. */ if (unroll_type == UNROLL_COMPLETELY) { /* It is not safe to use bl->initial_value here, because it may not be invariant. It is safe to use the initial value stored in the splittable_regs array if it is set. In rare cases, it won't be set, so then we do exactly the same thing as find_splittable_regs does to get a safe value. */ rtx biv_initial_value; if (splittable_regs[bl->regno]) biv_initial_value = splittable_regs[bl->regno]; else if (!REG_P (bl->initial_value) || (REGNO (bl->initial_value) != bl->regno && REGNO (bl->initial_value) >= FIRST_PSEUDO_REGISTER)) biv_initial_value = bl->initial_value; else { rtx tem = gen_reg_rtx (bl->biv->mode); record_base_value (REGNO (tem), bl->biv->add_val, 0); loop_insn_hoist (loop, gen_move_insn (tem, bl->biv->src_reg)); biv_initial_value = tem; } biv_initial_value = extend_value_for_giv (v, biv_initial_value); value = fold_rtx_mult_add (v->mult_val, biv_initial_value, v->add_val, v->mode); } else value = const0_rtx; if (v->new_reg) { /* If a giv was combined with another giv, then we can only split this giv if the giv it was combined with was reduced. 
This is because the value of v->new_reg is meaningless in this case. */ if (v->same && ! v->same->new_reg) { if (loop_dump_stream) fprintf (loop_dump_stream, "giv combined with unreduced giv not split.\n"); continue; } /* If the giv is an address destination, it could be something other than a simple register, these have to be treated differently. */ else if (v->giv_type == DEST_REG) { /* If value is not a constant, register, or register plus constant, then compute its value into a register before loop start. This prevents invalid rtx sharing, and should generate better code. We can use bl->initial_value here instead of splittable_regs[bl->regno] because this code is going before the loop start. */ if (unroll_type == UNROLL_COMPLETELY && GET_CODE (value) != CONST_INT && !REG_P (value) && (GET_CODE (value) != PLUS || !REG_P (XEXP (value, 0)) || GET_CODE (XEXP (value, 1)) != CONST_INT)) { rtx tem = gen_reg_rtx (v->mode); record_base_value (REGNO (tem), v->add_val, 0); loop_iv_add_mult_hoist (loop, extend_value_for_giv (v, bl->initial_value), v->mult_val, v->add_val, tem); value = tem; } splittable_regs[reg_or_subregno (v->new_reg)] = value; } else continue; } else { #if 0 /* Currently, unreduced giv's can't be split. This is not too much of a problem since unreduced giv's are not live across loop iterations anyways. When unrolling a loop completely though, it makes sense to reduce&split givs when possible, as this will result in simpler instructions, and will not require that a reg be live across loop iterations. */ splittable_regs[REGNO (v->dest_reg)] = value; fprintf (stderr, "Giv %d at insn %d not reduced\n", REGNO (v->dest_reg), INSN_UID (v->insn)); #else continue; #endif } /* Unreduced givs are only updated once by definition. Reduced givs are updated as many times as their biv is. Mark it so if this is a splittable register. Don't need to do anything for address givs where this may not be a register. */ if (REG_P (v->new_reg)) { int count = 1; if (! v->ignore) count = REG_IV_CLASS (ivs, REGNO (v->src_reg))->biv_count; splittable_regs_updates[reg_or_subregno (v->new_reg)] = count; } result++; if (loop_dump_stream) { int regnum; if (GET_CODE (v->dest_reg) == CONST_INT) regnum = -1; else if (!REG_P (v->dest_reg)) regnum = REGNO (XEXP (v->dest_reg, 0)); else regnum = REGNO (v->dest_reg); fprintf (loop_dump_stream, "Giv %d at insn %d safe to split.\n", regnum, INSN_UID (v->insn)); } } return result; } /* Try to prove that the register is dead after the loop exits. Trace every loop exit looking for an insn that will always be executed, which sets the register to some value, and appears before the first use of the register is found. If successful, then return 1, otherwise return 0. */ /* ?? Could be made more intelligent in the handling of jumps, so that it can search past if statements and other similar structures. */ static int reg_dead_after_loop (const struct loop *loop, rtx reg) { rtx insn, label; int jump_count = 0; int label_count = 0; /* In addition to checking all exits of this loop, we must also check all exits of inner nested loops that would exit this loop. We don't have any way to identify those, so we just give up if there are any such inner loop exits. */ for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label)) label_count++; if (label_count != loop->exit_count) return 0; /* HACK: Must also search the loop fall through exit, create a label_ref here which points to the loop->end, and append the loop_number_exit_labels list to it. 
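That way the scan below can walk a single chain and treat the fall-through exit exactly like the recorded exit labels.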
*/ label = gen_rtx_LABEL_REF (VOIDmode, loop->end); LABEL_NEXTREF (label) = loop->exit_labels; for (; label; label = LABEL_NEXTREF (label)) { /* Succeed if find an insn which sets the biv or if reach end of function. Fail if find an insn that uses the biv, or if come to a conditional jump. */ insn = NEXT_INSN (XEXP (label, 0)); while (insn) { if (INSN_P (insn)) { rtx set, note; if (reg_referenced_p (reg, PATTERN (insn))) return 0; note = find_reg_equal_equiv_note (insn); if (note && reg_overlap_mentioned_p (reg, XEXP (note, 0))) return 0; set = single_set (insn); if (set && rtx_equal_p (SET_DEST (set), reg)) break; if (GET_CODE (insn) == JUMP_INSN) { if (GET_CODE (PATTERN (insn)) == RETURN) break; else if (!any_uncondjump_p (insn) /* Prevent infinite loop following infinite loops. */ || jump_count++ > 20) return 0; else insn = JUMP_LABEL (insn); } } insn = NEXT_INSN (insn); } } /* Success, the register is dead on all loop exits. */ return 1; } /* Try to calculate the final value of the biv, the value it will have at the end of the loop. If we can do it, return that value. */ rtx final_biv_value (const struct loop *loop, struct iv_class *bl) { unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations; rtx increment, tem; /* ??? This only works for MODE_INT biv's. Reject all others for now. */ if (GET_MODE_CLASS (bl->biv->mode) != MODE_INT) return 0; /* The final value for reversed bivs must be calculated differently than for ordinary bivs. In this case, there is already an insn after the loop which sets this biv's final value (if necessary), and there are no other loop exits, so we can return any value. */ if (bl->reversed) { if (loop_dump_stream) fprintf (loop_dump_stream, "Final biv value for %d, reversed biv.\n", bl->regno); return const0_rtx; } /* Try to calculate the final value as initial value + (number of iterations * increment). For this to work, increment must be invariant, the only exit from the loop must be the fall through at the bottom (otherwise it may not have its final value when the loop exits), and the initial value of the biv must be invariant. */ if (n_iterations != 0 && ! loop->exit_count && loop_invariant_p (loop, bl->initial_value)) { increment = biv_total_increment (bl); if (increment && loop_invariant_p (loop, increment)) { /* Can calculate the loop exit value, emit insns after loop end to calculate this value into a temporary register in case it is needed later. */ tem = gen_reg_rtx (bl->biv->mode); record_base_value (REGNO (tem), bl->biv->add_val, 0); loop_iv_add_mult_sink (loop, increment, GEN_INT (n_iterations), bl->initial_value, tem); if (loop_dump_stream) fprintf (loop_dump_stream, "Final biv value for %d, calculated.\n", bl->regno); return tem; } } /* Check to see if the biv is dead at all loop exits. */ if (reg_dead_after_loop (loop, bl->biv->src_reg)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Final biv value for %d, biv dead after loop exit.\n", bl->regno); return const0_rtx; } return 0; } /* Try to calculate the final value of the giv, the value it will have at the end of the loop. If we can do it, return that value. 
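For example, if the giv is g = 4 * i + 10 for a biv i that starts at 0, is incremented by 1 and runs for n iterations, the value emitted after the loop is 4 * (n - e) + 10, where e is the number of biv increments that occur after the giv's insn inside the loop body (see the subtraction loop below).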
*/ rtx final_giv_value (const struct loop *loop, struct induction *v) { struct loop_ivs *ivs = LOOP_IVS (loop); struct iv_class *bl; rtx insn; rtx increment, tem; rtx seq; rtx loop_end = loop->end; unsigned HOST_WIDE_INT n_iterations = LOOP_INFO (loop)->n_iterations; bl = REG_IV_CLASS (ivs, REGNO (v->src_reg)); /* The final value for givs which depend on reversed bivs must be calculated differently than for ordinary givs. In this case, there is already an insn after the loop which sets this giv's final value (if necessary), and there are no other loop exits, so we can return any value. */ if (bl->reversed) { if (loop_dump_stream) fprintf (loop_dump_stream, "Final giv value for %d, depends on reversed biv\n", REGNO (v->dest_reg)); return const0_rtx; } /* Try to calculate the final value as a function of the biv it depends upon. The only exit from the loop must be the fall through at the bottom and the insn that sets the giv must be executed on every iteration (otherwise the giv may not have its final value when the loop exits). */ /* ??? Can calculate the final giv value by subtracting off the extra biv increments times the giv's mult_val. The loop must have only one exit for this to work, but the loop iterations does not need to be known. */ if (n_iterations != 0 && ! loop->exit_count && v->always_executed) { /* ?? It is tempting to use the biv's value here since these insns will be put after the loop, and hence the biv will have its final value then. However, this fails if the biv is subsequently eliminated. Perhaps determine whether biv's are eliminable before trying to determine whether giv's are replaceable so that we can use the biv value here if it is not eliminable. */ /* We are emitting code after the end of the loop, so we must make sure that bl->initial_value is still valid then. It will still be valid if it is invariant. */ increment = biv_total_increment (bl); if (increment && loop_invariant_p (loop, increment) && loop_invariant_p (loop, bl->initial_value)) { /* Can calculate the loop exit value of its biv as (n_iterations * increment) + initial_value */ /* The loop exit value of the giv is then (final_biv_value - extra increments) * mult_val + add_val. The extra increments are any increments to the biv which occur in the loop after the giv's value is calculated. We must search from the insn that sets the giv to the end of the loop to calculate this value. */ /* Put the final biv value in tem. */ tem = gen_reg_rtx (v->mode); record_base_value (REGNO (tem), bl->biv->add_val, 0); loop_iv_add_mult_sink (loop, extend_value_for_giv (v, increment), GEN_INT (n_iterations), extend_value_for_giv (v, bl->initial_value), tem); /* Subtract off extra increments as we find them. */ for (insn = NEXT_INSN (v->insn); insn != loop_end; insn = NEXT_INSN (insn)) { struct induction *biv; for (biv = bl->biv; biv; biv = biv->next_iv) if (biv->insn == insn) { start_sequence (); tem = expand_simple_binop (GET_MODE (tem), MINUS, tem, biv->add_val, NULL_RTX, 0, OPTAB_LIB_WIDEN); seq = get_insns (); end_sequence (); loop_insn_sink (loop, seq); } } /* Now calculate the giv's final value. */ loop_iv_add_mult_sink (loop, tem, v->mult_val, v->add_val, tem); if (loop_dump_stream) fprintf (loop_dump_stream, "Final giv value for %d, calc from biv's value.\n", REGNO (v->dest_reg)); return tem; } } /* Replaceable giv's should never reach here. */ if (v->replaceable) abort (); /* Check to see if the biv is dead at all loop exits. 
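(The register tested here is the giv's own dest_reg; if it is dead at every exit, the giv's final value does not matter and const0_rtx is returned.)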
*/ if (reg_dead_after_loop (loop, v->dest_reg)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Final giv value for %d, giv dead after loop exit.\n", REGNO (v->dest_reg)); return const0_rtx; } return 0; } /* Look back before LOOP->START for the insn that sets REG and return the equivalent constant if there is a REG_EQUAL note otherwise just the SET_SRC of REG. */ static rtx loop_find_equiv_value (const struct loop *loop, rtx reg) { rtx loop_start = loop->start; rtx insn, set; rtx ret; ret = reg; for (insn = PREV_INSN (loop_start); insn; insn = PREV_INSN (insn)) { if (GET_CODE (insn) == CODE_LABEL) break; else if (INSN_P (insn) && reg_set_p (reg, insn)) { /* We found the last insn before the loop that sets the register. If it sets the entire register, and has a REG_EQUAL note, then use the value of the REG_EQUAL note. */ if ((set = single_set (insn)) && (SET_DEST (set) == reg)) { rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX); /* Only use the REG_EQUAL note if it is a constant. Other things, divide in particular, will cause problems later if we use them. */ if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST && CONSTANT_P (XEXP (note, 0))) ret = XEXP (note, 0); else ret = SET_SRC (set); /* We cannot do this if it changes between the assignment and loop start though. */ if (modified_between_p (ret, insn, loop_start)) ret = reg; } break; } } return ret; } /* Return a simplified rtx for the expression OP - REG. REG must appear in OP, and OP must be a register or the sum of a register and a second term. Thus, the return value must be const0_rtx or the second term. The caller is responsible for verifying that REG appears in OP and OP has the proper form. */ static rtx subtract_reg_term (rtx op, rtx reg) { if (op == reg) return const0_rtx; if (GET_CODE (op) == PLUS) { if (XEXP (op, 0) == reg) return XEXP (op, 1); else if (XEXP (op, 1) == reg) return XEXP (op, 0); } /* OP does not contain REG as a term. */ abort (); } /* Find and return register term common to both expressions OP0 and OP1 or NULL_RTX if no such term exists. Each expression must be a REG or a PLUS of a REG. */ static rtx find_common_reg_term (rtx op0, rtx op1) { if ((REG_P (op0) || GET_CODE (op0) == PLUS) && (REG_P (op1) || GET_CODE (op1) == PLUS)) { rtx op00; rtx op01; rtx op10; rtx op11; if (GET_CODE (op0) == PLUS) op01 = XEXP (op0, 1), op00 = XEXP (op0, 0); else op01 = const0_rtx, op00 = op0; if (GET_CODE (op1) == PLUS) op11 = XEXP (op1, 1), op10 = XEXP (op1, 0); else op11 = const0_rtx, op10 = op1; /* Find and return common register term if present. */ if (REG_P (op00) && (op00 == op10 || op00 == op11)) return op00; else if (REG_P (op01) && (op01 == op10 || op01 == op11)) return op01; } /* No common register term found. */ return NULL_RTX; } /* Determine the loop iterator and calculate the number of loop iterations. Returns the exact number of loop iterations if it can be calculated, otherwise returns zero. 
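As a side effect, the loop_info fields describing the iteration variable, its initial and final values, its increment and the comparison code are filled in for later use by precondition_loop_p and the unroller.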
*/ unsigned HOST_WIDE_INT loop_iterations (struct loop *loop) { struct loop_info *loop_info = LOOP_INFO (loop); struct loop_ivs *ivs = LOOP_IVS (loop); rtx comparison, comparison_value; rtx iteration_var, initial_value, increment, final_value; enum rtx_code comparison_code; HOST_WIDE_INT inc; unsigned HOST_WIDE_INT abs_inc; unsigned HOST_WIDE_INT abs_diff; int off_by_one; int increment_dir; int unsigned_p, compare_dir, final_larger; rtx last_loop_insn; rtx reg_term; struct iv_class *bl; loop_info->n_iterations = 0; loop_info->initial_value = 0; loop_info->initial_equiv_value = 0; loop_info->comparison_value = 0; loop_info->final_value = 0; loop_info->final_equiv_value = 0; loop_info->increment = 0; loop_info->iteration_var = 0; loop_info->unroll_number = 1; loop_info->iv = 0; /* We used to use prev_nonnote_insn here, but that fails because it might accidentally get the branch for a contained loop if the branch for this loop was deleted. We can only trust branches immediately before the loop_end. */ last_loop_insn = PREV_INSN (loop->end); /* ??? We should probably try harder to find the jump insn at the end of the loop. The following code assumes that the last loop insn is a jump to the top of the loop. */ if (GET_CODE (last_loop_insn) != JUMP_INSN) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: No final conditional branch found.\n"); return 0; } /* If there is a more than a single jump to the top of the loop we cannot (easily) determine the iteration count. */ if (LABEL_NUSES (JUMP_LABEL (last_loop_insn)) > 1) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Loop has multiple back edges.\n"); return 0; } /* If there are multiple conditionalized loop exit tests, they may jump back to differing CODE_LABELs. */ if (loop->top && loop->cont) { rtx temp = PREV_INSN (last_loop_insn); do { if (GET_CODE (temp) == JUMP_INSN) { /* There are some kinds of jumps we can't deal with easily. */ if (JUMP_LABEL (temp) == 0) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Jump insn has null JUMP_LABEL.\n"); return 0; } if (/* Previous unrolling may have generated new insns not covered by the uid_luid array. */ INSN_UID (JUMP_LABEL (temp)) < max_uid_for_loop /* Check if we jump back into the loop body. */ && LOOP_INSN_LUID (JUMP_LABEL (temp)) > LOOP_INSN_LUID (loop->top) && LOOP_INSN_LUID (JUMP_LABEL (temp)) < LOOP_INSN_LUID (loop->cont)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Loop has multiple back edges.\n"); return 0; } } } while ((temp = PREV_INSN (temp)) != loop->cont); } /* Find the iteration variable. If the last insn is a conditional branch, and the insn before tests a register value, make that the iteration variable. */ comparison = get_condition_for_loop (loop, last_loop_insn); if (comparison == 0) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: No final comparison found.\n"); return 0; } /* ??? Get_condition may switch position of induction variable and invariant register when it canonicalizes the comparison. */ comparison_code = GET_CODE (comparison); iteration_var = XEXP (comparison, 0); comparison_value = XEXP (comparison, 1); if (!REG_P (iteration_var)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Comparison not against register.\n"); return 0; } /* The only new registers that are created before loop iterations are givs made from biv increments or registers created by load_mems. 
In the latter case, it is possible that try_copy_prop will propagate a new pseudo into the old iteration register but this will be marked by having the REG_USERVAR_P bit set. */ if ((unsigned) REGNO (iteration_var) >= ivs->n_regs && ! REG_USERVAR_P (iteration_var)) abort (); /* Determine the initial value of the iteration variable, and the amount that it is incremented each loop. Use the tables constructed by the strength reduction pass to calculate these values. */ /* Clear the result values, in case no answer can be found. */ initial_value = 0; increment = 0; /* The iteration variable can be either a giv or a biv. Check to see which it is, and compute the variable's initial value, and increment value if possible. */ /* If this is a new register, can't handle it since we don't have any reg_iv_type entry for it. */ if ((unsigned) REGNO (iteration_var) >= ivs->n_regs) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: No reg_iv_type entry for iteration var.\n"); return 0; } /* Reject iteration variables larger than the host wide int size, since they could result in a number of iterations greater than the range of our `unsigned HOST_WIDE_INT' variable loop_info->n_iterations. */ else if ((GET_MODE_BITSIZE (GET_MODE (iteration_var)) > HOST_BITS_PER_WIDE_INT)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Iteration var rejected because mode too large.\n"); return 0; } else if (GET_MODE_CLASS (GET_MODE (iteration_var)) != MODE_INT) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Iteration var not an integer.\n"); return 0; } /* Try swapping the comparison to identify a suitable iv. */ if (REG_IV_TYPE (ivs, REGNO (iteration_var)) != BASIC_INDUCT && REG_IV_TYPE (ivs, REGNO (iteration_var)) != GENERAL_INDUCT && REG_P (comparison_value) && REGNO (comparison_value) < ivs->n_regs) { rtx temp = comparison_value; comparison_code = swap_condition (comparison_code); comparison_value = iteration_var; iteration_var = temp; } if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == BASIC_INDUCT) { if (REGNO (iteration_var) >= ivs->n_regs) abort (); /* Grab initial value, only useful if it is a constant. */ bl = REG_IV_CLASS (ivs, REGNO (iteration_var)); initial_value = bl->initial_value; if (!bl->biv->always_executed || bl->biv->maybe_multiple) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Basic induction var not set once in each iteration.\n"); return 0; } increment = biv_total_increment (bl); } else if (REG_IV_TYPE (ivs, REGNO (iteration_var)) == GENERAL_INDUCT) { HOST_WIDE_INT offset = 0; struct induction *v = REG_IV_INFO (ivs, REGNO (iteration_var)); rtx biv_initial_value; if (REGNO (v->src_reg) >= ivs->n_regs) abort (); if (!v->always_executed || v->maybe_multiple) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: General induction var not set once in each iteration.\n"); return 0; } bl = REG_IV_CLASS (ivs, REGNO (v->src_reg)); /* Increment value is mult_val times the increment value of the biv. */ increment = biv_total_increment (bl); if (increment) { struct induction *biv_inc; increment = fold_rtx_mult_add (v->mult_val, extend_value_for_giv (v, increment), const0_rtx, v->mode); /* The caller assumes that one full increment has occurred at the first loop test. But that's not true when the biv is incremented after the giv is set (which is the usual case), e.g.: i = 6; do {;} while (i++ < 9) . 
Therefore, we bias the initial value by subtracting the amount of the increment that occurs between the giv set and the giv test. */ for (biv_inc = bl->biv; biv_inc; biv_inc = biv_inc->next_iv) { if (loop_insn_first_p (v->insn, biv_inc->insn)) { if (REG_P (biv_inc->add_val)) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Basic induction var add_val is REG %d.\n", REGNO (biv_inc->add_val)); return 0; } /* If we have already counted it, skip it. */ if (biv_inc->same) continue; offset -= INTVAL (biv_inc->add_val); } } } if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Giv iterator, initial value bias %ld.\n", (long) offset); /* Initial value is mult_val times the biv's initial value plus add_val. Only useful if it is a constant. */ biv_initial_value = extend_value_for_giv (v, bl->initial_value); initial_value = fold_rtx_mult_add (v->mult_val, plus_constant (biv_initial_value, offset), v->add_val, v->mode); } else { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Not basic or general induction var.\n"); return 0; } if (initial_value == 0) return 0; unsigned_p = 0; off_by_one = 0; switch (comparison_code) { case LEU: unsigned_p = 1; case LE: compare_dir = 1; off_by_one = 1; break; case GEU: unsigned_p = 1; case GE: compare_dir = -1; off_by_one = -1; break; case EQ: /* Cannot determine loop iterations with this case. */ compare_dir = 0; break; case LTU: unsigned_p = 1; case LT: compare_dir = 1; break; case GTU: unsigned_p = 1; case GT: compare_dir = -1; break; case NE: compare_dir = 0; break; default: abort (); } /* If the comparison value is an invariant register, then try to find its value from the insns before the start of the loop. */ final_value = comparison_value; if (REG_P (comparison_value) && loop_invariant_p (loop, comparison_value)) { final_value = loop_find_equiv_value (loop, comparison_value); /* If we don't get an invariant final value, we are better off with the original register. */ if (! loop_invariant_p (loop, final_value)) final_value = comparison_value; } /* Calculate the approximate final value of the induction variable (on the last successful iteration). The exact final value depends on the branch operator, and increment sign. It will be wrong if the iteration variable is not incremented by one each time through the loop and (comparison_value + off_by_one - initial_value) % increment != 0. ??? Note that the final_value may overflow and thus final_larger will be bogus. A potentially infinite loop will be classified as immediate, e.g. for (i = 0x7ffffff0; i <= 0x7fffffff; i++) */ if (off_by_one) final_value = plus_constant (final_value, off_by_one); /* Save the calculated values describing this loop's bounds, in case precondition_loop_p will need them later. These values can not be recalculated inside precondition_loop_p because strength reduction optimizations may obscure the loop's structure. These values are only required by precondition_loop_p and insert_bct whenever the number of iterations cannot be computed at compile time. Only the difference between final_value and initial_value is important. Note that final_value is only approximate. 
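For example, for a loop such as for (i = 0; i < 10; i += 3) the recorded final_value is 10 even though the iteration variable is 12 when the loop exits; the count computed further down, abs_diff / abs_inc rounded up, is still the exact 4 iterations.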
*/ loop_info->initial_value = initial_value; loop_info->comparison_value = comparison_value; loop_info->final_value = plus_constant (comparison_value, off_by_one); loop_info->increment = increment; loop_info->iteration_var = iteration_var; loop_info->comparison_code = comparison_code; loop_info->iv = bl; /* Try to determine the iteration count for loops such as (for i = init; i < init + const; i++). When running the loop optimization twice, the first pass often converts simple loops into this form. */ if (REG_P (initial_value)) { rtx reg1; rtx reg2; rtx const2; reg1 = initial_value; if (GET_CODE (final_value) == PLUS) reg2 = XEXP (final_value, 0), const2 = XEXP (final_value, 1); else reg2 = final_value, const2 = const0_rtx; /* Check for initial_value = reg1, final_value = reg2 + const2, where reg1 != reg2. */ if (REG_P (reg2) && reg2 != reg1) { rtx temp; /* Find what reg1 is equivalent to. Hopefully it will either be reg2 or reg2 plus a constant. */ temp = loop_find_equiv_value (loop, reg1); if (find_common_reg_term (temp, reg2)) initial_value = temp; else if (loop_invariant_p (loop, reg2)) { /* Find what reg2 is equivalent to. Hopefully it will either be reg1 or reg1 plus a constant. Let's ignore the latter case for now since it is not so common. */ temp = loop_find_equiv_value (loop, reg2); if (temp == loop_info->iteration_var) temp = initial_value; if (temp == reg1) final_value = (const2 == const0_rtx) ? reg1 : gen_rtx_PLUS (GET_MODE (reg1), reg1, const2); } } else if (loop->vtop && GET_CODE (reg2) == CONST_INT) { rtx temp; /* When running the loop optimizer twice, check_dbra_loop further obfuscates reversible loops of the form: for (i = init; i < init + const; i++). We often end up with final_value = 0, initial_value = temp, temp = temp2 - init, where temp2 = init + const. If the loop has a vtop we can replace initial_value with const. */ temp = loop_find_equiv_value (loop, reg1); if (GET_CODE (temp) == MINUS && REG_P (XEXP (temp, 0))) { rtx temp2 = loop_find_equiv_value (loop, XEXP (temp, 0)); if (GET_CODE (temp2) == PLUS && XEXP (temp2, 0) == XEXP (temp, 1)) initial_value = XEXP (temp2, 1); } } } /* If have initial_value = reg + const1 and final_value = reg + const2, then replace initial_value with const1 and final_value with const2. This should be safe since we are protected by the initial comparison before entering the loop if we have a vtop. For example, a + b < a + c is not equivalent to b < c for all a when using modulo arithmetic. ??? Without a vtop we could still perform the optimization if we check the initial and final values carefully. */ if (loop->vtop && (reg_term = find_common_reg_term (initial_value, final_value))) { initial_value = subtract_reg_term (initial_value, reg_term); final_value = subtract_reg_term (final_value, reg_term); } loop_info->initial_equiv_value = initial_value; loop_info->final_equiv_value = final_value; /* For EQ comparison loops, we don't have a valid final value. Check this now so that we won't leave an invalid value if we return early for any other reason. */ if (comparison_code == EQ) loop_info->final_equiv_value = loop_info->final_value = 0; if (increment == 0) { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Increment value can't be calculated.\n"); return 0; } if (GET_CODE (increment) != CONST_INT) { /* If we have a REG, check to see if REG holds a constant value. */ /* ??? Other RTL, such as (neg (reg)) is possible here, but it isn't clear if it is worthwhile to try to handle such RTL. 
*/ if (REG_P (increment) || GET_CODE (increment) == SUBREG) increment = loop_find_equiv_value (loop, increment); if (GET_CODE (increment) != CONST_INT) { if (loop_dump_stream) { fprintf (loop_dump_stream, "Loop iterations: Increment value not constant "); print_simple_rtl (loop_dump_stream, increment); fprintf (loop_dump_stream, ".\n"); } return 0; } loop_info->increment = increment; } if (GET_CODE (initial_value) != CONST_INT) { if (loop_dump_stream) { fprintf (loop_dump_stream, "Loop iterations: Initial value not constant "); print_simple_rtl (loop_dump_stream, initial_value); fprintf (loop_dump_stream, ".\n"); } return 0; } else if (GET_CODE (final_value) != CONST_INT) { if (loop_dump_stream) { fprintf (loop_dump_stream, "Loop iterations: Final value not constant "); print_simple_rtl (loop_dump_stream, final_value); fprintf (loop_dump_stream, ".\n"); } return 0; } else if (comparison_code == EQ) { rtx inc_once; if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: EQ comparison loop.\n"); inc_once = gen_int_mode (INTVAL (initial_value) + INTVAL (increment), GET_MODE (iteration_var)); if (inc_once == final_value) { /* The iterator value once through the loop is equal to the comparison value. Either we have an infinite loop, or we'll loop twice. */ if (increment == const0_rtx) return 0; loop_info->n_iterations = 2; } else loop_info->n_iterations = 1; if (GET_CODE (loop_info->initial_value) == CONST_INT) loop_info->final_value = gen_int_mode ((INTVAL (loop_info->initial_value) + loop_info->n_iterations * INTVAL (increment)), GET_MODE (iteration_var)); else loop_info->final_value = plus_constant (loop_info->initial_value, loop_info->n_iterations * INTVAL (increment)); loop_info->final_equiv_value = gen_int_mode ((INTVAL (initial_value) + loop_info->n_iterations * INTVAL (increment)), GET_MODE (iteration_var)); return loop_info->n_iterations; } /* Final_larger is 1 if final larger, 0 if they are equal, otherwise -1. */ if (unsigned_p) final_larger = ((unsigned HOST_WIDE_INT) INTVAL (final_value) > (unsigned HOST_WIDE_INT) INTVAL (initial_value)) - ((unsigned HOST_WIDE_INT) INTVAL (final_value) < (unsigned HOST_WIDE_INT) INTVAL (initial_value)); else final_larger = (INTVAL (final_value) > INTVAL (initial_value)) - (INTVAL (final_value) < INTVAL (initial_value)); if (INTVAL (increment) > 0) increment_dir = 1; else if (INTVAL (increment) == 0) increment_dir = 0; else increment_dir = -1; /* There are 27 different cases: compare_dir = -1, 0, 1; final_larger = -1, 0, 1; increment_dir = -1, 0, 1. There are 4 normal cases, 4 reverse cases (where the iteration variable will overflow before the loop exits), 4 infinite loop cases, and 15 immediate exit (0 or 1 iteration depending on loop type) cases. Only try to optimize the normal cases. */ /* (compare_dir/final_larger/increment_dir) Normal cases: (0/-1/-1), (0/1/1), (-1/-1/-1), (1/1/1) Reverse cases: (0/-1/1), (0/1/-1), (-1/-1/1), (1/1/-1) Infinite loops: (0/-1/0), (0/1/0), (-1/-1/0), (1/1/0) Immediate exit: (0/0/X), (-1/0/X), (-1/1/X), (1/0/X), (1/-1/X) */ /* ?? If the meaning of reverse loops (where the iteration variable will overflow before the loop exits) is undefined, then could eliminate all of these special checks, and just always assume the loops are normal/immediate/infinite. Note that this means the sign of increment_dir does not have to be known. Also, since it does not really hurt if immediate exit loops or infinite loops are optimized, then that case could be ignored also, and hence all loops can be optimized. 
According to ANSI Spec, the reverse loop case result is undefined, because the action on overflow is undefined. See also the special test for NE loops below. */ if (final_larger == increment_dir && final_larger != 0 && (final_larger == compare_dir || compare_dir == 0)) /* Normal case. */ ; else { if (loop_dump_stream) fprintf (loop_dump_stream, "Loop iterations: Not normal loop.\n"); return 0; } /* Calculate the number of iterations, final_value is only an approximation, so correct for that. Note that abs_diff and n_iterations are unsigned, because they can be as large as 2^n - 1. */ inc = INTVAL (increment); if (inc > 0) { abs_diff = INTVAL (final_value) - INTVAL (initial_value); abs_inc = inc; } else if (inc < 0) { abs_diff = INTVAL (initial_value) - INTVAL (final_value); abs_inc = -inc; } else abort (); /* Given that iteration_var is going to iterate over its own mode, not HOST_WIDE_INT, disregard higher bits that might have come into the picture due to sign extension of initial and final values. */ abs_diff &= ((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (GET_MODE (iteration_var)) - 1) << 1) - 1; /* For NE tests, make sure that the iteration variable won't miss the final value. If abs_diff mod abs_incr is not zero, then the iteration variable will overflow before the loop exits, and we can not calculate the number of iterations. */ if (compare_dir == 0 && (abs_diff % abs_inc) != 0) return 0; /* Note that the number of iterations could be calculated using (abs_diff + abs_inc - 1) / abs_inc, provided care was taken to handle potential overflow of the summation. */ loop_info->n_iterations = abs_diff / abs_inc + ((abs_diff % abs_inc) != 0); return loop_info->n_iterations; } /* Replace uses of split bivs with their split pseudo register. This is for original instructions which remain after loop unrolling without copying. */ static rtx remap_split_bivs (struct loop *loop, rtx x) { struct loop_ivs *ivs = LOOP_IVS (loop); enum rtx_code code; int i; const char *fmt; if (x == 0) return x; code = GET_CODE (x); switch (code) { case SCRATCH: case PC: case CC0: case CONST_INT: case CONST_DOUBLE: case CONST: case SYMBOL_REF: case LABEL_REF: return x; case REG: #if 0 /* If non-reduced/final-value givs were split, then this would also have to remap those givs also. */ #endif if (REGNO (x) < ivs->n_regs && REG_IV_TYPE (ivs, REGNO (x)) == BASIC_INDUCT) return REG_IV_CLASS (ivs, REGNO (x))->biv->src_reg; break; default: break; } fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'e') XEXP (x, i) = remap_split_bivs (loop, XEXP (x, i)); else if (fmt[i] == 'E') { int j; for (j = 0; j < XVECLEN (x, i); j++) XVECEXP (x, i, j) = remap_split_bivs (loop, XVECEXP (x, i, j)); } } return x; } /* If FIRST_UID is a set of REGNO, and FIRST_UID dominates LAST_UID (e.g. FIST_UID is always executed if LAST_UID is), then return 1. Otherwise return 0. COPY_START is where we can start looking for the insns FIRST_UID and LAST_UID. COPY_END is where we stop looking for these insns. If there is no JUMP_INSN between LOOP_START and FIRST_UID, then FIRST_UID must dominate LAST_UID. If there is a CODE_LABEL between FIRST_UID and LAST_UID, then FIRST_UID may not dominate LAST_UID. If there is no CODE_LABEL between FIRST_UID and LAST_UID, then FIRST_UID must dominate LAST_UID. 
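These checks are simple linear scans over the insn chain between COPY_START and COPY_END, so the answer is conservative; no dominator information is computed.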
*/ int set_dominates_use (int regno, int first_uid, int last_uid, rtx copy_start, rtx copy_end) { int passed_jump = 0; rtx p = NEXT_INSN (copy_start); while (INSN_UID (p) != first_uid) { if (GET_CODE (p) == JUMP_INSN) passed_jump = 1; /* Could not find FIRST_UID. */ if (p == copy_end) return 0; p = NEXT_INSN (p); } /* Verify that FIRST_UID is an insn that entirely sets REGNO. */ if (! INSN_P (p) || ! dead_or_set_regno_p (p, regno)) return 0; /* FIRST_UID is always executed. */ if (passed_jump == 0) return 1; while (INSN_UID (p) != last_uid) { /* If we see a CODE_LABEL between FIRST_UID and LAST_UID, then we can not be sure that FIRST_UID dominates LAST_UID. */ if (GET_CODE (p) == CODE_LABEL) return 0; /* Could not find LAST_UID, but we reached the end of the loop, so it must be safe. */ else if (p == copy_end) return 1; p = NEXT_INSN (p); } /* FIRST_UID is always executed if LAST_UID is executed. */ return 1; } /* This routine is called when the number of iterations for the unrolled loop is one. The goal is to identify a loop that begins with an unconditional branch to the loop continuation note (or a label just after). In this case, the unconditional branch that starts the loop needs to be deleted so that we execute the single iteration. */ static rtx ujump_to_loop_cont (rtx loop_start, rtx loop_cont) { rtx x, label, label_ref; /* See if loop start, or the next insn is an unconditional jump. */ loop_start = next_nonnote_insn (loop_start); x = pc_set (loop_start); if (!x) return NULL_RTX; label_ref = SET_SRC (x); if (!label_ref) return NULL_RTX; /* Examine insn after loop continuation note. Return if not a label. */ label = next_nonnote_insn (loop_cont); if (label == 0 || GET_CODE (label) != CODE_LABEL) return NULL_RTX; /* Return the loop start if the branch label matches the code label. */ if (CODE_LABEL_NUMBER (label) == CODE_LABEL_NUMBER (XEXP (label_ref, 0))) return loop_start; else return NULL_RTX; } /* Output variables, constants and external declarations, for GNU compiler. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file handles generation of all the assembler code *except* the instructions of a function. This includes declarations of variables and their initial values. We also output the assembler code for constants stored in memory and are responsible for combining constants with the same value. */ /* Pragma related interfaces. Copyright (C) 1995, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_C_PRAGMA_H #define GCC_C_PRAGMA_H /* Cause the `yydebug' variable to be defined. */ #define YYDEBUG 1 extern int yydebug; extern struct cpp_reader* parse_in; #define HANDLE_PRAGMA_WEAK SUPPORTS_WEAK #ifdef HANDLE_SYSV_PRAGMA /* We always support #pragma pack for SYSV pragmas. */ #ifndef HANDLE_PRAGMA_PACK #define HANDLE_PRAGMA_PACK 1 #endif #endif /* HANDLE_SYSV_PRAGMA */ #ifdef HANDLE_PRAGMA_PACK_PUSH_POP /* If we are supporting #pragma pack(push... then we automatically support #pragma pack() */ #define HANDLE_PRAGMA_PACK 1 #endif /* HANDLE_PRAGMA_PACK_PUSH_POP */ extern void init_pragma (void); /* Front-end wrapper for pragma registration to avoid dragging cpplib.h in almost everywhere. */ extern void c_register_pragma (const char *, const char *, void (*) (struct cpp_reader *)); extern void maybe_apply_pragma_weak (tree); extern tree maybe_apply_renaming_pragma (tree, tree); extern void add_to_renaming_pragma_list (tree, tree); extern int c_lex (tree *); extern int c_lex_with_flags (tree *, unsigned char *); /* If 1, then lex strings into the execution character set. If 0, lex strings into the host character set. If -1, lex both, and chain them together, such that the former is the TREE_CHAIN of the latter. */ extern int c_lex_string_translate; #endif /* GCC_C_PRAGMA_H */ #ifdef XCOFF_DEBUGGING_INFO #include "xcoffout.h" /* Needed for external data declarations for e.g. AIX 4.x. */ #endif #ifndef ASM_STABS_OP #define ASM_STABS_OP "\t.stabs\t" #endif /* The (assembler) name of the first globally-visible object output. */ const char *first_global_object_name; const char *weak_global_object_name; struct addr_const; struct constant_descriptor_rtx; struct rtx_constant_pool; struct varasm_status GTY(()) { /* If we're using a per-function constant pool, this is it. */ struct rtx_constant_pool *pool; /* Number of tree-constants deferred during the expansion of this function. */ unsigned int deferred_constants; }; #define n_deferred_constants (cfun->varasm->deferred_constants) /* Number for making the label on the next constant that is stored in memory. */ static GTY(()) int const_labelno; /* Carry information from ASM_DECLARE_OBJECT_NAME to ASM_FINISH_DECLARE_OBJECT. */ int size_directive_output; /* The last decl for which assemble_variable was called, if it did ASM_DECLARE_OBJECT_NAME. If the last call to assemble_variable didn't do that, this holds 0. */ tree last_assemble_variable_decl; /* The following global variable indicates if the section label for the "cold" section of code has been output yet to the assembler. The label is useful when running gdb. This is part of the optimization that partitions hot and cold basic blocks into separate sections of the .o file. */ bool unlikely_section_label_printed = false; /* RTX_UNCHANGING_P in a MEM can mean it is stored into, for initialization. So giving constant the alias set for the type will allow such initializations to appear to conflict with the load of the constant. We avoid this by giving all constants an alias set for just constants. Since there will be no stores to that alias set, nothing will ever conflict with them.
*/ static HOST_WIDE_INT const_alias_set; static const char *strip_reg_name (const char *); static int contains_pointers_p (tree); #ifdef ASM_OUTPUT_EXTERNAL static bool incorporeal_function_p (tree); #endif static void decode_addr_const (tree, struct addr_const *); static hashval_t const_desc_hash (const void *); static int const_desc_eq (const void *, const void *); static hashval_t const_hash_1 (const tree); static int compare_constant (const tree, const tree); static tree copy_constant (tree); static void output_constant_def_contents (rtx); static void output_addressed_constants (tree); static unsigned HOST_WIDE_INT array_size_for_constructor (tree); static unsigned min_align (unsigned, unsigned); static void output_constructor (tree, unsigned HOST_WIDE_INT, unsigned int); static void globalize_decl (tree); static void maybe_assemble_visibility (tree); static int in_named_entry_eq (const void *, const void *); static hashval_t in_named_entry_hash (const void *); #ifdef ASM_OUTPUT_BSS static void asm_output_bss (FILE *, tree, const char *, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT); #endif #ifdef BSS_SECTION_ASM_OP #ifdef ASM_OUTPUT_ALIGNED_BSS static void asm_output_aligned_bss (FILE *, tree, const char *, unsigned HOST_WIDE_INT, int) ATTRIBUTE_UNUSED; #endif #endif /* BSS_SECTION_ASM_OP */ static bool asm_emit_uninitialised (tree, const char*, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT); static void mark_weak (tree); enum in_section { no_section, in_text, in_unlikely_executed_text, in_data, in_named #ifdef BSS_SECTION_ASM_OP , in_bss #endif #ifdef CTORS_SECTION_ASM_OP , in_ctors #endif #ifdef DTORS_SECTION_ASM_OP , in_dtors #endif #ifdef READONLY_DATA_SECTION_ASM_OP , in_readonly_data #endif #ifdef EXTRA_SECTIONS , EXTRA_SECTIONS #endif }; static GTY(()) enum in_section in_section = no_section; /* Return a nonzero value if DECL has a section attribute. */ #ifndef IN_NAMED_SECTION #define IN_NAMED_SECTION(DECL) \ ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \ && DECL_SECTION_NAME (DECL) != NULL_TREE) #endif /* Text of section name when in_section == in_named. */ static GTY(()) const char *in_named_name; /* Hash table of flags that have been used for a particular named section. */ struct in_named_entry GTY(()) { const char *name; unsigned int flags; bool declared; }; static GTY((param_is (struct in_named_entry))) htab_t in_named_htab; /* Define functions like text_section for any extra sections. */ #ifdef EXTRA_SECTION_FUNCTIONS EXTRA_SECTION_FUNCTIONS #endif /* Tell assembler to switch to text section. */ void text_section (void) { if (in_section != in_text) { in_section = in_text; fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP); ASM_OUTPUT_ALIGN (asm_out_file, 2); } } /* Tell assembler to switch to unlikely-to-be-executed text section. */ void unlikely_text_section (void) { if ((in_section != in_unlikely_executed_text) && (in_section != in_named || strcmp (in_named_name, UNLIKELY_EXECUTED_TEXT_SECTION_NAME) != 0)) { if (targetm.have_named_sections) named_section (NULL_TREE, UNLIKELY_EXECUTED_TEXT_SECTION_NAME, 0); else { in_section = in_unlikely_executed_text; fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP); } if (!unlikely_section_label_printed) { fprintf (asm_out_file, "__%s_unlikely_section:\n", current_function_name ()); unlikely_section_label_printed = true; /* Make sure that we have appropriate alignment for instructions in this section. */ assemble_align (FUNCTION_BOUNDARY); } } } /* Tell assembler to switch to data section. 
*/ void data_section (void) { if (in_section != in_data) { in_section = in_data; fprintf (asm_out_file, "%s\n", DATA_SECTION_ASM_OP); } } /* Tell assembler to switch to read-only data section. This is normally the text section. */ void readonly_data_section (void) { #ifdef READONLY_DATA_SECTION READONLY_DATA_SECTION (); /* Note this can call data_section. */ #else #ifdef READONLY_DATA_SECTION_ASM_OP if (in_section != in_readonly_data) { in_section = in_readonly_data; fputs (READONLY_DATA_SECTION_ASM_OP, asm_out_file); fputc ('\n', asm_out_file); } #else text_section (); #endif #endif } /* Determine if we're in the text section. */ int in_text_section (void) { return in_section == in_text; } /* Determine if we're in the unlikely-to-be-executed text section. */ int in_unlikely_text_section (void) { return in_section == in_unlikely_executed_text; } /* Determine if we're in the data section. */ int in_data_section (void) { return in_section == in_data; } /* Helper routines for maintaining in_named_htab. */ static int in_named_entry_eq (const void *p1, const void *p2) { const struct in_named_entry *old = p1; const char *new = p2; return strcmp (old->name, new) == 0; } static hashval_t in_named_entry_hash (const void *p) { const struct in_named_entry *old = p; return htab_hash_string (old->name); } /* If SECTION has been seen before as a named section, return the flags that were used. Otherwise, return 0. Note, that 0 is a perfectly valid set of flags for a section to have, so 0 does not mean that the section has not been seen. */ unsigned int get_named_section_flags (const char *section) { struct in_named_entry **slot; slot = (struct in_named_entry **) htab_find_slot_with_hash (in_named_htab, section, htab_hash_string (section), NO_INSERT); return slot ? (*slot)->flags : 0; } /* Returns true if the section has been declared before. Sets internal flag on this section in in_named_hash so subsequent calls on this section will return false. */ bool named_section_first_declaration (const char *name) { struct in_named_entry **slot; slot = (struct in_named_entry **) htab_find_slot_with_hash (in_named_htab, name, htab_hash_string (name), NO_INSERT); if (! (*slot)->declared) { (*slot)->declared = true; return true; } else { return false; } } /* Record FLAGS for SECTION. If SECTION was previously recorded with a different set of flags, return false. */ bool set_named_section_flags (const char *section, unsigned int flags) { struct in_named_entry **slot, *entry; slot = (struct in_named_entry **) htab_find_slot_with_hash (in_named_htab, section, htab_hash_string (section), INSERT); entry = *slot; if (!entry) { entry = ggc_alloc (sizeof (*entry)); *slot = entry; entry->name = ggc_strdup (section); entry->flags = flags; entry->declared = false; } else if (entry->flags != flags) return false; return true; } /* Tell assembler to change to section NAME with attributes FLAGS. */ void named_section_flags (const char *name, unsigned int flags) { if (in_section != in_named || strcmp (name, in_named_name) != 0) { if (! set_named_section_flags (name, flags)) abort (); targetm.asm_out.named_section (name, flags); if (flags & SECTION_FORGET) in_section = no_section; else { in_named_name = ggc_strdup (name); in_section = in_named; } } } /* Tell assembler to change to section NAME for DECL. If DECL is NULL, just switch to section NAME. If NAME is NULL, get the name from DECL. If RELOC is 1, the initializer for DECL contains relocs. 
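The section flags are computed by targetm.section_type_flags; if NAME was previously recorded with different flags, a section type conflict is reported for user decls unless SECTION_OVERRIDE is set.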
*/ void named_section (tree decl, const char *name, int reloc) { unsigned int flags; if (decl != NULL_TREE && !DECL_P (decl)) abort (); if (name == NULL) name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl)); flags = targetm.section_type_flags (decl, name, reloc); /* Sanity check user variables for flag changes. Non-user section flag changes will abort in named_section_flags. However, don't complain if SECTION_OVERRIDE is set. We trust that the setter knows that it is safe to ignore the default flags for this decl. */ if (decl && ! set_named_section_flags (name, flags)) { flags = get_named_section_flags (name); if ((flags & SECTION_OVERRIDE) == 0) error ("%J%D causes a section type conflict", decl, decl); } named_section_flags (name, flags); } /* If required, set DECL_SECTION_NAME to a unique name. */ void resolve_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED, int flag_function_or_data_sections) { if (DECL_SECTION_NAME (decl) == NULL_TREE && targetm.have_named_sections && (flag_function_or_data_sections || DECL_ONE_ONLY (decl))) targetm.asm_out.unique_section (decl, reloc); } #ifdef BSS_SECTION_ASM_OP /* Tell the assembler to switch to the bss section. */ void bss_section (void) { if (in_section != in_bss) { fprintf (asm_out_file, "%s\n", BSS_SECTION_ASM_OP); in_section = in_bss; } } #ifdef ASM_OUTPUT_BSS /* Utility function for ASM_OUTPUT_BSS for targets to use if they don't support alignments in .bss. ??? It is believed that this function will work in most cases so such support is localized here. */ static void asm_output_bss (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name, unsigned HOST_WIDE_INT size ATTRIBUTE_UNUSED, unsigned HOST_WIDE_INT rounded) { targetm.asm_out.globalize_label (file, name); bss_section (); #ifdef ASM_DECLARE_OBJECT_NAME last_assemble_variable_decl = decl; ASM_DECLARE_OBJECT_NAME (file, name, decl); #else /* Standard thing is just output label for the object. */ ASM_OUTPUT_LABEL (file, name); #endif /* ASM_DECLARE_OBJECT_NAME */ ASM_OUTPUT_SKIP (file, rounded ? rounded : 1); } #endif #ifdef ASM_OUTPUT_ALIGNED_BSS /* Utility function for targets to use in implementing ASM_OUTPUT_ALIGNED_BSS. ??? It is believed that this function will work in most cases so such support is localized here. */ static void asm_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name, unsigned HOST_WIDE_INT size, int align) { bss_section (); ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT)); #ifdef ASM_DECLARE_OBJECT_NAME last_assemble_variable_decl = decl; ASM_DECLARE_OBJECT_NAME (file, name, decl); #else /* Standard thing is just output label for the object. */ ASM_OUTPUT_LABEL (file, name); #endif /* ASM_DECLARE_OBJECT_NAME */ ASM_OUTPUT_SKIP (file, size ? size : 1); } #endif #endif /* BSS_SECTION_ASM_OP */ /* Switch to the section for function DECL. If DECL is NULL_TREE, switch to the text section. ??? It's not clear that we will ever be passed NULL_TREE, but it's safer to handle it. */ void function_section (tree decl) { if (scan_ahead_for_unlikely_executed_note (get_insns())) unlikely_text_section (); else { if (decl != NULL_TREE && DECL_SECTION_NAME (decl) != NULL_TREE) named_section (decl, (char *) 0, 0); else text_section (); } } /* Switch to section for variable DECL. RELOC is the same as the argument to SELECT_SECTION. 
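Decls with an explicit section attribute are routed through named_section; all others are handed to targetm.asm_out.select_section along with the decl's alignment.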
*/ void variable_section (tree decl, int reloc) { if (IN_NAMED_SECTION (decl)) named_section (decl, NULL, reloc); else targetm.asm_out.select_section (decl, reloc, DECL_ALIGN (decl)); } /* Tell assembler to switch to the section for string merging. */ void mergeable_string_section (tree decl ATTRIBUTE_UNUSED, unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED, unsigned int flags ATTRIBUTE_UNUSED) { if (HAVE_GAS_SHF_MERGE && flag_merge_constants && TREE_CODE (decl) == STRING_CST && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE && align <= 256 && TREE_STRING_LENGTH (decl) >= int_size_in_bytes (TREE_TYPE (decl))) { enum machine_mode mode; unsigned int modesize; const char *str; int i, j, len, unit; char name[30]; mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (decl))); modesize = GET_MODE_BITSIZE (mode); if (modesize >= 8 && modesize <= 256 && (modesize & (modesize - 1)) == 0) { if (align < modesize) align = modesize; str = TREE_STRING_POINTER (decl); len = TREE_STRING_LENGTH (decl); unit = GET_MODE_SIZE (mode); /* Check for embedded NUL characters. */ for (i = 0; i < len; i += unit) { for (j = 0; j < unit; j++) if (str[i + j] != '\0') break; if (j == unit) break; } if (i == len - unit) { sprintf (name, ".rodata.str%d.%d", modesize / 8, (int) (align / 8)); flags |= (modesize / 8) | SECTION_MERGE | SECTION_STRINGS; if (!i && modesize < align) { /* A "" string with requested alignment greater than character size might cause a problem: if some other string required even bigger alignment than "", then linker might think the "" is just part of padding after some other string and not put it into the hash table initially. But this means "" could have smaller alignment than requested. */ #ifdef ASM_OUTPUT_SECTION_START named_section_flags (name, flags); ASM_OUTPUT_SECTION_START (asm_out_file); #else readonly_data_section (); #endif return; } named_section_flags (name, flags); return; } } } readonly_data_section (); } /* Tell assembler to switch to the section for constant merging. */ void mergeable_constant_section (enum machine_mode mode ATTRIBUTE_UNUSED, unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED, unsigned int flags ATTRIBUTE_UNUSED) { unsigned int modesize = GET_MODE_BITSIZE (mode); if (HAVE_GAS_SHF_MERGE && flag_merge_constants && mode != VOIDmode && mode != BLKmode && modesize <= align && align >= 8 && align <= 256 && (align & (align - 1)) == 0) { char name[24]; sprintf (name, ".rodata.cst%d", (int) (align / 8)); flags |= (align / 8) | SECTION_MERGE; named_section_flags (name, flags); return; } readonly_data_section (); } /* Given NAME, a putative register name, discard any customary prefixes. */ static const char * strip_reg_name (const char *name) { #ifdef REGISTER_PREFIX if (!strncmp (name, REGISTER_PREFIX, strlen (REGISTER_PREFIX))) name += strlen (REGISTER_PREFIX); #endif if (name[0] == '%' || name[0] == '#') name++; return name; } /* Decode an `asm' spec for a declaration as a register name. Return the register number, or -1 if nothing specified, or -2 if the ASMSPEC is not `cc' or `memory' and is not recognized, or -3 if ASMSPEC is `cc' and is not recognized, or -4 if ASMSPEC is `memory' and is not recognized. Accept an exact spelling or a decimal number. Prefixes such as % are optional. */ int decode_reg_name (const char *asmspec) { if (asmspec != 0) { int i; /* Get rid of confusing prefixes. */ asmspec = strip_reg_name (asmspec); /* Allow a decimal number as a "register name". */ for (i = strlen (asmspec) - 1; i >= 0; i--) if (! 
ISDIGIT (asmspec[i])) break; if (asmspec[0] != 0 && i < 0) { i = atoi (asmspec); if (i < FIRST_PSEUDO_REGISTER && i >= 0) return i; else return -2; } for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (reg_names[i][0] && ! strcmp (asmspec, strip_reg_name (reg_names[i]))) return i; #ifdef ADDITIONAL_REGISTER_NAMES { static const struct { const char *const name; const int number; } table[] = ADDITIONAL_REGISTER_NAMES; for (i = 0; i < (int) ARRAY_SIZE (table); i++) if (! strcmp (asmspec, table[i].name)) return table[i].number; } #endif /* ADDITIONAL_REGISTER_NAMES */ if (!strcmp (asmspec, "memory")) return -4; if (!strcmp (asmspec, "cc")) return -3; return -2; } return -1; } /* Create the DECL_RTL for a VAR_DECL or FUNCTION_DECL. DECL should have static storage duration. In other words, it should not be an automatic variable, including PARM_DECLs. There is, however, one exception: this function handles variables explicitly placed in a particular register by the user. ASMSPEC, if not 0, is the string which the user specified as the assembler symbol name. This is never called for PARM_DECL nodes. */ void make_decl_rtl (tree decl, const char *asmspec) { const char *name = 0; int reg_number; rtx x; /* Check that we are not being given an automatic variable. */ /* A weak alias has TREE_PUBLIC set but not the other bits. */ if (TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == RESULT_DECL || (TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl) && !TREE_PUBLIC (decl) && !DECL_EXTERNAL (decl) && !DECL_REGISTER (decl))) abort (); /* And that we were not given a type or a label. */ else if (TREE_CODE (decl) == TYPE_DECL || TREE_CODE (decl) == LABEL_DECL) abort (); /* For a duplicate declaration, we can be called twice on the same DECL node. Don't discard the RTL already made. */ if (DECL_RTL_SET_P (decl)) { /* If the old RTL had the wrong mode, fix the mode. */ if (GET_MODE (DECL_RTL (decl)) != DECL_MODE (decl)) SET_DECL_RTL (decl, adjust_address_nv (DECL_RTL (decl), DECL_MODE (decl), 0)); /* ??? Another way to do this would be to maintain a hashed table of such critters. Instead of adding stuff to a DECL to give certain attributes to it, we could use an external hash map from DECL to set of attributes. */ /* Let the target reassign the RTL if it wants. This is necessary, for example, when one machine specific decl attribute overrides another. */ targetm.encode_section_info (decl, DECL_RTL (decl), false); /* Make this function static known to the mudflap runtime. */ if (flag_mudflap && TREE_CODE (decl) == VAR_DECL) mudflap_enqueue_decl (decl); return; } reg_number = decode_reg_name (asmspec); if (reg_number == -2) { /* ASMSPEC is given, and not the name of a register. Mark the name with a star so assemble_name won't munge it. */ char *starred = alloca (strlen (asmspec) + 2); starred[0] = '*'; strcpy (starred + 1, asmspec); change_decl_assembler_name (decl, get_identifier (starred)); } name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); if (TREE_CODE (decl) != FUNCTION_DECL && DECL_REGISTER (decl)) { /* First detect errors in declaring global registers. */ if (reg_number == -1) error ("%Jregister name not specified for '%D'", decl, decl); else if (reg_number < 0) error ("%Jinvalid register name for '%D'", decl, decl); else if (TYPE_MODE (TREE_TYPE (decl)) == BLKmode) error ("%Jdata type of '%D' isn't suitable for a register", decl, decl); else if (! 
HARD_REGNO_MODE_OK (reg_number, TYPE_MODE (TREE_TYPE (decl)))) error ("%Jregister specified for '%D' isn't suitable for data type", decl, decl); /* Now handle properly declared static register variables. */ else { int nregs; if (DECL_INITIAL (decl) != 0 && TREE_STATIC (decl)) { DECL_INITIAL (decl) = 0; error ("global register variable has initial value"); } if (TREE_THIS_VOLATILE (decl)) warning ("volatile register variables don't work as you might wish"); /* If the user specified one of the eliminables registers here, e.g., FRAME_POINTER_REGNUM, we don't want to get this variable confused with that register and be eliminated. This usage is somewhat suspect... */ SET_DECL_RTL (decl, gen_rtx_raw_REG (DECL_MODE (decl), reg_number)); ORIGINAL_REGNO (DECL_RTL (decl)) = reg_number; REG_USERVAR_P (DECL_RTL (decl)) = 1; if (TREE_STATIC (decl)) { /* Make this register global, so not usable for anything else. */ #ifdef ASM_DECLARE_REGISTER_GLOBAL ASM_DECLARE_REGISTER_GLOBAL (asm_out_file, decl, reg_number, name); #endif nregs = hard_regno_nregs[reg_number][DECL_MODE (decl)]; while (nregs > 0) globalize_reg (reg_number + --nregs); } /* As a register variable, it has no section. */ return; } } /* Now handle ordinary static variables and functions (in memory). Also handle vars declared register invalidly. */ if (reg_number >= 0 || reg_number == -3) error ("%Jregister name given for non-register variable '%D'", decl, decl); /* Specifying a section attribute on a variable forces it into a non-.bss section, and thus it cannot be common. */ if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != NULL_TREE && DECL_INITIAL (decl) == NULL_TREE && DECL_COMMON (decl)) DECL_COMMON (decl) = 0; /* Variables can't be both common and weak. */ if (TREE_CODE (decl) == VAR_DECL && DECL_WEAK (decl)) DECL_COMMON (decl) = 0; x = gen_rtx_SYMBOL_REF (Pmode, name); SYMBOL_REF_WEAK (x) = DECL_WEAK (decl); SYMBOL_REF_DECL (x) = decl; x = gen_rtx_MEM (DECL_MODE (decl), x); if (TREE_CODE (decl) != FUNCTION_DECL) set_mem_attributes (x, decl, 1); SET_DECL_RTL (decl, x); /* Optionally set flags or add text to the name to record information such as that it is a function name. If the name is changed, the macro ASM_OUTPUT_LABELREF will have to know how to strip this information. */ targetm.encode_section_info (decl, DECL_RTL (decl), true); /* Make this function static known to the mudflap runtime. */ if (flag_mudflap && TREE_CODE (decl) == VAR_DECL) mudflap_enqueue_decl (decl); } /* Make the rtl for variable VAR be volatile. Use this only for static variables. */ void make_var_volatile (tree var) { if (!MEM_P (DECL_RTL (var))) abort (); MEM_VOLATILE_P (DECL_RTL (var)) = 1; } /* Output a string of literal assembler code for an `asm' keyword used between functions. */ void assemble_asm (tree string) { app_enable (); if (TREE_CODE (string) == ADDR_EXPR) string = TREE_OPERAND (string, 0); fprintf (asm_out_file, "\t%s\n", TREE_STRING_POINTER (string)); } /* Record an element in the table of global destructors. SYMBOL is a SYMBOL_REF of the function to be called; PRIORITY is a number between 0 and MAX_INIT_PRIORITY. */ void default_stabs_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED) { /* Tell GNU LD that this is part of the static destructor set. This will work for any system that uses stabs, most usefully aout systems. 
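
   For illustration, assuming a target whose ASM_STABS_OP is "\t.stabs\t"
   (the exact spelling is target-defined), a destructor symbol _my_dtor
   would yield a line of the form

       .stabs "___DTOR_LIST__",22,0,0,_my_dtor

   where 22 is the stab code for a text-section set element (N_SETT).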
*/ fprintf (asm_out_file, "%s\"___DTOR_LIST__\",22,0,0,", ASM_STABS_OP); assemble_name (asm_out_file, XSTR (symbol, 0)); fputc ('\n', asm_out_file); } void default_named_section_asm_out_destructor (rtx symbol, int priority) { const char *section = ".dtors"; char buf[16]; /* ??? This only works reliably with the GNU linker. */ if (priority != DEFAULT_INIT_PRIORITY) { sprintf (buf, ".dtors.%.5u", /* Invert the numbering so the linker puts us in the proper order; constructors are run from right to left, and the linker sorts in increasing order. */ MAX_INIT_PRIORITY - priority); section = buf; } named_section_flags (section, SECTION_WRITE); assemble_align (POINTER_SIZE); assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1); } #ifdef DTORS_SECTION_ASM_OP void dtors_section (void) { if (in_section != in_dtors) { in_section = in_dtors; fputs (DTORS_SECTION_ASM_OP, asm_out_file); fputc ('\n', asm_out_file); } } void default_dtor_section_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED) { dtors_section (); assemble_align (POINTER_SIZE); assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1); } #endif /* Likewise for global constructors. */ void default_stabs_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED) { /* Tell GNU LD that this is part of the static destructor set. This will work for any system that uses stabs, most usefully aout systems. */ fprintf (asm_out_file, "%s\"___CTOR_LIST__\",22,0,0,", ASM_STABS_OP); assemble_name (asm_out_file, XSTR (symbol, 0)); fputc ('\n', asm_out_file); } void default_named_section_asm_out_constructor (rtx symbol, int priority) { const char *section = ".ctors"; char buf[16]; /* ??? This only works reliably with the GNU linker. */ if (priority != DEFAULT_INIT_PRIORITY) { sprintf (buf, ".ctors.%.5u", /* Invert the numbering so the linker puts us in the proper order; constructors are run from right to left, and the linker sorts in increasing order. */ MAX_INIT_PRIORITY - priority); section = buf; } named_section_flags (section, SECTION_WRITE); assemble_align (POINTER_SIZE); assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1); } #ifdef CTORS_SECTION_ASM_OP void ctors_section (void) { if (in_section != in_ctors) { in_section = in_ctors; fputs (CTORS_SECTION_ASM_OP, asm_out_file); fputc ('\n', asm_out_file); } } void default_ctor_section_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED) { ctors_section (); assemble_align (POINTER_SIZE); assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1); } #endif /* CONSTANT_POOL_BEFORE_FUNCTION may be defined as an expression with a nonzero value if the constant pool should be output before the start of the function, or a zero value if the pool should output after the end of the function. The default is to put it before the start. */ #ifndef CONSTANT_POOL_BEFORE_FUNCTION #define CONSTANT_POOL_BEFORE_FUNCTION 1 #endif /* DECL is an object (either VAR_DECL or FUNCTION_DECL) which is going to be output to assembler. Set first_global_object_name and weak_global_object_name as appropriate. 
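
   For illustration (the exact string is subject to the target's name
   encoding), if the first public definition emitted from a file is

       int counter = 1;

   then first_global_object_name becomes "counter".  A weak or one-only
   definition seen first sets weak_global_object_name instead, and
   tentative commons without an initializer are skipped entirely.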
*/ void notice_global_symbol (tree decl) { const char **type = &first_global_object_name; if (first_global_object_name || !TREE_PUBLIC (decl) || DECL_EXTERNAL (decl) || !DECL_NAME (decl) || (TREE_CODE (decl) != FUNCTION_DECL && (TREE_CODE (decl) != VAR_DECL || (DECL_COMMON (decl) && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node)))) || !MEM_P (DECL_RTL (decl))) return; /* We win when global object is found, but it is useful to know about weak symbol as well so we can produce nicer unique names. */ if (DECL_WEAK (decl) || DECL_ONE_ONLY (decl)) type = &weak_global_object_name; if (!*type) { const char *p; char *name; rtx decl_rtl = DECL_RTL (decl); p = targetm.strip_name_encoding (XSTR (XEXP (decl_rtl, 0), 0)); name = xstrdup (p); *type = name; } } /* Output assembler code for the constant pool of a function and associated with defining the name of the function. DECL describes the function. NAME is the function's name. For the constant pool, we use the current constant pool data. */ void assemble_start_function (tree decl, const char *fnname) { int align; unlikely_section_label_printed = false; /* The following code does not need preprocessing in the assembler. */ app_disable (); if (CONSTANT_POOL_BEFORE_FUNCTION) output_constant_pool (fnname, decl); resolve_unique_section (decl, 0, flag_function_sections); function_section (decl); /* Tell assembler to move to target machine's alignment for functions. */ align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT); if (align < force_align_functions_log) align = force_align_functions_log; if (align > 0) { ASM_OUTPUT_ALIGN (asm_out_file, align); } /* Handle a user-specified function alignment. Note that we still need to align to FUNCTION_BOUNDARY, as above, because ASM_OUTPUT_MAX_SKIP_ALIGN might not do any alignment at all. */ if (align_functions_log > align && cfun->function_frequency != FUNCTION_FREQUENCY_UNLIKELY_EXECUTED) { #ifdef ASM_OUTPUT_MAX_SKIP_ALIGN ASM_OUTPUT_MAX_SKIP_ALIGN (asm_out_file, align_functions_log, align_functions - 1); #else ASM_OUTPUT_ALIGN (asm_out_file, align_functions_log); #endif } #ifdef ASM_OUTPUT_FUNCTION_PREFIX ASM_OUTPUT_FUNCTION_PREFIX (asm_out_file, fnname); #endif (*debug_hooks->begin_function) (decl); /* Make function name accessible from other files, if appropriate. */ if (TREE_PUBLIC (decl)) { notice_global_symbol (decl); globalize_decl (decl); maybe_assemble_visibility (decl); } /* Do any machine/system dependent processing of the function name. */ #ifdef ASM_DECLARE_FUNCTION_NAME ASM_DECLARE_FUNCTION_NAME (asm_out_file, fnname, current_function_decl); #else /* Standard thing is just output label for the function. */ ASM_OUTPUT_LABEL (asm_out_file, fnname); #endif /* ASM_DECLARE_FUNCTION_NAME */ } /* Output assembler code associated with defining the size of the function. DECL describes the function. NAME is the function's name. */ void assemble_end_function (tree decl, const char *fnname) { #ifdef ASM_DECLARE_FUNCTION_SIZE ASM_DECLARE_FUNCTION_SIZE (asm_out_file, fnname, decl); #endif if (! CONSTANT_POOL_BEFORE_FUNCTION) { output_constant_pool (fnname, decl); function_section (decl); /* need to switch back */ } } /* Assemble code to leave SIZE bytes of zeros. */ void assemble_zeros (unsigned HOST_WIDE_INT size) { /* Do no output if -fsyntax-only. */ if (flag_syntax_only) return; #ifdef ASM_NO_SKIP_IN_TEXT /* The `space' pseudo in the text section outputs nop insns rather than 0s, so we must output 0s explicitly in the text section. 
*/ if ((ASM_NO_SKIP_IN_TEXT && in_text_section ()) || (ASM_NO_SKIP_IN_TEXT && in_unlikely_text_section ())) { unsigned HOST_WIDE_INT i; for (i = 0; i < size; i++) assemble_integer (const0_rtx, 1, BITS_PER_UNIT, 1); } else #endif if (size > 0) ASM_OUTPUT_SKIP (asm_out_file, size); } /* Assemble an alignment pseudo op for an ALIGN-bit boundary. */ void assemble_align (int align) { if (align > BITS_PER_UNIT) { ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT)); } } /* Assemble a string constant with the specified C string as contents. */ void assemble_string (const char *p, int size) { int pos = 0; int maximum = 2000; /* If the string is very long, split it up. */ while (pos < size) { int thissize = size - pos; if (thissize > maximum) thissize = maximum; ASM_OUTPUT_ASCII (asm_out_file, p, thissize); pos += thissize; p += thissize; } } #if defined ASM_OUTPUT_ALIGNED_DECL_LOCAL #define ASM_EMIT_LOCAL(decl, name, size, rounded) \ ASM_OUTPUT_ALIGNED_DECL_LOCAL (asm_out_file, decl, name, size, DECL_ALIGN (decl)) #else #if defined ASM_OUTPUT_ALIGNED_LOCAL #define ASM_EMIT_LOCAL(decl, name, size, rounded) \ ASM_OUTPUT_ALIGNED_LOCAL (asm_out_file, name, size, DECL_ALIGN (decl)) #else #define ASM_EMIT_LOCAL(decl, name, size, rounded) \ ASM_OUTPUT_LOCAL (asm_out_file, name, size, rounded) #endif #endif #if defined ASM_OUTPUT_ALIGNED_BSS #define ASM_EMIT_BSS(decl, name, size, rounded) \ ASM_OUTPUT_ALIGNED_BSS (asm_out_file, decl, name, size, DECL_ALIGN (decl)) #else #if defined ASM_OUTPUT_BSS #define ASM_EMIT_BSS(decl, name, size, rounded) \ ASM_OUTPUT_BSS (asm_out_file, decl, name, size, rounded) #else #undef ASM_EMIT_BSS #endif #endif #if defined ASM_OUTPUT_ALIGNED_DECL_COMMON #define ASM_EMIT_COMMON(decl, name, size, rounded) \ ASM_OUTPUT_ALIGNED_DECL_COMMON (asm_out_file, decl, name, size, DECL_ALIGN (decl)) #else #if defined ASM_OUTPUT_ALIGNED_COMMON #define ASM_EMIT_COMMON(decl, name, size, rounded) \ ASM_OUTPUT_ALIGNED_COMMON (asm_out_file, name, size, DECL_ALIGN (decl)) #else #define ASM_EMIT_COMMON(decl, name, size, rounded) \ ASM_OUTPUT_COMMON (asm_out_file, name, size, rounded) #endif #endif static bool asm_emit_uninitialised (tree decl, const char *name, unsigned HOST_WIDE_INT size ATTRIBUTE_UNUSED, unsigned HOST_WIDE_INT rounded ATTRIBUTE_UNUSED) { enum { asm_dest_common, asm_dest_bss, asm_dest_local } destination = asm_dest_local; /* ??? We should handle .bss via select_section mechanisms rather than via special target hooks. That would eliminate this special case. 
*/ if (TREE_PUBLIC (decl)) { if (!DECL_COMMON (decl)) #ifdef ASM_EMIT_BSS destination = asm_dest_bss; #else return false; #endif else destination = asm_dest_common; } if (destination == asm_dest_bss) globalize_decl (decl); resolve_unique_section (decl, 0, flag_data_sections); if (flag_shared_data) { switch (destination) { #ifdef ASM_OUTPUT_SHARED_BSS case asm_dest_bss: ASM_OUTPUT_SHARED_BSS (asm_out_file, decl, name, size, rounded); return; #endif #ifdef ASM_OUTPUT_SHARED_COMMON case asm_dest_common: ASM_OUTPUT_SHARED_COMMON (asm_out_file, name, size, rounded); return; #endif #ifdef ASM_OUTPUT_SHARED_LOCAL case asm_dest_local: ASM_OUTPUT_SHARED_LOCAL (asm_out_file, name, size, rounded); return; #endif default: break; } } switch (destination) { #ifdef ASM_EMIT_BSS case asm_dest_bss: ASM_EMIT_BSS (decl, name, size, rounded); break; #endif case asm_dest_common: ASM_EMIT_COMMON (decl, name, size, rounded); break; case asm_dest_local: ASM_EMIT_LOCAL (decl, name, size, rounded); break; default: abort (); } return true; } /* Assemble everything that is needed for a variable or function declaration. Not used for automatic variables, and not used for function definitions. Should not be called for variables of incomplete structure type. TOP_LEVEL is nonzero if this variable has file scope. AT_END is nonzero if this is the special handling, at end of compilation, to define things that have had only tentative definitions. DONT_OUTPUT_DATA if nonzero means don't actually output the initial value (that will be done by the caller). */ void assemble_variable (tree decl, int top_level ATTRIBUTE_UNUSED, int at_end ATTRIBUTE_UNUSED, int dont_output_data) { const char *name; unsigned int align; int reloc = 0; rtx decl_rtl; if (lang_hooks.decls.prepare_assemble_variable) lang_hooks.decls.prepare_assemble_variable (decl); last_assemble_variable_decl = 0; /* Normally no need to say anything here for external references, since assemble_external is called by the language-specific code when a declaration is first seen. */ if (DECL_EXTERNAL (decl)) return; /* Output no assembler code for a function declaration. Only definitions of functions output anything. */ if (TREE_CODE (decl) == FUNCTION_DECL) return; /* Do nothing for global register variables. */ if (DECL_RTL_SET_P (decl) && REG_P (DECL_RTL (decl))) { TREE_ASM_WRITTEN (decl) = 1; return; } /* If type was incomplete when the variable was declared, see if it is complete now. */ if (DECL_SIZE (decl) == 0) layout_decl (decl, 0); /* Still incomplete => don't allocate it; treat the tentative defn (which is what it must have been) as an `extern' reference. */ if (!dont_output_data && DECL_SIZE (decl) == 0) { error ("%Jstorage size of `%D' isn't known", decl, decl); TREE_ASM_WRITTEN (decl) = 1; return; } /* The first declaration of a variable that comes through this function decides whether it is global (in C, has external linkage) or local (in C, has internal linkage). So do nothing more if this function has already run. */ if (TREE_ASM_WRITTEN (decl)) return; /* Make sure targetm.encode_section_info is invoked before we set ASM_WRITTEN. */ decl_rtl = DECL_RTL (decl); TREE_ASM_WRITTEN (decl) = 1; /* Do no output if -fsyntax-only. */ if (flag_syntax_only) return; app_disable (); if (! dont_output_data && ! host_integerp (DECL_SIZE_UNIT (decl), 1)) { error ("%Jsize of variable '%D' is too large", decl, decl); return; } name = XSTR (XEXP (decl_rtl, 0), 0); if (TREE_PUBLIC (decl) && DECL_NAME (decl)) notice_global_symbol (decl); /* Compute the alignment of this data. 
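
   As a worked illustration (the numbers depend on the target macros), a
   declaration such as

       static const char msg[] = "hello, world";

   starts out with a DECL_ALIGN of 8 bits; DATA_ALIGNMENT or
   CONSTANT_ALIGNMENT may raise that to, say, 32 bits so string operations
   can assume an aligned start, and the result is clamped to
   MAX_OFILE_ALIGNMENT when the object file format cannot express the
   requested alignment.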
*/ align = DECL_ALIGN (decl); /* In the case for initialing an array whose length isn't specified, where we have not yet been able to do the layout, figure out the proper alignment now. */ if (dont_output_data && DECL_SIZE (decl) == 0 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) align = MAX (align, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (decl)))); /* Some object file formats have a maximum alignment which they support. In particular, a.out format supports a maximum alignment of 4. */ #ifndef MAX_OFILE_ALIGNMENT #define MAX_OFILE_ALIGNMENT BIGGEST_ALIGNMENT #endif if (align > MAX_OFILE_ALIGNMENT) { warning ("%Jalignment of '%D' is greater than maximum object " "file alignment. Using %d", decl, decl, MAX_OFILE_ALIGNMENT/BITS_PER_UNIT); align = MAX_OFILE_ALIGNMENT; } /* On some machines, it is good to increase alignment sometimes. */ if (! DECL_USER_ALIGN (decl)) { #ifdef DATA_ALIGNMENT align = DATA_ALIGNMENT (TREE_TYPE (decl), align); #endif #ifdef CONSTANT_ALIGNMENT if (DECL_INITIAL (decl) != 0 && DECL_INITIAL (decl) != error_mark_node) align = CONSTANT_ALIGNMENT (DECL_INITIAL (decl), align); #endif } /* Reset the alignment in case we have made it tighter, so we can benefit from it in get_pointer_alignment. */ DECL_ALIGN (decl) = align; set_mem_align (decl_rtl, align); if (TREE_PUBLIC (decl)) maybe_assemble_visibility (decl); /* Output any data that we will need to use the address of. */ if (DECL_INITIAL (decl) == error_mark_node) reloc = contains_pointers_p (TREE_TYPE (decl)) ? 3 : 0; else if (DECL_INITIAL (decl)) { reloc = compute_reloc_for_constant (DECL_INITIAL (decl)); output_addressed_constants (DECL_INITIAL (decl)); } resolve_unique_section (decl, reloc, flag_data_sections); /* Handle uninitialized definitions. */ /* If the decl has been given an explicit section name, then it isn't common, and shouldn't be handled as such. */ if (DECL_SECTION_NAME (decl) || dont_output_data) ; /* We don't implement common thread-local data at present. */ else if (DECL_THREAD_LOCAL (decl)) { if (DECL_COMMON (decl)) sorry ("thread-local COMMON data not implemented"); } else if (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node || (flag_zero_initialized_in_bss /* Leave constant zeroes in .rodata so they can be shared. */ && !TREE_READONLY (decl) && initializer_zerop (DECL_INITIAL (decl)))) { unsigned HOST_WIDE_INT size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); unsigned HOST_WIDE_INT rounded = size; /* Don't allocate zero bytes of common, since that means "undefined external" in the linker. */ if (size == 0) rounded = 1; /* Round size up to multiple of BIGGEST_ALIGNMENT bits so that each uninitialized object starts on such a boundary. */ rounded += (BIGGEST_ALIGNMENT / BITS_PER_UNIT) - 1; rounded = (rounded / (BIGGEST_ALIGNMENT / BITS_PER_UNIT) * (BIGGEST_ALIGNMENT / BITS_PER_UNIT)); #if !defined(ASM_OUTPUT_ALIGNED_COMMON) && !defined(ASM_OUTPUT_ALIGNED_DECL_COMMON) && !defined(ASM_OUTPUT_ALIGNED_BSS) if ((unsigned HOST_WIDE_INT) DECL_ALIGN (decl) / BITS_PER_UNIT > rounded) warning ("%Jrequested alignment for '%D' is greater than " "implemented alignment of %d", decl, decl, rounded); #endif /* If the target cannot output uninitialized but not common global data in .bss, then we have to use .data, so fall through. */ if (asm_emit_uninitialised (decl, name, size, rounded)) return; } /* Handle initialized definitions. Also handle uninitialized global definitions if -fno-common and the target doesn't support ASM_OUTPUT_BSS. */ /* First make the assembler name(s) global if appropriate. 
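
   On a typical ELF target this amounts to emitting a directive such as

       .globl counter

   through targetm.asm_out.globalize_label before the object's own label
   and data are written; the spelling of the directive is target-defined.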
*/ if (TREE_PUBLIC (decl) && DECL_NAME (decl)) globalize_decl (decl); /* Switch to the appropriate section. */ variable_section (decl, reloc); /* dbxout.c needs to know this. */ if (in_text_section () || in_unlikely_text_section ()) DECL_IN_TEXT_SECTION (decl) = 1; /* Output the alignment of this data. */ if (align > BITS_PER_UNIT) { ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT)); } /* Do any machine/system dependent processing of the object. */ #ifdef ASM_DECLARE_OBJECT_NAME last_assemble_variable_decl = decl; ASM_DECLARE_OBJECT_NAME (asm_out_file, name, decl); #else /* Standard thing is just output label for the object. */ ASM_OUTPUT_LABEL (asm_out_file, name); #endif /* ASM_DECLARE_OBJECT_NAME */ if (!dont_output_data) { if (DECL_INITIAL (decl) && DECL_INITIAL (decl) != error_mark_node) /* Output the actual data. */ output_constant (DECL_INITIAL (decl), tree_low_cst (DECL_SIZE_UNIT (decl), 1), align); else /* Leave space for it. */ assemble_zeros (tree_low_cst (DECL_SIZE_UNIT (decl), 1)); } } /* Return 1 if type TYPE contains any pointers. */ static int contains_pointers_p (tree type) { switch (TREE_CODE (type)) { case POINTER_TYPE: case REFERENCE_TYPE: /* I'm not sure whether OFFSET_TYPE needs this treatment, so I'll play safe and return 1. */ case OFFSET_TYPE: return 1; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { tree fields; /* For a type that has fields, see if the fields have pointers. */ for (fields = TYPE_FIELDS (type); fields; fields = TREE_CHAIN (fields)) if (TREE_CODE (fields) == FIELD_DECL && contains_pointers_p (TREE_TYPE (fields))) return 1; return 0; } case ARRAY_TYPE: /* An array type contains pointers if its element type does. */ return contains_pointers_p (TREE_TYPE (type)); default: return 0; } } #ifdef ASM_OUTPUT_EXTERNAL /* True if DECL is a function decl for which no out-of-line copy exists. It is assumed that DECL's assembler name has been set. */ static bool incorporeal_function_p (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl)) { const char *name; if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE (decl) == BUILT_IN_ALLOCA) return true; name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); if (strncmp (name, "__builtin_", strlen ("__builtin_")) == 0) return true; } return false; } #endif /* Output something to declare an external symbol to the assembler. (Most assemblers don't need this, so we normally output nothing.) Do nothing if DECL is not external. */ void assemble_external (tree decl ATTRIBUTE_UNUSED) { /* Because most platforms do not define ASM_OUTPUT_EXTERNAL, the main body of this code is only rarely exercised. To provide some testing, on all platforms, we make sure that the ASM_OUT_FILE is open. If it's not, we should not be calling this function. */ if (!asm_out_file) abort (); #ifdef ASM_OUTPUT_EXTERNAL if (DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)) { rtx rtl = DECL_RTL (decl); if (MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF && !SYMBOL_REF_USED (XEXP (rtl, 0)) && !incorporeal_function_p (decl)) { /* Some systems do require some output. */ SYMBOL_REF_USED (XEXP (rtl, 0)) = 1; ASM_OUTPUT_EXTERNAL (asm_out_file, decl, XSTR (XEXP (rtl, 0), 0)); } } #endif } /* Similar, for calling a library function FUN. */ void assemble_external_libcall (rtx fun) { /* Declare library function name external when first used, if nec. */ if (! 
SYMBOL_REF_USED (fun)) { SYMBOL_REF_USED (fun) = 1; targetm.asm_out.external_libcall (fun); } } /* Assemble a label named NAME. */ void assemble_label (const char *name) { ASM_OUTPUT_LABEL (asm_out_file, name); } /* Set the symbol_referenced flag for ID. */ void mark_referenced (tree id) { TREE_SYMBOL_REFERENCED (id) = 1; } /* Set the symbol_referenced flag for DECL and notify callgraph. */ void mark_decl_referenced (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL) cgraph_mark_needed_node (cgraph_node (decl)); else if (TREE_CODE (decl) == VAR_DECL) cgraph_varpool_mark_needed_node (cgraph_varpool_node (decl)); /* else do nothing - we can get various sorts of CST nodes here, which do not need to be marked. */ } /* Output to FILE a reference to the assembler name of a C-level name NAME. If NAME starts with a *, the rest of NAME is output verbatim. Otherwise NAME is transformed in an implementation-defined way (usually by the addition of an underscore). Many macros in the tm file are defined to call this function. */ void assemble_name (FILE *file, const char *name) { const char *real_name; tree id; real_name = targetm.strip_name_encoding (name); id = maybe_get_identifier (real_name); if (id) mark_referenced (id); if (name[0] == '*') fputs (&name[1], file); else ASM_OUTPUT_LABELREF (file, name); } /* Allocate SIZE bytes writable static space with a gensym name and return an RTX to refer to its address. */ rtx assemble_static_space (unsigned HOST_WIDE_INT size) { char name[12]; const char *namestring; rtx x; #if 0 if (flag_shared_data) data_section (); #endif ASM_GENERATE_INTERNAL_LABEL (name, "LF", const_labelno); ++const_labelno; namestring = ggc_strdup (name); x = gen_rtx_SYMBOL_REF (Pmode, namestring); SYMBOL_REF_FLAGS (x) = SYMBOL_FLAG_LOCAL; #ifdef ASM_OUTPUT_ALIGNED_DECL_LOCAL ASM_OUTPUT_ALIGNED_DECL_LOCAL (asm_out_file, NULL_TREE, name, size, BIGGEST_ALIGNMENT); #else #ifdef ASM_OUTPUT_ALIGNED_LOCAL ASM_OUTPUT_ALIGNED_LOCAL (asm_out_file, name, size, BIGGEST_ALIGNMENT); #else { /* Round size up to multiple of BIGGEST_ALIGNMENT bits so that each uninitialized object starts on such a boundary. */ /* Variable `rounded' might or might not be used in ASM_OUTPUT_LOCAL. */ unsigned HOST_WIDE_INT rounded ATTRIBUTE_UNUSED = ((size + (BIGGEST_ALIGNMENT / BITS_PER_UNIT) - 1) / (BIGGEST_ALIGNMENT / BITS_PER_UNIT) * (BIGGEST_ALIGNMENT / BITS_PER_UNIT)); ASM_OUTPUT_LOCAL (asm_out_file, name, size, rounded); } #endif #endif return x; } /* Assemble the static constant template for function entry trampolines. This is done at most once per compilation. Returns an RTX for the address of the template. */ static GTY(()) rtx initial_trampoline; #ifdef TRAMPOLINE_TEMPLATE rtx assemble_trampoline_template (void) { char label[256]; const char *name; int align; rtx symbol; if (initial_trampoline) return initial_trampoline; /* By default, put trampoline templates in read-only data section. */ #ifdef TRAMPOLINE_SECTION TRAMPOLINE_SECTION (); #else readonly_data_section (); #endif /* Write the assembler code to define one. */ align = floor_log2 (TRAMPOLINE_ALIGNMENT / BITS_PER_UNIT); if (align > 0) { ASM_OUTPUT_ALIGN (asm_out_file, align); } targetm.asm_out.internal_label (asm_out_file, "LTRAMP", 0); TRAMPOLINE_TEMPLATE (asm_out_file); /* Record the rtl to refer to it. 
*/ ASM_GENERATE_INTERNAL_LABEL (label, "LTRAMP", 0); name = ggc_strdup (label); symbol = gen_rtx_SYMBOL_REF (Pmode, name); SYMBOL_REF_FLAGS (symbol) = SYMBOL_FLAG_LOCAL; initial_trampoline = gen_rtx_MEM (BLKmode, symbol); set_mem_align (initial_trampoline, TRAMPOLINE_ALIGNMENT); return initial_trampoline; } #endif /* A and B are either alignments or offsets. Return the minimum alignment that may be assumed after adding the two together. */ static inline unsigned min_align (unsigned int a, unsigned int b) { return (a | b) & -(a | b); } /* Return the assembler directive for creating a given kind of integer object. SIZE is the number of bytes in the object and ALIGNED_P indicates whether it is known to be aligned. Return NULL if the assembly dialect has no such directive. The returned string should be printed at the start of a new line and be followed immediately by the object's initial value. */ const char * integer_asm_op (int size, int aligned_p) { struct asm_int_op *ops; if (aligned_p) ops = &targetm.asm_out.aligned_op; else ops = &targetm.asm_out.unaligned_op; switch (size) { case 1: return targetm.asm_out.byte_op; case 2: return ops->hi; case 4: return ops->si; case 8: return ops->di; case 16: return ops->ti; default: return NULL; } } /* Use directive OP to assemble an integer object X. Print OP at the start of the line, followed immediately by the value of X. */ void assemble_integer_with_op (const char *op, rtx x) { fputs (op, asm_out_file); output_addr_const (asm_out_file, x); fputc ('\n', asm_out_file); } /* The default implementation of the asm_out.integer target hook. */ bool default_assemble_integer (rtx x ATTRIBUTE_UNUSED, unsigned int size ATTRIBUTE_UNUSED, int aligned_p ATTRIBUTE_UNUSED) { const char *op = integer_asm_op (size, aligned_p); return op && (assemble_integer_with_op (op, x), true); } /* Assemble the integer constant X into an object of SIZE bytes. ALIGN is the alignment of the integer in bits. Return 1 if we were able to output the constant, otherwise 0. If FORCE is nonzero, abort if we can't output the constant. */ bool assemble_integer (rtx x, unsigned int size, unsigned int align, int force) { int aligned_p; aligned_p = (align >= MIN (size * BITS_PER_UNIT, BIGGEST_ALIGNMENT)); /* See if the target hook can handle this kind of object. */ if (targetm.asm_out.integer (x, size, aligned_p)) return true; /* If the object is a multi-byte one, try splitting it up. Split it into words it if is multi-word, otherwise split it into bytes. */ if (size > 1) { enum machine_mode omode, imode; unsigned int subalign; unsigned int subsize, i; subsize = size > UNITS_PER_WORD? UNITS_PER_WORD : 1; subalign = MIN (align, subsize * BITS_PER_UNIT); omode = mode_for_size (subsize * BITS_PER_UNIT, MODE_INT, 0); imode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0); for (i = 0; i < size; i += subsize) { rtx partial = simplify_subreg (omode, x, imode, i); if (!partial || !assemble_integer (partial, subsize, subalign, 0)) break; } if (i == size) return true; /* If we've printed some of it, but not all of it, there's no going back now. */ if (i > 0) abort (); } if (force) abort (); return false; } void assemble_real (REAL_VALUE_TYPE d, enum machine_mode mode, unsigned int align) { long data[4]; int i; int bitsize, nelts, nunits, units_per; /* This is hairy. We have a quantity of known size. real_to_target will put it into an array of *host* longs, 32 bits per element (even if long is more than 32 bits). 
We need to determine the number of array elements that are occupied (nelts) and the number of *target* min-addressable units that will be occupied in the object file (nunits). We cannot assume that 32 divides the mode's bitsize (size * BITS_PER_UNIT) evenly. size * BITS_PER_UNIT is used here to make sure that padding bits (which might appear at either end of the value; real_to_target will include the padding bits in its output array) are included. */ nunits = GET_MODE_SIZE (mode); bitsize = nunits * BITS_PER_UNIT; nelts = CEIL (bitsize, 32); units_per = 32 / BITS_PER_UNIT; real_to_target (data, &d, mode); /* Put out the first word with the specified alignment. */ assemble_integer (GEN_INT (data[0]), MIN (nunits, units_per), align, 1); nunits -= units_per; /* Subsequent words need only 32-bit alignment. */ align = min_align (align, 32); for (i = 1; i < nelts; i++) { assemble_integer (GEN_INT (data[i]), MIN (nunits, units_per), align, 1); nunits -= units_per; } } /* Given an expression EXP with a constant value, reduce it to the sum of an assembler symbol and an integer. Store them both in the structure *VALUE. Abort if EXP does not reduce. */ struct addr_const GTY(()) { rtx base; HOST_WIDE_INT offset; }; static void decode_addr_const (tree exp, struct addr_const *value) { tree target = TREE_OPERAND (exp, 0); int offset = 0; rtx x; while (1) { if (TREE_CODE (target) == COMPONENT_REF && host_integerp (byte_position (TREE_OPERAND (target, 1)), 0)) { offset += int_byte_position (TREE_OPERAND (target, 1)); target = TREE_OPERAND (target, 0); } else if (TREE_CODE (target) == ARRAY_REF || TREE_CODE (target) == ARRAY_RANGE_REF) { offset += (tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (target)), 1) * tree_low_cst (TREE_OPERAND (target, 1), 0)); target = TREE_OPERAND (target, 0); } else break; } switch (TREE_CODE (target)) { case VAR_DECL: case FUNCTION_DECL: x = DECL_RTL (target); break; case LABEL_DECL: x = gen_rtx_MEM (FUNCTION_MODE, gen_rtx_LABEL_REF (VOIDmode, force_label_rtx (target))); break; case REAL_CST: case STRING_CST: case COMPLEX_CST: case CONSTRUCTOR: case INTEGER_CST: x = output_constant_def (target, 1); break; default: abort (); } if (!MEM_P (x)) abort (); x = XEXP (x, 0); value->base = x; value->offset = offset; } /* Uniquize all constants that appear in memory. Each constant in memory thus far output is recorded in `const_desc_table'. */ struct constant_descriptor_tree GTY(()) { /* A MEM for the constant. */ rtx rtl; /* The value of the constant. */ tree value; }; static GTY((param_is (struct constant_descriptor_tree))) htab_t const_desc_htab; static struct constant_descriptor_tree * build_constant_desc (tree); static void maybe_output_constant_def_contents (struct constant_descriptor_tree *, int); /* Compute a hash code for a constant expression. */ static hashval_t const_desc_hash (const void *ptr) { return const_hash_1 (((struct constant_descriptor_tree *)ptr)->value); } static hashval_t const_hash_1 (const tree exp) { const char *p; hashval_t hi; int len, i; enum tree_code code = TREE_CODE (exp); /* Either set P and LEN to the address and len of something to hash and exit the switch or return a value. 
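
   For the cases that set P and LEN and break out of the switch, the hash
   is the simple recurrence applied afterwards: starting from LEN, each
   byte folds in as hi = hi * 613 + byte.  As a tiny worked example, a
   two-byte buffer holding { 1, 2 } hashes to
   ((2 * 613 + 1) * 613 + 2) = 752153 (modulo the width of hashval_t).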
*/ switch (code) { case INTEGER_CST: p = (char *) &TREE_INT_CST (exp); len = sizeof TREE_INT_CST (exp); break; case REAL_CST: return real_hash (TREE_REAL_CST_PTR (exp)); case STRING_CST: p = TREE_STRING_POINTER (exp); len = TREE_STRING_LENGTH (exp); break; case COMPLEX_CST: return (const_hash_1 (TREE_REALPART (exp)) * 5 + const_hash_1 (TREE_IMAGPART (exp))); case CONSTRUCTOR: if (TREE_CODE (TREE_TYPE (exp)) == SET_TYPE) { char *tmp; len = int_size_in_bytes (TREE_TYPE (exp)); tmp = alloca (len); get_set_constructor_bytes (exp, (unsigned char *) tmp, len); p = tmp; break; } else { tree link; hi = 5 + int_size_in_bytes (TREE_TYPE (exp)); for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link)) if (TREE_VALUE (link)) hi = hi * 603 + const_hash_1 (TREE_VALUE (link)); return hi; } case ADDR_EXPR: case FDESC_EXPR: { struct addr_const value; decode_addr_const (exp, &value); if (GET_CODE (value.base) == SYMBOL_REF) { /* Don't hash the address of the SYMBOL_REF; only use the offset and the symbol name. */ hi = value.offset; p = XSTR (value.base, 0); for (i = 0; p[i] != 0; i++) hi = ((hi * 613) + (unsigned) (p[i])); } else if (GET_CODE (value.base) == LABEL_REF) hi = value.offset + CODE_LABEL_NUMBER (XEXP (value.base, 0)) * 13; else abort (); } return hi; case PLUS_EXPR: case MINUS_EXPR: return (const_hash_1 (TREE_OPERAND (exp, 0)) * 9 + const_hash_1 (TREE_OPERAND (exp, 1))); case NOP_EXPR: case CONVERT_EXPR: case NON_LVALUE_EXPR: return const_hash_1 (TREE_OPERAND (exp, 0)) * 7 + 2; default: /* A language specific constant. Just hash the code. */ return code; } /* Compute hashing function. */ hi = len; for (i = 0; i < len; i++) hi = ((hi * 613) + (unsigned) (p[i])); return hi; } /* Wrapper of compare_constant, for the htab interface. */ static int const_desc_eq (const void *p1, const void *p2) { return compare_constant (((struct constant_descriptor_tree *)p1)->value, ((struct constant_descriptor_tree *)p2)->value); } /* Compare t1 and t2, and return 1 only if they are known to result in the same bit pattern on output. */ static int compare_constant (const tree t1, const tree t2) { enum tree_code typecode; if (t1 == NULL_TREE) return t2 == NULL_TREE; if (t2 == NULL_TREE) return 0; if (TREE_CODE (t1) != TREE_CODE (t2)) return 0; switch (TREE_CODE (t1)) { case INTEGER_CST: /* Integer constants are the same only if the same width of type. */ if (TYPE_PRECISION (TREE_TYPE (t1)) != TYPE_PRECISION (TREE_TYPE (t2))) return 0; return tree_int_cst_equal (t1, t2); case REAL_CST: /* Real constants are the same only if the same width of type. */ if (TYPE_PRECISION (TREE_TYPE (t1)) != TYPE_PRECISION (TREE_TYPE (t2))) return 0; return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2)); case STRING_CST: if (TYPE_MODE (TREE_TYPE (t1)) != TYPE_MODE (TREE_TYPE (t2))) return 0; return (TREE_STRING_LENGTH (t1) == TREE_STRING_LENGTH (t2) && ! 
memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2), TREE_STRING_LENGTH (t1))); case COMPLEX_CST: return (compare_constant (TREE_REALPART (t1), TREE_REALPART (t2)) && compare_constant (TREE_IMAGPART (t1), TREE_IMAGPART (t2))); case CONSTRUCTOR: typecode = TREE_CODE (TREE_TYPE (t1)); if (typecode != TREE_CODE (TREE_TYPE (t2))) return 0; if (typecode == SET_TYPE) { int len = int_size_in_bytes (TREE_TYPE (t2)); unsigned char *tmp1, *tmp2; if (int_size_in_bytes (TREE_TYPE (t1)) != len) return 0; tmp1 = alloca (len); tmp2 = alloca (len); if (get_set_constructor_bytes (t1, tmp1, len) != NULL_TREE) return 0; if (get_set_constructor_bytes (t2, tmp2, len) != NULL_TREE) return 0; return memcmp (tmp1, tmp2, len) == 0; } else { tree l1, l2; if (typecode == ARRAY_TYPE) { HOST_WIDE_INT size_1 = int_size_in_bytes (TREE_TYPE (t1)); /* For arrays, check that the sizes all match. */ if (TYPE_MODE (TREE_TYPE (t1)) != TYPE_MODE (TREE_TYPE (t2)) || size_1 == -1 || size_1 != int_size_in_bytes (TREE_TYPE (t2))) return 0; } else { /* For record and union constructors, require exact type equality. */ if (TREE_TYPE (t1) != TREE_TYPE (t2)) return 0; } for (l1 = CONSTRUCTOR_ELTS (t1), l2 = CONSTRUCTOR_ELTS (t2); l1 && l2; l1 = TREE_CHAIN (l1), l2 = TREE_CHAIN (l2)) { /* Check that each value is the same... */ if (! compare_constant (TREE_VALUE (l1), TREE_VALUE (l2))) return 0; /* ... and that they apply to the same fields! */ if (typecode == ARRAY_TYPE) { if (! compare_constant (TREE_PURPOSE (l1), TREE_PURPOSE (l2))) return 0; } else { if (TREE_PURPOSE (l1) != TREE_PURPOSE (l2)) return 0; } } return l1 == NULL_TREE && l2 == NULL_TREE; } case ADDR_EXPR: case FDESC_EXPR: { struct addr_const value1, value2; decode_addr_const (t1, &value1); decode_addr_const (t2, &value2); return (value1.offset == value2.offset && strcmp (XSTR (value1.base, 0), XSTR (value2.base, 0)) == 0); } case PLUS_EXPR: case MINUS_EXPR: case RANGE_EXPR: return (compare_constant (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)) && compare_constant(TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1))); case NOP_EXPR: case CONVERT_EXPR: case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: return compare_constant (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0)); default: { tree nt1, nt2; nt1 = lang_hooks.expand_constant (t1); nt2 = lang_hooks.expand_constant (t2); if (nt1 != t1 || nt2 != t2) return compare_constant (nt1, nt2); else return 0; } } /* Should not get here. */ abort (); } /* Make a copy of the whole tree structure for a constant. This handles the same types of nodes that compare_constant handles. */ static tree copy_constant (tree exp) { switch (TREE_CODE (exp)) { case ADDR_EXPR: /* For ADDR_EXPR, we do not want to copy the decl whose address is requested. We do want to copy constants though. 
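
   For illustration:

       const char *p = "abcdef";     ADDR_EXPR of a STRING_CST; the string
                                     node itself is copied
       int *q = &some_global;        ADDR_EXPR of a VAR_DECL; the decl is
                                     shared and only the ADDR_EXPR node is
                                     duplicated by copy_node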
*/ if (TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (exp, 0))) == 'c') return build1 (TREE_CODE (exp), TREE_TYPE (exp), copy_constant (TREE_OPERAND (exp, 0))); else return copy_node (exp); case INTEGER_CST: case REAL_CST: case STRING_CST: return copy_node (exp); case COMPLEX_CST: return build_complex (TREE_TYPE (exp), copy_constant (TREE_REALPART (exp)), copy_constant (TREE_IMAGPART (exp))); case PLUS_EXPR: case MINUS_EXPR: return build (TREE_CODE (exp), TREE_TYPE (exp), copy_constant (TREE_OPERAND (exp, 0)), copy_constant (TREE_OPERAND (exp, 1))); case NOP_EXPR: case CONVERT_EXPR: case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: return build1 (TREE_CODE (exp), TREE_TYPE (exp), copy_constant (TREE_OPERAND (exp, 0))); case CONSTRUCTOR: { tree copy = copy_node (exp); tree list = copy_list (CONSTRUCTOR_ELTS (exp)); tree tail; CONSTRUCTOR_ELTS (copy) = list; for (tail = list; tail; tail = TREE_CHAIN (tail)) TREE_VALUE (tail) = copy_constant (TREE_VALUE (tail)); if (TREE_CODE (TREE_TYPE (exp)) == SET_TYPE) for (tail = list; tail; tail = TREE_CHAIN (tail)) TREE_PURPOSE (tail) = copy_constant (TREE_PURPOSE (tail)); return copy; } default: { tree t; t = lang_hooks.expand_constant (exp); if (t != exp) return copy_constant (t); else abort (); } } } /* Subroutine of output_constant_def: No constant equal to EXP is known to have been output. Make a constant descriptor to enter EXP in the hash table. Assign the label number and construct RTL to refer to the constant's location in memory. Caller is responsible for updating the hash table. */ static struct constant_descriptor_tree * build_constant_desc (tree exp) { rtx symbol; rtx rtl; char label[256]; int labelno; struct constant_descriptor_tree *desc; desc = ggc_alloc (sizeof (*desc)); desc->value = copy_constant (exp); /* Propagate marked-ness to copied constant. */ if (flag_mudflap && mf_marked_p (exp)) mf_mark (desc->value); /* Create a string containing the label name, in LABEL. */ labelno = const_labelno++; ASM_GENERATE_INTERNAL_LABEL (label, "LC", labelno); /* We have a symbol name; construct the SYMBOL_REF and the MEM. */ symbol = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (label)); SYMBOL_REF_FLAGS (symbol) = SYMBOL_FLAG_LOCAL; SYMBOL_REF_DECL (symbol) = desc->value; TREE_CONSTANT_POOL_ADDRESS_P (symbol) = 1; rtl = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (exp)), symbol); set_mem_attributes (rtl, exp, 1); set_mem_alias_set (rtl, 0); set_mem_alias_set (rtl, const_alias_set); /* Set flags or add text to the name to record information, such as that it is a local symbol. If the name is changed, the macro ASM_OUTPUT_LABELREF will have to know how to strip this information. This call might invalidate our local variable SYMBOL; we can't use it afterward. */ targetm.encode_section_info (exp, rtl, true); desc->rtl = rtl; return desc; } /* Return an rtx representing a reference to constant data in memory for the constant expression EXP. If assembler code for such a constant has already been output, return an rtx to refer to it. Otherwise, output such a constant in memory and generate an rtx for it. If DEFER is nonzero, this constant can be deferred and output only if referenced in the function after all optimizations. `const_desc_table' records which constants already have label strings. */ rtx output_constant_def (tree exp, int defer) { struct constant_descriptor_tree *desc; struct constant_descriptor_tree key; void **loc; /* Look up EXP in the table of constant descriptors. If we didn't find it, create a new one. 
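
   For illustration, two structurally equal constants (as decided by
   const_desc_hash and compare_constant) land in the same slot here, so

       rtx r1 = output_constant_def (exp1, 1);
       rtx r2 = output_constant_def (exp2, 1);   with exp2 equal to exp1

   hand back the same descriptor and hence the same
   (mem (symbol_ref ".LCn")) rtl; the label spelling comes from
   ASM_GENERATE_INTERNAL_LABEL and is target-defined.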
*/ key.value = exp; loc = htab_find_slot (const_desc_htab, &key, INSERT); desc = *loc; if (desc == 0) { desc = build_constant_desc (exp); *loc = desc; } maybe_output_constant_def_contents (desc, defer); return desc->rtl; } /* Subroutine of output_constant_def: Decide whether or not we need to output the constant DESC now, and if so, do it. */ static void maybe_output_constant_def_contents (struct constant_descriptor_tree *desc, int defer) { rtx symbol = XEXP (desc->rtl, 0); tree exp = desc->value; if (flag_syntax_only) return; if (TREE_ASM_WRITTEN (exp)) /* Already output; don't do it again. */ return; /* We can always defer constants as long as the context allows doing so. */ if (defer) { /* Increment n_deferred_constants if it exists. It needs to be at least as large as the number of constants actually referred to by the function. If it's too small we'll stop looking too early and fail to emit constants; if it's too large we'll only look through the entire function when we could have stopped earlier. */ if (cfun) n_deferred_constants++; return; } output_constant_def_contents (symbol); } /* We must output the constant data referred to by SYMBOL; do so. */ static void output_constant_def_contents (rtx symbol) { tree exp = SYMBOL_REF_DECL (symbol); const char *label = XSTR (symbol, 0); HOST_WIDE_INT size; /* Make sure any other constants whose addresses appear in EXP are assigned label numbers. */ int reloc = compute_reloc_for_constant (exp); /* Align the location counter as required by EXP's data type. */ int align = TYPE_ALIGN (TREE_TYPE (exp)); #ifdef CONSTANT_ALIGNMENT align = CONSTANT_ALIGNMENT (exp, align); #endif output_addressed_constants (exp); /* We are no longer deferring this constant. */ TREE_ASM_WRITTEN (exp) = 1; if (IN_NAMED_SECTION (exp)) named_section (exp, NULL, reloc); else targetm.asm_out.select_section (exp, reloc, align); if (align > BITS_PER_UNIT) { ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT)); } size = int_size_in_bytes (TREE_TYPE (exp)); if (TREE_CODE (exp) == STRING_CST) size = MAX (TREE_STRING_LENGTH (exp), size); /* Do any machine/system dependent processing of the constant. */ #ifdef ASM_DECLARE_CONSTANT_NAME ASM_DECLARE_CONSTANT_NAME (asm_out_file, label, exp, size); #else /* Standard thing is just output label for the constant. */ ASM_OUTPUT_LABEL (asm_out_file, label); #endif /* ASM_DECLARE_CONSTANT_NAME */ /* Output the value of EXP. */ output_constant (exp, size, align); if (flag_mudflap) mudflap_enqueue_constant (exp); } /* Look up EXP in the table of constant descriptors. Return the rtl if it has been emitted, else null. */ rtx lookup_constant_def (tree exp) { struct constant_descriptor_tree *desc; struct constant_descriptor_tree key; key.value = exp; desc = htab_find (const_desc_htab, &key); return (desc ? desc->rtl : NULL_RTX); } /* Used in the hash tables to avoid outputting the same constant twice. Unlike 'struct constant_descriptor_tree', RTX constants are output once per function, not once per file. */ /* ??? Only a few targets need per-function constant pools. Most can use one per-file pool. Should add a targetm bit to tell the difference. */ struct rtx_constant_pool GTY(()) { /* Pointers to first and last constant in pool, as ordered by offset. */ struct constant_descriptor_rtx *first; struct constant_descriptor_rtx *last; /* Hash facility for making memory-constants from constant rtl-expressions. 
It is used on RISC machines where immediate integer arguments and constant addresses are restricted so that such constants must be stored in memory. */ htab_t GTY((param_is (struct constant_descriptor_rtx))) const_rtx_htab; htab_t GTY((param_is (struct constant_descriptor_rtx))) const_rtx_sym_htab; /* Current offset in constant pool (does not include any machine-specific header). */ HOST_WIDE_INT offset; }; struct constant_descriptor_rtx GTY((chain_next ("%h.next"))) { struct constant_descriptor_rtx *next; rtx mem; rtx sym; rtx constant; HOST_WIDE_INT offset; hashval_t hash; enum machine_mode mode; unsigned int align; int labelno; int mark; }; /* Hash and compare functions for const_rtx_htab. */ static hashval_t const_desc_rtx_hash (const void *ptr) { const struct constant_descriptor_rtx *desc = ptr; return desc->hash; } static int const_desc_rtx_eq (const void *a, const void *b) { const struct constant_descriptor_rtx *x = a; const struct constant_descriptor_rtx *y = b; if (x->mode != y->mode) return 0; return rtx_equal_p (x->constant, y->constant); } /* Hash and compare functions for const_rtx_sym_htab. */ static hashval_t const_desc_rtx_sym_hash (const void *ptr) { const struct constant_descriptor_rtx *desc = ptr; return htab_hash_string (XSTR (desc->sym, 0)); } static int const_desc_rtx_sym_eq (const void *a, const void *b) { const struct constant_descriptor_rtx *x = a; const struct constant_descriptor_rtx *y = b; return XSTR (x->sym, 0) == XSTR (y->sym, 0); } /* This is the worker function for const_rtx_hash, called via for_each_rtx. */ static int const_rtx_hash_1 (rtx *xp, void *data) { unsigned HOST_WIDE_INT hwi; enum machine_mode mode; enum rtx_code code; hashval_t h, *hp; rtx x; x = *xp; code = GET_CODE (x); mode = GET_MODE (x); h = (hashval_t) code * 1048573 + mode; switch (code) { case CONST_INT: hwi = INTVAL (x); fold_hwi: { const int shift = sizeof (hashval_t) * CHAR_BIT; const int n = sizeof (HOST_WIDE_INT) / sizeof (hashval_t); int i; h ^= (hashval_t) hwi; for (i = 1; i < n; ++i) { hwi >>= shift; h ^= (hashval_t) hwi; } } break; case CONST_DOUBLE: if (mode == VOIDmode) { hwi = CONST_DOUBLE_LOW (x) ^ CONST_DOUBLE_HIGH (x); goto fold_hwi; } else h ^= real_hash (CONST_DOUBLE_REAL_VALUE (x)); break; case SYMBOL_REF: h ^= htab_hash_string (XSTR (x, 0)); break; case LABEL_REF: h = h * 251 + CODE_LABEL_NUMBER (XEXP (x, 0)); break; case UNSPEC: case UNSPEC_VOLATILE: h = h * 251 + XINT (x, 1); break; default: break; } hp = data; *hp = *hp * 509 + h; return 0; } /* Compute a hash value for X, which should be a constant. */ static hashval_t const_rtx_hash (rtx x) { hashval_t h = 0; for_each_rtx (&x, const_rtx_hash_1, &h); return h; } /* Initialize constant pool hashing for a new function. */ void init_varasm_status (struct function *f) { struct varasm_status *p; struct rtx_constant_pool *pool; p = ggc_alloc (sizeof (struct varasm_status)); f->varasm = p; pool = ggc_alloc (sizeof (struct rtx_constant_pool)); p->pool = pool; p->deferred_constants = 0; pool->const_rtx_htab = htab_create_ggc (31, const_desc_rtx_hash, const_desc_rtx_eq, NULL); pool->const_rtx_sym_htab = htab_create_ggc (31, const_desc_rtx_sym_hash, const_desc_rtx_sym_eq, NULL); pool->first = pool->last = NULL; pool->offset = 0; } /* Given a MINUS expression, simplify it if both sides include the same symbol. */ rtx simplify_subtraction (rtx x) { rtx r = simplify_rtx (x); return r ? r : x; } /* Given a constant rtx X, make (or find) a memory constant for its value and return a MEM rtx to refer to it in memory. 
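
   A typical use, sketched for illustration (not code from this file): a
   backend that cannot use some floating-point immediate directly can
   spill it to the pool and load it from memory instead:

       rtx mem = force_const_mem (DFmode, operands[1]);
       if (mem != NULL_RTX)
         emit_move_insn (operands[0], validize_mem (mem));

   A NULL_RTX return means the target vetoed the constant through
   targetm.cannot_force_const_mem; otherwise the MEM refers to a label
   such as ".LC1" in this function's constant pool.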
*/ rtx force_const_mem (enum machine_mode mode, rtx x) { struct constant_descriptor_rtx *desc, tmp; struct rtx_constant_pool *pool = cfun->varasm->pool; char label[256]; rtx def, symbol; hashval_t hash; unsigned int align; void **slot; /* If we're not allowed to drop X into the constant pool, don't. */ if (targetm.cannot_force_const_mem (x)) return NULL_RTX; /* Lookup the value in the hashtable. */ tmp.constant = x; tmp.mode = mode; hash = const_rtx_hash (x); slot = htab_find_slot_with_hash (pool->const_rtx_htab, &tmp, hash, INSERT); desc = *slot; /* If the constant was already present, return its memory. */ if (desc) return copy_rtx (desc->mem); /* Otherwise, create a new descriptor. */ desc = ggc_alloc (sizeof (*desc)); *slot = desc; /* Align the location counter as required by EXP's data type. */ align = GET_MODE_ALIGNMENT (mode == VOIDmode ? word_mode : mode); #ifdef CONSTANT_ALIGNMENT { tree type = lang_hooks.types.type_for_mode (mode, 0); if (type != NULL_TREE) align = CONSTANT_ALIGNMENT (make_tree (type, x), align); } #endif pool->offset += (align / BITS_PER_UNIT) - 1; pool->offset &= ~ ((align / BITS_PER_UNIT) - 1); desc->next = NULL; desc->constant = tmp.constant; desc->offset = pool->offset; desc->hash = hash; desc->mode = mode; desc->align = align; desc->labelno = const_labelno; desc->mark = 0; pool->offset += GET_MODE_SIZE (mode); if (pool->last) pool->last->next = desc; else pool->first = pool->last = desc; pool->last = desc; /* Create a string containing the label name, in LABEL. */ ASM_GENERATE_INTERNAL_LABEL (label, "LC", const_labelno); ++const_labelno; /* Construct the SYMBOL_REF. Make sure to mark it as belonging to the constants pool. */ desc->sym = symbol = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (label)); SYMBOL_REF_FLAGS (symbol) = SYMBOL_FLAG_LOCAL; CONSTANT_POOL_ADDRESS_P (symbol) = 1; current_function_uses_const_pool = 1; /* Insert the descriptor into the symbol cross-reference table too. */ slot = htab_find_slot (pool->const_rtx_sym_htab, desc, INSERT); if (*slot) abort (); *slot = desc; /* Construct the MEM. */ desc->mem = def = gen_rtx_MEM (mode, symbol); set_mem_attributes (def, lang_hooks.types.type_for_mode (mode, 0), 1); RTX_UNCHANGING_P (def) = 1; /* If we're dropping a label to the constant pool, make sure we don't delete it. */ if (GET_CODE (x) == LABEL_REF) LABEL_PRESERVE_P (XEXP (x, 0)) = 1; return copy_rtx (def); } /* Given a SYMBOL_REF with CONSTANT_POOL_ADDRESS_P true, return a pointer to the corresponding constant_descriptor_rtx structure. */ static struct constant_descriptor_rtx * find_pool_constant (struct rtx_constant_pool *pool, rtx sym) { struct constant_descriptor_rtx tmp; tmp.sym = sym; return htab_find (pool->const_rtx_sym_htab, &tmp); } /* Given a constant pool SYMBOL_REF, return the corresponding constant. */ rtx get_pool_constant (rtx addr) { return find_pool_constant (cfun->varasm->pool, addr)->constant; } /* Given a constant pool SYMBOL_REF, return the corresponding constant and whether it has been output or not. */ rtx get_pool_constant_mark (rtx addr, bool *pmarked) { struct constant_descriptor_rtx *desc; desc = find_pool_constant (cfun->varasm->pool, addr); *pmarked = (desc->mark != 0); return desc->constant; } /* Likewise, but for the constant pool of a specific function. */ rtx get_pool_constant_for_function (struct function *f, rtx addr) { return find_pool_constant (f->varasm->pool, addr)->constant; } /* Similar, return the mode. 
*/ enum machine_mode get_pool_mode (rtx addr) { return find_pool_constant (cfun->varasm->pool, addr)->mode; } enum machine_mode get_pool_mode_for_function (struct function *f, rtx addr) { return find_pool_constant (f->varasm->pool, addr)->mode; } /* Similar, return the offset in the constant pool. */ int get_pool_offset (rtx addr) { return find_pool_constant (cfun->varasm->pool, addr)->offset; } /* Return the size of the constant pool. */ int get_pool_size (void) { return cfun->varasm->pool->offset; } /* Worker function for output_constant_pool_1. Emit assembly for X in MODE with known alignment ALIGN. */ static void output_constant_pool_2 (enum machine_mode mode, rtx x, unsigned int align) { switch (GET_MODE_CLASS (mode)) { case MODE_FLOAT: if (GET_CODE (x) != CONST_DOUBLE) abort (); else { REAL_VALUE_TYPE r; REAL_VALUE_FROM_CONST_DOUBLE (r, x); assemble_real (r, mode, align); } break; case MODE_INT: case MODE_PARTIAL_INT: assemble_integer (x, GET_MODE_SIZE (mode), align, 1); break; case MODE_VECTOR_FLOAT: case MODE_VECTOR_INT: { int i, units; enum machine_mode submode = GET_MODE_INNER (mode); unsigned int subalign = MIN (align, GET_MODE_BITSIZE (submode)); if (GET_CODE (x) != CONST_VECTOR) abort (); units = CONST_VECTOR_NUNITS (x); for (i = 0; i < units; i++) { rtx elt = CONST_VECTOR_ELT (x, i); output_constant_pool_2 (submode, elt, i ? subalign : align); } } break; default: abort (); } } /* Worker function for output_constant_pool. Emit POOL. */ static void output_constant_pool_1 (struct constant_descriptor_rtx *desc) { rtx x, tmp; if (!desc->mark) return; x = desc->constant; /* See if X is a LABEL_REF (or a CONST referring to a LABEL_REF) whose CODE_LABEL has been deleted. This can occur if a jump table is eliminated by optimization. If so, write a constant of zero instead. Note that this can also happen by turning the CODE_LABEL into a NOTE. */ /* ??? This seems completely and utterly wrong. Certainly it's not true for NOTE_INSN_DELETED_LABEL, but I disbelieve proper functioning even with INSN_DELETED_P and friends. */ tmp = x; switch (GET_CODE (x)) { case CONST: if (GET_CODE (XEXP (x, 0)) != PLUS || GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF) break; tmp = XEXP (XEXP (x, 0), 0); /* FALLTHRU */ case LABEL_REF: tmp = XEXP (x, 0); if (INSN_DELETED_P (tmp) || (GET_CODE (tmp) == NOTE && NOTE_LINE_NUMBER (tmp) == NOTE_INSN_DELETED)) { abort (); x = const0_rtx; } break; default: break; } /* First switch to correct section. */ targetm.asm_out.select_rtx_section (desc->mode, x, desc->align); #ifdef ASM_OUTPUT_SPECIAL_POOL_ENTRY ASM_OUTPUT_SPECIAL_POOL_ENTRY (asm_out_file, x, desc->mode, desc->align, desc->labelno, done); #endif assemble_align (desc->align); /* Output the label. */ targetm.asm_out.internal_label (asm_out_file, "LC", desc->labelno); /* Output the data. */ output_constant_pool_2 (desc->mode, x, desc->align); /* Make sure all constants in SECTION_MERGE and not SECTION_STRINGS sections have proper size. */ if (desc->align > GET_MODE_BITSIZE (desc->mode) && in_section == in_named && get_named_section_flags (in_named_name) & SECTION_MERGE) assemble_align (desc->align); #ifdef ASM_OUTPUT_SPECIAL_POOL_ENTRY done: #endif return; } /* Given a SYMBOL_REF CURRENT_RTX, mark it and all constants it refers to as used. Emit referenced deferred strings. This function can be used with for_each_rtx to mark all SYMBOL_REFs in an rtx. 
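mark_constants below does exactly that:

      for_each_rtx (&PATTERN (insn), mark_constant, pool);

   Returning -1 from the callback tells for_each_rtx not to walk into the sub-expressions of the SYMBOL_REF just handled, while returning 0 lets the traversal continue normally.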
*/ static int mark_constant (rtx *current_rtx, void *data) { struct rtx_constant_pool *pool = data; rtx x = *current_rtx; if (x == NULL_RTX || GET_CODE (x) != SYMBOL_REF) return 0; if (CONSTANT_POOL_ADDRESS_P (x)) { struct constant_descriptor_rtx *desc = find_pool_constant (pool, x); if (desc->mark == 0) { desc->mark = 1; for_each_rtx (&desc->constant, mark_constant, pool); } } else if (TREE_CONSTANT_POOL_ADDRESS_P (x)) { tree exp = SYMBOL_REF_DECL (x); if (!TREE_ASM_WRITTEN (exp)) { n_deferred_constants--; output_constant_def_contents (x); } } return -1; } /* Look through appropriate parts of INSN, marking all entries in the constant pool which are actually being used. Entries that are only referenced by other constants are also marked as used. Emit deferred strings that are used. */ static void mark_constants (struct rtx_constant_pool *pool, rtx insn) { if (!INSN_P (insn)) return; /* Insns may appear inside a SEQUENCE. Only check the patterns of insns, not any notes that may be attached. We don't want to mark a constant just because it happens to appear in a REG_EQUIV note. */ if (GET_CODE (PATTERN (insn)) == SEQUENCE) { rtx seq = PATTERN (insn); int i, n = XVECLEN (seq, 0); for (i = 0; i < n; ++i) { rtx subinsn = XVECEXP (seq, 0, i); if (INSN_P (subinsn)) for_each_rtx (&PATTERN (subinsn), mark_constant, pool); } } else for_each_rtx (&PATTERN (insn), mark_constant, pool); } /* Look through the instructions for this function, and mark all the entries in POOL which are actually being used. Emit deferred constants which have indeed been used. */ static void mark_constant_pool (struct rtx_constant_pool *pool) { rtx insn, link; if (pool->first == 0 && n_deferred_constants == 0) return; for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) mark_constants (pool, insn); for (link = current_function_epilogue_delay_list; link; link = XEXP (link, 1)) mark_constants (pool, XEXP (link, 0)); } /* Write all the constants in the constant pool. */ void output_constant_pool (const char *fnname ATTRIBUTE_UNUSED, tree fndecl ATTRIBUTE_UNUSED) { struct rtx_constant_pool *pool = cfun->varasm->pool; struct constant_descriptor_rtx *desc; /* It is possible for gcc to call force_const_mem and then to later discard the instructions which refer to the constant. In such a case we do not need to output the constant. */ mark_constant_pool (pool); #ifdef ASM_OUTPUT_POOL_PROLOGUE ASM_OUTPUT_POOL_PROLOGUE (asm_out_file, fnname, fndecl, pool->offset); #endif for (desc = pool->first; desc ; desc = desc->next) output_constant_pool_1 (desc); #ifdef ASM_OUTPUT_POOL_EPILOGUE ASM_OUTPUT_POOL_EPILOGUE (asm_out_file, fnname, fndecl, pool->offset); #endif } /* Determine what kind of relocations EXP may need. */ int compute_reloc_for_constant (tree exp) { int reloc = 0, reloc2; tree tem; /* Give the front-end a chance to convert VALUE to something that looks more like a constant to the back-end. */ exp = lang_hooks.expand_constant (exp); switch (TREE_CODE (exp)) { case ADDR_EXPR: case FDESC_EXPR: /* Go inside any operations that get_inner_reference can handle and see if what's inside is a constant: no need to do anything here for addresses of variables or functions. 
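The returned value is a small bitmask: bit 0 set means the constant refers to a local (non-public) symbol, bit 1 set means it refers to a global one.  Callers such as categorize_decl_for_section below test reloc and (reloc & 2) to decide how dynamic a section the data needs.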
*/ for (tem = TREE_OPERAND (exp, 0); handled_component_p (tem); tem = TREE_OPERAND (tem, 0)) ; if (TREE_PUBLIC (tem)) reloc |= 2; else reloc |= 1; break; case PLUS_EXPR: reloc = compute_reloc_for_constant (TREE_OPERAND (exp, 0)); reloc |= compute_reloc_for_constant (TREE_OPERAND (exp, 1)); break; case MINUS_EXPR: reloc = compute_reloc_for_constant (TREE_OPERAND (exp, 0)); reloc2 = compute_reloc_for_constant (TREE_OPERAND (exp, 1)); /* The difference of two local labels is computable at link time. */ if (reloc == 1 && reloc2 == 1) reloc = 0; else reloc |= reloc2; break; case NOP_EXPR: case CONVERT_EXPR: case NON_LVALUE_EXPR: reloc = compute_reloc_for_constant (TREE_OPERAND (exp, 0)); break; case CONSTRUCTOR: for (tem = CONSTRUCTOR_ELTS (exp); tem; tem = TREE_CHAIN (tem)) if (TREE_VALUE (tem) != 0) reloc |= compute_reloc_for_constant (TREE_VALUE (tem)); break; default: break; } return reloc; } /* Find all the constants whose addresses are referenced inside of EXP, and make sure assembler code with a label has been output for each one. Indicate whether an ADDR_EXPR has been encountered. */ static void output_addressed_constants (tree exp) { tree tem; /* Give the front-end a chance to convert VALUE to something that looks more like a constant to the back-end. */ exp = lang_hooks.expand_constant (exp); switch (TREE_CODE (exp)) { case ADDR_EXPR: case FDESC_EXPR: /* Go inside any operations that get_inner_reference can handle and see if what's inside is a constant: no need to do anything here for addresses of variables or functions. */ for (tem = TREE_OPERAND (exp, 0); handled_component_p (tem); tem = TREE_OPERAND (tem, 0)) ; if (TREE_CODE_CLASS (TREE_CODE (tem)) == 'c' || TREE_CODE (tem) == CONSTRUCTOR) output_constant_def (tem, 0); break; case PLUS_EXPR: case MINUS_EXPR: output_addressed_constants (TREE_OPERAND (exp, 1)); /* Fall through. */ case NOP_EXPR: case CONVERT_EXPR: case NON_LVALUE_EXPR: output_addressed_constants (TREE_OPERAND (exp, 0)); break; case CONSTRUCTOR: for (tem = CONSTRUCTOR_ELTS (exp); tem; tem = TREE_CHAIN (tem)) if (TREE_VALUE (tem) != 0) output_addressed_constants (TREE_VALUE (tem)); break; default: break; } } /* Return nonzero if VALUE is a valid constant-valued expression for use in initializing a static variable; one that can be an element of a "constant" initializer. Return null_pointer_node if the value is absolute; if it is relocatable, return the variable that determines the relocation. We assume that VALUE has been folded as much as possible; therefore, we do not need to check for such things as arithmetic-combinations of integers. */ tree initializer_constant_valid_p (tree value, tree endtype) { /* Give the front-end a chance to convert VALUE to something that looks more like a constant to the back-end. */ value = lang_hooks.expand_constant (value); switch (TREE_CODE (value)) { case CONSTRUCTOR: if ((TREE_CODE (TREE_TYPE (value)) == UNION_TYPE || TREE_CODE (TREE_TYPE (value)) == RECORD_TYPE) && TREE_CONSTANT (value) && CONSTRUCTOR_ELTS (value)) { tree elt; bool absolute = true; for (elt = CONSTRUCTOR_ELTS (value); elt; elt = TREE_CHAIN (elt)) { tree reloc; value = TREE_VALUE (elt); reloc = initializer_constant_valid_p (value, TREE_TYPE (value)); if (!reloc) return NULL_TREE; if (reloc != null_pointer_node) absolute = false; } /* For a non-absolute relocation, there is no single variable that can be "the variable that determines the relocation." */ return absolute ? null_pointer_node : error_mark_node; } return TREE_STATIC (value) ? 
null_pointer_node : NULL_TREE; case INTEGER_CST: case VECTOR_CST: case REAL_CST: case STRING_CST: case COMPLEX_CST: return null_pointer_node; case ADDR_EXPR: case FDESC_EXPR: return staticp (TREE_OPERAND (value, 0)) ? TREE_OPERAND (value, 0) : 0; case VIEW_CONVERT_EXPR: case NON_LVALUE_EXPR: return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); case CONVERT_EXPR: case NOP_EXPR: /* Allow conversions between pointer types. */ if (POINTER_TYPE_P (TREE_TYPE (value)) && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (value, 0)))) return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); /* Allow conversions between real types. */ if (FLOAT_TYPE_P (TREE_TYPE (value)) && FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (value, 0)))) return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); /* Allow length-preserving conversions between integer types. */ if (INTEGRAL_TYPE_P (TREE_TYPE (value)) && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (value, 0))) && (TYPE_PRECISION (TREE_TYPE (value)) == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0))))) return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); /* Allow conversions between other integer types only if explicit value. */ if (INTEGRAL_TYPE_P (TREE_TYPE (value)) && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (value, 0)))) { tree inner = initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); if (inner == null_pointer_node) return null_pointer_node; break; } /* Allow (int) &foo provided int is as wide as a pointer. */ if (INTEGRAL_TYPE_P (TREE_TYPE (value)) && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (value, 0))) && (TYPE_PRECISION (TREE_TYPE (value)) >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0))))) return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); /* Likewise conversions from int to pointers, but also allow conversions from 0. */ if ((POINTER_TYPE_P (TREE_TYPE (value)) || TREE_CODE (TREE_TYPE (value)) == OFFSET_TYPE) && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (value, 0)))) { if (integer_zerop (TREE_OPERAND (value, 0))) return null_pointer_node; else if (TYPE_PRECISION (TREE_TYPE (value)) <= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0)))) return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); } /* Allow conversions to struct or union types if the value inside is okay. */ if (TREE_CODE (TREE_TYPE (value)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (value)) == UNION_TYPE) return initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); break; case PLUS_EXPR: if (! INTEGRAL_TYPE_P (endtype) || TYPE_PRECISION (endtype) >= POINTER_SIZE) { tree valid0 = initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); tree valid1 = initializer_constant_valid_p (TREE_OPERAND (value, 1), endtype); /* If either term is absolute, use the other terms relocation. */ if (valid0 == null_pointer_node) return valid1; if (valid1 == null_pointer_node) return valid0; } break; case MINUS_EXPR: if (! INTEGRAL_TYPE_P (endtype) || TYPE_PRECISION (endtype) >= POINTER_SIZE) { tree valid0 = initializer_constant_valid_p (TREE_OPERAND (value, 0), endtype); tree valid1 = initializer_constant_valid_p (TREE_OPERAND (value, 1), endtype); /* Win if second argument is absolute. */ if (valid1 == null_pointer_node) return valid0; /* Win if both arguments have the same relocation. Then the value is absolute. 
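For example, with static char a[16], an initializer folded to &a[8] - &a[2] relocates both terms against a, so the difference is the link-time constant 6.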
*/ if (valid0 == valid1 && valid0 != 0) return null_pointer_node; /* Since GCC guarantees that string constants are unique in the generated code, a subtraction between two copies of the same constant string is absolute. */ if (valid0 && TREE_CODE (valid0) == STRING_CST && valid1 && TREE_CODE (valid1) == STRING_CST && TREE_STRING_POINTER (valid0) == TREE_STRING_POINTER (valid1)) return null_pointer_node; } /* Support differences between labels. */ if (INTEGRAL_TYPE_P (endtype)) { tree op0, op1; op0 = TREE_OPERAND (value, 0); op1 = TREE_OPERAND (value, 1); /* Like STRIP_NOPS except allow the operand mode to widen. This works around a feature of fold that simplifies (int)(p1 - p2) to ((int)p1 - (int)p2) under the theory that the narrower operation is cheaper. */ while (TREE_CODE (op0) == NOP_EXPR || TREE_CODE (op0) == CONVERT_EXPR || TREE_CODE (op0) == NON_LVALUE_EXPR) { tree inner = TREE_OPERAND (op0, 0); if (inner == error_mark_node || ! INTEGRAL_MODE_P (TYPE_MODE (TREE_TYPE (inner))) || (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op0))) > GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (inner))))) break; op0 = inner; } while (TREE_CODE (op1) == NOP_EXPR || TREE_CODE (op1) == CONVERT_EXPR || TREE_CODE (op1) == NON_LVALUE_EXPR) { tree inner = TREE_OPERAND (op1, 0); if (inner == error_mark_node || ! INTEGRAL_MODE_P (TYPE_MODE (TREE_TYPE (inner))) || (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op1))) > GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (inner))))) break; op1 = inner; } if (TREE_CODE (op0) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (op0, 0)) == LABEL_DECL && TREE_CODE (op1) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (op1, 0)) == LABEL_DECL) return null_pointer_node; } break; default: break; } return 0; } /* Output assembler code for constant EXP to FILE, with no label. This includes the pseudo-op such as ".int" or ".byte", and a newline. Assumes output_addressed_constants has been done on EXP already. Generate exactly SIZE bytes of assembler data, padding at the end with zeros if necessary. SIZE must always be specified. SIZE is important for structure constructors, since trailing members may have been omitted from the constructor. It is also important for initialization of arrays from string constants since the full length of the string constant might not be wanted. It is also needed for initialization of unions, where the initializer's type is just one member, and that may not be as long as the union. There a case in which we would fail to output exactly SIZE bytes: for a structure constructor that wants to produce more than SIZE bytes. But such constructors will never be generated for any possible input. ALIGN is the alignment of the data in bits. */ void output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align) { enum tree_code code; unsigned HOST_WIDE_INT thissize; /* Some front-ends use constants other than the standard language-independent varieties, but which may still be output directly. Give the front-end a chance to convert EXP to a language-independent representation. */ exp = lang_hooks.expand_constant (exp); if (size == 0 || flag_syntax_only) return; /* Eliminate any conversions since we'll be outputting the underlying constant. */ while (TREE_CODE (exp) == NOP_EXPR || TREE_CODE (exp) == CONVERT_EXPR || TREE_CODE (exp) == NON_LVALUE_EXPR || TREE_CODE (exp) == VIEW_CONVERT_EXPR) exp = TREE_OPERAND (exp, 0); code = TREE_CODE (TREE_TYPE (exp)); thissize = int_size_in_bytes (TREE_TYPE (exp)); /* Allow a constructor with no elements for any data type. This means to fill the space with zeros. 
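E.g. an empty aggregate initializer such as  static struct S s = { };  can reach this point with no CONSTRUCTOR_ELTS and simply gets SIZE zero bytes from assemble_zeros below.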
*/ if (TREE_CODE (exp) == CONSTRUCTOR && CONSTRUCTOR_ELTS (exp) == 0) { assemble_zeros (size); return; } if (TREE_CODE (exp) == FDESC_EXPR) { #ifdef ASM_OUTPUT_FDESC HOST_WIDE_INT part = tree_low_cst (TREE_OPERAND (exp, 1), 0); tree decl = TREE_OPERAND (exp, 0); ASM_OUTPUT_FDESC (asm_out_file, decl, part); #else abort (); #endif return; } /* Now output the underlying data. If we've handling the padding, return. Otherwise, break and ensure SIZE is the size written. */ switch (code) { case CHAR_TYPE: case BOOLEAN_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE: case POINTER_TYPE: case REFERENCE_TYPE: case OFFSET_TYPE: if (! assemble_integer (expand_expr (exp, NULL_RTX, VOIDmode, EXPAND_INITIALIZER), MIN (size, thissize), align, 0)) error ("initializer for integer value is too complicated"); break; case REAL_TYPE: if (TREE_CODE (exp) != REAL_CST) error ("initializer for floating value is not a floating constant"); assemble_real (TREE_REAL_CST (exp), TYPE_MODE (TREE_TYPE (exp)), align); break; case COMPLEX_TYPE: output_constant (TREE_REALPART (exp), thissize / 2, align); output_constant (TREE_IMAGPART (exp), thissize / 2, min_align (align, BITS_PER_UNIT * (thissize / 2))); break; case ARRAY_TYPE: case VECTOR_TYPE: if (TREE_CODE (exp) == CONSTRUCTOR) { output_constructor (exp, size, align); return; } else if (TREE_CODE (exp) == STRING_CST) { thissize = MIN ((unsigned HOST_WIDE_INT)TREE_STRING_LENGTH (exp), size); assemble_string (TREE_STRING_POINTER (exp), thissize); } else if (TREE_CODE (exp) == VECTOR_CST) { int elt_size; tree link; unsigned int nalign; enum machine_mode inner; inner = GET_MODE_INNER (TYPE_MODE (TREE_TYPE (exp))); nalign = MIN (align, GET_MODE_ALIGNMENT (inner)); elt_size = GET_MODE_UNIT_SIZE (TYPE_MODE (TREE_TYPE (exp))); link = TREE_VECTOR_CST_ELTS (exp); output_constant (TREE_VALUE (link), elt_size, align); while ((link = TREE_CHAIN (link)) != NULL) output_constant (TREE_VALUE (link), elt_size, nalign); } else abort (); break; case RECORD_TYPE: case UNION_TYPE: if (TREE_CODE (exp) == CONSTRUCTOR) output_constructor (exp, size, align); else abort (); return; case SET_TYPE: if (TREE_CODE (exp) == INTEGER_CST) assemble_integer (expand_expr (exp, NULL_RTX, VOIDmode, EXPAND_INITIALIZER), thissize, align, 1); else if (TREE_CODE (exp) == CONSTRUCTOR) { unsigned char *buffer = alloca (thissize); if (get_set_constructor_bytes (exp, buffer, thissize)) abort (); assemble_string ((char *) buffer, thissize); } else error ("unknown set constructor type"); return; case ERROR_MARK: return; default: abort (); } if (size > thissize) assemble_zeros (size - thissize); } /* Subroutine of output_constructor, used for computing the size of arrays of unspecified length. VAL must be a CONSTRUCTOR of an array type with an unspecified upper bound. */ static unsigned HOST_WIDE_INT array_size_for_constructor (tree val) { tree max_index, i; /* This code used to attempt to handle string constants that are not arrays of single-bytes, but nothing else does, so there's no point in doing it here. */ if (TREE_CODE (val) == STRING_CST) return TREE_STRING_LENGTH (val); max_index = NULL_TREE; for (i = CONSTRUCTOR_ELTS (val); i; i = TREE_CHAIN (i)) { tree index = TREE_PURPOSE (i); if (TREE_CODE (index) == RANGE_EXPR) index = TREE_OPERAND (index, 1); if (max_index == NULL_TREE || tree_int_cst_lt (max_index, index)) max_index = index; } if (max_index == NULL_TREE) return 0; /* Compute the total number of array elements. 
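That is, (max_index - TYPE_MIN_VALUE of the array domain) + 1 elements; the count is then multiplied by TYPE_SIZE_UNIT of the element type to obtain the byte count returned.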
*/ i = size_binop (MINUS_EXPR, convert (sizetype, max_index), convert (sizetype, TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (val))))); i = size_binop (PLUS_EXPR, i, convert (sizetype, integer_one_node)); /* Multiply by the array element unit size to find number of bytes. */ i = size_binop (MULT_EXPR, i, TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val)))); return tree_low_cst (i, 1); } /* Subroutine of output_constant, used for CONSTRUCTORs (aggregate constants). Generate at least SIZE bytes, padding if necessary. */ static void output_constructor (tree exp, unsigned HOST_WIDE_INT size, unsigned int align) { tree type = TREE_TYPE (exp); tree link, field = 0; tree min_index = 0; /* Number of bytes output or skipped so far. In other words, current position within the constructor. */ HOST_WIDE_INT total_bytes = 0; /* Nonzero means BYTE contains part of a byte, to be output. */ int byte_buffer_in_use = 0; int byte = 0; if (HOST_BITS_PER_WIDE_INT < BITS_PER_UNIT) abort (); if (TREE_CODE (type) == RECORD_TYPE) field = TYPE_FIELDS (type); if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) != 0) min_index = TYPE_MIN_VALUE (TYPE_DOMAIN (type)); /* As LINK goes through the elements of the constant, FIELD goes through the structure fields, if the constant is a structure. if the constant is a union, then we override this, by getting the field from the TREE_LIST element. But the constant could also be an array. Then FIELD is zero. There is always a maximum of one element in the chain LINK for unions (even if the initializer in a source program incorrectly contains more one). */ for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link), field = field ? TREE_CHAIN (field) : 0) { tree val = TREE_VALUE (link); tree index = 0; /* The element in a union constructor specifies the proper field or index. */ if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) && TREE_PURPOSE (link) != 0) field = TREE_PURPOSE (link); else if (TREE_CODE (type) == ARRAY_TYPE) index = TREE_PURPOSE (link); #ifdef ASM_COMMENT_START if (field && flag_verbose_asm) fprintf (asm_out_file, "%s %s:\n", ASM_COMMENT_START, DECL_NAME (field) ? IDENTIFIER_POINTER (DECL_NAME (field)) : ""); #endif /* Eliminate the marker that makes a cast not be an lvalue. */ if (val != 0) STRIP_NOPS (val); if (index && TREE_CODE (index) == RANGE_EXPR) { unsigned HOST_WIDE_INT fieldsize = int_size_in_bytes (TREE_TYPE (type)); HOST_WIDE_INT lo_index = tree_low_cst (TREE_OPERAND (index, 0), 0); HOST_WIDE_INT hi_index = tree_low_cst (TREE_OPERAND (index, 1), 0); HOST_WIDE_INT index; unsigned int align2 = min_align (align, fieldsize * BITS_PER_UNIT); for (index = lo_index; index <= hi_index; index++) { /* Output the element's initial value. */ if (val == 0) assemble_zeros (fieldsize); else output_constant (val, fieldsize, align2); /* Count its size. */ total_bytes += fieldsize; } } else if (field == 0 || !DECL_BIT_FIELD (field)) { /* An element that is not a bit-field. */ unsigned HOST_WIDE_INT fieldsize; /* Since this structure is static, we know the positions are constant. */ HOST_WIDE_INT pos = field ? int_byte_position (field) : 0; unsigned int align2; if (index != 0) pos = (tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (val)), 1) * (tree_low_cst (index, 0) - tree_low_cst (min_index, 0))); /* Output any buffered-up bit-fields preceding this element. 
*/ if (byte_buffer_in_use) { assemble_integer (GEN_INT (byte), 1, BITS_PER_UNIT, 1); total_bytes++; byte_buffer_in_use = 0; } /* Advance to offset of this element. Note no alignment needed in an array, since that is guaranteed if each element has the proper size. */ if ((field != 0 || index != 0) && pos != total_bytes) { assemble_zeros (pos - total_bytes); total_bytes = pos; } /* Find the alignment of this element. */ align2 = min_align (align, BITS_PER_UNIT * pos); /* Determine size this element should occupy. */ if (field) { fieldsize = 0; /* If this is an array with an unspecified upper bound, the initializer determines the size. */ /* ??? This ought to only checked if DECL_SIZE_UNIT is NULL, but we cannot do this until the deprecated support for initializing zero-length array members is removed. */ if (TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE && TYPE_DOMAIN (TREE_TYPE (field)) && ! TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (field)))) { fieldsize = array_size_for_constructor (val); /* Given a non-empty initialization, this field had better be last. */ if (fieldsize != 0 && TREE_CHAIN (field) != NULL_TREE) abort (); } else if (DECL_SIZE_UNIT (field)) { /* ??? This can't be right. If the decl size overflows a host integer we will silently emit no data. */ if (host_integerp (DECL_SIZE_UNIT (field), 1)) fieldsize = tree_low_cst (DECL_SIZE_UNIT (field), 1); } } else fieldsize = int_size_in_bytes (TREE_TYPE (type)); /* Output the element's initial value. */ if (val == 0) assemble_zeros (fieldsize); else output_constant (val, fieldsize, align2); /* Count its size. */ total_bytes += fieldsize; } else if (val != 0 && TREE_CODE (val) != INTEGER_CST) error ("invalid initial value for member `%s'", IDENTIFIER_POINTER (DECL_NAME (field))); else { /* Element that is a bit-field. */ HOST_WIDE_INT next_offset = int_bit_position (field); HOST_WIDE_INT end_offset = (next_offset + tree_low_cst (DECL_SIZE (field), 1)); if (val == 0) val = integer_zero_node; /* If this field does not start in this (or, next) byte, skip some bytes. */ if (next_offset / BITS_PER_UNIT != total_bytes) { /* Output remnant of any bit field in previous bytes. */ if (byte_buffer_in_use) { assemble_integer (GEN_INT (byte), 1, BITS_PER_UNIT, 1); total_bytes++; byte_buffer_in_use = 0; } /* If still not at proper byte, advance to there. */ if (next_offset / BITS_PER_UNIT != total_bytes) { assemble_zeros (next_offset / BITS_PER_UNIT - total_bytes); total_bytes = next_offset / BITS_PER_UNIT; } } if (! byte_buffer_in_use) byte = 0; /* We must split the element into pieces that fall within separate bytes, and combine each byte with previous or following bit-fields. */ /* next_offset is the offset n fbits from the beginning of the structure to the next bit of this element to be processed. end_offset is the offset of the first bit past the end of this element. */ while (next_offset < end_offset) { int this_time; int shift; HOST_WIDE_INT value; HOST_WIDE_INT next_byte = next_offset / BITS_PER_UNIT; HOST_WIDE_INT next_bit = next_offset % BITS_PER_UNIT; /* Advance from byte to byte within this element when necessary. */ while (next_byte != total_bytes) { assemble_integer (GEN_INT (byte), 1, BITS_PER_UNIT, 1); total_bytes++; byte = 0; } /* Number of bits we can process at once (all part of the same byte). 
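Assuming BITS_PER_UNIT == 8: if the field still has 5 bits to emit but the current byte already holds 6 bits (next_bit == 6), only MIN (5, 8 - 6) == 2 bits are taken this iteration; the remaining 3 bits are packed into the next byte on the following pass through the loop.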
*/ this_time = MIN (end_offset - next_offset, BITS_PER_UNIT - next_bit); if (BYTES_BIG_ENDIAN) { /* On big-endian machine, take the most significant bits first (of the bits that are significant) and put them into bytes from the most significant end. */ shift = end_offset - next_offset - this_time; /* Don't try to take a bunch of bits that cross the word boundary in the INTEGER_CST. We can only select bits from the LOW or HIGH part not from both. */ if (shift < HOST_BITS_PER_WIDE_INT && shift + this_time > HOST_BITS_PER_WIDE_INT) { this_time = shift + this_time - HOST_BITS_PER_WIDE_INT; shift = HOST_BITS_PER_WIDE_INT; } /* Now get the bits from the appropriate constant word. */ if (shift < HOST_BITS_PER_WIDE_INT) value = TREE_INT_CST_LOW (val); else if (shift < 2 * HOST_BITS_PER_WIDE_INT) { value = TREE_INT_CST_HIGH (val); shift -= HOST_BITS_PER_WIDE_INT; } else abort (); /* Get the result. This works only when: 1 <= this_time <= HOST_BITS_PER_WIDE_INT. */ byte |= (((value >> shift) & (((HOST_WIDE_INT) 2 << (this_time - 1)) - 1)) << (BITS_PER_UNIT - this_time - next_bit)); } else { /* On little-endian machines, take first the least significant bits of the value and pack them starting at the least significant bits of the bytes. */ shift = next_offset - int_bit_position (field); /* Don't try to take a bunch of bits that cross the word boundary in the INTEGER_CST. We can only select bits from the LOW or HIGH part not from both. */ if (shift < HOST_BITS_PER_WIDE_INT && shift + this_time > HOST_BITS_PER_WIDE_INT) this_time = (HOST_BITS_PER_WIDE_INT - shift); /* Now get the bits from the appropriate constant word. */ if (shift < HOST_BITS_PER_WIDE_INT) value = TREE_INT_CST_LOW (val); else if (shift < 2 * HOST_BITS_PER_WIDE_INT) { value = TREE_INT_CST_HIGH (val); shift -= HOST_BITS_PER_WIDE_INT; } else abort (); /* Get the result. This works only when: 1 <= this_time <= HOST_BITS_PER_WIDE_INT. */ byte |= (((value >> shift) & (((HOST_WIDE_INT) 2 << (this_time - 1)) - 1)) << next_bit); } next_offset += this_time; byte_buffer_in_use = 1; } } } if (byte_buffer_in_use) { assemble_integer (GEN_INT (byte), 1, BITS_PER_UNIT, 1); total_bytes++; } if ((unsigned HOST_WIDE_INT)total_bytes < size) assemble_zeros (size - total_bytes); } /* This TREE_LIST contains any weak symbol declarations waiting to be emitted. */ static GTY(()) tree weak_decls; /* Mark DECL as weak. */ static void mark_weak (tree decl) { DECL_WEAK (decl) = 1; if (DECL_RTL_SET_P (decl) && MEM_P (DECL_RTL (decl)) && XEXP (DECL_RTL (decl), 0) && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF) SYMBOL_REF_WEAK (XEXP (DECL_RTL (decl), 0)) = 1; } /* Merge weak status between NEWDECL and OLDDECL. */ void merge_weak (tree newdecl, tree olddecl) { if (DECL_WEAK (newdecl) == DECL_WEAK (olddecl)) return; if (DECL_WEAK (newdecl)) { tree wd; /* NEWDECL is weak, but OLDDECL is not. */ /* If we already output the OLDDECL, we're in trouble; we can't go back and make it weak. This error cannot caught in declare_weak because the NEWDECL and OLDDECL was not yet been merged; therefore, TREE_ASM_WRITTEN was not set. */ if (TREE_ASM_WRITTEN (olddecl)) error ("%Jweak declaration of '%D' must precede definition", newdecl, newdecl); /* If we've already generated rtl referencing OLDDECL, we may have done so in a way that will not function properly with a weak symbol. 
*/ else if (TREE_USED (olddecl) && TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (olddecl))) warning ("%Jweak declaration of '%D' after first use results " "in unspecified behavior", newdecl, newdecl); if (SUPPORTS_WEAK) { /* We put the NEWDECL on the weak_decls list at some point. Replace it with the OLDDECL. */ for (wd = weak_decls; wd; wd = TREE_CHAIN (wd)) if (TREE_VALUE (wd) == newdecl) { TREE_VALUE (wd) = olddecl; break; } /* We may not find the entry on the list. If NEWDECL is a weak alias, then we will have already called globalize_decl to remove the entry; in that case, we do not need to do anything. */ } /* Make the OLDDECL weak; it's OLDDECL that we'll be keeping. */ mark_weak (olddecl); } else /* OLDDECL was weak, but NEWDECL was not explicitly marked as weak. Just update NEWDECL to indicate that it's weak too. */ mark_weak (newdecl); } /* Declare DECL to be a weak symbol. */ void declare_weak (tree decl) { if (! TREE_PUBLIC (decl)) error ("%Jweak declaration of '%D' must be public", decl, decl); else if (TREE_CODE (decl) == FUNCTION_DECL && TREE_ASM_WRITTEN (decl)) error ("%Jweak declaration of '%D' must precede definition", decl, decl); else if (SUPPORTS_WEAK) { if (! DECL_WEAK (decl)) weak_decls = tree_cons (NULL, decl, weak_decls); } else warning ("%Jweak declaration of '%D' not supported", decl, decl); mark_weak (decl); } /* Emit any pending weak declarations. */ void weak_finish (void) { tree t; for (t = weak_decls; t; t = TREE_CHAIN (t)) { tree decl = TREE_VALUE (t); #if defined (ASM_WEAKEN_DECL) || defined (ASM_WEAKEN_LABEL) const char *const name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); #endif if (! TREE_USED (decl)) continue; #ifdef ASM_WEAKEN_DECL ASM_WEAKEN_DECL (asm_out_file, decl, name, NULL); #else #ifdef ASM_WEAKEN_LABEL ASM_WEAKEN_LABEL (asm_out_file, name); #else #ifdef ASM_OUTPUT_WEAK_ALIAS warning ("only weak aliases are supported in this configuration"); return; #endif #endif #endif } } /* Emit the assembly bits to indicate that DECL is globally visible. */ static void globalize_decl (tree decl) { const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0); #if defined (ASM_WEAKEN_LABEL) || defined (ASM_WEAKEN_DECL) if (DECL_WEAK (decl)) { tree *p, t; #ifdef ASM_WEAKEN_DECL ASM_WEAKEN_DECL (asm_out_file, decl, name, 0); #else ASM_WEAKEN_LABEL (asm_out_file, name); #endif /* Remove this function from the pending weak list so that we do not emit multiple .weak directives for it. */ for (p = &weak_decls; (t = *p) ; ) { if (DECL_ASSEMBLER_NAME (decl) == DECL_ASSEMBLER_NAME (TREE_VALUE (t))) *p = TREE_CHAIN (t); else p = &TREE_CHAIN (t); } return; } #elif defined(ASM_MAKE_LABEL_LINKONCE) if (DECL_ONE_ONLY (decl)) ASM_MAKE_LABEL_LINKONCE (asm_out_file, name); #endif targetm.asm_out.globalize_label (asm_out_file, name); } /* Emit an assembler directive to make the symbol for DECL an alias to the symbol for TARGET. */ void assemble_alias (tree decl, tree target ATTRIBUTE_UNUSED) { const char *name; /* We must force creation of DECL_RTL for debug info generation, even though we don't use it here. */ make_decl_rtl (decl, NULL); name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); #ifdef ASM_OUTPUT_DEF /* Make name accessible from other files, if appropriate. 
*/ if (TREE_PUBLIC (decl)) { globalize_decl (decl); maybe_assemble_visibility (decl); } #ifdef ASM_OUTPUT_DEF_FROM_DECLS ASM_OUTPUT_DEF_FROM_DECLS (asm_out_file, decl, target); #else ASM_OUTPUT_DEF (asm_out_file, name, IDENTIFIER_POINTER (target)); #endif #else /* !ASM_OUTPUT_DEF */ #if defined (ASM_OUTPUT_WEAK_ALIAS) || defined (ASM_WEAKEN_DECL) if (DECL_WEAK (decl)) { tree *p, t; #ifdef ASM_WEAKEN_DECL ASM_WEAKEN_DECL (asm_out_file, decl, name, IDENTIFIER_POINTER (target)); #else ASM_OUTPUT_WEAK_ALIAS (asm_out_file, name, IDENTIFIER_POINTER (target)); #endif /* Remove this function from the pending weak list so that we do not emit multiple .weak directives for it. */ for (p = &weak_decls; (t = *p) ; ) if (DECL_ASSEMBLER_NAME (decl) == DECL_ASSEMBLER_NAME (TREE_VALUE (t))) *p = TREE_CHAIN (t); else p = &TREE_CHAIN (t); } else warning ("only weak aliases are supported in this configuration"); #else warning ("alias definitions not supported in this configuration; ignored"); #endif #endif TREE_USED (decl) = 1; TREE_ASM_WRITTEN (decl) = 1; TREE_ASM_WRITTEN (DECL_ASSEMBLER_NAME (decl)) = 1; } /* Emit an assembler directive to set symbol for DECL visibility to the visibility type VIS, which must not be VISIBILITY_DEFAULT. */ void default_assemble_visibility (tree decl, int vis) { static const char * const visibility_types[] = { NULL, "internal", "hidden", "protected" }; const char *name, *type; name = (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))); type = visibility_types[vis]; #ifdef HAVE_GAS_HIDDEN fprintf (asm_out_file, "\t.%s\t", type); assemble_name (asm_out_file, name); fprintf (asm_out_file, "\n"); #else warning ("visibility attribute not supported in this configuration; ignored"); #endif } /* A helper function to call assemble_visibility when needed for a decl. */ static void maybe_assemble_visibility (tree decl) { enum symbol_visibility vis = DECL_VISIBILITY (decl); if (vis != VISIBILITY_DEFAULT) targetm.asm_out.visibility (decl, vis); } /* Returns 1 if the target configuration supports defining public symbols so that one of them will be chosen at link time instead of generating a multiply-defined symbol error, whether through the use of weak symbols or a target-specific mechanism for having duplicates discarded. */ int supports_one_only (void) { if (SUPPORTS_ONE_ONLY) return 1; return SUPPORTS_WEAK; } /* Set up DECL as a public symbol that can be defined in multiple translation units without generating a linker error. 
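The typical clients are C++ inline functions, template instantiations and vtables: every translation unit that needs one may emit its own copy, and the link step keeps a single instance, via DECL_ONE_ONLY sections, a common block, or a weak definition as chosen below.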
*/ void make_decl_one_only (tree decl) { if (TREE_CODE (decl) != VAR_DECL && TREE_CODE (decl) != FUNCTION_DECL) abort (); TREE_PUBLIC (decl) = 1; if (SUPPORTS_ONE_ONLY) { #ifdef MAKE_DECL_ONE_ONLY MAKE_DECL_ONE_ONLY (decl); #endif DECL_ONE_ONLY (decl) = 1; } else if (TREE_CODE (decl) == VAR_DECL && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node)) DECL_COMMON (decl) = 1; else if (SUPPORTS_WEAK) DECL_WEAK (decl) = 1; else abort (); } void init_varasm_once (void) { in_named_htab = htab_create_ggc (31, in_named_entry_hash, in_named_entry_eq, NULL); const_desc_htab = htab_create_ggc (1009, const_desc_hash, const_desc_eq, NULL); const_alias_set = new_alias_set (); } enum tls_model decl_tls_model (tree decl) { enum tls_model kind; tree attr = lookup_attribute ("tls_model", DECL_ATTRIBUTES (decl)); bool is_local; if (attr) { attr = TREE_VALUE (TREE_VALUE (attr)); if (TREE_CODE (attr) != STRING_CST) abort (); if (!strcmp (TREE_STRING_POINTER (attr), "local-exec")) kind = TLS_MODEL_LOCAL_EXEC; else if (!strcmp (TREE_STRING_POINTER (attr), "initial-exec")) kind = TLS_MODEL_INITIAL_EXEC; else if (!strcmp (TREE_STRING_POINTER (attr), "local-dynamic")) kind = optimize ? TLS_MODEL_LOCAL_DYNAMIC : TLS_MODEL_GLOBAL_DYNAMIC; else if (!strcmp (TREE_STRING_POINTER (attr), "global-dynamic")) kind = TLS_MODEL_GLOBAL_DYNAMIC; else abort (); return kind; } is_local = targetm.binds_local_p (decl); if (!flag_pic) { if (is_local) kind = TLS_MODEL_LOCAL_EXEC; else kind = TLS_MODEL_INITIAL_EXEC; } /* Local dynamic is inefficient when we're not combining the parts of the address. */ else if (optimize && is_local) kind = TLS_MODEL_LOCAL_DYNAMIC; else kind = TLS_MODEL_GLOBAL_DYNAMIC; if (kind < flag_tls_default) kind = flag_tls_default; return kind; } /* Select a set of attributes for section NAME based on the properties of DECL and whether or not RELOC indicates that DECL's initializer might contain runtime relocations. We make the section read-only and executable for a function decl, read-only for a const data decl, and writable for a non-const data decl. */ unsigned int default_section_type_flags (tree decl, const char *name, int reloc) { return default_section_type_flags_1 (decl, name, reloc, flag_pic); } unsigned int default_section_type_flags_1 (tree decl, const char *name, int reloc, int shlib) { unsigned int flags; if (decl && TREE_CODE (decl) == FUNCTION_DECL) flags = SECTION_CODE; else if (decl && decl_readonly_section_1 (decl, reloc, shlib)) flags = 0; else if (strcmp (name, UNLIKELY_EXECUTED_TEXT_SECTION_NAME) == 0) flags = SECTION_CODE; else flags = SECTION_WRITE; if (decl && DECL_ONE_ONLY (decl)) flags |= SECTION_LINKONCE; if (decl && TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL (decl)) flags |= SECTION_TLS | SECTION_WRITE; if (strcmp (name, ".bss") == 0 || strncmp (name, ".bss.", 5) == 0 || strncmp (name, ".gnu.linkonce.b.", 16) == 0 || strcmp (name, ".sbss") == 0 || strncmp (name, ".sbss.", 6) == 0 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0 || strcmp (name, ".tbss") == 0 || strncmp (name, ".gnu.linkonce.tb.", 17) == 0) flags |= SECTION_BSS; if (strcmp (name, ".tdata") == 0 || strcmp (name, ".tbss") == 0 || strncmp (name, ".gnu.linkonce.td.", 17) == 0 || strncmp (name, ".gnu.linkonce.tb.", 17) == 0) flags |= SECTION_TLS; /* These three sections have special ELF types. They are neither SHT_PROGBITS nor SHT_NOBITS, so when changing sections we don't want to print a section type (@progbits or @nobits). 
If someone is silly enough to emit code or TLS variables to one of these sections, then don't handle them specially. */ if (!(flags & (SECTION_CODE | SECTION_BSS | SECTION_TLS)) && (strcmp (name, ".init_array") == 0 || strcmp (name, ".fini_array") == 0 || strcmp (name, ".preinit_array") == 0)) flags |= SECTION_NOTYPE; return flags; } /* Output assembly to switch to section NAME with attribute FLAGS. Four variants for common object file formats. */ void default_no_named_section (const char *name ATTRIBUTE_UNUSED, unsigned int flags ATTRIBUTE_UNUSED) { /* Some object formats don't support named sections at all. The front-end should already have flagged this as an error. */ abort (); } void default_elf_asm_named_section (const char *name, unsigned int flags) { char flagchars[10], *f = flagchars; if (! named_section_first_declaration (name)) { fprintf (asm_out_file, "\t.section\t%s\n", name); return; } if (!(flags & SECTION_DEBUG)) *f++ = 'a'; if (flags & SECTION_WRITE) *f++ = 'w'; if (flags & SECTION_CODE) *f++ = 'x'; if (flags & SECTION_SMALL) *f++ = 's'; if (flags & SECTION_MERGE) *f++ = 'M'; if (flags & SECTION_STRINGS) *f++ = 'S'; if (flags & SECTION_TLS) *f++ = 'T'; *f = '\0'; fprintf (asm_out_file, "\t.section\t%s,\"%s\"", name, flagchars); if (!(flags & SECTION_NOTYPE)) { const char *type; if (flags & SECTION_BSS) type = "nobits"; else type = "progbits"; fprintf (asm_out_file, ",@%s", type); if (flags & SECTION_ENTSIZE) fprintf (asm_out_file, ",%d", flags & SECTION_ENTSIZE); } putc ('\n', asm_out_file); } void default_coff_asm_named_section (const char *name, unsigned int flags) { char flagchars[8], *f = flagchars; if (flags & SECTION_WRITE) *f++ = 'w'; if (flags & SECTION_CODE) *f++ = 'x'; *f = '\0'; fprintf (asm_out_file, "\t.section\t%s,\"%s\"\n", name, flagchars); } void default_pe_asm_named_section (const char *name, unsigned int flags) { default_coff_asm_named_section (name, flags); if (flags & SECTION_LINKONCE) { /* Functions may have been compiled at various levels of optimization so we can't use `same_size' here. Instead, have the linker pick one. */ fprintf (asm_out_file, "\t.linkonce %s\n", (flags & SECTION_CODE ? "discard" : "same_size")); } } /* The lame default section selector. */ void default_select_section (tree decl, int reloc, unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED) { bool readonly = false; if (DECL_P (decl)) { if (decl_readonly_section (decl, reloc)) readonly = true; } else if (TREE_CODE (decl) == CONSTRUCTOR) { if (! ((flag_pic && reloc) || !TREE_READONLY (decl) || TREE_SIDE_EFFECTS (decl) || !TREE_CONSTANT (decl))) readonly = true; } else if (TREE_CODE (decl) == STRING_CST) readonly = true; else if (! (flag_pic && reloc)) readonly = true; if (readonly) readonly_data_section (); else data_section (); } /* A helper function for default_elf_select_section and default_elf_unique_section. Categorizes the DECL. */ enum section_category { SECCAT_TEXT, SECCAT_RODATA, SECCAT_RODATA_MERGE_STR, SECCAT_RODATA_MERGE_STR_INIT, SECCAT_RODATA_MERGE_CONST, SECCAT_SRODATA, SECCAT_DATA, /* To optimize loading of shared programs, define following subsections of data section: _REL Contains data that has relocations, so they get grouped together and dynamic linker will visit fewer pages in memory. _RO Contains data that is otherwise read-only. This is useful with prelinking as most relocations won't be dynamically linked and thus stay read only. _LOCAL Marks data containing relocations only to local objects. These relocations will get fully resolved by prelinking. 
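So, for instance, a writable pointer initialized with the address of a file-static function is categorized as SECCAT_DATA_REL_LOCAL and lands in .data.rel.local when compiling for a shared library; see categorize_decl_for_section and default_elf_select_section_1 below.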
*/ SECCAT_DATA_REL, SECCAT_DATA_REL_LOCAL, SECCAT_DATA_REL_RO, SECCAT_DATA_REL_RO_LOCAL, SECCAT_SDATA, SECCAT_TDATA, SECCAT_BSS, SECCAT_SBSS, SECCAT_TBSS }; static enum section_category categorize_decl_for_section (tree, int, int); static enum section_category categorize_decl_for_section (tree decl, int reloc, int shlib) { enum section_category ret; if (TREE_CODE (decl) == FUNCTION_DECL) return SECCAT_TEXT; else if (TREE_CODE (decl) == STRING_CST) { if (flag_mudflap) /* or !flag_merge_constants */ return SECCAT_RODATA; else return SECCAT_RODATA_MERGE_STR; } else if (TREE_CODE (decl) == VAR_DECL) { if (DECL_INITIAL (decl) == NULL || DECL_INITIAL (decl) == error_mark_node) ret = SECCAT_BSS; else if (! TREE_READONLY (decl) || TREE_SIDE_EFFECTS (decl) || ! TREE_CONSTANT (DECL_INITIAL (decl))) { if (shlib && (reloc & 2)) ret = SECCAT_DATA_REL; else if (shlib && reloc) ret = SECCAT_DATA_REL_LOCAL; else ret = SECCAT_DATA; } else if (shlib && (reloc & 2)) ret = SECCAT_DATA_REL_RO; else if (shlib && reloc) ret = SECCAT_DATA_REL_RO_LOCAL; else if (reloc || flag_merge_constants < 2) /* C and C++ don't allow different variables to share the same location. -fmerge-all-constants allows even that (at the expense of not conforming). */ ret = SECCAT_RODATA; else if (TREE_CODE (DECL_INITIAL (decl)) == STRING_CST) ret = SECCAT_RODATA_MERGE_STR_INIT; else ret = SECCAT_RODATA_MERGE_CONST; } else if (TREE_CODE (decl) == CONSTRUCTOR) { if ((shlib && reloc) || TREE_SIDE_EFFECTS (decl) || ! TREE_CONSTANT (decl)) ret = SECCAT_DATA; else ret = SECCAT_RODATA; } else ret = SECCAT_RODATA; /* There are no read-only thread-local sections. */ if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL (decl)) { if (ret == SECCAT_BSS) ret = SECCAT_TBSS; else ret = SECCAT_TDATA; } /* If the target uses small data sections, select it. */ else if (targetm.in_small_data_p (decl)) { if (ret == SECCAT_BSS) ret = SECCAT_SBSS; else if (targetm.have_srodata_section && ret == SECCAT_RODATA) ret = SECCAT_SRODATA; else ret = SECCAT_SDATA; } return ret; } bool decl_readonly_section (tree decl, int reloc) { return decl_readonly_section_1 (decl, reloc, flag_pic); } bool decl_readonly_section_1 (tree decl, int reloc, int shlib) { switch (categorize_decl_for_section (decl, reloc, shlib)) { case SECCAT_RODATA: case SECCAT_RODATA_MERGE_STR: case SECCAT_RODATA_MERGE_STR_INIT: case SECCAT_RODATA_MERGE_CONST: case SECCAT_SRODATA: return true; break; default: return false; break; } } /* Select a section based on the above categorization. */ void default_elf_select_section (tree decl, int reloc, unsigned HOST_WIDE_INT align) { default_elf_select_section_1 (decl, reloc, align, flag_pic); } void default_elf_select_section_1 (tree decl, int reloc, unsigned HOST_WIDE_INT align, int shlib) { switch (categorize_decl_for_section (decl, reloc, shlib)) { case SECCAT_TEXT: /* We're not supposed to be called on FUNCTION_DECLs. 
*/ abort (); case SECCAT_RODATA: readonly_data_section (); break; case SECCAT_RODATA_MERGE_STR: mergeable_string_section (decl, align, 0); break; case SECCAT_RODATA_MERGE_STR_INIT: mergeable_string_section (DECL_INITIAL (decl), align, 0); break; case SECCAT_RODATA_MERGE_CONST: mergeable_constant_section (DECL_MODE (decl), align, 0); break; case SECCAT_SRODATA: named_section (NULL_TREE, ".sdata2", reloc); break; case SECCAT_DATA: data_section (); break; case SECCAT_DATA_REL: named_section (NULL_TREE, ".data.rel", reloc); break; case SECCAT_DATA_REL_LOCAL: named_section (NULL_TREE, ".data.rel.local", reloc); break; case SECCAT_DATA_REL_RO: named_section (NULL_TREE, ".data.rel.ro", reloc); break; case SECCAT_DATA_REL_RO_LOCAL: named_section (NULL_TREE, ".data.rel.ro.local", reloc); break; case SECCAT_SDATA: named_section (NULL_TREE, ".sdata", reloc); break; case SECCAT_TDATA: named_section (NULL_TREE, ".tdata", reloc); break; case SECCAT_BSS: #ifdef BSS_SECTION_ASM_OP bss_section (); #else named_section (NULL_TREE, ".bss", reloc); #endif break; case SECCAT_SBSS: named_section (NULL_TREE, ".sbss", reloc); break; case SECCAT_TBSS: named_section (NULL_TREE, ".tbss", reloc); break; default: abort (); } } /* Construct a unique section name based on the decl name and the categorization performed above. */ void default_unique_section (tree decl, int reloc) { default_unique_section_1 (decl, reloc, flag_pic); } void default_unique_section_1 (tree decl, int reloc, int shlib) { bool one_only = DECL_ONE_ONLY (decl); const char *prefix, *name; size_t nlen, plen; char *string; switch (categorize_decl_for_section (decl, reloc, shlib)) { case SECCAT_TEXT: prefix = one_only ? ".gnu.linkonce.t." : ".text."; break; case SECCAT_RODATA: case SECCAT_RODATA_MERGE_STR: case SECCAT_RODATA_MERGE_STR_INIT: case SECCAT_RODATA_MERGE_CONST: prefix = one_only ? ".gnu.linkonce.r." : ".rodata."; break; case SECCAT_SRODATA: prefix = one_only ? ".gnu.linkonce.s2." : ".sdata2."; break; case SECCAT_DATA: case SECCAT_DATA_REL: case SECCAT_DATA_REL_LOCAL: case SECCAT_DATA_REL_RO: case SECCAT_DATA_REL_RO_LOCAL: prefix = one_only ? ".gnu.linkonce.d." : ".data."; break; case SECCAT_SDATA: prefix = one_only ? ".gnu.linkonce.s." : ".sdata."; break; case SECCAT_BSS: prefix = one_only ? ".gnu.linkonce.b." : ".bss."; break; case SECCAT_SBSS: prefix = one_only ? ".gnu.linkonce.sb." : ".sbss."; break; case SECCAT_TDATA: prefix = one_only ? ".gnu.linkonce.td." : ".tdata."; break; case SECCAT_TBSS: prefix = one_only ? ".gnu.linkonce.tb." : ".tbss."; break; default: abort (); } plen = strlen (prefix); name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); name = targetm.strip_name_encoding (name); nlen = strlen (name); string = alloca (nlen + plen + 1); memcpy (string, prefix, plen); memcpy (string + plen, name, nlen + 1); DECL_SECTION_NAME (decl) = build_string (nlen + plen, string); } void default_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x, unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED) { if (flag_pic) switch (GET_CODE (x)) { case CONST: case SYMBOL_REF: case LABEL_REF: data_section (); return; default: break; } readonly_data_section (); } void default_elf_select_rtx_section (enum machine_mode mode, rtx x, unsigned HOST_WIDE_INT align) { /* ??? Handle small data here somehow. 
*/ if (flag_pic) switch (GET_CODE (x)) { case CONST: case SYMBOL_REF: named_section (NULL_TREE, ".data.rel.ro", 3); return; case LABEL_REF: named_section (NULL_TREE, ".data.rel.ro.local", 1); return; default: break; } mergeable_constant_section (mode, align, 0); } /* Set the generally applicable flags on the SYMBOL_REF for EXP. */ void default_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED) { rtx symbol; int flags; /* Careful not to prod global register variables. */ if (!MEM_P (rtl)) return; symbol = XEXP (rtl, 0); if (GET_CODE (symbol) != SYMBOL_REF) return; flags = 0; if (TREE_CODE (decl) == FUNCTION_DECL) flags |= SYMBOL_FLAG_FUNCTION; if (targetm.binds_local_p (decl)) flags |= SYMBOL_FLAG_LOCAL; if (targetm.in_small_data_p (decl)) flags |= SYMBOL_FLAG_SMALL; if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL (decl)) flags |= decl_tls_model (decl) << SYMBOL_FLAG_TLS_SHIFT; /* ??? Why is DECL_EXTERNAL ever set for non-PUBLIC names? Without being PUBLIC, the thing *must* be defined in this translation unit. Prevent this buglet from being propagated into rtl code as well. */ if (DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)) flags |= SYMBOL_FLAG_EXTERNAL; SYMBOL_REF_FLAGS (symbol) = flags; } /* By default, we do nothing for encode_section_info, so we need not do anything but discard the '*' marker. */ const char * default_strip_name_encoding (const char *str) { return str + (*str == '*'); } /* Assume ELF-ish defaults, since that's pretty much the most liberal wrt cross-module name binding. */ bool default_binds_local_p (tree exp) { return default_binds_local_p_1 (exp, flag_shlib); } bool default_binds_local_p_1 (tree exp, int shlib) { bool local_p; /* A non-decl is an entry in the constant pool. */ if (!DECL_P (exp)) local_p = true; /* Static variables are always local. */ else if (! TREE_PUBLIC (exp)) local_p = true; /* A variable is local if the user tells us so. */ else if (DECL_VISIBILITY (exp) != VISIBILITY_DEFAULT) local_p = true; /* Otherwise, variables defined outside this object may not be local. */ else if (DECL_EXTERNAL (exp)) local_p = false; /* Linkonce and weak data are never local. */ else if (DECL_ONE_ONLY (exp) || DECL_WEAK (exp)) local_p = false; /* If PIC, then assume that any global name can be overridden by symbols resolved from other modules. */ else if (shlib) local_p = false; /* Uninitialized COMMON variable may be unified with symbols resolved from other modules. */ else if (DECL_COMMON (exp) && (DECL_INITIAL (exp) == NULL || DECL_INITIAL (exp) == error_mark_node)) local_p = false; /* Otherwise we're left with initialized (or non-common) global data which is of necessity defined locally. */ else local_p = true; return local_p; } /* Determine whether or not a pointer mode is valid. Assume defaults of ptr_mode or Pmode - can be overridden. */ bool default_valid_pointer_mode (enum machine_mode mode) { return (mode == ptr_mode || mode == Pmode); } /* Default function to output code that will globalize a label. A target must define GLOBAL_ASM_OP or provide it's own function to globalize a label. */ #ifdef GLOBAL_ASM_OP void default_globalize_label (FILE * stream, const char *name) { fputs (GLOBAL_ASM_OP, stream); assemble_name (stream, name); putc ('\n', stream); } #endif /* GLOBAL_ASM_OP */ /* Default function to output a label for unwind information. The default is to do nothing. A target that needs nonlocal labels for unwind information must provide its own function to do this. 
*/ void default_emit_unwind_label (FILE * stream ATTRIBUTE_UNUSED, tree decl ATTRIBUTE_UNUSED, int for_eh ATTRIBUTE_UNUSED, int empty ATTRIBUTE_UNUSED) { } /* This is how to output an internal numbered label where PREFIX is the class of label and LABELNO is the number within the class. */ void default_internal_label (FILE *stream, const char *prefix, unsigned long labelno) { char *const buf = alloca (40 + strlen (prefix)); ASM_GENERATE_INTERNAL_LABEL (buf, prefix, labelno); ASM_OUTPUT_LABEL (stream, buf); } /* This is the default behavior at the beginning of a file. It's controlled by two other target-hook toggles. */ void default_file_start (void) { if (targetm.file_start_app_off && !flag_verbose_asm) fputs (ASM_APP_OFF, asm_out_file); if (targetm.file_start_file_directive) output_file_directive (asm_out_file, main_input_filename); } /* This is a generic routine suitable for use as TARGET_ASM_FILE_END which emits a special section directive used to indicate whether or not this object file needs an executable stack. This is primarily a GNU extension to ELF but could be used on other targets. */ int trampolines_created; void file_end_indicate_exec_stack (void) { unsigned int flags = SECTION_DEBUG; if (trampolines_created) flags |= SECTION_CODE; named_section_flags (".note.GNU-stack", flags); } /* Type information for varasm.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. 
*/ void gt_ggc_mx_constant_descriptor_rtx (void *x_p) { struct constant_descriptor_rtx * x = (struct constant_descriptor_rtx *)x_p; struct constant_descriptor_rtx * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).next); while (x != xlimit) { gt_ggc_m_23constant_descriptor_rtx ((*x).next); gt_ggc_m_7rtx_def ((*x).mem); gt_ggc_m_7rtx_def ((*x).sym); gt_ggc_m_7rtx_def ((*x).constant); x = ((*x).next); } } void gt_ggc_mx_constant_descriptor_tree (void *x_p) { struct constant_descriptor_tree * const x = (struct constant_descriptor_tree *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_7rtx_def ((*x).rtl); gt_ggc_m_9tree_node ((*x).value); } } void gt_ggc_mx_in_named_entry (void *x_p) { struct in_named_entry * const x = (struct in_named_entry *)x_p; if (ggc_test_and_set_mark (x)) { } } void gt_ggc_mx_rtx_constant_pool (void *x_p) { struct rtx_constant_pool * const x = (struct rtx_constant_pool *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_23constant_descriptor_rtx ((*x).first); gt_ggc_m_23constant_descriptor_rtx ((*x).last); gt_ggc_m_P23constant_descriptor_rtx4htab ((*x).const_rtx_htab); gt_ggc_m_P23constant_descriptor_rtx4htab ((*x).const_rtx_sym_htab); } } void gt_ggc_mx_varasm_status (void *x_p) { struct varasm_status * const x = (struct varasm_status *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_17rtx_constant_pool ((*x).pool); } } void gt_ggc_m_P23constant_descriptor_rtx4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_23constant_descriptor_rtx ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_P24constant_descriptor_tree4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_24constant_descriptor_tree ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_ggc_m_P14in_named_entry4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (ggc_test_and_set_mark (x)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_ggc_m_14in_named_entry ((*x).entries[i0]); } ggc_mark ((*x).entries); } } } void gt_pch_nx_constant_descriptor_rtx (void *x_p) { struct constant_descriptor_rtx * x = (struct constant_descriptor_rtx *)x_p; struct constant_descriptor_rtx * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_23constant_descriptor_rtx)) xlimit = ((*xlimit).next); while (x != xlimit) { gt_pch_n_23constant_descriptor_rtx ((*x).next); gt_pch_n_7rtx_def ((*x).mem); gt_pch_n_7rtx_def ((*x).sym); gt_pch_n_7rtx_def ((*x).constant); x = ((*x).next); } } void gt_pch_nx_constant_descriptor_tree (void *x_p) { struct constant_descriptor_tree * const x = (struct constant_descriptor_tree *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_24constant_descriptor_tree)) { gt_pch_n_7rtx_def ((*x).rtl); gt_pch_n_9tree_node ((*x).value); } } void gt_pch_nx_in_named_entry (void *x_p) { struct in_named_entry * const x = (struct in_named_entry *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_14in_named_entry)) { gt_pch_n_S ((*x).name); } } void gt_pch_nx_rtx_constant_pool (void *x_p) { struct rtx_constant_pool * const x = (struct rtx_constant_pool *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_17rtx_constant_pool)) { gt_pch_n_23constant_descriptor_rtx ((*x).first); gt_pch_n_23constant_descriptor_rtx ((*x).last); 
gt_pch_n_P23constant_descriptor_rtx4htab ((*x).const_rtx_htab); gt_pch_n_P23constant_descriptor_rtx4htab ((*x).const_rtx_sym_htab); } } void gt_pch_nx_varasm_status (void *x_p) { struct varasm_status * const x = (struct varasm_status *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_13varasm_status)) { gt_pch_n_17rtx_constant_pool ((*x).pool); } } void gt_pch_n_P23constant_descriptor_rtx4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P23constant_descriptor_rtx4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_23constant_descriptor_rtx ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P23constant_descriptor_rtx4htab); } } } void gt_pch_n_P24constant_descriptor_tree4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P24constant_descriptor_tree4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_24constant_descriptor_tree ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P24constant_descriptor_tree4htab); } } } void gt_pch_n_P14in_named_entry4htab (void *x_p) { struct htab * const x = (struct htab *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_P14in_named_entry4htab)) { if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { gt_pch_n_14in_named_entry ((*x).entries[i0]); } gt_pch_note_object ((*x).entries, x, gt_pch_p_P14in_named_entry4htab); } } } void gt_pch_p_23constant_descriptor_rtx (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct constant_descriptor_rtx * const x ATTRIBUTE_UNUSED = (struct constant_descriptor_rtx *)x_p; if ((void *)(x) == this_obj) op (&((*x).next), cookie); if ((void *)(x) == this_obj) op (&((*x).mem), cookie); if ((void *)(x) == this_obj) op (&((*x).sym), cookie); if ((void *)(x) == this_obj) op (&((*x).constant), cookie); } void gt_pch_p_24constant_descriptor_tree (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct constant_descriptor_tree * const x ATTRIBUTE_UNUSED = (struct constant_descriptor_tree *)x_p; if ((void *)(x) == this_obj) op (&((*x).rtl), cookie); if ((void *)(x) == this_obj) op (&((*x).value), cookie); } void gt_pch_p_14in_named_entry (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct in_named_entry * const x ATTRIBUTE_UNUSED = (struct in_named_entry *)x_p; if ((void *)(x) == this_obj) op (&((*x).name), cookie); } void gt_pch_p_17rtx_constant_pool (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct rtx_constant_pool * const x ATTRIBUTE_UNUSED = (struct rtx_constant_pool *)x_p; if ((void *)(x) == this_obj) op (&((*x).first), cookie); if ((void *)(x) == this_obj) op (&((*x).last), cookie); if ((void *)(x) == this_obj) op (&((*x).const_rtx_htab), cookie); if ((void *)(x) == this_obj) op (&((*x).const_rtx_sym_htab), cookie); } void gt_pch_p_13varasm_status (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct varasm_status * const x ATTRIBUTE_UNUSED = (struct varasm_status *)x_p; if ((void *)(x) == this_obj) op (&((*x).pool), cookie); } void gt_pch_p_P23constant_descriptor_rtx4htab (void *this_obj ATTRIBUTE_UNUSED, 
void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } void gt_pch_p_P24constant_descriptor_tree4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } void gt_pch_p_P14in_named_entry4htab (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct htab * const x ATTRIBUTE_UNUSED = (struct htab *)x_p; if ((*x).entries != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).size); i0++) { if ((void *)((*x).entries) == this_obj) op (&((*x).entries[i0]), cookie); } if ((void *)(x) == this_obj) op (&((*x).entries), cookie); } } /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_varasm_h[] = { { &weak_decls, 1, sizeof (weak_decls), >_ggc_mx_tree_node, >_pch_nx_tree_node }, { &const_desc_htab, 1, sizeof (const_desc_htab), >_ggc_m_P24constant_descriptor_tree4htab, >_pch_n_P24constant_descriptor_tree4htab }, { &initial_trampoline, 1, sizeof (initial_trampoline), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, { &in_named_htab, 1, sizeof (in_named_htab), >_ggc_m_P14in_named_entry4htab, >_pch_n_P14in_named_entry4htab }, { &in_named_name, 1, sizeof (in_named_name), >_ggc_m_S, (gt_pointer_walker) >_pch_n_S }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_varasm_h[] = { { &in_section, 1, sizeof (in_section), NULL, NULL }, { &const_labelno, 1, sizeof (const_labelno), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Virtual array support. Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Cygnus Solutions. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define VARRAY_HDR_SIZE (sizeof (struct varray_head_tag) - sizeof (varray_data)) #ifdef GATHER_STATISTICS /* Store information about each particular varray. */ struct varray_descriptor { const char *name; int allocated; int created; int resized; int copied; }; /* Hashtable mapping varray names to descriptors. */ static htab_t varray_hash; /* Hashtable helpers. 
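The table is keyed on the varray name pointer itself (the names are expected to be string literals that live for the whole compilation), so the hash and equality callbacks below compare addresses rather than string contents.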
*/ static hashval_t hash_descriptor (const void *p) { const struct varray_descriptor *d = p; return htab_hash_pointer (d->name); } static int eq_descriptor (const void *p1, const void *p2) { const struct varray_descriptor *d = p1; return d->name == p2; } /* For given name, return descriptor, create new if needed. */ static struct varray_descriptor * varray_descriptor (const char *name) { struct varray_descriptor **slot; if (!varray_hash) varray_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL); slot = (struct varray_descriptor **) htab_find_slot_with_hash (varray_hash, name, htab_hash_pointer (name), 1); if (*slot) return *slot; *slot = xcalloc (sizeof (**slot), 1); (*slot)->name = name; return *slot; } #endif /* Do not add any more non-GC items here. Please either remove or GC those items that are not GCed. */ static const struct { unsigned char size; bool uses_ggc; } element[NUM_VARRAY_DATA] = { { sizeof (char), 1 }, { sizeof (unsigned char), 1 }, { sizeof (short), 1 }, { sizeof (unsigned short), 1 }, { sizeof (int), 1 }, { sizeof (unsigned int), 1 }, { sizeof (long), 1 }, { sizeof (unsigned long), 1 }, { sizeof (HOST_WIDE_INT), 1 }, { sizeof (unsigned HOST_WIDE_INT), 1 }, { sizeof (void *), 1 }, { sizeof (void *), 0 }, { sizeof (char *), 1 }, { sizeof (struct rtx_def *), 1 }, { sizeof (struct rtvec_def *), 1 }, { sizeof (union tree_node *), 1 }, { sizeof (struct bitmap_head_def *), 1 }, { sizeof (struct reg_info_def *), 0 }, { sizeof (struct const_equiv_data), 0 }, { sizeof (struct basic_block_def *), 1 }, { sizeof (struct elt_list *), 1 }, { sizeof (struct edge_def *), 1 }, { sizeof (tree *), 1 }, }; /* Allocate a virtual array with NUM_ELEMENT elements, each of which is ELEMENT_SIZE bytes long, named NAME. Array elements are zeroed. */ varray_type varray_init (size_t num_elements, enum varray_data_enum element_kind, const char *name) { size_t data_size = num_elements * element[element_kind].size; varray_type ptr; #ifdef GATHER_STATISTICS struct varray_descriptor *desc = varray_descriptor (name); desc->created++; desc->allocated += data_size + VARRAY_HDR_SIZE; #endif if (element[element_kind].uses_ggc) ptr = ggc_alloc_cleared (VARRAY_HDR_SIZE + data_size); else ptr = xcalloc (VARRAY_HDR_SIZE + data_size, 1); ptr->num_elements = num_elements; ptr->elements_used = 0; ptr->type = element_kind; ptr->name = name; return ptr; } /* Grow/shrink the virtual array VA to N elements. Zero any new elements allocated. */ varray_type varray_grow (varray_type va, size_t n) { size_t old_elements = va->num_elements; if (n != old_elements) { size_t elem_size = element[va->type].size; size_t old_data_size = old_elements * elem_size; size_t data_size = n * elem_size; #ifdef GATHER_STATISTICS struct varray_descriptor *desc = varray_descriptor (va->name); varray_type oldva = va; if (data_size > old_data_size) desc->allocated += data_size - old_data_size; desc->resized ++; #endif if (element[va->type].uses_ggc) va = ggc_realloc (va, VARRAY_HDR_SIZE + data_size); else va = xrealloc (va, VARRAY_HDR_SIZE + data_size); va->num_elements = n; if (n > old_elements) memset (&va->data.c[old_data_size], 0, data_size - old_data_size); #ifdef GATHER_STATISTICS if (oldva != va) desc->copied++; #endif } return va; } /* Reset a varray to its original state. */ void varray_clear (varray_type va) { size_t data_size = element[va->type].size * va->num_elements; memset (va->data.c, 0, data_size); va->elements_used = 0; } /* Check the bounds of a varray access. 
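These routines are compiled in only when ENABLE_CHECKING is defined (and the host compiler is recent enough); an out-of-range access reports the array name, its size, the offending index and the source location through internal_error. An illustrative message, with made-up values, would read: virtual array foo[16]: element 20 out of bounds in bar, at foo.c:123.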
*/ #if defined ENABLE_CHECKING && (GCC_VERSION >= 2007) void varray_check_failed (varray_type va, size_t n, const char *file, int line, const char *function) { internal_error ("virtual array %s[%lu]: element %lu out of bounds " "in %s, at %s:%d", va->name, (unsigned long) va->num_elements, (unsigned long) n, function, trim_filename (file), line); } void varray_underflow (varray_type va, const char *file, int line, const char *function) { internal_error ("underflowed virtual array %s in %s, at %s:%d", va->name, function, trim_filename (file), line); } #endif /* Copy varray V2 into varray V1. Both arrays must be the same size and type. */ void varray_copy (varray_type v1, varray_type v2) { size_t data_size; if (v1->type != v2->type) abort (); if (v1->num_elements != v2->num_elements) abort (); data_size = element[v2->type].size * v2->num_elements; memcpy (v1->data.c, v2->data.c, data_size); v1->elements_used = v2->elements_used; } /* Output per-varray statistics. */ #ifdef GATHER_STATISTICS /* Used to accumulate statistics about varray sizes. */ struct output_info { int count; int size; }; /* Called via htab_traverse. Output varray descriptor pointed out by SLOT and update statistics. */ static int print_statistics (void **slot, void *b) { struct varray_descriptor *d = (struct varray_descriptor *) *slot; struct output_info *i = (struct output_info *) b; if (d->allocated) { fprintf (stderr, "%-21s %6d %10d %7d %7d\n", d->name, d->created, d->allocated, d->resized, d->copied); i->size += d->allocated; i->count += d->created; } return 1; } #endif /* Output per-varray memory usage statistics. */ void dump_varray_statistics (void) { #ifdef GATHER_STATISTICS struct output_info info; fprintf (stderr, "\nVARRAY Kind Count Bytes Resized copied\n"); fprintf (stderr, "-------------------------------------------------------\n"); info.count = 0; info.size = 0; htab_traverse (varray_hash, print_statistics, &info); fprintf (stderr, "-------------------------------------------------------\n"); fprintf (stderr, "%-20s %7d %10d\n", "Total", info.count, info.size); fprintf (stderr, "-------------------------------------------------------\n"); #endif } /* Vector API for GNU compiler. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Nathan Sidwell This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Vector API for GNU compiler. Copyright (C) 2004 Free Software Foundation, Inc. Contributed by Nathan Sidwell This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_VEC_H #define GCC_VEC_H /* The macros here implement a set of templated vector types and associated interfaces. These templates are implemented with macros, as we're not in C++ land. The interface functions are typesafe and use static inline functions, sometimes backed by out-of-line generic functions. The vectors are designed to interoperate with the GTY machinery. Because of the different behaviour of objects and of pointers to objects, there are two flavours. One to deal with a vector of pointers to objects, and one to deal with a vector of objects themselves. Both of these pass pointers to objects around -- in the former case the pointers are stored into the vector and in the latter case the pointers are dereferenced and the objects copied into the vector. Therefore, when using a vector of pointers, the objects pointed to must be long lived, but when dealing with a vector of objects, the source objects need not be. The vectors are implemented using the trailing array idiom, thus they are not resizeable without changing the address of the vector object itself. This means you cannot have variables or fields of vector type -- always use a pointer to a vector. The one exception is the final field of a structure, which could be a vector type. You will have to use the embedded_alloc call to create such objects, and they will probably not be resizeable (so don't use the 'safe' allocation variants). The trailing array idiom is used (rather than a pointer to an array of data), because, if we allow NULL to also represent an empty vector, empty vectors occupy minimal space in the structure containing them. Each operation that increases the number of active elements is available in 'quick' and 'safe' variants. The former presumes that there is sufficient allocated space for the operation to succeed (it aborts if there is not). The latter will reallocate the vector, if needed. Reallocation causes an exponential increase in vector size. If you know you will be adding N elements, it would be more efficient to use the reserve operation before adding the elements with the 'quick' operation. You should prefer the push and pop operations, as they append and remove from the end of the vector. The insert and remove operations allow you to change elements in the middle of the vector. There are two remove operations, one which preserves the element ordering 'ordered_remove', and one which does not 'unordered_remove'. The latter function copies the end element into the removed slot, rather than invoke a memmove operation. Vector types are defined using a DEF_VEC_x(TYPEDEF) macro, and variables of vector type are declared using a VEC(TYPEDEF) macro. The 'x' letter indicates whether TYPEDEF is a pointer (P) or object (O) type. An example of their use would be, DEF_VEC_P(tree); // define a vector of tree pointers. This must // appear at file scope. struct my_struct { VEC(tree) *v; // A (pointer to) a vector of tree pointers. }; struct my_struct *s; if (VEC_length(tree,s)) { we have some contents } VEC_safe_push(tree,s,decl); // append some decl onto the end for (ix = 0; (t = VEC_iterate(tree,s,ix)); ix++) { do something with t } */ /* Macros to invoke API calls. A single macro works for both pointer and object vectors, but the argument and return types might well be different. 
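   For illustration only, continuing the DEF_VEC_P(tree) example above and
   assuming a caller-supplied count n and array decls[]: when the number of
   elements to be added is known in advance, reserving once and then using
   the 'quick' variant avoids repeated reallocation,

      VEC_reserve (tree, s, n);
      for (ix = 0; ix < n; ix++)
        VEC_quick_push (tree, s, decls[ix]);

   Under the macros defined below these invocations expand, by token
   pasting, to VEC_tree_reserve (&(s), n) and VEC_tree_quick_push (s,
   decls[ix]), the typed functions generated by the DEF_VEC_P expansion.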
In each macro, TDEF is the typedef of the vector elements. Some of these macros pass the vector, V, by reference (by taking its address), this is noted in the descriptions. */ /* Length of vector size_t VEC_T_length(const VEC(T) *v); Return the number of active elements in V. V can be NULL, in which case zero is returned. */ #define VEC_length(TDEF,V) (VEC_OP(TDEF,length)(V)) /* Get the final element of the vector. T VEC_T_last(VEC(T) *v); // Pointer T *VEC_T_last(VEC(T) *v); // Object Return the final element. If V is empty, abort. */ #define VEC_last(TDEF,V) (VEC_OP(TDEF,last)(V)) /* Index into vector T VEC_T_index(VEC(T) *v, size_t ix); // Pointer T *VEC_T_index(VEC(T) *v, size_t ix); // Object Return the IX'th element. If IX is outside the domain of V, abort. */ #define VEC_index(TDEF,V,I) (VEC_OP(TDEF,index)(V,I)) /* Iterate over vector T VEC_T_index(VEC(T) *v, size_t ix); // Pointer T *VEC_T_index(VEC(T) *v, size_t ix); // Object Return the IX'th element or NULL. Use this to iterate over the elements of a vector as follows, for (ix = 0; (ptr = VEC_iterate(T,v,ix)); ix++) continue; */ #define VEC_iterate(TDEF,V,I) (VEC_OP(TDEF,iterate)(V,I)) /* Allocate new vector. VEC(T) *VEC_T_alloc(size_t reserve); Allocate a new vector with space for RESERVE objects. */ #define VEC_alloc(TDEF,A) (VEC_OP(TDEF,alloc)(A)) /* Allocate new vector offset within a structure void *VEC_T_embedded_alloc(size_t offset, size_t reserve); Allocate a new vector which is at offset OFFSET within a structure, and with space for RESERVE objects. Return a pointer to the start of the structure containing the vector. Naturally, the vector must be the last member of the structure. */ #define VEC_embedded_alloc(TDEF,O,A) (VEC_OP(TDEF,embedded_alloc)(O,A)) /* Reserve space. void VEC_T_reserve(VEC(T) *&v, size_t reserve); Ensure that V has at least RESERVE slots available. Note this can cause V to be reallocated. */ #define VEC_reserve(TDEF,V,R) (VEC_OP(TDEF,reserve)(&(V),R)) /* Push object with no reallocation T *VEC_T_quick_push (VEC(T) *v, T obj); // Pointer T *VEC_T_quick_push (VEC(T) *v, T *obj); // Object Push a new element onto the end, returns a pointer to the slot filled in. For object vectors, the new value can be NULL, in which case NO initialization is performed. Aborts if there is insufficient space in the vector. */ #define VEC_quick_push(TDEF,V,O) (VEC_OP(TDEF,quick_push)(V,O)) /* Push object with reallocation T *VEC_T_safe_push (VEC(T) *&v, T obj); // Pointer T *VEC_T_safe_push (VEC(T) *&v, T *obj); // Object Push a new element onto the end, returns a pointer to the slot filled in. For object vectors, the new value can be NULL, in which case NO initialization is performed. Reallocates V, if needed. */ #define VEC_safe_push(TDEF,V,O) (VEC_OP(TDEF,safe_push)(&(V),O)) /* Pop element off end T VEC_T_pop (VEC(T) *v); // Pointer void VEC_T_pop (VEC(T) *v); // Object Pop the last element off the end. Returns the element popped, for pointer vectors. */ #define VEC_pop(TDEF,V) (VEC_OP(TDEF,pop)(V)) /* Replace element T VEC_T_replace (VEC(T) *v, size_t ix, T val); // Pointer T *VEC_T_replace (VEC(T) *v, size_t ix, T *val); // Object Replace the IXth element of V with a new value, VAL. For pointer vectors returns the original value. For object vectors returns a pointer to the new value. For object vectors the new value can be NULL, in which case no overwriting of the slot is actually performed. 
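   (Passing NULL to an object-vector replace is therefore a cheap way to
   obtain the address of slot IX so the caller can fill it in directly; for
   example, assuming a hypothetical object type foo_t declared with
   DEF_VEC_O(foo_t), the call foo_t *slot = VEC_replace (foo_t, v, ix, NULL);
   returns the slot address without touching its contents.)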
*/ #define VEC_replace(TDEF,V,I,O) (VEC_OP(TDEF,replace)(V,I,O)) /* Insert object with no reallocation T *VEC_T_quick_insert (VEC(T) *v, size_t ix, T val); // Pointer T *VEC_T_quick_insert (VEC(T) *v, size_t ix, T *val); // Object Insert an element, VAL, at the IXth position of V. Return a pointer to the slot created. For vectors of object, the new value can be NULL, in which case no initialization of the inserted slot takes place. Aborts if there is insufficient space. */ #define VEC_quick_insert(TDEF,V,I,O) (VEC_OP(TDEF,quick_insert)(V,I,O)) /* Insert object with reallocation T *VEC_T_safe_insert (VEC(T) *&v, size_t ix, T val); // Pointer T *VEC_T_safe_insert (VEC(T) *&v, size_t ix, T *val); // Object Insert an element, VAL, at the IXth position of V. Return a pointer to the slot created. For vectors of object, the new value can be NULL, in which case no initialization of the inserted slot takes place. Reallocate V, if necessary. */ #define VEC_safe_insert(TDEF,V,I,O) (VEC_OP(TDEF,safe_insert)(&(V),I,O)) /* Remove element retaining order T VEC_T_ordered_remove (VEC(T) *v, size_t ix); // Pointer void VEC_T_ordered_remove (VEC(T) *v, size_t ix); // Object Remove an element from the IXth position of V. Ordering of remaining elements is preserverd. For pointer vectors returns the removed object. This is an O(N) operation due to a memmove. */ #define VEC_ordered_remove(TDEF,V,I) (VEC_OP(TDEF,ordered_remove)(V,I)) /* Remove element destroying order T VEC_T_unordered_remove (VEC(T) *v, size_t ix); // Pointer void VEC_T_unordered_remove (VEC(T) *v, size_t ix); // Object Remove an element from the IXth position of V. Ordering of remaining elements is destroyed. For pointer vectors returns the removed object. This is an O(1) operation. */ #define VEC_unordered_remove(TDEF,V,I) (VEC_OP(TDEF,unordered_remove)(V,I)) #if !IN_GENGTYPE #ifndef ONE_COMPILATION_UNIT /* auto-host.h. Generated by configure. */ /* config.in. Generated from configure.ac by autoheader. */ /* 1234 = LIL_ENDIAN, 4321 = BIGENDIAN */ #define BYTEORDER 1234 /* Define as the number of bits in a byte, if \`limits.h' doesn't. */ /* #undef CHAR_BIT */ /* Define 0/1 to force the choice for exception handling model. */ /* #undef CONFIG_SJLJ_EXCEPTIONS */ /* Define to enable the use of a default assembler. */ /* #undef DEFAULT_ASSEMBLER */ /* Define to enable the use of a default linker. */ /* #undef DEFAULT_LINKER */ /* Define if you want to use __cxa_atexit, rather than atexit, to register C++ destructors for local statics and global objects. This is essential for fully standards-compliant handling of destructors, but requires __cxa_atexit in libc. */ /* #undef DEFAULT_USE_CXA_ATEXIT */ /* Define if you want more run-time sanity checks. This one gets a grab bag of miscellaneous but relatively cheap checks. */ /* #undef ENABLE_CHECKING */ /* Define if you want fold checked that it never destructs its argument. This is quite expensive. */ /* #undef ENABLE_FOLD_CHECKING */ /* Define if you want the garbage collector to operate in maximally paranoid mode, validating the entire heap and collecting garbage at every opportunity. This is extremely expensive. */ /* #undef ENABLE_GC_ALWAYS_COLLECT */ /* Define if you want the garbage collector to do object poisoning and other memory allocation checks. This is quite expensive. */ /* #undef ENABLE_GC_CHECKING */ /* Define to 1 if translation of program messages to the user's native language is requested. 
*/ #define ENABLE_NLS 1 /* Define if you want all operations on RTL (the basic data structure of the optimizer and back end) to be checked for dynamic type safety at runtime. This is quite expensive. */ /* #undef ENABLE_RTL_CHECKING */ /* Define if you want RTL flag accesses to be checked against the RTL codes that are supported for each access macro. This is relatively cheap. */ /* #undef ENABLE_RTL_FLAG_CHECKING */ /* Define if you want all operations on trees (the basic data structure of the front ends) to be checked for dynamic type safety at runtime. This is moderately expensive. The tree browser debugging routines will also be enabled by this option. */ /* #undef ENABLE_TREE_CHECKING */ /* Define if you want to run subprograms and generated programs through valgrind (a memory checker). This is extremely expensive. */ /* #undef ENABLE_VALGRIND_CHECKING */ /* Define to 1 if installation paths should be looked up in Windows32 Registry. Ignored on non windows32 hosts. */ /* #undef ENABLE_WIN32_REGISTRY */ /* Define to the name of a file containing a list of extra machine modes for this architecture. */ #define EXTRA_MODES_FILE "config/i386/i386-modes.def" /* Define to enable detailed memory allocation stats gathering. */ /* #undef GATHER_STATISTICS */ /* Define to the type of elements in the array set by `getgroups'. Usually this is either `int' or `gid_t'. */ #define GETGROUPS_T gid_t /* Define to 1 if you have the `alphasort' function. */ #define HAVE_ALPHASORT 1 /* Define if your assembler supports dwarf2 .file/.loc directives, and preserves file table indices exactly as given. */ #define HAVE_AS_DWARF2_DEBUG_LINE 1 /* Define if your assembler supports explicit relocations. */ /* #undef HAVE_AS_EXPLICIT_RELOCS */ /* Define if your assembler supports the --gdwarf2 option. */ #define HAVE_AS_GDWARF2_DEBUG_FLAG 1 /* Define true if the assembler supports '.long foo@GOTOFF'. */ #define HAVE_AS_GOTOFF_IN_DATA 1 /* Define if your assembler supports the --gstabs option. */ #define HAVE_AS_GSTABS_DEBUG_FLAG 1 /* Define if your assembler supports the Sun syntax for cmov. */ /* #undef HAVE_AS_IX86_CMOV_SUN_SYNTAX */ /* Define if your assembler supports .sleb128 and .uleb128. */ #define HAVE_AS_LEB128 1 /* Define if your assembler supports ltoffx and ldxmov relocations. */ /* #undef HAVE_AS_LTOFFX_LDXMOV_RELOCS */ /* Define if your assembler supports mfcr field. */ /* #undef HAVE_AS_MFCRF */ /* Define if your assembler supports the -no-mul-bug-abort option. */ /* #undef HAVE_AS_NO_MUL_BUG_ABORT_OPTION */ /* Define if your assembler supports offsetable %lo(). */ /* #undef HAVE_AS_OFFSETABLE_LO10 */ /* Define if your assembler supports .register. */ /* #undef HAVE_AS_REGISTER_PSEUDO_OP */ /* Define if your assembler supports -relax option. */ /* #undef HAVE_AS_RELAX_OPTION */ /* Define if your assembler and linker support unaligned PC relative relocs. */ /* #undef HAVE_AS_SPARC_UA_PCREL */ /* Define if your assembler and linker support unaligned PC relative relocs against hidden symbols. */ /* #undef HAVE_AS_SPARC_UA_PCREL_HIDDEN */ /* Define if your assembler supports thread-local storage. */ /* #undef HAVE_AS_TLS */ /* Define to 1 if you have the `atoll' function. */ #define HAVE_ATOLL 1 /* Define to 1 if you have the `atoq' function. */ /* #undef HAVE_ATOQ */ /* Define if BANSHEE is available */ /* #undef HAVE_BANSHEE */ /* Define to 1 if you have the `clock' function. */ #define HAVE_CLOCK 1 /* Define if defines clock_t. 
*/ #define HAVE_CLOCK_T 1 /* Define to 1 if we found a declaration for 'abort', otherwise define to 0. */ #define HAVE_DECL_ABORT 1 /* Define to 1 if we found a declaration for 'atof', otherwise define to 0. */ #define HAVE_DECL_ATOF 1 /* Define to 1 if we found a declaration for 'atol', otherwise define to 0. */ #define HAVE_DECL_ATOL 1 /* Define to 1 if we found a declaration for 'basename', otherwise define to 0. */ #define HAVE_DECL_BASENAME 1 /* Define to 1 if we found a declaration for 'calloc', otherwise define to 0. */ #define HAVE_DECL_CALLOC 1 /* Define to 1 if we found a declaration for 'clock', otherwise define to 0. */ #define HAVE_DECL_CLOCK 1 /* Define to 1 if we found a declaration for 'errno', otherwise define to 0. */ #define HAVE_DECL_ERRNO 1 /* Define to 1 if we found a declaration for 'fprintf_unlocked', otherwise define to 0. */ #define HAVE_DECL_FPRINTF_UNLOCKED 0 /* Define to 1 if we found a declaration for 'fputs_unlocked', otherwise define to 0. */ #define HAVE_DECL_FPUTS_UNLOCKED 1 /* Define to 1 if we found a declaration for 'free', otherwise define to 0. */ #define HAVE_DECL_FREE 1 /* Define to 1 if we found a declaration for 'fwrite_unlocked', otherwise define to 0. */ #define HAVE_DECL_FWRITE_UNLOCKED 1 /* Define to 1 if we found a declaration for 'getcwd', otherwise define to 0. */ #define HAVE_DECL_GETCWD 1 /* Define to 1 if we found a declaration for 'getenv', otherwise define to 0. */ #define HAVE_DECL_GETENV 1 /* Define to 1 if we found a declaration for 'getopt', otherwise define to 0. */ #define HAVE_DECL_GETOPT 0 /* Define to 1 if we found a declaration for 'getrlimit', otherwise define to 0. */ #define HAVE_DECL_GETRLIMIT 1 /* Define to 1 if we found a declaration for 'getrusage', otherwise define to 0. */ #define HAVE_DECL_GETRUSAGE 1 /* Define to 1 if we found a declaration for 'getwd', otherwise define to 0. */ #define HAVE_DECL_GETWD 1 /* Define to 1 if we found a declaration for 'ldgetname', otherwise define to 0. */ #define HAVE_DECL_LDGETNAME 0 /* Define to 1 if we found a declaration for 'malloc', otherwise define to 0. */ #define HAVE_DECL_MALLOC 1 /* Define to 1 if we found a declaration for 'putc_unlocked', otherwise define to 0. */ #define HAVE_DECL_PUTC_UNLOCKED 1 /* Define to 1 if we found a declaration for 'realloc', otherwise define to 0. */ #define HAVE_DECL_REALLOC 1 /* Define to 1 if we found a declaration for 'sbrk', otherwise define to 0. */ #define HAVE_DECL_SBRK 1 /* Define to 1 if we found a declaration for 'setrlimit', otherwise define to 0. */ #define HAVE_DECL_SETRLIMIT 1 /* Define to 1 if we found a declaration for 'snprintf', otherwise define to 0. */ #define HAVE_DECL_SNPRINTF 1 /* Define to 1 if we found a declaration for 'strsignal', otherwise define to 0. */ #define HAVE_DECL_STRSIGNAL 1 /* Define to 1 if we found a declaration for 'strstr', otherwise define to 0. */ #define HAVE_DECL_STRSTR 1 /* Define to 1 if we found a declaration for 'times', otherwise define to 0. */ #define HAVE_DECL_TIMES 1 /* Define to 1 if we found a declaration for 'vasprintf', otherwise define to 0. */ #define HAVE_DECL_VASPRINTF 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_DIRECT_H */ /* Define to 1 if you have the `dup2' function. */ #define HAVE_DUP2 1 /* Define to 1 if you have the header file. */ #define HAVE_FCNTL_H 1 /* Define to 1 if you have the `fork' function. */ #define HAVE_FORK 1 /* Define to 1 if you have the `fprintf_unlocked' function. 
*/ /* #undef HAVE_FPRINTF_UNLOCKED */ /* Define to 1 if you have the `fputc_unlocked' function. */ #define HAVE_FPUTC_UNLOCKED 1 /* Define to 1 if you have the `fputs_unlocked' function. */ #define HAVE_FPUTS_UNLOCKED 1 /* Define to 1 if you have the `fwrite_unlocked' function. */ #define HAVE_FWRITE_UNLOCKED 1 /* Define if your assembler supports .balign and .p2align. */ #define HAVE_GAS_BALIGN_AND_P2ALIGN 1 /* Define if your assembler uses the new HImode fild and fist notation. */ #define HAVE_GAS_FILDS_FISTS 1 /* Define if your assembler and linker support .hidden. */ /* #undef HAVE_GAS_HIDDEN */ /* Define if your assembler supports specifying the maximum number of bytes to skip when using the GAS .p2align command. */ #define HAVE_GAS_MAX_SKIP_P2ALIGN 1 /* Define if your assembler and linker support 32-bit section relative relocs via '.secrel32 label'. */ /* #undef HAVE_GAS_PE_SECREL32_RELOC */ /* Define 0/1 if your assembler supports marking sections with SHF_MERGE flag. */ #define HAVE_GAS_SHF_MERGE 1 /* Define if your assembler supports .subsection and .subsection -1 starts emitting at the beginning of your section. */ #define HAVE_GAS_SUBSECTION_ORDERING 1 /* Define if your assembler supports .weak. */ #define HAVE_GAS_WEAK 1 /* Define to 1 if you have the `getrlimit' function. */ #define HAVE_GETRLIMIT 1 /* Define to 1 if you have the `getrusage' function. */ #define HAVE_GETRUSAGE 1 /* Define to 1 if you have the `gettimeofday' function. */ #define HAVE_GETTIMEOFDAY 1 /* Define if you have the iconv() function. */ #define HAVE_ICONV 1 /* Define to 1 if you have the header file. */ #define HAVE_ICONV_H 1 /* Define .init_array/.fini_array sections are available and working. */ /* #undef HAVE_INITFINI_ARRAY */ /* Define if you have a working header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `kill' function. */ #define HAVE_KILL 1 /* Define to 1 if you have the header file. */ #define HAVE_LANGINFO_H 1 /* Define if your file defines LC_MESSAGES. */ #define HAVE_LC_MESSAGES 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_LDFCN_H */ /* Define if your linker supports --as-needed and --no-as-needed options. */ /* #undef HAVE_LD_AS_NEEDED */ /* Define if your linker supports --eh-frame-hdr option. */ #define HAVE_LD_EH_FRAME_HDR 1 /* Define if your linker supports -pie option. */ /* #undef HAVE_LD_PIE */ /* Define if your linker links a mix of read-only and read-write sections into a read-write section. */ #define HAVE_LD_RO_RW_SECTION_MIXING 1 /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_LOCALE_H 1 /* Define if your compiler supports the \`long long' type. */ #define HAVE_LONG_LONG 1 /* Define to 1 if you have the header file. */ #define HAVE_MALLOC_H 1 /* Define to 1 if you have the `mbstowcs' function. */ #define HAVE_MBSTOWCS 1 /* Define if valgrind's memcheck.h header is installed. */ /* #undef HAVE_MEMCHECK_H */ /* Define to 1 if you have the header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `mincore' function. */ #define HAVE_MINCORE 1 /* Define to 1 if you have the `mmap' function. */ #define HAVE_MMAP 1 /* Define if mmap with MAP_ANON(YMOUS) works. */ #define HAVE_MMAP_ANON 1 /* Define if mmap of /dev/zero works. */ #define HAVE_MMAP_DEV_ZERO 1 /* Define if read-only mmap of a plain file works. */ #define HAVE_MMAP_FILE 1 /* Define to 1 if you have the `nl_langinfo' function. 
*/ #define HAVE_NL_LANGINFO 1 /* Define if printf supports "%p". */ #define HAVE_PRINTF_PTR 1 /* Define to 1 if you have the `putc_unlocked' function. */ #define HAVE_PUTC_UNLOCKED 1 /* Define to 1 if you have the `scandir' function. */ #define HAVE_SCANDIR 1 /* Define to 1 if you have the `setlocale' function. */ #define HAVE_SETLOCALE 1 /* Define to 1 if you have the `setrlimit' function. */ #define HAVE_SETRLIMIT 1 /* Define to 1 if you have the header file. */ #define HAVE_STDDEF_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the `strsignal' function. */ #define HAVE_STRSIGNAL 1 /* Define if defines struct tms. */ #define HAVE_STRUCT_TMS 1 /* Define to 1 if you have the `sysconf' function. */ #define HAVE_SYSCONF 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_FILE_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_MMAN_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_PARAM_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_RESOURCE_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIMES_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have that is POSIX.1 compatible. */ #define HAVE_SYS_WAIT_H 1 /* Define to 1 if you have the `times' function. */ #define HAVE_TIMES 1 /* Define to 1 if you have the header file. */ #define HAVE_TIME_H 1 /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 /* Define if valgrind's valgrind/memcheck.h header is installed. */ /* #undef HAVE_VALGRIND_MEMCHECK_H */ /* Define to 1 if you have the `vfork' function. */ #define HAVE_VFORK 1 /* Define to 1 if you have the header file. */ /* #undef HAVE_VFORK_H */ /* Define to 1 if you have the header file. */ #define HAVE_WCHAR_H 1 /* Define to 1 if you have the `wcswidth' function. */ #define HAVE_WCSWIDTH 1 /* Define to 1 if `fork' works. */ #define HAVE_WORKING_FORK 1 /* Define this macro if mbstowcs does not crash when its first argument is NULL. */ #define HAVE_WORKING_MBSTOWCS 1 /* Define to 1 if `vfork' works. */ #define HAVE_WORKING_VFORK 1 /* Define if your compiler supports the \`__int64' type. */ /* #undef HAVE___INT64 */ /* Define if the host machine stores words of multi-word integers in big-endian order. */ /* #undef HOST_WORDS_BIG_ENDIAN */ /* Define as const if the declaration of iconv() needs const. */ #define ICONV_CONST /* Define if host mkdir takes a single argument. */ /* #undef MKDIR_TAKES_ONE_ARG */ /* Define to 1 if HOST_WIDE_INT must be 64 bits wide (see hwint.h). */ /* #undef NEED_64BIT_HOST_WIDE_INT */ /* Define to 1 if your C compiler doesn't accept -c and -o together. */ /* #undef NO_MINUS_C_MINUS_O */ /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "" /* Define to the full name of this package. */ #define PACKAGE_NAME "" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "" /* Define to the version of this package. 
*/ #define PACKAGE_VERSION "" /* Define to PREFIX/include if cpp should also search that directory. */ #define PREFIX_INCLUDE_DIR "/scratch2/smcc-extras/build/gcc-cvs/install/include" /* The number of bytes in type int */ #define SIZEOF_INT 4 /* The number of bytes in type long */ #define SIZEOF_LONG 4 /* The number of bytes in type long long */ #define SIZEOF_LONG_LONG 8 /* The number of bytes in type short */ #define SIZEOF_SHORT 2 /* The number of bytes in type void * */ #define SIZEOF_VOID_P 4 /* The number of bytes in type __int64 */ /* #undef SIZEOF___INT64 */ /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Define if you can safely include both and . */ #define STRING_WITH_STRINGS 1 /* Define to 1 if you can safely include both and . */ #define TIME_WITH_SYS_TIME 1 /* Define if your assembler mis-optimizes .eh_frame data. */ /* #undef USE_AS_TRADITIONAL_FORMAT */ /* Define if gcc should use -lunwind. */ /* #undef USE_LIBUNWIND_EXCEPTIONS */ /* Define if location_t is fileline integer cookie. */ /* #undef USE_MAPPED_LOCATION */ /* Define to be the last portion of registry key on windows hosts. */ /* #undef WIN32_REGISTRY_KEY */ /* whether byteorder is bigendian */ /* #undef WORDS_BIGENDIAN */ /* Always define this when using the GNU C Library */ #define _GNU_SOURCE 1 /* Define to `int' if doesn't define. */ /* #undef gid_t */ /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. */ #ifndef __cplusplus /* #undef inline */ #endif /* Define to `int' if does not define. */ /* #undef pid_t */ /* Define to \`long' if doesn't define. */ /* #undef rlim_t */ /* Define to `int' if does not define. */ /* #undef ssize_t */ /* Define to `int' if doesn't define. */ /* #undef uid_t */ /* Define as `fork' if `vfork' does not work. */ /* #undef vfork */ #endif /* Reallocate an array of elements with prefix. */ extern void *vec_p_reserve (void *, size_t); extern void *vec_o_reserve (void *, size_t, size_t, size_t); extern void *vec_embedded_alloc (size_t, size_t, size_t, size_t); #if ENABLE_CHECKING extern void vec_assert_fail (const char *, const char *, const char *, size_t, const char *) ATTRIBUTE_NORETURN; #define VEC_ASSERT_FAIL(OP,VEC) \ vec_assert_fail (OP,#VEC,__FILE__,__LINE__,__FUNCTION__) #define VEC_ASSERT(EXPR,OP,TDEF) \ (void)((EXPR) ? 0 : (VEC_ASSERT_FAIL(OP,VEC(TDEF)), 0)) #else #define VEC_ASSERT(EXPR,OP,TYPE) (void)(EXPR) #endif #define VEC(TDEF) VEC_##TDEF #define VEC_OP(TDEF,OP) VEC_OP_(VEC(TDEF),OP) #define VEC_OP_(VEC,OP) VEC_OP__(VEC,OP) #define VEC_OP__(VEC,OP) VEC ## _ ## OP #else /* IN_GENGTYPE */ #define VEC(TDEF) VEC_ TDEF #define VEC_STRINGIFY(X) VEC_STRINGIFY_(X) #define VEC_STRINGIFY_(X) #X #undef GTY #endif /* IN_GENGTYPE */ #define VEC_TDEF(TDEF) \ typedef struct VEC (TDEF) GTY(()) \ { \ size_t num; \ size_t alloc; \ TDEF GTY ((length ("%h.num"))) vec[1]; \ } VEC (TDEF) /* Vector of pointer to object. */ #if IN_GENGTYPE {"DEF_VEC_P", VEC_STRINGIFY (VEC_TDEF (#)) ";", NULL}, #else #define DEF_VEC_P(TDEF) \ VEC_TDEF (TDEF); \ \ static inline size_t VEC_OP (TDEF,length) \ (const VEC (TDEF) *vec_) \ { \ return vec_ ? 
vec_->num : 0; \ } \ \ static inline TDEF VEC_OP (TDEF,last) \ (const VEC (TDEF) *vec_) \ { \ VEC_ASSERT (vec_ && vec_->num, "last", TDEF); \ \ return vec_->vec[vec_->num - 1]; \ } \ \ static inline TDEF VEC_OP (TDEF,index) \ (const VEC (TDEF) *vec_, size_t ix_) \ { \ VEC_ASSERT (vec_ && ix_ < vec_->num, "index", TDEF); \ \ return vec_->vec[ix_]; \ } \ \ static inline TDEF VEC_OP (TDEF,iterate) \ (const VEC (TDEF) *vec_, size_t ix_) \ { \ return vec_ && ix_ < vec_->num ? vec_->vec[ix_] : NULL; \ } \ \ static inline VEC (TDEF) *VEC_OP (TDEF,alloc) \ (size_t alloc_) \ { \ return vec_p_reserve (NULL, alloc_ - !alloc_); \ } \ \ static inline void *VEC_OP (TDEF,embedded_alloc) \ (size_t offset_, size_t alloc_) \ { \ return vec_embedded_alloc (offset_, offsetof (VEC(TDEF),vec), \ sizeof (TDEF), alloc_); \ } \ \ static inline void VEC_OP (TDEF,reserve) \ (VEC (TDEF) **vec_, size_t alloc_) \ { \ *vec_ = vec_p_reserve (*vec_, alloc_); \ } \ \ static inline TDEF *VEC_OP (TDEF,quick_push) \ (VEC (TDEF) *vec_, TDEF obj_) \ { \ TDEF *slot_; \ \ VEC_ASSERT (vec_->num < vec_->alloc, "push", TDEF); \ slot_ = &vec_->vec[vec_->num++]; \ *slot_ = obj_; \ \ return slot_; \ } \ \ static inline TDEF *VEC_OP (TDEF,safe_push) \ (VEC (TDEF) **vec_, TDEF obj_) \ { \ if (!*vec_ || (*vec_)->num == (*vec_)->alloc) \ VEC_OP (TDEF,reserve) (vec_, ~(size_t)0); \ \ return VEC_OP (TDEF,quick_push) (*vec_, obj_); \ } \ \ static inline TDEF VEC_OP (TDEF,pop) \ (VEC (TDEF) *vec_) \ { \ TDEF obj_; \ \ VEC_ASSERT (vec_->num, "pop", TDEF); \ obj_ = vec_->vec[--vec_->num]; \ \ return obj_; \ } \ \ static inline TDEF VEC_OP (TDEF,replace) \ (VEC (TDEF) *vec_, size_t ix_, TDEF obj_) \ { \ TDEF old_obj_; \ \ VEC_ASSERT (ix_ < vec_->num, "replace", TDEF); \ old_obj_ = vec_->vec[ix_]; \ vec_->vec[ix_] = obj_; \ \ return old_obj_; \ } \ \ static inline TDEF *VEC_OP (TDEF,quick_insert) \ (VEC (TDEF) *vec_, size_t ix_, TDEF obj_) \ { \ TDEF *slot_; \ \ VEC_ASSERT (vec_->num < vec_->alloc, "insert", TDEF); \ VEC_ASSERT (ix_ <= vec_->num, "insert", TDEF); \ slot_ = &vec_->vec[ix_]; \ memmove (slot_ + 1, slot_, vec_->num++ - ix_); \ *slot_ = obj_; \ \ return slot_; \ } \ \ static inline TDEF *VEC_OP (TDEF,safe_insert) \ (VEC (TDEF) **vec_, size_t ix_, TDEF obj_) \ { \ if (!*vec_ || (*vec_)->num == (*vec_)->alloc) \ VEC_OP (TDEF,reserve) (vec_, ~(size_t)0); \ \ return VEC_OP (TDEF,quick_insert) (*vec_, ix_, obj_); \ } \ \ static inline TDEF VEC_OP (TDEF,ordered_remove) \ (VEC (TDEF) *vec_, size_t ix_) \ { \ TDEF *slot_; \ TDEF obj_; \ \ VEC_ASSERT (ix_ < vec_->num, "remove", TDEF); \ slot_ = &vec_->vec[ix_]; \ obj_ = *slot_; \ memmove (slot_, slot_ + 1, --vec_->num - ix_); \ \ return obj_; \ } \ \ static inline TDEF VEC_OP (TDEF,unordered_remove) \ (VEC (TDEF) *vec_, size_t ix_) \ { \ TDEF *slot_; \ TDEF obj_; \ \ VEC_ASSERT (ix_ < vec_->num, "remove", TDEF); \ slot_ = &vec_->vec[ix_]; \ obj_ = *slot_; \ *slot_ = vec_->vec[--vec_->num]; \ \ return obj_; \ } \ \ struct vec_swallow_trailing_semi #endif /* Vector of object. */ #if IN_GENGTYPE {"DEF_VEC_O", VEC_STRINGIFY (VEC_TDEF (#)) ";", NULL}, #else #define DEF_VEC_O(TDEF) \ VEC_TDEF (TDEF); \ \ static inline size_t VEC_OP (TDEF,length) \ (const VEC (TDEF) *vec_) \ { \ return vec_ ? 
vec_->num : 0; \ } \ \ static inline TDEF *VEC_OP (TDEF,last) \ (VEC (TDEF) *vec_) \ { \ VEC_ASSERT (vec_ && vec_->num, "last", TDEF); \ \ return &vec_->vec[vec_->num - 1]; \ } \ \ static inline TDEF *VEC_OP (TDEF,index) \ (VEC (TDEF) *vec_, size_t ix_) \ { \ VEC_ASSERT (vec_ && ix_ < vec_->num, "index", TDEF); \ \ return &vec_->vec[ix_]; \ } \ \ static inline TDEF *VEC_OP (TDEF,iterate) \ (VEC (TDEF) *vec_, size_t ix_) \ { \ return vec_ && ix_ < vec_->num ? &vec_->vec[ix_] : NULL; \ } \ \ static inline VEC (TDEF) *VEC_OP (TDEF,alloc) \ (size_t alloc_) \ { \ return vec_o_reserve (NULL, alloc_ - !alloc_, \ offsetof (VEC(TDEF),vec), sizeof (TDEF)); \ } \ \ static inline void *VEC_OP (TDEF,embedded_alloc) \ (size_t offset_, size_t alloc_) \ { \ return vec_embedded_alloc (offset_, offsetof (VEC(TDEF),vec), \ sizeof (TDEF), alloc_); \ } \ \ static inline void VEC_OP (TDEF,reserve) \ (VEC (TDEF) **vec_, size_t alloc_) \ { \ *vec_ = vec_o_reserve (*vec_, alloc_, \ offsetof (VEC(TDEF),vec), sizeof (TDEF)); \ } \ \ static inline TDEF *VEC_OP (TDEF,quick_push) \ (VEC (TDEF) *vec_, const TDEF *obj_) \ { \ TDEF *slot_; \ \ VEC_ASSERT (vec_->num < vec_->alloc, "push", TDEF); \ slot_ = &vec_->vec[vec_->num++]; \ if (obj_) \ *slot_ = *obj_; \ \ return slot_; \ } \ \ static inline TDEF *VEC_OP (TDEF,safe_push) \ (VEC (TDEF) **vec_, const TDEF *obj_) \ { \ if (!*vec_ || (*vec_)->num == (*vec_)->alloc) \ VEC_OP (TDEF,reserve) (vec_, ~(size_t)0); \ \ return VEC_OP (TDEF,quick_push) (*vec_, obj_); \ } \ \ static inline void VEC_OP (TDEF,pop) \ (VEC (TDEF) *vec_) \ { \ VEC_ASSERT (vec_->num, "pop", TDEF); \ vec_->vec[--vec_->num]; \ } \ \ static inline TDEF *VEC_OP (TDEF,replace) \ (VEC (TDEF) *vec_, size_t ix_, const TDEF *obj_) \ { \ TDEF *slot_; \ \ VEC_ASSERT (ix_ < vec_->num, "replace", TDEF); \ slot_ = &vec_->vec[ix_]; \ if (obj_) \ *slot_ = *obj_; \ \ return slot_; \ } \ \ static inline TDEF *VEC_OP (TDEF,quick_insert) \ (VEC (TDEF) *vec_, size_t ix_, const TDEF *obj_) \ { \ TDEF *slot_; \ \ VEC_ASSERT (vec_->num < vec_->alloc, "insert", TDEF); \ VEC_ASSERT (ix_ <= vec_->num, "insert", TDEF); \ slot_ = &vec_->vec[ix_]; \ memmove (slot_ + 1, slot_, vec_->num++ - ix_); \ if (obj_) \ *slot_ = *obj_; \ \ return slot_; \ } \ \ static inline TDEF *VEC_OP (TDEF,safe_insert) \ (VEC (TDEF) **vec_, size_t ix_, const TDEF *obj_) \ { \ if (!*vec_ || (*vec_)->num == (*vec_)->alloc) \ VEC_OP (TDEF,reserve) (vec_, ~(size_t)0); \ \ return VEC_OP (TDEF,quick_insert) (*vec_, ix_, obj_); \ } \ \ static inline void VEC_OP (TDEF,ordered_remove) \ (VEC (TDEF) *vec_, size_t ix_) \ { \ TDEF *slot_; \ \ VEC_ASSERT (ix_ < vec_->num, "remove", TDEF); \ slot_ = &vec_->vec[ix_]; \ memmove (slot_, slot_ + 1, --vec_->num - ix_); \ } \ \ static inline void VEC_OP (TDEF,unordered_remove) \ (VEC (TDEF) *vec_, size_t ix_) \ { \ VEC_ASSERT (ix_ < vec_->num, "remove", TDEF); \ vec_->vec[ix_] = vec_->vec[--vec_->num]; \ } \ \ struct vec_swallow_trailing_semi #endif #endif /* GCC_VEC_H */ struct vec_prefix { size_t num; size_t alloc; void *vec[1]; }; /* Ensure there are at least RESERVE free slots in VEC, if RESERVE != ~0u. If RESERVE == ~0u increase the current allocation exponentially. VEC can be NULL, to create a new vector. */ void * vec_p_reserve (void *vec, size_t reserve) { return vec_o_reserve (vec, reserve, offsetof (struct vec_prefix, vec), sizeof (void *)); } /* Ensure there are at least RESERVE free slots in VEC, if RESERVE != ~0u. If RESERVE == ~0u, increase the current allocation exponentially. 
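   (With the implementation below, the exponential path gives a brand-new
   vector 4 slots and doubles the allocation on each subsequent growth,
   i.e. 4, 8, 16, ....)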
VEC can be NULL, in which case a new vector is created. The vector's trailing array is at VEC_OFFSET offset and consists of ELT_SIZE sized elements. */ void * vec_o_reserve (void *vec, size_t reserve, size_t vec_offset, size_t elt_size) { struct vec_prefix *pfx = vec; size_t alloc; if (reserve + 1) alloc = (pfx ? pfx->num : 0) + reserve; else alloc = pfx ? pfx->alloc * 2 : 4; if (!pfx || pfx->alloc < alloc) { vec = ggc_realloc (vec, vec_offset + alloc * elt_size); ((struct vec_prefix *)vec)->alloc = alloc; if (!pfx) ((struct vec_prefix *)vec)->num = 0; } return vec; } /* Allocate a structure which contains a vector as a trailing element. The vector is at STRUCT_OFFSET offset within the struct and the vector's array is at VEC_OFFSET offset within the vector. */ void * vec_embedded_alloc (size_t struct_offset, size_t vec_offset, size_t elt_size, size_t reserve) { void *ptr = ggc_alloc (struct_offset + vec_offset + elt_size * reserve); struct vec_prefix *pfx = (struct vec_prefix *)((char *)ptr + struct_offset); pfx->num = 0; pfx->alloc = reserve; return ptr; } #if ENABLE_CHECKING /* Issue a vector domain error, and then fall over. */ void vec_assert_fail (const char *op, const char *struct_name, const char *file, size_t line, const char *function) { internal_error ("vector %s %s domain error, in %s at %s:%u", struct_name, op, function, trim_filename (file), line); } #endif /* This is the string reported as the version number by all components of the compiler. If you distribute a modified version of GCC, please modify this string to indicate that, e.g. by putting your organization's name in parentheses at the end of the string. */ const char version_string[] = "3.5.0 20040706 (experimental)"; /* This is the location of the online document giving instructions for reporting bugs. If you distribute a modified version of GCC, please change this to refer to a document giving instructions for reporting bugs to you, not us. (You are of course welcome to forward us bugs reported to you, if you determine that they are not bugs in your modifications.) */ const char bug_report_url[] = ""; /* Output VMS debug format symbol table information from GCC. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Douglas B. Rupp (rupp@gnat.com). Updated by Bernard W. Giroud (bgiroud@users.sourceforge.net). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef VMS_DEBUGGING_INFO /* Definitions for the data structures and codes used in VMS debugging. Copyright (C) 2001 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_VMSDBG_H #define GCC_VMSDBG_H 1 /* We define types and constants used in VMS Debug output. Note that the structs only approximate the output that is written. We write the output explicitly, field by field. This output would only agree with the structs in this file if no padding were done. The sizes after each struct are the size actually written, which is usually smaller than the size of the struct. */ /* Header type codes. */ typedef enum _DST_TYPE {DST_K_SOURCE = 155, DST_K_PROLOG = 162, DST_K_BLKBEG = 176, DST_K_BLKEND = 177, DST_K_LINE_NUM = 185, DST_K_MODBEG = 188, DST_K_MODEND = 189, DST_K_RTNBEG = 190, DST_K_RTNEND = 191} DST_DTYPE; /* Header. */ typedef struct _DST_HEADER { union { unsigned short int dst_w_length; unsigned short int dst_x_length; } dst__header_length; union { ENUM_BITFIELD (_DST_TYPE) dst_w_type : 16; ENUM_BITFIELD (_DST_TYPE) dst_x_type : 16; } dst__header_type; } DST_HEADER; #define DST_K_DST_HEADER_SIZE sizeof 4 /* Language type codes. */ typedef enum _DST_LANGUAGE {DST_K_FORTRAN = 1, DST_K_C = 7, DST_K_ADA = 9, DST_K_UNKNOWN = 10, DST_K_CXX = 15} DST_LANGUAGE; /* Module header (a module is the result of a single compilation). */ typedef struct _DST_MODULE_BEGIN { DST_HEADER dst_a_modbeg_header; struct { unsigned dst_v_modbeg_hide : 1; unsigned dst_v_modbeg_version : 1; unsigned dst_v_modbeg_unused : 6; } dst_b_modbeg_flags; unsigned char dst_b_modbeg_unused; DST_LANGUAGE dst_l_modbeg_language; unsigned short int dst_w_version_major; unsigned short int dst_w_version_minor; unsigned char dst_b_modbeg_name; } DST_MODULE_BEGIN; #define DST_K_MODBEG_SIZE 15 /* Module trailer. */ typedef struct _DST_MB_TRLR { unsigned char dst_b_compiler; } DST_MB_TRLR; #define DST_K_MB_TRLR_SIZE 1 #define DST_K_VERSION_MAJOR 1 #define DST_K_VERSION_MINOR 13 typedef struct _DST_MODULE_END { DST_HEADER dst_a_modend_header; } DST_MODULE_END; #define DST_K_MODEND_SIZE sizeof 4 /* Routine header. */ typedef struct _DST_ROUTINE_BEGIN { DST_HEADER dst_a_rtnbeg_header; struct { unsigned dst_v_rtnbeg_unused : 4; unsigned dst_v_rtnbeg_unalloc : 1; unsigned dst_v_rtnbeg_prototype : 1; unsigned dst_v_rtnbeg_inlined : 1; unsigned dst_v_rtnbeg_no_call : 1; } dst_b_rtnbeg_flags; int *dst_l_rtnbeg_address; int *dst_l_rtnbeg_pd_address; unsigned char dst_b_rtnbeg_name; } DST_ROUTINE_BEGIN; #define DST_K_RTNBEG_SIZE 14 /* Routine trailer */ typedef struct _DST_ROUTINE_END { DST_HEADER dst_a_rtnend_header; char dst_b_rtnend_unused; unsigned int dst_l_rtnend_size; } DST_ROUTINE_END; #define DST_K_RTNEND_SIZE 9 /* Block header. */ typedef struct _DST_BLOCK_BEGIN { DST_HEADER dst_a_blkbeg_header; unsigned char dst_b_blkbeg_unused; int *dst_l_blkbeg_address; unsigned char dst_b_blkbeg_name; } DST_BLOCK_BEGIN; #define DST_K_BLKBEG_SIZE 10 /* Block trailer. */ typedef struct _DST_BLOCK_END { DST_HEADER dst_a_blkend_header; unsigned char dst_b_blkend_unused; unsigned int dst_l_blkend_size; } DST_BLOCK_END; #define DST_K_BLKEND_SIZE 9 /* Line number header. 
*/ typedef struct _DST_LINE_NUM_HEADER { DST_HEADER dst_a_line_num_header; } DST_LINE_NUM_HEADER; #define DST_K_LINE_NUM_HEADER_SIZE 4 /* PC to Line number correlation. */ typedef struct _DST_PCLINE_COMMANDS { char dst_b_pcline_command; union { unsigned int dst_l_pcline_unslong; unsigned short int dst_w_pcline_unsword; unsigned char dst_b_pcline_unsbyte; } dst_a_pcline_access_fields; } DST_PCLINE_COMMANDS; /* PC and Line number correlation codes. */ #define DST_K_PCLINE_COMMANDS_SIZE 5 #define DST_K_PCLINE_COMMANDS_SIZE_MIN 2 #define DST_K_PCLINE_COMMANDS_SIZE_MAX 5 #define DST_K_DELTA_PC_LOW -128 #define DST_K_DELTA_PC_HIGH 0 #define DST_K_DELTA_PC_W 1 #define DST_K_INCR_LINUM 2 #define DST_K_INCR_LINUM_W 3 #define DST_K_SET_LINUM 9 #define DST_K_SET_ABS_PC 16 #define DST_K_DELTA_PC_L 17 #define DST_K_INCR_LINUM_L 18 #define DST_K_SET_LINUM_B 19 #define DST_K_SET_LINUM_L 20 /* Source file correlation header. */ typedef struct _DST_SOURCE_CORR { DST_HEADER dst_a_source_corr_header; } DST_SOURCE_CORR; #define DST_K_SOURCE_CORR_HEADER_SIZE 4 /* Source file correlation codes. */ #define DST_K_SRC_DECLFILE 1 #define DST_K_SRC_SETFILE 2 #define DST_K_SRC_SETREC_L 3 #define DST_K_SRC_SETREC_W 4 #define DST_K_SRC_SETLNUM_L 5 #define DST_K_SRC_SETLNUM_W 6 #define DST_K_SRC_INCRLNUM_B 7 #define DST_K_SRC_DEFLINES_W 10 #define DST_K_SRC_DEFLINES_B 11 #define DST_K_SRC_FORMFEED 16 #define DST_K_SRC_MIN_CMD 1 #define DST_K_SRC_MAX_CMD 16 /* Source file header. */ typedef struct _DST_SRC_COMMAND { unsigned char dst_b_src_command; union { struct { unsigned char dst_b_src_df_length; unsigned char dst_b_src_df_flags; unsigned short int dst_w_src_df_fileid; #ifdef HAVE_LONG_LONG long long dst_q_src_df_rms_cdt; #else #ifdef HAVE___INT64 __int64 dst_q_src_df_rms_cdt; #endif #endif unsigned int dst_l_src_df_rms_ebk; unsigned short int dst_w_src_df_rms_ffb; unsigned char dst_b_src_df_rms_rfo; unsigned char dst_b_src_df_filename; } dst_a_src_decl_src; unsigned int dst_l_src_unslong; unsigned short int dst_w_src_unsword; unsigned char dst_b_src_unsbyte; } dst_a_src_cmd_fields; } DST_SRC_COMMAND; #define DST_K_SRC_COMMAND_SIZE 21 /* Source file trailer. */ typedef struct _DST_SRC_CMDTRLR { unsigned char dst_b_src_df_libmodname; } DST_SRC_CMDTRLR; #define DST_K_SRC_CMDTRLR_SIZE 1 /* Prolog header. */ typedef struct _DST_PROLOG { DST_HEADER dst_a_prolog_header; unsigned int dst_l_prolog_bkpt_addr; } DST_PROLOG; #define DST_K_PROLOG_SIZE 8 #endif /* GCC_VMSDBG_H */ /* Difference in seconds between the VMS Epoch and the Unix Epoch */ static const long long vms_epoch_offset = 3506716800ll; /* NOTE: In the comments in this file, many references are made to "Debug Symbol Table". This term is abbreviated as `DST' throughout the remainder of this file. */ typedef struct dst_line_info_struct *dst_line_info_ref; /* Each entry in the line_info_table maintains the file and line number associated with the label generated for that entry. The label gives the PC value associated with the line number entry. */ typedef struct dst_line_info_struct { unsigned long dst_file_num; unsigned long dst_line_num; } dst_line_info_entry; typedef struct dst_file_info_struct *dst_file_info_ref; typedef struct dst_file_info_struct { char *file_name; unsigned int max_line; unsigned int listing_line_start; long long cdt; long ebk; short ffb; char rfo; char flen; } dst_file_info_entry; /* How to start an assembler comment. 
*/ #ifndef ASM_COMMENT_START #define ASM_COMMENT_START ";#" #endif /* Maximum size (in bytes) of an artificially generated label. */ #define MAX_ARTIFICIAL_LABEL_BYTES 30 /* Make sure we know the sizes of the various types debug can describe. These are only defaults. If the sizes are different for your target, you should override these values by defining the appropriate symbols in your tm.h file. */ #ifndef PTR_SIZE #define PTR_SIZE 4 /* Must be 32 bits for VMS debug info */ #endif /* Pointer to a structure of filenames referenced by this compilation unit. */ static dst_file_info_ref file_info_table; /* Total number of entries in the table (i.e. array) pointed to by `file_info_table'. This is the *total* and includes both used and unused slots. */ static unsigned int file_info_table_allocated; /* Number of entries in the file_info_table which are actually in use. */ static unsigned int file_info_table_in_use; /* Size (in elements) of increments by which we may expand the filename table. */ #define FILE_TABLE_INCREMENT 64 /* A structure to hold basic information for the VMS end routine. */ typedef struct vms_func_struct { const char *vms_func_name; unsigned funcdef_number; } vms_func_node; typedef struct vms_func_struct *vms_func_ref; static unsigned int func_table_allocated; static unsigned int func_table_in_use; #define FUNC_TABLE_INCREMENT 256 /* A pointer to the base of a table that contains frame description information for each routine. */ static vms_func_ref func_table; /* Local pointer to the name of the main input file. Initialized in vmsdbgout_init. */ static const char *primary_filename; static char *module_producer; static unsigned int module_language; /* A pointer to the base of a table that contains line information for each source code line in .text in the compilation unit. */ static dst_line_info_ref line_info_table; /* Number of elements currently allocated for line_info_table. */ static unsigned int line_info_table_allocated; /* Number of elements in line_info_table currently in use. */ static unsigned int line_info_table_in_use; /* Size (in elements) of increments by which we may expand line_info_table. */ #define LINE_INFO_TABLE_INCREMENT 1024 /* Forward declarations for functions defined in this file.
*/ static char *full_name (const char *); static unsigned int lookup_filename (const char *); static void addr_const_to_string (char *, rtx); static int write_debug_header (DST_HEADER *, const char *, int); static int write_debug_addr (char *, const char *, int); static int write_debug_data1 (unsigned int, const char *, int); static int write_debug_data2 (unsigned int, const char *, int); static int write_debug_data4 (unsigned long, const char *, int); static int write_debug_data8 (unsigned long long, const char *, int); static int write_debug_delta4 (char *, char *, const char *, int); static int write_debug_string (char *, const char *, int); static int write_modbeg (int); static int write_modend (int); static int write_rtnbeg (int, int); static int write_rtnend (int, int); static int write_pclines (int); static int write_srccorr (int, dst_file_info_entry, int); static int write_srccorrs (int); static void vmsdbgout_init (const char *); static void vmsdbgout_finish (const char *); static void vmsdbgout_define (unsigned int, const char *); static void vmsdbgout_undef (unsigned int, const char *); static void vmsdbgout_start_source_file (unsigned int, const char *); static void vmsdbgout_end_source_file (unsigned int); static void vmsdbgout_begin_block (unsigned int, unsigned int); static void vmsdbgout_end_block (unsigned int, unsigned int); static bool vmsdbgout_ignore_block (tree); static void vmsdbgout_source_line (unsigned int, const char *); static void vmsdbgout_begin_prologue (unsigned int, const char *); static void vmsdbgout_end_prologue (unsigned int, const char *); static void vmsdbgout_end_function (unsigned int); static void vmsdbgout_end_epilogue (unsigned int, const char *); static void vmsdbgout_begin_function (tree); static void vmsdbgout_decl (tree); static void vmsdbgout_global_decl (tree); static void vmsdbgout_abstract_function (tree); /* The debug hooks structure. */ const struct gcc_debug_hooks vmsdbg_debug_hooks = {vmsdbgout_init, vmsdbgout_finish, vmsdbgout_define, vmsdbgout_undef, vmsdbgout_start_source_file, vmsdbgout_end_source_file, vmsdbgout_begin_block, vmsdbgout_end_block, vmsdbgout_ignore_block, vmsdbgout_source_line, vmsdbgout_begin_prologue, vmsdbgout_end_prologue, vmsdbgout_end_epilogue, vmsdbgout_begin_function, vmsdbgout_end_function, vmsdbgout_decl, vmsdbgout_global_decl, debug_nothing_tree_int, /* type_decl */ debug_nothing_tree_tree, /* imported_module_or_decl */ debug_nothing_tree, /* deferred_inline_function */ vmsdbgout_abstract_function, debug_nothing_rtx, /* label */ debug_nothing_int, /* handle_pch */ debug_nothing_rtx /* var_location */ }; /* Definitions of defaults for assembler-dependent names of various pseudo-ops and section names. Theses may be overridden in the tm.h file (if necessary) for a particular assembler. */ #ifdef UNALIGNED_SHORT_ASM_OP #undef UNALIGNED_SHORT_ASM_OP #endif #define UNALIGNED_SHORT_ASM_OP ".word" #ifdef UNALIGNED_INT_ASM_OP #undef UNALIGNED_INT_ASM_OP #endif #define UNALIGNED_INT_ASM_OP ".long" #ifdef UNALIGNED_LONG_ASM_OP #undef UNALIGNED_LONG_ASM_OP #endif #define UNALIGNED_LONG_ASM_OP ".long" #ifdef UNALIGNED_DOUBLE_INT_ASM_OP #undef UNALIGNED_DOUBLE_INT_ASM_OP #endif #define UNALIGNED_DOUBLE_INT_ASM_OP ".quad" #ifdef ASM_BYTE_OP #undef ASM_BYTE_OP #endif #define ASM_BYTE_OP ".byte" #define NUMBYTES(I) ((I) < 256 ? 1 : (I) < 65536 ? 2 : 4) #define NUMBYTES0(I) ((I) < 128 ? 0 : (I) < 65536 ? 2 : 4) #ifndef UNALIGNED_PTR_ASM_OP #define UNALIGNED_PTR_ASM_OP \ (PTR_SIZE == 8 ? 
UNALIGNED_DOUBLE_INT_ASM_OP : UNALIGNED_INT_ASM_OP) #endif #ifndef UNALIGNED_OFFSET_ASM_OP #define UNALIGNED_OFFSET_ASM_OP(OFFSET) \ (NUMBYTES(OFFSET) == 4 \ ? UNALIGNED_LONG_ASM_OP \ : (NUMBYTES(OFFSET) == 2 ? UNALIGNED_SHORT_ASM_OP : ASM_BYTE_OP)) #endif /* Definitions of defaults for formats and names of various special (artificial) labels which may be generated within this file (when the -g options is used and VMS_DEBUGGING_INFO is in effect. If necessary, these may be overridden from within the tm.h file, but typically, overriding these defaults is unnecessary. */ static char text_end_label[MAX_ARTIFICIAL_LABEL_BYTES]; #ifndef TEXT_END_LABEL #define TEXT_END_LABEL "Lvetext" #endif #ifndef FUNC_BEGIN_LABEL #define FUNC_BEGIN_LABEL "LVFB" #endif #ifndef FUNC_PROLOG_LABEL #define FUNC_PROLOG_LABEL "LVFP" #endif #ifndef FUNC_END_LABEL #define FUNC_END_LABEL "LVFE" #endif #ifndef BLOCK_BEGIN_LABEL #define BLOCK_BEGIN_LABEL "LVBB" #endif #ifndef BLOCK_END_LABEL #define BLOCK_END_LABEL "LVBE" #endif #ifndef LINE_CODE_LABEL #define LINE_CODE_LABEL "LVM" #endif #ifndef ASM_OUTPUT_DEBUG_DELTA2 #define ASM_OUTPUT_DEBUG_DELTA2(FILE,LABEL1,LABEL2) \ do \ { \ fprintf ((FILE), "\t%s\t", UNALIGNED_SHORT_ASM_OP); \ assemble_name (FILE, LABEL1); \ fprintf (FILE, "-"); \ assemble_name (FILE, LABEL2); \ } \ while (0) #endif #ifndef ASM_OUTPUT_DEBUG_DELTA4 #define ASM_OUTPUT_DEBUG_DELTA4(FILE,LABEL1,LABEL2) \ do \ { \ fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \ assemble_name (FILE, LABEL1); \ fprintf (FILE, "-"); \ assemble_name (FILE, LABEL2); \ } \ while (0) #endif #ifndef ASM_OUTPUT_DEBUG_ADDR_DELTA #define ASM_OUTPUT_DEBUG_ADDR_DELTA(FILE,LABEL1,LABEL2) \ do \ { \ fprintf ((FILE), "\t%s\t", UNALIGNED_PTR_ASM_OP); \ assemble_name (FILE, LABEL1); \ fprintf (FILE, "-"); \ assemble_name (FILE, LABEL2); \ } \ while (0) #endif #ifndef ASM_OUTPUT_DEBUG_ADDR #define ASM_OUTPUT_DEBUG_ADDR(FILE,LABEL) \ do \ { \ fprintf ((FILE), "\t%s\t", UNALIGNED_PTR_ASM_OP); \ assemble_name (FILE, LABEL); \ } \ while (0) #endif #ifndef ASM_OUTPUT_DEBUG_ADDR_CONST #define ASM_OUTPUT_DEBUG_ADDR_CONST(FILE,ADDR) \ fprintf ((FILE), "\t%s\t%s", UNALIGNED_PTR_ASM_OP, (ADDR)) #endif #ifndef ASM_OUTPUT_DEBUG_DATA1 #define ASM_OUTPUT_DEBUG_DATA1(FILE,VALUE) \ fprintf ((FILE), "\t%s\t0x%x", ASM_BYTE_OP, (unsigned char) VALUE) #endif #ifndef ASM_OUTPUT_DEBUG_DATA2 #define ASM_OUTPUT_DEBUG_DATA2(FILE,VALUE) \ fprintf ((FILE), "\t%s\t0x%x", UNALIGNED_SHORT_ASM_OP, \ (unsigned short) VALUE) #endif #ifndef ASM_OUTPUT_DEBUG_DATA4 #define ASM_OUTPUT_DEBUG_DATA4(FILE,VALUE) \ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_INT_ASM_OP, (unsigned long) VALUE) #endif #ifndef ASM_OUTPUT_DEBUG_DATA #define ASM_OUTPUT_DEBUG_DATA(FILE,VALUE) \ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_OFFSET_ASM_OP(VALUE), VALUE) #endif #ifndef ASM_OUTPUT_DEBUG_ADDR_DATA #define ASM_OUTPUT_DEBUG_ADDR_DATA(FILE,VALUE) \ fprintf ((FILE), "\t%s\t0x%lx", UNALIGNED_PTR_ASM_OP, \ (unsigned long) VALUE) #endif #ifndef ASM_OUTPUT_DEBUG_DATA8 #define ASM_OUTPUT_DEBUG_DATA8(FILE,VALUE) \ fprintf ((FILE), "\t%s\t0x%llx", UNALIGNED_DOUBLE_INT_ASM_OP, \ (unsigned long long) VALUE) #endif /* This is similar to the default ASM_OUTPUT_ASCII, except that no trailing newline is produced. When flag_verbose_asm is asserted, we add commentary at the end of the line, so we must avoid output of a newline here. 
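   For example (illustrative only), a buffer holding  ab"c  followed by a 0x01 byte is emitted as  .ascii "ab\"c\1" : the quote and backslash characters are escaped, printable characters are copied through, and anything outside the printable ASCII range is written as an octal escape.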
*/ #ifndef ASM_OUTPUT_DEBUG_STRING #define ASM_OUTPUT_DEBUG_STRING(FILE,P) \ do \ { \ register int slen = strlen(P); \ register char *p = (P); \ register int i; \ fprintf (FILE, "\t.ascii \""); \ for (i = 0; i < slen; i++) \ { \ register int c = p[i]; \ if (c == '\"' || c == '\\') \ putc ('\\', FILE); \ if (c >= ' ' && c < 0177) \ putc (c, FILE); \ else \ fprintf (FILE, "\\%o", c); \ } \ fprintf (FILE, "\""); \ } \ while (0) #endif /* Convert a reference to the assembler name of a C-level name. This macro has the same effect as ASM_OUTPUT_LABELREF, but copies to a string rather than writing to a file. */ #ifndef ASM_NAME_TO_STRING #define ASM_NAME_TO_STRING(STR, NAME) \ do \ { \ if ((NAME)[0] == '*') \ strcpy (STR, NAME+1); \ else \ strcpy (STR, NAME); \ } \ while (0) #endif /* General utility functions. */ /* Convert an integer constant expression into assembler syntax. Addition and subtraction are the only arithmetic that may appear in these expressions. This is an adaptation of output_addr_const in final.c. Here, the target of the conversion is a string buffer. We can't use output_addr_const directly, because it writes to a file. */ static void addr_const_to_string (char *str, rtx x) { char buf1[256]; char buf2[256]; restart: str[0] = '\0'; switch (GET_CODE (x)) { case PC: if (flag_pic) strcat (str, ","); else abort (); break; case SYMBOL_REF: ASM_NAME_TO_STRING (buf1, XSTR (x, 0)); strcat (str, buf1); break; case LABEL_REF: ASM_GENERATE_INTERNAL_LABEL (buf1, "L", CODE_LABEL_NUMBER (XEXP (x, 0))); ASM_NAME_TO_STRING (buf2, buf1); strcat (str, buf2); break; case CODE_LABEL: ASM_GENERATE_INTERNAL_LABEL (buf1, "L", CODE_LABEL_NUMBER (x)); ASM_NAME_TO_STRING (buf2, buf1); strcat (str, buf2); break; case CONST_INT: sprintf (buf1, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); strcat (str, buf1); break; case CONST: /* This used to output parentheses around the expression, but that does not work on the 386 (either ATT or BSD assembler). */ addr_const_to_string (buf1, XEXP (x, 0)); strcat (str, buf1); break; case CONST_DOUBLE: if (GET_MODE (x) == VOIDmode) { /* We can use %d if the number is one word and positive. */ if (CONST_DOUBLE_HIGH (x)) sprintf (buf1, HOST_WIDE_INT_PRINT_DOUBLE_HEX, CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x)); else if (CONST_DOUBLE_LOW (x) < 0) sprintf (buf1, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (x)); else sprintf (buf1, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x)); strcat (str, buf1); } else /* We can't handle floating point constants; PRINT_OPERAND must handle them. */ output_operand_lossage ("floating constant misused"); break; case PLUS: /* Some assemblers need integer constants to appear last (eg masm). */ if (GET_CODE (XEXP (x, 0)) == CONST_INT) { addr_const_to_string (buf1, XEXP (x, 1)); strcat (str, buf1); if (INTVAL (XEXP (x, 0)) >= 0) strcat (str, "+"); addr_const_to_string (buf1, XEXP (x, 0)); strcat (str, buf1); } else { addr_const_to_string (buf1, XEXP (x, 0)); strcat (str, buf1); if (INTVAL (XEXP (x, 1)) >= 0) strcat (str, "+"); addr_const_to_string (buf1, XEXP (x, 1)); strcat (str, buf1); } break; case MINUS: /* Avoid outputting things like x-x or x+5-x, since some assemblers can't handle that. 
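   simplify_subtraction is applied first and may fold the whole expression; if a genuine difference remains, a negative constant subtrahend is parenthesized, so that, for example, a symbol minus the constant -4 is printed as sym-(-4) rather than the ambiguous sym--4.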
*/ x = simplify_subtraction (x); if (GET_CODE (x) != MINUS) goto restart; addr_const_to_string (buf1, XEXP (x, 0)); strcat (str, buf1); strcat (str, "-"); if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) < 0) { strcat (str, "("); addr_const_to_string (buf1, XEXP (x, 1)); strcat (str, buf1); strcat (str, ")"); } else { addr_const_to_string (buf1, XEXP (x, 1)); strcat (str, buf1); } break; case ZERO_EXTEND: case SIGN_EXTEND: addr_const_to_string (buf1, XEXP (x, 0)); strcat (str, buf1); break; default: output_operand_lossage ("invalid expression as operand"); } } /* Output the debug header HEADER. Also output COMMENT if flag_verbose_asm is set. Return the header size. Just return the size if DOSIZEONLY is nonzero. */ static int write_debug_header (DST_HEADER *header, const char *comment, int dosizeonly) { if (!dosizeonly) { ASM_OUTPUT_DEBUG_DATA2 (asm_out_file, header->dst__header_length.dst_w_length); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s record length", ASM_COMMENT_START); fputc ('\n', asm_out_file); ASM_OUTPUT_DEBUG_DATA2 (asm_out_file, header->dst__header_type.dst_w_type); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s record type (%s)", ASM_COMMENT_START, comment); fputc ('\n', asm_out_file); } return 4; } /* Output the address of SYMBOL. Also output COMMENT if flag_verbose_asm is set. Return the address size. Just return the size if DOSIZEONLY is nonzero. */ static int write_debug_addr (char *symbol, const char *comment, int dosizeonly) { if (!dosizeonly) { ASM_OUTPUT_DEBUG_ADDR (asm_out_file, symbol); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START, comment); fputc ('\n', asm_out_file); } return PTR_SIZE; } /* Output the single byte DATA1. Also output COMMENT if flag_verbose_asm is set. Return the data size. Just return the size if DOSIZEONLY is nonzero. */ static int write_debug_data1 (unsigned int data1, const char *comment, int dosizeonly) { if (!dosizeonly) { ASM_OUTPUT_DEBUG_DATA1 (asm_out_file, data1); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START, comment); fputc ('\n', asm_out_file); } return 1; } /* Output the single word DATA2. Also output COMMENT if flag_verbose_asm is set. Return the data size. Just return the size if DOSIZEONLY is nonzero. */ static int write_debug_data2 (unsigned int data2, const char *comment, int dosizeonly) { if (!dosizeonly) { ASM_OUTPUT_DEBUG_DATA2 (asm_out_file, data2); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START, comment); fputc ('\n', asm_out_file); } return 2; } /* Output double word DATA4. Also output COMMENT if flag_verbose_asm is set. Return the data size. Just return the size if DOSIZEONLY is nonzero. */ static int write_debug_data4 (unsigned long data4, const char *comment, int dosizeonly) { if (!dosizeonly) { ASM_OUTPUT_DEBUG_DATA4 (asm_out_file, data4); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START, comment); fputc ('\n', asm_out_file); } return 4; } /* Output quad word DATA8. Also output COMMENT if flag_verbose_asm is set. Return the data size. Just return the size if DOSIZEONLY is nonzero. */ static int write_debug_data8 (unsigned long long data8, const char *comment, int dosizeonly) { if (!dosizeonly) { ASM_OUTPUT_DEBUG_DATA8 (asm_out_file, data8); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START, comment); fputc ('\n', asm_out_file); } return 8; } /* Output the difference between LABEL1 and LABEL2. Also output COMMENT if flag_verbose_asm is set. Return the data size. 
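   The value is written symbolically as LABEL1-LABEL2 and resolved by the assembler, so this routine always accounts for exactly 4 bytes of output.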
Just return the size if DOSIZEONLY is nonzero. */ static int write_debug_delta4 (char *label1, char *label2, const char *comment, int dosizeonly) { if (!dosizeonly) { ASM_OUTPUT_DEBUG_DELTA4 (asm_out_file, label1, label2); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START, comment); fputc ('\n', asm_out_file); } return 4; } /* Output a character string STRING. Also write COMMENT if flag_verbose_asm is set. Return the string length. Just return the length if DOSIZEONLY is nonzero. */ static int write_debug_string (char *string, const char *comment, int dosizeonly) { if (!dosizeonly) { ASM_OUTPUT_DEBUG_STRING (asm_out_file, string); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START, comment); fputc ('\n', asm_out_file); } return strlen (string); } /* Output a module begin header and return the header size. Just return the size if DOSIZEONLY is nonzero. */ static int write_modbeg (int dosizeonly) { DST_MODULE_BEGIN modbeg; DST_MB_TRLR mb_trlr; int i; char *module_name, *m; int modnamelen; int prodnamelen; int totsize = 0; /* Assumes primary filename has Unix syntax file spec. */ module_name = xstrdup (basename ((char *) primary_filename)); m = strrchr (module_name, '.'); if (m) *m = 0; modnamelen = strlen (module_name); for (i = 0; i < modnamelen; i++) module_name[i] = TOUPPER (module_name[i]); prodnamelen = strlen (module_producer); modbeg.dst_a_modbeg_header.dst__header_length.dst_w_length = DST_K_MODBEG_SIZE + modnamelen + DST_K_MB_TRLR_SIZE + prodnamelen - 1; modbeg.dst_a_modbeg_header.dst__header_type.dst_w_type = DST_K_MODBEG; modbeg.dst_b_modbeg_flags.dst_v_modbeg_hide = 0; modbeg.dst_b_modbeg_flags.dst_v_modbeg_version = 1; modbeg.dst_b_modbeg_flags.dst_v_modbeg_unused = 0; modbeg.dst_b_modbeg_unused = 0; modbeg.dst_l_modbeg_language = module_language; modbeg.dst_w_version_major = DST_K_VERSION_MAJOR; modbeg.dst_w_version_minor = DST_K_VERSION_MINOR; modbeg.dst_b_modbeg_name = strlen (module_name); mb_trlr.dst_b_compiler = strlen (module_producer); totsize += write_debug_header (&modbeg.dst_a_modbeg_header, "modbeg", dosizeonly); totsize += write_debug_data1 (*((char *) &modbeg.dst_b_modbeg_flags), "flags", dosizeonly); totsize += write_debug_data1 (modbeg.dst_b_modbeg_unused, "unused", dosizeonly); totsize += write_debug_data4 (modbeg.dst_l_modbeg_language, "language", dosizeonly); totsize += write_debug_data2 (modbeg.dst_w_version_major, "DST major version", dosizeonly); totsize += write_debug_data2 (modbeg.dst_w_version_minor, "DST minor version", dosizeonly); totsize += write_debug_data1 (modbeg.dst_b_modbeg_name, "length of module name", dosizeonly); totsize += write_debug_string (module_name, "module name", dosizeonly); totsize += write_debug_data1 (mb_trlr.dst_b_compiler, "length of compiler name", dosizeonly); totsize += write_debug_string (module_producer, "compiler name", dosizeonly); return totsize; } /* Output a module end trailer and return the trailer size. Just return the size if DOSIZEONLY is nonzero. */ static int write_modend (int dosizeonly) { DST_MODULE_END modend; int totsize = 0; modend.dst_a_modend_header.dst__header_length.dst_w_length = DST_K_MODEND_SIZE - 1; modend.dst_a_modend_header.dst__header_type.dst_w_type = DST_K_MODEND; totsize += write_debug_header (&modend.dst_a_modend_header, "modend", dosizeonly); return totsize; } /* Output a routine begin header routine RTNNUM and return the header size. Just return the size if DOSIZEONLY is nonzero. 
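   Note that the routine's entry point is written against a separate symbol formed by appending "..en" to the procedure-descriptor name (see the concat call below), so for a routine named foo the begin address uses foo..en while the descriptor address uses foo itself.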
*/ static int write_rtnbeg (int rtnnum, int dosizeonly) { char *rtnname; int rtnnamelen; char *rtnentryname; int totsize = 0; char label[MAX_ARTIFICIAL_LABEL_BYTES]; DST_ROUTINE_BEGIN rtnbeg; DST_PROLOG prolog; vms_func_ref fde = &func_table[rtnnum]; rtnname = (char *)fde->vms_func_name; rtnnamelen = strlen (rtnname); rtnentryname = concat (rtnname, "..en", NULL); if (!strcmp (rtnname, "main")) { DST_HEADER header; const char *go = "TRANSFER$BREAK$GO"; /* This command isn't documented in DSTRECORDS, so it's made to look like what DEC C does */ /* header size - 1st byte + flag byte + STO_LW size + string count byte + string length */ header.dst__header_length.dst_w_length = DST_K_DST_HEADER_SIZE - 1 + 1 + 4 + 1 + strlen (go); header.dst__header_type.dst_w_type = 0x17; totsize += write_debug_header (&header, "transfer", dosizeonly); /* I think this is a flag byte, but I don't know what this flag means */ totsize += write_debug_data1 (0x1, "flags ???", dosizeonly); /* Routine Begin PD Address */ totsize += write_debug_addr (rtnname, "main procedure descriptor", dosizeonly); totsize += write_debug_data1 (strlen (go), "length of main_name", dosizeonly); totsize += write_debug_string ((char *) go, "main name", dosizeonly); } /* The header length never includes the length byte. */ rtnbeg.dst_a_rtnbeg_header.dst__header_length.dst_w_length = DST_K_RTNBEG_SIZE + rtnnamelen - 1; rtnbeg.dst_a_rtnbeg_header.dst__header_type.dst_w_type = DST_K_RTNBEG; rtnbeg.dst_b_rtnbeg_flags.dst_v_rtnbeg_unused = 0; rtnbeg.dst_b_rtnbeg_flags.dst_v_rtnbeg_unalloc = 0; rtnbeg.dst_b_rtnbeg_flags.dst_v_rtnbeg_prototype = 0; rtnbeg.dst_b_rtnbeg_flags.dst_v_rtnbeg_inlined = 0; rtnbeg.dst_b_rtnbeg_flags.dst_v_rtnbeg_no_call = 1; rtnbeg.dst_b_rtnbeg_name = rtnnamelen; totsize += write_debug_header (&rtnbeg.dst_a_rtnbeg_header, "rtnbeg", dosizeonly); totsize += write_debug_data1 (*((char *) &rtnbeg.dst_b_rtnbeg_flags), "flags", dosizeonly); /* Routine Begin Address */ totsize += write_debug_addr (rtnentryname, "routine entry name", dosizeonly); /* Routine Begin PD Address */ totsize += write_debug_addr (rtnname, "routine procedure descriptor", dosizeonly); /* Routine Begin Name */ totsize += write_debug_data1 (rtnbeg.dst_b_rtnbeg_name, "length of routine name", dosizeonly); totsize += write_debug_string (rtnname, "routine name", dosizeonly); free (rtnentryname); if (debug_info_level > DINFO_LEVEL_TERSE) { prolog.dst_a_prolog_header.dst__header_length.dst_w_length = DST_K_PROLOG_SIZE - 1; prolog.dst_a_prolog_header.dst__header_type.dst_w_type = DST_K_PROLOG; totsize += write_debug_header (&prolog.dst_a_prolog_header, "prolog", dosizeonly); ASM_GENERATE_INTERNAL_LABEL (label, FUNC_PROLOG_LABEL, fde->funcdef_number); totsize += write_debug_addr (label, "prolog breakpoint addr", dosizeonly); } return totsize; } /* Output a routine end trailer for routine RTNNUM and return the header size. Just return the size if DOSIZEONLY is nonzero. */ static int write_rtnend (int rtnnum, int dosizeonly) { DST_ROUTINE_END rtnend; char label1[MAX_ARTIFICIAL_LABEL_BYTES]; char label2[MAX_ARTIFICIAL_LABEL_BYTES]; int totsize; vms_func_ref fde = &func_table[rtnnum]; int corrected_rtnnum = fde->funcdef_number; totsize = 0; rtnend.dst_a_rtnend_header.dst__header_length.dst_w_length = DST_K_RTNEND_SIZE - 1; rtnend.dst_a_rtnend_header.dst__header_type.dst_w_type = DST_K_RTNEND; rtnend.dst_b_rtnend_unused = 0; rtnend.dst_l_rtnend_size = 0; /* Calculated below. 
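   The size is never taken from this field: it is emitted as the label difference FUNC_END_LABEL minus FUNC_BEGIN_LABEL through write_debug_delta4, so the assembler supplies the routine's actual byte count.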
*/ totsize += write_debug_header (&rtnend.dst_a_rtnend_header, "rtnend", dosizeonly); totsize += write_debug_data1 (rtnend.dst_b_rtnend_unused, "unused", dosizeonly); ASM_GENERATE_INTERNAL_LABEL (label1, FUNC_BEGIN_LABEL, corrected_rtnnum); ASM_GENERATE_INTERNAL_LABEL (label2, FUNC_END_LABEL, corrected_rtnnum); totsize += write_debug_delta4 (label2, label1, "routine size", dosizeonly); return totsize; } #define K_DELTA_PC(I) \ ((I) < 128 ? -(I) : (I) < 65536 ? DST_K_DELTA_PC_W : DST_K_DELTA_PC_L) #define K_SET_LINUM(I) \ ((I) < 256 ? DST_K_SET_LINUM_B \ : (I) < 65536 ? DST_K_SET_LINUM : DST_K_SET_LINUM_L) #define K_INCR_LINUM(I) \ ((I) < 256 ? DST_K_INCR_LINUM \ : (I) < 65536 ? DST_K_INCR_LINUM_W : DST_K_INCR_LINUM_L) /* Output the PC to line number correlations and return the size. Just return the size if DOSIZEONLY is nonzero */ static int write_pclines (int dosizeonly) { unsigned i; int fn; int ln, lastln; int linestart = 0; int max_line; DST_LINE_NUM_HEADER line_num; DST_PCLINE_COMMANDS pcline; char label[MAX_ARTIFICIAL_LABEL_BYTES]; char lastlabel[MAX_ARTIFICIAL_LABEL_BYTES]; int totsize = 0; char buff[256]; max_line = file_info_table[1].max_line; file_info_table[1].listing_line_start = linestart; linestart = linestart + ((max_line / 100000) + 1) * 100000; for (i = 2; i < file_info_table_in_use; i++) { max_line = file_info_table[i].max_line; file_info_table[i].listing_line_start = linestart; linestart = linestart + ((max_line / 10000) + 1) * 10000; } /* Set starting address to beginning of text section. */ line_num.dst_a_line_num_header.dst__header_length.dst_w_length = 8; line_num.dst_a_line_num_header.dst__header_type.dst_w_type = DST_K_LINE_NUM; pcline.dst_b_pcline_command = DST_K_SET_ABS_PC; totsize += write_debug_header (&line_num.dst_a_line_num_header, "line_num", dosizeonly); totsize += write_debug_data1 (pcline.dst_b_pcline_command, "line_num (SET ABS PC)", dosizeonly); if (dosizeonly) totsize += 4; else { ASM_OUTPUT_DEBUG_ADDR (asm_out_file, TEXT_SECTION_ASM_OP); if (flag_verbose_asm) fprintf (asm_out_file, "\t%s line_num", ASM_COMMENT_START); fputc ('\n', asm_out_file); } fn = line_info_table[1].dst_file_num; ln = (file_info_table[fn].listing_line_start + line_info_table[1].dst_line_num); line_num.dst_a_line_num_header.dst__header_length.dst_w_length = 4 + 4; pcline.dst_b_pcline_command = DST_K_SET_LINUM_L; totsize += write_debug_header (&line_num.dst_a_line_num_header, "line_num", dosizeonly); totsize += write_debug_data1 (pcline.dst_b_pcline_command, "line_num (SET LINUM LONG)", dosizeonly); sprintf (buff, "line_num (%d)", ln ? ln - 1 : 0); totsize += write_debug_data4 (ln ? 
ln - 1 : 0, buff, dosizeonly); lastln = ln; strcpy (lastlabel, TEXT_SECTION_ASM_OP); for (i = 1; i < line_info_table_in_use; i++) { int extrabytes; fn = line_info_table[i].dst_file_num; ln = (file_info_table[fn].listing_line_start + line_info_table[i].dst_line_num); if (ln - lastln > 1) extrabytes = 5; /* NUMBYTES (ln - lastln - 1) + 1; */ else if (ln <= lastln) extrabytes = 5; /* NUMBYTES (ln - 1) + 1; */ else extrabytes = 0; line_num.dst_a_line_num_header.dst__header_length.dst_w_length = 8 + extrabytes; totsize += write_debug_header (&line_num.dst_a_line_num_header, "line_num", dosizeonly); if (ln - lastln > 1) { int lndif = ln - lastln - 1; /* K_INCR_LINUM (lndif); */ pcline.dst_b_pcline_command = DST_K_INCR_LINUM_L; totsize += write_debug_data1 (pcline.dst_b_pcline_command, "line_num (INCR LINUM LONG)", dosizeonly); sprintf (buff, "line_num (%d)", lndif); totsize += write_debug_data4 (lndif, buff, dosizeonly); } else if (ln <= lastln) { /* K_SET_LINUM (ln-1); */ pcline.dst_b_pcline_command = DST_K_SET_LINUM_L; totsize += write_debug_data1 (pcline.dst_b_pcline_command, "line_num (SET LINUM LONG)", dosizeonly); sprintf (buff, "line_num (%d)", ln - 1); totsize += write_debug_data4 (ln - 1, buff, dosizeonly); } pcline.dst_b_pcline_command = DST_K_DELTA_PC_L; totsize += write_debug_data1 (pcline.dst_b_pcline_command, "line_num (DELTA PC LONG)", dosizeonly); ASM_GENERATE_INTERNAL_LABEL (label, LINE_CODE_LABEL, i); totsize += write_debug_delta4 (label, lastlabel, "increment line_num", dosizeonly); lastln = ln; strcpy (lastlabel, label); } return totsize; } /* Output a source correlation for file FILEID using information saved in FILE_INFO_ENTRY and return the size. Just return the size if DOSIZEONLY is nonzero. */ static int write_srccorr (int fileid, dst_file_info_entry file_info_entry, int dosizeonly) { int src_command_size; int linesleft = file_info_entry.max_line; int linestart = file_info_entry.listing_line_start; int flen = file_info_entry.flen; int linestodo = 0; DST_SOURCE_CORR src_header; DST_SRC_COMMAND src_command; DST_SRC_COMMAND src_command_sf; DST_SRC_COMMAND src_command_sl; DST_SRC_COMMAND src_command_sr; DST_SRC_COMMAND src_command_dl; DST_SRC_CMDTRLR src_cmdtrlr; char buff[256]; int totsize = 0; if (fileid == 1) { src_header.dst_a_source_corr_header.dst__header_length.dst_w_length = DST_K_SOURCE_CORR_HEADER_SIZE + 1 - 1; src_header.dst_a_source_corr_header.dst__header_type.dst_w_type = DST_K_SOURCE; src_command.dst_b_src_command = DST_K_SRC_FORMFEED; totsize += write_debug_header (&src_header.dst_a_source_corr_header, "source corr", dosizeonly); totsize += write_debug_data1 (src_command.dst_b_src_command, "source_corr (SRC FORMFEED)", dosizeonly); } src_command_size = DST_K_SRC_COMMAND_SIZE + flen + DST_K_SRC_CMDTRLR_SIZE; src_command.dst_b_src_command = DST_K_SRC_DECLFILE; src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_b_src_df_length = src_command_size - 2; src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_b_src_df_flags = 0; src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_w_src_df_fileid = fileid; src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_q_src_df_rms_cdt = file_info_entry.cdt; src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_l_src_df_rms_ebk = file_info_entry.ebk; src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_w_src_df_rms_ffb = file_info_entry.ffb; src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_b_src_df_rms_rfo = file_info_entry.rfo; src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_b_src_df_filename = 
file_info_entry.flen; src_header.dst_a_source_corr_header.dst__header_length.dst_w_length = DST_K_SOURCE_CORR_HEADER_SIZE + src_command_size - 1; src_header.dst_a_source_corr_header.dst__header_type.dst_w_type = DST_K_SOURCE; src_cmdtrlr.dst_b_src_df_libmodname = 0; totsize += write_debug_header (&src_header.dst_a_source_corr_header, "source corr", dosizeonly); totsize += write_debug_data1 (src_command.dst_b_src_command, "source_corr (DECL SRC FILE)", dosizeonly); totsize += write_debug_data1 (src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_b_src_df_length, "source_corr (length)", dosizeonly); totsize += write_debug_data1 (src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_b_src_df_flags, "source_corr (flags)", dosizeonly); totsize += write_debug_data2 (src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_w_src_df_fileid, "source_corr (fileid)", dosizeonly); totsize += write_debug_data8 (src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_q_src_df_rms_cdt, "source_corr (creation date)", dosizeonly); totsize += write_debug_data4 (src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_l_src_df_rms_ebk, "source_corr (EOF block number)", dosizeonly); totsize += write_debug_data2 (src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_w_src_df_rms_ffb, "source_corr (first free byte)", dosizeonly); totsize += write_debug_data1 (src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_b_src_df_rms_rfo, "source_corr (record and file organization)", dosizeonly); totsize += write_debug_data1 (src_command.dst_a_src_cmd_fields.dst_a_src_decl_src.dst_b_src_df_filename, "source_corr (filename length)", dosizeonly); totsize += write_debug_string (file_info_entry.file_name, "source file name", dosizeonly); totsize += write_debug_data1 (src_cmdtrlr.dst_b_src_df_libmodname, "source_corr (libmodname)", dosizeonly); src_command_sf.dst_b_src_command = DST_K_SRC_SETFILE; src_command_sf.dst_a_src_cmd_fields.dst_w_src_unsword = fileid; src_command_sr.dst_b_src_command = DST_K_SRC_SETREC_W; src_command_sr.dst_a_src_cmd_fields.dst_w_src_unsword = 1; src_command_sl.dst_b_src_command = DST_K_SRC_SETLNUM_L; src_command_sl.dst_a_src_cmd_fields.dst_l_src_unslong = linestart + 1; src_command_dl.dst_b_src_command = DST_K_SRC_DEFLINES_W; if (linesleft > 65534) linesleft = linesleft - 65534, linestodo = 65534; else linestodo = linesleft, linesleft = 0; src_command_dl.dst_a_src_cmd_fields.dst_w_src_unsword = linestodo; src_header.dst_a_source_corr_header.dst__header_length.dst_w_length = DST_K_SOURCE_CORR_HEADER_SIZE + 3 + 3 + 5 + 3 - 1; src_header.dst_a_source_corr_header.dst__header_type.dst_w_type = DST_K_SOURCE; if (src_command_dl.dst_a_src_cmd_fields.dst_w_src_unsword) { totsize += write_debug_header (&src_header.dst_a_source_corr_header, "source corr", dosizeonly); totsize += write_debug_data1 (src_command_sf.dst_b_src_command, "source_corr (src setfile)", dosizeonly); totsize += write_debug_data2 (src_command_sf.dst_a_src_cmd_fields.dst_w_src_unsword, "source_corr (fileid)", dosizeonly); totsize += write_debug_data1 (src_command_sr.dst_b_src_command, "source_corr (setrec)", dosizeonly); totsize += write_debug_data2 (src_command_sr.dst_a_src_cmd_fields.dst_w_src_unsword, "source_corr (recnum)", dosizeonly); totsize += write_debug_data1 (src_command_sl.dst_b_src_command, "source_corr (setlnum)", dosizeonly); totsize += write_debug_data4 (src_command_sl.dst_a_src_cmd_fields.dst_l_src_unslong, "source_corr (linenum)", dosizeonly); totsize += write_debug_data1 (src_command_dl.dst_b_src_command, 
"source_corr (deflines)", dosizeonly); sprintf (buff, "source_corr (%d)", src_command_dl.dst_a_src_cmd_fields.dst_w_src_unsword); totsize += write_debug_data2 (src_command_dl.dst_a_src_cmd_fields.dst_w_src_unsword, buff, dosizeonly); while (linesleft > 0) { src_header.dst_a_source_corr_header.dst__header_length.dst_w_length = DST_K_SOURCE_CORR_HEADER_SIZE + 3 - 1; src_header.dst_a_source_corr_header.dst__header_type.dst_w_type = DST_K_SOURCE; src_command_dl.dst_b_src_command = DST_K_SRC_DEFLINES_W; if (linesleft > 65534) linesleft = linesleft - 65534, linestodo = 65534; else linestodo = linesleft, linesleft = 0; src_command_dl.dst_a_src_cmd_fields.dst_w_src_unsword = linestodo; totsize += write_debug_header (&src_header.dst_a_source_corr_header, "source corr", dosizeonly); totsize += write_debug_data1 (src_command_dl.dst_b_src_command, "source_corr (deflines)", dosizeonly); sprintf (buff, "source_corr (%d)", src_command_dl.dst_a_src_cmd_fields.dst_w_src_unsword); totsize += write_debug_data2 (src_command_dl.dst_a_src_cmd_fields.dst_w_src_unsword, buff, dosizeonly); } } return totsize; } /* Output all the source correlation entries and return the size. Just return the size if DOSIZEONLY is nonzero. */ static int write_srccorrs (int dosizeonly) { unsigned int i; int totsize = 0; for (i = 1; i < file_info_table_in_use; i++) totsize += write_srccorr (i, file_info_table[i], dosizeonly); return totsize; } /* Output a marker (i.e. a label) for the beginning of a function, before the prologue. */ static void vmsdbgout_begin_prologue (unsigned int line, const char *file) { char label[MAX_ARTIFICIAL_LABEL_BYTES]; if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.begin_prologue) (line, file); if (debug_info_level > DINFO_LEVEL_NONE) { ASM_GENERATE_INTERNAL_LABEL (label, FUNC_BEGIN_LABEL, current_function_funcdef_no); ASM_OUTPUT_LABEL (asm_out_file, label); } } /* Output a marker (i.e. a label) for the beginning of a function, after the prologue. */ static void vmsdbgout_end_prologue (unsigned int line, const char *file) { char label[MAX_ARTIFICIAL_LABEL_BYTES]; if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.end_prologue) (line, file); if (debug_info_level > DINFO_LEVEL_TERSE) { ASM_GENERATE_INTERNAL_LABEL (label, FUNC_PROLOG_LABEL, current_function_funcdef_no); ASM_OUTPUT_LABEL (asm_out_file, label); /* VMS PCA expects every PC range to correlate to some line and file. */ vmsdbgout_source_line (line, file); } } /* No output for VMS debug, but make obligatory call to Dwarf2 debug */ static void vmsdbgout_end_function (unsigned int line) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.end_function) (line); } /* Output a marker (i.e. a label) for the absolute end of the generated code for a function definition. This gets called *after* the epilogue code has been generated. */ static void vmsdbgout_end_epilogue (unsigned int line, const char *file) { char label[MAX_ARTIFICIAL_LABEL_BYTES]; if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.end_epilogue) (line, file); if (debug_info_level > DINFO_LEVEL_NONE) { /* Output a label to mark the endpoint of the code generated for this function. */ ASM_GENERATE_INTERNAL_LABEL (label, FUNC_END_LABEL, current_function_funcdef_no); ASM_OUTPUT_LABEL (asm_out_file, label); /* VMS PCA expects every PC range to correlate to some line and file. */ vmsdbgout_source_line (line, file); } } /* Output a marker (i.e. a label) for the beginning of the generated code for a lexical block. 
*/ static void vmsdbgout_begin_block (register unsigned line, register unsigned blocknum) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.begin_block) (line, blocknum); if (debug_info_level > DINFO_LEVEL_TERSE) targetm.asm_out.internal_label (asm_out_file, BLOCK_BEGIN_LABEL, blocknum); } /* Output a marker (i.e. a label) for the end of the generated code for a lexical block. */ static void vmsdbgout_end_block (register unsigned line, register unsigned blocknum) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.end_block) (line, blocknum); if (debug_info_level > DINFO_LEVEL_TERSE) targetm.asm_out.internal_label (asm_out_file, BLOCK_END_LABEL, blocknum); } /* Not implemented in VMS Debug. */ static bool vmsdbgout_ignore_block (tree block) { bool retval = 0; if (write_symbols == VMS_AND_DWARF2_DEBUG) retval = (*dwarf2_debug_hooks.ignore_block) (block); return retval; } /* Add an entry for function DECL into the func_table. */ static void vmsdbgout_begin_function (tree decl) { const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0); vms_func_ref fde; if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.begin_function) (decl); if (func_table_in_use == func_table_allocated) { func_table_allocated += FUNC_TABLE_INCREMENT; func_table = (vms_func_ref) xrealloc (func_table, func_table_allocated * sizeof (vms_func_node)); } /* Add the new entry to the end of the function name table. */ fde = &func_table[func_table_in_use++]; fde->vms_func_name = xstrdup (name); fde->funcdef_number = current_function_funcdef_no; } static char fullname_buff [4096]; /* Return the full file specification for FILENAME. The specification must be in VMS syntax in order to be processed by VMS Debug. */ static char * full_name (const char *filename) { #ifdef VMS FILE *fp = fopen (filename, "r"); fgetname (fp, fullname_buff, 1); fclose (fp); #else getcwd (fullname_buff, sizeof (fullname_buff)); strcat (fullname_buff, "/"); strcat (fullname_buff, filename); /* ??? Insert hairy code here to translate Unix style file specification to VMS style. */ #endif return fullname_buff; } /* Lookup a filename (in the list of filenames that we know about here in vmsdbgout.c) and return its "index". The index of each (known) filename is just a unique number which is associated with only that one filename. We need such numbers for the sake of generating labels and references to those files numbers. If the filename given as an argument is not found in our current list, add it to the list and assign it the next available unique index number. In order to speed up searches, we remember the index of the filename was looked up last. This handles the majority of all searches. */ static unsigned int lookup_filename (const char *file_name) { static unsigned int last_file_lookup_index = 0; register char *fn; register unsigned i; char *fnam; long long cdt; long ebk; short ffb; char rfo; char flen; struct stat statbuf; if (stat (file_name, &statbuf) == 0) { long gmtoff; #ifdef VMS struct tm *ts; /* Adjust for GMT. */ ts = (struct tm *) localtime (&statbuf.st_ctime); gmtoff = ts->tm_gmtoff; /* VMS has multiple file format types. */ rfo = statbuf.st_fab_rfm; #else /* Is GMT adjustment an issue with a cross-compiler? */ gmtoff = 0; /* Assume stream LF type file. 
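   The RMS-style fields computed below are only approximations on a non-VMS host: cdt converts the ctime to VMS 100-nanosecond units using vms_epoch_offset, ebk is the 1-based count of 512-byte blocks, and ffb is the offset of the first free byte in the last block; e.g. a 1000-byte file gives ebk == 2 and ffb == 488.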
*/ rfo = 2; #endif cdt = 10000000 * (statbuf.st_ctime + gmtoff + vms_epoch_offset); ebk = statbuf.st_size / 512 + 1; ffb = statbuf.st_size - ((statbuf.st_size / 512) * 512); fnam = full_name (file_name); flen = strlen (fnam); } else { cdt = 0; ebk = 0; ffb = 0; rfo = 0; fnam = (char *) ""; flen = 0; } /* Check to see if the file name that was searched on the previous call matches this file name. If so, return the index. */ if (last_file_lookup_index != 0) { fn = file_info_table[last_file_lookup_index].file_name; if (strcmp (fnam, fn) == 0) return last_file_lookup_index; } /* Didn't match the previous lookup, search the table */ for (i = 1; i < file_info_table_in_use; ++i) { fn = file_info_table[i].file_name; if (strcmp (fnam, fn) == 0) { last_file_lookup_index = i; return i; } } /* Prepare to add a new table entry by making sure there is enough space in the table to do so. If not, expand the current table. */ if (file_info_table_in_use == file_info_table_allocated) { file_info_table_allocated += FILE_TABLE_INCREMENT; file_info_table = xrealloc (file_info_table, (file_info_table_allocated * sizeof (dst_file_info_entry))); } /* Add the new entry to the end of the filename table. */ file_info_table[file_info_table_in_use].file_name = xstrdup (fnam); file_info_table[file_info_table_in_use].max_line = 0; file_info_table[file_info_table_in_use].cdt = cdt; file_info_table[file_info_table_in_use].ebk = ebk; file_info_table[file_info_table_in_use].ffb = ffb; file_info_table[file_info_table_in_use].rfo = rfo; file_info_table[file_info_table_in_use].flen = flen; last_file_lookup_index = file_info_table_in_use++; return last_file_lookup_index; } /* Output a label to mark the beginning of a source code line entry and record information relating to this source line, in 'line_info_table' for later output of the .debug_line section. */ static void vmsdbgout_source_line (register unsigned line, register const char *filename) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.source_line) (line, filename); if (debug_info_level >= DINFO_LEVEL_TERSE) { dst_line_info_ref line_info; targetm.asm_out.internal_label (asm_out_file, LINE_CODE_LABEL, line_info_table_in_use); /* Expand the line info table if necessary. */ if (line_info_table_in_use == line_info_table_allocated) { line_info_table_allocated += LINE_INFO_TABLE_INCREMENT; line_info_table = xrealloc (line_info_table, (line_info_table_allocated * sizeof (dst_line_info_entry))); } /* Add the new entry at the end of the line_info_table. */ line_info = &line_info_table[line_info_table_in_use++]; line_info->dst_file_num = lookup_filename (filename); line_info->dst_line_num = line; if (line > file_info_table[line_info->dst_file_num].max_line) file_info_table[line_info->dst_file_num].max_line = line; } } /* Record the beginning of a new source file, for later output. At present, unimplemented. */ static void vmsdbgout_start_source_file (unsigned int lineno, const char *filename) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.start_source_file) (lineno, filename); } /* Record the end of a source file, for later output. At present, unimplemented. */ static void vmsdbgout_end_source_file (unsigned int lineno ATTRIBUTE_UNUSED) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.end_source_file) (lineno); } /* Set up for Debug output at the start of compilation. 
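   The file, function and line tables allocated here are effectively 1-based: slot 0 of each table is allocated but deliberately left unused, which is why the *_in_use counters start at 1 and the loops that walk these tables begin at index 1.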
*/ static void vmsdbgout_init (const char *main_input_filename) { const char *language_string = lang_hooks.name; if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.init) (main_input_filename); if (debug_info_level == DINFO_LEVEL_NONE) return; /* Remember the name of the primary input file. */ primary_filename = main_input_filename; /* Allocate the initial hunk of the file_info_table. */ file_info_table = xcalloc (FILE_TABLE_INCREMENT, sizeof (dst_file_info_entry)); file_info_table_allocated = FILE_TABLE_INCREMENT; /* Skip the first entry - file numbers begin at 1 */ file_info_table_in_use = 1; func_table = (vms_func_ref) xcalloc (FUNC_TABLE_INCREMENT, sizeof (vms_func_node)); func_table_allocated = FUNC_TABLE_INCREMENT; func_table_in_use = 1; /* Allocate the initial hunk of the line_info_table. */ line_info_table = xcalloc (LINE_INFO_TABLE_INCREMENT, sizeof (dst_line_info_entry)); line_info_table_allocated = LINE_INFO_TABLE_INCREMENT; /* zero-th entry is allocated, but unused */ line_info_table_in_use = 1; lookup_filename (primary_filename); if (!strcmp (language_string, "GNU C")) module_language = DST_K_C; else if (!strcmp (language_string, "GNU C++")) module_language = DST_K_CXX; else if (!strcmp (language_string, "GNU Ada")) module_language = DST_K_ADA; else if (!strcmp (language_string, "GNU F77")) module_language = DST_K_FORTRAN; else module_language = DST_K_UNKNOWN; module_producer = concat (language_string, " ", version_string, NULL); ASM_GENERATE_INTERNAL_LABEL (text_end_label, TEXT_END_LABEL, 0); } /* Not implemented in VMS Debug. */ static void vmsdbgout_define (unsigned int lineno, const char *buffer) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.define) (lineno, buffer); } /* Not implemented in VMS Debug. */ static void vmsdbgout_undef (unsigned int lineno, const char *buffer) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.undef) (lineno, buffer); } /* Not implemented in VMS Debug. */ static void vmsdbgout_decl (tree decl) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.function_decl) (decl); } /* Not implemented in VMS Debug. */ static void vmsdbgout_global_decl (tree decl) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.global_decl) (decl); } /* Not implemented in VMS Debug. */ static void vmsdbgout_abstract_function (tree decl) { if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.outlining_inline_function) (decl); } /* Output stuff that Debug requires at the end of every file and generate the VMS Debug debugging info. */ static void vmsdbgout_finish (const char *main_input_filename ATTRIBUTE_UNUSED) { unsigned int i; int totsize; if (write_symbols == VMS_AND_DWARF2_DEBUG) (*dwarf2_debug_hooks.finish) (main_input_filename); if (debug_info_level == DINFO_LEVEL_NONE) return; /* Output a terminator label for the .text section. */ text_section (); targetm.asm_out.internal_label (asm_out_file, TEXT_END_LABEL, 0); /* Output debugging information. Warning! Do not change the name of the .vmsdebug section without changing it in the assembler also. 
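   Everything below is emitted in two passes: each write_* routine is first called with its dosizeonly argument set to 1 so that only sizes are accumulated, and is then called again with 0 to write the actual records. An illustrative excerpt of the pattern used below:

       totsize = write_modbeg (1);    ... sizing pass only
       write_modbeg (0);              ... second pass actually emits the record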
*/ named_section (NULL_TREE, ".vmsdebug", 0); ASM_OUTPUT_ALIGN (asm_out_file, 0); totsize = write_modbeg (1); for (i = 1; i < func_table_in_use; i++) { totsize += write_rtnbeg (i, 1); totsize += write_rtnend (i, 1); } totsize += write_pclines (1); write_modbeg (0); for (i = 1; i < func_table_in_use; i++) { write_rtnbeg (i, 0); write_rtnend (i, 0); } write_pclines (0); if (debug_info_level > DINFO_LEVEL_TERSE) { totsize = write_srccorrs (1); write_srccorrs (0); } totsize = write_modend (1); write_modend (0); } #endif /* VMS_DEBUGGING_INFO */ /* Output xcoff-format symbol table information from GNU compiler. Copyright (C) 1992, 1994, 1995, 1997, 1998, 1999, 2000, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Output xcoff-format symbol table data. The main functionality is contained in dbxout.c. This file implements the sdbout-like parts of the xcoff interface. Many functions are very similar to their counterparts in sdbout.c. */ #ifdef XCOFF_DEBUGGING_INFO /* This defines the C_* storage classes. */ /* Storage classes in XCOFF object file format designed for DBX's use. This info is from the `Files Reference' manual for IBM's AIX version 3 for the RS6000. */ #define C_GSYM 0x80 #define C_LSYM 0x81 #define C_PSYM 0x82 #define C_RSYM 0x83 #define C_RPSYM 0x84 #define C_STSYM 0x85 #define C_BCOMM 0x87 #define C_ECOML 0x88 #define C_ECOMM 0x89 #define C_DECL 0x8c #define C_ENTRY 0x8d #define C_FUN 0x8e /* Line number of beginning of current function, minus one. Negative means not in a function or not using xcoff. */ static int xcoff_begin_function_line = -1; static int xcoff_inlining = 0; /* Name of the current include file. */ const char *xcoff_current_include_file; /* Name of the current function file. This is the file the `.bf' is emitted from. In case a line is emitted from a different file, (by including that file of course), then the line number will be absolute. */ static const char *xcoff_current_function_file; /* Names of bss and data sections. These should be unique names for each compilation unit. */ char *xcoff_bss_section_name; char *xcoff_private_data_section_name; char *xcoff_read_only_section_name; /* Last source file name mentioned in a NOTE insn. */ const char *xcoff_lastfile; /* Macro definitions used below. */ #define ABS_OR_RELATIVE_LINENO(LINENO) \ ((xcoff_inlining) ? (LINENO) : (LINENO) - xcoff_begin_function_line) /* Output source line numbers via ".line" rather than ".stabd". */ #define ASM_OUTPUT_SOURCE_LINE(FILE,LINENUM,COUNTER) \ do \ { \ if (xcoff_begin_function_line >= 0) \ fprintf (FILE, "\t.line\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM)); \ } \ while (0) #define ASM_OUTPUT_LFB(FILE,LINENUM) \ { \ if (xcoff_begin_function_line == -1) \ { \ xcoff_begin_function_line = (LINENUM) - 1;\ fprintf (FILE, "\t.bf\t%d\n", (LINENUM)); \ } \ xcoff_current_function_file \ = (xcoff_current_include_file \ ? 
xcoff_current_include_file : main_input_filename); \ } #define ASM_OUTPUT_LFE(FILE,LINENUM) \ do \ { \ fprintf (FILE, "\t.ef\t%d\n", (LINENUM)); \ xcoff_begin_function_line = -1; \ } \ while (0) #define ASM_OUTPUT_LBB(FILE,LINENUM,BLOCKNUM) \ fprintf (FILE, "\t.bb\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM)) #define ASM_OUTPUT_LBE(FILE,LINENUM,BLOCKNUM) \ fprintf (FILE, "\t.eb\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM)) static void xcoffout_block (tree, int, tree); static void xcoffout_source_file (FILE *, const char *, int); /* Support routines for XCOFF debugging info. */ struct xcoff_type_number { const char *name; int number; }; static const struct xcoff_type_number xcoff_type_numbers[] = { { "int", -1 }, { "char", -2 }, { "short int", -3 }, { "long int", -4 }, /* fiddled to -31 if 64 bits */ { "unsigned char", -5 }, { "signed char", -6 }, { "short unsigned int", -7 }, { "unsigned int", -8 }, /* No such type "unsigned". */ { "long unsigned int", -10 }, /* fiddled to -32 if 64 bits */ { "void", -11 }, { "float", -12 }, { "double", -13 }, { "long double", -14 }, /* Pascal and Fortran types run from -15 to -29. */ { "wchar", -30 }, /* XXX Should be "wchar_t" ? */ { "long long int", -31 }, { "long long unsigned int", -32 }, /* Additional Fortran types run from -33 to -37. */ /* ??? Should also handle built-in C++ and Obj-C types. There perhaps aren't any that C doesn't already have. */ }; /* Returns an XCOFF fundamental type number for DECL (assumed to be a TYPE_DECL), or 0 if dbxout.c should assign a type number normally. */ int xcoff_assign_fundamental_type_number (tree decl) { const char *name; size_t i; /* Do not waste time searching the list for non-intrinsic types. */ if (DECL_NAME (decl) == 0 || ! DECL_IS_BUILTIN (decl)) return 0; name = IDENTIFIER_POINTER (DECL_NAME (decl)); /* Linear search, blech, but the list is too small to bother doing anything else. */ for (i = 0; i < ARRAY_SIZE (xcoff_type_numbers); i++) if (!strcmp (xcoff_type_numbers[i].name, name)) goto found; return 0; found: /* -4 and -10 should be replaced with -31 and -32, respectively, when used for a 64-bit type. */ if (int_size_in_bytes (TREE_TYPE (decl)) == 8) { if (xcoff_type_numbers[i].number == -4) return -31; if (xcoff_type_numbers[i].number == -10) return -32; } return xcoff_type_numbers[i].number; } /* Print an error message for unrecognized stab codes. */ #define UNKNOWN_STAB(STR) \ internal_error ("no sclass for %s stab (0x%x)\n", STR, stab) /* Conversion routine from BSD stabs to AIX storage classes. 
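   For example, an N_GSYM stab (a global symbol) becomes storage class C_GSYM (0x80) and N_FUN becomes C_FUN, while stab codes with no XCOFF counterpart, such as N_MAIN or N_SO, are reported through the UNKNOWN_STAB macro, which raises an internal error.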
*/ int stab_to_sclass (int stab) { switch (stab) { case N_GSYM: return C_GSYM; case N_FNAME: UNKNOWN_STAB ("N_FNAME"); case N_FUN: return C_FUN; case N_STSYM: case N_LCSYM: return C_STSYM; case N_MAIN: UNKNOWN_STAB ("N_MAIN"); case N_RSYM: return C_RSYM; case N_SSYM: UNKNOWN_STAB ("N_SSYM"); case N_RPSYM: return C_RPSYM; case N_PSYM: return C_PSYM; case N_LSYM: return C_LSYM; case N_DECL: return C_DECL; case N_ENTRY: return C_ENTRY; case N_SO: UNKNOWN_STAB ("N_SO"); case N_SOL: UNKNOWN_STAB ("N_SOL"); case N_SLINE: UNKNOWN_STAB ("N_SLINE"); case N_DSLINE: UNKNOWN_STAB ("N_DSLINE"); case N_BSLINE: UNKNOWN_STAB ("N_BSLINE"); case N_BINCL: UNKNOWN_STAB ("N_BINCL"); case N_EINCL: UNKNOWN_STAB ("N_EINCL"); case N_EXCL: UNKNOWN_STAB ("N_EXCL"); case N_LBRAC: UNKNOWN_STAB ("N_LBRAC"); case N_RBRAC: UNKNOWN_STAB ("N_RBRAC"); case N_BCOMM: return C_BCOMM; case N_ECOMM: return C_ECOMM; case N_ECOML: return C_ECOML; case N_LENG: UNKNOWN_STAB ("N_LENG"); case N_PC: UNKNOWN_STAB ("N_PC"); case N_M2C: UNKNOWN_STAB ("N_M2C"); case N_SCOPE: UNKNOWN_STAB ("N_SCOPE"); case N_CATCH: UNKNOWN_STAB ("N_CATCH"); case N_OPT: UNKNOWN_STAB ("N_OPT"); default: UNKNOWN_STAB ("?"); } } /* Output debugging info to FILE to switch to sourcefile FILENAME. INLINE_P is true if this is from an inlined function. */ static void xcoffout_source_file (FILE *file, const char *filename, int inline_p) { if (filename && (xcoff_lastfile == 0 || strcmp (filename, xcoff_lastfile) || (inline_p && ! xcoff_inlining) || (! inline_p && xcoff_inlining))) { if (xcoff_current_include_file) { fprintf (file, "\t.ei\t"); output_quoted_string (file, xcoff_current_include_file); fprintf (file, "\n"); xcoff_current_include_file = NULL; } xcoff_inlining = inline_p; if (strcmp (main_input_filename, filename) || inline_p) { fprintf (file, "\t.bi\t"); output_quoted_string (file, filename); fprintf (file, "\n"); xcoff_current_include_file = filename; } xcoff_lastfile = filename; } } /* Output a line number symbol entry for location (FILENAME, LINE). */ void xcoffout_source_line (unsigned int line, const char *filename) { bool inline_p = (strcmp (xcoff_current_function_file, filename) != 0 || (int) line < xcoff_begin_function_line); xcoffout_source_file (asm_out_file, filename, inline_p); ASM_OUTPUT_SOURCE_LINE (asm_out_file, line, 0); } /* Output the symbols defined in block number DO_BLOCK. This function works by walking the tree structure of blocks, counting blocks until it finds the desired block. */ static int do_block = 0; static void xcoffout_block (tree block, int depth, tree args) { while (block) { /* Ignore blocks never expanded or otherwise marked as real. */ if (TREE_USED (block)) { /* When we reach the specified block, output its symbols. */ if (BLOCK_NUMBER (block) == do_block) { /* Output the syms of the block. */ if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0) dbxout_syms (BLOCK_VARS (block)); if (args) dbxout_reg_parms (args); /* We are now done with the block. Don't go to inner blocks. */ return; } /* If we are past the specified block, stop the scan. */ else if (BLOCK_NUMBER (block) >= do_block) return; /* Output the subblocks. */ xcoffout_block (BLOCK_SUBBLOCKS (block), depth + 1, NULL_TREE); } block = BLOCK_CHAIN (block); } } /* Describe the beginning of an internal block within a function. Also output descriptions of variables defined in this block. N is the number of the block, by order of beginning, counting from 1, and not counting the outermost (function top-level) block. 
The blocks match the BLOCKs in DECL_INITIAL (current_function_decl), if the count starts at 0 for the outermost one. */ void xcoffout_begin_block (unsigned int line, unsigned int n) { tree decl = current_function_decl; /* The IBM AIX compiler does not emit a .bb for the function level scope, so we avoid it here also. */ if (n != 1) ASM_OUTPUT_LBB (asm_out_file, line, n); do_block = n; xcoffout_block (DECL_INITIAL (decl), 0, DECL_ARGUMENTS (decl)); } /* Describe the end line-number of an internal block within a function. */ void xcoffout_end_block (unsigned int line, unsigned int n) { if (n != 1) ASM_OUTPUT_LBE (asm_out_file, line, n); } /* Called at beginning of function (before prologue). Declare function as needed for debugging. */ void xcoffout_declare_function (FILE *file, tree decl, const char *name) { int i; if (*name == '*') name++; else for (i = 0; name[i]; ++i) { if (name[i] == '[') { char *n = alloca (i + 1); strncpy (n, name, i); n[i] = '\0'; name = n; break; } } /* Any pending .bi or .ei must occur before the .function pseudo op. Otherwise debuggers will think that the function is in the previous file and/or at the wrong line number. */ xcoffout_source_file (file, DECL_SOURCE_FILE (decl), 0); dbxout_symbol (decl, 0); /* .function NAME, TOP, MAPPING, TYPE, SIZE 16 and 044 are placeholders for backwards compatibility */ fprintf (file, "\t.function .%s,.%s,16,044,FE..%s-.%s\n", name, name, name, name); } /* Called at beginning of function body (at start of prologue). Record the function's starting line number, so we can output relative line numbers for the other lines. Record the file name that this function is contained in. */ void xcoffout_begin_prologue (unsigned int line, const char *file ATTRIBUTE_UNUSED) { ASM_OUTPUT_LFB (asm_out_file, line); dbxout_parms (DECL_ARGUMENTS (current_function_decl)); /* Emit the symbols for the outermost BLOCK's variables. sdbout.c does this in sdbout_begin_block, but there is no guarantee that there will be any inner block 1, so we must do it here. This gives a result similar to dbxout, so it does make some sense. */ do_block = BLOCK_NUMBER (DECL_INITIAL (current_function_decl)); xcoffout_block (DECL_INITIAL (current_function_decl), 0, DECL_ARGUMENTS (current_function_decl)); ASM_OUTPUT_SOURCE_LINE (asm_out_file, line, 0); } /* Called at end of function (before epilogue). Describe end of outermost block. */ void xcoffout_end_function (unsigned int last_linenum) { ASM_OUTPUT_LFE (asm_out_file, last_linenum); } /* Output xcoff info for the absolute end of a function. Called after the epilogue is output. */ void xcoffout_end_epilogue (unsigned int line ATTRIBUTE_UNUSED, const char *file ATTRIBUTE_UNUSED) { /* We need to pass the correct function size to .function, otherwise, the xas assembler can't figure out the correct size for the function aux entry. So, we emit a label after the last instruction which can be used by the .function pseudo op to calculate the function size. */ const char *fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0); if (*fname == '*') ++fname; fprintf (asm_out_file, "FE.."); ASM_OUTPUT_LABEL (asm_out_file, fname); } #endif /* XCOFF_DEBUGGING_INFO */ /* Functions to support a pool of allocatable objects. Copyright (C) 1987, 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. Contributed by Daniel Berlin This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Redefine abort to report an internal error w/o coredump, and reporting the location of the error in the source file. This logic is duplicated in rtl.h and tree.h because every file that needs the special abort includes one or both. toplev.h gets too few files, system.h gets too many. */ extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN; #define abort() fancy_abort (__FILE__, __LINE__, __FUNCTION__) #define align_eight(x) (((x+7) >> 3) << 3) /* The internal allocation object. */ typedef struct allocation_object_def { #ifdef ENABLE_CHECKING /* The ID of alloc pool which the object was allocated from. */ ALLOC_POOL_ID_TYPE id; #endif union { /* The data of the object. */ char data[1]; /* Because we want any type of data to be well aligned after the ID, the following elements are here. They are never accessed so the allocated object may be even smaller than this structure. */ char *align_p; HOST_WIDEST_INT align_i; long double align_ld; } u; } allocation_object; /* Convert a pointer to allocation_object from a pointer to user data. */ #define ALLOCATION_OBJECT_PTR_FROM_USER_PTR(X) \ ((allocation_object *) (((char *) (X)) \ - offsetof (allocation_object, u.data))) /* Convert a pointer to user data from a pointer to allocation_object. */ #define USER_PTR_FROM_ALLOCATION_OBJECT_PTR(X) \ ((void *) (((allocation_object *) (X))->u.data)) #ifdef ENABLE_CHECKING /* Last used ID. */ static ALLOC_POOL_ID_TYPE last_id; #endif #ifdef GATHER_STATISTICS /* Store information about each particular alloc_pool. */ struct alloc_pool_descriptor { const char *name; int allocated; int created; int peak; int current; }; /* Hashtable mapping alloc_pool names to descriptors. */ static htab_t alloc_pool_hash; /* Hashtable helpers. */ static hashval_t hash_descriptor (const void *p) { const struct alloc_pool_descriptor *d = p; return htab_hash_pointer (d->name); } static int eq_descriptor (const void *p1, const void *p2) { const struct alloc_pool_descriptor *d = p1; return d->name == p2; } /* For given name, return descriptor, create new if needed. */ static struct alloc_pool_descriptor * alloc_pool_descriptor (const char *name) { struct alloc_pool_descriptor **slot; if (!alloc_pool_hash) alloc_pool_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL); slot = (struct alloc_pool_descriptor **) htab_find_slot_with_hash (alloc_pool_hash, name, htab_hash_pointer (name), 1); if (*slot) return *slot; *slot = xcalloc (sizeof (**slot), 1); (*slot)->name = name; return *slot; } #endif /* Create a pool of things of size SIZE, with NUM in each block we allocate. */ alloc_pool create_alloc_pool (const char *name, size_t size, size_t num) { alloc_pool pool; size_t pool_size, header_size; #ifdef GATHER_STATISTICS struct alloc_pool_descriptor *desc; #endif if (!name) abort (); /* Make size large enough to store the list header. 
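The free list is threaded through the elements themselves: pool_free reuses an element's own storage to hold the alloc_pool_list node that links it onto the free list, so no element may be smaller than that header.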
*/ if (size < sizeof (alloc_pool_list)) size = sizeof (alloc_pool_list); /* Now align the size to a multiple of 4. */ size = align_eight (size); #ifdef ENABLE_CHECKING /* Add the aligned size of ID. */ size += offsetof (allocation_object, u.data); #endif /* Um, we can't really allocate 0 elements per block. */ if (num == 0) abort (); /* Find the size of the pool structure, and the name. */ pool_size = sizeof (struct alloc_pool_def); /* and allocate that much memory. */ pool = xmalloc (pool_size); /* Now init the various pieces of our pool structure. */ pool->name = /*xstrdup (name)*/name; #ifdef GATHER_STATISTICS desc = alloc_pool_descriptor (name); desc->created++; #endif pool->elt_size = size; pool->elts_per_block = num; /* List header size should be a multiple of 8. */ header_size = align_eight (sizeof (struct alloc_pool_list_def)); pool->block_size = (size * num) + header_size; pool->free_list = NULL; pool->elts_allocated = 0; pool->elts_free = 0; pool->blocks_allocated = 0; pool->block_list = NULL; #ifdef ENABLE_CHECKING /* Increase the last used ID and use it for this pool. ID == 0 is used for free elements of pool so skip it. */ last_id++; if (last_id == 0) last_id++; pool->id = last_id; #endif return (pool); } /* Free all memory allocated for the given memory pool. */ void free_alloc_pool (alloc_pool pool) { alloc_pool_list block, next_block; #ifdef GATHER_STATISTICS struct alloc_pool_descriptor *desc = alloc_pool_descriptor (pool->name); #endif #ifdef ENABLE_CHECKING if (!pool) abort (); #endif /* Free each block allocated to the pool. */ for (block = pool->block_list; block != NULL; block = next_block) { next_block = block->next; free (block); #ifdef GATHER_STATISTICS desc->current -= pool->block_size; #endif } #ifdef ENABLE_CHECKING memset (pool, 0xaf, sizeof (*pool)); #endif /* Lastly, free the pool. */ free (pool); } /* Allocates one element from the pool specified. */ void * pool_alloc (alloc_pool pool) { alloc_pool_list header; char *block; #ifdef GATHER_STATISTICS struct alloc_pool_descriptor *desc = alloc_pool_descriptor (pool->name); desc->allocated+=pool->elt_size; #endif #ifdef ENABLE_CHECKING if (!pool) abort (); #endif /* If there are no more free elements, make some more!. */ if (!pool->free_list) { size_t i; alloc_pool_list block_header; /* Make the block. */ block = xmalloc (pool->block_size); block_header = (alloc_pool_list) block; block += align_eight (sizeof (struct alloc_pool_list_def)); #ifdef GATHER_STATISTICS desc->current += pool->block_size; if (desc->peak < desc->current) desc->peak = desc->current; #endif /* Throw it on the block list. */ block_header->next = pool->block_list; pool->block_list = block_header; /* Now put the actual block pieces onto the free list. */ for (i = 0; i < pool->elts_per_block; i++, block += pool->elt_size) { #ifdef ENABLE_CHECKING /* Mark the element to be free. */ ((allocation_object *) block)->id = 0; #endif header = (alloc_pool_list) USER_PTR_FROM_ALLOCATION_OBJECT_PTR (block); header->next = pool->free_list; pool->free_list = header; } /* Also update the number of elements we have free/allocated, and increment the allocated block count. */ pool->elts_allocated += pool->elts_per_block; pool->elts_free += pool->elts_per_block; pool->blocks_allocated += 1; } /* Pull the first free element from the free list, and return it. */ header = pool->free_list; pool->free_list = header->next; pool->elts_free--; #ifdef ENABLE_CHECKING /* Set the ID for element. 
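A nonzero id marks the element as allocated and records which pool it came from; pool_free checks the id against the owning pool and resets it to zero when the element is returned.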
*/ ALLOCATION_OBJECT_PTR_FROM_USER_PTR (header)->id = pool->id; #endif return ((void *) header); } /* Puts PTR back on POOL's free list. */ void pool_free (alloc_pool pool, void *ptr) { alloc_pool_list header; #ifdef ENABLE_CHECKING if (!ptr) abort (); memset (ptr, 0xaf, pool->elt_size - offsetof (allocation_object, u.data)); /* Check whether the PTR was allocated from POOL. */ if (pool->id != ALLOCATION_OBJECT_PTR_FROM_USER_PTR (ptr)->id) abort (); /* Mark the element to be free. */ ALLOCATION_OBJECT_PTR_FROM_USER_PTR (ptr)->id = 0; #else /* Check if we free more than we allocated, which is Bad (TM). */ if (pool->elts_free + 1 > pool->elts_allocated) abort (); #endif header = (alloc_pool_list) ptr; header->next = pool->free_list; pool->free_list = header; pool->elts_free++; } /* Output per-alloc_pool statistics. */ #ifdef GATHER_STATISTICS /* Used to accumulate statistics about alloc_pool sizes. */ struct output_info { int count; int size; }; /* Called via htab_traverse. Output alloc_pool descriptor pointed out by SLOT and update statistics. */ static int print_statistics (void **slot, void *b) { struct alloc_pool_descriptor *d = (struct alloc_pool_descriptor *) *slot; struct output_info *i = (struct output_info *) b; if (d->allocated) { fprintf (stderr, "%-21s %6d %10d %10d %10d\n", d->name, d->created, d->allocated, d->peak, d->current); i->size += d->allocated; i->count += d->created; } return 1; } #endif /* Output per-alloc_pool memory usage statistics. */ void dump_alloc_pool_statistics (void) { #ifdef GATHER_STATISTICS struct output_info info; fprintf (stderr, "\nAlloc-pool Kind Pools Allocated Peak Leak\n"); fprintf (stderr, "-------------------------------------------------------------\n"); info.count = 0; info.size = 0; htab_traverse (alloc_pool_hash, print_statistics, &info); fprintf (stderr, "-------------------------------------------------------------\n"); fprintf (stderr, "%-20s %7d %10d\n", "Total", info.count, info.size); fprintf (stderr, "-------------------------------------------------------------\n"); #endif } /* ET-trees data structure implementation. Contributed by Pavel Nejedly Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of the libiberty library. Libiberty is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Libiberty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with libiberty; see the file COPYING.LIB. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The ET-forest structure is described in: D. D. Sleator and R. E. Tarjan. A data structure for dynamic trees. J. G'omput. System Sci., 26(3):362 381, 1983. */ /* We do not enable this with ENABLE_CHECKING, since it is awfully slow. */ #undef DEBUG_ET #ifdef DEBUG_ET #endif /* The occurrence of a node in the et tree. */ struct et_occ { struct et_node *of; /* The node. */ struct et_occ *parent; /* Parent in the splay-tree. */ struct et_occ *prev; /* Left son in the splay-tree. */ struct et_occ *next; /* Right son in the splay-tree. 
*/ int depth; /* The depth of the node is the sum of depth fields on the path to the root. */ int min; /* The minimum value of the depth in the subtree is obtained by adding sum of depth fields on the path to the root. */ struct et_occ *min_occ; /* The occurrence in the subtree with the minimal depth. */ }; static alloc_pool et_nodes; static alloc_pool et_occurrences; /* Changes depth of OCC to D. */ static inline void set_depth (struct et_occ *occ, int d) { if (!occ) return; occ->min += d - occ->depth; occ->depth = d; } /* Adds D to the depth of OCC. */ static inline void set_depth_add (struct et_occ *occ, int d) { if (!occ) return; occ->min += d; occ->depth += d; } /* Sets prev field of OCC to P. */ static inline void set_prev (struct et_occ *occ, struct et_occ *t) { #ifdef DEBUG_ET if (occ == t) abort (); #endif occ->prev = t; if (t) t->parent = occ; } /* Sets next field of OCC to P. */ static inline void set_next (struct et_occ *occ, struct et_occ *t) { #ifdef DEBUG_ET if (occ == t) abort (); #endif occ->next = t; if (t) t->parent = occ; } /* Recompute minimum for occurrence OCC. */ static inline void et_recomp_min (struct et_occ *occ) { struct et_occ *mson = occ->prev; if (!mson || (occ->next && mson->min > occ->next->min)) mson = occ->next; if (mson && mson->min < 0) { occ->min = mson->min + occ->depth; occ->min_occ = mson->min_occ; } else { occ->min = occ->depth; occ->min_occ = occ; } } #ifdef DEBUG_ET /* Checks whether neighbourhood of OCC seems sane. */ static void et_check_occ_sanity (struct et_occ *occ) { if (!occ) return; if (occ->parent == occ) abort (); if (occ->prev == occ) abort (); if (occ->next == occ) abort (); if (occ->next && occ->next == occ->prev) abort (); if (occ->next) { if (occ->next == occ->parent) abort (); if (occ->next->parent != occ) abort (); } if (occ->prev) { if (occ->prev == occ->parent) abort (); if (occ->prev->parent != occ) abort (); } if (occ->parent && occ->parent->prev != occ && occ->parent->next != occ) abort (); } /* Checks whether tree rooted at OCC is sane. */ static void et_check_sanity (struct et_occ *occ) { et_check_occ_sanity (occ); if (occ->prev) et_check_sanity (occ->prev); if (occ->next) et_check_sanity (occ->next); } /* Checks whether tree containing OCC is sane. */ static void et_check_tree_sanity (struct et_occ *occ) { while (occ->parent) occ = occ->parent; et_check_sanity (occ); } /* For recording the paths. */ /* An ad-hoc constant; if the function has more blocks, this won't work, but since it is used for debugging only, it does not matter. */ #define MAX_NODES 100000 static int len; static void *datas[MAX_NODES]; static int depths[MAX_NODES]; /* Records the path represented by OCC, with depth incremented by DEPTH. */ static int record_path_before_1 (struct et_occ *occ, int depth) { int mn, m; depth += occ->depth; mn = depth; if (occ->prev) { m = record_path_before_1 (occ->prev, depth); if (m < mn) mn = m; } fprintf (stderr, "%d (%d); ", ((basic_block) occ->of->data)->index, depth); if (len >= MAX_NODES) abort (); depths[len] = depth; datas[len] = occ->of; len++; if (occ->next) { m = record_path_before_1 (occ->next, depth); if (m < mn) mn = m; } if (mn != occ->min + depth - occ->depth) abort (); return mn; } /* Records the path represented by a tree containing OCC. 
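These routines exist only under DEBUG_ET: record_path_before_1 walks the splay tree in order, storing each occurrence and its absolute depth into datas[] and depths[], so that check_path_after can verify that a later splay left the represented sequence and depths unchanged.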
*/ static void record_path_before (struct et_occ *occ) { while (occ->parent) occ = occ->parent; len = 0; record_path_before_1 (occ, 0); fprintf (stderr, "\n"); } /* Checks whether the path represented by OCC, with depth incremented by DEPTH, was not changed since the last recording. */ static int check_path_after_1 (struct et_occ *occ, int depth) { int mn, m; depth += occ->depth; mn = depth; if (occ->next) { m = check_path_after_1 (occ->next, depth); if (m < mn) mn = m; } len--; if (depths[len] != depth || datas[len] != occ->of) abort (); if (occ->prev) { m = check_path_after_1 (occ->prev, depth); if (m < mn) mn = m; } if (mn != occ->min + depth - occ->depth) abort (); return mn; } /* Checks whether the path represented by a tree containing OCC was not changed since the last recording. */ static void check_path_after (struct et_occ *occ) { while (occ->parent) occ = occ->parent; check_path_after_1 (occ, 0); if (len != 0) abort (); } #endif /* Splay the occurrence OCC to the root of the tree. */ static void et_splay (struct et_occ *occ) { struct et_occ *f, *gf, *ggf; int occ_depth, f_depth, gf_depth; #ifdef DEBUG_ET record_path_before (occ); et_check_tree_sanity (occ); #endif while (occ->parent) { occ_depth = occ->depth; f = occ->parent; f_depth = f->depth; gf = f->parent; if (!gf) { set_depth_add (occ, f_depth); occ->min_occ = f->min_occ; occ->min = f->min; if (f->prev == occ) { /* zig */ set_prev (f, occ->next); set_next (occ, f); set_depth_add (f->prev, occ_depth); } else { /* zag */ set_next (f, occ->prev); set_prev (occ, f); set_depth_add (f->next, occ_depth); } set_depth (f, -occ_depth); occ->parent = NULL; et_recomp_min (f); #ifdef DEBUG_ET et_check_tree_sanity (occ); check_path_after (occ); #endif return; } gf_depth = gf->depth; set_depth_add (occ, f_depth + gf_depth); occ->min_occ = gf->min_occ; occ->min = gf->min; ggf = gf->parent; if (gf->prev == f) { if (f->prev == occ) { /* zig zig */ set_prev (gf, f->next); set_prev (f, occ->next); set_next (occ, f); set_next (f, gf); set_depth (f, -occ_depth); set_depth_add (f->prev, occ_depth); set_depth (gf, -f_depth); set_depth_add (gf->prev, f_depth); } else { /* zag zig */ set_prev (gf, occ->next); set_next (f, occ->prev); set_prev (occ, f); set_next (occ, gf); set_depth (f, -occ_depth); set_depth_add (f->next, occ_depth); set_depth (gf, -occ_depth - f_depth); set_depth_add (gf->prev, occ_depth + f_depth); } } else { if (f->prev == occ) { /* zig zag */ set_next (gf, occ->prev); set_prev (f, occ->next); set_prev (occ, gf); set_next (occ, f); set_depth (f, -occ_depth); set_depth_add (f->prev, occ_depth); set_depth (gf, -occ_depth - f_depth); set_depth_add (gf->next, occ_depth + f_depth); } else { /* zag zag */ set_next (gf, f->prev); set_next (f, occ->prev); set_prev (occ, f); set_prev (f, gf); set_depth (f, -occ_depth); set_depth_add (f->next, occ_depth); set_depth (gf, -f_depth); set_depth_add (gf->next, f_depth); } } occ->parent = ggf; if (ggf) { if (ggf->prev == gf) ggf->prev = occ; else ggf->next = occ; } et_recomp_min (gf); et_recomp_min (f); #ifdef DEBUG_ET et_check_tree_sanity (occ); #endif } #ifdef DEBUG_ET et_check_sanity (occ); check_path_after (occ); #endif } /* Create a new et tree occurrence of NODE. 
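Occurrences are carved out of the et_occurrences alloc pool, created lazily on first use with 300 elements per block; they are handed back with pool_free from et_free_tree and et_split.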
*/ static struct et_occ * et_new_occ (struct et_node *node) { struct et_occ *nw; if (!et_occurrences) et_occurrences = create_alloc_pool ("et_occ pool", sizeof (struct et_occ), 300); nw = pool_alloc (et_occurrences); nw->of = node; nw->parent = NULL; nw->prev = NULL; nw->next = NULL; nw->depth = 0; nw->min_occ = nw; nw->min = 0; return nw; } /* Create a new et tree containing DATA. */ struct et_node * et_new_tree (void *data) { struct et_node *nw; if (!et_nodes) et_nodes = create_alloc_pool ("et_node pool", sizeof (struct et_node), 300); nw = pool_alloc (et_nodes); nw->data = data; nw->father = NULL; nw->left = NULL; nw->right = NULL; nw->son = NULL; nw->rightmost_occ = et_new_occ (nw); nw->parent_occ = NULL; return nw; } /* Releases et tree T. */ void et_free_tree (struct et_node *t) { while (t->son) et_split (t->son); if (t->father) et_split (t); pool_free (et_occurrences, t->rightmost_occ); pool_free (et_nodes, t); } /* Sets father of et tree T to FATHER. */ void et_set_father (struct et_node *t, struct et_node *father) { struct et_node *left, *right; struct et_occ *rmost, *left_part, *new_f_occ, *p; /* Update the path represented in the splay tree. */ new_f_occ = et_new_occ (father); rmost = father->rightmost_occ; et_splay (rmost); left_part = rmost->prev; p = t->rightmost_occ; et_splay (p); set_prev (new_f_occ, left_part); set_next (new_f_occ, p); p->depth++; p->min++; et_recomp_min (new_f_occ); set_prev (rmost, new_f_occ); if (new_f_occ->min + rmost->depth < rmost->min) { rmost->min = new_f_occ->min + rmost->depth; rmost->min_occ = new_f_occ->min_occ; } t->parent_occ = new_f_occ; /* Update the tree. */ t->father = father; right = father->son; if (right) left = right->left; else left = right = t; left->right = t; right->left = t; t->left = left; t->right = right; father->son = t; #ifdef DEBUG_ET et_check_tree_sanity (rmost); record_path_before (rmost); #endif } /* Splits the edge from T to its father. */ void et_split (struct et_node *t) { struct et_node *father = t->father; struct et_occ *r, *l, *rmost, *p_occ; /* Update the path represented by the splay tree. */ rmost = t->rightmost_occ; et_splay (rmost); for (r = rmost->next; r->prev; r = r->prev) continue; et_splay (r); r->prev->parent = NULL; p_occ = t->parent_occ; et_splay (p_occ); t->parent_occ = NULL; l = p_occ->prev; p_occ->next->parent = NULL; set_prev (r, l); et_recomp_min (r); et_splay (rmost); rmost->depth = 0; rmost->min = 0; pool_free (et_occurrences, p_occ); /* Update the tree. */ if (father->son == t) father->son = t->right; if (father->son == t) father->son = NULL; else { t->left->right = t->right; t->right->left = t->left; } t->left = t->right = NULL; t->father = NULL; #ifdef DEBUG_ET et_check_tree_sanity (rmost); record_path_before (rmost); et_check_tree_sanity (r); record_path_before (r); #endif } /* Finds the nearest common ancestor of the nodes N1 and N2. 
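Both rightmost occurrences are splayed and spliced into a single splay tree; the answer is the occurrence of minimal depth between them, read off the maintained min/min_occ fields. For illustration (da and db stand for arbitrary client pointers):

      struct et_node *a = et_new_tree (da);
      struct et_node *b = et_new_tree (db);
      et_set_father (b, a);

   after which et_nca (a, b) returns a and et_below (b, a) is true.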
*/ struct et_node * et_nca (struct et_node *n1, struct et_node *n2) { struct et_occ *o1 = n1->rightmost_occ, *o2 = n2->rightmost_occ, *om; struct et_occ *l, *r, *ret; int mn; if (n1 == n2) return n1; et_splay (o1); l = o1->prev; r = o1->next; if (l) l->parent = NULL; if (r) r->parent = NULL; et_splay (o2); if (l == o2 || (l && l->parent != NULL)) { ret = o2->next; set_prev (o1, o2); if (r) r->parent = o1; } else { ret = o2->prev; set_next (o1, o2); if (l) l->parent = o1; } if (0 < o2->depth) { om = o1; mn = o1->depth; } else { om = o2; mn = o2->depth + o1->depth; } #ifdef DEBUG_ET et_check_tree_sanity (o2); #endif if (ret && ret->min + o1->depth + o2->depth < mn) return ret->min_occ->of; else return om->of; } /* Checks whether the node UP is an ancestor of the node DOWN. */ bool et_below (struct et_node *down, struct et_node *up) { struct et_occ *u = up->rightmost_occ, *d = down->rightmost_occ; struct et_occ *l, *r; if (up == down) return true; et_splay (u); l = u->prev; r = u->next; if (!l) return false; l->parent = NULL; if (r) r->parent = NULL; et_splay (d); if (l == d || l->parent != NULL) { if (r) r->parent = u; set_prev (u, d); #ifdef DEBUG_ET et_check_tree_sanity (u); #endif } else { l->parent = u; /* In case O1 and O2 are in two different trees, we must just restore the original state. */ if (r && r->parent != NULL) set_next (u, d); else set_next (u, r); #ifdef DEBUG_ET et_check_tree_sanity (u); #endif return false; } if (0 >= d->depth) return false; return !d->next || d->next->min + d->depth >= 0; } /* Hooks for cfg representation specific functions. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Sebastian Pop This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* A pointer to one of the hooks containers. */ static struct cfg_hooks *cfg_hooks; /* Initialization of functions specific to the rtl IR. */ void rtl_register_cfg_hooks (void) { cfg_hooks = &rtl_cfg_hooks; } /* Initialization of functions specific to the rtl IR. */ void cfg_layout_rtl_register_cfg_hooks (void) { cfg_hooks = &cfg_layout_rtl_cfg_hooks; } /* Initialization of functions specific to the tree IR. */ void tree_register_cfg_hooks (void) { cfg_hooks = &tree_cfg_hooks; } /* Returns current ir type (rtl = 0, trees = 1). */ int ir_type (void) { return cfg_hooks == &tree_cfg_hooks ? 1 : 0; } /* Verify the CFG consistency. Currently it does following: checks edge and basic block list correctness and calls into IL dependent checking then. */ void verify_flow_info (void) { size_t *edge_checksum; int num_bb_notes, err = 0; basic_block bb, last_bb_seen; basic_block *last_visited; timevar_push (TV_CFG_VERIFY); last_visited = xcalloc (last_basic_block + 2, sizeof (basic_block)); edge_checksum = xcalloc (last_basic_block + 2, sizeof (size_t)); /* Check bb chain & numbers. 
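Every block must be recorded in the BASIC_BLOCK array under its own index, and each prev_bb pointer must agree with the order in which the chain is walked via next_bb.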
*/ last_bb_seen = ENTRY_BLOCK_PTR; FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb) { if (bb != EXIT_BLOCK_PTR && bb != BASIC_BLOCK (bb->index)) { error ("bb %d on wrong place", bb->index); err = 1; } if (bb->prev_bb != last_bb_seen) { error ("prev_bb of %d should be %d, not %d", bb->index, last_bb_seen->index, bb->prev_bb->index); err = 1; } last_bb_seen = bb; } /* Now check the basic blocks (boundaries etc.) */ FOR_EACH_BB_REVERSE (bb) { int n_fallthru = 0; edge e; if (bb->count < 0) { error ("verify_flow_info: Wrong count of block %i %i", bb->index, (int)bb->count); err = 1; } if (bb->frequency < 0) { error ("verify_flow_info: Wrong frequency of block %i %i", bb->index, bb->frequency); err = 1; } for (e = bb->succ; e; e = e->succ_next) { if (last_visited [e->dest->index + 2] == bb) { error ("verify_flow_info: Duplicate edge %i->%i", e->src->index, e->dest->index); err = 1; } if (e->probability < 0 || e->probability > REG_BR_PROB_BASE) { error ("verify_flow_info: Wrong probability of edge %i->%i %i", e->src->index, e->dest->index, e->probability); err = 1; } if (e->count < 0) { error ("verify_flow_info: Wrong count of edge %i->%i %i", e->src->index, e->dest->index, (int)e->count); err = 1; } last_visited [e->dest->index + 2] = bb; if (e->flags & EDGE_FALLTHRU) n_fallthru++; if (e->src != bb) { error ("verify_flow_info: Basic block %d succ edge is corrupted", bb->index); fprintf (stderr, "Predecessor: "); dump_edge_info (stderr, e, 0); fprintf (stderr, "\nSuccessor: "); dump_edge_info (stderr, e, 1); fprintf (stderr, "\n"); err = 1; } edge_checksum[e->dest->index + 2] += (size_t) e; } if (n_fallthru > 1) { error ("Wrong amount of branch edges after unconditional jump %i", bb->index); err = 1; } for (e = bb->pred; e; e = e->pred_next) { if (e->dest != bb) { error ("basic block %d pred edge is corrupted", bb->index); fputs ("Predecessor: ", stderr); dump_edge_info (stderr, e, 0); fputs ("\nSuccessor: ", stderr); dump_edge_info (stderr, e, 1); fputc ('\n', stderr); err = 1; } edge_checksum[e->dest->index + 2] -= (size_t) e; } } /* Complete edge checksumming for ENTRY and EXIT. */ { edge e; for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next) edge_checksum[e->dest->index + 2] += (size_t) e; for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next) edge_checksum[e->dest->index + 2] -= (size_t) e; } FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) if (edge_checksum[bb->index + 2]) { error ("basic block %i edge lists are corrupted", bb->index); err = 1; } num_bb_notes = 0; last_bb_seen = ENTRY_BLOCK_PTR; /* Clean up. */ free (last_visited); free (edge_checksum); if (cfg_hooks->verify_flow_info) err |= cfg_hooks->verify_flow_info (); if (err) internal_error ("verify_flow_info failed"); timevar_pop (TV_CFG_VERIFY); } /* Print out one basic block. This function takes care of the purely graph related information. The cfg hook for the active representation should dump representation-specific information. 
*/ void dump_bb (basic_block bb, FILE *outf, int indent) { edge e; char *s_indent; s_indent = alloca ((size_t) indent + 1); memset (s_indent, ' ', (size_t) indent); s_indent[indent] = '\0'; fprintf (outf, ";;%s basic block %d, loop depth %d, count ", s_indent, bb->index, bb->loop_depth); fprintf (outf, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) bb->count); putc ('\n', outf); fprintf (outf, ";;%s prev block ", s_indent); if (bb->prev_bb) fprintf (outf, "%d, ", bb->prev_bb->index); else fprintf (outf, "(nil), "); fprintf (outf, "next block "); if (bb->next_bb) fprintf (outf, "%d", bb->next_bb->index); else fprintf (outf, "(nil)"); putc ('\n', outf); fprintf (outf, ";;%s pred: ", s_indent); for (e = bb->pred; e; e = e->pred_next) dump_edge_info (outf, e, 0); putc ('\n', outf); fprintf (outf, ";;%s succ: ", s_indent); for (e = bb->succ; e; e = e->succ_next) dump_edge_info (outf, e, 1); putc ('\n', outf); if (cfg_hooks->dump_bb) cfg_hooks->dump_bb (bb, outf, indent); } /* Redirect edge E to the given basic block DEST and update underlying program representation. Returns edge representing redirected branch (that may not be equivalent to E in the case of duplicate edges being removed) or NULL if edge is not easily redirectable for whatever reason. */ edge redirect_edge_and_branch (edge e, basic_block dest) { edge ret; if (!cfg_hooks->redirect_edge_and_branch) internal_error ("%s does not support redirect_edge_and_branch.", cfg_hooks->name); ret = cfg_hooks->redirect_edge_and_branch (e, dest); return ret; } /* Redirect the edge E to basic block DEST even if it requires creating of a new basic block; then it returns the newly created basic block. Aborts when redirection is impossible. */ basic_block redirect_edge_and_branch_force (edge e, basic_block dest) { basic_block ret; if (!cfg_hooks->redirect_edge_and_branch_force) internal_error ("%s does not support redirect_edge_and_branch_force.", cfg_hooks->name); ret = cfg_hooks->redirect_edge_and_branch_force (e, dest); return ret; } /* Splits basic block BB after the specified instruction I (but at least after the labels). If I is NULL, splits just after labels. The newly created edge is returned. The new basic block is created just after the old one. */ edge split_block (basic_block bb, void *i) { basic_block new_bb; if (!cfg_hooks->split_block) internal_error ("%s does not support split_block.", cfg_hooks->name); new_bb = cfg_hooks->split_block (bb, i); if (!new_bb) return NULL; new_bb->count = bb->count; new_bb->frequency = bb->frequency; new_bb->loop_depth = bb->loop_depth; if (dom_computed[CDI_DOMINATORS] >= DOM_CONS_OK) { redirect_immediate_dominators (CDI_DOMINATORS, bb, new_bb); set_immediate_dominator (CDI_DOMINATORS, new_bb, bb); } return make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU); } /* Splits block BB just after labels. The newly created edge is returned. */ edge split_block_after_labels (basic_block bb) { return split_block (bb, NULL); } /* Moves block BB immediately after block AFTER. Returns false if the movement was impossible. */ bool move_block_after (basic_block bb, basic_block after) { bool ret; if (!cfg_hooks->move_block_after) internal_error ("%s does not support move_block_after.", cfg_hooks->name); ret = cfg_hooks->move_block_after (bb, after); return ret; } /* Deletes the basic block BB. 
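The IL-specific hook removes the block's own statements or insns; all incoming and outgoing edges are then removed (there may still be incoming edges when an unreachable loop is being deleted), the block is dropped from any computed dominance information, and expunge_block finally takes it out of the basic block array.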
*/ void delete_basic_block (basic_block bb) { if (!cfg_hooks->delete_basic_block) internal_error ("%s does not support delete_basic_block.", cfg_hooks->name); cfg_hooks->delete_basic_block (bb); /* Remove the edges into and out of this block. Note that there may indeed be edges in, if we are removing an unreachable loop. */ while (bb->pred != NULL) remove_edge (bb->pred); while (bb->succ != NULL) remove_edge (bb->succ); bb->pred = NULL; bb->succ = NULL; if (dom_computed[CDI_DOMINATORS]) delete_from_dominance_info (CDI_DOMINATORS, bb); if (dom_computed[CDI_POST_DOMINATORS]) delete_from_dominance_info (CDI_POST_DOMINATORS, bb); /* Remove the basic block from the array. */ expunge_block (bb); } /* Splits edge E and returns the newly created basic block. */ basic_block split_edge (edge e) { basic_block ret; gcov_type count = e->count; int freq = EDGE_FREQUENCY (e); edge f; if (!cfg_hooks->split_edge) internal_error ("%s does not support split_edge.", cfg_hooks->name); ret = cfg_hooks->split_edge (e); ret->count = count; ret->frequency = freq; ret->succ->probability = REG_BR_PROB_BASE; ret->succ->count = count; if (dom_computed[CDI_DOMINATORS]) set_immediate_dominator (CDI_DOMINATORS, ret, ret->pred->src); if (dom_computed[CDI_DOMINATORS] >= DOM_NO_FAST_QUERY) { /* There are two cases: If the immediate dominator of e->dest is not e->src, it remains unchanged. If immediate dominator of e->dest is e->src, it may become ret, provided that all other predecessors of e->dest are dominated by e->dest. */ if (get_immediate_dominator (CDI_DOMINATORS, ret->succ->dest) == ret->pred->src) { for (f = ret->succ->dest->pred; f; f = f->pred_next) { if (f == ret->succ) continue; if (!dominated_by_p (CDI_DOMINATORS, f->src, ret->succ->dest)) break; } if (!f) set_immediate_dominator (CDI_DOMINATORS, ret->succ->dest, ret); } }; return ret; } /* Creates a new basic block just after the basic block AFTER. HEAD and END are the first and the last statement belonging to the block. If both are NULL, an empty block is created. */ basic_block create_basic_block (void *head, void *end, basic_block after) { basic_block ret; if (!cfg_hooks->create_basic_block) internal_error ("%s does not support create_basic_block.", cfg_hooks->name); ret = cfg_hooks->create_basic_block (head, end, after); if (dom_computed[CDI_DOMINATORS]) add_to_dominance_info (CDI_DOMINATORS, ret); if (dom_computed[CDI_POST_DOMINATORS]) add_to_dominance_info (CDI_POST_DOMINATORS, ret); return ret; } /* Creates an empty basic block just after basic block AFTER. */ basic_block create_empty_bb (basic_block after) { return create_basic_block (NULL, NULL, after); } /* Checks whether we may merge blocks BB1 and BB2. */ bool can_merge_blocks_p (basic_block bb1, basic_block bb2) { bool ret; if (!cfg_hooks->can_merge_blocks_p) internal_error ("%s does not support can_merge_blocks_p.", cfg_hooks->name); ret = cfg_hooks->can_merge_blocks_p (bb1, bb2); return ret; } void predict_edge (edge e, enum br_predictor predictor, int probability) { if (!cfg_hooks->predict_edge) internal_error ("%s does not support predict_edge.", cfg_hooks->name); cfg_hooks->predict_edge (e, predictor, probability); } bool predicted_by_p (basic_block bb, enum br_predictor predictor) { if (!cfg_hooks->predict_edge) internal_error ("%s does not support predicted_by_p.", cfg_hooks->name); return cfg_hooks->predicted_by_p (bb, predictor); } /* Merges basic block B into basic block A. 
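Callers are expected to have checked can_merge_blocks_p beforehand; on return B's outgoing edges and flags have been transferred to A and B itself has been expunged.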
*/ void merge_blocks (basic_block a, basic_block b) { edge e; if (!cfg_hooks->merge_blocks) internal_error ("%s does not support merge_blocks.", cfg_hooks->name); cfg_hooks->merge_blocks (a, b); /* Normally there should only be one successor of A and that is B, but partway though the merge of blocks for conditional_execution we'll be merging a TEST block with THEN and ELSE successors. Free the whole lot of them and hope the caller knows what they're doing. */ while (a->succ) remove_edge (a->succ); /* Adjust the edges out of B for the new owner. */ for (e = b->succ; e; e = e->succ_next) e->src = a; a->succ = b->succ; a->flags |= b->flags; /* B hasn't quite yet ceased to exist. Attempt to prevent mishap. */ b->pred = b->succ = NULL; a->global_live_at_end = b->global_live_at_end; if (dom_computed[CDI_DOMINATORS]) redirect_immediate_dominators (CDI_DOMINATORS, b, a); if (dom_computed[CDI_DOMINATORS]) delete_from_dominance_info (CDI_DOMINATORS, b); if (dom_computed[CDI_POST_DOMINATORS]) delete_from_dominance_info (CDI_POST_DOMINATORS, b); expunge_block (b); } /* Split BB into entry part and the rest (the rest is the newly created block). Redirect those edges for that REDIRECT_EDGE_P returns true to the entry part. Returns the edge connecting the entry part to the rest. */ edge make_forwarder_block (basic_block bb, bool (*redirect_edge_p) (edge), void (*new_bb_cbk) (basic_block)) { edge e, next_e, fallthru; basic_block dummy, jump; if (!cfg_hooks->make_forwarder_block) internal_error ("%s does not support make_forwarder_block.", cfg_hooks->name); fallthru = split_block_after_labels (bb); dummy = fallthru->src; bb = fallthru->dest; /* Redirect back edges we want to keep. */ for (e = dummy->pred; e; e = next_e) { next_e = e->pred_next; if (redirect_edge_p (e)) continue; dummy->frequency -= EDGE_FREQUENCY (e); dummy->count -= e->count; if (dummy->frequency < 0) dummy->frequency = 0; if (dummy->count < 0) dummy->count = 0; fallthru->count -= e->count; if (fallthru->count < 0) fallthru->count = 0; jump = redirect_edge_and_branch_force (e, bb); if (jump) new_bb_cbk (jump); } if (dom_computed[CDI_DOMINATORS] >= DOM_CONS_OK) { basic_block doms_to_fix[2]; doms_to_fix[0] = dummy; doms_to_fix[1] = bb; iterate_fix_dominators (CDI_DOMINATORS, doms_to_fix, 2); } cfg_hooks->make_forwarder_block (fallthru); return fallthru; } void tidy_fallthru_edge (edge e) { if (cfg_hooks->tidy_fallthru_edge) cfg_hooks->tidy_fallthru_edge (e); } /* Fix up edges that now fall through, or rather should now fall through but previously required a jump around now deleted blocks. Simplify the search by only examining blocks numerically adjacent, since this is how find_basic_blocks created them. */ void tidy_fallthru_edges (void) { basic_block b, c; if (!cfg_hooks->tidy_fallthru_edge) return; if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR) return; FOR_BB_BETWEEN (b, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR->prev_bb, next_bb) { edge s; c = b->next_bb; /* We care about simple conditional or unconditional jumps with a single successor. If we had a conditional branch to the next instruction when find_basic_blocks was called, then there will only be one out edge for the block which ended with the conditional branch (since we do not create duplicate edges). Furthermore, the edge will be marked as a fallthru because we merge the flags for the duplicate edges. So we do not want to check that the edge is not a FALLTHRU edge. */ if ((s = b->succ) != NULL && ! 
(s->flags & EDGE_COMPLEX) && s->succ_next == NULL && s->dest == c && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)) tidy_fallthru_edge (s); } } /* Returns true if we can duplicate basic block BB. */ bool can_duplicate_block_p (basic_block bb) { edge e; if (!cfg_hooks->can_duplicate_block_p) internal_error ("%s does not support can_duplicate_block_p.", cfg_hooks->name); if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR) return false; /* Duplicating fallthru block to exit would require adding a jump and splitting the real last BB. */ for (e = bb->succ; e; e = e->succ_next) if (e->dest == EXIT_BLOCK_PTR && e->flags & EDGE_FALLTHRU) return false; return cfg_hooks->can_duplicate_block_p (bb); } /* Duplicates basic block BB and redirects edge E to it. Returns the new basic block. */ basic_block duplicate_block (basic_block bb, edge e) { edge s, n; basic_block new_bb; gcov_type new_count = e ? e->count : 0; if (!cfg_hooks->duplicate_block) internal_error ("%s does not support duplicate_block.", cfg_hooks->name); if (bb->count < new_count) new_count = bb->count; if (!bb->pred) abort (); #ifdef ENABLE_CHECKING if (!can_duplicate_block_p (bb)) abort (); #endif new_bb = cfg_hooks->duplicate_block (bb); new_bb->loop_depth = bb->loop_depth; new_bb->flags = bb->flags; for (s = bb->succ; s; s = s->succ_next) { /* Since we are creating edges from a new block to successors of another block (which therefore are known to be disjoint), there is no need to actually check for duplicated edges. */ n = unchecked_make_edge (new_bb, s->dest, s->flags); n->probability = s->probability; if (e && bb->count) { /* Take care for overflows! */ n->count = s->count * (new_count * 10000 / bb->count) / 10000; s->count -= n->count; } else n->count = s->count; n->aux = s->aux; } if (e) { new_bb->count = new_count; bb->count -= new_count; new_bb->frequency = EDGE_FREQUENCY (e); bb->frequency -= EDGE_FREQUENCY (e); redirect_edge_and_branch_force (e, new_bb); if (bb->count < 0) bb->count = 0; if (bb->frequency < 0) bb->frequency = 0; } else { new_bb->count = bb->count; new_bb->frequency = bb->frequency; } new_bb->rbi->original = bb; bb->rbi->copy = new_bb; return new_bb; } /* Return 1 if BB ends with a call, possibly followed by some instructions that must stay with the call, 0 otherwise. */ bool block_ends_with_call_p (basic_block bb) { if (!cfg_hooks->block_ends_with_call_p) internal_error ("%s does not support block_ends_with_call_p", cfg_hooks->name); return (cfg_hooks->block_ends_with_call_p) (bb); } /* Return 1 if BB ends with a conditional branch, 0 otherwise. */ bool block_ends_with_condjump_p (basic_block bb) { if (!cfg_hooks->block_ends_with_condjump_p) internal_error ("%s does not support block_ends_with_condjump_p", cfg_hooks->name); return (cfg_hooks->block_ends_with_condjump_p) (bb); } /* Add fake edges to the function exit for any non constant and non noreturn calls, volatile inline assembly in the bitmap of blocks specified by BLOCKS or to the whole CFG if BLOCKS is zero. Return the number of blocks that were split. The goal is to expose cases in which entering a basic block does not imply that all subsequent instructions must be executed. */ int flow_call_edges_add (sbitmap blocks) { if (!cfg_hooks->flow_call_edges_add) internal_error ("%s does not support flow_call_edges_add", cfg_hooks->name); return (cfg_hooks->flow_call_edges_add) (blocks); } /* Perform branch target register load optimizations. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Target register optimizations - these are performed after reload. */ typedef struct btr_def_group_s { struct btr_def_group_s *next; rtx src; struct btr_def_s *members; } *btr_def_group; typedef struct btr_user_s { struct btr_user_s *next; basic_block bb; int luid; rtx insn; /* If INSN has a single use of a single branch register, then USE points to it within INSN. If there is more than one branch register use, or the use is in some way ambiguous, then USE is NULL. */ rtx use; int n_reaching_defs; int first_reaching_def; char other_use_this_block; } *btr_user; /* btr_def structs appear on three lists: 1. A list of all btr_def structures (head is ALL_BTR_DEFS, linked by the NEXT field). 2. A list of branch reg definitions per basic block (head is BB_BTR_DEFS[i], linked by the NEXT_THIS_BB field). 3. A list of all branch reg definitions belonging to the same group (head is in a BTR_DEF_GROUP struct, linked by NEXT_THIS_GROUP field). */ typedef struct btr_def_s { struct btr_def_s *next_this_bb; struct btr_def_s *next_this_group; basic_block bb; int luid; rtx insn; int btr; int cost; /* For a branch register setting insn that has a constant source (i.e. a label), group links together all the insns with the same source. For other branch register setting insns, group is NULL. */ btr_def_group group; btr_user uses; /* If this def has a reaching use which is not a simple use in a branch instruction, then has_ambiguous_use will be true, and we will not attempt to migrate this definition. */ char has_ambiguous_use; /* live_range is an approximation to the true live range for this def/use web, because it records the set of blocks that contain the live range. There could be other live ranges for the same branch register in that set of blocks, either in the block containing the def (before the def), or in a block containing a use (after the use). If there are such other live ranges, then other_btr_uses_before_def or other_btr_uses_after_use must be set true as appropriate. 
*/ char other_btr_uses_before_def; char other_btr_uses_after_use; bitmap live_range; } *btr_def; static int issue_rate; static int basic_block_freq (basic_block); static int insn_sets_btr_p (rtx, int, int *); static rtx *find_btr_use (rtx); static int btr_referenced_p (rtx, rtx *); static int find_btr_reference (rtx *, void *); static void find_btr_def_group (btr_def_group *, btr_def); static btr_def add_btr_def (fibheap_t, basic_block, int, rtx, unsigned int, int, btr_def_group *); static btr_user new_btr_user (basic_block, int, rtx); static void dump_hard_reg_set (HARD_REG_SET); static void dump_btrs_live (int); static void note_other_use_this_block (unsigned int, btr_user); static void compute_defs_uses_and_gen (fibheap_t, btr_def *,btr_user *, sbitmap *, sbitmap *, HARD_REG_SET *); static void compute_kill (sbitmap *, sbitmap *, HARD_REG_SET *); static void compute_out (sbitmap *bb_out, sbitmap *, sbitmap *, int); static void link_btr_uses (btr_def *, btr_user *, sbitmap *, sbitmap *, int); static void build_btr_def_use_webs (fibheap_t); static int block_at_edge_of_live_range_p (int, btr_def); static void clear_btr_from_live_range (btr_def def); static void add_btr_to_live_range (btr_def); static void augment_live_range (bitmap, HARD_REG_SET *, basic_block, basic_block); static int choose_btr (HARD_REG_SET); static void combine_btr_defs (btr_def, HARD_REG_SET *); static void btr_def_live_range (btr_def, HARD_REG_SET *); static void move_btr_def (basic_block, int, btr_def, bitmap, HARD_REG_SET *); static int migrate_btr_def (btr_def, int); static void migrate_btr_defs (enum reg_class, int); static int can_move_up (basic_block, rtx, int); static void note_btr_set (rtx, rtx, void *); /* The following code performs code motion of target load instructions (instructions that set branch target registers), to move them forward away from the branch instructions and out of loops (or, more generally, from a more frequently executed place to a less frequently executed place). Moving target load instructions further in front of the branch instruction that uses the target register value means that the hardware has a better chance of preloading the instructions at the branch target by the time the branch is reached. This avoids bubbles when a taken branch needs to flush out the pipeline. Moving target load instructions out of loops means they are executed less frequently. */ /* An obstack to hold the def-use web data structures built up for migrating branch target load instructions. */ static struct obstack migrate_btrl_obstack; /* Array indexed by basic block number, giving the set of registers live in that block. */ static HARD_REG_SET *btrs_live; /* Array indexed by basic block number, giving the set of registers live at the end of that block, including any uses by a final jump insn, if any. */ static HARD_REG_SET *btrs_live_at_end; /* Set of all target registers that we are willing to allocate. */ static HARD_REG_SET all_btrs; /* Provide lower and upper bounds for target register numbers, so that we don't need to search through all the hard registers all the time. */ static int first_btr, last_btr; /* Return an estimate of the frequency of execution of block bb. */ static int basic_block_freq (basic_block bb) { return bb->frequency; } static rtx *btr_reference_found; /* A subroutine of btr_referenced_p, called through for_each_rtx. PREG is a pointer to an rtx that is to be excluded from the traversal. 
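The return value follows the for_each_rtx convention: -1 means do not look inside this sub-rtx, 0 means keep scanning, and a positive value stops the traversal and is propagated back to the caller.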
If we find a reference to a target register anywhere else, return 1, and put a pointer to it into btr_reference_found. */ static int find_btr_reference (rtx *px, void *preg) { rtx x; int regno, i; if (px == preg) return -1; x = *px; if (!REG_P (x)) return 0; regno = REGNO (x); for (i = hard_regno_nregs[regno][GET_MODE (x)] - 1; i >= 0; i--) if (TEST_HARD_REG_BIT (all_btrs, regno+i)) { btr_reference_found = px; return 1; } return -1; } /* Return nonzero if X references (sets or reads) any branch target register. If EXCLUDEP is set, disregard any references within the rtx pointed to by it. If returning nonzero, also set btr_reference_found as above. */ static int btr_referenced_p (rtx x, rtx *excludep) { return for_each_rtx (&x, find_btr_reference, excludep); } /* Return true if insn is an instruction that sets a target register. if CHECK_CONST is true, only return true if the source is constant. If such a set is found and REGNO is nonzero, assign the register number of the destination register to *REGNO. */ static int insn_sets_btr_p (rtx insn, int check_const, int *regno) { rtx set; if (GET_CODE (insn) == INSN && (set = single_set (insn))) { rtx dest = SET_DEST (set); rtx src = SET_SRC (set); if (GET_CODE (dest) == SUBREG) dest = XEXP (dest, 0); if (REG_P (dest) && TEST_HARD_REG_BIT (all_btrs, REGNO (dest))) { if (btr_referenced_p (src, NULL)) abort(); if (!check_const || CONSTANT_P (src)) { if (regno) *regno = REGNO (dest); return 1; } } } return 0; } /* Find and return a use of a target register within an instruction INSN. */ static rtx * find_btr_use (rtx insn) { return btr_referenced_p (insn, NULL) ? btr_reference_found : NULL; } /* Find the group that the target register definition DEF belongs to in the list starting with *ALL_BTR_DEF_GROUPS. If no such group exists, create one. Add def to the group. */ static void find_btr_def_group (btr_def_group *all_btr_def_groups, btr_def def) { if (insn_sets_btr_p (def->insn, 1, NULL)) { btr_def_group this_group; rtx def_src = SET_SRC (single_set (def->insn)); /* ?? This linear search is an efficiency concern, particularly as the search will almost always fail to find a match. */ for (this_group = *all_btr_def_groups; this_group != NULL; this_group = this_group->next) if (rtx_equal_p (def_src, this_group->src)) break; if (!this_group) { this_group = obstack_alloc (&migrate_btrl_obstack, sizeof (struct btr_def_group_s)); this_group->src = def_src; this_group->members = NULL; this_group->next = *all_btr_def_groups; *all_btr_def_groups = this_group; } def->group = this_group; def->next_this_group = this_group->members; this_group->members = def; } else def->group = NULL; } /* Create a new target register definition structure, for a definition in block BB, instruction INSN, and insert it into ALL_BTR_DEFS. Return the new definition. 
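ALL_BTR_DEFS is a fibonacci heap keyed on the negated block frequency, so that when definitions are later pulled off the heap for migration, the ones in the most frequently executed blocks come out first.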
*/ static btr_def add_btr_def (fibheap_t all_btr_defs, basic_block bb, int insn_luid, rtx insn, unsigned int dest_reg, int other_btr_uses_before_def, btr_def_group *all_btr_def_groups) { btr_def this = obstack_alloc (&migrate_btrl_obstack, sizeof (struct btr_def_s)); this->bb = bb; this->luid = insn_luid; this->insn = insn; this->btr = dest_reg; this->cost = basic_block_freq (bb); this->has_ambiguous_use = 0; this->other_btr_uses_before_def = other_btr_uses_before_def; this->other_btr_uses_after_use = 0; this->next_this_bb = NULL; this->next_this_group = NULL; this->uses = NULL; this->live_range = NULL; find_btr_def_group (all_btr_def_groups, this); fibheap_insert (all_btr_defs, -this->cost, this); if (dump_file) fprintf (dump_file, "Found target reg definition: sets %u { bb %d, insn %d }%s priority %d\n", dest_reg, bb->index, INSN_UID (insn), (this->group ? "" : ":not const"), this->cost); return this; } /* Create a new target register user structure, for a use in block BB, instruction INSN. Return the new user. */ static btr_user new_btr_user (basic_block bb, int insn_luid, rtx insn) { /* This instruction reads target registers. We need to decide whether we can replace all target register uses easily. */ rtx *usep = find_btr_use (PATTERN (insn)); rtx use; btr_user user = NULL; if (usep) { int unambiguous_single_use; /* We want to ensure that USE is the only use of a target register in INSN, so that we know that to rewrite INSN to use a different target register, all we have to do is replace USE. */ unambiguous_single_use = !btr_referenced_p (PATTERN (insn), usep); if (!unambiguous_single_use) usep = NULL; } use = usep ? *usep : NULL_RTX; user = obstack_alloc (&migrate_btrl_obstack, sizeof (struct btr_user_s)); user->bb = bb; user->luid = insn_luid; user->insn = insn; user->use = use; user->other_use_this_block = 0; user->next = NULL; user->n_reaching_defs = 0; user->first_reaching_def = -1; if (dump_file) { fprintf (dump_file, "Uses target reg: { bb %d, insn %d }", bb->index, INSN_UID (insn)); if (user->use) fprintf (dump_file, ": unambiguous use of reg %d\n", REGNO (user->use)); } return user; } /* Write the contents of S to the dump file. */ static void dump_hard_reg_set (HARD_REG_SET s) { int reg; for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++) if (TEST_HARD_REG_BIT (s, reg)) fprintf (dump_file, " %d", reg); } /* Write the set of target regs live in block BB to the dump file. */ static void dump_btrs_live (int bb) { fprintf (dump_file, "BB%d live:", bb); dump_hard_reg_set (btrs_live[bb]); fprintf (dump_file, "\n"); } /* REGNO is the number of a branch target register that is being used or set. USERS_THIS_BB is a list of preceding branch target register users; If any of them use the same register, set their other_use_this_block flag. */ static void note_other_use_this_block (unsigned int regno, btr_user users_this_bb) { btr_user user; for (user = users_this_bb; user != NULL; user = user->next) if (user->use && REGNO (user->use) == regno) user->other_use_this_block = 1; } typedef struct { btr_user users_this_bb; HARD_REG_SET btrs_written_in_block; HARD_REG_SET btrs_live_in_block; sbitmap bb_gen; sbitmap *btr_defset; } defs_uses_info; /* Called via note_stores or directly to register stores into / clobbers of a branch target register DEST that are not recognized as straightforward definitions. DATA points to information about the current basic block that needs updating. 
*/ static void note_btr_set (rtx dest, rtx set ATTRIBUTE_UNUSED, void *data) { defs_uses_info *info = data; int regno, end_regno; if (!REG_P (dest)) return; regno = REGNO (dest); end_regno = regno + hard_regno_nregs[regno][GET_MODE (dest)]; for (; regno < end_regno; regno++) if (TEST_HARD_REG_BIT (all_btrs, regno)) { note_other_use_this_block (regno, info->users_this_bb); SET_HARD_REG_BIT (info->btrs_written_in_block, regno); SET_HARD_REG_BIT (info->btrs_live_in_block, regno); sbitmap_difference (info->bb_gen, info->bb_gen, info->btr_defset[regno - first_btr]); } } static void compute_defs_uses_and_gen (fibheap_t all_btr_defs, btr_def *def_array, btr_user *use_array, sbitmap *btr_defset, sbitmap *bb_gen, HARD_REG_SET *btrs_written) { /* Scan the code building up the set of all defs and all uses. For each target register, build the set of defs of that register. For each block, calculate the set of target registers written in that block. Also calculate the set of btrs ever live in that block. */ int i; int insn_luid = 0; btr_def_group all_btr_def_groups = NULL; defs_uses_info info; sbitmap_vector_zero (bb_gen, n_basic_blocks); for (i = 0; i < n_basic_blocks; i++) { basic_block bb = BASIC_BLOCK (i); int reg; btr_def defs_this_bb = NULL; rtx insn; rtx last; int can_throw = 0; info.users_this_bb = NULL; info.bb_gen = bb_gen[i]; info.btr_defset = btr_defset; CLEAR_HARD_REG_SET (info.btrs_live_in_block); CLEAR_HARD_REG_SET (info.btrs_written_in_block); for (reg = first_btr; reg <= last_btr; reg++) if (TEST_HARD_REG_BIT (all_btrs, reg) && REGNO_REG_SET_P (bb->global_live_at_start, reg)) SET_HARD_REG_BIT (info.btrs_live_in_block, reg); for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last; insn = NEXT_INSN (insn), insn_luid++) { if (INSN_P (insn)) { int regno; int insn_uid = INSN_UID (insn); if (insn_sets_btr_p (insn, 0, &regno)) { btr_def def = add_btr_def ( all_btr_defs, bb, insn_luid, insn, regno, TEST_HARD_REG_BIT (info.btrs_live_in_block, regno), &all_btr_def_groups); def_array[insn_uid] = def; SET_HARD_REG_BIT (info.btrs_written_in_block, regno); SET_HARD_REG_BIT (info.btrs_live_in_block, regno); sbitmap_difference (bb_gen[i], bb_gen[i], btr_defset[regno - first_btr]); SET_BIT (bb_gen[i], insn_uid); def->next_this_bb = defs_this_bb; defs_this_bb = def; SET_BIT (btr_defset[regno - first_btr], insn_uid); note_other_use_this_block (regno, info.users_this_bb); } else { if (btr_referenced_p (PATTERN (insn), NULL)) { btr_user user = new_btr_user (bb, insn_luid, insn); use_array[insn_uid] = user; if (user->use) SET_HARD_REG_BIT (info.btrs_live_in_block, REGNO (user->use)); else { int reg; for (reg = first_btr; reg <= last_btr; reg++) if (TEST_HARD_REG_BIT (all_btrs, reg) && refers_to_regno_p (reg, reg + 1, user->insn, NULL)) { note_other_use_this_block (reg, info.users_this_bb); SET_HARD_REG_BIT (info.btrs_live_in_block, reg); } note_stores (PATTERN (insn), note_btr_set, &info); } user->next = info.users_this_bb; info.users_this_bb = user; } if (GET_CODE (insn) == CALL_INSN) { HARD_REG_SET *clobbered = &call_used_reg_set; HARD_REG_SET call_saved; rtx pat = PATTERN (insn); int i; /* Check for sibcall. 
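A sibling call appears as a PARALLEL containing a RETURN; for such a call the clobber set is flipped to the complement of call_used_reg_set, since it is the normally call-saved registers, restored by the epilogue before the jump, that cannot carry a target register value across the call.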
*/ if (GET_CODE (pat) == PARALLEL) for (i = XVECLEN (pat, 0) - 1; i >= 0; i--) if (GET_CODE (XVECEXP (pat, 0, i)) == RETURN) { COMPL_HARD_REG_SET (call_saved, call_used_reg_set); clobbered = &call_saved; } for (regno = first_btr; regno <= last_btr; regno++) if (TEST_HARD_REG_BIT (*clobbered, regno)) note_btr_set (regno_reg_rtx[regno], NULL_RTX, &info); } } } } COPY_HARD_REG_SET (btrs_live[i], info.btrs_live_in_block); COPY_HARD_REG_SET (btrs_written[i], info.btrs_written_in_block); REG_SET_TO_HARD_REG_SET (btrs_live_at_end[i], bb->global_live_at_end); /* If this block ends in a jump insn, add any uses or even clobbers of branch target registers that it might have. */ for (insn = BB_END (bb); insn != BB_HEAD (bb) && ! INSN_P (insn); ) insn = PREV_INSN (insn); /* ??? for the fall-through edge, it would make sense to insert the btr set on the edge, but that would require to split the block early on so that we can distinguish between dominance from the fall through edge - which can use the call-clobbered registers - from dominance by the throw edge. */ if (can_throw_internal (insn)) { HARD_REG_SET tmp; COPY_HARD_REG_SET (tmp, call_used_reg_set); AND_HARD_REG_SET (tmp, all_btrs); IOR_HARD_REG_SET (btrs_live_at_end[i], tmp); can_throw = 1; } if (can_throw || GET_CODE (insn) == JUMP_INSN) { int regno; for (regno = first_btr; regno <= last_btr; regno++) if (refers_to_regno_p (regno, regno+1, insn, NULL)) SET_HARD_REG_BIT (btrs_live_at_end[i], regno); } if (dump_file) dump_btrs_live(i); } } static void compute_kill (sbitmap *bb_kill, sbitmap *btr_defset, HARD_REG_SET *btrs_written) { int i; int regno; /* For each basic block, form the set BB_KILL - the set of definitions that the block kills. */ sbitmap_vector_zero (bb_kill, n_basic_blocks); for (i = 0; i < n_basic_blocks; i++) { for (regno = first_btr; regno <= last_btr; regno++) if (TEST_HARD_REG_BIT (all_btrs, regno) && TEST_HARD_REG_BIT (btrs_written[i], regno)) sbitmap_a_or_b (bb_kill[i], bb_kill[i], btr_defset[regno - first_btr]); } } static void compute_out (sbitmap *bb_out, sbitmap *bb_gen, sbitmap *bb_kill, int max_uid) { /* Perform iterative dataflow: Initially, for all blocks, BB_OUT = BB_GEN. For each block, BB_IN = union over predecessors of BB_OUT(pred) BB_OUT = (BB_IN - BB_KILL) + BB_GEN Iterate until the bb_out sets stop growing. */ int i; int changed; sbitmap bb_in = sbitmap_alloc (max_uid); for (i = 0; i < n_basic_blocks; i++) sbitmap_copy (bb_out[i], bb_gen[i]); changed = 1; while (changed) { changed = 0; for (i = 0; i < n_basic_blocks; i++) { sbitmap_union_of_preds (bb_in, bb_out, i); changed |= sbitmap_union_of_diff_cg (bb_out[i], bb_gen[i], bb_in, bb_kill[i]); } } sbitmap_free (bb_in); } static void link_btr_uses (btr_def *def_array, btr_user *use_array, sbitmap *bb_out, sbitmap *btr_defset, int max_uid) { int i; sbitmap reaching_defs = sbitmap_alloc (max_uid); /* Link uses to the uses lists of all of their reaching defs. Count up the number of reaching defs of each use. */ for (i = 0; i < n_basic_blocks; i++) { basic_block bb = BASIC_BLOCK (i); rtx insn; rtx last; sbitmap_union_of_preds (reaching_defs, bb_out, i); for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last; insn = NEXT_INSN (insn)) { if (INSN_P (insn)) { int insn_uid = INSN_UID (insn); btr_def def = def_array[insn_uid]; btr_user user = use_array[insn_uid]; if (def != NULL) { /* Remove all reaching defs of regno except for this one. 
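A new definition of a target register kills every earlier definition of that register along this path, so the whole btr_defset slice for DEF->btr is cleared from REACHING_DEFS and only this insn's uid is put back.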
*/ sbitmap_difference (reaching_defs, reaching_defs, btr_defset[def->btr - first_btr]); SET_BIT(reaching_defs, insn_uid); } if (user != NULL) { /* Find all the reaching defs for this use. */ sbitmap reaching_defs_of_reg = sbitmap_alloc(max_uid); int uid; if (user->use) sbitmap_a_and_b ( reaching_defs_of_reg, reaching_defs, btr_defset[REGNO (user->use) - first_btr]); else { int reg; sbitmap_zero (reaching_defs_of_reg); for (reg = first_btr; reg <= last_btr; reg++) if (TEST_HARD_REG_BIT (all_btrs, reg) && refers_to_regno_p (reg, reg + 1, user->insn, NULL)) sbitmap_a_or_b_and_c (reaching_defs_of_reg, reaching_defs_of_reg, reaching_defs, btr_defset[reg - first_btr]); } EXECUTE_IF_SET_IN_SBITMAP (reaching_defs_of_reg, 0, uid, { btr_def def = def_array[uid]; /* We now know that def reaches user. */ if (dump_file) fprintf (dump_file, "Def in insn %d reaches use in insn %d\n", uid, insn_uid); user->n_reaching_defs++; if (!user->use) def->has_ambiguous_use = 1; if (user->first_reaching_def != -1) { /* There is more than one reaching def. This is a rare case, so just give up on this def/use web when it occurs. */ def->has_ambiguous_use = 1; def_array[user->first_reaching_def] ->has_ambiguous_use = 1; if (dump_file) fprintf (dump_file, "(use %d has multiple reaching defs)\n", insn_uid); } else user->first_reaching_def = uid; if (user->other_use_this_block) def->other_btr_uses_after_use = 1; user->next = def->uses; def->uses = user; }); sbitmap_free (reaching_defs_of_reg); } if (GET_CODE (insn) == CALL_INSN) { int regno; for (regno = first_btr; regno <= last_btr; regno++) if (TEST_HARD_REG_BIT (all_btrs, regno) && TEST_HARD_REG_BIT (call_used_reg_set, regno)) sbitmap_difference (reaching_defs, reaching_defs, btr_defset[regno - first_btr]); } } } } sbitmap_free (reaching_defs); } static void build_btr_def_use_webs (fibheap_t all_btr_defs) { const int max_uid = get_max_uid (); btr_def *def_array = xcalloc (max_uid, sizeof (btr_def)); btr_user *use_array = xcalloc (max_uid, sizeof (btr_user)); sbitmap *btr_defset = sbitmap_vector_alloc ( (last_btr - first_btr) + 1, max_uid); sbitmap *bb_gen = sbitmap_vector_alloc (n_basic_blocks, max_uid); HARD_REG_SET *btrs_written = xcalloc (n_basic_blocks, sizeof (HARD_REG_SET)); sbitmap *bb_kill; sbitmap *bb_out; sbitmap_vector_zero (btr_defset, (last_btr - first_btr) + 1); compute_defs_uses_and_gen (all_btr_defs, def_array, use_array, btr_defset, bb_gen, btrs_written); bb_kill = sbitmap_vector_alloc (n_basic_blocks, max_uid); compute_kill (bb_kill, btr_defset, btrs_written); free (btrs_written); bb_out = sbitmap_vector_alloc (n_basic_blocks, max_uid); compute_out (bb_out, bb_gen, bb_kill, max_uid); sbitmap_vector_free (bb_gen); sbitmap_vector_free (bb_kill); link_btr_uses (def_array, use_array, bb_out, btr_defset, max_uid); sbitmap_vector_free (bb_out); sbitmap_vector_free (btr_defset); free (use_array); free (def_array); } /* Return true if basic block BB contains the start or end of the live range of the definition DEF, AND there are other live ranges of the same target register that include BB. */ static int block_at_edge_of_live_range_p (int bb, btr_def def) { if (def->other_btr_uses_before_def && BASIC_BLOCK (bb) == def->bb) return 1; else if (def->other_btr_uses_after_use) { btr_user user; for (user = def->uses; user != NULL; user = user->next) if (BASIC_BLOCK (bb) == user->bb) return 1; } return 0; } /* We are removing the def/use web DEF. 
The target register used in this web is therefore no longer live in the live range of this web, so remove it from the live set of all basic blocks in the live range of the web. Blocks at the boundary of the live range may contain other live ranges for the same target register, so we have to be careful to remove the target register from the live set of these blocks only if they do not contain other live ranges for the same register. */ static void clear_btr_from_live_range (btr_def def) { int bb; EXECUTE_IF_SET_IN_BITMAP (def->live_range, 0, bb, { if ((!def->other_btr_uses_before_def && !def->other_btr_uses_after_use) || !block_at_edge_of_live_range_p (bb, def)) { CLEAR_HARD_REG_BIT (btrs_live[bb], def->btr); CLEAR_HARD_REG_BIT (btrs_live_at_end[bb], def->btr); if (dump_file) dump_btrs_live (bb); } }); } /* We are adding the def/use web DEF. Add the target register used in this web to the live set of all of the basic blocks that contain the live range of the web. */ static void add_btr_to_live_range (btr_def def) { int bb; EXECUTE_IF_SET_IN_BITMAP (def->live_range, 0, bb, { SET_HARD_REG_BIT (btrs_live[bb], def->btr); SET_HARD_REG_BIT (btrs_live_at_end[bb], def->btr); if (dump_file) dump_btrs_live (bb); }); } /* Update a live range to contain the basic block NEW_BLOCK, and all blocks on paths between the existing live range and NEW_BLOCK. HEAD is a block contained in the existing live range that dominates all other blocks in the existing live range. Also add to the set BTRS_LIVE_IN_RANGE all target registers that are live in the blocks that we add to the live range. It is a precondition that either NEW_BLOCK dominates HEAD,or HEAD dom NEW_BLOCK. This is used to speed up the implementation of this function. */ static void augment_live_range (bitmap live_range, HARD_REG_SET *btrs_live_in_range, basic_block head_bb, basic_block new_bb) { basic_block *worklist, *tos; tos = worklist = xmalloc (sizeof (basic_block) * (n_basic_blocks + 1)); if (dominated_by_p (CDI_DOMINATORS, new_bb, head_bb)) *tos++ = new_bb; else if (dominated_by_p (CDI_DOMINATORS, head_bb, new_bb)) { edge e; int new_block = new_bb->index; bitmap_set_bit (live_range, new_block); if (flag_btr_bb_exclusive) IOR_HARD_REG_SET (*btrs_live_in_range, btrs_live[new_block]); else { IOR_HARD_REG_SET (*btrs_live_in_range, btrs_live_at_end[new_block]); IOR_HARD_REG_SET (*btrs_live_in_range, btrs_live[head_bb->index]); } if (dump_file) { fprintf (dump_file, "Adding end of block %d and rest of %d to live range\n", new_block, head_bb->index); fprintf (dump_file,"Now live btrs are "); dump_hard_reg_set (*btrs_live_in_range); fprintf (dump_file, "\n"); } for (e = head_bb->pred; e; e = e->pred_next) *tos++ = e->src; } else abort(); while (tos != worklist) { basic_block bb = *--tos; if (!bitmap_bit_p (live_range, bb->index)) { edge e; bitmap_set_bit (live_range, bb->index); IOR_HARD_REG_SET (*btrs_live_in_range, btrs_live[bb->index]); if (dump_file) { fprintf (dump_file, "Adding block %d to live range\n", bb->index); fprintf (dump_file,"Now live btrs are "); dump_hard_reg_set (*btrs_live_in_range); fprintf (dump_file, "\n"); } for (e = bb->pred; e != NULL; e = e->pred_next) { basic_block pred = e->src; if (!bitmap_bit_p (live_range, pred->index)) *tos++ = pred; } } } free (worklist); } /* Return the most desirable target register that is not in the set USED_BTRS. 
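Candidate registers are tried in REG_ALLOC_ORDER when the target defines it, so the first free register in the target's preferred allocation order is chosen; -1 is returned when every register in all_btrs is already in USED_BTRS.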
*/ static int choose_btr (HARD_REG_SET used_btrs) { int i; GO_IF_HARD_REG_SUBSET (all_btrs, used_btrs, give_up); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { #ifdef REG_ALLOC_ORDER int regno = reg_alloc_order[i]; #else int regno = i; #endif if (TEST_HARD_REG_BIT (all_btrs, regno) && !TEST_HARD_REG_BIT (used_btrs, regno)) return regno; } give_up: return -1; } /* Calculate the set of basic blocks that contain the live range of the def/use web DEF. Also calculate the set of target registers that are live at time in this live range, but ignore the live range represented by DEF when calculating this set. */ static void btr_def_live_range (btr_def def, HARD_REG_SET *btrs_live_in_range) { if (!def->live_range) { btr_user user; def->live_range = BITMAP_XMALLOC (); bitmap_set_bit (def->live_range, def->bb->index); if (flag_btr_bb_exclusive) COPY_HARD_REG_SET (*btrs_live_in_range, btrs_live[def->bb->index]); else COPY_HARD_REG_SET (*btrs_live_in_range, btrs_live_at_end[def->bb->index]); for (user = def->uses; user != NULL; user = user->next) augment_live_range (def->live_range, btrs_live_in_range, def->bb, user->bb); } else { /* def->live_range is accurate, but we need to recompute the set of target registers live over it, because migration of other PT instructions may have affected it. */ int bb; int def_bb = def->bb->index; CLEAR_HARD_REG_SET (*btrs_live_in_range); if (flag_btr_bb_exclusive) EXECUTE_IF_SET_IN_BITMAP (def->live_range, 0, bb, { IOR_HARD_REG_SET (*btrs_live_in_range, btrs_live[bb]); }); else EXECUTE_IF_SET_IN_BITMAP (def->live_range, 0, bb, { IOR_HARD_REG_SET (*btrs_live_in_range, (def_bb == bb ? btrs_live_at_end : btrs_live) [bb]); }); } if (!def->other_btr_uses_before_def && !def->other_btr_uses_after_use) CLEAR_HARD_REG_BIT (*btrs_live_in_range, def->btr); } /* Merge into the def/use web DEF any other def/use webs in the same group that are dominated by DEF, provided that there is a target register available to allocate to the merged web. */ static void combine_btr_defs (btr_def def, HARD_REG_SET *btrs_live_in_range) { btr_def other_def; for (other_def = def->group->members; other_def != NULL; other_def = other_def->next_this_group) { if (other_def != def && other_def->uses != NULL && ! other_def->has_ambiguous_use && dominated_by_p (CDI_DOMINATORS, other_def->bb, def->bb)) { /* def->bb dominates the other def, so def and other_def could be combined. */ /* Merge their live ranges, and get the set of target registers live over the merged range. */ int btr; HARD_REG_SET combined_btrs_live; bitmap combined_live_range = BITMAP_XMALLOC (); btr_user user; if (other_def->live_range == NULL) { HARD_REG_SET dummy_btrs_live_in_range; btr_def_live_range (other_def, &dummy_btrs_live_in_range); } COPY_HARD_REG_SET (combined_btrs_live, *btrs_live_in_range); bitmap_copy (combined_live_range, def->live_range); for (user = other_def->uses; user != NULL; user = user->next) augment_live_range (combined_live_range, &combined_btrs_live, def->bb, user->bb); btr = choose_btr (combined_btrs_live); if (btr != -1) { /* We can combine them. */ if (dump_file) fprintf (dump_file, "Combining def in insn %d with def in insn %d\n", INSN_UID (other_def->insn), INSN_UID (def->insn)); def->btr = btr; user = other_def->uses; while (user != NULL) { btr_user next = user->next; user->next = def->uses; def->uses = user; user = next; } /* Combining def/use webs can make target registers live after uses where they previously were not. This means some REG_DEAD notes may no longer be correct. 
We could be more precise about this if we looked at the combined live range, but here I just delete any REG_DEAD notes in case they are no longer correct. */ for (user = def->uses; user != NULL; user = user->next) remove_note (user->insn, find_regno_note (user->insn, REG_DEAD, REGNO (user->use))); clear_btr_from_live_range (other_def); other_def->uses = NULL; bitmap_copy (def->live_range, combined_live_range); if (other_def->other_btr_uses_after_use) def->other_btr_uses_after_use = 1; COPY_HARD_REG_SET (*btrs_live_in_range, combined_btrs_live); /* Delete the old target register initialization. */ delete_insn (other_def->insn); } BITMAP_XFREE (combined_live_range); } } } /* Move the definition DEF from its current position to basic block NEW_DEF_BB, and modify it to use branch target register BTR. Delete the old defining insn, and insert a new one in NEW_DEF_BB. Update all reaching uses of DEF in the RTL to use BTR. If this new position means that other defs in the same group can be combined with DEF then combine them. */ static void move_btr_def (basic_block new_def_bb, int btr, btr_def def, bitmap live_range, HARD_REG_SET *btrs_live_in_range) { /* We can move the instruction. Set a target register in block NEW_DEF_BB to the value needed for this target register definition. Replace all uses of the old target register definition by uses of the new definition. Delete the old definition. */ basic_block b = new_def_bb; rtx insp = BB_HEAD (b); rtx old_insn = def->insn; rtx src; rtx btr_rtx; rtx new_insn; enum machine_mode btr_mode; btr_user user; rtx set; if (dump_file) fprintf(dump_file, "migrating to basic block %d, using reg %d\n", new_def_bb->index, btr); clear_btr_from_live_range (def); def->btr = btr; def->bb = new_def_bb; def->luid = 0; def->cost = basic_block_freq (new_def_bb); def->other_btr_uses_before_def = TEST_HARD_REG_BIT (btrs_live[b->index], btr) ? 1 : 0; bitmap_copy (def->live_range, live_range); combine_btr_defs (def, btrs_live_in_range); btr = def->btr; add_btr_to_live_range (def); if (GET_CODE (insp) == CODE_LABEL) insp = NEXT_INSN (insp); /* N.B.: insp is expected to be NOTE_INSN_BASIC_BLOCK now. Some optimizations can result in insp being both first and last insn of its basic block. */ /* ?? some assertions to check that insp is sensible? */ if (def->other_btr_uses_before_def) { insp = BB_END (b); for (insp = BB_END (b); ! INSN_P (insp); insp = PREV_INSN (insp)) if (insp == BB_HEAD (b)) abort (); if (GET_CODE (insp) == JUMP_INSN || can_throw_internal (insp)) insp = PREV_INSN (insp); } set = single_set (old_insn); src = SET_SRC (set); btr_mode = GET_MODE (SET_DEST (set)); btr_rtx = gen_rtx_REG (btr_mode, btr); new_insn = gen_move_insn (btr_rtx, src); /* Insert target register initialization at head of basic block. */ def->insn = emit_insn_after (new_insn, insp); regs_ever_live[btr] = 1; if (dump_file) fprintf (dump_file, "New pt is insn %d, inserted after insn %d\n", INSN_UID (def->insn), INSN_UID (insp)); /* Delete the old target register initialization. */ delete_insn (old_insn); /* Replace each use of the old target register by a use of the new target register. */ for (user = def->uses; user != NULL; user = user->next) { /* Some extra work here to ensure consistent modes, because it seems that a target register REG rtx can be given a different mode depending on the context (surely that should not be the case?). 
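When the recorded use already has the mode of BTR_RTX (or VOIDmode), BTR_RTX is reused directly; otherwise a REG rtx for BTR in the use's own mode is generated, so that replace_rtx substitutes an operand of the mode the using insn expects.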
*/ rtx replacement_rtx; if (GET_MODE (user->use) == GET_MODE (btr_rtx) || GET_MODE (user->use) == VOIDmode) replacement_rtx = btr_rtx; else replacement_rtx = gen_rtx_REG (GET_MODE (user->use), btr); replace_rtx (user->insn, user->use, replacement_rtx); user->use = replacement_rtx; } } /* We anticipate intra-block scheduling to be done. See if INSN could move up within BB by N_INSNS. */ static int can_move_up (basic_block bb, rtx insn, int n_insns) { while (insn != BB_HEAD (bb) && n_insns > 0) { insn = PREV_INSN (insn); /* ??? What if we have an anti-dependency that actually prevents the scheduler from doing the move? We'd like to re-allocate the register, but not necessarily put the load into another basic block. */ if (INSN_P (insn)) n_insns--; } return n_insns <= 0; } /* Attempt to migrate the target register definition DEF to an earlier point in the flowgraph. It is a precondition of this function that DEF is migratable: i.e. it has a constant source, and all uses are unambiguous. Only migrations that reduce the cost of DEF will be made. MIN_COST is the lower bound on the cost of the DEF after migration. If we migrate DEF so that its cost falls below MIN_COST, then we do not attempt to migrate further. The idea is that we migrate definitions in a priority order based on their cost, when the cost of this definition falls below MIN_COST, then there is another definition with cost == MIN_COST which now has a higher priority than this definition. Return nonzero if there may be benefit from attempting to migrate this DEF further (i.e. we have reduced the cost below MIN_COST, but we may be able to reduce it further). Return zero if no further migration is possible. */ static int migrate_btr_def (btr_def def, int min_cost) { bitmap live_range; HARD_REG_SET btrs_live_in_range; int btr_used_near_def = 0; int def_basic_block_freq; basic_block try; int give_up = 0; int def_moved = 0; btr_user user; int def_latency = 1; if (dump_file) fprintf (dump_file, "Attempting to migrate pt from insn %d (cost = %d, min_cost = %d) ... ", INSN_UID (def->insn), def->cost, min_cost); if (!def->group || def->has_ambiguous_use) /* These defs are not migratable. */ { if (dump_file) fprintf (dump_file, "it's not migratable\n"); return 0; } if (!def->uses) /* We have combined this def with another in the same group, so no need to consider it further. */ { if (dump_file) fprintf (dump_file, "it's already combined with another pt\n"); return 0; } btr_def_live_range (def, &btrs_live_in_range); live_range = BITMAP_XMALLOC (); bitmap_copy (live_range, def->live_range); #ifdef INSN_SCHEDULING if (targetm.sched.use_dfa_pipeline_interface ()) def_latency = insn_default_latency (def->insn); else def_latency = result_ready_cost (def->insn); #endif def_latency *= issue_rate; for (user = def->uses; user != NULL; user = user->next) { if (user->bb == def->bb && user->luid > def->luid && (def->luid + def_latency) > user->luid && ! can_move_up (def->bb, def->insn, (def->luid + def_latency) - user->luid)) { btr_used_near_def = 1; break; } } def_basic_block_freq = basic_block_freq (def->bb); for (try = get_immediate_dominator (CDI_DOMINATORS, def->bb); !give_up && try && try != ENTRY_BLOCK_PTR && def->cost >= min_cost; try = get_immediate_dominator (CDI_DOMINATORS, try)) { /* Try to move the instruction that sets the target register into basic block TRY. 
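TRY walks up the immediate-dominator chain starting from DEF's block, and a move is attempted only if it cannot make things worse: TRY must execute less frequently than the current defining block, or equally frequently when the def sits so close to one of its uses that its latency could not be hidden by scheduling anyway.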
*/ int try_freq = basic_block_freq (try); if (dump_file) fprintf (dump_file, "trying block %d ...", try->index); if (try_freq < def_basic_block_freq || (try_freq == def_basic_block_freq && btr_used_near_def)) { int btr; augment_live_range (live_range, &btrs_live_in_range, def->bb, try); if (dump_file) { fprintf (dump_file, "Now btrs live in range are: "); dump_hard_reg_set (btrs_live_in_range); fprintf (dump_file, "\n"); } btr = choose_btr (btrs_live_in_range); if (btr != -1) { move_btr_def (try, btr, def, live_range, &btrs_live_in_range); bitmap_copy(live_range, def->live_range); btr_used_near_def = 0; def_moved = 1; def_basic_block_freq = basic_block_freq (def->bb); } else { /* There are no free target registers available to move this far forward, so give up */ give_up = 1; if (dump_file) fprintf (dump_file, "giving up because there are no free target registers\n"); } } } if (!def_moved) { give_up = 1; if (dump_file) fprintf (dump_file, "failed to move\n"); } BITMAP_XFREE (live_range); return !give_up; } /* Attempt to move instructions that set target registers earlier in the flowgraph, away from their corresponding uses. */ static void migrate_btr_defs (enum reg_class btr_class, int allow_callee_save) { fibheap_t all_btr_defs = fibheap_new (); int reg; gcc_obstack_init (&migrate_btrl_obstack); if (dump_file) { int i; for (i = 0; i < n_basic_blocks; i++) { basic_block bb = BASIC_BLOCK (i); fprintf(dump_file, "Basic block %d: count = " HOST_WIDEST_INT_PRINT_DEC " loop-depth = %d idom = %d\n", i, (HOST_WIDEST_INT) bb->count, bb->loop_depth, get_immediate_dominator (CDI_DOMINATORS, bb)->index); } } CLEAR_HARD_REG_SET (all_btrs); for (first_btr = -1, reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++) if (TEST_HARD_REG_BIT (reg_class_contents[(int) btr_class], reg) && (allow_callee_save || call_used_regs[reg] || regs_ever_live[reg])) { SET_HARD_REG_BIT (all_btrs, reg); last_btr = reg; if (first_btr < 0) first_btr = reg; } btrs_live = xcalloc (n_basic_blocks, sizeof (HARD_REG_SET)); btrs_live_at_end = xcalloc (n_basic_blocks, sizeof (HARD_REG_SET)); build_btr_def_use_webs (all_btr_defs); while (!fibheap_empty (all_btr_defs)) { btr_def def = fibheap_extract_min (all_btr_defs); int min_cost = -fibheap_min_key (all_btr_defs); if (migrate_btr_def (def, min_cost)) { fibheap_insert (all_btr_defs, -def->cost, (void *) def); if (dump_file) { fprintf (dump_file, "Putting insn %d back on queue with priority %d\n", INSN_UID (def->insn), def->cost); } } else { if (def->live_range) BITMAP_XFREE (def->live_range); } } free (btrs_live); free (btrs_live_at_end); obstack_free (&migrate_btrl_obstack, NULL); fibheap_delete (all_btr_defs); } void branch_target_load_optimize (bool after_prologue_epilogue_gen) { enum reg_class class = targetm.branch_target_register_class (); if (class != NO_REGS) { /* Initialize issue_rate. */ if (targetm.sched.issue_rate) issue_rate = targetm.sched.issue_rate (); else issue_rate = 1; /* Build the CFG for migrate_btr_defs. */ #if 1 /* This may or may not be needed, depending on where we run this phase. */ cleanup_cfg (optimize ? CLEANUP_EXPENSIVE : 0); #endif life_analysis (NULL, 0); /* Dominator info is also needed for migrate_btr_def. */ calculate_dominance_info (CDI_DOMINATORS); migrate_btr_defs (class, (targetm.branch_target_register_callee_saved (after_prologue_epilogue_gen))); free_dominance_info (CDI_DOMINATORS); update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES | PROP_REG_INFO); } } /* Various declarations for language-independent pretty-print subroutines. 
Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Gabriel Dos Reis This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #undef FLOAT /* This is for hpux. They should change hpux. */ #undef FFS /* Some systems define this in param.h. */ /* A pointer to the formatted diagnostic message. */ #define pp_formatted_text_data(PP) \ ((const char *) obstack_base (&pp_base (PP)->buffer->obstack)) /* Format an integer given by va_arg (ARG, type-specifier T) where type-specifier is a precision modifier as indicated by PREC. F is a string used to construct the appropriate format-specifier. */ #define pp_integer_with_precision(PP, ARG, PREC, T, F) \ do \ switch (PREC) \ { \ case 0: \ pp_scalar (PP, "%" F, va_arg (ARG, T)); \ break; \ \ case 1: \ pp_scalar (PP, "%l" F, va_arg (ARG, long T)); \ break; \ \ case 2: \ pp_scalar (PP, "%ll" F, va_arg (ARG, long long T)); \ break; \ \ default: \ break; \ } \ while (0) /* Subroutine of pp_set_maximum_length. Set up PRETTY-PRINTER's internal maximum characters per line. */ static void pp_set_real_maximum_length (pretty_printer *pp) { /* If we're told not to wrap lines then do the obvious thing. In case we'll emit prefix only once per message, it is appropriate not to increase unnecessarily the line-length cut-off. */ if (!pp_is_wrapping_line (pp) || pp_prefixing_rule (pp) == DIAGNOSTICS_SHOW_PREFIX_ONCE || pp_prefixing_rule (pp) == DIAGNOSTICS_SHOW_PREFIX_NEVER) pp->maximum_length = pp_line_cutoff (pp); else { int prefix_length = pp->prefix ? strlen (pp->prefix) : 0; /* If the prefix is ridiculously too long, output at least 32 characters. */ if (pp_line_cutoff (pp) - prefix_length < 32) pp->maximum_length = pp_line_cutoff (pp) + 32; else pp->maximum_length = pp_line_cutoff (pp); } } /* Clear PRETTY-PRINTER's output state. */ static inline void pp_clear_state (pretty_printer *pp) { pp->emitted_prefix = false; pp_indentation (pp) = 0; } /* Flush the formatted text of PRETTY-PRINTER onto the attached stream. */ void pp_write_text_to_stream (pretty_printer *pp) { const char *text = pp_formatted_text (pp); fputs (text, pp->buffer->stream); pp_clear_output_area (pp); } /* Wrap a text delimited by START and END into PRETTY-PRINTER. */ static void pp_wrap_text (pretty_printer *pp, const char *start, const char *end) { bool wrapping_line = pp_is_wrapping_line (pp); while (start != end) { /* Dump anything bordered by whitespaces. */ { const char *p = start; while (p != end && !ISBLANK (*p) && *p != '\n') ++p; if (wrapping_line && p - start >= pp_remaining_character_count_for_line (pp)) pp_newline (pp); pp_append_text (pp, start, p); start = p; } if (start != end && ISBLANK (*start)) { pp_space (pp); ++start; } if (start != end && *start == '\n') { pp_newline (pp); ++start; } } } /* Same as pp_wrap_text but wrap text only when in line-wrapping mode. 
*/ static inline void pp_maybe_wrap_text (pretty_printer *pp, const char *start, const char *end) { if (pp_is_wrapping_line (pp)) pp_wrap_text (pp, start, end); else pp_append_text (pp, start, end); } /* Append to the output area of PRETTY-PRINTER a string specified by its STARTing character and LENGTH. */ static inline void pp_append_r (pretty_printer *pp, const char *start, int length) { obstack_grow (&pp->buffer->obstack, start, length); pp->buffer->line_length += length; } /* Insert enough spaces into the output area of PRETTY-PRINTER to bring the column position to the current indentation level, assuming that a newline has just been written to the buffer. */ void pp_base_indent (pretty_printer *pp) { int n = pp_indentation (pp); int i; for (i = 0; i < n; ++i) pp_space (pp); } /* Format a message pointed to by TEXT. The following format specifiers are recognized as being client independent: %d, %i: (signed) integer in base ten. %u: unsigned integer in base ten. %o: unsigned integer in base eight. %x: unsigned integer in base sixteen. %ld, %li, %lo, %lu, %lx: long versions of the above. %lld, %lli, %llo, %llu, %llx: long long versions. %wd, %wi, %wo, %wu, %wx: HOST_WIDE_INT versions. %c: character. %s: string. %p: pointer. %m: strerror(text->err_no) - does not consume a value from args_ptr. %%: '%'. %<: opening quote. %>: closing quote. %': apostrophe (should only be used in untranslated messages; translations should use appropriate punctuation directly). %.*s: a substring the length of which is specified by an integer. %H: location_t. Flag 'q': quote formatted text (must come immediately after '%'). */ void pp_base_format_text (pretty_printer *pp, text_info *text) { for (; *text->format_spec; ++text->format_spec) { int precision = 0; bool wide = false; bool quoted = false; /* Ignore text. */ { const char *p = text->format_spec; while (*p && *p != '%') ++p; pp_wrap_text (pp, text->format_spec, p); text->format_spec = p; } if (*text->format_spec == '\0') break; /* We got a '%'. Check for 'q', then parse precision modifiers, if any. */ if (*++text->format_spec == 'q') { quoted = true; ++text->format_spec; } switch (*text->format_spec) { case 'w': wide = true; ++text->format_spec; break; case 'l': do ++precision; while (*++text->format_spec == 'l'); break; default: break; } /* We don't support precision beyond that of "long long". 
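So PRECISION can only be 0, 1 or 2 at this point (no modifier, 'l', or 'll'), and WIDE is set when the 'w' (HOST_WIDE_INT) modifier was seen; the switch below then dispatches on the conversion character itself, e.g. "%wd" consumes a HOST_WIDE_INT argument while "%lld" consumes a long long.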
*/ if (precision > 2) abort(); if (quoted) pp_string (pp, open_quote); switch (*text->format_spec) { case 'c': pp_character (pp, va_arg (*text->args_ptr, int)); break; case 'd': case 'i': if (wide) pp_wide_integer (pp, va_arg (*text->args_ptr, HOST_WIDE_INT)); else pp_integer_with_precision (pp, *text->args_ptr, precision, int, "d"); break; case 'o': if (wide) pp_scalar (pp, "%" HOST_WIDE_INT_PRINT "o", va_arg (*text->args_ptr, unsigned HOST_WIDE_INT)); else pp_integer_with_precision (pp, *text->args_ptr, precision, unsigned, "u"); break; case 's': pp_string (pp, va_arg (*text->args_ptr, const char *)); break; case 'p': pp_pointer (pp, va_arg (*text->args_ptr, void *)); break; case 'u': if (wide) pp_scalar (pp, HOST_WIDE_INT_PRINT_UNSIGNED, va_arg (*text->args_ptr, unsigned HOST_WIDE_INT)); else pp_integer_with_precision (pp, *text->args_ptr, precision, unsigned, "u"); break; case 'x': if (wide) pp_scalar (pp, HOST_WIDE_INT_PRINT_HEX, va_arg (*text->args_ptr, unsigned HOST_WIDE_INT)); else pp_integer_with_precision (pp, *text->args_ptr, precision, unsigned, "x"); break; case 'm': pp_string (pp, xstrerror (text->err_no)); break; case '%': pp_character (pp, '%'); break; case '<': pp_string (pp, open_quote); break; case '>': case '\'': pp_string (pp, close_quote); break; case 'H': { location_t *locus = va_arg (*text->args_ptr, location_t *); expanded_location s = expand_location (*locus); pp_string (pp, "file '"); pp_string (pp, s.file); pp_string (pp, "', line "); pp_decimal_int (pp, s.line); } break; case '.': { int n; const char *s; /* We handle no precision specifier but '%.*s'. */ if (*++text->format_spec != '*') abort (); else if (*++text->format_spec != 's') abort (); n = va_arg (*text->args_ptr, int); s = va_arg (*text->args_ptr, const char *); pp_append_text (pp, s, s + n); } break; default: if (!pp_format_decoder (pp) || !(*pp_format_decoder (pp)) (pp, text)) { /* Hmmm. The client failed to install a format translator but called us with an unrecognized format. Or, maybe, the translated string just contains an invalid format, or has formats in the wrong order. Sorry. */ abort (); } } if (quoted) pp_string (pp, close_quote); } } /* Helper subroutine of output_verbatim and verbatim. Do the appropriate settings needed by BUFFER for a verbatim formatting. */ void pp_base_format_verbatim (pretty_printer *pp, text_info *text) { diagnostic_prefixing_rule_t rule = pp_prefixing_rule (pp); int line_cutoff = pp_line_cutoff (pp); /* Set verbatim mode. */ pp->prefixing_rule = DIAGNOSTICS_SHOW_PREFIX_NEVER; pp_line_cutoff (pp) = 0; /* Do the actual formatting. */ pp_format_text (pp, text); /* Restore previous settings. */ pp_prefixing_rule (pp) = rule; pp_line_cutoff (pp) = line_cutoff; } /* Flush the content of BUFFER onto the attached stream. */ void pp_base_flush (pretty_printer *pp) { pp_write_text_to_stream (pp); pp_clear_state (pp); fputc ('\n', pp->buffer->stream); fflush (pp->buffer->stream); pp_needs_newline (pp) = false; } /* Sets the number of maximum characters per line PRETTY-PRINTER can output in line-wrapping mode. A LENGTH value 0 suppresses line-wrapping. */ void pp_base_set_line_maximum_length (pretty_printer *pp, int length) { pp_line_cutoff (pp) = length; pp_set_real_maximum_length (pp); } /* Clear PRETTY-PRINTER output area text info. */ void pp_base_clear_output_area (pretty_printer *pp) { obstack_free (&pp->buffer->obstack, obstack_base (&pp->buffer->obstack)); pp->buffer->line_length = 0; } /* Set PREFIX for PRETTY-PRINTER. 
*/ void pp_base_set_prefix (pretty_printer *pp, const char *prefix) { pp->prefix = prefix; pp_set_real_maximum_length (pp); pp->emitted_prefix = false; pp_indentation (pp) = 0; } /* Free PRETTY-PRINTER's prefix, a previously malloc()'d string. */ void pp_base_destroy_prefix (pretty_printer *pp) { if (pp->prefix != NULL) { free ((char *) pp->prefix); pp->prefix = NULL; } } /* Write out PRETTY-PRINTER's prefix. */ void pp_base_emit_prefix (pretty_printer *pp) { if (pp->prefix != NULL) { switch (pp_prefixing_rule (pp)) { default: case DIAGNOSTICS_SHOW_PREFIX_NEVER: break; case DIAGNOSTICS_SHOW_PREFIX_ONCE: if (pp->emitted_prefix) { pp_base_indent (pp); break; } pp_indentation (pp) += 3; /* Fall through. */ case DIAGNOSTICS_SHOW_PREFIX_EVERY_LINE: { int prefix_length = strlen (pp->prefix); pp_append_r (pp, pp->prefix, prefix_length); pp->emitted_prefix = true; } break; } } } /* Construct a PRETTY-PRINTER with PREFIX and of MAXIMUM_LENGTH characters per line. */ void pp_construct (pretty_printer *pp, const char *prefix, int maximum_length) { memset (pp, 0, sizeof (pretty_printer)); pp->buffer = xcalloc (1, sizeof (output_buffer)); obstack_init (&pp->buffer->obstack); pp->buffer->stream = stderr; pp_line_cutoff (pp) = maximum_length; pp_prefixing_rule (pp) = DIAGNOSTICS_SHOW_PREFIX_ONCE; pp_set_prefix (pp, prefix); } /* Append a string delimited by START and END to the output area of PRETTY-PRINTER. No line wrapping is done. However, if beginning a new line then emit PRETTY-PRINTER's prefix and skip any leading whitespace if appropriate. The caller must ensure that it is safe to do so. */ void pp_base_append_text (pretty_printer *pp, const char *start, const char *end) { /* Emit prefix and skip whitespace if we're starting a new line. */ if (pp->buffer->line_length == 0) { pp_emit_prefix (pp); if (pp_is_wrapping_line (pp)) while (start != end && *start == ' ') ++start; } pp_append_r (pp, start, end - start); } /* Finishes constructing a NULL-terminated character string representing the PRETTY-PRINTED text. */ const char * pp_base_formatted_text (pretty_printer *pp) { obstack_1grow (&pp->buffer->obstack, '\0'); return pp_formatted_text_data (pp); } /* Return a pointer to the last character emitted in PRETTY-PRINTER's output area. A NULL pointer means no character available. */ const char * pp_base_last_position_in_text (const pretty_printer *pp) { const char *p = NULL; struct obstack *text = &pp->buffer->obstack; if (obstack_base (text) != obstack_next_free (text)) p = ((const char *) obstack_next_free (text)) - 1; return p; } /* Return the number of characters PRETTY-PRINTER can accept to make a full line. Meaningful only in line-wrapping mode. */ int pp_base_remaining_character_count_for_line (pretty_printer *pp) { return pp->maximum_length - pp->buffer->line_length; } /* Format a message into BUFFER a la printf. */ void pp_printf (pretty_printer *pp, const char *msg, ...) { text_info text; va_list ap; va_start (ap, msg); text.err_no = errno; text.args_ptr = &ap; text.format_spec = msg; pp_format_text (pp, &text); va_end (ap); } /* Output MESSAGE verbatim into BUFFER. */ void pp_verbatim (pretty_printer *pp, const char *msg, ...) { text_info text; va_list ap; va_start (ap, msg); text.err_no = errno; text.args_ptr = &ap; text.format_spec = msg; pp_format_verbatim (pp, &text); va_end (ap); } /* Have PRETTY-PRINTER start a new line. */ void pp_base_newline (pretty_printer *pp) { obstack_1grow (&pp->buffer->obstack, '\n'); pp->buffer->line_length = 0; } /* Have PRETTY-PRINTER add a CHARACTER.
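If the current line is already full in line-wrapping mode, a newline is emitted first; a whitespace character is then dropped entirely, since it would only leave a stray blank at the start of the new line.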
*/ void pp_base_character (pretty_printer *pp, int c) { if (pp_is_wrapping_line (pp) && pp_remaining_character_count_for_line (pp) <= 0) { pp_newline (pp); if (ISSPACE (c)) return; } obstack_1grow (&pp->buffer->obstack, c); ++pp->buffer->line_length; } /* Append a STRING to the output area of PRETTY-PRINTER; the STRING may be line-wrapped if in appropriate mode. */ void pp_base_string (pretty_printer *pp, const char *str) { pp_maybe_wrap_text (pp, str, str + (str ? strlen (str) : 0)); } /* Maybe print out a whitespace if needed. */ void pp_base_maybe_space (pretty_printer *pp) { if (pp_base (pp)->padding != pp_none) { pp_space (pp); pp_base (pp)->padding = pp_none; } } /* "Bag-of-pages" garbage collector for the GNU compiler. Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Avoid #ifdef:s when we can help it. */ #define VALGRIND_DISCARD(x) /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a file open. Prefer either to valloc. */ #ifdef HAVE_MMAP_ANON # undef HAVE_MMAP_DEV_ZERO # include <sys/mman.h> # ifndef MAP_FAILED # define MAP_FAILED -1 # endif # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) # define MAP_ANONYMOUS MAP_ANON # endif # define USING_MMAP #endif #ifdef HAVE_MMAP_DEV_ZERO # include <sys/mman.h> # ifndef MAP_FAILED # define MAP_FAILED -1 # endif # define USING_MMAP #endif #ifndef USING_MMAP #define USING_MALLOC_PAGE_GROUPS #endif /* Strategy: This garbage-collecting allocator allocates objects on one of a set of pages. Each page can allocate objects of a single size only; available sizes are powers of two starting at four bytes. The size of an allocation request is rounded up to the next power of two (`order'), and satisfied from the appropriate page. Each page is recorded in a page-entry, which also maintains an in-use bitmap of object positions on the page. This allows the allocation state of a particular object to be flipped without touching the page itself. Each page-entry also has a context depth, which is used to track pushing and popping of allocation contexts. Only objects allocated in the current (highest-numbered) context may be collected. Page entries are arranged in an array of singly-linked lists. The array is indexed by the allocation size, in bits, of the pages on it; i.e. all pages on a list allocate objects of the same size. Pages are ordered on the list such that all non-full pages precede all full pages, with non-full pages arranged in order of decreasing context depth. Empty pages (of all orders) are kept on a single page cache list, and are considered first when new pages are required; they are deallocated at the start of the next collection if they haven't been recycled by then. */ /* Define GGC_DEBUG_LEVEL to print debugging information. 0: No debugging output. 1: GC statistics only. 2: Page-entry allocations/deallocations as well.
3: Object allocations as well. 4: Object marks as well. */ #define GGC_DEBUG_LEVEL (0) #ifndef HOST_BITS_PER_PTR #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG #endif /* A two-level tree is used to look up the page-entry for a given pointer. Two chunks of the pointer's bits are extracted to index the first and second levels of the tree, as follows: from msb to lsb, the 32 pointer bits are split into PAGE_L1_BITS, then PAGE_L2_BITS, then the low HOST_PAGE_SIZE_BITS. The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry pages are aligned on system page boundaries. The next most significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first index values in the lookup table, respectively. For 32-bit architectures and the settings below, there are no leftover bits. For architectures with wider pointers, the lookup tree points to a list of pages, which must be scanned to find the correct one. */ #define PAGE_L1_BITS (8) #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize) #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS) #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS) #define LOOKUP_L1(p) \ (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1)) #define LOOKUP_L2(p) \ (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1)) /* The number of objects per allocation page, for objects on a page of the indicated ORDER. */ #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER] /* The number of objects in P. */ #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order)) /* The size of an object on a page of the indicated ORDER. */ #define OBJECT_SIZE(ORDER) object_size_table[ORDER] /* For speed, we avoid doing a general integer divide to locate the offset in the allocation bitmap, by precalculating numbers M, S such that (O * M) >> S == O / Z (modulo 2^32), for any offset O within the page which is evenly divisible by the object size Z. */ #define DIV_MULT(ORDER) inverse_table[ORDER].mult #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift #define OFFSET_TO_BIT(OFFSET, ORDER) \ (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER)) /* The number of extra orders, not corresponding to power-of-two sized objects. */ #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table) #define RTL_SIZE(NSLOTS) \ (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion)) #define TREE_EXP_SIZE(OPS) \ (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree)) /* The Ith entry is the maximum size of an object to be stored in the Ith extra order. Adding a new entry to this array is the *only* thing you need to do to add a new special allocation size. */ static const size_t extra_order_size_table[] = { sizeof (struct tree_decl), sizeof (struct tree_list), TREE_EXP_SIZE (2), RTL_SIZE (2), /* MEM, PLUS, etc. */ RTL_SIZE (9), /* INSN */ }; /* The total number of orders. */ #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS) /* We use this structure to determine the alignment required for allocations. For power-of-two sized allocations, that's not a problem, but it does matter for odd-sized allocations. */ struct max_alignment { char c; union { HOST_WIDEST_INT i; long double d; } u; }; /* The biggest alignment required. */ #define MAX_ALIGNMENT (offsetof (struct max_alignment, u)) /* Compute the smallest nonnegative number which when added to X gives a multiple of F. */ #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f)) /* Compute the smallest multiple of F that is >= X. */ #define ROUND_UP(x, f) (CEIL (x, f) * (f)) /* The Ith entry is the number of objects on a page of order I.
*/ static unsigned objects_per_page_table[NUM_ORDERS]; /* The Ith entry is the size of an object on a page of order I. */ static size_t object_size_table[NUM_ORDERS]; /* The Ith entry is a pair of numbers (mult, shift) such that ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32, for all k evenly divisible by OBJECT_SIZE(I). */ static struct { size_t mult; unsigned int shift; } inverse_table[NUM_ORDERS]; /* A page_entry records the status of an allocation page. This structure is dynamically sized to fit the bitmap in_use_p. */ typedef struct page_entry { /* The next page-entry with objects of the same size, or NULL if this is the last page-entry. */ struct page_entry *next; /* The previous page-entry with objects of the same size, or NULL if this is the first page-entry. The PREV pointer exists solely to keep the cost of ggc_free manageable. */ struct page_entry *prev; /* The number of bytes allocated. (This will always be a multiple of the host system page size.) */ size_t bytes; /* The address at which the memory is allocated. */ char *page; #ifdef USING_MALLOC_PAGE_GROUPS /* Back pointer to the page group this page came from. */ struct page_group *group; #endif /* This is the index in the by_depth varray where this page table can be found. */ unsigned long index_by_depth; /* Context depth of this page. */ unsigned short context_depth; /* The number of free objects remaining on this page. */ unsigned short num_free_objects; /* A likely candidate for the bit position of a free object for the next allocation from this page. */ unsigned short next_bit_hint; /* The lg of size of objects allocated from this page. */ unsigned char order; /* A bit vector indicating whether or not objects are in use. The Nth bit is one if the Nth object on this page is allocated. This array is dynamically sized. */ unsigned long in_use_p[1]; } page_entry; #ifdef USING_MALLOC_PAGE_GROUPS /* A page_group describes a large allocation from malloc, from which we parcel out aligned pages. */ typedef struct page_group { /* A linked list of all extant page groups. */ struct page_group *next; /* The address we received from malloc. */ char *allocation; /* The size of the block. */ size_t alloc_size; /* A bitmask of pages in use. */ unsigned int in_use; } page_group; #endif #if HOST_BITS_PER_PTR <= 32 /* On 32-bit hosts, we use a two level page table, as pictured above. */ typedef page_entry **page_table[PAGE_L1_SIZE]; #else /* On 64-bit hosts, we use the same two level page tables plus a linked list that disambiguates the top 32-bits. There will almost always be exactly one entry in the list. */ typedef struct page_table_chain { struct page_table_chain *next; size_t high_bits; page_entry **table[PAGE_L1_SIZE]; } *page_table; #endif /* The rest of the global variables. */ static struct globals { /* The Nth element in this array is a page with objects of size 2^N. If there are any pages with free objects, they will be at the head of the list. NULL if there are no page-entries for this object size. */ page_entry *pages[NUM_ORDERS]; /* The Nth element in this array is the last page with objects of size 2^N. NULL if there are no page-entries for this object size. */ page_entry *page_tails[NUM_ORDERS]; /* Lookup table for associating allocation pages with object addresses. */ page_table lookup; /* The system's page size. */ size_t pagesize; size_t lg_pagesize; /* Bytes currently allocated. */ size_t allocated; /* Bytes currently allocated at the end of the last collection. 
*/ size_t allocated_last_gc; /* Total amount of memory mapped. */ size_t bytes_mapped; /* Bit N set if any allocations have been done at context depth N. */ unsigned long context_depth_allocations; /* Bit N set if any collections have been done at context depth N. */ unsigned long context_depth_collections; /* The current depth in the context stack. */ unsigned short context_depth; /* A file descriptor open to /dev/zero for reading. */ #if defined (HAVE_MMAP_DEV_ZERO) int dev_zero_fd; #endif /* A cache of free system pages. */ page_entry *free_pages; #ifdef USING_MALLOC_PAGE_GROUPS page_group *page_groups; #endif /* The file descriptor for debugging output. */ FILE *debug_file; /* Current number of elements in use in depth below. */ unsigned int depth_in_use; /* Maximum number of elements that can be used before resizing. */ unsigned int depth_max; /* Each element of this arry is an index in by_depth where the given depth starts. This structure is indexed by that given depth we are interested in. */ unsigned int *depth; /* Current number of elements in use in by_depth below. */ unsigned int by_depth_in_use; /* Maximum number of elements that can be used before resizing. */ unsigned int by_depth_max; /* Each element of this array is a pointer to a page_entry, all page_entries can be found in here by increasing depth. index_by_depth in the page_entry is the index into this data structure where that page_entry can be found. This is used to speed up finding all page_entries at a particular depth. */ page_entry **by_depth; /* Each element is a pointer to the saved in_use_p bits, if any, zero otherwise. We allocate them all together, to enable a better runtime data access pattern. */ unsigned long **save_in_use; #ifdef ENABLE_GC_ALWAYS_COLLECT /* List of free objects to be verified as actually free on the next collection. */ struct free_object { void *object; struct free_object *next; } *free_object_list; #endif #ifdef GATHER_STATISTICS struct { /* Total memory allocated with ggc_alloc. */ unsigned long long total_allocated; /* Total overhead for memory to be allocated with ggc_alloc. */ unsigned long long total_overhead; /* Total allocations and overhead for sizes less than 32, 64 and 128. These sizes are interesting because they are typical cache line sizes. */ unsigned long long total_allocated_under32; unsigned long long total_overhead_under32; unsigned long long total_allocated_under64; unsigned long long total_overhead_under64; unsigned long long total_allocated_under128; unsigned long long total_overhead_under128; /* The allocations for each of the allocation orders. */ unsigned long long total_allocated_per_order[NUM_ORDERS]; /* The overhead for each of the allocation orders. */ unsigned long long total_overhead_per_order[NUM_ORDERS]; } stats; #endif } G; /* The size in bytes required to maintain a bitmap for the objects on a page-entry. */ #define BITMAP_SIZE(Num_objects) \ (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long)) /* Allocate pages in chunks of this size, to throttle calls to memory allocation routines. The first page is used, the rest go onto the free list. This cannot be larger than HOST_BITS_PER_INT for the in_use bitmask for page_group. */ #define GGC_QUIRE_SIZE 16 /* Initial guess as to how many page table entries we might need. 
*/ #define INITIAL_PTE_COUNT 128 static int ggc_allocated_p (const void *); static page_entry *lookup_page_table_entry (const void *); static void set_page_table_entry (void *, page_entry *); #ifdef USING_MMAP static char *alloc_anon (char *, size_t); #endif #ifdef USING_MALLOC_PAGE_GROUPS static size_t page_group_index (char *, char *); static void set_page_group_in_use (page_group *, char *); static void clear_page_group_in_use (page_group *, char *); #endif static struct page_entry * alloc_page (unsigned); static void free_page (struct page_entry *); static void release_pages (void); static void clear_marks (void); static void sweep_pages (void); static void ggc_recalculate_in_use_p (page_entry *); static void compute_inverse (unsigned); static inline void adjust_depth (void); static void move_ptes_to_front (int, int); void debug_print_page_list (int); static void push_depth (unsigned int); static void push_by_depth (page_entry *, unsigned long *); struct alloc_zone *rtl_zone = NULL; struct alloc_zone *tree_zone = NULL; struct alloc_zone *garbage_zone = NULL; /* Push an entry onto G.depth. */ inline static void push_depth (unsigned int i) { if (G.depth_in_use >= G.depth_max) { G.depth_max *= 2; G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int)); } G.depth[G.depth_in_use++] = i; } /* Push an entry onto G.by_depth and G.save_in_use. */ inline static void push_by_depth (page_entry *p, unsigned long *s) { if (G.by_depth_in_use >= G.by_depth_max) { G.by_depth_max *= 2; G.by_depth = xrealloc (G.by_depth, G.by_depth_max * sizeof (page_entry *)); G.save_in_use = xrealloc (G.save_in_use, G.by_depth_max * sizeof (unsigned long *)); } G.by_depth[G.by_depth_in_use] = p; G.save_in_use[G.by_depth_in_use++] = s; } #if (GCC_VERSION < 3001) #define prefetch(X) ((void) X) #else #define prefetch(X) __builtin_prefetch (X) #endif #define save_in_use_p_i(__i) \ (G.save_in_use[__i]) #define save_in_use_p(__p) \ (save_in_use_p_i (__p->index_by_depth)) /* Returns nonzero if P was allocated in GC'able memory. */ static inline int ggc_allocated_p (const void *p) { page_entry ***base; size_t L1, L2; #if HOST_BITS_PER_PTR <= 32 base = &G.lookup[0]; #else page_table table = G.lookup; size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; while (1) { if (table == NULL) return 0; if (table->high_bits == high_bits) break; table = table->next; } base = &table->table[0]; #endif /* Extract the level 1 and 2 indices. */ L1 = LOOKUP_L1 (p); L2 = LOOKUP_L2 (p); return base[L1] && base[L1][L2]; } /* Traverse the page table and find the entry for a page. Die (probably) if the object wasn't allocated via GC. */ static inline page_entry * lookup_page_table_entry (const void *p) { page_entry ***base; size_t L1, L2; #if HOST_BITS_PER_PTR <= 32 base = &G.lookup[0]; #else page_table table = G.lookup; size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; while (table->high_bits != high_bits) table = table->next; base = &table->table[0]; #endif /* Extract the level 1 and 2 indices. */ L1 = LOOKUP_L1 (p); L2 = LOOKUP_L2 (p); return base[L1][L2]; } /* Set the page table entry for a page. */ static void set_page_table_entry (void *p, page_entry *entry) { page_entry ***base; size_t L1, L2; #if HOST_BITS_PER_PTR <= 32 base = &G.lookup[0]; #else page_table table; size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; for (table = G.lookup; table; table = table->next) if (table->high_bits == high_bits) goto found; /* Not found -- allocate a new table. 
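The fresh table is linked in at the head of the G.lookup chain, so the most recently created high-bits group is checked first by subsequent lookups.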
*/ table = xcalloc (1, sizeof(*table)); table->next = G.lookup; table->high_bits = high_bits; G.lookup = table; found: base = &table->table[0]; #endif /* Extract the level 1 and 2 indices. */ L1 = LOOKUP_L1 (p); L2 = LOOKUP_L2 (p); if (base[L1] == NULL) base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *)); base[L1][L2] = entry; } /* Prints the page-entry for object size ORDER, for debugging. */ void debug_print_page_list (int order) { page_entry *p; printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order], (void *) G.page_tails[order]); p = G.pages[order]; while (p != NULL) { printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth, p->num_free_objects); p = p->next; } printf ("NULL\n"); fflush (stdout); } #ifdef USING_MMAP /* Allocate SIZE bytes of anonymous memory, preferably near PREF, (if non-null). The ifdef structure here is intended to cause a compile error unless exactly one of the HAVE_* is defined. */ static inline char * alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size) { #ifdef HAVE_MMAP_ANON char *page = mmap (pref, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); #endif #ifdef HAVE_MMAP_DEV_ZERO char *page = mmap (pref, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, G.dev_zero_fd, 0); #endif if (page == (char *) MAP_FAILED) { perror ("virtual memory exhausted"); exit (FATAL_EXIT_CODE); } /* Remember that we allocated this memory. */ G.bytes_mapped += size; /* Pretend we don't have access to the allocated pages. We'll enable access to smaller pieces of the area in ggc_alloc. Discard the handle to avoid handle leak. */ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size)); return page; } #endif #ifdef USING_MALLOC_PAGE_GROUPS /* Compute the index for this page into the page group. */ static inline size_t page_group_index (char *allocation, char *page) { return (size_t) (page - allocation) >> G.lg_pagesize; } /* Set and clear the in_use bit for this page in the page group. */ static inline void set_page_group_in_use (page_group *group, char *page) { group->in_use |= 1 << page_group_index (group->allocation, page); } static inline void clear_page_group_in_use (page_group *group, char *page) { group->in_use &= ~(1 << page_group_index (group->allocation, page)); } #endif /* Allocate a new page for allocating objects of size 2^ORDER, and return an entry for it. The entry is not added to the appropriate page_table list. */ static inline struct page_entry * alloc_page (unsigned order) { struct page_entry *entry, *p, **pp; char *page; size_t num_objects; size_t bitmap_size; size_t page_entry_size; size_t entry_size; #ifdef USING_MALLOC_PAGE_GROUPS page_group *group; #endif num_objects = OBJECTS_PER_PAGE (order); bitmap_size = BITMAP_SIZE (num_objects + 1); page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size; entry_size = num_objects * OBJECT_SIZE (order); if (entry_size < G.pagesize) entry_size = G.pagesize; entry = NULL; page = NULL; /* Check the list of free pages for one we can use. */ for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp) if (p->bytes == entry_size) break; if (p != NULL) { /* Recycle the allocated memory from this page ... */ *pp = p->next; page = p->page; #ifdef USING_MALLOC_PAGE_GROUPS group = p->group; #endif /* ... and, if possible, the page entry itself. */ if (p->order == order) { entry = p; memset (entry, 0, page_entry_size); } else free (p); } #ifdef USING_MMAP else if (entry_size == G.pagesize) { /* We want just one page. Allocate a bunch of them and put the extras on the freelist. 
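GGC_QUIRE_SIZE pages are mapped with a single alloc_anon call; the first page satisfies the current request and the remaining GGC_QUIRE_SIZE - 1 entries are chained onto G.free_pages in ascending address order.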
(Can only do this optimization with mmap for backing store.) */ struct page_entry *e, *f = G.free_pages; int i; page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE); /* This loop counts down so that the chain will be in ascending memory order. */ for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--) { e = xcalloc (1, page_entry_size); e->order = order; e->bytes = G.pagesize; e->page = page + (i << G.lg_pagesize); e->next = f; f = e; } G.free_pages = f; } else page = alloc_anon (NULL, entry_size); #endif #ifdef USING_MALLOC_PAGE_GROUPS else { /* Allocate a large block of memory and serve out the aligned pages therein. This results in much less memory wastage than the traditional implementation of valloc. */ char *allocation, *a, *enda; size_t alloc_size, head_slop, tail_slop; int multiple_pages = (entry_size == G.pagesize); if (multiple_pages) alloc_size = GGC_QUIRE_SIZE * G.pagesize; else alloc_size = entry_size + G.pagesize - 1; allocation = xmalloc (alloc_size); page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize); head_slop = page - allocation; if (multiple_pages) tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1); else tail_slop = alloc_size - entry_size - head_slop; enda = allocation + alloc_size - tail_slop; /* We allocated N pages, which are likely not aligned, leaving us with N-1 usable pages. We plan to place the page_group structure somewhere in the slop. */ if (head_slop >= sizeof (page_group)) group = (page_group *)page - 1; else { /* We magically got an aligned allocation. Too bad, we have to waste a page anyway. */ if (tail_slop == 0) { enda -= G.pagesize; tail_slop += G.pagesize; } if (tail_slop < sizeof (page_group)) abort (); group = (page_group *)enda; tail_slop -= sizeof (page_group); } /* Remember that we allocated this memory. */ group->next = G.page_groups; group->allocation = allocation; group->alloc_size = alloc_size; group->in_use = 0; G.page_groups = group; G.bytes_mapped += alloc_size; /* If we allocated multiple pages, put the rest on the free list. */ if (multiple_pages) { struct page_entry *e, *f = G.free_pages; for (a = enda - G.pagesize; a != page; a -= G.pagesize) { e = xcalloc (1, page_entry_size); e->order = order; e->bytes = G.pagesize; e->page = a; e->group = group; e->next = f; f = e; } G.free_pages = f; } } #endif if (entry == NULL) entry = xcalloc (1, page_entry_size); entry->bytes = entry_size; entry->page = page; entry->context_depth = G.context_depth; entry->order = order; entry->num_free_objects = num_objects; entry->next_bit_hint = 1; G.context_depth_allocations |= (unsigned long)1 << G.context_depth; #ifdef USING_MALLOC_PAGE_GROUPS entry->group = group; set_page_group_in_use (group, page); #endif /* Set the one-past-the-end in-use bit. This acts as a sentry as we increment the hint. */ entry->in_use_p[num_objects / HOST_BITS_PER_LONG] = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG); set_page_table_entry (page, entry); if (GGC_DEBUG_LEVEL >= 2) fprintf (G.debug_file, "Allocating page at %p, object size=%lu, data %p-%p\n", (void *) entry, (unsigned long) OBJECT_SIZE (order), page, page + entry_size - 1); return entry; } /* Adjust the size of G.depth so that no index greater than the one used by the top of the G.by_depth is used. 
*/ static inline void adjust_depth (void) { page_entry *top; if (G.by_depth_in_use) { top = G.by_depth[G.by_depth_in_use-1]; /* Peel back indices in depth that index into by_depth, so that as new elements are added to by_depth, we note the indices of those elements, if they are for new context depths. */ while (G.depth_in_use > (size_t)top->context_depth+1) --G.depth_in_use; } } /* For a page that is no longer needed, put it on the free page list. */ static void free_page (page_entry *entry) { if (GGC_DEBUG_LEVEL >= 2) fprintf (G.debug_file, "Deallocating page at %p, data %p-%p\n", (void *) entry, entry->page, entry->page + entry->bytes - 1); /* Mark the page as inaccessible. Discard the handle to avoid handle leak. */ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes)); set_page_table_entry (entry->page, NULL); #ifdef USING_MALLOC_PAGE_GROUPS clear_page_group_in_use (entry->group, entry->page); #endif if (G.by_depth_in_use > 1) { page_entry *top = G.by_depth[G.by_depth_in_use-1]; /* If they are at the same depth, put top element into freed slot. */ if (entry->context_depth == top->context_depth) { int i = entry->index_by_depth; G.by_depth[i] = top; G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1]; top->index_by_depth = i; } else { /* We cannot free a page from a context deeper than the current one. */ abort (); } } --G.by_depth_in_use; adjust_depth (); entry->next = G.free_pages; G.free_pages = entry; } /* Release the free page cache to the system. */ static void release_pages (void) { #ifdef USING_MMAP page_entry *p, *next; char *start; size_t len; /* Gather up adjacent pages so they are unmapped together. */ p = G.free_pages; while (p) { start = p->page; next = p->next; len = p->bytes; free (p); p = next; while (p && p->page == start + len) { next = p->next; len += p->bytes; free (p); p = next; } munmap (start, len); G.bytes_mapped -= len; } G.free_pages = NULL; #endif #ifdef USING_MALLOC_PAGE_GROUPS page_entry **pp, *p; page_group **gp, *g; /* Remove all pages from free page groups from the list. */ pp = &G.free_pages; while ((p = *pp) != NULL) if (p->group->in_use == 0) { *pp = p->next; free (p); } else pp = &p->next; /* Remove all free page groups, and release the storage. */ gp = &G.page_groups; while ((g = *gp) != NULL) if (g->in_use == 0) { *gp = g->next; G.bytes_mapped -= g->alloc_size; free (g->allocation); } else gp = &g->next; #endif } /* This table provides a fast way to determine ceil(log_2(size)) for allocation requests. The minimum allocation size is eight bytes. */ static unsigned char size_lookup[257] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }; /* Typed allocation function. Does nothing special in this collector. 
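The TYPE argument only matters to collectors that segregate objects by type; here it is ignored and the request is forwarded straight to ggc_alloc_stat.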
*/ void * ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size MEM_STAT_DECL) { return ggc_alloc_stat (size PASS_MEM_STAT); } /* Zone allocation function. Does nothing special in this collector. */ void * ggc_alloc_zone_stat (size_t size, struct alloc_zone *zone ATTRIBUTE_UNUSED MEM_STAT_DECL) { return ggc_alloc_stat (size PASS_MEM_STAT); } /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */ void * ggc_alloc_stat (size_t size MEM_STAT_DECL) { size_t order, word, bit, object_offset, object_size; struct page_entry *entry; void *result; if (size <= 256) { order = size_lookup[size]; object_size = OBJECT_SIZE (order); } else { order = 9; while (size > (object_size = OBJECT_SIZE (order))) order++; } /* If there are non-full pages for this size allocation, they are at the head of the list. */ entry = G.pages[order]; /* If there is no page for this object size, or all pages in this context are full, allocate a new page. */ if (entry == NULL || entry->num_free_objects == 0) { struct page_entry *new_entry; new_entry = alloc_page (order); new_entry->index_by_depth = G.by_depth_in_use; push_by_depth (new_entry, 0); /* We can skip context depths, if we do, make sure we go all the way to the new depth. */ while (new_entry->context_depth >= G.depth_in_use) push_depth (G.by_depth_in_use-1); /* If this is the only entry, it's also the tail. If it is not the only entry, then we must update the PREV pointer of the ENTRY (G.pages[order]) to point to our new page entry. */ if (entry == NULL) G.page_tails[order] = new_entry; else entry->prev = new_entry; /* Put new pages at the head of the page list. By definition the entry at the head of the list always has a NULL pointer. */ new_entry->next = entry; new_entry->prev = NULL; entry = new_entry; G.pages[order] = new_entry; /* For a new page, we know the word and bit positions (in the in_use bitmap) of the first available object -- they're zero. */ new_entry->next_bit_hint = 1; word = 0; bit = 0; object_offset = 0; } else { /* First try to use the hint left from the previous allocation to locate a clear bit in the in-use bitmap. We've made sure that the one-past-the-end bit is always set, so if the hint has run over, this test will fail. */ unsigned hint = entry->next_bit_hint; word = hint / HOST_BITS_PER_LONG; bit = hint % HOST_BITS_PER_LONG; /* If the hint didn't work, scan the bitmap from the beginning. */ if ((entry->in_use_p[word] >> bit) & 1) { word = bit = 0; while (~entry->in_use_p[word] == 0) ++word; while ((entry->in_use_p[word] >> bit) & 1) ++bit; hint = word * HOST_BITS_PER_LONG + bit; } /* Next time, try the next bit. */ entry->next_bit_hint = hint + 1; object_offset = hint * object_size; } /* Set the in-use bit. */ entry->in_use_p[word] |= ((unsigned long) 1 << bit); /* Keep a running total of the number of free objects. If this page fills up, we may have to move it to the end of the list if the next page isn't full. If the next page is full, all subsequent pages are full, so there's no need to move it. */ if (--entry->num_free_objects == 0 && entry->next != NULL && entry->next->num_free_objects > 0) { /* We have a new head for the list. */ G.pages[order] = entry->next; /* We are moving ENTRY to the end of the page table list. The new page at the head of the list will have NULL in its PREV field and ENTRY will have NULL in its NEXT field. */ entry->next->prev = NULL; entry->next = NULL; /* Append ENTRY to the tail of the list. 
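Keeping full pages at the tail preserves the invariant that any pages with free objects for this order sit at the head of the list, which is the only place the allocator looks.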
*/ entry->prev = G.page_tails[order]; G.page_tails[order]->next = entry; G.page_tails[order] = entry; } #ifdef GATHER_STATISTICS ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size PASS_MEM_STAT); #endif /* Calculate the object's address. */ result = entry->page + object_offset; #ifdef ENABLE_GC_CHECKING /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the exact same semantics in presence of memory bugs, regardless of ENABLE_VALGRIND_CHECKING. We override this request below. Drop the handle to avoid handle leak. */ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size)); /* `Poison' the entire allocated object, including any padding at the end. */ memset (result, 0xaf, object_size); /* Make the bytes after the end of the object unaccessible. Discard the handle to avoid handle leak. */ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size, object_size - size)); #endif /* Tell Valgrind that the memory is there, but its content isn't defined. The bytes at the end of the object are still marked unaccessible. */ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size)); /* Keep track of how many bytes are being allocated. This information is used in deciding when to collect. */ G.allocated += object_size; #ifdef GATHER_STATISTICS { size_t overhead = object_size - size; G.stats.total_overhead += overhead; G.stats.total_allocated += object_size; G.stats.total_overhead_per_order[order] += overhead; G.stats.total_allocated_per_order[order] += object_size; if (size <= 32) { G.stats.total_overhead_under32 += overhead; G.stats.total_allocated_under32 += object_size; } if (size <= 64) { G.stats.total_overhead_under64 += overhead; G.stats.total_allocated_under64 += object_size; } if (size <= 128) { G.stats.total_overhead_under128 += overhead; G.stats.total_allocated_under128 += object_size; } } #endif if (GGC_DEBUG_LEVEL >= 3) fprintf (G.debug_file, "Allocating object, requested size=%lu, actual=%lu at %p on %p\n", (unsigned long) size, (unsigned long) object_size, result, (void *) entry); return result; } /* If P is not marked, marks it and return false. Otherwise return true. P must have been allocated by the GC allocator; it mustn't point to static objects, stack variables, or memory allocated with malloc. */ int ggc_set_mark (const void *p) { page_entry *entry; unsigned bit, word; unsigned long mask; /* Look up the page on which the object is alloced. If the object wasn't allocated by the collector, we'll probably die. */ entry = lookup_page_table_entry (p); #ifdef ENABLE_CHECKING if (entry == NULL) abort (); #endif /* Calculate the index of the object on the page; this is its bit position in the in_use_p bitmap. */ bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); word = bit / HOST_BITS_PER_LONG; mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); /* If the bit was previously set, skip it. */ if (entry->in_use_p[word] & mask) return 1; /* Otherwise set it, and decrement the free object count. */ entry->in_use_p[word] |= mask; entry->num_free_objects -= 1; if (GGC_DEBUG_LEVEL >= 4) fprintf (G.debug_file, "Marking %p\n", p); return 0; } /* Return 1 if P has been marked, zero otherwise. P must have been allocated by the GC allocator; it mustn't point to static objects, stack variables, or memory allocated with malloc. */ int ggc_marked_p (const void *p) { page_entry *entry; unsigned bit, word; unsigned long mask; /* Look up the page on which the object is alloced. 
If the object wasn't allocated by the collector, we'll probably die. */ entry = lookup_page_table_entry (p); #ifdef ENABLE_CHECKING if (entry == NULL) abort (); #endif /* Calculate the index of the object on the page; this is its bit position in the in_use_p bitmap. */ bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order); word = bit / HOST_BITS_PER_LONG; mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); return (entry->in_use_p[word] & mask) != 0; } /* Return the size of the gc-able object P. */ size_t ggc_get_size (const void *p) { page_entry *pe = lookup_page_table_entry (p); return OBJECT_SIZE (pe->order); } /* Release the memory for object P. */ void ggc_free (void *p) { page_entry *pe = lookup_page_table_entry (p); size_t order = pe->order; size_t size = OBJECT_SIZE (order); if (GGC_DEBUG_LEVEL >= 3) fprintf (G.debug_file, "Freeing object, actual size=%lu, at %p on %p\n", (unsigned long) size, p, (void *) pe); #ifdef ENABLE_GC_CHECKING /* Poison the data, to indicate the data is garbage. */ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size)); memset (p, 0xa5, size); #endif /* Let valgrind know the object is free. */ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size)); #ifdef ENABLE_GC_ALWAYS_COLLECT /* In the completely-anal-checking mode, we do *not* immediately free the data, but instead verify that the data is *actually* not reachable the next time we collect. */ { struct free_object *fo = xmalloc (sizeof (struct free_object)); fo->object = p; fo->next = G.free_object_list; G.free_object_list = fo; } #else { unsigned int bit_offset, word, bit; G.allocated -= size; /* Mark the object not-in-use. */ bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order); word = bit_offset / HOST_BITS_PER_LONG; bit = bit_offset % HOST_BITS_PER_LONG; pe->in_use_p[word] &= ~(1UL << bit); if (pe->num_free_objects++ == 0) { page_entry *p, *q; /* If the page is completely full, then it's supposed to be after all pages that aren't. Since we've freed one object from a page that was full, we need to move the page to the head of the list. PE is the node we want to move. Q is the previous node and P is the next node in the list. */ q = pe->prev; if (q && q->num_free_objects == 0) { p = pe->next; q->next = p; /* If PE was at the end of the list, then Q becomes the new end of the list. If PE was not the end of the list, then we need to update the PREV field for P. */ if (!p) G.page_tails[order] = q; else p->prev = q; /* Move PE to the head of the list. */ pe->next = G.pages[order]; pe->prev = NULL; G.pages[order]->prev = pe; G.pages[order] = pe; } /* Reset the hint bit to point to the only free object. */ pe->next_bit_hint = bit_offset; } } #endif } /* Subroutine of init_ggc which computes the pair of numbers used to perform division by OBJECT_SIZE (order) and fills in inverse_table[]. This algorithm is taken from Granlund and Montgomery's paper "Division by Invariant Integers using Multiplication" (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by constants). */ static void compute_inverse (unsigned order) { size_t size, inv; unsigned int e; size = OBJECT_SIZE (order); e = 0; while (size % 2 == 0) { e++; size >>= 1; } inv = size; while (inv * size != 1) inv = inv * (2 - inv*size); DIV_MULT (order) = inv; DIV_SHIFT (order) = e; } /* Initialize the ggc-mmap allocator. 
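Besides determining the page size and (with mmap) seeding the free-page cache, this fills in the object-size and objects-per-page tables and, via compute_inverse above, the DIV_MULT and DIV_SHIFT pairs that let us divide by OBJECT_SIZE without a runtime division. For example, assuming a 32-bit size_t, OBJECT_SIZE 24 = 2^3 * 3 yields DIV_SHIFT 3 and DIV_MULT 0xAAAAAAAB (the inverse of 3 modulo 2^32), so the exact division 48 / 24 can be computed as ((48 * 0xAAAAAAAB) & 0xFFFFFFFF) >> 3, which is 2.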
*/ void init_ggc (void) { unsigned order; G.pagesize = getpagesize(); G.lg_pagesize = exact_log2 (G.pagesize); #ifdef HAVE_MMAP_DEV_ZERO G.dev_zero_fd = open ("/dev/zero", O_RDONLY); if (G.dev_zero_fd == -1) internal_error ("open /dev/zero: %m"); #endif #if 0 G.debug_file = fopen ("ggc-mmap.debug", "w"); #else G.debug_file = stdout; #endif #ifdef USING_MMAP /* StunOS has an amazing off-by-one error for the first mmap allocation after fiddling with RLIMIT_STACK. The result, as hard as it is to believe, is an unaligned page allocation, which would cause us to hork badly if we tried to use it. */ { char *p = alloc_anon (NULL, G.pagesize); struct page_entry *e; if ((size_t)p & (G.pagesize - 1)) { /* How losing. Discard this one and try another. If we still can't get something useful, give up. */ p = alloc_anon (NULL, G.pagesize); if ((size_t)p & (G.pagesize - 1)) abort (); } /* We have a good page, might as well hold onto it... */ e = xcalloc (1, sizeof (struct page_entry)); e->bytes = G.pagesize; e->page = p; e->next = G.free_pages; G.free_pages = e; } #endif /* Initialize the object size table. */ for (order = 0; order < HOST_BITS_PER_PTR; ++order) object_size_table[order] = (size_t) 1 << order; for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order) { size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR]; /* If S is not a multiple of the MAX_ALIGNMENT, then round it up so that we're sure of getting aligned memory. */ s = ROUND_UP (s, MAX_ALIGNMENT); object_size_table[order] = s; } /* Initialize the objects-per-page and inverse tables. */ for (order = 0; order < NUM_ORDERS; ++order) { objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order); if (objects_per_page_table[order] == 0) objects_per_page_table[order] = 1; compute_inverse (order); } /* Reset the size_lookup array to put appropriately sized objects in the special orders. All objects bigger than the previous power of two, but no greater than the special size, should go in the new order. */ for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order) { int o; int i; o = size_lookup[OBJECT_SIZE (order)]; for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i) size_lookup[i] = order; } G.depth_in_use = 0; G.depth_max = 10; G.depth = xmalloc (G.depth_max * sizeof (unsigned int)); G.by_depth_in_use = 0; G.by_depth_max = INITIAL_PTE_COUNT; G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *)); G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *)); } /* Start a new GGC zone. */ struct alloc_zone * new_ggc_zone (const char *name ATTRIBUTE_UNUSED) { return NULL; } /* Destroy a GGC zone. */ void destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED) { } /* Increment the `GC context'. Objects allocated in an outer context are never freed, eliminating the need to register their roots. */ void ggc_push_context (void) { ++G.context_depth; /* Die on wrap. */ if (G.context_depth >= HOST_BITS_PER_LONG) abort (); } /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P reflects reality. Recalculate NUM_FREE_OBJECTS as well. */ static void ggc_recalculate_in_use_p (page_entry *p) { unsigned int i; size_t num_objects; /* Because the past-the-end bit in in_use_p is always set, we pretend there is one additional object. */ num_objects = OBJECTS_IN_PAGE (p) + 1; /* Reset the free object count. */ p->num_free_objects = num_objects; /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. 
*/ for (i = 0; i < CEIL (BITMAP_SIZE (num_objects), sizeof (*p->in_use_p)); ++i) { unsigned long j; /* Something is in use if it is marked, or if it was in use in a context further down the context stack. */ p->in_use_p[i] |= save_in_use_p (p)[i]; /* Decrement the free object count for every object allocated. */ for (j = p->in_use_p[i]; j; j >>= 1) p->num_free_objects -= (j & 1); } if (p->num_free_objects >= num_objects) abort (); } /* Decrement the `GC context'. All objects allocated since the previous ggc_push_context are migrated to the outer context. */ void ggc_pop_context (void) { unsigned long omask; unsigned int depth, i, e; #ifdef ENABLE_CHECKING unsigned int order; #endif depth = --G.context_depth; omask = (unsigned long)1 << (depth + 1); if (!((G.context_depth_allocations | G.context_depth_collections) & omask)) return; G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1; G.context_depth_allocations &= omask - 1; G.context_depth_collections &= omask - 1; /* The G.depth array is shortened so that the last index is the context_depth of the top element of by_depth. */ if (depth+1 < G.depth_in_use) e = G.depth[depth+1]; else e = G.by_depth_in_use; /* We might not have any PTEs of depth depth. */ if (depth < G.depth_in_use) { /* First we go through all the pages at depth depth to recalculate the in use bits. */ for (i = G.depth[depth]; i < e; ++i) { page_entry *p; #ifdef ENABLE_CHECKING p = G.by_depth[i]; /* Check that all of the pages really are at the depth that we expect. */ if (p->context_depth != depth) abort (); if (p->index_by_depth != i) abort (); #endif prefetch (&save_in_use_p_i (i+8)); prefetch (&save_in_use_p_i (i+16)); if (save_in_use_p_i (i)) { p = G.by_depth[i]; ggc_recalculate_in_use_p (p); free (save_in_use_p_i (i)); save_in_use_p_i (i) = 0; } } } /* Then, we reset all page_entries with a depth greater than depth to be at depth. */ for (i = e; i < G.by_depth_in_use; ++i) { page_entry *p = G.by_depth[i]; /* Check that all of the pages really are at the depth we expect. */ #ifdef ENABLE_CHECKING if (p->context_depth <= depth) abort (); if (p->index_by_depth != i) abort (); #endif p->context_depth = depth; } adjust_depth (); #ifdef ENABLE_CHECKING for (order = 2; order < NUM_ORDERS; order++) { page_entry *p; for (p = G.pages[order]; p != NULL; p = p->next) { if (p->context_depth > depth) abort (); else if (p->context_depth == depth && save_in_use_p (p)) abort (); } } #endif } /* Unmark all objects. */ static void clear_marks (void) { unsigned order; for (order = 2; order < NUM_ORDERS; order++) { page_entry *p; for (p = G.pages[order]; p != NULL; p = p->next) { size_t num_objects = OBJECTS_IN_PAGE (p); size_t bitmap_size = BITMAP_SIZE (num_objects + 1); #ifdef ENABLE_CHECKING /* The data should be page-aligned. */ if ((size_t) p->page & (G.pagesize - 1)) abort (); #endif /* Pages that aren't in the topmost context are not collected; nevertheless, we need their in-use bit vectors to store GC marks. So, back them up first. */ if (p->context_depth < G.context_depth) { if (! save_in_use_p (p)) save_in_use_p (p) = xmalloc (bitmap_size); memcpy (save_in_use_p (p), p->in_use_p, bitmap_size); } /* Reset reset the number of free objects and clear the in-use bits. These will be adjusted by mark_obj. */ p->num_free_objects = num_objects; memset (p->in_use_p, 0, bitmap_size); /* Make sure the one-past-the-end bit is always set. 
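It acts as a sentinel for the next_bit_hint scan in ggc_alloc_stat: when a page has no free objects left the scan stops on this bit instead of running off the end of the bitmap.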
*/ p->in_use_p[num_objects / HOST_BITS_PER_LONG] = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG)); } } } /* Free all empty pages. Partially empty pages need no attention because the `mark' bit doubles as an `unused' bit. */ static void sweep_pages (void) { unsigned order; for (order = 2; order < NUM_ORDERS; order++) { /* The last page-entry to consider, regardless of entries placed at the end of the list. */ page_entry * const last = G.page_tails[order]; size_t num_objects; size_t live_objects; page_entry *p, *previous; int done; p = G.pages[order]; if (p == NULL) continue; previous = NULL; do { page_entry *next = p->next; /* Loop until all entries have been examined. */ done = (p == last); num_objects = OBJECTS_IN_PAGE (p); /* Add all live objects on this page to the count of allocated memory. */ live_objects = num_objects - p->num_free_objects; G.allocated += OBJECT_SIZE (order) * live_objects; /* Only objects on pages in the topmost context should get collected. */ if (p->context_depth < G.context_depth) ; /* Remove the page if it's empty. */ else if (live_objects == 0) { /* If P was the first page in the list, then NEXT becomes the new first page in the list, otherwise splice P out of the forward pointers. */ if (! previous) G.pages[order] = next; else previous->next = next; /* Splice P out of the back pointers too. */ if (next) next->prev = previous; /* Are we removing the last element? */ if (p == G.page_tails[order]) G.page_tails[order] = previous; free_page (p); p = previous; } /* If the page is full, move it to the end. */ else if (p->num_free_objects == 0) { /* Don't move it if it's already at the end. */ if (p != G.page_tails[order]) { /* Move p to the end of the list. */ p->next = NULL; p->prev = G.page_tails[order]; G.page_tails[order]->next = p; /* Update the tail pointer... */ G.page_tails[order] = p; /* ... and the head pointer, if necessary. */ if (! previous) G.pages[order] = next; else previous->next = next; /* And update the backpointer in NEXT if necessary. */ if (next) next->prev = previous; p = previous; } } /* If we've fallen through to here, it's a page in the topmost context that is neither full nor empty. Such a page must precede pages at lesser context depth in the list, so move it to the head. */ else if (p != G.pages[order]) { previous->next = p->next; /* Update the backchain in the next node if it exists. */ if (p->next) p->next->prev = previous; /* Move P to the head of the list. */ p->next = G.pages[order]; p->prev = NULL; G.pages[order]->prev = p; /* Update the head pointer. */ G.pages[order] = p; /* Are we moving the last element? */ if (G.page_tails[order] == p) G.page_tails[order] = previous; p = previous; } previous = p; p = next; } while (! done); /* Now, restore the in_use_p vectors for any pages from contexts other than the current one. */ for (p = G.pages[order]; p; p = p->next) if (p->context_depth != G.context_depth) ggc_recalculate_in_use_p (p); } } #ifdef ENABLE_GC_CHECKING /* Clobber all free objects. */ static void poison_pages (void) { unsigned order; for (order = 2; order < NUM_ORDERS; order++) { size_t size = OBJECT_SIZE (order); page_entry *p; for (p = G.pages[order]; p != NULL; p = p->next) { size_t num_objects; size_t i; if (p->context_depth != G.context_depth) /* Since we don't do any collection for pages in pushed contexts, there's no need to do any poisoning. And besides, the IN_USE_P array isn't valid until we pop contexts. 
*/ continue; num_objects = OBJECTS_IN_PAGE (p); for (i = 0; i < num_objects; i++) { size_t word, bit; word = i / HOST_BITS_PER_LONG; bit = i % HOST_BITS_PER_LONG; if (((p->in_use_p[word] >> bit) & 1) == 0) { char *object = p->page + i * size; /* Keep poison-by-write when we expect to use Valgrind, so the exact same memory semantics is kept, in case there are memory errors. We override this request below. */ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size)); memset (object, 0xa5, size); /* Drop the handle to avoid handle leak. */ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size)); } } } } } #else #define poison_pages() #endif #ifdef ENABLE_GC_ALWAYS_COLLECT /* Validate that the reportedly free objects actually are. */ static void validate_free_objects (void) { struct free_object *f, *next, *still_free = NULL; for (f = G.free_object_list; f ; f = next) { page_entry *pe = lookup_page_table_entry (f->object); size_t bit, word; bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order); word = bit / HOST_BITS_PER_LONG; bit = bit % HOST_BITS_PER_LONG; next = f->next; /* Make certain it isn't visible from any root. Notice that we do this check before sweep_pages merges save_in_use_p. */ if (pe->in_use_p[word] & (1UL << bit)) abort (); /* If the object comes from an outer context, then retain the free_object entry, so that we can verify that the address isn't live on the stack in some outer context. */ if (pe->context_depth != G.context_depth) { f->next = still_free; still_free = f; } else free (f); } G.free_object_list = still_free; } #else #define validate_free_objects() #endif /* Top level mark-and-sweep routine. */ void ggc_collect (void) { /* Avoid frequent unnecessary work by skipping collection if the total allocations haven't expanded much since the last collection. */ float allocated_last_gc = MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024); float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100; if (G.allocated < allocated_last_gc + min_expand) return; timevar_push (TV_GC); if (!quiet_flag) fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024); if (GGC_DEBUG_LEVEL >= 2) fprintf (G.debug_file, "BEGIN COLLECTING\n"); /* Zero the total allocated bytes. This will be recalculated in the sweep phase. */ G.allocated = 0; /* Release the pages we freed the last time we collected, but didn't reuse in the interim. */ release_pages (); /* Indicate that we've seen collections at this context depth. */ G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; clear_marks (); ggc_mark_roots (); poison_pages (); validate_free_objects (); sweep_pages (); G.allocated_last_gc = G.allocated; timevar_pop (TV_GC); if (!quiet_flag) fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024); if (GGC_DEBUG_LEVEL >= 2) fprintf (G.debug_file, "END COLLECTING\n"); } /* Print allocation statistics. */ #ifndef SCALE #define SCALE(x) ((unsigned long) ((x) < 1024*10 \ ? (x) \ : ((x) < 1024*1024*10 \ ? (x) / 1024 \ : (x) / (1024*1024)))) #endif #ifndef LABEL #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M')) #endif void ggc_print_statistics (void) { struct ggc_statistics stats; unsigned int i; size_t total_overhead = 0; /* Clear the statistics. */ memset (&stats, 0, sizeof (stats)); /* Make sure collection will really occur. */ G.allocated_last_gc = 0; /* Collect and print the statistics common across collectors. 
*/ ggc_print_common_statistics (stderr, &stats); /* Release free pages so that we will not count the bytes allocated there as part of the total allocated memory. */ release_pages (); /* Collect some information about the various sizes of allocation. */ fprintf (stderr, "Memory still allocated at the end of the compilation process\n"); fprintf (stderr, "%-5s %10s %10s %10s\n", "Size", "Allocated", "Used", "Overhead"); for (i = 0; i < NUM_ORDERS; ++i) { page_entry *p; size_t allocated; size_t in_use; size_t overhead; /* Skip empty entries. */ if (!G.pages[i]) continue; overhead = allocated = in_use = 0; /* Figure out the total number of bytes allocated for objects of this size, and how many of them are actually in use. Also figure out how much memory the page table is using. */ for (p = G.pages[i]; p; p = p->next) { allocated += p->bytes; in_use += (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i); overhead += (sizeof (page_entry) - sizeof (long) + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1)); } fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n", (unsigned long) OBJECT_SIZE (i), SCALE (allocated), LABEL (allocated), SCALE (in_use), LABEL (in_use), SCALE (overhead), LABEL (overhead)); total_overhead += overhead; } fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total", SCALE (G.bytes_mapped), LABEL (G.bytes_mapped), SCALE (G.allocated), LABEL(G.allocated), SCALE (total_overhead), LABEL (total_overhead)); #ifdef GATHER_STATISTICS { fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n"); fprintf (stderr, "Total Overhead: %10lld\n", G.stats.total_overhead); fprintf (stderr, "Total Allocated: %10lld\n", G.stats.total_allocated); fprintf (stderr, "Total Overhead under 32B: %10lld\n", G.stats.total_overhead_under32); fprintf (stderr, "Total Allocated under 32B: %10lld\n", G.stats.total_allocated_under32); fprintf (stderr, "Total Overhead under 64B: %10lld\n", G.stats.total_overhead_under64); fprintf (stderr, "Total Allocated under 64B: %10lld\n", G.stats.total_allocated_under64); fprintf (stderr, "Total Overhead under 128B: %10lld\n", G.stats.total_overhead_under128); fprintf (stderr, "Total Allocated under 128B: %10lld\n", G.stats.total_allocated_under128); for (i = 0; i < NUM_ORDERS; i++) if (G.stats.total_allocated_per_order[i]) { fprintf (stderr, "Total Overhead page size %7d: %10lld\n", OBJECT_SIZE (i), G.stats.total_overhead_per_order[i]); fprintf (stderr, "Total Allocated page size %7d: %10lld\n", OBJECT_SIZE (i), G.stats.total_allocated_per_order[i]); } } #endif } struct ggc_pch_data { struct ggc_pch_ondisk { unsigned totals[NUM_ORDERS]; } d; size_t base[NUM_ORDERS]; size_t written[NUM_ORDERS]; }; struct ggc_pch_data * init_ggc_pch (void) { return xcalloc (sizeof (struct ggc_pch_data), 1); } void ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, size_t size, bool is_string ATTRIBUTE_UNUSED) { unsigned order; if (size <= 256) order = size_lookup[size]; else { order = 9; while (size > OBJECT_SIZE (order)) order++; } d->d.totals[order]++; } size_t ggc_pch_total_size (struct ggc_pch_data *d) { size_t a = 0; unsigned i; for (i = 0; i < NUM_ORDERS; i++) a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize); return a; } void ggc_pch_this_base (struct ggc_pch_data *d, void *base) { size_t a = (size_t) base; unsigned i; for (i = 0; i < NUM_ORDERS; i++) { d->base[i] = a; a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize); } } char * ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, size_t size, bool 
is_string ATTRIBUTE_UNUSED) { unsigned order; char *result; if (size <= 256) order = size_lookup[size]; else { order = 9; while (size > OBJECT_SIZE (order)) order++; } result = (char *) d->base[order]; d->base[order] += OBJECT_SIZE (order); return result; } void ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED, FILE *f ATTRIBUTE_UNUSED) { /* Nothing to do. */ } void ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, FILE *f, void *x, void *newx ATTRIBUTE_UNUSED, size_t size, bool is_string ATTRIBUTE_UNUSED) { unsigned order; static const char emptyBytes[256]; if (size <= 256) order = size_lookup[size]; else { order = 9; while (size > OBJECT_SIZE (order)) order++; } if (fwrite (x, size, 1, f) != 1) fatal_error ("can't write PCH file: %m"); /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the object out to OBJECT_SIZE(order). This happens for strings. */ if (size != OBJECT_SIZE (order)) { unsigned padding = OBJECT_SIZE(order) - size; /* To speed small writes, we use a nulled-out array that's larger than most padding requests as the source for our null bytes. This permits us to do the padding with fwrite() rather than fseek(), and limits the chance the the OS may try to flush any outstanding writes. */ if (padding <= sizeof(emptyBytes)) { if (fwrite (emptyBytes, 1, padding, f) != padding) fatal_error ("can't write PCH file"); } else { /* Larger than our buffer? Just default to fseek. */ if (fseek (f, padding, SEEK_CUR) != 0) fatal_error ("can't write PCH file"); } } d->written[order]++; if (d->written[order] == d->d.totals[order] && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order), G.pagesize), SEEK_CUR) != 0) fatal_error ("can't write PCH file: %m"); } void ggc_pch_finish (struct ggc_pch_data *d, FILE *f) { if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) fatal_error ("can't write PCH file: %m"); free (d); } /* Move the PCH PTE entries just added to the end of by_depth, to the front. */ static void move_ptes_to_front (int count_old_page_tables, int count_new_page_tables) { unsigned i; /* First, we swap the new entries to the front of the varrays. */ page_entry **new_by_depth; unsigned long **new_save_in_use; new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *)); new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *)); memcpy (&new_by_depth[0], &G.by_depth[count_old_page_tables], count_new_page_tables * sizeof (void *)); memcpy (&new_by_depth[count_new_page_tables], &G.by_depth[0], count_old_page_tables * sizeof (void *)); memcpy (&new_save_in_use[0], &G.save_in_use[count_old_page_tables], count_new_page_tables * sizeof (void *)); memcpy (&new_save_in_use[count_new_page_tables], &G.save_in_use[0], count_old_page_tables * sizeof (void *)); free (G.by_depth); free (G.save_in_use); G.by_depth = new_by_depth; G.save_in_use = new_save_in_use; /* Now update all the index_by_depth fields. */ for (i = G.by_depth_in_use; i > 0; --i) { page_entry *p = G.by_depth[i-1]; p->index_by_depth = i-1; } /* And last, we update the depth pointers in G.depth. The first entry is already 0, and context 0 entries always start at index 0, so there is nothing to update in the first slot. We need a second slot, only if we have old ptes, and if we do, they start at index count_new_page_tables. 
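After the swap the PCH entries, which all belong to context depth 0, occupy slots 0 through count_new_page_tables - 1, and the pre-existing entries, now at depth 1, follow them.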
*/ if (count_old_page_tables) push_depth (count_new_page_tables); } void ggc_pch_read (FILE *f, void *addr) { struct ggc_pch_ondisk d; unsigned i; char *offs = addr; unsigned long count_old_page_tables; unsigned long count_new_page_tables; count_old_page_tables = G.by_depth_in_use; /* We've just read in a PCH file. So, every object that used to be allocated is now free. */ clear_marks (); #ifdef ENABLE_GC_CHECKING poison_pages (); #endif /* No object read from a PCH file should ever be freed. So, set the context depth to 1, and set the depth of all the currently-allocated pages to be 1 too. PCH pages will have depth 0. */ if (G.context_depth != 0) abort (); G.context_depth = 1; for (i = 0; i < NUM_ORDERS; i++) { page_entry *p; for (p = G.pages[i]; p != NULL; p = p->next) p->context_depth = G.context_depth; } /* Allocate the appropriate page-table entries for the pages read from the PCH file. */ if (fread (&d, sizeof (d), 1, f) != 1) fatal_error ("can't read PCH file: %m"); for (i = 0; i < NUM_ORDERS; i++) { struct page_entry *entry; char *pte; size_t bytes; size_t num_objs; size_t j; if (d.totals[i] == 0) continue; bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize); num_objs = bytes / OBJECT_SIZE (i); entry = xcalloc (1, (sizeof (struct page_entry) - sizeof (long) + BITMAP_SIZE (num_objs + 1))); entry->bytes = bytes; entry->page = offs; entry->context_depth = 0; offs += bytes; entry->num_free_objects = 0; entry->order = i; for (j = 0; j + HOST_BITS_PER_LONG <= num_objs + 1; j += HOST_BITS_PER_LONG) entry->in_use_p[j / HOST_BITS_PER_LONG] = -1; for (; j < num_objs + 1; j++) entry->in_use_p[j / HOST_BITS_PER_LONG] |= 1L << (j % HOST_BITS_PER_LONG); for (pte = entry->page; pte < entry->page + entry->bytes; pte += G.pagesize) set_page_table_entry (pte, entry); if (G.page_tails[i] != NULL) G.page_tails[i]->next = entry; else G.pages[i] = entry; G.page_tails[i] = entry; /* We start off by just adding all the new information to the end of the varrays, later, we will move the new information to the front of the varrays, as the PCH page tables are at context 0. */ push_by_depth (entry, 0); } /* Now, we update the various data structures that speed page table handling. */ count_new_page_tables = G.by_depth_in_use - count_old_page_tables; move_ptes_to_front (count_old_page_tables, count_new_page_tables); /* Update the statistics. */ G.allocated = G.allocated_last_gc = offs - (char *)addr; } /* Web construction code for GNU compiler. Contributed by Jan Hubicka. Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Simple optimization pass that splits independent uses of each pseudo, increasing effectiveness of other optimizations. The optimization can serve as an example of use for the dataflow module. 
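For example, if the same pseudo holds a loop counter in one part of a function and an unrelated temporary in another, the two disjoint live ranges form separate webs and each web gets a fresh pseudo, so the register allocator and later passes can treat them independently.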
We don't split registers with REG_USERVAR set unless -fmessy-debugging is specified, because debugging information about such split variables is almost unusable. TODO - Add code to keep debugging up-to-date after splitting user variable pseudos. This can be done by keeping track of all the pseudos used for the variable and using life analysis information before reload to determine which one is live and, in case more than one are live, choose the one with the latest definition. Other optimization passes can benefit from the infrastructure too. - We may use profile information and ignore infrequent use for the purpose of web unifying, inserting the compensation code later to implement full induction variable expansion for loops (currently we expand only if the induction variable is dead afterward, which is often the case). */ /* This entry is allocated for each reference in the insn stream. */ struct web_entry { /* Pointer to the parent in the union/find tree. */ struct web_entry *pred; /* Newly assigned register to the entry. Set only for roots. */ rtx reg; }; static struct web_entry *unionfind_root (struct web_entry *); static void unionfind_union (struct web_entry *, struct web_entry *); static void union_defs (struct df *, struct ref *, struct web_entry *, struct web_entry *); static rtx entry_register (struct web_entry *, struct ref *, char *); static void replace_ref (struct ref *, rtx); /* Find the root of unionfind tree (the representative of set). */ static struct web_entry * unionfind_root (struct web_entry *element) { struct web_entry *element1 = element, *element2; while (element->pred) element = element->pred; while (element1->pred) { element2 = element1->pred; element1->pred = element; element1 = element2; } return element; } /* Union sets. */ static void unionfind_union (struct web_entry *first, struct web_entry *second) { first = unionfind_root (first); second = unionfind_root (second); if (first == second) return; second->pred = first; } /* For each use, all possible defs reaching it must come in the same register, union them. */ static void union_defs (struct df *df, struct ref *use, struct web_entry *def_entry, struct web_entry *use_entry) { rtx insn = DF_REF_INSN (use); struct df_link *link = DF_REF_CHAIN (use); struct df_link *use_link = DF_INSN_USES (df, insn); struct df_link *def_link = DF_INSN_DEFS (df, insn); rtx set = single_set (insn); /* Some instructions may use match_dup for their operands. In case the operands are dead, we will assign them different pseudos, creating invalid instructions, so union all uses of the same operand for each insn. */ while (use_link) { if (use != use_link->ref && DF_REF_REAL_REG (use) == DF_REF_REAL_REG (use_link->ref)) unionfind_union (use_entry + DF_REF_ID (use), use_entry + DF_REF_ID (use_link->ref)); use_link = use_link->next; } /* Recognize trivial noop moves and attempt to keep them as noop. While most of noop moves should be removed, we still keep some of them at libcall boundaries and such. */ if (set && SET_SRC (set) == DF_REF_REG (use) && SET_SRC (set) == SET_DEST (set)) { while (def_link) { if (DF_REF_REAL_REG (use) == DF_REF_REAL_REG (def_link->ref)) unionfind_union (use_entry + DF_REF_ID (use), def_entry + DF_REF_ID (def_link->ref)); def_link = def_link->next; } } while (link) { unionfind_union (use_entry + DF_REF_ID (use), def_entry + DF_REF_ID (link->ref)); link = link->next; } /* A READ_WRITE use requires the corresponding def to be in the same register. Find it and union. 
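Such a use is part of a read-modify-write operand, so placing the use and its def in different pseudos would change the meaning of the instruction.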
*/ if (use->flags & DF_REF_READ_WRITE) { struct df_link *link = DF_INSN_DEFS (df, DF_REF_INSN (use)); while (link) { if (DF_REF_REAL_REG (link->ref) == DF_REF_REAL_REG (use)) unionfind_union (use_entry + DF_REF_ID (use), def_entry + DF_REF_ID (link->ref)); link = link->next; } } } /* Find the corresponding register for the given entry. */ static rtx entry_register (struct web_entry *entry, struct ref *ref, char *used) { struct web_entry *root; rtx reg, newreg; /* Find the corresponding web and see if it has been visited. */ root = unionfind_root (entry); if (root->reg) return root->reg; /* We are seeing this web for the first time, do the assignment. */ reg = DF_REF_REAL_REG (ref); /* In case the original register is already assigned, generate new one. */ if (!used[REGNO (reg)]) newreg = reg, used[REGNO (reg)] = 1; else if (REG_USERVAR_P (reg) && 0/*&& !flag_messy_debugging*/) { newreg = reg; if (dump_file) fprintf (dump_file, "New web forced to keep reg=%i (user variable)\n", REGNO (reg)); } else { newreg = gen_reg_rtx (GET_MODE (reg)); REG_USERVAR_P (newreg) = REG_USERVAR_P (reg); REG_POINTER (newreg) = REG_POINTER (reg); REG_LOOP_TEST_P (newreg) = REG_LOOP_TEST_P (reg); RTX_UNCHANGING_P (newreg) = RTX_UNCHANGING_P (reg); REG_ATTRS (newreg) = REG_ATTRS (reg); if (dump_file) fprintf (dump_file, "Web oldreg=%i newreg=%i\n", REGNO (reg), REGNO (newreg)); } root->reg = newreg; return newreg; } /* Replace the reference by REG. */ static void replace_ref (struct ref *ref, rtx reg) { rtx oldreg = DF_REF_REAL_REG (ref); rtx *loc = DF_REF_REAL_LOC (ref); if (oldreg == reg) return; if (dump_file) fprintf (dump_file, "Updating insn %i (%i->%i)\n", INSN_UID (DF_REF_INSN (ref)), REGNO (oldreg), REGNO (reg)); *loc = reg; } /* Main entry point. */ void web_main (void) { struct df *df; struct web_entry *def_entry; struct web_entry *use_entry; unsigned int i; int max = max_reg_num (); char *used; df = df_init (); df_analyze (df, 0, DF_UD_CHAIN | DF_EQUIV_NOTES); def_entry = xcalloc (df->n_defs, sizeof (struct web_entry)); use_entry = xcalloc (df->n_uses, sizeof (struct web_entry)); used = xcalloc (max, sizeof (char)); if (dump_file) df_dump (df, DF_UD_CHAIN | DF_DU_CHAIN, dump_file); /* Produce the web. */ for (i = 0; i < df->n_uses; i++) union_defs (df, df->uses[i], def_entry, use_entry); /* Update the instruction stream, allocating new registers for split pseudos in progress. */ for (i = 0; i < df->n_uses; i++) replace_ref (df->uses[i], entry_register (use_entry + i, df->uses[i], used)); for (i = 0; i < df->n_defs; i++) replace_ref (df->defs[i], entry_register (def_entry + i, df->defs[i], used)); /* Dataflow information is corrupt here, but it can be easily updated by creating new entries for new registers and updates or calling df_insns_modify. */ free (def_entry); free (use_entry); free (used); df_finish (df); } /* Top level of GCC compilers (cc1, cc1plus, etc.) Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This is the top level of cc1/c++. It parses command args, opens files, invokes the various passes in the proper order, and counts the time used by each. Error messages and low-level interface to malloc also handled here. */ #undef FLOAT /* This is for hpux. They should change hpux. */ #undef FFS /* Some systems define this in param.h. */ #include <signal.h> #ifdef HAVE_SYS_RESOURCE_H # include <sys/resource.h> #endif #ifdef HAVE_SYS_TIMES_H # include <sys/times.h> #endif #if defined (DWARF2_UNWIND_INFO) || defined (DWARF2_DEBUGGING_INFO) #endif #if defined(DBX_DEBUGGING_INFO) || defined(XCOFF_DEBUGGING_INFO) #endif #ifdef SDB_DEBUGGING_INFO #endif #ifdef XCOFF_DEBUGGING_INFO #endif #ifndef HAVE_conditional_execution #define HAVE_conditional_execution 0 #endif /* Format to use to print dumpfile index value */ #ifndef DUMPFILE_FORMAT #define DUMPFILE_FORMAT ".%02d." #endif /* Describes a dump file. */ struct rtl_dump_file_info { /* The unique extension to apply, e.g. ".jump". */ const char *const extension; /* The -d character that enables this dump file. */ char const debug_switch; /* True if there is a corresponding graph dump file. */ char const graph_dump_p; /* True if the user selected this dump. */ char enabled; /* True if the files have been initialized (ie truncated). */ char initialized; }; /* Enumerate the extant dump files. */ enum dump_file_index { DFI_cgraph, DFI_rtl, DFI_sibling, DFI_eh, DFI_jump, DFI_null, DFI_cse, DFI_gcse, DFI_loop, DFI_bypass, DFI_cfg, DFI_bp, DFI_vpt, DFI_ce1, DFI_tracer, DFI_loop2, DFI_web, DFI_cse2, DFI_life, DFI_combine, DFI_ce2, DFI_regmove, DFI_sms, DFI_sched, DFI_lreg, DFI_greg, DFI_postreload, DFI_gcse2, DFI_flow2, DFI_peephole2, DFI_ce3, DFI_rnreg, DFI_bbro, DFI_branch_target_load, DFI_sched2, DFI_stack, DFI_vartrack, DFI_mach, DFI_dbr, DFI_MAX }; /* Describes all the dump files. Should be kept in order of the pass and in sync with dump_file_index above. Remaining -d letters: " e q " " F K O Q WXY " */ static struct rtl_dump_file_info dump_file_tbl[DFI_MAX] = { { "cgraph", 'U', 0, 0, 0 }, { "rtl", 'r', 0, 0, 0 }, { "sibling", 'i', 0, 0, 0 }, { "eh", 'h', 0, 0, 0 }, { "jump", 'j', 0, 0, 0 }, { "null", 'u', 0, 0, 0 }, { "cse", 's', 0, 0, 0 }, { "gcse", 'G', 1, 0, 0 }, { "loop", 'L', 1, 0, 0 }, { "bypass", 'G', 1, 0, 0 }, /* Yes, duplicate enable switch. */ { "cfg", 'f', 1, 0, 0 }, { "bp", 'b', 1, 0, 0 }, { "vpt", 'V', 1, 0, 0 }, { "ce1", 'C', 1, 0, 0 }, { "tracer", 'T', 1, 0, 0 }, { "loop2", 'L', 1, 0, 0 }, { "web", 'Z', 0, 0, 0 }, { "cse2", 't', 1, 0, 0 }, { "life", 'f', 1, 0, 0 }, /* Yes, duplicate enable switch. */ { "combine", 'c', 1, 0, 0 }, { "ce2", 'C', 1, 0, 0 }, { "regmove", 'N', 1, 0, 0 }, { "sms", 'm', 0, 0, 0 }, { "sched", 'S', 1, 0, 0 }, { "lreg", 'l', 1, 0, 0 }, { "greg", 'g', 1, 0, 0 }, { "postreload", 'o', 1, 0, 0 }, { "gcse2", 'J', 0, 0, 0 }, { "flow2", 'w', 1, 0, 0 }, { "peephole2", 'z', 1, 0, 0 }, { "ce3", 'E', 1, 0, 0 }, { "rnreg", 'n', 1, 0, 0 }, { "bbro", 'B', 1, 0, 0 }, { "btl", 'd', 1, 0, 0 }, /* Yes, duplicate enable switch. */ { "sched2", 'R', 1, 0, 0 }, { "stack", 'k', 1, 0, 0 }, { "vartrack", 'V', 1, 0, 0 }, /* Yes, duplicate enable switch. */ { "mach", 'M', 1, 0, 0 }, { "dbr", 'd', 0, 0, 0 }, }; /* Routine to open a dump file. Return true if the dump file is enabled.
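The file name is built from dump_base_name, the pass index formatted with DUMPFILE_FORMAT, and the extension from dump_file_tbl; for a base name of foo.c the DFI_rtl dump, for instance, would go to something like foo.c.01.rtl.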
*/ static int open_dump_file (enum dump_file_index index, tree decl) { char *dump_name; const char *open_arg; char seq[16]; if (! dump_file_tbl[index].enabled) return 0; timevar_push (TV_DUMP); if (dump_file != NULL) fclose (dump_file); sprintf (seq, DUMPFILE_FORMAT, index); if (! dump_file_tbl[index].initialized) { /* If we've not initialized the files, do so now. */ if (graph_dump_format != no_graph && dump_file_tbl[index].graph_dump_p) { dump_name = concat (seq, dump_file_tbl[index].extension, NULL); clean_graph_dump_file (dump_base_name, dump_name); free (dump_name); } dump_file_tbl[index].initialized = 1; open_arg = "w"; } else open_arg = "a"; dump_name = concat (dump_base_name, seq, dump_file_tbl[index].extension, NULL); dump_file = fopen (dump_name, open_arg); if (dump_file == NULL) fatal_error ("can't open %s: %m", dump_name); free (dump_name); if (decl) fprintf (dump_file, "\n;; Function %s%s\n\n", lang_hooks.decl_printable_name (decl, 2), cfun->function_frequency == FUNCTION_FREQUENCY_HOT ? " (hot)" : cfun->function_frequency == FUNCTION_FREQUENCY_UNLIKELY_EXECUTED ? " (unlikely executed)" : ""); timevar_pop (TV_DUMP); return 1; } /* Routine to close a dump file. */ static void close_dump_file (enum dump_file_index index, void (*func) (FILE *, rtx), rtx insns) { if (! dump_file) return; timevar_push (TV_DUMP); if (insns && graph_dump_format != no_graph && dump_file_tbl[index].graph_dump_p) { char seq[16]; char *suffix; sprintf (seq, DUMPFILE_FORMAT, index); suffix = concat (seq, dump_file_tbl[index].extension, NULL); print_rtl_graph_with_bb (dump_base_name, suffix, insns); free (suffix); } if (func && insns) func (dump_file, insns); fflush (dump_file); fclose (dump_file); dump_file = NULL; timevar_pop (TV_DUMP); } /* This is called from various places for FUNCTION_DECL, VAR_DECL, and TYPE_DECL nodes. This does nothing for local (non-static) variables, unless the variable is a register variable with an ASMSPEC. In that case, or if the variable is not an automatic, it sets up the RTL and outputs any assembler code (label definition, storage allocation and initialization). DECL is the declaration. If ASMSPEC is nonzero, it specifies the assembler symbol name to be used. TOP_LEVEL is nonzero if this declaration is not within a function. */ void rest_of_decl_compilation (tree decl, const char *asmspec, int top_level, int at_end) { /* We deferred calling assemble_alias so that we could collect other attributes such as visibility. Emit the alias now. */ { tree alias; alias = lookup_attribute ("alias", DECL_ATTRIBUTES (decl)); if (alias) { alias = TREE_VALUE (TREE_VALUE (alias)); alias = get_identifier (TREE_STRING_POINTER (alias)); assemble_alias (decl, alias); } } /* Forward declarations for nested functions are not "external", but we need to treat them as if they were. */ if (TREE_STATIC (decl) || DECL_EXTERNAL (decl) || TREE_CODE (decl) == FUNCTION_DECL) { timevar_push (TV_VARCONST); if (asmspec) make_decl_rtl (decl, asmspec); /* Don't output anything when a tentative file-scope definition is seen. But at end of compilation, do output code for them. 
We do output all variables when unit-at-a-time is active and rely on callgraph code to defer them except for forward declarations (see gcc.c-torture/compile/920624-1.c) */ if ((at_end || !DECL_DEFER_OUTPUT (decl) || (flag_unit_at_a_time && DECL_INITIAL (decl))) && !DECL_EXTERNAL (decl)) { if (flag_unit_at_a_time && !cgraph_global_info_ready && TREE_CODE (decl) != FUNCTION_DECL && top_level) cgraph_varpool_finalize_decl (decl); else assemble_variable (decl, top_level, at_end, 0); } #ifdef ASM_FINISH_DECLARE_OBJECT if (decl == last_assemble_variable_decl) { ASM_FINISH_DECLARE_OBJECT (asm_out_file, decl, top_level, at_end); } #endif timevar_pop (TV_VARCONST); } else if (DECL_REGISTER (decl) && asmspec != 0) { if (decode_reg_name (asmspec) >= 0) { SET_DECL_RTL (decl, NULL_RTX); make_decl_rtl (decl, asmspec); } else { error ("%Hinvalid register name `%s' for register variable", &DECL_SOURCE_LOCATION (decl), asmspec); DECL_REGISTER (decl) = 0; if (!top_level) expand_decl (decl); } } else if (TREE_CODE (decl) == TYPE_DECL) { timevar_push (TV_SYMOUT); debug_hooks->type_decl (decl, !top_level); timevar_pop (TV_SYMOUT); } } /* Called after finishing a record, union or enumeral type. */ void rest_of_type_compilation (tree type, int toplev) { /* Avoid confusing the debug information machinery when there are errors. */ if (errorcount != 0 || sorrycount != 0) return; timevar_push (TV_SYMOUT); debug_hooks->type_decl (TYPE_STUB_DECL (type), !toplev); timevar_pop (TV_SYMOUT); } /* Turn the RTL into assembly. */ static void rest_of_handle_final (void) { timevar_push (TV_FINAL); { rtx x; const char *fnname; /* Get the function's name, as described by its RTL. This may be different from the DECL_NAME name used in the source file. */ x = DECL_RTL (current_function_decl); if (!MEM_P (x)) abort (); x = XEXP (x, 0); if (GET_CODE (x) != SYMBOL_REF) abort (); fnname = XSTR (x, 0); assemble_start_function (current_function_decl, fnname); final_start_function (get_insns (), asm_out_file, optimize); final (get_insns (), asm_out_file, optimize, 0); final_end_function (); #ifdef IA64_UNWIND_INFO /* ??? The IA-64 ".handlerdata" directive must be issued before the ".endp" directive that closes the procedure descriptor. */ output_function_exception_table (); #endif assemble_end_function (current_function_decl, fnname); #ifndef IA64_UNWIND_INFO /* Otherwise, it feels unclean to switch sections in the middle. */ output_function_exception_table (); #endif if (! quiet_flag) fflush (asm_out_file); /* Release all memory allocated by flow. */ free_basic_block_vars (); /* Release all memory held by regsets now. */ regset_release_memory (); } timevar_pop (TV_FINAL); ggc_collect (); } #ifdef DELAY_SLOTS /* Run delay slot optimization. */ static void rest_of_handle_delay_slots (void) { timevar_push (TV_DBR_SCHED); open_dump_file (DFI_dbr, current_function_decl); dbr_schedule (get_insns (), dump_file); close_dump_file (DFI_dbr, print_rtl, get_insns ()); timevar_pop (TV_DBR_SCHED); ggc_collect (); } #endif #ifdef STACK_REGS /* Convert register usage from flat register file usage to a stack register file. */ static void rest_of_handle_stack_regs (void) { #if defined (HAVE_ATTR_length) /* If flow2 creates new instructions which need splitting and scheduling after reload is not done, they might not be split until final which doesn't allow splitting if HAVE_ATTR_length. 
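So split everything explicitly here, before reg_to_stack runs.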
*/ #ifdef INSN_SCHEDULING if (optimize && !flag_schedule_insns_after_reload) #else if (optimize) #endif { timevar_push (TV_SHORTEN_BRANCH); split_all_insns (1); timevar_pop (TV_SHORTEN_BRANCH); } #endif timevar_push (TV_REG_STACK); open_dump_file (DFI_stack, current_function_decl); if (reg_to_stack (dump_file) && optimize) { if (cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_POST_REGSTACK | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0)) && (flag_reorder_blocks || flag_reorder_blocks_and_partition)) { reorder_basic_blocks (); cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_POST_REGSTACK); } } close_dump_file (DFI_stack, print_rtl_with_bb, get_insns ()); timevar_pop (TV_REG_STACK); ggc_collect (); } #endif /* Track the variables, ie. compute where the variable is stored at each position in function. */ static void rest_of_handle_variable_tracking (void) { timevar_push (TV_VAR_TRACKING); open_dump_file (DFI_vartrack, current_function_decl); variable_tracking_main (); close_dump_file (DFI_vartrack, print_rtl_with_bb, get_insns ()); timevar_pop (TV_VAR_TRACKING); } /* Machine independent reorg pass. */ static void rest_of_handle_machine_reorg (void) { timevar_push (TV_MACH_DEP); open_dump_file (DFI_mach, current_function_decl); targetm.machine_dependent_reorg (); close_dump_file (DFI_mach, print_rtl, get_insns ()); timevar_pop (TV_MACH_DEP); ggc_collect (); } /* Run new register allocator. Return TRUE if we must exit rest_of_compilation upon return. */ static bool rest_of_handle_new_regalloc (void) { int failure; delete_trivially_dead_insns (get_insns (), max_reg_num ()); reg_alloc (); timevar_pop (TV_LOCAL_ALLOC); if (dump_file_tbl[DFI_lreg].enabled) { timevar_push (TV_DUMP); close_dump_file (DFI_lreg, NULL, NULL); timevar_pop (TV_DUMP); } /* XXX clean up the whole mess to bring live info in shape again. */ timevar_push (TV_GLOBAL_ALLOC); open_dump_file (DFI_greg, current_function_decl); build_insn_chain (get_insns ()); failure = reload (get_insns (), 0); timevar_pop (TV_GLOBAL_ALLOC); if (dump_file_tbl[DFI_greg].enabled) { timevar_push (TV_DUMP); dump_global_regs (dump_file); close_dump_file (DFI_greg, print_rtl_with_bb, get_insns ()); timevar_pop (TV_DUMP); } if (failure) return true; reload_completed = 1; return false; } /* Run old register allocator. Return TRUE if we must exit rest_of_compilation upon return. */ static bool rest_of_handle_old_regalloc (void) { int failure; int rebuild_notes; /* Allocate the reg_renumber array. */ allocate_reg_info (max_regno, FALSE, TRUE); /* And the reg_equiv_memory_loc array. */ VARRAY_GROW (reg_equiv_memory_loc_varray, max_regno); reg_equiv_memory_loc = &VARRAY_RTX (reg_equiv_memory_loc_varray, 0); allocate_initial_values (reg_equiv_memory_loc); regclass (get_insns (), max_reg_num (), dump_file); rebuild_notes = local_alloc (); timevar_pop (TV_LOCAL_ALLOC); /* Local allocation may have turned an indirect jump into a direct jump. If so, we must rebuild the JUMP_LABEL fields of jumping instructions. */ if (rebuild_notes) { timevar_push (TV_JUMP); rebuild_jump_labels (get_insns ()); purge_all_dead_edges (0); timevar_pop (TV_JUMP); } if (dump_file_tbl[DFI_lreg].enabled) { timevar_push (TV_DUMP); dump_flow_info (dump_file); dump_local_alloc (dump_file); close_dump_file (DFI_lreg, print_rtl_with_bb, get_insns ()); timevar_pop (TV_DUMP); } ggc_collect (); timevar_push (TV_GLOBAL_ALLOC); open_dump_file (DFI_greg, current_function_decl); /* If optimizing, allocate remaining pseudo-regs. Do the reload pass fixing up any insns that are invalid. 
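When not optimizing, global allocation is skipped entirely and reload is run directly on the result of local allocation.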
*/ if (optimize) failure = global_alloc (dump_file); else { build_insn_chain (get_insns ()); failure = reload (get_insns (), 0); } timevar_pop (TV_GLOBAL_ALLOC); if (dump_file_tbl[DFI_greg].enabled) { timevar_push (TV_DUMP); dump_global_regs (dump_file); close_dump_file (DFI_greg, print_rtl_with_bb, get_insns ()); timevar_pop (TV_DUMP); } return failure; } /* Run the regrename and cprop passes. */ static void rest_of_handle_regrename (void) { timevar_push (TV_RENAME_REGISTERS); open_dump_file (DFI_rnreg, current_function_decl); if (flag_rename_registers) regrename_optimize (); if (flag_cprop_registers) copyprop_hardreg_forward (); close_dump_file (DFI_rnreg, print_rtl_with_bb, get_insns ()); timevar_pop (TV_RENAME_REGISTERS); } /* Reorder basic blocks. */ static void rest_of_handle_reorder_blocks (void) { bool changed; open_dump_file (DFI_bbro, current_function_decl); /* Last attempt to optimize CFG, as scheduling, peepholing and insn splitting possibly introduced more crossjumping opportunities. */ changed = cleanup_cfg (CLEANUP_EXPENSIVE | (!HAVE_conditional_execution ? CLEANUP_UPDATE_LIFE : 0)); if (flag_sched2_use_traces && flag_schedule_insns_after_reload) tracer (); if (flag_reorder_blocks || flag_reorder_blocks_and_partition) reorder_basic_blocks (); if (flag_reorder_blocks || flag_reorder_blocks_and_partition || (flag_sched2_use_traces && flag_schedule_insns_after_reload)) changed |= cleanup_cfg (CLEANUP_EXPENSIVE | (!HAVE_conditional_execution ? CLEANUP_UPDATE_LIFE : 0)); /* On conditional execution targets we can not update the life cheaply, so we deffer the updating to after both cleanups. This may lose some cases but should not be terribly bad. */ if (changed && HAVE_conditional_execution) update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES); close_dump_file (DFI_bbro, print_rtl_with_bb, get_insns ()); } #ifdef INSN_SCHEDULING /* Run instruction scheduler. */ static void rest_of_handle_sched (void) { timevar_push (TV_SMS); if (optimize > 0 && flag_modulo_sched) { /* Perform SMS module scheduling. */ open_dump_file (DFI_sms, current_function_decl); /* We want to be able to create new pseudos. */ no_new_pseudos = 0; sms_schedule (dump_file); close_dump_file (DFI_sms, print_rtl, get_insns ()); /* Update the life information, because we add pseudos. */ max_regno = max_reg_num (); allocate_reg_info (max_regno, FALSE, FALSE); update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES, (PROP_DEATH_NOTES | PROP_KILL_DEAD_CODE | PROP_SCAN_DEAD_CODE)); no_new_pseudos = 1; } timevar_pop (TV_SMS); timevar_push (TV_SCHED); /* Print function header into sched dump now because doing the sched analysis makes some of the dump. */ if (optimize > 0 && flag_schedule_insns) { open_dump_file (DFI_sched, current_function_decl); /* Do control and data sched analysis, and write some of the results to dump file. */ schedule_insns (dump_file); close_dump_file (DFI_sched, print_rtl_with_bb, get_insns ()); } timevar_pop (TV_SCHED); ggc_collect (); } /* Run second scheduling pass after reload. */ static void rest_of_handle_sched2 (void) { timevar_push (TV_SCHED2); open_dump_file (DFI_sched2, current_function_decl); /* Do control and data sched analysis again, and write some more of the results to dump file. */ split_all_insns (1); if (flag_sched2_use_superblocks || flag_sched2_use_traces) { schedule_ebbs (dump_file); /* No liveness updating code yet, but it should be easy to do. reg-stack recomputes the liveness when needed for now. 
*/ count_or_remove_death_notes (NULL, 1); cleanup_cfg (CLEANUP_EXPENSIVE); } else schedule_insns (dump_file); close_dump_file (DFI_sched2, print_rtl_with_bb, get_insns ()); timevar_pop (TV_SCHED2); ggc_collect (); } #endif static void rest_of_handle_gcse2 (void) { open_dump_file (DFI_gcse2, current_function_decl); gcse_after_reload_main (get_insns (), dump_file); rebuild_jump_labels (get_insns ()); delete_trivially_dead_insns (get_insns (), max_reg_num ()); close_dump_file (DFI_gcse2, print_rtl_with_bb, get_insns ()); ggc_collect (); #ifdef ENABLE_CHECKING verify_flow_info (); #endif } /* Register allocation pre-pass, to reduce number of moves necessary for two-address machines. */ static void rest_of_handle_regmove (void) { timevar_push (TV_REGMOVE); open_dump_file (DFI_regmove, current_function_decl); regmove_optimize (get_insns (), max_reg_num (), dump_file); cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE); close_dump_file (DFI_regmove, print_rtl_with_bb, get_insns ()); timevar_pop (TV_REGMOVE); ggc_collect (); } /* Run tracer. */ static void rest_of_handle_tracer (void) { open_dump_file (DFI_tracer, current_function_decl); if (dump_file) dump_flow_info (dump_file); tracer (); cleanup_cfg (CLEANUP_EXPENSIVE); reg_scan (get_insns (), max_reg_num (), 0); close_dump_file (DFI_tracer, print_rtl_with_bb, get_insns ()); } /* If-conversion and CFG cleanup. */ static void rest_of_handle_if_conversion (void) { open_dump_file (DFI_ce1, current_function_decl); if (flag_if_conversion) { timevar_push (TV_IFCVT); if (dump_file) dump_flow_info (dump_file); cleanup_cfg (CLEANUP_EXPENSIVE); reg_scan (get_insns (), max_reg_num (), 0); if_convert (0); timevar_pop (TV_IFCVT); } timevar_push (TV_JUMP); cleanup_cfg (CLEANUP_EXPENSIVE); reg_scan (get_insns (), max_reg_num (), 0); timevar_pop (TV_JUMP); close_dump_file (DFI_ce1, print_rtl_with_bb, get_insns ()); } /* Rerun if-conversion, as combine may have simplified things enough to now meet sequence length restrictions. */ static void rest_of_handle_if_after_combine (void) { timevar_push (TV_IFCVT); open_dump_file (DFI_ce2, current_function_decl); no_new_pseudos = 0; if_convert (1); no_new_pseudos = 1; close_dump_file (DFI_ce2, print_rtl_with_bb, get_insns ()); timevar_pop (TV_IFCVT); } static void rest_of_handle_web (void) { open_dump_file (DFI_web, current_function_decl); timevar_push (TV_WEB); web_main (); delete_trivially_dead_insns (get_insns (), max_reg_num ()); cleanup_cfg (CLEANUP_EXPENSIVE); timevar_pop (TV_WEB); close_dump_file (DFI_web, print_rtl_with_bb, get_insns ()); reg_scan (get_insns (), max_reg_num (), 0); } /* Do branch profiling and static profile estimation passes. */ static void rest_of_handle_branch_prob (void) { struct loops loops; timevar_push (TV_BRANCH_PROB); open_dump_file (DFI_bp, current_function_decl); if (profile_arc_flag || flag_test_coverage || flag_branch_probabilities) branch_prob (); /* Discover and record the loop depth at the head of each basic block. The loop infrastructure does the real job for us. */ flow_loops_find (&loops, LOOP_TREE); if (dump_file) flow_loops_dump (&loops, dump_file, NULL, 0); /* Estimate using heuristics if no profiling info is available. */ if (flag_guess_branch_prob) estimate_probability (&loops); flow_loops_free (&loops); free_dominance_info (CDI_DOMINATORS); close_dump_file (DFI_bp, print_rtl_with_bb, get_insns ()); timevar_pop (TV_BRANCH_PROB); } /* Do optimizations based on expression value profiles. 
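   One classical transformation of this kind specializes a division or
   modulo operation for the divisor the profile found to dominate.  A
   minimal standalone sketch of the idea, assuming a hypothetical hot
   divisor of 16 (illustration only, not code from this pass):  */

/* Hedged illustration: speculate on the divisor the profile reported as
   most common; fall back to the generic operation otherwise.  */
static unsigned int
speculated_mod_sketch (unsigned int n, unsigned int d)
{
  if (d == 16)                  /* hypothetical most common divisor */
    return n & 15;              /* cheap power-of-two remainder */
  return n % d;                 /* generic fallback path */
}

/* The driver below simply runs the registered transformations and, if any
   of them changed the insn stream, cleans up the CFG afterwards.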
*/ static void rest_of_handle_value_profile_transformations (void) { open_dump_file (DFI_vpt, current_function_decl); timevar_push (TV_VPT); if (value_profile_transformations ()) cleanup_cfg (CLEANUP_EXPENSIVE); timevar_pop (TV_VPT); close_dump_file (DFI_vpt, print_rtl_with_bb, get_insns ()); } /* Do control and data flow analysis; write some of the results to the dump file. */ static void rest_of_handle_cfg (void) { open_dump_file (DFI_cfg, current_function_decl); if (dump_file) dump_flow_info (dump_file); if (optimize) cleanup_cfg (CLEANUP_EXPENSIVE | (flag_thread_jumps ? CLEANUP_THREADING : 0)); /* It may make more sense to mark constant functions after dead code is eliminated by life_analysis, but we need to do it early, as -fprofile-arcs may insert code making function non-constant, but we still must consider it as constant, otherwise -fbranch-probabilities will not read data back. life_analysis rarely eliminates modification of external memory. */ if (optimize) { /* Alias analysis depends on this information and mark_constant_function depends on alias analysis. */ reg_scan (get_insns (), max_reg_num (), 1); mark_constant_function (); } close_dump_file (DFI_cfg, print_rtl_with_bb, get_insns ()); } /* Perform jump bypassing and control flow optimizations. */ static void rest_of_handle_jump_bypass (void) { timevar_push (TV_BYPASS); open_dump_file (DFI_bypass, current_function_decl); cleanup_cfg (CLEANUP_EXPENSIVE); reg_scan (get_insns (), max_reg_num (), 1); if (bypass_jumps (dump_file)) { rebuild_jump_labels (get_insns ()); cleanup_cfg (CLEANUP_EXPENSIVE); delete_trivially_dead_insns (get_insns (), max_reg_num ()); } close_dump_file (DFI_bypass, print_rtl_with_bb, get_insns ()); timevar_pop (TV_BYPASS); ggc_collect (); #ifdef ENABLE_CHECKING verify_flow_info (); #endif } /* Try combining insns through substitution. */ static void rest_of_handle_combine (void) { int rebuild_jump_labels_after_combine = 0; timevar_push (TV_COMBINE); open_dump_file (DFI_combine, current_function_decl); rebuild_jump_labels_after_combine = combine_instructions (get_insns (), max_reg_num ()); /* Combining get_insns () may have turned an indirect jump into a direct jump. Rebuild the JUMP_LABEL fields of jumping instructions. */ if (rebuild_jump_labels_after_combine) { timevar_push (TV_JUMP); rebuild_jump_labels (get_insns ()); timevar_pop (TV_JUMP); cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE); } close_dump_file (DFI_combine, print_rtl_with_bb, get_insns ()); timevar_pop (TV_COMBINE); ggc_collect (); } /* Perform life analysis. */ static void rest_of_handle_life (void) { open_dump_file (DFI_life, current_function_decl); regclass_init (); #ifdef ENABLE_CHECKING verify_flow_info (); #endif life_analysis (dump_file, PROP_FINAL); if (optimize) cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_UPDATE_LIFE | CLEANUP_LOG_LINKS | (flag_thread_jumps ? CLEANUP_THREADING : 0)); timevar_pop (TV_FLOW); if (extra_warnings) { setjmp_vars_warning (DECL_INITIAL (current_function_decl)); setjmp_args_warning (); } if (optimize) { if (!flag_new_regalloc && initialize_uninitialized_subregs ()) { /* Insns were inserted, and possibly pseudos created, so things might look a bit different. */ allocate_reg_life_data (); update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_LOG_LINKS | PROP_REG_INFO | PROP_DEATH_NOTES); } } no_new_pseudos = 1; close_dump_file (DFI_life, print_rtl_with_bb, get_insns ()); ggc_collect (); } /* Perform common subexpression elimination. 
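   For intuition, the effect the pass aims for can be pictured at the
   source level; a hedged before/after sketch (hypothetical helpers, not
   code used by GCC):  */

/* Illustration only: `a + b' is computed twice; it is a common
   subexpression.  */
static int
cse_sketch_before (int a, int b)
{
  return (a + b) * (a + b);
}

/* After elimination the value is computed once and reused.  */
static int
cse_sketch_after (int a, int b)
{
  int t = a + b;
  return t * t;
}

/*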
Nonzero value from `cse_main' means that jumps were simplified and some code may now be unreachable, so do jump optimization again. */ static void rest_of_handle_cse (void) { int tem; open_dump_file (DFI_cse, current_function_decl); if (dump_file) dump_flow_info (dump_file); timevar_push (TV_CSE); reg_scan (get_insns (), max_reg_num (), 1); tem = cse_main (get_insns (), max_reg_num (), 0, dump_file); if (tem) rebuild_jump_labels (get_insns ()); if (purge_all_dead_edges (0)) delete_unreachable_blocks (); delete_trivially_dead_insns (get_insns (), max_reg_num ()); /* If we are not running more CSE passes, then we are no longer expecting CSE to be run. But always rerun it in a cheap mode. */ cse_not_expected = !flag_rerun_cse_after_loop && !flag_gcse; if (tem || optimize > 1) cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP); timevar_pop (TV_CSE); close_dump_file (DFI_cse, print_rtl_with_bb, get_insns ()); } /* Run second CSE pass after loop optimizations. */ static void rest_of_handle_cse2 (void) { int tem; timevar_push (TV_CSE2); open_dump_file (DFI_cse2, current_function_decl); if (dump_file) dump_flow_info (dump_file); /* CFG is no longer maintained up-to-date. */ tem = cse_main (get_insns (), max_reg_num (), 1, dump_file); /* Run a pass to eliminate duplicated assignments to condition code registers. We have to run this after bypass_jumps, because it makes it harder for that pass to determine whether a jump can be bypassed safely. */ cse_condition_code_reg (); purge_all_dead_edges (0); delete_trivially_dead_insns (get_insns (), max_reg_num ()); if (tem) { timevar_push (TV_JUMP); rebuild_jump_labels (get_insns ()); cleanup_cfg (CLEANUP_EXPENSIVE); timevar_pop (TV_JUMP); } reg_scan (get_insns (), max_reg_num (), 0); close_dump_file (DFI_cse2, print_rtl_with_bb, get_insns ()); ggc_collect (); timevar_pop (TV_CSE2); } /* Perform global cse. */ static void rest_of_handle_gcse (void) { int save_csb, save_cfj; int tem2 = 0, tem; timevar_push (TV_GCSE); open_dump_file (DFI_gcse, current_function_decl); tem = gcse_main (get_insns (), dump_file); rebuild_jump_labels (get_insns ()); delete_trivially_dead_insns (get_insns (), max_reg_num ()); save_csb = flag_cse_skip_blocks; save_cfj = flag_cse_follow_jumps; flag_cse_skip_blocks = flag_cse_follow_jumps = 0; /* If -fexpensive-optimizations, re-run CSE to clean up things done by gcse. */ if (flag_expensive_optimizations) { timevar_push (TV_CSE); reg_scan (get_insns (), max_reg_num (), 1); tem2 = cse_main (get_insns (), max_reg_num (), 0, dump_file); purge_all_dead_edges (0); delete_trivially_dead_insns (get_insns (), max_reg_num ()); timevar_pop (TV_CSE); cse_not_expected = !flag_rerun_cse_after_loop; } /* If gcse or cse altered any jumps, rerun jump optimizations to clean things up. Then possibly re-run CSE again. */ while (tem || tem2) { tem = tem2 = 0; timevar_push (TV_JUMP); rebuild_jump_labels (get_insns ()); cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP); timevar_pop (TV_JUMP); if (flag_expensive_optimizations) { timevar_push (TV_CSE); reg_scan (get_insns (), max_reg_num (), 1); tem2 = cse_main (get_insns (), max_reg_num (), 0, dump_file); purge_all_dead_edges (0); delete_trivially_dead_insns (get_insns (), max_reg_num ()); timevar_pop (TV_CSE); } } close_dump_file (DFI_gcse, print_rtl_with_bb, get_insns ()); timevar_pop (TV_GCSE); ggc_collect (); flag_cse_skip_blocks = save_csb; flag_cse_follow_jumps = save_cfj; #ifdef ENABLE_CHECKING verify_flow_info (); #endif } /* Move constant computations out of loops. 
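   As a hedged source-level sketch of the kind of motion performed here
   (hypothetical names, not code used by GCC), a computation that does not
   change inside the loop is evaluated once before it:  */

/* Illustration only: `scale * 4' is loop-invariant and has been hoisted
   out of the loop body.  */
static void
licm_sketch (int *out, const int *in, int n, int scale)
{
  int factor = scale * 4;       /* hoisted invariant computation */
  int i;

  for (i = 0; i < n; i++)
    out[i] = in[i] * factor;
}

/* The driver below also decides which unroller to use and whether to
   request prefetching before invoking the loop optimizer.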
*/ static void rest_of_handle_loop_optimize (void) { int do_unroll, do_prefetch; timevar_push (TV_LOOP); delete_dead_jumptables (); cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP); open_dump_file (DFI_loop, current_function_decl); /* CFG is no longer maintained up-to-date. */ free_bb_for_insn (); if (flag_unroll_loops) do_unroll = LOOP_AUTO_UNROLL; /* Having two unrollers is useless. */ else do_unroll = flag_old_unroll_loops ? LOOP_UNROLL : LOOP_AUTO_UNROLL; do_prefetch = flag_prefetch_loop_arrays ? LOOP_PREFETCH : 0; if (flag_rerun_loop_opt) { cleanup_barriers (); /* We only want to perform unrolling once. */ loop_optimize (get_insns (), dump_file, do_unroll); do_unroll = 0; /* The first call to loop_optimize makes some instructions trivially dead. We delete those instructions now in the hope that doing so will make the heuristics in loop work better and possibly speed up compilation. */ delete_trivially_dead_insns (get_insns (), max_reg_num ()); /* The regscan pass is currently necessary as the alias analysis code depends on this information. */ reg_scan (get_insns (), max_reg_num (), 1); } cleanup_barriers (); loop_optimize (get_insns (), dump_file, do_unroll | do_prefetch); /* Loop can create trivially dead instructions. */ delete_trivially_dead_insns (get_insns (), max_reg_num ()); close_dump_file (DFI_loop, print_rtl, get_insns ()); timevar_pop (TV_LOOP); find_basic_blocks (get_insns (), max_reg_num (), dump_file); ggc_collect (); } /* Perform loop optimizations. It might be better to do them a bit sooner, but we want the profile feedback to work more efficiently. */ static void rest_of_handle_loop2 (void) { struct loops *loops; basic_block bb; if (!flag_move_loop_invariants && !flag_unswitch_loops && !flag_peel_loops && !flag_unroll_loops && !flag_branch_on_count_reg) return; timevar_push (TV_LOOP); open_dump_file (DFI_loop2, current_function_decl); if (dump_file) dump_flow_info (dump_file); /* Initialize structures for layout changes. */ cfg_layout_initialize (); loops = loop_optimizer_init (dump_file); if (loops) { /* The optimizations: */ if (flag_move_loop_invariants) move_loop_invariants (loops); if (flag_unswitch_loops) unswitch_loops (loops); if (flag_peel_loops || flag_unroll_loops) unroll_and_peel_loops (loops, (flag_peel_loops ? UAP_PEEL : 0) | (flag_unroll_loops ? UAP_UNROLL : 0) | (flag_unroll_all_loops ? UAP_UNROLL_ALL : 0)); #ifdef HAVE_doloop_end if (flag_branch_on_count_reg && HAVE_doloop_end) doloop_optimize_loops (loops); #endif /* HAVE_doloop_end */ loop_optimizer_finalize (loops, dump_file); } /* Finalize layout changes. */ FOR_EACH_BB (bb) if (bb->next_bb != EXIT_BLOCK_PTR) bb->rbi->next = bb->next_bb; cfg_layout_finalize (); cleanup_cfg (CLEANUP_EXPENSIVE); delete_trivially_dead_insns (get_insns (), max_reg_num ()); reg_scan (get_insns (), max_reg_num (), 0); if (dump_file) dump_flow_info (dump_file); close_dump_file (DFI_loop2, print_rtl_with_bb, get_insns ()); timevar_pop (TV_LOOP); ggc_collect (); } /* This is called from finish_function (within langhooks.parse_file) after each top-level definition is parsed. It is supposed to compile that function or variable and output the assembler code for it. After we return, the tree storage is freed. */ void rest_of_compilation (void) { /* There's no need to defer outputting this function any more; we know we want to output it. */ DECL_DEFER_OUTPUT (current_function_decl) = 0; /* There's no need to defer outputting this function any more; we know we want to output it. 
*/ DECL_DEFER_OUTPUT (current_function_decl) = 0; /* Register rtl specific functions for cfg. */ rtl_register_cfg_hooks (); /* Now that we're out of the frontend, we shouldn't have any more CONCATs anywhere. */ generating_concat_p = 0; /* When processing delayed functions, prepare_function_start() won't have been run to re-initialize it. */ cse_not_expected = ! optimize; finalize_block_changes (); /* Dump the rtl code if we are dumping rtl. */ if (open_dump_file (DFI_rtl, current_function_decl)) close_dump_file (DFI_rtl, print_rtl, get_insns ()); /* Convert from NOTE_INSN_EH_REGION style notes, and do other sorts of eh initialization. Delay this until after the initial rtl dump so that we can see the original nesting. */ convert_from_eh_region_ranges (); /* If we're emitting a nested function, make sure its parent gets emitted as well. Doing otherwise confuses debug info. */ { tree parent; for (parent = DECL_CONTEXT (current_function_decl); parent != NULL_TREE; parent = get_containing_scope (parent)) if (TREE_CODE (parent) == FUNCTION_DECL) TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (parent)) = 1; } /* We are now committed to emitting code for this function. Do any preparation, such as emitting abstract debug info for the inline before it gets mangled by optimization. */ if (cgraph_function_possibly_inlined_p (current_function_decl)) (*debug_hooks->outlining_inline_function) (current_function_decl); /* Remove any notes we don't need. That will make iterating over the instruction sequence faster, and allow the garbage collector to reclaim the memory used by the notes. */ remove_unnecessary_notes (); ggc_collect (); /* Initialize some variables used by the optimizers. */ init_function_for_compilation (); TREE_ASM_WRITTEN (current_function_decl) = 1; /* Now that integrate will no longer see our rtl, we need not distinguish between the return value of this function and the return value of called functions. Also, we can remove all SETs of subregs of hard registers; they are only here because of integrate. Also, we can now initialize pseudos intended to carry magic hard reg data throughout the function. */ rtx_equal_function_value_matters = 0; purge_hard_subreg_sets (get_insns ()); /* Early return if there were errors. We can run afoul of our consistency checks, and there's not really much point in fixing them. */ if (rtl_dump_and_exit || flag_syntax_only || errorcount || sorrycount) goto exit_rest_of_compilation; timevar_push (TV_JUMP); open_dump_file (DFI_sibling, current_function_decl); /* ??? We may get called either via tree_rest_of_compilation when the CFG is already built or directly (for instance from coverage code). The direct callers shall be updated. */ if (!basic_block_info) { init_flow (); rebuild_jump_labels (get_insns ()); find_exception_handler_labels (); find_basic_blocks (get_insns (), max_reg_num (), dump_file); } delete_unreachable_blocks (); #ifdef ENABLE_CHECKING verify_flow_info(); #endif /* Turn NOTE_INSN_PREDICTIONs into branch predictions. */ if (flag_guess_branch_prob) { timevar_push (TV_BRANCH_PROB); note_prediction_to_br_prob (); timevar_pop (TV_BRANCH_PROB); } timevar_pop (TV_JUMP); if (cfun->tail_call_emit) fixup_tail_calls (); insn_locators_initialize (); /* Complete generation of exception handling code. 
*/ if (doing_eh (0)) { timevar_push (TV_JUMP); open_dump_file (DFI_eh, current_function_decl); finish_eh_generation (); close_dump_file (DFI_eh, print_rtl, get_insns ()); timevar_pop (TV_JUMP); } /* Delay emitting hard_reg_initial_value sets until after EH landing pad generation, which might create new sets. */ emit_initial_value_sets (); #ifdef FINALIZE_PIC /* If we are doing position-independent code generation, now is the time to output special prologues and epilogues. We do not want to do this earlier, because it just clutters up inline functions with meaningless insns. */ if (flag_pic) FINALIZE_PIC; #endif /* Copy any shared structure that should not be shared. */ unshare_all_rtl (); #ifdef SETJMP_VIA_SAVE_AREA /* This must be performed before virtual register instantiation. Please be aware that everything in the compiler that can look at the RTL up to this point must understand that REG_SAVE_AREA is just like a use of the REG contained inside. */ if (current_function_calls_alloca) optimize_save_area_alloca (get_insns ()); #endif /* Instantiate all virtual registers. */ instantiate_virtual_regs (); open_dump_file (DFI_jump, current_function_decl); /* Always do one jump optimization pass to ensure that JUMP_LABEL fields are initialized and to compute whether control can drop off the end of the function. */ timevar_push (TV_JUMP); /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB. Do this before jump optimization switches branch directions. */ if (flag_guess_branch_prob) expected_value_to_br_prob (); delete_trivially_dead_insns (get_insns (), max_reg_num ()); reg_scan (get_insns(), max_reg_num (), 0); if (dump_file) dump_flow_info (dump_file); cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_PRE_LOOP | (flag_thread_jumps ? CLEANUP_THREADING : 0)); create_loop_notes (); purge_line_number_notes (get_insns ()); close_dump_file (DFI_jump, print_rtl, get_insns ()); if (optimize) cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP); /* Jump optimization, and the removal of NULL pointer checks, may have reduced the number of instructions substantially. CSE, and future passes, allocate arrays whose dimensions involve the maximum instruction UID, so if we can reduce the maximum UID we'll save big on memory. */ renumber_insns (dump_file); timevar_pop (TV_JUMP); close_dump_file (DFI_jump, print_rtl_with_bb, get_insns ()); ggc_collect (); if (optimize > 0) rest_of_handle_cse (); ggc_collect (); if (optimize > 0) { if (flag_gcse) rest_of_handle_gcse (); if (flag_loop_optimize) rest_of_handle_loop_optimize (); if (flag_gcse) rest_of_handle_jump_bypass (); } timevar_push (TV_FLOW); rest_of_handle_cfg (); if (!flag_tree_based_profiling && (optimize > 0 || profile_arc_flag || flag_test_coverage || flag_branch_probabilities)) { rtl_register_profile_hooks (); rtl_register_value_prof_hooks (); rest_of_handle_branch_prob (); if (flag_branch_probabilities && flag_profile_values && flag_value_profile_transformations) rest_of_handle_value_profile_transformations (); /* Remove the death notes created for vpt. 
*/ if (flag_profile_values) count_or_remove_death_notes (NULL, 1); } if (optimize > 0) rest_of_handle_if_conversion (); if (flag_tracer) rest_of_handle_tracer (); if (optimize > 0 && flag_loop_optimize2) rest_of_handle_loop2 (); if (flag_web) rest_of_handle_web (); if (flag_rerun_cse_after_loop) rest_of_handle_cse2 (); cse_not_expected = 1; rest_of_handle_life (); if (optimize > 0) rest_of_handle_combine (); if (flag_if_conversion) rest_of_handle_if_after_combine (); /* The optimization to partition hot/cold basic blocks into separate sections of the .o file does not work well with exception handling. Don't call it if there are exceptions. */ if (flag_reorder_blocks_and_partition && !flag_exceptions) { no_new_pseudos = 0; partition_hot_cold_basic_blocks (); allocate_reg_life_data (); update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_LOG_LINKS | PROP_REG_INFO | PROP_DEATH_NOTES); no_new_pseudos = 1; } if (optimize > 0 && (flag_regmove || flag_expensive_optimizations)) rest_of_handle_regmove (); /* Do unconditional splitting before register allocation to allow machine description to add extra information not needed previously. */ split_all_insns (1); #ifdef OPTIMIZE_MODE_SWITCHING timevar_push (TV_MODE_SWITCH); no_new_pseudos = 0; optimize_mode_switching (NULL); no_new_pseudos = 1; timevar_pop (TV_MODE_SWITCH); #endif /* Any of the several passes since flow1 will have munged register lifetime data a bit. We need it to be up to date for scheduling (see handling of reg_known_equiv in init_alias_analysis). */ recompute_reg_usage (get_insns (), !optimize_size); #ifdef INSN_SCHEDULING rest_of_handle_sched (); #endif /* Determine if the current function is a leaf before running reload since this can impact optimizations done by the prologue and epilogue thus changing register elimination offsets. */ current_function_is_leaf = leaf_function_p (); timevar_push (TV_LOCAL_ALLOC); open_dump_file (DFI_lreg, current_function_decl); if (flag_new_regalloc) { if (rest_of_handle_new_regalloc ()) goto exit_rest_of_compilation; } else { if (rest_of_handle_old_regalloc ()) goto exit_rest_of_compilation; } ggc_collect (); open_dump_file (DFI_postreload, current_function_decl); /* Do a very simple CSE pass over just the hard registers. */ if (optimize > 0) { timevar_push (TV_RELOAD_CSE_REGS); reload_cse_regs (get_insns ()); /* reload_cse_regs can eliminate potentially-trapping MEMs. Remove any EH edges associated with them. */ if (flag_non_call_exceptions) purge_all_dead_edges (0); timevar_pop (TV_RELOAD_CSE_REGS); } close_dump_file (DFI_postreload, print_rtl_with_bb, get_insns ()); if (optimize > 0 && flag_gcse_after_reload) rest_of_handle_gcse2 (); /* Re-create the death notes which were deleted during reload. */ timevar_push (TV_FLOW2); open_dump_file (DFI_flow2, current_function_decl); #ifdef ENABLE_CHECKING verify_flow_info (); #endif /* If optimizing, then go ahead and split get_insns () now. */ #ifndef STACK_REGS if (optimize > 0) #endif split_all_insns (0); if (flag_branch_target_load_optimize) { open_dump_file (DFI_branch_target_load, current_function_decl); branch_target_load_optimize (/*after_prologue_epilogue_gen=*/false); close_dump_file (DFI_branch_target_load, print_rtl_with_bb, get_insns ()); ggc_collect (); } if (! targetm.late_rtl_prologue_epilogue) { if (optimize) cleanup_cfg (CLEANUP_EXPENSIVE); /* On some machines, the prologue and epilogue code, or parts thereof, can be represented as RTL. 
Doing so lets us schedule insns between it and the rest of the code and also allows delayed branch scheduling to operate in the epilogue. */ thread_prologue_and_epilogue_insns (get_insns ()); epilogue_completed = 1; } if (optimize) { life_analysis (dump_file, PROP_POSTRELOAD); cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0)); /* This is kind of a heuristic. We need to run combine_stack_adjustments even for machines with possibly nonzero RETURN_POPS_ARGS and ACCUMULATE_OUTGOING_ARGS. We expect that only ports having push instructions will have popping returns. */ #ifndef PUSH_ROUNDING if (!ACCUMULATE_OUTGOING_ARGS) #endif combine_stack_adjustments (); ggc_collect (); } flow2_completed = 1; close_dump_file (DFI_flow2, print_rtl_with_bb, get_insns ()); timevar_pop (TV_FLOW2); #ifdef HAVE_peephole2 if (optimize > 0 && flag_peephole2) { timevar_push (TV_PEEPHOLE2); open_dump_file (DFI_peephole2, current_function_decl); peephole2_optimize (dump_file); close_dump_file (DFI_peephole2, print_rtl_with_bb, get_insns ()); timevar_pop (TV_PEEPHOLE2); } #endif open_dump_file (DFI_ce3, current_function_decl); if (optimize) /* Last attempt to optimize CFG, as scheduling, peepholing and insn splitting possibly introduced more crossjumping opportunities. */ cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0)); if (flag_if_conversion2) { timevar_push (TV_IFCVT2); if_convert (1); timevar_pop (TV_IFCVT2); } close_dump_file (DFI_ce3, print_rtl_with_bb, get_insns ()); if (optimize > 0) { if (flag_rename_registers || flag_cprop_registers) rest_of_handle_regrename (); rest_of_handle_reorder_blocks (); } if (flag_branch_target_load_optimize2) { /* Leave this a warning for now so that it is possible to experiment with running this pass twice. In 3.6, we should either make this an error, or use separate dump files. */ if (flag_branch_target_load_optimize) warning ("branch target register load optimization is not intended " "to be run twice"); open_dump_file (DFI_branch_target_load, current_function_decl); branch_target_load_optimize (/*after_prologue_epilogue_gen=*/true); close_dump_file (DFI_branch_target_load, print_rtl_with_bb, get_insns ()); ggc_collect (); } #ifdef LEAF_REGISTERS current_function_uses_only_leaf_regs = optimize > 0 && only_leaf_regs_used () && leaf_function_p (); #endif if (targetm.late_rtl_prologue_epilogue) { /* On some machines, the prologue and epilogue code, or parts thereof, can be represented as RTL. Doing so lets us schedule insns between it and the rest of the code and also allows delayed branch scheduling to operate in the epilogue. */ thread_prologue_and_epilogue_insns (get_insns ()); epilogue_completed = 1; if (optimize) life_analysis (dump_file, PROP_POSTRELOAD); } #ifdef INSN_SCHEDULING if (optimize > 0 && flag_schedule_insns_after_reload) rest_of_handle_sched2 (); #endif #ifdef STACK_REGS rest_of_handle_stack_regs (); #endif compute_alignments (); if (flag_var_tracking) rest_of_handle_variable_tracking (); /* CFG is no longer maintained up-to-date. 
*/ free_bb_for_insn (); if (targetm.machine_dependent_reorg != 0) rest_of_handle_machine_reorg (); purge_line_number_notes (get_insns ()); cleanup_barriers (); #ifdef DELAY_SLOTS if (optimize > 0 && flag_delayed_branch) rest_of_handle_delay_slots (); #endif #if defined (HAVE_ATTR_length) && !defined (STACK_REGS) timevar_push (TV_SHORTEN_BRANCH); split_all_insns_noflow (); timevar_pop (TV_SHORTEN_BRANCH); #endif convert_to_eh_region_ranges (); /* Shorten branches. */ timevar_push (TV_SHORTEN_BRANCH); shorten_branches (get_insns ()); timevar_pop (TV_SHORTEN_BRANCH); set_nothrow_function_flags (); if (current_function_nothrow) /* Now we know that this can't throw; set the flag for the benefit of other functions later in this translation unit. */ TREE_NOTHROW (current_function_decl) = 1; rest_of_handle_final (); /* Write DBX symbols if requested. */ /* Note that for those inline functions where we don't initially know for certain that we will be generating an out-of-line copy, the first invocation of this routine (rest_of_compilation) will skip over this code by doing a `goto exit_rest_of_compilation;'. Later on, wrapup_global_declarations will (indirectly) call rest_of_compilation again for those inline functions that need to have out-of-line copies generated. During that call, we *will* be routed past here. */ timevar_push (TV_SYMOUT); (*debug_hooks->function_decl) (current_function_decl); timevar_pop (TV_SYMOUT); exit_rest_of_compilation: coverage_end_function (); /* In case the function was not output, don't leave any temporary anonymous types queued up for sdb output. */ #ifdef SDB_DEBUGGING_INFO if (write_symbols == SDB_DEBUG) sdbout_types (NULL_TREE); #endif reload_completed = 0; epilogue_completed = 0; flow2_completed = 0; no_new_pseudos = 0; timevar_push (TV_FINAL); /* Clear out the insn_length contents now that they are no longer valid. */ init_insn_lengths (); /* Show no temporary slots allocated. */ init_temp_slots (); free_basic_block_vars (); free_bb_for_insn (); timevar_pop (TV_FINAL); if (targetm.binds_local_p (current_function_decl)) { int pref = cfun->preferred_stack_boundary; if (cfun->recursive_call_emit && cfun->stack_alignment_needed > cfun->preferred_stack_boundary) pref = cfun->stack_alignment_needed; cgraph_rtl_info (current_function_decl)->preferred_incoming_stack_boundary = pref; } /* Make sure volatile mem refs aren't considered valid operands for arithmetic insns. We must call this here if this is a nested inline function, since the above code leaves us in the init_recog state (from final.c), and the function context push/pop code does not save/restore volatile_ok. ??? Maybe it isn't necessary for expand_start_function to call this anymore if we do it here? */ init_recog_no_volatile (); /* We're done with this function. Free up memory if we can. 
*/ free_after_parsing (cfun); } void init_optimization_passes (void) { open_dump_file (DFI_cgraph, NULL); cgraph_dump_file = dump_file; dump_file = NULL; } void finish_optimization_passes (void) { if (profile_arc_flag || flag_test_coverage || flag_branch_probabilities) { timevar_push (TV_DUMP); open_dump_file (DFI_bp, NULL); end_branch_prob (); close_dump_file (DFI_bp, NULL, NULL_RTX); timevar_pop (TV_DUMP); } if (optimize > 0 && open_dump_file (DFI_combine, NULL)) { timevar_push (TV_DUMP); dump_combine_total_stats (dump_file); close_dump_file (DFI_combine, NULL, NULL_RTX); timevar_pop (TV_DUMP); } dump_file = cgraph_dump_file; cgraph_dump_file = NULL; close_dump_file (DFI_cgraph, NULL, NULL_RTX); /* Do whatever is necessary to finish printing the graphs. */ if (graph_dump_format != no_graph) { int i; for (i = 0; i < (int) DFI_MAX; ++i) if (dump_file_tbl[i].initialized && dump_file_tbl[i].graph_dump_p) { char seq[16]; char *suffix; sprintf (seq, DUMPFILE_FORMAT, i); suffix = concat (seq, dump_file_tbl[i].extension, NULL); finish_graph_dump_file (dump_base_name, suffix); free (suffix); } } } bool enable_rtl_dump_file (int letter) { bool matched = false; int i; if (letter == 'a') { for (i = 0; i < (int) DFI_MAX; ++i) dump_file_tbl[i].enabled = 1; matched = true; } else { for (i = 0; i < (int) DFI_MAX; ++i) if (letter == dump_file_tbl[i].debug_switch) { dump_file_tbl[i].enabled = 1; matched = true; } } return matched; } struct tree_opt_pass pass_rest_of_compilation = { "rest of compilation", /* name */ NULL, /* gate */ rest_of_compilation, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_REST_OF_COMPILATION, /* tv_id */ PROP_rtl, /* properties_required */ 0, /* properties_provided */ PROP_rtl, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_ggc_collect /* todo_flags_finish */ }; /* Calculate branch probabilities, and basic block execution counts. Copyright (C) 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. Contributed by James E. Wilson, UC Berkeley/Cygnus Support; based on some ideas from Dain Samples of UC Berkeley. Further mangling by Bob Manson, Cygnus Support. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Generate basic block profile instrumentation and auxiliary files. Profile generation is optimized, so that not all arcs in the basic block graph need instrumenting. First, the BB graph is closed with one entry (function start), and one exit (function exit). Any ABNORMAL_EDGE cannot be instrumented (because there is no control path to place the code). We close the graph by inserting fake EDGE_FAKE edges to the EXIT_BLOCK, from the sources of abnormal edges that do not go to the exit_block. We ignore such abnormal edges. Naturally these fake edges are never directly traversed, and so *cannot* be directly instrumented. Some other graph massaging is done. 
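   On the edges that are instrumented, the generated code merely bumps one
   counter; a hedged standalone C sketch of what the emitted RTL amounts to
   (hypothetical names, plain long long standing in for gcov_type):  */

/* Illustration only, not GCC code: per-edge instrumentation is a single
   increment of the edge's slot in the per-function arc counter array.  */
static void
edge_profiler_sketch (long long *arc_counters, int edgeno)
{
  arc_counters[edgeno] += 1;
}

/*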
To optimize the instrumentation we generate the BB minimal span tree, only edges that are not on the span tree (plus the entry point) need instrumenting. From that information all other edge counts can be deduced. By construction all fake edges must be on the spanning tree. We also attempt to place EDGE_CRITICAL edges on the spanning tree. The auxiliary file generated is .bbg. The format is described in full in gcov-io.h. */ /* ??? Register allocation should use basic block execution counts to give preference to the most commonly executed blocks. */ /* ??? Should calculate branch probabilities before instrumenting code, since then we can use arc counts to help decide which arcs to instrument. */ /* Output instructions as RTL to increment the edge execution count. */ static void rtl_gen_edge_profiler (int edgeno, edge e) { rtx ref = rtl_coverage_counter_ref (GCOV_COUNTER_ARCS, edgeno); rtx tmp; enum machine_mode mode = GET_MODE (ref); rtx sequence; start_sequence (); ref = validize_mem (ref); tmp = expand_simple_binop (mode, PLUS, ref, const1_rtx, ref, 0, OPTAB_WIDEN); if (tmp != ref) emit_move_insn (copy_rtx (ref), tmp); sequence = get_insns (); end_sequence (); insert_insn_on_edge (sequence, e); rebuild_jump_labels (e->insns.r); } /* Output instructions as RTL to increment the interval histogram counter. VALUE is the expression whose value is profiled. TAG is the tag of the section for counters, BASE is offset of the counter position. */ static void rtl_gen_interval_profiler (struct histogram_value *value, unsigned tag, unsigned base) { unsigned gcov_size = tree_low_cst (TYPE_SIZE (GCOV_TYPE_NODE), 1); enum machine_mode mode = mode_for_size (gcov_size, MODE_INT, 0); rtx mem_ref, tmp, tmp1, mr, val; rtx sequence; rtx more_label = gen_label_rtx (); rtx less_label = gen_label_rtx (); rtx end_of_code_label = gen_label_rtx (); int per_counter = gcov_size / BITS_PER_UNIT; edge e = split_block (BLOCK_FOR_INSN ((rtx)value->insn), PREV_INSN ((rtx)value->insn)); start_sequence (); if (value->seq) emit_insn (value->seq); mr = gen_reg_rtx (Pmode); tmp = rtl_coverage_counter_ref (tag, base); tmp = force_reg (Pmode, XEXP (tmp, 0)); val = expand_simple_binop (value->mode, MINUS, copy_rtx (value->value), GEN_INT (value->hdata.intvl.int_start), NULL_RTX, 0, OPTAB_WIDEN); if (value->hdata.intvl.may_be_more) do_compare_rtx_and_jump (copy_rtx (val), GEN_INT (value->hdata.intvl.steps), GE, 0, value->mode, NULL_RTX, NULL_RTX, more_label); if (value->hdata.intvl.may_be_less) do_compare_rtx_and_jump (copy_rtx (val), const0_rtx, LT, 0, value->mode, NULL_RTX, NULL_RTX, less_label); /* We are in range. */ tmp1 = expand_simple_binop (value->mode, MULT, copy_rtx (val), GEN_INT (per_counter), NULL_RTX, 0, OPTAB_WIDEN); tmp1 = expand_simple_binop (Pmode, PLUS, copy_rtx (tmp), tmp1, mr, 0, OPTAB_WIDEN); if (tmp1 != mr) emit_move_insn (copy_rtx (mr), tmp1); if (value->hdata.intvl.may_be_more || value->hdata.intvl.may_be_less) { emit_jump_insn (gen_jump (end_of_code_label)); emit_barrier (); } /* Above the interval. */ if (value->hdata.intvl.may_be_more) { emit_label (more_label); tmp1 = expand_simple_binop (Pmode, PLUS, copy_rtx (tmp), GEN_INT (per_counter * value->hdata.intvl.steps), mr, 0, OPTAB_WIDEN); if (tmp1 != mr) emit_move_insn (copy_rtx (mr), tmp1); if (value->hdata.intvl.may_be_less) { emit_jump_insn (gen_jump (end_of_code_label)); emit_barrier (); } } /* Below the interval. 
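   That is, the subtracted value `val' is negative (the profiled value is
   smaller than int_start); its executions go to a dedicated underflow
   counter placed after the `steps' in-range counters and, when present,
   after the overflow counter.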
*/ if (value->hdata.intvl.may_be_less) { emit_label (less_label); tmp1 = expand_simple_binop (Pmode, PLUS, copy_rtx (tmp), GEN_INT (per_counter * (value->hdata.intvl.steps + (value->hdata.intvl.may_be_more ? 1 : 0))), mr, 0, OPTAB_WIDEN); if (tmp1 != mr) emit_move_insn (copy_rtx (mr), tmp1); } if (value->hdata.intvl.may_be_more || value->hdata.intvl.may_be_less) emit_label (end_of_code_label); mem_ref = validize_mem (gen_rtx_MEM (mode, mr)); tmp = expand_simple_binop (mode, PLUS, copy_rtx (mem_ref), const1_rtx, mem_ref, 0, OPTAB_WIDEN); if (tmp != mem_ref) emit_move_insn (copy_rtx (mem_ref), tmp); sequence = get_insns (); end_sequence (); rebuild_jump_labels (sequence); safe_insert_insn_on_edge (sequence, e); } /* Output instructions as RTL to increment the power of two histogram counter. VALUE is the expression whose value is profiled. TAG is the tag of the section for counters, BASE is offset of the counter position. */ static void rtl_gen_pow2_profiler (struct histogram_value *value, unsigned tag, unsigned base) { unsigned gcov_size = tree_low_cst (TYPE_SIZE (GCOV_TYPE_NODE), 1); enum machine_mode mode = mode_for_size (gcov_size, MODE_INT, 0); rtx mem_ref, tmp, mr, uval; rtx sequence; rtx end_of_code_label = gen_label_rtx (); rtx loop_label = gen_label_rtx (); int per_counter = gcov_size / BITS_PER_UNIT; edge e = split_block (BLOCK_FOR_INSN ((rtx)value->insn), PREV_INSN ((rtx)value->insn)); start_sequence (); if (value->seq) emit_insn (value->seq); mr = gen_reg_rtx (Pmode); tmp = rtl_coverage_counter_ref (tag, base); tmp = force_reg (Pmode, XEXP (tmp, 0)); emit_move_insn (mr, tmp); uval = gen_reg_rtx (value->mode); emit_move_insn (uval, copy_rtx (value->value)); /* Check for non-power of 2. */ if (value->hdata.pow2.may_be_other) { do_compare_rtx_and_jump (copy_rtx (uval), const0_rtx, LE, 0, value->mode, NULL_RTX, NULL_RTX, end_of_code_label); tmp = expand_simple_binop (value->mode, PLUS, copy_rtx (uval), constm1_rtx, NULL_RTX, 0, OPTAB_WIDEN); tmp = expand_simple_binop (value->mode, AND, copy_rtx (uval), tmp, NULL_RTX, 0, OPTAB_WIDEN); do_compare_rtx_and_jump (tmp, const0_rtx, NE, 0, value->mode, NULL_RTX, NULL_RTX, end_of_code_label); } /* Count log_2(value). */ emit_label (loop_label); tmp = expand_simple_binop (Pmode, PLUS, copy_rtx (mr), GEN_INT (per_counter), mr, 0, OPTAB_WIDEN); if (tmp != mr) emit_move_insn (copy_rtx (mr), tmp); tmp = expand_simple_binop (value->mode, ASHIFTRT, copy_rtx (uval), const1_rtx, uval, 0, OPTAB_WIDEN); if (tmp != uval) emit_move_insn (copy_rtx (uval), tmp); do_compare_rtx_and_jump (copy_rtx (uval), const0_rtx, NE, 0, value->mode, NULL_RTX, NULL_RTX, loop_label); /* Increase the counter. */ emit_label (end_of_code_label); mem_ref = validize_mem (gen_rtx_MEM (mode, mr)); tmp = expand_simple_binop (mode, PLUS, copy_rtx (mem_ref), const1_rtx, mem_ref, 0, OPTAB_WIDEN); if (tmp != mem_ref) emit_move_insn (copy_rtx (mem_ref), tmp); sequence = get_insns (); end_sequence (); rebuild_jump_labels (sequence); safe_insert_insn_on_edge (sequence, e); } /* Output instructions as RTL for code to find the most common value. VALUE is the expression whose value is profiled. TAG is the tag of the section for counters, BASE is offset of the counter position. 
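   Three consecutive counters are used: the value currently being tracked,
   a vote counter, and the total number of executions.  The update follows
   the classic majority-vote scheme; a hedged standalone C sketch
   (hypothetical names, plain long long standing in for gcov_type):  */

/* Illustration only, not GCC code.  counters[0] is the tracked value,
   counters[1] the vote count, counters[2] the total execution count.  */
static void
one_value_profiler_sketch (long long counters[3], long long value)
{
  if (value == counters[0])
    counters[1]++;              /* the tracked value was seen again */
  else if (counters[1] == 0)
    {
      counters[0] = value;      /* votes exhausted: start tracking VALUE */
      counters[1] = 1;
    }
  else
    counters[1]--;              /* a different value costs one vote */
  counters[2]++;                /* every execution is counted */
}

/* The helper below emits RTL for this sequence; it is split out without
   the edge manipulation so that the const-delta profiler can reuse it.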
*/ static rtx rtl_gen_one_value_profiler_no_edge_manipulation (struct histogram_value *value, unsigned tag, unsigned base) { unsigned gcov_size = tree_low_cst (TYPE_SIZE (GCOV_TYPE_NODE), 1); enum machine_mode mode = mode_for_size (gcov_size, MODE_INT, 0); rtx stored_value_ref, counter_ref, all_ref, stored_value, counter, all; rtx tmp, uval; rtx sequence; rtx same_label = gen_label_rtx (); rtx zero_label = gen_label_rtx (); rtx end_of_code_label = gen_label_rtx (); start_sequence (); if (value->seq) emit_insn (value->seq); stored_value_ref = rtl_coverage_counter_ref (tag, base); counter_ref = rtl_coverage_counter_ref (tag, base + 1); all_ref = rtl_coverage_counter_ref (tag, base + 2); stored_value = validize_mem (stored_value_ref); counter = validize_mem (counter_ref); all = validize_mem (all_ref); uval = gen_reg_rtx (mode); convert_move (uval, copy_rtx (value->value), 0); /* Check if the stored value matches. */ do_compare_rtx_and_jump (copy_rtx (uval), copy_rtx (stored_value), EQ, 0, mode, NULL_RTX, NULL_RTX, same_label); /* Does not match; check whether the counter is zero. */ do_compare_rtx_and_jump (copy_rtx (counter), const0_rtx, EQ, 0, mode, NULL_RTX, NULL_RTX, zero_label); /* The counter is not zero yet. */ tmp = expand_simple_binop (mode, PLUS, copy_rtx (counter), constm1_rtx, counter, 0, OPTAB_WIDEN); if (tmp != counter) emit_move_insn (copy_rtx (counter), tmp); emit_jump_insn (gen_jump (end_of_code_label)); emit_barrier (); emit_label (zero_label); /* Set new value. */ emit_move_insn (copy_rtx (stored_value), copy_rtx (uval)); emit_label (same_label); /* Increase the counter. */ tmp = expand_simple_binop (mode, PLUS, copy_rtx (counter), const1_rtx, counter, 0, OPTAB_WIDEN); if (tmp != counter) emit_move_insn (copy_rtx (counter), tmp); emit_label (end_of_code_label); /* Increase the counter of all executions; this seems redundant given that ve have counts for edges in cfg, but it may happen that some optimization will change the counts for the block (either because it is unable to update them correctly, or because it will duplicate the block or its part). */ tmp = expand_simple_binop (mode, PLUS, copy_rtx (all), const1_rtx, all, 0, OPTAB_WIDEN); if (tmp != all) emit_move_insn (copy_rtx (all), tmp); sequence = get_insns (); end_sequence (); return sequence; } /* Output instructions as RTL for code to find the most common value. VALUE is the expression whose value is profiled. TAG is the tag of the section for counters, BASE is offset of the counter position. */ static void rtl_gen_one_value_profiler (struct histogram_value *value, unsigned tag, unsigned base) { edge e = split_block (BLOCK_FOR_INSN ((rtx)value->insn), PREV_INSN ((rtx)value->insn)); rtx sequence = rtl_gen_one_value_profiler_no_edge_manipulation (value, tag, base); rebuild_jump_labels (sequence); safe_insert_insn_on_edge (sequence, e); } /* Output instructions as RTL for code to find the most common value of a difference between two evaluations of an expression. VALUE is the expression whose value is profiled. TAG is the tag of the section for counters, BASE is offset of the counter position. 
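   In other words, the previously seen value is remembered in an extra
   counter and the one-value machinery is applied to the difference.  A
   hedged C sketch, reusing the sketch above (hypothetical names):  */

/* Illustration only, not GCC code.  counters[0] remembers the previous
   value; counters[1..3] are the one-value counters applied to the delta.  */
static void
const_delta_profiler_sketch (long long counters[4], long long value)
{
  one_value_profiler_sketch (&counters[1], value - counters[0]);
  counters[0] = value;
}

/* The real expander below builds the same shape out of RTL, using the
   no-edge-manipulation helper for the inner step.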
*/ static void rtl_gen_const_delta_profiler (struct histogram_value *value, unsigned tag, unsigned base) { struct histogram_value one_value_delta; unsigned gcov_size = tree_low_cst (TYPE_SIZE (GCOV_TYPE_NODE), 1); enum machine_mode mode = mode_for_size (gcov_size, MODE_INT, 0); rtx stored_value_ref, stored_value, tmp, uval; rtx sequence; edge e = split_block (BLOCK_FOR_INSN ((rtx)value->insn), PREV_INSN ((rtx)value->insn)); start_sequence (); if (value->seq) emit_insn (value->seq); stored_value_ref = rtl_coverage_counter_ref (tag, base); stored_value = validize_mem (stored_value_ref); uval = gen_reg_rtx (mode); convert_move (uval, copy_rtx (value->value), 0); tmp = expand_simple_binop (mode, MINUS, copy_rtx (uval), copy_rtx (stored_value), NULL_RTX, 0, OPTAB_WIDEN); one_value_delta.value = tmp; one_value_delta.mode = mode; one_value_delta.seq = NULL_RTX; one_value_delta.insn = value->insn; one_value_delta.type = HIST_TYPE_SINGLE_VALUE; emit_insn (rtl_gen_one_value_profiler_no_edge_manipulation (&one_value_delta, tag, base + 1)); emit_move_insn (copy_rtx (stored_value), uval); sequence = get_insns (); end_sequence (); rebuild_jump_labels (sequence); safe_insert_insn_on_edge (sequence, e); } /* Return the file on which profile dump output goes, if any. */ static FILE *rtl_profile_dump_file (void) { return dump_file; } struct profile_hooks rtl_profile_hooks = { rtl_gen_edge_profiler, rtl_gen_interval_profiler, rtl_gen_pow2_profiler, rtl_gen_one_value_profiler, rtl_gen_const_delta_profiler, rtl_profile_dump_file }; /* Calculate branch probabilities, and basic block execution counts. Copyright (C) 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by James E. Wilson, UC Berkeley/Cygnus Support; based on some ideas from Dain Samples of UC Berkeley. Further mangling by Bob Manson, Cygnus Support. Converted to use trees by Dale Johannesen, Apple Computer. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Generate basic block profile instrumentation and auxiliary files. Profile generation is optimized, so that not all arcs in the basic block graph need instrumenting. First, the BB graph is closed with one entry (function start), and one exit (function exit). Any ABNORMAL_EDGE cannot be instrumented (because there is no control path to place the code). We close the graph by inserting fake EDGE_FAKE edges to the EXIT_BLOCK, from the sources of abnormal edges that do not go to the exit_block. We ignore such abnormal edges. Naturally these fake edges are never directly traversed, and so *cannot* be directly instrumented. Some other graph massaging is done. To optimize the instrumentation we generate the BB minimal span tree, only edges that are not on the span tree (plus the entry point) need instrumenting. From that information all other edge counts can be deduced. 
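   The deduction is plain flow conservation: for every block the counts of
   the incoming edges and of the outgoing edges both sum to the block's
   execution count, so once all but one incident edge of a block is known
   the last one follows by subtraction.  A hedged standalone C sketch
   (hypothetical names, not GCC code):  */

/* Illustration only: recover the count of the single uninstrumented edge
   of a block from the block count and the already-known edge counts.  */
static long long
missing_edge_count_sketch (long long block_count,
                           const long long *known_edge_counts, int n_known)
{
  long long sum = 0;
  int i;

  for (i = 0; i < n_known; i++)
    sum += known_edge_counts[i];
  return block_count - sum;
}

/*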
By construction all fake edges must be on the spanning tree. We also attempt to place EDGE_CRITICAL edges on the spanning tree. The auxiliary file generated is .bbg. The format is described in full in gcov-io.h. */ /* ??? Register allocation should use basic block execution counts to give preference to the most commonly executed blocks. */ /* ??? Should calculate branch probabilities before instrumenting code, since then we can use arc counts to help decide which arcs to instrument. */ /* Output instructions as GIMPLE trees to increment the edge execution count, and insert them on E. We rely on bsi_insert_on_edge to preserve the order. */ static void tree_gen_edge_profiler (int edgeno, edge e) { tree tmp1 = create_tmp_var (GCOV_TYPE_NODE, "PROF"); tree tmp2 = create_tmp_var (GCOV_TYPE_NODE, "PROF"); tree ref = tree_coverage_counter_ref (GCOV_COUNTER_ARCS, edgeno); tree stmt1 = build (MODIFY_EXPR, GCOV_TYPE_NODE, tmp1, ref); tree stmt2 = build (MODIFY_EXPR, GCOV_TYPE_NODE, tmp2, build (PLUS_EXPR, GCOV_TYPE_NODE, tmp1, integer_one_node)); tree stmt3 = build (MODIFY_EXPR, GCOV_TYPE_NODE, ref, tmp2); bsi_insert_on_edge (e, stmt1); bsi_insert_on_edge (e, stmt2); bsi_insert_on_edge (e, stmt3); } /* Output instructions as GIMPLE trees to increment the interval histogram counter. VALUE is the expression whose value is profiled. TAG is the tag of the section for counters, BASE is offset of the counter position. */ static void tree_gen_interval_profiler (struct histogram_value *value ATTRIBUTE_UNUSED, unsigned tag ATTRIBUTE_UNUSED, unsigned base ATTRIBUTE_UNUSED) { /* FIXME implement this. */ abort (); } /* Output instructions as GIMPLE trees to increment the power of two histogram counter. VALUE is the expression whose value is profiled. TAG is the tag of the section for counters, BASE is offset of the counter position. */ static void tree_gen_pow2_profiler (struct histogram_value *value ATTRIBUTE_UNUSED, unsigned tag ATTRIBUTE_UNUSED, unsigned base ATTRIBUTE_UNUSED) { /* FIXME implement this. */ abort (); } /* Output instructions as GIMPLE trees for code to find the most common value. VALUE is the expression whose value is profiled. TAG is the tag of the section for counters, BASE is offset of the counter position. */ static void tree_gen_one_value_profiler (struct histogram_value *value ATTRIBUTE_UNUSED, unsigned tag ATTRIBUTE_UNUSED, unsigned base ATTRIBUTE_UNUSED) { /* FIXME implement this. */ abort (); } /* Output instructions as GIMPLE trees for code to find the most common value of a difference between two evaluations of an expression. VALUE is the expression whose value is profiled. TAG is the tag of the section for counters, BASE is offset of the counter position. */ static void tree_gen_const_delta_profiler (struct histogram_value *value ATTRIBUTE_UNUSED, unsigned tag ATTRIBUTE_UNUSED, unsigned base ATTRIBUTE_UNUSED) { /* FIXME implement this. */ abort (); } /* Return 1 if tree-based profiling is in effect, else 0. If it is, set up hooks for tree-based profiling. Gate for pass_tree_profile. */ static bool do_tree_profiling (void) { if (flag_tree_based_profiling) { tree_register_profile_hooks (); tree_register_value_prof_hooks (); } return flag_tree_based_profiling; } /* Return the file on which profile dump output goes, if any. 
*/ static FILE *tree_profile_dump_file (void) { return dump_file; } struct tree_opt_pass pass_tree_profile = { "tree_profile", /* name */ do_tree_profiling, /* gate */ branch_prob, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_BRANCH_PROB, /* tv_id */ PROP_gimple_leh | PROP_cfg, /* properties_required */ PROP_gimple_leh | PROP_cfg, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_verify_stmts /* todo_flags_finish */ }; struct profile_hooks tree_profile_hooks = { tree_gen_edge_profiler, /* gen_edge_profiler */ tree_gen_interval_profiler, /* gen_interval_profiler */ tree_gen_pow2_profiler, /* gen_pow2_profiler */ tree_gen_one_value_profiler, /* gen_one_value_profiler */ tree_gen_const_delta_profiler,/* gen_const_delta_profiler */ tree_profile_dump_file /* profile_dump_file */ }; /* Generic hooks for the RTL middle-end. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* For speed, we will copy the RTX hooks struct member-by-member instead of doing indirect calls. For these reason, we initialize *two* struct rtl_hooks globals: rtl_hooks is the one that is used to actually call the hooks, while general_rtl_hooks is used to restore the hooks by passes that modify them. */ const struct rtl_hooks general_rtl_hooks = RTL_HOOKS_INITIALIZER; struct rtl_hooks rtl_hooks = RTL_HOOKS_INITIALIZER; rtx gen_lowpart_general (enum machine_mode mode, rtx x) { rtx result = gen_lowpart_common (mode, x); if (result) return result; else if (REG_P (x)) { /* Must be a hard reg that's not valid in MODE. */ result = gen_lowpart_common (mode, copy_to_reg (x)); if (result == 0) abort (); return result; } else if (MEM_P (x)) { /* The only additional case we can do is MEM. */ int offset = 0; /* The following exposes the use of "x" to CSE. */ if (GET_MODE_SIZE (GET_MODE (x)) <= UNITS_PER_WORD && SCALAR_INT_MODE_P (GET_MODE (x)) && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (GET_MODE (x))) && ! no_new_pseudos) return gen_lowpart_general (mode, force_reg (GET_MODE (x), x)); if (WORDS_BIG_ENDIAN) offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD) - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD)); if (BYTES_BIG_ENDIAN) /* Adjust the address so that the address-after-the-data is unchanged. 
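   For instance, on a big-endian target the SImode lowpart of a DImode MEM
   ends up at byte offset 4, so the adjusted reference and the original one
   end at the same address.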
*/ offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode)) - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x)))); return adjust_address (x, mode, offset); } else abort (); } rtx reg_num_sign_bit_copies_general (rtx x ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED, rtx known_x ATTRIBUTE_UNUSED, enum machine_mode known_mode ATTRIBUTE_UNUSED, unsigned int known_ret ATTRIBUTE_UNUSED, unsigned int *result ATTRIBUTE_UNUSED) { return NULL; } rtx reg_nonzero_bits_general (rtx x ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED, rtx known_x ATTRIBUTE_UNUSED, enum machine_mode known_mode ATTRIBUTE_UNUSED, unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED, unsigned HOST_WIDE_INT *nonzero ATTRIBUTE_UNUSED) { return NULL; } /* A pass for lowering trees to RTL. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Expand basic block BB from GIMPLE trees to RTL. */ static basic_block expand_block (basic_block bb, FILE * dump_file) { block_stmt_iterator bsi = bsi_start (bb); tree stmt = NULL; rtx note, last; edge e; if (dump_file) { tree_register_cfg_hooks (); dump_bb (bb, dump_file, 0); rtl_register_cfg_hooks (); } if (!bsi_end_p (bsi)) stmt = bsi_stmt (bsi); if (stmt && TREE_CODE (stmt) == LABEL_EXPR) { last = get_last_insn (); expand_expr_stmt (stmt); /* Java emits line number notes in the top of labels. ??? Make this go away once line number notes are obsoleted. */ BB_HEAD (bb) = NEXT_INSN (last); if (GET_CODE (BB_HEAD (bb)) == NOTE) BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb)); bsi_next (&bsi); note = emit_note_after (NOTE_INSN_BASIC_BLOCK, BB_HEAD (bb)); } else note = BB_HEAD (bb) = emit_note (NOTE_INSN_BASIC_BLOCK); NOTE_BASIC_BLOCK (note) = bb; e = bb->succ; while (e) { edge next = e->succ_next; /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */ e->flags &= ~EDGE_EXECUTABLE; /* At the moment not all abnormal edges match the RTL representation. It is safe to remove them here as find_sub_basic_blocks will rediscover them. In the future we should get this fixed properly. */ if (e->flags & EDGE_ABNORMAL) remove_edge (e); e = next; } for (; !bsi_end_p (bsi); bsi_next (&bsi)) { tree stmt = bsi_stmt (bsi); last = get_last_insn (); if (!stmt) continue; /* Expand this statement, then evaluate the resulting RTL and fixup the CFG accordingly. */ switch (TREE_CODE (stmt)) { case COND_EXPR: { basic_block new_bb, dest; edge new_edge; edge true_edge; edge false_edge; tree pred = COND_EXPR_COND (stmt); tree then_exp = COND_EXPR_THEN (stmt); tree else_exp = COND_EXPR_ELSE (stmt); rtx last = get_last_insn (); extract_true_false_edges_from_block (bb, &true_edge, &false_edge); if (EXPR_LOCUS (stmt)) { emit_line_note (*(EXPR_LOCUS (stmt))); record_block_change (TREE_BLOCK (stmt)); } /* These flags have no purpose in RTL land. 
*/ true_edge->flags &= ~EDGE_TRUE_VALUE; false_edge->flags &= ~EDGE_FALSE_VALUE; /* We can either have a pure conditional jump with one fallthru edge or two-way jump that needs to be decomposed into two basic blocks. */ if (TREE_CODE (then_exp) == GOTO_EXPR && TREE_CODE (else_exp) == NOP_EXPR) { jumpif (pred, label_rtx (GOTO_DESTINATION (then_exp))); break; } if (TREE_CODE (else_exp) == GOTO_EXPR && TREE_CODE (then_exp) == NOP_EXPR) { jumpifnot (pred, label_rtx (GOTO_DESTINATION (else_exp))); break; } if (TREE_CODE (then_exp) != GOTO_EXPR || TREE_CODE (else_exp) != GOTO_EXPR) abort (); jumpif (pred, label_rtx (GOTO_DESTINATION (then_exp))); last = get_last_insn (); expand_expr (else_exp, const0_rtx, VOIDmode, 0); BB_END (bb) = last; if (GET_CODE (BB_END (bb)) == BARRIER) BB_END (bb) = PREV_INSN (BB_END (bb)); update_bb_for_insn (bb); new_bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb); dest = false_edge->dest; redirect_edge_succ (false_edge, new_bb); false_edge->flags |= EDGE_FALLTHRU; new_bb->count = false_edge->count; new_bb->frequency = EDGE_FREQUENCY (false_edge); new_edge = make_edge (new_bb, dest, 0); new_edge->probability = REG_BR_PROB_BASE; new_edge->count = new_bb->count; if (GET_CODE (BB_END (new_bb)) == BARRIER) BB_END (new_bb) = PREV_INSN (BB_END (new_bb)); update_bb_for_insn (new_bb); if (dump_file) { dump_bb (bb, dump_file, 0); dump_bb (new_bb, dump_file, 0); } return new_bb; } /* Update after expansion of sibling call. */ case CALL_EXPR: case MODIFY_EXPR: case RETURN_EXPR: expand_expr_stmt (stmt); for (last = NEXT_INSN (last); last; last = NEXT_INSN (last)) { if (GET_CODE (last) == CALL_INSN && SIBLING_CALL_P (last)) { edge e; int probability = 0; gcov_type count = 0; do_pending_stack_adjust (); e = bb->succ; while (e) { edge next = e->succ_next; if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH))) { if (e->dest != EXIT_BLOCK_PTR) { e->dest->count -= e->count; e->dest->frequency -= EDGE_FREQUENCY (e); if (e->dest->count < 0) e->dest->count = 0; if (e->dest->frequency < 0) e->dest->frequency = 0; } count += e->count; probability += e->probability; remove_edge (e); } e = next; } /* This is somewhat ugly: the call_expr expander often emits instructions after the sibcall (to perform the function return). These confuse the find_sub_basic_blocks code, so we need to get rid of these. */ last = NEXT_INSN (last); if (GET_CODE (last) != BARRIER) abort (); while (NEXT_INSN (last)) { /* For instance an sqrt builtin expander expands if with sibcall in the then and label for `else`. */ if (GET_CODE (NEXT_INSN (last)) == CODE_LABEL) break; delete_insn (NEXT_INSN (last)); } e = make_edge (bb, EXIT_BLOCK_PTR, EDGE_ABNORMAL | EDGE_SIBCALL); e->probability += probability; e->count += count; BB_END (bb) = last; update_bb_for_insn (bb); if (NEXT_INSN (last)) bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb); else return bb; } } break; default: expand_expr_stmt (stmt); break; } } do_pending_stack_adjust (); /* Find the the block tail. The last insn is the block is the insn before a barrier and/or table jump insn. */ last = get_last_insn (); if (GET_CODE (last) == BARRIER) last = PREV_INSN (last); if (JUMP_TABLE_DATA_P (last)) last = PREV_INSN (PREV_INSN (last)); BB_END (bb) = last; if (dump_file) dump_bb (bb, dump_file, 0); update_bb_for_insn (bb); return bb; } /* Create a basic block for initialization code. 
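   The new block is spliced onto the edge leaving ENTRY, so its profile data is
   simply inherited from ENTRY; an editorial sketch of that bookkeeping (the
   names are made up, not GCC's types):

     struct bb_profile_sketch { long long count; int frequency; };

     static void
     inherit_entry_profile_sketch (const struct bb_profile_sketch *entry,
                                   struct bb_profile_sketch *init_bb)
     {
       // The init code runs exactly once per function entry, so the init
       // block executes as often as ENTRY itself.
       init_bb->count = entry->count;
       init_bb->frequency = entry->frequency;
     }

   The fallthru edge out of the init block likewise gets probability
   REG_BR_PROB_BASE (always taken) and ENTRY's count, as the code below shows.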
*/ static basic_block construct_init_block (void) { basic_block init_block, first_block; edge e; expand_start_bindings_and_block (0, NULL_TREE); for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) if (e->dest == ENTRY_BLOCK_PTR->next_bb) break; init_block = create_basic_block (NEXT_INSN (get_insns ()), get_last_insn (), ENTRY_BLOCK_PTR); init_block->frequency = ENTRY_BLOCK_PTR->frequency; init_block->count = ENTRY_BLOCK_PTR->count; if (e) { first_block = e->dest; redirect_edge_succ (e, init_block); e = make_edge (init_block, first_block, EDGE_FALLTHRU); } else e = make_edge (init_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU); e->probability = REG_BR_PROB_BASE; e->count = ENTRY_BLOCK_PTR->count; update_bb_for_insn (init_block); return init_block; } /* Create a block containing landing pads and similar stuff. */ static void construct_exit_block (void) { rtx head = get_last_insn (); rtx end; basic_block exit_block; edge e, e2, next; /* Make sure the locus is set to the end of the function, so that epilogue line numbers and warnings are set properly. */ #ifdef USE_MAPPED_LOCATION if (cfun->function_end_locus != UNKNOWN_LOCATION) #else if (cfun->function_end_locus.file) #endif input_location = cfun->function_end_locus; /* The following insns belong to the top scope. */ record_block_change (DECL_INITIAL (current_function_decl)); expand_end_bindings (NULL_TREE, 1, 0); /* Generate rtl for function exit. */ expand_function_end (); end = get_last_insn (); if (head == end) return; while (NEXT_INSN (head) && GET_CODE (NEXT_INSN (head)) == NOTE) head = NEXT_INSN (head); exit_block = create_basic_block (NEXT_INSN (head), end, EXIT_BLOCK_PTR->prev_bb); exit_block->frequency = EXIT_BLOCK_PTR->frequency; exit_block->count = EXIT_BLOCK_PTR->count; for (e = EXIT_BLOCK_PTR->pred; e; e = next) { next = e->pred_next; if (!(e->flags & EDGE_ABNORMAL)) redirect_edge_succ (e, exit_block); } e = make_edge (exit_block, EXIT_BLOCK_PTR, EDGE_FALLTHRU); e->probability = REG_BR_PROB_BASE; e->count = EXIT_BLOCK_PTR->count; for (e2 = EXIT_BLOCK_PTR->pred; e2; e2 = e2->pred_next) if (e2 != e) { e->count -= e2->count; exit_block->count -= e2->count; exit_block->frequency -= EDGE_FREQUENCY (e2); } if (e->count < 0) e->count = 0; if (exit_block->count < 0) exit_block->count = 0; if (exit_block->frequency < 0) exit_block->frequency = 0; update_bb_for_insn (exit_block); } /* Translate the intermediate representation contained in the CFG from GIMPLE trees to RTL. We do conversion per basic block and preserve/update the tree CFG. This implies we have to do some magic as the CFG can simultaneously consist of basic blocks containing RTL and GIMPLE trees. This can confuse the CFG hooks, so be careful to not manipulate CFG during the expansion. */ static void tree_expand_cfg (void) { basic_block bb, init_block; sbitmap blocks; if (dump_file) { fprintf (dump_file, "\n;; Function %s", (*lang_hooks.decl_printable_name) (current_function_decl, 2)); fprintf (dump_file, " (%s)\n", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (current_function_decl))); } /* Prepare the rtl middle end to start recording block changes. */ reset_block_changes (); /* Expand the variables recorded during gimple lowering. This must occur before the call to expand_function_start to ensure that all used variables are expanded before we expand anything on the PENDING_SIZES list. */ expand_used_vars (); /* Set up parameters and prepare for return, for the function. 
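   (A side note on construct_exit_block above: the single fallthru edge into
   EXIT receives whatever execution count is left once the other predecessors
   are subtracted, clamped at zero because profile data can be inconsistent.
   An editorial sketch with made-up names:

     static long long
     exit_fallthru_count_sketch (long long exit_count,
                                 const long long *other_pred_counts, int n)
     {
       long long count = exit_count;
       int i;
       for (i = 0; i < n; i++)
         count -= other_pred_counts[i];
       return count < 0 ? 0 : count;
     }

   For instance, an EXIT count of 1000 with one other predecessor of count 100
   leaves 900 for the new edge.)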
*/ expand_function_start (current_function_decl); /* If this function is `main', emit a call to `__main' to run global initializers, etc. */ if (DECL_NAME (current_function_decl) && MAIN_NAME_P (DECL_NAME (current_function_decl)) && DECL_FILE_SCOPE_P (current_function_decl)) expand_main_function (); /* Write the flowgraph to a dot file. */ rtl_register_cfg_hooks (); init_block = construct_init_block (); FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR, next_bb) bb = expand_block (bb, dump_file); construct_exit_block (); /* Convert from NOTE_INSN_EH_REGION style notes, and do other sorts of eh initialization. Delay this until after the initial rtl dump so that we can see the original nesting. */ convert_from_eh_region_ranges (); rebuild_jump_labels (get_insns ()); find_exception_handler_labels (); blocks = sbitmap_alloc (last_basic_block); sbitmap_ones (blocks); find_many_sub_basic_blocks (blocks); purge_all_dead_edges (0); sbitmap_free (blocks); compact_blocks (); #ifdef ENABLE_CHECKING verify_flow_info(); #endif } struct tree_opt_pass pass_expand = { "expand", /* name */ NULL, /* gate */ tree_expand_cfg, /* execute */ NULL, /* sub */ NULL, /* next */ 0, /* static_pass_number */ TV_EXPAND, /* tv_id */ /* ??? If TER is enabled, we actually receive GENERIC. */ PROP_gimple_leh | PROP_cfg, /* properties_required */ PROP_rtl, /* properties_provided */ PROP_gimple_leh, /* properties_destroyed */ 0, /* todo_flags_start */ 0 /* todo_flags_finish */ }; /* Subroutines used for code generation on IA-32. Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef CHECK_STACK_LIMIT #define CHECK_STACK_LIMIT (-1) #endif /* Return index of given mode in mult and division cost tables. */ #define MODE_INDEX(mode) \ ((mode) == QImode ? 0 \ : (mode) == HImode ? 1 \ : (mode) == SImode ? 2 \ : (mode) == DImode ? 3 \ : 4) /* Processor costs (relative to an add) */ static const struct processor_costs size_cost = { /* costs for tunning for size */ 2, /* cost of an add instruction */ 3, /* cost of a lea instruction */ 2, /* variable shift costs */ 3, /* constant shift costs */ {3, 3, 3, 3, 5}, /* cost of starting a multiply */ 0, /* cost of multiply per each bit set */ {3, 3, 3, 3, 5}, /* cost of a divide/mod */ 3, /* cost of movsx */ 3, /* cost of movzx */ 0, /* "large" insn */ 2, /* MOVE_RATIO */ 2, /* cost for loading QImode using movzbl */ {2, 2, 2}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). 
*/ {2, 2, 2}, /* cost of storing integer registers */ 2, /* cost of reg,reg fld/fst */ {2, 2, 2}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {2, 2, 2}, /* cost of loading integer registers */ 3, /* cost of moving MMX register */ {3, 3}, /* cost of loading MMX registers in SImode and DImode */ {3, 3}, /* cost of storing MMX registers in SImode and DImode */ 3, /* cost of moving SSE register */ {3, 3, 3}, /* cost of loading SSE registers in SImode, DImode and TImode */ {3, 3, 3}, /* cost of storing SSE registers in SImode, DImode and TImode */ 3, /* MMX or SSE register to integer */ 0, /* size of prefetch block */ 0, /* number of parallel prefetches */ 1, /* Branch cost */ 2, /* cost of FADD and FSUB insns. */ 2, /* cost of FMUL instruction. */ 2, /* cost of FDIV instruction. */ 2, /* cost of FABS instruction. */ 2, /* cost of FCHS instruction. */ 2, /* cost of FSQRT instruction. */ }; /* Processor costs (relative to an add) */ static const struct processor_costs i386_cost = { /* 386 specific costs */ 1, /* cost of an add instruction */ 1, /* cost of a lea instruction */ 3, /* variable shift costs */ 2, /* constant shift costs */ {6, 6, 6, 6, 6}, /* cost of starting a multiply */ 1, /* cost of multiply per each bit set */ {23, 23, 23, 23, 23}, /* cost of a divide/mod */ 3, /* cost of movsx */ 2, /* cost of movzx */ 15, /* "large" insn */ 3, /* MOVE_RATIO */ 4, /* cost for loading QImode using movzbl */ {2, 4, 2}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). */ {2, 4, 2}, /* cost of storing integer registers */ 2, /* cost of reg,reg fld/fst */ {8, 8, 8}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {8, 8, 8}, /* cost of loading integer registers */ 2, /* cost of moving MMX register */ {4, 8}, /* cost of loading MMX registers in SImode and DImode */ {4, 8}, /* cost of storing MMX registers in SImode and DImode */ 2, /* cost of moving SSE register */ {4, 8, 16}, /* cost of loading SSE registers in SImode, DImode and TImode */ {4, 8, 16}, /* cost of storing SSE registers in SImode, DImode and TImode */ 3, /* MMX or SSE register to integer */ 0, /* size of prefetch block */ 0, /* number of parallel prefetches */ 1, /* Branch cost */ 23, /* cost of FADD and FSUB insns. */ 27, /* cost of FMUL instruction. */ 88, /* cost of FDIV instruction. */ 22, /* cost of FABS instruction. */ 24, /* cost of FCHS instruction. */ 122, /* cost of FSQRT instruction. */ }; static const struct processor_costs i486_cost = { /* 486 specific costs */ 1, /* cost of an add instruction */ 1, /* cost of a lea instruction */ 3, /* variable shift costs */ 2, /* constant shift costs */ {12, 12, 12, 12, 12}, /* cost of starting a multiply */ 1, /* cost of multiply per each bit set */ {40, 40, 40, 40, 40}, /* cost of a divide/mod */ 3, /* cost of movsx */ 2, /* cost of movzx */ 15, /* "large" insn */ 3, /* MOVE_RATIO */ 4, /* cost for loading QImode using movzbl */ {2, 4, 2}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). 
*/ {2, 4, 2}, /* cost of storing integer registers */ 2, /* cost of reg,reg fld/fst */ {8, 8, 8}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {8, 8, 8}, /* cost of loading integer registers */ 2, /* cost of moving MMX register */ {4, 8}, /* cost of loading MMX registers in SImode and DImode */ {4, 8}, /* cost of storing MMX registers in SImode and DImode */ 2, /* cost of moving SSE register */ {4, 8, 16}, /* cost of loading SSE registers in SImode, DImode and TImode */ {4, 8, 16}, /* cost of storing SSE registers in SImode, DImode and TImode */ 3, /* MMX or SSE register to integer */ 0, /* size of prefetch block */ 0, /* number of parallel prefetches */ 1, /* Branch cost */ 8, /* cost of FADD and FSUB insns. */ 16, /* cost of FMUL instruction. */ 73, /* cost of FDIV instruction. */ 3, /* cost of FABS instruction. */ 3, /* cost of FCHS instruction. */ 83, /* cost of FSQRT instruction. */ }; static const struct processor_costs pentium_cost = { 1, /* cost of an add instruction */ 1, /* cost of a lea instruction */ 4, /* variable shift costs */ 1, /* constant shift costs */ {11, 11, 11, 11, 11}, /* cost of starting a multiply */ 0, /* cost of multiply per each bit set */ {25, 25, 25, 25, 25}, /* cost of a divide/mod */ 3, /* cost of movsx */ 2, /* cost of movzx */ 8, /* "large" insn */ 6, /* MOVE_RATIO */ 6, /* cost for loading QImode using movzbl */ {2, 4, 2}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). */ {2, 4, 2}, /* cost of storing integer registers */ 2, /* cost of reg,reg fld/fst */ {2, 2, 6}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {4, 4, 6}, /* cost of loading integer registers */ 8, /* cost of moving MMX register */ {8, 8}, /* cost of loading MMX registers in SImode and DImode */ {8, 8}, /* cost of storing MMX registers in SImode and DImode */ 2, /* cost of moving SSE register */ {4, 8, 16}, /* cost of loading SSE registers in SImode, DImode and TImode */ {4, 8, 16}, /* cost of storing SSE registers in SImode, DImode and TImode */ 3, /* MMX or SSE register to integer */ 0, /* size of prefetch block */ 0, /* number of parallel prefetches */ 2, /* Branch cost */ 3, /* cost of FADD and FSUB insns. */ 3, /* cost of FMUL instruction. */ 39, /* cost of FDIV instruction. */ 1, /* cost of FABS instruction. */ 1, /* cost of FCHS instruction. */ 70, /* cost of FSQRT instruction. */ }; static const struct processor_costs pentiumpro_cost = { 1, /* cost of an add instruction */ 1, /* cost of a lea instruction */ 1, /* variable shift costs */ 1, /* constant shift costs */ {4, 4, 4, 4, 4}, /* cost of starting a multiply */ 0, /* cost of multiply per each bit set */ {17, 17, 17, 17, 17}, /* cost of a divide/mod */ 1, /* cost of movsx */ 1, /* cost of movzx */ 8, /* "large" insn */ 6, /* MOVE_RATIO */ 2, /* cost for loading QImode using movzbl */ {4, 4, 4}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). 
*/ {2, 2, 2}, /* cost of storing integer registers */ 2, /* cost of reg,reg fld/fst */ {2, 2, 6}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {4, 4, 6}, /* cost of loading integer registers */ 2, /* cost of moving MMX register */ {2, 2}, /* cost of loading MMX registers in SImode and DImode */ {2, 2}, /* cost of storing MMX registers in SImode and DImode */ 2, /* cost of moving SSE register */ {2, 2, 8}, /* cost of loading SSE registers in SImode, DImode and TImode */ {2, 2, 8}, /* cost of storing SSE registers in SImode, DImode and TImode */ 3, /* MMX or SSE register to integer */ 32, /* size of prefetch block */ 6, /* number of parallel prefetches */ 2, /* Branch cost */ 3, /* cost of FADD and FSUB insns. */ 5, /* cost of FMUL instruction. */ 56, /* cost of FDIV instruction. */ 2, /* cost of FABS instruction. */ 2, /* cost of FCHS instruction. */ 56, /* cost of FSQRT instruction. */ }; static const struct processor_costs k6_cost = { 1, /* cost of an add instruction */ 2, /* cost of a lea instruction */ 1, /* variable shift costs */ 1, /* constant shift costs */ {3, 3, 3, 3, 3}, /* cost of starting a multiply */ 0, /* cost of multiply per each bit set */ {18, 18, 18, 18, 18}, /* cost of a divide/mod */ 2, /* cost of movsx */ 2, /* cost of movzx */ 8, /* "large" insn */ 4, /* MOVE_RATIO */ 3, /* cost for loading QImode using movzbl */ {4, 5, 4}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). */ {2, 3, 2}, /* cost of storing integer registers */ 4, /* cost of reg,reg fld/fst */ {6, 6, 6}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {4, 4, 4}, /* cost of loading integer registers */ 2, /* cost of moving MMX register */ {2, 2}, /* cost of loading MMX registers in SImode and DImode */ {2, 2}, /* cost of storing MMX registers in SImode and DImode */ 2, /* cost of moving SSE register */ {2, 2, 8}, /* cost of loading SSE registers in SImode, DImode and TImode */ {2, 2, 8}, /* cost of storing SSE registers in SImode, DImode and TImode */ 6, /* MMX or SSE register to integer */ 32, /* size of prefetch block */ 1, /* number of parallel prefetches */ 1, /* Branch cost */ 2, /* cost of FADD and FSUB insns. */ 2, /* cost of FMUL instruction. */ 56, /* cost of FDIV instruction. */ 2, /* cost of FABS instruction. */ 2, /* cost of FCHS instruction. */ 56, /* cost of FSQRT instruction. */ }; static const struct processor_costs athlon_cost = { 1, /* cost of an add instruction */ 2, /* cost of a lea instruction */ 1, /* variable shift costs */ 1, /* constant shift costs */ {5, 5, 5, 5, 5}, /* cost of starting a multiply */ 0, /* cost of multiply per each bit set */ {18, 26, 42, 74, 74}, /* cost of a divide/mod */ 1, /* cost of movsx */ 1, /* cost of movzx */ 8, /* "large" insn */ 9, /* MOVE_RATIO */ 4, /* cost for loading QImode using movzbl */ {3, 4, 3}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). 
*/ {3, 4, 3}, /* cost of storing integer registers */ 4, /* cost of reg,reg fld/fst */ {4, 4, 12}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {6, 6, 8}, /* cost of loading integer registers */ 2, /* cost of moving MMX register */ {4, 4}, /* cost of loading MMX registers in SImode and DImode */ {4, 4}, /* cost of storing MMX registers in SImode and DImode */ 2, /* cost of moving SSE register */ {4, 4, 6}, /* cost of loading SSE registers in SImode, DImode and TImode */ {4, 4, 5}, /* cost of storing SSE registers in SImode, DImode and TImode */ 5, /* MMX or SSE register to integer */ 64, /* size of prefetch block */ 6, /* number of parallel prefetches */ 2, /* Branch cost */ 4, /* cost of FADD and FSUB insns. */ 4, /* cost of FMUL instruction. */ 24, /* cost of FDIV instruction. */ 2, /* cost of FABS instruction. */ 2, /* cost of FCHS instruction. */ 35, /* cost of FSQRT instruction. */ }; static const struct processor_costs k8_cost = { 1, /* cost of an add instruction */ 2, /* cost of a lea instruction */ 1, /* variable shift costs */ 1, /* constant shift costs */ {3, 4, 3, 4, 5}, /* cost of starting a multiply */ 0, /* cost of multiply per each bit set */ {18, 26, 42, 74, 74}, /* cost of a divide/mod */ 1, /* cost of movsx */ 1, /* cost of movzx */ 8, /* "large" insn */ 9, /* MOVE_RATIO */ 4, /* cost for loading QImode using movzbl */ {3, 4, 3}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). */ {3, 4, 3}, /* cost of storing integer registers */ 4, /* cost of reg,reg fld/fst */ {4, 4, 12}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {6, 6, 8}, /* cost of loading integer registers */ 2, /* cost of moving MMX register */ {3, 3}, /* cost of loading MMX registers in SImode and DImode */ {4, 4}, /* cost of storing MMX registers in SImode and DImode */ 2, /* cost of moving SSE register */ {4, 3, 6}, /* cost of loading SSE registers in SImode, DImode and TImode */ {4, 4, 5}, /* cost of storing SSE registers in SImode, DImode and TImode */ 5, /* MMX or SSE register to integer */ 64, /* size of prefetch block */ 6, /* number of parallel prefetches */ 2, /* Branch cost */ 4, /* cost of FADD and FSUB insns. */ 4, /* cost of FMUL instruction. */ 19, /* cost of FDIV instruction. */ 2, /* cost of FABS instruction. */ 2, /* cost of FCHS instruction. */ 35, /* cost of FSQRT instruction. */ }; static const struct processor_costs pentium4_cost = { 1, /* cost of an add instruction */ 3, /* cost of a lea instruction */ 4, /* variable shift costs */ 4, /* constant shift costs */ {15, 15, 15, 15, 15}, /* cost of starting a multiply */ 0, /* cost of multiply per each bit set */ {56, 56, 56, 56, 56}, /* cost of a divide/mod */ 1, /* cost of movsx */ 1, /* cost of movzx */ 16, /* "large" insn */ 6, /* MOVE_RATIO */ 2, /* cost for loading QImode using movzbl */ {4, 5, 4}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). 
*/ {2, 3, 2}, /* cost of storing integer registers */ 2, /* cost of reg,reg fld/fst */ {2, 2, 6}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {4, 4, 6}, /* cost of loading integer registers */ 2, /* cost of moving MMX register */ {2, 2}, /* cost of loading MMX registers in SImode and DImode */ {2, 2}, /* cost of storing MMX registers in SImode and DImode */ 12, /* cost of moving SSE register */ {12, 12, 12}, /* cost of loading SSE registers in SImode, DImode and TImode */ {2, 2, 8}, /* cost of storing SSE registers in SImode, DImode and TImode */ 10, /* MMX or SSE register to integer */ 64, /* size of prefetch block */ 6, /* number of parallel prefetches */ 2, /* Branch cost */ 5, /* cost of FADD and FSUB insns. */ 7, /* cost of FMUL instruction. */ 43, /* cost of FDIV instruction. */ 2, /* cost of FABS instruction. */ 2, /* cost of FCHS instruction. */ 43, /* cost of FSQRT instruction. */ }; static const struct processor_costs nocona_cost = { 1, /* cost of an add instruction */ 1, /* cost of a lea instruction */ 1, /* variable shift costs */ 1, /* constant shift costs */ {10, 10, 10, 10, 10}, /* cost of starting a multiply */ 0, /* cost of multiply per each bit set */ {66, 66, 66, 66, 66}, /* cost of a divide/mod */ 1, /* cost of movsx */ 1, /* cost of movzx */ 16, /* "large" insn */ 9, /* MOVE_RATIO */ 4, /* cost for loading QImode using movzbl */ {4, 4, 4}, /* cost of loading integer registers in QImode, HImode and SImode. Relative to reg-reg move (2). */ {4, 4, 4}, /* cost of storing integer registers */ 3, /* cost of reg,reg fld/fst */ {12, 12, 12}, /* cost of loading fp registers in SFmode, DFmode and XFmode */ {4, 4, 4}, /* cost of loading integer registers */ 6, /* cost of moving MMX register */ {12, 12}, /* cost of loading MMX registers in SImode and DImode */ {12, 12}, /* cost of storing MMX registers in SImode and DImode */ 6, /* cost of moving SSE register */ {12, 12, 12}, /* cost of loading SSE registers in SImode, DImode and TImode */ {12, 12, 12}, /* cost of storing SSE registers in SImode, DImode and TImode */ 8, /* MMX or SSE register to integer */ 128, /* size of prefetch block */ 8, /* number of parallel prefetches */ 1, /* Branch cost */ 6, /* cost of FADD and FSUB insns. */ 8, /* cost of FMUL instruction. */ 40, /* cost of FDIV instruction. */ 3, /* cost of FABS instruction. */ 3, /* cost of FCHS instruction. */ 44, /* cost of FSQRT instruction. */ }; const struct processor_costs *ix86_cost = &pentium_cost; /* Processor feature/optimization bitmasks. */ #define m_386 (1< to_allocate <- FRAME_POINTER [frame] ( ) [padding2] / */ struct ix86_frame { int nregs; int padding1; int va_arg_size; HOST_WIDE_INT frame; int padding2; int outgoing_arguments_size; int red_zone_size; HOST_WIDE_INT to_allocate; /* The offsets relative to ARG_POINTER. */ HOST_WIDE_INT frame_pointer_offset; HOST_WIDE_INT hard_frame_pointer_offset; HOST_WIDE_INT stack_pointer_offset; /* When save_regs_using_mov is set, emit prologue using move instead of push instructions. */ bool save_regs_using_mov; }; /* Used to enable/disable debugging features. */ const char *ix86_debug_arg_string, *ix86_debug_addr_string; /* Code model option as passed by user. */ const char *ix86_cmodel_string; /* Parsed value. */ enum cmodel ix86_cmodel; /* Asm dialect. */ const char *ix86_asm_string; enum asm_dialect ix86_asm_dialect = ASM_ATT; /* TLS dialext. */ const char *ix86_tls_dialect_string; enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU; /* Which unit we are generating floating point math for. 
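   Note that this is a bit set rather than a single value: the -mfpmath=sse,387
   handling further below ors FPMATH_SSE and FPMATH_387 together, so tests
   should use a mask.  Editorial sketch (the enumerator values here are
   placeholders; only the bit-set property is taken from the code below):

     enum fpmath_sketch { FPMATH_387_SK = 1, FPMATH_SSE_SK = 2 };

     static int
     uses_sse_math_sketch (int fpmath)
     {
       return (fpmath & FPMATH_SSE_SK) != 0;   // true for "sse" and "sse,387"
     }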
*/ enum fpmath_unit ix86_fpmath; /* Which cpu are we scheduling for. */ enum processor_type ix86_tune; /* Which instruction set architecture to use. */ enum processor_type ix86_arch; /* Strings to hold which cpu and instruction set architecture to use. */ const char *ix86_tune_string; /* for -mtune= */ const char *ix86_arch_string; /* for -march= */ const char *ix86_fpmath_string; /* for -mfpmath= */ /* # of registers to use to pass arguments. */ const char *ix86_regparm_string; /* true if sse prefetch instruction is not NOOP. */ int x86_prefetch_sse; /* ix86_regparm_string as a number */ int ix86_regparm; /* Alignment to use for loops and jumps: */ /* Power of two alignment for loops. */ const char *ix86_align_loops_string; /* Power of two alignment for non-loop jumps. */ const char *ix86_align_jumps_string; /* Power of two alignment for stack boundary in bytes. */ const char *ix86_preferred_stack_boundary_string; /* Preferred alignment for stack boundary in bits. */ int ix86_preferred_stack_boundary; /* Values 1-5: see jump.c */ int ix86_branch_cost; const char *ix86_branch_cost_string; /* Power of two alignment for functions. */ const char *ix86_align_funcs_string; /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */ static char internal_label_prefix[16]; static int internal_label_prefix_len; static int local_symbolic_operand (rtx, enum machine_mode); static int tls_symbolic_operand_1 (rtx, enum tls_model); static void output_pic_addr_const (FILE *, rtx, int); static void put_condition_code (enum rtx_code, enum machine_mode, int, int, FILE *); static const char *get_some_local_dynamic_name (void); static int get_some_local_dynamic_name_1 (rtx *, void *); static rtx maybe_get_pool_constant (rtx); static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx); static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *, rtx *); static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *); static enum machine_mode ix86_cc_modes_compatible (enum machine_mode, enum machine_mode); static rtx get_thread_pointer (int); static rtx legitimize_tls_address (rtx, enum tls_model, int); static void get_pc_thunk_name (char [32], unsigned int); static rtx gen_push (rtx); static int memory_address_length (rtx addr); static int ix86_flags_dependant (rtx, rtx, enum attr_type); static int ix86_agi_dependant (rtx, rtx, enum attr_type); static struct machine_function * ix86_init_machine_status (void); static int ix86_split_to_parts (rtx, rtx *, enum machine_mode); static int ix86_nsaved_regs (void); static void ix86_emit_save_regs (void); static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT); static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int); static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT); static HOST_WIDE_INT ix86_GOT_alias_set (void); static void ix86_adjust_counter (rtx, HOST_WIDE_INT); static rtx ix86_expand_aligntest (rtx, int); static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx); static int ix86_issue_rate (void); static int ix86_adjust_cost (rtx, rtx, rtx, int); static int ia32_use_dfa_pipeline_interface (void); static int ia32_multipass_dfa_lookahead (void); static void ix86_init_mmx_sse_builtins (void); static rtx x86_this_parameter (tree); static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree); static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree); static void x86_file_start (void); static void ix86_reorg (void); static bool ix86_expand_carry_flag_compare (enum rtx_code, 
rtx, rtx, rtx*); static tree ix86_build_builtin_va_list (void); static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int); static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *); struct ix86_address { rtx base, index, disp; HOST_WIDE_INT scale; enum ix86_address_seg { SEG_DEFAULT, SEG_FS, SEG_GS } seg; }; static int ix86_decompose_address (rtx, struct ix86_address *); static int ix86_address_cost (rtx); static bool ix86_cannot_force_const_mem (rtx); static rtx ix86_delegitimize_address (rtx); struct builtin_description; static rtx ix86_expand_sse_comi (const struct builtin_description *, tree, rtx); static rtx ix86_expand_sse_compare (const struct builtin_description *, tree, rtx); static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx); static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int); static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx); static rtx ix86_expand_store_builtin (enum insn_code, tree); static rtx safe_vector_operand (rtx, enum machine_mode); static enum rtx_code ix86_fp_compare_code_to_integer (enum rtx_code); static void ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *, enum rtx_code *, enum rtx_code *); static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *); static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code); static int ix86_fp_comparison_fcomi_cost (enum rtx_code code); static int ix86_fp_comparison_sahf_cost (enum rtx_code code); static int ix86_fp_comparison_cost (enum rtx_code code); static unsigned int ix86_select_alt_pic_regnum (void); static int ix86_save_reg (unsigned int, int); static void ix86_compute_frame_layout (struct ix86_frame *); static int ix86_comp_type_attributes (tree, tree); static int ix86_function_regparm (tree, tree); const struct attribute_spec ix86_attribute_table[]; static bool ix86_function_ok_for_sibcall (tree, tree); static tree ix86_handle_cdecl_attribute (tree *, tree, tree, int, bool *); static tree ix86_handle_regparm_attribute (tree *, tree, tree, int, bool *); static int ix86_value_regno (enum machine_mode); static bool contains_128bit_aligned_vector_p (tree); static rtx ix86_struct_value_rtx (tree, int); static bool ix86_ms_bitfield_layout_p (tree); static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *); static int extended_reg_mentioned_1 (rtx *, void *); static bool ix86_rtx_costs (rtx, int, int, int *); static int min_insn_size (rtx); static tree ix86_md_asm_clobbers (tree clobbers); #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION) static void ix86_svr3_asm_out_constructor (rtx, int); #endif /* Register class used for passing given 64bit part of the argument. These represent classes as documented by the PS ABI, with the exception of SSESF, SSEDF classes, that are basically SSE class, just gcc will use SF or DFmode move instead of DImode to avoid reformatting penalties. Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves whenever possible (upper half does contain padding). 
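   As a concrete, editorial example (the struct is hypothetical): a 16-byte
   argument such as

     struct example_arg { int i; double d; };

   has an int plus 4 bytes of padding in its first eightbyte and a double in
   its second, so under the scheme described here it classifies as
   X86_64_INTEGERSI_CLASS (an SImode move is enough, the upper half is padding)
   followed by X86_64_SSEDF_CLASS, i.e. one general register plus one SSE
   register.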
*/ enum x86_64_reg_class { X86_64_NO_CLASS, X86_64_INTEGER_CLASS, X86_64_INTEGERSI_CLASS, X86_64_SSE_CLASS, X86_64_SSESF_CLASS, X86_64_SSEDF_CLASS, X86_64_SSEUP_CLASS, X86_64_X87_CLASS, X86_64_X87UP_CLASS, X86_64_MEMORY_CLASS }; static const char * const x86_64_reg_class_name[] = {"no", "integer", "integerSI", "sse", "sseSF", "sseDF", "sseup", "x87", "x87up", "no"}; #define MAX_CLASSES 4 static int classify_argument (enum machine_mode, tree, enum x86_64_reg_class [MAX_CLASSES], int); static int examine_argument (enum machine_mode, tree, int, int *, int *); static rtx construct_container (enum machine_mode, tree, int, int, int, const int *, int); static enum x86_64_reg_class merge_classes (enum x86_64_reg_class, enum x86_64_reg_class); /* Table of constants used by fldpi, fldln2, etc.... */ static REAL_VALUE_TYPE ext_80387_constants_table [5]; static bool ext_80387_constants_init = 0; static void init_ext_80387_constants (void); /* Initialize the GCC target structure. */ #undef TARGET_ATTRIBUTE_TABLE #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES # undef TARGET_MERGE_DECL_ATTRIBUTES # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes #endif #undef TARGET_COMP_TYPE_ATTRIBUTES #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes #undef TARGET_INIT_BUILTINS #define TARGET_INIT_BUILTINS ix86_init_builtins #undef TARGET_EXPAND_BUILTIN #define TARGET_EXPAND_BUILTIN ix86_expand_builtin #undef TARGET_ASM_FUNCTION_EPILOGUE #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue #undef TARGET_ASM_OPEN_PAREN #define TARGET_ASM_OPEN_PAREN "" #undef TARGET_ASM_CLOSE_PAREN #define TARGET_ASM_CLOSE_PAREN "" #undef TARGET_ASM_ALIGNED_HI_OP #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT #undef TARGET_ASM_ALIGNED_SI_OP #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG #ifdef ASM_QUAD #undef TARGET_ASM_ALIGNED_DI_OP #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD #endif #undef TARGET_ASM_UNALIGNED_HI_OP #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP #undef TARGET_ASM_UNALIGNED_SI_OP #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP #undef TARGET_ASM_UNALIGNED_DI_OP #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP #undef TARGET_SCHED_ADJUST_COST #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost #undef TARGET_SCHED_ISSUE_RATE #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE \ ia32_use_dfa_pipeline_interface #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ ia32_multipass_dfa_lookahead #undef TARGET_FUNCTION_OK_FOR_SIBCALL #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall #ifdef HAVE_AS_TLS #undef TARGET_HAVE_TLS #define TARGET_HAVE_TLS true #endif #undef TARGET_CANNOT_FORCE_CONST_MEM #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem #undef TARGET_DELEGITIMIZE_ADDRESS #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address #undef TARGET_MS_BITFIELD_LAYOUT_P #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p #undef TARGET_ASM_OUTPUT_MI_THUNK #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk #undef TARGET_ASM_FILE_START #define TARGET_ASM_FILE_START x86_file_start #undef TARGET_RTX_COSTS #define TARGET_RTX_COSTS ix86_rtx_costs #undef TARGET_ADDRESS_COST #define TARGET_ADDRESS_COST ix86_address_cost #undef 
TARGET_FIXED_CONDITION_CODE_REGS #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs #undef TARGET_CC_MODES_COMPATIBLE #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible #undef TARGET_MACHINE_DEPENDENT_REORG #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg #undef TARGET_BUILD_BUILTIN_VA_LIST #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list #undef TARGET_MD_ASM_CLOBBERS #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers #undef TARGET_PROMOTE_PROTOTYPES #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true #undef TARGET_STRUCT_VALUE_RTX #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx #undef TARGET_SETUP_INCOMING_VARARGS #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs #undef TARGET_GIMPLIFY_VA_ARG_EXPR #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg struct gcc_target targetm = TARGET_INITIALIZER; /* The svr4 ABI for the i386 says that records and unions are returned in memory. */ #ifndef DEFAULT_PCC_STRUCT_RETURN #define DEFAULT_PCC_STRUCT_RETURN 1 #endif /* Sometimes certain combinations of command options do not make sense on a particular target machine. You can define a macro `OVERRIDE_OPTIONS' to take account of this. This macro, if defined, is executed once just after all the command options have been parsed. Don't use this macro to turn on various extra optimizations for `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */ void override_options (void) { int i; /* Comes from final.c -- no real reason to change it. */ #define MAX_CODE_ALIGN 16 static struct ptt { const struct processor_costs *cost; /* Processor costs */ const int target_enable; /* Target flags to enable. */ const int target_disable; /* Target flags to disable. */ const int align_loop; /* Default alignments. */ const int align_loop_max_skip; const int align_jump; const int align_jump_max_skip; const int align_func; } const processor_target_table[PROCESSOR_max] = { {&i386_cost, 0, 0, 4, 3, 4, 3, 4}, {&i486_cost, 0, 0, 16, 15, 16, 15, 16}, {&pentium_cost, 0, 0, 16, 7, 16, 7, 16}, {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16}, {&k6_cost, 0, 0, 32, 7, 32, 7, 32}, {&athlon_cost, 0, 0, 16, 7, 16, 7, 16}, {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0}, {&k8_cost, 0, 0, 16, 7, 16, 7, 16}, {&nocona_cost, 0, 0, 0, 0, 0, 0, 0} }; static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES; static struct pta { const char *const name; /* processor name or nickname. 
*/ const enum processor_type processor; const enum pta_flags { PTA_SSE = 1, PTA_SSE2 = 2, PTA_SSE3 = 4, PTA_MMX = 8, PTA_PREFETCH_SSE = 16, PTA_3DNOW = 32, PTA_3DNOW_A = 64, PTA_64BIT = 128 } flags; } const processor_alias_table[] = { {"i386", PROCESSOR_I386, 0}, {"i486", PROCESSOR_I486, 0}, {"i586", PROCESSOR_PENTIUM, 0}, {"pentium", PROCESSOR_PENTIUM, 0}, {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX}, {"winchip-c6", PROCESSOR_I486, PTA_MMX}, {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW}, {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW}, {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE}, {"i686", PROCESSOR_PENTIUMPRO, 0}, {"pentiumpro", PROCESSOR_PENTIUMPRO, 0}, {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX}, {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE}, {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE}, {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2}, {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2 | PTA_MMX | PTA_PREFETCH_SSE}, {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2 | PTA_MMX | PTA_PREFETCH_SSE}, {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_MMX | PTA_PREFETCH_SSE}, {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT | PTA_MMX | PTA_PREFETCH_SSE}, {"k6", PROCESSOR_K6, PTA_MMX}, {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW}, {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW}, {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_3DNOW_A}, {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_3DNOW_A}, {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE}, {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE}, {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE}, {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT | PTA_SSE | PTA_SSE2 }, {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT | PTA_3DNOW_A | PTA_SSE | PTA_SSE2}, {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT | PTA_3DNOW_A | PTA_SSE | PTA_SSE2}, {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT | PTA_3DNOW_A | PTA_SSE | PTA_SSE2}, {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT | PTA_3DNOW_A | PTA_SSE | PTA_SSE2}, }; int const pta_size = ARRAY_SIZE (processor_alias_table); /* Set the default values for switches whose default depends on TARGET_64BIT in case they weren't overwritten by command line options. */ if (TARGET_64BIT) { if (flag_omit_frame_pointer == 2) flag_omit_frame_pointer = 1; if (flag_asynchronous_unwind_tables == 2) flag_asynchronous_unwind_tables = 1; if (flag_pcc_struct_return == 2) flag_pcc_struct_return = 0; } else { if (flag_omit_frame_pointer == 2) flag_omit_frame_pointer = 0; if (flag_asynchronous_unwind_tables == 2) flag_asynchronous_unwind_tables = 0; if (flag_pcc_struct_return == 2) flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN; } #ifdef SUBTARGET_OVERRIDE_OPTIONS SUBTARGET_OVERRIDE_OPTIONS; #endif if (!ix86_tune_string && ix86_arch_string) ix86_tune_string = ix86_arch_string; if (!ix86_tune_string) ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT]; if (!ix86_arch_string) ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386"; if (ix86_cmodel_string != 0) { if (!strcmp (ix86_cmodel_string, "small")) ix86_cmodel = flag_pic ? 
CM_SMALL_PIC : CM_SMALL; else if (flag_pic) sorry ("code model %s not supported in PIC mode", ix86_cmodel_string); else if (!strcmp (ix86_cmodel_string, "32")) ix86_cmodel = CM_32; else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic) ix86_cmodel = CM_KERNEL; else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic) ix86_cmodel = CM_MEDIUM; else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic) ix86_cmodel = CM_LARGE; else error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string); } else { ix86_cmodel = CM_32; if (TARGET_64BIT) ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL; } if (ix86_asm_string != 0) { if (!strcmp (ix86_asm_string, "intel")) ix86_asm_dialect = ASM_INTEL; else if (!strcmp (ix86_asm_string, "att")) ix86_asm_dialect = ASM_ATT; else error ("bad value (%s) for -masm= switch", ix86_asm_string); } if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32)) error ("code model `%s' not supported in the %s bit mode", ix86_cmodel_string, TARGET_64BIT ? "64" : "32"); if (ix86_cmodel == CM_LARGE) sorry ("code model `large' not supported yet"); if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0)) sorry ("%i-bit mode not compiled in", (target_flags & MASK_64BIT) ? 64 : 32); for (i = 0; i < pta_size; i++) if (! strcmp (ix86_arch_string, processor_alias_table[i].name)) { ix86_arch = processor_alias_table[i].processor; /* Default cpu tuning to the architecture. */ ix86_tune = ix86_arch; if (processor_alias_table[i].flags & PTA_MMX && !(target_flags_explicit & MASK_MMX)) target_flags |= MASK_MMX; if (processor_alias_table[i].flags & PTA_3DNOW && !(target_flags_explicit & MASK_3DNOW)) target_flags |= MASK_3DNOW; if (processor_alias_table[i].flags & PTA_3DNOW_A && !(target_flags_explicit & MASK_3DNOW_A)) target_flags |= MASK_3DNOW_A; if (processor_alias_table[i].flags & PTA_SSE && !(target_flags_explicit & MASK_SSE)) target_flags |= MASK_SSE; if (processor_alias_table[i].flags & PTA_SSE2 && !(target_flags_explicit & MASK_SSE2)) target_flags |= MASK_SSE2; if (processor_alias_table[i].flags & PTA_SSE3 && !(target_flags_explicit & MASK_SSE3)) target_flags |= MASK_SSE3; if (processor_alias_table[i].flags & PTA_PREFETCH_SSE) x86_prefetch_sse = true; if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT)) error ("CPU you selected does not support x86-64 instruction set"); break; } if (i == pta_size) error ("bad value (%s) for -march= switch", ix86_arch_string); for (i = 0; i < pta_size; i++) if (! strcmp (ix86_tune_string, processor_alias_table[i].name)) { ix86_tune = processor_alias_table[i].processor; if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT)) error ("CPU you selected does not support x86-64 instruction set"); break; } if (processor_alias_table[i].flags & PTA_PREFETCH_SSE) x86_prefetch_sse = true; if (i == pta_size) error ("bad value (%s) for -mtune= switch", ix86_tune_string); if (optimize_size) ix86_cost = &size_cost; else ix86_cost = processor_target_table[ix86_tune].cost; target_flags |= processor_target_table[ix86_tune].target_enable; target_flags &= ~processor_target_table[ix86_tune].target_disable; /* Arrange to set up i386_stack_locals for all functions. */ init_machine_status = ix86_init_machine_status; /* Validate -mregparm= value. 
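   The validation below follows the usual pattern for numeric -m options:
   convert with atoi, range-check, and only then commit the value.  An
   editorial, standalone sketch of that pattern (the helper is made up; the
   real code reports the problem with error () instead of falling back):

     #include <stdlib.h>

     static int
     parse_bounded_option_sketch (const char *arg, int max, int fallback)
     {
       int v = atoi (arg);
       if (v < 0 || v > max)
         return fallback;
       return v;
     }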
*/ if (ix86_regparm_string) { i = atoi (ix86_regparm_string); if (i < 0 || i > REGPARM_MAX) error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX); else ix86_regparm = i; } else if (TARGET_64BIT) ix86_regparm = REGPARM_MAX; /* If the user has provided any of the -malign-* options, warn and use that value only if -falign-* is not set. Remove this code in GCC 3.2 or later. */ if (ix86_align_loops_string) { warning ("-malign-loops is obsolete, use -falign-loops"); if (align_loops == 0) { i = atoi (ix86_align_loops_string); if (i < 0 || i > MAX_CODE_ALIGN) error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN); else align_loops = 1 << i; } } if (ix86_align_jumps_string) { warning ("-malign-jumps is obsolete, use -falign-jumps"); if (align_jumps == 0) { i = atoi (ix86_align_jumps_string); if (i < 0 || i > MAX_CODE_ALIGN) error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN); else align_jumps = 1 << i; } } if (ix86_align_funcs_string) { warning ("-malign-functions is obsolete, use -falign-functions"); if (align_functions == 0) { i = atoi (ix86_align_funcs_string); if (i < 0 || i > MAX_CODE_ALIGN) error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN); else align_functions = 1 << i; } } /* Default align_* from the processor table. */ if (align_loops == 0) { align_loops = processor_target_table[ix86_tune].align_loop; align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip; } if (align_jumps == 0) { align_jumps = processor_target_table[ix86_tune].align_jump; align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip; } if (align_functions == 0) { align_functions = processor_target_table[ix86_tune].align_func; } /* Validate -mpreferred-stack-boundary= value, or provide default. The default of 128 bits is for Pentium III's SSE __m128, but we don't want additional code to keep the stack aligned when optimizing for code size. */ ix86_preferred_stack_boundary = (optimize_size ? TARGET_64BIT ? 128 : 32 : 128); if (ix86_preferred_stack_boundary_string) { i = atoi (ix86_preferred_stack_boundary_string); if (i < (TARGET_64BIT ? 4 : 2) || i > 12) error ("-mpreferred-stack-boundary=%d is not between %d and 12", i, TARGET_64BIT ? 4 : 2); else ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT; } /* Validate -mbranch-cost= value, or provide default. */ ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost; if (ix86_branch_cost_string) { i = atoi (ix86_branch_cost_string); if (i < 0 || i > 5) error ("-mbranch-cost=%d is not between 0 and 5", i); else ix86_branch_cost = i; } if (ix86_tls_dialect_string) { if (strcmp (ix86_tls_dialect_string, "gnu") == 0) ix86_tls_dialect = TLS_DIALECT_GNU; else if (strcmp (ix86_tls_dialect_string, "sun") == 0) ix86_tls_dialect = TLS_DIALECT_SUN; else error ("bad value (%s) for -mtls-dialect= switch", ix86_tls_dialect_string); } /* Keep nonleaf frame pointers. */ if (TARGET_OMIT_LEAF_FRAME_POINTER) flag_omit_frame_pointer = 1; /* If we're doing fast math, we don't care about comparison order wrt NaNs. This lets us use a shorter comparison sequence. */ if (flag_unsafe_math_optimizations) target_flags &= ~MASK_IEEE_FP; /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387, since the insns won't need emulation. */ if (x86_arch_always_fancy_math_387 & (1 << ix86_arch)) target_flags &= ~MASK_NO_FANCY_MATH_387; /* Turn on SSE2 builtins for -msse3. */ if (TARGET_SSE3) target_flags |= MASK_SSE2; /* Turn on SSE builtins for -msse2. 
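   Together with the -msse3 handling just above and the -msse handling below,
   the effect is a simple implication chain: SSE3 implies SSE2, SSE2 implies
   SSE, and SSE implies MMX.  Editorial sketch with placeholder mask values
   (the real masks are the MASK_* target flags):

     enum { SK_MMX = 1, SK_SSE = 2, SK_SSE2 = 4, SK_SSE3 = 8 };

     static int
     apply_isa_implications_sketch (int flags)
     {
       if (flags & SK_SSE3) flags |= SK_SSE2;
       if (flags & SK_SSE2) flags |= SK_SSE;
       if (flags & SK_SSE)  flags |= SK_MMX;
       return flags;
     }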
*/ if (TARGET_SSE2) target_flags |= MASK_SSE; if (TARGET_64BIT) { if (TARGET_ALIGN_DOUBLE) error ("-malign-double makes no sense in the 64bit mode"); if (TARGET_RTD) error ("-mrtd calling convention not supported in the 64bit mode"); /* Enable by default the SSE and MMX builtins. */ target_flags |= (MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE); ix86_fpmath = FPMATH_SSE; } else { ix86_fpmath = FPMATH_387; /* i386 ABI does not specify red zone. It still makes sense to use it when programmer takes care to stack from being destroyed. */ if (!(target_flags_explicit & MASK_NO_RED_ZONE)) target_flags |= MASK_NO_RED_ZONE; } if (ix86_fpmath_string != 0) { if (! strcmp (ix86_fpmath_string, "387")) ix86_fpmath = FPMATH_387; else if (! strcmp (ix86_fpmath_string, "sse")) { if (!TARGET_SSE) { warning ("SSE instruction set disabled, using 387 arithmetics"); ix86_fpmath = FPMATH_387; } else ix86_fpmath = FPMATH_SSE; } else if (! strcmp (ix86_fpmath_string, "387,sse") || ! strcmp (ix86_fpmath_string, "sse,387")) { if (!TARGET_SSE) { warning ("SSE instruction set disabled, using 387 arithmetics"); ix86_fpmath = FPMATH_387; } else if (!TARGET_80387) { warning ("387 instruction set disabled, using SSE arithmetics"); ix86_fpmath = FPMATH_SSE; } else ix86_fpmath = FPMATH_SSE | FPMATH_387; } else error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string); } /* It makes no sense to ask for just SSE builtins, so MMX is also turned on by -msse. */ if (TARGET_SSE) { target_flags |= MASK_MMX; x86_prefetch_sse = true; } /* If it has 3DNow! it also has MMX so MMX is also turned on by -m3dnow */ if (TARGET_3DNOW) { target_flags |= MASK_MMX; /* If we are targeting the Athlon architecture, enable the 3Dnow/MMX extensions it adds. */ if (x86_3dnow_a & (1 << ix86_arch)) target_flags |= MASK_3DNOW_A; } if ((x86_accumulate_outgoing_args & TUNEMASK) && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS) && !optimize_size) target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS; /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */ { char *p; ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0); p = strchr (internal_label_prefix, 'X'); internal_label_prefix_len = p - internal_label_prefix; *p = '\0'; } } void optimization_options (int level, int size ATTRIBUTE_UNUSED) { /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to make the problem with not enough registers even worse. */ #ifdef INSN_SCHEDULING if (level > 1) flag_schedule_insns = 0; #endif /* The default values of these switches depend on the TARGET_64BIT that is not known at this moment. Mark these values with 2 and let user the to override these. In case there is no command line option specifying them, we will set the defaults in override_options. */ if (optimize >= 1) flag_omit_frame_pointer = 2; flag_pcc_struct_return = 2; flag_asynchronous_unwind_tables = 2; } /* Table of valid machine attributes. */ const struct attribute_spec ix86_attribute_table[] = { /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ /* Stdcall attribute says callee is responsible for popping arguments if they are not variable. */ { "stdcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute }, /* Fastcall attribute says callee is responsible for popping arguments if they are not variable. 
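   A user-level illustration (the function is hypothetical, not GCC code):

     int __attribute__((fastcall)) fsum_example (int a, int b, int c);

   Per init_cumulative_args further below, 'a' and 'b' travel in ECX and EDX
   and only 'c' goes on the stack; because the argument list is fixed, the
   callee pops that stack slot on return.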
*/ { "fastcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute }, /* Cdecl attribute says the callee is a normal C declaration */ { "cdecl", 0, 0, false, true, true, ix86_handle_cdecl_attribute }, /* Regparm attribute specifies how many integer arguments are to be passed in registers. */ { "regparm", 1, 1, false, true, true, ix86_handle_regparm_attribute }, #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES { "dllimport", 0, 0, false, false, false, ix86_handle_dll_attribute }, { "dllexport", 0, 0, false, false, false, ix86_handle_dll_attribute }, { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute }, #endif { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute }, { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute }, { NULL, 0, 0, false, false, false, NULL } }; /* Decide whether we can make a sibling call to a function. DECL is the declaration of the function being targeted by the call and EXP is the CALL_EXPR representing the call. */ static bool ix86_function_ok_for_sibcall (tree decl, tree exp) { /* If we are generating position-independent code, we cannot sibcall optimize any indirect call, or a direct call to a global function, as the PLT requires %ebx be live. */ if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl))) return false; /* If we are returning floats on the 80387 register stack, we cannot make a sibcall from a function that doesn't return a float to a function that does or, conversely, from a function that does return a float to a function that doesn't; the necessary stack adjustment would not be executed. */ if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp))) != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl))))) return false; /* If this call is indirect, we'll need to be able to use a call-clobbered register for the address of the target function. Make sure that all such registers are not used for passing parameters. */ if (!decl && !TARGET_64BIT) { tree type; /* We're looking at the CALL_EXPR, we need the type of the function. */ type = TREE_OPERAND (exp, 0); /* pointer expression */ type = TREE_TYPE (type); /* pointer type */ type = TREE_TYPE (type); /* function type */ if (ix86_function_regparm (type, NULL) >= 3) { /* ??? Need to count the actual number of registers to be used, not the possible number of registers. Fix later. */ return false; } } /* Otherwise okay. That also includes certain types of indirect calls. */ return true; } /* Handle a "cdecl", "stdcall", or "fastcall" attribute; arguments as in struct attribute_spec.handler. 
*/ static tree ix86_handle_cdecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (TREE_CODE (*node) != FUNCTION_TYPE && TREE_CODE (*node) != METHOD_TYPE && TREE_CODE (*node) != FIELD_DECL && TREE_CODE (*node) != TYPE_DECL) { warning ("`%s' attribute only applies to functions", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } else { if (is_attribute_p ("fastcall", name)) { if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node))) { error ("fastcall and stdcall attributes are not compatible"); } else if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node))) { error ("fastcall and regparm attributes are not compatible"); } } else if (is_attribute_p ("stdcall", name)) { if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node))) { error ("fastcall and stdcall attributes are not compatible"); } } } if (TARGET_64BIT) { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "regparm" attribute; arguments as in struct attribute_spec.handler. */ static tree ix86_handle_regparm_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (TREE_CODE (*node) != FUNCTION_TYPE && TREE_CODE (*node) != METHOD_TYPE && TREE_CODE (*node) != FIELD_DECL && TREE_CODE (*node) != TYPE_DECL) { warning ("`%s' attribute only applies to functions", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } else { tree cst; cst = TREE_VALUE (args); if (TREE_CODE (cst) != INTEGER_CST) { warning ("`%s' attribute requires an integer constant argument", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } else if (compare_tree_int (cst, REGPARM_MAX) > 0) { warning ("argument to `%s' attribute larger than %d", IDENTIFIER_POINTER (name), REGPARM_MAX); *no_add_attrs = true; } if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node))) { error ("fastcall and regparm attributes are not compatible"); } } return NULL_TREE; } /* Return 0 if the attributes for two types are incompatible, 1 if they are compatible, and 2 if they are nearly compatible (which causes a warning to be generated). */ static int ix86_comp_type_attributes (tree type1, tree type2) { /* Check for mismatch of non-default calling convention. */ const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall"; if (TREE_CODE (type1) != FUNCTION_TYPE) return 1; /* Check for mismatched fastcall types */ if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1)) != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2))) return 0; /* Check for mismatched return types (cdecl vs stdcall). */ if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1)) != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2))) return 0; if (ix86_function_regparm (type1, NULL) != ix86_function_regparm (type2, NULL)) return 0; return 1; } /* Return the regparm value for a fuctio with the indicated TYPE and DECL. DECL may be NULL when calling function indirectly or considering a libcall. */ static int ix86_function_regparm (tree type, tree decl) { tree attr; int regparm = ix86_regparm; bool user_convention = false; if (!TARGET_64BIT) { attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type)); if (attr) { regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))); user_convention = true; } if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type))) { regparm = 2; user_convention = true; } /* Use register calling convention for local functions when possible. 
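   For example (hypothetical translation unit): with -funit-at-a-time and no
   profiling, a function that the cgraph code proves local, such as

     static int local_helper_example (int a, int b, int c) { return a + b + c; }

   gets regparm(3) automatically, with no attribute in the source.  A nested
   function is capped at regparm(2) instead, because the static chain pointer
   occupies the register that would otherwise carry the third argument.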
*/ if (!TARGET_64BIT && !user_convention && decl && flag_unit_at_a_time && !profile_flag) { struct cgraph_local_info *i = cgraph_local_info (decl); if (i && i->local) { /* We can't use regparm(3) for nested functions as these use static chain pointer in third argument. */ if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl)) regparm = 2; else regparm = 3; } } } return regparm; } /* Return true if EAX is live at the start of the function. Used by ix86_expand_prologue to determine if we need special help before calling allocate_stack_worker. */ static bool ix86_eax_live_at_start_p (void) { /* Cheat. Don't bother working forward from ix86_function_regparm to the function type to whether an actual argument is located in eax. Instead just look at cfg info, which is still close enough to correct at this point. This gives false positives for broken functions that might use uninitialized data that happens to be allocated in eax, but who cares? */ return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0); } /* Value is the number of bytes of arguments automatically popped when returning from a subroutine call. FUNDECL is the declaration node of the function (as a tree), FUNTYPE is the data type of the function (as a tree), or for a library call it is an identifier node for the subroutine name. SIZE is the number of bytes of arguments passed on the stack. On the 80386, the RTD insn may be used to pop them if the number of args is fixed, but if the number is variable then the caller must pop them all. RTD can't be used for library calls now because the library is compiled with the Unix compiler. Use of RTD is a selectable option, since it is incompatible with standard Unix calling sequences. If the option is not selected, the caller must always pop the args. The attribute stdcall is equivalent to RTD on a per module basis. */ int ix86_return_pops_args (tree fundecl, tree funtype, int size) { int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE); /* Cdecl functions override -mrtd, and never pop the stack. */ if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) { /* Stdcall and fastcall functions will pop the stack if not variable args. */ if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype)) || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype))) rtd = 1; if (rtd && (TYPE_ARG_TYPES (funtype) == NULL_TREE || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype))) == void_type_node))) return size; } /* Lose any fake structure return argument if it is passed on the stack. */ if (aggregate_value_p (TREE_TYPE (funtype), fundecl) && !TARGET_64BIT) { int nregs = ix86_function_regparm (funtype, fundecl); if (!nregs) return GET_MODE_SIZE (Pmode); } return 0; } /* Argument support functions. */ /* Return true when register may be used to pass function parameters. */ bool ix86_function_arg_regno_p (int regno) { int i; if (!TARGET_64BIT) return (regno < REGPARM_MAX || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno])); if (SSE_REGNO_P (regno) && TARGET_SSE) return true; /* RAX is used as hidden argument to va_arg functions. */ if (!regno) return true; for (i = 0; i < REGPARM_MAX; i++) if (regno == x86_64_int_parameter_registers[i]) return true; return false; } /* Initialize a variable CUM of type CUMULATIVE_ARGS for a call to a function whose data type is FNTYPE. For a library call, FNTYPE is 0. 
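   A minimal sketch of the protocol, as the expanders drive it (pseudo-code,
   not a verbatim quote of the callers):

       init_cumulative_args (&cum, fntype, libname, fndecl);
       for each argument with mode MODE and type TYPE:
           reg = function_arg (&cum, MODE, TYPE, named);
           if reg is zero, the argument is pushed on the stack;
           function_arg_advance (&cum, MODE, TYPE, named);

   so the fields filled in here (nregs, sse_nregs, mmx_nregs, fastcall,
   maybe_vaarg) merely record how many register slots of each kind remain
   while that scan proceeds.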
*/ void init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */ tree fntype, /* tree ptr for function decl */ rtx libname, /* SYMBOL_REF of library name or 0 */ tree fndecl) { static CUMULATIVE_ARGS zero_cum; tree param, next_param; if (TARGET_DEBUG_ARG) { fprintf (stderr, "\ninit_cumulative_args ("); if (fntype) fprintf (stderr, "fntype code = %s, ret code = %s", tree_code_name[(int) TREE_CODE (fntype)], tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]); else fprintf (stderr, "no fntype"); if (libname) fprintf (stderr, ", libname = %s", XSTR (libname, 0)); } *cum = zero_cum; /* Set up the number of registers to use for passing arguments. */ if (fntype) cum->nregs = ix86_function_regparm (fntype, fndecl); else cum->nregs = ix86_regparm; cum->sse_nregs = SSE_REGPARM_MAX; cum->mmx_nregs = MMX_REGPARM_MAX; cum->warn_sse = true; cum->warn_mmx = true; cum->maybe_vaarg = false; /* Use ecx and edx registers if function has fastcall attribute */ if (fntype && !TARGET_64BIT) { if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype))) { cum->nregs = 2; cum->fastcall = 1; } } /* Determine if this function has variable arguments. This is indicated by the last argument being 'void_type_mode' if there are no variable arguments. If there are variable arguments, then we won't pass anything in registers */ if (cum->nregs || !TARGET_MMX || !TARGET_SSE) { for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0; param != 0; param = next_param) { next_param = TREE_CHAIN (param); if (next_param == 0 && TREE_VALUE (param) != void_type_node) { if (!TARGET_64BIT) { cum->nregs = 0; cum->sse_nregs = 0; cum->mmx_nregs = 0; cum->warn_sse = 0; cum->warn_mmx = 0; cum->fastcall = 0; } cum->maybe_vaarg = true; } } } if ((!fntype && !libname) || (fntype && !TYPE_ARG_TYPES (fntype))) cum->maybe_vaarg = 1; if (TARGET_DEBUG_ARG) fprintf (stderr, ", nregs=%d )\n", cum->nregs); return; } /* x86-64 register passing implementation. See x86-64 ABI for details. Goal of this code is to classify each 8bytes of incoming argument by the register class and assign registers accordingly. */ /* Return the union class of CLASS1 and CLASS2. See the x86-64 PS ABI for details. */ static enum x86_64_reg_class merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2) { /* Rule #1: If both classes are equal, this is the resulting class. */ if (class1 == class2) return class1; /* Rule #2: If one of the classes is NO_CLASS, the resulting class is the other class. */ if (class1 == X86_64_NO_CLASS) return class2; if (class2 == X86_64_NO_CLASS) return class1; /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */ if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS) return X86_64_MEMORY_CLASS; /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */ if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS) || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS)) return X86_64_INTEGERSI_CLASS; if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS) return X86_64_INTEGER_CLASS; /* Rule #5: If one of the classes is X87 or X87UP class, MEMORY is used. */ if (class1 == X86_64_X87_CLASS || class1 == X86_64_X87UP_CLASS || class2 == X86_64_X87_CLASS || class2 == X86_64_X87UP_CLASS) return X86_64_MEMORY_CLASS; /* Rule #6: Otherwise class SSE is used. */ return X86_64_SSE_CLASS; } /* Classify the argument of type TYPE and mode MODE. 
CLASSES will be filled by the register class used to pass each word of the operand. The number of words is returned. In case the parameter should be passed in memory, 0 is returned. As a special case for zero sized containers, classes[0] will be NO_CLASS and 1 is returned. BIT_OFFSET is used internally for handling records and specifies offset of the offset in bits modulo 256 to avoid overflow cases. See the x86-64 PS ABI for details. */ static int classify_argument (enum machine_mode mode, tree type, enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset) { HOST_WIDE_INT bytes = (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* Variable sized entities are always passed/returned in memory. */ if (bytes < 0) return 0; if (mode != VOIDmode && MUST_PASS_IN_STACK (mode, type)) return 0; if (type && AGGREGATE_TYPE_P (type)) { int i; tree field; enum x86_64_reg_class subclasses[MAX_CLASSES]; /* On x86-64 we pass structures larger than 16 bytes on the stack. */ if (bytes > 16) return 0; for (i = 0; i < words; i++) classes[i] = X86_64_NO_CLASS; /* Zero sized arrays or structures are NO_CLASS. We return 0 to signalize memory class, so handle it as special case. */ if (!words) { classes[0] = X86_64_NO_CLASS; return 1; } /* Classify each field of record and merge classes. */ if (TREE_CODE (type) == RECORD_TYPE) { /* For classes first merge in the field of the subclasses. */ if (TYPE_BINFO (type) != NULL && TYPE_BINFO_BASETYPES (type) != NULL) { tree bases = TYPE_BINFO_BASETYPES (type); int n_bases = TREE_VEC_LENGTH (bases); int i; for (i = 0; i < n_bases; ++i) { tree binfo = TREE_VEC_ELT (bases, i); int num; int offset = tree_low_cst (BINFO_OFFSET (binfo), 0) * 8; tree type = BINFO_TYPE (binfo); num = classify_argument (TYPE_MODE (type), type, subclasses, (offset + bit_offset) % 256); if (!num) return 0; for (i = 0; i < num; i++) { int pos = (offset + (bit_offset % 64)) / 8 / 8; classes[i + pos] = merge_classes (subclasses[i], classes[i + pos]); } } } /* And now merge the fields of structure. */ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { if (TREE_CODE (field) == FIELD_DECL) { int num; /* Bitfields are always classified as integer. Handle them early, since later code would consider them to be misaligned integers. */ if (DECL_BIT_FIELD (field)) { for (i = int_bit_position (field) / 8 / 8; i < (int_bit_position (field) + tree_low_cst (DECL_SIZE (field), 0) + 63) / 8 / 8; i++) classes[i] = merge_classes (X86_64_INTEGER_CLASS, classes[i]); } else { num = classify_argument (TYPE_MODE (TREE_TYPE (field)), TREE_TYPE (field), subclasses, (int_bit_position (field) + bit_offset) % 256); if (!num) return 0; for (i = 0; i < num; i++) { int pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8; classes[i + pos] = merge_classes (subclasses[i], classes[i + pos]); } } } } } /* Arrays are handled as small records. */ else if (TREE_CODE (type) == ARRAY_TYPE) { int num; num = classify_argument (TYPE_MODE (TREE_TYPE (type)), TREE_TYPE (type), subclasses, bit_offset); if (!num) return 0; /* The partial classes are now full classes. */ if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4) subclasses[0] = X86_64_SSE_CLASS; if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4) subclasses[0] = X86_64_INTEGER_CLASS; for (i = 0; i < words; i++) classes[i] = subclasses[i % num]; } /* Unions are similar to RECORD_TYPE but offset is always 0. 
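   A worked example (hypothetical type, not taken from the ABI document):

       union hyp_u { double d; int i; };

   occupies a single eightbyte; the double classifies it as SSEDF while the
   int classifies it as INTEGERSI, and merge_classes folds the pair to
   INTEGER (rule #4 above), so the union is passed in a general purpose
   register rather than an SSE register.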
*/ else if (TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) { /* For classes first merge in the field of the subclasses. */ if (TYPE_BINFO (type) != NULL && TYPE_BINFO_BASETYPES (type) != NULL) { tree bases = TYPE_BINFO_BASETYPES (type); int n_bases = TREE_VEC_LENGTH (bases); int i; for (i = 0; i < n_bases; ++i) { tree binfo = TREE_VEC_ELT (bases, i); int num; int offset = tree_low_cst (BINFO_OFFSET (binfo), 0) * 8; tree type = BINFO_TYPE (binfo); num = classify_argument (TYPE_MODE (type), type, subclasses, (offset + (bit_offset % 64)) % 256); if (!num) return 0; for (i = 0; i < num; i++) { int pos = (offset + (bit_offset % 64)) / 8 / 8; classes[i + pos] = merge_classes (subclasses[i], classes[i + pos]); } } } for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { if (TREE_CODE (field) == FIELD_DECL) { int num; num = classify_argument (TYPE_MODE (TREE_TYPE (field)), TREE_TYPE (field), subclasses, bit_offset); if (!num) return 0; for (i = 0; i < num; i++) classes[i] = merge_classes (subclasses[i], classes[i]); } } } else if (TREE_CODE (type) == SET_TYPE) { if (bytes <= 4) { classes[0] = X86_64_INTEGERSI_CLASS; return 1; } else if (bytes <= 8) { classes[0] = X86_64_INTEGER_CLASS; return 1; } else if (bytes <= 12) { classes[0] = X86_64_INTEGER_CLASS; classes[1] = X86_64_INTEGERSI_CLASS; return 2; } else { classes[0] = X86_64_INTEGER_CLASS; classes[1] = X86_64_INTEGER_CLASS; return 2; } } else abort (); /* Final merger cleanup. */ for (i = 0; i < words; i++) { /* If one class is MEMORY, everything should be passed in memory. */ if (classes[i] == X86_64_MEMORY_CLASS) return 0; /* The X86_64_SSEUP_CLASS should be always preceded by X86_64_SSE_CLASS. */ if (classes[i] == X86_64_SSEUP_CLASS && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS)) classes[i] = X86_64_SSE_CLASS; /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */ if (classes[i] == X86_64_X87UP_CLASS && (i == 0 || classes[i - 1] != X86_64_X87_CLASS)) classes[i] = X86_64_SSE_CLASS; } return words; } /* Compute alignment needed. We align all types to natural boundaries with exception of XFmode that is aligned to 64bits. */ if (mode != VOIDmode && mode != BLKmode) { int mode_alignment = GET_MODE_BITSIZE (mode); if (mode == XFmode) mode_alignment = 128; else if (mode == XCmode) mode_alignment = 256; if (COMPLEX_MODE_P (mode)) mode_alignment /= 2; /* Misaligned fields are always returned in memory. */ if (bit_offset % mode_alignment) return 0; } /* Classification of atomic types. 
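   For example (informal restatement of the switch that follows): a 32-bit
   int at the start of the argument is INTEGERSI, a 64-bit long is INTEGER,
   a float at an eightbyte-aligned offset is SSESF, a double is SSEDF, and
   an XFmode long double takes the X87 + X87UP pair; TFmode and the 64-bit
   MMX vector modes return 0 here and therefore go to memory.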
*/ switch (mode) { case DImode: case SImode: case HImode: case QImode: case CSImode: case CHImode: case CQImode: if (bit_offset + GET_MODE_BITSIZE (mode) <= 32) classes[0] = X86_64_INTEGERSI_CLASS; else classes[0] = X86_64_INTEGER_CLASS; return 1; case CDImode: case TImode: classes[0] = classes[1] = X86_64_INTEGER_CLASS; return 2; case CTImode: classes[0] = classes[1] = X86_64_INTEGER_CLASS; classes[2] = classes[3] = X86_64_INTEGER_CLASS; return 4; case SFmode: if (!(bit_offset % 64)) classes[0] = X86_64_SSESF_CLASS; else classes[0] = X86_64_SSE_CLASS; return 1; case DFmode: classes[0] = X86_64_SSEDF_CLASS; return 1; case XFmode: classes[0] = X86_64_X87_CLASS; classes[1] = X86_64_X87UP_CLASS; return 2; case TFmode: case TCmode: return 0; case XCmode: classes[0] = X86_64_X87_CLASS; classes[1] = X86_64_X87UP_CLASS; classes[2] = X86_64_X87_CLASS; classes[3] = X86_64_X87UP_CLASS; return 4; case DCmode: classes[0] = X86_64_SSEDF_CLASS; classes[1] = X86_64_SSEDF_CLASS; return 2; case SCmode: classes[0] = X86_64_SSE_CLASS; return 1; case V4SFmode: case V4SImode: case V16QImode: case V8HImode: case V2DFmode: case V2DImode: classes[0] = X86_64_SSE_CLASS; classes[1] = X86_64_SSEUP_CLASS; return 2; case V2SFmode: case V2SImode: case V4HImode: case V8QImode: return 0; case BLKmode: case VOIDmode: return 0; default: abort (); } } /* Examine the argument and return set number of register required in each class. Return 0 iff parameter should be passed in memory. */ static int examine_argument (enum machine_mode mode, tree type, int in_return, int *int_nregs, int *sse_nregs) { enum x86_64_reg_class class[MAX_CLASSES]; int n = classify_argument (mode, type, class, 0); *int_nregs = 0; *sse_nregs = 0; if (!n) return 0; for (n--; n >= 0; n--) switch (class[n]) { case X86_64_INTEGER_CLASS: case X86_64_INTEGERSI_CLASS: (*int_nregs)++; break; case X86_64_SSE_CLASS: case X86_64_SSESF_CLASS: case X86_64_SSEDF_CLASS: (*sse_nregs)++; break; case X86_64_NO_CLASS: case X86_64_SSEUP_CLASS: break; case X86_64_X87_CLASS: case X86_64_X87UP_CLASS: if (!in_return) return 0; break; case X86_64_MEMORY_CLASS: abort (); } return 1; } /* Construct container for the argument used by GCC interface. See FUNCTION_ARG for the detailed description. */ static rtx construct_container (enum machine_mode mode, tree type, int in_return, int nintregs, int nsseregs, const int * intreg, int sse_regno) { enum machine_mode tmpmode; int bytes = (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); enum x86_64_reg_class class[MAX_CLASSES]; int n; int i; int nexps = 0; int needed_sseregs, needed_intregs; rtx exp[MAX_CLASSES]; rtx ret; n = classify_argument (mode, type, class, 0); if (TARGET_DEBUG_ARG) { if (!n) fprintf (stderr, "Memory class\n"); else { fprintf (stderr, "Classes:"); for (i = 0; i < n; i++) { fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]); } fprintf (stderr, "\n"); } } if (!n) return NULL; if (!examine_argument (mode, type, in_return, &needed_intregs, &needed_sseregs)) return NULL; if (needed_intregs > nintregs || needed_sseregs > nsseregs) return NULL; /* First construct simple cases. Avoid SCmode, since we want to use single register to pass this type. 
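   A hedged illustration with a hypothetical argument type: for

       struct hyp_s { double d; long l; };

   classify_argument yields { SSEDF, INTEGER }, which matches none of the
   single-register shortcuts, so the loop below builds a PARALLEL of roughly
   the form

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI di) (const_int 8))])

   (register numbers assume this is the first argument of the call), i.e.
   the first eightbyte travels in an SSE register and the second in the next
   free integer argument register.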
*/ if (n == 1 && mode != SCmode) switch (class[0]) { case X86_64_INTEGER_CLASS: case X86_64_INTEGERSI_CLASS: return gen_rtx_REG (mode, intreg[0]); case X86_64_SSE_CLASS: case X86_64_SSESF_CLASS: case X86_64_SSEDF_CLASS: return gen_rtx_REG (mode, SSE_REGNO (sse_regno)); case X86_64_X87_CLASS: return gen_rtx_REG (mode, FIRST_STACK_REG); case X86_64_NO_CLASS: /* Zero sized array, struct or class. */ return NULL; default: abort (); } if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS && mode != BLKmode) return gen_rtx_REG (mode, SSE_REGNO (sse_regno)); if (n == 2 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS) return gen_rtx_REG (XFmode, FIRST_STACK_REG); if (n == 2 && class[0] == X86_64_INTEGER_CLASS && class[1] == X86_64_INTEGER_CLASS && (mode == CDImode || mode == TImode || mode == TFmode) && intreg[0] + 1 == intreg[1]) return gen_rtx_REG (mode, intreg[0]); if (n == 4 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS && class[2] == X86_64_X87_CLASS && class[3] == X86_64_X87UP_CLASS && mode != BLKmode) return gen_rtx_REG (XCmode, FIRST_STACK_REG); /* Otherwise figure out the entries of the PARALLEL. */ for (i = 0; i < n; i++) { switch (class[i]) { case X86_64_NO_CLASS: break; case X86_64_INTEGER_CLASS: case X86_64_INTEGERSI_CLASS: /* Merge TImodes on aligned occasions here too. */ if (i * 8 + 8 > bytes) tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0); else if (class[i] == X86_64_INTEGERSI_CLASS) tmpmode = SImode; else tmpmode = DImode; /* We've requested 24 bytes we don't have mode for. Use DImode. */ if (tmpmode == BLKmode) tmpmode = DImode; exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (tmpmode, *intreg), GEN_INT (i*8)); intreg++; break; case X86_64_SSESF_CLASS: exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (SFmode, SSE_REGNO (sse_regno)), GEN_INT (i*8)); sse_regno++; break; case X86_64_SSEDF_CLASS: exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DFmode, SSE_REGNO (sse_regno)), GEN_INT (i*8)); sse_regno++; break; case X86_64_SSE_CLASS: if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS) tmpmode = TImode; else tmpmode = DImode; exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (tmpmode, SSE_REGNO (sse_regno)), GEN_INT (i*8)); if (tmpmode == TImode) i++; sse_regno++; break; default: abort (); } } ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps)); for (i = 0; i < nexps; i++) XVECEXP (ret, 0, i) = exp [i]; return ret; } /* Update the data in CUM to advance over an argument of mode MODE and data type TYPE. (TYPE is null for libcalls where that information may not be available.) */ void function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */ enum machine_mode mode, /* current arg mode */ tree type, /* type of the argument or 0 if lib support */ int named) /* whether or not the argument was named */ { int bytes = (mode == BLKmode) ? 
int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD; if (TARGET_DEBUG_ARG) fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, mode=%s, named=%d)\n\n", words, cum->words, cum->nregs, cum->sse_nregs, GET_MODE_NAME (mode), named); if (TARGET_64BIT) { int int_nregs, sse_nregs; if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs)) cum->words += words; else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs) { cum->nregs -= int_nregs; cum->sse_nregs -= sse_nregs; cum->regno += int_nregs; cum->sse_regno += sse_nregs; } else cum->words += words; } else { if (TARGET_SSE && SSE_REG_MODE_P (mode) && (!type || !AGGREGATE_TYPE_P (type))) { cum->sse_words += words; cum->sse_nregs -= 1; cum->sse_regno += 1; if (cum->sse_nregs <= 0) { cum->sse_nregs = 0; cum->sse_regno = 0; } } else if (TARGET_MMX && MMX_REG_MODE_P (mode) && (!type || !AGGREGATE_TYPE_P (type))) { cum->mmx_words += words; cum->mmx_nregs -= 1; cum->mmx_regno += 1; if (cum->mmx_nregs <= 0) { cum->mmx_nregs = 0; cum->mmx_regno = 0; } } else { cum->words += words; cum->nregs -= words; cum->regno += words; if (cum->nregs <= 0) { cum->nregs = 0; cum->regno = 0; } } } return; } /* Define where to put the arguments to a function. Value is zero to push the argument on the stack, or a hard register in which to store the argument. MODE is the argument's machine mode. TYPE is the data type of the argument (as a tree). This is null for libcalls where that information may not be available. CUM is a variable of type CUMULATIVE_ARGS which gives info about the preceding args and about the function being called. NAMED is nonzero if this argument is a named parameter (otherwise it is an extra parameter matching an ellipsis). */ rtx function_arg (CUMULATIVE_ARGS *cum, /* current arg information */ enum machine_mode mode, /* current arg mode */ tree type, /* type of the argument or 0 if lib support */ int named) /* != 0 for normal args, == 0 for ... args */ { rtx ret = NULL_RTX; int bytes = (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode); int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD; static bool warnedsse, warnedmmx; /* Handle a hidden AL argument containing number of registers for varargs x86-64 functions. For i386 ABI just return constm1_rtx to avoid any AL settings. */ if (mode == VOIDmode) { if (TARGET_64BIT) return GEN_INT (cum->maybe_vaarg ? (cum->sse_nregs < 0 ? SSE_REGPARM_MAX : cum->sse_regno) : -1); else return constm1_rtx; } if (TARGET_64BIT) ret = construct_container (mode, type, 0, cum->nregs, cum->sse_nregs, &x86_64_int_parameter_registers [cum->regno], cum->sse_regno); else switch (mode) { /* For now, pass fp/complex values on the stack. */ default: break; case BLKmode: if (bytes < 0) break; /* FALLTHRU */ case DImode: case SImode: case HImode: case QImode: if (words <= cum->nregs) { int regno = cum->regno; /* Fastcall allocates the first two DWORD (SImode) or smaller arguments to ECX and EDX. */ if (cum->fastcall) { if (mode == BLKmode || mode == DImode) break; /* ECX not EAX is the first allocated register. 
*/ if (regno == 0) regno = 2; } ret = gen_rtx_REG (mode, regno); } break; case TImode: case V16QImode: case V8HImode: case V4SImode: case V2DImode: case V4SFmode: case V2DFmode: if (!type || !AGGREGATE_TYPE_P (type)) { if (!TARGET_SSE && !warnedsse && cum->warn_sse) { warnedsse = true; warning ("SSE vector argument without SSE enabled " "changes the ABI"); } if (cum->sse_nregs) ret = gen_rtx_REG (mode, cum->sse_regno + FIRST_SSE_REG); } break; case V8QImode: case V4HImode: case V2SImode: case V2SFmode: if (!type || !AGGREGATE_TYPE_P (type)) { if (!TARGET_MMX && !warnedmmx && cum->warn_mmx) { warnedmmx = true; warning ("MMX vector argument without MMX enabled " "changes the ABI"); } if (cum->mmx_nregs) ret = gen_rtx_REG (mode, cum->mmx_regno + FIRST_MMX_REG); } break; } if (TARGET_DEBUG_ARG) { fprintf (stderr, "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ", words, cum->words, cum->nregs, GET_MODE_NAME (mode), named); if (ret) print_simple_rtl (stderr, ret); else fprintf (stderr, ", stack"); fprintf (stderr, " )\n"); } return ret; } /* A C expression that indicates when an argument must be passed by reference. If nonzero for an argument, a copy of that argument is made in memory and a pointer to the argument is passed instead of the argument itself. The pointer is passed in whatever way is appropriate for passing a pointer to that type. */ int function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED, tree type, int named ATTRIBUTE_UNUSED) { if (!TARGET_64BIT) return 0; if (type && int_size_in_bytes (type) == -1) { if (TARGET_DEBUG_ARG) fprintf (stderr, "function_arg_pass_by_reference\n"); return 1; } return 0; } /* Return true when TYPE should be 128bit aligned for 32bit argument passing ABI */ static bool contains_128bit_aligned_vector_p (tree type) { enum machine_mode mode = TYPE_MODE (type); if (SSE_REG_MODE_P (mode) && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128)) return true; if (TYPE_ALIGN (type) < 128) return false; if (AGGREGATE_TYPE_P (type)) { /* Walk the aggregates recursively. */ if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) { tree field; if (TYPE_BINFO (type) != NULL && TYPE_BINFO_BASETYPES (type) != NULL) { tree bases = TYPE_BINFO_BASETYPES (type); int n_bases = TREE_VEC_LENGTH (bases); int i; for (i = 0; i < n_bases; ++i) { tree binfo = TREE_VEC_ELT (bases, i); tree type = BINFO_TYPE (binfo); if (contains_128bit_aligned_vector_p (type)) return true; } } /* And now merge the fields of structure. */ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { if (TREE_CODE (field) == FIELD_DECL && contains_128bit_aligned_vector_p (TREE_TYPE (field))) return true; } } /* Just in case some languages pass arrays by value. */ else if (TREE_CODE (type) == ARRAY_TYPE) { if (contains_128bit_aligned_vector_p (TREE_TYPE (type))) return true; } else abort (); } return false; } /* Gives the alignment boundary, in bits, of an argument with the specified mode and type. */ int ix86_function_arg_boundary (enum machine_mode mode, tree type) { int align; if (type) align = TYPE_ALIGN (type); else align = GET_MODE_ALIGNMENT (mode); if (align < PARM_BOUNDARY) align = PARM_BOUNDARY; if (!TARGET_64BIT) { /* i386 ABI defines all arguments to be 4 byte aligned. We have to make an exception for SSE modes since these require 128bit alignment. The handling here differs from field_alignment.
ICC aligns MMX arguments to 4 byte boundaries, while structure fields are aligned to 8 byte boundaries. */ if (!type) { if (!SSE_REG_MODE_P (mode)) align = PARM_BOUNDARY; } else { if (!contains_128bit_aligned_vector_p (type)) align = PARM_BOUNDARY; } } if (align > 128) align = 128; return align; } /* Return true if N is a possible register number of function value. */ bool ix86_function_value_regno_p (int regno) { if (!TARGET_64BIT) { return ((regno) == 0 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387) || ((regno) == FIRST_SSE_REG && TARGET_SSE)); } return ((regno) == 0 || (regno) == FIRST_FLOAT_REG || ((regno) == FIRST_SSE_REG && TARGET_SSE) || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)); } /* Define how to find the value returned by a function. VALTYPE is the data type of the value (as a tree). If the precise function being called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. */ rtx ix86_function_value (tree valtype) { if (TARGET_64BIT) { rtx ret = construct_container (TYPE_MODE (valtype), valtype, 1, REGPARM_MAX, SSE_REGPARM_MAX, x86_64_int_return_registers, 0); /* For zero sized structures, construct_container return NULL, but we need to keep rest of compiler happy by returning meaningful value. */ if (!ret) ret = gen_rtx_REG (TYPE_MODE (valtype), 0); return ret; } else return gen_rtx_REG (TYPE_MODE (valtype), ix86_value_regno (TYPE_MODE (valtype))); } /* Return false iff type is returned in memory. */ int ix86_return_in_memory (tree type) { int needed_intregs, needed_sseregs, size; enum machine_mode mode = TYPE_MODE (type); if (TARGET_64BIT) return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs); if (mode == BLKmode) return 1; size = int_size_in_bytes (type); if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8) return 0; if (VECTOR_MODE_P (mode) || mode == TImode) { /* User-created vectors small enough to fit in EAX. */ if (size < 8) return 0; /* MMX/3dNow values are returned on the stack, since we've got to EMMS/FEMMS before returning. */ if (size == 8) return 1; /* SSE values are returned in XMM0, except when it doesn't exist. */ if (size == 16) return (TARGET_SSE ? 0 : 1); } if (mode == XFmode) return 0; if (size > 12) return 1; return 0; } /* When returning SSE vector types, we have a choice of either (1) being abi incompatible with a -march switch, or (2) generating an error. Given no good solution, I think the safest thing is one warning. The user won't be able to use -Werror, but.... Choose the STRUCT_VALUE_RTX hook because that's (at present) only called in response to actually generating a caller or callee that uses such a type. As opposed to RETURN_IN_MEMORY, which is called via aggregate_value_p for general type probing from tree-ssa. */ static rtx ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED) { static bool warned; if (!TARGET_SSE && type && !warned) { /* Look at the return type of the function, not the function type. */ enum machine_mode mode = TYPE_MODE (TREE_TYPE (type)); if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16)) { warned = true; warning ("SSE vector return without SSE enabled changes the ABI"); } } return NULL; } /* Define how to find the value returned by a library function assuming the value has mode MODE. 
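   For instance, a DFmode libcall result comes back in XMM0 when generating
   64-bit code, whereas the 32-bit path defers to ix86_value_regno and
   normally answers %st(0) for floats and %eax for everything else; XFmode
   and XCmode use the x87 stack even in 64-bit mode, and TFmode/TCmode have
   no register return at all (NULL is returned).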
*/ rtx ix86_libcall_value (enum machine_mode mode) { if (TARGET_64BIT) { switch (mode) { case SFmode: case SCmode: case DFmode: case DCmode: return gen_rtx_REG (mode, FIRST_SSE_REG); case XFmode: case XCmode: return gen_rtx_REG (mode, FIRST_FLOAT_REG); case TFmode: case TCmode: return NULL; default: return gen_rtx_REG (mode, 0); } } else return gen_rtx_REG (mode, ix86_value_regno (mode)); } /* Given a mode, return the register to use for a return value. */ static int ix86_value_regno (enum machine_mode mode) { /* Floating point return values in %st(0). */ if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387) return FIRST_FLOAT_REG; /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where we prevent this case when sse is not available. */ if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16)) return FIRST_SSE_REG; /* Everything else in %eax. */ return 0; } /* Create the va_list data type. */ static tree ix86_build_builtin_va_list (void) { tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl; /* For i386 we use plain pointer to argument area. */ if (!TARGET_64BIT) return build_pointer_type (char_type_node); record = (*lang_hooks.types.make_type) (RECORD_TYPE); type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record); f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"), unsigned_type_node); f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"), unsigned_type_node); f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"), ptr_type_node); f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"), ptr_type_node); DECL_FIELD_CONTEXT (f_gpr) = record; DECL_FIELD_CONTEXT (f_fpr) = record; DECL_FIELD_CONTEXT (f_ovf) = record; DECL_FIELD_CONTEXT (f_sav) = record; TREE_CHAIN (record) = type_decl; TYPE_NAME (record) = type_decl; TYPE_FIELDS (record) = f_gpr; TREE_CHAIN (f_gpr) = f_fpr; TREE_CHAIN (f_fpr) = f_ovf; TREE_CHAIN (f_ovf) = f_sav; layout_type (record); /* The correct type is an array type of one element. */ return build_array_type (record, build_index_type (size_zero_node)); } /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */ static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type, int *pretend_size ATTRIBUTE_UNUSED, int no_rtl) { CUMULATIVE_ARGS next_cum; rtx save_area = NULL_RTX, mem; rtx label; rtx label_ref; rtx tmp_reg; rtx nsse_reg; int set; tree fntype; int stdarg_p; int i; if (!TARGET_64BIT) return; /* Indicate to allocate space on the stack for varargs save area. */ ix86_save_varrargs_registers = 1; cfun->stack_alignment_needed = 128; fntype = TREE_TYPE (current_function_decl); stdarg_p = (TYPE_ARG_TYPES (fntype) != 0 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype))) != void_type_node)); /* For varargs, we do not want to skip the dummy va_dcl argument. For stdargs, we do want to skip the last named argument. */ next_cum = *cum; if (stdarg_p) function_arg_advance (&next_cum, mode, type, 1); if (!no_rtl) save_area = frame_pointer_rtx; set = get_varargs_alias_set (); for (i = next_cum.regno; i < ix86_regparm; i++) { mem = gen_rtx_MEM (Pmode, plus_constant (save_area, i * UNITS_PER_WORD)); set_mem_alias_set (mem, set); emit_move_insn (mem, gen_rtx_REG (Pmode, x86_64_int_parameter_registers[i])); } if (next_cum.sse_nregs) { /* Now emit code to save SSE registers. The AX parameter contains number of SSE parameter registers used to call this function. We use sse_prologue_save insn template that produces computed jump across SSE saves. 
We need some preparation work to get this working. */ label = gen_label_rtx (); label_ref = gen_rtx_LABEL_REF (Pmode, label); /* Compute address to jump to : label - 5*eax + nnamed_sse_arguments*5 */ tmp_reg = gen_reg_rtx (Pmode); nsse_reg = gen_reg_rtx (Pmode); emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0))); emit_insn (gen_rtx_SET (VOIDmode, tmp_reg, gen_rtx_MULT (Pmode, nsse_reg, GEN_INT (4)))); if (next_cum.sse_regno) emit_move_insn (nsse_reg, gen_rtx_CONST (DImode, gen_rtx_PLUS (DImode, label_ref, GEN_INT (next_cum.sse_regno * 4)))); else emit_move_insn (nsse_reg, label_ref); emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg)); /* Compute address of memory block we save into. We always use pointer pointing 127 bytes after first byte to store - this is needed to keep instruction size limited by 4 bytes. */ tmp_reg = gen_reg_rtx (Pmode); emit_insn (gen_rtx_SET (VOIDmode, tmp_reg, plus_constant (save_area, 8 * REGPARM_MAX + 127))); mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127)); set_mem_alias_set (mem, set); set_mem_align (mem, BITS_PER_WORD); /* And finally do the dirty job! */ emit_insn (gen_sse_prologue_save (mem, nsse_reg, GEN_INT (next_cum.sse_regno), label)); } } /* Implement va_start. */ void ix86_va_start (tree valist, rtx nextarg) { HOST_WIDE_INT words, n_gpr, n_fpr; tree f_gpr, f_fpr, f_ovf, f_sav; tree gpr, fpr, ovf, sav, t; /* Only 64bit target needs something special. */ if (!TARGET_64BIT) { std_expand_builtin_va_start (valist, nextarg); return; } f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node)); f_fpr = TREE_CHAIN (f_gpr); f_ovf = TREE_CHAIN (f_fpr); f_sav = TREE_CHAIN (f_ovf); valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist); gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE); fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE); ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE); sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE); /* Count number of gp and fp argument registers used. */ words = current_function_args_info.words; n_gpr = current_function_args_info.regno; n_fpr = current_function_args_info.sse_regno; if (TARGET_DEBUG_ARG) fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n", (int) words, (int) n_gpr, (int) n_fpr); t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr, build_int_2 (n_gpr * 8, 0)); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr, build_int_2 (n_fpr * 16 + 8*REGPARM_MAX, 0)); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); /* Find the overflow area. */ t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx); if (words != 0) t = build (PLUS_EXPR, TREE_TYPE (ovf), t, build_int_2 (words * UNITS_PER_WORD, 0)); t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); /* Find the register save area. Prologue of the function save it right above stack frame. */ t = make_tree (TREE_TYPE (sav), frame_pointer_rtx); t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); } /* Implement va_arg. 
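   A rough sketch of the code this emits for a one-eightbyte integer
   argument on x86-64 (simplified; the field names are those of the
   __va_list_tag record built above):

       if (ap->gp_offset >= 8 * REGPARM_MAX)
         goto overflow;
       addr = ap->reg_save_area + ap->gp_offset;
       ap->gp_offset += 8;
       goto done;
     overflow:
       addr = ap->overflow_arg_area, suitably aligned;
       ap->overflow_arg_area = addr + rounded argument size;
     done:
       fetch the value from addr;

   SSE-class arguments use fp_offset and the 16-byte slots of the register
   save area instead, and arguments needing both kinds of register are first
   copied into a temporary (the need_temp path below).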
*/ tree ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p) { static const int intreg[6] = { 0, 1, 2, 3, 4, 5 }; tree f_gpr, f_fpr, f_ovf, f_sav; tree gpr, fpr, ovf, sav, t; int size, rsize; tree lab_false, lab_over = NULL_TREE; tree addr, t2; rtx container; int indirect_p = 0; tree ptrtype; /* Only 64bit target needs something special. */ if (!TARGET_64BIT) return std_gimplify_va_arg_expr (valist, type, pre_p, post_p); f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node)); f_fpr = TREE_CHAIN (f_gpr); f_ovf = TREE_CHAIN (f_fpr); f_sav = TREE_CHAIN (f_ovf); valist = build_fold_indirect_ref (valist); gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE); fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE); ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE); sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE); size = int_size_in_bytes (type); if (size == -1) { /* Variable-size types are passed by reference. */ indirect_p = 1; type = build_pointer_type (type); size = int_size_in_bytes (type); } rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD; container = construct_container (TYPE_MODE (type), type, 0, REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0); /* * Pull the value out of the saved registers ... */ addr = create_tmp_var (ptr_type_node, "addr"); DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set (); if (container) { int needed_intregs, needed_sseregs; int need_temp; tree int_addr, sse_addr; lab_false = create_artificial_label (); lab_over = create_artificial_label (); examine_argument (TYPE_MODE (type), type, 0, &needed_intregs, &needed_sseregs); need_temp = ((needed_intregs && TYPE_ALIGN (type) > 64) || TYPE_ALIGN (type) > 128); /* In case we are passing structure, verify that it is consecutive block on the register save area. If not we need to do moves. */ if (!need_temp && !REG_P (container)) { /* Verify that all registers are strictly consecutive */ if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0)))) { int i; for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++) { rtx slot = XVECEXP (container, 0, i); if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i || INTVAL (XEXP (slot, 1)) != i * 16) need_temp = 1; } } else { int i; for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++) { rtx slot = XVECEXP (container, 0, i); if (REGNO (XEXP (slot, 0)) != (unsigned int) i || INTVAL (XEXP (slot, 1)) != i * 8) need_temp = 1; } } } if (!need_temp) { int_addr = addr; sse_addr = addr; } else { int_addr = create_tmp_var (ptr_type_node, "int_addr"); DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set (); sse_addr = create_tmp_var (ptr_type_node, "sse_addr"); DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set (); } /* First ensure that we fit completely in registers. */ if (needed_intregs) { t = build_int_2 ((REGPARM_MAX - needed_intregs + 1) * 8, 0); TREE_TYPE (t) = TREE_TYPE (gpr); t = build2 (GE_EXPR, boolean_type_node, gpr, t); t2 = build1 (GOTO_EXPR, void_type_node, lab_false); t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE); gimplify_and_add (t, pre_p); } if (needed_sseregs) { t = build_int_2 ((SSE_REGPARM_MAX - needed_sseregs + 1) * 16 + REGPARM_MAX * 8, 0); TREE_TYPE (t) = TREE_TYPE (fpr); t = build2 (GE_EXPR, boolean_type_node, fpr, t); t2 = build1 (GOTO_EXPR, void_type_node, lab_false); t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE); gimplify_and_add (t, pre_p); } /* Compute index to start of area used for integer regs. 
*/ if (needed_intregs) { /* int_addr = gpr + sav; */ t = build2 (PLUS_EXPR, ptr_type_node, sav, gpr); t = build2 (MODIFY_EXPR, void_type_node, int_addr, t); gimplify_and_add (t, pre_p); } if (needed_sseregs) { /* sse_addr = fpr + sav; */ t = build2 (PLUS_EXPR, ptr_type_node, sav, fpr); t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t); gimplify_and_add (t, pre_p); } if (need_temp) { int i; tree temp = create_tmp_var (type, "va_arg_tmp"); /* addr = &temp; */ t = build1 (ADDR_EXPR, build_pointer_type (type), temp); t = build2 (MODIFY_EXPR, void_type_node, addr, t); gimplify_and_add (t, pre_p); for (i = 0; i < XVECLEN (container, 0); i++) { rtx slot = XVECEXP (container, 0, i); rtx reg = XEXP (slot, 0); enum machine_mode mode = GET_MODE (reg); tree piece_type = lang_hooks.types.type_for_mode (mode, 1); tree addr_type = build_pointer_type (piece_type); tree src_addr, src; int src_offset; tree dest_addr, dest; if (SSE_REGNO_P (REGNO (reg))) { src_addr = sse_addr; src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16; } else { src_addr = int_addr; src_offset = REGNO (reg) * 8; } src_addr = fold_convert (addr_type, src_addr); src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr, size_int (src_offset))); src = build_fold_indirect_ref (src_addr); dest_addr = fold_convert (addr_type, addr); dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr, size_int (INTVAL (XEXP (slot, 1))))); dest = build_fold_indirect_ref (dest_addr); t = build2 (MODIFY_EXPR, void_type_node, dest, src); gimplify_and_add (t, pre_p); } } if (needed_intregs) { t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr, build_int_2 (needed_intregs * 8, 0)); t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t); gimplify_and_add (t, pre_p); } if (needed_sseregs) { t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr, build_int_2 (needed_sseregs * 16, 0)); t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t); gimplify_and_add (t, pre_p); } t = build1 (GOTO_EXPR, void_type_node, lab_over); gimplify_and_add (t, pre_p); t = build1 (LABEL_EXPR, void_type_node, lab_false); append_to_statement_list (t, pre_p); } /* ... otherwise out of the overflow area. */ /* Care for on-stack alignment if needed. */ if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64) t = ovf; else { HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8; t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf, build_int_2 (align - 1, 0)); t = build (BIT_AND_EXPR, TREE_TYPE (t), t, build_int_2 (-align, -1)); } gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue); t2 = build2 (MODIFY_EXPR, void_type_node, addr, t); gimplify_and_add (t2, pre_p); t = build2 (PLUS_EXPR, TREE_TYPE (t), t, build_int_2 (rsize * UNITS_PER_WORD, 0)); t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t); gimplify_and_add (t, pre_p); if (container) { t = build1 (LABEL_EXPR, void_type_node, lab_over); append_to_statement_list (t, pre_p); } ptrtype = build_pointer_type (type); addr = fold_convert (ptrtype, addr); if (indirect_p) addr = build_fold_indirect_ref (addr); return build_fold_indirect_ref (addr); } /* Return nonzero if OP is either a i387 or SSE fp register. */ int any_fp_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return ANY_FP_REG_P (op); } /* Return nonzero if OP is an i387 fp register. */ int fp_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return FP_REG_P (op); } /* Return nonzero if OP is a non-fp register_operand. 
*/ int register_and_not_any_fp_reg_operand (rtx op, enum machine_mode mode) { return register_operand (op, mode) && !ANY_FP_REG_P (op); } /* Return nonzero if OP is a register operand other than an i387 fp register. */ int register_and_not_fp_reg_operand (rtx op, enum machine_mode mode) { return register_operand (op, mode) && !FP_REG_P (op); } /* Return nonzero if OP is general operand representable on x86_64. */ int x86_64_general_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return general_operand (op, mode); if (nonimmediate_operand (op, mode)) return 1; return x86_64_sign_extended_value (op); } /* Return nonzero if OP is general operand representable on x86_64 as either sign extended or zero extended constant. */ int x86_64_szext_general_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return general_operand (op, mode); if (nonimmediate_operand (op, mode)) return 1; return x86_64_sign_extended_value (op) || x86_64_zero_extended_value (op); } /* Return nonzero if OP is nonmemory operand representable on x86_64. */ int x86_64_nonmemory_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return nonmemory_operand (op, mode); if (register_operand (op, mode)) return 1; return x86_64_sign_extended_value (op); } /* Return nonzero if OP is nonmemory operand acceptable by movabs patterns. */ int x86_64_movabs_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT || !flag_pic) return nonmemory_operand (op, mode); if (register_operand (op, mode) || x86_64_sign_extended_value (op)) return 1; if (CONSTANT_P (op) && !symbolic_reference_mentioned_p (op)) return 1; return 0; } /* Return nonzero if OPNUM's MEM should be matched in movabs* patterns. */ int ix86_check_movabs (rtx insn, int opnum) { rtx set, mem; set = PATTERN (insn); if (GET_CODE (set) == PARALLEL) set = XVECEXP (set, 0, 0); if (GET_CODE (set) != SET) abort (); mem = XEXP (set, opnum); while (GET_CODE (mem) == SUBREG) mem = SUBREG_REG (mem); if (GET_CODE (mem) != MEM) abort (); return (volatile_ok || !MEM_VOLATILE_P (mem)); } /* Return nonzero if OP is nonmemory operand representable on x86_64. */ int x86_64_szext_nonmemory_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return nonmemory_operand (op, mode); if (register_operand (op, mode)) return 1; return x86_64_sign_extended_value (op) || x86_64_zero_extended_value (op); } /* Return nonzero if OP is immediate operand representable on x86_64. */ int x86_64_immediate_operand (rtx op, enum machine_mode mode) { if (!TARGET_64BIT) return immediate_operand (op, mode); return x86_64_sign_extended_value (op); } /* Return nonzero if OP is immediate operand representable on x86_64. */ int x86_64_zext_immediate_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return x86_64_zero_extended_value (op); } /* Return nonzero if OP is CONST_INT >= 1 and <= 31 (a valid operand for shift & compare patterns, as shifting by 0 does not change flags), else return zero. */ int const_int_1_31_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 1 && INTVAL (op) <= 31); } /* Returns 1 if OP is either a symbol reference or a sum of a symbol reference and a constant. 
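   Illustrative operands accepted here (RTL written out by hand, not
   produced by this predicate):

       (symbol_ref "foo")
       (label_ref 23)
       (const (plus (symbol_ref "foo") (const_int 8)))
       (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF))

   whereas a bare (const_int 8) or a sum of two symbols is rejected.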
*/ int symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { switch (GET_CODE (op)) { case SYMBOL_REF: case LABEL_REF: return 1; case CONST: op = XEXP (op, 0); if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF || (GET_CODE (op) == UNSPEC && (XINT (op, 1) == UNSPEC_GOT || XINT (op, 1) == UNSPEC_GOTOFF || XINT (op, 1) == UNSPEC_GOTPCREL))) return 1; if (GET_CODE (op) != PLUS || GET_CODE (XEXP (op, 1)) != CONST_INT) return 0; op = XEXP (op, 0); if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF) return 1; /* Only @GOTOFF gets offsets. */ if (GET_CODE (op) != UNSPEC || XINT (op, 1) != UNSPEC_GOTOFF) return 0; op = XVECEXP (op, 0, 0); if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF) return 1; return 0; default: return 0; } } /* Return true if the operand contains a @GOT or @GOTOFF reference. */ int pic_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (GET_CODE (op) != CONST) return 0; op = XEXP (op, 0); if (TARGET_64BIT) { if (GET_CODE (op) == UNSPEC && XINT (op, 1) == UNSPEC_GOTPCREL) return 1; if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 0)) == UNSPEC && XINT (XEXP (op, 0), 1) == UNSPEC_GOTPCREL) return 1; } else { if (GET_CODE (op) == UNSPEC) return 1; if (GET_CODE (op) != PLUS || GET_CODE (XEXP (op, 1)) != CONST_INT) return 0; op = XEXP (op, 0); if (GET_CODE (op) == UNSPEC) return 1; } return 0; } /* Return true if OP is a symbolic operand that resolves locally. */ static int local_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (GET_CODE (op) == CONST && GET_CODE (XEXP (op, 0)) == PLUS && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT) op = XEXP (XEXP (op, 0), 0); if (GET_CODE (op) == LABEL_REF) return 1; if (GET_CODE (op) != SYMBOL_REF) return 0; if (SYMBOL_REF_LOCAL_P (op)) return 1; /* There is, however, a not insubstantial body of code in the rest of the compiler that assumes it can just stick the results of ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done. */ /* ??? This is a hack. Should update the body of the compiler to always create a DECL an invoke targetm.encode_section_info. */ if (strncmp (XSTR (op, 0), internal_label_prefix, internal_label_prefix_len) == 0) return 1; return 0; } /* Test for various thread-local symbols. */ int tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (GET_CODE (op) != SYMBOL_REF) return 0; return SYMBOL_REF_TLS_MODEL (op); } static inline int tls_symbolic_operand_1 (rtx op, enum tls_model kind) { if (GET_CODE (op) != SYMBOL_REF) return 0; return SYMBOL_REF_TLS_MODEL (op) == kind; } int global_dynamic_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return tls_symbolic_operand_1 (op, TLS_MODEL_GLOBAL_DYNAMIC); } int local_dynamic_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return tls_symbolic_operand_1 (op, TLS_MODEL_LOCAL_DYNAMIC); } int initial_exec_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return tls_symbolic_operand_1 (op, TLS_MODEL_INITIAL_EXEC); } int local_exec_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return tls_symbolic_operand_1 (op, TLS_MODEL_LOCAL_EXEC); } /* Test for a valid operand for a call instruction. Don't allow the arg pointer register or virtual regs since they may decay into reg + const, which the patterns can't handle. */ int call_insn_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { /* Disallow indirect through a virtual register. 
This leads to compiler aborts when trying to eliminate them. */ if (GET_CODE (op) == REG && (op == arg_pointer_rtx || op == frame_pointer_rtx || (REGNO (op) >= FIRST_PSEUDO_REGISTER && REGNO (op) <= LAST_VIRTUAL_REGISTER))) return 0; /* Disallow `call 1234'. Due to varying assembler lameness this gets either rejected or translated to `call .+1234'. */ if (GET_CODE (op) == CONST_INT) return 0; /* Explicitly allow SYMBOL_REF even if pic. */ if (GET_CODE (op) == SYMBOL_REF) return 1; /* Otherwise we can allow any general_operand in the address. */ return general_operand (op, Pmode); } /* Test for a valid operand for a call instruction. Don't allow the arg pointer register or virtual regs since they may decay into reg + const, which the patterns can't handle. */ int sibcall_insn_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { /* Disallow indirect through a virtual register. This leads to compiler aborts when trying to eliminate them. */ if (GET_CODE (op) == REG && (op == arg_pointer_rtx || op == frame_pointer_rtx || (REGNO (op) >= FIRST_PSEUDO_REGISTER && REGNO (op) <= LAST_VIRTUAL_REGISTER))) return 0; /* Explicitly allow SYMBOL_REF even if pic. */ if (GET_CODE (op) == SYMBOL_REF) return 1; /* Otherwise we can only allow register operands. */ return register_operand (op, Pmode); } int constant_call_address_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (GET_CODE (op) == CONST && GET_CODE (XEXP (op, 0)) == PLUS && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT) op = XEXP (XEXP (op, 0), 0); return GET_CODE (op) == SYMBOL_REF; } /* Match exactly zero and one. */ int const0_operand (rtx op, enum machine_mode mode) { return op == CONST0_RTX (mode); } int const1_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return op == const1_rtx; } /* Match 2, 4, or 8. Used for leal multiplicands. */ int const248_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return (GET_CODE (op) == CONST_INT && (INTVAL (op) == 2 || INTVAL (op) == 4 || INTVAL (op) == 8)); } int const_0_to_3_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 4); } int const_0_to_7_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 8); } int const_0_to_15_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 16); } int const_0_to_255_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 256); } /* True if this is a constant appropriate for an increment or decrement. */ int incdec_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { /* On Pentium4, the inc and dec operations causes extra dependency on flag registers, since carry flag is not set. */ if ((TARGET_PENTIUM4 || TARGET_NOCONA) && !optimize_size) return 0; return op == const1_rtx || op == constm1_rtx; } /* Return nonzero if OP is acceptable as operand of DImode shift expander. */ int shiftdi_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { if (TARGET_64BIT) return nonimmediate_operand (op, mode); else return register_operand (op, mode); } /* Return false if this is the stack pointer, or any other fake register eliminable to the stack pointer. Otherwise, this is a register operand. This is used to prevent esp from being used as an index reg. Which would only happen in pathological cases. 
*/ int reg_no_sp_operand (rtx op, enum machine_mode mode) { rtx t = op; if (GET_CODE (t) == SUBREG) t = SUBREG_REG (t); if (t == stack_pointer_rtx || t == arg_pointer_rtx || t == frame_pointer_rtx) return 0; return register_operand (op, mode); } int mmx_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return MMX_REG_P (op); } /* Return false if this is any eliminable register. Otherwise general_operand. */ int general_no_elim_operand (rtx op, enum machine_mode mode) { rtx t = op; if (GET_CODE (t) == SUBREG) t = SUBREG_REG (t); if (t == arg_pointer_rtx || t == frame_pointer_rtx || t == virtual_incoming_args_rtx || t == virtual_stack_vars_rtx || t == virtual_stack_dynamic_rtx) return 0; if (REG_P (t) && REGNO (t) >= FIRST_VIRTUAL_REGISTER && REGNO (t) <= LAST_VIRTUAL_REGISTER) return 0; return general_operand (op, mode); } /* Return false if this is any eliminable register. Otherwise register_operand or const_int. */ int nonmemory_no_elim_operand (rtx op, enum machine_mode mode) { rtx t = op; if (GET_CODE (t) == SUBREG) t = SUBREG_REG (t); if (t == arg_pointer_rtx || t == frame_pointer_rtx || t == virtual_incoming_args_rtx || t == virtual_stack_vars_rtx || t == virtual_stack_dynamic_rtx) return 0; return GET_CODE (op) == CONST_INT || register_operand (op, mode); } /* Return false if this is any eliminable register or stack register, otherwise work like register_operand. */ int index_register_operand (rtx op, enum machine_mode mode) { rtx t = op; if (GET_CODE (t) == SUBREG) t = SUBREG_REG (t); if (!REG_P (t)) return 0; if (t == arg_pointer_rtx || t == frame_pointer_rtx || t == virtual_incoming_args_rtx || t == virtual_stack_vars_rtx || t == virtual_stack_dynamic_rtx || REGNO (t) == STACK_POINTER_REGNUM) return 0; return general_operand (op, mode); } /* Return true if op is a Q_REGS class register. */ int q_regs_operand (rtx op, enum machine_mode mode) { if (mode != VOIDmode && GET_MODE (op) != mode) return 0; if (GET_CODE (op) == SUBREG) op = SUBREG_REG (op); return ANY_QI_REG_P (op); } /* Return true if op is an flags register. */ int flags_reg_operand (rtx op, enum machine_mode mode) { if (mode != VOIDmode && GET_MODE (op) != mode) return 0; return REG_P (op) && REGNO (op) == FLAGS_REG && GET_MODE (op) != VOIDmode; } /* Return true if op is a NON_Q_REGS class register. */ int non_q_regs_operand (rtx op, enum machine_mode mode) { if (mode != VOIDmode && GET_MODE (op) != mode) return 0; if (GET_CODE (op) == SUBREG) op = SUBREG_REG (op); return NON_QI_REG_P (op); } int zero_extended_scalar_load_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { unsigned n_elts; if (GET_CODE (op) != MEM) return 0; op = maybe_get_pool_constant (op); if (!op) return 0; if (GET_CODE (op) != CONST_VECTOR) return 0; n_elts = (GET_MODE_SIZE (GET_MODE (op)) / GET_MODE_SIZE (GET_MODE_INNER (GET_MODE (op)))); for (n_elts--; n_elts > 0; n_elts--) { rtx elt = CONST_VECTOR_ELT (op, n_elts); if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op)))) return 0; } return 1; } /* Return 1 when OP is operand acceptable for standard SSE move. */ int vector_move_operand (rtx op, enum machine_mode mode) { if (nonimmediate_operand (op, mode)) return 1; if (GET_MODE (op) != mode && mode != VOIDmode) return 0; return (op == CONST0_RTX (GET_MODE (op))); } /* Return true if op if a valid address, and does not contain a segment override. */ int no_seg_address_operand (rtx op, enum machine_mode mode) { struct ix86_address parts; if (! address_operand (op, mode)) return 0; if (! 
ix86_decompose_address (op, &parts)) abort (); return parts.seg == SEG_DEFAULT; } /* Return 1 if OP is a comparison that can be used in the CMPSS/CMPPS insns. */ int sse_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { enum rtx_code code = GET_CODE (op); switch (code) { /* Operations supported directly. */ case EQ: case LT: case LE: case UNORDERED: case NE: case UNGE: case UNGT: case ORDERED: return 1; /* These are equivalent to ones above in non-IEEE comparisons. */ case UNEQ: case UNLT: case UNLE: case LTGT: case GE: case GT: return !TARGET_IEEE_FP; default: return 0; } } /* Return 1 if OP is a valid comparison operator in valid mode. */ int ix86_comparison_operator (rtx op, enum machine_mode mode) { enum machine_mode inmode; enum rtx_code code = GET_CODE (op); if (mode != VOIDmode && GET_MODE (op) != mode) return 0; if (!COMPARISON_P (op)) return 0; inmode = GET_MODE (XEXP (op, 0)); if (inmode == CCFPmode || inmode == CCFPUmode) { enum rtx_code second_code, bypass_code; ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code); return (bypass_code == NIL && second_code == NIL); } switch (code) { case EQ: case NE: return 1; case LT: case GE: if (inmode == CCmode || inmode == CCGCmode || inmode == CCGOCmode || inmode == CCNOmode) return 1; return 0; case LTU: case GTU: case LEU: case ORDERED: case UNORDERED: case GEU: if (inmode == CCmode) return 1; return 0; case GT: case LE: if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode) return 1; return 0; default: return 0; } } /* Return 1 if OP is a valid comparison operator testing carry flag to be set. */ int ix86_carry_flag_operator (rtx op, enum machine_mode mode) { enum machine_mode inmode; enum rtx_code code = GET_CODE (op); if (mode != VOIDmode && GET_MODE (op) != mode) return 0; if (!COMPARISON_P (op)) return 0; inmode = GET_MODE (XEXP (op, 0)); if (GET_CODE (XEXP (op, 0)) != REG || REGNO (XEXP (op, 0)) != 17 || XEXP (op, 1) != const0_rtx) return 0; if (inmode == CCFPmode || inmode == CCFPUmode) { enum rtx_code second_code, bypass_code; ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code); if (bypass_code != NIL || second_code != NIL) return 0; code = ix86_fp_compare_code_to_integer (code); } else if (inmode != CCmode) return 0; return code == LTU; } /* Return 1 if OP is a comparison operator that can be issued by fcmov. */ int fcmov_comparison_operator (rtx op, enum machine_mode mode) { enum machine_mode inmode; enum rtx_code code = GET_CODE (op); if (mode != VOIDmode && GET_MODE (op) != mode) return 0; if (!COMPARISON_P (op)) return 0; inmode = GET_MODE (XEXP (op, 0)); if (inmode == CCFPmode || inmode == CCFPUmode) { enum rtx_code second_code, bypass_code; ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code); if (bypass_code != NIL || second_code != NIL) return 0; code = ix86_fp_compare_code_to_integer (code); } /* i387 supports just limited amount of conditional codes. */ switch (code) { case LTU: case GTU: case LEU: case GEU: if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode) return 1; return 0; case ORDERED: case UNORDERED: case EQ: case NE: return 1; default: return 0; } } /* Return 1 if OP is a binary operator that can be promoted to wider mode. */ int promotable_binary_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { switch (GET_CODE (op)) { case MULT: /* Modern CPUs have same latency for HImode and SImode multiply, but 386 and 486 do HImode multiply faster. 
*/ return ix86_tune > PROCESSOR_I486; case PLUS: case AND: case IOR: case XOR: case ASHIFT: return 1; default: return 0; } } /* Nearly general operand, but accept any const_double, since we wish to be able to drop them into memory rather than have them get pulled into registers. */ int cmp_fp_expander_operand (rtx op, enum machine_mode mode) { if (mode != VOIDmode && mode != GET_MODE (op)) return 0; if (GET_CODE (op) == CONST_DOUBLE) return 1; return general_operand (op, mode); } /* Match an SI or HImode register for a zero_extract. */ int ext_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { int regno; if ((!TARGET_64BIT || GET_MODE (op) != DImode) && GET_MODE (op) != SImode && GET_MODE (op) != HImode) return 0; if (!register_operand (op, VOIDmode)) return 0; /* Be careful to accept only registers having upper parts. */ regno = REG_P (op) ? REGNO (op) : REGNO (SUBREG_REG (op)); return (regno > LAST_VIRTUAL_REGISTER || regno < 4); } /* Return 1 if this is a valid binary floating-point operation. OP is the expression matched, and MODE is its mode. */ int binary_fp_operator (rtx op, enum machine_mode mode) { if (mode != VOIDmode && mode != GET_MODE (op)) return 0; switch (GET_CODE (op)) { case PLUS: case MINUS: case MULT: case DIV: return GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT; default: return 0; } } int mult_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return GET_CODE (op) == MULT; } int div_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED) { return GET_CODE (op) == DIV; } int arith_or_logical_operator (rtx op, enum machine_mode mode) { return ((mode == VOIDmode || GET_MODE (op) == mode) && ARITHMETIC_P (op)); } /* Returns 1 if OP is memory operand with a displacement. */ int memory_displacement_operand (rtx op, enum machine_mode mode) { struct ix86_address parts; if (! memory_operand (op, mode)) return 0; if (! ix86_decompose_address (XEXP (op, 0), &parts)) abort (); return parts.disp != NULL_RTX; } /* To avoid problems when jump re-emits comparisons like testqi_ext_ccno_0, re-recognize the operand to avoid a copy_to_mode_reg that will fail. ??? It seems likely that this will only work because cmpsi is an expander, and no actual insns use this. */ int cmpsi_operand (rtx op, enum machine_mode mode) { if (nonimmediate_operand (op, mode)) return 1; if (GET_CODE (op) == AND && GET_MODE (op) == SImode && GET_CODE (XEXP (op, 0)) == ZERO_EXTRACT && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT && GET_CODE (XEXP (XEXP (op, 0), 2)) == CONST_INT && INTVAL (XEXP (XEXP (op, 0), 1)) == 8 && INTVAL (XEXP (XEXP (op, 0), 2)) == 8 && GET_CODE (XEXP (op, 1)) == CONST_INT) return 1; return 0; } /* Returns 1 if OP is memory operand that can not be represented by the modRM array. */ int long_memory_operand (rtx op, enum machine_mode mode) { if (! memory_operand (op, mode)) return 0; return memory_address_length (op) != 0; } /* Return nonzero if the rtx is known aligned. */ int aligned_operand (rtx op, enum machine_mode mode) { struct ix86_address parts; if (!general_operand (op, mode)) return 0; /* Registers and immediate operands are always "aligned". */ if (GET_CODE (op) != MEM) return 1; /* Don't even try to do any aligned optimizations with volatiles. */ if (MEM_VOLATILE_P (op)) return 0; op = XEXP (op, 0); /* Pushes and pops are only valid on the stack pointer. */ if (GET_CODE (op) == PRE_DEC || GET_CODE (op) == POST_INC) return 1; /* Decode the address. */ if (! 
ix86_decompose_address (op, &parts)) abort (); /* Look for some component that isn't known to be aligned. */ if (parts.index) { if (parts.scale < 4 && REGNO_POINTER_ALIGN (REGNO (parts.index)) < 32) return 0; } if (parts.base) { if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32) return 0; } if (parts.disp) { if (GET_CODE (parts.disp) != CONST_INT || (INTVAL (parts.disp) & 3) != 0) return 0; } /* Didn't find one -- this must be an aligned address. */ return 1; } /* Initialize the table of extra 80387 mathematical constants. */ static void init_ext_80387_constants (void) { static const char * cst[5] = { "0.3010299956639811952256464283594894482", /* 0: fldlg2 */ "0.6931471805599453094286904741849753009", /* 1: fldln2 */ "1.4426950408889634073876517827983434472", /* 2: fldl2e */ "3.3219280948873623478083405569094566090", /* 3: fldl2t */ "3.1415926535897932385128089594061862044", /* 4: fldpi */ }; int i; for (i = 0; i < 5; i++) { real_from_string (&ext_80387_constants_table[i], cst[i]); /* Ensure each constant is rounded to XFmode precision. */ real_convert (&ext_80387_constants_table[i], XFmode, &ext_80387_constants_table[i]); } ext_80387_constants_init = 1; } /* Return true if the constant is something that can be loaded with a special instruction. */ int standard_80387_constant_p (rtx x) { if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x))) return -1; if (x == CONST0_RTX (GET_MODE (x))) return 1; if (x == CONST1_RTX (GET_MODE (x))) return 2; /* For XFmode constants, try to find a special 80387 instruction when optimizing for size or on those CPUs that benefit from them. */ if (GET_MODE (x) == XFmode && (optimize_size || x86_ext_80387_constants & TUNEMASK)) { REAL_VALUE_TYPE r; int i; if (! ext_80387_constants_init) init_ext_80387_constants (); REAL_VALUE_FROM_CONST_DOUBLE (r, x); for (i = 0; i < 5; i++) if (real_identical (&r, &ext_80387_constants_table[i])) return i + 3; } return 0; } /* Return the opcode of the special instruction to be used to load the constant X. */ const char * standard_80387_constant_opcode (rtx x) { switch (standard_80387_constant_p (x)) { case 1: return "fldz"; case 2: return "fld1"; case 3: return "fldlg2"; case 4: return "fldln2"; case 5: return "fldl2e"; case 6: return "fldl2t"; case 7: return "fldpi"; } abort (); } /* Return the CONST_DOUBLE representing the 80387 constant that is loaded by the specified special instruction. The argument IDX matches the return value from standard_80387_constant_p. */ rtx standard_80387_constant_rtx (int idx) { int i; if (! ext_80387_constants_init) init_ext_80387_constants (); switch (idx) { case 3: case 4: case 5: case 6: case 7: i = idx - 3; break; default: abort (); } return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i], XFmode); } /* Return 1 if X is FP constant we can load to SSE register w/o using memory. 
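In practice the only such constant is all zeros, which the SSE move patterns can materialize with a self-XOR (pxor or xorps) instead of a load.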
*/ int standard_sse_constant_p (rtx x) { if (x == const0_rtx) return 1; return (x == CONST0_RTX (GET_MODE (x))); } /* Returns 1 if OP contains a symbol reference */ int symbolic_reference_mentioned_p (rtx op) { const char *fmt; int i; if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF) return 1; fmt = GET_RTX_FORMAT (GET_CODE (op)); for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--) { if (fmt[i] == 'E') { int j; for (j = XVECLEN (op, i) - 1; j >= 0; j--) if (symbolic_reference_mentioned_p (XVECEXP (op, i, j))) return 1; } else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i))) return 1; } return 0; } /* Return 1 if it is appropriate to emit `ret' instructions in the body of a function. Do this only if the epilogue is simple, needing a couple of insns. Prior to reloading, we can't tell how many registers must be saved, so return 0 then. Return 0 if there is no frame marker to de-allocate. If NON_SAVING_SETJMP is defined and true, then it is not possible for the epilogue to be simple, so return 0. This is a special case since NON_SAVING_SETJMP will not cause regs_ever_live to change until final, but jump_optimize may need to know sooner if a `return' is OK. */ int ix86_can_use_return_insn_p (void) { struct ix86_frame frame; #ifdef NON_SAVING_SETJMP if (NON_SAVING_SETJMP && current_function_calls_setjmp) return 0; #endif if (! reload_completed || frame_pointer_needed) return 0; /* Don't allow more than 32 pop, since that's all we can do with one instruction. */ if (current_function_pops_args && current_function_args_size >= 32768) return 0; ix86_compute_frame_layout (&frame); return frame.to_allocate == 0 && frame.nregs == 0; } /* Return 1 if VALUE can be stored in the sign extended immediate field. */ int x86_64_sign_extended_value (rtx value) { switch (GET_CODE (value)) { /* CONST_DOUBLES never match, since HOST_BITS_PER_WIDE_INT is known to be at least 32 and this all acceptable constants are represented as CONST_INT. */ case CONST_INT: if (HOST_BITS_PER_WIDE_INT == 32) return 1; else { HOST_WIDE_INT val = trunc_int_for_mode (INTVAL (value), DImode); return trunc_int_for_mode (val, SImode) == val; } break; /* For certain code models, the symbolic references are known to fit. in CM_SMALL_PIC model we know it fits if it is local to the shared library. Don't count TLS SYMBOL_REFs here, since they should fit only if inside of UNSPEC handled below. */ case SYMBOL_REF: /* TLS symbols are not constant. */ if (tls_symbolic_operand (value, Pmode)) return false; return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL); /* For certain code models, the code is near as well. */ case LABEL_REF: return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_KERNEL); /* We also may accept the offsetted memory references in certain special cases. */ case CONST: if (GET_CODE (XEXP (value, 0)) == UNSPEC) switch (XINT (XEXP (value, 0), 1)) { case UNSPEC_GOTPCREL: case UNSPEC_DTPOFF: case UNSPEC_GOTNTPOFF: case UNSPEC_NTPOFF: return 1; default: break; } if (GET_CODE (XEXP (value, 0)) == PLUS) { rtx op1 = XEXP (XEXP (value, 0), 0); rtx op2 = XEXP (XEXP (value, 0), 1); HOST_WIDE_INT offset; if (ix86_cmodel == CM_LARGE) return 0; if (GET_CODE (op2) != CONST_INT) return 0; offset = trunc_int_for_mode (INTVAL (op2), DImode); switch (GET_CODE (op1)) { case SYMBOL_REF: /* For CM_SMALL assume that latest object is 16MB before end of 31bits boundary. 
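With objects assumed to end below that boundary, symbol + offset still fits the sign-extended 32-bit immediate field, which is all the instruction encoding provides.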
We may also accept pretty large negative constants knowing that all objects are in the positive half of address space. */ if (ix86_cmodel == CM_SMALL && offset < 16*1024*1024 && trunc_int_for_mode (offset, SImode) == offset) return 1; /* For CM_KERNEL we know that all object resist in the negative half of 32bits address space. We may not accept negative offsets, since they may be just off and we may accept pretty large positive ones. */ if (ix86_cmodel == CM_KERNEL && offset > 0 && trunc_int_for_mode (offset, SImode) == offset) return 1; break; case LABEL_REF: /* These conditions are similar to SYMBOL_REF ones, just the constraints for code models differ. */ if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM) && offset < 16*1024*1024 && trunc_int_for_mode (offset, SImode) == offset) return 1; if (ix86_cmodel == CM_KERNEL && offset > 0 && trunc_int_for_mode (offset, SImode) == offset) return 1; break; case UNSPEC: switch (XINT (op1, 1)) { case UNSPEC_DTPOFF: case UNSPEC_NTPOFF: if (offset > 0 && trunc_int_for_mode (offset, SImode) == offset) return 1; } break; default: return 0; } } return 0; default: return 0; } } /* Return 1 if VALUE can be stored in the zero extended immediate field. */ int x86_64_zero_extended_value (rtx value) { switch (GET_CODE (value)) { case CONST_DOUBLE: if (HOST_BITS_PER_WIDE_INT == 32) return (GET_MODE (value) == VOIDmode && !CONST_DOUBLE_HIGH (value)); else return 0; case CONST_INT: if (HOST_BITS_PER_WIDE_INT == 32) return INTVAL (value) >= 0; else return !(INTVAL (value) & ~(HOST_WIDE_INT) 0xffffffff); break; /* For certain code models, the symbolic references are known to fit. */ case SYMBOL_REF: /* TLS symbols are not constant. */ if (tls_symbolic_operand (value, Pmode)) return false; return ix86_cmodel == CM_SMALL; /* For certain code models, the code is near as well. */ case LABEL_REF: return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM; /* We also may accept the offsetted memory references in certain special cases. */ case CONST: if (GET_CODE (XEXP (value, 0)) == PLUS) { rtx op1 = XEXP (XEXP (value, 0), 0); rtx op2 = XEXP (XEXP (value, 0), 1); if (ix86_cmodel == CM_LARGE) return 0; switch (GET_CODE (op1)) { case SYMBOL_REF: return 0; /* For small code model we may accept pretty large positive offsets, since one bit is available for free. Negative offsets are limited by the size of NULL pointer area specified by the ABI. */ if (ix86_cmodel == CM_SMALL && GET_CODE (op2) == CONST_INT && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000 && (trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))) return 1; /* ??? For the kernel, we may accept adjustment of -0x10000000, since we know that it will just convert negative address space to positive, but perhaps this is not worthwhile. */ break; case LABEL_REF: /* These conditions are similar to SYMBOL_REF ones, just the constraints for code models differ. */ if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM) && GET_CODE (op2) == CONST_INT && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000 && (trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))) return 1; break; default: return 0; } } return 0; default: return 0; } } /* Value should be nonzero if functions must have frame pointers. Zero means the frame pointer need not be set up (and parms may be accessed via the stack pointer) in functions that seem suitable. */ int ix86_frame_pointer_required (void) { /* If we accessed previous frames, then the generated code expects to be able to access the saved ebp value in our frame. 
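Typically this happens when __builtin_return_address or __builtin_frame_address is used with a nonzero level, which ends up calling ix86_setup_frame_addresses below.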
*/ if (cfun->machine->accesses_prev_frame) return 1; /* Several x86 os'es need a frame pointer for other reasons, usually pertaining to setjmp. */ if (SUBTARGET_FRAME_POINTER_REQUIRED) return 1; /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off the frame pointer by default. Turn it back on now if we've not got a leaf function. */ if (TARGET_OMIT_LEAF_FRAME_POINTER && (!current_function_is_leaf)) return 1; if (current_function_profile) return 1; return 0; } /* Record that the current function accesses previous call frames. */ void ix86_setup_frame_addresses (void) { cfun->machine->accesses_prev_frame = 1; } #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY) # define USE_HIDDEN_LINKONCE 1 #else # define USE_HIDDEN_LINKONCE 0 #endif static int pic_labels_used; /* Fills in the label name that should be used for a pc thunk for the given register. */ static void get_pc_thunk_name (char name[32], unsigned int regno) { if (USE_HIDDEN_LINKONCE) sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]); else ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno); } /* This function generates code for -fpic that loads %ebx with the return address of the caller and then returns. */ void ix86_file_end (void) { rtx xops[2]; int regno; for (regno = 0; regno < 8; ++regno) { char name[32]; if (! ((pic_labels_used >> regno) & 1)) continue; get_pc_thunk_name (name, regno); if (USE_HIDDEN_LINKONCE) { tree decl; decl = build_decl (FUNCTION_DECL, get_identifier (name), error_mark_node); TREE_PUBLIC (decl) = 1; TREE_STATIC (decl) = 1; DECL_ONE_ONLY (decl) = 1; (*targetm.asm_out.unique_section) (decl, 0); named_section (decl, NULL, 0); (*targetm.asm_out.globalize_label) (asm_out_file, name); fputs ("\t.hidden\t", asm_out_file); assemble_name (asm_out_file, name); fputc ('\n', asm_out_file); ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl); } else { text_section (); ASM_OUTPUT_LABEL (asm_out_file, name); } xops[0] = gen_rtx_REG (SImode, regno); xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx); output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops); output_asm_insn ("ret", xops); } if (NEED_INDICATE_EXEC_STACK) file_end_indicate_exec_stack (); } /* Emit code for the SET_GOT patterns. */ const char * output_set_got (rtx dest) { rtx xops[3]; xops[0] = dest; xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME); if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic) { xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ()); if (!flag_pic) output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops); else output_asm_insn ("call\t%a2", xops); #if TARGET_MACHO /* Output the "canonical" label name ("Lxx$pb") here too. This is what will be referred to by the Mach-O PIC subsystem. */ ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ()); #endif (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (xops[2], 0))); if (flag_pic) output_asm_insn ("pop{l}\t%0", xops); } else { char name[32]; get_pc_thunk_name (name, REGNO (dest)); pic_labels_used |= 1 << REGNO (dest); xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name)); xops[2] = gen_rtx_MEM (QImode, xops[2]); output_asm_insn ("call\t%X2", xops); } if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION) output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops); else if (!TARGET_MACHO) output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops); return ""; } /* Generate an "push" pattern for input ARG. 
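The result is (set (mem:Pmode (pre_dec:Pmode sp)) ARG), which the move patterns emit as a single push instruction that both stores ARG and adjusts the stack pointer.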
*/ static rtx gen_push (rtx arg) { return gen_rtx_SET (VOIDmode, gen_rtx_MEM (Pmode, gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx)), arg); } /* Return >= 0 if there is an unused call-clobbered register available for the entire function. */ static unsigned int ix86_select_alt_pic_regnum (void) { if (current_function_is_leaf && !current_function_profile) { int i; for (i = 2; i >= 0; --i) if (!regs_ever_live[i]) return i; } return INVALID_REGNUM; } /* Return 1 if we need to save REGNO. */ static int ix86_save_reg (unsigned int regno, int maybe_eh_return) { if (pic_offset_table_rtx && regno == REAL_PIC_OFFSET_TABLE_REGNUM && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM] || current_function_profile || current_function_calls_eh_return || current_function_uses_const_pool)) { if (ix86_select_alt_pic_regnum () != INVALID_REGNUM) return 0; return 1; } if (current_function_calls_eh_return && maybe_eh_return) { unsigned i; for (i = 0; ; i++) { unsigned test = EH_RETURN_DATA_REGNO (i); if (test == INVALID_REGNUM) break; if (test == regno) return 1; } } return (regs_ever_live[regno] && !call_used_regs[regno] && !fixed_regs[regno] && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed)); } /* Return number of registers to be saved on the stack. */ static int ix86_nsaved_regs (void) { int nregs = 0; int regno; for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--) if (ix86_save_reg (regno, true)) nregs++; return nregs; } /* Return the offset between two registers, one to be eliminated, and the other its replacement, at the start of a routine. */ HOST_WIDE_INT ix86_initial_elimination_offset (int from, int to) { struct ix86_frame frame; ix86_compute_frame_layout (&frame); if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM) return frame.hard_frame_pointer_offset; else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM) return frame.hard_frame_pointer_offset - frame.frame_pointer_offset; else { if (to != STACK_POINTER_REGNUM) abort (); else if (from == ARG_POINTER_REGNUM) return frame.stack_pointer_offset; else if (from != FRAME_POINTER_REGNUM) abort (); else return frame.stack_pointer_offset - frame.frame_pointer_offset; } } /* Fill structure ix86_frame about frame of currently computed function. */ static void ix86_compute_frame_layout (struct ix86_frame *frame) { HOST_WIDE_INT total_size; int stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT; HOST_WIDE_INT offset; int preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT; HOST_WIDE_INT size = get_frame_size (); frame->nregs = ix86_nsaved_regs (); total_size = size; /* During reload iteration the amount of registers saved can change. Recompute the value as needed. Do not recompute when amount of registers didn't change as reload does mutiple calls to the function and does not expect the decision to change within single iteration. */ if (!optimize_size && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs) { int count = frame->nregs; cfun->machine->use_fast_prologue_epilogue_nregs = count; /* The fast prologue uses move instead of push to save registers. This is significantly longer, but also executes faster as modern hardware can execute the moves in parallel, but can't do that for push/pop. Be careful about choosing what prologue to emit: When function takes many instructions to execute we may use slow version as well as in case function is known to be outside hot spot (this is known with feedback only). 
Weight the size of function by number of registers to save as it is cheap to use one or two push instructions but very slow to use many of them. */ if (count) count = (count - 1) * FAST_PROLOGUE_INSN_COUNT; if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL || (flag_branch_probabilities && cfun->function_frequency < FUNCTION_FREQUENCY_HOT)) cfun->machine->use_fast_prologue_epilogue = false; else cfun->machine->use_fast_prologue_epilogue = !expensive_function_p (count); } if (TARGET_PROLOGUE_USING_MOVE && cfun->machine->use_fast_prologue_epilogue) frame->save_regs_using_mov = true; else frame->save_regs_using_mov = false; /* Skip return address and saved base pointer. */ offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD; frame->hard_frame_pointer_offset = offset; /* Do some sanity checking of stack_alignment_needed and preferred_alignment, since i386 port is the only using those features that may break easily. */ if (size && !stack_alignment_needed) abort (); if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT) abort (); if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT) abort (); if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT) abort (); if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT) stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT; /* Register save area */ offset += frame->nregs * UNITS_PER_WORD; /* Va-arg area */ if (ix86_save_varrargs_registers) { offset += X86_64_VARARGS_SIZE; frame->va_arg_size = X86_64_VARARGS_SIZE; } else frame->va_arg_size = 0; /* Align start of frame for local function. */ frame->padding1 = ((offset + stack_alignment_needed - 1) & -stack_alignment_needed) - offset; offset += frame->padding1; /* Frame pointer points here. */ frame->frame_pointer_offset = offset; offset += size; /* Add outgoing arguments area. Can be skipped if we eliminated all the function calls as dead code. Skipping is however impossible when function calls alloca. Alloca expander assumes that last current_function_outgoing_args_size of stack frame are unused. */ if (ACCUMULATE_OUTGOING_ARGS && (!current_function_is_leaf || current_function_calls_alloca)) { offset += current_function_outgoing_args_size; frame->outgoing_arguments_size = current_function_outgoing_args_size; } else frame->outgoing_arguments_size = 0; /* Align stack boundary. Only needed if we're calling another function or using alloca. */ if (!current_function_is_leaf || current_function_calls_alloca) frame->padding2 = ((offset + preferred_alignment - 1) & -preferred_alignment) - offset; else frame->padding2 = 0; offset += frame->padding2; /* We've reached end of stack frame. */ frame->stack_pointer_offset = offset; /* Size prologue needs to allocate. 
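This is the local variable area plus both alignment paddings, the outgoing argument block and the va_arg register save area; space for the saved registers themselves is added separately by the prologue expander when the move prologue is used.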
*/ frame->to_allocate = (size + frame->padding1 + frame->padding2 + frame->outgoing_arguments_size + frame->va_arg_size); if ((!frame->to_allocate && frame->nregs <= 1) || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000)) frame->save_regs_using_mov = false; if (TARGET_RED_ZONE && current_function_sp_is_unchanging && current_function_is_leaf) { frame->red_zone_size = frame->to_allocate; if (frame->save_regs_using_mov) frame->red_zone_size += frame->nregs * UNITS_PER_WORD; if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE) frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE; } else frame->red_zone_size = 0; frame->to_allocate -= frame->red_zone_size; frame->stack_pointer_offset -= frame->red_zone_size; #if 0 fprintf (stderr, "nregs: %i\n", frame->nregs); fprintf (stderr, "size: %i\n", size); fprintf (stderr, "alignment1: %i\n", stack_alignment_needed); fprintf (stderr, "padding1: %i\n", frame->padding1); fprintf (stderr, "va_arg: %i\n", frame->va_arg_size); fprintf (stderr, "padding2: %i\n", frame->padding2); fprintf (stderr, "to_allocate: %i\n", frame->to_allocate); fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size); fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset); fprintf (stderr, "hard_frame_pointer_offset: %i\n", frame->hard_frame_pointer_offset); fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset); #endif } /* Emit code to save registers in the prologue. */ static void ix86_emit_save_regs (void) { int regno; rtx insn; for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--) if (ix86_save_reg (regno, true)) { insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno))); RTX_FRAME_RELATED_P (insn) = 1; } } /* Emit code to save registers using MOV insns. First register is restored from POINTER + OFFSET. */ static void ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset) { int regno; rtx insn; for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (ix86_save_reg (regno, true)) { insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer), Pmode, offset), gen_rtx_REG (Pmode, regno)); RTX_FRAME_RELATED_P (insn) = 1; offset += UNITS_PER_WORD; } } /* Expand prologue or epilogue stack adjustment. The pattern exist to put a dependency on all ebp-based memory accesses. STYLE should be negative if instructions should be marked as frame related, zero if %r11 register is live and cannot be freely used and positive otherwise. */ static void pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style) { rtx insn; if (! TARGET_64BIT) insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset)); else if (x86_64_immediate_operand (offset, DImode)) insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset)); else { rtx r11; /* r11 is used by indirect sibcall return as well, set before the epilogue and used after the epilogue. ATM indirect sibcall shouldn't be used together with huge frame sizes in one function because of the frame_size check in sibcall.c. */ if (style == 0) abort (); r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */); insn = emit_insn (gen_rtx_SET (DImode, r11, offset)); if (style < 0) RTX_FRAME_RELATED_P (insn) = 1; insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11, offset)); } if (style < 0) RTX_FRAME_RELATED_P (insn) = 1; } /* Expand the prologue into a bunch of separate insns. 
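For the common ia32 case the result is roughly

	pushl	%ebp
	movl	%esp, %ebp		(only when a frame pointer is needed)
	pushl	%ebx			(one push per saved register)
	subl	$N, %esp		(N = frame.to_allocate)

With the move prologue the register space is instead folded into the subl and the registers are stored with movl into the newly allocated frame; the PIC register set-up, if any, follows.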
*/ void ix86_expand_prologue (void) { rtx insn; bool pic_reg_used; struct ix86_frame frame; HOST_WIDE_INT allocate; ix86_compute_frame_layout (&frame); /* Note: AT&T enter does NOT have reversed args. Enter is probably slower on all targets. Also sdb doesn't like it. */ if (frame_pointer_needed) { insn = emit_insn (gen_push (hard_frame_pointer_rtx)); RTX_FRAME_RELATED_P (insn) = 1; insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx); RTX_FRAME_RELATED_P (insn) = 1; } allocate = frame.to_allocate; if (!frame.save_regs_using_mov) ix86_emit_save_regs (); else allocate += frame.nregs * UNITS_PER_WORD; /* When using red zone we may start register saving before allocating the stack frame saving one cycle of the prologue. */ if (TARGET_RED_ZONE && frame.save_regs_using_mov) ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx, -frame.nregs * UNITS_PER_WORD); if (allocate == 0) ; else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT) pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-allocate), -1); else { /* Only valid for Win32. */ rtx eax = gen_rtx_REG (SImode, 0); bool eax_live = ix86_eax_live_at_start_p (); if (TARGET_64BIT) abort (); if (eax_live) { emit_insn (gen_push (eax)); allocate -= 4; } insn = emit_move_insn (eax, GEN_INT (allocate)); RTX_FRAME_RELATED_P (insn) = 1; insn = emit_insn (gen_allocate_stack_worker (eax)); RTX_FRAME_RELATED_P (insn) = 1; if (eax_live) { rtx t = plus_constant (stack_pointer_rtx, allocate); emit_move_insn (eax, gen_rtx_MEM (SImode, t)); } } if (frame.save_regs_using_mov && !TARGET_RED_ZONE) { if (!frame_pointer_needed || !frame.to_allocate) ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate); else ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx, -frame.nregs * UNITS_PER_WORD); } pic_reg_used = false; if (pic_offset_table_rtx && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM] || current_function_profile)) { unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum (); if (alt_pic_reg_used != INVALID_REGNUM) REGNO (pic_offset_table_rtx) = alt_pic_reg_used; pic_reg_used = true; } if (pic_reg_used) { insn = emit_insn (gen_set_got (pic_offset_table_rtx)); /* Even with accurate pre-reload life analysis, we can wind up deleting all references to the pic register after reload. Consider if cross-jumping unifies two sides of a branch controlled by a comparison vs the only read from a global. In which case, allow the set_got to be deleted, though we're too late to do anything about the ebx save in the prologue. */ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL); } /* Prevent function calls from be scheduled before the call to mcount. In the pic_reg_used case, make sure that the got load isn't deleted. */ if (current_function_profile) emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx)); } /* Emit code to restore saved registers using MOV insns. First register is restored from POINTER + OFFSET. */ static void ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset, int maybe_eh_return) { int regno; rtx base_address = gen_rtx_MEM (Pmode, pointer); for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (ix86_save_reg (regno, maybe_eh_return)) { /* Ensure that adjust_address won't be forced to produce pointer out of range allowed by x86-64 instruction set. 
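Displacements in x86-64 addressing modes are sign-extended 32-bit immediates, so a larger offset is first materialized in %r11 and added to the base pointer, and the registers are then restored relative to %r11.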
*/ if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode)) { rtx r11; r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */); emit_move_insn (r11, GEN_INT (offset)); emit_insn (gen_adddi3 (r11, r11, pointer)); base_address = gen_rtx_MEM (Pmode, r11); offset = 0; } emit_move_insn (gen_rtx_REG (Pmode, regno), adjust_address (base_address, Pmode, offset)); offset += UNITS_PER_WORD; } } /* Restore function stack, frame, and registers. */ void ix86_expand_epilogue (int style) { int regno; int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging; struct ix86_frame frame; HOST_WIDE_INT offset; ix86_compute_frame_layout (&frame); /* Calculate start of saved registers relative to ebp. Special care must be taken for the normal return case of a function using eh_return: the eax and edx registers are marked as saved, but not restored along this path. */ offset = frame.nregs; if (current_function_calls_eh_return && style != 2) offset -= 2; offset *= -UNITS_PER_WORD; /* If we're only restoring one register and sp is not valid then using a move instruction to restore the register since it's less work than reloading sp and popping the register. The default code result in stack adjustment using add/lea instruction, while this code results in LEAVE instruction (or discrete equivalent), so it is profitable in some other cases as well. Especially when there are no registers to restore. We also use this code when TARGET_USE_LEAVE and there is exactly one register to pop. This heuristic may need some tuning in future. */ if ((!sp_valid && frame.nregs <= 1) || (TARGET_EPILOGUE_USING_MOVE && cfun->machine->use_fast_prologue_epilogue && (frame.nregs > 1 || frame.to_allocate)) || (frame_pointer_needed && !frame.nregs && frame.to_allocate) || (frame_pointer_needed && TARGET_USE_LEAVE && cfun->machine->use_fast_prologue_epilogue && frame.nregs == 1) || current_function_calls_eh_return) { /* Restore registers. We can use ebp or esp to address the memory locations. If both are available, default to ebp, since offsets are known to be small. Only exception is esp pointing directly to the end of block of saved registers, where we may simplify addressing mode. */ if (!frame_pointer_needed || (sp_valid && !frame.to_allocate)) ix86_emit_restore_regs_using_mov (stack_pointer_rtx, frame.to_allocate, style == 2); else ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx, offset, style == 2); /* eh_return epilogues need %ecx added to the stack pointer. */ if (style == 2) { rtx tmp, sa = EH_RETURN_STACKADJ_RTX; if (frame_pointer_needed) { tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa); tmp = plus_constant (tmp, UNITS_PER_WORD); emit_insn (gen_rtx_SET (VOIDmode, sa, tmp)); tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx); emit_move_insn (hard_frame_pointer_rtx, tmp); pro_epilogue_adjust_stack (stack_pointer_rtx, sa, const0_rtx, style); } else { tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa); tmp = plus_constant (tmp, (frame.to_allocate + frame.nregs * UNITS_PER_WORD)); emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp)); } } else if (!frame_pointer_needed) pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (frame.to_allocate + frame.nregs * UNITS_PER_WORD), style); /* If not an i386, mov & pop is faster than "leave". */ else if (TARGET_USE_LEAVE || optimize_size || !cfun->machine->use_fast_prologue_epilogue) emit_insn (TARGET_64BIT ? 
gen_leave_rex64 () : gen_leave ()); else { pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx, const0_rtx, style); if (TARGET_64BIT) emit_insn (gen_popdi1 (hard_frame_pointer_rtx)); else emit_insn (gen_popsi1 (hard_frame_pointer_rtx)); } } else { /* First step is to deallocate the stack frame so that we can pop the registers. */ if (!sp_valid) { if (!frame_pointer_needed) abort (); pro_epilogue_adjust_stack (stack_pointer_rtx, hard_frame_pointer_rtx, GEN_INT (offset), style); } else if (frame.to_allocate) pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (frame.to_allocate), style); for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (ix86_save_reg (regno, false)) { if (TARGET_64BIT) emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno))); else emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno))); } if (frame_pointer_needed) { /* Leave results in shorter dependency chains on CPUs that are able to grok it fast. */ if (TARGET_USE_LEAVE) emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ()); else if (TARGET_64BIT) emit_insn (gen_popdi1 (hard_frame_pointer_rtx)); else emit_insn (gen_popsi1 (hard_frame_pointer_rtx)); } } /* Sibcall epilogues don't want a return instruction. */ if (style == 0) return; if (current_function_pops_args && current_function_args_size) { rtx popc = GEN_INT (current_function_pops_args); /* i386 can only pop 64K bytes. If asked to pop more, pop return address, do explicit add, and jump indirectly to the caller. */ if (current_function_pops_args >= 65536) { rtx ecx = gen_rtx_REG (SImode, 2); /* There is no "pascal" calling convention in 64bit ABI. */ if (TARGET_64BIT) abort (); emit_insn (gen_popsi1 (ecx)); emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc)); emit_jump_insn (gen_return_indirect_internal (ecx)); } else emit_jump_insn (gen_return_pop_internal (popc)); } else emit_jump_insn (gen_return_internal ()); } /* Reset from the function's potential modifications. */ static void ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED, HOST_WIDE_INT size ATTRIBUTE_UNUSED) { if (pic_offset_table_rtx) REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM; } /* Extract the parts of an RTL expression that is a valid memory address for an instruction. Return 0 if the structure of the address is grossly off. Return -1 if the address contains ASHIFT, so it is not strictly valid, but still used for computing length of lea instruction. */ static int ix86_decompose_address (rtx addr, struct ix86_address *out) { rtx base = NULL_RTX; rtx index = NULL_RTX; rtx disp = NULL_RTX; HOST_WIDE_INT scale = 1; rtx scale_rtx = NULL_RTX; int retval = 1; enum ix86_address_seg seg = SEG_DEFAULT; if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG) base = addr; else if (GET_CODE (addr) == PLUS) { rtx addends[4], op; int n = 0, i; op = addr; do { if (n >= 4) return 0; addends[n++] = XEXP (op, 1); op = XEXP (op, 0); } while (GET_CODE (op) == PLUS); if (n >= 4) return 0; addends[n] = op; for (i = n; i >= 0; --i) { op = addends[i]; switch (GET_CODE (op)) { case MULT: if (index) return 0; index = XEXP (op, 0); scale_rtx = XEXP (op, 1); break; case UNSPEC: if (XINT (op, 1) == UNSPEC_TP && TARGET_TLS_DIRECT_SEG_REFS && seg == SEG_DEFAULT) seg = TARGET_64BIT ? 
SEG_FS : SEG_GS; else return 0; break; case REG: case SUBREG: if (!base) base = op; else if (!index) index = op; else return 0; break; case CONST: case CONST_INT: case SYMBOL_REF: case LABEL_REF: if (disp) return 0; disp = op; break; default: return 0; } } } else if (GET_CODE (addr) == MULT) { index = XEXP (addr, 0); /* index*scale */ scale_rtx = XEXP (addr, 1); } else if (GET_CODE (addr) == ASHIFT) { rtx tmp; /* We're called for lea too, which implements ashift on occasion. */ index = XEXP (addr, 0); tmp = XEXP (addr, 1); if (GET_CODE (tmp) != CONST_INT) return 0; scale = INTVAL (tmp); if ((unsigned HOST_WIDE_INT) scale > 3) return 0; scale = 1 << scale; retval = -1; } else disp = addr; /* displacement */ /* Extract the integral value of scale. */ if (scale_rtx) { if (GET_CODE (scale_rtx) != CONST_INT) return 0; scale = INTVAL (scale_rtx); } /* Allow arg pointer and stack pointer as index if there is not scaling. */ if (base && index && scale == 1 && (index == arg_pointer_rtx || index == frame_pointer_rtx || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM))) { rtx tmp = base; base = index; index = tmp; } /* Special case: %ebp cannot be encoded as a base without a displacement. */ if ((base == hard_frame_pointer_rtx || base == frame_pointer_rtx || base == arg_pointer_rtx) && !disp) disp = const0_rtx; /* Special case: on K6, [%esi] makes the instruction vector decoded. Avoid this by transforming to [%esi+0]. */ if (ix86_tune == PROCESSOR_K6 && !optimize_size && base && !index && !disp && REG_P (base) && REGNO_REG_CLASS (REGNO (base)) == SIREG) disp = const0_rtx; /* Special case: encode reg+reg instead of reg*2. */ if (!base && index && scale && scale == 2) base = index, scale = 1; /* Special case: scaling cannot be encoded without base or displacement. */ if (!base && !disp && index && scale != 1) disp = const0_rtx; out->base = base; out->index = index; out->disp = disp; out->scale = scale; out->seg = seg; return retval; } /* Return cost of the memory address x. For i386, it is better to use a complex address than let gcc copy the address into a reg and make a new pseudo. But not if the address requires to two regs - that would mean more pseudos with longer lifetimes. */ static int ix86_address_cost (rtx x) { struct ix86_address parts; int cost = 1; if (!ix86_decompose_address (x, &parts)) abort (); /* More complex memory references are better. */ if (parts.disp && parts.disp != const0_rtx) cost--; if (parts.seg != SEG_DEFAULT) cost--; /* Attempt to minimize number of registers in the address. */ if ((parts.base && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)) || (parts.index && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER))) cost++; if (parts.base && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER) && parts.index && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER) && parts.base != parts.index) cost++; /* AMD-K6 don't like addresses with ModR/M set to 00_xxx_100b, since it's predecode logic can't detect the length of instructions and it degenerates to vector decoded. Increase cost of such addresses here. The penalty is minimally 2 cycles. It may be worthwhile to split such addresses or even refuse such addresses at all. Following addressing modes are affected: [base+scale*index] [scale*index+disp] [base+index] The first and last case may be avoidable by explicitly coding the zero in memory address, but I don't have AMD-K6 machine handy to check this theory. 
*/ if (TARGET_K6 && ((!parts.disp && parts.base && parts.index && parts.scale != 1) || (parts.disp && !parts.base && parts.index && parts.scale != 1) || (!parts.disp && parts.base && parts.index && parts.scale == 1))) cost += 10; return cost; } /* If X is a machine specific address (i.e. a symbol or label being referenced as a displacement from the GOT implemented using an UNSPEC), then return the base term. Otherwise return X. */ rtx ix86_find_base_term (rtx x) { rtx term; if (TARGET_64BIT) { if (GET_CODE (x) != CONST) return x; term = XEXP (x, 0); if (GET_CODE (term) == PLUS && (GET_CODE (XEXP (term, 1)) == CONST_INT || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE)) term = XEXP (term, 0); if (GET_CODE (term) != UNSPEC || XINT (term, 1) != UNSPEC_GOTPCREL) return x; term = XVECEXP (term, 0, 0); if (GET_CODE (term) != SYMBOL_REF && GET_CODE (term) != LABEL_REF) return x; return term; } term = ix86_delegitimize_address (x); if (GET_CODE (term) != SYMBOL_REF && GET_CODE (term) != LABEL_REF) return x; return term; } /* Determine if a given RTX is a valid constant. We already know this satisfies CONSTANT_P. */ bool legitimate_constant_p (rtx x) { rtx inner; switch (GET_CODE (x)) { case SYMBOL_REF: /* TLS symbols are not constant. */ if (tls_symbolic_operand (x, Pmode)) return false; break; case CONST: inner = XEXP (x, 0); /* Offsets of TLS symbols are never valid. Discourage CSE from creating them. */ if (GET_CODE (inner) == PLUS && tls_symbolic_operand (XEXP (inner, 0), Pmode)) return false; if (GET_CODE (inner) == PLUS || GET_CODE (inner) == MINUS) { if (GET_CODE (XEXP (inner, 1)) != CONST_INT) return false; inner = XEXP (inner, 0); } /* Only some unspecs are valid as "constants". */ if (GET_CODE (inner) == UNSPEC) switch (XINT (inner, 1)) { case UNSPEC_TPOFF: case UNSPEC_NTPOFF: return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode); case UNSPEC_DTPOFF: return local_dynamic_symbolic_operand (XVECEXP (inner, 0, 0), Pmode); default: return false; } break; default: break; } /* Otherwise we handle everything else in the move patterns. */ return true; } /* Determine if it's legal to put X into the constant pool. This is not possible for the address of thread-local symbols, which is checked above. */ static bool ix86_cannot_force_const_mem (rtx x) { return !legitimate_constant_p (x); } /* Determine if a given RTX is a valid constant address. */ bool constant_address_p (rtx x) { return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1); } /* Nonzero if the constant value X is a legitimate general operand when generating PIC code. It is given that flag_pic is on and that X satisfies CONSTANT_P or is a CONST_DOUBLE. */ bool legitimate_pic_operand_p (rtx x) { rtx inner; switch (GET_CODE (x)) { case CONST: inner = XEXP (x, 0); /* Only some unspecs are valid as "constants". */ if (GET_CODE (inner) == UNSPEC) switch (XINT (inner, 1)) { case UNSPEC_TPOFF: return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode); default: return false; } /* FALLTHRU */ case SYMBOL_REF: case LABEL_REF: return legitimate_pic_address_disp_p (x); default: return true; } } /* Determine if a given CONST RTX is a valid memory displacement in PIC mode. */ int legitimate_pic_address_disp_p (rtx disp) { bool saw_plus; /* In 64bit mode we can allow direct addresses of symbols and labels when they are not dynamic symbols. */ if (TARGET_64BIT) { /* TLS references should always be enclosed in UNSPEC. 
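A bare TLS SYMBOL_REF reaching this point would let the symbol be addressed without the proper TLS relocation sequence, so it is rejected here.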
*/ if (tls_symbolic_operand (disp, GET_MODE (disp))) return 0; if (GET_CODE (disp) == SYMBOL_REF && ix86_cmodel == CM_SMALL_PIC && SYMBOL_REF_LOCAL_P (disp)) return 1; if (GET_CODE (disp) == LABEL_REF) return 1; if (GET_CODE (disp) == CONST && GET_CODE (XEXP (disp, 0)) == PLUS) { rtx op0 = XEXP (XEXP (disp, 0), 0); rtx op1 = XEXP (XEXP (disp, 0), 1); /* TLS references should always be enclosed in UNSPEC. */ if (tls_symbolic_operand (op0, GET_MODE (op0))) return 0; if (((GET_CODE (op0) == SYMBOL_REF && ix86_cmodel == CM_SMALL_PIC && SYMBOL_REF_LOCAL_P (op0)) || GET_CODE (op0) == LABEL_REF) && GET_CODE (op1) == CONST_INT && INTVAL (op1) < 16*1024*1024 && INTVAL (op1) >= -16*1024*1024) return 1; } } if (GET_CODE (disp) != CONST) return 0; disp = XEXP (disp, 0); if (TARGET_64BIT) { /* We are unsafe to allow PLUS expressions. This limit allowed distance of GOT tables. We should not need these anyway. */ if (GET_CODE (disp) != UNSPEC || XINT (disp, 1) != UNSPEC_GOTPCREL) return 0; if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF) return 0; return 1; } saw_plus = false; if (GET_CODE (disp) == PLUS) { if (GET_CODE (XEXP (disp, 1)) != CONST_INT) return 0; disp = XEXP (disp, 0); saw_plus = true; } /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O. */ if (TARGET_MACHO && GET_CODE (disp) == MINUS) { if (GET_CODE (XEXP (disp, 0)) == LABEL_REF || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF) if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF) { const char *sym_name = XSTR (XEXP (disp, 1), 0); if (! strcmp (sym_name, "")) return 1; } } if (GET_CODE (disp) != UNSPEC) return 0; switch (XINT (disp, 1)) { case UNSPEC_GOT: if (saw_plus) return false; return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF; case UNSPEC_GOTOFF: if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF) return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode); return false; case UNSPEC_GOTTPOFF: case UNSPEC_GOTNTPOFF: case UNSPEC_INDNTPOFF: if (saw_plus) return false; return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode); case UNSPEC_NTPOFF: return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode); case UNSPEC_DTPOFF: return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode); } return 0; } /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid memory address for an instruction. The MODE argument is the machine mode for the MEM expression that wants to use this address. It only recognizes address in canonical form. LEGITIMIZE_ADDRESS should convert common non-canonical forms to canonical form so that they will be recognized. */ int legitimate_address_p (enum machine_mode mode, rtx addr, int strict) { struct ix86_address parts; rtx base, index, disp; HOST_WIDE_INT scale; const char *reason = NULL; rtx reason_rtx = NULL_RTX; if (TARGET_DEBUG_ADDR) { fprintf (stderr, "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n", GET_MODE_NAME (mode), strict); debug_rtx (addr); } if (ix86_decompose_address (addr, &parts) <= 0) { reason = "decomposition failed"; goto report_error; } base = parts.base; index = parts.index; disp = parts.disp; scale = parts.scale; /* Validate base register. Don't allow SUBREG's here, it can lead to spill failures when the base is one word out of a two word structure, which is represented internally as a DImode int. 
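For example, (subreg:SI (reg:DI N) 0) used as a base would tie the address to the full DImode pseudo, and reloading that can fail.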
*/ if (base) { reason_rtx = base; if (GET_CODE (base) != REG) { reason = "base is not a register"; goto report_error; } if (GET_MODE (base) != Pmode) { reason = "base is not in Pmode"; goto report_error; } if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base)) || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base))) { reason = "base is not valid"; goto report_error; } } /* Validate index register. Don't allow SUBREG's here, it can lead to spill failures when the index is one word out of a two word structure, which is represented internally as a DImode int. */ if (index) { reason_rtx = index; if (GET_CODE (index) != REG) { reason = "index is not a register"; goto report_error; } if (GET_MODE (index) != Pmode) { reason = "index is not in Pmode"; goto report_error; } if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index)) || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index))) { reason = "index is not valid"; goto report_error; } } /* Validate scale factor. */ if (scale != 1) { reason_rtx = GEN_INT (scale); if (!index) { reason = "scale without index"; goto report_error; } if (scale != 2 && scale != 4 && scale != 8) { reason = "scale is not a valid multiplier"; goto report_error; } } /* Validate displacement. */ if (disp) { reason_rtx = disp; if (GET_CODE (disp) == CONST && GET_CODE (XEXP (disp, 0)) == UNSPEC) switch (XINT (XEXP (disp, 0), 1)) { case UNSPEC_GOT: case UNSPEC_GOTOFF: case UNSPEC_GOTPCREL: if (!flag_pic) abort (); goto is_legitimate_pic; case UNSPEC_GOTTPOFF: case UNSPEC_GOTNTPOFF: case UNSPEC_INDNTPOFF: case UNSPEC_NTPOFF: case UNSPEC_DTPOFF: break; default: reason = "invalid address unspec"; goto report_error; } else if (flag_pic && (SYMBOLIC_CONST (disp) #if TARGET_MACHO && !machopic_operand_p (disp) #endif )) { is_legitimate_pic: if (TARGET_64BIT && (index || base)) { /* foo@dtpoff(%rX) is ok. */ if (GET_CODE (disp) != CONST || GET_CODE (XEXP (disp, 0)) != PLUS || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF)) { reason = "non-constant pic memory reference"; goto report_error; } } else if (! legitimate_pic_address_disp_p (disp)) { reason = "displacement is an invalid pic construct"; goto report_error; } /* This code used to verify that a symbolic pic displacement includes the pic_offset_table_rtx register. While this is good idea, unfortunately these constructs may be created by "adds using lea" optimization for incorrect code like: int a; int foo(int i) { return *(&a+i); } This code is nonsensical, but results in addressing GOT table with pic_offset_table_rtx base. We can't just refuse it easily, since it gets matched by "addsi3" pattern, that later gets split to lea in the case output register differs from input. While this can be handled by separate addsi pattern for this case that never results in lea, this seems to be easier and correct fix for crash to disable this test. */ } else if (GET_CODE (disp) != LABEL_REF && GET_CODE (disp) != CONST_INT && (GET_CODE (disp) != CONST || !legitimate_constant_p (disp)) && (GET_CODE (disp) != SYMBOL_REF || !legitimate_constant_p (disp))) { reason = "displacement is not constant"; goto report_error; } else if (TARGET_64BIT && !x86_64_sign_extended_value (disp)) { reason = "displacement is out of range"; goto report_error; } } /* Everything looks valid. 
*/ if (TARGET_DEBUG_ADDR) fprintf (stderr, "Success.\n"); return TRUE; report_error: if (TARGET_DEBUG_ADDR) { fprintf (stderr, "Error: %s\n", reason); debug_rtx (reason_rtx); } return FALSE; } /* Return an unique alias set for the GOT. */ static HOST_WIDE_INT ix86_GOT_alias_set (void) { static HOST_WIDE_INT set = -1; if (set == -1) set = new_alias_set (); return set; } /* Return a legitimate reference for ORIG (an address) using the register REG. If REG is 0, a new pseudo is generated. There are two types of references that must be handled: 1. Global data references must load the address from the GOT, via the PIC reg. An insn is emitted to do this load, and the reg is returned. 2. Static data references, constant pool addresses, and code labels compute the address as an offset from the GOT, whose base is in the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to differentiate them from global data objects. The returned address is the PIC reg + an unspec constant. GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC reg also appears in the address. */ rtx legitimize_pic_address (rtx orig, rtx reg) { rtx addr = orig; rtx new = orig; rtx base; #if TARGET_MACHO if (reg == 0) reg = gen_reg_rtx (Pmode); /* Use the generic Mach-O PIC machinery. */ return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg); #endif if (TARGET_64BIT && legitimate_pic_address_disp_p (addr)) new = addr; else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode)) { /* This symbol may be referenced via a displacement from the PIC base address (@GOTOFF). */ if (reload_in_progress) regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1; if (GET_CODE (addr) == CONST) addr = XEXP (addr, 0); if (GET_CODE (addr) == PLUS) { new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF); new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1)); } else new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF); new = gen_rtx_CONST (Pmode, new); new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new); if (reg != 0) { emit_move_insn (reg, new); new = reg; } } else if (GET_CODE (addr) == SYMBOL_REF) { if (TARGET_64BIT) { new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL); new = gen_rtx_CONST (Pmode, new); new = gen_rtx_MEM (Pmode, new); RTX_UNCHANGING_P (new) = 1; set_mem_alias_set (new, ix86_GOT_alias_set ()); if (reg == 0) reg = gen_reg_rtx (Pmode); /* Use directly gen_movsi, otherwise the address is loaded into register for CSE. We don't want to CSE this addresses, instead we CSE addresses from the GOT table, so skip this. */ emit_insn (gen_movsi (reg, new)); new = reg; } else { /* This symbol must be referenced via a load from the Global Offset Table (@GOT). */ if (reload_in_progress) regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1; new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT); new = gen_rtx_CONST (Pmode, new); new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new); new = gen_rtx_MEM (Pmode, new); RTX_UNCHANGING_P (new) = 1; set_mem_alias_set (new, ix86_GOT_alias_set ()); if (reg == 0) reg = gen_reg_rtx (Pmode); emit_move_insn (reg, new); new = reg; } } else { if (GET_CODE (addr) == CONST) { addr = XEXP (addr, 0); /* We must match stuff we generate before. Assume the only unspecs that can get here are ours. Not that we could do anything with them anyway.... 
*/ if (GET_CODE (addr) == UNSPEC || (GET_CODE (addr) == PLUS && GET_CODE (XEXP (addr, 0)) == UNSPEC)) return orig; if (GET_CODE (addr) != PLUS) abort (); } if (GET_CODE (addr) == PLUS) { rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1); /* Check first to see if this is a constant offset from a @GOTOFF symbol reference. */ if (local_symbolic_operand (op0, Pmode) && GET_CODE (op1) == CONST_INT) { if (!TARGET_64BIT) { if (reload_in_progress) regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1; new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0), UNSPEC_GOTOFF); new = gen_rtx_PLUS (Pmode, new, op1); new = gen_rtx_CONST (Pmode, new); new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new); if (reg != 0) { emit_move_insn (reg, new); new = reg; } } else { if (INTVAL (op1) < -16*1024*1024 || INTVAL (op1) >= 16*1024*1024) new = gen_rtx_PLUS (Pmode, op0, force_reg (Pmode, op1)); } } else { base = legitimize_pic_address (XEXP (addr, 0), reg); new = legitimize_pic_address (XEXP (addr, 1), base == reg ? NULL_RTX : reg); if (GET_CODE (new) == CONST_INT) new = plus_constant (base, INTVAL (new)); else { if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1))) { base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0)); new = XEXP (new, 1); } new = gen_rtx_PLUS (Pmode, base, new); } } } } return new; } /* Load the thread pointer. If TO_REG is true, force it into a register. */ static rtx get_thread_pointer (int to_reg) { rtx tp, reg, insn; tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP); if (!to_reg) return tp; reg = gen_reg_rtx (Pmode); insn = gen_rtx_SET (VOIDmode, reg, tp); insn = emit_insn (insn); return reg; } /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is false if we expect this to be used for a memory address and true if we expect to load the address into a register. */ static rtx legitimize_tls_address (rtx x, enum tls_model model, int for_mov) { rtx dest, base, off, pic; int type; switch (model) { case TLS_MODEL_GLOBAL_DYNAMIC: dest = gen_reg_rtx (Pmode); if (TARGET_64BIT) { rtx rax = gen_rtx_REG (Pmode, 0), insns; start_sequence (); emit_call_insn (gen_tls_global_dynamic_64 (rax, x)); insns = get_insns (); end_sequence (); emit_libcall_block (insns, dest, rax, x); } else emit_insn (gen_tls_global_dynamic_32 (dest, x)); break; case TLS_MODEL_LOCAL_DYNAMIC: base = gen_reg_rtx (Pmode); if (TARGET_64BIT) { rtx rax = gen_rtx_REG (Pmode, 0), insns, note; start_sequence (); emit_call_insn (gen_tls_local_dynamic_base_64 (rax)); insns = get_insns (); end_sequence (); note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL); note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note); emit_libcall_block (insns, base, rax, note); } else emit_insn (gen_tls_local_dynamic_base_32 (base)); off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF); off = gen_rtx_CONST (Pmode, off); return gen_rtx_PLUS (Pmode, base, off); case TLS_MODEL_INITIAL_EXEC: if (TARGET_64BIT) { pic = NULL; type = UNSPEC_GOTNTPOFF; } else if (flag_pic) { if (reload_in_progress) regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1; pic = pic_offset_table_rtx; type = TARGET_GNU_TLS ? 
UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF; } else if (!TARGET_GNU_TLS) { pic = gen_reg_rtx (Pmode); emit_insn (gen_set_got (pic)); type = UNSPEC_GOTTPOFF; } else { pic = NULL; type = UNSPEC_INDNTPOFF; } off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type); off = gen_rtx_CONST (Pmode, off); if (pic) off = gen_rtx_PLUS (Pmode, pic, off); off = gen_rtx_MEM (Pmode, off); RTX_UNCHANGING_P (off) = 1; set_mem_alias_set (off, ix86_GOT_alias_set ()); if (TARGET_64BIT || TARGET_GNU_TLS) { base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS); off = force_reg (Pmode, off); return gen_rtx_PLUS (Pmode, base, off); } else { base = get_thread_pointer (true); dest = gen_reg_rtx (Pmode); emit_insn (gen_subsi3 (dest, base, off)); } break; case TLS_MODEL_LOCAL_EXEC: off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), (TARGET_64BIT || TARGET_GNU_TLS) ? UNSPEC_NTPOFF : UNSPEC_TPOFF); off = gen_rtx_CONST (Pmode, off); if (TARGET_64BIT || TARGET_GNU_TLS) { base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS); return gen_rtx_PLUS (Pmode, base, off); } else { base = get_thread_pointer (true); dest = gen_reg_rtx (Pmode); emit_insn (gen_subsi3 (dest, base, off)); } break; default: abort (); } return dest; } /* Try machine-dependent ways of modifying an illegitimate address to be legitimate. If we find one, return the new, valid address. This macro is used in only one place: `memory_address' in explow.c. OLDX is the address as it was before break_out_memory_refs was called. In some cases it is useful to look at this to decide what needs to be done. MODE and WIN are passed so that this macro can use GO_IF_LEGITIMATE_ADDRESS. It is always safe for this macro to do nothing. It exists to recognize opportunities to optimize the output. For the 80386, we handle X+REG by loading X into a register R and using R+REG. R will go in a general reg and indexing will be used. However, if REG is a broken-out memory address or multiplication, nothing needs to be done because REG can certainly go in a general reg. When -fpic is used, special handling is needed for symbolic references. See comments by legitimize_pic_address in i386.c for details. */ rtx legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode) { int changed = 0; unsigned log; if (TARGET_DEBUG_ADDR) { fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n", GET_MODE_NAME (mode)); debug_rtx (x); } log = tls_symbolic_operand (x, mode); if (log) return legitimize_tls_address (x, log, false); if (flag_pic && SYMBOLIC_CONST (x)) return legitimize_pic_address (x, 0); /* Canonicalize shifts by 0, 1, 2, 3 into multiply */ if (GET_CODE (x) == ASHIFT && GET_CODE (XEXP (x, 1)) == CONST_INT && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4) { changed = 1; x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)), GEN_INT (1 << log)); } if (GET_CODE (x) == PLUS) { /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */ if (GET_CODE (XEXP (x, 0)) == ASHIFT && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4) { changed = 1; XEXP (x, 0) = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (XEXP (x, 0), 0)), GEN_INT (1 << log)); } if (GET_CODE (XEXP (x, 1)) == ASHIFT && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4) { changed = 1; XEXP (x, 1) = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (XEXP (x, 1), 0)), GEN_INT (1 << log)); } /* Put multiply first if it isn't already. 
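The canonicalizations below only look for the MULT term in operand 0 of the PLUS, so swap it into place first.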
*/ if (GET_CODE (XEXP (x, 1)) == MULT) { rtx tmp = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1); XEXP (x, 1) = tmp; changed = 1; } /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const))) into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be created by virtual register instantiation, register elimination, and similar optimizations. */ if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS) { changed = 1; x = gen_rtx_PLUS (Pmode, gen_rtx_PLUS (Pmode, XEXP (x, 0), XEXP (XEXP (x, 1), 0)), XEXP (XEXP (x, 1), 1)); } /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const) into (plus (plus (mult (reg) (const)) (reg)) (const)). */ else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS && CONSTANT_P (XEXP (x, 1))) { rtx constant; rtx other = NULL_RTX; if (GET_CODE (XEXP (x, 1)) == CONST_INT) { constant = XEXP (x, 1); other = XEXP (XEXP (XEXP (x, 0), 1), 1); } else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT) { constant = XEXP (XEXP (XEXP (x, 0), 1), 1); other = XEXP (x, 1); } else constant = 0; if (constant) { changed = 1; x = gen_rtx_PLUS (Pmode, gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0), XEXP (XEXP (XEXP (x, 0), 1), 0)), plus_constant (other, INTVAL (constant))); } } if (changed && legitimate_address_p (mode, x, FALSE)) return x; if (GET_CODE (XEXP (x, 0)) == MULT) { changed = 1; XEXP (x, 0) = force_operand (XEXP (x, 0), 0); } if (GET_CODE (XEXP (x, 1)) == MULT) { changed = 1; XEXP (x, 1) = force_operand (XEXP (x, 1), 0); } if (changed && GET_CODE (XEXP (x, 1)) == REG && GET_CODE (XEXP (x, 0)) == REG) return x; if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1))) { changed = 1; x = legitimize_pic_address (x, 0); } if (changed && legitimate_address_p (mode, x, FALSE)) return x; if (GET_CODE (XEXP (x, 0)) == REG) { rtx temp = gen_reg_rtx (Pmode); rtx val = force_operand (XEXP (x, 1), temp); if (val != temp) emit_move_insn (temp, val); XEXP (x, 1) = temp; return x; } else if (GET_CODE (XEXP (x, 1)) == REG) { rtx temp = gen_reg_rtx (Pmode); rtx val = force_operand (XEXP (x, 0), temp); if (val != temp) emit_move_insn (temp, val); XEXP (x, 0) = temp; return x; } } return x; } /* Print an integer constant expression in assembler syntax. Addition and subtraction are the only arithmetic that may appear in these expressions. FILE is the stdio stream to write to, X is the rtx, and CODE is the operand print code from the output string. */ static void output_pic_addr_const (FILE *file, rtx x, int code) { char buf[256]; switch (GET_CODE (x)) { case PC: if (flag_pic) putc ('.', file); else abort (); break; case SYMBOL_REF: /* Mark the decl as referenced so that cgraph will output the function. */ if (SYMBOL_REF_DECL (x)) mark_decl_referenced (SYMBOL_REF_DECL (x)); assemble_name (file, XSTR (x, 0)); if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x)) fputs ("@PLT", file); break; case LABEL_REF: x = XEXP (x, 0); /* FALLTHRU */ case CODE_LABEL: ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x)); assemble_name (asm_out_file, buf); break; case CONST_INT: fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); break; case CONST: /* This used to output parentheses around the expression, but that does not work on the 386 (either ATT or BSD assembler). */ output_pic_addr_const (file, XEXP (x, 0), code); break; case CONST_DOUBLE: if (GET_MODE (x) == VOIDmode) { /* We can use %d if the number is <32 bits and positive. 
*/ if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0) fprintf (file, "0x%lx%08lx", (unsigned long) CONST_DOUBLE_HIGH (x), (unsigned long) CONST_DOUBLE_LOW (x)); else fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x)); } else /* We can't handle floating point constants; PRINT_OPERAND must handle them. */ output_operand_lossage ("floating constant misused"); break; case PLUS: /* Some assemblers need integer constants to appear first. */ if (GET_CODE (XEXP (x, 0)) == CONST_INT) { output_pic_addr_const (file, XEXP (x, 0), code); putc ('+', file); output_pic_addr_const (file, XEXP (x, 1), code); } else if (GET_CODE (XEXP (x, 1)) == CONST_INT) { output_pic_addr_const (file, XEXP (x, 1), code); putc ('+', file); output_pic_addr_const (file, XEXP (x, 0), code); } else abort (); break; case MINUS: if (!TARGET_MACHO) putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file); output_pic_addr_const (file, XEXP (x, 0), code); putc ('-', file); output_pic_addr_const (file, XEXP (x, 1), code); if (!TARGET_MACHO) putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file); break; case UNSPEC: if (XVECLEN (x, 0) != 1) abort (); output_pic_addr_const (file, XVECEXP (x, 0, 0), code); switch (XINT (x, 1)) { case UNSPEC_GOT: fputs ("@GOT", file); break; case UNSPEC_GOTOFF: fputs ("@GOTOFF", file); break; case UNSPEC_GOTPCREL: fputs ("@GOTPCREL(%rip)", file); break; case UNSPEC_GOTTPOFF: /* FIXME: This might be @TPOFF in Sun ld too. */ fputs ("@GOTTPOFF", file); break; case UNSPEC_TPOFF: fputs ("@TPOFF", file); break; case UNSPEC_NTPOFF: if (TARGET_64BIT) fputs ("@TPOFF", file); else fputs ("@NTPOFF", file); break; case UNSPEC_DTPOFF: fputs ("@DTPOFF", file); break; case UNSPEC_GOTNTPOFF: if (TARGET_64BIT) fputs ("@GOTTPOFF(%rip)", file); else fputs ("@GOTNTPOFF", file); break; case UNSPEC_INDNTPOFF: fputs ("@INDNTPOFF", file); break; default: output_operand_lossage ("invalid UNSPEC as operand"); break; } break; default: output_operand_lossage ("invalid expression as operand"); } } /* This is called from dwarfout.c via ASM_OUTPUT_DWARF_ADDR_CONST. We need to handle our special PIC relocations. */ void i386_dwarf_output_addr_const (FILE *file, rtx x) { #ifdef ASM_QUAD fprintf (file, "%s", TARGET_64BIT ? ASM_QUAD : ASM_LONG); #else if (TARGET_64BIT) abort (); fprintf (file, "%s", ASM_LONG); #endif if (flag_pic) output_pic_addr_const (file, x, '\0'); else output_addr_const (file, x); fputc ('\n', file); } /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL. We need to emit DTP-relative relocations. */ void i386_output_dwarf_dtprel (FILE *file, int size, rtx x) { fputs (ASM_LONG, file); output_addr_const (file, x); fputs ("@DTPOFF", file); switch (size) { case 4: break; case 8: fputs (", 0", file); break; default: abort (); } } /* In the name of slightly smaller debug output, and to cater to general assembler losage, recognize PIC+GOTOFF and turn it back into a direct symbol reference. 
*/ static rtx ix86_delegitimize_address (rtx orig_x) { rtx x = orig_x, y; if (GET_CODE (x) == MEM) x = XEXP (x, 0); if (TARGET_64BIT) { if (GET_CODE (x) != CONST || GET_CODE (XEXP (x, 0)) != UNSPEC || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL || GET_CODE (orig_x) != MEM) return orig_x; return XVECEXP (XEXP (x, 0), 0, 0); } if (GET_CODE (x) != PLUS || GET_CODE (XEXP (x, 1)) != CONST) return orig_x; if (GET_CODE (XEXP (x, 0)) == REG && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM) /* %ebx + GOT/GOTOFF */ y = NULL; else if (GET_CODE (XEXP (x, 0)) == PLUS) { /* %ebx + %reg * scale + GOT/GOTOFF */ y = XEXP (x, 0); if (GET_CODE (XEXP (y, 0)) == REG && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM) y = XEXP (y, 1); else if (GET_CODE (XEXP (y, 1)) == REG && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM) y = XEXP (y, 0); else return orig_x; if (GET_CODE (y) != REG && GET_CODE (y) != MULT && GET_CODE (y) != ASHIFT) return orig_x; } else return orig_x; x = XEXP (XEXP (x, 1), 0); if (GET_CODE (x) == UNSPEC && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM) || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM))) { if (y) return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0)); return XVECEXP (x, 0, 0); } if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == UNSPEC && GET_CODE (XEXP (x, 1)) == CONST_INT && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM) || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM))) { x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1)); if (y) return gen_rtx_PLUS (Pmode, y, x); return x; } return orig_x; } static void put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse, int fp, FILE *file) { const char *suffix; if (mode == CCFPmode || mode == CCFPUmode) { enum rtx_code second_code, bypass_code; ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code); if (bypass_code != NIL || second_code != NIL) abort (); code = ix86_fp_compare_code_to_integer (code); mode = CCmode; } if (reverse) code = reverse_condition (code); switch (code) { case EQ: suffix = "e"; break; case NE: suffix = "ne"; break; case GT: if (mode != CCmode && mode != CCNOmode && mode != CCGCmode) abort (); suffix = "g"; break; case GTU: /* ??? Use "nbe" instead of "a" for fcmov losage on some assemblers. Those same assemblers have the same but opposite losage on cmov. */ if (mode != CCmode) abort (); suffix = fp ? "nbe" : "a"; break; case LT: if (mode == CCNOmode || mode == CCGOCmode) suffix = "s"; else if (mode == CCmode || mode == CCGCmode) suffix = "l"; else abort (); break; case LTU: if (mode != CCmode) abort (); suffix = "b"; break; case GE: if (mode == CCNOmode || mode == CCGOCmode) suffix = "ns"; else if (mode == CCmode || mode == CCGCmode) suffix = "ge"; else abort (); break; case GEU: /* ??? As above. */ if (mode != CCmode) abort (); suffix = fp ? "nb" : "ae"; break; case LE: if (mode != CCmode && mode != CCGCmode && mode != CCNOmode) abort (); suffix = "le"; break; case LEU: if (mode != CCmode) abort (); suffix = "be"; break; case UNORDERED: suffix = fp ? "u" : "p"; break; case ORDERED: suffix = fp ? "nu" : "np"; break; default: abort (); } fputs (suffix, file); } /* Print the name of register X to FILE based on its machine mode and number. If CODE is 'w', pretend the mode is HImode. If CODE is 'b', pretend the mode is QImode. If CODE is 'k', pretend the mode is SImode. If CODE is 'q', pretend the mode is DImode. If CODE is 'h', pretend the reg is the `high' byte register. 
If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */ void print_reg (rtx x, int code, FILE *file) { if (REGNO (x) == ARG_POINTER_REGNUM || REGNO (x) == FRAME_POINTER_REGNUM || REGNO (x) == FLAGS_REG || REGNO (x) == FPSR_REG) abort (); if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0) putc ('%', file); if (code == 'w' || MMX_REG_P (x)) code = 2; else if (code == 'b') code = 1; else if (code == 'k') code = 4; else if (code == 'q') code = 8; else if (code == 'y') code = 3; else if (code == 'h') code = 0; else code = GET_MODE_SIZE (GET_MODE (x)); /* Irritatingly, AMD extended registers use different naming convention from the normal registers. */ if (REX_INT_REG_P (x)) { if (!TARGET_64BIT) abort (); switch (code) { case 0: error ("extended registers have no high halves"); break; case 1: fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8); break; case 2: fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8); break; case 4: fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8); break; case 8: fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8); break; default: error ("unsupported operand size for extended register"); break; } return; } switch (code) { case 3: if (STACK_TOP_P (x)) { fputs ("st(0)", file); break; } /* FALLTHRU */ case 8: case 4: case 12: if (! ANY_FP_REG_P (x)) putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file); /* FALLTHRU */ case 16: case 2: normal: fputs (hi_reg_name[REGNO (x)], file); break; case 1: if (REGNO (x) >= ARRAY_SIZE (qi_reg_name)) goto normal; fputs (qi_reg_name[REGNO (x)], file); break; case 0: if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name)) goto normal; fputs (qi_high_reg_name[REGNO (x)], file); break; default: abort (); } } /* Locate some local-dynamic symbol still in use by this function so that we can print its name in some tls_local_dynamic_base pattern. */ static const char * get_some_local_dynamic_name (void) { rtx insn; if (cfun->machine->some_ld_name) return cfun->machine->some_ld_name; for (insn = get_insns (); insn ; insn = NEXT_INSN (insn)) if (INSN_P (insn) && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0)) return cfun->machine->some_ld_name; abort (); } static int get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED) { rtx x = *px; if (GET_CODE (x) == SYMBOL_REF && local_dynamic_symbolic_operand (x, Pmode)) { cfun->machine->some_ld_name = XSTR (x, 0); return 1; } return 0; } /* Meaning of CODE: L,W,B,Q,S,T -- print the opcode suffix for specified size of operand. C -- print opcode suffix for set/cmov insn. c -- like C, but print reversed condition F,f -- likewise, but for floating-point. O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.", otherwise nothing R -- print the prefix for register names. z -- print the opcode suffix for the size of the current operand. * -- print a star (in certain assembler syntax) A -- print an absolute memory reference. w -- print the operand as if it's a "word" (HImode) even if it isn't. s -- print a shift double count, followed by the assemblers argument delimiter. b -- print the QImode name of the register for the indicated operand. %b0 would print %al if operands[0] is reg 0. w -- likewise, print the HImode name of the register. k -- likewise, print the SImode name of the register. q -- likewise, print the DImode name of the register. h -- print the QImode name for a "high" register, either ah, bh, ch or dh. y -- print "st(0)" instead of "st" as a register. D -- print condition for SSE cmp instruction. 
P -- if PIC, print an @PLT suffix. X -- don't print any sort of PIC '@' suffix for a symbol. & -- print some in-use local-dynamic symbol name. */ void print_operand (FILE *file, rtx x, int code) { if (code) { switch (code) { case '*': if (ASSEMBLER_DIALECT == ASM_ATT) putc ('*', file); return; case '&': assemble_name (file, get_some_local_dynamic_name ()); return; case 'A': if (ASSEMBLER_DIALECT == ASM_ATT) putc ('*', file); else if (ASSEMBLER_DIALECT == ASM_INTEL) { /* Intel syntax. For absolute addresses, registers should not be surrounded by braces. */ if (GET_CODE (x) != REG) { putc ('[', file); PRINT_OPERAND (file, x, 0); putc (']', file); return; } } else abort (); PRINT_OPERAND (file, x, 0); return; case 'L': if (ASSEMBLER_DIALECT == ASM_ATT) putc ('l', file); return; case 'W': if (ASSEMBLER_DIALECT == ASM_ATT) putc ('w', file); return; case 'B': if (ASSEMBLER_DIALECT == ASM_ATT) putc ('b', file); return; case 'Q': if (ASSEMBLER_DIALECT == ASM_ATT) putc ('l', file); return; case 'S': if (ASSEMBLER_DIALECT == ASM_ATT) putc ('s', file); return; case 'T': if (ASSEMBLER_DIALECT == ASM_ATT) putc ('t', file); return; case 'z': /* 387 opcodes don't get size suffixes if the operands are registers. */ if (STACK_REG_P (x)) return; /* Likewise if using Intel opcodes. */ if (ASSEMBLER_DIALECT == ASM_INTEL) return; /* This is the size of op from size of operand. */ switch (GET_MODE_SIZE (GET_MODE (x))) { case 2: #ifdef HAVE_GAS_FILDS_FISTS putc ('s', file); #endif return; case 4: if (GET_MODE (x) == SFmode) { putc ('s', file); return; } else putc ('l', file); return; case 12: case 16: putc ('t', file); return; case 8: if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT) { #ifdef GAS_MNEMONICS putc ('q', file); #else putc ('l', file); putc ('l', file); #endif } else putc ('l', file); return; default: abort (); } case 'b': case 'w': case 'k': case 'q': case 'h': case 'y': case 'X': case 'P': break; case 's': if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT) { PRINT_OPERAND (file, x, 0); putc (',', file); } return; case 'D': /* Little bit of braindamage here. The SSE compare instructions does use completely different names for the comparisons that the fp conditional moves. */ switch (GET_CODE (x)) { case EQ: case UNEQ: fputs ("eq", file); break; case LT: case UNLT: fputs ("lt", file); break; case LE: case UNLE: fputs ("le", file); break; case UNORDERED: fputs ("unord", file); break; case NE: case LTGT: fputs ("neq", file); break; case UNGE: case GE: fputs ("nlt", file); break; case UNGT: case GT: fputs ("nle", file); break; case ORDERED: fputs ("ord", file); break; default: abort (); break; } return; case 'O': #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX if (ASSEMBLER_DIALECT == ASM_ATT) { switch (GET_MODE (x)) { case HImode: putc ('w', file); break; case SImode: case SFmode: putc ('l', file); break; case DImode: case DFmode: putc ('q', file); break; default: abort (); } putc ('.', file); } #endif return; case 'C': put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file); return; case 'F': #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX if (ASSEMBLER_DIALECT == ASM_ATT) putc ('.', file); #endif put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file); return; /* Like above, but reverse condition */ case 'c': /* Check to see if argument to %c is really a constant and not a condition code which needs to be reversed. 
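If it is not a comparison rtx at all (COMPARISON_P), reversing its condition would be meaningless, so report an operand lossage rather than guessing.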
*/ if (!COMPARISON_P (x)) { output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'"); return; } put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file); return; case 'f': #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX if (ASSEMBLER_DIALECT == ASM_ATT) putc ('.', file); #endif put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file); return; case '+': { rtx x; if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS) return; x = find_reg_note (current_output_insn, REG_BR_PROB, 0); if (x) { int pred_val = INTVAL (XEXP (x, 0)); if (pred_val < REG_BR_PROB_BASE * 45 / 100 || pred_val > REG_BR_PROB_BASE * 55 / 100) { int taken = pred_val > REG_BR_PROB_BASE / 2; int cputaken = final_forward_branch_p (current_output_insn) == 0; /* Emit hints only in the case default branch prediction heuristics would fail. */ if (taken != cputaken) { /* We use 3e (DS) prefix for taken branches and 2e (CS) prefix for not taken branches. */ if (taken) fputs ("ds ; ", file); else fputs ("cs ; ", file); } } } return; } default: output_operand_lossage ("invalid operand code `%c'", code); } } if (GET_CODE (x) == REG) print_reg (x, code, file); else if (GET_CODE (x) == MEM) { /* No `byte ptr' prefix for call instructions. */ if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P') { const char * size; switch (GET_MODE_SIZE (GET_MODE (x))) { case 1: size = "BYTE"; break; case 2: size = "WORD"; break; case 4: size = "DWORD"; break; case 8: size = "QWORD"; break; case 12: size = "XWORD"; break; case 16: size = "XMMWORD"; break; default: abort (); } /* Check for explicit size override (codes 'b', 'w' and 'k') */ if (code == 'b') size = "BYTE"; else if (code == 'w') size = "WORD"; else if (code == 'k') size = "DWORD"; fputs (size, file); fputs (" PTR ", file); } x = XEXP (x, 0); /* Avoid (%rip) for call operands. */ if (CONSTANT_ADDRESS_P (x) && code == 'P' && GET_CODE (x) != CONST_INT) output_addr_const (file, x); else if (this_is_asm_operands && ! address_operand (x, VOIDmode)) output_operand_lossage ("invalid constraints for operand"); else output_address (x); } else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode) { REAL_VALUE_TYPE r; long l; REAL_VALUE_FROM_CONST_DOUBLE (r, x); REAL_VALUE_TO_TARGET_SINGLE (r, l); if (ASSEMBLER_DIALECT == ASM_ATT) putc ('$', file); fprintf (file, "0x%08lx", l); } /* These float cases don't actually occur as immediate operands. */ else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode) { char dstr[30]; real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1); fprintf (file, "%s", dstr); } else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode) { char dstr[30]; real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1); fprintf (file, "%s", dstr); } else { if (code != 'P') { if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE) { if (ASSEMBLER_DIALECT == ASM_ATT) putc ('$', file); } else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF) { if (ASSEMBLER_DIALECT == ASM_ATT) putc ('$', file); else fputs ("OFFSET FLAT:", file); } } if (GET_CODE (x) == CONST_INT) fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)); else if (flag_pic) output_pic_addr_const (file, x, code); else output_addr_const (file, x); } } /* Print a memory operand whose address is ADDR. */ void print_operand_address (FILE *file, rtx addr) { struct ix86_address parts; rtx base, index, disp; int scale; if (! 
ix86_decompose_address (addr, &parts)) abort (); base = parts.base; index = parts.index; disp = parts.disp; scale = parts.scale; switch (parts.seg) { case SEG_DEFAULT: break; case SEG_FS: case SEG_GS: if (USER_LABEL_PREFIX[0] == 0) putc ('%', file); fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file); break; default: abort (); } if (!base && !index) { /* Displacement only requires special attention. */ if (GET_CODE (disp) == CONST_INT) { if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT) { if (USER_LABEL_PREFIX[0] == 0) putc ('%', file); fputs ("ds:", file); } fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp)); } else if (flag_pic) output_pic_addr_const (file, disp, 0); else output_addr_const (file, disp); /* Use one byte shorter RIP relative addressing for 64bit mode. */ if (TARGET_64BIT && ((GET_CODE (disp) == SYMBOL_REF && ! tls_symbolic_operand (disp, GET_MODE (disp))) || GET_CODE (disp) == LABEL_REF || (GET_CODE (disp) == CONST && GET_CODE (XEXP (disp, 0)) == PLUS && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF) && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT))) fputs ("(%rip)", file); } else { if (ASSEMBLER_DIALECT == ASM_ATT) { if (disp) { if (flag_pic) output_pic_addr_const (file, disp, 0); else if (GET_CODE (disp) == LABEL_REF) output_asm_label (disp); else output_addr_const (file, disp); } putc ('(', file); if (base) print_reg (base, 0, file); if (index) { putc (',', file); print_reg (index, 0, file); if (scale != 1) fprintf (file, ",%d", scale); } putc (')', file); } else { rtx offset = NULL_RTX; if (disp) { /* Pull out the offset of a symbol; print any symbol itself. */ if (GET_CODE (disp) == CONST && GET_CODE (XEXP (disp, 0)) == PLUS && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT) { offset = XEXP (XEXP (disp, 0), 1); disp = gen_rtx_CONST (VOIDmode, XEXP (XEXP (disp, 0), 0)); } if (flag_pic) output_pic_addr_const (file, disp, 0); else if (GET_CODE (disp) == LABEL_REF) output_asm_label (disp); else if (GET_CODE (disp) == CONST_INT) offset = disp; else output_addr_const (file, disp); } putc ('[', file); if (base) { print_reg (base, 0, file); if (offset) { if (INTVAL (offset) >= 0) putc ('+', file); fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset)); } } else if (offset) fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset)); else putc ('0', file); if (index) { putc ('+', file); print_reg (index, 0, file); if (scale != 1) fprintf (file, "*%d", scale); } putc (']', file); } } } bool output_addr_const_extra (FILE *file, rtx x) { rtx op; if (GET_CODE (x) != UNSPEC) return false; op = XVECEXP (x, 0, 0); switch (XINT (x, 1)) { case UNSPEC_GOTTPOFF: output_addr_const (file, op); /* FIXME: This might be @TPOFF in Sun ld. */ fputs ("@GOTTPOFF", file); break; case UNSPEC_TPOFF: output_addr_const (file, op); fputs ("@TPOFF", file); break; case UNSPEC_NTPOFF: output_addr_const (file, op); if (TARGET_64BIT) fputs ("@TPOFF", file); else fputs ("@NTPOFF", file); break; case UNSPEC_DTPOFF: output_addr_const (file, op); fputs ("@DTPOFF", file); break; case UNSPEC_GOTNTPOFF: output_addr_const (file, op); if (TARGET_64BIT) fputs ("@GOTTPOFF(%rip)", file); else fputs ("@GOTNTPOFF", file); break; case UNSPEC_INDNTPOFF: output_addr_const (file, op); fputs ("@INDNTPOFF", file); break; default: return false; } return true; } /* Split one or more DImode RTL references into pairs of SImode references. The RTL can be REG, offsettable MEM, integer constant, or CONST_DOUBLE. 
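A MEM is split with adjust_address into its two word halves at offsets 0 and 4, so volatile references stay intact; everything else goes through simplify_gen_subreg, with VOIDmode constants treated as DImode.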
"operands" is a pointer to an array of DImode RTL to split and "num" is its length. lo_half and hi_half are output arrays that parallel "operands". */ void split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[]) { while (num--) { rtx op = operands[num]; /* simplify_subreg refuse to split volatile memory addresses, but we still have to handle it. */ if (GET_CODE (op) == MEM) { lo_half[num] = adjust_address (op, SImode, 0); hi_half[num] = adjust_address (op, SImode, 4); } else { lo_half[num] = simplify_gen_subreg (SImode, op, GET_MODE (op) == VOIDmode ? DImode : GET_MODE (op), 0); hi_half[num] = simplify_gen_subreg (SImode, op, GET_MODE (op) == VOIDmode ? DImode : GET_MODE (op), 4); } } } /* Split one or more TImode RTL references into pairs of SImode references. The RTL can be REG, offsettable MEM, integer constant, or CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to split and "num" is its length. lo_half and hi_half are output arrays that parallel "operands". */ void split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[]) { while (num--) { rtx op = operands[num]; /* simplify_subreg refuse to split volatile memory addresses, but we still have to handle it. */ if (GET_CODE (op) == MEM) { lo_half[num] = adjust_address (op, DImode, 0); hi_half[num] = adjust_address (op, DImode, 8); } else { lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0); hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8); } } } /* Output code to perform a 387 binary operation in INSN, one of PLUS, MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3] is the expression of the binary operation. The output may either be emitted here, or returned to the caller, like all output_* functions. There is no guarantee that the operands are the same mode, as they might be within FLOAT or FLOAT_EXTEND expressions. */ #ifndef SYSV386_COMPAT /* Set to 1 for compatibility with brain-damaged assemblers. No-one wants to fix the assemblers because that causes incompatibility with gcc. No-one wants to fix gcc because that causes incompatibility with assemblers... You can use the option of -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */ #define SYSV386_COMPAT 1 #endif const char * output_387_binary_op (rtx insn, rtx *operands) { static char buf[30]; const char *p; const char *ssep; int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]) | SSE_REG_P (operands[2]); #ifdef ENABLE_CHECKING /* Even if we do not want to check the inputs, this documents input constraints. Which helps in understanding the following code. 
*/ if (STACK_REG_P (operands[0]) && ((REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]) && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM)) || (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]) && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM))) && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2]))) ; /* ok */ else if (!is_sse) abort (); #endif switch (GET_CODE (operands[3])) { case PLUS: if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT) p = "fiadd"; else p = "fadd"; ssep = "add"; break; case MINUS: if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT) p = "fisub"; else p = "fsub"; ssep = "sub"; break; case MULT: if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT) p = "fimul"; else p = "fmul"; ssep = "mul"; break; case DIV: if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT) p = "fidiv"; else p = "fdiv"; ssep = "div"; break; default: abort (); } if (is_sse) { strcpy (buf, ssep); if (GET_MODE (operands[0]) == SFmode) strcat (buf, "ss\t{%2, %0|%0, %2}"); else strcat (buf, "sd\t{%2, %0|%0, %2}"); return buf; } strcpy (buf, p); switch (GET_CODE (operands[3])) { case MULT: case PLUS: if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2])) { rtx temp = operands[2]; operands[2] = operands[1]; operands[1] = temp; } /* know operands[0] == operands[1]. */ if (GET_CODE (operands[2]) == MEM) { p = "%z2\t%2"; break; } if (find_regno_note (insn, REG_DEAD, REGNO (operands[2]))) { if (STACK_TOP_P (operands[0])) /* How is it that we are storing to a dead operand[2]? Well, presumably operands[1] is dead too. We can't store the result to st(0) as st(0) gets popped on this instruction. Instead store to operands[2] (which I think has to be st(1)). st(1) will be popped later. gcc <= 2.8.1 didn't have this check and generated assembly code that the Unixware assembler rejected. */ p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */ else p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */ break; } if (STACK_TOP_P (operands[0])) p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */ else p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */ break; case MINUS: case DIV: if (GET_CODE (operands[1]) == MEM) { p = "r%z1\t%1"; break; } if (GET_CODE (operands[2]) == MEM) { p = "%z2\t%2"; break; } if (find_regno_note (insn, REG_DEAD, REGNO (operands[2]))) { #if SYSV386_COMPAT /* The SystemV/386 SVR3.2 assembler, and probably all AT&T derived assemblers, confusingly reverse the direction of the operation for fsub{r} and fdiv{r} when the destination register is not st(0). The Intel assembler doesn't have this brain damage. Read !SYSV386_COMPAT to figure out what the hardware really does. */ if (STACK_TOP_P (operands[0])) p = "{p\t%0, %2|rp\t%2, %0}"; else p = "{rp\t%2, %0|p\t%0, %2}"; #else if (STACK_TOP_P (operands[0])) /* As above for fmul/fadd, we can't store to st(0). 
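Instead the reversed popping form below stores its result into operands[2] (presumably st(1)) and the pop then exposes it as the new top of stack.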
*/ p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */ else p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */ #endif break; } if (find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) { #if SYSV386_COMPAT if (STACK_TOP_P (operands[0])) p = "{rp\t%0, %1|p\t%1, %0}"; else p = "{p\t%1, %0|rp\t%0, %1}"; #else if (STACK_TOP_P (operands[0])) p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */ else p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */ #endif break; } if (STACK_TOP_P (operands[0])) { if (STACK_TOP_P (operands[1])) p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */ else p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */ break; } else if (STACK_TOP_P (operands[1])) { #if SYSV386_COMPAT p = "{\t%1, %0|r\t%0, %1}"; #else p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */ #endif } else { #if SYSV386_COMPAT p = "{r\t%2, %0|\t%0, %2}"; #else p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */ #endif } break; default: abort (); } strcat (buf, p); return buf; } /* Output code to initialize control word copies used by trunc?f?i patterns. NORMAL is set to current control word, while ROUND_DOWN is set to control word rounding downwards. */ void emit_i387_cw_initialization (rtx normal, rtx round_down) { rtx reg = gen_reg_rtx (HImode); emit_insn (gen_x86_fnstcw_1 (normal)); emit_move_insn (reg, normal); if (!TARGET_PARTIAL_REG_STALL && !optimize_size && !TARGET_64BIT) emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc))); else emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0xc00))); emit_move_insn (round_down, reg); } /* Output code for INSN to convert a float to a signed int. OPERANDS are the insn operands. The output may be [HSD]Imode and the input operand may be [SDX]Fmode. */ const char * output_fix_trunc (rtx insn, rtx *operands) { int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0; int dimode_p = GET_MODE (operands[0]) == DImode; /* Jump through a hoop or two for DImode, since the hardware has no non-popping instruction. We used to do this a different way, but that was somewhat fragile and broke with post-reload splitters. */ if (dimode_p && !stack_top_dies) output_asm_insn ("fld\t%y1", operands); if (!STACK_TOP_P (operands[1])) abort (); if (GET_CODE (operands[0]) != MEM) abort (); output_asm_insn ("fldcw\t%3", operands); if (stack_top_dies || dimode_p) output_asm_insn ("fistp%z0\t%0", operands); else output_asm_insn ("fist%z0\t%0", operands); output_asm_insn ("fldcw\t%2", operands); return ""; } /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi should be used and 2 when fnstsw should be used. UNORDERED_P is true when fucom should be used. */ const char * output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p) { int stack_top_dies; rtx cmp_op0 = operands[0]; rtx cmp_op1 = operands[1]; int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]); if (eflags_p == 2) { cmp_op0 = cmp_op1; cmp_op1 = operands[2]; } if (is_sse) { if (GET_MODE (operands[0]) == SFmode) if (unordered_p) return "ucomiss\t{%1, %0|%0, %1}"; else return "comiss\t{%1, %0|%0, %1}"; else if (unordered_p) return "ucomisd\t{%1, %0|%0, %1}"; else return "comisd\t{%1, %0|%0, %1}"; } if (! 
STACK_TOP_P (cmp_op0)) abort (); stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0; if (STACK_REG_P (cmp_op1) && stack_top_dies && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1)) && REGNO (cmp_op1) != FIRST_STACK_REG) { /* If both the top of the 387 stack dies, and the other operand is also a stack register that dies, then this must be a `fcompp' float compare */ if (eflags_p == 1) { /* There is no double popping fcomi variant. Fortunately, eflags is immune from the fstp's cc clobbering. */ if (unordered_p) output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands); else output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands); return "fstp\t%y0"; } else { if (eflags_p == 2) { if (unordered_p) return "fucompp\n\tfnstsw\t%0"; else return "fcompp\n\tfnstsw\t%0"; } else { if (unordered_p) return "fucompp"; else return "fcompp"; } } } else { /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */ static const char * const alt[24] = { "fcom%z1\t%y1", "fcomp%z1\t%y1", "fucom%z1\t%y1", "fucomp%z1\t%y1", "ficom%z1\t%y1", "ficomp%z1\t%y1", NULL, NULL, "fcomi\t{%y1, %0|%0, %y1}", "fcomip\t{%y1, %0|%0, %y1}", "fucomi\t{%y1, %0|%0, %y1}", "fucomip\t{%y1, %0|%0, %y1}", NULL, NULL, NULL, NULL, "fcom%z2\t%y2\n\tfnstsw\t%0", "fcomp%z2\t%y2\n\tfnstsw\t%0", "fucom%z2\t%y2\n\tfnstsw\t%0", "fucomp%z2\t%y2\n\tfnstsw\t%0", "ficom%z2\t%y2\n\tfnstsw\t%0", "ficomp%z2\t%y2\n\tfnstsw\t%0", NULL, NULL }; int mask; const char *ret; mask = eflags_p << 3; mask |= (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT) << 2; mask |= unordered_p << 1; mask |= stack_top_dies; if (mask >= 24) abort (); ret = alt[mask]; if (ret == NULL) abort (); return ret; } } void ix86_output_addr_vec_elt (FILE *file, int value) { const char *directive = ASM_LONG; if (TARGET_64BIT) { #ifdef ASM_QUAD directive = ASM_QUAD; #else abort (); #endif } fprintf (file, "%s%s%d\n", directive, LPREFIX, value); } void ix86_output_addr_diff_elt (FILE *file, int value, int rel) { if (TARGET_64BIT) fprintf (file, "%s%s%d-%s%d\n", ASM_LONG, LPREFIX, value, LPREFIX, rel); else if (HAVE_AS_GOTOFF_IN_DATA) fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value); #if TARGET_MACHO else if (TARGET_MACHO) { fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value); machopic_output_function_base_name (file); fprintf(file, "\n"); } #endif else asm_fprintf (file, "%s%U%s+[.-%s%d]\n", ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value); } /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate for the target. */ void ix86_expand_clear (rtx dest) { rtx tmp; /* We play register width games, which are only valid after reload. */ if (!reload_completed) abort (); /* Avoid HImode and its attendant prefix byte. */ if (GET_MODE_SIZE (GET_MODE (dest)) < 4) dest = gen_rtx_REG (SImode, REGNO (dest)); tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx); /* This predicate should match that for movsi_xor and movdi_xor_rex64. */ if (reload_completed && (!TARGET_USE_MOV0 || optimize_size)) { rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17)); tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob)); } emit_insn (tmp); } /* X is an unchanging MEM. If it is a constant pool reference, return the constant pool rtx, else NULL. 
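The address is first passed through ix86_delegitimize_address so that PIC-wrapped references to the pool (GOTOFF, RIP-relative) are recognized too; only a SYMBOL_REF with CONSTANT_POOL_ADDRESS_P set yields a constant.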
*/ static rtx maybe_get_pool_constant (rtx x) { x = ix86_delegitimize_address (XEXP (x, 0)); if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x)) return get_pool_constant (x); return NULL_RTX; } void ix86_expand_move (enum machine_mode mode, rtx operands[]) { int strict = (reload_in_progress || reload_completed); rtx op0, op1; enum tls_model model; op0 = operands[0]; op1 = operands[1]; model = tls_symbolic_operand (op1, Pmode); if (model) { op1 = legitimize_tls_address (op1, model, true); op1 = force_operand (op1, op0); if (op1 == op0) return; } if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode)) { #if TARGET_MACHO if (MACHOPIC_PURE) { rtx temp = ((reload_in_progress || ((op0 && GET_CODE (op0) == REG) && mode == Pmode)) ? op0 : gen_reg_rtx (Pmode)); op1 = machopic_indirect_data_reference (op1, temp); op1 = machopic_legitimize_pic_address (op1, mode, temp == op1 ? 0 : temp); } else if (MACHOPIC_INDIRECT) op1 = machopic_indirect_data_reference (op1, 0); if (op0 == op1) return; #else if (GET_CODE (op0) == MEM) op1 = force_reg (Pmode, op1); else { rtx temp = op0; if (GET_CODE (temp) != REG) temp = gen_reg_rtx (Pmode); temp = legitimize_pic_address (op1, temp); if (temp == op0) return; op1 = temp; } #endif /* TARGET_MACHO */ } else { if (GET_CODE (op0) == MEM && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode) || !push_operand (op0, mode)) && GET_CODE (op1) == MEM) op1 = force_reg (mode, op1); if (push_operand (op0, mode) && ! general_no_elim_operand (op1, mode)) op1 = copy_to_mode_reg (mode, op1); /* Force large constants in 64bit compilation into register to get them CSEed. */ if (TARGET_64BIT && mode == DImode && immediate_operand (op1, mode) && !x86_64_zero_extended_value (op1) && !register_operand (op0, mode) && optimize && !reload_completed && !reload_in_progress) op1 = copy_to_mode_reg (mode, op1); if (FLOAT_MODE_P (mode)) { /* If we are loading a floating point constant to a register, force the value to memory now, since we'll get better code out the back end. */ if (strict) ; else if (GET_CODE (op1) == CONST_DOUBLE) { op1 = validize_mem (force_const_mem (mode, op1)); if (!register_operand (op0, mode)) { rtx temp = gen_reg_rtx (mode); emit_insn (gen_rtx_SET (VOIDmode, temp, op1)); emit_move_insn (op0, temp); return; } } } } emit_insn (gen_rtx_SET (VOIDmode, op0, op1)); } void ix86_expand_vector_move (enum machine_mode mode, rtx operands[]) { /* Force constants other than zero into memory. We do not know how the instructions used to build constants modify the upper 64 bits of the register, once we have that information we may be able to handle some of them more efficiently. */ if ((reload_in_progress | reload_completed) == 0 && register_operand (operands[0], mode) && CONSTANT_P (operands[1]) && operands[1] != CONST0_RTX (mode)) operands[1] = validize_mem (force_const_mem (mode, operands[1])); /* Make operand1 a register if it isn't already. */ if (!no_new_pseudos && !register_operand (operands[0], mode) && !register_operand (operands[1], mode)) { rtx temp = force_reg (GET_MODE (operands[1]), operands[1]); emit_move_insn (operands[0], temp); return; } emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1])); } /* Attempt to expand a binary operator. Make the expansion closer to the actual machine, then just general_operand, which will allow 3 separate memory references (one output, two input) in a single insn. 
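In outline: commutative operands are swapped so a memory destination can match source 1, at most one source is allowed to stay in memory, a constant or non-matching memory source 1 of a non-commutative code is forced into a register, and the SET is wrapped in a PARALLEL with a clobber of the flags register, except during reload, where only PLUS may be emitted without the clobber.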
*/ void ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode, rtx operands[]) { int matching_memory; rtx src1, src2, dst, op, clob; dst = operands[0]; src1 = operands[1]; src2 = operands[2]; /* Recognize = for commutative operators */ if (GET_RTX_CLASS (code) == RTX_COMM_ARITH && (rtx_equal_p (dst, src2) || immediate_operand (src1, mode))) { rtx temp = src1; src1 = src2; src2 = temp; } /* If the destination is memory, and we do not have matching source operands, do things in registers. */ matching_memory = 0; if (GET_CODE (dst) == MEM) { if (rtx_equal_p (dst, src1)) matching_memory = 1; else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH && rtx_equal_p (dst, src2)) matching_memory = 2; else dst = gen_reg_rtx (mode); } /* Both source operands cannot be in memory. */ if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM) { if (matching_memory != 2) src2 = force_reg (mode, src2); else src1 = force_reg (mode, src1); } /* If the operation is not commutable, source 1 cannot be a constant or non-matching memory. */ if ((CONSTANT_P (src1) || (!matching_memory && GET_CODE (src1) == MEM)) && GET_RTX_CLASS (code) != RTX_COMM_ARITH) src1 = force_reg (mode, src1); /* If optimizing, copy to regs to improve CSE */ if (optimize && ! no_new_pseudos) { if (GET_CODE (dst) == MEM) dst = gen_reg_rtx (mode); if (GET_CODE (src1) == MEM) src1 = force_reg (mode, src1); if (GET_CODE (src2) == MEM) src2 = force_reg (mode, src2); } /* Emit the instruction. */ op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2)); if (reload_in_progress) { /* Reload doesn't know about the flags register, and doesn't know that it doesn't want to clobber it. We can only do this with PLUS. */ if (code != PLUS) abort (); emit_insn (op); } else { clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG)); emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob))); } /* Fix up the destination if needed. */ if (dst != operands[0]) emit_move_insn (operands[0], dst); } /* Return TRUE or FALSE depending on whether the binary operator meets the appropriate constraints. */ int ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[3]) { /* Both source operands cannot be in memory. */ if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM) return 0; /* If the operation is not commutable, source 1 cannot be a constant. */ if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH) return 0; /* If the destination is memory, we must have a matching source operand. */ if (GET_CODE (operands[0]) == MEM && ! (rtx_equal_p (operands[0], operands[1]) || (GET_RTX_CLASS (code) == RTX_COMM_ARITH && rtx_equal_p (operands[0], operands[2])))) return 0; /* If the operation is not commutable and the source 1 is memory, we must have a matching destination. */ if (GET_CODE (operands[1]) == MEM && GET_RTX_CLASS (code) != RTX_COMM_ARITH && ! rtx_equal_p (operands[0], operands[1])) return 0; return 1; } /* Attempt to expand a unary operator. Make the expansion closer to the actual machine, then just general_operand, which will allow 2 separate memory references (one output, one input) in a single insn. */ void ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode, rtx operands[]) { int matching_memory; rtx src, dst, op, clob; dst = operands[0]; src = operands[1]; /* If the destination is memory, and we do not have matching source operands, do things in registers. 
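That is, compute into a fresh pseudo and copy the result back to the memory destination at the end.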
*/ matching_memory = 0; if (GET_CODE (dst) == MEM) { if (rtx_equal_p (dst, src)) matching_memory = 1; else dst = gen_reg_rtx (mode); } /* When source operand is memory, destination must match. */ if (!matching_memory && GET_CODE (src) == MEM) src = force_reg (mode, src); /* If optimizing, copy to regs to improve CSE */ if (optimize && ! no_new_pseudos) { if (GET_CODE (dst) == MEM) dst = gen_reg_rtx (mode); if (GET_CODE (src) == MEM) src = force_reg (mode, src); } /* Emit the instruction. */ op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src)); if (reload_in_progress || code == NOT) { /* Reload doesn't know about the flags register, and doesn't know that it doesn't want to clobber it. */ if (code != NOT) abort (); emit_insn (op); } else { clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG)); emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob))); } /* Fix up the destination if needed. */ if (dst != operands[0]) emit_move_insn (operands[0], dst); } /* Return TRUE or FALSE depending on whether the unary operator meets the appropriate constraints. */ int ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[2] ATTRIBUTE_UNUSED) { /* If one of operands is memory, source and destination must match. */ if ((GET_CODE (operands[0]) == MEM || GET_CODE (operands[1]) == MEM) && ! rtx_equal_p (operands[0], operands[1])) return FALSE; return TRUE; } /* Return TRUE or FALSE depending on whether the first SET in INSN has source and destination with matching CC modes, and that the CC mode is at least as constrained as REQ_MODE. */ int ix86_match_ccmode (rtx insn, enum machine_mode req_mode) { rtx set; enum machine_mode set_mode; set = PATTERN (insn); if (GET_CODE (set) == PARALLEL) set = XVECEXP (set, 0, 0); if (GET_CODE (set) != SET) abort (); if (GET_CODE (SET_SRC (set)) != COMPARE) abort (); set_mode = GET_MODE (SET_DEST (set)); switch (set_mode) { case CCNOmode: if (req_mode != CCNOmode && (req_mode != CCmode || XEXP (SET_SRC (set), 1) != const0_rtx)) return 0; break; case CCmode: if (req_mode == CCGCmode) return 0; /* FALLTHRU */ case CCGCmode: if (req_mode == CCGOCmode || req_mode == CCNOmode) return 0; /* FALLTHRU */ case CCGOCmode: if (req_mode == CCZmode) return 0; /* FALLTHRU */ case CCZmode: break; default: abort (); } return (GET_MODE (SET_SRC (set)) == set_mode); } /* Generate insn patterns to do an integer compare of OPERANDS. */ static rtx ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1) { enum machine_mode cmpmode; rtx tmp, flags; cmpmode = SELECT_CC_MODE (code, op0, op1); flags = gen_rtx_REG (cmpmode, FLAGS_REG); /* This is very simple, but making the interface the same as in the FP case makes the rest of the code easier. */ tmp = gen_rtx_COMPARE (cmpmode, op0, op1); emit_insn (gen_rtx_SET (VOIDmode, flags, tmp)); /* Return the test that should be put into the flags user, i.e. the bcc, scc, or cmov instruction. */ return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx); } /* Figure out whether to use ordered or unordered fp comparisons. Return the appropriate mode to use. */ enum machine_mode ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED) { /* ??? In order to make all comparisons reversible, we do all comparisons non-trapping when compiling for IEEE. Once gcc is able to distinguish all forms trapping and nontrapping comparisons, we can make inequality comparisons trapping again, since it results in better code when using FCOM based compares. 
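For now this simply means: with TARGET_IEEE_FP every FP comparison uses the non-trapping CCFPUmode, otherwise plain CCFPmode.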
*/ return TARGET_IEEE_FP ? CCFPUmode : CCFPmode; } enum machine_mode ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1) { if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT) return ix86_fp_compare_mode (code); switch (code) { /* Only zero flag is needed. */ case EQ: /* ZF=0 */ case NE: /* ZF!=0 */ return CCZmode; /* Codes needing carry flag. */ case GEU: /* CF=0 */ case GTU: /* CF=0 & ZF=0 */ case LTU: /* CF=1 */ case LEU: /* CF=1 | ZF=1 */ return CCmode; /* Codes possibly doable only with sign flag when comparing against zero. */ case GE: /* SF=OF or SF=0 */ case LT: /* SF<>OF or SF=1 */ if (op1 == const0_rtx) return CCGOCmode; else /* For other cases Carry flag is not required. */ return CCGCmode; /* Codes doable only with sign flag when comparing against zero, but we miss jump instruction for it so we need to use relational tests against overflow that thus needs to be zero. */ case GT: /* ZF=0 & SF=OF */ case LE: /* ZF=1 | SF<>OF */ if (op1 == const0_rtx) return CCNOmode; else return CCGCmode; /* strcmp pattern do (use flags) and combine may ask us for proper mode. */ case USE: return CCmode; default: abort (); } } /* Return the fixed registers used for condition codes. */ static bool ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2) { *p1 = FLAGS_REG; *p2 = FPSR_REG; return true; } /* If two condition code modes are compatible, return a condition code mode which is compatible with both. Otherwise, return VOIDmode. */ static enum machine_mode ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2) { if (m1 == m2) return m1; if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC) return VOIDmode; if ((m1 == CCGCmode && m2 == CCGOCmode) || (m1 == CCGOCmode && m2 == CCGCmode)) return CCGCmode; switch (m1) { default: abort (); case CCmode: case CCGCmode: case CCGOCmode: case CCNOmode: case CCZmode: switch (m2) { default: return VOIDmode; case CCmode: case CCGCmode: case CCGOCmode: case CCNOmode: case CCZmode: return CCmode; } case CCFPmode: case CCFPUmode: /* These are only compatible with themselves, which we already checked above. */ return VOIDmode; } } /* Return true if we should use an FCOMI instruction for this fp comparison. */ int ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED) { enum rtx_code swapped_code = swap_condition (code); return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code)) || (ix86_fp_comparison_cost (swapped_code) == ix86_fp_comparison_fcomi_cost (swapped_code))); } /* Swap, force into registers, or otherwise massage the two operands to a fp comparison. The operands are updated in place; the new comparison code is returned. */ static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1) { enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code); rtx op0 = *pop0, op1 = *pop1; enum machine_mode op_mode = GET_MODE (op0); int is_sse = SSE_REG_P (op0) | SSE_REG_P (op1); /* All of the unordered compare instructions only work on registers. The same is true of the XFmode compare instructions. The same is true of the fcomi compare instructions. */ if (!is_sse && (fpcmp_mode == CCFPUmode || op_mode == XFmode || ix86_use_fcomi_compare (code))) { op0 = force_reg (op_mode, op0); op1 = force_reg (op_mode, op1); } else { /* %%% We only allow op1 in memory; op0 must be st(0). So swap things around if they appear profitable, otherwise force op0 into a register. */ if (standard_80387_constant_p (op0) == 0 || (GET_CODE (op0) == MEM && ! 
(standard_80387_constant_p (op1) == 0 || GET_CODE (op1) == MEM))) { rtx tmp; tmp = op0, op0 = op1, op1 = tmp; code = swap_condition (code); } if (GET_CODE (op0) != REG) op0 = force_reg (op_mode, op0); if (CONSTANT_P (op1)) { if (standard_80387_constant_p (op1)) op1 = force_reg (op_mode, op1); else op1 = validize_mem (force_const_mem (op_mode, op1)); } } /* Try to rearrange the comparison to make it cheaper. */ if (ix86_fp_comparison_cost (code) > ix86_fp_comparison_cost (swap_condition (code)) && (GET_CODE (op1) == REG || !no_new_pseudos)) { rtx tmp; tmp = op0, op0 = op1, op1 = tmp; code = swap_condition (code); if (GET_CODE (op0) != REG) op0 = force_reg (op_mode, op0); } *pop0 = op0; *pop1 = op1; return code; } /* Convert comparison codes we use to represent FP comparison to integer code that will result in proper branch. Return UNKNOWN if no such code is available. */ static enum rtx_code ix86_fp_compare_code_to_integer (enum rtx_code code) { switch (code) { case GT: return GTU; case GE: return GEU; case ORDERED: case UNORDERED: return code; break; case UNEQ: return EQ; break; case UNLT: return LTU; break; case UNLE: return LEU; break; case LTGT: return NE; break; default: return UNKNOWN; } } /* Split comparison code CODE into comparisons we can do using branch instructions. BYPASS_CODE is comparison code for branch that will branch around FIRST_CODE and SECOND_CODE. If some of branches is not required, set value to NIL. We never require more than two branches. */ static void ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code, enum rtx_code *first_code, enum rtx_code *second_code) { *first_code = code; *bypass_code = NIL; *second_code = NIL; /* The fcomi comparison sets flags as follows: cmp ZF PF CF > 0 0 0 < 0 0 1 = 1 0 0 un 1 1 1 */ switch (code) { case GT: /* GTU - CF=0 & ZF=0 */ case GE: /* GEU - CF=0 */ case ORDERED: /* PF=0 */ case UNORDERED: /* PF=1 */ case UNEQ: /* EQ - ZF=1 */ case UNLT: /* LTU - CF=1 */ case UNLE: /* LEU - CF=1 | ZF=1 */ case LTGT: /* EQ - ZF=0 */ break; case LT: /* LTU - CF=1 - fails on unordered */ *first_code = UNLT; *bypass_code = UNORDERED; break; case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */ *first_code = UNLE; *bypass_code = UNORDERED; break; case EQ: /* EQ - ZF=1 - fails on unordered */ *first_code = UNEQ; *bypass_code = UNORDERED; break; case NE: /* NE - ZF=0 - fails on unordered */ *first_code = LTGT; *second_code = UNORDERED; break; case UNGE: /* GEU - CF=0 - fails on unordered */ *first_code = GE; *second_code = UNORDERED; break; case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */ *first_code = GT; *second_code = UNORDERED; break; default: abort (); } if (!TARGET_IEEE_FP) { *second_code = NIL; *bypass_code = NIL; } } /* Return cost of comparison done fcom + arithmetics operations on AX. All following functions do use number of instructions as a cost metrics. In future this should be tweaked to compute bytes for optimize_size and take into account performance of various instructions on various CPUs. */ static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code) { if (!TARGET_IEEE_FP) return 4; /* The cost of code output by ix86_expand_fp_compare. */ switch (code) { case UNLE: case UNLT: case LTGT: case GT: case GE: case UNORDERED: case ORDERED: case UNEQ: return 4; break; case LT: case NE: case EQ: case UNGE: return 5; break; case LE: case UNGT: return 6; break; default: abort (); } } /* Return cost of comparison done using fcomi operation. See ix86_fp_comparison_arithmetics_cost for the metrics. 
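The cost charged is the fcomi plus one conditional branch (2), plus one more when the chosen code needs a bypass or second comparison; when the target has no fcomi (!TARGET_CMOVE) an arbitrarily large cost of 1024 keeps it from being chosen.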
*/ static int ix86_fp_comparison_fcomi_cost (enum rtx_code code) { enum rtx_code bypass_code, first_code, second_code; /* Return arbitrarily high cost when instruction is not supported - this prevents gcc from using it. */ if (!TARGET_CMOVE) return 1024; ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code); return (bypass_code != NIL || second_code != NIL) + 2; } /* Return cost of comparison done using sahf operation. See ix86_fp_comparison_arithmetics_cost for the metrics. */ static int ix86_fp_comparison_sahf_cost (enum rtx_code code) { enum rtx_code bypass_code, first_code, second_code; /* Return arbitrarily high cost when instruction is not preferred - this avoids gcc from using it. */ if (!TARGET_USE_SAHF && !optimize_size) return 1024; ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code); return (bypass_code != NIL || second_code != NIL) + 3; } /* Compute cost of the comparison done using any method. See ix86_fp_comparison_arithmetics_cost for the metrics. */ static int ix86_fp_comparison_cost (enum rtx_code code) { int fcomi_cost, sahf_cost, arithmetics_cost = 1024; int min; fcomi_cost = ix86_fp_comparison_fcomi_cost (code); sahf_cost = ix86_fp_comparison_sahf_cost (code); min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code); if (min > sahf_cost) min = sahf_cost; if (min > fcomi_cost) min = fcomi_cost; return min; } /* Generate insn patterns to do a floating point compare of OPERANDS. */ static rtx ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch, rtx *second_test, rtx *bypass_test) { enum machine_mode fpcmp_mode, intcmp_mode; rtx tmp, tmp2; int cost = ix86_fp_comparison_cost (code); enum rtx_code bypass_code, first_code, second_code; fpcmp_mode = ix86_fp_compare_mode (code); code = ix86_prepare_fp_compare_args (code, &op0, &op1); if (second_test) *second_test = NULL_RTX; if (bypass_test) *bypass_test = NULL_RTX; ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code); /* Do fcomi/sahf based test when profitable. */ if ((bypass_code == NIL || bypass_test) && (second_code == NIL || second_test) && ix86_fp_comparison_arithmetics_cost (code) > cost) { if (TARGET_CMOVE) { tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1); tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG), tmp); emit_insn (tmp); } else { tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1); tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW); if (!scratch) scratch = gen_reg_rtx (HImode); emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2)); emit_insn (gen_x86_sahf_1 (scratch)); } /* The FP codes work out to act like unsigned. */ intcmp_mode = fpcmp_mode; code = first_code; if (bypass_code != NIL) *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode, gen_rtx_REG (intcmp_mode, FLAGS_REG), const0_rtx); if (second_code != NIL) *second_test = gen_rtx_fmt_ee (second_code, VOIDmode, gen_rtx_REG (intcmp_mode, FLAGS_REG), const0_rtx); } else { /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */ tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1); tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW); if (!scratch) scratch = gen_reg_rtx (HImode); emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2)); /* In the unordered case, we have to check C2 for NaN's, which doesn't happen to work out to anything nice combination-wise. So do some bit twiddling on the value we've got in AH to come up with an appropriate set of condition codes. 
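After fnstsw/sahf the relevant status bits sit in AH as C0 = 0x01, C2 = 0x04 and C3 = 0x40, so 0x45 masks all three; e.g. the plain greater-than test below simply does testb $0x45 on AH and requires the result to be zero, while the IEEE-safe variants mask first and then compare against specific patterns.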
*/ intcmp_mode = CCNOmode; switch (code) { case GT: case UNGT: if (code == GT || !TARGET_IEEE_FP) { emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45))); code = EQ; } else { emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45))); emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx)); emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44))); intcmp_mode = CCmode; code = GEU; } break; case LT: case UNLT: if (code == LT && TARGET_IEEE_FP) { emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45))); emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01))); intcmp_mode = CCmode; code = EQ; } else { emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01))); code = NE; } break; case GE: case UNGE: if (code == GE || !TARGET_IEEE_FP) { emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05))); code = EQ; } else { emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45))); emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, GEN_INT (0x01))); code = NE; } break; case LE: case UNLE: if (code == LE && TARGET_IEEE_FP) { emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45))); emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx)); emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40))); intcmp_mode = CCmode; code = LTU; } else { emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45))); code = NE; } break; case EQ: case UNEQ: if (code == EQ && TARGET_IEEE_FP) { emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45))); emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40))); intcmp_mode = CCmode; code = EQ; } else { emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40))); code = NE; break; } break; case NE: case LTGT: if (code == NE && TARGET_IEEE_FP) { emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45))); emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, GEN_INT (0x40))); code = NE; } else { emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40))); code = EQ; } break; case UNORDERED: emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04))); code = NE; break; case ORDERED: emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04))); code = EQ; break; default: abort (); } } /* Return the test that should be put into the flags user, i.e. the bcc, scc, or cmov instruction. */ return gen_rtx_fmt_ee (code, VOIDmode, gen_rtx_REG (intcmp_mode, FLAGS_REG), const0_rtx); } rtx ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test) { rtx op0, op1, ret; op0 = ix86_compare_op0; op1 = ix86_compare_op1; if (second_test) *second_test = NULL_RTX; if (bypass_test) *bypass_test = NULL_RTX; if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT) ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX, second_test, bypass_test); else ret = ix86_expand_int_compare (code, op0, op1); return ret; } /* Return true if the CODE will result in nontrivial jump sequence. 
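That happens when fcomi is unavailable, or when IEEE handling of unordered operands requires a bypass or second branch, so more than a single jcc must be emitted.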
*/ bool ix86_fp_jump_nontrivial_p (enum rtx_code code) { enum rtx_code bypass_code, first_code, second_code; if (!TARGET_CMOVE) return true; ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code); return bypass_code != NIL || second_code != NIL; } void ix86_expand_branch (enum rtx_code code, rtx label) { rtx tmp; switch (GET_MODE (ix86_compare_op0)) { case QImode: case HImode: case SImode: simple: tmp = ix86_expand_compare (code, NULL, NULL); tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp, gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx); emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp)); return; case SFmode: case DFmode: case XFmode: { rtvec vec; int use_fcomi; enum rtx_code bypass_code, first_code, second_code; code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0, &ix86_compare_op1); ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code); /* Check whether we will use the natural sequence with one jump. If so, we can expand jump early. Otherwise delay expansion by creating compound insn to not confuse optimizers. */ if (bypass_code == NIL && second_code == NIL && TARGET_CMOVE) { ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1, gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx, NULL_RTX); } else { tmp = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1); tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp, gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx); tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp); use_fcomi = ix86_use_fcomi_compare (code); vec = rtvec_alloc (3 + !use_fcomi); RTVEC_ELT (vec, 0) = tmp; RTVEC_ELT (vec, 1) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18)); RTVEC_ELT (vec, 2) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17)); if (! use_fcomi) RTVEC_ELT (vec, 3) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode)); emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec)); } return; } case DImode: if (TARGET_64BIT) goto simple; /* Expand DImode branch into multiple compare+branch. */ { rtx lo[2], hi[2], label2; enum rtx_code code1, code2, code3; if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1)) { tmp = ix86_compare_op0; ix86_compare_op0 = ix86_compare_op1; ix86_compare_op1 = tmp; code = swap_condition (code); } split_di (&ix86_compare_op0, 1, lo+0, hi+0); split_di (&ix86_compare_op1, 1, lo+1, hi+1); /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to avoid two branches. This costs one extra insn, so disable when optimizing for size. */ if ((code == EQ || code == NE) && (!optimize_size || hi[1] == const0_rtx || lo[1] == const0_rtx)) { rtx xor0, xor1; xor1 = hi[0]; if (hi[1] != const0_rtx) xor1 = expand_binop (SImode, xor_optab, xor1, hi[1], NULL_RTX, 0, OPTAB_WIDEN); xor0 = lo[0]; if (lo[1] != const0_rtx) xor0 = expand_binop (SImode, xor_optab, xor0, lo[1], NULL_RTX, 0, OPTAB_WIDEN); tmp = expand_binop (SImode, ior_optab, xor1, xor0, NULL_RTX, 0, OPTAB_WIDEN); ix86_compare_op0 = tmp; ix86_compare_op1 = const0_rtx; ix86_expand_branch (code, label); return; } /* Otherwise, if we are doing less-than or greater-or-equal-than, op1 is a constant and the low word is zero, then we can just examine the high word. */ if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx) switch (code) { case LT: case LTU: case GE: case GEU: ix86_compare_op0 = hi[0]; ix86_compare_op1 = hi[1]; ix86_expand_branch (code, label); return; default: break; } /* Otherwise, we need two or three jumps. 
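EQ and NE need only two: for EQ we branch to the false label as soon as the high words differ and then test the low words, while for NE either half differing jumps straight to the target. The other codes branch on the high words in both strict directions and finish with an unsigned comparison of the low words.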
*/ label2 = gen_label_rtx (); code1 = code; code2 = swap_condition (code); code3 = unsigned_condition (code); switch (code) { case LT: case GT: case LTU: case GTU: break; case LE: code1 = LT; code2 = GT; break; case GE: code1 = GT; code2 = LT; break; case LEU: code1 = LTU; code2 = GTU; break; case GEU: code1 = GTU; code2 = LTU; break; case EQ: code1 = NIL; code2 = NE; break; case NE: code2 = NIL; break; default: abort (); } /* * a < b => * if (hi(a) < hi(b)) goto true; * if (hi(a) > hi(b)) goto false; * if (lo(a) < lo(b)) goto true; * false: */ ix86_compare_op0 = hi[0]; ix86_compare_op1 = hi[1]; if (code1 != NIL) ix86_expand_branch (code1, label); if (code2 != NIL) ix86_expand_branch (code2, label2); ix86_compare_op0 = lo[0]; ix86_compare_op1 = lo[1]; ix86_expand_branch (code3, label); if (code2 != NIL) emit_label (label2); return; } default: abort (); } } /* Split branch based on floating point condition. */ void ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2, rtx target1, rtx target2, rtx tmp) { rtx second, bypass; rtx label = NULL_RTX; rtx condition; int bypass_probability = -1, second_probability = -1, probability = -1; rtx i; if (target2 != pc_rtx) { rtx tmp = target2; code = reverse_condition_maybe_unordered (code); target2 = target1; target1 = tmp; } condition = ix86_expand_fp_compare (code, op1, op2, tmp, &second, &bypass); if (split_branch_probability >= 0) { /* Distribute the probabilities across the jumps. Assume the BYPASS and SECOND to be always test for UNORDERED. */ probability = split_branch_probability; /* Value of 1 is low enough to make no need for probability to be updated. Later we may run some experiments and see if unordered values are more frequent in practice. */ if (bypass) bypass_probability = 1; if (second) second_probability = 1; } if (bypass != NULL_RTX) { label = gen_label_rtx (); i = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, bypass, gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx))); if (bypass_probability >= 0) REG_NOTES (i) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (bypass_probability), REG_NOTES (i)); } i = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, condition, target1, target2))); if (probability >= 0) REG_NOTES (i) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (probability), REG_NOTES (i)); if (second != NULL_RTX) { i = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1, target2))); if (second_probability >= 0) REG_NOTES (i) = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (second_probability), REG_NOTES (i)); } if (label != NULL_RTX) emit_label (label); } int ix86_expand_setcc (enum rtx_code code, rtx dest) { rtx ret, tmp, tmpreg, equiv; rtx second_test, bypass_test; if (GET_MODE (ix86_compare_op0) == DImode && !TARGET_64BIT) return 0; /* FAIL */ if (GET_MODE (dest) != QImode) abort (); ret = ix86_expand_compare (code, &second_test, &bypass_test); PUT_MODE (ret, QImode); tmp = dest; tmpreg = dest; emit_insn (gen_rtx_SET (VOIDmode, tmp, ret)); if (bypass_test || second_test) { rtx test = second_test; int bypass = 0; rtx tmp2 = gen_reg_rtx (QImode); if (bypass_test) { if (second_test) abort (); test = bypass_test; bypass = 1; PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test))); } PUT_MODE (test, QImode); emit_insn (gen_rtx_SET (VOIDmode, tmp2, test)); if (bypass) emit_insn (gen_andqi3 (tmp, tmpreg, tmp2)); else emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2)); } /* Attach a REG_EQUAL note describing the comparison result. 
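That lets later passes know the QImode destination equals (op0 <code> op1) even after the flags register has been clobbered.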
*/ equiv = simplify_gen_relational (code, QImode, GET_MODE (ix86_compare_op0), ix86_compare_op0, ix86_compare_op1); set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv); return 1; /* DONE */ } /* Expand comparison setting or clearing carry flag. Return true when successful and set pop for the operation. */ static bool ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop) { enum machine_mode mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1); /* Do not handle DImode compares that go trought special path. Also we can't deal with FP compares yet. This is possible to add. */ if ((mode == DImode && !TARGET_64BIT)) return false; if (FLOAT_MODE_P (mode)) { rtx second_test = NULL, bypass_test = NULL; rtx compare_op, compare_seq; /* Shortcut: following common codes never translate into carry flag compares. */ if (code == EQ || code == NE || code == UNEQ || code == LTGT || code == ORDERED || code == UNORDERED) return false; /* These comparisons require zero flag; swap operands so they won't. */ if ((code == GT || code == UNLE || code == LE || code == UNGT) && !TARGET_IEEE_FP) { rtx tmp = op0; op0 = op1; op1 = tmp; code = swap_condition (code); } /* Try to expand the comparison and verify that we end up with carry flag based comparison. This is fails to be true only when we decide to expand comparison using arithmetic that is not too common scenario. */ start_sequence (); compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX, &second_test, &bypass_test); compare_seq = get_insns (); end_sequence (); if (second_test || bypass_test) return false; if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode) code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op)); else code = GET_CODE (compare_op); if (code != LTU && code != GEU) return false; emit_insn (compare_seq); *pop = compare_op; return true; } if (!INTEGRAL_MODE_P (mode)) return false; switch (code) { case LTU: case GEU: break; /* Convert a==0 into (unsigned)a<1. */ case EQ: case NE: if (op1 != const0_rtx) return false; op1 = const1_rtx; code = (code == EQ ? LTU : GEU); break; /* Convert a>b into b=b-1. */ case GTU: case LEU: if (GET_CODE (op1) == CONST_INT) { op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0)); /* Bail out on overflow. We still can swap operands but that would force loading of the constant into register. */ if (op1 == const0_rtx || !x86_64_immediate_operand (op1, GET_MODE (op1))) return false; code = (code == GTU ? GEU : LTU); } else { rtx tmp = op1; op1 = op0; op0 = tmp; code = (code == GTU ? LTU : GEU); } break; /* Convert a>=0 into (unsigned)a<0x80000000. */ case LT: case GE: if (mode == DImode || op1 != const0_rtx) return false; op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode); code = (code == LT ? GEU : LTU); break; case LE: case GT: if (mode == DImode || op1 != constm1_rtx) return false; op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode); code = (code == LE ? GEU : LTU); break; default: return false; } /* Swapping operands may cause constant to appear as first operand. 
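Such an op0 is not a nonimmediate_operand, so load it into a register first; if new pseudos may no longer be created, give up instead.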
*/ if (!nonimmediate_operand (op0, VOIDmode)) { if (no_new_pseudos) return false; op0 = force_reg (mode, op0); } ix86_compare_op0 = op0; ix86_compare_op1 = op1; *pop = ix86_expand_compare (code, NULL, NULL); if (GET_CODE (*pop) != LTU && GET_CODE (*pop) != GEU) abort (); return true; } int ix86_expand_int_movcc (rtx operands[]) { enum rtx_code code = GET_CODE (operands[1]), compare_code; rtx compare_seq, compare_op; rtx second_test, bypass_test; enum machine_mode mode = GET_MODE (operands[0]); bool sign_bit_compare_p = false;; start_sequence (); compare_op = ix86_expand_compare (code, &second_test, &bypass_test); compare_seq = get_insns (); end_sequence (); compare_code = GET_CODE (compare_op); if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT)) || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE))) sign_bit_compare_p = true; /* Don't attempt mode expansion here -- if we had to expand 5 or 6 HImode insns, we'd be swallowed in word prefix ops. */ if ((mode != HImode || TARGET_FAST_PREFIX) && (mode != DImode || TARGET_64BIT) && GET_CODE (operands[2]) == CONST_INT && GET_CODE (operands[3]) == CONST_INT) { rtx out = operands[0]; HOST_WIDE_INT ct = INTVAL (operands[2]); HOST_WIDE_INT cf = INTVAL (operands[3]); HOST_WIDE_INT diff; diff = ct - cf; /* Sign bit compares are better done using shifts than we do by using sbb. */ if (sign_bit_compare_p || ix86_expand_carry_flag_compare (code, ix86_compare_op0, ix86_compare_op1, &compare_op)) { /* Detect overlap between destination and compare sources. */ rtx tmp = out; if (!sign_bit_compare_p) { bool fpcmp = false; compare_code = GET_CODE (compare_op); if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode) { fpcmp = true; compare_code = ix86_fp_compare_code_to_integer (compare_code); } /* To simplify rest of code, restrict to the GEU case. */ if (compare_code == LTU) { HOST_WIDE_INT tmp = ct; ct = cf; cf = tmp; compare_code = reverse_condition (compare_code); code = reverse_condition (code); } else { if (fpcmp) PUT_CODE (compare_op, reverse_condition_maybe_unordered (GET_CODE (compare_op))); else PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op))); } diff = ct - cf; if (reg_overlap_mentioned_p (out, ix86_compare_op0) || reg_overlap_mentioned_p (out, ix86_compare_op1)) tmp = gen_reg_rtx (mode); if (mode == DImode) emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op)); else emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op)); } else { if (code == GT || code == GE) code = reverse_condition (code); else { HOST_WIDE_INT tmp = ct; ct = cf; cf = tmp; diff = ct - cf; } tmp = emit_store_flag (tmp, code, ix86_compare_op0, ix86_compare_op1, VOIDmode, 0, -1); } if (diff == 1) { /* * cmpl op0,op1 * sbbl dest,dest * [addl dest, ct] * * Size 5 - 8. */ if (ct) tmp = expand_simple_binop (mode, PLUS, tmp, GEN_INT (ct), copy_rtx (tmp), 1, OPTAB_DIRECT); } else if (cf == -1) { /* * cmpl op0,op1 * sbbl dest,dest * orl $ct, dest * * Size 8. */ tmp = expand_simple_binop (mode, IOR, tmp, GEN_INT (ct), copy_rtx (tmp), 1, OPTAB_DIRECT); } else if (diff == -1 && ct) { /* * cmpl op0,op1 * sbbl dest,dest * notl dest * [addl dest, cf] * * Size 8 - 11. */ tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1); if (cf) tmp = expand_simple_binop (mode, PLUS, copy_rtx (tmp), GEN_INT (cf), copy_rtx (tmp), 1, OPTAB_DIRECT); } else { /* * cmpl op0,op1 * sbbl dest,dest * [notl dest] * andl cf - ct, dest * [addl dest, ct] * * Size 8 - 11. 
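* * With ct = 7 and cf = 12, for instance, the sbb result of -1 becomes (-1 & 5) + 7 = 12 and the result of 0 becomes (0 & 5) + 7 = 7, so both constants are produced without a branch.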
*/ if (cf == 0) { cf = ct; ct = 0; tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1); } tmp = expand_simple_binop (mode, AND, copy_rtx (tmp), gen_int_mode (cf - ct, mode), copy_rtx (tmp), 1, OPTAB_DIRECT); if (ct) tmp = expand_simple_binop (mode, PLUS, copy_rtx (tmp), GEN_INT (ct), copy_rtx (tmp), 1, OPTAB_DIRECT); } if (!rtx_equal_p (tmp, out)) emit_move_insn (copy_rtx (out), copy_rtx (tmp)); return 1; /* DONE */ } if (diff < 0) { HOST_WIDE_INT tmp; tmp = ct, ct = cf, cf = tmp; diff = -diff; if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0))) { /* We may be reversing unordered compare to normal compare, that is not valid in general (we may convert non-trapping condition to trapping one), however on i386 we currently emit all comparisons unordered. */ compare_code = reverse_condition_maybe_unordered (compare_code); code = reverse_condition_maybe_unordered (code); } else { compare_code = reverse_condition (compare_code); code = reverse_condition (code); } } compare_code = NIL; if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT && GET_CODE (ix86_compare_op1) == CONST_INT) { if (ix86_compare_op1 == const0_rtx && (code == LT || code == GE)) compare_code = code; else if (ix86_compare_op1 == constm1_rtx) { if (code == LE) compare_code = LT; else if (code == GT) compare_code = GE; } } /* Optimize dest = (op0 < 0) ? -1 : cf. */ if (compare_code != NIL && GET_MODE (ix86_compare_op0) == GET_MODE (out) && (cf == -1 || ct == -1)) { /* If lea code below could be used, only optimize if it results in a 2 insn sequence. */ if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8 || diff == 3 || diff == 5 || diff == 9) || (compare_code == LT && ct == -1) || (compare_code == GE && cf == -1)) { /* * notl op1 (if necessary) * sarl $31, op1 * orl cf, op1 */ if (ct != -1) { cf = ct; ct = -1; code = reverse_condition (code); } out = emit_store_flag (out, code, ix86_compare_op0, ix86_compare_op1, VOIDmode, 0, -1); out = expand_simple_binop (mode, IOR, out, GEN_INT (cf), out, 1, OPTAB_DIRECT); if (out != operands[0]) emit_move_insn (operands[0], out); return 1; /* DONE */ } } if ((diff == 1 || diff == 2 || diff == 4 || diff == 8 || diff == 3 || diff == 5 || diff == 9) && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL) && (mode != DImode || x86_64_sign_extended_value (GEN_INT (cf)))) { /* * xorl dest,dest * cmpl op1,op2 * setcc dest * lea cf(dest*(ct-cf)),dest * * Size 14. * * This also catches the degenerate setcc-only case. */ rtx tmp; int nops; out = emit_store_flag (out, code, ix86_compare_op0, ix86_compare_op1, VOIDmode, 0, 1); nops = 0; /* On x86_64 the lea instruction operates on Pmode, so we need to get arithmetics done in proper mode to match. */ if (diff == 1) tmp = copy_rtx (out); else { rtx out1; out1 = copy_rtx (out); tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1)); nops++; if (diff & 1) { tmp = gen_rtx_PLUS (mode, tmp, out1); nops++; } } if (cf != 0) { tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf)); nops++; } if (!rtx_equal_p (tmp, out)) { if (nops == 1) out = force_operand (tmp, copy_rtx (out)); else emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp))); } if (!rtx_equal_p (out, operands[0])) emit_move_insn (operands[0], copy_rtx (out)); return 1; /* DONE */ } /* * General case: Jumpful: * xorl dest,dest cmpl op1, op2 * cmpl op1, op2 movl ct, dest * setcc dest jcc 1f * decl dest movl cf, dest * andl (cf-ct),dest 1: * addl ct,dest * * Size 20. Size 14. 
* * This is reasonably steep, but branch mispredict costs are * high on modern cpus, so consider failing only if optimizing * for space. */ if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL)) && BRANCH_COST >= 2) { if (cf == 0) { cf = ct; ct = 0; if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0))) /* We may be reversing unordered compare to normal compare, that is not valid in general (we may convert non-trapping condition to trapping one), however on i386 we currently emit all comparisons unordered. */ code = reverse_condition_maybe_unordered (code); else { code = reverse_condition (code); if (compare_code != NIL) compare_code = reverse_condition (compare_code); } } if (compare_code != NIL) { /* notl op1 (if needed) sarl $31, op1 andl (cf-ct), op1 addl ct, op1 For x < 0 (resp. x <= -1) there will be no notl, so if possible swap the constants to get rid of the complement. True/false will be -1/0 while code below (store flag followed by decrement) is 0/-1, so the constants need to be exchanged once more. */ if (compare_code == GE || !cf) { code = reverse_condition (code); compare_code = LT; } else { HOST_WIDE_INT tmp = cf; cf = ct; ct = tmp; } out = emit_store_flag (out, code, ix86_compare_op0, ix86_compare_op1, VOIDmode, 0, -1); } else { out = emit_store_flag (out, code, ix86_compare_op0, ix86_compare_op1, VOIDmode, 0, 1); out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx, copy_rtx (out), 1, OPTAB_DIRECT); } out = expand_simple_binop (mode, AND, copy_rtx (out), gen_int_mode (cf - ct, mode), copy_rtx (out), 1, OPTAB_DIRECT); if (ct) out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct), copy_rtx (out), 1, OPTAB_DIRECT); if (!rtx_equal_p (out, operands[0])) emit_move_insn (operands[0], copy_rtx (out)); return 1; /* DONE */ } } if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL)) { /* Try a few things more with specific constants and a variable. */ optab op; rtx var, orig_out, out, tmp; if (BRANCH_COST <= 2) return 0; /* FAIL */ /* If one of the two operands is an interesting constant, load a constant with the above and mask it in with a logical operation. */ if (GET_CODE (operands[2]) == CONST_INT) { var = operands[3]; if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx) operands[3] = constm1_rtx, op = and_optab; else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx) operands[3] = const0_rtx, op = ior_optab; else return 0; /* FAIL */ } else if (GET_CODE (operands[3]) == CONST_INT) { var = operands[2]; if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx) operands[2] = constm1_rtx, op = and_optab; else if (INTVAL (operands[3]) == -1 && operands[3] != const0_rtx) operands[2] = const0_rtx, op = ior_optab; else return 0; /* FAIL */ } else return 0; /* FAIL */ orig_out = operands[0]; tmp = gen_reg_rtx (mode); operands[0] = tmp; /* Recurse to get the constant loaded. */ if (ix86_expand_int_movcc (operands) == 0) return 0; /* FAIL */ /* Mask in the interesting variable. */ out = expand_binop (mode, op, var, tmp, orig_out, 0, OPTAB_WIDEN); if (!rtx_equal_p (out, orig_out)) emit_move_insn (copy_rtx (orig_out), copy_rtx (out)); return 1; /* DONE */ } /* * For comparison with above, * * movl cf,dest * movl ct,tmp * cmpl op1,op2 * cmovcc tmp,dest * * Size 15. */ if (! nonimmediate_operand (operands[2], mode)) operands[2] = force_reg (mode, operands[2]); if (! 
nonimmediate_operand (operands[3], mode)) operands[3] = force_reg (mode, operands[3]); if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3])) { rtx tmp = gen_reg_rtx (mode); emit_move_insn (tmp, operands[3]); operands[3] = tmp; } if (second_test && reg_overlap_mentioned_p (operands[0], operands[2])) { rtx tmp = gen_reg_rtx (mode); emit_move_insn (tmp, operands[2]); operands[2] = tmp; } if (! register_operand (operands[2], VOIDmode) && (mode == QImode || ! register_operand (operands[3], VOIDmode))) operands[2] = force_reg (mode, operands[2]); if (mode == QImode && ! register_operand (operands[3], VOIDmode)) operands[3] = force_reg (mode, operands[3]); emit_insn (compare_seq); emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_rtx_IF_THEN_ELSE (mode, compare_op, operands[2], operands[3]))); if (bypass_test) emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]), gen_rtx_IF_THEN_ELSE (mode, bypass_test, copy_rtx (operands[3]), copy_rtx (operands[0])))); if (second_test) emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]), gen_rtx_IF_THEN_ELSE (mode, second_test, copy_rtx (operands[2]), copy_rtx (operands[0])))); return 1; /* DONE */ } int ix86_expand_fp_movcc (rtx operands[]) { enum rtx_code code; rtx tmp; rtx compare_op, second_test, bypass_test; /* For SF/DFmode conditional moves based on comparisons in same mode, we may want to use SSE min/max instructions. */ if (((TARGET_SSE_MATH && GET_MODE (operands[0]) == SFmode) || (TARGET_SSE2 && TARGET_SSE_MATH && GET_MODE (operands[0]) == DFmode)) && GET_MODE (ix86_compare_op0) == GET_MODE (operands[0]) /* The SSE comparisons does not support the LTGT/UNEQ pair. */ && (!TARGET_IEEE_FP || (GET_CODE (operands[1]) != LTGT && GET_CODE (operands[1]) != UNEQ)) /* We may be called from the post-reload splitter. */ && (!REG_P (operands[0]) || SSE_REG_P (operands[0]) || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)) { rtx op0 = ix86_compare_op0, op1 = ix86_compare_op1; code = GET_CODE (operands[1]); /* See if we have (cross) match between comparison operands and conditional move operands. */ if (rtx_equal_p (operands[2], op1)) { rtx tmp = op0; op0 = op1; op1 = tmp; code = reverse_condition_maybe_unordered (code); } if (rtx_equal_p (operands[2], op0) && rtx_equal_p (operands[3], op1)) { /* Check for min operation. */ if (code == LT || code == UNLE) { if (code == UNLE) { rtx tmp = op0; op0 = op1; op1 = tmp; } operands[0] = force_reg (GET_MODE (operands[0]), operands[0]); if (memory_operand (op0, VOIDmode)) op0 = force_reg (GET_MODE (operands[0]), op0); if (GET_MODE (operands[0]) == SFmode) emit_insn (gen_minsf3 (operands[0], op0, op1)); else emit_insn (gen_mindf3 (operands[0], op0, op1)); return 1; } /* Check for max operation. */ if (code == GT || code == UNGE) { if (code == UNGE) { rtx tmp = op0; op0 = op1; op1 = tmp; } operands[0] = force_reg (GET_MODE (operands[0]), operands[0]); if (memory_operand (op0, VOIDmode)) op0 = force_reg (GET_MODE (operands[0]), op0); if (GET_MODE (operands[0]) == SFmode) emit_insn (gen_maxsf3 (operands[0], op0, op1)); else emit_insn (gen_maxdf3 (operands[0], op0, op1)); return 1; } } /* Manage condition to be sse_comparison_operator. In case we are in non-ieee mode, try to canonicalize the destination operand to be first in the comparison - this helps reload to avoid extra moves. 
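(sse_comparison_operator presumably accepts only the condition codes the SSE compare patterns handle directly, so anything else is rewritten by swapping the comparison operands together with the condition.)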
*/ if (!sse_comparison_operator (operands[1], VOIDmode) || (rtx_equal_p (operands[0], ix86_compare_op1) && !TARGET_IEEE_FP)) { rtx tmp = ix86_compare_op0; ix86_compare_op0 = ix86_compare_op1; ix86_compare_op1 = tmp; operands[1] = gen_rtx_fmt_ee (swap_condition (GET_CODE (operands[1])), VOIDmode, ix86_compare_op0, ix86_compare_op1); } /* Similarly try to manage result to be first operand of conditional move. We also don't support the NE comparison on SSE, so try to avoid it. */ if ((rtx_equal_p (operands[0], operands[3]) && (!TARGET_IEEE_FP || GET_CODE (operands[1]) != EQ)) || (GET_CODE (operands[1]) == NE && TARGET_IEEE_FP)) { rtx tmp = operands[2]; operands[2] = operands[3]; operands[3] = tmp; operands[1] = gen_rtx_fmt_ee (reverse_condition_maybe_unordered (GET_CODE (operands[1])), VOIDmode, ix86_compare_op0, ix86_compare_op1); } if (GET_MODE (operands[0]) == SFmode) emit_insn (gen_sse_movsfcc (operands[0], operands[1], operands[2], operands[3], ix86_compare_op0, ix86_compare_op1)); else emit_insn (gen_sse_movdfcc (operands[0], operands[1], operands[2], operands[3], ix86_compare_op0, ix86_compare_op1)); return 1; } /* The floating point conditional move instructions don't directly support conditions resulting from a signed integer comparison. */ code = GET_CODE (operands[1]); compare_op = ix86_expand_compare (code, &second_test, &bypass_test); /* The floating point conditional move instructions don't directly support signed integer comparisons. */ if (!fcmov_comparison_operator (compare_op, VOIDmode)) { if (second_test != NULL || bypass_test != NULL) abort (); tmp = gen_reg_rtx (QImode); ix86_expand_setcc (code, tmp); code = NE; ix86_compare_op0 = tmp; ix86_compare_op1 = const0_rtx; compare_op = ix86_expand_compare (code, &second_test, &bypass_test); } if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3])) { tmp = gen_reg_rtx (GET_MODE (operands[0])); emit_move_insn (tmp, operands[3]); operands[3] = tmp; } if (second_test && reg_overlap_mentioned_p (operands[0], operands[2])) { tmp = gen_reg_rtx (GET_MODE (operands[0])); emit_move_insn (tmp, operands[2]); operands[2] = tmp; } emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), compare_op, operands[2], operands[3]))); if (bypass_test) emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), bypass_test, operands[3], operands[0]))); if (second_test) emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), second_test, operands[2], operands[0]))); return 1; } /* Expand conditional increment or decrement using adb/sbb instructions. The default case using setcc followed by the conditional move can be done by generic code. 
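Operand 3 must be +1 or -1; the comparison is turned into a carry-flag test and the adjustment is then folded into a single adc or sbb, avoiding both the setcc and the cmov.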
*/ int ix86_expand_int_addcc (rtx operands[]) { enum rtx_code code = GET_CODE (operands[1]); rtx compare_op; rtx val = const0_rtx; bool fpcmp = false; enum machine_mode mode = GET_MODE (operands[0]); if (operands[3] != const1_rtx && operands[3] != constm1_rtx) return 0; if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0, ix86_compare_op1, &compare_op)) return 0; code = GET_CODE (compare_op); if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode) { fpcmp = true; code = ix86_fp_compare_code_to_integer (code); } if (code != LTU) { val = constm1_rtx; if (fpcmp) PUT_CODE (compare_op, reverse_condition_maybe_unordered (GET_CODE (compare_op))); else PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op))); } PUT_MODE (compare_op, mode); /* Construct either adc or sbb insn. */ if ((code == LTU) == (operands[3] == constm1_rtx)) { switch (GET_MODE (operands[0])) { case QImode: emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op)); break; case HImode: emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op)); break; case SImode: emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op)); break; case DImode: emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op)); break; default: abort (); } } else { switch (GET_MODE (operands[0])) { case QImode: emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op)); break; case HImode: emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op)); break; case SImode: emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op)); break; case DImode: emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op)); break; default: abort (); } } return 1; /* DONE */ } /* Split operands 0 and 1 into SImode parts. Similar to split_di, but works for floating pointer parameters and nonoffsetable memories. For pushes, it returns just stack offsets; the values will be saved in the right order. Maximally three parts are generated. */ static int ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode) { int size; if (!TARGET_64BIT) size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4; else size = (GET_MODE_SIZE (mode) + 4) / 8; if (GET_CODE (operand) == REG && MMX_REGNO_P (REGNO (operand))) abort (); if (size < 2 || size > 3) abort (); /* Optimize constant pool reference to immediates. This is used by fp moves, that force all constants to memory to allow combining. */ if (GET_CODE (operand) == MEM && RTX_UNCHANGING_P (operand)) { rtx tmp = maybe_get_pool_constant (operand); if (tmp) operand = tmp; } if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand)) { /* The only non-offsetable memories we handle are pushes. */ if (! 
push_operand (operand, VOIDmode)) abort (); operand = copy_rtx (operand); PUT_MODE (operand, Pmode); parts[0] = parts[1] = parts[2] = operand; } else if (!TARGET_64BIT) { if (mode == DImode) split_di (&operand, 1, &parts[0], &parts[1]); else { if (REG_P (operand)) { if (!reload_completed) abort (); parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0); parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1); if (size == 3) parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2); } else if (offsettable_memref_p (operand)) { operand = adjust_address (operand, SImode, 0); parts[0] = operand; parts[1] = adjust_address (operand, SImode, 4); if (size == 3) parts[2] = adjust_address (operand, SImode, 8); } else if (GET_CODE (operand) == CONST_DOUBLE) { REAL_VALUE_TYPE r; long l[4]; REAL_VALUE_FROM_CONST_DOUBLE (r, operand); switch (mode) { case XFmode: REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l); parts[2] = gen_int_mode (l[2], SImode); break; case DFmode: REAL_VALUE_TO_TARGET_DOUBLE (r, l); break; default: abort (); } parts[1] = gen_int_mode (l[1], SImode); parts[0] = gen_int_mode (l[0], SImode); } else abort (); } } else { if (mode == TImode) split_ti (&operand, 1, &parts[0], &parts[1]); if (mode == XFmode || mode == TFmode) { enum machine_mode upper_mode = mode==XFmode ? SImode : DImode; if (REG_P (operand)) { if (!reload_completed) abort (); parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0); parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1); } else if (offsettable_memref_p (operand)) { operand = adjust_address (operand, DImode, 0); parts[0] = operand; parts[1] = adjust_address (operand, upper_mode, 8); } else if (GET_CODE (operand) == CONST_DOUBLE) { REAL_VALUE_TYPE r; long l[3]; REAL_VALUE_FROM_CONST_DOUBLE (r, operand); real_to_target (l, &r, mode); /* Do not use shift by 32 to avoid warning on 32bit systems. */ if (HOST_BITS_PER_WIDE_INT >= 64) parts[0] = gen_int_mode ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1)) + ((((HOST_WIDE_INT) l[1]) << 31) << 1), DImode); else parts[0] = immed_double_const (l[0], l[1], DImode); if (upper_mode == SImode) parts[1] = gen_int_mode (l[2], SImode); else if (HOST_BITS_PER_WIDE_INT >= 64) parts[1] = gen_int_mode ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1)) + ((((HOST_WIDE_INT) l[3]) << 31) << 1), DImode); else parts[1] = immed_double_const (l[2], l[3], DImode); } else abort (); } } return size; } /* Emit insns to perform a move or push of DI, DF, and XF values. Return false when normal moves are needed; true when all required insns have been emitted. Operands 2-4 contain the input values int the correct order; operands 5-7 contain the output values. */ void ix86_split_long_move (rtx operands[]) { rtx part[2][3]; int nparts; int push = 0; int collisions = 0; enum machine_mode mode = GET_MODE (operands[0]); /* The DFmode expanders may ask us to move double. For 64bit target this is single move. By hiding the fact here we simplify i386.md splitters. */ if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT) { /* Optimize constant pool reference to immediates. This is used by fp moves, that force all constants to memory to allow combining. 
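Pulling the constant back out of the pool lets the move below use it directly instead of loading it from memory.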
*/ if (GET_CODE (operands[1]) == MEM && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0))) operands[1] = get_pool_constant (XEXP (operands[1], 0)); if (push_operand (operands[0], VOIDmode)) { operands[0] = copy_rtx (operands[0]); PUT_MODE (operands[0], Pmode); } else operands[0] = gen_lowpart (DImode, operands[0]); operands[1] = gen_lowpart (DImode, operands[1]); emit_move_insn (operands[0], operands[1]); return; } /* The only non-offsettable memory we handle is push. */ if (push_operand (operands[0], VOIDmode)) push = 1; else if (GET_CODE (operands[0]) == MEM && ! offsettable_memref_p (operands[0])) abort (); nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0])); ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0])); /* When emitting push, take care for source operands on the stack. */ if (push && GET_CODE (operands[1]) == MEM && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1])) { if (nparts == 3) part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]), XEXP (part[1][2], 0)); part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]), XEXP (part[1][1], 0)); } /* We need to do copy in the right order in case an address register of the source overlaps the destination. */ if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM) { if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))) collisions++; if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0))) collisions++; if (nparts == 3 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0))) collisions++; /* Collision in the middle part can be handled by reordering. */ if (collisions == 1 && nparts == 3 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0))) { rtx tmp; tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp; tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp; } /* If there are more collisions, we can't handle it by reordering. Do an lea to the last part and use only one colliding move. */ else if (collisions > 1) { rtx base; collisions = 1; base = part[0][nparts - 1]; /* Handle the case when the last part isn't valid for lea. Happens in 64-bit mode storing the 12-byte XFmode. */ if (GET_MODE (base) != Pmode) base = gen_rtx_REG (Pmode, REGNO (base)); emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0))); part[1][0] = replace_equiv_address (part[1][0], base); part[1][1] = replace_equiv_address (part[1][1], plus_constant (base, UNITS_PER_WORD)); if (nparts == 3) part[1][2] = replace_equiv_address (part[1][2], plus_constant (base, 8)); } } if (push) { if (!TARGET_64BIT) { if (nparts == 3) { if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode) emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4))); emit_move_insn (part[0][2], part[1][2]); } } else { /* In 64bit mode we don't have 32bit push available. In case this is register, it is OK - we will just use larger counterpart. We also retype memory - these comes from attempt to avoid REX prefix on moving of second half of TFmode value. */ if (GET_MODE (part[1][1]) == SImode) { if (GET_CODE (part[1][1]) == MEM) part[1][1] = adjust_address (part[1][1], DImode, 0); else if (REG_P (part[1][1])) part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1])); else abort (); if (GET_MODE (part[1][0]) == SImode) part[1][0] = part[1][1]; } } emit_move_insn (part[0][1], part[1][1]); emit_move_insn (part[0][0], part[1][0]); return; } /* Choose correct order to not overwrite the source before it is copied. 
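If the destination's low part is the same register as the source's high (or highest) part, or it overlaps the address of a colliding memory source, the moves are emitted high part first; otherwise the natural low-to-high order is safe.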
*/ if ((REG_P (part[0][0]) && REG_P (part[1][1]) && (REGNO (part[0][0]) == REGNO (part[1][1]) || (nparts == 3 && REGNO (part[0][0]) == REGNO (part[1][2])))) || (collisions > 0 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))) { if (nparts == 3) { operands[2] = part[0][2]; operands[3] = part[0][1]; operands[4] = part[0][0]; operands[5] = part[1][2]; operands[6] = part[1][1]; operands[7] = part[1][0]; } else { operands[2] = part[0][1]; operands[3] = part[0][0]; operands[5] = part[1][1]; operands[6] = part[1][0]; } } else { if (nparts == 3) { operands[2] = part[0][0]; operands[3] = part[0][1]; operands[4] = part[0][2]; operands[5] = part[1][0]; operands[6] = part[1][1]; operands[7] = part[1][2]; } else { operands[2] = part[0][0]; operands[3] = part[0][1]; operands[5] = part[1][0]; operands[6] = part[1][1]; } } emit_move_insn (operands[2], operands[5]); emit_move_insn (operands[3], operands[6]); if (nparts == 3) emit_move_insn (operands[4], operands[7]); return; } void ix86_split_ashldi (rtx *operands, rtx scratch) { rtx low[2], high[2]; int count; if (GET_CODE (operands[2]) == CONST_INT) { split_di (operands, 2, low, high); count = INTVAL (operands[2]) & 63; if (count >= 32) { emit_move_insn (high[0], low[1]); emit_move_insn (low[0], const0_rtx); if (count > 32) emit_insn (gen_ashlsi3 (high[0], high[0], GEN_INT (count - 32))); } else { if (!rtx_equal_p (operands[0], operands[1])) emit_move_insn (operands[0], operands[1]); emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count))); emit_insn (gen_ashlsi3 (low[0], low[0], GEN_INT (count))); } } else { if (!rtx_equal_p (operands[0], operands[1])) emit_move_insn (operands[0], operands[1]); split_di (operands, 1, low, high); emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2])); emit_insn (gen_ashlsi3 (low[0], low[0], operands[2])); if (TARGET_CMOVE && (! no_new_pseudos || scratch)) { if (! no_new_pseudos) scratch = force_reg (SImode, const0_rtx); else emit_move_insn (scratch, const0_rtx); emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch)); } else emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2])); } } void ix86_split_ashrdi (rtx *operands, rtx scratch) { rtx low[2], high[2]; int count; if (GET_CODE (operands[2]) == CONST_INT) { split_di (operands, 2, low, high); count = INTVAL (operands[2]) & 63; if (count == 63) { emit_move_insn (high[0], high[1]); emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31))); emit_move_insn (low[0], high[0]); } else if (count >= 32) { emit_move_insn (low[0], high[1]); if (! reload_completed) emit_insn (gen_ashrsi3 (high[0], low[0], GEN_INT (31))); else { emit_move_insn (high[0], low[0]); emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31))); } if (count > 32) emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32))); } else { if (!rtx_equal_p (operands[0], operands[1])) emit_move_insn (operands[0], operands[1]); emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count))); emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count))); } } else { if (!rtx_equal_p (operands[0], operands[1])) emit_move_insn (operands[0], operands[1]); split_di (operands, 1, low, high); emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2])); emit_insn (gen_ashrsi3 (high[0], high[0], operands[2])); if (TARGET_CMOVE && (! no_new_pseudos || scratch)) { if (! 
no_new_pseudos) scratch = gen_reg_rtx (SImode); emit_move_insn (scratch, high[0]); emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31))); emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2], scratch)); } else emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2])); } } void ix86_split_lshrdi (rtx *operands, rtx scratch) { rtx low[2], high[2]; int count; if (GET_CODE (operands[2]) == CONST_INT) { split_di (operands, 2, low, high); count = INTVAL (operands[2]) & 63; if (count >= 32) { emit_move_insn (low[0], high[1]); emit_move_insn (high[0], const0_rtx); if (count > 32) emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32))); } else { if (!rtx_equal_p (operands[0], operands[1])) emit_move_insn (operands[0], operands[1]); emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count))); emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count))); } } else { if (!rtx_equal_p (operands[0], operands[1])) emit_move_insn (operands[0], operands[1]); split_di (operands, 1, low, high); emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2])); emit_insn (gen_lshrsi3 (high[0], high[0], operands[2])); /* Heh. By reversing the arguments, we can reuse this pattern. */ if (TARGET_CMOVE && (! no_new_pseudos || scratch)) { if (! no_new_pseudos) scratch = force_reg (SImode, const0_rtx); else emit_move_insn (scratch, const0_rtx); emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2], scratch)); } else emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2])); } } /* Helper function for the string operations below. Dest VARIABLE whether it is aligned to VALUE bytes. If true, jump to the label. */ static rtx ix86_expand_aligntest (rtx variable, int value) { rtx label = gen_label_rtx (); rtx tmpcount = gen_reg_rtx (GET_MODE (variable)); if (GET_MODE (variable) == DImode) emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value))); else emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value))); emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable), 1, label); return label; } /* Adjust COUNTER by the VALUE. */ static void ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value) { if (GET_MODE (countreg) == DImode) emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value))); else emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value))); } /* Zero extend possibly SImode EXP to Pmode register. */ rtx ix86_zero_extend_to_Pmode (rtx exp) { rtx r; if (GET_MODE (exp) == VOIDmode) return force_reg (Pmode, exp); if (GET_MODE (exp) == Pmode) return copy_to_mode_reg (Pmode, exp); r = gen_reg_rtx (Pmode); emit_insn (gen_zero_extendsidi2 (r, exp)); return r; } /* Expand string move (memcpy) operation. Use i386 string operations when profitable. expand_clrstr contains similar code. */ int ix86_expand_movstr (rtx dst, rtx src, rtx count_exp, rtx align_exp) { rtx srcreg, destreg, countreg, srcexp, destexp; enum machine_mode counter_mode; HOST_WIDE_INT align = 0; unsigned HOST_WIDE_INT count = 0; if (GET_CODE (align_exp) == CONST_INT) align = INTVAL (align_exp); /* Can't use any of this if the user has appropriated esi or edi. */ if (global_regs[4] || global_regs[5]) return 0; /* This simple hack avoids all inlining code and simplifies code below. */ if (!TARGET_ALIGN_STRINGOPS) align = 64; if (GET_CODE (count_exp) == CONST_INT) { count = INTVAL (count_exp); if (!TARGET_INLINE_ALL_STRINGOPS && count > 64) return 0; } /* Figure out proper mode for counter. For 32bits it is always SImode, for 64bits use SImode when possible, otherwise DImode. 
Set count to number of bytes copied when known at compile time. */ if (!TARGET_64BIT || GET_MODE (count_exp) == SImode || x86_64_zero_extended_value (count_exp)) counter_mode = SImode; else counter_mode = DImode; if (counter_mode != SImode && counter_mode != DImode) abort (); destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0)); if (destreg != XEXP (dst, 0)) dst = replace_equiv_address_nv (dst, destreg); srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0)); if (srcreg != XEXP (src, 0)) src = replace_equiv_address_nv (src, srcreg); /* When optimizing for size emit simple rep ; movsb instruction for counts not divisible by 4. */ if ((!optimize || optimize_size) && (count == 0 || (count & 0x03))) { emit_insn (gen_cld ()); countreg = ix86_zero_extend_to_Pmode (count_exp); destexp = gen_rtx_PLUS (Pmode, destreg, countreg); srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg); emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg, destexp, srcexp)); } /* For constant aligned (or small unaligned) copies use rep movsl followed by code copying the rest. For PentiumPro ensure 8 byte alignment to allow rep movsl acceleration. */ else if (count != 0 && (align >= 8 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4) || optimize_size || count < (unsigned int) 64)) { unsigned HOST_WIDE_INT offset = 0; int size = TARGET_64BIT && !optimize_size ? 8 : 4; rtx srcmem, dstmem; emit_insn (gen_cld ()); if (count & ~(size - 1)) { countreg = copy_to_mode_reg (counter_mode, GEN_INT ((count >> (size == 4 ? 2 : 3)) & (TARGET_64BIT ? -1 : 0x3fffffff))); countreg = ix86_zero_extend_to_Pmode (countreg); destexp = gen_rtx_ASHIFT (Pmode, countreg, GEN_INT (size == 4 ? 2 : 3)); srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg); destexp = gen_rtx_PLUS (Pmode, destexp, destreg); emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg, destexp, srcexp)); offset = count & ~(size - 1); } if (size == 8 && (count & 0x04)) { srcmem = adjust_automodify_address_nv (src, SImode, srcreg, offset); dstmem = adjust_automodify_address_nv (dst, SImode, destreg, offset); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); offset += 4; } if (count & 0x02) { srcmem = adjust_automodify_address_nv (src, HImode, srcreg, offset); dstmem = adjust_automodify_address_nv (dst, HImode, destreg, offset); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); offset += 2; } if (count & 0x01) { srcmem = adjust_automodify_address_nv (src, QImode, srcreg, offset); dstmem = adjust_automodify_address_nv (dst, QImode, destreg, offset); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); } } /* The generic code based on the glibc implementation: - align destination to 4 bytes (8 byte alignment is used for PentiumPro allowing accelerated copying there) - copy the data using rep movsl - copy the rest. */ else { rtx countreg2; rtx label = NULL; rtx srcmem, dstmem; int desired_alignment = (TARGET_PENTIUMPRO && (count == 0 || count >= (unsigned int) 260) ? 8 : UNITS_PER_WORD); /* Get rid of MEM_OFFSETs, they won't be accurate. */ dst = change_address (dst, BLKmode, destreg); src = change_address (src, BLKmode, srcreg); /* In case we don't know anything about the alignment, default to library version, since it is usually equally fast and result in shorter code. Also emit call when we know that the count is large and call overhead will not be important. 
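Returning 0 makes this expander fail, so the ordinary library call is emitted instead.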
*/ if (!TARGET_INLINE_ALL_STRINGOPS && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL)) return 0; if (TARGET_SINGLE_STRINGOP) emit_insn (gen_cld ()); countreg2 = gen_reg_rtx (Pmode); countreg = copy_to_mode_reg (counter_mode, count_exp); /* We don't use loops to align destination and to copy parts smaller than 4 bytes, because gcc is able to optimize such code better (in the case the destination or the count really is aligned, gcc is often able to predict the branches) and also it is friendlier to the hardware branch prediction. Using loops is beneficial for generic case, because we can handle small counts using the loops. Many CPUs (such as Athlon) have large REP prefix setup costs. This is quite costly. Maybe we can revisit this decision later or add some customizability to this code. */ if (count == 0 && align < desired_alignment) { label = gen_label_rtx (); emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1), LEU, 0, counter_mode, 1, label); } if (align <= 1) { rtx label = ix86_expand_aligntest (destreg, 1); srcmem = change_address (src, QImode, srcreg); dstmem = change_address (dst, QImode, destreg); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); ix86_adjust_counter (countreg, 1); emit_label (label); LABEL_NUSES (label) = 1; } if (align <= 2) { rtx label = ix86_expand_aligntest (destreg, 2); srcmem = change_address (src, HImode, srcreg); dstmem = change_address (dst, HImode, destreg); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); ix86_adjust_counter (countreg, 2); emit_label (label); LABEL_NUSES (label) = 1; } if (align <= 4 && desired_alignment > 4) { rtx label = ix86_expand_aligntest (destreg, 4); srcmem = change_address (src, SImode, srcreg); dstmem = change_address (dst, SImode, destreg); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); ix86_adjust_counter (countreg, 4); emit_label (label); LABEL_NUSES (label) = 1; } if (label && desired_alignment > 4 && !TARGET_64BIT) { emit_label (label); LABEL_NUSES (label) = 1; label = NULL_RTX; } if (!TARGET_SINGLE_STRINGOP) emit_insn (gen_cld ()); if (TARGET_64BIT) { emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg), GEN_INT (3))); destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3)); } else { emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx)); destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx); } srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg); destexp = gen_rtx_PLUS (Pmode, destexp, destreg); emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg2, destexp, srcexp)); if (label) { emit_label (label); LABEL_NUSES (label) = 1; } if (TARGET_64BIT && align > 4 && count != 0 && (count & 4)) { srcmem = change_address (src, SImode, srcreg); dstmem = change_address (dst, SImode, destreg); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); } if ((align <= 4 || count == 0) && TARGET_64BIT) { rtx label = ix86_expand_aligntest (countreg, 4); srcmem = change_address (src, SImode, srcreg); dstmem = change_address (dst, SImode, destreg); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); emit_label (label); LABEL_NUSES (label) = 1; } if (align > 2 && count != 0 && (count & 2)) { srcmem = change_address (src, HImode, srcreg); dstmem = change_address (dst, HImode, destreg); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); } if (align <= 2 || count == 0) { rtx label = ix86_expand_aligntest (countreg, 2); srcmem = change_address (src, HImode, srcreg); dstmem = change_address (dst, HImode, destreg); emit_insn (gen_strmov (destreg, dstmem, 
srcreg, srcmem)); emit_label (label); LABEL_NUSES (label) = 1; } if (align > 1 && count != 0 && (count & 1)) { srcmem = change_address (src, QImode, srcreg); dstmem = change_address (dst, QImode, destreg); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); } if (align <= 1 || count == 0) { rtx label = ix86_expand_aligntest (countreg, 1); srcmem = change_address (src, QImode, srcreg); dstmem = change_address (dst, QImode, destreg); emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem)); emit_label (label); LABEL_NUSES (label) = 1; } } return 1; } /* Expand string clear operation (bzero). Use i386 string operations when profitable. expand_movstr contains similar code. */ int ix86_expand_clrstr (rtx dst, rtx count_exp, rtx align_exp) { rtx destreg, zeroreg, countreg, destexp; enum machine_mode counter_mode; HOST_WIDE_INT align = 0; unsigned HOST_WIDE_INT count = 0; if (GET_CODE (align_exp) == CONST_INT) align = INTVAL (align_exp); /* Can't use any of this if the user has appropriated esi. */ if (global_regs[4]) return 0; /* This simple hack avoids all inlining code and simplifies code below. */ if (!TARGET_ALIGN_STRINGOPS) align = 32; if (GET_CODE (count_exp) == CONST_INT) { count = INTVAL (count_exp); if (!TARGET_INLINE_ALL_STRINGOPS && count > 64) return 0; } /* Figure out proper mode for counter. For 32bits it is always SImode, for 64bits use SImode when possible, otherwise DImode. Set count to number of bytes copied when known at compile time. */ if (!TARGET_64BIT || GET_MODE (count_exp) == SImode || x86_64_zero_extended_value (count_exp)) counter_mode = SImode; else counter_mode = DImode; destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0)); if (destreg != XEXP (dst, 0)) dst = replace_equiv_address_nv (dst, destreg); emit_insn (gen_cld ()); /* When optimizing for size emit simple rep ; movsb instruction for counts not divisible by 4. */ if ((!optimize || optimize_size) && (count == 0 || (count & 0x03))) { countreg = ix86_zero_extend_to_Pmode (count_exp); zeroreg = copy_to_mode_reg (QImode, const0_rtx); destexp = gen_rtx_PLUS (Pmode, destreg, countreg); emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp)); } else if (count != 0 && (align >= 8 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4) || optimize_size || count < (unsigned int) 64)) { int size = TARGET_64BIT && !optimize_size ? 8 : 4; unsigned HOST_WIDE_INT offset = 0; zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx); if (count & ~(size - 1)) { countreg = copy_to_mode_reg (counter_mode, GEN_INT ((count >> (size == 4 ? 2 : 3)) & (TARGET_64BIT ? -1 : 0x3fffffff))); countreg = ix86_zero_extend_to_Pmode (countreg); destexp = gen_rtx_ASHIFT (Pmode, countreg, GEN_INT (size == 4 ? 2 : 3)); destexp = gen_rtx_PLUS (Pmode, destexp, destreg); emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp)); offset = count & ~(size - 1); } if (size == 8 && (count & 0x04)) { rtx mem = adjust_automodify_address_nv (dst, SImode, destreg, offset); emit_insn (gen_strset (destreg, mem, gen_rtx_SUBREG (SImode, zeroreg, 0))); offset += 4; } if (count & 0x02) { rtx mem = adjust_automodify_address_nv (dst, HImode, destreg, offset); emit_insn (gen_strset (destreg, mem, gen_rtx_SUBREG (HImode, zeroreg, 0))); offset += 2; } if (count & 0x01) { rtx mem = adjust_automodify_address_nv (dst, QImode, destreg, offset); emit_insn (gen_strset (destreg, mem, gen_rtx_SUBREG (QImode, zeroreg, 0))); } } else { rtx countreg2; rtx label = NULL; /* Compute desired alignment of the string operation. 
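The PentiumPro rep string fast path wants 8-byte alignment for large blocks (hence the 260-byte cutoff below); otherwise word alignment is sufficient.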
*/ int desired_alignment = (TARGET_PENTIUMPRO && (count == 0 || count >= (unsigned int) 260) ? 8 : UNITS_PER_WORD); /* In case we don't know anything about the alignment, default to library version, since it is usually equally fast and result in shorter code. Also emit call when we know that the count is large and call overhead will not be important. */ if (!TARGET_INLINE_ALL_STRINGOPS && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL)) return 0; if (TARGET_SINGLE_STRINGOP) emit_insn (gen_cld ()); countreg2 = gen_reg_rtx (Pmode); countreg = copy_to_mode_reg (counter_mode, count_exp); zeroreg = copy_to_mode_reg (Pmode, const0_rtx); /* Get rid of MEM_OFFSET, it won't be accurate. */ dst = change_address (dst, BLKmode, destreg); if (count == 0 && align < desired_alignment) { label = gen_label_rtx (); emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1), LEU, 0, counter_mode, 1, label); } if (align <= 1) { rtx label = ix86_expand_aligntest (destreg, 1); emit_insn (gen_strset (destreg, dst, gen_rtx_SUBREG (QImode, zeroreg, 0))); ix86_adjust_counter (countreg, 1); emit_label (label); LABEL_NUSES (label) = 1; } if (align <= 2) { rtx label = ix86_expand_aligntest (destreg, 2); emit_insn (gen_strset (destreg, dst, gen_rtx_SUBREG (HImode, zeroreg, 0))); ix86_adjust_counter (countreg, 2); emit_label (label); LABEL_NUSES (label) = 1; } if (align <= 4 && desired_alignment > 4) { rtx label = ix86_expand_aligntest (destreg, 4); emit_insn (gen_strset (destreg, dst, (TARGET_64BIT ? gen_rtx_SUBREG (SImode, zeroreg, 0) : zeroreg))); ix86_adjust_counter (countreg, 4); emit_label (label); LABEL_NUSES (label) = 1; } if (label && desired_alignment > 4 && !TARGET_64BIT) { emit_label (label); LABEL_NUSES (label) = 1; label = NULL_RTX; } if (!TARGET_SINGLE_STRINGOP) emit_insn (gen_cld ()); if (TARGET_64BIT) { emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg), GEN_INT (3))); destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3)); } else { emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx)); destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx); } destexp = gen_rtx_PLUS (Pmode, destexp, destreg); emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp)); if (label) { emit_label (label); LABEL_NUSES (label) = 1; } if (TARGET_64BIT && align > 4 && count != 0 && (count & 4)) emit_insn (gen_strset (destreg, dst, gen_rtx_SUBREG (SImode, zeroreg, 0))); if (TARGET_64BIT && (align <= 4 || count == 0)) { rtx label = ix86_expand_aligntest (countreg, 4); emit_insn (gen_strset (destreg, dst, gen_rtx_SUBREG (SImode, zeroreg, 0))); emit_label (label); LABEL_NUSES (label) = 1; } if (align > 2 && count != 0 && (count & 2)) emit_insn (gen_strset (destreg, dst, gen_rtx_SUBREG (HImode, zeroreg, 0))); if (align <= 2 || count == 0) { rtx label = ix86_expand_aligntest (countreg, 2); emit_insn (gen_strset (destreg, dst, gen_rtx_SUBREG (HImode, zeroreg, 0))); emit_label (label); LABEL_NUSES (label) = 1; } if (align > 1 && count != 0 && (count & 1)) emit_insn (gen_strset (destreg, dst, gen_rtx_SUBREG (QImode, zeroreg, 0))); if (align <= 1 || count == 0) { rtx label = ix86_expand_aligntest (countreg, 1); emit_insn (gen_strset (destreg, dst, gen_rtx_SUBREG (QImode, zeroreg, 0))); emit_label (label); LABEL_NUSES (label) = 1; } } return 1; } /* Expand strlen. */ int ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align) { rtx addr, scratch1, scratch2, scratch3, scratch4; /* The generic case of strlen expander is long. Avoid it's expanding unless TARGET_INLINE_ALL_STRINGOPS. 
*/ if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1 && !TARGET_INLINE_ALL_STRINGOPS && !optimize_size && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4)) return 0; addr = force_reg (Pmode, XEXP (src, 0)); scratch1 = gen_reg_rtx (Pmode); if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1 && !optimize_size) { /* Well it seems that some optimizer does not combine a call like foo(strlen(bar), strlen(bar)); when the move and the subtraction is done here. It does calculate the length just once when these instructions are done inside of output_strlen_unroll(). But I think since &bar[strlen(bar)] is often used and I use one fewer register for the lifetime of output_strlen_unroll() this is better. */ emit_move_insn (out, addr); ix86_expand_strlensi_unroll_1 (out, src, align); /* strlensi_unroll_1 returns the address of the zero at the end of the string, like memchr(), so compute the length by subtracting the start address. */ if (TARGET_64BIT) emit_insn (gen_subdi3 (out, out, addr)); else emit_insn (gen_subsi3 (out, out, addr)); } else { rtx unspec; scratch2 = gen_reg_rtx (Pmode); scratch3 = gen_reg_rtx (Pmode); scratch4 = force_reg (Pmode, constm1_rtx); emit_move_insn (scratch3, addr); eoschar = force_reg (QImode, eoschar); emit_insn (gen_cld ()); src = replace_equiv_address_nv (src, scratch3); /* If .md starts supporting :P, this can be done in .md. */ unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align, scratch4), UNSPEC_SCAS); emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec)); if (TARGET_64BIT) { emit_insn (gen_one_cmpldi2 (scratch2, scratch1)); emit_insn (gen_adddi3 (out, scratch2, constm1_rtx)); } else { emit_insn (gen_one_cmplsi2 (scratch2, scratch1)); emit_insn (gen_addsi3 (out, scratch2, constm1_rtx)); } } return 1; } /* Expand the appropriate insns for doing strlen if not just doing repnz; scasb out = result, initialized with the start address align_rtx = alignment of the address. scratch = scratch register, initialized with the startaddress when not aligned, otherwise undefined This is just the body. It needs the initializations mentioned above and some address computing at the end. These things are done in i386.md. */ static void ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx) { int align; rtx tmp; rtx align_2_label = NULL_RTX; rtx align_3_label = NULL_RTX; rtx align_4_label = gen_label_rtx (); rtx end_0_label = gen_label_rtx (); rtx mem; rtx tmpreg = gen_reg_rtx (SImode); rtx scratch = gen_reg_rtx (SImode); rtx cmp; align = 0; if (GET_CODE (align_rtx) == CONST_INT) align = INTVAL (align_rtx); /* Loop to check 1..3 bytes for null to get an aligned pointer. */ /* Is there a known alignment and is it less than 4? */ if (align < 4) { rtx scratch1 = gen_reg_rtx (Pmode); emit_move_insn (scratch1, out); /* Is there a known alignment and is it not 2? */ if (align != 2) { align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */ align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */ /* Leave just the 3 lower bits. */ align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3), NULL_RTX, 0, OPTAB_WIDEN); emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL, Pmode, 1, align_4_label); emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL, Pmode, 1, align_2_label); emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL, Pmode, 1, align_3_label); } else { /* Since the alignment is 2, we have to check 2 or 0 bytes; check if is aligned to 4 - byte. 
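With the pointer known to be 2-byte aligned the low address bit is already clear, so testing bit 1 alone is enough: if (addr & 2) == 0 the pointer is in fact 4-byte aligned and we jump straight to the dword loop, otherwise exactly two leading bytes have to be checked individually first.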
*/ align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx, NULL_RTX, 0, OPTAB_WIDEN); emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL, Pmode, 1, align_4_label); } mem = change_address (src, QImode, out); /* Now compare the bytes. */ /* Compare the first n unaligned byte on a byte per byte basis. */ emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1, end_0_label); /* Increment the address. */ if (TARGET_64BIT) emit_insn (gen_adddi3 (out, out, const1_rtx)); else emit_insn (gen_addsi3 (out, out, const1_rtx)); /* Not needed with an alignment of 2 */ if (align != 2) { emit_label (align_2_label); emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1, end_0_label); if (TARGET_64BIT) emit_insn (gen_adddi3 (out, out, const1_rtx)); else emit_insn (gen_addsi3 (out, out, const1_rtx)); emit_label (align_3_label); } emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1, end_0_label); if (TARGET_64BIT) emit_insn (gen_adddi3 (out, out, const1_rtx)); else emit_insn (gen_addsi3 (out, out, const1_rtx)); } /* Generate loop to check 4 bytes at a time. It is not a good idea to align this loop. It gives only huge programs, but does not help to speed up. */ emit_label (align_4_label); mem = change_address (src, SImode, out); emit_move_insn (scratch, mem); if (TARGET_64BIT) emit_insn (gen_adddi3 (out, out, GEN_INT (4))); else emit_insn (gen_addsi3 (out, out, GEN_INT (4))); /* This formula yields a nonzero result iff one of the bytes is zero. This saves three branches inside loop and many cycles. */ emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101))); emit_insn (gen_one_cmplsi2 (scratch, scratch)); emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch)); emit_insn (gen_andsi3 (tmpreg, tmpreg, gen_int_mode (0x80808080, SImode))); emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1, align_4_label); if (TARGET_CMOVE) { rtx reg = gen_reg_rtx (SImode); rtx reg2 = gen_reg_rtx (Pmode); emit_move_insn (reg, tmpreg); emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16))); /* If zero is not in the first two bytes, move two bytes forward. */ emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080))); tmp = gen_rtx_REG (CCNOmode, FLAGS_REG); tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx); emit_insn (gen_rtx_SET (VOIDmode, tmpreg, gen_rtx_IF_THEN_ELSE (SImode, tmp, reg, tmpreg))); /* Emit lea manually to avoid clobbering of flags. */ emit_insn (gen_rtx_SET (SImode, reg2, gen_rtx_PLUS (Pmode, out, const2_rtx))); tmp = gen_rtx_REG (CCNOmode, FLAGS_REG); tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx); emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_IF_THEN_ELSE (Pmode, tmp, reg2, out))); } else { rtx end_2_label = gen_label_rtx (); /* Is zero in the first two bytes? */ emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080))); tmp = gen_rtx_REG (CCNOmode, FLAGS_REG); tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx); tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp, gen_rtx_LABEL_REF (VOIDmode, end_2_label), pc_rtx); tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp)); JUMP_LABEL (tmp) = end_2_label; /* Not in the first two. Move two bytes forward. */ emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16))); if (TARGET_64BIT) emit_insn (gen_adddi3 (out, out, const2_rtx)); else emit_insn (gen_addsi3 (out, out, const2_rtx)); emit_label (end_2_label); } /* Avoid branch in fixing the byte. 
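TMPREG holds the 0x80 marker bits produced by the (x - 0x01010101) & ~x & 0x80808080 test above, the lowest set marker identifying the first zero byte, and the two-byte adjustment just made leaves the marker for the lower of the two remaining candidate bytes in bit 7 of TMPREG's low byte.  Adding that byte to itself copies the marker into the carry flag, so the subtract-with-borrow emitted below backs OUT up by 4 or 3 bytes and lands it exactly on the terminating zero without a conditional jump.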
*/ tmpreg = gen_lowpart (QImode, tmpreg); emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg)); cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx); if (TARGET_64BIT) emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp)); else emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp)); emit_label (end_0_label); } void ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1, rtx callarg2 ATTRIBUTE_UNUSED, rtx pop, int sibcall) { rtx use = NULL, call; if (pop == const0_rtx) pop = NULL; if (TARGET_64BIT && pop) abort (); #if TARGET_MACHO if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF) fnaddr = machopic_indirect_call_target (fnaddr); #else /* Static functions and indirect calls don't need the pic register. */ if (! TARGET_64BIT && flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0))) use_reg (&use, pic_offset_table_rtx); if (TARGET_64BIT && INTVAL (callarg2) >= 0) { rtx al = gen_rtx_REG (QImode, 0); emit_move_insn (al, callarg2); use_reg (&use, al); } #endif /* TARGET_MACHO */ if (! call_insn_operand (XEXP (fnaddr, 0), Pmode)) { fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0)); fnaddr = gen_rtx_MEM (QImode, fnaddr); } if (sibcall && TARGET_64BIT && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode)) { rtx addr; addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0)); fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */); emit_move_insn (fnaddr, addr); fnaddr = gen_rtx_MEM (QImode, fnaddr); } call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1); if (retval) call = gen_rtx_SET (VOIDmode, retval, call); if (pop) { pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop); pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop); call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop)); } call = emit_call_insn (call); if (use) CALL_INSN_FUNCTION_USAGE (call) = use; } /* Clear stack slot assignments remembered from previous functions. This is called from INIT_EXPANDERS once before RTL is emitted for each function. */ static struct machine_function * ix86_init_machine_status (void) { struct machine_function *f; f = ggc_alloc_cleared (sizeof (struct machine_function)); f->use_fast_prologue_epilogue_nregs = -1; return f; } /* Return a MEM corresponding to a stack slot with mode MODE. Allocate a new slot if necessary. The RTL for a function can have several slots available: N is which slot to use. */ rtx assign_386_stack_local (enum machine_mode mode, int n) { struct stack_local_entry *s; if (n < 0 || n >= MAX_386_STACK_LOCALS) abort (); for (s = ix86_stack_locals; s; s = s->next) if (s->mode == mode && s->n == n) return s->rtl; s = (struct stack_local_entry *) ggc_alloc (sizeof (struct stack_local_entry)); s->n = n; s->mode = mode; s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0); s->next = ix86_stack_locals; ix86_stack_locals = s; return s->rtl; } /* Construct the SYMBOL_REF for the tls_get_addr function. */ static GTY(()) rtx ix86_tls_symbol; rtx ix86_tls_get_addr (void) { if (!ix86_tls_symbol) { ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, (TARGET_GNU_TLS && !TARGET_64BIT) ? "___tls_get_addr" : "__tls_get_addr"); } return ix86_tls_symbol; } /* Calculate the length of the memory address in the instruction encoding. Does not include the one-byte modrm, opcode, or prefix. 
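In the scheme counted here a plain register-indirect address such as (%eax) costs 0 extra bytes, (%esp) and (%ebp) cost 1 (a SIB byte and a zero disp8 respectively), an absolute address costs 4 (disp32), and something like 8(%eax,%ebx,4) costs 2 (SIB byte plus disp8).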
*/ static int memory_address_length (rtx addr) { struct ix86_address parts; rtx base, index, disp; int len; if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == POST_INC || GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY) return 0; if (! ix86_decompose_address (addr, &parts)) abort (); base = parts.base; index = parts.index; disp = parts.disp; len = 0; /* Rule of thumb: - esp as the base always wants an index, - ebp as the base always wants a displacement. */ /* Register Indirect. */ if (base && !index && !disp) { /* esp (for its index) and ebp (for its displacement) need the two-byte modrm form. */ if (addr == stack_pointer_rtx || addr == arg_pointer_rtx || addr == frame_pointer_rtx || addr == hard_frame_pointer_rtx) len = 1; } /* Direct Addressing. */ else if (disp && !base && !index) len = 4; else { /* Find the length of the displacement constant. */ if (disp) { if (GET_CODE (disp) == CONST_INT && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K') && base) len = 1; else len = 4; } /* ebp always wants a displacement. */ else if (base == hard_frame_pointer_rtx) len = 1; /* An index requires the two-byte modrm form.... */ if (index /* ...like esp, which always wants an index. */ || base == stack_pointer_rtx || base == arg_pointer_rtx || base == frame_pointer_rtx) len += 1; } return len; } /* Compute default value for "length_immediate" attribute. When SHORTFORM is set, expect that insn have 8bit immediate alternative. */ int ix86_attr_length_immediate_default (rtx insn, int shortform) { int len = 0; int i; extract_insn_cached (insn); for (i = recog_data.n_operands - 1; i >= 0; --i) if (CONSTANT_P (recog_data.operand[i])) { if (len) abort (); if (shortform && GET_CODE (recog_data.operand[i]) == CONST_INT && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K')) len = 1; else { switch (get_attr_mode (insn)) { case MODE_QI: len+=1; break; case MODE_HI: len+=2; break; case MODE_SI: len+=4; break; /* Immediates for DImode instructions are encoded as 32bit sign extended values. */ case MODE_DI: len+=4; break; default: fatal_insn ("unknown insn mode", insn); } } } return len; } /* Compute default value for "length_address" attribute. */ int ix86_attr_length_address_default (rtx insn) { int i; if (get_attr_type (insn) == TYPE_LEA) { rtx set = PATTERN (insn); if (GET_CODE (set) == SET) ; else if (GET_CODE (set) == PARALLEL && GET_CODE (XVECEXP (set, 0, 0)) == SET) set = XVECEXP (set, 0, 0); else { #ifdef ENABLE_CHECKING abort (); #endif return 0; } return memory_address_length (SET_SRC (set)); } extract_insn_cached (insn); for (i = recog_data.n_operands - 1; i >= 0; --i) if (GET_CODE (recog_data.operand[i]) == MEM) { return memory_address_length (XEXP (recog_data.operand[i], 0)); break; } return 0; } /* Return the maximum number of instructions a cpu can issue. */ static int ix86_issue_rate (void) { switch (ix86_tune) { case PROCESSOR_PENTIUM: case PROCESSOR_K6: return 2; case PROCESSOR_PENTIUMPRO: case PROCESSOR_PENTIUM4: case PROCESSOR_ATHLON: case PROCESSOR_K8: case PROCESSOR_NOCONA: return 3; default: return 1; } } /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set by DEP_INSN and nothing set by DEP_INSN. */ static int ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type) { rtx set, set2; /* Simplify the test for uninteresting insns. 
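Only setcc, integer and FP conditional moves, and conditional branches actually consume the flags register here, so for any other INSN the flags dependence this function looks for cannot arise and we can bail out at once.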
*/ if (insn_type != TYPE_SETCC && insn_type != TYPE_ICMOV && insn_type != TYPE_FCMOV && insn_type != TYPE_IBR) return 0; if ((set = single_set (dep_insn)) != 0) { set = SET_DEST (set); set2 = NULL_RTX; } else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL && XVECLEN (PATTERN (dep_insn), 0) == 2 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET) { set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0)); set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0)); } else return 0; if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG) return 0; /* This test is true if the dependent insn reads the flags but not any other potentially set register. */ if (!reg_overlap_mentioned_p (set, PATTERN (insn))) return 0; if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn))) return 0; return 1; } /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory address with operands set by DEP_INSN. */ static int ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type) { rtx addr; if (insn_type == TYPE_LEA && TARGET_PENTIUM) { addr = PATTERN (insn); if (GET_CODE (addr) == SET) ; else if (GET_CODE (addr) == PARALLEL && GET_CODE (XVECEXP (addr, 0, 0)) == SET) addr = XVECEXP (addr, 0, 0); else abort (); addr = SET_SRC (addr); } else { int i; extract_insn_cached (insn); for (i = recog_data.n_operands - 1; i >= 0; --i) if (GET_CODE (recog_data.operand[i]) == MEM) { addr = XEXP (recog_data.operand[i], 0); goto found; } return 0; found:; } return modified_in_p (addr, dep_insn); } static int ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost) { enum attr_type insn_type, dep_insn_type; enum attr_memory memory; rtx set, set2; int dep_insn_code_number; /* Anti and output dependencies have zero cost on all CPUs. */ if (REG_NOTE_KIND (link) != 0) return 0; dep_insn_code_number = recog_memoized (dep_insn); /* If we can't recognize the insns, we can't really do anything. */ if (dep_insn_code_number < 0 || recog_memoized (insn) < 0) return cost; insn_type = get_attr_type (insn); dep_insn_type = get_attr_type (dep_insn); switch (ix86_tune) { case PROCESSOR_PENTIUM: /* Address Generation Interlock adds a cycle of latency. */ if (ix86_agi_dependant (insn, dep_insn, insn_type)) cost += 1; /* ??? Compares pair with jump/setcc. */ if (ix86_flags_dependant (insn, dep_insn, insn_type)) cost = 0; /* Floating point stores require value to be ready one cycle earlier. */ if (insn_type == TYPE_FMOV && get_attr_memory (insn) == MEMORY_STORE && !ix86_agi_dependant (insn, dep_insn, insn_type)) cost += 1; break; case PROCESSOR_PENTIUMPRO: memory = get_attr_memory (insn); /* INT->FP conversion is expensive. */ if (get_attr_fp_int_src (dep_insn)) cost += 5; /* There is one cycle extra latency between an FP op and a store. */ if (insn_type == TYPE_FMOV && (set = single_set (dep_insn)) != NULL_RTX && (set2 = single_set (insn)) != NULL_RTX && rtx_equal_p (SET_DEST (set), SET_SRC (set2)) && GET_CODE (SET_DEST (set2)) == MEM) cost += 1; /* Show ability of reorder buffer to hide latency of load by executing in parallel with previous instruction in case previous instruction is not needed to compute the address. */ if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH) && !ix86_agi_dependant (insn, dep_insn, insn_type)) { /* Claim moves to take one cycle, as core can issue one load at time and the next load can start cycle later. 
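That is, when INSN is a load whose address does not depend on DEP_INSN, a DEP_INSN that is a plain integer or FP move is treated as ready after a single cycle, and anything else gets one cycle shaved off its latency, modelling the address generation overlapping with the producer.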
*/ if (dep_insn_type == TYPE_IMOV || dep_insn_type == TYPE_FMOV) cost = 1; else if (cost > 1) cost--; } break; case PROCESSOR_K6: memory = get_attr_memory (insn); /* The esp dependency is resolved before the instruction is really finished. */ if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP) && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP)) return 1; /* INT->FP conversion is expensive. */ if (get_attr_fp_int_src (dep_insn)) cost += 5; /* Show ability of reorder buffer to hide latency of load by executing in parallel with previous instruction in case previous instruction is not needed to compute the address. */ if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH) && !ix86_agi_dependant (insn, dep_insn, insn_type)) { /* Claim moves to take one cycle, as core can issue one load at time and the next load can start cycle later. */ if (dep_insn_type == TYPE_IMOV || dep_insn_type == TYPE_FMOV) cost = 1; else if (cost > 2) cost -= 2; else cost = 1; } break; case PROCESSOR_ATHLON: case PROCESSOR_K8: memory = get_attr_memory (insn); /* Show ability of reorder buffer to hide latency of load by executing in parallel with previous instruction in case previous instruction is not needed to compute the address. */ if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH) && !ix86_agi_dependant (insn, dep_insn, insn_type)) { enum attr_unit unit = get_attr_unit (insn); int loadcost = 3; /* Because of the difference between the length of integer and floating unit pipeline preparation stages, the memory operands for floating point are cheaper. ??? For Athlon it the difference is most probably 2. */ if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN) loadcost = 3; else loadcost = TARGET_ATHLON ? 2 : 0; if (cost >= loadcost) cost -= loadcost; else cost = 0; } default: break; } return cost; } static int ia32_use_dfa_pipeline_interface (void) { if (TARGET_PENTIUM || TARGET_PENTIUMPRO || TARGET_K6 || TARGET_ATHLON_K8) return 1; return 0; } /* How many alternative schedules to try. This should be as wide as the scheduling freedom in the DFA, but no wider. Making this value too large results extra work for the scheduler. */ static int ia32_multipass_dfa_lookahead (void) { if (ix86_tune == PROCESSOR_PENTIUM) return 2; if (ix86_tune == PROCESSOR_PENTIUMPRO || ix86_tune == PROCESSOR_K6) return 1; else return 0; } /* Compute the alignment given to a constant that is being placed in memory. EXP is the constant and ALIGN is the alignment that the object would ordinarily have. The value of this function is used instead of that alignment to align the object. */ int ix86_constant_alignment (tree exp, int align) { if (TREE_CODE (exp) == REAL_CST) { if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64) return 64; else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128) return 128; } else if (!optimize_size && TREE_CODE (exp) == STRING_CST && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD) return BITS_PER_WORD; return align; } /* Compute the alignment for a static variable. TYPE is the data type, and ALIGN is the alignment that the object would ordinarily have. The value of this function is used instead of that alignment to align the object. */ int ix86_data_alignment (tree type, int align) { if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256) return 256; /* x86-64 ABI requires arrays greater than 16 bytes to be aligned to 16byte boundary. 
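The sizes tested here are TYPE_SIZE values and therefore in bits, so the check below fires for aggregates of 128 bits (16 bytes) and up; a 32-byte array of doubles, for instance, is raised to 128-bit alignment on x86-64 if it would otherwise get less.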
*/ if (TARGET_64BIT) { if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128) return 128; } if (TREE_CODE (type) == ARRAY_TYPE) { if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64) return 64; if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128) return 128; } else if (TREE_CODE (type) == COMPLEX_TYPE) { if (TYPE_MODE (type) == DCmode && align < 64) return 64; if (TYPE_MODE (type) == XCmode && align < 128) return 128; } else if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) && TYPE_FIELDS (type)) { if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64) return 64; if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128) return 128; } else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE || TREE_CODE (type) == INTEGER_TYPE) { if (TYPE_MODE (type) == DFmode && align < 64) return 64; if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128) return 128; } return align; } /* Compute the alignment for a local variable. TYPE is the data type, and ALIGN is the alignment that the object would ordinarily have. The value of this macro is used instead of that alignment to align the object. */ int ix86_local_alignment (tree type, int align) { /* x86-64 ABI requires arrays greater than 16 bytes to be aligned to 16byte boundary. */ if (TARGET_64BIT) { if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128) return 128; } if (TREE_CODE (type) == ARRAY_TYPE) { if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64) return 64; if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128) return 128; } else if (TREE_CODE (type) == COMPLEX_TYPE) { if (TYPE_MODE (type) == DCmode && align < 64) return 64; if (TYPE_MODE (type) == XCmode && align < 128) return 128; } else if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == QUAL_UNION_TYPE) && TYPE_FIELDS (type)) { if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64) return 64; if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128) return 128; } else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE || TREE_CODE (type) == INTEGER_TYPE) { if (TYPE_MODE (type) == DFmode && align < 64) return 64; if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128) return 128; } return align; } /* Emit RTL insns to initialize the variable parts of a trampoline. FNADDR is an RTX for the address of the function's pure code. CXT is an RTX for the static chain value for the function. */ void x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt) { if (!TARGET_64BIT) { /* Compute offset from the end of the jmp to the target function. */ rtx disp = expand_binop (SImode, sub_optab, fnaddr, plus_constant (tramp, 10), NULL_RTX, 1, OPTAB_DIRECT); emit_move_insn (gen_rtx_MEM (QImode, tramp), gen_int_mode (0xb9, QImode)); emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt); emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)), gen_int_mode (0xe9, QImode)); emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp); } else { int offset = 0; /* Try to load address using shorter movl instead of movabs. 
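(The HImode constants stored below are little-endian opcode pairs: 41 bb begins `movl $imm32, %r11d', 49 bb begins `movabs $imm64, %r11', 49 ba begins `movabs $imm64, %r10', and 49 ff followed by the e3 byte encodes `jmp *%r11'.)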
We may want to support movq for kernel mode, but kernel does not use trampolines at the moment. */ if (x86_64_zero_extended_value (fnaddr)) { fnaddr = copy_to_mode_reg (DImode, fnaddr); emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)), gen_int_mode (0xbb41, HImode)); emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)), gen_lowpart (SImode, fnaddr)); offset += 6; } else { emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)), gen_int_mode (0xbb49, HImode)); emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)), fnaddr); offset += 10; } /* Load static chain using movabs to r10. */ emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)), gen_int_mode (0xba49, HImode)); emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)), cxt); offset += 10; /* Jump to the r11 */ emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)), gen_int_mode (0xff49, HImode)); emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)), gen_int_mode (0xe3, QImode)); offset += 3; if (offset > TRAMPOLINE_SIZE) abort (); } #ifdef TRANSFER_FROM_TRAMPOLINE emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"), LCT_NORMAL, VOIDmode, 1, tramp, Pmode); #endif } #define def_builtin(MASK, NAME, TYPE, CODE) \ do { \ if ((MASK) & target_flags \ && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \ builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \ NULL, NULL_TREE); \ } while (0) struct builtin_description { const unsigned int mask; const enum insn_code icode; const char *const name; const enum ix86_builtins code; const enum rtx_code comparison; const unsigned int flag; }; static const struct builtin_description bdesc_comi[] = { { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 }, { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 }, { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 }, { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 }, { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 }, { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 }, { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 }, { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 }, { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 }, { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 }, { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 }, { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 }, { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 }, { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 }, { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 }, { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 }, { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 }, { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 }, { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 }, { MASK_SSE2, CODE_FOR_sse2_ucomi, 
"__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 }, { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 }, { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 }, { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 }, { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 }, }; static const struct builtin_description bdesc_2arg[] = { /* SSE */ { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 }, { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 }, { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 }, { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 }, { MASK_SSE, CODE_FOR_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 }, { MASK_SSE, CODE_FOR_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 }, { MASK_SSE, CODE_FOR_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 }, { MASK_SSE, CODE_FOR_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 }, { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 }, { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 }, { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 }, { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, 1 }, { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, 1 }, { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 }, { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, EQ, 0 }, { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, LT, 0 }, { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, LE, 0 }, { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, LT, 1 }, { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, LE, 1 }, { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, UNORDERED, 0 }, { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 }, { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 }, { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 }, { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 }, { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, EQ, 0 }, { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, LT, 0 }, { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, LE, 0 }, { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 }, { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 }, { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 }, { MASK_SSE, CODE_FOR_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 }, { MASK_SSE, CODE_FOR_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 }, { MASK_SSE, CODE_FOR_sse_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 }, { 
MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 }, { MASK_SSE, CODE_FOR_sse_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 }, { MASK_SSE, CODE_FOR_sse_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 }, { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 }, { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 }, { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 }, { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 }, { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 }, /* MMX */ { MASK_MMX, CODE_FOR_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 }, { MASK_MMX, CODE_FOR_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 }, { MASK_MMX, CODE_FOR_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 }, { MASK_MMX, CODE_FOR_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 }, { MASK_MMX, CODE_FOR_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 }, { MASK_MMX, CODE_FOR_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 }, { MASK_MMX, CODE_FOR_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 }, { MASK_MMX, CODE_FOR_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 }, { MASK_MMX, CODE_FOR_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 }, { MASK_MMX, CODE_FOR_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 }, { MASK_MMX, CODE_FOR_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 }, { MASK_MMX, CODE_FOR_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 }, { MASK_MMX, CODE_FOR_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 }, { MASK_MMX, CODE_FOR_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 }, { MASK_MMX, CODE_FOR_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 }, { MASK_MMX, CODE_FOR_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 }, { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_anddi3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_nanddi3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_iordi3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_xordi3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 }, { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 }, { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 }, { MASK_MMX, CODE_FOR_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 }, { MASK_MMX, CODE_FOR_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 }, { MASK_MMX, CODE_FOR_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 }, { MASK_MMX, CODE_FOR_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 }, { MASK_MMX, CODE_FOR_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 }, { MASK_MMX, CODE_FOR_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 }, { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 }, { MASK_SSE | 
MASK_3DNOW_A, CODE_FOR_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 }, { MASK_SSE | MASK_3DNOW_A, CODE_FOR_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 }, { MASK_SSE | MASK_3DNOW_A, CODE_FOR_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 }, /* Special. */ { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 }, { MASK_SSE, CODE_FOR_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 }, { MASK_SSE, CODE_FOR_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 }, { MASK_SSE | MASK_64BIT, CODE_FOR_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 }, { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 }, { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 }, { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 }, { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 }, { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 }, { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 }, { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 }, { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 }, { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 }, { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 }, { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 }, { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 }, { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 }, { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 }, /* SSE2 */ { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 }, { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 }, { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 }, { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 }, { MASK_SSE2, CODE_FOR_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 }, { MASK_SSE2, CODE_FOR_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 }, { MASK_SSE2, CODE_FOR_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 }, { MASK_SSE2, CODE_FOR_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 }, { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 }, { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 }, { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 }, { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, 1 }, { MASK_SSE2, 
CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, 1 }, { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 }, { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, EQ, 0 }, { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, LT, 0 }, { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, LE, 0 }, { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, LT, 1 }, { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, LE, 1 }, { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, UNORDERED, 0 }, { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 }, { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 }, { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 }, { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 }, { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, EQ, 0 }, { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, LT, 0 }, { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, LE, 0 }, { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, UNORDERED, 0 }, { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 }, { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 }, { MASK_SSE2, CODE_FOR_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 }, { MASK_SSE2, CODE_FOR_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 }, /* SSE2 MMX */ { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 }, { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 }, { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 }, { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 }, { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 }, { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 }, { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 }, { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 }, { MASK_MMX, CODE_FOR_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 }, { MASK_MMX, CODE_FOR_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 }, { MASK_MMX, CODE_FOR_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 
}, { MASK_MMX, CODE_FOR_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 }, { MASK_MMX, CODE_FOR_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 }, { MASK_MMX, CODE_FOR_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 }, { MASK_MMX, CODE_FOR_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 }, { MASK_MMX, CODE_FOR_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 }, { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 }, { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_umulsidi3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 }, { MASK_SSE2, CODE_FOR_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 }, { MASK_SSE2, CODE_FOR_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 }, { MASK_SSE2, CODE_FOR_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 }, { MASK_SSE2, CODE_FOR_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 }, { MASK_SSE2, CODE_FOR_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 }, { MASK_SSE2, CODE_FOR_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 }, { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 }, { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 }, { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 }, { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 }, { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, 
"__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashlv8hi3_ti, 0, IX86_BUILTIN_PSLLW128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashlv4si3_ti, 0, IX86_BUILTIN_PSLLD128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashlv2di3_ti, 0, IX86_BUILTIN_PSLLQ128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 }, { MASK_SSE2, CODE_FOR_lshrv8hi3_ti, 0, IX86_BUILTIN_PSRLW128, 0, 0 }, { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 }, { MASK_SSE2, CODE_FOR_lshrv4si3_ti, 0, IX86_BUILTIN_PSRLD128, 0, 0 }, { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 }, { MASK_SSE2, CODE_FOR_lshrv2di3_ti, 0, IX86_BUILTIN_PSRLQ128, 0, 0 }, { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashrv8hi3_ti, 0, IX86_BUILTIN_PSRAW128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashrv4si3_ti, 0, IX86_BUILTIN_PSRAD128, 0, 0 }, { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 }, { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 }, /* SSE3 MMX */ { MASK_SSE3, CODE_FOR_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 }, { MASK_SSE3, CODE_FOR_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 }, { MASK_SSE3, CODE_FOR_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 }, { MASK_SSE3, CODE_FOR_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 }, { MASK_SSE3, CODE_FOR_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 }, { MASK_SSE3, CODE_FOR_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 } }; static const struct builtin_description bdesc_1arg[] = { { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 }, { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 }, { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 }, { MASK_SSE, CODE_FOR_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 }, { MASK_SSE, CODE_FOR_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 }, { MASK_SSE, CODE_FOR_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 }, { MASK_SSE, CODE_FOR_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 }, { MASK_SSE | MASK_64BIT, CODE_FOR_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 }, { MASK_SSE, CODE_FOR_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 }, { MASK_SSE, CODE_FOR_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 }, { MASK_SSE | MASK_64BIT, CODE_FOR_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_movq2dq, 0, IX86_BUILTIN_MOVQ2DQ, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_movdq2q, 0, IX86_BUILTIN_MOVDQ2Q, 0, 0 }, { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtpd2pi, 0, 
IX86_BUILTIN_CVTPD2PI, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 }, { MASK_SSE2, CODE_FOR_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 }, { MASK_SSE2, CODE_FOR_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 }, { MASK_SSE2, CODE_FOR_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 }, { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 }, { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 }, { MASK_SSE2, CODE_FOR_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 }, { MASK_SSE2, CODE_FOR_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 }, { MASK_SSE2, CODE_FOR_sse2_movq, 0, IX86_BUILTIN_MOVQ, 0, 0 }, /* SSE3 */ { MASK_SSE3, CODE_FOR_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 }, { MASK_SSE3, CODE_FOR_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 }, { MASK_SSE3, CODE_FOR_movddup, 0, IX86_BUILTIN_MOVDDUP, 0, 0 } }; void ix86_init_builtins (void) { if (TARGET_MMX) ix86_init_mmx_sse_builtins (); } /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX builtins. */ static void ix86_init_mmx_sse_builtins (void) { const struct builtin_description * d; size_t i; tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode); tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode); tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode); tree V2DI_type_node = build_vector_type_for_mode (intDI_type_node, V2DImode); tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode); tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode); tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode); tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode); tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode); tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode); tree pchar_type_node = build_pointer_type (char_type_node); tree pcchar_type_node = build_pointer_type ( build_type_variant (char_type_node, 1, 0)); tree pfloat_type_node = build_pointer_type (float_type_node); tree pcfloat_type_node = build_pointer_type ( build_type_variant (float_type_node, 1, 0)); tree pv2si_type_node = build_pointer_type (V2SI_type_node); tree pv2di_type_node = build_pointer_type (V2DI_type_node); tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node); /* Comparisons. */ tree int_ftype_v4sf_v4sf = build_function_type_list (integer_type_node, V4SF_type_node, V4SF_type_node, NULL_TREE); tree v4si_ftype_v4sf_v4sf = build_function_type_list (V4SI_type_node, V4SF_type_node, V4SF_type_node, NULL_TREE); /* MMX/SSE/integer conversions. 
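(Throughout this function the helper trees follow a result_ftype_arguments naming scheme: int_ftype_v4sf is the signature `int f (v4sf)', v4sf_ftype_v4sf_int is `v4sf f (v4sf, int)', and so on.)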
*/ tree int_ftype_v4sf = build_function_type_list (integer_type_node, V4SF_type_node, NULL_TREE); tree int64_ftype_v4sf = build_function_type_list (long_long_integer_type_node, V4SF_type_node, NULL_TREE); tree int_ftype_v8qi = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE); tree v4sf_ftype_v4sf_int = build_function_type_list (V4SF_type_node, V4SF_type_node, integer_type_node, NULL_TREE); tree v4sf_ftype_v4sf_int64 = build_function_type_list (V4SF_type_node, V4SF_type_node, long_long_integer_type_node, NULL_TREE); tree v4sf_ftype_v4sf_v2si = build_function_type_list (V4SF_type_node, V4SF_type_node, V2SI_type_node, NULL_TREE); tree int_ftype_v4hi_int = build_function_type_list (integer_type_node, V4HI_type_node, integer_type_node, NULL_TREE); tree v4hi_ftype_v4hi_int_int = build_function_type_list (V4HI_type_node, V4HI_type_node, integer_type_node, integer_type_node, NULL_TREE); /* Miscellaneous. */ tree v8qi_ftype_v4hi_v4hi = build_function_type_list (V8QI_type_node, V4HI_type_node, V4HI_type_node, NULL_TREE); tree v4hi_ftype_v2si_v2si = build_function_type_list (V4HI_type_node, V2SI_type_node, V2SI_type_node, NULL_TREE); tree v4sf_ftype_v4sf_v4sf_int = build_function_type_list (V4SF_type_node, V4SF_type_node, V4SF_type_node, integer_type_node, NULL_TREE); tree v2si_ftype_v4hi_v4hi = build_function_type_list (V2SI_type_node, V4HI_type_node, V4HI_type_node, NULL_TREE); tree v4hi_ftype_v4hi_int = build_function_type_list (V4HI_type_node, V4HI_type_node, integer_type_node, NULL_TREE); tree v4hi_ftype_v4hi_di = build_function_type_list (V4HI_type_node, V4HI_type_node, long_long_unsigned_type_node, NULL_TREE); tree v2si_ftype_v2si_di = build_function_type_list (V2SI_type_node, V2SI_type_node, long_long_unsigned_type_node, NULL_TREE); tree void_ftype_void = build_function_type (void_type_node, void_list_node); tree void_ftype_unsigned = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE); tree void_ftype_unsigned_unsigned = build_function_type_list (void_type_node, unsigned_type_node, unsigned_type_node, NULL_TREE); tree void_ftype_pcvoid_unsigned_unsigned = build_function_type_list (void_type_node, const_ptr_type_node, unsigned_type_node, unsigned_type_node, NULL_TREE); tree unsigned_ftype_void = build_function_type (unsigned_type_node, void_list_node); tree di_ftype_void = build_function_type (long_long_unsigned_type_node, void_list_node); tree v4sf_ftype_void = build_function_type (V4SF_type_node, void_list_node); tree v2si_ftype_v4sf = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE); /* Loads/stores. */ tree void_ftype_v8qi_v8qi_pchar = build_function_type_list (void_type_node, V8QI_type_node, V8QI_type_node, pchar_type_node, NULL_TREE); tree v4sf_ftype_pcfloat = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE); /* @@@ the type is bogus */ tree v4sf_ftype_v4sf_pv2si = build_function_type_list (V4SF_type_node, V4SF_type_node, pv2si_type_node, NULL_TREE); tree void_ftype_pv2si_v4sf = build_function_type_list (void_type_node, pv2si_type_node, V4SF_type_node, NULL_TREE); tree void_ftype_pfloat_v4sf = build_function_type_list (void_type_node, pfloat_type_node, V4SF_type_node, NULL_TREE); tree void_ftype_pdi_di = build_function_type_list (void_type_node, pdi_type_node, long_long_unsigned_type_node, NULL_TREE); tree void_ftype_pv2di_v2di = build_function_type_list (void_type_node, pv2di_type_node, V2DI_type_node, NULL_TREE); /* Normal vector unops. 
*/ tree v4sf_ftype_v4sf = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE); /* Normal vector binops. */ tree v4sf_ftype_v4sf_v4sf = build_function_type_list (V4SF_type_node, V4SF_type_node, V4SF_type_node, NULL_TREE); tree v8qi_ftype_v8qi_v8qi = build_function_type_list (V8QI_type_node, V8QI_type_node, V8QI_type_node, NULL_TREE); tree v4hi_ftype_v4hi_v4hi = build_function_type_list (V4HI_type_node, V4HI_type_node, V4HI_type_node, NULL_TREE); tree v2si_ftype_v2si_v2si = build_function_type_list (V2SI_type_node, V2SI_type_node, V2SI_type_node, NULL_TREE); tree di_ftype_di_di = build_function_type_list (long_long_unsigned_type_node, long_long_unsigned_type_node, long_long_unsigned_type_node, NULL_TREE); tree v2si_ftype_v2sf = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE); tree v2sf_ftype_v2si = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE); tree v2si_ftype_v2si = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE); tree v2sf_ftype_v2sf = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE); tree v2sf_ftype_v2sf_v2sf = build_function_type_list (V2SF_type_node, V2SF_type_node, V2SF_type_node, NULL_TREE); tree v2si_ftype_v2sf_v2sf = build_function_type_list (V2SI_type_node, V2SF_type_node, V2SF_type_node, NULL_TREE); tree pint_type_node = build_pointer_type (integer_type_node); tree pcint_type_node = build_pointer_type ( build_type_variant (integer_type_node, 1, 0)); tree pdouble_type_node = build_pointer_type (double_type_node); tree pcdouble_type_node = build_pointer_type ( build_type_variant (double_type_node, 1, 0)); tree int_ftype_v2df_v2df = build_function_type_list (integer_type_node, V2DF_type_node, V2DF_type_node, NULL_TREE); tree ti_ftype_void = build_function_type (intTI_type_node, void_list_node); tree v2di_ftype_void = build_function_type (V2DI_type_node, void_list_node); tree ti_ftype_ti_ti = build_function_type_list (intTI_type_node, intTI_type_node, intTI_type_node, NULL_TREE); tree void_ftype_pcvoid = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE); tree v2di_ftype_di = build_function_type_list (V2DI_type_node, long_long_unsigned_type_node, NULL_TREE); tree di_ftype_v2di = build_function_type_list (long_long_unsigned_type_node, V2DI_type_node, NULL_TREE); tree v4sf_ftype_v4si = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE); tree v4si_ftype_v4sf = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE); tree v2df_ftype_v4si = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE); tree v4si_ftype_v2df = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE); tree v2si_ftype_v2df = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE); tree v4sf_ftype_v2df = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE); tree v2df_ftype_v2si = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE); tree v2df_ftype_v4sf = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE); tree int_ftype_v2df = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE); tree int64_ftype_v2df = build_function_type_list (long_long_integer_type_node, V2DF_type_node, NULL_TREE); tree v2df_ftype_v2df_int = build_function_type_list (V2DF_type_node, V2DF_type_node, integer_type_node, NULL_TREE); tree v2df_ftype_v2df_int64 = build_function_type_list (V2DF_type_node, V2DF_type_node, long_long_integer_type_node, NULL_TREE); tree v4sf_ftype_v4sf_v2df = 
build_function_type_list (V4SF_type_node, V4SF_type_node, V2DF_type_node, NULL_TREE); tree v2df_ftype_v2df_v4sf = build_function_type_list (V2DF_type_node, V2DF_type_node, V4SF_type_node, NULL_TREE); tree v2df_ftype_v2df_v2df_int = build_function_type_list (V2DF_type_node, V2DF_type_node, V2DF_type_node, integer_type_node, NULL_TREE); tree v2df_ftype_v2df_pv2si = build_function_type_list (V2DF_type_node, V2DF_type_node, pv2si_type_node, NULL_TREE); tree void_ftype_pv2si_v2df = build_function_type_list (void_type_node, pv2si_type_node, V2DF_type_node, NULL_TREE); tree void_ftype_pdouble_v2df = build_function_type_list (void_type_node, pdouble_type_node, V2DF_type_node, NULL_TREE); tree void_ftype_pint_int = build_function_type_list (void_type_node, pint_type_node, integer_type_node, NULL_TREE); tree void_ftype_v16qi_v16qi_pchar = build_function_type_list (void_type_node, V16QI_type_node, V16QI_type_node, pchar_type_node, NULL_TREE); tree v2df_ftype_pcdouble = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE); tree v2df_ftype_v2df_v2df = build_function_type_list (V2DF_type_node, V2DF_type_node, V2DF_type_node, NULL_TREE); tree v16qi_ftype_v16qi_v16qi = build_function_type_list (V16QI_type_node, V16QI_type_node, V16QI_type_node, NULL_TREE); tree v8hi_ftype_v8hi_v8hi = build_function_type_list (V8HI_type_node, V8HI_type_node, V8HI_type_node, NULL_TREE); tree v4si_ftype_v4si_v4si = build_function_type_list (V4SI_type_node, V4SI_type_node, V4SI_type_node, NULL_TREE); tree v2di_ftype_v2di_v2di = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node, NULL_TREE); tree v2di_ftype_v2df_v2df = build_function_type_list (V2DI_type_node, V2DF_type_node, V2DF_type_node, NULL_TREE); tree v2df_ftype_v2df = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE); tree v2df_ftype_double = build_function_type_list (V2DF_type_node, double_type_node, NULL_TREE); tree v2df_ftype_double_double = build_function_type_list (V2DF_type_node, double_type_node, double_type_node, NULL_TREE); tree int_ftype_v8hi_int = build_function_type_list (integer_type_node, V8HI_type_node, integer_type_node, NULL_TREE); tree v8hi_ftype_v8hi_int_int = build_function_type_list (V8HI_type_node, V8HI_type_node, integer_type_node, integer_type_node, NULL_TREE); tree v2di_ftype_v2di_int = build_function_type_list (V2DI_type_node, V2DI_type_node, integer_type_node, NULL_TREE); tree v4si_ftype_v4si_int = build_function_type_list (V4SI_type_node, V4SI_type_node, integer_type_node, NULL_TREE); tree v8hi_ftype_v8hi_int = build_function_type_list (V8HI_type_node, V8HI_type_node, integer_type_node, NULL_TREE); tree v8hi_ftype_v8hi_v2di = build_function_type_list (V8HI_type_node, V8HI_type_node, V2DI_type_node, NULL_TREE); tree v4si_ftype_v4si_v2di = build_function_type_list (V4SI_type_node, V4SI_type_node, V2DI_type_node, NULL_TREE); tree v4si_ftype_v8hi_v8hi = build_function_type_list (V4SI_type_node, V8HI_type_node, V8HI_type_node, NULL_TREE); tree di_ftype_v8qi_v8qi = build_function_type_list (long_long_unsigned_type_node, V8QI_type_node, V8QI_type_node, NULL_TREE); tree v2di_ftype_v16qi_v16qi = build_function_type_list (V2DI_type_node, V16QI_type_node, V16QI_type_node, NULL_TREE); tree int_ftype_v16qi = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE); tree v16qi_ftype_pcchar = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE); tree void_ftype_pchar_v16qi = build_function_type_list (void_type_node, pchar_type_node, V16QI_type_node, NULL_TREE); 
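/* A node named FOO_ftype_BAR_BAZ denotes the type of a builtin returning FOO and taking (BAR, BAZ).  For instance, v2df_ftype_v2df_int is the type a builtin would have if declared in C roughly as __v2df fn (__v2df, int); the __v2df typedef name is only illustrative.  */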
tree v4si_ftype_pcint = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE); tree void_ftype_pcint_v4si = build_function_type_list (void_type_node, pcint_type_node, V4SI_type_node, NULL_TREE); tree v2di_ftype_v2di = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE); tree float80_type; tree float128_type; /* The __float80 type. */ if (TYPE_MODE (long_double_type_node) == XFmode) (*lang_hooks.types.register_builtin_type) (long_double_type_node, "__float80"); else { /* The __float80 type. */ float80_type = make_node (REAL_TYPE); TYPE_PRECISION (float80_type) = 96; layout_type (float80_type); (*lang_hooks.types.register_builtin_type) (float80_type, "__float80"); } float128_type = make_node (REAL_TYPE); TYPE_PRECISION (float128_type) = 128; layout_type (float128_type); (*lang_hooks.types.register_builtin_type) (float128_type, "__float128"); /* Add all builtins that are more or less simple operations on two operands. */ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++) { /* Use one of the operands; the target can have a different mode for mask-generating compares. */ enum machine_mode mode; tree type; if (d->name == 0) continue; mode = insn_data[d->icode].operand[1].mode; switch (mode) { case V16QImode: type = v16qi_ftype_v16qi_v16qi; break; case V8HImode: type = v8hi_ftype_v8hi_v8hi; break; case V4SImode: type = v4si_ftype_v4si_v4si; break; case V2DImode: type = v2di_ftype_v2di_v2di; break; case V2DFmode: type = v2df_ftype_v2df_v2df; break; case TImode: type = ti_ftype_ti_ti; break; case V4SFmode: type = v4sf_ftype_v4sf_v4sf; break; case V8QImode: type = v8qi_ftype_v8qi_v8qi; break; case V4HImode: type = v4hi_ftype_v4hi_v4hi; break; case V2SImode: type = v2si_ftype_v2si_v2si; break; case DImode: type = di_ftype_di_di; break; default: abort (); } /* Override for comparisons. */ if (d->icode == CODE_FOR_maskcmpv4sf3 || d->icode == CODE_FOR_maskncmpv4sf3 || d->icode == CODE_FOR_vmmaskcmpv4sf3 || d->icode == CODE_FOR_vmmaskncmpv4sf3) type = v4si_ftype_v4sf_v4sf; if (d->icode == CODE_FOR_maskcmpv2df3 || d->icode == CODE_FOR_maskncmpv2df3 || d->icode == CODE_FOR_vmmaskcmpv2df3 || d->icode == CODE_FOR_vmmaskncmpv2df3) type = v2di_ftype_v2df_v2df; def_builtin (d->mask, d->name, type, d->code); } /* Add the remaining MMX insns with somewhat more complicated types. */ def_builtin (MASK_MMX, "__builtin_ia32_mmx_zero", di_ftype_void, IX86_BUILTIN_MMX_ZERO); def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS); def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW); def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD); def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ); def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW); def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD); def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ); def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW); def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD); def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW); def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD); /* comi/ucomi insns. 
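Each bdesc_comi entry compares the low elements of its two vector operands and yields an int; the SSE variants operate on (V4SF, V4SF) and the SSE2 variants on (V2DF, V2DF), which is why the loop below picks the function type from d->mask.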
*/ for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++) if (d->mask == MASK_SSE2) def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code); else def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code); def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB); def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW); def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB); def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR); def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR); def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS); def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI); def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS); def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS); def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI); def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64); def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI); def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI); def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64); def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pextrw", int_ftype_v4hi_int, IX86_BUILTIN_PEXTRW); def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pinsrw", v4hi_ftype_v4hi_int_int, IX86_BUILTIN_PINSRW); def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ); def_builtin (MASK_SSE, "__builtin_ia32_loadaps", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADAPS); def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS); def_builtin (MASK_SSE, "__builtin_ia32_loadss", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADSS); def_builtin (MASK_SSE, "__builtin_ia32_storeaps", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREAPS); def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS); def_builtin (MASK_SSE, "__builtin_ia32_storess", void_ftype_pfloat_v4sf, IX86_BUILTIN_STORESS); def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS); def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS); def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS); def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS); def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS); def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB); def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS); def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ); def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE); def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW); def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, 
IX86_BUILTIN_RCPPS); def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS); def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS); def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS); def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS); def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS); def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS); /* Original 3DNow! */ def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS); def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB); def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID); def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC); def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD); def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ); def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE); def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT); def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX); def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN); def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL); def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP); def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1); def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2); def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT); def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1); def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB); def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR); def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD); def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW); /* 3DNow! extension as used in the Athlon CPU. 
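These builtins are guarded by MASK_3DNOW_A, which the option handling typically enables for Athlon-class processors on top of plain MASK_3DNOW.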
*/ def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW); def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC); def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC); def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW); def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF); def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI); def_builtin (MASK_SSE, "__builtin_ia32_setzerops", v4sf_ftype_void, IX86_BUILTIN_SSE_ZERO); /* SSE2 */ def_builtin (MASK_SSE2, "__builtin_ia32_pextrw128", int_ftype_v8hi_int, IX86_BUILTIN_PEXTRW128); def_builtin (MASK_SSE2, "__builtin_ia32_pinsrw128", v8hi_ftype_v8hi_int_int, IX86_BUILTIN_PINSRW128); def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU); def_builtin (MASK_SSE2, "__builtin_ia32_movq2dq", v2di_ftype_di, IX86_BUILTIN_MOVQ2DQ); def_builtin (MASK_SSE2, "__builtin_ia32_movdq2q", di_ftype_v2di, IX86_BUILTIN_MOVDQ2Q); def_builtin (MASK_SSE2, "__builtin_ia32_loadapd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADAPD); def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD); def_builtin (MASK_SSE2, "__builtin_ia32_loadsd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADSD); def_builtin (MASK_SSE2, "__builtin_ia32_storeapd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREAPD); def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD); def_builtin (MASK_SSE2, "__builtin_ia32_storesd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORESD); def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADHPD); def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADLPD); def_builtin (MASK_SSE2, "__builtin_ia32_storehpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STOREHPD); def_builtin (MASK_SSE2, "__builtin_ia32_storelpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STORELPD); def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD); def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128); def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI); def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD); def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ); def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD); def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW); def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW); def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128); def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD); def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD); def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD); def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD); def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS); def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ); def_builtin (MASK_SSE2, 
"__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI); def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS); def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ); def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI); def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD); def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI); def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI); def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64); def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64); def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ); def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD); def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ); def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD); def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD); def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS); def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD); def_builtin (MASK_SSE2, "__builtin_ia32_setpd1", v2df_ftype_double, IX86_BUILTIN_SETPD1); def_builtin (MASK_SSE2, "__builtin_ia32_setpd", v2df_ftype_double_double, IX86_BUILTIN_SETPD); def_builtin (MASK_SSE2, "__builtin_ia32_setzeropd", ti_ftype_void, IX86_BUILTIN_CLRPD); def_builtin (MASK_SSE2, "__builtin_ia32_loadpd1", v2df_ftype_pcdouble, IX86_BUILTIN_LOADPD1); def_builtin (MASK_SSE2, "__builtin_ia32_loadrpd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADRPD); def_builtin (MASK_SSE2, "__builtin_ia32_storepd1", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREPD1); def_builtin (MASK_SSE2, "__builtin_ia32_storerpd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORERPD); def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH); def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE); def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE); def_builtin (MASK_SSE2, "__builtin_ia32_loaddqa", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQA); def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU); def_builtin (MASK_SSE2, "__builtin_ia32_loadd", v4si_ftype_pcint, IX86_BUILTIN_LOADD); def_builtin (MASK_SSE2, "__builtin_ia32_storedqa", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQA); def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU); def_builtin (MASK_SSE2, "__builtin_ia32_stored", void_ftype_pcint_v4si, IX86_BUILTIN_STORED); def_builtin (MASK_SSE2, "__builtin_ia32_movq", v2di_ftype_v2di, IX86_BUILTIN_MOVQ); def_builtin (MASK_SSE, "__builtin_ia32_setzero128", v2di_ftype_void, IX86_BUILTIN_CLRTI); def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128); def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128); def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128); def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", 
v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128); def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128); def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128); def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128); def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128); def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128); def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128); def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128); def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128); def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128); def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128); def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128); def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128); def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128); def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128); def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128); /* Prescott New Instructions. */ def_builtin (MASK_SSE3, "__builtin_ia32_monitor", void_ftype_pcvoid_unsigned_unsigned, IX86_BUILTIN_MONITOR); def_builtin (MASK_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT); def_builtin (MASK_SSE3, "__builtin_ia32_movshdup", v4sf_ftype_v4sf, IX86_BUILTIN_MOVSHDUP); def_builtin (MASK_SSE3, "__builtin_ia32_movsldup", v4sf_ftype_v4sf, IX86_BUILTIN_MOVSLDUP); def_builtin (MASK_SSE3, "__builtin_ia32_lddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU); def_builtin (MASK_SSE3, "__builtin_ia32_loadddup", v2df_ftype_pcdouble, IX86_BUILTIN_LOADDDUP); def_builtin (MASK_SSE3, "__builtin_ia32_movddup", v2df_ftype_v2df, IX86_BUILTIN_MOVDDUP); } /* Errors in the source file can cause expand_expr to return const0_rtx where we expect a vector. To avoid crashing, use one of the vector clear instructions. */ static rtx safe_vector_operand (rtx x, enum machine_mode mode) { if (x != const0_rtx) return x; x = gen_reg_rtx (mode); if (VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode)) emit_insn (gen_mmx_clrdi (mode == DImode ? x : gen_rtx_SUBREG (DImode, x, 0))); else emit_insn (gen_sse_clrv4sf (mode == V4SFmode ? x : gen_rtx_SUBREG (V4SFmode, x, 0), CONST0_RTX (V4SFmode))); return x; } /* Subroutine of ix86_expand_builtin to take care of binop insns. */ static rtx ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target) { rtx pat; tree arg0 = TREE_VALUE (arglist); tree arg1 = TREE_VALUE (TREE_CHAIN (arglist)); rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); enum machine_mode tmode = insn_data[icode].operand[0].mode; enum machine_mode mode0 = insn_data[icode].operand[1].mode; enum machine_mode mode1 = insn_data[icode].operand[2].mode; if (VECTOR_MODE_P (mode0)) op0 = safe_vector_operand (op0, mode0); if (VECTOR_MODE_P (mode1)) op1 = safe_vector_operand (op1, mode1); if (! target || GET_MODE (target) != tmode || ! 
(*insn_data[icode].operand[0].predicate) (target, tmode)) target = gen_reg_rtx (tmode); if (GET_MODE (op1) == SImode && mode1 == TImode) { rtx x = gen_reg_rtx (V4SImode); emit_insn (gen_sse2_loadd (x, op1)); op1 = gen_lowpart (TImode, x); } /* In case the insn wants input operands in modes different from the result, abort. */ if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode) || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)) abort (); if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) op1 = copy_to_mode_reg (mode1, op1); /* In the commutative cases, both op0 and op1 are nonimmediate_operand, yet one of the two must not be a memory. This is normally enforced by expanders, but we didn't bother to create one here. */ if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM) op0 = copy_to_mode_reg (mode0, op0); pat = GEN_FCN (icode) (target, op0, op1); if (! pat) return 0; emit_insn (pat); return target; } /* Subroutine of ix86_expand_builtin to take care of stores. */ static rtx ix86_expand_store_builtin (enum insn_code icode, tree arglist) { rtx pat; tree arg0 = TREE_VALUE (arglist); tree arg1 = TREE_VALUE (TREE_CHAIN (arglist)); rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); enum machine_mode mode0 = insn_data[icode].operand[0].mode; enum machine_mode mode1 = insn_data[icode].operand[1].mode; if (VECTOR_MODE_P (mode1)) op1 = safe_vector_operand (op1, mode1); op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0)); op1 = copy_to_mode_reg (mode1, op1); pat = GEN_FCN (icode) (op0, op1); if (pat) emit_insn (pat); return 0; } /* Subroutine of ix86_expand_builtin to take care of unop insns. */ static rtx ix86_expand_unop_builtin (enum insn_code icode, tree arglist, rtx target, int do_load) { rtx pat; tree arg0 = TREE_VALUE (arglist); rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); enum machine_mode tmode = insn_data[icode].operand[0].mode; enum machine_mode mode0 = insn_data[icode].operand[1].mode; if (! target || GET_MODE (target) != tmode || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) target = gen_reg_rtx (tmode); if (do_load) op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0)); else { if (VECTOR_MODE_P (mode0)) op0 = safe_vector_operand (op0, mode0); if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); } pat = GEN_FCN (icode) (target, op0); if (! pat) return 0; emit_insn (pat); return target; } /* Subroutine of ix86_expand_builtin to take care of three special unop insns: sqrtss, rsqrtss, rcpss. */ static rtx ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target) { rtx pat; tree arg0 = TREE_VALUE (arglist); rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); enum machine_mode tmode = insn_data[icode].operand[0].mode; enum machine_mode mode0 = insn_data[icode].operand[1].mode; if (! target || GET_MODE (target) != tmode || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) target = gen_reg_rtx (tmode); if (VECTOR_MODE_P (mode0)) op0 = safe_vector_operand (op0, mode0); if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); op1 = op0; if (! (*insn_data[icode].operand[2].predicate) (op1, mode0)) op1 = copy_to_mode_reg (mode0, op1); pat = GEN_FCN (icode) (target, op0, op1); if (! 
pat) return 0; emit_insn (pat); return target; } /* Subroutine of ix86_expand_builtin to take care of comparison insns. */ static rtx ix86_expand_sse_compare (const struct builtin_description *d, tree arglist, rtx target) { rtx pat; tree arg0 = TREE_VALUE (arglist); tree arg1 = TREE_VALUE (TREE_CHAIN (arglist)); rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); rtx op2; enum machine_mode tmode = insn_data[d->icode].operand[0].mode; enum machine_mode mode0 = insn_data[d->icode].operand[1].mode; enum machine_mode mode1 = insn_data[d->icode].operand[2].mode; enum rtx_code comparison = d->comparison; if (VECTOR_MODE_P (mode0)) op0 = safe_vector_operand (op0, mode0); if (VECTOR_MODE_P (mode1)) op1 = safe_vector_operand (op1, mode1); /* Swap operands if we have a comparison that isn't available in hardware. */ if (d->flag) { rtx tmp = gen_reg_rtx (mode1); emit_move_insn (tmp, op1); op1 = op0; op0 = tmp; } if (! target || GET_MODE (target) != tmode || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode)) target = gen_reg_rtx (tmode); if (! (*insn_data[d->icode].operand[1].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); if (! (*insn_data[d->icode].operand[2].predicate) (op1, mode1)) op1 = copy_to_mode_reg (mode1, op1); op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1); pat = GEN_FCN (d->icode) (target, op0, op1, op2); if (! pat) return 0; emit_insn (pat); return target; } /* Subroutine of ix86_expand_builtin to take care of comi insns. */ static rtx ix86_expand_sse_comi (const struct builtin_description *d, tree arglist, rtx target) { rtx pat; tree arg0 = TREE_VALUE (arglist); tree arg1 = TREE_VALUE (TREE_CHAIN (arglist)); rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); rtx op2; enum machine_mode mode0 = insn_data[d->icode].operand[0].mode; enum machine_mode mode1 = insn_data[d->icode].operand[1].mode; enum rtx_code comparison = d->comparison; if (VECTOR_MODE_P (mode0)) op0 = safe_vector_operand (op0, mode0); if (VECTOR_MODE_P (mode1)) op1 = safe_vector_operand (op1, mode1); /* Swap operands if we have a comparison that isn't available in hardware. */ if (d->flag) { rtx tmp = op1; op1 = op0; op0 = tmp; } target = gen_reg_rtx (SImode); emit_move_insn (target, const0_rtx); target = gen_rtx_SUBREG (QImode, target, 0); if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1)) op1 = copy_to_mode_reg (mode1, op1); op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1); pat = GEN_FCN (d->icode) (op0, op1); if (! pat) return 0; emit_insn (pat); emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target), gen_rtx_fmt_ee (comparison, QImode, SET_DEST (pat), const0_rtx))); return SUBREG_REG (target); } /* Expand an expression EXP that calls a built-in function, with result going to TARGET if that's convenient (and in mode MODE if that's convenient). SUBTARGET may be used as the target for computing one of EXP's operands. IGNORE is nonzero if the value is to be ignored. 
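Builtins that need special handling are expanded directly in the switch statement below; everything else falls through to the generic bdesc_2arg, bdesc_1arg and bdesc_comi tables at the end of the function.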
*/ rtx ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, enum machine_mode mode ATTRIBUTE_UNUSED, int ignore ATTRIBUTE_UNUSED) { const struct builtin_description *d; size_t i; enum insn_code icode; tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); tree arglist = TREE_OPERAND (exp, 1); tree arg0, arg1, arg2; rtx op0, op1, op2, pat; enum machine_mode tmode, mode0, mode1, mode2; unsigned int fcode = DECL_FUNCTION_CODE (fndecl); switch (fcode) { case IX86_BUILTIN_EMMS: emit_insn (gen_emms ()); return 0; case IX86_BUILTIN_SFENCE: emit_insn (gen_sfence ()); return 0; case IX86_BUILTIN_PEXTRW: case IX86_BUILTIN_PEXTRW128: icode = (fcode == IX86_BUILTIN_PEXTRW ? CODE_FOR_mmx_pextrw : CODE_FOR_sse2_pextrw); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); tmode = insn_data[icode].operand[0].mode; mode0 = insn_data[icode].operand[1].mode; mode1 = insn_data[icode].operand[2].mode; if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) { error ("selector must be an integer constant in the range 0..%i", fcode == IX86_BUILTIN_PEXTRW ? 3:7); return gen_reg_rtx (tmode); } if (target == 0 || GET_MODE (target) != tmode || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) target = gen_reg_rtx (tmode); pat = GEN_FCN (icode) (target, op0, op1); if (! pat) return 0; emit_insn (pat); return target; case IX86_BUILTIN_PINSRW: case IX86_BUILTIN_PINSRW128: icode = (fcode == IX86_BUILTIN_PINSRW ? CODE_FOR_mmx_pinsrw : CODE_FOR_sse2_pinsrw); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); tmode = insn_data[icode].operand[0].mode; mode0 = insn_data[icode].operand[1].mode; mode1 = insn_data[icode].operand[2].mode; mode2 = insn_data[icode].operand[3].mode; if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) op1 = copy_to_mode_reg (mode1, op1); if (! (*insn_data[icode].operand[3].predicate) (op2, mode2)) { error ("selector must be an integer constant in the range 0..%i", fcode == IX86_BUILTIN_PINSRW ? 15:255); return const0_rtx; } if (target == 0 || GET_MODE (target) != tmode || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) target = gen_reg_rtx (tmode); pat = GEN_FCN (icode) (target, op0, op1, op2); if (! pat) return 0; emit_insn (pat); return target; case IX86_BUILTIN_MASKMOVQ: case IX86_BUILTIN_MASKMOVDQU: icode = (fcode == IX86_BUILTIN_MASKMOVQ ? (TARGET_64BIT ? CODE_FOR_mmx_maskmovq_rex : CODE_FOR_mmx_maskmovq) : (TARGET_64BIT ? CODE_FOR_sse2_maskmovdqu_rex64 : CODE_FOR_sse2_maskmovdqu)); /* Note the arg order is different from the operand order. */ arg1 = TREE_VALUE (arglist); arg2 = TREE_VALUE (TREE_CHAIN (arglist)); arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); mode0 = insn_data[icode].operand[0].mode; mode1 = insn_data[icode].operand[1].mode; mode2 = insn_data[icode].operand[2].mode; if (! 
(*insn_data[icode].operand[0].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); if (! (*insn_data[icode].operand[1].predicate) (op1, mode1)) op1 = copy_to_mode_reg (mode1, op1); if (! (*insn_data[icode].operand[2].predicate) (op2, mode2)) op2 = copy_to_mode_reg (mode2, op2); pat = GEN_FCN (icode) (op0, op1, op2); if (! pat) return 0; emit_insn (pat); return 0; case IX86_BUILTIN_SQRTSS: return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv4sf2, arglist, target); case IX86_BUILTIN_RSQRTSS: return ix86_expand_unop1_builtin (CODE_FOR_vmrsqrtv4sf2, arglist, target); case IX86_BUILTIN_RCPSS: return ix86_expand_unop1_builtin (CODE_FOR_vmrcpv4sf2, arglist, target); case IX86_BUILTIN_LOADAPS: return ix86_expand_unop_builtin (CODE_FOR_sse_movaps, arglist, target, 1); case IX86_BUILTIN_LOADUPS: return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1); case IX86_BUILTIN_STOREAPS: return ix86_expand_store_builtin (CODE_FOR_sse_movaps, arglist); case IX86_BUILTIN_STOREUPS: return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist); case IX86_BUILTIN_LOADSS: return ix86_expand_unop_builtin (CODE_FOR_sse_loadss, arglist, target, 1); case IX86_BUILTIN_STORESS: return ix86_expand_store_builtin (CODE_FOR_sse_storess, arglist); case IX86_BUILTIN_LOADHPS: case IX86_BUILTIN_LOADLPS: case IX86_BUILTIN_LOADHPD: case IX86_BUILTIN_LOADLPD: icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_movhps : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_movlps : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_movhpd : CODE_FOR_sse2_movsd); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); tmode = insn_data[icode].operand[0].mode; mode0 = insn_data[icode].operand[1].mode; mode1 = insn_data[icode].operand[2].mode; if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1)); if (target == 0 || GET_MODE (target) != tmode || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) target = gen_reg_rtx (tmode); pat = GEN_FCN (icode) (target, op0, op1); if (! pat) return 0; emit_insn (pat); return target; case IX86_BUILTIN_STOREHPS: case IX86_BUILTIN_STORELPS: case IX86_BUILTIN_STOREHPD: case IX86_BUILTIN_STORELPD: icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_movhps : fcode == IX86_BUILTIN_STORELPS ? CODE_FOR_sse_movlps : fcode == IX86_BUILTIN_STOREHPD ? CODE_FOR_sse2_movhpd : CODE_FOR_sse2_movsd); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); mode0 = insn_data[icode].operand[1].mode; mode1 = insn_data[icode].operand[2].mode; op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0)); if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) op1 = copy_to_mode_reg (mode1, op1); pat = GEN_FCN (icode) (op0, op0, op1); if (! 
pat) return 0; emit_insn (pat); return 0; case IX86_BUILTIN_MOVNTPS: return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist); case IX86_BUILTIN_MOVNTQ: return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist); case IX86_BUILTIN_LDMXCSR: op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0); target = assign_386_stack_local (SImode, 0); emit_move_insn (target, op0); emit_insn (gen_ldmxcsr (target)); return 0; case IX86_BUILTIN_STMXCSR: target = assign_386_stack_local (SImode, 0); emit_insn (gen_stmxcsr (target)); return copy_to_mode_reg (SImode, target); case IX86_BUILTIN_SHUFPS: case IX86_BUILTIN_SHUFPD: icode = (fcode == IX86_BUILTIN_SHUFPS ? CODE_FOR_sse_shufps : CODE_FOR_sse2_shufpd); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); tmode = insn_data[icode].operand[0].mode; mode0 = insn_data[icode].operand[1].mode; mode1 = insn_data[icode].operand[2].mode; mode2 = insn_data[icode].operand[3].mode; if (! (*insn_data[icode].operand[1].predicate) (op0, mode0)) op0 = copy_to_mode_reg (mode0, op0); if (! (*insn_data[icode].operand[2].predicate) (op1, mode1)) op1 = copy_to_mode_reg (mode1, op1); if (! (*insn_data[icode].operand[3].predicate) (op2, mode2)) { /* @@@ better error message */ error ("mask must be an immediate"); return gen_reg_rtx (tmode); } if (target == 0 || GET_MODE (target) != tmode || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) target = gen_reg_rtx (tmode); pat = GEN_FCN (icode) (target, op0, op1, op2); if (! pat) return 0; emit_insn (pat); return target; case IX86_BUILTIN_PSHUFW: case IX86_BUILTIN_PSHUFD: case IX86_BUILTIN_PSHUFHW: case IX86_BUILTIN_PSHUFLW: icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd : CODE_FOR_mmx_pshufw); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); tmode = insn_data[icode].operand[0].mode; mode1 = insn_data[icode].operand[1].mode; mode2 = insn_data[icode].operand[2].mode; if (! (*insn_data[icode].operand[1].predicate) (op0, mode1)) op0 = copy_to_mode_reg (mode1, op0); if (! (*insn_data[icode].operand[2].predicate) (op1, mode2)) { /* @@@ better error message */ error ("mask must be an immediate"); return const0_rtx; } if (target == 0 || GET_MODE (target) != tmode || ! (*insn_data[icode].operand[0].predicate) (target, tmode)) target = gen_reg_rtx (tmode); pat = GEN_FCN (icode) (target, op0, op1); if (! pat) return 0; emit_insn (pat); return target; case IX86_BUILTIN_PSLLDQI128: case IX86_BUILTIN_PSRLDQI128: icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3 : CODE_FOR_sse2_lshrti3); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); tmode = insn_data[icode].operand[0].mode; mode1 = insn_data[icode].operand[1].mode; mode2 = insn_data[icode].operand[2].mode; if (! (*insn_data[icode].operand[1].predicate) (op0, mode1)) { op0 = copy_to_reg (op0); op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0); } if (! 
(*insn_data[icode].operand[2].predicate) (op1, mode2)) { error ("shift must be an immediate"); return const0_rtx; } target = gen_reg_rtx (V2DImode); pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1); if (! pat) return 0; emit_insn (pat); return target; case IX86_BUILTIN_FEMMS: emit_insn (gen_femms ()); return NULL_RTX; case IX86_BUILTIN_PAVGUSB: return ix86_expand_binop_builtin (CODE_FOR_pavgusb, arglist, target); case IX86_BUILTIN_PF2ID: return ix86_expand_unop_builtin (CODE_FOR_pf2id, arglist, target, 0); case IX86_BUILTIN_PFACC: return ix86_expand_binop_builtin (CODE_FOR_pfacc, arglist, target); case IX86_BUILTIN_PFADD: return ix86_expand_binop_builtin (CODE_FOR_addv2sf3, arglist, target); case IX86_BUILTIN_PFCMPEQ: return ix86_expand_binop_builtin (CODE_FOR_eqv2sf3, arglist, target); case IX86_BUILTIN_PFCMPGE: return ix86_expand_binop_builtin (CODE_FOR_gev2sf3, arglist, target); case IX86_BUILTIN_PFCMPGT: return ix86_expand_binop_builtin (CODE_FOR_gtv2sf3, arglist, target); case IX86_BUILTIN_PFMAX: return ix86_expand_binop_builtin (CODE_FOR_pfmaxv2sf3, arglist, target); case IX86_BUILTIN_PFMIN: return ix86_expand_binop_builtin (CODE_FOR_pfminv2sf3, arglist, target); case IX86_BUILTIN_PFMUL: return ix86_expand_binop_builtin (CODE_FOR_mulv2sf3, arglist, target); case IX86_BUILTIN_PFRCP: return ix86_expand_unop_builtin (CODE_FOR_pfrcpv2sf2, arglist, target, 0); case IX86_BUILTIN_PFRCPIT1: return ix86_expand_binop_builtin (CODE_FOR_pfrcpit1v2sf3, arglist, target); case IX86_BUILTIN_PFRCPIT2: return ix86_expand_binop_builtin (CODE_FOR_pfrcpit2v2sf3, arglist, target); case IX86_BUILTIN_PFRSQIT1: return ix86_expand_binop_builtin (CODE_FOR_pfrsqit1v2sf3, arglist, target); case IX86_BUILTIN_PFRSQRT: return ix86_expand_unop_builtin (CODE_FOR_pfrsqrtv2sf2, arglist, target, 0); case IX86_BUILTIN_PFSUB: return ix86_expand_binop_builtin (CODE_FOR_subv2sf3, arglist, target); case IX86_BUILTIN_PFSUBR: return ix86_expand_binop_builtin (CODE_FOR_subrv2sf3, arglist, target); case IX86_BUILTIN_PI2FD: return ix86_expand_unop_builtin (CODE_FOR_floatv2si2, arglist, target, 0); case IX86_BUILTIN_PMULHRW: return ix86_expand_binop_builtin (CODE_FOR_pmulhrwv4hi3, arglist, target); case IX86_BUILTIN_PF2IW: return ix86_expand_unop_builtin (CODE_FOR_pf2iw, arglist, target, 0); case IX86_BUILTIN_PFNACC: return ix86_expand_binop_builtin (CODE_FOR_pfnacc, arglist, target); case IX86_BUILTIN_PFPNACC: return ix86_expand_binop_builtin (CODE_FOR_pfpnacc, arglist, target); case IX86_BUILTIN_PI2FW: return ix86_expand_unop_builtin (CODE_FOR_pi2fw, arglist, target, 0); case IX86_BUILTIN_PSWAPDSI: return ix86_expand_unop_builtin (CODE_FOR_pswapdv2si2, arglist, target, 0); case IX86_BUILTIN_PSWAPDSF: return ix86_expand_unop_builtin (CODE_FOR_pswapdv2sf2, arglist, target, 0); case IX86_BUILTIN_SSE_ZERO: target = gen_reg_rtx (V4SFmode); emit_insn (gen_sse_clrv4sf (target, CONST0_RTX (V4SFmode))); return target; case IX86_BUILTIN_MMX_ZERO: target = gen_reg_rtx (DImode); emit_insn (gen_mmx_clrdi (target)); return target; case IX86_BUILTIN_CLRTI: target = gen_reg_rtx (V2DImode); emit_insn (gen_sse2_clrti (simplify_gen_subreg (TImode, target, V2DImode, 0))); return target; case IX86_BUILTIN_SQRTSD: return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv2df2, arglist, target); case IX86_BUILTIN_LOADAPD: return ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist, target, 1); case IX86_BUILTIN_LOADUPD: return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1); case IX86_BUILTIN_STOREAPD: 
return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist); case IX86_BUILTIN_STOREUPD: return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist); case IX86_BUILTIN_LOADSD: return ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist, target, 1); case IX86_BUILTIN_STORESD: return ix86_expand_store_builtin (CODE_FOR_sse2_storesd, arglist); case IX86_BUILTIN_SETPD1: target = assign_386_stack_local (DFmode, 0); arg0 = TREE_VALUE (arglist); emit_move_insn (adjust_address (target, DFmode, 0), expand_expr (arg0, NULL_RTX, VOIDmode, 0)); op0 = gen_reg_rtx (V2DFmode); emit_insn (gen_sse2_loadsd (op0, adjust_address (target, V2DFmode, 0))); emit_insn (gen_sse2_shufpd (op0, op0, op0, const0_rtx)); return op0; case IX86_BUILTIN_SETPD: target = assign_386_stack_local (V2DFmode, 0); arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); emit_move_insn (adjust_address (target, DFmode, 0), expand_expr (arg0, NULL_RTX, VOIDmode, 0)); emit_move_insn (adjust_address (target, DFmode, 8), expand_expr (arg1, NULL_RTX, VOIDmode, 0)); op0 = gen_reg_rtx (V2DFmode); emit_insn (gen_sse2_movapd (op0, target)); return op0; case IX86_BUILTIN_LOADRPD: target = ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist, gen_reg_rtx (V2DFmode), 1); emit_insn (gen_sse2_shufpd (target, target, target, const1_rtx)); return target; case IX86_BUILTIN_LOADPD1: target = ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist, gen_reg_rtx (V2DFmode), 1); emit_insn (gen_sse2_shufpd (target, target, target, const0_rtx)); return target; case IX86_BUILTIN_STOREPD1: return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist); case IX86_BUILTIN_STORERPD: return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist); case IX86_BUILTIN_CLRPD: target = gen_reg_rtx (V2DFmode); emit_insn (gen_sse_clrv2df (target)); return target; case IX86_BUILTIN_MFENCE: emit_insn (gen_sse2_mfence ()); return 0; case IX86_BUILTIN_LFENCE: emit_insn (gen_sse2_lfence ()); return 0; case IX86_BUILTIN_CLFLUSH: arg0 = TREE_VALUE (arglist); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); icode = CODE_FOR_sse2_clflush; if (! 
(*insn_data[icode].operand[0].predicate) (op0, Pmode)) op0 = copy_to_mode_reg (Pmode, op0); emit_insn (gen_sse2_clflush (op0)); return 0; case IX86_BUILTIN_MOVNTPD: return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist); case IX86_BUILTIN_MOVNTDQ: return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist); case IX86_BUILTIN_MOVNTI: return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist); case IX86_BUILTIN_LOADDQA: return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqa, arglist, target, 1); case IX86_BUILTIN_LOADDQU: return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1); case IX86_BUILTIN_LOADD: return ix86_expand_unop_builtin (CODE_FOR_sse2_loadd, arglist, target, 1); case IX86_BUILTIN_STOREDQA: return ix86_expand_store_builtin (CODE_FOR_sse2_movdqa, arglist); case IX86_BUILTIN_STOREDQU: return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist); case IX86_BUILTIN_STORED: return ix86_expand_store_builtin (CODE_FOR_sse2_stored, arglist); case IX86_BUILTIN_MONITOR: arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist))); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0); if (!REG_P (op0)) op0 = copy_to_mode_reg (SImode, op0); if (!REG_P (op1)) op1 = copy_to_mode_reg (SImode, op1); if (!REG_P (op2)) op2 = copy_to_mode_reg (SImode, op2); emit_insn (gen_monitor (op0, op1, op2)); return 0; case IX86_BUILTIN_MWAIT: arg0 = TREE_VALUE (arglist); arg1 = TREE_VALUE (TREE_CHAIN (arglist)); op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0); op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0); if (!REG_P (op0)) op0 = copy_to_mode_reg (SImode, op0); if (!REG_P (op1)) op1 = copy_to_mode_reg (SImode, op1); emit_insn (gen_mwait (op0, op1)); return 0; case IX86_BUILTIN_LOADDDUP: return ix86_expand_unop_builtin (CODE_FOR_loadddup, arglist, target, 1); case IX86_BUILTIN_LDDQU: return ix86_expand_unop_builtin (CODE_FOR_lddqu, arglist, target, 1); default: break; } for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++) if (d->code == fcode) { /* Compares are treated specially. */ if (d->icode == CODE_FOR_maskcmpv4sf3 || d->icode == CODE_FOR_vmmaskcmpv4sf3 || d->icode == CODE_FOR_maskncmpv4sf3 || d->icode == CODE_FOR_vmmaskncmpv4sf3 || d->icode == CODE_FOR_maskcmpv2df3 || d->icode == CODE_FOR_vmmaskcmpv2df3 || d->icode == CODE_FOR_maskncmpv2df3 || d->icode == CODE_FOR_vmmaskncmpv2df3) return ix86_expand_sse_compare (d, arglist, target); return ix86_expand_binop_builtin (d->icode, arglist, target); } for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++) if (d->code == fcode) return ix86_expand_unop_builtin (d->icode, arglist, target, 0); for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++) if (d->code == fcode) return ix86_expand_sse_comi (d, arglist, target); /* @@@ Should really do something sensible here. */ return 0; } /* Store OPERAND to the memory after reload is completed. This means that we can't easily use assign_stack_local. 
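On TARGET_RED_ZONE targets the operand is simply stored into the red zone below the stack pointer; otherwise it is pushed with a pre-decrement of the stack pointer and the caller is expected to release the slot again with ix86_free_from_memory.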
*/ rtx ix86_force_to_memory (enum machine_mode mode, rtx operand) { rtx result; if (!reload_completed) abort (); if (TARGET_RED_ZONE) { result = gen_rtx_MEM (mode, gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-RED_ZONE_SIZE))); emit_move_insn (result, operand); } else if (!TARGET_RED_ZONE && TARGET_64BIT) { switch (mode) { case HImode: case SImode: operand = gen_lowpart (DImode, operand); /* FALLTHRU */ case DImode: emit_insn ( gen_rtx_SET (VOIDmode, gen_rtx_MEM (DImode, gen_rtx_PRE_DEC (DImode, stack_pointer_rtx)), operand)); break; default: abort (); } result = gen_rtx_MEM (mode, stack_pointer_rtx); } else { switch (mode) { case DImode: { rtx operands[2]; split_di (&operand, 1, operands, operands + 1); emit_insn ( gen_rtx_SET (VOIDmode, gen_rtx_MEM (SImode, gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx)), operands[1])); emit_insn ( gen_rtx_SET (VOIDmode, gen_rtx_MEM (SImode, gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx)), operands[0])); } break; case HImode: /* It is better to store HImodes as SImodes. */ if (!TARGET_PARTIAL_REG_STALL) operand = gen_lowpart (SImode, operand); /* FALLTHRU */ case SImode: emit_insn ( gen_rtx_SET (VOIDmode, gen_rtx_MEM (GET_MODE (operand), gen_rtx_PRE_DEC (SImode, stack_pointer_rtx)), operand)); break; default: abort (); } result = gen_rtx_MEM (mode, stack_pointer_rtx); } return result; } /* Free operand from the memory. */ void ix86_free_from_memory (enum machine_mode mode) { if (!TARGET_RED_ZONE) { int size; if (mode == DImode || TARGET_64BIT) size = 8; else if (mode == HImode && TARGET_PARTIAL_REG_STALL) size = 2; else size = 4; /* Use LEA to deallocate stack space. In peephole2 it will be converted to pop or add instruction if registers are available. */ emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (size)))); } } /* Put float CONST_DOUBLE in the constant pool instead of fp regs. QImode must go into class Q_REGS. Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and movdf to do mem-to-mem moves through integer regs. */ enum reg_class ix86_preferred_reload_class (rtx x, enum reg_class class) { if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x))) return NO_REGS; if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode) { /* SSE can't load any constant directly yet. */ if (SSE_CLASS_P (class)) return NO_REGS; /* Floats can load 0 and 1. */ if (MAYBE_FLOAT_CLASS_P (class) && standard_80387_constant_p (x)) { /* Limit class to non-SSE. Use GENERAL_REGS if possible. */ if (MAYBE_SSE_CLASS_P (class)) return (reg_class_subset_p (class, GENERAL_REGS) ? GENERAL_REGS : FLOAT_REGS); else return class; } /* General regs can load everything. */ if (reg_class_subset_p (class, GENERAL_REGS)) return GENERAL_REGS; /* In case we haven't resolved FLOAT or SSE yet, give up. */ if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)) return NO_REGS; } if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x)) return NO_REGS; if (GET_MODE (x) == QImode && ! reg_class_subset_p (class, Q_REGS)) return Q_REGS; return class; } /* If we are copying between general and FP registers, we need a memory location. The same is true for SSE and MMX registers. The macro can't work reliably when one of the CLASSES is class containing registers from multiple units (SSE, MMX, integer). We avoid this by never combining those units in single alternative in the machine description. Ensure that this constraint holds to avoid unexpected surprises. 
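For example, copying a DFmode value between an SSE register and an x87 register has to go through a stack slot, since there is no direct move instruction between those units.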
When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not enforce these sanity checks. */ int ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2, enum machine_mode mode, int strict) { if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1) || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2) || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1) || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2) || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1) || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2)) { if (strict) abort (); else return 1; } return (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2) || ((SSE_CLASS_P (class1) != SSE_CLASS_P (class2) || MMX_CLASS_P (class1) != MMX_CLASS_P (class2)) && ((mode != SImode && (mode != DImode || !TARGET_64BIT)) || (!TARGET_INTER_UNIT_MOVES && !optimize_size)))); } /* Return the cost of moving data from a register in class CLASS1 to one in class CLASS2. It is not required that the cost always equal 2 when FROM is the same as TO; on some machines it is expensive to move between registers if they are not general registers. */ int ix86_register_move_cost (enum machine_mode mode, enum reg_class class1, enum reg_class class2) { /* In case we require secondary memory, compute cost of the store followed by load. In order to avoid bad register allocation choices, we need for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */ if (ix86_secondary_memory_needed (class1, class2, mode, 0)) { int cost = 1; cost += MAX (MEMORY_MOVE_COST (mode, class1, 0), MEMORY_MOVE_COST (mode, class1, 1)); cost += MAX (MEMORY_MOVE_COST (mode, class2, 0), MEMORY_MOVE_COST (mode, class2, 1)); /* In case of copying from general_purpose_register we may emit multiple stores followed by single load causing memory size mismatch stall. Count this as arbitrarily high cost of 20. */ if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode)) cost += 20; /* In the case of FP/MMX moves, the registers actually overlap, and we have to switch modes in order to treat them differently. */ if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2)) || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1))) cost += 20; return cost; } /* Moves between SSE/MMX and integer unit are expensive. */ if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2) || SSE_CLASS_P (class1) != SSE_CLASS_P (class2)) return ix86_cost->mmxsse_to_integer; if (MAYBE_FLOAT_CLASS_P (class1)) return ix86_cost->fp_move; if (MAYBE_SSE_CLASS_P (class1)) return ix86_cost->sse_move; if (MAYBE_MMX_CLASS_P (class1)) return ix86_cost->mmx_move; return 2; } /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */ int ix86_hard_regno_mode_ok (int regno, enum machine_mode mode) { /* Flags and only flags can only hold CCmode values. */ if (CC_REGNO_P (regno)) return GET_MODE_CLASS (mode) == MODE_CC; if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (mode) == MODE_RANDOM || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT) return 0; if (FP_REGNO_P (regno)) return VALID_FP_MODE_P (mode); if (SSE_REGNO_P (regno)) return (TARGET_SSE ? VALID_SSE_REG_MODE (mode) : 0); if (MMX_REGNO_P (regno)) return (TARGET_MMX ? VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode) : 0); /* We handle both integer and floats in the general purpose registers. In future we should be able to handle vector modes as well. 
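Note that in 32-bit mode only the first four integer registers have QImode variants, which is why QImode values get the extra treatment below.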
*/ if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode)) return 0; /* Take care for QImode values - they can be in non-QI regs, but then they do cause partial register stalls. */ if (regno < 4 || mode != QImode || TARGET_64BIT) return 1; return reload_in_progress || reload_completed || !TARGET_PARTIAL_REG_STALL; } /* Return the cost of moving data of mode M between a register and memory. A value of 2 is the default; this cost is relative to those in `REGISTER_MOVE_COST'. If moving between registers and memory is more expensive than between two registers, you should define this macro to express the relative cost. Model also increased moving costs of QImode registers in non Q_REGS classes. */ int ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in) { if (FLOAT_CLASS_P (class)) { int index; switch (mode) { case SFmode: index = 0; break; case DFmode: index = 1; break; case XFmode: index = 2; break; default: return 100; } return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index]; } if (SSE_CLASS_P (class)) { int index; switch (GET_MODE_SIZE (mode)) { case 4: index = 0; break; case 8: index = 1; break; case 16: index = 2; break; default: return 100; } return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index]; } if (MMX_CLASS_P (class)) { int index; switch (GET_MODE_SIZE (mode)) { case 4: index = 0; break; case 8: index = 1; break; default: return 100; } return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index]; } switch (GET_MODE_SIZE (mode)) { case 1: if (in) return (Q_CLASS_P (class) ? ix86_cost->int_load[0] : ix86_cost->movzbl_load); else return (Q_CLASS_P (class) ? ix86_cost->int_store[0] : ix86_cost->int_store[0] + 4); break; case 2: return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1]; default: /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */ if (mode == TFmode) mode = XFmode; return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2]) * (((int) GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)); } } /* Compute a (partial) cost for rtx X. Return true if the complete cost has been computed, and false if subexpressions should be scanned. In either case, *TOTAL contains the cost result. */ static bool ix86_rtx_costs (rtx x, int code, int outer_code, int *total) { enum machine_mode mode = GET_MODE (x); switch (code) { case CONST_INT: case CONST: case LABEL_REF: case SYMBOL_REF: if (TARGET_64BIT && !x86_64_sign_extended_value (x)) *total = 3; else if (TARGET_64BIT && !x86_64_zero_extended_value (x)) *total = 2; else if (flag_pic && SYMBOLIC_CONST (x) && (!TARGET_64BIT || (GET_CODE (x) != LABEL_REF && (GET_CODE (x) != SYMBOL_REF || !SYMBOL_REF_LOCAL_P (x))))) *total = 1; else *total = 0; return true; case CONST_DOUBLE: if (mode == VOIDmode) *total = 0; else switch (standard_80387_constant_p (x)) { case 1: /* 0.0 */ *total = 1; break; default: /* Other constants */ *total = 2; break; case 0: case -1: /* Start with (MEM (SYMBOL_REF)), since that's where it'll probably end up. Add a penalty for size. */ *total = (COSTS_N_INSNS (1) + (flag_pic != 0 && !TARGET_64BIT) + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2)); break; } return true; case ZERO_EXTEND: /* Zero extension is often completely free on x86_64, so make it as cheap as possible. 
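Elsewhere it is costed as a movzx, or as an and when TARGET_ZERO_EXTEND_WITH_AND is set.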
*/ if (TARGET_64BIT && mode == DImode && GET_MODE (XEXP (x, 0)) == SImode) *total = 1; else if (TARGET_ZERO_EXTEND_WITH_AND) *total = COSTS_N_INSNS (ix86_cost->add); else *total = COSTS_N_INSNS (ix86_cost->movzx); return false; case SIGN_EXTEND: *total = COSTS_N_INSNS (ix86_cost->movsx); return false; case ASHIFT: if (GET_CODE (XEXP (x, 1)) == CONST_INT && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT)) { HOST_WIDE_INT value = INTVAL (XEXP (x, 1)); if (value == 1) { *total = COSTS_N_INSNS (ix86_cost->add); return false; } if ((value == 2 || value == 3) && ix86_cost->lea <= ix86_cost->shift_const) { *total = COSTS_N_INSNS (ix86_cost->lea); return false; } } /* FALLTHRU */ case ROTATE: case ASHIFTRT: case LSHIFTRT: case ROTATERT: if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode) { if (GET_CODE (XEXP (x, 1)) == CONST_INT) { if (INTVAL (XEXP (x, 1)) > 32) *total = COSTS_N_INSNS(ix86_cost->shift_const + 2); else *total = COSTS_N_INSNS(ix86_cost->shift_const * 2); } else { if (GET_CODE (XEXP (x, 1)) == AND) *total = COSTS_N_INSNS(ix86_cost->shift_var * 2); else *total = COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2); } } else { if (GET_CODE (XEXP (x, 1)) == CONST_INT) *total = COSTS_N_INSNS (ix86_cost->shift_const); else *total = COSTS_N_INSNS (ix86_cost->shift_var); } return false; case MULT: if (FLOAT_MODE_P (mode)) { *total = COSTS_N_INSNS (ix86_cost->fmul); return false; } else { rtx op0 = XEXP (x, 0); rtx op1 = XEXP (x, 1); int nbits; if (GET_CODE (XEXP (x, 1)) == CONST_INT) { unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1)); for (nbits = 0; value != 0; value &= value - 1) nbits++; } else /* This is arbitrary. */ nbits = 7; /* Compute costs correctly for widening multiplication. */ if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op1) == ZERO_EXTEND) && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2 == GET_MODE_SIZE (mode)) { int is_mulwiden = 0; enum machine_mode inner_mode = GET_MODE (op0); if (GET_CODE (op0) == GET_CODE (op1)) is_mulwiden = 1, op1 = XEXP (op1, 0); else if (GET_CODE (op1) == CONST_INT) { if (GET_CODE (op0) == SIGN_EXTEND) is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode) == INTVAL (op1); else is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode)); } if (is_mulwiden) op0 = XEXP (op0, 0), mode = GET_MODE (op0); } *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)] + nbits * ix86_cost->mult_bit) + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code); return true; } case DIV: case UDIV: case MOD: case UMOD: if (FLOAT_MODE_P (mode)) *total = COSTS_N_INSNS (ix86_cost->fdiv); else *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]); return false; case PLUS: if (FLOAT_MODE_P (mode)) *total = COSTS_N_INSNS (ix86_cost->fadd); else if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode)) { if (GET_CODE (XEXP (x, 0)) == PLUS && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT && CONSTANT_P (XEXP (x, 1))) { HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)); if (val == 2 || val == 4 || val == 8) { *total = COSTS_N_INSNS (ix86_cost->lea); *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code); *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0), outer_code); *total += rtx_cost (XEXP (x, 1), outer_code); return true; } } else if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) { HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1)); if (val == 2 || val == 4 || val == 8) { *total = COSTS_N_INSNS (ix86_cost->lea); 
*total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code); *total += rtx_cost (XEXP (x, 1), outer_code); return true; } } else if (GET_CODE (XEXP (x, 0)) == PLUS) { *total = COSTS_N_INSNS (ix86_cost->lea); *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code); *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code); *total += rtx_cost (XEXP (x, 1), outer_code); return true; } } /* FALLTHRU */ case MINUS: if (FLOAT_MODE_P (mode)) { *total = COSTS_N_INSNS (ix86_cost->fadd); return false; } /* FALLTHRU */ case AND: case IOR: case XOR: if (!TARGET_64BIT && mode == DImode) { *total = (COSTS_N_INSNS (ix86_cost->add) * 2 + (rtx_cost (XEXP (x, 0), outer_code) << (GET_MODE (XEXP (x, 0)) != DImode)) + (rtx_cost (XEXP (x, 1), outer_code) << (GET_MODE (XEXP (x, 1)) != DImode))); return true; } /* FALLTHRU */ case NEG: if (FLOAT_MODE_P (mode)) { *total = COSTS_N_INSNS (ix86_cost->fchs); return false; } /* FALLTHRU */ case NOT: if (!TARGET_64BIT && mode == DImode) *total = COSTS_N_INSNS (ix86_cost->add * 2); else *total = COSTS_N_INSNS (ix86_cost->add); return false; case FLOAT_EXTEND: if (!TARGET_SSE_MATH || !VALID_SSE_REG_MODE (mode)) *total = 0; return false; case ABS: if (FLOAT_MODE_P (mode)) *total = COSTS_N_INSNS (ix86_cost->fabs); return false; case SQRT: if (FLOAT_MODE_P (mode)) *total = COSTS_N_INSNS (ix86_cost->fsqrt); return false; case UNSPEC: if (XINT (x, 1) == UNSPEC_TP) *total = 0; return false; default: return false; } } #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION) static void ix86_svr3_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED) { init_section (); fputs ("\tpushl $", asm_out_file); assemble_name (asm_out_file, XSTR (symbol, 0)); fputc ('\n', asm_out_file); } #endif #if TARGET_MACHO static int current_machopic_label_num; /* Given a symbol name and its associated stub, write out the definition of the stub. */ void machopic_output_stub (FILE *file, const char *symb, const char *stub) { unsigned int length; char *binder_name, *symbol_name, lazy_ptr_name[32]; int label = ++current_machopic_label_num; /* Lose our funky encoding stuff so it doesn't contaminate the stub. */ symb = (*targetm.strip_name_encoding) (symb); length = strlen (stub); binder_name = alloca (length + 32); GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length); length = strlen (symb); symbol_name = alloca (length + 32); GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length); sprintf (lazy_ptr_name, "L%d$lz", label); if (MACHOPIC_PURE) machopic_picsymbol_stub_section (); else machopic_symbol_stub_section (); fprintf (file, "%s:\n", stub); fprintf (file, "\t.indirect_symbol %s\n", symbol_name); if (MACHOPIC_PURE) { fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label); fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label); fprintf (file, "\tjmp %%edx\n"); } else fprintf (file, "\tjmp *%s\n", lazy_ptr_name); fprintf (file, "%s:\n", binder_name); if (MACHOPIC_PURE) { fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label); fprintf (file, "\tpushl %%eax\n"); } else fprintf (file, "\t pushl $%s\n", lazy_ptr_name); fprintf (file, "\tjmp dyld_stub_binding_helper\n"); machopic_lazy_symbol_ptr_section (); fprintf (file, "%s:\n", lazy_ptr_name); fprintf (file, "\t.indirect_symbol %s\n", symbol_name); fprintf (file, "\t.long %s\n", binder_name); } #endif /* TARGET_MACHO */ /* Order the registers for register allocator. */ void x86_order_regs_for_local_alloc (void) { int pos = 0; int i; /* First allocate the local general purpose registers. 
*/ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (GENERAL_REGNO_P (i) && call_used_regs[i]) reg_alloc_order [pos++] = i; /* Global general purpose registers. */ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (GENERAL_REGNO_P (i) && !call_used_regs[i]) reg_alloc_order [pos++] = i; /* x87 registers come first in case we are doing FP math using them. */ if (!TARGET_SSE_MATH) for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++) reg_alloc_order [pos++] = i; /* SSE registers. */ for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++) reg_alloc_order [pos++] = i; for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++) reg_alloc_order [pos++] = i; /* x87 registers. */ if (TARGET_SSE_MATH) for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++) reg_alloc_order [pos++] = i; for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++) reg_alloc_order [pos++] = i; /* Initialize the rest of array as we do not allocate some registers at all. */ while (pos < FIRST_PSEUDO_REGISTER) reg_alloc_order [pos++] = 0; } #ifndef TARGET_USE_MS_BITFIELD_LAYOUT #define TARGET_USE_MS_BITFIELD_LAYOUT 0 #endif /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in struct attribute_spec.handler. */ static tree ix86_handle_struct_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree *type = NULL; if (DECL_P (*node)) { if (TREE_CODE (*node) == TYPE_DECL) type = &TREE_TYPE (*node); } else type = node; if (!(type && (TREE_CODE (*type) == RECORD_TYPE || TREE_CODE (*type) == UNION_TYPE))) { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } else if ((is_attribute_p ("ms_struct", name) && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type))) || ((is_attribute_p ("gcc_struct", name) && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type))))) { warning ("`%s' incompatible attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } static bool ix86_ms_bitfield_layout_p (tree record_type) { return (TARGET_USE_MS_BITFIELD_LAYOUT && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type))) || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)); } /* Returns an expression indicating where the this parameter is located on entry to the FUNCTION. */ static rtx x86_this_parameter (tree function) { tree type = TREE_TYPE (function); if (TARGET_64BIT) { int n = aggregate_value_p (TREE_TYPE (type), type) != 0; return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]); } if (ix86_function_regparm (type, function) > 0) { tree parm; parm = TYPE_ARG_TYPES (type); /* Figure out whether or not the function has a variable number of arguments. */ for (; parm; parm = TREE_CHAIN (parm)) if (TREE_VALUE (parm) == void_type_node) break; /* If not, the this parameter is in the first argument. */ if (parm) { int regno = 0; if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type))) regno = 2; return gen_rtx_REG (SImode, regno); } } if (aggregate_value_p (TREE_TYPE (type), type)) return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8)); else return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4)); } /* Determine whether x86_output_mi_thunk can succeed. */ static bool x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta ATTRIBUTE_UNUSED, HOST_WIDE_INT vcall_offset, tree function) { /* 64-bit can handle anything. */ if (TARGET_64BIT) return true; /* For 32-bit, everything's fine if we have one free register. 
*/ if (ix86_function_regparm (TREE_TYPE (function), function) < 3) return true; /* Need a free register for vcall_offset. */ if (vcall_offset) return false; /* Need a free register for GOT references. */ if (flag_pic && !(*targetm.binds_local_p) (function)) return false; /* Otherwise ok. */ return true; } /* Output the assembler code for a thunk function. THUNK_DECL is the declaration for the thunk function itself, FUNCTION is the decl for the target function. DELTA is an immediate constant offset to be added to THIS. If VCALL_OFFSET is nonzero, the word at *(*this + vcall_offset) should be added to THIS. */ static void x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED, tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, tree function) { rtx xops[3]; rtx this = x86_this_parameter (function); rtx this_reg, tmp; /* If VCALL_OFFSET, we'll need THIS in a register. Might as well pull it in now and let DELTA benefit. */ if (REG_P (this)) this_reg = this; else if (vcall_offset) { /* Put the this parameter into %eax. */ xops[0] = this; xops[1] = this_reg = gen_rtx_REG (Pmode, 0); output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops); } else this_reg = NULL_RTX; /* Adjust the this parameter by a fixed constant. */ if (delta) { xops[0] = GEN_INT (delta); xops[1] = this_reg ? this_reg : this; if (TARGET_64BIT) { if (!x86_64_general_operand (xops[0], DImode)) { tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */); xops[1] = tmp; output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops); xops[0] = tmp; xops[1] = this; } output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops); } else output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops); } /* Adjust the this parameter by a value stored in the vtable. */ if (vcall_offset) { if (TARGET_64BIT) tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */); else { int tmp_regno = 2 /* ECX */; if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (TREE_TYPE (function)))) tmp_regno = 0 /* EAX */; tmp = gen_rtx_REG (SImode, tmp_regno); } xops[0] = gen_rtx_MEM (Pmode, this_reg); xops[1] = tmp; if (TARGET_64BIT) output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops); else output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops); /* Adjust the this parameter. */ xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset)); if (TARGET_64BIT && !memory_operand (xops[0], Pmode)) { rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */); xops[0] = GEN_INT (vcall_offset); xops[1] = tmp2; output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops); xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2)); } xops[1] = this_reg; if (TARGET_64BIT) output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops); else output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops); } /* If necessary, drop THIS back to its stack slot. 
*/ if (this_reg && this_reg != this) { xops[0] = this_reg; xops[1] = this; output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops); } xops[0] = XEXP (DECL_RTL (function), 0); if (TARGET_64BIT) { if (!flag_pic || (*targetm.binds_local_p) (function)) output_asm_insn ("jmp\t%P0", xops); else { tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL); tmp = gen_rtx_CONST (Pmode, tmp); tmp = gen_rtx_MEM (QImode, tmp); xops[0] = tmp; output_asm_insn ("jmp\t%A0", xops); } } else { if (!flag_pic || (*targetm.binds_local_p) (function)) output_asm_insn ("jmp\t%P0", xops); else #if TARGET_MACHO if (TARGET_MACHO) { const char *ip = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (function)); tmp = gen_rtx_SYMBOL_REF (Pmode, machopic_stub_name (ip)); tmp = gen_rtx_MEM (QImode, tmp); xops[0] = tmp; output_asm_insn ("jmp\t%0", xops); } else #endif /* TARGET_MACHO */ { tmp = gen_rtx_REG (SImode, 2 /* ECX */); output_set_got (tmp); xops[1] = tmp; output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops); output_asm_insn ("jmp\t{*}%1", xops); } } } static void x86_file_start (void) { default_file_start (); if (X86_FILE_START_VERSION_DIRECTIVE) fputs ("\t.version\t\"01.01\"\n", asm_out_file); if (X86_FILE_START_FLTUSED) fputs ("\t.global\t__fltused\n", asm_out_file); if (ix86_asm_dialect == ASM_INTEL) fputs ("\t.intel_syntax\n", asm_out_file); } int x86_field_alignment (tree field, int computed) { enum machine_mode mode; tree type = TREE_TYPE (field); if (TARGET_64BIT || TARGET_ALIGN_DOUBLE) return computed; mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE ? get_inner_array_type (type) : type); if (mode == DFmode || mode == DCmode || GET_MODE_CLASS (mode) == MODE_INT || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT) return MIN (32, computed); return computed; } /* Output assembler code to FILE to increment profiler label # LABELNO for profiling a function entry. */ void x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED) { if (TARGET_64BIT) if (flag_pic) { #ifndef NO_PROFILE_COUNTERS fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno); #endif fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME); } else { #ifndef NO_PROFILE_COUNTERS fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno); #endif fprintf (file, "\tcall\t%s\n", MCOUNT_NAME); } else if (flag_pic) { #ifndef NO_PROFILE_COUNTERS fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n", LPREFIX, labelno, PROFILE_COUNT_REGISTER); #endif fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME); } else { #ifndef NO_PROFILE_COUNTERS fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno, PROFILE_COUNT_REGISTER); #endif fprintf (file, "\tcall\t%s\n", MCOUNT_NAME); } } /* We don't have exact information about the insn sizes, but we may assume quite safely that we are informed about all 1 byte insns and memory address sizes. This is enough to eliminate unnecessary padding in 99% of cases. */ static int min_insn_size (rtx insn) { int l = 0; if (!INSN_P (insn) || !active_insn_p (insn)) return 0; /* Discard alignments we've emit and jump instructions. */ if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN) return 0; if (GET_CODE (insn) == JUMP_INSN && (GET_CODE (PATTERN (insn)) == ADDR_VEC || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)) return 0; /* Important case - calls are always 5 bytes. It is common to have many calls in the row. 
*/ if (GET_CODE (insn) == CALL_INSN && symbolic_reference_mentioned_p (PATTERN (insn)) && !SIBLING_CALL_P (insn)) return 5; if (get_attr_length (insn) <= 1) return 1; /* For normal instructions we may rely on the sizes of addresses and the presence of symbol to require 4 bytes of encoding. This is not the case for jumps where references are PC relative. */ if (GET_CODE (insn) != JUMP_INSN) { l = get_attr_length_address (insn); if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn))) l = 4; } if (l) return 1+l; else return 2; } /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte window. */ static void ix86_avoid_jump_misspredicts (void) { rtx insn, start = get_insns (); int nbytes = 0, njumps = 0; int isjump = 0; /* Look for all minimal intervals of instructions containing 4 jumps. The intervals are bounded by START and INSN. NBYTES is the total size of instructions in the interval including INSN and not including START. When the NBYTES is smaller than 16 bytes, it is possible that the end of START and INSN ends up in the same 16byte page. The smallest offset in the page INSN can start is the case where START ends on the offset 0. Offset of INSN is then NBYTES - sizeof (INSN). We add p2align to 16byte window with maxskip 17 - NBYTES + sizeof (INSN). */ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) { nbytes += min_insn_size (insn); if (dump_file) fprintf(dump_file, "Insn %i estimated to %i bytes\n", INSN_UID (insn), min_insn_size (insn)); if ((GET_CODE (insn) == JUMP_INSN && GET_CODE (PATTERN (insn)) != ADDR_VEC && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC) || GET_CODE (insn) == CALL_INSN) njumps++; else continue; while (njumps > 3) { start = NEXT_INSN (start); if ((GET_CODE (start) == JUMP_INSN && GET_CODE (PATTERN (start)) != ADDR_VEC && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC) || GET_CODE (start) == CALL_INSN) njumps--, isjump = 1; else isjump = 0; nbytes -= min_insn_size (start); } if (njumps < 0) abort (); if (dump_file) fprintf (dump_file, "Interval %i to %i has %i bytes\n", INSN_UID (start), INSN_UID (insn), nbytes); if (njumps == 3 && isjump && nbytes < 16) { int padsize = 15 - nbytes + min_insn_size (insn); if (dump_file) fprintf (dump_file, "Padding insn %i by %i bytes!\n", INSN_UID (insn), padsize); emit_insn_before (gen_align (GEN_INT (padsize)), insn); } } } /* AMD Athlon works faster when RET is not destination of conditional jump or directly preceded by other jump instruction. We avoid the penalty by inserting NOP just before the RET instructions in such cases. */ static void ix86_pad_returns (void) { edge e; for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next) { basic_block bb = e->src; rtx ret = BB_END (bb); rtx prev; bool replace = false; if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN || !maybe_hot_bb_p (bb)) continue; for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev)) if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL) break; if (prev && GET_CODE (prev) == CODE_LABEL) { edge e; for (e = bb->pred; e; e = e->pred_next) if (EDGE_FREQUENCY (e) && e->src->index >= 0 && !(e->flags & EDGE_FALLTHRU)) replace = true; } if (!replace) { prev = prev_active_insn (ret); if (prev && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev)) || GET_CODE (prev) == CALL_INSN)) replace = true; /* Empty functions get branch mispredict even when the jump destination is not visible to us. 
*/ if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED) replace = true; } if (replace) { emit_insn_before (gen_return_internal_long (), ret); delete_insn (ret); } } } /* Implement machine specific optimizations. We implement padding of returns for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */ static void ix86_reorg (void) { if (TARGET_ATHLON_K8 && optimize && !optimize_size) ix86_pad_returns (); if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size) ix86_avoid_jump_misspredicts (); } /* Return nonzero when QImode register that must be represented via REX prefix is used. */ bool x86_extended_QIreg_mentioned_p (rtx insn) { int i; extract_insn_cached (insn); for (i = 0; i < recog_data.n_operands; i++) if (REG_P (recog_data.operand[i]) && REGNO (recog_data.operand[i]) >= 4) return true; return false; } /* Return nonzero when P points to register encoded via REX prefix. Called via for_each_rtx. */ static int extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED) { unsigned int regno; if (!REG_P (*p)) return 0; regno = REGNO (*p); return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno); } /* Return true when INSN mentions register that must be encoded using REX prefix. */ bool x86_extended_reg_mentioned_p (rtx insn) { return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL); } /* Generate an unsigned DImode/SImode to FP conversion. This is the same code optabs would emit if we didn't have TFmode patterns. */ void x86_emit_floatuns (rtx operands[2]) { rtx neglab, donelab, i0, i1, f0, in, out; enum machine_mode mode, inmode; inmode = GET_MODE (operands[1]); if (inmode != SImode && inmode != DImode) abort (); out = operands[0]; in = force_reg (inmode, operands[1]); mode = GET_MODE (out); neglab = gen_label_rtx (); donelab = gen_label_rtx (); i1 = gen_reg_rtx (Pmode); f0 = gen_reg_rtx (mode); emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab); emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in))); emit_jump_insn (gen_jump (donelab)); emit_barrier (); emit_label (neglab); i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT); i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT); i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT); expand_float (f0, i0, 0); emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0))); emit_label (donelab); } /* Return if we do not know how to pass TYPE solely in registers. */ bool ix86_must_pass_in_stack (enum machine_mode mode, tree type) { if (default_must_pass_in_stack (mode, type)) return true; return (!TARGET_64BIT && type && mode == TImode); } /* Initialize vector TARGET via VALS. */ void ix86_expand_vector_init (rtx target, rtx vals) { enum machine_mode mode = GET_MODE (target); int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode)); int n_elts = (GET_MODE_SIZE (mode) / elt_size); int i; for (i = n_elts - 1; i >= 0; i--) if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT && GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE) break; /* Few special cases first... ... constants are best loaded from constant pool. */ if (i < 0) { emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))); return; } /* ... values where only first field is non-constant are best loaded from the pool and overwritten via move later. 
*/ if (!i) { rtx op = simplify_gen_subreg (mode, XVECEXP (vals, 0, 0), GET_MODE_INNER (mode), 0); op = force_reg (mode, op); XVECEXP (vals, 0, 0) = CONST0_RTX (GET_MODE_INNER (mode)); emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0))); switch (GET_MODE (target)) { case V2DFmode: emit_insn (gen_sse2_movsd (target, target, op)); break; case V4SFmode: emit_insn (gen_sse_movss (target, target, op)); break; default: break; } return; } /* And the busy sequence doing rotations. */ switch (GET_MODE (target)) { case V2DFmode: { rtx vecop0 = simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 0), DFmode, 0); rtx vecop1 = simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 1), DFmode, 0); vecop0 = force_reg (V2DFmode, vecop0); vecop1 = force_reg (V2DFmode, vecop1); emit_insn (gen_sse2_unpcklpd (target, vecop0, vecop1)); } break; case V4SFmode: { rtx vecop0 = simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 0), SFmode, 0); rtx vecop1 = simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 1), SFmode, 0); rtx vecop2 = simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 2), SFmode, 0); rtx vecop3 = simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 3), SFmode, 0); rtx tmp1 = gen_reg_rtx (V4SFmode); rtx tmp2 = gen_reg_rtx (V4SFmode); vecop0 = force_reg (V4SFmode, vecop0); vecop1 = force_reg (V4SFmode, vecop1); vecop2 = force_reg (V4SFmode, vecop2); vecop3 = force_reg (V4SFmode, vecop3); emit_insn (gen_sse_unpcklps (tmp1, vecop1, vecop3)); emit_insn (gen_sse_unpcklps (tmp2, vecop0, vecop2)); emit_insn (gen_sse_unpcklps (target, tmp2, tmp1)); } break; default: abort (); } } /* Worker function for TARGET_MD_ASM_CLOBBERS. We do this in the new i386 backend to maintain source compatibility with the old cc0-based compiler. */ static tree ix86_md_asm_clobbers (tree clobbers) { clobbers = tree_cons (NULL_TREE, build_string (5, "flags"), clobbers); clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"), clobbers); clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"), clobbers); return clobbers; } /* Worker function for REVERSE_CONDITION. */ enum rtx_code ix86_reverse_condition (enum rtx_code code, enum machine_mode mode) { return (mode != CCFPmode && mode != CCFPUmode ? reverse_condition (code) : reverse_condition_maybe_unordered (code)); } /* Output code to perform an x87 FP register move, from OPERANDS[1] to OPERANDS[0]. */ const char * output_387_reg_move (rtx insn, rtx *operands) { if (REG_P (operands[1]) && find_regno_note (insn, REG_DEAD, REGNO (operands[1]))) { if (REGNO (operands[0]) == FIRST_STACK_REG && TARGET_USE_FFREEP) return "ffreep\t%y0"; return "fstp\t%y0"; } if (STACK_TOP_P (operands[0])) return "fld%z1\t%y1"; return "fst\t%y0"; } /* Output code to perform a conditional jump to LABEL, if C2 flag in FP status register is set. */ void ix86_emit_fp_unordered_jump (rtx label) { rtx reg = gen_reg_rtx (HImode); rtx temp; emit_insn (gen_x86_fnstsw_1 (reg)); if (TARGET_USE_SAHF) { emit_insn (gen_x86_sahf_1 (reg)); temp = gen_rtx_REG (CCmode, FLAGS_REG); temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx); } else { emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04))); temp = gen_rtx_REG (CCNOmode, FLAGS_REG); temp = gen_rtx_NE (VOIDmode, temp, const0_rtx); } temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp, gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx); temp = gen_rtx_SET (VOIDmode, pc_rtx, temp); emit_jump_insn (temp); } /* Output code to perform a log1p XFmode calculation. 
*/ void ix86_emit_i387_log1p (rtx op0, rtx op1) { rtx label1 = gen_label_rtx (); rtx label2 = gen_label_rtx (); rtx tmp = gen_reg_rtx (XFmode); rtx tmp2 = gen_reg_rtx (XFmode); emit_insn (gen_absxf2 (tmp, op1)); emit_insn (gen_cmpxf (tmp, CONST_DOUBLE_FROM_REAL_VALUE ( REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode), XFmode))); emit_jump_insn (gen_bge (label1)); emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */ emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1)); emit_jump (label2); emit_label (label1); emit_move_insn (tmp, CONST1_RTX (XFmode)); emit_insn (gen_addxf3 (tmp, op1, tmp)); emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */ emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp)); emit_label (label2); } /* Type information for i386.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_stack_local_entry (void *x_p) { struct stack_local_entry * const x = (struct stack_local_entry *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_7rtx_def ((*x).rtl); gt_ggc_m_17stack_local_entry ((*x).next); } } void gt_pch_nx_stack_local_entry (void *x_p) { struct stack_local_entry * const x = (struct stack_local_entry *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_17stack_local_entry)) { gt_pch_n_7rtx_def ((*x).rtl); gt_pch_n_17stack_local_entry ((*x).next); } } void gt_pch_p_17stack_local_entry (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct stack_local_entry * const x ATTRIBUTE_UNUSED = (struct stack_local_entry *)x_p; if ((void *)(x) == this_obj) op (&((*x).rtl), cookie); if ((void *)(x) == this_obj) op (&((*x).next), cookie); } /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_i386_h[] = { { &ix86_tls_symbol, 1, sizeof (ix86_tls_symbol), >_ggc_mx_rtx_def, >_pch_nx_rtx_def }, LAST_GGC_ROOT_TAB }; /* Linux host-specific hook definitions. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include /* Linux has a feature called exec-shield-randomize that perturbs the address of non-fixed mapped segments by a (relatively) small amount. 
The feature is intended to make it harder to attack the system with buffer overflow attacks, since every invocation of a program will have its libraries and data segments at slightly different addresses. This feature causes us problems with PCH because it makes it that much harder to acquire a stable location at which to map our PCH data file. [ The feature causes other points of non-determinism within the compiler as well, so we'd *really* like to be able to have the driver disable exec-shield-randomize for the process group, but that isn't possible at present. ] We're going to try several things: * Select an architecture specific address as "likely" and see if that's free. For our 64-bit hosts, we can easily choose an address in Never Never Land. * If exec-shield-randomize is disabled, then just use the address chosen by mmap in step one. * If exec-shield-randomize is enabled, then temporarily allocate 32M of memory as a buffer, then allocate PCH memory, then free the buffer. The theory here is that the perturbation is no more than 16M, and so by allocating our buffer larger than that we make it considerably more likely that the address will be free when we want to load the data back. */ #undef HOST_HOOKS_GT_PCH_GET_ADDRESS #define HOST_HOOKS_GT_PCH_GET_ADDRESS linux_gt_pch_get_address /* For various ports, try to guess a fixed spot in the vm space that's probably free. */ #if defined(__alpha) # define TRY_EMPTY_VM_SPACE 0x10000000000 #elif defined(__ia64) # define TRY_EMPTY_VM_SPACE 0x2000000100000000 #elif defined(__x86_64) # define TRY_EMPTY_VM_SPACE 0x1000000000 #elif defined(__i386) # define TRY_EMPTY_VM_SPACE 0x60000000 #elif defined(__s390x__) # define TRY_EMPTY_VM_SPACE 0x8000000000 #elif defined(__s390__) # define TRY_EMPTY_VM_SPACE 0x60000000 #else # define TRY_EMPTY_VM_SPACE 0 #endif /* Determine a location where we might be able to reliably allocate SIZE bytes. FD is the PCH file, though we should return with the file unmapped. */ static void * linux_gt_pch_get_address (size_t size, int fd) { size_t buffer_size = 32 * 1024 * 1024; void *addr, *buffer; FILE *f; bool randomize_on; addr = mmap ((void *)TRY_EMPTY_VM_SPACE, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); /* If we failed the map, that means there's *no* free space. */ if (addr == (void *) MAP_FAILED) return NULL; /* Unmap the area before returning. */ munmap (addr, size); /* If we got the exact area we requested, then that's great. */ if (TRY_EMPTY_VM_SPACE && addr == (void *) TRY_EMPTY_VM_SPACE) return addr; /* If we didn't, then we need to look to see if randomization is on. */ f = fopen ("/proc/sys/kernel/exec-shield-randomize", "r"); randomize_on = false; if (f != NULL) { char buf[100]; size_t c; c = fread (buf, 1, sizeof buf - 1, f); if (c > 0) { buf[c] = '\0'; randomize_on = (atoi (buf) > 0); } fclose (f); } /* If it isn't, then accept the address that mmap selected as fine. */ if (!randomize_on) return addr; /* Otherwise, we need to try again with buffer space. */ buffer = mmap (0, buffer_size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0); addr = mmap (0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); if (buffer != (void *) MAP_FAILED) munmap (buffer, buffer_size); if (addr == (void *) MAP_FAILED) return NULL; munmap (addr, size); return addr; } const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER; /* Tree inlining. Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Alexandre Oliva This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* I'm not real happy about this, but we need to handle gimple and non-gimple trees. */ /* 0 if we should not perform inlining. 1 if we should expand functions calls inline at the tree level. 2 if we should consider *all* functions to be inline candidates. */ int flag_inline_trees = 0; /* To Do: o In order to make inlining-on-trees work, we pessimized function-local static constants. In particular, they are now always output, even when not addressed. Fix this by treating function-local static constants just like global static constants; the back-end already knows not to output them if they are not needed. o Provide heuristics to clamp inlining of recursive template calls? */ /* Data required for function inlining. */ typedef struct inline_data { /* A stack of the functions we are inlining. For example, if we are compiling `f', which calls `g', which calls `h', and we are inlining the body of `h', the stack will contain, `h', followed by `g', followed by `f'. The first few elements of the stack may contain other functions that we know we should not recurse into, even though they are not directly being inlined. */ varray_type fns; /* The index of the first element of FNS that really represents an inlined function. */ unsigned first_inlined_fn; /* The label to jump to when a return statement is encountered. If this value is NULL, then return statements will simply be remapped as return statements, rather than as jumps. */ tree ret_label; /* The VAR_DECL for the return value. */ tree retvar; /* The map from local declarations in the inlined function to equivalents in the function into which it is being inlined. */ splay_tree decl_map; /* Nonzero if we are currently within the cleanup for a TARGET_EXPR. */ int in_target_cleanup_p; /* A list of the functions current function has inlined. */ varray_type inlined_fns; /* We use the same mechanism to build clones that we do to perform inlining. However, there are a few places where we need to distinguish between those two situations. This flag is true if we are cloning, rather than inlining. */ bool cloning_p; /* Similarly for saving function body. */ bool saving_p; /* Hash table used to prevent walk_tree from visiting the same node umpteen million times. */ htab_t tree_pruner; /* Callgraph node of function we are inlining into. */ struct cgraph_node *node; /* Callgraph node of currently inlined function. */ struct cgraph_node *current_node; /* Statement iterator. We need this so we can keep the tree in gimple form when we insert the inlined function. It is not used when we are not dealing with gimple trees. */ tree_stmt_iterator tsi; } inline_data; /* Prototypes. */ /* The approximate number of instructions per statement. This number need not be particularly accurate; it is used only to make decisions about when a function is too big to inline. 
*/ #define INSNS_PER_STMT (10) static tree declare_return_variable (inline_data *, tree, tree *); static tree copy_body_r (tree *, int *, void *); static tree copy_body (inline_data *); static tree expand_call_inline (tree *, int *, void *); static void expand_calls_inline (tree *, inline_data *); static bool inlinable_function_p (tree); static tree remap_decl (tree, inline_data *); static tree remap_type (tree, inline_data *); static tree initialize_inlined_parameters (inline_data *, tree, tree, tree, tree); static void remap_block (tree *, inline_data *); static tree remap_decls (tree, inline_data *); static void copy_bind_expr (tree *, int *, inline_data *); static tree mark_local_for_remap_r (tree *, int *, void *); static tree unsave_r (tree *, int *, void *); static void declare_inline_vars (tree bind_expr, tree vars); /* Insert a tree->tree mapping for ID. Despite the name suggests that the trees should be variables, it is used for more than that. */ static void insert_decl_map (inline_data *id, tree key, tree value) { splay_tree_insert (id->decl_map, (splay_tree_key) key, (splay_tree_value) value); /* Always insert an identity map as well. If we see this same new node again, we won't want to duplicate it a second time. */ if (key != value) splay_tree_insert (id->decl_map, (splay_tree_key) value, (splay_tree_value) value); } /* Remap DECL during the copying of the BLOCK tree for the function. We are only called to remap local variables in the current function. */ static tree remap_decl (tree decl, inline_data *id) { splay_tree_node n = splay_tree_lookup (id->decl_map, (splay_tree_key) decl); tree fn = VARRAY_TOP_TREE (id->fns); /* See if we have remapped this declaration. If we didn't already have an equivalent for this declaration, create one now. */ if (!n) { /* Make a copy of the variable or label. */ tree t = copy_decl_for_inlining (decl, fn, VARRAY_TREE (id->fns, 0)); /* Remap types, if necessary. */ TREE_TYPE (t) = remap_type (TREE_TYPE (t), id); if (TREE_CODE (t) == TYPE_DECL) DECL_ORIGINAL_TYPE (t) = remap_type (DECL_ORIGINAL_TYPE (t), id); else if (TREE_CODE (t) == PARM_DECL) DECL_ARG_TYPE_AS_WRITTEN (t) = remap_type (DECL_ARG_TYPE_AS_WRITTEN (t), id); /* Remap sizes as necessary. */ walk_tree (&DECL_SIZE (t), copy_body_r, id, NULL); walk_tree (&DECL_SIZE_UNIT (t), copy_body_r, id, NULL); /* If fields, do likewise for offset and qualifier. */ if (TREE_CODE (t) == FIELD_DECL) { walk_tree (&DECL_FIELD_OFFSET (t), copy_body_r, id, NULL); if (TREE_CODE (DECL_CONTEXT (t)) == QUAL_UNION_TYPE) walk_tree (&DECL_QUALIFIER (t), copy_body_r, id, NULL); } #if 0 /* FIXME handle anon aggrs. */ if (! DECL_NAME (t) && TREE_TYPE (t) && lang_hooks.tree_inlining.anon_aggr_type_p (TREE_TYPE (t))) { /* For a VAR_DECL of anonymous type, we must also copy the member VAR_DECLS here and rechain the DECL_ANON_UNION_ELEMS. */ tree members = NULL; tree src; for (src = DECL_ANON_UNION_ELEMS (t); src; src = TREE_CHAIN (src)) { tree member = remap_decl (TREE_VALUE (src), id); if (TREE_PURPOSE (src)) abort (); members = tree_cons (NULL, member, members); } DECL_ANON_UNION_ELEMS (t) = nreverse (members); } #endif /* Remember it, so that if we encounter this local entity again we can reuse this copy. */ insert_decl_map (id, decl, t); return t; } return unshare_expr ((tree) n->value); } static tree remap_type (tree type, inline_data *id) { splay_tree_node node; tree new, t; if (type == NULL) return type; /* See if we have remapped this type. 
*/ node = splay_tree_lookup (id->decl_map, (splay_tree_key) type); if (node) return (tree) node->value; /* The type only needs remapping if it's variably modified by a variable in the function we are inlining. */ if (! variably_modified_type_p (type, VARRAY_TOP_TREE (id->fns))) { insert_decl_map (id, type, type); return type; } /* We do need a copy. build and register it now. If this is a pointer or reference type, remap the designated type and make a new pointer or reference type. */ if (TREE_CODE (type) == POINTER_TYPE) { new = build_pointer_type_for_mode (remap_type (TREE_TYPE (type), id), TYPE_MODE (type), TYPE_REF_CAN_ALIAS_ALL (type)); insert_decl_map (id, type, new); return new; } else if (TREE_CODE (type) == REFERENCE_TYPE) { new = build_reference_type_for_mode (remap_type (TREE_TYPE (type), id), TYPE_MODE (type), TYPE_REF_CAN_ALIAS_ALL (type)); insert_decl_map (id, type, new); return new; } else new = copy_node (type); insert_decl_map (id, type, new); /* This is a new type, not a copy of an old type. Need to reassociate variants. We can handle everything except the main variant lazily. */ t = TYPE_MAIN_VARIANT (type); if (type != t) { t = remap_type (t, id); TYPE_MAIN_VARIANT (new) = t; TYPE_NEXT_VARIANT (new) = TYPE_MAIN_VARIANT (t); TYPE_NEXT_VARIANT (t) = new; } else { TYPE_MAIN_VARIANT (new) = new; TYPE_NEXT_VARIANT (new) = NULL; } /* Lazily create pointer and reference types. */ TYPE_POINTER_TO (new) = NULL; TYPE_REFERENCE_TO (new) = NULL; switch (TREE_CODE (new)) { case INTEGER_TYPE: case REAL_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: t = TYPE_MIN_VALUE (new); if (t && TREE_CODE (t) != INTEGER_CST) walk_tree (&TYPE_MIN_VALUE (new), copy_body_r, id, NULL); t = TYPE_MAX_VALUE (new); if (t && TREE_CODE (t) != INTEGER_CST) walk_tree (&TYPE_MAX_VALUE (new), copy_body_r, id, NULL); return new; case FUNCTION_TYPE: TREE_TYPE (new) = remap_type (TREE_TYPE (new), id); walk_tree (&TYPE_ARG_TYPES (new), copy_body_r, id, NULL); return new; case ARRAY_TYPE: TREE_TYPE (new) = remap_type (TREE_TYPE (new), id); TYPE_DOMAIN (new) = remap_type (TYPE_DOMAIN (new), id); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: walk_tree (&TYPE_FIELDS (new), copy_body_r, id, NULL); break; case FILE_TYPE: case SET_TYPE: case OFFSET_TYPE: default: /* Shouldn't have been thought variable sized. */ abort (); } walk_tree (&TYPE_SIZE (new), copy_body_r, id, NULL); walk_tree (&TYPE_SIZE_UNIT (new), copy_body_r, id, NULL); return new; } static tree remap_decls (tree decls, inline_data *id) { tree old_var; tree new_decls = NULL_TREE; /* Remap its variables. */ for (old_var = decls; old_var; old_var = TREE_CHAIN (old_var)) { tree new_var; /* Remap the variable. */ new_var = remap_decl (old_var, id); /* If we didn't remap this variable, so we can't mess with its TREE_CHAIN. If we remapped this variable to the return slot, it's already declared somewhere else, so don't declare it here. */ if (!new_var || new_var == id->retvar) ; #ifdef ENABLE_CHECKING else if (!DECL_P (new_var)) abort (); #endif else { TREE_CHAIN (new_var) = new_decls; new_decls = new_var; } } return nreverse (new_decls); } /* Copy the BLOCK to contain remapped versions of the variables therein. And hook the new block into the block-tree. */ static void remap_block (tree *block, inline_data *id) { tree old_block; tree new_block; tree fn; /* Make the new block. 
*/ old_block = *block; new_block = make_node (BLOCK); TREE_USED (new_block) = TREE_USED (old_block); BLOCK_ABSTRACT_ORIGIN (new_block) = old_block; *block = new_block; /* Remap its variables. */ BLOCK_VARS (new_block) = remap_decls (BLOCK_VARS (old_block), id); fn = VARRAY_TREE (id->fns, 0); #if 1 /* FIXME! It shouldn't be so hard to manage blocks. Rebuilding them in rest_of_compilation is a good start. */ if (id->cloning_p) /* We're building a clone; DECL_INITIAL is still error_mark_node, and current_binding_level is the parm binding level. */ lang_hooks.decls.insert_block (new_block); else { /* Attach this new block after the DECL_INITIAL block for the function into which this block is being inlined. In rest_of_compilation we will straighten out the BLOCK tree. */ tree *first_block; if (DECL_INITIAL (fn)) first_block = &BLOCK_CHAIN (DECL_INITIAL (fn)); else first_block = &DECL_INITIAL (fn); BLOCK_CHAIN (new_block) = *first_block; *first_block = new_block; } #endif /* Remember the remapped block. */ insert_decl_map (id, old_block, new_block); } static void copy_statement_list (tree *tp) { tree_stmt_iterator oi, ni; tree new; new = alloc_stmt_list (); ni = tsi_start (new); oi = tsi_start (*tp); *tp = new; for (; !tsi_end_p (oi); tsi_next (&oi)) tsi_link_after (&ni, tsi_stmt (oi), TSI_NEW_STMT); } static void copy_bind_expr (tree *tp, int *walk_subtrees, inline_data *id) { tree block = BIND_EXPR_BLOCK (*tp); /* Copy (and replace) the statement. */ copy_tree_r (tp, walk_subtrees, NULL); if (block) { remap_block (&block, id); BIND_EXPR_BLOCK (*tp) = block; } if (BIND_EXPR_VARS (*tp)) /* This will remap a lot of the same decls again, but this should be harmless. */ BIND_EXPR_VARS (*tp) = remap_decls (BIND_EXPR_VARS (*tp), id); } /* Called from copy_body via walk_tree. DATA is really an `inline_data *'. */ static tree copy_body_r (tree *tp, int *walk_subtrees, void *data) { inline_data *id = (inline_data *) data; tree fn = VARRAY_TOP_TREE (id->fns); #if 0 /* All automatic variables should have a DECL_CONTEXT indicating what function they come from. */ if ((TREE_CODE (*tp) == VAR_DECL || TREE_CODE (*tp) == LABEL_DECL) && DECL_NAMESPACE_SCOPE_P (*tp)) if (! DECL_EXTERNAL (*tp) && ! TREE_STATIC (*tp)) abort (); #endif /* If this is a RETURN_EXPR, change it into a MODIFY_EXPR and a GOTO_EXPR with the RET_LABEL as its target. */ if (TREE_CODE (*tp) == RETURN_EXPR && id->ret_label) { tree return_stmt = *tp; tree goto_stmt; /* Build the GOTO_EXPR. */ tree assignment = TREE_OPERAND (return_stmt, 0); goto_stmt = build1 (GOTO_EXPR, void_type_node, id->ret_label); TREE_USED (id->ret_label) = 1; /* If we're returning something, just turn that into an assignment into the equivalent of the original RESULT_DECL. */ if (assignment) { /* Do not create a statement containing a naked RESULT_DECL. */ if (lang_hooks.gimple_before_inlining) if (TREE_CODE (assignment) == RESULT_DECL) gimplify_stmt (&assignment); *tp = build (BIND_EXPR, void_type_node, NULL, NULL, NULL); append_to_statement_list (assignment, &BIND_EXPR_BODY (*tp)); append_to_statement_list (goto_stmt, &BIND_EXPR_BODY (*tp)); } /* If we're not returning anything just do the jump. */ else *tp = goto_stmt; } /* Local variables and labels need to be replaced by equivalent variables. We don't want to copy static variables; there's only one of those, no matter how many times we inline the containing function. Similarly for globals from an outer function. 
*/ else if (lang_hooks.tree_inlining.auto_var_in_fn_p (*tp, fn)) { tree new_decl; /* Remap the declaration. */ new_decl = remap_decl (*tp, id); if (! new_decl) abort (); /* Replace this variable with the copy. */ STRIP_TYPE_NOPS (new_decl); *tp = new_decl; } #if 0 else if (nonstatic_local_decl_p (*tp) && DECL_CONTEXT (*tp) != VARRAY_TREE (id->fns, 0)) abort (); #endif else if (TREE_CODE (*tp) == STATEMENT_LIST) copy_statement_list (tp); else if (TREE_CODE (*tp) == SAVE_EXPR) remap_save_expr (tp, id->decl_map, walk_subtrees); else if (TREE_CODE (*tp) == UNSAVE_EXPR) /* UNSAVE_EXPRs should not be generated until expansion time. */ abort (); else if (TREE_CODE (*tp) == BIND_EXPR) copy_bind_expr (tp, walk_subtrees, id); else if (TREE_CODE (*tp) == LABELED_BLOCK_EXPR) { /* We need a new copy of this labeled block; the EXIT_BLOCK_EXPR will refer to it, so save a copy ready for remapping. We save it in the decl_map, although it isn't a decl. */ tree new_block = copy_node (*tp); insert_decl_map (id, *tp, new_block); *tp = new_block; } else if (TREE_CODE (*tp) == EXIT_BLOCK_EXPR) { splay_tree_node n = splay_tree_lookup (id->decl_map, (splay_tree_key) TREE_OPERAND (*tp, 0)); /* We _must_ have seen the enclosing LABELED_BLOCK_EXPR. */ if (! n) abort (); *tp = copy_node (*tp); TREE_OPERAND (*tp, 0) = (tree) n->value; } /* Types may need remapping as well. */ else if (TYPE_P (*tp)) *tp = remap_type (*tp, id); /* Otherwise, just copy the node. Note that copy_tree_r already knows not to copy VAR_DECLs, etc., so this is safe. */ else { tree old_node = *tp; if (TREE_CODE (*tp) == MODIFY_EXPR && TREE_OPERAND (*tp, 0) == TREE_OPERAND (*tp, 1) && (lang_hooks.tree_inlining.auto_var_in_fn_p (TREE_OPERAND (*tp, 0), fn))) { /* Some assignments VAR = VAR; don't generate any rtl code and thus don't count as variable modification. Avoid keeping bogosities like 0 = 0. */ tree decl = TREE_OPERAND (*tp, 0), value; splay_tree_node n; n = splay_tree_lookup (id->decl_map, (splay_tree_key) decl); if (n) { value = (tree) n->value; STRIP_TYPE_NOPS (value); if (TREE_CONSTANT (value) || TREE_READONLY_DECL_P (value)) { *tp = value; return copy_body_r (tp, walk_subtrees, data); } } } else if (TREE_CODE (*tp) == ADDR_EXPR && (lang_hooks.tree_inlining.auto_var_in_fn_p (TREE_OPERAND (*tp, 0), fn))) { /* Get rid of &* from inline substitutions. It can occur when someone takes the address of a parm or return slot passed by invisible reference. */ tree decl = TREE_OPERAND (*tp, 0), value; splay_tree_node n; n = splay_tree_lookup (id->decl_map, (splay_tree_key) decl); if (n) { value = (tree) n->value; if (TREE_CODE (value) == INDIRECT_REF) { if (!lang_hooks.types_compatible_p (TREE_TYPE (*tp), TREE_TYPE (TREE_OPERAND (value, 0)))) *tp = fold_convert (TREE_TYPE (*tp), TREE_OPERAND (value, 0)); else *tp = TREE_OPERAND (value, 0); return copy_body_r (tp, walk_subtrees, data); } } } else if (TREE_CODE (*tp) == INDIRECT_REF) { /* Get rid of *& from inline substitutions that can happen when a pointer argument is an ADDR_EXPR. 
*/ tree decl = TREE_OPERAND (*tp, 0), value; splay_tree_node n; n = splay_tree_lookup (id->decl_map, (splay_tree_key) decl); if (n) { value = (tree) n->value; STRIP_NOPS (value); if (TREE_CODE (value) == ADDR_EXPR && (lang_hooks.types_compatible_p (TREE_TYPE (*tp), TREE_TYPE (TREE_OPERAND (value, 0))))) { *tp = TREE_OPERAND (value, 0); return copy_body_r (tp, walk_subtrees, data); } } } copy_tree_r (tp, walk_subtrees, NULL); if (TREE_CODE (*tp) == CALL_EXPR && id->node && get_callee_fndecl (*tp)) { if (id->saving_p) { struct cgraph_node *node; struct cgraph_edge *edge; for (node = id->node->next_clone; node; node = node->next_clone) { edge = cgraph_edge (node, old_node); if (edge) edge->call_expr = *tp; else abort (); } } else { struct cgraph_edge *edge = cgraph_edge (id->current_node, old_node); if (edge) cgraph_clone_edge (edge, id->node, *tp); } } TREE_TYPE (*tp) = remap_type (TREE_TYPE (*tp), id); /* The copied TARGET_EXPR has never been expanded, even if the original node was expanded already. */ if (TREE_CODE (*tp) == TARGET_EXPR && TREE_OPERAND (*tp, 3)) { TREE_OPERAND (*tp, 1) = TREE_OPERAND (*tp, 3); TREE_OPERAND (*tp, 3) = NULL_TREE; } } /* Keep iterating. */ return NULL_TREE; } /* Make a copy of the body of FN so that it can be inserted inline in another function. */ static tree copy_body (inline_data *id) { tree body; tree fndecl = VARRAY_TOP_TREE (id->fns); if (fndecl == current_function_decl && cfun->saved_tree) body = cfun->saved_tree; else body = DECL_SAVED_TREE (fndecl); walk_tree (&body, copy_body_r, id, NULL); return body; } static void setup_one_parameter (inline_data *id, tree p, tree value, tree fn, tree *init_stmts, tree *vars, bool *gimplify_init_stmts_p) { tree init_stmt; tree var; tree var_sub; /* If the parameter is never assigned to, we may not need to create a new variable here at all. Instead, we may be able to just use the argument value. */ if (TREE_READONLY (p) && !TREE_ADDRESSABLE (p) && value && !TREE_SIDE_EFFECTS (value)) { /* We can't risk substituting complex expressions. They might contain variables that will be assigned to later. Theoretically, we could check the expression to see if all of the variables that determine its value are read-only, but we don't bother. */ if ((TREE_CONSTANT (value) || TREE_READONLY_DECL_P (value)) /* We may produce non-gimple trees by adding NOPs or introduce invalid sharing when operand is not really constant. It is not big deal to prohibit constant propagation here as we will constant propagate in DOM1 pass anyway. */ && (!lang_hooks.gimple_before_inlining || (is_gimple_min_invariant (value) && TREE_TYPE (value) == TREE_TYPE (p)))) { /* If this is a declaration, wrap it a NOP_EXPR so that we don't try to put the VALUE on the list of BLOCK_VARS. */ if (DECL_P (value)) value = build1 (NOP_EXPR, TREE_TYPE (value), value); /* If this is a constant, make sure it has the right type. */ else if (TREE_TYPE (value) != TREE_TYPE (p)) value = fold (build1 (NOP_EXPR, TREE_TYPE (p), value)); insert_decl_map (id, p, value); return; } } /* Make an equivalent VAR_DECL. Note that we must NOT remap the type here since the type of this decl must be visible to the calling function. */ var = copy_decl_for_inlining (p, fn, VARRAY_TREE (id->fns, 0)); /* See if the frontend wants to pass this by invisible reference. If so, our new VAR_DECL will have REFERENCE_TYPE, and we need to replace uses of the PARM_DECL with dereferences. 
*/ if (TREE_TYPE (var) != TREE_TYPE (p) && POINTER_TYPE_P (TREE_TYPE (var)) && TREE_TYPE (TREE_TYPE (var)) == TREE_TYPE (p)) { insert_decl_map (id, var, var); var_sub = build1 (INDIRECT_REF, TREE_TYPE (p), var); } else var_sub = var; /* Register the VAR_DECL as the equivalent for the PARM_DECL; that way, when the PARM_DECL is encountered, it will be automatically replaced by the VAR_DECL. */ insert_decl_map (id, p, var_sub); /* Declare this new variable. */ TREE_CHAIN (var) = *vars; *vars = var; /* Make gimplifier happy about this variable. */ DECL_SEEN_IN_BIND_EXPR_P (var) = lang_hooks.gimple_before_inlining; /* Even if P was TREE_READONLY, the new VAR should not be. In the original code, we would have constructed a temporary, and then the function body would have never changed the value of P. However, now, we will be constructing VAR directly. The constructor body may change its value multiple times as it is being constructed. Therefore, it must not be TREE_READONLY; the back-end assumes that TREE_READONLY variable is assigned to only once. */ if (TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (p))) TREE_READONLY (var) = 0; /* Initialize this VAR_DECL from the equivalent argument. Convert the argument to the proper type in case it was promoted. */ if (value) { tree rhs = fold_convert (TREE_TYPE (var), value); if (rhs == error_mark_node) return; /* We want to use MODIFY_EXPR, not INIT_EXPR here so that we keep our trees in gimple form. */ init_stmt = build (MODIFY_EXPR, TREE_TYPE (var), var, rhs); append_to_statement_list (init_stmt, init_stmts); /* If we did not create a gimple value and we did not create a gimple cast of a gimple value, then we will need to gimplify INIT_STMTS at the end. Note that is_gimple_cast only checks the outer tree code, not its operand. Thus the explicit check that it's operand is a gimple value. */ if (!is_gimple_val (rhs) && (!is_gimple_cast (rhs) || !is_gimple_val (TREE_OPERAND (rhs, 0)))) *gimplify_init_stmts_p = true; } } /* Generate code to initialize the parameters of the function at the top of the stack in ID from the ARGS (presented as a TREE_LIST). */ static tree initialize_inlined_parameters (inline_data *id, tree args, tree static_chain, tree fn, tree bind_expr) { tree init_stmts = NULL_TREE; tree parms; tree a; tree p; tree vars = NULL_TREE; bool gimplify_init_stmts_p = false; int argnum = 0; /* Figure out what the parameters are. */ parms = DECL_ARGUMENTS (fn); if (fn == current_function_decl) parms = cfun->saved_args; /* Loop through the parameter declarations, replacing each with an equivalent VAR_DECL, appropriately initialized. */ for (p = parms, a = args; p; a = a ? TREE_CHAIN (a) : a, p = TREE_CHAIN (p)) { tree value; ++argnum; /* Find the initializer. */ value = lang_hooks.tree_inlining.convert_parm_for_inlining (p, a ? TREE_VALUE (a) : NULL_TREE, fn, argnum); setup_one_parameter (id, p, value, fn, &init_stmts, &vars, &gimplify_init_stmts_p); } /* Evaluate trailing arguments. */ for (; a; a = TREE_CHAIN (a)) { tree value = TREE_VALUE (a); append_to_statement_list (value, &init_stmts); } /* Initialize the static chain. */ p = DECL_STRUCT_FUNCTION (fn)->static_chain_decl; if (p) { /* No static chain? Seems like a bug in tree-nested.c. 
*/ if (!static_chain) abort (); setup_one_parameter (id, p, static_chain, fn, &init_stmts, &vars, &gimplify_init_stmts_p); } if (gimplify_init_stmts_p && lang_hooks.gimple_before_inlining) gimplify_body (&init_stmts, current_function_decl); declare_inline_vars (bind_expr, vars); return init_stmts; } /* Declare a return variable to replace the RESULT_DECL for the function we are calling. An appropriate decl is returned. ??? Needs documentation of parameters. */ static tree declare_return_variable (inline_data *id, tree return_slot_addr, tree *use_p) { tree fn = VARRAY_TOP_TREE (id->fns); tree result = DECL_RESULT (fn); int need_return_decl = 1; tree var; /* We don't need to do anything for functions that don't return anything. */ if (!result || VOID_TYPE_P (TREE_TYPE (result))) { *use_p = NULL_TREE; return NULL_TREE; } var = (lang_hooks.tree_inlining.copy_res_decl_for_inlining (result, fn, VARRAY_TREE (id->fns, 0), id->decl_map, &need_return_decl, return_slot_addr)); /* Do not have the rest of GCC warn about this variable as it should not be visible to the user. */ TREE_NO_WARNING (var) = 1; /* Register the VAR_DECL as the equivalent for the RESULT_DECL; that way, when the RESULT_DECL is encountered, it will be automatically replaced by the VAR_DECL. */ insert_decl_map (id, result, var); /* Remember this so we can ignore it in remap_decls. */ id->retvar = var; /* Build the use expr. If the return type of the function was promoted, convert it back to the expected type. */ if (return_slot_addr) /* The function returns through an explicit return slot, not a normal return value. */ *use_p = NULL_TREE; else if (TREE_TYPE (var) == TREE_TYPE (TREE_TYPE (fn))) *use_p = var; else if (TREE_CODE (var) == INDIRECT_REF) *use_p = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (fn)), TREE_OPERAND (var, 0)); else if (TREE_ADDRESSABLE (TREE_TYPE (var))) abort (); else *use_p = build1 (NOP_EXPR, TREE_TYPE (TREE_TYPE (fn)), var); /* Build the declaration statement if FN does not return an aggregate. */ if (need_return_decl) return var; /* If FN does return an aggregate, there's no need to declare the return variable; we're using a variable in our caller's frame. */ else return NULL_TREE; } /* Returns nonzero if a function can be inlined as a tree. */ bool tree_inlinable_function_p (tree fn) { return inlinable_function_p (fn); } static const char *inline_forbidden_reason; static tree inline_forbidden_p_1 (tree *nodep, int *walk_subtrees ATTRIBUTE_UNUSED, void *fnp) { tree node = *nodep; tree fn = (tree) fnp; tree t; switch (TREE_CODE (node)) { case CALL_EXPR: /* Refuse to inline alloca call unless user explicitly forced so as this may change program's memory overhead drastically when the function using alloca is called in loop. In GCC present in SPEC2000 inlining into schedule_block cause it to require 2GB of RAM instead of 256MB. */ if (alloca_call_p (node) && !lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn))) { inline_forbidden_reason = N_("%Jfunction '%F' can never be inlined because it uses " "alloca (override using the always_inline attribute)"); return node; } t = get_callee_fndecl (node); if (! t) break; /* We cannot inline functions that call setjmp. */ if (setjmp_call_p (t)) { inline_forbidden_reason = N_("%Jfunction '%F' can never be inlined because it uses setjmp"); return node; } if (DECL_BUILT_IN_CLASS (t) == BUILT_IN_NORMAL) switch (DECL_FUNCTION_CODE (t)) { /* We cannot inline functions that take a variable number of arguments. 
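   For example, a function such as (an illustrative function using
   <stdarg.h>, not part of these sources)

     int sum (int n, ...)
     {
       va_list ap;
       int i, total = 0;
       va_start (ap, n);
       for (i = 0; i < n; i++)
         total += va_arg (ap, int);
       va_end (ap);
       return total;
     }

   expands va_start/va_end into the builtins listed below, so a body that
   contains them is rejected here.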
*/ case BUILT_IN_VA_START: case BUILT_IN_STDARG_START: case BUILT_IN_NEXT_ARG: case BUILT_IN_VA_END: inline_forbidden_reason = N_("%Jfunction '%F' can never be inlined because it " "uses variable argument lists"); return node; case BUILT_IN_LONGJMP: /* We can't inline functions that call __builtin_longjmp at all. The non-local goto machinery really requires the destination be in a different function. If we allow the function calling __builtin_longjmp to be inlined into the function calling __builtin_setjmp, Things will Go Awry. */ inline_forbidden_reason = N_("%Jfunction '%F' can never be inlined because " "it uses setjmp-longjmp exception handling"); return node; case BUILT_IN_NONLOCAL_GOTO: /* Similarly. */ inline_forbidden_reason = N_("%Jfunction '%F' can never be inlined because " "it uses non-local goto"); return node; default: break; } break; case BIND_EXPR: for (t = BIND_EXPR_VARS (node); t ; t = TREE_CHAIN (t)) { /* We cannot inline functions that contain other functions. */ if (TREE_CODE (t) == FUNCTION_DECL && DECL_INITIAL (t)) { inline_forbidden_reason = N_("%Jfunction '%F' can never be inlined " "because it contains a nested function"); return node; } } break; case GOTO_EXPR: t = TREE_OPERAND (node, 0); /* We will not inline a function which uses computed goto. The addresses of its local labels, which may be tucked into global storage, are of course not constant across instantiations, which causes unexpected behavior. */ if (TREE_CODE (t) != LABEL_DECL) { inline_forbidden_reason = N_("%Jfunction '%F' can never be inlined " "because it contains a computed goto"); return node; } break; case LABEL_EXPR: t = TREE_OPERAND (node, 0); if (DECL_NONLOCAL (t)) { /* We cannot inline a function that receives a non-local goto because we cannot remap the destination label used in the function that is performing the non-local goto. */ inline_forbidden_reason = N_("%Jfunction '%F' can never be inlined " "because it receives a non-local goto"); return node; } break; case RECORD_TYPE: case UNION_TYPE: /* We cannot inline a function of the form void F (int i) { struct S { int ar[i]; } s; } Attempting to do so produces a catch-22. If walk_tree examines the TYPE_FIELDS chain of RECORD_TYPE/ UNION_TYPE nodes, then it goes into infinite recursion on a structure containing a pointer to its own type. If it doesn't, then the type node for S doesn't get adjusted properly when F is inlined, and we abort in find_function_data. */ for (t = TYPE_FIELDS (node); t; t = TREE_CHAIN (t)) if (variably_modified_type_p (TREE_TYPE (t), NULL)) { inline_forbidden_reason = N_("%Jfunction '%F' can never be inlined " "because it uses variable sized variables"); return node; } default: break; } return NULL_TREE; } /* Return subexpression representing possible alloca call, if any. */ static tree inline_forbidden_p (tree fndecl) { location_t saved_loc = input_location; tree ret = walk_tree_without_duplicates (&DECL_SAVED_TREE (fndecl), inline_forbidden_p_1, fndecl); input_location = saved_loc; return ret; } /* Returns nonzero if FN is a function that does not have any fundamental inline blocking properties. */ static bool inlinable_function_p (tree fn) { bool inlinable = true; /* If we've already decided this function shouldn't be inlined, there's no need to check again. */ if (DECL_UNINLINABLE (fn)) return false; /* See if there is any language-specific reason it cannot be inlined. (It is important that this hook be called early because in C++ it may result in template instantiation.) 
If the function is not inlinable for language-specific reasons, it is left up to the langhook to explain why. */ inlinable = !lang_hooks.tree_inlining.cannot_inline_tree_fn (&fn); /* If we don't have the function body available, we can't inline it. However, this should not be recorded since we also get here for forward declared inline functions. Therefore, return at once. */ if (!DECL_SAVED_TREE (fn)) return false; /* If we're not inlining at all, then we cannot inline this function. */ else if (!flag_inline_trees) inlinable = false; /* Only try to inline functions if DECL_INLINE is set. This should be true for all functions declared `inline', and for all other functions as well with -finline-functions. Don't think of disregarding DECL_INLINE when flag_inline_trees == 2; it's the front-end that must set DECL_INLINE in this case, because dwarf2out loses if a function that does not have DECL_INLINE set is inlined anyway. That is why we have both DECL_INLINE and DECL_DECLARED_INLINE_P. */ /* FIXME: When flag_inline_trees dies, the check for flag_unit_at_a_time here should be redundant. */ else if (!DECL_INLINE (fn) && !flag_unit_at_a_time) inlinable = false; else if (inline_forbidden_p (fn)) { /* See if we should warn about uninlinable functions. Previously, some of these warnings would be issued while trying to expand the function inline, but that would cause multiple warnings about functions that would for example call alloca. But since this a property of the function, just one warning is enough. As a bonus we can now give more details about the reason why a function is not inlinable. We only warn for functions declared `inline' by the user. */ bool do_warning = (warn_inline && DECL_INLINE (fn) && DECL_DECLARED_INLINE_P (fn) && !DECL_IN_SYSTEM_HEADER (fn)); if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn))) sorry (inline_forbidden_reason, fn, fn); else if (do_warning) warning (inline_forbidden_reason, fn, fn); inlinable = false; } /* Squirrel away the result so that we don't have to check again. */ DECL_UNINLINABLE (fn) = !inlinable; return inlinable; } /* Used by estimate_num_insns. Estimate number of instructions seen by given statement. */ static tree estimate_num_insns_1 (tree *tp, int *walk_subtrees, void *data) { int *count = data; tree x = *tp; if (TYPE_P (x) || DECL_P (x)) { *walk_subtrees = 0; return NULL; } /* Assume that constants and references counts nothing. These should be majorized by amount of operations among them we count later and are common target of CSE and similar optimizations. */ else if (TREE_CODE_CLASS (TREE_CODE (x)) == 'c' || TREE_CODE_CLASS (TREE_CODE (x)) == 'r') return NULL; switch (TREE_CODE (x)) { /* Containers have no cost. */ case TREE_LIST: case TREE_VEC: case BLOCK: case COMPONENT_REF: case BIT_FIELD_REF: case INDIRECT_REF: case BUFFER_REF: case ARRAY_REF: case ARRAY_RANGE_REF: case OBJ_TYPE_REF: case EXC_PTR_EXPR: /* ??? */ case FILTER_EXPR: /* ??? 
*/ case COMPOUND_EXPR: case BIND_EXPR: case LABELED_BLOCK_EXPR: case WITH_CLEANUP_EXPR: case NOP_EXPR: case VIEW_CONVERT_EXPR: case SAVE_EXPR: case UNSAVE_EXPR: case ADDR_EXPR: case REFERENCE_EXPR: case COMPLEX_EXPR: case EXIT_BLOCK_EXPR: case CASE_LABEL_EXPR: case SSA_NAME: case CATCH_EXPR: case EH_FILTER_EXPR: case STATEMENT_LIST: case ERROR_MARK: case NON_LVALUE_EXPR: case ENTRY_VALUE_EXPR: case FDESC_EXPR: case VA_ARG_EXPR: case TRY_CATCH_EXPR: case TRY_FINALLY_EXPR: case LABEL_EXPR: case GOTO_EXPR: case RETURN_EXPR: case EXIT_EXPR: case LOOP_EXPR: case PHI_NODE: break; /* We don't account constants for now. Assume that the cost is amortized by operations that do use them. We may re-consider this decision once we are able to optimize the tree before estimating it's size and break out static initializers. */ case IDENTIFIER_NODE: case INTEGER_CST: case REAL_CST: case COMPLEX_CST: case VECTOR_CST: case STRING_CST: *walk_subtrees = 0; return NULL; /* Recognize assignments of large structures and constructors of big arrays. */ case INIT_EXPR: case MODIFY_EXPR: x = TREE_OPERAND (x, 0); /* FALLTHRU */ case TARGET_EXPR: case CONSTRUCTOR: { HOST_WIDE_INT size; size = int_size_in_bytes (TREE_TYPE (x)); if (size < 0 || size > MOVE_MAX_PIECES * MOVE_RATIO) *count += 10; else *count += ((size + MOVE_MAX_PIECES - 1) / MOVE_MAX_PIECES); } break; /* Assign cost of 1 to usual operations. ??? We may consider mapping RTL costs to this. */ case COND_EXPR: case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: case NEGATE_EXPR: case FLOAT_EXPR: case MIN_EXPR: case MAX_EXPR: case ABS_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case BIT_NOT_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case TRUTH_NOT_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case CONVERT_EXPR: case CONJ_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: case SWITCH_EXPR: case ASM_EXPR: case RESX_EXPR: *count++; break; /* Few special cases of expensive operations. This is useful to avoid inlining on functions having too many of these. */ case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: *count += 10; break; case CALL_EXPR: { tree decl = get_callee_fndecl (x); if (decl && DECL_BUILT_IN (decl)) switch (DECL_FUNCTION_CODE (decl)) { case BUILT_IN_CONSTANT_P: *walk_subtrees = 0; return NULL_TREE; case BUILT_IN_EXPECT: return NULL_TREE; default: break; } *count += 10; break; } default: /* Abort here se we know we don't miss any nodes. */ abort (); } return NULL; } /* Estimate number of instructions that will be created by expanding EXPR. */ int estimate_num_insns (tree expr) { int num = 0; walk_tree_without_duplicates (&expr, estimate_num_insns_1, &num); return num; } /* If *TP is a CALL_EXPR, replace it with its inline expansion. 
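   A rough sketch of the transformation (simplified; the real trees are
   built piecemeal by the code below and the temporary names here are
   invented): a statement

     y = f (x);      where      inline int f (int a) { return a + 1; }

   is replaced by a BIND_EXPR shaped roughly like

     {
       int a.1 = x;              initialize_inlined_parameters
       int retval.2;             declare_return_variable
       retval.2 = a.1 + 1;       copy_body, with the RETURN_EXPR turned
       goto ret_label;           into a jump to ID->RET_LABEL
      ret_label:;
       retval.2;                 the value of the whole expression
     }

   and the surrounding MODIFY_EXPR then assigns that value to `y'.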
*/ static tree expand_call_inline (tree *tp, int *walk_subtrees, void *data) { inline_data *id; tree t; tree expr; tree stmt; tree use_retvar; tree decl; tree fn; tree arg_inits; tree *inlined_body; tree inline_result; splay_tree st; tree args; tree return_slot_addr; location_t saved_location; struct cgraph_edge *edge; const char *reason; /* See what we've got. */ id = (inline_data *) data; t = *tp; /* Set input_location here so we get the right instantiation context if we call instantiate_decl from inlinable_function_p. */ saved_location = input_location; if (EXPR_HAS_LOCATION (t)) input_location = EXPR_LOCATION (t); /* Recurse, but letting recursive invocations know that we are inside the body of a TARGET_EXPR. */ if (TREE_CODE (*tp) == TARGET_EXPR) { #if 0 int i, len = first_rtl_op (TARGET_EXPR); /* We're walking our own subtrees. */ *walk_subtrees = 0; /* Actually walk over them. This loop is the body of walk_trees, omitting the case where the TARGET_EXPR itself is handled. */ for (i = 0; i < len; ++i) { if (i == 2) ++id->in_target_cleanup_p; walk_tree (&TREE_OPERAND (*tp, i), expand_call_inline, data, id->tree_pruner); if (i == 2) --id->in_target_cleanup_p; } goto egress; #endif } if (TYPE_P (t)) /* Because types were not copied in copy_body, CALL_EXPRs beneath them should not be expanded. This can happen if the type is a dynamic array type, for example. */ *walk_subtrees = 0; /* From here on, we're only interested in CALL_EXPRs. */ if (TREE_CODE (t) != CALL_EXPR) goto egress; /* First, see if we can figure out what function is being called. If we cannot, then there is no hope of inlining the function. */ fn = get_callee_fndecl (t); if (!fn) goto egress; /* Turn forward declarations into real ones. */ fn = cgraph_node (fn)->decl; /* If fn is a declaration of a function in a nested scope that was globally declared inline, we don't set its DECL_INITIAL. However, we can't blindly follow DECL_ABSTRACT_ORIGIN because the C++ front-end uses it for cdtors to refer to their internal declarations, that are not real functions. Fortunately those don't have trees to be saved, so we can tell by checking their DECL_SAVED_TREE. */ if (! DECL_INITIAL (fn) && DECL_ABSTRACT_ORIGIN (fn) && DECL_SAVED_TREE (DECL_ABSTRACT_ORIGIN (fn))) fn = DECL_ABSTRACT_ORIGIN (fn); /* Objective C and fortran still calls tree_rest_of_compilation directly. Kill this check once this is fixed. */ if (!id->current_node->analyzed) goto egress; edge = cgraph_edge (id->current_node, t); /* Constant propagation on argument done during previous inlining may create new direct call. Produce an edge for it. */ if (!edge) { struct cgraph_node *dest = cgraph_node (fn); /* We have missing edge in the callgraph. This can happen in one case where previous inlining turned indirect call into direct call by constant propagating arguments. In all other cases we hit a bug (incorrect node sharing is most common reason for missing edges. */ if (!dest->needed) abort (); cgraph_create_edge (id->node, dest, t)->inline_failed = N_("originally indirect function call not considered for inlining"); goto egress; } /* Don't try to inline functions that are not well-suited to inlining. 
*/ if (!cgraph_inline_p (edge, &reason)) { if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn))) { sorry ("%Jinlining failed in call to '%F': %s", fn, fn, reason); sorry ("called from here"); } else if (warn_inline && DECL_DECLARED_INLINE_P (fn) && !DECL_IN_SYSTEM_HEADER (fn) && strlen (reason)) { warning ("%Jinlining failed in call to '%F': %s", fn, fn, reason); warning ("called from here"); } goto egress; } #ifdef ENABLE_CHECKING if (edge->callee->decl != id->node->decl) verify_cgraph_node (edge->callee); #endif if (! lang_hooks.tree_inlining.start_inlining (fn)) goto egress; /* Build a block containing code to initialize the arguments, the actual inline expansion of the body, and a label for the return statements within the function to jump to. The type of the statement expression is the return type of the function call. */ stmt = NULL; expr = build (BIND_EXPR, TREE_TYPE (TREE_TYPE (fn)), NULL_TREE, stmt, make_node (BLOCK)); BLOCK_ABSTRACT_ORIGIN (BIND_EXPR_BLOCK (expr)) = fn; /* Local declarations will be replaced by their equivalents in this map. */ st = id->decl_map; id->decl_map = splay_tree_new (splay_tree_compare_pointers, NULL, NULL); /* Initialize the parameters. */ args = TREE_OPERAND (t, 1); return_slot_addr = NULL_TREE; if (CALL_EXPR_HAS_RETURN_SLOT_ADDR (t)) { return_slot_addr = TREE_VALUE (args); args = TREE_CHAIN (args); TREE_TYPE (expr) = void_type_node; } arg_inits = initialize_inlined_parameters (id, args, TREE_OPERAND (t, 2), fn, expr); if (arg_inits) { /* Expand any inlined calls in the initializers. Do this before we push FN on the stack of functions we are inlining; we want to inline calls to FN that appear in the initializers for the parameters. Note we need to save and restore the saved tree statement iterator to avoid having it clobbered by expand_calls_inline. */ tree_stmt_iterator save_tsi; save_tsi = id->tsi; expand_calls_inline (&arg_inits, id); id->tsi = save_tsi; /* And add them to the tree. */ append_to_statement_list (arg_inits, &BIND_EXPR_BODY (expr)); } /* Record the function we are about to inline so that we can avoid recursing into it. */ VARRAY_PUSH_TREE (id->fns, fn); /* Record the function we are about to inline if optimize_function has not been called on it yet and we don't have it in the list. */ if (! DECL_INLINED_FNS (fn)) { int i; for (i = VARRAY_ACTIVE_SIZE (id->inlined_fns) - 1; i >= 0; i--) if (VARRAY_TREE (id->inlined_fns, i) == fn) break; if (i < 0) VARRAY_PUSH_TREE (id->inlined_fns, fn); } /* Return statements in the function body will be replaced by jumps to the RET_LABEL. */ id->ret_label = build_decl (LABEL_DECL, NULL_TREE, NULL_TREE); DECL_ARTIFICIAL (id->ret_label) = 1; DECL_CONTEXT (id->ret_label) = VARRAY_TREE (id->fns, 0); insert_decl_map (id, id->ret_label, id->ret_label); if (! DECL_INITIAL (fn) || TREE_CODE (DECL_INITIAL (fn)) != BLOCK) abort (); /* Declare the return variable for the function. */ decl = declare_return_variable (id, return_slot_addr, &use_retvar); if (decl) declare_inline_vars (expr, decl); /* After we've initialized the parameters, we insert the body of the function itself. */ { struct cgraph_node *old_node = id->current_node; id->current_node = edge->callee; append_to_statement_list (copy_body (id), &BIND_EXPR_BODY (expr)); id->current_node = old_node; } inlined_body = &BIND_EXPR_BODY (expr); /* After the body of the function comes the RET_LABEL. This must come before we evaluate the returned value below, because that evaluation may cause RTL to be generated. 
*/ if (TREE_USED (id->ret_label)) { tree label = build1 (LABEL_EXPR, void_type_node, id->ret_label); append_to_statement_list (label, &BIND_EXPR_BODY (expr)); } /* Finally, mention the returned value so that the value of the statement-expression is the returned value of the function. */ if (use_retvar) /* Set TREE_TYPE on BIND_EXPR? */ append_to_statement_list_force (use_retvar, &BIND_EXPR_BODY (expr)); /* Clean up. */ splay_tree_delete (id->decl_map); id->decl_map = st; /* The new expression has side-effects if the old one did. */ TREE_SIDE_EFFECTS (expr) = TREE_SIDE_EFFECTS (t); /* If we are working with gimple form, then we need to keep the tree in gimple form. If we are not in gimple form, we can just replace *tp with the new BIND_EXPR. */ if (lang_hooks.gimple_before_inlining) { tree save_decl; /* We want to create a new variable to hold the result of the inlined body. This new variable needs to be added to the function which we are inlining into, thus the saving and restoring of current_function_decl. */ save_decl = current_function_decl; current_function_decl = id->node->decl; inline_result = voidify_wrapper_expr (expr, NULL); current_function_decl = save_decl; /* If the inlined function returns a result that we care about, then we're going to need to splice in a MODIFY_EXPR. Otherwise the call was a standalone statement and we can just replace it with the BIND_EXPR inline representation of the called function. */ if (TREE_CODE (tsi_stmt (id->tsi)) != CALL_EXPR) { tsi_link_before (&id->tsi, expr, TSI_SAME_STMT); *tp = inline_result; } else *tp = expr; /* When we gimplify a function call, we may clear TREE_SIDE_EFFECTS on the call if it is to a "const" function. Thus the copy of TREE_SIDE_EFFECTS from the CALL_EXPR to the BIND_EXPR above with result in TREE_SIDE_EFFECTS not being set for the inlined copy of a "const" function. Unfortunately, that is wrong as inlining the function can create/expose interesting side effects (such as setting of a return value). The easiest solution is to simply recalculate TREE_SIDE_EFFECTS for the toplevel expression. */ recalculate_side_effects (expr); } else *tp = expr; /* If the value of the new expression is ignored, that's OK. We don't warn about this for CALL_EXPRs, so we shouldn't warn about the equivalent inlined version either. */ TREE_USED (*tp) = 1; /* Update callgraph if needed. */ cgraph_remove_node (edge->callee); /* Recurse into the body of the just inlined function. */ expand_calls_inline (inlined_body, id); VARRAY_POP (id->fns); /* Don't walk into subtrees. We've already handled them above. */ *walk_subtrees = 0; lang_hooks.tree_inlining.end_inlining (fn); /* Keep iterating. 
*/ egress: input_location = saved_location; return NULL_TREE; } static void gimple_expand_calls_inline (tree *stmt_p, inline_data *id) { tree stmt = *stmt_p; enum tree_code code = TREE_CODE (stmt); int dummy; switch (code) { case STATEMENT_LIST: { tree_stmt_iterator i; tree new; for (i = tsi_start (stmt); !tsi_end_p (i); ) { id->tsi = i; gimple_expand_calls_inline (tsi_stmt_ptr (i), id); new = tsi_stmt (i); if (TREE_CODE (new) == STATEMENT_LIST) { tsi_link_before (&i, new, TSI_SAME_STMT); tsi_delink (&i); } else tsi_next (&i); } } break; case COND_EXPR: gimple_expand_calls_inline (&COND_EXPR_THEN (stmt), id); gimple_expand_calls_inline (&COND_EXPR_ELSE (stmt), id); break; case CATCH_EXPR: gimple_expand_calls_inline (&CATCH_BODY (stmt), id); break; case EH_FILTER_EXPR: gimple_expand_calls_inline (&EH_FILTER_FAILURE (stmt), id); break; case TRY_CATCH_EXPR: case TRY_FINALLY_EXPR: gimple_expand_calls_inline (&TREE_OPERAND (stmt, 0), id); gimple_expand_calls_inline (&TREE_OPERAND (stmt, 1), id); break; case BIND_EXPR: gimple_expand_calls_inline (&BIND_EXPR_BODY (stmt), id); break; case COMPOUND_EXPR: /* We're gimple. We should have gotten rid of all these. */ abort (); case RETURN_EXPR: stmt_p = &TREE_OPERAND (stmt, 0); stmt = *stmt_p; if (!stmt || TREE_CODE (stmt) != MODIFY_EXPR) break; /* FALLTHRU */ case MODIFY_EXPR: stmt_p = &TREE_OPERAND (stmt, 1); stmt = *stmt_p; if (TREE_CODE (stmt) != CALL_EXPR) break; /* FALLTHRU */ case CALL_EXPR: expand_call_inline (stmt_p, &dummy, id); break; default: break; } } /* Walk over the entire tree *TP, replacing CALL_EXPRs with inline expansions as appropriate. */ static void expand_calls_inline (tree *tp, inline_data *id) { /* If we are not in gimple form, then we want to walk the tree recursively as we do not know anything about the structure of the tree. */ if (!lang_hooks.gimple_before_inlining) { walk_tree (tp, expand_call_inline, id, id->tree_pruner); return; } /* We are in gimple form. We want to stay in gimple form. Walk the statements, inlining calls in each statement. By walking the statements, we have enough information to keep the tree in gimple form as we insert inline bodies. */ gimple_expand_calls_inline (tp, id); } /* Expand calls to inline functions in the body of FN. */ void optimize_inline_calls (tree fn) { inline_data id; tree prev_fn; /* There is no point in performing inlining if errors have already occurred -- and we might crash if we try to inline invalid code. */ if (errorcount || sorrycount) return; /* Clear out ID. */ memset (&id, 0, sizeof (id)); id.current_node = id.node = cgraph_node (fn); /* Don't allow recursion into FN. */ VARRAY_TREE_INIT (id.fns, 32, "fns"); VARRAY_PUSH_TREE (id.fns, fn); /* Or any functions that aren't finished yet. */ prev_fn = NULL_TREE; if (current_function_decl) { VARRAY_PUSH_TREE (id.fns, current_function_decl); prev_fn = current_function_decl; } prev_fn = lang_hooks.tree_inlining.add_pending_fn_decls (&id.fns, prev_fn); /* Create the list of functions this call will inline. */ VARRAY_TREE_INIT (id.inlined_fns, 32, "inlined_fns"); /* Keep track of the low-water mark, i.e., the point where the first real inlining is represented in ID.FNS. */ id.first_inlined_fn = VARRAY_ACTIVE_SIZE (id.fns); /* Replace all calls to inline functions with the bodies of those functions. */ id.tree_pruner = htab_create (37, htab_hash_pointer, htab_eq_pointer, NULL); expand_calls_inline (&DECL_SAVED_TREE (fn), &id); /* Clean up. 
*/ htab_delete (id.tree_pruner); if (DECL_LANG_SPECIFIC (fn)) { tree ifn = make_tree_vec (VARRAY_ACTIVE_SIZE (id.inlined_fns)); if (VARRAY_ACTIVE_SIZE (id.inlined_fns)) memcpy (&TREE_VEC_ELT (ifn, 0), &VARRAY_TREE (id.inlined_fns, 0), VARRAY_ACTIVE_SIZE (id.inlined_fns) * sizeof (tree)); DECL_INLINED_FNS (fn) = ifn; } #ifdef ENABLE_CHECKING { struct cgraph_edge *e; verify_cgraph_node (id.node); /* Double check that we inlined everything we are supposed to inline. */ for (e = id.node->callees; e; e = e->next_callee) if (!e->inline_failed) abort (); } #endif } /* FN is a function that has a complete body, and CLONE is a function whose body is to be set to a copy of FN, mapping argument declarations according to the ARG_MAP splay_tree. */ void clone_body (tree clone, tree fn, void *arg_map) { inline_data id; /* Clone the body, as if we were making an inline call. But, remap the parameters in the callee to the parameters of caller. If there's an in-charge parameter, map it to an appropriate constant. */ memset (&id, 0, sizeof (id)); VARRAY_TREE_INIT (id.fns, 2, "fns"); VARRAY_PUSH_TREE (id.fns, clone); VARRAY_PUSH_TREE (id.fns, fn); id.decl_map = (splay_tree)arg_map; /* Cloning is treated slightly differently from inlining. Set CLONING_P so that it's clear which operation we're performing. */ id.cloning_p = true; /* Actually copy the body. */ append_to_statement_list_force (copy_body (&id), &DECL_SAVED_TREE (clone)); } /* Save duplicate of body in FN. MAP is used to pass around splay tree used to update arguments in restore_body. */ tree save_body (tree fn, tree *arg_copy) { inline_data id; tree body, *parg; memset (&id, 0, sizeof (id)); VARRAY_TREE_INIT (id.fns, 1, "fns"); VARRAY_PUSH_TREE (id.fns, fn); id.node = cgraph_node (fn); id.saving_p = true; id.decl_map = splay_tree_new (splay_tree_compare_pointers, NULL, NULL); *arg_copy = DECL_ARGUMENTS (fn); for (parg = arg_copy; *parg; parg = &TREE_CHAIN (*parg)) { tree new = copy_node (*parg); lang_hooks.dup_lang_specific_decl (new); DECL_ABSTRACT_ORIGIN (new) = DECL_ORIGIN (*parg); insert_decl_map (&id, *parg, new); TREE_CHAIN (new) = TREE_CHAIN (*parg); *parg = new; } insert_decl_map (&id, DECL_RESULT (fn), DECL_RESULT (fn)); /* Actually copy the body. */ body = copy_body (&id); /* Clean up. */ splay_tree_delete (id.decl_map); return body; } #define WALK_SUBTREE(NODE) \ do \ { \ result = walk_tree (&(NODE), func, data, htab); \ if (result) \ return result; \ } \ while (0) /* This is a subroutine of walk_tree that walks field of TYPE that are to be walked whenever a type is seen in the tree. Rest of operands and return value are as for walk_tree. */ static tree walk_type_fields (tree type, walk_tree_fn func, void *data, void *htab) { tree result = NULL_TREE; switch (TREE_CODE (type)) { case POINTER_TYPE: case REFERENCE_TYPE: /* We have to worry about mutually recursive pointers. These can't be written in C. They can in Ada. It's pathlogical, but there's an ACATS test (c38102a) that checks it. Deal with this by checking if we're pointing to another pointer, that one points to another pointer, that one does too, and we have no htab. If so, get a hash table. We check three levels deep to avoid the cost of the hash table if we don't need one. */ if (POINTER_TYPE_P (TREE_TYPE (type)) && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (type))) && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (TREE_TYPE (type)))) && !htab) { result = walk_tree_without_duplicates (&TREE_TYPE (type), func, data); if (result) return result; break; } /* ... fall through ... 
*/ case COMPLEX_TYPE: WALK_SUBTREE (TREE_TYPE (type)); break; case METHOD_TYPE: WALK_SUBTREE (TYPE_METHOD_BASETYPE (type)); /* Fall through. */ case FUNCTION_TYPE: WALK_SUBTREE (TREE_TYPE (type)); { tree arg; /* We never want to walk into default arguments. */ for (arg = TYPE_ARG_TYPES (type); arg; arg = TREE_CHAIN (arg)) WALK_SUBTREE (TREE_VALUE (arg)); } break; case ARRAY_TYPE: /* Don't follow this nodes's type if a pointer for fear that we'll have infinite recursion. Those types are uninteresting anyway. */ if (!POINTER_TYPE_P (TREE_TYPE (type)) && TREE_CODE (TREE_TYPE (type)) != OFFSET_TYPE) WALK_SUBTREE (TREE_TYPE (type)); WALK_SUBTREE (TYPE_DOMAIN (type)); break; case BOOLEAN_TYPE: case ENUMERAL_TYPE: case INTEGER_TYPE: case CHAR_TYPE: case REAL_TYPE: WALK_SUBTREE (TYPE_MIN_VALUE (type)); WALK_SUBTREE (TYPE_MAX_VALUE (type)); break; case OFFSET_TYPE: WALK_SUBTREE (TREE_TYPE (type)); WALK_SUBTREE (TYPE_OFFSET_BASETYPE (type)); break; default: break; } return NULL_TREE; } /* Apply FUNC to all the sub-trees of TP in a pre-order traversal. FUNC is called with the DATA and the address of each sub-tree. If FUNC returns a non-NULL value, the traversal is aborted, and the value returned by FUNC is returned. If HTAB is non-NULL it is used to record the nodes visited, and to avoid visiting a node more than once. */ tree walk_tree (tree *tp, walk_tree_fn func, void *data, void *htab_) { htab_t htab = (htab_t) htab_; enum tree_code code; int walk_subtrees; tree result; #define WALK_SUBTREE_TAIL(NODE) \ do \ { \ tp = & (NODE); \ goto tail_recurse; \ } \ while (0) tail_recurse: /* Skip empty subtrees. */ if (!*tp) return NULL_TREE; if (htab) { void **slot; /* Don't walk the same tree twice, if the user has requested that we avoid doing so. */ slot = htab_find_slot (htab, *tp, INSERT); if (*slot) return NULL_TREE; *slot = *tp; } /* Call the function. */ walk_subtrees = 1; result = (*func) (tp, &walk_subtrees, data); /* If we found something, return it. */ if (result) return result; code = TREE_CODE (*tp); /* Even if we didn't, FUNC may have decided that there was nothing interesting below this point in the tree. */ if (!walk_subtrees) { if (code == TREE_LIST) /* But we still need to check our siblings. */ WALK_SUBTREE_TAIL (TREE_CHAIN (*tp)); else return NULL_TREE; } result = lang_hooks.tree_inlining.walk_subtrees (tp, &walk_subtrees, func, data, htab); if (result || ! walk_subtrees) return result; /* If this is a DECL_EXPR, walk into various fields of the type that it's defining. We only want to walk into these fields of a type in this case. Note that decls get walked as part of the processing of a BIND_EXPR. ??? Precisely which fields of types that we are supposed to walk in this case vs. the normal case aren't well defined. */ if (code == DECL_EXPR && TREE_CODE (DECL_EXPR_DECL (*tp)) == TYPE_DECL && TREE_CODE (TREE_TYPE (DECL_EXPR_DECL (*tp))) != ERROR_MARK) { tree *type_p = &TREE_TYPE (DECL_EXPR_DECL (*tp)); /* Call the function for the type. See if it returns anything or doesn't want us to continue. If we are to continue, walk both the normal fields and those for the declaration case. */ result = (*func) (type_p, &walk_subtrees, data); if (result || !walk_subtrees) return NULL_TREE; result = walk_type_fields (*type_p, func, data, htab_); if (result) return result; WALK_SUBTREE (TYPE_SIZE (*type_p)); WALK_SUBTREE (TYPE_SIZE_UNIT (*type_p)); /* If this is a record type, also walk the fields. 
*/ if (TREE_CODE (*type_p) == RECORD_TYPE || TREE_CODE (*type_p) == UNION_TYPE || TREE_CODE (*type_p) == QUAL_UNION_TYPE) { tree field; for (field = TYPE_FIELDS (*type_p); field; field = TREE_CHAIN (field)) { /* We'd like to look at the type of the field, but we can easily get infinite recursion. So assume it's pointed to elsewhere in the tree. Also, ignore things that aren't fields. */ if (TREE_CODE (field) != FIELD_DECL) continue; WALK_SUBTREE (DECL_FIELD_OFFSET (field)); WALK_SUBTREE (DECL_SIZE (field)); WALK_SUBTREE (DECL_SIZE_UNIT (field)); if (TREE_CODE (*type_p) == QUAL_UNION_TYPE) WALK_SUBTREE (DECL_QUALIFIER (field)); } } } else if (code != EXIT_BLOCK_EXPR && code != SAVE_EXPR && code != BIND_EXPR && IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))) { int i, len; /* Walk over all the sub-trees of this operand. */ len = first_rtl_op (code); /* TARGET_EXPRs are peculiar: operands 1 and 3 can be the same. But, we only want to walk once. */ if (code == TARGET_EXPR && TREE_OPERAND (*tp, 3) == TREE_OPERAND (*tp, 1)) --len; /* Go through the subtrees. We need to do this in forward order so that the scope of a FOR_EXPR is handled properly. */ #ifdef DEBUG_WALK_TREE for (i = 0; i < len; ++i) WALK_SUBTREE (TREE_OPERAND (*tp, i)); #else for (i = 0; i < len - 1; ++i) WALK_SUBTREE (TREE_OPERAND (*tp, i)); if (len) { /* The common case is that we may tail recurse here. */ if (code != BIND_EXPR && !TREE_CHAIN (*tp)) WALK_SUBTREE_TAIL (TREE_OPERAND (*tp, len - 1)); else WALK_SUBTREE (TREE_OPERAND (*tp, len - 1)); } #endif } /* If this is a type, walk the needed fields in the type. */ else if (TYPE_P (*tp)) { result = walk_type_fields (*tp, func, data, htab_); if (result) return result; } else { /* Not one of the easy cases. We must explicitly go through the children. */ switch (code) { case ERROR_MARK: case IDENTIFIER_NODE: case INTEGER_CST: case REAL_CST: case VECTOR_CST: case STRING_CST: case BLOCK: case PLACEHOLDER_EXPR: case SSA_NAME: case FIELD_DECL: case RESULT_DECL: /* None of thse have subtrees other than those already walked above. */ break; case TREE_LIST: WALK_SUBTREE (TREE_VALUE (*tp)); WALK_SUBTREE_TAIL (TREE_CHAIN (*tp)); break; case TREE_VEC: { int len = TREE_VEC_LENGTH (*tp); if (len == 0) break; /* Walk all elements but the first. */ while (--len) WALK_SUBTREE (TREE_VEC_ELT (*tp, len)); /* Now walk the first one as a tail call. */ WALK_SUBTREE_TAIL (TREE_VEC_ELT (*tp, 0)); } case COMPLEX_CST: WALK_SUBTREE (TREE_REALPART (*tp)); WALK_SUBTREE_TAIL (TREE_IMAGPART (*tp)); case CONSTRUCTOR: WALK_SUBTREE_TAIL (CONSTRUCTOR_ELTS (*tp)); case EXIT_BLOCK_EXPR: WALK_SUBTREE_TAIL (TREE_OPERAND (*tp, 1)); case SAVE_EXPR: WALK_SUBTREE_TAIL (TREE_OPERAND (*tp, 0)); case BIND_EXPR: { tree decl; for (decl = BIND_EXPR_VARS (*tp); decl; decl = TREE_CHAIN (decl)) { /* Walk the DECL_INITIAL and DECL_SIZE. We don't want to walk into declarations that are just mentioned, rather than declared; they don't really belong to this part of the tree. And, we can see cycles: the initializer for a declaration can refer to the declaration itself. */ WALK_SUBTREE (DECL_INITIAL (decl)); WALK_SUBTREE (DECL_SIZE (decl)); WALK_SUBTREE (DECL_SIZE_UNIT (decl)); } WALK_SUBTREE_TAIL (BIND_EXPR_BODY (*tp)); } case STATEMENT_LIST: { tree_stmt_iterator i; for (i = tsi_start (*tp); !tsi_end_p (i); tsi_next (&i)) WALK_SUBTREE (*tsi_stmt_ptr (i)); } break; default: /* ??? This could be a language-defined node. We really should make a hook for it, but right now just ignore it. */ break; } } /* We didn't find what we were looking for. 
*/ return NULL_TREE; #undef WALK_SUBTREE #undef WALK_SUBTREE_TAIL } /* Like walk_tree, but does not walk duplicate nodes more than once. */ tree walk_tree_without_duplicates (tree *tp, walk_tree_fn func, void *data) { tree result; htab_t htab; htab = htab_create (37, htab_hash_pointer, htab_eq_pointer, NULL); result = walk_tree (tp, func, data, htab); htab_delete (htab); return result; } /* Passed to walk_tree. Copies the node pointed to, if appropriate. */ tree copy_tree_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) { enum tree_code code = TREE_CODE (*tp); /* We make copies of most nodes. */ if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)) || TREE_CODE_CLASS (code) == 'c' || code == TREE_LIST || code == TREE_VEC || code == TYPE_DECL) { /* Because the chain gets clobbered when we make a copy, we save it here. */ tree chain = TREE_CHAIN (*tp); tree new; /* Copy the node. */ new = copy_node (*tp); /* Propagate mudflap marked-ness. */ if (flag_mudflap && mf_marked_p (*tp)) mf_mark (new); *tp = new; /* Now, restore the chain, if appropriate. That will cause walk_tree to walk into the chain as well. */ if (code == PARM_DECL || code == TREE_LIST) TREE_CHAIN (*tp) = chain; /* For now, we don't update BLOCKs when we make copies. So, we have to nullify all BIND_EXPRs. */ if (TREE_CODE (*tp) == BIND_EXPR) BIND_EXPR_BLOCK (*tp) = NULL_TREE; } else if (TREE_CODE_CLASS (code) == 't') *walk_subtrees = 0; else if (TREE_CODE_CLASS (code) == 'd') *walk_subtrees = 0; else if (code == STATEMENT_LIST) abort (); return NULL_TREE; } /* The SAVE_EXPR pointed to by TP is being copied. If ST contains information indicating to what new SAVE_EXPR this one should be mapped, use that one. Otherwise, create a new node and enter it in ST. */ void remap_save_expr (tree *tp, void *st_, int *walk_subtrees) { splay_tree st = (splay_tree) st_; splay_tree_node n; tree t; /* See if we already encountered this SAVE_EXPR. */ n = splay_tree_lookup (st, (splay_tree_key) *tp); /* If we didn't already remap this SAVE_EXPR, do so now. */ if (!n) { t = copy_node (*tp); /* Remember this SAVE_EXPR. */ splay_tree_insert (st, (splay_tree_key) *tp, (splay_tree_value) t); /* Make sure we don't remap an already-remapped SAVE_EXPR. */ splay_tree_insert (st, (splay_tree_key) t, (splay_tree_value) t); } else { /* We've already walked into this SAVE_EXPR; don't do it again. */ *walk_subtrees = 0; t = (tree) n->value; } /* Replace this SAVE_EXPR with the copy. */ *tp = t; } /* Called via walk_tree. If *TP points to a DECL_STMT for a local label, copies the declaration and enters it in the splay_tree in DATA (which is really an `inline_data *'). */ static tree mark_local_for_remap_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data) { inline_data *id = (inline_data *) data; /* Don't walk into types. */ if (TYPE_P (*tp)) *walk_subtrees = 0; else if (TREE_CODE (*tp) == LABEL_EXPR) { tree decl = TREE_OPERAND (*tp, 0); /* Copy the decl and remember the copy. */ insert_decl_map (id, decl, copy_decl_for_inlining (decl, DECL_CONTEXT (decl), DECL_CONTEXT (decl))); } return NULL_TREE; } /* Called via walk_tree when an expression is unsaved. Using the splay_tree pointed to by ST (which is really a `splay_tree'), remaps all local declarations to appropriate replacements. */ static tree unsave_r (tree *tp, int *walk_subtrees, void *data) { inline_data *id = (inline_data *) data; splay_tree st = id->decl_map; splay_tree_node n; /* Only a local declaration (variable or label). 
*/ if ((TREE_CODE (*tp) == VAR_DECL && !TREE_STATIC (*tp)) || TREE_CODE (*tp) == LABEL_DECL) { /* Lookup the declaration. */ n = splay_tree_lookup (st, (splay_tree_key) *tp); /* If it's there, remap it. */ if (n) *tp = (tree) n->value; } else if (TREE_CODE (*tp) == STATEMENT_LIST) copy_statement_list (tp); else if (TREE_CODE (*tp) == BIND_EXPR) copy_bind_expr (tp, walk_subtrees, id); else if (TREE_CODE (*tp) == SAVE_EXPR) remap_save_expr (tp, st, walk_subtrees); else { copy_tree_r (tp, walk_subtrees, NULL); /* Do whatever unsaving is required. */ unsave_expr_1 (*tp); } /* Keep iterating. */ return NULL_TREE; } /* Default lang hook for "unsave_expr_now". Copies everything in EXPR and replaces variables, labels and SAVE_EXPRs local to EXPR. */ tree lhd_unsave_expr_now (tree expr) { inline_data id; /* There's nothing to do for NULL_TREE. */ if (expr == 0) return expr; /* Set up ID. */ memset (&id, 0, sizeof (id)); VARRAY_TREE_INIT (id.fns, 1, "fns"); VARRAY_PUSH_TREE (id.fns, current_function_decl); id.decl_map = splay_tree_new (splay_tree_compare_pointers, NULL, NULL); /* Walk the tree once to find local labels. */ walk_tree_without_duplicates (&expr, mark_local_for_remap_r, &id); /* Walk the tree again, copying, remapping, and unsaving. */ walk_tree (&expr, unsave_r, &id, NULL); /* Clean up. */ splay_tree_delete (id.decl_map); return expr; } /* Allow someone to determine if SEARCH is a child of TOP from gdb. */ static tree debug_find_tree_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data) { if (*tp == data) return (tree) data; else return NULL; } bool debug_find_tree (tree top, tree search) { return walk_tree_without_duplicates (&top, debug_find_tree_1, search) != 0; } /* Declare the variables created by the inliner. Add all the variables in VARS to BIND_EXPR. */ static void declare_inline_vars (tree bind_expr, tree vars) { if (lang_hooks.gimple_before_inlining) { tree t; for (t = vars; t; t = TREE_CHAIN (t)) DECL_SEEN_IN_BIND_EXPR_P (t) = 1; } add_var_to_bind_expr (bind_expr, vars); } /* Callgraph handling code. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Jan Hubicka This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains basic routines manipulating call graph and variable pool The callgraph: The call-graph is data structure designed for intra-procedural optimization but it is also used in non-unit-at-a-time compilation to allow easier code sharing. The call-graph consist of nodes and edges represented via linked lists. Each function (external or not) corresponds to the unique node (in contrast to tree DECL nodes where we can have multiple nodes for each function). The mapping from declarations to call-graph nodes is done using hash table based on DECL_ASSEMBLER_NAME, so it is essential for assembler name to not change once the declaration is inserted into the call-graph. 
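   As an illustration of the linked-list representation described above (a
   sketch only, using the node and edge fields that appear throughout this
   file), every direct call recorded in the unit can be visited like so:

     struct cgraph_node *node;
     struct cgraph_edge *e;

     for (node = cgraph_nodes; node; node = node->next)
       for (e = node->callees; e; e = e->next_callee)
         fprintf (stderr, "%s -> %s\n",
                  cgraph_node_name (node),
                  cgraph_node_name (e->callee));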
The call-graph nodes are created lazily using cgraph_node function when called for unknown declaration. When built, there is one edge for each direct call. It is possible that the reference will be later optimized out. The call-graph is built conservatively in order to make conservative data flow analysis possible. The callgraph at the moment does not represent indirect calls or calls from other compilation unit. Flag NEEDED is set for each node that may be accessed in such a invisible way and it shall be considered an entry point to the callgraph. Intraprocedural information: Callgraph is place to store data needed for intraprocedural optimization. All data structures are divided into three components: local_info that is produced while analyzing the function, global_info that is result of global walking of the callgraph on the end of compilation and rtl_info used by RTL backend to propagate data from already compiled functions to their callers. Inlining plans: The function inlining information is decided in advance and maintained in the callgraph as so called inline plan. For each inlined call, the callee's node is cloned to represent the new function copy produced by inliner. Each inlined call gets a unique corresponding clone node of the callee and the data structure is updated while inlining is performed, so the clones are eliminated and their callee edges redirected to the caller. Each edge has "inline_failed" field. When the field is set to NULL, the call will be inlined. When it is non-NULL it contains a reason why inlining wasn't performed. The varpool data structure: Varpool is used to maintain variables in similar manner as call-graph is used for functions. Most of the API is symmetric replacing cgraph function prefix by cgraph_varpool */ /* Hash table used to convert declarations into nodes. */ static GTY((param_is (struct cgraph_node))) htab_t cgraph_hash; /* The linked list of cgraph nodes. */ struct cgraph_node *cgraph_nodes; /* Queue of cgraph nodes scheduled to be lowered. */ struct cgraph_node *cgraph_nodes_queue; /* Number of nodes in existence. */ int cgraph_n_nodes; /* Maximal uid used in cgraph nodes. */ int cgraph_max_uid; /* Set when whole unit has been analyzed so we can access global info. */ bool cgraph_global_info_ready = false; /* Hash table used to convert declarations into nodes. */ static GTY((param_is (struct cgraph_varpool_node))) htab_t cgraph_varpool_hash; /* Queue of cgraph nodes scheduled to be lowered and output. */ struct cgraph_varpool_node *cgraph_varpool_nodes_queue; /* Number of nodes in existence. */ int cgraph_varpool_n_nodes; /* The linked list of cgraph varpool nodes. */ static GTY(()) struct cgraph_varpool_node *cgraph_varpool_nodes; static hashval_t hash_node (const void *); static int eq_node (const void *, const void *); /* Returns a hash code for P. */ static hashval_t hash_node (const void *p) { const struct cgraph_node *n = p; return (hashval_t) DECL_UID (n->decl); } /* Returns nonzero if P1 and P2 are equal. */ static int eq_node (const void *p1, const void *p2) { const struct cgraph_node *n1 = p1, *n2 = p2; return DECL_UID (n1->decl) == DECL_UID (n2->decl); } /* Allocate new callgraph node and insert it into basic data structures. 
*/ static struct cgraph_node * cgraph_create_node (void) { struct cgraph_node *node; node = ggc_alloc_cleared (sizeof (*node)); node->next = cgraph_nodes; node->uid = cgraph_max_uid++; if (cgraph_nodes) cgraph_nodes->previous = node; node->previous = NULL; cgraph_nodes = node; cgraph_n_nodes++; return node; } /* Return cgraph node assigned to DECL. Create new one when needed. */ struct cgraph_node * cgraph_node (tree decl) { struct cgraph_node key, *node, **slot; if (TREE_CODE (decl) != FUNCTION_DECL) abort (); if (!cgraph_hash) cgraph_hash = htab_create_ggc (10, hash_node, eq_node, NULL); key.decl = decl; slot = (struct cgraph_node **) htab_find_slot (cgraph_hash, &key, INSERT); if (*slot) return *slot; node = cgraph_create_node (); node->decl = decl; *slot = node; if (DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL) { node->origin = cgraph_node (DECL_CONTEXT (decl)); node->next_nested = node->origin->nested; node->origin->nested = node; } return node; } /* Return callgraph edge representing CALL_EXPR. */ struct cgraph_edge * cgraph_edge (struct cgraph_node *node, tree call_expr) { struct cgraph_edge *e; /* This loop may turn out to be performance problem. In such case adding hashtables into call nodes with very many edges is probably best solution. It is not good idea to add pointer into CALL_EXPR itself because we want to make possible having multiple cgraph nodes representing different clones of the same body before the body is actually cloned. */ for (e = node->callees; e; e= e->next_callee) if (e->call_expr == call_expr) break; return e; } /* Create edge from CALLER to CALLEE in the cgraph. */ struct cgraph_edge * cgraph_create_edge (struct cgraph_node *caller, struct cgraph_node *callee, tree call_expr) { struct cgraph_edge *edge = ggc_alloc (sizeof (struct cgraph_edge)); #ifdef ENABLE_CHECKING struct cgraph_edge *e; for (e = caller->callees; e; e = e->next_callee) if (e->call_expr == call_expr) abort (); #endif if (TREE_CODE (call_expr) != CALL_EXPR) abort (); if (!DECL_SAVED_TREE (callee->decl)) edge->inline_failed = N_("function body not available"); else if (callee->local.redefined_extern_inline) edge->inline_failed = N_("redefined extern inline functions are not " "considered for inlining"); else if (callee->local.inlinable) edge->inline_failed = N_("function not considered for inlining"); else edge->inline_failed = N_("function not inlinable"); edge->aux = NULL; edge->caller = caller; edge->callee = callee; edge->call_expr = call_expr; edge->next_caller = callee->callers; edge->next_callee = caller->callees; caller->callees = edge; callee->callers = edge; return edge; } /* Remove the edge E the cgraph. */ void cgraph_remove_edge (struct cgraph_edge *e) { struct cgraph_edge **edge, **edge2; for (edge = &e->callee->callers; *edge && *edge != e; edge = &((*edge)->next_caller)) continue; if (!*edge) abort (); *edge = (*edge)->next_caller; for (edge2 = &e->caller->callees; *edge2 && *edge2 != e; edge2 = &(*edge2)->next_callee) continue; if (!*edge2) abort (); *edge2 = (*edge2)->next_callee; } /* Redirect callee of E to N. The function does not update underlying call expression. */ void cgraph_redirect_edge_callee (struct cgraph_edge *e, struct cgraph_node *n) { struct cgraph_edge **edge; for (edge = &e->callee->callers; *edge && *edge != e; edge = &((*edge)->next_caller)) continue; if (!*edge) abort (); *edge = (*edge)->next_caller; e->callee = n; e->next_caller = n->callers; n->callers = e; } /* Remove the node from cgraph. 
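   (Before the node-removal code below, a usage sketch of the functions
   above; illustrative only, with `caller_decl', `callee_decl' and `call'
   standing in for a real pair of FUNCTION_DECLs and a CALL_EXPR:

     struct cgraph_node *caller = cgraph_node (caller_decl);
     struct cgraph_node *callee = cgraph_node (callee_decl);
     struct cgraph_edge *e = cgraph_create_edge (caller, callee, call);

     if (cgraph_edge (caller, call) == e)
       cgraph_remove_edge (e);

   cgraph_remove_node below is the heavier operation: it removes all edges
   of the node, its nested functions, and the node itself.)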
*/ void cgraph_remove_node (struct cgraph_node *node) { void **slot; bool check_dead = 1; while (node->callers) cgraph_remove_edge (node->callers); while (node->callees) cgraph_remove_edge (node->callees); while (node->nested) cgraph_remove_node (node->nested); if (node->origin) { struct cgraph_node **node2 = &node->origin->nested; while (*node2 != node) node2 = &(*node2)->next_nested; *node2 = node->next_nested; } if (node->previous) node->previous->next = node->next; else cgraph_nodes = node->next; if (node->next) node->next->previous = node->previous; slot = htab_find_slot (cgraph_hash, node, NO_INSERT); if (*slot == node) { if (node->next_clone) *slot = node->next_clone; else { htab_clear_slot (cgraph_hash, slot); if (!dump_enabled_p (TDI_all)) { DECL_SAVED_TREE (node->decl) = NULL; DECL_STRUCT_FUNCTION (node->decl) = NULL; } check_dead = false; } } else { struct cgraph_node *n; for (n = *slot; n->next_clone != node; n = n->next_clone) continue; n->next_clone = node->next_clone; } /* Work out whether we still need a function body (either there is inline clone or there is out of line function whose body is not written). */ if (check_dead && flag_unit_at_a_time) { struct cgraph_node *n; for (n = *slot; n; n = n->next_clone) if (n->global.inlined_to || (!n->global.inlined_to && !TREE_ASM_WRITTEN (n->decl) && !DECL_EXTERNAL (n->decl))) break; if (!n && !dump_enabled_p (TDI_all)) { DECL_SAVED_TREE (node->decl) = NULL; DECL_STRUCT_FUNCTION (node->decl) = NULL; } } cgraph_n_nodes--; /* Do not free the structure itself so the walk over chain can continue. */ } /* Notify finalize_compilation_unit that given node is reachable. */ void cgraph_mark_reachable_node (struct cgraph_node *node) { if (!node->reachable && node->local.finalized) { notice_global_symbol (node->decl); node->reachable = 1; node->next_needed = cgraph_nodes_queue; cgraph_nodes_queue = node; } } /* Likewise indicate that a node is needed, i.e. reachable via some external means. */ void cgraph_mark_needed_node (struct cgraph_node *node) { node->needed = 1; cgraph_mark_reachable_node (node); } /* Return true when CALLER_DECL calls CALLEE_DECL. */ bool cgraph_calls_p (tree caller_decl, tree callee_decl) { struct cgraph_node *caller = cgraph_node (caller_decl); struct cgraph_node *callee = cgraph_node (callee_decl); struct cgraph_edge *edge; for (edge = callee->callers; edge && (edge)->caller != caller; edge = (edge->next_caller)) continue; return edge != NULL; } /* Return local info for the compiled function. */ struct cgraph_local_info * cgraph_local_info (tree decl) { struct cgraph_node *node; if (TREE_CODE (decl) != FUNCTION_DECL) abort (); node = cgraph_node (decl); return &node->local; } /* Return local info for the compiled function. */ struct cgraph_global_info * cgraph_global_info (tree decl) { struct cgraph_node *node; if (TREE_CODE (decl) != FUNCTION_DECL || !cgraph_global_info_ready) abort (); node = cgraph_node (decl); return &node->global; } /* Return local info for the compiled function. */ struct cgraph_rtl_info * cgraph_rtl_info (tree decl) { struct cgraph_node *node; if (TREE_CODE (decl) != FUNCTION_DECL) abort (); node = cgraph_node (decl); if (decl != current_function_decl && !TREE_ASM_WRITTEN (node->decl)) return NULL; return &node->rtl; } /* Return name of the node used in debug output. */ const char * cgraph_node_name (struct cgraph_node *node) { return lang_hooks.decl_printable_name (node->decl, 2); } /* Dump given cgraph node. 
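   (Usage note, not part of the original comments: both dumpers below take
   an open FILE *, so during debugging they can be invoked by hand, for
   instance "call dump_cgraph (stderr)" from within gdb.)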
*/ void dump_cgraph_node (FILE *f, struct cgraph_node *node) { struct cgraph_edge *edge; fprintf (f, "%s/%i:", cgraph_node_name (node), node->uid); if (node->global.inlined_to) fprintf (f, " (inline copy in %s/%i)", cgraph_node_name (node->global.inlined_to), node->global.inlined_to->uid); if (node->local.self_insns) fprintf (f, " %i insns", node->local.self_insns); if (node->global.insns && node->global.insns != node->local.self_insns) fprintf (f, " (%i after inlining)", node->global.insns); if (node->origin) fprintf (f, " nested in: %s", cgraph_node_name (node->origin)); if (node->needed) fprintf (f, " needed"); else if (node->reachable) fprintf (f, " reachable"); if (DECL_SAVED_TREE (node->decl)) fprintf (f, " tree"); if (node->output) fprintf (f, " output"); if (node->local.local) fprintf (f, " local"); if (node->local.disregard_inline_limits) fprintf (f, " always_inline"); else if (node->local.inlinable) fprintf (f, " inlinable"); if (TREE_ASM_WRITTEN (node->decl)) fprintf (f, " asm_written"); fprintf (f, "\n called by: "); for (edge = node->callers; edge; edge = edge->next_caller) { fprintf (f, "%s/%i ", cgraph_node_name (edge->caller), edge->caller->uid); if (!edge->inline_failed) fprintf(f, "(inlined) "); } fprintf (f, "\n calls: "); for (edge = node->callees; edge; edge = edge->next_callee) { fprintf (f, "%s/%i ", cgraph_node_name (edge->callee), edge->callee->uid); if (!edge->inline_failed) fprintf(f, "(inlined) "); } fprintf (f, "\n"); } /* Dump the callgraph. */ void dump_cgraph (FILE *f) { struct cgraph_node *node; fprintf (f, "callgraph:\n\n"); for (node = cgraph_nodes; node; node = node->next) dump_cgraph_node (f, node); } /* Returns a hash code for P. */ static hashval_t hash_varpool_node (const void *p) { const struct cgraph_varpool_node *n = p; return (hashval_t) DECL_UID (n->decl); } /* Returns nonzero if P1 and P2 are equal. */ static int eq_varpool_node (const void *p1, const void *p2) { const struct cgraph_varpool_node *n1 = p1, *n2 = p2; return DECL_UID (n1->decl) == DECL_UID (n2->decl); } /* Return cgraph_varpool node assigned to DECL. Create new one when needed. */ struct cgraph_varpool_node * cgraph_varpool_node (tree decl) { struct cgraph_varpool_node key, *node, **slot; if (!DECL_P (decl) || TREE_CODE (decl) == FUNCTION_DECL) abort (); if (!cgraph_varpool_hash) cgraph_varpool_hash = htab_create_ggc (10, hash_varpool_node, eq_varpool_node, NULL); key.decl = decl; slot = (struct cgraph_varpool_node **) htab_find_slot (cgraph_varpool_hash, &key, INSERT); if (*slot) return *slot; node = ggc_alloc_cleared (sizeof (*node)); node->decl = decl; cgraph_varpool_n_nodes++; cgraph_varpool_nodes = node; *slot = node; return node; } /* Set the DECL_ASSEMBLER_NAME and update cgraph hashtables. */ void change_decl_assembler_name (tree decl, tree name) { if (!DECL_ASSEMBLER_NAME_SET_P (decl)) { SET_DECL_ASSEMBLER_NAME (decl, name); return; } if (name == DECL_ASSEMBLER_NAME (decl)) return; if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) && DECL_RTL_SET_P (decl)) warning ("%D renamed after being referenced in assembly", decl); SET_DECL_ASSEMBLER_NAME (decl, name); } /* Notify finalize_compilation_unit that given node is reachable or needed. 
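   (A small sketch of how the varpool entry points defined below are
   typically used together; illustrative only, with `var_decl' standing in
   for a VAR_DECL the front end has finished with:

     cgraph_varpool_finalize_decl (var_decl);
     if (!flag_unit_at_a_time)
       cgraph_varpool_assemble_pending_decls ();

   cgraph_varpool_finalize_decl marks the node as needed when the variable
   is externally visible and not COMDAT, or when its assembler name has
   already been referenced; the second call then writes out whatever ended
   up on the queue.)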
*/ void cgraph_varpool_mark_needed_node (struct cgraph_varpool_node *node) { if (!node->needed && node->finalized) { node->next_needed = cgraph_varpool_nodes_queue; cgraph_varpool_nodes_queue = node; notice_global_symbol (node->decl); } node->needed = 1; } void cgraph_varpool_finalize_decl (tree decl) { struct cgraph_varpool_node *node = cgraph_varpool_node (decl); /* The first declaration of a variable that comes through this function decides whether it is global (in C, has external linkage) or local (in C, has internal linkage). So do nothing more if this function has already run. */ if (node->finalized) return; if (node->needed) { node->next_needed = cgraph_varpool_nodes_queue; cgraph_varpool_nodes_queue = node; notice_global_symbol (decl); } node->finalized = true; if (/* Externally visible variables must be output. The exception are COMDAT functions that must be output only when they are needed. */ (TREE_PUBLIC (decl) && !DECL_COMDAT (decl)) /* Function whose name is output to the assembler file must be produced. It is possible to assemble the name later after finalizing the function and the fact is noticed in assemble_name then. */ || (DECL_ASSEMBLER_NAME_SET_P (decl) && TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))) { cgraph_varpool_mark_needed_node (node); } } bool cgraph_varpool_assemble_pending_decls (void) { bool changed = false; while (cgraph_varpool_nodes_queue) { tree decl = cgraph_varpool_nodes_queue->decl; struct cgraph_varpool_node *node = cgraph_varpool_nodes_queue; cgraph_varpool_nodes_queue = cgraph_varpool_nodes_queue->next_needed; if (!TREE_ASM_WRITTEN (decl)) { assemble_variable (decl, 0, 1, 0); changed = true; } node->next_needed = NULL; } return changed; } /* Return true when the DECL can possibly be inlined. */ bool cgraph_function_possibly_inlined_p (tree decl) { if (!cgraph_global_info_ready) return (DECL_INLINE (decl) && !flag_really_no_inline); return DECL_POSSIBLY_INLINED (decl); } /* Create clone of E in the node N represented by CALL_EXPR the callgraph. */ struct cgraph_edge * cgraph_clone_edge (struct cgraph_edge *e, struct cgraph_node *n, tree call_expr) { struct cgraph_edge *new = cgraph_create_edge (n, e->callee, call_expr); new->inline_failed = e->inline_failed; return new; } /* Create node representing clone of N. */ struct cgraph_node * cgraph_clone_node (struct cgraph_node *n) { struct cgraph_node *new = cgraph_create_node (); struct cgraph_edge *e; new->decl = n->decl; new->origin = n->origin; if (new->origin) { new->next_nested = new->origin->nested; new->origin->nested = new; } new->analyzed = n->analyzed; new->local = n->local; new->global = n->global; new->rtl = n->rtl; for (e = n->callees;e; e=e->next_callee) cgraph_clone_edge (e, new, e->call_expr); new->next_clone = n->next_clone; n->next_clone = new; return new; } /* Type information for cgraph.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_cgraph_h[] = { { &cgraph_varpool_nodes, 1, sizeof (cgraph_varpool_nodes), >_ggc_mx_cgraph_varpool_node, >_pch_nx_cgraph_varpool_node }, { &cgraph_varpool_hash, 1, sizeof (cgraph_varpool_hash), >_ggc_m_P19cgraph_varpool_node4htab, >_pch_n_P19cgraph_varpool_node4htab }, { &cgraph_hash, 1, sizeof (cgraph_hash), >_ggc_m_P11cgraph_node4htab, >_pch_n_P11cgraph_node4htab }, LAST_GGC_ROOT_TAB }; /* Callgraph based intraprocedural optimizations. Copyright (C) 2003, 2004 Free Software Foundation, Inc. Contributed by Jan Hubicka This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This module implements main driver of compilation process as well as few basic intraprocedural optimizers. The main scope of this file is to act as an interface in between tree based frontends and the backend (and middle end) The front-end is supposed to use following functionality: - cgraph_finalize_function This function is called once front-end has parsed whole body of function and it is certain that the function body nor the declaration will change. (There is one exception needed for implementing GCC extern inline function.) - cgraph_varpool_finalize_variable This function has same behavior as the above but is used for static variables. - cgraph_finalize_compilation_unit This function is called once compilation unit is finalized and it will no longer change. In the unit-at-a-time the call-graph construction and local function analysis takes place here. Bodies of unreachable functions are released to conserve memory usage. ??? The compilation unit in this point of view should be compilation unit as defined by the language - for instance C frontend allows multiple compilation units to be parsed at once and it should call function each time parsing is done so we save memory. - cgraph_optimize In this unit-at-a-time compilation the intra procedural analysis takes place here. In particular the static functions whose address is never taken are marked as local. Backend can then use this information to modify calling conventions, do better inlining or similar optimizations. - cgraph_assemble_pending_functions - cgraph_varpool_assemble_pending_variables In non-unit-at-a-time mode these functions can be used to force compilation of functions or variables that are known to be needed at given stage of compilation - cgraph_mark_needed_node - cgraph_varpool_mark_needed_node When function or variable is referenced by some hidden way (for instance via assembly code and marked by attribute "used"), the call-graph data structure must be updated accordingly by this function. 
- analyze_expr callback This function is responsible for lowering tree nodes not understood by generic code into understandable ones, or alternatively for marking the callgraph and varpool nodes referenced by the expression as needed. ??? On tree-ssa, genericizing should take place here and we will avoid the need for these hooks (replacing them by a genericizing hook). - expand_function callback This function is used to expand a function and pass it to the RTL back-end. The front end should not make any assumptions about when this function can be called. In particular cgraph_assemble_pending_functions, cgraph_varpool_assemble_pending_variables, cgraph_finalize_function, cgraph_varpool_finalize_function and cgraph_optimize can cause previously finalized functions to be expanded arbitrarily. We implement two compilation modes. - unit-at-a-time: In this mode the analysis of all functions is deferred to cgraph_finalize_compilation_unit and expansion to cgraph_optimize. In cgraph_finalize_compilation_unit the reachable functions are analyzed. During analysis the call-graph edges from reachable functions are constructed and their destinations are marked as reachable. References to functions and variables are discovered too, and variables found to be needed are output to the assembly file. Via the mark_referenced call in assemble_variable, functions referenced by static variables are noticed too. The intra-procedural information is produced and its existence is indicated by global_info_ready. Once this flag is set it is impossible to change a function from !reachable to reachable, and thus assemble_variable no longer calls mark_referenced. Finally the call-graph is topologically sorted and all reachable functions that have not been completely inlined and are not external are output. ??? It is possible that a reference to a function or variable is optimized out. We cannot deal with this nicely because the topological order is not suitable for it. For tree-ssa we may consider another pass doing the optimization and re-discovering reachable functions. ??? Reorganize the code so variables are output very last and only if they really have been referenced by the produced code, so that we catch more cases where the reference has been optimized out. - non-unit-at-a-time: All functions and variables are output as early as possible to conserve memory consumption. This may or may not result in less memory used, but it is still needed for some legacy code that relies on a particular ordering of things output from the compiler. Varpool data structures are not used and variables are output directly. Functions are output early using a call to cgraph_assemble_pending_functions from cgraph_finalize_function. The decision on whether a function is needed is made more conservative, so uninlinable static functions are needed too. During the call-graph construction the edge destinations are not marked as reachable and we rely completely upon assemble_variable to mark them. Inlining decision heuristics ??? Move this to a separate file after the tree-ssa merge. We separate inlining decisions from the inliner itself and store them inside the callgraph as a so-called inline plan. Refer to the cgraph.c documentation for the particular representation of inline plans in the callgraph. The implementation of the particular heuristics is separated from the rest of the code to make it easier to replace it with a more complicated implementation in the future. The rest of the inlining code acts as a library aimed at modifying the callgraph and verifying that the parameters on code size growth are honored.
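   As a concrete illustration of the interface described above (a hypothetical
   sketch only; the driver name and the parsed_functions/parsed_variables lists
   are made up, while the four cgraph entry points are the real ones defined in
   this unit), a front end compiling in unit-at-a-time mode would drive the
   call graph roughly like this:

       static void
       frontend_finish_translation_unit (void)
       {
         tree decl;
         for (decl = parsed_functions; decl; decl = TREE_CHAIN (decl))
           cgraph_finalize_function (decl, false);
         for (decl = parsed_variables; decl; decl = TREE_CHAIN (decl))
           cgraph_varpool_finalize_decl (decl);
         cgraph_finalize_compilation_unit ();
         cgraph_optimize ();
       }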
To mark a given call inline, use the cgraph_mark_inline function; the verification is performed by cgraph_default_inline_p and cgraph_check_inline_limits. The heuristics implement a simple knapsack-style algorithm, ordering all functions by their "profitability" (estimated by code size growth) and inlining them in priority order. cgraph_decide_inlining implements the heuristics taking the whole callgraph into account, while cgraph_decide_inlining_incrementally considers only one function at a time and is used in non-unit-at-a-time mode. */ #define INSNS_PER_CALL 10 static void cgraph_expand_all_functions (void); static void cgraph_mark_functions_to_output (void); static void cgraph_expand_function (struct cgraph_node *); static tree record_call_1 (tree *, int *, void *); static void cgraph_mark_local_functions (void); static bool cgraph_default_inline_p (struct cgraph_node *n); static void cgraph_analyze_function (struct cgraph_node *node); static void cgraph_decide_inlining_incrementally (struct cgraph_node *); /* Statistics we collect about the inlining algorithm. */ static int ncalls_inlined; static int nfunctions_inlined; static int initial_insns; static int overall_insns; /* Records tree nodes seen in cgraph_create_edges. Simply using walk_tree_without_duplicates doesn't guarantee each node is visited once because it gets a new htab upon each recursive call from record_call_1. */ static htab_t visited_nodes; /* Determine if function DECL is needed. That is, visible to something either outside this translation unit, something magic in the system configury, or (if not doing unit-at-a-time) to something we haven't seen yet. */ static bool decide_is_function_needed (struct cgraph_node *node, tree decl) { struct cgraph_node *origin; /* If we decided it was needed before, but at the time we didn't have the body of the function available, then it's still needed. We have to go back and re-check its dependencies now. */ if (node->needed) return true; /* Externally visible functions must be output. The exception is COMDAT functions that must be output only when they are needed. */ if (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_EXTERNAL (decl)) return true; /* Constructors and destructors are reachable from the runtime by some mechanism. */ if (DECL_STATIC_CONSTRUCTOR (decl) || DECL_STATIC_DESTRUCTOR (decl)) return true; /* If the user told us it is used, then it must be so. */ if (lookup_attribute ("used", DECL_ATTRIBUTES (decl))) return true; /* ??? If the assembler name is set by hand, it is possible to assemble the name later after finalizing the function and the fact is noticed in assemble_name then. This is arguably a bug. */ if (DECL_ASSEMBLER_NAME_SET_P (decl) && TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))) return true; if (flag_unit_at_a_time) return false; /* If not doing unit at a time, then we'll only defer this function if it's marked for inlining. Otherwise we want to emit it now. */ /* "extern inline" functions are never output locally. */ if (DECL_EXTERNAL (decl)) return false; /* Nested functions of an extern inline function shall not be emitted unless we inlined the origin. */ for (origin = node->origin; origin; origin = origin->origin) if (DECL_EXTERNAL (origin->decl)) return false; /* We want to emit COMDAT functions only when absolutely necessary. */ if (DECL_COMDAT (decl)) return false; if (!DECL_INLINE (decl) || (!node->local.disregard_inline_limits /* When declared inline, defer even the uninlinable functions. This allows them to be eliminated when unused.
*/ && !DECL_DECLARED_INLINE_P (decl) && (!node->local.inlinable || !cgraph_default_inline_p (node)))) return true; return false; } /* When not doing unit-at-a-time, output all functions enqueued. Return true when such a functions were found. */ bool cgraph_assemble_pending_functions (void) { bool output = false; if (flag_unit_at_a_time) return false; while (cgraph_nodes_queue) { struct cgraph_node *n = cgraph_nodes_queue; cgraph_nodes_queue = cgraph_nodes_queue->next_needed; n->next_needed = NULL; if (!n->global.inlined_to && !DECL_EXTERNAL (n->decl)) { cgraph_expand_function (n); output = true; } } return output; } /* DECL has been parsed. Take it, queue it, compile it at the whim of the logic in effect. If NESTED is true, then our caller cannot stand to have the garbage collector run at the moment. We would need to either create a new GC context, or just not compile right now. */ void cgraph_finalize_function (tree decl, bool nested) { struct cgraph_node *node = cgraph_node (decl); if (node->local.finalized) { /* As an GCC extension we allow redefinition of the function. The semantics when both copies of bodies differ is not well defined. We replace the old body with new body so in unit at a time mode we always use new body, while in normal mode we may end up with old body inlined into some functions and new body expanded and inlined in others. ??? It may make more sense to use one body for inlining and other body for expanding the function but this is difficult to do. */ /* If node->output is set, then this is a unit-at-a-time compilation and we have already begun whole-unit analysis. This is *not* testing for whether we've already emitted the function. That case can be sort-of legitimately seen with real function redefinition errors. I would argue that the front end should never present us with such a case, but don't enforce that for now. */ if (node->output) abort (); /* Reset our data structures so we can analyze the function again. */ memset (&node->local, 0, sizeof (node->local)); memset (&node->global, 0, sizeof (node->global)); memset (&node->rtl, 0, sizeof (node->rtl)); node->analyzed = false; node->local.redefined_extern_inline = true; while (node->callees) cgraph_remove_edge (node->callees); /* We may need to re-queue the node for assembling in case we already proceeded it and ignored as not needed. */ if (node->reachable && !flag_unit_at_a_time) { struct cgraph_node *n; for (n = cgraph_nodes_queue; n; n = n->next_needed) if (n == node) break; if (!n) node->reachable = 0; } } notice_global_symbol (decl); node->decl = decl; node->local.finalized = true; /* If not unit at a time, then we need to create the call graph now, so that called functions can be queued and emitted now. */ if (!flag_unit_at_a_time) { cgraph_analyze_function (node); cgraph_decide_inlining_incrementally (node); } if (decide_is_function_needed (node, decl)) cgraph_mark_needed_node (node); /* If not unit at a time, go ahead and emit everything we've found to be reachable at this time. */ if (!nested) { if (!cgraph_assemble_pending_functions ()) ggc_collect (); } /* If we've not yet emitted decl, tell the debug info about it. */ if (!TREE_ASM_WRITTEN (decl)) (*debug_hooks->deferred_inline_function) (decl); /* Possibly warn about unused parameters. */ if (warn_unused_parameter) do_warn_unused_parameter (decl); } /* Walk tree and record all calls. Called via walk_tree. */ static tree record_call_1 (tree *tp, int *walk_subtrees, void *data) { tree t = *tp; switch (TREE_CODE (t)) { case VAR_DECL: /* ??? 
Really, we should mark this decl as *potentially* referenced by this function and re-examine whether the decl is actually used after rtl has been generated. */ if (TREE_STATIC (t)) cgraph_varpool_mark_needed_node (cgraph_varpool_node (t)); break; case ADDR_EXPR: if (flag_unit_at_a_time) { /* Record dereferences to the functions. This makes the functions reachable unconditionally. */ tree decl = TREE_OPERAND (*tp, 0); if (TREE_CODE (decl) == FUNCTION_DECL) cgraph_mark_needed_node (cgraph_node (decl)); } break; case CALL_EXPR: { tree decl = get_callee_fndecl (*tp); if (decl && TREE_CODE (decl) == FUNCTION_DECL) { cgraph_create_edge (data, cgraph_node (decl), *tp); /* When we see a function call, we don't want to look at the function reference in the ADDR_EXPR that is hanging from the CALL_EXPR we're examining here, because we would conclude incorrectly that the function's address could be taken by something that is not a function call. So only walk the function parameter list, skip the other subtrees. */ walk_tree (&TREE_OPERAND (*tp, 1), record_call_1, data, visited_nodes); *walk_subtrees = 0; } break; } default: /* Save some cycles by not walking types and declaration as we won't find anything useful there anyway. */ if (DECL_P (*tp) || TYPE_P (*tp)) { *walk_subtrees = 0; break; } if ((unsigned int) TREE_CODE (t) >= LAST_AND_UNUSED_TREE_CODE) return lang_hooks.callgraph.analyze_expr (tp, walk_subtrees, data); break; } return NULL; } /* Create cgraph edges for function calls inside BODY from NODE. */ void cgraph_create_edges (struct cgraph_node *node, tree body) { /* The nodes we're interested in are never shared, so walk the tree ignoring duplicates. */ visited_nodes = htab_create (37, htab_hash_pointer, htab_eq_pointer, NULL); walk_tree (&body, record_call_1, node, visited_nodes); htab_delete (visited_nodes); visited_nodes = NULL; } static bool error_found; /* Callbrack of verify_cgraph_node. Check that all call_exprs have cgraph nodes. */ static tree verify_cgraph_node_1 (tree *tp, int *walk_subtrees, void *data) { tree t = *tp; tree decl; if (TREE_CODE (t) == CALL_EXPR && (decl = get_callee_fndecl (t))) { struct cgraph_edge *e = cgraph_edge (data, t); if (e) { if (e->aux) { error ("Shared call_expr:"); debug_tree (t); error_found = true; } if (e->callee->decl != cgraph_node (decl)->decl) { error ("Edge points to wrong declaration:"); debug_tree (e->callee->decl); fprintf (stderr," Instead of:"); debug_tree (decl); } e->aux = (void *)1; } else { error ("Missing callgraph edge for call expr:"); debug_tree (t); error_found = true; } } /* Save some cycles by not walking types and declaration as we won't find anything useful there anyway. */ if (DECL_P (*tp) || TYPE_P (*tp)) *walk_subtrees = 0; return NULL_TREE; } /* Verify cgraph nodes of given cgraph node. */ void verify_cgraph_node (struct cgraph_node *node) { struct cgraph_edge *e; struct cgraph_node *main_clone; timevar_push (TV_CGRAPH_VERIFY); error_found = false; for (e = node->callees; e; e = e->next_callee) if (e->aux) { error ("Aux field set for edge %s->%s", cgraph_node_name (e->caller), cgraph_node_name (e->callee)); error_found = true; } for (e = node->callers; e; e = e->next_caller) { if (!e->inline_failed) { if (node->global.inlined_to != (e->caller->global.inlined_to ? 
e->caller->global.inlined_to : e->caller)) { error ("Inlined_to pointer is wrong"); error_found = true; } if (node->callers->next_caller) { error ("Multiple inline callers"); error_found = true; } } else if (node->global.inlined_to) { error ("Inlined_to pointer set for noninline callers"); error_found = true; } } if (!node->callers && node->global.inlined_to) { error ("Inlined_to pointer is set but no predecessors found"); error_found = true; } if (node->global.inlined_to == node) { error ("Inlined_to pointer refers to itself"); error_found = true; } for (main_clone = cgraph_node (node->decl); main_clone; main_clone = main_clone->next_clone) if (main_clone == node) break; if (!main_clone) { error ("Node not found in DECL_ASSEMBLER_NAME hash"); error_found = true; } if (node->analyzed && DECL_SAVED_TREE (node->decl) && !TREE_ASM_WRITTEN (node->decl) && (!DECL_EXTERNAL (node->decl) || node->global.inlined_to)) { walk_tree_without_duplicates (&DECL_SAVED_TREE (node->decl), verify_cgraph_node_1, node); for (e = node->callees; e; e = e->next_callee) { if (!e->aux) { error ("Edge %s->%s has no corresponding call_expr", cgraph_node_name (e->caller), cgraph_node_name (e->callee)); error_found = true; } e->aux = 0; } } if (error_found) { dump_cgraph_node (stderr, node); internal_error ("verify_cgraph_node failed."); } timevar_pop (TV_CGRAPH_VERIFY); } /* Verify whole cgraph structure. */ void verify_cgraph (void) { struct cgraph_node *node; for (node = cgraph_nodes; node; node = node->next) verify_cgraph_node (node); } /* Analyze the function scheduled to be output. */ static void cgraph_analyze_function (struct cgraph_node *node) { tree decl = node->decl; struct cgraph_edge *e; current_function_decl = decl; /* First kill forward declaration so reverse inlining works properly. */ cgraph_create_edges (node, DECL_SAVED_TREE (decl)); node->local.inlinable = tree_inlinable_function_p (decl); node->local.self_insns = estimate_num_insns (DECL_SAVED_TREE (decl)); if (node->local.inlinable) node->local.disregard_inline_limits = lang_hooks.tree_inlining.disregard_inline_limits (decl); for (e = node->callers; e; e = e->next_caller) { if (node->local.redefined_extern_inline) e->inline_failed = N_("redefined extern inline functions are not " "considered for inlining"); else if (!node->local.inlinable) e->inline_failed = N_("function not inlinable"); else e->inline_failed = N_("function not considered for inlining"); } if (flag_really_no_inline && !node->local.disregard_inline_limits) node->local.inlinable = 0; /* Inlining characteristics are maintained by cgraph_mark_inline. */ node->global.insns = node->local.self_insns; node->analyzed = true; current_function_decl = NULL; } /* Analyze the whole compilation unit once it is parsed completely. */ void cgraph_finalize_compilation_unit (void) { struct cgraph_node *node; if (!flag_unit_at_a_time) { cgraph_assemble_pending_functions (); return; } cgraph_varpool_assemble_pending_decls (); if (!quiet_flag) fprintf (stderr, "\nAnalyzing compilation unit\n"); timevar_push (TV_CGRAPH); if (cgraph_dump_file) { fprintf (cgraph_dump_file, "Initial entry points:"); for (node = cgraph_nodes; node; node = node->next) if (node->needed && DECL_SAVED_TREE (node->decl)) fprintf (cgraph_dump_file, " %s", cgraph_node_name (node)); fprintf (cgraph_dump_file, "\n"); } /* Propagate the reachability flag and lower the representation of all reachable functions.
In the future, lowering will introduce new functions and new entry points on the way (by template instantiation and virtual method table generation for instance). */ while (cgraph_nodes_queue) { struct cgraph_edge *edge; tree decl = cgraph_nodes_queue->decl; node = cgraph_nodes_queue; cgraph_nodes_queue = cgraph_nodes_queue->next_needed; node->next_needed = NULL; /* ??? It is possible to create extern inline function and later using weak alas attribute to kill its body. See gcc.c-torture/compile/20011119-1.c */ if (!DECL_SAVED_TREE (decl)) continue; if (node->analyzed || !node->reachable || !DECL_SAVED_TREE (decl)) abort (); cgraph_analyze_function (node); for (edge = node->callees; edge; edge = edge->next_callee) if (!edge->callee->reachable) cgraph_mark_reachable_node (edge->callee); cgraph_varpool_assemble_pending_decls (); } /* Collect entry points to the unit. */ if (cgraph_dump_file) { fprintf (cgraph_dump_file, "Unit entry points:"); for (node = cgraph_nodes; node; node = node->next) if (node->needed && DECL_SAVED_TREE (node->decl)) fprintf (cgraph_dump_file, " %s", cgraph_node_name (node)); fprintf (cgraph_dump_file, "\n\nInitial "); dump_cgraph (cgraph_dump_file); } if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nReclaiming functions:"); for (node = cgraph_nodes; node; node = node->next) { tree decl = node->decl; if (!node->reachable && DECL_SAVED_TREE (decl)) { if (cgraph_dump_file) fprintf (cgraph_dump_file, " %s", cgraph_node_name (node)); cgraph_remove_node (node); } else node->next_needed = NULL; } if (cgraph_dump_file) { fprintf (cgraph_dump_file, "\n\nReclaimed "); dump_cgraph (cgraph_dump_file); } ggc_collect (); timevar_pop (TV_CGRAPH); } /* Figure out what functions we want to assemble. */ static void cgraph_mark_functions_to_output (void) { struct cgraph_node *node; for (node = cgraph_nodes; node; node = node->next) { tree decl = node->decl; struct cgraph_edge *e; if (node->output) abort (); for (e = node->callers; e; e = e->next_caller) if (e->inline_failed) break; /* We need to output all local functions that are used and not always inlined, as well as those that are reachable from outside the current compilation unit. */ if (DECL_SAVED_TREE (decl) && !node->global.inlined_to && (node->needed || (e && node->reachable)) && !TREE_ASM_WRITTEN (decl) && !DECL_EXTERNAL (decl)) node->output = 1; /* We should've reclaimed all functions that are not needed. */ else if (!node->global.inlined_to && DECL_SAVED_TREE (decl) && !DECL_EXTERNAL (decl)) { dump_cgraph_node (stderr, node); abort (); } } } /* Expand function specified by NODE. */ static void cgraph_expand_function (struct cgraph_node *node) { tree decl = node->decl; /* We ought to not compile any inline clones. */ if (node->global.inlined_to) abort (); if (flag_unit_at_a_time) announce_function (decl); /* Generate RTL for the body of DECL. Nested functions are expanded via lang_expand_decl_stmt. */ lang_hooks.callgraph.expand_function (decl); /* Make sure that BE didn't give up on compiling. */ /* ??? Can happen with nested function of extern inline. */ if (!TREE_ASM_WRITTEN (node->decl)) abort (); current_function_decl = NULL; if (DECL_SAVED_TREE (node->decl) && !cgraph_preserve_function_body_p (node->decl)) { DECL_SAVED_TREE (node->decl) = NULL; DECL_STRUCT_FUNCTION (node->decl) = NULL; DECL_INITIAL (node->decl) = error_mark_node; } } /* Fill array order with all nodes with output flag set in the reverse topological order. 
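   The walk is an iterative depth-first search over caller edges: each node's
   aux field serves as the cursor into its list of callers and the local
   sentinel &last marks the end of that list, so no recursion or separate
   visited set is needed.  Cycles aside, a node is stored into ORDER only
   after all of its callers have been stored; consumers such as
   cgraph_expand_all_functions therefore walk ORDER backwards to emit callees
   before their callers.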
*/ static int cgraph_postorder (struct cgraph_node **order) { struct cgraph_node *node, *node2; int stack_size = 0; int order_pos = 0; struct cgraph_edge *edge, last; struct cgraph_node **stack = xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *)); /* We have to deal with cycles nicely, so use a depth first traversal output algorithm. Ignore the fact that some functions won't need to be output and put them into order as well, so we get dependencies right through intline functions. */ for (node = cgraph_nodes; node; node = node->next) node->aux = NULL; for (node = cgraph_nodes; node; node = node->next) if (!node->aux) { node2 = node; if (!node->callers) node->aux = &last; else node->aux = node->callers; while (node2) { while (node2->aux != &last) { edge = node2->aux; if (edge->next_caller) node2->aux = edge->next_caller; else node2->aux = &last; if (!edge->caller->aux) { if (!edge->caller->callers) edge->caller->aux = &last; else edge->caller->aux = edge->caller->callers; stack[stack_size++] = node2; node2 = edge->caller; break; } } if (node2->aux == &last) { order[order_pos++] = node2; if (stack_size) node2 = stack[--stack_size]; else node2 = NULL; } } } free (stack); return order_pos; } /* Perform reachability analysis and reclaim all unreachable nodes. This function also remove unneeded bodies of extern inline functions and thus needs to be done only after inlining decisions has been made. */ static bool cgraph_remove_unreachable_nodes (void) { struct cgraph_node *first = (void *) 1; struct cgraph_node *node; bool changed = false; int insns = 0; #ifdef ENABLE_CHECKING verify_cgraph (); #endif if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nReclaiming functions:"); #ifdef ENABLE_CHECKING for (node = cgraph_nodes; node; node = node->next) if (node->aux) abort (); #endif for (node = cgraph_nodes; node; node = node->next) if (node->needed && !node->global.inlined_to && (!DECL_EXTERNAL (node->decl) || !node->analyzed)) { node->aux = first; first = node; } else if (node->aux) abort (); /* Perform reachability analysis. As a special case do not consider extern inline functions not inlined as live because we won't output them at all. */ while (first != (void *) 1) { struct cgraph_edge *e; node = first; first = first->aux; for (e = node->callees; e; e = e->next_callee) if (!e->callee->aux && node->analyzed && (!e->inline_failed || !e->callee->analyzed || !DECL_EXTERNAL (e->callee->decl))) { e->callee->aux = first; first = e->callee; } } /* Remove unreachable nodes. Extern inline functions need special care; Unreachable extern inline functions shall be removed. Reachable extern inline functions we never inlined shall get their bodies eliminated. Reachable extern inline functions we sometimes inlined will be turned into unanalyzed nodes so they look like for true extern functions to the rest of code. Body of such functions is released via remove_node once the inline clones are eliminated. 
*/ for (node = cgraph_nodes; node; node = node->next) { if (!node->aux) { int local_insns; tree decl = node->decl; node->global.inlined_to = NULL; if (DECL_STRUCT_FUNCTION (decl)) local_insns = node->local.self_insns; else local_insns = 0; if (cgraph_dump_file) fprintf (cgraph_dump_file, " %s", cgraph_node_name (node)); if (!node->analyzed || !DECL_EXTERNAL (node->decl)) cgraph_remove_node (node); else { struct cgraph_edge *e; for (e = node->callers; e; e = e->next_caller) if (e->caller->aux) break; if (e || node->needed) { struct cgraph_node *clone; for (clone = node->next_clone; clone; clone = clone->next_clone) if (clone->aux) break; if (!clone) { DECL_SAVED_TREE (node->decl) = NULL; DECL_STRUCT_FUNCTION (node->decl) = NULL; DECL_INITIAL (node->decl) = error_mark_node; } while (node->callees) cgraph_remove_edge (node->callees); node->analyzed = false; } else cgraph_remove_node (node); } if (!DECL_SAVED_TREE (decl)) insns += local_insns; changed = true; } } for (node = cgraph_nodes; node; node = node->next) node->aux = NULL; if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nReclaimed %i insns", insns); return changed; } /* Estimate size of the function after inlining WHAT into TO. */ static int cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to, struct cgraph_node *what) { return (what->global.insns - INSNS_PER_CALL) * times + to->global.insns; } /* Estimate the growth caused by inlining NODE into all callees. */ static int cgraph_estimate_growth (struct cgraph_node *node) { int growth = 0; struct cgraph_edge *e; for (e = node->callers; e; e = e->next_caller) if (e->inline_failed) growth += (cgraph_estimate_size_after_inlining (1, e->caller, node) - e->caller->global.insns); /* ??? Wrong for self recursive functions or cases where we decide to not inline for different reasons, but it is not big deal as in that case we will keep the body around, but we will also avoid some inlining. */ if (!node->needed && !DECL_EXTERNAL (node->decl)) growth -= node->global.insns; return growth; } /* E is expected to be an edge being inlined. Clone destination node of the edge and redirect it to the new clone. DUPLICATE is used for bookkeeping on whether we are actually creating new clones or re-using node originally representing out-of-line function call. */ void cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate) { struct cgraph_node *n; /* We may eliminate the need for out-of-line copy to be output. In that case just go ahead and re-use it. */ if (!e->callee->callers->next_caller && (!e->callee->needed || DECL_EXTERNAL (e->callee->decl)) && duplicate && flag_unit_at_a_time) { if (e->callee->global.inlined_to) abort (); if (!DECL_EXTERNAL (e->callee->decl)) overall_insns -= e->callee->global.insns, nfunctions_inlined++; duplicate = 0; } else if (duplicate) { n = cgraph_clone_node (e->callee); cgraph_redirect_edge_callee (e, n); } if (e->caller->global.inlined_to) e->callee->global.inlined_to = e->caller->global.inlined_to; else e->callee->global.inlined_to = e->caller; /* Recursively clone all bodies. */ for (e = e->callee->callees; e; e = e->next_callee) if (!e->inline_failed) cgraph_clone_inlined_nodes (e, duplicate); } /* Mark edge E as inlined and update callgraph accordingly. 
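   A worked example of the size bookkeeping (the numbers are made up for
   illustration): with INSNS_PER_CALL being 10, inlining a 40-insn callee once
   into a 100-insn caller gives cgraph_estimate_size_after_inlining (1, to,
   what) = (40 - 10) * 1 + 100 = 130 insns.  The loop below applies that
   estimate to the caller and to every enclosing inline copy it is itself
   inlined into, and the growth of the outermost caller is added to
   overall_insns.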
*/ void cgraph_mark_inline_edge (struct cgraph_edge *e) { int old_insns = 0, new_insns = 0; struct cgraph_node *to = NULL, *what; if (!e->inline_failed) abort (); e->inline_failed = NULL; if (!e->callee->global.inlined && flag_unit_at_a_time) DECL_POSSIBLY_INLINED (e->callee->decl) = true; e->callee->global.inlined = true; cgraph_clone_inlined_nodes (e, true); what = e->callee; /* Now update size of caller and all functions caller is inlined into. */ for (;e && !e->inline_failed; e = e->caller->callers) { old_insns = e->caller->global.insns; new_insns = cgraph_estimate_size_after_inlining (1, e->caller, what); if (new_insns < 0) abort (); to = e->caller; to->global.insns = new_insns; } if (what->global.inlined_to != to) abort (); overall_insns += new_insns - old_insns; ncalls_inlined++; } /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER. Return following unredirected edge in the list of callers of EDGE->CALLEE */ static struct cgraph_edge * cgraph_mark_inline (struct cgraph_edge *edge) { struct cgraph_node *to = edge->caller; struct cgraph_node *what = edge->callee; struct cgraph_edge *e, *next; int times = 0; /* Look for all calls, mark them inline and clone recursively all inlined functions. */ for (e = what->callers; e; e = next) { next = e->next_caller; if (e->caller == to && e->inline_failed) { cgraph_mark_inline_edge (e); if (e == edge) edge = next; times ++; } } if (!times) abort (); return edge; } /* Return false when inlining WHAT into TO is not good idea as it would cause too large growth of function bodies. */ static bool cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what, const char **reason) { int times = 0; struct cgraph_edge *e; int newsize; int limit; if (to->global.inlined_to) to = to->global.inlined_to; for (e = to->callees; e; e = e->next_callee) if (e->callee == what) times++; /* When inlining large function body called once into small function, take the inlined function as base for limiting the growth. */ if (to->local.self_insns > what->local.self_insns) limit = to->local.self_insns; else limit = what->local.self_insns; limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100; newsize = cgraph_estimate_size_after_inlining (times, to, what); if (newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS) && newsize > limit) { if (reason) *reason = N_("--param large-function-growth limit reached"); return false; } return true; } /* Return true when function N is small enough to be inlined. */ static bool cgraph_default_inline_p (struct cgraph_node *n) { if (!DECL_INLINE (n->decl) || !DECL_SAVED_TREE (n->decl)) return false; if (DECL_DECLARED_INLINE_P (n->decl)) return n->global.insns < MAX_INLINE_INSNS_SINGLE; else return n->global.insns < MAX_INLINE_INSNS_AUTO; } /* Return true when inlining WHAT would create recursive inlining. We call recursive inlining all cases where same function appears more than once in the single recursion nest path in the inline graph. */ static bool cgraph_recursive_inlining_p (struct cgraph_node *to, struct cgraph_node *what, const char **reason) { bool recursive; if (to->global.inlined_to) recursive = what->decl == to->global.inlined_to->decl; else recursive = what->decl == to->decl; /* Marking recursive function inline has sane semantic and thus we should not warn on it. */ if (recursive && reason) *reason = (what->local.disregard_inline_limits ? N_("recursive inlining") : ""); return recursive; } /* Recompute heap nodes for each of callees. 
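   That is, once NODE has grown by having a call inlined into it, the
   estimated growth of each of its remaining callees changes as well, so their
   keys in the fibonacci heap used by cgraph_decide_inlining_of_small_functions
   must be refreshed; callees already inlined into NODE are followed
   recursively so their own call sites get updated too.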
*/ static void update_callee_keys (fibheap_t heap, struct fibnode **heap_node, struct cgraph_node *node) { struct cgraph_edge *e; for (e = node->callees; e; e = e->next_callee) if (e->inline_failed && heap_node[e->callee->uid]) fibheap_replace_key (heap, heap_node[e->callee->uid], cgraph_estimate_growth (e->callee)); else if (!e->inline_failed) update_callee_keys (heap, heap_node, e->callee); } /* Enqueue all recursive calls from NODE into queue linked via aux pointers in between FIRST and LAST. WHERE is used for bookkeeping while looking int calls inlined within NODE. */ static void lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where, struct cgraph_edge **first, struct cgraph_edge **last) { struct cgraph_edge *e; for (e = where->callees; e; e = e->next_callee) if (e->callee == node) { if (!*first) *first = e; else (*last)->aux = e; *last = e; } for (e = where->callees; e; e = e->next_callee) if (!e->inline_failed) lookup_recursive_calls (node, e->callee, first, last); } /* Decide on recursive inlining: in the case function has recursive calls, inline until body size reaches given argument. */ static void cgraph_decide_recursive_inlining (struct cgraph_node *node) { int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO); int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO); struct cgraph_edge *first_call = NULL, *last_call = NULL; struct cgraph_edge *last_in_current_depth; struct cgraph_edge *e; struct cgraph_node *master_clone; int depth = 0; int n = 0; if (DECL_DECLARED_INLINE_P (node->decl)) { limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE); max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH); } /* Make sure that function is small enought to be considered for inlining. */ if (!max_depth || cgraph_estimate_size_after_inlining (1, node, node) >= limit) return; lookup_recursive_calls (node, node, &first_call, &last_call); if (!first_call) return; if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nPerforming recursive inlining on %s\n", cgraph_node_name (node)); /* We need original clone to copy around. */ master_clone = cgraph_clone_node (node); master_clone->needed = true; for (e = master_clone->callees; e; e = e->next_callee) if (!e->inline_failed) cgraph_clone_inlined_nodes (e, true); /* Do the inlining and update list of recursive call during process. */ last_in_current_depth = last_call; while (first_call && cgraph_estimate_size_after_inlining (1, node, master_clone) <= limit) { struct cgraph_edge *curr = first_call; first_call = first_call->aux; curr->aux = NULL; cgraph_redirect_edge_callee (curr, master_clone); cgraph_mark_inline_edge (curr); lookup_recursive_calls (node, curr->callee, &first_call, &last_call); if (last_in_current_depth && ++depth >= max_depth) break; n++; } /* Cleanup queue pointers. */ while (first_call) { struct cgraph_edge *next = first_call->aux; first_call->aux = NULL; first_call = next; } if (cgraph_dump_file) fprintf (cgraph_dump_file, "\n Inlined %i times, body grown from %i to %i insns\n", n, master_clone->global.insns, node->global.insns); /* Remove master clone we used for inlining. We rely that clones inlined into master clone gets queued just before master clone so we don't need recursion. */ for (node = cgraph_nodes; node != master_clone; node = node->next) if (node->global.inlined_to == master_clone) cgraph_remove_node (node); cgraph_remove_node (master_clone); } /* Set inline_failed for all callers of given function to REASON. 
*/ static void cgraph_set_inline_failed (struct cgraph_node *node, const char *reason) { struct cgraph_edge *e; if (cgraph_dump_file) fprintf (cgraph_dump_file, "Inlining failed: %s\n", reason); for (e = node->callers; e; e = e->next_caller) if (e->inline_failed) e->inline_failed = reason; } /* We use greedy algorithm for inlining of small functions: All inline candidates are put into prioritized heap based on estimated growth of the overall number of instructions and then update the estimates. INLINED and INLINED_CALEES are just pointers to arrays large enough to be passed to cgraph_inlined_into and cgraph_inlined_callees. */ static void cgraph_decide_inlining_of_small_functions (void) { struct cgraph_node *node; fibheap_t heap = fibheap_new (); struct fibnode **heap_node = xcalloc (cgraph_max_uid, sizeof (struct fibnode *)); int max_insns = ((HOST_WIDEST_INT) initial_insns * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100); /* Put all inline candidates into the heap. */ for (node = cgraph_nodes; node; node = node->next) { if (!node->local.inlinable || !node->callers || node->local.disregard_inline_limits) continue; if (!cgraph_default_inline_p (node)) { cgraph_set_inline_failed (node, N_("--param max-inline-insns-single limit reached")); continue; } heap_node[node->uid] = fibheap_insert (heap, cgraph_estimate_growth (node), node); } if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nDeciding on smaller functions:\n"); while (overall_insns <= max_insns && (node = fibheap_extract_min (heap))) { struct cgraph_edge *e, *next; int old_insns = overall_insns; heap_node[node->uid] = NULL; if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nConsidering %s with %i insns\n" " Estimated growth is %+i insns.\n", cgraph_node_name (node), node->global.insns, cgraph_estimate_growth (node)); if (!cgraph_default_inline_p (node)) { cgraph_set_inline_failed (node, N_("--param max-inline-insns-single limit reached after inlining into the callee")); continue; } for (e = node->callers; e; e = next) { next = e->next_caller; if (e->inline_failed) { struct cgraph_node *where; if (cgraph_recursive_inlining_p (e->caller, e->callee, &e->inline_failed) || !cgraph_check_inline_limits (e->caller, e->callee, &e->inline_failed)) { if (cgraph_dump_file) fprintf (cgraph_dump_file, " Not inlining into %s:%s.\n", cgraph_node_name (e->caller), e->inline_failed); continue; } next = cgraph_mark_inline (e); where = e->caller; if (where->global.inlined_to) where = where->global.inlined_to; if (heap_node[where->uid]) fibheap_replace_key (heap, heap_node[where->uid], cgraph_estimate_growth (where)); if (cgraph_dump_file) fprintf (cgraph_dump_file, " Inlined into %s which now has %i insns.\n", cgraph_node_name (e->caller), e->caller->global.insns); } } cgraph_decide_recursive_inlining (node); /* Similarly all functions called by the function we just inlined are now called more times; update keys. */ update_callee_keys (heap, heap_node, node); if (cgraph_dump_file) fprintf (cgraph_dump_file, " Inlined for a net change of %+i insns.\n", overall_insns - old_insns); } while ((node = fibheap_extract_min (heap)) != NULL) if (!node->local.disregard_inline_limits) cgraph_set_inline_failed (node, N_("--param inline-unit-growth limit reached")); fibheap_delete (heap); free (heap_node); } /* Decide on the inlining. We do so in the topological order to avoid expenses on updating data structures. 
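   Note on the budget used by the small-function pass invoked below:
   cgraph_decide_inlining_of_small_functions stops pulling candidates from its
   heap once overall_insns exceeds initial_insns scaled by the
   inline-unit-growth parameter.  For example (made-up numbers), with
   initial_insns of 10000 and --param inline-unit-growth=50, max_insns is
   10000 * (100 + 50) / 100 = 15000, so inlining of small functions stops once
   the unit-wide estimate passes 15000 insns.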
*/ static void cgraph_decide_inlining (void) { struct cgraph_node *node; int nnodes; struct cgraph_node **order = xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *)); int old_insns = 0; int i; for (node = cgraph_nodes; node; node = node->next) initial_insns += node->local.self_insns; overall_insns = initial_insns; nnodes = cgraph_postorder (order); if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nDeciding on inlining. Starting with %i insns.\n", initial_insns); for (node = cgraph_nodes; node; node = node->next) node->aux = 0; if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nInlining always_inline functions:\n"); /* In the first pass mark all always_inline edges. Do this with a priority so none of our later choices will make this impossible. */ for (i = nnodes - 1; i >= 0; i--) { struct cgraph_edge *e, *next; node = order[i]; if (!node->local.disregard_inline_limits) continue; if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nConsidering %s %i insns (always inline)\n", cgraph_node_name (node), node->global.insns); old_insns = overall_insns; for (e = node->callers; e; e = next) { next = e->next_caller; if (!e->inline_failed) continue; if (cgraph_recursive_inlining_p (e->caller, e->callee, &e->inline_failed)) continue; cgraph_mark_inline_edge (e); if (cgraph_dump_file) fprintf (cgraph_dump_file, " Inlined into %s which now has %i insns.\n", cgraph_node_name (e->caller), e->caller->global.insns); } if (cgraph_dump_file) fprintf (cgraph_dump_file, " Inlined for a net change of %+i insns.\n", overall_insns - old_insns); } if (!flag_really_no_inline) { cgraph_decide_inlining_of_small_functions (); if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nDeciding on functions called once:\n"); /* And finally decide what functions are called once. */ for (i = nnodes - 1; i >= 0; i--) { node = order[i]; if (node->callers && !node->callers->next_caller && !node->needed && node->local.inlinable && node->callers->inline_failed && !DECL_EXTERNAL (node->decl) && !DECL_COMDAT (node->decl)) { bool ok = true; struct cgraph_node *node1; /* Verify that we won't duplicate the caller. */ for (node1 = node->callers->caller; node1->callers && !node1->callers->inline_failed && ok; node1 = node1->callers->caller) if (node1->callers->next_caller || node1->needed) ok = false; if (ok) { if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nConsidering %s %i insns.\n" " Called once from %s %i insns.\n", cgraph_node_name (node), node->global.insns, cgraph_node_name (node->callers->caller), node->callers->caller->global.insns); old_insns = overall_insns; if (cgraph_check_inline_limits (node->callers->caller, node, NULL)) { cgraph_mark_inline (node->callers); if (cgraph_dump_file) fprintf (cgraph_dump_file, " Inlined into %s which now has %i insns" " for a net change of %+i insns.\n", cgraph_node_name (node->callers->caller), node->callers->caller->global.insns, overall_insns - old_insns); } else { if (cgraph_dump_file) fprintf (cgraph_dump_file, " Inline limit reached, not inlined.\n"); } } } } } /* We will never output extern functions we didn't inline. ??? Perhaps we can prevent accounting of growth of external inline functions. */ cgraph_remove_unreachable_nodes (); if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nInlined %i calls, eliminated %i functions, " "%i insns turned to %i insns.\n\n", ncalls_inlined, nfunctions_inlined, initial_insns, overall_insns); free (order); } /* Decide on the inlining. We do so in the topological order to avoid expenses on updating data structures. 
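   Unlike cgraph_decide_inlining above, the incremental variant below does not
   look at the whole callgraph: it is invoked from cgraph_finalize_function in
   non-unit-at-a-time mode and considers only the call sites of the single
   function just finalized, first honoring always_inline callees and then
   applying the usual limits to the rest.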
*/ static void cgraph_decide_inlining_incrementally (struct cgraph_node *node) { struct cgraph_edge *e; /* First of all look for always inline functions. */ for (e = node->callees; e; e = e->next_callee) if (e->callee->local.disregard_inline_limits && e->inline_failed && !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed) /* ??? It is possible that renaming variable removed the function body in duplicate_decls. See gcc.c-torture/compile/20011119-2.c */ && DECL_SAVED_TREE (e->callee->decl)) cgraph_mark_inline (e); /* Now do the automatic inlining. */ if (!flag_really_no_inline) for (e = node->callees; e; e = e->next_callee) if (e->callee->local.inlinable && e->inline_failed && !e->callee->local.disregard_inline_limits && !cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed) && cgraph_check_inline_limits (node, e->callee, &e->inline_failed) && DECL_SAVED_TREE (e->callee->decl)) { if (cgraph_default_inline_p (e->callee)) cgraph_mark_inline (e); else e->inline_failed = N_("--param max-inline-insns-single limit reached"); } } /* Return true when CALLER_DECL should be inlined into CALLEE_DECL. */ bool cgraph_inline_p (struct cgraph_edge *e, const char **reason) { *reason = e->inline_failed; return !e->inline_failed; } /* Expand all functions that must be output. Attempt to topologically sort the nodes so function is output when all called functions are already assembled to allow data to be propagated across the callgraph. Use a stack to get smaller distance between a function and its callees (later we may choose to use a more sophisticated algorithm for function reordering; we will likely want to use subsections to make the output functions appear in top-down order). */ static void cgraph_expand_all_functions (void) { struct cgraph_node *node; struct cgraph_node **order = xcalloc (cgraph_n_nodes, sizeof (struct cgraph_node *)); int order_pos = 0, new_order_pos = 0; int i; cgraph_mark_functions_to_output (); order_pos = cgraph_postorder (order); if (order_pos != cgraph_n_nodes) abort (); /* Garbage collector may remove inline clones we eliminate during optimization. So we must be sure to not reference them. */ for (i = 0; i < order_pos; i++) if (order[i]->output) order[new_order_pos++] = order[i]; for (i = new_order_pos - 1; i >= 0; i--) { node = order[i]; if (node->output) { if (!node->reachable) abort (); node->output = 0; cgraph_expand_function (node); } } free (order); } /* Mark all local functions. A local function is one whose calls can occur only in the current compilation unit and all its calls are explicit, so we can change its calling convention. We simply mark all static functions whose address is not taken as local. */ static void cgraph_mark_local_functions (void) { struct cgraph_node *node; if (cgraph_dump_file) fprintf (cgraph_dump_file, "\nMarking local functions:"); /* Figure out functions we want to assemble. */ for (node = cgraph_nodes; node; node = node->next) { node->local.local = (!node->needed && DECL_SAVED_TREE (node->decl) && !TREE_PUBLIC (node->decl)); if (cgraph_dump_file && node->local.local) fprintf (cgraph_dump_file, " %s", cgraph_node_name (node)); } if (cgraph_dump_file) fprintf (cgraph_dump_file, "\n\n"); } /* Return true when function body of DECL still needs to be kept around for later re-use. */ bool cgraph_preserve_function_body_p (tree decl) { struct cgraph_node *node; /* Keep the body; we're going to dump it. 
*/ if (dump_enabled_p (TDI_all)) return true; if (!cgraph_global_info_ready) return (DECL_INLINE (decl) && !flag_really_no_inline); /* Look if there is any clone around. */ for (node = cgraph_node (decl); node; node = node->next_clone) if (node->global.inlined_to) return true; return false; } /* Perform simple optimizations based on callgraph. */ void cgraph_optimize (void) { #ifdef ENABLE_CHECKING verify_cgraph (); #endif if (!flag_unit_at_a_time) return; timevar_push (TV_CGRAPHOPT); if (!quiet_flag) fprintf (stderr, "Performing intraprocedural optimizations\n"); cgraph_mark_local_functions (); if (cgraph_dump_file) { fprintf (cgraph_dump_file, "Marked "); dump_cgraph (cgraph_dump_file); } if (flag_inline_trees) cgraph_decide_inlining (); cgraph_global_info_ready = true; if (cgraph_dump_file) { fprintf (cgraph_dump_file, "Optimized "); dump_cgraph (cgraph_dump_file); } timevar_pop (TV_CGRAPHOPT); /* Output everything. */ if (!quiet_flag) fprintf (stderr, "Assembling functions:\n"); #ifdef ENABLE_CHECKING verify_cgraph (); #endif cgraph_expand_all_functions (); if (cgraph_dump_file) { fprintf (cgraph_dump_file, "\nFinal "); dump_cgraph (cgraph_dump_file); } #ifdef ENABLE_CHECKING verify_cgraph (); /* Double check that all inline clones are gone and that all function bodies have been released from memory. */ if (flag_unit_at_a_time && !dump_enabled_p (TDI_all) && !(sorrycount || errorcount)) { struct cgraph_node *node; bool error_found = false; for (node = cgraph_nodes; node; node = node->next) if (node->analyzed && (node->global.inlined_to || DECL_SAVED_TREE (node->decl))) { error_found = true; dump_cgraph_node (stderr, node); } if (error_found) internal_error ("Nodes with no released memory found."); } #endif } /* Generate and emit a static constructor or destructor. WHICH must be one of 'I' or 'D'. BODY should be a STATEMENT_LIST containing GENERIC statements. */ void cgraph_build_static_cdtor (char which, tree body) { static int counter = 0; char which_buf[16]; tree decl, name; sprintf (which_buf, "%c_%d", which, counter++); name = get_file_function_name_long (which_buf); decl = build_decl (FUNCTION_DECL, name, build_function_type (void_type_node, void_list_node)); current_function_decl = decl; DECL_RESULT (decl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node); allocate_struct_function (decl); TREE_STATIC (decl) = 1; TREE_USED (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 1; DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (decl) = 1; DECL_SAVED_TREE (decl) = body; TREE_PUBLIC (decl) = ! targetm.have_ctors_dtors; DECL_UNINLINABLE (decl) = 1; DECL_INITIAL (decl) = make_node (BLOCK); TREE_USED (DECL_INITIAL (decl)) = 1; DECL_SOURCE_LOCATION (decl) = input_location; cfun->function_end_locus = input_location; if (which == 'I') DECL_STATIC_CONSTRUCTOR (decl) = 1; else if (which == 'D') DECL_STATIC_DESTRUCTOR (decl) = 1; else abort (); gimplify_function_tree (decl); /* ??? We will get called LATE in the compilation process. */ if (cgraph_global_info_ready) tree_rest_of_compilation (decl, false); else cgraph_finalize_function (decl, 0); if (targetm.have_ctors_dtors) { void (*fn) (rtx, int); if (which == 'I') fn = targetm.asm_out.constructor; else fn = targetm.asm_out.destructor; fn (XEXP (DECL_RTL (decl), 0), DEFAULT_INIT_PRIORITY); } } /* Language-specific hook definitions for C front end. Copyright (C) 1991, 1995, 1997, 1998, 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Various declarations for the C and C++ pretty-printers. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Gabriel Dos Reis This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_C_PRETTY_PRINTER #define GCC_C_PRETTY_PRINTER typedef enum { pp_c_flag_abstract = 1 << 1, pp_c_flag_last_bit = 2 } pp_c_pretty_print_flags; /* The data type used to bundle information necessary for pretty-printing a C or C++ entity. */ typedef struct c_pretty_print_info c_pretty_printer; /* The type of a C pretty-printer 'member' function. */ typedef void (*c_pretty_print_fn) (c_pretty_printer *, tree); /* The datatype that contains information necessary for pretty-printing a tree that represents a C construct. Any pretty-printer for a language using C/c++ syntax can derive from this datatype and reuse facilities provided here. It can do so by having a subobject of type c_pretty_printer and override the macro pp_c_base to return a pointer to that subobject. Such a pretty-printer has the responsibility to initialize the pp_base() part, then call pp_c_pretty_printer_init to set up the components that are specific to the C pretty-printer. A derived pretty-printer can override any function listed in the vtable below. See cp/cxx-pretty-print.h and cp/cxx-pretty-print.c for an example of derivation. */ struct c_pretty_print_info { pretty_printer base; /* Points to the first element of an array of offset-list. Not used yet. */ int *offset_list; pp_flags flags; /* These must be overridden by each of the C and C++ front-end to reflect their understanding of syntactic productions when they differ. 
*/ c_pretty_print_fn declaration; c_pretty_print_fn declaration_specifiers; c_pretty_print_fn declarator; c_pretty_print_fn abstract_declarator; c_pretty_print_fn direct_abstract_declarator; c_pretty_print_fn type_specifier_seq; c_pretty_print_fn direct_declarator; c_pretty_print_fn ptr_operator; c_pretty_print_fn parameter_list; c_pretty_print_fn type_id; c_pretty_print_fn simple_type_specifier; c_pretty_print_fn function_specifier; c_pretty_print_fn storage_class_specifier; c_pretty_print_fn initializer; c_pretty_print_fn statement; c_pretty_print_fn id_expression; c_pretty_print_fn primary_expression; c_pretty_print_fn postfix_expression; c_pretty_print_fn unary_expression; c_pretty_print_fn multiplicative_expression; c_pretty_print_fn conditional_expression; c_pretty_print_fn assignment_expression; c_pretty_print_fn expression; }; /* Override the pp_base macro. Derived pretty-printers should not touch this macro. Instead they should override pp_c_base. */ #undef pp_base #define pp_base(PP) (&pp_c_base (PP)->base) #define pp_c_tree_identifier(PPI, ID) \ pp_c_identifier (PPI, IDENTIFIER_POINTER (ID)) #define pp_declaration(PPI, T) \ pp_c_base (PPI)->declaration (pp_c_base (PPI), T) #define pp_declaration_specifiers(PPI, D) \ pp_c_base (PPI)->declaration_specifiers (pp_c_base (PPI), D) #define pp_abstract_declarator(PP, D) \ pp_c_base (PP)->abstract_declarator (pp_c_base (PP), D) #define pp_type_specifier_seq(PPI, D) \ pp_c_base (PPI)->type_specifier_seq (pp_c_base (PPI), D) #define pp_declarator(PPI, D) \ pp_c_base (PPI)->declarator (pp_c_base (PPI), D) #define pp_direct_declarator(PPI, D) \ pp_c_base (PPI)->direct_declarator (pp_c_base (PPI), D) #define pp_direct_abstract_declarator(PP, D) \ pp_c_base (PP)->direct_abstract_declarator (pp_c_base (PP), D) #define pp_ptr_operator(PP, D) \ pp_c_base (PP)->ptr_operator (pp_c_base (PP), D) #define pp_parameter_list(PPI, T) \ pp_c_base (PPI)->parameter_list (pp_c_base (PPI), T) #define pp_type_id(PPI, D) \ pp_c_base (PPI)->type_id (pp_c_base (PPI), D) #define pp_simple_type_specifier(PP, T) \ pp_c_base (PP)->simple_type_specifier (pp_c_base (PP), T) #define pp_function_specifier(PP, D) \ pp_c_base (PP)->function_specifier (pp_c_base (PP), D) #define pp_storage_class_specifier(PP, D) \ pp_c_base (PP)->storage_class_specifier (pp_c_base (PP), D) #define pp_statement(PPI, S) \ pp_c_base (PPI)->statement (pp_c_base (PPI), S) #define pp_id_expression(PP, E) \ pp_c_base (PP)->id_expression (pp_c_base (PP), E) #define pp_primary_expression(PPI, E) \ pp_c_base (PPI)->primary_expression (pp_c_base (PPI), E) #define pp_postfix_expression(PPI, E) \ pp_c_base (PPI)->postfix_expression (pp_c_base (PPI), E) #define pp_unary_expression(PPI, E) \ pp_c_base (PPI)->unary_expression (pp_c_base (PPI), E) #define pp_initializer(PPI, E) \ pp_c_base (PPI)->initializer (pp_c_base (PPI), E) #define pp_multiplicative_expression(PPI, E) \ pp_c_base (PPI)->multiplicative_expression (pp_c_base (PPI), E) #define pp_conditional_expression(PPI, E) \ pp_c_base (PPI)->conditional_expression (pp_c_base (PPI), E) #define pp_assignment_expression(PPI, E) \ pp_c_base (PPI)->assignment_expression (pp_c_base (PPI), E) #define pp_expression(PP, E) \ pp_c_base (PP)->expression (pp_c_base (PP), E) /* Returns the c_pretty_printer base object of PRETTY-PRINTER. This macro must be overridden by any subclass of c_pretty_print_info.
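   For instance, a derived printer can embed the base object as its first
   member and redirect this macro at it, along the lines of (hypothetical
   names, shown only as a sketch):
       typedef struct my_printer { c_pretty_printer c_base; int extra; } my_printer;
       #define pp_c_base(PP) (&(PP)->c_base)
   so that the dispatch macros above keep working when handed a my_printer;
   cp/cxx-pretty-print.h, cited earlier, is the in-tree example of such a
   derivation.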
*/ #define pp_c_base(PP) (PP) extern void pp_c_pretty_printer_init (c_pretty_printer *); void pp_c_whitespace (c_pretty_printer *); void pp_c_left_paren (c_pretty_printer *); void pp_c_right_paren (c_pretty_printer *); void pp_c_left_brace (c_pretty_printer *); void pp_c_right_brace (c_pretty_printer *); void pp_c_left_bracket (c_pretty_printer *); void pp_c_right_bracket (c_pretty_printer *); void pp_c_dot (c_pretty_printer *); void pp_c_ampersand (c_pretty_printer *); void pp_c_star (c_pretty_printer *); void pp_c_arrow (c_pretty_printer *); void pp_c_semicolon (c_pretty_printer *); void pp_c_complement (c_pretty_printer *); void pp_c_exclamation (c_pretty_printer *); void pp_c_space_for_pointer_operator (c_pretty_printer *, tree); /* Declarations. */ void pp_c_tree_decl_identifier (c_pretty_printer *, tree); void pp_c_function_definition (c_pretty_printer *, tree); void pp_c_attributes (c_pretty_printer *, tree); void pp_c_type_qualifier_list (c_pretty_printer *, tree); void pp_c_parameter_type_list (c_pretty_printer *, tree); void pp_c_declaration (c_pretty_printer *, tree); void pp_c_declaration_specifiers (c_pretty_printer *, tree); void pp_c_declarator (c_pretty_printer *, tree); void pp_c_direct_declarator (c_pretty_printer *, tree); void pp_c_specifier_qualifier_list (c_pretty_printer *, tree); void pp_c_function_specifier (c_pretty_printer *, tree); void pp_c_type_id (c_pretty_printer *, tree); void pp_c_direct_abstract_declarator (c_pretty_printer *, tree); void pp_c_type_specifier (c_pretty_printer *, tree); void pp_c_storage_class_specifier (c_pretty_printer *, tree); /* Statements. */ void pp_c_statement (c_pretty_printer *, tree); /* Expressions. */ void pp_c_expression (c_pretty_printer *, tree); void pp_c_logical_or_expression (c_pretty_printer *, tree); void pp_c_expression_list (c_pretty_printer *, tree); void pp_c_call_argument_list (c_pretty_printer *, tree); void pp_c_unary_expression (c_pretty_printer *, tree); void pp_c_cast_expression (c_pretty_printer *, tree); void pp_c_postfix_expression (c_pretty_printer *, tree); void pp_c_primary_expression (c_pretty_printer *, tree); void pp_c_init_declarator (c_pretty_printer *, tree); void pp_c_constant (c_pretty_printer *, tree); void pp_c_id_expression (c_pretty_printer *, tree); void pp_c_identifier (c_pretty_printer *, const char *); void pp_c_string_literal (c_pretty_printer *, tree); void print_c_tree (FILE *file, tree t); #endif /* GCC_C_PRETTY_PRINTER */ enum c_language_kind c_language = clk_c; /* ### When changing hooks, consider if ObjC needs changing too!! 
### */ #undef LANG_HOOKS_NAME #define LANG_HOOKS_NAME "GNU C" #undef LANG_HOOKS_IDENTIFIER_SIZE #define LANG_HOOKS_IDENTIFIER_SIZE C_SIZEOF_STRUCT_LANG_IDENTIFIER #undef LANG_HOOKS_INIT #define LANG_HOOKS_INIT c_objc_common_init #undef LANG_HOOKS_FINISH #define LANG_HOOKS_FINISH c_common_finish #undef LANG_HOOKS_INIT_OPTIONS #define LANG_HOOKS_INIT_OPTIONS c_common_init_options #undef LANG_HOOKS_INITIALIZE_DIAGNOSTICS #define LANG_HOOKS_INITIALIZE_DIAGNOSTICS c_initialize_diagnostics #undef LANG_HOOKS_HANDLE_OPTION #define LANG_HOOKS_HANDLE_OPTION c_common_handle_option #undef LANG_HOOKS_MISSING_ARGUMENT #define LANG_HOOKS_MISSING_ARGUMENT c_common_missing_argument #undef LANG_HOOKS_POST_OPTIONS #define LANG_HOOKS_POST_OPTIONS c_common_post_options #undef LANG_HOOKS_GET_ALIAS_SET #define LANG_HOOKS_GET_ALIAS_SET c_common_get_alias_set #undef LANG_HOOKS_EXPAND_EXPR #define LANG_HOOKS_EXPAND_EXPR c_expand_expr #undef LANG_HOOKS_EXPAND_DECL #define LANG_HOOKS_EXPAND_DECL c_expand_decl #undef LANG_HOOKS_MARK_ADDRESSABLE #define LANG_HOOKS_MARK_ADDRESSABLE c_mark_addressable #undef LANG_HOOKS_PARSE_FILE #define LANG_HOOKS_PARSE_FILE c_common_parse_file #undef LANG_HOOKS_CLEAR_BINDING_STACK #define LANG_HOOKS_CLEAR_BINDING_STACK lhd_do_nothing #undef LANG_HOOKS_TRUTHVALUE_CONVERSION #define LANG_HOOKS_TRUTHVALUE_CONVERSION c_objc_common_truthvalue_conversion #undef LANG_HOOKS_FINISH_INCOMPLETE_DECL #define LANG_HOOKS_FINISH_INCOMPLETE_DECL c_finish_incomplete_decl #undef LANG_HOOKS_UNSAFE_FOR_REEVAL #define LANG_HOOKS_UNSAFE_FOR_REEVAL c_common_unsafe_for_reeval #undef LANG_HOOKS_STATICP #define LANG_HOOKS_STATICP c_staticp #undef LANG_HOOKS_NO_BODY_BLOCKS #define LANG_HOOKS_NO_BODY_BLOCKS true #undef LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL #define LANG_HOOKS_WARN_UNUSED_GLOBAL_DECL c_warn_unused_global_decl #undef LANG_HOOKS_PRINT_IDENTIFIER #define LANG_HOOKS_PRINT_IDENTIFIER c_print_identifier #undef LANG_HOOKS_FUNCTION_ENTER_NESTED #define LANG_HOOKS_FUNCTION_ENTER_NESTED c_push_function_context #undef LANG_HOOKS_FUNCTION_LEAVE_NESTED #define LANG_HOOKS_FUNCTION_LEAVE_NESTED c_pop_function_context #undef LANG_HOOKS_FUNCTION_MISSING_NORETURN_OK_P #define LANG_HOOKS_FUNCTION_MISSING_NORETURN_OK_P c_missing_noreturn_ok_p #undef LANG_HOOKS_DUP_LANG_SPECIFIC_DECL #define LANG_HOOKS_DUP_LANG_SPECIFIC_DECL c_dup_lang_specific_decl /* Attribute hooks. 
*/ #undef LANG_HOOKS_COMMON_ATTRIBUTE_TABLE #define LANG_HOOKS_COMMON_ATTRIBUTE_TABLE c_common_attribute_table #undef LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE #define LANG_HOOKS_FORMAT_ATTRIBUTE_TABLE c_common_format_attribute_table #undef LANG_HOOKS_TREE_INLINING_CANNOT_INLINE_TREE_FN #define LANG_HOOKS_TREE_INLINING_CANNOT_INLINE_TREE_FN \ c_cannot_inline_tree_fn #undef LANG_HOOKS_TREE_INLINING_DISREGARD_INLINE_LIMITS #define LANG_HOOKS_TREE_INLINING_DISREGARD_INLINE_LIMITS \ c_disregard_inline_limits #undef LANG_HOOKS_TREE_INLINING_ANON_AGGR_TYPE_P #define LANG_HOOKS_TREE_INLINING_ANON_AGGR_TYPE_P \ anon_aggr_type_p #undef LANG_HOOKS_TREE_INLINING_CONVERT_PARM_FOR_INLINING #define LANG_HOOKS_TREE_INLINING_CONVERT_PARM_FOR_INLINING \ c_convert_parm_for_inlining #undef LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN #define LANG_HOOKS_TREE_DUMP_DUMP_TREE_FN c_dump_tree #undef LANG_HOOKS_CALLGRAPH_EXPAND_FUNCTION #define LANG_HOOKS_CALLGRAPH_EXPAND_FUNCTION c_expand_body #undef LANG_HOOKS_TYPE_FOR_MODE #define LANG_HOOKS_TYPE_FOR_MODE c_common_type_for_mode #undef LANG_HOOKS_TYPE_FOR_SIZE #define LANG_HOOKS_TYPE_FOR_SIZE c_common_type_for_size #undef LANG_HOOKS_SIGNED_TYPE #define LANG_HOOKS_SIGNED_TYPE c_common_signed_type #undef LANG_HOOKS_UNSIGNED_TYPE #define LANG_HOOKS_UNSIGNED_TYPE c_common_unsigned_type #undef LANG_HOOKS_SIGNED_OR_UNSIGNED_TYPE #define LANG_HOOKS_SIGNED_OR_UNSIGNED_TYPE c_common_signed_or_unsigned_type #undef LANG_HOOKS_INCOMPLETE_TYPE_ERROR #define LANG_HOOKS_INCOMPLETE_TYPE_ERROR c_incomplete_type_error #undef LANG_HOOKS_TYPE_PROMOTES_TO #define LANG_HOOKS_TYPE_PROMOTES_TO c_type_promotes_to #undef LANG_HOOKS_REGISTER_BUILTIN_TYPE #define LANG_HOOKS_REGISTER_BUILTIN_TYPE c_register_builtin_type /* The C front end's scoping structure is very different from that expected by the language-independent code; it is best to disable all of pushlevel, poplevel, set_block, and getdecls. This means it must also provide its own write_globals. */ #undef LANG_HOOKS_PUSHLEVEL #define LANG_HOOKS_PUSHLEVEL lhd_do_nothing_i #undef LANG_HOOKS_POPLEVEL #define LANG_HOOKS_POPLEVEL lhd_do_nothing_iii_return_null_tree #undef LANG_HOOKS_SET_BLOCK #define LANG_HOOKS_SET_BLOCK lhd_do_nothing_t #undef LANG_HOOKS_GETDECLS #define LANG_HOOKS_GETDECLS lhd_return_null_tree_v #undef LANG_HOOKS_WRITE_GLOBALS #define LANG_HOOKS_WRITE_GLOBALS c_write_global_declarations /* Hooks for tree gimplification. */ #undef LANG_HOOKS_GIMPLIFY_EXPR #define LANG_HOOKS_GIMPLIFY_EXPR c_gimplify_expr #undef LANG_HOOKS_TYPES_COMPATIBLE_P #define LANG_HOOKS_TYPES_COMPATIBLE_P c_types_compatible_p /* ### When changing hooks, consider if ObjC needs changing too!! ### */ /* Each front end provides its own. */ const struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER; /* Tree code classes. */ #define DEFTREECODE(SYM, NAME, TYPE, LENGTH) TYPE, const char tree_code_type[] = { /* This file contains the definitions and documentation for the tree codes used in GCC. Copyright (C) 1987, 1988, 1993, 1995, 1997, 1998, 2000, 2001, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The third argument can be: 'x' for an exceptional code (fits no category). 't' for a type object code. 'c' for codes for constants. 'd' for codes for declarations (also serving as variable refs). 'r' for codes for references to storage. '<' for codes for comparison expressions. '1' for codes for unary arithmetic expressions. '2' for codes for binary arithmetic expressions. 's' for codes for "statement" expressions, which have side-effects, but usually no interesting value. 'e' for codes for other kinds of expressions. */ /* For `r', `e', `<', `1', `2', and `s' nodes, which use struct tree_exp, the 4th element is the number of argument slots to allocate. This determines the size of the tree node object. Other nodes use different structures, and the size is determined by the tree_union member structure; the 4th element should be zero. Languages that define language-specific 'x' or 'c' codes must define the tree_size langhook to say how big they are. */ /* Any erroneous construct is parsed into a node of this type. This type of node is accepted without complaint in all contexts by later parsing activities, to avoid multiple error messages for one error. No fields in these nodes are used except the TREE_CODE. */ DEFTREECODE (ERROR_MARK, "error_mark", 'x', 0) /* Used to represent a name (such as, in the DECL_NAME of a decl node). Internally it looks like a STRING_CST node. There is only one IDENTIFIER_NODE ever made for any particular name. Use `get_identifier' to get it (or create it, the first time). */ DEFTREECODE (IDENTIFIER_NODE, "identifier_node", 'x', 0) /* Has the TREE_VALUE and TREE_PURPOSE fields. */ /* These nodes are made into lists by chaining through the TREE_CHAIN field. The elements of the list live in the TREE_VALUE fields, while TREE_PURPOSE fields are occasionally used as well to get the effect of Lisp association lists. */ DEFTREECODE (TREE_LIST, "tree_list", 'x', 0) /* These nodes contain an array of tree nodes. */ DEFTREECODE (TREE_VEC, "tree_vec", 'x', 0) /* A symbol binding block. These are arranged in a tree, where the BLOCK_SUBBLOCKS field contains a chain of subblocks chained through the BLOCK_CHAIN field. BLOCK_SUPERCONTEXT points to the parent block. For a block which represents the outermost scope of a function, it points to the FUNCTION_DECL node. BLOCK_VARS points to a chain of decl nodes. BLOCK_TYPE_TAGS points to a chain of types which have their own names. BLOCK_CHAIN points to the next BLOCK at the same level. BLOCK_ABSTRACT_ORIGIN points to the original (abstract) tree node which this block is an instance of, or else is NULL to indicate that this block is not an instance of anything else. When non-NULL, the value could either point to another BLOCK node or it could point to a FUNCTION_DECL node (e.g. in the case of a block representing the outermost scope of a particular inlining of a function). BLOCK_ABSTRACT is nonzero if the block represents an abstract instance of a block (i.e. one which is nested within an abstract instance of an inline function). TREE_ASM_WRITTEN is nonzero if the block was actually referenced in the generated assembly. 
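   As a rough illustration, in the C function
       void f (void) { int i; { int j; } }
   the outer braces give a BLOCK whose BLOCK_VARS chain holds the declaration
   of `i' and whose BLOCK_SUPERCONTEXT is the FUNCTION_DECL for `f'; the
   inner braces give a second BLOCK, reached via BLOCK_SUBBLOCKS of the outer
   one, whose BLOCK_VARS chain holds the declaration of `j'.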
*/ DEFTREECODE (BLOCK, "block", 'x', 0) /* Each data type is represented by a tree node whose code is one of the following: */ /* Each node that represents a data type has a component TYPE_SIZE containing a tree that is an expression for the size in bits. The TYPE_MODE contains the machine mode for values of this type. The TYPE_POINTER_TO field contains a type for a pointer to this type, or zero if no such has been created yet. The TYPE_NEXT_VARIANT field is used to chain together types that are variants made by type modifiers such as "const" and "volatile". The TYPE_MAIN_VARIANT field, in any member of such a chain, points to the start of the chain. The TYPE_NONCOPIED_PARTS field is a list specifying which parts of an object of this type should *not* be copied by assignment. The TREE_VALUE of each is a FIELD_DECL that should not be copied. The TREE_PURPOSE is an initial value for that field when an object of this type is initialized via an INIT_EXPR. It may be NULL if no special value is required. Even the things in this list are copied if the right-hand side of an assignment is known to be a complete object (rather than being, perhaps, a subobject of some other object.) The determination of what constitutes a complete object is done by fixed_type_p. The TYPE_NAME field contains info on the name used in the program for this type (for GDB symbol table output). It is either a TYPE_DECL node, for types that are typedefs, or an IDENTIFIER_NODE in the case of structs, unions or enums that are known with a tag, or zero for types that have no special name. The TYPE_CONTEXT for any sort of type which could have a name or which could have named members (e.g. tagged types in C/C++) will point to the node which represents the scope of the given type, or will be NULL_TREE if the type has "file scope". For most types, this will point to a BLOCK node or a FUNCTION_DECL node, but it could also point to a FUNCTION_TYPE node (for types whose scope is limited to the formal parameter list of some function type specification) or it could point to a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE node (for C++ "member" types). For non-tagged-types, TYPE_CONTEXT need not be set to anything in particular, since any type which is of some type category (e.g. an array type or a function type) which cannot either have a name itself or have named members doesn't really have a "scope" per se. The TREE_CHAIN field is used as a forward-references to names for ENUMERAL_TYPE, RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE nodes; see below. */ DEFTREECODE (VOID_TYPE, "void_type", 't', 0) /* The void type in C */ /* Integer types in all languages, including char in C. Also used for sub-ranges of other discrete types. Has components TYPE_MIN_VALUE, TYPE_MAX_VALUE (expressions, inclusive) and TYPE_PRECISION (number of bits used by this type). In the case of a subrange type in Pascal, the TREE_TYPE of this will point at the supertype (another INTEGER_TYPE, or an ENUMERAL_TYPE, CHAR_TYPE, or BOOLEAN_TYPE). Otherwise, the TREE_TYPE is zero. */ DEFTREECODE (INTEGER_TYPE, "integer_type", 't', 0) /* C's float and double. Different floating types are distinguished by machine mode and by the TYPE_SIZE and the TYPE_PRECISION. */ DEFTREECODE (REAL_TYPE, "real_type", 't', 0) /* Complex number types. The TREE_TYPE field is the data type of the real and imaginary parts. */ DEFTREECODE (COMPLEX_TYPE, "complex_type", 't', 0) /* Vector types. The TREE_TYPE field is the data type of the vector elements. 
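   For instance, C's `unsigned char' is normally an INTEGER_TYPE with
   TYPE_PRECISION 8, TYPE_MIN_VALUE 0 and TYPE_MAX_VALUE 255; `double' is a
   REAL_TYPE distinguished by its machine mode and TYPE_PRECISION; and a
   vector of four `float's used for SIMD is a VECTOR_TYPE whose TREE_TYPE is
   the REAL_TYPE for `float'.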
*/ DEFTREECODE (VECTOR_TYPE, "vector_type", 't', 0) /* C enums. The type node looks just like an INTEGER_TYPE node. The symbols for the values of the enum type are defined by CONST_DECL nodes, but the type does not point to them; however, the TYPE_VALUES is a list in which each element's TREE_PURPOSE is a name and the TREE_VALUE is the value (an INTEGER_CST node). */ /* A forward reference `enum foo' when no enum named foo is defined yet has zero (a null pointer) in its TYPE_SIZE. The tag name is in the TYPE_NAME field. If the type is later defined, the normal fields are filled in. RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE forward refs are treated similarly. */ DEFTREECODE (ENUMERAL_TYPE, "enumeral_type", 't', 0) /* Pascal's boolean type (true or false are the only values); no special fields needed. */ DEFTREECODE (BOOLEAN_TYPE, "boolean_type", 't', 0) /* CHAR in Pascal; not used in C. No special fields needed. */ DEFTREECODE (CHAR_TYPE, "char_type", 't', 0) /* All pointer-to-x types have code POINTER_TYPE. The TREE_TYPE points to the node for the type pointed to. */ DEFTREECODE (POINTER_TYPE, "pointer_type", 't', 0) /* An offset is a pointer relative to an object. The TREE_TYPE field is the type of the object at the offset. The TYPE_OFFSET_BASETYPE points to the node for the type of object that the offset is relative to. */ DEFTREECODE (OFFSET_TYPE, "offset_type", 't', 0) /* A reference is like a pointer except that it is coerced automatically to the value it points to. Used in C++. */ DEFTREECODE (REFERENCE_TYPE, "reference_type", 't', 0) /* METHOD_TYPE is the type of a function which takes an extra first argument for "self", which is not present in the declared argument list. The TREE_TYPE is the return type of the method. The TYPE_METHOD_BASETYPE is the type of "self". TYPE_ARG_TYPES is the real argument list, which includes the hidden argument for "self". */ DEFTREECODE (METHOD_TYPE, "method_type", 't', 0) /* Used for Pascal; details not determined right now. */ DEFTREECODE (FILE_TYPE, "file_type", 't', 0) /* Types of arrays. Special fields: TREE_TYPE Type of an array element. TYPE_DOMAIN Type to index by. Its range of values specifies the array length. The field TYPE_POINTER_TO (TREE_TYPE (array_type)) is always nonzero and holds the type to coerce a value of that array type to in C. TYPE_STRING_FLAG indicates a string (in contrast to an array of chars) in languages (such as Chill) that make a distinction. */ /* Array types in C or Pascal */ DEFTREECODE (ARRAY_TYPE, "array_type", 't', 0) /* Types of sets for Pascal. Special fields are the same as in an array type. The target type is always a boolean type. Used for both bitstrings and powersets in Chill; TYPE_STRING_FLAG indicates a bitstring. */ DEFTREECODE (SET_TYPE, "set_type", 't', 0) /* Struct in C, or record in Pascal. */ /* Special fields: TYPE_FIELDS chain of FIELD_DECLs for the fields of the struct, and VAR_DECLs, TYPE_DECLs and CONST_DECLs for record-scope variables, types and enumerators. A few may need to be added for Pascal. */ /* See the comment above, before ENUMERAL_TYPE, for how forward references to struct tags are handled in C. */ DEFTREECODE (RECORD_TYPE, "record_type", 't', 0) /* Union in C. Like a struct, except that the offsets of the fields will all be zero. */ /* See the comment above, before ENUMERAL_TYPE, for how forward references to union tags are handled in C. 
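   For example, `struct s { int i; double d; };' produces a RECORD_TYPE whose
   TYPE_FIELDS chain contains the FIELD_DECLs for `i' and `d', while a use of
   `union u' before any definition of that union produces a UNION_TYPE whose
   TYPE_SIZE is zero until the definition is seen.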
*/ DEFTREECODE (UNION_TYPE, "union_type", 't', 0) /* C union type */ /* Similar to UNION_TYPE, except that the expressions in DECL_QUALIFIER in each FIELD_DECL determine what the union contains. The first field whose DECL_QUALIFIER expression is true is deemed to occupy the union. */ DEFTREECODE (QUAL_UNION_TYPE, "qual_union_type", 't', 0) /* Type of functions. Special fields: TREE_TYPE type of value returned. TYPE_ARG_TYPES list of types of arguments expected. this list is made of TREE_LIST nodes. Types of "Procedures" in languages where they are different from functions have code FUNCTION_TYPE also, but then TREE_TYPE is zero or void type. */ DEFTREECODE (FUNCTION_TYPE, "function_type", 't', 0) /* This is a language-specific kind of type. Its meaning is defined by the language front end. layout_type does not know how to lay this out, so the front-end must do so manually. */ DEFTREECODE (LANG_TYPE, "lang_type", 't', 0) /* Expressions */ /* First, the constants. */ /* Contents are in TREE_INT_CST_LOW and TREE_INT_CST_HIGH fields, 32 bits each, giving us a 64 bit constant capability. Note: constants of type char in Pascal are INTEGER_CST, and so are pointer constants such as nil in Pascal or NULL in C. `(int *) 1' in C also results in an INTEGER_CST. */ DEFTREECODE (INTEGER_CST, "integer_cst", 'c', 0) /* Contents are in TREE_REAL_CST field. */ DEFTREECODE (REAL_CST, "real_cst", 'c', 0) /* Contents are in TREE_REALPART and TREE_IMAGPART fields, whose contents are other constant nodes. */ DEFTREECODE (COMPLEX_CST, "complex_cst", 'c', 0) /* Contents are in TREE_VECTOR_CST_ELTS field. */ DEFTREECODE (VECTOR_CST, "vector_cst", 'c', 0) /* Contents are TREE_STRING_LENGTH and TREE_STRING_POINTER fields. */ DEFTREECODE (STRING_CST, "string_cst", 'c', 0) /* Declarations. All references to names are represented as ..._DECL nodes. The decls in one binding context are chained through the TREE_CHAIN field. Each DECL has a DECL_NAME field which contains an IDENTIFIER_NODE. (Some decls, most often labels, may have zero as the DECL_NAME). DECL_CONTEXT points to the node representing the context in which this declaration has its scope. For FIELD_DECLs, this is the RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node that the field is a member of. For VAR_DECL, PARM_DECL, FUNCTION_DECL, LABEL_DECL, and CONST_DECL nodes, this points to either the FUNCTION_DECL for the containing function, the RECORD_TYPE or UNION_TYPE for the containing type, or NULL_TREE or a TRANSLATION_UNIT_DECL if the given decl has "file scope". DECL_ABSTRACT_ORIGIN, if non-NULL, points to the original (abstract) ..._DECL node of which this decl is an (inlined or template expanded) instance. The TREE_TYPE field holds the data type of the object, when relevant. LABEL_DECLs have no data type. For TYPE_DECL, the TREE_TYPE field contents are the type whose name is being declared. The DECL_ALIGN, DECL_SIZE, and DECL_MODE fields exist in decl nodes just as in type nodes. They are unused in LABEL_DECL, TYPE_DECL and CONST_DECL nodes. DECL_FIELD_BIT_OFFSET holds an integer number of bits offset for the location. DECL_VOFFSET holds an expression for a variable offset; it is to be multiplied by DECL_VOFFSET_UNIT (an integer). These fields are relevant only in FIELD_DECLs and PARM_DECLs. DECL_INITIAL holds the value to initialize a variable to, or the value of a constant. For a function, it holds the body (a node of type BLOCK representing the function's binding contour and whose body contains the function's statements.) 
For a LABEL_DECL in C, it is a flag, nonzero if the label's definition has been seen. PARM_DECLs use a special field: DECL_ARG_TYPE is the type in which the argument is actually passed, which may be different from its type within the function. FUNCTION_DECLs use four special fields: DECL_ARGUMENTS holds a chain of PARM_DECL nodes for the arguments. DECL_RESULT holds a RESULT_DECL node for the value of a function, or it is 0 for a function that returns no value. (C functions returning void have zero here.) The TREE_TYPE field is the type in which the result is actually returned. This is usually the same as the return type of the FUNCTION_DECL, but it may be a wider integer type because of promotion. DECL_FUNCTION_CODE is a code number that is nonzero for built-in functions. Its value is an enum built_in_function that says which built-in function it is. DECL_SOURCE_FILE holds a filename string and DECL_SOURCE_LINE holds a line number. In some cases these can be the location of a reference, if no definition has been seen. DECL_ABSTRACT is nonzero if the decl represents an abstract instance of a decl (i.e. one which is nested within an abstract instance of a inline function. */ DEFTREECODE (FUNCTION_DECL, "function_decl", 'd', 0) DEFTREECODE (LABEL_DECL, "label_decl", 'd', 0) DEFTREECODE (CONST_DECL, "const_decl", 'd', 0) DEFTREECODE (TYPE_DECL, "type_decl", 'd', 0) DEFTREECODE (VAR_DECL, "var_decl", 'd', 0) DEFTREECODE (PARM_DECL, "parm_decl", 'd', 0) DEFTREECODE (RESULT_DECL, "result_decl", 'd', 0) DEFTREECODE (FIELD_DECL, "field_decl", 'd', 0) /* A namespace declaration. Namespaces appear in DECL_CONTEXT of other _DECLs, providing a hierarchy of names. */ DEFTREECODE (NAMESPACE_DECL, "namespace_decl", 'd', 0) /* A translation unit. This is not technically a declaration, since it can't be looked up, but it's close enough. */ DEFTREECODE (TRANSLATION_UNIT_DECL, "translation_unit_decl", 'd', 0) /* References to storage. */ /* Value is structure or union component. Operand 0 is the structure or union (an expression). Operand 1 is the field (a node of type FIELD_DECL). Operand 2, if present, is the value of DECL_FIELD_OFFSET, measured in units of DECL_OFFSET_ALIGN / BITS_PER_UNIT. */ DEFTREECODE (COMPONENT_REF, "component_ref", 'r', 3) /* Reference to a group of bits within an object. Similar to COMPONENT_REF except the position is given explicitly rather than via a FIELD_DECL. Operand 0 is the structure or union expression; operand 1 is a tree giving the number of bits being referenced; operand 2 is a tree giving the position of the first referenced bit. The field can be either a signed or unsigned field; BIT_FIELD_REF_UNSIGNED says which. */ DEFTREECODE (BIT_FIELD_REF, "bit_field_ref", 'r', 3) /* C unary `*' or Pascal `^'. One operand, an expression for a pointer. */ DEFTREECODE (INDIRECT_REF, "indirect_ref", 'r', 1) /* Pascal `^` on a file. One operand, an expression for the file. */ DEFTREECODE (BUFFER_REF, "buffer_ref", 'r', 1) /* Array indexing. Operand 0 is the array; operand 1 is a (single) array index. Operand 2, if present, is a copy of TYPE_MIN_VALUE of the index. Operand 3, if present, is the element size, measured in units of the alignment of the element type. */ DEFTREECODE (ARRAY_REF, "array_ref", 'r', 4) /* Likewise, except that the result is a range ("slice") of the array. The starting index of the resulting array is taken from operand 1 and the size of the range is taken from the type of the expression. 
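   To illustrate the storage-reference codes above: given
       struct s { int f; } x, *p; int a[10];
   the C expression `x.f' is a COMPONENT_REF, `p->f' is a COMPONENT_REF whose
   operand 0 is an INDIRECT_REF of `p', and `a[i]' is an ARRAY_REF with `a'
   as operand 0 and the index `i' as operand 1.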
*/ DEFTREECODE (ARRAY_RANGE_REF, "array_range_ref", 'r', 4) /* Used to represent lookup of runtime type dependent data. Often this is a reference to a vtable, but it needn't be. Operands are: OBJ_TYPE_REF_EXPR: An expression that evaluates the value to use. OBJ_TYPE_REF_OBJECT: Is the object on whose behalf the lookup is being performed. Through this the optimizers may be able to statically determine the dynamic type of the object. OBJ_TYPE_REF_TOKEN: Something front-end specific used to resolve the reference to something simpler, usually to the address of a DECL. Never touched by the middle-end. Good choices would be either an identifier or a vtable index. */ DEFTREECODE (OBJ_TYPE_REF, "obj_type_ref", 'e', 3) /* The exception object from the runtime. */ DEFTREECODE (EXC_PTR_EXPR, "exc_ptr_expr", 'e', 0) /* The filter object from the runtime. */ DEFTREECODE (FILTER_EXPR, "filter_expr", 'e', 0) /* Constructor: return an aggregate value made from specified components. In C, this is used only for structure and array initializers. Also used for SET_TYPE in Chill (and potentially Pascal). The operand is a list of component values made out of a chain of TREE_LIST nodes. For ARRAY_TYPE: The TREE_PURPOSE of each node is the corresponding index. If the TREE_PURPOSE is a RANGE_EXPR, it is a short-hand for many nodes, one for each index in the range. (If the corresponding TREE_VALUE has side-effects, they are evaluated once for each element. Wrap the value in a SAVE_EXPR if you want to evaluate side effects only once.) For RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE: The TREE_PURPOSE of each node is a FIELD_DECL. For SET_TYPE: The TREE_VALUE specifies a value (index) in the set that is true. If TREE_PURPOSE is non-NULL, it specifies the lower limit of a range of true values. Elements not listed are false (not in the set). */ DEFTREECODE (CONSTRUCTOR, "constructor", 'e', 1) /* The expression types are mostly straightforward, with the fourth argument of DEFTREECODE saying how many operands there are. Unless otherwise specified, the operands are expressions and the types of all the operands and the expression must all be the same. */ /* Contains two expressions to compute, one followed by the other. the first value is ignored. The second one's value is used. The type of the first expression need not agree with the other types. */ DEFTREECODE (COMPOUND_EXPR, "compound_expr", 'e', 2) /* Assignment expression. Operand 0 is the what to set; 1, the new value. */ DEFTREECODE (MODIFY_EXPR, "modify_expr", 'e', 2) /* Initialization expression. Operand 0 is the variable to initialize; Operand 1 is the initializer. */ DEFTREECODE (INIT_EXPR, "init_expr", 'e', 2) /* For TARGET_EXPR, operand 0 is the target of an initialization, operand 1 is the initializer for the target, which may be void if simply expanding it initializes the target. operand 2 is the cleanup for this node, if any. operand 3 is the saved initializer after this node has been expanded once; this is so we can re-expand the tree later. */ DEFTREECODE (TARGET_EXPR, "target_expr", 'e', 4) /* Conditional expression ( ... ? ... : ... in C). Operand 0 is the condition. Operand 1 is the then-value. Operand 2 is the else-value. Operand 0 may be of any type. Operand 1 must have the same type as the entire expression, unless it unconditionally throws an exception, in which case it should have VOID_TYPE. The same constraints apply to operand 2. */ DEFTREECODE (COND_EXPR, "cond_expr", 'e', 3) /* Declare local variables, including making RTL and allocating space. 
BIND_EXPR_VARS is a chain of VAR_DECL nodes for the variables. BIND_EXPR_BODY is the body, the expression to be computed using the variables. The value of operand 1 becomes that of the BIND_EXPR. BIND_EXPR_BLOCK is the BLOCK that corresponds to these bindings for debugging purposes. If this BIND_EXPR is actually expanded, that sets the TREE_USED flag in the BLOCK. The BIND_EXPR is not responsible for informing parsers about these variables. If the body is coming from the input file, then the code that creates the BIND_EXPR is also responsible for informing the parser of the variables. If the BIND_EXPR is ever expanded, its TREE_USED flag is set. This tells the code for debugging symbol tables not to ignore the BIND_EXPR. If the BIND_EXPR should be output for debugging but will not be expanded, set the TREE_USED flag by hand. In order for the BIND_EXPR to be known at all, the code that creates it must also install it as a subblock in the tree of BLOCK nodes for the function. */ DEFTREECODE (BIND_EXPR, "bind_expr", 'e', 3) /* A labeled block. Operand 0 is the label that will be generated to mark the end of the block. Operand 1 is the labeled block body. */ DEFTREECODE (LABELED_BLOCK_EXPR, "labeled_block_expr", 'e', 2) /* Function call. Operand 0 is the function. Operand 1 is the argument list, a list of expressions made out of a chain of TREE_LIST nodes. Operand 2 is the static chain argument, or NULL. */ DEFTREECODE (CALL_EXPR, "call_expr", 'e', 3) /* Specify a value to compute along with its corresponding cleanup. Operand 0 argument is an expression whose value needs a cleanup. Operand 1 is the cleanup expression for the object. Operand 2 is unused. The cleanup is executed by the first enclosing CLEANUP_POINT_EXPR, if it exists, otherwise it is the responsibility of the caller to manually call expand_start_target_temps/expand_end_target_temps, as needed. This differs from TRY_CATCH_EXPR in that operand 2 is always evaluated when an exception isn't thrown when cleanups are run. */ DEFTREECODE (WITH_CLEANUP_EXPR, "with_cleanup_expr", 'e', 3) /* Specify a cleanup point. Operand 0 is an expression that may have cleanups. If it does, those cleanups are executed after the expression is expanded. Note that if the expression is a reference to storage, it is forced out of memory before the cleanups are run. This is necessary to handle cases where the cleanups modify the storage referenced; in the expression 't.i', if 't' is a struct with an integer member 'i' and a cleanup which modifies 'i', the value of the expression depends on whether the cleanup is run before or after 't.i' is evaluated. When expand_expr is run on 't.i', it returns a MEM. This is not good enough; the value of 't.i' must be forced out of memory. As a consequence, the operand of a CLEANUP_POINT_EXPR must not have BLKmode, because it will not be forced out of memory. */ DEFTREECODE (CLEANUP_POINT_EXPR, "cleanup_point_expr", 'e', 1) /* The following two codes are used in languages that have types where some field in an object of the type contains a value that is used in the computation of another field's offset or size and/or the size of the type. The positions and/or sizes of fields can vary from object to object of the same type or even for one and the same object within its scope. Record types with discriminants in Ada or schema types in Pascal are examples of such types. 
This mechanism is also used to create "fat pointers" for unconstrained array types in Ada; the fat pointer is a structure one of whose fields is a pointer to the actual array type and the other field is a pointer to a template, which is a structure containing the bounds of the array. The bounds in the type pointed to by the first field in the fat pointer refer to the values in the template. When you wish to construct such a type you need "self-references" that allow you to reference the object having this type from the TYPE node, i.e. without having a variable instantiating this type. Such a "self-reference" is done using a PLACEHOLDER_EXPR. This is a node that will later be replaced with the object being referenced. Its type is that of the object and selects which object to use from a chain of references (see below). No other slots are used in the PLACEHOLDER_EXPR. For example, if your type FOO is a RECORD_TYPE with a field BAR, and you need the value of <variable>.BAR to calculate TYPE_SIZE (FOO), just substitute <variable> above with a PLACEHOLDER_EXPR whose TREE_TYPE is FOO. Then construct your COMPONENT_REF with the PLACEHOLDER_EXPR as the first operand (which has the correct type). Later, when the size is needed in the program, the back-end will find this PLACEHOLDER_EXPR and generate code to calculate the actual size at run-time. In the following, we describe how this calculation is done. When we wish to evaluate a size or offset, we check whether it contains a PLACEHOLDER_EXPR. If it does, we call substitute_placeholder_in_expr passing both that tree and an expression within which the object may be found. The latter expression is the object itself in the simple case of an Ada record with discriminant, but it can be the array in the case of an unconstrained array. In the latter case, we need the fat pointer, because the bounds of the array can only be accessed from it. However, we rely here on the fact that the expression for the array contains the dereference of the fat pointer that obtained the array pointer. */ /* Denotes a record to later be substituted before evaluating this expression. The type of this expression is used to find the record to replace it. */ DEFTREECODE (PLACEHOLDER_EXPR, "placeholder_expr", 'x', 0) /* Simple arithmetic. */ DEFTREECODE (PLUS_EXPR, "plus_expr", '2', 2) DEFTREECODE (MINUS_EXPR, "minus_expr", '2', 2) DEFTREECODE (MULT_EXPR, "mult_expr", '2', 2) /* Division for integer result that rounds the quotient toward zero. */ DEFTREECODE (TRUNC_DIV_EXPR, "trunc_div_expr", '2', 2) /* Division for integer result that rounds the quotient toward infinity. */ DEFTREECODE (CEIL_DIV_EXPR, "ceil_div_expr", '2', 2) /* Division for integer result that rounds toward minus infinity. */ DEFTREECODE (FLOOR_DIV_EXPR, "floor_div_expr", '2', 2) /* Division for integer result that rounds toward nearest integer. */ DEFTREECODE (ROUND_DIV_EXPR, "round_div_expr", '2', 2) /* Four kinds of remainder that go with the four kinds of division. */ DEFTREECODE (TRUNC_MOD_EXPR, "trunc_mod_expr", '2', 2) DEFTREECODE (CEIL_MOD_EXPR, "ceil_mod_expr", '2', 2) DEFTREECODE (FLOOR_MOD_EXPR, "floor_mod_expr", '2', 2) DEFTREECODE (ROUND_MOD_EXPR, "round_mod_expr", '2', 2) /* Division for real result. */ DEFTREECODE (RDIV_EXPR, "rdiv_expr", '2', 2) /* Division which is not supposed to need rounding. Used for pointer subtraction in C. */ DEFTREECODE (EXACT_DIV_EXPR, "exact_div_expr", '2', 2) /* Conversion of real to fixed point: four ways to round, like the four ways to divide.
CONVERT_EXPR can also be used to convert a real to an integer, and that is what is used in languages that do not have ways of specifying which of these is wanted. Maybe these are not needed. */ DEFTREECODE (FIX_TRUNC_EXPR, "fix_trunc_expr", '1', 1) DEFTREECODE (FIX_CEIL_EXPR, "fix_ceil_expr", '1', 1) DEFTREECODE (FIX_FLOOR_EXPR, "fix_floor_expr", '1', 1) DEFTREECODE (FIX_ROUND_EXPR, "fix_round_expr", '1', 1) /* Conversion of an integer to a real. */ DEFTREECODE (FLOAT_EXPR, "float_expr", '1', 1) /* Unary negation. */ DEFTREECODE (NEGATE_EXPR, "negate_expr", '1', 1) DEFTREECODE (MIN_EXPR, "min_expr", '2', 2) DEFTREECODE (MAX_EXPR, "max_expr", '2', 2) /* Represents the absolute value of the operand. An ABS_EXPR must have either an INTEGER_TYPE or a REAL_TYPE. The operand of the ABS_EXPR must have the same type. */ DEFTREECODE (ABS_EXPR, "abs_expr", '1', 1) /* Shift operations for shift and rotate. Shift means logical shift if done on an unsigned type, arithmetic shift if done on a signed type. The second operand is the number of bits to shift by; it need not be the same type as the first operand and result. Note that the result is undefined if the second operand is larger than the first operand's type size. */ DEFTREECODE (LSHIFT_EXPR, "lshift_expr", '2', 2) DEFTREECODE (RSHIFT_EXPR, "rshift_expr", '2', 2) DEFTREECODE (LROTATE_EXPR, "lrotate_expr", '2', 2) DEFTREECODE (RROTATE_EXPR, "rrotate_expr", '2', 2) /* Bitwise operations. Operands have same mode as result. */ DEFTREECODE (BIT_IOR_EXPR, "bit_ior_expr", '2', 2) DEFTREECODE (BIT_XOR_EXPR, "bit_xor_expr", '2', 2) DEFTREECODE (BIT_AND_EXPR, "bit_and_expr", '2', 2) DEFTREECODE (BIT_NOT_EXPR, "bit_not_expr", '1', 1) /* ANDIF and ORIF allow the second operand not to be computed if the value of the expression is determined from the first operand. AND, OR, and XOR always compute the second operand whether its value is needed or not (for side effects). The operand may have BOOLEAN_TYPE or INTEGER_TYPE. In either case, the argument will be either zero or one. For example, a TRUTH_NOT_EXPR will never have an INTEGER_TYPE VAR_DECL as its argument; instead, a NE_EXPR will be used to compare the VAR_DECL to zero, thereby obtaining a node with value zero or one. */ DEFTREECODE (TRUTH_ANDIF_EXPR, "truth_andif_expr", 'e', 2) DEFTREECODE (TRUTH_ORIF_EXPR, "truth_orif_expr", 'e', 2) DEFTREECODE (TRUTH_AND_EXPR, "truth_and_expr", 'e', 2) DEFTREECODE (TRUTH_OR_EXPR, "truth_or_expr", 'e', 2) DEFTREECODE (TRUTH_XOR_EXPR, "truth_xor_expr", 'e', 2) DEFTREECODE (TRUTH_NOT_EXPR, "truth_not_expr", 'e', 1) /* Relational operators. `EQ_EXPR' and `NE_EXPR' are allowed for any types. The others are allowed only for integer (or pointer or enumeral) or real types. In all cases the operands will have the same type, and the value is always the type used by the language for booleans. */ DEFTREECODE (LT_EXPR, "lt_expr", '<', 2) DEFTREECODE (LE_EXPR, "le_expr", '<', 2) DEFTREECODE (GT_EXPR, "gt_expr", '<', 2) DEFTREECODE (GE_EXPR, "ge_expr", '<', 2) DEFTREECODE (EQ_EXPR, "eq_expr", '<', 2) DEFTREECODE (NE_EXPR, "ne_expr", '<', 2) /* Additional relational operators for floating point unordered. */ DEFTREECODE (UNORDERED_EXPR, "unordered_expr", '<', 2) DEFTREECODE (ORDERED_EXPR, "ordered_expr", '<', 2) /* These are equivalent to unordered or ... 
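   (so, for instance, UNLT_EXPR is true when its operands compare unordered,
   i.e. at least one of them is a NaN, or when the first compares less than
   the second; logically this is the same as the C expression !(x >= y) on
   floating-point operands).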
*/ DEFTREECODE (UNLT_EXPR, "unlt_expr", '<', 2) DEFTREECODE (UNLE_EXPR, "unle_expr", '<', 2) DEFTREECODE (UNGT_EXPR, "ungt_expr", '<', 2) DEFTREECODE (UNGE_EXPR, "unge_expr", '<', 2) DEFTREECODE (UNEQ_EXPR, "uneq_expr", '<', 2) /* This is the reverse of uneq_expr. */ DEFTREECODE (LTGT_EXPR, "ltgt_expr", '<', 2) /* Operations for Pascal sets. Not used now. */ DEFTREECODE (IN_EXPR, "in_expr", '2', 2) DEFTREECODE (SET_LE_EXPR, "set_le_expr", '<', 2) DEFTREECODE (CARD_EXPR, "card_expr", '1', 1) DEFTREECODE (RANGE_EXPR, "range_expr", '2', 2) /* Represents a conversion of type of a value. All conversions, including implicit ones, must be represented by CONVERT_EXPR or NOP_EXPR nodes. */ DEFTREECODE (CONVERT_EXPR, "convert_expr", '1', 1) /* Represents a conversion expected to require no code to be generated. */ DEFTREECODE (NOP_EXPR, "nop_expr", '1', 1) /* Value is same as argument, but guaranteed not an lvalue. */ DEFTREECODE (NON_LVALUE_EXPR, "non_lvalue_expr", '1', 1) /* Represents viewing something of one type as being of a second type. This corresponds to an "Unchecked Conversion" in Ada and roughly to the idiom *(type2 *)&X in C. The only operand is the value to be viewed as being of another type. It is undefined if the type of the input and of the expression have different sizes. This code may also be used within the LHS of a MODIFY_EXPR, in which case no actual data motion may occur. TREE_ADDRESSABLE will be set in this case and GCC must abort if it could not do the operation without generating insns. */ DEFTREECODE (VIEW_CONVERT_EXPR, "view_convert_expr", '1', 1) /* Represents something we computed once and will use multiple times. First operand is that expression. After it is evaluated once, it will be replaced by the temporary variable that holds the value. */ DEFTREECODE (SAVE_EXPR, "save_expr", 'e', 1) /* For a UNSAVE_EXPR, operand 0 is the value to unsave. By unsave, we mean that all _EXPRs such as TARGET_EXPRs, SAVE_EXPRs, CALL_EXPRs, that are protected from being evaluated more than once should be reset so that a new expand_expr call of this expr will cause those to be re-evaluated. This is useful when we want to reuse a tree in different places, but where we must re-expand. */ DEFTREECODE (UNSAVE_EXPR, "unsave_expr", 'e', 1) /* & in C. Value is the address at which the operand's value resides. Operand may have any mode. Result mode is Pmode. */ DEFTREECODE (ADDR_EXPR, "addr_expr", 'e', 1) /* Non-lvalue reference or pointer to an object. */ DEFTREECODE (REFERENCE_EXPR, "reference_expr", 'e', 1) /* Operand is a function constant; result is a function variable value of type EPmode. Used only for languages that need static chains. */ DEFTREECODE (ENTRY_VALUE_EXPR, "entry_value_expr", 'e', 1) /* Operand0 is a function constant; result is part N of a function descriptor of type ptr_mode. */ DEFTREECODE (FDESC_EXPR, "fdesc_expr", 'e', 2) /* Given two real or integer operands of the same type, returns a complex value of the corresponding complex type. */ DEFTREECODE (COMPLEX_EXPR, "complex_expr", '2', 2) /* Complex conjugate of operand. Used only on complex types. */ DEFTREECODE (CONJ_EXPR, "conj_expr", '1', 1) /* Used only on an operand of complex type, these return a value of the corresponding component type. */ DEFTREECODE (REALPART_EXPR, "realpart_expr", 'r', 1) DEFTREECODE (IMAGPART_EXPR, "imagpart_expr", 'r', 1) /* Nodes for ++ and -- in C. The second arg is how much to increment or decrement by. For a pointer, it would be the size of the object pointed to. 
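   For example, with `int i; int *p;', the C expression `++i' is a
   PREINCREMENT_EXPR whose second operand is the integer constant 1, while
   `p++' is a POSTINCREMENT_EXPR whose second operand is the size of `int'
   on the target, since the pointer steps over one pointed-to object.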
*/ DEFTREECODE (PREDECREMENT_EXPR, "predecrement_expr", 'e', 2) DEFTREECODE (PREINCREMENT_EXPR, "preincrement_expr", 'e', 2) DEFTREECODE (POSTDECREMENT_EXPR, "postdecrement_expr", 'e', 2) DEFTREECODE (POSTINCREMENT_EXPR, "postincrement_expr", 'e', 2) /* Used to implement `va_arg'. */ DEFTREECODE (VA_ARG_EXPR, "va_arg_expr", 'e', 1) /* Evaluate operand 1. If and only if an exception is thrown during the evaluation of operand 1, evaluate operand 2. This differs from TRY_FINALLY_EXPR in that operand 2 is not evaluated on a normal or jump exit, only on an exception. */ DEFTREECODE (TRY_CATCH_EXPR, "try_catch_expr", 's', 2) /* Evaluate the first operand. The second operand is a cleanup expression which is evaluated on any exit (normal, exception, or jump out) from this expression. */ DEFTREECODE (TRY_FINALLY_EXPR, "try_finally", 's', 2) /* These types of expressions have no useful value, and always have side effects. */ /* Used to represent a local declaration. The operand is DECL_EXPR_DECL. */ DEFTREECODE (DECL_EXPR, "decl_expr", 's', 1) /* A label definition, encapsulated as a statement. Operand 0 is the LABEL_DECL node for the label that appears here. The type should be void and the value should be ignored. */ DEFTREECODE (LABEL_EXPR, "label_expr", 's', 1) /* GOTO. Operand 0 is a LABEL_DECL node or an expression. The type should be void and the value should be ignored. */ DEFTREECODE (GOTO_EXPR, "goto_expr", 's', 1) /* Used internally for cleanups in the implementation of TRY_FINALLY_EXPR. (Specifically, it is created by expand_expr, not front-ends.) Operand 0 is the rtx for the start of the subroutine we need to call. Operand 1 is the rtx for a variable in which to store the address of where the subroutine should return to. */ DEFTREECODE (GOTO_SUBROUTINE_EXPR, "goto_subroutine", 's', 2) /* RETURN. Evaluates operand 0, then returns from the current function. Presumably that operand is an assignment that stores into the RESULT_DECL that hold the value to be returned. The operand may be null. The type should be void and the value should be ignored. */ DEFTREECODE (RETURN_EXPR, "return_expr", 's', 1) /* Exit the inner most loop conditionally. Operand 0 is the condition. The type should be void and the value should be ignored. */ DEFTREECODE (EXIT_EXPR, "exit_expr", 's', 1) /* A loop. Operand 0 is the body of the loop. It must contain an EXIT_EXPR or is an infinite loop. The type should be void and the value should be ignored. */ DEFTREECODE (LOOP_EXPR, "loop_expr", 's', 1) /* Exit a labeled block, possibly returning a value. Operand 0 is a LABELED_BLOCK_EXPR to exit. Operand 1 is the value to return. It may be left null. */ DEFTREECODE (EXIT_BLOCK_EXPR, "exit_block_expr", 's', 2) /* Switch expression. TREE_TYPE is the original type of the condition, before any language required type conversions. It may be NULL, in which case the original type and final types are assumed to be the same. Operand 0 is the expression used to perform the branch, Operand 1 is the body of the switch, which probably contains CASE_LABEL_EXPRs. It may also be NULL, in which case operand 2 must not be NULL. Operand 2 is either NULL_TREE or a TREE_VEC of the CASE_LABEL_EXPRs of all the cases. */ DEFTREECODE (SWITCH_EXPR, "switch_expr", 's', 3) /* Used to represent a case label. The operands are CASE_LOW and CASE_HIGH, respectively. If CASE_LOW is NULL_TREE, the label is a 'default' label. If CASE_HIGH is NULL_TREE, the label is a normal case label. CASE_LABEL is the corresponding LABEL_DECL. 
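   For example, `case 7:' has CASE_LOW equal to the INTEGER_CST 7 and
   CASE_HIGH NULL_TREE; the GNU C case-range extension `case 1 ... 5:' has
   CASE_LOW 1 and CASE_HIGH 5; and `default:' has CASE_LOW NULL_TREE.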
*/ DEFTREECODE (CASE_LABEL_EXPR, "case_label_expr", 's', 3) /* RESX. Resume execution after an exception. Operand 0 is a number indicating the exception region that is being left. */ DEFTREECODE (RESX_EXPR, "resx_expr", 's', 1) /* Used to represent an inline assembly statement. ASM_STRING returns a STRING_CST for the instruction (e.g., "mov x, y"). ASM_OUTPUTS, ASM_INPUTS, and ASM_CLOBBERS represent the outputs, inputs, and clobbers for the statement. */ DEFTREECODE (ASM_EXPR, "asm_expr", 's', 4) /* Variable references for SSA analysis. New SSA names are created every time a variable is assigned a new value. The SSA builder uses SSA_NAME nodes to implement SSA versioning. */ DEFTREECODE (SSA_NAME, "ssa_name", 'x', 0) /* SSA PHI operator. PHI_RESULT is the new SSA_NAME node created by the PHI node. PHI_ARG_LENGTH is the number of arguments. PHI_ARG_ELT returns the Ith tuple from the argument list. Each tuple contains the incoming reaching definition (SSA_NAME node) and the edge via which that definition is coming through. */ DEFTREECODE (PHI_NODE, "phi_node", 'x', 0) /* Used to represent a typed exception handler. CATCH_TYPES is the type (or list of types) handled, and CATCH_BODY is the code for the handler. */ DEFTREECODE (CATCH_EXPR, "catch_expr", 's', 2) /* Used to represent an exception specification. EH_FILTER_TYPES is a list of allowed types, and EH_FILTER_FAILURE is an expression to evaluate on failure. EH_FILTER_MUST_NOT_THROW controls which range type to use when expanding. */ DEFTREECODE (EH_FILTER_EXPR, "eh_filter_expr", 's', 2) /* Node used for describing a property that is known at compile time. */ DEFTREECODE (SCEV_KNOWN, "scev_known", 'e', 0) /* Node used for describing a property that is not known at compile time. */ DEFTREECODE (SCEV_NOT_KNOWN, "scev_not_known", 'e', 0) /* Polynomial chains of recurrences. Under the form: cr = {CHREC_LEFT (cr), +, CHREC_RIGHT (cr)}. */ DEFTREECODE (POLYNOMIAL_CHREC, "polynomial_chrec", 'e', 3) /* Used to chain children of container statements together. Use the interface in tree-iterator.h to access this node. */ DEFTREECODE (STATEMENT_LIST, "statement_list", 'x', 0) /* Value handles. Artificial nodes to represent expressions in partial redundancy elimination (tree-ssa-pre.c). These nodes are used for expression canonicalization. If two expressions compute the same value, they will be assigned the same value handle. */ DEFTREECODE (VALUE_HANDLE, "value_handle", 'x', 0) /* Base class information. Holds information about a class as a baseclass of itself or another class. */ DEFTREECODE (TREE_BINFO, "tree_binfo", 'x', 0) /* Local variables: mode:c End: */ 'x', /* This file contains the definitions and documentation for the additional tree codes used in the GNU C++ compiler (see tree.def for the standard codes). Copyright (C) 1987, 1988, 1990, 1993, 1997, 1998, 1999, 2000, 2001, 2004 Free Software Foundation, Inc. Written by Benjamin Chelf This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Tree nodes relevant to both C and C++. These were originally in cp-tree.def in the cp subdir. */ DEFTREECODE (SIZEOF_EXPR, "sizeof_expr", '1', 1) DEFTREECODE (ARROW_EXPR, "arrow_expr", 'e', 1) DEFTREECODE (ALIGNOF_EXPR, "alignof_expr", '1', 1) /* Used to represent an expression statement. Use `EXPR_STMT_EXPR' to obtain the expression. */ DEFTREECODE (EXPR_STMT, "expr_stmt", 'e', 1) /* Used to represent a `for' statement. The operands are FOR_INIT_STMT, FOR_COND, FOR_EXPR, and FOR_BODY, respectively. */ DEFTREECODE (FOR_STMT, "for_stmt", 'e', 4) /* Used to represent a 'while' statement. The operands are WHILE_COND and WHILE_BODY, respectively. */ DEFTREECODE (WHILE_STMT, "while_stmt", 'e', 2) /* Used to represent a 'do' statement. The operands are DO_BODY and DO_COND, respectively. */ DEFTREECODE (DO_STMT, "do_stmt", 'e', 2) /* Used to represent a 'break' statement. */ DEFTREECODE (BREAK_STMT, "break_stmt", 'e', 0) /* Used to represent a 'continue' statement. */ DEFTREECODE (CONTINUE_STMT, "continue_stmt", 'e', 0) /* Used to represent a 'switch' statement. The operands are SWITCH_COND, SWITCH_BODY and SWITCH_TYPE, respectively. */ DEFTREECODE (SWITCH_STMT, "switch_stmt", 'e', 3) /* A STMT_EXPR represents a statement-expression. The STMT_EXPR_STMT is the statement given by the expression. */ DEFTREECODE (STMT_EXPR, "stmt_expr", 'e', 1) /* A COMPOUND_LITERAL_EXPR represents a C99 compound literal. The COMPOUND_LITERAL_EXPR_DECL_STMT is the a DECL_STMT containing the decl for the anonymous object represented by the COMPOUND_LITERAL; the DECL_INITIAL of that decl is the CONSTRUCTOR that initializes the compound literal. */ DEFTREECODE (COMPOUND_LITERAL_EXPR, "compound_literal_expr", 'e', 1) /* Local variables: mode:c End: */ }; #undef DEFTREECODE /* Table indexed by tree code giving number of expression operands beyond the fixed part of the node structure. Not used for types or decls. */ #define DEFTREECODE(SYM, NAME, TYPE, LENGTH) LENGTH, const unsigned char tree_code_length[] = { /* This file contains the definitions and documentation for the tree codes used in GCC. Copyright (C) 1987, 1988, 1993, 1995, 1997, 1998, 2000, 2001, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The third argument can be: 'x' for an exceptional code (fits no category). 't' for a type object code. 'c' for codes for constants. 'd' for codes for declarations (also serving as variable refs). 'r' for codes for references to storage. '<' for codes for comparison expressions. '1' for codes for unary arithmetic expressions. '2' for codes for binary arithmetic expressions. 's' for codes for "statement" expressions, which have side-effects, but usually no interesting value. 'e' for codes for other kinds of expressions. 
*/ /* For `r', `e', `<', `1', `2', and `s' nodes, which use struct tree_exp, the 4th element is the number of argument slots to allocate. This determines the size of the tree node object. Other nodes use different structures, and the size is determined by the tree_union member structure; the 4th element should be zero. Languages that define language-specific 'x' or 'c' codes must define the tree_size langhook to say how big they are. */ /* Any erroneous construct is parsed into a node of this type. This type of node is accepted without complaint in all contexts by later parsing activities, to avoid multiple error messages for one error. No fields in these nodes are used except the TREE_CODE. */ DEFTREECODE (ERROR_MARK, "error_mark", 'x', 0) /* Used to represent a name (such as, in the DECL_NAME of a decl node). Internally it looks like a STRING_CST node. There is only one IDENTIFIER_NODE ever made for any particular name. Use `get_identifier' to get it (or create it, the first time). */ DEFTREECODE (IDENTIFIER_NODE, "identifier_node", 'x', 0) /* Has the TREE_VALUE and TREE_PURPOSE fields. */ /* These nodes are made into lists by chaining through the TREE_CHAIN field. The elements of the list live in the TREE_VALUE fields, while TREE_PURPOSE fields are occasionally used as well to get the effect of Lisp association lists. */ DEFTREECODE (TREE_LIST, "tree_list", 'x', 0) /* These nodes contain an array of tree nodes. */ DEFTREECODE (TREE_VEC, "tree_vec", 'x', 0) /* A symbol binding block. These are arranged in a tree, where the BLOCK_SUBBLOCKS field contains a chain of subblocks chained through the BLOCK_CHAIN field. BLOCK_SUPERCONTEXT points to the parent block. For a block which represents the outermost scope of a function, it points to the FUNCTION_DECL node. BLOCK_VARS points to a chain of decl nodes. BLOCK_TYPE_TAGS points to a chain of types which have their own names. BLOCK_CHAIN points to the next BLOCK at the same level. BLOCK_ABSTRACT_ORIGIN points to the original (abstract) tree node which this block is an instance of, or else is NULL to indicate that this block is not an instance of anything else. When non-NULL, the value could either point to another BLOCK node or it could point to a FUNCTION_DECL node (e.g. in the case of a block representing the outermost scope of a particular inlining of a function). BLOCK_ABSTRACT is nonzero if the block represents an abstract instance of a block (i.e. one which is nested within an abstract instance of an inline function). TREE_ASM_WRITTEN is nonzero if the block was actually referenced in the generated assembly. */ DEFTREECODE (BLOCK, "block", 'x', 0) /* Each data type is represented by a tree node whose code is one of the following: */ /* Each node that represents a data type has a component TYPE_SIZE containing a tree that is an expression for the size in bits. The TYPE_MODE contains the machine mode for values of this type. The TYPE_POINTER_TO field contains a type for a pointer to this type, or zero if no such has been created yet. The TYPE_NEXT_VARIANT field is used to chain together types that are variants made by type modifiers such as "const" and "volatile". The TYPE_MAIN_VARIANT field, in any member of such a chain, points to the start of the chain. The TYPE_NONCOPIED_PARTS field is a list specifying which parts of an object of this type should *not* be copied by assignment. The TREE_VALUE of each is a FIELD_DECL that should not be copied. 
The TREE_PURPOSE is an initial value for that field when an object of this type is initialized via an INIT_EXPR. It may be NULL if no special value is required. Even the things in this list are copied if the right-hand side of an assignment is known to be a complete object (rather than being, perhaps, a subobject of some other object.) The determination of what constitutes a complete object is done by fixed_type_p. The TYPE_NAME field contains info on the name used in the program for this type (for GDB symbol table output). It is either a TYPE_DECL node, for types that are typedefs, or an IDENTIFIER_NODE in the case of structs, unions or enums that are known with a tag, or zero for types that have no special name. The TYPE_CONTEXT for any sort of type which could have a name or which could have named members (e.g. tagged types in C/C++) will point to the node which represents the scope of the given type, or will be NULL_TREE if the type has "file scope". For most types, this will point to a BLOCK node or a FUNCTION_DECL node, but it could also point to a FUNCTION_TYPE node (for types whose scope is limited to the formal parameter list of some function type specification) or it could point to a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE node (for C++ "member" types). For non-tagged-types, TYPE_CONTEXT need not be set to anything in particular, since any type which is of some type category (e.g. an array type or a function type) which cannot either have a name itself or have named members doesn't really have a "scope" per se. The TREE_CHAIN field is used as a forward-references to names for ENUMERAL_TYPE, RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE nodes; see below. */ DEFTREECODE (VOID_TYPE, "void_type", 't', 0) /* The void type in C */ /* Integer types in all languages, including char in C. Also used for sub-ranges of other discrete types. Has components TYPE_MIN_VALUE, TYPE_MAX_VALUE (expressions, inclusive) and TYPE_PRECISION (number of bits used by this type). In the case of a subrange type in Pascal, the TREE_TYPE of this will point at the supertype (another INTEGER_TYPE, or an ENUMERAL_TYPE, CHAR_TYPE, or BOOLEAN_TYPE). Otherwise, the TREE_TYPE is zero. */ DEFTREECODE (INTEGER_TYPE, "integer_type", 't', 0) /* C's float and double. Different floating types are distinguished by machine mode and by the TYPE_SIZE and the TYPE_PRECISION. */ DEFTREECODE (REAL_TYPE, "real_type", 't', 0) /* Complex number types. The TREE_TYPE field is the data type of the real and imaginary parts. */ DEFTREECODE (COMPLEX_TYPE, "complex_type", 't', 0) /* Vector types. The TREE_TYPE field is the data type of the vector elements. */ DEFTREECODE (VECTOR_TYPE, "vector_type", 't', 0) /* C enums. The type node looks just like an INTEGER_TYPE node. The symbols for the values of the enum type are defined by CONST_DECL nodes, but the type does not point to them; however, the TYPE_VALUES is a list in which each element's TREE_PURPOSE is a name and the TREE_VALUE is the value (an INTEGER_CST node). */ /* A forward reference `enum foo' when no enum named foo is defined yet has zero (a null pointer) in its TYPE_SIZE. The tag name is in the TYPE_NAME field. If the type is later defined, the normal fields are filled in. RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE forward refs are treated similarly. */ DEFTREECODE (ENUMERAL_TYPE, "enumeral_type", 't', 0) /* Pascal's boolean type (true or false are the only values); no special fields needed. 
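   As a small aside on the discrete types above (a minimal sketch only,
   assuming the standard accessors TYPE_PRECISION, TYPE_MIN_VALUE and
   TREE_INT_CST_LOW from tree.h and the usual stdio declarations;
   example_print_int_type is a hypothetical name):

     static void
     example_print_int_type (tree type)
     {
       if (TREE_CODE (type) == INTEGER_TYPE)
         fprintf (stderr, "%d-bit integer type, low word of minimum = %ld\n",
                  (int) TYPE_PRECISION (type),
                  (long) TREE_INT_CST_LOW (TYPE_MIN_VALUE (type)));
     }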
*/ DEFTREECODE (BOOLEAN_TYPE, "boolean_type", 't', 0) /* CHAR in Pascal; not used in C. No special fields needed. */ DEFTREECODE (CHAR_TYPE, "char_type", 't', 0) /* All pointer-to-x types have code POINTER_TYPE. The TREE_TYPE points to the node for the type pointed to. */ DEFTREECODE (POINTER_TYPE, "pointer_type", 't', 0) /* An offset is a pointer relative to an object. The TREE_TYPE field is the type of the object at the offset. The TYPE_OFFSET_BASETYPE points to the node for the type of object that the offset is relative to. */ DEFTREECODE (OFFSET_TYPE, "offset_type", 't', 0) /* A reference is like a pointer except that it is coerced automatically to the value it points to. Used in C++. */ DEFTREECODE (REFERENCE_TYPE, "reference_type", 't', 0) /* METHOD_TYPE is the type of a function which takes an extra first argument for "self", which is not present in the declared argument list. The TREE_TYPE is the return type of the method. The TYPE_METHOD_BASETYPE is the type of "self". TYPE_ARG_TYPES is the real argument list, which includes the hidden argument for "self". */ DEFTREECODE (METHOD_TYPE, "method_type", 't', 0) /* Used for Pascal; details not determined right now. */ DEFTREECODE (FILE_TYPE, "file_type", 't', 0) /* Types of arrays. Special fields: TREE_TYPE Type of an array element. TYPE_DOMAIN Type to index by. Its range of values specifies the array length. The field TYPE_POINTER_TO (TREE_TYPE (array_type)) is always nonzero and holds the type to coerce a value of that array type to in C. TYPE_STRING_FLAG indicates a string (in contrast to an array of chars) in languages (such as Chill) that make a distinction. */ /* Array types in C or Pascal */ DEFTREECODE (ARRAY_TYPE, "array_type", 't', 0) /* Types of sets for Pascal. Special fields are the same as in an array type. The target type is always a boolean type. Used for both bitstrings and powersets in Chill; TYPE_STRING_FLAG indicates a bitstring. */ DEFTREECODE (SET_TYPE, "set_type", 't', 0) /* Struct in C, or record in Pascal. */ /* Special fields: TYPE_FIELDS chain of FIELD_DECLs for the fields of the struct, and VAR_DECLs, TYPE_DECLs and CONST_DECLs for record-scope variables, types and enumerators. A few may need to be added for Pascal. */ /* See the comment above, before ENUMERAL_TYPE, for how forward references to struct tags are handled in C. */ DEFTREECODE (RECORD_TYPE, "record_type", 't', 0) /* Union in C. Like a struct, except that the offsets of the fields will all be zero. */ /* See the comment above, before ENUMERAL_TYPE, for how forward references to union tags are handled in C. */ DEFTREECODE (UNION_TYPE, "union_type", 't', 0) /* C union type */ /* Similar to UNION_TYPE, except that the expressions in DECL_QUALIFIER in each FIELD_DECL determine what the union contains. The first field whose DECL_QUALIFIER expression is true is deemed to occupy the union. */ DEFTREECODE (QUAL_UNION_TYPE, "qual_union_type", 't', 0) /* Type of functions. Special fields: TREE_TYPE type of value returned. TYPE_ARG_TYPES list of types of arguments expected. this list is made of TREE_LIST nodes. Types of "Procedures" in languages where they are different from functions have code FUNCTION_TYPE also, but then TREE_TYPE is zero or void type. */ DEFTREECODE (FUNCTION_TYPE, "function_type", 't', 0) /* This is a language-specific kind of type. Its meaning is defined by the language front end. layout_type does not know how to lay this out, so the front-end must do so manually. 
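   On a related practical note, the TYPE_ARG_TYPES list documented above for
   FUNCTION_TYPE is an ordinary TREE_LIST chain, so it can be walked with
   TREE_CHAIN and TREE_VALUE like any other list.  A minimal sketch follows
   (the void_type_node test reflects the usual convention that a prototyped
   argument list ends with a void entry; example_count_args is a
   hypothetical name):

     static int
     example_count_args (tree fntype)
     {
       int n = 0;
       tree t;
       for (t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
         if (TREE_VALUE (t) != void_type_node)
           n++;
       return n;
     }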
*/ DEFTREECODE (LANG_TYPE, "lang_type", 't', 0) /* Expressions */ /* First, the constants. */ /* Contents are in TREE_INT_CST_LOW and TREE_INT_CST_HIGH fields, 32 bits each, giving us a 64 bit constant capability. Note: constants of type char in Pascal are INTEGER_CST, and so are pointer constants such as nil in Pascal or NULL in C. `(int *) 1' in C also results in an INTEGER_CST. */ DEFTREECODE (INTEGER_CST, "integer_cst", 'c', 0) /* Contents are in TREE_REAL_CST field. */ DEFTREECODE (REAL_CST, "real_cst", 'c', 0) /* Contents are in TREE_REALPART and TREE_IMAGPART fields, whose contents are other constant nodes. */ DEFTREECODE (COMPLEX_CST, "complex_cst", 'c', 0) /* Contents are in TREE_VECTOR_CST_ELTS field. */ DEFTREECODE (VECTOR_CST, "vector_cst", 'c', 0) /* Contents are TREE_STRING_LENGTH and TREE_STRING_POINTER fields. */ DEFTREECODE (STRING_CST, "string_cst", 'c', 0) /* Declarations. All references to names are represented as ..._DECL nodes. The decls in one binding context are chained through the TREE_CHAIN field. Each DECL has a DECL_NAME field which contains an IDENTIFIER_NODE. (Some decls, most often labels, may have zero as the DECL_NAME). DECL_CONTEXT points to the node representing the context in which this declaration has its scope. For FIELD_DECLs, this is the RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node that the field is a member of. For VAR_DECL, PARM_DECL, FUNCTION_DECL, LABEL_DECL, and CONST_DECL nodes, this points to either the FUNCTION_DECL for the containing function, the RECORD_TYPE or UNION_TYPE for the containing type, or NULL_TREE or a TRANSLATION_UNIT_DECL if the given decl has "file scope". DECL_ABSTRACT_ORIGIN, if non-NULL, points to the original (abstract) ..._DECL node of which this decl is an (inlined or template expanded) instance. The TREE_TYPE field holds the data type of the object, when relevant. LABEL_DECLs have no data type. For TYPE_DECL, the TREE_TYPE field contents are the type whose name is being declared. The DECL_ALIGN, DECL_SIZE, and DECL_MODE fields exist in decl nodes just as in type nodes. They are unused in LABEL_DECL, TYPE_DECL and CONST_DECL nodes. DECL_FIELD_BIT_OFFSET holds an integer number of bits offset for the location. DECL_VOFFSET holds an expression for a variable offset; it is to be multiplied by DECL_VOFFSET_UNIT (an integer). These fields are relevant only in FIELD_DECLs and PARM_DECLs. DECL_INITIAL holds the value to initialize a variable to, or the value of a constant. For a function, it holds the body (a node of type BLOCK representing the function's binding contour and whose body contains the function's statements.) For a LABEL_DECL in C, it is a flag, nonzero if the label's definition has been seen. PARM_DECLs use a special field: DECL_ARG_TYPE is the type in which the argument is actually passed, which may be different from its type within the function. FUNCTION_DECLs use four special fields: DECL_ARGUMENTS holds a chain of PARM_DECL nodes for the arguments. DECL_RESULT holds a RESULT_DECL node for the value of a function, or it is 0 for a function that returns no value. (C functions returning void have zero here.) The TREE_TYPE field is the type in which the result is actually returned. This is usually the same as the return type of the FUNCTION_DECL, but it may be a wider integer type because of promotion. DECL_FUNCTION_CODE is a code number that is nonzero for built-in functions. Its value is an enum built_in_function that says which built-in function it is. 
DECL_SOURCE_FILE holds a filename string and DECL_SOURCE_LINE holds a line number. In some cases these can be the location of a reference, if no definition has been seen. DECL_ABSTRACT is nonzero if the decl represents an abstract instance of a decl (i.e. one which is nested within an abstract instance of a inline function. */ DEFTREECODE (FUNCTION_DECL, "function_decl", 'd', 0) DEFTREECODE (LABEL_DECL, "label_decl", 'd', 0) DEFTREECODE (CONST_DECL, "const_decl", 'd', 0) DEFTREECODE (TYPE_DECL, "type_decl", 'd', 0) DEFTREECODE (VAR_DECL, "var_decl", 'd', 0) DEFTREECODE (PARM_DECL, "parm_decl", 'd', 0) DEFTREECODE (RESULT_DECL, "result_decl", 'd', 0) DEFTREECODE (FIELD_DECL, "field_decl", 'd', 0) /* A namespace declaration. Namespaces appear in DECL_CONTEXT of other _DECLs, providing a hierarchy of names. */ DEFTREECODE (NAMESPACE_DECL, "namespace_decl", 'd', 0) /* A translation unit. This is not technically a declaration, since it can't be looked up, but it's close enough. */ DEFTREECODE (TRANSLATION_UNIT_DECL, "translation_unit_decl", 'd', 0) /* References to storage. */ /* Value is structure or union component. Operand 0 is the structure or union (an expression). Operand 1 is the field (a node of type FIELD_DECL). Operand 2, if present, is the value of DECL_FIELD_OFFSET, measured in units of DECL_OFFSET_ALIGN / BITS_PER_UNIT. */ DEFTREECODE (COMPONENT_REF, "component_ref", 'r', 3) /* Reference to a group of bits within an object. Similar to COMPONENT_REF except the position is given explicitly rather than via a FIELD_DECL. Operand 0 is the structure or union expression; operand 1 is a tree giving the number of bits being referenced; operand 2 is a tree giving the position of the first referenced bit. The field can be either a signed or unsigned field; BIT_FIELD_REF_UNSIGNED says which. */ DEFTREECODE (BIT_FIELD_REF, "bit_field_ref", 'r', 3) /* C unary `*' or Pascal `^'. One operand, an expression for a pointer. */ DEFTREECODE (INDIRECT_REF, "indirect_ref", 'r', 1) /* Pascal `^` on a file. One operand, an expression for the file. */ DEFTREECODE (BUFFER_REF, "buffer_ref", 'r', 1) /* Array indexing. Operand 0 is the array; operand 1 is a (single) array index. Operand 2, if present, is a copy of TYPE_MIN_VALUE of the index. Operand 3, if present, is the element size, measured in units of the alignment of the element type. */ DEFTREECODE (ARRAY_REF, "array_ref", 'r', 4) /* Likewise, except that the result is a range ("slice") of the array. The starting index of the resulting array is taken from operand 1 and the size of the range is taken from the type of the expression. */ DEFTREECODE (ARRAY_RANGE_REF, "array_range_ref", 'r', 4) /* Used to represent lookup of runtime type dependent data. Often this is a reference to a vtable, but it needn't be. Operands are: OBJ_TYPE_REF_EXPR: An expression that evaluates the value to use. OBJ_TYPE_REF_OBJECT: Is the object on whose behalf the lookup is being performed. Through this the optimizers may be able to statically determine the dynamic type of the object. OBJ_TYPE_REF_TOKEN: Something front-end specific used to resolve the reference to something simpler, usually to the address of a DECL. Never touched by the middle-end. Good choices would be either an identifier or a vtable index. */ DEFTREECODE (OBJ_TYPE_REF, "obj_type_ref", 'e', 3) /* The exception object from the runtime. */ DEFTREECODE (EXC_PTR_EXPR, "exc_ptr_expr", 'e', 0) /* The filter object from the runtime. 
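   (As an aside on the reference-to-storage codes above: their operands are
   reached with the generic TREE_OPERAND macro.  A minimal sketch, assuming
   a node "ref" already known to be a COMPONENT_REF for an access like x.f:

     tree object = TREE_OPERAND (ref, 0);      the structure expression, x
     tree field  = TREE_OPERAND (ref, 1);      the FIELD_DECL for f

   Likewise, for an ARRAY_REF built for a[i], operand 0 is the array a and
   operand 1 is the index expression i.)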
*/ DEFTREECODE (FILTER_EXPR, "filter_expr", 'e', 0) /* Constructor: return an aggregate value made from specified components. In C, this is used only for structure and array initializers. Also used for SET_TYPE in Chill (and potentially Pascal). The operand is a list of component values made out of a chain of TREE_LIST nodes. For ARRAY_TYPE: The TREE_PURPOSE of each node is the corresponding index. If the TREE_PURPOSE is a RANGE_EXPR, it is a short-hand for many nodes, one for each index in the range. (If the corresponding TREE_VALUE has side-effects, they are evaluated once for each element. Wrap the value in a SAVE_EXPR if you want to evaluate side effects only once.) For RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE: The TREE_PURPOSE of each node is a FIELD_DECL. For SET_TYPE: The TREE_VALUE specifies a value (index) in the set that is true. If TREE_PURPOSE is non-NULL, it specifies the lower limit of a range of true values. Elements not listed are false (not in the set). */ DEFTREECODE (CONSTRUCTOR, "constructor", 'e', 1) /* The expression types are mostly straightforward, with the fourth argument of DEFTREECODE saying how many operands there are. Unless otherwise specified, the operands are expressions and the types of all the operands and the expression must all be the same. */ /* Contains two expressions to compute, one followed by the other. the first value is ignored. The second one's value is used. The type of the first expression need not agree with the other types. */ DEFTREECODE (COMPOUND_EXPR, "compound_expr", 'e', 2) /* Assignment expression. Operand 0 is the what to set; 1, the new value. */ DEFTREECODE (MODIFY_EXPR, "modify_expr", 'e', 2) /* Initialization expression. Operand 0 is the variable to initialize; Operand 1 is the initializer. */ DEFTREECODE (INIT_EXPR, "init_expr", 'e', 2) /* For TARGET_EXPR, operand 0 is the target of an initialization, operand 1 is the initializer for the target, which may be void if simply expanding it initializes the target. operand 2 is the cleanup for this node, if any. operand 3 is the saved initializer after this node has been expanded once; this is so we can re-expand the tree later. */ DEFTREECODE (TARGET_EXPR, "target_expr", 'e', 4) /* Conditional expression ( ... ? ... : ... in C). Operand 0 is the condition. Operand 1 is the then-value. Operand 2 is the else-value. Operand 0 may be of any type. Operand 1 must have the same type as the entire expression, unless it unconditionally throws an exception, in which case it should have VOID_TYPE. The same constraints apply to operand 2. */ DEFTREECODE (COND_EXPR, "cond_expr", 'e', 3) /* Declare local variables, including making RTL and allocating space. BIND_EXPR_VARS is a chain of VAR_DECL nodes for the variables. BIND_EXPR_BODY is the body, the expression to be computed using the variables. The value of operand 1 becomes that of the BIND_EXPR. BIND_EXPR_BLOCK is the BLOCK that corresponds to these bindings for debugging purposes. If this BIND_EXPR is actually expanded, that sets the TREE_USED flag in the BLOCK. The BIND_EXPR is not responsible for informing parsers about these variables. If the body is coming from the input file, then the code that creates the BIND_EXPR is also responsible for informing the parser of the variables. If the BIND_EXPR is ever expanded, its TREE_USED flag is set. This tells the code for debugging symbol tables not to ignore the BIND_EXPR. If the BIND_EXPR should be output for debugging but will not be expanded, set the TREE_USED flag by hand. 
In order for the BIND_EXPR to be known at all, the code that creates it must also install it as a subblock in the tree of BLOCK nodes for the function. */ DEFTREECODE (BIND_EXPR, "bind_expr", 'e', 3) /* A labeled block. Operand 0 is the label that will be generated to mark the end of the block. Operand 1 is the labeled block body. */ DEFTREECODE (LABELED_BLOCK_EXPR, "labeled_block_expr", 'e', 2) /* Function call. Operand 0 is the function. Operand 1 is the argument list, a list of expressions made out of a chain of TREE_LIST nodes. Operand 2 is the static chain argument, or NULL. */ DEFTREECODE (CALL_EXPR, "call_expr", 'e', 3) /* Specify a value to compute along with its corresponding cleanup. Operand 0 argument is an expression whose value needs a cleanup. Operand 1 is the cleanup expression for the object. Operand 2 is unused. The cleanup is executed by the first enclosing CLEANUP_POINT_EXPR, if it exists, otherwise it is the responsibility of the caller to manually call expand_start_target_temps/expand_end_target_temps, as needed. This differs from TRY_CATCH_EXPR in that operand 2 is always evaluated when an exception isn't thrown when cleanups are run. */ DEFTREECODE (WITH_CLEANUP_EXPR, "with_cleanup_expr", 'e', 3) /* Specify a cleanup point. Operand 0 is an expression that may have cleanups. If it does, those cleanups are executed after the expression is expanded. Note that if the expression is a reference to storage, it is forced out of memory before the cleanups are run. This is necessary to handle cases where the cleanups modify the storage referenced; in the expression 't.i', if 't' is a struct with an integer member 'i' and a cleanup which modifies 'i', the value of the expression depends on whether the cleanup is run before or after 't.i' is evaluated. When expand_expr is run on 't.i', it returns a MEM. This is not good enough; the value of 't.i' must be forced out of memory. As a consequence, the operand of a CLEANUP_POINT_EXPR must not have BLKmode, because it will not be forced out of memory. */ DEFTREECODE (CLEANUP_POINT_EXPR, "cleanup_point_expr", 'e', 1) /* The following two codes are used in languages that have types where some field in an object of the type contains a value that is used in the computation of another field's offset or size and/or the size of the type. The positions and/or sizes of fields can vary from object to object of the same type or even for one and the same object within its scope. Record types with discriminants in Ada or schema types in Pascal are examples of such types. This mechanism is also used to create "fat pointers" for unconstrained array types in Ada; the fat pointer is a structure one of whose fields is a pointer to the actual array type and the other field is a pointer to a template, which is a structure containing the bounds of the array. The bounds in the type pointed to by the first field in the fat pointer refer to the values in the template. When you wish to construct such a type you need "self-references" that allow you to reference the object having this type from the TYPE node, i.e. without having a variable instantiating this type. Such a "self-references" is done using a PLACEHOLDER_EXPR. This is a node that will later be replaced with the object being referenced. Its type is that of the object and selects which object to use from a chain of references (see below). No other slots are used in the PLACEHOLDER_EXPR. 
For example, if your type FOO is a RECORD_TYPE with a field BAR, and you need the value of .BAR to calculate TYPE_SIZE (FOO), just substitute above with a PLACEHOLDER_EXPR whose TREE_TYPE is FOO. Then construct your COMPONENT_REF with the PLACEHOLDER_EXPR as the first operand (which has the correct type). Later, when the size is needed in the program, the back-end will find this PLACEHOLDER_EXPR and generate code to calculate the actual size at run-time. In the following, we describe how this calculation is done. When we wish to evaluate a size or offset, we check whether it contains a PLACEHOLDER_EXPR. If it does, we call substitute_placeholder_in_expr passing both that tree and an expression within which the object may be found. The latter expression is the object itself in the simple case of an Ada record with discriminant, but it can be the array in the case of an unconstrained array. In the latter case, we need the fat pointer, because the bounds of the array can only be accessed from it. However, we rely here on the fact that the expression for the array contains the dereference of the fat pointer that obtained the array pointer. */ /* Denotes a record to later be substituted before evaluating this expression. The type of this expression is used to find the record to replace it. */ DEFTREECODE (PLACEHOLDER_EXPR, "placeholder_expr", 'x', 0) /* Simple arithmetic. */ DEFTREECODE (PLUS_EXPR, "plus_expr", '2', 2) DEFTREECODE (MINUS_EXPR, "minus_expr", '2', 2) DEFTREECODE (MULT_EXPR, "mult_expr", '2', 2) /* Division for integer result that rounds the quotient toward zero. */ DEFTREECODE (TRUNC_DIV_EXPR, "trunc_div_expr", '2', 2) /* Division for integer result that rounds the quotient toward infinity. */ DEFTREECODE (CEIL_DIV_EXPR, "ceil_div_expr", '2', 2) /* Division for integer result that rounds toward minus infinity. */ DEFTREECODE (FLOOR_DIV_EXPR, "floor_div_expr", '2', 2) /* Division for integer result that rounds toward nearest integer. */ DEFTREECODE (ROUND_DIV_EXPR, "round_div_expr", '2', 2) /* Four kinds of remainder that go with the four kinds of division. */ DEFTREECODE (TRUNC_MOD_EXPR, "trunc_mod_expr", '2', 2) DEFTREECODE (CEIL_MOD_EXPR, "ceil_mod_expr", '2', 2) DEFTREECODE (FLOOR_MOD_EXPR, "floor_mod_expr", '2', 2) DEFTREECODE (ROUND_MOD_EXPR, "round_mod_expr", '2', 2) /* Division for real result. */ DEFTREECODE (RDIV_EXPR, "rdiv_expr", '2', 2) /* Division which is not supposed to need rounding. Used for pointer subtraction in C. */ DEFTREECODE (EXACT_DIV_EXPR, "exact_div_expr", '2', 2) /* Conversion of real to fixed point: four ways to round, like the four ways to divide. CONVERT_EXPR can also be used to convert a real to an integer, and that is what is used in languages that do not have ways of specifying which of these is wanted. Maybe these are not needed. */ DEFTREECODE (FIX_TRUNC_EXPR, "fix_trunc_expr", '1', 1) DEFTREECODE (FIX_CEIL_EXPR, "fix_ceil_expr", '1', 1) DEFTREECODE (FIX_FLOOR_EXPR, "fix_floor_expr", '1', 1) DEFTREECODE (FIX_ROUND_EXPR, "fix_round_expr", '1', 1) /* Conversion of an integer to a real. */ DEFTREECODE (FLOAT_EXPR, "float_expr", '1', 1) /* Unary negation. */ DEFTREECODE (NEGATE_EXPR, "negate_expr", '1', 1) DEFTREECODE (MIN_EXPR, "min_expr", '2', 2) DEFTREECODE (MAX_EXPR, "max_expr", '2', 2) /* Represents the absolute value of the operand. An ABS_EXPR must have either an INTEGER_TYPE or a REAL_TYPE. The operand of the ABS_EXPR must have the same type. */ DEFTREECODE (ABS_EXPR, "abs_expr", '1', 1) /* Shift operations for shift and rotate. 
Shift means logical shift if done on an unsigned type, arithmetic shift if done on a signed type. The second operand is the number of bits to shift by; it need not be the same type as the first operand and result. Note that the result is undefined if the second operand is larger than the first operand's type size. */ DEFTREECODE (LSHIFT_EXPR, "lshift_expr", '2', 2) DEFTREECODE (RSHIFT_EXPR, "rshift_expr", '2', 2) DEFTREECODE (LROTATE_EXPR, "lrotate_expr", '2', 2) DEFTREECODE (RROTATE_EXPR, "rrotate_expr", '2', 2) /* Bitwise operations. Operands have same mode as result. */ DEFTREECODE (BIT_IOR_EXPR, "bit_ior_expr", '2', 2) DEFTREECODE (BIT_XOR_EXPR, "bit_xor_expr", '2', 2) DEFTREECODE (BIT_AND_EXPR, "bit_and_expr", '2', 2) DEFTREECODE (BIT_NOT_EXPR, "bit_not_expr", '1', 1) /* ANDIF and ORIF allow the second operand not to be computed if the value of the expression is determined from the first operand. AND, OR, and XOR always compute the second operand whether its value is needed or not (for side effects). The operand may have BOOLEAN_TYPE or INTEGER_TYPE. In either case, the argument will be either zero or one. For example, a TRUTH_NOT_EXPR will never have an INTEGER_TYPE VAR_DECL as its argument; instead, a NE_EXPR will be used to compare the VAR_DECL to zero, thereby obtaining a node with value zero or one. */ DEFTREECODE (TRUTH_ANDIF_EXPR, "truth_andif_expr", 'e', 2) DEFTREECODE (TRUTH_ORIF_EXPR, "truth_orif_expr", 'e', 2) DEFTREECODE (TRUTH_AND_EXPR, "truth_and_expr", 'e', 2) DEFTREECODE (TRUTH_OR_EXPR, "truth_or_expr", 'e', 2) DEFTREECODE (TRUTH_XOR_EXPR, "truth_xor_expr", 'e', 2) DEFTREECODE (TRUTH_NOT_EXPR, "truth_not_expr", 'e', 1) /* Relational operators. `EQ_EXPR' and `NE_EXPR' are allowed for any types. The others are allowed only for integer (or pointer or enumeral) or real types. In all cases the operands will have the same type, and the value is always the type used by the language for booleans. */ DEFTREECODE (LT_EXPR, "lt_expr", '<', 2) DEFTREECODE (LE_EXPR, "le_expr", '<', 2) DEFTREECODE (GT_EXPR, "gt_expr", '<', 2) DEFTREECODE (GE_EXPR, "ge_expr", '<', 2) DEFTREECODE (EQ_EXPR, "eq_expr", '<', 2) DEFTREECODE (NE_EXPR, "ne_expr", '<', 2) /* Additional relational operators for floating point unordered. */ DEFTREECODE (UNORDERED_EXPR, "unordered_expr", '<', 2) DEFTREECODE (ORDERED_EXPR, "ordered_expr", '<', 2) /* These are equivalent to unordered or ... */ DEFTREECODE (UNLT_EXPR, "unlt_expr", '<', 2) DEFTREECODE (UNLE_EXPR, "unle_expr", '<', 2) DEFTREECODE (UNGT_EXPR, "ungt_expr", '<', 2) DEFTREECODE (UNGE_EXPR, "unge_expr", '<', 2) DEFTREECODE (UNEQ_EXPR, "uneq_expr", '<', 2) /* This is the reverse of uneq_expr. */ DEFTREECODE (LTGT_EXPR, "ltgt_expr", '<', 2) /* Operations for Pascal sets. Not used now. */ DEFTREECODE (IN_EXPR, "in_expr", '2', 2) DEFTREECODE (SET_LE_EXPR, "set_le_expr", '<', 2) DEFTREECODE (CARD_EXPR, "card_expr", '1', 1) DEFTREECODE (RANGE_EXPR, "range_expr", '2', 2) /* Represents a conversion of type of a value. All conversions, including implicit ones, must be represented by CONVERT_EXPR or NOP_EXPR nodes. */ DEFTREECODE (CONVERT_EXPR, "convert_expr", '1', 1) /* Represents a conversion expected to require no code to be generated. */ DEFTREECODE (NOP_EXPR, "nop_expr", '1', 1) /* Value is same as argument, but guaranteed not an lvalue. */ DEFTREECODE (NON_LVALUE_EXPR, "non_lvalue_expr", '1', 1) /* Represents viewing something of one type as being of a second type. 
This corresponds to an "Unchecked Conversion" in Ada and roughly to the idiom *(type2 *)&X in C. The only operand is the value to be viewed as being of another type. It is undefined if the type of the input and of the expression have different sizes. This code may also be used within the LHS of a MODIFY_EXPR, in which case no actual data motion may occur. TREE_ADDRESSABLE will be set in this case and GCC must abort if it could not do the operation without generating insns. */ DEFTREECODE (VIEW_CONVERT_EXPR, "view_convert_expr", '1', 1) /* Represents something we computed once and will use multiple times. First operand is that expression. After it is evaluated once, it will be replaced by the temporary variable that holds the value. */ DEFTREECODE (SAVE_EXPR, "save_expr", 'e', 1) /* For a UNSAVE_EXPR, operand 0 is the value to unsave. By unsave, we mean that all _EXPRs such as TARGET_EXPRs, SAVE_EXPRs, CALL_EXPRs, that are protected from being evaluated more than once should be reset so that a new expand_expr call of this expr will cause those to be re-evaluated. This is useful when we want to reuse a tree in different places, but where we must re-expand. */ DEFTREECODE (UNSAVE_EXPR, "unsave_expr", 'e', 1) /* & in C. Value is the address at which the operand's value resides. Operand may have any mode. Result mode is Pmode. */ DEFTREECODE (ADDR_EXPR, "addr_expr", 'e', 1) /* Non-lvalue reference or pointer to an object. */ DEFTREECODE (REFERENCE_EXPR, "reference_expr", 'e', 1) /* Operand is a function constant; result is a function variable value of type EPmode. Used only for languages that need static chains. */ DEFTREECODE (ENTRY_VALUE_EXPR, "entry_value_expr", 'e', 1) /* Operand0 is a function constant; result is part N of a function descriptor of type ptr_mode. */ DEFTREECODE (FDESC_EXPR, "fdesc_expr", 'e', 2) /* Given two real or integer operands of the same type, returns a complex value of the corresponding complex type. */ DEFTREECODE (COMPLEX_EXPR, "complex_expr", '2', 2) /* Complex conjugate of operand. Used only on complex types. */ DEFTREECODE (CONJ_EXPR, "conj_expr", '1', 1) /* Used only on an operand of complex type, these return a value of the corresponding component type. */ DEFTREECODE (REALPART_EXPR, "realpart_expr", 'r', 1) DEFTREECODE (IMAGPART_EXPR, "imagpart_expr", 'r', 1) /* Nodes for ++ and -- in C. The second arg is how much to increment or decrement by. For a pointer, it would be the size of the object pointed to. */ DEFTREECODE (PREDECREMENT_EXPR, "predecrement_expr", 'e', 2) DEFTREECODE (PREINCREMENT_EXPR, "preincrement_expr", 'e', 2) DEFTREECODE (POSTDECREMENT_EXPR, "postdecrement_expr", 'e', 2) DEFTREECODE (POSTINCREMENT_EXPR, "postincrement_expr", 'e', 2) /* Used to implement `va_arg'. */ DEFTREECODE (VA_ARG_EXPR, "va_arg_expr", 'e', 1) /* Evaluate operand 1. If and only if an exception is thrown during the evaluation of operand 1, evaluate operand 2. This differs from TRY_FINALLY_EXPR in that operand 2 is not evaluated on a normal or jump exit, only on an exception. */ DEFTREECODE (TRY_CATCH_EXPR, "try_catch_expr", 's', 2) /* Evaluate the first operand. The second operand is a cleanup expression which is evaluated on any exit (normal, exception, or jump out) from this expression. */ DEFTREECODE (TRY_FINALLY_EXPR, "try_finally", 's', 2) /* These types of expressions have no useful value, and always have side effects. */ /* Used to represent a local declaration. The operand is DECL_EXPR_DECL. 
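   A minimal sketch of reading that operand, using only the generic macros
   from tree.h (the operand is also reachable through the DECL_EXPR_DECL
   accessor named above; the fprintf is illustrative only and assumes the
   declaration has a DECL_NAME):

     if (TREE_CODE (stmt) == DECL_EXPR)
       {
         tree decl = TREE_OPERAND (stmt, 0);
         if (TREE_CODE (decl) == VAR_DECL && DECL_NAME (decl))
           fprintf (stderr, "local variable %s\n",
                    IDENTIFIER_POINTER (DECL_NAME (decl)));
       }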
*/ DEFTREECODE (DECL_EXPR, "decl_expr", 's', 1) /* A label definition, encapsulated as a statement. Operand 0 is the LABEL_DECL node for the label that appears here. The type should be void and the value should be ignored. */ DEFTREECODE (LABEL_EXPR, "label_expr", 's', 1) /* GOTO. Operand 0 is a LABEL_DECL node or an expression. The type should be void and the value should be ignored. */ DEFTREECODE (GOTO_EXPR, "goto_expr", 's', 1) /* Used internally for cleanups in the implementation of TRY_FINALLY_EXPR. (Specifically, it is created by expand_expr, not front-ends.) Operand 0 is the rtx for the start of the subroutine we need to call. Operand 1 is the rtx for a variable in which to store the address of where the subroutine should return to. */ DEFTREECODE (GOTO_SUBROUTINE_EXPR, "goto_subroutine", 's', 2) /* RETURN. Evaluates operand 0, then returns from the current function. Presumably that operand is an assignment that stores into the RESULT_DECL that hold the value to be returned. The operand may be null. The type should be void and the value should be ignored. */ DEFTREECODE (RETURN_EXPR, "return_expr", 's', 1) /* Exit the inner most loop conditionally. Operand 0 is the condition. The type should be void and the value should be ignored. */ DEFTREECODE (EXIT_EXPR, "exit_expr", 's', 1) /* A loop. Operand 0 is the body of the loop. It must contain an EXIT_EXPR or is an infinite loop. The type should be void and the value should be ignored. */ DEFTREECODE (LOOP_EXPR, "loop_expr", 's', 1) /* Exit a labeled block, possibly returning a value. Operand 0 is a LABELED_BLOCK_EXPR to exit. Operand 1 is the value to return. It may be left null. */ DEFTREECODE (EXIT_BLOCK_EXPR, "exit_block_expr", 's', 2) /* Switch expression. TREE_TYPE is the original type of the condition, before any language required type conversions. It may be NULL, in which case the original type and final types are assumed to be the same. Operand 0 is the expression used to perform the branch, Operand 1 is the body of the switch, which probably contains CASE_LABEL_EXPRs. It may also be NULL, in which case operand 2 must not be NULL. Operand 2 is either NULL_TREE or a TREE_VEC of the CASE_LABEL_EXPRs of all the cases. */ DEFTREECODE (SWITCH_EXPR, "switch_expr", 's', 3) /* Used to represent a case label. The operands are CASE_LOW and CASE_HIGH, respectively. If CASE_LOW is NULL_TREE, the label is a 'default' label. If CASE_HIGH is NULL_TREE, the label is a normal case label. CASE_LABEL is the corresponding LABEL_DECL. */ DEFTREECODE (CASE_LABEL_EXPR, "case_label_expr", 's', 3) /* RESX. Resume execution after an exception. Operand 0 is a number indicating the exception region that is being left. */ DEFTREECODE (RESX_EXPR, "resx_expr", 's', 1) /* Used to represent an inline assembly statement. ASM_STRING returns a STRING_CST for the instruction (e.g., "mov x, y"). ASM_OUTPUTS, ASM_INPUTS, and ASM_CLOBBERS represent the outputs, inputs, and clobbers for the statement. */ DEFTREECODE (ASM_EXPR, "asm_expr", 's', 4) /* Variable references for SSA analysis. New SSA names are created every time a variable is assigned a new value. The SSA builder uses SSA_NAME nodes to implement SSA versioning. */ DEFTREECODE (SSA_NAME, "ssa_name", 'x', 0) /* SSA PHI operator. PHI_RESULT is the new SSA_NAME node created by the PHI node. PHI_ARG_LENGTH is the number of arguments. PHI_ARG_ELT returns the Ith tuple from the argument list. 
Each tuple contains the incoming reaching definition (SSA_NAME node) and the edge via which that definition is coming through. */ DEFTREECODE (PHI_NODE, "phi_node", 'x', 0) /* Used to represent a typed exception handler. CATCH_TYPES is the type (or list of types) handled, and CATCH_BODY is the code for the handler. */ DEFTREECODE (CATCH_EXPR, "catch_expr", 's', 2) /* Used to represent an exception specification. EH_FILTER_TYPES is a list of allowed types, and EH_FILTER_FAILURE is an expression to evaluate on failure. EH_FILTER_MUST_NOT_THROW controls which range type to use when expanding. */ DEFTREECODE (EH_FILTER_EXPR, "eh_filter_expr", 's', 2) /* Node used for describing a property that is known at compile time. */ DEFTREECODE (SCEV_KNOWN, "scev_known", 'e', 0) /* Node used for describing a property that is not known at compile time. */ DEFTREECODE (SCEV_NOT_KNOWN, "scev_not_known", 'e', 0) /* Polynomial chains of recurrences. Under the form: cr = {CHREC_LEFT (cr), +, CHREC_RIGHT (cr)}. */ DEFTREECODE (POLYNOMIAL_CHREC, "polynomial_chrec", 'e', 3) /* Used to chain children of container statements together. Use the interface in tree-iterator.h to access this node. */ DEFTREECODE (STATEMENT_LIST, "statement_list", 'x', 0) /* Value handles. Artificial nodes to represent expressions in partial redundancy elimination (tree-ssa-pre.c). These nodes are used for expression canonicalization. If two expressions compute the same value, they will be assigned the same value handle. */ DEFTREECODE (VALUE_HANDLE, "value_handle", 'x', 0) /* Base class information. Holds information about a class as a baseclass of itself or another class. */ DEFTREECODE (TREE_BINFO, "tree_binfo", 'x', 0) /* Local variables: mode:c End: */ 0, /* This file contains the definitions and documentation for the additional tree codes used in the GNU C++ compiler (see tree.def for the standard codes). Copyright (C) 1987, 1988, 1990, 1993, 1997, 1998, 1999, 2000, 2001, 2004 Free Software Foundation, Inc. Written by Benjamin Chelf This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Tree nodes relevant to both C and C++. These were originally in cp-tree.def in the cp subdir. */ DEFTREECODE (SIZEOF_EXPR, "sizeof_expr", '1', 1) DEFTREECODE (ARROW_EXPR, "arrow_expr", 'e', 1) DEFTREECODE (ALIGNOF_EXPR, "alignof_expr", '1', 1) /* Used to represent an expression statement. Use `EXPR_STMT_EXPR' to obtain the expression. */ DEFTREECODE (EXPR_STMT, "expr_stmt", 'e', 1) /* Used to represent a `for' statement. The operands are FOR_INIT_STMT, FOR_COND, FOR_EXPR, and FOR_BODY, respectively. */ DEFTREECODE (FOR_STMT, "for_stmt", 'e', 4) /* Used to represent a 'while' statement. The operands are WHILE_COND and WHILE_BODY, respectively. */ DEFTREECODE (WHILE_STMT, "while_stmt", 'e', 2) /* Used to represent a 'do' statement. The operands are DO_BODY and DO_COND, respectively. 
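   As a concrete illustration of the operand order documented here (a
   minimal sketch; the DO_BODY and DO_COND accessors provided by the C
   front end expand to the same operand references):

     tree body = TREE_OPERAND (do_stmt, 0);     DO_BODY
     tree cond = TREE_OPERAND (do_stmt, 1);     DO_COND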
*/ DEFTREECODE (DO_STMT, "do_stmt", 'e', 2) /* Used to represent a 'break' statement. */ DEFTREECODE (BREAK_STMT, "break_stmt", 'e', 0) /* Used to represent a 'continue' statement. */ DEFTREECODE (CONTINUE_STMT, "continue_stmt", 'e', 0) /* Used to represent a 'switch' statement. The operands are SWITCH_COND, SWITCH_BODY and SWITCH_TYPE, respectively. */ DEFTREECODE (SWITCH_STMT, "switch_stmt", 'e', 3) /* A STMT_EXPR represents a statement-expression. The STMT_EXPR_STMT is the statement given by the expression. */ DEFTREECODE (STMT_EXPR, "stmt_expr", 'e', 1) /* A COMPOUND_LITERAL_EXPR represents a C99 compound literal. The COMPOUND_LITERAL_EXPR_DECL_STMT is the a DECL_STMT containing the decl for the anonymous object represented by the COMPOUND_LITERAL; the DECL_INITIAL of that decl is the CONSTRUCTOR that initializes the compound literal. */ DEFTREECODE (COMPOUND_LITERAL_EXPR, "compound_literal_expr", 'e', 1) /* Local variables: mode:c End: */ }; #undef DEFTREECODE /* Names of tree components. Used for printing out the tree and error messages. */ #define DEFTREECODE(SYM, NAME, TYPE, LEN) NAME, const char *const tree_code_name[] = { /* This file contains the definitions and documentation for the tree codes used in GCC. Copyright (C) 1987, 1988, 1993, 1995, 1997, 1998, 2000, 2001, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The third argument can be: 'x' for an exceptional code (fits no category). 't' for a type object code. 'c' for codes for constants. 'd' for codes for declarations (also serving as variable refs). 'r' for codes for references to storage. '<' for codes for comparison expressions. '1' for codes for unary arithmetic expressions. '2' for codes for binary arithmetic expressions. 's' for codes for "statement" expressions, which have side-effects, but usually no interesting value. 'e' for codes for other kinds of expressions. */ /* For `r', `e', `<', `1', `2', and `s' nodes, which use struct tree_exp, the 4th element is the number of argument slots to allocate. This determines the size of the tree node object. Other nodes use different structures, and the size is determined by the tree_union member structure; the 4th element should be zero. Languages that define language-specific 'x' or 'c' codes must define the tree_size langhook to say how big they are. */ /* Any erroneous construct is parsed into a node of this type. This type of node is accepted without complaint in all contexts by later parsing activities, to avoid multiple error messages for one error. No fields in these nodes are used except the TREE_CODE. */ DEFTREECODE (ERROR_MARK, "error_mark", 'x', 0) /* Used to represent a name (such as, in the DECL_NAME of a decl node). Internally it looks like a STRING_CST node. There is only one IDENTIFIER_NODE ever made for any particular name. Use `get_identifier' to get it (or create it, the first time). 
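   A minimal sketch of the uniqueness guarantee described here; get_identifier
   and IDENTIFIER_POINTER are the standard interfaces, and the point of the
   example is that the two calls yield the very same node:

     tree a = get_identifier ("size");
     tree b = get_identifier ("size");

   Here a == b holds, and IDENTIFIER_POINTER (a) is the NUL-terminated
   string "size", so identifiers can be compared by pointer equality.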
*/ DEFTREECODE (IDENTIFIER_NODE, "identifier_node", 'x', 0) /* Has the TREE_VALUE and TREE_PURPOSE fields. */ /* These nodes are made into lists by chaining through the TREE_CHAIN field. The elements of the list live in the TREE_VALUE fields, while TREE_PURPOSE fields are occasionally used as well to get the effect of Lisp association lists. */ DEFTREECODE (TREE_LIST, "tree_list", 'x', 0) /* These nodes contain an array of tree nodes. */ DEFTREECODE (TREE_VEC, "tree_vec", 'x', 0) /* A symbol binding block. These are arranged in a tree, where the BLOCK_SUBBLOCKS field contains a chain of subblocks chained through the BLOCK_CHAIN field. BLOCK_SUPERCONTEXT points to the parent block. For a block which represents the outermost scope of a function, it points to the FUNCTION_DECL node. BLOCK_VARS points to a chain of decl nodes. BLOCK_TYPE_TAGS points to a chain of types which have their own names. BLOCK_CHAIN points to the next BLOCK at the same level. BLOCK_ABSTRACT_ORIGIN points to the original (abstract) tree node which this block is an instance of, or else is NULL to indicate that this block is not an instance of anything else. When non-NULL, the value could either point to another BLOCK node or it could point to a FUNCTION_DECL node (e.g. in the case of a block representing the outermost scope of a particular inlining of a function). BLOCK_ABSTRACT is nonzero if the block represents an abstract instance of a block (i.e. one which is nested within an abstract instance of an inline function). TREE_ASM_WRITTEN is nonzero if the block was actually referenced in the generated assembly. */ DEFTREECODE (BLOCK, "block", 'x', 0) /* Each data type is represented by a tree node whose code is one of the following: */ /* Each node that represents a data type has a component TYPE_SIZE containing a tree that is an expression for the size in bits. The TYPE_MODE contains the machine mode for values of this type. The TYPE_POINTER_TO field contains a type for a pointer to this type, or zero if no such has been created yet. The TYPE_NEXT_VARIANT field is used to chain together types that are variants made by type modifiers such as "const" and "volatile". The TYPE_MAIN_VARIANT field, in any member of such a chain, points to the start of the chain. The TYPE_NONCOPIED_PARTS field is a list specifying which parts of an object of this type should *not* be copied by assignment. The TREE_VALUE of each is a FIELD_DECL that should not be copied. The TREE_PURPOSE is an initial value for that field when an object of this type is initialized via an INIT_EXPR. It may be NULL if no special value is required. Even the things in this list are copied if the right-hand side of an assignment is known to be a complete object (rather than being, perhaps, a subobject of some other object.) The determination of what constitutes a complete object is done by fixed_type_p. The TYPE_NAME field contains info on the name used in the program for this type (for GDB symbol table output). It is either a TYPE_DECL node, for types that are typedefs, or an IDENTIFIER_NODE in the case of structs, unions or enums that are known with a tag, or zero for types that have no special name. The TYPE_CONTEXT for any sort of type which could have a name or which could have named members (e.g. tagged types in C/C++) will point to the node which represents the scope of the given type, or will be NULL_TREE if the type has "file scope". 
For most types, this will point to a BLOCK node or a FUNCTION_DECL node, but it could also point to a FUNCTION_TYPE node (for types whose scope is limited to the formal parameter list of some function type specification) or it could point to a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE node (for C++ "member" types). For non-tagged-types, TYPE_CONTEXT need not be set to anything in particular, since any type which is of some type category (e.g. an array type or a function type) which cannot either have a name itself or have named members doesn't really have a "scope" per se. The TREE_CHAIN field is used as a forward-references to names for ENUMERAL_TYPE, RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE nodes; see below. */ DEFTREECODE (VOID_TYPE, "void_type", 't', 0) /* The void type in C */ /* Integer types in all languages, including char in C. Also used for sub-ranges of other discrete types. Has components TYPE_MIN_VALUE, TYPE_MAX_VALUE (expressions, inclusive) and TYPE_PRECISION (number of bits used by this type). In the case of a subrange type in Pascal, the TREE_TYPE of this will point at the supertype (another INTEGER_TYPE, or an ENUMERAL_TYPE, CHAR_TYPE, or BOOLEAN_TYPE). Otherwise, the TREE_TYPE is zero. */ DEFTREECODE (INTEGER_TYPE, "integer_type", 't', 0) /* C's float and double. Different floating types are distinguished by machine mode and by the TYPE_SIZE and the TYPE_PRECISION. */ DEFTREECODE (REAL_TYPE, "real_type", 't', 0) /* Complex number types. The TREE_TYPE field is the data type of the real and imaginary parts. */ DEFTREECODE (COMPLEX_TYPE, "complex_type", 't', 0) /* Vector types. The TREE_TYPE field is the data type of the vector elements. */ DEFTREECODE (VECTOR_TYPE, "vector_type", 't', 0) /* C enums. The type node looks just like an INTEGER_TYPE node. The symbols for the values of the enum type are defined by CONST_DECL nodes, but the type does not point to them; however, the TYPE_VALUES is a list in which each element's TREE_PURPOSE is a name and the TREE_VALUE is the value (an INTEGER_CST node). */ /* A forward reference `enum foo' when no enum named foo is defined yet has zero (a null pointer) in its TYPE_SIZE. The tag name is in the TYPE_NAME field. If the type is later defined, the normal fields are filled in. RECORD_TYPE, UNION_TYPE, and QUAL_UNION_TYPE forward refs are treated similarly. */ DEFTREECODE (ENUMERAL_TYPE, "enumeral_type", 't', 0) /* Pascal's boolean type (true or false are the only values); no special fields needed. */ DEFTREECODE (BOOLEAN_TYPE, "boolean_type", 't', 0) /* CHAR in Pascal; not used in C. No special fields needed. */ DEFTREECODE (CHAR_TYPE, "char_type", 't', 0) /* All pointer-to-x types have code POINTER_TYPE. The TREE_TYPE points to the node for the type pointed to. */ DEFTREECODE (POINTER_TYPE, "pointer_type", 't', 0) /* An offset is a pointer relative to an object. The TREE_TYPE field is the type of the object at the offset. The TYPE_OFFSET_BASETYPE points to the node for the type of object that the offset is relative to. */ DEFTREECODE (OFFSET_TYPE, "offset_type", 't', 0) /* A reference is like a pointer except that it is coerced automatically to the value it points to. Used in C++. */ DEFTREECODE (REFERENCE_TYPE, "reference_type", 't', 0) /* METHOD_TYPE is the type of a function which takes an extra first argument for "self", which is not present in the declared argument list. The TREE_TYPE is the return type of the method. The TYPE_METHOD_BASETYPE is the type of "self". 
TYPE_ARG_TYPES is the real argument list, which includes the hidden argument for "self". */ DEFTREECODE (METHOD_TYPE, "method_type", 't', 0) /* Used for Pascal; details not determined right now. */ DEFTREECODE (FILE_TYPE, "file_type", 't', 0) /* Types of arrays. Special fields: TREE_TYPE Type of an array element. TYPE_DOMAIN Type to index by. Its range of values specifies the array length. The field TYPE_POINTER_TO (TREE_TYPE (array_type)) is always nonzero and holds the type to coerce a value of that array type to in C. TYPE_STRING_FLAG indicates a string (in contrast to an array of chars) in languages (such as Chill) that make a distinction. */ /* Array types in C or Pascal */ DEFTREECODE (ARRAY_TYPE, "array_type", 't', 0) /* Types of sets for Pascal. Special fields are the same as in an array type. The target type is always a boolean type. Used for both bitstrings and powersets in Chill; TYPE_STRING_FLAG indicates a bitstring. */ DEFTREECODE (SET_TYPE, "set_type", 't', 0) /* Struct in C, or record in Pascal. */ /* Special fields: TYPE_FIELDS chain of FIELD_DECLs for the fields of the struct, and VAR_DECLs, TYPE_DECLs and CONST_DECLs for record-scope variables, types and enumerators. A few may need to be added for Pascal. */ /* See the comment above, before ENUMERAL_TYPE, for how forward references to struct tags are handled in C. */ DEFTREECODE (RECORD_TYPE, "record_type", 't', 0) /* Union in C. Like a struct, except that the offsets of the fields will all be zero. */ /* See the comment above, before ENUMERAL_TYPE, for how forward references to union tags are handled in C. */ DEFTREECODE (UNION_TYPE, "union_type", 't', 0) /* C union type */ /* Similar to UNION_TYPE, except that the expressions in DECL_QUALIFIER in each FIELD_DECL determine what the union contains. The first field whose DECL_QUALIFIER expression is true is deemed to occupy the union. */ DEFTREECODE (QUAL_UNION_TYPE, "qual_union_type", 't', 0) /* Type of functions. Special fields: TREE_TYPE type of value returned. TYPE_ARG_TYPES list of types of arguments expected. this list is made of TREE_LIST nodes. Types of "Procedures" in languages where they are different from functions have code FUNCTION_TYPE also, but then TREE_TYPE is zero or void type. */ DEFTREECODE (FUNCTION_TYPE, "function_type", 't', 0) /* This is a language-specific kind of type. Its meaning is defined by the language front end. layout_type does not know how to lay this out, so the front-end must do so manually. */ DEFTREECODE (LANG_TYPE, "lang_type", 't', 0) /* Expressions */ /* First, the constants. */ /* Contents are in TREE_INT_CST_LOW and TREE_INT_CST_HIGH fields, 32 bits each, giving us a 64 bit constant capability. Note: constants of type char in Pascal are INTEGER_CST, and so are pointer constants such as nil in Pascal or NULL in C. `(int *) 1' in C also results in an INTEGER_CST. */ DEFTREECODE (INTEGER_CST, "integer_cst", 'c', 0) /* Contents are in TREE_REAL_CST field. */ DEFTREECODE (REAL_CST, "real_cst", 'c', 0) /* Contents are in TREE_REALPART and TREE_IMAGPART fields, whose contents are other constant nodes. */ DEFTREECODE (COMPLEX_CST, "complex_cst", 'c', 0) /* Contents are in TREE_VECTOR_CST_ELTS field. */ DEFTREECODE (VECTOR_CST, "vector_cst", 'c', 0) /* Contents are TREE_STRING_LENGTH and TREE_STRING_POINTER fields. */ DEFTREECODE (STRING_CST, "string_cst", 'c', 0) /* Declarations. All references to names are represented as ..._DECL nodes. The decls in one binding context are chained through the TREE_CHAIN field. 
Each DECL has a DECL_NAME field which contains an IDENTIFIER_NODE. (Some decls, most often labels, may have zero as the DECL_NAME). DECL_CONTEXT points to the node representing the context in which this declaration has its scope. For FIELD_DECLs, this is the RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE node that the field is a member of. For VAR_DECL, PARM_DECL, FUNCTION_DECL, LABEL_DECL, and CONST_DECL nodes, this points to either the FUNCTION_DECL for the containing function, the RECORD_TYPE or UNION_TYPE for the containing type, or NULL_TREE or a TRANSLATION_UNIT_DECL if the given decl has "file scope". DECL_ABSTRACT_ORIGIN, if non-NULL, points to the original (abstract) ..._DECL node of which this decl is an (inlined or template expanded) instance. The TREE_TYPE field holds the data type of the object, when relevant. LABEL_DECLs have no data type. For TYPE_DECL, the TREE_TYPE field contents are the type whose name is being declared. The DECL_ALIGN, DECL_SIZE, and DECL_MODE fields exist in decl nodes just as in type nodes. They are unused in LABEL_DECL, TYPE_DECL and CONST_DECL nodes. DECL_FIELD_BIT_OFFSET holds an integer number of bits offset for the location. DECL_VOFFSET holds an expression for a variable offset; it is to be multiplied by DECL_VOFFSET_UNIT (an integer). These fields are relevant only in FIELD_DECLs and PARM_DECLs. DECL_INITIAL holds the value to initialize a variable to, or the value of a constant. For a function, it holds the body (a node of type BLOCK representing the function's binding contour and whose body contains the function's statements.) For a LABEL_DECL in C, it is a flag, nonzero if the label's definition has been seen. PARM_DECLs use a special field: DECL_ARG_TYPE is the type in which the argument is actually passed, which may be different from its type within the function. FUNCTION_DECLs use four special fields: DECL_ARGUMENTS holds a chain of PARM_DECL nodes for the arguments. DECL_RESULT holds a RESULT_DECL node for the value of a function, or it is 0 for a function that returns no value. (C functions returning void have zero here.) The TREE_TYPE field is the type in which the result is actually returned. This is usually the same as the return type of the FUNCTION_DECL, but it may be a wider integer type because of promotion. DECL_FUNCTION_CODE is a code number that is nonzero for built-in functions. Its value is an enum built_in_function that says which built-in function it is. DECL_SOURCE_FILE holds a filename string and DECL_SOURCE_LINE holds a line number. In some cases these can be the location of a reference, if no definition has been seen. DECL_ABSTRACT is nonzero if the decl represents an abstract instance of a decl (i.e. one which is nested within an abstract instance of a inline function. */ DEFTREECODE (FUNCTION_DECL, "function_decl", 'd', 0) DEFTREECODE (LABEL_DECL, "label_decl", 'd', 0) DEFTREECODE (CONST_DECL, "const_decl", 'd', 0) DEFTREECODE (TYPE_DECL, "type_decl", 'd', 0) DEFTREECODE (VAR_DECL, "var_decl", 'd', 0) DEFTREECODE (PARM_DECL, "parm_decl", 'd', 0) DEFTREECODE (RESULT_DECL, "result_decl", 'd', 0) DEFTREECODE (FIELD_DECL, "field_decl", 'd', 0) /* A namespace declaration. Namespaces appear in DECL_CONTEXT of other _DECLs, providing a hierarchy of names. */ DEFTREECODE (NAMESPACE_DECL, "namespace_decl", 'd', 0) /* A translation unit. This is not technically a declaration, since it can't be looked up, but it's close enough. */ DEFTREECODE (TRANSLATION_UNIT_DECL, "translation_unit_decl", 'd', 0) /* References to storage. 
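   (Aside: the strings collected in the tree_code_name table being built here
   are what debugging and diagnostic routines print for a node.  A minimal
   sketch of a direct lookup, assuming a valid tree "node" and the usual
   stdio declarations:

     fprintf (stderr, "code = %s, class = %c\n",
              tree_code_name[(int) TREE_CODE (node)],
              TREE_CODE_CLASS (TREE_CODE (node))); )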
*/ /* Value is structure or union component. Operand 0 is the structure or union (an expression). Operand 1 is the field (a node of type FIELD_DECL). Operand 2, if present, is the value of DECL_FIELD_OFFSET, measured in units of DECL_OFFSET_ALIGN / BITS_PER_UNIT. */ DEFTREECODE (COMPONENT_REF, "component_ref", 'r', 3) /* Reference to a group of bits within an object. Similar to COMPONENT_REF except the position is given explicitly rather than via a FIELD_DECL. Operand 0 is the structure or union expression; operand 1 is a tree giving the number of bits being referenced; operand 2 is a tree giving the position of the first referenced bit. The field can be either a signed or unsigned field; BIT_FIELD_REF_UNSIGNED says which. */ DEFTREECODE (BIT_FIELD_REF, "bit_field_ref", 'r', 3) /* C unary `*' or Pascal `^'. One operand, an expression for a pointer. */ DEFTREECODE (INDIRECT_REF, "indirect_ref", 'r', 1) /* Pascal `^` on a file. One operand, an expression for the file. */ DEFTREECODE (BUFFER_REF, "buffer_ref", 'r', 1) /* Array indexing. Operand 0 is the array; operand 1 is a (single) array index. Operand 2, if present, is a copy of TYPE_MIN_VALUE of the index. Operand 3, if present, is the element size, measured in units of the alignment of the element type. */ DEFTREECODE (ARRAY_REF, "array_ref", 'r', 4) /* Likewise, except that the result is a range ("slice") of the array. The starting index of the resulting array is taken from operand 1 and the size of the range is taken from the type of the expression. */ DEFTREECODE (ARRAY_RANGE_REF, "array_range_ref", 'r', 4) /* Used to represent lookup of runtime type dependent data. Often this is a reference to a vtable, but it needn't be. Operands are: OBJ_TYPE_REF_EXPR: An expression that evaluates the value to use. OBJ_TYPE_REF_OBJECT: Is the object on whose behalf the lookup is being performed. Through this the optimizers may be able to statically determine the dynamic type of the object. OBJ_TYPE_REF_TOKEN: Something front-end specific used to resolve the reference to something simpler, usually to the address of a DECL. Never touched by the middle-end. Good choices would be either an identifier or a vtable index. */ DEFTREECODE (OBJ_TYPE_REF, "obj_type_ref", 'e', 3) /* The exception object from the runtime. */ DEFTREECODE (EXC_PTR_EXPR, "exc_ptr_expr", 'e', 0) /* The filter object from the runtime. */ DEFTREECODE (FILTER_EXPR, "filter_expr", 'e', 0) /* Constructor: return an aggregate value made from specified components. In C, this is used only for structure and array initializers. Also used for SET_TYPE in Chill (and potentially Pascal). The operand is a list of component values made out of a chain of TREE_LIST nodes. For ARRAY_TYPE: The TREE_PURPOSE of each node is the corresponding index. If the TREE_PURPOSE is a RANGE_EXPR, it is a short-hand for many nodes, one for each index in the range. (If the corresponding TREE_VALUE has side-effects, they are evaluated once for each element. Wrap the value in a SAVE_EXPR if you want to evaluate side effects only once.) For RECORD_TYPE, UNION_TYPE, or QUAL_UNION_TYPE: The TREE_PURPOSE of each node is a FIELD_DECL. For SET_TYPE: The TREE_VALUE specifies a value (index) in the set that is true. If TREE_PURPOSE is non-NULL, it specifies the lower limit of a range of true values. Elements not listed are false (not in the set). */ DEFTREECODE (CONSTRUCTOR, "constructor", 'e', 1) /* The expression types are mostly straightforward, with the fourth argument of DEFTREECODE saying how many operands there are. 
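   For example, the entry

	DEFTREECODE (PLUS_EXPR, "plus_expr", '2', 2)

   further down in this file declares a binary arithmetic code with exactly
   two operands.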
Unless otherwise specified, the operands are expressions and the types of all the operands and the expression must all be the same. */ /* Contains two expressions to compute, one followed by the other. the first value is ignored. The second one's value is used. The type of the first expression need not agree with the other types. */ DEFTREECODE (COMPOUND_EXPR, "compound_expr", 'e', 2) /* Assignment expression. Operand 0 is the what to set; 1, the new value. */ DEFTREECODE (MODIFY_EXPR, "modify_expr", 'e', 2) /* Initialization expression. Operand 0 is the variable to initialize; Operand 1 is the initializer. */ DEFTREECODE (INIT_EXPR, "init_expr", 'e', 2) /* For TARGET_EXPR, operand 0 is the target of an initialization, operand 1 is the initializer for the target, which may be void if simply expanding it initializes the target. operand 2 is the cleanup for this node, if any. operand 3 is the saved initializer after this node has been expanded once; this is so we can re-expand the tree later. */ DEFTREECODE (TARGET_EXPR, "target_expr", 'e', 4) /* Conditional expression ( ... ? ... : ... in C). Operand 0 is the condition. Operand 1 is the then-value. Operand 2 is the else-value. Operand 0 may be of any type. Operand 1 must have the same type as the entire expression, unless it unconditionally throws an exception, in which case it should have VOID_TYPE. The same constraints apply to operand 2. */ DEFTREECODE (COND_EXPR, "cond_expr", 'e', 3) /* Declare local variables, including making RTL and allocating space. BIND_EXPR_VARS is a chain of VAR_DECL nodes for the variables. BIND_EXPR_BODY is the body, the expression to be computed using the variables. The value of operand 1 becomes that of the BIND_EXPR. BIND_EXPR_BLOCK is the BLOCK that corresponds to these bindings for debugging purposes. If this BIND_EXPR is actually expanded, that sets the TREE_USED flag in the BLOCK. The BIND_EXPR is not responsible for informing parsers about these variables. If the body is coming from the input file, then the code that creates the BIND_EXPR is also responsible for informing the parser of the variables. If the BIND_EXPR is ever expanded, its TREE_USED flag is set. This tells the code for debugging symbol tables not to ignore the BIND_EXPR. If the BIND_EXPR should be output for debugging but will not be expanded, set the TREE_USED flag by hand. In order for the BIND_EXPR to be known at all, the code that creates it must also install it as a subblock in the tree of BLOCK nodes for the function. */ DEFTREECODE (BIND_EXPR, "bind_expr", 'e', 3) /* A labeled block. Operand 0 is the label that will be generated to mark the end of the block. Operand 1 is the labeled block body. */ DEFTREECODE (LABELED_BLOCK_EXPR, "labeled_block_expr", 'e', 2) /* Function call. Operand 0 is the function. Operand 1 is the argument list, a list of expressions made out of a chain of TREE_LIST nodes. Operand 2 is the static chain argument, or NULL. */ DEFTREECODE (CALL_EXPR, "call_expr", 'e', 3) /* Specify a value to compute along with its corresponding cleanup. Operand 0 argument is an expression whose value needs a cleanup. Operand 1 is the cleanup expression for the object. Operand 2 is unused. The cleanup is executed by the first enclosing CLEANUP_POINT_EXPR, if it exists, otherwise it is the responsibility of the caller to manually call expand_start_target_temps/expand_end_target_temps, as needed. This differs from TRY_CATCH_EXPR in that operand 2 is always evaluated when an exception isn't thrown when cleanups are run. 
*/ DEFTREECODE (WITH_CLEANUP_EXPR, "with_cleanup_expr", 'e', 3)
/* Specify a cleanup point.
   Operand 0 is an expression that may have cleanups. If it does, those cleanups are executed after the expression is expanded.
   Note that if the expression is a reference to storage, it is forced out of memory before the cleanups are run. This is necessary to handle cases where the cleanups modify the storage referenced; in the expression 't.i', if 't' is a struct with an integer member 'i' and a cleanup which modifies 'i', the value of the expression depends on whether the cleanup is run before or after 't.i' is evaluated. When expand_expr is run on 't.i', it returns a MEM. This is not good enough; the value of 't.i' must be forced out of memory.
   As a consequence, the operand of a CLEANUP_POINT_EXPR must not have BLKmode, because it will not be forced out of memory. */
DEFTREECODE (CLEANUP_POINT_EXPR, "cleanup_point_expr", 'e', 1)
/* The following two codes are used in languages that have types where some field in an object of the type contains a value that is used in the computation of another field's offset or size and/or the size of the type. The positions and/or sizes of fields can vary from object to object of the same type or even for one and the same object within its scope.
   Record types with discriminants in Ada or schema types in Pascal are examples of such types. This mechanism is also used to create "fat pointers" for unconstrained array types in Ada; the fat pointer is a structure one of whose fields is a pointer to the actual array type and the other field is a pointer to a template, which is a structure containing the bounds of the array. The bounds in the type pointed to by the first field in the fat pointer refer to the values in the template.
   When you wish to construct such a type you need "self-references" that allow you to reference the object having this type from the TYPE node, i.e. without having a variable instantiating this type.
   Such a "self-reference" is done using a PLACEHOLDER_EXPR. This is a node that will later be replaced with the object being referenced. Its type is that of the object and selects which object to use from a chain of references (see below). No other slots are used in the PLACEHOLDER_EXPR.
   For example, if your type FOO is a RECORD_TYPE with a field BAR, and you need the value of the object's BAR field to calculate TYPE_SIZE (FOO), just substitute that object above with a PLACEHOLDER_EXPR whose TREE_TYPE is FOO. Then construct your COMPONENT_REF with the PLACEHOLDER_EXPR as the first operand (which has the correct type). Later, when the size is needed in the program, the back-end will find this PLACEHOLDER_EXPR and generate code to calculate the actual size at run-time.
   In the following, we describe how this calculation is done. When we wish to evaluate a size or offset, we check whether it contains a PLACEHOLDER_EXPR. If it does, we call substitute_placeholder_in_expr passing both that tree and an expression within which the object may be found. The latter expression is the object itself in the simple case of an Ada record with discriminant, but it can be the array in the case of an unconstrained array. In the latter case, we need the fat pointer, because the bounds of the array can only be accessed from it. However, we rely here on the fact that the expression for the array contains the dereference of the fat pointer that obtained the array pointer. */
/* Denotes a record to later be substituted before evaluating this expression.
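   (Continuing the FOO/BAR example above: the PLACEHOLDER_EXPR stands for
   "the object of type FOO whose size or offset is being computed", and
   substitute_placeholder_in_expr later replaces it with the actual object
   expression.)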
The type of this expression is used to find the record to replace it. */ DEFTREECODE (PLACEHOLDER_EXPR, "placeholder_expr", 'x', 0) /* Simple arithmetic. */ DEFTREECODE (PLUS_EXPR, "plus_expr", '2', 2) DEFTREECODE (MINUS_EXPR, "minus_expr", '2', 2) DEFTREECODE (MULT_EXPR, "mult_expr", '2', 2) /* Division for integer result that rounds the quotient toward zero. */ DEFTREECODE (TRUNC_DIV_EXPR, "trunc_div_expr", '2', 2) /* Division for integer result that rounds the quotient toward infinity. */ DEFTREECODE (CEIL_DIV_EXPR, "ceil_div_expr", '2', 2) /* Division for integer result that rounds toward minus infinity. */ DEFTREECODE (FLOOR_DIV_EXPR, "floor_div_expr", '2', 2) /* Division for integer result that rounds toward nearest integer. */ DEFTREECODE (ROUND_DIV_EXPR, "round_div_expr", '2', 2) /* Four kinds of remainder that go with the four kinds of division. */ DEFTREECODE (TRUNC_MOD_EXPR, "trunc_mod_expr", '2', 2) DEFTREECODE (CEIL_MOD_EXPR, "ceil_mod_expr", '2', 2) DEFTREECODE (FLOOR_MOD_EXPR, "floor_mod_expr", '2', 2) DEFTREECODE (ROUND_MOD_EXPR, "round_mod_expr", '2', 2) /* Division for real result. */ DEFTREECODE (RDIV_EXPR, "rdiv_expr", '2', 2) /* Division which is not supposed to need rounding. Used for pointer subtraction in C. */ DEFTREECODE (EXACT_DIV_EXPR, "exact_div_expr", '2', 2) /* Conversion of real to fixed point: four ways to round, like the four ways to divide. CONVERT_EXPR can also be used to convert a real to an integer, and that is what is used in languages that do not have ways of specifying which of these is wanted. Maybe these are not needed. */ DEFTREECODE (FIX_TRUNC_EXPR, "fix_trunc_expr", '1', 1) DEFTREECODE (FIX_CEIL_EXPR, "fix_ceil_expr", '1', 1) DEFTREECODE (FIX_FLOOR_EXPR, "fix_floor_expr", '1', 1) DEFTREECODE (FIX_ROUND_EXPR, "fix_round_expr", '1', 1) /* Conversion of an integer to a real. */ DEFTREECODE (FLOAT_EXPR, "float_expr", '1', 1) /* Unary negation. */ DEFTREECODE (NEGATE_EXPR, "negate_expr", '1', 1) DEFTREECODE (MIN_EXPR, "min_expr", '2', 2) DEFTREECODE (MAX_EXPR, "max_expr", '2', 2) /* Represents the absolute value of the operand. An ABS_EXPR must have either an INTEGER_TYPE or a REAL_TYPE. The operand of the ABS_EXPR must have the same type. */ DEFTREECODE (ABS_EXPR, "abs_expr", '1', 1) /* Shift operations for shift and rotate. Shift means logical shift if done on an unsigned type, arithmetic shift if done on a signed type. The second operand is the number of bits to shift by; it need not be the same type as the first operand and result. Note that the result is undefined if the second operand is larger than the first operand's type size. */ DEFTREECODE (LSHIFT_EXPR, "lshift_expr", '2', 2) DEFTREECODE (RSHIFT_EXPR, "rshift_expr", '2', 2) DEFTREECODE (LROTATE_EXPR, "lrotate_expr", '2', 2) DEFTREECODE (RROTATE_EXPR, "rrotate_expr", '2', 2) /* Bitwise operations. Operands have same mode as result. */ DEFTREECODE (BIT_IOR_EXPR, "bit_ior_expr", '2', 2) DEFTREECODE (BIT_XOR_EXPR, "bit_xor_expr", '2', 2) DEFTREECODE (BIT_AND_EXPR, "bit_and_expr", '2', 2) DEFTREECODE (BIT_NOT_EXPR, "bit_not_expr", '1', 1) /* ANDIF and ORIF allow the second operand not to be computed if the value of the expression is determined from the first operand. AND, OR, and XOR always compute the second operand whether its value is needed or not (for side effects). The operand may have BOOLEAN_TYPE or INTEGER_TYPE. In either case, the argument will be either zero or one. 
For example, a TRUTH_NOT_EXPR will never have an INTEGER_TYPE VAR_DECL as its argument; instead, a NE_EXPR will be used to compare the VAR_DECL to zero, thereby obtaining a node with value zero or one. */ DEFTREECODE (TRUTH_ANDIF_EXPR, "truth_andif_expr", 'e', 2) DEFTREECODE (TRUTH_ORIF_EXPR, "truth_orif_expr", 'e', 2) DEFTREECODE (TRUTH_AND_EXPR, "truth_and_expr", 'e', 2) DEFTREECODE (TRUTH_OR_EXPR, "truth_or_expr", 'e', 2) DEFTREECODE (TRUTH_XOR_EXPR, "truth_xor_expr", 'e', 2) DEFTREECODE (TRUTH_NOT_EXPR, "truth_not_expr", 'e', 1) /* Relational operators. `EQ_EXPR' and `NE_EXPR' are allowed for any types. The others are allowed only for integer (or pointer or enumeral) or real types. In all cases the operands will have the same type, and the value is always the type used by the language for booleans. */ DEFTREECODE (LT_EXPR, "lt_expr", '<', 2) DEFTREECODE (LE_EXPR, "le_expr", '<', 2) DEFTREECODE (GT_EXPR, "gt_expr", '<', 2) DEFTREECODE (GE_EXPR, "ge_expr", '<', 2) DEFTREECODE (EQ_EXPR, "eq_expr", '<', 2) DEFTREECODE (NE_EXPR, "ne_expr", '<', 2) /* Additional relational operators for floating point unordered. */ DEFTREECODE (UNORDERED_EXPR, "unordered_expr", '<', 2) DEFTREECODE (ORDERED_EXPR, "ordered_expr", '<', 2) /* These are equivalent to unordered or ... */ DEFTREECODE (UNLT_EXPR, "unlt_expr", '<', 2) DEFTREECODE (UNLE_EXPR, "unle_expr", '<', 2) DEFTREECODE (UNGT_EXPR, "ungt_expr", '<', 2) DEFTREECODE (UNGE_EXPR, "unge_expr", '<', 2) DEFTREECODE (UNEQ_EXPR, "uneq_expr", '<', 2) /* This is the reverse of uneq_expr. */ DEFTREECODE (LTGT_EXPR, "ltgt_expr", '<', 2) /* Operations for Pascal sets. Not used now. */ DEFTREECODE (IN_EXPR, "in_expr", '2', 2) DEFTREECODE (SET_LE_EXPR, "set_le_expr", '<', 2) DEFTREECODE (CARD_EXPR, "card_expr", '1', 1) DEFTREECODE (RANGE_EXPR, "range_expr", '2', 2) /* Represents a conversion of type of a value. All conversions, including implicit ones, must be represented by CONVERT_EXPR or NOP_EXPR nodes. */ DEFTREECODE (CONVERT_EXPR, "convert_expr", '1', 1) /* Represents a conversion expected to require no code to be generated. */ DEFTREECODE (NOP_EXPR, "nop_expr", '1', 1) /* Value is same as argument, but guaranteed not an lvalue. */ DEFTREECODE (NON_LVALUE_EXPR, "non_lvalue_expr", '1', 1) /* Represents viewing something of one type as being of a second type. This corresponds to an "Unchecked Conversion" in Ada and roughly to the idiom *(type2 *)&X in C. The only operand is the value to be viewed as being of another type. It is undefined if the type of the input and of the expression have different sizes. This code may also be used within the LHS of a MODIFY_EXPR, in which case no actual data motion may occur. TREE_ADDRESSABLE will be set in this case and GCC must abort if it could not do the operation without generating insns. */ DEFTREECODE (VIEW_CONVERT_EXPR, "view_convert_expr", '1', 1) /* Represents something we computed once and will use multiple times. First operand is that expression. After it is evaluated once, it will be replaced by the temporary variable that holds the value. */ DEFTREECODE (SAVE_EXPR, "save_expr", 'e', 1) /* For a UNSAVE_EXPR, operand 0 is the value to unsave. By unsave, we mean that all _EXPRs such as TARGET_EXPRs, SAVE_EXPRs, CALL_EXPRs, that are protected from being evaluated more than once should be reset so that a new expand_expr call of this expr will cause those to be re-evaluated. This is useful when we want to reuse a tree in different places, but where we must re-expand. 
*/ DEFTREECODE (UNSAVE_EXPR, "unsave_expr", 'e', 1) /* & in C. Value is the address at which the operand's value resides. Operand may have any mode. Result mode is Pmode. */ DEFTREECODE (ADDR_EXPR, "addr_expr", 'e', 1) /* Non-lvalue reference or pointer to an object. */ DEFTREECODE (REFERENCE_EXPR, "reference_expr", 'e', 1) /* Operand is a function constant; result is a function variable value of type EPmode. Used only for languages that need static chains. */ DEFTREECODE (ENTRY_VALUE_EXPR, "entry_value_expr", 'e', 1) /* Operand0 is a function constant; result is part N of a function descriptor of type ptr_mode. */ DEFTREECODE (FDESC_EXPR, "fdesc_expr", 'e', 2) /* Given two real or integer operands of the same type, returns a complex value of the corresponding complex type. */ DEFTREECODE (COMPLEX_EXPR, "complex_expr", '2', 2) /* Complex conjugate of operand. Used only on complex types. */ DEFTREECODE (CONJ_EXPR, "conj_expr", '1', 1) /* Used only on an operand of complex type, these return a value of the corresponding component type. */ DEFTREECODE (REALPART_EXPR, "realpart_expr", 'r', 1) DEFTREECODE (IMAGPART_EXPR, "imagpart_expr", 'r', 1) /* Nodes for ++ and -- in C. The second arg is how much to increment or decrement by. For a pointer, it would be the size of the object pointed to. */ DEFTREECODE (PREDECREMENT_EXPR, "predecrement_expr", 'e', 2) DEFTREECODE (PREINCREMENT_EXPR, "preincrement_expr", 'e', 2) DEFTREECODE (POSTDECREMENT_EXPR, "postdecrement_expr", 'e', 2) DEFTREECODE (POSTINCREMENT_EXPR, "postincrement_expr", 'e', 2) /* Used to implement `va_arg'. */ DEFTREECODE (VA_ARG_EXPR, "va_arg_expr", 'e', 1) /* Evaluate operand 1. If and only if an exception is thrown during the evaluation of operand 1, evaluate operand 2. This differs from TRY_FINALLY_EXPR in that operand 2 is not evaluated on a normal or jump exit, only on an exception. */ DEFTREECODE (TRY_CATCH_EXPR, "try_catch_expr", 's', 2) /* Evaluate the first operand. The second operand is a cleanup expression which is evaluated on any exit (normal, exception, or jump out) from this expression. */ DEFTREECODE (TRY_FINALLY_EXPR, "try_finally", 's', 2) /* These types of expressions have no useful value, and always have side effects. */ /* Used to represent a local declaration. The operand is DECL_EXPR_DECL. */ DEFTREECODE (DECL_EXPR, "decl_expr", 's', 1) /* A label definition, encapsulated as a statement. Operand 0 is the LABEL_DECL node for the label that appears here. The type should be void and the value should be ignored. */ DEFTREECODE (LABEL_EXPR, "label_expr", 's', 1) /* GOTO. Operand 0 is a LABEL_DECL node or an expression. The type should be void and the value should be ignored. */ DEFTREECODE (GOTO_EXPR, "goto_expr", 's', 1) /* Used internally for cleanups in the implementation of TRY_FINALLY_EXPR. (Specifically, it is created by expand_expr, not front-ends.) Operand 0 is the rtx for the start of the subroutine we need to call. Operand 1 is the rtx for a variable in which to store the address of where the subroutine should return to. */ DEFTREECODE (GOTO_SUBROUTINE_EXPR, "goto_subroutine", 's', 2) /* RETURN. Evaluates operand 0, then returns from the current function. Presumably that operand is an assignment that stores into the RESULT_DECL that hold the value to be returned. The operand may be null. The type should be void and the value should be ignored. */ DEFTREECODE (RETURN_EXPR, "return_expr", 's', 1) /* Exit the inner most loop conditionally. Operand 0 is the condition. 
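   Roughly, this is `if (cond) break;' applied to the innermost enclosing
   LOOP_EXPR: the loop is left when the condition is true.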
The type should be void and the value should be ignored. */ DEFTREECODE (EXIT_EXPR, "exit_expr", 's', 1) /* A loop. Operand 0 is the body of the loop. It must contain an EXIT_EXPR or is an infinite loop. The type should be void and the value should be ignored. */ DEFTREECODE (LOOP_EXPR, "loop_expr", 's', 1) /* Exit a labeled block, possibly returning a value. Operand 0 is a LABELED_BLOCK_EXPR to exit. Operand 1 is the value to return. It may be left null. */ DEFTREECODE (EXIT_BLOCK_EXPR, "exit_block_expr", 's', 2) /* Switch expression. TREE_TYPE is the original type of the condition, before any language required type conversions. It may be NULL, in which case the original type and final types are assumed to be the same. Operand 0 is the expression used to perform the branch, Operand 1 is the body of the switch, which probably contains CASE_LABEL_EXPRs. It may also be NULL, in which case operand 2 must not be NULL. Operand 2 is either NULL_TREE or a TREE_VEC of the CASE_LABEL_EXPRs of all the cases. */ DEFTREECODE (SWITCH_EXPR, "switch_expr", 's', 3) /* Used to represent a case label. The operands are CASE_LOW and CASE_HIGH, respectively. If CASE_LOW is NULL_TREE, the label is a 'default' label. If CASE_HIGH is NULL_TREE, the label is a normal case label. CASE_LABEL is the corresponding LABEL_DECL. */ DEFTREECODE (CASE_LABEL_EXPR, "case_label_expr", 's', 3) /* RESX. Resume execution after an exception. Operand 0 is a number indicating the exception region that is being left. */ DEFTREECODE (RESX_EXPR, "resx_expr", 's', 1) /* Used to represent an inline assembly statement. ASM_STRING returns a STRING_CST for the instruction (e.g., "mov x, y"). ASM_OUTPUTS, ASM_INPUTS, and ASM_CLOBBERS represent the outputs, inputs, and clobbers for the statement. */ DEFTREECODE (ASM_EXPR, "asm_expr", 's', 4) /* Variable references for SSA analysis. New SSA names are created every time a variable is assigned a new value. The SSA builder uses SSA_NAME nodes to implement SSA versioning. */ DEFTREECODE (SSA_NAME, "ssa_name", 'x', 0) /* SSA PHI operator. PHI_RESULT is the new SSA_NAME node created by the PHI node. PHI_ARG_LENGTH is the number of arguments. PHI_ARG_ELT returns the Ith tuple from the argument list. Each tuple contains the incoming reaching definition (SSA_NAME node) and the edge via which that definition is coming through. */ DEFTREECODE (PHI_NODE, "phi_node", 'x', 0) /* Used to represent a typed exception handler. CATCH_TYPES is the type (or list of types) handled, and CATCH_BODY is the code for the handler. */ DEFTREECODE (CATCH_EXPR, "catch_expr", 's', 2) /* Used to represent an exception specification. EH_FILTER_TYPES is a list of allowed types, and EH_FILTER_FAILURE is an expression to evaluate on failure. EH_FILTER_MUST_NOT_THROW controls which range type to use when expanding. */ DEFTREECODE (EH_FILTER_EXPR, "eh_filter_expr", 's', 2) /* Node used for describing a property that is known at compile time. */ DEFTREECODE (SCEV_KNOWN, "scev_known", 'e', 0) /* Node used for describing a property that is not known at compile time. */ DEFTREECODE (SCEV_NOT_KNOWN, "scev_not_known", 'e', 0) /* Polynomial chains of recurrences. Under the form: cr = {CHREC_LEFT (cr), +, CHREC_RIGHT (cr)}. */ DEFTREECODE (POLYNOMIAL_CHREC, "polynomial_chrec", 'e', 3) /* Used to chain children of container statements together. Use the interface in tree-iterator.h to access this node. */ DEFTREECODE (STATEMENT_LIST, "statement_list", 'x', 0) /* Value handles. 
Artificial nodes to represent expressions in partial redundancy elimination (tree-ssa-pre.c). These nodes are used for expression canonicalization. If two expressions compute the same value, they will be assigned the same value handle. */ DEFTREECODE (VALUE_HANDLE, "value_handle", 'x', 0) /* Base class information. Holds information about a class as a baseclass of itself or another class. */ DEFTREECODE (TREE_BINFO, "tree_binfo", 'x', 0) /* Local variables: mode:c End: */ "@@dummy", /* This file contains the definitions and documentation for the additional tree codes used in the GNU C++ compiler (see tree.def for the standard codes). Copyright (C) 1987, 1988, 1990, 1993, 1997, 1998, 1999, 2000, 2001, 2004 Free Software Foundation, Inc. Written by Benjamin Chelf This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Tree nodes relevant to both C and C++. These were originally in cp-tree.def in the cp subdir. */ DEFTREECODE (SIZEOF_EXPR, "sizeof_expr", '1', 1) DEFTREECODE (ARROW_EXPR, "arrow_expr", 'e', 1) DEFTREECODE (ALIGNOF_EXPR, "alignof_expr", '1', 1) /* Used to represent an expression statement. Use `EXPR_STMT_EXPR' to obtain the expression. */ DEFTREECODE (EXPR_STMT, "expr_stmt", 'e', 1) /* Used to represent a `for' statement. The operands are FOR_INIT_STMT, FOR_COND, FOR_EXPR, and FOR_BODY, respectively. */ DEFTREECODE (FOR_STMT, "for_stmt", 'e', 4) /* Used to represent a 'while' statement. The operands are WHILE_COND and WHILE_BODY, respectively. */ DEFTREECODE (WHILE_STMT, "while_stmt", 'e', 2) /* Used to represent a 'do' statement. The operands are DO_BODY and DO_COND, respectively. */ DEFTREECODE (DO_STMT, "do_stmt", 'e', 2) /* Used to represent a 'break' statement. */ DEFTREECODE (BREAK_STMT, "break_stmt", 'e', 0) /* Used to represent a 'continue' statement. */ DEFTREECODE (CONTINUE_STMT, "continue_stmt", 'e', 0) /* Used to represent a 'switch' statement. The operands are SWITCH_COND, SWITCH_BODY and SWITCH_TYPE, respectively. */ DEFTREECODE (SWITCH_STMT, "switch_stmt", 'e', 3) /* A STMT_EXPR represents a statement-expression. The STMT_EXPR_STMT is the statement given by the expression. */ DEFTREECODE (STMT_EXPR, "stmt_expr", 'e', 1) /* A COMPOUND_LITERAL_EXPR represents a C99 compound literal. The COMPOUND_LITERAL_EXPR_DECL_STMT is the a DECL_STMT containing the decl for the anonymous object represented by the COMPOUND_LITERAL; the DECL_INITIAL of that decl is the CONSTRUCTOR that initializes the compound literal. */ DEFTREECODE (COMPOUND_LITERAL_EXPR, "compound_literal_expr", 'e', 1) /* Local variables: mode:c End: */ }; #undef DEFTREECODE void finish_file (void) { c_objc_common_finish_file (); } int c_types_compatible_p (tree x, tree y) { return comptypes (TYPE_MAIN_VARIANT (x), TYPE_MAIN_VARIANT (y)); } /* Type information for c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ extern const struct ggc_root_tab gt_ggc_r_gt_coverage_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_alias_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_cselib_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_cgraph_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_dbxout_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_dwarf2out_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_dwarf2asm_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_dojump_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_emit_rtl_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_except_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_explow_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_expr_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_fold_const_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_function_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_gcse_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_optabs_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_ra_build_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_regclass_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_reg_stack_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_cfglayout_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_sdbout_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_stor_layout_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_stringpool_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_tree_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_varasm_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_tree_mudflap_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_c_common_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_c_parse_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_tree_ssanames_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_tree_eh_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_tree_cfg_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_tree_ssa_ccp_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_gimplify_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_tree_alias_common_h[]; extern const struct ggc_root_tab gt_ggc_r_gtype_desc_c[]; extern const struct ggc_root_tab gt_ggc_r_gt_tree_ssa_operands_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_tree_nested_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_i386_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_c_decl_h[]; extern const struct ggc_root_tab gt_ggc_r_gt_c_pragma_h[]; const struct ggc_root_tab * const gt_ggc_rtab[] = { gt_ggc_r_gt_coverage_h, gt_ggc_r_gt_alias_h, gt_ggc_r_gt_cselib_h, gt_ggc_r_gt_cgraph_h, gt_ggc_r_gt_dbxout_h, gt_ggc_r_gt_dwarf2out_h, gt_ggc_r_gt_dwarf2asm_h, gt_ggc_r_gt_dojump_h, gt_ggc_r_gt_emit_rtl_h, gt_ggc_r_gt_except_h, gt_ggc_r_gt_explow_h, gt_ggc_r_gt_expr_h, gt_ggc_r_gt_fold_const_h, gt_ggc_r_gt_function_h, gt_ggc_r_gt_gcse_h, gt_ggc_r_gt_optabs_h, gt_ggc_r_gt_ra_build_h, gt_ggc_r_gt_regclass_h, gt_ggc_r_gt_reg_stack_h, 
gt_ggc_r_gt_cfglayout_h, gt_ggc_r_gt_sdbout_h, gt_ggc_r_gt_stor_layout_h, gt_ggc_r_gt_stringpool_h, gt_ggc_r_gt_tree_h, gt_ggc_r_gt_varasm_h, gt_ggc_r_gt_tree_mudflap_h, gt_ggc_r_gt_c_common_h, gt_ggc_r_gt_c_parse_h, gt_ggc_r_gt_tree_ssanames_h, gt_ggc_r_gt_tree_eh_h, gt_ggc_r_gt_tree_cfg_h, gt_ggc_r_gt_tree_ssa_ccp_h, gt_ggc_r_gt_gimplify_h, gt_ggc_r_gt_tree_alias_common_h, gt_ggc_r_gtype_desc_c, gt_ggc_r_gt_tree_ssa_operands_h, gt_ggc_r_gt_tree_nested_h, gt_ggc_r_gt_i386_h, gt_ggc_r_gt_c_decl_h, gt_ggc_r_gt_c_pragma_h, NULL }; extern const struct ggc_root_tab gt_ggc_rd_gt_alias_h[]; extern const struct ggc_root_tab gt_ggc_rd_gt_bitmap_h[]; extern const struct ggc_root_tab gt_ggc_rd_gt_emit_rtl_h[]; extern const struct ggc_root_tab gt_ggc_rd_gt_lists_h[]; extern const struct ggc_root_tab gt_ggc_rd_gt_tree_phinodes_h[]; extern const struct ggc_root_tab gt_ggc_rd_gt_tree_iterator_h[]; extern const struct ggc_root_tab gt_ggc_rd_gt_c_decl_h[]; const struct ggc_root_tab * const gt_ggc_deletable_rtab[] = { gt_ggc_rd_gt_alias_h, gt_ggc_rd_gt_bitmap_h, gt_ggc_rd_gt_emit_rtl_h, gt_ggc_rd_gt_lists_h, gt_ggc_rd_gt_tree_phinodes_h, gt_ggc_rd_gt_tree_iterator_h, gt_ggc_rd_gt_c_decl_h, NULL }; extern const struct ggc_cache_tab gt_ggc_rc_gt_emit_rtl_h[]; extern const struct ggc_cache_tab gt_ggc_rc_gt_fold_const_h[]; extern const struct ggc_cache_tab gt_ggc_rc_gt_tree_h[]; const struct ggc_cache_tab * const gt_ggc_cache_rtab[] = { gt_ggc_rc_gt_emit_rtl_h, gt_ggc_rc_gt_fold_const_h, gt_ggc_rc_gt_tree_h, NULL }; extern const struct ggc_root_tab gt_pch_rc_gt_emit_rtl_h[]; extern const struct ggc_root_tab gt_pch_rc_gt_fold_const_h[]; extern const struct ggc_root_tab gt_pch_rc_gt_tree_h[]; const struct ggc_root_tab * const gt_pch_cache_rtab[] = { gt_pch_rc_gt_emit_rtl_h, gt_pch_rc_gt_fold_const_h, gt_pch_rc_gt_tree_h, NULL }; extern const struct ggc_root_tab gt_pch_rs_gt_alias_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_dbxout_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_dwarf2out_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_dwarf2asm_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_emit_rtl_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_except_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_function_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_sdbout_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_tree_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_varasm_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_c_common_h[]; extern const struct ggc_root_tab gt_pch_rs_gt_gimplify_h[]; extern const struct ggc_root_tab gt_pch_rs_gtype_desc_c[]; extern const struct ggc_root_tab gt_pch_rs_gt_c_decl_h[]; const struct ggc_root_tab * const gt_pch_scalar_rtab[] = { gt_pch_rs_gt_alias_h, gt_pch_rs_gt_dbxout_h, gt_pch_rs_gt_dwarf2out_h, gt_pch_rs_gt_dwarf2asm_h, gt_pch_rs_gt_emit_rtl_h, gt_pch_rs_gt_except_h, gt_pch_rs_gt_function_h, gt_pch_rs_gt_sdbout_h, gt_pch_rs_gt_tree_h, gt_pch_rs_gt_varasm_h, gt_pch_rs_gt_c_common_h, gt_pch_rs_gt_gimplify_h, gt_pch_rs_gtype_desc_c, gt_pch_rs_gt_c_decl_h, NULL }; /* Stub functions for Objective-C and Objective-C++ routines that are called from within the C and C++ front-ends, respectively. Copyright (C) 1991, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ tree lookup_interface (tree arg ATTRIBUTE_UNUSED) { return 0; } tree is_class_name (tree arg ATTRIBUTE_UNUSED) { return 0; } tree objc_is_object_ptr (tree arg ATTRIBUTE_UNUSED) { return 0; } tree lookup_objc_ivar (tree arg ATTRIBUTE_UNUSED) { return 0; } void objc_check_decl (tree decl ATTRIBUTE_UNUSED) { } int objc_comptypes (tree lhs ATTRIBUTE_UNUSED, tree rhs ATTRIBUTE_UNUSED, int reflexive ATTRIBUTE_UNUSED) { return -1; } tree objc_message_selector (void) { return 0; } void objc_clear_super_receiver (void) { } int objc_is_public (tree expr ATTRIBUTE_UNUSED, tree identifier ATTRIBUTE_UNUSED) { return 1; } /* Functions dealing with attribute handling, used by most front ends. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static void init_attributes (void); /* Table of the tables of attributes (common, language, format, machine) searched. */ static const struct attribute_spec *attribute_tables[4]; static bool attributes_initialized = false; /* Default empty table of attributes. */ static const struct attribute_spec empty_attribute_table[] = { { NULL, 0, 0, false, false, false, NULL } }; /* Initialize attribute tables, and make some sanity checks if --enable-checking. */ static void init_attributes (void) { size_t i; attribute_tables[0] = lang_hooks.common_attribute_table; attribute_tables[1] = lang_hooks.attribute_table; attribute_tables[2] = lang_hooks.format_attribute_table; attribute_tables[3] = targetm.attribute_table; /* Translate NULL pointers to pointers to the empty table. */ for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) if (attribute_tables[i] == NULL) attribute_tables[i] = empty_attribute_table; #ifdef ENABLE_CHECKING /* Make some sanity checks on the attribute tables. */ for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) { int j; for (j = 0; attribute_tables[i][j].name != NULL; j++) { /* The name must not begin and end with __. */ const char *name = attribute_tables[i][j].name; int len = strlen (name); if (name[0] == '_' && name[1] == '_' && name[len - 1] == '_' && name[len - 2] == '_') abort (); /* The minimum and maximum lengths must be consistent. */ if (attribute_tables[i][j].min_length < 0) abort (); if (attribute_tables[i][j].max_length != -1 && (attribute_tables[i][j].max_length < attribute_tables[i][j].min_length)) abort (); /* An attribute cannot require both a DECL and a TYPE. 
*/ if (attribute_tables[i][j].decl_required && attribute_tables[i][j].type_required) abort (); /* If an attribute requires a function type, in particular it requires a type. */ if (attribute_tables[i][j].function_type_required && !attribute_tables[i][j].type_required) abort (); } } /* Check that each name occurs just once in each table. */ for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) { int j, k; for (j = 0; attribute_tables[i][j].name != NULL; j++) for (k = j + 1; attribute_tables[i][k].name != NULL; k++) if (!strcmp (attribute_tables[i][j].name, attribute_tables[i][k].name)) abort (); } /* Check that no name occurs in more than one table. */ for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) { size_t j, k, l; for (j = i + 1; j < ARRAY_SIZE (attribute_tables); j++) for (k = 0; attribute_tables[i][k].name != NULL; k++) for (l = 0; attribute_tables[j][l].name != NULL; l++) if (!strcmp (attribute_tables[i][k].name, attribute_tables[j][l].name)) abort (); } #endif attributes_initialized = true; } /* Process the attributes listed in ATTRIBUTES and install them in *NODE, which is either a DECL (including a TYPE_DECL) or a TYPE. If a DECL, it should be modified in place; if a TYPE, a copy should be created unless ATTR_FLAG_TYPE_IN_PLACE is set in FLAGS. FLAGS gives further information, in the form of a bitwise OR of flags in enum attribute_flags from tree.h. Depending on these flags, some attributes may be returned to be applied at a later stage (for example, to apply a decl attribute to the declaration rather than to its type). */ tree decl_attributes (tree *node, tree attributes, int flags) { tree a; tree returned_attrs = NULL_TREE; if (!attributes_initialized) init_attributes (); targetm.insert_attributes (*node, &attributes); for (a = attributes; a; a = TREE_CHAIN (a)) { tree name = TREE_PURPOSE (a); tree args = TREE_VALUE (a); tree *anode = node; const struct attribute_spec *spec = NULL; bool no_add_attrs = 0; tree fn_ptr_tmp = NULL_TREE; size_t i; for (i = 0; i < ARRAY_SIZE (attribute_tables); i++) { int j; for (j = 0; attribute_tables[i][j].name != NULL; j++) { if (is_attribute_p (attribute_tables[i][j].name, name)) { spec = &attribute_tables[i][j]; break; } } if (spec != NULL) break; } if (spec == NULL) { warning ("`%s' attribute directive ignored", IDENTIFIER_POINTER (name)); continue; } else if (list_length (args) < spec->min_length || (spec->max_length >= 0 && list_length (args) > spec->max_length)) { error ("wrong number of arguments specified for `%s' attribute", IDENTIFIER_POINTER (name)); continue; } if (spec->decl_required && !DECL_P (*anode)) { if (flags & ((int) ATTR_FLAG_DECL_NEXT | (int) ATTR_FLAG_FUNCTION_NEXT | (int) ATTR_FLAG_ARRAY_NEXT)) { /* Pass on this attribute to be tried again. */ returned_attrs = tree_cons (name, args, returned_attrs); continue; } else { warning ("`%s' attribute does not apply to types", IDENTIFIER_POINTER (name)); continue; } } /* If we require a type, but were passed a decl, set up to make a new type and update the one in the decl. ATTR_FLAG_TYPE_IN_PLACE would have applied if we'd been passed a type, but we cannot modify the decl's type in place here. 
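      (An illustrative reading of the code that follows: when a type-level
      attribute appears on a declaration, the attribute ends up applied to a
      fresh variant of the decl's type rather than to the shared type node
      itself.)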
*/ if (spec->type_required && DECL_P (*anode)) { anode = &TREE_TYPE (*anode); flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE; } if (spec->function_type_required && TREE_CODE (*anode) != FUNCTION_TYPE && TREE_CODE (*anode) != METHOD_TYPE) { if (TREE_CODE (*anode) == POINTER_TYPE && (TREE_CODE (TREE_TYPE (*anode)) == FUNCTION_TYPE || TREE_CODE (TREE_TYPE (*anode)) == METHOD_TYPE)) { /* OK, this is a bit convoluted. We can't just make a copy of the pointer type and modify its TREE_TYPE, because if we change the attributes of the target type the pointer type needs to have a different TYPE_MAIN_VARIANT. So we pull out the target type now, frob it as appropriate, and rebuild the pointer type later. This would all be simpler if attributes were part of the declarator, grumble grumble. */ fn_ptr_tmp = TREE_TYPE (*anode); anode = &fn_ptr_tmp; flags &= ~(int) ATTR_FLAG_TYPE_IN_PLACE; } else if (flags & (int) ATTR_FLAG_FUNCTION_NEXT) { /* Pass on this attribute to be tried again. */ returned_attrs = tree_cons (name, args, returned_attrs); continue; } if (TREE_CODE (*anode) != FUNCTION_TYPE && TREE_CODE (*anode) != METHOD_TYPE) { warning ("`%s' attribute only applies to function types", IDENTIFIER_POINTER (name)); continue; } } if (spec->handler != NULL) returned_attrs = chainon ((*spec->handler) (anode, name, args, flags, &no_add_attrs), returned_attrs); /* Layout the decl in case anything changed. */ if (spec->type_required && DECL_P (*node) && (TREE_CODE (*node) == VAR_DECL || TREE_CODE (*node) == PARM_DECL || TREE_CODE (*node) == RESULT_DECL)) { /* Force a recalculation of mode and size. */ DECL_MODE (*node) = VOIDmode; DECL_SIZE (*node) = 0; layout_decl (*node, 0); } if (!no_add_attrs) { tree old_attrs; tree a; if (DECL_P (*anode)) old_attrs = DECL_ATTRIBUTES (*anode); else old_attrs = TYPE_ATTRIBUTES (*anode); for (a = lookup_attribute (spec->name, old_attrs); a != NULL_TREE; a = lookup_attribute (spec->name, TREE_CHAIN (a))) { if (simple_cst_equal (TREE_VALUE (a), args) == 1) break; } if (a == NULL_TREE) { /* This attribute isn't already in the list. */ if (DECL_P (*anode)) DECL_ATTRIBUTES (*anode) = tree_cons (name, args, old_attrs); else if (flags & (int) ATTR_FLAG_TYPE_IN_PLACE) { TYPE_ATTRIBUTES (*anode) = tree_cons (name, args, old_attrs); /* If this is the main variant, also push the attributes out to the other variants. */ if (*anode == TYPE_MAIN_VARIANT (*anode)) { tree variant; for (variant = *anode; variant; variant = TYPE_NEXT_VARIANT (variant)) { if (TYPE_ATTRIBUTES (variant) == old_attrs) TYPE_ATTRIBUTES (variant) = TYPE_ATTRIBUTES (*anode); else if (!lookup_attribute (spec->name, TYPE_ATTRIBUTES (variant))) TYPE_ATTRIBUTES (variant) = tree_cons (name, args, TYPE_ATTRIBUTES (variant)); } } } else *anode = build_type_attribute_variant (*anode, tree_cons (name, args, old_attrs)); } } if (fn_ptr_tmp) { /* Rebuild the function pointer type and put it in the appropriate place. */ fn_ptr_tmp = build_pointer_type (fn_ptr_tmp); if (DECL_P (*node)) TREE_TYPE (*node) = fn_ptr_tmp; else if (TREE_CODE (*node) == POINTER_TYPE) *node = fn_ptr_tmp; else abort (); } } return returned_attrs; } /* Split SPECS_ATTRS, a list of declspecs and prefix attributes, into two lists. SPECS_ATTRS may also be just a typespec (eg: RECORD_TYPE). The head of the declspec list is stored in DECLSPECS. The head of the attribute list is stored in PREFIX_ATTRIBUTES. Note that attributes in SPECS_ATTRS are stored in the TREE_PURPOSE of the list elements. 
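   (An added illustration, not tied to any particular grammar rule: for a
   declaration written as `__attribute__ ((unused)) static int x;' the parser
   hands this routine a single list mixing the storage-class and type
   specifiers with the `unused' attribute, and the two kinds of entries are
   separated here.)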
We drop the containing TREE_LIST nodes and link the resulting attributes together the way decl_attributes expects them. */ void split_specs_attrs (tree specs_attrs, tree *declspecs, tree *prefix_attributes) { tree t, s, a, next, specs, attrs; /* This can happen after an __extension__ in pedantic mode. */ if (specs_attrs != NULL_TREE && TREE_CODE (specs_attrs) == INTEGER_CST) { *declspecs = NULL_TREE; *prefix_attributes = NULL_TREE; return; } /* This can happen in c++ (eg: decl: typespec initdecls ';'). */ if (specs_attrs != NULL_TREE && TREE_CODE (specs_attrs) != TREE_LIST) { *declspecs = specs_attrs; *prefix_attributes = NULL_TREE; return; } /* Remember to keep the lists in the same order, element-wise. */ specs = s = NULL_TREE; attrs = a = NULL_TREE; for (t = specs_attrs; t; t = next) { next = TREE_CHAIN (t); /* Declspecs have a non-NULL TREE_VALUE. */ if (TREE_VALUE (t) != NULL_TREE) { if (specs == NULL_TREE) specs = s = t; else { TREE_CHAIN (s) = t; s = t; } } /* The TREE_PURPOSE may also be empty in the case of __attribute__(()). */ else if (TREE_PURPOSE (t) != NULL_TREE) { if (attrs == NULL_TREE) attrs = a = TREE_PURPOSE (t); else { TREE_CHAIN (a) = TREE_PURPOSE (t); a = TREE_PURPOSE (t); } /* More attrs can be linked here, move A to the end. */ while (TREE_CHAIN (a) != NULL_TREE) a = TREE_CHAIN (a); } } /* Terminate the lists. */ if (s != NULL_TREE) TREE_CHAIN (s) = NULL_TREE; if (a != NULL_TREE) TREE_CHAIN (a) = NULL_TREE; /* All done. */ *declspecs = specs; *prefix_attributes = attrs; } /* Strip attributes from SPECS_ATTRS, a list of declspecs and attributes. This function is used by the parser when a rule will accept attributes in a particular position, but we don't want to support that just yet. A warning is issued for every ignored attribute. */ tree strip_attrs (tree specs_attrs) { tree specs, attrs; split_specs_attrs (specs_attrs, &specs, &attrs); while (attrs) { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (TREE_PURPOSE (attrs))); attrs = TREE_CHAIN (attrs); } return specs; } /* Various diagnostic subroutines for the GNU C language. Copyright (C) 2000, 2001, 2003 Free Software Foundation, Inc. Contributed by Gabriel Dos Reis This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Issue an ISO C99 pedantic warning MSGID. */ void pedwarn_c99 (const char *msgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, msgid); diagnostic_set_info (&diagnostic, msgid, &ap, input_location, flag_isoc99 ? pedantic_error_kind () : DK_WARNING); report_diagnostic (&diagnostic); va_end (ap); } /* Issue an ISO C90 pedantic warning MSGID. This function is supposed to be used for matters that are allowed in ISO C99 but not supported in ISO C90, thus we explicitly don't pedwarn when C99 is specified. (There is no flag_c90.) */ void pedwarn_c90 (const char *msgid, ...) 
{ diagnostic_info diagnostic; va_list ap; va_start (ap, msgid); diagnostic_set_info (&diagnostic, msgid, &ap, input_location, flag_isoc99 ? DK_WARNING : pedantic_error_kind ()); report_diagnostic (&diagnostic); va_end (ap); } /* Mainly the interface between cpplib and the C front ends. Copyright (C) 1987, 1988, 1989, 1992, 1994, 1995, 1996, 1997 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* We may keep statistics about how long which files took to compile. */ static int header_time, body_time; static splay_tree file_info_tree; #undef WCHAR_TYPE_SIZE #define WCHAR_TYPE_SIZE TYPE_PRECISION (wchar_type_node) /* Number of bytes in a wide character. */ #define WCHAR_BYTES (WCHAR_TYPE_SIZE / BITS_PER_UNIT) int pending_lang_change; /* If we need to switch languages - C++ only */ int c_header_level; /* depth in C headers - C++ only */ /* If we need to translate characters received. This is tri-state: 0 means use only the untranslated string; 1 means use only the translated string; -1 means chain the translated string to the untranslated one. */ int c_lex_string_translate = 1; static tree interpret_integer (const cpp_token *, unsigned int); static tree interpret_float (const cpp_token *, unsigned int); static enum integer_type_kind narrowest_unsigned_type (tree, unsigned int); static enum integer_type_kind narrowest_signed_type (tree, unsigned int); static enum cpp_ttype lex_c_string (const cpp_token *, tree *, bool); static tree lex_charconst (const cpp_token *); static void update_header_times (const char *); static int dump_one_header (splay_tree_node, void *); static void cb_line_change (cpp_reader *, const cpp_token *, int); static void cb_ident (cpp_reader *, unsigned int, const cpp_string *); static void cb_def_pragma (cpp_reader *, unsigned int); static void cb_define (cpp_reader *, unsigned int, cpp_hashnode *); static void cb_undef (cpp_reader *, unsigned int, cpp_hashnode *); void init_c_lex (void) { struct cpp_callbacks *cb; struct c_fileinfo *toplevel; /* Set up filename timing. Must happen before cpp_read_main_file. */ file_info_tree = splay_tree_new ((splay_tree_compare_fn)strcmp, 0, (splay_tree_delete_value_fn)free); toplevel = get_fileinfo (""); if (flag_detailed_statistics) { header_time = 0; body_time = get_run_time (); toplevel->time = body_time; } cb = cpp_get_callbacks (parse_in); cb->line_change = cb_line_change; cb->ident = cb_ident; cb->def_pragma = cb_def_pragma; cb->valid_pch = c_common_valid_pch; cb->read_pch = c_common_read_pch; /* Set the debug callbacks if we can use them. 
*/ if (debug_info_level == DINFO_LEVEL_VERBOSE && (write_symbols == DWARF_DEBUG || write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)) { cb->define = cb_define; cb->undef = cb_undef; } } struct c_fileinfo * get_fileinfo (const char *name) { splay_tree_node n; struct c_fileinfo *fi; n = splay_tree_lookup (file_info_tree, (splay_tree_key) name); if (n) return (struct c_fileinfo *) n->value; fi = xmalloc (sizeof (struct c_fileinfo)); fi->time = 0; fi->interface_only = 0; fi->interface_unknown = 1; splay_tree_insert (file_info_tree, (splay_tree_key) name, (splay_tree_value) fi); return fi; } static void update_header_times (const char *name) { /* Changing files again. This means currently collected time is charged against header time, and body time starts back at 0. */ if (flag_detailed_statistics) { int this_time = get_run_time (); struct c_fileinfo *file = get_fileinfo (name); header_time += this_time - body_time; file->time += this_time - body_time; body_time = this_time; } } static int dump_one_header (splay_tree_node n, void *dummy ATTRIBUTE_UNUSED) { print_time ((const char *) n->key, ((struct c_fileinfo *) n->value)->time); return 0; } void dump_time_statistics (void) { struct c_fileinfo *file = get_fileinfo (input_filename); int this_time = get_run_time (); file->time += this_time - body_time; fprintf (stderr, "\n******\n"); print_time ("header files (total)", header_time); print_time ("main file (total)", this_time - body_time); fprintf (stderr, "ratio = %g : 1\n", (double)header_time / (double)(this_time - body_time)); fprintf (stderr, "\n******\n"); splay_tree_foreach (file_info_tree, dump_one_header, 0); } static void cb_ident (cpp_reader *pfile ATTRIBUTE_UNUSED, unsigned int line ATTRIBUTE_UNUSED, const cpp_string *str ATTRIBUTE_UNUSED) { #ifdef ASM_OUTPUT_IDENT if (! flag_no_ident) { /* Convert escapes in the string. */ cpp_string cstr = { 0, 0 }; if (cpp_interpret_string (pfile, str, 1, &cstr, false)) { ASM_OUTPUT_IDENT (asm_out_file, (const char *) cstr.text); free ((void *)cstr.text); } } #endif } /* Called at the start of every non-empty line. TOKEN is the first lexed token on the line. Used for diagnostic line numbers. */ static void cb_line_change (cpp_reader *pfile ATTRIBUTE_UNUSED, const cpp_token *token, int parsing_args) { if (token->type != CPP_EOF && !parsing_args) #ifdef USE_MAPPED_LOCATION input_location = token->src_loc; #else { source_location loc = token->src_loc; const struct line_map *map = linemap_lookup (&line_table, loc); input_line = SOURCE_LINE (map, loc); } #endif } void fe_file_change (const struct line_map *new_map) { if (new_map == NULL) return; if (new_map->reason == LC_ENTER) { /* Don't stack the main buffer on the input stack; we already did in compile_file. */ if (! 
MAIN_FILE_P (new_map)) { #ifdef USE_MAPPED_LOCATION int included_at = LAST_SOURCE_LINE_LOCATION (new_map - 1); input_location = included_at; push_srcloc (new_map->start_location); #else int included_at = LAST_SOURCE_LINE (new_map - 1); input_line = included_at; push_srcloc (new_map->to_file, 1); #endif (*debug_hooks->start_source_file) (included_at, new_map->to_file); #ifndef NO_IMPLICIT_EXTERN_C if (c_header_level) ++c_header_level; else if (new_map->sysp == 2) { c_header_level = 1; ++pending_lang_change; } #endif } } else if (new_map->reason == LC_LEAVE) { #ifndef NO_IMPLICIT_EXTERN_C if (c_header_level && --c_header_level == 0) { if (new_map->sysp == 2) warning ("badly nested C headers from preprocessor"); --pending_lang_change; } #endif pop_srcloc (); (*debug_hooks->end_source_file) (new_map->to_line); } update_header_times (new_map->to_file); in_system_header = new_map->sysp != 0; #ifdef USE_MAPPED_LOCATION input_location = new_map->start_location; #else input_filename = new_map->to_file; input_line = new_map->to_line; #endif /* Hook for C++. */ extract_interface_info (); } static void cb_def_pragma (cpp_reader *pfile, source_location loc) { /* Issue a warning message if we have been asked to do so. Ignore unknown pragmas in system headers unless an explicit -Wunknown-pragmas has been given. */ if (warn_unknown_pragmas > in_system_header) { #ifndef USE_MAPPED_LOCATION const struct line_map *map = linemap_lookup (&line_table, loc); #endif const unsigned char *space, *name; const cpp_token *s; space = name = (const unsigned char *) ""; s = cpp_get_token (pfile); if (s->type != CPP_EOF) { space = cpp_token_as_text (pfile, s); s = cpp_get_token (pfile); if (s->type == CPP_NAME) name = cpp_token_as_text (pfile, s); } #ifdef USE_MAPPED_LOCATION input_location = loc; #else input_line = SOURCE_LINE (map, loc); #endif warning ("ignoring #pragma %s %s", space, name); } } /* #define callback for DWARF and DWARF2 debug info. */ static void cb_define (cpp_reader *pfile, source_location loc, cpp_hashnode *node) { const struct line_map *map = linemap_lookup (&line_table, loc); (*debug_hooks->define) (SOURCE_LINE (map, loc), (const char *) cpp_macro_definition (pfile, node)); } /* #undef callback for DWARF and DWARF2 debug info. */ static void cb_undef (cpp_reader *pfile ATTRIBUTE_UNUSED, source_location loc, cpp_hashnode *node) { const struct line_map *map = linemap_lookup (&line_table, loc); (*debug_hooks->undef) (SOURCE_LINE (map, loc), (const char *) NODE_NAME (node)); } static inline const cpp_token * get_nonpadding_token (void) { const cpp_token *tok; timevar_push (TV_CPP); do tok = cpp_get_token (parse_in); while (tok->type == CPP_PADDING); timevar_pop (TV_CPP); return tok; } int c_lex_with_flags (tree *value, unsigned char *cpp_flags) { const cpp_token *tok; location_t atloc; static bool no_more_pch; retry: tok = get_nonpadding_token (); retry_after_at: switch (tok->type) { case CPP_NAME: *value = HT_IDENT_TO_GCC_IDENT (HT_NODE (tok->val.node)); break; case CPP_NUMBER: { unsigned int flags = cpp_classify_number (parse_in, tok); switch (flags & CPP_N_CATEGORY) { case CPP_N_INVALID: /* cpplib has issued an error. */ *value = error_mark_node; break; case CPP_N_INTEGER: *value = interpret_integer (tok, flags); break; case CPP_N_FLOATING: *value = interpret_float (tok, flags); break; default: abort (); } } break; case CPP_ATSIGN: /* An @ may give the next token special significance in Objective-C. 
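   For example, `@interface', `@selector', or an `@"..."' string constant;
   in plain C a stray `@' is simply diagnosed below.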
*/ atloc = input_location; tok = get_nonpadding_token (); if (c_dialect_objc ()) { tree val; switch (tok->type) { case CPP_NAME: val = HT_IDENT_TO_GCC_IDENT (HT_NODE (tok->val.node)); if (C_IS_RESERVED_WORD (val) && OBJC_IS_AT_KEYWORD (C_RID_CODE (val))) { *value = val; return CPP_AT_NAME; } break; case CPP_STRING: case CPP_WSTRING: return lex_c_string (tok, value, true); default: break; } } /* ... or not. */ error ("%Hstray '@' in program", &atloc); goto retry_after_at; case CPP_OTHER: { cppchar_t c = tok->val.str.text[0]; if (c == '"' || c == '\'') error ("missing terminating %c character", (int) c); else if (ISGRAPH (c)) error ("stray '%c' in program", (int) c); else error ("stray '\\%o' in program", (int) c); } goto retry; case CPP_CHAR: case CPP_WCHAR: *value = lex_charconst (tok); break; case CPP_STRING: case CPP_WSTRING: return lex_c_string (tok, value, false); break; /* These tokens should not be visible outside cpplib. */ case CPP_HEADER_NAME: case CPP_COMMENT: case CPP_MACRO_ARG: abort (); default: *value = NULL_TREE; break; } if (! no_more_pch) { no_more_pch = true; c_common_no_more_pch (); } if (cpp_flags) *cpp_flags = tok->flags; return tok->type; } int c_lex (tree *value) { return c_lex_with_flags (value, NULL); } /* Returns the narrowest C-visible unsigned type, starting with the minimum specified by FLAGS, that can fit VALUE, or itk_none if there isn't one. */ static enum integer_type_kind narrowest_unsigned_type (tree value, unsigned int flags) { enum integer_type_kind itk; if ((flags & CPP_N_WIDTH) == CPP_N_SMALL) itk = itk_unsigned_int; else if ((flags & CPP_N_WIDTH) == CPP_N_MEDIUM) itk = itk_unsigned_long; else itk = itk_unsigned_long_long; /* int_fits_type_p must think the type of its first argument is wider than its second argument, or it won't do the proper check. */ TREE_TYPE (value) = widest_unsigned_literal_type_node; for (; itk < itk_none; itk += 2 /* skip unsigned types */) if (int_fits_type_p (value, integer_types[itk])) return itk; return itk_none; } /* Ditto, but narrowest signed type. */ static enum integer_type_kind narrowest_signed_type (tree value, unsigned int flags) { enum integer_type_kind itk; if ((flags & CPP_N_WIDTH) == CPP_N_SMALL) itk = itk_int; else if ((flags & CPP_N_WIDTH) == CPP_N_MEDIUM) itk = itk_long; else itk = itk_long_long; /* int_fits_type_p must think the type of its first argument is wider than its second argument, or it won't do the proper check. */ TREE_TYPE (value) = widest_unsigned_literal_type_node; for (; itk < itk_none; itk += 2 /* skip signed types */) if (int_fits_type_p (value, integer_types[itk])) return itk; return itk_none; } /* Interpret TOKEN, an integer with FLAGS as classified by cpplib. */ static tree interpret_integer (const cpp_token *token, unsigned int flags) { tree value, type; enum integer_type_kind itk; cpp_num integer; cpp_options *options = cpp_get_options (parse_in); integer = cpp_interpret_integer (parse_in, token, flags); integer = cpp_num_sign_extend (integer, options->precision); value = build_int_2_wide (integer.low, integer.high); /* The type of a constant with a U suffix is straightforward. */ if (flags & CPP_N_UNSIGNED) itk = narrowest_unsigned_type (value, flags); else { /* The type of a potentially-signed integer constant varies depending on the base it's in, the standard in use, and the length suffixes. 
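A hedged worked example (illustrative only, assuming a target with 32-bit int and long and 64-bit long long): the hexadecimal constant 0x80000000 does not fit in int but does fit in unsigned int, so the MIN rule below gives it type unsigned int; the decimal constant 2147483648 gets type long long under C99 rules, while under C90 rules it is made unsigned long and the "unsigned only in ISO C90" warning below is issued.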
*/ enum integer_type_kind itk_u = narrowest_unsigned_type (value, flags); enum integer_type_kind itk_s = narrowest_signed_type (value, flags); /* In both C89 and C99, octal and hex constants may be signed or unsigned, whichever fits tighter. We do not warn about this choice differing from the traditional choice, as the constant is probably a bit pattern and either way will work. */ if ((flags & CPP_N_RADIX) != CPP_N_DECIMAL) itk = MIN (itk_u, itk_s); else { /* In C99, decimal constants are always signed. In C89, decimal constants that don't fit in long have undefined behavior; we try to make them unsigned long. In GCC's extended C89, that last is true of decimal constants that don't fit in long long, too. */ itk = itk_s; if (itk_s > itk_u && itk_s > itk_long) { if (!flag_isoc99) { if (itk_u < itk_unsigned_long) itk_u = itk_unsigned_long; itk = itk_u; warning ("this decimal constant is unsigned only in ISO C90"); } else if (warn_traditional) warning ("this decimal constant would be unsigned in ISO C90"); } } } if (itk == itk_none) /* cpplib has already issued a warning for overflow. */ type = ((flags & CPP_N_UNSIGNED) ? widest_unsigned_literal_type_node : widest_integer_literal_type_node); else type = integer_types[itk]; if (itk > itk_unsigned_long && (flags & CPP_N_WIDTH) != CPP_N_LARGE && ! in_system_header && ! flag_isoc99) pedwarn ("integer constant is too large for \"%s\" type", (flags & CPP_N_UNSIGNED) ? "unsigned long" : "long"); TREE_TYPE (value) = type; /* Convert imaginary to a complex type. */ if (flags & CPP_N_IMAGINARY) value = build_complex (NULL_TREE, convert (type, integer_zero_node), value); return value; } /* Interpret TOKEN, a floating point number with FLAGS as classified by cpplib. */ static tree interpret_float (const cpp_token *token, unsigned int flags) { tree type; tree value; REAL_VALUE_TYPE real; char *copy; size_t copylen; const char *typename; /* FIXME: make %T work in error/warning, then we don't need typename. */ if ((flags & CPP_N_WIDTH) == CPP_N_LARGE) { type = long_double_type_node; typename = "long double"; } else if ((flags & CPP_N_WIDTH) == CPP_N_SMALL || flag_single_precision_constant) { type = float_type_node; typename = "float"; } else { type = double_type_node; typename = "double"; } /* Copy the constant to a nul-terminated buffer. If the constant has any suffixes, cut them off; REAL_VALUE_ATOF/ REAL_VALUE_HTOF can't handle them. */ copylen = token->val.str.len; if ((flags & CPP_N_WIDTH) != CPP_N_MEDIUM) /* Must be an F or L suffix. */ copylen--; if (flags & CPP_N_IMAGINARY) /* I or J suffix. */ copylen--; copy = alloca (copylen + 1); memcpy (copy, token->val.str.text, copylen); copy[copylen] = '\0'; real_from_string (&real, copy); real_convert (&real, TYPE_MODE (type), &real); /* A diagnostic is required for "soft" overflow by some ISO C testsuites. This is not pedwarn, because some people don't want an error for this. ??? That's a dubious reason... is this a mandatory diagnostic or isn't it? -- zw, 2001-08-21. */ if (REAL_VALUE_ISINF (real) && pedantic) warning ("floating constant exceeds range of \"%s\"", typename); /* Create a node with determined type and value. */ value = build_real (type, real); if (flags & CPP_N_IMAGINARY) value = build_complex (NULL_TREE, convert (type, integer_zero_node), value); return value; } /* Convert a series of STRING and/or WSTRING tokens into a tree, performing string constant concatenation. TOK is the first of these. VALP is the location to write the string into. 
OBJC_STRING indicates whether an '@' token preceded the incoming token. Returns the CPP token type of the result (CPP_STRING, CPP_WSTRING, or CPP_OBJC_STRING). This is unfortunately more work than it should be. If any of the strings in the series has an L prefix, the result is a wide string (6.4.5p4). Whether or not the result is a wide string affects the meaning of octal and hexadecimal escapes (6.4.4.4p6,9). But escape sequences do not continue across the boundary between two strings in a series (6.4.5p7), so we must not lose the boundaries. Therefore cpp_interpret_string takes a vector of cpp_string structures, which we must arrange to provide. */ static enum cpp_ttype lex_c_string (const cpp_token *tok, tree *valp, bool objc_string) { tree value; bool wide = false; size_t count = 1; struct obstack str_ob; cpp_string istr; /* Try to avoid the overhead of creating and destroying an obstack for the common case of just one string. */ cpp_string str = tok->val.str; cpp_string *strs = &str; if (tok->type == CPP_WSTRING) wide = true; tok = get_nonpadding_token (); if (c_dialect_objc () && tok->type == CPP_ATSIGN) { objc_string = true; tok = get_nonpadding_token (); } if (tok->type == CPP_STRING || tok->type == CPP_WSTRING) { gcc_obstack_init (&str_ob); obstack_grow (&str_ob, &str, sizeof (cpp_string)); do { count++; if (tok->type == CPP_WSTRING) wide = true; obstack_grow (&str_ob, &tok->val.str, sizeof (cpp_string)); tok = get_nonpadding_token (); if (c_dialect_objc () && tok->type == CPP_ATSIGN) { objc_string = true; tok = get_nonpadding_token (); } } while (tok->type == CPP_STRING || tok->type == CPP_WSTRING); strs = obstack_finish (&str_ob); } /* We have read one more token than we want. */ _cpp_backup_tokens (parse_in, 1); if (count > 1 && !objc_string && warn_traditional && !in_system_header) warning ("traditional C rejects string constant concatenation"); if ((c_lex_string_translate ? cpp_interpret_string : cpp_interpret_string_notranslate) (parse_in, strs, count, &istr, wide)) { value = build_string (istr.len, (char *)istr.text); free ((void *)istr.text); if (c_lex_string_translate == -1) { if (!cpp_interpret_string_notranslate (parse_in, strs, count, &istr, wide)) /* Assume that, if we managed to translate the string above, then the untranslated parsing will always succeed. */ abort (); if (TREE_STRING_LENGTH (value) != (int)istr.len || 0 != strncmp (TREE_STRING_POINTER (value), (char *)istr.text, istr.len)) { /* Arrange for us to return the untranslated string in *valp, but to set up the C type of the translated one. */ *valp = build_string (istr.len, (char *)istr.text); valp = &TREE_CHAIN (*valp); } free ((void *)istr.text); } } else { /* Callers cannot generally handle error_mark_node in this context, so return the empty string instead. cpp_interpret_string has issued an error. */ if (wide) value = build_string (TYPE_PRECISION (wchar_type_node) / TYPE_PRECISION (char_type_node), "\0\0\0"); /* widest supported wchar_t is 32 bits */ else value = build_string (1, ""); } TREE_TYPE (value) = wide ? wchar_array_type_node : char_array_type_node; *valp = fix_string_type (value); if (strs != &str) obstack_free (&str_ob, 0); return objc_string ? CPP_OBJC_STRING : wide ? CPP_WSTRING : CPP_STRING; } /* Converts a (possibly wide) character constant token into a tree. 
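(For example, in C the constant 'a' has type int, in C++ it has type char, a multi-character constant such as 'ab' has type int in either language, and a wide constant such as L'a' gets wchar_type_node.)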
*/ static tree lex_charconst (const cpp_token *token) { cppchar_t result; tree type, value; unsigned int chars_seen; int unsignedp; result = cpp_interpret_charconst (parse_in, token, &chars_seen, &unsignedp); /* Cast to cppchar_signed_t to get correct sign-extension of RESULT before possibly widening to HOST_WIDE_INT for build_int_2. */ if (unsignedp || (cppchar_signed_t) result >= 0) value = build_int_2 (result, 0); else value = build_int_2 ((cppchar_signed_t) result, -1); if (token->type == CPP_WCHAR) type = wchar_type_node; /* In C, a character constant has type 'int'. In C++ 'char', but multi-char charconsts have type 'int'. */ else if (!c_dialect_cxx () || chars_seen > 1) type = integer_type_node; else type = char_type_node; TREE_TYPE (value) = type; return value; } /* Handle #pragma, system V.4 style. Supports #pragma weak and #pragma pack. Copyright (C) 1992, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define GCC_BAD(msgid) do { warning (msgid); return; } while (0) #define GCC_BAD2(msgid, arg) do { warning (msgid, arg); return; } while (0) typedef struct align_stack GTY(()) { int alignment; unsigned int num_pushes; tree id; struct align_stack * prev; } align_stack; static GTY(()) struct align_stack * alignment_stack; #ifdef HANDLE_PRAGMA_PACK static void handle_pragma_pack (cpp_reader *); #ifdef HANDLE_PRAGMA_PACK_PUSH_POP /* If we have a "global" #pragma pack() in effect when the first #pragma pack(push,) is encountered, this stores the value of maximum_field_alignment in effect. When the final pop_alignment() happens, we restore the value to this, not to a value of 0 for maximum_field_alignment. Value is in bits. */ static int default_alignment; #define SET_GLOBAL_ALIGNMENT(ALIGN) \ (default_alignment = maximum_field_alignment = (ALIGN)) static void push_alignment (int, tree); static void pop_alignment (tree); /* Push an alignment value onto the stack. */ static void push_alignment (int alignment, tree id) { if (alignment_stack == NULL || alignment_stack->alignment != alignment || id != NULL_TREE) { align_stack * entry; entry = ggc_alloc (sizeof (* entry)); entry->alignment = alignment; entry->num_pushes = 1; entry->id = id; entry->prev = alignment_stack; /* The current value of maximum_field_alignment is not necessarily 0 since there may be a #pragma pack() in effect; remember it so that we can restore it after the final #pragma pop(). */ if (alignment_stack == NULL) default_alignment = maximum_field_alignment; alignment_stack = entry; maximum_field_alignment = alignment; } else alignment_stack->num_pushes ++; } /* Undo a push of an alignment onto the stack. 
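As a rough illustration (hypothetical user code, not part of this file):

      #pragma pack(push, 1)
      struct wire { char tag; int value; };
      #pragma pack(pop)

   The push saves the alignment previously in effect and sets maximum_field_alignment to 8 bits, so struct wire is laid out without padding; the matching pop below restores the saved value (or default_alignment once the stack empties).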
*/ static void pop_alignment (tree id) { align_stack * entry; if (alignment_stack == NULL) { warning ("\ #pragma pack (pop) encountered without matching #pragma pack (push, <n>)" ); return; } /* If we got an identifier, strip away everything above the target entry so that the next step will restore the state just below it. */ if (id) { for (entry = alignment_stack; entry; entry = entry->prev) if (entry->id == id) { entry->num_pushes = 1; alignment_stack = entry; break; } if (entry == NULL) warning ("\ #pragma pack(pop, %s) encountered without matching #pragma pack(push, %s, <n>)" , IDENTIFIER_POINTER (id), IDENTIFIER_POINTER (id)); } if (-- alignment_stack->num_pushes == 0) { entry = alignment_stack->prev; if (entry == NULL) maximum_field_alignment = default_alignment; else maximum_field_alignment = entry->alignment; alignment_stack = entry; } } #else /* not HANDLE_PRAGMA_PACK_PUSH_POP */ #define SET_GLOBAL_ALIGNMENT(ALIGN) (maximum_field_alignment = (ALIGN)) #define push_alignment(ID, N) \ GCC_BAD("#pragma pack(push[, id], <n>) is not supported on this target") #define pop_alignment(ID) \ GCC_BAD("#pragma pack(pop[, id], <n>) is not supported on this target") #endif /* HANDLE_PRAGMA_PACK_PUSH_POP */ /* #pragma pack () #pragma pack (N) #pragma pack (push, N) #pragma pack (push, ID, N) #pragma pack (pop) #pragma pack (pop, ID) */ static void handle_pragma_pack (cpp_reader *dummy ATTRIBUTE_UNUSED) { tree x, id = 0; int align = -1; enum cpp_ttype token; enum { set, push, pop } action; if (c_lex (&x) != CPP_OPEN_PAREN) GCC_BAD ("missing '(' after '#pragma pack' - ignored"); token = c_lex (&x); if (token == CPP_CLOSE_PAREN) { action = set; align = 0; } else if (token == CPP_NUMBER) { align = TREE_INT_CST_LOW (x); action = set; if (c_lex (&x) != CPP_CLOSE_PAREN) GCC_BAD ("malformed '#pragma pack' - ignored"); } else if (token == CPP_NAME) { #define GCC_BAD_ACTION do { if (action == push) \ GCC_BAD ("malformed '#pragma pack(push[, id], <n>)' - ignored"); \ else \ GCC_BAD ("malformed '#pragma pack(pop[, id])' - ignored"); \ } while (0) const char *op = IDENTIFIER_POINTER (x); if (!strcmp (op, "push")) action = push; else if (!strcmp (op, "pop")) action = pop; else GCC_BAD2 ("unknown action '%s' for '#pragma pack' - ignored", op); token = c_lex (&x); if (token != CPP_COMMA && action == push) GCC_BAD_ACTION; if (token == CPP_COMMA) { token = c_lex (&x); if (token == CPP_NAME) { id = x; if (action == push && c_lex (&x) != CPP_COMMA) GCC_BAD_ACTION; token = c_lex (&x); } if (action == push) { if (token == CPP_NUMBER) { align = TREE_INT_CST_LOW (x); token = c_lex (&x); } else GCC_BAD_ACTION; } } if (token != CPP_CLOSE_PAREN) GCC_BAD_ACTION; #undef GCC_BAD_ACTION } else GCC_BAD ("malformed '#pragma pack' - ignored"); if (c_lex (&x) != CPP_EOF) warning ("junk at end of '#pragma pack'"); if (action != pop) switch (align) { case 0: case 1: case 2: case 4: case 8: case 16: align *= BITS_PER_UNIT; break; default: GCC_BAD2 ("alignment must be a small power of two, not %d", align); } switch (action) { case set: SET_GLOBAL_ALIGNMENT (align); break; case push: push_alignment (align, id); break; case pop: pop_alignment (id); break; } } #endif /* HANDLE_PRAGMA_PACK */ static GTY(()) tree pending_weaks; #ifdef HANDLE_PRAGMA_WEAK static void apply_pragma_weak (tree, tree); static void handle_pragma_weak (cpp_reader *); static void apply_pragma_weak (tree decl, tree value) { if (value) { value = build_string (IDENTIFIER_LENGTH (value), IDENTIFIER_POINTER (value)); decl_attributes (&decl, build_tree_list (get_identifier ("alias"),
build_tree_list (NULL, value)), 0); } if (SUPPORTS_WEAK && DECL_EXTERNAL (decl) && TREE_USED (decl) && !DECL_WEAK (decl) /* Don't complain about a redundant #pragma. */ && TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))) warning ("%Japplying #pragma weak '%D' after first use results " "in unspecified behavior", decl, decl); declare_weak (decl); } void maybe_apply_pragma_weak (tree decl) { tree *p, t, id; /* Avoid asking for DECL_ASSEMBLER_NAME when it's not needed. */ /* No weak symbols pending, take the short-cut. */ if (!pending_weaks) return; /* If it's not visible outside this file, it doesn't matter whether it's weak. */ if (!DECL_EXTERNAL (decl) && !TREE_PUBLIC (decl)) return; /* If it's not a function or a variable, it can't be weak. FIXME: what kinds of things are visible outside this file but aren't functions or variables? Should this be an abort() instead? */ if (TREE_CODE (decl) != FUNCTION_DECL && TREE_CODE (decl) != VAR_DECL) return; id = DECL_ASSEMBLER_NAME (decl); for (p = &pending_weaks; (t = *p) ; p = &TREE_CHAIN (t)) if (id == TREE_PURPOSE (t)) { apply_pragma_weak (decl, TREE_VALUE (t)); *p = TREE_CHAIN (t); break; } } /* #pragma weak name [= value] */ static void handle_pragma_weak (cpp_reader *dummy ATTRIBUTE_UNUSED) { tree name, value, x, decl; enum cpp_ttype t; value = 0; if (c_lex (&name) != CPP_NAME) GCC_BAD ("malformed #pragma weak, ignored"); t = c_lex (&x); if (t == CPP_EQ) { if (c_lex (&value) != CPP_NAME) GCC_BAD ("malformed #pragma weak, ignored"); t = c_lex (&x); } if (t != CPP_EOF) warning ("junk at end of #pragma weak"); decl = identifier_global_value (name); if (decl && TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') { apply_pragma_weak (decl, value); if (value) assemble_alias (decl, value); } else pending_weaks = tree_cons (name, value, pending_weaks); } #else void maybe_apply_pragma_weak (tree decl ATTRIBUTE_UNUSED) { } #endif /* HANDLE_PRAGMA_WEAK */ /* GCC supports two #pragma directives for renaming the external symbol associated with a declaration (DECL_ASSEMBLER_NAME), for compatibility with the Solaris and Tru64 system headers. GCC also has its own notation for this, __asm__("name") annotations. Corner cases of these features and their interaction: 1) Both pragmas silently apply only to declarations with external linkage (that is, TREE_PUBLIC || DECL_EXTERNAL). Asm labels do not have this restriction. 2) In C++, both #pragmas silently apply only to extern "C" declarations. Asm labels do not have this restriction. 3) If any of the three ways of changing DECL_ASSEMBLER_NAME is applied to a decl whose DECL_ASSEMBLER_NAME is already set, and the new name is different, a warning issues and the name does not change. 4) The "source name" for #pragma redefine_extname is the DECL_NAME, *not* the DECL_ASSEMBLER_NAME. 5) If #pragma extern_prefix is in effect and a declaration occurs with an __asm__ name, the #pragma extern_prefix is silently ignored for that declaration. 6) If #pragma extern_prefix and #pragma redefine_extname apply to the same declaration, whichever triggered first wins, and a warning is issued. (We would like to have #pragma redefine_extname always win, but it can appear either before or after the declaration, and if it appears afterward, we have no way of knowing whether a modified DECL_ASSEMBLER_NAME is due to #pragma extern_prefix.) 
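A hedged sketch of the intended effect (hypothetical declarations, not taken from any real system header):

      #pragma redefine_extname open open64
      extern int open (const char *, int);

   gives the C-level name 'open' the assembler name 'open64' (the source name is the DECL_NAME, per point 4), while

      #pragma extern_prefix "_sys_"
      extern int init_io (void);

   would emit the symbol as '_sys_init_io' unless an __asm__ label or a redefine_extname already fixed the name for that declaration (points 5 and 6).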
*/ static GTY(()) tree pending_redefine_extname; static void handle_pragma_redefine_extname (cpp_reader *); /* #pragma redefine_extname oldname newname */ static void handle_pragma_redefine_extname (cpp_reader *dummy ATTRIBUTE_UNUSED) { tree oldname, newname, decl, x; enum cpp_ttype t; if (c_lex (&oldname) != CPP_NAME) GCC_BAD ("malformed #pragma redefine_extname, ignored"); if (c_lex (&newname) != CPP_NAME) GCC_BAD ("malformed #pragma redefine_extname, ignored"); t = c_lex (&x); if (t != CPP_EOF) warning ("junk at end of #pragma redefine_extname"); if (!flag_mudflap && !targetm.handle_pragma_redefine_extname) { if (warn_unknown_pragmas > in_system_header) warning ("#pragma redefine_extname not supported on this target"); return; } decl = identifier_global_value (oldname); if (decl && (TREE_PUBLIC (decl) || DECL_EXTERNAL (decl)) && (TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL) && has_c_linkage (decl)) { if (DECL_ASSEMBLER_NAME_SET_P (decl)) { const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); name = targetm.strip_name_encoding (name); if (strcmp (name, IDENTIFIER_POINTER (newname))) warning ("#pragma redefine_extname ignored due to conflict with " "previous rename"); } else change_decl_assembler_name (decl, newname); } else /* We have to add this to the rename list even if there's already a global value that doesn't meet the above criteria, because in C++ "struct foo {...};" puts "foo" in the current namespace but does *not* conflict with a subsequent declaration of a function or variable foo. See g++.dg/other/pragma-re-2.C. */ add_to_renaming_pragma_list (oldname, newname); } /* This is called from here and from ia64.c. */ void add_to_renaming_pragma_list (tree oldname, tree newname) { tree previous = purpose_member (oldname, pending_redefine_extname); if (previous) { if (TREE_VALUE (previous) != newname) warning ("#pragma redefine_extname ignored due to conflict with " "previous #pragma redefine_extname"); return; } pending_redefine_extname = tree_cons (oldname, newname, pending_redefine_extname); } static GTY(()) tree pragma_extern_prefix; /* #pragma extern_prefix "prefix" */ static void handle_pragma_extern_prefix (cpp_reader *dummy ATTRIBUTE_UNUSED) { tree prefix, x; enum cpp_ttype t; if (c_lex (&prefix) != CPP_STRING) GCC_BAD ("malformed #pragma extern_prefix, ignored"); t = c_lex (&x); if (t != CPP_EOF) warning ("junk at end of #pragma extern_prefix"); if (targetm.handle_pragma_extern_prefix) /* Note that the length includes the null terminator. */ pragma_extern_prefix = (TREE_STRING_LENGTH (prefix) > 1 ? prefix : NULL); else if (warn_unknown_pragmas > in_system_header) warning ("#pragma extern_prefix not supported on this target"); } /* Hook from the front ends to apply the results of one of the preceding pragmas that rename variables. */ tree maybe_apply_renaming_pragma (tree decl, tree asmname) { tree *p, t; /* The renaming pragmas are only applied to declarations with external linkage. */ if ((TREE_CODE (decl) != FUNCTION_DECL && TREE_CODE (decl) != VAR_DECL) || (!TREE_PUBLIC (decl) && !DECL_EXTERNAL (decl)) || !has_c_linkage (decl)) return asmname; /* If the DECL_ASSEMBLER_NAME is already set, it does not change, but we may warn about a rename that conflicts. 
*/ if (DECL_ASSEMBLER_NAME_SET_P (decl)) { const char *oldname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); oldname = targetm.strip_name_encoding (oldname); if (asmname && strcmp (TREE_STRING_POINTER (asmname), oldname)) warning ("asm declaration ignored due to " "conflict with previous rename"); /* Take any pending redefine_extname off the list. */ for (p = &pending_redefine_extname; (t = *p); p = &TREE_CHAIN (t)) if (DECL_NAME (decl) == TREE_PURPOSE (t)) { /* Only warn if there is a conflict. */ if (strcmp (IDENTIFIER_POINTER (TREE_VALUE (t)), oldname)) warning ("#pragma redefine_extname ignored due to " "conflict with previous rename"); *p = TREE_CHAIN (t); break; } return 0; } /* Find out if we have a pending #pragma redefine_extname. */ for (p = &pending_redefine_extname; (t = *p); p = &TREE_CHAIN (t)) if (DECL_NAME (decl) == TREE_PURPOSE (t)) { tree newname = TREE_VALUE (t); *p = TREE_CHAIN (t); /* If we already have an asmname, #pragma redefine_extname is ignored (with a warning if it conflicts). */ if (asmname) { if (strcmp (TREE_STRING_POINTER (asmname), IDENTIFIER_POINTER (newname)) != 0) warning ("#pragma redefine_extname ignored due to " "conflict with __asm__ declaration"); return asmname; } /* Otherwise we use what we've got; #pragma extern_prefix is silently ignored. */ return build_string (IDENTIFIER_LENGTH (newname), IDENTIFIER_POINTER (newname)); } /* If we've got an asmname, #pragma extern_prefix is silently ignored. */ if (asmname) return asmname; /* If #pragma extern_prefix is in effect, apply it. */ if (pragma_extern_prefix) { const char *prefix = TREE_STRING_POINTER (pragma_extern_prefix); size_t plen = TREE_STRING_LENGTH (pragma_extern_prefix) - 1; const char *id = IDENTIFIER_POINTER (DECL_NAME (decl)); size_t ilen = IDENTIFIER_LENGTH (DECL_NAME (decl)); char *newname = alloca (plen + ilen + 1); memcpy (newname, prefix, plen); memcpy (newname + plen, id, ilen + 1); return build_string (plen + ilen, newname); } /* Nada. */ return 0; } /* Front-end wrapper for pragma registration to avoid dragging cpplib.h in almost everywhere. */ void c_register_pragma (const char *space, const char *name, void (*handler) (struct cpp_reader *)) { cpp_register_pragma (parse_in, space, name, handler); } /* Set up front-end pragmas. */ void init_pragma (void) { #ifdef HANDLE_PRAGMA_PACK c_register_pragma (0, "pack", handle_pragma_pack); #endif #ifdef HANDLE_PRAGMA_WEAK c_register_pragma (0, "weak", handle_pragma_weak); #endif c_register_pragma (0, "redefine_extname", handle_pragma_redefine_extname); c_register_pragma (0, "extern_prefix", handle_pragma_extern_prefix); c_register_pragma ("GCC", "pch_preprocess", c_common_pch_pragma); #ifdef REGISTER_TARGET_PRAGMAS REGISTER_TARGET_PRAGMAS (); #endif } /* Type information for c-pragma.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. 
Do not edit. */ void gt_ggc_mx_align_stack (void *x_p) { struct align_stack * const x = (struct align_stack *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).id); gt_ggc_m_11align_stack ((*x).prev); } } void gt_pch_nx_align_stack (void *x_p) { struct align_stack * const x = (struct align_stack *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_11align_stack)) { gt_pch_n_9tree_node ((*x).id); gt_pch_n_11align_stack ((*x).prev); } } void gt_pch_p_11align_stack (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct align_stack * const x ATTRIBUTE_UNUSED = (struct align_stack *)x_p; if ((void *)(x) == this_obj) op (&((*x).id), cookie); if ((void *)(x) == this_obj) op (&((*x).prev), cookie); } /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_c_pragma_h[] = { { &pragma_extern_prefix, 1, sizeof (pragma_extern_prefix), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &pending_redefine_extname, 1, sizeof (pending_redefine_extname), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &pending_weaks, 1, sizeof (pending_weaks), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &alignment_stack, 1, sizeof (alignment_stack), &gt_ggc_mx_align_stack, &gt_pch_nx_align_stack }, LAST_GGC_ROOT_TAB }; /* Process declarations and variables for C compiler. Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Process declarations and symbol lookup for C front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. */ /* In grokdeclarator, distinguish syntactic contexts of declarators. */ enum decl_context { NORMAL, /* Ordinary declaration */ FUNCDEF, /* Function definition */ PARM, /* Declaration of parm before function body */ FIELD, /* Declaration inside struct or union */ TYPENAME}; /* Typename (inside cast or sizeof) */ /* Nonzero if we have seen an invalid cross reference to a struct, union, or enum, but not yet printed the message. */ tree pending_invalid_xref; /* File and line to appear in the eventual error message. */ location_t pending_invalid_xref_location; /* True means we've initialized exception handling. */ bool c_eh_initialized_p; /* While defining an enum type, this is 1 plus the last enumerator constant value. Note that we do not have to save this or `enum_overflow' around nested function definition since such a definition could only occur in an enum value expression and we don't use these variables in that case. */ static tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. 
*/ static int enum_overflow; /* These #defines are for clarity in working with the information block returned by get_parm_info. */ #define ARG_INFO_PARMS(args) TREE_PURPOSE(args) #define ARG_INFO_TAGS(args) TREE_VALUE(args) #define ARG_INFO_TYPES(args) TREE_CHAIN(args) #define ARG_INFO_OTHERS(args) TREE_TYPE(args) /* The file and line that the prototype came from if this is an old-style definition; used for diagnostics in store_parm_decls_oldstyle. */ static location_t current_function_prototype_locus; /* The current statement tree. */ static GTY(()) struct stmt_tree_s c_stmt_tree; /* State saving variables. */ tree c_break_label; tree c_cont_label; /* Linked list of TRANSLATION_UNIT_DECLS for the translation units included in this invocation. Note that the current translation unit is not included in this list. */ static GTY(()) tree all_translation_units; /* A list of decls to be made automatically visible in each file scope. */ static GTY(()) tree visible_builtins; /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ int current_function_returns_abnormally; /* Set to nonzero by `grokdeclarator' for a function whose return type is defaulted, if warnings for this are desired. */ static int warn_about_return_type; /* Nonzero when starting a function declared `extern inline'. */ static int current_extern_inline; /* True means global_bindings_p should return false even if the scope stack says we are in file scope. */ bool c_override_global_bindings_to_false; /* Each c_binding structure describes one binding of an identifier to a decl. All the decls in a scope - irrespective of namespace - are chained together by the ->prev field, which (as the name implies) runs in reverse order. All the decls in a given namespace bound to a given identifier are chained by the ->shadowed field, which runs from inner to outer scopes. The ->decl field usually points to a DECL node, but there are two exceptions. In the namespace of type tags, the bound entity is a RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared identifier is encountered, it is bound to error_mark_node to suppress further errors about that identifier in the current function. The depth field is copied from the scope structure that holds this decl. It is used to preserve the proper ordering of the ->shadowed field (see bind()) and also for a handful of special-case checks. Finally, the invisible bit is true for a decl which should be ignored for purposes of normal name lookup, and the nested bit is true for a decl that's been bound a second time in an inner scope; in all such cases, the binding in the outer scope will have its invisible bit true. 
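For a concrete (purely illustrative) picture, consider:

      int x;
      void f (void) { int x; { extern int x; } }

   Each of the three declarations of 'x' gets its own c_binding; the ->shadowed chain hanging off the identifier runs from the innermost binding out to the file-scope one, while the ->prev chains link together, in reverse order of declaration, all the bindings made in any one scope.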
*/ struct c_binding GTY((chain_next ("%h.prev"))) { tree decl; /* the decl bound */ tree id; /* the identifier it's bound to */ struct c_binding *prev; /* the previous decl in this scope */ struct c_binding *shadowed; /* the innermost decl shadowed by this one */ unsigned int depth : 28; /* depth of this scope */ BOOL_BITFIELD invisible : 1; /* normal lookup should ignore this binding */ BOOL_BITFIELD nested : 1; /* do not set DECL_CONTEXT when popping */ /* two free bits */ }; #define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth) #define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth) #define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/) #define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/) #define I_SYMBOL_BINDING(node) \ (((struct lang_identifier *)IDENTIFIER_NODE_CHECK(node))->symbol_binding) #define I_SYMBOL_DECL(node) \ (I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0) #define I_TAG_BINDING(node) \ (((struct lang_identifier *)IDENTIFIER_NODE_CHECK(node))->tag_binding) #define I_TAG_DECL(node) \ (I_TAG_BINDING(node) ? I_TAG_BINDING(node)->decl : 0) #define I_LABEL_BINDING(node) \ (((struct lang_identifier *)IDENTIFIER_NODE_CHECK(node))->label_binding) #define I_LABEL_DECL(node) \ (I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0) /* Each C symbol points to three linked lists of c_binding structures. These describe the values of the identifier in the three different namespaces defined by the language. */ struct lang_identifier GTY(()) { struct c_common_identifier common_id; struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */ struct c_binding *tag_binding; /* struct/union/enum tags */ struct c_binding *label_binding; /* labels */ }; /* Validate c-lang.c's assumptions. */ extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate [(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1]; /* The resulting tree type. */ union lang_tree_node GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"), chain_next ("TREE_CODE (&%h.generic) == INTEGER_TYPE ? (union lang_tree_node *)TYPE_NEXT_VARIANT (&%h.generic) : (union lang_tree_node *)TREE_CHAIN (&%h.generic)"))) { union tree_node GTY ((tag ("0"), desc ("tree_node_structure (&%h)"))) generic; struct lang_identifier GTY ((tag ("1"))) identifier; }; /* Each c_scope structure describes the complete contents of one scope. Four scopes are distinguished specially: the innermost or current scope, the innermost function scope, the file scope (always the second to outermost) and the outermost or external scope. Most declarations are recorded in the current scope. All normal label declarations are recorded in the innermost function scope, as are bindings of undeclared identifiers to error_mark_node. (GCC permits nested functions as an extension, hence the 'innermost' qualifier.) Explicitly declared labels (using the __label__ extension) appear in the current scope. Being in the file scope (current_scope == file_scope) causes special behavior in several places below. Also, under some conditions the Objective-C front end records declarations in the file scope even though that isn't the current scope. 
All declarations with external linkage are recorded in the external scope, even if they aren't visible there; this models the fact that such declarations are visible to the entire program, and (with a bit of cleverness, see pushdecl) allows diagnosis of some violations of C99 6.2.2p7 and 6.2.7p2: If, within the same translation unit, the same identifier appears with both internal and external linkage, the behavior is undefined. All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined. Initially only the built-in declarations, which describe compiler intrinsic functions plus a subset of the standard library, are in this scope. The order of the blocks list matters, and it is frequently appended to. To avoid having to walk all the way to the end of the list on each insertion, or reverse the list later, we maintain a pointer to the last list entry. (FIXME: It should be feasible to use a reversed list here.) The bindings list is strictly in reverse order of declarations; pop_scope relies on this. */ struct c_scope GTY((chain_next ("%h.outer"))) { /* The scope containing this one. */ struct c_scope *outer; /* The next outermost function scope. */ struct c_scope *outer_function; /* All bindings in this scope. */ struct c_binding *bindings; /* For each scope (except the global one), a chain of BLOCK nodes for all the scopes that were entered and exited one level down. */ tree blocks; tree blocks_last; /* The depth of this scope. Used to keep the ->shadowed chain of bindings sorted innermost to outermost. */ unsigned int depth : 28; /* True if we are currently filling this scope with parameter declarations. */ BOOL_BITFIELD parm_flag : 1; /* True if we already complained about forward parameter decls in this scope. This prevents double warnings on foo (int a; int b; ...) */ BOOL_BITFIELD warned_forward_parm_decls : 1; /* True if this is the outermost block scope of a function body. This scope contains the parameters, the local variables declared in the outermost block, and all the labels (except those in nested functions, or declared at block scope with __label__). */ BOOL_BITFIELD function_body : 1; /* True means make a BLOCK for this scope no matter what. */ BOOL_BITFIELD keep : 1; }; /* The scope currently in effect. */ static GTY(()) struct c_scope *current_scope; /* The innermost function scope. Ordinary (not explicitly declared) labels, bindings to error_mark_node, and the lazily-created bindings of __func__ and its friends get this scope. */ static GTY(()) struct c_scope *current_function_scope; /* The C file scope. This is reset for each input translation unit. */ static GTY(()) struct c_scope *file_scope; /* The outermost scope. This is used for all declarations with external linkage, and only these, hence the name. */ static GTY(()) struct c_scope *external_scope; /* A chain of c_scope structures awaiting reuse. */ static GTY((deletable)) struct c_scope *scope_freelist; /* A chain of c_binding structures awaiting reuse. */ static GTY((deletable)) struct c_binding *binding_freelist; /* Append VAR to LIST in scope SCOPE. */ #define SCOPE_LIST_APPEND(scope, list, decl) do { \ struct c_scope *s_ = (scope); \ tree d_ = (decl); \ if (s_->list##_last) \ TREE_CHAIN (s_->list##_last) = d_; \ else \ s_->list = d_; \ s_->list##_last = d_; \ } while (0) /* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE. 
*/ #define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do { \ struct c_scope *t_ = (tscope); \ struct c_scope *f_ = (fscope); \ if (t_->to##_last) \ TREE_CHAIN (t_->to##_last) = f_->from; \ else \ t_->to = f_->from; \ t_->to##_last = f_->from##_last; \ } while (0) /* True means unconditionally make a BLOCK for the next scope pushed. */ static bool keep_next_level_flag; /* True means the next call to push_scope will be the outermost scope of a function body, so do not push a new scope, merely cease expecting parameter decls. */ static bool next_is_function_body; /* Functions called automatically at the beginning and end of execution. */ tree static_ctors, static_dtors; /* Forward declarations. */ static tree lookup_name_in_scope (tree, struct c_scope *); static tree c_make_fname_decl (tree, int); static tree grokdeclarator (tree, tree, enum decl_context, int, tree *); static tree grokparms (tree, int); static void layout_array_type (tree); /* States indicating how grokdeclarator() should handle declspecs marked with __attribute__((deprecated)). An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. */ enum deprecated_states { DEPRECATED_NORMAL, DEPRECATED_SUPPRESS }; static enum deprecated_states deprecated_state = DEPRECATED_NORMAL; void c_print_identifier (FILE *file, tree node, int indent) { print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4); print_node (file, "tag", I_TAG_DECL (node), indent + 4); print_node (file, "label", I_LABEL_DECL (node), indent + 4); if (C_IS_RESERVED_WORD (node)) { tree rid = ridpointers[C_RID_CODE (node)]; indent_to (file, indent + 4); fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"", (void *) rid, IDENTIFIER_POINTER (rid)); } } /* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL, which may be any of several kinds of DECL or TYPE or error_mark_node, in the scope SCOPE. */ static void bind (tree name, tree decl, struct c_scope *scope, bool invisible, bool nested) { struct c_binding *b, **here; if (binding_freelist) { b = binding_freelist; binding_freelist = b->prev; } else b = ggc_alloc (sizeof (struct c_binding)); b->shadowed = 0; b->decl = decl; b->id = name; b->depth = scope->depth; b->invisible = invisible; b->nested = nested; b->prev = scope->bindings; scope->bindings = b; if (!name) return; switch (TREE_CODE (decl)) { case LABEL_DECL: here = &I_LABEL_BINDING (name); break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: here = &I_TAG_BINDING (name); break; case VAR_DECL: case FUNCTION_DECL: case TYPE_DECL: case CONST_DECL: case PARM_DECL: case ERROR_MARK: here = &I_SYMBOL_BINDING (name); break; default: abort (); } /* Locate the appropriate place in the chain of shadowed decls to insert this binding. Normally, scope == current_scope and this does nothing. */ while (*here && (*here)->depth > scope->depth) here = &(*here)->shadowed; b->shadowed = *here; *here = b; } /* Clear the binding structure B, stick it on the binding_freelist, and return the former value of b->prev. This is used by pop_scope and get_parm_info to iterate destructively over all the bindings from a given scope. */ static struct c_binding * free_binding_and_advance (struct c_binding *b) { struct c_binding *prev = b->prev; memset (b, 0, sizeof (struct c_binding)); b->prev = binding_freelist; binding_freelist = b; return prev; } /* Hook called at end of compilation to assume 1 elt for a file-scope tentative array defn that wasn't complete before. 
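For instance (illustrative), a translation unit whose only mention of 'a' is the tentative definition

      int a[];

   reaches the end of compilation with TYPE_DOMAIN still unset; the hook below warns that 'a' is assumed to have one element and completes the type as if it had been written int a[1].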
*/ void c_finish_incomplete_decl (tree decl) { if (TREE_CODE (decl) == VAR_DECL) { tree type = TREE_TYPE (decl); if (type != error_mark_node && TREE_CODE (type) == ARRAY_TYPE && ! DECL_EXTERNAL (decl) && TYPE_DOMAIN (type) == 0) { warning ("%Jarray '%D' assumed to have one element", decl, decl); complete_array_type (type, NULL_TREE, 1); layout_decl (decl, 0); } } } /* The Objective-C front-end often needs to determine the current scope. */ void * get_current_scope (void) { return current_scope; } /* The following function is used only by Objective-C. It needs to live here because it accesses the innards of c_scope. */ void objc_mark_locals_volatile (void *enclosing_blk) { struct c_scope *scope; struct c_binding *b; for (scope = current_scope; scope && scope != enclosing_blk; scope = scope->outer) { for (b = scope->bindings; b; b = b->prev) { if (TREE_CODE (b->decl) == VAR_DECL || TREE_CODE (b->decl) == PARM_DECL) { C_DECL_REGISTER (b->decl) = 0; DECL_REGISTER (b->decl) = 0; TREE_THIS_VOLATILE (b->decl) = 1; } } /* Do not climb up past the current function. */ if (scope->function_body) break; } } /* Nonzero if we are currently in file scope. */ int global_bindings_p (void) { return current_scope == file_scope && !c_override_global_bindings_to_false; } void keep_next_level (void) { keep_next_level_flag = true; } /* Identify this scope as currently being filled with parameters. */ void declare_parm_level (void) { current_scope->parm_flag = true; } void push_scope (void) { if (next_is_function_body) { /* This is the transition from the parameters to the top level of the function body. These are the same scope (C99 6.2.1p4,6) so we do not push another scope structure. next_is_function_body is set only by store_parm_decls, which in turn is called when and only when we are about to encounter the opening curly brace for the function body. The outermost block of a function always gets a BLOCK node, because the debugging output routines expect that each function has at least one BLOCK. */ current_scope->parm_flag = false; current_scope->function_body = true; current_scope->keep = true; current_scope->outer_function = current_function_scope; current_function_scope = current_scope; keep_next_level_flag = false; next_is_function_body = false; } else { struct c_scope *scope; if (scope_freelist) { scope = scope_freelist; scope_freelist = scope->outer; } else scope = ggc_alloc_cleared (sizeof (struct c_scope)); scope->keep = keep_next_level_flag; scope->outer = current_scope; scope->depth = current_scope ? (current_scope->depth + 1) : 0; /* Check for scope depth overflow. Unlikely (2^28 == 268,435,456) but possible. */ if (current_scope && scope->depth == 0) { scope->depth--; sorry ("GCC supports only %u nested scopes\n", scope->depth); } current_scope = scope; keep_next_level_flag = false; } } /* Exit a scope. Restore the state of the identifier-decl mappings that were in effect when this scope was entered. Return a BLOCK node containing all the DECLs in this scope that are of interest to debug info generation. */ tree pop_scope (void) { struct c_scope *scope = current_scope; tree block, context, p; struct c_binding *b; bool functionbody = scope->function_body; bool keep = functionbody || scope->keep || scope->bindings; /* If appropriate, create a BLOCK to record the decls for the life of this function. */ block = 0; if (keep) { block = make_node (BLOCK); BLOCK_SUBBLOCKS (block) = scope->blocks; TREE_USED (block) = 1; /* In each subblock, record that this is its superior. 
*/ for (p = scope->blocks; p; p = TREE_CHAIN (p)) BLOCK_SUPERCONTEXT (p) = block; BLOCK_VARS (block) = 0; } /* The TYPE_CONTEXTs for all of the tagged types belonging to this scope must be set so that they point to the appropriate construct, i.e. either to the current FUNCTION_DECL node, or else to the BLOCK node we just constructed. Note that for tagged types whose scope is just the formal parameter list for some function type specification, we can't properly set their TYPE_CONTEXTs here, because we don't have a pointer to the appropriate FUNCTION_TYPE node readily available to us. For those cases, the TYPE_CONTEXTs of the relevant tagged type nodes get set in `grokdeclarator' as soon as we have created the FUNCTION_TYPE node which will represent the "scope" for these "parameter list local" tagged types. */ if (scope->function_body) context = current_function_decl; else if (scope == file_scope) { tree file_decl = build_decl (TRANSLATION_UNIT_DECL, 0, 0); TREE_CHAIN (file_decl) = all_translation_units; all_translation_units = file_decl; context = file_decl; } else context = block; /* Clear all bindings in this scope. */ for (b = scope->bindings; b; b = free_binding_and_advance (b)) { p = b->decl; switch (TREE_CODE (p)) { case LABEL_DECL: /* Warnings for unused labels, errors for undefined labels. */ if (TREE_USED (p) && !DECL_INITIAL (p)) { error ("%Jlabel `%D' used but not defined", p, p); DECL_INITIAL (p) = error_mark_node; } else if (!TREE_USED (p) && warn_unused_label) { if (DECL_INITIAL (p)) warning ("%Jlabel `%D' defined but not used", p, p); else warning ("%Jlabel `%D' declared but not defined", p, p); } /* Labels go in BLOCK_VARS. */ TREE_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; #ifdef ENABLE_CHECKING if (I_LABEL_BINDING (b->id) != b) abort (); #endif I_LABEL_BINDING (b->id) = b->shadowed; break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: TYPE_CONTEXT (p) = context; /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { #ifdef ENABLE_CHECKING if (I_TAG_BINDING (b->id) != b) abort (); #endif I_TAG_BINDING (b->id) = b->shadowed; } break; case FUNCTION_DECL: /* Propagate TREE_ADDRESSABLE from nested functions to their containing functions. */ if (! TREE_ASM_WRITTEN (p) && DECL_INITIAL (p) != 0 && TREE_ADDRESSABLE (p) && DECL_ABSTRACT_ORIGIN (p) != 0 && DECL_ABSTRACT_ORIGIN (p) != p) TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1; goto common_symbol; case VAR_DECL: /* Warnings for unused variables. Keep this in sync with stmt.c:warn_about_unused_variables, which we cannot use since it expects a different data structure. */ if (warn_unused_variable && !TREE_USED (p) && !DECL_IN_SYSTEM_HEADER (p) && DECL_NAME (p) && !DECL_ARTIFICIAL (p) && (scope != file_scope || (TREE_STATIC (p) && !TREE_PUBLIC (p) && !TREE_THIS_VOLATILE (p))) && scope != external_scope) warning ("%Junused variable `%D'", p, p); /* Fall through. */ case TYPE_DECL: case CONST_DECL: common_symbol: /* All of these go in BLOCK_VARS, but only if this is the binding in the home scope. */ if (!b->nested) { TREE_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; } /* If this is the file scope, must set DECL_CONTEXT on these. Do so even for externals, so that same_translation_unit_p works. */ if (scope == file_scope) DECL_CONTEXT (p) = context; /* Fall through. */ /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have already been put there by store_parm_decls. Unused- parameter warnings are handled by function.c. 
error_mark_node obviously does not go in BLOCK_VARS and does not get unused-variable warnings. */ case PARM_DECL: case ERROR_MARK: /* It is possible for a decl not to have a name. We get here with b->id NULL in this case. */ if (b->id) { #ifdef ENABLE_CHECKING if (I_SYMBOL_BINDING (b->id) != b) abort (); #endif I_SYMBOL_BINDING (b->id) = b->shadowed; } break; default: abort (); } } /* Dispose of the block that we just made inside some higher level. */ if ((scope->function_body || scope == file_scope) && context) { DECL_INITIAL (context) = block; BLOCK_SUPERCONTEXT (block) = context; } else if (scope->outer) { if (block) SCOPE_LIST_APPEND (scope->outer, blocks, block); /* If we did not make a block for the scope just exited, any blocks made for inner scopes must be carried forward so they will later become subblocks of something else. */ else if (scope->blocks) SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks); } /* Pop the current scope, and free the structure for reuse. */ current_scope = scope->outer; if (scope->function_body) current_function_scope = scope->outer_function; memset (scope, 0, sizeof (struct c_scope)); scope->outer = scope_freelist; scope_freelist = scope; return block; } void push_file_scope (void) { tree decl; if (file_scope) return; push_scope (); file_scope = current_scope; start_fname_decls (); for (decl = visible_builtins; decl; decl = TREE_CHAIN (decl)) bind (DECL_NAME (decl), decl, file_scope, /*invisible=*/false, /*nested=*/true); } void pop_file_scope (void) { /* In case there were missing closebraces, get us back to the global binding level. */ while (current_scope != file_scope) pop_scope (); /* __FUNCTION__ is defined at file scope (""). This call may not be necessary as my tests indicate it still works without it. */ finish_fname_decls (); /* Kludge: don't actually pop the file scope if generating a precompiled header, so that macros and local symbols are still visible to the PCH generator. */ if (pch_file) return; /* And pop off the file scope. */ pop_scope (); file_scope = 0; cpp_undef_all (parse_in); } /* Insert BLOCK at the end of the list of subblocks of the current scope. This is used when a BIND_EXPR is expanded, to handle the BLOCK node inside the BIND_EXPR. */ void insert_block (tree block) { TREE_USED (block) = 1; SCOPE_LIST_APPEND (current_scope, blocks, block); } /* Push a definition or a declaration of struct, union or enum tag "name". "type" should be the type node. We assume that the tag "name" is not already defined. Note that the definition may really be just a forward reference. In that case, the TYPE_SIZE will be zero. */ static void pushtag (tree name, tree type) { /* Record the identifier as the type's name if it has none. */ if (name && !TYPE_NAME (type)) TYPE_NAME (type) = name; bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false); /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the tagged type we just added to the current scope. This fake NULL-named TYPE_DECL node helps dwarfout.c to know when it needs to output a representation of a tagged type, and it also gives us a convenient place to record the "scope start" address for the tagged type. */ TYPE_STUB_DECL (type) = pushdecl (build_decl (TYPE_DECL, NULL_TREE, type)); /* An approximation for now, so we can tell this is a function-scope tag. This will be updated in pop_scope. */ TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type)); } /* Subroutine of compare_decls. 
Allow harmless mismatches in return and argument types provided that the type modes match. This function return a unified type given a suitable match, and 0 otherwise. */ static tree match_builtin_function_types (tree newtype, tree oldtype) { tree newrettype, oldrettype; tree newargs, oldargs; tree trytype, tryargs; /* Accept the return type of the new declaration if same modes. */ oldrettype = TREE_TYPE (oldtype); newrettype = TREE_TYPE (newtype); if (TYPE_MODE (oldrettype) != TYPE_MODE (newrettype)) return 0; oldargs = TYPE_ARG_TYPES (oldtype); newargs = TYPE_ARG_TYPES (newtype); tryargs = newargs; while (oldargs || newargs) { if (! oldargs || ! newargs || ! TREE_VALUE (oldargs) || ! TREE_VALUE (newargs) || TYPE_MODE (TREE_VALUE (oldargs)) != TYPE_MODE (TREE_VALUE (newargs))) return 0; oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); } trytype = build_function_type (newrettype, tryargs); return build_type_attribute_variant (trytype, TYPE_ATTRIBUTES (oldtype)); } /* Subroutine of diagnose_mismatched_decls. Check for function type mismatch involving an empty arglist vs a nonempty one and give clearer diagnostics. */ static void diagnose_arglist_conflict (tree newdecl, tree olddecl, tree newtype, tree oldtype) { tree t; if (TREE_CODE (olddecl) != FUNCTION_DECL || !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype)) || !((TYPE_ARG_TYPES (oldtype) == 0 && DECL_INITIAL (olddecl) == 0) || (TYPE_ARG_TYPES (newtype) == 0 && DECL_INITIAL (newdecl) == 0))) return; t = TYPE_ARG_TYPES (oldtype); if (t == 0) t = TYPE_ARG_TYPES (newtype); for (; t; t = TREE_CHAIN (t)) { tree type = TREE_VALUE (t); if (TREE_CHAIN (t) == 0 && TYPE_MAIN_VARIANT (type) != void_type_node) { inform ("a parameter list with an ellipsis can't match " "an empty parameter name list declaration"); break; } if (c_type_promotes_to (type) != type) { inform ("an argument type that has a default promotion can't match " "an empty parameter name list declaration"); break; } } } /* Another subroutine of diagnose_mismatched_decls. OLDDECL is an old-style function definition, NEWDECL is a prototype declaration. Diagnose inconsistencies in the argument list. Returns TRUE if the prototype is compatible, FALSE if not. */ static bool validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype) { tree newargs, oldargs; int i; /* ??? Elsewhere TYPE_MAIN_VARIANT is not used in this context. */ #define END_OF_ARGLIST(t) (TYPE_MAIN_VARIANT (t) == void_type_node) oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype); newargs = TYPE_ARG_TYPES (newtype); i = 1; for (;;) { tree oldargtype = TREE_VALUE (oldargs); tree newargtype = TREE_VALUE (newargs); if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype)) break; /* Reaching the end of just one list means the two decls don't agree on the number of arguments. */ if (END_OF_ARGLIST (oldargtype)) { error ("%Jprototype for '%D' declares more arguments " "than previous old-style definition", newdecl, newdecl); return false; } else if (END_OF_ARGLIST (newargtype)) { error ("%Jprototype for '%D' declares fewer arguments " "than previous old-style definition", newdecl, newdecl); return false; } /* Type for passing arg must be consistent with that declared for the arg. */ else if (! comptypes (oldargtype, newargtype)) { error ("%Jprototype for '%D' declares arg %d with incompatible type", newdecl, newdecl, i); return false; } oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); i++; } /* If we get here, no errors were found, but do issue a warning for this poor-style construct. 
*/ warning ("%Jprototype for '%D' follows non-prototype definition", newdecl, newdecl); return true; #undef END_OF_ARGLIST } /* Subroutine of diagnose_mismatched_decls. Report the location of DECL, first in a pair of mismatched declarations, using the diagnostic function DIAG. */ static void locate_old_decl (tree decl, void (*diag)(const char *, ...)) { if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl)) ; else if (DECL_INITIAL (decl)) diag (N_("%Jprevious definition of '%D' was here"), decl, decl); else if (C_DECL_IMPLICIT (decl)) diag (N_("%Jprevious implicit declaration of '%D' was here"), decl, decl); else diag (N_("%Jprevious declaration of '%D' was here"), decl, decl); } /* Subroutine of duplicate_decls. Compare NEWDECL to OLDDECL. Returns true if the caller should proceed to merge the two, false if OLDDECL should simply be discarded. As a side effect, issues all necessary diagnostics for invalid or poor-style combinations. If it returns true, writes the types of NEWDECL and OLDDECL to *NEWTYPEP and *OLDTYPEP - these may have been adjusted from TREE_TYPE (NEWDECL, OLDDECL) respectively. */ static bool diagnose_mismatched_decls (tree newdecl, tree olddecl, tree *newtypep, tree *oldtypep) { tree newtype, oldtype; bool pedwarned = false; bool warned = false; /* If we have error_mark_node for either decl or type, just discard the previous decl - we're in an error cascade already. */ if (olddecl == error_mark_node || newdecl == error_mark_node) return false; *oldtypep = oldtype = TREE_TYPE (olddecl); *newtypep = newtype = TREE_TYPE (newdecl); if (oldtype == error_mark_node || newtype == error_mark_node) return false; /* Two different categories of symbol altogether. This is an error unless OLDDECL is a builtin. OLDDECL will be discarded in any case. */ if (TREE_CODE (olddecl) != TREE_CODE (newdecl)) { if (!(TREE_CODE (olddecl) == FUNCTION_DECL && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))) { error ("%J'%D' redeclared as different kind of symbol", newdecl, newdecl); locate_old_decl (olddecl, error); } else if (TREE_PUBLIC (newdecl)) warning ("%Jbuilt-in function '%D' declared as non-function", newdecl, newdecl); else if (warn_shadow) warning ("%Jdeclaration of '%D' shadows a built-in function", newdecl, newdecl); return false; } if (!comptypes (oldtype, newtype)) { if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl)) { /* Accept harmless mismatch in function types. This is for the ffs and fprintf builtins. */ tree trytype = match_builtin_function_types (newtype, oldtype); if (trytype && comptypes (newtype, trytype)) *oldtypep = oldtype = trytype; else { /* If types don't match for a built-in, throw away the built-in. No point in calling locate_old_decl here, it won't print anything. */ warning ("%Jconflicting types for built-in function '%D'", newdecl, newdecl); return false; } } else if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_IS_BUILTIN (olddecl)) { /* A conflicting function declaration for a predeclared function that isn't actually built in. Objective C uses these. The new declaration silently overrides everything but the volatility (i.e. noreturn) indication. See also below. FIXME: Make Objective C use normal builtins. */ TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); return false; } /* Permit void foo (...) to match int foo (...) if the latter is the definition and implicit int was used. See c-torture/compile/920625-2.c. 
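That is, code of the form 'void foo (); foo () { }', where the definition's return type defaults to int; the pedwarn below fires and void is kept as the return type.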
*/ else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node && C_FUNCTION_IMPLICIT_INT (newdecl)) { pedwarn ("%Jconflicting types for '%D'", newdecl, newdecl); /* Make sure we keep void as the return type. */ TREE_TYPE (newdecl) = *newtypep = newtype = oldtype; C_FUNCTION_IMPLICIT_INT (newdecl) = 0; pedwarned = true; } else { if (TYPE_QUALS (newtype) != TYPE_QUALS (oldtype)) error ("%J conflicting type qualifiers for '%D'", newdecl, newdecl); else error ("%Jconflicting types for '%D'", newdecl, newdecl); diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype); locate_old_decl (olddecl, error); return false; } } /* Redeclaration of a type is a constraint violation (6.7.2.3p1), but silently ignore the redeclaration if either is in a system header. (Conflicting redeclarations were handled above.) */ if (TREE_CODE (newdecl) == TYPE_DECL) { if (DECL_IN_SYSTEM_HEADER (newdecl) || DECL_IN_SYSTEM_HEADER (olddecl)) return true; /* Allow OLDDECL to continue in use. */ error ("%Jredefinition of typedef '%D'", newdecl, newdecl); locate_old_decl (olddecl, error); return false; } /* Function declarations can either be 'static' or 'extern' (no qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore can never conflict with each other on account of linkage (6.2.2p4). Multiple definitions are not allowed (6.9p3,5) but GCC permits two definitions if one is 'extern inline' and one is not. The non- extern-inline definition supersedes the extern-inline definition. */ else if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If you declare a built-in function name as static, or define the built-in with an old-style definition (so we can't validate the argument list) the built-in definition is overridden, but optionally warn this was a bad choice of name. */ if (DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl) && (!TREE_PUBLIC (newdecl) || (DECL_INITIAL (newdecl) && !TYPE_ARG_TYPES (TREE_TYPE (newdecl))))) { if (warn_shadow) warning ("%Jdeclaration of '%D' shadows a built-in function", newdecl, newdecl); /* Discard the old built-in function. */ return false; } if (DECL_INITIAL (newdecl)) { if (DECL_INITIAL (olddecl) && !(DECL_DECLARED_INLINE_P (olddecl) && DECL_EXTERNAL (olddecl) && !(DECL_DECLARED_INLINE_P (newdecl) && DECL_EXTERNAL (newdecl)))) { error ("%Jredefinition of '%D'", newdecl, newdecl); locate_old_decl (olddecl, error); return false; } } /* If we have a prototype after an old-style function definition, the argument types must be checked specially. */ else if (DECL_INITIAL (olddecl) && !TYPE_ARG_TYPES (oldtype) && TYPE_ARG_TYPES (newtype) && TYPE_ACTUAL_ARG_TYPES (oldtype) && !validate_proto_after_old_defn (newdecl, newtype, oldtype)) { locate_old_decl (olddecl, error); return false; } /* A non-static declaration (even an "extern") followed by a static declaration is undefined behavior per C99 6.2.2p3-5,7. The same is true for a static forward declaration at block scope followed by a non-static declaration/definition at file scope. Static followed by non-static at the same scope is not undefined behavior, and is the most convenient way to get some effects (see e.g. what unwind-dw2-fde-glibc.c does to the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but we do diagnose it if -Wtraditional. */ if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl)) { /* Two exceptions to the rule. 
If olddecl is an extern inline, or a predeclared function that isn't actually built in, newdecl silently overrides olddecl. The latter occur only in Objective C; see also above. (FIXME: Make Objective C use normal builtins.) */ if (!DECL_IS_BUILTIN (olddecl) && !(DECL_EXTERNAL (olddecl) && DECL_DECLARED_INLINE_P (olddecl))) { error ("%Jstatic declaration of '%D' follows " "non-static declaration", newdecl, newdecl); locate_old_decl (olddecl, error); } return false; } else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl)) { if (DECL_CONTEXT (olddecl)) { error ("%Jnon-static declaration of '%D' follows " "static declaration", newdecl, newdecl); locate_old_decl (olddecl, error); return false; } else if (warn_traditional) { warning ("%Jnon-static declaration of '%D' follows " "static declaration", newdecl, newdecl); warned = true; } } } else if (TREE_CODE (newdecl) == VAR_DECL) { /* Only variables can be thread-local, and all declarations must agree on this property. */ if (DECL_THREAD_LOCAL (newdecl) != DECL_THREAD_LOCAL (olddecl)) { if (DECL_THREAD_LOCAL (newdecl)) error ("%Jthread-local declaration of '%D' follows " "non-thread-local declaration", newdecl, newdecl); else error ("%Jnon-thread-local declaration of '%D' follows " "thread-local declaration", newdecl, newdecl); locate_old_decl (olddecl, error); return false; } /* Multiple initialized definitions are not allowed (6.9p3,5). */ if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl)) { error ("%Jredefinition of '%D'", newdecl, newdecl); locate_old_decl (olddecl, error); return false; } /* Objects declared at file scope: if the first declaration had external linkage (even if it was an external reference) the second must have external linkage as well, or the behavior is undefined. If the first declaration had internal linkage, then the second must too, or else be an external reference (in which case the composite declaration still has internal linkage). As for function declarations, we warn about the static-then- extern case only for -Wtraditional. See generally 6.2.2p3-5,7. */ if (DECL_FILE_SCOPE_P (newdecl) && TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl)) { if (DECL_EXTERNAL (newdecl)) { if (warn_traditional) { warning ("%Jnon-static declaration of '%D' follows " "static declaration", newdecl, newdecl); warned = true; } } else { if (TREE_PUBLIC (newdecl)) error ("%Jnon-static declaration of '%D' follows " "static declaration", newdecl, newdecl); else error ("%Jstatic declaration of '%D' follows " "non-static declaration", newdecl, newdecl); locate_old_decl (olddecl, error); return false; } } /* Two objects with the same name declared at the same block scope must both be external references (6.7p3). */ else if (!DECL_FILE_SCOPE_P (newdecl) && DECL_CONTEXT (newdecl) == DECL_CONTEXT (olddecl) && (!DECL_EXTERNAL (newdecl) || !DECL_EXTERNAL (olddecl))) { if (DECL_EXTERNAL (newdecl)) error ("%Jextern declaration of '%D' follows " "declaration with no linkage", newdecl, newdecl); else if (DECL_EXTERNAL (olddecl)) error ("%Jdeclaration of '%D' with no linkage follows " "extern declaration", newdecl, newdecl); else error ("%Jredeclaration of '%D' with no linkage", newdecl, newdecl); locate_old_decl (olddecl, error); return false; } } /* warnings */ /* All decls must agree on a non-default visibility. 
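For example, redeclaring a function first declared with __attribute__ ((visibility ("hidden"))) using __attribute__ ((visibility ("protected"))) draws this warning, and the old visibility is kept.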
*/ if (DECL_VISIBILITY (newdecl) != VISIBILITY_DEFAULT && DECL_VISIBILITY (olddecl) != VISIBILITY_DEFAULT && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl)) { warning ("%Jredeclaration of '%D' with different visibility " "(old visibility preserved)", newdecl, newdecl); warned = true; } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* Diagnose inline __attribute__ ((noinline)) which is silly. */ if (DECL_DECLARED_INLINE_P (newdecl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl))) { warning ("%Jinline declaration of '%D' follows " "declaration with attribute noinline", newdecl, newdecl); warned = true; } else if (DECL_DECLARED_INLINE_P (olddecl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl))) { warning ("%Jdeclaration of '%D' with attribute noinline follows " "inline declaration ", newdecl, newdecl); warned = true; } /* Inline declaration after use or definition. ??? Should we still warn about this now we have unit-at-a-time mode and can get it right? */ if (DECL_DECLARED_INLINE_P (newdecl) && !DECL_DECLARED_INLINE_P (olddecl)) { if (TREE_USED (olddecl)) { warning ("%J'%D' declared inline after being called", olddecl, olddecl); warned = true; } else if (DECL_INITIAL (olddecl)) { warning ("%J'%D' declared inline after its definition", olddecl, olddecl); warned = true; } } } else /* PARM_DECL, VAR_DECL */ { /* Redeclaration of a parameter is a constraint violation (this is not explicitly stated, but follows from C99 6.7p3 [no more than one declaration of the same identifier with no linkage in the same scope, except type tags] and 6.2.2p6 [parameters have no linkage]). We must check for a forward parameter declaration, indicated by TREE_ASM_WRITTEN on the old declaration - this is an extension, the mandatory diagnostic for which is handled by mark_forward_parm_decls. */ if (TREE_CODE (newdecl) == PARM_DECL && (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl))) { error ("%Jredefinition of parameter '%D'", newdecl, newdecl); locate_old_decl (olddecl, error); return false; } } /* Optional warning for completely redundant decls. */ if (!warned && !pedwarned && warn_redundant_decls /* Don't warn about a function declaration followed by a definition. */ && !(TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)) /* Don't warn about an extern followed by a definition. */ && !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl)) /* Don't warn about forward parameter decls. */ && !(TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))) { warning ("%Jredundant redeclaration of '%D'", newdecl, newdecl); warned = true; } /* Report location of previous decl/defn in a consistent manner. */ if (warned || pedwarned) locate_old_decl (olddecl, pedwarned ? pedwarn : warning); return true; } /* Subroutine of duplicate_decls. NEWDECL has been found to be consistent with OLDDECL, but carries new information. Merge the new information into OLDDECL. This function issues no diagnostics. */ static void merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype) { int new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) != 0); /* For real parm decl following a forward decl, rechain the old decl in its new location and clear TREE_ASM_WRITTEN (it's not a forward decl anymore). */ if (TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && ! 
TREE_ASM_WRITTEN (newdecl)) { struct c_binding *b, **here; for (here = &current_scope->bindings; *here; here = &(*here)->prev) if ((*here)->decl == olddecl) goto found; abort (); found: b = *here; *here = b->prev; b->prev = current_scope->bindings; current_scope->bindings = b; TREE_ASM_WRITTEN (olddecl) = 0; } DECL_ATTRIBUTES (newdecl) = targetm.merge_decl_attributes (olddecl, newdecl); /* Merge the data types specified in the two decls. */ TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = composite_type (newtype, oldtype); /* Lay the type out, unless already done. */ if (oldtype != TREE_TYPE (newdecl)) { if (TREE_TYPE (newdecl) != error_mark_node) layout_type (TREE_TYPE (newdecl)); if (TREE_CODE (newdecl) != FUNCTION_DECL && TREE_CODE (newdecl) != TYPE_DECL && TREE_CODE (newdecl) != CONST_DECL) layout_decl (newdecl, 0); } else { /* Since the type is OLDDECL's, make OLDDECL's size go with. */ DECL_SIZE (newdecl) = DECL_SIZE (olddecl); DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl); DECL_MODE (newdecl) = DECL_MODE (olddecl); if (TREE_CODE (olddecl) != FUNCTION_DECL) if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl)) { DECL_ALIGN (newdecl) = DECL_ALIGN (olddecl); DECL_USER_ALIGN (newdecl) |= DECL_ALIGN (olddecl); } } /* Keep the old rtl since we can safely use it. */ COPY_DECL_RTL (olddecl, newdecl); /* Merge the type qualifiers. */ if (TREE_READONLY (newdecl)) TREE_READONLY (olddecl) = 1; if (TREE_THIS_VOLATILE (newdecl)) { TREE_THIS_VOLATILE (olddecl) = 1; if (TREE_CODE (newdecl) == VAR_DECL) make_var_volatile (newdecl); } /* Keep source location of definition rather than declaration. */ if (DECL_INITIAL (newdecl) == 0 && DECL_INITIAL (olddecl) != 0) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); /* Merge the unused-warning information. */ if (DECL_IN_SYSTEM_HEADER (olddecl)) DECL_IN_SYSTEM_HEADER (newdecl) = 1; else if (DECL_IN_SYSTEM_HEADER (newdecl)) DECL_IN_SYSTEM_HEADER (olddecl) = 1; /* Merge the initialization information. */ if (DECL_INITIAL (newdecl) == 0) DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); /* Merge the section attribute. We want to issue an error if the sections conflict but that must be done later in decl_attributes since we are called before attributes are assigned. */ if (DECL_SECTION_NAME (newdecl) == NULL_TREE) DECL_SECTION_NAME (newdecl) = DECL_SECTION_NAME (olddecl); /* Copy the assembler name. Currently, it can only be defined in the prototype. */ COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl); /* If either declaration has a nondefault visibility, use it. */ if (DECL_VISIBILITY (olddecl) != VISIBILITY_DEFAULT) DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl); if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl); DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl); DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl); DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl) |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl); TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); TREE_READONLY (newdecl) |= TREE_READONLY (olddecl); DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl); DECL_IS_PURE (newdecl) |= DECL_IS_PURE (olddecl); } /* Merge the storage class information. */ merge_weak (newdecl, olddecl); /* For functions, static overrides non-static. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) { TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl); /* This is since we don't automatically copy the attributes of NEWDECL into OLDDECL.
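(So after 'static int f (void);' a subsequent 'int f (void) { ... }' still leaves f with internal linkage.)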
*/ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); /* If this clears `static', clear it in the identifier too. */ if (! TREE_PUBLIC (olddecl)) TREE_PUBLIC (DECL_NAME (olddecl)) = 0; } if (DECL_EXTERNAL (newdecl)) { TREE_STATIC (newdecl) = TREE_STATIC (olddecl); DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl); /* An extern decl does not override previous storage class. */ TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl); if (! DECL_EXTERNAL (newdecl)) { DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl); DECL_COMMON (newdecl) = DECL_COMMON (olddecl); } } else { TREE_STATIC (olddecl) = TREE_STATIC (newdecl); TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If we're redefining a function previously defined as extern inline, make sure we emit debug info for the inline before we throw it away, in case it was inlined into a function that hasn't been written out yet. */ if (new_is_definition && DECL_INITIAL (olddecl)) { if (TREE_USED (olddecl) /* In unit-at-a-time mode we never inline re-defined extern inline functions. */ && !flag_unit_at_a_time && cgraph_function_possibly_inlined_p (olddecl)) (*debug_hooks->outlining_inline_function) (olddecl); /* The new defn must not be inline. */ DECL_INLINE (newdecl) = 0; DECL_UNINLINABLE (newdecl) = 1; } else { /* If either decl says `inline', this fn is inline, unless its definition was passed already. */ if (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) DECL_DECLARED_INLINE_P (newdecl) = 1; DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl) = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl)); } if (DECL_BUILT_IN (olddecl)) { /* If redeclaring a builtin function, it stays built in. But it gets tagged as having been declared. */ DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl); DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl); C_DECL_DECLARED_BUILTIN (newdecl) = 1; } /* Also preserve various other info from the definition. */ if (! new_is_definition) { DECL_RESULT (newdecl) = DECL_RESULT (olddecl); DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl); DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl); DECL_ARGUMENTS (newdecl) = DECL_ARGUMENTS (olddecl); /* Set DECL_INLINE on the declaration if we've got a body from which to instantiate. */ if (DECL_INLINE (olddecl) && ! DECL_UNINLINABLE (newdecl)) { DECL_INLINE (newdecl) = 1; DECL_ABSTRACT_ORIGIN (newdecl) = DECL_ABSTRACT_ORIGIN (olddecl); } } else { /* If a previous declaration said inline, mark the definition as inlinable. */ if (DECL_DECLARED_INLINE_P (newdecl) && ! DECL_UNINLINABLE (newdecl)) DECL_INLINE (newdecl) = 1; } } /* Copy most of the decl-specific fields of NEWDECL into OLDDECL. But preserve OLDDECL's DECL_UID. */ { unsigned olddecl_uid = DECL_UID (olddecl); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), sizeof (struct tree_decl) - sizeof (struct tree_common)); DECL_UID (olddecl) = olddecl_uid; } /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl so that encode_section_info has a chance to look at the new decl flags and attributes. */ if (DECL_RTL_SET_P (olddecl) && (TREE_CODE (olddecl) == FUNCTION_DECL || (TREE_CODE (olddecl) == VAR_DECL && TREE_STATIC (olddecl)))) make_decl_rtl (olddecl, NULL); } /* Handle when a new declaration NEWDECL has the same name as an old one OLDDECL in the same binding contour. Prints an error message if appropriate. 
If safely possible, alter OLDDECL to look like NEWDECL, and return true. Otherwise, return false. */ static bool duplicate_decls (tree newdecl, tree olddecl) { tree newtype = NULL, oldtype = NULL; if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype)) return false; merge_decls (newdecl, olddecl, newtype, oldtype); return true; } /* Check whether decl-node NEW shadows an existing declaration. */ static void warn_if_shadowing (tree new) { struct c_binding *b; /* Shadow warnings wanted? */ if (!warn_shadow /* No shadow warnings for internally generated vars. */ || DECL_IS_BUILTIN (new) /* No shadow warnings for vars made for inlining. */ || DECL_FROM_INLINE (new) /* Don't warn about the parm names in function declarator within a function declarator. It would be nice to avoid warning in any function declarator in a declaration, as opposed to a definition, but there is no way to tell it's not a definition at this point. */ || (TREE_CODE (new) == PARM_DECL && current_scope->outer->parm_flag)) return; /* Is anything being shadowed? Invisible decls do not count. */ for (b = I_SYMBOL_BINDING (DECL_NAME (new)); b; b = b->shadowed) if (b->decl && b->decl != new && !b->invisible) { tree old = b->decl; if (TREE_CODE (old) == PARM_DECL) warning ("%Jdeclaration of '%D' shadows a parameter", new, new); else if (DECL_FILE_SCOPE_P (old)) warning ("%Jdeclaration of '%D' shadows a global declaration", new, new); else if (TREE_CODE (old) == FUNCTION_DECL && DECL_BUILT_IN (old)) warning ("%Jdeclaration of '%D' shadows a built-in function", new, new); else warning ("%Jdeclaration of '%D' shadows a previous local", new, new); if (TREE_CODE (old) != FUNCTION_DECL || !DECL_BUILT_IN (old)) warning ("%Jshadowed declaration is here", old); break; } } /* Subroutine of pushdecl. X is a TYPE_DECL for a typedef statement. Create a brand new ..._TYPE node (which will be just a variant of the existing ..._TYPE node with identical properties) and then install X as the TYPE_NAME of this brand new (duplicate) ..._TYPE node. The whole point here is to end up with a situation where each and every ..._TYPE node the compiler creates will be uniquely associated with AT MOST one node representing a typedef name. This way, even though the compiler substitutes corresponding ..._TYPE nodes for TYPE_DECL (i.e. "typedef name") nodes very early on, later parts of the compiler can always do the reverse translation and get back the corresponding typedef name. For example, given: typedef struct S MY_TYPE; MY_TYPE object; Later parts of the compiler might only know that `object' was of type `struct S' if it were not for code just below. With this code however, later parts of the compiler see something like: struct S' == struct S typedef struct S' MY_TYPE; struct S' object; And they can then deduce (from the node for type struct S') that the original object declaration was: MY_TYPE object; Being able to do this is important for proper support of protoize, and also for generating precise symbolic debugging information which takes full account of the programmer's (typedef) vocabulary. Obviously, we don't want to generate a duplicate ..._TYPE node if the TYPE_DECL node that we are now processing really represents a standard built-in type. Since all standard types are effectively declared at line zero in the source file, we can easily check to see if we are working on a standard type by checking the current value of lineno. 
*/ static void clone_underlying_type (tree x) { if (DECL_IS_BUILTIN (x)) { if (TYPE_NAME (TREE_TYPE (x)) == 0) TYPE_NAME (TREE_TYPE (x)) = x; } else if (TREE_TYPE (x) != error_mark_node && DECL_ORIGINAL_TYPE (x) == NULL_TREE) { tree tt = TREE_TYPE (x); DECL_ORIGINAL_TYPE (x) = tt; tt = build_type_copy (tt); TYPE_NAME (tt) = x; TREE_USED (tt) = TREE_USED (x); TREE_TYPE (x) = tt; } } /* Record a decl-node X as belonging to the current lexical scope. Check for errors (such as an incompatible declaration for the same name already seen in the same scope). Returns either X or an old decl for the same name. If an old decl is returned, it may have been smashed to agree with what X says. */ tree pushdecl (tree x) { tree name = DECL_NAME (x); struct c_scope *scope = current_scope; struct c_binding *b; bool nested = false; /* Functions need the lang_decl data. */ if (TREE_CODE (x) == FUNCTION_DECL && ! DECL_LANG_SPECIFIC (x)) DECL_LANG_SPECIFIC (x) = ggc_alloc_cleared (sizeof (struct lang_decl)); /* Must set DECL_CONTEXT for everything not at file scope or DECL_FILE_SCOPE_P won't work. Local externs don't count unless they have initializers (which generate code). */ if (current_function_decl && ((TREE_CODE (x) != FUNCTION_DECL && TREE_CODE (x) != VAR_DECL) || DECL_INITIAL (x) || !DECL_EXTERNAL (x))) DECL_CONTEXT (x) = current_function_decl; /* Anonymous decls are just inserted in the scope. */ if (!name) { bind (name, x, scope, /*invisible=*/false, /*nested=*/false); return x; } /* First, see if there is another declaration with the same name in the current scope. If there is, duplicate_decls may do all the work for us. If duplicate_decls returns false, that indicates two incompatible decls in the same scope; we are to silently replace the old one (duplicate_decls has issued all appropriate diagnostics). In particular, we should not consider possible duplicates in the external scope, or shadowing. */ b = I_SYMBOL_BINDING (name); if (b && B_IN_SCOPE (b, scope)) { if (duplicate_decls (x, b->decl)) return b->decl; else goto skip_external_and_shadow_checks; } /* All declarations with external linkage, and all external references, go in the external scope, no matter what scope is current. However, the binding in that scope is ignored for purposes of normal name lookup. A separate binding structure is created in the requested scope; this governs the normal visibility of the symbol. The binding in the externals scope is used exclusively for detecting duplicate declarations of the same object, no matter what scope they are in; this is what we do here. (C99 6.2.7p2: All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined.) */ if (DECL_EXTERNAL (x) || scope == file_scope) { if (warn_nested_externs && scope != file_scope && !DECL_IN_SYSTEM_HEADER (x)) warning ("nested extern declaration of '%D'", x); while (b && !B_IN_EXTERNAL_SCOPE (b)) b = b->shadowed; /* The point of the same_translation_unit_p check here is, we want to detect a duplicate decl for a construct like foo() { extern bar(); } ... static bar(); but not if they are in different translation units. In any case, the static does not go in the externals scope. 
*/ if (b && (DECL_EXTERNAL (x) || TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl)) && duplicate_decls (x, b->decl)) { bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true); return b->decl; } else if (DECL_EXTERNAL (x) || TREE_PUBLIC (x)) { bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false); nested = true; } } /* Similarly, a declaration of a function with static linkage at block scope must be checked against any existing declaration of that function at file scope. */ else if (TREE_CODE (x) == FUNCTION_DECL && scope != file_scope && !TREE_PUBLIC (x) && !DECL_INITIAL (x)) { if (warn_nested_externs && !DECL_IN_SYSTEM_HEADER (x)) warning ("nested static declaration of '%D'", x); while (b && !B_IN_FILE_SCOPE (b)) b = b->shadowed; if (b && same_translation_unit_p (x, b->decl) && duplicate_decls (x, b->decl)) { bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true); return b->decl; } else { bind (name, x, file_scope, /*invisible=*/true, /*nested=*/false); nested = true; } } warn_if_shadowing (x); skip_external_and_shadow_checks: if (TREE_CODE (x) == TYPE_DECL) clone_underlying_type (x); bind (name, x, scope, /*invisible=*/false, nested); /* If x's type is incomplete because it's based on a structure or union which has not yet been fully declared, attach it to that structure or union type, so we can go back and complete the variable declaration later, if the structure or union gets fully declared. If the input is erroneous, we can have error_mark in the type slot (e.g. "f(void a, ...)") - that doesn't count as an incomplete type. */ if (TREE_TYPE (x) != error_mark_node && !COMPLETE_TYPE_P (TREE_TYPE (x))) { tree element = TREE_TYPE (x); while (TREE_CODE (element) == ARRAY_TYPE) element = TREE_TYPE (element); element = TYPE_MAIN_VARIANT (element); if ((TREE_CODE (element) == RECORD_TYPE || TREE_CODE (element) == UNION_TYPE) && (TREE_CODE (x) != TYPE_DECL || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE) && !COMPLETE_TYPE_P (element)) C_TYPE_INCOMPLETE_VARS (element) = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element)); } return x; } /* Record X as belonging to file scope. This is used only internally by the Objective-C front end, and is limited to its needs. duplicate_decls is not called; if there is any preexisting decl for this identifier, it is an ICE. */ tree pushdecl_top_level (tree x) { tree name; bool nested = false; if (TREE_CODE (x) != VAR_DECL) abort (); name = DECL_NAME (x); if (I_SYMBOL_BINDING (name)) abort (); if (DECL_EXTERNAL (x) || TREE_PUBLIC (x)) { bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false); nested = true; } if (file_scope) bind (name, x, file_scope, /*invisible=*/false, nested); return x; } static void implicit_decl_warning (tree id, tree olddecl) { void (*diag) (const char *, ...); switch (mesg_implicit_function_declaration) { case 0: return; case 1: diag = warning; break; case 2: diag = error; break; default: abort (); } diag (N_("implicit declaration of function '%E'"), id); if (olddecl) locate_old_decl (olddecl, diag); } /* Generate an implicit declaration for identifier FUNCTIONID as a function of type int (). */ tree implicitly_declare (tree functionid) { tree decl = lookup_name_in_scope (functionid, external_scope); if (decl) { /* FIXME: Objective-C has weird not-really-builtin functions which are supposed to be visible automatically. They wind up in the external scope because they're pushed before the file scope gets created. Catch this here and rebind them into the file scope. 
*/ if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl)) { bind (functionid, decl, file_scope, /*invisible=*/false, /*nested=*/true); return decl; } else { /* Implicit declaration of a function already declared (somehow) in a different scope, or as a built-in. If this is the first time this has happened, warn; then recycle the old declaration. */ if (!C_DECL_IMPLICIT (decl)) { implicit_decl_warning (functionid, decl); C_DECL_IMPLICIT (decl) = 1; } bind (functionid, decl, current_scope, /*invisible=*/false, /*nested=*/true); return decl; } } /* Not seen before. */ decl = build_decl (FUNCTION_DECL, functionid, default_function_type); DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; C_DECL_IMPLICIT (decl) = 1; implicit_decl_warning (functionid, 0); /* C89 says implicit declarations are in the innermost block. So we record the decl in the standard fashion. */ decl = pushdecl (decl); /* No need to call objc_check_decl here - it's a function type. */ rest_of_decl_compilation (decl, NULL, 0, 0); /* Write a record describing this implicit function declaration to the prototypes file (if requested). */ gen_aux_info_record (decl, 0, 1, 0); /* Possibly apply some default attributes to this implicit declaration. */ decl_attributes (&decl, NULL_TREE, 0); return decl; } /* Issue an error message for a reference to an undeclared variable ID, including a reference to a builtin outside of function-call context. Establish a binding of the identifier to error_mark_node in an appropriate scope, which will suppress further errors for the same identifier. */ void undeclared_variable (tree id) { static bool already = false; struct c_scope *scope; if (current_function_decl == 0) { error ("'%E' undeclared here (not in a function)", id); scope = current_scope; } else { error ("'%E' undeclared (first use in this function)", id); if (! already) { error ("(Each undeclared identifier is reported only once"); error ("for each function it appears in.)"); already = true; } /* If we are parsing old-style parameter decls, current_function_decl will be nonnull but current_function_scope will be null. */ scope = current_function_scope ? current_function_scope : current_scope; } bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false); } /* Subroutine of lookup_label, declare_label, define_label: construct a LABEL_DECL with all the proper frills. */ static tree make_label (tree name, location_t location) { tree label = build_decl (LABEL_DECL, name, void_type_node); DECL_CONTEXT (label) = current_function_decl; DECL_MODE (label) = VOIDmode; DECL_SOURCE_LOCATION (label) = location; return label; } /* Get the LABEL_DECL corresponding to identifier NAME as a label. Create one if none exists so far for the current function. This is called when a label is used in a goto expression or has its address taken. */ tree lookup_label (tree name) { tree label; if (current_function_decl == 0) { error ("label %s referenced outside of any function", IDENTIFIER_POINTER (name)); return 0; } /* Use a label already defined or ref'd with this name, but not if it is inherited from a containing function and wasn't declared using __label__. */ label = I_LABEL_DECL (name); if (label && (DECL_CONTEXT (label) == current_function_decl || C_DECLARED_LABEL_FLAG (label))) { /* If the label has only been declared, update its apparent location to point here, for better diagnostics if it turns out not to have been defined. 
*/ if (!TREE_USED (label)) DECL_SOURCE_LOCATION (label) = input_location; return label; } /* No label binding for that identifier; make one. */ label = make_label (name, input_location); /* Ordinary labels go in the current function scope. */ bind (name, label, current_function_scope, /*invisible=*/false, /*nested=*/false); return label; } /* Make a label named NAME in the current function, shadowing silently any that may be inherited from containing functions or containing scopes. This is called for __label__ declarations. */ tree declare_label (tree name) { struct c_binding *b = I_LABEL_BINDING (name); tree label; /* Check to make sure that the label hasn't already been declared at this scope */ if (b && B_IN_CURRENT_SCOPE (b)) { error ("duplicate label declaration `%s'", IDENTIFIER_POINTER (name)); locate_old_decl (b->decl, error); /* Just use the previous declaration. */ return b->decl; } label = make_label (name, input_location); C_DECLARED_LABEL_FLAG (label) = 1; /* Declared labels go in the current scope. */ bind (name, label, current_scope, /*invisible=*/false, /*nested=*/false); return label; } /* Define a label, specifying the location in the source file. Return the LABEL_DECL node for the label, if the definition is valid. Otherwise return 0. */ tree define_label (location_t location, tree name) { /* Find any preexisting label with this name. It is an error if that label has already been defined in this function, or if there is a containing function with a declared label with the same name. */ tree label = I_LABEL_DECL (name); if (label && ((DECL_CONTEXT (label) == current_function_decl && DECL_INITIAL (label) != 0) || (DECL_CONTEXT (label) != current_function_decl && C_DECLARED_LABEL_FLAG (label)))) { error ("%Hduplicate label `%D'", &location, label); locate_old_decl (label, error); return 0; } else if (label && DECL_CONTEXT (label) == current_function_decl) { /* The label has been used or declared already in this function, but not defined. Update its location to point to this definition. */ DECL_SOURCE_LOCATION (label) = location; } else { /* No label binding for that identifier; make one. */ label = make_label (name, location); /* Ordinary labels go in the current function scope. */ bind (name, label, current_function_scope, /*invisible=*/false, /*nested=*/false); } if (warn_traditional && !in_system_header && lookup_name (name)) warning ("%Htraditional C lacks a separate namespace for labels, " "identifier `%s' conflicts", &location, IDENTIFIER_POINTER (name)); /* Mark label as having been defined. */ DECL_INITIAL (label) = error_mark_node; return label; } /* Given NAME, an IDENTIFIER_NODE, return the structure (or union or enum) definition for that name. If THISLEVEL_ONLY is nonzero, searches only the current_scope. CODE says which kind of type the caller wants; it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE. If the wrong kind of type is found, an error is reported. */ static tree lookup_tag (enum tree_code code, tree name, int thislevel_only) { struct c_binding *b = I_TAG_BINDING (name); int thislevel = 0; if (!b || !b->decl) return 0; /* We only care about whether it's in this level if thislevel_only was set or it might be a type clash. */ if (thislevel_only || TREE_CODE (b->decl) != code) { /* For our purposes, a tag in the external scope is the same as a tag in the file scope. (Primarily relevant to Objective-C and its builtin structure tags, which get pushed before the file scope is created.) 
*/ if (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) thislevel = 1; } if (thislevel_only && !thislevel) return 0; if (TREE_CODE (b->decl) != code) { /* Definition isn't the kind we were looking for. */ pending_invalid_xref = name; pending_invalid_xref_location = input_location; /* If in the same binding level as a declaration as a tag of a different type, this must not be allowed to shadow that tag, so give the error immediately. (For example, "struct foo; union foo;" is invalid.) */ if (thislevel) pending_xref_error (); } return b->decl; } /* Print an error message now for a recent invalid struct, union or enum cross reference. We don't print them immediately because they are not invalid when used in the `struct foo;' construct for shadowing. */ void pending_xref_error (void) { if (pending_invalid_xref != 0) error ("%H`%s' defined as wrong kind of tag", &pending_invalid_xref_location, IDENTIFIER_POINTER (pending_invalid_xref)); pending_invalid_xref = 0; } /* Look up NAME in the current scope and its superiors in the namespace of variables, functions and typedefs. Return a ..._DECL node of some kind representing its definition, or return 0 if it is undefined. */ tree lookup_name (tree name) { struct c_binding *b = I_SYMBOL_BINDING (name); if (b && !b->invisible) return b->decl; return 0; } /* Similar to `lookup_name' but look only at the indicated scope. */ static tree lookup_name_in_scope (tree name, struct c_scope *scope) { struct c_binding *b; for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed) if (B_IN_SCOPE (b, scope)) return b->decl; return 0; } /* Create the predefined scalar types of C, and some nodes representing standard constants (0, 1, (void *) 0). Initialize the global scope. Make definitions for built-in primitive functions. */ void c_init_decl_processing (void) { tree endlink; tree ptr_ftype_void, ptr_ftype_ptr; location_t save_loc = input_location; /* Adds some ggc roots, and reserved words for c-parse.in. */ c_parse_init (); current_function_decl = 0; /* Make the externals scope. */ push_scope (); external_scope = current_scope; /* Declarations from c_common_nodes_and_builtins must not be associated with this input file, lest we get differences between using and not using preprocessed headers. */ #ifdef USE_MAPPED_LOCATION input_location = BUILTINS_LOCATION; #else input_location.file = "<built-in>"; input_location.line = 0; #endif build_common_tree_nodes (flag_signed_char); c_common_nodes_and_builtins (); /* In C, comparisons and TRUTH_* expressions have type int. */ truthvalue_type_node = integer_type_node; truthvalue_true_node = integer_one_node; truthvalue_false_node = integer_zero_node; /* Even in C99, which has a real boolean type. */ pushdecl (build_decl (TYPE_DECL, get_identifier ("_Bool"), boolean_type_node)); endlink = void_list_node; ptr_ftype_void = build_function_type (ptr_type_node, endlink); ptr_ftype_ptr = build_function_type (ptr_type_node, tree_cons (NULL_TREE, ptr_type_node, endlink)); input_location = save_loc; pedantic_lvalues = true; make_fname_decl = c_make_fname_decl; start_fname_decls (); } /* Create the VAR_DECL for __FUNCTION__ etc. ID is the name to give the decl, NAME is the initialization string and TYPE_DEP indicates whether NAME depended on the type of the function. As we don't yet implement delayed emission of static data, we mark the decl as emitted so it is not placed in the output. Anything using it must therefore pull out the STRING_CST initializer directly. FIXME.
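For example, inside 'void f (void)' the implicit __func__ declaration behaves as if 'static const char __func__[] = "f";' appeared at the start of the function body.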
*/ static tree c_make_fname_decl (tree id, int type_dep) { const char *name = fname_as_string (type_dep); tree decl, type, init; size_t length = strlen (name); type = build_array_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST), build_index_type (size_int (length))); decl = build_decl (VAR_DECL, id, type); TREE_STATIC (decl) = 1; TREE_READONLY (decl) = 1; DECL_ARTIFICIAL (decl) = 1; init = build_string (length + 1, name); free ((char *) name); TREE_TYPE (init) = type; DECL_INITIAL (decl) = init; TREE_USED (decl) = 1; if (current_function_decl) { DECL_CONTEXT (decl) = current_function_decl; bind (id, decl, current_function_scope, /*invisible=*/false, /*nested=*/false); } finish_decl (decl, init, NULL_TREE); return decl; } /* Return a definition for a builtin function named NAME and whose data type is TYPE. TYPE should be a function type with argument types. FUNCTION_CODE tells later passes how to compile calls to this function. See tree.h for its possible values. If LIBRARY_NAME is nonzero, use that for DECL_ASSEMBLER_NAME, the name to be called if we can't opencode the function. If ATTRS is nonzero, use that for the function's attribute list. */ tree builtin_function (const char *name, tree type, int function_code, enum built_in_class class, const char *library_name, tree attrs) { tree id = get_identifier (name); tree decl = build_decl (FUNCTION_DECL, id, type); TREE_PUBLIC (decl) = 1; DECL_EXTERNAL (decl) = 1; DECL_LANG_SPECIFIC (decl) = ggc_alloc_cleared (sizeof (struct lang_decl)); DECL_BUILT_IN_CLASS (decl) = class; DECL_FUNCTION_CODE (decl) = function_code; if (library_name) SET_DECL_ASSEMBLER_NAME (decl, get_identifier (library_name)); /* Should never be called on a symbol with a preexisting meaning. */ if (I_SYMBOL_BINDING (id)) abort (); bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false); /* Builtins in the implementation namespace are made visible without needing to be explicitly declared. See push_file_scope. */ if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1]))) { TREE_CHAIN (decl) = visible_builtins; visible_builtins = decl; } /* Possibly apply some default attributes to this built-in function. */ if (attrs) decl_attributes (&decl, attrs, ATTR_FLAG_BUILT_IN); else decl_attributes (&decl, NULL_TREE, 0); return decl; } /* Called when a declaration is seen that contains no names to declare. If its type is a reference to a structure, union or enum inherited from a containing scope, shadow that tag name for the current scope with a forward reference. If its type defines a new named structure or union or defines an enum, it is valid but we need not do anything here. Otherwise, it is an error. */ void shadow_tag (tree declspecs) { shadow_tag_warned (declspecs, 0); } void shadow_tag_warned (tree declspecs, int warned) /* 1 => we have done a pedwarn. 2 => we have done a warning, but no pedwarn. */ { int found_tag = 0; tree link; tree specs, attrs; pending_invalid_xref = 0; /* Remove the attributes from declspecs, since they will confuse the following code. */ split_specs_attrs (declspecs, &specs, &attrs); for (link = specs; link; link = TREE_CHAIN (link)) { tree value = TREE_VALUE (link); enum tree_code code = TREE_CODE (value); if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE) /* Used to test also that TYPE_SIZE (value) != 0. That caused warning for `struct foo;' at top level in the file. 
*/ { tree name = TYPE_NAME (value); tree t; found_tag++; if (name == 0) { if (warned != 1 && code != ENUMERAL_TYPE) /* Empty unnamed enum OK */ { pedwarn ("unnamed struct/union that defines no instances"); warned = 1; } } else { t = lookup_tag (code, name, 1); if (t == 0) { t = make_node (code); pushtag (name, t); } } } else { if (!warned && ! in_system_header) { warning ("useless keyword or type name in empty declaration"); warned = 2; } } } if (found_tag > 1) error ("two types specified in one empty declaration"); if (warned != 1) { if (found_tag == 0) pedwarn ("empty declaration"); } } /* Construct an array declarator. EXPR is the expression inside [], or NULL_TREE. QUALS are the type qualifiers inside the [] (to be applied to the pointer to which a parameter array is converted). STATIC_P is nonzero if "static" is inside the [], zero otherwise. VLA_UNSPEC_P is nonzero if the array is [*], a VLA of unspecified length which is nevertheless a complete type (not currently implemented by GCC), zero otherwise. The declarator is constructed as an ARRAY_REF (to be decoded by grokdeclarator), whose operand 0 is what's on the left of the [] (filled in by set_array_declarator_type) and operand 1 is the expression inside; whose TREE_TYPE is the type qualifiers and which has TREE_STATIC set if "static" is used. */ tree build_array_declarator (tree expr, tree quals, int static_p, int vla_unspec_p) { tree decl; decl = build_nt (ARRAY_REF, NULL_TREE, expr, NULL_TREE, NULL_TREE); TREE_TYPE (decl) = quals; TREE_STATIC (decl) = (static_p ? 1 : 0); if (pedantic && !flag_isoc99) { if (static_p || quals != NULL_TREE) pedwarn ("ISO C90 does not support `static' or type qualifiers in parameter array declarators"); if (vla_unspec_p) pedwarn ("ISO C90 does not support `[*]' array declarators"); } if (vla_unspec_p) warning ("GCC does not yet properly implement `[*]' array declarators"); return decl; } /* Set the type of an array declarator. DECL is the declarator, as constructed by build_array_declarator; TYPE is what appears on the left of the [] and goes in operand 0. ABSTRACT_P is nonzero if it is an abstract declarator, zero otherwise; this is used to reject static and type qualifiers in abstract declarators, where they are not in the C99 grammar. */ tree set_array_declarator_type (tree decl, tree type, int abstract_p) { TREE_OPERAND (decl, 0) = type; if (abstract_p && (TREE_TYPE (decl) != NULL_TREE || TREE_STATIC (decl))) error ("static or type qualifiers in abstract declarator"); return decl; } /* Decode a "typename", such as "int **", returning a ..._TYPE node. */ tree groktypename (tree typename) { tree specs, attrs; if (TREE_CODE (typename) != TREE_LIST) return typename; split_specs_attrs (TREE_PURPOSE (typename), &specs, &attrs); typename = grokdeclarator (TREE_VALUE (typename), specs, TYPENAME, 0, NULL); /* Apply attributes. */ decl_attributes (&typename, attrs, 0); return typename; } /* Return a PARM_DECL node for a given pair of specs and declarator. */ tree groktypename_in_parm_context (tree typename) { if (TREE_CODE (typename) != TREE_LIST) return typename; return grokdeclarator (TREE_VALUE (typename), TREE_PURPOSE (typename), PARM, 0, NULL); } /* Decode a declarator in an ordinary declaration or data definition. This is called as soon as the type information and variable name have been parsed, before parsing the initializer if any. Here we create the ..._DECL node, fill in its type, and put it on the list of decls for the current context. The ..._DECL node is returned as the value.
Exception: for arrays where the length is not specified, the type is left null, to be filled in by `finish_decl'. Function definitions do not come here; they go to start_function instead. However, external and forward declarations of functions do go through here. Structure field declarations are done by grokfield and not through here. */ tree start_decl (tree declarator, tree declspecs, int initialized, tree attributes) { tree decl; tree tem; /* An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. */ if (lookup_attribute ("deprecated", attributes)) deprecated_state = DEPRECATED_SUPPRESS; decl = grokdeclarator (declarator, declspecs, NORMAL, initialized, NULL); deprecated_state = DEPRECATED_NORMAL; if (warn_main > 0 && TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl))) warning ("%J'%D' is usually a function", decl, decl); if (initialized) /* Is it valid for this decl to have an initializer at all? If not, set INITIALIZED to zero, which will indirectly tell 'finish_decl' to ignore the initializer once it is parsed. */ switch (TREE_CODE (decl)) { case TYPE_DECL: error ("typedef '%D' is initialized (use __typeof__ instead)", decl); initialized = 0; break; case FUNCTION_DECL: error ("function '%D' is initialized like a variable", decl); initialized = 0; break; case PARM_DECL: /* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE. */ error ("parameter '%D' is initialized", decl); initialized = 0; break; default: /* Don't allow initializations for incomplete types except for arrays which might be completed by the initialization. */ /* This can happen if the array size is an undefined macro. We already gave a warning, so we don't need another one. */ if (TREE_TYPE (decl) == error_mark_node) initialized = 0; else if (COMPLETE_TYPE_P (TREE_TYPE (decl))) { /* A complete type is ok if size is fixed. */ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST || C_DECL_VARIABLE_SIZE (decl)) { error ("variable-sized object may not be initialized"); initialized = 0; } } else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE) { error ("variable '%D' has initializer but incomplete type", decl); initialized = 0; } else if (!COMPLETE_TYPE_P (TREE_TYPE (TREE_TYPE (decl)))) { error ("elements of array '%D' have incomplete type", decl); initialized = 0; } } if (initialized) { DECL_EXTERNAL (decl) = 0; if (current_scope == file_scope) TREE_STATIC (decl) = 1; /* Tell 'pushdecl' this is an initialized decl even though we don't yet have the initializer expression. Also tell 'finish_decl' it may store the real initializer. */ DECL_INITIAL (decl) = error_mark_node; } /* If this is a function declaration, write a record describing it to the prototypes file (if requested). */ if (TREE_CODE (decl) == FUNCTION_DECL) gen_aux_info_record (decl, 0, 0, TYPE_ARG_TYPES (TREE_TYPE (decl)) != 0); /* ANSI specifies that a tentative definition which is not merged with a non-tentative definition behaves exactly like a definition with an initializer equal to zero. (Section 3.7.2) -fno-common gives strict ANSI behavior, though this tends to break a large body of code that grew up without this rule. Thread-local variables are never common, since there's no entrenched body of code to break, and it allows more efficient variable references in the presence of dynamic linking. 
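For example, a file-scope 'int counter;' with no initializer is a tentative definition: with -fcommon it becomes a common symbol the linker merges across files, while -fno-common allocates it immediately so that a second external definition elsewhere is a link-time error.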
*/ if (TREE_CODE (decl) == VAR_DECL && !initialized && TREE_PUBLIC (decl) && !DECL_THREAD_LOCAL (decl) && !flag_no_common) DECL_COMMON (decl) = 1; /* Set attributes here so if duplicate decl, will have proper attributes. */ decl_attributes (&decl, attributes, 0); if (TREE_CODE (decl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (decl))) { tree ce = declarator; if (TREE_CODE (ce) == INDIRECT_REF) ce = TREE_OPERAND (declarator, 0); if (TREE_CODE (ce) == CALL_EXPR) { tree args = TREE_PURPOSE (TREE_OPERAND (ce, 1)); for (; args; args = TREE_CHAIN (args)) { tree type = TREE_TYPE (args); if (type && INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = integer_type_node; } } } if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl) && DECL_UNINLINABLE (decl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl))) warning ("%Jinline function '%D' given attribute noinline", decl, decl); /* Add this decl to the current scope. TEM may equal DECL or it may be a previous decl of the same name. */ tem = pushdecl (decl); return tem; } /* Finish processing of a declaration; install its initial value. If the length of an array type is not known before, it must be determined now, from the initial value, or it is an error. */ void finish_decl (tree decl, tree init, tree asmspec_tree) { tree type = TREE_TYPE (decl); int was_incomplete = (DECL_SIZE (decl) == 0); const char *asmspec = 0; /* If a name was specified, get the string. */ if (current_scope == file_scope) asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree); if (asmspec_tree) asmspec = TREE_STRING_POINTER (asmspec_tree); /* If `start_decl' didn't like having an initialization, ignore it now. */ if (init != 0 && DECL_INITIAL (decl) == 0) init = 0; /* Don't crash if parm is initialized. */ if (TREE_CODE (decl) == PARM_DECL) init = 0; if (init) store_init_value (decl, init); if (c_dialect_objc () && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == FIELD_DECL)) objc_check_decl (decl); /* Deduce size of array from initialization, if not already known. */ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == 0 && TREE_CODE (decl) != TYPE_DECL) { int do_default = (TREE_STATIC (decl) /* Even if pedantic, an external linkage array may have incomplete type at first. */ ? pedantic && !TREE_PUBLIC (decl) : !DECL_EXTERNAL (decl)); int failure = complete_array_type (type, DECL_INITIAL (decl), do_default); /* Get the completed type made by complete_array_type. */ type = TREE_TYPE (decl); if (failure == 1) error ("%Jinitializer fails to determine size of '%D'", decl, decl); else if (failure == 2) { if (do_default) error ("%Jarray size missing in '%D'", decl, decl); /* If a `static' var's size isn't known, make it extern as well as static, so it does not get allocated. If it is not `static', then do not mark extern; finish_incomplete_decl will give it a default size and it will get allocated. */ else if (!pedantic && TREE_STATIC (decl) && ! TREE_PUBLIC (decl)) DECL_EXTERNAL (decl) = 1; } /* TYPE_MAX_VALUE is always one less than the number of elements in the array, because we start counting at zero. Therefore, warn only if the value is less than zero. 
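(That is the situation for an empty initializer such as 'int x[] = { };', which leaves the maximum index at -1.)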
*/ else if (pedantic && TYPE_DOMAIN (type) != 0 && tree_int_cst_sgn (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) < 0) error ("%Jzero or negative size array '%D'", decl, decl); layout_decl (decl, 0); } if (TREE_CODE (decl) == VAR_DECL) { if (DECL_SIZE (decl) == 0 && TREE_TYPE (decl) != error_mark_node && COMPLETE_TYPE_P (TREE_TYPE (decl))) layout_decl (decl, 0); if (DECL_SIZE (decl) == 0 /* Don't give an error if we already gave one earlier. */ && TREE_TYPE (decl) != error_mark_node && (TREE_STATIC (decl) ? /* A static variable with an incomplete type is an error if it is initialized. Also if it is not file scope. Otherwise, let it through, but if it is not `extern' then it may cause an error message later. */ (DECL_INITIAL (decl) != 0 || !DECL_FILE_SCOPE_P (decl)) : /* An automatic variable with an incomplete type is an error. */ !DECL_EXTERNAL (decl))) { error ("%Jstorage size of '%D' isn't known", decl, decl); TREE_TYPE (decl) = error_mark_node; } if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl)) && DECL_SIZE (decl) != 0) { if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST) constant_expression_warning (DECL_SIZE (decl)); else error ("%Jstorage size of '%D' isn't constant", decl, decl); } if (TREE_USED (type)) TREE_USED (decl) = 1; } /* If this is a function and an assembler name is specified, reset DECL_RTL so we can give it its new name. Also, update built_in_decls if it was a normal built-in. */ if (TREE_CODE (decl) == FUNCTION_DECL && asmspec) { /* ASMSPEC is given, and not the name of a register. Mark the name with a star so assemble_name won't munge it. */ char *starred = alloca (strlen (asmspec) + 2); starred[0] = '*'; strcpy (starred + 1, asmspec); if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL) { tree builtin = built_in_decls [DECL_FUNCTION_CODE (decl)]; SET_DECL_RTL (builtin, NULL_RTX); change_decl_assembler_name (builtin, get_identifier (starred)); if (DECL_FUNCTION_CODE (decl) == BUILT_IN_MEMCPY) init_block_move_fn (starred); else if (DECL_FUNCTION_CODE (decl) == BUILT_IN_MEMSET) init_block_clear_fn (starred); } SET_DECL_RTL (decl, NULL_RTX); change_decl_assembler_name (decl, get_identifier (starred)); } /* If #pragma weak was used, mark the decl weak now. */ if (current_scope == file_scope) maybe_apply_pragma_weak (decl); /* Output the assembler code and/or RTL code for variables and functions, unless the type is an undefined structure or union. If not, it will get done when the type is completed. */ if (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL) { /* This is a no-op in c-lang.c or something real in objc-act.c. */ if (c_dialect_objc ()) objc_check_decl (decl); if (DECL_FILE_SCOPE_P (decl)) { if (DECL_INITIAL (decl) == NULL_TREE || DECL_INITIAL (decl) == error_mark_node) /* Don't output anything when a tentative file-scope definition is seen. But at end of compilation, do output code for them. */ DECL_DEFER_OUTPUT (decl) = 1; rest_of_decl_compilation (decl, asmspec, true, 0); } else { /* This is a local variable. If there is an ASMSPEC, the user has requested that we handle it specially. */ if (asmspec) { /* In conjunction with an ASMSPEC, the `register' keyword indicates that we should place the variable in a particular register. */ if (C_DECL_REGISTER (decl)) { DECL_HARD_REGISTER (decl) = 1; /* This cannot be done for a structure with volatile fields, on which DECL_REGISTER will have been reset. */ if (!DECL_REGISTER (decl)) error ("cannot put object with volatile field into register"); } /* If this is not a static variable, issue a warning. 
It doesn't make any sense to give an ASMSPEC for an ordinary, non-register local variable. Historically, GCC has accepted -- but ignored -- the ASMSPEC in this case. */ if (TREE_CODE (decl) == VAR_DECL && !C_DECL_REGISTER (decl) && !TREE_STATIC (decl)) warning ("%Jignoring asm-specifier for non-static local " "variable '%D'", decl, decl); else change_decl_assembler_name (decl, get_identifier (asmspec)); } if (TREE_CODE (decl) != FUNCTION_DECL) add_stmt (build_stmt (DECL_EXPR, decl)); } if (!DECL_FILE_SCOPE_P (decl)) { /* Recompute the RTL of a local array now if it used to be an incomplete type. */ if (was_incomplete && ! TREE_STATIC (decl) && ! DECL_EXTERNAL (decl)) { /* If we used it already as memory, it must stay in memory. */ TREE_ADDRESSABLE (decl) = TREE_USED (decl); /* If it's still incomplete now, no init will save it. */ if (DECL_SIZE (decl) == 0) DECL_INITIAL (decl) = 0; } } } /* If this was marked 'used', be sure it will be output. */ if (lookup_attribute ("used", DECL_ATTRIBUTES (decl))) mark_decl_referenced (decl); if (TREE_CODE (decl) == TYPE_DECL) { if (!DECL_FILE_SCOPE_P (decl) && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) add_stmt (build_stmt (DECL_EXPR, decl)); rest_of_decl_compilation (decl, NULL, DECL_FILE_SCOPE_P (decl), 0); } /* At the end of a declaration, throw away any variable type sizes of types defined inside that declaration. There is no use computing them in the following function definition. */ if (current_scope == file_scope) get_pending_sizes (); /* Install a cleanup (aka destructor) if one was given. */ if (TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl)) { tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl)); if (attr) { tree cleanup_id = TREE_VALUE (TREE_VALUE (attr)); tree cleanup_decl = lookup_name (cleanup_id); tree cleanup; /* Build "cleanup(&decl)" for the destructor. */ cleanup = build_unary_op (ADDR_EXPR, decl, 0); cleanup = build_tree_list (NULL_TREE, cleanup); cleanup = build_function_call (cleanup_decl, cleanup); /* Don't warn about decl unused; the cleanup uses it. */ TREE_USED (decl) = 1; TREE_USED (cleanup_decl) = 1; /* Initialize EH, if we've been told to do so. */ if (flag_exceptions && !c_eh_initialized_p) { c_eh_initialized_p = true; eh_personality_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS ? "__gcc_personality_sj0" : "__gcc_personality_v0"); using_eh_for_cleanups (); } push_cleanup (decl, cleanup, false); } } } /* Given a parsed parameter declaration, decode it into a PARM_DECL and push that on the current scope. */ void push_parm_decl (tree parm) { tree decl; decl = grokdeclarator (TREE_VALUE (TREE_PURPOSE (parm)), TREE_PURPOSE (TREE_PURPOSE (parm)), PARM, 0, NULL); decl_attributes (&decl, TREE_VALUE (parm), 0); decl = pushdecl (decl); finish_decl (decl, NULL_TREE, NULL_TREE); } /* Mark all the parameter declarations to date as forward decls. Also diagnose use of this extension. */ void mark_forward_parm_decls (void) { struct c_binding *b; if (pedantic && !current_scope->warned_forward_parm_decls) { pedwarn ("ISO C forbids forward parameter declarations"); current_scope->warned_forward_parm_decls = true; } for (b = current_scope->bindings; b; b = b->prev) if (TREE_CODE (b->decl) == PARM_DECL) TREE_ASM_WRITTEN (b->decl) = 1; } static GTY(()) int compound_literal_number; /* Build a COMPOUND_LITERAL_EXPR. TYPE is the type given in the compound literal, which may be an incomplete array type completed by the initializer; INIT is a CONSTRUCTOR that initializes the compound literal. 
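   For illustration, the C99 source forms this builds correspond to (names here are made up):

       int *p = (int []){ 2, 4 };                    -- incomplete array type, completed to int[2]
       struct point pt = (struct point){ .x = 1 };   -- struct compound literal

   Per C99 the unnamed object has automatic storage inside a function and static
   storage at file scope, which is why TREE_STATIC is set from current_scope below.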
*/ tree build_compound_literal (tree type, tree init) { /* We do not use start_decl here because we have a type, not a declarator; and do not use finish_decl because the decl should be stored inside the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR. */ tree decl = build_decl (VAR_DECL, NULL_TREE, type); tree complit; tree stmt; DECL_EXTERNAL (decl) = 0; TREE_PUBLIC (decl) = 0; TREE_STATIC (decl) = (current_scope == file_scope); DECL_CONTEXT (decl) = current_function_decl; TREE_USED (decl) = 1; TREE_TYPE (decl) = type; TREE_READONLY (decl) = TYPE_READONLY (type); store_init_value (decl, init); if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type)) { int failure = complete_array_type (type, DECL_INITIAL (decl), 1); if (failure) abort (); } type = TREE_TYPE (decl); if (type == error_mark_node || !COMPLETE_TYPE_P (type)) return error_mark_node; stmt = build_stmt (DECL_EXPR, decl); complit = build1 (COMPOUND_LITERAL_EXPR, TREE_TYPE (decl), stmt); TREE_SIDE_EFFECTS (complit) = 1; layout_decl (decl, 0); if (TREE_STATIC (decl)) { /* This decl needs a name for the assembler output. We also need a unique suffix to be added to the name. */ char *name; ASM_FORMAT_PRIVATE_NAME (name, "__compound_literal", compound_literal_number); compound_literal_number++; DECL_NAME (decl) = get_identifier (name); DECL_DEFER_OUTPUT (decl) = 1; DECL_COMDAT (decl) = 1; DECL_ARTIFICIAL (decl) = 1; pushdecl (decl); rest_of_decl_compilation (decl, NULL, 1, 0); } return complit; } /* Make TYPE a complete type based on INITIAL_VALUE. Return 0 if successful, 1 if INITIAL_VALUE can't be deciphered, 2 if there was no information (in which case assume 1 if DO_DEFAULT). */ int complete_array_type (tree type, tree initial_value, int do_default) { tree maxindex = NULL_TREE; int value = 0; if (initial_value) { /* Note MAXINDEX is really the maximum index, one less than the size. */ if (TREE_CODE (initial_value) == STRING_CST) { int eltsize = int_size_in_bytes (TREE_TYPE (TREE_TYPE (initial_value))); maxindex = build_int_2 ((TREE_STRING_LENGTH (initial_value) / eltsize) - 1, 0); } else if (TREE_CODE (initial_value) == CONSTRUCTOR) { tree elts = CONSTRUCTOR_ELTS (initial_value); maxindex = build_int_2 (-1, -1); for (; elts; elts = TREE_CHAIN (elts)) { if (TREE_PURPOSE (elts)) maxindex = TREE_PURPOSE (elts); else maxindex = fold (build (PLUS_EXPR, integer_type_node, maxindex, integer_one_node)); } maxindex = copy_node (maxindex); } else { /* Make an error message unless that happened already. */ if (initial_value != error_mark_node) value = 1; /* Prevent further error messages. */ maxindex = build_int_2 (0, 0); } } if (!maxindex) { if (do_default) maxindex = build_int_2 (0, 0); value = 2; } if (maxindex) { TYPE_DOMAIN (type) = build_index_type (maxindex); if (!TREE_TYPE (maxindex)) TREE_TYPE (maxindex) = TYPE_DOMAIN (type); } /* Lay out the type now that we can get the real answer. */ layout_type (type); return value; } /* Determine whether TYPE is a structure with a flexible array member, or a union containing such a structure (possibly recursively). 
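   For illustration, both of these (hypothetical) types satisfy the predicate:

       struct msg  { int len; char body[]; };    -- flexible array member as the last field
       union  blob { struct msg m; long raw; };  -- union containing such a structure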
*/ static bool flexible_array_type_p (tree type) { tree x; switch (TREE_CODE (type)) { case RECORD_TYPE: x = TYPE_FIELDS (type); if (x == NULL_TREE) return false; while (TREE_CHAIN (x) != NULL_TREE) x = TREE_CHAIN (x); if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) return true; return false; case UNION_TYPE: for (x = TYPE_FIELDS (type); x != NULL_TREE; x = TREE_CHAIN (x)) { if (flexible_array_type_p (TREE_TYPE (x))) return true; } return false; default: return false; } } /* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME, replacing with appropriate values if they are invalid. */ static void check_bitfield_type_and_width (tree *type, tree *width, const char *orig_name) { tree type_mv; unsigned int max_width; unsigned HOST_WIDE_INT w; const char *name = orig_name ? orig_name : _("<anonymous>"); /* Necessary? */ STRIP_NOPS (*width); /* Detect and ignore out of range field width and process valid field widths. */ if (TREE_CODE (*width) != INTEGER_CST) { error ("bit-field `%s' width not an integer constant", name); *width = integer_one_node; } else { constant_expression_warning (*width); if (tree_int_cst_sgn (*width) < 0) { error ("negative width in bit-field `%s'", name); *width = integer_one_node; } else if (integer_zerop (*width) && orig_name) { error ("zero width for bit-field `%s'", name); *width = integer_one_node; } } /* Detect invalid bit-field type. */ if (TREE_CODE (*type) != INTEGER_TYPE && TREE_CODE (*type) != BOOLEAN_TYPE && TREE_CODE (*type) != ENUMERAL_TYPE) { error ("bit-field `%s' has invalid type", name); *type = unsigned_type_node; } type_mv = TYPE_MAIN_VARIANT (*type); if (pedantic && type_mv != integer_type_node && type_mv != unsigned_type_node && type_mv != boolean_type_node) pedwarn ("type of bit-field `%s' is a GCC extension", name); if (type_mv == boolean_type_node) max_width = CHAR_TYPE_SIZE; else max_width = TYPE_PRECISION (*type); if (0 < compare_tree_int (*width, max_width)) { error ("width of `%s' exceeds its type", name); w = max_width; *width = build_int_2 (w, 0); } else w = tree_low_cst (*width, 1); if (TREE_CODE (*type) == ENUMERAL_TYPE) { struct lang_type *lt = TYPE_LANG_SPECIFIC (*type); if (!lt || w < min_precision (lt->enum_min, TYPE_UNSIGNED (*type)) || w < min_precision (lt->enum_max, TYPE_UNSIGNED (*type))) warning ("`%s' is narrower than values of its type", name); } } /* Given declspecs and a declarator, determine the name and type of the object declared and construct a ..._DECL node for it. (In one case we can return a ..._TYPE node instead. For invalid input we sometimes return 0.) DECLSPECS is a chain of tree_list nodes whose value fields are the storage classes and type specifiers. DECL_CONTEXT says which syntactic context this declaration is in: NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL. FUNCDEF for a function definition. Like NORMAL but a few different error messages in each case. Return value may be zero meaning this definition is too screwy to try to parse. PARM for a parameter declaration (either within a function prototype or before a function body). Make a PARM_DECL, or return void_type_node. TYPENAME if for a typename (in a cast or sizeof). Don't make a DECL node; just return the ..._TYPE node. FIELD for a struct or union field; make a FIELD_DECL. INITIALIZED is 1 if the decl has an initializer.
WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node representing the width of the bit-field. In the TYPENAME case, DECLARATOR is really an absolute declarator. It may also be so in the PARM case, for a prototype where the argument type is specified but not the name. This function is where the complicated C meanings of `static' and `extern' are interpreted. */ static tree grokdeclarator (tree declarator, tree declspecs, enum decl_context decl_context, int initialized, tree *width) { int specbits = 0; tree spec; tree type = NULL_TREE; int longlong = 0; int constp; int restrictp; int volatilep; int type_quals = TYPE_UNQUALIFIED; int inlinep; int explicit_int = 0; int explicit_char = 0; int defaulted_int = 0; tree typedef_decl = 0; const char *name, *orig_name; tree typedef_type = 0; int funcdef_flag = 0; enum tree_code innermost_code = ERROR_MARK; int size_varies = 0; tree decl_attr = NULL_TREE; tree array_ptr_quals = NULL_TREE; int array_parm_static = 0; tree returned_attrs = NULL_TREE; bool bitfield = width != NULL; tree element_type; tree arg_info = NULL_TREE; if (decl_context == FUNCDEF) funcdef_flag = 1, decl_context = NORMAL; /* Look inside a declarator for the name being declared and get it as a string, for an error message. */ { tree decl = declarator; name = 0; while (decl) switch (TREE_CODE (decl)) { case ARRAY_REF: case INDIRECT_REF: case CALL_EXPR: innermost_code = TREE_CODE (decl); decl = TREE_OPERAND (decl, 0); break; case TREE_LIST: decl = TREE_VALUE (decl); break; case IDENTIFIER_NODE: name = IDENTIFIER_POINTER (decl); decl = 0; break; default: abort (); } orig_name = name; if (name == 0) name = "type name"; } /* A function definition's declarator must have the form of a function declarator. */ if (funcdef_flag && innermost_code != CALL_EXPR) return 0; /* If this looks like a function definition, make it one, even if it occurs where parms are expected. Then store_parm_decls will reject it and not use it as a parm. */ if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag) decl_context = PARM; /* Look through the decl specs and record which ones appear. Some typespecs are defined as built-in typenames. Others, the ones that are modifiers of other types, are represented by bits in SPECBITS: set the bits for the modifiers that appear. Storage class keywords are also in SPECBITS. If there is a typedef name or a type, store the type in TYPE. This includes builtin typedefs such as `int'. Set EXPLICIT_INT or EXPLICIT_CHAR if the type is `int' or `char' and did not come from a user typedef. Set LONGLONG if `long' is mentioned twice. */ for (spec = declspecs; spec; spec = TREE_CHAIN (spec)) { tree id = TREE_VALUE (spec); /* If the entire declaration is itself tagged as deprecated then suppress reports of deprecated items. */ if (id && TREE_DEPRECATED (id)) { if (deprecated_state != DEPRECATED_SUPPRESS) warn_deprecated_use (id); } if (id == ridpointers[(int) RID_INT]) explicit_int = 1; if (id == ridpointers[(int) RID_CHAR]) explicit_char = 1; if (TREE_CODE (id) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (id)) { enum rid i = C_RID_CODE (id); if ((int) i <= (int) RID_LAST_MODIFIER) { if (i == RID_LONG && (specbits & (1 << (int) RID_LONG))) { if (longlong) error ("`long long long' is too long for GCC"); else { if (pedantic && !flag_isoc99 && ! 
in_system_header && warn_long_long) pedwarn ("ISO C90 does not support `long long'"); longlong = 1; } } else if (specbits & (1 << (int) i)) { if (i == RID_CONST || i == RID_VOLATILE || i == RID_RESTRICT) { if (pedantic && !flag_isoc99) pedwarn ("duplicate `%s'", IDENTIFIER_POINTER (id)); } else error ("duplicate `%s'", IDENTIFIER_POINTER (id)); } /* Diagnose "__thread extern". Recall that this list is in the reverse order seen in the text. */ if (i == RID_THREAD && (specbits & (1 << (int) RID_EXTERN | 1 << (int) RID_STATIC))) { if (specbits & 1 << (int) RID_EXTERN) error ("`__thread' before `extern'"); else error ("`__thread' before `static'"); } specbits |= 1 << (int) i; goto found; } } if (type) error ("two or more data types in declaration of `%s'", name); /* Actual typedefs come to us as TYPE_DECL nodes. */ else if (TREE_CODE (id) == TYPE_DECL) { if (TREE_TYPE (id) == error_mark_node) ; /* Allow the type to default to int to avoid cascading errors. */ else { type = TREE_TYPE (id); decl_attr = DECL_ATTRIBUTES (id); typedef_decl = id; } } /* Built-in types come as identifiers. */ else if (TREE_CODE (id) == IDENTIFIER_NODE) { tree t = lookup_name (id); if (!t || TREE_CODE (t) != TYPE_DECL) error ("`%s' fails to be a typedef or built in type", IDENTIFIER_POINTER (id)); else if (TREE_TYPE (t) == error_mark_node) ; else { type = TREE_TYPE (t); typedef_decl = t; } } else if (TREE_CODE (id) != ERROR_MARK) type = id; found: ; } typedef_type = type; if (type) size_varies = C_TYPE_VARIABLE_SIZE (type); /* No type at all: default to `int', and set DEFAULTED_INT because it was not a user-defined typedef. */ if (type == 0) { if ((! (specbits & ((1 << (int) RID_LONG) | (1 << (int) RID_SHORT) | (1 << (int) RID_SIGNED) | (1 << (int) RID_UNSIGNED) | (1 << (int) RID_COMPLEX)))) /* Don't warn about typedef foo = bar. */ && ! (specbits & (1 << (int) RID_TYPEDEF) && initialized) && ! in_system_header) { /* Issue a warning if this is an ISO C 99 program or if -Wreturn-type and this is a function, or if -Wimplicit; prefer the former warning since it is more explicit. */ if ((warn_implicit_int || warn_return_type || flag_isoc99) && funcdef_flag) warn_about_return_type = 1; else if (warn_implicit_int || flag_isoc99) pedwarn_c99 ("type defaults to `int' in declaration of `%s'", name); } defaulted_int = 1; type = integer_type_node; } /* Now process the modifiers that were specified and check for invalid combinations. */ /* Long double is a special combination. */ if ((specbits & 1 << (int) RID_LONG) && ! longlong && TYPE_MAIN_VARIANT (type) == double_type_node) { specbits &= ~(1 << (int) RID_LONG); type = long_double_type_node; } /* Check all other uses of type modifiers. */ if (specbits & ((1 << (int) RID_LONG) | (1 << (int) RID_SHORT) | (1 << (int) RID_UNSIGNED) | (1 << (int) RID_SIGNED))) { int ok = 0; if ((specbits & 1 << (int) RID_LONG) && (specbits & 1 << (int) RID_SHORT)) error ("both long and short specified for `%s'", name); else if (((specbits & 1 << (int) RID_LONG) || (specbits & 1 << (int) RID_SHORT)) && explicit_char) error ("long or short specified with char for `%s'", name); else if (((specbits & 1 << (int) RID_LONG) || (specbits & 1 << (int) RID_SHORT)) && TREE_CODE (type) == REAL_TYPE) { static int already = 0; error ("long or short specified with floating type for `%s'", name); if (! already && ! 
pedantic) { error ("the only valid combination is `long double'"); already = 1; } } else if ((specbits & 1 << (int) RID_SIGNED) && (specbits & 1 << (int) RID_UNSIGNED)) error ("both signed and unsigned specified for `%s'", name); else if (TREE_CODE (type) != INTEGER_TYPE) error ("long, short, signed or unsigned invalid for `%s'", name); else { ok = 1; if (!explicit_int && !defaulted_int && !explicit_char) { error ("long, short, signed or unsigned used invalidly for `%s'", name); ok = 0; } } /* Discard the type modifiers if they are invalid. */ if (! ok) { specbits &= ~((1 << (int) RID_LONG) | (1 << (int) RID_SHORT) | (1 << (int) RID_UNSIGNED) | (1 << (int) RID_SIGNED)); longlong = 0; } } if ((specbits & (1 << (int) RID_COMPLEX)) && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE) { error ("complex invalid for `%s'", name); specbits &= ~(1 << (int) RID_COMPLEX); } /* Decide whether an integer type is signed or not. Optionally treat bit-fields as signed by default. */ if (specbits & 1 << (int) RID_UNSIGNED || (bitfield && ! flag_signed_bitfields && (explicit_int || defaulted_int || explicit_char /* A typedef for plain `int' without `signed' can be controlled just like plain `int'. */ || ! (typedef_decl != 0 && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl))) && TREE_CODE (type) != ENUMERAL_TYPE && !(specbits & 1 << (int) RID_SIGNED))) { if (longlong) type = long_long_unsigned_type_node; else if (specbits & 1 << (int) RID_LONG) type = long_unsigned_type_node; else if (specbits & 1 << (int) RID_SHORT) type = short_unsigned_type_node; else if (type == char_type_node) type = unsigned_char_type_node; else if (typedef_decl) type = c_common_unsigned_type (type); else type = unsigned_type_node; } else if ((specbits & 1 << (int) RID_SIGNED) && type == char_type_node) type = signed_char_type_node; else if (longlong) type = long_long_integer_type_node; else if (specbits & 1 << (int) RID_LONG) type = long_integer_type_node; else if (specbits & 1 << (int) RID_SHORT) type = short_integer_type_node; if (specbits & 1 << (int) RID_COMPLEX) { if (pedantic && !flag_isoc99) pedwarn ("ISO C90 does not support complex types"); /* If we just have "complex", it is equivalent to "complex double", but if any modifiers at all are specified it is the complex form of TYPE. E.g, "complex short" is "complex short int". */ if (defaulted_int && ! longlong && ! (specbits & ((1 << (int) RID_LONG) | (1 << (int) RID_SHORT) | (1 << (int) RID_SIGNED) | (1 << (int) RID_UNSIGNED)))) { if (pedantic) pedwarn ("ISO C does not support plain `complex' meaning `double complex'"); type = complex_double_type_node; } else if (type == integer_type_node) { if (pedantic) pedwarn ("ISO C does not support complex integer types"); type = complex_integer_type_node; } else if (type == float_type_node) type = complex_float_type_node; else if (type == double_type_node) type = complex_double_type_node; else if (type == long_double_type_node) type = complex_long_double_type_node; else { if (pedantic) pedwarn ("ISO C does not support complex integer types"); type = build_complex_type (type); } } /* Check the type and width of a bit-field. */ if (bitfield) check_bitfield_type_and_width (&type, width, orig_name); /* Figure out the type qualifiers for the declaration. There are two ways a declaration can become qualified. One is something like `const int i' where the `const' is explicit. Another is something like `typedef const int CI; CI i' where the type of the declaration contains the `const'. 
A third possibility is that there is a type qualifier on the element type of a typedefed array type, in which case we should extract that qualifier so that c_apply_type_quals_to_decls receives the full list of qualifiers to work with (C90 is not entirely clear about whether duplicate qualifiers should be diagnosed in this case, but it seems most appropriate to do so). */ element_type = strip_array_types (type); constp = !! (specbits & 1 << (int) RID_CONST) + TYPE_READONLY (element_type); restrictp = !! (specbits & 1 << (int) RID_RESTRICT) + TYPE_RESTRICT (element_type); volatilep = !! (specbits & 1 << (int) RID_VOLATILE) + TYPE_VOLATILE (element_type); inlinep = !! (specbits & (1 << (int) RID_INLINE)); if (pedantic && !flag_isoc99) { if (constp > 1) pedwarn ("duplicate `const'"); if (restrictp > 1) pedwarn ("duplicate `restrict'"); if (volatilep > 1) pedwarn ("duplicate `volatile'"); } if (! flag_gen_aux_info && (TYPE_QUALS (type))) type = TYPE_MAIN_VARIANT (type); type_quals = ((constp ? TYPE_QUAL_CONST : 0) | (restrictp ? TYPE_QUAL_RESTRICT : 0) | (volatilep ? TYPE_QUAL_VOLATILE : 0)); /* Warn if two storage classes are given. Default to `auto'. */ { int nclasses = 0; if (specbits & 1 << (int) RID_AUTO) nclasses++; if (specbits & 1 << (int) RID_STATIC) nclasses++; if (specbits & 1 << (int) RID_EXTERN) nclasses++; if (specbits & 1 << (int) RID_REGISTER) nclasses++; if (specbits & 1 << (int) RID_TYPEDEF) nclasses++; /* "static __thread" and "extern __thread" are allowed. */ if ((specbits & (1 << (int) RID_THREAD | 1 << (int) RID_STATIC | 1 << (int) RID_EXTERN)) == (1 << (int) RID_THREAD)) nclasses++; /* Warn about storage classes that are invalid for certain kinds of declarations (parameters, typenames, etc.). */ if (nclasses > 1) error ("multiple storage classes in declaration of `%s'", name); else if (funcdef_flag && (specbits & ((1 << (int) RID_REGISTER) | (1 << (int) RID_AUTO) | (1 << (int) RID_TYPEDEF) | (1 << (int) RID_THREAD)))) { if (specbits & 1 << (int) RID_AUTO && (pedantic || current_scope == file_scope)) pedwarn ("function definition declared `auto'"); if (specbits & 1 << (int) RID_REGISTER) error ("function definition declared `register'"); if (specbits & 1 << (int) RID_TYPEDEF) error ("function definition declared `typedef'"); if (specbits & 1 << (int) RID_THREAD) error ("function definition declared `__thread'"); specbits &= ~((1 << (int) RID_TYPEDEF) | (1 << (int) RID_REGISTER) | (1 << (int) RID_AUTO) | (1 << (int) RID_THREAD)); } else if (decl_context != NORMAL && nclasses > 0) { if (decl_context == PARM && specbits & 1 << (int) RID_REGISTER) ; else { switch (decl_context) { case FIELD: error ("storage class specified for structure field `%s'", name); break; case PARM: error ("storage class specified for parameter `%s'", name); break; default: error ("storage class specified for typename"); break; } specbits &= ~((1 << (int) RID_TYPEDEF) | (1 << (int) RID_REGISTER) | (1 << (int) RID_AUTO) | (1 << (int) RID_STATIC) | (1 << (int) RID_EXTERN) | (1 << (int) RID_THREAD)); } } else if (specbits & 1 << (int) RID_EXTERN && initialized && ! funcdef_flag) { /* `extern' with initialization is invalid if not at file scope. 
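   For illustration:

       extern int limit = 10;                      -- file scope: accepted with a warning
       void f (void) { extern int limit = 10; }    -- block scope: rejected with an error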
*/ if (current_scope == file_scope) warning ("`%s' initialized and declared `extern'", name); else error ("`%s' has both `extern' and initializer", name); } else if (current_scope == file_scope) { if (specbits & 1 << (int) RID_AUTO) error ("file-scope declaration of `%s' specifies `auto'", name); } else { if (specbits & 1 << (int) RID_EXTERN && funcdef_flag) error ("nested function `%s' declared `extern'", name); else if ((specbits & (1 << (int) RID_THREAD | 1 << (int) RID_EXTERN | 1 << (int) RID_STATIC)) == (1 << (int) RID_THREAD)) { error ("function-scope `%s' implicitly auto and declared `__thread'", name); specbits &= ~(1 << (int) RID_THREAD); } } } /* Now figure out the structure of the declarator proper. Descend through it, creating more complex types, until we reach the declared identifier (or NULL_TREE, in an absolute declarator). */ while (declarator && TREE_CODE (declarator) != IDENTIFIER_NODE) { if (type == error_mark_node) { declarator = TREE_OPERAND (declarator, 0); continue; } /* Each level of DECLARATOR is either an ARRAY_REF (for ...[..]), an INDIRECT_REF (for *...), a CALL_EXPR (for ...(...)), a TREE_LIST (for nested attributes), an identifier (for the name being declared) or a null pointer (for the place in an absolute declarator where the name was omitted). For the last two cases, we have just exited the loop. At this point, TYPE is the type of elements of an array, or for a function to return, or for a pointer to point to. After this sequence of ifs, TYPE is the type of the array or function or pointer, and DECLARATOR has had its outermost layer removed. */ if (array_ptr_quals != NULL_TREE || array_parm_static) { /* Only the innermost declarator (making a parameter be of array type which is converted to pointer type) may have static or type qualifiers. */ error ("static or type qualifiers in non-parameter array declarator"); array_ptr_quals = NULL_TREE; array_parm_static = 0; } if (TREE_CODE (declarator) == TREE_LIST) { /* We encode a declarator with embedded attributes using a TREE_LIST. */ tree attrs = TREE_PURPOSE (declarator); tree inner_decl; int attr_flags = 0; declarator = TREE_VALUE (declarator); inner_decl = declarator; while (inner_decl != NULL_TREE && TREE_CODE (inner_decl) == TREE_LIST) inner_decl = TREE_VALUE (inner_decl); if (inner_decl == NULL_TREE || TREE_CODE (inner_decl) == IDENTIFIER_NODE) attr_flags |= (int) ATTR_FLAG_DECL_NEXT; else if (TREE_CODE (inner_decl) == CALL_EXPR) attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT; else if (TREE_CODE (inner_decl) == ARRAY_REF) attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT; returned_attrs = decl_attributes (&type, chainon (returned_attrs, attrs), attr_flags); } else if (TREE_CODE (declarator) == ARRAY_REF) { tree itype = NULL_TREE; tree size = TREE_OPERAND (declarator, 1); /* The index is a signed object `sizetype' bits wide. */ tree index_type = c_common_signed_type (sizetype); array_ptr_quals = TREE_TYPE (declarator); array_parm_static = TREE_STATIC (declarator); declarator = TREE_OPERAND (declarator, 0); /* Check for some types that there cannot be arrays of. 
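   For illustration, the declarations rejected here look like

       void v[4];          -- array of voids
       int  f[4] (void);   -- array of functions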
*/ if (VOID_TYPE_P (type)) { error ("declaration of `%s' as array of voids", name); type = error_mark_node; } if (TREE_CODE (type) == FUNCTION_TYPE) { error ("declaration of `%s' as array of functions", name); type = error_mark_node; } if (pedantic && !in_system_header && flexible_array_type_p (type)) pedwarn ("invalid use of structure with flexible array member"); if (size == error_mark_node) type = error_mark_node; if (type == error_mark_node) continue; /* If size was specified, set ITYPE to a range-type for that size. Otherwise, ITYPE remains null. finish_decl may figure it out from an initial value. */ if (size) { /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (size); if (! INTEGRAL_TYPE_P (TREE_TYPE (size))) { error ("size of array `%s' has non-integer type", name); size = integer_one_node; } if (pedantic && integer_zerop (size)) pedwarn ("ISO C forbids zero-size array `%s'", name); if (TREE_CODE (size) == INTEGER_CST) { constant_expression_warning (size); if (tree_int_cst_sgn (size) < 0) { error ("size of array `%s' is negative", name); size = integer_one_node; } } else { /* Make sure the array size remains visibly nonconstant even if it is (eg) a const variable with known value. */ size_varies = 1; if (!flag_isoc99 && pedantic) { if (TREE_CONSTANT (size)) pedwarn ("ISO C90 forbids array `%s' whose size can't be evaluated", name); else pedwarn ("ISO C90 forbids variable-size array `%s'", name); } } if (integer_zerop (size)) { /* A zero-length array cannot be represented with an unsigned index type, which is what we'll get with build_index_type. Create an open-ended range instead. */ itype = build_range_type (sizetype, size, NULL_TREE); } else { /* Compute the maximum valid index, that is, size - 1. Do the calculation in index_type, so that if it is a variable the computations will be done in the proper mode. */ itype = fold (build (MINUS_EXPR, index_type, convert (index_type, size), convert (index_type, size_one_node))); /* If that overflowed, the array is too big. ??? While a size of INT_MAX+1 technically shouldn't cause an overflow (because we subtract 1), the overflow is recorded during the conversion to index_type, before the subtraction. Handling this case seems like an unnecessary complication. */ if (TREE_OVERFLOW (itype)) { error ("size of array `%s' is too large", name); type = error_mark_node; continue; } if (size_varies) itype = variable_size (itype); itype = build_index_type (itype); } } else if (decl_context == FIELD) { if (pedantic && !flag_isoc99 && !in_system_header) pedwarn ("ISO C90 does not support flexible array members"); /* ISO C99 Flexible array members are effectively identical to GCC's zero-length array extension. */ itype = build_range_type (sizetype, size_zero_node, NULL_TREE); } /* If pedantic, complain about arrays of incomplete types. */ if (pedantic && !COMPLETE_TYPE_P (type)) pedwarn ("array type has incomplete element type"); /* Build the array type itself, then merge any constancy or volatility into the target type. We must do it in this order to ensure that the TYPE_MAIN_VARIANT field of the array type is set correctly. */ type = build_array_type (type, itype); if (type_quals) type = c_build_qualified_type (type, type_quals); if (size_varies) C_TYPE_VARIABLE_SIZE (type) = 1; /* The GCC extension for zero-length arrays differs from ISO flexible array members in that sizeof yields zero. 
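   For illustration (a sketch of the two spellings):

       struct gnu { int len; char data[0]; };   -- GNU zero-length array: sizeof (data) is 0
       struct iso { int len; char data[]; };    -- C99 flexible member: data has incomplete
                                                   type, so sizeof does not apply to it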
*/ if (size && integer_zerop (size)) { layout_type (type); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; } else if (declarator && TREE_CODE (declarator) == INDIRECT_REF) /* We can never complete an array type which is the target of a pointer, so go ahead and lay it out. */ layout_type (type); if (decl_context != PARM && (array_ptr_quals != NULL_TREE || array_parm_static)) { error ("static or type qualifiers in non-parameter array declarator"); array_ptr_quals = NULL_TREE; array_parm_static = 0; } } else if (TREE_CODE (declarator) == CALL_EXPR) { /* Say it's a definition only for the CALL_EXPR closest to the identifier. */ bool really_funcdef = (funcdef_flag && (TREE_CODE (TREE_OPERAND (declarator, 0)) == IDENTIFIER_NODE)); tree arg_types; /* Declaring a function type. Make sure we have a valid type for the function to return. */ if (type == error_mark_node) continue; size_varies = 0; /* Warn about some types functions can't return. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error ("`%s' declared as function returning a function", name); type = integer_type_node; } if (TREE_CODE (type) == ARRAY_TYPE) { error ("`%s' declared as function returning an array", name); type = integer_type_node; } /* Construct the function type and go to the next inner layer of declarator. */ arg_info = TREE_OPERAND (declarator, 1); arg_types = grokparms (arg_info, really_funcdef); /* Type qualifiers before the return type of the function qualify the return type, not the function type. */ if (type_quals) { /* Type qualifiers on a function return type are normally permitted by the standard but have no effect, so give a warning at -Wextra. Qualifiers on a void return type have meaning as a GNU extension, and are banned on function definitions in ISO C. FIXME: strictly we shouldn't pedwarn for qualified void return types except on function definitions, but not doing so could lead to the undesirable state of a "volatile void" function return type not being warned about, and a use of the function being compiled with GNU semantics, with no diagnostics under -pedantic. */ if (VOID_TYPE_P (type) && pedantic && !in_system_header) pedwarn ("ISO C forbids qualified void function return type"); else if (extra_warnings && !(VOID_TYPE_P (type) && type_quals == TYPE_QUAL_VOLATILE)) warning ("type qualifiers ignored on function return type"); type = c_build_qualified_type (type, type_quals); } type_quals = TYPE_UNQUALIFIED; type = build_function_type (type, arg_types); declarator = TREE_OPERAND (declarator, 0); /* Set the TYPE_CONTEXTs for each tagged type which is local to the formal parameter list of this FUNCTION_TYPE to point to the FUNCTION_TYPE node itself. */ { tree link; for (link = ARG_INFO_TAGS (arg_info); link; link = TREE_CHAIN (link)) TYPE_CONTEXT (TREE_VALUE (link)) = type; } } else if (TREE_CODE (declarator) == INDIRECT_REF) { /* Merge any constancy or volatility into the target type for the pointer. */ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn ("ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); type_quals = TYPE_UNQUALIFIED; size_varies = 0; type = build_pointer_type (type); /* Process a list of type modifier keywords (such as const or volatile) that were given inside the `*'. 
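   For illustration, in a declaration such as

       char * const restrict p;

   the `const' and `restrict' written after the `*' qualify the pointer itself, not
   the characters pointed to; they are collected into type_quals here and applied at
   the next enclosing declarator layer (or to the declaration itself if this is the
   outermost one).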
*/ if (TREE_TYPE (declarator)) { tree typemodlist; int erred = 0; constp = 0; volatilep = 0; restrictp = 0; for (typemodlist = TREE_TYPE (declarator); typemodlist; typemodlist = TREE_CHAIN (typemodlist)) { tree qualifier = TREE_VALUE (typemodlist); if (C_IS_RESERVED_WORD (qualifier)) { if (C_RID_CODE (qualifier) == RID_CONST) constp++; else if (C_RID_CODE (qualifier) == RID_VOLATILE) volatilep++; else if (C_RID_CODE (qualifier) == RID_RESTRICT) restrictp++; else erred++; } else erred++; } if (erred) error ("invalid type modifier within pointer declarator"); if (pedantic && !flag_isoc99) { if (constp > 1) pedwarn ("duplicate `const'"); if (volatilep > 1) pedwarn ("duplicate `volatile'"); if (restrictp > 1) pedwarn ("duplicate `restrict'"); } type_quals = ((constp ? TYPE_QUAL_CONST : 0) | (restrictp ? TYPE_QUAL_RESTRICT : 0) | (volatilep ? TYPE_QUAL_VOLATILE : 0)); } declarator = TREE_OPERAND (declarator, 0); } else abort (); } /* Now TYPE has the actual type. */ /* Did array size calculations overflow? */ if (TREE_CODE (type) == ARRAY_TYPE && COMPLETE_TYPE_P (type) && TREE_OVERFLOW (TYPE_SIZE (type))) { error ("size of array `%s' is too large", name); /* If we proceed with the array type as it is, we'll eventually crash in tree_low_cst(). */ type = error_mark_node; } /* If this is declaring a typedef name, return a TYPE_DECL. */ if (specbits & (1 << (int) RID_TYPEDEF)) { tree decl; /* Note that the grammar rejects storage classes in typenames, fields or parameters */ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn ("ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); decl = build_decl (TYPE_DECL, declarator, type); if ((specbits & (1 << (int) RID_SIGNED)) || (typedef_decl && C_TYPEDEF_EXPLICITLY_SIGNED (typedef_decl))) C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1; decl_attributes (&decl, returned_attrs, 0); return decl; } /* Detect the case of an array type of unspecified size which came, as such, direct from a typedef name. We must copy the type, so that each identifier gets a distinct type, so that each identifier's size can be controlled separately by its own initializer. */ if (type != 0 && typedef_type != 0 && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == 0 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (typedef_type)) { type = build_array_type (TREE_TYPE (type), 0); if (size_varies) C_TYPE_VARIABLE_SIZE (type) = 1; } /* If this is a type name (such as, in a cast or sizeof), compute the type and return it now. */ if (decl_context == TYPENAME) { /* Note that the grammar rejects storage classes in typenames, fields or parameters */ if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn ("ISO C forbids const or volatile function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); decl_attributes (&type, returned_attrs, 0); return type; } /* Aside from typedefs and type names (handle above), `void' at top level (not within pointer) is allowed only in public variables. We don't complain about parms either, but that is because a better error message can be made later. */ if (VOID_TYPE_P (type) && decl_context != PARM && ! 
((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE) && ((specbits & (1 << (int) RID_EXTERN)) || (current_scope == file_scope && !(specbits & ((1 << (int) RID_STATIC) | (1 << (int) RID_REGISTER))))))) { error ("variable or field `%s' declared void", name); type = integer_type_node; } /* Now create the decl, which may be a VAR_DECL, a PARM_DECL or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */ { tree decl; if (decl_context == PARM) { tree type_as_written; tree promoted_type; /* A parameter declared as an array of T is really a pointer to T. One declared as a function is really a pointer to a function. */ if (TREE_CODE (type) == ARRAY_TYPE) { /* Transfer const-ness of array into that of type pointed to. */ type = TREE_TYPE (type); if (type_quals) type = c_build_qualified_type (type, type_quals); type = build_pointer_type (type); type_quals = TYPE_UNQUALIFIED; if (array_ptr_quals) { tree new_ptr_quals, new_ptr_attrs; int erred = 0; split_specs_attrs (array_ptr_quals, &new_ptr_quals, &new_ptr_attrs); /* We don't yet implement attributes in this context. */ if (new_ptr_attrs != NULL_TREE) warning ("attributes in parameter array declarator ignored"); constp = 0; volatilep = 0; restrictp = 0; for (; new_ptr_quals; new_ptr_quals = TREE_CHAIN (new_ptr_quals)) { tree qualifier = TREE_VALUE (new_ptr_quals); if (C_IS_RESERVED_WORD (qualifier)) { if (C_RID_CODE (qualifier) == RID_CONST) constp++; else if (C_RID_CODE (qualifier) == RID_VOLATILE) volatilep++; else if (C_RID_CODE (qualifier) == RID_RESTRICT) restrictp++; else erred++; } else erred++; } if (erred) error ("invalid type modifier within array declarator"); type_quals = ((constp ? TYPE_QUAL_CONST : 0) | (restrictp ? TYPE_QUAL_RESTRICT : 0) | (volatilep ? TYPE_QUAL_VOLATILE : 0)); } size_varies = 0; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (pedantic && type_quals) pedwarn ("ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); type = build_pointer_type (type); type_quals = TYPE_UNQUALIFIED; } else if (type_quals) type = c_build_qualified_type (type, type_quals); type_as_written = type; decl = build_decl (PARM_DECL, declarator, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; /* Compute the type actually passed in the parmlist, for the case where there is no prototype. (For example, shorts and chars are passed as ints.) When there is a prototype, this is overridden later. */ if (type == error_mark_node) promoted_type = type; else promoted_type = c_type_promotes_to (type); DECL_ARG_TYPE (decl) = promoted_type; DECL_ARG_TYPE_AS_WRITTEN (decl) = type_as_written; } else if (decl_context == FIELD) { /* Structure field. It may not be a function. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error ("field `%s' declared as a function", name); type = build_pointer_type (type); } else if (TREE_CODE (type) != ERROR_MARK && !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type)) { error ("field `%s' has incomplete type", name); type = error_mark_node; } /* Move type qualifiers down to element of an array. */ if (TREE_CODE (type) == ARRAY_TYPE && type_quals) type = build_array_type (c_build_qualified_type (TREE_TYPE (type), type_quals), TYPE_DOMAIN (type)); decl = build_decl (FIELD_DECL, declarator, type); DECL_NONADDRESSABLE_P (decl) = bitfield; if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; } else if (TREE_CODE (type) == FUNCTION_TYPE) { /* Every function declaration is "external" except for those which are inside a function body in which `auto' is used. 
That is a case not specified by ANSI C, and we use it for forward declarations for nested functions. */ int extern_ref = (!(specbits & (1 << (int) RID_AUTO)) || current_scope == file_scope); if (specbits & (1 << (int) RID_AUTO) && (pedantic || current_scope == file_scope)) pedwarn ("invalid storage class for function `%s'", name); if (specbits & (1 << (int) RID_REGISTER)) error ("invalid storage class for function `%s'", name); if (specbits & (1 << (int) RID_THREAD)) error ("invalid storage class for function `%s'", name); /* Function declaration not at file scope. Storage classes other than `extern' are not allowed and `extern' makes no difference. */ if (current_scope != file_scope && (specbits & ((1 << (int) RID_STATIC) | (1 << (int) RID_INLINE))) && pedantic) pedwarn ("invalid storage class for function `%s'", name); decl = build_decl (FUNCTION_DECL, declarator, type); decl = build_decl_attribute_variant (decl, decl_attr); DECL_LANG_SPECIFIC (decl) = ggc_alloc_cleared (sizeof (struct lang_decl)); if (pedantic && type_quals && ! DECL_IN_SYSTEM_HEADER (decl)) pedwarn ("ISO C forbids qualified function types"); /* GNU C interprets a `volatile void' return type to indicate that the function does not return. */ if ((type_quals & TYPE_QUAL_VOLATILE) && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl)))) warning ("`noreturn' function returns non-void value"); if (extern_ref) DECL_EXTERNAL (decl) = 1; /* Record absence of global scope for `static' or `auto'. */ TREE_PUBLIC (decl) = !(specbits & ((1 << (int) RID_STATIC) | (1 << (int) RID_AUTO))); /* For a function definition, record the argument information block in DECL_ARGUMENTS where store_parm_decls will look for it. */ if (funcdef_flag) DECL_ARGUMENTS (decl) = arg_info; if (defaulted_int) C_FUNCTION_IMPLICIT_INT (decl) = 1; /* Record presence of `inline', if it is reasonable. */ if (MAIN_NAME_P (declarator)) { if (inlinep) warning ("cannot inline function `main'"); } else if (inlinep) { /* Record that the function is declared `inline'. */ DECL_DECLARED_INLINE_P (decl) = 1; /* Do not mark bare declarations as DECL_INLINE. Doing so in the presence of multiple declarations can result in the abstract origin pointing between the declarations, which will confuse dwarf2out. */ if (initialized) { DECL_INLINE (decl) = 1; if (specbits & (1 << (int) RID_EXTERN)) current_extern_inline = 1; } } /* If -finline-functions, assume it can be inlined. This does two things: let the function be deferred until it is actually needed, and let dwarf2 know that the function is inlinable. */ else if (flag_inline_trees == 2 && initialized) DECL_INLINE (decl) = 1; } else { /* It's a variable. */ /* An uninitialized decl with `extern' is a reference. */ int extern_ref = !initialized && (specbits & (1 << (int) RID_EXTERN)); /* Move type qualifiers down to element of an array. */ if (TREE_CODE (type) == ARRAY_TYPE && type_quals) { int saved_align = TYPE_ALIGN(type); type = build_array_type (c_build_qualified_type (TREE_TYPE (type), type_quals), TYPE_DOMAIN (type)); TYPE_ALIGN (type) = saved_align; } else if (type_quals) type = c_build_qualified_type (type, type_quals); /* C99 6.2.2p7: It is invalid (compile-time undefined behavior) to create an 'extern' declaration for a variable if there is a global declaration that is 'static' and the global declaration is not visible. (If the static declaration _is_ currently visible, the 'extern' declaration is taken to refer to that decl.) 
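   For illustration, a sketch of the rejected case:

       static int counter;
       void f (void)
       {
         int counter;            -- hides the file-scope static
         {
           extern int counter;   -- error: the hidden file-scope declaration is static
         }
       }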
*/ if (extern_ref && current_scope != file_scope) { tree global_decl = identifier_global_value (declarator); tree visible_decl = lookup_name (declarator); if (global_decl && global_decl != visible_decl && TREE_CODE (global_decl) == VAR_DECL && !TREE_PUBLIC (global_decl)) error ("variable previously declared 'static' redeclared " "'extern'"); } decl = build_decl (VAR_DECL, declarator, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; if (inlinep) pedwarn ("%Jvariable '%D' declared `inline'", decl, decl); DECL_EXTERNAL (decl) = extern_ref; /* At file scope, the presence of a `static' or `register' storage class specifier, or the absence of all storage class specifiers makes this declaration a definition (perhaps tentative). Also, the absence of both `static' and `register' makes it public. */ if (current_scope == file_scope) { TREE_PUBLIC (decl) = !(specbits & ((1 << (int) RID_STATIC) | (1 << (int) RID_REGISTER))); TREE_STATIC (decl) = !extern_ref; } /* Not at file scope, only `static' makes a static definition. */ else { TREE_STATIC (decl) = (specbits & (1 << (int) RID_STATIC)) != 0; TREE_PUBLIC (decl) = extern_ref; } if (specbits & 1 << (int) RID_THREAD) { if (targetm.have_tls) DECL_THREAD_LOCAL (decl) = 1; else /* A mere warning is sure to result in improper semantics at runtime. Don't bother to allow this to compile. */ error ("thread-local storage not supported for this target"); } } /* Record `register' declaration for warnings on & and in case doing stupid register allocation. */ if (specbits & (1 << (int) RID_REGISTER)) { C_DECL_REGISTER (decl) = 1; DECL_REGISTER (decl) = 1; } /* Record constancy and volatility. */ c_apply_type_quals_to_decl (type_quals, decl); /* If a type has volatile components, it should be stored in memory. Otherwise, the fact that those components are volatile will be ignored, and would even crash the compiler. */ if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl))) { /* It is not an error for a structure with volatile fields to be declared register, but reset DECL_REGISTER since it cannot actually go in a register. */ int was_reg = C_DECL_REGISTER (decl); C_DECL_REGISTER (decl) = 0; DECL_REGISTER (decl) = 0; c_mark_addressable (decl); C_DECL_REGISTER (decl) = was_reg; } #ifdef ENABLE_CHECKING /* This is the earliest point at which we might know the assembler name of a variable. Thus, if it's known before this, die horribly. */ if (DECL_ASSEMBLER_NAME_SET_P (decl)) abort (); #endif decl_attributes (&decl, returned_attrs, 0); return decl; } } /* Decode the parameter-list info for a function type or function definition. The argument is the value returned by `get_parm_info' (or made in parse.y if there is an identifier list instead of a parameter decl list). These two functions are separate because when a function returns or receives functions then each is called multiple times but the order of calls is different. The last call to `grokparms' is always the one that contains the formal parameter names of a function definition. Return a list of arg types to use in the FUNCTION_TYPE for this function. FUNCDEF_FLAG is nonzero for a function definition, 0 for a mere declaration. A nonempty identifier-list gets an error message when FUNCDEF_FLAG is zero. 
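   For illustration, the identifier-list forms are the old-style ones:

       int f (a, b);                         -- mere declaration: names without types are diagnosed
       int f (a, b) int a; int b; { ... }    -- old-style definition: accepted; the parameter
                                                declarations before the body supply the types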
*/ static tree grokparms (tree arg_info, int funcdef_flag) { tree arg_types = ARG_INFO_TYPES (arg_info); if (warn_strict_prototypes && arg_types == 0 && !funcdef_flag && !in_system_header) warning ("function declaration isn't a prototype"); if (arg_types == error_mark_node) return 0; /* don't set TYPE_ARG_TYPES in this case */ else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE) { if (! funcdef_flag) pedwarn ("parameter names (without types) in function declaration"); ARG_INFO_PARMS (arg_info) = ARG_INFO_TYPES (arg_info); ARG_INFO_TYPES (arg_info) = 0; return 0; } else { tree parm, type, typelt; unsigned int parmno; /* If the arg types are incomplete in a declaration, they must include undefined tags. These tags can never be defined in the scope of the declaration, so the types can never be completed, and no call can be compiled successfully. */ for (parm = ARG_INFO_PARMS (arg_info), typelt = arg_types, parmno = 1; parm; parm = TREE_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++) { type = TREE_VALUE (typelt); if (type == error_mark_node) continue; if (!COMPLETE_TYPE_P (type)) { if (funcdef_flag) { if (DECL_NAME (parm)) error ("%Jparameter %u ('%D') has incomplete type", parm, parmno, parm); else error ("%Jparameter %u has incomplete type", parm, parmno); TREE_VALUE (typelt) = error_mark_node; TREE_TYPE (parm) = error_mark_node; } else { if (DECL_NAME (parm)) warning ("%Jparameter %u ('%D') has incomplete type", parm, parmno, parm); else warning ("%Jparameter %u has incomplete type", parm, parmno); } } } return arg_types; } } /* Take apart the current scope and return a tree_list node with info on a parameter list just parsed. This tree_list node should be examined using the ARG_INFO_* macros, defined above: ARG_INFO_PARMS: a list of parameter decls. ARG_INFO_TAGS: a list of structure, union and enum tags defined. ARG_INFO_TYPES: a list of argument types to go in the FUNCTION_TYPE. ARG_INFO_OTHERS: a list of non-parameter decls (notably enumeration constants) defined with the parameters. This tree_list node is later fed to 'grokparms' and 'store_parm_decls'. ELLIPSIS being true means the argument list ended in '...' so don't append a sentinel (void_list_node) to the end of the type-list. */ tree get_parm_info (bool ellipsis) { struct c_binding *b = current_scope->bindings; tree arg_info = make_node (TREE_LIST); tree parms = 0; tree tags = 0; tree types = 0; tree others = 0; static bool explained_incomplete_types = false; bool gave_void_only_once_err = false; /* The bindings in this scope must not get put into a block. We will take care of deleting the binding nodes. */ current_scope->bindings = 0; /* This function is only called if there was *something* on the parameter list. */ #ifdef ENABLE_CHECKING if (b == 0) abort (); #endif /* A parameter list consisting solely of 'void' indicates that the function takes no arguments. But if the 'void' is qualified (by 'const' or 'volatile'), or has a storage class specifier ('register'), then the behavior is undefined; issue an error. Typedefs for 'void' are OK (see DR#157). */ if (b->prev == 0 /* one binding */ && TREE_CODE (b->decl) == PARM_DECL /* which is a parameter */ && !DECL_NAME (b->decl) /* anonymous */ && VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */ { if (TREE_THIS_VOLATILE (b->decl) || TREE_READONLY (b->decl) || C_DECL_REGISTER (b->decl)) error ("'void' as only parameter may not be qualified"); /* There cannot be an ellipsis. 
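   For illustration, a prototype such as

       int f (void, ...);

   lands here and is rejected, since `void' must then be the only parameter.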
*/ if (ellipsis) error ("'void' must be the only parameter"); ARG_INFO_TYPES (arg_info) = void_list_node; return arg_info; } if (!ellipsis) types = void_list_node; /* Break up the bindings list into parms, tags, types, and others; apply sanity checks; purge the name-to-decl bindings. */ while (b) { tree decl = b->decl; tree type = TREE_TYPE (decl); const char *keyword; switch (TREE_CODE (decl)) { case PARM_DECL: if (b->id) { #ifdef ENABLE_CHECKING if (I_SYMBOL_BINDING (b->id) != b) abort (); #endif I_SYMBOL_BINDING (b->id) = b->shadowed; } /* Check for forward decls that never got their actual decl. */ if (TREE_ASM_WRITTEN (decl)) error ("%Jparameter '%D' has just a forward declaration", decl, decl); /* Check for (..., void, ...) and issue an error. */ else if (VOID_TYPE_P (type) && !DECL_NAME (decl)) { if (!gave_void_only_once_err) { error ("'void' must be the only parameter"); gave_void_only_once_err = true; } } else { /* Valid parameter, add it to the list. */ TREE_CHAIN (decl) = parms; parms = decl; /* Since there is a prototype, args are passed in their declared types. The back end may override this later. */ DECL_ARG_TYPE (decl) = type; types = tree_cons (0, type, types); } break; case ENUMERAL_TYPE: keyword = "enum"; goto tag; case UNION_TYPE: keyword = "union"; goto tag; case RECORD_TYPE: keyword = "struct"; goto tag; tag: /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { #ifdef ENABLE_CHECKING if (I_TAG_BINDING (b->id) != b) abort (); #endif I_TAG_BINDING (b->id) = b->shadowed; } /* Warn about any struct, union or enum tags defined in a parameter list. The scope of such types is limited to the parameter list, which is rarely if ever desirable (it's impossible to call such a function with type- correct arguments). An anonymous union parm type is meaningful as a GNU extension, so don't warn for that. */ if (TREE_CODE (decl) != UNION_TYPE || b->id != 0) { if (b->id) /* The %s will be one of 'struct', 'union', or 'enum'. */ warning ("'%s %E' declared inside parameter list", keyword, b->id); else /* The %s will be one of 'struct', 'union', or 'enum'. */ warning ("anonymous %s declared inside parameter list", keyword); if (! explained_incomplete_types) { warning ("its scope is only this definition or declaration," " which is probably not what you want"); explained_incomplete_types = true; } } tags = tree_cons (b->id, decl, tags); break; case CONST_DECL: case TYPE_DECL: /* CONST_DECLs appear here when we have an embedded enum, and TYPE_DECLs appear here when we have an embedded struct or union. No warnings for this - we already warned about the type itself. */ TREE_CHAIN (decl) = others; others = decl; /* fall through */ case ERROR_MARK: /* error_mark_node appears here when we have an undeclared variable. Just throw it away. */ if (b->id) { #ifdef ENABLE_CHECKING if (I_SYMBOL_BINDING (b->id) != b) abort (); #endif I_SYMBOL_BINDING (b->id) = b->shadowed; } break; /* Other things that might be encountered. */ case LABEL_DECL: case FUNCTION_DECL: case VAR_DECL: default: abort (); } b = free_binding_and_advance (b); } ARG_INFO_PARMS (arg_info) = parms; ARG_INFO_TAGS (arg_info) = tags; ARG_INFO_TYPES (arg_info) = types; ARG_INFO_OTHERS (arg_info) = others; return arg_info; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference if it is not defined. 
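   For illustration, a reference such as

       struct node *head;

   with no prior definition of `struct node' arrives here and produces the
   forward-reference node described below.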
*/ tree xref_tag (enum tree_code code, tree name) { /* If a cross reference is requested, look up the type already defined for this tag and return it. */ tree ref = lookup_tag (code, name, 0); /* If this is the right type of tag, return what we found. (This reference will be shadowed by shadow_tag later if appropriate.) If this is the wrong type of tag, do not return it. If it was the wrong type in the same scope, we will have had an error message already; if in a different scope and declaring a name, pending_xref_error will give an error message; but if in a different scope and not declaring a name, this tag should shadow the previous declaration of a different type of tag, and this would not work properly if we return the reference found. (For example, with "struct foo" in an outer scope, "union foo;" must shadow that tag with a new one of union type.) */ if (ref && TREE_CODE (ref) == code) return ref; /* If no such tag is yet defined, create a forward-reference node and record it as the "definition". When a real declaration of this type is found, the forward-reference will be altered into a real type. */ ref = make_node (code); if (code == ENUMERAL_TYPE) { /* Give the type a default layout like unsigned int to avoid crashing if it does not get defined. */ TYPE_MODE (ref) = TYPE_MODE (unsigned_type_node); TYPE_ALIGN (ref) = TYPE_ALIGN (unsigned_type_node); TYPE_USER_ALIGN (ref) = 0; TYPE_UNSIGNED (ref) = 1; TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node); TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node); TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node); } pushtag (name, ref); return ref; } /* Make sure that the tag NAME is defined *in the current scope* at least as a forward reference. CODE says which kind of tag NAME ought to be. */ tree start_struct (enum tree_code code, tree name) { /* If there is already a tag defined at this scope (as a forward reference), just return it. */ tree ref = 0; if (name != 0) ref = lookup_tag (code, name, 1); if (ref && TREE_CODE (ref) == code) { if (TYPE_FIELDS (ref)) { if (code == UNION_TYPE) error ("redefinition of `union %s'", IDENTIFIER_POINTER (name)); else error ("redefinition of `struct %s'", IDENTIFIER_POINTER (name)); } } else { /* Otherwise create a forward-reference just so the tag is in scope. */ ref = make_node (code); pushtag (name, ref); } C_TYPE_BEING_DEFINED (ref) = 1; TYPE_PACKED (ref) = flag_pack_struct; return ref; } /* Process the specs, declarator (NULL if omitted) and width (NULL if omitted) of a structure component, returning a FIELD_DECL node. WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node. This is done during the parsing of the struct declaration. The FIELD_DECL nodes are chained together and the lot of them are ultimately passed to `build_struct' to make the RECORD_TYPE node. */ tree grokfield (tree declarator, tree declspecs, tree width) { tree value; if (declarator == NULL_TREE && width == NULL_TREE) { /* This is an unnamed decl. If we have something of the form "union { list } ;" then this is the anonymous union extension. Similarly for struct. If this is something of the form "struct foo;", then If MS extensions are enabled, this is handled as an anonymous struct. Otherwise this is a forward declaration of a structure tag. If this is something of the form "foo;" and foo is a TYPE_DECL, then If MS extensions are enabled and foo names a structure, then again this is an anonymous struct. Otherwise this is an error. Oh what a horrid tangled web we weave. 
I wonder if MS consciously took this from Plan 9 or if it was an accident of implementation that took root before someone noticed the bug... */ tree type = TREE_VALUE (declspecs); if (flag_ms_extensions && TREE_CODE (type) == TYPE_DECL) type = TREE_TYPE (type); if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE) { if (flag_ms_extensions) ; /* ok */ else if (flag_iso) goto warn_unnamed_field; else if (TYPE_NAME (type) == NULL) ; /* ok */ else goto warn_unnamed_field; } else { warn_unnamed_field: warning ("declaration does not declare anything"); return NULL_TREE; } } value = grokdeclarator (declarator, declspecs, FIELD, 0, width ? &width : NULL); finish_decl (value, NULL_TREE, NULL_TREE); DECL_INITIAL (value) = width; return value; } /* Generate an error for any duplicate field names in FIELDLIST. Munge the list such that this does not present a problem later. */ static void detect_field_duplicates (tree fieldlist) { tree x, y; int timeout = 10; /* First, see if there are more than "a few" fields. This is trivially true if there are zero or one fields. */ if (!fieldlist) return; x = TREE_CHAIN (fieldlist); if (!x) return; do { timeout--; x = TREE_CHAIN (x); } while (timeout > 0 && x); /* If there were "few" fields, avoid the overhead of allocating a hash table. Instead just do the nested traversal thing. */ if (timeout > 0) { for (x = TREE_CHAIN (fieldlist); x ; x = TREE_CHAIN (x)) if (DECL_NAME (x)) { for (y = fieldlist; y != x; y = TREE_CHAIN (y)) if (DECL_NAME (y) == DECL_NAME (x)) { error ("%Jduplicate member '%D'", x, x); DECL_NAME (x) = NULL_TREE; } } } else { htab_t htab = htab_create (37, htab_hash_pointer, htab_eq_pointer, NULL); void **slot; for (x = fieldlist; x ; x = TREE_CHAIN (x)) if ((y = DECL_NAME (x)) != 0) { slot = htab_find_slot (htab, y, INSERT); if (*slot) { error ("%Jduplicate member '%D'", x, x); DECL_NAME (x) = NULL_TREE; } *slot = y; } htab_delete (htab); } } /* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T. FIELDLIST is a chain of FIELD_DECL nodes for the fields. ATTRIBUTES are attributes to be applied to the structure. */ tree finish_struct (tree t, tree fieldlist, tree attributes) { tree x; bool toplevel = file_scope == current_scope; int saw_named_field; /* If this type was previously laid out as a forward reference, make sure we lay it out again. */ TYPE_SIZE (t) = 0; decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); if (pedantic) { for (x = fieldlist; x; x = TREE_CHAIN (x)) if (DECL_NAME (x) != 0) break; if (x == 0) pedwarn ("%s has no %s", TREE_CODE (t) == UNION_TYPE ? _("union") : _("struct"), fieldlist ? _("named members") : _("members")); } /* Install struct as DECL_CONTEXT of each field decl. Also process specified field sizes,m which is found in the DECL_INITIAL. Store 0 there, except for ": 0" fields (so we can find them and delete them, below). */ saw_named_field = 0; for (x = fieldlist; x; x = TREE_CHAIN (x)) { DECL_CONTEXT (x) = t; DECL_PACKED (x) |= TYPE_PACKED (t); /* If any field is const, the structure type is pseudo-const. */ if (TREE_READONLY (x)) C_TYPE_FIELDS_READONLY (t) = 1; else { /* A field that is pseudo-const makes the structure likewise. */ tree t1 = TREE_TYPE (x); while (TREE_CODE (t1) == ARRAY_TYPE) t1 = TREE_TYPE (t1); if ((TREE_CODE (t1) == RECORD_TYPE || TREE_CODE (t1) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (t1)) C_TYPE_FIELDS_READONLY (t) = 1; } /* Any field that is volatile means variables of this type must be treated in some ways as volatile. 
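   (For example, with a hypothetical

        struct dev { volatile int status; int count; };

    accesses to 'status' in objects of type 'struct dev' must not be
    cached or deleted; the C_TYPE_FIELDS_VOLATILE flag set below is
    what records this for the whole type.)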
*/ if (TREE_THIS_VOLATILE (x)) C_TYPE_FIELDS_VOLATILE (t) = 1; /* Any field of nominal variable size implies structure is too. */ if (C_DECL_VARIABLE_SIZE (x)) C_TYPE_VARIABLE_SIZE (t) = 1; /* Detect invalid nested redefinition. */ if (TREE_TYPE (x) == t) error ("nested redefinition of `%s'", IDENTIFIER_POINTER (TYPE_NAME (t))); if (DECL_INITIAL (x)) { unsigned HOST_WIDE_INT width = tree_low_cst (DECL_INITIAL (x), 1); DECL_SIZE (x) = bitsize_int (width); DECL_BIT_FIELD (x) = 1; SET_DECL_C_BIT_FIELD (x); } DECL_INITIAL (x) = 0; /* Detect flexible array member in an invalid context. */ if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) { if (TREE_CODE (t) == UNION_TYPE) { error ("%Jflexible array member in union", x); TREE_TYPE (x) = error_mark_node; } else if (TREE_CHAIN (x) != NULL_TREE) { error ("%Jflexible array member not at end of struct", x); TREE_TYPE (x) = error_mark_node; } else if (! saw_named_field) { error ("%Jflexible array member in otherwise empty struct", x); TREE_TYPE (x) = error_mark_node; } } if (pedantic && !in_system_header && TREE_CODE (t) == RECORD_TYPE && flexible_array_type_p (TREE_TYPE (x))) pedwarn ("%Jinvalid use of structure with flexible array member", x); if (DECL_NAME (x)) saw_named_field = 1; } detect_field_duplicates (fieldlist); /* Now we have the nearly final fieldlist. Record it, then lay out the structure or union (including the fields). */ TYPE_FIELDS (t) = fieldlist; layout_type (t); /* Delete all zero-width bit-fields from the fieldlist. */ { tree *fieldlistp = &fieldlist; while (*fieldlistp) if (TREE_CODE (*fieldlistp) == FIELD_DECL && DECL_INITIAL (*fieldlistp)) *fieldlistp = TREE_CHAIN (*fieldlistp); else fieldlistp = &TREE_CHAIN (*fieldlistp); } /* Now we have the truly final field list. Store it in this type and in the variants. */ TYPE_FIELDS (t) = fieldlist; /* If there are lots of fields, sort so we can look through them fast. We arbitrarily consider 16 or more elts to be "a lot". */ { int len = 0; for (x = fieldlist; x; x = TREE_CHAIN (x)) { if (len > 15 || DECL_NAME (x) == NULL) break; len += 1; } if (len > 15) { tree *field_array; struct lang_type *space; struct sorted_fields_type *space2; len += list_length (x); /* Use the same allocation policy here that make_node uses, to ensure that this lives as long as the rest of the struct decl. All decls in an inline function need to be saved. */ space = ggc_alloc_cleared (sizeof (struct lang_type)); space2 = ggc_alloc (sizeof (struct sorted_fields_type) + len * sizeof (tree)); len = 0; space->s = space2; field_array = &space2->elts[0]; for (x = fieldlist; x; x = TREE_CHAIN (x)) { field_array[len++] = x; /* If there is anonymous struct or union, break out of the loop. */ if (DECL_NAME (x) == NULL) break; } /* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. */ if (x == NULL) { TYPE_LANG_SPECIFIC (t) = space; TYPE_LANG_SPECIFIC (t)->s->len = len; field_array = TYPE_LANG_SPECIFIC (t)->s->elts; qsort (field_array, len, sizeof (tree), field_decl_cmp); } } } for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x)) { TYPE_FIELDS (x) = TYPE_FIELDS (t); TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t); TYPE_ALIGN (x) = TYPE_ALIGN (t); TYPE_USER_ALIGN (x) = TYPE_USER_ALIGN (t); } /* If this was supposed to be a transparent union, but we can't make it one, warn and turn off the flag. 
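   (For instance, a hypothetical

        union u { short s; long long ll; } __attribute__ ((transparent_union));

    cannot be made transparent on a target where the union's machine
    mode differs from that of its first member; the test below compares
    TYPE_MODE of the union with DECL_MODE of its first field.)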
*/ if (TREE_CODE (t) == UNION_TYPE && TYPE_TRANSPARENT_UNION (t) && TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t))) { TYPE_TRANSPARENT_UNION (t) = 0; warning ("union cannot be made transparent"); } /* If this structure or union completes the type of any previous variable declaration, lay it out and output its rtl. */ for (x = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)); x; x = TREE_CHAIN (x)) { tree decl = TREE_VALUE (x); if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (decl)); if (TREE_CODE (decl) != TYPE_DECL) { layout_decl (decl, 0); if (c_dialect_objc ()) objc_check_decl (decl); rest_of_decl_compilation (decl, NULL, toplevel, 0); if (! toplevel) expand_decl (decl); } } C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)) = 0; /* Finish debugging output for this type. */ rest_of_type_compilation (t, toplevel); return t; } /* Lay out the type T, and its element type, and so on. */ static void layout_array_type (tree t) { if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (t)); layout_type (t); } /* Begin compiling the definition of an enumeration type. NAME is its name (or null if anonymous). Returns the type object, as yet incomplete. Also records info about it so that build_enumerator may be used to declare the individual values as they are read. */ tree start_enum (tree name) { tree enumtype = 0; /* If this is the real definition for a previous forward reference, fill in the contents in the same object that used to be the forward reference. */ if (name != 0) enumtype = lookup_tag (ENUMERAL_TYPE, name, 1); if (enumtype == 0 || TREE_CODE (enumtype) != ENUMERAL_TYPE) { enumtype = make_node (ENUMERAL_TYPE); pushtag (name, enumtype); } C_TYPE_BEING_DEFINED (enumtype) = 1; if (TYPE_VALUES (enumtype) != 0) { /* This enum is a named one that has been declared already. */ error ("redeclaration of `enum %s'", IDENTIFIER_POINTER (name)); /* Completely replace its old definition. The old enumerators remain defined, however. */ TYPE_VALUES (enumtype) = 0; } enum_next_value = integer_zero_node; enum_overflow = 0; if (flag_short_enums) TYPE_PACKED (enumtype) = 1; return enumtype; } /* After processing and defining all the values of an enumeration type, install their decls in the enumeration type and finish it off. ENUMTYPE is the type object, VALUES a list of decl-value pairs, and ATTRIBUTES are the specified attributes. Returns ENUMTYPE. */ tree finish_enum (tree enumtype, tree values, tree attributes) { tree pair, tem; tree minnode = 0, maxnode = 0; int precision, unsign; bool toplevel = (file_scope == current_scope); struct lang_type *lt; decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); /* Calculate the maximum value of any enumerator in this type. */ if (values == error_mark_node) minnode = maxnode = integer_zero_node; else { minnode = maxnode = TREE_VALUE (values); for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair)) { tree value = TREE_VALUE (pair); if (tree_int_cst_lt (maxnode, value)) maxnode = value; if (tree_int_cst_lt (value, minnode)) minnode = value; } } /* Construct the final type of this enumeration. It is the same as one of the integral types - the narrowest one that fits, except that normally we only go as narrow as int - and signed iff any of the values are negative. 
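   (As hypothetical examples: enum { A, B = 1000 } is laid out like
    unsigned int, enum { M = -1, N } like signed int, and only a packed
    enum (TYPE_PACKED, e.g. from -fshort-enums) or one whose values do
    not fit in int goes through c_common_type_for_size for a narrower
    or wider type.)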
*/ unsign = (tree_int_cst_sgn (minnode) >= 0); precision = MAX (min_precision (minnode, unsign), min_precision (maxnode, unsign)); if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node)) { tem = c_common_type_for_size (precision, unsign); if (tem == NULL) { warning ("enumeration values exceed range of largest integer"); tem = long_long_integer_type_node; } } else tem = unsign ? unsigned_type_node : integer_type_node; TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem); TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem); TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem); TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem); TYPE_SIZE (enumtype) = 0; layout_type (enumtype); if (values != error_mark_node) { /* Change the type of the enumerators to be the enum type. We need to do this irrespective of the size of the enum, for proper type checking. Replace the DECL_INITIALs of the enumerators, and the value slots of the list, with copies that have the enum type; they cannot be modified in place because they may be shared (e.g. integer_zero_node) Finally, change the purpose slots to point to the names of the decls. */ for (pair = values; pair; pair = TREE_CHAIN (pair)) { tree enu = TREE_PURPOSE (pair); tree ini = DECL_INITIAL (enu); TREE_TYPE (enu) = enumtype; /* The ISO C Standard mandates enumerators to have type int, even though the underlying type of an enum type is unspecified. Here we convert any enumerators that fit in an int to type int, to avoid promotions to unsigned types when comparing integers with enumerators that fit in the int range. When -pedantic is given, build_enumerator() would have already taken care of those that don't fit. */ if (int_fits_type_p (ini, integer_type_node)) tem = integer_type_node; else tem = enumtype; ini = convert (tem, ini); DECL_INITIAL (enu) = ini; TREE_PURPOSE (pair) = DECL_NAME (enu); TREE_VALUE (pair) = ini; } TYPE_VALUES (enumtype) = values; } /* Record the min/max values so that we can warn about bit-field enumerations that are too small for the values. */ lt = ggc_alloc_cleared (sizeof (struct lang_type)); lt->enum_min = minnode; lt->enum_max = maxnode; TYPE_LANG_SPECIFIC (enumtype) = lt; /* Fix up all variant types of this enum type. */ for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem)) { if (tem == enumtype) continue; TYPE_VALUES (tem) = TYPE_VALUES (enumtype); TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype); TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype); TYPE_SIZE (tem) = TYPE_SIZE (enumtype); TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype); TYPE_MODE (tem) = TYPE_MODE (enumtype); TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype); TYPE_ALIGN (tem) = TYPE_ALIGN (enumtype); TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype); TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype); TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype); } /* Finish debugging output for this type. */ rest_of_type_compilation (enumtype, toplevel); return enumtype; } /* Build and install a CONST_DECL for one value of the current enumeration type (one that was begun with start_enum). Return a tree-list containing the CONST_DECL and its value. Assignment of sequential values by default is handled here. */ tree build_enumerator (tree name, tree value) { tree decl, type; /* Validate and default VALUE. */ /* Remove no-op casts from the value. */ if (value) STRIP_TYPE_NOPS (value); if (value != 0) { /* Don't issue more errors for error_mark_node (i.e. an undeclared identifier) - just ignore the value expression. 
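   (E.g. in a hypothetical enum e { FIRST = undeclared_name, NEXT };
    the undeclared identifier has already drawn an error, so FIRST
    simply gets the default sequential value below.)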
*/ if (value == error_mark_node) value = 0; else if (TREE_CODE (value) != INTEGER_CST) { error ("enumerator value for '%E' is not an integer constant", name); value = 0; } else { value = default_conversion (value); constant_expression_warning (value); } } /* Default based on previous value. */ /* It should no longer be possible to have NON_LVALUE_EXPR in the default. */ if (value == 0) { value = enum_next_value; if (enum_overflow) error ("overflow in enumeration values"); } if (pedantic && ! int_fits_type_p (value, integer_type_node)) { pedwarn ("ISO C restricts enumerator values to range of `int'"); /* XXX This causes -pedantic to change the meaning of the program. Remove? -zw 2004-03-15 */ value = convert (integer_type_node, value); } /* Set basis for default for next value. */ enum_next_value = build_binary_op (PLUS_EXPR, value, integer_one_node, 0); enum_overflow = tree_int_cst_lt (enum_next_value, value); /* Now create a declaration for the enum value name. */ type = TREE_TYPE (value); type = c_common_type_for_size (MAX (TYPE_PRECISION (type), TYPE_PRECISION (integer_type_node)), (TYPE_PRECISION (type) >= TYPE_PRECISION (integer_type_node) && TYPE_UNSIGNED (type))); decl = build_decl (CONST_DECL, name, type); DECL_INITIAL (decl) = convert (type, value); pushdecl (decl); return tree_cons (decl, value, NULL_TREE); } /* Create the FUNCTION_DECL for a function definition. DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of the declaration; they describe the function's name and the type it returns, but twisted together in a fashion that parallels the syntax of C. This function creates a binding context for the function body as well as setting up the FUNCTION_DECL in current_function_decl. Returns 1 on success. If the DECLARATOR is not suitable for a function (it defines a datum instead), we return 0, which tells yyparse to report a parse error. */ int start_function (tree declspecs, tree declarator, tree attributes) { tree decl1, old_decl; tree restype; current_function_returns_value = 0; /* Assume, until we see it does. */ current_function_returns_null = 0; current_function_returns_abnormally = 0; warn_about_return_type = 0; current_extern_inline = 0; c_switch_stack = NULL; /* Indicate no valid break/continue context by setting these variables to some non-null, non-label value. We'll notice and emit the proper error message in c_finish_bc_stmt. */ c_break_label = c_cont_label = size_zero_node; decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, 1, NULL); /* If the declarator is not suitable for a function definition, cause a syntax error. */ if (decl1 == 0) return 0; decl_attributes (&decl1, attributes, 0); if (DECL_DECLARED_INLINE_P (decl1) && DECL_UNINLINABLE (decl1) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1))) warning ("%Jinline function '%D' given attribute noinline", decl1, decl1); announce_function (decl1); if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1)))) { error ("return type is an incomplete type"); /* Make it return void instead. */ TREE_TYPE (decl1) = build_function_type (void_type_node, TYPE_ARG_TYPES (TREE_TYPE (decl1))); } if (warn_about_return_type) pedwarn_c99 ("return type defaults to `int'"); /* Make the init_value nonzero so pushdecl knows this is not tentative. error_mark_node is replaced below (in pop_scope) with the BLOCK. */ DECL_INITIAL (decl1) = error_mark_node; /* If this definition isn't a prototype and we had a prototype declaration before, copy the arg type info from that prototype. 
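   (For example, after a prior prototype int foo (double); a
    hypothetical old-style definition int foo (x) double x; { ... }
    picks up the prototype's argument types here.)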
But not if what we had before was a builtin function. */ old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope); if (old_decl != 0 && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE && !DECL_BUILT_IN (old_decl) && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1))) == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (old_decl)))) && TYPE_ARG_TYPES (TREE_TYPE (decl1)) == 0) { TREE_TYPE (decl1) = TREE_TYPE (old_decl); current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl); } /* Optionally warn of old-fashioned def with no previous prototype. */ if (warn_strict_prototypes && TYPE_ARG_TYPES (TREE_TYPE (decl1)) == 0 && C_DECL_ISNT_PROTOTYPE (old_decl)) warning ("function declaration isn't a prototype"); /* Optionally warn of any global def with no previous prototype. */ else if (warn_missing_prototypes && TREE_PUBLIC (decl1) && ! MAIN_NAME_P (DECL_NAME (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl)) warning ("%Jno previous prototype for '%D'", decl1, decl1); /* Optionally warn of any def with no previous prototype if the function has already been used. */ else if (warn_missing_prototypes && old_decl != 0 && TREE_USED (old_decl) && TYPE_ARG_TYPES (TREE_TYPE (old_decl)) == 0) warning ("%J'%D' was used with no prototype before its definition", decl1, decl1); /* Optionally warn of any global def with no previous declaration. */ else if (warn_missing_declarations && TREE_PUBLIC (decl1) && old_decl == 0 && ! MAIN_NAME_P (DECL_NAME (decl1))) warning ("%Jno previous declaration for '%D'", decl1, decl1); /* Optionally warn of any def with no previous declaration if the function has already been used. */ else if (warn_missing_declarations && old_decl != 0 && TREE_USED (old_decl) && C_DECL_IMPLICIT (old_decl)) warning ("%J`%D' was used with no declaration before its definition", decl1, decl1); /* This is a definition, not a reference. So normally clear DECL_EXTERNAL. However, `extern inline' acts like a declaration except for defining how to inline. So set DECL_EXTERNAL in that case. */ DECL_EXTERNAL (decl1) = current_extern_inline; /* This function exists in static storage. (This does not mean `static' in the C sense!) */ TREE_STATIC (decl1) = 1; /* A nested function is not global. */ if (current_function_decl != 0) TREE_PUBLIC (decl1) = 0; #ifdef ENABLE_CHECKING /* This is the earliest point at which we might know the assembler name of the function. Thus, if it's set before this, die horribly. */ if (DECL_ASSEMBLER_NAME_SET_P (decl1)) abort (); #endif /* If #pragma weak was used, mark the decl weak now. */ if (current_scope == file_scope) maybe_apply_pragma_weak (decl1); /* Warn for unlikely, improbable, or stupid declarations of `main'. */ if (warn_main > 0 && MAIN_NAME_P (DECL_NAME (decl1))) { tree args; int argct = 0; if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1))) != integer_type_node) pedwarn ("%Jreturn type of '%D' is not `int'", decl1, decl1); for (args = TYPE_ARG_TYPES (TREE_TYPE (decl1)); args; args = TREE_CHAIN (args)) { tree type = args ? 
TREE_VALUE (args) : 0; if (type == void_type_node) break; ++argct; switch (argct) { case 1: if (TYPE_MAIN_VARIANT (type) != integer_type_node) pedwarn ("%Jfirst argument of '%D' should be `int'", decl1, decl1); break; case 2: if (TREE_CODE (type) != POINTER_TYPE || TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (type))) != char_type_node)) pedwarn ("%Jsecond argument of '%D' should be 'char **'", decl1, decl1); break; case 3: if (TREE_CODE (type) != POINTER_TYPE || TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (type))) != char_type_node)) pedwarn ("%Jthird argument of '%D' should probably be " "'char **'", decl1, decl1); break; } } /* It is intentional that this message does not mention the third argument because it's only mentioned in an appendix of the standard. */ if (argct > 0 && (argct < 2 || argct > 3)) pedwarn ("%J'%D' takes only zero or two arguments", decl1, decl1); if (! TREE_PUBLIC (decl1)) pedwarn ("%J'%D' is normally a non-static function", decl1, decl1); } /* Record the decl so that the function name is defined. If we already have a decl for this name, and it is a FUNCTION_DECL, use the old decl. */ current_function_decl = pushdecl (decl1); push_scope (); declare_parm_level (); restype = TREE_TYPE (TREE_TYPE (current_function_decl)); /* Promote the value to int before returning it. */ if (c_promoting_integer_type_p (restype)) { /* It retains unsignedness if not really getting wider. */ if (TYPE_UNSIGNED (restype) && (TYPE_PRECISION (restype) == TYPE_PRECISION (integer_type_node))) restype = unsigned_type_node; else restype = integer_type_node; } DECL_RESULT (current_function_decl) = build_decl (RESULT_DECL, NULL_TREE, restype); start_fname_decls (); return 1; } /* Subroutine of store_parm_decls which handles new-style function definitions (prototype format). The parms already have decls, so we need only record them as in effect and complain if any redundant old-style parm decls were written. */ static void store_parm_decls_newstyle (tree fndecl, tree arg_info) { tree decl; tree parms = ARG_INFO_PARMS (arg_info); tree tags = ARG_INFO_TAGS (arg_info); tree others = ARG_INFO_OTHERS (arg_info); if (current_scope->bindings) { error ("%Jold-style parameter declarations in prototyped " "function definition", fndecl); /* Get rid of the old-style declarations. */ pop_scope (); push_scope (); } /* Don't issue this warning for nested functions, and don't issue this warning if we got here because ARG_INFO_TYPES was error_mark_node (this happens when a function definition has just an ellipsis in its parameter list). */ else if (warn_traditional && !in_system_header && !current_function_scope && ARG_INFO_TYPES (arg_info) != error_mark_node) warning ("%Jtraditional C rejects ISO C style function definitions", fndecl); /* Now make all the parameter declarations visible in the function body. We can bypass most of the grunt work of pushdecl. */ for (decl = parms; decl; decl = TREE_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/false); else error ("%Jparameter name omitted", decl); } /* Record the parameter list in the function declaration. */ DECL_ARGUMENTS (fndecl) = parms; /* Now make all the ancillary declarations visible, likewise. 
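   (These are the CONST_DECLs and TYPE_DECLs collected by get_parm_info,
    e.g. the enumerator E from a hypothetical
    void f (enum tag { E } arg) { ... } definition; binding them here
    makes E usable in the function body.)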
*/ for (decl = others; decl; decl = TREE_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/false); } /* And all the tag declarations. */ for (decl = tags; decl; decl = TREE_CHAIN (decl)) if (TREE_PURPOSE (decl)) bind (TREE_PURPOSE (decl), TREE_VALUE (decl), current_scope, /*invisible=*/false, /*nested=*/false); } /* Subroutine of store_parm_decls which handles old-style function definitions (separate parameter list and declarations). */ static void store_parm_decls_oldstyle (tree fndecl, tree arg_info) { struct c_binding *b; tree parm, decl, last; tree parmids = ARG_INFO_PARMS (arg_info); /* We use DECL_WEAK as a flag to show which parameters have been seen already, since it is not used on PARM_DECL. */ #ifdef ENABLE_CHECKING for (b = current_scope->bindings; b; b = b->prev) if (TREE_CODE (b->decl) == PARM_DECL && DECL_WEAK (b->decl)) abort (); #endif if (warn_old_style_definition && !in_system_header) warning ("%Jold-style function definition", fndecl); /* Match each formal parameter name with its declaration. Save each decl in the appropriate TREE_PURPOSE slot of the parmids chain. */ for (parm = parmids; parm; parm = TREE_CHAIN (parm)) { if (TREE_VALUE (parm) == 0) { error ("%Jparameter name missing from parameter list", fndecl); TREE_PURPOSE (parm) = 0; continue; } b = I_SYMBOL_BINDING (TREE_VALUE (parm)); if (b && B_IN_CURRENT_SCOPE (b)) { decl = b->decl; /* If we got something other than a PARM_DECL it is an error. */ if (TREE_CODE (decl) != PARM_DECL) error ("%J'%D' declared as a non-parameter", decl, decl); /* If the declaration is already marked, we have a duplicate name. Complain and ignore the duplicate. */ else if (DECL_WEAK (decl)) { error ("%Jmultiple parameters named '%D'", decl, decl); TREE_PURPOSE (parm) = 0; continue; } /* If the declaration says "void", complain and turn it into an int. */ else if (VOID_TYPE_P (TREE_TYPE (decl))) { error ("%Jparameter '%D' declared with void type", decl, decl); TREE_TYPE (decl) = integer_type_node; DECL_ARG_TYPE (decl) = integer_type_node; layout_decl (decl, 0); } } /* If no declaration found, default to int. */ else { decl = build_decl (PARM_DECL, TREE_VALUE (parm), integer_type_node); DECL_ARG_TYPE (decl) = TREE_TYPE (decl); DECL_SOURCE_LOCATION (decl) = DECL_SOURCE_LOCATION (fndecl); pushdecl (decl); if (flag_isoc99) pedwarn ("%Jtype of '%D' defaults to 'int'", decl, decl); else if (extra_warnings) warning ("%Jtype of '%D' defaults to 'int'", decl, decl); } TREE_PURPOSE (parm) = decl; DECL_WEAK (decl) = 1; } /* Now examine the parms chain for incomplete declarations and declarations with no corresponding names. */ for (b = current_scope->bindings; b; b = b->prev) { parm = b->decl; if (TREE_CODE (parm) != PARM_DECL) continue; if (!COMPLETE_TYPE_P (TREE_TYPE (parm))) { error ("%Jparameter '%D' has incomplete type", parm, parm); TREE_TYPE (parm) = error_mark_node; } if (! DECL_WEAK (parm)) { error ("%Jdeclaration for parameter '%D' but no such parameter", parm, parm); /* Pretend the parameter was not missing. This gets us to a standard state and minimizes further error messages. */ parmids = chainon (parmids, tree_cons (parm, 0, 0)); } } /* Chain the declarations together in the order of the list of names. Store that chain in the function decl, replacing the list of names. Update the current scope to match. 
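   (So for a hypothetical old-style definition

        int f (a, b) char *b; int a; { ... }

    DECL_ARGUMENTS ends up as the chain a -> b, following the order of
    the identifier list rather than the order of the declarations.)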
*/ DECL_ARGUMENTS (fndecl) = 0; for (parm = parmids; parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) break; if (parm && TREE_PURPOSE (parm)) { last = TREE_PURPOSE (parm); DECL_ARGUMENTS (fndecl) = last; DECL_WEAK (last) = 0; for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) { TREE_CHAIN (last) = TREE_PURPOSE (parm); last = TREE_PURPOSE (parm); DECL_WEAK (last) = 0; } TREE_CHAIN (last) = 0; } /* If there was a previous prototype, set the DECL_ARG_TYPE of each argument according to the type previously specified, and report any mismatches. */ if (TYPE_ARG_TYPES (TREE_TYPE (fndecl))) { tree type; for (parm = DECL_ARGUMENTS (fndecl), type = TYPE_ARG_TYPES (TREE_TYPE (fndecl)); parm || (type && (TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node)); parm = TREE_CHAIN (parm), type = TREE_CHAIN (type)) { if (parm == 0 || type == 0 || TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node) { error ("number of arguments doesn't match prototype"); error ("%Hprototype declaration", &current_function_prototype_locus); break; } /* Type for passing arg must be consistent with that declared for the arg. ISO C says we take the unqualified type for parameters declared with qualified type. */ if (! comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)), TYPE_MAIN_VARIANT (TREE_VALUE (type)))) { if (TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == TYPE_MAIN_VARIANT (TREE_VALUE (type))) { /* Adjust argument to match prototype. E.g. a previous `int foo(float);' prototype causes `int foo(x) float x; {...}' to be treated like `int foo(float x) {...}'. This is particularly useful for argument types like uid_t. */ DECL_ARG_TYPE (parm) = TREE_TYPE (parm); if (targetm.calls.promote_prototypes (TREE_TYPE (current_function_decl)) && INTEGRAL_TYPE_P (TREE_TYPE (parm)) && TYPE_PRECISION (TREE_TYPE (parm)) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (parm) = integer_type_node; if (pedantic) { pedwarn ("promoted argument '%D' " "doesn't match prototype", parm); pedwarn ("%Hprototype declaration", &current_function_prototype_locus); } } else { error ("argument '%D' doesn't match prototype", parm); error ("%Hprototype declaration", &current_function_prototype_locus); } } } TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = 0; } /* Otherwise, create a prototype that would match. */ else { tree actual = 0, last = 0, type; for (parm = DECL_ARGUMENTS (fndecl); parm; parm = TREE_CHAIN (parm)) { type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; last = type; } type = tree_cons (NULL_TREE, void_type_node, NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES of the type of this function, but we need to avoid having this affect the types of other similarly-typed functions, so we must first force the generation of an identical (but separate) type node for the relevant function type. The new node we create will be a variant of the main variant of the original function type. */ TREE_TYPE (fndecl) = build_type_copy (TREE_TYPE (fndecl)); TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual; } } /* Store the parameter declarations into the current function declaration. This is called after parsing the parameter declarations, before digesting the body of the function. For an old-style definition, construct a prototype out of the old-style parameter declarations and inject it into the function's type.
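   (Roughly: a hypothetical old-style int f (c) char c; { ... } gets a
    TYPE_ACTUAL_ARG_TYPES list built from the DECL_ARG_TYPEs of its
    parameters, i.e. the promoted type int terminated by void, stored
    on a private copy of the function type.)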
*/ void store_parm_decls (void) { tree fndecl = current_function_decl; /* The argument information block for FNDECL. */ tree arg_info = DECL_ARGUMENTS (fndecl); /* True if this definition is written with a prototype. Note: despite C99 6.7.5.3p14, we can *not* treat an empty argument list in a function definition as equivalent to (void) -- an empty argument list specifies the function has no parameters, but only (void) sets up a prototype for future calls. */ bool proto = ARG_INFO_TYPES (arg_info) != 0; if (proto) store_parm_decls_newstyle (fndecl, arg_info); else store_parm_decls_oldstyle (fndecl, arg_info); /* The next call to push_scope will be a function body. */ next_is_function_body = true; /* Write a record describing this function definition to the prototypes file (if requested). */ gen_aux_info_record (fndecl, 1, 0, proto); /* Initialize the RTL code for the function. */ allocate_struct_function (fndecl); /* Begin the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = push_stmt_list (); /* ??? Insert the contents of the pending sizes list into the function to be evaluated. This just changes mis-behaviour until assign_parms phase ordering problems are resolved. */ { tree t; for (t = nreverse (get_pending_sizes ()); t ; t = TREE_CHAIN (t)) add_stmt (TREE_VALUE (t)); } /* Even though we're inside a function body, we still don't want to call expand_expr to calculate the size of a variable-sized array. We haven't necessarily assigned RTL to all variables yet, so it's not safe to try to expand expressions involving them. */ cfun->x_dont_save_pending_sizes_p = 1; } /* Give FNDECL and all its nested functions to cgraph for compilation. */ static void c_finalize (tree fndecl) { struct cgraph_node *cgn; /* Handle attribute((warn_unused_result)). Relies on gimple input. */ c_warn_unused_result (&DECL_SAVED_TREE (fndecl)); /* ??? Objc emits functions after finalizing the compilation unit. This should be cleaned up later and this conditional removed. */ if (cgraph_global_info_ready) { c_expand_body (fndecl); return; } /* Finalize all nested functions now. */ cgn = cgraph_node (fndecl); for (cgn = cgn->nested; cgn ; cgn = cgn->next_nested) c_finalize (cgn->decl); cgraph_finalize_function (fndecl, false); } /* Finish up a function declaration and compile that function all the way to assembler language output. The free the storage for the function definition. This is called after parsing the body of the function definition. */ void finish_function (void) { tree fndecl = current_function_decl; if (TREE_CODE (fndecl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (fndecl))) { tree args = DECL_ARGUMENTS (fndecl); for (; args; args = TREE_CHAIN (args)) { tree type = TREE_TYPE (args); if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = integer_type_node; } } if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node) BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl; /* Must mark the RESULT_DECL as being in this function. */ if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node) DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl; if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted) { if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl))) != integer_type_node) { /* If warn_main is 1 (-Wmain) or 2 (-Wall), we have already warned. If warn_main is -1 (-Wno-main) we don't want to be warned. 
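   (In other words, the pedwarn below fires only in the default case,
    warn_main == 0, e.g. for a hypothetical char main (void) { ... }
    with neither -Wmain nor -Wno-main given.)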
*/ if (!warn_main) pedwarn ("%Jreturn type of '%D' is not `int'", fndecl, fndecl); } else { if (flag_isoc99) c_finish_return (integer_zero_node); } } /* Tie off the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl)); finish_fname_decls (); /* Complain if there's just no return statement. */ if (warn_return_type && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE && !current_function_returns_value && !current_function_returns_null /* Don't complain if we abort. */ && !current_function_returns_abnormally /* Don't warn for main(). */ && !MAIN_NAME_P (DECL_NAME (fndecl)) /* Or if they didn't actually specify a return type. */ && !C_FUNCTION_IMPLICIT_INT (fndecl) /* Normally, with -Wreturn-type, flow will complain. Unless we're an inline function, as we might never be compiled separately. */ && DECL_INLINE (fndecl)) warning ("no return statement in function returning non-void"); /* With just -Wextra, complain only if function returns both with and without a value. */ if (extra_warnings && current_function_returns_value && current_function_returns_null) warning ("this function may return with or without a value"); /* Store the end of the function, so that we get good line number info for the epilogue. */ cfun->function_end_locus = input_location; /* If we don't have ctors/dtors sections, and this is a static constructor or destructor, it must be recorded now. */ if (DECL_STATIC_CONSTRUCTOR (fndecl) && !targetm.have_ctors_dtors) static_ctors = tree_cons (NULL_TREE, fndecl, static_ctors); if (DECL_STATIC_DESTRUCTOR (fndecl) && !targetm.have_ctors_dtors) static_dtors = tree_cons (NULL_TREE, fndecl, static_dtors); /* Genericize before inlining. Delay genericizing nested functions until their parent function is genericized. Since finalizing requires GENERIC, delay that as well. */ if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node) { if (!decl_function_context (fndecl)) { c_genericize (fndecl); lower_nested_functions (fndecl); c_finalize (fndecl); } else { /* Register this function with cgraph just far enough to get it added to our parent's nested function list. Handy, since the C front end doesn't have such a list. */ (void) cgraph_node (fndecl); } } /* We're leaving the context of this function, so zap cfun. It's still in DECL_STRUCT_FUNCTION, and we'll restore it in tree_rest_of_compilation. */ cfun = NULL; current_function_decl = NULL; } /* Generate the RTL for the body of FNDECL. */ void c_expand_body (tree fndecl) { if (!DECL_INITIAL (fndecl) || DECL_INITIAL (fndecl) == error_mark_node) return; tree_rest_of_compilation (fndecl, false); if (DECL_STATIC_CONSTRUCTOR (fndecl) && targetm.have_ctors_dtors) targetm.asm_out.constructor (XEXP (DECL_RTL (fndecl), 0), DEFAULT_INIT_PRIORITY); if (DECL_STATIC_DESTRUCTOR (fndecl) && targetm.have_ctors_dtors) targetm.asm_out.destructor (XEXP (DECL_RTL (fndecl), 0), DEFAULT_INIT_PRIORITY); } /* Check the declarations given in a for-loop for satisfying the C99 constraints. */ void check_for_loop_decls (void) { struct c_binding *b; if (!flag_isoc99) { /* If we get here, declarations have been used in a for loop without the C99 for loop scope. This doesn't make much sense, so don't allow it. */ error ("'for' loop initial declaration used outside C99 mode"); return; } /* C99 subclause 6.8.5 paragraph 3: [#3] The declaration part of a for statement shall only declare identifiers for objects having storage class auto or register. 
It isn't clear whether, in this sentence, "identifiers" binds to "shall only declare" or to "objects" - that is, whether all identifiers declared must be identifiers for objects, or whether the restriction only applies to those that are. (A question on this in comp.std.c in November 2000 received no answer.) We implement the strictest interpretation, to avoid creating an extension which later causes problems. */ for (b = current_scope->bindings; b; b = b->prev) { tree id = b->id; tree decl = b->decl; if (!id) continue; switch (TREE_CODE (decl)) { case VAR_DECL: if (TREE_STATIC (decl)) error ("%Jdeclaration of static variable '%D' in 'for' loop " "initial declaration", decl, decl); else if (DECL_EXTERNAL (decl)) error ("%Jdeclaration of 'extern' variable '%D' in 'for' loop " "initial declaration", decl, decl); break; case RECORD_TYPE: error ("'struct %E' declared in 'for' loop initial declaration", id); break; case UNION_TYPE: error ("'union %E' declared in 'for' loop initial declaration", id); break; case ENUMERAL_TYPE: error ("'enum %E' declared in 'for' loop initial declaration", id); break; default: error ("%Jdeclaration of non-variable '%D' in 'for' loop " "initial declaration", decl, decl); } } } /* Save and reinitialize the variables used during compilation of a C function. */ void c_push_function_context (struct function *f) { struct language_function *p; p = ggc_alloc (sizeof (struct language_function)); f->language = p; p->base.x_stmt_tree = c_stmt_tree; p->x_break_label = c_break_label; p->x_cont_label = c_cont_label; p->x_switch_stack = c_switch_stack; p->returns_value = current_function_returns_value; p->returns_null = current_function_returns_null; p->returns_abnormally = current_function_returns_abnormally; p->warn_about_return_type = warn_about_return_type; p->extern_inline = current_extern_inline; } /* Restore the variables used during compilation of a C function. */ void c_pop_function_context (struct function *f) { struct language_function *p = f->language; if (DECL_STRUCT_FUNCTION (current_function_decl) == 0 && DECL_SAVED_TREE (current_function_decl) == NULL_TREE) { /* Stop pointing to the local nodes about to be freed. */ /* But DECL_INITIAL must remain nonzero so we know this was an actual function definition. */ DECL_INITIAL (current_function_decl) = error_mark_node; DECL_ARGUMENTS (current_function_decl) = 0; } c_stmt_tree = p->base.x_stmt_tree; c_break_label = p->x_break_label; c_cont_label = p->x_cont_label; c_switch_stack = p->x_switch_stack; current_function_returns_value = p->returns_value; current_function_returns_null = p->returns_null; current_function_returns_abnormally = p->returns_abnormally; warn_about_return_type = p->warn_about_return_type; current_extern_inline = p->extern_inline; f->language = NULL; } /* Copy the DECL_LANG_SPECIFIC data associated with DECL. */ void c_dup_lang_specific_decl (tree decl) { struct lang_decl *ld; if (!DECL_LANG_SPECIFIC (decl)) return; ld = ggc_alloc (sizeof (struct lang_decl)); memcpy (ld, DECL_LANG_SPECIFIC (decl), sizeof (struct lang_decl)); DECL_LANG_SPECIFIC (decl) = ld; } /* The functions below are required for functionality of doing function at once processing in the C front end. Currently these functions are not called from anywhere in the C front end, but as these changes continue, that will change. */ /* Returns nonzero if the current statement is a full expression, i.e. temporaries created during that statement should be destroyed at the end of the statement. 
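   In C there are no such temporaries to destroy, so the function below
   simply returns 0.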
*/ int stmts_are_full_exprs_p (void) { return 0; } /* Returns the stmt_tree (if any) to which statements are currently being added. If there is no active statement-tree, NULL is returned. */ stmt_tree current_stmt_tree (void) { return &c_stmt_tree; } /* Nonzero if TYPE is an anonymous union or struct type. Always 0 in C. */ int anon_aggr_type_p (tree node ATTRIBUTE_UNUSED) { return 0; } /* Dummy function in place of callback used by C++. */ void extract_interface_info (void) { } /* Return the global value of T as a symbol. */ tree identifier_global_value (tree t) { struct c_binding *b; for (b = I_SYMBOL_BINDING (t); b; b = b->shadowed) if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b)) return b->decl; return 0; } /* Record a builtin type for C. If NAME is non-NULL, it is the name used; otherwise the name is found in ridpointers from RID_INDEX. */ void record_builtin_type (enum rid rid_index, const char *name, tree type) { tree id; if (name == 0) id = ridpointers[(int) rid_index]; else id = get_identifier (name); pushdecl (build_decl (TYPE_DECL, id, type)); } /* Build the void_list_node (void_type_node having been created). */ tree build_void_list_node (void) { tree t = build_tree_list (NULL_TREE, void_type_node); return t; } /* Return something to represent absolute declarators containing a *. TARGET is the absolute declarator that the * contains. TYPE_QUALS_ATTRS is a list of modifiers such as const or volatile to apply to the pointer type, represented as identifiers, possible mixed with attributes. We return an INDIRECT_REF whose "contents" are TARGET (inside a TREE_LIST, if attributes are present) and whose type is the modifier list. */ tree make_pointer_declarator (tree type_quals_attrs, tree target) { tree quals, attrs; tree itarget = target; split_specs_attrs (type_quals_attrs, &quals, &attrs); if (attrs != NULL_TREE) itarget = tree_cons (attrs, target, NULL_TREE); return build1 (INDIRECT_REF, quals, itarget); } /* Perform final processing on file-scope data. */ static void c_write_global_declarations_1 (tree globals) { size_t len = list_length (globals); tree *vec = xmalloc (sizeof (tree) * len); size_t i; tree decl; /* Process the decls in the order they were written. */ for (i = 0, decl = globals; i < len; i++, decl = TREE_CHAIN (decl)) vec[i] = decl; wrapup_global_declarations (vec, len); check_global_declarations (vec, len); free (vec); } void c_write_global_declarations (void) { tree t; /* We don't want to do this if generating a PCH. */ if (pch_file) return; /* Process all file scopes in this compilation. */ for (t = all_translation_units; t; t = TREE_CHAIN (t)) c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t))); /* Now do the same for the externals scope. */ t = pop_scope (); if (t) c_write_global_declarations_1 (BLOCK_VARS (t)); } /* Type information for c-decl.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ void gt_ggc_mx_c_scope (void *x_p) { struct c_scope * x = (struct c_scope *)x_p; struct c_scope * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).outer); while (x != xlimit) { gt_ggc_m_7c_scope ((*x).outer); gt_ggc_m_7c_scope ((*x).outer_function); gt_ggc_m_9c_binding ((*x).bindings); gt_ggc_m_9tree_node ((*x).blocks); gt_ggc_m_9tree_node ((*x).blocks_last); x = ((*x).outer); } } void gt_ggc_mx_c_binding (void *x_p) { struct c_binding * x = (struct c_binding *)x_p; struct c_binding * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = ((*xlimit).prev); while (x != xlimit) { gt_ggc_m_9tree_node ((*x).decl); gt_ggc_m_9tree_node ((*x).id); gt_ggc_m_9c_binding ((*x).prev); gt_ggc_m_9c_binding ((*x).shadowed); x = ((*x).prev); } } void gt_ggc_mx_lang_tree_node (void *x_p) { union lang_tree_node * x = (union lang_tree_node *)x_p; union lang_tree_node * xlimit = x; while (ggc_test_and_set_mark (xlimit)) xlimit = (TREE_CODE (&(*xlimit).generic) == INTEGER_TYPE ? (union lang_tree_node *)TYPE_NEXT_VARIANT (&(*xlimit).generic) : (union lang_tree_node *)TREE_CHAIN (&(*xlimit).generic)); while (x != xlimit) { switch (TREE_CODE (&((*x)).generic) == IDENTIFIER_NODE) { case 0: switch (tree_node_structure (&((*x).generic))) { case TS_COMMON: gt_ggc_m_9tree_node ((*x).generic.common.chain); gt_ggc_m_9tree_node ((*x).generic.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.common.ann); break; case TS_INT_CST: gt_ggc_m_9tree_node ((*x).generic.int_cst.common.chain); gt_ggc_m_9tree_node ((*x).generic.int_cst.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.int_cst.common.ann); break; case TS_REAL_CST: gt_ggc_m_9tree_node ((*x).generic.real_cst.common.chain); gt_ggc_m_9tree_node ((*x).generic.real_cst.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.real_cst.common.ann); gt_ggc_m_10real_value ((*x).generic.real_cst.real_cst_ptr); break; case TS_VECTOR: gt_ggc_m_9tree_node ((*x).generic.vector.common.chain); gt_ggc_m_9tree_node ((*x).generic.vector.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.vector.common.ann); gt_ggc_m_9tree_node ((*x).generic.vector.elements); break; case TS_STRING: gt_ggc_m_9tree_node ((*x).generic.string.common.chain); gt_ggc_m_9tree_node ((*x).generic.string.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.string.common.ann); break; case TS_COMPLEX: gt_ggc_m_9tree_node ((*x).generic.complex.common.chain); gt_ggc_m_9tree_node ((*x).generic.complex.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.complex.common.ann); gt_ggc_m_9tree_node ((*x).generic.complex.real); gt_ggc_m_9tree_node ((*x).generic.complex.imag); break; case TS_IDENTIFIER: gt_ggc_m_9tree_node ((*x).generic.identifier.common.chain); gt_ggc_m_9tree_node ((*x).generic.identifier.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.identifier.common.ann); break; case TS_DECL: gt_ggc_m_9tree_node ((*x).generic.decl.common.chain); gt_ggc_m_9tree_node ((*x).generic.decl.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.decl.common.ann); gt_ggc_m_9tree_node ((*x).generic.decl.size); gt_ggc_m_9tree_node ((*x).generic.decl.size_unit); gt_ggc_m_9tree_node ((*x).generic.decl.name); gt_ggc_m_9tree_node ((*x).generic.decl.context); gt_ggc_m_9tree_node ((*x).generic.decl.arguments); gt_ggc_m_9tree_node ((*x).generic.decl.result); gt_ggc_m_9tree_node ((*x).generic.decl.initial); gt_ggc_m_9tree_node ((*x).generic.decl.abstract_origin); 
gt_ggc_m_9tree_node ((*x).generic.decl.assembler_name); gt_ggc_m_9tree_node ((*x).generic.decl.section_name); gt_ggc_m_9tree_node ((*x).generic.decl.attributes); gt_ggc_m_7rtx_def ((*x).generic.decl.rtl); switch (TREE_CODE((tree) &((*x)))) { case FUNCTION_DECL: gt_ggc_m_8function ((*x).generic.decl.u2.f); break; case PARM_DECL: gt_ggc_m_7rtx_def ((*x).generic.decl.u2.r); break; case FIELD_DECL: gt_ggc_m_9tree_node ((*x).generic.decl.u2.t); break; case VAR_DECL: break; default: break; } gt_ggc_m_9tree_node ((*x).generic.decl.saved_tree); gt_ggc_m_9tree_node ((*x).generic.decl.inlined_fns); gt_ggc_m_9tree_node ((*x).generic.decl.vindex); gt_ggc_m_9lang_decl ((*x).generic.decl.lang_specific); break; case TS_TYPE: gt_ggc_m_9tree_node ((*x).generic.type.common.chain); gt_ggc_m_9tree_node ((*x).generic.type.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.type.common.ann); gt_ggc_m_9tree_node ((*x).generic.type.values); gt_ggc_m_9tree_node ((*x).generic.type.size); gt_ggc_m_9tree_node ((*x).generic.type.size_unit); gt_ggc_m_9tree_node ((*x).generic.type.attributes); gt_ggc_m_9tree_node ((*x).generic.type.pointer_to); gt_ggc_m_9tree_node ((*x).generic.type.reference_to); switch (debug_hooks == &sdb_debug_hooks ? 1 : debug_hooks == &dwarf2_debug_hooks ? 2 : 0) { case 0: break; case 1: break; case 2: gt_ggc_m_10die_struct ((*x).generic.type.symtab.die); break; default: break; } gt_ggc_m_9tree_node ((*x).generic.type.name); gt_ggc_m_9tree_node ((*x).generic.type.minval); gt_ggc_m_9tree_node ((*x).generic.type.maxval); gt_ggc_m_9tree_node ((*x).generic.type.next_variant); gt_ggc_m_9tree_node ((*x).generic.type.main_variant); gt_ggc_m_9tree_node ((*x).generic.type.binfo); gt_ggc_m_9tree_node ((*x).generic.type.context); gt_ggc_m_9lang_type ((*x).generic.type.lang_specific); break; case TS_LIST: gt_ggc_m_9tree_node ((*x).generic.list.common.chain); gt_ggc_m_9tree_node ((*x).generic.list.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.list.common.ann); gt_ggc_m_9tree_node ((*x).generic.list.purpose); gt_ggc_m_9tree_node ((*x).generic.list.value); break; case TS_VEC: gt_ggc_m_9tree_node ((*x).generic.vec.common.chain); gt_ggc_m_9tree_node ((*x).generic.vec.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.vec.common.ann); { size_t i0; for (i0 = 0; i0 < (size_t)(TREE_VEC_LENGTH ((tree)&((*x).generic.vec))); i0++) { gt_ggc_m_9tree_node ((*x).generic.vec.a[i0]); } } break; case TS_EXP: gt_ggc_m_9tree_node ((*x).generic.exp.common.chain); gt_ggc_m_9tree_node ((*x).generic.exp.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.exp.common.ann); gt_ggc_m_10location_s ((*x).generic.exp.locus); gt_ggc_m_9tree_node ((*x).generic.exp.block); switch (TREE_CODE ((tree) &(*x))) { case WITH_CLEANUP_EXPR: gt_ggc_m_7rtx_def ((*x).generic.exp.operands[2]); gt_ggc_m_9tree_node ((*x).generic.exp.operands[1]); gt_ggc_m_9tree_node ((*x).generic.exp.operands[0]); break; case GOTO_SUBROUTINE_EXPR: gt_ggc_m_7rtx_def ((*x).generic.exp.operands[1]); gt_ggc_m_7rtx_def ((*x).generic.exp.operands[0]); break; default: { size_t i1; for (i1 = 0; i1 < (size_t)(TREE_CODE_LENGTH (TREE_CODE ((tree) &(*x)))); i1++) { gt_ggc_m_9tree_node ((*x).generic.exp.operands[i1]); } } break; } break; case TS_SSA_NAME: gt_ggc_m_9tree_node ((*x).generic.ssa_name.common.chain); gt_ggc_m_9tree_node ((*x).generic.ssa_name.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.ssa_name.common.ann); gt_ggc_m_9tree_node ((*x).generic.ssa_name.var); gt_ggc_m_12ptr_info_def ((*x).generic.ssa_name.ptr_info); break; case TS_PHI_NODE: gt_ggc_m_9tree_node 
((*x).generic.phi.common.chain); gt_ggc_m_9tree_node ((*x).generic.phi.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.phi.common.ann); gt_ggc_m_9tree_node ((*x).generic.phi.result); { size_t i2; for (i2 = 0; i2 < (size_t)(((tree)&((*x).generic.phi))->phi.capacity); i2++) { gt_ggc_m_9tree_node ((*x).generic.phi.a[i2].def); } } break; case TS_BLOCK: gt_ggc_m_9tree_node ((*x).generic.block.common.chain); gt_ggc_m_9tree_node ((*x).generic.block.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.block.common.ann); gt_ggc_m_9tree_node ((*x).generic.block.vars); gt_ggc_m_9tree_node ((*x).generic.block.subblocks); gt_ggc_m_9tree_node ((*x).generic.block.supercontext); gt_ggc_m_9tree_node ((*x).generic.block.abstract_origin); gt_ggc_m_9tree_node ((*x).generic.block.fragment_origin); gt_ggc_m_9tree_node ((*x).generic.block.fragment_chain); break; case TS_BINFO: gt_ggc_m_9tree_node ((*x).generic.binfo.common.chain); gt_ggc_m_9tree_node ((*x).generic.binfo.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.binfo.common.ann); gt_ggc_m_9tree_node ((*x).generic.binfo.offset); gt_ggc_m_9tree_node ((*x).generic.binfo.vtable); gt_ggc_m_9tree_node ((*x).generic.binfo.virtuals); gt_ggc_m_9tree_node ((*x).generic.binfo.base_types); gt_ggc_m_9tree_node ((*x).generic.binfo.vptr_field); gt_ggc_m_9tree_node ((*x).generic.binfo.base_accesses); gt_ggc_m_9tree_node ((*x).generic.binfo.inheritance); { size_t i3; for (i3 = 0; i3 < (size_t)(binfo_lang_slots); i3++) { gt_ggc_m_9tree_node ((*x).generic.binfo.lang_slots[i3]); } } break; case TS_STATEMENT_LIST: gt_ggc_m_9tree_node ((*x).generic.stmt_list.common.chain); gt_ggc_m_9tree_node ((*x).generic.stmt_list.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.stmt_list.common.ann); gt_ggc_m_24tree_statement_list_node ((*x).generic.stmt_list.head); gt_ggc_m_24tree_statement_list_node ((*x).generic.stmt_list.tail); break; case TS_VALUE_HANDLE: gt_ggc_m_9tree_node ((*x).generic.value_handle.common.chain); gt_ggc_m_9tree_node ((*x).generic.value_handle.common.type); gt_ggc_m_10tree_ann_d ((*x).generic.value_handle.common.ann); break; default: break; } break; case 1: gt_ggc_m_9tree_node ((*x).identifier.common_id.common.chain); gt_ggc_m_9tree_node ((*x).identifier.common_id.common.type); gt_ggc_m_10tree_ann_d ((*x).identifier.common_id.common.ann); switch (CPP_HASHNODE_VALUE_IDX (((*x).identifier.common_id.node))) { case NTV_MACRO: gt_ggc_m_9cpp_macro ((*x).identifier.common_id.node.value.macro); break; case NTV_ANSWER: gt_ggc_m_6answer ((*x).identifier.common_id.node.value.answers); break; case NTV_BUILTIN: break; case NTV_ARGUMENT: break; default: break; } gt_ggc_m_9c_binding ((*x).identifier.symbol_binding); gt_ggc_m_9c_binding ((*x).identifier.tag_binding); gt_ggc_m_9c_binding ((*x).identifier.label_binding); break; default: break; } x = (TREE_CODE (&(*x).generic) == INTEGER_TYPE ? 
(union lang_tree_node *)TYPE_NEXT_VARIANT (&(*x).generic) : (union lang_tree_node *)TREE_CHAIN (&(*x).generic)); } } void gt_ggc_mx_lang_decl (void *x_p) { struct lang_decl * const x = (struct lang_decl *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).pending_sizes); } } void gt_ggc_mx_lang_type (void *x_p) { struct lang_type * const x = (struct lang_type *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_18sorted_fields_type ((*x).s); gt_ggc_m_9tree_node ((*x).enum_min); gt_ggc_m_9tree_node ((*x).enum_max); } } void gt_ggc_mx_language_function (void *x_p) { struct language_function * const x = (struct language_function *)x_p; if (ggc_test_and_set_mark (x)) { gt_ggc_m_9tree_node ((*x).base.x_stmt_tree.x_cur_stmt_list); gt_ggc_m_9tree_node ((*x).x_break_label); gt_ggc_m_9tree_node ((*x).x_cont_label); } } void gt_pch_nx_c_scope (void *x_p) { struct c_scope * x = (struct c_scope *)x_p; struct c_scope * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_7c_scope)) xlimit = ((*xlimit).outer); while (x != xlimit) { gt_pch_n_7c_scope ((*x).outer); gt_pch_n_7c_scope ((*x).outer_function); gt_pch_n_9c_binding ((*x).bindings); gt_pch_n_9tree_node ((*x).blocks); gt_pch_n_9tree_node ((*x).blocks_last); x = ((*x).outer); } } void gt_pch_nx_c_binding (void *x_p) { struct c_binding * x = (struct c_binding *)x_p; struct c_binding * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_9c_binding)) xlimit = ((*xlimit).prev); while (x != xlimit) { gt_pch_n_9tree_node ((*x).decl); gt_pch_n_9tree_node ((*x).id); gt_pch_n_9c_binding ((*x).prev); gt_pch_n_9c_binding ((*x).shadowed); x = ((*x).prev); } } void gt_pch_nx_lang_tree_node (void *x_p) { union lang_tree_node * x = (union lang_tree_node *)x_p; union lang_tree_node * xlimit = x; while (gt_pch_note_object (xlimit, xlimit, gt_pch_p_14lang_tree_node)) xlimit = (TREE_CODE (&(*xlimit).generic) == INTEGER_TYPE ? 
(union lang_tree_node *)TYPE_NEXT_VARIANT (&(*xlimit).generic) : (union lang_tree_node *)TREE_CHAIN (&(*xlimit).generic)); while (x != xlimit) { switch (TREE_CODE (&((*x)).generic) == IDENTIFIER_NODE) { case 0: switch (tree_node_structure (&((*x).generic))) { case TS_COMMON: gt_pch_n_9tree_node ((*x).generic.common.chain); gt_pch_n_9tree_node ((*x).generic.common.type); gt_pch_n_10tree_ann_d ((*x).generic.common.ann); break; case TS_INT_CST: gt_pch_n_9tree_node ((*x).generic.int_cst.common.chain); gt_pch_n_9tree_node ((*x).generic.int_cst.common.type); gt_pch_n_10tree_ann_d ((*x).generic.int_cst.common.ann); break; case TS_REAL_CST: gt_pch_n_9tree_node ((*x).generic.real_cst.common.chain); gt_pch_n_9tree_node ((*x).generic.real_cst.common.type); gt_pch_n_10tree_ann_d ((*x).generic.real_cst.common.ann); gt_pch_n_10real_value ((*x).generic.real_cst.real_cst_ptr); break; case TS_VECTOR: gt_pch_n_9tree_node ((*x).generic.vector.common.chain); gt_pch_n_9tree_node ((*x).generic.vector.common.type); gt_pch_n_10tree_ann_d ((*x).generic.vector.common.ann); gt_pch_n_9tree_node ((*x).generic.vector.elements); break; case TS_STRING: gt_pch_n_9tree_node ((*x).generic.string.common.chain); gt_pch_n_9tree_node ((*x).generic.string.common.type); gt_pch_n_10tree_ann_d ((*x).generic.string.common.ann); gt_pch_n_S ((*x).generic.string.pointer); break; case TS_COMPLEX: gt_pch_n_9tree_node ((*x).generic.complex.common.chain); gt_pch_n_9tree_node ((*x).generic.complex.common.type); gt_pch_n_10tree_ann_d ((*x).generic.complex.common.ann); gt_pch_n_9tree_node ((*x).generic.complex.real); gt_pch_n_9tree_node ((*x).generic.complex.imag); break; case TS_IDENTIFIER: gt_pch_n_9tree_node ((*x).generic.identifier.common.chain); gt_pch_n_9tree_node ((*x).generic.identifier.common.type); gt_pch_n_10tree_ann_d ((*x).generic.identifier.common.ann); gt_pch_n_S ((*x).generic.identifier.id.str); break; case TS_DECL: gt_pch_n_9tree_node ((*x).generic.decl.common.chain); gt_pch_n_9tree_node ((*x).generic.decl.common.type); gt_pch_n_10tree_ann_d ((*x).generic.decl.common.ann); gt_pch_n_S ((*x).generic.decl.locus.file); gt_pch_n_9tree_node ((*x).generic.decl.size); gt_pch_n_9tree_node ((*x).generic.decl.size_unit); gt_pch_n_9tree_node ((*x).generic.decl.name); gt_pch_n_9tree_node ((*x).generic.decl.context); gt_pch_n_9tree_node ((*x).generic.decl.arguments); gt_pch_n_9tree_node ((*x).generic.decl.result); gt_pch_n_9tree_node ((*x).generic.decl.initial); gt_pch_n_9tree_node ((*x).generic.decl.abstract_origin); gt_pch_n_9tree_node ((*x).generic.decl.assembler_name); gt_pch_n_9tree_node ((*x).generic.decl.section_name); gt_pch_n_9tree_node ((*x).generic.decl.attributes); gt_pch_n_7rtx_def ((*x).generic.decl.rtl); switch (TREE_CODE((tree) &((*x)))) { case FUNCTION_DECL: gt_pch_n_8function ((*x).generic.decl.u2.f); break; case PARM_DECL: gt_pch_n_7rtx_def ((*x).generic.decl.u2.r); break; case FIELD_DECL: gt_pch_n_9tree_node ((*x).generic.decl.u2.t); break; case VAR_DECL: break; default: break; } gt_pch_n_9tree_node ((*x).generic.decl.saved_tree); gt_pch_n_9tree_node ((*x).generic.decl.inlined_fns); gt_pch_n_9tree_node ((*x).generic.decl.vindex); gt_pch_n_9lang_decl ((*x).generic.decl.lang_specific); break; case TS_TYPE: gt_pch_n_9tree_node ((*x).generic.type.common.chain); gt_pch_n_9tree_node ((*x).generic.type.common.type); gt_pch_n_10tree_ann_d ((*x).generic.type.common.ann); gt_pch_n_9tree_node ((*x).generic.type.values); gt_pch_n_9tree_node ((*x).generic.type.size); gt_pch_n_9tree_node ((*x).generic.type.size_unit); 
gt_pch_n_9tree_node ((*x).generic.type.attributes); gt_pch_n_9tree_node ((*x).generic.type.pointer_to); gt_pch_n_9tree_node ((*x).generic.type.reference_to); switch (debug_hooks == &sdb_debug_hooks ? 1 : debug_hooks == &dwarf2_debug_hooks ? 2 : 0) { case 0: break; case 1: gt_pch_n_S ((*x).generic.type.symtab.pointer); break; case 2: gt_pch_n_10die_struct ((*x).generic.type.symtab.die); break; default: break; } gt_pch_n_9tree_node ((*x).generic.type.name); gt_pch_n_9tree_node ((*x).generic.type.minval); gt_pch_n_9tree_node ((*x).generic.type.maxval); gt_pch_n_9tree_node ((*x).generic.type.next_variant); gt_pch_n_9tree_node ((*x).generic.type.main_variant); gt_pch_n_9tree_node ((*x).generic.type.binfo); gt_pch_n_9tree_node ((*x).generic.type.context); gt_pch_n_9lang_type ((*x).generic.type.lang_specific); break; case TS_LIST: gt_pch_n_9tree_node ((*x).generic.list.common.chain); gt_pch_n_9tree_node ((*x).generic.list.common.type); gt_pch_n_10tree_ann_d ((*x).generic.list.common.ann); gt_pch_n_9tree_node ((*x).generic.list.purpose); gt_pch_n_9tree_node ((*x).generic.list.value); break; case TS_VEC: gt_pch_n_9tree_node ((*x).generic.vec.common.chain); gt_pch_n_9tree_node ((*x).generic.vec.common.type); gt_pch_n_10tree_ann_d ((*x).generic.vec.common.ann); { size_t i0; for (i0 = 0; i0 < (size_t)(TREE_VEC_LENGTH ((tree)&((*x).generic.vec))); i0++) { gt_pch_n_9tree_node ((*x).generic.vec.a[i0]); } } break; case TS_EXP: gt_pch_n_9tree_node ((*x).generic.exp.common.chain); gt_pch_n_9tree_node ((*x).generic.exp.common.type); gt_pch_n_10tree_ann_d ((*x).generic.exp.common.ann); gt_pch_n_10location_s ((*x).generic.exp.locus); gt_pch_n_9tree_node ((*x).generic.exp.block); switch (TREE_CODE ((tree) &(*x))) { case WITH_CLEANUP_EXPR: gt_pch_n_7rtx_def ((*x).generic.exp.operands[2]); gt_pch_n_9tree_node ((*x).generic.exp.operands[1]); gt_pch_n_9tree_node ((*x).generic.exp.operands[0]); break; case GOTO_SUBROUTINE_EXPR: gt_pch_n_7rtx_def ((*x).generic.exp.operands[1]); gt_pch_n_7rtx_def ((*x).generic.exp.operands[0]); break; default: { size_t i1; for (i1 = 0; i1 < (size_t)(TREE_CODE_LENGTH (TREE_CODE ((tree) &(*x)))); i1++) { gt_pch_n_9tree_node ((*x).generic.exp.operands[i1]); } } break; } break; case TS_SSA_NAME: gt_pch_n_9tree_node ((*x).generic.ssa_name.common.chain); gt_pch_n_9tree_node ((*x).generic.ssa_name.common.type); gt_pch_n_10tree_ann_d ((*x).generic.ssa_name.common.ann); gt_pch_n_9tree_node ((*x).generic.ssa_name.var); gt_pch_n_12ptr_info_def ((*x).generic.ssa_name.ptr_info); break; case TS_PHI_NODE: gt_pch_n_9tree_node ((*x).generic.phi.common.chain); gt_pch_n_9tree_node ((*x).generic.phi.common.type); gt_pch_n_10tree_ann_d ((*x).generic.phi.common.ann); gt_pch_n_9tree_node ((*x).generic.phi.result); { size_t i2; for (i2 = 0; i2 < (size_t)(((tree)&((*x).generic.phi))->phi.capacity); i2++) { gt_pch_n_9tree_node ((*x).generic.phi.a[i2].def); } } break; case TS_BLOCK: gt_pch_n_9tree_node ((*x).generic.block.common.chain); gt_pch_n_9tree_node ((*x).generic.block.common.type); gt_pch_n_10tree_ann_d ((*x).generic.block.common.ann); gt_pch_n_9tree_node ((*x).generic.block.vars); gt_pch_n_9tree_node ((*x).generic.block.subblocks); gt_pch_n_9tree_node ((*x).generic.block.supercontext); gt_pch_n_9tree_node ((*x).generic.block.abstract_origin); gt_pch_n_9tree_node ((*x).generic.block.fragment_origin); gt_pch_n_9tree_node ((*x).generic.block.fragment_chain); break; case TS_BINFO: gt_pch_n_9tree_node ((*x).generic.binfo.common.chain); gt_pch_n_9tree_node ((*x).generic.binfo.common.type); 
gt_pch_n_10tree_ann_d ((*x).generic.binfo.common.ann); gt_pch_n_9tree_node ((*x).generic.binfo.offset); gt_pch_n_9tree_node ((*x).generic.binfo.vtable); gt_pch_n_9tree_node ((*x).generic.binfo.virtuals); gt_pch_n_9tree_node ((*x).generic.binfo.base_types); gt_pch_n_9tree_node ((*x).generic.binfo.vptr_field); gt_pch_n_9tree_node ((*x).generic.binfo.base_accesses); gt_pch_n_9tree_node ((*x).generic.binfo.inheritance); { size_t i3; for (i3 = 0; i3 < (size_t)(binfo_lang_slots); i3++) { gt_pch_n_9tree_node ((*x).generic.binfo.lang_slots[i3]); } } break; case TS_STATEMENT_LIST: gt_pch_n_9tree_node ((*x).generic.stmt_list.common.chain); gt_pch_n_9tree_node ((*x).generic.stmt_list.common.type); gt_pch_n_10tree_ann_d ((*x).generic.stmt_list.common.ann); gt_pch_n_24tree_statement_list_node ((*x).generic.stmt_list.head); gt_pch_n_24tree_statement_list_node ((*x).generic.stmt_list.tail); break; case TS_VALUE_HANDLE: gt_pch_n_9tree_node ((*x).generic.value_handle.common.chain); gt_pch_n_9tree_node ((*x).generic.value_handle.common.type); gt_pch_n_10tree_ann_d ((*x).generic.value_handle.common.ann); break; default: break; } break; case 1: gt_pch_n_9tree_node ((*x).identifier.common_id.common.chain); gt_pch_n_9tree_node ((*x).identifier.common_id.common.type); gt_pch_n_10tree_ann_d ((*x).identifier.common_id.common.ann); gt_pch_n_S ((*x).identifier.common_id.node.ident.str); switch (CPP_HASHNODE_VALUE_IDX (((*x).identifier.common_id.node))) { case NTV_MACRO: gt_pch_n_9cpp_macro ((*x).identifier.common_id.node.value.macro); break; case NTV_ANSWER: gt_pch_n_6answer ((*x).identifier.common_id.node.value.answers); break; case NTV_BUILTIN: break; case NTV_ARGUMENT: break; default: break; } gt_pch_n_9c_binding ((*x).identifier.symbol_binding); gt_pch_n_9c_binding ((*x).identifier.tag_binding); gt_pch_n_9c_binding ((*x).identifier.label_binding); break; default: break; } x = (TREE_CODE (&(*x).generic) == INTEGER_TYPE ? 
(union lang_tree_node *)TYPE_NEXT_VARIANT (&(*x).generic) : (union lang_tree_node *)TREE_CHAIN (&(*x).generic)); } } void gt_pch_nx_lang_decl (void *x_p) { struct lang_decl * const x = (struct lang_decl *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9lang_decl)) { gt_pch_n_9tree_node ((*x).pending_sizes); } } void gt_pch_nx_lang_type (void *x_p) { struct lang_type * const x = (struct lang_type *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_9lang_type)) { gt_pch_n_18sorted_fields_type ((*x).s); gt_pch_note_reorder ((*x).s, (*x).s, resort_sorted_fields); gt_pch_n_9tree_node ((*x).enum_min); gt_pch_n_9tree_node ((*x).enum_max); } } void gt_pch_nx_language_function (void *x_p) { struct language_function * const x = (struct language_function *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_17language_function)) { gt_pch_n_9tree_node ((*x).base.x_stmt_tree.x_cur_stmt_list); gt_pch_n_9tree_node ((*x).x_break_label); gt_pch_n_9tree_node ((*x).x_cont_label); } } void gt_pch_p_7c_scope (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct c_scope * const x ATTRIBUTE_UNUSED = (struct c_scope *)x_p; if ((void *)(x) == this_obj) op (&((*x).outer), cookie); if ((void *)(x) == this_obj) op (&((*x).outer_function), cookie); if ((void *)(x) == this_obj) op (&((*x).bindings), cookie); if ((void *)(x) == this_obj) op (&((*x).blocks), cookie); if ((void *)(x) == this_obj) op (&((*x).blocks_last), cookie); } void gt_pch_p_9c_binding (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct c_binding * const x ATTRIBUTE_UNUSED = (struct c_binding *)x_p; if ((void *)(x) == this_obj) op (&((*x).decl), cookie); if ((void *)(x) == this_obj) op (&((*x).id), cookie); if ((void *)(x) == this_obj) op (&((*x).prev), cookie); if ((void *)(x) == this_obj) op (&((*x).shadowed), cookie); } void gt_pch_p_14lang_tree_node (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { union lang_tree_node * const x ATTRIBUTE_UNUSED = (union lang_tree_node *)x_p; switch (TREE_CODE (&((*x)).generic) == IDENTIFIER_NODE) { case 0: switch (tree_node_structure (&((*x).generic))) { case TS_COMMON: if ((void *)(x) == this_obj) op (&((*x).generic.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.common.ann), cookie); break; case TS_INT_CST: if ((void *)(x) == this_obj) op (&((*x).generic.int_cst.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.int_cst.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.int_cst.common.ann), cookie); break; case TS_REAL_CST: if ((void *)(x) == this_obj) op (&((*x).generic.real_cst.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.real_cst.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.real_cst.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.real_cst.real_cst_ptr), cookie); break; case TS_VECTOR: if ((void *)(x) == this_obj) op (&((*x).generic.vector.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.vector.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.vector.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.vector.elements), cookie); break; case TS_STRING: if ((void *)(x) == this_obj) op (&((*x).generic.string.common.chain), cookie); if ((void *)(x) == 
this_obj) op (&((*x).generic.string.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.string.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.string.pointer), cookie); break; case TS_COMPLEX: if ((void *)(x) == this_obj) op (&((*x).generic.complex.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.complex.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.complex.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.complex.real), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.complex.imag), cookie); break; case TS_IDENTIFIER: if ((void *)(x) == this_obj) op (&((*x).generic.identifier.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.identifier.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.identifier.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.identifier.id.str), cookie); break; case TS_DECL: if ((void *)(x) == this_obj) op (&((*x).generic.decl.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.locus.file), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.size), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.size_unit), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.name), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.context), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.arguments), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.result), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.initial), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.abstract_origin), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.assembler_name), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.section_name), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.attributes), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.rtl), cookie); switch (TREE_CODE((tree) &((*x)))) { case FUNCTION_DECL: if ((void *)(x) == this_obj) op (&((*x).generic.decl.u2.f), cookie); break; case PARM_DECL: if ((void *)(x) == this_obj) op (&((*x).generic.decl.u2.r), cookie); break; case FIELD_DECL: if ((void *)(x) == this_obj) op (&((*x).generic.decl.u2.t), cookie); break; case VAR_DECL: break; default: break; } if ((void *)(x) == this_obj) op (&((*x).generic.decl.saved_tree), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.inlined_fns), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.vindex), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.decl.lang_specific), cookie); break; case TS_TYPE: if ((void *)(x) == this_obj) op (&((*x).generic.type.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.values), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.size), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.size_unit), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.attributes), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.pointer_to), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.reference_to), cookie); switch (debug_hooks == 
&sdb_debug_hooks ? 1 : debug_hooks == &dwarf2_debug_hooks ? 2 : 0) { case 0: break; case 1: if ((void *)(x) == this_obj) op (&((*x).generic.type.symtab.pointer), cookie); break; case 2: if ((void *)(x) == this_obj) op (&((*x).generic.type.symtab.die), cookie); break; default: break; } if ((void *)(x) == this_obj) op (&((*x).generic.type.name), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.minval), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.maxval), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.next_variant), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.main_variant), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.binfo), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.context), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.type.lang_specific), cookie); break; case TS_LIST: if ((void *)(x) == this_obj) op (&((*x).generic.list.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.list.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.list.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.list.purpose), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.list.value), cookie); break; case TS_VEC: if ((void *)(x) == this_obj) op (&((*x).generic.vec.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.vec.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.vec.common.ann), cookie); { size_t i0; for (i0 = 0; i0 < (size_t)(TREE_VEC_LENGTH ((tree)&((*x).generic.vec))); i0++) { if ((void *)(x) == this_obj) op (&((*x).generic.vec.a[i0]), cookie); } } break; case TS_EXP: if ((void *)(x) == this_obj) op (&((*x).generic.exp.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.exp.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.exp.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.exp.locus), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.exp.block), cookie); switch (TREE_CODE ((tree) &(*x))) { case WITH_CLEANUP_EXPR: if ((void *)(x) == this_obj) op (&((*x).generic.exp.operands[2]), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.exp.operands[1]), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.exp.operands[0]), cookie); break; case GOTO_SUBROUTINE_EXPR: if ((void *)(x) == this_obj) op (&((*x).generic.exp.operands[1]), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.exp.operands[0]), cookie); break; default: { size_t i1; for (i1 = 0; i1 < (size_t)(TREE_CODE_LENGTH (TREE_CODE ((tree) &(*x)))); i1++) { if ((void *)(x) == this_obj) op (&((*x).generic.exp.operands[i1]), cookie); } } break; } break; case TS_SSA_NAME: if ((void *)(x) == this_obj) op (&((*x).generic.ssa_name.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.ssa_name.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.ssa_name.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.ssa_name.var), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.ssa_name.ptr_info), cookie); break; case TS_PHI_NODE: if ((void *)(x) == this_obj) op (&((*x).generic.phi.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.phi.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.phi.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.phi.result), cookie); { size_t i2; for (i2 = 0; i2 < (size_t)(((tree)&((*x).generic.phi))->phi.capacity); i2++) { if 
((void *)(x) == this_obj) op (&((*x).generic.phi.a[i2].def), cookie); } } break; case TS_BLOCK: if ((void *)(x) == this_obj) op (&((*x).generic.block.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.block.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.block.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.block.vars), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.block.subblocks), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.block.supercontext), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.block.abstract_origin), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.block.fragment_origin), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.block.fragment_chain), cookie); break; case TS_BINFO: if ((void *)(x) == this_obj) op (&((*x).generic.binfo.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.binfo.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.binfo.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.binfo.offset), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.binfo.vtable), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.binfo.virtuals), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.binfo.base_types), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.binfo.vptr_field), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.binfo.base_accesses), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.binfo.inheritance), cookie); { size_t i3; for (i3 = 0; i3 < (size_t)(binfo_lang_slots); i3++) { if ((void *)(x) == this_obj) op (&((*x).generic.binfo.lang_slots[i3]), cookie); } } break; case TS_STATEMENT_LIST: if ((void *)(x) == this_obj) op (&((*x).generic.stmt_list.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.stmt_list.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.stmt_list.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.stmt_list.head), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.stmt_list.tail), cookie); break; case TS_VALUE_HANDLE: if ((void *)(x) == this_obj) op (&((*x).generic.value_handle.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.value_handle.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).generic.value_handle.common.ann), cookie); break; default: break; } break; case 1: if ((void *)(x) == this_obj) op (&((*x).identifier.common_id.common.chain), cookie); if ((void *)(x) == this_obj) op (&((*x).identifier.common_id.common.type), cookie); if ((void *)(x) == this_obj) op (&((*x).identifier.common_id.common.ann), cookie); if ((void *)(x) == this_obj) op (&((*x).identifier.common_id.node.ident.str), cookie); switch (CPP_HASHNODE_VALUE_IDX (((*x).identifier.common_id.node))) { case NTV_MACRO: if ((void *)(x) == this_obj) op (&((*x).identifier.common_id.node.value.macro), cookie); break; case NTV_ANSWER: if ((void *)(x) == this_obj) op (&((*x).identifier.common_id.node.value.answers), cookie); break; case NTV_BUILTIN: break; case NTV_ARGUMENT: break; default: break; } if ((void *)(x) == this_obj) op (&((*x).identifier.symbol_binding), cookie); if ((void *)(x) == this_obj) op (&((*x).identifier.tag_binding), cookie); if ((void *)(x) == this_obj) op (&((*x).identifier.label_binding), cookie); break; default: break; } } void gt_pch_p_9lang_decl (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie 
ATTRIBUTE_UNUSED) { struct lang_decl * const x ATTRIBUTE_UNUSED = (struct lang_decl *)x_p; if ((void *)(x) == this_obj) op (&((*x).pending_sizes), cookie); } void gt_pch_p_9lang_type (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct lang_type * const x ATTRIBUTE_UNUSED = (struct lang_type *)x_p; if ((void *)(x) == this_obj) op (&((*x).s), cookie); if ((void *)(x) == this_obj) op (&((*x).enum_min), cookie); if ((void *)(x) == this_obj) op (&((*x).enum_max), cookie); } void gt_pch_p_17language_function (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct language_function * const x ATTRIBUTE_UNUSED = (struct language_function *)x_p; if ((void *)(x) == this_obj) op (&((*x).base.x_stmt_tree.x_cur_stmt_list), cookie); if ((void *)(x) == this_obj) op (&((*x).x_break_label), cookie); if ((void *)(x) == this_obj) op (&((*x).x_cont_label), cookie); } /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_c_decl_h[] = { { &external_scope, 1, sizeof (external_scope), &gt_ggc_mx_c_scope, &gt_pch_nx_c_scope }, { &file_scope, 1, sizeof (file_scope), &gt_ggc_mx_c_scope, &gt_pch_nx_c_scope }, { &current_function_scope, 1, sizeof (current_function_scope), &gt_ggc_mx_c_scope, &gt_pch_nx_c_scope }, { &current_scope, 1, sizeof (current_scope), &gt_ggc_mx_c_scope, &gt_pch_nx_c_scope }, { &visible_builtins, 1, sizeof (visible_builtins), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &all_translation_units, 1, sizeof (all_translation_units), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &c_stmt_tree.x_cur_stmt_list, 1, sizeof (c_stmt_tree), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &static_dtors, 1, sizeof (static_dtors), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &static_ctors, 1, sizeof (static_ctors), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_ggc_rd_gt_c_decl_h[] = { { &binding_freelist, 1, sizeof (binding_freelist), NULL, NULL }, { &scope_freelist, 1, sizeof (scope_freelist), NULL, NULL }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_c_decl_h[] = { { &compound_literal_number, 1, sizeof (compound_literal_number), NULL, NULL }, { &c_stmt_tree, 1, sizeof (c_stmt_tree), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* Build expressions with type checking for C compiler. Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is part of the C front end. It contains routines to build C expressions given their operands, including computing the types of the result, C-specific error checks, and some optimization. */ /* Nonzero if we've already printed a "missing braces around initializer" message within this initializer.
*/ static int missing_braces_mentioned; static int require_constant_value; static int require_constant_elements; static tree qualify_type (tree, tree); static int tagged_types_tu_compatible_p (tree, tree); static int comp_target_types (tree, tree, int); static int function_types_compatible_p (tree, tree); static int type_lists_compatible_p (tree, tree); static tree decl_constant_value_for_broken_optimization (tree); static tree default_function_array_conversion (tree); static tree lookup_field (tree, tree); static tree convert_arguments (tree, tree, tree, tree); static tree pointer_diff (tree, tree); static tree internal_build_compound_expr (tree, int); static tree convert_for_assignment (tree, tree, const char *, tree, tree, int); static void warn_for_assignment (const char *, const char *, tree, int); static tree valid_compound_expr_initializer (tree, tree); static void push_string (const char *); static void push_member_name (tree); static void push_array_bounds (int); static int spelling_length (void); static char *print_spelling (char *); static void warning_init (const char *); static tree digest_init (tree, tree, int); static void output_init_element (tree, tree, tree, int); static void output_pending_init_elements (int); static int set_designator (int); static void push_range_stack (tree); static void add_pending_init (tree, tree); static void set_nonincremental_init (void); static void set_nonincremental_init_from_string (tree); static tree find_init_member (tree); static int lvalue_or_else (tree, const char *); /* Do `exp = require_complete_type (exp);' to make sure exp does not have an incomplete type. (That includes void types.) */ tree require_complete_type (tree value) { tree type = TREE_TYPE (value); if (value == error_mark_node || type == error_mark_node) return error_mark_node; /* First, detect a valid value with a complete type. */ if (COMPLETE_TYPE_P (type)) return value; c_incomplete_type_error (value, type); return error_mark_node; } /* Print an error message for invalid use of an incomplete type. VALUE is the expression that was used (or 0 if that isn't known) and TYPE is the type that was invalid. */ void c_incomplete_type_error (tree value, tree type) { const char *type_code_string; /* Avoid duplicate error message. */ if (TREE_CODE (type) == ERROR_MARK) return; if (value != 0 && (TREE_CODE (value) == VAR_DECL || TREE_CODE (value) == PARM_DECL)) error ("`%s' has an incomplete type", IDENTIFIER_POINTER (DECL_NAME (value))); else { retry: /* We must print an error message. Be clever about what it says. */ switch (TREE_CODE (type)) { case RECORD_TYPE: type_code_string = "struct"; break; case UNION_TYPE: type_code_string = "union"; break; case ENUMERAL_TYPE: type_code_string = "enum"; break; case VOID_TYPE: error ("invalid use of void expression"); return; case ARRAY_TYPE: if (TYPE_DOMAIN (type)) { if (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL) { error ("invalid use of flexible array member"); return; } type = TREE_TYPE (type); goto retry; } error ("invalid use of array with unspecified bounds"); return; default: abort (); } if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE) error ("invalid use of undefined type `%s %s'", type_code_string, IDENTIFIER_POINTER (TYPE_NAME (type))); else /* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL. */ error ("invalid use of incomplete typedef `%s'", IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type)))); } } /* Given a type, apply default promotions wrt unnamed function arguments and return the new type. 
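   As an illustrative sketch of the mapping this implements (the tree nodes
   named below are the usual GCC globals, cited here only for illustration):

       c_type_promotes_to (float_type_node)           yields double_type_node
       c_type_promotes_to (char_type_node)            yields integer_type_node
       c_type_promotes_to (short_unsigned_type_node)  yields integer_type_node,
           on targets where short is narrower than int
       c_type_promotes_to (integer_type_node)         yields integer_type_node (unchanged)

   so a 'char' or 'float' argument to an unprototyped function travels as an
   'int' or a 'double' respectively.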
*/ tree c_type_promotes_to (tree type) { if (TYPE_MAIN_VARIANT (type) == float_type_node) return double_type_node; if (c_promoting_integer_type_p (type)) { /* Preserve unsignedness if not really getting any wider. */ if (TYPE_UNSIGNED (type) && (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))) return unsigned_type_node; return integer_type_node; } return type; } /* Return a variant of TYPE which has all the type qualifiers of LIKE as well as those of TYPE. */ static tree qualify_type (tree type, tree like) { return c_build_qualified_type (type, TYPE_QUALS (type) | TYPE_QUALS (like)); } /* Return the composite type of two compatible types. We assume that comptypes has already been done and returned nonzero; if that isn't so, this may crash. In particular, we assume that qualifiers match. */ tree composite_type (tree t1, tree t2) { enum tree_code code1; enum tree_code code2; tree attributes; /* Save time if the two types are the same. */ if (t1 == t2) return t1; /* If one type is nonsense, use the other. */ if (t1 == error_mark_node) return t2; if (t2 == error_mark_node) return t1; code1 = TREE_CODE (t1); code2 = TREE_CODE (t2); /* Merge the attributes. */ attributes = targetm.merge_type_attributes (t1, t2); /* If one is an enumerated type and the other is the compatible integer type, the composite type might be either of the two (DR#013 question 3). For consistency, use the enumerated type as the composite type. */ if (code1 == ENUMERAL_TYPE && code2 == INTEGER_TYPE) return t1; if (code2 == ENUMERAL_TYPE && code1 == INTEGER_TYPE) return t2; if (code1 != code2) abort (); switch (code1) { case POINTER_TYPE: /* For two pointers, do this recursively on the target type. */ { tree pointed_to_1 = TREE_TYPE (t1); tree pointed_to_2 = TREE_TYPE (t2); tree target = composite_type (pointed_to_1, pointed_to_2); t1 = build_pointer_type (target); t1 = build_type_attribute_variant (t1, attributes); return qualify_type (t1, t2); } case ARRAY_TYPE: { tree elt = composite_type (TREE_TYPE (t1), TREE_TYPE (t2)); /* We should not have any type quals on arrays at all. */ if (TYPE_QUALS (t1) || TYPE_QUALS (t2)) abort (); /* Save space: see if the result is identical to one of the args. */ if (elt == TREE_TYPE (t1) && TYPE_DOMAIN (t1)) return build_type_attribute_variant (t1, attributes); if (elt == TREE_TYPE (t2) && TYPE_DOMAIN (t2)) return build_type_attribute_variant (t2, attributes); if (elt == TREE_TYPE (t1) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1)) return build_type_attribute_variant (t1, attributes); if (elt == TREE_TYPE (t2) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1)) return build_type_attribute_variant (t2, attributes); /* Merge the element types, and have a size if either arg has one. */ t1 = build_array_type (elt, TYPE_DOMAIN (TYPE_DOMAIN (t1) ? t1 : t2)); return build_type_attribute_variant (t1, attributes); } case FUNCTION_TYPE: /* Function types: prefer the one that specified arg types. If both do, merge the arg types. Also merge the return types. */ { tree valtype = composite_type (TREE_TYPE (t1), TREE_TYPE (t2)); tree p1 = TYPE_ARG_TYPES (t1); tree p2 = TYPE_ARG_TYPES (t2); int len; tree newargs, n; int i; /* Save space: see if the result is identical to one of the args. */ if (valtype == TREE_TYPE (t1) && ! TYPE_ARG_TYPES (t2)) return build_type_attribute_variant (t1, attributes); if (valtype == TREE_TYPE (t2) && ! TYPE_ARG_TYPES (t1)) return build_type_attribute_variant (t2, attributes); /* Simple way if one arg fails to specify argument types. 
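   (As a hedged illustration, following the composite-type example in C99
   6.2.7: given two declarations of the same function

       int f (int (*)(), double (*)[3]);
       int f (int (*)(char *), double (*)[]);

   the composite type is

       int f (int (*)(char *), double (*)[3]);

   The branch just below handles the simpler case in which one of the two
   types carries no parameter information at all, e.g. the composite of
   'int g ()' and 'int g (char *)' is 'int g (char *)'.)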
*/ if (TYPE_ARG_TYPES (t1) == 0) { t1 = build_function_type (valtype, TYPE_ARG_TYPES (t2)); t1 = build_type_attribute_variant (t1, attributes); return qualify_type (t1, t2); } if (TYPE_ARG_TYPES (t2) == 0) { t1 = build_function_type (valtype, TYPE_ARG_TYPES (t1)); t1 = build_type_attribute_variant (t1, attributes); return qualify_type (t1, t2); } /* If both args specify argument types, we must merge the two lists, argument by argument. */ /* Tell global_bindings_p to return false so that variable_size doesn't abort on VLAs in parameter types. */ c_override_global_bindings_to_false = true; len = list_length (p1); newargs = 0; for (i = 0; i < len; i++) newargs = tree_cons (NULL_TREE, NULL_TREE, newargs); n = newargs; for (; p1; p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2), n = TREE_CHAIN (n)) { /* A null type means arg type is not specified. Take whatever the other function type has. */ if (TREE_VALUE (p1) == 0) { TREE_VALUE (n) = TREE_VALUE (p2); goto parm_done; } if (TREE_VALUE (p2) == 0) { TREE_VALUE (n) = TREE_VALUE (p1); goto parm_done; } /* Given wait (union {union wait *u; int *i} *) and wait (union wait *), prefer union wait * as type of parm. */ if (TREE_CODE (TREE_VALUE (p1)) == UNION_TYPE && TREE_VALUE (p1) != TREE_VALUE (p2)) { tree memb; for (memb = TYPE_FIELDS (TREE_VALUE (p1)); memb; memb = TREE_CHAIN (memb)) if (comptypes (TREE_TYPE (memb), TREE_VALUE (p2))) { TREE_VALUE (n) = TREE_VALUE (p2); if (pedantic) pedwarn ("function types not truly compatible in ISO C"); goto parm_done; } } if (TREE_CODE (TREE_VALUE (p2)) == UNION_TYPE && TREE_VALUE (p2) != TREE_VALUE (p1)) { tree memb; for (memb = TYPE_FIELDS (TREE_VALUE (p2)); memb; memb = TREE_CHAIN (memb)) if (comptypes (TREE_TYPE (memb), TREE_VALUE (p1))) { TREE_VALUE (n) = TREE_VALUE (p1); if (pedantic) pedwarn ("function types not truly compatible in ISO C"); goto parm_done; } } TREE_VALUE (n) = composite_type (TREE_VALUE (p1), TREE_VALUE (p2)); parm_done: ; } c_override_global_bindings_to_false = false; t1 = build_function_type (valtype, newargs); t1 = qualify_type (t1, t2); /* ... falls through ... */ } default: return build_type_attribute_variant (t1, attributes); } } /* Return the type of a conditional expression between pointers to possibly differently qualified versions of compatible types. We assume that comp_target_types has already been done and returned nonzero; if that isn't so, this may crash. */ static tree common_pointer_type (tree t1, tree t2) { tree attributes; tree pointed_to_1; tree pointed_to_2; tree target; /* Save time if the two types are the same. */ if (t1 == t2) return t1; /* If one type is nonsense, use the other. */ if (t1 == error_mark_node) return t2; if (t2 == error_mark_node) return t1; if (TREE_CODE (t1) != POINTER_TYPE || TREE_CODE (t2) != POINTER_TYPE) abort (); /* Merge the attributes. */ attributes = targetm.merge_type_attributes (t1, t2); /* Find the composite type of the target types, and combine the qualifiers of the two types' targets. */ pointed_to_1 = TREE_TYPE (t1); pointed_to_2 = TREE_TYPE (t2); target = composite_type (TYPE_MAIN_VARIANT (pointed_to_1), TYPE_MAIN_VARIANT (pointed_to_2)); t1 = build_pointer_type (c_build_qualified_type (target, TYPE_QUALS (pointed_to_1) | TYPE_QUALS (pointed_to_2))); return build_type_attribute_variant (t1, attributes); } /* Return the common type for two arithmetic types under the usual arithmetic conversions. The default conversions have already been applied, and enumerated types converted to their compatible integer types. 
The resulting type is unqualified and has no attributes. This is the type for the result of most arithmetic operations if the operands have the given two types. */ tree common_type (tree t1, tree t2) { enum tree_code code1; enum tree_code code2; /* If one type is nonsense, use the other. */ if (t1 == error_mark_node) return t2; if (t2 == error_mark_node) return t1; if (TYPE_QUALS (t1) != TYPE_UNQUALIFIED) t1 = TYPE_MAIN_VARIANT (t1); if (TYPE_QUALS (t2) != TYPE_UNQUALIFIED) t2 = TYPE_MAIN_VARIANT (t2); if (TYPE_ATTRIBUTES (t1) != NULL_TREE) t1 = build_type_attribute_variant (t1, NULL_TREE); if (TYPE_ATTRIBUTES (t2) != NULL_TREE) t2 = build_type_attribute_variant (t2, NULL_TREE); /* Save time if the two types are the same. */ if (t1 == t2) return t1; code1 = TREE_CODE (t1); code2 = TREE_CODE (t2); if (code1 != VECTOR_TYPE && code1 != COMPLEX_TYPE && code1 != REAL_TYPE && code1 != INTEGER_TYPE) abort (); if (code2 != VECTOR_TYPE && code2 != COMPLEX_TYPE && code2 != REAL_TYPE && code2 != INTEGER_TYPE) abort (); /* If one type is a vector type, return that type. (How the usual arithmetic conversions apply to the vector types extension is not precisely specified.) */ if (code1 == VECTOR_TYPE) return t1; if (code2 == VECTOR_TYPE) return t2; /* If one type is complex, form the common type of the non-complex components, then make that complex. Use T1 or T2 if it is the required type. */ if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE) { tree subtype1 = code1 == COMPLEX_TYPE ? TREE_TYPE (t1) : t1; tree subtype2 = code2 == COMPLEX_TYPE ? TREE_TYPE (t2) : t2; tree subtype = common_type (subtype1, subtype2); if (code1 == COMPLEX_TYPE && TREE_TYPE (t1) == subtype) return t1; else if (code2 == COMPLEX_TYPE && TREE_TYPE (t2) == subtype) return t2; else return build_complex_type (subtype); } /* If only one is real, use it as the result. */ if (code1 == REAL_TYPE && code2 != REAL_TYPE) return t1; if (code2 == REAL_TYPE && code1 != REAL_TYPE) return t2; /* Both real or both integers; use the one with greater precision. */ if (TYPE_PRECISION (t1) > TYPE_PRECISION (t2)) return t1; else if (TYPE_PRECISION (t2) > TYPE_PRECISION (t1)) return t2; /* Same precision. Prefer long longs to longs to ints when the same precision, following the C99 rules on integer type rank (which are equivalent to the C90 rules for C90 types). */ if (TYPE_MAIN_VARIANT (t1) == long_long_unsigned_type_node || TYPE_MAIN_VARIANT (t2) == long_long_unsigned_type_node) return long_long_unsigned_type_node; if (TYPE_MAIN_VARIANT (t1) == long_long_integer_type_node || TYPE_MAIN_VARIANT (t2) == long_long_integer_type_node) { if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2)) return long_long_unsigned_type_node; else return long_long_integer_type_node; } if (TYPE_MAIN_VARIANT (t1) == long_unsigned_type_node || TYPE_MAIN_VARIANT (t2) == long_unsigned_type_node) return long_unsigned_type_node; if (TYPE_MAIN_VARIANT (t1) == long_integer_type_node || TYPE_MAIN_VARIANT (t2) == long_integer_type_node) { /* But preserve unsignedness from the other type, since long cannot hold all the values of an unsigned int. */ if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2)) return long_unsigned_type_node; else return long_integer_type_node; } /* Likewise, prefer long double to double even if same size. */ if (TYPE_MAIN_VARIANT (t1) == long_double_type_node || TYPE_MAIN_VARIANT (t2) == long_double_type_node) return long_double_type_node; /* Otherwise prefer the unsigned one. 
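   As a hedged illustration of the precedence implemented above:

       common_type (integer_type_node, unsigned_type_node) is unsigned_type_node
           (equal precision, so the unsigned operand wins, per the test below);
       common_type (double_type_node, integer_type_node) is double_type_node
           (only one operand is real);
       common_type (long_integer_type_node, unsigned_type_node) is
           long_unsigned_type_node on targets where long and int share a
           precision, since plain long could not hold every unsigned int value.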
*/ if (TYPE_UNSIGNED (t1)) return t1; else return t2; } /* Return 1 if TYPE1 and TYPE2 are compatible types for assignment or various other operations. Return 2 if they are compatible but a warning may be needed if you use them together. */ int comptypes (tree type1, tree type2) { tree t1 = type1; tree t2 = type2; int attrval, val; /* Suppress errors caused by previously reported errors. */ if (t1 == t2 || !t1 || !t2 || TREE_CODE (t1) == ERROR_MARK || TREE_CODE (t2) == ERROR_MARK) return 1; /* If either type is the internal version of sizetype, return the language version. */ if (TREE_CODE (t1) == INTEGER_TYPE && TYPE_IS_SIZETYPE (t1) && TYPE_ORIG_SIZE_TYPE (t1)) t1 = TYPE_ORIG_SIZE_TYPE (t1); if (TREE_CODE (t2) == INTEGER_TYPE && TYPE_IS_SIZETYPE (t2) && TYPE_ORIG_SIZE_TYPE (t2)) t2 = TYPE_ORIG_SIZE_TYPE (t2); /* Enumerated types are compatible with integer types, but this is not transitive: two enumerated types in the same translation unit are compatible with each other only if they are the same type. */ if (TREE_CODE (t1) == ENUMERAL_TYPE && TREE_CODE (t2) != ENUMERAL_TYPE) t1 = c_common_type_for_size (TYPE_PRECISION (t1), TYPE_UNSIGNED (t1)); else if (TREE_CODE (t2) == ENUMERAL_TYPE && TREE_CODE (t1) != ENUMERAL_TYPE) t2 = c_common_type_for_size (TYPE_PRECISION (t2), TYPE_UNSIGNED (t2)); if (t1 == t2) return 1; /* Different classes of types can't be compatible. */ if (TREE_CODE (t1) != TREE_CODE (t2)) return 0; /* Qualifiers must match. C99 6.7.3p9 */ if (TYPE_QUALS (t1) != TYPE_QUALS (t2)) return 0; /* Allow for two different type nodes which have essentially the same definition. Note that we already checked for equality of the type qualifiers (just above). */ if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2)) return 1; /* 1 if no need for warning yet, 2 if warning cause has been seen. */ if (! (attrval = targetm.comp_type_attributes (t1, t2))) return 0; /* 1 if no need for warning yet, 2 if warning cause has been seen. */ val = 0; switch (TREE_CODE (t1)) { case POINTER_TYPE: /* We must give ObjC the first crack at comparing pointers, since protocol qualifiers may be involved. */ if (c_dialect_objc () && (val = objc_comptypes (t1, t2, 0)) >= 0) break; val = (TREE_TYPE (t1) == TREE_TYPE (t2) ? 1 : comptypes (TREE_TYPE (t1), TREE_TYPE (t2))); break; case FUNCTION_TYPE: val = function_types_compatible_p (t1, t2); break; case ARRAY_TYPE: { tree d1 = TYPE_DOMAIN (t1); tree d2 = TYPE_DOMAIN (t2); bool d1_variable, d2_variable; bool d1_zero, d2_zero; val = 1; /* Target types must match incl. qualifiers. */ if (TREE_TYPE (t1) != TREE_TYPE (t2) && 0 == (val = comptypes (TREE_TYPE (t1), TREE_TYPE (t2)))) return 0; /* Sizes must match unless one is missing or variable. */ if (d1 == 0 || d2 == 0 || d1 == d2) break; d1_zero = ! TYPE_MAX_VALUE (d1); d2_zero = ! TYPE_MAX_VALUE (d2); d1_variable = (! d1_zero && (TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST || TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST)); d2_variable = (! d2_zero && (TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST || TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST)); if (d1_variable || d2_variable) break; if (d1_zero && d2_zero) break; if (d1_zero || d2_zero || ! tree_int_cst_equal (TYPE_MIN_VALUE (d1), TYPE_MIN_VALUE (d2)) || ! tree_int_cst_equal (TYPE_MAX_VALUE (d1), TYPE_MAX_VALUE (d2))) val = 0; break; } case RECORD_TYPE: /* We are dealing with two distinct structs. In assorted Objective-C corner cases, however, these can still be deemed equivalent. 
*/ if (c_dialect_objc () && objc_comptypes (t1, t2, 0) == 1) val = 1; case ENUMERAL_TYPE: case UNION_TYPE: if (val != 1 && !same_translation_unit_p (t1, t2)) val = tagged_types_tu_compatible_p (t1, t2); break; case VECTOR_TYPE: val = TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2) && comptypes (TREE_TYPE (t1), TREE_TYPE (t2)); break; default: break; } return attrval == 2 && val == 1 ? 2 : val; } /* Return 1 if TTL and TTR are pointers to types that are equivalent, ignoring their qualifiers. REFLEXIVE is only used by ObjC - set it to 1 or 0 depending if the check of the pointer types is meant to be reflexive or not (typically, assignments are not reflexive, while comparisons are reflexive). */ static int comp_target_types (tree ttl, tree ttr, int reflexive) { int val; /* Give objc_comptypes a crack at letting these types through. */ if ((val = objc_comptypes (ttl, ttr, reflexive)) >= 0) return val; val = comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (ttl)), TYPE_MAIN_VARIANT (TREE_TYPE (ttr))); if (val == 2 && pedantic) pedwarn ("types are not quite compatible"); return val; } /* Subroutines of `comptypes'. */ /* Determine whether two trees derive from the same translation unit. If the CONTEXT chain ends in a null, that tree's context is still being parsed, so if two trees have context chains ending in null, they're in the same translation unit. */ int same_translation_unit_p (tree t1, tree t2) { while (t1 && TREE_CODE (t1) != TRANSLATION_UNIT_DECL) switch (TREE_CODE_CLASS (TREE_CODE (t1))) { case 'd': t1 = DECL_CONTEXT (t1); break; case 't': t1 = TYPE_CONTEXT (t1); break; case 'x': t1 = BLOCK_SUPERCONTEXT (t1); break; /* assume block */ default: abort (); } while (t2 && TREE_CODE (t2) != TRANSLATION_UNIT_DECL) switch (TREE_CODE_CLASS (TREE_CODE (t2))) { case 'd': t2 = DECL_CONTEXT (t2); break; case 't': t2 = TYPE_CONTEXT (t2); break; case 'x': t2 = BLOCK_SUPERCONTEXT (t2); break; /* assume block */ default: abort (); } return t1 == t2; } /* The C standard says that two structures in different translation units are compatible with each other only if the types of their fields are compatible (among other things). So, consider two copies of this structure: */ struct tagged_tu_seen { const struct tagged_tu_seen * next; tree t1; tree t2; }; /* Can they be compatible with each other? We choose to break the recursion by allowing those types to be compatible. */ static const struct tagged_tu_seen * tagged_tu_seen_base; /* Return 1 if two 'struct', 'union', or 'enum' types T1 and T2 are compatible. If the two types are not the same (which has been checked earlier), this can only happen when multiple translation units are being compiled. See C99 6.2.7 paragraph 1 for the exact rules. */ static int tagged_types_tu_compatible_p (tree t1, tree t2) { tree s1, s2; bool needs_warning = false; /* We have to verify that the tags of the types are the same. This is harder than it looks because this may be a typedef, so we have to go look at the original type. It may even be a typedef of a typedef... In the case of compiler-created builtin structs the TYPE_DECL may be a dummy, with no DECL_ORIGINAL_TYPE. Don't fault. */ while (TYPE_NAME (t1) && TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL && DECL_ORIGINAL_TYPE (TYPE_NAME (t1))) t1 = DECL_ORIGINAL_TYPE (TYPE_NAME (t1)); while (TYPE_NAME (t2) && TREE_CODE (TYPE_NAME (t2)) == TYPE_DECL && DECL_ORIGINAL_TYPE (TYPE_NAME (t2))) t2 = DECL_ORIGINAL_TYPE (TYPE_NAME (t2)); /* C90 didn't have the requirement that the two tags be the same. 
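   An informal sketch of the situation this function exists for, with the
   declaration invented for illustration: two translation units each contain

       struct pair { int a; int b; };

   The two RECORD_TYPE nodes are distinct trees, yet C99 6.2.7p1 makes the
   types compatible because the tags match and the members agree in name,
   type and position; a mismatch in any of those makes this function return 0.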
*/ if (flag_isoc99 && TYPE_NAME (t1) != TYPE_NAME (t2)) return 0; /* C90 didn't say what happened if one or both of the types were incomplete; we choose to follow C99 rules here, which is that they are compatible. */ if (TYPE_SIZE (t1) == NULL || TYPE_SIZE (t2) == NULL) return 1; { const struct tagged_tu_seen * tts_i; for (tts_i = tagged_tu_seen_base; tts_i != NULL; tts_i = tts_i->next) if (tts_i->t1 == t1 && tts_i->t2 == t2) return 1; } switch (TREE_CODE (t1)) { case ENUMERAL_TYPE: { /* Speed up the case where the type values are in the same order. */ tree tv1 = TYPE_VALUES (t1); tree tv2 = TYPE_VALUES (t2); if (tv1 == tv2) return 1; for (;tv1 && tv2; tv1 = TREE_CHAIN (tv1), tv2 = TREE_CHAIN (tv2)) { if (TREE_PURPOSE (tv1) != TREE_PURPOSE (tv2)) break; if (simple_cst_equal (TREE_VALUE (tv1), TREE_VALUE (tv2)) != 1) return 0; } if (tv1 == NULL_TREE && tv2 == NULL_TREE) return 1; if (tv1 == NULL_TREE || tv2 == NULL_TREE) return 0; if (list_length (TYPE_VALUES (t1)) != list_length (TYPE_VALUES (t2))) return 0; for (s1 = TYPE_VALUES (t1); s1; s1 = TREE_CHAIN (s1)) { s2 = purpose_member (TREE_PURPOSE (s1), TYPE_VALUES (t2)); if (s2 == NULL || simple_cst_equal (TREE_VALUE (s1), TREE_VALUE (s2)) != 1) return 0; } return 1; } case UNION_TYPE: { if (list_length (TYPE_FIELDS (t1)) != list_length (TYPE_FIELDS (t2))) return 0; for (s1 = TYPE_FIELDS (t1); s1; s1 = TREE_CHAIN (s1)) { bool ok = false; struct tagged_tu_seen tts; tts.next = tagged_tu_seen_base; tts.t1 = t1; tts.t2 = t2; tagged_tu_seen_base = &tts; if (DECL_NAME (s1) != NULL) for (s2 = TYPE_FIELDS (t2); s2; s2 = TREE_CHAIN (s2)) if (DECL_NAME (s1) == DECL_NAME (s2)) { int result; result = comptypes (TREE_TYPE (s1), TREE_TYPE (s2)); if (result == 0) break; if (result == 2) needs_warning = true; if (TREE_CODE (s1) == FIELD_DECL && simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1), DECL_FIELD_BIT_OFFSET (s2)) != 1) break; ok = true; break; } tagged_tu_seen_base = tts.next; if (! ok) return 0; } return needs_warning ? 2 : 1; } case RECORD_TYPE: { struct tagged_tu_seen tts; tts.next = tagged_tu_seen_base; tts.t1 = t1; tts.t2 = t2; tagged_tu_seen_base = &tts; for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2); s1 && s2; s1 = TREE_CHAIN (s1), s2 = TREE_CHAIN (s2)) { int result; if (TREE_CODE (s1) != TREE_CODE (s2) || DECL_NAME (s1) != DECL_NAME (s2)) break; result = comptypes (TREE_TYPE (s1), TREE_TYPE (s2)); if (result == 0) break; if (result == 2) needs_warning = true; if (TREE_CODE (s1) == FIELD_DECL && simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1), DECL_FIELD_BIT_OFFSET (s2)) != 1) break; } tagged_tu_seen_base = tts.next; if (s1 && s2) return 0; return needs_warning ? 2 : 1; } default: abort (); } } /* Return 1 if two function types F1 and F2 are compatible. If either type specifies no argument types, the other must specify a fixed number of self-promoting arg types. Otherwise, if one type specifies only the number of arguments, the other must specify that number of self-promoting arg types. Otherwise, the argument types must match. */ static int function_types_compatible_p (tree f1, tree f2) { tree args1, args2; /* 1 if no need for warning yet, 2 if warning cause has been seen. */ int val = 1; int val1; tree ret1, ret2; ret1 = TREE_TYPE (f1); ret2 = TREE_TYPE (f2); /* 'volatile' qualifiers on a function's return type mean the function is noreturn. 
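   Setting the volatile detail aside for a moment, a hedged sketch of the
   overall contract, with declarations invented for illustration:

       int f ();        followed by      int f (int);
       int g ();        followed by      int g (char);

   f's two types are compatible, because every parameter type in the prototype
   survives the default argument promotions unchanged; g's are not, because
   'char' promotes to 'int', so the self_promoting_args_p test below fails and
   0 is returned.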
*/ if (pedantic && TYPE_VOLATILE (ret1) != TYPE_VOLATILE (ret2)) pedwarn ("function return types not compatible due to `volatile'"); if (TYPE_VOLATILE (ret1)) ret1 = build_qualified_type (TYPE_MAIN_VARIANT (ret1), TYPE_QUALS (ret1) & ~TYPE_QUAL_VOLATILE); if (TYPE_VOLATILE (ret2)) ret2 = build_qualified_type (TYPE_MAIN_VARIANT (ret2), TYPE_QUALS (ret2) & ~TYPE_QUAL_VOLATILE); val = comptypes (ret1, ret2); if (val == 0) return 0; args1 = TYPE_ARG_TYPES (f1); args2 = TYPE_ARG_TYPES (f2); /* An unspecified parmlist matches any specified parmlist whose argument types don't need default promotions. */ if (args1 == 0) { if (!self_promoting_args_p (args2)) return 0; /* If one of these types comes from a non-prototype fn definition, compare that with the other type's arglist. If they don't match, ask for a warning (but no error). */ if (TYPE_ACTUAL_ARG_TYPES (f1) && 1 != type_lists_compatible_p (args2, TYPE_ACTUAL_ARG_TYPES (f1))) val = 2; return val; } if (args2 == 0) { if (!self_promoting_args_p (args1)) return 0; if (TYPE_ACTUAL_ARG_TYPES (f2) && 1 != type_lists_compatible_p (args1, TYPE_ACTUAL_ARG_TYPES (f2))) val = 2; return val; } /* Both types have argument lists: compare them and propagate results. */ val1 = type_lists_compatible_p (args1, args2); return val1 != 1 ? val1 : val; } /* Check two lists of types for compatibility, returning 0 for incompatible, 1 for compatible, or 2 for compatible with warning. */ static int type_lists_compatible_p (tree args1, tree args2) { /* 1 if no need for warning yet, 2 if warning cause has been seen. */ int val = 1; int newval = 0; while (1) { if (args1 == 0 && args2 == 0) return val; /* If one list is shorter than the other, they fail to match. */ if (args1 == 0 || args2 == 0) return 0; /* A null pointer instead of a type means there is supposed to be an argument but nothing is specified about what type it has. So match anything that self-promotes. */ if (TREE_VALUE (args1) == 0) { if (c_type_promotes_to (TREE_VALUE (args2)) != TREE_VALUE (args2)) return 0; } else if (TREE_VALUE (args2) == 0) { if (c_type_promotes_to (TREE_VALUE (args1)) != TREE_VALUE (args1)) return 0; } /* If one of the lists has an error marker, ignore this arg. */ else if (TREE_CODE (TREE_VALUE (args1)) == ERROR_MARK || TREE_CODE (TREE_VALUE (args2)) == ERROR_MARK) ; else if (! (newval = comptypes (TYPE_MAIN_VARIANT (TREE_VALUE (args1)), TYPE_MAIN_VARIANT (TREE_VALUE (args2))))) { /* Allow wait (union {union wait *u; int *i} *) and wait (union wait *) to be compatible. */ if (TREE_CODE (TREE_VALUE (args1)) == UNION_TYPE && (TYPE_NAME (TREE_VALUE (args1)) == 0 || TYPE_TRANSPARENT_UNION (TREE_VALUE (args1))) && TREE_CODE (TYPE_SIZE (TREE_VALUE (args1))) == INTEGER_CST && tree_int_cst_equal (TYPE_SIZE (TREE_VALUE (args1)), TYPE_SIZE (TREE_VALUE (args2)))) { tree memb; for (memb = TYPE_FIELDS (TREE_VALUE (args1)); memb; memb = TREE_CHAIN (memb)) if (comptypes (TREE_TYPE (memb), TREE_VALUE (args2))) break; if (memb == 0) return 0; } else if (TREE_CODE (TREE_VALUE (args2)) == UNION_TYPE && (TYPE_NAME (TREE_VALUE (args2)) == 0 || TYPE_TRANSPARENT_UNION (TREE_VALUE (args2))) && TREE_CODE (TYPE_SIZE (TREE_VALUE (args2))) == INTEGER_CST && tree_int_cst_equal (TYPE_SIZE (TREE_VALUE (args2)), TYPE_SIZE (TREE_VALUE (args1)))) { tree memb; for (memb = TYPE_FIELDS (TREE_VALUE (args2)); memb; memb = TREE_CHAIN (memb)) if (comptypes (TREE_TYPE (memb), TREE_VALUE (args1))) break; if (memb == 0) return 0; } else return 0; } /* comptypes said ok, but record if it said to warn. 
*/ if (newval > val) val = newval; args1 = TREE_CHAIN (args1); args2 = TREE_CHAIN (args2); } } /* Compute the size to increment a pointer by. */ tree c_size_in_bytes (tree type) { enum tree_code code = TREE_CODE (type); if (code == FUNCTION_TYPE || code == VOID_TYPE || code == ERROR_MARK) return size_one_node; if (!COMPLETE_OR_VOID_TYPE_P (type)) { error ("arithmetic on pointer to an incomplete type"); return size_one_node; } /* Convert in case a char is more than one unit. */ return size_binop (CEIL_DIV_EXPR, TYPE_SIZE_UNIT (type), size_int (TYPE_PRECISION (char_type_node) / BITS_PER_UNIT)); } /* Return either DECL or its known constant value (if it has one). */ tree decl_constant_value (tree decl) { if (/* Don't change a variable array bound or initial value to a constant in a place where a variable is invalid. Note that DECL_INITIAL isn't valid for a PARM_DECL. */ current_function_decl != 0 && TREE_CODE (decl) != PARM_DECL && ! TREE_THIS_VOLATILE (decl) && TREE_READONLY (decl) && DECL_INITIAL (decl) != 0 && TREE_CODE (DECL_INITIAL (decl)) != ERROR_MARK /* This is invalid if initial value is not constant. If it has either a function call, a memory reference, or a variable, then re-evaluating it could give different results. */ && TREE_CONSTANT (DECL_INITIAL (decl)) /* Check for cases where this is sub-optimal, even though valid. */ && TREE_CODE (DECL_INITIAL (decl)) != CONSTRUCTOR) return DECL_INITIAL (decl); return decl; } /* Return either DECL or its known constant value (if it has one), but return DECL if pedantic or DECL has mode BLKmode. This is for bug-compatibility with the old behavior of decl_constant_value (before GCC 3.0); every use of this function is a bug and it should be removed before GCC 3.1. It is not appropriate to use pedantic in a way that affects optimization, and BLKmode is probably not the right test for avoiding misoptimizations either. */ static tree decl_constant_value_for_broken_optimization (tree decl) { if (pedantic || DECL_MODE (decl) == BLKmode) return decl; else return decl_constant_value (decl); } /* Perform the default conversion of arrays and functions to pointers. Return the result of converting EXP. For any other expression, just return EXP. */ static tree default_function_array_conversion (tree exp) { tree orig_exp; tree type = TREE_TYPE (exp); enum tree_code code = TREE_CODE (type); int not_lvalue = 0; /* Strip NON_LVALUE_EXPRs and no-op conversions, since we aren't using as an lvalue. Do not use STRIP_NOPS here! It will remove conversions from pointer to integer and cause infinite recursion. */ orig_exp = exp; while (TREE_CODE (exp) == NON_LVALUE_EXPR || (TREE_CODE (exp) == NOP_EXPR && TREE_TYPE (TREE_OPERAND (exp, 0)) == TREE_TYPE (exp))) { if (TREE_CODE (exp) == NON_LVALUE_EXPR) not_lvalue = 1; exp = TREE_OPERAND (exp, 0); } /* Preserve the original expression code. 
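   A hedged sketch of what the surrounding function produces for ordinary
   declarations (names invented for illustration):

       int a[10];       using 'a' as a value yields an ADDR_EXPR of type 'int *'
       int f (void);    using 'f' as a value yields an ADDR_EXPR of type 'int (*)(void)'

   Contexts where the operand must keep its array or function type, such as
   sizeof, do not apply this conversion.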
*/ if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (exp)))) C_SET_EXP_ORIGINAL_CODE (exp, C_EXP_ORIGINAL_CODE (orig_exp)); if (code == FUNCTION_TYPE) { return build_unary_op (ADDR_EXPR, exp, 0); } if (code == ARRAY_TYPE) { tree adr; tree restype = TREE_TYPE (type); tree ptrtype; int constp = 0; int volatilep = 0; int lvalue_array_p; if (TREE_CODE_CLASS (TREE_CODE (exp)) == 'r' || DECL_P (exp)) { constp = TREE_READONLY (exp); volatilep = TREE_THIS_VOLATILE (exp); } if (TYPE_QUALS (type) || constp || volatilep) restype = c_build_qualified_type (restype, TYPE_QUALS (type) | (constp * TYPE_QUAL_CONST) | (volatilep * TYPE_QUAL_VOLATILE)); if (TREE_CODE (exp) == INDIRECT_REF) return convert (build_pointer_type (restype), TREE_OPERAND (exp, 0)); if (TREE_CODE (exp) == COMPOUND_EXPR) { tree op1 = default_conversion (TREE_OPERAND (exp, 1)); return build (COMPOUND_EXPR, TREE_TYPE (op1), TREE_OPERAND (exp, 0), op1); } lvalue_array_p = !not_lvalue && lvalue_p (exp); if (!flag_isoc99 && !lvalue_array_p) { /* Before C99, non-lvalue arrays do not decay to pointers. Normally, using such an array would be invalid; but it can be used correctly inside sizeof or as a statement expression. Thus, do not give an error here; an error will result later. */ return exp; } ptrtype = build_pointer_type (restype); if (TREE_CODE (exp) == VAR_DECL) { /* We are making an ADDR_EXPR of ptrtype. This is a valid ADDR_EXPR because it's the best way of representing what happens in C when we take the address of an array and place it in a pointer to the element type. */ adr = build1 (ADDR_EXPR, ptrtype, exp); if (!c_mark_addressable (exp)) return error_mark_node; TREE_SIDE_EFFECTS (adr) = 0; /* Default would be, same as EXP. */ return adr; } /* This way is better for a COMPONENT_REF since it can simplify the offset for a component. */ adr = build_unary_op (ADDR_EXPR, exp, 1); return convert (ptrtype, adr); } return exp; } /* Perform default promotions for C data used in expressions. Arrays and functions are converted to pointers; enumeral types or short or char, to int. In addition, manifest constants symbols are replaced by their values. */ tree default_conversion (tree exp) { tree orig_exp; tree type = TREE_TYPE (exp); enum tree_code code = TREE_CODE (type); if (code == FUNCTION_TYPE || code == ARRAY_TYPE) return default_function_array_conversion (exp); /* Constants can be used directly unless they're not loadable. */ if (TREE_CODE (exp) == CONST_DECL) exp = DECL_INITIAL (exp); /* Replace a nonvolatile const static variable with its value unless it is an array, in which case we must be sure that taking the address of the array produces consistent results. */ else if (optimize && TREE_CODE (exp) == VAR_DECL && code != ARRAY_TYPE) { exp = decl_constant_value_for_broken_optimization (exp); type = TREE_TYPE (exp); } /* Strip NON_LVALUE_EXPRs and no-op conversions, since we aren't using as an lvalue. Do not use STRIP_NOPS here! It will remove conversions from pointer to integer and cause infinite recursion. */ orig_exp = exp; while (TREE_CODE (exp) == NON_LVALUE_EXPR || (TREE_CODE (exp) == NOP_EXPR && TREE_TYPE (TREE_OPERAND (exp, 0)) == TREE_TYPE (exp))) exp = TREE_OPERAND (exp, 0); /* Preserve the original expression code. */ if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (exp)))) C_SET_EXP_ORIGINAL_CODE (exp, C_EXP_ORIGINAL_CODE (orig_exp)); /* Normally convert enums to int, but convert wide enums to something wider. 
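   For instance, as an illustrative sketch: an operand of type
   'enum color { RED, GREEN }' is converted here to 'int' (or to 'unsigned int'
   when the enumeration is unsigned and already as wide as int), while an
   enumeration needing more precision than int, a GCC extension, is handed to
   c_common_type_for_size and becomes a correspondingly wider integer type.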
*/ if (code == ENUMERAL_TYPE) { type = c_common_type_for_size (MAX (TYPE_PRECISION (type), TYPE_PRECISION (integer_type_node)), ((TYPE_PRECISION (type) >= TYPE_PRECISION (integer_type_node)) && TYPE_UNSIGNED (type))); return convert (type, exp); } if (TREE_CODE (exp) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (exp, 1)) /* If it's thinner than an int, promote it like a c_promoting_integer_type_p, otherwise leave it alone. */ && 0 > compare_tree_int (DECL_SIZE (TREE_OPERAND (exp, 1)), TYPE_PRECISION (integer_type_node))) return convert (integer_type_node, exp); if (c_promoting_integer_type_p (type)) { /* Preserve unsignedness if not really getting any wider. */ if (TYPE_UNSIGNED (type) && TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)) return convert (unsigned_type_node, exp); return convert (integer_type_node, exp); } if (code == VOID_TYPE) { error ("void value not ignored as it ought to be"); return error_mark_node; } return exp; } /* Look up COMPONENT in a structure or union DECL. If the component name is not found, returns NULL_TREE. Otherwise, the return value is a TREE_LIST, with each TREE_VALUE a FIELD_DECL stepping down the chain to the component, which is in the last TREE_VALUE of the list. Normally the list is of length one, but if the component is embedded within (nested) anonymous structures or unions, the list steps down the chain to the component. */ static tree lookup_field (tree decl, tree component) { tree type = TREE_TYPE (decl); tree field; /* If TYPE_LANG_SPECIFIC is set, then it is a sorted array of pointers to the field elements. Use a binary search on this array to quickly find the element. Otherwise, do a linear search. TYPE_LANG_SPECIFIC will always be set for structures which have many elements. */ if (TYPE_LANG_SPECIFIC (type)) { int bot, top, half; tree *field_array = &TYPE_LANG_SPECIFIC (type)->s->elts[0]; field = TYPE_FIELDS (type); bot = 0; top = TYPE_LANG_SPECIFIC (type)->s->len; while (top - bot > 1) { half = (top - bot + 1) >> 1; field = field_array[bot+half]; if (DECL_NAME (field) == NULL_TREE) { /* Step through all anon unions in linear fashion. */ while (DECL_NAME (field_array[bot]) == NULL_TREE) { field = field_array[bot++]; if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (field)) == UNION_TYPE) { tree anon = lookup_field (field, component); if (anon) return tree_cons (NULL_TREE, field, anon); } } /* Entire record is only anon unions. */ if (bot > top) return NULL_TREE; /* Restart the binary search, with new lower bound. */ continue; } if (DECL_NAME (field) == component) break; if (DECL_NAME (field) < component) bot += half; else top = bot + half; } if (DECL_NAME (field_array[bot]) == component) field = field_array[bot]; else if (DECL_NAME (field) != component) return NULL_TREE; } else { for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { if (DECL_NAME (field) == NULL_TREE && (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (field)) == UNION_TYPE)) { tree anon = lookup_field (field, component); if (anon) return tree_cons (NULL_TREE, field, anon); } if (DECL_NAME (field) == component) break; } if (field == NULL_TREE) return NULL_TREE; } return tree_cons (NULL_TREE, field, NULL_TREE); } /* Make an expression to refer to the COMPONENT field of structure or union value DATUM. COMPONENT is an IDENTIFIER_NODE. 
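   For a member reached through anonymous aggregates, lookup_field above
   returns a chain and one COMPONENT_REF is built per link.  Illustrative
   sketch with made-up names; anonymous members are a GNU C extension here:

       struct outer
       {
         union { int x; float f; };   // anonymous union member
         int y;
       } o;

       int get_x (void) { return o.x; }   // reaches x through the unnamed
                                          // field, so two COMPONENT_REFs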
*/ tree build_component_ref (tree datum, tree component) { tree type = TREE_TYPE (datum); enum tree_code code = TREE_CODE (type); tree field = NULL; tree ref; if (!objc_is_public (datum, component)) return error_mark_node; /* If DATUM is a COMPOUND_EXPR, move our reference inside it. Ensure that the arguments are not lvalues; otherwise, if the component is an array, it would wrongly decay to a pointer in C89 mode. We cannot do this with a COND_EXPR, because in a conditional expression the default promotions are applied to both sides, and this would yield the wrong type of the result; for example, if the components have type "char". */ switch (TREE_CODE (datum)) { case COMPOUND_EXPR: { tree value = build_component_ref (TREE_OPERAND (datum, 1), component); return build (COMPOUND_EXPR, TREE_TYPE (value), TREE_OPERAND (datum, 0), non_lvalue (value)); } default: break; } /* See if there is a field or component with name COMPONENT. */ if (code == RECORD_TYPE || code == UNION_TYPE) { if (!COMPLETE_TYPE_P (type)) { c_incomplete_type_error (NULL_TREE, type); return error_mark_node; } field = lookup_field (datum, component); if (!field) { error ("%s has no member named `%s'", code == RECORD_TYPE ? "structure" : "union", IDENTIFIER_POINTER (component)); return error_mark_node; } /* Chain the COMPONENT_REFs if necessary down to the FIELD. This might be better solved in future the way the C++ front end does it - by giving the anonymous entities each a separate name and type, and then have build_component_ref recursively call itself. We can't do that here. */ do { tree subdatum = TREE_VALUE (field); if (TREE_TYPE (subdatum) == error_mark_node) return error_mark_node; ref = build (COMPONENT_REF, TREE_TYPE (subdatum), datum, subdatum, NULL_TREE); if (TREE_READONLY (datum) || TREE_READONLY (subdatum)) TREE_READONLY (ref) = 1; if (TREE_THIS_VOLATILE (datum) || TREE_THIS_VOLATILE (subdatum)) TREE_THIS_VOLATILE (ref) = 1; if (TREE_DEPRECATED (subdatum)) warn_deprecated_use (subdatum); datum = ref; field = TREE_CHAIN (field); } while (field); return ref; } else if (code != ERROR_MARK) error ("request for member `%s' in something not a structure or union", IDENTIFIER_POINTER (component)); return error_mark_node; } /* Given an expression PTR for a pointer, return an expression for the value pointed to. ERRORSTRING is the name of the operator to appear in error messages. */ tree build_indirect_ref (tree ptr, const char *errorstring) { tree pointer = default_conversion (ptr); tree type = TREE_TYPE (pointer); if (TREE_CODE (type) == POINTER_TYPE) { if (TREE_CODE (pointer) == ADDR_EXPR && (TREE_TYPE (TREE_OPERAND (pointer, 0)) == TREE_TYPE (type))) return TREE_OPERAND (pointer, 0); else { tree t = TREE_TYPE (type); tree ref = build1 (INDIRECT_REF, TYPE_MAIN_VARIANT (t), pointer); if (!COMPLETE_OR_VOID_TYPE_P (t) && TREE_CODE (t) != ARRAY_TYPE) { error ("dereferencing pointer to incomplete type"); return error_mark_node; } if (VOID_TYPE_P (t) && skip_evaluation == 0) warning ("dereferencing `void *' pointer"); /* We *must* set TREE_READONLY when dereferencing a pointer to const, so that we get the proper error message if the result is used to assign to. Also, &* is supposed to be a no-op. And ANSI C seems to specify that the type of the result should be the const type. */ /* A de-reference of a pointer to const is not a const. It is valid to change it via some other pointer. 
*/ TREE_READONLY (ref) = TYPE_READONLY (t); TREE_SIDE_EFFECTS (ref) = TYPE_VOLATILE (t) || TREE_SIDE_EFFECTS (pointer); TREE_THIS_VOLATILE (ref) = TYPE_VOLATILE (t); return ref; } } else if (TREE_CODE (pointer) != ERROR_MARK) error ("invalid type argument of `%s'", errorstring); return error_mark_node; } /* This handles expressions of the form "a[i]", which denotes an array reference. This is logically equivalent in C to *(a+i), but we may do it differently. If A is a variable or a member, we generate a primitive ARRAY_REF. This avoids forcing the array out of registers, and can work on arrays that are not lvalues (for example, members of structures returned by functions). */ tree build_array_ref (tree array, tree index) { if (index == 0) { error ("subscript missing in array reference"); return error_mark_node; } if (TREE_TYPE (array) == error_mark_node || TREE_TYPE (index) == error_mark_node) return error_mark_node; if (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE) { tree rval, type; /* Subscripting with type char is likely to lose on a machine where chars are signed. So warn on any machine, but optionally. Don't warn for unsigned char since that type is safe. Don't warn for signed char because anyone who uses that must have done so deliberately. */ if (warn_char_subscripts && TYPE_MAIN_VARIANT (TREE_TYPE (index)) == char_type_node) warning ("array subscript has type `char'"); /* Apply default promotions *after* noticing character types. */ index = default_conversion (index); /* Require integer *after* promotion, for sake of enums. */ if (TREE_CODE (TREE_TYPE (index)) != INTEGER_TYPE) { error ("array subscript is not an integer"); return error_mark_node; } /* An array that is indexed by a non-constant cannot be stored in a register; we must be able to do address arithmetic on its address. Likewise an array of elements of variable size. */ if (TREE_CODE (index) != INTEGER_CST || (COMPLETE_TYPE_P (TREE_TYPE (TREE_TYPE (array))) && TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array)))) != INTEGER_CST)) { if (!c_mark_addressable (array)) return error_mark_node; } /* An array that is indexed by a constant value which is not within the array bounds cannot be stored in a register either; because we would get a crash in store_bit_field/extract_bit_field when trying to access a non-existent part of the register. */ if (TREE_CODE (index) == INTEGER_CST && TYPE_DOMAIN (TREE_TYPE (array)) && ! int_fits_type_p (index, TYPE_DOMAIN (TREE_TYPE (array)))) { if (!c_mark_addressable (array)) return error_mark_node; } if (pedantic) { tree foo = array; while (TREE_CODE (foo) == COMPONENT_REF) foo = TREE_OPERAND (foo, 0); if (TREE_CODE (foo) == VAR_DECL && C_DECL_REGISTER (foo)) pedwarn ("ISO C forbids subscripting `register' array"); else if (! flag_isoc99 && ! lvalue_p (foo)) pedwarn ("ISO C90 forbids subscripting non-lvalue array"); } type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (array))); rval = build (ARRAY_REF, type, array, index, NULL_TREE, NULL_TREE); /* Array ref is const/volatile if the array elements are or if the array is. */ TREE_READONLY (rval) |= (TYPE_READONLY (TREE_TYPE (TREE_TYPE (array))) | TREE_READONLY (array)); TREE_SIDE_EFFECTS (rval) |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array))) | TREE_SIDE_EFFECTS (array)); TREE_THIS_VOLATILE (rval) |= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array))) /* This was added by rms on 16 Nov 91. It fixes vol struct foo *a; a->elts[1] in an inline function. Hope it doesn't break something else. 
*/ | TREE_THIS_VOLATILE (array)); return require_complete_type (fold (rval)); } { tree ar = default_conversion (array); tree ind = default_conversion (index); /* Do the same warning check as above, but only on the part that's syntactically the index and only if it is also semantically the index. */ if (warn_char_subscripts && TREE_CODE (TREE_TYPE (index)) == INTEGER_TYPE && TYPE_MAIN_VARIANT (TREE_TYPE (index)) == char_type_node) warning ("subscript has type `char'"); /* Put the integer in IND to simplify error checking. */ if (TREE_CODE (TREE_TYPE (ar)) == INTEGER_TYPE) { tree temp = ar; ar = ind; ind = temp; } if (ar == error_mark_node) return ar; if (TREE_CODE (TREE_TYPE (ar)) != POINTER_TYPE || TREE_CODE (TREE_TYPE (TREE_TYPE (ar))) == FUNCTION_TYPE) { error ("subscripted value is neither array nor pointer"); return error_mark_node; } if (TREE_CODE (TREE_TYPE (ind)) != INTEGER_TYPE) { error ("array subscript is not an integer"); return error_mark_node; } return build_indirect_ref (build_binary_op (PLUS_EXPR, ar, ind, 0), "array indexing"); } } /* Build an external reference to identifier ID. FUN indicates whether this will be used for a function call. */ tree build_external_ref (tree id, int fun) { tree ref; tree decl = lookup_name (id); tree objc_ivar = lookup_objc_ivar (id); if (decl && decl != error_mark_node) { /* Properly declared variable or function reference. */ if (!objc_ivar) ref = decl; else if (decl != objc_ivar && !DECL_FILE_SCOPE_P (decl)) { warning ("local declaration of `%s' hides instance variable", IDENTIFIER_POINTER (id)); ref = decl; } else ref = objc_ivar; } else if (objc_ivar) ref = objc_ivar; else if (fun) /* Implicit function declaration. */ ref = implicitly_declare (id); else if (decl == error_mark_node) /* Don't complain about something that's already been complained about. */ return error_mark_node; else { undeclared_variable (id); return error_mark_node; } if (TREE_TYPE (ref) == error_mark_node) return error_mark_node; if (TREE_DEPRECATED (ref)) warn_deprecated_use (ref); if (!skip_evaluation) assemble_external (ref); TREE_USED (ref) = 1; if (TREE_CODE (ref) == CONST_DECL) { ref = DECL_INITIAL (ref); TREE_CONSTANT (ref) = 1; TREE_INVARIANT (ref) = 1; } else if (current_function_decl != 0 && !DECL_FILE_SCOPE_P (current_function_decl) && (TREE_CODE (ref) == VAR_DECL || TREE_CODE (ref) == PARM_DECL || TREE_CODE (ref) == FUNCTION_DECL)) { tree context = decl_function_context (ref); if (context != 0 && context != current_function_decl) DECL_NONLOCAL (ref) = 1; } return ref; } /* Build a function call to function FUNCTION with parameters PARAMS. PARAMS is a list--a chain of TREE_LIST nodes--in which the TREE_VALUE of each node is a parameter-expression. FUNCTION's data type may be a function type or a pointer-to-function. */ tree build_function_call (tree function, tree params) { tree fntype, fundecl = 0; tree coerced_params; tree name = NULL_TREE, result; tree tem; /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (function); /* Convert anything with function type to a pointer-to-function. */ if (TREE_CODE (function) == FUNCTION_DECL) { name = DECL_NAME (function); /* Differs from default_conversion by not setting TREE_ADDRESSABLE (because calling an inline function does not mean the function needs to be separately compiled). 
*/ fntype = build_type_variant (TREE_TYPE (function), TREE_READONLY (function), TREE_THIS_VOLATILE (function)); fundecl = function; function = build1 (ADDR_EXPR, build_pointer_type (fntype), function); } else function = default_conversion (function); fntype = TREE_TYPE (function); if (TREE_CODE (fntype) == ERROR_MARK) return error_mark_node; if (!(TREE_CODE (fntype) == POINTER_TYPE && TREE_CODE (TREE_TYPE (fntype)) == FUNCTION_TYPE)) { error ("called object is not a function"); return error_mark_node; } if (fundecl && TREE_THIS_VOLATILE (fundecl)) current_function_returns_abnormally = 1; /* fntype now gets the type of function pointed to. */ fntype = TREE_TYPE (fntype); /* Check that the function is called through a compatible prototype. If it is not, replace the call by a trap, wrapped up in a compound expression if necessary. This has the nice side-effect to prevent the tree-inliner from generating invalid assignment trees which may blow up in the RTL expander later. ??? This doesn't work for Objective-C because objc_comptypes refuses to compare function prototypes, yet the compiler appears to build calls that are flagged as invalid by C's comptypes. */ if (! c_dialect_objc () && TREE_CODE (function) == NOP_EXPR && TREE_CODE (tem = TREE_OPERAND (function, 0)) == ADDR_EXPR && TREE_CODE (tem = TREE_OPERAND (tem, 0)) == FUNCTION_DECL && ! comptypes (fntype, TREE_TYPE (tem))) { tree return_type = TREE_TYPE (fntype); tree trap = build_function_call (built_in_decls[BUILT_IN_TRAP], NULL_TREE); /* This situation leads to run-time undefined behavior. We can't, therefore, simply error unless we can prove that all possible executions of the program must execute the code. */ warning ("function called through a non-compatible type"); /* We can, however, treat "undefined" any way we please. Call abort to encourage the user to fix the program. */ inform ("if this code is reached, the program will abort"); if (VOID_TYPE_P (return_type)) return trap; else { tree rhs; if (AGGREGATE_TYPE_P (return_type)) rhs = build_compound_literal (return_type, build_constructor (return_type, NULL_TREE)); else rhs = fold (build1 (NOP_EXPR, return_type, integer_zero_node)); return build (COMPOUND_EXPR, return_type, trap, rhs); } } /* Convert the parameters to the types declared in the function prototype, or apply default promotions. */ coerced_params = convert_arguments (TYPE_ARG_TYPES (fntype), params, name, fundecl); /* Check that the arguments to the function are valid. */ check_function_arguments (TYPE_ATTRIBUTES (fntype), coerced_params); result = build (CALL_EXPR, TREE_TYPE (fntype), function, coerced_params, NULL_TREE); TREE_SIDE_EFFECTS (result) = 1; if (require_constant_value) { result = fold_initializer (result); if (TREE_CONSTANT (result) && (name == NULL_TREE || strncmp (IDENTIFIER_POINTER (name), "__builtin_", 10) != 0)) pedwarn_init ("initializer element is not constant"); } else result = fold (result); if (VOID_TYPE_P (TREE_TYPE (result))) return result; return require_complete_type (result); } /* Convert the argument expressions in the list VALUES to the types in the list TYPELIST. The result is a list of converted argument expressions. If TYPELIST is exhausted, or when an element has NULL as its type, perform the default conversions. PARMLIST is the chain of parm decls for the function being called. It may be 0, if that info is not available. It is used only for generating error messages. NAME is an IDENTIFIER_NODE or 0. It is used only for error messages. 
This is also where warnings about wrong number of args are generated. Both VALUES and the returned value are chains of TREE_LIST nodes with the elements of the list in the TREE_VALUE slots of those nodes. */ static tree convert_arguments (tree typelist, tree values, tree name, tree fundecl) { tree typetail, valtail; tree result = NULL; int parmnum; /* Scan the given expressions and types, producing individual converted arguments and pushing them on RESULT in reverse order. */ for (valtail = values, typetail = typelist, parmnum = 0; valtail; valtail = TREE_CHAIN (valtail), parmnum++) { tree type = typetail ? TREE_VALUE (typetail) : 0; tree val = TREE_VALUE (valtail); if (type == void_type_node) { if (name) error ("too many arguments to function `%s'", IDENTIFIER_POINTER (name)); else error ("too many arguments to function"); break; } /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ /* Do not use STRIP_NOPS here! We do not want an enumerator with value 0 to convert automatically to a pointer. */ if (TREE_CODE (val) == NON_LVALUE_EXPR) val = TREE_OPERAND (val, 0); val = default_function_array_conversion (val); val = require_complete_type (val); if (type != 0) { /* Formal parm type is specified by a function prototype. */ tree parmval; if (!COMPLETE_TYPE_P (type)) { error ("type of formal parameter %d is incomplete", parmnum + 1); parmval = val; } else { /* Optionally warn about conversions that differ from the default conversions. */ if (warn_conversion || warn_traditional) { int formal_prec = TYPE_PRECISION (type); if (INTEGRAL_TYPE_P (type) && TREE_CODE (TREE_TYPE (val)) == REAL_TYPE) warn_for_assignment ("%s as integer rather than floating due to prototype", (char *) 0, name, parmnum + 1); if (INTEGRAL_TYPE_P (type) && TREE_CODE (TREE_TYPE (val)) == COMPLEX_TYPE) warn_for_assignment ("%s as integer rather than complex due to prototype", (char *) 0, name, parmnum + 1); else if (TREE_CODE (type) == COMPLEX_TYPE && TREE_CODE (TREE_TYPE (val)) == REAL_TYPE) warn_for_assignment ("%s as complex rather than floating due to prototype", (char *) 0, name, parmnum + 1); else if (TREE_CODE (type) == REAL_TYPE && INTEGRAL_TYPE_P (TREE_TYPE (val))) warn_for_assignment ("%s as floating rather than integer due to prototype", (char *) 0, name, parmnum + 1); else if (TREE_CODE (type) == COMPLEX_TYPE && INTEGRAL_TYPE_P (TREE_TYPE (val))) warn_for_assignment ("%s as complex rather than integer due to prototype", (char *) 0, name, parmnum + 1); else if (TREE_CODE (type) == REAL_TYPE && TREE_CODE (TREE_TYPE (val)) == COMPLEX_TYPE) warn_for_assignment ("%s as floating rather than complex due to prototype", (char *) 0, name, parmnum + 1); /* ??? At some point, messages should be written about conversions between complex types, but that's too messy to do now. */ else if (TREE_CODE (type) == REAL_TYPE && TREE_CODE (TREE_TYPE (val)) == REAL_TYPE) { /* Warn if any argument is passed as `float', since without a prototype it would be `double'. */ if (formal_prec == TYPE_PRECISION (float_type_node)) warn_for_assignment ("%s as `float' rather than `double' due to prototype", (char *) 0, name, parmnum + 1); } /* Detect integer changing in width or signedness. These warnings are only activated with -Wconversion, not with -Wtraditional. 
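   For example -- illustrative sketch with made-up names:

       void take_unsigned (unsigned int u);

       void caller (int i)
       {
         take_unsigned (i);      // with -Wconversion: "passing arg 1 of
       }                         // `take_unsigned' as unsigned due to
                                 // prototype"; without the prototype the
                                 // argument would have been passed as int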
*/ else if (warn_conversion && INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (val))) { tree would_have_been = default_conversion (val); tree type1 = TREE_TYPE (would_have_been); if (TREE_CODE (type) == ENUMERAL_TYPE && (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (val)))) /* No warning if function asks for enum and the actual arg is that enum type. */ ; else if (formal_prec != TYPE_PRECISION (type1)) warn_for_assignment ("%s with different width due to prototype", (char *) 0, name, parmnum + 1); else if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (type1)) ; /* Don't complain if the formal parameter type is an enum, because we can't tell now whether the value was an enum--even the same enum. */ else if (TREE_CODE (type) == ENUMERAL_TYPE) ; else if (TREE_CODE (val) == INTEGER_CST && int_fits_type_p (val, type)) /* Change in signedness doesn't matter if a constant value is unaffected. */ ; /* Likewise for a constant in a NOP_EXPR. */ else if (TREE_CODE (val) == NOP_EXPR && TREE_CODE (TREE_OPERAND (val, 0)) == INTEGER_CST && int_fits_type_p (TREE_OPERAND (val, 0), type)) ; /* If the value is extended from a narrower unsigned type, it doesn't matter whether we pass it as signed or unsigned; the value certainly is the same either way. */ else if (TYPE_PRECISION (TREE_TYPE (val)) < TYPE_PRECISION (type) && TYPE_UNSIGNED (TREE_TYPE (val))) ; else if (TYPE_UNSIGNED (type)) warn_for_assignment ("%s as unsigned due to prototype", (char *) 0, name, parmnum + 1); else warn_for_assignment ("%s as signed due to prototype", (char *) 0, name, parmnum + 1); } } parmval = convert_for_assignment (type, val, (char *) 0, /* arg passing */ fundecl, name, parmnum + 1); if (targetm.calls.promote_prototypes (fundecl ? TREE_TYPE (fundecl) : 0) && INTEGRAL_TYPE_P (type) && (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))) parmval = default_conversion (parmval); } result = tree_cons (NULL_TREE, parmval, result); } else if (TREE_CODE (TREE_TYPE (val)) == REAL_TYPE && (TYPE_PRECISION (TREE_TYPE (val)) < TYPE_PRECISION (double_type_node))) /* Convert `float' to `double'. */ result = tree_cons (NULL_TREE, convert (double_type_node, val), result); else /* Convert `short' and `char' to full-size `int'. */ result = tree_cons (NULL_TREE, default_conversion (val), result); if (typetail) typetail = TREE_CHAIN (typetail); } if (typetail != 0 && TREE_VALUE (typetail) != void_type_node) { if (name) error ("too few arguments to function `%s'", IDENTIFIER_POINTER (name)); else error ("too few arguments to function"); } return nreverse (result); } /* This is the entry point used by the parser for binary operators in the input. In addition to constructing the expression, we check for operands that were written with other binary operators in a way that is likely to confuse the user. */ tree parser_build_binary_op (enum tree_code code, tree arg1, tree arg2) { tree result = build_binary_op (code, arg1, arg2, 1); char class; char class1 = TREE_CODE_CLASS (TREE_CODE (arg1)); char class2 = TREE_CODE_CLASS (TREE_CODE (arg2)); enum tree_code code1 = ERROR_MARK; enum tree_code code2 = ERROR_MARK; if (TREE_CODE (result) == ERROR_MARK) return error_mark_node; if (IS_EXPR_CODE_CLASS (class1)) code1 = C_EXP_ORIGINAL_CODE (arg1); if (IS_EXPR_CODE_CLASS (class2)) code2 = C_EXP_ORIGINAL_CODE (arg2); /* Check for cases such as x+y< qualifications. But when constructing cast expressions, the protocols do matter and must be kept around. 
*/ if (!c_dialect_objc () || !objc_is_object_ptr (type)) type = TYPE_MAIN_VARIANT (type); if (TREE_CODE (type) == ARRAY_TYPE) { error ("cast specifies array type"); return error_mark_node; } if (TREE_CODE (type) == FUNCTION_TYPE) { error ("cast specifies function type"); return error_mark_node; } if (type == TYPE_MAIN_VARIANT (TREE_TYPE (value))) { if (pedantic) { if (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE) pedwarn ("ISO C forbids casting nonscalar to the same type"); } } else if (TREE_CODE (type) == UNION_TYPE) { tree field; value = default_function_array_conversion (value); for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (field)), TYPE_MAIN_VARIANT (TREE_TYPE (value)))) break; if (field) { tree t; if (pedantic) pedwarn ("ISO C forbids casts to union type"); t = digest_init (type, build_constructor (type, build_tree_list (field, value)), 0); TREE_CONSTANT (t) = TREE_CONSTANT (value); TREE_INVARIANT (t) = TREE_INVARIANT (value); return t; } error ("cast to union type from type not present in union"); return error_mark_node; } else { tree otype, ovalue; /* If casting to void, avoid the error that would come from default_conversion in the case of a non-lvalue array. */ if (type == void_type_node) return build1 (CONVERT_EXPR, type, value); /* Convert functions and arrays to pointers, but don't convert any other types. */ value = default_function_array_conversion (value); otype = TREE_TYPE (value); /* Optionally warn about potentially worrisome casts. */ if (warn_cast_qual && TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE) { tree in_type = type; tree in_otype = otype; int added = 0; int discarded = 0; /* Check that the qualifiers on IN_TYPE are a superset of the qualifiers of IN_OTYPE. The outermost level of POINTER_TYPE nodes is uninteresting and we stop as soon as we hit a non-POINTER_TYPE node on either type. */ do { in_otype = TREE_TYPE (in_otype); in_type = TREE_TYPE (in_type); /* GNU C allows cv-qualified function types. 'const' means the function is very pure, 'volatile' means it can't return. We need to warn when such qualifiers are added, not when they're taken away. */ if (TREE_CODE (in_otype) == FUNCTION_TYPE && TREE_CODE (in_type) == FUNCTION_TYPE) added |= (TYPE_QUALS (in_type) & ~TYPE_QUALS (in_otype)); else discarded |= (TYPE_QUALS (in_otype) & ~TYPE_QUALS (in_type)); } while (TREE_CODE (in_type) == POINTER_TYPE && TREE_CODE (in_otype) == POINTER_TYPE); if (added) warning ("cast adds new qualifiers to function type"); if (discarded) /* There are qualifiers present in IN_OTYPE that are not present in IN_TYPE. */ warning ("cast discards qualifiers from pointer target type"); } /* Warn about possible alignment problems. */ if (STRICT_ALIGNMENT && warn_cast_align && TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TREE_CODE (TREE_TYPE (otype)) != VOID_TYPE && TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE /* Don't warn about opaque types, where the actual alignment restriction is unknown. 
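   The typical case the alignment warning is after (illustrative sketch only;
   it is diagnosed only on strict-alignment targets with -Wcast-align):

       char buf[16];
       int *ip = (int *) buf;    // "cast increases required alignment of
                                 //  target type"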
*/ && !((TREE_CODE (TREE_TYPE (otype)) == UNION_TYPE || TREE_CODE (TREE_TYPE (otype)) == RECORD_TYPE) && TYPE_MODE (TREE_TYPE (otype)) == VOIDmode) && TYPE_ALIGN (TREE_TYPE (type)) > TYPE_ALIGN (TREE_TYPE (otype))) warning ("cast increases required alignment of target type"); if (TREE_CODE (type) == INTEGER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TYPE_PRECISION (type) != TYPE_PRECISION (otype) && !TREE_CONSTANT (value)) warning ("cast from pointer to integer of different size"); if (warn_bad_function_cast && TREE_CODE (value) == CALL_EXPR && TREE_CODE (type) != TREE_CODE (otype)) warning ("cast does not match function type"); if (TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == INTEGER_TYPE && TYPE_PRECISION (type) != TYPE_PRECISION (otype) /* Don't warn about converting any constant. */ && !TREE_CONSTANT (value)) warning ("cast to pointer from integer of different size"); if (TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TREE_CODE (expr) == ADDR_EXPR && DECL_P (TREE_OPERAND (expr, 0)) && flag_strict_aliasing && warn_strict_aliasing && !VOID_TYPE_P (TREE_TYPE (type))) { /* Casting the address of a decl to non void pointer. Warn if the cast breaks type based aliasing. */ if (!COMPLETE_TYPE_P (TREE_TYPE (type))) warning ("type-punning to incomplete type might break strict-aliasing rules"); else { HOST_WIDE_INT set1 = get_alias_set (TREE_TYPE (TREE_OPERAND (expr, 0))); HOST_WIDE_INT set2 = get_alias_set (TREE_TYPE (type)); if (!alias_sets_conflict_p (set1, set2)) warning ("dereferencing type-punned pointer will break strict-aliasing rules"); else if (warn_strict_aliasing > 1 && !alias_sets_might_conflict_p (set1, set2)) warning ("dereferencing type-punned pointer might break strict-aliasing rules"); } } /* If pedantic, warn for conversions between function and object pointer types, except for converting a null pointer constant to function pointer type. */ if (pedantic && TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TREE_CODE (TREE_TYPE (otype)) == FUNCTION_TYPE && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE) pedwarn ("ISO C forbids conversion of function pointer to object pointer type"); if (pedantic && TREE_CODE (type) == POINTER_TYPE && TREE_CODE (otype) == POINTER_TYPE && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE && TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE && !(integer_zerop (value) && TREE_TYPE (otype) == void_type_node && TREE_CODE (expr) != NOP_EXPR)) pedwarn ("ISO C forbids conversion of object pointer to function pointer type"); ovalue = value; /* Replace a nonvolatile const static variable with its value. */ if (optimize && TREE_CODE (value) == VAR_DECL) value = decl_constant_value (value); value = convert (type, value); /* Ignore any integer overflow caused by the cast. */ if (TREE_CODE (value) == INTEGER_CST) { TREE_OVERFLOW (value) = TREE_OVERFLOW (ovalue); if (TREE_CODE_CLASS (TREE_CODE (ovalue)) == 'c') TREE_CONSTANT_OVERFLOW (value) = TREE_CONSTANT_OVERFLOW (ovalue); } } /* Don't let (void *) (FOO *) 0 be a null pointer constant. */ if (TREE_CODE (value) == INTEGER_CST && TREE_CODE (expr) == INTEGER_CST && TREE_CODE (TREE_TYPE (expr)) != INTEGER_TYPE) value = non_lvalue (value); /* Don't let a cast be an lvalue. */ if (value == expr) value = non_lvalue (value); return value; } /* Interpret a cast of expression EXPR to type TYPE. */ tree c_cast_expr (tree type, tree expr) { int saved_wsp = warn_strict_prototypes; /* This avoids warnings about unprototyped casts on integers. E.g. 
"#define SIG_DFL (void(*)())0". */ if (TREE_CODE (expr) == INTEGER_CST) warn_strict_prototypes = 0; type = groktypename (type); warn_strict_prototypes = saved_wsp; return build_c_cast (type, expr); } /* Build an assignment expression of lvalue LHS from value RHS. MODIFYCODE is the code for a binary operator that we use to combine the old value of LHS with RHS to get the new value. Or else MODIFYCODE is NOP_EXPR meaning do a simple assignment. */ tree build_modify_expr (tree lhs, enum tree_code modifycode, tree rhs) { tree result; tree newrhs; tree lhstype = TREE_TYPE (lhs); tree olhstype = lhstype; /* Types that aren't fully specified cannot be used in assignments. */ lhs = require_complete_type (lhs); /* Avoid duplicate error messages from operands that had errors. */ if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK) return error_mark_node; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ /* Do not use STRIP_NOPS here. We do not want an enumerator whose value is 0 to count as a null pointer constant. */ if (TREE_CODE (rhs) == NON_LVALUE_EXPR) rhs = TREE_OPERAND (rhs, 0); newrhs = rhs; /* If a binary op has been requested, combine the old LHS value with the RHS producing the value we should actually store into the LHS. */ if (modifycode != NOP_EXPR) { lhs = stabilize_reference (lhs); newrhs = build_binary_op (modifycode, lhs, rhs, 1); } if (!lvalue_or_else (lhs, "invalid lvalue in assignment")) return error_mark_node; /* Warn about storing in something that is `const'. */ if (TREE_READONLY (lhs) || TYPE_READONLY (lhstype) || ((TREE_CODE (lhstype) == RECORD_TYPE || TREE_CODE (lhstype) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (lhstype))) readonly_error (lhs, "assignment"); /* If storing into a structure or union member, it has probably been given type `int'. Compute the type that would go with the actual amount of storage the member occupies. */ if (TREE_CODE (lhs) == COMPONENT_REF && (TREE_CODE (lhstype) == INTEGER_TYPE || TREE_CODE (lhstype) == BOOLEAN_TYPE || TREE_CODE (lhstype) == REAL_TYPE || TREE_CODE (lhstype) == ENUMERAL_TYPE)) lhstype = TREE_TYPE (get_unwidened (lhs, 0)); /* If storing in a field that is in actuality a short or narrower than one, we must store in the field in its actual type. */ if (lhstype != TREE_TYPE (lhs)) { lhs = copy_node (lhs); TREE_TYPE (lhs) = lhstype; } /* Convert new value to destination type. */ newrhs = convert_for_assignment (lhstype, newrhs, _("assignment"), NULL_TREE, NULL_TREE, 0); if (TREE_CODE (newrhs) == ERROR_MARK) return error_mark_node; /* Scan operands */ result = build (MODIFY_EXPR, lhstype, lhs, newrhs); TREE_SIDE_EFFECTS (result) = 1; /* If we got the LHS in a different type for storing in, convert the result back to the nominal type of LHS so that the value we return always has the same type as the LHS argument. */ if (olhstype == TREE_TYPE (result)) return result; return convert_for_assignment (olhstype, result, _("assignment"), NULL_TREE, NULL_TREE, 0); } /* Convert value RHS to type TYPE as preparation for an assignment to an lvalue of type TYPE. The real work of conversion is done by `convert'. The purpose of this function is to generate error messages for assignments that are not allowed in C. ERRTYPE is a string to use in error messages: "assignment", "return", etc. If it is null, this is parameter passing for a function call (and different error messages are output). FUNNAME is the name of the function being called, as an IDENTIFIER_NODE, or null. 
PARMNUM is the number of the argument, for printing in error messages. */ static tree convert_for_assignment (tree type, tree rhs, const char *errtype, tree fundecl, tree funname, int parmnum) { enum tree_code codel = TREE_CODE (type); tree rhstype; enum tree_code coder; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ /* Do not use STRIP_NOPS here. We do not want an enumerator whose value is 0 to count as a null pointer constant. */ if (TREE_CODE (rhs) == NON_LVALUE_EXPR) rhs = TREE_OPERAND (rhs, 0); if (TREE_CODE (TREE_TYPE (rhs)) == ARRAY_TYPE || TREE_CODE (TREE_TYPE (rhs)) == FUNCTION_TYPE) rhs = default_conversion (rhs); else if (optimize && TREE_CODE (rhs) == VAR_DECL) rhs = decl_constant_value_for_broken_optimization (rhs); rhstype = TREE_TYPE (rhs); coder = TREE_CODE (rhstype); if (coder == ERROR_MARK) return error_mark_node; if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (rhstype)) { overflow_warning (rhs); /* Check for Objective-C protocols. This will automatically issue a warning if there are protocol violations. No need to use the return value. */ if (c_dialect_objc ()) objc_comptypes (type, rhstype, 0); return rhs; } if (coder == VOID_TYPE) { error ("void value not ignored as it ought to be"); return error_mark_node; } /* A type converts to a reference to it. This code doesn't fully support references, it's just for the special case of va_start and va_copy. */ if (codel == REFERENCE_TYPE && comptypes (TREE_TYPE (type), TREE_TYPE (rhs)) == 1) { if (!lvalue_p (rhs)) { error ("cannot pass rvalue to reference parameter"); return error_mark_node; } if (!c_mark_addressable (rhs)) return error_mark_node; rhs = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (rhs)), rhs); /* We already know that these two types are compatible, but they may not be exactly identical. In fact, `TREE_TYPE (type)' is likely to be __builtin_va_list and `TREE_TYPE (rhs)' is likely to be va_list, a typedef to __builtin_va_list, which is different enough that it will cause problems later. */ if (TREE_TYPE (TREE_TYPE (rhs)) != TREE_TYPE (type)) rhs = build1 (NOP_EXPR, build_pointer_type (TREE_TYPE (type)), rhs); rhs = build1 (NOP_EXPR, type, rhs); return rhs; } /* Some types can interconvert without explicit casts. */ else if (codel == VECTOR_TYPE && vector_types_convertible_p (type, TREE_TYPE (rhs))) return convert (type, rhs); /* Arithmetic types all interconvert, and enum is treated like int. */ else if ((codel == INTEGER_TYPE || codel == REAL_TYPE || codel == ENUMERAL_TYPE || codel == COMPLEX_TYPE || codel == BOOLEAN_TYPE) && (coder == INTEGER_TYPE || coder == REAL_TYPE || coder == ENUMERAL_TYPE || coder == COMPLEX_TYPE || coder == BOOLEAN_TYPE)) return convert_and_check (type, rhs); /* Conversion to a transparent union from its member types. This applies only to function arguments. */ else if (codel == UNION_TYPE && TYPE_TRANSPARENT_UNION (type) && ! errtype) { tree memb_types; tree marginal_memb_type = 0; for (memb_types = TYPE_FIELDS (type); memb_types; memb_types = TREE_CHAIN (memb_types)) { tree memb_type = TREE_TYPE (memb_types); if (comptypes (TYPE_MAIN_VARIANT (memb_type), TYPE_MAIN_VARIANT (rhstype))) break; if (TREE_CODE (memb_type) != POINTER_TYPE) continue; if (coder == POINTER_TYPE) { tree ttl = TREE_TYPE (memb_type); tree ttr = TREE_TYPE (rhstype); /* Any non-function converts to a [const][volatile] void * and vice versa; otherwise, targets must be the same. Meanwhile, the lhs target must have all the qualifiers of the rhs. 
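   For example -- illustrative sketch only:

       const char *s = "hi";
       const void *ok = s;       // fine: object pointer to void pointer, and
                                 // the lhs target keeps the const qualifier
       void *bad = s;            // "... discards qualifiers from pointer
                                 //  target type": const would be lost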
*/ if (VOID_TYPE_P (ttl) || VOID_TYPE_P (ttr) || comp_target_types (memb_type, rhstype, 0)) { /* If this type won't generate any warnings, use it. */ if (TYPE_QUALS (ttl) == TYPE_QUALS (ttr) || ((TREE_CODE (ttr) == FUNCTION_TYPE && TREE_CODE (ttl) == FUNCTION_TYPE) ? ((TYPE_QUALS (ttl) | TYPE_QUALS (ttr)) == TYPE_QUALS (ttr)) : ((TYPE_QUALS (ttl) | TYPE_QUALS (ttr)) == TYPE_QUALS (ttl)))) break; /* Keep looking for a better type, but remember this one. */ if (! marginal_memb_type) marginal_memb_type = memb_type; } } /* Can convert integer zero to any pointer type. */ if (integer_zerop (rhs) || (TREE_CODE (rhs) == NOP_EXPR && integer_zerop (TREE_OPERAND (rhs, 0)))) { rhs = null_pointer_node; break; } } if (memb_types || marginal_memb_type) { if (! memb_types) { /* We have only a marginally acceptable member type; it needs a warning. */ tree ttl = TREE_TYPE (marginal_memb_type); tree ttr = TREE_TYPE (rhstype); /* Const and volatile mean something different for function types, so the usual warnings are not appropriate. */ if (TREE_CODE (ttr) == FUNCTION_TYPE && TREE_CODE (ttl) == FUNCTION_TYPE) { /* Because const and volatile on functions are restrictions that say the function will not do certain things, it is okay to use a const or volatile function where an ordinary one is wanted, but not vice-versa. */ if (TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr)) warn_for_assignment ("%s makes qualified function pointer from unqualified", errtype, funname, parmnum); } else if (TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl)) warn_for_assignment ("%s discards qualifiers from pointer target type", errtype, funname, parmnum); } if (pedantic && ! DECL_IN_SYSTEM_HEADER (fundecl)) pedwarn ("ISO C prohibits argument conversion to union type"); return build1 (NOP_EXPR, type, rhs); } } /* Conversions among pointers */ else if ((codel == POINTER_TYPE || codel == REFERENCE_TYPE) && (coder == codel)) { tree ttl = TREE_TYPE (type); tree ttr = TREE_TYPE (rhstype); bool is_opaque_pointer; int target_cmp = 0; /* Cache comp_target_types () result. */ /* Opaque pointers are treated like void pointers. */ is_opaque_pointer = (targetm.vector_opaque_p (type) || targetm.vector_opaque_p (rhstype)) && TREE_CODE (ttl) == VECTOR_TYPE && TREE_CODE (ttr) == VECTOR_TYPE; /* Any non-function converts to a [const][volatile] void * and vice versa; otherwise, targets must be the same. Meanwhile, the lhs target must have all the qualifiers of the rhs. */ if (VOID_TYPE_P (ttl) || VOID_TYPE_P (ttr) || (target_cmp = comp_target_types (type, rhstype, 0)) || is_opaque_pointer || (c_common_unsigned_type (TYPE_MAIN_VARIANT (ttl)) == c_common_unsigned_type (TYPE_MAIN_VARIANT (ttr)))) { if (pedantic && ((VOID_TYPE_P (ttl) && TREE_CODE (ttr) == FUNCTION_TYPE) || (VOID_TYPE_P (ttr) /* Check TREE_CODE to catch cases like (void *) (char *) 0 which are not ANSI null ptr constants. */ && (!integer_zerop (rhs) || TREE_CODE (rhs) == NOP_EXPR) && TREE_CODE (ttl) == FUNCTION_TYPE))) warn_for_assignment ("ISO C forbids %s between function pointer and `void *'", errtype, funname, parmnum); /* Const and volatile mean something different for function types, so the usual warnings are not appropriate. */ else if (TREE_CODE (ttr) != FUNCTION_TYPE && TREE_CODE (ttl) != FUNCTION_TYPE) { if (TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl)) warn_for_assignment ("%s discards qualifiers from pointer target type", errtype, funname, parmnum); /* If this is not a case of ignoring a mismatch in signedness, no warning. 
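   The signedness case that does draw the warning, with -pedantic
   (illustrative sketch only):

       unsigned int u;
       int *ip = &u;             // "pointer targets in ... differ in
                                 //  signedness"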
*/ else if (VOID_TYPE_P (ttl) || VOID_TYPE_P (ttr) || target_cmp) ; /* If there is a mismatch, do warn. */ else if (pedantic) warn_for_assignment ("pointer targets in %s differ in signedness", errtype, funname, parmnum); } else if (TREE_CODE (ttl) == FUNCTION_TYPE && TREE_CODE (ttr) == FUNCTION_TYPE) { /* Because const and volatile on functions are restrictions that say the function will not do certain things, it is okay to use a const or volatile function where an ordinary one is wanted, but not vice-versa. */ if (TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr)) warn_for_assignment ("%s makes qualified function pointer from unqualified", errtype, funname, parmnum); } } else warn_for_assignment ("%s from incompatible pointer type", errtype, funname, parmnum); return convert (type, rhs); } else if (codel == POINTER_TYPE && coder == ARRAY_TYPE) { error ("invalid use of non-lvalue array"); return error_mark_node; } else if (codel == POINTER_TYPE && coder == INTEGER_TYPE) { /* An explicit constant 0 can convert to a pointer, or one that results from arithmetic, even including a cast to integer type. */ if (! (TREE_CODE (rhs) == INTEGER_CST && integer_zerop (rhs)) && ! (TREE_CODE (rhs) == NOP_EXPR && TREE_CODE (TREE_TYPE (rhs)) == INTEGER_TYPE && TREE_CODE (TREE_OPERAND (rhs, 0)) == INTEGER_CST && integer_zerop (TREE_OPERAND (rhs, 0)))) warn_for_assignment ("%s makes pointer from integer without a cast", errtype, funname, parmnum); return convert (type, rhs); } else if (codel == INTEGER_TYPE && coder == POINTER_TYPE) { warn_for_assignment ("%s makes integer from pointer without a cast", errtype, funname, parmnum); return convert (type, rhs); } else if (codel == BOOLEAN_TYPE && coder == POINTER_TYPE) return convert (type, rhs); if (!errtype) { if (funname) { tree selector = objc_message_selector (); if (selector && parmnum > 2) error ("incompatible type for argument %d of `%s'", parmnum - 2, IDENTIFIER_POINTER (selector)); else error ("incompatible type for argument %d of `%s'", parmnum, IDENTIFIER_POINTER (funname)); } else error ("incompatible type for argument %d of indirect function call", parmnum); } else error ("incompatible types in %s", errtype); return error_mark_node; } /* Convert VALUE for assignment into inlined parameter PARM. ARGNUM is used for error and waring reporting and indicates which argument is being processed. */ tree c_convert_parm_for_inlining (tree parm, tree value, tree fn, int argnum) { tree ret, type; /* If FN was prototyped, the value has been converted already in convert_arguments. */ if (! value || TYPE_ARG_TYPES (TREE_TYPE (fn))) return value; type = TREE_TYPE (parm); ret = convert_for_assignment (type, value, (char *) 0 /* arg passing */, fn, DECL_NAME (fn), argnum); if (targetm.calls.promote_prototypes (TREE_TYPE (fn)) && INTEGRAL_TYPE_P (type) && (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))) ret = default_conversion (ret); return ret; } /* Print a warning using MSGID. It gets OPNAME as its one parameter. if OPNAME is null and ARGNUM is 0, it is replaced by "passing arg of `FUNCTION'". Otherwise if OPNAME is null, it is replaced by "passing arg ARGNUM of `FUNCTION'". FUNCTION and ARGNUM are handled specially if we are building an Objective-C selector. 
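   So for a call like this (illustrative sketch with made-up names):

       void takes_ptr (char *p);
       void caller (void) { takes_ptr (65); }
         // OPNAME is null and ARGNUM is 1, and the pieces combine into
         // "passing arg 1 of `takes_ptr' makes pointer from integer
         //  without a cast"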
*/ static void warn_for_assignment (const char *msgid, const char *opname, tree function, int argnum) { if (opname == 0) { tree selector = objc_message_selector (); char * new_opname; if (selector && argnum > 2) { function = selector; argnum -= 2; } if (argnum == 0) { if (function) { /* Function name is known; supply it. */ const char *const argstring = _("passing arg of `%s'"); new_opname = alloca (IDENTIFIER_LENGTH (function) + strlen (argstring) + 1 + 1); sprintf (new_opname, argstring, IDENTIFIER_POINTER (function)); } else { /* Function name unknown (call through ptr). */ const char *const argnofun = _("passing arg of pointer to function"); new_opname = alloca (strlen (argnofun) + 1 + 1); sprintf (new_opname, argnofun); } } else if (function) { /* Function name is known; supply it. */ const char *const argstring = _("passing arg %d of `%s'"); new_opname = alloca (IDENTIFIER_LENGTH (function) + strlen (argstring) + 1 + 25 /*%d*/ + 1); sprintf (new_opname, argstring, argnum, IDENTIFIER_POINTER (function)); } else { /* Function name unknown (call through ptr); just give arg number. */ const char *const argnofun = _("passing arg %d of pointer to function"); new_opname = alloca (strlen (argnofun) + 1 + 25 /*%d*/ + 1); sprintf (new_opname, argnofun, argnum); } opname = new_opname; } pedwarn (msgid, opname); } /* If VALUE is a compound expr all of whose expressions are constant, then return its value. Otherwise, return error_mark_node. This is for handling COMPOUND_EXPRs as initializer elements which is allowed with a warning when -pedantic is specified. */ static tree valid_compound_expr_initializer (tree value, tree endtype) { if (TREE_CODE (value) == COMPOUND_EXPR) { if (valid_compound_expr_initializer (TREE_OPERAND (value, 0), endtype) == error_mark_node) return error_mark_node; return valid_compound_expr_initializer (TREE_OPERAND (value, 1), endtype); } else if (! TREE_CONSTANT (value) && ! initializer_constant_valid_p (value, endtype)) return error_mark_node; else return value; } /* Perform appropriate conversions on the initial value of a variable, store it in the declaration DECL, and print any error messages that are appropriate. If the init is invalid, store an ERROR_MARK. */ void store_init_value (tree decl, tree init) { tree value, type; /* If variable's type was invalidly declared, just ignore it. */ type = TREE_TYPE (decl); if (TREE_CODE (type) == ERROR_MARK) return; /* Digest the specified initializer into an expression. */ value = digest_init (type, init, TREE_STATIC (decl)); /* Store the expression if valid; else report error. */ if (warn_traditional && !in_system_header && AGGREGATE_TYPE_P (TREE_TYPE (decl)) && ! TREE_STATIC (decl)) warning ("traditional C rejects automatic aggregate initialization"); DECL_INITIAL (decl) = value; /* ANSI wants warnings about out-of-range constant initializers. */ STRIP_TYPE_NOPS (value); constant_expression_warning (value); /* Check if we need to set array size from compound literal size. */ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == 0 && value != error_mark_node) { tree inside_init = init; if (TREE_CODE (init) == NON_LVALUE_EXPR) inside_init = TREE_OPERAND (init, 0); inside_init = fold (inside_init); if (TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR) { tree decl = COMPOUND_LITERAL_EXPR_DECL (inside_init); if (TYPE_DOMAIN (TREE_TYPE (decl))) { /* For int foo[] = (int [3]){1}; we need to set array size now since later on array initializer will be just the brace enclosed list of the compound literal. 
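   In other words (illustrative sketch only):

       int foo[] = (int [3]) {1};   // foo ends up with three elements,
                                    // {1, 0, 0}; sizeof foo is three ints,
                                    // even though the stored initializer is
                                    // reduced to the brace list {1}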
*/ TYPE_DOMAIN (type) = TYPE_DOMAIN (TREE_TYPE (decl)); layout_type (type); layout_decl (decl, 0); } } } } /* Methods for storing and printing names for error messages. */ /* Implement a spelling stack that allows components of a name to be pushed and popped. Each element on the stack is this structure. */ struct spelling { int kind; union { int i; const char *s; } u; }; #define SPELLING_STRING 1 #define SPELLING_MEMBER 2 #define SPELLING_BOUNDS 3 static struct spelling *spelling; /* Next stack element (unused). */ static struct spelling *spelling_base; /* Spelling stack base. */ static int spelling_size; /* Size of the spelling stack. */ /* Macros to save and restore the spelling stack around push_... functions. Alternative to SAVE_SPELLING_STACK. */ #define SPELLING_DEPTH() (spelling - spelling_base) #define RESTORE_SPELLING_DEPTH(DEPTH) (spelling = spelling_base + (DEPTH)) /* Push an element on the spelling stack with type KIND and assign VALUE to MEMBER. */ #define PUSH_SPELLING(KIND, VALUE, MEMBER) \ { \ int depth = SPELLING_DEPTH (); \ \ if (depth >= spelling_size) \ { \ spelling_size += 10; \ if (spelling_base == 0) \ spelling_base = xmalloc (spelling_size * sizeof (struct spelling)); \ else \ spelling_base = xrealloc (spelling_base, \ spelling_size * sizeof (struct spelling)); \ RESTORE_SPELLING_DEPTH (depth); \ } \ \ spelling->kind = (KIND); \ spelling->MEMBER = (VALUE); \ spelling++; \ } /* Push STRING on the stack. Printed literally. */ static void push_string (const char *string) { PUSH_SPELLING (SPELLING_STRING, string, u.s); } /* Push a member name on the stack. Printed as '.' STRING. */ static void push_member_name (tree decl) { const char *const string = DECL_NAME (decl) ? IDENTIFIER_POINTER (DECL_NAME (decl)) : ""; PUSH_SPELLING (SPELLING_MEMBER, string, u.s); } /* Push an array bounds on the stack. Printed as [BOUNDS]. */ static void push_array_bounds (int bounds) { PUSH_SPELLING (SPELLING_BOUNDS, bounds, u.i); } /* Compute the maximum size in bytes of the printed spelling. */ static int spelling_length (void) { int size = 0; struct spelling *p; for (p = spelling_base; p < spelling; p++) { if (p->kind == SPELLING_BOUNDS) size += 25; else size += strlen (p->u.s) + 1; } return size; } /* Print the spelling to BUFFER and return it. */ static char * print_spelling (char *buffer) { char *d = buffer; struct spelling *p; for (p = spelling_base; p < spelling; p++) if (p->kind == SPELLING_BOUNDS) { sprintf (d, "[%d]", p->u.i); d += strlen (d); } else { const char *s; if (p->kind == SPELLING_MEMBER) *d++ = '.'; for (s = p->u.s; (*d = *s++); d++) ; } *d++ = '\0'; return buffer; } /* Issue an error message for a bad initializer component. MSGID identifies the message. The component name is taken from the spelling stack. */ void error_init (const char *msgid) { char *ofwhat; error ("%s", _(msgid)); ofwhat = print_spelling (alloca (spelling_length () + 1)); if (*ofwhat) error ("(near initialization for `%s')", ofwhat); } /* Issue a pedantic warning for a bad initializer component. MSGID identifies the message. The component name is taken from the spelling stack. */ void pedwarn_init (const char *msgid) { char *ofwhat; pedwarn ("%s", _(msgid)); ofwhat = print_spelling (alloca (spelling_length () + 1)); if (*ofwhat) pedwarn ("(near initialization for `%s')", ofwhat); } /* Issue a warning for a bad initializer component. MSGID identifies the message. The component name is taken from the spelling stack. 
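   The spelling stack is what produces the "(near initialization for ...)"
   part of such diagnostics.  Illustrative sketch with made-up names:

       struct s { int a[2]; } x = { { 1, "oops" } };
         // the diagnostic about the second element is followed by something
         // like "(near initialization for `x.a[1]')"; the member name and
         // the array bound come from the spelling stack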
*/ static void warning_init (const char *msgid) { char *ofwhat; warning ("%s", _(msgid)); ofwhat = print_spelling (alloca (spelling_length () + 1)); if (*ofwhat) warning ("(near initialization for `%s')", ofwhat); } /* Digest the parser output INIT as an initializer for type TYPE. Return a C expression of type TYPE to represent the initial value. REQUIRE_CONSTANT requests an error if non-constant initializers or elements are seen. */ static tree digest_init (tree type, tree init, int require_constant) { enum tree_code code = TREE_CODE (type); tree inside_init = init; if (type == error_mark_node || init == error_mark_node || TREE_TYPE (init) == error_mark_node) return error_mark_node; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ /* Do not use STRIP_NOPS here. We do not want an enumerator whose value is 0 to count as a null pointer constant. */ if (TREE_CODE (init) == NON_LVALUE_EXPR) inside_init = TREE_OPERAND (init, 0); inside_init = fold (inside_init); /* Initialization of an array of chars from a string constant optionally enclosed in braces. */ if (code == ARRAY_TYPE) { tree typ1 = TYPE_MAIN_VARIANT (TREE_TYPE (type)); if ((typ1 == char_type_node || typ1 == signed_char_type_node || typ1 == unsigned_char_type_node || typ1 == unsigned_wchar_type_node || typ1 == signed_wchar_type_node) && ((inside_init && TREE_CODE (inside_init) == STRING_CST))) { if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)), TYPE_MAIN_VARIANT (type))) return inside_init; if ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init))) != char_type_node) && TYPE_PRECISION (typ1) == TYPE_PRECISION (char_type_node)) { error_init ("char-array initialized from wide string"); return error_mark_node; } if ((TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init))) == char_type_node) && TYPE_PRECISION (typ1) != TYPE_PRECISION (char_type_node)) { error_init ("int-array initialized from non-wide string"); return error_mark_node; } TREE_TYPE (inside_init) = type; if (TYPE_DOMAIN (type) != 0 && TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST /* Subtract 1 (or sizeof (wchar_t)) because it's ok to ignore the terminating null char that is counted in the length of the constant. */ && 0 > compare_tree_int (TYPE_SIZE_UNIT (type), TREE_STRING_LENGTH (inside_init) - ((TYPE_PRECISION (typ1) != TYPE_PRECISION (char_type_node)) ? (TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT) : 1))) pedwarn_init ("initializer-string for array of chars is too long"); return inside_init; } } /* Build a VECTOR_CST from a *constant* vector constructor. If the vector constructor is not constant (e.g. {1,2,3,foo()}) then punt below and handle as a constructor. */ if (code == VECTOR_TYPE && vector_types_convertible_p (TREE_TYPE (inside_init), type) && TREE_CONSTANT (inside_init)) { if (TREE_CODE (inside_init) == VECTOR_CST && comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)), TYPE_MAIN_VARIANT (type))) return inside_init; else return build_vector (type, CONSTRUCTOR_ELTS (inside_init)); } /* Any type can be initialized from an expression of the same type, optionally with braces. 
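   Together with the string-constant case handled just above, for example
   (illustrative sketch only):

       char ok[3]  = "abc";      // valid: the terminating null may be dropped
       char bad[2] = "abc";      // pedwarn: "initializer-string for array of
                                 //  chars is too long"
       struct s { int i; };
       void f (struct s a) { struct s b = a; }   // same type, no braces needed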
*/ if (inside_init && TREE_TYPE (inside_init) != 0 && (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)), TYPE_MAIN_VARIANT (type)) || (code == ARRAY_TYPE && comptypes (TREE_TYPE (inside_init), type)) || (code == VECTOR_TYPE && comptypes (TREE_TYPE (inside_init), type)) || (code == POINTER_TYPE && TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE && comptypes (TREE_TYPE (TREE_TYPE (inside_init)), TREE_TYPE (type))) || (code == POINTER_TYPE && TREE_CODE (TREE_TYPE (inside_init)) == FUNCTION_TYPE && comptypes (TREE_TYPE (inside_init), TREE_TYPE (type))))) { if (code == POINTER_TYPE) { inside_init = default_function_array_conversion (inside_init); if (TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE) { error_init ("invalid use of non-lvalue array"); return error_mark_node; } } if (code == VECTOR_TYPE) /* Although the types are compatible, we may require a conversion. */ inside_init = convert (type, inside_init); if (require_constant && !flag_isoc99 && TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR) { /* As an extension, allow initializing objects with static storage duration with compound literals (which are then treated just as the brace enclosed list they contain). */ tree decl = COMPOUND_LITERAL_EXPR_DECL (inside_init); inside_init = DECL_INITIAL (decl); } if (code == ARRAY_TYPE && TREE_CODE (inside_init) != STRING_CST && TREE_CODE (inside_init) != CONSTRUCTOR) { error_init ("array initialized from non-constant array expression"); return error_mark_node; } if (optimize && TREE_CODE (inside_init) == VAR_DECL) inside_init = decl_constant_value_for_broken_optimization (inside_init); /* Compound expressions can only occur here if -pedantic or -pedantic-errors is specified. In the later case, we always want an error. In the former case, we simply want a warning. */ if (require_constant && pedantic && TREE_CODE (inside_init) == COMPOUND_EXPR) { inside_init = valid_compound_expr_initializer (inside_init, TREE_TYPE (inside_init)); if (inside_init == error_mark_node) error_init ("initializer element is not constant"); else pedwarn_init ("initializer element is not constant"); if (flag_pedantic_errors) inside_init = error_mark_node; } else if (require_constant && (!TREE_CONSTANT (inside_init) /* This test catches things like `7 / 0' which result in an expression for which TREE_CONSTANT is true, but which is not actually something that is a legal constant. We really should not be using this function, because it is a part of the back-end. Instead, the expression should already have been turned into ERROR_MARK_NODE. */ || !initializer_constant_valid_p (inside_init, TREE_TYPE (inside_init)))) { error_init ("initializer element is not constant"); inside_init = error_mark_node; } return inside_init; } /* Handle scalar types, including conversions. */ if (code == INTEGER_TYPE || code == REAL_TYPE || code == POINTER_TYPE || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE || code == COMPLEX_TYPE || code == VECTOR_TYPE) { /* Note that convert_for_assignment calls default_conversion for arrays and functions. We must not call it in the case where inside_init is a null pointer constant. */ inside_init = convert_for_assignment (type, init, _("initialization"), NULL_TREE, NULL_TREE, 0); if (require_constant && ! 
TREE_CONSTANT (inside_init)) { error_init ("initializer element is not constant"); inside_init = error_mark_node; } else if (require_constant && initializer_constant_valid_p (inside_init, TREE_TYPE (inside_init)) == 0) { error_init ("initializer element is not computable at load time"); inside_init = error_mark_node; } return inside_init; } /* Come here only for records and arrays. */ if (COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) { error_init ("variable-sized object may not be initialized"); return error_mark_node; } error_init ("invalid initializer"); return error_mark_node; } /* Handle initializers that use braces. */ /* Type of object we are accumulating a constructor for. This type is always a RECORD_TYPE, UNION_TYPE or ARRAY_TYPE. */ static tree constructor_type; /* For a RECORD_TYPE or UNION_TYPE, this is the chain of fields left to fill. */ static tree constructor_fields; /* For an ARRAY_TYPE, this is the specified index at which to store the next element we get. */ static tree constructor_index; /* For an ARRAY_TYPE, this is the maximum index. */ static tree constructor_max_index; /* For a RECORD_TYPE, this is the first field not yet written out. */ static tree constructor_unfilled_fields; /* For an ARRAY_TYPE, this is the index of the first element not yet written out. */ static tree constructor_unfilled_index; /* In a RECORD_TYPE, the byte index of the next consecutive field. This is so we can generate gaps between fields, when appropriate. */ static tree constructor_bit_index; /* If we are saving up the elements rather than allocating them, this is the list of elements so far (in reverse order, most recent first). */ static tree constructor_elements; /* 1 if constructor should be incrementally stored into a constructor chain, 0 if all the elements should be kept in AVL tree. */ static int constructor_incremental; /* 1 if so far this constructor's elements are all compile-time constants. */ static int constructor_constant; /* 1 if so far this constructor's elements are all valid address constants. */ static int constructor_simple; /* 1 if this constructor is erroneous so far. */ static int constructor_erroneous; /* Structure for managing pending initializer elements, organized as an AVL tree. */ struct init_node { struct init_node *left, *right; struct init_node *parent; int balance; tree purpose; tree value; }; /* Tree of pending elements at this constructor level. These are elements encountered out of order which belong at places we haven't reached yet in actually writing the output. Will never hold tree nodes across GC runs. */ static struct init_node *constructor_pending_elts; /* The SPELLING_DEPTH of this constructor. */ static int constructor_depth; /* 0 if implicitly pushing constructor levels is allowed. */ int constructor_no_implicit = 0; /* 0 for C; 1 for some other languages. */ /* DECL node for which an initializer is being read. 0 means we are reading a constructor expression such as (struct foo) {...}. */ static tree constructor_decl; /* start_init saves the ASMSPEC arg here for really_start_incremental_init. */ static const char *constructor_asmspec; /* Nonzero if this is an initializer for a top-level decl. */ static int constructor_top_level; /* Nonzero if there were any member designators in this initializer. */ static int constructor_designated; /* Nesting depth of designator list. */ static int designator_depth; /* Nonzero if there were diagnosed errors in this designator list. 
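   (A designator list here means the sequence of `.field' and `[index]'
   designators preceding the `=' in a C99 designated initializer,
   e.g. `.pos[2].x = 5'.)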
*/ static int designator_errorneous; /* This stack has a level for each implicit or explicit level of structuring in the initializer, including the outermost one. It saves the values of most of the variables above. */ struct constructor_range_stack; struct constructor_stack { struct constructor_stack *next; tree type; tree fields; tree index; tree max_index; tree unfilled_index; tree unfilled_fields; tree bit_index; tree elements; struct init_node *pending_elts; int offset; int depth; /* If nonzero, this value should replace the entire constructor at this level. */ tree replacement_value; struct constructor_range_stack *range_stack; char constant; char simple; char implicit; char erroneous; char outer; char incremental; char designated; }; struct constructor_stack *constructor_stack; /* This stack represents designators from some range designator up to the last designator in the list. */ struct constructor_range_stack { struct constructor_range_stack *next, *prev; struct constructor_stack *stack; tree range_start; tree index; tree range_end; tree fields; }; struct constructor_range_stack *constructor_range_stack; /* This stack records separate initializers that are nested. Nested initializers can't happen in ANSI C, but GNU C allows them in cases like { ... (struct foo) { ... } ... }. */ struct initializer_stack { struct initializer_stack *next; tree decl; const char *asmspec; struct constructor_stack *constructor_stack; struct constructor_range_stack *constructor_range_stack; tree elements; struct spelling *spelling; struct spelling *spelling_base; int spelling_size; char top_level; char require_constant_value; char require_constant_elements; }; struct initializer_stack *initializer_stack; /* Prepare to parse and output the initializer for variable DECL. */ void start_init (tree decl, tree asmspec_tree, int top_level) { const char *locus; struct initializer_stack *p = xmalloc (sizeof (struct initializer_stack)); const char *asmspec = 0; if (asmspec_tree) asmspec = TREE_STRING_POINTER (asmspec_tree); p->decl = constructor_decl; p->asmspec = constructor_asmspec; p->require_constant_value = require_constant_value; p->require_constant_elements = require_constant_elements; p->constructor_stack = constructor_stack; p->constructor_range_stack = constructor_range_stack; p->elements = constructor_elements; p->spelling = spelling; p->spelling_base = spelling_base; p->spelling_size = spelling_size; p->top_level = constructor_top_level; p->next = initializer_stack; initializer_stack = p; constructor_decl = decl; constructor_asmspec = asmspec; constructor_designated = 0; constructor_top_level = top_level; if (decl != 0) { require_constant_value = TREE_STATIC (decl); require_constant_elements = ((TREE_STATIC (decl) || (pedantic && !flag_isoc99)) /* For a scalar, you can always use any value to initialize, even within braces. */ && (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE || TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE || TREE_CODE (TREE_TYPE (decl)) == QUAL_UNION_TYPE)); locus = IDENTIFIER_POINTER (DECL_NAME (decl)); } else { require_constant_value = 0; require_constant_elements = 0; locus = "(anonymous)"; } constructor_stack = 0; constructor_range_stack = 0; missing_braces_mentioned = 0; spelling_base = 0; spelling_size = 0; RESTORE_SPELLING_DEPTH (0); if (locus) push_string (locus); } void finish_init (void) { struct initializer_stack *p = initializer_stack; /* Free the whole constructor stack of this initializer. 
*/ while (constructor_stack) { struct constructor_stack *q = constructor_stack; constructor_stack = q->next; free (q); } if (constructor_range_stack) abort (); /* Pop back to the data of the outer initializer (if any). */ free (spelling_base); constructor_decl = p->decl; constructor_asmspec = p->asmspec; require_constant_value = p->require_constant_value; require_constant_elements = p->require_constant_elements; constructor_stack = p->constructor_stack; constructor_range_stack = p->constructor_range_stack; constructor_elements = p->elements; spelling = p->spelling; spelling_base = p->spelling_base; spelling_size = p->spelling_size; constructor_top_level = p->top_level; initializer_stack = p->next; free (p); } /* Call here when we see the initializer is surrounded by braces. This is instead of a call to push_init_level; it is matched by a call to pop_init_level. TYPE is the type to initialize, for a constructor expression. For an initializer for a decl, TYPE is zero. */ void really_start_incremental_init (tree type) { struct constructor_stack *p = xmalloc (sizeof (struct constructor_stack)); if (type == 0) type = TREE_TYPE (constructor_decl); if (targetm.vector_opaque_p (type)) error ("opaque vector types cannot be initialized"); p->type = constructor_type; p->fields = constructor_fields; p->index = constructor_index; p->max_index = constructor_max_index; p->unfilled_index = constructor_unfilled_index; p->unfilled_fields = constructor_unfilled_fields; p->bit_index = constructor_bit_index; p->elements = constructor_elements; p->constant = constructor_constant; p->simple = constructor_simple; p->erroneous = constructor_erroneous; p->pending_elts = constructor_pending_elts; p->depth = constructor_depth; p->replacement_value = 0; p->implicit = 0; p->range_stack = 0; p->outer = 0; p->incremental = constructor_incremental; p->designated = constructor_designated; p->next = 0; constructor_stack = p; constructor_constant = 1; constructor_simple = 1; constructor_depth = SPELLING_DEPTH (); constructor_elements = 0; constructor_pending_elts = 0; constructor_type = type; constructor_incremental = 1; constructor_designated = 0; designator_depth = 0; designator_errorneous = 0; if (TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) { constructor_fields = TYPE_FIELDS (constructor_type); /* Skip any nameless bit fields at the beginning. */ while (constructor_fields != 0 && DECL_C_BIT_FIELD (constructor_fields) && DECL_NAME (constructor_fields) == 0) constructor_fields = TREE_CHAIN (constructor_fields); constructor_unfilled_fields = constructor_fields; constructor_bit_index = bitsize_zero_node; } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (TYPE_DOMAIN (constructor_type)) { constructor_max_index = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type)); /* Detect non-empty initializations of zero-length arrays. */ if (constructor_max_index == NULL_TREE && TYPE_SIZE (constructor_type)) constructor_max_index = build_int_2 (-1, -1); /* constructor_max_index needs to be an INTEGER_CST. Attempts to initialize VLAs will cause a proper error; avoid tree checking errors as well by setting a safe value. 
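   For instance, `int n = 3; int v[n] = { 1, 2, 3 };' is rejected elsewhere
   with a proper error; the -1 stored below merely keeps tree checking quiet
   in the meantime.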
*/ if (constructor_max_index && TREE_CODE (constructor_max_index) != INTEGER_CST) constructor_max_index = build_int_2 (-1, -1); constructor_index = convert (bitsizetype, TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type))); } else constructor_index = bitsize_zero_node; constructor_unfilled_index = constructor_index; } else if (TREE_CODE (constructor_type) == VECTOR_TYPE) { /* Vectors are like simple fixed-size arrays. */ constructor_max_index = build_int_2 (TYPE_VECTOR_SUBPARTS (constructor_type) - 1, 0); constructor_index = convert (bitsizetype, bitsize_zero_node); constructor_unfilled_index = constructor_index; } else { /* Handle the case of int x = {5}; */ constructor_fields = constructor_type; constructor_unfilled_fields = constructor_type; } } /* Push down into a subobject, for initialization. If this is for an explicit set of braces, IMPLICIT is 0. If it is because the next element belongs at a lower level, IMPLICIT is 1 (or 2 if the push is because of designator list). */ void push_init_level (int implicit) { struct constructor_stack *p; tree value = NULL_TREE; /* If we've exhausted any levels that didn't have braces, pop them now. */ while (constructor_stack->implicit) { if ((TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) && constructor_fields == 0) process_init_element (pop_init_level (1)); else if (TREE_CODE (constructor_type) == ARRAY_TYPE && constructor_max_index && tree_int_cst_lt (constructor_max_index, constructor_index)) process_init_element (pop_init_level (1)); else break; } /* Unless this is an explicit brace, we need to preserve previous content if any. */ if (implicit) { if ((TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) && constructor_fields) value = find_init_member (constructor_fields); else if (TREE_CODE (constructor_type) == ARRAY_TYPE) value = find_init_member (constructor_index); } p = xmalloc (sizeof (struct constructor_stack)); p->type = constructor_type; p->fields = constructor_fields; p->index = constructor_index; p->max_index = constructor_max_index; p->unfilled_index = constructor_unfilled_index; p->unfilled_fields = constructor_unfilled_fields; p->bit_index = constructor_bit_index; p->elements = constructor_elements; p->constant = constructor_constant; p->simple = constructor_simple; p->erroneous = constructor_erroneous; p->pending_elts = constructor_pending_elts; p->depth = constructor_depth; p->replacement_value = 0; p->implicit = implicit; p->outer = 0; p->incremental = constructor_incremental; p->designated = constructor_designated; p->next = constructor_stack; p->range_stack = 0; constructor_stack = p; constructor_constant = 1; constructor_simple = 1; constructor_depth = SPELLING_DEPTH (); constructor_elements = 0; constructor_incremental = 1; constructor_designated = 0; constructor_pending_elts = 0; if (!implicit) { p->range_stack = constructor_range_stack; constructor_range_stack = 0; designator_depth = 0; designator_errorneous = 0; } /* Don't die if an entire brace-pair level is superfluous in the containing level. */ if (constructor_type == 0) ; else if (TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) { /* Don't die if there are extra init elts at the end. 
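   For instance, the stray `{ 2 }' in `struct { int a; } s = { 1, { 2 } };'
   arrives after every field has been consumed; constructor_type is cleared
   here and the error is issued just below.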
*/ if (constructor_fields == 0) constructor_type = 0; else { constructor_type = TREE_TYPE (constructor_fields); push_member_name (constructor_fields); constructor_depth++; } } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { constructor_type = TREE_TYPE (constructor_type); push_array_bounds (tree_low_cst (constructor_index, 0)); constructor_depth++; } if (constructor_type == 0) { error_init ("extra brace group at end of initializer"); constructor_fields = 0; constructor_unfilled_fields = 0; return; } if (value && TREE_CODE (value) == CONSTRUCTOR) { constructor_constant = TREE_CONSTANT (value); constructor_simple = TREE_STATIC (value); constructor_elements = CONSTRUCTOR_ELTS (value); if (constructor_elements && (TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == ARRAY_TYPE)) set_nonincremental_init (); } if (implicit == 1 && warn_missing_braces && !missing_braces_mentioned) { missing_braces_mentioned = 1; warning_init ("missing braces around initializer"); } if (TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) { constructor_fields = TYPE_FIELDS (constructor_type); /* Skip any nameless bit fields at the beginning. */ while (constructor_fields != 0 && DECL_C_BIT_FIELD (constructor_fields) && DECL_NAME (constructor_fields) == 0) constructor_fields = TREE_CHAIN (constructor_fields); constructor_unfilled_fields = constructor_fields; constructor_bit_index = bitsize_zero_node; } else if (TREE_CODE (constructor_type) == VECTOR_TYPE) { /* Vectors are like simple fixed-size arrays. */ constructor_max_index = build_int_2 (TYPE_VECTOR_SUBPARTS (constructor_type) - 1, 0); constructor_index = convert (bitsizetype, integer_zero_node); constructor_unfilled_index = constructor_index; } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (TYPE_DOMAIN (constructor_type)) { constructor_max_index = TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type)); /* Detect non-empty initializations of zero-length arrays. */ if (constructor_max_index == NULL_TREE && TYPE_SIZE (constructor_type)) constructor_max_index = build_int_2 (-1, -1); /* constructor_max_index needs to be an INTEGER_CST. Attempts to initialize VLAs will cause a proper error; avoid tree checking errors as well by setting a safe value. */ if (constructor_max_index && TREE_CODE (constructor_max_index) != INTEGER_CST) constructor_max_index = build_int_2 (-1, -1); constructor_index = convert (bitsizetype, TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type))); } else constructor_index = bitsize_zero_node; constructor_unfilled_index = constructor_index; if (value && TREE_CODE (value) == STRING_CST) { /* We need to split the char/wchar array into individual characters, so that we don't have to special case it everywhere. */ set_nonincremental_init_from_string (value); } } else { warning_init ("braces around scalar initializer"); constructor_fields = constructor_type; constructor_unfilled_fields = constructor_type; } } /* At the end of an implicit or explicit brace level, finish up that level of constructor. If we were outputting the elements as they are read, return 0 from inner levels (process_init_element ignores that), but return error_mark_node from the outermost level (that's what we want to put in DECL_INITIAL). Otherwise, return a CONSTRUCTOR expression. */ tree pop_init_level (int implicit) { struct constructor_stack *p; tree constructor = 0; if (implicit == 0) { /* When we come to an explicit close brace, pop any inner levels that didn't have explicit braces. 
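   For example, in `int m[2][2] = { 1, 2, 3, 4 };' the rows of m are filled
   through implicit levels, and the single closing brace pops them all here.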
*/ while (constructor_stack->implicit) process_init_element (pop_init_level (1)); if (constructor_range_stack) abort (); } /* Now output all pending elements. */ constructor_incremental = 1; output_pending_init_elements (1); p = constructor_stack; /* Error for initializing a flexible array member, or a zero-length array member in an inappropriate context. */ if (constructor_type && constructor_fields && TREE_CODE (constructor_type) == ARRAY_TYPE && TYPE_DOMAIN (constructor_type) && ! TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type))) { /* Silently discard empty initializations. The parser will already have pedwarned for empty brackets. */ if (integer_zerop (constructor_unfilled_index)) constructor_type = NULL_TREE; else if (! TYPE_SIZE (constructor_type)) { if (constructor_depth > 2) error_init ("initialization of flexible array member in a nested context"); else if (pedantic) pedwarn_init ("initialization of a flexible array member"); /* We have already issued an error message for the existence of a flexible array member not at the end of the structure. Discard the initializer so that we do not abort later. */ if (TREE_CHAIN (constructor_fields) != NULL_TREE) constructor_type = NULL_TREE; } else /* Zero-length arrays are no longer special, so we should no longer get here. */ abort (); } /* Warn when some struct elements are implicitly initialized to zero. */ if (extra_warnings && constructor_type && TREE_CODE (constructor_type) == RECORD_TYPE && constructor_unfilled_fields) { /* Do not warn for flexible array members or zero-length arrays. */ while (constructor_unfilled_fields && (! DECL_SIZE (constructor_unfilled_fields) || integer_zerop (DECL_SIZE (constructor_unfilled_fields)))) constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields); /* Do not warn if this level of the initializer uses member designators; it is likely to be deliberate. */ if (constructor_unfilled_fields && !constructor_designated) { push_member_name (constructor_unfilled_fields); warning_init ("missing initializer"); RESTORE_SPELLING_DEPTH (constructor_depth); } } /* Pad out the end of the structure. */ if (p->replacement_value) /* If this closes a superfluous brace pair, just pass out the element between them. */ constructor = p->replacement_value; else if (constructor_type == 0) ; else if (TREE_CODE (constructor_type) != RECORD_TYPE && TREE_CODE (constructor_type) != UNION_TYPE && TREE_CODE (constructor_type) != ARRAY_TYPE && TREE_CODE (constructor_type) != VECTOR_TYPE) { /* A nonincremental scalar initializer--just return the element, after verifying there is just one. 
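   E.g. `int i = { 42 };' simply yields 42 at this point; any additional
   element would already have been diagnosed as excess.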
*/ if (constructor_elements == 0) { if (!constructor_erroneous) error_init ("empty scalar initializer"); constructor = error_mark_node; } else if (TREE_CHAIN (constructor_elements) != 0) { error_init ("extra elements in scalar initializer"); constructor = TREE_VALUE (constructor_elements); } else constructor = TREE_VALUE (constructor_elements); } else { if (constructor_erroneous) constructor = error_mark_node; else { constructor = build_constructor (constructor_type, nreverse (constructor_elements)); if (constructor_constant) TREE_CONSTANT (constructor) = TREE_INVARIANT (constructor) = 1; if (constructor_constant && constructor_simple) TREE_STATIC (constructor) = 1; } } constructor_type = p->type; constructor_fields = p->fields; constructor_index = p->index; constructor_max_index = p->max_index; constructor_unfilled_index = p->unfilled_index; constructor_unfilled_fields = p->unfilled_fields; constructor_bit_index = p->bit_index; constructor_elements = p->elements; constructor_constant = p->constant; constructor_simple = p->simple; constructor_erroneous = p->erroneous; constructor_incremental = p->incremental; constructor_designated = p->designated; constructor_pending_elts = p->pending_elts; constructor_depth = p->depth; if (!p->implicit) constructor_range_stack = p->range_stack; RESTORE_SPELLING_DEPTH (constructor_depth); constructor_stack = p->next; free (p); if (constructor == 0) { if (constructor_stack == 0) return error_mark_node; return NULL_TREE; } return constructor; } /* Common handling for both array range and field name designators. ARRAY argument is nonzero for array ranges. Returns zero for success. */ static int set_designator (int array) { tree subtype; enum tree_code subcode; /* Don't die if an entire brace-pair level is superfluous in the containing level. */ if (constructor_type == 0) return 1; /* If there were errors in this designator list already, bail out silently. */ if (designator_errorneous) return 1; if (!designator_depth) { if (constructor_range_stack) abort (); /* Designator list starts at the level of closest explicit braces. */ while (constructor_stack->implicit) process_init_element (pop_init_level (1)); constructor_designated = 1; return 0; } if (constructor_no_implicit) { error_init ("initialization designators may not nest"); return 1; } if (TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) { subtype = TREE_TYPE (constructor_fields); if (subtype != error_mark_node) subtype = TYPE_MAIN_VARIANT (subtype); } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { subtype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type)); } else abort (); subcode = TREE_CODE (subtype); if (array && subcode != ARRAY_TYPE) { error_init ("array index in non-array initializer"); return 1; } else if (!array && subcode != RECORD_TYPE && subcode != UNION_TYPE) { error_init ("field name not in record or union initializer"); return 1; } constructor_designated = 1; push_init_level (2); return 0; } /* If there are range designators in designator list, push a new designator to constructor_range_stack. RANGE_END is end of such stack range or NULL_TREE if there is no range designator at this level. 
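   As an illustration of the GNU range-designator extension this stack
   supports, an initializer such as `int w[10] = { [2 ... 5] = 1 };' records
   one entry whose range_start is 2 and whose range_end is 5.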
*/ static void push_range_stack (tree range_end) { struct constructor_range_stack *p; p = ggc_alloc (sizeof (struct constructor_range_stack)); p->prev = constructor_range_stack; p->next = 0; p->fields = constructor_fields; p->range_start = constructor_index; p->index = constructor_index; p->stack = constructor_stack; p->range_end = range_end; if (constructor_range_stack) constructor_range_stack->next = p; constructor_range_stack = p; } /* Within an array initializer, specify the next index to be initialized. FIRST is that index. If LAST is nonzero, then initialize a range of indices, running from FIRST through LAST. */ void set_init_index (tree first, tree last) { if (set_designator (1)) return; designator_errorneous = 1; while ((TREE_CODE (first) == NOP_EXPR || TREE_CODE (first) == CONVERT_EXPR || TREE_CODE (first) == NON_LVALUE_EXPR) && (TYPE_MODE (TREE_TYPE (first)) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (first, 0))))) first = TREE_OPERAND (first, 0); if (last) while ((TREE_CODE (last) == NOP_EXPR || TREE_CODE (last) == CONVERT_EXPR || TREE_CODE (last) == NON_LVALUE_EXPR) && (TYPE_MODE (TREE_TYPE (last)) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (last, 0))))) last = TREE_OPERAND (last, 0); if (TREE_CODE (first) != INTEGER_CST) error_init ("nonconstant array index in initializer"); else if (last != 0 && TREE_CODE (last) != INTEGER_CST) error_init ("nonconstant array index in initializer"); else if (TREE_CODE (constructor_type) != ARRAY_TYPE) error_init ("array index in non-array initializer"); else if (tree_int_cst_sgn (first) == -1) error_init ("array index in initializer exceeds array bounds"); else if (constructor_max_index && tree_int_cst_lt (constructor_max_index, first)) error_init ("array index in initializer exceeds array bounds"); else { constructor_index = convert (bitsizetype, first); if (last) { if (tree_int_cst_equal (first, last)) last = 0; else if (tree_int_cst_lt (last, first)) { error_init ("empty index range in initializer"); last = 0; } else { last = convert (bitsizetype, last); if (constructor_max_index != 0 && tree_int_cst_lt (constructor_max_index, last)) { error_init ("array index range in initializer exceeds array bounds"); last = 0; } } } designator_depth++; designator_errorneous = 0; if (constructor_range_stack || last) push_range_stack (last); } } /* Within a struct initializer, specify the next field to be initialized. */ void set_init_label (tree fieldname) { tree tail; if (set_designator (0)) return; designator_errorneous = 1; if (TREE_CODE (constructor_type) != RECORD_TYPE && TREE_CODE (constructor_type) != UNION_TYPE) { error_init ("field name not in record or union initializer"); return; } for (tail = TYPE_FIELDS (constructor_type); tail; tail = TREE_CHAIN (tail)) { if (DECL_NAME (tail) == fieldname) break; } if (tail == 0) error ("unknown field `%s' specified in initializer", IDENTIFIER_POINTER (fieldname)); else { constructor_fields = tail; designator_depth++; designator_errorneous = 0; if (constructor_range_stack) push_range_stack (NULL_TREE); } } /* Add a new initializer to the tree of pending initializers. PURPOSE identifies the initializer, either array index or field in a structure. VALUE is the value of that index or field. 
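   Out-of-order designated initializers, e.g. `int a[4] = { [3] = 7, [1] = 5 };',
   are held in this AVL tree until output_pending_init_elements can emit them
   in index order.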
*/ static void add_pending_init (tree purpose, tree value) { struct init_node *p, **q, *r; q = &constructor_pending_elts; p = 0; if (TREE_CODE (constructor_type) == ARRAY_TYPE) { while (*q != 0) { p = *q; if (tree_int_cst_lt (purpose, p->purpose)) q = &p->left; else if (tree_int_cst_lt (p->purpose, purpose)) q = &p->right; else { if (TREE_SIDE_EFFECTS (p->value)) warning_init ("initialized field with side-effects overwritten"); p->value = value; return; } } } else { tree bitpos; bitpos = bit_position (purpose); while (*q != NULL) { p = *q; if (tree_int_cst_lt (bitpos, bit_position (p->purpose))) q = &p->left; else if (p->purpose != purpose) q = &p->right; else { if (TREE_SIDE_EFFECTS (p->value)) warning_init ("initialized field with side-effects overwritten"); p->value = value; return; } } } r = ggc_alloc (sizeof (struct init_node)); r->purpose = purpose; r->value = value; *q = r; r->parent = p; r->left = 0; r->right = 0; r->balance = 0; while (p) { struct init_node *s; if (r == p->left) { if (p->balance == 0) p->balance = -1; else if (p->balance < 0) { if (r->balance < 0) { /* L rotation. */ p->left = r->right; if (p->left) p->left->parent = p; r->right = p; p->balance = 0; r->balance = 0; s = p->parent; p->parent = r; r->parent = s; if (s) { if (s->left == p) s->left = r; else s->right = r; } else constructor_pending_elts = r; } else { /* LR rotation. */ struct init_node *t = r->right; r->right = t->left; if (r->right) r->right->parent = r; t->left = r; p->left = t->right; if (p->left) p->left->parent = p; t->right = p; p->balance = t->balance < 0; r->balance = -(t->balance > 0); t->balance = 0; s = p->parent; p->parent = t; r->parent = t; t->parent = s; if (s) { if (s->left == p) s->left = t; else s->right = t; } else constructor_pending_elts = t; } break; } else { /* p->balance == +1; growth of left side balances the node. */ p->balance = 0; break; } } else /* r == p->right */ { if (p->balance == 0) /* Growth propagation from right side. */ p->balance++; else if (p->balance > 0) { if (r->balance > 0) { /* R rotation. */ p->right = r->left; if (p->right) p->right->parent = p; r->left = p; p->balance = 0; r->balance = 0; s = p->parent; p->parent = r; r->parent = s; if (s) { if (s->left == p) s->left = r; else s->right = r; } else constructor_pending_elts = r; } else /* r->balance == -1 */ { /* RL rotation */ struct init_node *t = r->left; r->left = t->right; if (r->left) r->left->parent = r; t->right = r; p->right = t->left; if (p->right) p->right->parent = p; t->left = p; r->balance = (t->balance < 0); p->balance = -(t->balance > 0); t->balance = 0; s = p->parent; p->parent = t; r->parent = t; t->parent = s; if (s) { if (s->left == p) s->left = t; else s->right = t; } else constructor_pending_elts = t; } break; } else { /* p->balance == -1; growth of right side balances the node. */ p->balance = 0; break; } } r = p; p = p->parent; } } /* Build AVL tree from a sorted chain. */ static void set_nonincremental_init (void) { tree chain; if (TREE_CODE (constructor_type) != RECORD_TYPE && TREE_CODE (constructor_type) != ARRAY_TYPE) return; for (chain = constructor_elements; chain; chain = TREE_CHAIN (chain)) add_pending_init (TREE_PURPOSE (chain), TREE_VALUE (chain)); constructor_elements = 0; if (TREE_CODE (constructor_type) == RECORD_TYPE) { constructor_unfilled_fields = TYPE_FIELDS (constructor_type); /* Skip any nameless bit fields at the beginning. 
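   (Unnamed bit-fields, e.g. a padding member declared as `int : 3;', cannot
   be initialized, so they are never counted among the unfilled fields.)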
*/ while (constructor_unfilled_fields != 0 && DECL_C_BIT_FIELD (constructor_unfilled_fields) && DECL_NAME (constructor_unfilled_fields) == 0) constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields); } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (TYPE_DOMAIN (constructor_type)) constructor_unfilled_index = convert (bitsizetype, TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type))); else constructor_unfilled_index = bitsize_zero_node; } constructor_incremental = 0; } /* Build AVL tree from a string constant. */ static void set_nonincremental_init_from_string (tree str) { tree value, purpose, type; HOST_WIDE_INT val[2]; const char *p, *end; int byte, wchar_bytes, charwidth, bitpos; if (TREE_CODE (constructor_type) != ARRAY_TYPE) abort (); if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (str))) == TYPE_PRECISION (char_type_node)) wchar_bytes = 1; else if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (str))) == TYPE_PRECISION (wchar_type_node)) wchar_bytes = TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT; else abort (); charwidth = TYPE_PRECISION (char_type_node); type = TREE_TYPE (constructor_type); p = TREE_STRING_POINTER (str); end = p + TREE_STRING_LENGTH (str); for (purpose = bitsize_zero_node; p < end && !tree_int_cst_lt (constructor_max_index, purpose); purpose = size_binop (PLUS_EXPR, purpose, bitsize_one_node)) { if (wchar_bytes == 1) { val[1] = (unsigned char) *p++; val[0] = 0; } else { val[0] = 0; val[1] = 0; for (byte = 0; byte < wchar_bytes; byte++) { if (BYTES_BIG_ENDIAN) bitpos = (wchar_bytes - byte - 1) * charwidth; else bitpos = byte * charwidth; val[bitpos < HOST_BITS_PER_WIDE_INT] |= ((unsigned HOST_WIDE_INT) ((unsigned char) *p++)) << (bitpos % HOST_BITS_PER_WIDE_INT); } } if (!TYPE_UNSIGNED (type)) { bitpos = ((wchar_bytes - 1) * charwidth) + HOST_BITS_PER_CHAR; if (bitpos < HOST_BITS_PER_WIDE_INT) { if (val[1] & (((HOST_WIDE_INT) 1) << (bitpos - 1))) { val[1] |= ((HOST_WIDE_INT) -1) << bitpos; val[0] = -1; } } else if (bitpos == HOST_BITS_PER_WIDE_INT) { if (val[1] < 0) val[0] = -1; } else if (val[0] & (((HOST_WIDE_INT) 1) << (bitpos - 1 - HOST_BITS_PER_WIDE_INT))) val[0] |= ((HOST_WIDE_INT) -1) << (bitpos - HOST_BITS_PER_WIDE_INT); } value = build_int_2 (val[1], val[0]); TREE_TYPE (value) = type; add_pending_init (purpose, value); } constructor_incremental = 0; } /* Return value of FIELD in pending initializer or zero if the field was not initialized yet. */ static tree find_init_member (tree field) { struct init_node *p; if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (constructor_incremental && tree_int_cst_lt (field, constructor_unfilled_index)) set_nonincremental_init (); p = constructor_pending_elts; while (p) { if (tree_int_cst_lt (field, p->purpose)) p = p->left; else if (tree_int_cst_lt (p->purpose, field)) p = p->right; else return p->value; } } else if (TREE_CODE (constructor_type) == RECORD_TYPE) { tree bitpos = bit_position (field); if (constructor_incremental && (!constructor_unfilled_fields || tree_int_cst_lt (bitpos, bit_position (constructor_unfilled_fields)))) set_nonincremental_init (); p = constructor_pending_elts; while (p) { if (field == p->purpose) return p->value; else if (tree_int_cst_lt (bitpos, bit_position (p->purpose))) p = p->left; else p = p->right; } } else if (TREE_CODE (constructor_type) == UNION_TYPE) { if (constructor_elements && TREE_PURPOSE (constructor_elements) == field) return TREE_VALUE (constructor_elements); } return 0; } /* "Output" the next constructor element. 
At top level, really output it to assembler code now. Otherwise, collect it in a list from which we will make a CONSTRUCTOR. TYPE is the data type that the containing data type wants here. FIELD is the field (a FIELD_DECL) or the index that this element fills. PENDING if non-nil means output pending elements that belong right after this element. (PENDING is normally 1; it is 0 while outputting pending elements, to avoid recursion.) */ static void output_init_element (tree value, tree type, tree field, int pending) { if (type == error_mark_node) { constructor_erroneous = 1; return; } if (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE || (TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE && !(TREE_CODE (value) == STRING_CST && TREE_CODE (type) == ARRAY_TYPE && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE) && !comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (value)), TYPE_MAIN_VARIANT (type)))) value = default_conversion (value); if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR && require_constant_value && !flag_isoc99 && pending) { /* As an extension, allow initializing objects with static storage duration with compound literals (which are then treated just as the brace enclosed list they contain). */ tree decl = COMPOUND_LITERAL_EXPR_DECL (value); value = DECL_INITIAL (decl); } if (value == error_mark_node) constructor_erroneous = 1; else if (!TREE_CONSTANT (value)) constructor_constant = 0; else if (initializer_constant_valid_p (value, TREE_TYPE (value)) == 0 || ((TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) && DECL_C_BIT_FIELD (field) && TREE_CODE (value) != INTEGER_CST)) constructor_simple = 0; if (require_constant_value && ! TREE_CONSTANT (value)) { error_init ("initializer element is not constant"); value = error_mark_node; } else if (require_constant_elements && initializer_constant_valid_p (value, TREE_TYPE (value)) == 0) pedwarn ("initializer element is not computable at load time"); /* If this field is empty (and not at the end of structure), don't do anything other than checking the initializer. */ if (field && (TREE_TYPE (field) == error_mark_node || (COMPLETE_TYPE_P (TREE_TYPE (field)) && integer_zerop (TYPE_SIZE (TREE_TYPE (field))) && (TREE_CODE (constructor_type) == ARRAY_TYPE || TREE_CHAIN (field))))) return; value = digest_init (type, value, require_constant_value); if (value == error_mark_node) { constructor_erroneous = 1; return; } /* If this element doesn't come next in sequence, put it on constructor_pending_elts. */ if (TREE_CODE (constructor_type) == ARRAY_TYPE && (!constructor_incremental || !tree_int_cst_equal (field, constructor_unfilled_index))) { if (constructor_incremental && tree_int_cst_lt (field, constructor_unfilled_index)) set_nonincremental_init (); add_pending_init (field, value); return; } else if (TREE_CODE (constructor_type) == RECORD_TYPE && (!constructor_incremental || field != constructor_unfilled_fields)) { /* We do this for records but not for unions. In a union, no matter which field is specified, it can be initialized right away since it starts at the beginning of the union. 
*/ if (constructor_incremental) { if (!constructor_unfilled_fields) set_nonincremental_init (); else { tree bitpos, unfillpos; bitpos = bit_position (field); unfillpos = bit_position (constructor_unfilled_fields); if (tree_int_cst_lt (bitpos, unfillpos)) set_nonincremental_init (); } } add_pending_init (field, value); return; } else if (TREE_CODE (constructor_type) == UNION_TYPE && constructor_elements) { if (TREE_SIDE_EFFECTS (TREE_VALUE (constructor_elements))) warning_init ("initialized field with side-effects overwritten"); /* We can have just one union field set. */ constructor_elements = 0; } /* Otherwise, output this element either to constructor_elements or to the assembler file. */ if (field && TREE_CODE (field) == INTEGER_CST) field = copy_node (field); constructor_elements = tree_cons (field, value, constructor_elements); /* Advance the variable that indicates sequential elements output. */ if (TREE_CODE (constructor_type) == ARRAY_TYPE) constructor_unfilled_index = size_binop (PLUS_EXPR, constructor_unfilled_index, bitsize_one_node); else if (TREE_CODE (constructor_type) == RECORD_TYPE) { constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields); /* Skip any nameless bit fields. */ while (constructor_unfilled_fields != 0 && DECL_C_BIT_FIELD (constructor_unfilled_fields) && DECL_NAME (constructor_unfilled_fields) == 0) constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields); } else if (TREE_CODE (constructor_type) == UNION_TYPE) constructor_unfilled_fields = 0; /* Now output any pending elements which have become next. */ if (pending) output_pending_init_elements (0); } /* Output any pending elements which have become next. As we output elements, constructor_unfilled_{fields,index} advances, which may cause other elements to become next; if so, they too are output. If ALL is 0, we return when there are no more pending elements to output now. If ALL is 1, we output space as necessary so that we can output all the pending elements. */ static void output_pending_init_elements (int all) { struct init_node *elt = constructor_pending_elts; tree next; retry: /* Look through the whole pending tree. If we find an element that should be output now, output it. Otherwise, set NEXT to the element that comes first among those still pending. */ next = 0; while (elt) { if (TREE_CODE (constructor_type) == ARRAY_TYPE) { if (tree_int_cst_equal (elt->purpose, constructor_unfilled_index)) output_init_element (elt->value, TREE_TYPE (constructor_type), constructor_unfilled_index, 0); else if (tree_int_cst_lt (constructor_unfilled_index, elt->purpose)) { /* Advance to the next smaller node. */ if (elt->left) elt = elt->left; else { /* We have reached the smallest node bigger than the current unfilled index. Fill the space first. */ next = elt->purpose; break; } } else { /* Advance to the next bigger node. */ if (elt->right) elt = elt->right; else { /* We have reached the biggest node in a subtree. Find the parent of it, which is the next bigger node. */ while (elt->parent && elt->parent->right == elt) elt = elt->parent; elt = elt->parent; if (elt && tree_int_cst_lt (constructor_unfilled_index, elt->purpose)) { next = elt->purpose; break; } } } } else if (TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) { tree ctor_unfilled_bitpos, elt_bitpos; /* If the current record is complete we are done. 
*/ if (constructor_unfilled_fields == 0) break; ctor_unfilled_bitpos = bit_position (constructor_unfilled_fields); elt_bitpos = bit_position (elt->purpose); /* We can't compare fields here because there might be empty fields in between. */ if (tree_int_cst_equal (elt_bitpos, ctor_unfilled_bitpos)) { constructor_unfilled_fields = elt->purpose; output_init_element (elt->value, TREE_TYPE (elt->purpose), elt->purpose, 0); } else if (tree_int_cst_lt (ctor_unfilled_bitpos, elt_bitpos)) { /* Advance to the next smaller node. */ if (elt->left) elt = elt->left; else { /* We have reached the smallest node bigger than the current unfilled field. Fill the space first. */ next = elt->purpose; break; } } else { /* Advance to the next bigger node. */ if (elt->right) elt = elt->right; else { /* We have reached the biggest node in a subtree. Find the parent of it, which is the next bigger node. */ while (elt->parent && elt->parent->right == elt) elt = elt->parent; elt = elt->parent; if (elt && (tree_int_cst_lt (ctor_unfilled_bitpos, bit_position (elt->purpose)))) { next = elt->purpose; break; } } } } } /* Ordinarily return, but not if we want to output all and there are elements left. */ if (! (all && next != 0)) return; /* If it's not incremental, just skip over the gap, so that after jumping to retry we will output the next successive element. */ if (TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) constructor_unfilled_fields = next; else if (TREE_CODE (constructor_type) == ARRAY_TYPE) constructor_unfilled_index = next; /* ELT now points to the node in the pending tree with the next initializer to output. */ goto retry; } /* Add one non-braced element to the current constructor level. This adjusts the current position within the constructor's type. This may also start or terminate implicit levels to handle a partly-braced initializer. Once this has found the correct level for the new element, it calls output_init_element. */ void process_init_element (tree value) { tree orig_value = value; int string_flag = value != 0 && TREE_CODE (value) == STRING_CST; designator_depth = 0; designator_errorneous = 0; /* Handle superfluous braces around string cst as in char x[] = {"foo"}; */ if (string_flag && constructor_type && TREE_CODE (constructor_type) == ARRAY_TYPE && TREE_CODE (TREE_TYPE (constructor_type)) == INTEGER_TYPE && integer_zerop (constructor_unfilled_index)) { if (constructor_stack->replacement_value) error_init ("excess elements in char array initializer"); constructor_stack->replacement_value = value; return; } if (constructor_stack->replacement_value != 0) { error_init ("excess elements in struct initializer"); return; } /* Ignore elements of a brace group if it is entirely superfluous and has already been diagnosed. */ if (constructor_type == 0) return; /* If we've exhausted any levels that didn't have braces, pop them now. */ while (constructor_stack->implicit) { if ((TREE_CODE (constructor_type) == RECORD_TYPE || TREE_CODE (constructor_type) == UNION_TYPE) && constructor_fields == 0) process_init_element (pop_init_level (1)); else if (TREE_CODE (constructor_type) == ARRAY_TYPE && (constructor_max_index == 0 || tree_int_cst_lt (constructor_max_index, constructor_index))) process_init_element (pop_init_level (1)); else break; } /* In the case of [LO ... HI] = VALUE, only evaluate VALUE once. */ if (constructor_range_stack) { /* If value is a compound literal and we'll be just using its content, don't put it into a SAVE_EXPR. 
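   For instance, with a static-duration object such as
   `static struct s a[4] = { [0 ... 3] = (struct s){ 1 } };' the compound
   literal is replaced by its own brace list for every index, so wrapping it
   in a SAVE_EXPR would gain nothing.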
*/ if (TREE_CODE (value) != COMPOUND_LITERAL_EXPR || !require_constant_value || flag_isoc99) value = save_expr (value); } while (1) { if (TREE_CODE (constructor_type) == RECORD_TYPE) { tree fieldtype; enum tree_code fieldcode; if (constructor_fields == 0) { pedwarn_init ("excess elements in struct initializer"); break; } fieldtype = TREE_TYPE (constructor_fields); if (fieldtype != error_mark_node) fieldtype = TYPE_MAIN_VARIANT (fieldtype); fieldcode = TREE_CODE (fieldtype); /* Error for non-static initialization of a flexible array member. */ if (fieldcode == ARRAY_TYPE && !require_constant_value && TYPE_SIZE (fieldtype) == NULL_TREE && TREE_CHAIN (constructor_fields) == NULL_TREE) { error_init ("non-static initialization of a flexible array member"); break; } /* Accept a string constant to initialize a subarray. */ if (value != 0 && fieldcode == ARRAY_TYPE && TREE_CODE (TREE_TYPE (fieldtype)) == INTEGER_TYPE && string_flag) value = orig_value; /* Otherwise, if we have come to a subaggregate, and we don't have an element of its type, push into it. */ else if (value != 0 && !constructor_no_implicit && value != error_mark_node && TYPE_MAIN_VARIANT (TREE_TYPE (value)) != fieldtype && (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE || fieldcode == UNION_TYPE)) { push_init_level (1); continue; } if (value) { push_member_name (constructor_fields); output_init_element (value, fieldtype, constructor_fields, 1); RESTORE_SPELLING_DEPTH (constructor_depth); } else /* Do the bookkeeping for an element that was directly output as a constructor. */ { /* For a record, keep track of end position of last field. */ if (DECL_SIZE (constructor_fields)) constructor_bit_index = size_binop (PLUS_EXPR, bit_position (constructor_fields), DECL_SIZE (constructor_fields)); /* If the current field was the first one not yet written out, it isn't now, so update. */ if (constructor_unfilled_fields == constructor_fields) { constructor_unfilled_fields = TREE_CHAIN (constructor_fields); /* Skip any nameless bit fields. */ while (constructor_unfilled_fields != 0 && DECL_C_BIT_FIELD (constructor_unfilled_fields) && DECL_NAME (constructor_unfilled_fields) == 0) constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields); } } constructor_fields = TREE_CHAIN (constructor_fields); /* Skip any nameless bit fields at the beginning. */ while (constructor_fields != 0 && DECL_C_BIT_FIELD (constructor_fields) && DECL_NAME (constructor_fields) == 0) constructor_fields = TREE_CHAIN (constructor_fields); } else if (TREE_CODE (constructor_type) == UNION_TYPE) { tree fieldtype; enum tree_code fieldcode; if (constructor_fields == 0) { pedwarn_init ("excess elements in union initializer"); break; } fieldtype = TREE_TYPE (constructor_fields); if (fieldtype != error_mark_node) fieldtype = TYPE_MAIN_VARIANT (fieldtype); fieldcode = TREE_CODE (fieldtype); /* Warn that traditional C rejects initialization of unions. We skip the warning if the value is zero. This is done under the assumption that the zero initializer in user code appears conditioned on e.g. __STDC__ to avoid "missing initializer" warnings and relies on default initialization to zero in the traditional C case. We also skip the warning if the initializer is designated, again on the assumption that this must be conditional on __STDC__ anyway (and we've already complained about the member-designator already). 
*/ if (warn_traditional && !in_system_header && !constructor_designated && !(value && (integer_zerop (value) || real_zerop (value)))) warning ("traditional C rejects initialization of unions"); /* Accept a string constant to initialize a subarray. */ if (value != 0 && fieldcode == ARRAY_TYPE && TREE_CODE (TREE_TYPE (fieldtype)) == INTEGER_TYPE && string_flag) value = orig_value; /* Otherwise, if we have come to a subaggregate, and we don't have an element of its type, push into it. */ else if (value != 0 && !constructor_no_implicit && value != error_mark_node && TYPE_MAIN_VARIANT (TREE_TYPE (value)) != fieldtype && (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE || fieldcode == UNION_TYPE)) { push_init_level (1); continue; } if (value) { push_member_name (constructor_fields); output_init_element (value, fieldtype, constructor_fields, 1); RESTORE_SPELLING_DEPTH (constructor_depth); } else /* Do the bookkeeping for an element that was directly output as a constructor. */ { constructor_bit_index = DECL_SIZE (constructor_fields); constructor_unfilled_fields = TREE_CHAIN (constructor_fields); } constructor_fields = 0; } else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type)); enum tree_code eltcode = TREE_CODE (elttype); /* Accept a string constant to initialize a subarray. */ if (value != 0 && eltcode == ARRAY_TYPE && TREE_CODE (TREE_TYPE (elttype)) == INTEGER_TYPE && string_flag) value = orig_value; /* Otherwise, if we have come to a subaggregate, and we don't have an element of its type, push into it. */ else if (value != 0 && !constructor_no_implicit && value != error_mark_node && TYPE_MAIN_VARIANT (TREE_TYPE (value)) != elttype && (eltcode == RECORD_TYPE || eltcode == ARRAY_TYPE || eltcode == UNION_TYPE)) { push_init_level (1); continue; } if (constructor_max_index != 0 && (tree_int_cst_lt (constructor_max_index, constructor_index) || integer_all_onesp (constructor_max_index))) { pedwarn_init ("excess elements in array initializer"); break; } /* Now output the actual element. */ if (value) { push_array_bounds (tree_low_cst (constructor_index, 0)); output_init_element (value, elttype, constructor_index, 1); RESTORE_SPELLING_DEPTH (constructor_depth); } constructor_index = size_binop (PLUS_EXPR, constructor_index, bitsize_one_node); if (! value) /* If we are doing the bookkeeping for an element that was directly output as a constructor, we must update constructor_unfilled_index. */ constructor_unfilled_index = constructor_index; } else if (TREE_CODE (constructor_type) == VECTOR_TYPE) { tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type)); /* Do a basic check of initializer size. Note that vectors always have a fixed size derived from their type. */ if (tree_int_cst_lt (constructor_max_index, constructor_index)) { pedwarn_init ("excess elements in vector initializer"); break; } /* Now output the actual element. */ if (value) output_init_element (value, elttype, constructor_index, 1); constructor_index = size_binop (PLUS_EXPR, constructor_index, bitsize_one_node); if (! value) /* If we are doing the bookkeeping for an element that was directly output as a constructor, we must update constructor_unfilled_index. */ constructor_unfilled_index = constructor_index; } /* Handle the sole element allowed in a braced initializer for a scalar variable. 
*/ else if (constructor_fields == 0) { pedwarn_init ("excess elements in scalar initializer"); break; } else { if (value) output_init_element (value, constructor_type, NULL_TREE, 1); constructor_fields = 0; } /* Handle range initializers either at this level or anywhere higher in the designator stack. */ if (constructor_range_stack) { struct constructor_range_stack *p, *range_stack; int finish = 0; range_stack = constructor_range_stack; constructor_range_stack = 0; while (constructor_stack != range_stack->stack) { if (!constructor_stack->implicit) abort (); process_init_element (pop_init_level (1)); } for (p = range_stack; !p->range_end || tree_int_cst_equal (p->index, p->range_end); p = p->prev) { if (!constructor_stack->implicit) abort (); process_init_element (pop_init_level (1)); } p->index = size_binop (PLUS_EXPR, p->index, bitsize_one_node); if (tree_int_cst_equal (p->index, p->range_end) && !p->prev) finish = 1; while (1) { constructor_index = p->index; constructor_fields = p->fields; if (finish && p->range_end && p->index == p->range_start) { finish = 0; p->prev = 0; } p = p->next; if (!p) break; push_init_level (2); p->stack = constructor_stack; if (p->range_end && tree_int_cst_equal (p->index, p->range_end)) p->index = p->range_start; } if (!finish) constructor_range_stack = range_stack; continue; } break; } constructor_range_stack = 0; } /* Build a complete asm-statement, whose components are a CV_QUALIFIER (guaranteed to be 'volatile' or null) and ARGS (represented using an ASM_EXPR node). */ tree build_asm_stmt (tree cv_qualifier, tree args) { if (!ASM_VOLATILE_P (args) && cv_qualifier) ASM_VOLATILE_P (args) = 1; return add_stmt (args); } /* Build an asm-expr, whose components are a STRING, some OUTPUTS, some INPUTS, and some CLOBBERS. The latter three may be NULL. SIMPLE indicates whether there was anything at all after the string in the asm expression -- asm("blah") and asm("blah" : ) are subtly different. We use a ASM_EXPR node to represent this. */ tree build_asm_expr (tree string, tree outputs, tree inputs, tree clobbers, bool simple) { tree tail; tree args; int i; const char *constraint; bool allows_mem, allows_reg, is_inout; int ninputs; int noutputs; ninputs = list_length (inputs); noutputs = list_length (outputs); /* Remove output conversions that change the type but not the mode. */ for (i = 0, tail = outputs; tail; ++i, tail = TREE_CHAIN (tail)) { tree output = TREE_VALUE (tail); STRIP_NOPS (output); TREE_VALUE (tail) = output; lvalue_or_else (output, "invalid lvalue in asm statement"); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail))); if (!parse_output_constraint (&constraint, i, ninputs, noutputs, &allows_mem, &allows_reg, &is_inout)) { /* By marking this operand as erroneous, we will not try to process this operand again in expand_asm_operands. */ TREE_VALUE (tail) = error_mark_node; continue; } /* If the operand is a DECL that is going to end up in memory, assume it is addressable. This is a bit more conservative than it would ideally be; the exact test is buried deep in expand_asm_operands and depends on the DECL_RTL for the OPERAND -- which we don't have at this point. */ if (!allows_reg && DECL_P (output)) c_mark_addressable (output); } /* Perform default conversions on array and function inputs. Don't do this for other types as it would screw up operands expected to be in memory. 
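   E.g. an array input operand like `buf' decays to a pointer here, while a
   struct operand that an "m" constraint expects to stay in memory is left
   untouched.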
*/ for (tail = inputs; tail; tail = TREE_CHAIN (tail)) TREE_VALUE (tail) = default_function_array_conversion (TREE_VALUE (tail)); args = build_stmt (ASM_EXPR, string, outputs, inputs, clobbers); /* Simple asm statements are treated as volatile. */ if (simple) { ASM_VOLATILE_P (args) = 1; ASM_INPUT_P (args) = 1; } return args; } /* Expand an ASM statement with operands, handling output operands that are not variables or INDIRECT_REFS by transforming such cases into cases that expand_asm_operands can handle. Arguments are same as for expand_asm_operands. */ void c_expand_asm_operands (tree string, tree outputs, tree inputs, tree clobbers, int vol, location_t locus) { int noutputs = list_length (outputs); int i; /* o[I] is the place that output number I should be written. */ tree *o = alloca (noutputs * sizeof (tree)); tree tail; /* Record the contents of OUTPUTS before it is modified. */ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++) { o[i] = TREE_VALUE (tail); if (o[i] == error_mark_node) return; } /* Generate the ASM_OPERANDS insn; store into the TREE_VALUEs of OUTPUTS some trees for where the values were actually stored. */ expand_asm_operands (string, outputs, inputs, clobbers, vol, locus); /* Copy all the intermediate outputs into the specified outputs. */ for (i = 0, tail = outputs; tail; tail = TREE_CHAIN (tail), i++) { if (o[i] != TREE_VALUE (tail)) { expand_expr (build_modify_expr (o[i], NOP_EXPR, TREE_VALUE (tail)), NULL_RTX, VOIDmode, EXPAND_NORMAL); free_temp_slots (); /* Restore the original value so that it's correct the next time we expand this function. */ TREE_VALUE (tail) = o[i]; } /* Detect modification of read-only values. (Otherwise done by build_modify_expr.) */ else { tree type = TREE_TYPE (o[i]); if (TREE_READONLY (o[i]) || TYPE_READONLY (type) || ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (type))) readonly_error (o[i], "modification by `asm'"); } } /* Those MODIFY_EXPRs could do autoincrements. */ emit_queue (); } /* Generate a goto statement to LABEL. */ tree c_finish_goto_label (tree label) { tree decl = lookup_label (label); if (!decl) return NULL_TREE; TREE_USED (decl) = 1; return add_stmt (build (GOTO_EXPR, void_type_node, decl)); } /* Generate a computed goto statement to EXPR. */ tree c_finish_goto_ptr (tree expr) { if (pedantic) pedwarn ("ISO C forbids `goto *expr;'"); expr = convert (ptr_type_node, expr); return add_stmt (build (GOTO_EXPR, void_type_node, expr)); } /* Generate a C `return' statement. RETVAL is the expression for what to return, or a null pointer for `return;' with no value. 
*/ tree c_finish_return (tree retval) { tree valtype = TREE_TYPE (TREE_TYPE (current_function_decl)); if (TREE_THIS_VOLATILE (current_function_decl)) warning ("function declared `noreturn' has a `return' statement"); if (!retval) { current_function_returns_null = 1; if ((warn_return_type || flag_isoc99) && valtype != 0 && TREE_CODE (valtype) != VOID_TYPE) pedwarn_c99 ("`return' with no value, in function returning non-void"); } else if (valtype == 0 || TREE_CODE (valtype) == VOID_TYPE) { current_function_returns_null = 1; if (pedantic || TREE_CODE (TREE_TYPE (retval)) != VOID_TYPE) pedwarn ("`return' with a value, in function returning void"); } else { tree t = convert_for_assignment (valtype, retval, _("return"), NULL_TREE, NULL_TREE, 0); tree res = DECL_RESULT (current_function_decl); tree inner; current_function_returns_value = 1; if (t == error_mark_node) return NULL_TREE; inner = t = convert (TREE_TYPE (res), t); /* Strip any conversions, additions, and subtractions, and see if we are returning the address of a local variable. Warn if so. */ while (1) { switch (TREE_CODE (inner)) { case NOP_EXPR: case NON_LVALUE_EXPR: case CONVERT_EXPR: case PLUS_EXPR: inner = TREE_OPERAND (inner, 0); continue; case MINUS_EXPR: /* If the second operand of the MINUS_EXPR has a pointer type (or is converted from it), this may be valid, so don't give a warning. */ { tree op1 = TREE_OPERAND (inner, 1); while (! POINTER_TYPE_P (TREE_TYPE (op1)) && (TREE_CODE (op1) == NOP_EXPR || TREE_CODE (op1) == NON_LVALUE_EXPR || TREE_CODE (op1) == CONVERT_EXPR)) op1 = TREE_OPERAND (op1, 0); if (POINTER_TYPE_P (TREE_TYPE (op1))) break; inner = TREE_OPERAND (inner, 0); continue; } case ADDR_EXPR: inner = TREE_OPERAND (inner, 0); while (TREE_CODE_CLASS (TREE_CODE (inner)) == 'r') inner = TREE_OPERAND (inner, 0); if (DECL_P (inner) && ! DECL_EXTERNAL (inner) && ! TREE_STATIC (inner) && DECL_CONTEXT (inner) == current_function_decl) warning ("function returns address of local variable"); break; default: break; } break; } retval = build (MODIFY_EXPR, TREE_TYPE (res), res, t); } return add_stmt (build_stmt (RETURN_EXPR, retval)); } struct c_switch { /* The SWITCH_STMT being built. */ tree switch_stmt; /* A splay-tree mapping the low element of a case range to the high element, or NULL_TREE if there is no high element. Used to determine whether or not a new case label duplicates an old case label. We need a tree, rather than simply a hash table, because of the GNU case range extension. */ splay_tree cases; /* The next node on the stack. */ struct c_switch *next; }; /* A stack of the currently active switch statements. The innermost switch statement is on the top of the stack. There is no need to mark the stack for garbage collection because it is only active during the processing of the body of a function, and we never collect at that point. */ struct c_switch *c_switch_stack; /* Start a C switch statement, testing expression EXP. Return the new SWITCH_STMT. */ tree c_start_case (tree exp) { enum tree_code code; tree type, orig_type = error_mark_node; struct c_switch *cs; if (exp != error_mark_node) { code = TREE_CODE (TREE_TYPE (exp)); orig_type = TREE_TYPE (exp); if (! 
INTEGRAL_TYPE_P (orig_type) && code != ERROR_MARK) { error ("switch quantity not an integer"); exp = integer_zero_node; } else { type = TYPE_MAIN_VARIANT (TREE_TYPE (exp)); if (warn_traditional && !in_system_header && (type == long_integer_type_node || type == long_unsigned_type_node)) warning ("`long' switch expression not converted to `int' in ISO C"); exp = default_conversion (exp); type = TREE_TYPE (exp); } } /* Add this new SWITCH_STMT to the stack. */ cs = xmalloc (sizeof (*cs)); cs->switch_stmt = build_stmt (SWITCH_STMT, exp, NULL_TREE, orig_type); cs->cases = splay_tree_new (case_compare, NULL, NULL); cs->next = c_switch_stack; c_switch_stack = cs; return add_stmt (cs->switch_stmt); } /* Process a case label. */ tree do_case (tree low_value, tree high_value) { tree label = NULL_TREE; if (c_switch_stack) { label = c_add_case_label (c_switch_stack->cases, SWITCH_COND (c_switch_stack->switch_stmt), low_value, high_value); if (label == error_mark_node) label = NULL_TREE; } else if (low_value) error ("case label not within a switch statement"); else error ("`default' label not within a switch statement"); return label; } /* Finish the switch statement. */ void c_finish_case (tree body) { struct c_switch *cs = c_switch_stack; SWITCH_BODY (cs->switch_stmt) = body; /* Emit warnings as needed. */ c_do_switch_warnings (cs->cases, cs->switch_stmt); /* Pop the stack. */ c_switch_stack = cs->next; splay_tree_delete (cs->cases); free (cs); } /* Emit an if statement. IF_LOCUS is the location of the 'if'. COND, THEN_BLOCK and ELSE_BLOCK are expressions to be used; ELSE_BLOCK may be null. NESTED_IF is true if THEN_BLOCK contains another IF statement, and was not surrounded with parenthesis. */ void c_finish_if_stmt (location_t if_locus, tree cond, tree then_block, tree else_block, bool nested_if) { tree stmt; /* Diagnose an ambiguous else if if-then-else is nested inside if-then. */ if (warn_parentheses && nested_if && else_block == NULL) { tree inner_if = then_block; /* We know from the grammar productions that there is an IF nested within THEN_BLOCK. Due to labels and c99 conditional declarations, it might not be exactly THEN_BLOCK, but should be the last non-container statement within. */ while (1) switch (TREE_CODE (inner_if)) { case COND_EXPR: goto found; case BIND_EXPR: inner_if = BIND_EXPR_BODY (inner_if); break; case STATEMENT_LIST: inner_if = expr_last (then_block); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: inner_if = TREE_OPERAND (inner_if, 0); break; default: abort (); } found: if (COND_EXPR_ELSE (inner_if)) warning ("%Hsuggest explicit braces to avoid ambiguous `else'", &if_locus); } /* Diagnose ";" via the special empty statement node that we create. */ if (extra_warnings) { if (TREE_CODE (then_block) == NOP_EXPR && !TREE_TYPE (then_block)) { if (!else_block) warning ("%Hempty body in an if-statement", EXPR_LOCUS (then_block)); then_block = alloc_stmt_list (); } if (else_block && TREE_CODE (else_block) == NOP_EXPR && !TREE_TYPE (else_block)) { warning ("%Hempty body in an else-statement", EXPR_LOCUS (else_block)); else_block = alloc_stmt_list (); } } stmt = build3 (COND_EXPR, NULL_TREE, cond, then_block, else_block); SET_EXPR_LOCATION (stmt, if_locus); add_stmt (stmt); } /* Emit a general-purpose loop construct. START_LOCUS is the location of the beginning of the loop. COND is the loop condition. COND_IS_FIRST is false for DO loops. INCR is the FOR increment expression. BODY is the statement controlled by the loop. BLAB is the break label. CLAB is the continue label. 
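As a rough illustration (a hand-written C approximation of the emitted shape, not the actual trees; names are arbitrary), a loop such as `while (cond ()) body ();' is lowered with the condition test at the bottom and an initial jump to it:

      extern int cond (void);
      extern void body (void);
      void loop_shape (void)
      {
        goto entry;
       top:
        body ();
       entry:
        if (cond ()) goto top;
      }

   Falling out of the `if' corresponds to reaching the break label BLAB.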
Everything is allowed to be NULL. */ void c_finish_loop (location_t start_locus, tree cond, tree incr, tree body, tree blab, tree clab, bool cond_is_first) { tree entry = NULL, exit = NULL, t; /* Detect do { ... } while (0) and don't generate loop construct. */ if (cond && !cond_is_first && integer_zerop (cond)) cond = NULL; if (cond_is_first || cond) { tree top = build1 (LABEL_EXPR, void_type_node, NULL_TREE); /* If we have an exit condition, then we build an IF with gotos either out of the loop, or to the top of it. If there's no exit condition, then we just build a jump back to the top. */ exit = build_and_jump (&LABEL_EXPR_LABEL (top)); if (cond) { /* Canonicalize the loop condition to the end. This means generating a branch to the loop condition. Reuse the continue label, if possible. */ if (cond_is_first) { if (incr || !clab) { entry = build1 (LABEL_EXPR, void_type_node, NULL_TREE); t = build_and_jump (&LABEL_EXPR_LABEL (entry)); } else t = build1 (GOTO_EXPR, void_type_node, clab); SET_EXPR_LOCATION (t, start_locus); add_stmt (t); } t = build_and_jump (&blab); exit = build (COND_EXPR, void_type_node, cond, exit, t); exit = fold (exit); if (cond_is_first) SET_EXPR_LOCATION (exit, start_locus); else SET_EXPR_LOCATION (exit, input_location); } add_stmt (top); } if (body) add_stmt (body); if (clab) add_stmt (build1 (LABEL_EXPR, void_type_node, clab)); if (incr) add_stmt (incr); if (entry) add_stmt (entry); if (exit) add_stmt (exit); if (blab) add_stmt (build1 (LABEL_EXPR, void_type_node, blab)); } tree c_finish_bc_stmt (tree *label_p, bool is_break) { tree label = *label_p; if (!label) *label_p = label = create_artificial_label (); else if (TREE_CODE (label) != LABEL_DECL) { if (is_break) error ("break statement not within loop or switch"); else error ("continue statement not within a loop"); return NULL_TREE; } return add_stmt (build (GOTO_EXPR, void_type_node, label)); } /* A helper routine for c_process_expr_stmt and c_finish_stmt_expr. */ static void emit_side_effect_warnings (tree expr) { if (expr == error_mark_node) ; else if (!TREE_SIDE_EFFECTS (expr)) { if (!VOID_TYPE_P (TREE_TYPE (expr)) && !TREE_NO_WARNING (expr)) warning ("%Hstatement with no effect", EXPR_LOCUS (expr) ? EXPR_LOCUS (expr) : &input_location); } else if (warn_unused_value) warn_if_unused_value (expr, input_location); } /* Process an expression as if it were a complete statement. Emit diagnostics, but do not call ADD_STMT. */ tree c_process_expr_stmt (tree expr) { if (!expr) return NULL_TREE; /* Do default conversion if safe and possibly important, in case within ({...}). */ if ((TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE && (flag_isoc99 || lvalue_p (expr))) || TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE) expr = default_conversion (expr); if (warn_sequence_point) verify_sequence_points (expr); if (TREE_TYPE (expr) != error_mark_node && !COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (expr)) && TREE_CODE (TREE_TYPE (expr)) != ARRAY_TYPE) error ("expression statement has incomplete type"); /* If we're not processing a statement expression, warn about unused values. Warnings for statement expressions will be emitted later, once we figure out which is the result. */ if (!STATEMENT_LIST_STMT_EXPR (cur_stmt_list) && (extra_warnings || warn_unused_value)) emit_side_effect_warnings (expr); /* If the expression is not of a type to which we cannot assign a line number, wrap the thing in a no-op NOP_EXPR. 
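For instance (illustrative only), in

      void g (int x) { x; 0; }

   the two expression statements are a bare DECL and a constant, neither of which can carry a location of its own, so each is wrapped here before SET_EXPR_LOCATION is applied.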
*/ if (DECL_P (expr) || TREE_CODE_CLASS (TREE_CODE (expr)) == 'c') expr = build1 (NOP_EXPR, TREE_TYPE (expr), expr); if (EXPR_P (expr)) SET_EXPR_LOCATION (expr, input_location); return expr; } /* Emit an expression as a statement. */ tree c_finish_expr_stmt (tree expr) { if (expr) return add_stmt (c_process_expr_stmt (expr)); else return NULL; } /* Do the opposite and emit a statement as an expression. To begin, create a new binding level and return it. */ tree c_begin_stmt_expr (void) { tree ret; /* We must force a BLOCK for this level so that, if it is not expanded later, there is a way to turn off the entire subtree of blocks that are contained in it. */ keep_next_level (); ret = c_begin_compound_stmt (true); /* Mark the current statement list as belonging to a statement list. */ STATEMENT_LIST_STMT_EXPR (ret) = 1; return ret; } tree c_finish_stmt_expr (tree body) { tree last, type, tmp, val; tree *last_p; body = c_end_compound_stmt (body, true); /* Locate the last statement in BODY. See c_end_compound_stmt about always returning a BIND_EXPR. */ last_p = &BIND_EXPR_BODY (body); last = BIND_EXPR_BODY (body); continue_searching: if (TREE_CODE (last) == STATEMENT_LIST) { tree_stmt_iterator i; /* This can happen with degenerate cases like ({ }). No value. */ if (!TREE_SIDE_EFFECTS (last)) return body; /* If we're supposed to generate side effects warnings, process all of the statements except the last. */ if (extra_warnings || warn_unused_value) { for (i = tsi_start (last); !tsi_one_before_end_p (i); tsi_next (&i)) emit_side_effect_warnings (tsi_stmt (i)); } else i = tsi_last (last); last_p = tsi_stmt_ptr (i); last = *last_p; } /* If the end of the list is exception related, then the list was split by a call to push_cleanup. Continue searching. */ if (TREE_CODE (last) == TRY_FINALLY_EXPR || TREE_CODE (last) == TRY_CATCH_EXPR) { last_p = &TREE_OPERAND (last, 0); last = *last_p; goto continue_searching; } /* In the case that the BIND_EXPR is not necessary, return the expression out from inside it. */ if (last == error_mark_node || (last == BIND_EXPR_BODY (body) && BIND_EXPR_VARS (body) == NULL)) return last; /* Extract the type of said expression. */ type = TREE_TYPE (last); /* If we're not returning a value at all, then the BIND_EXPR that we already have is a fine expression to return. */ if (!type || VOID_TYPE_P (type)) return body; /* Now that we've located the expression containing the value, it seems silly to make voidify_wrapper_expr repeat the process. Create a temporary of the appropriate type and stick it in a TARGET_EXPR. */ tmp = create_tmp_var_raw (type, NULL); /* Unwrap a no-op NOP_EXPR as added by c_finish_expr_stmt. This avoids tree_expr_nonnegative_p giving up immediately. */ val = last; if (TREE_CODE (val) == NOP_EXPR && TREE_TYPE (val) == TREE_TYPE (TREE_OPERAND (val, 0))) val = TREE_OPERAND (val, 0); *last_p = build (MODIFY_EXPR, void_type_node, tmp, val); SET_EXPR_LOCUS (*last_p, EXPR_LOCUS (last)); return build (TARGET_EXPR, type, tmp, body, NULL_TREE, NULL_TREE); } /* Begin and end compound statements. This is as simple as pushing and popping new statement lists from the tree. 
*/ tree c_begin_compound_stmt (bool do_scope) { tree stmt = push_stmt_list (); if (do_scope) push_scope (); return stmt; } tree c_end_compound_stmt (tree stmt, bool do_scope) { tree block = NULL; if (do_scope) { if (c_dialect_objc ()) objc_clear_super_receiver (); block = pop_scope (); } stmt = pop_stmt_list (stmt); stmt = c_build_bind_expr (block, stmt); /* If this compound statement is nested immediately inside a statement expression, then force a BIND_EXPR to be created. Otherwise we'll do the wrong thing for ({ { 1; } }) or ({ 1; { } }). In particular, STATEMENT_LISTs merge, and thus we can lose track of what statement was really last. */ if (cur_stmt_list && STATEMENT_LIST_STMT_EXPR (cur_stmt_list) && TREE_CODE (stmt) != BIND_EXPR) { stmt = build (BIND_EXPR, void_type_node, NULL, stmt, NULL); TREE_SIDE_EFFECTS (stmt) = 1; } return stmt; } /* Queue a cleanup. CLEANUP is an expression/statement to be executed when the current scope is exited. EH_ONLY is true when this is not meant to apply to normal control flow transfer. */ void push_cleanup (tree decl ATTRIBUTE_UNUSED, tree cleanup, bool eh_only) { enum tree_code code; tree stmt, list; bool stmt_expr; code = eh_only ? TRY_CATCH_EXPR : TRY_FINALLY_EXPR; stmt = build_stmt (code, NULL, cleanup); add_stmt (stmt); stmt_expr = STATEMENT_LIST_STMT_EXPR (cur_stmt_list); list = push_stmt_list (); TREE_OPERAND (stmt, 0) = list; STATEMENT_LIST_STMT_EXPR (list) = stmt_expr; } /* Build a binary-operation expression without default conversions. CODE is the kind of expression to build. This function differs from `build' in several ways: the data type of the result is computed and recorded in it, warnings are generated if arg data types are invalid, special handling for addition and subtraction of pointers is known, and some optimization is done (operations on narrow ints are done in the narrower type when that gives the same result). Constant folding is also done before the result is returned. Note that the operands will never have enumeral types, or function or array types, because either they will have the default conversions performed or they have both just been converted to some other type in which the arithmetic is to be done. */ tree build_binary_op (enum tree_code code, tree orig_op0, tree orig_op1, int convert_p) { tree type0, type1; enum tree_code code0, code1; tree op0, op1; /* Expression code to give to the expression when it is built. Normally this is CODE, which is what the caller asked for, but in some special cases we change it. */ enum tree_code resultcode = code; /* Data type in which the computation is to be performed. In the simplest cases this is the common type of the arguments. */ tree result_type = NULL; /* Nonzero means operands have already been type-converted in whatever way is necessary. Zero means they need to be converted to RESULT_TYPE. */ int converted = 0; /* Nonzero means create the expression with this type, rather than RESULT_TYPE. */ tree build_type = 0; /* Nonzero means after finally constructing the expression convert it to this type. */ tree final_type = 0; /* Nonzero if this is an operation like MIN or MAX which can safely be computed in short if both args are promoted shorts. Also implies COMMON. -1 indicates a bitwise operation; this makes a difference in the exact conditions for when it is safe to do the operation in a narrower mode. */ int shorten = 0; /* Nonzero if this is a comparison operation; if both args are promoted shorts, compare the original shorts. Also implies COMMON. 
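For example (illustrative only), in

      int lt (unsigned char a, unsigned char b) { return a < b; }

   both operands are promoted to `int' by the default conversions, but the comparison can still be carried out on the original 8-bit values.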
*/ int short_compare = 0; /* Nonzero if this is a right-shift operation, which can be computed on the original short and then promoted if the operand is a promoted short. */ int short_shift = 0; /* Nonzero means set RESULT_TYPE to the common type of the args. */ int common = 0; if (convert_p) { op0 = default_conversion (orig_op0); op1 = default_conversion (orig_op1); } else { op0 = orig_op0; op1 = orig_op1; } type0 = TREE_TYPE (op0); type1 = TREE_TYPE (op1); /* The expression codes of the data types of the arguments tell us whether the arguments are integers, floating, pointers, etc. */ code0 = TREE_CODE (type0); code1 = TREE_CODE (type1); /* Strip NON_LVALUE_EXPRs, etc., since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (op0); STRIP_TYPE_NOPS (op1); /* If an error was already reported for one of the arguments, avoid reporting another error. */ if (code0 == ERROR_MARK || code1 == ERROR_MARK) return error_mark_node; switch (code) { case PLUS_EXPR: /* Handle the pointer + int case. */ if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE) return pointer_int_sum (PLUS_EXPR, op0, op1); else if (code1 == POINTER_TYPE && code0 == INTEGER_TYPE) return pointer_int_sum (PLUS_EXPR, op1, op0); else common = 1; break; case MINUS_EXPR: /* Subtraction of two similar pointers. We must subtract them as integers, then divide by object size. */ if (code0 == POINTER_TYPE && code1 == POINTER_TYPE && comp_target_types (type0, type1, 1)) return pointer_diff (op0, op1); /* Handle pointer minus int. Just like pointer plus int. */ else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE) return pointer_int_sum (MINUS_EXPR, op0, op1); else common = 1; break; case MULT_EXPR: common = 1; break; case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: /* Floating point division by zero is a legitimate way to obtain infinities and NaNs. */ if (warn_div_by_zero && skip_evaluation == 0 && integer_zerop (op1)) warning ("division by zero"); if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE || code0 == VECTOR_TYPE) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE || code1 == VECTOR_TYPE)) { if (!(code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)) resultcode = RDIV_EXPR; else /* Although it would be tempting to shorten always here, that loses on some targets, since the modulo instruction is undefined if the quotient can't be represented in the computation mode. We shorten only if unsigned or if dividing by something we know != -1. */ shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0)) || (TREE_CODE (op1) == INTEGER_CST && ! integer_all_onesp (op1))); common = 1; } break; case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE) shorten = -1; else if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE) common = 1; break; case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: if (warn_div_by_zero && skip_evaluation == 0 && integer_zerop (op1)) warning ("division by zero"); if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE) { /* Although it would be tempting to shorten always here, that loses on some targets, since the modulo instruction is undefined if the quotient can't be represented in the computation mode. We shorten only if unsigned or if dividing by something we know != -1. */ shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0)) || (TREE_CODE (op1) == INTEGER_CST && ! 
integer_all_onesp (op1))); common = 1; } break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: if ((code0 == INTEGER_TYPE || code0 == POINTER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE) && (code1 == INTEGER_TYPE || code1 == POINTER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE)) { /* Result of these operations is always an int, but that does not mean the operands should be converted to ints! */ result_type = integer_type_node; op0 = lang_hooks.truthvalue_conversion (op0); op1 = lang_hooks.truthvalue_conversion (op1); converted = 1; } break; /* Shift operations: result has same type as first operand; always convert second operand to int. Also set SHORT_SHIFT if shifting rightward. */ case RSHIFT_EXPR: if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE) { if (TREE_CODE (op1) == INTEGER_CST && skip_evaluation == 0) { if (tree_int_cst_sgn (op1) < 0) warning ("right shift count is negative"); else { if (! integer_zerop (op1)) short_shift = 1; if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0) warning ("right shift count >= width of type"); } } /* Use the type of the value to be shifted. */ result_type = type0; /* Convert the shift-count to an integer, regardless of size of value being shifted. */ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node) op1 = convert (integer_type_node, op1); /* Avoid converting op1 to result_type later. */ converted = 1; } break; case LSHIFT_EXPR: if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE) { if (TREE_CODE (op1) == INTEGER_CST && skip_evaluation == 0) { if (tree_int_cst_sgn (op1) < 0) warning ("left shift count is negative"); else if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0) warning ("left shift count >= width of type"); } /* Use the type of the value to be shifted. */ result_type = type0; /* Convert the shift-count to an integer, regardless of size of value being shifted. */ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node) op1 = convert (integer_type_node, op1); /* Avoid converting op1 to result_type later. */ converted = 1; } break; case RROTATE_EXPR: case LROTATE_EXPR: if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE) { if (TREE_CODE (op1) == INTEGER_CST && skip_evaluation == 0) { if (tree_int_cst_sgn (op1) < 0) warning ("shift count is negative"); else if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0) warning ("shift count >= width of type"); } /* Use the type of the value to be shifted. */ result_type = type0; /* Convert the shift-count to an integer, regardless of size of value being shifted. */ if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node) op1 = convert (integer_type_node, op1); /* Avoid converting op1 to result_type later. */ converted = 1; } break; case EQ_EXPR: case NE_EXPR: if (warn_float_equal && (code0 == REAL_TYPE || code1 == REAL_TYPE)) warning ("comparing floating point with == or != is unsafe"); /* Result of comparison is always int, but don't convert the args to int! */ build_type = integer_type_node; if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE)) short_compare = 1; else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE) { tree tt0 = TREE_TYPE (type0); tree tt1 = TREE_TYPE (type1); /* Anything compares with void *. void * compares with anything. Otherwise, the targets must be compatible and both must be object or both incomplete. 
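For instance (illustrative only), given

      void *vp;  int *ip;  int (*fp) (void);

   the comparison `vp == ip' is accepted because one operand is `void *'; `vp == fp' draws the pedwarn about comparing `void *' with a function pointer under -pedantic; and `ip == fp' draws `comparison of distinct pointer types lacks a cast'.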
*/ if (comp_target_types (type0, type1, 1)) result_type = common_pointer_type (type0, type1); else if (VOID_TYPE_P (tt0)) { /* op0 != orig_op0 detects the case of something whose value is 0 but which isn't a valid null ptr const. */ if (pedantic && (!integer_zerop (op0) || op0 != orig_op0) && TREE_CODE (tt1) == FUNCTION_TYPE) pedwarn ("ISO C forbids comparison of `void *' with function pointer"); } else if (VOID_TYPE_P (tt1)) { if (pedantic && (!integer_zerop (op1) || op1 != orig_op1) && TREE_CODE (tt0) == FUNCTION_TYPE) pedwarn ("ISO C forbids comparison of `void *' with function pointer"); } else pedwarn ("comparison of distinct pointer types lacks a cast"); if (result_type == NULL_TREE) result_type = ptr_type_node; } else if (code0 == POINTER_TYPE && TREE_CODE (op1) == INTEGER_CST && integer_zerop (op1)) result_type = type0; else if (code1 == POINTER_TYPE && TREE_CODE (op0) == INTEGER_CST && integer_zerop (op0)) result_type = type1; else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE) { result_type = type0; pedwarn ("comparison between pointer and integer"); } else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE) { result_type = type1; pedwarn ("comparison between pointer and integer"); } break; case MAX_EXPR: case MIN_EXPR: if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE)) shorten = 1; else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE) { if (comp_target_types (type0, type1, 1)) { result_type = common_pointer_type (type0, type1); if (pedantic && TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE) pedwarn ("ISO C forbids ordered comparisons of pointers to functions"); } else { result_type = ptr_type_node; pedwarn ("comparison of distinct pointer types lacks a cast"); } } break; case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: build_type = integer_type_node; if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE)) short_compare = 1; else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE) { if (comp_target_types (type0, type1, 1)) { result_type = common_pointer_type (type0, type1); if (!COMPLETE_TYPE_P (TREE_TYPE (type0)) != !COMPLETE_TYPE_P (TREE_TYPE (type1))) pedwarn ("comparison of complete and incomplete pointers"); else if (pedantic && TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE) pedwarn ("ISO C forbids ordered comparisons of pointers to functions"); } else { result_type = ptr_type_node; pedwarn ("comparison of distinct pointer types lacks a cast"); } } else if (code0 == POINTER_TYPE && TREE_CODE (op1) == INTEGER_CST && integer_zerop (op1)) { result_type = type0; if (pedantic || extra_warnings) pedwarn ("ordered comparison of pointer with integer zero"); } else if (code1 == POINTER_TYPE && TREE_CODE (op0) == INTEGER_CST && integer_zerop (op0)) { result_type = type1; if (pedantic) pedwarn ("ordered comparison of pointer with integer zero"); } else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE) { result_type = type0; pedwarn ("comparison between pointer and integer"); } else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE) { result_type = type1; pedwarn ("comparison between pointer and integer"); } break; case UNORDERED_EXPR: case ORDERED_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: build_type = integer_type_node; if (code0 != REAL_TYPE || code1 != REAL_TYPE) { error ("unordered comparison on non-floating point argument"); return error_mark_node; } common = 1; break; default: break; } if (code0 == ERROR_MARK || 
code1 == ERROR_MARK) return error_mark_node; if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE || code0 == VECTOR_TYPE) && (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE || code1 == VECTOR_TYPE)) { int none_complex = (code0 != COMPLEX_TYPE && code1 != COMPLEX_TYPE); if (shorten || common || short_compare) result_type = common_type (type0, type1); /* For certain operations (which identify themselves by shorten != 0) if both args were extended from the same smaller type, do the arithmetic in that type and then extend. shorten !=0 and !=1 indicates a bitwise operation. For them, this optimization is safe only if both args are zero-extended or both are sign-extended. Otherwise, we might change the result. Eg, (short)-1 | (unsigned short)-1 is (int)-1 but calculated in (unsigned short) it would be (unsigned short)-1. */ if (shorten && none_complex) { int unsigned0, unsigned1; tree arg0 = get_narrower (op0, &unsigned0); tree arg1 = get_narrower (op1, &unsigned1); /* UNS is 1 if the operation to be done is an unsigned one. */ int uns = TYPE_UNSIGNED (result_type); tree type; final_type = result_type; /* Handle the case that OP0 (or OP1) does not *contain* a conversion but it *requires* conversion to FINAL_TYPE. */ if ((TYPE_PRECISION (TREE_TYPE (op0)) == TYPE_PRECISION (TREE_TYPE (arg0))) && TREE_TYPE (op0) != final_type) unsigned0 = TYPE_UNSIGNED (TREE_TYPE (op0)); if ((TYPE_PRECISION (TREE_TYPE (op1)) == TYPE_PRECISION (TREE_TYPE (arg1))) && TREE_TYPE (op1) != final_type) unsigned1 = TYPE_UNSIGNED (TREE_TYPE (op1)); /* Now UNSIGNED0 is 1 if ARG0 zero-extends to FINAL_TYPE. */ /* For bitwise operations, signedness of nominal type does not matter. Consider only how operands were extended. */ if (shorten == -1) uns = unsigned0; /* Note that in all three cases below we refrain from optimizing an unsigned operation on sign-extended args. That would not be valid. */ /* Both args variable: if both extended in same way from same width, do it in that width. Do it unsigned if args were zero-extended. */ if ((TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type)) && (TYPE_PRECISION (TREE_TYPE (arg1)) == TYPE_PRECISION (TREE_TYPE (arg0))) && unsigned0 == unsigned1 && (unsigned0 || !uns)) result_type = c_common_signed_or_unsigned_type (unsigned0, common_type (TREE_TYPE (arg0), TREE_TYPE (arg1))); else if (TREE_CODE (arg0) == INTEGER_CST && (unsigned1 || !uns) && (TYPE_PRECISION (TREE_TYPE (arg1)) < TYPE_PRECISION (result_type)) && (type = c_common_signed_or_unsigned_type (unsigned1, TREE_TYPE (arg1)), int_fits_type_p (arg0, type))) result_type = type; else if (TREE_CODE (arg1) == INTEGER_CST && (unsigned0 || !uns) && (TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type)) && (type = c_common_signed_or_unsigned_type (unsigned0, TREE_TYPE (arg0)), int_fits_type_p (arg1, type))) result_type = type; } /* Shifts can be shortened if shifting right. */ if (short_shift) { int unsigned_arg; tree arg0 = get_narrower (op0, &unsigned_arg); final_type = result_type; if (arg0 == op0 && final_type == TREE_TYPE (op0)) unsigned_arg = TYPE_UNSIGNED (TREE_TYPE (op0)); if (TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type) /* We can shorten only if the shift count is less than the number of bits in the smaller type size. */ && compare_tree_int (op1, TYPE_PRECISION (TREE_TYPE (arg0))) < 0 /* We cannot drop an unsigned shift after sign-extension. 
*/ && (!TYPE_UNSIGNED (final_type) || unsigned_arg)) { /* Do an unsigned shift if the operand was zero-extended. */ result_type = c_common_signed_or_unsigned_type (unsigned_arg, TREE_TYPE (arg0)); /* Convert value-to-be-shifted to that type. */ if (TREE_TYPE (op0) != result_type) op0 = convert (result_type, op0); converted = 1; } } /* Comparison operations are shortened too but differently. They identify themselves by setting short_compare = 1. */ if (short_compare) { /* Don't write &op0, etc., because that would prevent op0 from being kept in a register. Instead, make copies of the our local variables and pass the copies by reference, then copy them back afterward. */ tree xop0 = op0, xop1 = op1, xresult_type = result_type; enum tree_code xresultcode = resultcode; tree val = shorten_compare (&xop0, &xop1, &xresult_type, &xresultcode); if (val != 0) return val; op0 = xop0, op1 = xop1; converted = 1; resultcode = xresultcode; if (warn_sign_compare && skip_evaluation == 0) { int op0_signed = ! TYPE_UNSIGNED (TREE_TYPE (orig_op0)); int op1_signed = ! TYPE_UNSIGNED (TREE_TYPE (orig_op1)); int unsignedp0, unsignedp1; tree primop0 = get_narrower (op0, &unsignedp0); tree primop1 = get_narrower (op1, &unsignedp1); xop0 = orig_op0; xop1 = orig_op1; STRIP_TYPE_NOPS (xop0); STRIP_TYPE_NOPS (xop1); /* Give warnings for comparisons between signed and unsigned quantities that may fail. Do the checking based on the original operand trees, so that casts will be considered, but default promotions won't be. Do not warn if the comparison is being done in a signed type, since the signed type will only be chosen if it can represent all the values of the unsigned type. */ if (! TYPE_UNSIGNED (result_type)) /* OK */; /* Do not warn if both operands are the same signedness. */ else if (op0_signed == op1_signed) /* OK */; else { tree sop, uop; if (op0_signed) sop = xop0, uop = xop1; else sop = xop1, uop = xop0; /* Do not warn if the signed quantity is an unsuffixed integer literal (or some static constant expression involving such literals or a conditional expression involving such literals) and it is non-negative. */ if (tree_expr_nonnegative_p (sop)) /* OK */; /* Do not warn if the comparison is an equality operation, the unsigned quantity is an integral constant, and it would fit in the result if the result were signed. */ else if (TREE_CODE (uop) == INTEGER_CST && (resultcode == EQ_EXPR || resultcode == NE_EXPR) && int_fits_type_p (uop, c_common_signed_type (result_type))) /* OK */; /* Do not warn if the unsigned quantity is an enumeration constant and its maximum value would fit in the result if the result were signed. */ else if (TREE_CODE (uop) == INTEGER_CST && TREE_CODE (TREE_TYPE (uop)) == ENUMERAL_TYPE && int_fits_type_p (TYPE_MAX_VALUE (TREE_TYPE(uop)), c_common_signed_type (result_type))) /* OK */; else warning ("comparison between signed and unsigned"); } /* Warn if two unsigned values are being compared in a size larger than their original size, and one (and only one) is the result of a `~' operator. This comparison will always fail. Also warn if one operand is a constant, and the constant does not have all bits set that are set in the ~ operand when it is extended. 
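For example (illustrative only), on a target where `short' is 16 bits wide and with warn_sign_compare enabled,

      static unsigned short x;
      static int never (void) { return ~x == 0x1234; }

   is diagnosed: `x' is zero-extended to `int' before the `~', so `~x' always has the bits above bit 15 set and a 16-bit constant can never match (`comparison of promoted ~unsigned with constant').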
*/ if ((TREE_CODE (primop0) == BIT_NOT_EXPR) != (TREE_CODE (primop1) == BIT_NOT_EXPR)) { if (TREE_CODE (primop0) == BIT_NOT_EXPR) primop0 = get_narrower (TREE_OPERAND (primop0, 0), &unsignedp0); else primop1 = get_narrower (TREE_OPERAND (primop1, 0), &unsignedp1); if (host_integerp (primop0, 0) || host_integerp (primop1, 0)) { tree primop; HOST_WIDE_INT constant, mask; int unsignedp, bits; if (host_integerp (primop0, 0)) { primop = primop1; unsignedp = unsignedp1; constant = tree_low_cst (primop0, 0); } else { primop = primop0; unsignedp = unsignedp0; constant = tree_low_cst (primop1, 0); } bits = TYPE_PRECISION (TREE_TYPE (primop)); if (bits < TYPE_PRECISION (result_type) && bits < HOST_BITS_PER_WIDE_INT && unsignedp) { mask = (~ (HOST_WIDE_INT) 0) << bits; if ((mask & constant) != mask) warning ("comparison of promoted ~unsigned with constant"); } } else if (unsignedp0 && unsignedp1 && (TYPE_PRECISION (TREE_TYPE (primop0)) < TYPE_PRECISION (result_type)) && (TYPE_PRECISION (TREE_TYPE (primop1)) < TYPE_PRECISION (result_type))) warning ("comparison of promoted ~unsigned with unsigned"); } } } } /* At this point, RESULT_TYPE must be nonzero to avoid an error message. If CONVERTED is zero, both args will be converted to type RESULT_TYPE. Then the expression will be built. It will be given type FINAL_TYPE if that is nonzero; otherwise, it will be given type RESULT_TYPE. */ if (!result_type) { binary_op_error (code); return error_mark_node; } if (! converted) { if (TREE_TYPE (op0) != result_type) op0 = convert (result_type, op0); if (TREE_TYPE (op1) != result_type) op1 = convert (result_type, op1); } if (build_type == NULL_TREE) build_type = result_type; { tree result = build (resultcode, build_type, op0, op1); /* Treat expressions in initializers specially as they can't trap. */ result = require_constant_value ? fold_initializer (result) : fold (result); if (final_type != 0) result = convert (final_type, result); return result; } } /* Build the result of __builtin_offsetof. TYPE is the first argument to offsetof, i.e. a type. LIST is a tree_list that encodes component and array references; PURPOSE is set for the former and VALUE is set for the later. */ tree build_offsetof (tree type, tree list) { tree t; /* Build "*(type *)0". */ t = convert (build_pointer_type (type), null_pointer_node); t = build_indirect_ref (t, ""); /* Build COMPONENT and ARRAY_REF expressions as needed. */ for (list = nreverse (list); list ; list = TREE_CHAIN (list)) if (TREE_PURPOSE (list)) t = build_component_ref (t, TREE_PURPOSE (list)); else t = build_array_ref (t, TREE_VALUE (list)); /* Finalize the offsetof expression. For now all we need to do is take the address of the expression we created, and cast that to an integer type; this mirrors the traditional macro implementation of offsetof. */ t = build_unary_op (ADDR_EXPR, t, 0); return convert (size_type_node, t); } /* Language-level data type conversion for GNU C. Copyright (C) 1987, 1988, 1991, 1998, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains the functions for converting C expressions to different data types. The only entry point is `convert'. Every language front end must have a `convert' function but what kind of conversions it does will depend on the language. */ /* Change of width--truncation and extension of integers or reals-- is represented with NOP_EXPR. Proper functioning of many things assumes that no other conversions can be NOP_EXPRs. Conversion between integer and pointer is represented with CONVERT_EXPR. Converting integer to real uses FLOAT_EXPR and real to integer uses FIX_TRUNC_EXPR. Here is a list of all the functions that assume that widening and narrowing is always done with a NOP_EXPR: In convert.c, convert_to_integer. In c-typeck.c, build_binary_op (boolean ops), and c_common_truthvalue_conversion. In expr.c: expand_expr, for operands of a MULT_EXPR. In fold-const.c: fold. In tree.c: get_narrower and get_unwidened. */ /* Subroutines of `convert'. */ /* Create an expression whose value is that of EXPR, converted to type TYPE. The TREE_TYPE of the value is always TYPE. This function implements all reasonable conversions; callers should filter out those that are not permitted by the language being compiled. */ tree convert (tree type, tree expr) { tree e = expr; enum tree_code code = TREE_CODE (type); if (type == TREE_TYPE (expr) || TREE_CODE (expr) == ERROR_MARK || code == ERROR_MARK || TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK) return expr; if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (TREE_TYPE (expr))) return fold (build1 (NOP_EXPR, type, expr)); if (TREE_CODE (TREE_TYPE (expr)) == ERROR_MARK) return error_mark_node; if (TREE_CODE (TREE_TYPE (expr)) == VOID_TYPE) { error ("void value not ignored as it ought to be"); return error_mark_node; } if (code == VOID_TYPE) return build1 (CONVERT_EXPR, type, e); #if 0 /* This is incorrect. A truncation can't be stripped this way. Extensions will be stripped by the use of get_unwidened. */ if (TREE_CODE (expr) == NOP_EXPR) return convert (type, TREE_OPERAND (expr, 0)); #endif if (code == INTEGER_TYPE || code == ENUMERAL_TYPE) return fold (convert_to_integer (type, e)); if (code == BOOLEAN_TYPE) { tree t = lang_hooks.truthvalue_conversion (expr); if (TREE_CODE (t) == ERROR_MARK) return t; /* If it returns a NOP_EXPR, we must fold it here to avoid infinite recursion between fold () and convert (). */ if (TREE_CODE (t) == NOP_EXPR) return fold (build1 (NOP_EXPR, type, TREE_OPERAND (t, 0))); else return fold (build1 (NOP_EXPR, type, t)); } if (code == POINTER_TYPE || code == REFERENCE_TYPE) return fold (convert_to_pointer (type, e)); if (code == REAL_TYPE) return fold (convert_to_real (type, e)); if (code == COMPLEX_TYPE) return fold (convert_to_complex (type, e)); if (code == VECTOR_TYPE) return fold (convert_to_vector (type, e)); if ((code == RECORD_TYPE || code == UNION_TYPE) && lang_hooks.types_compatible_p (type, TREE_TYPE (expr))) return e; error ("conversion to non-scalar type requested"); return error_mark_node; } /* Generate information regarding function declarations and definitions based on information stored in GCC's tree structure. This code implements the -aux-info option. Copyright (C) 1989, 1991, 1994, 1995, 1997, 1998, 1999, 2000, 2003, 2004 Free Software Foundation, Inc. 
Contributed by Ron Guilmette (rfg@segfault.us.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ enum formals_style_enum { ansi, k_and_r_names, k_and_r_decls }; typedef enum formals_style_enum formals_style; static const char *data_type; static char *affix_data_type (const char *) ATTRIBUTE_MALLOC; static const char *gen_formal_list_for_type (tree, formals_style); static int deserves_ellipsis (tree); static const char *gen_formal_list_for_func_def (tree, formals_style); static const char *gen_type (const char *, tree, formals_style); static const char *gen_decl (tree, int, formals_style); /* Given a string representing an entire type or an entire declaration which only lacks the actual "data-type" specifier (at its left end), affix the data-type specifier to the left end of the given type specification or object declaration. Because of C language weirdness, the data-type specifier (which normally goes in at the very left end) may have to be slipped in just to the right of any leading "const" or "volatile" qualifiers (there may be more than one). Actually this may not be strictly necessary because it seems that GCC (at least) accepts ` const foo;' and treats it the same as `const foo;' but people are accustomed to seeing `const char *foo;' and *not* `char const *foo;' so we try to create types that look as expected. */ static char * affix_data_type (const char *param) { char *const type_or_decl = ASTRDUP (param); char *p = type_or_decl; char *qualifiers_then_data_type; char saved; /* Skip as many leading const's or volatile's as there are. */ for (;;) { if (!strncmp (p, "volatile ", 9)) { p += 9; continue; } if (!strncmp (p, "const ", 6)) { p += 6; continue; } break; } /* p now points to the place where we can insert the data type. We have to add a blank after the data-type of course. */ if (p == type_or_decl) return concat (data_type, " ", type_or_decl, NULL); saved = *p; *p = '\0'; qualifiers_then_data_type = concat (type_or_decl, data_type, NULL); *p = saved; return reconcat (qualifiers_then_data_type, qualifiers_then_data_type, " ", p, NULL); } /* Given a tree node which represents some "function type", generate the source code version of a formal parameter list (of some given style) for this function type. Return the whole formal parameter list (including a pair of surrounding parens) as a string. Note that if the style we are currently aiming for is non-ansi, then we just return a pair of empty parens here. */ static const char * gen_formal_list_for_type (tree fntype, formals_style style) { const char *formal_list = ""; tree formal_type; if (style != ansi) return "()"; formal_type = TYPE_ARG_TYPES (fntype); while (formal_type && TREE_VALUE (formal_type) != void_type_node) { const char *this_type; if (*formal_list) formal_list = concat (formal_list, ", ", NULL); this_type = gen_type ("", TREE_VALUE (formal_type), ansi); formal_list = ((strlen (this_type)) ? 
concat (formal_list, affix_data_type (this_type), NULL) : concat (formal_list, data_type, NULL)); formal_type = TREE_CHAIN (formal_type); } /* If we got to here, then we are trying to generate an ANSI style formal parameters list. New style prototyped ANSI formal parameter lists should in theory always contain some stuff between the opening and closing parens, even if it is only "void". The brutal truth though is that there is lots of old K&R code out there which contains declarations of "pointer-to-function" parameters and these almost never have fully specified formal parameter lists associated with them. That is, the pointer-to-function parameters are declared with just empty parameter lists. In cases such as these, protoize should really insert *something* into the vacant parameter lists, but what? It has no basis on which to insert anything in particular. Here, we make life easy for protoize by trying to distinguish between K&R empty parameter lists and new-style prototyped parameter lists that actually contain "void". In the latter case we (obviously) want to output the "void" verbatim, and that what we do. In the former case, we do our best to give protoize something nice to insert. This "something nice" should be something that is still valid (when re-compiled) but something that can clearly indicate to the user that more typing information (for the parameter list) should be added (by hand) at some convenient moment. The string chosen here is a comment with question marks in it. */ if (!*formal_list) { if (TYPE_ARG_TYPES (fntype)) /* assert (TREE_VALUE (TYPE_ARG_TYPES (fntype)) == void_type_node); */ formal_list = "void"; else formal_list = "/* ??? */"; } else { /* If there were at least some parameters, and if the formals-types-list petered out to a NULL (i.e. without being terminated by a void_type_node) then we need to tack on an ellipsis. */ if (!formal_type) formal_list = concat (formal_list, ", ...", NULL); } return concat (" (", formal_list, ")", NULL); } /* For the generation of an ANSI prototype for a function definition, we have to look at the formal parameter list of the function's own "type" to determine if the function's formal parameter list should end with an ellipsis. Given a tree node, the following function will return nonzero if the "function type" parameter list should end with an ellipsis. */ static int deserves_ellipsis (tree fntype) { tree formal_type; formal_type = TYPE_ARG_TYPES (fntype); while (formal_type && TREE_VALUE (formal_type) != void_type_node) formal_type = TREE_CHAIN (formal_type); /* If there were at least some parameters, and if the formals-types-list petered out to a NULL (i.e. without being terminated by a void_type_node) then we need to tack on an ellipsis. */ return (!formal_type && TYPE_ARG_TYPES (fntype)); } /* Generate a parameter list for a function definition (in some given style). Note that this routine has to be separate (and different) from the code that generates the prototype parameter lists for function declarations, because in the case of a function declaration, all we have to go on is a tree node representing the function's own "function type". This can tell us the types of all of the formal parameters for the function, but it cannot tell us the actual *names* of each of the formal parameters. We need to output those parameter names for each function definition. 
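To make the styles concrete (illustrative example, not from the original sources), for a definition such as

      int f (int x, char *y) { return x; }

   the three styles produce roughly " (int x, char *y)" for ansi, " (x, y)" for k_and_r_names, and "int x; char *y; " for k_and_r_decls.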
This routine gets a pointer to a tree node which represents the actual declaration of the given function, and this DECL node has a list of formal parameter (variable) declarations attached to it. These formal parameter (variable) declaration nodes give us the actual names of the formal parameters for the given function definition. This routine returns a string which is the source form for the entire function formal parameter list. */ static const char * gen_formal_list_for_func_def (tree fndecl, formals_style style) { const char *formal_list = ""; tree formal_decl; formal_decl = DECL_ARGUMENTS (fndecl); while (formal_decl) { const char *this_formal; if (*formal_list && ((style == ansi) || (style == k_and_r_names))) formal_list = concat (formal_list, ", ", NULL); this_formal = gen_decl (formal_decl, 0, style); if (style == k_and_r_decls) formal_list = concat (formal_list, this_formal, "; ", NULL); else formal_list = concat (formal_list, this_formal, NULL); formal_decl = TREE_CHAIN (formal_decl); } if (style == ansi) { if (!DECL_ARGUMENTS (fndecl)) formal_list = concat (formal_list, "void", NULL); if (deserves_ellipsis (TREE_TYPE (fndecl))) formal_list = concat (formal_list, ", ...", NULL); } if ((style == ansi) || (style == k_and_r_names)) formal_list = concat (" (", formal_list, ")", NULL); return formal_list; } /* Generate a string which is the source code form for a given type (t). This routine is ugly and complex because the C syntax for declarations is ugly and complex. This routine is straightforward so long as *no* pointer types, array types, or function types are involved. In the simple cases, this routine will return the (string) value which was passed in as the "ret_val" argument. Usually, this starts out either as an empty string, or as the name of the declared item (i.e. the formal function parameter variable). This routine will also return with the global variable "data_type" set to some string value which is the "basic" data-type of the given complete type. This "data_type" string can be concatenated onto the front of the returned string after this routine returns to its caller. In complicated cases involving pointer types, array types, or function types, the C declaration syntax requires an "inside out" approach, i.e. if you have a type which is a "pointer-to-function" type, you need to handle the "pointer" part first, but it also has to be "innermost" (relative to the declaration stuff for the "function" type). Thus, is this case, you must prepend a "(*" and append a ")" to the name of the item (i.e. formal variable). Then you must append and prepend the other info for the "function type" part of the overall type. To handle the "innermost precedence" rules of complicated C declarators, we do the following (in this routine). The input parameter called "ret_val" is treated as a "seed". Each time gen_type is called (perhaps recursively) some additional strings may be appended or prepended (or both) to the "seed" string. If yet another (lower) level of the GCC tree exists for the given type (as in the case of a pointer type, an array type, or a function type) then the (wrapped) seed is passed to a (recursive) invocation of gen_type() this recursive invocation may again "wrap" the (new) seed with yet more declarator stuff, by appending, prepending (or both). By the time the recursion bottoms out, the "seed value" at that point will have a value which is (almost) the complete source version of the declarator (except for the data_type info). 
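To make the wrapping concrete (illustrative only; the parameter name is arbitrary): for a formal `fp' whose type is pointer to function taking no arguments and returning `int', the seed is transformed roughly as

      "fp"  ->  "*fp"  ->  "(*fp)"  ->  "(*fp) (void)"

   while data_type is left as "int", so affix_data_type in the caller finally produces "int (*fp) (void)".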
Thus, this deepest "seed" value is simply passed back up through all of the recursive calls until it is given (as the return value) to the initial caller of the gen_type() routine. All that remains to do at this point is for the initial caller to prepend the "data_type" string onto the returned "seed". */ static const char * gen_type (const char *ret_val, tree t, formals_style style) { tree chain_p; /* If there is a typedef name for this type, use it. */ if (TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL) data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t))); else { switch (TREE_CODE (t)) { case POINTER_TYPE: if (TYPE_READONLY (t)) ret_val = concat ("const ", ret_val, NULL); if (TYPE_VOLATILE (t)) ret_val = concat ("volatile ", ret_val, NULL); ret_val = concat ("*", ret_val, NULL); if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE) ret_val = concat ("(", ret_val, ")", NULL); ret_val = gen_type (ret_val, TREE_TYPE (t), style); return ret_val; case ARRAY_TYPE: if (!COMPLETE_TYPE_P (t) || TREE_CODE (TYPE_SIZE (t)) != INTEGER_CST) ret_val = gen_type (concat (ret_val, "[]", NULL), TREE_TYPE (t), style); else if (int_size_in_bytes (t) == 0) ret_val = gen_type (concat (ret_val, "[0]", NULL), TREE_TYPE (t), style); else { int size = (int_size_in_bytes (t) / int_size_in_bytes (TREE_TYPE (t))); char buff[10]; sprintf (buff, "[%d]", size); ret_val = gen_type (concat (ret_val, buff, NULL), TREE_TYPE (t), style); } break; case FUNCTION_TYPE: ret_val = gen_type (concat (ret_val, gen_formal_list_for_type (t, style), NULL), TREE_TYPE (t), style); break; case IDENTIFIER_NODE: data_type = IDENTIFIER_POINTER (t); break; /* The following three cases are complicated by the fact that a user may do something really stupid, like creating a brand new "anonymous" type specification in a formal argument list (or as part of a function return type specification). For example: int f (enum { red, green, blue } color); In such cases, we have no name that we can put into the prototype to represent the (anonymous) type. Thus, we have to generate the whole darn type specification. Yuck! 
*/ case RECORD_TYPE: if (TYPE_NAME (t)) data_type = IDENTIFIER_POINTER (TYPE_NAME (t)); else { data_type = ""; chain_p = TYPE_FIELDS (t); while (chain_p) { data_type = concat (data_type, gen_decl (chain_p, 0, ansi), NULL); chain_p = TREE_CHAIN (chain_p); data_type = concat (data_type, "; ", NULL); } data_type = concat ("{ ", data_type, "}", NULL); } data_type = concat ("struct ", data_type, NULL); break; case UNION_TYPE: if (TYPE_NAME (t)) data_type = IDENTIFIER_POINTER (TYPE_NAME (t)); else { data_type = ""; chain_p = TYPE_FIELDS (t); while (chain_p) { data_type = concat (data_type, gen_decl (chain_p, 0, ansi), NULL); chain_p = TREE_CHAIN (chain_p); data_type = concat (data_type, "; ", NULL); } data_type = concat ("{ ", data_type, "}", NULL); } data_type = concat ("union ", data_type, NULL); break; case ENUMERAL_TYPE: if (TYPE_NAME (t)) data_type = IDENTIFIER_POINTER (TYPE_NAME (t)); else { data_type = ""; chain_p = TYPE_VALUES (t); while (chain_p) { data_type = concat (data_type, IDENTIFIER_POINTER (TREE_PURPOSE (chain_p)), NULL); chain_p = TREE_CHAIN (chain_p); if (chain_p) data_type = concat (data_type, ", ", NULL); } data_type = concat ("{ ", data_type, " }", NULL); } data_type = concat ("enum ", data_type, NULL); break; case TYPE_DECL: data_type = IDENTIFIER_POINTER (DECL_NAME (t)); break; case INTEGER_TYPE: data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t))); /* Normally, `unsigned' is part of the deal. Not so if it comes with a type qualifier. */ if (TYPE_UNSIGNED (t) && TYPE_QUALS (t)) data_type = concat ("unsigned ", data_type, NULL); break; case REAL_TYPE: data_type = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (t))); break; case VOID_TYPE: data_type = "void"; break; case ERROR_MARK: data_type = "[ERROR]"; break; default: abort (); } } if (TYPE_READONLY (t)) ret_val = concat ("const ", ret_val, NULL); if (TYPE_VOLATILE (t)) ret_val = concat ("volatile ", ret_val, NULL); if (TYPE_RESTRICT (t)) ret_val = concat ("restrict ", ret_val, NULL); return ret_val; } /* Generate a string (source) representation of an entire entity declaration (using some particular style for function types). The given entity may be either a variable or a function. If the "is_func_definition" parameter is nonzero, assume that the thing we are generating a declaration for is a FUNCTION_DECL node which is associated with a function definition. In this case, we can assume that an attached list of DECL nodes for function formal arguments is present. */ static const char * gen_decl (tree decl, int is_func_definition, formals_style style) { const char *ret_val; if (DECL_NAME (decl)) ret_val = IDENTIFIER_POINTER (DECL_NAME (decl)); else ret_val = ""; /* If we are just generating a list of names of formal parameters, we can simply return the formal parameter name (with no typing information attached to it) now. */ if (style == k_and_r_names) return ret_val; /* Note that for the declaration of some entity (either a function or a data object, like for instance a parameter) if the entity itself was declared as either const or volatile, then const and volatile properties are associated with just the declaration of the entity, and *not* with the `type' of the entity. Thus, for such declared entities, we have to generate the qualifiers here. */ if (TREE_THIS_VOLATILE (decl)) ret_val = concat ("volatile ", ret_val, NULL); if (TREE_READONLY (decl)) ret_val = concat ("const ", ret_val, NULL); data_type = ""; /* For FUNCTION_DECL nodes, there are two possible cases here. 
First, if this FUNCTION_DECL node was generated from a function "definition", then we will have a list of DECL_NODE's, one for each of the function's formal parameters. In this case, we can print out not only the types of each formal, but also each formal's name. In the second case, this FUNCTION_DECL node came from an actual function declaration (and *not* a definition). In this case, we do nothing here because the formal argument type-list will be output later, when the "type" of the function is added to the string we are building. Note that the ANSI-style formal parameter list is considered to be a (suffix) part of the "type" of the function. */ if (TREE_CODE (decl) == FUNCTION_DECL && is_func_definition) { ret_val = concat (ret_val, gen_formal_list_for_func_def (decl, ansi), NULL); /* Since we have already added in the formals list stuff, here we don't add the whole "type" of the function we are considering (which would include its parameter-list info), rather, we only add in the "type" of the "type" of the function, which is really just the return-type of the function (and does not include the parameter list info). */ ret_val = gen_type (ret_val, TREE_TYPE (TREE_TYPE (decl)), style); } else ret_val = gen_type (ret_val, TREE_TYPE (decl), style); ret_val = affix_data_type (ret_val); if (TREE_CODE (decl) != FUNCTION_DECL && C_DECL_REGISTER (decl)) ret_val = concat ("register ", ret_val, NULL); if (TREE_PUBLIC (decl)) ret_val = concat ("extern ", ret_val, NULL); if (TREE_CODE (decl) == FUNCTION_DECL && !TREE_PUBLIC (decl)) ret_val = concat ("static ", ret_val, NULL); return ret_val; } extern FILE *aux_info_file; /* Generate and write a new line of info to the aux-info (.X) file. This routine is called once for each function declaration, and once for each function definition (even the implicit ones). */ void gen_aux_info_record (tree fndecl, int is_definition, int is_implicit, int is_prototyped) { if (flag_gen_aux_info) { static int compiled_from_record = 0; expanded_location xloc = expand_location (DECL_SOURCE_LOCATION (fndecl)); /* Each output .X file must have a header line. Write one now if we have not yet done so. */ if (! compiled_from_record++) { /* The first line tells which directory file names are relative to. Currently, -aux-info works only for files in the working directory, so just use a `.' as a placeholder for now. */ fprintf (aux_info_file, "/* compiled from: . */\n"); } /* Write the actual line of auxiliary info. */ fprintf (aux_info_file, "/* %s:%d:%c%c */ %s;", xloc.file, xloc.line, (is_implicit) ? 'I' : (is_prototyped) ? 'N' : 'O', (is_definition) ? 'F' : 'C', gen_decl (fndecl, is_definition, ansi)); /* If this is an explicit function declaration, we need to also write out an old-style (i.e. K&R) function header, just in case the user wants to run unprotoize. */ if (is_definition) { fprintf (aux_info_file, " /*%s %s*/", gen_formal_list_for_func_def (fndecl, k_and_r_names), gen_formal_list_for_func_def (fndecl, k_and_r_decls)); } fprintf (aux_info_file, "\n"); } } /* Subroutines shared by all languages that are variants of C. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ cpp_reader *parse_in; /* Declared in c-pragma.h. */ /* We let tm.h override the types used here, to handle trivial differences such as the choice of unsigned int or long unsigned int for size_t. When machines start needing nontrivial differences in the size type, it would be best to do something here to figure out automatically from other information what type to use. */ #ifndef SIZE_TYPE #define SIZE_TYPE "long unsigned int" #endif #ifndef PID_TYPE #define PID_TYPE "int" #endif #ifndef WCHAR_TYPE #define WCHAR_TYPE "int" #endif /* WCHAR_TYPE gets overridden by -fshort-wchar. */ #define MODIFIED_WCHAR_TYPE \ (flag_short_wchar ? "short unsigned int" : WCHAR_TYPE) #ifndef PTRDIFF_TYPE #define PTRDIFF_TYPE "long int" #endif #ifndef WINT_TYPE #define WINT_TYPE "unsigned int" #endif #ifndef INTMAX_TYPE #define INTMAX_TYPE ((INT_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \ ? "int" \ : ((LONG_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \ ? "long int" \ : "long long int")) #endif #ifndef UINTMAX_TYPE #define UINTMAX_TYPE ((INT_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \ ? "unsigned int" \ : ((LONG_TYPE_SIZE == LONG_LONG_TYPE_SIZE) \ ? "long unsigned int" \ : "long long unsigned int")) #endif /* The following symbols are subsumed in the c_global_trees array, and listed here individually for documentation purposes. INTEGER_TYPE and REAL_TYPE nodes for the standard data types. tree short_integer_type_node; tree long_integer_type_node; tree long_long_integer_type_node; tree short_unsigned_type_node; tree long_unsigned_type_node; tree long_long_unsigned_type_node; tree truthvalue_type_node; tree truthvalue_false_node; tree truthvalue_true_node; tree ptrdiff_type_node; tree unsigned_char_type_node; tree signed_char_type_node; tree wchar_type_node; tree signed_wchar_type_node; tree unsigned_wchar_type_node; tree float_type_node; tree double_type_node; tree long_double_type_node; tree complex_integer_type_node; tree complex_float_type_node; tree complex_double_type_node; tree complex_long_double_type_node; tree intQI_type_node; tree intHI_type_node; tree intSI_type_node; tree intDI_type_node; tree intTI_type_node; tree unsigned_intQI_type_node; tree unsigned_intHI_type_node; tree unsigned_intSI_type_node; tree unsigned_intDI_type_node; tree unsigned_intTI_type_node; tree widest_integer_literal_type_node; tree widest_unsigned_literal_type_node; Nodes for types `void *' and `const void *'. tree ptr_type_node, const_ptr_type_node; Nodes for types `char *' and `const char *'. tree string_type_node, const_string_type_node; Type `char[SOMENUMBER]'. Used when an array of char is needed and the size is irrelevant. tree char_array_type_node; Type `int[SOMENUMBER]' or something like it. Used when an array of int needed and the size is irrelevant. tree int_array_type_node; Type `wchar_t[SOMENUMBER]' or something like it. Used when a wide string literal is created. tree wchar_array_type_node; Type `int ()' -- used for implicit declaration of functions. tree default_function_type; A VOID_TYPE node, packaged in a TREE_LIST. 
tree void_list_node; The lazily created VAR_DECLs for __FUNCTION__, __PRETTY_FUNCTION__, and __func__. (C doesn't generate __FUNCTION__ and __PRETTY_FUNCTION__ VAR_DECLS, but C++ does.) tree function_name_decl_node; tree pretty_function_name_decl_node; tree c99_function_name_decl_node; Stack of nested function name VAR_DECLs. tree saved_function_name_decls; */ tree c_global_trees[C_TREE_IDX_MAX]; /* TRUE if a code represents a statement. The front end init langhook should take care of initialization of this array. */ bool statement_code_p[MAX_TREE_CODES]; /* Switches common to the C front ends. */ /* Nonzero if preprocessing only. */ int flag_preprocess_only; /* Nonzero means don't output line number information. */ char flag_no_line_commands; /* Nonzero causes -E output not to be done, but directives such as #define that have side effects are still obeyed. */ char flag_no_output; /* Nonzero means dump macros in some fashion. */ char flag_dump_macros; /* Nonzero means pass #include lines through to the output. */ char flag_dump_includes; /* Nonzero means process PCH files while preprocessing. */ bool flag_pch_preprocess; /* The file name to which we should write a precompiled header, or NULL if no header will be written in this compile. */ const char *pch_file; /* Nonzero if an ISO standard was selected. It rejects macros in the user's namespace. */ int flag_iso; /* Nonzero if -undef was given. It suppresses target built-in macros and assertions. */ int flag_undef; /* Nonzero means don't recognize the non-ANSI builtin functions. */ int flag_no_builtin; /* Nonzero means don't recognize the non-ANSI builtin functions. -ansi sets this. */ int flag_no_nonansi_builtin; /* Nonzero means give `double' the same size as `float'. */ int flag_short_double; /* Nonzero means give `wchar_t' the same size as `short'. */ int flag_short_wchar; /* Nonzero means allow Microsoft extensions without warnings or errors. */ int flag_ms_extensions; /* Nonzero means don't recognize the keyword `asm'. */ int flag_no_asm; /* Nonzero means give string constants the type `const char *', as mandated by the standard. */ int flag_const_strings; /* Nonzero means to treat bitfields as signed unless they say `unsigned'. */ int flag_signed_bitfields = 1; int explicit_flag_signed_bitfields; /* Nonzero means warn about deprecated conversion from string constant to `char *'. */ int warn_write_strings; /* Warn about #pragma directives that are not recognized. */ int warn_unknown_pragmas; /* Tri state variable. */ /* Warn about format/argument anomalies in calls to formatted I/O functions (*printf, *scanf, strftime, strfmon, etc.). */ int warn_format; /* Zero means that faster, ...NonNil variants of objc_msgSend... calls will be used in ObjC; passing nil receivers to such calls will most likely result in crashes. */ int flag_nil_receivers = 1; /* Nonzero means that we will allow new ObjC exception syntax (@throw, @try, etc.) in source code. */ int flag_objc_exceptions = 0; /* Nonzero means that we generate NeXT setjmp based exceptions. */ int flag_objc_sjlj_exceptions = -1; /* Nonzero means that code generation will be altered to support "zero-link" execution. This currently affects ObjC only, but may affect other languages in the future. */ int flag_zero_link = 0; /* Nonzero means emit an '__OBJC, __image_info' for the current translation unit. It will inform the ObjC runtime that class definition(s) herein contained are to replace one(s) previously loaded.
*/ int flag_replace_objc_classes = 0; /* C/ObjC language option variables. */ /* Nonzero means allow type mismatches in conditional expressions; just make their values `void'. */ int flag_cond_mismatch; /* Nonzero means enable C89 Amendment 1 features. */ int flag_isoc94; /* Nonzero means use the ISO C99 dialect of C. */ int flag_isoc99; /* Nonzero means that we have builtin functions, and main is an int. */ int flag_hosted = 1; /* Warn if main is suspicious. */ int warn_main; /* ObjC language option variables. */ /* Open and close the file for outputting class declarations, if requested (ObjC). */ int flag_gen_declaration; /* Generate code for GNU or NeXT runtime environment. */ #ifdef NEXT_OBJC_RUNTIME int flag_next_runtime = 1; #else int flag_next_runtime = 0; #endif /* Tells the compiler that this is a special run. Do not perform any compiling, instead we are to test some platform dependent features and output a C header file with appropriate definitions. */ int print_struct_values; /* ???. Undocumented. */ const char *constant_string_class_name; /* C++ language option variables. */ /* Nonzero means don't recognize any extension keywords. */ int flag_no_gnu_keywords; /* Nonzero means do emit exported implementations of functions even if they can be inlined. */ int flag_implement_inlines = 1; /* Nonzero means that implicit instantiations will be emitted if needed. */ int flag_implicit_templates = 1; /* Nonzero means that implicit instantiations of inline templates will be emitted if needed, even if instantiations of non-inline templates aren't. */ int flag_implicit_inline_templates = 1; /* Nonzero means generate separate instantiation control files and juggle them at link time. */ int flag_use_repository; /* Nonzero if we want to issue diagnostics that the standard says are not required. */ int flag_optional_diags = 1; /* Nonzero means we should attempt to elide constructors when possible. */ int flag_elide_constructors = 1; /* Nonzero means that member functions defined in class scope are inline by default. */ int flag_default_inline = 1; /* Controls whether compiler generates 'type descriptor' that give run-time type information. */ int flag_rtti = 1; /* Nonzero if we want to conserve space in the .o files. We do this by putting uninitialized data and runtime initialized data into .common instead of .data at the expense of not flagging multiple definitions. */ int flag_conserve_space; /* Nonzero if we want to obey access control semantics. */ int flag_access_control = 1; /* Nonzero if we want to check the return value of new and avoid calling constructors if it is a null pointer. */ int flag_check_new; /* Nonzero if we want the new ISO rules for pushing a new scope for `for' initialization variables. 0: Old rules, set by -fno-for-scope. 2: New ISO rules, set by -ffor-scope. 1: Try to implement new ISO rules, but with backup compatibility (and warnings). This is the default, for now. */ int flag_new_for_scope = 1; /* Nonzero if we want to emit defined symbols with common-like linkage as weak symbols where possible, in order to conform to C++ semantics. Otherwise, emit them as local symbols. */ int flag_weak = 1; /* 0 means we want the preprocessor to not emit line directives for the current working directory. 1 means we want it to do it. -1 means we should decide depending on whether debugging information is being emitted or not. */ int flag_working_directory = -1; /* Nonzero to use __cxa_atexit, rather than atexit, to register destructors for local statics and global objects. 
*/ int flag_use_cxa_atexit = DEFAULT_USE_CXA_ATEXIT; /* Nonzero means make the default pedwarns warnings instead of errors. The value of this flag is ignored if -pedantic is specified. */ int flag_permissive; /* Nonzero means to implement standard semantics for exception specifications, calling unexpected if an exception is thrown that doesn't match the specification. Zero means to treat them as assertions and optimize accordingly, but not check them. */ int flag_enforce_eh_specs = 1; /* Nonzero means warn about implicit declarations. */ int warn_implicit = 1; /* Maximum template instantiation depth. This limit is rather arbitrary, but it exists to limit the time it takes to notice infinite template instantiations. */ int max_tinst_depth = 500; /* The elements of `ridpointers' are identifier nodes for the reserved type names and storage classes. It is indexed by a RID_... value. */ tree *ridpointers; tree (*make_fname_decl) (tree, int); /* If non-NULL, the address of a language-specific function that returns 1 for language-specific statement codes. */ int (*lang_statement_code_p) (enum tree_code); /* If non-NULL, the address of a language-specific function that takes any action required right before expand_function_end is called. */ void (*lang_expand_function_end) (void); /* Nonzero means the expression being parsed will never be evaluated. This is a count, since unevaluated expressions can nest. */ int skip_evaluation; /* Information about how a function name is generated. */ struct fname_var_t { tree *const decl; /* pointer to the VAR_DECL. */ const unsigned rid; /* RID number for the identifier. */ const int pretty; /* How pretty is it? */ }; /* The three ways of getting the name of the current function. */ const struct fname_var_t fname_vars[] = { /* C99 compliant __func__, must be first. */ {&c99_function_name_decl_node, RID_C99_FUNCTION_NAME, 0}, /* GCC __FUNCTION__ compliant. */ {&function_name_decl_node, RID_FUNCTION_NAME, 0}, /* GCC __PRETTY_FUNCTION__ compliant.
*/ {&pretty_function_name_decl_node, RID_PRETTY_FUNCTION_NAME, 1}, {NULL, 0, 0}, }; static int constant_fits_type_p (tree, tree); static tree handle_packed_attribute (tree *, tree, tree, int, bool *); static tree handle_nocommon_attribute (tree *, tree, tree, int, bool *); static tree handle_common_attribute (tree *, tree, tree, int, bool *); static tree handle_noreturn_attribute (tree *, tree, tree, int, bool *); static tree handle_noinline_attribute (tree *, tree, tree, int, bool *); static tree handle_always_inline_attribute (tree *, tree, tree, int, bool *); static tree handle_used_attribute (tree *, tree, tree, int, bool *); static tree handle_unused_attribute (tree *, tree, tree, int, bool *); static tree handle_const_attribute (tree *, tree, tree, int, bool *); static tree handle_transparent_union_attribute (tree *, tree, tree, int, bool *); static tree handle_constructor_attribute (tree *, tree, tree, int, bool *); static tree handle_destructor_attribute (tree *, tree, tree, int, bool *); static tree handle_mode_attribute (tree *, tree, tree, int, bool *); static tree handle_section_attribute (tree *, tree, tree, int, bool *); static tree handle_aligned_attribute (tree *, tree, tree, int, bool *); static tree handle_weak_attribute (tree *, tree, tree, int, bool *) ; static tree handle_alias_attribute (tree *, tree, tree, int, bool *); static tree handle_visibility_attribute (tree *, tree, tree, int, bool *); static tree handle_tls_model_attribute (tree *, tree, tree, int, bool *); static tree handle_no_instrument_function_attribute (tree *, tree, tree, int, bool *); static tree handle_malloc_attribute (tree *, tree, tree, int, bool *); static tree handle_no_limit_stack_attribute (tree *, tree, tree, int, bool *); static tree handle_pure_attribute (tree *, tree, tree, int, bool *); static tree handle_deprecated_attribute (tree *, tree, tree, int, bool *); static tree handle_vector_size_attribute (tree *, tree, tree, int, bool *); static tree handle_nonnull_attribute (tree *, tree, tree, int, bool *); static tree handle_nothrow_attribute (tree *, tree, tree, int, bool *); static tree handle_cleanup_attribute (tree *, tree, tree, int, bool *); static tree handle_warn_unused_result_attribute (tree *, tree, tree, int, bool *); static void check_function_nonnull (tree, tree); static void check_nonnull_arg (void *, tree, unsigned HOST_WIDE_INT); static bool nonnull_check_p (tree, unsigned HOST_WIDE_INT); static bool get_nonnull_operand (tree, unsigned HOST_WIDE_INT *); static int resort_field_decl_cmp (const void *, const void *); /* Table of machine-independent attributes common to all C-like languages. */ const struct attribute_spec c_common_attribute_table[] = { /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ { "packed", 0, 0, false, false, false, handle_packed_attribute }, { "nocommon", 0, 0, true, false, false, handle_nocommon_attribute }, { "common", 0, 0, true, false, false, handle_common_attribute }, /* FIXME: logically, noreturn attributes should be listed as "false, true, true" and apply to function types. But implementing this would require all the places in the compiler that use TREE_THIS_VOLATILE on a decl to identify non-returning functions to be located and fixed to check the function type instead. 
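Until that is done, an attribute use such as `void fatal (const char *) __attribute__ ((noreturn));' -- an illustrative declaration, not taken from this file -- is recorded on the declaration itself rather than on the function type.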
*/ { "noreturn", 0, 0, true, false, false, handle_noreturn_attribute }, { "volatile", 0, 0, true, false, false, handle_noreturn_attribute }, { "noinline", 0, 0, true, false, false, handle_noinline_attribute }, { "always_inline", 0, 0, true, false, false, handle_always_inline_attribute }, { "used", 0, 0, true, false, false, handle_used_attribute }, { "unused", 0, 0, false, false, false, handle_unused_attribute }, /* The same comments as for noreturn attributes apply to const ones. */ { "const", 0, 0, true, false, false, handle_const_attribute }, { "transparent_union", 0, 0, false, false, false, handle_transparent_union_attribute }, { "constructor", 0, 0, true, false, false, handle_constructor_attribute }, { "destructor", 0, 0, true, false, false, handle_destructor_attribute }, { "mode", 1, 1, false, true, false, handle_mode_attribute }, { "section", 1, 1, true, false, false, handle_section_attribute }, { "aligned", 0, 1, false, false, false, handle_aligned_attribute }, { "weak", 0, 0, true, false, false, handle_weak_attribute }, { "alias", 1, 1, true, false, false, handle_alias_attribute }, { "no_instrument_function", 0, 0, true, false, false, handle_no_instrument_function_attribute }, { "malloc", 0, 0, true, false, false, handle_malloc_attribute }, { "no_stack_limit", 0, 0, true, false, false, handle_no_limit_stack_attribute }, { "pure", 0, 0, true, false, false, handle_pure_attribute }, { "deprecated", 0, 0, false, false, false, handle_deprecated_attribute }, { "vector_size", 1, 1, false, true, false, handle_vector_size_attribute }, { "visibility", 1, 1, true, false, false, handle_visibility_attribute }, { "tls_model", 1, 1, true, false, false, handle_tls_model_attribute }, { "nonnull", 0, -1, false, true, true, handle_nonnull_attribute }, { "nothrow", 0, 0, true, false, false, handle_nothrow_attribute }, { "may_alias", 0, 0, false, true, false, NULL }, { "cleanup", 1, 1, true, false, false, handle_cleanup_attribute }, { "warn_unused_result", 0, 0, false, true, true, handle_warn_unused_result_attribute }, { NULL, 0, 0, false, false, false, NULL } }; /* Give the specifications for the format attributes, used by C and all descendants. */ const struct attribute_spec c_common_format_attribute_table[] = { /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ { "format", 3, 3, false, true, true, handle_format_attribute }, { "format_arg", 1, 1, false, true, true, handle_format_arg_attribute }, { NULL, 0, 0, false, false, false, NULL } }; /* Push current bindings for the function name VAR_DECLS. */ void start_fname_decls (void) { unsigned ix; tree saved = NULL_TREE; for (ix = 0; fname_vars[ix].decl; ix++) { tree decl = *fname_vars[ix].decl; if (decl) { saved = tree_cons (decl, build_int_2 (ix, 0), saved); *fname_vars[ix].decl = NULL_TREE; } } if (saved || saved_function_name_decls) /* Normally they'll have been NULL, so only push if we've got a stack, or they are non-NULL. */ saved_function_name_decls = tree_cons (saved, NULL_TREE, saved_function_name_decls); } /* Finish up the current bindings, adding them into the current function's statement tree. This must be done _before_ finish_stmt_tree is called. If there is no current function, we must be at file scope and no statements are involved. Pop the previous bindings. 
*/ void finish_fname_decls (void) { unsigned ix; tree stmts = NULL_TREE; tree stack = saved_function_name_decls; for (; stack && TREE_VALUE (stack); stack = TREE_CHAIN (stack)) append_to_statement_list (TREE_VALUE (stack), &stmts); if (stmts) { tree *bodyp = &DECL_SAVED_TREE (current_function_decl); if (TREE_CODE (*bodyp) == BIND_EXPR) bodyp = &BIND_EXPR_BODY (*bodyp); append_to_statement_list (*bodyp, &stmts); *bodyp = stmts; } for (ix = 0; fname_vars[ix].decl; ix++) *fname_vars[ix].decl = NULL_TREE; if (stack) { /* We had saved values, restore them. */ tree saved; for (saved = TREE_PURPOSE (stack); saved; saved = TREE_CHAIN (saved)) { tree decl = TREE_PURPOSE (saved); unsigned ix = TREE_INT_CST_LOW (TREE_VALUE (saved)); *fname_vars[ix].decl = decl; } stack = TREE_CHAIN (stack); } saved_function_name_decls = stack; } /* Return the text name of the current function, suitably prettified by PRETTY_P. Return string must be freed by caller. */ const char * fname_as_string (int pretty_p) { const char *name = "top level"; char *namep; int vrb = 2; if (! pretty_p) { name = ""; vrb = 0; } if (current_function_decl) name = lang_hooks.decl_printable_name (current_function_decl, vrb); if (c_lex_string_translate) { int len = strlen (name) + 3; /* Two for '"'s. One for NULL. */ cpp_string cstr = { 0, 0 }, strname; namep = xmalloc (len); snprintf (namep, len, "\"%s\"", name); strname.text = (unsigned char *) namep; strname.len = len - 1; if (cpp_interpret_string (parse_in, &strname, 1, &cstr, false)) return (char *) cstr.text; } else namep = xstrdup (name); return namep; } /* Expand DECL if it declares an entity not handled by the common code. */ int c_expand_decl (tree decl) { if (TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl)) { /* Let the back-end know about this variable. */ if (!anon_aggr_type_p (TREE_TYPE (decl))) emit_local_var (decl); else expand_anon_union_decl (decl, NULL_TREE, DECL_ANON_UNION_ELEMS (decl)); } else if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl)) make_rtl_for_local_static (decl); else return 0; return 1; } /* Return the VAR_DECL for a const char array naming the current function. If the VAR_DECL has not yet been created, create it now. RID indicates how it should be formatted and IDENTIFIER_NODE ID is its name (unfortunately C and C++ hold the RID values of keywords in different places, so we can't derive RID from ID in this language independent code. */ tree fname_decl (unsigned int rid, tree id) { unsigned ix; tree decl = NULL_TREE; for (ix = 0; fname_vars[ix].decl; ix++) if (fname_vars[ix].rid == rid) break; decl = *fname_vars[ix].decl; if (!decl) { /* If a tree is built here, it would normally have the lineno of the current statement. Later this tree will be moved to the beginning of the function and this line number will be wrong. To avoid this problem set the lineno to 0 here; that prevents it from appearing in the RTL. */ tree stmts; location_t saved_location = input_location; #ifdef USE_MAPPED_LOCATION input_location = UNKNOWN_LOCATION; #else input_line = 0; #endif stmts = push_stmt_list (); decl = (*make_fname_decl) (id, fname_vars[ix].pretty); stmts = pop_stmt_list (stmts); if (!IS_EMPTY_STMT (stmts)) saved_function_name_decls = tree_cons (decl, stmts, saved_function_name_decls); *fname_vars[ix].decl = decl; input_location = saved_location; } if (!ix && !current_function_decl) pedwarn ("%J'%D' is not defined outside of function scope", decl, decl); return decl; } /* Given a STRING_CST, give it a suitable array-of-chars data type. 
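For example (illustrative figures only), a narrow literal "ab" has TREE_STRING_LENGTH 3 -- the terminating NUL is counted -- so nchars is 3 and the constant is given type char[3]; when flag_const_strings is set, the element type is const-qualified instead.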
*/ tree fix_string_type (tree value) { const int wchar_bytes = TYPE_PRECISION (wchar_type_node) / BITS_PER_UNIT; const int wide_flag = TREE_TYPE (value) == wchar_array_type_node; const int nchars_max = flag_isoc99 ? 4095 : 509; int length = TREE_STRING_LENGTH (value); int nchars; /* Compute the number of elements, for the array type. */ nchars = wide_flag ? length / wchar_bytes : length; if (pedantic && nchars - 1 > nchars_max && !c_dialect_cxx ()) pedwarn ("string length `%d' is greater than the length `%d' ISO C%d compilers are required to support", nchars - 1, nchars_max, flag_isoc99 ? 99 : 89); /* Create the array type for the string constant. -Wwrite-strings says make the string constant an array of const char so that copying it to a non-const pointer will get a warning. For C++, this is the standard behavior. */ if (flag_const_strings) { tree elements = build_type_variant (wide_flag ? wchar_type_node : char_type_node, 1, 0); TREE_TYPE (value) = build_array_type (elements, build_index_type (build_int_2 (nchars - 1, 0))); } else TREE_TYPE (value) = build_array_type (wide_flag ? wchar_type_node : char_type_node, build_index_type (build_int_2 (nchars - 1, 0))); TREE_CONSTANT (value) = 1; TREE_INVARIANT (value) = 1; TREE_READONLY (value) = 1; TREE_STATIC (value) = 1; return value; } /* Print a warning if a constant expression had overflow in folding. Invoke this function on every expression that the language requires to be a constant expression. Note the ANSI C standard says it is erroneous for a constant expression to overflow. */ void constant_expression_warning (tree value) { if ((TREE_CODE (value) == INTEGER_CST || TREE_CODE (value) == REAL_CST || TREE_CODE (value) == VECTOR_CST || TREE_CODE (value) == COMPLEX_CST) && TREE_CONSTANT_OVERFLOW (value) && pedantic) pedwarn ("overflow in constant expression"); } /* Print a warning if an expression had overflow in folding. Invoke this function on every expression that (1) appears in the source code, and (2) might be a constant expression that overflowed, and (3) is not already checked by convert_and_check; however, do not invoke this function on operands of explicit casts. */ void overflow_warning (tree value) { if ((TREE_CODE (value) == INTEGER_CST || (TREE_CODE (value) == COMPLEX_CST && TREE_CODE (TREE_REALPART (value)) == INTEGER_CST)) && TREE_OVERFLOW (value)) { TREE_OVERFLOW (value) = 0; if (skip_evaluation == 0) warning ("integer overflow in expression"); } else if ((TREE_CODE (value) == REAL_CST || (TREE_CODE (value) == COMPLEX_CST && TREE_CODE (TREE_REALPART (value)) == REAL_CST)) && TREE_OVERFLOW (value)) { TREE_OVERFLOW (value) = 0; if (skip_evaluation == 0) warning ("floating point overflow in expression"); } else if (TREE_CODE (value) == VECTOR_CST && TREE_OVERFLOW (value)) { TREE_OVERFLOW (value) = 0; if (skip_evaluation == 0) warning ("vector overflow in expression"); } } /* Print a warning if a large constant is truncated to unsigned, or if -Wconversion is used and a constant < 0 is converted to unsigned. Invoke this function on every expression that might be implicitly converted to an unsigned type. */ void unsigned_conversion_warning (tree result, tree operand) { tree type = TREE_TYPE (result); if (TREE_CODE (operand) == INTEGER_CST && TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type) && skip_evaluation == 0 && !int_fits_type_p (operand, type)) { if (!int_fits_type_p (operand, c_common_signed_type (type))) /* This detects cases like converting -129 or 256 to unsigned char. 
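(Neither -129 nor 256 fits the signed counterpart of unsigned char, so they take the truncation warning below; a value such as -1, which does fit signed char, falls into the -Wconversion branch instead. An illustrative reading of the test, not an exhaustive one.)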
*/ warning ("large integer implicitly truncated to unsigned type"); else if (warn_conversion) warning ("negative integer implicitly converted to unsigned type"); } } /* Nonzero if constant C has a value that is permissible for type TYPE (an INTEGER_TYPE). */ static int constant_fits_type_p (tree c, tree type) { if (TREE_CODE (c) == INTEGER_CST) return int_fits_type_p (c, type); c = convert (type, c); return !TREE_OVERFLOW (c); } /* Nonzero if vector types T1 and T2 can be converted to each other without an explicit cast. */ int vector_types_convertible_p (tree t1, tree t2) { return targetm.vector_opaque_p (t1) || targetm.vector_opaque_p (t2) || TYPE_MODE (t1) == TYPE_MODE (t2); } /* Convert EXPR to TYPE, warning about conversion problems with constants. Invoke this function on every expression that is converted implicitly, i.e. because of language rules and not because of an explicit cast. */ tree convert_and_check (tree type, tree expr) { tree t = convert (type, expr); if (TREE_CODE (t) == INTEGER_CST) { if (TREE_OVERFLOW (t)) { TREE_OVERFLOW (t) = 0; /* Do not diagnose overflow in a constant expression merely because a conversion overflowed. */ TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (expr); /* No warning for converting 0x80000000 to int. */ if (!(TYPE_UNSIGNED (type) < TYPE_UNSIGNED (TREE_TYPE (expr)) && TREE_CODE (TREE_TYPE (expr)) == INTEGER_TYPE && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (expr)))) /* If EXPR fits in the unsigned version of TYPE, don't warn unless pedantic. */ if ((pedantic || TYPE_UNSIGNED (type) || ! constant_fits_type_p (expr, c_common_unsigned_type (type))) && skip_evaluation == 0) warning ("overflow in implicit constant conversion"); } else unsigned_conversion_warning (t, expr); } return t; } /* A node in a list that describes references to variables (EXPR), which are either read accesses if WRITER is zero, or write accesses, in which case WRITER is the parent of EXPR. */ struct tlist { struct tlist *next; tree expr, writer; }; /* Used to implement a cache the results of a call to verify_tree. We only use this for SAVE_EXPRs. */ struct tlist_cache { struct tlist_cache *next; struct tlist *cache_before_sp; struct tlist *cache_after_sp; tree expr; }; /* Obstack to use when allocating tlist structures, and corresponding firstobj. */ static struct obstack tlist_obstack; static char *tlist_firstobj = 0; /* Keep track of the identifiers we've warned about, so we can avoid duplicate warnings. */ static struct tlist *warned_ids; /* SAVE_EXPRs need special treatment. We process them only once and then cache the results. */ static struct tlist_cache *save_expr_cache; static void add_tlist (struct tlist **, struct tlist *, tree, int); static void merge_tlist (struct tlist **, struct tlist *, int); static void verify_tree (tree, struct tlist **, struct tlist **, tree); static int warning_candidate_p (tree); static void warn_for_collisions (struct tlist *); static void warn_for_collisions_1 (tree, tree, struct tlist *, int); static struct tlist *new_tlist (struct tlist *, tree, tree); /* Create a new struct tlist and fill in its fields. */ static struct tlist * new_tlist (struct tlist *next, tree t, tree writer) { struct tlist *l; l = obstack_alloc (&tlist_obstack, sizeof *l); l->next = next; l->expr = t; l->writer = writer; return l; } /* Add duplicates of the nodes found in ADD to the list *TO. If EXCLUDE_WRITER is nonnull, we ignore any node we find which has a writer equal to it. 
*/ static void add_tlist (struct tlist **to, struct tlist *add, tree exclude_writer, int copy) { while (add) { struct tlist *next = add->next; if (! copy) add->next = *to; if (! exclude_writer || add->writer != exclude_writer) *to = copy ? new_tlist (*to, add->expr, add->writer) : add; add = next; } } /* Merge the nodes of ADD into TO. This merging process is done so that for each variable that already exists in TO, no new node is added; however if there is a write access recorded in ADD, and an occurrence on TO is only a read access, then the occurrence in TO will be modified to record the write. */ static void merge_tlist (struct tlist **to, struct tlist *add, int copy) { struct tlist **end = to; while (*end) end = &(*end)->next; while (add) { int found = 0; struct tlist *tmp2; struct tlist *next = add->next; for (tmp2 = *to; tmp2; tmp2 = tmp2->next) if (tmp2->expr == add->expr) { found = 1; if (! tmp2->writer) tmp2->writer = add->writer; } if (! found) { *end = copy ? add : new_tlist (NULL, add->expr, add->writer); end = &(*end)->next; *end = 0; } add = next; } } /* WRITTEN is a variable, WRITER is its parent. Warn if any of the variable references in list LIST conflict with it, excluding reads if ONLY_WRITES is nonzero. */ static void warn_for_collisions_1 (tree written, tree writer, struct tlist *list, int only_writes) { struct tlist *tmp; /* Avoid duplicate warnings. */ for (tmp = warned_ids; tmp; tmp = tmp->next) if (tmp->expr == written) return; while (list) { if (list->expr == written && list->writer != writer && (! only_writes || list->writer) && DECL_NAME (list->expr)) { warned_ids = new_tlist (warned_ids, written, NULL_TREE); warning ("operation on `%s' may be undefined", IDENTIFIER_POINTER (DECL_NAME (list->expr))); } list = list->next; } } /* Given a list LIST of references to variables, find whether any of these can cause conflicts due to missing sequence points. */ static void warn_for_collisions (struct tlist *list) { struct tlist *tmp; for (tmp = list; tmp; tmp = tmp->next) { if (tmp->writer) warn_for_collisions_1 (tmp->expr, tmp->writer, list, 0); } } /* Return nonzero if X is a tree that can be verified by the sequence point warnings. */ static int warning_candidate_p (tree x) { return TREE_CODE (x) == VAR_DECL || TREE_CODE (x) == PARM_DECL; } /* Walk the tree X, and record accesses to variables. If X is written by the parent tree, WRITER is the parent. We store accesses in one of the two lists: PBEFORE_SP, and PNO_SP. If this expression or its only operand forces a sequence point, then everything up to the sequence point is stored in PBEFORE_SP. Everything else gets stored in PNO_SP. Once we return, we will have emitted warnings if any subexpression before such a sequence point could be undefined. On a higher level, however, the sequence point may not be relevant, and we'll merge the two lists. Example: (b++, a) + b; The call that processes the COMPOUND_EXPR will store the increment of B in PBEFORE_SP, and the use of A in PNO_SP. The higher-level call that processes the PLUS_EXPR will need to merge the two lists so that eventually, all accesses end up on the same list (and we'll warn about the unordered subexpressions b++ and b). A note on merging. If we modify the former example so that our expression becomes (b++, b) + a, care must be taken not simply to add all three expressions into the final PNO_SP list.
The function merge_tlist takes care of that by merging the before-SP list of the COMPOUND_EXPR into its after-SP list in a special way, so that no more than one access to B is recorded. */ static void verify_tree (tree x, struct tlist **pbefore_sp, struct tlist **pno_sp, tree writer) { struct tlist *tmp_before, *tmp_nosp, *tmp_list2, *tmp_list3; enum tree_code code; char class; /* X may be NULL if it is the operand of an empty statement expression ({ }). */ if (x == NULL) return; restart: code = TREE_CODE (x); class = TREE_CODE_CLASS (code); if (warning_candidate_p (x)) { *pno_sp = new_tlist (*pno_sp, x, writer); return; } switch (code) { case CONSTRUCTOR: return; case COMPOUND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: tmp_before = tmp_nosp = tmp_list3 = 0; verify_tree (TREE_OPERAND (x, 0), &tmp_before, &tmp_nosp, NULL_TREE); warn_for_collisions (tmp_nosp); merge_tlist (pbefore_sp, tmp_before, 0); merge_tlist (pbefore_sp, tmp_nosp, 0); verify_tree (TREE_OPERAND (x, 1), &tmp_list3, pno_sp, NULL_TREE); merge_tlist (pbefore_sp, tmp_list3, 0); return; case COND_EXPR: tmp_before = tmp_list2 = 0; verify_tree (TREE_OPERAND (x, 0), &tmp_before, &tmp_list2, NULL_TREE); warn_for_collisions (tmp_list2); merge_tlist (pbefore_sp, tmp_before, 0); merge_tlist (pbefore_sp, tmp_list2, 1); tmp_list3 = tmp_nosp = 0; verify_tree (TREE_OPERAND (x, 1), &tmp_list3, &tmp_nosp, NULL_TREE); warn_for_collisions (tmp_nosp); merge_tlist (pbefore_sp, tmp_list3, 0); tmp_list3 = tmp_list2 = 0; verify_tree (TREE_OPERAND (x, 2), &tmp_list3, &tmp_list2, NULL_TREE); warn_for_collisions (tmp_list2); merge_tlist (pbefore_sp, tmp_list3, 0); /* Rather than add both tmp_nosp and tmp_list2, we have to merge the two first, to avoid warning for (a ? b++ : b++). */ merge_tlist (&tmp_nosp, tmp_list2, 0); add_tlist (pno_sp, tmp_nosp, NULL_TREE, 0); return; case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: verify_tree (TREE_OPERAND (x, 0), pno_sp, pno_sp, x); return; case MODIFY_EXPR: tmp_before = tmp_nosp = tmp_list3 = 0; verify_tree (TREE_OPERAND (x, 1), &tmp_before, &tmp_nosp, NULL_TREE); verify_tree (TREE_OPERAND (x, 0), &tmp_list3, &tmp_list3, x); /* Expressions inside the LHS are not ordered wrt. the sequence points in the RHS. Example: *a = (a++, 2) Despite the fact that the modification of "a" is in the before_sp list (tmp_before), it conflicts with the use of "a" in the LHS. We can handle this by adding the contents of tmp_list3 to those of tmp_before, and redoing the collision warnings for that list. */ add_tlist (&tmp_before, tmp_list3, x, 1); warn_for_collisions (tmp_before); /* Exclude the LHS itself here; we first have to merge it into the tmp_nosp list. This is done to avoid warning for "a = a"; if we didn't exclude the LHS, we'd get it twice, once as a read and once as a write. */ add_tlist (pno_sp, tmp_list3, x, 0); warn_for_collisions_1 (TREE_OPERAND (x, 0), x, tmp_nosp, 1); merge_tlist (pbefore_sp, tmp_before, 0); if (warning_candidate_p (TREE_OPERAND (x, 0))) merge_tlist (&tmp_nosp, new_tlist (NULL, TREE_OPERAND (x, 0), x), 0); add_tlist (pno_sp, tmp_nosp, NULL_TREE, 1); return; case CALL_EXPR: /* We need to warn about conflicts among arguments and conflicts between args and the function address. Side effects of the function address, however, are not ordered by the sequence point of the call. 
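An illustrative case: in a call such as f (i++, i) the two argument accesses to i conflict with each other and are reported below as possibly undefined, because the arguments are not sequenced relative to one another.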
*/ tmp_before = tmp_nosp = tmp_list2 = tmp_list3 = 0; verify_tree (TREE_OPERAND (x, 0), &tmp_before, &tmp_nosp, NULL_TREE); if (TREE_OPERAND (x, 1)) verify_tree (TREE_OPERAND (x, 1), &tmp_list2, &tmp_list3, NULL_TREE); merge_tlist (&tmp_list3, tmp_list2, 0); add_tlist (&tmp_before, tmp_list3, NULL_TREE, 0); add_tlist (&tmp_before, tmp_nosp, NULL_TREE, 0); warn_for_collisions (tmp_before); add_tlist (pbefore_sp, tmp_before, NULL_TREE, 0); return; case TREE_LIST: /* Scan all the list, e.g. indices of multi dimensional array. */ while (x) { tmp_before = tmp_nosp = 0; verify_tree (TREE_VALUE (x), &tmp_before, &tmp_nosp, NULL_TREE); merge_tlist (&tmp_nosp, tmp_before, 0); add_tlist (pno_sp, tmp_nosp, NULL_TREE, 0); x = TREE_CHAIN (x); } return; case SAVE_EXPR: { struct tlist_cache *t; for (t = save_expr_cache; t; t = t->next) if (t->expr == x) break; if (! t) { t = obstack_alloc (&tlist_obstack, sizeof *t); t->next = save_expr_cache; t->expr = x; save_expr_cache = t; tmp_before = tmp_nosp = 0; verify_tree (TREE_OPERAND (x, 0), &tmp_before, &tmp_nosp, NULL_TREE); warn_for_collisions (tmp_nosp); tmp_list3 = 0; while (tmp_nosp) { struct tlist *t = tmp_nosp; tmp_nosp = t->next; merge_tlist (&tmp_list3, t, 0); } t->cache_before_sp = tmp_before; t->cache_after_sp = tmp_list3; } merge_tlist (pbefore_sp, t->cache_before_sp, 1); add_tlist (pno_sp, t->cache_after_sp, NULL_TREE, 1); return; } default: break; } if (class == '1') { if (first_rtl_op (code) == 0) return; x = TREE_OPERAND (x, 0); writer = 0; goto restart; } switch (class) { case 'r': case '<': case '2': case 'e': case 's': case 'x': { int lp; int max = first_rtl_op (TREE_CODE (x)); for (lp = 0; lp < max; lp++) { tmp_before = tmp_nosp = 0; verify_tree (TREE_OPERAND (x, lp), &tmp_before, &tmp_nosp, NULL_TREE); merge_tlist (&tmp_nosp, tmp_before, 0); add_tlist (pno_sp, tmp_nosp, NULL_TREE, 0); } break; } } } /* Try to warn for undefined behavior in EXPR due to missing sequence points. */ void verify_sequence_points (tree expr) { struct tlist *before_sp = 0, *after_sp = 0; warned_ids = 0; save_expr_cache = 0; if (tlist_firstobj == 0) { gcc_obstack_init (&tlist_obstack); tlist_firstobj = obstack_alloc (&tlist_obstack, 0); } verify_tree (expr, &before_sp, &after_sp, 0); warn_for_collisions (after_sp); obstack_free (&tlist_obstack, tlist_firstobj); } /* Validate the expression after `case' and apply default promotions. */ tree check_case_value (tree value) { if (value == NULL_TREE) return value; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (value); /* In C++, the following is allowed: const int i = 3; switch (...) { case i: ... } So, we try to reduce the VALUE to a constant that way. */ if (c_dialect_cxx ()) { value = decl_constant_value (value); STRIP_TYPE_NOPS (value); value = fold (value); } if (TREE_CODE (value) != INTEGER_CST && value != error_mark_node) { error ("case label does not reduce to an integer constant"); value = error_mark_node; } else /* Promote char or short to int. */ value = default_conversion (value); constant_expression_warning (value); return value; } /* Return an integer type with BITS bits of precision, that is unsigned if UNSIGNEDP is nonzero, otherwise signed. */ tree c_common_type_for_size (unsigned int bits, int unsignedp) { if (bits == TYPE_PRECISION (integer_type_node)) return unsignedp ? unsigned_type_node : integer_type_node; if (bits == TYPE_PRECISION (signed_char_type_node)) return unsignedp ? 
unsigned_char_type_node : signed_char_type_node; if (bits == TYPE_PRECISION (short_integer_type_node)) return unsignedp ? short_unsigned_type_node : short_integer_type_node; if (bits == TYPE_PRECISION (long_integer_type_node)) return unsignedp ? long_unsigned_type_node : long_integer_type_node; if (bits == TYPE_PRECISION (long_long_integer_type_node)) return (unsignedp ? long_long_unsigned_type_node : long_long_integer_type_node); if (bits == TYPE_PRECISION (widest_integer_literal_type_node)) return (unsignedp ? widest_unsigned_literal_type_node : widest_integer_literal_type_node); if (bits <= TYPE_PRECISION (intQI_type_node)) return unsignedp ? unsigned_intQI_type_node : intQI_type_node; if (bits <= TYPE_PRECISION (intHI_type_node)) return unsignedp ? unsigned_intHI_type_node : intHI_type_node; if (bits <= TYPE_PRECISION (intSI_type_node)) return unsignedp ? unsigned_intSI_type_node : intSI_type_node; if (bits <= TYPE_PRECISION (intDI_type_node)) return unsignedp ? unsigned_intDI_type_node : intDI_type_node; return 0; } /* Used for communication between c_common_type_for_mode and c_register_builtin_type. */ static GTY(()) tree registered_builtin_types; /* Return a data type that has machine mode MODE. If the mode is an integer, then UNSIGNEDP selects between signed and unsigned types. */ tree c_common_type_for_mode (enum machine_mode mode, int unsignedp) { tree t; if (mode == TYPE_MODE (integer_type_node)) return unsignedp ? unsigned_type_node : integer_type_node; if (mode == TYPE_MODE (signed_char_type_node)) return unsignedp ? unsigned_char_type_node : signed_char_type_node; if (mode == TYPE_MODE (short_integer_type_node)) return unsignedp ? short_unsigned_type_node : short_integer_type_node; if (mode == TYPE_MODE (long_integer_type_node)) return unsignedp ? long_unsigned_type_node : long_integer_type_node; if (mode == TYPE_MODE (long_long_integer_type_node)) return unsignedp ? long_long_unsigned_type_node : long_long_integer_type_node; if (mode == TYPE_MODE (widest_integer_literal_type_node)) return unsignedp ? widest_unsigned_literal_type_node : widest_integer_literal_type_node; if (mode == QImode) return unsignedp ? unsigned_intQI_type_node : intQI_type_node; if (mode == HImode) return unsignedp ? unsigned_intHI_type_node : intHI_type_node; if (mode == SImode) return unsignedp ? unsigned_intSI_type_node : intSI_type_node; if (mode == DImode) return unsignedp ? unsigned_intDI_type_node : intDI_type_node; #if HOST_BITS_PER_WIDE_INT >= 64 if (mode == TYPE_MODE (intTI_type_node)) return unsignedp ? unsigned_intTI_type_node : intTI_type_node; #endif if (mode == TYPE_MODE (float_type_node)) return float_type_node; if (mode == TYPE_MODE (double_type_node)) return double_type_node; if (mode == TYPE_MODE (long_double_type_node)) return long_double_type_node; if (mode == TYPE_MODE (void_type_node)) return void_type_node; if (mode == TYPE_MODE (build_pointer_type (char_type_node))) return unsignedp ? make_unsigned_type (mode) : make_signed_type (mode); if (mode == TYPE_MODE (build_pointer_type (integer_type_node))) return unsignedp ? 
make_unsigned_type (mode) : make_signed_type (mode); if (VECTOR_MODE_P (mode)) { enum machine_mode inner_mode = GET_MODE_INNER (mode); tree inner_type = c_common_type_for_mode (inner_mode, unsignedp); if (inner_type != NULL_TREE) return build_vector_type_for_mode (inner_type, mode); } for (t = registered_builtin_types; t; t = TREE_CHAIN (t)) if (TYPE_MODE (TREE_VALUE (t)) == mode) return TREE_VALUE (t); return 0; } /* Return an unsigned type the same as TYPE in other respects. */ tree c_common_unsigned_type (tree type) { tree type1 = TYPE_MAIN_VARIANT (type); if (type1 == signed_char_type_node || type1 == char_type_node) return unsigned_char_type_node; if (type1 == integer_type_node) return unsigned_type_node; if (type1 == short_integer_type_node) return short_unsigned_type_node; if (type1 == long_integer_type_node) return long_unsigned_type_node; if (type1 == long_long_integer_type_node) return long_long_unsigned_type_node; if (type1 == widest_integer_literal_type_node) return widest_unsigned_literal_type_node; #if HOST_BITS_PER_WIDE_INT >= 64 if (type1 == intTI_type_node) return unsigned_intTI_type_node; #endif if (type1 == intDI_type_node) return unsigned_intDI_type_node; if (type1 == intSI_type_node) return unsigned_intSI_type_node; if (type1 == intHI_type_node) return unsigned_intHI_type_node; if (type1 == intQI_type_node) return unsigned_intQI_type_node; return c_common_signed_or_unsigned_type (1, type); } /* Return a signed type the same as TYPE in other respects. */ tree c_common_signed_type (tree type) { tree type1 = TYPE_MAIN_VARIANT (type); if (type1 == unsigned_char_type_node || type1 == char_type_node) return signed_char_type_node; if (type1 == unsigned_type_node) return integer_type_node; if (type1 == short_unsigned_type_node) return short_integer_type_node; if (type1 == long_unsigned_type_node) return long_integer_type_node; if (type1 == long_long_unsigned_type_node) return long_long_integer_type_node; if (type1 == widest_unsigned_literal_type_node) return widest_integer_literal_type_node; #if HOST_BITS_PER_WIDE_INT >= 64 if (type1 == unsigned_intTI_type_node) return intTI_type_node; #endif if (type1 == unsigned_intDI_type_node) return intDI_type_node; if (type1 == unsigned_intSI_type_node) return intSI_type_node; if (type1 == unsigned_intHI_type_node) return intHI_type_node; if (type1 == unsigned_intQI_type_node) return intQI_type_node; return c_common_signed_or_unsigned_type (0, type); } /* Return a type the same as TYPE except unsigned or signed according to UNSIGNEDP. */ tree c_common_signed_or_unsigned_type (int unsignedp, tree type) { if (! INTEGRAL_TYPE_P (type) || TYPE_UNSIGNED (type) == unsignedp) return type; /* Must check the mode of the types, not the precision. Enumeral types in C++ have precision set to match their range, but may use a wider mode to match an ABI. If we change modes, we may wind up with bad conversions. */ if (TYPE_MODE (type) == TYPE_MODE (signed_char_type_node)) return unsignedp ? unsigned_char_type_node : signed_char_type_node; if (TYPE_MODE (type) == TYPE_MODE (integer_type_node)) return unsignedp ? unsigned_type_node : integer_type_node; if (TYPE_MODE (type) == TYPE_MODE (short_integer_type_node)) return unsignedp ? short_unsigned_type_node : short_integer_type_node; if (TYPE_MODE (type) == TYPE_MODE (long_integer_type_node)) return unsignedp ? long_unsigned_type_node : long_integer_type_node; if (TYPE_MODE (type) == TYPE_MODE (long_long_integer_type_node)) return (unsignedp ? 
long_long_unsigned_type_node : long_long_integer_type_node); if (TYPE_MODE (type) == TYPE_MODE (widest_integer_literal_type_node)) return (unsignedp ? widest_unsigned_literal_type_node : widest_integer_literal_type_node); #if HOST_BITS_PER_WIDE_INT >= 64 if (TYPE_MODE (type) == TYPE_MODE (intTI_type_node)) return unsignedp ? unsigned_intTI_type_node : intTI_type_node; #endif if (TYPE_MODE (type) == TYPE_MODE (intDI_type_node)) return unsignedp ? unsigned_intDI_type_node : intDI_type_node; if (TYPE_MODE (type) == TYPE_MODE (intSI_type_node)) return unsignedp ? unsigned_intSI_type_node : intSI_type_node; if (TYPE_MODE (type) == TYPE_MODE (intHI_type_node)) return unsignedp ? unsigned_intHI_type_node : intHI_type_node; if (TYPE_MODE (type) == TYPE_MODE (intQI_type_node)) return unsignedp ? unsigned_intQI_type_node : intQI_type_node; return type; } /* The C version of the register_builtin_type langhook. */ void c_register_builtin_type (tree type, const char* name) { tree decl; decl = build_decl (TYPE_DECL, get_identifier (name), type); DECL_ARTIFICIAL (decl) = 1; if (!TYPE_NAME (type)) TYPE_NAME (type) = decl; pushdecl (decl); registered_builtin_types = tree_cons (0, type, registered_builtin_types); } /* Return the minimum number of bits needed to represent VALUE in a signed or unsigned type, UNSIGNEDP says which. */ unsigned int min_precision (tree value, int unsignedp) { int log; /* If the value is negative, compute its negative minus 1. The latter adjustment is because the absolute value of the largest negative value is one larger than the largest positive value. This is equivalent to a bit-wise negation, so use that operation instead. */ if (tree_int_cst_sgn (value) < 0) value = fold (build1 (BIT_NOT_EXPR, TREE_TYPE (value), value)); /* Return the number of bits needed, taking into account the fact that we need one more bit for a signed than unsigned type. */ if (integer_zerop (value)) log = 0; else log = tree_floor_log2 (value); return log + 1 + ! unsignedp; } /* Print an error message for invalid operands to arith operation CODE. NOP_EXPR is used as a special case (see c_common_truthvalue_conversion). */ void binary_op_error (enum tree_code code) { const char *opname; switch (code) { case NOP_EXPR: error ("invalid truth-value expression"); return; case PLUS_EXPR: opname = "+"; break; case MINUS_EXPR: opname = "-"; break; case MULT_EXPR: opname = "*"; break; case MAX_EXPR: opname = "max"; break; case MIN_EXPR: opname = "min"; break; case EQ_EXPR: opname = "=="; break; case NE_EXPR: opname = "!="; break; case LE_EXPR: opname = "<="; break; case GE_EXPR: opname = ">="; break; case LT_EXPR: opname = "<"; break; case GT_EXPR: opname = ">"; break; case LSHIFT_EXPR: opname = "<<"; break; case RSHIFT_EXPR: opname = ">>"; break; case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: opname = "%"; break; case TRUNC_DIV_EXPR: case FLOOR_DIV_EXPR: opname = "/"; break; case BIT_AND_EXPR: opname = "&"; break; case BIT_IOR_EXPR: opname = "|"; break; case TRUTH_ANDIF_EXPR: opname = "&&"; break; case TRUTH_ORIF_EXPR: opname = "||"; break; case BIT_XOR_EXPR: opname = "^"; break; case LROTATE_EXPR: case RROTATE_EXPR: opname = "rotate"; break; default: opname = "unknown"; break; } error ("invalid operands to binary %s", opname); } /* Subroutine of build_binary_op, used for comparison operations. See if the operands have both been converted from subword integer types and, if so, perhaps change them both back to their original type. 
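(For instance -- an illustrative case -- two `short' operands that were promoted to `int' for the comparison can be compared directly in their original width.)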
This function is also responsible for converting the two operands to the proper common type for comparison. The arguments of this function are all pointers to local variables of build_binary_op: OP0_PTR is &OP0, OP1_PTR is &OP1, RESTYPE_PTR is &RESULT_TYPE and RESCODE_PTR is &RESULTCODE. If this function returns nonzero, it means that the comparison has a constant value. What this function returns is an expression for that value. */ tree shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr, enum tree_code *rescode_ptr) { tree type; tree op0 = *op0_ptr; tree op1 = *op1_ptr; int unsignedp0, unsignedp1; int real1, real2; tree primop0, primop1; enum tree_code code = *rescode_ptr; /* Throw away any conversions to wider types already present in the operands. */ primop0 = get_narrower (op0, &unsignedp0); primop1 = get_narrower (op1, &unsignedp1); /* Handle the case that OP0 does not *contain* a conversion but it *requires* conversion to FINAL_TYPE. */ if (op0 == primop0 && TREE_TYPE (op0) != *restype_ptr) unsignedp0 = TYPE_UNSIGNED (TREE_TYPE (op0)); if (op1 == primop1 && TREE_TYPE (op1) != *restype_ptr) unsignedp1 = TYPE_UNSIGNED (TREE_TYPE (op1)); /* If one of the operands must be floated, we cannot optimize. */ real1 = TREE_CODE (TREE_TYPE (primop0)) == REAL_TYPE; real2 = TREE_CODE (TREE_TYPE (primop1)) == REAL_TYPE; /* If first arg is constant, swap the args (changing operation so value is preserved), for canonicalization. Don't do this if the second arg is 0. */ if (TREE_CONSTANT (primop0) && ! integer_zerop (primop1) && ! real_zerop (primop1)) { tree tem = primop0; int temi = unsignedp0; primop0 = primop1; primop1 = tem; tem = op0; op0 = op1; op1 = tem; *op0_ptr = op0; *op1_ptr = op1; unsignedp0 = unsignedp1; unsignedp1 = temi; temi = real1; real1 = real2; real2 = temi; switch (code) { case LT_EXPR: code = GT_EXPR; break; case GT_EXPR: code = LT_EXPR; break; case LE_EXPR: code = GE_EXPR; break; case GE_EXPR: code = LE_EXPR; break; default: break; } *rescode_ptr = code; } /* If comparing an integer against a constant more bits wide, maybe we can deduce a value of 1 or 0 independent of the data. Or else truncate the constant now rather than extend the variable at run time. This is only interesting if the constant is the wider arg. Also, it is not safe if the constant is unsigned and the variable arg is signed, since in this case the variable would be sign-extended and then regarded as unsigned. Our technique fails in this case because the lowest/highest possible unsigned results don't follow naturally from the lowest/highest possible values of the variable operand. For just EQ_EXPR and NE_EXPR there is another technique that could be used: see if the constant can be faithfully represented in the other operand's type, by truncating it and reextending it and see if that preserves the constant's value. */ if (!real1 && !real2 && TREE_CODE (primop1) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (primop0)) < TYPE_PRECISION (*restype_ptr)) { int min_gt, max_gt, min_lt, max_lt; tree maxval, minval; /* 1 if comparison is nominally unsigned. */ int unsignedp = TYPE_UNSIGNED (*restype_ptr); tree val; type = c_common_signed_or_unsigned_type (unsignedp0, TREE_TYPE (primop0)); /* In C, if TYPE is an enumeration, then we need to get its min/max values from its underlying integral type, not the enumerated type itself. In C++, TYPE_MAX_VALUE and TYPE_MIN_VALUE have already been set correctly on the enumeration type. 
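As an illustrative example of this whole block: given `unsigned char c', the test `c == 300' can never hold, so the code below folds it to false and, because c itself is not a constant, warns that the comparison is always false due to the limited range of the data type.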
*/ if (!c_dialect_cxx() && TREE_CODE (type) == ENUMERAL_TYPE) type = c_common_type_for_size (TYPE_PRECISION (type), unsignedp0); maxval = TYPE_MAX_VALUE (type); minval = TYPE_MIN_VALUE (type); if (unsignedp && !unsignedp0) *restype_ptr = c_common_signed_type (*restype_ptr); if (TREE_TYPE (primop1) != *restype_ptr) primop1 = convert (*restype_ptr, primop1); if (type != *restype_ptr) { minval = convert (*restype_ptr, minval); maxval = convert (*restype_ptr, maxval); } if (unsignedp && unsignedp0) { min_gt = INT_CST_LT_UNSIGNED (primop1, minval); max_gt = INT_CST_LT_UNSIGNED (primop1, maxval); min_lt = INT_CST_LT_UNSIGNED (minval, primop1); max_lt = INT_CST_LT_UNSIGNED (maxval, primop1); } else { min_gt = INT_CST_LT (primop1, minval); max_gt = INT_CST_LT (primop1, maxval); min_lt = INT_CST_LT (minval, primop1); max_lt = INT_CST_LT (maxval, primop1); } val = 0; /* This used to be a switch, but Genix compiler can't handle that. */ if (code == NE_EXPR) { if (max_lt || min_gt) val = truthvalue_true_node; } else if (code == EQ_EXPR) { if (max_lt || min_gt) val = truthvalue_false_node; } else if (code == LT_EXPR) { if (max_lt) val = truthvalue_true_node; if (!min_lt) val = truthvalue_false_node; } else if (code == GT_EXPR) { if (min_gt) val = truthvalue_true_node; if (!max_gt) val = truthvalue_false_node; } else if (code == LE_EXPR) { if (!max_gt) val = truthvalue_true_node; if (min_gt) val = truthvalue_false_node; } else if (code == GE_EXPR) { if (!min_lt) val = truthvalue_true_node; if (max_lt) val = truthvalue_false_node; } /* If primop0 was sign-extended and unsigned comparison specd, we did a signed comparison above using the signed type bounds. But the comparison we output must be unsigned. Also, for inequalities, VAL is no good; but if the signed comparison had *any* fixed result, it follows that the unsigned comparison just tests the sign in reverse (positive values are LE, negative ones GE). So we can generate an unsigned comparison against an extreme value of the signed type. */ if (unsignedp && !unsignedp0) { if (val != 0) switch (code) { case LT_EXPR: case GE_EXPR: primop1 = TYPE_MIN_VALUE (type); val = 0; break; case LE_EXPR: case GT_EXPR: primop1 = TYPE_MAX_VALUE (type); val = 0; break; default: break; } type = c_common_unsigned_type (type); } if (TREE_CODE (primop0) != INTEGER_CST) { if (val == truthvalue_false_node) warning ("comparison is always false due to limited range of data type"); if (val == truthvalue_true_node) warning ("comparison is always true due to limited range of data type"); } if (val != 0) { /* Don't forget to evaluate PRIMOP0 if it has side effects. */ if (TREE_SIDE_EFFECTS (primop0)) return build (COMPOUND_EXPR, TREE_TYPE (val), primop0, val); return val; } /* Value is not predetermined, but do the comparison in the type of the operand that is not constant. TYPE is already properly set. */ } else if (real1 && real2 && (TYPE_PRECISION (TREE_TYPE (primop0)) == TYPE_PRECISION (TREE_TYPE (primop1)))) type = TREE_TYPE (primop0); /* If args' natural types are both narrower than nominal type and both extend in the same manner, compare them in the type of the wider arg. Otherwise must actually extend both to the nominal common type lest different ways of extending alter the result. (eg, (short)-1 == (unsigned short)-1 should be 0.) 
*/ else if (unsignedp0 == unsignedp1 && real1 == real2 && TYPE_PRECISION (TREE_TYPE (primop0)) < TYPE_PRECISION (*restype_ptr) && TYPE_PRECISION (TREE_TYPE (primop1)) < TYPE_PRECISION (*restype_ptr)) { type = common_type (TREE_TYPE (primop0), TREE_TYPE (primop1)); type = c_common_signed_or_unsigned_type (unsignedp0 || TYPE_UNSIGNED (*restype_ptr), type); /* Make sure shorter operand is extended the right way to match the longer operand. */ primop0 = convert (c_common_signed_or_unsigned_type (unsignedp0, TREE_TYPE (primop0)), primop0); primop1 = convert (c_common_signed_or_unsigned_type (unsignedp1, TREE_TYPE (primop1)), primop1); } else { /* Here we must do the comparison on the nominal type using the args exactly as we received them. */ type = *restype_ptr; primop0 = op0; primop1 = op1; if (!real1 && !real2 && integer_zerop (primop1) && TYPE_UNSIGNED (*restype_ptr)) { tree value = 0; switch (code) { case GE_EXPR: /* All unsigned values are >= 0, so we warn if extra warnings are requested. However, if OP0 is a constant that is >= 0, the signedness of the comparison isn't an issue, so suppress the warning. */ if (extra_warnings && !in_system_header && ! (TREE_CODE (primop0) == INTEGER_CST && ! TREE_OVERFLOW (convert (c_common_signed_type (type), primop0)))) warning ("comparison of unsigned expression >= 0 is always true"); value = truthvalue_true_node; break; case LT_EXPR: if (extra_warnings && !in_system_header && ! (TREE_CODE (primop0) == INTEGER_CST && ! TREE_OVERFLOW (convert (c_common_signed_type (type), primop0)))) warning ("comparison of unsigned expression < 0 is always false"); value = truthvalue_false_node; break; default: break; } if (value != 0) { /* Don't forget to evaluate PRIMOP0 if it has side effects. */ if (TREE_SIDE_EFFECTS (primop0)) return build (COMPOUND_EXPR, TREE_TYPE (value), primop0, value); return value; } } } *op0_ptr = convert (type, primop0); *op1_ptr = convert (type, primop1); *restype_ptr = truthvalue_type_node; return 0; } /* Return a tree for the sum or difference (RESULTCODE says which) of pointer PTROP and integer INTOP. */ tree pointer_int_sum (enum tree_code resultcode, tree ptrop, tree intop) { tree size_exp; /* The result is a pointer of the same type that is being added. */ tree result_type = TREE_TYPE (ptrop); if (TREE_CODE (TREE_TYPE (result_type)) == VOID_TYPE) { if (pedantic || warn_pointer_arith) pedwarn ("pointer of type `void *' used in arithmetic"); size_exp = integer_one_node; } else if (TREE_CODE (TREE_TYPE (result_type)) == FUNCTION_TYPE) { if (pedantic || warn_pointer_arith) pedwarn ("pointer to a function used in arithmetic"); size_exp = integer_one_node; } else if (TREE_CODE (TREE_TYPE (result_type)) == METHOD_TYPE) { if (pedantic || warn_pointer_arith) pedwarn ("pointer to member function used in arithmetic"); size_exp = integer_one_node; } else size_exp = size_in_bytes (TREE_TYPE (result_type)); /* If what we are about to multiply by the size of the elements contains a constant term, apply distributive law and multiply that constant term separately. This helps produce common subexpressions. */ if ((TREE_CODE (intop) == PLUS_EXPR || TREE_CODE (intop) == MINUS_EXPR) && ! TREE_CONSTANT (intop) && TREE_CONSTANT (TREE_OPERAND (intop, 1)) && TREE_CONSTANT (size_exp) /* If the constant comes from pointer subtraction, skip this optimization--it would cause an error. */ && TREE_CODE (TREE_TYPE (TREE_OPERAND (intop, 0))) == INTEGER_TYPE /* If the constant is unsigned, and smaller than the pointer size, then we must skip this optimization. 
This is because it could cause an overflow error if the constant is negative but INTOP is not. */ && (! TYPE_UNSIGNED (TREE_TYPE (intop)) || (TYPE_PRECISION (TREE_TYPE (intop)) == TYPE_PRECISION (TREE_TYPE (ptrop))))) { enum tree_code subcode = resultcode; tree int_type = TREE_TYPE (intop); if (TREE_CODE (intop) == MINUS_EXPR) subcode = (subcode == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR); /* Convert both subexpression types to the type of intop, because weird cases involving pointer arithmetic can result in a sum or difference with different type args. */ ptrop = build_binary_op (subcode, ptrop, convert (int_type, TREE_OPERAND (intop, 1)), 1); intop = convert (int_type, TREE_OPERAND (intop, 0)); } /* Convert the integer argument to a type the same size as sizetype so the multiply won't overflow spuriously. */ if (TYPE_PRECISION (TREE_TYPE (intop)) != TYPE_PRECISION (sizetype) || TYPE_UNSIGNED (TREE_TYPE (intop)) != TYPE_UNSIGNED (sizetype)) intop = convert (c_common_type_for_size (TYPE_PRECISION (sizetype), TYPE_UNSIGNED (sizetype)), intop); /* Replace the integer argument with a suitable product by the object size. Do this multiplication as signed, then convert to the appropriate pointer type (actually unsigned integral). */ intop = convert (result_type, build_binary_op (MULT_EXPR, intop, convert (TREE_TYPE (intop), size_exp), 1)); /* Create the sum or difference. */ return fold (build (resultcode, result_type, ptrop, intop)); } /* Prepare expr to be an argument of a TRUTH_NOT_EXPR, or validate its data type for an `if' or `while' statement or ?..: exp. This preparation consists of taking the ordinary representation of an expression expr and producing a valid tree boolean expression describing whether expr is nonzero. We could simply always do build_binary_op (NE_EXPR, expr, truthvalue_false_node, 1), but we optimize comparisons, &&, ||, and !. The resulting type should always be `truthvalue_type_node'. */ tree c_common_truthvalue_conversion (tree expr) { if (TREE_CODE (expr) == ERROR_MARK) return expr; if (TREE_CODE (expr) == FUNCTION_DECL) expr = build_unary_op (ADDR_EXPR, expr, 0); switch (TREE_CODE (expr)) { case EQ_EXPR: case NE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: case UNLE_EXPR: case UNGE_EXPR: case UNLT_EXPR: case UNGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case TRUTH_NOT_EXPR: TREE_TYPE (expr) = truthvalue_type_node; return expr; case ERROR_MARK: return expr; case INTEGER_CST: return integer_zerop (expr) ? truthvalue_false_node : truthvalue_true_node; case REAL_CST: return real_zerop (expr) ? truthvalue_false_node : truthvalue_true_node; case ADDR_EXPR: { if (TREE_CODE (TREE_OPERAND (expr, 0)) == FUNCTION_DECL && ! DECL_WEAK (TREE_OPERAND (expr, 0))) { /* Common Ada/Pascal programmer's mistake. We always warn about this since it is so bad. */ warning ("the address of `%D', will always evaluate as `true'", TREE_OPERAND (expr, 0)); return truthvalue_true_node; } /* If we are taking the address of an external decl, it might be zero if it is weak, so we cannot optimize. 
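For example, given `extern int foo __attribute__ ((weak));', the address &foo may legitimately be null if no definition of foo is ever linked in, so such a test must be emitted rather than folded to true.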
*/ if (DECL_P (TREE_OPERAND (expr, 0)) && DECL_EXTERNAL (TREE_OPERAND (expr, 0))) break; if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 0))) return build (COMPOUND_EXPR, truthvalue_type_node, TREE_OPERAND (expr, 0), truthvalue_true_node); else return truthvalue_true_node; } case COMPLEX_EXPR: return build_binary_op ((TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1)) ? TRUTH_OR_EXPR : TRUTH_ORIF_EXPR), lang_hooks.truthvalue_conversion (TREE_OPERAND (expr, 0)), lang_hooks.truthvalue_conversion (TREE_OPERAND (expr, 1)), 0); case NEGATE_EXPR: case ABS_EXPR: case FLOAT_EXPR: /* These don't change whether an object is nonzero or zero. */ return lang_hooks.truthvalue_conversion (TREE_OPERAND (expr, 0)); case LROTATE_EXPR: case RROTATE_EXPR: /* These don't change whether an object is zero or nonzero, but we can't ignore them if their second arg has side-effects. */ if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1))) return build (COMPOUND_EXPR, truthvalue_type_node, TREE_OPERAND (expr, 1), lang_hooks.truthvalue_conversion (TREE_OPERAND (expr, 0))); else return lang_hooks.truthvalue_conversion (TREE_OPERAND (expr, 0)); case COND_EXPR: /* Distribute the conversion into the arms of a COND_EXPR. */ return fold (build (COND_EXPR, truthvalue_type_node, TREE_OPERAND (expr, 0), lang_hooks.truthvalue_conversion (TREE_OPERAND (expr, 1)), lang_hooks.truthvalue_conversion (TREE_OPERAND (expr, 2)))); case CONVERT_EXPR: /* Don't cancel the effect of a CONVERT_EXPR from a REFERENCE_TYPE, since that affects how `default_conversion' will behave. */ if (TREE_CODE (TREE_TYPE (expr)) == REFERENCE_TYPE || TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == REFERENCE_TYPE) break; /* Fall through.... */ case NOP_EXPR: /* If this is widening the argument, we can ignore it. */ if (TYPE_PRECISION (TREE_TYPE (expr)) >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (expr, 0)))) return lang_hooks.truthvalue_conversion (TREE_OPERAND (expr, 0)); break; case MINUS_EXPR: /* Perhaps reduce (x - y) != 0 to (x != y). The expressions aren't guaranteed to the be same for modes that can represent infinity, since if x and y are both +infinity, or both -infinity, then x - y is not a number. Note that this transformation is safe when x or y is NaN. (x - y) is then NaN, and both (x - y) != 0 and x != y will be false. */ if (HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))) break; /* Fall through.... */ case BIT_XOR_EXPR: /* This and MINUS_EXPR can be changed into a comparison of the two objects. */ if (TREE_TYPE (TREE_OPERAND (expr, 0)) == TREE_TYPE (TREE_OPERAND (expr, 1))) return build_binary_op (NE_EXPR, TREE_OPERAND (expr, 0), TREE_OPERAND (expr, 1), 1); return build_binary_op (NE_EXPR, TREE_OPERAND (expr, 0), fold (build1 (NOP_EXPR, TREE_TYPE (TREE_OPERAND (expr, 0)), TREE_OPERAND (expr, 1))), 1); case BIT_AND_EXPR: if (integer_onep (TREE_OPERAND (expr, 1)) && TREE_TYPE (expr) != truthvalue_type_node) /* Using convert here would cause infinite recursion. */ return build1 (NOP_EXPR, truthvalue_type_node, expr); break; case MODIFY_EXPR: if (warn_parentheses && C_EXP_ORIGINAL_CODE (expr) == MODIFY_EXPR) warning ("suggest parentheses around assignment used as truth value"); break; default: break; } if (TREE_CODE (TREE_TYPE (expr)) == COMPLEX_TYPE) { tree t = save_expr (expr); return (build_binary_op ((TREE_SIDE_EFFECTS (expr) ? 
TRUTH_OR_EXPR : TRUTH_ORIF_EXPR), lang_hooks.truthvalue_conversion (build_unary_op (REALPART_EXPR, t, 0)), lang_hooks.truthvalue_conversion (build_unary_op (IMAGPART_EXPR, t, 0)), 0)); } return build_binary_op (NE_EXPR, expr, integer_zero_node, 1); } static tree builtin_function_2 (const char *, const char *, tree, tree, int, enum built_in_class, int, int, tree); /* Make a variant type in the proper way for C/C++, propagating qualifiers down to the element type of an array. */ tree c_build_qualified_type (tree type, int type_quals) { if (type == error_mark_node) return type; if (TREE_CODE (type) == ARRAY_TYPE) return build_array_type (c_build_qualified_type (TREE_TYPE (type), type_quals), TYPE_DOMAIN (type)); /* A restrict-qualified pointer type must be a pointer to object or incomplete type. Note that the use of POINTER_TYPE_P also allows REFERENCE_TYPEs, which is appropriate for C++. */ if ((type_quals & TYPE_QUAL_RESTRICT) && (!POINTER_TYPE_P (type) || !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type)))) { error ("invalid use of `restrict'"); type_quals &= ~TYPE_QUAL_RESTRICT; } return build_qualified_type (type, type_quals); } /* Apply the TYPE_QUALS to the new DECL. */ void c_apply_type_quals_to_decl (int type_quals, tree decl) { tree type = TREE_TYPE (decl); if (type == error_mark_node) return; if (((type_quals & TYPE_QUAL_CONST) || (type && TREE_CODE (type) == REFERENCE_TYPE)) /* An object declared 'const' is only readonly after it is initialized. We don't have any way of expressing this currently, so we need to be conservative and unset TREE_READONLY for types with constructors. Otherwise aliasing code will ignore stores in an inline constructor. */ && !(type && TYPE_NEEDS_CONSTRUCTING (type))) TREE_READONLY (decl) = 1; if (type_quals & TYPE_QUAL_VOLATILE) { TREE_SIDE_EFFECTS (decl) = 1; TREE_THIS_VOLATILE (decl) = 1; } if (type_quals & TYPE_QUAL_RESTRICT) { while (type && TREE_CODE (type) == ARRAY_TYPE) /* Allow 'restrict' on arrays of pointers. FIXME currently we just ignore it. */ type = TREE_TYPE (type); if (!type || !POINTER_TYPE_P (type) || !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type))) error ("invalid use of `restrict'"); else if (flag_strict_aliasing && type == TREE_TYPE (decl)) /* Indicate we need to make a unique alias set for this pointer. We can't do it here because it might be pointing to an incomplete type. */ DECL_POINTER_ALIAS_SET (decl) = -2; } } /* Hash function for the problem of multiple type definitions in different files. This must hash all types that will compare equal via comptypes to the same value. In practice it hashes on some of the simple stuff and leaves the details to comptypes. */ static hashval_t c_type_hash (const void *p) { int i = 0; int shift, size; tree t = (tree)p; tree t2; switch (TREE_CODE (t)) { /* For pointers, hash on pointee type plus some swizzling. */ case POINTER_TYPE: return c_type_hash (TREE_TYPE (t)) ^ 0x3003003; /* Hash on number of elements and total size. */ case ENUMERAL_TYPE: shift = 3; t2 = TYPE_VALUES (t); break; case RECORD_TYPE: shift = 0; t2 = TYPE_FIELDS (t); break; case QUAL_UNION_TYPE: shift = 1; t2 = TYPE_FIELDS (t); break; case UNION_TYPE: shift = 2; t2 = TYPE_FIELDS (t); break; default: abort (); } for (; t2; t2 = TREE_CHAIN (t2)) i++; size = TREE_INT_CST_LOW (TYPE_SIZE (t)); return ((size << 24) | (i << shift)); } /* Return the typed-based alias set for T, which may be an expression or a type. Return -1 if we don't do anything special. 
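For example (all handled below): an access through an lvalue of character type gets alias set 0 and so may alias anything, `int' and `unsigned int' share one alias set, and a union member read directly through the union, as in
    union u { int i; float f; } x;  ... x.f = 1.0f; ... x.i ...
also gets alias set 0, whereas an access made through a separately taken pointer to the member does not get this special treatment.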
*/ HOST_WIDE_INT c_common_get_alias_set (tree t) { tree u; PTR *slot; static htab_t type_hash_table; /* Permit type-punning when accessing a union, provided the access is directly through the union. For example, this code does not permit taking the address of a union member and then storing through it. Even the type-punning allowed here is a GCC extension, albeit a common and useful one; the C standard says that such accesses have implementation-defined behavior. */ for (u = t; TREE_CODE (u) == COMPONENT_REF || TREE_CODE (u) == ARRAY_REF; u = TREE_OPERAND (u, 0)) if (TREE_CODE (u) == COMPONENT_REF && TREE_CODE (TREE_TYPE (TREE_OPERAND (u, 0))) == UNION_TYPE) return 0; /* That's all the expressions we handle specially. */ if (! TYPE_P (t)) return -1; /* The C standard guarantees that any object may be accessed via an lvalue that has character type. */ if (t == char_type_node || t == signed_char_type_node || t == unsigned_char_type_node) return 0; /* If it has the may_alias attribute, it can alias anything. */ if (lookup_attribute ("may_alias", TYPE_ATTRIBUTES (t))) return 0; /* The C standard specifically allows aliasing between signed and unsigned variants of the same type. We treat the signed variant as canonical. */ if (TREE_CODE (t) == INTEGER_TYPE && TYPE_UNSIGNED (t)) { tree t1 = c_common_signed_type (t); /* t1 == t can happen for boolean nodes which are always unsigned. */ if (t1 != t) return get_alias_set (t1); } else if (POINTER_TYPE_P (t)) { tree t1; /* Unfortunately, there is no canonical form of a pointer type. In particular, if we have `typedef int I', then `int *', and `I *' are different types. So, we have to pick a canonical representative. We do this below. Technically, this approach is actually more conservative that it needs to be. In particular, `const int *' and `int *' should be in different alias sets, according to the C and C++ standard, since their types are not the same, and so, technically, an `int **' and `const int **' cannot point at the same thing. But, the standard is wrong. In particular, this code is legal C++: int *ip; int **ipp = &ip; const int* const* cipp = ipp; And, it doesn't make sense for that to be legal unless you can dereference IPP and CIPP. So, we ignore cv-qualifiers on the pointed-to types. This issue has been reported to the C++ committee. */ t1 = build_type_no_quals (t); if (t1 != t) return get_alias_set (t1); } /* Handle the case of multiple type nodes referring to "the same" type, which occurs with IMA. These share an alias set. FIXME: Currently only C90 is handled. (In C99 type compatibility is not transitive, which complicates things mightily. The alias set splay trees can theoretically represent this, but insertion is tricky when you consider all the different orders things might arrive in.) */ if (c_language != clk_c || flag_isoc99) return -1; /* Save time if there's only one input file. */ if (!current_file_decl || TREE_CHAIN (current_file_decl) == NULL_TREE) return -1; /* Pointers need special handling if they point to any type that needs special handling (below). */ if (TREE_CODE (t) == POINTER_TYPE) { tree t2; /* Find bottom type under any nested POINTERs. */ for (t2 = TREE_TYPE (t); TREE_CODE (t2) == POINTER_TYPE; t2 = TREE_TYPE (t2)) ; if (TREE_CODE (t2) != RECORD_TYPE && TREE_CODE (t2) != ENUMERAL_TYPE && TREE_CODE (t2) != QUAL_UNION_TYPE && TREE_CODE (t2) != UNION_TYPE) return -1; if (TYPE_SIZE (t2) == 0) return -1; } /* These are the only cases that need special handling. 
*/ if (TREE_CODE (t) != RECORD_TYPE && TREE_CODE (t) != ENUMERAL_TYPE && TREE_CODE (t) != QUAL_UNION_TYPE && TREE_CODE (t) != UNION_TYPE && TREE_CODE (t) != POINTER_TYPE) return -1; /* Undefined? */ if (TYPE_SIZE (t) == 0) return -1; /* Look up t in hash table. Only one of the compatible types within each alias set is recorded in the table. */ if (!type_hash_table) type_hash_table = htab_create (1021, c_type_hash, (htab_eq) lang_hooks.types_compatible_p, NULL); slot = htab_find_slot (type_hash_table, t, INSERT); if (*slot != NULL) return TYPE_ALIAS_SET ((tree)*slot); else /* Our caller will assign and record (in t) a new alias set; all we need to do is remember t in the hash table. */ *slot = t; return -1; } /* Compute the value of 'sizeof (TYPE)' or '__alignof__ (TYPE)', where the second parameter indicates which OPERATOR is being applied. The COMPLAIN flag controls whether we should diagnose possibly ill-formed constructs or not. */ tree c_sizeof_or_alignof_type (tree type, enum tree_code op, int complain) { const char *op_name; tree value = NULL; enum tree_code type_code = TREE_CODE (type); my_friendly_assert (op == SIZEOF_EXPR || op == ALIGNOF_EXPR, 20020720); op_name = op == SIZEOF_EXPR ? "sizeof" : "__alignof__"; if (type_code == FUNCTION_TYPE) { if (op == SIZEOF_EXPR) { if (complain && (pedantic || warn_pointer_arith)) pedwarn ("invalid application of `sizeof' to a function type"); value = size_one_node; } else value = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT); } else if (type_code == VOID_TYPE || type_code == ERROR_MARK) { if (type_code == VOID_TYPE && complain && (pedantic || warn_pointer_arith)) pedwarn ("invalid application of `%s' to a void type", op_name); value = size_one_node; } else if (!COMPLETE_TYPE_P (type)) { if (complain) error ("invalid application of `%s' to incomplete type `%T' ", op_name, type); value = size_zero_node; } else { if (op == SIZEOF_EXPR) /* Convert in case a char is more than one unit. */ value = size_binop (CEIL_DIV_EXPR, TYPE_SIZE_UNIT (type), size_int (TYPE_PRECISION (char_type_node) / BITS_PER_UNIT)); else value = size_int (TYPE_ALIGN (type) / BITS_PER_UNIT); } /* VALUE will have an integer type with TYPE_IS_SIZETYPE set. TYPE_IS_SIZETYPE means that certain things (like overflow) will never happen. However, this node should really have type `size_t', which is just a typedef for an ordinary integer type. */ value = fold (build1 (NOP_EXPR, size_type_node, value)); my_friendly_assert (!TYPE_IS_SIZETYPE (TREE_TYPE (value)), 20001021); return value; } /* Implement the __alignof keyword: Return the minimum required alignment of EXPR, measured in bytes. For VAR_DECL's and FIELD_DECL's return DECL_ALIGN (which can be set from an "aligned" __attribute__ specification). 
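For example, given
    char buf[32] __attribute__ ((aligned (16)));
`__alignof__ (buf)' yields 16, taken from DECL_ALIGN of the VAR_DECL, rather than the alignment implied by the type alone.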
*/ tree c_alignof_expr (tree expr) { tree t; if (TREE_CODE (expr) == VAR_DECL) t = size_int (DECL_ALIGN (expr) / BITS_PER_UNIT); else if (TREE_CODE (expr) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (expr, 1))) { error ("`__alignof' applied to a bit-field"); t = size_one_node; } else if (TREE_CODE (expr) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (expr, 1)) == FIELD_DECL) t = size_int (DECL_ALIGN (TREE_OPERAND (expr, 1)) / BITS_PER_UNIT); else if (TREE_CODE (expr) == INDIRECT_REF) { tree t = TREE_OPERAND (expr, 0); tree best = t; int bestalign = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (t))); while (TREE_CODE (t) == NOP_EXPR && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == POINTER_TYPE) { int thisalign; t = TREE_OPERAND (t, 0); thisalign = TYPE_ALIGN (TREE_TYPE (TREE_TYPE (t))); if (thisalign > bestalign) best = t, bestalign = thisalign; } return c_alignof (TREE_TYPE (TREE_TYPE (best))); } else return c_alignof (TREE_TYPE (expr)); return fold (build1 (NOP_EXPR, size_type_node, t)); } /* Handle C and C++ default attributes. */ enum built_in_attribute { #define DEF_ATTR_NULL_TREE(ENUM) ENUM, #define DEF_ATTR_INT(ENUM, VALUE) ENUM, #define DEF_ATTR_IDENT(ENUM, STRING) ENUM, #define DEF_ATTR_TREE_LIST(ENUM, PURPOSE, VALUE, CHAIN) ENUM, /* Copyright (C) 2001, 2002 Free Software Foundation, Inc. Contributed by Joseph Myers . This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This header provides a declarative way of describing the attributes that are applied to some functions by default. Before including this header, you must define the following macros. In each case where there is an ENUM, it is an identifier used to reference the tree in subsequent definitions. DEF_ATTR_NULL_TREE (ENUM) Constructs a NULL_TREE. DEF_ATTR_INT (ENUM, VALUE) Constructs an INTEGER_CST with value VALUE (an integer representable in HOST_WIDE_INT). DEF_ATTR_IDENT (ENUM, STRING) Constructs an IDENTIFIER_NODE for STRING. DEF_ATTR_TREE_LIST (ENUM, PURPOSE, VALUE, CHAIN) Constructs a TREE_LIST with given PURPOSE, VALUE and CHAIN (given as previous ENUM names). */ DEF_ATTR_NULL_TREE (ATTR_NULL) /* Construct a tree for a given integer and a list containing it. */ #define DEF_ATTR_FOR_INT(VALUE) \ DEF_ATTR_INT (ATTR_##VALUE, VALUE) \ DEF_ATTR_TREE_LIST (ATTR_LIST_##VALUE, ATTR_NULL, \ ATTR_##VALUE, ATTR_NULL) DEF_ATTR_FOR_INT (0) DEF_ATTR_FOR_INT (1) DEF_ATTR_FOR_INT (2) DEF_ATTR_FOR_INT (3) DEF_ATTR_FOR_INT (4) #undef DEF_ATTR_FOR_INT /* Construct a tree for a list of two integers. */ #define DEF_LIST_INT_INT(VALUE1, VALUE2) \ DEF_ATTR_TREE_LIST (ATTR_LIST_##VALUE1##_##VALUE2, ATTR_NULL, \ ATTR_##VALUE1, ATTR_LIST_##VALUE2) DEF_LIST_INT_INT (1,0) DEF_LIST_INT_INT (1,2) DEF_LIST_INT_INT (2,0) DEF_LIST_INT_INT (2,3) DEF_LIST_INT_INT (3,0) DEF_LIST_INT_INT (3,4) #undef DEF_LIST_INT_INT /* Construct trees for identifiers. 
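Each identifier below is the spelling of an attribute name exactly as it would appear inside `__attribute__ ((...))'; they are combined into the TREE_LISTs that follow (ATTR_NOTHROW_LIST, ATTR_NOTHROW_NONNULL_1 and so on) and attached to builtins further on.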
*/ DEF_ATTR_IDENT (ATTR_CONST, "const") DEF_ATTR_IDENT (ATTR_FORMAT, "format") DEF_ATTR_IDENT (ATTR_FORMAT_ARG, "format_arg") DEF_ATTR_IDENT (ATTR_MALLOC, "malloc") DEF_ATTR_IDENT (ATTR_NONNULL, "nonnull") DEF_ATTR_IDENT (ATTR_NORETURN, "noreturn") DEF_ATTR_IDENT (ATTR_NOTHROW, "nothrow") DEF_ATTR_IDENT (ATTR_PRINTF, "printf") DEF_ATTR_IDENT (ATTR_ASM_FPRINTF, "asm_fprintf") DEF_ATTR_IDENT (ATTR_GCC_DIAG, "gcc_diag") DEF_ATTR_IDENT (ATTR_GCC_CDIAG, "gcc_cdiag") DEF_ATTR_IDENT (ATTR_GCC_CXXDIAG, "gcc_cxxdiag") DEF_ATTR_IDENT (ATTR_PURE, "pure") DEF_ATTR_IDENT (ATTR_SCANF, "scanf") DEF_ATTR_IDENT (ATTR_STRFMON, "strfmon") DEF_ATTR_IDENT (ATTR_STRFTIME, "strftime") DEF_ATTR_TREE_LIST (ATTR_NOTHROW_LIST, ATTR_NOTHROW, ATTR_NULL, ATTR_NULL) DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_LIST, ATTR_CONST, \ ATTR_NULL, ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_LIST, ATTR_PURE, \ ATTR_NULL, ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_NORETURN_NOTHROW_LIST, ATTR_NORETURN, \ ATTR_NULL, ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_MALLOC_NOTHROW_LIST, ATTR_MALLOC, \ ATTR_NULL, ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1, ATTR_NONNULL, ATTR_LIST_1, \ ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_2, ATTR_NONNULL, ATTR_LIST_2, \ ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_3, ATTR_NONNULL, ATTR_LIST_3, \ ATTR_NOTHROW_LIST) /* Nothrow functions whose first and second parameters are nonnull pointers. */ DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_2, ATTR_NONNULL, ATTR_LIST_2, \ ATTR_NOTHROW_NONNULL_1) /* Nothrow functions whose first and fourth parameters are nonnull pointers. */ DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_4, ATTR_NONNULL, ATTR_LIST_4, \ ATTR_NOTHROW_NONNULL_1) /* Nothrow const functions whose first parameter is a nonnull pointer. */ DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_NONNULL_1, ATTR_CONST, ATTR_NULL, \ ATTR_NOTHROW_NONNULL_1) /* Nothrow pure functions whose first parameter is a nonnull pointer. */ DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_NONNULL_1, ATTR_PURE, ATTR_NULL, \ ATTR_NOTHROW_NONNULL_1) /* Nothrow pure functions whose first and second parameters are nonnull pointers. */ DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_NONNULL_1_2, ATTR_PURE, ATTR_NULL, \ ATTR_NOTHROW_NONNULL_1_2) /* Nothrow malloc functions whose first parameter is a nonnull pointer. */ DEF_ATTR_TREE_LIST (ATTR_MALLOC_NOTHROW_NONNULL_1, ATTR_MALLOC, ATTR_NULL, \ ATTR_NOTHROW_NONNULL_1) /* Construct a tree for a format attribute. */ #define DEF_FORMAT_ATTRIBUTE(TYPE, FA, VALUES) \ DEF_ATTR_TREE_LIST (ATTR_##TYPE##_##VALUES, ATTR_NULL, \ ATTR_##TYPE, ATTR_LIST_##VALUES) \ DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_##VALUES, ATTR_FORMAT, \ ATTR_##TYPE##_##VALUES, ATTR_NOTHROW_NONNULL_##FA) DEF_FORMAT_ATTRIBUTE(PRINTF,1,1_0) DEF_FORMAT_ATTRIBUTE(PRINTF,1,1_2) DEF_FORMAT_ATTRIBUTE(PRINTF,2,2_0) DEF_FORMAT_ATTRIBUTE(PRINTF,2,2_3) DEF_FORMAT_ATTRIBUTE(PRINTF,3,3_0) DEF_FORMAT_ATTRIBUTE(PRINTF,3,3_4) DEF_FORMAT_ATTRIBUTE(SCANF,1,1_0) DEF_FORMAT_ATTRIBUTE(SCANF,1,1_2) DEF_FORMAT_ATTRIBUTE(SCANF,2,2_0) DEF_FORMAT_ATTRIBUTE(SCANF,2,2_3) DEF_FORMAT_ATTRIBUTE(STRFTIME,3,3_0) DEF_FORMAT_ATTRIBUTE(STRFMON,3,3_4) #undef DEF_FORMAT_ATTRIBUTE /* Construct a tree for a format_arg attribute. 
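The format_arg attribute marks functions, gettext for example, that take a format string as argument number FA and return it, so that format checking can follow the string through the call.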
*/ #define DEF_FORMAT_ARG_ATTRIBUTE(FA) \ DEF_ATTR_TREE_LIST (ATTR_FORMAT_ARG_##FA, ATTR_FORMAT_ARG, \ ATTR_LIST_##FA, ATTR_NOTHROW_NONNULL_##FA) DEF_FORMAT_ARG_ATTRIBUTE(1) DEF_FORMAT_ARG_ATTRIBUTE(2) #undef DEF_FORMAT_ARG_ATTRIBUTE #undef DEF_ATTR_NULL_TREE #undef DEF_ATTR_INT #undef DEF_ATTR_IDENT #undef DEF_ATTR_TREE_LIST ATTR_LAST }; static GTY(()) tree built_in_attributes[(int) ATTR_LAST]; static void c_init_attributes (void); /* Build tree nodes and builtin functions common to both C and C++ language frontends. */ void c_common_nodes_and_builtins (void) { enum builtin_type { #define DEF_PRIMITIVE_TYPE(NAME, VALUE) NAME, #define DEF_FUNCTION_TYPE_0(NAME, RETURN) NAME, #define DEF_FUNCTION_TYPE_1(NAME, RETURN, ARG1) NAME, #define DEF_FUNCTION_TYPE_2(NAME, RETURN, ARG1, ARG2) NAME, #define DEF_FUNCTION_TYPE_3(NAME, RETURN, ARG1, ARG2, ARG3) NAME, #define DEF_FUNCTION_TYPE_4(NAME, RETURN, ARG1, ARG2, ARG3, ARG4) NAME, #define DEF_FUNCTION_TYPE_VAR_0(NAME, RETURN) NAME, #define DEF_FUNCTION_TYPE_VAR_1(NAME, RETURN, ARG1) NAME, #define DEF_FUNCTION_TYPE_VAR_2(NAME, RETURN, ARG1, ARG2) NAME, #define DEF_FUNCTION_TYPE_VAR_3(NAME, RETURN, ARG1, ARG2, ARG3) NAME, #define DEF_POINTER_TYPE(NAME, TYPE) NAME, /* Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This header provides a declarative way of describing the types that are used when declaring builtin functions. Before including this header, you must define the following macros: DEF_PRIMITIVE_TYPE (ENUM, TYPE) The ENUM is an identifier indicating which type is being defined. TYPE is an expression for a `tree' that represents the type. DEF_FUNCTION_TYPE_0 (ENUM, RETURN) DEF_FUNCTION_TYPE_1 (ENUM, RETURN, ARG1) DEF_FUNCTION_TYPE_2 (ENUM, RETURN, ARG1, ARG2) DEF_FUNCTION_TYPE_3 (ENUM, RETURN, ARG1, ARG2, ARG3) DEF_FUNCTION_TYPE_4 (ENUM, RETURN, ARG1, ARG2, ARG3, ARG4) These macros describe function types. ENUM is as above. The RETURN type is one of the enumerals already defined. ARG1, ARG2, and ARG3 give the types of the arguments, similarly. DEF_FUNCTION_TYPE_VAR_0 (ENUM, RETURN) DEF_FUNCTION_TYPE_VAR_1 (ENUM, RETURN, ARG1) DEF_FUNCTION_TYPE_VAR_2 (ENUM, RETURN, ARG1, ARG2) DEF_FUNCTION_TYPE_VAR_3 (ENUM, RETURN, ARG1, ARG2, ARG3) Similar, but for function types that take variable arguments. For example: DEF_FUNCTION_TYPE_1 (BT_INT_DOUBLE, BT_INT, BT_DOUBLE) describes the type `int ()(double)', using the enumeral BT_INT_DOUBLE, whereas: DEF_FUNCTION_TYPE_VAR_1 (BT_INT_DOUBLE_VAR, BT_INT, BT_DOUBLE) describes the type `int ()(double, ...)'. DEF_POINTER_TYPE (ENUM, TYPE) This macro describes a pointer type. ENUM is as above; TYPE is the type pointed to. 
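In this first inclusion the macros defined just above reduce every entry to its ENUM, so the file merely populates the builtin_type enumeration; the same file is included again further down with the macros redefined to build the corresponding type trees in builtin_types[]. For instance, DEF_FUNCTION_TYPE_1 (BT_FN_INT_DOUBLE, BT_INT, BT_DOUBLE) expands here to the single enumerator BT_FN_INT_DOUBLE and, on the second inclusion, to code constructing the function type `int (double)'.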
*/ DEF_PRIMITIVE_TYPE (BT_VOID, void_type_node) DEF_PRIMITIVE_TYPE (BT_INT, integer_type_node) DEF_PRIMITIVE_TYPE (BT_UNSIGNED, unsigned_type_node) DEF_PRIMITIVE_TYPE (BT_LONG, long_integer_type_node) DEF_PRIMITIVE_TYPE (BT_LONGLONG, long_long_integer_type_node) DEF_PRIMITIVE_TYPE (BT_WORD, (*lang_hooks.types.type_for_mode) (word_mode, 0)) DEF_PRIMITIVE_TYPE (BT_FLOAT, float_type_node) DEF_PRIMITIVE_TYPE (BT_INTMAX, intmax_type_node) DEF_PRIMITIVE_TYPE (BT_DOUBLE, double_type_node) DEF_PRIMITIVE_TYPE (BT_LONGDOUBLE, long_double_type_node) DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT, complex_float_type_node) DEF_PRIMITIVE_TYPE (BT_COMPLEX_DOUBLE, complex_double_type_node) DEF_PRIMITIVE_TYPE (BT_COMPLEX_LONGDOUBLE, complex_long_double_type_node) DEF_PRIMITIVE_TYPE (BT_PTR, ptr_type_node) DEF_PRIMITIVE_TYPE (BT_FILEPTR, fileptr_type_node) DEF_PRIMITIVE_TYPE (BT_CONST_PTR, const_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_PTRMODE, (*lang_hooks.types.type_for_mode)(ptr_mode, 0)) DEF_PRIMITIVE_TYPE (BT_INT_PTR, integer_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_FLOAT_PTR, float_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_DOUBLE_PTR, double_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_LONGDOUBLE_PTR, long_double_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_PID, pid_type_node) DEF_PRIMITIVE_TYPE (BT_SIZE, size_type_node) DEF_PRIMITIVE_TYPE (BT_SSIZE, signed_size_type_node) DEF_PRIMITIVE_TYPE (BT_WINT, wint_type_node) DEF_PRIMITIVE_TYPE (BT_STRING, string_type_node) DEF_PRIMITIVE_TYPE (BT_CONST_STRING, const_string_type_node) DEF_PRIMITIVE_TYPE (BT_VALIST_REF, va_list_ref_type_node) DEF_PRIMITIVE_TYPE (BT_VALIST_ARG, va_list_arg_type_node) DEF_POINTER_TYPE (BT_PTR_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_0 (BT_FN_VOID, BT_VOID) DEF_FUNCTION_TYPE_0 (BT_FN_PTR, BT_PTR) DEF_FUNCTION_TYPE_0 (BT_FN_PID, BT_PID) DEF_FUNCTION_TYPE_0 (BT_FN_UNSIGNED, BT_UNSIGNED) DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT, BT_FLOAT) DEF_FUNCTION_TYPE_0 (BT_FN_DOUBLE, BT_DOUBLE) /* For "long double" we use LONGDOUBLE (not LONG_DOUBLE) to distinguish it from two types in sequence, "long" followed by "double". 
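Thus BT_FN_LONGDOUBLE below denotes `long double f(void)', whereas BT_FN_LONG_DOUBLE denotes `long f(double)'.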
*/ DEF_FUNCTION_TYPE_0 (BT_FN_LONGDOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONG_LONG, BT_LONG, BT_LONG) DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_LONGLONG, BT_LONGLONG, BT_LONGLONG) DEF_FUNCTION_TYPE_1 (BT_FN_INTMAX_INTMAX, BT_INTMAX, BT_INTMAX) DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_FLOAT, BT_FLOAT, BT_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_DOUBLE, BT_DOUBLE, BT_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_COMPLEX_FLOAT, BT_FLOAT, BT_COMPLEX_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_COMPLEX_DOUBLE, BT_DOUBLE, BT_COMPLEX_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, BT_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_PTR_UNSIGNED, BT_PTR, BT_UNSIGNED) DEF_FUNCTION_TYPE_1 (BT_FN_PTR_SIZE, BT_PTR, BT_SIZE) DEF_FUNCTION_TYPE_1 (BT_FN_INT_INT, BT_INT, BT_INT) DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONG, BT_INT, BT_LONG) DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONGLONG, BT_INT, BT_LONGLONG) DEF_FUNCTION_TYPE_1 (BT_FN_INT_PTR, BT_INT, BT_PTR) DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT, BT_INT, BT_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_INT_DOUBLE, BT_INT, BT_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONGDOUBLE, BT_INT, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT, BT_LONG, BT_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_LONG_DOUBLE, BT_LONG, BT_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONG_LONGDOUBLE, BT_LONG, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT, BT_LONGLONG, BT_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_DOUBLE, BT_LONGLONG, BT_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_LONGDOUBLE, BT_LONGLONG, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_VOID_PTR, BT_VOID, BT_PTR) DEF_FUNCTION_TYPE_1 (BT_FN_SIZE_CONST_STRING, BT_SIZE, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_INT_CONST_STRING, BT_INT, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_PTR_PTR, BT_PTR, BT_PTR) DEF_FUNCTION_TYPE_1 (BT_FN_VOID_VALIST_REF, BT_VOID, BT_VALIST_REF) DEF_FUNCTION_TYPE_1 (BT_FN_VOID_INT, BT_VOID, BT_INT) DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_CONST_STRING, BT_FLOAT, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_CONST_STRING, BT_DOUBLE, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_CONST_STRING, BT_LONGDOUBLE, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_STRING_CONST_STRING, BT_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_WORD_PTR, BT_WORD, BT_PTR) DEF_FUNCTION_TYPE_1 (BT_FN_INT_WINT, BT_INT, BT_WINT) DEF_FUNCTION_TYPE_1 (BT_FN_WINT_WINT, BT_WINT, BT_WINT) DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_INT, BT_VOID, BT_PTR, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_STRING_STRING_CONST_STRING, BT_STRING, BT_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_CONST_STRING, BT_INT, BT_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_CONST_STRING, BT_STRING, BT_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_SIZE_CONST_STRING_CONST_STRING, BT_SIZE, BT_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_INT, BT_STRING, BT_CONST_STRING, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_FILEPTR, BT_INT, BT_CONST_STRING, BT_FILEPTR) DEF_FUNCTION_TYPE_2 (BT_FN_INT_INT_FILEPTR, BT_INT, BT_INT, BT_FILEPTR) DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTRMODE_PTR, BT_VOID, BT_PTRMODE, BT_PTR) 
DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VALIST_REF_VALIST_ARG, BT_VOID, BT_VALIST_REF, BT_VALIST_ARG) DEF_FUNCTION_TYPE_2 (BT_FN_LONG_LONG_LONG, BT_LONG, BT_LONG, BT_LONG) DEF_FUNCTION_TYPE_2 (BT_FN_INT_PTR_CONST_STRING, BT_INT, BT_PTR, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_SIZE, BT_VOID, BT_PTR, BT_SIZE) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_FLOATPTR, BT_FLOAT, BT_FLOAT, BT_FLOAT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_DOUBLEPTR, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLEPTR, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_LONGDOUBLE, BT_FLOAT, BT_FLOAT, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_LONGDOUBLE, BT_DOUBLE, BT_DOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_INT, BT_FLOAT, BT_FLOAT, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_INT, BT_DOUBLE, BT_DOUBLE, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_INT, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_INTPTR, BT_FLOAT, BT_FLOAT, BT_INT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_INTPTR, BT_DOUBLE, BT_DOUBLE, BT_INT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_INT_FLOAT, BT_FLOAT, BT_INT, BT_FLOAT) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_INT_DOUBLE, BT_DOUBLE, BT_INT, BT_DOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_INT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_LONG, BT_FLOAT, BT_FLOAT, BT_LONG) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_LONG, BT_DOUBLE, BT_DOUBLE, BT_LONG) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONG, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONG) DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_VALIST_ARG, BT_INT, BT_CONST_STRING, BT_VALIST_ARG) DEF_FUNCTION_TYPE_2 (BT_FN_PTR_SIZE_SIZE, BT_PTR, BT_SIZE, BT_SIZE) DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT) DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTR, BT_VOID, BT_PTR, BT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING, BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING) DEF_FUNCTION_TYPE_3 (BT_FN_STRING_STRING_CONST_STRING_SIZE, BT_STRING, BT_STRING, BT_CONST_STRING, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, BT_INT, BT_CONST_STRING, BT_CONST_STRING, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_CONST_PTR_SIZE, BT_PTR, BT_PTR, BT_CONST_PTR, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, BT_INT, BT_CONST_PTR, BT_CONST_PTR, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_INT_SIZE, BT_PTR, BT_PTR, BT_INT, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_INT_INT, BT_VOID, BT_PTR, BT_INT, BT_INT) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_CONST_PTR_PTR_SIZE, BT_VOID, BT_CONST_PTR, BT_PTR, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_INT_STRING_CONST_STRING_VALIST_ARG, BT_INT, BT_STRING, BT_CONST_STRING, BT_VALIST_ARG) 
DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_CONST_STRING_VALIST_ARG, BT_INT, BT_CONST_STRING, BT_CONST_STRING, BT_VALIST_ARG) DEF_FUNCTION_TYPE_3 (BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, BT_INT, BT_FILEPTR, BT_CONST_STRING, BT_VALIST_ARG) DEF_FUNCTION_TYPE_3 (BT_FN_STRING_CONST_STRING_CONST_STRING_INT, BT_STRING, BT_CONST_STRING, BT_CONST_STRING, BT_INT) DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT_FLOAT_FLOAT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT) DEF_FUNCTION_TYPE_3 (BT_FN_DOUBLE_DOUBLE_DOUBLE_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE) DEF_FUNCTION_TYPE_3 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT_FLOAT_FLOAT_INTPTR, BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_INT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_DOUBLE_DOUBLE_DOUBLE_INTPTR, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_INT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_INTPTR, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_FLOAT_FLOATPTR_FLOATPTR, BT_VOID, BT_FLOAT, BT_FLOAT_PTR, BT_FLOAT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_DOUBLE_DOUBLEPTR_DOUBLEPTR, BT_VOID, BT_DOUBLE, BT_DOUBLE_PTR, BT_DOUBLE_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR, BT_VOID, BT_LONGDOUBLE, BT_LONGDOUBLE_PTR, BT_LONGDOUBLE_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_PTR_PTR, BT_VOID, BT_PTR, BT_PTR, BT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING, BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING, BT_PTR_CONST_STRING) DEF_FUNCTION_TYPE_4 (BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, BT_SIZE, BT_CONST_PTR, BT_SIZE, BT_SIZE, BT_FILEPTR) DEF_FUNCTION_TYPE_4 (BT_FN_INT_STRING_SIZE_CONST_STRING_VALIST_ARG, BT_INT, BT_STRING, BT_SIZE, BT_CONST_STRING, BT_VALIST_ARG) DEF_FUNCTION_TYPE_4 (BT_FN_SIZE_STRING_SIZE_CONST_STRING_CONST_PTR, BT_SIZE, BT_STRING, BT_SIZE, BT_CONST_STRING, BT_CONST_PTR) DEF_FUNCTION_TYPE_VAR_0 (BT_FN_VOID_VAR, BT_VOID) DEF_FUNCTION_TYPE_VAR_0 (BT_FN_INT_VAR, BT_INT) DEF_FUNCTION_TYPE_VAR_0 (BT_FN_PTR_VAR, BT_PTR) DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_VALIST_REF_VAR, BT_VOID, BT_VALIST_REF) DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_CONST_PTR_VAR, BT_VOID, BT_CONST_PTR) DEF_FUNCTION_TYPE_VAR_1 (BT_FN_INT_CONST_STRING_VAR, BT_INT, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_FILEPTR_CONST_STRING_VAR, BT_INT, BT_FILEPTR, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_STRING_CONST_STRING_VAR, BT_INT, BT_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_CONST_STRING_CONST_STRING_VAR, BT_INT, BT_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_3 (BT_FN_INT_STRING_SIZE_CONST_STRING_VAR, BT_INT, BT_STRING, BT_SIZE, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_3 (BT_FN_SSIZE_STRING_SIZE_CONST_STRING_VAR, BT_SSIZE, BT_STRING, BT_SIZE, BT_CONST_STRING) DEF_POINTER_TYPE (BT_PTR_FN_VOID_VAR, BT_FN_VOID_VAR) DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, BT_PTR, BT_PTR_FN_VOID_VAR, BT_PTR, BT_SIZE) #undef DEF_PRIMITIVE_TYPE #undef DEF_FUNCTION_TYPE_0 #undef DEF_FUNCTION_TYPE_1 #undef DEF_FUNCTION_TYPE_2 #undef DEF_FUNCTION_TYPE_3 #undef DEF_FUNCTION_TYPE_4 #undef DEF_FUNCTION_TYPE_VAR_0 #undef DEF_FUNCTION_TYPE_VAR_1 #undef DEF_FUNCTION_TYPE_VAR_2 #undef DEF_FUNCTION_TYPE_VAR_3 #undef DEF_POINTER_TYPE BT_LAST }; typedef enum builtin_type builtin_type; tree builtin_types[(int) BT_LAST]; int wchar_type_size; tree array_domain_type; tree va_list_ref_type_node; tree va_list_arg_type_node; /* Define `int' and `char' first so that dbx will 
output them first. */ record_builtin_type (RID_INT, NULL, integer_type_node); record_builtin_type (RID_CHAR, "char", char_type_node); /* `signed' is the same as `int'. FIXME: the declarations of "signed", "unsigned long", "long long unsigned" and "unsigned short" were in C++ but not C. Are the conditionals here needed? */ if (c_dialect_cxx ()) record_builtin_type (RID_SIGNED, NULL, integer_type_node); record_builtin_type (RID_LONG, "long int", long_integer_type_node); record_builtin_type (RID_UNSIGNED, "unsigned int", unsigned_type_node); record_builtin_type (RID_MAX, "long unsigned int", long_unsigned_type_node); if (c_dialect_cxx ()) record_builtin_type (RID_MAX, "unsigned long", long_unsigned_type_node); record_builtin_type (RID_MAX, "long long int", long_long_integer_type_node); record_builtin_type (RID_MAX, "long long unsigned int", long_long_unsigned_type_node); if (c_dialect_cxx ()) record_builtin_type (RID_MAX, "long long unsigned", long_long_unsigned_type_node); record_builtin_type (RID_SHORT, "short int", short_integer_type_node); record_builtin_type (RID_MAX, "short unsigned int", short_unsigned_type_node); if (c_dialect_cxx ()) record_builtin_type (RID_MAX, "unsigned short", short_unsigned_type_node); /* Define both `signed char' and `unsigned char'. */ record_builtin_type (RID_MAX, "signed char", signed_char_type_node); record_builtin_type (RID_MAX, "unsigned char", unsigned_char_type_node); /* These are types that c_common_type_for_size and c_common_type_for_mode use. */ lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, intQI_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, intHI_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, intSI_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, intDI_type_node)); #if HOST_BITS_PER_WIDE_INT >= 64 lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, get_identifier ("__int128_t"), intTI_type_node)); #endif lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intQI_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intHI_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intSI_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, unsigned_intDI_type_node)); #if HOST_BITS_PER_WIDE_INT >= 64 lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, get_identifier ("__uint128_t"), unsigned_intTI_type_node)); #endif /* Create the widest literal types. */ widest_integer_literal_type_node = make_signed_type (HOST_BITS_PER_WIDE_INT * 2); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, widest_integer_literal_type_node)); widest_unsigned_literal_type_node = make_unsigned_type (HOST_BITS_PER_WIDE_INT * 2); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, NULL_TREE, widest_unsigned_literal_type_node)); /* `unsigned long' is the standard type for sizeof. Note that stddef.h uses `unsigned long', and this must agree, even if long and int are the same size. 
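SIZE_TYPE is the target-supplied spelling of that type, for example "long unsigned int"; the node is obtained just below by looking up the global binding of that identifier.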
*/ size_type_node = TREE_TYPE (identifier_global_value (get_identifier (SIZE_TYPE))); signed_size_type_node = c_common_signed_type (size_type_node); set_sizetype (size_type_node); pid_type_node = TREE_TYPE (identifier_global_value (get_identifier (PID_TYPE))); build_common_tree_nodes_2 (flag_short_double); record_builtin_type (RID_FLOAT, NULL, float_type_node); record_builtin_type (RID_DOUBLE, NULL, double_type_node); record_builtin_type (RID_MAX, "long double", long_double_type_node); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, get_identifier ("complex int"), complex_integer_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, get_identifier ("complex float"), complex_float_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, get_identifier ("complex double"), complex_double_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, get_identifier ("complex long double"), complex_long_double_type_node)); if (c_dialect_cxx ()) /* For C++, make fileptr_type_node a distinct void * type until FILE type is defined. */ fileptr_type_node = build_type_copy (ptr_type_node); record_builtin_type (RID_VOID, NULL, void_type_node); void_zero_node = build_int_2 (0, 0); TREE_TYPE (void_zero_node) = void_type_node; void_list_node = build_void_list_node (); /* Make a type to be the domain of a few array types whose domains don't really matter. 200 is small enough that it always fits in size_t and large enough that it can hold most function names for the initializations of __FUNCTION__ and __PRETTY_FUNCTION__. */ array_domain_type = build_index_type (size_int (200)); /* Make a type for arrays of characters. With luck nothing will ever really depend on the length of this array type. */ char_array_type_node = build_array_type (char_type_node, array_domain_type); /* Likewise for arrays of ints. */ int_array_type_node = build_array_type (integer_type_node, array_domain_type); string_type_node = build_pointer_type (char_type_node); const_string_type_node = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST)); /* This is special for C++ so functions can be overloaded. */ wchar_type_node = get_identifier (MODIFIED_WCHAR_TYPE); wchar_type_node = TREE_TYPE (identifier_global_value (wchar_type_node)); wchar_type_size = TYPE_PRECISION (wchar_type_node); if (c_dialect_cxx ()) { if (TYPE_UNSIGNED (wchar_type_node)) wchar_type_node = make_unsigned_type (wchar_type_size); else wchar_type_node = make_signed_type (wchar_type_size); record_builtin_type (RID_WCHAR, "wchar_t", wchar_type_node); } else { signed_wchar_type_node = c_common_signed_type (wchar_type_node); unsigned_wchar_type_node = c_common_unsigned_type (wchar_type_node); } /* This is for wide string constants. 
*/ wchar_array_type_node = build_array_type (wchar_type_node, array_domain_type); wint_type_node = TREE_TYPE (identifier_global_value (get_identifier (WINT_TYPE))); intmax_type_node = TREE_TYPE (identifier_global_value (get_identifier (INTMAX_TYPE))); uintmax_type_node = TREE_TYPE (identifier_global_value (get_identifier (UINTMAX_TYPE))); default_function_type = build_function_type (integer_type_node, NULL_TREE); ptrdiff_type_node = TREE_TYPE (identifier_global_value (get_identifier (PTRDIFF_TYPE))); unsigned_ptrdiff_type_node = c_common_unsigned_type (ptrdiff_type_node); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, get_identifier ("__builtin_va_list"), va_list_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, get_identifier ("__builtin_ptrdiff_t"), ptrdiff_type_node)); lang_hooks.decls.pushdecl (build_decl (TYPE_DECL, get_identifier ("__builtin_size_t"), sizetype)); if (TREE_CODE (va_list_type_node) == ARRAY_TYPE) { va_list_arg_type_node = va_list_ref_type_node = build_pointer_type (TREE_TYPE (va_list_type_node)); } else { va_list_arg_type_node = va_list_type_node; va_list_ref_type_node = build_reference_type (va_list_type_node); } #define DEF_PRIMITIVE_TYPE(ENUM, VALUE) \ builtin_types[(int) ENUM] = VALUE; #define DEF_FUNCTION_TYPE_0(ENUM, RETURN) \ builtin_types[(int) ENUM] \ = build_function_type (builtin_types[(int) RETURN], \ void_list_node); #define DEF_FUNCTION_TYPE_1(ENUM, RETURN, ARG1) \ builtin_types[(int) ENUM] \ = build_function_type (builtin_types[(int) RETURN], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG1], \ void_list_node)); #define DEF_FUNCTION_TYPE_2(ENUM, RETURN, ARG1, ARG2) \ builtin_types[(int) ENUM] \ = build_function_type \ (builtin_types[(int) RETURN], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG1], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG2], \ void_list_node))); #define DEF_FUNCTION_TYPE_3(ENUM, RETURN, ARG1, ARG2, ARG3) \ builtin_types[(int) ENUM] \ = build_function_type \ (builtin_types[(int) RETURN], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG1], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG2], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG3], \ void_list_node)))); #define DEF_FUNCTION_TYPE_4(ENUM, RETURN, ARG1, ARG2, ARG3, ARG4) \ builtin_types[(int) ENUM] \ = build_function_type \ (builtin_types[(int) RETURN], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG1], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG2], \ tree_cons \ (NULL_TREE, \ builtin_types[(int) ARG3], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG4], \ void_list_node))))); #define DEF_FUNCTION_TYPE_VAR_0(ENUM, RETURN) \ builtin_types[(int) ENUM] \ = build_function_type (builtin_types[(int) RETURN], NULL_TREE); #define DEF_FUNCTION_TYPE_VAR_1(ENUM, RETURN, ARG1) \ builtin_types[(int) ENUM] \ = build_function_type (builtin_types[(int) RETURN], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG1], \ NULL_TREE)); #define DEF_FUNCTION_TYPE_VAR_2(ENUM, RETURN, ARG1, ARG2) \ builtin_types[(int) ENUM] \ = build_function_type \ (builtin_types[(int) RETURN], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG1], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG2], \ NULL_TREE))); #define DEF_FUNCTION_TYPE_VAR_3(ENUM, RETURN, ARG1, ARG2, ARG3) \ builtin_types[(int) ENUM] \ = build_function_type \ (builtin_types[(int) RETURN], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG1], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG2], \ tree_cons (NULL_TREE, \ builtin_types[(int) ARG3], \ NULL_TREE)))); #define DEF_POINTER_TYPE(ENUM, TYPE) \ 
builtin_types[(int) ENUM] \ = build_pointer_type (builtin_types[(int) TYPE]); /* Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This header provides a declarative way of describing the types that are used when declaring builtin functions. Before including this header, you must define the following macros: DEF_PRIMITIVE_TYPE (ENUM, TYPE) The ENUM is an identifier indicating which type is being defined. TYPE is an expression for a `tree' that represents the type. DEF_FUNCTION_TYPE_0 (ENUM, RETURN) DEF_FUNCTION_TYPE_1 (ENUM, RETURN, ARG1) DEF_FUNCTION_TYPE_2 (ENUM, RETURN, ARG1, ARG2) DEF_FUNCTION_TYPE_3 (ENUM, RETURN, ARG1, ARG2, ARG3) DEF_FUNCTION_TYPE_4 (ENUM, RETURN, ARG1, ARG2, ARG3, ARG4) These macros describe function types. ENUM is as above. The RETURN type is one of the enumerals already defined. ARG1, ARG2, and ARG3 give the types of the arguments, similarly. DEF_FUNCTION_TYPE_VAR_0 (ENUM, RETURN) DEF_FUNCTION_TYPE_VAR_1 (ENUM, RETURN, ARG1) DEF_FUNCTION_TYPE_VAR_2 (ENUM, RETURN, ARG1, ARG2) DEF_FUNCTION_TYPE_VAR_3 (ENUM, RETURN, ARG1, ARG2, ARG3) Similar, but for function types that take variable arguments. For example: DEF_FUNCTION_TYPE_1 (BT_INT_DOUBLE, BT_INT, BT_DOUBLE) describes the type `int ()(double)', using the enumeral BT_INT_DOUBLE, whereas: DEF_FUNCTION_TYPE_VAR_1 (BT_INT_DOUBLE_VAR, BT_INT, BT_DOUBLE) describes the type `int ()(double, ...)'. DEF_POINTER_TYPE (ENUM, TYPE) This macro describes a pointer type. ENUM is as above; TYPE is the type pointed to. 
*/ DEF_PRIMITIVE_TYPE (BT_VOID, void_type_node) DEF_PRIMITIVE_TYPE (BT_INT, integer_type_node) DEF_PRIMITIVE_TYPE (BT_UNSIGNED, unsigned_type_node) DEF_PRIMITIVE_TYPE (BT_LONG, long_integer_type_node) DEF_PRIMITIVE_TYPE (BT_LONGLONG, long_long_integer_type_node) DEF_PRIMITIVE_TYPE (BT_WORD, (*lang_hooks.types.type_for_mode) (word_mode, 0)) DEF_PRIMITIVE_TYPE (BT_FLOAT, float_type_node) DEF_PRIMITIVE_TYPE (BT_INTMAX, intmax_type_node) DEF_PRIMITIVE_TYPE (BT_DOUBLE, double_type_node) DEF_PRIMITIVE_TYPE (BT_LONGDOUBLE, long_double_type_node) DEF_PRIMITIVE_TYPE (BT_COMPLEX_FLOAT, complex_float_type_node) DEF_PRIMITIVE_TYPE (BT_COMPLEX_DOUBLE, complex_double_type_node) DEF_PRIMITIVE_TYPE (BT_COMPLEX_LONGDOUBLE, complex_long_double_type_node) DEF_PRIMITIVE_TYPE (BT_PTR, ptr_type_node) DEF_PRIMITIVE_TYPE (BT_FILEPTR, fileptr_type_node) DEF_PRIMITIVE_TYPE (BT_CONST_PTR, const_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_PTRMODE, (*lang_hooks.types.type_for_mode)(ptr_mode, 0)) DEF_PRIMITIVE_TYPE (BT_INT_PTR, integer_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_FLOAT_PTR, float_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_DOUBLE_PTR, double_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_LONGDOUBLE_PTR, long_double_ptr_type_node) DEF_PRIMITIVE_TYPE (BT_PID, pid_type_node) DEF_PRIMITIVE_TYPE (BT_SIZE, size_type_node) DEF_PRIMITIVE_TYPE (BT_SSIZE, signed_size_type_node) DEF_PRIMITIVE_TYPE (BT_WINT, wint_type_node) DEF_PRIMITIVE_TYPE (BT_STRING, string_type_node) DEF_PRIMITIVE_TYPE (BT_CONST_STRING, const_string_type_node) DEF_PRIMITIVE_TYPE (BT_VALIST_REF, va_list_ref_type_node) DEF_PRIMITIVE_TYPE (BT_VALIST_ARG, va_list_arg_type_node) DEF_POINTER_TYPE (BT_PTR_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_0 (BT_FN_VOID, BT_VOID) DEF_FUNCTION_TYPE_0 (BT_FN_PTR, BT_PTR) DEF_FUNCTION_TYPE_0 (BT_FN_PID, BT_PID) DEF_FUNCTION_TYPE_0 (BT_FN_UNSIGNED, BT_UNSIGNED) DEF_FUNCTION_TYPE_0 (BT_FN_FLOAT, BT_FLOAT) DEF_FUNCTION_TYPE_0 (BT_FN_DOUBLE, BT_DOUBLE) /* For "long double" we use LONGDOUBLE (not LONG_DOUBLE) to distinguish it from two types in sequence, "long" followed by "double". 
*/ DEF_FUNCTION_TYPE_0 (BT_FN_LONGDOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONG_LONG, BT_LONG, BT_LONG) DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_LONGLONG, BT_LONGLONG, BT_LONGLONG) DEF_FUNCTION_TYPE_1 (BT_FN_INTMAX_INTMAX, BT_INTMAX, BT_INTMAX) DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_FLOAT, BT_FLOAT, BT_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_DOUBLE, BT_DOUBLE, BT_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_COMPLEX_FLOAT, BT_FLOAT, BT_COMPLEX_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_COMPLEX_DOUBLE, BT_DOUBLE, BT_COMPLEX_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, BT_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_PTR_UNSIGNED, BT_PTR, BT_UNSIGNED) DEF_FUNCTION_TYPE_1 (BT_FN_PTR_SIZE, BT_PTR, BT_SIZE) DEF_FUNCTION_TYPE_1 (BT_FN_INT_INT, BT_INT, BT_INT) DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONG, BT_INT, BT_LONG) DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONGLONG, BT_INT, BT_LONGLONG) DEF_FUNCTION_TYPE_1 (BT_FN_INT_PTR, BT_INT, BT_PTR) DEF_FUNCTION_TYPE_1 (BT_FN_INT_FLOAT, BT_INT, BT_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_INT_DOUBLE, BT_INT, BT_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_INT_LONGDOUBLE, BT_INT, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONG_FLOAT, BT_LONG, BT_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_LONG_DOUBLE, BT_LONG, BT_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONG_LONGDOUBLE, BT_LONG, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_FLOAT, BT_LONGLONG, BT_FLOAT) DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_DOUBLE, BT_LONGLONG, BT_DOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_LONGLONG_LONGDOUBLE, BT_LONGLONG, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_1 (BT_FN_VOID_PTR, BT_VOID, BT_PTR) DEF_FUNCTION_TYPE_1 (BT_FN_SIZE_CONST_STRING, BT_SIZE, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_INT_CONST_STRING, BT_INT, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_PTR_PTR, BT_PTR, BT_PTR) DEF_FUNCTION_TYPE_1 (BT_FN_VOID_VALIST_REF, BT_VOID, BT_VALIST_REF) DEF_FUNCTION_TYPE_1 (BT_FN_VOID_INT, BT_VOID, BT_INT) DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_CONST_STRING, BT_FLOAT, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_CONST_STRING, BT_DOUBLE, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_LONGDOUBLE_CONST_STRING, BT_LONGDOUBLE, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_STRING_CONST_STRING, BT_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_1 (BT_FN_WORD_PTR, BT_WORD, BT_PTR) DEF_FUNCTION_TYPE_1 (BT_FN_INT_WINT, BT_INT, BT_WINT) DEF_FUNCTION_TYPE_1 (BT_FN_WINT_WINT, BT_WINT, BT_WINT) DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_INT, BT_VOID, BT_PTR, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_STRING_STRING_CONST_STRING, BT_STRING, BT_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_CONST_STRING, BT_INT, BT_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_CONST_STRING, BT_STRING, BT_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_SIZE_CONST_STRING_CONST_STRING, BT_SIZE, BT_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_STRING_CONST_STRING_INT, BT_STRING, BT_CONST_STRING, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_FILEPTR, BT_INT, BT_CONST_STRING, BT_FILEPTR) DEF_FUNCTION_TYPE_2 (BT_FN_INT_INT_FILEPTR, BT_INT, BT_INT, BT_FILEPTR) DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTRMODE_PTR, BT_VOID, BT_PTRMODE, BT_PTR) 
DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VALIST_REF_VALIST_ARG, BT_VOID, BT_VALIST_REF, BT_VALIST_ARG) DEF_FUNCTION_TYPE_2 (BT_FN_LONG_LONG_LONG, BT_LONG, BT_LONG, BT_LONG) DEF_FUNCTION_TYPE_2 (BT_FN_INT_PTR_CONST_STRING, BT_INT, BT_PTR, BT_CONST_STRING) DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_SIZE, BT_VOID, BT_PTR, BT_SIZE) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_FLOATPTR, BT_FLOAT, BT_FLOAT, BT_FLOAT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_DOUBLEPTR, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLEPTR, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_LONGDOUBLE, BT_FLOAT, BT_FLOAT, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_LONGDOUBLE, BT_DOUBLE, BT_DOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_INT, BT_FLOAT, BT_FLOAT, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_INT, BT_DOUBLE, BT_DOUBLE, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_INT, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_INTPTR, BT_FLOAT, BT_FLOAT, BT_INT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_INTPTR, BT_DOUBLE, BT_DOUBLE, BT_INT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_INT_FLOAT, BT_FLOAT, BT_INT, BT_FLOAT) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_INT_DOUBLE, BT_DOUBLE, BT_INT, BT_DOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_INT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_FLOAT_FLOAT_LONG, BT_FLOAT, BT_FLOAT, BT_LONG) DEF_FUNCTION_TYPE_2 (BT_FN_DOUBLE_DOUBLE_LONG, BT_DOUBLE, BT_DOUBLE, BT_LONG) DEF_FUNCTION_TYPE_2 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONG, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONG) DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_VALIST_ARG, BT_INT, BT_CONST_STRING, BT_VALIST_ARG) DEF_FUNCTION_TYPE_2 (BT_FN_PTR_SIZE_SIZE, BT_PTR, BT_SIZE, BT_SIZE) DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT, BT_COMPLEX_FLOAT) DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE, BT_COMPLEX_DOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE, BT_COMPLEX_LONGDOUBLE) DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTR, BT_VOID, BT_PTR, BT_PTR) DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING, BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING) DEF_FUNCTION_TYPE_3 (BT_FN_STRING_STRING_CONST_STRING_SIZE, BT_STRING, BT_STRING, BT_CONST_STRING, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, BT_INT, BT_CONST_STRING, BT_CONST_STRING, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_CONST_PTR_SIZE, BT_PTR, BT_PTR, BT_CONST_PTR, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, BT_INT, BT_CONST_PTR, BT_CONST_PTR, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_INT_SIZE, BT_PTR, BT_PTR, BT_INT, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_INT_INT, BT_VOID, BT_PTR, BT_INT, BT_INT) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_CONST_PTR_PTR_SIZE, BT_VOID, BT_CONST_PTR, BT_PTR, BT_SIZE) DEF_FUNCTION_TYPE_3 (BT_FN_INT_STRING_CONST_STRING_VALIST_ARG, BT_INT, BT_STRING, BT_CONST_STRING, BT_VALIST_ARG) 
DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_CONST_STRING_VALIST_ARG, BT_INT, BT_CONST_STRING, BT_CONST_STRING, BT_VALIST_ARG) DEF_FUNCTION_TYPE_3 (BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, BT_INT, BT_FILEPTR, BT_CONST_STRING, BT_VALIST_ARG) DEF_FUNCTION_TYPE_3 (BT_FN_STRING_CONST_STRING_CONST_STRING_INT, BT_STRING, BT_CONST_STRING, BT_CONST_STRING, BT_INT) DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT_FLOAT_FLOAT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_FLOAT) DEF_FUNCTION_TYPE_3 (BT_FN_DOUBLE_DOUBLE_DOUBLE_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE) DEF_FUNCTION_TYPE_3 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE) DEF_FUNCTION_TYPE_3 (BT_FN_FLOAT_FLOAT_FLOAT_INTPTR, BT_FLOAT, BT_FLOAT, BT_FLOAT, BT_INT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_DOUBLE_DOUBLE_DOUBLE_INTPTR, BT_DOUBLE, BT_DOUBLE, BT_DOUBLE, BT_INT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_INTPTR, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_LONGDOUBLE, BT_INT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_FLOAT_FLOATPTR_FLOATPTR, BT_VOID, BT_FLOAT, BT_FLOAT_PTR, BT_FLOAT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_DOUBLE_DOUBLEPTR_DOUBLEPTR, BT_VOID, BT_DOUBLE, BT_DOUBLE_PTR, BT_DOUBLE_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR, BT_VOID, BT_LONGDOUBLE, BT_LONGDOUBLE_PTR, BT_LONGDOUBLE_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_VOID_PTR_PTR_PTR, BT_VOID, BT_PTR, BT_PTR, BT_PTR) DEF_FUNCTION_TYPE_3 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING, BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING, BT_PTR_CONST_STRING) DEF_FUNCTION_TYPE_4 (BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, BT_SIZE, BT_CONST_PTR, BT_SIZE, BT_SIZE, BT_FILEPTR) DEF_FUNCTION_TYPE_4 (BT_FN_INT_STRING_SIZE_CONST_STRING_VALIST_ARG, BT_INT, BT_STRING, BT_SIZE, BT_CONST_STRING, BT_VALIST_ARG) DEF_FUNCTION_TYPE_4 (BT_FN_SIZE_STRING_SIZE_CONST_STRING_CONST_PTR, BT_SIZE, BT_STRING, BT_SIZE, BT_CONST_STRING, BT_CONST_PTR) DEF_FUNCTION_TYPE_VAR_0 (BT_FN_VOID_VAR, BT_VOID) DEF_FUNCTION_TYPE_VAR_0 (BT_FN_INT_VAR, BT_INT) DEF_FUNCTION_TYPE_VAR_0 (BT_FN_PTR_VAR, BT_PTR) DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_VALIST_REF_VAR, BT_VOID, BT_VALIST_REF) DEF_FUNCTION_TYPE_VAR_1 (BT_FN_VOID_CONST_PTR_VAR, BT_VOID, BT_CONST_PTR) DEF_FUNCTION_TYPE_VAR_1 (BT_FN_INT_CONST_STRING_VAR, BT_INT, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_FILEPTR_CONST_STRING_VAR, BT_INT, BT_FILEPTR, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_STRING_CONST_STRING_VAR, BT_INT, BT_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_2 (BT_FN_INT_CONST_STRING_CONST_STRING_VAR, BT_INT, BT_CONST_STRING, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_3 (BT_FN_INT_STRING_SIZE_CONST_STRING_VAR, BT_INT, BT_STRING, BT_SIZE, BT_CONST_STRING) DEF_FUNCTION_TYPE_VAR_3 (BT_FN_SSIZE_STRING_SIZE_CONST_STRING_VAR, BT_SSIZE, BT_STRING, BT_SIZE, BT_CONST_STRING) DEF_POINTER_TYPE (BT_PTR_FN_VOID_VAR, BT_FN_VOID_VAR) DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, BT_PTR, BT_PTR_FN_VOID_VAR, BT_PTR, BT_SIZE) #undef DEF_PRIMITIVE_TYPE #undef DEF_FUNCTION_TYPE_1 #undef DEF_FUNCTION_TYPE_2 #undef DEF_FUNCTION_TYPE_3 #undef DEF_FUNCTION_TYPE_4 #undef DEF_FUNCTION_TYPE_VAR_0 #undef DEF_FUNCTION_TYPE_VAR_1 #undef DEF_FUNCTION_TYPE_VAR_2 #undef DEF_FUNCTION_TYPE_VAR_3 #undef DEF_POINTER_TYPE c_init_attributes (); #define DEF_BUILTIN(ENUM, NAME, CLASS, TYPE, LIBTYPE, \ BOTH_P, FALLBACK_P, NONANSI_P, ATTRS, IMPLICIT) \ if (NAME) \ { \ tree decl; \ \ if (strncmp (NAME, "__builtin_", strlen ("__builtin_")) != 0) \ abort (); \ \ if (!BOTH_P) \ decl = 
builtin_function (NAME, builtin_types[TYPE], ENUM, \ CLASS, \ (FALLBACK_P \ ? (NAME + strlen ("__builtin_")) \ : NULL), \ built_in_attributes[(int) ATTRS]); \ else \ decl = builtin_function_2 (NAME, \ NAME + strlen ("__builtin_"), \ builtin_types[TYPE], \ builtin_types[LIBTYPE], \ ENUM, \ CLASS, \ FALLBACK_P, \ NONANSI_P, \ built_in_attributes[(int) ATTRS]); \ \ built_in_decls[(int) ENUM] = decl; \ if (IMPLICIT) \ implicit_built_in_decls[(int) ENUM] = decl; \ } /* This file contains the definitions and documentation for the builtins used in the GNU compiler. Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Before including this file, you should define a macro: DEF_BUILTIN (ENUM, NAME, CLASS, TYPE, LIBTYPE, BOTH_P, FALLBACK_P, NONANSI_P, ATTRS, IMPLICIT) This macro will be called once for each builtin function. The ENUM will be of type `enum built_in_function', and will indicate which builtin function is being processed. The NAME of the builtin function (which will always start with `__builtin_') is a string literal. The CLASS is of type `enum built_in_class' and indicates what kind of builtin is being processed. Some builtins are actually two separate functions. For example, for `strcmp' there are two builtin functions; `__builtin_strcmp' and `strcmp' itself. Both behave identically. Other builtins define only the `__builtin' variant. If BOTH_P is TRUE, then this builtin has both variants; otherwise, it has only the first variant. TYPE indicates the type of the function. The symbols correspond to enumerals from builtin-types.def. If BOTH_P is true, then LIBTYPE is the type of the non-`__builtin_' variant. Otherwise, LIBTYPE should be ignored. If FALLBACK_P is true then, if for some reason the compiler cannot expand the builtin function directly, it will call the corresponding library function (which does not have the `__builtin_' prefix). If NONANSI_P is true, then the non-`__builtin_' variant is not an ANSI/ISO library function, and so we should pretend it does not exist when compiling in ANSI conformant mode. ATTRS is an attribute list as defined in builtin-attrs.def that describes the attributes of this builtin function. IMPLICIT specifies the condition under which the builtin can be produced by the compiler. For instance, C90 reserves the name floorf, but does not define its meaning. When the user uses floorf we may assume that floorf has the meaning we expect, but we can't produce floorf by simplifying floor((double)float) since the runtime need not implement it. */ /* A GCC builtin (like __builtin_saveregs) is provided by the compiler, but does not correspond to a function in the standard library.
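   As a concrete illustration (an example only, not part of the builtin
   list itself): using the DEF_GCC_BUILTIN wrapper defined just below, an
   entry such as

     DEF_GCC_BUILTIN (BUILT_IN_EXPECT, "expect", BT_FN_LONG_LONG_LONG,
                      ATTR_CONST_NOTHROW_LIST)

   expands to

     DEF_BUILTIN (BUILT_IN_EXPECT, "__builtin_expect", BUILT_IN_NORMAL,
                  BT_FN_LONG_LONG_LONG, BT_LAST,
                  false, false, false, ATTR_CONST_NOTHROW_LIST, true)

   i.e. only the `__builtin_expect' form is declared (BOTH_P is false),
   there is no library fallback (FALLBACK_P is false), LIBTYPE is the
   dummy BT_LAST, and the builtin may be produced implicitly.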
*/ #undef DEF_GCC_BUILTIN #define DEF_GCC_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, BT_LAST, \ false, false, false, ATTRS, true) /* A library builtin (like __builtin_strchr) is a builtin equivalent of an ANSI/ISO standard library function. In addition to the `__builtin' version, we will create an ordinary version (e.g., `strchr') as well. If we cannot compute the answer using the builtin function, we will fall back to the standard library version. */ #undef DEF_LIB_BUILTIN #define DEF_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ true, true, false, ATTRS, true) /* Like DEF_LIB_BUILTIN, except that the function is not one that is specified by ANSI/ISO C. So, when we're being fully conformant we ignore the version of these builtins that does not begin with __builtin. */ #undef DEF_EXT_LIB_BUILTIN #define DEF_EXT_LIB_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ true, true, true, ATTRS, false) /* Like DEF_LIB_BUILTIN, except that the function is only a part of the standard in C94 or above. */ #undef DEF_C94_BUILTIN #define DEF_C94_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ true, true, !flag_isoc94, ATTRS, TARGET_C99_FUNCTIONS) /* Like DEF_LIB_BUILTIN, except that the function is only a part of the standard in C99 or above. */ #undef DEF_C99_BUILTIN #define DEF_C99_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS) /* A builtin that is specified by C99, but whose name C90 reserves for future use. We can still recognize the builtin in C90 mode but we can't produce it implicitly. */ #undef DEF_C99_C90RES_BUILTIN #define DEF_C99_C90RES_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \ true, true, !flag_isoc99, ATTRS, TARGET_C99_FUNCTIONS) /* Define an attribute list for math functions that are normally "impure" because some of them may write into global memory for `errno'. If !flag_errno_math they are instead "const". */ #undef ATTR_MATHFN_ERRNO #define ATTR_MATHFN_ERRNO (flag_errno_math ? \ ATTR_NOTHROW_LIST : ATTR_CONST_NOTHROW_LIST) /* Define an attribute list for math functions that are normally "pure" but if flag_unsafe_math_optimizations is set they are instead "const". This distinction accounts for the fact that some math functions check the rounding mode which is akin to examining global memory. In "unsafe" mode we can be less careful. */ #undef ATTR_MATHFN_FPROUNDING #define ATTR_MATHFN_FPROUNDING (flag_unsafe_math_optimizations ? \ ATTR_CONST_NOTHROW_LIST : ATTR_PURE_NOTHROW_LIST) /* Define an attribute list for math functions that are normally "impure" because some of them may write into global memory for `errno'. If !flag_errno_math, we can possibly use "pure" or "const" depending on whether we care about FP rounding. */ #undef ATTR_MATHFN_FPROUNDING_ERRNO #define ATTR_MATHFN_FPROUNDING_ERRNO (flag_errno_math ? \ ATTR_NOTHROW_LIST : ATTR_MATHFN_FPROUNDING) /* Define an attribute list for math functions that need to mind FP rounding, but because they store into memory they are never "const" or "pure". Use of this macro is mainly for documentation and maintenance purposes. */ #undef ATTR_MATHFN_FPROUNDING_STORE #define ATTR_MATHFN_FPROUNDING_STORE ATTR_NOTHROW_LIST /* Category: math builtins.
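   A note on how the attribute macros above resolve for the entries that
   follow: with the usual defaults (flag_errno_math set,
   flag_unsafe_math_optimizations clear), ATTR_MATHFN_ERRNO and
   ATTR_MATHFN_FPROUNDING_ERRNO both resolve to ATTR_NOTHROW_LIST, and
   ATTR_MATHFN_FPROUNDING resolves to ATTR_PURE_NOTHROW_LIST.  Under
   -ffast-math (which normally clears flag_errno_math and sets
   flag_unsafe_math_optimizations) all three resolve to
   ATTR_CONST_NOTHROW_LIST, so for example repeated calls to sin with the
   same argument may be merged and calls whose result is unused may be
   deleted.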
*/ DEF_LIB_BUILTIN (BUILT_IN_ACOS, "acos", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSF, "acosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ACOSH, "acosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ACOSHF, "acoshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ACOSHL, "acoshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ACOSL, "acosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_ASIN, "asin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINF, "asinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ASINH, "asinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ASINHF, "asinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ASINHL, "asinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_ASINL, "asinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_ATAN, "atan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_ATAN2, "atan2", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2F, "atan2f", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATAN2L, "atan2l", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANF, "atanf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ATANH, "atanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ATANHF, "atanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ATANHL, "atanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_ATANL, "atanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CBRT, "cbrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CBRTF, "cbrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CBRTL, "cbrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_CEIL, "ceil", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILF, "ceilf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_CEILL, "ceill", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_COPYSIGN, "copysign", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_COPYSIGNF, "copysignf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_COPYSIGNL, "copysignl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_COS, "cos", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSF, "cosf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_COSH, "cosh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHF, "coshf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSHL, "coshl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_COSL, "cosl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_EXT_LIB_BUILTIN (BUILT_IN_DREM, "drem", 
BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_DREMF, "dremf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_DREML, "dreml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERF, "erf", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ERFC, "erfc", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERFCF, "erfcf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERFCL, "erfcl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ERFF, "erff", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_ERFL, "erfl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_EXP, "exp", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXP10, "exp10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXP10F, "exp10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXP10L, "exp10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXP2, "exp2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXP2F, "exp2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXP2L, "exp2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPF, "expf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_EXPL, "expl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXPM1, "expm1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXPM1F, "expm1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_EXPM1L, "expm1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_FABS, "fabs", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSF, "fabsf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FABSL, "fabsl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FDIM, "fdim", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_FDIMF, "fdimf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_FDIML, "fdiml", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_FLOOR, "floor", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORF, "floorf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_FLOORL, "floorl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMA, "fma", BT_FN_DOUBLE_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_FMAF, "fmaf", BT_FN_FLOAT_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_FMAL, "fmal", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_FMAX, "fmax", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMAXF, "fmaxf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMAXL, "fmaxl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMIN, "fmin", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) 
DEF_C99_BUILTIN (BUILT_IN_FMINF, "fminf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_FMINL, "fminl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_FMOD, "fmod", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODF, "fmodf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_FMODL, "fmodl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_FREXP, "frexp", BT_FN_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPF, "frexpf", BT_FN_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_FREXPL, "frexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMA, "gamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAF, "gammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_GAMMAL, "gammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_GCC_BUILTIN (BUILT_IN_HUGE_VAL, "huge_val", BT_FN_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALF, "huge_valf", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_HUGE_VALL, "huge_vall", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_HYPOT, "hypot", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_HYPOTF, "hypotf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_HYPOTL, "hypotl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ILOGB, "ilogb", BT_FN_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ILOGBF, "ilogbf", BT_FN_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ILOGBL, "ilogbl", BT_FN_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_GCC_BUILTIN (BUILT_IN_INF, "inf", BT_FN_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INFF, "inff", BT_FN_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INFL, "infl", BT_FN_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_J0, "j0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J0F, "j0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J0L, "j0l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J1, "j1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J1F, "j1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_J1L, "j1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_JN, "jn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_JNF, "jnf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_JNL, "jnl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_LDEXP, "ldexp", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPF, "ldexpf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LDEXPL, "ldexpl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LGAMMA, "lgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LGAMMAF, 
"lgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LGAMMAL, "lgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLRINT, "llrint", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLRINTF, "llrintf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLRINTL, "llrintl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLROUND, "llround", BT_FN_LONGLONG_DOUBLE, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLROUNDF, "llroundf", BT_FN_LONGLONG_FLOAT, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LLROUNDL, "llroundl", BT_FN_LONGLONG_LONGDOUBLE, ATTR_MATHFN_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_LOG, "log", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_LOG10, "log10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10F, "log10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOG10L, "log10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG1P, "log1p", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG1PF, "log1pf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG1PL, "log1pl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG2, "log2", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG2F, "log2f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOG2L, "log2l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOGB, "logb", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOGBF, "logbf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LOGBL, "logbl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGF, "logf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_LOGL, "logl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LRINT, "lrint", BT_FN_LONG_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LRINTF, "lrintf", BT_FN_LONG_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LRINTL, "lrintl", BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LROUND, "lround", BT_FN_LONG_DOUBLE, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LROUNDF, "lroundf", BT_FN_LONG_FLOAT, ATTR_MATHFN_ERRNO) DEF_C99_BUILTIN (BUILT_IN_LROUNDL, "lroundl", BT_FN_LONG_LONGDOUBLE, ATTR_MATHFN_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_MODF, "modf", BT_FN_DOUBLE_DOUBLE_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFF, "modff", BT_FN_FLOAT_FLOAT_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_MODFL, "modfl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_GCC_BUILTIN (BUILT_IN_NAN, "nan", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANF, "nanf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANL, "nanl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANS, "nans", BT_FN_DOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN (BUILT_IN_NANSF, "nansf", BT_FN_FLOAT_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_GCC_BUILTIN 
(BUILT_IN_NANSL, "nansl", BT_FN_LONGDOUBLE_CONST_STRING, ATTR_CONST_NOTHROW_NONNULL_1) DEF_C99_BUILTIN (BUILT_IN_NEARBYINT, "nearbyint", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_NEARBYINTF, "nearbyintf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_NEARBYINTL, "nearbyintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_NEXTAFTER, "nextafter", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERF, "nextafterf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTAFTERL, "nextafterl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARD, "nexttoward", BT_FN_DOUBLE_DOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDF, "nexttowardf", BT_FN_FLOAT_FLOAT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_NEXTTOWARDL, "nexttowardl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_POW, "pow", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10, "pow10", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10F, "pow10f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_POW10L, "pow10l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_POWF, "powf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_POWL, "powl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMAINDER, "remainder", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMAINDERF, "remainderf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMAINDERL, "remainderl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_REMQUO, "remquo", BT_FN_DOUBLE_DOUBLE_DOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_BUILTIN (BUILT_IN_REMQUOF, "remquof", BT_FN_FLOAT_FLOAT_FLOAT_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_BUILTIN (BUILT_IN_REMQUOL, "remquol", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE_INTPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_BUILTIN (BUILT_IN_RINT, "rint", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_RINTF, "rintf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_RINTL, "rintl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_ROUND, "round", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ROUNDF, "roundf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ROUNDL, "roundl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALB, "scalb", BT_FN_DOUBLE_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBF, "scalbf", BT_FN_FLOAT_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SCALBL, "scalbl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBLN, "scalbln", BT_FN_DOUBLE_DOUBLE_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBLNF, "scalblnf", BT_FN_FLOAT_FLOAT_LONG, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBLNL, "scalblnl", BT_FN_LONGDOUBLE_LONGDOUBLE_LONG, 
ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBN, "scalbn", BT_FN_DOUBLE_DOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBNF, "scalbnf", BT_FN_FLOAT_FLOAT_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_SCALBNL, "scalbnl", BT_FN_LONGDOUBLE_LONGDOUBLE_INT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBIT, "signbit", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITF, "signbitf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNBITL, "signbitl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICAND, "significand", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDF, "significandf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_SIGNIFICANDL, "significandl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_SIN, "sin", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOS, "sincos", BT_FN_VOID_DOUBLE_DOUBLEPTR_DOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSF, "sincosf", BT_FN_VOID_FLOAT_FLOATPTR_FLOATPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_EXT_LIB_BUILTIN (BUILT_IN_SINCOSL, "sincosl", BT_FN_VOID_LONGDOUBLE_LONGDOUBLEPTR_LONGDOUBLEPTR, ATTR_MATHFN_FPROUNDING_STORE) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINF, "sinf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_SINH, "sinh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHF, "sinhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINHL, "sinhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SINL, "sinl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_SQRT, "sqrt", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTF, "sqrtf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_C90RES_BUILTIN (BUILT_IN_SQRTL, "sqrtl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_LIB_BUILTIN (BUILT_IN_TAN, "tan", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANF, "tanf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_LIB_BUILTIN (BUILT_IN_TANH, "tanh", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHF, "tanhf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANHL, "tanhl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_C90RES_BUILTIN (BUILT_IN_TANL, "tanl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_TGAMMA, "tgamma", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_TGAMMAF, "tgammaf", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_TGAMMAL, "tgammal", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_C99_BUILTIN (BUILT_IN_TRUNC, "trunc", BT_FN_DOUBLE_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_TRUNCF, "truncf", BT_FN_FLOAT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_TRUNCL, "truncl", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0, "y0", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0F, "y0f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y0L, "y0l", 
BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1, "y1", BT_FN_DOUBLE_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1F, "y1f", BT_FN_FLOAT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_Y1L, "y1l", BT_FN_LONGDOUBLE_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_YN, "yn", BT_FN_DOUBLE_INT_DOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_YNF, "ynf", BT_FN_FLOAT_INT_FLOAT, ATTR_MATHFN_FPROUNDING_ERRNO) DEF_EXT_LIB_BUILTIN (BUILT_IN_YNL, "ynl", BT_FN_LONGDOUBLE_INT_LONGDOUBLE, ATTR_MATHFN_FPROUNDING_ERRNO) /* Category: _Complex math builtins. */ /* The C99 clog function conflicts with C++ iostreams clog, see http://gcc.gnu.org/ml/gcc-patches/2003-09/msg00510.html */ DEF_C99_BUILTIN (BUILT_IN_CABS, "cabs", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CABSF, "cabsf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CABSL, "cabsl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOS, "cacos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSF, "cacosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSH, "cacosh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSHF, "cacoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSHL, "cacoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CACOSL, "cacosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CARG, "carg", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CARGF, "cargf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CARGL, "cargl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASIN, "casin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINF, "casinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINH, "casinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINHF, "casinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINHL, "casinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CASINL, "casinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATAN, "catan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANF, "catanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANH, "catanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANHF, "catanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANHL, "catanhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CATANL, "catanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOS, "ccos", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSF, "ccosf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSH, "ccosh", 
BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSHF, "ccoshf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSHL, "ccoshl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CCOSL, "ccosl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CEXP, "cexp", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CEXPF, "cexpf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CEXPL, "cexpl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CIMAG, "cimag", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CIMAGF, "cimagf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CIMAGL, "cimagl", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) /*DEF_C99_BUILTIN (BUILT_IN_CLOG, "clog", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING)*/ /*DEF_C99_BUILTIN (BUILT_IN_CLOGF, "clogf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING)*/ /*DEF_C99_BUILTIN (BUILT_IN_CLOGL, "clogl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING)*/ DEF_C99_BUILTIN (BUILT_IN_CONJ, "conj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CONJF, "conjf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CONJL, "conjl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CPOW, "cpow", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPOWF, "cpowf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPOWL, "cpowl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPROJ, "cproj", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPROJF, "cprojf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CPROJL, "cprojl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CREAL, "creal", BT_FN_DOUBLE_COMPLEX_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CREALF, "crealf", BT_FN_FLOAT_COMPLEX_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CREALL, "creall", BT_FN_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_CSIN, "csin", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINF, "csinf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINH, "csinh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINHF, "csinhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINHL, "csinhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSINL, "csinl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSQRT, "csqrt", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSQRTF, "csqrtf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CSQRTL, "csqrtl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, 
ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTAN, "ctan", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANF, "ctanf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANH, "ctanh", BT_FN_COMPLEX_DOUBLE_COMPLEX_DOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANHF, "ctanhf", BT_FN_COMPLEX_FLOAT_COMPLEX_FLOAT, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANHL, "ctanhl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) DEF_C99_BUILTIN (BUILT_IN_CTANL, "ctanl", BT_FN_COMPLEX_LONGDOUBLE_COMPLEX_LONGDOUBLE, ATTR_MATHFN_FPROUNDING) /* Category: string/memory builtins. */ /* bcmp, bcopy and bzero have traditionally accepted NULL pointers when the length parameter is zero, so don't apply attribute "nonnull". */ DEF_EXT_LIB_BUILTIN (BUILT_IN_BCMP, "bcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_BCOPY, "bcopy", BT_FN_VOID_CONST_PTR_PTR_SIZE, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_BZERO, "bzero", BT_FN_VOID_PTR_SIZE, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FFS, "ffs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSL, "ffsl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_INDEX, "index", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_MEMCMP, "memcmp", BT_FN_INT_CONST_PTR_CONST_PTR_SIZE, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_MEMCPY, "memcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_MEMMOVE, "memmove", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_MEMPCPY, "mempcpy", BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_MEMSET, "memset", BT_FN_PTR_PTR_INT_SIZE, ATTR_NOTHROW_NONNULL_1) DEF_EXT_LIB_BUILTIN (BUILT_IN_RINDEX, "rindex", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_EXT_LIB_BUILTIN (BUILT_IN_STPCPY, "stpcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCAT, "strcat", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCHR, "strchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRCMP, "strcmp", BT_FN_INT_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCPY, "strcpy", BT_FN_STRING_STRING_CONST_STRING, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRCSPN, "strcspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_STRDUP, "strdup", BT_FN_STRING_CONST_STRING, ATTR_MALLOC_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRLEN, "strlen", BT_FN_SIZE_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRNCAT, "strncat", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRNCMP, "strncmp", BT_FN_INT_CONST_STRING_CONST_STRING_SIZE, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRNCPY, "strncpy", BT_FN_STRING_STRING_CONST_STRING_SIZE, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRPBRK, "strpbrk", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRRCHR, "strrchr", BT_FN_STRING_CONST_STRING_INT, ATTR_PURE_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_STRSPN, 
"strspn", BT_FN_SIZE_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_STRSTR, "strstr", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_PURE_NOTHROW_NONNULL_1_2) /* Category: stdio builtins. */ DEF_LIB_BUILTIN (BUILT_IN_FPRINTF, "fprintf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) DEF_EXT_LIB_BUILTIN (BUILT_IN_FPRINTF_UNLOCKED, "fprintf_unlocked", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) DEF_LIB_BUILTIN (BUILT_IN_FPUTC, "fputc", BT_FN_INT_INT_FILEPTR, ATTR_NOTHROW_NONNULL_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTC_UNLOCKED, "fputc_unlocked", BT_FN_INT_INT_FILEPTR, ATTR_NOTHROW_NONNULL_2) DEF_LIB_BUILTIN (BUILT_IN_FPUTS, "fputs", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NOTHROW_NONNULL_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_FPUTS_UNLOCKED, "fputs_unlocked", BT_FN_INT_CONST_STRING_FILEPTR, ATTR_NOTHROW_NONNULL_1_2) DEF_LIB_BUILTIN (BUILT_IN_FSCANF, "fscanf", BT_FN_INT_FILEPTR_CONST_STRING_VAR, ATTR_FORMAT_SCANF_2_3) DEF_LIB_BUILTIN (BUILT_IN_FWRITE, "fwrite", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NOTHROW_NONNULL_1_4) DEF_EXT_LIB_BUILTIN (BUILT_IN_FWRITE_UNLOCKED, "fwrite_unlocked", BT_FN_SIZE_CONST_PTR_SIZE_SIZE_FILEPTR, ATTR_NOTHROW_NONNULL_1_4) DEF_LIB_BUILTIN (BUILT_IN_PRINTF, "printf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_1_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_PRINTF_UNLOCKED, "printf_unlocked", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_1_2) DEF_LIB_BUILTIN (BUILT_IN_PUTCHAR, "putchar", BT_FN_INT_INT, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTCHAR_UNLOCKED, "putchar_unlocked", BT_FN_INT_INT, ATTR_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_PUTS, "puts", BT_FN_INT_CONST_STRING, ATTR_NOTHROW_NONNULL_1) DEF_EXT_LIB_BUILTIN (BUILT_IN_PUTS_UNLOCKED, "puts_unlocked", BT_FN_INT_CONST_STRING, ATTR_NOTHROW_NONNULL_1) DEF_LIB_BUILTIN (BUILT_IN_SCANF, "scanf", BT_FN_INT_CONST_STRING_VAR, ATTR_FORMAT_SCANF_1_2) DEF_C99_BUILTIN (BUILT_IN_SNPRINTF, "snprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_3_4) DEF_LIB_BUILTIN (BUILT_IN_SPRINTF, "sprintf", BT_FN_INT_STRING_CONST_STRING_VAR, ATTR_FORMAT_PRINTF_2_3) DEF_LIB_BUILTIN (BUILT_IN_SSCANF, "sscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_FORMAT_SCANF_2_3) DEF_LIB_BUILTIN (BUILT_IN_VFPRINTF, "vfprintf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_2_0) DEF_C99_BUILTIN (BUILT_IN_VFSCANF, "vfscanf", BT_FN_INT_FILEPTR_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_2_0) DEF_LIB_BUILTIN (BUILT_IN_VPRINTF, "vprintf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_1_0) DEF_C99_BUILTIN (BUILT_IN_VSCANF, "vscanf", BT_FN_INT_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_1_0) DEF_C99_BUILTIN (BUILT_IN_VSNPRINTF, "vsnprintf", BT_FN_INT_STRING_SIZE_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_3_0) DEF_LIB_BUILTIN (BUILT_IN_VSPRINTF, "vsprintf", BT_FN_INT_STRING_CONST_STRING_VALIST_ARG, ATTR_FORMAT_PRINTF_2_0) DEF_C99_BUILTIN (BUILT_IN_VSSCANF, "vsscanf", BT_FN_INT_CONST_STRING_CONST_STRING_VALIST_ARG, ATTR_FORMAT_SCANF_2_0) /* Category: ctype builtins. 
*/ DEF_LIB_BUILTIN (BUILT_IN_ISALNUM, "isalnum", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISALPHA, "isalpha", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISASCII, "isascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ISBLANK, "isblank", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISCNTRL, "iscntrl", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISDIGIT, "isdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISGRAPH, "isgraph", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISLOWER, "islower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISPRINT, "isprint", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISPUNCT, "ispunct", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISSPACE, "isspace", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISUPPER, "isupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ISXDIGIT, "isxdigit", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_TOASCII, "toascii", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_TOLOWER, "tolower", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_TOUPPER, "toupper", BT_FN_INT_INT, ATTR_PURE_NOTHROW_LIST) /* Category: wctype builtins. */ DEF_C94_BUILTIN (BUILT_IN_ISWALNUM, "iswalnum", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWALPHA, "iswalpha", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_ISWBLANK, "iswblank", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWCNTRL, "iswcntrl", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWDIGIT, "iswdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWGRAPH, "iswgraph", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWLOWER, "iswlower", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWPRINT, "iswprint", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWPUNCT, "iswpunct", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWSPACE, "iswspace", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWUPPER, "iswupper", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_ISWXDIGIT, "iswxdigit", BT_FN_INT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_TOWLOWER, "towlower", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LIST) DEF_C94_BUILTIN (BUILT_IN_TOWUPPER, "towupper", BT_FN_WINT_WINT, ATTR_PURE_NOTHROW_LIST) /* Category: miscellaneous builtins. 
*/ DEF_LIB_BUILTIN (BUILT_IN_ABORT, "abort", BT_FN_VOID, ATTR_NORETURN_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_ABS, "abs", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_AGGREGATE_INCOMING_ADDRESS, "aggregate_incoming_address", BT_FN_PTR_VAR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_ALLOCA, "alloca", BT_FN_PTR_SIZE, ATTR_MALLOC_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_APPLY, "apply", BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_APPLY_ARGS, "apply_args", BT_FN_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_ARGS_INFO, "args_info", BT_FN_INT_INT, ATTR_NULL) DEF_LIB_BUILTIN (BUILT_IN_CALLOC, "calloc", BT_FN_PTR_SIZE_SIZE, ATTR_MALLOC_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CLASSIFY_TYPE, "classify_type", BT_FN_INT_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_CLZ, "clz", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CLZL, "clzl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CLZLL, "clzll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CONSTANT_P, "constant_p", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CTZ, "ctz", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CTZL, "ctzl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_CTZLL, "ctzll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_DCGETTEXT, "dcgettext", BT_FN_STRING_CONST_STRING_CONST_STRING_INT, ATTR_FORMAT_ARG_2) DEF_EXT_LIB_BUILTIN (BUILT_IN_DGETTEXT, "dgettext", BT_FN_STRING_CONST_STRING_CONST_STRING, ATTR_FORMAT_ARG_2) DEF_GCC_BUILTIN (BUILT_IN_DWARF_CFA, "dwarf_cfa", BT_FN_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_DWARF_SP_COLUMN, "dwarf_sp_column", BT_FN_UNSIGNED, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN, "eh_return", BT_FN_VOID_PTRMODE_PTR, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EH_RETURN_DATA_REGNO, "eh_return_data_regno", BT_FN_INT_INT, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECL, "execl", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLP, "execlp", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECLE, "execle", BT_FN_INT_CONST_STRING_CONST_STRING_VAR, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECV, "execv", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVP, "execvp", BT_FN_INT_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_EXECVE, "execve", BT_FN_INT_CONST_STRING_PTR_CONST_STRING_PTR_CONST_STRING, ATTR_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_EXIT, "exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EXPECT, "expect", BT_FN_LONG_LONG_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EXTEND_POINTER, "extend_pointer", BT_FN_WORD_PTR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_EXTRACT_RETURN_ADDR, "extract_return_addr", BT_FN_PTR_PTR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_FRAME_ADDRESS, "frame_address", BT_FN_PTR_UNSIGNED, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_GETTEXT, "gettext", BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1) DEF_C99_BUILTIN (BUILT_IN_IMAXABS, "imaxabs", BT_FN_INTMAX_INTMAX, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INIT_DWARF_REG_SIZES, "init_dwarf_reg_size_table", BT_FN_VOID_PTR, ATTR_NULL) DEF_EXT_LIB_BUILTIN 
(BUILT_IN_FINITE, "finite", BT_FN_INT_DOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEF, "finitef", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_FINITEL, "finitel", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_ISINF, "isinf", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFF, "isinff", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISINFL, "isinfl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_C99_C90RES_BUILTIN (BUILT_IN_ISNAN, "isnan", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANF, "isnanf", BT_FN_INT_FLOAT, ATTR_CONST_NOTHROW_LIST) DEF_EXT_LIB_BUILTIN (BUILT_IN_ISNANL, "isnanl", BT_FN_INT_LONGDOUBLE, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISGREATER, "isgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISGREATEREQUAL, "isgreaterequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISLESS, "isless", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISLESSEQUAL, "islessequal", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISLESSGREATER, "islessgreater", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ISUNORDERED, "isunordered", BT_FN_INT_VAR, ATTR_CONST_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_LABS, "labs", BT_FN_LONG_LONG, ATTR_CONST_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN_LLABS, "llabs", BT_FN_LONGLONG_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_LONGJMP, "longjmp", BT_FN_VOID_PTR_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_LIB_BUILTIN (BUILT_IN_MALLOC, "malloc", BT_FN_PTR_SIZE, ATTR_MALLOC_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_NEXT_ARG, "next_arg", BT_FN_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_PARITY, "parity", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_PARITYL, "parityl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_PARITYLL, "parityll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_POPCOUNT, "popcount", BT_FN_INT_INT, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTL, "popcountl", BT_FN_INT_LONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTLL, "popcountll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_PREFETCH, "prefetch", BT_FN_VOID_CONST_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_RETURN, "return", BT_FN_VOID_PTR, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_RETURN_ADDRESS, "return_address", BT_FN_PTR_UNSIGNED, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_SAVEREGS, "saveregs", BT_FN_PTR_VAR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_SETJMP, "setjmp", BT_FN_INT_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STACK_ALLOC, "stack_alloc", BT_FN_VOID_PTR_SIZE, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STACK_SAVE, "stack_save", BT_FN_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STACK_RESTORE, "stack_restore", BT_FN_VOID_PTR, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_STDARG_START, "stdarg_start", BT_FN_VOID_VALIST_REF_VAR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN_STRFMON, "strfmon", BT_FN_SSIZE_STRING_SIZE_CONST_STRING_VAR, ATTR_FORMAT_STRFMON_3_4) DEF_LIB_BUILTIN (BUILT_IN_STRFTIME, "strftime", BT_FN_SIZE_STRING_SIZE_CONST_STRING_CONST_PTR, ATTR_FORMAT_STRFTIME_3_0) DEF_GCC_BUILTIN (BUILT_IN_TRAP, "trap", BT_FN_VOID, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_UNWIND_INIT, "unwind_init", BT_FN_VOID, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_UPDATE_SETJMP_BUF, "update_setjmp_buf", 
BT_FN_VOID_PTR_INT, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_VA_COPY, "va_copy", BT_FN_VOID_VALIST_REF_VALIST_ARG, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_VA_END, "va_end", BT_FN_VOID_VALIST_REF, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_VA_START, "va_start", BT_FN_VOID_VALIST_REF_VAR, ATTR_NULL) DEF_EXT_LIB_BUILTIN (BUILT_IN__EXIT, "_exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_C99_BUILTIN (BUILT_IN__EXIT2, "_Exit", BT_FN_VOID_INT, ATTR_NORETURN_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_INIT_TRAMPOLINE, "init_trampoline", BT_FN_VOID_PTR_PTR_PTR, ATTR_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_ADJUST_TRAMPOLINE, "adjust_trampoline", BT_FN_PTR_PTR, ATTR_CONST_NOTHROW_LIST) DEF_GCC_BUILTIN (BUILT_IN_NONLOCAL_GOTO, "nonlocal_goto", BT_FN_PTR_PTR, ATTR_NORETURN_NOTHROW_LIST) /* Profiling hooks. */ DEF_GCC_BUILTIN (BUILT_IN_PROFILE_FUNC_ENTER, "profile_func_enter", BT_FN_VOID, ATTR_NULL) DEF_GCC_BUILTIN (BUILT_IN_PROFILE_FUNC_EXIT, "profile_func_exit", BT_FN_VOID, ATTR_NULL) #undef DEF_BUILTIN targetm.init_builtins (); if (flag_mudflap) mudflap_init (); main_identifier_node = get_identifier ("main"); } tree build_va_arg (tree expr, tree type) { return build1 (VA_ARG_EXPR, type, expr); } /* Linked list of disabled built-in functions. */ typedef struct disabled_builtin { const char *name; struct disabled_builtin *next; } disabled_builtin; static disabled_builtin *disabled_builtins = NULL; static bool builtin_function_disabled_p (const char *); /* Disable a built-in function specified by -fno-builtin-NAME. If NAME begins with "__builtin_", give an error. */ void disable_builtin_function (const char *name) { if (strncmp (name, "__builtin_", strlen ("__builtin_")) == 0) error ("cannot disable built-in function `%s'", name); else { disabled_builtin *new = xmalloc (sizeof (disabled_builtin)); new->name = name; new->next = disabled_builtins; disabled_builtins = new; } } /* Return true if the built-in function NAME has been disabled, false otherwise. */ static bool builtin_function_disabled_p (const char *name) { disabled_builtin *p; for (p = disabled_builtins; p != NULL; p = p->next) { if (strcmp (name, p->name) == 0) return true; } return false; } /* Possibly define a builtin function with one or two names. BUILTIN_NAME is an __builtin_-prefixed name; NAME is the ordinary name; one or both of these may be NULL (though both being NULL is useless). BUILTIN_TYPE is the type of the __builtin_-prefixed function; TYPE is the type of the function with the ordinary name. These may differ if the ordinary name is declared with a looser type to avoid conflicts with headers. FUNCTION_CODE and CLASS are as for builtin_function. If LIBRARY_NAME_P is nonzero, NAME is passed as the LIBRARY_NAME parameter to builtin_function when declaring BUILTIN_NAME. If NONANSI_P is nonzero, the name NAME is treated as a non-ANSI name; ATTRS is the tree list representing the builtin's function attributes. Returns the declaration of BUILTIN_NAME, if any, otherwise the declaration of NAME. Does not declare NAME if flag_no_builtin, or if NONANSI_P and flag_no_nonansi_builtin. */ static tree builtin_function_2 (const char *builtin_name, const char *name, tree builtin_type, tree type, int function_code, enum built_in_class class, int library_name_p, int nonansi_p, tree attrs) { tree bdecl = NULL_TREE; tree decl = NULL_TREE; if (builtin_name != 0) bdecl = builtin_function (builtin_name, builtin_type, function_code, class, library_name_p ? 
name : NULL, attrs); if (name != 0 && !flag_no_builtin && !builtin_function_disabled_p (name) && !(nonansi_p && flag_no_nonansi_builtin)) decl = builtin_function (name, type, function_code, class, NULL, attrs); return (bdecl != 0 ? bdecl : decl); } /* Nonzero if the type T promotes to int. This is (nearly) the integral promotions defined in ISO C99 6.3.1.1/2. */ bool c_promoting_integer_type_p (tree t) { switch (TREE_CODE (t)) { case INTEGER_TYPE: return (TYPE_MAIN_VARIANT (t) == char_type_node || TYPE_MAIN_VARIANT (t) == signed_char_type_node || TYPE_MAIN_VARIANT (t) == unsigned_char_type_node || TYPE_MAIN_VARIANT (t) == short_integer_type_node || TYPE_MAIN_VARIANT (t) == short_unsigned_type_node || TYPE_PRECISION (t) < TYPE_PRECISION (integer_type_node)); case ENUMERAL_TYPE: /* ??? Technically all enumerations not larger than an int promote to an int. But this is used along code paths that only want to notice a size change. */ return TYPE_PRECISION (t) < TYPE_PRECISION (integer_type_node); case BOOLEAN_TYPE: return 1; default: return 0; } } /* Return 1 if PARMS specifies a fixed number of parameters and none of their types is affected by default promotions. */ int self_promoting_args_p (tree parms) { tree t; for (t = parms; t; t = TREE_CHAIN (t)) { tree type = TREE_VALUE (t); if (TREE_CHAIN (t) == 0 && type != void_type_node) return 0; if (type == 0) return 0; if (TYPE_MAIN_VARIANT (type) == float_type_node) return 0; if (c_promoting_integer_type_p (type)) return 0; } return 1; } /* Recursively examines the array elements of TYPE, until a non-array element type is found. */ tree strip_array_types (tree type) { while (TREE_CODE (type) == ARRAY_TYPE) type = TREE_TYPE (type); return type; } /* Recursively remove any '*' or '&' operator from TYPE. */ tree strip_pointer_operator (tree t) { while (POINTER_TYPE_P (t)) t = TREE_TYPE (t); return t; } /* Walk the statement tree, rooted at *tp. Apply FUNC to all the sub-trees of *TP in a pre-order traversal. FUNC is called with the DATA and the address of each sub-tree. If FUNC returns a non-NULL value, the traversal is aborted, and the value returned by FUNC is returned. If FUNC sets WALK_SUBTREES to zero, then the subtrees of the node being visited are not walked. We don't need a without_duplicates variant of this one because the statement tree is a tree, not a graph. */ tree walk_stmt_tree (tree *tp, walk_tree_fn func, void *data) { enum tree_code code; int walk_subtrees; tree result; int i, len; #define WALK_SUBTREE(NODE) \ do \ { \ result = walk_stmt_tree (&(NODE), func, data); \ if (result) \ return result; \ } \ while (0) /* Skip empty subtrees. */ if (!*tp) return NULL_TREE; /* Skip subtrees below non-statement nodes. */ if (!STATEMENT_CODE_P (TREE_CODE (*tp))) return NULL_TREE; /* Call the function. */ walk_subtrees = 1; result = (*func) (tp, &walk_subtrees, data); /* If we found something, return it. */ if (result) return result; /* FUNC may have modified the tree, recheck that we're looking at a statement node. */ code = TREE_CODE (*tp); if (!STATEMENT_CODE_P (code)) return NULL_TREE; /* Visit the subtrees unless FUNC decided that there was nothing interesting below this point in the tree. */ if (walk_subtrees) { /* Walk over all the sub-trees of this operand. Statement nodes never contain RTL, and we needn't worry about TARGET_EXPRs. */ len = TREE_CODE_LENGTH (code); /* Go through the subtrees. We need to do this in forward order so that the scope of a FOR_EXPR is handled properly. 
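     As an illustrative aside (not part of the original source), a
     walk_tree_fn suitable for this walker follows the usual shape: it
     receives the address of the node, a flag it may clear to skip that
     node's subtrees, and the opaque DATA pointer, e.g.

       static tree
       count_stmts (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data)
       {
         if (STATEMENT_CODE_P (TREE_CODE (*tp)))
           ++*(int *) data;
         return NULL_TREE;
       }

     driven as walk_stmt_tree (&body, count_stmts, &count); the names
     count_stmts, body and count are hypothetical.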
*/ for (i = 0; i < len; ++i) WALK_SUBTREE (TREE_OPERAND (*tp, i)); } /* Finally visit the chain. This can be tail-recursion optimized if we write it this way. */ return walk_stmt_tree (&TREE_CHAIN (*tp), func, data); #undef WALK_SUBTREE } /* Used to compare case labels. K1 and K2 are actually tree nodes representing case labels, or NULL_TREE for a `default' label. Returns -1 if K1 is ordered before K2, 1 if K1 is ordered after K2, and 0 if K1 and K2 are equal. */ int case_compare (splay_tree_key k1, splay_tree_key k2) { /* Consider a NULL key (such as arises with a `default' label) to be smaller than anything else. */ if (!k1) return k2 ? -1 : 0; else if (!k2) return k1 ? 1 : 0; return tree_int_cst_compare ((tree) k1, (tree) k2); } /* Process a case label for the range LOW_VALUE ... HIGH_VALUE. If LOW_VALUE and HIGH_VALUE are both NULL_TREE then this case label is actually a `default' label. If only HIGH_VALUE is NULL_TREE, then this case label was declared using the usual C/C++ syntax, rather than the GNU case range extension. CASES is a splay tree containing all the case ranges processed so far; COND is the condition for the switch-statement itself. Returns the CASE_LABEL_EXPR created, or ERROR_MARK_NODE if no CASE_LABEL_EXPR is created. */ tree c_add_case_label (splay_tree cases, tree cond, tree low_value, tree high_value) { tree type; tree label; tree case_label; splay_tree_node node; /* Create the LABEL_DECL itself. */ label = create_artificial_label (); /* If there was an error processing the switch condition, bail now before we get more confused. */ if (!cond || cond == error_mark_node) goto error_out; if ((low_value && TREE_TYPE (low_value) && POINTER_TYPE_P (TREE_TYPE (low_value))) || (high_value && TREE_TYPE (high_value) && POINTER_TYPE_P (TREE_TYPE (high_value)))) error ("pointers are not permitted as case values"); /* Case ranges are a GNU extension. */ if (high_value && pedantic) pedwarn ("range expressions in switch statements are non-standard"); type = TREE_TYPE (cond); if (low_value) { low_value = check_case_value (low_value); low_value = convert_and_check (type, low_value); } if (high_value) { high_value = check_case_value (high_value); high_value = convert_and_check (type, high_value); } /* If an error has occurred, bail out now. */ if (low_value == error_mark_node || high_value == error_mark_node) goto error_out; /* If the LOW_VALUE and HIGH_VALUE are the same, then this isn't really a case range, even though it was written that way. Remove the HIGH_VALUE to simplify later processing. */ if (tree_int_cst_equal (low_value, high_value)) high_value = NULL_TREE; if (low_value && high_value && !tree_int_cst_lt (low_value, high_value)) warning ("empty range specified"); /* Look up the LOW_VALUE in the table of case labels we already have. */ node = splay_tree_lookup (cases, (splay_tree_key) low_value); /* If there was not an exact match, check for overlapping ranges. There's no need to do this if there's no LOW_VALUE or HIGH_VALUE; that's a `default' label and the only overlap is an exact match. */ if (!node && (low_value || high_value)) { splay_tree_node low_bound; splay_tree_node high_bound; /* Even though there wasn't an exact match, there might be an overlap between this case range and another case range. Since we've (inductively) not allowed any overlapping case ranges, we simply need to find the greatest low case label that is smaller than LOW_VALUE, and the smallest low case label that is greater than LOW_VALUE. If there is an overlap it will occur in one of these two ranges.
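     As a worked illustration (not in the original comment): suppose the
     splay tree already holds the range 1 ... 5 and a new label with
     LOW_VALUE 3 arrives.  The predecessor of 3 is the node keyed on 1;
     its CASE_HIGH of 5 compares >= 3, so the first test below reports
     the overlap.  A new plain label 7, by contrast, has predecessor
     1 ... 5 (no overlap, since 5 < 7) and no successor, so it is
     accepted.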
*/ low_bound = splay_tree_predecessor (cases, (splay_tree_key) low_value); high_bound = splay_tree_successor (cases, (splay_tree_key) low_value); /* Check to see if the LOW_BOUND overlaps. It is smaller than the LOW_VALUE, so there is no need to check unless the LOW_BOUND is in fact itself a case range. */ if (low_bound && CASE_HIGH ((tree) low_bound->value) && tree_int_cst_compare (CASE_HIGH ((tree) low_bound->value), low_value) >= 0) node = low_bound; /* Check to see if the HIGH_BOUND overlaps. The low end of that range is bigger than the low end of the current range, so we are only interested if the current range is a real range, and not an ordinary case label. */ else if (high_bound && high_value && (tree_int_cst_compare ((tree) high_bound->key, high_value) <= 0)) node = high_bound; } /* If there was an overlap, issue an error. */ if (node) { tree duplicate = CASE_LABEL ((tree) node->value); if (high_value) { error ("duplicate (or overlapping) case value"); error ("%Jthis is the first entry overlapping that value", duplicate); } else if (low_value) { error ("duplicate case value") ; error ("%Jpreviously used here", duplicate); } else { error ("multiple default labels in one switch"); error ("%Jthis is the first default label", duplicate); } goto error_out; } /* Add a CASE_LABEL to the statement-tree. */ case_label = add_stmt (build_case_label (low_value, high_value, label)); /* Register this case label in the splay tree. */ splay_tree_insert (cases, (splay_tree_key) low_value, (splay_tree_value) case_label); return case_label; error_out: /* Add a label so that the back-end doesn't think that the beginning of the switch is unreachable. Note that we do not add a case label, as that just leads to duplicates and thence to aborts later on. */ if (!cases->root) { tree t = create_artificial_label (); add_stmt (build_stmt (LABEL_EXPR, t)); } return error_mark_node; } /* Subroutines of c_do_switch_warnings, called via splay_tree_foreach. Used to verify that case values match up with enumerator values. */ static void match_case_to_enum_1 (tree key, tree type, tree label) { char buf[2 + 2*HOST_BITS_PER_WIDE_INT/4 + 1]; /* ??? Not working too hard to print the double-word value. Should perhaps be done with %lwd in the diagnostic routines? */ if (TREE_INT_CST_HIGH (key) == 0) snprintf (buf, sizeof (buf), HOST_WIDE_INT_PRINT_UNSIGNED, TREE_INT_CST_LOW (key)); else if (!TYPE_UNSIGNED (type) && TREE_INT_CST_HIGH (key) == -1 && TREE_INT_CST_LOW (key) != 0) snprintf (buf, sizeof (buf), "-" HOST_WIDE_INT_PRINT_UNSIGNED, -TREE_INT_CST_LOW (key)); else snprintf (buf, sizeof (buf), HOST_WIDE_INT_PRINT_DOUBLE_HEX, TREE_INT_CST_HIGH (key), TREE_INT_CST_LOW (key)); if (TYPE_NAME (type) == 0) warning ("%Jcase value `%s' not in enumerated type", CASE_LABEL (label), buf); else warning ("%Jcase value `%s' not in enumerated type `%T'", CASE_LABEL (label), buf, type); } static int match_case_to_enum (splay_tree_node node, void *data) { tree label = (tree) node->value; tree type = data; /* Skip default case. */ if (!CASE_LOW (label)) return 0; /* If TREE_ADDRESSABLE is not set, that means CASE_LOW did not appear when we did our enum->case scan. Reset our scratch bit after. */ if (!TREE_ADDRESSABLE (label)) match_case_to_enum_1 (CASE_LOW (label), type, label); else TREE_ADDRESSABLE (label) = 0; /* If CASE_HIGH is non-null, we have a range. Here we must search. Note that the old code in stmt.c did not check for the values in the range either, just the endpoints. 
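     Illustration (not in the original comment): with enum { A = 1, B = 2,
     C = 5 } and the label case 2 ... 5, only the endpoints are checked;
     the loop below scans TYPE_VALUES for the high endpoint 5 and warns
     via match_case_to_enum_1 if no enumerator has that value.  Values
     strictly inside the range (3 and 4 here) are not examined.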
*/ if (CASE_HIGH (label)) { tree chain, key = CASE_HIGH (label); for (chain = TYPE_VALUES (type); chain && !tree_int_cst_equal (key, TREE_VALUE (chain)); chain = TREE_CHAIN (chain)) continue; if (!chain) match_case_to_enum_1 (key, type, label); } return 0; } /* Handle -Wswitch*. Called from the front end after parsing the switch construct. */ /* ??? Should probably be somewhere generic, since other languages besides C and C++ would want this. We'd want to agree on the datastructure, however, which is a problem. Alternately, we operate on gimplified switch_exprs, which I don't especially like. At the moment, however, C/C++ are the only tree-ssa languages that support enumerations at all, so the point is moot. */ void c_do_switch_warnings (splay_tree cases, tree switch_stmt) { splay_tree_node default_node; location_t switch_location; tree type; if (!warn_switch && !warn_switch_enum && !warn_switch_default) return; if (EXPR_HAS_LOCATION (switch_stmt)) switch_location = EXPR_LOCATION (switch_stmt); else switch_location = input_location; type = SWITCH_TYPE (switch_stmt); default_node = splay_tree_lookup (cases, (splay_tree_key) NULL); if (warn_switch_default && !default_node) warning ("%Hswitch missing default case", &switch_location); /* If the switch expression was an enumerated type, check that exactly all enumeration literals are covered by the cases. The check is made when -Wswitch was specified and there is no default case, or when -Wswitch-enum was specified. */ if (((warn_switch && !default_node) || warn_switch_enum) && type && TREE_CODE (type) == ENUMERAL_TYPE && TREE_CODE (SWITCH_COND (switch_stmt)) != INTEGER_CST) { tree chain; /* The time complexity here is O(N*lg(N)) worst case, but for the common case of monotonically increasing enumerators, it is O(N), since the nature of the splay tree will keep the next element adjacent to the root at all times. */ for (chain = TYPE_VALUES (type); chain; chain = TREE_CHAIN (chain)) { splay_tree_node node = splay_tree_lookup (cases, (splay_tree_key) TREE_VALUE (chain)); if (node) { /* Mark the CASE_LOW part of the case entry as seen, so that we save time later. Choose TREE_ADDRESSABLE randomly as a bit that won't have been set to-date. */ tree label = (tree) node->value; TREE_ADDRESSABLE (label) = 1; } else { /* Warn if there are enumerators that don't correspond to case expressions. */ warning ("%Henumeration value `%E' not handled in switch", &switch_location, TREE_PURPOSE (chain)); } } /* Warn if there are case expressions that don't correspond to enumerators. This can occur since C and C++ don't enforce type-checking of assignments to enumeration variables. The time complexity here is O(N**2) worst case, since we've not sorted the enumeration values. However, in the absence of case ranges this is O(N), since all single cases that corresponded to enumerations have been marked above. */ splay_tree_foreach (cases, match_case_to_enum, type); } } /* Finish an expression taking the address of LABEL (an IDENTIFIER_NODE). Returns an expression for the address. */ tree finish_label_address_expr (tree label) { tree result; if (pedantic) pedwarn ("taking the address of a label is non-standard"); if (label == error_mark_node) return error_mark_node; label = lookup_label (label); if (label == NULL_TREE) result = null_pointer_node; else { TREE_USED (label) = 1; result = build1 (ADDR_EXPR, ptr_type_node, label); /* The current function in not necessarily uninlinable. 
Computed gotos are incompatible with inlining, but the value here could be used only in a diagnostic, for example. */ } return result; } /* Hook used by expand_expr to expand language-specific tree codes. */ /* The only things that should go here are bits needed to expand constant initializers. Everything else should be handled by the gimplification routines. */ rtx c_expand_expr (tree exp, rtx target, enum machine_mode tmode, int modifier /* Actually enum_modifier. */, rtx *alt_rtl) { switch (TREE_CODE (exp)) { case COMPOUND_LITERAL_EXPR: { /* Initialize the anonymous variable declared in the compound literal, then return the variable. */ tree decl = COMPOUND_LITERAL_EXPR_DECL (exp); emit_local_var (decl); return expand_expr_real (decl, target, tmode, modifier, alt_rtl); } default: abort (); } } /* Hook used by unsafe_for_reeval to handle language-specific tree codes. */ int c_common_unsafe_for_reeval (tree exp) { /* Statement expressions may not be reevaluated, likewise compound literals. */ if (TREE_CODE (exp) == STMT_EXPR || TREE_CODE (exp) == COMPOUND_LITERAL_EXPR) return 2; /* Walk all other expressions. */ return -1; } /* Hook used by staticp to handle language-specific tree codes. */ int c_staticp (tree exp) { if (TREE_CODE (exp) == COMPOUND_LITERAL_EXPR && TREE_STATIC (COMPOUND_LITERAL_EXPR_DECL (exp))) return 1; return 0; } /* Given a boolean expression ARG, return a tree representing an increment or decrement (as indicated by CODE) of ARG. The front end must check for invalid cases (e.g., decrement in C++). */ tree boolean_increment (enum tree_code code, tree arg) { tree val; tree true_res = boolean_true_node; arg = stabilize_reference (arg); switch (code) { case PREINCREMENT_EXPR: val = build (MODIFY_EXPR, TREE_TYPE (arg), arg, true_res); break; case POSTINCREMENT_EXPR: val = build (MODIFY_EXPR, TREE_TYPE (arg), arg, true_res); arg = save_expr (arg); val = build (COMPOUND_EXPR, TREE_TYPE (arg), val, arg); val = build (COMPOUND_EXPR, TREE_TYPE (arg), arg, val); break; case PREDECREMENT_EXPR: val = build (MODIFY_EXPR, TREE_TYPE (arg), arg, invert_truthvalue (arg)); break; case POSTDECREMENT_EXPR: val = build (MODIFY_EXPR, TREE_TYPE (arg), arg, invert_truthvalue (arg)); arg = save_expr (arg); val = build (COMPOUND_EXPR, TREE_TYPE (arg), val, arg); val = build (COMPOUND_EXPR, TREE_TYPE (arg), arg, val); break; default: abort (); } TREE_SIDE_EFFECTS (val) = 1; return val; } /* Built-in macros for stddef.h, that require macros defined in this file. */ void c_stddef_cpp_builtins(void) { builtin_define_with_value ("__SIZE_TYPE__", SIZE_TYPE, 0); builtin_define_with_value ("__PTRDIFF_TYPE__", PTRDIFF_TYPE, 0); builtin_define_with_value ("__WCHAR_TYPE__", MODIFIED_WCHAR_TYPE, 0); builtin_define_with_value ("__WINT_TYPE__", WINT_TYPE, 0); } static void c_init_attributes (void) { /* Fill in the built_in_attributes array. */ #define DEF_ATTR_NULL_TREE(ENUM) \ built_in_attributes[(int) ENUM] = NULL_TREE; #define DEF_ATTR_INT(ENUM, VALUE) \ built_in_attributes[(int) ENUM] = build_int_2 (VALUE, VALUE < 0 ? -1 : 0); #define DEF_ATTR_IDENT(ENUM, STRING) \ built_in_attributes[(int) ENUM] = get_identifier (STRING); #define DEF_ATTR_TREE_LIST(ENUM, PURPOSE, VALUE, CHAIN) \ built_in_attributes[(int) ENUM] \ = tree_cons (built_in_attributes[(int) PURPOSE], \ built_in_attributes[(int) VALUE], \ built_in_attributes[(int) CHAIN]); /* Copyright (C) 2001, 2002 Free Software Foundation, Inc. Contributed by Joseph Myers . This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This header provides a declarative way of describing the attributes that are applied to some functions by default. Before including this header, you must define the following macros. In each case where there is an ENUM, it is an identifier used to reference the tree in subsequent definitions. DEF_ATTR_NULL_TREE (ENUM) Constructs a NULL_TREE. DEF_ATTR_INT (ENUM, VALUE) Constructs an INTEGER_CST with value VALUE (an integer representable in HOST_WIDE_INT). DEF_ATTR_IDENT (ENUM, STRING) Constructs an IDENTIFIER_NODE for STRING. DEF_ATTR_TREE_LIST (ENUM, PURPOSE, VALUE, CHAIN) Constructs a TREE_LIST with given PURPOSE, VALUE and CHAIN (given as previous ENUM names). */ DEF_ATTR_NULL_TREE (ATTR_NULL) /* Construct a tree for a given integer and a list containing it. */ #define DEF_ATTR_FOR_INT(VALUE) \ DEF_ATTR_INT (ATTR_##VALUE, VALUE) \ DEF_ATTR_TREE_LIST (ATTR_LIST_##VALUE, ATTR_NULL, \ ATTR_##VALUE, ATTR_NULL) DEF_ATTR_FOR_INT (0) DEF_ATTR_FOR_INT (1) DEF_ATTR_FOR_INT (2) DEF_ATTR_FOR_INT (3) DEF_ATTR_FOR_INT (4) #undef DEF_ATTR_FOR_INT /* Construct a tree for a list of two integers. */ #define DEF_LIST_INT_INT(VALUE1, VALUE2) \ DEF_ATTR_TREE_LIST (ATTR_LIST_##VALUE1##_##VALUE2, ATTR_NULL, \ ATTR_##VALUE1, ATTR_LIST_##VALUE2) DEF_LIST_INT_INT (1,0) DEF_LIST_INT_INT (1,2) DEF_LIST_INT_INT (2,0) DEF_LIST_INT_INT (2,3) DEF_LIST_INT_INT (3,0) DEF_LIST_INT_INT (3,4) #undef DEF_LIST_INT_INT /* Construct trees for identifiers. 
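   As an illustration of the machinery above (not new definitions):
   DEF_LIST_INT_INT (2,3) expands into ATTR_LIST_2_3, a two-element
   TREE_LIST holding the integers 2 and 3, later used as the (2, 3)
   argument pair of a format attribute.  The identifiers built next
   supply the attribute names themselves; for instance ATTR_NORETURN
   feeds ATTR_NORETURN_NOTHROW_LIST further down, the list the builtins
   table earlier in this file attaches to "longjmp", "trap" and "_exit".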
*/ DEF_ATTR_IDENT (ATTR_CONST, "const") DEF_ATTR_IDENT (ATTR_FORMAT, "format") DEF_ATTR_IDENT (ATTR_FORMAT_ARG, "format_arg") DEF_ATTR_IDENT (ATTR_MALLOC, "malloc") DEF_ATTR_IDENT (ATTR_NONNULL, "nonnull") DEF_ATTR_IDENT (ATTR_NORETURN, "noreturn") DEF_ATTR_IDENT (ATTR_NOTHROW, "nothrow") DEF_ATTR_IDENT (ATTR_PRINTF, "printf") DEF_ATTR_IDENT (ATTR_ASM_FPRINTF, "asm_fprintf") DEF_ATTR_IDENT (ATTR_GCC_DIAG, "gcc_diag") DEF_ATTR_IDENT (ATTR_GCC_CDIAG, "gcc_cdiag") DEF_ATTR_IDENT (ATTR_GCC_CXXDIAG, "gcc_cxxdiag") DEF_ATTR_IDENT (ATTR_PURE, "pure") DEF_ATTR_IDENT (ATTR_SCANF, "scanf") DEF_ATTR_IDENT (ATTR_STRFMON, "strfmon") DEF_ATTR_IDENT (ATTR_STRFTIME, "strftime") DEF_ATTR_TREE_LIST (ATTR_NOTHROW_LIST, ATTR_NOTHROW, ATTR_NULL, ATTR_NULL) DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_LIST, ATTR_CONST, \ ATTR_NULL, ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_LIST, ATTR_PURE, \ ATTR_NULL, ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_NORETURN_NOTHROW_LIST, ATTR_NORETURN, \ ATTR_NULL, ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_MALLOC_NOTHROW_LIST, ATTR_MALLOC, \ ATTR_NULL, ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1, ATTR_NONNULL, ATTR_LIST_1, \ ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_2, ATTR_NONNULL, ATTR_LIST_2, \ ATTR_NOTHROW_LIST) DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_3, ATTR_NONNULL, ATTR_LIST_3, \ ATTR_NOTHROW_LIST) /* Nothrow functions whose first and second parameters are nonnull pointers. */ DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_2, ATTR_NONNULL, ATTR_LIST_2, \ ATTR_NOTHROW_NONNULL_1) /* Nothrow functions whose first and fourth parameters are nonnull pointers. */ DEF_ATTR_TREE_LIST (ATTR_NOTHROW_NONNULL_1_4, ATTR_NONNULL, ATTR_LIST_4, \ ATTR_NOTHROW_NONNULL_1) /* Nothrow const functions whose first parameter is a nonnull pointer. */ DEF_ATTR_TREE_LIST (ATTR_CONST_NOTHROW_NONNULL_1, ATTR_CONST, ATTR_NULL, \ ATTR_NOTHROW_NONNULL_1) /* Nothrow pure functions whose first parameter is a nonnull pointer. */ DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_NONNULL_1, ATTR_PURE, ATTR_NULL, \ ATTR_NOTHROW_NONNULL_1) /* Nothrow pure functions whose first and second parameters are nonnull pointers. */ DEF_ATTR_TREE_LIST (ATTR_PURE_NOTHROW_NONNULL_1_2, ATTR_PURE, ATTR_NULL, \ ATTR_NOTHROW_NONNULL_1_2) /* Nothrow malloc functions whose first parameter is a nonnull pointer. */ DEF_ATTR_TREE_LIST (ATTR_MALLOC_NOTHROW_NONNULL_1, ATTR_MALLOC, ATTR_NULL, \ ATTR_NOTHROW_NONNULL_1) /* Construct a tree for a format attribute. */ #define DEF_FORMAT_ATTRIBUTE(TYPE, FA, VALUES) \ DEF_ATTR_TREE_LIST (ATTR_##TYPE##_##VALUES, ATTR_NULL, \ ATTR_##TYPE, ATTR_LIST_##VALUES) \ DEF_ATTR_TREE_LIST (ATTR_FORMAT_##TYPE##_##VALUES, ATTR_FORMAT, \ ATTR_##TYPE##_##VALUES, ATTR_NOTHROW_NONNULL_##FA) DEF_FORMAT_ATTRIBUTE(PRINTF,1,1_0) DEF_FORMAT_ATTRIBUTE(PRINTF,1,1_2) DEF_FORMAT_ATTRIBUTE(PRINTF,2,2_0) DEF_FORMAT_ATTRIBUTE(PRINTF,2,2_3) DEF_FORMAT_ATTRIBUTE(PRINTF,3,3_0) DEF_FORMAT_ATTRIBUTE(PRINTF,3,3_4) DEF_FORMAT_ATTRIBUTE(SCANF,1,1_0) DEF_FORMAT_ATTRIBUTE(SCANF,1,1_2) DEF_FORMAT_ATTRIBUTE(SCANF,2,2_0) DEF_FORMAT_ATTRIBUTE(SCANF,2,2_3) DEF_FORMAT_ATTRIBUTE(STRFTIME,3,3_0) DEF_FORMAT_ATTRIBUTE(STRFMON,3,3_4) #undef DEF_FORMAT_ATTRIBUTE /* Construct a tree for a format_arg attribute. 
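   For illustration (an aside, not part of the original comment): the
   macro defined below, DEF_FORMAT_ARG_ATTRIBUTE (1), builds
   ATTR_FORMAT_ARG_1, i.e. format_arg (1) combined with nothrow and
   nonnull (1).  This is the attribute set for translation functions in
   the style of gettext, whose result carries the format string passed
   as argument 1, so that format checking can look through the call.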
*/ #define DEF_FORMAT_ARG_ATTRIBUTE(FA) \ DEF_ATTR_TREE_LIST (ATTR_FORMAT_ARG_##FA, ATTR_FORMAT_ARG, \ ATTR_LIST_##FA, ATTR_NOTHROW_NONNULL_##FA) DEF_FORMAT_ARG_ATTRIBUTE(1) DEF_FORMAT_ARG_ATTRIBUTE(2) #undef DEF_FORMAT_ARG_ATTRIBUTE #undef DEF_ATTR_NULL_TREE #undef DEF_ATTR_INT #undef DEF_ATTR_IDENT #undef DEF_ATTR_TREE_LIST } /* Attribute handlers common to C front ends. */ /* Handle a "packed" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_packed_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags, bool *no_add_attrs) { if (TYPE_P (*node)) { if (!(flags & (int) ATTR_FLAG_TYPE_IN_PLACE)) *node = build_type_copy (*node); TYPE_PACKED (*node) = 1; if (TYPE_MAIN_VARIANT (*node) == *node) { /* If it is the main variant, then pack the other variants too. This happens in, struct Foo { struct Foo const *ptr; // creates a variant w/o packed flag } __ attribute__((packed)); // packs it now. */ tree probe; for (probe = *node; probe; probe = TYPE_NEXT_VARIANT (probe)) TYPE_PACKED (probe) = 1; } } else if (TREE_CODE (*node) == FIELD_DECL) DECL_PACKED (*node) = 1; /* We can't set DECL_PACKED for a VAR_DECL, because the bit is used for DECL_REGISTER. It wouldn't mean anything anyway. We can't set DECL_PACKED on the type of a TYPE_DECL, because that changes what the typedef is typing. */ else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "nocommon" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_nocommon_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (TREE_CODE (*node) == VAR_DECL) DECL_COMMON (*node) = 0; else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "common" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_common_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (TREE_CODE (*node) == VAR_DECL) DECL_COMMON (*node) = 1; else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "noreturn" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_noreturn_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree type = TREE_TYPE (*node); /* See FIXME comment in c_common_attribute_table. */ if (TREE_CODE (*node) == FUNCTION_DECL) TREE_THIS_VOLATILE (*node) = 1; else if (TREE_CODE (type) == POINTER_TYPE && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE) TREE_TYPE (*node) = build_pointer_type (build_type_variant (TREE_TYPE (type), TYPE_READONLY (TREE_TYPE (type)), 1)); else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "noinline" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_noinline_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (TREE_CODE (*node) == FUNCTION_DECL) DECL_UNINLINABLE (*node) = 1; else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "always_inline" attribute; arguments as in struct attribute_spec.handler. 
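   Illustrative usage (an assumption about client code, not taken from
   this file):

     static inline int add1 (int x) __attribute__ ((always_inline));

   The handler below only records the attribute on the FUNCTION_DECL;
   the inliner later finds it with lookup_attribute and inlines the
   function even when inlining would otherwise be skipped.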
*/ static tree handle_always_inline_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (TREE_CODE (*node) == FUNCTION_DECL) { /* Do nothing else, just set the attribute. We'll get at it later with lookup_attribute. */ } else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "used" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_used_attribute (tree *pnode, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree node = *pnode; if (TREE_CODE (node) == FUNCTION_DECL || (TREE_CODE (node) == VAR_DECL && TREE_STATIC (node))) { TREE_USED (node) = 1; } else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "unused" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_unused_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (DECL_P (*node)) { tree decl = *node; if (TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == LABEL_DECL || TREE_CODE (decl) == TYPE_DECL) TREE_USED (decl) = 1; else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } } else { if (!(flags & (int) ATTR_FLAG_TYPE_IN_PLACE)) *node = build_type_copy (*node); TREE_USED (*node) = 1; } return NULL_TREE; } /* Handle a "const" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_const_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree type = TREE_TYPE (*node); /* See FIXME comment on noreturn in c_common_attribute_table. */ if (TREE_CODE (*node) == FUNCTION_DECL) TREE_READONLY (*node) = 1; else if (TREE_CODE (type) == POINTER_TYPE && TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE) TREE_TYPE (*node) = build_pointer_type (build_type_variant (TREE_TYPE (type), 1, TREE_THIS_VOLATILE (TREE_TYPE (type)))); else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "transparent_union" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_transparent_union_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags, bool *no_add_attrs) { tree decl = NULL_TREE; tree *type = NULL; int is_type = 0; if (DECL_P (*node)) { decl = *node; type = &TREE_TYPE (decl); is_type = TREE_CODE (*node) == TYPE_DECL; } else if (TYPE_P (*node)) type = node, is_type = 1; if (is_type && TREE_CODE (*type) == UNION_TYPE && (decl == 0 || (TYPE_FIELDS (*type) != 0 && TYPE_MODE (*type) == DECL_MODE (TYPE_FIELDS (*type))))) { if (!(flags & (int) ATTR_FLAG_TYPE_IN_PLACE)) *type = build_type_copy (*type); TYPE_TRANSPARENT_UNION (*type) = 1; } else if (decl != 0 && TREE_CODE (decl) == PARM_DECL && TREE_CODE (*type) == UNION_TYPE && TYPE_MODE (*type) == DECL_MODE (TYPE_FIELDS (*type))) DECL_TRANSPARENT_UNION (decl) = 1; else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "constructor" attribute; arguments as in struct attribute_spec.handler. 
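   Illustrative usage (assumed client code, not from this file):

     static void init_table (void) __attribute__ ((constructor));

   For a file-scope function definition like this, the handler below
   sets DECL_STATIC_CONSTRUCTOR and TREE_USED, so the function is run
   automatically before main.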
*/ static tree handle_constructor_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree decl = *node; tree type = TREE_TYPE (decl); if (TREE_CODE (decl) == FUNCTION_DECL && TREE_CODE (type) == FUNCTION_TYPE && decl_function_context (decl) == 0) { DECL_STATIC_CONSTRUCTOR (decl) = 1; TREE_USED (decl) = 1; } else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "destructor" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_destructor_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree decl = *node; tree type = TREE_TYPE (decl); if (TREE_CODE (decl) == FUNCTION_DECL && TREE_CODE (type) == FUNCTION_TYPE && decl_function_context (decl) == 0) { DECL_STATIC_DESTRUCTOR (decl) = 1; TREE_USED (decl) = 1; } else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "mode" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_mode_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree type = *node; *no_add_attrs = true; if (TREE_CODE (TREE_VALUE (args)) != IDENTIFIER_NODE) warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); else { int j; const char *p = IDENTIFIER_POINTER (TREE_VALUE (args)); int len = strlen (p); enum machine_mode mode = VOIDmode; tree typefm; tree ptr_type; if (len > 4 && p[0] == '_' && p[1] == '_' && p[len - 1] == '_' && p[len - 2] == '_') { char *newp = alloca (len - 1); strcpy (newp, &p[2]); newp[len - 4] = '\0'; p = newp; } /* Change this type to have a type with the specified mode. First check for the special modes. */ if (! strcmp (p, "byte")) mode = byte_mode; else if (!strcmp (p, "word")) mode = word_mode; else if (! strcmp (p, "pointer")) mode = ptr_mode; else for (j = 0; j < NUM_MACHINE_MODES; j++) if (!strcmp (p, GET_MODE_NAME (j))) mode = (enum machine_mode) j; if (mode == VOIDmode) { error ("unknown machine mode `%s'", p); return NULL_TREE; } if (VECTOR_MODE_P (mode)) { warning ("specifying vector types with __attribute__ ((mode)) " "is deprecated"); warning ("use __attribute__ ((vector_size)) instead"); } typefm = lang_hooks.types.type_for_mode (mode, TYPE_UNSIGNED (type)); if (typefm == NULL_TREE) error ("no data type for mode `%s'", p); else if ((TREE_CODE (type) == POINTER_TYPE || TREE_CODE (type) == REFERENCE_TYPE) && !targetm.valid_pointer_mode (mode)) error ("invalid pointer mode `%s'", p); else { /* If this is a vector, make sure we either have hardware support, or we can emulate it. */ if (VECTOR_MODE_P (mode) && !vector_mode_valid_p (mode)) { error ("unable to emulate '%s'", GET_MODE_NAME (mode)); return NULL_TREE; } if (TREE_CODE (type) == POINTER_TYPE) { ptr_type = build_pointer_type_for_mode (TREE_TYPE (type), mode, false); *node = ptr_type; } else if (TREE_CODE (type) == REFERENCE_TYPE) { ptr_type = build_reference_type_for_mode (TREE_TYPE (type), mode, false); *node = ptr_type; } else *node = typefm; /* No need to layout the type here. The caller should do this. */ } } return NULL_TREE; } /* Handle a "section" attribute; arguments as in struct attribute_spec.handler. 
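   Illustrative usage (assumed client code, not from this file):

     int boot_flag __attribute__ ((section (".boot_data"))) = 1;

   On targets with named-section support the handler records the string
   in DECL_SECTION_NAME; it rejects the attribute on non-static local
   variables and diagnoses a conflict with any section name given by a
   previous declaration.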
*/ static tree handle_section_attribute (tree *node, tree name ATTRIBUTE_UNUSED, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree decl = *node; if (targetm.have_named_sections) { if ((TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL) && TREE_CODE (TREE_VALUE (args)) == STRING_CST) { if (TREE_CODE (decl) == VAR_DECL && current_function_decl != NULL_TREE && ! TREE_STATIC (decl)) { error ("%Jsection attribute cannot be specified for " "local variables", decl); *no_add_attrs = true; } /* The decl may have already been given a section attribute from a previous declaration. Ensure they match. */ else if (DECL_SECTION_NAME (decl) != NULL_TREE && strcmp (TREE_STRING_POINTER (DECL_SECTION_NAME (decl)), TREE_STRING_POINTER (TREE_VALUE (args))) != 0) { error ("%Jsection of '%D' conflicts with previous declaration", *node, *node); *no_add_attrs = true; } else DECL_SECTION_NAME (decl) = TREE_VALUE (args); } else { error ("%Jsection attribute not allowed for '%D'", *node, *node); *no_add_attrs = true; } } else { error ("%Jsection attributes are not supported for this target", *node); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "aligned" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_aligned_attribute (tree *node, tree name ATTRIBUTE_UNUSED, tree args, int flags, bool *no_add_attrs) { tree decl = NULL_TREE; tree *type = NULL; int is_type = 0; tree align_expr = (args ? TREE_VALUE (args) : size_int (BIGGEST_ALIGNMENT / BITS_PER_UNIT)); int i; if (DECL_P (*node)) { decl = *node; type = &TREE_TYPE (decl); is_type = TREE_CODE (*node) == TYPE_DECL; } else if (TYPE_P (*node)) type = node, is_type = 1; /* Strip any NOPs of any kind. */ while (TREE_CODE (align_expr) == NOP_EXPR || TREE_CODE (align_expr) == CONVERT_EXPR || TREE_CODE (align_expr) == NON_LVALUE_EXPR) align_expr = TREE_OPERAND (align_expr, 0); if (TREE_CODE (align_expr) != INTEGER_CST) { error ("requested alignment is not a constant"); *no_add_attrs = true; } else if ((i = tree_log2 (align_expr)) == -1) { error ("requested alignment is not a power of 2"); *no_add_attrs = true; } else if (i > HOST_BITS_PER_INT - 2) { error ("requested alignment is too large"); *no_add_attrs = true; } else if (is_type) { /* If we have a TYPE_DECL, then copy the type, so that we don't accidentally modify a builtin type. See pushdecl. */ if (decl && TREE_TYPE (decl) != error_mark_node && DECL_ORIGINAL_TYPE (decl) == NULL_TREE) { tree tt = TREE_TYPE (decl); *type = build_type_copy (*type); DECL_ORIGINAL_TYPE (decl) = tt; TYPE_NAME (*type) = decl; TREE_USED (*type) = TREE_USED (decl); TREE_TYPE (decl) = *type; } else if (!(flags & (int) ATTR_FLAG_TYPE_IN_PLACE)) *type = build_type_copy (*type); TYPE_ALIGN (*type) = (1 << i) * BITS_PER_UNIT; TYPE_USER_ALIGN (*type) = 1; } else if (TREE_CODE (decl) != VAR_DECL && TREE_CODE (decl) != FIELD_DECL) { error ("%Jalignment may not be specified for '%D'", decl, decl); *no_add_attrs = true; } else { DECL_ALIGN (decl) = (1 << i) * BITS_PER_UNIT; DECL_USER_ALIGN (decl) = 1; } return NULL_TREE; } /* Handle a "weak" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_weak_attribute (tree *node, tree name ATTRIBUTE_UNUSED, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs ATTRIBUTE_UNUSED) { declare_weak (*node); return NULL_TREE; } /* Handle an "alias" attribute; arguments as in struct attribute_spec.handler. 
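   Illustrative usage (assumed client code, not from this file):

     void do_work (void) { }
     void old_name (void) __attribute__ ((alias ("do_work")));

   The handler below insists that old_name is not also given a normal
   definition, requires file scope, and marks the target identifier as
   used.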
*/ static tree handle_alias_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree decl = *node; if ((TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl)) || (TREE_CODE (decl) != FUNCTION_DECL && ! DECL_EXTERNAL (decl))) { error ("%J'%D' defined both normally and as an alias", decl, decl); *no_add_attrs = true; } /* Note that the very first time we process a nested declaration, decl_function_context will not be set. Indeed, *would* never be set except for the DECL_INITIAL/DECL_EXTERNAL frobbery that we do below. After such frobbery, pushdecl would set the context. In any case, this is never what we want. */ else if (decl_function_context (decl) == 0 && current_function_decl == NULL) { tree id; id = TREE_VALUE (args); if (TREE_CODE (id) != STRING_CST) { error ("alias arg not a string"); *no_add_attrs = true; return NULL_TREE; } id = get_identifier (TREE_STRING_POINTER (id)); /* This counts as a use of the object pointed to. */ TREE_USED (id) = 1; if (TREE_CODE (decl) == FUNCTION_DECL) DECL_INITIAL (decl) = error_mark_node; else { DECL_EXTERNAL (decl) = 0; TREE_STATIC (decl) = 1; } } else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle an "visibility" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_visibility_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree decl = *node; tree id = TREE_VALUE (args); *no_add_attrs = true; if (decl_function_context (decl) != 0 || ! TREE_PUBLIC (decl)) { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); return NULL_TREE; } if (TREE_CODE (id) != STRING_CST) { error ("visibility arg not a string"); return NULL_TREE; } if (strcmp (TREE_STRING_POINTER (id), "default") == 0) DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT; else if (strcmp (TREE_STRING_POINTER (id), "internal") == 0) DECL_VISIBILITY (decl) = VISIBILITY_INTERNAL; else if (strcmp (TREE_STRING_POINTER (id), "hidden") == 0) DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN; else if (strcmp (TREE_STRING_POINTER (id), "protected") == 0) DECL_VISIBILITY (decl) = VISIBILITY_PROTECTED; else error ("visibility arg must be one of \"default\", \"hidden\", \"protected\" or \"internal\""); return NULL_TREE; } /* Handle an "tls_model" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_tls_model_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree decl = *node; if (! DECL_THREAD_LOCAL (decl)) { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } else { tree id; id = TREE_VALUE (args); if (TREE_CODE (id) != STRING_CST) { error ("tls_model arg not a string"); *no_add_attrs = true; return NULL_TREE; } if (strcmp (TREE_STRING_POINTER (id), "local-exec") && strcmp (TREE_STRING_POINTER (id), "initial-exec") && strcmp (TREE_STRING_POINTER (id), "local-dynamic") && strcmp (TREE_STRING_POINTER (id), "global-dynamic")) { error ("tls_model arg must be one of \"local-exec\", \"initial-exec\", \"local-dynamic\" or \"global-dynamic\""); *no_add_attrs = true; return NULL_TREE; } } return NULL_TREE; } /* Handle a "no_instrument_function" attribute; arguments as in struct attribute_spec.handler. 
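   Illustrative usage (assumed client code, not from this file):

     void profile_hook (void) __attribute__ ((no_instrument_function));

   This exempts profile_hook from the entry/exit instrumentation added
   by -finstrument-functions.  As the handler below enforces, the
   attribute may only appear on functions and must precede the
   definition.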
*/ static tree handle_no_instrument_function_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree decl = *node; if (TREE_CODE (decl) != FUNCTION_DECL) { error ("%J'%E' attribute applies only to functions", decl, name); *no_add_attrs = true; } else if (DECL_INITIAL (decl)) { error ("%Jcan't set '%E' attribute after definition", decl, name); *no_add_attrs = true; } else DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (decl) = 1; return NULL_TREE; } /* Handle a "malloc" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_malloc_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (TREE_CODE (*node) == FUNCTION_DECL) DECL_IS_MALLOC (*node) = 1; /* ??? TODO: Support types. */ else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "no_limit_stack" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_no_limit_stack_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree decl = *node; if (TREE_CODE (decl) != FUNCTION_DECL) { error ("%J'%E' attribute applies only to functions", decl, name); *no_add_attrs = true; } else if (DECL_INITIAL (decl)) { error ("%Jcan't set '%E' attribute after definition", decl, name); *no_add_attrs = true; } else DECL_NO_LIMIT_STACK (decl) = 1; return NULL_TREE; } /* Handle a "pure" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_pure_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (TREE_CODE (*node) == FUNCTION_DECL) DECL_IS_PURE (*node) = 1; /* ??? TODO: Support types. */ else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "deprecated" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_deprecated_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags, bool *no_add_attrs) { tree type = NULL_TREE; int warn = 0; const char *what = NULL; if (DECL_P (*node)) { tree decl = *node; type = TREE_TYPE (decl); if (TREE_CODE (decl) == TYPE_DECL || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == FIELD_DECL) TREE_DEPRECATED (decl) = 1; else warn = 1; } else if (TYPE_P (*node)) { if (!(flags & (int) ATTR_FLAG_TYPE_IN_PLACE)) *node = build_type_copy (*node); TREE_DEPRECATED (*node) = 1; type = *node; } else warn = 1; if (warn) { *no_add_attrs = true; if (type && TYPE_NAME (type)) { if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE) what = IDENTIFIER_POINTER (TYPE_NAME (*node)); else if (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL && DECL_NAME (TYPE_NAME (type))) what = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))); } if (what) warning ("`%s' attribute ignored for `%s'", IDENTIFIER_POINTER (name), what); else warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); } return NULL_TREE; } /* Handle a "vector_size" attribute; arguments as in struct attribute_spec.handler. 
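   Worked illustration (not part of the original comment):

     typedef int v4si __attribute__ ((vector_size (16)));

   requests a 16-byte vector of int.  With 4-byte ints the handler below
   computes nunits = 16 / 4 = 4 and then searches the integer vector
   modes for one whose size is 16 bytes and whose element count is 4
   (V4SImode on targets that provide it); if no such mode exists an
   error is issued.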
*/ static tree handle_vector_size_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { unsigned HOST_WIDE_INT vecsize, nunits; enum machine_mode mode, orig_mode, new_mode; tree type = *node, new_type, size; *no_add_attrs = true; /* Stripping NON_LVALUE_EXPR allows declarations such as typedef short v4si __attribute__((vector_size (4 * sizeof(short)))). */ size = TREE_VALUE (args); if (TREE_CODE (size) == NON_LVALUE_EXPR) size = TREE_OPERAND (size, 0); if (! host_integerp (size, 1)) { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); return NULL_TREE; } /* Get the vector size (in bytes). */ vecsize = tree_low_cst (size, 1); /* We need to provide for vector pointers, vector arrays, and functions returning vectors. For example: __attribute__((vector_size(16))) short *foo; In this case, the mode is SI, but the type being modified is HI, so we need to look further. */ while (POINTER_TYPE_P (type) || TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE || TREE_CODE (type) == ARRAY_TYPE) type = TREE_TYPE (type); /* Get the mode of the type being modified. */ orig_mode = TYPE_MODE (type); if (TREE_CODE (type) == RECORD_TYPE || (GET_MODE_CLASS (orig_mode) != MODE_FLOAT && GET_MODE_CLASS (orig_mode) != MODE_INT) || ! host_integerp (TYPE_SIZE_UNIT (type), 1)) { error ("invalid vector type for attribute `%s'", IDENTIFIER_POINTER (name)); return NULL_TREE; } /* Calculate how many units fit in the vector. */ nunits = vecsize / tree_low_cst (TYPE_SIZE_UNIT (type), 1); /* Find a suitably sized vector. */ new_mode = VOIDmode; for (mode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (orig_mode) == MODE_INT ? MODE_VECTOR_INT : MODE_VECTOR_FLOAT); mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode)) if (vecsize == GET_MODE_SIZE (mode) && nunits == (unsigned HOST_WIDE_INT) GET_MODE_NUNITS (mode)) { new_mode = mode; break; } if (new_mode == VOIDmode) { error ("no vector mode with the size and type specified could be found"); return NULL_TREE; } new_type = build_vector_type_for_mode (type, new_mode); /* Build back pointers if needed. */ *node = reconstruct_complex_type (*node, new_type); return NULL_TREE; } /* Handle the "nonnull" attribute. */ static tree handle_nonnull_attribute (tree *node, tree name ATTRIBUTE_UNUSED, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree type = *node; unsigned HOST_WIDE_INT attr_arg_num; /* If no arguments are specified, all pointer arguments should be non-null. Verify a full prototype is given so that the arguments will have the correct types when we actually check them later. */ if (! args) { if (! TYPE_ARG_TYPES (type)) { error ("nonnull attribute without arguments on a non-prototype"); *no_add_attrs = true; } return NULL_TREE; } /* Argument list specified. Verify that each argument number references a pointer argument. */ for (attr_arg_num = 1; args; args = TREE_CHAIN (args)) { tree argument; unsigned HOST_WIDE_INT arg_num = 0, ck_num; if (! get_nonnull_operand (TREE_VALUE (args), &arg_num)) { error ("nonnull argument has invalid operand number (arg %lu)", (unsigned long) attr_arg_num); *no_add_attrs = true; return NULL_TREE; } argument = TYPE_ARG_TYPES (type); if (argument) { for (ck_num = 1; ; ck_num++) { if (! argument || ck_num == arg_num) break; argument = TREE_CHAIN (argument); } if (! 
argument || TREE_CODE (TREE_VALUE (argument)) == VOID_TYPE) { error ("nonnull argument with out-of-range operand number (arg %lu, operand %lu)", (unsigned long) attr_arg_num, (unsigned long) arg_num); *no_add_attrs = true; return NULL_TREE; } if (TREE_CODE (TREE_VALUE (argument)) != POINTER_TYPE) { error ("nonnull argument references non-pointer operand (arg %lu, operand %lu)", (unsigned long) attr_arg_num, (unsigned long) arg_num); *no_add_attrs = true; return NULL_TREE; } } } return NULL_TREE; } /* Check the argument list of a function call for null in argument slots that are marked as requiring a non-null pointer argument. */ static void check_function_nonnull (tree attrs, tree params) { tree a, args, param; int param_num; for (a = attrs; a; a = TREE_CHAIN (a)) { if (is_attribute_p ("nonnull", TREE_PURPOSE (a))) { args = TREE_VALUE (a); /* Walk the argument list. If we encounter an argument number we should check for non-null, do it. If the attribute has no args, then every pointer argument is checked (in which case the check for pointer type is done in check_nonnull_arg). */ for (param = params, param_num = 1; ; param_num++, param = TREE_CHAIN (param)) { if (! param) break; if (! args || nonnull_check_p (args, param_num)) check_function_arguments_recurse (check_nonnull_arg, NULL, TREE_VALUE (param), param_num); } } } } /* Helper for check_function_nonnull; given a list of operands which must be non-null in ARGS, determine if operand PARAM_NUM should be checked. */ static bool nonnull_check_p (tree args, unsigned HOST_WIDE_INT param_num) { unsigned HOST_WIDE_INT arg_num = 0; for (; args; args = TREE_CHAIN (args)) { if (! get_nonnull_operand (TREE_VALUE (args), &arg_num)) abort (); if (arg_num == param_num) return true; } return false; } /* Check that the function argument PARAM (which is operand number PARAM_NUM) is non-null. This is called by check_function_nonnull via check_function_arguments_recurse. */ static void check_nonnull_arg (void *ctx ATTRIBUTE_UNUSED, tree param, unsigned HOST_WIDE_INT param_num) { /* Just skip checking the argument if it's not a pointer. This can happen if the "nonnull" attribute was given without an operand list (which means to check every pointer argument). */ if (TREE_CODE (TREE_TYPE (param)) != POINTER_TYPE) return; if (integer_zerop (param)) warning ("null argument where non-null required (arg %lu)", (unsigned long) param_num); } /* Helper for nonnull attribute handling; fetch the operand number from the attribute argument list. */ static bool get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp) { /* Strip any conversions from the arg number and verify they are constants. */ while (TREE_CODE (arg_num_expr) == NOP_EXPR || TREE_CODE (arg_num_expr) == CONVERT_EXPR || TREE_CODE (arg_num_expr) == NON_LVALUE_EXPR) arg_num_expr = TREE_OPERAND (arg_num_expr, 0); if (TREE_CODE (arg_num_expr) != INTEGER_CST || TREE_INT_CST_HIGH (arg_num_expr) != 0) return false; *valp = TREE_INT_CST_LOW (arg_num_expr); return true; } /* Handle a "nothrow" attribute; arguments as in struct attribute_spec.handler. */ static tree handle_nothrow_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { if (TREE_CODE (*node) == FUNCTION_DECL) TREE_NOTHROW (*node) = 1; /* ??? TODO: Support types. */ else { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Handle a "cleanup" attribute; arguments as in struct attribute_spec.handler. 
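   Illustrative usage (assumed client code, not from this file):

     static void close_it (int *fdp) { if (*fdp >= 0) close (*fdp); }
     ...
     { int fd __attribute__ ((cleanup (close_it))) = open_something (); }

   close_it runs automatically when fd goes out of scope.  The handler
   below only checks that the argument names a function in scope; that
   the function's type matches (it is called with the address of the
   variable) is verified later by build_function_call.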
*/ static tree handle_cleanup_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { tree decl = *node; tree cleanup_id, cleanup_decl; /* ??? Could perhaps support cleanups on TREE_STATIC, much like we do for global destructors in C++. This requires infrastructure that we don't have generically at the moment. It's also not a feature we'd be missing too much, since we do have attribute constructor. */ if (TREE_CODE (decl) != VAR_DECL || TREE_STATIC (decl)) { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; return NULL_TREE; } /* Verify that the argument is a function in scope. */ /* ??? We could support pointers to functions here as well, if that was considered desirable. */ cleanup_id = TREE_VALUE (args); if (TREE_CODE (cleanup_id) != IDENTIFIER_NODE) { error ("cleanup arg not an identifier"); *no_add_attrs = true; return NULL_TREE; } cleanup_decl = lookup_name (cleanup_id); if (!cleanup_decl || TREE_CODE (cleanup_decl) != FUNCTION_DECL) { error ("cleanup arg not a function"); *no_add_attrs = true; return NULL_TREE; } /* That the function has proper type is checked with the eventual call to build_function_call. */ return NULL_TREE; } /* Handle a "warn_unused_result" attribute. No special handling. */ static tree handle_warn_unused_result_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs) { /* Ignore the attribute for functions not returning any value. */ if (VOID_TYPE_P (TREE_TYPE (*node))) { warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name)); *no_add_attrs = true; } return NULL_TREE; } /* Check for valid arguments being passed to a function. */ void check_function_arguments (tree attrs, tree params) { /* Check for null being passed in a pointer argument that must be non-null. We also need to do this if format checking is enabled. */ if (warn_nonnull) check_function_nonnull (attrs, params); /* Check for errors in format strings. */ if (warn_format) check_function_format (attrs, params); } /* Generic argument checking recursion routine. PARAM is the argument to be checked. PARAM_NUM is the number of the argument. CALLBACK is invoked once the argument is resolved. CTX is context for the callback. */ void check_function_arguments_recurse (void (*callback) (void *, tree, unsigned HOST_WIDE_INT), void *ctx, tree param, unsigned HOST_WIDE_INT param_num) { if (TREE_CODE (param) == NOP_EXPR) { /* Strip coercion. */ check_function_arguments_recurse (callback, ctx, TREE_OPERAND (param, 0), param_num); return; } if (TREE_CODE (param) == CALL_EXPR) { tree type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (param, 0))); tree attrs; bool found_format_arg = false; /* See if this is a call to a known internationalization function that modifies a format arg. Such a function may have multiple format_arg attributes (for example, ngettext). */ for (attrs = TYPE_ATTRIBUTES (type); attrs; attrs = TREE_CHAIN (attrs)) if (is_attribute_p ("format_arg", TREE_PURPOSE (attrs))) { tree inner_args; tree format_num_expr; int format_num; int i; /* Extract the argument number, which was previously checked to be valid. 
*/ format_num_expr = TREE_VALUE (TREE_VALUE (attrs)); while (TREE_CODE (format_num_expr) == NOP_EXPR || TREE_CODE (format_num_expr) == CONVERT_EXPR || TREE_CODE (format_num_expr) == NON_LVALUE_EXPR) format_num_expr = TREE_OPERAND (format_num_expr, 0); if (TREE_CODE (format_num_expr) != INTEGER_CST || TREE_INT_CST_HIGH (format_num_expr) != 0) abort (); format_num = TREE_INT_CST_LOW (format_num_expr); for (inner_args = TREE_OPERAND (param, 1), i = 1; inner_args != 0; inner_args = TREE_CHAIN (inner_args), i++) if (i == format_num) { check_function_arguments_recurse (callback, ctx, TREE_VALUE (inner_args), param_num); found_format_arg = true; break; } } /* If we found a format_arg attribute and did a recursive check, we are done with checking this argument. Otherwise, we continue and this will be considered a non-literal. */ if (found_format_arg) return; } if (TREE_CODE (param) == COND_EXPR) { /* Check both halves of the conditional expression. */ check_function_arguments_recurse (callback, ctx, TREE_OPERAND (param, 1), param_num); check_function_arguments_recurse (callback, ctx, TREE_OPERAND (param, 2), param_num); return; } (*callback) (ctx, param, param_num); } /* Function to help qsort sort FIELD_DECLs by name order. */ int field_decl_cmp (const void *x_p, const void *y_p) { const tree *const x = x_p; const tree *const y = y_p; if (DECL_NAME (*x) == DECL_NAME (*y)) /* A nontype is "greater" than a type. */ return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL); if (DECL_NAME (*x) == NULL_TREE) return -1; if (DECL_NAME (*y) == NULL_TREE) return 1; if (DECL_NAME (*x) < DECL_NAME (*y)) return -1; return 1; } static struct { gt_pointer_operator new_value; void *cookie; } resort_data; /* This routine compares two fields like field_decl_cmp but using the pointer operator in resort_data. */ static int resort_field_decl_cmp (const void *x_p, const void *y_p) { const tree *const x = x_p; const tree *const y = y_p; if (DECL_NAME (*x) == DECL_NAME (*y)) /* A nontype is "greater" than a type. */ return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL); if (DECL_NAME (*x) == NULL_TREE) return -1; if (DECL_NAME (*y) == NULL_TREE) return 1; { tree d1 = DECL_NAME (*x); tree d2 = DECL_NAME (*y); resort_data.new_value (&d1, resort_data.cookie); resort_data.new_value (&d2, resort_data.cookie); if (d1 < d2) return -1; } return 1; } /* Resort DECL_SORTED_FIELDS because pointers have been reordered. */ void resort_sorted_fields (void *obj, void *orig_obj ATTRIBUTE_UNUSED , gt_pointer_operator new_value, void *cookie) { struct sorted_fields_type *sf = obj; resort_data.new_value = new_value; resort_data.cookie = cookie; qsort (&sf->elts[0], sf->len, sizeof (tree), resort_field_decl_cmp); } /* Issue the error given by MSGID, indicating that it occurred before TOKEN, which had the associated VALUE. */ void c_parse_error (const char *msgid, enum cpp_ttype token, tree value) { const char *string = _(msgid); if (token == CPP_EOF) error ("%s at end of input", string); else if (token == CPP_CHAR || token == CPP_WCHAR) { unsigned int val = TREE_INT_CST_LOW (value); const char *const ell = (token == CPP_CHAR) ? 
"" : "L"; if (val <= UCHAR_MAX && ISGRAPH (val)) error ("%s before %s'%c'", string, ell, val); else error ("%s before %s'\\x%x'", string, ell, val); } else if (token == CPP_STRING || token == CPP_WSTRING) error ("%s before string constant", string); else if (token == CPP_NUMBER) error ("%s before numeric constant", string); else if (token == CPP_NAME) error ("%s before \"%s\"", string, IDENTIFIER_POINTER (value)); else if (token < N_TTYPES) error ("%s before '%s' token", string, cpp_type2name (token)); else error ("%s", string); } /* Walk a gimplified function and warn for functions whose return value is ignored and attribute((warn_unused_result)) is set. This is done before inlining, so we don't have to worry about that. */ void c_warn_unused_result (tree *top_p) { tree t = *top_p; tree_stmt_iterator i; tree fdecl, ftype; switch (TREE_CODE (t)) { case STATEMENT_LIST: for (i = tsi_start (*top_p); !tsi_end_p (i); tsi_next (&i)) c_warn_unused_result (tsi_stmt_ptr (i)); break; case COND_EXPR: c_warn_unused_result (&COND_EXPR_THEN (t)); c_warn_unused_result (&COND_EXPR_ELSE (t)); break; case BIND_EXPR: c_warn_unused_result (&BIND_EXPR_BODY (t)); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: c_warn_unused_result (&TREE_OPERAND (t, 0)); c_warn_unused_result (&TREE_OPERAND (t, 1)); break; case CATCH_EXPR: c_warn_unused_result (&CATCH_BODY (t)); break; case EH_FILTER_EXPR: c_warn_unused_result (&EH_FILTER_FAILURE (t)); break; case CALL_EXPR: /* This is a naked call, as opposed to a CALL_EXPR nested inside a MODIFY_EXPR. All calls whose value is ignored should be represented like this. Look for the attribute. */ fdecl = get_callee_fndecl (t); if (fdecl) ftype = TREE_TYPE (fdecl); else { ftype = TREE_TYPE (TREE_OPERAND (t, 0)); /* Look past pointer-to-function to the function type itself. */ ftype = TREE_TYPE (ftype); } if (lookup_attribute ("warn_unused_result", TYPE_ATTRIBUTES (ftype))) { if (fdecl) warning ("%Hignoring return value of `%D', " "declared with attribute warn_unused_result", EXPR_LOCUS (t), fdecl); else warning ("%Hignoring return value of function " "declared with attribute warn_unused_result", EXPR_LOCUS (t)); } break; default: /* Not a container, not a call, or a call whose value is used. */ break; } } /* Type information for c-common.c. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. 
*/ void gt_ggc_mx_sorted_fields_type (void *x_p) { struct sorted_fields_type * const x = (struct sorted_fields_type *)x_p; if (ggc_test_and_set_mark (x)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).len); i0++) { gt_ggc_m_9tree_node ((*x).elts[i0]); } } } } void gt_pch_nx_sorted_fields_type (void *x_p) { struct sorted_fields_type * const x = (struct sorted_fields_type *)x_p; if (gt_pch_note_object (x, x, gt_pch_p_18sorted_fields_type)) { { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).len); i0++) { gt_pch_n_9tree_node ((*x).elts[i0]); } } } } void gt_pch_p_18sorted_fields_type (void *this_obj ATTRIBUTE_UNUSED, void *x_p, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { struct sorted_fields_type * const x ATTRIBUTE_UNUSED = (struct sorted_fields_type *)x_p; { size_t i0; for (i0 = 0; i0 < (size_t)(((*x)).len); i0++) { if ((void *)(x) == this_obj) op (&((*x).elts[i0]), cookie); } } } /* GC roots. */ static void gt_ggc_ma_ridpointers (void *); static void gt_ggc_ma_ridpointers (void *x_p ATTRIBUTE_UNUSED) { if (ridpointers != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)((int)RID_MAX); i0++) { gt_ggc_m_9tree_node (ridpointers[i0]); } ggc_mark (ridpointers); } } static void gt_pch_pa_ridpointers (void *, void *, gt_pointer_operator, void *); static void gt_pch_pa_ridpointers (void *this_obj ATTRIBUTE_UNUSED, void *x_p ATTRIBUTE_UNUSED, gt_pointer_operator op ATTRIBUTE_UNUSED, void *cookie ATTRIBUTE_UNUSED) { if (ridpointers != NULL) { size_t i0; for (i0 = 0; i0 < (size_t)((int)RID_MAX); i0++) { if ((void *)(ridpointers) == this_obj) op (&(ridpointers[i0]), cookie); } if ((void *)(&ridpointers) == this_obj) op (&(ridpointers), cookie); } } static void gt_pch_na_ridpointers (void *); static void gt_pch_na_ridpointers (void *x_p ATTRIBUTE_UNUSED) { if (ridpointers != NULL) { size_t i1; for (i1 = 0; i1 < (size_t)((int)RID_MAX); i1++) { gt_pch_n_9tree_node (ridpointers[i1]); } gt_pch_note_object (ridpointers, &ridpointers, gt_pch_pa_ridpointers); } } const struct ggc_root_tab gt_ggc_r_gt_c_common_h[] = { { &built_in_attributes[0], 1 * ((int) ATTR_LAST), sizeof (built_in_attributes[0]), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &registered_builtin_types, 1, sizeof (registered_builtin_types), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &c_global_trees[0], 1 * (C_TREE_IDX_MAX), sizeof (c_global_trees[0]), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &ridpointers, 1, sizeof (ridpointers), &gt_ggc_ma_ridpointers, &gt_pch_na_ridpointers }, LAST_GGC_ROOT_TAB }; const struct ggc_root_tab gt_pch_rs_gt_c_common_h[] = { { &pending_lang_change, 1, sizeof (pending_lang_change), NULL, NULL }, LAST_GGC_ROOT_TAB }; /* C/ObjC/C++ command line option handling. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Neil Booth. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* CPP Library.
Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2003 Free Software Foundation, Inc. Contributed by Per Bothner, 1994-95. Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_CPPDEFAULT_H #define GCC_CPPDEFAULT_H /* This is the default list of directories to search for include files. It may be overridden by the various -I and -ixxx options. #include "file" looks in the same directory as the current file, then this list. #include <file> just looks in this list. All these directories are treated as `system' include directories (they are not subject to pedantic warnings in some cases). */ struct default_include { const char *const fname; /* The name of the directory. */ const char *const component; /* The component containing the directory (see update_path in prefix.c) */ const char cplusplus; /* Only look here if we're compiling C++. */ const char cxx_aware; /* Includes in this directory don't need to be wrapped in extern "C" when compiling C++. */ const char add_sysroot; /* FNAME should be prefixed by cpp_SYSROOT. */ }; extern const struct default_include cpp_include_defaults[]; extern const char cpp_GCC_INCLUDE_DIR[]; extern const size_t cpp_GCC_INCLUDE_DIR_len; extern const char *cpp_SYSROOT; #endif /* ! GCC_CPPDEFAULT_H */ /* Set up combined include path for the preprocessor. Copyright (C) 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_C_INCPATH #define GCC_C_INCPATH extern void split_quote_chain (void); extern void add_path (char *, int, int, bool); extern void register_include_chains (cpp_reader *, const char *, const char *, int, int, int); extern void add_cpp_dir_path (struct cpp_dir *, int); struct target_c_incpath_s { /* Do extra includes processing. STDINC is false iff -nostdinc was given. */ void (*extra_includes) (int); }; extern struct target_c_incpath_s target_c_incpath; #define C_INCPATH_INIT { TARGET_EXTRA_INCLUDES } enum { QUOTE = 0, BRACKET, SYSTEM, AFTER }; #endif /* GCC_C_INCPATH */ #ifndef DOLLARS_IN_IDENTIFIERS # define DOLLARS_IN_IDENTIFIERS true #endif #ifndef TARGET_SYSTEM_ROOT # define TARGET_SYSTEM_ROOT NULL #endif #ifndef TARGET_OPTF #define TARGET_OPTF(ARG) #endif /* CPP's options.
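A pointer to the option block of the cpp_reader created in c_common_init_options. The file-scope statics that follow record other command-line state (output and dependency files, the -iprefix and -isysroot paths, and the deferred -D/-U/-A/-include switches) until c_common_post_options and finish_options hand it on to cpplib.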
*/ static cpp_options *cpp_opts; /* Input filename. */ static const char *this_input_filename; /* Filename and stream for preprocessed output. */ static const char *out_fname; static FILE *out_stream; /* Append dependencies to deps_file. */ static bool deps_append; /* If dependency switches (-MF etc.) have been given. */ static bool deps_seen; /* If -v seen. */ static bool verbose; /* Dependency output file. */ static const char *deps_file; /* The prefix given by -iprefix, if any. */ static const char *iprefix; /* The system root, if any. Overridden by -isysroot. */ static const char *sysroot = TARGET_SYSTEM_ROOT; /* Zero disables all standard directories for headers. */ static bool std_inc = true; /* Zero disables the C++-specific standard directories for headers. */ static bool std_cxx_inc = true; /* If the quote chain has been split by -I-. */ static bool quote_chain_split; /* If -Wunused-macros. */ static bool warn_unused_macros; /* If -Wvariadic-macros. */ static bool warn_variadic_macros = true; /* Number of deferred options. */ static size_t deferred_count; /* Number of deferred options scanned for -include. */ static size_t include_cursor; /* Permit Fortran front-end options. */ static bool permit_fortran_options; static void set_Wimplicit (int); static void handle_OPT_d (const char *); static void set_std_cxx98 (int); static void set_std_c89 (int, int); static void set_std_c99 (int); static void check_deps_environment_vars (void); static void handle_deferred_opts (void); static void sanitize_cpp_opts (void); static void add_prefixed_path (const char *, size_t); static void push_command_line_include (void); static void cb_file_change (cpp_reader *, const struct line_map *); static void cb_dir_change (cpp_reader *, const char *); static void finish_options (void); #ifndef STDC_0_IN_SYSTEM_HEADERS #define STDC_0_IN_SYSTEM_HEADERS 0 #endif /* Holds switches parsed by c_common_handle_option (), but whose handling is deferred to c_common_post_options (). */ static void defer_opt (enum opt_code, const char *); static struct deferred_opt { enum opt_code code; const char *arg; } *deferred_opts; /* Complain that switch CODE expects an argument but none was provided. OPT was the command-line option. Return FALSE to get the default message in opts.c, TRUE if we provide a specialized one. */ bool c_common_missing_argument (const char *opt, size_t code) { switch (code) { default: /* Pick up the default message. */ return false; case OPT_fconstant_string_class_: error ("no class name specified with \"%s\"", opt); break; case OPT_A: error ("assertion missing after \"%s\"", opt); break; case OPT_D: case OPT_U: error ("macro name missing after \"%s\"", opt); break; case OPT_F: case OPT_I: case OPT_idirafter: case OPT_isysroot: case OPT_isystem: case OPT_iquote: error ("missing path after \"%s\"", opt); break; case OPT_MF: case OPT_MD: case OPT_MMD: case OPT_include: case OPT_imacros: case OPT_o: error ("missing filename after \"%s\"", opt); break; case OPT_MQ: case OPT_MT: error ("missing makefile target after \"%s\"", opt); break; } return true; } /* Defer option CODE with argument ARG. */ static void defer_opt (enum opt_code code, const char *arg) { deferred_opts[deferred_count].code = code; deferred_opts[deferred_count].arg = arg; deferred_count++; } /* Common initialization before parsing options. 
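This creates the cpplib reader (in GNU C or GNU C++ mode, as appropriate), points cpp_opts at its option block, and allocates the deferred-option buffer; one slot per command-line argument suffices because every deferred switch consumes at least one argument. The return value is the CL_* mask of switches this front end accepts, possibly widened with CL_F77 when the C front end is used to preprocess Fortran.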
*/ unsigned int c_common_init_options (unsigned int argc, const char **argv ATTRIBUTE_UNUSED) { static const unsigned int lang_flags[] = {CL_C, CL_ObjC, CL_CXX, CL_ObjCXX}; unsigned int result; /* This is conditionalized only because that is the way the front ends used to do it. Maybe this should be unconditional? */ if (c_dialect_cxx ()) { /* By default wrap lines at 80 characters. Is getenv ("COLUMNS") preferable? */ diagnostic_line_cutoff (global_dc) = 80; /* By default, emit location information once for every diagnostic message. */ diagnostic_prefixing_rule (global_dc) = DIAGNOSTICS_SHOW_PREFIX_ONCE; } parse_in = cpp_create_reader (c_dialect_cxx () ? CLK_GNUCXX: CLK_GNUC89, ident_hash, &line_table); cpp_opts = cpp_get_options (parse_in); cpp_opts->dollars_in_ident = DOLLARS_IN_IDENTIFIERS; cpp_opts->objc = c_dialect_objc (); /* Reset to avoid warnings on internal definitions. We set it just before passing on command-line options to cpplib. */ cpp_opts->warn_dollars = 0; flag_const_strings = c_dialect_cxx (); flag_exceptions = c_dialect_cxx (); warn_pointer_arith = c_dialect_cxx (); deferred_opts = xmalloc (argc * sizeof (struct deferred_opt)); result = lang_flags[c_language]; /* If potentially preprocessing Fortran we have to accept its front end options since the driver passes most of them through. */ #ifdef CL_F77 if (c_language == clk_c && argc > 2 && !strcmp (argv[2], "-traditional-cpp" )) { permit_fortran_options = true; result |= CL_F77; } #endif return result; } /* Handle switch SCODE with argument ARG. VALUE is true, unless no- form of an -f or -W option was given. Returns 0 if the switch was invalid, a negative number to prevent language-independent processing in toplev.c (a hack necessary for the short-term). */ int c_common_handle_option (size_t scode, const char *arg, int value) { const struct cl_option *option = &cl_options[scode]; enum opt_code code = (enum opt_code) scode; int result = 1; switch (code) { default: if (cl_options[code].flags & (CL_C | CL_CXX | CL_ObjC | CL_ObjCXX)) break; result = permit_fortran_options; break; case OPT__output_pch_: pch_file = arg; break; case OPT_A: defer_opt (code, arg); break; case OPT_C: cpp_opts->discard_comments = 0; break; case OPT_CC: cpp_opts->discard_comments = 0; cpp_opts->discard_comments_in_macro_exp = 0; break; case OPT_D: defer_opt (code, arg); break; case OPT_E: flag_preprocess_only = 1; break; case OPT_H: cpp_opts->print_include_names = 1; break; case OPT_F: TARGET_OPTF (xstrdup (arg)); break; case OPT_I: if (strcmp (arg, "-")) add_path (xstrdup (arg), BRACKET, 0, true); else { if (quote_chain_split) error ("-I- specified twice"); quote_chain_split = true; split_quote_chain (); inform ("obsolete option -I- used, please use -iquote instead"); } break; case OPT_M: case OPT_MM: /* When doing dependencies with -M or -MM, suppress normal preprocessed output, but still do -dM etc. as software depends on this. Preprocessed output does occur if -MD, -MMD or environment var dependency generation is used. */ cpp_opts->deps.style = (code == OPT_M ? DEPS_SYSTEM: DEPS_USER); flag_no_output = 1; cpp_opts->inhibit_warnings = 1; break; case OPT_MD: case OPT_MMD: cpp_opts->deps.style = (code == OPT_MD ? 
DEPS_SYSTEM: DEPS_USER); deps_file = arg; break; case OPT_MF: deps_seen = true; deps_file = arg; break; case OPT_MG: deps_seen = true; cpp_opts->deps.missing_files = true; break; case OPT_MP: deps_seen = true; cpp_opts->deps.phony_targets = true; break; case OPT_MQ: case OPT_MT: deps_seen = true; defer_opt (code, arg); break; case OPT_P: flag_no_line_commands = 1; break; case OPT_fworking_directory: flag_working_directory = value; break; case OPT_U: defer_opt (code, arg); break; case OPT_Wall: set_Wunused (value); set_Wformat (value); set_Wimplicit (value); warn_char_subscripts = value; warn_missing_braces = value; warn_parentheses = value; warn_return_type = value; warn_sequence_point = value; /* Was C only. */ if (c_dialect_cxx ()) warn_sign_compare = value; warn_switch = value; warn_strict_aliasing = value; /* Only warn about unknown pragmas that are not in system headers. */ warn_unknown_pragmas = value; /* We save the value of warn_uninitialized, since if they put -Wuninitialized on the command line, we need to generate a warning about not using it without also specifying -O. */ if (warn_uninitialized != 1) warn_uninitialized = (value ? 2 : 0); if (!c_dialect_cxx ()) /* We set this to 2 here, but 1 in -Wmain, so -ffreestanding can turn it off only if it's not explicit. */ warn_main = value * 2; else { /* C++-specific warnings. */ warn_nonvdtor = value; warn_reorder = value; warn_nontemplate_friend = value; } cpp_opts->warn_trigraphs = value; cpp_opts->warn_comments = value; cpp_opts->warn_num_sign_change = value; cpp_opts->warn_multichar = value; /* Was C++ only. */ break; case OPT_Wcomment: case OPT_Wcomments: cpp_opts->warn_comments = value; break; case OPT_Wdeprecated: cpp_opts->warn_deprecated = value; break; case OPT_Wdiv_by_zero: warn_div_by_zero = value; break; case OPT_Wendif_labels: cpp_opts->warn_endif_labels = value; break; case OPT_Werror: cpp_opts->warnings_are_errors = value; break; case OPT_Werror_implicit_function_declaration: mesg_implicit_function_declaration = 2; break; case OPT_Wformat: set_Wformat (value); break; case OPT_Wformat_: set_Wformat (atoi (arg)); break; case OPT_Wimplicit: set_Wimplicit (value); break; case OPT_Wimport: /* Silently ignore for now. */ break; case OPT_Winvalid_pch: cpp_opts->warn_invalid_pch = value; break; case OPT_Wmain: if (value) warn_main = 1; else warn_main = -1; break; case OPT_Wmissing_include_dirs: cpp_opts->warn_missing_include_dirs = value; break; case OPT_Wmultichar: cpp_opts->warn_multichar = value; break; case OPT_Wreturn_type: warn_return_type = value; break; case OPT_Wsystem_headers: cpp_opts->warn_system_headers = value; break; case OPT_Wtraditional: cpp_opts->warn_traditional = value; break; case OPT_Wtrigraphs: cpp_opts->warn_trigraphs = value; break; case OPT_Wundef: cpp_opts->warn_undef = value; break; case OPT_Wunknown_pragmas: /* Set to greater than 1, so that even unknown pragmas in system headers will be warned about. */ warn_unknown_pragmas = value * 2; break; case OPT_Wunused_macros: warn_unused_macros = value; break; case OPT_Wvariadic_macros: warn_variadic_macros = value; break; case OPT_Wwrite_strings: if (!c_dialect_cxx ()) flag_const_strings = value; else warn_write_strings = value; break; case OPT_ansi: if (!c_dialect_cxx ()) set_std_c89 (false, true); else set_std_cxx98 (true); break; case OPT_d: handle_OPT_d (arg); break; case OPT_fcond_mismatch: if (!c_dialect_cxx ()) { flag_cond_mismatch = value; break; } /* Fall through. 
*/ case OPT_fall_virtual: case OPT_falt_external_templates: case OPT_fenum_int_equiv: case OPT_fexternal_templates: case OPT_fguiding_decls: case OPT_fhonor_std: case OPT_fhuge_objects: case OPT_flabels_ok: case OPT_fname_mangling_version_: case OPT_fnew_abi: case OPT_fnonnull_objects: case OPT_fsquangle: case OPT_fstrict_prototype: case OPT_fthis_is_variable: case OPT_fvtable_thunks: case OPT_fxref: case OPT_fvtable_gc: warning ("switch \"%s\" is no longer supported", option->opt_text); break; case OPT_faccess_control: flag_access_control = value; break; case OPT_fasm: flag_no_asm = !value; break; case OPT_fbuiltin: flag_no_builtin = !value; break; case OPT_fbuiltin_: if (value) result = 0; else disable_builtin_function (arg); break; case OPT_fdollars_in_identifiers: cpp_opts->dollars_in_ident = value; break; case OPT_ffreestanding: value = !value; /* Fall through.... */ case OPT_fhosted: flag_hosted = value; flag_no_builtin = !value; /* warn_main will be 2 if set by -Wall, 1 if set by -Wmain */ if (!value && warn_main == 2) warn_main = 0; break; case OPT_fshort_double: flag_short_double = value; break; case OPT_fshort_enums: flag_short_enums = value; break; case OPT_fshort_wchar: flag_short_wchar = value; break; case OPT_fsigned_bitfields: flag_signed_bitfields = value; explicit_flag_signed_bitfields = 1; break; case OPT_fsigned_char: flag_signed_char = value; break; case OPT_funsigned_bitfields: flag_signed_bitfields = !value; explicit_flag_signed_bitfields = 1; break; case OPT_funsigned_char: flag_signed_char = !value; break; case OPT_fcheck_new: flag_check_new = value; break; case OPT_fconserve_space: flag_conserve_space = value; break; case OPT_fconst_strings: flag_const_strings = value; break; case OPT_fconstant_string_class_: constant_string_class_name = arg; break; case OPT_fdefault_inline: flag_default_inline = value; break; case OPT_felide_constructors: flag_elide_constructors = value; break; case OPT_fenforce_eh_specs: flag_enforce_eh_specs = value; break; case OPT_ffixed_form: case OPT_ffixed_line_length_: /* Fortran front end options ignored when preprocessing only. 
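They can reach us because the driver passes most Fortran switches straight through when this front end is only preprocessing; see the CL_F77 and permit_fortran_options handling in c_common_init_options above.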
*/ if (!flag_preprocess_only) result = 0; break; case OPT_ffor_scope: flag_new_for_scope = value; break; case OPT_fgnu_keywords: flag_no_gnu_keywords = !value; break; case OPT_fgnu_runtime: flag_next_runtime = !value; break; case OPT_fhandle_exceptions: warning ("-fhandle-exceptions has been renamed -fexceptions (and is now on by default)"); flag_exceptions = value; break; case OPT_fimplement_inlines: flag_implement_inlines = value; break; case OPT_fimplicit_inline_templates: flag_implicit_inline_templates = value; break; case OPT_fimplicit_templates: flag_implicit_templates = value; break; case OPT_fms_extensions: flag_ms_extensions = value; break; case OPT_fnext_runtime: flag_next_runtime = value; break; case OPT_fnil_receivers: flag_nil_receivers = value; break; case OPT_fnonansi_builtins: flag_no_nonansi_builtin = !value; break; case OPT_fobjc_exceptions: flag_objc_exceptions = value; break; case OPT_fobjc_sjlj_exceptions: flag_objc_sjlj_exceptions = value; break; case OPT_foperator_names: cpp_opts->operator_names = value; break; case OPT_foptional_diags: flag_optional_diags = value; break; case OPT_fpch_deps: cpp_opts->restore_pch_deps = value; break; case OPT_fpch_preprocess: flag_pch_preprocess = value; break; case OPT_fpermissive: flag_permissive = value; break; case OPT_fpreprocessed: cpp_opts->preprocessed = value; break; case OPT_freplace_objc_classes: flag_replace_objc_classes = value; break; case OPT_frepo: flag_use_repository = value; if (value) flag_implicit_templates = 0; break; case OPT_frtti: flag_rtti = value; break; case OPT_fshow_column: cpp_opts->show_column = value; break; case OPT_fstats: flag_detailed_statistics = value; break; case OPT_ftabstop_: /* It is documented that we silently ignore silly values. */ if (value >= 1 && value <= 100) cpp_opts->tabstop = value; break; case OPT_fexec_charset_: cpp_opts->narrow_charset = arg; break; case OPT_fwide_exec_charset_: cpp_opts->wide_charset = arg; break; case OPT_finput_charset_: cpp_opts->input_charset = arg; break; case OPT_ftemplate_depth_: max_tinst_depth = value; break; case OPT_fuse_cxa_atexit: flag_use_cxa_atexit = value; break; case OPT_fweak: flag_weak = value; break; case OPT_fzero_link: flag_zero_link = value; break; case OPT_gen_decls: flag_gen_declaration = 1; break; case OPT_idirafter: add_path (xstrdup (arg), AFTER, 0, true); break; case OPT_imacros: case OPT_include: defer_opt (code, arg); break; case OPT_iprefix: iprefix = arg; break; case OPT_iquote: add_path (xstrdup (arg), QUOTE, 0, true); break; case OPT_isysroot: sysroot = arg; break; case OPT_isystem: add_path (xstrdup (arg), SYSTEM, 0, true); break; case OPT_iwithprefix: add_prefixed_path (arg, SYSTEM); break; case OPT_iwithprefixbefore: add_prefixed_path (arg, BRACKET); break; case OPT_lang_asm: cpp_set_lang (parse_in, CLK_ASM); cpp_opts->dollars_in_ident = false; break; case OPT_lang_objc: cpp_opts->objc = 1; break; case OPT_nostdinc: std_inc = false; break; case OPT_nostdinc__: std_cxx_inc = false; break; case OPT_o: if (!out_fname) out_fname = arg; else error ("output filename specified twice"); break; /* We need to handle the -pedantic switches here, rather than in c_common_post_options, so that a subsequent -Wno-endif-labels is not overridden. */ case OPT_pedantic_errors: cpp_opts->pedantic_errors = 1; /* Fall through. 
*/ case OPT_pedantic: cpp_opts->pedantic = 1; cpp_opts->warn_endif_labels = 1; break; case OPT_print_objc_runtime_info: print_struct_values = 1; break; case OPT_remap: cpp_opts->remap = 1; break; case OPT_std_c__98: case OPT_std_gnu__98: set_std_cxx98 (code == OPT_std_c__98 /* ISO */); break; case OPT_std_c89: case OPT_std_iso9899_1990: case OPT_std_iso9899_199409: set_std_c89 (code == OPT_std_iso9899_199409 /* c94 */, true /* ISO */); break; case OPT_std_gnu89: set_std_c89 (false /* c94 */, false /* ISO */); break; case OPT_std_c99: case OPT_std_c9x: case OPT_std_iso9899_1999: case OPT_std_iso9899_199x: set_std_c99 (true /* ISO */); break; case OPT_std_gnu99: case OPT_std_gnu9x: set_std_c99 (false /* ISO */); break; case OPT_trigraphs: cpp_opts->trigraphs = 1; break; case OPT_traditional_cpp: cpp_opts->traditional = 1; break; case OPT_undef: flag_undef = 1; break; case OPT_w: cpp_opts->inhibit_warnings = 1; break; case OPT_v: verbose = true; break; } return result; } /* Post-switch processing. */ bool c_common_post_options (const char **pfilename) { struct cpp_callbacks *cb; /* Canonicalize the input and output filenames. */ if (in_fnames == NULL) { in_fnames = xmalloc (sizeof (in_fnames[0])); in_fnames[0] = ""; } else if (strcmp (in_fnames[0], "-") == 0) in_fnames[0] = ""; if (out_fname == NULL || !strcmp (out_fname, "-")) out_fname = ""; if (cpp_opts->deps.style == DEPS_NONE) check_deps_environment_vars (); handle_deferred_opts (); sanitize_cpp_opts (); register_include_chains (parse_in, sysroot, iprefix, std_inc, std_cxx_inc && c_dialect_cxx (), verbose); flag_inline_trees = 1; /* Use tree inlining. */ if (!flag_no_inline) flag_no_inline = 1; if (flag_inline_functions) { flag_inline_trees = 2; flag_inline_functions = 0; } /* Default to ObjC sjlj exception handling if NeXT runtime. */ if (flag_objc_sjlj_exceptions < 0) flag_objc_sjlj_exceptions = flag_next_runtime; if (flag_objc_exceptions && !flag_objc_sjlj_exceptions) flag_exceptions = 1; /* -Wextra implies -Wsign-compare, but not if explicitly overridden. */ if (warn_sign_compare == -1) warn_sign_compare = extra_warnings; /* Special format checking options don't work without -Wformat; warn if they are used. */ if (warn_format_y2k && !warn_format) warning ("-Wformat-y2k ignored without -Wformat"); if (warn_format_extra_args && !warn_format) warning ("-Wformat-extra-args ignored without -Wformat"); if (warn_format_zero_length && !warn_format) warning ("-Wformat-zero-length ignored without -Wformat"); if (warn_format_nonliteral && !warn_format) warning ("-Wformat-nonliteral ignored without -Wformat"); if (warn_format_security && !warn_format) warning ("-Wformat-security ignored without -Wformat"); if (warn_missing_format_attribute && !warn_format) warning ("-Wmissing-format-attribute ignored without -Wformat"); if (flag_preprocess_only) { /* Open the output now. We must do so even if flag_no_output is on, because there may be other output than from the actual preprocessing (e.g. from -dM). */ if (out_fname[0] == '\0') out_stream = stdout; else out_stream = fopen (out_fname, "w"); if (out_stream == NULL) { fatal_error ("opening output file %s: %m", out_fname); return false; } if (num_in_fnames > 1) error ("too many filenames given. Type %s --help for usage", progname); init_pp_output (out_stream); } else { init_c_lex (); /* Yuk. WTF is this? I do know ObjC relies on it somewhere. 
*/ input_location = UNKNOWN_LOCATION; } cb = cpp_get_callbacks (parse_in); cb->file_change = cb_file_change; cb->dir_change = cb_dir_change; cpp_post_options (parse_in); input_location = UNKNOWN_LOCATION; /* If an error has occurred in cpplib, note it so we fail immediately. */ errorcount += cpp_errors (parse_in); *pfilename = this_input_filename = cpp_read_main_file (parse_in, in_fnames[0]); /* Don't do any compilation or preprocessing if there is no input file. */ if (this_input_filename == NULL) { errorcount++; return false; } if (flag_working_directory && flag_preprocess_only && ! flag_no_line_commands) pp_dir_change (parse_in, get_src_pwd ()); return flag_preprocess_only; } /* Front end initialization common to C, ObjC and C++. */ bool c_common_init (void) { /* Set up preprocessor arithmetic. Must be done after call to c_common_nodes_and_builtins for type nodes to be good. */ cpp_opts->precision = TYPE_PRECISION (intmax_type_node); cpp_opts->char_precision = TYPE_PRECISION (char_type_node); cpp_opts->int_precision = TYPE_PRECISION (integer_type_node); cpp_opts->wchar_precision = TYPE_PRECISION (wchar_type_node); cpp_opts->unsigned_wchar = TYPE_UNSIGNED (wchar_type_node); cpp_opts->bytes_big_endian = BYTES_BIG_ENDIAN; /* This can't happen until after wchar_precision and bytes_big_endian are known. */ cpp_init_iconv (parse_in); if (flag_preprocess_only) { finish_options (); preprocess_file (parse_in); return false; } /* Has to wait until now so that cpplib has its hash table. */ init_pragma (); return true; } /* Initialize the integrated preprocessor after debug output has been initialized; loop over each input file. */ void c_common_parse_file (int set_yydebug) { #if YYDEBUG != 0 yydebug = set_yydebug; #else if (set_yydebug) warning ("YYDEBUG not defined"); #endif if (num_in_fnames > 1) fatal_error ("sorry, inter-module analysis temporarily out of commission"); finish_options (); pch_init (); push_file_scope (); c_parse_file (); finish_file (); pop_file_scope (); } /* Common finish hook for the C, ObjC and C++ front ends. */ void c_common_finish (void) { FILE *deps_stream = NULL; if (cpp_opts->deps.style != DEPS_NONE) { /* If -M or -MM was seen without -MF, default output to the output stream. */ if (!deps_file) deps_stream = out_stream; else { deps_stream = fopen (deps_file, deps_append ? "a": "w"); if (!deps_stream) fatal_error ("opening dependency file %s: %m", deps_file); } } /* For performance, avoid tearing down cpplib's internal structures with cpp_destroy (). */ errorcount += cpp_finish (parse_in, deps_stream); if (deps_stream && deps_stream != out_stream && (ferror (deps_stream) || fclose (deps_stream))) fatal_error ("closing dependency file %s: %m", deps_file); if (out_stream && (ferror (out_stream) || fclose (out_stream))) fatal_error ("when writing output to %s: %m", out_fname); } /* Either of two environment variables can specify output of dependencies. Their value is either "OUTPUT_FILE" or "OUTPUT_FILE DEPS_TARGET", where OUTPUT_FILE is the file to write deps info to and DEPS_TARGET is the target to mention in the deps. They also result in dependency information being appended to the output file rather than overwriting it, and like Sun's compiler SUNPRO_DEPENDENCIES suppresses the dependency on the main file. 
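For example, DEPENDENCIES_OUTPUT="foo.d foo.o" behaves roughly like -MM -MF foo.d -MT foo.o with the output appended to foo.d, while a value with no space just names the dependency file and lets the target be guessed from the source file name.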
*/ static void check_deps_environment_vars (void) { char *spec; GET_ENVIRONMENT (spec, "DEPENDENCIES_OUTPUT"); if (spec) cpp_opts->deps.style = DEPS_USER; else { GET_ENVIRONMENT (spec, "SUNPRO_DEPENDENCIES"); if (spec) { cpp_opts->deps.style = DEPS_SYSTEM; cpp_opts->deps.ignore_main_file = true; } } if (spec) { /* Find the space before the DEPS_TARGET, if there is one. */ char *s = strchr (spec, ' '); if (s) { /* Let the caller perform MAKE quoting. */ defer_opt (OPT_MT, s + 1); *s = '\0'; } /* Command line -MF overrides environment variables and default. */ if (!deps_file) deps_file = spec; deps_append = 1; } } /* Handle deferred command line switches. */ static void handle_deferred_opts (void) { size_t i; struct depends *deps; /* Avoid allocating the deps buffer if we don't need it. (This flag may be true without there having been -MT or -MQ options, but we'll still need the deps buffer.) */ if (!deps_seen) return; deps = cpp_get_deps (parse_in); for (i = 0; i < deferred_count; i++) { struct deferred_opt *opt = &deferred_opts[i]; if (opt->code == OPT_MT || opt->code == OPT_MQ) deps_add_target (deps, opt->arg, opt->code == OPT_MQ); } } /* These settings are appropriate for GCC, but not necessarily so for cpplib as a library. */ static void sanitize_cpp_opts (void) { /* If we don't know what style of dependencies to output, complain if any other dependency switches have been given. */ if (deps_seen && cpp_opts->deps.style == DEPS_NONE) error ("to generate dependencies you must specify either -M or -MM"); /* -dM and dependencies suppress normal output; do it here so that the last -d[MDN] switch overrides earlier ones. */ if (flag_dump_macros == 'M') flag_no_output = 1; /* Disable -dD, -dN and -dI if normal output is suppressed. Allow -dM since at least glibc relies on -M -dM to work. */ if (flag_no_output) { if (flag_dump_macros != 'M') flag_dump_macros = 0; flag_dump_includes = 0; } cpp_opts->unsigned_char = !flag_signed_char; cpp_opts->stdc_0_in_system_headers = STDC_0_IN_SYSTEM_HEADERS; /* We want -Wno-long-long to override -pedantic -std=non-c99 and/or -Wtraditional, whatever the ordering. */ cpp_opts->warn_long_long = warn_long_long && ((!flag_isoc99 && pedantic) || warn_traditional); /* Similarly with -Wno-variadic-macros. No check for c99 here, since this also turns off warnings about GCCs extension. */ cpp_opts->warn_variadic_macros = warn_variadic_macros && (pedantic || warn_traditional); /* If we're generating preprocessor output, emit current directory if explicitly requested or if debugging information is enabled. ??? Maybe we should only do it for debugging formats that actually output the current directory? */ if (flag_working_directory == -1) flag_working_directory = (debug_info_level != DINFO_LEVEL_NONE); } /* Add include path with a prefix at the front of its name. */ static void add_prefixed_path (const char *suffix, size_t chain) { char *path; const char *prefix; size_t prefix_len, suffix_len; suffix_len = strlen (suffix); prefix = iprefix ? iprefix : cpp_GCC_INCLUDE_DIR; prefix_len = iprefix ? strlen (iprefix) : cpp_GCC_INCLUDE_DIR_len; path = xmalloc (prefix_len + suffix_len + 1); memcpy (path, prefix, prefix_len); memcpy (path + prefix_len, suffix, suffix_len); path[prefix_len + suffix_len] = '\0'; add_path (path, chain, 0, false); } /* Handle -D, -U, -A, -imacros, and the first -include. 
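These switches were deferred by c_common_handle_option because cpplib is not ready to act on them while the command line is still being parsed. Once the built-in macros have been installed, each -D is replayed as cpp_define (so -DFOO=1 becomes cpp_define (parse_in, "FOO=1")), each -U as cpp_undef, and each -A as cpp_assert, or cpp_unassert when the assertion starts with '-'. Files named by -imacros are then scanned with output suppressed, and finally the first -include file is pushed.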
*/ static void finish_options (void) { if (!cpp_opts->preprocessed) { size_t i; cpp_change_file (parse_in, LC_RENAME, _("<built-in>")); cpp_init_builtins (parse_in, flag_hosted); c_cpp_builtins (parse_in); /* We're about to send user input to cpplib, so make it warn for things that we previously (when we sent it internal definitions) told it to not warn. C99 permits implementation-defined characters in identifiers. The documented meaning of -std= is to turn off extensions that conflict with the specified standard, and since a strictly conforming program cannot contain a '$', we do not condition their acceptance on the -std= setting. */ cpp_opts->warn_dollars = (cpp_opts->pedantic && !cpp_opts->c99); cpp_change_file (parse_in, LC_RENAME, _("<command line>")); for (i = 0; i < deferred_count; i++) { struct deferred_opt *opt = &deferred_opts[i]; if (opt->code == OPT_D) cpp_define (parse_in, opt->arg); else if (opt->code == OPT_U) cpp_undef (parse_in, opt->arg); else if (opt->code == OPT_A) { if (opt->arg[0] == '-') cpp_unassert (parse_in, opt->arg + 1); else cpp_assert (parse_in, opt->arg); } } /* Handle -imacros after -D and -U. */ for (i = 0; i < deferred_count; i++) { struct deferred_opt *opt = &deferred_opts[i]; if (opt->code == OPT_imacros && cpp_push_include (parse_in, opt->arg)) { /* Disable push_command_line_include callback for now. */ include_cursor = deferred_count + 1; cpp_scan_nooutput (parse_in); } } } include_cursor = 0; push_command_line_include (); } /* Give CPP the next file given by -include, if any. */ static void push_command_line_include (void) { while (include_cursor < deferred_count) { struct deferred_opt *opt = &deferred_opts[include_cursor++]; if (! cpp_opts->preprocessed && opt->code == OPT_include && cpp_push_include (parse_in, opt->arg)) return; } if (include_cursor == deferred_count) { include_cursor++; /* -Wunused-macros should only warn about macros defined hereafter. */ cpp_opts->warn_unused_macros = warn_unused_macros; /* Restore the line map from <command line>. */ if (! cpp_opts->preprocessed) cpp_change_file (parse_in, LC_RENAME, main_input_filename); /* Set this here so the client can change the option if it wishes, and after stacking the main file so we don't trace the main file. */ line_table.trace_includes = cpp_opts->print_include_names; } } /* File change callback. Has to handle -include files. */ static void cb_file_change (cpp_reader *pfile ATTRIBUTE_UNUSED, const struct line_map *new_map) { if (flag_preprocess_only) pp_file_change (new_map); else fe_file_change (new_map); if (new_map == 0 || (new_map->reason == LC_LEAVE && MAIN_FILE_P (new_map))) push_command_line_include (); } void cb_dir_change (cpp_reader *pfile ATTRIBUTE_UNUSED, const char *dir) { if (! set_src_pwd (dir)) warning ("too late for # directive to set debug directory"); } /* Set the C 89 standard (with 1994 amendments if C94, without GNU extensions if ISO). There is no concept of gnu94. */ static void set_std_c89 (int c94, int iso) { cpp_set_lang (parse_in, c94 ? CLK_STDC94: iso ? CLK_STDC89: CLK_GNUC89); flag_iso = iso; flag_no_asm = iso; flag_no_gnu_keywords = iso; flag_no_nonansi_builtin = iso; flag_isoc94 = c94; flag_isoc99 = 0; } /* Set the C 99 standard (without GNU extensions if ISO). */ static void set_std_c99 (int iso) { cpp_set_lang (parse_in, iso ? CLK_STDC99: CLK_GNUC99); flag_no_asm = iso; flag_no_nonansi_builtin = iso; flag_iso = iso; flag_isoc99 = 1; flag_isoc94 = 1; } /* Set the C++ 98 standard (without GNU extensions if ISO). */ static void set_std_cxx98 (int iso) { cpp_set_lang (parse_in, iso ?
CLK_CXX98: CLK_GNUCXX); flag_no_gnu_keywords = iso; flag_no_nonansi_builtin = iso; flag_iso = iso; } /* Handle setting implicit to ON. */ static void set_Wimplicit (int on) { warn_implicit = on; warn_implicit_int = on; if (on) { if (mesg_implicit_function_declaration != 2) mesg_implicit_function_declaration = 1; } else mesg_implicit_function_declaration = 0; } /* Args to -d specify what to dump. Silently ignore unrecognized options; they may be aimed at toplev.c. */ static void handle_OPT_d (const char *arg) { char c; while ((c = *arg++) != '\0') switch (c) { case 'M': /* Dump macros only. */ case 'N': /* Dump names. */ case 'D': /* Dump definitions. */ flag_dump_macros = c; break; case 'I': flag_dump_includes = 1; break; } } /* Check calls to formatted I/O functions (-Wformat). Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Set format warning options according to a -Wformat=n option. */ void set_Wformat (int setting) { warn_format = setting; warn_format_extra_args = setting; warn_format_zero_length = setting; if (setting != 1) { warn_format_nonliteral = setting; warn_format_security = setting; warn_format_y2k = setting; } /* Make sure not to disable -Wnonnull if -Wformat=0 is specified. */ if (setting) warn_nonnull = setting; } /* Handle attributes associated with format checking. */ /* This must be in the same order as format_types, with format_type_error last. */ enum format_type { printf_format_type, asm_fprintf_format_type, gcc_diag_format_type, gcc_cdiag_format_type, gcc_cxxdiag_format_type, scanf_format_type, strftime_format_type, strfmon_format_type, format_type_error }; typedef struct function_format_info { enum format_type format_type; /* type of format (printf, scanf, etc.) */ unsigned HOST_WIDE_INT format_num; /* number of format argument */ unsigned HOST_WIDE_INT first_arg_num; /* number of first arg (zero for varargs) */ } function_format_info; static bool decode_format_attr (tree, function_format_info *, int); static enum format_type decode_format_type (const char *); static bool check_format_string (tree argument, unsigned HOST_WIDE_INT format_num, int flags, bool *no_add_attrs); static bool get_constant (tree expr, unsigned HOST_WIDE_INT *value, int validated_p); /* Handle a "format_arg" attribute; arguments as in struct attribute_spec.handler. 
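The attribute marks functions such as gettext-style translation wrappers whose result is a format string derived from the argument in the given position; for instance, a declaration along the lines of extern char *my_dgettext (const char *) __attribute__ ((format_arg (1))) lets a call like printf (my_dgettext ("%d"), i) be checked as if the string literal had been passed to printf directly (my_dgettext and i are only illustrative names).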
*/ tree handle_format_arg_attribute (tree *node, tree name ATTRIBUTE_UNUSED, tree args, int flags, bool *no_add_attrs) { tree type = *node; tree format_num_expr = TREE_VALUE (args); unsigned HOST_WIDE_INT format_num = 0; tree argument; if (!get_constant (format_num_expr, &format_num, 0)) { error ("format string has invalid operand number"); *no_add_attrs = true; return NULL_TREE; } argument = TYPE_ARG_TYPES (type); if (argument) { if (!check_format_string (argument, format_num, flags, no_add_attrs)) return NULL_TREE; } if (TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (type))) != char_type_node)) { if (!(flags & (int) ATTR_FLAG_BUILT_IN)) error ("function does not return string type"); *no_add_attrs = true; return NULL_TREE; } return NULL_TREE; } /* Verify that the format_num argument is actually a string, in case the format attribute is in error. */ static bool check_format_string (tree argument, unsigned HOST_WIDE_INT format_num, int flags, bool *no_add_attrs) { unsigned HOST_WIDE_INT i; for (i = 1; i != format_num; i++) { if (argument == 0) break; argument = TREE_CHAIN (argument); } if (!argument || TREE_CODE (TREE_VALUE (argument)) != POINTER_TYPE || (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (argument))) != char_type_node)) { if (!(flags & (int) ATTR_FLAG_BUILT_IN)) error ("format string arg not a string type"); *no_add_attrs = true; return false; } return true; } /* Strip any conversions from the expression, verify it is a constant, and store its value. If validated_p is true, abort on errors. Returns true on success, false otherwise. */ static bool get_constant(tree expr, unsigned HOST_WIDE_INT *value, int validated_p) { while (TREE_CODE (expr) == NOP_EXPR || TREE_CODE (expr) == CONVERT_EXPR || TREE_CODE (expr) == NON_LVALUE_EXPR) expr = TREE_OPERAND (expr, 0); if (TREE_CODE (expr) != INTEGER_CST || TREE_INT_CST_HIGH (expr) != 0) { if (validated_p) abort (); return false; } *value = TREE_INT_CST_LOW (expr); return true; } /* Decode the arguments to a "format" attribute into a function_format_info structure. It is already known that the list is of the right length. If VALIDATED_P is true, then these attributes have already been validated and this function will abort if they are erroneous; if false, it will give an error message. Returns true if the attributes are successfully decoded, false otherwise. */ static bool decode_format_attr (tree args, function_format_info *info, int validated_p) { tree format_type_id = TREE_VALUE (args); tree format_num_expr = TREE_VALUE (TREE_CHAIN (args)); tree first_arg_num_expr = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (args))); if (TREE_CODE (format_type_id) != IDENTIFIER_NODE) { if (validated_p) abort (); error ("unrecognized format specifier"); return false; } else { const char *p = IDENTIFIER_POINTER (format_type_id); info->format_type = decode_format_type (p); if (info->format_type == format_type_error) { if (validated_p) abort (); warning ("%qs is an unrecognized format function type", p); return false; } } if (!get_constant (format_num_expr, &info->format_num, validated_p)) { error ("format string has invalid operand number"); return false; } if (!get_constant (first_arg_num_expr, &info->first_arg_num, validated_p)) { error ("'...' 
has invalid operand number"); return false; } if (info->first_arg_num != 0 && info->first_arg_num <= info->format_num) { if (validated_p) abort (); error ("format string arg follows the args to be formatted"); return false; } return true; } /* Check a call to a format function against a parameter list. */ /* The meaningfully distinct length modifiers for format checking recognized by GCC. */ enum format_lengths { FMT_LEN_none, FMT_LEN_hh, FMT_LEN_h, FMT_LEN_l, FMT_LEN_ll, FMT_LEN_L, FMT_LEN_z, FMT_LEN_t, FMT_LEN_j, FMT_LEN_MAX }; /* The standard versions in which various format features appeared. */ enum format_std_version { STD_C89, STD_C94, STD_C9L, /* C99, but treat as C89 if -Wno-long-long. */ STD_C99, STD_EXT }; /* The C standard version C++ is treated as equivalent to or inheriting from, for the purpose of format features supported. */ #define CPLUSPLUS_STD_VER STD_C94 /* The C standard version we are checking formats against when pedantic. */ #define C_STD_VER ((int)(c_dialect_cxx () \ ? CPLUSPLUS_STD_VER \ : (flag_isoc99 \ ? STD_C99 \ : (flag_isoc94 ? STD_C94 : STD_C89)))) /* The name to give to the standard version we are warning about when pedantic. FEATURE_VER is the version in which the feature warned out appeared, which is higher than C_STD_VER. */ #define C_STD_NAME(FEATURE_VER) (c_dialect_cxx () \ ? "ISO C++" \ : ((FEATURE_VER) == STD_EXT \ ? "ISO C" \ : "ISO C90")) /* Adjust a C standard version, which may be STD_C9L, to account for -Wno-long-long. Returns other standard versions unchanged. */ #define ADJ_STD(VER) ((int)((VER) == STD_C9L \ ? (warn_long_long ? STD_C99 : STD_C89) \ : (VER))) /* Flags that may apply to a particular kind of format checked by GCC. */ enum { /* This format converts arguments of types determined by the format string. */ FMT_FLAG_ARG_CONVERT = 1, /* The scanf allocation 'a' kludge applies to this format kind. */ FMT_FLAG_SCANF_A_KLUDGE = 2, /* A % during parsing a specifier is allowed to be a modified % rather that indicating the format is broken and we are out-of-sync. */ FMT_FLAG_FANCY_PERCENT_OK = 4, /* With $ operand numbers, it is OK to reference the same argument more than once. */ FMT_FLAG_DOLLAR_MULTIPLE = 8, /* This format type uses $ operand numbers (strfmon doesn't). */ FMT_FLAG_USE_DOLLAR = 16, /* Zero width is bad in this type of format (scanf). */ FMT_FLAG_ZERO_WIDTH_BAD = 32, /* Empty precision specification is OK in this type of format (printf). */ FMT_FLAG_EMPTY_PREC_OK = 64, /* Gaps are allowed in the arguments with $ operand numbers if all arguments are pointers (scanf). */ FMT_FLAG_DOLLAR_GAP_POINTER_OK = 128 /* Not included here: details of whether width or precision may occur (controlled by width_char and precision_char); details of whether '*' can be used for these (width_type and precision_type); details of whether length modifiers can occur (length_char_specs). */ }; /* Structure describing a length modifier supported in format checking, and possibly a doubled version such as "hh". */ typedef struct { /* Name of the single-character length modifier. */ const char *name; /* Index into a format_char_info.types array. */ enum format_lengths index; /* Standard version this length appears in. */ enum format_std_version std; /* Same, if the modifier can be repeated, or NULL if it can't. 
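For example, in printf_length_specs below "h" doubles to "hh" and "l" doubles to "ll", while "q" and "L" have no doubled form.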
*/ const char *double_name; enum format_lengths double_index; enum format_std_version double_std; } format_length_info; /* Structure describing the combination of a conversion specifier (or a set of specifiers which act identically) and a length modifier. */ typedef struct { /* The standard version this combination of length and type appeared in. This is only relevant if greater than those for length and type individually; otherwise it is ignored. */ enum format_std_version std; /* The name to use for the type, if different from that generated internally (e.g., "signed size_t"). */ const char *name; /* The type itself. */ tree *type; } format_type_detail; /* Macros to fill out tables of these. */ #define NOARGUMENTS { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN } #define BADLEN { 0, NULL, NULL } #define NOLENGTHS { BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN } /* Structure describing a format conversion specifier (or a set of specifiers which act identically), and the length modifiers used with it. */ typedef struct { const char *format_chars; int pointer_count; enum format_std_version std; /* Types accepted for each length modifier. */ format_type_detail types[FMT_LEN_MAX]; /* List of other modifier characters allowed with these specifiers. This lists flags, and additionally "w" for width, "p" for precision (right precision, for strfmon), "#" for left precision (strfmon), "a" for scanf "a" allocation extension (not applicable in C99 mode), "*" for scanf suppression, and "E" and "O" for those strftime modifiers. */ const char *flag_chars; /* List of additional flags describing these conversion specifiers. "c" for generic character pointers being allowed, "2" for strftime two digit year formats, "3" for strftime formats giving two digit years in some locales, "4" for "2" which becomes "3" with an "E" modifier, "o" if use of strftime "O" is a GNU extension beyond C99, "W" if the argument is a pointer which is dereferenced and written into, "R" if the argument is a pointer which is dereferenced and read from, "i" for printf integer formats where the '0' flag is ignored with precision, and "[" for the starting character of a scanf scanset. */ const char *flags2; } format_char_info; /* Structure describing a flag accepted by some kind of format. */ typedef struct { /* The flag character in question (0 for end of array). */ int flag_char; /* Zero if this entry describes the flag character in general, or a nonzero character that may be found in flags2 if it describes the flag when used with certain formats only. If the latter, only the first such entry found that applies to the current conversion specifier is used; the values of `name' and `long_name' it supplies will be used, if non-NULL and the standard version is higher than the unpredicated one, for any pedantic warning. For example, 'o' for strftime formats (meaning 'O' is an extension over C99). */ int predicate; /* Nonzero if the next character after this flag in the format should be skipped ('=' in strfmon), zero otherwise. */ int skip_next_char; /* The name to use for this flag in diagnostic messages. For example, N_("`0' flag"), N_("field width"). */ const char *name; /* Long name for this flag in diagnostic messages; currently only used for "ISO C does not support ...". For example, N_("the `I' printf flag"). */ const char *long_name; /* The standard version in which it appeared. 
*/ enum format_std_version std; } format_flag_spec; /* Structure describing a combination of flags that is bad for some kind of format. */ typedef struct { /* The first flag character in question (0 for end of array). */ int flag_char1; /* The second flag character. */ int flag_char2; /* Nonzero if the message should say that the first flag is ignored with the second, zero if the combination should simply be objected to. */ int ignored; /* Zero if this entry applies whenever this flag combination occurs, a nonzero character from flags2 if it only applies in some circumstances (e.g. 'i' for printf formats ignoring 0 with precision). */ int predicate; } format_flag_pair; /* Structure describing a particular kind of format processed by GCC. */ typedef struct { /* The name of this kind of format, for use in diagnostics. Also the name of the attribute (without preceding and following __). */ const char *name; /* Specifications of the length modifiers accepted; possibly NULL. */ const format_length_info *length_char_specs; /* Details of the conversion specification characters accepted. */ const format_char_info *conversion_specs; /* String listing the flag characters that are accepted. */ const char *flag_chars; /* String listing modifier characters (strftime) accepted. May be NULL. */ const char *modifier_chars; /* Details of the flag characters, including pseudo-flags. */ const format_flag_spec *flag_specs; /* Details of bad combinations of flags. */ const format_flag_pair *bad_flag_pairs; /* Flags applicable to this kind of format. */ int flags; /* Flag character to treat a width as, or 0 if width not used. */ int width_char; /* Flag character to treat a left precision (strfmon) as, or 0 if left precision not used. */ int left_precision_char; /* Flag character to treat a precision (for strfmon, right precision) as, or 0 if precision not used. */ int precision_char; /* If a flag character has the effect of suppressing the conversion of an argument ('*' in scanf), that flag character, otherwise 0. */ int suppression_char; /* Flag character to treat a length modifier as (ignored if length modifiers not used). Need not be placed in flag_chars for conversion specifiers, but is used to check for bad combinations such as length modifier with assignment suppression in scanf. */ int length_code_char; /* Pointer to type of argument expected if '*' is used for a width, or NULL if '*' not used for widths. */ tree *width_type; /* Pointer to type of argument expected if '*' is used for a precision, or NULL if '*' not used for precisions. */ tree *precision_type; } format_kind_info; /* Structure describing details of a type expected in format checking, and the type to check against it. */ typedef struct format_wanted_type { /* The type wanted. */ tree wanted_type; /* The name of this type to use in diagnostics. */ const char *wanted_type_name; /* The level of indirection through pointers at which this type occurs. */ int pointer_count; /* Whether, when pointer_count is 1, to allow any character type when pedantic, rather than just the character or void type specified. */ int char_lenient_flag; /* Whether the argument, dereferenced once, is written into and so the argument must not be a pointer to a const-qualified type. */ int writing_in_flag; /* Whether the argument, dereferenced once, is read from and so must not be a NULL pointer. 
*/ int reading_from_flag; /* If warnings should be of the form "field precision should have type 'int'", the name to use (in this case "field precision"), otherwise NULL, for "format expects type 'long'" type messages. */ const char *name; /* The actual parameter to check against the wanted type. */ tree param; /* The argument number of that parameter. */ int arg_num; /* The next type to check for this format conversion, or NULL if none. */ struct format_wanted_type *next; } format_wanted_type; static const format_length_info printf_length_specs[] = { { "h", FMT_LEN_h, STD_C89, "hh", FMT_LEN_hh, STD_C99 }, { "l", FMT_LEN_l, STD_C89, "ll", FMT_LEN_ll, STD_C9L }, { "q", FMT_LEN_ll, STD_EXT, NULL, 0, 0 }, { "L", FMT_LEN_L, STD_C89, NULL, 0, 0 }, { "z", FMT_LEN_z, STD_C99, NULL, 0, 0 }, { "Z", FMT_LEN_z, STD_EXT, NULL, 0, 0 }, { "t", FMT_LEN_t, STD_C99, NULL, 0, 0 }, { "j", FMT_LEN_j, STD_C99, NULL, 0, 0 }, { NULL, 0, 0, NULL, 0, 0 } }; /* Length specifiers valid for asm_fprintf. */ static const format_length_info asm_fprintf_length_specs[] = { { "l", FMT_LEN_l, STD_C89, "ll", FMT_LEN_ll, STD_C89 }, { "w", FMT_LEN_none, STD_C89, NULL, 0, 0 }, { NULL, 0, 0, NULL, 0, 0 } }; /* Length specifiers valid for GCC diagnostics. */ static const format_length_info gcc_diag_length_specs[] = { { "l", FMT_LEN_l, STD_C89, "ll", FMT_LEN_ll, STD_C89 }, { "w", FMT_LEN_none, STD_C89, NULL, 0, 0 }, { NULL, 0, 0, NULL, 0, 0 } }; /* The custom diagnostics all accept the same length specifiers. */ #define gcc_cdiag_length_specs gcc_diag_length_specs #define gcc_cxxdiag_length_specs gcc_diag_length_specs /* This differs from printf_length_specs only in that "Z" is not accepted. */ static const format_length_info scanf_length_specs[] = { { "h", FMT_LEN_h, STD_C89, "hh", FMT_LEN_hh, STD_C99 }, { "l", FMT_LEN_l, STD_C89, "ll", FMT_LEN_ll, STD_C9L }, { "q", FMT_LEN_ll, STD_EXT, NULL, 0, 0 }, { "L", FMT_LEN_L, STD_C89, NULL, 0, 0 }, { "z", FMT_LEN_z, STD_C99, NULL, 0, 0 }, { "t", FMT_LEN_t, STD_C99, NULL, 0, 0 }, { "j", FMT_LEN_j, STD_C99, NULL, 0, 0 }, { NULL, 0, 0, NULL, 0, 0 } }; /* All tables for strfmon use STD_C89 everywhere, since -pedantic warnings make no sense for a format type not part of any C standard version. */ static const format_length_info strfmon_length_specs[] = { /* A GNU extension. 
*/ { "L", FMT_LEN_L, STD_C89, NULL, 0, 0 }, { NULL, 0, 0, NULL, 0, 0 } }; static const format_flag_spec printf_flag_specs[] = { { ' ', 0, 0, N_("` ' flag"), N_("the ` ' printf flag"), STD_C89 }, { '+', 0, 0, N_("`+' flag"), N_("the `+' printf flag"), STD_C89 }, { '#', 0, 0, N_("`#' flag"), N_("the `#' printf flag"), STD_C89 }, { '0', 0, 0, N_("`0' flag"), N_("the `0' printf flag"), STD_C89 }, { '-', 0, 0, N_("`-' flag"), N_("the `-' printf flag"), STD_C89 }, { '\'', 0, 0, N_("`'' flag"), N_("the `'' printf flag"), STD_EXT }, { 'I', 0, 0, N_("`I' flag"), N_("the `I' printf flag"), STD_EXT }, { 'w', 0, 0, N_("field width"), N_("field width in printf format"), STD_C89 }, { 'p', 0, 0, N_("precision"), N_("precision in printf format"), STD_C89 }, { 'L', 0, 0, N_("length modifier"), N_("length modifier in printf format"), STD_C89 }, { 0, 0, 0, NULL, NULL, 0 } }; static const format_flag_pair printf_flag_pairs[] = { { ' ', '+', 1, 0 }, { '0', '-', 1, 0 }, { '0', 'p', 1, 'i' }, { 0, 0, 0, 0 } }; static const format_flag_spec asm_fprintf_flag_specs[] = { { ' ', 0, 0, N_("` ' flag"), N_("the ` ' printf flag"), STD_C89 }, { '+', 0, 0, N_("`+' flag"), N_("the `+' printf flag"), STD_C89 }, { '#', 0, 0, N_("`#' flag"), N_("the `#' printf flag"), STD_C89 }, { '0', 0, 0, N_("`0' flag"), N_("the `0' printf flag"), STD_C89 }, { '-', 0, 0, N_("`-' flag"), N_("the `-' printf flag"), STD_C89 }, { 'w', 0, 0, N_("field width"), N_("field width in printf format"), STD_C89 }, { 'p', 0, 0, N_("precision"), N_("precision in printf format"), STD_C89 }, { 'L', 0, 0, N_("length modifier"), N_("length modifier in printf format"), STD_C89 }, { 0, 0, 0, NULL, NULL, 0 } }; static const format_flag_pair asm_fprintf_flag_pairs[] = { { ' ', '+', 1, 0 }, { '0', '-', 1, 0 }, { '0', 'p', 1, 'i' }, { 0, 0, 0, 0 } }; static const format_flag_pair gcc_diag_flag_pairs[] = { { 0, 0, 0, 0 } }; #define gcc_cdiag_flag_pairs gcc_diag_flag_pairs #define gcc_cxxdiag_flag_pairs gcc_diag_flag_pairs static const format_flag_spec gcc_diag_flag_specs[] = { { 'q', 0, 0, N_("`q' flag"), N_("the `q' diagnostic flag"), STD_C89 }, { 'p', 0, 0, N_("precision"), N_("precision in printf format"), STD_C89 }, { 'L', 0, 0, N_("length modifier"), N_("length modifier in printf format"), STD_C89 }, { 0, 0, 0, NULL, NULL, 0 } }; #define gcc_cdiag_flag_specs gcc_diag_flag_specs static const format_flag_spec gcc_cxxdiag_flag_specs[] = { { '+', 0, 0, N_("`+' flag"), N_("the `+' printf flag"), STD_C89 }, { '#', 0, 0, N_("`#' flag"), N_("the `#' printf flag"), STD_C89 }, { 'q', 0, 0, N_("`q' flag"), N_("the `q' diagnostic flag"), STD_C89 }, { 'p', 0, 0, N_("precision"), N_("precision in printf format"), STD_C89 }, { 'L', 0, 0, N_("length modifier"), N_("length modifier in printf format"), STD_C89 }, { 0, 0, 0, NULL, NULL, 0 } }; static const format_flag_spec scanf_flag_specs[] = { { '*', 0, 0, N_("assignment suppression"), N_("the assignment suppression scanf feature"), STD_C89 }, { 'a', 0, 0, N_("`a' flag"), N_("the `a' scanf flag"), STD_EXT }, { 'w', 0, 0, N_("field width"), N_("field width in scanf format"), STD_C89 }, { 'L', 0, 0, N_("length modifier"), N_("length modifier in scanf format"), STD_C89 }, { '\'', 0, 0, N_("`'' flag"), N_("the `'' scanf flag"), STD_EXT }, { 'I', 0, 0, N_("`I' flag"), N_("the `I' scanf flag"), STD_EXT }, { 0, 0, 0, NULL, NULL, 0 } }; static const format_flag_pair scanf_flag_pairs[] = { { '*', 'L', 0, 0 }, { 0, 0, 0, 0 } }; static const format_flag_spec strftime_flag_specs[] = { { '_', 0, 0, N_("`_' flag"), N_("the `_' strftime 
flag"), STD_EXT }, { '-', 0, 0, N_("`-' flag"), N_("the `-' strftime flag"), STD_EXT }, { '0', 0, 0, N_("`0' flag"), N_("the `0' strftime flag"), STD_EXT }, { '^', 0, 0, N_("`^' flag"), N_("the `^' strftime flag"), STD_EXT }, { '#', 0, 0, N_("`#' flag"), N_("the `#' strftime flag"), STD_EXT }, { 'w', 0, 0, N_("field width"), N_("field width in strftime format"), STD_EXT }, { 'E', 0, 0, N_("`E' modifier"), N_("the `E' strftime modifier"), STD_C99 }, { 'O', 0, 0, N_("`O' modifier"), N_("the `O' strftime modifier"), STD_C99 }, { 'O', 'o', 0, NULL, N_("the `O' modifier"), STD_EXT }, { 0, 0, 0, NULL, NULL, 0 } }; static const format_flag_pair strftime_flag_pairs[] = { { 'E', 'O', 0, 0 }, { '_', '-', 0, 0 }, { '_', '0', 0, 0 }, { '-', '0', 0, 0 }, { '^', '#', 0, 0 }, { 0, 0, 0, 0 } }; static const format_flag_spec strfmon_flag_specs[] = { { '=', 0, 1, N_("fill character"), N_("fill character in strfmon format"), STD_C89 }, { '^', 0, 0, N_("`^' flag"), N_("the `^' strfmon flag"), STD_C89 }, { '+', 0, 0, N_("`+' flag"), N_("the `+' strfmon flag"), STD_C89 }, { '(', 0, 0, N_("`(' flag"), N_("the `(' strfmon flag"), STD_C89 }, { '!', 0, 0, N_("`!' flag"), N_("the `!' strfmon flag"), STD_C89 }, { '-', 0, 0, N_("`-' flag"), N_("the `-' strfmon flag"), STD_C89 }, { 'w', 0, 0, N_("field width"), N_("field width in strfmon format"), STD_C89 }, { '#', 0, 0, N_("left precision"), N_("left precision in strfmon format"), STD_C89 }, { 'p', 0, 0, N_("right precision"), N_("right precision in strfmon format"), STD_C89 }, { 'L', 0, 0, N_("length modifier"), N_("length modifier in strfmon format"), STD_C89 }, { 0, 0, 0, NULL, NULL, 0 } }; static const format_flag_pair strfmon_flag_pairs[] = { { '+', '(', 0, 0 }, { 0, 0, 0, 0 } }; #define T_I &integer_type_node #define T89_I { STD_C89, NULL, T_I } #define T_L &long_integer_type_node #define T89_L { STD_C89, NULL, T_L } #define T_LL &long_long_integer_type_node #define T9L_LL { STD_C9L, NULL, T_LL } #define TEX_LL { STD_EXT, NULL, T_LL } #define T_S &short_integer_type_node #define T89_S { STD_C89, NULL, T_S } #define T_UI &unsigned_type_node #define T89_UI { STD_C89, NULL, T_UI } #define T_UL &long_unsigned_type_node #define T89_UL { STD_C89, NULL, T_UL } #define T_ULL &long_long_unsigned_type_node #define T9L_ULL { STD_C9L, NULL, T_ULL } #define TEX_ULL { STD_EXT, NULL, T_ULL } #define T_US &short_unsigned_type_node #define T89_US { STD_C89, NULL, T_US } #define T_F &float_type_node #define T89_F { STD_C89, NULL, T_F } #define T99_F { STD_C99, NULL, T_F } #define T_D &double_type_node #define T89_D { STD_C89, NULL, T_D } #define T99_D { STD_C99, NULL, T_D } #define T_LD &long_double_type_node #define T89_LD { STD_C89, NULL, T_LD } #define T99_LD { STD_C99, NULL, T_LD } #define T_C &char_type_node #define T89_C { STD_C89, NULL, T_C } #define T_SC &signed_char_type_node #define T99_SC { STD_C99, NULL, T_SC } #define T_UC &unsigned_char_type_node #define T99_UC { STD_C99, NULL, T_UC } #define T_V &void_type_node #define T89_V { STD_C89, NULL, T_V } #define T_W &wchar_type_node #define T94_W { STD_C94, "wchar_t", T_W } #define TEX_W { STD_EXT, "wchar_t", T_W } #define T_WI &wint_type_node #define T94_WI { STD_C94, "wint_t", T_WI } #define TEX_WI { STD_EXT, "wint_t", T_WI } #define T_ST &size_type_node #define T99_ST { STD_C99, "size_t", T_ST } #define T_SST &signed_size_type_node #define T99_SST { STD_C99, "signed size_t", T_SST } #define T_PD &ptrdiff_type_node #define T99_PD { STD_C99, "ptrdiff_t", T_PD } #define T_UPD &unsigned_ptrdiff_type_node #define T99_UPD { 
STD_C99, "unsigned ptrdiff_t", T_UPD } #define T_IM &intmax_type_node #define T99_IM { STD_C99, "intmax_t", T_IM } #define T_UIM &uintmax_type_node #define T99_UIM { STD_C99, "uintmax_t", T_UIM } static const format_char_info print_char_table[] = { /* C89 conversion specifiers. */ { "di", 0, STD_C89, { T89_I, T99_SC, T89_S, T89_L, T9L_LL, TEX_LL, T99_SST, T99_PD, T99_IM }, "-wp0 +'I", "i" }, { "oxX", 0, STD_C89, { T89_UI, T99_UC, T89_US, T89_UL, T9L_ULL, TEX_ULL, T99_ST, T99_UPD, T99_UIM }, "-wp0#", "i" }, { "u", 0, STD_C89, { T89_UI, T99_UC, T89_US, T89_UL, T9L_ULL, TEX_ULL, T99_ST, T99_UPD, T99_UIM }, "-wp0'I", "i" }, { "fgG", 0, STD_C89, { T89_D, BADLEN, BADLEN, T99_D, BADLEN, T89_LD, BADLEN, BADLEN, BADLEN }, "-wp0 +#'I", "" }, { "eE", 0, STD_C89, { T89_D, BADLEN, BADLEN, T99_D, BADLEN, T89_LD, BADLEN, BADLEN, BADLEN }, "-wp0 +#I", "" }, { "c", 0, STD_C89, { T89_I, BADLEN, BADLEN, T94_WI, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-w", "" }, { "s", 1, STD_C89, { T89_C, BADLEN, BADLEN, T94_W, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp", "cR" }, { "p", 1, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-w", "c" }, { "n", 1, STD_C89, { T89_I, T99_SC, T89_S, T89_L, T9L_LL, BADLEN, T99_SST, T99_PD, T99_IM }, "", "W" }, /* C99 conversion specifiers. */ { "F", 0, STD_C99, { T99_D, BADLEN, BADLEN, T99_D, BADLEN, T99_LD, BADLEN, BADLEN, BADLEN }, "-wp0 +#'I", "" }, { "aA", 0, STD_C99, { T99_D, BADLEN, BADLEN, T99_D, BADLEN, T99_LD, BADLEN, BADLEN, BADLEN }, "-wp0 +#", "" }, /* X/Open conversion specifiers. */ { "C", 0, STD_EXT, { TEX_WI, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-w", "" }, { "S", 1, STD_EXT, { TEX_W, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp", "R" }, /* GNU conversion specifiers. */ { "m", 0, STD_EXT, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp", "" }, { NULL, 0, 0, NOLENGTHS, NULL, NULL } }; static const format_char_info asm_fprintf_char_table[] = { /* C89 conversion specifiers. */ { "di", 0, STD_C89, { T89_I, BADLEN, BADLEN, T89_L, T9L_LL, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp0 +", "i" }, { "oxX", 0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp0#", "i" }, { "u", 0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp0", "i" }, { "c", 0, STD_C89, { T89_I, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-w", "" }, { "s", 1, STD_C89, { T89_C, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "-wp", "cR" }, /* asm_fprintf conversion specifiers. */ { "O", 0, STD_C89, NOARGUMENTS, "", "" }, { "R", 0, STD_C89, NOARGUMENTS, "", "" }, { "I", 0, STD_C89, NOARGUMENTS, "", "" }, { "L", 0, STD_C89, NOARGUMENTS, "", "" }, { "U", 0, STD_C89, NOARGUMENTS, "", "" }, { "r", 0, STD_C89, { T89_I, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "", "" }, { "@", 0, STD_C89, NOARGUMENTS, "", "" }, { NULL, 0, 0, NOLENGTHS, NULL, NULL } }; static const format_char_info gcc_diag_char_table[] = { /* C89 conversion specifiers. 
*/ { "di", 0, STD_C89, { T89_I, BADLEN, BADLEN, T89_L, T9L_LL, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "ox", 0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "u", 0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "c", 0, STD_C89, { T89_I, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "s", 1, STD_C89, { T89_C, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "pq", "cR" }, { "p", 1, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "c" }, /* Custom conversion specifiers. */ /* %H will require "location_t" at runtime. */ { "H", 0, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, /* These will require a "tree" at runtime. */ { "J", 0, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "<>'", 0, STD_C89, NOARGUMENTS, "", "" }, { "m", 0, STD_C89, NOARGUMENTS, "q", "" }, { NULL, 0, 0, NOLENGTHS, NULL, NULL } }; static const format_char_info gcc_cdiag_char_table[] = { /* C89 conversion specifiers. */ { "di", 0, STD_C89, { T89_I, BADLEN, BADLEN, T89_L, T9L_LL, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "ox", 0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "u", 0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "c", 0, STD_C89, { T89_I, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "s", 1, STD_C89, { T89_C, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "pq", "cR" }, { "p", 1, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "c" }, /* Custom conversion specifiers. */ /* %H will require "location_t" at runtime. */ { "H", 0, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, /* These will require a "tree" at runtime. */ { "DEFJT", 0, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "<>'", 0, STD_C89, NOARGUMENTS, "", "" }, { "m", 0, STD_C89, NOARGUMENTS, "q", "" }, { NULL, 0, 0, NOLENGTHS, NULL, NULL } }; static const format_char_info gcc_cxxdiag_char_table[] = { /* C89 conversion specifiers. */ { "di", 0, STD_C89, { T89_I, BADLEN, BADLEN, T89_L, T9L_LL, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "ox", 0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "u", 0, STD_C89, { T89_UI, BADLEN, BADLEN, T89_UL, T9L_ULL, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "c", 0, STD_C89, { T89_I, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "s", 1, STD_C89, { T89_C, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "pq", "cR" }, { "p", 1, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "c" }, /* Custom conversion specifiers. */ /* %H will require "location_t" at runtime. */ { "H", 0, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, /* These will require a "tree" at runtime. */ { "ADEFJTV",0,STD_C89,{ T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q+#", "" }, /* These accept either an `int' or an `enum tree_code' (which is handled as an `int'.) 
*/ { "CLOPQ",0,STD_C89, { T89_I, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "q", "" }, { "<>'", 0, STD_C89, NOARGUMENTS, "", "" }, { "m", 0, STD_C89, NOARGUMENTS, "q", "" }, { NULL, 0, 0, NOLENGTHS, NULL, NULL } }; static const format_char_info scan_char_table[] = { /* C89 conversion specifiers. */ { "di", 1, STD_C89, { T89_I, T99_SC, T89_S, T89_L, T9L_LL, TEX_LL, T99_SST, T99_PD, T99_IM }, "*w'I", "W" }, { "u", 1, STD_C89, { T89_UI, T99_UC, T89_US, T89_UL, T9L_ULL, TEX_ULL, T99_ST, T99_UPD, T99_UIM }, "*w'I", "W" }, { "oxX", 1, STD_C89, { T89_UI, T99_UC, T89_US, T89_UL, T9L_ULL, TEX_ULL, T99_ST, T99_UPD, T99_UIM }, "*w", "W" }, { "efgEG", 1, STD_C89, { T89_F, BADLEN, BADLEN, T89_D, BADLEN, T89_LD, BADLEN, BADLEN, BADLEN }, "*w'", "W" }, { "c", 1, STD_C89, { T89_C, BADLEN, BADLEN, T94_W, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "*w", "cW" }, { "s", 1, STD_C89, { T89_C, BADLEN, BADLEN, T94_W, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "*aw", "cW" }, { "[", 1, STD_C89, { T89_C, BADLEN, BADLEN, T94_W, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "*aw", "cW[" }, { "p", 2, STD_C89, { T89_V, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "*w", "W" }, { "n", 1, STD_C89, { T89_I, T99_SC, T89_S, T89_L, T9L_LL, BADLEN, T99_SST, T99_PD, T99_IM }, "", "W" }, /* C99 conversion specifiers. */ { "FaA", 1, STD_C99, { T99_F, BADLEN, BADLEN, T99_D, BADLEN, T99_LD, BADLEN, BADLEN, BADLEN }, "*w'", "W" }, /* X/Open conversion specifiers. */ { "C", 1, STD_EXT, { TEX_W, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "*w", "W" }, { "S", 1, STD_EXT, { TEX_W, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN, BADLEN }, "*aw", "W" }, { NULL, 0, 0, NOLENGTHS, NULL, NULL } }; static const format_char_info time_char_table[] = { /* C89 conversion specifiers. */ { "ABZab", 0, STD_C89, NOLENGTHS, "^#", "" }, { "cx", 0, STD_C89, NOLENGTHS, "E", "3" }, { "HIMSUWdmw", 0, STD_C89, NOLENGTHS, "-_0Ow", "" }, { "j", 0, STD_C89, NOLENGTHS, "-_0Ow", "o" }, { "p", 0, STD_C89, NOLENGTHS, "#", "" }, { "X", 0, STD_C89, NOLENGTHS, "E", "" }, { "y", 0, STD_C89, NOLENGTHS, "EO-_0w", "4" }, { "Y", 0, STD_C89, NOLENGTHS, "-_0EOw", "o" }, { "%", 0, STD_C89, NOLENGTHS, "", "" }, /* C99 conversion specifiers. */ { "C", 0, STD_C99, NOLENGTHS, "-_0EOw", "o" }, { "D", 0, STD_C99, NOLENGTHS, "", "2" }, { "eVu", 0, STD_C99, NOLENGTHS, "-_0Ow", "" }, { "FRTnrt", 0, STD_C99, NOLENGTHS, "", "" }, { "g", 0, STD_C99, NOLENGTHS, "O-_0w", "2o" }, { "G", 0, STD_C99, NOLENGTHS, "-_0Ow", "o" }, { "h", 0, STD_C99, NOLENGTHS, "^#", "" }, { "z", 0, STD_C99, NOLENGTHS, "O", "o" }, /* GNU conversion specifiers. */ { "kls", 0, STD_EXT, NOLENGTHS, "-_0Ow", "" }, { "P", 0, STD_EXT, NOLENGTHS, "", "" }, { NULL, 0, 0, NOLENGTHS, NULL, NULL } }; static const format_char_info monetary_char_table[] = { { "in", 0, STD_C89, { T89_D, BADLEN, BADLEN, BADLEN, BADLEN, T89_LD, BADLEN, BADLEN, BADLEN }, "=^+(!-w#p", "" }, { NULL, 0, 0, NOLENGTHS, NULL, NULL } }; /* This must be in the same order as enum format_type. 
*/ static const format_kind_info format_types_orig[] = { { "printf", printf_length_specs, print_char_table, " +#0-'I", NULL, printf_flag_specs, printf_flag_pairs, FMT_FLAG_ARG_CONVERT|FMT_FLAG_DOLLAR_MULTIPLE|FMT_FLAG_USE_DOLLAR|FMT_FLAG_EMPTY_PREC_OK, 'w', 0, 'p', 0, 'L', &integer_type_node, &integer_type_node }, { "asm_fprintf", asm_fprintf_length_specs, asm_fprintf_char_table, " +#0-", NULL, asm_fprintf_flag_specs, asm_fprintf_flag_pairs, FMT_FLAG_ARG_CONVERT|FMT_FLAG_EMPTY_PREC_OK, 'w', 0, 'p', 0, 'L', NULL, NULL }, { "gcc_diag", gcc_diag_length_specs, gcc_diag_char_table, "q", NULL, gcc_diag_flag_specs, gcc_diag_flag_pairs, FMT_FLAG_ARG_CONVERT, 0, 0, 'p', 0, 'L', NULL, &integer_type_node }, { "gcc_cdiag", gcc_cdiag_length_specs, gcc_cdiag_char_table, "q", NULL, gcc_cdiag_flag_specs, gcc_cdiag_flag_pairs, FMT_FLAG_ARG_CONVERT, 0, 0, 'p', 0, 'L', NULL, &integer_type_node }, { "gcc_cxxdiag", gcc_cxxdiag_length_specs, gcc_cxxdiag_char_table, "q+#", NULL, gcc_cxxdiag_flag_specs, gcc_cxxdiag_flag_pairs, FMT_FLAG_ARG_CONVERT, 0, 0, 'p', 0, 'L', NULL, &integer_type_node }, { "scanf", scanf_length_specs, scan_char_table, "*'I", NULL, scanf_flag_specs, scanf_flag_pairs, FMT_FLAG_ARG_CONVERT|FMT_FLAG_SCANF_A_KLUDGE|FMT_FLAG_USE_DOLLAR|FMT_FLAG_ZERO_WIDTH_BAD|FMT_FLAG_DOLLAR_GAP_POINTER_OK, 'w', 0, 0, '*', 'L', NULL, NULL }, { "strftime", NULL, time_char_table, "_-0^#", "EO", strftime_flag_specs, strftime_flag_pairs, FMT_FLAG_FANCY_PERCENT_OK, 'w', 0, 0, 0, 0, NULL, NULL }, { "strfmon", strfmon_length_specs, monetary_char_table, "=^+(!-", NULL, strfmon_flag_specs, strfmon_flag_pairs, FMT_FLAG_ARG_CONVERT, 'w', '#', 'p', 0, 'L', NULL, NULL } }; /* This layer of indirection allows GCC to reassign format_types with new data if necessary, while still allowing the original data to be const. */ static const format_kind_info *format_types = format_types_orig; /* We can modify this one. */ static format_kind_info *dynamic_format_types; /* Structure detailing the results of checking a format function call where the format expression may be a conditional expression with many leaves resulting from nested conditional expressions. */ typedef struct { /* Number of leaves of the format argument that could not be checked as they were not string literals. */ int number_non_literal; /* Number of leaves of the format argument that were null pointers or string literals, but had extra format arguments. */ int number_extra_args; /* Number of leaves of the format argument that were null pointers or string literals, but had extra format arguments and used $ operand numbers. */ int number_dollar_extra_args; /* Number of leaves of the format argument that were wide string literals. */ int number_wide; /* Number of leaves of the format argument that were empty strings. */ int number_empty; /* Number of leaves of the format argument that were unterminated strings. */ int number_unterminated; /* Number of leaves of the format argument that were not counted above. 
*/ int number_other; } format_check_results; typedef struct { format_check_results *res; function_format_info *info; tree params; } format_check_context; static void check_format_info (function_format_info *, tree); static void check_format_arg (void *, tree, unsigned HOST_WIDE_INT); static void check_format_info_main (format_check_results *, function_format_info *, const char *, int, tree, unsigned HOST_WIDE_INT); static void init_dollar_format_checking (int, tree); static int maybe_read_dollar_number (const char **, int, tree, tree *, const format_kind_info *); static bool avoid_dollar_number (const char *); static void finish_dollar_format_checking (format_check_results *, int); static const format_flag_spec *get_flag_spec (const format_flag_spec *, int, const char *); static void check_format_types (format_wanted_type *, const char *, int); static void format_type_warning (const char *, const char *, int, tree, int, const char *, tree, int); /* Decode a format type from a string, returning the type, or format_type_error if not valid, in which case the caller should print an error message. */ static enum format_type decode_format_type (const char *s) { int i; int slen; slen = strlen (s); for (i = 0; i < (int) format_type_error; i++) { int alen; if (!strcmp (s, format_types[i].name)) break; alen = strlen (format_types[i].name); if (slen == alen + 4 && s[0] == '_' && s[1] == '_' && s[slen - 1] == '_' && s[slen - 2] == '_' && !strncmp (s + 2, format_types[i].name, alen)) break; } return ((enum format_type) i); } /* Check the argument list of a call to printf, scanf, etc. ATTRS are the attributes on the function type. PARAMS is the list of argument values. Also, if -Wmissing-format-attribute, warn for calls to vprintf or vscanf in functions with no such format attribute themselves. */ void check_function_format (tree attrs, tree params) { tree a; /* See if this function has any format attributes. */ for (a = attrs; a; a = TREE_CHAIN (a)) { if (is_attribute_p ("format", TREE_PURPOSE (a))) { /* Yup; check it. */ function_format_info info; decode_format_attr (TREE_VALUE (a), &info, 1); check_format_info (&info, params); if (warn_missing_format_attribute && info.first_arg_num == 0 && (format_types[info.format_type].flags & (int) FMT_FLAG_ARG_CONVERT)) { tree c; for (c = TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl)); c; c = TREE_CHAIN (c)) if (is_attribute_p ("format", TREE_PURPOSE (c)) && (decode_format_type (IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (c)))) == info.format_type)) break; if (c == NULL_TREE) { /* Check if the current function has a parameter to which the format attribute could be attached; if not, it can't be a candidate for a format attribute, despite the vprintf-like or vscanf-like call. */ tree args; for (args = DECL_ARGUMENTS (current_function_decl); args != 0; args = TREE_CHAIN (args)) { if (TREE_CODE (TREE_TYPE (args)) == POINTER_TYPE && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (args))) == char_type_node)) break; } if (args != 0) warning ("function might be possible candidate for %qs format attribute", format_types[info.format_type].name); } } } } } /* Variables used by the checking of $ operand number formats. 
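For example, printf ("%2$s: %1$d\n", num, str) converts STR with %s and NUM with %d, selecting each argument by its operand number rather than by its position.  The variables below record which operand numbers a format has used so that out-of-range and unused operands can be diagnosed.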
*/ static char *dollar_arguments_used = NULL; static char *dollar_arguments_pointer_p = NULL; static int dollar_arguments_alloc = 0; static int dollar_arguments_count; static int dollar_first_arg_num; static int dollar_max_arg_used; static int dollar_format_warned; /* Initialize the checking for a format string that may contain $ parameter number specifications; we will need to keep track of whether each parameter has been used. FIRST_ARG_NUM is the number of the first argument that is a parameter to the format, or 0 for a vprintf-style function; PARAMS is the list of arguments starting at this argument. */ static void init_dollar_format_checking (int first_arg_num, tree params) { tree oparams = params; dollar_first_arg_num = first_arg_num; dollar_arguments_count = 0; dollar_max_arg_used = 0; dollar_format_warned = 0; if (first_arg_num > 0) { while (params) { dollar_arguments_count++; params = TREE_CHAIN (params); } } if (dollar_arguments_alloc < dollar_arguments_count) { if (dollar_arguments_used) free (dollar_arguments_used); if (dollar_arguments_pointer_p) free (dollar_arguments_pointer_p); dollar_arguments_alloc = dollar_arguments_count; dollar_arguments_used = xmalloc (dollar_arguments_alloc); dollar_arguments_pointer_p = xmalloc (dollar_arguments_alloc); } if (dollar_arguments_alloc) { memset (dollar_arguments_used, 0, dollar_arguments_alloc); if (first_arg_num > 0) { int i = 0; params = oparams; while (params) { dollar_arguments_pointer_p[i] = (TREE_CODE (TREE_TYPE (TREE_VALUE (params))) == POINTER_TYPE); params = TREE_CHAIN (params); i++; } } } } /* Look for a decimal number followed by a $ in *FORMAT. If DOLLAR_NEEDED is set, it is an error if one is not found; otherwise, it is OK. If such a number is found, check whether it is within range and mark that numbered operand as being used for later checking. Returns the operand number if found and within range, zero if no such number was found and this is OK, or -1 on error. PARAMS points to the first operand of the format; PARAM_PTR is made to point to the parameter referred to. If a $ format is found, *FORMAT is updated to point just after it. */ static int maybe_read_dollar_number (const char **format, int dollar_needed, tree params, tree *param_ptr, const format_kind_info *fki) { int argnum; int overflow_flag; const char *fcp = *format; if (! ISDIGIT (*fcp)) { if (dollar_needed) { warning ("missing $ operand number in format"); return -1; } else return 0; } argnum = 0; overflow_flag = 0; while (ISDIGIT (*fcp)) { int nargnum; nargnum = 10 * argnum + (*fcp - '0'); if (nargnum < 0 || nargnum / 10 != argnum) overflow_flag = 1; argnum = nargnum; fcp++; } if (*fcp != '$') { if (dollar_needed) { warning ("missing $ operand number in format"); return -1; } else return 0; } *format = fcp + 1; if (pedantic && !dollar_format_warned) { warning ("%s does not support %%n$ operand number formats", C_STD_NAME (STD_EXT)); dollar_format_warned = 1; } if (overflow_flag || argnum == 0 || (dollar_first_arg_num && argnum > dollar_arguments_count)) { warning ("operand number out of range in format"); return -1; } if (argnum > dollar_max_arg_used) dollar_max_arg_used = argnum; /* For vprintf-style functions we may need to allocate more memory to track which arguments are used. 
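(For such functions DOLLAR_FIRST_ARG_NUM is 0, so the number of arguments was not known when the tracking arrays were first allocated; the loop below grows them on demand.)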
*/ while (dollar_arguments_alloc < dollar_max_arg_used) { int nalloc; nalloc = 2 * dollar_arguments_alloc + 16; dollar_arguments_used = xrealloc (dollar_arguments_used, nalloc); dollar_arguments_pointer_p = xrealloc (dollar_arguments_pointer_p, nalloc); memset (dollar_arguments_used + dollar_arguments_alloc, 0, nalloc - dollar_arguments_alloc); dollar_arguments_alloc = nalloc; } if (!(fki->flags & (int) FMT_FLAG_DOLLAR_MULTIPLE) && dollar_arguments_used[argnum - 1] == 1) { dollar_arguments_used[argnum - 1] = 2; warning ("format argument %d used more than once in %s format", argnum, fki->name); } else dollar_arguments_used[argnum - 1] = 1; if (dollar_first_arg_num) { int i; *param_ptr = params; for (i = 1; i < argnum && *param_ptr != 0; i++) *param_ptr = TREE_CHAIN (*param_ptr); if (*param_ptr == 0) { /* This case shouldn't be caught here. */ abort (); } } else *param_ptr = 0; return argnum; } /* Ensure that FORMAT does not start with a decimal number followed by a $; give a diagnostic and return true if it does, false otherwise. */ static bool avoid_dollar_number (const char *format) { if (!ISDIGIT (*format)) return false; while (ISDIGIT (*format)) format++; if (*format == '$') { warning ("$ operand number used after format without operand number"); return true; } return false; } /* Finish the checking for a format string that used $ operand number formats instead of non-$ formats. We check for unused operands before used ones (a serious error, since the implementation of the format function can't know what types to pass to va_arg to find the later arguments). and for unused operands at the end of the format (if we know how many arguments the format had, so not for vprintf). If there were operand numbers out of range on a non-vprintf-style format, we won't have reached here. If POINTER_GAP_OK, unused arguments are OK if all arguments are pointers. */ static void finish_dollar_format_checking (format_check_results *res, int pointer_gap_ok) { int i; bool found_pointer_gap = false; for (i = 0; i < dollar_max_arg_used; i++) { if (!dollar_arguments_used[i]) { if (pointer_gap_ok && (dollar_first_arg_num == 0 || dollar_arguments_pointer_p[i])) found_pointer_gap = true; else warning ("format argument %d unused before used argument %d in $-style format", i + 1, dollar_max_arg_used); } } if (found_pointer_gap || (dollar_first_arg_num && dollar_max_arg_used < dollar_arguments_count)) { res->number_other--; res->number_dollar_extra_args++; } } /* Retrieve the specification for a format flag. SPEC contains the specifications for format flags for the applicable kind of format. FLAG is the flag in question. If PREDICATES is NULL, the basic spec for that flag must be retrieved and this function aborts if it cannot be found. If PREDICATES is not NULL, it is a string listing possible predicates for the spec entry; if an entry predicated on any of these is found, it is returned, otherwise NULL is returned. */ static const format_flag_spec * get_flag_spec (const format_flag_spec *spec, int flag, const char *predicates) { int i; for (i = 0; spec[i].flag_char != 0; i++) { if (spec[i].flag_char != flag) continue; if (predicates != NULL) { if (spec[i].predicate != 0 && strchr (predicates, spec[i].predicate) != 0) return &spec[i]; } else if (spec[i].predicate == 0) return &spec[i]; } if (predicates == NULL) abort (); else return NULL; } /* Check the argument list of a call to printf, scanf, etc. INFO points to the function_format_info structure. PARAMS is the list of argument values. 
*/ static void check_format_info (function_format_info *info, tree params) { format_check_context format_ctx; unsigned HOST_WIDE_INT arg_num; tree format_tree; format_check_results res; /* Skip to format argument. If the argument isn't available, there's no work for us to do; prototype checking will catch the problem. */ for (arg_num = 1; ; ++arg_num) { if (params == 0) return; if (arg_num == info->format_num) break; params = TREE_CHAIN (params); } format_tree = TREE_VALUE (params); params = TREE_CHAIN (params); if (format_tree == 0) return; res.number_non_literal = 0; res.number_extra_args = 0; res.number_dollar_extra_args = 0; res.number_wide = 0; res.number_empty = 0; res.number_unterminated = 0; res.number_other = 0; format_ctx.res = &res; format_ctx.info = info; format_ctx.params = params; check_function_arguments_recurse (check_format_arg, &format_ctx, format_tree, arg_num); if (res.number_non_literal > 0) { /* Functions taking a va_list normally pass a non-literal format string. These functions typically are declared with first_arg_num == 0, so avoid warning in those cases. */ if (!(format_types[info->format_type].flags & (int) FMT_FLAG_ARG_CONVERT)) { /* For strftime-like formats, warn for not checking the format string; but there are no arguments to check. */ if (warn_format_nonliteral) warning ("format not a string literal, format string not checked"); } else if (info->first_arg_num != 0) { /* If there are no arguments for the format at all, we may have printf (foo) which is likely to be a security hole. */ while (arg_num + 1 < info->first_arg_num) { if (params == 0) break; params = TREE_CHAIN (params); ++arg_num; } if (params == 0 && (warn_format_nonliteral || warn_format_security)) warning ("format not a string literal and no format arguments"); else if (warn_format_nonliteral) warning ("format not a string literal, argument types not checked"); } } /* If there were extra arguments to the format, normally warn. However, the standard does say extra arguments are ignored, so in the specific case where we have multiple leaves (conditional expressions or ngettext) allow extra arguments if at least one leaf didn't have extra arguments, but was otherwise OK (either non-literal or checked OK). If the format is an empty string, this should be counted similarly to the case of extra format arguments. */ if (res.number_extra_args > 0 && res.number_non_literal == 0 && res.number_other == 0 && warn_format_extra_args) warning ("too many arguments for format"); if (res.number_dollar_extra_args > 0 && res.number_non_literal == 0 && res.number_other == 0 && warn_format_extra_args) warning ("unused arguments in $-style format"); if (res.number_empty > 0 && res.number_non_literal == 0 && res.number_other == 0 && warn_format_zero_length) warning ("zero-length %s format string", format_types[info->format_type].name); if (res.number_wide > 0) warning ("format is a wide character string"); if (res.number_unterminated > 0) warning ("unterminated format string"); } /* Callback from check_function_arguments_recurse to check a format string. FORMAT_TREE is the format parameter. ARG_NUM is the number of the format argument. CTX points to a format_check_context. 
*/ static void check_format_arg (void *ctx, tree format_tree, unsigned HOST_WIDE_INT arg_num) { format_check_context *format_ctx = ctx; format_check_results *res = format_ctx->res; function_format_info *info = format_ctx->info; tree params = format_ctx->params; int format_length; HOST_WIDE_INT offset; const char *format_chars; tree array_size = 0; tree array_init; if (integer_zerop (format_tree)) { /* Skip to first argument to check, so we can see if this format has any arguments (it shouldn't). */ while (arg_num + 1 < info->first_arg_num) { if (params == 0) return; params = TREE_CHAIN (params); ++arg_num; } if (params == 0) res->number_other++; else res->number_extra_args++; return; } offset = 0; if (TREE_CODE (format_tree) == PLUS_EXPR) { tree arg0, arg1; arg0 = TREE_OPERAND (format_tree, 0); arg1 = TREE_OPERAND (format_tree, 1); STRIP_NOPS (arg0); STRIP_NOPS (arg1); if (TREE_CODE (arg1) == INTEGER_CST) format_tree = arg0; else if (TREE_CODE (arg0) == INTEGER_CST) { format_tree = arg1; arg1 = arg0; } else { res->number_non_literal++; return; } if (!host_integerp (arg1, 0) || (offset = tree_low_cst (arg1, 0)) < 0) { res->number_non_literal++; return; } } if (TREE_CODE (format_tree) != ADDR_EXPR) { res->number_non_literal++; return; } format_tree = TREE_OPERAND (format_tree, 0); if (TREE_CODE (format_tree) == VAR_DECL && TREE_CODE (TREE_TYPE (format_tree)) == ARRAY_TYPE && (array_init = decl_constant_value (format_tree)) != format_tree && TREE_CODE (array_init) == STRING_CST) { /* Extract the string constant initializer. Note that this may include a trailing NUL character that is not in the array (e.g. const char a[3] = "foo";). */ array_size = DECL_SIZE_UNIT (format_tree); format_tree = array_init; } if (TREE_CODE (format_tree) != STRING_CST) { res->number_non_literal++; return; } if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (format_tree))) != char_type_node) { res->number_wide++; return; } format_chars = TREE_STRING_POINTER (format_tree); format_length = TREE_STRING_LENGTH (format_tree); if (array_size != 0) { /* Variable length arrays can't be initialized. */ if (TREE_CODE (array_size) != INTEGER_CST) abort (); if (host_integerp (array_size, 0)) { HOST_WIDE_INT array_size_value = TREE_INT_CST_LOW (array_size); if (array_size_value > 0 && array_size_value == (int) array_size_value && format_length > array_size_value) format_length = array_size_value; } } if (offset) { if (offset >= format_length) { res->number_non_literal++; return; } format_chars += offset; format_length -= offset; } if (format_length < 1) { res->number_unterminated++; return; } if (format_length == 1) { res->number_empty++; return; } if (format_chars[--format_length] != 0) { res->number_unterminated++; return; } /* Skip to first argument to check. */ while (arg_num + 1 < info->first_arg_num) { if (params == 0) return; params = TREE_CHAIN (params); ++arg_num; } /* Provisionally increment res->number_other; check_format_info_main will decrement it if it finds there are extra arguments, but this way need not adjust it for every return. */ res->number_other++; check_format_info_main (res, info, format_chars, format_length, params, arg_num); } /* Do the main part of checking a call to a format function. FORMAT_CHARS is the NUL-terminated format string (which at this point may contain internal NUL characters); FORMAT_LENGTH is its length (excluding the terminating NUL character). ARG_NUM is one less than the number of the first format argument to check; PARAMS points to that format argument in the list of arguments. 
*/ static void check_format_info_main (format_check_results *res, function_format_info *info, const char *format_chars, int format_length, tree params, unsigned HOST_WIDE_INT arg_num) { const char *orig_format_chars = format_chars; tree first_fillin_param = params; const format_kind_info *fki = &format_types[info->format_type]; const format_flag_spec *flag_specs = fki->flag_specs; const format_flag_pair *bad_flag_pairs = fki->bad_flag_pairs; /* -1 if no conversions taking an operand have been found; 0 if one has and it didn't use $; 1 if $ formats are in use. */ int has_operand_number = -1; init_dollar_format_checking (info->first_arg_num, first_fillin_param); while (1) { int i; int suppressed = FALSE; const char *length_chars = NULL; enum format_lengths length_chars_val = FMT_LEN_none; enum format_std_version length_chars_std = STD_C89; int format_char; tree cur_param; tree wanted_type; int main_arg_num = 0; tree main_arg_params = 0; enum format_std_version wanted_type_std; const char *wanted_type_name; format_wanted_type width_wanted_type; format_wanted_type precision_wanted_type; format_wanted_type main_wanted_type; format_wanted_type *first_wanted_type = NULL; format_wanted_type *last_wanted_type = NULL; const format_length_info *fli = NULL; const format_char_info *fci = NULL; char flag_chars[256]; int aflag = 0; const char *format_start = format_chars; if (*format_chars == 0) { if (format_chars - orig_format_chars != format_length) warning ("embedded %<\\0%> in format"); if (info->first_arg_num != 0 && params != 0 && has_operand_number <= 0) { res->number_other--; res->number_extra_args++; } if (has_operand_number > 0) finish_dollar_format_checking (res, fki->flags & (int) FMT_FLAG_DOLLAR_GAP_POINTER_OK); return; } if (*format_chars++ != '%') continue; if (*format_chars == 0) { warning ("spurious trailing %<%%%> in format"); continue; } if (*format_chars == '%') { ++format_chars; continue; } flag_chars[0] = 0; if ((fki->flags & (int) FMT_FLAG_USE_DOLLAR) && has_operand_number != 0) { /* Possibly read a $ operand number at the start of the format. If one was previously used, one is required here. If one is not used here, we can't immediately conclude this is a format without them, since it could be printf %m or scanf %*. */ int opnum; opnum = maybe_read_dollar_number (&format_chars, 0, first_fillin_param, &main_arg_params, fki); if (opnum == -1) return; else if (opnum > 0) { has_operand_number = 1; main_arg_num = opnum + info->first_arg_num - 1; } } else if (fki->flags & FMT_FLAG_USE_DOLLAR) { if (avoid_dollar_number (format_chars)) return; } /* Read any format flags, but do not yet validate them beyond removing duplicates, since in general validation depends on the rest of the format. */ while (*format_chars != 0 && strchr (fki->flag_chars, *format_chars) != 0) { const format_flag_spec *s = get_flag_spec (flag_specs, *format_chars, NULL); if (strchr (flag_chars, *format_chars) != 0) { warning ("repeated %s in format", _(s->name)); } else { i = strlen (flag_chars); flag_chars[i++] = *format_chars; flag_chars[i] = 0; } if (s->skip_next_char) { ++format_chars; if (*format_chars == 0) { warning ("missing fill character at end of strfmon format"); return; } } ++format_chars; } /* Read any format width, possibly * or *m$. */ if (fki->width_char != 0) { if (fki->width_type != NULL && *format_chars == '*') { i = strlen (flag_chars); flag_chars[i++] = fki->width_char; flag_chars[i] = 0; /* "...a field width...may be indicated by an asterisk. 
In this case, an int argument supplies the field width..." */ ++format_chars; if (has_operand_number != 0) { int opnum; opnum = maybe_read_dollar_number (&format_chars, has_operand_number == 1, first_fillin_param, &params, fki); if (opnum == -1) return; else if (opnum > 0) { has_operand_number = 1; arg_num = opnum + info->first_arg_num - 1; } else has_operand_number = 0; } else { if (avoid_dollar_number (format_chars)) return; } if (info->first_arg_num != 0) { if (params == 0) { warning ("too few arguments for format"); return; } cur_param = TREE_VALUE (params); if (has_operand_number <= 0) { params = TREE_CHAIN (params); ++arg_num; } width_wanted_type.wanted_type = *fki->width_type; width_wanted_type.wanted_type_name = NULL; width_wanted_type.pointer_count = 0; width_wanted_type.char_lenient_flag = 0; width_wanted_type.writing_in_flag = 0; width_wanted_type.reading_from_flag = 0; width_wanted_type.name = _("field width"); width_wanted_type.param = cur_param; width_wanted_type.arg_num = arg_num; width_wanted_type.next = NULL; if (last_wanted_type != 0) last_wanted_type->next = &width_wanted_type; if (first_wanted_type == 0) first_wanted_type = &width_wanted_type; last_wanted_type = &width_wanted_type; } } else { /* Possibly read a numeric width. If the width is zero, we complain if appropriate. */ int non_zero_width_char = FALSE; int found_width = FALSE; while (ISDIGIT (*format_chars)) { found_width = TRUE; if (*format_chars != '0') non_zero_width_char = TRUE; ++format_chars; } if (found_width && !non_zero_width_char && (fki->flags & (int) FMT_FLAG_ZERO_WIDTH_BAD)) warning ("zero width in %s format", fki->name); if (found_width) { i = strlen (flag_chars); flag_chars[i++] = fki->width_char; flag_chars[i] = 0; } } } /* Read any format left precision (must be a number, not *). */ if (fki->left_precision_char != 0 && *format_chars == '#') { ++format_chars; i = strlen (flag_chars); flag_chars[i++] = fki->left_precision_char; flag_chars[i] = 0; if (!ISDIGIT (*format_chars)) warning ("empty left precision in %s format", fki->name); while (ISDIGIT (*format_chars)) ++format_chars; } /* Read any format precision, possibly * or *m$. */ if (fki->precision_char != 0 && *format_chars == '.') { ++format_chars; i = strlen (flag_chars); flag_chars[i++] = fki->precision_char; flag_chars[i] = 0; if (fki->precision_type != NULL && *format_chars == '*') { /* "...a...precision...may be indicated by an asterisk. In this case, an int argument supplies the...precision."
*/ ++format_chars; if (has_operand_number != 0) { int opnum; opnum = maybe_read_dollar_number (&format_chars, has_operand_number == 1, first_fillin_param, &params, fki); if (opnum == -1) return; else if (opnum > 0) { has_operand_number = 1; arg_num = opnum + info->first_arg_num - 1; } else has_operand_number = 0; } else { if (avoid_dollar_number (format_chars)) return; } if (info->first_arg_num != 0) { if (params == 0) { warning ("too few arguments for format"); return; } cur_param = TREE_VALUE (params); if (has_operand_number <= 0) { params = TREE_CHAIN (params); ++arg_num; } precision_wanted_type.wanted_type = *fki->precision_type; precision_wanted_type.wanted_type_name = NULL; precision_wanted_type.pointer_count = 0; precision_wanted_type.char_lenient_flag = 0; precision_wanted_type.writing_in_flag = 0; precision_wanted_type.reading_from_flag = 0; precision_wanted_type.name = _("field precision"); precision_wanted_type.param = cur_param; precision_wanted_type.arg_num = arg_num; precision_wanted_type.next = NULL; if (last_wanted_type != 0) last_wanted_type->next = &precision_wanted_type; if (first_wanted_type == 0) first_wanted_type = &precision_wanted_type; last_wanted_type = &precision_wanted_type; } } else { if (!(fki->flags & (int) FMT_FLAG_EMPTY_PREC_OK) && !ISDIGIT (*format_chars)) warning ("empty precision in %s format", fki->name); while (ISDIGIT (*format_chars)) ++format_chars; } } /* Read any length modifier, if this kind of format has them. */ fli = fki->length_char_specs; length_chars = NULL; length_chars_val = FMT_LEN_none; length_chars_std = STD_C89; if (fli) { while (fli->name != 0 && fli->name[0] != *format_chars) fli++; if (fli->name != 0) { format_chars++; if (fli->double_name != 0 && fli->name[0] == *format_chars) { format_chars++; length_chars = fli->double_name; length_chars_val = fli->double_index; length_chars_std = fli->double_std; } else { length_chars = fli->name; length_chars_val = fli->index; length_chars_std = fli->std; } i = strlen (flag_chars); flag_chars[i++] = fki->length_code_char; flag_chars[i] = 0; } if (pedantic) { /* Warn if the length modifier is non-standard. */ if (ADJ_STD (length_chars_std) > C_STD_VER) warning ("%s does not support the %qs %s length modifier", C_STD_NAME (length_chars_std), length_chars, fki->name); } } /* Read any modifier (strftime E/O). */ if (fki->modifier_chars != NULL) { while (*format_chars != 0 && strchr (fki->modifier_chars, *format_chars) != 0) { if (strchr (flag_chars, *format_chars) != 0) { const format_flag_spec *s = get_flag_spec (flag_specs, *format_chars, NULL); warning ("repeated %s in format", _(s->name)); } else { i = strlen (flag_chars); flag_chars[i++] = *format_chars; flag_chars[i] = 0; } ++format_chars; } } /* Handle the scanf allocation kludge. */ if (fki->flags & (int) FMT_FLAG_SCANF_A_KLUDGE) { if (*format_chars == 'a' && !flag_isoc99) { if (format_chars[1] == 's' || format_chars[1] == 'S' || format_chars[1] == '[') { /* `a' is used as a flag.
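That is, the pre-C99 GNU extension scanf ("%as", &str) in which `a' asks scanf to allocate the string being read, so the argument is checked as one more level of pointer (char ** rather than char *) and as being written into.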
*/ i = strlen (flag_chars); flag_chars[i++] = 'a'; flag_chars[i] = 0; format_chars++; } } } format_char = *format_chars; if (format_char == 0 || (!(fki->flags & (int) FMT_FLAG_FANCY_PERCENT_OK) && format_char == '%')) { warning ("conversion lacks type at end of format"); continue; } format_chars++; fci = fki->conversion_specs; while (fci->format_chars != 0 && strchr (fci->format_chars, format_char) == 0) ++fci; if (fci->format_chars == 0) { if (ISGRAPH(format_char)) warning ("unknown conversion type character %qc in format", format_char); else warning ("unknown conversion type character 0x%x in format", format_char); continue; } if (pedantic) { if (ADJ_STD (fci->std) > C_STD_VER) warning ("%s does not support the %<%%%c%> %s format", C_STD_NAME (fci->std), format_char, fki->name); } /* Validate the individual flags used, removing any that are invalid. */ { int d = 0; for (i = 0; flag_chars[i] != 0; i++) { const format_flag_spec *s = get_flag_spec (flag_specs, flag_chars[i], NULL); flag_chars[i - d] = flag_chars[i]; if (flag_chars[i] == fki->length_code_char) continue; if (strchr (fci->flag_chars, flag_chars[i]) == 0) { warning ("%s used with %<%%%c%> %s format", _(s->name), format_char, fki->name); d++; continue; } if (pedantic) { const format_flag_spec *t; if (ADJ_STD (s->std) > C_STD_VER) warning ("%s does not support %s", C_STD_NAME (s->std), _(s->long_name)); t = get_flag_spec (flag_specs, flag_chars[i], fci->flags2); if (t != NULL && ADJ_STD (t->std) > ADJ_STD (s->std)) { const char *long_name = (t->long_name != NULL ? t->long_name : s->long_name); if (ADJ_STD (t->std) > C_STD_VER) warning ("%s does not support %s with the %<%%%c%> %s format", C_STD_NAME (t->std), _(long_name), format_char, fki->name); } } } flag_chars[i - d] = 0; } if ((fki->flags & (int) FMT_FLAG_SCANF_A_KLUDGE) && strchr (flag_chars, 'a') != 0) aflag = 1; if (fki->suppression_char && strchr (flag_chars, fki->suppression_char) != 0) suppressed = 1; /* Validate the pairs of flags used. */ for (i = 0; bad_flag_pairs[i].flag_char1 != 0; i++) { const format_flag_spec *s, *t; if (strchr (flag_chars, bad_flag_pairs[i].flag_char1) == 0) continue; if (strchr (flag_chars, bad_flag_pairs[i].flag_char2) == 0) continue; if (bad_flag_pairs[i].predicate != 0 && strchr (fci->flags2, bad_flag_pairs[i].predicate) == 0) continue; s = get_flag_spec (flag_specs, bad_flag_pairs[i].flag_char1, NULL); t = get_flag_spec (flag_specs, bad_flag_pairs[i].flag_char2, NULL); if (bad_flag_pairs[i].ignored) { if (bad_flag_pairs[i].predicate != 0) warning ("%s ignored with %s and %<%%%c%> %s format", _(s->name), _(t->name), format_char, fki->name); else warning ("%s ignored with %s in %s format", _(s->name), _(t->name), fki->name); } else { if (bad_flag_pairs[i].predicate != 0) warning ("use of %s and %s together with %<%%%c%> %s format", _(s->name), _(t->name), format_char, fki->name); else warning ("use of %s and %s together in %s format", _(s->name), _(t->name), fki->name); } } /* Give Y2K warnings. 
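That is, when -Wformat-y2k is given, warn about strftime conversions that can yield only the last two digits of the year, e.g. %y, and locale-dependent ones such as %c and %x that may do so in some locales.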
*/ if (warn_format_y2k) { int y2k_level = 0; if (strchr (fci->flags2, '4') != 0) if (strchr (flag_chars, 'E') != 0) y2k_level = 3; else y2k_level = 2; else if (strchr (fci->flags2, '3') != 0) y2k_level = 3; else if (strchr (fci->flags2, '2') != 0) y2k_level = 2; if (y2k_level == 3) warning ("%<%%%c%> yields only last 2 digits of year in some locales", format_char); else if (y2k_level == 2) warning ("%<%%%c%> yields only last 2 digits of year", format_char); } if (strchr (fci->flags2, '[') != 0) { /* Skip over scan set, in case it happens to have '%' in it. */ if (*format_chars == '^') ++format_chars; /* Find closing bracket; if one is hit immediately, then it's part of the scan set rather than a terminator. */ if (*format_chars == ']') ++format_chars; while (*format_chars && *format_chars != ']') ++format_chars; if (*format_chars != ']') /* The end of the format string was reached. */ warning ("no closing %<]%> for %<%%[%> format"); } wanted_type = 0; wanted_type_name = 0; if (fki->flags & (int) FMT_FLAG_ARG_CONVERT) { wanted_type = (fci->types[length_chars_val].type ? *fci->types[length_chars_val].type : 0); wanted_type_name = fci->types[length_chars_val].name; wanted_type_std = fci->types[length_chars_val].std; if (wanted_type == 0) { warning ("use of %qs length modifier with %qc type character", length_chars, format_char); /* Heuristic: skip one argument when an invalid length/type combination is encountered. */ arg_num++; if (params == 0) { warning ("too few arguments for format"); return; } params = TREE_CHAIN (params); continue; } else if (pedantic /* Warn if non-standard, provided it is more non-standard than the length and type characters that may already have been warned for. */ && ADJ_STD (wanted_type_std) > ADJ_STD (length_chars_std) && ADJ_STD (wanted_type_std) > ADJ_STD (fci->std)) { if (ADJ_STD (wanted_type_std) > C_STD_VER) warning ("%s does not support the %<%%%s%c%> %s format", C_STD_NAME (wanted_type_std), length_chars, format_char, fki->name); } } /* Finally. . .check type of argument against desired type! 
*/ if (info->first_arg_num == 0) continue; if ((fci->pointer_count == 0 && wanted_type == void_type_node) || suppressed) { if (main_arg_num != 0) { if (suppressed) warning ("operand number specified with suppressed assignment"); else warning ("operand number specified for format taking no argument"); } } else { if (main_arg_num != 0) { arg_num = main_arg_num; params = main_arg_params; } else { ++arg_num; if (has_operand_number > 0) { warning ("missing $ operand number in format"); return; } else has_operand_number = 0; if (params == 0) { warning ("too few arguments for format"); return; } } cur_param = TREE_VALUE (params); params = TREE_CHAIN (params); main_wanted_type.wanted_type = wanted_type; main_wanted_type.wanted_type_name = wanted_type_name; main_wanted_type.pointer_count = fci->pointer_count + aflag; main_wanted_type.char_lenient_flag = 0; if (strchr (fci->flags2, 'c') != 0) main_wanted_type.char_lenient_flag = 1; main_wanted_type.writing_in_flag = 0; main_wanted_type.reading_from_flag = 0; if (aflag) main_wanted_type.writing_in_flag = 1; else { if (strchr (fci->flags2, 'W') != 0) main_wanted_type.writing_in_flag = 1; if (strchr (fci->flags2, 'R') != 0) main_wanted_type.reading_from_flag = 1; } main_wanted_type.name = NULL; main_wanted_type.param = cur_param; main_wanted_type.arg_num = arg_num; main_wanted_type.next = NULL; if (last_wanted_type != 0) last_wanted_type->next = &main_wanted_type; if (first_wanted_type == 0) first_wanted_type = &main_wanted_type; last_wanted_type = &main_wanted_type; } if (first_wanted_type != 0) check_format_types (first_wanted_type, format_start, format_chars - format_start); } } /* Check the argument types from a single format conversion (possibly including width and precision arguments). */ static void check_format_types (format_wanted_type *types, const char *format_start, int format_length) { for (; types != 0; types = types->next) { tree cur_param; tree cur_type; tree orig_cur_type; tree wanted_type; int arg_num; int i; int char_type_flag; cur_param = types->param; cur_type = TREE_TYPE (cur_param); if (cur_type == error_mark_node) continue; orig_cur_type = cur_type; char_type_flag = 0; wanted_type = types->wanted_type; arg_num = types->arg_num; /* The following should not occur here. */ if (wanted_type == 0) abort (); if (wanted_type == void_type_node && types->pointer_count == 0) abort (); if (types->pointer_count == 0) wanted_type = lang_hooks.types.type_promotes_to (wanted_type); wanted_type = TYPE_MAIN_VARIANT (wanted_type); STRIP_NOPS (cur_param); /* Check the types of any additional pointer arguments that precede the "real" argument. */ for (i = 0; i < types->pointer_count; ++i) { if (TREE_CODE (cur_type) == POINTER_TYPE) { cur_type = TREE_TYPE (cur_type); if (cur_type == error_mark_node) break; /* Check for writing through a NULL pointer. */ if (types->writing_in_flag && i == 0 && cur_param != 0 && integer_zerop (cur_param)) warning ("writing through null pointer (arg %d)", arg_num); /* Check for reading through a NULL pointer. */ if (types->reading_from_flag && i == 0 && cur_param != 0 && integer_zerop (cur_param)) warning ("reading through null pointer (arg %d)", arg_num); if (cur_param != 0 && TREE_CODE (cur_param) == ADDR_EXPR) cur_param = TREE_OPERAND (cur_param, 0); else cur_param = 0; /* See if this is an attempt to write into a const type with scanf or with printf "%n". 
Note: the writing in happens at the first indirection only, if for example void * const * is passed to scanf %p; passing const void ** is simply passing an incompatible type. */ if (types->writing_in_flag && i == 0 && (TYPE_READONLY (cur_type) || (cur_param != 0 && (TREE_CODE_CLASS (TREE_CODE (cur_param)) == 'c' || (DECL_P (cur_param) && TREE_READONLY (cur_param)))))) warning ("writing into constant object (arg %d)", arg_num); /* If there are extra type qualifiers beyond the first indirection, then this makes the types technically incompatible. */ if (i > 0 && pedantic && (TYPE_READONLY (cur_type) || TYPE_VOLATILE (cur_type) || TYPE_RESTRICT (cur_type))) warning ("extra type qualifiers in format argument (arg %d)", arg_num); } else { format_type_warning (types->name, format_start, format_length, wanted_type, types->pointer_count, types->wanted_type_name, orig_cur_type, arg_num); break; } } if (i < types->pointer_count) continue; cur_type = TYPE_MAIN_VARIANT (cur_type); /* Check whether the argument type is a character type. This leniency only applies to certain formats, flagged with 'c'. */ if (types->char_lenient_flag) char_type_flag = (cur_type == char_type_node || cur_type == signed_char_type_node || cur_type == unsigned_char_type_node); /* Check the type of the "real" argument, if there's a type we want. */ if (wanted_type == cur_type) continue; /* If we want `void *', allow any pointer type. (Anything else would already have got a warning.) With -pedantic, only allow pointers to void and to character types. */ if (wanted_type == void_type_node && (!pedantic || (i == 1 && char_type_flag))) continue; /* Don't warn about differences merely in signedness, unless -pedantic. With -pedantic, warn if the type is a pointer target and not a character type, and for character types at a second level of indirection. */ if (TREE_CODE (wanted_type) == INTEGER_TYPE && TREE_CODE (cur_type) == INTEGER_TYPE && (! pedantic || i == 0 || (i == 1 && char_type_flag)) && (TYPE_UNSIGNED (wanted_type) ? wanted_type == c_common_unsigned_type (cur_type) : wanted_type == c_common_signed_type (cur_type))) continue; /* Likewise, "signed char", "unsigned char" and "char" are equivalent but the above test won't consider them equivalent. */ if (wanted_type == char_type_node && (! pedantic || i < 2) && char_type_flag) continue; /* Now we have a type mismatch. */ format_type_warning (types->name, format_start, format_length, wanted_type, types->pointer_count, types->wanted_type_name, orig_cur_type, arg_num); } } /* Give a warning about a format argument of different type from that expected. DESCR is a description such as "field precision", or NULL for an ordinary format. For an ordinary format, FORMAT_START points to where the format starts in the format string and FORMAT_LENGTH is its length. WANTED_TYPE is the type the argument should have after POINTER_COUNT pointer dereferences. WANTED_TYPE_NAME is a possibly more friendly name of WANTED_TYPE, or NULL if the ordinary name of the type should be used. ARG_TYPE is the type of the actual argument. ARG_NUM is the number of that argument. */ static void format_type_warning (const char *descr, const char *format_start, int format_length, tree wanted_type, int pointer_count, const char *wanted_type_name, tree arg_type, int arg_num) { char *p; /* If ARG_TYPE is a typedef with a misleading name (for example, size_t but not the standard size_t expected by printf %zu), avoid printing the typedef name.
*/ if (wanted_type_name && TYPE_NAME (arg_type) && TREE_CODE (TYPE_NAME (arg_type)) == TYPE_DECL && DECL_NAME (TYPE_NAME (arg_type)) && !strcmp (wanted_type_name, lang_hooks.decl_printable_name (TYPE_NAME (arg_type), 2))) arg_type = TYPE_MAIN_VARIANT (arg_type); /* The format type and name exclude any '*' for pointers, so those must be formatted manually. For all the types we currently have, this is adequate, but formats taking pointers to functions or arrays would require the full type to be built up in order to print it with %T. */ p = alloca (pointer_count + 2); if (pointer_count == 0) p[0] = 0; else if (c_dialect_cxx ()) { memset (p, '*', pointer_count); p[pointer_count] = 0; } else { p[0] = ' '; memset (p + 1, '*', pointer_count); p[pointer_count + 1] = 0; } if (wanted_type_name) { if (descr) warning ("%s should have type %<%s%s%>, but argument %d has type %qT", descr, wanted_type_name, p, arg_num, arg_type); else warning ("format %q.*s expects type %<%s%s%>, but argument %d has type %qT", format_length, format_start, wanted_type_name, p, arg_num, arg_type); } else { if (descr) warning ("%s should have type %<%T%s%>, but argument %d has type %qT", descr, wanted_type, p, arg_num, arg_type); else warning ("format %q.*s expects type %<%T%s%>, but argument %d has type %qT", format_length, format_start, wanted_type, p, arg_num, arg_type); } } /* Given a format_char_info array FCI, and a character C, this function returns the index into the conversion_specs where that specifier's data is located. If the character isn't found it aborts. */ static unsigned int find_char_info_specifier_index (const format_char_info *fci, int c) { unsigned int i = 0; while (fci->format_chars) { if (strchr (fci->format_chars, c)) return i; i++; fci++; } /* We shouldn't be looking for a non-existent specifier. */ abort (); } /* Given a format_length_info array FLI, and a character C, this function returns the index into the conversion_specs where that modifier's data is located. If the character isn't found it aborts. */ static unsigned int find_length_info_modifier_index (const format_length_info *fli, int c) { unsigned int i = 0; while (fli->name) { if (strchr (fli->name, c)) return i; i++; fli++; } /* We shouldn't be looking for a non-existent modifier. */ abort (); } /* Determine the type of HOST_WIDE_INT in the code being compiled for use in GCC's __asm_fprintf__ custom format attribute. You must have set dynamic_format_types before calling this function. */ static void init_dynamic_asm_fprintf_info (void) { static tree hwi; if (!hwi) { format_length_info *new_asm_fprintf_length_specs; unsigned int i; /* Find the underlying type for HOST_WIDE_INT. For the %w length modifier to work, one must have issued: "typedef HOST_WIDE_INT __gcc_host_wide_int__;" in one's source code prior to using that modifier. */ if (!(hwi = maybe_get_identifier ("__gcc_host_wide_int__")) || !(hwi = DECL_ORIGINAL_TYPE (identifier_global_value (hwi)))) abort (); /* Create a new (writable) copy of asm_fprintf_length_specs. */ new_asm_fprintf_length_specs = xmemdup (asm_fprintf_length_specs, sizeof (asm_fprintf_length_specs), sizeof (asm_fprintf_length_specs)); /* HOST_WIDE_INT must be one of 'long' or 'long long'. */ i = find_length_info_modifier_index (new_asm_fprintf_length_specs, 'w'); if (hwi == long_integer_type_node) new_asm_fprintf_length_specs[i].index = FMT_LEN_l; else if (hwi == long_long_integer_type_node) new_asm_fprintf_length_specs[i].index = FMT_LEN_ll; else abort (); /* Assign the new data for use. 
*/ dynamic_format_types[asm_fprintf_format_type].length_char_specs = new_asm_fprintf_length_specs; } } /* Determine the types of "tree" and "location_t" in the code being compiled for use in GCC's diagnostic custom format attributes. You must have set dynamic_format_types before calling this function. */ static void init_dynamic_diag_info (void) { static tree t, loc, hwi; if (!loc || !t || !hwi) { static format_char_info *diag_fci, *cdiag_fci, *cxxdiag_fci; static format_length_info *diag_ls; unsigned int i; /* For the GCC-diagnostics custom format specifiers to work, one must have declared `tree' and/or `location_t' prior to using those attributes. If we haven't seen these declarations then you shouldn't use the specifiers requiring these types. However we don't force a hard ICE because we may see only one or the other type. */ if ((loc = maybe_get_identifier ("location_t"))) loc = TREE_TYPE (identifier_global_value (loc)); /* We need to grab the underlying `union tree_node' so peek into an extra type level. */ if ((t = maybe_get_identifier ("tree"))) t = TREE_TYPE (TREE_TYPE (identifier_global_value (t))); /* Find the underlying type for HOST_WIDE_INT. For the %w length modifier to work, one must have issued: "typedef HOST_WIDE_INT __gcc_host_wide_int__;" in one's source code prior to using that modifier. */ if ((hwi = maybe_get_identifier ("__gcc_host_wide_int__"))) hwi = DECL_ORIGINAL_TYPE (identifier_global_value (hwi)); /* Assign the new data for use. */ /* All the GCC diag formats use the same length specs. */ if (! diag_ls) dynamic_format_types[gcc_diag_format_type].length_char_specs = dynamic_format_types[gcc_cdiag_format_type].length_char_specs = dynamic_format_types[gcc_cxxdiag_format_type].length_char_specs = diag_ls = xmemdup (gcc_diag_length_specs, sizeof (gcc_diag_length_specs), sizeof (gcc_diag_length_specs)); if (hwi) { /* HOST_WIDE_INT must be one of 'long' or 'long long'. */ i = find_length_info_modifier_index (diag_ls, 'w'); if (hwi == long_integer_type_node) diag_ls[i].index = FMT_LEN_l; else if (hwi == long_long_integer_type_node) diag_ls[i].index = FMT_LEN_ll; else abort (); } /* Handle the __gcc_diag__ format specifics. */ if (! diag_fci) dynamic_format_types[gcc_diag_format_type].conversion_specs = diag_fci = xmemdup (gcc_diag_char_table, sizeof(gcc_diag_char_table), sizeof(gcc_diag_char_table)); if (loc) { i = find_char_info_specifier_index (diag_fci, 'H'); diag_fci[i].types[0].type = &loc; diag_fci[i].pointer_count = 1; } if (t) { i = find_char_info_specifier_index (diag_fci, 'J'); diag_fci[i].types[0].type = &t; diag_fci[i].pointer_count = 1; } /* Handle the __gcc_cdiag__ format specifics. */ if (! cdiag_fci) dynamic_format_types[gcc_cdiag_format_type].conversion_specs = cdiag_fci = xmemdup (gcc_cdiag_char_table, sizeof(gcc_cdiag_char_table), sizeof(gcc_cdiag_char_table)); if (loc) { i = find_char_info_specifier_index (cdiag_fci, 'H'); cdiag_fci[i].types[0].type = &loc; cdiag_fci[i].pointer_count = 1; } if (t) { /* All specifiers taking a tree share the same struct. */ i = find_char_info_specifier_index (cdiag_fci, 'D'); cdiag_fci[i].types[0].type = &t; cdiag_fci[i].pointer_count = 1; i = find_char_info_specifier_index (cdiag_fci, 'J'); cdiag_fci[i].types[0].type = &t; cdiag_fci[i].pointer_count = 1; } /* Handle the __gcc_cxxdiag__ format specifics. */ if (! 
cxxdiag_fci) dynamic_format_types[gcc_cxxdiag_format_type].conversion_specs = cxxdiag_fci = xmemdup (gcc_cxxdiag_char_table, sizeof(gcc_cxxdiag_char_table), sizeof(gcc_cxxdiag_char_table)); if (loc) { i = find_char_info_specifier_index (cxxdiag_fci, 'H'); cxxdiag_fci[i].types[0].type = &loc; cxxdiag_fci[i].pointer_count = 1; } if (t) { /* All specifiers taking a tree share the same struct. */ i = find_char_info_specifier_index (cxxdiag_fci, 'D'); cxxdiag_fci[i].types[0].type = &t; cxxdiag_fci[i].pointer_count = 1; i = find_char_info_specifier_index (cxxdiag_fci, 'J'); cxxdiag_fci[i].types[0].type = &t; cxxdiag_fci[i].pointer_count = 1; } } } /* Handle a "format" attribute; arguments as in struct attribute_spec.handler. */ tree handle_format_attribute (tree *node, tree name ATTRIBUTE_UNUSED, tree args, int flags, bool *no_add_attrs) { tree type = *node; function_format_info info; tree argument; if (!decode_format_attr (args, &info, 0)) { *no_add_attrs = true; return NULL_TREE; } argument = TYPE_ARG_TYPES (type); if (argument) { if (!check_format_string (argument, info.format_num, flags, no_add_attrs)) return NULL_TREE; if (info.first_arg_num != 0) { unsigned HOST_WIDE_INT arg_num = 1; /* Verify that first_arg_num points to the last arg, the ... */ while (argument) arg_num++, argument = TREE_CHAIN (argument); if (arg_num != info.first_arg_num) { if (!(flags & (int) ATTR_FLAG_BUILT_IN)) error ("args to be formatted is not '...'"); *no_add_attrs = true; return NULL_TREE; } } } if (info.format_type == strftime_format_type && info.first_arg_num != 0) { error ("strftime formats cannot format arguments"); *no_add_attrs = true; return NULL_TREE; } /* If this is a custom GCC-internal format type, we have to initialize certain bits a runtime. */ if (info.format_type == asm_fprintf_format_type || info.format_type == gcc_diag_format_type || info.format_type == gcc_cdiag_format_type || info.format_type == gcc_cxxdiag_format_type) { /* Our first time through, we have to make sure that our format_type data is allocated dynamically and is modifiable. */ if (!dynamic_format_types) format_types = dynamic_format_types = xmemdup (format_types_orig, sizeof (format_types_orig), sizeof (format_types_orig)); /* If this is format __asm_fprintf__, we have to initialize GCC's notion of HOST_WIDE_INT for checking %wd. */ if (info.format_type == asm_fprintf_format_type) init_dynamic_asm_fprintf_info(); /* If this is one of the diagnostic attributes, then we have to initialize `location_t' and `tree' at runtime. */ else if (info.format_type == gcc_diag_format_type || info.format_type == gcc_cdiag_format_type || info.format_type == gcc_cxxdiag_format_type) init_dynamic_diag_info(); else abort(); } return NULL_TREE; } /* This file contains the definitions and documentation for the common tree codes used in the GNU C and C++ compilers (see c-common.def for the standard codes). Copyright (C) 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Written by Benjamin Chelf (chelf@codesourcery.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* In order for the format checking to accept the C frontend diagnostic framework extensions, you must define this token before including toplev.h. */ #ifndef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_cdiag__ #endif /* Create an empty statement tree rooted at T. */ tree push_stmt_list (void) { tree t; t = alloc_stmt_list (); TREE_CHAIN (t) = cur_stmt_list; cur_stmt_list = t; return t; } /* Similarly, except that T may have already been pushed/popped, and thus may already contain statement(s). Arrage for new statements to be appended. */ tree re_push_stmt_list (tree t) { if (t) { if (TREE_CODE (t) != STATEMENT_LIST) { tree u = alloc_stmt_list (); append_to_statement_list_force (t, &u); t = u; } } else t = alloc_stmt_list (); TREE_CHAIN (t) = cur_stmt_list; cur_stmt_list = t; return t; } /* Finish the statement tree rooted at T. */ tree pop_stmt_list (tree t) { tree u = cur_stmt_list, chain; /* Pop statement lists until we reach the target level. The extra nestings will be due to outstanding cleanups. */ while (1) { chain = TREE_CHAIN (u); TREE_CHAIN (u) = NULL_TREE; if (t == u) break; u = chain; } cur_stmt_list = chain; /* If the statement list is completely empty, just return it. This is just as good small as build_empty_stmt, with the advantage that statement lists are merged when they appended to one another. So using the STATEMENT_LIST avoids pathological buildup of EMPTY_STMT_P statements. */ if (TREE_SIDE_EFFECTS (t)) { tree_stmt_iterator i = tsi_start (t); /* If the statement list contained exactly one statement, then extract it immediately. */ if (tsi_one_before_end_p (i)) { u = tsi_stmt (i); tsi_delink (&i); free_stmt_list (t); t = u; } } return t; } /* T is a statement. Add it to the statement-tree. */ tree add_stmt (tree t) { enum tree_code code = TREE_CODE (t); if ((EXPR_P (t) || STATEMENT_CODE_P (code)) && code != LABEL_EXPR) { if (!EXPR_HAS_LOCATION (t)) SET_EXPR_LOCATION (t, input_location); /* When we expand a statement-tree, we must know whether or not the statements are full-expressions. We record that fact here. */ STMT_IS_FULL_EXPR_P (t) = stmts_are_full_exprs_p (); } /* Add T to the statement-tree. Non-side-effect statements need to be recorded during statement expressions. */ append_to_statement_list_force (t, &cur_stmt_list); return t; } /* Build a generic statement based on the given type of node and arguments. Similar to `build_nt', except that we set EXPR_LOCATION to be the current source location. */ /* ??? This should be obsolete with the lineno_stmt productions in the grammar. */ tree build_stmt (enum tree_code code, ...) { tree ret; int length, i; va_list p; bool side_effects; va_start (p, code); ret = make_node (code); TREE_TYPE (ret) = void_type_node; length = TREE_CODE_LENGTH (code); SET_EXPR_LOCATION (ret, input_location); /* Most statements have implicit side effects all on their own, such as control transfer. For those that do, we'll compute the real value of TREE_SIDE_EFFECTS from its arguments. 
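   For illustration (a sketch, not calls made at this point): build_stmt
   (BREAK_STMT), as used by build_break_stmt below, ends up with
   TREE_SIDE_EFFECTS set unconditionally because a break transfers
   control, whereas build_stmt (EXPR_STMT, expr) starts with no side
   effects and only inherits those of EXPR, so an EXPR_STMT wrapping a
   plain constant stays side-effect free.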
*/ switch (code) { case EXPR_STMT: side_effects = false; break; default: side_effects = true; break; } for (i = 0; i < length; i++) { tree t = va_arg (p, tree); if (t && IS_NON_TYPE_CODE_CLASS (TREE_CODE_CLASS (TREE_CODE (t)))) side_effects |= TREE_SIDE_EFFECTS (t); TREE_OPERAND (ret, i) = t; } TREE_SIDE_EFFECTS (ret) = side_effects; va_end (p); return ret; } /* Create RTL for the local static variable DECL. */ void make_rtl_for_local_static (tree decl) { const char *asmspec = NULL; /* If we inlined this variable, we could see it's declaration again. */ if (TREE_ASM_WRITTEN (decl)) return; /* If the DECL_ASSEMBLER_NAME is not the same as the DECL_NAME, then either we already created RTL for this DECL (and since it was a local variable, its DECL_ASSEMBLER_NAME got hacked up to prevent clashes with other local statics with the same name by a previous call to make_decl_rtl), or the user explicitly requested a particular assembly name for this variable, using the GNU extension for this purpose: int i asm ("j"); There's no way to know which case we're in, here. But, it turns out we're safe. If there's already RTL, then rest_of_decl_compilation ignores the ASMSPEC parameter, so we may as well not pass it in. If there isn't RTL, then we didn't already create RTL, which means that the modification to DECL_ASSEMBLER_NAME came only via the explicit extension. */ if (DECL_ASSEMBLER_NAME (decl) != DECL_NAME (decl) && !DECL_RTL_SET_P (decl)) asmspec = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); rest_of_decl_compilation (decl, asmspec, /*top_level=*/0, /*at_end=*/0); } /* Let the back-end know about DECL. */ void emit_local_var (tree decl) { /* Create RTL for this variable. */ if (!DECL_RTL_SET_P (decl)) { if (DECL_HARD_REGISTER (decl)) /* The user specified an assembler name for this variable. Set that up now. */ rest_of_decl_compilation (decl, IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), /*top_level=*/0, /*at_end=*/0); else expand_decl (decl); } } /* Build a break statement node and return it. */ tree build_break_stmt (void) { return (build_stmt (BREAK_STMT)); } /* Build a continue statement node and return it. */ tree build_continue_stmt (void) { return (build_stmt (CONTINUE_STMT)); } /* Create a CASE_LABEL_EXPR tree node and return it. */ tree build_case_label (tree low_value, tree high_value, tree label_decl) { return build_stmt (CASE_LABEL_EXPR, low_value, high_value, label_decl); } /* Set up combined include path chain for the preprocessor. Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Broken out of cppinit.c and cppfiles.c and rewritten Mar 2003. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Provide prototypes for functions exported from prefix.c. Copyright (C) 1999, 2003 Free Software Foundation, Inc. This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef GCC_PREFIX_H #define GCC_PREFIX_H /* Update PATH using KEY if PATH starts with PREFIX. The returned string is always malloc-ed, and the caller is responsible for freeing it. */ extern char *update_path (const char *path, const char *key); extern void set_std_prefix (const char *, int); #endif /* ! GCC_PREFIX_H */ /* Windows does not natively support inodes, and neither does MSDOS. Cygwin's emulation can generate non-unique inodes, so don't use it. VMS has non-numeric inodes. */ #ifdef VMS # define INO_T_EQ(A, B) (!memcmp (&(A), &(B), sizeof (A))) # define INO_T_COPY(DEST, SRC) memcpy(&(DEST), &(SRC), sizeof (SRC)) #else # if (defined _WIN32 && ! defined (_UWIN)) || defined __MSDOS__ # define INO_T_EQ(A, B) 0 # else # define INO_T_EQ(A, B) ((A) == (B)) # endif # define INO_T_COPY(DEST, SRC) (DEST) = (SRC) #endif static void add_env_var_paths (const char *, int); static void add_standard_paths (const char *, const char *, int); static void free_path (struct cpp_dir *, int); static void merge_include_chains (cpp_reader *, int); static struct cpp_dir *remove_duplicates (cpp_reader *, struct cpp_dir *, struct cpp_dir *, struct cpp_dir *, int); /* Include chains heads and tails. */ static struct cpp_dir *heads[4]; static struct cpp_dir *tails[4]; static bool quote_ignores_source_dir; enum { REASON_QUIET = 0, REASON_NOENT, REASON_DUP, REASON_DUP_SYS }; /* Free an element of the include chain, possibly giving a reason. */ static void free_path (struct cpp_dir *path, int reason) { switch (reason) { case REASON_DUP: case REASON_DUP_SYS: fprintf (stderr, _("ignoring duplicate directory \"%s\"\n"), path->name); if (reason == REASON_DUP_SYS) fprintf (stderr, _(" as it is a non-system directory that duplicates a system directory\n")); break; case REASON_NOENT: fprintf (stderr, _("ignoring nonexistent directory \"%s\"\n"), path->name); break; case REASON_QUIET: default: break; } free (path->name); free (path); } /* Read ENV_VAR for a PATH_SEPARATOR-separated list of file names; and append all the names to the search path CHAIN. */ static void add_env_var_paths (const char *env_var, int chain) { char *p, *q, *path; GET_ENVIRONMENT (q, env_var); if (!q) return; for (p = q; *q; p = q + 1) { q = p; while (*q != 0 && *q != PATH_SEPARATOR) q++; if (p == q) path = xstrdup ("."); else { path = xmalloc (q - p + 1); memcpy (path, p, q - p); path[q - p] = '\0'; } add_path (path, chain, chain == SYSTEM, false); } } /* Append the standard include chain defined in cppdefault.c. */ static void add_standard_paths (const char *sysroot, const char *iprefix, int cxx_stdinc) { const struct default_include *p; size_t len; if (iprefix && (len = cpp_GCC_INCLUDE_DIR_len) != 0) { /* Look for directories that start with the standard prefix. "Translate" them, ie. replace /usr/local/lib/gcc... with IPREFIX and search them first. 
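   Worked example (the iprefix value is hypothetical): cpp_GCC_INCLUDE_DIR
   is defined below as "/usr/local/lib/gcc/i686-pc-linux-gnu/3.4.0/include"
   and cpp_GCC_INCLUDE_DIR_len stops just short of the trailing "include",
   so with

     iprefix = "/opt/gcc/lib/gcc/i686-pc-linux-gnu/3.4.0/"

   the GCC_INCLUDE_DIR default is re-rooted by the loop below to

     "/opt/gcc/lib/gcc/i686-pc-linux-gnu/3.4.0/include"

   and appended to the SYSTEM chain ahead of the untranslated defaults
   added by the second loop.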
*/ for (p = cpp_include_defaults; p->fname; p++) { if (!p->cplusplus || cxx_stdinc) { /* Should we be translating sysrooted dirs too? Assume that iprefix and sysroot are mutually exclusive, for now. */ if (sysroot && p->add_sysroot) continue; if (!strncmp (p->fname, cpp_GCC_INCLUDE_DIR, len)) { char *str = concat (iprefix, p->fname + len, NULL); add_path (str, SYSTEM, p->cxx_aware, false); } } } } for (p = cpp_include_defaults; p->fname; p++) { if (!p->cplusplus || cxx_stdinc) { char *str; /* Should this directory start with the sysroot? */ if (sysroot && p->add_sysroot) str = concat (sysroot, p->fname, NULL); else str = update_path (p->fname, p->component); add_path (str, SYSTEM, p->cxx_aware, false); } } } /* For each duplicate path in chain HEAD, keep just the first one. Remove each path in chain HEAD that also exists in chain SYSTEM. Set the NEXT pointer of the last path in the resulting chain to JOIN, unless it duplicates JOIN in which case the last path is removed. Return the head of the resulting chain. Any of HEAD, JOIN and SYSTEM can be NULL. */ static struct cpp_dir * remove_duplicates (cpp_reader *pfile, struct cpp_dir *head, struct cpp_dir *system, struct cpp_dir *join, int verbose) { struct cpp_dir **pcur, *tmp, *cur; struct stat st; for (pcur = &head; *pcur; ) { int reason = REASON_QUIET; cur = *pcur; if (stat (cur->name, &st)) { /* Dirs that don't exist are silently ignored, unless verbose. */ if (errno != ENOENT) cpp_errno (pfile, CPP_DL_ERROR, cur->name); else { /* If -Wmissing-include-dirs is given, warn. */ cpp_options *opts = cpp_get_options (pfile); if (opts->warn_missing_include_dirs && cur->user_supplied_p) cpp_errno (pfile, CPP_DL_WARNING, cur->name); reason = REASON_NOENT; } } else if (!S_ISDIR (st.st_mode)) cpp_error_with_line (pfile, CPP_DL_ERROR, 0, 0, "%s: not a directory", cur->name); else { INO_T_COPY (cur->ino, st.st_ino); cur->dev = st.st_dev; /* Remove this one if it is in the system chain. */ reason = REASON_DUP_SYS; for (tmp = system; tmp; tmp = tmp->next) if (INO_T_EQ (tmp->ino, cur->ino) && tmp->dev == cur->dev) break; if (!tmp) { /* Duplicate of something earlier in the same chain? */ reason = REASON_DUP; for (tmp = head; tmp != cur; tmp = tmp->next) if (INO_T_EQ (cur->ino, tmp->ino) && cur->dev == tmp->dev) break; if (tmp == cur /* Last in the chain and duplicate of JOIN? */ && !(cur->next == NULL && join && INO_T_EQ (cur->ino, join->ino) && cur->dev == join->dev)) { /* Unique, so keep this directory. */ pcur = &cur->next; continue; } } } /* Remove this entry from the chain. */ *pcur = cur->next; free_path (cur, verbose ? reason: REASON_QUIET); } *pcur = join; return head; } /* Merge the four include chains together in the order quote, bracket, system, after. Remove duplicate dirs (as determined by INO_T_EQ()). We can't just merge the lists and then uniquify them because then we may lose directories from the <> search path that should be there; consider -iquote foo -iquote bar -Ifoo -Iquux. It is however safe to treat -iquote bar -iquote foo -Ifoo -Iquux as if written -iquote bar -Ifoo -Iquux. */ static void merge_include_chains (cpp_reader *pfile, int verbose) { /* Join the SYSTEM and AFTER chains. Remove duplicates in the resulting SYSTEM chain. */ if (heads[SYSTEM]) tails[SYSTEM]->next = heads[AFTER]; else heads[SYSTEM] = heads[AFTER]; heads[SYSTEM] = remove_duplicates (pfile, heads[SYSTEM], 0, 0, verbose); /* Remove duplicates from BRACKET that are in itself or SYSTEM, and join it to SYSTEM. 
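   As an illustrative sketch (directory names hypothetical, and leaving
   aside the including file's own directory): for -iquote q -I b plus the
   standard directories, the joins below leave the #include "..." order as
   q, b, <standard dirs> and the #include <...> order as b, <standard
   dirs>; remove_duplicates only drops an entry when stat() shows it to be
   the same directory (same device and inode) as one already kept.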
*/ heads[BRACKET] = remove_duplicates (pfile, heads[BRACKET], heads[SYSTEM], heads[SYSTEM], verbose); /* Remove duplicates from QUOTE that are in itself or SYSTEM, and join it to BRACKET. */ heads[QUOTE] = remove_duplicates (pfile, heads[QUOTE], heads[SYSTEM], heads[BRACKET], verbose); /* If verbose, print the list of dirs to search. */ if (verbose) { struct cpp_dir *p; fprintf (stderr, _("#include \"...\" search starts here:\n")); for (p = heads[QUOTE];; p = p->next) { if (p == heads[BRACKET]) fprintf (stderr, _("#include <...> search starts here:\n")); if (!p) break; fprintf (stderr, " %s\n", p->name); } fprintf (stderr, _("End of search list.\n")); } } /* Use given -I paths for #include "..." but not #include <...>, and don't search the directory of the present file for #include "...". (Note that -I. -I- is not the same as the default setup; -I. uses the compiler's working dir.) */ void split_quote_chain (void) { heads[QUOTE] = heads[BRACKET]; tails[QUOTE] = tails[BRACKET]; heads[BRACKET] = NULL; tails[BRACKET] = NULL; /* This is NOT redundant. */ quote_ignores_source_dir = true; } /* Add P to the chain specified by CHAIN. */ void add_cpp_dir_path (cpp_dir *p, int chain) { if (tails[chain]) tails[chain]->next = p; else heads[chain] = p; tails[chain] = p; } /* Add PATH to the include chain CHAIN. PATH must be malloc-ed and NUL-terminated. */ void add_path (char *path, int chain, int cxx_aware, bool user_supplied_p) { cpp_dir *p; #if defined (HAVE_DOS_BASED_FILE_SYSTEM) /* Convert all backslashes to slashes. The native CRT stat() function does not recognise a directory that ends in a backslash (unless it is a drive root dir, such "c:\"). Forward slashes, trailing or otherwise, cause no problems for stat(). */ char* c; for (c = path; *c; c++) if (*c == '\\') *c = '/'; #endif p = xmalloc (sizeof (cpp_dir)); p->next = NULL; p->name = path; if (chain == SYSTEM || chain == AFTER) p->sysp = 1 + !cxx_aware; else p->sysp = 0; p->construct = 0; p->user_supplied_p = user_supplied_p; add_cpp_dir_path (p, chain); } /* Exported function to handle include chain merging, duplicate removal, and registration with cpplib. */ void register_include_chains (cpp_reader *pfile, const char *sysroot, const char *iprefix, int stdinc, int cxx_stdinc, int verbose) { static const char *const lang_env_vars[] = { "C_INCLUDE_PATH", "CPLUS_INCLUDE_PATH", "OBJC_INCLUDE_PATH", "OBJCPLUS_INCLUDE_PATH" }; cpp_options *cpp_opts = cpp_get_options (pfile); size_t idx = (cpp_opts->objc ? 2: 0); if (cpp_opts->cplusplus) idx++; else cxx_stdinc = false; /* CPATH and language-dependent environment variables may add to the include chain. */ add_env_var_paths ("CPATH", BRACKET); add_env_var_paths (lang_env_vars[idx], SYSTEM); /* Finally chain on the standard directories. */ if (stdinc) add_standard_paths (sysroot, iprefix, cxx_stdinc); target_c_incpath.extra_includes (stdinc); merge_include_chains (pfile, verbose); cpp_set_include_chains (pfile, heads[QUOTE], heads[BRACKET], quote_ignores_source_dir); } #ifndef TARGET_EXTRA_INCLUDES static void hook_void_int2(int u ATTRIBUTE_UNUSED) { } struct target_c_incpath_s target_c_incpath = { hook_void_int2 }; #endif /* CPP Library. Copyright (C) 1986, 1987, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2003 Free Software Foundation, Inc. Contributed by Per Bothner, 1994-95. 
Based on CCCP program by Paul Rubin, June 1986 Adapted to ANSI C, Richard Stallman, Jan 1987 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef ONE_COMPILATION_UNIT #define GCC_INCLUDE_DIR "/usr/local/lib/gcc/i686-pc-linux-gnu/3.4.0/include" #define GPLUSPLUS_INCLUDE_DIR "/usr/local/lib/gcc/i686-pc-linux-gnu/3.4.0/../../../../include/c++/3.4.0" #define GPLUSPLUS_TOOL_INCLUDE_DIR "/usr/local/lib/gcc/i686-pc-linux-gnu/3.4.0/../../../../include/c++/3.4.0/i686-pc-linux-gnu" #define GPLUSPLUS_BACKWARD_INCLUDE_DIR "/usr/local/lib/gcc/i686-pc-linux-gnu/3.4.0/../../../../include/c++/3.4.0/backward" #define LOCAL_INCLUDE_DIR "/usr/local/include" #define CROSS_INCLUDE_DIR "/usr/local/lib/gcc/i686-pc-linux-gnu/3.4.0/../../../../i686-pc-linux-gnu/sys-include" #define TOOL_INCLUDE_DIR "/usr/local/lib/gcc/i686-pc-linux-gnu/3.4.0/../../../../i686-pc-linux-gnu/include" #endif #ifndef STANDARD_INCLUDE_DIR #define STANDARD_INCLUDE_DIR "/usr/include" #endif #ifndef STANDARD_INCLUDE_COMPONENT #define STANDARD_INCLUDE_COMPONENT 0 #endif #if defined (CROSS_COMPILE) && !defined (TARGET_SYSTEM_ROOT) # undef LOCAL_INCLUDE_DIR # undef SYSTEM_INCLUDE_DIR # undef STANDARD_INCLUDE_DIR #else # undef CROSS_INCLUDE_DIR #endif const struct default_include cpp_include_defaults[] #ifdef INCLUDE_DEFAULTS = INCLUDE_DEFAULTS; #else = { #ifdef GPLUSPLUS_INCLUDE_DIR /* Pick up GNU C++ generic include files. */ { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1, 0 }, #endif #ifdef GPLUSPLUS_TOOL_INCLUDE_DIR /* Pick up GNU C++ target-dependent include files. */ { GPLUSPLUS_TOOL_INCLUDE_DIR, "G++", 1, 1, 0 }, #endif #ifdef GPLUSPLUS_BACKWARD_INCLUDE_DIR /* Pick up GNU C++ backward and deprecated include files. */ { GPLUSPLUS_BACKWARD_INCLUDE_DIR, "G++", 1, 1, 0 }, #endif #ifdef LOCAL_INCLUDE_DIR /* /usr/local/include comes before the fixincluded header files. */ { LOCAL_INCLUDE_DIR, 0, 0, 1, 1 }, #endif #ifdef PREFIX_INCLUDE_DIR { PREFIX_INCLUDE_DIR, 0, 0, 1, 0 }, #endif #ifdef GCC_INCLUDE_DIR /* This is the dir for fixincludes and for gcc's private headers. */ { GCC_INCLUDE_DIR, "GCC", 0, 0, 0 }, #endif #ifdef CROSS_INCLUDE_DIR /* One place the target system's headers might be. */ { CROSS_INCLUDE_DIR, "GCC", 0, 0, 0 }, #endif #ifdef TOOL_INCLUDE_DIR /* Another place the target system's headers might be. */ { TOOL_INCLUDE_DIR, "BINUTILS", 0, 1, 0 }, #endif #ifdef SYSTEM_INCLUDE_DIR /* Some systems have an extra dir of include files. */ { SYSTEM_INCLUDE_DIR, 0, 0, 0, 1 }, #endif #ifdef STANDARD_INCLUDE_DIR /* /usr/include comes dead last. 
*/ { STANDARD_INCLUDE_DIR, STANDARD_INCLUDE_COMPONENT, 0, 0, 1 }, #endif { 0, 0, 0, 0, 0 } }; #endif /* no INCLUDE_DEFAULTS */ #ifdef GCC_INCLUDE_DIR const char cpp_GCC_INCLUDE_DIR[] = GCC_INCLUDE_DIR; const size_t cpp_GCC_INCLUDE_DIR_len = sizeof GCC_INCLUDE_DIR - 8; #else const char cpp_GCC_INCLUDE_DIR[] = ""; const size_t cpp_GCC_INCLUDE_DIR_len = 0; #endif #ifdef TARGET_SYSTEM_ROOT const char *cpp_SYSROOT = TARGET_SYSTEM_ROOT; #else const char *cpp_SYSROOT = ""; #endif /* Preprocess only, using cpplib. Copyright (C) 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. Written by Per Bothner, 1994-95. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Part of CPP library. Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This header defines all the internal data structures and functions that need to be visible across files. It should not be used outside cpplib. */ #ifndef LIBCPP_INTERNAL_H #define LIBCPP_INTERNAL_H #if defined HAVE_ICONV_H && defined HAVE_ICONV #include #else #define HAVE_ICONV 0 typedef int iconv_t; /* dummy */ #endif struct directive; /* Deliberately incomplete. */ struct pending_option; struct op; struct _cpp_strbuf; typedef bool (*convert_f) (iconv_t, const unsigned char *, size_t, struct _cpp_strbuf *); struct cset_converter { convert_f func; iconv_t cd; }; #define BITS_PER_CPPCHAR_T (CHAR_BIT * sizeof (cppchar_t)) /* Test if a sign is valid within a preprocessing number. */ #define VALID_SIGN(c, prevc) \ (((c) == '+' || (c) == '-') && \ ((prevc) == 'e' || (prevc) == 'E' \ || (((prevc) == 'p' || (prevc) == 'P') \ && CPP_OPTION (pfile, extended_numbers)))) #define CPP_OPTION(PFILE, OPTION) ((PFILE)->opts.OPTION) #define CPP_BUFFER(PFILE) ((PFILE)->buffer) #define CPP_BUF_COLUMN(BUF, CUR) ((CUR) - (BUF)->line_base) #define CPP_BUF_COL(BUF) CPP_BUF_COLUMN(BUF, (BUF)->cur) #define CPP_INCREMENT_LINE(PFILE, COLS_HINT) do { \ const struct line_maps *line_table = PFILE->line_table; \ const struct line_map *map = &line_table->maps[line_table->used-1]; \ unsigned int line = SOURCE_LINE (map, line_table->highest_line); \ linemap_line_start (PFILE->line_table, line + 1, COLS_HINT); \ } while (0) /* Maximum nesting of cpp_buffers. 
We use a static limit, partly for efficiency, and partly to limit runaway recursion. */ #define CPP_STACK_MAX 200 /* Host alignment handling. */ struct dummy { char c; union { double d; int *p; } u; }; #define DEFAULT_ALIGNMENT offsetof (struct dummy, u) #define CPP_ALIGN2(size, align) (((size) + ((align) - 1)) & ~((align) - 1)) #define CPP_ALIGN(size) CPP_ALIGN2 (size, DEFAULT_ALIGNMENT) #define _cpp_mark_macro_used(NODE) do { \ if ((NODE)->type == NT_MACRO && !((NODE)->flags & NODE_BUILTIN)) \ (NODE)->value.macro->used = 1; } while (0) /* A generic memory buffer, and operations on it. */ typedef struct _cpp_buff _cpp_buff; struct _cpp_buff { struct _cpp_buff *next; unsigned char *base, *cur, *limit; }; extern _cpp_buff *_cpp_get_buff (cpp_reader *, size_t); extern void _cpp_release_buff (cpp_reader *, _cpp_buff *); extern void _cpp_extend_buff (cpp_reader *, _cpp_buff **, size_t); extern _cpp_buff *_cpp_append_extend_buff (cpp_reader *, _cpp_buff *, size_t); extern void _cpp_free_buff (_cpp_buff *); extern unsigned char *_cpp_aligned_alloc (cpp_reader *, size_t); extern unsigned char *_cpp_unaligned_alloc (cpp_reader *, size_t); #define BUFF_ROOM(BUFF) (size_t) ((BUFF)->limit - (BUFF)->cur) #define BUFF_FRONT(BUFF) ((BUFF)->cur) #define BUFF_LIMIT(BUFF) ((BUFF)->limit) /* #include types. */ enum include_type {IT_INCLUDE, IT_INCLUDE_NEXT, IT_IMPORT, IT_CMDLINE}; union utoken { const cpp_token *token; const cpp_token **ptoken; }; /* A "run" of tokens; part of a chain of runs. */ typedef struct tokenrun tokenrun; struct tokenrun { tokenrun *next, *prev; cpp_token *base, *limit; }; /* Accessor macros for struct cpp_context. */ #define FIRST(c) ((c)->u.iso.first) #define LAST(c) ((c)->u.iso.last) #define CUR(c) ((c)->u.trad.cur) #define RLIMIT(c) ((c)->u.trad.rlimit) typedef struct cpp_context cpp_context; struct cpp_context { /* Doubly-linked list. */ cpp_context *next, *prev; union { /* For ISO macro expansion. Contexts other than the base context are contiguous tokens. e.g. macro expansions, expanded argument tokens. */ struct { union utoken first; union utoken last; } iso; /* For traditional macro expansion. */ struct { const uchar *cur; const uchar *rlimit; } trad; } u; /* If non-NULL, a buffer used for storage related to this context. When the context is popped, the buffer is released. */ _cpp_buff *buff; /* For a macro context, the macro node, otherwise NULL. */ cpp_hashnode *macro; /* True if utoken element is token, else ptoken. */ bool direct_p; }; struct lexer_state { /* Nonzero if first token on line is CPP_HASH. */ unsigned char in_directive; /* Nonzero if in a directive that will handle padding tokens itself. #include needs this to avoid problems with computed include and spacing between tokens. */ unsigned char directive_wants_padding; /* True if we are skipping a failed conditional group. */ unsigned char skipping; /* Nonzero if in a directive that takes angle-bracketed headers. */ unsigned char angled_headers; /* Nonzero if in a #if or #elif directive. */ unsigned char in_expression; /* Nonzero to save comments. Turned off if discard_comments, and in all directives apart from #define. */ unsigned char save_comments; /* Nonzero if lexing __VA_ARGS__ is valid. */ unsigned char va_args_ok; /* Nonzero if lexing poisoned identifiers is valid. */ unsigned char poisoned_ok; /* Nonzero to prevent macro expansion. */ unsigned char prevent_expansion; /* Nonzero when parsing arguments to a function-like macro. 
*/ unsigned char parsing_args; /* Nonzero if prevent_expansion is true only because output is being discarded. */ unsigned char discarding_output; /* Nonzero to skip evaluating part of an expression. */ unsigned int skip_eval; }; /* Special nodes - identifiers with predefined significance. */ struct spec_nodes { cpp_hashnode *n_defined; /* defined operator */ cpp_hashnode *n_true; /* C++ keyword true */ cpp_hashnode *n_false; /* C++ keyword false */ cpp_hashnode *n__VA_ARGS__; /* C99 vararg macros */ }; typedef struct _cpp_line_note _cpp_line_note; struct _cpp_line_note { /* Location in the clean line the note refers to. */ const uchar *pos; /* Type of note. The 9 'from' trigraph characters represent those trigraphs, '\\' an escaped newline, ' ' an escaped newline with intervening space, and anything else is invalid. */ unsigned int type; }; /* Represents the contents of a file cpplib has read in. */ struct cpp_buffer { const uchar *cur; /* Current location. */ const uchar *line_base; /* Start of current physical line. */ const uchar *next_line; /* Start of to-be-cleaned logical line. */ const uchar *buf; /* Entire character buffer. */ const uchar *rlimit; /* Writable byte at end of file. */ _cpp_line_note *notes; /* Array of notes. */ unsigned int cur_note; /* Next note to process. */ unsigned int notes_used; /* Number of notes. */ unsigned int notes_cap; /* Size of allocated array. */ struct cpp_buffer *prev; /* Pointer into the file table; non-NULL if this is a file buffer. Used for include_next and to record control macros. */ struct _cpp_file *file; /* Value of if_stack at start of this file. Used to prohibit unmatched #endif (etc) in an include file. */ struct if_stack *if_stack; /* True if we need to get the next clean line. */ bool need_line; /* True if we have already warned about C++ comments in this file. The warning happens only for C89 extended mode with -pedantic on, or for -Wtraditional, and only once per file (otherwise it would be far too noisy). */ unsigned int warned_cplusplus_comments : 1; /* True if we don't process trigraphs and escaped newlines. True for preprocessed input, command line directives, and _Pragma buffers. */ unsigned int from_stage3 : 1; /* At EOF, a buffer is automatically popped. If RETURN_AT_EOF is true, a CPP_EOF token is then returned. Otherwise, the next token from the enclosing buffer is returned. */ unsigned int return_at_eof : 1; /* One for a system header, two for a C system header file that therefore needs to be extern "C" protected in C++, and zero otherwise. */ unsigned char sysp; /* The directory of the this buffer's file. Its NAME member is not allocated, so we don't need to worry about freeing it. */ struct cpp_dir dir; /* Descriptor for converting from the input character set to the source character set. */ struct cset_converter input_cset_desc; }; /* A cpp_reader encapsulates the "state" of a pre-processor run. Applying cpp_get_token repeatedly yields a stream of pre-processor tokens. Usually, there is only one cpp_reader object active. */ struct cpp_reader { /* Top of buffer stack. */ cpp_buffer *buffer; /* Overlaid buffer (can be different after processing #include). */ cpp_buffer *overlaid_buffer; /* Lexer state. */ struct lexer_state state; /* Source line tracking. */ struct line_maps *line_table; /* The line of the '#' of the current directive. */ source_location directive_line; /* Memory buffers. */ _cpp_buff *a_buff; /* Aligned permanent storage. */ _cpp_buff *u_buff; /* Unaligned permanent storage. 
*/ _cpp_buff *free_buffs; /* Free buffer chain. */ /* Context stack. */ struct cpp_context base_context; struct cpp_context *context; /* If in_directive, the directive if known. */ const struct directive *directive; /* Search paths for include files. */ struct cpp_dir *quote_include; /* "" */ struct cpp_dir *bracket_include; /* <> */ struct cpp_dir no_search_path; /* No path. */ /* Chain of all hashed _cpp_file instances. */ struct _cpp_file *all_files; struct _cpp_file *main_file; /* File and directory hash table. */ struct htab *file_hash; struct file_hash_entry *file_hash_entries; unsigned int file_hash_entries_allocated, file_hash_entries_used; /* Nonzero means don't look for #include "foo" the source-file directory. */ bool quote_ignores_source_dir; /* Nonzero if any file has contained #pragma once or #import has been used. */ bool seen_once_only; /* Multiple include optimization. */ const cpp_hashnode *mi_cmacro; const cpp_hashnode *mi_ind_cmacro; bool mi_valid; /* Lexing. */ cpp_token *cur_token; tokenrun base_run, *cur_run; unsigned int lookaheads; /* Nonzero prevents the lexer from re-using the token runs. */ unsigned int keep_tokens; /* Error counter for exit code. */ unsigned int errors; /* Buffer to hold macro definition string. */ unsigned char *macro_buffer; unsigned int macro_buffer_len; /* Descriptor for converting from the source character set to the execution character set. */ struct cset_converter narrow_cset_desc; /* Descriptor for converting from the source character set to the wide execution character set. */ struct cset_converter wide_cset_desc; /* Date and time text. Calculated together if either is requested. */ const uchar *date; const uchar *time; /* EOF token, and a token forcing paste avoidance. */ cpp_token avoid_paste; cpp_token eof; /* Opaque handle to the dependencies of mkdeps.c. */ struct depends *deps; /* Obstack holding all macro hash nodes. This never shrinks. See cpphash.c */ struct obstack hash_ob; /* Obstack holding buffer and conditional structures. This is a real stack. See cpplib.c. */ struct obstack buffer_ob; /* Pragma table - dynamic, because a library user can add to the list of recognized pragmas. */ struct pragma_entry *pragmas; /* Call backs to cpplib client. */ struct cpp_callbacks cb; /* Identifier hash table. */ struct ht *hash_table; /* Expression parser stack. */ struct op *op_stack, *op_limit; /* User visible options. */ struct cpp_options opts; /* Special nodes - identifiers with predefined significance to the preprocessor. */ struct spec_nodes spec_nodes; /* Whether cpplib owns the hashtable. */ bool our_hashtable; /* Traditional preprocessing output buffer (a logical line). */ struct { uchar *base; uchar *limit; uchar *cur; source_location first_line; } out; /* Used for buffer overlays by cpptrad.c. */ const uchar *saved_cur, *saved_rlimit, *saved_line_base; /* A saved list of the defined macros, for dependency checking of precompiled headers. */ struct cpp_savedstate *savedstate; }; /* Character classes. Based on the more primitive macros in safe-ctype.h. If the definition of `numchar' looks odd to you, please look up the definition of a pp-number in the C standard [section 6.4.8 of C99]. In the unlikely event that characters other than \r and \n enter the set is_vspace, the macro handle_newline() in cpplex.c must be updated. 
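   For illustration (example tokens only): while lexing the pp-number
   1e+10, VALID_SIGN above accepts the '+' because it follows 'e', so the
   whole spelling remains a single token; 0x1p-3 is likewise kept whole,
   but only when CPP_OPTION (pfile, extended_numbers) is set, which is how
   C99 hexadecimal floating constants are accommodated.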
*/ #define _dollar_ok(x) ((x) == '$' && CPP_OPTION (pfile, dollars_in_ident)) #define is_idchar(x) (ISIDNUM(x) || _dollar_ok(x)) #define is_numchar(x) ISIDNUM(x) #define is_idstart(x) (ISIDST(x) || _dollar_ok(x)) #define is_numstart(x) ISDIGIT(x) #define is_hspace(x) ISBLANK(x) #define is_vspace(x) IS_VSPACE(x) #define is_nvspace(x) IS_NVSPACE(x) #define is_space(x) IS_SPACE_OR_NUL(x) /* This table is constant if it can be initialized at compile time, which is the case if cpp was compiled with GCC >=2.7, or another compiler that supports C99. */ #if HAVE_DESIGNATED_INITIALIZERS extern const unsigned char _cpp_trigraph_map[UCHAR_MAX + 1]; #else extern unsigned char _cpp_trigraph_map[UCHAR_MAX + 1]; #endif /* Macros. */ static inline int cpp_in_system_header (cpp_reader *); static inline int cpp_in_system_header (cpp_reader *pfile) { return pfile->buffer ? pfile->buffer->sysp : 0; } #define CPP_PEDANTIC(PF) CPP_OPTION (PF, pedantic) #define CPP_WTRADITIONAL(PF) CPP_OPTION (PF, warn_traditional) /* In cpperror.c */ extern int _cpp_begin_message (cpp_reader *, int, source_location, unsigned int); /* In cppmacro.c */ extern void _cpp_free_definition (cpp_hashnode *); extern bool _cpp_create_definition (cpp_reader *, cpp_hashnode *); extern void _cpp_pop_context (cpp_reader *); extern void _cpp_push_text_context (cpp_reader *, cpp_hashnode *, const uchar *, size_t); extern bool _cpp_save_parameter (cpp_reader *, cpp_macro *, cpp_hashnode *); extern bool _cpp_arguments_ok (cpp_reader *, cpp_macro *, const cpp_hashnode *, unsigned int); extern const uchar *_cpp_builtin_macro_text (cpp_reader *, cpp_hashnode *); int _cpp_warn_if_unused_macro (cpp_reader *, cpp_hashnode *, void *); /* In cpphash.c */ extern void _cpp_init_hashtable (cpp_reader *, hash_table *); extern void _cpp_destroy_hashtable (cpp_reader *); /* In cppfiles.c */ typedef struct _cpp_file _cpp_file; extern _cpp_file *_cpp_find_file (cpp_reader *, const char *fname, cpp_dir *start_dir, bool fake); extern bool _cpp_find_failed (_cpp_file *); extern void _cpp_mark_file_once_only (cpp_reader *, struct _cpp_file *); extern void _cpp_fake_include (cpp_reader *, const char *); extern bool _cpp_stack_file (cpp_reader *, _cpp_file*, bool); extern bool _cpp_stack_include (cpp_reader *, const char *, int, enum include_type); extern int _cpp_compare_file_date (cpp_reader *, const char *, int); extern void _cpp_report_missing_guards (cpp_reader *); extern void _cpp_init_files (cpp_reader *); extern void _cpp_cleanup_files (cpp_reader *); extern void _cpp_pop_file_buffer (cpp_reader *, struct _cpp_file *); extern bool _cpp_save_file_entries (cpp_reader *pfile, FILE *f); extern bool _cpp_read_file_entries (cpp_reader *, FILE *); /* In cppexp.c */ extern bool _cpp_parse_expr (cpp_reader *); extern struct op *_cpp_expand_op_stack (cpp_reader *); /* In cpplex.c */ extern void _cpp_process_line_notes (cpp_reader *, int); extern void _cpp_clean_line (cpp_reader *); extern bool _cpp_get_fresh_line (cpp_reader *); extern bool _cpp_skip_block_comment (cpp_reader *); extern cpp_token *_cpp_temp_token (cpp_reader *); extern const cpp_token *_cpp_lex_token (cpp_reader *); extern cpp_token *_cpp_lex_direct (cpp_reader *); extern int _cpp_equiv_tokens (const cpp_token *, const cpp_token *); extern void _cpp_init_tokenrun (tokenrun *, unsigned int); /* In cppinit.c. 
*/ extern void _cpp_maybe_push_include_file (cpp_reader *); /* In cpplib.c */ extern int _cpp_test_assertion (cpp_reader *, unsigned int *); extern int _cpp_handle_directive (cpp_reader *, int); extern void _cpp_define_builtin (cpp_reader *, const char *); extern char ** _cpp_save_pragma_names (cpp_reader *); extern void _cpp_restore_pragma_names (cpp_reader *, char **); extern void _cpp_do__Pragma (cpp_reader *); extern void _cpp_init_directives (cpp_reader *); extern void _cpp_init_internal_pragmas (cpp_reader *); extern void _cpp_do_file_change (cpp_reader *, enum lc_reason, const char *, unsigned int, unsigned int); extern void _cpp_pop_buffer (cpp_reader *); /* In cpptrad.c. */ extern bool _cpp_scan_out_logical_line (cpp_reader *, cpp_macro *); extern bool _cpp_read_logical_line_trad (cpp_reader *); extern void _cpp_overlay_buffer (cpp_reader *pfile, const uchar *, size_t); extern void _cpp_remove_overlay (cpp_reader *); extern bool _cpp_create_trad_definition (cpp_reader *, cpp_macro *); extern bool _cpp_expansions_different_trad (const cpp_macro *, const cpp_macro *); extern uchar *_cpp_copy_replacement_text (const cpp_macro *, uchar *); extern size_t _cpp_replacement_text_len (const cpp_macro *); /* In cppcharset.c. */ extern cppchar_t _cpp_valid_ucn (cpp_reader *, const uchar **, const uchar *, int); extern void _cpp_destroy_iconv (cpp_reader *); extern uchar *_cpp_convert_input (cpp_reader *, const char *, uchar *, size_t, size_t, off_t *); extern const char *_cpp_default_encoding (void); /* Utility routines and macros. */ #define DSC(str) (const uchar *)str, sizeof str - 1 #define xnew(T) (T *) xmalloc (sizeof(T)) #define xcnew(T) (T *) xcalloc (1, sizeof(T)) #define xnewvec(T, N) (T *) xmalloc (sizeof(T) * (N)) #define xcnewvec(T, N) (T *) xcalloc (N, sizeof(T)) #define xobnew(O, T) (T *) obstack_alloc (O, sizeof(T)) /* These are inline functions instead of macros so we can get type checking. */ static inline int ustrcmp (const uchar *, const uchar *); static inline int ustrncmp (const uchar *, const uchar *, size_t); static inline size_t ustrlen (const uchar *); static inline uchar *uxstrdup (const uchar *); static inline uchar *ustrchr (const uchar *, int); static inline int ufputs (const uchar *, FILE *); static inline int ustrcmp (const uchar *s1, const uchar *s2) { return strcmp ((const char *)s1, (const char *)s2); } static inline int ustrncmp (const uchar *s1, const uchar *s2, size_t n) { return strncmp ((const char *)s1, (const char *)s2, n); } static inline size_t ustrlen (const uchar *s1) { return strlen ((const char *)s1); } static inline uchar * uxstrdup (const uchar *s1) { return (uchar *) xstrdup ((const char *)s1); } static inline uchar * ustrchr (const uchar *s1, int c) { return (uchar *) strchr ((const char *)s1, c); } static inline int ufputs (const uchar *s, FILE *f) { return fputs ((const char *)s, f); } #endif /* ! LIBCPP_INTERNAL_H */ /* Encapsulates state used to convert a stream of tokens into a text file. */ static struct { FILE *outf; /* Stream to write to. */ const cpp_token *prev; /* Previous token. */ const cpp_token *source; /* Source token for spacing. */ int src_line; /* Line number currently being written. */ unsigned char printed; /* Nonzero if something output at line. */ bool first_time; /* pp_file_change hasn't been called yet. */ } print; /* General output routines. 
*/ static void scan_translation_unit (cpp_reader *); static void scan_translation_unit_trad (cpp_reader *); static void account_for_newlines (const unsigned char *, size_t); static int dump_macro (cpp_reader *, cpp_hashnode *, void *); static void print_line (source_location, const char *); static void maybe_print_line (source_location); /* Callback routines for the parser. Most of these are active only in specific modes. */ static void cb_line_change_ppo (cpp_reader *, const cpp_token *, int); static void cb_define_ppo (cpp_reader *, source_location, cpp_hashnode *); static void cb_undef_ppo (cpp_reader *, source_location, cpp_hashnode *); static void cb_include (cpp_reader *, source_location, const unsigned char *, const char *, int); static void cb_ident_ppo (cpp_reader *, source_location, const cpp_string *); static void cb_def_pragma_ppo (cpp_reader *, source_location); static void cb_read_pch (cpp_reader *pfile, const char *name, int fd, const char *orig_name); /* Preprocess and output. */ void preprocess_file (cpp_reader *pfile) { /* A successful cpp_read_main_file guarantees that we can call cpp_scan_nooutput or cpp_get_token next. */ if (flag_no_output) { /* Scan -included buffers, then the main file. */ while (pfile->buffer->prev) cpp_scan_nooutput (pfile); cpp_scan_nooutput (pfile); } else if (cpp_get_options (pfile)->traditional) scan_translation_unit_trad (pfile); else scan_translation_unit (pfile); /* -dM command line option. Should this be elsewhere? */ if (flag_dump_macros == 'M') cpp_forall_identifiers (pfile, dump_macro, NULL); /* Flush any pending output. */ if (print.printed) putc ('\n', print.outf); } /* Set up the callbacks as appropriate. */ void init_pp_output (FILE *out_stream) { cpp_callbacks *cb = cpp_get_callbacks (parse_in); if (!flag_no_output) { cb->line_change = cb_line_change_ppo; /* Don't emit #pragma or #ident directives if we are processing assembly language; the assembler may choke on them. */ if (cpp_get_options (parse_in)->lang != CLK_ASM) { cb->ident = cb_ident_ppo; cb->def_pragma = cb_def_pragma_ppo; } } if (flag_dump_includes) cb->include = cb_include; if (flag_pch_preprocess) { cb->valid_pch = c_common_valid_pch; cb->read_pch = cb_read_pch; } if (flag_dump_macros == 'N' || flag_dump_macros == 'D') { cb->define = cb_define_ppo; cb->undef = cb_undef_ppo; } /* Initialize the print structure. Setting print.src_line to -1 here is a trick to guarantee that the first token of the file will cause a linemarker to be output by maybe_print_line. */ print.src_line = -1; print.printed = 0; print.prev = 0; print.outf = out_stream; print.first_time = 1; } /* Writes out the preprocessed file, handling spacing and paste avoidance issues. */ static void scan_translation_unit (cpp_reader *pfile) { bool avoid_paste = false; print.source = NULL; for (;;) { const cpp_token *token = cpp_get_token (pfile); if (token->type == CPP_PADDING) { avoid_paste = true; if (print.source == NULL || (!(print.source->flags & PREV_WHITE) && token->val.source == NULL)) print.source = token->val.source; continue; } if (token->type == CPP_EOF) break; /* Subtle logic to output a space if and only if necessary. 
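   For illustration (hypothetical input): if macro expansion leaves one
   '+' token adjacent to another '+', printing them with no separation
   would re-lex as '++', so a space is forced between them; likewise a '#'
   that would otherwise start an output line gets a leading space so it is
   not mistaken for a directive when the output is preprocessed again.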
*/ if (avoid_paste) { if (print.source == NULL) print.source = token; if (print.source->flags & PREV_WHITE || (print.prev && cpp_avoid_paste (pfile, print.prev, token)) || (print.prev == NULL && token->type == CPP_HASH)) putc (' ', print.outf); } else if (token->flags & PREV_WHITE) putc (' ', print.outf); avoid_paste = false; print.source = NULL; print.prev = token; cpp_output_token (token, print.outf); if (token->type == CPP_COMMENT) account_for_newlines (token->val.str.text, token->val.str.len); } } /* Adjust print.src_line for newlines embedded in output. */ static void account_for_newlines (const unsigned char *str, size_t len) { while (len--) if (*str++ == '\n') print.src_line++; } /* Writes out a traditionally preprocessed file. */ static void scan_translation_unit_trad (cpp_reader *pfile) { while (_cpp_read_logical_line_trad (pfile)) { size_t len = pfile->out.cur - pfile->out.base; maybe_print_line (pfile->out.first_line); fwrite (pfile->out.base, 1, len, print.outf); print.printed = 1; if (!CPP_OPTION (pfile, discard_comments)) account_for_newlines (pfile->out.base, len); } } /* If the token read on logical line LINE needs to be output on a different line to the current one, output the required newlines or a line marker, and return 1. Otherwise return 0. */ static void maybe_print_line (source_location src_loc) { const struct line_map *map = linemap_lookup (&line_table, src_loc); int src_line = SOURCE_LINE (map, src_loc); /* End the previous line of text. */ if (print.printed) { putc ('\n', print.outf); print.src_line++; print.printed = 0; } if (src_line >= print.src_line && src_line < print.src_line + 8) { while (src_line > print.src_line) { putc ('\n', print.outf); print.src_line++; } } else print_line (src_loc, ""); } /* Output a line marker for logical line LINE. Special flags are "1" or "2" indicating entering or leaving a file. */ static void print_line (source_location src_loc, const char *special_flags) { /* End any previous line of text. */ if (print.printed) putc ('\n', print.outf); print.printed = 0; if (!flag_no_line_commands) { const struct line_map *map = linemap_lookup (&line_table, src_loc); size_t to_file_len = strlen (map->to_file); unsigned char *to_file_quoted = alloca (to_file_len * 4 + 1); unsigned char *p; print.src_line = SOURCE_LINE (map, src_loc); /* cpp_quote_string does not nul-terminate, so we have to do it ourselves. */ p = cpp_quote_string (to_file_quoted, (unsigned char *)map->to_file, to_file_len); *p = '\0'; fprintf (print.outf, "# %u \"%s\"%s", print.src_line, to_file_quoted, special_flags); if (map->sysp == 2) fputs (" 3 4", print.outf); else if (map->sysp == 1) fputs (" 3", print.outf); putc ('\n', print.outf); } } /* Called when a line of output is started. TOKEN is the first token of the line, and at end of file will be CPP_EOF. */ static void cb_line_change_ppo (cpp_reader *pfile, const cpp_token *token, int parsing_args) { source_location src_loc = token->src_loc; if (token->type == CPP_EOF || parsing_args) return; maybe_print_line (src_loc); print.prev = 0; print.source = 0; /* Supply enough spaces to put this token in its original column, one space per column greater than 2, since scan_translation_unit will provide a space if PREV_WHITE. Don't bother trying to reconstruct tabs; we can't get it right in general, and nothing ought to care. Some things do care; the fault lies with them. 
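   Worked example (illustrative arithmetic): a first token at source
   column 10 gets SOURCE_COLUMN - 2 = 8 leading spaces here, and
   scan_translation_unit supplies the one remaining space when PREV_WHITE
   is set, putting the token back at column 10 in the output.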
*/ if (!CPP_OPTION (pfile, traditional)) { const struct line_map *map = linemap_lookup (&line_table, src_loc); int spaces = SOURCE_COLUMN (map, src_loc) - 2; print.printed = 1; while (-- spaces >= 0) putc (' ', print.outf); } } static void cb_ident_ppo (cpp_reader *pfile ATTRIBUTE_UNUSED, source_location line, const cpp_string *str) { maybe_print_line (line); fprintf (print.outf, "#ident \"%s\"\n", str->text); print.src_line++; } static void cb_define_ppo (cpp_reader *pfile, source_location line, cpp_hashnode *node) { maybe_print_line (line); fputs ("#define ", print.outf); /* 'D' is whole definition; 'N' is name only. */ if (flag_dump_macros == 'D') fputs ((const char *) cpp_macro_definition (pfile, node), print.outf); else fputs ((const char *) NODE_NAME (node), print.outf); putc ('\n', print.outf); print.src_line++; } static void cb_undef_ppo (cpp_reader *pfile ATTRIBUTE_UNUSED, source_location line, cpp_hashnode *node) { maybe_print_line (line); fprintf (print.outf, "#undef %s\n", NODE_NAME (node)); print.src_line++; } static void cb_include (cpp_reader *pfile ATTRIBUTE_UNUSED, source_location line, const unsigned char *dir, const char *header, int angle_brackets) { maybe_print_line (line); if (angle_brackets) fprintf (print.outf, "#%s <%s>\n", dir, header); else fprintf (print.outf, "#%s \"%s\"\n", dir, header); print.src_line++; } /* Callback called when -fworking-director and -E to emit working directory in cpp output file. */ void pp_dir_change (cpp_reader *pfile ATTRIBUTE_UNUSED, const char *dir) { size_t to_file_len = strlen (dir); unsigned char *to_file_quoted = alloca (to_file_len * 4 + 1); unsigned char *p; /* cpp_quote_string does not nul-terminate, so we have to do it ourselves. */ p = cpp_quote_string (to_file_quoted, (unsigned char *) dir, to_file_len); *p = '\0'; fprintf (print.outf, "# 1 \"%s//\"\n", to_file_quoted); } /* The file name, line number or system header flags have changed, as described in MAP. */ void pp_file_change (const struct line_map *map) { const char *flags = ""; if (flag_no_line_commands || flag_no_output) return; if (map != NULL) { if (print.first_time) { /* Avoid printing foo.i when the main file is foo.c. */ if (!cpp_get_options (parse_in)->preprocessed) print_line (map->start_location, flags); print.first_time = 0; } else { /* Bring current file to correct line when entering a new file. */ if (map->reason == LC_ENTER) { const struct line_map *from = INCLUDED_FROM (&line_table, map); maybe_print_line (LAST_SOURCE_LINE_LOCATION (from)); } if (map->reason == LC_ENTER) flags = " 1"; else if (map->reason == LC_LEAVE) flags = " 2"; print_line (map->start_location, flags); } } } /* Copy a #pragma directive to the preprocessed output. */ static void cb_def_pragma_ppo (cpp_reader *pfile, source_location line) { maybe_print_line (line); fputs ("#pragma ", print.outf); cpp_output_line (pfile, print.outf); print.src_line++; } /* Dump out the hash table. */ static int dump_macro (cpp_reader *pfile, cpp_hashnode *node, void *v ATTRIBUTE_UNUSED) { if (node->type == NT_MACRO && !(node->flags & NODE_BUILTIN)) { fputs ("#define ", print.outf); fputs ((const char *) cpp_macro_definition (pfile, node), print.outf); putc ('\n', print.outf); print.src_line++; } return 1; } /* Load in the PCH file NAME, open on FD. It was originally searched for by ORIG_NAME. Also, print out a #include command so that the PCH file can be loaded when the preprocessed output is compiled. 
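   For example (file name hypothetical), a PCH found as foo.h.gch causes
   the line

     #pragma GCC pch_preprocess "foo.h.gch"

   (a pragma rather than a literal #include) to be written to the
   preprocessed output, which is what later lets the PCH be reloaded when
   that output is compiled.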
*/ static void cb_read_pch (cpp_reader *pfile, const char *name, int fd, const char *orig_name ATTRIBUTE_UNUSED) { c_common_read_pch (pfile, name, fd, orig_name); fprintf (print.outf, "#pragma GCC pch_preprocess \"%s\"\n", name); print.src_line++; } /* Define builtin-in macros for the C family front ends. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifndef TARGET_OS_CPP_BUILTINS # define TARGET_OS_CPP_BUILTINS() #endif #ifndef TARGET_OBJFMT_CPP_BUILTINS # define TARGET_OBJFMT_CPP_BUILTINS() #endif #ifndef REGISTER_PREFIX #define REGISTER_PREFIX "" #endif /* Non-static as some targets don't use it. */ void builtin_define_std (const char *) ATTRIBUTE_UNUSED; static void builtin_define_with_value_n (const char *, const char *, size_t); static void builtin_define_with_int_value (const char *, HOST_WIDE_INT); static void builtin_define_with_hex_fp_value (const char *, tree, int, const char *, const char *); static void builtin_define_type_max (const char *, tree, int); static void builtin_define_type_precision (const char *, tree); static void builtin_define_float_constants (const char *, const char *, tree); static void define__GNUC__ (void); /* Define NAME with value TYPE precision. */ static void builtin_define_type_precision (const char *name, tree type) { builtin_define_with_int_value (name, TYPE_PRECISION (type)); } /* Define the float.h constants for TYPE using NAME_PREFIX and FP_SUFFIX. */ static void builtin_define_float_constants (const char *name_prefix, const char *fp_suffix, tree type) { /* Used to convert radix-based values to base 10 values in several cases. In the max_exp -> max_10_exp conversion for 128-bit IEEE, we need at least 6 significant digits for correct results. Using the fraction formed by (log(2)*1e6)/(log(10)*1e6) overflows a 32-bit integer as an intermediate; perhaps someone can find a better approximation, in the mean time, I suspect using doubles won't harm the bootstrap here. */ const double log10_2 = .30102999566398119521; double log10_b; const struct real_format *fmt; char name[64], buf[128]; int dig, min_10_exp, max_10_exp; int decimal_dig; fmt = REAL_MODE_FORMAT (TYPE_MODE (type)); /* The radix of the exponent representation. */ if (type == float_type_node) builtin_define_with_int_value ("__FLT_RADIX__", fmt->b); log10_b = log10_2 * fmt->log2_b; /* The number of radix digits, p, in the floating-point significand. 
*/ sprintf (name, "__%s_MANT_DIG__", name_prefix); builtin_define_with_int_value (name, fmt->p); /* The number of decimal digits, q, such that any floating-point number with q decimal digits can be rounded into a floating-point number with p radix b digits and back again without change to the q decimal digits, p log10 b if b is a power of 10 floor((p - 1) log10 b) otherwise */ dig = (fmt->p - 1) * log10_b; sprintf (name, "__%s_DIG__", name_prefix); builtin_define_with_int_value (name, dig); /* The minimum negative int x such that b**(x-1) is a normalized float. */ sprintf (name, "__%s_MIN_EXP__", name_prefix); sprintf (buf, "(%d)", fmt->emin); builtin_define_with_value (name, buf, 0); /* The minimum negative int x such that 10**x is a normalized float, ceil (log10 (b ** (emin - 1))) = ceil (log10 (b) * (emin - 1)) Recall that emin is negative, so the integer truncation calculates the ceiling, not the floor, in this case. */ min_10_exp = (fmt->emin - 1) * log10_b; sprintf (name, "__%s_MIN_10_EXP__", name_prefix); sprintf (buf, "(%d)", min_10_exp); builtin_define_with_value (name, buf, 0); /* The maximum int x such that b**(x-1) is a representable float. */ sprintf (name, "__%s_MAX_EXP__", name_prefix); builtin_define_with_int_value (name, fmt->emax); /* The maximum int x such that 10**x is in the range of representable finite floating-point numbers, floor (log10((1 - b**-p) * b**emax)) = floor (log10(1 - b**-p) + log10(b**emax)) = floor (log10(1 - b**-p) + log10(b)*emax) The safest thing to do here is to just compute this number. But since we don't link cc1 with libm, we cannot. We could implement log10 here a series expansion, but that seems too much effort because: Note that the first term, for all extant p, is a number exceedingly close to zero, but slightly negative. Note that the second term is an integer scaling an irrational number, and that because of the floor we are only interested in its integral portion. In order for the first term to have any effect on the integral portion of the second term, the second term has to be exceedingly close to an integer itself (e.g. 123.000000000001 or something). Getting a result that close to an integer requires that the irrational multiplicand have a long series of zeros in its expansion, which doesn't occur in the first 20 digits or so of log10(b). Hand-waving aside, crunching all of the sets of constants above by hand does not yield a case for which the first term is significant, which in the end is all that matters. */ max_10_exp = fmt->emax * log10_b; sprintf (name, "__%s_MAX_10_EXP__", name_prefix); builtin_define_with_int_value (name, max_10_exp); /* The number of decimal digits, n, such that any floating-point number can be rounded to n decimal digits and back again without change to the value. p * log10(b) if b is a power of 10 ceil(1 + p * log10(b)) otherwise The only macro we care about is this number for the widest supported floating type, but we want this value for rendering constants below. */ { double d_decimal_dig = 1 + fmt->p * log10_b; decimal_dig = d_decimal_dig; if (decimal_dig < d_decimal_dig) decimal_dig++; } if (type == long_double_type_node) builtin_define_with_int_value ("__DECIMAL_DIG__", decimal_dig); /* Since, for the supported formats, B is always a power of 2, we construct the following numbers directly as a hexadecimal constants. 
*/ /* The maximum representable finite floating-point number, (1 - b**-p) * b**emax */ { int i, n; char *p; strcpy (buf, "0x0."); n = fmt->p * fmt->log2_b; for (i = 0, p = buf + 4; i + 3 < n; i += 4) *p++ = 'f'; if (i < n) *p++ = "08ce"[n - i]; sprintf (p, "p%d", fmt->emax * fmt->log2_b); if (fmt->pnan < fmt->p) { /* This is an IBM extended double format made up of two IEEE doubles. The value of the long double is the sum of the values of the two parts. The most significant part is required to be the value of the long double rounded to the nearest double. Rounding means we need a slightly smaller value for LDBL_MAX. */ buf[4 + fmt->pnan / 4] = "7bde"[fmt->pnan % 4]; } } sprintf (name, "__%s_MAX__", name_prefix); builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix); /* The minimum normalized positive floating-point number, b**(emin-1). */ sprintf (name, "__%s_MIN__", name_prefix); sprintf (buf, "0x1p%d", (fmt->emin - 1) * fmt->log2_b); builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix); /* The difference between 1 and the least value greater than 1 that is representable in the given floating point type, b**(1-p). */ sprintf (name, "__%s_EPSILON__", name_prefix); sprintf (buf, "0x1p%d", (1 - fmt->p) * fmt->log2_b); builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix); /* For C++ std::numeric_limits::denorm_min. The minimum denormalized positive floating-point number, b**(emin-p). Zero for formats that don't support denormals. */ sprintf (name, "__%s_DENORM_MIN__", name_prefix); if (fmt->has_denorm) { sprintf (buf, "0x1p%d", (fmt->emin - fmt->p) * fmt->log2_b); builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix); } else { sprintf (buf, "0.0%s", fp_suffix); builtin_define_with_value (name, buf, 0); } /* For C++ std::numeric_limits::has_infinity. */ sprintf (name, "__%s_HAS_INFINITY__", name_prefix); builtin_define_with_int_value (name, MODE_HAS_INFINITIES (TYPE_MODE (type))); /* For C++ std::numeric_limits::has_quiet_NaN. We do not have a predicate to distinguish a target that has both quiet and signalling NaNs from a target that has only quiet NaNs or only signalling NaNs, so we assume that a target that has any kind of NaN has quiet NaNs. */ sprintf (name, "__%s_HAS_QUIET_NAN__", name_prefix); builtin_define_with_int_value (name, MODE_HAS_NANS (TYPE_MODE (type))); } /* Define __GNUC__, __GNUC_MINOR__ and __GNUC_PATCHLEVEL__. */ static void define__GNUC__ (void) { /* The format of the version string, enforced below, is ([^0-9]*-)?[0-9]+[.][0-9]+([.][0-9]+)?([- ].*)? */ const char *q, *v = version_string; while (*v && ! ISDIGIT (*v)) v++; if (!*v || (v > version_string && v[-1] != '-')) abort (); q = v; while (ISDIGIT (*v)) v++; builtin_define_with_value_n ("__GNUC__", q, v - q); if (c_dialect_cxx ()) builtin_define_with_value_n ("__GNUG__", q, v - q); if (*v != '.' || !ISDIGIT (v[1])) abort (); q = ++v; while (ISDIGIT (*v)) v++; builtin_define_with_value_n ("__GNUC_MINOR__", q, v - q); if (*v == '.') { if (!ISDIGIT (v[1])) abort (); q = ++v; while (ISDIGIT (*v)) v++; builtin_define_with_value_n ("__GNUC_PATCHLEVEL__", q, v - q); } else builtin_define_with_value_n ("__GNUC_PATCHLEVEL__", "0", 1); if (*v && *v != ' ' && *v != '-') abort (); } /* Hook that registers front end and target-specific built-ins. */ void c_cpp_builtins (cpp_reader *pfile) { /* -undef turns off target-specific built-ins. */ if (flag_undef) return; define__GNUC__ (); /* For stddef.h. They require macros defined in c-common.c. 
*/ c_stddef_cpp_builtins (); if (c_dialect_cxx ()) { if (SUPPORTS_ONE_ONLY) cpp_define (pfile, "__GXX_WEAK__=1"); else cpp_define (pfile, "__GXX_WEAK__=0"); if (warn_deprecated) cpp_define (pfile, "__DEPRECATED"); } /* Note that we define this for C as well, so that we know if __attribute__((cleanup)) will interface with EH. */ if (flag_exceptions) cpp_define (pfile, "__EXCEPTIONS"); /* Represents the C++ ABI version, always defined so it can be used while preprocessing C and assembler. */ if (flag_abi_version == 0) /* Use a very large value so that: #if __GXX_ABI_VERSION >= will work whether the user explicitly says "-fabi-version=x" or "-fabi-version=0". Do not use INT_MAX because that will be different from system to system. */ builtin_define_with_int_value ("__GXX_ABI_VERSION", 999999); else if (flag_abi_version == 1) /* Due to an historical accident, this version had the value "102". */ builtin_define_with_int_value ("__GXX_ABI_VERSION", 102); else /* Newer versions have values 1002, 1003, .... */ builtin_define_with_int_value ("__GXX_ABI_VERSION", 1000 + flag_abi_version); /* libgcc needs to know this. */ if (USING_SJLJ_EXCEPTIONS) cpp_define (pfile, "__USING_SJLJ_EXCEPTIONS__"); /* limits.h needs to know these. */ builtin_define_type_max ("__SCHAR_MAX__", signed_char_type_node, 0); builtin_define_type_max ("__SHRT_MAX__", short_integer_type_node, 0); builtin_define_type_max ("__INT_MAX__", integer_type_node, 0); builtin_define_type_max ("__LONG_MAX__", long_integer_type_node, 1); builtin_define_type_max ("__LONG_LONG_MAX__", long_long_integer_type_node, 2); builtin_define_type_max ("__WCHAR_MAX__", wchar_type_node, 0); builtin_define_type_precision ("__CHAR_BIT__", char_type_node); /* float.h needs to know these. */ builtin_define_with_int_value ("__FLT_EVAL_METHOD__", TARGET_FLT_EVAL_METHOD); builtin_define_float_constants ("FLT", "F", float_type_node); builtin_define_float_constants ("DBL", "", double_type_node); builtin_define_float_constants ("LDBL", "L", long_double_type_node); /* For use in assembly language. */ builtin_define_with_value ("__REGISTER_PREFIX__", REGISTER_PREFIX, 0); builtin_define_with_value ("__USER_LABEL_PREFIX__", user_label_prefix, 0); /* Misc. */ builtin_define_with_value ("__VERSION__", version_string, 1); /* Definitions for LP64 model. */ if (TYPE_PRECISION (long_integer_type_node) == 64 && POINTER_SIZE == 64 && TYPE_PRECISION (integer_type_node) == 32) { cpp_define (pfile, "_LP64"); cpp_define (pfile, "__LP64__"); } /* Other target-independent built-ins determined by command-line options. */ if (optimize_size) cpp_define (pfile, "__OPTIMIZE_SIZE__"); if (optimize) cpp_define (pfile, "__OPTIMIZE__"); if (fast_math_flags_set_p ()) cpp_define (pfile, "__FAST_MATH__"); if (flag_really_no_inline) cpp_define (pfile, "__NO_INLINE__"); if (flag_signaling_nans) cpp_define (pfile, "__SUPPORT_SNAN__"); if (flag_finite_math_only) cpp_define (pfile, "__FINITE_MATH_ONLY__=1"); else cpp_define (pfile, "__FINITE_MATH_ONLY__=0"); if (flag_iso) cpp_define (pfile, "__STRICT_ANSI__"); if (!flag_signed_char) cpp_define (pfile, "__CHAR_UNSIGNED__"); if (c_dialect_cxx () && TYPE_UNSIGNED (wchar_type_node)) cpp_define (pfile, "__WCHAR_UNSIGNED__"); /* Make the choice of ObjC runtime visible to source code. */ if (c_dialect_objc () && flag_next_runtime) cpp_define (pfile, "__NEXT_RUNTIME__"); /* Show the availability of some target pragmas. 
*/ if (flag_mudflap || targetm.handle_pragma_redefine_extname) cpp_define (pfile, "__PRAGMA_REDEFINE_EXTNAME"); if (targetm.handle_pragma_extern_prefix) cpp_define (pfile, "__PRAGMA_EXTERN_PREFIX"); /* A straightforward target hook doesn't work, because of problems linking that hook's body when part of non-C front ends. */ # define preprocessing_asm_p() (cpp_get_options (pfile)->lang == CLK_ASM) # define preprocessing_trad_p() (cpp_get_options (pfile)->traditional) # define builtin_define(TXT) cpp_define (pfile, TXT) # define builtin_assert(TXT) cpp_assert (pfile, TXT) TARGET_CPU_CPP_BUILTINS (); TARGET_OS_CPP_BUILTINS (); TARGET_OBJFMT_CPP_BUILTINS (); } /* Pass an object-like macro. If it doesn't lie in the user's namespace, defines it unconditionally. Otherwise define a version with two leading underscores, and another version with two leading and trailing underscores, and define the original only if an ISO standard was not nominated. e.g. passing "unix" defines "__unix", "__unix__" and possibly "unix". Passing "_mips" defines "__mips", "__mips__" and possibly "_mips". */ void builtin_define_std (const char *macro) { size_t len = strlen (macro); char *buff = alloca (len + 5); char *p = buff + 2; char *q = p + len; /* prepend __ (or maybe just _) if in user's namespace. */ memcpy (p, macro, len + 1); if (!( *p == '_' && (p[1] == '_' || ISUPPER (p[1])))) { if (*p != '_') *--p = '_'; if (p[1] != '_') *--p = '_'; } cpp_define (parse_in, p); /* If it was in user's namespace... */ if (p != buff + 2) { /* Define the macro with leading and following __. */ if (q[-1] != '_') *q++ = '_'; if (q[-2] != '_') *q++ = '_'; *q = '\0'; cpp_define (parse_in, p); /* Finally, define the original macro if permitted. */ if (!flag_iso) cpp_define (parse_in, macro); } } /* Pass an object-like macro and a value to define it to. The third parameter says whether or not to turn the value into a string constant. */ void builtin_define_with_value (const char *macro, const char *expansion, int is_str) { char *buf; size_t mlen = strlen (macro); size_t elen = strlen (expansion); size_t extra = 2; /* space for an = and a NUL */ if (is_str) extra += 2; /* space for two quote marks */ buf = alloca (mlen + elen + extra); if (is_str) sprintf (buf, "%s=\"%s\"", macro, expansion); else sprintf (buf, "%s=%s", macro, expansion); cpp_define (parse_in, buf); } /* Pass an object-like macro and a value to define it to. The third parameter is the length of the expansion. */ static void builtin_define_with_value_n (const char *macro, const char *expansion, size_t elen) { char *buf; size_t mlen = strlen (macro); /* Space for an = and a NUL. */ buf = alloca (mlen + elen + 2); memcpy (buf, macro, mlen); buf[mlen] = '='; memcpy (buf + mlen + 1, expansion, elen); buf[mlen + elen + 1] = '\0'; cpp_define (parse_in, buf); } /* Pass an object-like macro and an integer value to define it to. */ static void builtin_define_with_int_value (const char *macro, HOST_WIDE_INT value) { char *buf; size_t mlen = strlen (macro); size_t vlen = 18; size_t extra = 2; /* space for = and NUL. */ buf = alloca (mlen + vlen + extra); memcpy (buf, macro, mlen); buf[mlen] = '='; sprintf (buf + mlen + 1, HOST_WIDE_INT_PRINT_DEC, value); cpp_define (parse_in, buf); } /* Pass an object-like macro a hexadecimal floating-point value. 
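For example (an illustrative call matching the uses above): builtin_define_with_hex_fp_value ("__FLT_EPSILON__", float_type_node, 9, "0x1p-23", "F") parses 0x1p-23 as a REAL_VALUE_TYPE and re-prints it in decimal, so the resulting definition is approximately __FLT_EPSILON__=1.19209290e-7F.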
*/ static void builtin_define_with_hex_fp_value (const char *macro, tree type ATTRIBUTE_UNUSED, int digits, const char *hex_str, const char *fp_suffix) { REAL_VALUE_TYPE real; char dec_str[64], buf[256]; /* Hex values are really cool and convenient, except that they're not supported in strict ISO C90 mode. First, the "p-" sequence is not valid as part of a preprocessor number. Second, we get a pedwarn from the preprocessor, which has no context, so we can't suppress the warning with __extension__. So instead what we do is construct the number in hex (because it's easy to get the exact correct value), parse it as a real, then print it back out as decimal. */ real_from_string (&real, hex_str); real_to_decimal (dec_str, &real, sizeof (dec_str), digits, 0); sprintf (buf, "%s=%s%s", macro, dec_str, fp_suffix); cpp_define (parse_in, buf); } /* Define MAX for TYPE based on the precision of the type. IS_LONG is 1 for type "long" and 2 for "long long". We have to handle unsigned types, since wchar_t might be unsigned. */ static void builtin_define_type_max (const char *macro, tree type, int is_long) { static const char *const values[] = { "127", "255", "32767", "65535", "2147483647", "4294967295", "9223372036854775807", "18446744073709551615", "170141183460469231731687303715884105727", "340282366920938463463374607431768211455" }; static const char *const suffixes[] = { "", "U", "L", "UL", "LL", "ULL" }; const char *value, *suffix; char *buf; size_t idx; /* Pre-rendering the values mean we don't have to futz with printing a multi-word decimal value. There are also a very limited number of precisions that we support, so it's really a waste of time. */ switch (TYPE_PRECISION (type)) { case 8: idx = 0; break; case 16: idx = 2; break; case 32: idx = 4; break; case 64: idx = 6; break; case 128: idx = 8; break; default: abort (); } value = values[idx + TYPE_UNSIGNED (type)]; suffix = suffixes[is_long * 2 + TYPE_UNSIGNED (type)]; buf = alloca (strlen (macro) + 1 + strlen (value) + strlen (suffix) + 1); sprintf (buf, "%s=%s%s", macro, value, suffix); cpp_define (parse_in, buf); } /* Utility to update paths from internal to external forms. Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file contains routines to update a path, both to canonicalize the directory format and to handle any prefix translation. This file must be compiled with -DPREFIX= to specify the "prefix" value used by configure. If a filename does not begin with this prefix, it will not be affected other than by directory canonicalization. Each caller of 'update_path' may specify both a filename and a translation prefix and consist of the name of the package that contains the file ("@GCC", "@BINUTIL", "@GNU", etc). 
If the prefix is not specified, the filename will only undergo directory canonicalization. If it is specified, the string given by PREFIX will be replaced by the specified prefix (with a '@' in front unless the prefix begins with a '$') and further translation will be done as follows until none of the two conditions below are met: 1) If the filename begins with '@', the string between the '@' and the end of the name or the first '/' or directory separator will be considered a "key" and looked up as follows: -- If this is a Win32 OS, then the Registry will be examined for an entry of "key" in HKEY_LOCAL_MACHINE\SOFTWARE\Free Software Foundation\ if found, that value will be used. defaults to GCC version string, but can be overridden at configuration time. -- If not found (or not a Win32 OS), the environment variable key_ROOT (the value of "key" concatenated with the constant "_ROOT") is tried. If that fails, then PREFIX (see above) is used. 2) If the filename begins with a '$', the rest of the string up to the end or the first '/' or directory separator will be used as an environment variable, whose value will be returned. Once all this is done, any '/' will be converted to DIR_SEPARATOR, if they are different. NOTE: using resolve_keyed_path under Win32 requires linking with advapi32.dll. */ #ifdef ONE_COMPILATION_UNIT #define PREFIX "/scratch2/smcc-extras/build/gcc-cvs/install" #endif static const char *std_prefix = PREFIX; static const char *get_key_value (char *); static char *translate_name (char *); static char *save_string2 (const char *, int); static void tr (char *, int, int); #if defined(_WIN32) && defined(ENABLE_WIN32_REGISTRY) static char *lookup_key (char *); static HKEY reg_key = (HKEY) INVALID_HANDLE_VALUE; #endif /* Given KEY, as above, return its value. */ static const char * get_key_value (char *key) { const char *prefix = 0; char *temp = 0; #if defined(_WIN32) && defined(ENABLE_WIN32_REGISTRY) prefix = lookup_key (key); #endif if (prefix == 0) prefix = getenv (temp = concat (key, "_ROOT", NULL)); if (prefix == 0) prefix = std_prefix; if (temp) free (temp); return prefix; } /* Return a copy of a string that has been placed in the heap. */ /* XXX duplicates version in libiberty's make-relative-prefix.c */ static char * save_string2 (const char *s, int len) { char *result = xmalloc (len + 1); memcpy (result, s, len); result[len] = 0; return result; } #if defined(_WIN32) && defined(ENABLE_WIN32_REGISTRY) /* Look up "key" in the registry, as above. */ static char * lookup_key (char *key) { char *dst; DWORD size; DWORD type; LONG res; if (reg_key == (HKEY) INVALID_HANDLE_VALUE) { res = RegOpenKeyExA (HKEY_LOCAL_MACHINE, "SOFTWARE", 0, KEY_READ, ®_key); if (res == ERROR_SUCCESS) res = RegOpenKeyExA (reg_key, "Free Software Foundation", 0, KEY_READ, ®_key); if (res == ERROR_SUCCESS) res = RegOpenKeyExA (reg_key, WIN32_REGISTRY_KEY, 0, KEY_READ, ®_key); if (res != ERROR_SUCCESS) { reg_key = (HKEY) INVALID_HANDLE_VALUE; return 0; } } size = 32; dst = xmalloc (size); res = RegQueryValueExA (reg_key, key, 0, &type, (LPBYTE) dst, &size); if (res == ERROR_MORE_DATA && type == REG_SZ) { dst = xrealloc (dst, size); res = RegQueryValueExA (reg_key, key, 0, &type, (LPBYTE) dst, &size); } if (type != REG_SZ || res != ERROR_SUCCESS) { free (dst); dst = 0; } return dst; } #endif /* If NAME, a malloc-ed string, starts with a '@' or '$', apply the translation rules above and return a newly malloc-ed name. Otherwise, return the given name. 
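For example, "@GCC/include" has key "GCC": if the GCC_ROOT environment variable (or, on Win32, the corresponding registry entry) is set to /opt/gcc the result is /opt/gcc/include, otherwise the configured PREFIX is substituted. Similarly "$HOME/include" expands the HOME environment variable. (The values /opt/gcc and HOME are only illustrative.)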
*/ static char * translate_name (char *name) { char code; char *key, *old_name; const char *prefix; int keylen; for (;;) { code = name[0]; if (code != '@' && code != '$') break; for (keylen = 0; (name[keylen + 1] != 0 && !IS_DIR_SEPARATOR (name[keylen + 1])); keylen++) ; key = alloca (keylen + 1); strncpy (key, &name[1], keylen); key[keylen] = 0; if (code == '@') { prefix = get_key_value (key); if (prefix == 0) prefix = std_prefix; } else prefix = getenv (key); if (prefix == 0) prefix = PREFIX; /* We used to strip trailing DIR_SEPARATORs here, but that can sometimes yield a result with no separator when one was coded and intended by the user, causing two path components to run together. */ old_name = name; name = concat (prefix, &name[keylen + 1], NULL); free (old_name); } return name; } /* In a NUL-terminated STRING, replace character C1 with C2 in-place. */ static void tr (char *string, int c1, int c2) { do { if (*string == c1) *string = c2; } while (*string++); } /* Update PATH using KEY if PATH starts with PREFIX as a directory. The returned string is always malloc-ed, and the caller is responsible for freeing it. */ char * update_path (const char *path, const char *key) { char *result, *p; const int len = strlen (std_prefix); if (! strncmp (path, std_prefix, len) && (IS_DIR_SEPARATOR(path[len]) || path[len] == '\0') && key != 0) { bool free_key = false; if (key[0] != '$') { key = concat ("@", key, NULL); free_key = true; } result = concat (key, &path[len], NULL); if (free_key) free ((char *) key); result = translate_name (result); } else result = xstrdup (path); #ifndef ALWAYS_STRIP_DOTDOT #define ALWAYS_STRIP_DOTDOT 0 #endif p = result; while (1) { char *src, *dest; p = strchr (p, '.'); if (p == NULL) break; /* Look for `/../' */ if (p[1] == '.' && IS_DIR_SEPARATOR (p[2]) && (p != result && IS_DIR_SEPARATOR (p[-1]))) { *p = 0; if (!ALWAYS_STRIP_DOTDOT && access (result, X_OK) == 0) { *p = '.'; break; } else { /* We can't access the dir, so we won't be able to access dir/.. either. Strip out `dir/../'. If `dir' turns out to be `.', strip one more path component. */ dest = p; do { --dest; while (dest != result && IS_DIR_SEPARATOR (*dest)) --dest; while (dest != result && !IS_DIR_SEPARATOR (dest[-1])) --dest; } while (dest != result && *dest == '.'); /* If we have something like `./..' or `/..', don't strip anything more. */ if (*dest == '.' || IS_DIR_SEPARATOR (*dest)) { *p = '.'; break; } src = p + 3; while (IS_DIR_SEPARATOR (*src)) ++src; p = dest; while ((*dest++ = *src++) != 0) ; } } else ++p; } #ifdef UPDATE_PATH_HOST_CANONICALIZE /* Perform host dependent canonicalization when needed. */ UPDATE_PATH_HOST_CANONICALIZE (result); #endif #ifdef DIR_SEPARATOR_2 /* Convert DIR_SEPARATOR_2 to DIR_SEPARATOR. */ if (DIR_SEPARATOR_2 != DIR_SEPARATOR) tr (result, DIR_SEPARATOR_2, DIR_SEPARATOR); #endif #if defined (DIR_SEPARATOR) && !defined (DIR_SEPARATOR_2) if (DIR_SEPARATOR != '/') tr (result, '/', DIR_SEPARATOR); #endif return result; } /* Reset the standard prefix. */ void set_std_prefix (const char *prefix, int len) { std_prefix = save_string2 (prefix, len); } /* Some code common to C and ObjC front ends. Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. 
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ static bool c_tree_printer (pretty_printer *, text_info *); bool c_missing_noreturn_ok_p (tree decl) { /* A missing noreturn is not ok for freestanding implementations and ok for the `main' function in hosted implementations. */ return flag_hosted && MAIN_NAME_P (DECL_ASSEMBLER_NAME (decl)); } /* We want to inline `extern inline' functions even if this would violate inlining limits. Some glibc and linux constructs depend on such functions always being inlined when optimizing. */ int c_disregard_inline_limits (tree fn) { if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn)) != NULL) return 1; return (!flag_really_no_inline && DECL_DECLARED_INLINE_P (fn) && DECL_EXTERNAL (fn)); } int c_cannot_inline_tree_fn (tree *fnp) { tree fn = *fnp; tree t; bool do_warning = (warn_inline && DECL_INLINE (fn) && DECL_DECLARED_INLINE_P (fn) && !DECL_IN_SYSTEM_HEADER (fn)); if (flag_really_no_inline && lookup_attribute ("always_inline", DECL_ATTRIBUTES (fn)) == NULL) { if (do_warning) warning ("%Jfunction '%F' can never be inlined because it " "is suppressed using -fno-inline", fn, fn); goto cannot_inline; } /* Don't auto-inline anything that might not be bound within this unit of translation. */ if (!DECL_DECLARED_INLINE_P (fn) && !targetm.binds_local_p (fn)) { if (do_warning) warning ("%Jfunction '%F' can never be inlined because it might not " "be bound within this unit of translation", fn, fn); goto cannot_inline; } if (! function_attribute_inlinable_p (fn)) { if (do_warning) warning ("%Jfunction '%F' can never be inlined because it uses " "attributes conflicting with inlining", fn, fn); goto cannot_inline; } /* If a function has pending sizes, we must not defer its compilation, and we can't inline it as a tree. */ if (fn == current_function_decl) { t = get_pending_sizes (); put_pending_sizes (t); if (t) { if (do_warning) warning ("%Jfunction '%F' can never be inlined because it has " "pending sizes", fn, fn); goto cannot_inline; } } if (! DECL_FILE_SCOPE_P (fn)) { /* If a nested function has pending sizes, we may have already saved them. */ if (DECL_LANG_SPECIFIC (fn)->pending_sizes) { if (do_warning) warning ("%Jnested function '%F' can never be inlined because it " "has possibly saved pending sizes", fn, fn); goto cannot_inline; } } return 0; cannot_inline: DECL_UNINLINABLE (fn) = 1; return 1; } /* Called from check_global_declarations. */ bool c_warn_unused_global_decl (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl)) return false; if (DECL_IN_SYSTEM_HEADER (decl)) return false; return true; } /* Initialization common to C and Objective-C front ends. */ bool c_objc_common_init (void) { static const enum tree_code stmt_codes[] = { c_common_stmt_codes }; INIT_STATEMENT_CODES (stmt_codes); c_init_decl_processing (); if (c_common_init () == false) return false; /* These were not defined in the Objective-C front end, but I'm putting them here anyway. The diagnostic format decoder might want an enhanced ObjC implementation. 
*/ diagnostic_format_decoder (global_dc) = &c_tree_printer; /* If still unspecified, make it match -std=c99 (allowing for -pedantic-errors). */ if (mesg_implicit_function_declaration < 0) { if (flag_isoc99) mesg_implicit_function_declaration = flag_pedantic_errors ? 2 : 1; else mesg_implicit_function_declaration = 0; } return true; } /* Synthesize a function which calls all the global ctors or global dtors in this file. */ static void build_cdtor (int method_type, tree cdtors) { tree body; body = push_stmt_list (); for (; cdtors; cdtors = TREE_CHAIN (cdtors)) add_stmt (build_function_call (TREE_VALUE (cdtors), NULL_TREE)); body = pop_stmt_list (body); cgraph_build_static_cdtor (method_type, body); } /* Called at end of parsing, but before end-of-file processing. */ void c_objc_common_finish_file (void) { if (pch_file) c_common_write_pch (); if (static_ctors) { build_cdtor ('I', static_ctors); static_ctors = 0; } if (static_dtors) { build_cdtor ('D', static_dtors); static_dtors = 0; } cgraph_finalize_compilation_unit (); cgraph_optimize (); if (flag_mudflap) mudflap_finish_file (); } /* Called during diagnostic message formatting process to print a source-level entity onto BUFFER. The meaning of the format specifiers is as follows: %D: a general decl, %E: An expression, %F: a function declaration, %T: a type. These format specifiers form a subset of the format specifiers set used by the C++ front-end. Please notice when called, the `%' part was already skipped by the diagnostic machinery. */ static bool c_tree_printer (pretty_printer *pp, text_info *text) { tree t = va_arg (*text->args_ptr, tree); tree name; const char *n = "({anonymous})"; c_pretty_printer *cpp = (c_pretty_printer *) pp; pp->padding = pp_none; switch (*text->format_spec) { case 'D': case 'F': if (DECL_NAME (t)) n = lang_hooks.decl_printable_name (t, 2); break; case 'T': if (TYPE_P (t)) name = TYPE_NAME (t); else abort (); if (name && TREE_CODE (name) == TYPE_DECL) { if (DECL_NAME (name)) pp_string (cpp, lang_hooks.decl_printable_name (name, 2)); else pp_type_id (cpp, t); return true; } else { pp_type_id (cpp, t); return true; } break; case 'E': if (TREE_CODE (t) == IDENTIFIER_NODE) n = IDENTIFIER_POINTER (t); else return false; break; default: return false; } pp_string (cpp, n); return true; } tree c_objc_common_truthvalue_conversion (tree expr) { retry: switch (TREE_CODE (TREE_TYPE (expr))) { case ARRAY_TYPE: expr = default_conversion (expr); if (TREE_CODE (TREE_TYPE (expr)) != ARRAY_TYPE) goto retry; error ("used array that cannot be converted to pointer where scalar is required"); return error_mark_node; case RECORD_TYPE: error ("used struct type value where scalar is required"); return error_mark_node; case UNION_TYPE: error ("used union type value where scalar is required"); return error_mark_node; default: break; } return c_common_truthvalue_conversion (expr); } /* In C and ObjC, all decls have "C" linkage. */ bool has_c_linkage (tree decl ATTRIBUTE_UNUSED) { return true; } void c_initialize_diagnostics (diagnostic_context *context) { pretty_printer *base = context->printer; c_pretty_printer *pp = xmalloc (sizeof (c_pretty_printer)); memcpy (pp_base (pp), base, sizeof (pretty_printer)); pp_c_pretty_printer_init (pp); context->printer = (pretty_printer *) pp; /* It is safe to free this object because it was previously malloc()'d. */ free (base); } /* Tree-dumping functionality for C-family languages. Copyright (C) 2002, 2004 Free Software Foundation, Inc. Written by Mark Mitchell This file is part of GCC. 
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Dump information common to statements from STMT. */ void dump_stmt (dump_info_p di, tree t) { if (EXPR_HAS_LOCATION (t)) dump_int (di, "line", EXPR_LINENO (t)); } /* Dump any C-specific tree codes and attributes of common codes. */ bool c_dump_tree (void *dump_info, tree t) { enum tree_code code; dump_info_p di = (dump_info_p) dump_info; /* Figure out what kind of node this is. */ code = TREE_CODE (t); switch (code) { case FIELD_DECL: if (DECL_C_BIT_FIELD (t)) dump_string (di, "bitfield"); break; case BREAK_STMT: case CONTINUE_STMT: dump_stmt (di, t); break; case DO_STMT: dump_stmt (di, t); dump_child ("body", DO_BODY (t)); dump_child ("cond", DO_COND (t)); break; case EXPR_STMT: dump_stmt (di, t); dump_child ("expr", EXPR_STMT_EXPR (t)); break; case FOR_STMT: dump_stmt (di, t); dump_child ("init", FOR_INIT_STMT (t)); dump_child ("cond", FOR_COND (t)); dump_child ("expr", FOR_EXPR (t)); dump_child ("body", FOR_BODY (t)); break; case SWITCH_STMT: dump_stmt (di, t); dump_child ("cond", SWITCH_COND (t)); dump_child ("body", SWITCH_BODY (t)); break; case WHILE_STMT: dump_stmt (di, t); dump_child ("cond", WHILE_COND (t)); dump_child ("body", WHILE_BODY (t)); break; case STMT_EXPR: dump_child ("stmt", STMT_EXPR_STMT (t)); break; default: break; } return false; } /* Precompiled header implementation for the C languages. Copyright (C) 2000, 2002, 2003, 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #ifdef ONE_COMPILATION_UNIT #define HOST_MACHINE "i686-pc-linux-gnu" #define TARGET_MACHINE "i686-pc-linux-gnu" #endif /* This is a list of flag variables that must match exactly, and their names for the error message. The possible values for *flag_var must fit in a 'signed char'. */ static const struct c_pch_matching { int *flag_var; const char *flag_name; } pch_matching[] = { { &flag_exceptions, "-fexceptions" }, { &flag_unit_at_a_time, "-funit-at-a-time" } }; enum { MATCH_SIZE = ARRAY_SIZE (pch_matching) }; /* This structure is read very early when validating the PCH, and might be read for a PCH which is for a completely different compiler for a different operating system. Thus, it should really only contain 'unsigned char' entries, at least in the initial entries. 
If you add or change entries before version_length, you should increase the version number in get_ident(). There are a bunch of fields named *_length; those are lengths of data that follows this structure in the same order as the fields in the structure. */ struct c_pch_validity { unsigned char host_machine_length; unsigned char target_machine_length; unsigned char version_length; unsigned char debug_info_type; signed char match[MATCH_SIZE]; void (*pch_init) (void); size_t target_data_length; }; struct c_pch_header { unsigned long asm_size; }; #define IDENT_LENGTH 8 /* The file we'll be writing the PCH to. */ static FILE *pch_outfile; /* The position in the assembler output file when pch_init was called. */ static long asm_file_startpos; /* The host and target machines. */ static const char host_machine[] = HOST_MACHINE; static const char target_machine[] = TARGET_MACHINE; static const char *get_ident (void); /* Compute an appropriate 8-byte magic number for the PCH file, so that utilities like file(1) can identify it, and so that GCC can quickly ignore non-PCH files and PCH files that are of a completely different format. */ static const char * get_ident(void) { static char result[IDENT_LENGTH]; static const char template[IDENT_LENGTH] = "gpch.012"; static const char c_language_chars[] = "Co+O"; memcpy (result, template, IDENT_LENGTH); result[4] = c_language_chars[c_language]; return result; } /* Prepare to write a PCH file. This is called at the start of compilation. */ void pch_init (void) { FILE *f; struct c_pch_validity v; void *target_validity; static const char partial_pch[IDENT_LENGTH] = "gpcWrite"; if (! pch_file) return; f = fopen (pch_file, "w+b"); if (f == NULL) fatal_error ("can't create precompiled header %s: %m", pch_file); pch_outfile = f; if (strlen (host_machine) > 255 || strlen (target_machine) > 255 || strlen (version_string) > 255) abort (); v.host_machine_length = strlen (host_machine); v.target_machine_length = strlen (target_machine); v.version_length = strlen (version_string); v.debug_info_type = write_symbols; { size_t i; for (i = 0; i < MATCH_SIZE; i++) { v.match[i] = *pch_matching[i].flag_var; if (v.match[i] != *pch_matching[i].flag_var) abort (); } } v.pch_init = &pch_init; target_validity = targetm.get_pch_validity (&v.target_data_length); if (fwrite (partial_pch, IDENT_LENGTH, 1, f) != 1 || fwrite (&v, sizeof (v), 1, f) != 1 || fwrite (host_machine, v.host_machine_length, 1, f) != 1 || fwrite (target_machine, v.target_machine_length, 1, f) != 1 || fwrite (version_string, v.version_length, 1, f) != 1 || fwrite (target_validity, v.target_data_length, 1, f) != 1) fatal_error ("can't write to %s: %m", pch_file); /* We need to be able to re-read the output. */ /* The driver always provides a valid -o option. */ if (asm_file_name == NULL || strcmp (asm_file_name, "-") == 0) fatal_error ("`%s' is not a valid output file", asm_file_name); asm_file_startpos = ftell (asm_out_file); /* Let the debugging format deal with the PCHness. */ (*debug_hooks->handle_pch) (0); cpp_save_state (parse_in, f); } /* Write the PCH file. This is called at the end of a compilation which will produce a PCH file. 
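For reference, the data laid down by pch_init and finished off here is, in order: the 8-byte identifier (the "gpcWrite" placeholder until the very end), the c_pch_validity structure, the host, target and version strings, the target validity data, and whatever cpp_save_state arranges to write; this function then appends the dependency information from cpp_write_pch_deps, a c_pch_header giving the size of the saved assembly, a copy of that assembly, the GC heap image from gt_pch_save and the state from cpp_write_pch_state, and finally seeks back to offset 0 to replace the placeholder with the real identifier from get_ident.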
*/ void c_common_write_pch (void) { char *buf; long asm_file_end; long written; struct c_pch_header h; (*debug_hooks->handle_pch) (1); cpp_write_pch_deps (parse_in, pch_outfile); asm_file_end = ftell (asm_out_file); h.asm_size = asm_file_end - asm_file_startpos; if (fwrite (&h, sizeof (h), 1, pch_outfile) != 1) fatal_error ("can't write %s: %m", pch_file); buf = xmalloc (16384); if (fseek (asm_out_file, asm_file_startpos, SEEK_SET) != 0) fatal_error ("can't seek in %s: %m", asm_file_name); for (written = asm_file_startpos; written < asm_file_end; ) { long size = asm_file_end - written; if (size > 16384) size = 16384; if (fread (buf, size, 1, asm_out_file) != 1) fatal_error ("can't read %s: %m", asm_file_name); if (fwrite (buf, size, 1, pch_outfile) != 1) fatal_error ("can't write %s: %m", pch_file); written += size; } free (buf); /* asm_out_file can be written afterwards, so fseek to clear _IOREAD flag. */ if (fseek (asm_out_file, 0, SEEK_END) != 0) fatal_error ("can't seek in %s: %m", asm_file_name); gt_pch_save (pch_outfile); cpp_write_pch_state (parse_in, pch_outfile); if (fseek (pch_outfile, 0, SEEK_SET) != 0 || fwrite (get_ident (), IDENT_LENGTH, 1, pch_outfile) != 1) fatal_error ("can't write %s: %m", pch_file); fclose (pch_outfile); } /* Check the PCH file called NAME, open on FD, to see if it can be used in this compilation. Return 1 if valid, 0 if the file can't be used now but might be if it's seen later in the compilation, and 2 if this file could never be used in the compilation. */ int c_common_valid_pch (cpp_reader *pfile, const char *name, int fd) { int sizeread; int result; char ident[IDENT_LENGTH]; char short_strings[256 * 3]; int strings_length; const char *pch_ident; struct c_pch_validity v; /* Perform a quick test of whether this is a valid precompiled header for the current language. */ sizeread = read (fd, ident, IDENT_LENGTH); if (sizeread == -1) fatal_error ("can't read %s: %m", name); else if (sizeread != IDENT_LENGTH) return 2; pch_ident = get_ident(); if (memcmp (ident, pch_ident, IDENT_LENGTH) != 0) { if (cpp_get_options (pfile)->warn_invalid_pch) { if (memcmp (ident, pch_ident, 5) == 0) /* It's a PCH, for the right language, but has the wrong version. */ cpp_error (pfile, CPP_DL_WARNING, "%s: not compatible with this GCC version", name); else if (memcmp (ident, pch_ident, 4) == 0) /* It's a PCH for the wrong language. */ cpp_error (pfile, CPP_DL_WARNING, "%s: not for %s", name, lang_hooks.name); else /* Not any kind of PCH. */ cpp_error (pfile, CPP_DL_WARNING, "%s: not a PCH file", name); } return 2; } /* At this point, we know it's a PCH file, so it ought to be long enough that we can read a c_pch_validity structure. 
*/ if (read (fd, &v, sizeof (v)) != sizeof (v)) fatal_error ("can't read %s: %m", name); strings_length = (v.host_machine_length + v.target_machine_length + v.version_length); if (read (fd, short_strings, strings_length) != strings_length) fatal_error ("can't read %s: %m", name); if (v.host_machine_length != strlen (host_machine) || memcmp (host_machine, short_strings, strlen (host_machine)) != 0) { if (cpp_get_options (pfile)->warn_invalid_pch) cpp_error (pfile, CPP_DL_WARNING, "%s: created on host `%.*s', but used on host `%s'", name, v.host_machine_length, short_strings, host_machine); return 2; } if (v.target_machine_length != strlen (target_machine) || memcmp (target_machine, short_strings + v.host_machine_length, strlen (target_machine)) != 0) { if (cpp_get_options (pfile)->warn_invalid_pch) cpp_error (pfile, CPP_DL_WARNING, "%s: created for target `%.*s', but used for target `%s'", name, v.target_machine_length, short_strings + v.host_machine_length, target_machine); return 2; } if (v.version_length != strlen (version_string) || memcmp (version_string, (short_strings + v.host_machine_length + v.target_machine_length), v.version_length) != 0) { if (cpp_get_options (pfile)->warn_invalid_pch) cpp_error (pfile, CPP_DL_WARNING, "%s: created by version `%.*s', but this is version `%s'", name, v.version_length, (short_strings + v.host_machine_length + v.target_machine_length), version_string); return 2; } /* The allowable debug info combinations are that either the PCH file was built with the same as is being used now, or the PCH file was built for some kind of debug info but now none is in use. */ if (v.debug_info_type != write_symbols && write_symbols != NO_DEBUG) { if (cpp_get_options (pfile)->warn_invalid_pch) cpp_error (pfile, CPP_DL_WARNING, "%s: created with -g%s, but used with -g%s", name, debug_type_names[v.debug_info_type], debug_type_names[write_symbols]); return 2; } /* Check flags that must match exactly. */ { size_t i; for (i = 0; i < MATCH_SIZE; i++) if (*pch_matching[i].flag_var != v.match[i]) { if (cpp_get_options (pfile)->warn_invalid_pch) cpp_error (pfile, CPP_DL_WARNING, "%s: settings for %s do not match", name, pch_matching[i].flag_name); return 2; } } /* If the text segment was not loaded at the same address as it was when the PCH file was created, function pointers loaded from the PCH will not be valid. We could in theory remap all the function pointers, but no support for that exists at present. */ if (v.pch_init != &pch_init) { if (cpp_get_options (pfile)->warn_invalid_pch) cpp_error (pfile, CPP_DL_WARNING, "%s: had text segment at different address", name); return 2; } /* Check the target-specific validity data. */ { void *this_file_data = xmalloc (v.target_data_length); const char *msg; if ((size_t) read (fd, this_file_data, v.target_data_length) != v.target_data_length) fatal_error ("can't read %s: %m", name); msg = targetm.pch_valid_p (this_file_data, v.target_data_length); free (this_file_data); if (msg != NULL) { if (cpp_get_options (pfile)->warn_invalid_pch) cpp_error (pfile, CPP_DL_WARNING, "%s: %s", name, msg); return 2; } } /* Check the preprocessor macros are the same as when the PCH was generated. */ result = cpp_valid_state (pfile, name, fd); if (result == -1) return 2; else return result == 0; } /* Load in the PCH file NAME, open on FD. It was originally searched for by ORIG_NAME. 
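Reading mirrors the write side: after the identity and validity checks in c_common_valid_pch, this function copies the saved assembly (h.asm_size bytes) straight into asm_out_file, or merely seeks past it when only preprocessing, then restores the GC heap image with gt_pch_restore and the preprocessor macro state with cpp_prepare_state and cpp_read_state.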
*/ void c_common_read_pch (cpp_reader *pfile, const char *name, int fd, const char *orig_name ATTRIBUTE_UNUSED) { FILE *f; struct c_pch_header h; struct save_macro_data *smd; f = fdopen (fd, "rb"); if (f == NULL) { cpp_errno (pfile, CPP_DL_ERROR, "calling fdopen"); return; } cpp_get_callbacks (parse_in)->valid_pch = NULL; if (fread (&h, sizeof (h), 1, f) != 1) { cpp_errno (pfile, CPP_DL_ERROR, "reading"); return; } if (!flag_preprocess_only) { unsigned long written; char * buf = xmalloc (16384); for (written = 0; written < h.asm_size; ) { long size = h.asm_size - written; if (size > 16384) size = 16384; if (fread (buf, size, 1, f) != 1 || fwrite (buf, size, 1, asm_out_file) != 1) cpp_errno (pfile, CPP_DL_ERROR, "reading"); written += size; } free (buf); } else { /* If we're preprocessing, don't write to a NULL asm_out_file. */ if (fseek (f, h.asm_size, SEEK_CUR) != 0) cpp_errno (pfile, CPP_DL_ERROR, "seeking"); } cpp_prepare_state (pfile, &smd); gt_pch_restore (f); if (cpp_read_state (pfile, name, f, smd) != 0) return; fclose (f); } /* Indicate that no more PCH files should be read. */ void c_common_no_more_pch (void) { if (cpp_get_callbacks (parse_in)->valid_pch) { cpp_get_callbacks (parse_in)->valid_pch = NULL; host_hooks.gt_pch_use_address (NULL, 0, -1, 0); } } /* Handle #pragma GCC pch_preprocess, to load in the PCH file. */ #ifndef O_BINARY # define O_BINARY 0 #endif void c_common_pch_pragma (cpp_reader *pfile) { tree name_t; const char *name; int fd; if (c_lex (&name_t) != CPP_STRING) { error ("malformed #pragma GCC pch_preprocess, ignored"); return; } if (! cpp_get_options (pfile)->preprocessed) { error ("pch_preprocess pragma should only be used with -fpreprocessed"); inform ("use #include instead"); return; } name = TREE_STRING_POINTER (name_t); fd = open (name, O_RDONLY | O_BINARY, 0666); if (fd == -1) fatal_error ("%s: couldn't open PCH file: %m\n", name); if (c_common_valid_pch (pfile, name, fd) != 1) { if (!cpp_get_options (pfile)->warn_invalid_pch) inform ("use -Winvalid-pch for more information"); fatal_error ("%s: PCH file was invalid", name); } c_common_read_pch (pfile, name, fd, name); close (fd); } /* Tree lowering pass. This pass gimplifies the tree representation built by the C-based front ends. The structure of gimplified, or language-independent, trees is dictated by the grammar described in this file. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Lowering of expressions contributed by Sebastian Pop Re-written to support lowering of whole function trees, documentation and miscellaneous cleanups by Diego Novillo This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The gimplification pass converts the language-dependent trees (ld-trees) emitted by the parser into language-independent trees (li-trees) that are the target of SSA analysis and transformations. 
Language-independent trees are based on the SIMPLE intermediate representation used in the McCAT compiler framework: "Designing the McCAT Compiler Based on a Family of Structured Intermediate Representations," L. Hendren, C. Donawa, M. Emami, G. Gao, Justiani, and B. Sridharan, Proceedings of the 5th International Workshop on Languages and Compilers for Parallel Computing, no. 757 in Lecture Notes in Computer Science, New Haven, Connecticut, pp. 406-420, Springer-Verlag, August 3-5, 1992. http://www-acaps.cs.mcgill.ca/info/McCAT/McCAT.html Basically, we walk down gimplifying the nodes that we encounter. As we walk back up, we check that they fit our constraints, and copy them into temporaries if not. */ /* Local declarations. */ enum bc_t { bc_break = 0, bc_continue = 1 }; static struct c_gimplify_ctx { /* For handling break and continue. */ tree current_bc_label; tree bc_id[2]; } *ctxp; static void push_context (void) { if (ctxp) abort (); ctxp = (struct c_gimplify_ctx *) xcalloc (1, sizeof (struct c_gimplify_ctx)); ctxp->bc_id[bc_continue] = get_identifier ("continue"); ctxp->bc_id[bc_break] = get_identifier ("break"); } static void pop_context (void) { if (!ctxp || ctxp->current_bc_label) abort (); free (ctxp); ctxp = NULL; } /* Gimplification of statement trees. */ /* Convert the tree representation of FNDECL from C frontend trees to GENERIC. */ void c_genericize (tree fndecl) { FILE *dump_file; int local_dump_flags; struct cgraph_node *cgn; /* Dump the C-specific tree IR. */ dump_file = dump_begin (TDI_original, &local_dump_flags); if (dump_file) { fprintf (dump_file, "\n;; Function %s", lang_hooks.decl_printable_name (fndecl, 2)); fprintf (dump_file, " (%s)\n", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (fndecl))); fprintf (dump_file, ";; enabled by -%s\n", dump_flag_name (TDI_original)); fprintf (dump_file, "\n"); if (local_dump_flags & TDF_RAW) dump_node (DECL_SAVED_TREE (fndecl), TDF_SLIM | local_dump_flags, dump_file); else print_c_tree (dump_file, DECL_SAVED_TREE (fndecl)); fprintf (dump_file, "\n"); dump_end (TDI_original, dump_file); } /* Go ahead and gimplify for now. */ push_context (); gimplify_function_tree (fndecl); pop_context (); /* Dump the genericized tree IR. */ dump_function (TDI_generic, fndecl); /* Genericize all nested functions now. We do things in this order so that items like VLA sizes are expanded properly in the context of the correct function. */ cgn = cgraph_node (fndecl); for (cgn = cgn->nested; cgn ; cgn = cgn->next_nested) c_genericize (cgn->decl); } static void add_block_to_enclosing (tree block) { tree enclosing; for (enclosing = gimple_current_bind_expr (); enclosing; enclosing = TREE_CHAIN (enclosing)) if (BIND_EXPR_BLOCK (enclosing)) break; enclosing = BIND_EXPR_BLOCK (enclosing); BLOCK_SUBBLOCKS (enclosing) = chainon (BLOCK_SUBBLOCKS (enclosing), block); } /* Genericize a scope by creating a new BIND_EXPR. BLOCK is either a BLOCK representing the scope or a chain of _DECLs. In the latter case, we need to create a new BLOCK and add it to the BLOCK_SUBBLOCKS of the enclosing block. BODY is a chain of C _STMT nodes for the contents of the scope, to be genericized. 
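For example, a braced scope such as { int i = f (); g (i); } becomes BIND_EXPR <void_type_node, i, the genericized body, BLOCK> with TREE_SIDE_EFFECTS set; when there are no decls and no BLOCK, the (possibly empty-statement) body is returned unchanged.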
*/ tree c_build_bind_expr (tree block, tree body) { tree decls, bind; if (block == NULL_TREE) decls = NULL_TREE; else if (TREE_CODE (block) == BLOCK) decls = BLOCK_VARS (block); else { decls = block; if (DECL_ARTIFICIAL (decls)) block = NULL_TREE; else { block = make_node (BLOCK); BLOCK_VARS (block) = decls; add_block_to_enclosing (block); } } if (!body) body = build_empty_stmt (); if (decls || block) { bind = build (BIND_EXPR, void_type_node, decls, body, block); TREE_SIDE_EFFECTS (bind) = 1; } else bind = body; return bind; } /* Gimplify an EXPR_STMT node. STMT is the statement node. PRE_P points to the list where side effects that must happen before STMT should be stored. POST_P points to the list where side effects that must happen after STMT should be stored. */ static enum gimplify_status gimplify_expr_stmt (tree *stmt_p) { tree stmt = EXPR_STMT_EXPR (*stmt_p); if (stmt == error_mark_node) stmt = NULL; /* Gimplification of a statement expression will nullify the statement if all its side effects are moved to *PRE_P and *POST_P. In this case we will not want to emit the gimplified statement. However, we may still want to emit a warning, so we do that before gimplification. */ if (stmt && (extra_warnings || warn_unused_value)) { if (!TREE_SIDE_EFFECTS (stmt)) { if (!IS_EMPTY_STMT (stmt) && !VOID_TYPE_P (TREE_TYPE (stmt)) && !TREE_NO_WARNING (stmt)) warning ("statement with no effect"); } else if (warn_unused_value) warn_if_unused_value (stmt, input_location); } if (stmt == NULL_TREE) stmt = alloc_stmt_list (); *stmt_p = stmt; return GS_OK; } /* Begin a scope which can be exited by a break or continue statement. BC indicates which. Just creates a label and pushes it into the current context. */ static tree begin_bc_block (enum bc_t bc) { tree label = create_artificial_label (); DECL_NAME (label) = ctxp->bc_id[bc]; TREE_CHAIN (label) = ctxp->current_bc_label; ctxp->current_bc_label = label; return label; } /* Finish a scope which can be exited by a break or continue statement. LABEL was returned from the most recent call to begin_bc_block. BODY is an expression for the contents of the scope. If we saw a break (or continue) in the scope, append a LABEL_EXPR to body. Otherwise, just forget the label. */ static tree finish_bc_block (tree label, tree body) { if (label != ctxp->current_bc_label) abort (); if (TREE_USED (label)) { tree t, sl = NULL; /* Clear the name so flow can delete the label. */ DECL_NAME (label) = NULL_TREE; t = build1 (LABEL_EXPR, void_type_node, label); append_to_statement_list (body, &sl); append_to_statement_list (t, &sl); body = sl; } ctxp->current_bc_label = TREE_CHAIN (label); TREE_CHAIN (label) = NULL_TREE; return body; } /* Build a GOTO_EXPR to represent a break or continue statement. BC indicates which. */ static tree build_bc_goto (enum bc_t bc) { tree label; tree target_name = ctxp->bc_id[bc]; /* Look for the appropriate type of label. */ for (label = ctxp->current_bc_label; label; label = TREE_CHAIN (label)) if (DECL_NAME (label) == target_name) break; if (label == NULL_TREE) { if (bc == bc_break) error ("break statement not within loop or switch"); else error ("continue statement not within loop or switch"); return NULL_TREE; } /* Mark the label used for finish_bc_block. */ TREE_USED (label) = 1; return build1 (GOTO_EXPR, void_type_node, label); } /* Build a generic representation of one of the C loop forms. COND is the loop condition or NULL_TREE. BODY is the (possibly compound) statement controlled by the loop. 
INCR is the increment expression of a for-loop, or NULL_TREE. COND_IS_FIRST indicates whether the condition is evaluated before the loop body as in while and for loops, or after the loop body as in do-while loops. */ static tree gimplify_c_loop (tree cond, tree body, tree incr, bool cond_is_first) { tree top, entry, exit, cont_block, break_block, stmt_list, t; location_t stmt_locus; stmt_locus = input_location; /* Detect do { ... } while (0) and don't generate loop construct. */ if (!cond_is_first && cond && integer_zerop (cond)) top = cond = NULL; else { /* If we use a LOOP_EXPR here, we have to feed the whole thing back through the main gimplifier to lower it. Given that we have to gimplify the loop body NOW so that we can resolve break/continue stmts, seems easier to just expand to gotos. */ top = build1 (LABEL_EXPR, void_type_node, NULL_TREE); } break_block = begin_bc_block (bc_break); if (top) { /* If we have an exit condition, then we build an IF with gotos either out of the loop, or to the top of it. If there's no exit condition, then we just build a jump back to the top. */ exit = build_and_jump (&LABEL_EXPR_LABEL (top)); if (cond) { t = build_bc_goto (bc_break); exit = build (COND_EXPR, void_type_node, cond, exit, t); exit = fold (exit); gimplify_stmt (&exit); } } else exit = NULL_TREE; cont_block = begin_bc_block (bc_continue); gimplify_stmt (&body); gimplify_stmt (&incr); body = finish_bc_block (cont_block, body); stmt_list = NULL; if (cond_is_first && cond) { entry = build1 (LABEL_EXPR, void_type_node, NULL_TREE); t = build_and_jump (&LABEL_EXPR_LABEL (entry)); append_to_statement_list (t, &stmt_list); } else entry = NULL_TREE; append_to_statement_list (top, &stmt_list); append_to_statement_list (body, &stmt_list); append_to_statement_list (incr, &stmt_list); append_to_statement_list (entry, &stmt_list); append_to_statement_list (exit, &stmt_list); annotate_all_with_locus (&stmt_list, stmt_locus); return finish_bc_block (break_block, stmt_list); } /* Gimplify a FOR_STMT node. Move the stuff in the for-init-stmt into the prequeue and hand off to gimplify_c_loop. */ static enum gimplify_status gimplify_for_stmt (tree *stmt_p, tree *pre_p) { tree stmt = *stmt_p; if (FOR_INIT_STMT (stmt)) gimplify_and_add (FOR_INIT_STMT (stmt), pre_p); *stmt_p = gimplify_c_loop (FOR_COND (stmt), FOR_BODY (stmt), FOR_EXPR (stmt), 1); return GS_ALL_DONE; } /* Gimplify a WHILE_STMT node. */ static enum gimplify_status gimplify_while_stmt (tree *stmt_p) { tree stmt = *stmt_p; *stmt_p = gimplify_c_loop (WHILE_COND (stmt), WHILE_BODY (stmt), NULL_TREE, 1); return GS_ALL_DONE; } /* Gimplify a DO_STMT node. */ static enum gimplify_status gimplify_do_stmt (tree *stmt_p) { tree stmt = *stmt_p; *stmt_p = gimplify_c_loop (DO_COND (stmt), DO_BODY (stmt), NULL_TREE, 0); return GS_ALL_DONE; } /* Genericize a SWITCH_STMT by turning it into a SWITCH_EXPR. */ static enum gimplify_status gimplify_switch_stmt (tree *stmt_p) { tree stmt = *stmt_p; tree break_block, body; location_t stmt_locus = input_location; break_block = begin_bc_block (bc_break); body = SWITCH_BODY (stmt); if (!body) body = build_empty_stmt (); *stmt_p = build (SWITCH_EXPR, SWITCH_TYPE (stmt), SWITCH_COND (stmt), body, NULL_TREE); SET_EXPR_LOCATION (*stmt_p, stmt_locus); gimplify_stmt (stmt_p); *stmt_p = finish_bc_block (break_block, *stmt_p); return GS_ALL_DONE; } /* Gimplification of expression trees. */ /* Gimplify a C99 compound literal expression. 
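   A sketch only (the name D is invented here for exposition): for

     p = (int [2]){1, 2};

   the literal's anonymous decl D and its initializer are emitted ahead
   of the assignment, which is then rewritten to read D instead of the
   literal.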
This just means adding the DECL_EXPR before the current EXPR_STMT and using its anonymous decl instead. */ static enum gimplify_status gimplify_compound_literal_expr (tree *expr_p, tree *pre_p) { tree decl_s = COMPOUND_LITERAL_EXPR_DECL_STMT (*expr_p); tree decl = DECL_EXPR_DECL (decl_s); /* This decl isn't mentioned in the enclosing block, so add it to the list of temps. FIXME it seems a bit of a kludge to say that anonymous artificial vars aren't pushed, but everything else is. */ if (DECL_NAME (decl) == NULL_TREE) gimple_add_tmp_var (decl); gimplify_and_add (decl_s, pre_p); *expr_p = decl; return GS_OK; } /* Do C-specific gimplification. Args are as for gimplify_expr. */ int c_gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p ATTRIBUTE_UNUSED) { enum tree_code code = TREE_CODE (*expr_p); switch (code) { case DECL_EXPR: /* This is handled mostly by gimplify.c, but we have to deal with not warning about int x = x; as it is a GCC extension to turn off this warning but only if warn_init_self is zero. */ if (TREE_CODE (DECL_EXPR_DECL (*expr_p)) == VAR_DECL && !DECL_EXTERNAL (DECL_EXPR_DECL (*expr_p)) && !TREE_STATIC (DECL_EXPR_DECL (*expr_p)) && (DECL_INITIAL (DECL_EXPR_DECL (*expr_p)) == DECL_EXPR_DECL (*expr_p)) && !warn_init_self) TREE_NO_WARNING (DECL_EXPR_DECL (*expr_p)) = 1; return GS_UNHANDLED; case COMPOUND_LITERAL_EXPR: return gimplify_compound_literal_expr (expr_p, pre_p); case FOR_STMT: return gimplify_for_stmt (expr_p, pre_p); case WHILE_STMT: return gimplify_while_stmt (expr_p); case DO_STMT: return gimplify_do_stmt (expr_p); case SWITCH_STMT: return gimplify_switch_stmt (expr_p); case EXPR_STMT: return gimplify_expr_stmt (expr_p); case CONTINUE_STMT: *expr_p = build_bc_goto (bc_continue); return GS_ALL_DONE; case BREAK_STMT: *expr_p = build_bc_goto (bc_break); return GS_ALL_DONE; default: return GS_UNHANDLED; } } /* Subroutines common to both C and C++ pretty-printers. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. Contributed by Gabriel Dos Reis This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* The pretty-printer code is primarily designed to closely follow (GNU) C and C++ grammars. That is to be contrasted with spaghetti codes we used to have in the past. Following a structured approach (preferably the official grammars) is believed to make it much easier to add extensions and nifty pretty-printing effects that takes expression or declaration contexts into account. 
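   Concretely, printing follows the precedence chain of the grammar; an
   expression such as

     a + b * c

   enters at pp_c_expression, which dispatches to
   pp_c_additive_expression, which in turn prints its right operand
   through pp_multiplicative_expression, so operator precedence and the
   spots where parentheses are required fall out of the grammar
   structure rather than out of ad-hoc checks.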
*/ #define pp_c_maybe_whitespace(PP) \ do { \ if (pp_base (PP)->padding == pp_before) \ pp_c_whitespace (PP); \ } while (0) /* literal */ static void pp_c_char (c_pretty_printer *, int); /* postfix-expression */ static void pp_c_initializer_list (c_pretty_printer *, tree); static void pp_c_brace_enclosed_initializer_list (c_pretty_printer *, tree); static void pp_c_multiplicative_expression (c_pretty_printer *, tree); static void pp_c_additive_expression (c_pretty_printer *, tree); static void pp_c_shift_expression (c_pretty_printer *, tree); static void pp_c_relational_expression (c_pretty_printer *, tree); static void pp_c_equality_expression (c_pretty_printer *, tree); static void pp_c_and_expression (c_pretty_printer *, tree); static void pp_c_exclusive_or_expression (c_pretty_printer *, tree); static void pp_c_inclusive_or_expression (c_pretty_printer *, tree); static void pp_c_logical_and_expression (c_pretty_printer *, tree); static void pp_c_conditional_expression (c_pretty_printer *, tree); static void pp_c_assignment_expression (c_pretty_printer *, tree); /* declarations. */ /* Helper functions. */ void pp_c_whitespace (c_pretty_printer *pp) { pp_space (pp); pp_base (pp)->padding = pp_none; } void pp_c_left_paren (c_pretty_printer *pp) { pp_left_paren (pp); pp_base (pp)->padding = pp_none; } void pp_c_right_paren (c_pretty_printer *pp) { pp_right_paren (pp); pp_base (pp)->padding = pp_none; } void pp_c_left_brace (c_pretty_printer *pp) { pp_left_brace (pp); pp_base (pp)->padding = pp_none; } void pp_c_right_brace (c_pretty_printer *pp) { pp_right_brace (pp); pp_base (pp)->padding = pp_none; } void pp_c_left_bracket (c_pretty_printer *pp) { pp_left_bracket (pp); pp_base (pp)->padding = pp_none; } void pp_c_right_bracket (c_pretty_printer *pp) { pp_right_bracket (pp); pp_base (pp)->padding = pp_none; } void pp_c_dot (c_pretty_printer *pp) { pp_dot (pp); pp_base (pp)->padding = pp_none; } void pp_c_ampersand (c_pretty_printer *pp) { pp_ampersand (pp); pp_base (pp)->padding = pp_none; } void pp_c_star (c_pretty_printer *pp) { pp_star (pp); pp_base (pp)->padding = pp_none; } void pp_c_arrow (c_pretty_printer *pp) { pp_arrow (pp); pp_base (pp)->padding = pp_none; } void pp_c_semicolon (c_pretty_printer *pp) { pp_semicolon (pp); pp_base (pp)->padding = pp_none; } void pp_c_complement (c_pretty_printer *pp) { pp_complement (pp); pp_base (pp)->padding = pp_none; } void pp_c_exclamation (c_pretty_printer *pp) { pp_exclamation (pp); pp_base (pp)->padding = pp_none; } /* Print out the external representation of CV-QUALIFIER. */ static void pp_c_cv_qualifier (c_pretty_printer *pp, const char *cv) { const char *p = pp_last_position_in_text (pp); /* The C programming language does not have references, but it is much simpler to handle those here rather than going through the same logic in the C++ pretty-printer. */ if (p != NULL && (*p == '*' || *p == '&')) pp_c_whitespace (pp); pp_c_identifier (pp, cv); } /* Pretty-print T using the type-cast notation '( type-name )'. */ static void pp_c_type_cast (c_pretty_printer *pp, tree t) { pp_c_left_paren (pp); pp_type_id (pp, t); pp_c_right_paren (pp); } /* We're about to pretty-print a pointer type as indicated by T. Output a whitespace, if needed, preparing for subsequent output. */ void pp_c_space_for_pointer_operator (c_pretty_printer *pp, tree t) { if (POINTER_TYPE_P (t)) { tree pointee = strip_pointer_operator (TREE_TYPE (t)); if (TREE_CODE (pointee) != ARRAY_TYPE && TREE_CODE (pointee) != FUNCTION_TYPE) pp_c_whitespace (pp); } } /* Declarations. 
*/ /* C++ cv-qualifiers are called type-qualifiers in C. Print out the cv-qualifiers of T. If T is a declaration then it is the cv-qualifier of its type. Take care of possible extensions. type-qualifier-list: type-qualifier type-qualifier-list type-qualifier type-qualifier: const restrict -- C99 __restrict__ -- GNU C volatile */ void pp_c_type_qualifier_list (c_pretty_printer *pp, tree t) { int qualifiers; if (!TYPE_P (t)) t = TREE_TYPE (t); qualifiers = TYPE_QUALS (t); if (qualifiers & TYPE_QUAL_CONST) pp_c_cv_qualifier (pp, "const"); if (qualifiers & TYPE_QUAL_VOLATILE) pp_c_cv_qualifier (pp, "volatile"); if (qualifiers & TYPE_QUAL_RESTRICT) pp_c_cv_qualifier (pp, flag_isoc99 ? "restrict" : "__restrict__"); } /* pointer: * type-qualifier-list(opt) * type-qualifier-list(opt) pointer */ static void pp_c_pointer (c_pretty_printer *pp, tree t) { if (!TYPE_P (t) && TREE_CODE (t) != TYPE_DECL) t = TREE_TYPE (t); switch (TREE_CODE (t)) { case POINTER_TYPE: /* It is easier to handle C++ reference types here. */ case REFERENCE_TYPE: if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE) pp_c_pointer (pp, TREE_TYPE (t)); if (TREE_CODE (t) == POINTER_TYPE) pp_c_star (pp); else pp_c_ampersand (pp); pp_c_type_qualifier_list (pp, t); break; /* ??? This node is now in GENERIC and so shouldn't be here. But we'll fix that later. */ case DECL_EXPR: pp_declaration (pp, DECL_EXPR_DECL (t)); pp_needs_newline (pp) = true; break; default: pp_unsupported_tree (pp, t); } } /* type-specifier: void char short int long float double signed unsigned _Bool -- C99 _Complex -- C99 _Imaginary -- C99 struct-or-union-specifier enum-specifier typedef-name. GNU extensions. simple-type-specifier: __complex__ __vector__ */ void pp_c_type_specifier (c_pretty_printer *pp, tree t) { const enum tree_code code = TREE_CODE (t); switch (code) { case ERROR_MARK: pp_c_identifier (pp, ""); break; case IDENTIFIER_NODE: pp_c_tree_decl_identifier (pp, t); break; case VOID_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: case INTEGER_TYPE: case REAL_TYPE: if (TYPE_NAME (t)) t = TYPE_NAME (t); else t = c_common_type_for_mode (TYPE_MODE (t), TYPE_UNSIGNED (t)); pp_c_type_specifier (pp, t); break; case TYPE_DECL: if (DECL_NAME (t)) pp_id_expression (pp, t); else pp_c_identifier (pp, ""); break; case UNION_TYPE: case RECORD_TYPE: case ENUMERAL_TYPE: if (code == UNION_TYPE) pp_c_identifier (pp, "union"); else if (code == RECORD_TYPE) pp_c_identifier (pp, "struct"); else if (code == ENUMERAL_TYPE) pp_c_identifier (pp, "enum"); else pp_c_identifier (pp, ""); if (TYPE_NAME (t)) pp_id_expression (pp, TYPE_NAME (t)); else pp_c_identifier (pp, ""); break; default: pp_unsupported_tree (pp, t); break; } } /* specifier-qualifier-list: type-specifier specifier-qualifier-list-opt type-qualifier specifier-qualifier-list-opt Implementation note: Because of the non-linearities in array or function declarations, this routine prints not just the specifier-qualifier-list of such entities or types of such entities, but also the 'pointer' production part of their declarators. The remaining part is done by pp_declarator or pp_c_abstract_declarator. */ void pp_c_specifier_qualifier_list (c_pretty_printer *pp, tree t) { const enum tree_code code = TREE_CODE (t); if (TREE_CODE (t) != POINTER_TYPE) pp_c_type_qualifier_list (pp, t); switch (code) { case REFERENCE_TYPE: case POINTER_TYPE: { /* Get the types-specifier of this type. 
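   A pointer to an array or to a function needs an extra level of
   parentheses in the declarator, as in

     int (*p)[10];
     void (*f) (void);

   so the opening '(' is emitted here and the matching ')' is emitted
   later by pp_c_abstract_declarator.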
*/ tree pointee = strip_pointer_operator (TREE_TYPE (t)); pp_c_specifier_qualifier_list (pp, pointee); if (TREE_CODE (pointee) == ARRAY_TYPE || TREE_CODE (pointee) == FUNCTION_TYPE) { pp_c_whitespace (pp); pp_c_left_paren (pp); } else if (!c_dialect_cxx ()) pp_c_whitespace (pp); pp_ptr_operator (pp, t); } break; case FUNCTION_TYPE: case ARRAY_TYPE: pp_c_specifier_qualifier_list (pp, TREE_TYPE (t)); break; case VECTOR_TYPE: case COMPLEX_TYPE: pp_c_specifier_qualifier_list (pp, TREE_TYPE (t)); if (code == COMPLEX_TYPE) pp_c_identifier (pp, flag_isoc99 ? "_Complex" : "__complex__"); else if (code == VECTOR_TYPE) pp_c_identifier (pp, "__vector__"); break; default: pp_simple_type_specifier (pp, t); break; } } /* parameter-type-list: parameter-list parameter-list , ... parameter-list: parameter-declaration parameter-list , parameter-declaration parameter-declaration: declaration-specifiers declarator declaration-specifiers abstract-declarator(opt) */ void pp_c_parameter_type_list (c_pretty_printer *pp, tree t) { bool want_parm_decl = DECL_P (t) && !(pp->flags & pp_c_flag_abstract); tree parms = want_parm_decl ? DECL_ARGUMENTS (t) : TYPE_ARG_TYPES (t); pp_c_left_paren (pp); if (parms == void_list_node) pp_c_identifier (pp, "void"); else { bool first = true; for ( ; parms && parms != void_list_node; parms = TREE_CHAIN (parms)) { if (!first) pp_separate_with (pp, ','); first = false; pp_declaration_specifiers (pp, want_parm_decl ? parms : TREE_VALUE (parms)); if (want_parm_decl) pp_declarator (pp, parms); else pp_abstract_declarator (pp, TREE_VALUE (parms)); } } pp_c_right_paren (pp); } /* abstract-declarator: pointer pointer(opt) direct-abstract-declarator */ static void pp_c_abstract_declarator (c_pretty_printer *pp, tree t) { if (TREE_CODE (t) == POINTER_TYPE) { if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE) pp_c_right_paren (pp); t = TREE_TYPE (t); } pp_direct_abstract_declarator (pp, t); } /* direct-abstract-declarator: ( abstract-declarator ) direct-abstract-declarator(opt) [ assignment-expression(opt) ] direct-abstract-declarator(opt) [ * ] direct-abstract-declarator(opt) ( parameter-type-list(opt) ) */ void pp_c_direct_abstract_declarator (c_pretty_printer *pp, tree t) { switch (TREE_CODE (t)) { case POINTER_TYPE: pp_abstract_declarator (pp, t); break; case FUNCTION_TYPE: pp_c_parameter_type_list (pp, t); pp_direct_abstract_declarator (pp, TREE_TYPE (t)); break; case ARRAY_TYPE: pp_c_left_bracket (pp); if (TYPE_DOMAIN (t) && TYPE_MAX_VALUE (TYPE_DOMAIN (t))) pp_expression (pp, TYPE_MAX_VALUE (TYPE_DOMAIN (t))); pp_c_right_bracket (pp); pp_direct_abstract_declarator (pp, TREE_TYPE (t)); break; case IDENTIFIER_NODE: case VOID_TYPE: case BOOLEAN_TYPE: case INTEGER_TYPE: case REAL_TYPE: case ENUMERAL_TYPE: case RECORD_TYPE: case UNION_TYPE: case VECTOR_TYPE: case COMPLEX_TYPE: case TYPE_DECL: break; default: pp_unsupported_tree (pp, t); break; } } /* type-name: specifier-qualifier-list abstract-declarator(opt) */ void pp_c_type_id (c_pretty_printer *pp, tree t) { pp_c_specifier_qualifier_list (pp, t); pp_abstract_declarator (pp, t); } /* storage-class-specifier: typedef extern static auto register */ void pp_c_storage_class_specifier (c_pretty_printer *pp, tree t) { if (TREE_CODE (t) == TYPE_DECL) pp_c_identifier (pp, "typedef"); else if (DECL_P (t)) { if (DECL_REGISTER (t)) pp_c_identifier (pp, "register"); else if (TREE_STATIC (t) && TREE_CODE (t) == VAR_DECL) pp_c_identifier (pp, "static"); } } /* function-specifier: inline */ void 
pp_c_function_specifier (c_pretty_printer *pp, tree t) { if (TREE_CODE (t) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (t)) pp_c_identifier (pp, "inline"); } /* declaration-specifiers: storage-class-specifier declaration-specifiers(opt) type-specifier declaration-specifiers(opt) type-qualifier declaration-specifiers(opt) function-specifier declaration-specifiers(opt) */ void pp_c_declaration_specifiers (c_pretty_printer *pp, tree t) { pp_storage_class_specifier (pp, t); pp_function_specifier (pp, t); pp_c_specifier_qualifier_list (pp, DECL_P (t) ? TREE_TYPE (t) : t); } /* direct-declarator identifier ( declarator ) direct-declarator [ type-qualifier-list(opt) assignment-expression(opt) ] direct-declarator [ static type-qualifier-list(opt) assignment-expression(opt)] direct-declarator [ type-qualifier-list static assignment-expression ] direct-declarator [ type-qualifier-list * ] direct-declarator ( parameter-type-list ) direct-declarator ( identifier-list(opt) ) */ void pp_c_direct_declarator (c_pretty_printer *pp, tree t) { switch (TREE_CODE (t)) { case VAR_DECL: case PARM_DECL: case TYPE_DECL: case FIELD_DECL: case LABEL_DECL: pp_c_space_for_pointer_operator (pp, TREE_TYPE (t)); pp_c_tree_decl_identifier (pp, t); break; case ARRAY_TYPE: case POINTER_TYPE: pp_abstract_declarator (pp, TREE_TYPE (t)); break; case FUNCTION_TYPE: pp_parameter_list (pp, t); pp_abstract_declarator (pp, TREE_TYPE (t)); break; case FUNCTION_DECL: pp_c_space_for_pointer_operator (pp, TREE_TYPE (TREE_TYPE (t))); pp_c_tree_decl_identifier (pp, t); if (pp_c_base (pp)->flags & pp_c_flag_abstract) pp_abstract_declarator (pp, TREE_TYPE (t)); else { pp_parameter_list (pp, t); pp_abstract_declarator (pp, TREE_TYPE (TREE_TYPE (t))); } break; case INTEGER_TYPE: case REAL_TYPE: case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: break; default: pp_unsupported_tree (pp, t); break; } } /* declarator: pointer(opt) direct-declarator */ void pp_c_declarator (c_pretty_printer *pp, tree t) { switch (TREE_CODE (t)) { case INTEGER_TYPE: case REAL_TYPE: case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: break; case VAR_DECL: case PARM_DECL: case FIELD_DECL: case ARRAY_TYPE: case FUNCTION_TYPE: case FUNCTION_DECL: case TYPE_DECL: pp_direct_declarator (pp, t); break; default: pp_unsupported_tree (pp, t); break; } } /* declaration: declaration-specifiers init-declarator-list(opt) ; */ void pp_c_declaration (c_pretty_printer *pp, tree t) { pp_declaration_specifiers (pp, t); pp_c_init_declarator (pp, t); } /* Pretty-print ATTRIBUTES using GNU C extension syntax. */ void pp_c_attributes (c_pretty_printer *pp, tree attributes) { if (attributes == NULL_TREE) return; pp_c_identifier (pp, "__attribute__"); pp_c_left_paren (pp); pp_c_left_paren (pp); for (; attributes != NULL_TREE; attributes = TREE_CHAIN (attributes)) { pp_tree_identifier (pp, TREE_PURPOSE (attributes)); if (TREE_VALUE (attributes)) pp_c_call_argument_list (pp, TREE_VALUE (attributes)); if (TREE_CHAIN (attributes)) pp_separate_with (pp, ','); } pp_c_right_paren (pp); pp_c_right_paren (pp); } /* function-definition: declaration-specifiers declarator compound-statement */ void pp_c_function_definition (c_pretty_printer *pp, tree t) { pp_declaration_specifiers (pp, t); pp_declarator (pp, t); pp_needs_newline (pp) = true; pp_statement (pp, DECL_SAVED_TREE (t)); pp_newline (pp); pp_flush (pp); } /* Expressions. */ /* Print out a c-char. 
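   Standard escape sequences are used where they exist; a printable
   character is emitted as-is and anything else falls back to a
   three-digit octal escape, so a newline prints as \n and the byte
   0x01 prints as \001.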
*/ static void pp_c_char (c_pretty_printer *pp, int c) { switch (c) { case TARGET_NEWLINE: pp_string (pp, "\\n"); break; case TARGET_TAB: pp_string (pp, "\\t"); break; case TARGET_VT: pp_string (pp, "\\v"); break; case TARGET_BS: pp_string (pp, "\\b"); break; case TARGET_CR: pp_string (pp, "\\r"); break; case TARGET_FF: pp_string (pp, "\\f"); break; case TARGET_BELL: pp_string (pp, "\\a"); break; case '\\': pp_string (pp, "\\\\"); break; case '\'': pp_string (pp, "\\'"); break; case '\"': pp_string (pp, "\\\""); break; default: if (ISPRINT (c)) pp_character (pp, c); else pp_scalar (pp, "\\%03o", (unsigned) c); break; } } /* Print out a STRING literal. */ void pp_c_string_literal (c_pretty_printer *pp, tree s) { const char *p = TREE_STRING_POINTER (s); int n = TREE_STRING_LENGTH (s) - 1; int i; pp_doublequote (pp); for (i = 0; i < n; ++i) pp_c_char (pp, p[i]); pp_doublequote (pp); } /* Pretty-print an INTEGER literal. */ static void pp_c_integer_constant (c_pretty_printer *pp, tree i) { tree type = TREE_TYPE (i); if (TREE_INT_CST_HIGH (i) == 0) pp_wide_integer (pp, TREE_INT_CST_LOW (i)); else { if (tree_int_cst_sgn (i) < 0) { pp_c_char (pp, '-'); i = build_int_2 (-TREE_INT_CST_LOW (i), ~TREE_INT_CST_HIGH (i) + !TREE_INT_CST_LOW (i)); } sprintf (pp_buffer (pp)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX, TREE_INT_CST_HIGH (i), TREE_INT_CST_LOW (i)); pp_string (pp, pp_buffer (pp)->digit_buffer); } if (TYPE_UNSIGNED (type)) pp_character (pp, 'u'); if (type == long_integer_type_node || type == long_unsigned_type_node) pp_character (pp, 'l'); else if (type == long_long_integer_type_node || type == long_long_unsigned_type_node) pp_string (pp, "ll"); } /* Print out a CHARACTER literal. */ static void pp_c_character_constant (c_pretty_printer *pp, tree c) { tree type = TREE_TYPE (c); if (type == wchar_type_node) pp_character (pp, 'L'); pp_quote (pp); if (host_integerp (c, TYPE_UNSIGNED (type))) pp_c_char (pp, tree_low_cst (c, TYPE_UNSIGNED (type))); else pp_scalar (pp, "\\x%x", (unsigned) TREE_INT_CST_LOW (c)); pp_quote (pp); } /* Print out a BOOLEAN literal. */ static void pp_c_bool_constant (c_pretty_printer *pp, tree b) { if (b == boolean_false_node) { if (c_dialect_cxx ()) pp_c_identifier (pp, "false"); else if (flag_isoc99) pp_c_identifier (pp, "_False"); else pp_unsupported_tree (pp, b); } else if (b == boolean_true_node) { if (c_dialect_cxx ()) pp_c_identifier (pp, "true"); else if (flag_isoc99) pp_c_identifier (pp, "_True"); else pp_unsupported_tree (pp, b); } else if (TREE_CODE (b) == INTEGER_CST) pp_c_integer_constant (pp, b); else pp_unsupported_tree (pp, b); } /* Attempt to print out an ENUMERATOR. Return true on success. Else return false; that means the value was obtained by a cast, in which case print out the type-id part of the cast-expression -- the casted value is then printed by pp_c_integer_literal. */ static bool pp_c_enumeration_constant (c_pretty_printer *pp, tree e) { bool value_is_named = true; tree type = TREE_TYPE (e); tree value; /* Find the name of this constant. */ for (value = TYPE_VALUES (type); value != NULL_TREE && !tree_int_cst_equal (TREE_VALUE (value), e); value = TREE_CHAIN (value)) ; if (value != NULL_TREE) pp_id_expression (pp, TREE_PURPOSE (value)); else { /* Value must have been cast. */ pp_c_type_cast (pp, type); value_is_named = false; } return value_is_named; } /* Print out a REAL value as a decimal-floating-constant. 
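   The digits come from real_to_decimal; a suffix is then appended
   according to the type, 'f' for float and 'l' for long double, while
   a plain double gets no suffix.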
*/ static void pp_c_floating_constant (c_pretty_printer *pp, tree r) { real_to_decimal (pp_buffer (pp)->digit_buffer, &TREE_REAL_CST (r), sizeof (pp_buffer (pp)->digit_buffer), 0, 1); pp_string (pp, pp_buffer(pp)->digit_buffer); if (TREE_TYPE (r) == float_type_node) pp_character (pp, 'f'); else if (TREE_TYPE (r) == long_double_type_node) pp_character (pp, 'l'); } /* Pretty-print a compound literal expression. GNU extensions include vector constants. */ static void pp_c_compound_literal (c_pretty_printer *pp, tree e) { tree type = TREE_TYPE (e); pp_c_type_cast (pp, type); switch (TREE_CODE (type)) { case RECORD_TYPE: case UNION_TYPE: case ARRAY_TYPE: case VECTOR_TYPE: case COMPLEX_TYPE: pp_c_brace_enclosed_initializer_list (pp, e); break; default: pp_unsupported_tree (pp, e); break; } } /* constant: integer-constant floating-constant enumeration-constant character-constant */ void pp_c_constant (c_pretty_printer *pp, tree e) { const enum tree_code code = TREE_CODE (e); switch (code) { case INTEGER_CST: { tree type = TREE_TYPE (e); if (type == boolean_type_node) pp_c_bool_constant (pp, e); else if (type == char_type_node) pp_c_character_constant (pp, e); else if (TREE_CODE (type) == ENUMERAL_TYPE && pp_c_enumeration_constant (pp, e)) ; else pp_c_integer_constant (pp, e); } break; case REAL_CST: pp_c_floating_constant (pp, e); break; case STRING_CST: pp_c_string_literal (pp, e); break; default: pp_unsupported_tree (pp, e); break; } } /* Pretty-print an IDENTIFIER_NODE, preceded by whitespace is necessary. */ void pp_c_identifier (c_pretty_printer *pp, const char *id) { pp_c_maybe_whitespace (pp); pp_identifier (pp, id); pp_base (pp)->padding = pp_before; } /* Pretty-print a C primary-expression. primary-expression: identifier constant string-literal ( expression ) */ void pp_c_primary_expression (c_pretty_printer *pp, tree e) { switch (TREE_CODE (e)) { case VAR_DECL: case PARM_DECL: case FIELD_DECL: case CONST_DECL: case FUNCTION_DECL: case LABEL_DECL: pp_c_tree_decl_identifier (pp, e); break; case IDENTIFIER_NODE: pp_c_tree_identifier (pp, e); break; case ERROR_MARK: pp_c_identifier (pp, ""); break; case RESULT_DECL: pp_c_identifier (pp, ""); break; case INTEGER_CST: case REAL_CST: case STRING_CST: pp_c_constant (pp, e); break; case TARGET_EXPR: pp_c_identifier (pp, "__builtin_memcpy"); pp_c_left_paren (pp); pp_ampersand (pp); pp_primary_expression (pp, TREE_OPERAND (e, 0)); pp_separate_with (pp, ','); pp_ampersand (pp); pp_initializer (pp, TREE_OPERAND (e, 1)); if (TREE_OPERAND (e, 2)) { pp_separate_with (pp, ','); pp_c_expression (pp, TREE_OPERAND (e, 2)); } pp_c_right_paren (pp); break; case STMT_EXPR: pp_c_left_paren (pp); pp_statement (pp, STMT_EXPR_STMT (e)); pp_c_right_paren (pp); break; default: /* FIXME: Make sure we won't get into an infinie loop. */ pp_c_left_paren (pp); pp_expression (pp, e); pp_c_right_paren (pp); break; } } /* Print out a C initializer -- also support C compound-literals. initializer: assignment-expression: { initializer-list } { initializer-list , } */ static void pp_c_initializer (c_pretty_printer *pp, tree e) { if (TREE_CODE (e) == CONSTRUCTOR) pp_c_brace_enclosed_initializer_list (pp, e); else pp_expression (pp, e); } /* init-declarator: declarator: declarator = initializer */ void pp_c_init_declarator (c_pretty_printer *pp, tree t) { pp_declarator (pp, t); /* We don't want to output function definitions here. There are handled elsewhere (and the syntactic form is bogus anyway). 
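   For an ordinary declarator the output is, roughly, "i = 0": the
   declarator itself, then " = ", then the initializer; the
   declaration-specifiers ("int" and so on) are printed by our caller
   pp_c_declaration.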
*/ if (TREE_CODE (t) != FUNCTION_DECL && DECL_INITIAL (t)) { tree init = DECL_INITIAL (t); /* This C++ bit is handled here because it is easier to do so. In templates, the C++ parser builds a TREE_LIST for a direct-initialization; the TREE_PURPOSE is the variable to initialize and the TREE_VALUE is the initializer. */ if (TREE_CODE (init) == TREE_LIST) { pp_c_left_paren (pp); pp_expression (pp, TREE_VALUE (init)); pp_right_paren (pp); } else { pp_space (pp); pp_equal (pp); pp_space (pp); pp_c_initializer (pp, init); } } } /* initializer-list: designation(opt) initializer initializer-list , designation(opt) initializer designation: designator-list = designator-list: designator designator-list designator designator: [ constant-expression ] identifier */ static void pp_c_initializer_list (c_pretty_printer *pp, tree e) { tree type = TREE_TYPE (e); const enum tree_code code = TREE_CODE (type); switch (code) { case RECORD_TYPE: case UNION_TYPE: case ARRAY_TYPE: { tree init = TREE_OPERAND (e, 0); for (; init != NULL_TREE; init = TREE_CHAIN (init)) { if (code == RECORD_TYPE || code == UNION_TYPE) { pp_c_dot (pp); pp_c_primary_expression (pp, TREE_PURPOSE (init)); } else { pp_c_left_bracket (pp); if (TREE_PURPOSE (init)) pp_c_constant (pp, TREE_PURPOSE (init)); pp_c_right_bracket (pp); } pp_c_whitespace (pp); pp_equal (pp); pp_c_whitespace (pp); pp_initializer (pp, TREE_VALUE (init)); if (TREE_CHAIN (init)) pp_separate_with (pp, ','); } } return; case VECTOR_TYPE: if (TREE_CODE (e) == VECTOR_CST) pp_c_expression_list (pp, TREE_VECTOR_CST_ELTS (e)); else if (TREE_CODE (e) == CONSTRUCTOR) pp_c_expression_list (pp, CONSTRUCTOR_ELTS (e)); else break; return; case COMPLEX_TYPE: if (TREE_CODE (e) == CONSTRUCTOR) pp_c_expression_list (pp, CONSTRUCTOR_ELTS (e)); else if (TREE_CODE (e) == COMPLEX_CST || TREE_CODE (e) == COMPLEX_EXPR) { const bool cst = TREE_CODE (e) == COMPLEX_CST; pp_expression (pp, cst ? TREE_REALPART (e) : TREE_OPERAND (e, 0)); pp_separate_with (pp, ','); pp_expression (pp, cst ? TREE_IMAGPART (e) : TREE_OPERAND (e, 1)); } else break; return; default: break; } pp_unsupported_tree (pp, type); } /* Pretty-print a brace-enclosed initializer-list. */ static void pp_c_brace_enclosed_initializer_list (c_pretty_printer *pp, tree l) { pp_c_left_brace (pp); pp_c_initializer_list (pp, l); pp_c_right_brace (pp); } /* This is a convenient function, used to bridge gap between C and C++ grammars. id-expression: identifier */ void pp_c_id_expression (c_pretty_printer *pp, tree t) { switch (TREE_CODE (t)) { case VAR_DECL: case PARM_DECL: case CONST_DECL: case TYPE_DECL: case FUNCTION_DECL: case FIELD_DECL: case LABEL_DECL: pp_c_tree_decl_identifier (pp, t); break; case IDENTIFIER_NODE: pp_c_tree_identifier (pp, t); break; default: pp_unsupported_tree (pp, t); break; } } /* postfix-expression: primary-expression postfix-expression [ expression ] postfix-expression ( argument-expression-list(opt) ) postfix-expression . identifier postfix-expression -> identifier postfix-expression ++ postfix-expression -- ( type-name ) { initializer-list } ( type-name ) { initializer-list , } */ void pp_c_postfix_expression (c_pretty_printer *pp, tree e) { enum tree_code code = TREE_CODE (e); switch (code) { case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: pp_postfix_expression (pp, TREE_OPERAND (e, 0)); pp_identifier (pp, code == POSTINCREMENT_EXPR ? 
"++" : "--"); break; case ARROW_EXPR: pp_postfix_expression (pp, TREE_OPERAND (e, 0)); pp_c_arrow (pp); break; case ARRAY_REF: pp_postfix_expression (pp, TREE_OPERAND (e, 0)); pp_c_left_bracket (pp); pp_expression (pp, TREE_OPERAND (e, 1)); pp_c_right_bracket (pp); break; case CALL_EXPR: pp_postfix_expression (pp, TREE_OPERAND (e, 0)); pp_c_call_argument_list (pp, TREE_OPERAND (e, 1)); break; case UNORDERED_EXPR: pp_c_identifier (pp, flag_isoc99 ? "isunordered" : "__builtin_isunordered"); goto two_args_fun; case ORDERED_EXPR: pp_c_identifier (pp, flag_isoc99 ? "!isunordered" : "!__builtin_isunordered"); goto two_args_fun; case UNLT_EXPR: pp_c_identifier (pp, flag_isoc99 ? "!isgreaterequal" : "!__builtin_isgreaterequal"); goto two_args_fun; case UNLE_EXPR: pp_c_identifier (pp, flag_isoc99 ? "!isgreater" : "!__builtin_isgreater"); goto two_args_fun; case UNGT_EXPR: pp_c_identifier (pp, flag_isoc99 ? "!islessequal" : "!__builtin_islessequal"); goto two_args_fun; case UNGE_EXPR: pp_c_identifier (pp, flag_isoc99 ? "!isless" : "!__builtin_isless"); goto two_args_fun; case UNEQ_EXPR: pp_c_identifier (pp, flag_isoc99 ? "!islessgreater" : "!__builtin_islessgreater"); goto two_args_fun; case LTGT_EXPR: pp_c_identifier (pp, flag_isoc99 ? "islessgreater" : "__builtin_islessgreater"); goto two_args_fun; two_args_fun: pp_c_left_paren (pp); pp_expression (pp, TREE_OPERAND (e, 0)); pp_separate_with (pp, ','); pp_expression (pp, TREE_OPERAND (e, 1)); pp_c_right_paren (pp); break; case ABS_EXPR: pp_c_identifier (pp, "__builtin_abs"); pp_c_left_paren (pp); pp_expression (pp, TREE_OPERAND (e, 0)); pp_c_right_paren (pp); break; case COMPONENT_REF: { tree object = TREE_OPERAND (e, 0); if (TREE_CODE (object) == INDIRECT_REF) { pp_postfix_expression (pp, TREE_OPERAND (object, 0)); pp_c_arrow (pp); } else { pp_postfix_expression (pp, object); pp_c_dot (pp); } pp_expression (pp, TREE_OPERAND (e, 1)); } break; case COMPLEX_CST: case VECTOR_CST: case COMPLEX_EXPR: pp_c_compound_literal (pp, e); break; case COMPOUND_LITERAL_EXPR: e = DECL_INITIAL (COMPOUND_LITERAL_EXPR_DECL (e)); /* Fall through. */ case CONSTRUCTOR: pp_initializer (pp, e); break; case VA_ARG_EXPR: pp_c_identifier (pp, "__builtin_va_arg"); pp_c_left_paren (pp); pp_assignment_expression (pp, TREE_OPERAND (e, 0)); pp_separate_with (pp, ','); pp_type_id (pp, TREE_TYPE (e)); pp_c_right_paren (pp); break; case ADDR_EXPR: if (TREE_CODE (TREE_OPERAND (e, 0)) == FUNCTION_DECL) { pp_c_id_expression (pp, TREE_OPERAND (e, 0)); break; } /* else fall through. */ default: pp_primary_expression (pp, e); break; } } /* Print out an expression-list; E is expected to be a TREE_LIST. */ void pp_c_expression_list (c_pretty_printer *pp, tree e) { for (; e != NULL_TREE; e = TREE_CHAIN (e)) { pp_expression (pp, TREE_VALUE (e)); if (TREE_CHAIN (e)) pp_separate_with (pp, ','); } } /* Print out an expression-list in parens, as in a function call. */ void pp_c_call_argument_list (c_pretty_printer *pp, tree t) { pp_c_left_paren (pp); if (t && TREE_CODE (t) == TREE_LIST) pp_c_expression_list (pp, t); pp_c_right_paren (pp); } /* unary-expression: postfix-expression ++ cast-expression -- cast-expression unary-operator cast-expression sizeof unary-expression sizeof ( type-id ) unary-operator: one of * & + - ! ~ GNU extensions. 
unary-expression: __alignof__ unary-expression __alignof__ ( type-id ) __real__ unary-expression __imag__ unary-expression */ void pp_c_unary_expression (c_pretty_printer *pp, tree e) { enum tree_code code = TREE_CODE (e); switch (code) { case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: pp_identifier (pp, code == PREINCREMENT_EXPR ? "++" : "--"); pp_c_unary_expression (pp, TREE_OPERAND (e, 0)); break; case ADDR_EXPR: case INDIRECT_REF: case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case CONJ_EXPR: /* String literal are used by address. */ if (code == ADDR_EXPR && TREE_CODE (TREE_OPERAND (e, 0)) != STRING_CST) pp_ampersand (pp); else if (code == INDIRECT_REF) pp_c_star (pp); else if (code == NEGATE_EXPR) pp_minus (pp); else if (code == BIT_NOT_EXPR || code == CONJ_EXPR) pp_complement (pp); else if (code == TRUTH_NOT_EXPR) pp_exclamation (pp); pp_c_cast_expression (pp, TREE_OPERAND (e, 0)); break; case SIZEOF_EXPR: case ALIGNOF_EXPR: pp_c_identifier (pp, code == SIZEOF_EXPR ? "sizeof" : "__alignof__"); pp_c_whitespace (pp); if (TYPE_P (TREE_OPERAND (e, 0))) pp_c_type_cast (pp, TREE_OPERAND (e, 0)); else pp_unary_expression (pp, TREE_OPERAND (e, 0)); break; case REALPART_EXPR: case IMAGPART_EXPR: pp_c_identifier (pp, code == REALPART_EXPR ? "__real__" : "__imag__"); pp_c_whitespace (pp); pp_unary_expression (pp, TREE_OPERAND (e, 0)); break; default: pp_postfix_expression (pp, e); break; } } /* cast-expression: unary-expression ( type-name ) cast-expression */ void pp_c_cast_expression (c_pretty_printer *pp, tree e) { switch (TREE_CODE (e)) { case FLOAT_EXPR: case FIX_TRUNC_EXPR: case CONVERT_EXPR: pp_c_type_cast (pp, TREE_TYPE (e)); pp_c_cast_expression (pp, TREE_OPERAND (e, 0)); break; default: pp_unary_expression (pp, e); } } /* multiplicative-expression: cast-expression multiplicative-expression * cast-expression multiplicative-expression / cast-expression multiplicative-expression % cast-expression */ static void pp_c_multiplicative_expression (c_pretty_printer *pp, tree e) { enum tree_code code = TREE_CODE (e); switch (code) { case MULT_EXPR: case TRUNC_DIV_EXPR: case TRUNC_MOD_EXPR: pp_multiplicative_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); if (code == MULT_EXPR) pp_c_star (pp); else if (code == TRUNC_DIV_EXPR) pp_slash (pp); else pp_modulo (pp); pp_c_whitespace (pp); pp_c_cast_expression (pp, TREE_OPERAND (e, 1)); break; default: pp_c_cast_expression (pp, e); break; } } /* additive-expression: multiplicative-expression additive-expression + multiplicative-expression additive-expression - multiplicative-expression */ static void pp_c_additive_expression (c_pretty_printer *pp, tree e) { enum tree_code code = TREE_CODE (e); switch (code) { case PLUS_EXPR: case MINUS_EXPR: pp_c_additive_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); if (code == PLUS_EXPR) pp_plus (pp); else pp_minus (pp); pp_c_whitespace (pp); pp_multiplicative_expression (pp, TREE_OPERAND (e, 1)); break; default: pp_multiplicative_expression (pp, e); break; } } /* additive-expression: additive-expression shift-expression << additive-expression shift-expression >> additive-expression */ static void pp_c_shift_expression (c_pretty_printer *pp, tree e) { enum tree_code code = TREE_CODE (e); switch (code) { case LSHIFT_EXPR: case RSHIFT_EXPR: pp_c_shift_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); pp_identifier (pp, code == LSHIFT_EXPR ? 
"<<" : ">>"); pp_c_whitespace (pp); pp_c_additive_expression (pp, TREE_OPERAND (e, 1)); break; default: pp_c_additive_expression (pp, e); } } /* relational-expression: shift-expression relational-expression < shift-expression relational-expression > shift-expression relational-expression <= shift-expression relational-expression >= shift-expression */ static void pp_c_relational_expression (c_pretty_printer *pp, tree e) { enum tree_code code = TREE_CODE (e); switch (code) { case LT_EXPR: case GT_EXPR: case LE_EXPR: case GE_EXPR: pp_c_relational_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); if (code == LT_EXPR) pp_less (pp); else if (code == GT_EXPR) pp_greater (pp); else if (code == LE_EXPR) pp_identifier (pp, "<="); else if (code == GE_EXPR) pp_identifier (pp, ">="); pp_c_whitespace (pp); pp_c_shift_expression (pp, TREE_OPERAND (e, 1)); break; default: pp_c_shift_expression (pp, e); break; } } /* equality-expression: relational-expression equality-expression == relational-expression equality-equality != relational-expression */ static void pp_c_equality_expression (c_pretty_printer *pp, tree e) { enum tree_code code = TREE_CODE (e); switch (code) { case EQ_EXPR: case NE_EXPR: pp_c_equality_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); pp_identifier (pp, code == EQ_EXPR ? "==" : "!="); pp_c_whitespace (pp); pp_c_relational_expression (pp, TREE_OPERAND (e, 1)); break; default: pp_c_relational_expression (pp, e); break; } } /* AND-expression: equality-expression AND-expression & equality-equality */ static void pp_c_and_expression (c_pretty_printer *pp, tree e) { if (TREE_CODE (e) == BIT_AND_EXPR) { pp_c_and_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); pp_ampersand (pp); pp_c_whitespace (pp); pp_c_equality_expression (pp, TREE_OPERAND (e, 1)); } else pp_c_equality_expression (pp, e); } /* exclusive-OR-expression: AND-expression exclusive-OR-expression ^ AND-expression */ static void pp_c_exclusive_or_expression (c_pretty_printer *pp, tree e) { if (TREE_CODE (e) == BIT_XOR_EXPR) { pp_c_exclusive_or_expression (pp, TREE_OPERAND (e, 0)); pp_c_maybe_whitespace (pp); pp_carret (pp); pp_c_whitespace (pp); pp_c_and_expression (pp, TREE_OPERAND (e, 1)); } else pp_c_and_expression (pp, e); } /* inclusive-OR-expression: exclusive-OR-expression inclusive-OR-expression | exclusive-OR-expression */ static void pp_c_inclusive_or_expression (c_pretty_printer *pp, tree e) { if (TREE_CODE (e) == BIT_IOR_EXPR) { pp_c_exclusive_or_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); pp_bar (pp); pp_c_whitespace (pp); pp_c_exclusive_or_expression (pp, TREE_OPERAND (e, 1)); } else pp_c_exclusive_or_expression (pp, e); } /* logical-AND-expression: inclusive-OR-expression logical-AND-expression && inclusive-OR-expression */ static void pp_c_logical_and_expression (c_pretty_printer *pp, tree e) { if (TREE_CODE (e) == TRUTH_ANDIF_EXPR) { pp_c_logical_and_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); pp_identifier (pp, "&&"); pp_c_whitespace (pp); pp_c_inclusive_or_expression (pp, TREE_OPERAND (e, 1)); } else pp_c_inclusive_or_expression (pp, e); } /* logical-OR-expression: logical-AND-expression logical-OR-expression || logical-AND-expression */ void pp_c_logical_or_expression (c_pretty_printer *pp, tree e) { if (TREE_CODE (e) == TRUTH_ORIF_EXPR) { pp_c_logical_or_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); pp_identifier (pp, "||"); pp_c_whitespace (pp); pp_c_logical_and_expression (pp, TREE_OPERAND (e, 1)); } else 
pp_c_logical_and_expression (pp, e); } /* conditional-expression: logical-OR-expression logical-OR-expression ? expression : conditional-expression */ static void pp_c_conditional_expression (c_pretty_printer *pp, tree e) { if (TREE_CODE (e) == COND_EXPR) { pp_c_logical_or_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); pp_question (pp); pp_c_whitespace (pp); pp_expression (pp, TREE_OPERAND (e, 1)); pp_c_whitespace (pp); pp_colon (pp); pp_c_whitespace (pp); pp_c_conditional_expression (pp, TREE_OPERAND (e, 2)); } else pp_c_logical_or_expression (pp, e); } /* assignment-expression: conditional-expression unary-expression assignment-operator assignment-expression assignment-expression: one of = *= /= %= += -= >>= <<= &= ^= |= */ static void pp_c_assignment_expression (c_pretty_printer *pp, tree e) { if (TREE_CODE (e) == MODIFY_EXPR || TREE_CODE (e) == INIT_EXPR) { pp_c_unary_expression (pp, TREE_OPERAND (e, 0)); pp_c_whitespace (pp); pp_equal (pp); pp_space (pp); pp_c_expression (pp, TREE_OPERAND (e, 1)); } else pp_c_conditional_expression (pp, e); } /* expression: assignment-expression expression , assignment-expression Implementation note: instead of going through the usual recursion chain, I take the liberty of dispatching nodes to the appropriate functions. This makes some redundancy, but it worths it. That also prevents a possible infinite recursion between pp_c_primary_expression () and pp_c_expression (). */ void pp_c_expression (c_pretty_printer *pp, tree e) { switch (TREE_CODE (e)) { case INTEGER_CST: pp_c_integer_constant (pp, e); break; case REAL_CST: pp_c_floating_constant (pp, e); break; case STRING_CST: pp_c_string_literal (pp, e); break; case IDENTIFIER_NODE: case FUNCTION_DECL: case VAR_DECL: case CONST_DECL: case PARM_DECL: case RESULT_DECL: case FIELD_DECL: case LABEL_DECL: case ERROR_MARK: case STMT_EXPR: pp_primary_expression (pp, e); break; case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case ARROW_EXPR: case ARRAY_REF: case CALL_EXPR: case COMPONENT_REF: case COMPLEX_CST: case COMPLEX_EXPR: case VECTOR_CST: case ORDERED_EXPR: case UNORDERED_EXPR: case LTGT_EXPR: case UNEQ_EXPR: case UNLE_EXPR: case UNLT_EXPR: case UNGE_EXPR: case UNGT_EXPR: case ABS_EXPR: case CONSTRUCTOR: case COMPOUND_LITERAL_EXPR: case VA_ARG_EXPR: pp_postfix_expression (pp, e); break; case CONJ_EXPR: case ADDR_EXPR: case INDIRECT_REF: case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case SIZEOF_EXPR: case ALIGNOF_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: pp_c_unary_expression (pp, e); break; case FLOAT_EXPR: case FIX_TRUNC_EXPR: case CONVERT_EXPR: pp_c_cast_expression (pp, e); break; case MULT_EXPR: case TRUNC_MOD_EXPR: case TRUNC_DIV_EXPR: pp_multiplicative_expression (pp, e); break; case LSHIFT_EXPR: case RSHIFT_EXPR: pp_c_shift_expression (pp, e); break; case LT_EXPR: case GT_EXPR: case LE_EXPR: case GE_EXPR: pp_c_relational_expression (pp, e); break; case BIT_AND_EXPR: pp_c_and_expression (pp, e); break; case BIT_XOR_EXPR: pp_c_exclusive_or_expression (pp, e); break; case BIT_IOR_EXPR: pp_c_inclusive_or_expression (pp, e); break; case TRUTH_ANDIF_EXPR: pp_c_logical_and_expression (pp, e); break; case TRUTH_ORIF_EXPR: pp_c_logical_or_expression (pp, e); break; case EQ_EXPR: case NE_EXPR: pp_c_equality_expression (pp, e); break; case COND_EXPR: pp_conditional_expression (pp, e); break; case PLUS_EXPR: case MINUS_EXPR: pp_c_additive_expression (pp, e); break; case MODIFY_EXPR: case INIT_EXPR: pp_assignment_expression (pp, 
e); break; case COMPOUND_EXPR: pp_c_left_paren (pp); pp_expression (pp, TREE_OPERAND (e, 0)); pp_separate_with (pp, ','); pp_assignment_expression (pp, TREE_OPERAND (e, 1)); pp_c_right_paren (pp); break; case NOP_EXPR: case NON_LVALUE_EXPR: case SAVE_EXPR: case UNSAVE_EXPR: pp_expression (pp, TREE_OPERAND (e, 0)); break; case TARGET_EXPR: pp_postfix_expression (pp, TREE_OPERAND (e, 1)); break; default: pp_unsupported_tree (pp, e); break; } } /* Statements. */ /* statement: labeled-statement compound-statement expression-statement selection-statement iteration-statement jump-statement */ void pp_c_statement (c_pretty_printer *pp, tree stmt) { enum tree_code code; if (stmt == NULL) return; if (pp_needs_newline (pp)) pp_newline_and_indent (pp, 0); code = TREE_CODE (stmt); switch (code) { /* expression-statement: expression(opt) ; */ case EXPR_STMT: pp_expression (pp, EXPR_STMT_EXPR (stmt)); pp_c_semicolon (pp); pp_needs_newline (pp) = true; break; case SWITCH_STMT: pp_c_identifier (pp, "switch"); pp_space (pp); pp_c_left_paren (pp); pp_expression (pp, SWITCH_COND (stmt)); pp_c_right_paren (pp); pp_indentation (pp) += 3; pp_needs_newline (pp) = true; pp_statement (pp, SWITCH_BODY (stmt)); pp_newline_and_indent (pp, -3); break; /* iteration-statement: while ( expression ) statement do statement while ( expression ) ; for ( expression(opt) ; expression(opt) ; expression(opt) ) statement for ( declaration expression(opt) ; expression(opt) ) statement */ case WHILE_STMT: pp_c_identifier (pp, "while"); pp_space (pp); pp_c_left_paren (pp); pp_expression (pp, WHILE_COND (stmt)); pp_c_right_paren (pp); pp_newline_and_indent (pp, 3); pp_statement (pp, WHILE_BODY (stmt)); pp_indentation (pp) -= 3; pp_needs_newline (pp) = true; break; case DO_STMT: pp_c_identifier (pp, "do"); pp_newline_and_indent (pp, 3); pp_statement (pp, DO_BODY (stmt)); pp_newline_and_indent (pp, -3); pp_c_identifier (pp, "while"); pp_space (pp); pp_c_left_paren (pp); pp_expression (pp, DO_COND (stmt)); pp_c_right_paren (pp); pp_c_semicolon (pp); pp_needs_newline (pp) = true; break; case FOR_STMT: pp_c_identifier (pp, "for"); pp_space (pp); pp_c_left_paren (pp); if (FOR_INIT_STMT (stmt)) pp_statement (pp, FOR_INIT_STMT (stmt)); else pp_c_semicolon (pp); pp_needs_newline (pp) = false; pp_c_whitespace (pp); if (FOR_COND (stmt)) pp_expression (pp, FOR_COND (stmt)); pp_c_semicolon (pp); pp_needs_newline (pp) = false; pp_c_whitespace (pp); if (FOR_EXPR (stmt)) pp_expression (pp, FOR_EXPR (stmt)); pp_c_right_paren (pp); pp_newline_and_indent (pp, 3); pp_statement (pp, FOR_BODY (stmt)); pp_indentation (pp) -= 3; pp_needs_newline (pp) = true; break; /* jump-statement: goto identifier; continue ; return expression(opt) ; */ case BREAK_STMT: case CONTINUE_STMT: pp_identifier (pp, code == BREAK_STMT ? "break" : "continue"); pp_c_semicolon (pp); pp_needs_newline (pp) = true; break; default: dump_generic_node (pp_base (pp), stmt, pp_indentation (pp), 0, true); break; } } /* Initialize the PRETTY-PRINTER for handling C codes. 
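   A minimal client, mirroring print_c_tree below, looks roughly like
   this:

     c_pretty_printer pp;
     pp_construct (pp_base (&pp), NULL, 0);
     pp_c_pretty_printer_init (&pp);
     pp_base (&pp)->buffer->stream = stderr;
     pp_statement (&pp, t);
     pp_newline (&pp);
     pp_flush (&pp);

   The init function only fills in the table of handlers; constructing
   the base printer and choosing the output stream remain the caller's
   job.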
*/ void pp_c_pretty_printer_init (c_pretty_printer *pp) { pp->offset_list = 0; pp->declaration = pp_c_declaration; pp->declaration_specifiers = pp_c_declaration_specifiers; pp->declarator = pp_c_declarator; pp->direct_declarator = pp_c_direct_declarator; pp->type_specifier_seq = pp_c_specifier_qualifier_list; pp->abstract_declarator = pp_c_abstract_declarator; pp->direct_abstract_declarator = pp_c_direct_abstract_declarator; pp->ptr_operator = pp_c_pointer; pp->parameter_list = pp_c_parameter_type_list; pp->type_id = pp_c_type_id; pp->simple_type_specifier = pp_c_type_specifier; pp->function_specifier = pp_c_function_specifier; pp->storage_class_specifier = pp_c_storage_class_specifier; pp->statement = pp_c_statement; pp->id_expression = pp_c_id_expression; pp->primary_expression = pp_c_primary_expression; pp->postfix_expression = pp_c_postfix_expression; pp->unary_expression = pp_c_unary_expression; pp->initializer = pp_c_initializer; pp->multiplicative_expression = pp_c_multiplicative_expression; pp->conditional_expression = pp_c_conditional_expression; pp->assignment_expression = pp_c_assignment_expression; pp->expression = pp_c_expression; } /* Print the tree T in full, on file FILE. */ void print_c_tree (FILE *file, tree t) { static c_pretty_printer pp_rec; static bool initialized = 0; c_pretty_printer *pp = &pp_rec; if (!initialized) { initialized = 1; pp_construct (pp_base (pp), NULL, 0); pp_c_pretty_printer_init (pp); pp_needs_newline (pp) = true; } pp_base (pp)->buffer->stream = file; pp_statement (pp, t); pp_newline (pp); pp_flush (pp); } /* Print the tree T in full, on stderr. */ void debug_c_tree (tree t) { print_c_tree (stderr, t); fputc ('\n', stderr); } /* Output the DECL_NAME of T. If T has no DECL_NAME, output a string made up of T's memory address. */ void pp_c_tree_decl_identifier (c_pretty_printer *pp, tree t) { const char *name; if (!DECL_P (t)) abort (); if (DECL_NAME (t)) name = IDENTIFIER_POINTER (DECL_NAME (t)); else { static char xname[8]; sprintf (xname, "", ((unsigned)((unsigned long)(t) & 0xffff))); name = xname; } pp_c_identifier (pp, name); } /* main.c: defines main() for cc1, cc1plus, etc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ int main (int argc, char **argv); /* We define main() to call toplev_main(), which is defined in toplev.c. We do this in a separate file in order to allow the language front-end to define a different main(), if it so desires. */ int main (int argc, char **argv) { return toplev_main (argc, (const char **) argv); } /* A Bison parser, made from c-parse.y by GNU bison 1.35. */ #define YYBISON 1 /* Identify Bison output. 
*/ #undef LABEL # define IDENTIFIER 257 # define TYPENAME 258 # define SCSPEC 259 # define STATIC 260 # define TYPESPEC 261 # define TYPE_QUAL 262 # define CONSTANT 263 # define STRING 264 # define ELLIPSIS 265 # define SIZEOF 266 # define ENUM 267 # define STRUCT 268 # define UNION 269 # define IF 270 # define ELSE 271 # define WHILE 272 # define DO 273 # define FOR 274 # define SWITCH 275 # define CASE 276 # define DEFAULT 277 # define BREAK 278 # define CONTINUE 279 # define RETURN 280 # define GOTO 281 # define ASM_KEYWORD 282 # define TYPEOF 283 # define ALIGNOF 284 # define ATTRIBUTE 285 # define EXTENSION 286 # define LABEL 287 # define REALPART 288 # define IMAGPART 289 # define VA_ARG 290 # define CHOOSE_EXPR 291 # define TYPES_COMPATIBLE_P 292 # define PTR_VALUE 293 # define PTR_BASE 294 # define PTR_EXTENT 295 # define FUNC_NAME 296 # define OFFSETOF 297 # define ASSIGN 298 # define OROR 299 # define ANDAND 300 # define EQCOMPARE 301 # define ARITHCOMPARE 302 # define LSHIFT 303 # define RSHIFT 304 # define UNARY 305 # define PLUSPLUS 306 # define MINUSMINUS 307 # define HYPERUNARY 308 # define POINTSAT 309 # define AT_INTERFACE 310 # define AT_IMPLEMENTATION 311 # define AT_END 312 # define AT_SELECTOR 313 # define AT_DEFS 314 # define AT_ENCODE 315 # define CLASSNAME 316 # define AT_PUBLIC 317 # define AT_PRIVATE 318 # define AT_PROTECTED 319 # define AT_PROTOCOL 320 # define OBJECTNAME 321 # define AT_CLASS 322 # define AT_ALIAS 323 # define AT_THROW 324 # define AT_TRY 325 # define AT_CATCH 326 # define AT_FINALLY 327 # define AT_SYNCHRONIZED 328 # define OBJC_STRING 329 #line 34 "c-parse.y" /* Like YYERROR but do call yyerror. */ #define YYERROR1 { yyerror ("syntax error"); YYERROR; } /* Like the default stack expander, except (1) use realloc when possible, (2) impose no hard maxiumum on stack size, (3) REALLY do not use alloca. Irritatingly, YYSTYPE is defined after this %{ %} block, so we cannot give malloced_yyvs its proper type. This is ok since all we need from it is to be able to free it. */ static short *malloced_yyss; static void *malloced_yyvs; #define yyoverflow(MSG, SS, SSSIZE, VS, VSSIZE, YYSSZ) \ do { \ size_t newsize; \ short *newss; \ YYSTYPE *newvs; \ newsize = *(YYSSZ) *= 2; \ if (malloced_yyss) \ { \ newss = really_call_realloc (*(SS), newsize * sizeof (short)); \ newvs = really_call_realloc (*(VS), newsize * sizeof (YYSTYPE)); \ } \ else \ { \ newss = really_call_malloc (newsize * sizeof (short)); \ newvs = really_call_malloc (newsize * sizeof (YYSTYPE)); \ if (newss) \ memcpy (newss, *(SS), (SSSIZE)); \ if (newvs) \ memcpy (newvs, *(VS), (VSSIZE)); \ } \ if (!newss || !newvs) \ { \ yyerror (MSG); \ return 2; \ } \ *(SS) = newss; \ *(VS) = newvs; \ malloced_yyss = newss; \ malloced_yyvs = (void *) newvs; \ } while (0) #line 101 "c-parse.y" #ifndef YYSTYPE typedef union {long itype; tree ttype; enum tree_code code; location_t location; } yystype; # define YYSTYPE yystype # define YYSTYPE_IS_TRIVIAL 1 #endif #line 239 "c-parse.y" /* List of types and structure classes of the current declaration. */ static GTY(()) tree current_declspecs; static GTY(()) tree prefix_attributes; /* List of all the attributes applying to the identifier currently being declared; includes prefix_attributes and possibly some more attributes just after a comma. */ static GTY(()) tree all_prefix_attributes; /* Stack of saved values of current_declspecs, prefix_attributes and all_prefix_attributes. 
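   Each entry is a TREE_LIST node: TREE_VALUE is the saved
   current_declspecs, TREE_PURPOSE is itself a TREE_LIST pairing
   prefix_attributes (as TREE_PURPOSE) with all_prefix_attributes (as
   TREE_VALUE), and TREE_CHAIN links to the entry below; see
   PUSH_DECLSPEC_STACK and POP_DECLSPEC_STACK below.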
*/ static GTY(()) tree declspec_stack; /* PUSH_DECLSPEC_STACK is called from setspecs; POP_DECLSPEC_STACK should be called from the productions making use of setspecs. */ #define PUSH_DECLSPEC_STACK \ do { \ declspec_stack = tree_cons (build_tree_list (prefix_attributes, \ all_prefix_attributes), \ current_declspecs, \ declspec_stack); \ } while (0) #define POP_DECLSPEC_STACK \ do { \ current_declspecs = TREE_VALUE (declspec_stack); \ prefix_attributes = TREE_PURPOSE (TREE_PURPOSE (declspec_stack)); \ all_prefix_attributes = TREE_VALUE (TREE_PURPOSE (declspec_stack)); \ declspec_stack = TREE_CHAIN (declspec_stack); \ } while (0) /* For __extension__, save/restore the warning flags which are controlled by __extension__. */ #define SAVE_EXT_FLAGS() \ (pedantic \ | (warn_pointer_arith << 1) \ | (warn_traditional << 2) \ | (flag_iso << 3)) #define RESTORE_EXT_FLAGS(val) \ do { \ pedantic = val & 1; \ warn_pointer_arith = (val >> 1) & 1; \ warn_traditional = (val >> 2) & 1; \ flag_iso = (val >> 3) & 1; \ } while (0) #define OBJC_NEED_RAW_IDENTIFIER(VAL) /* nothing */ /* Tell yyparse how to print a token's value, if yydebug is set. */ #define YYPRINT(FILE,YYCHAR,YYLVAL) yyprint(FILE,YYCHAR,YYLVAL) static void yyprint (FILE *, int, YYSTYPE); static void yyerror (const char *); static int yylexname (void); static inline int _yylex (void); static int yylex (void); static void init_reswords (void); /* Initialisation routine for this file. */ void c_parse_init (void) { init_reswords (); } #ifndef YYDEBUG # define YYDEBUG 0 #endif #define YYFINAL 925 #define YYFLAG -32768 #define YYNTBASE 98 /* YYTRANSLATE(YYLEX) -- Bison token number corresponding to YYLEX. */ #define YYTRANSLATE(x) ((unsigned)(x) <= 329 ? yytranslate[x] : 302) /* YYTRANSLATE[YYLEX] -- Bison token number corresponding to YYLEX. 
*/ static const char yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 93, 2, 2, 2, 61, 52, 2, 68, 95, 59, 57, 94, 58, 67, 60, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 47, 90, 2, 45, 2, 46, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 69, 2, 97, 51, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 96, 50, 91, 92, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 48, 49, 53, 54, 55, 56, 62, 63, 64, 65, 66, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 }; #if YYDEBUG static const short yyprhs[] = { 0, 0, 1, 3, 4, 7, 8, 12, 14, 16, 18, 21, 25, 30, 35, 38, 41, 44, 46, 47, 48, 57, 62, 63, 64, 73, 78, 79, 80, 88, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 113, 115, 117, 121, 123, 126, 129, 132, 135, 138, 143, 146, 151, 154, 157, 159, 161, 163, 165, 170, 172, 176, 180, 184, 188, 192, 196, 200, 204, 208, 212, 216, 220, 221, 226, 227, 232, 233, 234, 242, 243, 249, 253, 257, 259, 261, 263, 265, 266, 274, 278, 282, 286, 290, 295, 302, 309, 314, 323, 328, 335, 340, 345, 349, 353, 356, 359, 361, 365, 370, 371, 373, 376, 378, 380, 383, 386, 391, 396, 399, 402, 405, 406, 408, 413, 418, 422, 426, 429, 432, 434, 437, 440, 443, 446, 449, 451, 454, 456, 459, 462, 465, 468, 471, 474, 476, 479, 482, 485, 488, 491, 494, 497, 500, 503, 506, 509, 512, 515, 518, 521, 524, 526, 529, 532, 535, 538, 541, 544, 547, 550, 553, 556, 559, 562, 565, 568, 571, 574, 577, 580, 583, 586, 589, 592, 595, 598, 601, 604, 607, 610, 613, 616, 619, 622, 625, 628, 631, 634, 637, 640, 643, 646, 649, 652, 655, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 771, 773, 775, 777, 779, 781, 783, 785, 787, 792, 797, 799, 804, 806, 811, 812, 819, 823, 824, 831, 835, 836, 838, 840, 843, 852, 856, 858, 862, 863, 865, 870, 877, 882, 884, 886, 888, 890, 892, 894, 896, 897, 902, 904, 905, 908, 910, 914, 918, 921, 922, 927, 929, 930, 935, 937, 939, 941, 944, 947, 953, 957, 958, 959, 966, 967, 968, 975, 977, 979, 984, 988, 991, 995, 997, 999, 1001, 1005, 1008, 1010, 1014, 1017, 1021, 1025, 1030, 1034, 1039, 1043, 1046, 1048, 1050, 1053, 1055, 1058, 1060, 1063, 1064, 1072, 1078, 1079, 1087, 1093, 1094, 1103, 1104, 1112, 1115, 1118, 1121, 1122, 1124, 1125, 1127, 1129, 1132, 1133, 1137, 1140, 1144, 1147, 1151, 1153, 1155, 1158, 1160, 1165, 1167, 1172, 1175, 1180, 1184, 1187, 1192, 1196, 1198, 1202, 1204, 1206, 1210, 1211, 1215, 1216, 1218, 1219, 1221, 1224, 1226, 1228, 1230, 1234, 1237, 1241, 1246, 1250, 1253, 1256, 1258, 1263, 1267, 1272, 1278, 1284, 1286, 1288, 1290, 1292, 1294, 1297, 1300, 1303, 1306, 1308, 1311, 1314, 1317, 1319, 1322, 1325, 1328, 1331, 1333, 1336, 1338, 1340, 1342, 1344, 1347, 1348, 1349, 1351, 1353, 1356, 1360, 1362, 1365, 1367, 1369, 1373, 
1375, 1377, 1380, 1383, 1384, 1385, 1388, 1392, 1395, 1398, 1401, 1405, 1409, 1411, 1421, 1431, 1439, 1447, 1448, 1449, 1459, 1460, 1461, 1475, 1476, 1478, 1481, 1483, 1486, 1488, 1501, 1502, 1511, 1514, 1516, 1518, 1520, 1522, 1524, 1527, 1530, 1533, 1537, 1539, 1543, 1548, 1550, 1552, 1554, 1558, 1564, 1567, 1572, 1579, 1580, 1582, 1585, 1590, 1599, 1601, 1605, 1611, 1619, 1620, 1622, 1623, 1625, 1627, 1631, 1638, 1648, 1650, 1654, 1655, 1656, 1657, 1661, 1664, 1665, 1666, 1673, 1676, 1677, 1679, 1681, 1685, 1687, 1691, 1696, 1701, 1705, 1710, 1714, 1719, 1724, 1728, 1733, 1737, 1739, 1740, 1744, 1746, 1749, 1751, 1755, 1757, 1761 }; static const short yyrhs[] = { -1, 99, 0, 0, 100, 102, 0, 0, 99, 101, 102, 0, 104, 0, 103, 0, 276, 0, 301, 102, 0, 135, 169, 90, 0, 155, 135, 169, 90, 0, 154, 135, 168, 90, 0, 161, 90, 0, 1, 90, 0, 1, 91, 0, 90, 0, 0, 0, 154, 135, 197, 105, 130, 249, 106, 243, 0, 154, 135, 197, 1, 0, 0, 0, 155, 135, 202, 107, 130, 249, 108, 243, 0, 155, 135, 202, 1, 0, 0, 0, 135, 202, 109, 130, 249, 110, 243, 0, 135, 202, 1, 0, 3, 0, 4, 0, 52, 0, 58, 0, 57, 0, 63, 0, 64, 0, 92, 0, 93, 0, 115, 0, 0, 115, 0, 121, 0, 115, 94, 121, 0, 127, 0, 59, 120, 0, 301, 120, 0, 112, 120, 0, 49, 111, 0, 117, 116, 0, 117, 68, 223, 95, 0, 118, 116, 0, 118, 68, 223, 95, 0, 34, 120, 0, 35, 120, 0, 12, 0, 30, 0, 29, 0, 116, 0, 68, 223, 95, 120, 0, 120, 0, 121, 57, 121, 0, 121, 58, 121, 0, 121, 59, 121, 0, 121, 60, 121, 0, 121, 61, 121, 0, 121, 55, 121, 0, 121, 56, 121, 0, 121, 54, 121, 0, 121, 53, 121, 0, 121, 52, 121, 0, 121, 50, 121, 0, 121, 51, 121, 0, 0, 121, 49, 122, 121, 0, 0, 121, 48, 123, 121, 0, 0, 0, 121, 46, 124, 113, 47, 125, 121, 0, 0, 121, 46, 126, 47, 121, 0, 121, 45, 121, 0, 121, 44, 121, 0, 3, 0, 9, 0, 10, 0, 42, 0, 0, 68, 223, 95, 96, 128, 183, 91, 0, 68, 113, 95, 0, 68, 1, 95, 0, 247, 245, 95, 0, 247, 1, 95, 0, 127, 68, 114, 95, 0, 36, 68, 121, 94, 223, 95, 0, 43, 68, 223, 94, 129, 95, 0, 43, 68, 1, 95, 0, 37, 68, 121, 94, 121, 94, 121, 95, 0, 37, 68, 1, 95, 0, 38, 68, 223, 94, 223, 95, 0, 38, 68, 1, 95, 0, 127, 69, 113, 97, 0, 127, 67, 111, 0, 127, 66, 111, 0, 127, 63, 0, 127, 64, 0, 111, 0, 129, 67, 111, 0, 129, 69, 113, 97, 0, 0, 132, 0, 249, 133, 0, 131, 0, 238, 0, 132, 131, 0, 131, 238, 0, 156, 135, 168, 90, 0, 157, 135, 169, 90, 0, 156, 90, 0, 157, 90, 0, 249, 137, 0, 0, 174, 0, 154, 135, 168, 90, 0, 155, 135, 169, 90, 0, 154, 135, 191, 0, 155, 135, 194, 0, 161, 90, 0, 301, 137, 0, 8, 0, 138, 8, 0, 139, 8, 0, 138, 175, 0, 140, 8, 0, 141, 8, 0, 175, 0, 140, 175, 0, 163, 0, 142, 8, 0, 143, 8, 0, 142, 165, 0, 143, 165, 0, 138, 163, 0, 139, 163, 0, 164, 0, 142, 175, 0, 142, 166, 0, 143, 166, 0, 138, 164, 0, 139, 164, 0, 144, 8, 0, 145, 8, 0, 144, 165, 0, 145, 165, 0, 140, 163, 0, 141, 163, 0, 144, 175, 0, 144, 166, 0, 145, 166, 0, 140, 164, 0, 141, 164, 0, 180, 0, 146, 8, 0, 147, 8, 0, 138, 180, 0, 139, 180, 0, 146, 180, 0, 147, 180, 0, 146, 175, 0, 148, 8, 0, 149, 8, 0, 140, 180, 0, 141, 180, 0, 148, 180, 0, 149, 180, 0, 148, 175, 0, 150, 8, 0, 151, 8, 0, 150, 165, 0, 151, 165, 0, 146, 163, 0, 147, 163, 0, 142, 180, 0, 143, 180, 0, 150, 180, 0, 151, 180, 0, 150, 175, 0, 150, 166, 0, 151, 166, 0, 146, 164, 0, 147, 164, 0, 152, 8, 0, 153, 8, 0, 152, 165, 0, 153, 165, 0, 148, 163, 0, 149, 163, 0, 144, 180, 0, 145, 180, 0, 152, 180, 0, 153, 180, 0, 152, 175, 0, 152, 166, 0, 153, 166, 0, 148, 164, 0, 149, 164, 0, 142, 0, 143, 0, 144, 0, 145, 0, 150, 0, 151, 0, 152, 0, 153, 0, 138, 0, 139, 0, 140, 0, 141, 0, 146, 0, 147, 0, 148, 0, 149, 0, 142, 0, 143, 0, 150, 0, 151, 0, 138, 0, 
139, 0, 146, 0, 147, 0, 142, 0, 143, 0, 144, 0, 145, 0, 138, 0, 139, 0, 140, 0, 141, 0, 142, 0, 143, 0, 144, 0, 145, 0, 138, 0, 139, 0, 140, 0, 141, 0, 138, 0, 139, 0, 140, 0, 141, 0, 142, 0, 143, 0, 144, 0, 145, 0, 146, 0, 147, 0, 148, 0, 149, 0, 150, 0, 151, 0, 152, 0, 153, 0, 0, 159, 0, 165, 0, 167, 0, 166, 0, 7, 0, 211, 0, 206, 0, 4, 0, 119, 68, 113, 95, 0, 119, 68, 223, 95, 0, 170, 0, 168, 94, 136, 170, 0, 172, 0, 169, 94, 136, 172, 0, 0, 197, 275, 174, 45, 171, 181, 0, 197, 275, 174, 0, 0, 202, 275, 174, 45, 173, 181, 0, 202, 275, 174, 0, 0, 175, 0, 176, 0, 175, 176, 0, 31, 284, 68, 68, 177, 95, 95, 285, 0, 31, 1, 285, 0, 178, 0, 177, 94, 178, 0, 0, 179, 0, 179, 68, 3, 95, 0, 179, 68, 3, 94, 115, 95, 0, 179, 68, 114, 95, 0, 111, 0, 180, 0, 7, 0, 8, 0, 6, 0, 5, 0, 121, 0, 0, 96, 182, 183, 91, 0, 1, 0, 0, 184, 212, 0, 185, 0, 184, 94, 185, 0, 189, 45, 187, 0, 190, 187, 0, 0, 111, 47, 186, 187, 0, 187, 0, 0, 96, 188, 183, 91, 0, 121, 0, 1, 0, 190, 0, 189, 190, 0, 67, 111, 0, 69, 121, 11, 121, 97, 0, 69, 121, 97, 0, 0, 0, 197, 192, 130, 249, 193, 248, 0, 0, 0, 202, 195, 130, 249, 196, 248, 0, 198, 0, 202, 0, 68, 174, 198, 95, 0, 198, 68, 296, 0, 198, 231, 0, 59, 162, 198, 0, 4, 0, 200, 0, 201, 0, 200, 68, 296, 0, 200, 231, 0, 4, 0, 201, 68, 296, 0, 201, 231, 0, 59, 162, 200, 0, 59, 162, 201, 0, 68, 174, 201, 95, 0, 202, 68, 296, 0, 68, 174, 202, 95, 0, 59, 162, 202, 0, 202, 231, 0, 3, 0, 14, 0, 14, 175, 0, 15, 0, 15, 175, 0, 13, 0, 13, 175, 0, 0, 203, 111, 96, 207, 214, 91, 174, 0, 203, 96, 214, 91, 174, 0, 0, 204, 111, 96, 208, 214, 91, 174, 0, 204, 96, 214, 91, 174, 0, 0, 205, 111, 96, 209, 221, 213, 91, 174, 0, 0, 205, 96, 210, 221, 213, 91, 174, 0, 203, 111, 0, 204, 111, 0, 205, 111, 0, 0, 94, 0, 0, 94, 0, 215, 0, 215, 216, 0, 0, 215, 216, 90, 0, 215, 90, 0, 158, 135, 217, 0, 158, 135, 0, 159, 135, 218, 0, 159, 0, 1, 0, 301, 216, 0, 219, 0, 217, 94, 136, 219, 0, 220, 0, 218, 94, 136, 220, 0, 197, 174, 0, 197, 47, 121, 174, 0, 47, 121, 174, 0, 202, 174, 0, 202, 47, 121, 174, 0, 47, 121, 174, 0, 222, 0, 221, 94, 222, 0, 1, 0, 111, 0, 111, 45, 121, 0, 0, 160, 224, 225, 0, 0, 227, 0, 0, 227, 0, 228, 175, 0, 229, 0, 228, 0, 230, 0, 59, 162, 228, 0, 59, 162, 0, 59, 162, 229, 0, 68, 174, 227, 95, 0, 230, 68, 286, 0, 230, 231, 0, 68, 286, 0, 231, 0, 69, 162, 121, 97, 0, 69, 162, 97, 0, 69, 162, 59, 97, 0, 69, 6, 162, 121, 97, 0, 69, 159, 6, 121, 97, 0, 233, 0, 234, 0, 235, 0, 236, 0, 252, 0, 233, 252, 0, 234, 252, 0, 235, 252, 0, 236, 252, 0, 134, 0, 233, 134, 0, 234, 134, 0, 236, 134, 0, 253, 0, 233, 253, 0, 234, 253, 0, 235, 253, 0, 236, 253, 0, 238, 0, 237, 238, 0, 233, 0, 234, 0, 235, 0, 236, 0, 1, 90, 0, 0, 0, 241, 0, 242, 0, 241, 242, 0, 33, 300, 90, 0, 248, 0, 1, 248, 0, 96, 0, 91, 0, 240, 246, 91, 0, 232, 0, 1, 0, 68, 96, 0, 244, 245, 0, 0, 0, 250, 253, 0, 239, 250, 252, 0, 249, 272, 0, 249, 273, 0, 249, 113, 0, 239, 250, 257, 0, 239, 250, 90, 0, 251, 0, 16, 239, 249, 68, 254, 95, 255, 17, 256, 0, 16, 239, 249, 68, 254, 95, 256, 17, 256, 0, 16, 239, 249, 68, 254, 95, 255, 0, 16, 239, 249, 68, 254, 95, 256, 0, 0, 0, 18, 239, 249, 68, 254, 95, 258, 259, 251, 0, 0, 0, 19, 239, 249, 258, 259, 251, 18, 262, 263, 68, 254, 95, 90, 0, 0, 113, 0, 264, 90, 0, 137, 0, 249, 264, 0, 264, 0, 20, 239, 68, 265, 249, 266, 90, 267, 95, 258, 259, 251, 0, 0, 21, 239, 68, 113, 95, 270, 258, 251, 0, 113, 90, 0, 257, 0, 260, 0, 261, 0, 268, 0, 269, 0, 24, 90, 0, 25, 90, 0, 26, 90, 0, 26, 113, 90, 0, 277, 0, 27, 111, 90, 0, 27, 59, 113, 90, 0, 90, 0, 248, 0, 271, 0, 22, 121, 47, 0, 22, 121, 11, 121, 47, 0, 
23, 47, 0, 111, 249, 47, 174, 0, 28, 284, 68, 10, 95, 285, 0, 0, 274, 0, 274, 90, 0, 28, 1, 285, 90, 0, 28, 279, 284, 68, 278, 95, 285, 90, 0, 10, 0, 10, 47, 280, 0, 10, 47, 280, 47, 280, 0, 10, 47, 280, 47, 280, 47, 283, 0, 0, 8, 0, 0, 281, 0, 282, 0, 281, 94, 282, 0, 10, 285, 68, 113, 95, 284, 0, 69, 111, 97, 10, 285, 68, 113, 95, 284, 0, 10, 0, 283, 94, 10, 0, 0, 0, 0, 174, 287, 288, 0, 291, 95, 0, 0, 0, 292, 90, 289, 174, 290, 288, 0, 1, 95, 0, 0, 11, 0, 292, 0, 292, 94, 11, 0, 294, 0, 292, 94, 293, 0, 154, 135, 199, 174, 0, 154, 135, 202, 174, 0, 154, 135, 226, 0, 155, 135, 202, 174, 0, 155, 135, 226, 0, 156, 295, 199, 174, 0, 156, 295, 202, 174, 0, 156, 295, 226, 0, 157, 295, 202, 174, 0, 157, 295, 226, 0, 135, 0, 0, 174, 297, 298, 0, 288, 0, 299, 95, 0, 3, 0, 299, 94, 3, 0, 111, 0, 300, 94, 111, 0, 32, 0 }; #endif #if YYDEBUG /* YYRLINE[YYN] -- source line where rule number YYN was defined. */ static const short yyrline[] = { 0, 311, 315, 322, 322, 324, 324, 327, 329, 330, 331, 335, 343, 345, 347, 349, 350, 351, 356, 356, 356, 368, 370, 370, 370, 381, 383, 383, 383, 394, 398, 400, 403, 405, 407, 412, 414, 416, 418, 422, 426, 429, 432, 435, 439, 441, 444, 447, 451, 453, 459, 462, 465, 468, 470, 474, 478, 482, 486, 488, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 518, 525, 525, 532, 532, 532, 542, 542, 552, 559, 570, 577, 578, 579, 581, 581, 594, 599, 601, 606, 610, 612, 615, 617, 619, 631, 633, 643, 645, 647, 649, 654, 656, 666, 669, 671, 675, 677, 683, 688, 690, 691, 692, 699, 702, 704, 707, 715, 724, 734, 739, 742, 744, 746, 748, 750, 806, 810, 813, 818, 824, 828, 833, 837, 842, 846, 849, 852, 855, 858, 861, 866, 870, 873, 876, 879, 882, 887, 891, 894, 897, 900, 903, 908, 912, 915, 918, 921, 926, 930, 933, 936, 942, 948, 954, 962, 968, 972, 975, 981, 987, 993, 1001, 1007, 1011, 1014, 1017, 1020, 1023, 1026, 1032, 1038, 1044, 1052, 1056, 1059, 1062, 1065, 1070, 1074, 1077, 1080, 1083, 1086, 1089, 1095, 1101, 1107, 1115, 1119, 1122, 1125, 1128, 1134, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1145, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1156, 1158, 1159, 1160, 1163, 1165, 1166, 1167, 1170, 1172, 1173, 1174, 1177, 1179, 1180, 1181, 1184, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1195, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1215, 1218, 1243, 1245, 1248, 1252, 1255, 1258, 1262, 1267, 1273, 1279, 1281, 1284, 1286, 1289, 1289, 1298, 1305, 1305, 1314, 1321, 1324, 1328, 1331, 1335, 1339, 1343, 1346, 1350, 1353, 1355, 1357, 1359, 1366, 1368, 1369, 1370, 1373, 1375, 1380, 1382, 1382, 1386, 1391, 1395, 1398, 1400, 1405, 1409, 1412, 1412, 1418, 1421, 1421, 1426, 1428, 1431, 1433, 1436, 1439, 1443, 1447, 1447, 1447, 1477, 1477, 1477, 1510, 1512, 1517, 1520, 1522, 1524, 1526, 1533, 1535, 1538, 1541, 1543, 1546, 1549, 1551, 1553, 1555, 1562, 1565, 1567, 1569, 1571, 1574, 1577, 1581, 1584, 1588, 1591, 1601, 1601, 1609, 1613, 1613, 1618, 1622, 1622, 1627, 1627, 1634, 1637, 1639, 1647, 1649, 1652, 1654, 1671, 1674, 1679, 1681, 1683, 1688, 1692, 1702, 1705, 1710, 1712, 1717, 1719, 1723, 1725, 1729, 1734, 1738, 1744, 1749, 1753, 1762, 1764, 1769, 1774, 1777, 1781, 1781, 1789, 1792, 1795, 1800, 1804, 1810, 1812, 1815, 1817, 1821, 1824, 1828, 1831, 1833, 1835, 1837, 1843, 1846, 1848, 1850, 1853, 1863, 1865, 1866, 1870, 1873, 1875, 1876, 1877, 1878, 1881, 1883, 1889, 1890, 1893, 1895, 1896, 1897, 1898, 1901, 1903, 1906, 1908, 1909, 1910, 1913, 1917, 1923, 1925, 1930, 1932, 1935, 1949, 1952, 1955, 1958, 1959, 1962, 1964, 1967, 
1979, 1987, 1993, 1995, 1999, 2004, 2022, 2027, 2039, 2044, 2049, 2052, 2057, 2061, 2065, 2071, 2075, 2079, 2088, 2088, 2088, 2099, 2102, 2105, 2108, 2112, 2124, 2128, 2138, 2138, 2151, 2154, 2156, 2158, 2160, 2162, 2164, 2166, 2168, 2170, 2172, 2173, 2175, 2177, 2182, 2185, 2192, 2194, 2196, 2198, 2214, 2221, 2224, 2228, 2231, 2237, 2243, 2248, 2251, 2254, 2260, 2263, 2276, 2278, 2281, 2283, 2287, 2290, 2297, 2300, 2304, 2308, 2318, 2318, 2327, 2329, 2329, 2329, 2336, 2342, 2344, 2350, 2352, 2356, 2359, 2365, 2371, 2376, 2379, 2385, 2392, 2398, 2403, 2406, 2412, 2417, 2426, 2426, 2435, 2437, 2454, 2457, 2462, 2465, 2469 }; #endif #if (YYDEBUG) || defined YYERROR_VERBOSE /* YYTNAME[TOKEN_NUM] -- String name of the token TOKEN_NUM. */ static const char *const yytname[] = { "$", "error", "$undefined.", "IDENTIFIER", "TYPENAME", "SCSPEC", "STATIC", "TYPESPEC", "TYPE_QUAL", "CONSTANT", "STRING", "ELLIPSIS", "SIZEOF", "ENUM", "STRUCT", "UNION", "IF", "ELSE", "WHILE", "DO", "FOR", "SWITCH", "CASE", "DEFAULT", "BREAK", "CONTINUE", "RETURN", "GOTO", "ASM_KEYWORD", "TYPEOF", "ALIGNOF", "ATTRIBUTE", "EXTENSION", "LABEL", "REALPART", "IMAGPART", "VA_ARG", "CHOOSE_EXPR", "TYPES_COMPATIBLE_P", "PTR_VALUE", "PTR_BASE", "PTR_EXTENT", "FUNC_NAME", "OFFSETOF", "ASSIGN", "'='", "'?'", "':'", "OROR", "ANDAND", "'|'", "'^'", "'&'", "EQCOMPARE", "ARITHCOMPARE", "LSHIFT", "RSHIFT", "'+'", "'-'", "'*'", "'/'", "'%'", "UNARY", "PLUSPLUS", "MINUSMINUS", "HYPERUNARY", "POINTSAT", "'.'", "'('", "'['", "AT_INTERFACE", "AT_IMPLEMENTATION", "AT_END", "AT_SELECTOR", "AT_DEFS", "AT_ENCODE", "CLASSNAME", "AT_PUBLIC", "AT_PRIVATE", "AT_PROTECTED", "AT_PROTOCOL", "OBJECTNAME", "AT_CLASS", "AT_ALIAS", "AT_THROW", "AT_TRY", "AT_CATCH", "AT_FINALLY", "AT_SYNCHRONIZED", "OBJC_STRING", "';'", "'}'", "'~'", "'!'", "','", "')'", "'{'", "']'", "program", "extdefs", "@1", "@2", "extdef", "datadef", "fndef", "@3", "@4", "@5", "@6", "@7", "@8", "identifier", "unop", "expr", "exprlist", "nonnull_exprlist", "unary_expr", "sizeof", "alignof", "typeof", "cast_expr", "expr_no_commas", "@9", "@10", "@11", "@12", "@13", "primary", "@14", "offsetof_member_designator", "old_style_parm_decls", "lineno_datadecl", "datadecls", "datadecl", "lineno_decl", "setspecs", "maybe_resetattrs", "decl", "declspecs_nosc_nots_nosa_noea", "declspecs_nosc_nots_nosa_ea", "declspecs_nosc_nots_sa_noea", "declspecs_nosc_nots_sa_ea", "declspecs_nosc_ts_nosa_noea", "declspecs_nosc_ts_nosa_ea", "declspecs_nosc_ts_sa_noea", "declspecs_nosc_ts_sa_ea", "declspecs_sc_nots_nosa_noea", "declspecs_sc_nots_nosa_ea", "declspecs_sc_nots_sa_noea", "declspecs_sc_nots_sa_ea", "declspecs_sc_ts_nosa_noea", "declspecs_sc_ts_nosa_ea", "declspecs_sc_ts_sa_noea", "declspecs_sc_ts_sa_ea", "declspecs_ts", "declspecs_nots", "declspecs_ts_nosa", "declspecs_nots_nosa", "declspecs_nosc_ts", "declspecs_nosc_nots", "declspecs_nosc", "declspecs", "maybe_type_quals_attrs", "typespec_nonattr", "typespec_attr", "typespec_reserved_nonattr", "typespec_reserved_attr", "typespec_nonreserved_nonattr", "initdecls", "notype_initdecls", "initdcl", "@15", "notype_initdcl", "@16", "maybe_attribute", "attributes", "attribute", "attribute_list", "attrib", "any_word", "scspec", "init", "@17", "initlist_maybe_comma", "initlist1", "initelt", "@18", "initval", "@19", "designator_list", "designator", "nested_function", "@20", "@21", "notype_nested_function", "@22", "@23", "declarator", "after_type_declarator", "parm_declarator", "parm_declarator_starttypename", "parm_declarator_nostarttypename", 
"notype_declarator", "struct_head", "union_head", "enum_head", "structsp_attr", "@24", "@25", "@26", "@27", "structsp_nonattr", "maybecomma", "maybecomma_warn", "component_decl_list", "component_decl_list2", "component_decl", "components", "components_notype", "component_declarator", "component_notype_declarator", "enumlist", "enumerator", "typename", "@28", "absdcl", "absdcl_maybe_attribute", "absdcl1", "absdcl1_noea", "absdcl1_ea", "direct_absdcl1", "array_declarator", "stmts_and_decls", "lineno_stmt_decl_or_labels_ending_stmt", "lineno_stmt_decl_or_labels_ending_decl", "lineno_stmt_decl_or_labels_ending_label", "lineno_stmt_decl_or_labels_ending_error", "lineno_stmt_decl_or_labels", "errstmt", "c99_block_start", "maybe_label_decls", "label_decls", "label_decl", "compstmt_or_error", "compstmt_start", "compstmt_nostart", "compstmt_contents_nonempty", "compstmt_primary_start", "compstmt", "save_location", "lineno_labels", "c99_block_lineno_labeled_stmt", "lineno_stmt", "lineno_label", "condition", "if_statement_1", "if_statement_2", "if_statement", "start_break", "start_continue", "while_statement", "do_statement", "@29", "@30", "xexpr", "for_init_stmt", "for_cond_expr", "for_incr_expr", "for_statement", "switch_statement", "@31", "stmt_nocomp", "stmt", "label", "simple_asm_expr", "maybeasm", "asmdef", "asm_stmt", "asm_argument", "maybe_volatile", "asm_operands", "nonnull_asm_operands", "asm_operand", "asm_clobbers", "stop_string_translation", "start_string_translation", "parmlist", "@32", "parmlist_1", "@33", "@34", "parmlist_2", "parms", "parm", "firstparm", "setspecs_fp", "parmlist_or_identifiers", "@35", "parmlist_or_identifiers_1", "identifiers", "identifiers_or_typenames", "extension", 0 }; #endif /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. 
*/ static const short yyr1[] = { 0, 98, 98, 100, 99, 101, 99, 102, 102, 102, 102, 103, 103, 103, 103, 103, 103, 103, 105, 106, 104, 104, 107, 108, 104, 104, 109, 110, 104, 104, 111, 111, 112, 112, 112, 112, 112, 112, 112, 113, 114, 114, 115, 115, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 117, 118, 119, 120, 120, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 122, 121, 123, 121, 124, 125, 121, 126, 121, 121, 121, 127, 127, 127, 127, 128, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 129, 129, 129, 130, 130, 131, 132, 132, 132, 132, 133, 133, 133, 133, 134, 135, 136, 137, 137, 137, 137, 137, 137, 138, 138, 138, 139, 140, 140, 141, 141, 142, 142, 142, 142, 142, 142, 142, 143, 143, 143, 143, 143, 143, 144, 144, 144, 144, 144, 144, 145, 145, 145, 145, 145, 146, 146, 146, 146, 146, 146, 146, 147, 148, 148, 148, 148, 148, 148, 149, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 151, 151, 151, 151, 151, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 153, 153, 153, 153, 153, 154, 154, 154, 154, 154, 154, 154, 154, 155, 155, 155, 155, 155, 155, 155, 155, 156, 156, 156, 156, 157, 157, 157, 157, 158, 158, 158, 158, 159, 159, 159, 159, 160, 160, 160, 160, 160, 160, 160, 160, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 162, 162, 163, 163, 164, 165, 165, 166, 167, 167, 167, 168, 168, 169, 169, 171, 170, 170, 173, 172, 172, 174, 174, 175, 175, 176, 176, 177, 177, 178, 178, 178, 178, 178, 179, 179, 179, 179, 180, 180, 181, 182, 181, 181, 183, 183, 184, 184, 185, 185, 186, 185, 185, 188, 187, 187, 187, 189, 189, 190, 190, 190, 192, 193, 191, 195, 196, 194, 197, 197, 198, 198, 198, 198, 198, 199, 199, 200, 200, 200, 201, 201, 201, 201, 201, 202, 202, 202, 202, 202, 203, 203, 204, 204, 205, 205, 207, 206, 206, 208, 206, 206, 209, 206, 210, 206, 211, 211, 211, 212, 212, 213, 213, 214, 214, 215, 215, 215, 216, 216, 216, 216, 216, 216, 217, 217, 218, 218, 219, 219, 219, 220, 220, 220, 221, 221, 221, 222, 222, 224, 223, 225, 225, 226, 226, 226, 227, 227, 228, 228, 229, 229, 230, 230, 230, 230, 230, 231, 231, 231, 231, 231, 232, 232, 232, 232, 233, 233, 233, 233, 233, 234, 234, 234, 234, 235, 235, 235, 235, 235, 236, 236, 237, 237, 237, 237, 238, 239, 240, 240, 241, 241, 242, 243, 243, 244, 245, 245, 246, 246, 247, 248, 249, 250, 250, 251, 252, 253, 254, 255, 256, 256, 257, 257, 257, 257, 258, 259, 260, 262, 263, 261, 264, 264, 265, 265, 266, 267, 268, 270, 269, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 272, 272, 273, 273, 273, 273, 274, 275, 275, 276, 276, 277, 278, 278, 278, 278, 279, 279, 280, 280, 281, 281, 282, 282, 283, 283, 284, 285, 287, 286, 288, 289, 290, 288, 288, 291, 291, 291, 291, 292, 292, 293, 293, 293, 293, 293, 294, 294, 294, 294, 294, 295, 297, 296, 298, 298, 299, 299, 300, 300, 301 }; /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. 
*/ static const short yyr2[] = { 0, 0, 1, 0, 2, 0, 3, 1, 1, 1, 2, 3, 4, 4, 2, 2, 2, 1, 0, 0, 8, 4, 0, 0, 8, 4, 0, 0, 7, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 3, 1, 2, 2, 2, 2, 2, 4, 2, 4, 2, 2, 1, 1, 1, 1, 4, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 4, 0, 4, 0, 0, 7, 0, 5, 3, 3, 1, 1, 1, 1, 0, 7, 3, 3, 3, 3, 4, 6, 6, 4, 8, 4, 6, 4, 4, 3, 3, 2, 2, 1, 3, 4, 0, 1, 2, 1, 1, 2, 2, 4, 4, 2, 2, 2, 0, 1, 4, 4, 3, 3, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 4, 1, 4, 0, 6, 3, 0, 6, 3, 0, 1, 1, 2, 8, 3, 1, 3, 0, 1, 4, 6, 4, 1, 1, 1, 1, 1, 1, 1, 0, 4, 1, 0, 2, 1, 3, 3, 2, 0, 4, 1, 0, 4, 1, 1, 1, 2, 2, 5, 3, 0, 0, 6, 0, 0, 6, 1, 1, 4, 3, 2, 3, 1, 1, 1, 3, 2, 1, 3, 2, 3, 3, 4, 3, 4, 3, 2, 1, 1, 2, 1, 2, 1, 2, 0, 7, 5, 0, 7, 5, 0, 8, 0, 7, 2, 2, 2, 0, 1, 0, 1, 1, 2, 0, 3, 2, 3, 2, 3, 1, 1, 2, 1, 4, 1, 4, 2, 4, 3, 2, 4, 3, 1, 3, 1, 1, 3, 0, 3, 0, 1, 0, 1, 2, 1, 1, 1, 3, 2, 3, 4, 3, 2, 2, 1, 4, 3, 4, 5, 5, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1, 1, 2, 0, 0, 1, 1, 2, 3, 1, 2, 1, 1, 3, 1, 1, 2, 2, 0, 0, 2, 3, 2, 2, 2, 3, 3, 1, 9, 9, 7, 7, 0, 0, 9, 0, 0, 13, 0, 1, 2, 1, 2, 1, 12, 0, 8, 2, 1, 1, 1, 1, 1, 2, 2, 2, 3, 1, 3, 4, 1, 1, 1, 3, 5, 2, 4, 6, 0, 1, 2, 4, 8, 1, 3, 5, 7, 0, 1, 0, 1, 1, 3, 6, 9, 1, 3, 0, 0, 0, 3, 2, 0, 0, 6, 2, 0, 1, 1, 3, 1, 3, 4, 4, 3, 4, 3, 4, 4, 3, 4, 3, 1, 0, 3, 1, 2, 1, 3, 1, 3, 1 }; /* YYDEFACT[S] -- default rule to reduce with in state S when YYTABLE doesn't specify something else to do. Zero means the default is an error. 
*/ static const short yydefact[] = { 3, 5, 0, 0, 0, 271, 302, 301, 268, 130, 357, 353, 355, 0, 57, 0, 568, 17, 4, 8, 7, 0, 0, 215, 216, 217, 218, 207, 208, 209, 210, 219, 220, 221, 222, 211, 212, 213, 214, 122, 122, 0, 138, 145, 265, 267, 266, 136, 286, 162, 0, 0, 0, 270, 269, 0, 9, 0, 6, 15, 16, 358, 354, 356, 535, 0, 535, 0, 0, 352, 263, 284, 0, 276, 0, 131, 143, 149, 133, 165, 132, 144, 150, 166, 134, 155, 160, 137, 172, 135, 156, 161, 173, 139, 141, 147, 146, 183, 140, 142, 148, 184, 151, 153, 158, 157, 198, 152, 154, 159, 199, 163, 181, 190, 169, 167, 164, 182, 191, 168, 170, 196, 205, 176, 174, 171, 197, 206, 175, 177, 179, 188, 187, 185, 178, 180, 189, 186, 192, 194, 203, 202, 200, 193, 195, 204, 201, 0, 0, 14, 287, 30, 31, 378, 369, 378, 370, 367, 371, 517, 10, 0, 0, 289, 0, 84, 85, 86, 55, 56, 0, 0, 0, 0, 0, 87, 0, 0, 32, 34, 33, 0, 35, 36, 0, 37, 38, 0, 0, 39, 58, 0, 0, 60, 42, 44, 243, 244, 245, 246, 239, 240, 241, 242, 402, 0, 0, 0, 235, 236, 237, 238, 264, 0, 0, 285, 11, 284, 29, 534, 284, 263, 0, 351, 516, 284, 337, 263, 284, 0, 274, 0, 331, 332, 0, 0, 0, 0, 359, 0, 362, 0, 365, 518, 0, 292, 53, 54, 0, 0, 0, 0, 48, 45, 0, 463, 0, 0, 47, 272, 0, 0, 49, 0, 51, 0, 0, 77, 75, 73, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 105, 106, 0, 0, 40, 0, 404, 273, 0, 0, 459, 0, 452, 453, 0, 46, 350, 0, 0, 123, 560, 348, 263, 264, 0, 0, 465, 0, 465, 114, 0, 283, 0, 0, 13, 284, 21, 0, 284, 284, 335, 12, 25, 0, 284, 385, 380, 235, 236, 237, 238, 231, 232, 233, 234, 122, 122, 377, 0, 378, 284, 378, 399, 400, 374, 397, 0, 535, 299, 300, 297, 0, 290, 293, 298, 0, 0, 0, 0, 0, 0, 0, 91, 90, 0, 43, 0, 0, 83, 82, 0, 0, 0, 0, 71, 72, 70, 69, 68, 66, 67, 61, 62, 63, 64, 65, 104, 103, 0, 41, 0, 263, 284, 403, 405, 410, 409, 411, 419, 93, 566, 0, 462, 434, 461, 465, 465, 465, 465, 0, 443, 0, 0, 429, 438, 454, 92, 349, 277, 515, 0, 0, 0, 0, 421, 0, 449, 27, 116, 115, 112, 227, 228, 223, 224, 229, 230, 225, 226, 122, 122, 281, 336, 0, 0, 465, 280, 334, 465, 361, 382, 0, 379, 386, 0, 364, 0, 0, 375, 0, 374, 514, 292, 0, 40, 0, 99, 0, 101, 0, 97, 0, 88, 59, 50, 52, 0, 0, 76, 74, 94, 102, 413, 536, 418, 284, 417, 455, 0, 435, 430, 439, 436, 431, 440, 0, 432, 441, 437, 433, 442, 444, 460, 84, 271, 450, 450, 450, 450, 450, 0, 0, 0, 0, 0, 0, 524, 507, 458, 465, 0, 121, 122, 122, 0, 451, 508, 495, 496, 497, 498, 499, 509, 469, 470, 504, 0, 0, 564, 544, 122, 122, 562, 0, 545, 547, 561, 0, 0, 0, 422, 420, 0, 119, 0, 120, 0, 0, 333, 275, 515, 19, 278, 23, 0, 284, 381, 387, 0, 284, 383, 389, 284, 284, 401, 398, 284, 0, 291, 535, 84, 0, 0, 0, 0, 107, 0, 0, 78, 81, 412, 414, 0, 0, 536, 416, 567, 465, 465, 465, 0, 0, 0, 512, 500, 501, 502, 0, 0, 0, 525, 534, 0, 494, 0, 0, 128, 464, 129, 542, 559, 406, 406, 538, 539, 0, 0, 563, 423, 424, 0, 28, 456, 0, 0, 306, 304, 303, 282, 0, 0, 0, 284, 0, 391, 284, 284, 0, 394, 284, 360, 363, 368, 284, 288, 0, 294, 296, 95, 0, 100, 0, 0, 96, 319, 0, 0, 316, 0, 318, 0, 372, 309, 315, 0, 0, 0, 415, 537, 0, 0, 479, 485, 0, 0, 510, 503, 0, 505, 0, 284, 0, 126, 325, 0, 127, 328, 342, 263, 284, 284, 338, 339, 284, 556, 407, 410, 263, 284, 284, 558, 284, 546, 215, 216, 217, 218, 207, 208, 209, 210, 219, 220, 221, 222, 211, 212, 213, 214, 122, 122, 548, 565, 457, 117, 118, 0, 20, 279, 24, 393, 284, 0, 396, 284, 0, 366, 0, 0, 108, 0, 322, 0, 0, 313, 89, 0, 308, 0, 321, 312, 79, 465, 465, 480, 486, 488, 0, 465, 0, 0, 506, 0, 513, 124, 0, 125, 0, 413, 536, 554, 284, 341, 284, 344, 555, 408, 413, 536, 557, 540, 406, 406, 0, 392, 388, 395, 390, 295, 98, 109, 0, 324, 0, 0, 310, 311, 0, 0, 0, 450, 487, 
465, 492, 511, 520, 0, 465, 465, 345, 346, 0, 340, 343, 0, 284, 284, 551, 284, 553, 305, 0, 317, 314, 471, 450, 479, 466, 0, 485, 0, 479, 526, 535, 326, 329, 347, 541, 549, 550, 552, 323, 466, 474, 477, 478, 480, 465, 482, 489, 485, 450, 535, 0, 521, 527, 528, 0, 0, 0, 465, 450, 450, 450, 468, 467, 483, 490, 0, 493, 0, 0, 526, 0, 519, 327, 330, 473, 472, 466, 475, 476, 481, 0, 479, 0, 0, 522, 529, 465, 465, 480, 0, 535, 0, 0, 450, 534, 0, 532, 523, 0, 491, 530, 0, 0, 484, 0, 533, 534, 531, 0, 0, 0 }; static const short yydefgoto[] = { 923, 1, 2, 3, 18, 19, 20, 319, 641, 325, 643, 222, 554, 670, 187, 256, 390, 189, 190, 191, 192, 21, 193, 194, 375, 374, 372, 678, 373, 195, 589, 588, 308, 309, 310, 432, 405, 22, 300, 523, 196, 197, 198, 199, 200, 201, 202, 203, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 542, 543, 337, 212, 204, 41, 213, 42, 43, 44, 45, 46, 229, 72, 230, 642, 73, 559, 301, 215, 48, 353, 354, 355, 49, 640, 738, 672, 673, 674, 806, 675, 755, 676, 677, 694, 777, 870, 697, 779, 871, 562, 232, 702, 703, 704, 233, 50, 51, 52, 53, 341, 343, 348, 241, 54, 759, 461, 236, 237, 339, 568, 572, 569, 573, 346, 347, 205, 288, 395, 706, 707, 397, 398, 399, 223, 406, 407, 408, 409, 410, 411, 311, 839, 293, 294, 295, 633, 527, 296, 413, 206, 634, 312, 859, 855, 876, 877, 810, 856, 857, 529, 766, 812, 530, 531, 878, 895, 769, 770, 842, 880, 532, 533, 843, 534, 535, 536, 224, 225, 56, 537, 818, 613, 866, 867, 868, 912, 65, 161, 486, 595, 544, 713, 826, 545, 546, 733, 547, 623, 303, 422, 548, 549, 403, 207 }; static const short yypact[] = { 111, 113, 1307, 1307, 105,-32768,-32768,-32768,-32768,-32768, 71, 71, 71, 63,-32768, 108,-32768,-32768,-32768,-32768, -32768, 14, 218, 134, 1437, 252, 1556, 401, 926, 1278, 1584, 565, 1573, 1091, 1690, 1598, 1853, 1715, 1865,-32768, -32768, 32,-32768,-32768,-32768,-32768,-32768, 71,-32768,-32768, 82, 93, 103,-32768,-32768, 133,-32768, 1307,-32768,-32768, -32768, 71, 71, 71,-32768, 124,-32768, 167, 2644,-32768, 106, 71, 210,-32768, 1142,-32768,-32768,-32768, 71,-32768, -32768,-32768,-32768,-32768,-32768,-32768,-32768, 71,-32768,-32768, -32768,-32768,-32768,-32768,-32768,-32768, 71,-32768,-32768,-32768, -32768,-32768,-32768,-32768,-32768, 71,-32768,-32768,-32768,-32768, -32768,-32768,-32768,-32768, 71,-32768,-32768,-32768,-32768,-32768, -32768,-32768,-32768, 71,-32768,-32768,-32768,-32768,-32768,-32768, -32768,-32768, 71,-32768,-32768,-32768,-32768,-32768,-32768,-32768, -32768, 71,-32768,-32768,-32768,-32768,-32768, 276, 218,-32768, -32768,-32768,-32768,-32768, 168,-32768, 193,-32768, 216,-32768, -32768, 185, 321,-32768, 271,-32768,-32768,-32768,-32768,-32768, 2772, 2772, 290, 294, 297,-32768, 305, 483,-32768,-32768, -32768, 2772,-32768,-32768, 1618,-32768,-32768, 2772, 280, 287, -32768, 2836, 2900,-32768, 3258, 745, 1825, 775, 2089, 1391, 432, 470, 673, 730,-32768, 291, 1735, 2772, 179, 390, 182, 411,-32768, 218, 218, 71,-32768, 71,-32768,-32768, 71, 87, 1079,-32768,-32768, 71,-32768, 106, 71, 288, -32768, 2108, 445, 477, 307, 1806, 330, 857,-32768, 333, -32768, 520,-32768,-32768, 334, 606,-32768,-32768, 2772, 2512, 996, 1039,-32768,-32768, 360,-32768, 367, 376,-32768,-32768, 2772, 1618,-32768, 1618,-32768, 2772, 2772, 406,-32768,-32768, 2772, 2772, 2772, 2772, 2772, 2772, 2772, 2772, 2772, 2772, 2772, 2772,-32768,-32768, 483, 483, 2772, 2772, 320,-32768, 378, 483,-32768, 1904, 456,-32768, 400,-32768, 477, 22, 218,-32768,-32768,-32768, 106, 516, 2137, 436,-32768, 1111, 49,-32768, 2782, 486, 276, 276,-32768, 71,-32768, 1079, 71, 71,-32768,-32768,-32768, 1079, 
71,-32768,-32768, 1825, 775, 2089, 1391, 432, 470, 673, 730,-32768, 462, 444, 2338,-32768, 71,-32768,-32768, 496, 460,-32768, 520,-32768, -32768,-32768,-32768, 469,-32768, 493,-32768, 3142, 455, 3162, 464, 480, 497, 487,-32768,-32768, 2444, 3258, 500, 507, 3258, 3258, 2772, 536, 2772, 2772, 2474, 1789, 909, 602, 1415, 764, 764, 351, 351,-32768,-32768,-32768,-32768,-32768, 510, 287, 524, 106, 71,-32768,-32768,-32768,-32768, 488, -32768,-32768,-32768, 310, 436,-32768,-32768, 59, 79, 114, 115, 629,-32768, 541, 2298,-32768,-32768,-32768,-32768,-32768, -32768, 324, 1501, 2772, 2772, 2203,-32768, 3014,-32768,-32768, -32768,-32768,-32768, 2417, 2812, 762, 1877, 2684, 2846, 1212, 2369, 517, 547,-32768, 445, 225, 276,-32768, 601,-32768, -32768,-32768, 248, 223,-32768,-32768, 556,-32768, 557, 2772, 483, 575, 460,-32768, 606, 573, 2964, 2147,-32768, 2772, -32768, 2147,-32768, 483,-32768,-32768, 574, 574, 625, 2772, 3271, 3283,-32768,-32768, 320, 320,-32768, 71,-32768,-32768, 483,-32768,-32768,-32768,-32768,-32768,-32768, 2377,-32768,-32768, -32768,-32768,-32768,-32768,-32768, 627, 635,-32768,-32768,-32768, -32768,-32768, 2772, 636, 586, 594, 2708, 311, 677,-32768, -32768,-32768, 599,-32768,-32768,-32768, 604, 102,-32768,-32768, -32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768, 2578, 596, -32768,-32768,-32768,-32768,-32768, 603, 328,-32768,-32768, 482, 3042, 3064,-32768,-32768, 76,-32768, 276,-32768, 218, 949, -32768,-32768, 672,-32768,-32768,-32768, 2772, 329, 607,-32768, 2772, 160, 608,-32768, 71, 71, 3258,-32768, 71, 614, -32768,-32768, 491, 612, 616, 3189, 618,-32768, 250, 1238, -32768, 2929,-32768,-32768, 619, 453,-32768,-32768,-32768,-32768, -32768,-32768, 648, 649, 2231,-32768,-32768,-32768,-32768, 630, 2772, 639,-32768,-32768, 687,-32768, 276, 218,-32768,-32768, -32768,-32768,-32768, 228, 64,-32768,-32768, 2243, 733,-32768, -32768,-32768, 645,-32768,-32768, 358, 386,-32768,-32768, 3258, -32768, 76, 949, 76, 3220, 2772,-32768, 71, 3220, 2772, -32768, 71,-32768,-32768,-32768, 71,-32768, 2772,-32768,-32768, -32768, 2772,-32768, 483, 2772,-32768,-32768, 483, 2772,-32768, 700, 3258, 655, 658,-32768,-32768, 327, 1973, 2772,-32768, -32768, 698, 703,-32768, 2578, 2772, 2772,-32768,-32768, 682, -32768, 710, 71, 402,-32768, 537, 442,-32768, 1959,-32768, 106, 71, 71, 519, 522, 186,-32768,-32768, 71, 106, 71, 186,-32768, 71,-32768, 2417, 2812, 2718, 2876, 762, 1877, 2151, 2484, 2684, 2846, 2748, 2910, 1212, 2369, 2212, 2552,-32768,-32768,-32768,-32768,-32768,-32768,-32768, 1238,-32768, -32768,-32768,-32768, 3220, 248,-32768, 3220, 223,-32768, 505, 3114,-32768, 683,-32768, 2992, 1238,-32768,-32768, 1344,-32768, 2042,-32768,-32768, 2929,-32768,-32768,-32768,-32768,-32768, 694, -32768, 691, 3240,-32768, 777,-32768,-32768, 1079,-32768, 1079, 228, 194,-32768, 71,-32768, 71,-32768,-32768, 71, 64, 64,-32768,-32768, 228, 64, 701,-32768,-32768,-32768,-32768, -32768,-32768,-32768, 2772,-32768, 705, 2042,-32768,-32768, 2772, 702, 704,-32768,-32768,-32768,-32768,-32768, 751, 707,-32768, -32768, 519, 522, 268,-32768,-32768, 453, 71, 186,-32768, 186,-32768,-32768, 3092,-32768,-32768,-32768,-32768,-32768,-32768, 785, 2772, 715,-32768, 77,-32768,-32768,-32768,-32768,-32768, -32768,-32768,-32768,-32768,-32768,-32768, 800, 810,-32768,-32768, -32768,-32768, 2772,-32768,-32768, 483, 781, 735,-32768, 744, 645, 645, 36,-32768,-32768,-32768,-32768,-32768,-32768,-32768, 741,-32768, 783, 746, 77, 77,-32768,-32768,-32768,-32768, -32768,-32768,-32768,-32768,-32768, 789,-32768, 2772, 849, 813, -32768, 772,-32768,-32768, 
771,-32768, 858, 778,-32768,-32768, 799,-32768, 780, 787,-32768,-32768, 2772, 870,-32768, 786, -32768,-32768,-32768, 882, 883,-32768 }; static const short yypgoto[] = { -32768,-32768,-32768,-32768, 107,-32768,-32768,-32768,-32768,-32768, -32768,-32768,-32768, -47,-32768, -67, 418, -254, 433,-32768, -32768,-32768, -82, 1220,-32768,-32768,-32768,-32768,-32768,-32768, -32768,-32768, -289, 577,-32768,-32768, 132, -5, -297, -496, 6, 9, 51, 101, 11, 13, 24, 37, -293, -271, 258, 263, -268, -266, 264, 272, -386, -385, 588, 589, -32768, -159,-32768, -381, -184, 699, 731, 818, 888,-32768, -472, -124, 458,-32768, 620,-32768, 48, 642, -40,-32768, 438,-32768, 474, 277,-32768, -530,-32768, 163,-32768, -626, -32768,-32768, 246,-32768,-32768,-32768,-32768,-32768,-32768, -141, 314, 142, 156, -146, 26,-32768,-32768,-32768,-32768,-32768, -32768,-32768,-32768,-32768,-32768, 475, -96,-32768, 605,-32768, -32768, 198, 196, 626, 484, -84,-32768,-32768, -575, -263, -409, -423,-32768, 761,-32768,-32768,-32768,-32768,-32768,-32768, -238, -307,-32768,-32768, 657, -335,-32768, 422,-32768,-32768, -342, 664, -620, -739, -248, -224, -734,-32768, -232, 81, -627, -760,-32768,-32768,-32768,-32768, -532,-32768,-32768,-32768, -32768,-32768,-32768,-32768,-32768,-32768, 125, -214,-32768,-32768, -32768,-32768, 70,-32768, 75,-32768, -15, -64, 468,-32768, -577,-32768,-32768,-32768,-32768,-32768,-32768, 413, -311,-32768, -32768,-32768,-32768, 55 }; #define YYLAST 3344 static const short yytable[] = { 67, 188, 163, 154, 156, 158, 231, 150, 23, 23, 449, 24, 24, 27, 27, 28, 28, 320, 680, 437, 446, 150, 150, 150, 234, 396, 29, 29, 524, 525, 447, 811, 391, 526, 147, 148, 450, 306, 150, 30, 30, 438, 620, 314, 439, 415, 440, 150, 74, 712, -111, 762, 507, 25, 25, 412, 150, 57, 57, 239, -445, 593, 305, 23, 64, 150, 24, 69, 27, 416, 28, 430, 528, 840, 150, 592, 208, 632, 338, 209, -446, 29, 68, 150, 635, 151, 152, 864, 246, 247, 220, 221, 150, 304, 30, 9, 151, 152, 875, 253, 257, 150, 15, 26, 26, 258, 151, 152, 25, 66, 58, -1, 57, -2, 9, -447, -448, 419, 15, 214, 423, 210, 149, 709, 881, 297, 889, 55, 55, 437, 252, -534, 710, 221, 808, 291, 894, 15, 5, 6, 7, 8, 75, 908, 693, -111, 865, 10, 11, 12, -425, 438, 524, 525, 439, 528, 440, 526, 26, 492, 495, 498, 501, 14, 160, 15, 361, 363, 907, 914, -426, 211, 520, 503, 235, 150, -534, 368, 153, 369, 835, 338, 55, 493, 496, 499, 502, 75, 768, 155, 84, 15, 162, 292, 345, 59, 60, 69, 352, 157, 599, 600, 601, 602, 603, -427, -428, 649, 795, 484, 15, 858, 391, 15, 708, 708, 863, 15, 829, 831, 392, 69, 594, 159, -247, 805, 69, 208, 220, 221, 209, 69, 699, 208, 872, 164, 209, 388, 389, 298, 299, 731, 732, 329, 402, 456, 330, 458, 333, 849, 334, 69, 226, 700, 220, 221, 5, 6, 7, 8, 84, 335, 701, 221, 238, 10, 11, 12, 302, 903, 570, 901, 210, 313, 336, 243, 315, 70, 210, 69, 226, 14, 70, 15, 475, 463, 71, 700, 331, 240, 735, 71, 340, 321, 221, 566, 701, 221, 524, 525, 216, 345, 437, 526, 217, 478, 739, 227, 741, 861, 208, 567, 242, 209, 151, 152, 228, 663, 433, 664, 560, 434, 211, 435, 438, 436, 421, 439, 211, 440, 879, 244, 452, 453, 723, 227, 785, 221, 332, 245, 298, 299, -249, 253, 228, 665, 329, 522, 320, 330, 744, 333, 219, 334, 747, 210, 724, 593, 248, 727, 15, 728, 249, 848, 335, 250, 593, 521, 448, 302, 610, 592, 760, 251, 451, 259, 645, 336, 316, 393, 592, 260, 317, 584, 708, 708, 289, 586, 394, 221, 457, 331, 220, 221, 667, 340, 668, 323, 80, 208, 489, 217, 209, 749, 490, 211, 6, 7, 8, 93, 279, 280, 281, 345, 10, 11, 12, 352, 626, 89, 23, 326, 627, 24, 342, 27, 587, 28, 433, 349, 522, 434, 15, 
435, 636, 436, 556, 558, 29, 8, 93, 332, 485, 598, 210, 10, 11, 12, 736, 609, 521, 30, 317, -80, 539, 364, 297, 5, 6, 7, 8, 9, 365, 15, 541, 25, 10, 11, 12, 538, 611, 366, 824, 401, 825, 695, 737, 8, 98, 571, 217, 320, 14, 10, 11, 12, 151, 152, 819, 291, 820, -251, 776, 696, 211, 418, 317, 79, 83, 88, 92, 97, 101, 106, 110, 115, 119, 124, 128, 133, 137, 142, 146, 321, 221, 26, 780, 656, 594, 616, 617, 344, 424, 151, 152, 789, 428, 594, 887, 888, 854, 443, 778, 437, 454, 596, 217, 622, 622, 491, 494, 459, 500, 689, 23, 220, 221, 24, -543, 27, 468, 28, -384, -384, 460, 438, 487, 221, 439, 470, 440, 466, 29, 464, 465, 219, 891, 891, -515, 5, 6, 7, 8, 111, 471, 30, 628, 629, 10, 11, 12, 473, -515, 479, 421, 657, 658, 783, 221, 25, 785, 221, 472, 538, 14, 476, 15, 752, 691, 260, 800, 433, 477, 567, 434, 482, 435, 555, 436, 151, 152, 6, 7, 350, 351, 646, 751, 767, 771, 650, 753, 483, 652, 653, 262, 264, 654, -515, 444, 445, 307, -515, 504, 715, 822, 823, 716, 557, 719, 26, 720, 892, 893, 698, 47, 47, 564, 574, 575, 705, 711, 721, 61, 62, 63, -255, 274, 275, 276, 277, 278, 279, 280, 281, 722, 78, 578, 87, 581, 96, 474, 105, 590, 114, -30, 123, 606, 132, 717, 141, 8, 102, -31, 605, 607, 612, 10, 11, 12, 615, 23, 621, 742, 24, 618, 27, 745, 28, 625, 47, 219, 647, 651, 748, 15, 655, 208, 659, 29, 209, 47, 660, 47, 662, 679, 208, 684, 685, 209, 356, 688, 30, 76, 81, 85, 90, 793, 794, 718, 690, 112, 117, 121, 126, 692, 25, 734, 8, 107, 538, 775, 520, 836, 10, 11, 12, 757, 756, 150, 781, 782, 210, 758, 787, 77, 82, 86, 91, 790, 791, 210, 792, 113, 118, 122, 127, 764, 6, 7, 8, 93, 765, 773, 571, 767, 10, 11, 12, 774, 5, 802, 869, 8, 80, 813, 26, 815, 817, 10, 11, 12, 796, 832, 15, 798, 767, 834, 837, 844, 838, 882, 211, 845, 860, 14, 862, 298, 299, 282, 283, 211, 284, 285, 286, 287, 298, 299, 873, 883, 828, 830, 277, 278, 279, 280, 281, 47, 874, 884, 885, 904, 302, 433, 302, 886, 434, 896, 435, 78, 436, 87, 910, 96, 898, 105, 94, 99, 103, 108, 919, 78, 897, 87, 130, 135, 139, 144, 902, 327, 905, 906, 5, 889, 47, 8, 9, 909, 916, 911, 47, 10, 11, 12, 913, 917, 850, 851, 918, 852, 47, 920, 921, 924, 925, 583, 725, 14, 431, 15, 16, 726, 729, 47, 47, 915, 76, 81, 85, 90, 730, 441, 442, 580, 47, 561, 47, 922, 79, 83, 97, 101, 115, 119, 133, 137, 95, 100, 104, 109, 740, 420, 807, 761, 131, 136, 140, 145, 77, 82, 86, 91, 6, 7, 8, 98, 827, 821, 579, 356, 10, 11, 12, 797, 799, 577, 455, 47, 328, -376, 619, 637, 417, 165, 890, 899, 597, 624, 414, 166, 167, 900, 168, 273, 274, 275, 276, 277, 278, 279, 280, 281, 78, 429, 87, 462, 96, 0, 105, 0, 169, 0, 16, 47, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 322, 0, 0, 0, 360, 177, 0, 5, 178, 0, 8, 9, 0, 179, 180, 181, 10, 11, 12, 182, 183, 0, 0, -252, 184, 94, 99, 103, 108, 0, 0, 0, 14, 0, 15, 76, 81, 85, 90, 0, 0, 0, 47, 0, 0, 0, 0, 362, 185, 186, 5, 0, 638, 8, 9, 0, 400, 0, 0, 10, 11, 12, 0, 47, 0, 0, 0, 77, 82, 86, 91, 0, 0, 0, 0, 14, 0, 15, 414, 414, 497, 414, 78, 0, 96, 0, 114, 307, 132, 0, -465, -465, -465, -465, -465, 95, 100, 104, 109, -465, -465, -465, 5, 6, 7, 8, 120, 0, 0, 0, 0, 10, 11, 12, 0, -465, 47, 0, 563, 307, 47, 565, -113, -113, -113, -113, -113, 14, 0, 15, 0, -113, -113, -113, 0, 0, 0, 0, 0, 76, 81, 0, 0, 112, 117, 0, 0, -113, 0, 0, 218, 0, 0, -26, -26, -26, -26, -26, 94, 99, 103, 108, -26, -26, -26, 0, 0, 488, 0, 0, 0, 77, 82, 0, 0, 113, 118, 219, -26, 0, -515, 0, -110, 0, 0, 0, 0, 47, -257, 0, 0, 0, 614, 0, -515, 0, 79, 83, 88, 92, 97, 101, 106, 110, 115, 119, 124, 128, 133, 137, 142, 146, 322, 322, -113, 0, 0, 220, 221, 0, 0, 0, 0, 0, 6, 7, 8, 
129, 95, 100, 104, 109, 10, 11, 12, 0, 0, 0, 0, -515, 0, 0, 0, -515, 0, -26, 666, 0, 505, 152, 15, 0, 400, 400, 166, 167, 0, 168, 0, 0, 94, 99, 0, 0, 130, 135, 0, 0, 0, 0, 681, 682, 683, 0, 0, 169, 47, 16, 0, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 0, 6, 7, 8, 102, 177, 0, 0, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 667, 184, 668, 4, 15, -122, 5, 6, 7, 8, 9, 0, 0, 0, 0, 10, 11, 12, 95, 100, 0, 47, 131, 136, -307, 185, 186, 0, 0, 669, 13, 14, 0, 15, 16, 0, 0, 47, 0, 0, 666, 0, 505, 152, 0, 788, 47, 0, 166, 167, 0, 168, 78, 0, 87, 0, 96, 0, 105, 0, 114, -122, 123, -253, 132, 0, 141, 0, 0, 169, -122, 16, 0, 170, 171, 172, 173, 174, 0, 400, 400, 175, 176, 0, 0, 0, 0, 0, 177, 0, 5, 178, 17, 8, 89, 0, 179, 180, 181, 10, 11, 12, 182, 183, 0, 0, 667, 184, 668, 76, 81, 85, 90, 0, 0, 14, 0, 112, 117, 121, 126, 0, 0, 809, 809, 0, 0, 0, 0, 814, -373, 185, 186, 0, 0, 669, 5, 6, 7, 8, 80, 77, 82, 86, 91, 10, 11, 12, 0, 113, 118, 122, 127, 0, 0, 0, 0, 0, 0, 784, 786, 14, 0, 357, 359, 275, 276, 277, 278, 279, 280, 281, 0, 841, 0, 367, 0, 0, 846, 847, 370, 371, 0, 0, 0, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 539, 0, 540, 5, 6, 7, 8, 9, 0, 0, 541, 0, 10, 11, 12, 0, 0, 0, 0, 0, 0, 497, 0, 0, 427, -248, 0, 0, 14, 0, 0, 0, 0, 0, 497, 94, 99, 103, 108, 400, 400, 0, 0, 130, 135, 139, 144, 0, 400, 400, 0, 0, 400, 400, 0, 0, 0, 0, 5, 6, 7, 8, 89, 497, 809, 0, 0, 10, 11, 12, 0, 0, 0, 0, 0, 5, 6, 7, 8, 116, 784, 786, 786, 14, 10, 11, 12, 6, 7, 8, 107, 0, 480, 481, -543, 10, 11, 12, 0, 0, 14, 6, 7, 8, 129, 95, 100, 104, 109, 10, 11, 12, 0, 131, 136, 140, 145, 254, 0, 165, 5, 0, 0, 8, 9, 166, 167, 15, 168, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 550, 551, 0, -250, 14, 169, 15, 16, 0, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 0, -256, 0, 0, 0, 177, 0, 0, 178, 0, 0, 0, -254, 179, 180, 181, 0, 576, 0, 182, 183, 0, 0, 0, 184, 0, -259, 585, 0, 0, 0, 0, 5, 6, 7, 8, 125, 591, 0, 0, 0, 10, 11, 12, 0, 0, 0, 0, 185, 186, 0, 0, 255, 0, 0, 0, 0, 14, 6, 7, 8, 138, 0, 0, 0, 0, 10, 11, 12, 0, 604, 0, 0, 0, 290, 0, -451, -451, -451, -451, -451, -451, -451, -451, 15, -451, -451, -451, -451, -451, 0, -451, -451, -451, -451, -451, -451, -451, -451, -451, -451, -451, -451, -451, -451, -451, 291, -451, -451, -451, -451, -451, 0, 0, 0, -451, -451, 639, -258, 0, 0, 0, -451, 0, 644, -451, 0, 0, 648, 0, -451, -451, -451, 0, 0, 0, -451, -451, 0, 0, 0, -451, 0, -261, 0, 324, 0, 671, -22, -22, -22, -22, -22, 0, 0, 0, 0, -22, -22, -22, 0, 0, 0, -451, 292, -451, -451, 5, 0, -451, 8, 75, 219, -22, 0, -515, 10, 11, 12, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, -515, 0, 0, 14, 0, 15, 0, 6, 7, 8, 134, 639, 0, 0, 743, 10, 11, 12, 746, 6, 7, 8, 143, 220, 221, 0, 0, 10, 11, 12, 750, 6, 7, 8, 98, 0, 0, 754, 0, 10, 11, 12, 0, 0, 0, -515, 671, 763, 0, -515, 0, -22, 0, 0, 404, 772, -465, -465, -465, -465, -465, -465, -465, -465, 0, -465, -465, -465, -465, -465, 0, -465, -465, -465, -465, -465, -465, -465, -465, -465, -465, -465, -465, -465, -465, -465, 0, -465, -465, -465, -465, -465, -260, 0, 0, -465, -465, 0, 0, 0, 0, 0, -465, 0, -262, -465, 0, 671, 0, 0, -465, -465, -465, 0, 0, 0, -465, -465, 0, 0, 0, -465, 0, 666, 671, 165, 0, 671, 0, 671, 0, 166, 167, 0, 168, 0, 219, 0, 0, -515, 0, 0, 0, -465, 0, -465, -465, 0, 0, -465, 0, 0, 169, -515, 16, 0, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 0, -320, 0, 0, 0, 177, 833, 0, 178, 671, 220, 221, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, -320, 184, -320, 666, 0, 165, 0, 0, 0, -515, 0, 166, 167, -515, 168, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 185, 186, 0, 0, 669, 0, 
0, 169, 0, 16, 0, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 0, 0, 0, 0, 0, 177, 0, 5, 178, 0, 8, 84, 0, 179, 180, 181, 10, 11, 12, 182, 183, 0, 0, 318, 184, 0, -18, -18, -18, -18, -18, 0, 14, 0, 15, -18, -18, -18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 185, 186, 219, -18, 669, -515, 165, 0, 0, 0, 0, 0, 166, 167, 0, 168, 0, 5, 0, -515, 8, 9, 6, 7, 8, 102, 10, 11, 12, 0, 10, 11, 12, 169, 0, 16, 0, 170, 171, 172, 173, 174, 14, 0, 15, 175, 176, 0, 15, 0, 0, 0, 177, 0, 0, 178, 0, 0, 0, 0, 179, 180, 425, 0, -515, 0, 182, 183, -515, 0, -18, 184, 165, 0, 0, 0, 0, 0, 166, 167, 0, 168, 0, 6, 7, 8, 138, 0, 0, 0, 0, 10, 11, 12, 0, 185, 186, 0, 0, 169, 426, 16, 0, 170, 171, 172, 173, 174, 686, 15, 0, 175, 176, 5, 6, 7, 8, 9, 177, 0, 714, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 0, 184, 14, 0, 15, 265, 266, 267, 687, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 185, 186, 0, 0, 0, 552, 505, 506, 6, 7, 8, 9, 166, 167, 0, 168, 10, 11, 12, 507, 0, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 14, 169, 15, 16, 0, 170, 171, 172, 173, 174, 0, 0, 327, 175, 176, 5, 0, 0, 8, 9, 177, 0, 0, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 0, 184, 14, 0, 15, 16, 0, 0, 0, 6, 7, 8, 134, 0, 0, 505, 152, 10, 11, 12, 0, 166, 167, 519, 168, 185, 186, 0, 507, 520, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 0, 169, 0, 16, 0, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 5, 6, 7, 8, 75, 177, 0, 0, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 0, 184, 14, 165, 15, 0, 0, 0, 0, 166, 167, 0, 168, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 519, 0, 185, 186, 0, 0, 520, 169, 0, 16, 0, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 0, 6, 7, 8, 107, 177, 0, 0, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 0, 184, 358, 0, 165, 0, 0, 0, 0, 0, 166, 167, 0, 168, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 185, 186, 0, 0, 474, 0, 169, 0, 16, 0, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 0, 6, 7, 8, 143, 177, 0, 0, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 0, 184, 165, 5, 6, 7, 8, 9, 166, 167, 0, 168, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 185, 186, 0, 14, 169, 15, 16, 0, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 0, 0, 0, 0, 0, 177, 0, 0, 178, 0, 0, 0, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 0, 184, 165, 5, 0, 0, 8, 9, 166, 167, 0, 168, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 185, 186, 0, 14, 169, 15, 16, 0, 170, 171, 172, 173, 174, 0, 0, 0, 175, 176, 5, 6, 7, 8, 111, 177, 0, 0, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 165, 184, 14, 0, 15, 0, 166, 167, 0, 168, 0, 5, 6, 7, 8, 84, 0, 0, 0, 0, 10, 11, 12, 0, 0, 185, 186, 169, 0, 16, 0, 170, 171, 172, 173, 174, 14, 0, 15, 175, 176, 5, 6, 7, 8, 120, 177, 0, 0, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 165, 184, 14, 0, 15, 0, 166, 167, 0, 168, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 10, 11, 12, 608, 0, 185, 186, 169, 0, 16, 0, 170, 171, 172, 173, 174, 14, 0, 0, 175, 176, 5, 6, 7, 8, 80, 177, 0, 0, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 165, 184, 14, 0, 0, 0, 166, 167, 0, 168, 0, 5, 6, 7, 8, 116, 0, 0, 0, 0, 10, 11, 12, 0, 0, 185, 186, 169, 0, 16, 0, 170, 171, 172, 173, 174, 14, 0, 0, 175, 176, 5, 6, 7, 8, 89, 177, 0, 0, 178, 10, 11, 12, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 165, 261, 14, 0, 0, 0, 166, 167, 0, 168, 0, 5, 6, 7, 8, 125, 0, 0, 0, 0, 10, 11, 12, 0, 0, 185, 186, 169, 0, 16, 0, 170, 171, 172, 173, 174, 14, 0, 0, 175, 176, 0, 0, 0, 0, 0, 177, 0, 0, 178, 0, 0, 0, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 582, 
263, 0, 0, 0, 0, 166, 167, 267, 168, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 185, 186, 169, 0, 16, 0, 170, 171, 172, 173, 174, 803, 0, 0, 175, 176, 0, 0, 0, 0, 0, 177, 0, 0, 178, 0, 0, 0, 0, 179, 180, 181, 0, 0, 0, 182, 183, 0, 0, 0, 184, 0, 0, 0, 265, 266, 267, 0, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 185, 186, 265, 266, 267, 0, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 265, 266, 267, 804, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 0, 0, 265, 266, 267, 553, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 265, 266, 267, 630, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 0, 0, 265, 266, 267, 631, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 265, 266, 267, 853, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 265, 266, 267, 801, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 0, 0, 0, 0, 0, 0, 0, 0, 265, 266, 267, 467, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 15, 0, 0, 0, 0, 469, 0, 0, 0, 0, 0, 0, 0, 265, 266, 267, 0, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 0, 661, 265, 266, 267, 816, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 265, 266, 267, 0, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281 }; static const short yycheck[] = { 15, 68, 66, 50, 51, 52, 147, 47, 2, 3, 321, 2, 3, 2, 3, 2, 3, 231, 595, 312, 317, 61, 62, 63, 148, 288, 2, 3, 414, 414, 319, 765, 286, 414, 39, 40, 325, 221, 78, 2, 3, 312, 538, 227, 312, 293, 312, 87, 22, 624, 1, 677, 16, 2, 3, 293, 96, 2, 3, 155, 1, 484, 221, 57, 1, 105, 57, 3, 57, 293, 57, 309, 414, 812, 114, 484, 70, 1, 237, 70, 1, 57, 68, 123, 556, 3, 4, 10, 170, 171, 68, 69, 132, 6, 57, 8, 3, 4, 858, 181, 184, 141, 31, 2, 3, 187, 3, 4, 57, 1, 3, 0, 57, 0, 8, 1, 1, 95, 31, 71, 304, 70, 90, 59, 863, 207, 90, 2, 3, 422, 177, 68, 68, 69, 760, 33, 875, 31, 4, 5, 6, 7, 8, 903, 616, 96, 69, 13, 14, 15, 91, 422, 538, 538, 422, 497, 422, 538, 57, 407, 408, 409, 410, 29, 57, 31, 250, 251, 902, 908, 91, 70, 96, 411, 148, 215, 68, 261, 96, 263, 806, 340, 57, 407, 408, 409, 410, 8, 684, 96, 8, 31, 68, 91, 241, 90, 91, 3, 245, 96, 507, 508, 509, 510, 511, 91, 91, 47, 738, 393, 31, 838, 466, 31, 623, 624, 843, 31, 793, 794, 287, 3, 485, 90, 90, 755, 3, 221, 68, 69, 221, 3, 4, 227, 854, 68, 227, 284, 285, 213, 214, 627, 627, 237, 291, 341, 237, 343, 237, 826, 237, 3, 4, 59, 68, 69, 4, 5, 6, 7, 8, 237, 68, 69, 96, 13, 14, 15, 220, 896, 47, 891, 221, 225, 237, 90, 228, 59, 227, 3, 4, 29, 59, 31, 366, 349, 68, 59, 237, 96, 632, 68, 237, 68, 69, 47, 68, 69, 684, 684, 90, 348, 595, 684, 94, 372, 641, 59, 643, 841, 304, 452, 96, 304, 3, 4, 68, 67, 312, 69, 95, 312, 221, 312, 595, 312, 300, 595, 227, 595, 862, 10, 337, 338, 627, 59, 68, 69, 237, 68, 314, 315, 90, 425, 68, 95, 340, 414, 562, 340, 647, 340, 28, 340, 651, 304, 627, 780, 68, 627, 31, 627, 68, 95, 340, 68, 789, 414, 320, 321, 59, 780, 45, 68, 326, 95, 47, 340, 90, 59, 789, 94, 94, 467, 793, 794, 95, 471, 68, 69, 342, 340, 68, 69, 67, 340, 69, 90, 8, 393, 90, 94, 393, 657, 94, 304, 5, 6, 7, 8, 59, 60, 61, 460, 13, 14, 15, 464, 90, 8, 414, 91, 94, 414, 
91, 414, 473, 414, 422, 95, 497, 422, 31, 422, 558, 422, 441, 442, 414, 7, 8, 340, 394, 490, 393, 13, 14, 15, 90, 516, 497, 414, 94, 47, 1, 95, 538, 4, 5, 6, 7, 8, 95, 31, 11, 414, 13, 14, 15, 414, 517, 95, 783, 95, 785, 616, 90, 7, 8, 453, 94, 695, 29, 13, 14, 15, 3, 4, 777, 33, 779, 90, 90, 617, 393, 95, 94, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 68, 69, 414, 700, 581, 781, 524, 525, 1, 6, 3, 4, 709, 90, 790, 870, 871, 837, 45, 90, 826, 90, 487, 94, 542, 543, 407, 408, 45, 410, 610, 538, 68, 69, 538, 95, 538, 95, 538, 90, 91, 94, 826, 68, 69, 826, 95, 826, 68, 538, 94, 95, 28, 873, 874, 31, 4, 5, 6, 7, 8, 94, 538, 94, 95, 13, 14, 15, 94, 45, 47, 558, 94, 95, 68, 69, 538, 68, 69, 95, 538, 29, 95, 31, 664, 613, 94, 95, 595, 95, 744, 595, 95, 595, 90, 595, 3, 4, 5, 6, 7, 8, 567, 663, 684, 685, 571, 667, 97, 574, 575, 191, 192, 578, 90, 314, 315, 1, 94, 91, 627, 780, 781, 627, 90, 627, 538, 627, 873, 874, 617, 2, 3, 45, 91, 91, 623, 624, 627, 10, 11, 12, 90, 54, 55, 56, 57, 58, 59, 60, 61, 627, 23, 91, 25, 95, 27, 96, 29, 47, 31, 47, 33, 90, 35, 627, 37, 7, 8, 47, 47, 90, 8, 13, 14, 15, 90, 684, 95, 644, 684, 90, 684, 648, 684, 95, 57, 28, 94, 94, 655, 31, 91, 700, 95, 684, 700, 68, 95, 70, 95, 95, 709, 68, 68, 709, 245, 90, 684, 23, 24, 25, 26, 731, 732, 627, 90, 31, 32, 33, 34, 47, 684, 3, 7, 8, 684, 692, 96, 809, 13, 14, 15, 91, 47, 788, 701, 702, 700, 94, 705, 23, 24, 25, 26, 710, 711, 709, 713, 31, 32, 33, 34, 68, 5, 6, 7, 8, 68, 90, 747, 841, 13, 14, 15, 68, 4, 97, 845, 7, 8, 90, 684, 95, 10, 13, 14, 15, 743, 91, 31, 746, 862, 91, 95, 47, 95, 864, 700, 95, 18, 29, 90, 780, 781, 63, 64, 709, 66, 67, 68, 69, 789, 790, 17, 865, 793, 794, 57, 58, 59, 60, 61, 184, 17, 47, 94, 897, 783, 826, 785, 90, 826, 95, 826, 196, 826, 198, 905, 200, 97, 202, 27, 28, 29, 30, 916, 208, 68, 210, 35, 36, 37, 38, 68, 1, 10, 47, 4, 90, 221, 7, 8, 95, 68, 10, 227, 13, 14, 15, 95, 94, 827, 828, 90, 830, 237, 10, 95, 0, 0, 466, 627, 29, 310, 31, 32, 627, 627, 250, 251, 909, 196, 197, 198, 199, 627, 312, 312, 464, 261, 446, 263, 921, 433, 434, 435, 436, 437, 438, 439, 440, 27, 28, 29, 30, 642, 300, 758, 676, 35, 36, 37, 38, 196, 197, 198, 199, 5, 6, 7, 8, 793, 780, 462, 464, 13, 14, 15, 744, 747, 460, 340, 304, 90, 91, 527, 1, 294, 3, 872, 884, 487, 543, 293, 9, 10, 885, 12, 53, 54, 55, 56, 57, 58, 59, 60, 61, 329, 308, 331, 348, 333, -1, 335, -1, 30, -1, 32, 340, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, 232, -1, -1, -1, 1, 49, -1, 4, 52, -1, 7, 8, -1, 57, 58, 59, 13, 14, 15, 63, 64, -1, -1, 90, 68, 200, 201, 202, 203, -1, -1, -1, 29, -1, 31, 329, 330, 331, 332, -1, -1, -1, 393, -1, -1, -1, -1, 1, 92, 93, 4, -1, 96, 7, 8, -1, 288, -1, -1, 13, 14, 15, -1, 414, -1, -1, -1, 329, 330, 331, 332, -1, -1, -1, -1, 29, -1, 31, 407, 408, 409, 410, 433, -1, 435, -1, 437, 1, 439, -1, 4, 5, 6, 7, 8, 200, 201, 202, 203, 13, 14, 15, 4, 5, 6, 7, 8, -1, -1, -1, -1, 13, 14, 15, -1, 29, 467, -1, 447, 1, 471, 450, 4, 5, 6, 7, 8, 29, -1, 31, -1, 13, 14, 15, -1, -1, -1, -1, -1, 433, 434, -1, -1, 437, 438, -1, -1, 29, -1, -1, 1, -1, -1, 4, 5, 6, 7, 8, 333, 334, 335, 336, 13, 14, 15, -1, -1, 399, -1, -1, -1, 433, 434, -1, -1, 437, 438, 28, 29, -1, 31, -1, 96, -1, -1, -1, -1, 538, 90, -1, -1, -1, 521, -1, 45, -1, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 444, 445, 96, -1, -1, 68, 69, -1, -1, -1, -1, -1, 5, 6, 7, 8, 333, 334, 335, 336, 13, 14, 15, -1, -1, -1, -1, 90, -1, -1, -1, 94, -1, 96, 1, -1, 3, 4, 31, -1, 484, 485, 9, 10, -1, 12, -1, -1, 435, 436, -1, -1, 439, 440, -1, 
-1, -1, -1, 599, 600, 601, -1, -1, 30, 627, 32, -1, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, -1, 5, 6, 7, 8, 49, -1, -1, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, 67, 68, 69, 1, 31, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, 13, 14, 15, 435, 436, -1, 684, 439, 440, 91, 92, 93, -1, -1, 96, 28, 29, -1, 31, 32, -1, -1, 700, -1, -1, 1, -1, 3, 4, -1, 708, 709, -1, 9, 10, -1, 12, 715, -1, 717, -1, 719, -1, 721, -1, 723, 59, 725, 90, 727, -1, 729, -1, -1, 30, 68, 32, -1, 34, 35, 36, 37, 38, -1, 623, 624, 42, 43, -1, -1, -1, -1, -1, 49, -1, 4, 52, 90, 7, 8, -1, 57, 58, 59, 13, 14, 15, 63, 64, -1, -1, 67, 68, 69, 715, 716, 717, 718, -1, -1, 29, -1, 723, 724, 725, 726, -1, -1, 764, 765, -1, -1, -1, -1, 770, 91, 92, 93, -1, -1, 96, 4, 5, 6, 7, 8, 715, 716, 717, 718, 13, 14, 15, -1, 723, 724, 725, 726, -1, -1, -1, -1, -1, -1, 703, 704, 29, -1, 248, 249, 55, 56, 57, 58, 59, 60, 61, -1, 814, -1, 260, -1, -1, 819, 820, 265, 266, -1, -1, -1, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 1, -1, 3, 4, 5, 6, 7, 8, -1, -1, 11, -1, 13, 14, 15, -1, -1, -1, -1, -1, -1, 859, -1, -1, 306, 90, -1, -1, 29, -1, -1, -1, -1, -1, 872, 719, 720, 721, 722, 780, 781, -1, -1, 727, 728, 729, 730, -1, 789, 790, -1, -1, 793, 794, -1, -1, -1, -1, 4, 5, 6, 7, 8, 901, 902, -1, -1, 13, 14, 15, -1, -1, -1, -1, -1, 4, 5, 6, 7, 8, 821, 822, 823, 29, 13, 14, 15, 5, 6, 7, 8, -1, 374, 375, 95, 13, 14, 15, -1, -1, 29, 5, 6, 7, 8, 719, 720, 721, 722, 13, 14, 15, -1, 727, 728, 729, 730, 1, -1, 3, 4, -1, -1, 7, 8, 9, 10, 31, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, 423, 424, -1, 90, 29, 30, 31, 32, -1, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, -1, 90, -1, -1, -1, 49, -1, -1, 52, -1, -1, -1, 90, 57, 58, 59, -1, 459, -1, 63, 64, -1, -1, -1, 68, -1, 90, 469, -1, -1, -1, -1, 4, 5, 6, 7, 8, 479, -1, -1, -1, 13, 14, 15, -1, -1, -1, -1, 92, 93, -1, -1, 96, -1, -1, -1, -1, 29, 5, 6, 7, 8, -1, -1, -1, -1, 13, 14, 15, -1, 512, -1, -1, -1, 1, -1, 3, 4, 5, 6, 7, 8, 9, 10, 31, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, 559, 90, -1, -1, -1, 49, -1, 566, 52, -1, -1, 570, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, -1, 68, -1, 90, -1, 1, -1, 589, 4, 5, 6, 7, 8, -1, -1, -1, -1, 13, 14, 15, -1, -1, -1, 90, 91, 92, 93, 4, -1, 96, 7, 8, 28, 29, -1, 31, 13, 14, 15, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 45, -1, -1, 29, -1, 31, -1, 5, 6, 7, 8, 642, -1, -1, 645, 13, 14, 15, 649, 5, 6, 7, 8, 68, 69, -1, -1, 13, 14, 15, 661, 5, 6, 7, 8, -1, -1, 668, -1, 13, 14, 15, -1, -1, -1, 90, 677, 678, -1, 94, -1, 96, -1, -1, 1, 686, 3, 4, 5, 6, 7, 8, 9, 10, -1, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, 34, 35, 36, 37, 38, 90, -1, -1, 42, 43, -1, -1, -1, -1, -1, 49, -1, 90, 52, -1, 738, -1, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, -1, 68, -1, 1, 755, 3, -1, 758, -1, 760, -1, 9, 10, -1, 12, -1, 28, -1, -1, 31, -1, -1, -1, 90, -1, 92, 93, -1, -1, 96, -1, -1, 30, 45, 32, -1, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, -1, 45, -1, -1, -1, 49, 803, -1, 52, 806, 68, 69, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, 67, 68, 69, 1, -1, 3, -1, -1, -1, 90, -1, 9, 10, 94, 12, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 92, 93, -1, -1, 96, -1, -1, 30, -1, 32, -1, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, -1, -1, -1, -1, -1, 49, -1, 4, 52, -1, 7, 8, -1, 57, 58, 59, 13, 14, 15, 63, 64, -1, -1, 1, 68, -1, 4, 5, 6, 7, 8, -1, 29, -1, 31, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 92, 93, 28, 29, 96, 31, 3, -1, -1, -1, -1, -1, 9, 10, 
-1, 12, -1, 4, -1, 45, 7, 8, 5, 6, 7, 8, 13, 14, 15, -1, 13, 14, 15, 30, -1, 32, -1, 34, 35, 36, 37, 38, 29, -1, 31, 42, 43, -1, 31, -1, -1, -1, 49, -1, -1, 52, -1, -1, -1, -1, 57, 58, 59, -1, 90, -1, 63, 64, 94, -1, 96, 68, 3, -1, -1, -1, -1, -1, 9, 10, -1, 12, -1, 5, 6, 7, 8, -1, -1, -1, -1, 13, 14, 15, -1, 92, 93, -1, -1, 30, 97, 32, -1, 34, 35, 36, 37, 38, 11, 31, -1, 42, 43, 4, 5, 6, 7, 8, 49, -1, 11, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, -1, 68, 29, -1, 31, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, 92, 93, -1, -1, -1, 97, 3, 4, 5, 6, 7, 8, 9, 10, -1, 12, 13, 14, 15, 16, -1, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, 34, 35, 36, 37, 38, -1, -1, 1, 42, 43, 4, -1, -1, 7, 8, 49, -1, -1, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, -1, 68, 29, -1, 31, 32, -1, -1, -1, 5, 6, 7, 8, -1, -1, 3, 4, 13, 14, 15, -1, 9, 10, 90, 12, 92, 93, -1, 16, 96, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, -1, 30, -1, 32, -1, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, 4, 5, 6, 7, 8, 49, -1, -1, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, -1, 68, 29, 3, 31, -1, -1, -1, -1, 9, 10, -1, 12, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 90, -1, 92, 93, -1, -1, 96, 30, -1, 32, -1, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, -1, 5, 6, 7, 8, 49, -1, -1, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, -1, 68, 1, -1, 3, -1, -1, -1, -1, -1, 9, 10, -1, 12, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 92, 93, -1, -1, 96, -1, 30, -1, 32, -1, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, -1, 5, 6, 7, 8, 49, -1, -1, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, -1, 68, 3, 4, 5, 6, 7, 8, 9, 10, -1, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 92, 93, -1, 29, 30, 31, 32, -1, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, -1, -1, -1, -1, -1, 49, -1, -1, 52, -1, -1, -1, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, -1, 68, 3, 4, -1, -1, 7, 8, 9, 10, -1, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 92, 93, -1, 29, 30, 31, 32, -1, 34, 35, 36, 37, 38, -1, -1, -1, 42, 43, 4, 5, 6, 7, 8, 49, -1, -1, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, 3, 68, 29, -1, 31, -1, 9, 10, -1, 12, -1, 4, 5, 6, 7, 8, -1, -1, -1, -1, 13, 14, 15, -1, -1, 92, 93, 30, -1, 32, -1, 34, 35, 36, 37, 38, 29, -1, 31, 42, 43, 4, 5, 6, 7, 8, 49, -1, -1, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, 3, 68, 29, -1, 31, -1, 9, 10, -1, 12, -1, 4, 5, 6, 7, 8, -1, -1, -1, -1, 13, 14, 15, 90, -1, 92, 93, 30, -1, 32, -1, 34, 35, 36, 37, 38, 29, -1, -1, 42, 43, 4, 5, 6, 7, 8, 49, -1, -1, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, 3, 68, 29, -1, -1, -1, 9, 10, -1, 12, -1, 4, 5, 6, 7, 8, -1, -1, -1, -1, 13, 14, 15, -1, -1, 92, 93, 30, -1, 32, -1, 34, 35, 36, 37, 38, 29, -1, -1, 42, 43, 4, 5, 6, 7, 8, 49, -1, -1, 52, 13, 14, 15, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, 3, 68, 29, -1, -1, -1, 9, 10, -1, 12, -1, 4, 5, 6, 7, 8, -1, -1, -1, -1, 13, 14, 15, -1, -1, 92, 93, 30, -1, 32, -1, 34, 35, 36, 37, 38, 29, -1, -1, 42, 43, -1, -1, -1, -1, -1, 49, -1, -1, 52, -1, -1, -1, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, 3, 68, -1, -1, -1, -1, 9, 10, 46, 12, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, 92, 93, 30, -1, 32, -1, 34, 35, 36, 37, 38, 11, -1, -1, 42, 43, -1, -1, -1, -1, -1, 49, -1, -1, 52, -1, -1, -1, -1, 57, 58, 59, -1, -1, -1, 63, 64, -1, -1, -1, 68, -1, -1, -1, 44, 45, 46, -1, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, 92, 93, 44, 45, 46, -1, 48, 49, 50, 
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 44, 45, 46, 97, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, 44, 45, 46, 97, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 44, 45, 46, 97, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, 44, 45, 46, 97, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 44, 45, 46, 97, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, 44, 45, 46, 95, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, -1, -1, 44, 45, 46, 94, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 31, -1, -1, -1, -1, 94, -1, -1, -1, -1, -1, -1, -1, 44, 45, 46, -1, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, 94, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 44, 45, 46, -1, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61 }; /* -*-C-*- Note some compilers choke on comments on `#line' lines. */ #line 3 "/usr/share/bison/bison.simple" /* Skeleton output parser for bison, Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* As a special exception, when this file is copied by Bison into a Bison output file, you may use that output file without restriction. This special exception was added by the Free Software Foundation in version 1.24 of Bison. */ /* This is the parser code that is written into each bison parser when the %semantic_parser declaration is not specified in the grammar. It was written by Richard Stallman by simplifying the hairy parser used when %semantic_parser is specified. */ /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. There are some unavoidable exceptions within include files to define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. */ #if ! defined (yyoverflow) || defined (YYERROR_VERBOSE) /* The parser invokes alloca or malloc; define the necessary symbols. */ # if YYSTACK_USE_ALLOCA # define YYSTACK_ALLOC alloca # else # ifndef YYSTACK_USE_ALLOCA # if defined (alloca) || defined (_ALLOCA_H) # define YYSTACK_ALLOC alloca # else # ifdef __GNUC__ # define YYSTACK_ALLOC __builtin_alloca # endif # endif # endif # endif # ifdef YYSTACK_ALLOC /* Pacify GCC's `empty if-body' warning. 
*/ # define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # else # if defined (__STDC__) || defined (__cplusplus) # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # endif # define YYSTACK_ALLOC malloc # define YYSTACK_FREE free # endif #endif /* ! defined (yyoverflow) || defined (YYERROR_VERBOSE) */ #if (! defined (yyoverflow) \ && (! defined (__cplusplus) \ || (YYLTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { short yyss; YYSTYPE yyvs; # if YYLSP_NEEDED YYLTYPE yyls; # endif }; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAX (sizeof (union yyalloc) - 1) /* The size of an array large enough to hold all stacks, each with N elements. */ # if YYLSP_NEEDED # define YYSTACK_BYTES(N) \ ((N) * (sizeof (short) + sizeof (YYSTYPE) + sizeof (YYLTYPE)) \ + 2 * YYSTACK_GAP_MAX) # else # define YYSTACK_BYTES(N) \ ((N) * (sizeof (short) + sizeof (YYSTYPE)) \ + YYSTACK_GAP_MAX) # endif /* Copy COUNT objects from FROM to TO. The source and destination do not overlap. */ # ifndef YYCOPY # if 1 < __GNUC__ # define YYCOPY(To, From, Count) \ __builtin_memcpy (To, From, (Count) * sizeof (*(From))) # else # define YYCOPY(To, From, Count) \ do \ { \ register YYSIZE_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (To)[yyi] = (From)[yyi]; \ } \ while (0) # endif # endif /* Relocate STACK from its old location to the new one. The local variables YYSIZE and YYSTACKSIZE give the old and new number of elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. */ # define YYSTACK_RELOCATE(Stack) \ do \ { \ YYSIZE_T yynewbytes; \ YYCOPY (&yyptr->Stack, Stack, yysize); \ Stack = &yyptr->Stack; \ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAX; \ yyptr += yynewbytes / sizeof (*yyptr); \ } \ while (0) #endif #if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__) # define YYSIZE_T __SIZE_TYPE__ #endif #if ! defined (YYSIZE_T) && defined (size_t) # define YYSIZE_T size_t #endif #if ! defined (YYSIZE_T) # if defined (__STDC__) || defined (__cplusplus) # include <stddef.h> /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # endif #endif #if ! defined (YYSIZE_T) # define YYSIZE_T unsigned int #endif #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY -2 #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrlab1 /* Like YYERROR except do call yyerror. This remains here temporarily to ease the transition to the new meaning of YYERROR, for GCC. Once GCC version 2 has supplanted version 1, this can go. */ #define YYFAIL goto yyerrlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY && yylen == 1) \ { \ yychar = (Token); \ yylval = (Value); \ yychar1 = YYTRANSLATE (yychar); \ YYPOPSTACK; \ goto yybackup; \ } \ else \ { \ yyerror ("syntax error: cannot back up"); \ YYERROR; \ } \ while (0) #define YYTERROR 1 #define YYERRCODE 256 /* YYLLOC_DEFAULT -- Compute the default location (before the actions are run). When YYLLOC_DEFAULT is run, CURRENT is set to the location of the first token. By default, to implement support for ranges, extend its range to the last symbol. */ #ifndef YYLLOC_DEFAULT # define YYLLOC_DEFAULT(Current, Rhs, N) \ Current.last_line = Rhs[N].last_line; \ Current.last_column = Rhs[N].last_column; #endif /* YYLEX -- calling `yylex' with the right arguments. 
*/ #if YYPURE # if YYLSP_NEEDED # ifdef YYLEX_PARAM # define YYLEX yylex (&yylval, &yylloc, YYLEX_PARAM) # else # define YYLEX yylex (&yylval, &yylloc) # endif # else /* !YYLSP_NEEDED */ # ifdef YYLEX_PARAM # define YYLEX yylex (&yylval, YYLEX_PARAM) # else # define YYLEX yylex (&yylval) # endif # endif /* !YYLSP_NEEDED */ #else /* !YYPURE */ # define YYLEX yylex () #endif /* !YYPURE */ /* Enable debugging if requested. */ #if YYDEBUG # ifndef YYFPRINTF # include <stdio.h> /* INFRINGES ON USER NAME SPACE */ # define YYFPRINTF fprintf # endif # define YYDPRINTF(Args) \ do { \ if (yydebug) \ YYFPRINTF Args; \ } while (0) /* Nonzero means print parse trace. It is left uninitialized so that multiple parsers can coexist. */ int yydebug; #else /* !YYDEBUG */ # define YYDPRINTF(Args) #endif /* !YYDEBUG */ /* YYINITDEPTH -- initial size of the parser's stacks. */ #ifndef YYINITDEPTH # define YYINITDEPTH 200 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only if the built-in stack extension method is used). Do not make this value too large; the results are undefined if SIZE_MAX < YYSTACK_BYTES (YYMAXDEPTH) evaluated with infinite-precision integer arithmetic. */ #if YYMAXDEPTH == 0 # undef YYMAXDEPTH #endif #ifndef YYMAXDEPTH # define YYMAXDEPTH 10000 #endif #ifdef YYERROR_VERBOSE # ifndef yystrlen # if defined (__GLIBC__) && defined (_STRING_H) # define yystrlen strlen # else /* Return the length of YYSTR. */ static YYSIZE_T # if defined (__STDC__) || defined (__cplusplus) yystrlen (const char *yystr) # else yystrlen (yystr) const char *yystr; # endif { register const char *yys = yystr; while (*yys++ != '\0') continue; return yys - yystr - 1; } # endif # endif # ifndef yystpcpy # if defined (__GLIBC__) && defined (_STRING_H) && defined (_GNU_SOURCE) # define yystpcpy stpcpy # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ static char * # if defined (__STDC__) || defined (__cplusplus) yystpcpy (char *yydest, const char *yysrc) # else yystpcpy (yydest, yysrc) char *yydest; const char *yysrc; # endif { register char *yyd = yydest; register const char *yys = yysrc; while ((*yyd++ = *yys++) != '\0') continue; return yyd - 1; } # endif # endif #endif #line 315 "/usr/share/bison/bison.simple" /* The user can define YYPARSE_PARAM as the name of an argument to be passed into yyparse. The argument should have type void *. It should actually point to an object. Grammar actions can access the variable by casting it to the proper pointer type. */ #ifdef YYPARSE_PARAM # if defined (__STDC__) || defined (__cplusplus) # define YYPARSE_PARAM_ARG void *YYPARSE_PARAM # define YYPARSE_PARAM_DECL # else # define YYPARSE_PARAM_ARG YYPARSE_PARAM # define YYPARSE_PARAM_DECL void *YYPARSE_PARAM; # endif #else /* !YYPARSE_PARAM */ # define YYPARSE_PARAM_ARG # define YYPARSE_PARAM_DECL #endif /* !YYPARSE_PARAM */ /* Prevent warning if -Wstrict-prototypes. */ #ifdef __GNUC__ # ifdef YYPARSE_PARAM int yyparse (void *); # else int yyparse (void); # endif #endif /* YY_DECL_VARIABLES -- depending on whether we use a pure parser, variables are global, or local to YYPARSE. */ #define YY_DECL_NON_LSP_VARIABLES \ /* The lookahead symbol. */ \ int yychar; \ \ /* The semantic value of the lookahead symbol. */ \ YYSTYPE yylval; \ \ /* Number of parse errors so far. */ \ int yynerrs; #if YYLSP_NEEDED # define YY_DECL_VARIABLES \ YY_DECL_NON_LSP_VARIABLES \ \ /* Location data for the lookahead symbol. 
*/ \ YYLTYPE yylloc; #else # define YY_DECL_VARIABLES \ YY_DECL_NON_LSP_VARIABLES #endif /* If nonreentrant, generate the variables here. */ #if !YYPURE YY_DECL_VARIABLES #endif /* !YYPURE */ int yyparse (YYPARSE_PARAM_ARG) YYPARSE_PARAM_DECL { /* If reentrant, generate the variables here. */ #if YYPURE YY_DECL_VARIABLES #endif /* !YYPURE */ register int yystate; register int yyn; int yyresult; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* Lookahead token as an internal (translated) token number. */ int yychar1 = 0; /* Three stacks and their tools: `yyss': related to states, `yyvs': related to semantic values, `yyls': related to locations. Refer to the stacks thru separate pointers, to allow yyoverflow to reallocate them elsewhere. */ /* The state stack. */ short yyssa[YYINITDEPTH]; short *yyss = yyssa; register short *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs = yyvsa; register YYSTYPE *yyvsp; #if YYLSP_NEEDED /* The location stack. */ YYLTYPE yylsa[YYINITDEPTH]; YYLTYPE *yyls = yylsa; YYLTYPE *yylsp; #endif #if YYLSP_NEEDED # define YYPOPSTACK (yyvsp--, yyssp--, yylsp--) #else # define YYPOPSTACK (yyvsp--, yyssp--) #endif YYSIZE_T yystacksize = YYINITDEPTH; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; #if YYLSP_NEEDED YYLTYPE yyloc; #endif /* When reducing, the number of symbols on the RHS of the reduced rule. */ int yylen; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ /* Initialize stack pointers. Waste one element of value and location stack so that they stay on the same level as the state stack. The wasted elements are never initialized. */ yyssp = yyss; yyvsp = yyvs; #if YYLSP_NEEDED yylsp = yyls; #endif goto yysetstate; /*------------------------------------------------------------. | yynewstate -- Push a new state, which is found in yystate. | `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. so pushing a state here evens the stacks. */ yyssp++; yysetstate: *yyssp = yystate; if (yyssp >= yyss + yystacksize - 1) { /* Get the current used size of the three stacks, in elements. */ YYSIZE_T yysize = yyssp - yyss + 1; { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ YYSTYPE *yyvs1 = yyvs; short *yyss1 = yyss; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. */ # if YYLSP_NEEDED YYLTYPE *yyls1 = yyls; /* This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow ("parser stack overflow", &yyss1, yysize * sizeof (*yyssp), &yyvs1, yysize * sizeof (*yyvsp), &yyls1, yysize * sizeof (*yylsp), &yystacksize); yyls = yyls1; # else yyoverflow ("parser stack overflow", &yyss1, yysize * sizeof (*yyssp), &yyvs1, yysize * sizeof (*yyvsp), &yystacksize); # endif yyss = yyss1; yyvs = yyvs1; } yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; #if YYLSP_NEEDED yylsp = yyls + yysize - 1; #endif YYDPRINTF ((stderr, "Stack size increased to %lu\n", (unsigned long int) yystacksize)); if (yyssp >= yyss + yystacksize - 1) YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); goto yybackup; /*-----------. | yybackup. 
| `-----------*/ yybackup: /* Do appropriate processing given the current state. */ /* Read a lookahead token if we need one and don't already have one. */ /* yyresume: */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yyn == YYFLAG) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* yychar is either YYEMPTY or YYEOF or a valid token in external form. */ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = YYLEX; } /* Convert token to internal form (in yychar1) for indexing tables with */ if (yychar <= 0) /* This means end of input. */ { yychar1 = 0; yychar = YYEOF; /* Don't call YYLEX any more */ YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yychar1 = YYTRANSLATE (yychar); #if YYDEBUG /* We have to keep this `#if YYDEBUG', since we use variables which are defined only if `YYDEBUG' is set. */ if (yydebug) { YYFPRINTF (stderr, "Next token is %d (%s", yychar, yytname[yychar1]); /* Give the individual parser a way to print the precise meaning of a token, for further debugging info. */ # ifdef YYPRINT YYPRINT (stderr, yychar, yylval); # endif YYFPRINTF (stderr, ")\n"); } #endif } yyn += yychar1; if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != yychar1) goto yydefault; yyn = yytable[yyn]; /* yyn is what to do for this token type in this state. Negative => reduce, -yyn is rule number. Positive => shift, yyn is new state. New state is final state => don't bother to shift, just return success. 0, or most negative number => error. */ if (yyn < 0) { if (yyn == YYFLAG) goto yyerrlab; yyn = -yyn; goto yyreduce; } else if (yyn == 0) goto yyerrlab; if (yyn == YYFINAL) YYACCEPT; /* Shift the lookahead token. */ YYDPRINTF ((stderr, "Shifting token %d (%s), ", yychar, yytname[yychar1])); /* Discard the token being shifted unless it is eof. */ if (yychar != YYEOF) yychar = YYEMPTY; *++yyvsp = yylval; #if YYLSP_NEEDED *++yylsp = yylloc; #endif /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; yystate = yyn; goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- Do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. */ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: `$$ = $1'. Otherwise, the following line sets YYVAL to the semantic value of the lookahead token. This behavior is undocumented and Bison users should not rely upon it. Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. */ yyval = yyvsp[1-yylen]; #if YYLSP_NEEDED /* Similarly for the default location. Let the user run additional commands if for instance locations are ranges. */ yyloc = yylsp[1-yylen]; YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen); #endif #if YYDEBUG /* We have to keep this `#if YYDEBUG', since we use variables which are defined only if `YYDEBUG' is set. */ if (yydebug) { int yyi; YYFPRINTF (stderr, "Reducing via rule %d (line %d), ", yyn, yyrline[yyn]); /* Print the symbols being reduced, and their result. 
*/ for (yyi = yyprhs[yyn]; yyrhs[yyi] > 0; yyi++) YYFPRINTF (stderr, "%s ", yytname[yyrhs[yyi]]); YYFPRINTF (stderr, " -> %s\n", yytname[yyr1[yyn]]); } #endif switch (yyn) { case 1: #line 312 "c-parse.y" { if (pedantic) pedwarn ("ISO C forbids an empty source file"); ; break;} case 3: #line 323 "c-parse.y" {yyval.ttype = NULL_TREE; ; break;} case 5: #line 324 "c-parse.y" {yyval.ttype = NULL_TREE; ggc_collect(); ; break;} case 10: #line 332 "c-parse.y" { RESTORE_EXT_FLAGS (yyvsp[-1].itype); ; break;} case 11: #line 337 "c-parse.y" { if (pedantic) error ("ISO C forbids data definition with no type or storage class"); else warning ("data definition has no type or storage class"); POP_DECLSPEC_STACK; ; break;} case 12: #line 344 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 13: #line 346 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 14: #line 348 "c-parse.y" { shadow_tag (yyvsp[-1].ttype); ; break;} case 17: #line 352 "c-parse.y" { if (pedantic) pedwarn ("ISO C does not allow extra `;' outside of a function"); ; break;} case 18: #line 358 "c-parse.y" { if (! start_function (current_declspecs, yyvsp[0].ttype, all_prefix_attributes)) YYERROR1; ; break;} case 19: #line 363 "c-parse.y" { DECL_SOURCE_LOCATION (current_function_decl) = yyvsp[0].location; store_parm_decls (); ; break;} case 20: #line 366 "c-parse.y" { finish_function (); POP_DECLSPEC_STACK; ; break;} case 21: #line 369 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 22: #line 371 "c-parse.y" { if (! start_function (current_declspecs, yyvsp[0].ttype, all_prefix_attributes)) YYERROR1; ; break;} case 23: #line 376 "c-parse.y" { DECL_SOURCE_LOCATION (current_function_decl) = yyvsp[0].location; store_parm_decls (); ; break;} case 24: #line 379 "c-parse.y" { finish_function (); POP_DECLSPEC_STACK; ; break;} case 25: #line 382 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 26: #line 384 "c-parse.y" { if (! 
start_function (NULL_TREE, yyvsp[0].ttype, all_prefix_attributes)) YYERROR1; ; break;} case 27: #line 389 "c-parse.y" { DECL_SOURCE_LOCATION (current_function_decl) = yyvsp[0].location; store_parm_decls (); ; break;} case 28: #line 392 "c-parse.y" { finish_function (); POP_DECLSPEC_STACK; ; break;} case 29: #line 395 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 32: #line 404 "c-parse.y" { yyval.code = ADDR_EXPR; ; break;} case 33: #line 406 "c-parse.y" { yyval.code = NEGATE_EXPR; ; break;} case 34: #line 408 "c-parse.y" { yyval.code = CONVERT_EXPR; if (warn_traditional && !in_system_header) warning ("traditional C rejects the unary plus operator"); ; break;} case 35: #line 413 "c-parse.y" { yyval.code = PREINCREMENT_EXPR; ; break;} case 36: #line 415 "c-parse.y" { yyval.code = PREDECREMENT_EXPR; ; break;} case 37: #line 417 "c-parse.y" { yyval.code = BIT_NOT_EXPR; ; break;} case 38: #line 419 "c-parse.y" { yyval.code = TRUTH_NOT_EXPR; ; break;} case 39: #line 423 "c-parse.y" { yyval.ttype = build_compound_expr (yyvsp[0].ttype); ; break;} case 40: #line 428 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 42: #line 434 "c-parse.y" { yyval.ttype = build_tree_list (NULL_TREE, yyvsp[0].ttype); ; break;} case 43: #line 436 "c-parse.y" { chainon (yyvsp[-2].ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ; break;} case 45: #line 442 "c-parse.y" { yyval.ttype = build_indirect_ref (yyvsp[0].ttype, "unary *"); ; break;} case 46: #line 445 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; RESTORE_EXT_FLAGS (yyvsp[-1].itype); ; break;} case 47: #line 448 "c-parse.y" { yyval.ttype = build_unary_op (yyvsp[-1].code, yyvsp[0].ttype, 0); overflow_warning (yyval.ttype); ; break;} case 48: #line 452 "c-parse.y" { yyval.ttype = finish_label_address_expr (yyvsp[0].ttype); ; break;} case 49: #line 454 "c-parse.y" { skip_evaluation--; if (TREE_CODE (yyvsp[0].ttype) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (yyvsp[0].ttype, 1))) error ("`sizeof' applied to a bit-field"); yyval.ttype = c_sizeof (TREE_TYPE (yyvsp[0].ttype)); ; break;} case 50: #line 460 "c-parse.y" { skip_evaluation--; yyval.ttype = c_sizeof (groktypename (yyvsp[-1].ttype)); ; break;} case 51: #line 463 "c-parse.y" { skip_evaluation--; yyval.ttype = c_alignof_expr (yyvsp[0].ttype); ; break;} case 52: #line 466 "c-parse.y" { skip_evaluation--; yyval.ttype = c_alignof (groktypename (yyvsp[-1].ttype)); ; break;} case 53: #line 469 "c-parse.y" { yyval.ttype = build_unary_op (REALPART_EXPR, yyvsp[0].ttype, 0); ; break;} case 54: #line 471 "c-parse.y" { yyval.ttype = build_unary_op (IMAGPART_EXPR, yyvsp[0].ttype, 0); ; break;} case 55: #line 475 "c-parse.y" { skip_evaluation++; ; break;} case 56: #line 479 "c-parse.y" { skip_evaluation++; ; break;} case 57: #line 483 "c-parse.y" { skip_evaluation++; ; break;} case 59: #line 489 "c-parse.y" { yyval.ttype = c_cast_expr (yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 61: #line 495 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 62: #line 497 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 63: #line 499 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 64: #line 501 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 65: #line 503 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, 
yyvsp[0].ttype); ; break;} case 66: #line 505 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 67: #line 507 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 68: #line 509 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 69: #line 511 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 70: #line 513 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 71: #line 515 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 72: #line 517 "c-parse.y" { yyval.ttype = parser_build_binary_op (yyvsp[-1].code, yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 73: #line 519 "c-parse.y" { yyvsp[-1].ttype = lang_hooks.truthvalue_conversion (default_conversion (yyvsp[-1].ttype)); skip_evaluation += yyvsp[-1].ttype == truthvalue_false_node; ; break;} case 74: #line 523 "c-parse.y" { skip_evaluation -= yyvsp[-3].ttype == truthvalue_false_node; yyval.ttype = parser_build_binary_op (TRUTH_ANDIF_EXPR, yyvsp[-3].ttype, yyvsp[0].ttype); ; break;} case 75: #line 526 "c-parse.y" { yyvsp[-1].ttype = lang_hooks.truthvalue_conversion (default_conversion (yyvsp[-1].ttype)); skip_evaluation += yyvsp[-1].ttype == truthvalue_true_node; ; break;} case 76: #line 530 "c-parse.y" { skip_evaluation -= yyvsp[-3].ttype == truthvalue_true_node; yyval.ttype = parser_build_binary_op (TRUTH_ORIF_EXPR, yyvsp[-3].ttype, yyvsp[0].ttype); ; break;} case 77: #line 533 "c-parse.y" { yyvsp[-1].ttype = lang_hooks.truthvalue_conversion (default_conversion (yyvsp[-1].ttype)); skip_evaluation += yyvsp[-1].ttype == truthvalue_false_node; ; break;} case 78: #line 537 "c-parse.y" { skip_evaluation += ((yyvsp[-4].ttype == truthvalue_true_node) - (yyvsp[-4].ttype == truthvalue_false_node)); ; break;} case 79: #line 540 "c-parse.y" { skip_evaluation -= yyvsp[-6].ttype == truthvalue_true_node; yyval.ttype = build_conditional_expr (yyvsp[-6].ttype, yyvsp[-3].ttype, yyvsp[0].ttype); ; break;} case 80: #line 543 "c-parse.y" { if (pedantic) pedwarn ("ISO C forbids omitting the middle term of a ?: expression"); /* Make sure first operand is calculated only once. */ yyvsp[0].ttype = save_expr (default_conversion (yyvsp[-1].ttype)); yyvsp[-1].ttype = lang_hooks.truthvalue_conversion (yyvsp[0].ttype); skip_evaluation += yyvsp[-1].ttype == truthvalue_true_node; ; break;} case 81: #line 550 "c-parse.y" { skip_evaluation -= yyvsp[-4].ttype == truthvalue_true_node; yyval.ttype = build_conditional_expr (yyvsp[-4].ttype, yyvsp[-3].ttype, yyvsp[0].ttype); ; break;} case 82: #line 553 "c-parse.y" { char class; yyval.ttype = build_modify_expr (yyvsp[-2].ttype, NOP_EXPR, yyvsp[0].ttype); class = TREE_CODE_CLASS (TREE_CODE (yyval.ttype)); if (IS_EXPR_CODE_CLASS (class)) C_SET_EXP_ORIGINAL_CODE (yyval.ttype, MODIFY_EXPR); ; break;} case 83: #line 560 "c-parse.y" { char class; yyval.ttype = build_modify_expr (yyvsp[-2].ttype, yyvsp[-1].code, yyvsp[0].ttype); /* This inhibits warnings in c_common_truthvalue_conversion. 
*/ class = TREE_CODE_CLASS (TREE_CODE (yyval.ttype)); if (IS_EXPR_CODE_CLASS (class)) C_SET_EXP_ORIGINAL_CODE (yyval.ttype, ERROR_MARK); ; break;} case 84: #line 572 "c-parse.y" { if (yychar == YYEMPTY) yychar = YYLEX; yyval.ttype = build_external_ref (yyvsp[0].ttype, yychar == '('); ; break;} case 87: #line 580 "c-parse.y" { yyval.ttype = fname_decl (C_RID_CODE (yyval.ttype), yyval.ttype); ; break;} case 88: #line 582 "c-parse.y" { start_init (NULL_TREE, NULL, 0); yyvsp[-2].ttype = groktypename (yyvsp[-2].ttype); really_start_incremental_init (yyvsp[-2].ttype); ; break;} case 89: #line 586 "c-parse.y" { tree constructor = pop_init_level (0); tree type = yyvsp[-5].ttype; finish_init (); if (pedantic && ! flag_isoc99) pedwarn ("ISO C90 forbids compound literals"); yyval.ttype = build_compound_literal (type, constructor); ; break;} case 90: #line 595 "c-parse.y" { char class = TREE_CODE_CLASS (TREE_CODE (yyvsp[-1].ttype)); if (IS_EXPR_CODE_CLASS (class)) C_SET_EXP_ORIGINAL_CODE (yyvsp[-1].ttype, ERROR_MARK); yyval.ttype = yyvsp[-1].ttype; ; break;} case 91: #line 600 "c-parse.y" { yyval.ttype = error_mark_node; ; break;} case 92: #line 602 "c-parse.y" { if (pedantic) pedwarn ("ISO C forbids braced-groups within expressions"); yyval.ttype = c_finish_stmt_expr (yyvsp[-2].ttype); ; break;} case 93: #line 607 "c-parse.y" { c_finish_stmt_expr (yyvsp[-2].ttype); yyval.ttype = error_mark_node; ; break;} case 94: #line 611 "c-parse.y" { yyval.ttype = build_function_call (yyvsp[-3].ttype, yyvsp[-1].ttype); ; break;} case 95: #line 613 "c-parse.y" { yyval.ttype = build_va_arg (yyvsp[-3].ttype, groktypename (yyvsp[-1].ttype)); ; break;} case 96: #line 616 "c-parse.y" { yyval.ttype = build_offsetof (groktypename (yyvsp[-3].ttype), yyvsp[-1].ttype); ; break;} case 97: #line 618 "c-parse.y" { yyval.ttype = error_mark_node; ; break;} case 98: #line 621 "c-parse.y" { tree c; c = fold (yyvsp[-5].ttype); STRIP_NOPS (c); if (TREE_CODE (c) != INTEGER_CST) error ("first argument to __builtin_choose_expr not" " a constant"); yyval.ttype = integer_zerop (c) ? yyvsp[-1].ttype : yyvsp[-3].ttype; ; break;} case 99: #line 632 "c-parse.y" { yyval.ttype = error_mark_node; ; break;} case 100: #line 634 "c-parse.y" { tree e1, e2; e1 = TYPE_MAIN_VARIANT (groktypename (yyvsp[-3].ttype)); e2 = TYPE_MAIN_VARIANT (groktypename (yyvsp[-1].ttype)); yyval.ttype = comptypes (e1, e2) ? 
build_int_2 (1, 0) : build_int_2 (0, 0); ; break;} case 101: #line 644 "c-parse.y" { yyval.ttype = error_mark_node; ; break;} case 102: #line 646 "c-parse.y" { yyval.ttype = build_array_ref (yyvsp[-3].ttype, yyvsp[-1].ttype); ; break;} case 103: #line 648 "c-parse.y" { yyval.ttype = build_component_ref (yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 104: #line 650 "c-parse.y" { tree expr = build_indirect_ref (yyvsp[-2].ttype, "->"); yyval.ttype = build_component_ref (expr, yyvsp[0].ttype); ; break;} case 105: #line 655 "c-parse.y" { yyval.ttype = build_unary_op (POSTINCREMENT_EXPR, yyvsp[-1].ttype, 0); ; break;} case 106: #line 657 "c-parse.y" { yyval.ttype = build_unary_op (POSTDECREMENT_EXPR, yyvsp[-1].ttype, 0); ; break;} case 107: #line 668 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, NULL_TREE); ; break;} case 108: #line 670 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, yyvsp[-2].ttype); ; break;} case 109: #line 672 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[-1].ttype, yyvsp[-3].ttype); ; break;} case 112: #line 685 "c-parse.y" { ; break;} case 117: #line 701 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 118: #line 703 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 119: #line 705 "c-parse.y" { shadow_tag_warned (yyvsp[-1].ttype, 1); pedwarn ("empty declaration"); ; break;} case 120: #line 708 "c-parse.y" { pedwarn ("empty declaration"); ; break;} case 121: #line 717 "c-parse.y" { ; break;} case 122: #line 725 "c-parse.y" { pending_xref_error (); PUSH_DECLSPEC_STACK; split_specs_attrs (yyvsp[0].ttype, &current_declspecs, &prefix_attributes); all_prefix_attributes = prefix_attributes; ; break;} case 123: #line 736 "c-parse.y" { all_prefix_attributes = chainon (yyvsp[0].ttype, prefix_attributes); ; break;} case 124: #line 741 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 125: #line 743 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 126: #line 745 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 127: #line 747 "c-parse.y" { POP_DECLSPEC_STACK; ; break;} case 128: #line 749 "c-parse.y" { shadow_tag (yyvsp[-1].ttype); ; break;} case 129: #line 751 "c-parse.y" { RESTORE_EXT_FLAGS (yyvsp[-1].itype); ; break;} case 130: #line 808 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, NULL_TREE); TREE_STATIC (yyval.ttype) = 1; ; break;} case 131: #line 811 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 132: #line 814 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 133: #line 820 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 134: #line 826 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 135: #line 829 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 136: #line 835 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, NULL_TREE); TREE_STATIC (yyval.ttype) = 0; ; break;} case 137: #line 838 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 138: #line 844 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, NULL_TREE); TREE_STATIC (yyval.ttype) = 1; ; break;} case 
139: #line 847 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 140: #line 850 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 141: #line 853 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 142: #line 856 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 143: #line 859 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 144: #line 862 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 145: #line 868 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, NULL_TREE); TREE_STATIC (yyval.ttype) = 1; ; break;} case 146: #line 871 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 147: #line 874 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 148: #line 877 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 149: #line 880 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 150: #line 883 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 151: #line 889 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 152: #line 892 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 153: #line 895 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 154: #line 898 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 155: #line 901 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 156: #line 904 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 157: #line 910 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 158: #line 913 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 159: #line 916 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 160: #line 919 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 161: #line 922 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 162: #line 928 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, NULL_TREE); TREE_STATIC (yyval.ttype) = 0; ; break;} case 163: #line 931 "c-parse.y" { 
yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 164: #line 934 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 165: #line 937 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 166: #line 943 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 167: #line 949 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 168: #line 955 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 169: #line 964 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 170: #line 970 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 171: #line 973 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 172: #line 976 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 173: #line 982 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 174: #line 988 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 175: #line 994 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 176: #line 1003 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 177: #line 1009 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 
1; ; break;} case 178: #line 1012 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 179: #line 1015 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 180: #line 1018 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 181: #line 1021 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 182: #line 1024 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 183: #line 1027 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 184: #line 1033 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 185: #line 1039 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 186: #line 1045 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 187: #line 1054 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 188: #line 1057 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 189: #line 1060 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 190: #line 1063 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 191: #line 1066 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 192: #line 1072 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 193: #line 1075 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 194: #line 1078 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 195: #line 1081 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 196: #line 1084 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 197: #line 1087 "c-parse.y" { yyval.ttype = 
tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 198: #line 1090 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 199: #line 1096 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 200: #line 1102 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 201: #line 1108 "c-parse.y" { if (extra_warnings && TREE_STATIC (yyvsp[-1].ttype)) warning ("`%s' is not at beginning of declaration", IDENTIFIER_POINTER (yyvsp[0].ttype)); yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 202: #line 1117 "c-parse.y" { yyval.ttype = tree_cons (yyvsp[0].ttype, NULL_TREE, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = TREE_STATIC (yyvsp[-1].ttype); ; break;} case 203: #line 1120 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 204: #line 1123 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 205: #line 1126 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 206: #line 1129 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-1].ttype); TREE_STATIC (yyval.ttype) = 1; ; break;} case 263: #line 1217 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 264: #line 1219 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; ; break;} case 268: #line 1254 "c-parse.y" { OBJC_NEED_RAW_IDENTIFIER (1); ; break;} case 271: #line 1264 "c-parse.y" { /* For a typedef name, record the meaning, not the name. In case of `foo foo, bar;'. 
*/ yyval.ttype = lookup_name (yyvsp[0].ttype); ; break;} case 272: #line 1268 "c-parse.y" { skip_evaluation--; if (TREE_CODE (yyvsp[-1].ttype) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (yyvsp[-1].ttype, 1))) error ("`typeof' applied to a bit-field"); yyval.ttype = TREE_TYPE (yyvsp[-1].ttype); ; break;} case 273: #line 1274 "c-parse.y" { skip_evaluation--; yyval.ttype = groktypename (yyvsp[-1].ttype); ; break;} case 278: #line 1291 "c-parse.y" { yyval.ttype = start_decl (yyvsp[-3].ttype, current_declspecs, 1, chainon (yyvsp[-1].ttype, all_prefix_attributes)); start_init (yyval.ttype, yyvsp[-2].ttype, global_bindings_p ()); ; break;} case 279: #line 1296 "c-parse.y" { finish_init (); finish_decl (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-4].ttype); ; break;} case 280: #line 1299 "c-parse.y" { tree d = start_decl (yyvsp[-2].ttype, current_declspecs, 0, chainon (yyvsp[0].ttype, all_prefix_attributes)); finish_decl (d, NULL_TREE, yyvsp[-1].ttype); ; break;} case 281: #line 1307 "c-parse.y" { yyval.ttype = start_decl (yyvsp[-3].ttype, current_declspecs, 1, chainon (yyvsp[-1].ttype, all_prefix_attributes)); start_init (yyval.ttype, yyvsp[-2].ttype, global_bindings_p ()); ; break;} case 282: #line 1312 "c-parse.y" { finish_init (); finish_decl (yyvsp[-1].ttype, yyvsp[0].ttype, yyvsp[-4].ttype); ; break;} case 283: #line 1315 "c-parse.y" { tree d = start_decl (yyvsp[-2].ttype, current_declspecs, 0, chainon (yyvsp[0].ttype, all_prefix_attributes)); finish_decl (d, NULL_TREE, yyvsp[-1].ttype); ; break;} case 284: #line 1323 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 285: #line 1325 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; ; break;} case 286: #line 1330 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; ; break;} case 287: #line 1332 "c-parse.y" { yyval.ttype = chainon (yyvsp[-1].ttype, yyvsp[0].ttype); ; break;} case 288: #line 1338 "c-parse.y" { yyval.ttype = yyvsp[-3].ttype; ; break;} case 289: #line 1340 "c-parse.y" {; break;} case 290: #line 1345 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; ; break;} case 291: #line 1347 "c-parse.y" { yyval.ttype = chainon (yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 292: #line 1352 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 293: #line 1354 "c-parse.y" { yyval.ttype = build_tree_list (yyvsp[0].ttype, NULL_TREE); ; break;} case 294: #line 1356 "c-parse.y" { yyval.ttype = build_tree_list (yyvsp[-3].ttype, build_tree_list (NULL_TREE, yyvsp[-1].ttype)); ; break;} case 295: #line 1358 "c-parse.y" { yyval.ttype = build_tree_list (yyvsp[-5].ttype, tree_cons (NULL_TREE, yyvsp[-3].ttype, yyvsp[-1].ttype)); ; break;} case 296: #line 1360 "c-parse.y" { yyval.ttype = build_tree_list (yyvsp[-3].ttype, yyvsp[-1].ttype); ; break;} case 304: #line 1383 "c-parse.y" { really_start_incremental_init (NULL_TREE); ; break;} case 305: #line 1385 "c-parse.y" { yyval.ttype = pop_init_level (0); ; break;} case 306: #line 1387 "c-parse.y" { yyval.ttype = error_mark_node; ; break;} case 307: #line 1393 "c-parse.y" { if (pedantic) pedwarn ("ISO C forbids empty initializer braces"); ; break;} case 311: #line 1407 "c-parse.y" { if (pedantic && ! 
flag_isoc99) pedwarn ("ISO C90 forbids specifying subobject to initialize"); ; break;} case 312: #line 1410 "c-parse.y" { if (pedantic) pedwarn ("obsolete use of designated initializer without `='"); ; break;} case 313: #line 1413 "c-parse.y" { set_init_label (yyvsp[-1].ttype); if (pedantic) pedwarn ("obsolete use of designated initializer with `:'"); ; break;} case 314: #line 1417 "c-parse.y" {; break;} case 316: #line 1423 "c-parse.y" { push_init_level (0); ; break;} case 317: #line 1425 "c-parse.y" { process_init_element (pop_init_level (0)); ; break;} case 318: #line 1427 "c-parse.y" { process_init_element (yyvsp[0].ttype); ; break;} case 322: #line 1438 "c-parse.y" { set_init_label (yyvsp[0].ttype); ; break;} case 323: #line 1440 "c-parse.y" { set_init_index (yyvsp[-3].ttype, yyvsp[-1].ttype); if (pedantic) pedwarn ("ISO C forbids specifying range of elements to initialize"); ; break;} case 324: #line 1444 "c-parse.y" { set_init_index (yyvsp[-1].ttype, NULL_TREE); ; break;} case 325: #line 1449 "c-parse.y" { if (pedantic) pedwarn ("ISO C forbids nested functions"); push_function_context (); if (! start_function (current_declspecs, yyvsp[0].ttype, all_prefix_attributes)) { pop_function_context (); YYERROR1; } ; break;} case 326: #line 1461 "c-parse.y" { tree decl = current_function_decl; DECL_SOURCE_LOCATION (decl) = yyvsp[0].location; store_parm_decls (); ; break;} case 327: #line 1470 "c-parse.y" { tree decl = current_function_decl; add_stmt (yyvsp[0].ttype); finish_function (); pop_function_context (); add_stmt (build_stmt (DECL_EXPR, decl)); ; break;} case 328: #line 1479 "c-parse.y" { if (pedantic) pedwarn ("ISO C forbids nested functions"); push_function_context (); if (! start_function (current_declspecs, yyvsp[0].ttype, all_prefix_attributes)) { pop_function_context (); YYERROR1; } ; break;} case 329: #line 1491 "c-parse.y" { tree decl = current_function_decl; DECL_SOURCE_LOCATION (decl) = yyvsp[0].location; store_parm_decls (); ; break;} case 330: #line 1500 "c-parse.y" { tree decl = current_function_decl; add_stmt (yyvsp[0].ttype); finish_function (); pop_function_context (); add_stmt (build_stmt (DECL_EXPR, decl)); ; break;} case 333: #line 1519 "c-parse.y" { yyval.ttype = yyvsp[-2].ttype ? tree_cons (yyvsp[-2].ttype, yyvsp[-1].ttype, NULL_TREE) : yyvsp[-1].ttype; ; break;} case 334: #line 1521 "c-parse.y" { yyval.ttype = build_nt (CALL_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype, NULL_TREE); ; break;} case 335: #line 1523 "c-parse.y" { yyval.ttype = set_array_declarator_type (yyvsp[0].ttype, yyvsp[-1].ttype, 0); ; break;} case 336: #line 1525 "c-parse.y" { yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ; break;} case 340: #line 1540 "c-parse.y" { yyval.ttype = build_nt (CALL_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype, NULL_TREE); ; break;} case 341: #line 1542 "c-parse.y" { yyval.ttype = set_array_declarator_type (yyvsp[0].ttype, yyvsp[-1].ttype, 0); ; break;} case 343: #line 1548 "c-parse.y" { yyval.ttype = build_nt (CALL_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype, NULL_TREE); ; break;} case 344: #line 1550 "c-parse.y" { yyval.ttype = set_array_declarator_type (yyvsp[0].ttype, yyvsp[-1].ttype, 0); ; break;} case 345: #line 1552 "c-parse.y" { yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ; break;} case 346: #line 1554 "c-parse.y" { yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ; break;} case 347: #line 1556 "c-parse.y" { yyval.ttype = yyvsp[-2].ttype ? 
tree_cons (yyvsp[-2].ttype, yyvsp[-1].ttype, NULL_TREE) : yyvsp[-1].ttype; ; break;} case 348: #line 1564 "c-parse.y" { yyval.ttype = build_nt (CALL_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype, NULL_TREE); ; break;} case 349: #line 1566 "c-parse.y" { yyval.ttype = yyvsp[-2].ttype ? tree_cons (yyvsp[-2].ttype, yyvsp[-1].ttype, NULL_TREE) : yyvsp[-1].ttype; ; break;} case 350: #line 1568 "c-parse.y" { yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ; break;} case 351: #line 1570 "c-parse.y" { yyval.ttype = set_array_declarator_type (yyvsp[0].ttype, yyvsp[-1].ttype, 0); ; break;} case 353: #line 1576 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 354: #line 1578 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; ; break;} case 355: #line 1583 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 356: #line 1585 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; ; break;} case 357: #line 1590 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 358: #line 1592 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; ; break;} case 359: #line 1603 "c-parse.y" { yyval.ttype = start_struct (RECORD_TYPE, yyvsp[-1].ttype); /* Start scope of tag before parsing components. */ ; break;} case 360: #line 1607 "c-parse.y" { yyval.ttype = finish_struct (yyvsp[-3].ttype, nreverse (yyvsp[-2].ttype), chainon (yyvsp[-6].ttype, yyvsp[0].ttype)); ; break;} case 361: #line 1610 "c-parse.y" { yyval.ttype = finish_struct (start_struct (RECORD_TYPE, NULL_TREE), nreverse (yyvsp[-2].ttype), chainon (yyvsp[-4].ttype, yyvsp[0].ttype)); ; break;} case 362: #line 1614 "c-parse.y" { yyval.ttype = start_struct (UNION_TYPE, yyvsp[-1].ttype); ; break;} case 363: #line 1616 "c-parse.y" { yyval.ttype = finish_struct (yyvsp[-3].ttype, nreverse (yyvsp[-2].ttype), chainon (yyvsp[-6].ttype, yyvsp[0].ttype)); ; break;} case 364: #line 1619 "c-parse.y" { yyval.ttype = finish_struct (start_struct (UNION_TYPE, NULL_TREE), nreverse (yyvsp[-2].ttype), chainon (yyvsp[-4].ttype, yyvsp[0].ttype)); ; break;} case 365: #line 1623 "c-parse.y" { yyval.ttype = start_enum (yyvsp[-1].ttype); ; break;} case 366: #line 1625 "c-parse.y" { yyval.ttype = finish_enum (yyvsp[-4].ttype, nreverse (yyvsp[-3].ttype), chainon (yyvsp[-7].ttype, yyvsp[0].ttype)); ; break;} case 367: #line 1628 "c-parse.y" { yyval.ttype = start_enum (NULL_TREE); ; break;} case 368: #line 1630 "c-parse.y" { yyval.ttype = finish_enum (yyvsp[-4].ttype, nreverse (yyvsp[-3].ttype), chainon (yyvsp[-6].ttype, yyvsp[0].ttype)); ; break;} case 369: #line 1636 "c-parse.y" { yyval.ttype = xref_tag (RECORD_TYPE, yyvsp[0].ttype); ; break;} case 370: #line 1638 "c-parse.y" { yyval.ttype = xref_tag (UNION_TYPE, yyvsp[0].ttype); ; break;} case 371: #line 1640 "c-parse.y" { yyval.ttype = xref_tag (ENUMERAL_TYPE, yyvsp[0].ttype); /* In ISO C, enumerated types can be referred to only if already defined. */ if (pedantic && !COMPLETE_TYPE_P (yyval.ttype)) pedwarn ("ISO C forbids forward references to `enum' types"); ; break;} case 375: #line 1655 "c-parse.y" { if (pedantic && ! 
flag_isoc99) pedwarn ("comma at end of enumerator list"); ; break;} case 376: #line 1673 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; ; break;} case 377: #line 1675 "c-parse.y" { yyval.ttype = chainon (yyvsp[0].ttype, yyvsp[-1].ttype); pedwarn ("no semicolon at end of struct or union"); ; break;} case 378: #line 1680 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 379: #line 1682 "c-parse.y" { yyval.ttype = chainon (yyvsp[-1].ttype, yyvsp[-2].ttype); ; break;} case 380: #line 1684 "c-parse.y" { if (pedantic) pedwarn ("extra semicolon in struct or union specified"); ; break;} case 381: #line 1690 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; POP_DECLSPEC_STACK; ; break;} case 382: #line 1693 "c-parse.y" { /* Support for unnamed structs or unions as members of structs or unions (which is [a] useful and [b] supports MS P-SDK). */ if (pedantic) pedwarn ("ISO C doesn't support unnamed structs/unions"); yyval.ttype = grokfield(NULL, current_declspecs, NULL_TREE); POP_DECLSPEC_STACK; ; break;} case 383: #line 1703 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; POP_DECLSPEC_STACK; ; break;} case 384: #line 1706 "c-parse.y" { if (pedantic) pedwarn ("ISO C forbids member declarations with no members"); shadow_tag_warned (yyvsp[0].ttype, pedantic); yyval.ttype = NULL_TREE; ; break;} case 385: #line 1711 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 386: #line 1713 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; RESTORE_EXT_FLAGS (yyvsp[-1].itype); ; break;} case 388: #line 1720 "c-parse.y" { TREE_CHAIN (yyvsp[0].ttype) = yyvsp[-3].ttype; yyval.ttype = yyvsp[0].ttype; ; break;} case 390: #line 1726 "c-parse.y" { TREE_CHAIN (yyvsp[0].ttype) = yyvsp[-3].ttype; yyval.ttype = yyvsp[0].ttype; ; break;} case 391: #line 1731 "c-parse.y" { yyval.ttype = grokfield (yyvsp[-1].ttype, current_declspecs, NULL_TREE); decl_attributes (&yyval.ttype, chainon (yyvsp[0].ttype, all_prefix_attributes), 0); ; break;} case 392: #line 1735 "c-parse.y" { yyval.ttype = grokfield (yyvsp[-3].ttype, current_declspecs, yyvsp[-1].ttype); decl_attributes (&yyval.ttype, chainon (yyvsp[0].ttype, all_prefix_attributes), 0); ; break;} case 393: #line 1739 "c-parse.y" { yyval.ttype = grokfield (NULL_TREE, current_declspecs, yyvsp[-1].ttype); decl_attributes (&yyval.ttype, chainon (yyvsp[0].ttype, all_prefix_attributes), 0); ; break;} case 394: #line 1746 "c-parse.y" { yyval.ttype = grokfield (yyvsp[-1].ttype, current_declspecs, NULL_TREE); decl_attributes (&yyval.ttype, chainon (yyvsp[0].ttype, all_prefix_attributes), 0); ; break;} case 395: #line 1750 "c-parse.y" { yyval.ttype = grokfield (yyvsp[-3].ttype, current_declspecs, yyvsp[-1].ttype); decl_attributes (&yyval.ttype, chainon (yyvsp[0].ttype, all_prefix_attributes), 0); ; break;} case 396: #line 1754 "c-parse.y" { yyval.ttype = grokfield (NULL_TREE, current_declspecs, yyvsp[-1].ttype); decl_attributes (&yyval.ttype, chainon (yyvsp[0].ttype, all_prefix_attributes), 0); ; break;} case 398: #line 1765 "c-parse.y" { if (yyvsp[-2].ttype == error_mark_node) yyval.ttype = yyvsp[-2].ttype; else TREE_CHAIN (yyvsp[0].ttype) = yyvsp[-2].ttype, yyval.ttype = yyvsp[0].ttype; ; break;} case 399: #line 1770 "c-parse.y" { yyval.ttype = error_mark_node; ; break;} case 400: #line 1776 "c-parse.y" { yyval.ttype = build_enumerator (yyvsp[0].ttype, NULL_TREE); ; break;} case 401: #line 1778 "c-parse.y" { yyval.ttype = build_enumerator (yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 402: #line 1783 "c-parse.y" { pending_xref_error (); yyval.ttype = yyvsp[0].ttype; ; break;} case 403: #line 1786 
"c-parse.y" { yyval.ttype = build_tree_list (yyvsp[-1].ttype, yyvsp[0].ttype); ; break;} case 404: #line 1791 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 406: #line 1797 "c-parse.y" { yyval.ttype = build_tree_list (build_tree_list (current_declspecs, NULL_TREE), all_prefix_attributes); ; break;} case 407: #line 1801 "c-parse.y" { yyval.ttype = build_tree_list (build_tree_list (current_declspecs, yyvsp[0].ttype), all_prefix_attributes); ; break;} case 408: #line 1805 "c-parse.y" { yyval.ttype = build_tree_list (build_tree_list (current_declspecs, yyvsp[-1].ttype), chainon (yyvsp[0].ttype, all_prefix_attributes)); ; break;} case 412: #line 1818 "c-parse.y" { yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ; break;} case 413: #line 1823 "c-parse.y" { yyval.ttype = make_pointer_declarator (yyvsp[0].ttype, NULL_TREE); ; break;} case 414: #line 1825 "c-parse.y" { yyval.ttype = make_pointer_declarator (yyvsp[-1].ttype, yyvsp[0].ttype); ; break;} case 415: #line 1830 "c-parse.y" { yyval.ttype = yyvsp[-2].ttype ? tree_cons (yyvsp[-2].ttype, yyvsp[-1].ttype, NULL_TREE) : yyvsp[-1].ttype; ; break;} case 416: #line 1832 "c-parse.y" { yyval.ttype = build_nt (CALL_EXPR, yyvsp[-2].ttype, yyvsp[0].ttype, NULL_TREE); ; break;} case 417: #line 1834 "c-parse.y" { yyval.ttype = set_array_declarator_type (yyvsp[0].ttype, yyvsp[-1].ttype, 1); ; break;} case 418: #line 1836 "c-parse.y" { yyval.ttype = build_nt (CALL_EXPR, NULL_TREE, yyvsp[0].ttype, NULL_TREE); ; break;} case 419: #line 1838 "c-parse.y" { yyval.ttype = set_array_declarator_type (yyvsp[0].ttype, NULL_TREE, 1); ; break;} case 420: #line 1845 "c-parse.y" { yyval.ttype = build_array_declarator (yyvsp[-1].ttype, yyvsp[-2].ttype, 0, 0); ; break;} case 421: #line 1847 "c-parse.y" { yyval.ttype = build_array_declarator (NULL_TREE, yyvsp[-1].ttype, 0, 0); ; break;} case 422: #line 1849 "c-parse.y" { yyval.ttype = build_array_declarator (NULL_TREE, yyvsp[-2].ttype, 0, 1); ; break;} case 423: #line 1851 "c-parse.y" { yyval.ttype = build_array_declarator (yyvsp[-1].ttype, yyvsp[-2].ttype, 1, 0); ; break;} case 424: #line 1854 "c-parse.y" { yyval.ttype = build_array_declarator (yyvsp[-1].ttype, yyvsp[-3].ttype, 1, 0); ; break;} case 427: #line 1867 "c-parse.y" { error ("label at end of compound statement"); ; break;} case 435: #line 1884 "c-parse.y" { if ((pedantic && !flag_isoc99) || warn_declaration_after_statement) pedwarn_c90 ("ISO C90 forbids mixed declarations and code"); ; break;} case 450: #line 1918 "c-parse.y" { yyval.ttype = c_begin_compound_stmt (flag_isoc99); ; break;} case 452: #line 1926 "c-parse.y" { if (pedantic) pedwarn ("ISO C forbids label declarations"); ; break;} case 455: #line 1937 "c-parse.y" { tree link; for (link = yyvsp[-1].ttype; link; link = TREE_CHAIN (link)) { tree label = declare_label (TREE_VALUE (link)); C_DECLARED_LABEL_FLAG (label) = 1; add_stmt (build_stmt (DECL_EXPR, label)); } ; break;} case 456: #line 1951 "c-parse.y" { add_stmt (yyvsp[0].ttype); ; break;} case 458: #line 1955 "c-parse.y" { yyval.ttype = c_begin_compound_stmt (true); ; break;} case 463: #line 1969 "c-parse.y" { if (current_function_decl == 0) { error ("braced-group within expression allowed " "only inside a function"); YYERROR; } yyval.ttype = c_begin_stmt_expr (); ; break;} case 464: #line 1980 "c-parse.y" { yyval.ttype = c_end_compound_stmt (yyvsp[-1].ttype, true); ; break;} case 465: #line 1988 "c-parse.y" { if (yychar == YYEMPTY) yychar = YYLEX; yyval.location = input_location; ; break;} case 468: #line 2001 
"c-parse.y" { yyval.ttype = c_end_compound_stmt (yyvsp[-2].ttype, flag_isoc99); ; break;} case 469: #line 2006 "c-parse.y" { /* Two cases cannot and do not have line numbers associated: If stmt is degenerate, such as "2;", then stmt is an INTEGER_CST, which cannot hold line numbers. But that's ok because the statement will either be changed to a MODIFY_EXPR during gimplification of the statement expr, or discarded. If stmt was compound, but without new variables, we will have skipped the creation of a BIND and will have a bare STATEMENT_LIST. But that's ok because (recursively) all of the component statments should already have line numbers assigned. */ if (yyvsp[0].ttype && EXPR_P (yyvsp[0].ttype)) SET_EXPR_LOCATION (yyvsp[0].ttype, yyvsp[-1].location); ; break;} case 470: #line 2024 "c-parse.y" { if (yyvsp[0].ttype) SET_EXPR_LOCATION (yyvsp[0].ttype, yyvsp[-1].location); ; break;} case 471: #line 2028 "c-parse.y" { yyval.ttype = lang_hooks.truthvalue_conversion (yyvsp[0].ttype); if (EXPR_P (yyval.ttype)) SET_EXPR_LOCATION (yyval.ttype, yyvsp[-1].location); ; break;} case 472: #line 2041 "c-parse.y" { yyval.ttype = c_end_compound_stmt (yyvsp[-2].ttype, flag_isoc99); ; break;} case 473: #line 2046 "c-parse.y" { if (extra_warnings) add_stmt (build (NOP_EXPR, NULL_TREE, NULL_TREE)); yyval.ttype = c_end_compound_stmt (yyvsp[-2].ttype, flag_isoc99); ; break;} case 475: #line 2055 "c-parse.y" { c_finish_if_stmt (yyvsp[-6].location, yyvsp[-4].ttype, yyvsp[-2].ttype, yyvsp[0].ttype, true); add_stmt (c_end_compound_stmt (yyvsp[-7].ttype, flag_isoc99)); ; break;} case 476: #line 2059 "c-parse.y" { c_finish_if_stmt (yyvsp[-6].location, yyvsp[-4].ttype, yyvsp[-2].ttype, yyvsp[0].ttype, false); add_stmt (c_end_compound_stmt (yyvsp[-7].ttype, flag_isoc99)); ; break;} case 477: #line 2063 "c-parse.y" { c_finish_if_stmt (yyvsp[-4].location, yyvsp[-2].ttype, yyvsp[0].ttype, NULL, true); add_stmt (c_end_compound_stmt (yyvsp[-5].ttype, flag_isoc99)); ; break;} case 478: #line 2067 "c-parse.y" { c_finish_if_stmt (yyvsp[-4].location, yyvsp[-2].ttype, yyvsp[0].ttype, NULL, false); add_stmt (c_end_compound_stmt (yyvsp[-5].ttype, flag_isoc99)); ; break;} case 479: #line 2072 "c-parse.y" { yyval.ttype = c_break_label; c_break_label = NULL; ; break;} case 480: #line 2076 "c-parse.y" { yyval.ttype = c_cont_label; c_cont_label = NULL; ; break;} case 481: #line 2082 "c-parse.y" { c_finish_loop (yyvsp[-6].location, yyvsp[-4].ttype, NULL, yyvsp[0].ttype, c_break_label, c_cont_label, true); add_stmt (c_end_compound_stmt (yyvsp[-7].ttype, flag_isoc99)); c_break_label = yyvsp[-2].ttype; c_cont_label = yyvsp[-1].ttype; ; break;} case 482: #line 2091 "c-parse.y" { yyval.ttype = c_break_label; c_break_label = yyvsp[-3].ttype; ; break;} case 483: #line 2092 "c-parse.y" { yyval.ttype = c_cont_label; c_cont_label = yyvsp[-3].ttype; ; break;} case 484: #line 2094 "c-parse.y" { c_finish_loop (yyvsp[-10].location, yyvsp[-2].ttype, NULL, yyvsp[-7].ttype, yyvsp[-5].ttype, yyvsp[-4].ttype, false); add_stmt (c_end_compound_stmt (yyvsp[-11].ttype, flag_isoc99)); ; break;} case 485: #line 2101 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 487: #line 2107 "c-parse.y" { c_finish_expr_stmt (yyvsp[-1].ttype); ; break;} case 488: #line 2109 "c-parse.y" { check_for_loop_decls (); ; break;} case 489: #line 2113 "c-parse.y" { if (yyvsp[0].ttype) { yyval.ttype = lang_hooks.truthvalue_conversion (yyvsp[0].ttype); if (EXPR_P (yyval.ttype)) SET_EXPR_LOCATION (yyval.ttype, yyvsp[-1].location); } else yyval.ttype = NULL; ; break;} case 490: 
#line 2125 "c-parse.y" { yyval.ttype = c_process_expr_stmt (yyvsp[0].ttype); ; break;} case 491: #line 2132 "c-parse.y" { c_finish_loop (yyvsp[-7].location, yyvsp[-6].ttype, yyvsp[-4].ttype, yyvsp[0].ttype, c_break_label, c_cont_label, true); add_stmt (c_end_compound_stmt (yyvsp[-10].ttype, flag_isoc99)); c_break_label = yyvsp[-2].ttype; c_cont_label = yyvsp[-1].ttype; ; break;} case 492: #line 2140 "c-parse.y" { yyval.ttype = c_start_case (yyvsp[-1].ttype); ; break;} case 493: #line 2142 "c-parse.y" { c_finish_case (yyvsp[0].ttype); if (c_break_label) add_stmt (build (LABEL_EXPR, void_type_node, c_break_label)); c_break_label = yyvsp[-1].ttype; add_stmt (c_end_compound_stmt (yyvsp[-6].ttype, flag_isoc99)); ; break;} case 494: #line 2153 "c-parse.y" { yyval.ttype = c_finish_expr_stmt (yyvsp[-1].ttype); ; break;} case 495: #line 2155 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 496: #line 2157 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 497: #line 2159 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 498: #line 2161 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 499: #line 2163 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 500: #line 2165 "c-parse.y" { yyval.ttype = c_finish_bc_stmt (&c_break_label, true); ; break;} case 501: #line 2167 "c-parse.y" { yyval.ttype = c_finish_bc_stmt (&c_cont_label, false); ; break;} case 502: #line 2169 "c-parse.y" { yyval.ttype = c_finish_return (NULL_TREE); ; break;} case 503: #line 2171 "c-parse.y" { yyval.ttype = c_finish_return (yyvsp[-1].ttype); ; break;} case 505: #line 2174 "c-parse.y" { yyval.ttype = c_finish_goto_label (yyvsp[-1].ttype); ; break;} case 506: #line 2176 "c-parse.y" { yyval.ttype = c_finish_goto_ptr (yyvsp[-1].ttype); ; break;} case 507: #line 2178 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 508: #line 2184 "c-parse.y" { add_stmt (yyvsp[0].ttype); yyval.ttype = NULL_TREE; ; break;} case 510: #line 2193 "c-parse.y" { yyval.ttype = do_case (yyvsp[-1].ttype, NULL_TREE); ; break;} case 511: #line 2195 "c-parse.y" { yyval.ttype = do_case (yyvsp[-3].ttype, yyvsp[-1].ttype); ; break;} case 512: #line 2197 "c-parse.y" { yyval.ttype = do_case (NULL_TREE, NULL_TREE); ; break;} case 513: #line 2199 "c-parse.y" { tree label = define_label (yyvsp[-2].location, yyvsp[-3].ttype); if (label) { decl_attributes (&label, yyvsp[0].ttype, 0); yyval.ttype = add_stmt (build_stmt (LABEL_EXPR, label)); } else yyval.ttype = NULL_TREE; ; break;} case 514: #line 2217 "c-parse.y" { yyval.ttype = yyvsp[-2].ttype; ; break;} case 515: #line 2223 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 517: #line 2230 "c-parse.y" { assemble_asm (yyvsp[-1].ttype); ; break;} case 518: #line 2232 "c-parse.y" {; break;} case 519: #line 2240 "c-parse.y" { yyval.ttype = build_asm_stmt (yyvsp[-6].ttype, yyvsp[-3].ttype); ; break;} case 520: #line 2246 "c-parse.y" { yyval.ttype = build_asm_expr (yyvsp[0].ttype, 0, 0, 0, true); ; break;} case 521: #line 2249 "c-parse.y" { yyval.ttype = build_asm_expr (yyvsp[-2].ttype, yyvsp[0].ttype, 0, 0, false); ; break;} case 522: #line 2252 "c-parse.y" { yyval.ttype = build_asm_expr (yyvsp[-4].ttype, yyvsp[-2].ttype, yyvsp[0].ttype, 0, false); ; break;} case 523: #line 2255 "c-parse.y" { yyval.ttype = build_asm_expr (yyvsp[-6].ttype, yyvsp[-4].ttype, yyvsp[-2].ttype, yyvsp[0].ttype, false); ; break;} case 524: #line 2262 "c-parse.y" { yyval.ttype = 0; ; break;} case 525: #line 2264 "c-parse.y" { if (yyvsp[0].ttype != ridpointers[RID_VOLATILE]) { warning ("%E qualifier ignored on asm", 
yyvsp[0].ttype); yyval.ttype = 0; } else yyval.ttype = yyvsp[0].ttype; ; break;} case 526: #line 2277 "c-parse.y" { yyval.ttype = NULL_TREE; ; break;} case 529: #line 2284 "c-parse.y" { yyval.ttype = chainon (yyvsp[-2].ttype, yyvsp[0].ttype); ; break;} case 530: #line 2289 "c-parse.y" { yyval.ttype = build_tree_list (build_tree_list (NULL_TREE, yyvsp[-5].ttype), yyvsp[-2].ttype); ; break;} case 531: #line 2292 "c-parse.y" { yyvsp[-7].ttype = build_string (IDENTIFIER_LENGTH (yyvsp[-7].ttype), IDENTIFIER_POINTER (yyvsp[-7].ttype)); yyval.ttype = build_tree_list (build_tree_list (yyvsp[-7].ttype, yyvsp[-5].ttype), yyvsp[-2].ttype); ; break;} case 532: #line 2299 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, NULL_TREE); ; break;} case 533: #line 2301 "c-parse.y" { yyval.ttype = tree_cons (NULL_TREE, yyvsp[0].ttype, yyvsp[-2].ttype); ; break;} case 534: #line 2305 "c-parse.y" { c_lex_string_translate = 0; ; break;} case 535: #line 2309 "c-parse.y" { c_lex_string_translate = 1; ; break;} case 536: #line 2320 "c-parse.y" { push_scope (); declare_parm_level (); ; break;} case 537: #line 2323 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; pop_scope (); ; break;} case 539: #line 2330 "c-parse.y" { mark_forward_parm_decls (); ; break;} case 540: #line 2332 "c-parse.y" { /* Dummy action so attributes are in known place on parser stack. */ ; break;} case 541: #line 2335 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; ; break;} case 542: #line 2337 "c-parse.y" { yyval.ttype = make_node (TREE_LIST); ; break;} case 543: #line 2343 "c-parse.y" { yyval.ttype = make_node (TREE_LIST); ; break;} case 544: #line 2345 "c-parse.y" { yyval.ttype = make_node (TREE_LIST); /* Suppress -Wold-style-definition for this case. */ TREE_CHAIN (yyval.ttype) = error_mark_node; error ("ISO C requires a named argument before `...'"); ; break;} case 545: #line 2351 "c-parse.y" { yyval.ttype = get_parm_info (/*ellipsis=*/false); ; break;} case 546: #line 2353 "c-parse.y" { yyval.ttype = get_parm_info (/*ellipsis=*/true); ; break;} case 547: #line 2358 "c-parse.y" { push_parm_decl (yyvsp[0].ttype); ; break;} case 548: #line 2360 "c-parse.y" { push_parm_decl (yyvsp[0].ttype); ; break;} case 549: #line 2367 "c-parse.y" { yyval.ttype = build_tree_list (build_tree_list (current_declspecs, yyvsp[-1].ttype), chainon (yyvsp[0].ttype, all_prefix_attributes)); POP_DECLSPEC_STACK; ; break;} case 550: #line 2372 "c-parse.y" { yyval.ttype = build_tree_list (build_tree_list (current_declspecs, yyvsp[-1].ttype), chainon (yyvsp[0].ttype, all_prefix_attributes)); POP_DECLSPEC_STACK; ; break;} case 551: #line 2377 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; POP_DECLSPEC_STACK; ; break;} case 552: #line 2380 "c-parse.y" { yyval.ttype = build_tree_list (build_tree_list (current_declspecs, yyvsp[-1].ttype), chainon (yyvsp[0].ttype, all_prefix_attributes)); POP_DECLSPEC_STACK; ; break;} case 553: #line 2386 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; POP_DECLSPEC_STACK; ; break;} case 554: #line 2394 "c-parse.y" { yyval.ttype = build_tree_list (build_tree_list (current_declspecs, yyvsp[-1].ttype), chainon (yyvsp[0].ttype, all_prefix_attributes)); POP_DECLSPEC_STACK; ; break;} case 555: #line 2399 "c-parse.y" { yyval.ttype = build_tree_list (build_tree_list (current_declspecs, yyvsp[-1].ttype), chainon (yyvsp[0].ttype, all_prefix_attributes)); POP_DECLSPEC_STACK; ; break;} case 556: #line 2404 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; POP_DECLSPEC_STACK; ; break;} case 557: #line 2407 "c-parse.y" { yyval.ttype = build_tree_list 
(build_tree_list (current_declspecs, yyvsp[-1].ttype), chainon (yyvsp[0].ttype, all_prefix_attributes)); POP_DECLSPEC_STACK; ; break;} case 558: #line 2413 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; POP_DECLSPEC_STACK; ; break;} case 559: #line 2419 "c-parse.y" { prefix_attributes = chainon (prefix_attributes, yyvsp[-3].ttype); all_prefix_attributes = prefix_attributes; ; break;} case 560: #line 2428 "c-parse.y" { push_scope (); declare_parm_level (); ; break;} case 561: #line 2431 "c-parse.y" { yyval.ttype = yyvsp[0].ttype; pop_scope (); ; break;} case 563: #line 2438 "c-parse.y" { tree t; for (t = yyvsp[-1].ttype; t; t = TREE_CHAIN (t)) if (TREE_VALUE (t) == NULL_TREE) error ("`...' in old-style identifier list"); yyval.ttype = tree_cons (NULL_TREE, NULL_TREE, yyvsp[-1].ttype); /* Make sure we have a parmlist after attributes. */ if (yyvsp[-3].ttype != 0 && (TREE_CODE (yyval.ttype) != TREE_LIST || TREE_PURPOSE (yyval.ttype) == 0 || TREE_CODE (TREE_PURPOSE (yyval.ttype)) != PARM_DECL)) YYERROR1; ; break;} case 564: #line 2456 "c-parse.y" { yyval.ttype = build_tree_list (NULL_TREE, yyvsp[0].ttype); ; break;} case 565: #line 2458 "c-parse.y" { yyval.ttype = chainon (yyvsp[-2].ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ; break;} case 566: #line 2464 "c-parse.y" { yyval.ttype = build_tree_list (NULL_TREE, yyvsp[0].ttype); ; break;} case 567: #line 2466 "c-parse.y" { yyval.ttype = chainon (yyvsp[-2].ttype, build_tree_list (NULL_TREE, yyvsp[0].ttype)); ; break;} case 568: #line 2471 "c-parse.y" { yyval.itype = SAVE_EXT_FLAGS(); pedantic = 0; warn_pointer_arith = 0; warn_traditional = 0; flag_iso = 0; ; break;} } #line 705 "/usr/share/bison/bison.simple" yyvsp -= yylen; yyssp -= yylen; #if YYLSP_NEEDED yylsp -= yylen; #endif #if YYDEBUG if (yydebug) { short *yyssp1 = yyss - 1; YYFPRINTF (stderr, "state stack now"); while (yyssp1 != yyssp) YYFPRINTF (stderr, " %d", *++yyssp1); YYFPRINTF (stderr, "\n"); } #endif *++yyvsp = yyval; #if YYLSP_NEEDED *++yylsp = yyloc; #endif /* Now `shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ yyn = yyr1[yyn]; yystate = yypgoto[yyn - YYNTBASE] + *yyssp; if (yystate >= 0 && yystate <= YYLAST && yycheck[yystate] == *yyssp) yystate = yytable[yystate]; else yystate = yydefgoto[yyn - YYNTBASE]; goto yynewstate; /*------------------------------------. | yyerrlab -- here on detecting error | `------------------------------------*/ yyerrlab: /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #ifdef YYERROR_VERBOSE yyn = yypact[yystate]; if (yyn > YYFLAG && yyn < YYLAST) { YYSIZE_T yysize = 0; char *yymsg; int yyx, yycount; yycount = 0; /* Start YYX at -YYN if negative to avoid negative indexes in YYCHECK. */ for (yyx = yyn < 0 ? -yyn : 0; yyx < (int) (sizeof (yytname) / sizeof (char *)); yyx++) if (yycheck[yyx + yyn] == yyx) yysize += yystrlen (yytname[yyx]) + 15, yycount++; yysize += yystrlen ("parse error, unexpected ") + 1; yysize += yystrlen (yytname[YYTRANSLATE (yychar)]); yymsg = (char *) YYSTACK_ALLOC (yysize); if (yymsg != 0) { char *yyp = yystpcpy (yymsg, "parse error, unexpected "); yyp = yystpcpy (yyp, yytname[YYTRANSLATE (yychar)]); if (yycount < 5) { yycount = 0; for (yyx = yyn < 0 ? -yyn : 0; yyx < (int) (sizeof (yytname) / sizeof (char *)); yyx++) if (yycheck[yyx + yyn] == yyx) { const char *yyq = ! yycount ? 
", expecting " : " or "; yyp = yystpcpy (yyp, yyq); yyp = yystpcpy (yyp, yytname[yyx]); yycount++; } } yyerror (yymsg); YYSTACK_FREE (yymsg); } else yyerror ("parse error; also virtual memory exhausted"); } else #endif /* defined (YYERROR_VERBOSE) */ yyerror ("parse error"); } goto yyerrlab1; /*--------------------------------------------------. | yyerrlab1 -- error raised explicitly by an action | `--------------------------------------------------*/ yyerrlab1: if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ /* return failure if at end of input */ if (yychar == YYEOF) YYABORT; YYDPRINTF ((stderr, "Discarding token %d (%s).\n", yychar, yytname[yychar1])); yychar = YYEMPTY; } /* Else will try to reuse lookahead token after shifting the error token. */ yyerrstatus = 3; /* Each real token shifted decrements this */ goto yyerrhandle; /*-------------------------------------------------------------------. | yyerrdefault -- current state does not do anything special for the | | error token. | `-------------------------------------------------------------------*/ yyerrdefault: #if 0 /* This is wrong; only states that explicitly want error tokens should shift them. */ /* If its default is to accept any token, ok. Otherwise pop it. */ yyn = yydefact[yystate]; if (yyn) goto yydefault; #endif /*---------------------------------------------------------------. | yyerrpop -- pop the current state because it cannot handle the | | error token | `---------------------------------------------------------------*/ yyerrpop: if (yyssp == yyss) YYABORT; yyvsp--; yystate = *--yyssp; #if YYLSP_NEEDED yylsp--; #endif #if YYDEBUG if (yydebug) { short *yyssp1 = yyss - 1; YYFPRINTF (stderr, "Error: state stack now"); while (yyssp1 != yyssp) YYFPRINTF (stderr, " %d", *++yyssp1); YYFPRINTF (stderr, "\n"); } #endif /*--------------. | yyerrhandle. | `--------------*/ yyerrhandle: yyn = yypact[yystate]; if (yyn == YYFLAG) goto yyerrdefault; yyn += YYTERROR; if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != YYTERROR) goto yyerrdefault; yyn = yytable[yyn]; if (yyn < 0) { if (yyn == YYFLAG) goto yyerrpop; yyn = -yyn; goto yyreduce; } else if (yyn == 0) goto yyerrpop; if (yyn == YYFINAL) YYACCEPT; YYDPRINTF ((stderr, "Shifting error token, ")); *++yyvsp = yylval; #if YYLSP_NEEDED *++yylsp = yylloc; #endif yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. | `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; yyreturn: #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif return yyresult; } #line 2478 "c-parse.y" /* yylex() is a thin wrapper around c_lex(), all it does is translate cpplib.h's token codes into yacc's token codes. */ static enum cpp_ttype last_token; /* The reserved keyword table. */ struct resword { const char *word; ENUM_BITFIELD(rid) rid : 16; unsigned int disable : 16; }; /* Disable mask. Keywords are disabled if (reswords[i].disable & mask) is _true_. 
*/ #define D_C89 0x01 /* not in C89 */ #define D_EXT 0x02 /* GCC extension */ #define D_EXT89 0x04 /* GCC extension incorporated in C99 */ #define D_OBJC 0x08 /* Objective C only */ static const struct resword reswords[] = { { "_Bool", RID_BOOL, 0 }, { "_Complex", RID_COMPLEX, 0 }, { "__FUNCTION__", RID_FUNCTION_NAME, 0 }, { "__PRETTY_FUNCTION__", RID_PRETTY_FUNCTION_NAME, 0 }, { "__alignof", RID_ALIGNOF, 0 }, { "__alignof__", RID_ALIGNOF, 0 }, { "__asm", RID_ASM, 0 }, { "__asm__", RID_ASM, 0 }, { "__attribute", RID_ATTRIBUTE, 0 }, { "__attribute__", RID_ATTRIBUTE, 0 }, { "__builtin_choose_expr", RID_CHOOSE_EXPR, 0 }, { "__builtin_offsetof", RID_OFFSETOF, 0 }, { "__builtin_types_compatible_p", RID_TYPES_COMPATIBLE_P, 0 }, { "__builtin_va_arg", RID_VA_ARG, 0 }, { "__complex", RID_COMPLEX, 0 }, { "__complex__", RID_COMPLEX, 0 }, { "__const", RID_CONST, 0 }, { "__const__", RID_CONST, 0 }, { "__extension__", RID_EXTENSION, 0 }, { "__func__", RID_C99_FUNCTION_NAME, 0 }, { "__imag", RID_IMAGPART, 0 }, { "__imag__", RID_IMAGPART, 0 }, { "__inline", RID_INLINE, 0 }, { "__inline__", RID_INLINE, 0 }, { "__label__", RID_LABEL, 0 }, { "__ptrbase", RID_PTRBASE, 0 }, { "__ptrbase__", RID_PTRBASE, 0 }, { "__ptrextent", RID_PTREXTENT, 0 }, { "__ptrextent__", RID_PTREXTENT, 0 }, { "__ptrvalue", RID_PTRVALUE, 0 }, { "__ptrvalue__", RID_PTRVALUE, 0 }, { "__real", RID_REALPART, 0 }, { "__real__", RID_REALPART, 0 }, { "__restrict", RID_RESTRICT, 0 }, { "__restrict__", RID_RESTRICT, 0 }, { "__signed", RID_SIGNED, 0 }, { "__signed__", RID_SIGNED, 0 }, { "__thread", RID_THREAD, 0 }, { "__typeof", RID_TYPEOF, 0 }, { "__typeof__", RID_TYPEOF, 0 }, { "__volatile", RID_VOLATILE, 0 }, { "__volatile__", RID_VOLATILE, 0 }, { "asm", RID_ASM, D_EXT }, { "auto", RID_AUTO, 0 }, { "break", RID_BREAK, 0 }, { "case", RID_CASE, 0 }, { "char", RID_CHAR, 0 }, { "const", RID_CONST, 0 }, { "continue", RID_CONTINUE, 0 }, { "default", RID_DEFAULT, 0 }, { "do", RID_DO, 0 }, { "double", RID_DOUBLE, 0 }, { "else", RID_ELSE, 0 }, { "enum", RID_ENUM, 0 }, { "extern", RID_EXTERN, 0 }, { "float", RID_FLOAT, 0 }, { "for", RID_FOR, 0 }, { "goto", RID_GOTO, 0 }, { "if", RID_IF, 0 }, { "inline", RID_INLINE, D_EXT89 }, { "int", RID_INT, 0 }, { "long", RID_LONG, 0 }, { "register", RID_REGISTER, 0 }, { "restrict", RID_RESTRICT, D_C89 }, { "return", RID_RETURN, 0 }, { "short", RID_SHORT, 0 }, { "signed", RID_SIGNED, 0 }, { "sizeof", RID_SIZEOF, 0 }, { "static", RID_STATIC, 0 }, { "struct", RID_STRUCT, 0 }, { "switch", RID_SWITCH, 0 }, { "typedef", RID_TYPEDEF, 0 }, { "typeof", RID_TYPEOF, D_EXT }, { "union", RID_UNION, 0 }, { "unsigned", RID_UNSIGNED, 0 }, { "void", RID_VOID, 0 }, { "volatile", RID_VOLATILE, 0 }, { "while", RID_WHILE, 0 }, }; #define N_reswords (sizeof reswords / sizeof (struct resword)) /* Table mapping from RID_* constants to yacc token numbers. Unfortunately we have to have entries for all the keywords in all three languages. 
*/ static const short rid_to_yy[RID_MAX] = { /* RID_STATIC */ STATIC, /* RID_UNSIGNED */ TYPESPEC, /* RID_LONG */ TYPESPEC, /* RID_CONST */ TYPE_QUAL, /* RID_EXTERN */ SCSPEC, /* RID_REGISTER */ SCSPEC, /* RID_TYPEDEF */ SCSPEC, /* RID_SHORT */ TYPESPEC, /* RID_INLINE */ SCSPEC, /* RID_VOLATILE */ TYPE_QUAL, /* RID_SIGNED */ TYPESPEC, /* RID_AUTO */ SCSPEC, /* RID_RESTRICT */ TYPE_QUAL, /* C extensions */ /* RID_COMPLEX */ TYPESPEC, /* RID_THREAD */ SCSPEC, /* C++ */ /* RID_FRIEND */ 0, /* RID_VIRTUAL */ 0, /* RID_EXPLICIT */ 0, /* RID_EXPORT */ 0, /* RID_MUTABLE */ 0, /* ObjC */ /* RID_IN */ TYPE_QUAL, /* RID_OUT */ TYPE_QUAL, /* RID_INOUT */ TYPE_QUAL, /* RID_BYCOPY */ TYPE_QUAL, /* RID_BYREF */ TYPE_QUAL, /* RID_ONEWAY */ TYPE_QUAL, /* C */ /* RID_INT */ TYPESPEC, /* RID_CHAR */ TYPESPEC, /* RID_FLOAT */ TYPESPEC, /* RID_DOUBLE */ TYPESPEC, /* RID_VOID */ TYPESPEC, /* RID_ENUM */ ENUM, /* RID_STRUCT */ STRUCT, /* RID_UNION */ UNION, /* RID_IF */ IF, /* RID_ELSE */ ELSE, /* RID_WHILE */ WHILE, /* RID_DO */ DO, /* RID_FOR */ FOR, /* RID_SWITCH */ SWITCH, /* RID_CASE */ CASE, /* RID_DEFAULT */ DEFAULT, /* RID_BREAK */ BREAK, /* RID_CONTINUE */ CONTINUE, /* RID_RETURN */ RETURN, /* RID_GOTO */ GOTO, /* RID_SIZEOF */ SIZEOF, /* C extensions */ /* RID_ASM */ ASM_KEYWORD, /* RID_TYPEOF */ TYPEOF, /* RID_ALIGNOF */ ALIGNOF, /* RID_ATTRIBUTE */ ATTRIBUTE, /* RID_VA_ARG */ VA_ARG, /* RID_EXTENSION */ EXTENSION, /* RID_IMAGPART */ IMAGPART, /* RID_REALPART */ REALPART, /* RID_LABEL */ LABEL, /* RID_PTRBASE */ PTR_BASE, /* RID_PTREXTENT */ PTR_EXTENT, /* RID_PTRVALUE */ PTR_VALUE, /* RID_CHOOSE_EXPR */ CHOOSE_EXPR, /* RID_TYPES_COMPATIBLE_P */ TYPES_COMPATIBLE_P, /* RID_FUNCTION_NAME */ FUNC_NAME, /* RID_PRETTY_FUNCTION_NAME */ FUNC_NAME, /* RID_C99_FUNCTION_NAME */ FUNC_NAME, /* C++ */ /* RID_BOOL */ TYPESPEC, /* RID_WCHAR */ 0, /* RID_CLASS */ 0, /* RID_PUBLIC */ 0, /* RID_PRIVATE */ 0, /* RID_PROTECTED */ 0, /* RID_TEMPLATE */ 0, /* RID_NULL */ 0, /* RID_CATCH */ 0, /* RID_DELETE */ 0, /* RID_FALSE */ 0, /* RID_NAMESPACE */ 0, /* RID_NEW */ 0, /* RID_OFFSETOF */ OFFSETOF, /* RID_OPERATOR */ 0, /* RID_THIS */ 0, /* RID_THROW */ 0, /* RID_TRUE */ 0, /* RID_TRY */ 0, /* RID_TYPENAME */ 0, /* RID_TYPEID */ 0, /* RID_USING */ 0, /* casts */ /* RID_CONSTCAST */ 0, /* RID_DYNCAST */ 0, /* RID_REINTCAST */ 0, /* RID_STATCAST */ 0, /* Objective C */ /* RID_ID */ OBJECTNAME, /* RID_AT_ENCODE */ AT_ENCODE, /* RID_AT_END */ AT_END, /* RID_AT_CLASS */ AT_CLASS, /* RID_AT_ALIAS */ AT_ALIAS, /* RID_AT_DEFS */ AT_DEFS, /* RID_AT_PRIVATE */ AT_PRIVATE, /* RID_AT_PROTECTED */ AT_PROTECTED, /* RID_AT_PUBLIC */ AT_PUBLIC, /* RID_AT_PROTOCOL */ AT_PROTOCOL, /* RID_AT_SELECTOR */ AT_SELECTOR, /* RID_AT_THROW */ AT_THROW, /* RID_AT_TRY */ AT_TRY, /* RID_AT_CATCH */ AT_CATCH, /* RID_AT_FINALLY */ AT_FINALLY, /* RID_AT_SYNCHRONIZED */ AT_SYNCHRONIZED, /* RID_AT_INTERFACE */ AT_INTERFACE, /* RID_AT_IMPLEMENTATION */ AT_IMPLEMENTATION }; static void init_reswords (void) { unsigned int i; tree id; int mask = (flag_isoc99 ? 0 : D_C89) | (flag_no_asm ? (flag_isoc99 ? D_EXT : D_EXT|D_EXT89) : 0); if (!c_dialect_objc ()) mask |= D_OBJC; ridpointers = ggc_calloc ((int) RID_MAX, sizeof (tree)); for (i = 0; i < N_reswords; i++) { /* If a keyword is disabled, do not enter it into the table and so create a canonical spelling that isn't a keyword. 
*/ if (reswords[i].disable & mask) continue; id = get_identifier (reswords[i].word); C_RID_CODE (id) = reswords[i].rid; C_IS_RESERVED_WORD (id) = 1; ridpointers [(int) reswords[i].rid] = id; } } #define NAME(type) cpp_type2name (type) static void yyerror (const char *msgid) { c_parse_error (msgid, last_token, yylval.ttype); } static int yylexname (void) { tree decl; if (C_IS_RESERVED_WORD (yylval.ttype)) { enum rid rid_code = C_RID_CODE (yylval.ttype); { /* Return the canonical spelling for this keyword. */ yylval.ttype = ridpointers[(int) rid_code]; return rid_to_yy[(int) rid_code]; } } decl = lookup_name (yylval.ttype); if (decl) { if (TREE_CODE (decl) == TYPE_DECL) return TYPENAME; } return IDENTIFIER; } static inline int _yylex (void) { get_next: last_token = c_lex (&yylval.ttype); switch (last_token) { case CPP_EQ: return '='; case CPP_NOT: return '!'; case CPP_GREATER: yylval.code = GT_EXPR; return ARITHCOMPARE; case CPP_LESS: yylval.code = LT_EXPR; return ARITHCOMPARE; case CPP_PLUS: yylval.code = PLUS_EXPR; return '+'; case CPP_MINUS: yylval.code = MINUS_EXPR; return '-'; case CPP_MULT: yylval.code = MULT_EXPR; return '*'; case CPP_DIV: yylval.code = TRUNC_DIV_EXPR; return '/'; case CPP_MOD: yylval.code = TRUNC_MOD_EXPR; return '%'; case CPP_AND: yylval.code = BIT_AND_EXPR; return '&'; case CPP_OR: yylval.code = BIT_IOR_EXPR; return '|'; case CPP_XOR: yylval.code = BIT_XOR_EXPR; return '^'; case CPP_RSHIFT: yylval.code = RSHIFT_EXPR; return RSHIFT; case CPP_LSHIFT: yylval.code = LSHIFT_EXPR; return LSHIFT; case CPP_COMPL: return '~'; case CPP_AND_AND: return ANDAND; case CPP_OR_OR: return OROR; case CPP_QUERY: return '?'; case CPP_OPEN_PAREN: return '('; case CPP_EQ_EQ: yylval.code = EQ_EXPR; return EQCOMPARE; case CPP_NOT_EQ: yylval.code = NE_EXPR; return EQCOMPARE; case CPP_GREATER_EQ:yylval.code = GE_EXPR; return ARITHCOMPARE; case CPP_LESS_EQ: yylval.code = LE_EXPR; return ARITHCOMPARE; case CPP_PLUS_EQ: yylval.code = PLUS_EXPR; return ASSIGN; case CPP_MINUS_EQ: yylval.code = MINUS_EXPR; return ASSIGN; case CPP_MULT_EQ: yylval.code = MULT_EXPR; return ASSIGN; case CPP_DIV_EQ: yylval.code = TRUNC_DIV_EXPR; return ASSIGN; case CPP_MOD_EQ: yylval.code = TRUNC_MOD_EXPR; return ASSIGN; case CPP_AND_EQ: yylval.code = BIT_AND_EXPR; return ASSIGN; case CPP_OR_EQ: yylval.code = BIT_IOR_EXPR; return ASSIGN; case CPP_XOR_EQ: yylval.code = BIT_XOR_EXPR; return ASSIGN; case CPP_RSHIFT_EQ: yylval.code = RSHIFT_EXPR; return ASSIGN; case CPP_LSHIFT_EQ: yylval.code = LSHIFT_EXPR; return ASSIGN; case CPP_OPEN_SQUARE: return '['; case CPP_CLOSE_SQUARE: return ']'; case CPP_OPEN_BRACE: return '{'; case CPP_CLOSE_BRACE: return '}'; case CPP_ELLIPSIS: return ELLIPSIS; case CPP_PLUS_PLUS: return PLUSPLUS; case CPP_MINUS_MINUS: return MINUSMINUS; case CPP_DEREF: return POINTSAT; case CPP_DOT: return '.'; /* The following tokens may affect the interpretation of any identifiers following, if doing Objective-C. */ case CPP_COLON: OBJC_NEED_RAW_IDENTIFIER (0); return ':'; case CPP_COMMA: OBJC_NEED_RAW_IDENTIFIER (0); return ','; case CPP_CLOSE_PAREN: OBJC_NEED_RAW_IDENTIFIER (0); return ')'; case CPP_SEMICOLON: OBJC_NEED_RAW_IDENTIFIER (0); return ';'; case CPP_EOF: return 0; case CPP_NAME: return yylexname (); case CPP_AT_NAME: /* This only happens in Objective-C; it must be a keyword. 
*/ return rid_to_yy [(int) C_RID_CODE (yylval.ttype)]; case CPP_NUMBER: case CPP_CHAR: case CPP_WCHAR: return CONSTANT; case CPP_STRING: case CPP_WSTRING: return STRING; case CPP_OBJC_STRING: return OBJC_STRING; /* These tokens are C++ specific (and will not be generated in C mode, but let's be cautious). */ case CPP_SCOPE: case CPP_DEREF_STAR: case CPP_DOT_STAR: case CPP_MIN_EQ: case CPP_MAX_EQ: case CPP_MIN: case CPP_MAX: /* These tokens should not survive translation phase 4. */ case CPP_HASH: case CPP_PASTE: error ("syntax error at '%s' token", NAME(last_token)); goto get_next; default: abort (); } /* NOTREACHED */ } static int yylex (void) { int r; timevar_push (TV_LEX); r = _yylex(); timevar_pop (TV_LEX); return r; } /* Function used when yydebug is set, to print a token in more detail. */ static void yyprint (FILE *file, int yychar, YYSTYPE yyl) { tree t = yyl.ttype; fprintf (file, " [%s]", NAME(last_token)); switch (yychar) { case IDENTIFIER: case TYPENAME: case OBJECTNAME: case TYPESPEC: case TYPE_QUAL: case SCSPEC: case STATIC: if (IDENTIFIER_POINTER (t)) fprintf (file, " `%s'", IDENTIFIER_POINTER (t)); break; case CONSTANT: fprintf (file, " %s", GET_MODE_NAME (TYPE_MODE (TREE_TYPE (t)))); if (TREE_CODE (t) == INTEGER_CST) { fputs (" ", file); fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX, TREE_INT_CST_HIGH (t), TREE_INT_CST_LOW (t)); } break; } } /* This is not the ideal place to put this, but we have to get it out of c-lex.c because cp/lex.c has its own version. */ /* Parse the file. */ void c_parse_file (void) { yyparse (); if (malloced_yyss) { free (malloced_yyss); free (malloced_yyvs); malloced_yyss = 0; } } /* Type information for c-parse.in. Copyright (C) 2004 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* This file is machine generated. Do not edit. */ /* GC roots. */ const struct ggc_root_tab gt_ggc_r_gt_c_parse_h[] = { { &declspec_stack, 1, sizeof (declspec_stack), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &all_prefix_attributes, 1, sizeof (all_prefix_attributes), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &prefix_attributes, 1, sizeof (prefix_attributes), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, { &current_declspecs, 1, sizeof (current_declspecs), &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node }, LAST_GGC_ROOT_TAB };
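/* Illustrative sketch (not part of the generated parser; compiled out
   with #if 0): a minimal, self-contained model of how the keyword
   disable mask used by init_reswords() above filters the reswords[]
   table.  The miniature table, the flag values and main() are
   hypothetical, and the mask computation is simplified (the real one
   also folds in D_EXT89 and D_OBJC).  */
#if 0
#include <stdio.h>

#define SK_D_C89 0x01 /* keyword not in C89 */
#define SK_D_EXT 0x02 /* keyword is a GCC extension */

struct sketch_resword { const char *word; unsigned int disable; };

static const struct sketch_resword sketch_reswords[] = {
  { "asm",      SK_D_EXT },  /* extension: dropped by -fno-asm */
  { "restrict", SK_D_C89 },  /* C99 keyword: dropped outside C99 */
  { "while",    0        },  /* always a keyword */
};

int main (void)
{
  /* Pretend the driver passed -std=c89 -fno-asm.  */
  int sk_flag_isoc99 = 0, sk_flag_no_asm = 1;
  unsigned int mask = (sk_flag_isoc99 ? 0 : SK_D_C89)
		      | (sk_flag_no_asm ? SK_D_EXT : 0);
  unsigned int i;

  /* Mirror of the init_reswords() loop: an entry whose disable bits
     intersect the mask is skipped, so its spelling stays an ordinary
     identifier instead of becoming a reserved word.  */
  for (i = 0; i < sizeof sketch_reswords / sizeof sketch_reswords[0]; i++)
    printf ("%-8s -> %s\n", sketch_reswords[i].word,
	    (sketch_reswords[i].disable & mask)
	    ? "skipped (plain identifier)" : "registered as keyword");
  return 0;
}
#endif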